/*-
* Copyright (c) 2000 Doug Rabson
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $FreeBSD: src/sys/pci/agp.c,v 1.12 2001/05/19 01:28:07 alfred Exp $
*/
/*
* Copyright (c) 2001 Wasabi Systems, Inc.
* All rights reserved.
*
* Written by Frank van der Linden for Wasabi Systems, Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. All advertising materials mentioning features or use of this software
* must display the following acknowledgement:
* This product includes software developed for the NetBSD Project by
* Wasabi Systems, Inc.
* 4. The name of Wasabi Systems, Inc. may not be used to endorse
* or promote products derived from this software without specific prior
* written permission.
*
* THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
* TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL WASABI SYSTEMS, INC
* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
/* First find the vendor. */
for (ap = agp_products; ap->ap_attach != NULL; ap++) {
if (PCI_VENDOR(pa->pa_id) == ap->ap_vendor)
break;
}
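/* A NULL ap_attach entry is the end-of-table sentinel: no driver claims this vendor. */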
if (ap->ap_attach == NULL)
return (NULL);
/* Now find the product within the vendor's domain. */
for (; ap->ap_attach != NULL; ap++) {
if (PCI_VENDOR(pa->pa_id) != ap->ap_vendor) {
/* Ran out of this vendor's section of the table. */
return (NULL);
}
if (ap->ap_product == PCI_PRODUCT(pa->pa_id)) {
/* Exact match. */
break;
}
if (ap->ap_product == (uint32_t) -1) {
/* Wildcard match. */
break;
}
}
if (ap->ap_attach == NULL)
return (NULL);
/* Now let the product-specific driver filter the match. */
if (ap->ap_match != NULL && (*ap->ap_match)(pa) == 0)
return (NULL);
/*
* Work out an upper bound for AGP memory allocation. This
* uses a heuristic table from the Linux driver.
*/
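/*
* physmem counts pages, so shifting right by (20 - PAGE_SHIFT)
* converts the page count to megabytes.
*/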
memsize = physmem >> (20 - PAGE_SHIFT); /* memsize is in MB */
for (i = 0; i < agp_max_size; i++) {
if (memsize <= agp_max[i][0])
break;
}
if (i == agp_max_size)
i = agp_max_size - 1;
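/* agp_max[][1] is in megabytes; shift by 20 to get bytes. */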
sc->as_maxmem = agp_max[i][1] << 20U;
/*
* The mutex is used to prevent re-entry to
* agp_generic_bind_memory() since that function can sleep.
*/
mutex_init(&sc->as_mtx, MUTEX_DEFAULT, IPL_NONE);
TAILQ_INIT(&sc->as_memory);
ret = (*ap->ap_attach)(parent, self, pa);
if (ret == 0)
aprint_normal(": aperture at 0x%lx, size 0x%lx\n",
(unsigned long)sc->as_apaddr,
(unsigned long)AGP_GET_APERTURE(sc));
else
sc->as_chipc = NULL;
if (!pmf_device_register(self, NULL, agp_resume))
aprint_error_dev(self, "couldn't establish power handler\n");
}
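/*
* AGP v2 mode negotiation: each field of the command word below is
* the weakest capability common to the target status (tstatus), the
* master status (mstatus) and the mode requested by the caller.
*/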
/* Set RQ to the min of mode, tstatus and mstatus */
rq = AGP_MODE_GET_RQ(mode);
if (AGP_MODE_GET_RQ(tstatus) < rq)
rq = AGP_MODE_GET_RQ(tstatus);
if (AGP_MODE_GET_RQ(mstatus) < rq)
rq = AGP_MODE_GET_RQ(mstatus);
/* Set SBA if all three can deal with SBA */
sba = (AGP_MODE_GET_SBA(tstatus)
& AGP_MODE_GET_SBA(mstatus)
& AGP_MODE_GET_SBA(mode));
/* Similar for FW */
fw = (AGP_MODE_GET_FW(tstatus)
& AGP_MODE_GET_FW(mstatus)
& AGP_MODE_GET_FW(mode));
/* Figure out the max rate */
rate = (AGP_MODE_GET_RATE(tstatus)
& AGP_MODE_GET_RATE(mstatus)
& AGP_MODE_GET_RATE(mode));
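/*
* The rate field is a bit mask of supported transfer rates, so the
* AND above leaves only the rates every party supports; pick the
* fastest of those.
*/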
if (rate & AGP_MODE_V2_RATE_4x)
rate = AGP_MODE_V2_RATE_4x;
else if (rate & AGP_MODE_V2_RATE_2x)
rate = AGP_MODE_V2_RATE_2x;
else
rate = AGP_MODE_V2_RATE_1x;
/* Construct the new mode word and tell the hardware */
command = AGP_MODE_SET_RQ(0, rq);
command = AGP_MODE_SET_SBA(command, sba);
command = AGP_MODE_SET_FW(command, fw);
command = AGP_MODE_SET_RATE(command, rate);
command = AGP_MODE_SET_AGP(command, 1);
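/*
* Write the negotiated command word to both ends of the link: the
* AGP target (the chipset, sc->as_tag) and the AGP master (the
* graphics device, pa->pa_tag).
*/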
pci_conf_write(sc->as_pc, sc->as_tag,
sc->as_capoff + PCI_AGP_COMMAND, command);
pci_conf_write(pa->pa_pc, pa->pa_tag, capoff + PCI_AGP_COMMAND,
command);
return 0;
}
static int
agp_generic_enable_v3(struct agp_softc *sc, const struct pci_attach_args *pa,
int capoff, u_int32_t mode)
{
pcireg_t tstatus, mstatus;
pcireg_t command;
int rq, sba, fw, rate, arqsz, cal;
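/*
* tstatus and mstatus are the AGP status registers of the target and
* master; read them up front so the negotiation below works on real
* values (PCI_AGP_STATUS names the status register in the same
* capability block as PCI_AGP_COMMAND used elsewhere).
*/
tstatus = pci_conf_read(sc->as_pc, sc->as_tag,
    sc->as_capoff + PCI_AGP_STATUS);
mstatus = pci_conf_read(pa->pa_pc, pa->pa_tag,
    capoff + PCI_AGP_STATUS);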
/* Set RQ to the min of mode, tstatus and mstatus */
rq = AGP_MODE_GET_RQ(mode);
if (AGP_MODE_GET_RQ(tstatus) < rq)
rq = AGP_MODE_GET_RQ(tstatus);
if (AGP_MODE_GET_RQ(mstatus) < rq)
rq = AGP_MODE_GET_RQ(mstatus);
/*
* ARQSZ - use the largest of the three values; the mode
* argument is not allowed to lower what the hardware reports.
*/
arqsz = AGP_MODE_GET_ARQSZ(mode);
if (AGP_MODE_GET_ARQSZ(tstatus) > arqsz)
arqsz = AGP_MODE_GET_ARQSZ(tstatus);
if (AGP_MODE_GET_ARQSZ(mstatus) > arqsz)
arqsz = AGP_MODE_GET_ARQSZ(mstatus);
/* Calibration cycle - don't allow override by mode register */
cal = AGP_MODE_GET_CAL(tstatus);
if (AGP_MODE_GET_CAL(mstatus) < cal)
cal = AGP_MODE_GET_CAL(mstatus);
/* SBA must be supported for AGP v3. */
sba = 1;
/* Set FW if all three support it. */
fw = (AGP_MODE_GET_FW(tstatus)
& AGP_MODE_GET_FW(mstatus)
& AGP_MODE_GET_FW(mode));
/* Figure out the max rate */
rate = (AGP_MODE_GET_RATE(tstatus)
& AGP_MODE_GET_RATE(mstatus)
& AGP_MODE_GET_RATE(mode));
if (rate & AGP_MODE_V3_RATE_8x)
rate = AGP_MODE_V3_RATE_8x;
else
rate = AGP_MODE_V3_RATE_4x;
if (offset < start
|| (offset & (AGP_PAGE_SIZE - 1)) != 0
|| offset > end
|| mem->am_size > (end - offset)) {
aprint_error_dev(sc->as_dev,
"binding memory at bad offset %#lx\n",
(unsigned long) offset);
mutex_exit(&sc->as_mtx);
return EINVAL;
}
/*
* XXXfvdl
* The memory here needs to be directly accessible from the
* AGP video card, so it should be allocated using bus_dma.
* However, it need not be contiguous, since individual pages
* are translated using the GATT.
*
* Using a large chunk of contiguous memory may get in the way
* of other subsystems that may need one, so we try to be friendly
* and ask for allocation in chunks of a minimum of 8 pages
* of contiguous memory on average, falling back to 4, 2 and 1
* if really needed. Larger chunks are preferred, since allocating
* a bus_dma_segment per page would be overkill.
*/
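/*
* contigpages is left at zero only if every chunk size, down to
* single pages, failed to allocate.
*/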
if (contigpages == 0) {
mutex_exit(&sc->as_mtx);
return ENOMEM;
}
/*
* Bind the individual pages and flush the chipset's
* TLB.
*/
done = 0;
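/* done counts the bytes of the region bound so far. */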
for (i = 0; i < mem->am_dmamap->dm_nsegs; i++) {
seg = &mem->am_dmamap->dm_segs[i];
/*
* Install entries in the GATT, making sure that if
* AGP_PAGE_SIZE < PAGE_SIZE and mem->am_size is not
* aligned to PAGE_SIZE, we don't modify too many GATT
* entries.
*/
for (j = 0; j < seg->ds_len && (done + j) < mem->am_size;
j += AGP_PAGE_SIZE) {
pa = seg->ds_addr + j;
AGP_DPF(("binding offset %#lx to pa %#lx\n",
(unsigned long)(offset + done + j),
(unsigned long)pa));
error = AGP_BIND_PAGE(sc, offset + done + j, pa);
if (error) {
/*
* Bail out: reverse all the mappings made so far,
* drop the lock and hand the error back.
*/
for (k = 0; k < done + j; k += AGP_PAGE_SIZE)
AGP_UNBIND_PAGE(sc, offset + k);
mutex_exit(&sc->as_mtx);
return error;
}
}
done += seg->ds_len;
}
/*
* Flush the CPU cache since we are providing a new mapping
* for these pages.
*/
agp_flush_cache();
/*
* Make sure the chipset gets the new mappings.
*/
AGP_FLUSH_TLB(sc);
mem->am_offset = offset;
mem->am_is_bound = 1;
mutex_exit(&sc->as_mtx);
return 0;
}
int
agp_generic_unbind_memory(struct agp_softc *sc, struct agp_memory *mem)
{
int i;
mutex_enter(&sc->as_mtx);
if (!mem->am_is_bound) {
aprint_error_dev(sc->as_dev, "memory is not bound\n");
mutex_exit(&sc->as_mtx);
return EINVAL;
}
/*
* Unbind the individual pages and flush the chipset's
* TLB. Unwire the pages so they can be swapped.
*/
for (i = 0; i < mem->am_size; i += AGP_PAGE_SIZE)
AGP_UNBIND_PAGE(sc, mem->am_offset + i);
static int
agpopen(dev_t dev, int oflags, int devtype, struct lwp *l)
{
struct agp_softc *sc = device_lookup_private(&agp_cd, AGPUNIT(dev));
if (sc == NULL)
return ENXIO;
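/* as_chipc is left NULL when the chipset-specific attach failed. */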
if (sc->as_chipc == NULL)
return ENXIO;
if (sc->as_isopen)
return EBUSY;
sc->as_isopen = 1;
return 0;
}
static int
agpclose(dev_t dev, int fflag, int devtype, struct lwp *l)
{
struct agp_softc *sc = device_lookup_private(&agp_cd, AGPUNIT(dev));
struct agp_memory *mem;
if (sc == NULL)
return ENODEV;
/*
* Clear the GATT and force release on last close
*/
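/*
* Only a user-space acquisition is torn down here; the as_state
* check leaves any kernel-owned acquisition untouched.
*/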
if (sc->as_state == AGP_ACQUIRE_USER) {
while ((mem = TAILQ_FIRST(&sc->as_memory))) {
if (mem->am_is_bound) {
printf("agpclose: mem %d is bound\n",
mem->am_id);
AGP_UNBIND_MEMORY(sc, mem);
}
/*
* XXX it is not documented, but if the protocol allows
* allocate->acquire->bind, memory ranges allocated by the
* kernel could end up here, and we shouldn't free those.
* We'd have to keep track of each memory range's owner.
* The kernel API is not used yet, so we get away with
* freeing everything.
*/
AGP_FREE_MEMORY(sc, mem);
}
agp_release_helper(sc, AGP_ACQUIRE_USER);
}
sc->as_isopen = 0;
return 0;
}
static int
agpioctl(dev_t dev, u_long cmd, void *data, int fflag, struct lwp *l)
{
struct agp_softc *sc = device_lookup_private(&agp_cd, AGPUNIT(dev));
switch (cmd) {
case AGPIOC_INFO:
return agp_info_user(sc, (agp_info *) data);
case AGPIOC_ACQUIRE:
return agp_acquire_helper(sc, AGP_ACQUIRE_USER);
case AGPIOC_RELEASE:
return agp_release_helper(sc, AGP_ACQUIRE_USER);
case AGPIOC_SETUP:
return agp_setup_user(sc, (agp_setup *)data);
#ifdef __x86_64__
{
/*
* Handle the paddr_t change from 32-bit (non-PAE kernels)
* to 64-bit.
*/
#define AGPIOC_OALLOCATE _IOWR(AGPIOC_BASE, 6, agp_oallocate)
typedef struct _agp_oallocate {
int key; /* tag of allocation */
size_t pg_count; /* number of pages */
uint32_t type; /* 0 == normal, other devspec */
u_long physical; /* device specific (some devices
* need a phys address of the
* actual page behind the gatt
* table) */
} agp_oallocate;
case AGPIOC_OALLOCATE: {
int ret;
agp_allocate aga;
agp_oallocate *oaga = data;
int
agp_alloc_dmamem(bus_dma_tag_t tag, size_t size, int flags,
bus_dmamap_t *mapp, void **vaddr, bus_addr_t *baddr,
bus_dma_segment_t *seg, int nseg, int *rseg)