/*-
* Copyright (c) 2001, 2002 The NetBSD Foundation, Inc.
* All rights reserved.
*
* This code is derived from software contributed to The NetBSD Foundation
* by Matthew Fredette.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
* ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
* TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
/*
* Copyright (c) 1998-2004 Michael Shalayeff
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR OR HIS RELATIVES BE LIABLE FOR ANY DIRECT,
* INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF MIND, USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
* STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
* IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
* THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* Mappings are established in HPPA_FLEX_SIZE units,
* either with BTLB, or regular mappings of the whole area.
*/
for (pa = bpa ; size != 0; pa = epa) {
flex = HPPA_FLEX(pa);
spa = pa & HPPA_FLEX_MASK;
epa = spa + HPPA_FLEX_SIZE; /* may wrap to 0... */
size -= uimin(size, HPPA_FLEX_SIZE - (pa - spa));
		/* Do we need a new mapping? */
if (bmm[flex / 32] & (1 << (flex % 32))) {
DPRINTF(("%s: already mapped flex=%x, mask=%x\n",
__func__, flex, bmm[flex / 32]));
continue;
}
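		/*
		 * A sketch of the elided mapping step, assuming the
		 * "regular mappings" case from the comment above (the BTLB
		 * path is not shown): enter uncached kernel mappings for
		 * the whole HPPA_FLEX_SIZE unit.  I/O space is mapped
		 * VA == PA, which is what mbus_vaddr() below relies on.
		 * Finally record the unit in the bitmap so the check above
		 * short-circuits later requests into the same unit.
		 */
		for (; spa < epa; spa += PAGE_SIZE)
			pmap_kenter_pa((vaddr_t)spa, (paddr_t)spa,
			    VM_PROT_READ | VM_PROT_WRITE, PMAP_NOCACHE);
		pmap_update(pmap_kernel());

		bmm[flex / 32] |= (1 << (flex % 32));
	}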
void
mbus_barrier(void *v, bus_space_handle_t h, bus_size_t o, bus_size_t l, int op)
{
sync_caches();
}
void*
mbus_vaddr(void *v, bus_space_handle_t h)
{
/*
* We must only be called with addresses in I/O space.
*/
KASSERT(h >= HPPA_IOSPACE);
return (void*)h;
}
paddr_t
mbus_mmap(void *v, bus_addr_t addr, off_t off, int prot, int flags)
{
return btop(addr + off);
}
/*
* Common function for DMA map creation. May be called by bus-specific DMA map
* creation functions.
*/
int
mbus_dmamap_create(void *v, bus_size_t size, int nsegments, bus_size_t maxsegsz,
bus_size_t boundary, int flags, bus_dmamap_t *dmamp)
{
struct hppa_bus_dmamap *map;
/*
* Allocate and initialize the DMA map. The end of the map is a
* variable-sized array of segments, so we allocate enough room for
* them in one shot.
*
* Note we don't preserve the WAITOK or NOWAIT flags. Preservation of
* ALLOCNOW notifies others that we've reserved these resources, and
* they are not to be freed.
*
* The bus_dmamap_t includes one bus_dma_segment_t, hence the
* (nsegments - 1).
*/
map = kmem_zalloc(_bus_dmamap_mapsize(nsegments),
(flags & BUS_DMA_NOWAIT) ? KM_NOSLEEP : KM_SLEEP);
if (!map)
return ENOMEM;
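	/*
	 * A sketch of the remaining initialization, assuming the usual
	 * bus_dma pattern and the _dm_* fields referenced elsewhere in
	 * this file (_dm_flags is an assumed field backing the flag note
	 * above): record the creation parameters and hand back an empty,
	 * unloaded map.
	 */
	map->_dm_size = size;
	map->_dm_segcnt = nsegments;
	map->_dm_maxsegsz = maxsegsz;
	map->_dm_boundary = boundary;
	map->_dm_flags = flags & ~(BUS_DMA_WAITOK | BUS_DMA_NOWAIT);
	map->dm_mapsize = 0;
	map->dm_nsegs = 0;

	*dmamp = map;
	return 0;
}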
/*
* Common function for DMA map destruction. May be called by bus-specific DMA
* map destruction functions.
*/
void
mbus_dmamap_destroy(void *v, bus_dmamap_t map)
{
/*
* If the handle contains a valid mapping, unload it.
*/
if (map->dm_mapsize != 0)
mbus_dmamap_unload(v, map);
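	/*
	 * A sketch of the remainder: release the storage obtained with
	 * kmem_zalloc() in mbus_dmamap_create(), recomputing its size from
	 * the stored segment count (assumed to live in _dm_segcnt, as used
	 * by the load functions below).
	 */
	kmem_free(map, _bus_dmamap_mapsize(map->_dm_segcnt));
}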
/*
* Like bus_dmamap_load(), but for mbufs.
*/
int
mbus_dmamap_load_mbuf(void *v, bus_dmamap_t map, struct mbuf *m0,
int flags)
{
vaddr_t lastaddr;
int seg, error, first;
struct mbuf *m;
/*
* Make sure that on error condition we return "no valid mappings."
*/
map->dm_mapsize = 0;
map->dm_nsegs = 0;
KASSERT(m0->m_flags & M_PKTHDR);
if (m0->m_pkthdr.len > map->_dm_size)
return EINVAL;
first = 1;
seg = 0;
error = 0;
for (m = m0; m != NULL && error == 0; m = m->m_next) {
if (m->m_len == 0)
continue;
error = _bus_dmamap_load_buffer(NULL, map, m->m_data, m->m_len,
vmspace_kernel(), flags, &lastaddr, &seg, first);
first = 0;
}
if (error == 0) {
map->dm_mapsize = m0->m_pkthdr.len;
map->dm_nsegs = seg + 1;
}
return error;
}
/*
* Like bus_dmamap_load(), but for uios.
*/
int
mbus_dmamap_load_uio(void *v, bus_dmamap_t map, struct uio *uio,
int flags)
{
vaddr_t lastaddr;
int seg, i, error, first;
bus_size_t minlen, resid;
struct iovec *iov;
void *addr;
/*
* Make sure that on error condition we return "no valid mappings."
*/
map->dm_mapsize = 0;
map->dm_nsegs = 0;
resid = uio->uio_resid;
iov = uio->uio_iov;
first = 1;
seg = 0;
error = 0;
for (i = 0; i < uio->uio_iovcnt && resid != 0 && error == 0; i++) {
/*
* Now at the first iovec to load. Load each iovec
* until we have exhausted the residual count.
*/
		minlen = uimin(resid, iov[i].iov_len);
addr = (void *)iov[i].iov_base;
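		/*
		 * A sketch of the elided step, mirroring the mbuf loader
		 * above: hand each iovec to the common buffer loader, using
		 * the uio's own vmspace so user addresses resolve through
		 * the right pmap.
		 */
		error = _bus_dmamap_load_buffer(NULL, map, addr, minlen,
		    uio->uio_vmspace, flags, &lastaddr, &seg, first);
		first = 0;

		resid -= minlen;
	}
	if (error == 0) {
		map->dm_mapsize = uio->uio_resid;
		map->dm_nsegs = seg + 1;
	}
	return error;
}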
/*
* Like bus_dmamap_load(), but for raw memory allocated with
* bus_dmamem_alloc().
*/
int
mbus_dmamap_load_raw(void *v, bus_dmamap_t map, bus_dma_segment_t *segs,
int nsegs, bus_size_t size, int flags)
{
struct pglist *mlist;
struct vm_page *m;
paddr_t pa, pa_next;
bus_size_t mapsize;
bus_size_t pagesz = PAGE_SIZE;
int seg;
/*
* Make sure that on error condition we return "no valid mappings".
*/
map->dm_nsegs = 0;
map->dm_mapsize = 0;
/* Load the allocated pages. */
mlist = segs[0]._ds_mlist;
pa_next = 0;
seg = -1;
mapsize = size;
for (m = TAILQ_FIRST(mlist); m != NULL; m = TAILQ_NEXT(m,pageq.queue)) {
if (size == 0)
panic("mbus_dmamap_load_raw: size botch");
pa = VM_PAGE_TO_PHYS(m);
if (pa != pa_next) {
if (++seg >= map->_dm_segcnt)
panic("mbus_dmamap_load_raw: nsegs botch");
map->dm_segs[seg].ds_addr = pa;
map->dm_segs[seg].ds_len = 0;
}
pa_next = pa + PAGE_SIZE;
if (size < pagesz)
pagesz = size;
map->dm_segs[seg].ds_len += pagesz;
size -= pagesz;
}
/* Make the map truly valid. */
map->dm_nsegs = seg + 1;
map->dm_mapsize = mapsize;
return 0;
}
/*
 * Unload a DMA map.
*/
void
mbus_dmamap_unload(void *v, bus_dmamap_t map)
{
/*
	 * Neither mbus_dmamap_load() nor mbus_dmamap_load_raw() allocates
	 * any per-load resources, so there is nothing to release here;
	 * just invalidate the mapping below.
*/
/* Mark the mappings as invalid. */
map->dm_mapsize = 0;
map->dm_nsegs = 0;
}
void
mbus_dmamap_sync(void *v, bus_dmamap_t map, bus_addr_t offset, bus_size_t len,
int ops)
{
int i;
/*
* Mixing of PRE and POST operations is not allowed.
*/
if ((ops & (BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE)) != 0 &&
(ops & (BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE)) != 0)
panic("mbus_dmamap_sync: mix PRE and POST");
#ifdef DIAGNOSTIC
if (offset >= map->dm_mapsize)
panic("mbus_dmamap_sync: bad offset %lu (map size is %lu)",
offset, map->dm_mapsize);
if ((offset + len) > map->dm_mapsize)
panic("mbus_dmamap_sync: bad length");
#endif
/*
* For a virtually-indexed write-back cache, we need to do the
* following things:
*
* PREREAD -- Invalidate the D-cache. We do this here in case a
* write-back is required by the back-end.
*
* PREWRITE -- Write-back the D-cache. Note that if we are doing
* a PREREAD|PREWRITE, we can collapse the whole thing into a
* single Wb-Inv.
*
* POSTREAD -- Nothing.
*
* POSTWRITE -- Nothing.
*/
for (i = 0; len != 0 && i < map->dm_nsegs; i++) {
if (offset >= map->dm_segs[i].ds_len)
offset -= map->dm_segs[i].ds_len;
		else {
			bus_size_t l = map->dm_segs[i].ds_len - offset;

			if (l > len)
				l = len;

			/*
			 * Write this segment's covered range back from the
			 * virtually-indexed D-cache and invalidate it.  The
			 * fdcache() call on the VA recorded at load time is
			 * an assumption; the original per-segment flush was
			 * elided from this excerpt.
			 */
			fdcache(HPPA_SID_KERNEL,
			    map->dm_segs[i]._ds_va + offset, l);
			len -= l;
			offset = 0;
		}
	}

	/* For either direction, drain outstanding DMA and synchronize. */
	__asm __volatile ("sync\n\tsyncdma\n\tsync\n\t"
	    "nop\n\tnop\n\tnop\n\tnop\n\tnop\n\tnop\n\tnop" ::: "memory");
}
/*
* Common function for DMA-safe memory allocation. May be called by bus-
* specific DMA memory allocation functions.
*/
int
mbus_dmamem_alloc(void *v, bus_size_t size, bus_size_t alignment,
bus_size_t boundary, bus_dma_segment_t *segs, int nsegs, int *rsegs,
int flags)
{
paddr_t low, high;
struct pglist *mlist;
struct vm_page *m;
paddr_t pa, pa_next;
int seg;
int error;
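	/*
	 * A sketch of the elided allocation, mirroring the page-list walk
	 * in mbus_dmamap_load_raw() above.  The unlimited low/high range is
	 * an assumption; a real bus may need to constrain the physical
	 * addresses it hands out.
	 */
	size = round_page(size);
	low = 0;
	high = ~(paddr_t)0;

	mlist = kmem_alloc(sizeof(*mlist),
	    (flags & BUS_DMA_NOWAIT) ? KM_NOSLEEP : KM_SLEEP);
	if (mlist == NULL)
		return ENOMEM;

	/* Allocate physical pages from the VM system. */
	error = uvm_pglistalloc(size, low, high, alignment, boundary,
	    mlist, nsegs, (flags & BUS_DMA_NOWAIT) == 0);
	if (error) {
		kmem_free(mlist, sizeof(*mlist));
		return error;
	}

	/* Describe the pages as segments, merging contiguous ones. */
	pa_next = 0;
	seg = -1;
	for (m = TAILQ_FIRST(mlist); m != NULL;
	    m = TAILQ_NEXT(m, pageq.queue)) {
		pa = VM_PAGE_TO_PHYS(m);
		if (pa != pa_next) {
			if (++seg >= nsegs) {
				uvm_pglistfree(mlist);
				kmem_free(mlist, sizeof(*mlist));
				return ENOMEM;
			}
			segs[seg].ds_addr = pa;
			segs[seg].ds_len = PAGE_SIZE;
		} else
			segs[seg].ds_len += PAGE_SIZE;
		pa_next = pa + PAGE_SIZE;
	}
	*rsegs = seg + 1;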
/*
* Simply keep a pointer around to the linked list, so
	 * bus_dmamem_free() can return it.
*
* Nobody should touch the pageq.queue fields while these pages are in
* our custody.
*/
segs[0]._ds_mlist = mlist;
/*
* We now have physical pages, but no kernel virtual addresses yet.
	 * These may be allocated later by bus_dmamem_map().
*/
return 0;
}
void
mbus_dmamem_free(void *v, bus_dma_segment_t *segs, int nsegs)
{
struct pglist *mlist;
/*
* Return the list of physical pages back to the VM system.
*/
mlist = segs[0]._ds_mlist;
if (mlist == NULL)
return;
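	/*
	 * A sketch of the remainder: hand the pages back to UVM and release
	 * the pglist header that mbus_dmamem_alloc() allocated.
	 */
	uvm_pglistfree(mlist);
	kmem_free(mlist, sizeof(*mlist));
}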
/*
* Common function for mapping DMA-safe memory. May be called by bus-specific
* DMA memory map functions.
*/
int
mbus_dmamem_map(void *v, bus_dma_segment_t *segs, int nsegs, size_t size,
void **kvap, int flags)
{
bus_addr_t addr;
vaddr_t va;
int curseg;
u_int pmflags =
hppa_cpu_hastlbu_p() ? PMAP_NOCACHE : 0;
const uvm_flag_t kmflags =
(flags & BUS_DMA_NOWAIT) != 0 ? UVM_KMF_NOWAIT : 0;
size = round_page(size);
/* Get a chunk of kernel virtual space. */
va = uvm_km_alloc(kernel_map, size, 0, UVM_KMF_VAONLY | kmflags);
if (__predict_false(va == 0))
return ENOMEM;
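	/*
	 * A sketch of the remainder: return the KVA to the caller and enter
	 * (possibly uncached, per pmflags above) mappings for every page of
	 * every segment, then push the pmap updates out.
	 */
	*kvap = (void *)va;

	for (curseg = 0; curseg < nsegs; curseg++) {
		for (addr = segs[curseg].ds_addr;
		    addr < segs[curseg].ds_addr + segs[curseg].ds_len;
		    addr += PAGE_SIZE, va += PAGE_SIZE) {
			KASSERT(size != 0);
			size -= PAGE_SIZE;
			pmap_kenter_pa(va, addr,
			    VM_PROT_READ | VM_PROT_WRITE, pmflags);
		}
	}
	pmap_update(pmap_kernel());

	return 0;
}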
/*
* Common function for unmapping DMA-safe memory. May be called by bus-
* specific DMA memory unmapping functions.
*/
void
mbus_dmamem_unmap(void *v, void *kva, size_t size)
{
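	/*
	 * A sketch of the body: undo what mbus_dmamem_map() does above by
	 * removing the kernel mappings and returning the VA range to UVM.
	 */
	KASSERT(((vaddr_t)kva & PGOFSET) == 0);

	size = round_page(size);
	pmap_kremove((vaddr_t)kva, size);
	pmap_update(pmap_kernel());
	uvm_km_free(kernel_map, (vaddr_t)kva, size, UVM_KMF_VAONLY);
}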
/*
* Common function for mmap(2)'ing DMA-safe memory. May be called by bus-
* specific DMA mmap(2)'ing functions.
*/
paddr_t
mbus_dmamem_mmap(void *v, bus_dma_segment_t *segs, int nsegs,
off_t off, int prot, int flags)
{
int i;
for (i = 0; i < nsegs; i++) {
KASSERT((off & PGOFSET) == 0);
KASSERT((segs[i].ds_addr & PGOFSET) == 0);
KASSERT((segs[i].ds_len & PGOFSET) == 0);
if (off >= segs[i].ds_len) {
off -= segs[i].ds_len;
continue;
}
return btop((u_long)segs[i].ds_addr + off);
}
/* Page not found. */
return -1;
}
int
_bus_dmamap_load_buffer(bus_dma_tag_t t, bus_dmamap_t map, void *buf,
bus_size_t buflen, struct vmspace *vm, int flags, paddr_t *lastaddrp,
int *segp, int first)
{
bus_size_t sgsize;
bus_addr_t curaddr, lastaddr, baddr, bmask;
vaddr_t vaddr = (vaddr_t)buf;
int seg;
pmap_t pmap;
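	/*
	 * A sketch of the elided setup: resolve the pmap for the address
	 * space being loaded from, pick up the running physical address
	 * from the caller, and precompute the boundary mask used below.
	 */
	pmap = vm_map_pmap(&vm->vm_map);
	lastaddr = *lastaddrp;
	bmask = ~(map->_dm_boundary - 1);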
for (seg = *segp; buflen > 0; ) {
bool ok __diagused;
/*
* Get the physical address for this segment.
*/
ok = pmap_extract(pmap, vaddr, &curaddr);
KASSERT(ok == true);
/*
* Compute the segment size, and adjust counts.
*/
sgsize = PAGE_SIZE - ((u_long)vaddr & PGOFSET);
if (buflen < sgsize)
sgsize = buflen;
/*
* Make sure we don't cross any boundaries.
*/
if (map->_dm_boundary > 0) {
baddr = (curaddr + map->_dm_boundary) & bmask;
if (sgsize > (baddr - curaddr))
sgsize = (baddr - curaddr);
}
/*
* Insert chunk into a segment, coalescing with previous
* segment if possible.
*/
if (first) {
map->dm_segs[seg].ds_addr = curaddr;
map->dm_segs[seg].ds_len = sgsize;
map->dm_segs[seg]._ds_va = vaddr;
first = 0;
} else {
if (curaddr == lastaddr &&
(map->dm_segs[seg].ds_len + sgsize) <=
map->_dm_maxsegsz &&
(map->_dm_boundary == 0 ||
(map->dm_segs[seg].ds_addr & bmask) ==
(curaddr & bmask)))
map->dm_segs[seg].ds_len += sgsize;
else {
if (++seg >= map->_dm_segcnt)
break;
map->dm_segs[seg].ds_addr = curaddr;
map->dm_segs[seg].ds_len = sgsize;
map->dm_segs[seg]._ds_va = vaddr;
}
}
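		/*
		 * A sketch of the elided loop epilogue and function tail:
		 * advance the cursors, then report progress back to the
		 * caller, flagging EFBIG if the buffer did not fit in the
		 * available segments (the break above leaves buflen
		 * non-zero).
		 */
		lastaddr = curaddr + sgsize;
		vaddr += sgsize;
		buflen -= sgsize;
	}

	*segp = seg;
	*lastaddrp = lastaddr;

	if (buflen != 0)
		return EFBIG;
	return 0;
}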
/*
* Map all of Fixed Physical, Local Broadcast, and Global Broadcast
* space. These spaces are adjacent and in that order and run to the
* end of the address space.
*/
/*
* XXX fredette - this may be a copout, or it may be a great idea. I'm
* not sure which yet.
*/
	/* Map from the CPU HPA all the way to the end of the address space. */
if (bus_space_map(&hppa_bustag, hppa_mcpuhpa, (~0LU - hppa_mcpuhpa + 1),
0, &ioh))
panic("%s: cannot map mainbus IO space", __func__);
/*
* Local-Broadcast the HPA to all modules on the bus
*/
((struct iomod *)(hppa_mcpuhpa & HPPA_FLEX_MASK))[FPA_IOMOD].io_flex =
(void *)((hppa_mcpuhpa & HPPA_FLEX_MASK) | DMA_ENABLE);