/*
* Copyright (c) 2007 Mark Kettenis
*
* Permission to use, copy, modify, and distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
/*
 * Set of physical pages backing one DMA map.  Entries live in a flat
 * array (ipm_map) and, simultaneously, in a splay tree ordered by
 * physical address (see iomap_compare) for fast pa -> entry lookup.
 */
struct iommu_page_map {
	SPLAY_HEAD(iommu_page_tree, iommu_page_entry) ipm_tree;
	int ipm_maxpage;	/* Size of allocated page map */
	int ipm_pagecnt;	/* Number of entries in use */
	/*
	 * Variable-length trailing array: the structure is allocated with
	 * room for ipm_maxpage entries, hence the "+ (n - 1) * sizeof"
	 * sizing arithmetic used when freeing (iommu_iomap_destroy).
	 */
	struct iommu_page_entry ipm_map[1];
};
/*
 * per-map IOMMU state
 */
struct iommu_map_state {
	struct astro_softc *ims_sc;	/* owning controller's softc */
	bus_addr_t ims_dvmastart;	/* NOTE(review): presumably the base of
					   the DVMA range reserved for this
					   map — confirm against allocator */
	bus_size_t ims_dvmasize;	/* size of that DVMA range */
	struct iommu_page_map ims_map;	/* map must be last (array at end) */
};
m = TAILQ_FIRST(&mlist);
r->tlb_pdir_base = htole64(VM_PAGE_TO_PHYS(m));
/* Map the pages. */
for (; m != NULL; m = TAILQ_NEXT(m, pageq.queue)) {
pa = VM_PAGE_TO_PHYS(m);
pmap_enter(pmap_kernel(), va, pa,
VM_PROT_READ|VM_PROT_WRITE, PMAP_WIRED);
va += PAGE_SIZE;
}
pmap_update(pmap_kernel());
memset(sc->sc_pdir, 0, size);
/*
* The PDC might have set up some devices to do DMA. It will do
 * this for the onboard USB controller if a USB keyboard is used
* for console input. In that case, bad things will happen if we
* enable iova space. So reset the PDC devices before we do that.
* Don't do this if we're using a serial console though, since it
* will stop working if we do. This is fine since the serial port
* doesn't do DMA.
*/
pagezero_cookie = hppa_pagezero_map();
if (PAGE0->mem_cons.pz_class != PCL_DUPLEX)
pdcproc_ioreset();
hppa_pagezero_unmap(pagezero_cookie);
for (seg = 0; seg < map->dm_nsegs; seg++) {
struct hppa_bus_dma_segment *ds = &map->dm_segs[seg];
paend = round_page(ds->ds_addr + ds->ds_len);
for (pa = trunc_page(ds->ds_addr), va = trunc_page(ds->_ds_va);
pa < paend; pa += PAGE_SIZE, va += PAGE_SIZE) {
err = iommu_iomap_insert_page(ims, va, pa);
if (err) {
printf("iomap insert error: %d for "
"va 0x%lx pa 0x%lx\n", err, va, pa);
bus_dmamap_unload(sc->sc_dmat, map);
iommu_iomap_clear_pages(ims);
}
}
}
dva = dvmaddr;
for (i = 0, e = ipm->ipm_map; i < ipm->ipm_pagecnt; ++i, ++e) {
e->ipe_dva = dva;
iommu_enter(sc, e->ipe_dva, e->ipe_pa, e->ipe_va, flags);
dva += PAGE_SIZE;
}
for (seg = 0; seg < map->dm_nsegs; seg++) {
struct hppa_bus_dma_segment *ds = &map->dm_segs[seg];
ds->ds_addr = iommu_iomap_translate(ims, ds->ds_addr);
}
return (0);
}
int
iommu_dvmamap_load(void *v, bus_dmamap_t map, void *addr, bus_size_t size,
struct proc *p, int flags)
{
struct astro_softc *sc = v;
int err;
/*
 * Comparison routine for the splay tree: orders page entries by
 * their physical address.
 */
static inline int
iomap_compare(struct iommu_page_entry *a, struct iommu_page_entry *b)
{
	if (a->ipe_pa < b->ipe_pa)
		return (-1);
	if (a->ipe_pa > b->ipe_pa)
		return (1);
	return (0);
}
/* Initialize the map. */
ims->ims_map.ipm_maxpage = n;
SPLAY_INIT(&ims->ims_map.ipm_tree);
return (ims);
}
/*
* Destroy an iomap.
*/
void
iommu_iomap_destroy(struct iommu_map_state *ims)
{
#ifdef DIAGNOSTIC
if (ims->ims_map.ipm_pagecnt > 0)
printf("iommu_iomap_destroy: %d page entries in use\n",
ims->ims_map.ipm_pagecnt);
#endif
const int n = ims->ims_map.ipm_maxpage;
const size_t sz =
sizeof(*ims) + (n - 1) * sizeof(ims->ims_map.ipm_map[0]);
kmem_free(ims, sz);
}
/*
 * Insert a pa entry in the iomap.
 *
 * Returns 0 on success (or harmless duplicate), ENOMEM when the map is
 * full and pa is not already present.  Duplicate pages are tolerated
 * but counted only once.
 */
int
iommu_iomap_insert_page(struct iommu_map_state *ims, vaddr_t va, paddr_t pa)
{
	struct iommu_page_map *ipm = &ims->ims_map;
	struct iommu_page_entry *e;

	/* Map full: only a duplicate of an existing page is acceptable. */
	if (ipm->ipm_pagecnt >= ipm->ipm_maxpage) {
		struct iommu_page_entry key;

		key.ipe_pa = pa;
		if (SPLAY_FIND(iommu_page_tree, &ipm->ipm_tree, &key) != NULL)
			return (0);
		return (ENOMEM);
	}

	/* Fill in the next free array slot. */
	e = &ipm->ipm_map[ipm->ipm_pagecnt];
	e->ipe_pa = pa;
	e->ipe_va = va;
	e->ipe_dva = 0;

	/*
	 * SPLAY_INSERT yields non-NULL when the key already exists; in
	 * that case the slot is simply reused and the count stays put.
	 */
	if (SPLAY_INSERT(iommu_page_tree, &ipm->ipm_tree, e) == NULL)
		++ipm->ipm_pagecnt;

	return (0);
}
/*
 * Translate a physical address (pa) into a DVMA address.
 *
 * Looks the page-aligned pa up in the map's splay tree and recombines
 * the entry's DVMA page address with the in-page offset.  Panics if
 * the page was never inserted — that would mean a mapping was loaded
 * without going through iommu_iomap_insert_page().
 */
bus_addr_t
iommu_iomap_translate(struct iommu_map_state *ims, paddr_t pa)
{
	struct iommu_page_map *ipm = &ims->ims_map;
	struct iommu_page_entry *e;
	struct iommu_page_entry pe;
	paddr_t offset = pa & PAGE_MASK;

	/* Entries are keyed on the page-aligned physical address. */
	pe.ipe_pa = trunc_page(pa);

	e = SPLAY_FIND(iommu_page_tree, &ipm->ipm_tree, &pe);
	if (e == NULL) {
		/*
		 * panic(9) never returns and appends its own newline, so
		 * the old unreachable "return 0" and trailing "\n" in the
		 * format string have been dropped.
		 */
		panic("iommu_iomap_translate: couldn't find pa %lx", pa);
	}

	return (e->ipe_dva | offset);
}
/*
* Clear the iomap table and tree.
*/
void
iommu_iomap_clear_pages(struct iommu_map_state *ims)
{
ims->ims_map.ipm_pagecnt = 0;
SPLAY_INIT(&ims->ims_map.ipm_tree);
}
/*
* Add an entry to the IOMMU table.
*/
void
iommu_enter(struct astro_softc *sc, bus_addr_t dva, paddr_t pa, vaddr_t va,
int flags)
{
volatile uint64_t *tte_ptr = &sc->sc_pdir[dva >> PAGE_SHIFT];
uint64_t tte;
uint32_t ci;
#ifdef ASTRODEBUG
printf("iommu_enter dva %lx, pa %lx, va %lx\n", dva, pa, va);
#endif