Example #1
/*
 * Initialize IOMMU
 * This looks like the initialization of the CPU MMU,
 * but this routine is higher up the food chain.
 */
static struct iommu_regs *
iommu_init(struct iommu *t, uint64_t base)
{
    unsigned int *ptab;
    int ptsize;
#ifdef CONFIG_DEBUG_IOMMU
    unsigned int impl, vers;
#endif
    unsigned int tmp;
    struct iommu_regs *regs;
    int ret;
    unsigned long vasize;

    regs = (struct iommu_regs *)ofmem_map_io(base, IOMMU_REGS);
    if (regs == NULL) {
        DPRINTF("Cannot map IOMMU\n");
        for (;;) { }
    }
    t->regs = regs;
#ifdef CONFIG_DEBUG_IOMMU
    impl = (regs->control & IOMMU_CTRL_IMPL) >> 28;
    vers = (regs->control & IOMMU_CTRL_VERS) >> 24;
#endif

    tmp = regs->control;
    tmp &= ~(IOMMU_CTRL_RNGE);

    tmp |= (IOMMU_RNGE_32MB | IOMMU_CTRL_ENAB);
    t->plow = 0xfe000000;		/* End - 32 MB */
    /* Size of VA region that we manage */
    vasize = 0x2000000; /* 32 MB */

    regs->control = tmp;
    iommu_invalidate(regs);

    /* Allocate IOMMU page table */
    /* Tremendous alignment causes great waste... */
    ptsize = (vasize / PAGE_SIZE) * sizeof(int);
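    /* Both the alignment and the size passed below are ptsize, so the
       table ends up aligned to its own size; that is the waste the
       comment above refers to. */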
    ret = ofmem_posix_memalign((void *)&ptab, ptsize, ptsize);
    if (ret != 0) {
        DPRINTF("Cannot allocate IOMMU table [0x%x]\n", ptsize);
        for (;;) { }
    }
    t->page_table = ptab;

    /* flush_cache_all(); */
    /** flush_tlb_all(); **/
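    /* The base register is programmed with the page table's physical
       address shifted right by 4. */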
    tmp = (unsigned int)va2pa((unsigned long)ptab);
    regs->base = tmp >> 4;
    iommu_invalidate(regs);

    DPRINTF("IOMMU: impl %d vers %d page table at 0x%p (pa 0x%x) of size %d bytes\n",
            impl, vers, t->page_table, tmp, ptsize);

    mem_init(&cdvmem, (char*)t->plow, (char *)0xfffff000);
    return regs;
}
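All three examples flush the translation cache with iommu_invalidate() after reprogramming the control and base registers. The helper itself is not shown on this page; in the SPARC32 kernel headers it amounts to a single store to the flush register. A minimal sketch, assuming a struct iommu_regs that exposes the flush register as a tlbflush field:

static inline void iommu_invalidate(struct iommu_regs *regs)
{
    /* Any write to the flush-all register invalidates every IOTLB entry. */
    regs->tlbflush = 0;
}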
Example #2
static void __init sbus_iommu_init(struct of_device *op)
{
    struct iommu_struct *iommu;
    unsigned int impl, vers;
    unsigned long *bitmap;
    unsigned long tmp;

    iommu = kmalloc(sizeof(struct iommu_struct), GFP_ATOMIC);
    if (!iommu) {
        prom_printf("Unable to allocate iommu structure\n");
        prom_halt();
    }

    iommu->regs = of_ioremap(&op->resource[0], 0, PAGE_SIZE * 3,
                             "iommu_regs");
    if (!iommu->regs) {
        prom_printf("Cannot map IOMMU registers\n");
        prom_halt();
    }
    impl = (iommu->regs->control & IOMMU_CTRL_IMPL) >> 28;
    vers = (iommu->regs->control & IOMMU_CTRL_VERS) >> 24;
    tmp = iommu->regs->control;
    tmp &= ~(IOMMU_CTRL_RNGE);
    tmp |= (IOMMU_RNGE_256MB | IOMMU_CTRL_ENAB);
    iommu->regs->control = tmp;
    iommu_invalidate(iommu->regs);
    iommu->start = IOMMU_START;
    iommu->end = 0xffffffff;

    /* Allocate IOMMU page table */
    /* Stupid alignment constraints give me a headache.
       We need 256K or 512K or 1M or 2M area aligned to
       its size and current gfp will fortunately give
       it to us. */
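    /* __get_free_pages() returns buddy-allocator blocks, which are
       naturally aligned to their own size (2^IOMMU_ORDER pages), so the
       table gets the required size alignment for free. */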
    tmp = __get_free_pages(GFP_KERNEL, IOMMU_ORDER);
    if (!tmp) {
        prom_printf("Unable to allocate iommu table [0x%08x]\n",
                    IOMMU_NPTES*sizeof(iopte_t));
        prom_halt();
    }
    iommu->page_table = (iopte_t *)tmp;

    /* Initialize new table. */
    memset(iommu->page_table, 0, IOMMU_NPTES*sizeof(iopte_t));
    flush_cache_all();
    flush_tlb_all();
    iommu->regs->base = __pa((unsigned long) iommu->page_table) >> 4;
    iommu_invalidate(iommu->regs);

    bitmap = kmalloc(IOMMU_NPTES>>3, GFP_KERNEL);
    if (!bitmap) {
        prom_printf("Unable to allocate iommu bitmap [%d]\n",
                    (int)(IOMMU_NPTES>>3));
        prom_halt();
    }
Example #3
void __init
iommu_init(int iommund, struct sbus_bus *sbus)
{
	unsigned int impl, vers;
	unsigned long tmp;
	struct iommu_struct *iommu;
	struct linux_prom_registers iommu_promregs[PROMREG_MAX];
	struct resource r;
	unsigned long *bitmap;

	iommu = kmalloc(sizeof(struct iommu_struct), GFP_ATOMIC);
	if (!iommu) {
		prom_printf("Unable to allocate iommu structure\n");
		prom_halt();
	}
	iommu->regs = NULL;
	if (prom_getproperty(iommund, "reg", (void *) iommu_promregs,
			 sizeof(iommu_promregs)) != -1) {
		memset(&r, 0, sizeof(r));
		r.flags = iommu_promregs[0].which_io;
		r.start = iommu_promregs[0].phys_addr;
		iommu->regs = (struct iommu_regs *)
			sbus_ioremap(&r, 0, PAGE_SIZE * 3, "iommu_regs");
	}
	if (!iommu->regs) {
		prom_printf("Cannot map IOMMU registers\n");
		prom_halt();
	}
	impl = (iommu->regs->control & IOMMU_CTRL_IMPL) >> 28;
	vers = (iommu->regs->control & IOMMU_CTRL_VERS) >> 24;
	tmp = iommu->regs->control;
	tmp &= ~(IOMMU_CTRL_RNGE);
	tmp |= (IOMMU_RNGE_256MB | IOMMU_CTRL_ENAB);
	iommu->regs->control = tmp;
	iommu_invalidate(iommu->regs);
	iommu->start = IOMMU_START;
	iommu->end = 0xffffffff;

	/* Allocate IOMMU page table */
	/* Stupid alignment constraints give me a headache.
	   We need 256K or 512K or 1M or 2M area aligned to
	   its size and current gfp will fortunately give
	   it to us. */
	tmp = __get_free_pages(GFP_KERNEL, IOMMU_ORDER);
	if (!tmp) {
		prom_printf("Unable to allocate iommu table [0x%08x]\n",
			    IOMMU_NPTES*sizeof(iopte_t));
		prom_halt();
	}
	iommu->page_table = (iopte_t *)tmp;

	/* Initialize new table. */
	memset(iommu->page_table, 0, IOMMU_NPTES*sizeof(iopte_t));
	flush_cache_all();
	flush_tlb_all();
	iommu->regs->base = __pa((unsigned long) iommu->page_table) >> 4;
	iommu_invalidate(iommu->regs);

	bitmap = kmalloc(IOMMU_NPTES>>3, GFP_KERNEL);
	if (!bitmap) {
		prom_printf("Unable to allocate iommu bitmap [%d]\n",
			    (int)(IOMMU_NPTES>>3));
		prom_halt();
	}