Example #1
int
mbus_alloc(void *v, bus_addr_t rstart, bus_addr_t rend, bus_size_t size,
    bus_size_t align, bus_size_t boundary, int flags,
    bus_addr_t *addrp, bus_space_handle_t *bshp)
{
	bus_addr_t bpa;
	int error;

	rstart &= HPPA_PHYSMAP;
	rend &= HPPA_PHYSMAP;
	if (rstart < hppa_ex->ex_start || rend > hppa_ex->ex_end)
		panic("bus_space_alloc: bad region start/end");

	if ((error = extent_alloc_subregion(hppa_ex, rstart, rend, size,
	    align, 0, boundary, EX_NOWAIT, &bpa)))
		return (error);

	if ((error = mbus_add_mapping(bpa, size, flags, bshp))) {
		if (extent_free(hppa_ex, bpa, size, EX_NOWAIT)) {
			printf("bus_space_alloc: pa 0x%lx, size 0x%lx\n",
				bpa, size);
			printf("bus_space_alloc: can't free region\n");
		}
	}

	*addrp = bpa | ~HPPA_PHYSMAP;
	return (error);
}
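The shape above recurs throughout these examples: reserve a range in the
extent first, then establish the mapping, and undo the reservation if the
mapping step fails so the extent never leaks address space.  A condensed
sketch of the idiom (generic names; add_mapping() stands in for the
port-specific mapping routine, and the nine-argument OpenBSD form of
extent_alloc_subregion() is assumed, with a zero skew):

	error = extent_alloc_subregion(ex, rstart, rend, size, align, 0,
	    boundary, EX_NOWAIT, &bpa);
	if (error)
		return (error);
	if ((error = add_mapping(bpa, size, flags, bshp))) {
		/* Roll back the reservation so the extent stays consistent. */
		if (extent_free(ex, bpa, size, EX_NOWAIT))
			printf("can't free region 0x%lx+0x%lx\n", bpa, size);
		return (error);
	}
	*addrp = bpa;
	return (0);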
Example #2
int
pciaddr_do_resource_allocate(struct shpcic_softc *sc, pci_chipset_tag_t pc,
    pcitag_t tag, int mapreg, struct extent *ex, int  type, bus_addr_t *addr,
    bus_size_t size)
{
	bus_addr_t start;
	int error;
	
	if (type == PCI_MAPREG_TYPE_IO) {
		if ((*addr & PCIADDR_PORT_END) != 0)
			return (0);
	} else if (*addr) /* no need to allocate */
		return (0);

	/* XXX Don't allocate if device is AGP device to avoid conflict. */
	if (pciaddr_device_is_agp(pc, tag)) 
		return (0);
	
	start = (type == PCI_MAPREG_TYPE_MEM ? sc->sc_membus_space.bus_base
	    : sc->sc_iobus_space.bus_base);
	if (start < ex->ex_start || start + size - 1 >= ex->ex_end) {
		PCIBIOS_PRINTV(("No available resources. fixup failed\n"));
		return (1);
	}
	error = extent_alloc_subregion(ex, start, ex->ex_end, size, size, 0, 0,
	    EX_FAST|EX_NOWAIT|EX_MALLOCOK, addr);
	if (error) {
		PCIBIOS_PRINTV(("No available resources. fixup failed\n"));
		return (1);
	}

	/* write new address to PCI device configuration header */
	pci_conf_write(pc, tag, mapreg, *addr);
	/* check */
	if (pcibr_flags & PCIBR_VERBOSE) {
		printf("pci_addr_fixup: ");
		pciaddr_print_devid(pc, tag);
	}

	if (pciaddr_ioaddr(pci_conf_read(pc, tag, mapreg)) != *addr) {
		pci_conf_write(pc, tag, mapreg, 0); /* clear */
		printf("fixup failed. (new address=%#lx)\n", *addr);
		return (1);
	}
	if (pcibr_flags & PCIBR_VERBOSE)
		printf("new address 0x%08lx\n", *addr);

	return (0);
}
Example #3
static int
pciaddr_do_resource_allocate(pci_chipset_tag_t pc, pcitag_t tag,
    int mapreg, void *ctx, int type, bus_addr_t *addr, bus_size_t size)
{
	struct pciaddr *pciaddrmap = (struct pciaddr *)ctx;
	bus_addr_t start;
	int error;
	struct extent *ex;

	if (*addr != 0) /* no need to allocate */
		return 0;

	ex = (type == PCI_MAPREG_TYPE_MEM ?
	      pciaddrmap->extent_mem : pciaddrmap->extent_port);

	/* XXX Don't allocate if device is AGP device to avoid conflict. */
	if (device_is_agp(pc, tag))
		return 0;

	start = (type == PCI_MAPREG_TYPE_MEM ?
		 pciaddrmap->mem_alloc_start : pciaddrmap->port_alloc_start);

	if (start < ex->ex_start || start + size - 1 >= ex->ex_end) {
		aprint_debug("No available resources. fixup failed\n");
		return 1;
	}
	error = extent_alloc_subregion(ex, start, ex->ex_end, size,
				       size, 0,
				       EX_FAST|EX_NOWAIT|EX_MALLOCOK,
				       (u_long *)addr);
	if (error) {
		aprint_debug("No available resources. fixup failed\n");
		return 1;
	}

	/* write new address to PCI device configuration header */
	pci_conf_write(pc, tag, mapreg, *addr);
	/* check */
	aprint_debug("pci_addr_fixup: ");
	pciaddr_print_devid(pc, tag);
	if (pciaddr_ioaddr(pci_conf_read(pc, tag, mapreg)) != *addr) {
		pci_conf_write(pc, tag, mapreg, 0); /* clear */
		aprint_error("fixup failed. (new address=%#x)\n", (unsigned)*addr);
		return 1;
	}
	aprint_debug("new address 0x%08x\n", (unsigned)*addr);

	return 0;
}
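One detail worth flagging in this variant: extent_alloc_subregion() writes
its result through a u_long *, so the (u_long *)addr cast quietly assumes
bus_addr_t and u_long have the same width.  A sketch of a width-agnostic
alternative (illustrative only, not from the source above):

	u_long result;

	error = extent_alloc_subregion(ex, start, ex->ex_end, size,
				       size, 0,
				       EX_FAST|EX_NOWAIT|EX_MALLOCOK,
				       &result);
	if (error == 0)
		*addr = (bus_addr_t)result;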
Example #4
int
au_himem_alloc(void *cookie, bus_addr_t start, bus_addr_t end,
    bus_size_t size, bus_size_t align, bus_size_t boundary, int flags,
    bus_addr_t *addrp, bus_space_handle_t *bshp)
{
	au_himem_cookie_t	*c = (au_himem_cookie_t *)cookie;
	int			err;

	err = extent_alloc_subregion(c->c_extent, start, end, size,
	    align, boundary, EX_FAST | EX_NOWAIT, addrp);
	if (err) {
		return err;
	}
	err = au_himem_map(cookie, *addrp, size, flags, bshp, 0);
	if (err)
		extent_free(c->c_extent, *addrp, size, EX_NOWAIT);
	return err;
}
Example #5
int
__C(CHIP,_mem_alloc)(
    void *v,
    bus_addr_t rstart,
    bus_addr_t rend,
    bus_size_t size,
    bus_size_t align,
    bus_size_t boundary,
    int flags,
    bus_addr_t *addrp,
    bus_space_handle_t *bshp)
{
    bus_addr_t memaddr;
    int error;

    /*
     * Do the requested allocation.
     */
#ifdef EXTENT_DEBUG
    printf("mem: allocating from 0x%lx to 0x%lx\n", rstart, rend);
#endif
    error = extent_alloc_subregion(CHIP_MEM_EXTENT(v), rstart, rend,
                                   size, align, boundary,
                                   EX_FAST | EX_NOWAIT | (CHIP_EX_MALLOC_SAFE(v) ? EX_MALLOCOK : 0),
                                   &memaddr);
    if (error) {
#ifdef EXTENT_DEBUG
        printf("mem: allocation failed (%d)\n", error);
        extent_print(CHIP_MEM_EXTENT(v));
#endif
        return (error);
    }

#ifdef EXTENT_DEBUG
    printf("mem: allocated 0x%lx to 0x%lx\n", memaddr, memaddr + size - 1);
#endif

    *addrp = memaddr;
    *bshp = ALPHA_PHYS_TO_K0SEG(CHIP_MEM_SYS_START(v)) + memaddr;

    return (0);
}
Example #6
int
tx3912video_framebuffer_alloc(struct video_chip *chip, paddr_t fb_start,
    paddr_t *fb_end /* buffer allocation hint */)
{
	struct extent_fixed ex_fixed[10];
	struct extent *ex;
	u_long addr, size;
	int error;

	/* calculate frame buffer size */
	size = (chip->vc_fbwidth * chip->vc_fbheight * chip->vc_fbdepth) /
	    NBBY;

	/* extent V-RAM region */
	ex = extent_create("Frame buffer address", fb_start, *fb_end,
	    (void *)ex_fixed, sizeof ex_fixed,
	    EX_NOWAIT);
	if (ex == 0)
		return (1);

	/* Allocate V-RAM area */
	error = extent_alloc_subregion(ex, fb_start, fb_start + size - 1,
	    size, TX3912_FRAMEBUFFER_ALIGNMENT,
	    TX3912_FRAMEBUFFER_BOUNDARY, EX_FAST|EX_NOWAIT, &addr);
	extent_destroy(ex);

	if (error != 0)
		return (1);

	chip->vc_fbpaddr = addr;
	chip->vc_fbvaddr = MIPS_PHYS_TO_KSEG1(addr);
	chip->vc_fbsize = size;
	
	*fb_end = addr + size;

	return (0);
}
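This example is notable for backing its short-lived extent with storage the
caller supplies (the on-stack ex_fixed array), so extent_create() never has
to malloc(9); that is what makes the trick usable early in autoconf.  A
minimal sketch of the same pattern using the EXTENT_FIXED_STORAGE_SIZE()
macro from <sys/extent.h> to size static storage (the name "early" and the
address range are made up):

	/* Room for up to 8 regions, with no malloc(9) at runtime. */
	static long ex_storage[EXTENT_FIXED_STORAGE_SIZE(8) / sizeof(long)];
	struct extent *ex;

	ex = extent_create("early", 0x10000000, 0x10ffffff,
	    (void *)ex_storage, sizeof(ex_storage), EX_NOWAIT);
	if (ex == NULL)
		return (1);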
Example #7
static int
_default_alloc(void *t, bus_addr_t rstart, bus_addr_t rend,
    bus_size_t size, bus_size_t alignment, bus_size_t boundary,
    int flags, bus_addr_t *bpap, bus_space_handle_t *bshp)
{
	struct playstation2_bus_space *pbs = t;
	struct extent *ex = pbs->pbs_extent;
	u_long bpa, base;
	int error;

	if (ex == 0) {
		*bshp = *bpap = rstart + pbs->pbs_base_addr;
		return (0);
	}

	base = ex->ex_start;

	error = extent_alloc_subregion(ex, rstart + base, rend + base, size,
	    alignment, boundary, 
	    EX_FAST | EX_NOWAIT | EX_MALLOCOK,
	    &bpa);

	if (error) {
		DPRINTF("failed.\n");
		return (error);
	}

	*bshp = (bus_space_handle_t)bpa;

	if (bpap)
		*bpap = bpa;

	DPRINTF("success.\n");

	return (0);
}
Example #8
int
arc_bus_space_alloc(bus_space_tag_t bst, bus_addr_t start, bus_addr_t end,
    bus_size_t size, bus_size_t align, bus_size_t boundary, int flags,
    bus_addr_t *addrp, bus_space_handle_t *bshp)
{
	u_long addr;
	int err;

	if (bst->bs_extent == NULL)
		panic("arc_bus_space_alloc: extent map %s not available",
		    bst->bs_name);

	if (start < bst->bs_start ||
	    start + size > bst->bs_start + bst->bs_size)
		return EINVAL;

	err = extent_alloc_subregion(bst->bs_extent, start, end, size,
	    align, boundary, EX_FAST | EX_NOWAIT | malloc_safe, &addr);
	if (err)
		return err;

	*addrp = addr;
	return bus_space_compose_handle(bst, addr, size, flags, bshp);
}
Example #9
void
ppb_alloc_resources(struct ppb_softc *sc, struct pci_attach_args *pa)
{
	pci_chipset_tag_t pc = sc->sc_pc;
	pcireg_t id, busdata, blr, bhlcr, type, csr;
	pcireg_t addr, mask;
	pcitag_t tag;
	int bus, dev;
	int reg, reg_start, reg_end, reg_rom;
	int io_count = 0;
	int mem_count = 0;
	bus_addr_t start, end;
	u_long base, size;

	if (pa->pa_memex == NULL)
		return;

	busdata = pci_conf_read(pc, sc->sc_tag, PPB_REG_BUSINFO);
	bus = PPB_BUSINFO_SECONDARY(busdata);
	if (bus == 0)
		return;

	/*
	 * Count number of devices.  If there are no devices behind
	 * this bridge, there's no point in allocating any address
	 * space.
	 */
	for (dev = 0; dev < pci_bus_maxdevs(pc, bus); dev++) {
		tag = pci_make_tag(pc, bus, dev, 0);
		id = pci_conf_read(pc, tag, PCI_ID_REG);

		if (PCI_VENDOR(id) == PCI_VENDOR_INVALID ||
		    PCI_VENDOR(id) == 0)
			continue;

		bhlcr = pci_conf_read(pc, tag, PCI_BHLC_REG);
		switch (PCI_HDRTYPE_TYPE(bhlcr)) {
		case 0:
			reg_start = PCI_MAPREG_START;
			reg_end = PCI_MAPREG_END;
			reg_rom = PCI_ROM_REG;
			break;
		case 1:	/* PCI-PCI bridge */
			reg_start = PCI_MAPREG_START;
			reg_end = PCI_MAPREG_PPB_END;
			reg_rom = 0;	/* 0x38 */
			break;
		case 2:	/* PCI-Cardbus bridge */
			reg_start = PCI_MAPREG_START;
			reg_end = PCI_MAPREG_PCB_END;
			reg_rom = 0;
			break;
		default:
			return;
		}

		for (reg = reg_start; reg < reg_end; reg += 4) {
			if (pci_mapreg_probe(pc, tag, reg, &type) == 0)
				continue;

			if (type == PCI_MAPREG_TYPE_IO)
				io_count++;
			else
				mem_count++;
		}

		if (reg_rom != 0) {
			addr = pci_conf_read(pc, tag, reg_rom);
			pci_conf_write(pc, tag, reg_rom, ~PCI_ROM_ENABLE);
			mask = pci_conf_read(pc, tag, reg_rom);
			pci_conf_write(pc, tag, reg_rom, addr);
			if (PCI_ROM_SIZE(mask))
				mem_count++;
		}
	}

	csr = pci_conf_read(pc, sc->sc_tag, PCI_COMMAND_STATUS_REG);

	/*
	 * Get the bridge in a consistent state.  If memory mapped I/O
	 * is disabled, disable the associated windows as well.
	 */
	if ((csr & PCI_COMMAND_MEM_ENABLE) == 0) {
		pci_conf_write(pc, sc->sc_tag, PPB_REG_MEM, 0x0000ffff);
		pci_conf_write(pc, sc->sc_tag, PPB_REG_PREFMEM, 0x0000ffff);
		pci_conf_write(pc, sc->sc_tag, PPB_REG_PREFBASE_HI32, 0);
		pci_conf_write(pc, sc->sc_tag, PPB_REG_PREFLIM_HI32, 0);
	}

	/* Allocate I/O address space if necessary. */
	if (io_count > 0 && pa->pa_ioex) {
		blr = pci_conf_read(pc, sc->sc_tag, PPB_REG_IOSTATUS);
		sc->sc_iobase = (blr << PPB_IO_SHIFT) & PPB_IO_MASK;
		sc->sc_iolimit = (blr & PPB_IO_MASK) | 0x00000fff;
		blr = pci_conf_read(pc, sc->sc_tag, PPB_REG_IO_HI);
		sc->sc_iobase |= (blr & 0x0000ffff) << 16;
		sc->sc_iolimit |= (blr & 0xffff0000);
		if (sc->sc_iolimit < sc->sc_iobase || sc->sc_iobase == 0) {
			start = max(PCI_IO_START, pa->pa_ioex->ex_start);
			end = min(PCI_IO_END, pa->pa_ioex->ex_end);
			for (size = 0x2000; size >= PPB_IO_MIN; size >>= 1)
				if (extent_alloc_subregion(pa->pa_ioex, start,
				    end, size, size, 0, 0, 0, &base) == 0)
					break;
			if (size >= PPB_IO_MIN) {
				sc->sc_iobase = base;
				sc->sc_iolimit = base + size - 1;
				blr = pci_conf_read(pc, sc->sc_tag,
				    PPB_REG_IOSTATUS);
				blr &= 0xffff0000;
				blr |= sc->sc_iolimit & PPB_IO_MASK;
				blr |= (sc->sc_iobase >> PPB_IO_SHIFT);
				pci_conf_write(pc, sc->sc_tag,
				    PPB_REG_IOSTATUS, blr);
				blr = (sc->sc_iobase & 0xffff0000) >> 16;
				blr |= sc->sc_iolimit & 0xffff0000;
				pci_conf_write(pc, sc->sc_tag,
				    PPB_REG_IO_HI, blr);

				csr |= PCI_COMMAND_IO_ENABLE;
			}
Example #10
/*
 * Load a dvmamap from an array of segs or an mlist (if the first
 * "segs" entry's mlist is non-null).  It calls iommu_dvmamap_load_segs()
 * or iommu_dvmamap_load_mlist() for part of the 2nd pass through the
 * mapping.  This is ugly.  A better solution would probably be to have
 * function pointers for implementing the traversal.  That way, there
 * could be one core load routine for each of the three required algorithms
 * (buffer, seg, and mlist).  That would also mean that the traversal
 * algorithm would then only need one implementation for each algorithm
 * instead of two (one for populating the iomap and one for populating
 * the dvma map).
 */
int
viommu_dvmamap_load_raw(bus_dma_tag_t t, bus_dma_tag_t t0, bus_dmamap_t map,
    bus_dma_segment_t *segs, int nsegs, bus_size_t size, int flags)
{
	int i;
	int left;
	int err = 0;
	bus_size_t sgsize;
	bus_size_t boundary, align;
	u_long dvmaddr, sgstart, sgend;
	struct iommu_state *is;
	struct iommu_map_state *ims = map->_dm_cookie;

#ifdef DIAGNOSTIC
	if (ims == NULL)
		panic("viommu_dvmamap_load_raw: null map state");
	if (ims->ims_iommu == NULL)
		panic("viommu_dvmamap_load_raw: null iommu");
#endif
	is = ims->ims_iommu;

	if (map->dm_nsegs) {
		/* Already in use?? */
#ifdef DIAGNOSTIC
		panic("iommu_dvmamap_load_raw: map still in use");
#endif
		bus_dmamap_unload(t0, map);
	}

	/*
	 * A boundary presented to bus_dmamem_alloc() takes precedence
	 * over boundary in the map.
	 */
	if ((boundary = segs[0]._ds_boundary) == 0)
		boundary = map->_dm_boundary;

	align = MAX(segs[0]._ds_align, PAGE_SIZE);

	/*
	 * Make sure that on error condition we return "no valid mappings".
	 */
	map->dm_nsegs = 0;

	iommu_iomap_clear_pages(ims);
	if (segs[0]._ds_mlist) {
		struct pglist *mlist = segs[0]._ds_mlist;
		struct vm_page *m;
		for (m = TAILQ_FIRST(mlist); m != NULL;
		    m = TAILQ_NEXT(m,pageq)) {
			err = iommu_iomap_insert_page(ims, VM_PAGE_TO_PHYS(m));

			if (err) {
				printf("iomap insert error: %d for "
				    "pa 0x%lx\n", err, VM_PAGE_TO_PHYS(m));
				iommu_iomap_clear_pages(ims);
				return (EFBIG);
			}
		}
	} else {
		/* Count up the total number of pages we need */
		for (i = 0, left = size; left > 0 && i < nsegs; i++) {
			bus_addr_t a, aend;
			bus_size_t len = segs[i].ds_len;
			bus_addr_t addr = segs[i].ds_addr;
			int seg_len = MIN(left, len);

			if (len < 1)
				continue;

			aend = round_page(addr + seg_len);
			for (a = trunc_page(addr); a < aend; a += PAGE_SIZE) {

				err = iommu_iomap_insert_page(ims, a);
				if (err) {
					printf("iomap insert error: %d for "
					    "pa 0x%llx\n", err, a);
					iommu_iomap_clear_pages(ims);
					return (EFBIG);
				}
			}

			left -= seg_len;
		}
	}
	sgsize = ims->ims_map.ipm_pagecnt * PAGE_SIZE;

	mtx_enter(&is->is_mtx);
	if (flags & BUS_DMA_24BIT) {
		sgstart = MAX(is->is_dvmamap->ex_start, 0xff000000);
		sgend = MIN(is->is_dvmamap->ex_end, 0xffffffff);
	} else {
		sgstart = is->is_dvmamap->ex_start;
		sgend = is->is_dvmamap->ex_end;
	}

	/* 
	 * If our segment size is larger than the boundary we need to 
	 * split the transfer up into little pieces ourselves.
	 */
	err = extent_alloc_subregion(is->is_dvmamap, sgstart, sgend,
	    sgsize, align, 0, (sgsize > boundary) ? 0 : boundary, 
	    EX_NOWAIT | EX_BOUNDZERO, (u_long *)&dvmaddr);
	mtx_leave(&is->is_mtx);

	if (err != 0) {
		iommu_iomap_clear_pages(ims);
		return (err);
	}

#ifdef DEBUG
	if (dvmaddr == (bus_addr_t)-1)	{ 
		printf("iommu_dvmamap_load_raw(): extent_alloc(%d, %x) "
		    "failed!\n", (int)sgsize, flags);
#ifdef DDB
		if (iommudebug & IDB_BREAK)
			Debugger();
#else
		panic("");
#endif
	}		
#endif	

	/* Set the active DVMA map */
	map->_dm_dvmastart = dvmaddr;
	map->_dm_dvmasize = sgsize;

	map->dm_mapsize = size;

	viommu_iomap_load_map(is, ims, dvmaddr, flags);

	if (segs[0]._ds_mlist)
		err = viommu_dvmamap_load_mlist(t, is, map, segs[0]._ds_mlist,
		    flags, size, boundary);
	else
		err = viommu_dvmamap_load_seg(t, is, map, segs, nsegs,
		    flags, size, boundary);

	if (err)
		viommu_dvmamap_unload(t, t0, map);

	return (err);
}
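The header comment sketches the cleaner design: one traversal routine
parameterized by a visitor callback, so the buffer, seg and mlist walks are
each written once.  A rough illustration of that refactoring (every name
below is hypothetical; nothing like it exists in the source above):

	typedef int (*iommu_page_visit)(struct iommu_map_state *, paddr_t);

	static int
	viommu_traverse_segs(bus_dma_segment_t *segs, int nsegs,
	    bus_size_t size, struct iommu_map_state *ims,
	    iommu_page_visit visit)
	{
		bus_size_t left = size;
		bus_addr_t a, aend;
		int i, err;

		for (i = 0; left > 0 && i < nsegs; i++) {
			bus_size_t seg_len = MIN(left, segs[i].ds_len);

			if (segs[i].ds_len < 1)
				continue;
			aend = round_page(segs[i].ds_addr + seg_len);
			for (a = trunc_page(segs[i].ds_addr); a < aend;
			    a += PAGE_SIZE)
				if ((err = (*visit)(ims, a)) != 0)
					return (err);
			left -= seg_len;
		}
		return (0);
	}

The first pass would hand iommu_iomap_insert_page() in as the visitor; the
second pass would need a callback wrapping viommu_dvmamap_append_range(),
with a slightly richer visitor signature to carry the range bounds.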
Example #11
/*
 * Load a contiguous kva buffer into a dmamap.  The physical pages are
 * not assumed to be contiguous.  Two passes are made through the buffer
 * and both call pmap_extract() for the same va->pa translations.  It
 * is possible to run out of pa->dvma mappings; the code should be smart
 * enough to resize the iomap (when the "flags" permit allocation).  It
 * is trivial to compute the number of entries required (round the length
 * up to the page size and then divide by the page size)...
 */
int
viommu_dvmamap_load(bus_dma_tag_t t, bus_dma_tag_t t0, bus_dmamap_t map,
    void *buf, bus_size_t buflen, struct proc *p, int flags)
{
	int err = 0;
	bus_size_t sgsize;
	u_long dvmaddr, sgstart, sgend;
	bus_size_t align, boundary;
	struct iommu_state *is;
	struct iommu_map_state *ims = map->_dm_cookie;
	pmap_t pmap;

#ifdef DIAGNOSTIC
	if (ims == NULL)
		panic("viommu_dvmamap_load: null map state");
	if (ims->ims_iommu == NULL)
		panic("viommu_dvmamap_load: null iommu");
#endif
	is = ims->ims_iommu;

	if (map->dm_nsegs) {
		/*
		 * Is it still in use? _bus_dmamap_load should have taken care
		 * of this.
		 */
#ifdef DIAGNOSTIC
		panic("iommu_dvmamap_load: map still in use");
#endif
		bus_dmamap_unload(t0, map);
	}

	/*
	 * Make sure that on error condition we return "no valid mappings".
	 */
	map->dm_nsegs = 0;

	if (buflen < 1 || buflen > map->_dm_size) {
		DPRINTF(IDB_BUSDMA,
		    ("iommu_dvmamap_load(): error %d > %d -- "
		     "map size exceeded!\n", (int)buflen, (int)map->_dm_size));
		return (EINVAL);
	}

	/*
	 * A boundary presented to bus_dmamem_alloc() takes precedence
	 * over boundary in the map.
	 */
	if ((boundary = (map->dm_segs[0]._ds_boundary)) == 0)
		boundary = map->_dm_boundary;
	align = MAX(map->dm_segs[0]._ds_align, PAGE_SIZE);

	pmap = p ? p->p_vmspace->vm_map.pmap : pmap_kernel();

	/* Count up the total number of pages we need */
	iommu_iomap_clear_pages(ims);
	{ /* Scope */
		bus_addr_t a, aend;
		bus_addr_t addr = (bus_addr_t)buf;
		int seg_len = buflen;

		aend = round_page(addr + seg_len);
		for (a = trunc_page(addr); a < aend; a += PAGE_SIZE) {
			paddr_t pa;

			if (pmap_extract(pmap, a, &pa) == FALSE) {
				printf("iomap pmap error addr 0x%llx\n", a);
				iommu_iomap_clear_pages(ims);
				return (EFBIG);
			}

			err = iommu_iomap_insert_page(ims, pa);
			if (err) {
				printf("iomap insert error: %d for "
				    "va 0x%llx pa 0x%lx "
				    "(buf %p len %lld/%llx)\n",
				    err, a, pa, buf, buflen, buflen);
				iommu_iomap_clear_pages(ims);
				return (EFBIG);
			}
		}
	}
	sgsize = ims->ims_map.ipm_pagecnt * PAGE_SIZE;

	mtx_enter(&is->is_mtx);
	if (flags & BUS_DMA_24BIT) {
		sgstart = MAX(is->is_dvmamap->ex_start, 0xff000000);
		sgend = MIN(is->is_dvmamap->ex_end, 0xffffffff);
	} else {
		sgstart = is->is_dvmamap->ex_start;
		sgend = is->is_dvmamap->ex_end;
	}

	/* 
	 * If our segment size is larger than the boundary we need to 
	 * split the transfer up into little pieces ourselves.
	 */
	err = extent_alloc_subregion(is->is_dvmamap, sgstart, sgend,
	    sgsize, align, 0, (sgsize > boundary) ? 0 : boundary, 
	    EX_NOWAIT | EX_BOUNDZERO, (u_long *)&dvmaddr);
	mtx_leave(&is->is_mtx);

#ifdef DEBUG
	if (err || (dvmaddr == (bus_addr_t)-1))	{ 
		printf("iommu_dvmamap_load(): extent_alloc(%d, %x) failed!\n",
		    (int)sgsize, flags);
#ifdef DDB
		if (iommudebug & IDB_BREAK)
			Debugger();
#endif
	}		
#endif	
	if (err != 0) {
		iommu_iomap_clear_pages(ims);
		return (err);
	}

	/* Set the active DVMA map */
	map->_dm_dvmastart = dvmaddr;
	map->_dm_dvmasize = sgsize;

	map->dm_mapsize = buflen;

	viommu_iomap_load_map(is, ims, dvmaddr, flags);

	{ /* Scope */
		bus_addr_t a, aend;
		bus_addr_t addr = (bus_addr_t)buf;
		int seg_len = buflen;

		aend = round_page(addr + seg_len);
		for (a = trunc_page(addr); a < aend; a += PAGE_SIZE) {
			bus_addr_t pgstart;
			bus_addr_t pgend;
			paddr_t pa;
			int pglen;

			/* Yuck... Redoing the same pmap_extract... */
			if (pmap_extract(pmap, a, &pa) == FALSE) {
				printf("iomap pmap error addr 0x%llx\n", a);
				err = EFBIG;
				break;
			}

			pgstart = pa | (MAX(a, addr) & PAGE_MASK);
			pgend = pa | (MIN(a + PAGE_SIZE - 1,
			    addr + seg_len - 1) & PAGE_MASK);
			pglen = pgend - pgstart + 1;

			if (pglen < 1)
				continue;

			err = viommu_dvmamap_append_range(t, map, pgstart,
			    pglen, flags, boundary);
			if (err == EFBIG)
				break;
			else if (err) {
				printf("iomap load seg page: %d for "
				    "va 0x%llx pa %lx (%llx - %llx) "
				    "for %d/0x%x\n",
				    err, a, pa, pgstart, pgend, pglen, pglen);
				break;
			}
		}
	}

	if (err)
		viommu_dvmamap_unload(t, t0, map);

	return (err);
}
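The entry count the comment calls trivial can indeed be computed before the
first pmap_extract() pass, which is exactly what a resizing iomap would
need.  A sketch, assuming only the standard round_page()/trunc_page()
macros and PAGE_SHIFT:

	/* Pages (and hence iomap entries) spanned by [buf, buf + buflen). */
	vaddr_t va = (vaddr_t)buf;
	u_long npages =
	    (round_page(va + buflen) - trunc_page(va)) >> PAGE_SHIFT;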
Example #12
int
bus_space_alloc(bus_space_tag_t t, bus_addr_t rstart, bus_addr_t rend,
    bus_size_t size, bus_size_t alignment, bus_size_t boundary, int flags,
    bus_addr_t *bpap, bus_space_handle_t *bshp)
{
	struct extent *ex;
	u_long bpa;
	int error;

	/*
	 * Pick the appropriate extent map.
	 */
	if (t == X86_BUS_SPACE_IO) {
		ex = ioport_ex;
	} else if (t == X86_BUS_SPACE_MEM) {
		ex = iomem_ex;
	} else {
		panic("bus_space_alloc: bad bus space tag");
	}

	/*
	 * Sanity check the allocation against the extent's boundaries.
	 */
	if (rstart < ex->ex_start || rend > ex->ex_end)
		panic("bus_space_alloc: bad region start/end");

	/*
	 * Do the requested allocation.
	 */
	error = extent_alloc_subregion(ex, rstart, rend, size, alignment,
	    0, boundary,
	    EX_FAST | EX_NOWAIT | (ioport_malloc_safe ?  EX_MALLOCOK : 0),
	    &bpa);

	if (error)
		return (error);

	/*
	 * For I/O space, that's all she wrote.
	 */
	if (t == X86_BUS_SPACE_IO) {
		*bshp = *bpap = bpa;
		return (0);
	}

	/*
	 * For memory space, map the bus physical address to
	 * a kernel virtual address.
	 */
	error = x86_mem_add_mapping(bpa, size, flags, bshp);
	if (error) {
		if (extent_free(iomem_ex, bpa, size, EX_NOWAIT |
		    (ioport_malloc_safe ? EX_MALLOCOK : 0))) {
			printf("bus_space_alloc: pa 0x%lx, size 0x%lx\n",
			    bpa, size);
			printf("bus_space_alloc: can't free region\n");
		}
	}

	*bpap = bpa;

	return (error);
}
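Seen from a driver, all of this machinery sits behind the machine-
independent bus_space_alloc(9)/bus_space_free(9) pair.  A hypothetical
caller fragment (the port range, size and alignment are made up):

	bus_addr_t base;
	bus_space_handle_t ioh;
	int error;

	/* Ask for any 32-byte-aligned run of 32 I/O ports. */
	error = bus_space_alloc(X86_BUS_SPACE_IO, 0x1000, 0xffff, 32,
	    32, 0, 0, &base, &ioh);
	if (error == 0) {
		/* ... bus_space_read_1(X86_BUS_SPACE_IO, ioh, 0) ... */
		bus_space_free(X86_BUS_SPACE_IO, ioh, 32);
	}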
Example #13
/**
 * radeon_driver_load_kms - Main load function for KMS.
 *
 * @dev: drm dev pointer
 * @flags: device flags
 *
 * This is the main load function for KMS (all asics).
 * It calls radeon_device_init() to set up the non-display
 * parts of the chip (asic init, CP, writeback, etc.), and
 * radeon_modeset_init() to set up the display parts
 * (crtcs, encoders, hotplug detect, etc.).
 * Returns 0 on success, error on failure.
 */
void
radeondrm_attach_kms(struct device *parent, struct device *self, void *aux)
{
	struct radeon_device	*rdev = (struct radeon_device *)self;
	struct drm_device	*dev;
	struct pci_attach_args	*pa = aux;
	const struct drm_pcidev *id_entry;
	int			 is_agp;
	pcireg_t		 type;
	uint8_t			 iobar;
#if !defined(__sparc64__)
	pcireg_t		 addr, mask;
	int			 s;
#endif

#if defined(__sparc64__) || defined(__macppc__)
	extern int fbnode;
#endif

	id_entry = drm_find_description(PCI_VENDOR(pa->pa_id),
	    PCI_PRODUCT(pa->pa_id), radeondrm_pciidlist);
	rdev->flags = id_entry->driver_data;
	rdev->pc = pa->pa_pc;
	rdev->pa_tag = pa->pa_tag;
	rdev->iot = pa->pa_iot;
	rdev->memt = pa->pa_memt;
	rdev->dmat = pa->pa_dmat;

#if defined(__sparc64__) || defined(__macppc__)
	if (fbnode == PCITAG_NODE(rdev->pa_tag))
		rdev->console = 1;
#else
	if (PCI_CLASS(pa->pa_class) == PCI_CLASS_DISPLAY &&
	    PCI_SUBCLASS(pa->pa_class) == PCI_SUBCLASS_DISPLAY_VGA &&
	    (pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG)
	    & (PCI_COMMAND_IO_ENABLE | PCI_COMMAND_MEM_ENABLE))
	    == (PCI_COMMAND_IO_ENABLE | PCI_COMMAND_MEM_ENABLE)) {
		rdev->console = 1;
#if NVGA > 0
		vga_console_attached = 1;
#endif
	}
#if NEFIFB > 0
	if (efifb_is_console(pa)) {
		rdev->console = 1;
		efifb_cndetach();
	}
#endif
#endif

#define RADEON_PCI_MEM		0x10
#define RADEON_PCI_IO		0x14
#define RADEON_PCI_MMIO		0x18
#define RADEON_PCI_IO2		0x20

	type = pci_mapreg_type(pa->pa_pc, pa->pa_tag, RADEON_PCI_MEM);
	if (PCI_MAPREG_TYPE(type) != PCI_MAPREG_TYPE_MEM ||
	    pci_mapreg_info(pa->pa_pc, pa->pa_tag, RADEON_PCI_MEM,
	    type, &rdev->fb_aper_offset, &rdev->fb_aper_size, NULL)) {
		printf(": can't get frambuffer info\n");
		return;
	}

	if (PCI_MAPREG_MEM_TYPE(type) != PCI_MAPREG_MEM_TYPE_64BIT)
		iobar = RADEON_PCI_IO;
	else
		iobar = RADEON_PCI_IO2;
	
	if (pci_mapreg_map(pa, iobar, PCI_MAPREG_TYPE_IO, 0,
	    NULL, &rdev->rio_mem, NULL, &rdev->rio_mem_size, 0)) {
		printf(": can't map IO space\n");
		return;
	}

	type = pci_mapreg_type(pa->pa_pc, pa->pa_tag, RADEON_PCI_MMIO);
	if (PCI_MAPREG_TYPE(type) != PCI_MAPREG_TYPE_MEM ||
	    pci_mapreg_map(pa, RADEON_PCI_MMIO, type, 0, NULL,
	    &rdev->rmmio, &rdev->rmmio_base, &rdev->rmmio_size, 0)) {
		printf(": can't map mmio space\n");
		return;
	}

#if !defined(__sparc64__)
	/*
	 * Make sure we have a base address for the ROM such that we
	 * can map it later.
	 */
	s = splhigh();
	addr = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_ROM_REG);
	pci_conf_write(pa->pa_pc, pa->pa_tag, PCI_ROM_REG, ~PCI_ROM_ENABLE);
	mask = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_ROM_REG);
	pci_conf_write(pa->pa_pc, pa->pa_tag, PCI_ROM_REG, addr);
	splx(s);

	if (addr == 0 && PCI_ROM_SIZE(mask) != 0 && pa->pa_memex) {
		bus_size_t size, start, end;
		bus_addr_t base;

		size = PCI_ROM_SIZE(mask);
		start = max(PCI_MEM_START, pa->pa_memex->ex_start);
		end = min(PCI_MEM_END, pa->pa_memex->ex_end);
		if (extent_alloc_subregion(pa->pa_memex, start, end, size,
		    size, 0, 0, 0, &base) == 0)
			pci_conf_write(pa->pa_pc, pa->pa_tag, PCI_ROM_REG, base);
	}
#endif

#ifdef notyet
	mtx_init(&rdev->swi_lock, IPL_TTY);
#endif

	/* update BUS flag */
	if (pci_get_capability(pa->pa_pc, pa->pa_tag, PCI_CAP_AGP, NULL, NULL)) {
		rdev->flags |= RADEON_IS_AGP;
	} else if (pci_get_capability(pa->pa_pc, pa->pa_tag,
	    PCI_CAP_PCIEXPRESS, NULL, NULL)) {
		rdev->flags |= RADEON_IS_PCIE;
	} else {
		rdev->flags |= RADEON_IS_PCI;
	}

	DRM_DEBUG("%s card detected\n",
		 ((rdev->flags & RADEON_IS_AGP) ? "AGP" :
		 (((rdev->flags & RADEON_IS_PCIE) ? "PCIE" : "PCI"))));

	is_agp = pci_get_capability(pa->pa_pc, pa->pa_tag, PCI_CAP_AGP,
	    NULL, NULL);

	printf("\n");

	kms_driver.num_ioctls = radeon_max_kms_ioctl;

	dev = (struct drm_device *)drm_attach_pci(&kms_driver, pa, is_agp,
	    rdev->console, self);
	rdev->ddev = dev;
	rdev->pdev = dev->pdev;

	rdev->family = rdev->flags & RADEON_FAMILY_MASK;
	if (!radeon_msi_ok(rdev))
		pa->pa_flags &= ~PCI_FLAGS_MSI_ENABLED;

	rdev->msi_enabled = 0;
	if (pci_intr_map_msi(pa, &rdev->intrh) == 0)
		rdev->msi_enabled = 1;
	else if (pci_intr_map(pa, &rdev->intrh) != 0) {
		printf(": couldn't map interrupt\n");
		return;
	}
	printf("%s: %s\n", rdev->dev.dv_xname,
	    pci_intr_string(pa->pa_pc, rdev->intrh));

	rdev->irqh = pci_intr_establish(pa->pa_pc, rdev->intrh, IPL_TTY,
	    radeon_driver_irq_handler_kms, rdev->ddev, rdev->dev.dv_xname);
	if (rdev->irqh == NULL) {
		printf("%s: couldn't establish interrupt\n",
		    rdev->dev.dv_xname);
		return;
	}

#ifdef __sparc64__
{
	struct rasops_info *ri;
	int node, console;

	node = PCITAG_NODE(pa->pa_tag);
	console = (fbnode == node);

	fb_setsize(&rdev->sf, 8, 1152, 900, node, 0);

	/*
	 * The firmware sets up the framebuffer such that it starts at
	 * an offset from the start of video memory.
	 */
	rdev->fb_offset =
	    bus_space_read_4(rdev->memt, rdev->rmmio, RADEON_CRTC_OFFSET);
	if (bus_space_map(rdev->memt, rdev->fb_aper_offset + rdev->fb_offset,
	    rdev->sf.sf_fbsize, BUS_SPACE_MAP_LINEAR, &rdev->memh)) {
		printf("%s: can't map video memory\n", rdev->dev.dv_xname);
		return;
	}

	ri = &rdev->sf.sf_ro;
	ri->ri_bits = bus_space_vaddr(rdev->memt, rdev->memh);
	ri->ri_hw = rdev;
	ri->ri_updatecursor = NULL;

	fbwscons_init(&rdev->sf, RI_VCONS | RI_WRONLY | RI_BSWAP, console);
	if (console)
		fbwscons_console_init(&rdev->sf, -1);
}
#endif

	rdev->shutdown = true;
	config_mountroot(self, radeondrm_attachhook);
}
Example #14
int
__BS(alloc)(void *v, bus_addr_t rstart, bus_addr_t rend, bus_size_t size,
    bus_size_t align, bus_size_t boundary, int flags, bus_addr_t *addrp,
    bus_space_handle_t *bshp)
{
#ifdef CHIP_EXTENT
	struct mips_bus_space_translation mbst;
	bus_addr_t addr;
	int error;
#if CHIP_ALIGN_STRIDE != 0
	int linear = flags & BUS_SPACE_MAP_LINEAR;

	/*
	 * Can't map xxx space linearly.
	 */
	if (linear)
		return (EOPNOTSUPP);
#endif

	/*
	 * Do the requested allocation.
	 */
#ifdef EXTENT_DEBUG
	printf("xxx: allocating from 0x%lx to 0x%lx\n", rstart, rend);
#endif
	error = extent_alloc_subregion(CHIP_EXTENT(v), rstart, rend, size,
	    align, boundary,
	    EX_FAST | EX_NOWAIT | (CHIP_EX_MALLOC_SAFE(v) ? EX_MALLOCOK : 0),
	    &addr);
	if (error) {
#ifdef EXTENT_DEBUG
		printf("xxx: allocation failed (%d)\n", error);
		extent_print(CHIP_EXTENT(v));
#endif
		return (error);
	}

#ifdef EXTENT_DEBUG
	printf("xxx: allocated 0x%lx to 0x%lx\n", addr, addr + size - 1);
#endif

	error = __BS(translate)(v, addr, size, flags, &mbst);
	if (error) {
		(void) extent_free(CHIP_EXTENT(v), addr, size,
		    EX_NOWAIT | (CHIP_EX_MALLOC_SAFE(v) ? EX_MALLOCOK : 0));
		return (error);
	}

	*addrp = addr;
	if (flags & BUS_SPACE_MAP_CACHEABLE)
		*bshp = MIPS_PHYS_TO_KSEG0(mbst.mbst_sys_start +
		    (addr - mbst.mbst_bus_start));
	else
		*bshp = MIPS_PHYS_TO_KSEG1(mbst.mbst_sys_start +
		    (addr - mbst.mbst_bus_start));

	return (0);
#else /* ! CHIP_EXTENT */
	return (EOPNOTSUPP);
#endif /* CHIP_EXTENT */
}
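The KSEG0/KSEG1 choice at the end reflects how 32-bit MIPS exposes low
physical memory twice in the kernel address map: both windows reach the
same physical range and differ only in cacheability, so
BUS_SPACE_MAP_CACHEABLE simply selects the cached window.  Sketched from
the standard definitions (32-bit layout):

	/* Both map physical address pa into unmapped kernel space. */
	#define MIPS_PHYS_TO_KSEG0(pa)	((pa) | 0x80000000)	/* cached */
	#define MIPS_PHYS_TO_KSEG1(pa)	((pa) | 0xa0000000)	/* uncached */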