Example #1
/*
 * Given a range of kernel virtual space, remap all the
 * pages found there into the DVMA space (dup mappings).
 * This IS safe to call at interrupt time.
 * (Typically called at SPLBIO)
 */
void *
dvma_mapin(void *kva, int len, int canwait /* ignored */)
{
	vaddr_t seg_kva, seg_dma;
	vsize_t seg_len, seg_off;
	vaddr_t v, x;
	int s, sme, error;

	/* Get seg-aligned address and length. */
	seg_kva = (vaddr_t)kva;
	seg_len = (vsize_t)len;
	seg_off = seg_kva & SEGOFSET;
	seg_kva -= seg_off;
	seg_len = m68k_round_seg(seg_len + seg_off);

	s = splvm();

	/* Allocate the DVMA segment(s) */

	error = extent_alloc(dvma_extent, seg_len, NBSG, 0,
	    EX_FAST | EX_NOWAIT | EX_MALLOCOK, &seg_dma);
	if (error) {
		splx(s);
		return NULL;
	}

#ifdef	DIAGNOSTIC
	if (seg_dma & SEGOFSET)
		panic("dvma_mapin: seg not aligned");
#endif

	/* Duplicate the mappings into DMA space. */
	v = seg_kva;
	x = seg_dma;
	while (seg_len > 0) {
		sme = get_segmap(v);
#ifdef	DIAGNOSTIC
		if (sme == SEGINV)
			panic("dvma_mapin: seg not mapped");
#endif
#ifdef	HAVECACHE
		/* flush write-back on old mappings */
		if (cache_size)
			cache_flush_segment(v);
#endif
		set_segmap_allctx(x, sme);
		v += NBSG;
		x += NBSG;
		seg_len -= NBSG;
	}
	seg_dma += seg_off;

	splx(s);
	return (void *)seg_dma;
}
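
Each extent_alloc() above has to be balanced by an extent_free() on the
teardown path. Below is a minimal sketch of what the matching dvma_mapout()
could look like, reusing dvma_extent and the segment macros from the example;
it is an illustration under those assumptions, not the actual sun3 code.

/*
 * Sketch: inverse of dvma_mapin().  Invalidate the duplicated segment
 * mappings and return the DVMA range to dvma_extent.
 */
void
dvma_mapout(void *dma, int len)
{
	vaddr_t seg_dma, x;
	vsize_t seg_len, seg_off, n;
	int s;

	/* Recover the seg-aligned base and rounded length. */
	seg_dma = (vaddr_t)dma;
	seg_off = seg_dma & SEGOFSET;
	seg_dma -= seg_off;
	seg_len = m68k_round_seg((vsize_t)len + seg_off);

	s = splvm();

	/* Invalidate the borrowed mappings, one segment at a time. */
	for (x = seg_dma, n = seg_len; n > 0; x += NBSG, n -= NBSG) {
#ifdef	HAVECACHE
		/* flush write-back before dropping the mapping */
		if (cache_size)
			cache_flush_segment(x);
#endif
		set_segmap_allctx(x, SEGINV);
	}

	/* Return the DVMA range to the extent map. */
	if (extent_free(dvma_extent, seg_dma, seg_len, EX_NOWAIT))
		panic("dvma_mapout: extent_free failed");

	splx(s);
}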
Example #2
/* ARGSUSED */
int
bus_space_map(bus_space_tag_t t, bus_addr_t bpa, bus_size_t size, int flags,
    bus_space_handle_t *bshp)
{
	vaddr_t kva;
	vsize_t offset;
	int error;

	if (t->bustype == HP300_BUS_SPACE_INTIO) {
		/*
		 * Intio space is direct-mapped in pmap_bootstrap(); just
		 * do the translation.
		 */
		*bshp = (bus_space_handle_t)IIOV(INTIOBASE + bpa);
		return 0;
	}

	if (t->bustype != HP300_BUS_SPACE_DIO &&
	    t->bustype != HP300_BUS_SPACE_SGC)
		panic("%s: bad space tag", __func__);

	/*
	 * Allocate virtual address space from the extio extent map.
	 */
	offset = m68k_page_offset(bpa);
	size = m68k_round_page(offset + size);
	error = extent_alloc(extio_ex, size, PAGE_SIZE, 0,
	    EX_FAST | EX_NOWAIT | (extio_ex_malloc_safe ? EX_MALLOCOK : 0),
	    &kva);
	if (error)
		return error;

	/*
	 * Map the range.  The range is always cache-inhibited on the hp300.
	 */
	physaccess((void *)kva, (void *)bpa, size, PG_RW|PG_CI);

	/*
	 * All done.
	 */
	*bshp = (bus_space_handle_t)(kva + offset);
	return 0;
}
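
For symmetry, the unmap side would undo physaccess() and hand the virtual
range back to extio_ex. A minimal sketch of such a bus_space_unmap(),
assuming the same tag layout and the hp300 physunaccess() helper (the real
function differs in detail):

void
bus_space_unmap(bus_space_tag_t t, bus_space_handle_t bsh, bus_size_t size)
{
	vaddr_t kva;
	vsize_t offset;

	if (t->bustype == HP300_BUS_SPACE_INTIO)
		return;		/* direct-mapped; nothing to release */

	/* Recompute the page-aligned range we allocated at map time. */
	kva = m68k_trunc_page(bsh);
	offset = m68k_page_offset(bsh);
	size = m68k_round_page(offset + size);

	/* Tear down the cache-inhibited mappings. */
	physunaccess((void *)kva, size);

	/* Hand the virtual space back to the extent map. */
	if (extent_free(extio_ex, kva, size,
	    EX_NOWAIT | (extio_ex_malloc_safe ? EX_MALLOCOK : 0)))
		printf("%s: extent_free failed\n", __func__);
}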
Example #3
/*
 * Allocate/deallocate a cache-inhibited range of kernel virtual address
 * space mapping the indicated physical address range [pa - pa+size)
 */
void *
iomap(void *pa, int size)
{
	u_long kva;
	int error;

#ifdef DEBUG
	if (((int)pa & PGOFSET) || (size & PGOFSET))
		panic("iomap: unaligned");
#endif

	error = extent_alloc(extio_ex, size, PAGE_SIZE, 0,
	    EX_FAST | EX_NOWAIT | (extio_ex_malloc_safe ? EX_MALLOCOK : 0),
	    &kva);
	if (error)
		return NULL;

	physaccess((void *) kva, pa, size, PG_RW|PG_CI);
	return (void *)kva;
}
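
The corresponding iounmap() is the natural consumer of extent_free() here; a
minimal sketch under the same assumptions (extio_ex and the physunaccess()
helper from this port):

void
iounmap(void *kva, int size)
{
#ifdef DEBUG
	if (((int)kva & PGOFSET) || (size & PGOFSET))
		panic("iounmap: unaligned");
#endif
	/* Tear down the cache-inhibited mappings... */
	physunaccess(kva, size);

	/* ...and return the virtual space to the extent map. */
	if (extent_free(extio_ex, (u_long)kva, size, EX_NOWAIT))
		panic("iounmap: extent_free failed");
}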
Example #4
int
b3_617_map_vme(void *vsc, vme_addr_t vmeaddr, vme_size_t len,
    vme_am_t am, vme_datasize_t datasizes, vme_swap_t swap,
    bus_space_tag_t *tag, bus_space_handle_t *handle, vme_mapresc_t *resc)
{
	vme_addr_t vmebase, vmeend, va;
	unsigned long maplen, first, i;
	u_int32_t mapreg;
	bus_addr_t pcibase;
	int res;
	struct b3_617_softc *sc = vsc;
	struct b3_617_vmeresc *r;

	/* first mapped address */
	vmebase = vmeaddr & ~(VME_PAGESIZE - 1);
	/* base of last mapped page */
	vmeend = (vmeaddr + len - 1) & ~(VME_PAGESIZE - 1);
	/* bytes in scatter table required */
	maplen = ((vmeend - vmebase) / VME_PAGESIZE + 1) * 4;

	if (extent_alloc(sc->vmeext, maplen, 4, 0, EX_FAST, &first))
		return (ENOMEM);

	/*
	 * set up adapter mapping registers
	 */
	mapreg = (am << MR_AMOD_SHIFT) | MR_FC_RRAM | swap;

	for (i = first, va = vmebase;
	     i < first + maplen;
	     i += 4, va += VME_PAGESIZE) {
		write_mapmem(sc, i, mapreg | va);
#ifdef BIT3DEBUG
		printf("mapreg@%lx=%x\n", i, read_mapmem(sc, i));
#endif
	}

#ifdef DIAGNOSTIC
	if (va != vmeend + VME_PAGESIZE)
		panic("b3_617_map_pci_vme: botch");
#endif
	/*
	 * map needed range in PCI space
	 */
	pcibase = sc->vmepbase + (first - MR_PCI_VME) / 4 * VME_PAGESIZE
	    + (vmeaddr & (VME_PAGESIZE - 1));

	if ((res = bus_space_map(sc->sc_vmet, pcibase, len, 0, handle))) {
		for (i = first; i < first + maplen; i += 4)
			write_mapmem(sc, i, MR_RAM_INVALID);
		extent_free(sc->vmeext, first, maplen, 0);
		return (res);
	}

	*tag = sc->sc_vmet;

	/*
	 * save all data needed for later unmapping
	 */
	r = malloc(sizeof(*r), M_DEVBUF, M_NOWAIT);
	if (r == NULL) {
		/* roll back on allocation failure */
		bus_space_unmap(sc->sc_vmet, *handle, len);
		for (i = first; i < first + maplen; i += 4)
			write_mapmem(sc, i, MR_RAM_INVALID);
		extent_free(sc->vmeext, first, maplen, 0);
		return (ENOMEM);
	}
	r->handle = *handle;
	r->len = len;
	r->firstpage = first;
	r->maplen = maplen;
	*resc = r;
	return (0);
}
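
On unmap, the driver has everything it needs in struct b3_617_vmeresc to
reverse the three steps above: invalidate the scatter-table entries, unmap
the PCI window, and free the extent range. A sketch of such a
b3_617_unmap_vme(), assuming the field names from the example:

void
b3_617_unmap_vme(void *vsc, vme_mapresc_t resc)
{
	struct b3_617_softc *sc = vsc;
	struct b3_617_vmeresc *r = resc;
	unsigned long i;

	/* Invalidate the scatter-table entries used by this mapping. */
	for (i = r->firstpage; i < r->firstpage + r->maplen; i += 4)
		write_mapmem(sc, i, MR_RAM_INVALID);

	/* Release the PCI window and the scatter-table range. */
	bus_space_unmap(sc->sc_vmet, r->handle, r->len);
	extent_free(sc->vmeext, r->firstpage, r->maplen, 0);

	free(r, M_DEVBUF);
}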
Example #5
int
_bus_dmamap_load(bus_dma_tag_t t, bus_dmamap_t map, void *buf,
    bus_size_t buflen, struct proc *p, int flags)
{
	vaddr_t kva, dva;
	vsize_t off, sgsize;
	paddr_t pa;
	pmap_t pmap;
	int error, rv, s;

	/*
	 * Make sure that on error condition we return "no valid mappings".
	 */
	map->dm_nsegs = 0;
	map->dm_mapsize = 0;

	if (buflen > map->_dm_size)
		return EINVAL;

	kva = (vaddr_t)buf;
	off = kva & PGOFSET;
	sgsize = round_page(off + buflen);

	/* Try to allocate DVMA space. */
	s = splvm();
	error = extent_alloc(dvma_extent, sgsize, PAGE_SIZE, 0,
	    EX_FAST | ((flags & BUS_DMA_NOWAIT) == 0 ? EX_WAITOK : EX_NOWAIT),
	    &dva);
	splx(s);
	if (error)
		return ENOMEM;

	/* Fill in the segment. */
	map->dm_segs[0].ds_addr = dva + off;
	map->dm_segs[0].ds_len = buflen;
	map->dm_segs[0]._ds_va = dva;
	map->dm_segs[0]._ds_sgsize = sgsize;

	/*
	 * Now map the DVMA addresses we allocated to point to the
	 * pages of the caller's buffer.
	 */
	if (p != NULL)
		pmap = p->p_vmspace->vm_map.pmap;
	else
		pmap = pmap_kernel();

	while (sgsize > 0) {
		rv = pmap_extract(pmap, kva, &pa);
#ifdef DIAGNOSTIC
		if (rv == false)
			panic("%s: unmapped VA", __func__);
#endif
		pmap_enter(pmap_kernel(), dva, pa | PMAP_NC,
		    VM_PROT_READ | VM_PROT_WRITE, PMAP_WIRED);
		kva += PAGE_SIZE;
		dva += PAGE_SIZE;
		sgsize -= PAGE_SIZE;
	}

	map->dm_nsegs = 1;
	map->dm_mapsize = map->dm_segs[0].ds_len;

	return 0;
}
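
_bus_dmamap_unload() is the counterpart: it removes the wired DVMA mappings
and gives the range back to dvma_extent. A minimal sketch, assuming the
_ds_va/_ds_sgsize fields filled in above:

void
_bus_dmamap_unload(bus_dma_tag_t t, bus_dmamap_t map)
{
	bus_dma_segment_t *seg;
	int error, s;

	if (map->dm_nsegs == 0)
		return;		/* nothing loaded */

	seg = &map->dm_segs[0];

	/* Drop the wired DVMA mappings created at load time. */
	pmap_remove(pmap_kernel(), seg->_ds_va,
	    seg->_ds_va + seg->_ds_sgsize);
	pmap_update(pmap_kernel());

	/* Return the DVMA range to the extent map. */
	s = splvm();
	error = extent_free(dvma_extent, seg->_ds_va, seg->_ds_sgsize,
	    EX_NOWAIT);
	splx(s);
	if (error)
		printf("%s: extent_free failed\n", __func__);

	map->dm_nsegs = 0;
	map->dm_mapsize = 0;
}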
Example #6
/*
 * Check that the MCHBAR on the host bridge is enabled, and if not, allocate it.
 * We do not need to actually map it because we access the BAR through its
 * mirror on the IGD; however, if it is disabled or not allocated, then
 * the mirror does not work. *sigh*.
 *
 * We return a trinary state:
 * 0 = already enabled, or can not enable
 * 1 = enabled, needs disable
 * 2 = enabled, needs disable and free.
 */
int
intel_setup_mchbar(struct inteldrm_softc *dev_priv,
    struct pci_attach_args *bpa)
{
	struct drm_device	*dev = (struct drm_device *)dev_priv->drmdev;
	u_int64_t		 mchbar_addr;
	pcireg_t		 tmp, low, high = 0;
	u_long			 addr;
	int			 reg, ret = 1, enabled = 0;

	reg = INTEL_INFO(dev)->gen >= 4 ?  MCHBAR_I965 : MCHBAR_I915;

	if (IS_I915G(dev) || IS_I915GM(dev)) {
		tmp = pci_conf_read(bpa->pa_pc, bpa->pa_tag, DEVEN_REG);
		enabled = !!(tmp & DEVEN_MCHBAR_EN);
	} else {
		tmp = pci_conf_read(bpa->pa_pc, bpa->pa_tag, reg);
		enabled = tmp & 1;
	}

	if (enabled) {
		return (0);
	}

	if (INTEL_INFO(dev)->gen >= 4)
		high = pci_conf_read(bpa->pa_pc, bpa->pa_tag, reg + 4);
	low = pci_conf_read(bpa->pa_pc, bpa->pa_tag, reg);
	mchbar_addr = ((u_int64_t)high << 32) | low;

	/*
	 * XXX We should check whether it is allocated in the PCI
	 * resources; right now we just check whether any address is
	 * there at all.
	 *
	 * If there is no address, we allocate one.  Note that we can't
	 * just use pci_mapreg_map() here, since some Intel BARs are
	 * special in that they set bit 0 to show they're enabled;
	 * this is not handled by the generic PCI code.
	 */
	if (mchbar_addr == 0) {
		addr = (u_long)mchbar_addr;
		if (bpa->pa_memex == NULL || extent_alloc(bpa->pa_memex,
		    MCHBAR_SIZE, MCHBAR_SIZE, 0, 0, 0, &addr)) {
			return (0); /* just say we don't need to disable */
		} else {
			mchbar_addr = addr;
			ret = 2;
			/* We've allocated it, now fill in the BAR again */
			if (INTEL_INFO(dev)->gen >= 4)
				pci_conf_write(bpa->pa_pc, bpa->pa_tag,
				    reg + 4, upper_32_bits(mchbar_addr));
			pci_conf_write(bpa->pa_pc, bpa->pa_tag,
			    reg, mchbar_addr & 0xffffffff);
		}
	}
	/* set the enable bit */
	if (IS_I915G(dev) || IS_I915GM(dev)) {
		pci_conf_write(bpa->pa_pc, bpa->pa_tag, DEVEN_REG,
		    tmp | DEVEN_MCHBAR_EN);
	} else {
		tmp = pci_conf_read(bpa->pa_pc, bpa->pa_tag, reg);
		pci_conf_write(bpa->pa_pc, bpa->pa_tag, reg, tmp | 1);
	}

	return (ret);
}
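
The trinary return value drives a matching teardown: state 1 only clears the
enable bit, state 2 also frees the address we allocated from pa_memex. A
sketch of what such an intel_teardown_mchbar() could look like, mirroring the
setup logic above (illustrative, not the verbatim driver code):

void
intel_teardown_mchbar(struct inteldrm_softc *dev_priv,
    struct pci_attach_args *bpa, int disable)
{
	struct drm_device	*dev = (struct drm_device *)dev_priv->drmdev;
	u_int64_t		 mchbar_addr;
	pcireg_t		 tmp, low, high = 0;
	int			 reg;

	reg = INTEL_INFO(dev)->gen >= 4 ? MCHBAR_I965 : MCHBAR_I915;

	switch (disable) {
	case 2:
		/* We allocated the BAR; read it back and free the extent. */
		if (INTEL_INFO(dev)->gen >= 4)
			high = pci_conf_read(bpa->pa_pc, bpa->pa_tag, reg + 4);
		low = pci_conf_read(bpa->pa_pc, bpa->pa_tag, reg);
		mchbar_addr = ((u_int64_t)high << 32) | low;
		if (bpa->pa_memex)
			extent_free(bpa->pa_memex, (u_long)mchbar_addr,
			    MCHBAR_SIZE, 0);
		/* FALLTHROUGH */
	case 1:
		/* Clear the enable bit we set. */
		if (IS_I915G(dev) || IS_I915GM(dev)) {
			tmp = pci_conf_read(bpa->pa_pc, bpa->pa_tag, DEVEN_REG);
			tmp &= ~DEVEN_MCHBAR_EN;
			pci_conf_write(bpa->pa_pc, bpa->pa_tag, DEVEN_REG, tmp);
		} else {
			tmp = pci_conf_read(bpa->pa_pc, bpa->pa_tag, reg);
			tmp &= ~1;
			pci_conf_write(bpa->pa_pc, bpa->pa_tag, reg, tmp);
		}
		break;
	default:
		break;
	}
}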