Example #1
/*
 * Match for SCSI devices on the onboard and GIO32 adapter WD33C93 chips
 */
int
wdsc_match(struct device *parent, void *vcf, void *aux)
{
	struct hpc_attach_args *haa = aux;
	struct cfdata *cf = vcf;
	vaddr_t reset, asr;
	uint32_t dummy;
	uint8_t reg;

	if (strcmp(haa->ha_name, cf->cf_driver->cd_name) != 0)
		return 0;

	reset = PHYS_TO_XKPHYS(haa->ha_sh + haa->ha_dmaoff +
	    haa->hpc_regs->scsi0_ctl, CCA_NC);
	if (guarded_read_4(reset, &dummy) != 0)
		return 0;
	*(volatile uint32_t *)reset = haa->hpc_regs->scsi_dmactl_reset;
	delay(1000);
	*(volatile uint32_t *)reset = 0x0;
	delay(1000);

	asr = PHYS_TO_XKPHYS(haa->ha_sh + haa->ha_devoff + 3, CCA_NC);
	if (guarded_read_1(asr, &reg) != 0)
		return 0;
	if ((reg & 0xff) != SBIC_ASR_INT)
		return 0;

	return 1;
}
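
/*
 * A minimal sketch of the probe idiom used above, with a hypothetical
 * probe_reg() helper: map the register uncached through XKPHYS and
 * read it with guarded_read_4(), which returns nonzero on bus error
 * instead of faulting, so possibly-absent hardware can be probed
 * safely from a match function.
 */
static int
probe_reg(paddr_t pa, uint32_t *valp)
{
	vaddr_t va = PHYS_TO_XKPHYS(pa, CCA_NC);	/* uncached window */

	return guarded_read_4(va, valp) == 0;	/* 1 = device responded */
}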
Example #2
int
mace_space_map(bus_space_tag_t t, bus_addr_t offs, bus_size_t size,
    int flags, bus_space_handle_t *bshp)
{
	if (ISSET(flags, BUS_SPACE_MAP_CACHEABLE))
		offs +=
		    PHYS_TO_XKPHYS(0, CCA_CACHED) - PHYS_TO_XKPHYS(0, CCA_NC);
	*bshp = t->bus_base + offs;
	return 0;
}
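
/*
 * Why the CCA rebasing in mace_space_map() above works, assuming the
 * usual mips64 XKPHYS layout (CCA in bits 61:59; see the port's
 * <machine/cpu.h> for the exact macro):
 *
 *	PHYS_TO_XKPHYS(pa, cca) = 0x8000000000000000UL |
 *	    ((uint64_t)(cca) << 59) | (pa)
 *
 * With pa == 0 the subtraction leaves only the CCA field delta, e.g.
 * with CCA_CACHED == 3 and CCA_NC == 2:
 *
 *	PHYS_TO_XKPHYS(0, 3) - PHYS_TO_XKPHYS(0, 2) == 1UL << 59
 *
 * Adding that delta to an uncached XKPHYS handle flips it onto the
 * cached window while preserving the physical-address bits.
 */
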
paddr_t
octeon_pcibus_device_to_pa(bus_addr_t addr)
{
	printf("%s:%d: addr=%lx\n", __func__, __LINE__, addr);

	return PHYS_TO_XKPHYS(addr, CCA_NC);
}
Example #4
int
octeon_eth_send(struct octeon_eth_softc *sc, struct mbuf *m)
{
	paddr_t gaddr = 0;
	uint64_t *gbuf = NULL;
	int result = 0, error;

	gaddr = cn30xxfpa_buf_get_paddr(octeon_eth_fb_sg);
	if (gaddr == 0) {
		log(LOG_WARNING,
		    "%s: cannot allocate gather buffer from free pool allocator\n",
		    sc->sc_dev.dv_xname);
		result = 1;
		goto done;
	}

	gbuf = (uint64_t *)(uintptr_t)PHYS_TO_XKPHYS(gaddr, CCA_CACHED);

	error = octeon_eth_send_buf(sc, m, gbuf);
	if (error != 0) {
		/* already logging */
		cn30xxfpa_buf_put_paddr(octeon_eth_fb_sg, gaddr);
		result = error;
		goto done;
	}

	octeon_eth_send_queue_add(sc, m, gbuf);

done:
	return result;
}
paddr_t
octeon_pcibus_device_to_pa(bus_addr_t addr)
{
	OCTEON_PCIDEBUG(("%s:%d: addr=%lx\n", __func__, __LINE__, addr));

	return PHYS_TO_XKPHYS(addr, CCA_NC);
}
Example #6
int
gio_search(struct device *parent, void *vcf, void *aux)
{
	struct gio_softc *sc = (struct gio_softc *)parent;
	struct cfdata *cf = (struct cfdata *)vcf;
	struct gio_attach_args ga;

	/* Handled by direct configuration, so skip here */
	if (cf->cf_loc[1 /*GIOCF_ADDR*/] == -1)
		return 0;

	ga.ga_addr = (uint64_t)cf->cf_loc[1 /*GIOCF_ADDR*/];
	ga.ga_iot = sc->sc_iot;
	ga.ga_ioh = PHYS_TO_XKPHYS(ga.ga_addr, CCA_NC);
	ga.ga_dmat = sc->sc_dmat;
	ga.ga_slot = cf->cf_loc[0 /*GIOCF_SLOT*/];
	ga.ga_product = -1;
	ga.ga_descr = NULL;

	if ((*cf->cf_attach->ca_match)(parent, cf, &ga) == 0)
		return 0;

	config_attach(parent, cf, &ga, gio_print);

	return 1;
}
Example #7
int
octeon_eth_recv_mbuf(struct octeon_eth_softc *sc, uint64_t *work,
    struct mbuf **rm)
{
	struct mbuf *m;
	void (*ext_free)(caddr_t, u_int, void *);
	void *ext_buf;
	size_t ext_size;
	void *data;
	uint64_t word1 = work[1];
	uint64_t word2 = work[2];
	uint64_t word3 = work[3];

	MGETHDR(m, M_NOWAIT, MT_DATA);
	if (m == NULL)
		return 1;
	OCTEON_ETH_KASSERT(m != NULL);

	if ((word2 & PIP_WQE_WORD2_IP_BUFS) == 0) {
		/* Dynamic short */
		ext_free = octeon_eth_buf_ext_free_m;
		ext_buf = &work[4];
		ext_size = 96;

		data = &work[4 + sc->sc_ip_offset / sizeof(uint64_t)];
	} else {
		vaddr_t addr;
		vaddr_t start_buffer;

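		/*
		 * The packet data lives inside a 2 KB FPA packet buffer:
		 * map it cached through XKPHYS, then round down to the
		 * buffer start so the whole buffer can be attached with
		 * MEXTADD() below.
		 */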
		addr = PHYS_TO_XKPHYS(word3 & PIP_WQE_WORD3_ADDR, CCA_CACHED);
		start_buffer = addr & ~(2048 - 1);

		ext_free = octeon_eth_buf_ext_free_ext;
		ext_buf = (void *)start_buffer;
		ext_size = 2048;

		data = (void *)addr;
	}

	MEXTADD(m, ext_buf, ext_size, 0, ext_free, work);
	OCTEON_ETH_KASSERT(ISSET(m->m_flags, M_EXT));

	m->m_data = data;
	m->m_len = m->m_pkthdr.len = (word1 & PIP_WQE_WORD1_LEN) >> 48;
#if 0
	/*
	 * not readonly buffer
	 */
	m->m_flags |= M_EXT_RW;
#endif

	*rm = m;

	OCTEON_ETH_KASSERT(*rm != NULL);

	return 0;
}
/*
 * Check for an R4000 end-of-page errata condition in an executable code page.
 * Returns a bitmask to set in the given page pg_flags.
 */
u_int
eop_page_check(paddr_t pa)
{
	uint32_t insn;

	insn = *(uint32_t *)PHYS_TO_XKPHYS(pa + PAGE_SIZE - 4, CCA_CACHED);
	if (classify_insn(insn) != INSNCLASS_NEUTRAL)
		return PGF_EOP_VULN;

	return 0;
}
Example #9
/*
 * Common function for mapping DMA-safe memory.  May be called by
 * bus-specific DMA memory map functions.
 */
int
_dmamem_map(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs, size_t size,
    caddr_t *kvap, int flags)
{
	vaddr_t va, sva;
	size_t ssize;
	paddr_t pa;
	bus_addr_t addr;
	int curseg, error;

	if (nsegs == 1) {
		pa = (*t->_device_to_pa)(segs[0].ds_addr);
		if (flags & BUS_DMA_COHERENT)
			*kvap = (caddr_t)PHYS_TO_UNCACHED(pa);
		else
			*kvap = (caddr_t)PHYS_TO_XKPHYS(pa, CCA_CACHED);
		return (0);
	}

	size = round_page(size);
	va = uvm_km_valloc(kernel_map, size);
	if (va == 0)
		return (ENOMEM);

	*kvap = (caddr_t)va;

	sva = va;
	ssize = size;
	for (curseg = 0; curseg < nsegs; curseg++) {
		for (addr = segs[curseg].ds_addr;
		    addr < (segs[curseg].ds_addr + segs[curseg].ds_len);
		    addr += NBPG, va += NBPG, size -= NBPG) {
			if (size == 0)
				panic("_dmamem_map: size botch");
			pa = (*t->_device_to_pa)(addr);
			error = pmap_enter(pmap_kernel(), va, pa,
			    VM_PROT_READ | VM_PROT_WRITE, VM_PROT_READ |
			    VM_PROT_WRITE | PMAP_WIRED | PMAP_CANFAIL);
			if (error) {
				pmap_update(pmap_kernel());
				uvm_km_free(kernel_map, sva, ssize);
				return (error);
			}

			if (flags & BUS_DMA_COHERENT)
				pmap_page_cache(PHYS_TO_VM_PAGE(pa),
				    PV_UNCACHED);
		}
		pmap_update(pmap_kernel());
	}

	return (0);
}
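
/*
 * A hedged usage sketch of the path that ends in _dmamem_map() above:
 * a driver allocates DMA-safe memory and maps it through the standard
 * bus_dma(9) entry points. dma_buf_sketch() is hypothetical and error
 * handling is trimmed; "tag" stands for the platform's bus_dma_tag_t.
 */
static caddr_t
dma_buf_sketch(bus_dma_tag_t tag)
{
	bus_dma_segment_t seg;
	caddr_t kva;
	int rsegs;

	if (bus_dmamem_alloc(tag, PAGE_SIZE, PAGE_SIZE, 0, &seg, 1,
	    &rsegs, BUS_DMA_NOWAIT) != 0)
		return NULL;
	/*
	 * Single segment: _dmamem_map() hands back a direct XKPHYS
	 * window (uncached here because of BUS_DMA_COHERENT) without
	 * consuming kernel virtual address space.
	 */
	if (bus_dmamem_map(tag, &seg, rsegs, PAGE_SIZE, &kva,
	    BUS_DMA_NOWAIT | BUS_DMA_COHERENT) != 0) {
		bus_dmamem_free(tag, &seg, rsegs);
		return NULL;
	}
	return kva;
}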
Example #10
int
giofb_cnattach()
{
	struct gio_attach_args ga;

	ga.ga_addr = giofb_consaddr;
	ga.ga_iot = &imcbus_tag;
	ga.ga_ioh = PHYS_TO_XKPHYS(ga.ga_addr, CCA_NC);
	ga.ga_dmat = &imc_bus_dma_tag;
	ga.ga_slot = -1;
	ga.ga_product = giofb_consid;
	ga.ga_descr = NULL;

	switch (giofb_consid) {
	default:
#if NIMPACT_GIO > 0
		if (GIO_PRODUCT_32BIT_ID(giofb_consid) &&
		    GIO_PRODUCT_PRODUCTID(giofb_consid) == GIO_PRODUCT_IMPACT) {
			if (impact_gio_cnattach(&ga) == 0)
				return 0;
		}
#endif
		break;
	case GIO_PRODUCT_FAKEID_GRTWO:
#if NGRTWO > 0
		if (grtwo_cnattach(&ga) == 0)
			return 0;
#endif
		break;
	case GIO_PRODUCT_FAKEID_LIGHT:
#if NLIGHT > 0
		if (light_cnattach(&ga) == 0)
			return 0;
#endif
		break;
	case GIO_PRODUCT_FAKEID_NEWPORT:
#if NNEWPORT > 0
		if (newport_cnattach(&ga) == 0)
			return 0;
#endif
		break;
	}

	giofb_consaddr = 0;
	return ENXIO;
}
Example #11
int
octeon_eth_send_cmd(struct octeon_eth_softc *sc, uint64_t pko_cmd_w0,
    uint64_t pko_cmd_w1)
{
	uint64_t *cmdptr;
	int result = 0;

	cmdptr = (uint64_t *)PHYS_TO_XKPHYS(sc->sc_cmdptr.cmdptr, CCA_CACHED);
	cmdptr += sc->sc_cmdptr.cmdptr_idx;

	OCTEON_ETH_KASSERT(cmdptr != NULL);

	*cmdptr++ = pko_cmd_w0;
	*cmdptr++ = pko_cmd_w1;

	OCTEON_ETH_KASSERT(sc->sc_cmdptr.cmdptr_idx + 2 <= FPA_COMMAND_BUFFER_POOL_NWORDS - 1);

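	/*
	 * The last word of each FPA command buffer is reserved for a
	 * link to the next buffer; when the two words just written
	 * leave the index right against that slot, chain in a fresh
	 * buffer and restart at index 0.
	 */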
	if (sc->sc_cmdptr.cmdptr_idx + 2 == FPA_COMMAND_BUFFER_POOL_NWORDS - 1) {
		paddr_t buf;

		buf = cn30xxfpa_buf_get_paddr(octeon_eth_fb_cmd);
		if (buf == 0) {
			log(LOG_WARNING,
			    "%s: cannot allocate command buffer from free pool allocator\n",
			    sc->sc_dev.dv_xname);
			result = 1;
			goto done;
		}
		*cmdptr++ = buf;
		sc->sc_cmdptr.cmdptr = (uint64_t)buf;
		sc->sc_cmdptr.cmdptr_idx = 0;
	} else {
		sc->sc_cmdptr.cmdptr_idx += 2;
	}

	cn30xxpko_op_doorbell_write(sc->sc_port, sc->sc_port, 2);

done:
	return result;
}
Example #12
int
smfb_cnattach()
{
	long defattr;
	struct rasops_info *ri;
	vaddr_t fbbase;
	int rc;
	extern paddr_t loongson_pci_base;

	/* XXX hardwired fbmem address */
	fbbase = PHYS_TO_XKPHYS(loongson_pci_base + 0x14000000, CCA_NC);

	rc = smfb_setup(&smfbcn, fbbase);
	if (rc != 0)
		return rc;

	ri = &smfbcn.ri;
	ri->ri_ops.alloc_attr(ri, 0, 0, 0, &defattr);
	wsdisplay_cnattach(&smfbcn.wsd, ri, 0, 0, defattr);

	return 0;
}
Example #13
/*ARGSUSED*/
int
mmrw(dev_t dev, struct uio *uio, int flags)
{
	struct iovec *iov;
	boolean_t allowed;
	int error = 0;
	size_t c;
	vaddr_t v;

	while (uio->uio_resid > 0 && error == 0) {
		iov = uio->uio_iov;
		if (iov->iov_len == 0) {
			uio->uio_iov++;
			uio->uio_iovcnt--;
			if (uio->uio_iovcnt < 0)
				panic("mmrw");
			continue;
		}
		switch (minor(dev)) {

/* minor device 0 is physical memory */
		case 0:
			v = uio->uio_offset;
			c = iov->iov_len;
			if (v + c < v || v + c > ptoa((psize_t)physmem))
				return (EFAULT);
			v = (vaddr_t)PHYS_TO_XKPHYS(v, CCA_NONCOHERENT);
			error = uiomove((caddr_t)v, c, uio);
			continue;

/* minor device 1 is kernel memory */
		case 1:
			v = uio->uio_offset;
			c = ulmin(iov->iov_len, MAXPHYS);

			/* Allow access to RAM through XKPHYS... */
			if (IS_XKPHYS(v))
				allowed = is_memory_range(XKPHYS_TO_PHYS(v),
				    (psize_t)c, 0);
			/* ...or through CKSEG0... */
			else if (v >= CKSEG0_BASE &&
			    v < CKSEG0_BASE + CKSEG_SIZE)
				allowed = is_memory_range(CKSEG0_TO_PHYS(v),
				    (psize_t)c, CKSEG_SIZE);
			/* ...or through CKSEG1... */
			else if (v >= CKSEG1_BASE &&
			    v < CKSEG1_BASE + CKSEG_SIZE)
				allowed = is_memory_range(CKSEG1_TO_PHYS(v),
				    (psize_t)c, CKSEG_SIZE);
			/* ...otherwise, check it's within kernel kvm limits. */
			else
				allowed = uvm_kernacc((caddr_t)v, c,
				    uio->uio_rw == UIO_READ ? B_READ : B_WRITE);

			if (allowed) {
				error = uiomove((caddr_t)v, c, uio);
				continue;
			} else {
				return (EFAULT);
			}

/* minor device 2 is EOF/RATHOLE */
		case 2:
			if (uio->uio_rw == UIO_WRITE)
				uio->uio_resid = 0;
			return (0);

/* minor device 12 (/dev/zero) is source of nulls on read, rathole on write */
		case 12:
			if (uio->uio_rw == UIO_WRITE) {
				c = iov->iov_len;
				break;
			}
			if (zeropage == NULL)
				zeropage = malloc(PAGE_SIZE, M_TEMP,
				    M_WAITOK | M_ZERO);
			c = ulmin(iov->iov_len, PAGE_SIZE);
			error = uiomove(zeropage, c, uio);
			continue;

		default:
			return (ENODEV);
		}
		if (error)
			break;
		iov->iov_base += c;
		iov->iov_len -= c;
		uio->uio_offset += c;
		uio->uio_resid -= c;
	}
	return error;
}
Example #14
void
crime_setintrmask(int level)
{
	*(volatile uint64_t *)(PHYS_TO_XKPHYS(CRIMEBUS_BASE, CCA_NC) +
	    CRIME_INT_MASK) = crime_intem & ~crime_imask[level];
}
Example #15
void	 mace_space_barrier(bus_space_tag_t, bus_space_handle_t, bus_size_t,
	    bus_size_t, int);

bus_addr_t macebus_pa_to_device(paddr_t);
paddr_t	 macebus_device_to_pa(bus_addr_t);

struct cfattach macebus_ca = {
	sizeof(struct device), macebusmatch, macebusattach
};

struct cfdriver macebus_cd = {
	NULL, "macebus", DV_DULL
};

bus_space_t macebus_tag = {
	PHYS_TO_XKPHYS(MACEBUS_BASE, CCA_NC),
	NULL,
	mace_read_1, mace_write_1,
	mace_read_2, mace_write_2,
	mace_read_4, mace_write_4,
	mace_read_8, mace_write_8,
	mace_read_raw_2, mace_write_raw_2,
	mace_read_raw_4, mace_write_raw_4,
	mace_read_raw_8, mace_write_raw_8,
	mace_space_map, mace_space_unmap, mace_space_region,
	mace_space_vaddr, mace_space_barrier
};

bus_space_t crimebus_tag = {
	PHYS_TO_XKPHYS(CRIMEBUS_BASE, CCA_NC),
	NULL,
Example #16
void	*obio_space_vaddr(bus_space_tag_t, bus_space_handle_t);

bus_addr_t obio_pa_to_device(paddr_t);
paddr_t	 obio_device_to_pa(bus_addr_t);

struct cfattach obio_ca = {
	sizeof(struct device), obiomatch, obioattach
};

struct cfdriver obio_cd = {
	NULL, "obio", DV_DULL
};

bus_space_t obio_tag = {
	PHYS_TO_XKPHYS(0, CCA_NC),
	NULL,
	obio_read_1, obio_write_1,
	obio_read_2, obio_write_2,
	obio_read_4, obio_write_4,
	obio_read_8, obio_write_8,
	obio_read_raw_2, obio_write_raw_2,
	obio_read_raw_4, obio_write_raw_4,
	obio_read_raw_8, obio_write_raw_8,
	obio_space_map, obio_space_unmap, obio_space_region,
	obio_space_vaddr
};

bus_space_handle_t obio_h;

struct machine_bus_dma_tag obio_bus_dma_tag = {
Example #17
int
octeon_eth_recv_mbuf(struct octeon_eth_softc *sc, uint64_t *work,
    struct mbuf **rm)
{
	struct mbuf *m;
	void (*ext_free)(caddr_t, u_int, void *);
	void *ext_buf;
	size_t ext_size;
	caddr_t data;
	uint64_t word1 = work[1];
	uint64_t word2 = work[2];
	uint64_t word3 = work[3];

	MGETHDR(m, M_NOWAIT, MT_DATA);
	if (m == NULL)
		return 1;
	OCTEON_ETH_KASSERT(m != NULL);

	if ((word2 & PIP_WQE_WORD2_IP_BUFS) == 0) {
		/* Dynamic short */
		ext_free = octeon_eth_buf_ext_free_m;
		ext_buf = &work[4];
		ext_size = 96;

		/*
		 * If the packet is IP, the hardware has padded it so that the
		 * IP source address starts on the next 64-bit word boundary.
		 */
		data = (caddr_t)&work[4] + ETHER_ALIGN;
		if (!ISSET(word2, PIP_WQE_WORD2_IP_NI) &&
		    !ISSET(word2, PIP_WQE_WORD2_IP_V6))
			data += 4;
	} else {
		vaddr_t addr;
		vaddr_t start_buffer;

		addr = PHYS_TO_XKPHYS(word3 & PIP_WQE_WORD3_ADDR, CCA_CACHED);
		start_buffer = addr & ~(2048 - 1);

		ext_free = octeon_eth_buf_ext_free_ext;
		ext_buf = (void *)start_buffer;
		ext_size = 2048;

		data = (void *)addr;
	}

	MEXTADD(m, ext_buf, ext_size, 0, ext_free, work);
	OCTEON_ETH_KASSERT(ISSET(m->m_flags, M_EXT));

	m->m_data = data;
	m->m_len = m->m_pkthdr.len = (word1 & PIP_WQE_WORD1_LEN) >> 48;
#if 0
	/*
	 * not readonly buffer
	 */
	m->m_flags |= M_EXT_RW;
#endif

	*rm = m;

	OCTEON_ETH_KASSERT(*rm != NULL);

	return 0;
}
	._pa_to_device =	octeon_pcibus_pa_to_device,
	._device_to_pa =	octeon_pcibus_device_to_pa
};

int octeon_pcibus_io_map(bus_space_tag_t, bus_addr_t, bus_size_t, int,
    bus_space_handle_t *);
int octeon_pcibus_mem_map(bus_space_tag_t, bus_addr_t, bus_size_t, int,
    bus_space_handle_t *);

#define _OCTEON_PCIBUS_PCIIO_BASE	0x00001000
#define _OCTEON_PCIBUS_PCIIO_SIZE	0x08000000
#define _OCTEON_PCIBUS_PCIMEM_BASE	0x80000000
#define _OCTEON_PCIBUS_PCIMEM_SIZE	0x40000000

struct mips_bus_space octeon_pcibus_pci_io_space_tag = {
	.bus_base = PHYS_TO_XKPHYS(_OCTEON_PCIBUS_PCIIO_BASE, CCA_NC),
	.bus_private = NULL,
	._space_read_1 =	generic_space_read_1,
	._space_write_1 =	generic_space_write_1,
	._space_read_2 =	generic_space_read_2,
	._space_write_2 =	generic_space_write_2,
	._space_read_4 =	generic_space_read_4,
	._space_write_4 =	generic_space_write_4,
	._space_read_8 =	generic_space_read_8,
	._space_write_8 =	generic_space_write_8,
	._space_read_raw_2 =	generic_space_read_raw_2,
	._space_write_raw_2 =	generic_space_write_raw_2,
	._space_read_raw_4 =	generic_space_read_raw_4,
	._space_write_raw_4 =	generic_space_write_raw_4,
	._space_read_raw_8 =	generic_space_read_raw_8,
	._space_write_raw_8 =	generic_space_write_raw_8,
Example #19
	sizeof(struct device), imc_match, imc_attach
};

struct cfdriver imc_cd = {
	NULL, "imc", DV_DULL
};

uint32_t imc_bus_error(uint32_t, struct trap_frame *);
int	 imc_watchdog_cb(void *, int);

void	 imc_space_barrier(bus_space_tag_t, bus_space_handle_t, bus_size_t,
	    bus_size_t, int);

/* can't be static for gio_cnattach() */
bus_space_t imcbus_tag = {
	PHYS_TO_XKPHYS(0, CCA_NC),
	NULL,
	imc_read_1, imc_write_1,
	imc_read_2, imc_write_2,
	imc_read_4, imc_write_4,
	imc_read_8, imc_write_8,
	imc_read_raw_2, imc_write_raw_2,
	imc_read_raw_4, imc_write_raw_4,
	imc_read_raw_8, imc_write_raw_8,
	imc_space_map, imc_space_unmap, imc_space_region,
	imc_space_vaddr, imc_space_barrier
};

#if NEISA > 0
void	 imc_eisa_read_raw_2(bus_space_tag_t, bus_space_handle_t, bus_addr_t,
	    uint8_t *, bus_size_t);
/*
 * Common function for mapping DMA-safe memory.  May be called by
 * bus-specific DMA memory map functions.
 */
int
_dmamem_map(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs, size_t size,
    caddr_t *kvap, int flags)
{
	vaddr_t va, sva;
	size_t ssize;
	paddr_t pa;
	bus_addr_t addr;
	int curseg, error, pmap_flags;
	const struct kmem_dyn_mode *kd;

	if (nsegs == 1) {
		pa = (*t->_device_to_pa)(segs[0].ds_addr);
		if (flags & (BUS_DMA_COHERENT | BUS_DMA_NOCACHE))
			*kvap = (caddr_t)PHYS_TO_XKPHYS(pa, CCA_NC);
		else
			*kvap = (caddr_t)PHYS_TO_XKPHYS(pa, CCA_CACHED);
		return (0);
	}

	size = round_page(size);
	kd = flags & BUS_DMA_NOWAIT ? &kd_trylock : &kd_waitok;
	va = (vaddr_t)km_alloc(size, &kv_any, &kp_none, kd);
	if (va == 0)
		return (ENOMEM);

	*kvap = (caddr_t)va;

	sva = va;
	ssize = size;
	pmap_flags = PMAP_WIRED | PMAP_CANFAIL;
	if (flags & (BUS_DMA_COHERENT | BUS_DMA_NOCACHE))
		pmap_flags |= PMAP_NOCACHE;
	for (curseg = 0; curseg < nsegs; curseg++) {
		for (addr = segs[curseg].ds_addr;
		    addr < (segs[curseg].ds_addr + segs[curseg].ds_len);
		    addr += NBPG, va += NBPG, size -= NBPG) {
			if (size == 0)
				panic("_dmamem_map: size botch");
			pa = (*t->_device_to_pa)(addr);
			error = pmap_enter(pmap_kernel(), va, pa,
			    PROT_READ | PROT_WRITE,
			    PROT_READ | PROT_WRITE | pmap_flags);
			if (error) {
				pmap_update(pmap_kernel());
				km_free((void *)sva, ssize, &kv_any, &kp_none);
				return (error);
			}

			/*
			 * This is redundant with what pmap_enter() did
			 * above, but will take care of forcing other
			 * mappings of the same page (if any) to be
			 * uncached.
			 * If there are no multiple mappings of that
			 * page, this amounts to a noop.
			 */
			if (flags & (BUS_DMA_COHERENT | BUS_DMA_NOCACHE))
				pmap_page_cache(PHYS_TO_VM_PAGE(pa),
				    PGF_UNCACHED);
		}
		pmap_update(pmap_kernel());
	}

	return (0);
}
Example #21
int
giofb_cnprobe()
{
	struct gio_attach_args ga;
	uint32_t id;
	int i;
	int sys_type;

	switch (sys_config.system_type) {
	case SGI_IP20:
		sys_type = SGI_IP20;
		break;
	default:
	case SGI_IP22:
	case SGI_IP26:
	case SGI_IP28:
		sys_type = SGI_IP22;
		break;
	}

	for (i = 0; gfx_bases[i].base != 0; i++) {
		if (giofb_consaddr != 0 &&
		    gfx_bases[i].base != giofb_consaddr)
			continue;

		/* skip bases that don't apply to us */
		if (gfx_bases[i].mach_type != sys_type)
			continue;

		if (gfx_bases[i].mach_subtype != -1 &&
		    gfx_bases[i].mach_subtype != sys_config.system_subtype)
			continue;

		ga.ga_addr = gfx_bases[i].base;
		ga.ga_iot = &imcbus_tag;
		ga.ga_ioh = PHYS_TO_XKPHYS(ga.ga_addr, CCA_NC);
		ga.ga_dmat = &imc_bus_dma_tag;
		ga.ga_slot = -1;
		ga.ga_descr = NULL;

		id = gio_id(ga.ga_ioh, ga.ga_addr, 1);
		if (!gio_is_framebuffer_id(id))
			continue;

		ga.ga_product = giofb_consid = id;
		switch (id) {
		default:
#if NIMPACT_GIO > 0
			if (GIO_PRODUCT_32BIT_ID(id) &&
			    GIO_PRODUCT_PRODUCTID(id) == GIO_PRODUCT_IMPACT) {
				if (impact_gio_cnprobe(&ga) != 0)
					return 0;
			}
#endif
			break;
		case GIO_PRODUCT_FAKEID_GRTWO:
#if NGRTWO > 0
			if (grtwo_cnprobe(&ga) != 0)
				return 0;
#endif
			break;
		case GIO_PRODUCT_FAKEID_LIGHT:
#if NLIGHT > 0
			if (light_cnprobe(&ga) != 0)
				return 0;
#endif
			break;
		case GIO_PRODUCT_FAKEID_NEWPORT:
#if NNEWPORT > 0
			if (newport_cnprobe(&ga) != 0)
				return 0;
#endif
			break;
		}
	}

	return ENXIO;
}
Example #22
void	*iobus_space_vaddr(bus_space_tag_t, bus_space_handle_t);

bus_addr_t iobus_pa_to_device(paddr_t);
paddr_t	 iobus_device_to_pa(bus_addr_t);

struct cfattach iobus_ca = {
	sizeof(struct device), iobusmatch, iobusattach
};

struct cfdriver iobus_cd = {
	NULL, "iobus", DV_DULL
};

bus_space_t iobus_tag = {
	.bus_base = PHYS_TO_XKPHYS(0, CCA_NC),
	.bus_private = NULL,
	._space_read_1 =	generic_space_read_1,
	._space_write_1 =	generic_space_write_1,
	._space_read_2 =	generic_space_read_2,
	._space_write_2 =	generic_space_write_2,
	._space_read_4 =	generic_space_read_4,
	._space_write_4 =	generic_space_write_4,
	._space_read_8 =	generic_space_read_8,
	._space_write_8 =	generic_space_write_8,
	._space_read_raw_2 =	generic_space_read_raw_2,
	._space_write_raw_2 =	generic_space_write_raw_2,
	._space_read_raw_4 =	generic_space_read_raw_4,
	._space_write_raw_4 =	generic_space_write_raw_4,
	._space_read_raw_8 =	generic_space_read_raw_8,
	._space_write_raw_8 =	generic_space_write_raw_8,
Example #23
void
gio_attach(struct device *parent, struct device *self, void *aux)
{
	struct gio_softc *sc = (struct gio_softc *)self;
	struct imc_attach_args *iaa = (struct imc_attach_args *)aux;
	struct gio_attach_args ga;
	uint32_t gfx[GIO_MAX_FB], id;
	uint i, j, ngfx;
	int sys_type;

	printf("\n");

	sc->sc_iot = iaa->iaa_st;
	sc->sc_dmat = iaa->iaa_dmat;

	switch (sys_config.system_type) {
	case SGI_IP20:
		sys_type = SGI_IP20;
		break;
	default:
	case SGI_IP22:
	case SGI_IP26:
	case SGI_IP28:
		sys_type = SGI_IP22;
		break;
	}

	ngfx = 0;
	memset(gfx, 0, sizeof(gfx));

	/*
	 * Try and attach graphics devices first.
	 * Unfortunately, they - not being GIO devices after all - do not
	 * contain a Product Identification Word, nor have a slot number.
	 *
	 * Record addresses to which graphics devices attach so that
	 * we do not confuse them with expansion slots, should the
	 * addresses coincide.
	 *
	 * If only the ARCBios component tree would be so kind as to give
	 * us the address of the frame buffer components...
	 */
	if (sys_type != SGI_IP22 ||
	    sys_config.system_subtype != IP22_CHALLS) {
		for (i = 0; gfx_bases[i].base != 0; i++) {
			/* skip slots that don't apply to us */
			if (gfx_bases[i].mach_type != sys_type)
				continue;

			if (gfx_bases[i].mach_subtype != -1 &&
			    gfx_bases[i].mach_subtype !=
			      sys_config.system_subtype)
				continue;

			ga.ga_addr = gfx_bases[i].base;
			ga.ga_ioh = PHYS_TO_XKPHYS(ga.ga_addr, CCA_NC);

			/* no need to probe a glass console again */
			if (ga.ga_addr == giofb_consaddr && giofb_consid != 0)
				id = giofb_consid;
			else {
				id = gio_id(ga.ga_ioh, ga.ga_addr, 1);
				if (!gio_is_framebuffer_id(id))
					continue;
			}

			ga.ga_iot = sc->sc_iot;
			ga.ga_dmat = sc->sc_dmat;
			ga.ga_slot = -1;
			ga.ga_product = id;
			/*
			 * Note that this relies upon ARCBios listing frame
			 * buffers in ascending address order, which seems
			 * to be the case so far on multihead Indigo2 systems.
			 */
			if (ngfx < GIO_MAX_FB)
				ga.ga_descr = giofb_names[ngfx];
			else
				ga.ga_descr = NULL;	/* shouldn't happen */

			if (config_found_sm(self, &ga, gio_print_fb,
			    gio_submatch))
				gfx[ngfx] = gfx_bases[i].base;

			ngfx++;
		}
	}

	/*
	 * Now attach any GIO expansion cards.
	 *
	 * Be sure to skip any addresses to which a graphics device has
	 * already been attached.
	 */
	for (i = 0; slot_bases[i].base != 0; i++) {
		int skip = 0;

		/* skip slots that don't apply to us */
		if (slot_bases[i].mach_type != sys_type)
			continue;

		if (slot_bases[i].mach_subtype != -1 &&
		    slot_bases[i].mach_subtype != sys_config.system_subtype)
			continue;

		for (j = 0; j < ngfx; j++) {
			if (slot_bases[i].base == gfx[j]) {
				skip = 1;
				break;
			}
		}
		if (skip)
			continue;

		ga.ga_addr = slot_bases[i].base;
		ga.ga_iot = sc->sc_iot;
		ga.ga_ioh = PHYS_TO_XKPHYS(ga.ga_addr, CCA_NC);

		id = gio_id(ga.ga_ioh, ga.ga_addr, 0);
		if (id == 0)
			continue;

		ga.ga_dmat = sc->sc_dmat;
		ga.ga_slot = slot_bases[i].slot;
		ga.ga_product = id;
		ga.ga_descr = NULL;

		config_found_sm(self, &ga, gio_print, gio_submatch);
	}

	config_search(gio_search, self, aux);
}
Example #24
/*
 * Common function for mapping DMA-safe memory.  May be called by
 * bus-specific DMA memory map functions.
 */
int
_dmamem_map(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs, size_t size,
    caddr_t *kvap, int flags)
{
	vaddr_t va, sva;
	size_t ssize;
	paddr_t pa;
	bus_addr_t addr;
	int curseg, error, pmap_flags;

#if defined(TGT_INDIGO2)
	/*
	 * On ECC MC systems, which do not allow uncached writes to memory
	 * during regular operation, fail requests for uncached (coherent)
	 * memory, unless the caller tells us it is aware of this and will
	 * do the right thing, by passing BUS_DMA_BUS1 as well.
	 */
	if ((flags & (BUS_DMA_COHERENT | BUS_DMA_BUS1)) == BUS_DMA_COHERENT &&
	    ip22_ecc)
		return EINVAL;
#endif

#ifdef TGT_COHERENT
	/* coherent mappings do not need to be uncached on these platforms */
	flags &= ~BUS_DMA_COHERENT;
#endif

	if (nsegs == 1) {
		pa = (*t->_device_to_pa)(segs[0].ds_addr);
		if (flags & (BUS_DMA_COHERENT | BUS_DMA_NOCACHE))
			*kvap = (caddr_t)PHYS_TO_XKPHYS(pa, CCA_NC);
		else
			*kvap = (caddr_t)PHYS_TO_XKPHYS(pa, CCA_CACHED);
		return (0);
	}

	size = round_page(size);
	va = uvm_km_valloc(kernel_map, size);
	if (va == 0)
		return (ENOMEM);

	*kvap = (caddr_t)va;

	sva = va;
	ssize = size;
	pmap_flags = PMAP_WIRED | PMAP_CANFAIL;
	if (flags & (BUS_DMA_COHERENT | BUS_DMA_NOCACHE))
		pmap_flags |= PMAP_NOCACHE;
	for (curseg = 0; curseg < nsegs; curseg++) {
		for (addr = segs[curseg].ds_addr;
		    addr < (segs[curseg].ds_addr + segs[curseg].ds_len);
		    addr += NBPG, va += NBPG, size -= NBPG) {
#ifdef DIAGNOSTIC
			if (size == 0)
				panic("_dmamem_map: size botch");
#endif
			pa = (*t->_device_to_pa)(addr);
			error = pmap_enter(pmap_kernel(), va, pa,
			    VM_PROT_READ | VM_PROT_WRITE, VM_PROT_READ |
			    VM_PROT_WRITE | pmap_flags);
			if (error) {
				pmap_update(pmap_kernel());
				uvm_km_free(kernel_map, sva, ssize);
				return (error);
			}

			/*
			 * This is redundant with what pmap_enter() did
			 * above, but will take care of forcing other
			 * mappings of the same page (if any) to be
			 * uncached.
			 * If there are no multiple mappings of that
			 * page, this amounts to a noop.
			 */
			if (flags & (BUS_DMA_COHERENT | BUS_DMA_NOCACHE))
				pmap_page_cache(PHYS_TO_VM_PAGE(pa),
				    PV_UNCACHED);
		}
		pmap_update(pmap_kernel());
	}

	return (0);
}