static int
generic_pcie_activate_resource(device_t dev, device_t child, int type,
    int rid, struct resource *r)
{
	struct generic_pcie_core_softc *sc;
	uint64_t phys_base;
	uint64_t pci_base;
	uint64_t size;
	int found;
	int res;
	int i;

	sc = device_get_softc(dev);

	if ((res = rman_activate_resource(r)) != 0)
		return (res);

	switch (type) {
	case SYS_RES_IOPORT:
		found = 0;
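		/*
		 * Look up the I/O window among the bridge's ranges so the
		 * resource can be rebased from its PCI bus address to the
		 * CPU physical address of that window.
		 */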
		for (i = 0; i < MAX_RANGES_TUPLES; i++) {
			pci_base = sc->ranges[i].pci_base;
			phys_base = sc->ranges[i].phys_base;
			size = sc->ranges[i].size;

			if ((rid > pci_base) && (rid < (pci_base + size))) {
				found = 1;
				break;
			}
		}
		if (found) {
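			/*
			 * Shift the resource by the window's physical base
			 * before asking the parent bus to map it.
			 */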
			rman_set_start(r, rman_get_start(r) + phys_base);
			rman_set_end(r, rman_get_end(r) + phys_base);
			res = BUS_ACTIVATE_RESOURCE(device_get_parent(dev),
			    child, type, rid, r);
		} else {
			device_printf(dev,
			    "Failed to activate IOPORT resource\n");
			res = 0;
		}
		break;
	case SYS_RES_MEMORY:
	case SYS_RES_IRQ:
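		/*
		 * Memory and IRQ resources need no address translation;
		 * pass them straight to the parent bus.
		 */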
		res = BUS_ACTIVATE_RESOURCE(device_get_parent(dev), child,
		    type, rid, r);
		break;
	default:
		break;
	}

	return (res);
}
Example #2
static int
sa1110_setup_intr(device_t dev, device_t child,
    struct resource *ires, int flags, driver_filter_t *filt,
    driver_intr_t *intr, void *arg, void **cookiep)
{
	int saved_cpsr;
	
	/*
	 * Retarget the interrupt according to the requesting driver's type:
	 * TTY interrupts are wired to IRQ 15 and the two clock sources to
	 * IRQs 26 and 27.
	 */
	if (flags & INTR_TYPE_TTY)
		rman_set_start(ires, 15);
	else if (flags & INTR_TYPE_CLK) {
		if (rman_get_start(ires) == 0)
			rman_set_start(ires, 26);
		else
			rman_set_start(ires, 27);
	}
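	/*
	 * Mask IRQs while saving the CPSR, restore the previous interrupt
	 * state, and hand the request to the parent bus to install the
	 * handler.
	 */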
	saved_cpsr = SetCPSR(I32_bit, I32_bit);

	SetCPSR(I32_bit, saved_cpsr & I32_bit);
	BUS_SETUP_INTR(device_get_parent(dev), child, ires, flags, filt,
	    intr, arg, cookiep);
	return (0);
}
Example #3
static int
zbpci_activate_resource(device_t bus, device_t child, int type, int rid,
			struct resource *res)
{
	int error;
	void *vaddr;
	u_long orig_paddr, paddr, psize;

	paddr = rman_get_start(res);
	psize = rman_get_size(res);
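	/*
	 * Remember the untranslated address; the big-endian memory case
	 * below restores it once the resource has been activated.
	 */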
	orig_paddr = paddr;

#if _BYTE_ORDER == _BIG_ENDIAN
	/*
	 * The CFE allocates PCI memory resources that map to the
	 * "match byte lanes" address space. This address space works
	 * best for DMA transfers because it does not do any automatic
	 * byte swaps when data crosses the pci-cpu interface.
	 *
	 * This also makes it sub-optimal for accesses to PCI device
	 * registers because it exposes the little-endian nature of
	 * the PCI bus to the big-endian CPU. The Sibyte has another
	 * address window called the "match bit lanes" window which
	 * automatically swaps bytes when data crosses the pci-cpu
	 * interface.
	 *
	 * We "assume" that any bus_space memory accesses done by the
	 * CPU to a PCI device are register/configuration accesses and
	 * are done through the "match bit lanes" window. Any DMA
	 * transfers will continue to be through the "match byte lanes"
	 * window because the PCI BAR registers will not be changed.
	 */
	if (type == SYS_RES_MEMORY) {
		if (paddr >= PCI_MATCH_BYTE_LANES_START &&
		    paddr + psize - 1 <= PCI_MATCH_BYTE_LANES_END) {
			paddr |= PCI_MATCH_BIT_LANES_MASK;
			rman_set_start(res, paddr);
			rman_set_end(res, paddr + psize - 1);
		}
	}
#endif

	if (type != SYS_RES_IOPORT) {
		error = bus_generic_activate_resource(bus, child, type,
						      rid, res);
#if _BYTE_ORDER == _BIG_ENDIAN
		if (type == SYS_RES_MEMORY) {
			rman_set_start(res, orig_paddr);
			rman_set_end(res, orig_paddr + psize - 1);
		}
#endif
		return (error);
	}

	/*
	 * Map the I/O space resource through the memory window starting
	 * at PCI_IOSPACE_ADDR.
	 */
	vaddr = pmap_mapdev(paddr + PCI_IOSPACE_ADDR, psize);

	/*
	 * Record the mapping in the resource so that bus_space accesses
	 * through its tag and handle reach the I/O window.
	 */
	rman_set_virtual(res, vaddr);
	rman_set_bustag(res, mips_bus_space_generic);
	rman_set_bushandle(res, (bus_space_handle_t)vaddr);

	return (rman_activate_resource(res));
}