Example #1
static struct resource *
ofwbus_alloc_resource(device_t bus, device_t child, int type, int *rid,
    rman_res_t start, rman_res_t end, rman_res_t count, u_int flags)
{
	struct ofwbus_softc *sc;
	struct rman *rm;
	struct resource *rv;
	struct resource_list_entry *rle;
	int isdefault, passthrough;

	isdefault = RMAN_IS_DEFAULT_RANGE(start, end);
	passthrough = (device_get_parent(child) != bus);
	sc = device_get_softc(bus);
	rle = NULL;
	if (!passthrough && isdefault) {
		rle = resource_list_find(BUS_GET_RESOURCE_LIST(bus, child),
		    type, *rid);
		if (rle == NULL) {
			if (bootverbose)
				device_printf(bus, "no default resources for "
				    "rid = %d, type = %d\n", *rid, type);
			return (NULL);
		}
		start = rle->start;
		count = ulmax(count, rle->count);
		end = ulmax(rle->end, start + count - 1);
	}

	switch (type) {
	case SYS_RES_IRQ:
		rm = &sc->sc_intr_rman;
		break;
	case SYS_RES_MEMORY:
		rm = &sc->sc_mem_rman;
		break;
	default:
		return (NULL);
	}

	rv = rman_reserve_resource(rm, start, end, count, flags & ~RF_ACTIVE,
	    child);
	if (rv == NULL)
		return (NULL);
	rman_set_rid(rv, *rid);

	if ((flags & RF_ACTIVE) != 0 && bus_activate_resource(child, type,
	    *rid, rv) != 0) {
		rman_release_resource(rv);
		return (NULL);
	}

	if (!passthrough && rle != NULL) {
		rle->res = rv;
		rle->start = rman_get_start(rv);
		rle->end = rman_get_end(rv);
		rle->count = rle->end - rle->start + 1;
	}

	return (rv);
}
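
Every example on this page ultimately calls ulmax() (and often ulmin()).
For reference, these are the unsigned-long min/max helpers from FreeBSD's
sys/libkern.h; a minimal sketch of their stock definitions:

static __inline u_long
ulmax(u_long a, u_long b)
{
	return (a > b ? a : b);
}

static __inline u_long
ulmin(u_long a, u_long b)
{
	return (a < b ? a : b);
}

The recurring three-line idiom after resource_list_find() uses ulmax() to
widen a default allocation request until it covers at least the range
recorded in the child's resource-list entry.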
Example #2
static struct resource *
ofwbus_alloc_resource(device_t bus, device_t child, int type, int *rid,
    u_long start, u_long end, u_long count, u_int flags)
{
	struct ofwbus_softc *sc;
	struct rman *rm;
	struct resource *rv;
	struct resource_list_entry *rle;
	int isdefault, passthrough;

	isdefault = (start == 0UL && end == ~0UL);
	passthrough = (device_get_parent(child) != bus);
	sc = device_get_softc(bus);
	rle = NULL;

	if (!passthrough && isdefault) {
		rle = resource_list_find(BUS_GET_RESOURCE_LIST(bus, child),
		    type, *rid);
		if (rle == NULL)
			return (NULL);
		if (rle->res != NULL)
			panic("%s: resource entry is busy", __func__);
		start = rle->start;
		count = ulmax(count, rle->count);
		end = ulmax(rle->end, start + count - 1);
	}

	switch (type) {
	case SYS_RES_IRQ:
		rm = &sc->sc_intr_rman;
		break;
	case SYS_RES_MEMORY:
		rm = &sc->sc_mem_rman;
		break;
	default:
		return (NULL);
	}

	rv = rman_reserve_resource(rm, start, end, count, flags & ~RF_ACTIVE,
	    child);
	if (rv == NULL)
		return (NULL);
	rman_set_rid(rv, *rid);

	if ((flags & RF_ACTIVE) != 0 && bus_activate_resource(child, type,
	    *rid, rv) != 0) {
		rman_release_resource(rv);
		return (NULL);
	}

	if (!passthrough && rle != NULL) {
		rle->res = rv;
		rle->start = rman_get_start(rv);
		rle->end = rman_get_end(rv);
		rle->count = rle->end - rle->start + 1;
	}

	return (rv);
}
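
The only real difference from Example #1 is the default-range test: newer
FreeBSD spells it RMAN_IS_DEFAULT_RANGE(start, end), while this older code
open-codes the same comparison.  A sketch of the macro, assuming the stock
sys/rman.h definition:

#define	RM_MAX_END	((rman_res_t)~0)

#define	RMAN_IS_DEFAULT_RANGE(s, e)	((s) == 0 && (e) == RM_MAX_END)

When rman_res_t was still u_long, this is exactly the
(start == 0UL && end == ~0UL) check used here.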
Example #3
static struct resource *
central_alloc_resource(device_t bus, device_t child, int type, int *rid,
    u_long start, u_long end, u_long count, u_int flags)
{
	struct resource_list *rl;
	struct resource_list_entry *rle;
	struct central_softc *sc;
	struct resource *res;
	bus_addr_t coffset;
	bus_addr_t cend;
	bus_addr_t phys;
	int isdefault;
	int passthrough;
	int i;

	isdefault = (start == 0UL && end == ~0UL);
	passthrough = (device_get_parent(child) != bus);
	res = NULL;
	rle = NULL;
	rl = BUS_GET_RESOURCE_LIST(bus, child);
	sc = device_get_softc(bus);
	switch (type) {
	case SYS_RES_IRQ:
		return (resource_list_alloc(rl, bus, child, type, rid, start,
		    end, count, flags));
	case SYS_RES_MEMORY:
		if (!passthrough) {
			rle = resource_list_find(rl, type, *rid);
			if (rle == NULL)
				return (NULL);
			if (rle->res != NULL)
				panic("%s: resource entry is busy", __func__);
			if (isdefault) {
				start = rle->start;
				count = ulmax(count, rle->count);
				end = ulmax(rle->end, start + count - 1);
			}
		}
		for (i = 0; i < sc->sc_nrange; i++) {
			coffset = sc->sc_ranges[i].coffset;
			cend = coffset + sc->sc_ranges[i].size - 1;
			if (start >= coffset && end <= cend) {
				start -= coffset;
				end -= coffset;
				phys = sc->sc_ranges[i].poffset |
				    ((bus_addr_t)sc->sc_ranges[i].pspace << 32);
				res = bus_generic_alloc_resource(bus, child,
				    type, rid, phys + start, phys + end,
				    count, flags);
				if (!passthrough)
					rle->res = res;
				break;
			}
		}
		break;
	}
	return (res);
}
Example #4
static struct resource *
at91_alloc_resource(device_t dev, device_t child, int type, int *rid,
    u_long start, u_long end, u_long count, u_int flags)
{
	struct at91_softc *sc = device_get_softc(dev);
	struct resource_list_entry *rle;
	struct at91_ivar *ivar = device_get_ivars(child);
	struct resource_list *rl = &ivar->resources;

	if (device_get_parent(child) != dev)
		return (BUS_ALLOC_RESOURCE(device_get_parent(dev), child,
		    type, rid, start, end, count, flags));
	
	rle = resource_list_find(rl, type, *rid);
	if (rle == NULL)
		return (NULL);
	if (rle->res)
		panic("Resource rid %d type %d already in use", *rid, type);
	if (start == 0UL && end == ~0UL) {
		start = rle->start;
		count = ulmax(count, rle->count);
		end = ulmax(rle->end, start + count - 1);
	}
	switch (type) {
	case SYS_RES_IRQ:
		rle->res = rman_reserve_resource(&sc->sc_irq_rman,
		    start, end, count, flags, child);
		break;
	case SYS_RES_MEMORY:
#if 0
		if (start >= 0x00300000 && start <= 0x003fffff)
			rle->res = rman_reserve_resource(&sc->sc_usbmem_rman,
			    start, end, count, flags, child);
		else
#endif
			rle->res = rman_reserve_resource(&sc->sc_mem_rman,
			    start, end, count, flags, child);
		if (rle->res != NULL) {
			rman_set_bustag(rle->res, &at91_bs_tag);
			rman_set_bushandle(rle->res, start);
		}
		break;
	}
	if (rle->res) {
		rle->start = rman_get_start(rle->res);
		rle->end = rman_get_end(rle->res);
		rle->count = count;
		rman_set_rid(rle->res, *rid);
	}
	return (rle->res);
}
Example #5
/**
 *	omap_alloc_resource
 *
 *	This function is called from bus_alloc_resource(...) when the memory
 *	region requested falls within the range of managed values set by
 *	rman_manage_region(...) above.
 *
 *	For SYS_RES_MEMORY resource types, omap_attach() calls
 *	rman_manage_region() with the list of physical mappings defined in the
 *	omap_devmap region map.  However, because we are working with physical
 *	addresses, we need to convert the physical address to a virtual one
 *	within this function and return the virtual address in the bus handle
 *	field.
 *
 */
static struct resource *
omap_alloc_resource(device_t dev, device_t child, int type, int *rid,
                     u_long start, u_long end, u_long count, u_int flags)
{
	struct omap_softc *sc = device_get_softc(dev);
	struct resource_list_entry *rle;
	struct omap_ivar *ivar = device_get_ivars(child);
	struct resource_list *rl = &ivar->resources;

	/* If we aren't the parent pass it onto the actual parent */
	if (device_get_parent(child) != dev) {
		return (BUS_ALLOC_RESOURCE(device_get_parent(dev), child,
		    type, rid, start, end, count, flags));
	}
	
	/* Find the resource in the list */
	rle = resource_list_find(rl, type, *rid);
	if (rle == NULL)
		return (NULL);
	if (rle->res)
		panic("Resource rid %d type %d already in use", *rid, type);

	if (start == 0UL && end == ~0UL) {
		start = rle->start;
		count = ulmax(count, rle->count);
		end = ulmax(rle->end, start + count - 1);
	}

	switch (type) {
	case SYS_RES_IRQ:
		rle->res = rman_reserve_resource(&sc->sc_irq_rman,
		    start, end, count, flags, child);
		break;
	case SYS_RES_MEMORY:
		rle->res = rman_reserve_resource(&sc->sc_mem_rman,
		    start, end, count, flags, child);
		if (rle->res != NULL) {
			rman_set_bustag(rle->res, &omap_bs_tag);
			rman_set_bushandle(rle->res, omap_devmap_phys2virt(start));
		}
		break;
	}

	if (rle->res) {
		rle->start = rman_get_start(rle->res);
		rle->end = rman_get_end(rle->res);
		rle->count = count;
		rman_set_rid(rle->res, *rid);
	}
	return (rle->res);
}
Example #6
static struct resource *
at91_alloc_resource(device_t dev, device_t child, int type, int *rid,
    rman_res_t start, rman_res_t end, rman_res_t count, u_int flags)
{
	struct at91_softc *sc = device_get_softc(dev);
	struct resource_list_entry *rle;
	struct at91_ivar *ivar = device_get_ivars(child);
	struct resource_list *rl = &ivar->resources;
	bus_space_handle_t bsh;

	if (device_get_parent(child) != dev)
		return (BUS_ALLOC_RESOURCE(device_get_parent(dev), child,
		    type, rid, start, end, count, flags));
	
	rle = resource_list_find(rl, type, *rid);
	if (rle == NULL)
		return (NULL);
	if (rle->res)
		panic("Resource rid %d type %d already in use", *rid, type);
	if (start == 0UL && end == ~0UL) {
		start = rle->start;
		count = ulmax(count, rle->count);
		end = ulmax(rle->end, start + count - 1);
	}
	switch (type) {
	case SYS_RES_IRQ:
		rle->res = rman_reserve_resource(&sc->sc_irq_rman,
		    start, end, count, flags, child);
		break;
	case SYS_RES_MEMORY:
		rle->res = rman_reserve_resource(&sc->sc_mem_rman,
		    start, end, count, flags, child);
		if (rle->res != NULL) {
			bus_space_map(arm_base_bs_tag, start,
			    rman_get_size(rle->res), 0, &bsh);
			rman_set_bustag(rle->res, arm_base_bs_tag);
			rman_set_bushandle(rle->res, bsh);
		}
		break;
	}
	if (rle->res) {
		rle->start = rman_get_start(rle->res);
		rle->end = rman_get_end(rle->res);
		rle->count = count;
		rman_set_rid(rle->res, *rid);
	}
	return (rle->res);
}
Example #7
struct resource *
pcib_host_res_alloc(struct pcib_host_resources *hr, device_t dev, int type,
    int *rid, u_long start, u_long end, u_long count, u_int flags)
{
	struct resource_list_entry *rle;
	struct resource *r;
	u_long new_start, new_end;

	if (flags & RF_PREFETCHABLE)
		KASSERT(type == SYS_RES_MEMORY,
		    ("only memory is prefetchable"));

	rle = resource_list_find(&hr->hr_rl, type, 0);
	if (rle == NULL) {
		/*
		 * No decoding ranges for this resource type, just pass
		 * the request up to the parent.
		 */
		return (bus_generic_alloc_resource(hr->hr_pcib, dev, type, rid,
		    start, end, count, flags));
	}

restart:
	/* Try to allocate from each decoded range. */
	for (; rle != NULL; rle = STAILQ_NEXT(rle, link)) {
		if (rle->type != type)
			continue;
		if (((flags & RF_PREFETCHABLE) != 0) !=
		    ((rle->flags & RLE_PREFETCH) != 0))
			continue;
		new_start = ulmax(start, rle->start);
		new_end = ulmin(end, rle->end);
		if (new_start > new_end ||
		    new_start + count - 1 > new_end ||
		    new_start + count < new_start)
			continue;
		r = bus_generic_alloc_resource(hr->hr_pcib, dev, type, rid,
		    new_start, new_end, count, flags);
		if (r != NULL) {
			if (bootverbose)
				device_printf(hr->hr_pcib,
			    "allocated type %d (%#lx-%#lx) for rid %x of %s\n",
				    type, rman_get_start(r), rman_get_end(r),
				    *rid, pcib_child_name(dev));
			return (r);
		}
	}

	/*
	 * If we failed to find a prefetch range for a memory
	 * resource, try again without prefetch.
	 */
	if (flags & RF_PREFETCHABLE) {
		flags &= ~RF_PREFETCHABLE;
		rle = resource_list_find(&hr->hr_rl, type, 0);
		goto restart;
	}
	return (NULL);
}
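
The three-part test inside the loop above is an overflow-safe containment
check: it rejects an empty intersection, a request that wraps past zero,
and a block that does not fit in the decoded window.  The same logic pulled
out into a standalone, hypothetical helper (not part of the source):

#include <stdbool.h>
#include <stdint.h>

/*
 * Can a block of 'count' units starting at or after 'start' be placed
 * inside the decoded window [wstart, wend], given the request limit 'end'?
 */
static bool
window_can_fit(uint64_t wstart, uint64_t wend, uint64_t start,
    uint64_t end, uint64_t count)
{
	uint64_t new_start = start > wstart ? start : wstart;	/* ulmax */
	uint64_t new_end = end < wend ? end : wend;		/* ulmin */

	if (new_start > new_end)		/* empty intersection */
		return (false);
	if (new_start + count < new_start)	/* unsigned wraparound */
		return (false);
	if (new_start + count - 1 > new_end)	/* block does not fit */
		return (false);
	return (true);
}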
Example #8
uintptr_t
powerpc_init(vm_offset_t fdt, vm_offset_t toc, vm_offset_t ofentry, void *mdp)
{
	struct		pcpu *pc;
	vm_offset_t	startkernel, endkernel;
	void		*kmdp;
	char		*env;
	bool		ofw_bootargs = false;
#ifdef DDB
	vm_offset_t ksym_start;
	vm_offset_t ksym_end;
#endif

	kmdp = NULL;

	/* First guess at start/end kernel positions */
	startkernel = __startkernel;
	endkernel = __endkernel;

	/* Check for ePAPR loader, which puts a magic value into r6 */
	if (mdp == (void *)0x65504150)
		mdp = NULL;

#ifdef AIM
	/*
	 * If running from an FDT, make sure we are in real mode to avoid
	 * tromping on firmware page tables. Everything in the kernel assumes
	 * 1:1 mappings out of firmware, so this won't break anything not
	 * already broken. This doesn't work if there is live OF, since OF
	 * may internally use non-1:1 mappings.
	 */
	if (ofentry == 0)
		mtmsr(mfmsr() & ~(PSL_IR | PSL_DR));
#endif

	/*
	 * Parse metadata if present and fetch parameters.  Must be done
	 * before console is inited so cninit gets the right value of
	 * boothowto.
	 */
	if (mdp != NULL) {
		preload_metadata = mdp;
		kmdp = preload_search_by_type("elf kernel");
		if (kmdp != NULL) {
			boothowto = MD_FETCH(kmdp, MODINFOMD_HOWTO, int);
			init_static_kenv(MD_FETCH(kmdp, MODINFOMD_ENVP, char *),
			    0);
			endkernel = ulmax(endkernel, MD_FETCH(kmdp,
			    MODINFOMD_KERNEND, vm_offset_t));
#ifdef DDB
			ksym_start = MD_FETCH(kmdp, MODINFOMD_SSYM, uintptr_t);
			ksym_end = MD_FETCH(kmdp, MODINFOMD_ESYM, uintptr_t);
			db_fetch_ksymtab(ksym_start, ksym_end);
#endif
		}
Example #9
uintptr_t
powerpc_init(vm_offset_t startkernel, vm_offset_t endkernel,
    vm_offset_t basekernel, void *mdp)
{
	struct		pcpu *pc;
	void		*generictrap;
	size_t		trap_offset;
	void		*kmdp;
	char		*env;
	register_t	msr, scratch;
#ifdef WII
	register_t 	vers;
#endif
	uint8_t		*cache_check;
	int		cacheline_warn;
	#ifndef __powerpc64__
	int		ppc64;
	#endif

	kmdp = NULL;
	trap_offset = 0;
	cacheline_warn = 0;

	/* Save trap vectors. */
	ofw_save_trap_vec(save_trap_init);

#ifdef WII
	/*
	 * The Wii loader doesn't pass us any environment, so mdp
	 * points to garbage at this point.  The Wii CPU is a 750CL.
	 */
	vers = mfpvr();
	if ((vers & 0xfffff0e0) == (MPC750 << 16 | MPC750CL)) 
		mdp = NULL;
#endif

	/*
	 * Parse metadata if present and fetch parameters.  Must be done
	 * before console is inited so cninit gets the right value of
	 * boothowto.
	 */
	if (mdp != NULL) {
		preload_metadata = mdp;
		kmdp = preload_search_by_type("elf kernel");
		if (kmdp != NULL) {
			boothowto = MD_FETCH(kmdp, MODINFOMD_HOWTO, int);
			kern_envp = MD_FETCH(kmdp, MODINFOMD_ENVP, char *);
			endkernel = ulmax(endkernel, MD_FETCH(kmdp,
			    MODINFOMD_KERNEND, vm_offset_t));
#ifdef DDB
			ksym_start = MD_FETCH(kmdp, MODINFOMD_SSYM, uintptr_t);
			ksym_end = MD_FETCH(kmdp, MODINFOMD_ESYM, uintptr_t);
#endif
		}
Example #10
uintptr_t
powerpc_init(vm_offset_t fdt, vm_offset_t toc, vm_offset_t ofentry, void *mdp)
{
	struct		pcpu *pc;
	vm_offset_t	startkernel, endkernel;
	void		*kmdp;
	char		*env;
#ifdef DDB
	vm_offset_t ksym_start;
	vm_offset_t ksym_end;
#endif

	kmdp = NULL;

	/* First guess at start/end kernel positions */
	startkernel = __startkernel;
	endkernel = __endkernel;

	/* Check for ePAPR loader, which puts a magic value into r6 */
	if (mdp == (void *)0x65504150)
		mdp = NULL;

	/*
	 * Parse metadata if present and fetch parameters.  Must be done
	 * before console is inited so cninit gets the right value of
	 * boothowto.
	 */
	if (mdp != NULL) {
		preload_metadata = mdp;
		kmdp = preload_search_by_type("elf kernel");
		if (kmdp != NULL) {
			boothowto = MD_FETCH(kmdp, MODINFOMD_HOWTO, int);
			init_static_kenv(MD_FETCH(kmdp, MODINFOMD_ENVP, char *),
			    0);
			endkernel = ulmax(endkernel, MD_FETCH(kmdp,
			    MODINFOMD_KERNEND, vm_offset_t));
#ifdef DDB
			ksym_start = MD_FETCH(kmdp, MODINFOMD_SSYM, uintptr_t);
			ksym_end = MD_FETCH(kmdp, MODINFOMD_ESYM, uintptr_t);
			db_fetch_ksymtab(ksym_start, ksym_end);
#endif
		}
Example #11
uintptr_t
powerpc_init(vm_offset_t startkernel, vm_offset_t endkernel,
    vm_offset_t basekernel, void *mdp)
{
	struct		pcpu *pc;
	void		*generictrap;
	size_t		trap_offset;
	void		*kmdp;
	char		*env;
	register_t	msr, scratch;
	uint8_t		*cache_check;
	int		cacheline_warn;
	#ifndef __powerpc64__
	int		ppc64;
	#endif

	kmdp = NULL;
	trap_offset = 0;
	cacheline_warn = 0;

	/*
	 * Parse metadata if present and fetch parameters.  Must be done
	 * before console is inited so cninit gets the right value of
	 * boothowto.
	 */
	if (mdp != NULL) {
		preload_metadata = mdp;
		kmdp = preload_search_by_type("elf kernel");
		if (kmdp != NULL) {
			boothowto = MD_FETCH(kmdp, MODINFOMD_HOWTO, int);
			kern_envp = MD_FETCH(kmdp, MODINFOMD_ENVP, char *);
			endkernel = ulmax(endkernel, MD_FETCH(kmdp,
			    MODINFOMD_KERNEND, vm_offset_t));
#ifdef DDB
			ksym_start = MD_FETCH(kmdp, MODINFOMD_SSYM, uintptr_t);
			ksym_end = MD_FETCH(kmdp, MODINFOMD_ESYM, uintptr_t);
#endif
		}
Example #12
static struct resource *
nexus_alloc_resource(device_t bus, device_t child, int type, int *rid,
                     u_long start, u_long end, u_long count, u_int flags)
{
    struct nexus_softc *sc;
    struct rman *rm;
    struct resource *rv;
    struct resource_list_entry *rle;
    device_t nexus;
    int isdefault, needactivate, passthrough;

    isdefault = (start == 0UL && end == ~0UL);
    needactivate = flags & RF_ACTIVE;
    passthrough = (device_get_parent(child) != bus);
    nexus = bus;
    while (strcmp(device_get_name(device_get_parent(nexus)), "root") != 0)
        nexus = device_get_parent(nexus);
    sc = device_get_softc(nexus);
    rle = NULL;

    if (!passthrough) {
        rle = resource_list_find(BUS_GET_RESOURCE_LIST(bus, child),
                                 type, *rid);
        if (rle == NULL)
            return (NULL);
        if (rle->res != NULL)
            panic("%s: resource entry is busy", __func__);
        if (isdefault) {
            start = rle->start;
            count = ulmax(count, rle->count);
            end = ulmax(rle->end, start + count - 1);
        }
    }

    switch (type) {
    case SYS_RES_IRQ:
        rm = &sc->sc_intr_rman;
        break;
    case SYS_RES_MEMORY:
        rm = &sc->sc_mem_rman;
        break;
    default:
        return (NULL);
    }

    flags &= ~RF_ACTIVE;
    rv = rman_reserve_resource(rm, start, end, count, flags, child);
    if (rv == NULL)
        return (NULL);
    rman_set_rid(rv, *rid);
    if (type == SYS_RES_MEMORY) {
        rman_set_bustag(rv, &nexus_bustag);
        rman_set_bushandle(rv, rman_get_start(rv));
    }

    if (needactivate) {
        if (bus_activate_resource(child, type, *rid, rv) != 0) {
            rman_release_resource(rv);
            return (NULL);
        }
    }

    if (!passthrough) {
        rle->res = rv;
        rle->start = rman_get_start(rv);
        rle->end = rman_get_end(rv);
        rle->count = rle->end - rle->start + 1;
    }

    return (rv);
}
Example #13
static struct resource *
ebus_alloc_resource(device_t bus, device_t child, int type, int *rid,
    u_long start, u_long end, u_long count, u_int flags)
{
	struct ebus_softc *sc;
	struct resource_list *rl;
	struct resource_list_entry *rle = NULL;
	struct resource *res;
	struct ebus_rinfo *ri;
	bus_space_tag_t bt;
	bus_space_handle_t bh;
	int passthrough = (device_get_parent(child) != bus);
	int isdefault = (start == 0UL && end == ~0UL);
	int ridx, rv;

	sc = (struct ebus_softc *)device_get_softc(bus);
	rl = BUS_GET_RESOURCE_LIST(bus, child);
	/*
	 * Map EBus ranges to PCI ranges.  This may include changing the
	 * allocation type.
	 */
	switch (type) {
	case SYS_RES_MEMORY:
		KASSERT(!(isdefault && passthrough),
		    ("ebus_alloc_resource: passthrough of default alloc"));
		if (!passthrough) {
			rle = resource_list_find(rl, type, *rid);
			if (rle == NULL)
				return (NULL);
			KASSERT(rle->res == NULL,
			    ("ebus_alloc_resource: resource entry is busy"));
			if (isdefault) {
				start = rle->start;
				count = ulmax(count, rle->count);
				end = ulmax(rle->end, start + count - 1);
			}
		}

		(void)ofw_isa_range_map(sc->sc_range, sc->sc_nrange,
		    &start, &end, &ridx);

		ri = &sc->sc_rinfo[ridx];
		res = rman_reserve_resource(&ri->eri_rman, start, end, count,
		    flags, child);
		if (res == NULL)
			return (NULL);
		rman_set_rid(res, *rid);
		bt = rman_get_bustag(ri->eri_res);
		rman_set_bustag(res, bt);
		rv = bus_space_subregion(bt, rman_get_bushandle(ri->eri_res),
		    rman_get_start(res) - rman_get_start(ri->eri_res), count,
		    &bh);
		if (rv != 0) {
			rman_release_resource(res);
			return (NULL);
		}
		rman_set_bushandle(res, bh);
		if (!passthrough)
			rle->res = res;
		return (res);
	case SYS_RES_IRQ:
		return (resource_list_alloc(rl, bus, child, type, rid, start,
		    end, count, flags));
	}
	return (NULL);
}
Example #14
static struct resource *
chipc_alloc_resource(device_t dev, device_t child, int type,
    int *rid, rman_res_t start, rman_res_t end, rman_res_t count, u_int flags)
{
	struct chipc_softc		*sc;
	struct chipc_region		*cr;
	struct resource_list_entry	*rle;
	struct resource			*rv;
	struct rman			*rm;
	int				 error;
	bool				 passthrough, isdefault;

	sc = device_get_softc(dev);
	passthrough = (device_get_parent(child) != dev);
	isdefault = RMAN_IS_DEFAULT_RANGE(start, end);
	rle = NULL;

	/* Fetch the resource manager, delegate request if necessary */
	rm = chipc_get_rman(sc, type);
	if (rm == NULL) {
		/* Requested resource type is delegated to our parent */
		rv = bus_generic_rl_alloc_resource(dev, child, type, rid,
		    start, end, count, flags);
		return (rv);
	}

	/* Populate defaults */
	if (!passthrough && isdefault) {
		/* Fetch the resource list entry. */
		rle = resource_list_find(BUS_GET_RESOURCE_LIST(dev, child),
		    type, *rid);
		if (rle == NULL) {
			device_printf(dev,
			    "default resource %#x type %d for child %s "
			    "not found\n", *rid, type,
			    device_get_nameunit(child));			
			return (NULL);
		}
		
		if (rle->res != NULL) {
			device_printf(dev,
			    "resource entry %#x type %d for child %s is busy "
			    "[%d]\n",
			    *rid, type, device_get_nameunit(child),
			    rman_get_flags(rle->res));
			
			return (NULL);
		}

		start = rle->start;
		end = rle->end;
		count = ulmax(count, rle->count);
	}

	/* Locate a mapping region */
	if ((cr = chipc_find_region(sc, start, end)) == NULL) {
		/* Resource requests outside our shared port regions can be
		 * delegated to our parent. */
		rv = bus_generic_rl_alloc_resource(dev, child, type, rid,
		    start, end, count, flags);
		return (rv);
	}

	/* Try to retain a region reference */
	if ((error = chipc_retain_region(sc, cr, RF_ALLOCATED)))
		return (NULL);

	/* Make our rman reservation */
	rv = rman_reserve_resource(rm, start, end, count, flags & ~RF_ACTIVE,
	    child);
	if (rv == NULL) {
		chipc_release_region(sc, cr, RF_ALLOCATED);
		return (NULL);
	}

	rman_set_rid(rv, *rid);

	/* Activate */
	if (flags & RF_ACTIVE) {
		error = bus_activate_resource(child, type, *rid, rv);
		if (error) {
			device_printf(dev,
			    "failed to activate entry %#x type %d for "
				"child %s: %d\n",
			     *rid, type, device_get_nameunit(child), error);

			chipc_release_region(sc, cr, RF_ALLOCATED);
			rman_release_resource(rv);

			return (NULL);
		}
	}

	/* Update child's resource list entry */
	if (rle != NULL) {
		rle->res = rv;
		rle->start = rman_get_start(rv);
		rle->end = rman_get_end(rv);
		rle->count = rman_get_size(rv);
	}

	return (rv);
}
Example #15
File: fhc.c Project: MarginC/kame
struct resource *
fhc_alloc_resource(device_t bus, device_t child, int type, int *rid,
    u_long start, u_long end, u_long count, u_int flags)
{
	struct resource_list_entry *rle;
	struct fhc_devinfo *fdi;
	struct fhc_softc *sc;
	struct resource *res;
	bus_addr_t coffset;
	bus_addr_t cend;
	bus_addr_t phys;
	int isdefault;
	uint32_t map;
	uint32_t vec;
	int i;

	isdefault = (start == 0UL && end == ~0UL);
	res = NULL;
	sc = device_get_softc(bus);
	switch (type) {
	case SYS_RES_IRQ:
		if (!isdefault || count != 1 || *rid < FHC_FANFAIL ||
		    *rid > FHC_TOD)
			break;

		map = bus_space_read_4(sc->sc_bt[*rid], sc->sc_bh[*rid],
		    FHC_IMAP);
		vec = INTINO(map) | (sc->sc_ign << INTMAP_IGN_SHIFT);
		bus_space_write_4(sc->sc_bt[*rid], sc->sc_bh[*rid],
		    FHC_IMAP, vec);
		bus_space_read_4(sc->sc_bt[*rid], sc->sc_bh[*rid], FHC_IMAP);

		res = bus_generic_alloc_resource(bus, child, type, rid,
		    vec, vec, 1, flags);
		if (res != NULL)
			rman_set_rid(res, *rid);
		break;
	case SYS_RES_MEMORY:
		fdi = device_get_ivars(child);
		rle = resource_list_find(&fdi->fdi_rl, type, *rid);
		if (rle == NULL)
			return (NULL);
		if (rle->res != NULL)
			panic("fhc_alloc_resource: resource entry is busy");
		if (isdefault) {
			start = rle->start;
			count = ulmax(count, rle->count);
			end = ulmax(rle->end, start + count - 1);
		}
		for (i = 0; i < sc->sc_nrange; i++) {
			coffset = sc->sc_ranges[i].coffset;
			cend = coffset + sc->sc_ranges[i].size - 1;
			if (start >= coffset && end <= cend) {
				start -= coffset;
				end -= coffset;
				phys = sc->sc_ranges[i].poffset |
				    ((bus_addr_t)sc->sc_ranges[i].pspace << 32);
				res = bus_generic_alloc_resource(bus, child,
				    type, rid, phys + start, phys + end,
				    count, flags);
				rle->res = res;
				break;
			}
		}
		break;
	default:
		break;
	}
	return (res);
}
Example #16
static struct resource *
sbus_alloc_resource(device_t bus, device_t child, int type, int *rid,
    u_long start, u_long end, u_long count, u_int flags)
{
	struct sbus_softc *sc;
	struct sbus_devinfo *sdi;
	struct rman *rm;
	struct resource *rv;
	struct resource_list *rl;
	struct resource_list_entry *rle;
	bus_space_handle_t bh;
	bus_addr_t toffs;
	bus_size_t tend;
	int i;
	int isdefault = (start == 0UL && end == ~0UL);
	int needactivate = flags & RF_ACTIVE;

	sc = (struct sbus_softc *)device_get_softc(bus);
	sdi = device_get_ivars(child);
	rl = &sdi->sdi_rl;
	rle = resource_list_find(rl, type, *rid);
	if (rle == NULL)
		return (NULL);
	if (rle->res != NULL)
		panic("sbus_alloc_resource: resource entry is busy");
	if (isdefault) {
		start = rle->start;
		count = ulmax(count, rle->count);
		end = ulmax(rle->end, start + count - 1);
	}
	switch (type) {
	case SYS_RES_IRQ:
		rv = bus_alloc_resource(bus, type, rid, start, end,
		    count, flags);
		if (rv == NULL)
			return (NULL);
		break;
	case SYS_RES_MEMORY:
		rm = NULL;
		bh = toffs = tend = 0;
		for (i = 0; i < sc->sc_nrange; i++) {
			if (sc->sc_rd[i].rd_slot != sdi->sdi_slot ||
			    start < sc->sc_rd[i].rd_coffset ||
			    start > sc->sc_rd[i].rd_cend)
				continue;
			/* Disallow cross-range allocations. */
			if (end > sc->sc_rd[i].rd_cend)
				return (NULL);
			/* We've found the connection to the parent bus */
			toffs = start - sc->sc_rd[i].rd_coffset;
			tend = end - sc->sc_rd[i].rd_coffset;
			rm = &sc->sc_rd[i].rd_rman;
			bh = sc->sc_rd[i].rd_bushandle;
		}
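		/* rm is still NULL here if no range matched (toffs may be 0). */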
		if (rm == NULL)
			return (NULL);
		flags &= ~RF_ACTIVE;
		rv = rman_reserve_resource(rm, toffs, tend, count, flags,
		    child);
		if (rv == NULL)
			return (NULL);
		rman_set_bustag(rv, sc->sc_cbustag);
		rman_set_bushandle(rv, bh + rman_get_start(rv));
		if (needactivate) {
			if (bus_activate_resource(child, type, *rid, rv)) {
				rman_release_resource(rv);
				return (NULL);
			}
		}
		break;
	default:
		return (NULL);
	}
	rle->res = rv;
	return (rv);
}
Example #17
/*
 * Tcp output routine: figure out what should be sent and send it.
 */
int
tcp_output(struct tcpcb *tp)
{
	struct inpcb * const inp = tp->t_inpcb;
	struct socket *so = inp->inp_socket;
	long len, recvwin, sendwin;
	int nsacked = 0;
	int off, flags, error = 0;
#ifdef TCP_SIGNATURE
	int sigoff = 0;
#endif
	struct mbuf *m;
	struct ip *ip;
	struct tcphdr *th;
	u_char opt[TCP_MAXOLEN];
	unsigned int ipoptlen, optlen, hdrlen;
	int idle;
	boolean_t sendalot;
	struct ip6_hdr *ip6;
#ifdef INET6
	const boolean_t isipv6 = INP_ISIPV6(inp);
#else
	const boolean_t isipv6 = FALSE;
#endif
	boolean_t can_tso = FALSE, use_tso;
	boolean_t report_sack, idle_cwv = FALSE;
	u_int segsz, tso_hlen, tso_lenmax = 0;
	int segcnt = 0;
	boolean_t need_sched = FALSE;

	KKASSERT(so->so_port == &curthread->td_msgport);

	/*
	 * Determine length of data that should be transmitted,
	 * and flags that will be used.
	 * If there is some data or critical controls (SYN, RST)
	 * to send, then transmit; otherwise, investigate further.
	 */

	/*
	 * If we have been idle for a while, the send congestion window
	 * could be no longer representative of the current state of the
	 * link; need to validate congestion window.  However, we should
	 * not perform congestion window validation here, since we could
	 * be asked to send pure ACK.
	 */
	if (tp->snd_max == tp->snd_una &&
	    (ticks - tp->snd_last) >= tp->t_rxtcur && tcp_idle_restart)
		idle_cwv = TRUE;

	/*
	 * Calculate whether the transmit stream was previously idle 
	 * and adjust TF_LASTIDLE for the next time.
	 */
	idle = (tp->t_flags & TF_LASTIDLE) || (tp->snd_max == tp->snd_una);
	if (idle && (tp->t_flags & TF_MORETOCOME))
		tp->t_flags |= TF_LASTIDLE;
	else
		tp->t_flags &= ~TF_LASTIDLE;

	if (TCP_DO_SACK(tp) && tp->snd_nxt != tp->snd_max &&
	    !IN_FASTRECOVERY(tp))
		nsacked = tcp_sack_bytes_below(&tp->scb, tp->snd_nxt);

	/*
	 * Find out whether TSO could be used or not
	 *
	 * For TSO capable devices, the following assumptions apply to
	 * the processing of TCP flags:
	 * - If FIN is set on the large TCP segment, the device must set
	 *   FIN on the last segment that it creates from the large TCP
	 *   segment.
	 * - If PUSH is set on the large TCP segment, the device must set
	 *   PUSH on the last segment that it creates from the large TCP
	 *   segment.
	 */
#if !defined(IPSEC) && !defined(FAST_IPSEC)
	if (tcp_do_tso
#ifdef TCP_SIGNATURE
	    && (tp->t_flags & TF_SIGNATURE) == 0
#endif
	) {
		if (!isipv6) {
			struct rtentry *rt = inp->inp_route.ro_rt;

			if (rt != NULL && (rt->rt_flags & RTF_UP) &&
			    (rt->rt_ifp->if_hwassist & CSUM_TSO)) {
				can_tso = TRUE;
				tso_lenmax = rt->rt_ifp->if_tsolen;
			}
		}
	}
#endif	/* !IPSEC && !FAST_IPSEC */

again:
	m = NULL;
	ip = NULL;
	th = NULL;
	ip6 = NULL;

	if ((tp->t_flags & (TF_SACK_PERMITTED | TF_NOOPT)) ==
		TF_SACK_PERMITTED &&
	    (!TAILQ_EMPTY(&tp->t_segq) ||
	     tp->reportblk.rblk_start != tp->reportblk.rblk_end))
		report_sack = TRUE;
	else
		report_sack = FALSE;

	/* Make use of SACK information when slow-starting after a RTO. */
	if (TCP_DO_SACK(tp) && tp->snd_nxt != tp->snd_max &&
	    !IN_FASTRECOVERY(tp)) {
		tcp_seq old_snd_nxt = tp->snd_nxt;

		tcp_sack_skip_sacked(&tp->scb, &tp->snd_nxt);
		nsacked += tp->snd_nxt - old_snd_nxt;
	}

	sendalot = FALSE;
	off = tp->snd_nxt - tp->snd_una;
	sendwin = min(tp->snd_wnd, tp->snd_cwnd + nsacked);
	sendwin = min(sendwin, tp->snd_bwnd);

	flags = tcp_outflags[tp->t_state];
	/*
	 * Get standard flags, and add SYN or FIN if requested by 'hidden'
	 * state flags.
	 */
	if (tp->t_flags & TF_NEEDFIN)
		flags |= TH_FIN;
	if (tp->t_flags & TF_NEEDSYN)
		flags |= TH_SYN;

	/*
	 * If in persist timeout with window of 0, send 1 byte.
	 * Otherwise, if window is small but nonzero
	 * and timer expired, we will send what we can
	 * and go to transmit state.
	 */
	if (tp->t_flags & TF_FORCE) {
		if (sendwin == 0) {
			/*
			 * If we still have some data to send, then
			 * clear the FIN bit.  Usually this would
			 * happen below when it realizes that we
			 * aren't sending all the data.  However,
			 * if we have exactly 1 byte of unsent data,
			 * then it won't clear the FIN bit below,
			 * and if we are in persist state, we wind
			 * up sending the packet without recording
			 * that we sent the FIN bit.
			 *
			 * We can't just blindly clear the FIN bit,
			 * because if we don't have any more data
			 * to send then the probe will be the FIN
			 * itself.
			 */
			if (off < so->so_snd.ssb_cc)
				flags &= ~TH_FIN;
			sendwin = 1;
		} else {
			tcp_callout_stop(tp, tp->tt_persist);
			tp->t_rxtshift = 0;
		}
	}

	/*
	 * If snd_nxt == snd_max and we have transmitted a FIN, the
	 * offset will be > 0 even if so_snd.ssb_cc is 0, resulting in
	 * a negative length.  This can also occur when TCP opens up
	 * its congestion window while receiving additional duplicate
	 * acks after fast-retransmit because TCP will reset snd_nxt
	 * to snd_max after the fast-retransmit.
	 *
	 * A negative length can also occur when we are in the
	 * TCPS_SYN_RECEIVED state due to a simultaneous connect where
	 * our SYN has not been acked yet.
	 *
	 * In the normal retransmit-FIN-only case, however, snd_nxt will
	 * be set to snd_una, the offset will be 0, and the length may
	 * wind up 0.
	 */
	len = (long)ulmin(so->so_snd.ssb_cc, sendwin) - off;

	/*
	 * Lop off SYN bit if it has already been sent.  However, if this
	 * is SYN-SENT state and if segment contains data, suppress sending
	 * segment (sending the segment would be an option if we still
	 * did TAO and the remote host supported it).
	 */
	if ((flags & TH_SYN) && SEQ_GT(tp->snd_nxt, tp->snd_una)) {
		flags &= ~TH_SYN;
		off--, len++;
		if (len > 0 && tp->t_state == TCPS_SYN_SENT) {
			tp->t_flags &= ~(TF_ACKNOW | TF_XMITNOW);
			return 0;
		}
	}

	/*
	 * Be careful not to send data and/or FIN on SYN segments.
	 * This measure is needed to prevent interoperability problems
	 * with not fully conformant TCP implementations.
	 */
	if (flags & TH_SYN) {
		len = 0;
		flags &= ~TH_FIN;
	}

	if (len < 0) {
		/*
		 * A negative len can occur if our FIN has been sent but not
	 * acked, or if we are in a simultaneous connect in the
		 * TCPS_SYN_RECEIVED state with our SYN sent but not yet
		 * acked.
		 *
		 * If our window has contracted to 0 in the FIN case
		 * (which can only occur if we have NOT been called to
		 * retransmit as per code a few paragraphs up) then we
		 * want to shift the retransmit timer over to the
		 * persist timer.
		 *
		 * However, if we are in the TCPS_SYN_RECEIVED state
	 * (the SYN case) we will be in a simultaneous connect and
		 * the window may be zero degeneratively.  In this case we
		 * do not want to shift to the persist timer after the SYN
		 * or the SYN+ACK transmission.
		 */
		len = 0;
		if (sendwin == 0 && tp->t_state != TCPS_SYN_RECEIVED) {
			tcp_callout_stop(tp, tp->tt_rexmt);
			tp->t_rxtshift = 0;
			tp->snd_nxt = tp->snd_una;
			if (!tcp_callout_active(tp, tp->tt_persist))
				tcp_setpersist(tp);
		}
	}

	KASSERT(len >= 0, ("%s: len < 0", __func__));
	/*
	 * Automatic sizing of send socket buffer.  Often the send buffer
	 * size is not optimally adjusted to the actual network conditions
	 * at hand (delay bandwidth product).  Setting the buffer size too
	 * small limits throughput on links with high bandwidth and high
	 * delay (eg. trans-continental/oceanic links).  Setting the
	 * buffer size too big consumes too much real kernel memory,
	 * especially with many connections on busy servers.
	 *
	 * The criteria to step up the send buffer one notch are:
	 *  1. receive window of remote host is larger than send buffer
	 *     (with a fudge factor of 5/4th);
	 *  2. hiwat has not significantly exceeded bwnd (inflight)
	 *     (bwnd is a maximal value if inflight is disabled).
	 *  3. send buffer is filled to 7/8th with data (so we actually
	 *     have data to make use of it);
	 *  4. hiwat has not hit maximal automatic size;
	 *  5. our send window (slow start and congestion controlled) is
	 *     larger than sent but unacknowledged data in send buffer.
	 *
	 * The remote host receive window scaling factor may limit the
	 * growing of the send buffer before it reaches its allowed
	 * maximum.
	 *
	 * It scales directly with slow start or congestion window
	 * and does at most one step per received ACK.  This fast
	 * scaling has the drawback of growing the send buffer beyond
	 * what is strictly necessary to make full use of a given
	 * delay*bandwidth product.  However, testing has shown this not
	 * to be much of a problem.  At worst we are trading the wasting
	 * of available bandwidth (the non-use of it) for wasting some
	 * socket buffer memory.
	 *
	 * The criteria for shrinking the buffer is based solely on
	 * the inflight code (snd_bwnd).  If inflight is disabled,
	 * the buffer will not be shrunk.  Note that snd_bwnd already
	 * has a fudge factor.  Our test adds a little hysteresis.
	 */
	if (tcp_do_autosndbuf && (so->so_snd.ssb_flags & SSB_AUTOSIZE)) {
		const int asbinc = tcp_autosndbuf_inc;
		const int hiwat = so->so_snd.ssb_hiwat;
		const int lowat = so->so_snd.ssb_lowat;
		u_long newsize;

		if ((tp->snd_wnd / 4 * 5) >= hiwat &&
		    so->so_snd.ssb_cc >= (hiwat / 8 * 7) &&
		    hiwat < tp->snd_bwnd + hiwat / 10 &&
		    hiwat + asbinc < tcp_autosndbuf_max &&
		    hiwat < (TCP_MAXWIN << tp->snd_scale) &&
		    sendwin >= (so->so_snd.ssb_cc -
				(tp->snd_nxt - tp->snd_una))) {
			newsize = ulmin(hiwat + asbinc, tcp_autosndbuf_max);
			if (!ssb_reserve(&so->so_snd, newsize, so, NULL))
				atomic_clear_int(&so->so_snd.ssb_flags, SSB_AUTOSIZE);
#if 0
			if (newsize >= (TCP_MAXWIN << tp->snd_scale))
				atomic_clear_int(&so->so_snd.ssb_flags, SSB_AUTOSIZE);
#endif
		} else if ((long)tp->snd_bwnd <
			   (long)(hiwat * 3 / 4 - lowat - asbinc) &&
			   hiwat > tp->t_maxseg * 2 + asbinc &&
			   hiwat + asbinc >= tcp_autosndbuf_min &&
			   tcp_do_autosndbuf == 1) {
			newsize = ulmax(hiwat - asbinc, tp->t_maxseg * 2);
			ssb_reserve(&so->so_snd, newsize, so, NULL);
		}
	}

	/*
	 * Don't use TSO, if:
	 * - Congestion window needs validation
	 * - There are SACK blocks to report
	 * - RST or SYN flags is set
	 * - URG will be set
	 *
	 * XXX
	 * Checking for SYN|RST may look like overkill, but better safe than sorry
	 */
	use_tso = can_tso;
	if (report_sack || idle_cwv || (flags & (TH_RST | TH_SYN)))
		use_tso = FALSE;
	if (use_tso) {
		tcp_seq ugr_nxt = tp->snd_nxt;

		if ((flags & TH_FIN) && (tp->t_flags & TF_SENTFIN) &&
		    tp->snd_nxt == tp->snd_max)
			--ugr_nxt;

		if (SEQ_GT(tp->snd_up, ugr_nxt))
			use_tso = FALSE;
	}

	if (use_tso) {
		/*
		 * Find out segment size and header length for TSO
		 */
		error = tcp_tso_getsize(tp, &segsz, &tso_hlen);
		if (error)
			use_tso = FALSE;
	}
	if (!use_tso) {
		segsz = tp->t_maxseg;
		tso_hlen = 0; /* not used */
	}

	/*
	 * Truncate to the maximum segment length if not TSO, and ensure that
	 * FIN is removed if the length no longer contains the last data byte.
	 */
	if (len > segsz) {
		if (!use_tso) {
			len = segsz;
			++segcnt;
		} else {
			int nsegs;

			if (__predict_false(tso_lenmax < segsz))
				tso_lenmax = segsz << 1;

			/*
			 * Truncate TSO transfers to (IP_MAXPACKET - iphlen -
			 * thoff), and make sure that we send equal size
			 * transfers down the stack (rather than big-small-
			 * big-small-...).
			 */
			len = min(len, tso_lenmax);
			nsegs = min(len, (IP_MAXPACKET - tso_hlen)) / segsz;
			KKASSERT(nsegs > 0);

			len = nsegs * segsz;

			if (len <= segsz) {
				use_tso = FALSE;
				++segcnt;
			} else {
				segcnt += nsegs;
			}
		}
		sendalot = TRUE;
	} else {
		use_tso = FALSE;
		if (len > 0)
			++segcnt;
	}
	if (SEQ_LT(tp->snd_nxt + len, tp->snd_una + so->so_snd.ssb_cc))
		flags &= ~TH_FIN;

	recvwin = ssb_space(&so->so_rcv);

	/*
	 * Sender silly window avoidance.   We transmit under the following
	 * conditions when len is non-zero:
	 *
	 *	- We have a full segment
	 *	- This is the last buffer in a write()/send() and we are
	 *	  either idle or running NODELAY
	 *	- we've timed out (e.g. persist timer)
	 *	- we have more than 1/2 the maximum send window's worth of
	 *	  data (receiver may be limiting the window size)
	 *	- we need to retransmit
	 */
	if (len) {
		if (len >= segsz)
			goto send;
		/*
		 * NOTE! on localhost connections an 'ack' from the remote
		 * end may occur synchronously with the output and cause
		 * us to flush a buffer queued with moretocome.  XXX
		 *
		 * note: the len + off check is almost certainly unnecessary.
		 */
		if (!(tp->t_flags & TF_MORETOCOME) &&	/* normal case */
		    (idle || (tp->t_flags & TF_NODELAY)) &&
		    len + off >= so->so_snd.ssb_cc &&
		    !(tp->t_flags & TF_NOPUSH)) {
			goto send;
		}
		if (tp->t_flags & TF_FORCE)		/* typ. timeout case */
			goto send;
		if (len >= tp->max_sndwnd / 2 && tp->max_sndwnd > 0)
			goto send;
		if (SEQ_LT(tp->snd_nxt, tp->snd_max))	/* retransmit case */
			goto send;
		if (tp->t_flags & TF_XMITNOW)
			goto send;
	}

	/*
	 * Compare available window to amount of window
	 * known to peer (as advertised window less
	 * next expected input).  If the difference is at least two
	 * max size segments, or at least 50% of the maximum possible
	 * window, then want to send a window update to peer.
	 */
	if (recvwin > 0) {
		/*
		 * "adv" is the amount we can increase the window,
		 * taking into account that we are limited by
		 * TCP_MAXWIN << tp->rcv_scale.
		 */
		long adv = min(recvwin, (long)TCP_MAXWIN << tp->rcv_scale) -
			(tp->rcv_adv - tp->rcv_nxt);
		long hiwat;

		/*
		 * This ack case typically occurs when the user has drained
		 * the TCP socket buffer sufficiently to warrant an ack
		 * containing a 'pure window update'... that is, an ack that
		 * ONLY updates the tcp window.
		 *
		 * It is unclear why we would need to do a pure window update
		 * past 2 segments if we are going to do one at 1/2 the high
		 * water mark anyway, especially since under normal conditions
		 * the user program will drain the socket buffer quickly.
		 * The 2-segment pure window update will often add a large
		 * number of extra, unnecessary acks to the stream.
		 *
		 * avoid_pure_win_update now defaults to 1.
		 */
		if (avoid_pure_win_update == 0 ||
		    (tp->t_flags & TF_RXRESIZED)) {
			if (adv >= (long) (2 * segsz)) {
				goto send;
			}
		}
		hiwat = (long)(TCP_MAXWIN << tp->rcv_scale);
		if (hiwat > (long)so->so_rcv.ssb_hiwat)
			hiwat = (long)so->so_rcv.ssb_hiwat;
		if (adv >= hiwat / 2)
			goto send;
	}

	/*
	 * Send if we owe the peer an ACK, RST, SYN, or urgent data.  ACKNOW
	 * is also a catch-all for the retransmit timer timeout case.
	 */
	if (tp->t_flags & TF_ACKNOW)
		goto send;
	if ((flags & TH_RST) ||
	    ((flags & TH_SYN) && !(tp->t_flags & TF_NEEDSYN)))
		goto send;
	if (SEQ_GT(tp->snd_up, tp->snd_una))
		goto send;
	/*
	 * If our state indicates that FIN should be sent
	 * and we have not yet done so, then we need to send.
	 */
	if ((flags & TH_FIN) &&
	    (!(tp->t_flags & TF_SENTFIN) || tp->snd_nxt == tp->snd_una))
		goto send;

	/*
	 * TCP window updates are not reliable, rather a polling protocol
	 * using ``persist'' packets is used to ensure receipt of window
	 * updates.  The three ``states'' for the output side are:
	 *	idle			not doing retransmits or persists
	 *	persisting		to move a small or zero window
	 *	(re)transmitting	and thereby not persisting
	 *
	 * tcp_callout_active(tp, tp->tt_persist)
	 *	is true when we are in persist state.
	 * The TF_FORCE flag in tp->t_flags
	 *	is set when we are called to send a persist packet.
	 * tcp_callout_active(tp, tp->tt_rexmt)
	 *	is set when we are retransmitting
	 * The output side is idle when both timers are zero.
	 *
	 * If send window is too small, there is data to transmit, and no
	 * retransmit or persist is pending, then go to persist state.
	 *
	 * If nothing happens soon, send when timer expires:
	 * if window is nonzero, transmit what we can, otherwise force out
	 * a byte.
	 *
	 * Don't try to set the persist state if we are in TCPS_SYN_RECEIVED
	 * with data pending.  This situation can occur during a
	 * simultaneous connect.
	 */
	if (so->so_snd.ssb_cc > 0 &&
	    tp->t_state != TCPS_SYN_RECEIVED &&
	    !tcp_callout_active(tp, tp->tt_rexmt) &&
	    !tcp_callout_active(tp, tp->tt_persist)) {
		tp->t_rxtshift = 0;
		tcp_setpersist(tp);
	}

	/*
	 * No reason to send a segment, just return.
	 */
	tp->t_flags &= ~TF_XMITNOW;
	return (0);

send:
	if (need_sched && len > 0) {
		tcp_output_sched(tp);
		return 0;
	}

	/*
	 * Before ESTABLISHED, force sending of initial options
	 * unless TCP set not to do any options.
	 * NOTE: we assume that the IP/TCP header plus TCP options
	 * always fit in a single mbuf, leaving room for a maximum
	 * link header, i.e.
	 *	max_linkhdr + sizeof(struct tcpiphdr) + optlen <= MCLBYTES
	 */
	optlen = 0;
	if (isipv6)
		hdrlen = sizeof(struct ip6_hdr) + sizeof(struct tcphdr);
	else
		hdrlen = sizeof(struct tcpiphdr);
	if (flags & TH_SYN) {
		tp->snd_nxt = tp->iss;
		if (!(tp->t_flags & TF_NOOPT)) {
			u_short mss;

			opt[0] = TCPOPT_MAXSEG;
			opt[1] = TCPOLEN_MAXSEG;
			mss = htons((u_short) tcp_mssopt(tp));
			memcpy(opt + 2, &mss, sizeof mss);
			optlen = TCPOLEN_MAXSEG;

			if ((tp->t_flags & TF_REQ_SCALE) &&
			    (!(flags & TH_ACK) ||
			     (tp->t_flags & TF_RCVD_SCALE))) {
				*((u_int32_t *)(opt + optlen)) = htonl(
					TCPOPT_NOP << 24 |
					TCPOPT_WINDOW << 16 |
					TCPOLEN_WINDOW << 8 |
					tp->request_r_scale);
				optlen += 4;
			}

			if ((tcp_do_sack && !(flags & TH_ACK)) ||
			    tp->t_flags & TF_SACK_PERMITTED) {
				uint32_t *lp = (uint32_t *)(opt + optlen);

				*lp = htonl(TCPOPT_SACK_PERMITTED_ALIGNED);
				optlen += TCPOLEN_SACK_PERMITTED_ALIGNED;
			}
		}
	}

	/*
	 * Send a timestamp and echo-reply if this is a SYN and our side
	 * wants to use timestamps (TF_REQ_TSTMP is set) or both our side
	 * and our peer have sent timestamps in our SYN's.
	 */
	if ((tp->t_flags & (TF_REQ_TSTMP | TF_NOOPT)) == TF_REQ_TSTMP &&
	    !(flags & TH_RST) &&
	    (!(flags & TH_ACK) || (tp->t_flags & TF_RCVD_TSTMP))) {
		u_int32_t *lp = (u_int32_t *)(opt + optlen);

		/* Form timestamp option as shown in appendix A of RFC 1323. */
		*lp++ = htonl(TCPOPT_TSTAMP_HDR);
		*lp++ = htonl(ticks);
		*lp   = htonl(tp->ts_recent);
		optlen += TCPOLEN_TSTAMP_APPA;
	}

	/* Set receive buffer autosizing timestamp. */
	if (tp->rfbuf_ts == 0 && (so->so_rcv.ssb_flags & SSB_AUTOSIZE))
		tp->rfbuf_ts = ticks;

	/*
	 * If this is a SACK connection and we have a block to report,
	 * fill in the SACK blocks in the TCP options.
	 */
	if (report_sack)
		tcp_sack_fill_report(tp, opt, &optlen);

#ifdef TCP_SIGNATURE
	if (tp->t_flags & TF_SIGNATURE) {
		int i;
		u_char *bp;
		/*
		 * Initialize TCP-MD5 option (RFC2385)
		 */
		bp = (u_char *)opt + optlen;
		*bp++ = TCPOPT_SIGNATURE;
		*bp++ = TCPOLEN_SIGNATURE;
		sigoff = optlen + 2;
		for (i = 0; i < TCP_SIGLEN; i++)
			*bp++ = 0;
		optlen += TCPOLEN_SIGNATURE;
		/*
		 * Terminate options list and maintain 32-bit alignment.
		 */
		*bp++ = TCPOPT_NOP;
		*bp++ = TCPOPT_EOL;
		optlen += 2;
	}
#endif /* TCP_SIGNATURE */
	KASSERT(optlen <= TCP_MAXOLEN, ("too many TCP options"));
	hdrlen += optlen;

	if (isipv6) {
		ipoptlen = ip6_optlen(inp);
	} else {
		if (inp->inp_options) {
			ipoptlen = inp->inp_options->m_len -
			    offsetof(struct ipoption, ipopt_list);
		} else {
Example #18
static struct resource *
ebus_alloc_resource(device_t bus, device_t child, int type, int *rid,
    u_long start, u_long end, u_long count, u_int flags)
{
	struct ebus_softc *sc;
	struct resource_list *rl;
	struct resource_list_entry *rle = NULL;
	struct resource *res;
	struct ebus_rinfo *eri;
	struct ebus_nexus_ranges *enr;
	uint64_t cend, cstart, offset;
	int i, isdefault, passthrough, ridx;

	isdefault = (start == 0UL && end == ~0UL);
	passthrough = (device_get_parent(child) != bus);
	sc = device_get_softc(bus);
	rl = BUS_GET_RESOURCE_LIST(bus, child);
	switch (type) {
	case SYS_RES_MEMORY:
		KASSERT(!(isdefault && passthrough),
		    ("%s: passthrough of default allocation", __func__));
		if (!passthrough) {
			rle = resource_list_find(rl, type, *rid);
			if (rle == NULL)
				return (NULL);
			KASSERT(rle->res == NULL,
			    ("%s: resource entry is busy", __func__));
			if (isdefault) {
				start = rle->start;
				count = ulmax(count, rle->count);
				end = ulmax(rle->end, start + count - 1);
			}
		}

		res = NULL;
		if ((sc->sc_flags & EBUS_PCI) != 0) {
			/*
			 * Map EBus ranges to PCI ranges.  This may include
			 * changing the allocation type.
			 */
			(void)ofw_isa_range_map(sc->sc_range, sc->sc_nrange,
			    &start, &end, &ridx);
			eri = &sc->sc_rinfo[ridx];
			res = rman_reserve_resource(&eri->eri_rman, start,
			    end, count, flags & ~RF_ACTIVE, child);
			if (res == NULL)
				return (NULL);
			rman_set_rid(res, *rid);
			if ((flags & RF_ACTIVE) != 0 && bus_activate_resource(
			    child, type, *rid, res) != 0) {
				rman_release_resource(res);
				return (NULL);
			}
		} else {
			/* Map EBus ranges to nexus ranges. */
			for (i = 0; i < sc->sc_nrange; i++) {
				enr = &((struct ebus_nexus_ranges *)
				    sc->sc_range)[i];
				cstart = (((uint64_t)enr->child_hi) << 32) |
				    enr->child_lo;
				cend = cstart + enr->size - 1;
				if (start >= cstart && end <= cend) {
					offset =
					    (((uint64_t)enr->phys_hi) << 32) |
					    enr->phys_lo;
					start += offset - cstart;
					end += offset - cstart;
					res = bus_generic_alloc_resource(bus,
					    child, type, rid, start, end,
					    count, flags);
					break;
				}
			}
		}
		if (!passthrough)
			rle->res = res;
		return (res);
	case SYS_RES_IRQ:
		return (resource_list_alloc(rl, bus, child, type, rid, start,
		    end, count, flags));
	}
	return (NULL);
}
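
In the nexus branch above, each OFW "ranges" entry stores its addresses as
two 32-bit cells; the shift-and-or that assembles them, pulled out as a
sketch:

#include <stdint.h>

/* Combine the hi/lo 32-bit halves of an OFW address cell pair. */
static uint64_t
ofw_addr64(uint32_t hi, uint32_t lo)
{
	return (((uint64_t)hi << 32) | lo);
}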
Example #19
/*
 * Allocate a device specific dma_tag.
 */
int
bus_dma_tag_create(bus_dma_tag_t parent, bus_size_t alignment,
    bus_addr_t boundary, bus_addr_t lowaddr, bus_addr_t highaddr,
    bus_dma_filter_t *filter, void *filterarg, bus_size_t maxsize,
    int nsegments, bus_size_t maxsegsz, int flags, bus_dma_lock_t *lockfunc,
    void *lockfuncarg, bus_dma_tag_t *dmat)
{
	bus_dma_tag_t newtag;

	/* Return a NULL tag on failure */
	*dmat = NULL;

	/* Enforce the usage of BUS_GET_DMA_TAG(). */
	if (parent == NULL)
		panic("%s: parent DMA tag NULL", __func__);

	newtag = (bus_dma_tag_t)malloc(sizeof(*newtag), M_DEVBUF, M_NOWAIT);
	if (newtag == NULL)
		return (ENOMEM);

	/*
	 * The method table pointer and the cookie need to be taken over from
	 * the parent.
	 */
	newtag->dt_cookie = parent->dt_cookie;
	newtag->dt_mt = parent->dt_mt;

	newtag->dt_parent = parent;
	newtag->dt_alignment = alignment;
	newtag->dt_boundary = boundary;
	newtag->dt_lowaddr = trunc_page((vm_offset_t)lowaddr) + (PAGE_SIZE - 1);
	newtag->dt_highaddr = trunc_page((vm_offset_t)highaddr) +
	    (PAGE_SIZE - 1);
	newtag->dt_filter = filter;
	newtag->dt_filterarg = filterarg;
	newtag->dt_maxsize = maxsize;
	newtag->dt_nsegments = nsegments;
	newtag->dt_maxsegsz = maxsegsz;
	newtag->dt_flags = flags;
	newtag->dt_ref_count = 1; /* Count ourselves */
	newtag->dt_map_count = 0;

	if (lockfunc != NULL) {
		newtag->dt_lockfunc = lockfunc;
		newtag->dt_lockfuncarg = lockfuncarg;
	} else {
		newtag->dt_lockfunc = dflt_lock;
		newtag->dt_lockfuncarg = NULL;
	}

	newtag->dt_segments = NULL;

	/* Take into account any restrictions imposed by our parent tag. */
	newtag->dt_lowaddr = ulmin(parent->dt_lowaddr, newtag->dt_lowaddr);
	newtag->dt_highaddr = ulmax(parent->dt_highaddr, newtag->dt_highaddr);
	if (newtag->dt_boundary == 0)
		newtag->dt_boundary = parent->dt_boundary;
	else if (parent->dt_boundary != 0)
		newtag->dt_boundary = ulmin(parent->dt_boundary,
		    newtag->dt_boundary);
	atomic_add_int(&parent->dt_ref_count, 1);

	if (newtag->dt_boundary > 0)
		newtag->dt_maxsegsz = ulmin(newtag->dt_maxsegsz,
		    newtag->dt_boundary);

	*dmat = newtag;
	return (0);
}
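
For context, a minimal, hypothetical caller of the routine above, filling
the parameters in the order of the signature shown; bus_get_dma_tag() and
the BUS_SPACE_MAXADDR constants come from the standard busdma headers:

/* Hypothetical: create a simple 32-bit, single-segment DMA tag. */
static int
mydev_dma_init(device_t dev, bus_dma_tag_t *tagp)
{
	return (bus_dma_tag_create(
	    bus_get_dma_tag(dev),	/* parent, per BUS_GET_DMA_TAG() */
	    4,				/* alignment */
	    0,				/* boundary: no crossing restriction */
	    BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    PAGE_SIZE,			/* maxsize */
	    1,				/* nsegments */
	    PAGE_SIZE,			/* maxsegsz */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockfuncarg */
	    tagp));
}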
Example #20
uintptr_t
powerpc_init(vm_offset_t fdt, vm_offset_t toc, vm_offset_t ofentry, void *mdp,
    uint32_t mdp_cookie)
{
	struct		pcpu *pc;
	struct cpuref	bsp;
	vm_offset_t	startkernel, endkernel;
	char		*env;
	bool		ofw_bootargs = false;
#ifdef DDB
	vm_offset_t ksym_start;
	vm_offset_t ksym_end;
#endif

	/* First guess at start/end kernel positions */
	startkernel = __startkernel;
	endkernel = __endkernel;

	/*
	 * If the metadata pointer cookie is not set to the magic value,
	 * the number in mdp should be treated as nonsense.
	 */
	if (mdp_cookie != 0xfb5d104d)
		mdp = NULL;

#if !defined(BOOKE)
	/*
	 * On BOOKE the BSS is already cleared and some variables
	 * initialized.  Do not wipe them out.
	 */
	bzero(__sbss_start, __sbss_end - __sbss_start);
	bzero(__bss_start, _end - __bss_start);
#endif

	cpu_feature_setup();

#ifdef AIM
	aim_early_init(fdt, toc, ofentry, mdp, mdp_cookie);
#endif

	/*
	 * Parse metadata if present and fetch parameters.  Must be done
	 * before console is inited so cninit gets the right value of
	 * boothowto.
	 */
	if (mdp != NULL) {
		void *kmdp = NULL;
		char *envp = NULL;
		uintptr_t md_offset = 0;
		vm_paddr_t kernelendphys;

#ifdef AIM
		if ((uintptr_t)&powerpc_init > DMAP_BASE_ADDRESS)
			md_offset = DMAP_BASE_ADDRESS;
#else /* BOOKE */
		md_offset = VM_MIN_KERNEL_ADDRESS - kernload;
#endif

		preload_metadata = mdp;
		if (md_offset > 0) {
			preload_metadata += md_offset;
			preload_bootstrap_relocate(md_offset);
		}
		kmdp = preload_search_by_type("elf kernel");
		if (kmdp != NULL) {
			boothowto = MD_FETCH(kmdp, MODINFOMD_HOWTO, int);
			envp = MD_FETCH(kmdp, MODINFOMD_ENVP, char *);
			if (envp != NULL)
				envp += md_offset;
			init_static_kenv(envp, 0);
			if (fdt == 0) {
				fdt = MD_FETCH(kmdp, MODINFOMD_DTBP, uintptr_t);
				if (fdt != 0)
					fdt += md_offset;
			}
			kernelendphys = MD_FETCH(kmdp, MODINFOMD_KERNEND,
			    vm_offset_t);
			if (kernelendphys != 0)
				kernelendphys += md_offset;
			endkernel = ulmax(endkernel, kernelendphys);
#ifdef DDB
			ksym_start = MD_FETCH(kmdp, MODINFOMD_SSYM, uintptr_t);
			ksym_end = MD_FETCH(kmdp, MODINFOMD_ESYM, uintptr_t);
			db_fetch_ksymtab(ksym_start, ksym_end);
#endif
		}
Example #21
static struct resource *
ps3bus_alloc_resource(device_t bus, device_t child, int type, int *rid,
    rman_res_t start, rman_res_t end, rman_res_t count, u_int flags)
{
	struct	ps3bus_devinfo *dinfo;
	struct	ps3bus_softc *sc;
	int	needactivate;
	struct	resource *rv;
	struct	rman *rm;
	rman_res_t	adjstart, adjend, adjcount;
	struct	resource_list_entry *rle;

	sc = device_get_softc(bus);
	dinfo = device_get_ivars(child);
	needactivate = flags & RF_ACTIVE;
	flags &= ~RF_ACTIVE;

	switch (type) {
	case SYS_RES_MEMORY:
		rle = resource_list_find(&dinfo->resources, SYS_RES_MEMORY,
		    *rid);
		if (rle == NULL) {
			device_printf(bus, "no rle for %s memory %d\n",
				      device_get_nameunit(child), *rid);
			return (NULL);
		}

		if (start < rle->start)
			adjstart = rle->start;
		else if (start > rle->end)
			adjstart = rle->end;
		else
			adjstart = start;

		if (end < rle->start)
			adjend = rle->start;
		else if (end > rle->end)
			adjend = rle->end;
		else
			adjend = end;

		adjcount = adjend - adjstart;

		rm = &sc->sc_mem_rman;
		break;
	case SYS_RES_IRQ:
		rle = resource_list_find(&dinfo->resources, SYS_RES_IRQ,
		    *rid);
		rm = &sc->sc_intr_rman;
		adjstart = rle->start;
		adjcount = ulmax(count, rle->count);
		adjend = ulmax(rle->end, rle->start + adjcount - 1);
		break;
	default:
		device_printf(bus, "unknown resource request from %s\n",
			      device_get_nameunit(child));
		return (NULL);
	}

	rv = rman_reserve_resource(rm, adjstart, adjend, adjcount, flags,
	    child);
	if (rv == NULL) {
		device_printf(bus,
			"failed to reserve resource %#lx - %#lx (%#lx)"
			" for %s\n", adjstart, adjend, adjcount,
			device_get_nameunit(child));
		return (NULL);
	}

	rman_set_rid(rv, *rid);

	if (needactivate) {
		if (bus_activate_resource(child, type, *rid, rv) != 0) {
			device_printf(bus,
				"failed to activate resource for %s\n",
				device_get_nameunit(child));
				rman_release_resource(rv);
			return (NULL);
		}
	}

	return (rv);
}
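
The adjstart/adjend computation in the SYS_RES_MEMORY case is simply a
clamp of the requested bounds into the resource-list entry's window; as a
hypothetical helper:

/* Clamp v into [lo, hi], as ps3bus does for start and end. */
static rman_res_t
ps3bus_clamp(rman_res_t v, rman_res_t lo, rman_res_t hi)
{
	if (v < lo)
		return (lo);
	if (v > hi)
		return (hi);
	return (v);
}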
Example #22
/**
 * Helper function for implementing BHND_BUS_ALLOC_RESOURCE().
 * 
 * This simple implementation of BHND_BUS_ALLOC_RESOURCE() determines
 * any default values via BUS_GET_RESOURCE_LIST(), and calls
 * BHND_BUS_ALLOC_RESOURCE() method of the parent of @p dev.
 * 
 * If no parent device is available, the request is instead delegated to
 * BUS_ALLOC_RESOURCE().
 */
struct bhnd_resource *
bhnd_generic_alloc_bhnd_resource(device_t dev, device_t child, int type,
	int *rid, rman_res_t start, rman_res_t end, rman_res_t count,
	u_int flags)
{
	struct bhnd_resource		*r;
	struct resource_list		*rl;
	struct resource_list_entry	*rle;
	bool				 isdefault;
	bool				 passthrough;

	passthrough = (device_get_parent(child) != dev);
	isdefault = RMAN_IS_DEFAULT_RANGE(start, end);

	/* the default RID must always be the first device port/region. */
	if (!passthrough && *rid == 0) {
		int rid0 = bhnd_get_port_rid(child, BHND_PORT_DEVICE, 0, 0);
		KASSERT(*rid == rid0,
		    ("rid 0 does not map to the first device port (%d)", rid0));
	}

	/* Determine locally-known defaults before delegating the request. */
	if (!passthrough && isdefault) {
		/* fetch resource list from child's bus */
		rl = BUS_GET_RESOURCE_LIST(dev, child);
		if (rl == NULL)
			return (NULL); /* no resource list */

		/* look for matching type/rid pair */
		rle = resource_list_find(BUS_GET_RESOURCE_LIST(dev, child),
		    type, *rid);
		if (rle == NULL)
			return (NULL);

		/* set default values */
		start = rle->start;
		end = rle->end;
		count = ulmax(count, rle->count);
	}

	/* Try to delegate to our parent. */
	if (device_get_parent(dev) != NULL) {
		return (BHND_BUS_ALLOC_RESOURCE(device_get_parent(dev), child,
		    type, rid, start, end, count, flags));
	}

	/* If this is the bus root, use a real bus-allocated resource */
	r = malloc(sizeof(struct bhnd_resource), M_BHND, M_NOWAIT);
	if (r == NULL)
		return (NULL);

	/* Allocate the bus resource, marking it as 'direct' (not requiring
	 * any bus window remapping to perform I/O) */
	r->direct = true;
	r->res = BUS_ALLOC_RESOURCE(dev, child, type, rid, start, end,
	    count, flags);

	if (r->res == NULL) {
		free(r, M_BHND);
		return (NULL);
	}

	return (r);
}
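For a child sitting on a bhnd(4) bus, the helper above is normally reached through the bhnd resource wrappers rather than called directly. A hedged sketch of a child driver's attach path; bhnd_alloc_resource_any() and bhnd_release_resource() are assumed to match the helpers declared in FreeBSD's sys/dev/bhnd/bhnd.h and should be verified against the tree in use:

/*
 * Hypothetical child driver, error handling trimmed. rid 0 maps to
 * the first device port/region, matching the KASSERT in the helper
 * above.
 */
static int
mydrv_attach(device_t dev)
{
	struct bhnd_resource	*mem;
	int			 rid;

	rid = 0;
	mem = bhnd_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, RF_ACTIVE);
	if (mem == NULL)
		return (ENXIO);

	/* ... I/O through the bhnd_bus_read_4()/bhnd_bus_write_4()
	 * macros, which take the resource's direct flag into account ... */

	bhnd_release_resource(dev, SYS_RES_MEMORY, rid, mem);
	return (0);
}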
Example #23
static struct resource *
sbus_alloc_resource(device_t bus, device_t child, int type, int *rid,
                    rman_res_t start, rman_res_t end, rman_res_t count, u_int flags)
{
    struct sbus_softc *sc;
    struct rman *rm;
    struct resource *rv;
    struct resource_list *rl;
    struct resource_list_entry *rle;
    device_t schild;
    bus_addr_t toffs;
    bus_size_t tend;
    int i, slot;
    int isdefault, passthrough;

    isdefault = RMAN_IS_DEFAULT_RANGE(start, end);
    passthrough = (device_get_parent(child) != bus);
    rle = NULL;
    sc = device_get_softc(bus);
    rl = BUS_GET_RESOURCE_LIST(bus, child);
    switch (type) {
    case SYS_RES_IRQ:
        return (resource_list_alloc(rl, bus, child, type, rid, start,
                                    end, count, flags));
    case SYS_RES_MEMORY:
        if (!passthrough) {
            rle = resource_list_find(rl, type, *rid);
            if (rle == NULL)
                return (NULL);
            if (rle->res != NULL)
                panic("%s: resource entry is busy", __func__);
            if (isdefault) {
                start = rle->start;
                count = ulmax(count, rle->count);
                end = ulmax(rle->end, start + count - 1);
            }
        }
        rm = NULL;
        schild = child;
        while (device_get_parent(schild) != bus)
            schild = device_get_parent(schild);
        slot = sbus_get_slot(schild);
        for (i = 0; i < sc->sc_nrange; i++) {
            if (sc->sc_rd[i].rd_slot != slot ||
                    start < sc->sc_rd[i].rd_coffset ||
                    start > sc->sc_rd[i].rd_cend)
                continue;
            /* Disallow cross-range allocations. */
            if (end > sc->sc_rd[i].rd_cend)
                return (NULL);
            /* We've found the connection to the parent bus */
            toffs = start - sc->sc_rd[i].rd_coffset;
            tend = end - sc->sc_rd[i].rd_coffset;
            rm = &sc->sc_rd[i].rd_rman;
            break;
        }
        if (rm == NULL)
            return (NULL);

        rv = rman_reserve_resource(rm, toffs, tend, count, flags &
                                   ~RF_ACTIVE, child);
        if (rv == NULL)
            return (NULL);
        rman_set_rid(rv, *rid);

        if ((flags & RF_ACTIVE) != 0 && bus_activate_resource(child,
                type, *rid, rv)) {
            rman_release_resource(rv);
            return (NULL);
        }
        if (!passthrough)
            rle->res = rv;
        return (rv);
    default:
        return (NULL);
    }
}
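The sc_rd[] loop above performs a typical Open Firmware "ranges" translation: find the range that covers the child-space start address for the device's slot, reject requests that would straddle two ranges, and rebase the address into the parent's space. The same logic in a standalone form, with hypothetical types and values:

#include <stdio.h>

/* Hypothetical mirror of the per-range descriptors above. */
struct range {
	int		slot;
	unsigned long	coffset;	/* child-space start (inclusive) */
	unsigned long	cend;		/* child-space end (inclusive) */
};

/*
 * Rebase [start, end] from child space into the matching range;
 * returns -1 if no single range covers the request.
 */
static int
translate(const struct range *rd, int nrange, int slot,
    unsigned long start, unsigned long end, unsigned long *toffs)
{
	int i;

	for (i = 0; i < nrange; i++) {
		if (rd[i].slot != slot || start < rd[i].coffset ||
		    start > rd[i].cend)
			continue;
		/* Disallow cross-range allocations. */
		if (end > rd[i].cend)
			return (-1);
		*toffs = start - rd[i].coffset;
		return (0);
	}
	return (-1);
}

int
main(void)
{
	struct range rd[] = {
		{ 0, 0x00000, 0x0ffff },
		{ 1, 0x10000, 0x1ffff }
	};
	unsigned long toffs;

	if (translate(rd, 2, 1, 0x10100, 0x101ff, &toffs) == 0)
		printf("parent offset %#lx\n", toffs);
	return (0);
}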
Example #24
static int
qman_portals_fdt_attach(device_t dev)
{
	struct dpaa_portals_softc *sc;
	struct resource_list_entry *rle;
	phandle_t node, child, cpu_node;
	vm_paddr_t portal_pa;
	vm_size_t portal_size;
	uint32_t addr, size;
	ihandle_t cpu;
	int cpu_num, cpus, intr_rid;
	struct dpaa_portals_devinfo di;
	struct ofw_bus_devinfo ofw_di = {};

	cpus = 0;
	sc = device_get_softc(dev);
	sc->sc_dev = dev;

	node = ofw_bus_get_node(dev);
	get_addr_props(node, &addr, &size);

	/* Find portals tied to CPUs */
	for (child = OF_child(node); child != 0; child = OF_peer(child)) {
		if (!fdt_is_compatible(child, "fsl,qman-portal")) {
			continue;
		}
		/* Look up the related CPU */
		if (OF_getprop(child, "cpu-handle", (void *)&cpu,
		    sizeof(cpu)) <= 0) {
			continue;
		}
		/* Acquire cpu number */
		cpu_node = OF_instance_to_package(cpu);
		if (OF_getencprop(cpu_node, "reg", &cpu_num,
		    sizeof(cpu_num)) <= 0) {
			device_printf(dev, "Could not retrieve CPU number.\n");
			return (ENXIO);
		}

		cpus++;

		if (cpus > MAXCPU)
			break;

		if (ofw_bus_gen_setup_devinfo(&ofw_di, child) != 0) {
			device_printf(dev, "could not set up devinfo\n");
			continue;
		}

		resource_list_init(&di.di_res);
		if (ofw_bus_reg_to_rl(dev, child, addr, size, &di.di_res)) {
			device_printf(dev, "%s: could not process 'reg' "
			    "property\n", ofw_di.obd_name);
			ofw_bus_gen_destroy_devinfo(&ofw_di);
			continue;
		}
		if (ofw_bus_intr_to_rl(dev, child, &di.di_res, &intr_rid)) {
			device_printf(dev, "%s: could not process "
			    "'interrupts' property\n", ofw_di.obd_name);
			resource_list_free(&di.di_res);
			ofw_bus_gen_destroy_devinfo(&ofw_di);
			continue;
		}
		di.di_intr_rid = intr_rid;
		
		ofw_reg_to_paddr(child, 0, &portal_pa, &portal_size, NULL);
		rle = resource_list_find(&di.di_res, SYS_RES_MEMORY, 0);

		if (sc->sc_dp_pa == 0)
			sc->sc_dp_pa = portal_pa - rle->start;

		portal_size = rle->end + 1;
		rle = resource_list_find(&di.di_res, SYS_RES_MEMORY, 1);
		portal_size = ulmax(rle->end + 1, portal_size);
		sc->sc_dp_size = ulmax(sc->sc_dp_size, portal_size);

		if (dpaa_portal_alloc_res(dev, &di, cpu_num))
			goto err;
	}

	ofw_bus_gen_destroy_devinfo(&ofw_di);

	return (qman_portals_attach(dev));
err:
	resource_list_free(&di.di_res);
	ofw_bus_gen_destroy_devinfo(&ofw_di);
	qman_portals_detach(dev);
	return (ENXIO);
}
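The per-portal arithmetic above keeps one base/size pair covering every portal: sc_dp_pa is portal 0's physical address minus its bus offset, and sc_dp_size grows toward the largest end + 1 observed across each portal's two memory regions. The end + 1 step in isolation, with hypothetical region bounds:

#include <stdio.h>

int
main(void)
{
	/* Hypothetical resource-list end addresses (inclusive) for one
	 * portal's two memory regions. */
	unsigned long ce_end = 0x3fff;		/* cache-enabled region */
	unsigned long ci_end = 0x103ff;		/* cache-inhibited region */
	unsigned long dp_size = 0, portal_size;

	portal_size = ce_end + 1;		/* inclusive end -> size */
	if (ci_end + 1 > portal_size)		/* ulmax() in the kernel */
		portal_size = ci_end + 1;
	if (portal_size > dp_size)
		dp_size = portal_size;
	printf("window size: %#lx bytes\n", dp_size);
	return (0);
}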
Example #25
File: isa.c Project: MarginC/kame
struct resource *
isa_alloc_resource(device_t bus, device_t child, int type, int *rid,
		   u_long start, u_long end, u_long count, u_int flags)
{
	/*
	 * Consider adding a resource definition. We allow rid 0-1 for
	 * irq and drq, 0-3 for memory and 0-7 for ports which is
	 * sufficient for isapnp.
	 */
	int passthrough = (device_get_parent(child) != bus);
	int isdefault = (start == 0UL && end == ~0UL);
	struct isa_device* idev = DEVTOISA(child);
	struct resource_list *rl = &idev->id_resources;
	struct resource_list_entry *rle;
	u_long base, limit;

	if (!passthrough && !isdefault) {
		rle = resource_list_find(rl, type, *rid);
		if (!rle) {
			if (*rid < 0)
				return 0;
			switch (type) {
			case SYS_RES_IRQ:
				if (*rid >= ISA_NIRQ)
					return 0;
				break;
			case SYS_RES_DRQ:
				if (*rid >= ISA_NDRQ)
					return 0;
				break;
			case SYS_RES_MEMORY:
				if (*rid >= ISA_NMEM)
					return 0;
				break;
			case SYS_RES_IOPORT:
				if (*rid >= ISA_NPORT)
					return 0;
				break;
			default:
				return 0;
			}
			resource_list_add(rl, type, *rid, start, end, count);
		}
	}

	/*
	 * Add the base, change default allocations to be between base and
	 * limit, and reject allocations if a resource type is not enabled.
	 */
	base = limit = 0;
	switch(type) {
	case SYS_RES_MEMORY:
		if (isa_mem_bt == NULL)
			return (NULL);
		base = isa_mem_base;
		limit = base + isa_mem_limit;
		break;
	case SYS_RES_IOPORT:
		if (isa_io_bt == NULL)
			return (NULL);
		base = isa_io_base;
		limit = base + isa_io_limit;
		break;
	case SYS_RES_IRQ:
		if (isdefault && passthrough)
			panic("isa_alloc_resource: cannot pass through default "
			    "irq allocation");
		if (!isdefault) {
			start = end = isa_route_intr_res(bus, start, end);
			if (start == 255)
				return (NULL);
		}
		break;
	default:
		panic("isa_alloc_resource: unsupported resource type %d", type);
	}
	if (type == SYS_RES_MEMORY || type == SYS_RES_IOPORT) {
		start = ulmin(start + base, limit);
		end = ulmin(end + base, limit);
	}
			
	/*
	 * This inlines a modified resource_list_alloc(); this is needed
	 * because the resources need to have offsets added to them, which
	 * cannot be done beforehand without patching the resource list entries
	 * (which is ugly).
	 */
	if (passthrough) {
		return (BUS_ALLOC_RESOURCE(device_get_parent(bus), child,
		    type, rid, start, end, count, flags));
	}

	rle = resource_list_find(rl, type, *rid);
	if (rle == NULL)
		return (NULL);		/* no resource of that type/rid */

	if (rle->res != NULL)
		panic("isa_alloc_resource: resource entry is busy");

	if (isdefault) {
		start = rle->start;
		count = ulmax(count, rle->count);
		end = ulmax(rle->end, start + count - 1);
		switch (type) {
		case SYS_RES_MEMORY:
		case SYS_RES_IOPORT:
			start += base;
			end += base;
			if (!INRANGE(start, base, limit) ||
			    !INRANGE(end, base, limit))
				return (NULL);
			break;
		case SYS_RES_IRQ:
			start = end = isa_route_intr_res(bus, start, end);
			if (start == 255)
				return (NULL);
			break;
		}
	}

	rle->res = BUS_ALLOC_RESOURCE(device_get_parent(bus), child,
	    type, rid, start, end, count, flags);

	/*
	 * Record the new range.
	 */
	if (rle->res != NULL) {
		rle->start = rman_get_start(rle->res) - base;
		rle->end = rman_get_end(rle->res) - base;
		rle->count = count;
	}

	return (rle->res);
}
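The memory and I/O-port handling above rebases a child's ISA address by the bridge window's base and clamps the result to the window's limit, refusing defaults that land outside the window. The arithmetic in isolation, with hypothetical window values; ULMIN and INRANGE mirror the kernel's ulmin() and INRANGE():

#include <stdio.h>

#define	ULMIN(a, b)		((a) < (b) ? (a) : (b))
#define	INRANGE(x, lo, hi)	((x) >= (lo) && (x) <= (hi))

int
main(void)
{
	/* Hypothetical bridge window: ISA addresses appear at
	 * base..limit in the parent bus's address space. */
	unsigned long base = 0xf0000000UL;
	unsigned long limit = base + 0xffffffUL;
	unsigned long start = 0x3f8, end = 0x3ff;	/* child request */

	start = ULMIN(start + base, limit);
	end = ULMIN(end + base, limit);
	if (!INRANGE(start, base, limit) || !INRANGE(end, base, limit))
		return (1);
	printf("rebased: %#lx - %#lx\n", start, end);
	return (0);
}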