Example #1
/*
 * stpoll_handler is scheduled by sched_stpoll when appropriate, typically
 * once per polling systimer tick.
 */
static void
stpoll_handler(netmsg_t msg)
{
	struct stpoll_ctx *st_ctx = &stpoll_context;
	struct thread *td = curthread;
	int i;

	KKASSERT(&td->td_msgport == netisr_cpuport(0));

	crit_enter_quick(td);

	/* Reply ASAP */
	lwkt_replymsg(&msg->lmsg, 0);

	if (st_ctx->poll_handlers == 0) {
		crit_exit_quick(td);
		return;
	}

	for (i = 0; i < st_ctx->poll_handlers; ++i) {
		const struct stpoll_rec *rec = &st_ctx->pr[i];
		struct ifnet *ifp = rec->ifp;

		if (!lwkt_serialize_try(rec->serializer))
			continue;

		if ((ifp->if_flags & (IFF_RUNNING | IFF_NPOLLING)) ==
		    (IFF_RUNNING | IFF_NPOLLING))
			rec->status_func(ifp);

		lwkt_serialize_exit(rec->serializer);
	}

	crit_exit_quick(td);
}
Example #2
static int
iopoll_deregister(struct ifnet *ifp, struct iopoll_ctx *io_ctx)
{
	int i, error;

	KKASSERT(&curthread->td_msgport == netisr_cpuport(io_ctx->poll_cpuid));

	for (i = 0; i < io_ctx->poll_handlers; ++i) {
		if (io_ctx->pr[i].ifp == ifp) /* Found it */
			break;
	}
	if (i == io_ctx->poll_handlers) {
		error = ENOENT;
	} else {
		io_ctx->poll_handlers--;
		if (i < io_ctx->poll_handlers) {
			/* Last entry replaces this one. */
			io_ctx->pr[i] = io_ctx->pr[io_ctx->poll_handlers];
		}

		if (io_ctx->poll_handlers == 0)
			iopoll_reset_state(io_ctx);
		error = 0;
	}
	return error;
}
Example #3
int
ifpoll_deregister(struct ifnet *ifp)
{
	struct netmsg_base nmsg;
	int error;

	if (ifp->if_npoll == NULL)
		return EOPNOTSUPP;

	ifnet_serialize_all(ifp);

	if ((ifp->if_flags & IFF_NPOLLING) == 0) {
		ifnet_deserialize_all(ifp);
		return EINVAL;
	}
	ifp->if_flags &= ~IFF_NPOLLING;

	ifnet_deserialize_all(ifp);

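	/*
	 * Tear down the per-CPU registrations by chaining a message through
	 * every polling CPU, starting at netisr0; on success the driver is
	 * then told to disable polling via if_npoll(ifp, NULL).
	 */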
	netmsg_init(&nmsg, NULL, &curthread->td_msgport,
		    0, ifpoll_deregister_handler);
	nmsg.lmsg.u.ms_resultp = ifp;

	error = lwkt_domsg(netisr_cpuport(0), &nmsg.lmsg, 0);
	if (!error) {
		ifnet_serialize_all(ifp);
		ifp->if_npoll(ifp, NULL);
		ifnet_deserialize_all(ifp);
	}
	return error;
}
Example #4
static int
sysctl_burstmax(SYSCTL_HANDLER_ARGS)
{
	struct iopoll_ctx *io_ctx = arg1;
	struct iopoll_sysctl_netmsg msg;
	uint32_t burst_max;
	int error;

	burst_max = io_ctx->poll_burst_max;
	error = sysctl_handle_int(oidp, &burst_max, 0, req);
	if (error || req->newptr == NULL)
		return error;
	if (burst_max < MIN_IOPOLL_BURST_MAX)
		burst_max = MIN_IOPOLL_BURST_MAX;
	else if (burst_max > MAX_IOPOLL_BURST_MAX)
		burst_max = MAX_IOPOLL_BURST_MAX;

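	/*
	 * The clamped value is applied by sysctl_burstmax_handler in the
	 * netisr thread that owns io_ctx, so the update needs no extra
	 * locking here.
	 */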
	netmsg_init(&msg.base, NULL, &curthread->td_msgport,
		    0, sysctl_burstmax_handler);
	msg.base.lmsg.u.ms_result = burst_max;
	msg.ctx = io_ctx;

	return lwkt_domsg(netisr_cpuport(io_ctx->poll_cpuid),
	    &msg.base.lmsg, 0);
}
Example #5
/*
 * rxpoll_handler and txpoll_handler are scheduled by sched_iopoll when
 * appropriate, typically once per polling systimer tick.
 *
 * Note that the message is replied immediately in order to allow a new
 * ISR to be scheduled in the handler.
 */
static void
rxpoll_handler(netmsg_t msg)
{
	struct iopoll_ctx *io_ctx;
	struct thread *td = curthread;
	int i, cycles;

	io_ctx = msg->lmsg.u.ms_resultp;
	KKASSERT(&td->td_msgport == netisr_cpuport(io_ctx->poll_cpuid));

	crit_enter_quick(td);

	/* Reply ASAP */
	lwkt_replymsg(&msg->lmsg, 0);

	if (io_ctx->poll_handlers == 0) {
		crit_exit_quick(td);
		return;
	}

	io_ctx->phase = 3;
	if (io_ctx->residual_burst == 0) {
		/* First call in this tick */
		ifpoll_time_get(&io_ctx->poll_start_t);
		io_ctx->residual_burst = io_ctx->poll_burst;
	}
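	/*
	 * Each pass processes at most poll_each_burst packets per interface;
	 * any residual burst is rescheduled from rxpollmore_handler.
	 */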
	cycles = (io_ctx->residual_burst < io_ctx->poll_each_burst) ?
		 io_ctx->residual_burst : io_ctx->poll_each_burst;
	io_ctx->residual_burst -= cycles;

	for (i = 0; i < io_ctx->poll_handlers; i++) {
		const struct iopoll_rec *rec = &io_ctx->pr[i];
		struct ifnet *ifp = rec->ifp;

		if (!lwkt_serialize_try(rec->serializer))
			continue;

		if ((ifp->if_flags & (IFF_RUNNING | IFF_NPOLLING)) ==
		    (IFF_RUNNING | IFF_NPOLLING))
			rec->poll_func(ifp, rec->arg, cycles);

		lwkt_serialize_exit(rec->serializer);
	}

	/*
	 * Do a quick exit/enter to catch any higher-priority
	 * interrupt sources.
	 */
	crit_exit_quick(td);
	crit_enter_quick(td);

	sched_iopollmore(io_ctx);
	io_ctx->phase = 4;

	crit_exit_quick(td);
}
Example #6
static void
tcp6_usr_listen(netmsg_t msg)
{
	struct socket *so = msg->listen.base.nm_so;
	struct thread *td = msg->listen.nm_td;
	int error = 0;
	struct inpcb *inp;
	struct tcpcb *tp;
	struct netmsg_inswildcard nm;

	COMMON_START(so, inp, 0);

	if (tp->t_flags & TF_LISTEN)
		goto out;

	if (inp->inp_lport == 0) {
		error = in6_pcbbind(inp, NULL, td);
		if (error)
			goto out;
	}

	tp->t_state = TCPS_LISTEN;
	tp->t_flags |= TF_LISTEN;
	tp->tt_msg = NULL; /* Catch any invalid timer usage */

	if (ncpus2 > 1) {
		/*
		 * Put this inpcb into wildcard hash on other cpus.
		 */
		KKASSERT(so->so_port == netisr_cpuport(0));
		ASSERT_IN_NETISR(0);
		KKASSERT(inp->inp_pcbinfo == &tcbinfo[0]);
		ASSERT_INP_NOTINHASH(inp);

		netmsg_init(&nm.base, NULL, &curthread->td_msgport,
			    MSGF_PRIORITY, in_pcbinswildcardhash_handler);
		nm.nm_inp = inp;
		lwkt_domsg(netisr_cpuport(1), &nm.base.lmsg, 0);
	}
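	/* Insert into the wildcard hash on the current CPU (netisr0). */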
	in_pcbinswildcardhash(inp);
	COMMON_END(PRU_LISTEN);
}
Example #7
static int
acpi_cst_cx_reprobe_cst(struct acpi_cst_softc *sc)
{
    struct netmsg_acpi_cst msg;

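    /*
     * Run the reprobe synchronously in the netisr thread bound to the
     * target CPU; MSGF_PRIORITY queues the message ahead of regular work.
     */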
    netmsg_init(&msg.base, NULL, &curthread->td_msgport, MSGF_PRIORITY,
	acpi_cst_cx_reprobe_cst_handler);
    msg.sc = sc;

    return lwkt_domsg(netisr_cpuport(sc->cst_cpuid), &msg.base.lmsg, 0);
}
Example #8
static void
acpi_cst_c3_bm_rld(struct acpi_cst_softc *sc)
{
    struct netmsg_acpi_cst msg;

    netmsg_init(&msg.base, NULL, &curthread->td_msgport, MSGF_PRIORITY,
	acpi_cst_c3_bm_rld_handler);
    msg.sc = sc;

    lwkt_domsg(netisr_cpuport(sc->cst_cpuid), &msg.base.lmsg, 0);
}
Example #9
static void
ifpoll_register_handler(netmsg_t nmsg)
{
	const struct ifpoll_info *info = nmsg->lmsg.u.ms_resultp;
	int cpuid = mycpuid, nextcpu;
	int error;

	KKASSERT(cpuid < ncpus2);
	KKASSERT(&curthread->td_msgport == netisr_cpuport(cpuid));

	if (cpuid == 0) {
		error = stpoll_register(info->ifpi_ifp, &info->ifpi_status);
		if (error)
			goto failed;
	}

	error = iopoll_register(info->ifpi_ifp, rxpoll_context[cpuid],
				&info->ifpi_rx[cpuid]);
	if (error)
		goto failed;

	error = iopoll_register(info->ifpi_ifp, txpoll_context[cpuid],
				&info->ifpi_tx[cpuid]);
	if (error)
		goto failed;

	/* Adjust polling frequency, after all registration is done */
	poll_comm_adjust_pollhz(poll_common[cpuid]);

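	/*
	 * Chain the registration through every polling CPU: forward the
	 * message to the next CPU's netisr, or reply to the originator once
	 * the last CPU has been handled.
	 */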
	nextcpu = cpuid + 1;
	if (nextcpu < ncpus2)
		lwkt_forwardmsg(netisr_cpuport(nextcpu), &nmsg->lmsg);
	else
		lwkt_replymsg(&nmsg->lmsg, 0);
	return;
failed:
	lwkt_replymsg(&nmsg->lmsg, error);
}
Example #10
static void
in_pcbinswildcardhash_handler(netmsg_t msg)
{
	struct netmsg_inswildcard *nm = (struct netmsg_inswildcard *)msg;
	int cpu = mycpuid, nextcpu;

	in_pcbinswildcardhash_oncpu(nm->nm_inp, &tcbinfo[cpu]);

	nextcpu = cpu + 1;
	if (nextcpu < ncpus2)
		lwkt_forwardmsg(netisr_cpuport(nextcpu), &nm->base.lmsg);
	else
		lwkt_replymsg(&nm->base.lmsg, 0);
}
Example #11
static void
udp_notifyall_oncpu(netmsg_t msg)
{
	struct netmsg_udp_notify *nm = (struct netmsg_udp_notify *)msg;
	int nextcpu, cpu = mycpuid;

	in_pcbnotifyall(&udbinfo[cpu], nm->nm_faddr, nm->nm_arg, nm->nm_notify);

	nextcpu = cpu + 1;
	if (nextcpu < ncpus2)
		lwkt_forwardmsg(netisr_cpuport(nextcpu), &nm->base.lmsg);
	else
		lwkt_replymsg(&nm->base.lmsg, 0);
}
Example #12
int
ifpoll_register(struct ifnet *ifp)
{
	struct ifpoll_info *info;
	struct netmsg_base nmsg;
	int error;

	if (ifp->if_npoll == NULL) {
		/* Device does not support polling */
		return EOPNOTSUPP;
	}

	info = kmalloc(sizeof(*info), M_TEMP, M_WAITOK | M_ZERO);

	/*
	 * Attempt to register.  Interlock with IFF_NPOLLING.
	 */

	ifnet_serialize_all(ifp);

	if (ifp->if_flags & IFF_NPOLLING) {
		/* Already polling */
		ifnet_deserialize_all(ifp);
		kfree(info, M_TEMP);
		return EBUSY;
	}

	info->ifpi_ifp = ifp;

	ifp->if_flags |= IFF_NPOLLING;
	ifp->if_npoll(ifp, info);

	ifnet_deserialize_all(ifp);

	netmsg_init(&nmsg, NULL, &curthread->td_msgport,
		    0, ifpoll_register_handler);
	nmsg.lmsg.u.ms_resultp = info;

	error = lwkt_domsg(netisr_cpuport(0), &nmsg.lmsg, 0);
	if (error) {
		if (ifpoll_deregister(ifp)) {
			if_printf(ifp, "ifpoll_register: "
				  "ifpoll_deregister failed!\n");
		}
	}

	kfree(info, M_TEMP);
	return error;
}
Example #13
void
udp_ctlinput(netmsg_t msg)
{
	struct sockaddr *sa = msg->ctlinput.nm_arg;
	struct ip *ip = msg->ctlinput.nm_extra;
	int cmd = msg->ctlinput.nm_cmd, cpuid;
	inp_notify_t notify;
	struct in_addr faddr;

	notify = udp_get_inpnotify(cmd, sa, &ip, &cpuid);
	if (notify == NULL)
		goto done;

	faddr = ((struct sockaddr_in *)sa)->sin_addr;
	if (ip) {
		const struct udphdr *uh;
		struct inpcb *inp;

		if (cpuid != mycpuid)
			goto done;

		uh = (const struct udphdr *)((caddr_t)ip + (ip->ip_hl << 2));
		inp = in_pcblookup_hash(&udbinfo[mycpuid], faddr, uh->uh_dport,
					ip->ip_src, uh->uh_sport, 0, NULL);
		if (inp != NULL && inp->inp_socket != NULL)
			notify(inp, inetctlerrmap[cmd]);
	} else if (msg->ctlinput.nm_direct) {
		if (cpuid != ncpus && cpuid != mycpuid)
			goto done;
		if (mycpuid >= ncpus2)
			goto done;

		in_pcbnotifyall(&udbinfo[mycpuid], faddr, inetctlerrmap[cmd],
		    notify);
	} else {
		struct netmsg_udp_notify *nm;

		ASSERT_IN_NETISR(0);
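		/*
		 * The reply port is netisr_afree_rport, so the message is
		 * freed automatically once the last CPU replies to it.
		 */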
		nm = kmalloc(sizeof(*nm), M_LWKTMSG, M_INTWAIT);
		netmsg_init(&nm->base, NULL, &netisr_afree_rport,
			    0, udp_notifyall_oncpu);
		nm->nm_faddr = faddr;
		nm->nm_arg = inetctlerrmap[cmd];
		nm->nm_notify = notify;
		lwkt_sendmsg(netisr_cpuport(0), &nm->base.lmsg);
	}
done:
	lwkt_replymsg(&msg->lmsg, 0);
}
Example #14
static void
ifpoll_deregister_handler(netmsg_t nmsg)
{
	struct ifnet *ifp = nmsg->lmsg.u.ms_resultp;
	int cpuid = mycpuid, nextcpu;

	KKASSERT(cpuid < ncpus2);
	KKASSERT(&curthread->td_msgport == netisr_cpuport(cpuid));

	/* Ignore errors */
	if (cpuid == 0)
		stpoll_deregister(ifp);
	iopoll_deregister(ifp, rxpoll_context[cpuid]);
	iopoll_deregister(ifp, txpoll_context[cpuid]);

	/* Adjust polling frequency, after all deregistration is done */
	poll_comm_adjust_pollhz(poll_common[cpuid]);

	nextcpu = cpuid + 1;
	if (nextcpu < ncpus2)
		lwkt_forwardmsg(netisr_cpuport(nextcpu), &nmsg->lmsg);
	else
		lwkt_replymsg(&nmsg->lmsg, 0);
}
Example #15
void
ip_dn_queue(struct mbuf *m)
{
	struct netmsg_packet *nmp;
	lwkt_port_t port;

	M_ASSERTPKTHDR(m);
	KASSERT(m->m_pkthdr.fw_flags & DUMMYNET_MBUF_TAGGED,
		("mbuf is not tagged for dummynet!"));

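	/*
	 * The netmsg is embedded in the mbuf header itself, so no separate
	 * allocation is needed; netisr_apanic_rport is used as the reply
	 * port because this message is never expected to be replied.
	 */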
	nmp = &m->m_hdr.mh_netmsg;
	netmsg_init(&nmp->base, NULL, &netisr_apanic_rport,
			0, ip_dn_dispatch);
	nmp->nm_packet = m;

	port = netisr_cpuport(ip_dn_cpu);
	lwkt_sendmsg(port, &nmp->base.lmsg);
}
Example #16
static void
sysctl_burstmax_handler(netmsg_t nmsg)
{
	struct iopoll_sysctl_netmsg *msg = (struct iopoll_sysctl_netmsg *)nmsg;
	struct iopoll_ctx *io_ctx;

	io_ctx = msg->ctx;
	KKASSERT(&curthread->td_msgport == netisr_cpuport(io_ctx->poll_cpuid));

	io_ctx->poll_burst_max = nmsg->lmsg.u.ms_result;
	if (io_ctx->poll_each_burst > io_ctx->poll_burst_max)
		io_ctx->poll_each_burst = io_ctx->poll_burst_max;
	if (io_ctx->poll_burst > io_ctx->poll_burst_max)
		io_ctx->poll_burst = io_ctx->poll_burst_max;
	if (io_ctx->residual_burst > io_ctx->poll_burst_max)
		io_ctx->residual_burst = io_ctx->poll_burst_max;

	lwkt_replymsg(&nmsg->lmsg, 0);
}
Example #17
static void
sysctl_eachburst_handler(netmsg_t nmsg)
{
	struct iopoll_sysctl_netmsg *msg = (struct iopoll_sysctl_netmsg *)nmsg;
	struct iopoll_ctx *io_ctx;
	uint32_t each_burst;

	io_ctx = msg->ctx;
	KKASSERT(&curthread->td_msgport == netisr_cpuport(io_ctx->poll_cpuid));

	each_burst = nmsg->lmsg.u.ms_result;
	if (each_burst > io_ctx->poll_burst_max)
		each_burst = io_ctx->poll_burst_max;
	else if (each_burst < 1)
		each_burst = 1;
	io_ctx->poll_each_burst = each_burst;

	lwkt_replymsg(&nmsg->lmsg, 0);
}
Example #18
static int
stpoll_register(struct ifnet *ifp, const struct ifpoll_status *st_rec)
{
	struct stpoll_ctx *st_ctx = &stpoll_context;
	int error;

	KKASSERT(&curthread->td_msgport == netisr_cpuport(0));

	if (st_rec->status_func == NULL)
		return 0;

	/*
	 * Check if there is room.
	 */
	if (st_ctx->poll_handlers >= IFPOLL_LIST_LEN) {
		/*
		 * List full, cannot register more entries.
		 * This should never happen; if it does, it is probably a
		 * broken driver trying to register multiple times. Checking
		 * this at runtime is expensive, and won't solve the problem
		 * anyway, so just report a few times and then give up.
		 */
		static int verbose = 10; /* XXX */

		if (verbose > 0) {
			kprintf("status poll handlers list full, "
				"maybe a broken driver ?\n");
			verbose--;
		}
		error = ENOENT;
	} else {
		struct stpoll_rec *rec = &st_ctx->pr[st_ctx->poll_handlers];

		rec->ifp = ifp;
		rec->serializer = st_rec->serializer;
		rec->status_func = st_rec->status_func;

		st_ctx->poll_handlers++;
		error = 0;
	}
	return error;
}
Example #19
static int
sysctl_eachburst(SYSCTL_HANDLER_ARGS)
{
	struct iopoll_ctx *io_ctx = arg1;
	struct iopoll_sysctl_netmsg msg;
	uint32_t each_burst;
	int error;

	each_burst = io_ctx->poll_each_burst;
	error = sysctl_handle_int(oidp, &each_burst, 0, req);
	if (error || req->newptr == NULL)
		return error;

	netmsg_init(&msg.base, NULL, &curthread->td_msgport,
		    0, sysctl_eachburst_handler);
	msg.base.lmsg.u.ms_result = each_burst;
	msg.ctx = io_ctx;

	return lwkt_domsg(netisr_cpuport(io_ctx->poll_cpuid),
	    &msg.base.lmsg, 0);
}
Example #20
static void
txpollmore_handler(netmsg_t msg)
{
	struct thread *td = curthread;
	struct iopoll_ctx *io_ctx;
	uint32_t pending_polls;

	io_ctx = msg->lmsg.u.ms_resultp;
	KKASSERT(&td->td_msgport == netisr_cpuport(io_ctx->poll_cpuid));

	crit_enter_quick(td);

	/* Reply ASAP */
	lwkt_replymsg(&msg->lmsg, 0);

	if (io_ctx->poll_handlers == 0) {
		crit_exit_quick(td);
		return;
	}

	io_ctx->phase = 5;

	io_ctx->pending_polls--;
	pending_polls = io_ctx->pending_polls;

	if (pending_polls == 0) {
		/* We are done */
		io_ctx->phase = 0;
	} else {
		/*
		 * Last cycle was long and caused us to miss one or more
		 * hardclock ticks.  Restart processing.
		 */
		sched_iopoll(io_ctx);
		io_ctx->phase = 6;
	}

	crit_exit_quick(td);
}
Example #21
static int
stpoll_deregister(struct ifnet *ifp)
{
	struct stpoll_ctx *st_ctx = &stpoll_context;
	int i, error;

	KKASSERT(&curthread->td_msgport == netisr_cpuport(0));

	for (i = 0; i < st_ctx->poll_handlers; ++i) {
		if (st_ctx->pr[i].ifp == ifp) /* Found it */
			break;
	}
	if (i == st_ctx->poll_handlers) {
		error = ENOENT;
	} else {
		st_ctx->poll_handlers--;
		if (i < st_ctx->poll_handlers) {
			/* Last entry replaces this one. */
			st_ctx->pr[i] = st_ctx->pr[st_ctx->poll_handlers];
		}
		error = 0;
	}
	return error;
}
Example #22
static __inline void
ifpoll_sendmsg_oncpu(netmsg_t msg)
{
	if (msg->lmsg.ms_flags & MSGF_DONE)
		lwkt_sendmsg(netisr_cpuport(mycpuid), &msg->lmsg);
}
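The MSGF_DONE test above is what lets each polling context embed a single netmsg and resend it on every systimer tick without double-queueing: if the previous message has not yet been replied, the tick is simply skipped. Below is a minimal sketch of how the sched_stpoll/sched_iopoll helpers referenced in Examples #1 and #5 could be built on this primitive; the poll_netmsg field name is an assumption for illustration, not taken from the excerpts above.

/*
 * Sketch only: assumes each polling context embeds a struct netmsg_base
 * named poll_netmsg that was netmsg_init()'d with the corresponding
 * handler (stpoll_handler, rxpoll_handler, ...).
 */
static __inline void
sched_stpoll(struct stpoll_ctx *st_ctx)
{
	ifpoll_sendmsg_oncpu((netmsg_t)&st_ctx->poll_netmsg);
}

static __inline void
sched_iopoll(struct iopoll_ctx *io_ctx)
{
	ifpoll_sendmsg_oncpu((netmsg_t)&io_ctx->poll_netmsg);
}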
Example #23
/*
 * Parse a _CST package and set up its Cx states.  Since the _CST object
 * can change dynamically, our notify handler may call this function
 * to clean up and probe the new _CST package.
 */
static int
acpi_cst_cx_probe_cst(struct acpi_cst_softc *sc, int reprobe)
{
    struct	 acpi_cst_cx *cx_ptr;
    ACPI_STATUS	 status;
    ACPI_BUFFER	 buf;
    ACPI_OBJECT	*top;
    ACPI_OBJECT	*pkg;
    uint32_t	 count;
    int		 i;

    ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);

#ifdef INVARIANTS
    if (reprobe)
	KKASSERT(&curthread->td_msgport == netisr_cpuport(sc->cst_cpuid));
#endif

    buf.Pointer = NULL;
    buf.Length = ACPI_ALLOCATE_BUFFER;
    status = AcpiEvaluateObject(sc->cst_handle, "_CST", NULL, &buf);
    if (ACPI_FAILURE(status))
	return (ENXIO);

    /* _CST is a package with a count and at least one Cx package. */
    top = (ACPI_OBJECT *)buf.Pointer;
    if (!ACPI_PKG_VALID(top, 2) || acpi_PkgInt32(top, 0, &count) != 0) {
	device_printf(sc->cst_dev, "invalid _CST package\n");
	AcpiOsFree(buf.Pointer);
	return (ENXIO);
    }
    if (count != top->Package.Count - 1) {
	device_printf(sc->cst_dev, "invalid _CST state count (%d != %d)\n",
	       count, top->Package.Count - 1);
	count = top->Package.Count - 1;
    }
    if (count > MAX_CX_STATES) {
	device_printf(sc->cst_dev, "_CST has too many states (%d)\n", count);
	count = MAX_CX_STATES;
    }

    sc->cst_flags |= ACPI_CST_FLAG_PROBING | ACPI_CST_FLAG_MATCH_HT;
    cpu_sfence();

    /*
     * Free all previously allocated resources
     *
     * NOTE: It is needed for _CST reprobing.
     */
    acpi_cst_free_resource(sc, 0);

    /* Set up all valid states. */
    sc->cst_cx_count = 0;
    cx_ptr = sc->cst_cx_states;
    for (i = 0; i < count; i++) {
	int error;

	pkg = &top->Package.Elements[i + 1];
	if (!ACPI_PKG_VALID(pkg, 4) ||
	    acpi_PkgInt32(pkg, 1, &cx_ptr->type) != 0 ||
	    acpi_PkgInt32(pkg, 2, &cx_ptr->trans_lat) != 0 ||
	    acpi_PkgInt32(pkg, 3, &cx_ptr->power) != 0) {

	    device_printf(sc->cst_dev, "skipping invalid Cx state package\n");
	    continue;
	}

	/* Validate the state to see if we should use it. */
	switch (cx_ptr->type) {
	case ACPI_STATE_C1:
	    sc->cst_non_c3 = i;
	    cx_ptr->enter = acpi_cst_c1_halt_enter;
	    error = acpi_cst_cx_setup(cx_ptr);
	    if (error)
		panic("C1 CST HALT setup failed: %d", error);
	    if (sc->cst_cx_count != 0) {
		/*
		 * C1 is not the first C-state; something really stupid
		 * is going on ...
		 */
		sc->cst_flags &= ~ACPI_CST_FLAG_MATCH_HT;
	    }
	    cx_ptr++;
	    sc->cst_cx_count++;
	    continue;
	case ACPI_STATE_C2:
	    sc->cst_non_c3 = i;
	    break;
	case ACPI_STATE_C3:
	default:
	    if ((acpi_cst_quirks & ACPI_CST_QUIRK_NO_C3) != 0) {
		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
				 "cpu_cst%d: C3[%d] not available.\n",
				 device_get_unit(sc->cst_dev), i));
		continue;
	    }
	    break;
	}

	/*
	 * Allocate the control register for C2 or C3(+).
	 */
	KASSERT(cx_ptr->res == NULL, ("still has res"));
	acpi_PkgRawGas(pkg, 0, &cx_ptr->gas);

	/*
	 * We only match the number of C2/C3 states across hyperthreads
	 * if the register is "Fixed Hardware", as on most Intel CPUs.
	 * There is not much we can do for the other register types.
	 */
	if (cx_ptr->gas.SpaceId != ACPI_ADR_SPACE_FIXED_HARDWARE)
	    sc->cst_flags &= ~ACPI_CST_FLAG_MATCH_HT;

	cx_ptr->rid = sc->cst_parent->cpu_next_rid;
	acpi_bus_alloc_gas(sc->cst_dev, &cx_ptr->res_type, &cx_ptr->rid,
	    &cx_ptr->gas, &cx_ptr->res, RF_SHAREABLE);
	if (cx_ptr->res != NULL) {
	    sc->cst_parent->cpu_next_rid++;
	    ACPI_DEBUG_PRINT((ACPI_DB_INFO,
			     "cpu_cst%d: Got C%d - %d latency\n",
			     device_get_unit(sc->cst_dev), cx_ptr->type,
			     cx_ptr->trans_lat));
	    cx_ptr->enter = acpi_cst_cx_io_enter;
	    cx_ptr->btag = rman_get_bustag(cx_ptr->res);
	    cx_ptr->bhand = rman_get_bushandle(cx_ptr->res);
	    error = acpi_cst_cx_setup(cx_ptr);
	    if (error)
		panic("C%d CST I/O setup failed: %d", cx_ptr->type, error);
	    cx_ptr++;
	    sc->cst_cx_count++;
	} else {
	    error = acpi_cst_cx_setup(cx_ptr);
	    if (!error) {
		KASSERT(cx_ptr->enter != NULL,
		    ("C%d enter is not set", cx_ptr->type));
		cx_ptr++;
		sc->cst_cx_count++;
	    }
	}
    }
    AcpiOsFree(buf.Pointer);

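    /*
     * If hyperthread matching is still enabled, make every sibling in the
     * same core expose the same number of C-states by copying from the
     * softc with more states to the one with fewer.
     */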
    if (sc->cst_flags & ACPI_CST_FLAG_MATCH_HT) {
	cpumask_t mask;

	mask = get_cpumask_from_level(sc->cst_cpuid, CORE_LEVEL);
	if (CPUMASK_TESTNZERO(mask)) {
	    int cpu;

	    for (cpu = 0; cpu < ncpus; ++cpu) {
		struct acpi_cst_softc *sc1 = acpi_cst_softc[cpu];

		if (sc1 == NULL || sc1 == sc ||
		    (sc1->cst_flags & ACPI_CST_FLAG_ATTACHED) == 0 ||
		    (sc1->cst_flags & ACPI_CST_FLAG_MATCH_HT) == 0)
		    continue;
		if (!CPUMASK_TESTBIT(mask, sc1->cst_cpuid))
		    continue;

		if (sc1->cst_cx_count != sc->cst_cx_count) {
		    struct acpi_cst_softc *src_sc, *dst_sc;

		    if (bootverbose) {
			device_printf(sc->cst_dev,
			    "inconstent C-state count: %d, %s has %d\n",
			    sc->cst_cx_count,
			    device_get_nameunit(sc1->cst_dev),
			    sc1->cst_cx_count);
		    }
		    if (sc1->cst_cx_count > sc->cst_cx_count) {
			src_sc = sc1;
			dst_sc = sc;
		    } else {
			src_sc = sc;
			dst_sc = sc1;
		    }
		    acpi_cst_copy(dst_sc, src_sc);
		}
	    }
	}
    }

    if (reprobe) {
	/* If there are C3(+) states, always enable bus master wakeup */
	if ((acpi_cst_quirks & ACPI_CST_QUIRK_NO_BM) == 0) {
	    for (i = 0; i < sc->cst_cx_count; ++i) {
		struct acpi_cst_cx *cx = &sc->cst_cx_states[i];

		if (cx->type >= ACPI_STATE_C3) {
		    AcpiWriteBitRegister(ACPI_BITREG_BUS_MASTER_RLD, 1);
		    break;
		}
	    }
	}

	/* Fix up the lowest Cx being used */
	acpi_cst_set_lowest_oncpu(sc, sc->cst_cx_lowest_req);
    }

    /*
     * Cache the lowest non-C3 state.
     * NOTE: This must be done after cst_cx_lowest is set.
     */
    acpi_cst_non_c3(sc);

    cpu_sfence();
    sc->cst_flags &= ~ACPI_CST_FLAG_PROBING;

    return (0);
}
Example #24
/*
 * Prepare to accept connections.
 */
static void
tcp_usr_listen(netmsg_t msg)
{
	struct socket *so = msg->listen.base.nm_so;
	struct thread *td = msg->listen.nm_td;
	int error = 0;
	struct inpcb *inp;
	struct tcpcb *tp;
	struct netmsg_inswildcard nm;
	lwkt_port_t port0 = netisr_cpuport(0);

	COMMON_START(so, inp, 0);

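	/*
	 * A listening socket must be handled on netisr0.  If we are on a
	 * different netisr, unlink the inpcb from this CPU's pcbinfo and
	 * forward the message to netisr0, where it is re-linked
	 * (PRUL_RELINK) and processing continues below.
	 */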
	if (&curthread->td_msgport != port0) {
		lwkt_msg_t lmsg = &msg->listen.base.lmsg;

		KASSERT((msg->listen.nm_flags & PRUL_RELINK) == 0,
		    ("already asked to relink"));

		in_pcbunlink(so->so_pcb, &tcbinfo[mycpuid]);
		msg->listen.nm_flags |= PRUL_RELINK;

		/* See the related comment in tcp_connect() */
		lwkt_setmsg_receipt(lmsg, tcp_sosetport);
		lwkt_forwardmsg(port0, lmsg);
		/* msg invalid now */
		return;
	}
	KASSERT(so->so_port == port0, ("so_port is not netisr0"));

	if (msg->listen.nm_flags & PRUL_RELINK) {
		msg->listen.nm_flags &= ~PRUL_RELINK;
		in_pcblink(so->so_pcb, &tcbinfo[mycpuid]);
	}
	KASSERT(inp->inp_pcbinfo == &tcbinfo[0], ("pcbinfo is not tcbinfo0"));

	if (tp->t_flags & TF_LISTEN)
		goto out;

	if (inp->inp_lport == 0) {
		error = in_pcbbind(inp, NULL, td);
		if (error)
			goto out;
	}

	tp->t_state = TCPS_LISTEN;
	tp->t_flags |= TF_LISTEN;
	tp->tt_msg = NULL; /* Catch any invalid timer usage */

	if (ncpus2 > 1) {
		/*
		 * Put this inpcb into wildcard hash on other cpus.
		 */
		ASSERT_INP_NOTINHASH(inp);
		netmsg_init(&nm.base, NULL, &curthread->td_msgport,
			    MSGF_PRIORITY, in_pcbinswildcardhash_handler);
		nm.nm_inp = inp;
		lwkt_domsg(netisr_cpuport(1), &nm.base.lmsg, 0);
	}
	in_pcbinswildcardhash(inp);
	COMMON_END(PRU_LISTEN);
}
Example #25
/*
 * rxpollmore_handler and txpollmore_handler are called after the other
 * netisrs, possibly scheduling another rxpoll_handler or txpoll_handler
 * call, or adapting the burst size for the next cycle.
 *
 * It is very bad to fetch large bursts of packets from a single card at once,
 * because a large burst could take a long time to be completely processed,
 * leading to unfairness.  To reduce the problem, and also to better account
 * for the time spent in network-related processing, we split the burst into
 * smaller chunks of fixed size, giving control to the other netisrs between
 * chunks.  This helps improve fairness, reduce livelock and account for the
 * work performed in low-level handling.
 */
static void
rxpollmore_handler(netmsg_t msg)
{
	struct thread *td = curthread;
	struct iopoll_ctx *io_ctx;
	union ifpoll_time t;
	int kern_load;
	uint32_t pending_polls;

	io_ctx = msg->lmsg.u.ms_resultp;
	KKASSERT(&td->td_msgport == netisr_cpuport(io_ctx->poll_cpuid));

	crit_enter_quick(td);

	/* Reply ASAP */
	lwkt_replymsg(&msg->lmsg, 0);

	if (io_ctx->poll_handlers == 0) {
		crit_exit_quick(td);
		return;
	}

	io_ctx->phase = 5;
	if (io_ctx->residual_burst > 0) {
		sched_iopoll(io_ctx);
		crit_exit_quick(td);
		/* Will run immediately on return, followed by netisrs */
		return;
	}

	/* Here we can account time spent in iopoll's in this tick */
	ifpoll_time_get(&t);
	kern_load = ifpoll_time_diff(&io_ctx->poll_start_t, &t);
	kern_load = (kern_load * io_ctx->pollhz) / 10000; /* 0..100 */
	io_ctx->kern_frac = kern_load;

	if (kern_load > (100 - io_ctx->user_frac)) {
		/* Kernel load too high; try to reduce the burst size */
		if (io_ctx->poll_burst > 1)
			io_ctx->poll_burst--;
	} else {
		if (io_ctx->poll_burst < io_ctx->poll_burst_max)
			io_ctx->poll_burst++;
	}

	io_ctx->pending_polls--;
	pending_polls = io_ctx->pending_polls;

	if (pending_polls == 0) {
		/* We are done */
		io_ctx->phase = 0;
	} else {
		/*
		 * Last cycle was long and caused us to miss one or more
		 * hardclock ticks.  Restart processing, but slightly reduce
		 * the burst size to prevent this from happening again.
		 */
		io_ctx->poll_burst -= (io_ctx->poll_burst / 8);
		if (io_ctx->poll_burst < 1)
			io_ctx->poll_burst = 1;
		sched_iopoll(io_ctx);
		io_ctx->phase = 6;
	}

	crit_exit_quick(td);
}