Example 1
int
so_pr_ctloutput(struct socket *so, struct sockopt *sopt)
{
	struct netmsg_pr_ctloutput msg;
	int error;

	KKASSERT(!sopt->sopt_val || kva_p(sopt->sopt_val));

	if (sopt->sopt_dir == SOPT_SET && so->so_proto->pr_ctloutmsg != NULL) {
		struct netmsg_pr_ctloutput *amsg;

		/* Fast path: asynchronous pr_ctloutput */
		amsg = so->so_proto->pr_ctloutmsg(sopt);
		if (amsg != NULL) {
			netmsg_init(&amsg->base, so, &netisr_afree_rport, 0,
			    so->so_proto->pr_ctloutput);
			/* nm_flags and nm_sopt are setup by pr_ctloutmsg */
			lwkt_sendmsg(so->so_port, &amsg->base.lmsg);
			return 0;
		}
		/* FALLTHROUGH */
	}

	netmsg_init(&msg.base, so, &curthread->td_msgport,
		    0, so->so_proto->pr_ctloutput);
	msg.nm_flags = 0;
	msg.nm_sopt = sopt;
	error = lwkt_domsg(so->so_port, &msg.base.lmsg, 0);
	return (error);
}
Example 2
void
so_pru_send_async(struct socket *so, int flags, struct mbuf *m,
	    struct sockaddr *addr0, struct mbuf *control, struct thread *td)
{
	struct sockaddr *dupaddr = NULL;
	struct netmsg_pru_send *smsg;

	KASSERT(so->so_proto->pr_flags & PR_ASYNC_SEND,
	    ("async pru_send is not supported"));

	/*
	 * Duplicate the destination address, if any; PRUS_FREEADDR tells
	 * the consumer of the message to free the copy when done.
	 */
	if (addr0 != NULL) {
		dupaddr = kmalloc(addr0->sa_len, M_SONAME, M_WAITOK);
		memcpy(dupaddr, addr0, addr0->sa_len);
		flags |= PRUS_FREEADDR;
	}
	flags |= PRUS_NOREPLY;

	/* The send message is embedded in the mbuf header itself */
	smsg = &m->m_hdr.mh_sndmsg;
	netmsg_init(&smsg->base, so, &netisr_apanic_rport, 0,
	    so->so_proto->pr_usrreqs->pru_send);
	smsg->nm_flags = flags;
	smsg->nm_m = m;
	smsg->nm_addr = dupaddr;
	smsg->nm_control = control;
	smsg->nm_td = td;
	lwkt_sendmsg(so->so_port, &smsg->base.lmsg);
}
Example 3
/*
 * Queue an ACPI deferred-execution request: allocate a LWKT message,
 * record the callback and its context, and send it to the ACPI helper
 * thread.  The reply port (acpi_afree_rport) frees the message
 * automatically once it is replied to.
 */
ACPI_STATUS
AcpiOsExecute(ACPI_EXECUTE_TYPE Type, ACPI_OSD_EXEC_CALLBACK Function,
	      void *Context)
{
    struct acpi_task	*task;

    /* Only the known deferred-execution types are accepted */
    switch (Type) {
    case OSL_GLOBAL_LOCK_HANDLER:
    case OSL_NOTIFY_HANDLER:
    case OSL_GPE_HANDLER:
    case OSL_DEBUGGER_EXEC_THREAD:
    case OSL_EC_POLL_HANDLER:
    case OSL_EC_BURST_HANDLER:
	break;
    default:
	return_ACPI_STATUS (AE_BAD_PARAMETER);
    }

    /* May be called from interrupt context, hence M_INTWAIT */
    task = kmalloc(sizeof(*task), M_ACPITASK, M_INTWAIT | M_ZERO);
    task->at_type = Type;
    task->at_function = Function;
    task->at_context = Context;
    lwkt_initmsg(&task->at_msg, &acpi_afree_rport, 0);
    lwkt_sendmsg(&acpi_task_td->td_msgport, &task->at_msg);
    return_ACPI_STATUS (AE_OK);
}
Example 4
int
so_pru_attach_fast(struct socket *so, int proto, struct pru_attach_info *ai)
{
	struct netmsg_pru_attach *amsg;
	int error;

	error = so->so_proto->pr_usrreqs->pru_preattach(so, proto, ai);
	if (error)
		return error;

	/*
	 * Try to dispatch pru_attach asynchronously.  If the message
	 * cannot be allocated (M_NULLOK), fall back to the synchronous
	 * so_pru_attach() path instead of failing.
	 */
	amsg = kmalloc(sizeof(*amsg), M_LWKTMSG, M_WAITOK | M_NULLOK);
	if (amsg == NULL)
		return so_pru_attach(so, proto, NULL /* postattach */);

	netmsg_init(&amsg->base, so, &netisr_afree_rport, 0,
	    so->so_proto->pr_usrreqs->pru_attach);
	amsg->nm_proto = proto;
	amsg->nm_ai = NULL; /* postattach */
	lwkt_sendmsg(so->so_port, &amsg->base.lmsg);

	return 0;
}
Example 5
/*
 * Asynchronously abort and free a socket.  Only called from
 * soaborta(), which holds a reference on the socket; the reply
 * port releases that reference once the abort completes.
 */
void
so_pru_aborta(struct socket *so)
{
	struct netmsg_pru_abort *amsg;

	amsg = kmalloc(sizeof(*amsg), M_LWKTMSG, M_WAITOK | M_ZERO);
	netmsg_init(&amsg->base, so, &netisr_afree_free_so_rport, 0,
	    so->so_proto->pr_usrreqs->pru_abort);
	lwkt_sendmsg(so->so_port, &amsg->base.lmsg);
}
Example 6
/*
 * Record the requested timer task(s) on the tcpcb's timer message and
 * dispatch the message if it is idle.  Caller must be in a critical
 * section.
 */
static void
tcp_send_timermsg(struct tcpcb *tp, uint32_t task)
{
	struct netmsg_tcp_timer *timer_msg = tp->tt_msg;

	KKASSERT(timer_msg != NULL && timer_msg->tt_cpuid == mycpuid &&
		 timer_msg->tt_tcb != NULL);

	timer_msg->tt_tasks |= task;
	/* Only (re)send the message when it is not already in flight */
	if (timer_msg->tt_msg.lmsg.ms_flags & MSGF_DONE)
		lwkt_sendmsg(timer_msg->tt_msgport, &timer_msg->tt_msg.lmsg);
}
Example 7
void
so_pru_rcvd_async(struct socket *so)
{
	lwkt_msg_t rcvd_lmsg = &so->so_rcvd_msg.base.lmsg;

	KASSERT(so->so_proto->pr_flags & PR_ASYNC_RCVD,
	    ("async pru_rcvd is not supported"));

	/*
	 * The spinlock serializes the MSGF_DONE test; the embedded
	 * message is only resent when it is idle.
	 */
	spin_lock(&so->so_rcvd_spin);
	if (rcvd_lmsg->ms_flags & MSGF_DONE)
		lwkt_sendmsg(so->so_port, rcvd_lmsg);
	spin_unlock(&so->so_rcvd_spin);
}
Example 8
/*
 * Kick off transmission by (re)sending the per-cpu if_start message
 * for the cpu this function runs on.
 */
static void
lgue_start_ipifunc(void *arg)
{
	struct ifnet *ifp = arg;
	struct lwkt_msg *start_lmsg = &ifp->if_start_nmsg[mycpuid].lmsg;

	/* Only resend the message when it is not already in flight */
	crit_enter();
	if (start_lmsg->ms_flags & MSGF_DONE)
		lwkt_sendmsg(ifnet_portfn(mycpuid), start_lmsg);
	crit_exit();
}
Example 9
/*
 * Handle a UDP control-input (ICMP error) message and notify the
 * affected inpcb(s).  Always replies to the incoming netmsg.
 */
void
udp_ctlinput(netmsg_t msg)
{
	struct sockaddr *sa = msg->ctlinput.nm_arg;
	struct ip *ip = msg->ctlinput.nm_extra;
	int cmd = msg->ctlinput.nm_cmd, cpuid;
	inp_notify_t notify;
	struct in_addr faddr;

	/* Map the command to a notify callback; NULL means ignore */
	notify = udp_get_inpnotify(cmd, sa, &ip, &cpuid);
	if (notify == NULL)
		goto done;

	faddr = ((struct sockaddr_in *)sa)->sin_addr;
	if (ip) {
		const struct udphdr *uh;
		struct inpcb *inp;

		/*
		 * An embedded IP header lets us locate the exact
		 * connection; it can only be looked up on the cpu that
		 * owns the pcb hash entry.
		 */
		if (cpuid != mycpuid)
			goto done;

		/* UDP header follows the IP header (ip_hl in 32-bit words) */
		uh = (const struct udphdr *)((caddr_t)ip + (ip->ip_hl << 2));
		inp = in_pcblookup_hash(&udbinfo[mycpuid], faddr, uh->uh_dport,
					ip->ip_src, uh->uh_sport, 0, NULL);
		if (inp != NULL && inp->inp_socket != NULL)
			notify(inp, inetctlerrmap[cmd]);
	} else if (msg->ctlinput.nm_direct) {
		/*
		 * Direct mode: notify on this cpu only.  cpuid == ncpus
		 * appears to mean "any cpu" — TODO confirm against
		 * udp_get_inpnotify's contract.
		 */
		if (cpuid != ncpus && cpuid != mycpuid)
			goto done;
		if (mycpuid >= ncpus2)
			goto done;

		in_pcbnotifyall(&udbinfo[mycpuid], faddr, inetctlerrmap[cmd],
		    notify);
	} else {
		struct netmsg_udp_notify *nm;

		/*
		 * No header and not direct: fan the notification out to
		 * all cpus, starting from netisr 0.  The reply port
		 * frees the allocated message automatically.
		 */
		ASSERT_IN_NETISR(0);
		nm = kmalloc(sizeof(*nm), M_LWKTMSG, M_INTWAIT);
		netmsg_init(&nm->base, NULL, &netisr_afree_rport,
			    0, udp_notifyall_oncpu);
		nm->nm_faddr = faddr;
		nm->nm_arg = inetctlerrmap[cmd];
		nm->nm_notify = notify;
		lwkt_sendmsg(netisr_cpuport(0), &nm->base.lmsg);
	}
done:
	lwkt_replymsg(&msg->lmsg, 0);
}
Example 10
void
ip_dn_packet_free(struct dn_pkt *pkt)
{
	struct mbuf *pkt_m = pkt->dn_m;
	struct netmsg_packet *pmsg;

	M_ASSERTPKTHDR(pkt_m);
	KASSERT(pkt_m->m_pkthdr.fw_flags & DUMMYNET_MBUF_TAGGED,
		("mbuf is not tagged for dummynet!"));

	/*
	 * Ask the packet's originating port to free it, using the
	 * netmsg embedded in the mbuf header.
	 */
	pmsg = &pkt_m->m_hdr.mh_netmsg;
	netmsg_init(&pmsg->base, NULL, &netisr_apanic_rport, 0,
	    ip_dn_freepkt_dispatch);
	pmsg->nm_packet = pkt_m;
	lwkt_sendmsg(pkt->msgport, &pmsg->base.lmsg);
}
Example 11
void
ip_dn_queue(struct mbuf *m)
{
	struct netmsg_packet *pmsg;

	M_ASSERTPKTHDR(m);
	KASSERT(m->m_pkthdr.fw_flags & DUMMYNET_MBUF_TAGGED,
		("mbuf is not tagged for dummynet!"));

	/*
	 * Hand the packet to the dummynet cpu via the netmsg embedded
	 * in the mbuf header.
	 */
	pmsg = &m->m_hdr.mh_netmsg;
	netmsg_init(&pmsg->base, NULL, &netisr_apanic_rport, 0,
	    ip_dn_dispatch);
	pmsg->nm_packet = m;
	lwkt_sendmsg(netisr_cpuport(ip_dn_cpu), &pmsg->base.lmsg);
}
Example 12
/*
 * Issue an asynchronous pru_connect.  Falls back to the synchronous
 * so_pru_connect() when the message cannot be allocated.  Returns 0
 * on successful dispatch or the pru_preconnect/fallback error.
 */
int
so_pru_connect_async(struct socket *so, struct sockaddr *nam, struct thread *td)
{
	struct netmsg_pru_connect *msg;
	int error, flags;

	KASSERT(so->so_proto->pr_usrreqs->pru_preconnect != NULL,
	    ("async pru_connect is not supported"));

	/* NOTE: sockaddr immediately follows netmsg */
	msg = kmalloc(sizeof(*msg) + nam->sa_len, M_LWKTMSG,
	    M_WAITOK | M_NULLOK);
	if (msg == NULL) {
		/*
		 * Fail to allocate message; fallback to
		 * synchronized pru_connect.
		 */
		return so_pru_connect(so, nam, td);
	}

	/* Allocation first: preconnect side effects need no undo here */
	error = so->so_proto->pr_usrreqs->pru_preconnect(so, nam, td);
	if (error) {
		kfree(msg, M_LWKTMSG);
		return error;
	}

	flags = PRUC_ASYNC;
	/* Hold the thread if the protocol needs it alive until completion */
	if (td != NULL && (so->so_proto->pr_flags & PR_ACONN_HOLDTD)) {
		lwkt_hold(td);
		flags |= PRUC_HELDTD;
	}

	/* Reply port frees the message (and the trailing sockaddr copy) */
	netmsg_init(&msg->base, so, &netisr_afree_rport, 0,
	    so->so_proto->pr_usrreqs->pru_connect);
	msg->nm_nam = (struct sockaddr *)(msg + 1);
	memcpy(msg->nm_nam, nam, nam->sa_len);
	msg->nm_td = td;
	msg->nm_m = NULL;
	msg->nm_sndflags = 0;
	msg->nm_flags = flags;
	lwkt_sendmsg(so->so_port, &msg->base.lmsg);
	return 0;
}
Example 13
/*
 * Issue an asynchronous pru_send using the netmsg embedded in the
 * mbuf header.  Falls back to the synchronous so_pru_send() when the
 * address copy cannot be allocated.
 */
void
so_pru_send_async(struct socket *so, int flags, struct mbuf *m,
    struct sockaddr *addr0, struct mbuf *control, struct thread *td)
{
	struct netmsg_pru_send *msg;
	struct sockaddr *addr = NULL;

	KASSERT(so->so_proto->pr_flags & PR_ASYNC_SEND,
	    ("async pru_send is not supported"));

	if (addr0 != NULL) {
		/* Copy the address; PRUS_FREEADDR tells the target to free it */
		addr = kmalloc(addr0->sa_len, M_SONAME, M_WAITOK | M_NULLOK);
		if (addr == NULL) {
			/*
			 * Fail to allocate address; fallback to
			 * synchronized pru_send.
			 */
			so_pru_send(so, flags, m, addr0, control, td);
			return;
		}
		memcpy(addr, addr0, addr0->sa_len);
		flags |= PRUS_FREEADDR;
	}
	flags |= PRUS_NOREPLY;

	/* Hold the thread if the protocol needs it alive until completion */
	if (td != NULL && (so->so_proto->pr_flags & PR_ASEND_HOLDTD)) {
		lwkt_hold(td);
		flags |= PRUS_HELDTD;
	}

	msg = &m->m_hdr.mh_sndmsg;
	netmsg_init(&msg->base, so, &netisr_apanic_rport,
		    0, so->so_proto->pr_usrreqs->pru_send);
	msg->nm_flags = flags;
	msg->nm_m = m;
	msg->nm_addr = addr;
	msg->nm_control = control;
	msg->nm_td = td;
	lwkt_sendmsg(so->so_port, &msg->base.lmsg);
}
Example 14
static void
schedpoll_oncpu(netmsg_t msg)
{
	lwkt_msg_t lmsg = &msg->lmsg;

	/* Only dispatch when the message is not already in flight */
	if (lmsg->ms_flags & MSGF_DONE)
		lwkt_sendmsg(cpu_portfn(mycpuid), lmsg);
}
Example 15
static __inline void
ifpoll_sendmsg_oncpu(netmsg_t msg)
{
	lwkt_msg_t lmsg = &msg->lmsg;

	/* Only dispatch when the message is not already in flight */
	if (lmsg->ms_flags & MSGF_DONE)
		lwkt_sendmsg(netisr_portfn(mycpuid), lmsg);
}