/**
 * Handle data on netgraph hooks.
 *
 * Frame processing is deferred to a taskqueue because this might be called
 * with non-sleepable locks held and code paths inside the virtual switch
 * might sleep.
 *
 * @param hook  Netgraph hook the data arrived on (input or output hook).
 * @param item  Netgraph queue item carrying the mbuf; always consumed here.
 * @return 0 on success (frame consumed or queued), or the error from
 *         ether_output_frame() on the pass-through output path.
 */
static int ng_vboxnetflt_rcvdata(hook_p hook, item_p item)
{
    const node_p node = NG_HOOK_NODE(hook);
    PVBOXNETFLTINS pThis = NG_NODE_PRIVATE(node);
    struct ifnet *ifp = pThis->u.s.ifp;
    struct mbuf *m;
    struct m_tag *mtag;
    bool fActive;
    bool fSkip;    /* frame carried our tag => re-injected by us, don't loop it */

    VBOXCURVNET_SET(ifp->if_vnet);
    fActive = vboxNetFltTryRetainBusyActive(pThis);
    NGI_GET_M(item, m);
    NG_FREE_ITEM(item);

    /*
     * Locate the tag to see if processing should be skipped for this frame.
     * Latch the result into fSkip BEFORE freeing the tag: reading the value
     * of a freed pointer afterwards would be undefined behavior.
     */
    mtag = m_tag_locate(m, MTAG_VBOX, PACKET_TAG_VBOX, NULL);
    fSkip = (mtag != NULL);
    if (mtag != NULL)
    {
        m_tag_unlink(m, mtag);
        m_tag_free(mtag);
        mtag = NULL;    /* dangling after m_tag_free(); never touch again */
    }

    /*
     * Handle incoming hook. This is connected to the
     * input path of the interface, thus handling incoming frames.
     */
    if (pThis->u.s.input == hook)
    {
        if (fSkip || !fActive)
        {
            /* Tagged (our own re-injection) or filter inactive: pass straight up. */
            ether_demux(ifp, m);
            if (fActive)
                vboxNetFltRelease(pThis, true /*fBusy*/);
            VBOXCURVNET_RESTORE();
            return (0);
        }
        /* Queue for deferred processing; inq uses a spin mutex. */
        mtx_lock_spin(&pThis->u.s.inq.ifq_mtx);
        _IF_ENQUEUE(&pThis->u.s.inq, m);
        mtx_unlock_spin(&pThis->u.s.inq.ifq_mtx);
        taskqueue_enqueue_fast(taskqueue_fast, &pThis->u.s.tskin);
    }
    /*
     * Handle mbufs on the outgoing hook, frames going to the interface.
     */
    else if (pThis->u.s.output == hook)
    {
        if (fSkip || !fActive)
        {
            /* Tagged or inactive: hand the frame to the interface directly. */
            int rc = ether_output_frame(ifp, m);
            if (fActive)
                vboxNetFltRelease(pThis, true /*fBusy*/);
            VBOXCURVNET_RESTORE();
            return rc;
        }
        /* Queue for deferred processing; outq uses a spin mutex. */
        mtx_lock_spin(&pThis->u.s.outq.ifq_mtx);
        _IF_ENQUEUE(&pThis->u.s.outq, m);
        mtx_unlock_spin(&pThis->u.s.outq.ifq_mtx);
        taskqueue_enqueue_fast(taskqueue_fast, &pThis->u.s.tskout);
    }
    else
    {
        /* Unknown hook: drop the frame. */
        m_freem(m);
    }

    if (fActive)
        vboxNetFltRelease(pThis, true /*fBusy*/);
    VBOXCURVNET_RESTORE();
    return (0);
}
/*
 * pfil(9) output hook for ipfw: run an outgoing packet through the firewall.
 *
 * On return, *m0 is either the (possibly modified) packet to continue with,
 * or NULL when the packet was consumed (dummynet/divert) or dropped.
 * Returns 0 to let the packet proceed, EACCES when it was dropped.
 */
int ipfw_check_out(void *arg, struct mbuf **m0, struct ifnet *ifp, int dir, struct inpcb *inp)
{
    struct ip_fw_args args;
    struct m_tag *dn_tag;
    int ipfw = 0;
    int divert;
#ifdef IPFIREWALL_FORWARD
    struct m_tag *fwd_tag;
#endif

    KASSERT(dir == PFIL_OUT, ("ipfw_check_out wrong direction!"));

    /* Firewall disabled: pass everything untouched. */
    if (!fw_enable)
        goto pass;

    bzero(&args, sizeof(args));

    /*
     * A dummynet tag means this packet is re-entering after being delayed;
     * resume rule processing from the recorded rule, then strip the tag.
     */
    dn_tag = m_tag_find(*m0, PACKET_TAG_DUMMYNET, NULL);
    if (dn_tag != NULL) {
        struct dn_pkt_tag *dt;

        dt = (struct dn_pkt_tag *)(dn_tag+1);
        args.rule = dt->rule;

        m_tag_delete(*m0, dn_tag);
    }

again:
    /* ipfw_chk() may replace the mbuf; always reload *m0 afterwards. */
    args.m = *m0;
    args.oif = ifp;
    args.inp = inp;
    ipfw = ipfw_chk(&args);
    *m0 = args.m;

    /* Denied outright, or the packet was freed inside ipfw_chk(). */
    if ((ipfw & IP_FW_PORT_DENY_FLAG) || *m0 == NULL)
        goto drop;

    /* Accepted with no next-hop override: let it through. */
    if (ipfw == 0 && args.next_hop == NULL)
        goto pass;

    /* Matched a dummynet pipe/queue rule: hand the packet to dummynet. */
    if (DUMMYNET_LOADED && (ipfw & IP_FW_PORT_DYNT_FLAG) != 0) {
        ip_dn_io_ptr(*m0, ipfw & 0xffff, DN_TO_IP_OUT, &args);
        *m0 = NULL;
        return 0; /* packet consumed */
    }

    /*
     * Divert or tee rule: a tee clones the packet (third arg 1) and keeps
     * the original moving; a plain divert consumes it. If the diversion
     * did not consume the packet, re-run the ruleset from args.rule.
     */
    if (ipfw != 0 && (ipfw & IP_FW_PORT_DYNT_FLAG) == 0) {
        if ((ipfw & IP_FW_PORT_TEE_FLAG) != 0)
            divert = ipfw_divert(m0, DIV_DIR_OUT, 1);
        else
            divert = ipfw_divert(m0, DIV_DIR_OUT, 0);

        if (divert) {
            *m0 = NULL;
            return 0; /* packet consumed */
        } else
            goto again; /* continue with packet */
    }

#ifdef IPFIREWALL_FORWARD
    /*
     * Rule requested forwarding to args.next_hop: attach (or replace) an
     * IPFORWARD tag holding the next-hop sockaddr_in for the output path.
     */
    if (ipfw == 0 && args.next_hop != NULL) {
        /* Overwrite existing tag. */
        fwd_tag = m_tag_find(*m0, PACKET_TAG_IPFORWARD, NULL);
        if (fwd_tag == NULL) {
            fwd_tag = m_tag_get(PACKET_TAG_IPFORWARD, sizeof(struct sockaddr_in), M_NOWAIT);
            if (fwd_tag == NULL)
                goto drop;    /* allocation failed: drop rather than mis-route */
        } else
            m_tag_unlink(*m0, fwd_tag);
        bcopy(args.next_hop, (fwd_tag+1), sizeof(struct sockaddr_in));
        m_tag_prepend(*m0, fwd_tag);

        /* Next hop is a local address: mark for fast local re-input. */
        if (in_localip(args.next_hop->sin_addr))
            (*m0)->m_flags |= M_FASTFWD_OURS;
        goto pass;
    }
#endif
    /*
     * NOTE(review): without IPFIREWALL_FORWARD compiled in, a rule that set
     * next_hop falls through to drop here — presumably intentional, since
     * the forward request cannot be honored.
     */

drop:
    if (*m0)
        m_freem(*m0);
    *m0 = NULL;
    return (EACCES);
pass:
    return 0; /* not filtered */
}