void
inject(char *msg, int len)
{
	struct mbuf *m = m_devget(msg, len, 0, NULL, NULL);

	if (m == NULL)		/* m_devget() fails under mbuf exhaustion */
		return;

	enqueue(&ipintrq, m);
	updatetime();
	ipintr();
}
Static void
udbp_in_transfer_cb(usbd_xfer_handle xfer, usbd_private_handle priv,
		    usbd_status err)
{
	udbp_p sc = priv;		/* XXX see priv above */
	int s;
	int len;
	struct mbuf *m;

	if (err) {
		if (err != USBD_CANCELLED) {
			DPRINTF(("%s: bulk-in transfer failed: %s\n",
				USBDEVNAME(sc->sc_dev), usbd_errstr(err)));
		} else {
			/* USBD_CANCELLED happens at unload of the driver */
			return;
		}

		/* Transfer has failed, packet is not received */
	} else {
		len = xfer->actlen;

		s = splimp();		/* block network stuff too */
		if (sc->hook) {
			/* get packet from device and send on */
			m = m_devget(sc->sc_bulkin_buffer, len, 0, NULL, NULL);
			NG_SEND_DATA_ONLY(err, sc->hook, m);
		}
		splx(s);
	}

	/* schedule the next in transfer */
	udbp_setup_in_transfer(sc);
}
struct socket *
listenon(unsigned short port)
{
	struct socket *so = NULL;
	struct sockaddr_in addr;
	struct mbuf *nam;

	socreate(AF_INET, &so, SOCK_STREAM, 0);

	bzero(&addr, sizeof addr);
	addr.sin_len = sizeof addr;
	addr.sin_family = AF_INET;
	addr.sin_port = htons(port);

	nam = m_devget((caddr_t)&addr, sizeof addr, 0, NULL, NULL);
	sobind(so, nam);
	solisten(so, 5);
	return so;
}
void
handshake(void)
{
	int port = 1234;
	struct socket *so = NULL;
	struct sockaddr_in addr;
	struct mbuf *nam;

	listenon(port);

	socreate(AF_INET, &so, SOCK_STREAM, 0);

	bzero(&addr, sizeof addr);
	addr.sin_len = sizeof addr;
	addr.sin_family = AF_INET;
	addr.sin_port = htons(port);
	addr.sin_addr.s_addr = htonl(0x7f000001);	/* 127.0.0.1 */

	nam = m_devget((caddr_t)&addr, sizeof addr, 0, NULL, NULL);
	soconnect(so, nam);
	ipintr();
}
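/*
 * Every call site in this collection follows the same basic pattern:
 * m_devget() copies a flat buffer into a freshly allocated mbuf chain,
 * which is then handed to some input path (ipintrq, if_input, netisr,
 * a socket buffer, ...).  A minimal sketch of that pattern, assuming the
 * classic five-argument BSD m_devget() used above and delivery through
 * ifp->if_input; this helper is illustrative and not part of any of the
 * drivers quoted here.
 */
static void
deliver_flat_buffer(struct ifnet *ifp, char *buf, int len)
{
	struct mbuf *m;

	/* m_devget() allocates the chain and copies; NULL means no mbufs. */
	m = m_devget(buf, len, 0, ifp, NULL);
	if (m == NULL) {
		ifp->if_iqdrops++;	/* out of mbufs: count the drop */
		return;
	}
	(*ifp->if_input)(ifp, m);	/* hand the chain to the stack */
}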
/**
 * @group dTSEC FMan PORT routines.
 * @{
 */
static e_RxStoreResponse
dtsec_im_fm_port_rx_callback(t_Handle app, uint8_t *data, uint16_t length,
    uint16_t status, uint8_t position, t_Handle buf_context)
{
	struct dtsec_softc *sc;
	struct mbuf *m;

	/* TODO STATUS / Position checking */

	sc = app;
	m = m_devget(data, length, 0, sc->sc_ifnet, NULL);
	if (m)
		(*sc->sc_ifnet->if_input)(sc->sc_ifnet, m);

	XX_FreeSmart(data);

	return (e_RX_STORE_RESPONSE_CONTINUE);
}
/*
 * Function: ip_pkt_to_mbuf
 * Purpose:
 *   Put the given IP packet into an mbuf and calculate the
 *   IP checksum.
 */
struct mbuf *
ip_pkt_to_mbuf(caddr_t pkt, int pktsize)
{
	struct ip	*ip;
	struct mbuf	*m;

	m = (struct mbuf *)m_devget(pkt, pktsize, 0, NULL, NULL);
	if (m == NULL) {
		printf("dhcp: ip_pkt_to_mbuf: m_devget failed\n");
		return NULL;
	}
	m->m_flags |= M_BCAST;

	/* Compute the checksum */
	ip = mtod(m, struct ip *);
	ip->ip_sum = 0;
	ip->ip_sum = in_cksum(m, sizeof(struct ip));

	return (m);
}
static void
ntb_memcpy_rx(struct ntb_transport_qp *qp, struct ntb_queue_entry *entry,
    void *offset)
{
	struct ifnet *ifp = entry->cb_data;
	unsigned int len = entry->len;
	struct mbuf *m;

	CTR2(KTR_NTB, "RX: copying %d bytes from offset %p", len, offset);
	m = m_devget(offset, len, 0, ifp, NULL);
	m->m_pkthdr.csum_flags = CSUM_IP_CHECKED | CSUM_IP_VALID;

	entry->buf = (void *)m;

	/* Ensure that the data is globally visible before clearing the flag */
	wmb();

	CTR2(KTR_NTB, "RX: copied entry %p to mbuf %p.", entry, m);
	ntb_rx_copy_callback(qp, entry);
}
static void
ntb_rx_copy_task(struct ntb_transport_qp *qp, struct ntb_queue_entry *entry,
    void *offset)
{
	struct ifnet *ifp = entry->cb_data;
	unsigned int len = entry->len;
	struct mbuf *m;

	CTR2(KTR_NTB, "RX: copying %d bytes from offset %p", len, offset);
	m = m_devget(offset, len, 0, ifp, NULL);
	m->m_pkthdr.csum_flags = CSUM_IP_CHECKED | CSUM_IP_VALID;

	entry->buf = (void *)m;

	CTR2(KTR_NTB, "RX: copied entry %p to mbuf %p. Adding entry to rx_free_q",
	    entry, m);
	ntb_list_add(&qp->ntb_rx_free_q_lock, entry, &qp->rx_free_q);

	taskqueue_enqueue(taskqueue_swi, &qp->rx_completion_task);
}
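/*
 * Note on the two NTB receive paths above: unlike most call sites in this
 * collection, they dereference the m_devget() result without a NULL check,
 * so an mbuf-allocation failure would fault here.  They also pre-mark the
 * mbuf with CSUM_IP_CHECKED | CSUM_IP_VALID so the IP input path skips
 * checksum verification, on the assumption that the NTB link is reliable.
 */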
static void
if_netmap_receive(void *arg)
{
	struct if_netmap_softc *sc;
	struct ifnet *ifp;
	struct uhi_pollfd pfd;
	struct mbuf *m;
	struct if_netmap_bufinfo *bi;
	void *slotbuf;
	uint32_t slotindex;
	uint32_t pktlen;
	uint32_t cur;
	uint32_t avail;
	uint32_t reserved;
	uint32_t returned;
	uint32_t new_reserved;
	unsigned int n;
	int rv;
	int done;

	/* Zero-copy receive
	 *
	 * A packet header mbuf is allocated for each received netmap
	 * buffer, and the netmap buffer is attached to this mbuf as
	 * external storage, along with a free routine and piece of context
	 * that enables the free routine to move the netmap buffer on its
	 * way back to the receive ring.  The per-buffer context objects
	 * (struct if_netmap_bufinfo) are managed by this driver.
	 *
	 * When the mbuf layer calls the free routine for an mbuf-attached
	 * netmap buffer, its associated context object is added to a list
	 * that is part of the pool of those objects.  On each pass through
	 * the receive loop below, all of the context objects that have been
	 * returned to the list since the last pass are processed, and their
	 * associated netmap buffers are returned to the receive ring.
	 *
	 * With this approach, a given netmap buffer may be available for
	 * netmap's use on the ring, may be newly available for our
	 * consumption on the ring, may have been passed to the stack for
	 * processing and not yet returned, or may have been returned to us
	 * from the stack but not yet returned to the netmap ring.
	 */

	sc = (struct if_netmap_softc *)arg;
	ifp = sc->ifp;

	if (sc->cfg->cpu >= 0)
		sched_bind(sc->rx_thread.thr, sc->cfg->cpu);

	rv = if_netmap_rxsync(sc->nm_host_ctx, NULL, NULL, NULL);
	if (rv == -1)
		printf("could not sync rx descriptors before receive loop\n");

	reserved = if_netmap_rxreserved(sc->nm_host_ctx);
	sc->hw_rx_rsvd_begin = if_netmap_rxcur(sc->nm_host_ctx);

	sc->rx_thread.last_stop_check = ticks;
	done = 0;
	for (;;) {
		while (!done &&
		    (0 == (avail = if_netmap_rxavail(sc->nm_host_ctx)))) {
			memset(&pfd, 0, sizeof pfd);
			pfd.fd = sc->fd;
			pfd.events = UHI_POLLIN;

			rv = uhi_poll(&pfd, 1, IF_NETMAP_THREAD_STOP_CHECK_MS);
			if (rv == 0) {
				done = if_netmap_stoppable_thread_check(&sc->rx_thread);
			} else if (rv == -1)
				printf("error from poll for receive\n");
		}

		if (ticks - sc->rx_thread.last_stop_check >= sc->stop_check_ticks)
			done = if_netmap_stoppable_thread_check(&sc->rx_thread);

		if (done)
			break;

		cur = if_netmap_rxcur(sc->nm_host_ctx);

		new_reserved = 0;
		for (n = 0; n < avail; n++) {
			slotbuf = if_netmap_rxslot(sc->nm_host_ctx, &cur,
			    &pktlen, &slotindex);

			ifp->if_ipackets++;
			ifp->if_ibytes += pktlen;

			bi = if_netmap_bufinfo_alloc(&sc->rx_bufinfo, slotindex);
			if (NULL == bi) {
				/* copy receive */

				ifp->if_icopies++;

				/* could streamline this a little since we
				 * know the data is going to fit in a
				 * cluster
				 */
				m = m_devget(slotbuf, pktlen, ETHER_ALIGN,
				    sc->ifp, NULL);

				/* Recover this buffer at the far end of the
				 * reserved trail from prior zero-copy
				 * activity.
				 */
				if_netmap_rxsetslot(sc->nm_host_ctx,
				    &sc->hw_rx_rsvd_begin, slotindex);
			} else {
				/* zero-copy receive */

				ifp->if_izcopies++;

				m = m_gethdr(M_DONTWAIT, MT_DATA);
				if (NULL == m) {
					if_netmap_bufinfo_unalloc(&sc->rx_bufinfo);
					if_netmap_rxsetslot(sc->nm_host_ctx,
					    &sc->hw_rx_rsvd_begin, slotindex);
				} else {
					/* XXX presumably in this path the
					 * IP header isn't aligned on a
					 * 32-bit boundary because the
					 * ethernet header is and there is
					 * no ETHER_ALIGN adjustment?  this
					 * would be an issue for ip_src and
					 * ip_dst on platforms that don't
					 * support 16-bit aligned access to
					 * 32-bit values.
					 */
					m->m_pkthdr.len = m->m_len = pktlen;
					m->m_pkthdr.rcvif = sc->ifp;
					m->m_ext.ref_cnt = &bi->refcnt;
					m_extadd(m, slotbuf,
					    if_netmap_rxbufsize(sc->nm_host_ctx),
					    if_netmap_free, sc, bi, 0,
					    EXT_EXTREF);
					new_reserved++;
				}
			}

			if (m) {
				sc->ifp->if_input(sc->ifp, m);
			} else {
				ifp->if_iqdrops++;
			}
		}
		avail -= n;
		reserved += new_reserved;

		/* Return any netmap buffers freed by the stack to the ring */
		returned = if_netmap_sweep_trail(sc);
		reserved -= returned;

		rv = if_netmap_rxsync(sc->nm_host_ctx, &avail, &cur, &reserved);
		if (rv == -1)
			printf("could not sync rx descriptors after receive\n");
	}

	if_netmap_stoppable_thread_done(&sc->rx_thread);
}
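/*
 * A minimal sketch (not from the driver above) of the zero-copy attach
 * pattern the comment in if_netmap_receive() describes: wrap an externally
 * owned buffer in a packet-header mbuf with m_extadd() and let a free
 * routine recycle the buffer once the stack releases the mbuf.  The
 * eight-argument m_extadd() form mirrors the call in the driver above and
 * is version-specific; my_ext_free() and the caller-supplied refcnt are
 * illustrative stand-ins for if_netmap_free() and the bufinfo context.
 */
static void
my_ext_free(void *arg1, void *arg2)
{
	/* Return the buffer (arg2) to its owner; arg1 carries driver state. */
}

static struct mbuf *
wrap_external_buffer(struct ifnet *ifp, caddr_t buf, u_int bufsize,
    u_int pktlen, u_int *refcnt)
{
	struct mbuf *m;

	m = m_gethdr(M_DONTWAIT, MT_DATA);
	if (m == NULL)
		return (NULL);		/* caller recycles buf itself */

	m->m_pkthdr.len = m->m_len = pktlen;
	m->m_pkthdr.rcvif = ifp;
	m->m_ext.ref_cnt = refcnt;	/* caller-managed reference count */
	m_extadd(m, buf, bufsize, my_ext_free, NULL, NULL, 0, EXT_EXTREF);
	return (m);
}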
/*
 * icintr()
 */
static void
icintr(device_t dev, int event, char *ptr)
{
	struct ic_softc *sc = (struct ic_softc *)device_get_softc(dev);
	int unit = device_get_unit(dev);
	int len;
	struct mbuf *top;

	crit_enter();

	switch (event) {
	case INTR_GENERAL:
	case INTR_START:
		sc->ic_cp = sc->ic_ifbuf;
		sc->ic_xfercnt = 0;
		break;

	case INTR_STOP:
		/* if any error occurred during the transfer,
		 * drop the packet */
		if (sc->ic_iferrs)
			goto err;

		if ((len = sc->ic_xfercnt) == 0)
			break;			/* ignore */

		if (len <= ICHDRLEN)
			goto err;

		len -= ICHDRLEN;
		sc->ic_if.if_ipackets++;
		sc->ic_if.if_ibytes += len;

		BPF_TAP(&sc->ic_if, sc->ic_ifbuf, len + ICHDRLEN);

		top = m_devget(sc->ic_ifbuf + ICHDRLEN, len, 0, &sc->ic_if, 0);
		if (top)
			netisr_queue(NETISR_IP, top);
		break;

	err:
		kprintf("ic%d: errors (%d)!\n", unit, sc->ic_iferrs);

		sc->ic_iferrs = 0;	/* reset error count */
		sc->ic_if.if_ierrors++;
		break;

	case INTR_RECEIVE:
		if (sc->ic_xfercnt >= sc->ic_if.if_mtu + ICHDRLEN) {
			sc->ic_iferrs++;
		} else {
			*sc->ic_cp++ = *ptr;
			sc->ic_xfercnt++;
		}
		break;

	case INTR_NOACK:		/* xfer terminated by master */
		break;

	case INTR_TRANSMIT:
		*ptr = 0xff;		/* XXX */
		break;

	case INTR_ERROR:
		sc->ic_iferrs++;
		break;

	default:
		panic("%s: unknown event (%d)!", __func__, event);
	}

	crit_exit();
}
/*
 * Receive interrupt.
 */
int
cas_rint(struct cas_softc *sc)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	bus_space_tag_t t = sc->sc_memt;
	bus_space_handle_t h = sc->sc_memh;
	struct cas_rxsoft *rxs;
	struct mbuf *m;
	u_int64_t word[4];
	int len, off, idx;
	int i, skip;
	caddr_t cp;

	for (i = sc->sc_rxptr;; i = CAS_NEXTRX(i + skip)) {
		CAS_CDRXCSYNC(sc, i,
		    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);

		word[0] = CAS_DMA_READ(sc->sc_rxcomps[i].cc_word[0]);
		word[1] = CAS_DMA_READ(sc->sc_rxcomps[i].cc_word[1]);
		word[2] = CAS_DMA_READ(sc->sc_rxcomps[i].cc_word[2]);
		word[3] = CAS_DMA_READ(sc->sc_rxcomps[i].cc_word[3]);

		/* Stop if the hardware still owns the descriptor. */
		if ((word[0] & CAS_RC0_TYPE) == 0 || word[3] & CAS_RC3_OWN)
			break;

		len = CAS_RC1_HDR_LEN(word[1]);
		if (len > 0) {
			off = CAS_RC1_HDR_OFF(word[1]);
			idx = CAS_RC1_HDR_IDX(word[1]);
			rxs = &sc->sc_rxsoft[idx];

			DPRINTF(sc, ("hdr at idx %d, off %d, len %d\n",
			    idx, off, len));

			bus_dmamap_sync(sc->sc_dmatag, rxs->rxs_dmamap, 0,
			    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);

			cp = rxs->rxs_kva + off * 256;
			m = m_devget(cp, len + ETHER_ALIGN, 0, ifp, NULL);

			if (word[0] & CAS_RC0_RELEASE_HDR)
				cas_add_rxbuf(sc, idx);

			if (m != NULL) {
				m_adj(m, ETHER_ALIGN);

#if NBPFILTER > 0
				/*
				 * Pass this up to any BPF listeners, but only
				 * pass it up the stack if it's for us.
				 */
				if (ifp->if_bpf)
					bpf_mtap(ifp->if_bpf, m,
					    BPF_DIRECTION_IN);
#endif /* NBPFILTER > 0 */

				ifp->if_ipackets++;
				ether_input_mbuf(ifp, m);
			} else
				ifp->if_ierrors++;
		}

		len = CAS_RC0_DATA_LEN(word[0]);
		if (len > 0) {
			off = CAS_RC0_DATA_OFF(word[0]);
			idx = CAS_RC0_DATA_IDX(word[0]);
			rxs = &sc->sc_rxsoft[idx];

			DPRINTF(sc, ("data at idx %d, off %d, len %d\n",
			    idx, off, len));

			bus_dmamap_sync(sc->sc_dmatag, rxs->rxs_dmamap, 0,
			    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);

			/* XXX We should not be copying the packet here. */
			cp = rxs->rxs_kva + off;
			m = m_devget(cp, len + ETHER_ALIGN, 0, ifp, NULL);

			if (word[0] & CAS_RC0_RELEASE_DATA)
				cas_add_rxbuf(sc, idx);

			if (m != NULL) {
				m_adj(m, ETHER_ALIGN);

#if NBPFILTER > 0
				/*
				 * Pass this up to any BPF listeners, but only
				 * pass it up the stack if it's for us.
				 */
				if (ifp->if_bpf)
					bpf_mtap(ifp->if_bpf, m,
					    BPF_DIRECTION_IN);
#endif /* NBPFILTER > 0 */

				ifp->if_ipackets++;
				ether_input_mbuf(ifp, m);
			} else
				ifp->if_ierrors++;
		}

		if (word[0] & CAS_RC0_SPLIT)
			printf("split packet\n");

		skip = CAS_RC0_SKIP(word[0]);
	}

	while (sc->sc_rxptr != i) {
		sc->sc_rxcomps[sc->sc_rxptr].cc_word[0] = 0;
		sc->sc_rxcomps[sc->sc_rxptr].cc_word[1] = 0;
		sc->sc_rxcomps[sc->sc_rxptr].cc_word[2] = 0;
		sc->sc_rxcomps[sc->sc_rxptr].cc_word[3] =
		    CAS_DMA_WRITE(CAS_RC3_OWN);
		CAS_CDRXCSYNC(sc, sc->sc_rxptr,
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

		sc->sc_rxptr = CAS_NEXTRX(sc->sc_rxptr);
	}

	bus_space_write_4(t, h, CAS_RX_COMP_TAIL, sc->sc_rxptr);

	DPRINTF(sc, ("cas_rint: done sc->rxptr %d, complete %d\n",
	    sc->sc_rxptr, bus_space_read_4(t, h, CAS_RX_COMPLETION)));

	return (1);
}
/*
 * Incoming messages get passed up to the control socket,
 * unless they are for us specifically (socket_type).
 */
static int
ngs_rcvmsg(node_p node, item_p item, hook_p lasthook)
{
	struct ngsock *const priv = NG_NODE_PRIVATE(node);
	struct ngpcb *const pcbp = priv->ctlsock;
	struct socket *so;
	struct sockaddr_ng addr;
	struct ng_mesg *msg;
	struct mbuf *m;
	ng_ID_t retaddr = NGI_RETADDR(item);
	int addrlen;
	int error = 0;

	NGI_GET_MSG(item, msg);
	NG_FREE_ITEM(item);

	/*
	 * Only allow mesgs to be passed if we have the control socket.
	 * Data sockets can only support the generic messages.
	 */
	if (pcbp == NULL) {
		TRAP_ERROR;
		NG_FREE_MSG(msg);
		return (EINVAL);
	}
	so = pcbp->ng_socket;

#ifdef TRACE_MESSAGES
	kprintf("[%x]:---------->[socket]: c=<%d>cmd=%x(%s) f=%x #%d\n",
	    retaddr, msg->header.typecookie, msg->header.cmd,
	    msg->header.cmdstr, msg->header.flags, msg->header.token);
#endif

	if (msg->header.typecookie == NGM_SOCKET_COOKIE) {
		switch (msg->header.cmd) {
		case NGM_SOCK_CMD_NOLINGER:
			priv->flags |= NGS_FLAG_NOLINGER;
			break;
		case NGM_SOCK_CMD_LINGER:
			priv->flags &= ~NGS_FLAG_NOLINGER;
			break;
		default:
			error = EINVAL;		/* unknown command */
		}
		/* Free the message and return. */
		NG_FREE_MSG(msg);
		return (error);
	}

	/* Get the return address into a sockaddr. */
	bzero(&addr, sizeof(addr));
	addr.sg_len = sizeof(addr);
	addr.sg_family = AF_NETGRAPH;
	addrlen = ksnprintf((char *)&addr.sg_data, sizeof(addr.sg_data),
	    "[%x]:", retaddr);
	if (addrlen < 0 || addrlen > sizeof(addr.sg_data)) {
		kprintf("%s: ksnprintf([%x]) failed - %d\n", __func__,
		    retaddr, addrlen);
		NG_FREE_MSG(msg);
		return (EINVAL);
	}

	/* Copy the message itself into an mbuf chain. */
	m = m_devget((caddr_t)msg, sizeof(struct ng_mesg) + msg->header.arglen,
	    0, NULL, NULL);

	/*
	 * Here we free the message.  We need to do that
	 * regardless of whether we got mbufs.
	 */
	NG_FREE_MSG(msg);

	if (m == NULL) {
		TRAP_ERROR;
		return (ENOBUFS);
	}

	/* Send it up to the socket. */
	if (sbappendaddr((struct sockbuf *)&so->so_rcv,
	    (struct sockaddr *)&addr, m, NULL) == 0) {
		TRAP_ERROR;
		m_freem(m);
		return (ENOBUFS);
	}
	sorwakeup(so);
	return (error);
}
/*
 * icintr()
 */
static void
icintr(device_t dev, int event, char *ptr)
{
	struct ic_softc *sc = (struct ic_softc *)device_get_softc(dev);
	int unit = device_get_unit(dev);
	int s, len;
	struct mbuf *top;

	s = splhigh();

	switch (event) {
	case INTR_GENERAL:
	case INTR_START:
		sc->ic_cp = sc->ic_ifbuf;
		sc->ic_xfercnt = 0;
		break;

	case INTR_STOP:
		/* if any error occurred during the transfer,
		 * drop the packet */
		if (sc->ic_iferrs)
			goto err;

		if ((len = sc->ic_xfercnt) == 0)
			break;			/* ignore */

		if (len <= ICHDRLEN)
			goto err;

		if (IF_QFULL(&ipintrq)) {
			IF_DROP(&ipintrq);
			break;
		}

		len -= ICHDRLEN;
		sc->ic_if.if_ipackets++;
		sc->ic_if.if_ibytes += len;

		if (sc->ic_if.if_bpf)
			bpf_tap(&sc->ic_if, sc->ic_ifbuf, len + ICHDRLEN);

		top = m_devget(sc->ic_ifbuf + ICHDRLEN, len, 0, &sc->ic_if, 0);
		if (top) {
			IF_ENQUEUE(&ipintrq, top);
			schednetisr(NETISR_IP);
		}
		break;

	err:
		printf("ic%d: errors (%d)!\n", unit, sc->ic_iferrs);

		sc->ic_iferrs = 0;	/* reset error count */
		sc->ic_if.if_ierrors++;
		break;

	case INTR_RECEIVE:
		if (sc->ic_xfercnt >= sc->ic_if.if_mtu + ICHDRLEN) {
			sc->ic_iferrs++;
		} else {
			*sc->ic_cp++ = *ptr;
			sc->ic_xfercnt++;
		}
		break;

	case INTR_NOACK:		/* xfer terminated by master */
		break;

	case INTR_TRANSMIT:
		*ptr = 0xff;		/* XXX */
		break;

	case INTR_ERROR:
		sc->ic_iferrs++;
		break;

	default:
		panic("%s: unknown event (%d)!", __FUNCTION__, event);
	}

	splx(s);
	return;
}
void
vnet_rx_vio_desc_data(struct vnet_softc *sc, struct vio_msg_tag *tag)
{
	struct vnet_desc_msg *dm = (struct vnet_desc_msg *)tag;
	struct ldc_conn *lc = &sc->sc_lc;
	struct ldc_map *map = sc->sc_lm;
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	struct mbuf_list ml = MBUF_LIST_INITIALIZER();
	struct mbuf *m;
	caddr_t buf;
	paddr_t pa;
	psize_t nbytes;
	u_int cons;
	int err;

	switch (tag->stype) {
	case VIO_SUBTYPE_INFO:
		buf = pool_get(&sc->sc_pool, PR_NOWAIT|PR_ZERO);
		if (buf == NULL) {
			ifp->if_ierrors++;
			goto skip;
		}

		nbytes = roundup(dm->nbytes, 8);
		if (dm->nbytes > (ETHER_MAX_LEN - ETHER_CRC_LEN)) {
			ifp->if_ierrors++;
			goto skip;
		}

		pmap_extract(pmap_kernel(), (vaddr_t)buf, &pa);
		err = hv_ldc_copy(lc->lc_id, LDC_COPY_IN,
		    dm->cookie[0].addr, pa, nbytes, &nbytes);
		if (err != H_EOK) {
			pool_put(&sc->sc_pool, buf);
			ifp->if_ierrors++;
			goto skip;
		}

		/*
		 * Stupid OBP doesn't align properly.  (Note the newer
		 * three-argument OpenBSD m_devget(buf, totlen, off) here.)
		 */
		m = m_devget(buf, dm->nbytes, ETHER_ALIGN);
		pool_put(&sc->sc_pool, buf);
		if (m == NULL) {
			ifp->if_ierrors++;
			goto skip;
		}

		/* Pass it on. */
		ml_enqueue(&ml, m);
		if_input(ifp, &ml);

	skip:
		dm->tag.stype = VIO_SUBTYPE_ACK;
		dm->tag.sid = sc->sc_local_sid;
		vnet_sendmsg(sc, dm, sizeof(*dm));
		break;

	case VIO_SUBTYPE_ACK:
		DPRINTF(("DATA/ACK/DESC_DATA\n"));

		if (dm->desc_handle != sc->sc_tx_cons) {
			printf("out of order\n");
			return;
		}

		cons = sc->sc_tx_cons & (sc->sc_vd->vd_nentries - 1);

		map->lm_slot[sc->sc_vsd[cons].vsd_map_idx].entry = 0;
		atomic_dec_int(&map->lm_count);

		pool_put(&sc->sc_pool, sc->sc_vsd[cons].vsd_buf);

		ifp->if_opackets++;
		sc->sc_tx_cons++;
		break;

	case VIO_SUBTYPE_NACK:
		DPRINTF(("DATA/NACK/DESC_DATA\n"));
		break;

	default:
		DPRINTF(("DATA/0x%02x/DESC_DATA\n", tag->stype));
		break;
	}
}
/*
 * icintr()
 */
static int
icintr(device_t dev, int event, char *ptr)
{
	struct ic_softc *sc = (struct ic_softc *)device_get_softc(dev);
	struct mbuf *top;
	int len;

	mtx_lock(&sc->ic_lock);

	switch (event) {
	case INTR_GENERAL:
	case INTR_START:
		sc->ic_cp = sc->ic_ifbuf;
		sc->ic_xfercnt = 0;
		sc->ic_flags |= IC_IFBUF_BUSY;
		break;

	case INTR_STOP:
		/* if any error occurred during the transfer,
		 * drop the packet */
		sc->ic_flags &= ~IC_IFBUF_BUSY;
		if ((sc->ic_flags & (IC_BUFFERS_BUSY | IC_BUFFER_WAITER)) ==
		    IC_BUFFER_WAITER)
			wakeup(&sc);

		if (sc->ic_iferrs)
			goto err;

		if ((len = sc->ic_xfercnt) == 0)
			break;			/* ignore */

		if (len <= ICHDRLEN)
			goto err;

		len -= ICHDRLEN;
		sc->ic_ifp->if_ipackets++;
		sc->ic_ifp->if_ibytes += len;

		BPF_TAP(sc->ic_ifp, sc->ic_ifbuf, len + ICHDRLEN);

		top = m_devget(sc->ic_ifbuf + ICHDRLEN, len, 0, sc->ic_ifp, 0);
		if (top) {
			mtx_unlock(&sc->ic_lock);
			M_SETFIB(top, sc->ic_ifp->if_fib);
			netisr_dispatch(NETISR_IP, top);
			mtx_lock(&sc->ic_lock);
		}
		break;

	err:
		if_printf(sc->ic_ifp, "errors (%d)!\n", sc->ic_iferrs);

		sc->ic_iferrs = 0;	/* reset error count */
		sc->ic_ifp->if_ierrors++;
		break;

	case INTR_RECEIVE:
		if (sc->ic_xfercnt >= sc->ic_ifp->if_mtu + ICHDRLEN) {
			sc->ic_iferrs++;
		} else {
			*sc->ic_cp++ = *ptr;
			sc->ic_xfercnt++;
		}
		break;

	case INTR_NOACK:		/* xfer terminated by master */
		break;

	case INTR_TRANSMIT:
		*ptr = 0xff;		/* XXX */
		break;

	case INTR_ERROR:
		sc->ic_iferrs++;
		break;

	default:
		panic("%s: unknown event (%d)!", __func__, event);
	}

	mtx_unlock(&sc->ic_lock);
	return (0);
}