/*
 * Account for, and rate-limited-log, an IPv6 packet that cannot be
 * forwarded.  A non-NULL dstifp indicates the drop was a scope
 * violation and is additionally counted as a bad-scope error and an
 * input discard on that interface.  The log message is suppressed
 * while ip6_log_interval has not yet elapsed since the last one.
 * The printf-style (fmt, ...) arguments describe the drop reason.
 */
ip6_cantforward(const struct ip6_hdr *ip6, const struct ifnet *srcifp,
    const struct ifnet *dstifp, const char *fmt, ...)
{
	char sbuf[INET6_ADDRSTRLEN], dbuf[INET6_ADDRSTRLEN];
	char reason[256];
	va_list ap;
	uint64_t *ip6s;

	/* update statistics */
	ip6s = IP6_STAT_GETREF();
	ip6s[IP6_STAT_CANTFORWARD]++;
	if (dstifp)
		ip6s[IP6_STAT_BADSCOPE]++;	/* scope violation */
	IP6_STAT_PUTREF();

	if (dstifp)
		in6_ifstat_inc(dstifp, ifs6_in_discard);

	/* rate-limit the log output to one line per ip6_log_interval */
	if (ip6_log_time + ip6_log_interval >= time_uptime)
		return;
	ip6_log_time = time_uptime;

	/* format the caller-supplied reason string */
	va_start(ap, fmt);
	vsnprintf(reason, sizeof(reason), fmt, ap);
	va_end(ap);

	log(LOG_DEBUG, "Cannot forward from %s@%s to %s@%s nxt %d (%s)\n",
	    IN6_PRINT(sbuf, &ip6->ip6_src), srcifp ? if_name(srcifp) : "?",
	    IN6_PRINT(dbuf, &ip6->ip6_dst), dstifp ? if_name(dstifp) : "?",
	    ip6->ip6_nxt, reason);
}
/*
 * Drop this socket's reference on the shared receive file descriptor.
 * When the last reference goes away, the actual close is deferred by
 * one second (via the timer callout) so that a quick re-open can
 * reuse the descriptor.
 */
STATIC void
DHCPv6SocketCloseSocket(DHCPv6SocketRef sock)
{
    int		remaining;

    if (!sock->fd_open) {
	return;
    }
    remaining = S_globals->read_fd_refcount;
    if (remaining <= 0) {
	/* refcount underflow: should not happen */
	my_log(LOG_ERR, "DHCPv6SocketCloseSocket(%s): refcount %d",
	       if_name(sock->if_p), remaining);
	return;
    }
    remaining--;
    S_globals->read_fd_refcount = remaining;
    my_log(LOG_DEBUG, "DHCPv6SocketCloseSocket(%s): refcount %d",
	   if_name(sock->if_p), remaining);
    sock->fd_open = FALSE;
    if (remaining == 0) {
	struct timeval	tv;

	my_log(LOG_DEBUG,
	       "DHCPv6SocketCloseSocket(): scheduling delayed close");
	/* close it after 1 second of non-use */
	tv.tv_sec = 1;
	tv.tv_usec = 0;
	timer_set_relative(S_globals->timer_callout, tv,
			   DHCPv6SocketDelayedClose, NULL, NULL, NULL);
    }
    return;
}
int mlx4_en_create_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq **pcq, int entries, int ring, enum cq_type mode, int node) { struct mlx4_en_dev *mdev = priv->mdev; struct mlx4_en_cq *cq; int err; cq = kzalloc_node(sizeof(struct mlx4_en_cq), GFP_KERNEL, node); if (!cq) { cq = kzalloc(sizeof(struct mlx4_en_cq), GFP_KERNEL); if (!cq) { en_err(priv, "Failed to allocate CW struture\n"); return -ENOMEM; } } cq->size = entries; cq->buf_size = cq->size * mdev->dev->caps.cqe_size; cq->tq = taskqueue_create_fast("mlx4_en_que", M_NOWAIT, taskqueue_thread_enqueue, &cq->tq); if (mode == RX) { TASK_INIT(&cq->cq_task, 0, mlx4_en_rx_que, cq); taskqueue_start_threads(&cq->tq, 1, PI_NET, "%s rx cq", if_name(priv->dev)); } else { TASK_INIT(&cq->cq_task, 0, mlx4_en_tx_que, cq); taskqueue_start_threads(&cq->tq, 1, PI_NET, "%s tx cq", if_name(priv->dev)); } cq->ring = ring; cq->is_tx = mode; spin_lock_init(&cq->lock); err = mlx4_alloc_hwq_res(mdev->dev, &cq->wqres, cq->buf_size, 2 * PAGE_SIZE); if (err) goto err_cq; err = mlx4_en_map_buffer(&cq->wqres.buf); if (err) goto err_res; cq->buf = (struct mlx4_cqe *) cq->wqres.buf.direct.buf; *pcq = cq; return 0; err_res: mlx4_free_hwq_res(mdev->dev, &cq->wqres, cq->buf_size); err_cq: kfree(cq); return err; }
struct qfq_if * qfq_alloc(struct ifnet *ifp, int how) { struct qfq_if *qif; qif = (how == M_WAITOK) ? zalloc(qfq_zone) : zalloc_noblock(qfq_zone); if (qif == NULL) return (NULL); bzero(qif, qfq_size); qif->qif_ifq = &ifp->if_snd; qif->qif_maxclasses = IFCQ_SC_MAX; /* * TODO: [email protected] * * Ideally I would like to have the following * but QFQ needs further modifications. * * qif->qif_maxslots = IFCQ_SC_MAX; */ qif->qif_maxslots = QFQ_MAX_SLOTS; if ((qif->qif_class_tbl = _MALLOC(sizeof (struct qfq_class *) * qif->qif_maxclasses, M_DEVBUF, M_WAITOK|M_ZERO)) == NULL) { log(LOG_ERR, "%s: %s unable to allocate class table array\n", if_name(ifp), qfq_style(qif)); goto error; } if ((qif->qif_groups = _MALLOC(sizeof (struct qfq_group *) * (QFQ_MAX_INDEX + 1), M_DEVBUF, M_WAITOK|M_ZERO)) == NULL) { log(LOG_ERR, "%s: %s unable to allocate group array\n", if_name(ifp), qfq_style(qif)); goto error; } if (pktsched_verbose) { log(LOG_DEBUG, "%s: %s scheduler allocated\n", if_name(ifp), qfq_style(qif)); } return (qif); error: if (qif->qif_class_tbl != NULL) { _FREE(qif->qif_class_tbl, M_DEVBUF); qif->qif_class_tbl = NULL; } if (qif->qif_groups != NULL) { _FREE(qif->qif_groups, M_DEVBUF); qif->qif_groups = NULL; } zfree(qfq_zone, qif); return (NULL); }
/*
 * Get or set the interface throttling level.  With tr->set clear this
 * simply reports the current level.  Setting a level suspends or
 * resumes the BK_SYS class queue; ENXIO from the suspend/resume call
 * means the queue is not up yet, in which case the level change is
 * recorded lazily (without purging the queue).
 */
static int
priq_throttle(struct priq_if *pif, cqrq_throttle_t *tr)
{
	struct ifclassq *ifq = pif->pif_ifq;
	struct priq_class *cl;
	int err = 0;

	IFCQ_LOCK_ASSERT_HELD(ifq);
	VERIFY(!(pif->pif_flags & PRIQIFF_ALTQ));

	if (!tr->set) {
		/* query: report the current throttling level */
		tr->level = pif->pif_throttle;
		return (0);
	}

	if (tr->level == pif->pif_throttle)
		return (EALREADY);

	/* Current throttling levels only involve BK_SYS class */
	cl = ifq->ifcq_disc_slots[SCIDX_BK_SYS].cl;

	switch (tr->level) {
	case IFNET_THROTTLE_OFF:
		err = priq_resumeq(pif, cl);
		break;

	case IFNET_THROTTLE_OPPORTUNISTIC:
		err = priq_suspendq(pif, cl);
		break;

	default:
		VERIFY(0);
		/* NOTREACHED */
	}

	if (err == 0 || err == ENXIO) {
		if (pktsched_verbose) {
			log(LOG_DEBUG, "%s: %s throttling level %sset %d->%d\n",
			    if_name(PRIQIF_IFP(pif)), priq_style(pif),
			    (err == 0) ? "" : "lazy ", pif->pif_throttle,
			    tr->level);
		}
		pif->pif_throttle = tr->level;
		if (err != 0)
			err = 0;	/* ENXIO: lazy change, nothing to purge */
		else
			priq_purgeq(pif, cl, 0, NULL, NULL);
	} else {
		log(LOG_ERR, "%s: %s unable to set throttling level "
		    "%d->%d [error=%d]\n", if_name(PRIQIF_IFP(pif)),
		    priq_style(pif), pif->pif_throttle, tr->level, err);
	}

	return (err);
}
/*
 * sfb support routines
 */
/*
 * Allocate an SFB (Stochastic Fair Blue) instance for the given queue.
 * All three zone allocations (state, bins, flow-control lists) must
 * succeed; on any failure the partially constructed instance is torn
 * down via sfb_destroy() and NULL is returned.  Only SFBF_USERFLAGS
 * bits of `flags` are retained; SFBF_ECN is stripped when ECN support
 * is not compiled in.
 */
struct sfb *
sfb_alloc(struct ifnet *ifp, u_int32_t qid, u_int32_t qlim, u_int32_t flags)
{
	struct sfb *sfb;
	int i;

	VERIFY(ifp != NULL && qlim > 0);

	sfb = zalloc(sfb_zone);
	if (sfb == NULL) {
		log(LOG_ERR, "%s: SFB unable to allocate\n", if_name(ifp));
		return (NULL);
	}
	bzero(sfb, sfb_size);

	sfb->sfb_bins = zalloc(sfb_bins_zone);
	if (sfb->sfb_bins == NULL) {
		log(LOG_ERR, "%s: SFB unable to allocate bins\n",
		    if_name(ifp));
		sfb_destroy(sfb);
		return (NULL);
	}
	bzero(sfb->sfb_bins, sfb_bins_size);

	sfb->sfb_fc_lists = zalloc(sfb_fcl_zone);
	if (sfb->sfb_fc_lists == NULL) {
		log(LOG_ERR, "%s: SFB unable to allocate flow control lists\n",
		    if_name(ifp));
		sfb_destroy(sfb);
		return (NULL);
	}
	bzero(sfb->sfb_fc_lists, sfb_fcl_size);

	for (i = 0; i < SFB_BINS; i++) {
		STAILQ_INIT(&SFB_FC_LIST(sfb, i)->fclist);
	}

	sfb->sfb_ifp = ifp;
	sfb->sfb_qlim = qlim;
	sfb->sfb_qid = qid;
	sfb->sfb_flags = (flags & SFBF_USERFLAGS);
#if !PF_ECN
	if (sfb->sfb_flags & SFBF_ECN) {
		/* ECN was requested but is unavailable in this build */
		sfb->sfb_flags &= ~SFBF_ECN;
		log(LOG_ERR, "%s: SFB qid=%d, ECN not available; ignoring "
		    "SFBF_ECN flag!\n", if_name(ifp), sfb->sfb_qid);
	}
#endif /* !PF_ECN */

	sfb_resetq(sfb, CLASSQ_EV_INIT);

	return (sfb);
}
/*
 * pflog interfaces are receive-only; any attempt to transmit is
 * logged, the mbuf is released, and ENOTSUP is returned.
 */
static errno_t
pflogoutput(struct ifnet *ifp, struct mbuf *m)
{
	printf("%s: freeing data for %s\n", __func__, if_name(ifp));
	m_freem(m);
	return (ENOTSUP);
}
/*
 * Propagate a class-queue event to whichever AQM discipline (RIO,
 * RED, BLUE or SFB) is active on the class queue; droptail queues
 * need no notification.
 */
static void
priq_updateq(struct priq_if *pif, struct priq_class *cl, cqev_t ev)
{
	IFCQ_LOCK_ASSERT_HELD(pif->pif_ifq);

	if (pktsched_verbose) {
		log(LOG_DEBUG, "%s: %s update qid=%d pri=%d event=%s\n",
		    if_name(PRIQIF_IFP(pif)), priq_style(pif),
		    cl->cl_handle, cl->cl_pri, ifclassq_ev2str(ev));
	}

#if CLASSQ_RIO
	if (q_is_rio(&cl->cl_q)) {
		rio_updateq(cl->cl_rio, ev);
		return;
	}
#endif /* CLASSQ_RIO */
#if CLASSQ_RED
	if (q_is_red(&cl->cl_q)) {
		red_updateq(cl->cl_red, ev);
		return;
	}
#endif /* CLASSQ_RED */
#if CLASSQ_BLUE
	if (q_is_blue(&cl->cl_q)) {
		blue_updateq(cl->cl_blue, ev);
		return;
	}
#endif /* CLASSQ_BLUE */
	if (q_is_sfb(&cl->cl_q) && cl->cl_sfb != NULL) {
		sfb_updateq(cl->cl_sfb, ev);
	}
}
/*
 * Attach the IPv6 protocol to an Ethernet interface by registering a
 * single ETYPE2 demux entry for ETHERTYPE_IPV6 plus the IPv6
 * input/pre-output/ioctl/multicast-resolve handlers.  EEXIST (already
 * attached) is not treated as a reportable failure, but is still
 * returned to the caller.
 */
errno_t
ether_attach_inet6(struct ifnet *ifp, protocol_family_t protocol_family)
{
#pragma unused(protocol_family)
	struct ifnet_attach_proto_param proto;
	struct ifnet_demux_desc demux[1];
	u_short en_6native = htons(ETHERTYPE_IPV6);
	errno_t error;

	/* demux on the native IPv6 ethertype */
	demux[0].type = DLIL_DESC_ETYPE2;
	demux[0].data = &en_6native;
	demux[0].datalen = sizeof (en_6native);

	bzero(&proto, sizeof (proto));
	proto.demux_list = demux;
	proto.demux_count = 1;
	proto.input = ether_inet6_input;
	proto.pre_output = ether_inet6_pre_output;
	proto.ioctl = ether_inet6_prmod_ioctl;
	proto.resolve = ether_inet6_resolve_multi;

	error = ifnet_attach_protocol(ifp, protocol_family, &proto);
	if (error && error != EEXIST) {
		printf("WARNING: %s can't attach ipv6 to %s\n", __func__,
		    if_name(ifp));
	}
	return (error);
}
/* Set up a SLIP link to use AX.25 */ int kiss_init( struct iface *ifp ){ int xdev; struct slip *sp; char *ifn; for(xdev = 0;xdev < SLIP_MAX;xdev++){ sp = &Slip[xdev]; if(sp->iface == NULL) break; } if(xdev >= SLIP_MAX) { printf("Too many slip devices\n"); return -1; } ifp->ioctl = kiss_ioctl; ifp->raw = kiss_raw; ifp->show = slip_status; if(ifp->hwaddr == NULL) ifp->hwaddr = mallocw(AXALEN); memcpy(ifp->hwaddr,Mycall,AXALEN); ifp->xdev = xdev; sp->iface = ifp; sp->send = asy_send; sp->get = get_asy; sp->type = CL_KISS; ifp->rxproc = newproc( ifn = if_name( ifp, " rx" ), 256,slip_rx,xdev,NULL,NULL,0); free(ifn); return 0; }
/*
 * Create an RTADV socket object for the given interface and register
 * it in the global socket list.  At most one socket may exist per
 * interface (keyed by link index); NULL is returned on duplicate
 * creation or on any allocation failure.
 */
PRIVATE_EXTERN RTADVSocketRef
RTADVSocketCreate(interface_t * if_p)
{
    RTADVSocketGlobalsRef	globals;
    RTADVSocketRef		sock;

    globals = RTADVSocketGetGlobals();
    if (globals == NULL) {
	my_log(LOG_NOTICE, "RTADVSocketCreate: could not allocate globals");
	return (NULL);
    }
    /* refuse to create a second socket for the same interface */
    if (RTADVSocketFind(if_link_index(if_p)) != NULL) {
	my_log(LOG_NOTICE, "RTADVSocketCreate(%s): socket already allocated",
	       if_name(if_p));
	return (NULL);
    }
    sock = malloc(sizeof(*sock));
    if (sock == NULL) {
	return (NULL);
    }
    bzero(sock, sizeof(*sock));
    if (dynarray_add(&globals->sockets, sock) == FALSE) {
	free(sock);
	return (NULL);
    }
    sock->if_p = if_p;
    return (sock);
}
/*
 * Return a link-local IPv4 address for the service: first the one the
 * service itself recorded, otherwise the first link-local address
 * configured on the underlying interface.  Returns G_ip_zeroes when
 * none is found.
 */
struct in_addr
S_find_linklocal_address(ServiceRef service_p)
{
    int			count;
    int			i;
    interface_t *	if_p;
    struct in_addr	ll_addr;

    /* prefer the address already associated with the service */
    ll_addr = linklocal_get_address(service_p);
    if (ll_addr.s_addr != 0) {
	return (ll_addr);
    }
    /* otherwise scan the interface's configured addresses */
    if_p = service_interface(service_p);
    count = if_inet_count(if_p);
    for (i = 0; i < count; i++) {
	inet_addrinfo_t *	info = if_inet_addr_at(if_p, i);

	if (ip_is_linklocal(info->addr)) {
	    my_log(LOG_DEBUG, "LINKLOCAL %s: found address " IP_FORMAT,
		   if_name(if_p), IP_LIST(&info->addr));
	    return (info->addr);
	}
    }
    return (G_ip_zeroes);
}
/* * Checks if kernel interface is contained in our tracked * interface list and calls attach/detach handler. */ static void ipfw_kifhandler(void *arg, struct ifnet *ifp) { struct ip_fw_chain *ch; struct ipfw_iface *iif; struct namedobj_instance *ii; uintptr_t htype; if (V_ipfw_vnet_ready == 0) return; ch = &V_layer3_chain; htype = (uintptr_t)arg; IPFW_UH_WLOCK(ch); ii = CHAIN_TO_II(ch); if (ii == NULL) { IPFW_UH_WUNLOCK(ch); return; } iif = (struct ipfw_iface*)ipfw_objhash_lookup_name(ii, 0, if_name(ifp)); if (iif != NULL) { if (htype == 1) handle_ifattach(ch, iif, ifp->if_index); else handle_ifdetach(ch, iif, ifp->if_index); } IPFW_UH_WUNLOCK(ch); }
/*
 * pflog demux handler: incoming packets are never delivered to a
 * protocol; log, free the mbuf, and tell DLIL we consumed it.
 */
static errno_t
pflogdemux(struct ifnet *ifp, struct mbuf *m, char *h, protocol_family_t *ppf)
{
#pragma unused(h, ppf)
	printf("%s: freeing data for %s\n", __func__, if_name(ifp));
	m_freem(m);
	return (EJUSTRETURN);
}
/*
 * Refresh and return the cached link status for the interface.  On a
 * successful media query the cache is updated; on failure the previous
 * cached value is returned unchanged, and only unexpected errno values
 * are logged (ENXIO/EPWROFF/EINVAL are routine for absent or powered-
 * down interfaces).
 */
link_status_t
if_link_status_update(interface_t * if_p)
{
    struct ifmediareq	ifmr;

    if (S_get_ifmediareq(if_name(if_p), &ifmr)) {
	if_p->link_status = S_ifmediareq_get_link_status(&ifmr);
    }
    else if (errno != ENXIO && errno != EPWROFF && errno != EINVAL) {
	IPConfigLogFL(LOG_NOTICE,
		      "%s: failed to get media status, %s",
		      if_name(if_p), strerror(errno));
    }
    return (if_p->link_status);
}
/*
 * Purge packets belonging to `flow` (0 == all flows) from a class
 * queue, dispatching to the active AQM discipline's purge routine or
 * to the plain droptail flush.  Drop counters and the interface queue
 * length are adjusted accordingly.  The purged packet/byte counts are
 * optionally returned via `packets`/`bytes`.
 */
static void
tcq_purgeq(struct tcq_if *tif, struct tcq_class *cl, u_int32_t flow,
    u_int32_t *packets, u_int32_t *bytes)
{
	struct ifclassq *ifq = tif->tif_ifq;
	u_int32_t cnt = 0, len = 0, qlen;

	IFCQ_LOCK_ASSERT_HELD(ifq);

	if ((qlen = qlen(&cl->cl_q)) == 0)
		goto done;

	/* become regular mutex before freeing mbufs */
	IFCQ_CONVERT_LOCK(ifq);

	/*
	 * NOTE: the #if blocks below splice into a single if/else-if
	 * chain; the trailing `else` inside each block binds to the
	 * next enabled test.
	 */
#if CLASSQ_RIO
	if (q_is_rio(&cl->cl_q))
		rio_purgeq(cl->cl_rio, &cl->cl_q, flow, &cnt, &len);
	else
#endif /* CLASSQ_RIO */
#if CLASSQ_RED
	if (q_is_red(&cl->cl_q))
		red_purgeq(cl->cl_red, &cl->cl_q, flow, &cnt, &len);
	else
#endif /* CLASSQ_RED */
#if CLASSQ_BLUE
	if (q_is_blue(&cl->cl_q))
		blue_purgeq(cl->cl_blue, &cl->cl_q, flow, &cnt, &len);
	else
#endif /* CLASSQ_BLUE */
	if (q_is_sfb(&cl->cl_q) && cl->cl_sfb != NULL)
		sfb_purgeq(cl->cl_sfb, &cl->cl_q, flow, &cnt, &len);
	else
		_flushq_flow(&cl->cl_q, flow, &cnt, &len);

	if (cnt > 0) {
		VERIFY(qlen(&cl->cl_q) == (qlen - cnt));

		/* account the purged packets as drops */
		PKTCNTR_ADD(&cl->cl_dropcnt, cnt, len);
		IFCQ_DROP_ADD(ifq, cnt, len);

		VERIFY(((signed)IFCQ_LEN(ifq) - cnt) >= 0);
		IFCQ_LEN(ifq) -= cnt;

		if (pktsched_verbose) {
			log(LOG_DEBUG, "%s: %s purge qid=%d pri=%d "
			    "qlen=[%d,%d] cnt=%d len=%d flow=0x%x\n",
			    if_name(TCQIF_IFP(tif)), tcq_style(tif),
			    cl->cl_handle, cl->cl_pri, qlen, qlen(&cl->cl_q),
			    cnt, len, flow);
		}
	}

done:
	if (packets != NULL)
		*packets = cnt;
	if (bytes != NULL)
		*bytes = len;
}
/*
 * Destroy a PRIQ class: drain its queue, unhook it from the per-
 * priority class table, recompute the scheduler's highest active
 * priority, release any attached AQM state, and free the class.
 * The caller must hold the interface classq lock.
 */
static int
priq_class_destroy(struct priq_if *pif, struct priq_class *cl)
{
	struct ifclassq *ifq = pif->pif_ifq;
	int pri;

	IFCQ_LOCK_ASSERT_HELD(ifq);

	if (!qempty(&cl->cl_q))
		priq_purgeq(pif, cl, 0, NULL, NULL);

	VERIFY(cl->cl_pri < PRIQ_MAXPRI);
	/* the class must already be marked idle in the active bitmap */
	VERIFY(!pktsched_bit_tst(cl->cl_pri, &pif->pif_bitmap));

	pif->pif_classes[cl->cl_pri] = NULL;
	if (pif->pif_maxpri == cl->cl_pri) {
		/* find the next-highest remaining class, if any */
		for (pri = cl->cl_pri; pri >= 0; pri--)
			if (pif->pif_classes[pri] != NULL) {
				pif->pif_maxpri = pri;
				break;
			}
		if (pri < 0)
			pif->pif_maxpri = -1;
	}

	if (pif->pif_default == cl)
		pif->pif_default = NULL;

	if (cl->cl_qalg.ptr != NULL) {
		/* tear down whichever AQM discipline was attached */
#if CLASSQ_RIO
		if (q_is_rio(&cl->cl_q))
			rio_destroy(cl->cl_rio);
#endif /* CLASSQ_RIO */
#if CLASSQ_RED
		if (q_is_red(&cl->cl_q))
			red_destroy(cl->cl_red);
#endif /* CLASSQ_RED */
#if CLASSQ_BLUE
		if (q_is_blue(&cl->cl_q))
			blue_destroy(cl->cl_blue);
#endif /* CLASSQ_BLUE */
		if (q_is_sfb(&cl->cl_q) && cl->cl_sfb != NULL)
			sfb_destroy(cl->cl_sfb);
		cl->cl_qalg.ptr = NULL;
		/* revert the queue to plain droptail */
		qtype(&cl->cl_q) = Q_DROPTAIL;
		qstate(&cl->cl_q) = QS_RUNNING;
	}

	if (pktsched_verbose) {
		log(LOG_DEBUG, "%s: %s destroyed qid=%d pri=%d\n",
		    if_name(PRIQIF_IFP(pif)), priq_style(pif),
		    cl->cl_handle, cl->cl_pri);
	}

	zfree(priq_cl_zone, cl);
	return (0);
}
static int in6_ifattach_loopback(struct ifnet *ifp) { struct in6_aliasreq ifra; int error; memset(&ifra, 0, sizeof(ifra)); /* * in6_update_ifa() does not use ifra_name, but we accurately set it * for safety. */ strncpy(ifra.ifra_name, if_name(ifp), sizeof(ifra.ifra_name)); sockaddr_in6_init(&ifra.ifra_prefixmask, &in6mask128, 0, 0, 0); /* * Always initialize ia_dstaddr (= broadcast address) to loopback * address. Follows IPv4 practice - see in_ifinit(). */ sockaddr_in6_init(&ifra.ifra_dstaddr, &in6addr_loopback, 0, 0, 0); sockaddr_in6_init(&ifra.ifra_addr, &in6addr_loopback, 0, 0, 0); /* the loopback address should NEVER expire. */ ifra.ifra_lifetime.ia6t_vltime = ND6_INFINITE_LIFETIME; ifra.ifra_lifetime.ia6t_pltime = ND6_INFINITE_LIFETIME; /* we don't need to perform DAD on loopback interfaces. */ ifra.ifra_flags |= IN6_IFF_NODAD; /* * We are sure that this is a newly assigned address, so we can set * NULL to the 3rd arg. */ if ((error = in6_update_ifa(ifp, &ifra, NULL, 0)) != 0) { nd6log((LOG_ERR, "in6_ifattach_loopback: failed to configure " "the loopback address on %s (errno=%d)\n", if_name(ifp), error)); return -1; } return 0; }
/*
 * Failover address timer expired: cancel any pending events, drop the
 * address, and publish the timeout failure for this service.
 */
static void
failover_timed_out(ServiceRef service_p)
{
    interface_t *	if_p = service_interface(service_p);

    my_log(LOG_DEBUG, "FAILOVER %s: address timer fired",
	   if_name(if_p));
    failover_cancel_pending_events(service_p);
    service_remove_address(service_p);
    service_publish_failure(service_p,
			    ipconfig_status_address_timed_out_e);
    return;
}
/** * Function to update link status. */ static void cvm_oct_update_link(void *context, int pending) { cvm_oct_private_t *priv = (cvm_oct_private_t *)context; struct ifnet *ifp = priv->ifp; cvmx_helper_link_info_t link_info; link_info.u64 = priv->link_info; if (link_info.s.link_up) { if_link_state_change(ifp, LINK_STATE_UP); DEBUGPRINT("%s: %u Mbps %s duplex, port %2d, queue %2d\n", if_name(ifp), link_info.s.speed, (link_info.s.full_duplex) ? "Full" : "Half", priv->port, priv->queue); } else { if_link_state_change(ifp, LINK_STATE_DOWN); DEBUGPRINT("%s: Link down\n", if_name(ifp)); } priv->need_link_update = 0; }
/*
 * Register the receive callback (and its two context arguments) for
 * the socket, then ensure the underlying socket is open.  A failed
 * open is logged but not reported to the caller.
 */
PRIVATE_EXTERN void
RTADVSocketEnableReceive(RTADVSocketRef sock,
			 RTADVSocketReceiveFuncPtr func,
			 void * arg1, void * arg2)
{
    sock->receive_func = func;
    sock->receive_arg1 = arg1;
    sock->receive_arg2 = arg2;
    if (!RTADVSocketOpenSocket(sock)) {
	my_log_fl(LOG_NOTICE, "%s: failed",
		  if_name(sock->if_p));
    }
    return;
}
/* * altq_priq_enqueue is an enqueue function to be registered to * (*altq_enqueue) in struct ifaltq. */ static int altq_priq_enqueue(struct ifaltq *altq, struct mbuf *m) { /* grab class set by classifier */ if (!(m->m_flags & M_PKTHDR)) { /* should not happen */ printf("%s: packet for %s does not have pkthdr\n", __func__, if_name(altq->altq_ifcq->ifcq_ifp)); m_freem(m); return (ENOBUFS); } return (priq_enqueue(altq->altq_disc, NULL, m, m_pftag(m))); }
/*
 * Register the receive callback (and its two context arguments) for
 * the socket, then ensure the underlying socket is open.  A failed
 * open is logged but not reported to the caller.
 */
void
DHCPv6SocketEnableReceive(DHCPv6SocketRef sock,
			  DHCPv6SocketReceiveFuncPtr func,
			  void * arg1, void * arg2)
{
    sock->receive_func = func;
    sock->receive_arg1 = arg1;
    sock->receive_arg2 = arg2;
    if (!DHCPv6SocketOpenSocket(sock)) {
	my_log(LOG_ERR, "DHCPv6SocketEnableReceive(%s): failed",
	       if_name(sock->if_p));
    }
    return;
}
/*
 * Tear down the link-local service after a failure: stop ARP
 * collision detection, cancel timers, remove the address, and publish
 * the failure status.  The remembered address is kept only when the
 * failure was caused by inactive media (so it can be re-tried later).
 */
static void
linklocal_failed(ServiceRef service_p, ipconfig_status_t status)
{
    Service_linklocal_t *	linklocal;

    linklocal = (Service_linklocal_t *)ServiceGetPrivate(service_p);

    linklocal->enable_arp_collision_detection = FALSE;
    linklocal_cancel_pending_events(service_p);
    arp_linklocal_disable(if_name(service_interface(service_p)));
    service_remove_address(service_p);
    if (status != ipconfig_status_media_inactive_e) {
	/* forget our address unless the media simply went inactive */
	linklocal->our_ip = G_ip_zeroes;
    }
    service_publish_failure(service_p, status);
    return;
}
/*
 * Destroy a FAIRQ scheduler instance; the interface classq lock must
 * be held.  All classes are removed first, then the instance is freed.
 */
static int
fairq_destroy_locked(struct fairq_if *fif)
{
	IFCQ_LOCK_ASSERT_HELD(fif->fif_ifq);

	(void) fairq_clear_interface(fif);

	if (pktsched_verbose) {
		log(LOG_DEBUG, "%s: %s scheduler destroyed\n",
		    if_name(FAIRQIF_IFP(fif)), fairq_style(fif));
	}

	zfree(fairq_zone, fif);

	return (0);
}
/*
 * Destroy a TCQ scheduler instance; the interface classq lock must
 * be held.  All classes are removed first, then the instance is freed.
 */
static int
tcq_destroy_locked(struct tcq_if *tif)
{
	IFCQ_LOCK_ASSERT_HELD(tif->tif_ifq);

	(void) tcq_clear_interface(tif);

	if (pktsched_verbose) {
		log(LOG_DEBUG, "%s: %s scheduler destroyed\n",
		    if_name(TCQIF_IFP(tif)), tcq_style(tif));
	}

	zfree(tcq_zone, tif);

	return (0);
}
/*
 * Destroy a PRIQ scheduler instance; the interface classq lock must
 * be held.  All classes are removed first, then the instance is freed.
 */
static int
priq_destroy_locked(struct priq_if *pif)
{
	IFCQ_LOCK_ASSERT_HELD(pif->pif_ifq);

	(void) priq_clear_interface(pif);

	if (pktsched_verbose) {
		log(LOG_DEBUG, "%s: %s scheduler destroyed\n",
		    if_name(PRIQIF_IFP(pif)), priq_style(pif));
	}

	zfree(priq_zone, pif);

	return (0);
}
/*
 * Match an interface against an ip6_fw rule's interface spec.
 * With `byname` set, compare by interface name (platform-dependent:
 * NetBSD/FreeBSD compare the full if_name; other platforms compare
 * the name and unit separately, with unit -1 acting as a wildcard).
 * Otherwise compare by IPv6 address, where the unspecified address
 * acts as a wildcard that matches any interface.
 * Returns 1 on match, 0 otherwise.
 */
static
#ifdef __FreeBSD__
__inline
#else
inline
#endif
int
iface_match(struct ifnet *ifp, union ip6_fw_if *ifu, int byname)
{
	/* Check by name or by IP address */
	if (byname) {
#if defined(__NetBSD__) || defined(__FreeBSD__)
		{
			if (strncmp(if_name(ifp), ifu->fu_via_if.name,
			    IP6FW_IFNLEN))
				return (0);
		}
#else
		/* Check unit number (-1 is wildcard) */
		if (ifu->fu_via_if.unit != -1
		    && ifp->if_unit != ifu->fu_via_if.unit)
			return (0);
		/* Check name */
		if (strncmp(ifp->if_name, ifu->fu_via_if.name, IP6FW_IFNLEN))
			return (0);
#endif
		return (1);
	} else if (!IN6_IS_ADDR_UNSPECIFIED(&ifu->fu_via_ip6)) {
		/* Zero == wildcard */
		struct ifaddr *ia;

		/* walk the interface's address list looking for a match */
		for (ia = ifp->if_addrlist.tqh_first; ia;
		    ia = ia->ifa_list.tqe_next) {
			if (ia->ifa_addr == NULL)
				continue;
			if (ia->ifa_addr->sa_family != AF_INET6)
				continue;
			if (!IN6_ARE_ADDR_EQUAL(&ifu->fu_via_ip6,
			    &(((struct sockaddr_in6 *)
			    (ia->ifa_addr))->sin6_addr)))
				continue;
			return (1);
		}
		return (0);
	}
	return (1);	/* address wildcard: match anything */
}
/*
 * Destroy a QFQ class: purge its queue, remove its weight from the
 * scheduler's weight sum, drop it from the class table, release any
 * attached AQM state, and free the class.  The caller must hold the
 * interface classq lock.
 */
static int
qfq_class_destroy(struct qfq_if *qif, struct qfq_class *cl)
{
	struct ifclassq *ifq = qif->qif_ifq;
	int i;
#if !MACH_ASSERT
#pragma unused(ifq)
#endif

	IFCQ_LOCK_ASSERT_HELD(ifq);

	qfq_purgeq(qif, cl, 0, NULL, NULL);

	if (cl->cl_inv_w != 0) {
		/* remove this class's contribution to the weight sum */
		qif->qif_wsum -= (QFQ_ONE_FP / cl->cl_inv_w);
		cl->cl_inv_w = 0;	/* reset weight to avoid run twice */
	}

	/* unhook the class from the class table */
	for (i = 0; i < qif->qif_maxclasses; i++) {
		if (qif->qif_class_tbl[i] == cl) {
			qif->qif_class_tbl[i] = NULL;
			break;
		}
	}
	qif->qif_classes--;

	if (cl->cl_qalg.ptr != NULL) {
		if (q_is_sfb(&cl->cl_q) && cl->cl_sfb != NULL)
			sfb_destroy(cl->cl_sfb);
		cl->cl_qalg.ptr = NULL;
		/* revert the queue to plain droptail */
		qtype(&cl->cl_q) = Q_DROPTAIL;
		qstate(&cl->cl_q) = QS_RUNNING;
	}

	if (qif->qif_default == cl)
		qif->qif_default = NULL;

	if (pktsched_verbose) {
		log(LOG_DEBUG, "%s: %s destroyed qid=%d\n",
		    if_name(QFQIF_IFP(qif)), qfq_style(qif), cl->cl_handle);
	}

	zfree(qfq_cl_zone, cl);

	return (0);
}
static void DHCPv6SocketDemux(int if_index, const DHCPv6PacketRef pkt, int pkt_len) { DHCPv6SocketReceiveData data; DHCPv6OptionErrorString err; int i; if (pkt_len < DHCPV6_PACKET_HEADER_LENGTH) { return; } data.pkt = pkt; data.pkt_len = pkt_len; data.options = DHCPv6OptionListCreateWithPacket(pkt, pkt_len, &err); if (data.options == NULL) { my_log(LOG_NOTICE, "DHCPv6Socket: options parse failed, %s", err.str); return; } for (i = 0; i < dynarray_count(&S_globals->sockets); i++) { DHCPv6SocketRef client; client = dynarray_element(&S_globals->sockets, i); if (if_index != if_link_index(DHCPv6SocketGetInterface(client))) { continue; } if (S_verbose) { CFMutableStringRef str; str = CFStringCreateMutable(NULL, 0); DHCPv6PacketPrintToString(str, pkt, pkt_len); if (data.options != NULL) { DHCPv6OptionListPrintToString(str, data.options); } my_log(-LOG_DEBUG, "[%s] Receive %@", if_name(DHCPv6SocketGetInterface(client)), str); CFRelease(str); } if (client->receive_func != NULL) { (*client->receive_func)(client->receive_arg1, client->receive_arg2, &data); } } DHCPv6OptionListRelease(&data.options); return; }