/* Called from a BPF program, therefore rcu_read_lock is held.
 * bpf_check() verified that 'buf' points to BPF's stack
 * and it has 'len' bytes for us to read.
 */
void bpf_channel_push_struct(struct bpf_context *pctx, u32 struct_id,
			     const void *buf, u32 len)
{
	struct bpf_dp_context *ctx = container_of(pctx, struct bpf_dp_context,
						  context);
	struct dp_upcall_info upcall;
	struct plum *plum;
	struct nlattr *nla;

	if (unlikely(!ctx->skb))
		return;

	plum = rcu_dereference(ctx->dp->plums[pctx->plum_id]);
	if (unlikely(!plum))
		return;

	/* allocate temp nlattr to pass it into ovs_dp_upcall */
	nla = kzalloc(nla_total_size(4 + len), GFP_ATOMIC);
	if (unlikely(!nla))
		return;

	nla->nla_type = OVS_PACKET_ATTR_USERDATA;
	nla->nla_len = nla_attr_size(4 + len);
	memcpy(nla_data(nla), &struct_id, 4);
	memcpy(nla_data(nla) + 4, buf, len);

	upcall.cmd = OVS_PACKET_CMD_ACTION;
	upcall.key = NULL;
	upcall.userdata = nla;
	upcall.portid = plum->upcall_pid;
	ovs_dp_upcall(ctx->dp, NULL, &upcall);
	kfree(nla);
}
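/* Editorial sketch of the attribute assembled above (derived from the code,
 * not part of the original source):
 *
 *	kzalloc(nla_total_size(4 + len)) returns
 *
 *	+---------------+-----------------+----------------+- - -+
 *	| struct nlattr | struct_id (4 B) |  buf (len B)   | pad |
 *	+---------------+-----------------+----------------+- - -+
 *
 * nla_len holds the unpadded size nla_attr_size(4 + len), while the
 * allocation itself is rounded up to nla_total_size(4 + len).
 */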
static int sfq_change(struct Qdisc *sch, struct nlattr *opt)
{
	struct sfq_sched_data *q = qdisc_priv(sch);
	struct tc_sfq_qopt *ctl = nla_data(opt);
	unsigned int qlen;

	if (opt->nla_len < nla_attr_size(sizeof(*ctl)))
		return -EINVAL;

	sch_tree_lock(sch);
	q->quantum = ctl->quantum ? : psched_mtu(sch->dev);
	q->perturb_period = ctl->perturb_period * HZ;
	if (ctl->limit)
		q->limit = min_t(u32, ctl->limit, SFQ_DEPTH - 1);

	qlen = sch->q.qlen;
	while (sch->q.qlen > q->limit)
		sfq_drop(sch);
	qdisc_tree_decrease_qlen(sch, qlen - sch->q.qlen);

	del_timer(&q->perturb_timer);
	if (q->perturb_period) {
		mod_timer(&q->perturb_timer, jiffies + q->perturb_period);
		q->perturbation = net_random();
	}
	sch_tree_unlock(sch);
	return 0;
}
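/* Editorial note: the size guard above is the usual pattern for fixed-size
 * qdisc options. Since nla_attr_size(x) == NLA_HDRLEN + x and
 * nla_len(opt) == opt->nla_len - NLA_HDRLEN, the check
 *	opt->nla_len < nla_attr_size(sizeof(*ctl))
 * is equivalent to
 *	nla_len(opt) < sizeof(*ctl)
 * i.e. "the attribute payload is too small to hold a struct tc_sfq_qopt".
 */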
static int parse_attr(struct nlattr *tb[], int maxtype, struct nlattr *nla,
		      const struct nla_policy *policy, int len)
{
	int nested_len = nla_len(nla) - NLA_ALIGN(len);

	if (nested_len < 0)
		return -EINVAL;
	if (nested_len >= nla_attr_size(0))
		return nla_parse(tb, maxtype, nla_data(nla) + NLA_ALIGN(len),
				 nested_len, policy);
	memset(tb, 0, sizeof(struct nlattr *) * (maxtype + 1));
	return 0;
}
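A usage sketch may help here; every "foo" name below is hypothetical, invented
only to show the layout parse_attr() expects: a fixed struct at the head of the
attribute payload, optionally followed by nested attributes starting at
nla_data(nla) + NLA_ALIGN(len), in the style of qdiscs such as netem.

struct foo_qopt {
	__u32 limit;
};

enum {
	FOO_ATTR_UNSPEC,
	FOO_ATTR_RATE,
	__FOO_ATTR_MAX
};
#define FOO_ATTR_MAX (__FOO_ATTR_MAX - 1)

static const struct nla_policy foo_policy[FOO_ATTR_MAX + 1] = {
	[FOO_ATTR_RATE]	= { .type = NLA_U32 },
};

static int foo_change(struct Qdisc *sch, struct nlattr *opt)
{
	struct nlattr *tb[FOO_ATTR_MAX + 1];
	struct foo_qopt *qopt;
	u32 rate = 0;
	int err;

	/* The payload must at least hold the fixed struct. */
	if (nla_len(opt) < sizeof(*qopt))
		return -EINVAL;

	err = parse_attr(tb, FOO_ATTR_MAX, opt, foo_policy, sizeof(*qopt));
	if (err < 0)
		return err;

	qopt = nla_data(opt);
	if (tb[FOO_ATTR_RATE])
		rate = nla_get_u32(tb[FOO_ATTR_RATE]);

	/* ... apply qopt->limit and rate to the qdisc ... */
	return 0;
}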
static int sfq_change(struct Qdisc *sch, struct nlattr *opt)
{
	struct sfq_sched_data *q = qdisc_priv(sch);
	struct tc_sfq_qopt *ctl = nla_data(opt);
	struct tc_sfq_qopt_v1 *ctl_v1 = NULL;
	unsigned int qlen;
	struct red_parms *p = NULL;

	if (opt->nla_len < nla_attr_size(sizeof(*ctl)))
		return -EINVAL;
	if (opt->nla_len >= nla_attr_size(sizeof(*ctl_v1)))
		ctl_v1 = nla_data(opt);
	if (ctl->divisor &&
	    (!is_power_of_2(ctl->divisor) || ctl->divisor > 65536))
		return -EINVAL;
	if (ctl_v1 && ctl_v1->qth_min) {
		p = kmalloc(sizeof(*p), GFP_KERNEL);
		if (!p)
			return -ENOMEM;
	}
	sch_tree_lock(sch);
	if (ctl->quantum) {
		q->quantum = ctl->quantum;
		q->scaled_quantum = SFQ_ALLOT_SIZE(q->quantum);
	}
	q->perturb_period = ctl->perturb_period * HZ;
	if (ctl->flows)
		q->maxflows = min_t(u32, ctl->flows, SFQ_MAX_FLOWS);
	if (ctl->divisor) {
		q->divisor = ctl->divisor;
		q->maxflows = min_t(u32, q->maxflows, q->divisor);
	}
	if (ctl_v1) {
		if (ctl_v1->depth)
			q->maxdepth = min_t(u32, ctl_v1->depth, SFQ_MAX_DEPTH);
		if (p) {
			swap(q->red_parms, p);
			red_set_parms(q->red_parms,
				      ctl_v1->qth_min, ctl_v1->qth_max,
				      ctl_v1->Wlog,
				      ctl_v1->Plog, ctl_v1->Scell_log,
				      NULL,
				      ctl_v1->max_P);
		}
		q->flags = ctl_v1->flags;
		q->headdrop = ctl_v1->headdrop;
	}
	if (ctl->limit) {
		q->limit = min_t(u32, ctl->limit, q->maxdepth * q->maxflows);
		q->maxflows = min_t(u32, q->maxflows, q->limit);
	}

	qlen = sch->q.qlen;
	while (sch->q.qlen > q->limit)
		sfq_drop(sch);
	qdisc_tree_decrease_qlen(sch, qlen - sch->q.qlen);

	del_timer(&q->perturb_timer);
	if (q->perturb_period) {
		mod_timer(&q->perturb_timer, jiffies + q->perturb_period);
		q->perturbation = prandom_u32();
	}
	sch_tree_unlock(sch);
	kfree(p);
	return 0;
}
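/* Editorial note on the versioned-option pattern above: struct tc_sfq_qopt_v1
 * begins with an embedded struct tc_sfq_qopt, so a single attribute can carry
 * either version and the payload length alone decides which fields exist:
 *	opt->nla_len >= nla_attr_size(sizeof(struct tc_sfq_qopt))	v0 fields valid
 *	opt->nla_len >= nla_attr_size(sizeof(struct tc_sfq_qopt_v1))	v1 fields too
 */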
/* This is an inline function, we don't really care about a long
 * list of arguments */
static inline int
__build_packet_message(struct nfnl_log_net *log,
			struct nfulnl_instance *inst,
			const struct sk_buff *skb,
			unsigned int data_len,
			u_int8_t pf,
			unsigned int hooknum,
			const struct net_device *indev,
			const struct net_device *outdev,
			const char *prefix, unsigned int plen,
			const struct nfnl_ct_hook *nfnl_ct,
			struct nf_conn *ct, enum ip_conntrack_info ctinfo)
{
	struct nfulnl_msg_packet_hdr pmsg;
	struct nlmsghdr *nlh;
	struct nfgenmsg *nfmsg;
	sk_buff_data_t old_tail = inst->skb->tail;
	struct sock *sk;
	const unsigned char *hwhdrp;

	nlh = nlmsg_put(inst->skb, 0, 0,
			nfnl_msg_type(NFNL_SUBSYS_ULOG, NFULNL_MSG_PACKET),
			sizeof(struct nfgenmsg), 0);
	if (!nlh)
		return -1;
	nfmsg = nlmsg_data(nlh);
	nfmsg->nfgen_family = pf;
	nfmsg->version = NFNETLINK_V0;
	nfmsg->res_id = htons(inst->group_num);

	memset(&pmsg, 0, sizeof(pmsg));
	pmsg.hw_protocol	= skb->protocol;
	pmsg.hook		= hooknum;

	if (nla_put(inst->skb, NFULA_PACKET_HDR, sizeof(pmsg), &pmsg))
		goto nla_put_failure;

	if (prefix &&
	    nla_put(inst->skb, NFULA_PREFIX, plen, prefix))
		goto nla_put_failure;

	if (indev) {
#if !IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
		if (nla_put_be32(inst->skb, NFULA_IFINDEX_INDEV,
				 htonl(indev->ifindex)))
			goto nla_put_failure;
#else
		if (pf == PF_BRIDGE) {
			/* Case 1: outdev is physical input device, we need to
			 * look for bridge group (when called from
			 * netfilter_bridge) */
			if (nla_put_be32(inst->skb, NFULA_IFINDEX_PHYSINDEV,
					 htonl(indev->ifindex)) ||
			/* this is the bridge group "brX" */
			/* rcu_read_lock()ed by nf_hook_thresh or
			 * nf_log_packet.
			 */
			    nla_put_be32(inst->skb, NFULA_IFINDEX_INDEV,
					 htonl(br_port_get_rcu(indev)->br->dev->ifindex)))
				goto nla_put_failure;
		} else {
			struct net_device *physindev;

			/* Case 2: indev is bridge group, we need to look for
			 * physical device (when called from ipv4) */
			if (nla_put_be32(inst->skb, NFULA_IFINDEX_INDEV,
					 htonl(indev->ifindex)))
				goto nla_put_failure;

			physindev = nf_bridge_get_physindev(skb);
			if (physindev &&
			    nla_put_be32(inst->skb, NFULA_IFINDEX_PHYSINDEV,
					 htonl(physindev->ifindex)))
				goto nla_put_failure;
		}
#endif
	}

	if (outdev) {
#if !IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
		if (nla_put_be32(inst->skb, NFULA_IFINDEX_OUTDEV,
				 htonl(outdev->ifindex)))
			goto nla_put_failure;
#else
		if (pf == PF_BRIDGE) {
			/* Case 1: outdev is physical output device, we need to
			 * look for bridge group (when called from
			 * netfilter_bridge) */
			if (nla_put_be32(inst->skb, NFULA_IFINDEX_PHYSOUTDEV,
					 htonl(outdev->ifindex)) ||
			/* this is the bridge group "brX" */
			/* rcu_read_lock()ed by nf_hook_thresh or
			 * nf_log_packet.
			 */
			    nla_put_be32(inst->skb, NFULA_IFINDEX_OUTDEV,
					 htonl(br_port_get_rcu(outdev)->br->dev->ifindex)))
				goto nla_put_failure;
		} else {
			struct net_device *physoutdev;

			/* Case 2: indev is a bridge group, we need to look
			 * for physical device (when called from ipv4) */
			if (nla_put_be32(inst->skb, NFULA_IFINDEX_OUTDEV,
					 htonl(outdev->ifindex)))
				goto nla_put_failure;

			physoutdev = nf_bridge_get_physoutdev(skb);
			if (physoutdev &&
			    nla_put_be32(inst->skb, NFULA_IFINDEX_PHYSOUTDEV,
					 htonl(physoutdev->ifindex)))
				goto nla_put_failure;
		}
#endif
	}

	if (skb->mark &&
	    nla_put_be32(inst->skb, NFULA_MARK, htonl(skb->mark)))
		goto nla_put_failure;

	if (indev && skb->dev &&
	    skb->mac_header != skb->network_header) {
		struct nfulnl_msg_packet_hw phw;
		int len;

		memset(&phw, 0, sizeof(phw));
		len = dev_parse_header(skb, phw.hw_addr);
		if (len > 0) {
			phw.hw_addrlen = htons(len);
			if (nla_put(inst->skb, NFULA_HWADDR, sizeof(phw), &phw))
				goto nla_put_failure;
		}
	}

	if (indev && skb_mac_header_was_set(skb)) {
		if (nla_put_be16(inst->skb, NFULA_HWTYPE, htons(skb->dev->type)) ||
		    nla_put_be16(inst->skb, NFULA_HWLEN,
				 htons(skb->dev->hard_header_len)))
			goto nla_put_failure;

		hwhdrp = skb_mac_header(skb);

		if (skb->dev->type == ARPHRD_SIT)
			hwhdrp -= ETH_HLEN;

		if (hwhdrp >= skb->head &&
		    nla_put(inst->skb, NFULA_HWHEADER,
			    skb->dev->hard_header_len, hwhdrp))
			goto nla_put_failure;
	}

	if (skb->tstamp) {
		struct nfulnl_msg_packet_timestamp ts;
		struct timespec64 kts = ktime_to_timespec64(skb->tstamp);

		ts.sec = cpu_to_be64(kts.tv_sec);
		ts.usec = cpu_to_be64(kts.tv_nsec / NSEC_PER_USEC);

		if (nla_put(inst->skb, NFULA_TIMESTAMP, sizeof(ts), &ts))
			goto nla_put_failure;
	}

	/* UID */
	sk = skb->sk;
	if (sk && sk_fullsock(sk)) {
		read_lock_bh(&sk->sk_callback_lock);
		if (sk->sk_socket && sk->sk_socket->file) {
			struct file *file = sk->sk_socket->file;
			const struct cred *cred = file->f_cred;
			struct user_namespace *user_ns = inst->peer_user_ns;
			__be32 uid = htonl(from_kuid_munged(user_ns,
							    cred->fsuid));
			__be32 gid = htonl(from_kgid_munged(user_ns,
							    cred->fsgid));
			read_unlock_bh(&sk->sk_callback_lock);
			if (nla_put_be32(inst->skb, NFULA_UID, uid) ||
			    nla_put_be32(inst->skb, NFULA_GID, gid))
				goto nla_put_failure;
		} else
			read_unlock_bh(&sk->sk_callback_lock);
	}

	/* local sequence number */
	if ((inst->flags & NFULNL_CFG_F_SEQ) &&
	    nla_put_be32(inst->skb, NFULA_SEQ, htonl(inst->seq++)))
		goto nla_put_failure;

	/* global sequence number */
	if ((inst->flags & NFULNL_CFG_F_SEQ_GLOBAL) &&
	    nla_put_be32(inst->skb, NFULA_SEQ_GLOBAL,
			 htonl(atomic_inc_return(&log->global_seq))))
		goto nla_put_failure;

	if (ct && nfnl_ct->build(inst->skb, ct, ctinfo,
				 NFULA_CT, NFULA_CT_INFO) < 0)
		goto nla_put_failure;

	if (data_len) {
		struct nlattr *nla;
		int size = nla_attr_size(data_len);

		if (skb_tailroom(inst->skb) < nla_total_size(data_len))
			goto nla_put_failure;

		nla = skb_put(inst->skb, nla_total_size(data_len));
		nla->nla_type = NFULA_PAYLOAD;
		nla->nla_len = size;

		if (skb_copy_bits(skb, 0, nla_data(nla), data_len))
			BUG();
	}

	nlh->nlmsg_len = inst->skb->tail - old_tail;
	return 0;

nla_put_failure:
	PRINTR(KERN_ERR "nfnetlink_log: error creating log nlmsg\n");
	return -1;
}
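/* Editorial note: the NFULA_PAYLOAD attribute above is built by hand rather
 * than with nla_put() because nla_put() copies from a contiguous buffer,
 * which the (possibly non-linear) packet is not. Reserving
 * nla_total_size(data_len) via skb_put() and letting skb_copy_bits() write
 * straight into nla_data(nla) avoids an intermediate copy; nla_attr_size()
 * supplies the unpadded length stored in nla_len.
 */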
/* This is an inline function, we don't really care about a long
 * list of arguments */
static inline int
__build_packet_message(struct nfulnl_instance *inst,
			const struct sk_buff *skb,
			unsigned int data_len,
			u_int8_t pf,
			unsigned int hooknum,
			const struct net_device *indev,
			const struct net_device *outdev,
			const char *prefix, unsigned int plen)
{
	struct nfulnl_msg_packet_hdr pmsg;
	struct nlmsghdr *nlh;
	struct nfgenmsg *nfmsg;
	sk_buff_data_t old_tail = inst->skb->tail;

	nlh = NLMSG_PUT(inst->skb, 0, 0,
			NFNL_SUBSYS_ULOG << 8 | NFULNL_MSG_PACKET,
			sizeof(struct nfgenmsg));
	nfmsg = NLMSG_DATA(nlh);
	nfmsg->nfgen_family = pf;
	nfmsg->version = NFNETLINK_V0;
	nfmsg->res_id = htons(inst->group_num);

	pmsg.hw_protocol	= skb->protocol;
	pmsg.hook		= hooknum;

	NLA_PUT(inst->skb, NFULA_PACKET_HDR, sizeof(pmsg), &pmsg);

	if (prefix)
		NLA_PUT(inst->skb, NFULA_PREFIX, plen, prefix);

	if (indev) {
#ifndef CONFIG_BRIDGE_NETFILTER
		NLA_PUT_BE32(inst->skb, NFULA_IFINDEX_INDEV,
			     htonl(indev->ifindex));
#else
		if (pf == PF_BRIDGE) {
			/* Case 1: outdev is physical input device, we need to
			 * look for bridge group (when called from
			 * netfilter_bridge) */
			NLA_PUT_BE32(inst->skb, NFULA_IFINDEX_PHYSINDEV,
				     htonl(indev->ifindex));
			/* this is the bridge group "brX" */
			/* rcu_read_lock()ed by nf_hook_slow or nf_log_packet */
			NLA_PUT_BE32(inst->skb, NFULA_IFINDEX_INDEV,
				     htonl(br_port_get_rcu(indev)->br->dev->ifindex));
		} else {
			/* Case 2: indev is bridge group, we need to look for
			 * physical device (when called from ipv4) */
			NLA_PUT_BE32(inst->skb, NFULA_IFINDEX_INDEV,
				     htonl(indev->ifindex));
			if (skb->nf_bridge && skb->nf_bridge->physindev)
				NLA_PUT_BE32(inst->skb, NFULA_IFINDEX_PHYSINDEV,
					     htonl(skb->nf_bridge->physindev->ifindex));
		}
#endif
	}

	if (outdev) {
#ifndef CONFIG_BRIDGE_NETFILTER
		NLA_PUT_BE32(inst->skb, NFULA_IFINDEX_OUTDEV,
			     htonl(outdev->ifindex));
#else
		if (pf == PF_BRIDGE) {
			/* Case 1: outdev is physical output device, we need to
			 * look for bridge group (when called from
			 * netfilter_bridge) */
			NLA_PUT_BE32(inst->skb, NFULA_IFINDEX_PHYSOUTDEV,
				     htonl(outdev->ifindex));
			/* this is the bridge group "brX" */
			/* rcu_read_lock()ed by nf_hook_slow or nf_log_packet */
			NLA_PUT_BE32(inst->skb, NFULA_IFINDEX_OUTDEV,
				     htonl(br_port_get_rcu(outdev)->br->dev->ifindex));
		} else {
			/* Case 2: indev is a bridge group, we need to look
			 * for physical device (when called from ipv4) */
			NLA_PUT_BE32(inst->skb, NFULA_IFINDEX_OUTDEV,
				     htonl(outdev->ifindex));
			if (skb->nf_bridge && skb->nf_bridge->physoutdev)
				NLA_PUT_BE32(inst->skb, NFULA_IFINDEX_PHYSOUTDEV,
					     htonl(skb->nf_bridge->physoutdev->ifindex));
		}
#endif
	}

	if (skb->mark)
		NLA_PUT_BE32(inst->skb, NFULA_MARK, htonl(skb->mark));

	if (indev && skb->dev &&
	    skb->mac_header != skb->network_header) {
		struct nfulnl_msg_packet_hw phw;
		int len = dev_parse_header(skb, phw.hw_addr);
		if (len > 0) {
			phw.hw_addrlen = htons(len);
			NLA_PUT(inst->skb, NFULA_HWADDR, sizeof(phw), &phw);
		}
	}

	if (indev && skb_mac_header_was_set(skb)) {
		NLA_PUT_BE16(inst->skb, NFULA_HWTYPE, htons(skb->dev->type));
		NLA_PUT_BE16(inst->skb, NFULA_HWLEN,
			     htons(skb->dev->hard_header_len));
		NLA_PUT(inst->skb, NFULA_HWHEADER,
			skb->dev->hard_header_len, skb_mac_header(skb));
	}

	if (skb->tstamp.tv64) {
		struct nfulnl_msg_packet_timestamp ts;
		struct timeval tv = ktime_to_timeval(skb->tstamp);
		ts.sec = cpu_to_be64(tv.tv_sec);
		ts.usec = cpu_to_be64(tv.tv_usec);

		NLA_PUT(inst->skb, NFULA_TIMESTAMP, sizeof(ts), &ts);
	}

	/* UID */
	if (skb->sk) {
		read_lock_bh(&skb->sk->sk_callback_lock);
		if (skb->sk->sk_socket && skb->sk->sk_socket->file) {
			struct file *file = skb->sk->sk_socket->file;
			__be32 uid = htonl(file->f_cred->fsuid);
			__be32 gid = htonl(file->f_cred->fsgid);
			/* need to unlock here since NLA_PUT may goto */
			read_unlock_bh(&skb->sk->sk_callback_lock);
			NLA_PUT_BE32(inst->skb, NFULA_UID, uid);
			NLA_PUT_BE32(inst->skb, NFULA_GID, gid);
		} else
			read_unlock_bh(&skb->sk->sk_callback_lock);
	}

	/* local sequence number */
	if (inst->flags & NFULNL_CFG_F_SEQ)
		NLA_PUT_BE32(inst->skb, NFULA_SEQ, htonl(inst->seq++));

	/* global sequence number */
	if (inst->flags & NFULNL_CFG_F_SEQ_GLOBAL)
		NLA_PUT_BE32(inst->skb, NFULA_SEQ_GLOBAL,
			     htonl(atomic_inc_return(&global_seq)));

	if (data_len) {
		struct nlattr *nla;
		int size = nla_attr_size(data_len);

		if (skb_tailroom(inst->skb) < nla_total_size(data_len)) {
			printk(KERN_WARNING "nfnetlink_log: no tailroom!\n");
			goto nlmsg_failure;
		}

		nla = (struct nlattr *)skb_put(inst->skb, nla_total_size(data_len));
		nla->nla_type = NFULA_PAYLOAD;
		nla->nla_len = size;

		if (skb_copy_bits(skb, 0, nla_data(nla), data_len))
			BUG();
	}

	nlh->nlmsg_len = inst->skb->tail - old_tail;
	return 0;

nlmsg_failure:
nla_put_failure:
	PRINTR(KERN_ERR "nfnetlink_log: error creating log nlmsg\n");
	return -1;
}
static int queue_userspace_packet(int dp_ifindex, struct sk_buff *skb,
				  const struct dp_upcall_info *upcall_info)
{
	struct ovs_header *upcall;
	struct sk_buff *nskb = NULL;
	struct sk_buff *user_skb; /* to be queued to userspace */
	struct nlattr *nla;
	unsigned int len;
	int err;

	if (vlan_tx_tag_present(skb)) {
		nskb = skb_clone(skb, GFP_ATOMIC);
		if (!nskb)
			return -ENOMEM;

		nskb = __vlan_put_tag(nskb, vlan_tx_tag_get(nskb));
		if (!nskb)
			return -ENOMEM;

		nskb->vlan_tci = 0;
		skb = nskb;
	}

	if (nla_attr_size(skb->len) > USHRT_MAX) {
		err = -EFBIG;
		goto out;
	}

	len = sizeof(struct ovs_header);
	len += nla_total_size(skb->len);
	len += nla_total_size(FLOW_BUFSIZE);
	if (upcall_info->cmd == OVS_PACKET_CMD_ACTION)
		len += nla_total_size(8);

	user_skb = genlmsg_new(len, GFP_ATOMIC);
	if (!user_skb) {
		err = -ENOMEM;
		goto out;
	}

	upcall = genlmsg_put(user_skb, 0, 0, &dp_packet_genl_family,
			     0, upcall_info->cmd);
	upcall->dp_ifindex = dp_ifindex;

	nla = nla_nest_start(user_skb, OVS_PACKET_ATTR_KEY);
	ovs_flow_to_nlattrs(upcall_info->key, user_skb);
	nla_nest_end(user_skb, nla);

	if (upcall_info->userdata)
		nla_put_u64(user_skb, OVS_PACKET_ATTR_USERDATA,
			    nla_get_u64(upcall_info->userdata));

	nla = __nla_reserve(user_skb, OVS_PACKET_ATTR_PACKET, skb->len);

	skb_copy_and_csum_dev(skb, nla_data(nla));

	err = genlmsg_unicast(&init_net, user_skb, upcall_info->pid);

out:
	kfree_skb(nskb);
	return err;
}
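/* Editorial note on the EFBIG guard above: struct nlattr stores its length in
 * a 16-bit field (__u16 nla_len), so nla_attr_size(skb->len), i.e. header
 * plus unpadded payload, must fit in USHRT_MAX or __nla_reserve() would
 * truncate the length of the OVS_PACKET_ATTR_PACKET attribute.
 */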
/**
 * Return length of padding at the tail of the attribute.
 * @arg payload		Payload length of attribute.
 *
 * @code
 *  +------------------+- - -+- - - - - - - - - +- - -+
 *  | Attribute Header | Pad |     Payload      | Pad |
 *  +------------------+- - -+- - - - - - - - - +- - -+
 *                                               <--->
 * @endcode
 *
 * @return Length of padding in bytes.
 */
int nla_padlen(int payload)
{
	return nla_total_size(payload) - nla_attr_size(payload);
}
/**
 * Return size of attribute including padding.
 * @arg payload		Payload length of attribute.
 *
 * @code
 *   <----------- nla_total_size(payload) ----------->
 *  +------------------+- - -+- - - - - - - - - +- - -+
 *  | Attribute Header | Pad |     Payload      | Pad |
 *  +------------------+- - -+- - - - - - - - - +- - -+
 * @endcode
 *
 * @return Size of attribute in bytes.
 */
int nla_total_size(int payload)
{
	return NLA_ALIGN(nla_attr_size(payload));
}
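A quick sanity check of the arithmetic, as a userspace sketch. It assumes the
libnl library, whose nla_attr_size()/nla_total_size()/nla_padlen() match the
two functions above, with the usual 4-byte netlink header and 4-byte alignment
(build with the include path from `pkg-config --cflags libnl-3.0`).

#include <assert.h>
#include <netlink/attr.h>

int main(void)
{
	assert(nla_attr_size(5)  == 9);		/* 4-byte header + 5-byte payload */
	assert(nla_total_size(5) == 12);	/* 9 rounded up to a 4-byte boundary */
	assert(nla_padlen(5)     == 3);		/* 12 - 9 trailing pad bytes */

	assert(nla_padlen(8) == 0);		/* aligned payload needs no tail pad */
	return 0;
}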
static int nm_os_catch_qdisc(struct netmap_generic_adapter *gna, int intercept)
{
	struct netmap_adapter *na = &gna->up.up;
	struct ifnet *ifp = netmap_generic_getifp(gna);
	struct nm_generic_qdisc *qdiscopt = NULL;
	struct Qdisc *fqdisc = NULL;
	struct nlattr *nla = NULL;
	struct netdev_queue *txq;
	unsigned int i;

	if (!gna->txqdisc) {
		return 0;
	}

	if (intercept) {
		nla = kmalloc(nla_attr_size(sizeof(*qdiscopt)), GFP_KERNEL);
		if (!nla) {
			D("Failed to allocate netlink attribute");
			return ENOMEM;
		}
		nla->nla_type = RTM_NEWQDISC;
		nla->nla_len = nla_attr_size(sizeof(*qdiscopt));
		qdiscopt = (struct nm_generic_qdisc *)nla_data(nla);
		memset(qdiscopt, 0, sizeof(*qdiscopt));
		qdiscopt->limit = na->num_tx_desc;
	}

	if (ifp->flags & IFF_UP) {
		dev_deactivate(ifp);
	}

	/* Replace the current qdiscs with our own. */
	for (i = 0; i < ifp->real_num_tx_queues; i++) {
		struct Qdisc *nqdisc = NULL;
		struct Qdisc *oqdisc;
		int err;

		txq = netdev_get_tx_queue(ifp, i);

		if (intercept) {
			/* This takes a refcount to netmap module, alloc the
			 * qdisc and calls the init() op with NULL netlink
			 * attribute. */
			nqdisc = qdisc_create_dflt(
#ifndef NETMAP_LINUX_QDISC_CREATE_DFLT_3ARGS
					ifp,
#endif /* NETMAP_LINUX_QDISC_CREATE_DFLT_3ARGS */
					txq, &generic_qdisc_ops,
					TC_H_UNSPEC);
			if (!nqdisc) {
				D("Failed to create qdisc");
				goto qdisc_create;
			}
			fqdisc = fqdisc ?: nqdisc;

			/* Call the change() op passing a valid netlink
			 * attribute. This is used to set the queue idx. */
			qdiscopt->qidx = i;
			err = nqdisc->ops->change(nqdisc, nla);
			if (err) {
				D("Failed to init qdisc");
				goto qdisc_create;
			}
		}

		oqdisc = dev_graft_qdisc(txq, nqdisc);
		/* We can call this also with
		 * oqdisc == &noop_qdisc, since the noop
		 * qdisc has the TCQ_F_BUILTIN flag set,
		 * and so qdisc_destroy will skip it. */
		qdisc_destroy(oqdisc);
	}

	kfree(nla);

	if (ifp->qdisc) {
		qdisc_destroy(ifp->qdisc);
	}

	if (intercept) {
		atomic_inc(&fqdisc->refcnt);
		ifp->qdisc = fqdisc;
	} else {
		ifp->qdisc = &noop_qdisc;
	}

	if (ifp->flags & IFF_UP) {
		dev_activate(ifp);
	}

	return 0;

qdisc_create:
	if (nla) {
		kfree(nla);
	}

	nm_os_catch_qdisc(gna, 0);

	return -1;
}
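/* Editorial note: kmalloc'ing a buffer of nla_attr_size() bytes and filling
 * nla_type/nla_len by hand, as done above, is the standard way for kernel
 * code to call a qdisc's change() op directly, with no netlink socket
 * involved. fifo_set_limit() below uses the same trick.
 */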
static int fifo_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct tc_fifo_qopt opt = { .limit = sch->limit };

	if (nla_put(skb, TCA_OPTIONS, sizeof(opt), &opt))
		goto nla_put_failure;
	return skb->len;

nla_put_failure:
	return -1;
}

struct Qdisc_ops pfifo_qdisc_ops __read_mostly = {
	.id		=	"pfifo",
	.priv_size	=	0,
	.enqueue	=	pfifo_enqueue,
	.dequeue	=	qdisc_dequeue_head,
	.peek		=	qdisc_peek_head,
	.init		=	fifo_init,
	.reset		=	qdisc_reset_queue,
	.change		=	fifo_init,
	.dump		=	fifo_dump,
	.owner		=	THIS_MODULE,
};
EXPORT_SYMBOL(pfifo_qdisc_ops);

struct Qdisc_ops bfifo_qdisc_ops __read_mostly = {
	.id		=	"bfifo",
	.priv_size	=	0,
	.enqueue	=	bfifo_enqueue,
	.dequeue	=	qdisc_dequeue_head,
	.peek		=	qdisc_peek_head,
	.init		=	fifo_init,
	.reset		=	qdisc_reset_queue,
	.change		=	fifo_init,
	.dump		=	fifo_dump,
	.owner		=	THIS_MODULE,
};
EXPORT_SYMBOL(bfifo_qdisc_ops);

struct Qdisc_ops pfifo_head_drop_qdisc_ops __read_mostly = {
	.id		=	"pfifo_head_drop",
	.priv_size	=	0,
	.enqueue	=	pfifo_tail_enqueue,
	.dequeue	=	qdisc_dequeue_head,
	.peek		=	qdisc_peek_head,
	.init		=	fifo_init,
	.reset		=	qdisc_reset_queue,
	.change		=	fifo_init,
	.dump		=	fifo_dump,
	.owner		=	THIS_MODULE,
};

/* Pass size change message down to embedded FIFO */
int fifo_set_limit(struct Qdisc *q, unsigned int limit)
{
	struct nlattr *nla;
	int ret = -ENOMEM;

	/* Hack to avoid sending change message to non-FIFO */
	if (strncmp(q->ops->id + 1, "fifo", 4) != 0)
		return 0;

	nla = kmalloc(nla_attr_size(sizeof(struct tc_fifo_qopt)), GFP_KERNEL);
	if (nla) {
		nla->nla_type = RTM_NEWQDISC;
		nla->nla_len = nla_attr_size(sizeof(struct tc_fifo_qopt));
		((struct tc_fifo_qopt *)nla_data(nla))->limit = limit;

		ret = q->ops->change(q, nla);
		kfree(nla);
	}
	return ret;
}
EXPORT_SYMBOL(fifo_set_limit);

struct Qdisc *fifo_create_dflt(struct Qdisc *sch, struct Qdisc_ops *ops,
			       unsigned int limit)
{
	struct Qdisc *q;
	int err = -ENOMEM;

	q = qdisc_create_dflt(sch->dev_queue, ops, TC_H_MAKE(sch->handle, 1));
	if (q) {
		err = fifo_set_limit(q, limit);
		if (err < 0) {
			qdisc_destroy(q);
			q = NULL;
		}
	}

	return q ? : ERR_PTR(err);
}
EXPORT_SYMBOL(fifo_create_dflt);
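A usage sketch to close: parent_change() below is hypothetical, loosely
modeled on how TBF embeds a byte-limited FIFO as its child qdisc; only
fifo_create_dflt(), fifo_set_limit() and bfifo_qdisc_ops come from the code
above.

static int parent_change(struct Qdisc *sch, u32 limit)
{
	struct Qdisc *child;

	/* Create an embedded bfifo capped at 'limit' bytes;
	 * fifo_create_dflt() returns an ERR_PTR on failure. */
	child = fifo_create_dflt(sch, &bfifo_qdisc_ops, limit);
	if (IS_ERR(child))
		return PTR_ERR(child);

	/* Later resizes go through fifo_set_limit(), which fabricates the
	 * same kind of in-kernel nlattr shown above. */
	return fifo_set_limit(child, limit);
}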