/* Walk every bucket of the action hash table, releasing (deleting) each
 * entry, and append a nested attribute describing the operation:
 *   [a->order] { TCA_KIND = ops->kind, TCA_FCNT = number deleted }
 * Returns the number of deleted entries, or -EINVAL if the skb ran out
 * of tailroom (RTA_PUT jumps to rtattr_failure in that case). */
static int tcf_del_walker(struct sk_buff *skb, struct tc_action *a,
			  struct tcf_hashinfo *hinfo)
{
	struct tcf_common *p, *s_p;
	struct rtattr *r;
	int i = 0, n_i = 0;

	/* Remember where the nested attribute starts so its length can be
	 * patched in once the contents are known. */
	r = (struct rtattr *)skb->tail;
	RTA_PUT(skb, a->order, 0, NULL);
	RTA_PUT(skb, TCA_KIND, IFNAMSIZ, a->ops->kind);
	for (i = 0; i < (hinfo->hmask + 1); i++) {
		p = hinfo->htab[tcf_hash(i, hinfo->hmask)];
		while (p != NULL) {
			/* Save the next pointer first: tcf_hash_release()
			 * may free p. */
			s_p = p->tcfc_next;
			if (ACT_P_DELETED == tcf_hash_release(p, 0, hinfo))
				/* Drop the module reference the entry held. */
				module_put(a->ops->owner);
			n_i++;
			p = s_p;
		}
	}
	RTA_PUT(skb, TCA_FCNT, 4, &n_i);
	/* Close the nested attribute. */
	r->rta_len = skb->tail - (u8 *)r;

	return n_i;
rtattr_failure:
	/* Undo any partially written attributes. */
	skb_trim(skb, (u8 *)r - skb->data);
	return -EINVAL;
}
static int tcf_mirred_dump(struct sk_buff *skb, struct tc_action *a, int bind, int ref) { unsigned char *b = skb->tail; struct tc_mirred opt; struct tcf_mirred *p = PRIV(a, mirred); struct tcf_t t; opt.index = p->index; opt.action = p->action; opt.refcnt = p->refcnt - ref; opt.bindcnt = p->bindcnt - bind; opt.eaction = p->eaction; opt.ifindex = p->ifindex; DPRINTK("tcf_mirred_dump index %d action %d eaction %d ifindex %d\n", p->index, p->action, p->eaction, p->ifindex); RTA_PUT(skb, TCA_MIRRED_PARMS, sizeof(opt), &opt); t.install = jiffies_to_clock_t(jiffies - p->tm.install); t.lastuse = jiffies_to_clock_t(jiffies - p->tm.lastuse); t.expires = jiffies_to_clock_t(p->tm.expires); RTA_PUT(skb, TCA_MIRRED_TM, sizeof(t), &t); return skb->len; rtattr_failure: skb_trim(skb, b - skb->data); return -1; }
static int tbf_dump(struct Qdisc *sch, struct sk_buff *skb) { struct tbf_sched_data *q = qdisc_priv(sch); unsigned char *b = skb->tail; struct rtattr *rta; struct tc_tbf_qopt opt; rta = (struct rtattr*)b; RTA_PUT(skb, TCA_OPTIONS, 0, NULL); opt.limit = q->limit; opt.rate = q->R_tab->rate; if (q->P_tab) opt.peakrate = q->P_tab->rate; else memset(&opt.peakrate, 0, sizeof(opt.peakrate)); opt.mtu = q->mtu; opt.buffer = q->buffer; RTA_PUT(skb, TCA_TBF_PARMS, sizeof(opt), &opt); rta->rta_len = skb->tail - b; return skb->len; rtattr_failure: skb_trim(skb, b - skb->data); return -1; }
int tcf_police_dump(struct sk_buff *skb, struct tcf_police *p) { unsigned char *b = skb->tail; struct tc_police opt; opt.index = p->index; opt.action = p->action; opt.mtu = p->mtu; opt.burst = p->burst; if (p->R_tab) opt.rate = p->R_tab->rate; else memset(&opt.rate, 0, sizeof(opt.rate)); if (p->P_tab) opt.peakrate = p->P_tab->rate; else memset(&opt.peakrate, 0, sizeof(opt.peakrate)); RTA_PUT(skb, TCA_POLICE_TBF, sizeof(opt), &opt); if (p->result) RTA_PUT(skb, TCA_POLICE_RESULT, sizeof(int), &p->result); #ifdef CONFIG_NET_ESTIMATOR if (p->ewma_rate) RTA_PUT(skb, TCA_POLICE_AVRATE, 4, &p->ewma_rate); #endif return skb->len; rtattr_failure: skb_trim(skb, b - skb->data); return -1; }
static int tcf_gact_dump(struct sk_buff *skb, struct tc_action *a, int bind, int ref) { unsigned char *b = skb->tail; struct tc_gact opt; struct tcf_gact *p = PRIV(a, gact); struct tcf_t t; opt.index = p->index; opt.refcnt = p->refcnt - ref; opt.bindcnt = p->bindcnt - bind; opt.action = p->action; RTA_PUT(skb, TCA_GACT_PARMS, sizeof(opt), &opt); #ifdef CONFIG_GACT_PROB if (p->ptype) { struct tc_gact_p p_opt; p_opt.paction = p->paction; p_opt.pval = p->pval; p_opt.ptype = p->ptype; RTA_PUT(skb, TCA_GACT_PROB, sizeof(p_opt), &p_opt); } #endif t.install = jiffies_to_clock_t(jiffies - p->tm.install); t.lastuse = jiffies_to_clock_t(jiffies - p->tm.lastuse); t.expires = jiffies_to_clock_t(p->tm.expires); RTA_PUT(skb, TCA_GACT_TM, sizeof(t), &t); return skb->len; rtattr_failure: skb_trim(skb, b - skb->data); return -1; }
static int netem_dump(struct Qdisc *sch, struct sk_buff *skb) { const struct netem_sched_data *q = qdisc_priv(sch); unsigned char *b = skb->tail; struct rtattr *rta = (struct rtattr *) b; struct tc_netem_qopt qopt; struct tc_netem_corr cor; qopt.latency = q->latency; qopt.jitter = q->jitter; qopt.limit = q->limit; qopt.loss = q->loss; qopt.gap = q->gap; qopt.duplicate = q->duplicate; RTA_PUT(skb, TCA_OPTIONS, sizeof(qopt), &qopt); cor.delay_corr = q->delay_cor.rho; cor.loss_corr = q->loss_cor.rho; cor.dup_corr = q->dup_cor.rho; RTA_PUT(skb, TCA_NETEM_CORR, sizeof(cor), &cor); rta->rta_len = skb->tail - b; return skb->len; rtattr_failure: skb_trim(skb, b - skb->data); return -1; }
/* Dump a mirred action's configuration and timestamps into @skb.
 * The caller's own bind/ref counts on the action are subtracted so
 * userspace sees only foreign references. Returns skb->len on success
 * or -1 after trimming any partial output. */
static int tcf_mirred_dump(struct sk_buff *skb, struct tc_action *a, int bind, int ref)
{
	unsigned char *b = skb->tail;
	struct tcf_mirred *p = PRIV(a, mirred);
	struct tc_mirred opt = {
		.index = p->index,
		.action = p->action,
		.refcnt = p->refcnt - ref,
		.bindcnt = p->bindcnt - bind,
		.eaction = p->eaction,
		.ifindex = p->ifindex,
	};
	struct tcf_t t;

	DPRINTK("tcf_mirred_dump index %d action %d eaction %d ifindex %d\n",
		p->index, p->action, p->eaction, p->ifindex);
	RTA_PUT(skb, TCA_MIRRED_PARMS, sizeof(opt), &opt);
	/* Timestamps are reported as clock ticks relative to now. */
	t.install = jiffies_to_clock_t(jiffies - p->tm.install);
	t.lastuse = jiffies_to_clock_t(jiffies - p->tm.lastuse);
	t.expires = jiffies_to_clock_t(p->tm.expires);
	RTA_PUT(skb, TCA_MIRRED_TM, sizeof(t), &t);
	return skb->len;

rtattr_failure:
	/* Undo everything written by this function. */
	skb_trim(skb, b - skb->data);
	return -1;
}

/* Ops table registered with the tc action subsystem for "mirred". */
static struct tc_action_ops act_mirred_ops = {
	.kind = "mirred",
	.type = TCA_ACT_MIRRED,
	.capab = TCA_CAP_NONE,
	.owner = THIS_MODULE,
	.act = tcf_mirred,
	.dump = tcf_mirred_dump,
	.cleanup = tcf_mirred_cleanup,
	.lookup = tcf_hash_search,
	.init = tcf_mirred_init,
	.walk = tcf_generic_walker
};

MODULE_AUTHOR("Jamal Hadi Salim(2002)");
MODULE_DESCRIPTION("Device Mirror/redirect actions");
MODULE_LICENSE("GPL");

/* Register the mirred action when the module loads. */
static int __init mirred_init_module(void)
{
	printk("Mirror/redirect action on\n");
	return tcf_register_action(&act_mirred_ops);
}

/* Unregister the action on module unload. */
static void __exit mirred_cleanup_module(void)
{
	tcf_unregister_action(&act_mirred_ops);
}
/* Build a complete tcamsg netlink message in @skb containing the dump
 * of action chain @a nested under TCA_ACT_TAB.
 * Returns skb->len on success, -1 after trimming the partial message. */
static int tca_get_fill(struct sk_buff *skb, struct tc_action *a, u32 pid, u32 seq,
			u16 flags, int event, int bind, int ref)
{
	unsigned char *start = skb->tail;
	struct nlmsghdr *nlh;
	struct tcamsg *t;
	struct rtattr *nest;

	/* Netlink header plus the tcamsg payload. */
	nlh = NLMSG_NEW(skb, pid, seq, event, sizeof(*t), flags);
	t = NLMSG_DATA(nlh);
	t->tca_family = AF_UNSPEC;
	t->tca__pad1 = 0;
	t->tca__pad2 = 0;

	/* All actions live in one nested TCA_ACT_TAB attribute. */
	nest = (struct rtattr *)skb->tail;
	RTA_PUT(skb, TCA_ACT_TAB, 0, NULL);
	if (tcf_action_dump(skb, a, bind, ref) < 0)
		goto rtattr_failure;
	nest->rta_len = skb->tail - (u8 *)nest;

	nlh->nlmsg_len = skb->tail - start;
	return skb->len;

rtattr_failure:
nlmsg_failure:
	skb_trim(skb, start - skb->data);
	return -1;
}
int tcf_action_dump(struct sk_buff *skb, struct tc_action *act, int bind, int ref) { struct tc_action *a; int err = -EINVAL; unsigned char *b = skb->tail; struct rtattr *r ; while ((a = act) != NULL) { r = (struct rtattr*) skb->tail; act = a->next; RTA_PUT(skb, a->order, 0, NULL); err = tcf_action_dump_1(skb, a, bind, ref); if (err < 0) goto errout; r->rta_len = skb->tail - (u8*)r; } return 0; rtattr_failure: err = -EINVAL; errout: skb_trim(skb, b - skb->data); return err; }
/*
 * Fill a rtnetlink message with our event data.
 * Note that we propagate only the specified event and don't dump the
 * current wireless config. Dumping the wireless config is far too
 * expensive (for each parameter, the driver needs to query the hardware).
 */
static inline int rtnetlink_fill_iwinfo(struct sk_buff * skb,
					struct net_device * dev,
					int type, char * event, int event_len)
{
	struct ifinfomsg *r;
	struct nlmsghdr *nlh;
	unsigned char *b = skb->tail;

	nlh = NLMSG_PUT(skb, 0, 0, type, sizeof(*r));
	r = NLMSG_DATA(nlh);
	r->ifi_family = AF_UNSPEC;
	r->__ifi_pad = 0;
	r->ifi_type = dev->type;
	r->ifi_index = dev->ifindex;
	r->ifi_flags = dev->flags;
	r->ifi_change = 0;	/* Wireless changes don't affect those flags */

	/* Add the wireless events in the netlink packet */
	RTA_PUT(skb, IFLA_WIRELESS, event_len, event);

	nlh->nlmsg_len = skb->tail - b;
	return skb->len;

nlmsg_failure:
rtattr_failure:
	/* Trim off the partially built message. */
	skb_trim(skb, b - skb->data);
	return -1;
}
/* Build an RTM route message describing the DECnet route cache entry
 * attached to @skb's dst. Returns skb->len on success, -1 after
 * trimming the partially built message. */
static int dn_rt_fill_info(struct sk_buff *skb, u32 pid, u32 seq,
			   int event, int nowait, unsigned int flags)
{
	struct dn_route *rt = (struct dn_route *)skb->dst;
	struct rtmsg *r;
	struct nlmsghdr *nlh;
	unsigned char *b = skb->tail;
	struct rta_cacheinfo ci;

	nlh = NLMSG_NEW(skb, pid, seq, event, sizeof(*r), flags);
	r = NLMSG_DATA(nlh);
	r->rtm_family = AF_DECnet;
	r->rtm_dst_len = 16;	/* addresses are dumped as 2-byte values below */
	r->rtm_src_len = 0;
	r->rtm_tos = 0;
	r->rtm_table = RT_TABLE_MAIN;
	r->rtm_type = rt->rt_type;
	/* Preserve only the upper flag bits and mark the entry cloned. */
	r->rtm_flags = (rt->rt_flags & ~0xFFFF) | RTM_F_CLONED;
	r->rtm_scope = RT_SCOPE_UNIVERSE;
	r->rtm_protocol = RTPROT_UNSPEC;
	if (rt->rt_flags & RTCF_NOTIFY)
		r->rtm_flags |= RTM_F_NOTIFY;
	RTA_PUT(skb, RTA_DST, 2, &rt->rt_daddr);
	if (rt->fl.fld_src) {
		r->rtm_src_len = 16;
		RTA_PUT(skb, RTA_SRC, 2, &rt->fl.fld_src);
	}
	if (rt->u.dst.dev)
		RTA_PUT(skb, RTA_OIF, sizeof(int), &rt->u.dst.dev->ifindex);
	/*
	 * Note to self - change this if input routes reverse direction when
	 * they deal only with inputs and not with replies like they do
	 * currently.
	 */
	RTA_PUT(skb, RTA_PREFSRC, 2, &rt->rt_local_src);
	if (rt->rt_daddr != rt->rt_gateway)
		RTA_PUT(skb, RTA_GATEWAY, 2, &rt->rt_gateway);
	if (rtnetlink_put_metrics(skb, rt->u.dst.metrics) < 0)
		goto rtattr_failure;
	/* Cache bookkeeping (usage counts, age, expiry) for userspace. */
	ci.rta_lastuse = jiffies_to_clock_t(jiffies - rt->u.dst.lastuse);
	ci.rta_used = rt->u.dst.__use;
	ci.rta_clntref = atomic_read(&rt->u.dst.__refcnt);
	if (rt->u.dst.expires)
		ci.rta_expires = jiffies_to_clock_t(rt->u.dst.expires - jiffies);
	else
		ci.rta_expires = 0;
	ci.rta_error = rt->u.dst.error;
	ci.rta_id = ci.rta_ts = ci.rta_tsage = 0;
	RTA_PUT(skb, RTA_CACHEINFO, sizeof(ci), &ci);
	if (rt->fl.iif)
		RTA_PUT(skb, RTA_IIF, sizeof(int), &rt->fl.iif);
	nlh->nlmsg_len = skb->tail - b;
	return skb->len;

nlmsg_failure:
rtattr_failure:
	skb_trim(skb, b - skb->data);
	return -1;
}
/* Emit the non-zero route metrics as a nested RTA_METRICS attribute.
 * The nest is removed entirely if no metric was written.
 * Returns 0 on success, -1 after trimming on overflow. */
int rtnetlink_put_metrics(struct sk_buff *skb, u32 *metrics)
{
	struct rtattr *nest = (struct rtattr *)skb->tail;
	int i;

	RTA_PUT(skb, RTA_METRICS, 0, NULL);
	for (i = 0; i < RTAX_MAX; i++) {
		/* Attribute type is the metric index + 1. */
		if (metrics[i])
			RTA_PUT(skb, i + 1, sizeof(u32), &metrics[i]);
	}
	nest->rta_len = skb->tail - (u8 *)nest;

	/* An empty nest carries no information: drop it again. */
	if (nest->rta_len == RTA_LENGTH(0))
		skb_trim(skb, (u8 *)nest - skb->data);
	return 0;

rtattr_failure:
	skb_trim(skb, (u8 *)nest - skb->data);
	return -1;
}
/* Append one attribute of @size bytes from @buf to the dump skb.
 * Returns 0 on success with d->lock still held. If the skb runs out of
 * tailroom, the statistics lock is dropped HERE and -1 is returned, so
 * the caller must not unlock again on the failure path. */
static inline int gnet_stats_copy(struct gnet_dump *d, int type, void *buf, int size)
{
	RTA_PUT(d->skb, type, size, buf);
	return 0;

rtattr_failure:
	spin_unlock_bh(d->lock);
	return -1;
}
static int pfifo_fast_dump(struct Qdisc *qdisc, struct sk_buff *skb) { struct tc_prio_qopt opt = { .bands = PFIFO_FAST_BANDS }; memcpy(&opt.priomap, prio2band, TC_PRIO_MAX+1); RTA_PUT(skb, TCA_OPTIONS, sizeof(opt), &opt); return skb->len; rtattr_failure: return -1; }
/* Fill one RTM link message describing @dev.
 * @change is the mask of ifi_flags the caller reports as changed.
 * Returns skb->len on success, -1 after trimming partial output. */
static int rtnetlink_fill_ifinfo(struct sk_buff *skb, struct net_device *dev,
				 int type, u32 pid, u32 seq, u32 change)
{
	struct ifinfomsg *r;
	struct nlmsghdr *nlh;
	unsigned char *b = skb->tail;

	nlh = NLMSG_PUT(skb, pid, seq, type, sizeof(*r));
	/* NOTE(review): pid != 0 appears to identify a dump reply here,
	 * hence the multipart flag — confirm against callers. */
	if (pid)
		nlh->nlmsg_flags |= NLM_F_MULTI;
	r = NLMSG_DATA(nlh);
	r->ifi_family = AF_UNSPEC;
	r->ifi_type = dev->type;
	r->ifi_index = dev->ifindex;
	r->ifi_flags = dev->flags;
	r->ifi_change = change;
	/* Reflect the live operational state in IFF_RUNNING. */
	if (!netif_running(dev) || !netif_carrier_ok(dev))
		r->ifi_flags &= ~IFF_RUNNING;
	else
		r->ifi_flags |= IFF_RUNNING;
	RTA_PUT(skb, IFLA_IFNAME, strlen(dev->name)+1, dev->name);
	if (dev->addr_len) {
		RTA_PUT(skb, IFLA_ADDRESS, dev->addr_len, dev->dev_addr);
		RTA_PUT(skb, IFLA_BROADCAST, dev->addr_len, dev->broadcast);
	}
	if (1) {	/* scoped block for the temporary mtu copy */
		unsigned mtu = dev->mtu;
		RTA_PUT(skb, IFLA_MTU, sizeof(mtu), &mtu);
	}
	if (dev->ifindex != dev->iflink)
		RTA_PUT(skb, IFLA_LINK, sizeof(int), &dev->iflink);
	if (dev->qdisc_sleeping)
		RTA_PUT(skb, IFLA_QDISC,
			strlen(dev->qdisc_sleeping->ops->id) + 1,
			dev->qdisc_sleeping->ops->id);
	if (dev->master)
		RTA_PUT(skb, IFLA_MASTER, sizeof(int), &dev->master->ifindex);
	if (dev->get_stats) {
		struct net_device_stats *stats = dev->get_stats(dev);
		if (stats)
			RTA_PUT(skb, IFLA_STATS, sizeof(*stats), stats);
	}
	nlh->nlmsg_len = skb->tail - b;
	return skb->len;

nlmsg_failure:
rtattr_failure:
	skb_trim(skb, b - skb->data);
	return -1;
}
/* Dump a NAT action's parameters and timestamps.
 * The tc_nat struct is heap-allocated (GFP_ATOMIC — see comment) and
 * freed on both the success and failure paths.
 * Returns skb->len on success, -ENOBUFS if the temporary allocation
 * fails, or -1 after trimming partial attribute output. */
static int tcf_nat_dump(struct sk_buff *skb, struct tc_action *a,
			int bind, int ref)
{
	unsigned char *b = skb_tail_pointer(skb);
	struct tcf_nat *p = a->priv;
	struct tc_nat *opt;
	struct tcf_t t;
	int s;

	s = sizeof(*opt);

	/* netlink spinlocks held above us - must use ATOMIC */
	opt = kzalloc(s, GFP_ATOMIC);
	if (unlikely(!opt))
		return -ENOBUFS;

	opt->old_addr = p->old_addr;
	opt->new_addr = p->new_addr;
	opt->mask = p->mask;
	opt->flags = p->flags;
	opt->index = p->tcf_index;
	opt->action = p->tcf_action;
	/* Hide the caller's own references from the reported counts. */
	opt->refcnt = p->tcf_refcnt - ref;
	opt->bindcnt = p->tcf_bindcnt - bind;

	RTA_PUT(skb, TCA_NAT_PARMS, s, opt);
	t.install = jiffies_to_clock_t(jiffies - p->tcf_tm.install);
	t.lastuse = jiffies_to_clock_t(jiffies - p->tcf_tm.lastuse);
	t.expires = jiffies_to_clock_t(p->tcf_tm.expires);
	RTA_PUT(skb, TCA_NAT_TM, sizeof(t), &t);

	kfree(opt);
	return skb->len;

rtattr_failure:
	nlmsg_trim(skb, b);
	kfree(opt);
	return -1;
}
static int pfifo_fast_dump(struct Qdisc *qdisc, struct sk_buff *skb) { unsigned char *b = skb->tail; struct tc_prio_qopt opt; opt.bands = 3; memcpy(&opt.priomap, prio2band, TC_PRIO_MAX+1); RTA_PUT(skb, TCA_OPTIONS, sizeof(opt), &opt); return skb->len; rtattr_failure: skb_trim(skb, b - skb->data); return -1; }
/* Dump a single action: its kind, statistics and a nested TCA_OPTIONS
 * attribute filled in by the action's own dump callback.
 * Returns the (positive) result of the options dump on success; on any
 * failure -1 is returned and everything written here is trimmed.
 * Note: a result <= 0 from tcf_action_dump_old deliberately falls
 * through into the failure path below. */
int tcf_action_dump_1(struct sk_buff *skb, struct tc_action *a, int bind, int ref)
{
	int err = -EINVAL;
	unsigned char *b = skb->tail;
	struct rtattr *r;

	/* Actions without a dump callback cannot be serialized. */
	if (a->ops == NULL || a->ops->dump == NULL)
		return err;

	RTA_PUT(skb, TCA_KIND, IFNAMSIZ, a->ops->kind);
	if (tcf_action_copy_stats(skb, a, 0))
		goto rtattr_failure;
	r = (struct rtattr *)skb->tail;
	RTA_PUT(skb, TCA_OPTIONS, 0, NULL);
	if ((err = tcf_action_dump_old(skb, a, bind, ref)) > 0) {
		/* Close the options nest and report success. */
		r->rta_len = skb->tail - (u8 *)r;
		return err;
	}
	/* err <= 0: fall through and undo the partial dump. */
rtattr_failure:
	skb_trim(skb, b - skb->data);
	return -1;
}
static int fifo_dump(struct Qdisc *sch, struct sk_buff *skb) { struct fifo_sched_data *q = (void*)sch->data; unsigned char *b = skb->tail; struct tc_fifo_qopt opt; opt.limit = q->limit; RTA_PUT(skb, TCA_OPTIONS, sizeof(opt), &opt); return skb->len; rtattr_failure: skb_trim(skb, b - skb->data); return -1; }
static int tcf_mirred_dump(struct sk_buff *skb, struct tc_action *a, int bind, int ref) { unsigned char *b = skb_tail_pointer(skb); struct tcf_mirred *m = a->priv; struct tc_mirred opt; struct tcf_t t; opt.index = m->tcf_index; opt.action = m->tcf_action; opt.refcnt = m->tcf_refcnt - ref; opt.bindcnt = m->tcf_bindcnt - bind; opt.eaction = m->tcfm_eaction; opt.ifindex = m->tcfm_ifindex; RTA_PUT(skb, TCA_MIRRED_PARMS, sizeof(opt), &opt); t.install = jiffies_to_clock_t(jiffies - m->tcf_tm.install); t.lastuse = jiffies_to_clock_t(jiffies - m->tcf_tm.lastuse); t.expires = jiffies_to_clock_t(m->tcf_tm.expires); RTA_PUT(skb, TCA_MIRRED_TM, sizeof(t), &t); return skb->len; rtattr_failure: nlmsg_trim(skb, b); return -1; }
static inline int tcf_simp_dump(struct sk_buff *skb, struct tc_action *a, int bind, int ref) { unsigned char *b = skb->tail; struct tcf_defact *d = a->priv; struct tc_defact opt; struct tcf_t t; opt.index = d->tcf_index; opt.refcnt = d->tcf_refcnt - ref; opt.bindcnt = d->tcf_bindcnt - bind; opt.action = d->tcf_action; RTA_PUT(skb, TCA_DEF_PARMS, sizeof(opt), &opt); RTA_PUT(skb, TCA_DEF_DATA, d->tcfd_datalen, d->tcfd_defdata); t.install = jiffies_to_clock_t(jiffies - d->tcf_tm.install); t.lastuse = jiffies_to_clock_t(jiffies - d->tcf_tm.lastuse); t.expires = jiffies_to_clock_t(d->tcf_tm.expires); RTA_PUT(skb, TCA_DEF_TM, sizeof(t), &t); return skb->len; rtattr_failure: skb_trim(skb, b - skb->data); return -1; }
static int prio_dump(struct Qdisc *sch, struct sk_buff *skb) { struct prio_sched_data *q = (struct prio_sched_data *)sch->data; unsigned char *b = skb->tail; struct tc_prio_qopt opt; opt.bands = q->bands; memcpy(&opt.priomap, q->prio2band, TC_PRIO_MAX+1); RTA_PUT(skb, TCA_OPTIONS, sizeof(opt), &opt); return skb->len; rtattr_failure: skb_trim(skb, b - skb->data); return -1; }
/* Walk the police action hash table for RTM_GETACTION dumps and
 * RTM_DELACTION listing. Iteration resumes at cb->args[0]; each visited
 * action is emitted as a nested attribute keyed by its walk position.
 * Returns the number of actions emitted in this pass. */
static int tcf_generic_walker(struct sk_buff *skb, struct netlink_callback *cb,
			      int type, struct tc_action *a)
{
	struct tcf_police *p;
	int err = 0, index = -1, i = 0, s_i = 0, n_i = 0;
	struct rtattr *r;

	read_lock(&police_lock);

	s_i = cb->args[0];	/* resume point from the previous dump call */

	for (i = 0; i < MY_TAB_SIZE; i++) {
		p = tcf_police_ht[tcf_police_hash(i)];

		for (; p; p = p->next) {
			index++;
			if (index < s_i)
				continue;	/* already dumped last time */
			a->priv = p;
			a->order = index;
			r = (struct rtattr *)skb->tail;
			RTA_PUT(skb, a->order, 0, NULL);
			/* Deletions dump with ref accounting (ref = 1). */
			if (type == RTM_DELACTION)
				err = tcf_action_dump_1(skb, a, 0, 1);
			else
				err = tcf_action_dump_1(skb, a, 0, 0);
			if (err < 0) {
				/* skb full: back out this entry and stop so
				 * the next call resumes here. */
				index--;
				skb_trim(skb, (u8 *)r - skb->data);
				goto done;
			}
			r->rta_len = skb->tail - (u8 *)r;
			n_i++;
		}
	}
done:
	read_unlock(&police_lock);
	if (n_i)
		cb->args[0] += n_i;
	return n_i;

rtattr_failure:
	/* RTA_PUT overflowed: drop the partial attribute and finish. */
	skb_trim(skb, (u8 *)r - skb->data);
	goto done;
}
/* Walk the police action hash table (masked-hash variant) for
 * RTM_GETACTION dumps and RTM_DELACTION listing. Iteration resumes at
 * cb->args[0]; each visited action becomes a nested attribute keyed by
 * its walk position. Returns the number of actions emitted. */
static int tcf_act_police_walker(struct sk_buff *skb, struct netlink_callback *cb,
				 int type, struct tc_action *a)
{
	struct tcf_common *p;
	int err = 0, index = -1, i = 0, s_i = 0, n_i = 0;
	struct rtattr *r;

	read_lock_bh(&police_lock);

	s_i = cb->args[0];	/* resume point from the previous dump call */

	for (i = 0; i < (POL_TAB_MASK + 1); i++) {
		p = tcf_police_ht[tcf_hash(i, POL_TAB_MASK)];

		for (; p; p = p->tcfc_next) {
			index++;
			if (index < s_i)
				continue;	/* already dumped last time */
			a->priv = p;
			a->order = index;
			r = (struct rtattr *)skb_tail_pointer(skb);
			RTA_PUT(skb, a->order, 0, NULL);
			/* Deletions dump with ref accounting (ref = 1). */
			if (type == RTM_DELACTION)
				err = tcf_action_dump_1(skb, a, 0, 1);
			else
				err = tcf_action_dump_1(skb, a, 0, 0);
			if (err < 0) {
				/* skb full: back out this entry and stop so
				 * the next call resumes here. */
				index--;
				nlmsg_trim(skb, r);
				goto done;
			}
			r->rta_len = skb_tail_pointer(skb) - (u8 *)r;
			n_i++;
		}
	}
done:
	read_unlock_bh(&police_lock);
	if (n_i)
		cb->args[0] += n_i;
	return n_i;

rtattr_failure:
	/* RTA_PUT overflowed: drop the partial attribute and finish. */
	nlmsg_trim(skb, r);
	goto done;
}
/* Walk @hinfo's action hash table for a netlink dump. Iteration resumes
 * at cb->args[0] and stops after TCA_ACT_MAX_PRIO entries per pass.
 * Note the attribute key is n_i (the per-pass count), unlike the other
 * walkers which key on the absolute index.
 * Returns the number of actions emitted in this pass. */
static int tcf_dump_walker(struct sk_buff *skb, struct netlink_callback *cb,
			   struct tc_action *a, struct tcf_hashinfo *hinfo)
{
	struct tcf_common *p;
	int err = 0, index = -1, i = 0, s_i = 0, n_i = 0;
	struct rtattr *r;

	read_lock(hinfo->lock);

	s_i = cb->args[0];	/* resume point from the previous dump call */

	for (i = 0; i < (hinfo->hmask + 1); i++) {
		p = hinfo->htab[tcf_hash(i, hinfo->hmask)];

		for (; p; p = p->tcfc_next) {
			index++;
			if (index < s_i)
				continue;	/* already dumped last time */
			a->priv = p;
			a->order = n_i;
			r = (struct rtattr *)skb->tail;
			RTA_PUT(skb, a->order, 0, NULL);
			err = tcf_action_dump_1(skb, a, 0, 0);
			if (err < 0) {
				/* skb full: back out this entry and stop so
				 * the next call resumes here. */
				index--;
				skb_trim(skb, (u8 *)r - skb->data);
				goto done;
			}
			r->rta_len = skb->tail - (u8 *)r;
			n_i++;
			/* Cap the number of actions per dump message. */
			if (n_i >= TCA_ACT_MAX_PRIO)
				goto done;
		}
	}
done:
	read_unlock(hinfo->lock);
	if (n_i)
		cb->args[0] += n_i;
	return n_i;

rtattr_failure:
	/* RTA_PUT overflowed: drop the partial attribute and finish. */
	skb_trim(skb, (u8 *)r - skb->data);
	goto done;
}
/* Allocate an skb, build a tcamsg notification containing the dump of
 * action chain @a (nested under TCA_ACT_TAB) and send it via rtnetlink.
 * Returns 0 on success, -ENOBUFS if no skb could be allocated, or -1
 * if the message did not fit (the skb is freed here in that case;
 * rtnetlink_send consumes it otherwise). */
static int tcf_add_notify(struct tc_action *a, u32 pid, u32 seq,
			  int event, u16 flags)
{
	struct tcamsg *t;
	struct nlmsghdr *nlh;
	struct sk_buff *skb;
	struct rtattr *x;
	unsigned char *b;
	int err = 0;

	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!skb)
		return -ENOBUFS;

	b = (unsigned char *)skb->tail;

	nlh = NLMSG_NEW(skb, pid, seq, event, sizeof(*t), flags);
	t = NLMSG_DATA(nlh);
	t->tca_family = AF_UNSPEC;
	t->tca__pad1 = 0;
	t->tca__pad2 = 0;

	/* Nest the action dump under TCA_ACT_TAB. */
	x = (struct rtattr *)skb->tail;
	RTA_PUT(skb, TCA_ACT_TAB, 0, NULL);

	if (tcf_action_dump(skb, a, 0, 0) < 0)
		goto rtattr_failure;

	x->rta_len = skb->tail - (u8 *)x;

	nlh->nlmsg_len = skb->tail - b;
	NETLINK_CB(skb).dst_group = RTNLGRP_TC;

	err = rtnetlink_send(skb, pid, RTNLGRP_TC, flags & NLM_F_ECHO);
	if (err > 0)
		err = 0;
	return err;

rtattr_failure:
nlmsg_failure:
	kfree_skb(skb);
	return -1;
}
static int sfq_dump(struct Qdisc *sch, struct sk_buff *skb) { struct sfq_sched_data *q = qdisc_priv(sch); unsigned char *b = skb_tail_pointer(skb); struct tc_sfq_qopt opt; opt.quantum = q->quantum; opt.perturb_period = q->perturb_period; opt.limit = q->limit; opt.divisor = q->hash_divisor; opt.flows = q->depth; opt.hash_kind = q->hash_kind; RTA_PUT(skb, TCA_OPTIONS, sizeof(opt), &opt); return skb->len; rtattr_failure: nlmsg_trim(skb, b); return -1; }
static int sfq_dump(struct Qdisc *sch, struct sk_buff *skb) { struct sfq_sched_data *q = (struct sfq_sched_data *)sch->data; unsigned char *b = skb->tail; struct tc_sfq_qopt opt; opt.quantum = q->quantum; opt.perturb_period = q->perturb_period/HZ; opt.limit = q->limit; opt.divisor = SFQ_HASH_DIVISOR; opt.flows = q->limit; RTA_PUT(skb, TCA_OPTIONS, sizeof(opt), &opt); return skb->len; rtattr_failure: skb_trim(skb, b - skb->data); return -1; }
/* Dump an ipt action: a user-visible copy of the iptables target plus
 * index, hook, ref/bind counters, table name and timestamps.
 * Returns skb->len on success, -1 on allocation failure or overflow. */
static int tcf_ipt_dump(struct sk_buff *skb, struct tc_action *a, int bind, int ref)
{
	struct ipt_entry_target *t;
	struct tcf_t tm;
	struct tc_cnt c;
	unsigned char *b = skb->tail;
	struct tcf_ipt *p = PRIV(a, ipt);

	/* For simple targets the kernel size equals the user size and the
	 * user name equals the target name; to be foolproof, do not assume
	 * this - copy the target and patch the user-visible name. */
	t = kmalloc(p->t->u.user.target_size, GFP_ATOMIC);
	if (t == NULL)
		goto rtattr_failure;	/* kfree(NULL) below is a no-op */

	c.bindcnt = p->bindcnt - bind;
	c.refcnt = p->refcnt - ref;

	memcpy(t, p->t, p->t->u.user.target_size);
	strcpy(t->u.user.name, p->t->u.kernel.target->name);

	DPRINTK("\ttcf_ipt_dump tablename %s length %d\n", p->tname,
		strlen(p->tname));
	DPRINTK("\tdump target name %s size %d size user %d "
		"data[0] %x data[1] %x\n", p->t->u.kernel.target->name,
		p->t->u.target_size, p->t->u.user.target_size,
		p->t->data[0], p->t->data[1]);
	RTA_PUT(skb, TCA_IPT_TARG, p->t->u.user.target_size, t);
	RTA_PUT(skb, TCA_IPT_INDEX, 4, &p->index);
	RTA_PUT(skb, TCA_IPT_HOOK, 4, &p->hook);
	RTA_PUT(skb, TCA_IPT_CNT, sizeof(struct tc_cnt), &c);
	RTA_PUT(skb, TCA_IPT_TABLE, IFNAMSIZ, p->tname);

	/* Timestamps are reported as clock ticks relative to now. */
	tm.install = jiffies_to_clock_t(jiffies - p->tm.install);
	tm.lastuse = jiffies_to_clock_t(jiffies - p->tm.lastuse);
	tm.expires = jiffies_to_clock_t(p->tm.expires);
	RTA_PUT(skb, TCA_IPT_TM, sizeof (tm), &tm);

	kfree(t);
	return skb->len;

rtattr_failure:
	skb_trim(skb, b - skb->data);
	kfree(t);
	return -1;
}
/* Dump an ipt action: a user-visible copy of the iptables target plus
 * index, hook, ref/bind counters, table name and timestamps.
 * Returns skb->len on success, -1 on allocation failure or overflow. */
static int tcf_ipt_dump(struct sk_buff *skb, struct tc_action *a, int bind, int ref)
{
	unsigned char *b = skb->tail;
	struct tcf_ipt *ipt = a->priv;
	struct ipt_entry_target *t;
	struct tcf_t tm;
	struct tc_cnt c;

	/* For simple targets the kernel size equals the user size and the
	 * user name equals the target name; to be foolproof, do not assume
	 * this - duplicate the target and patch the user-visible name. */
	t = kmemdup(ipt->tcfi_t, ipt->tcfi_t->u.user.target_size, GFP_ATOMIC);
	if (unlikely(!t))
		goto rtattr_failure;	/* kfree(NULL) below is a no-op */

	c.bindcnt = ipt->tcf_bindcnt - bind;
	c.refcnt = ipt->tcf_refcnt - ref;
	strcpy(t->u.user.name, ipt->tcfi_t->u.kernel.target->name);

	RTA_PUT(skb, TCA_IPT_TARG, ipt->tcfi_t->u.user.target_size, t);
	RTA_PUT(skb, TCA_IPT_INDEX, 4, &ipt->tcf_index);
	RTA_PUT(skb, TCA_IPT_HOOK, 4, &ipt->tcfi_hook);
	RTA_PUT(skb, TCA_IPT_CNT, sizeof(struct tc_cnt), &c);
	RTA_PUT(skb, TCA_IPT_TABLE, IFNAMSIZ, ipt->tcfi_tname);

	/* Timestamps are reported as clock ticks relative to now. */
	tm.install = jiffies_to_clock_t(jiffies - ipt->tcf_tm.install);
	tm.lastuse = jiffies_to_clock_t(jiffies - ipt->tcf_tm.lastuse);
	tm.expires = jiffies_to_clock_t(ipt->tcf_tm.expires);
	RTA_PUT(skb, TCA_IPT_TM, sizeof (tm), &tm);

	kfree(t);
	return skb->len;

rtattr_failure:
	skb_trim(skb, b - skb->data);
	kfree(t);
	return -1;
}