static int sfq_msg_parser(struct rtnl_qdisc *qdisc)
{
	struct rtnl_sfq *sfq;
	struct tc_sfq_qopt *opts;

	if (!(qdisc->ce_mask & TCA_ATTR_OPTS))
		return 0;

	if (qdisc->q_opts->d_size < sizeof(*opts))
		return nl_error(EINVAL, "SFQ specific options size mismatch");

	sfq = sfq_alloc(qdisc);
	if (!sfq)
		return nl_errno(ENOMEM);

	opts = (struct tc_sfq_qopt *) qdisc->q_opts->d_data;

	sfq->qs_quantum = opts->quantum;
	sfq->qs_perturb = opts->perturb_period;
	sfq->qs_limit = opts->limit;
	sfq->qs_divisor = opts->divisor;
	sfq->qs_flows = opts->flows;

	sfq->qs_mask = (SCH_SFQ_ATTR_QUANTUM | SCH_SFQ_ATTR_PERTURB |
			SCH_SFQ_ATTR_LIMIT | SCH_SFQ_ATTR_DIVISOR |
			SCH_SFQ_ATTR_FLOWS);

	return 0;
}
static int dsmark_qdisc_msg_parser(struct rtnl_qdisc *qdisc)
{
	int err;
	struct nlattr *tb[TCA_DSMARK_MAX + 1];
	struct rtnl_dsmark_qdisc *dsmark;

	err = tca_parse(tb, TCA_DSMARK_MAX, (struct rtnl_tca *) qdisc,
			dsmark_policy);
	if (err < 0)
		return err;

	dsmark = dsmark_qdisc_alloc(qdisc);
	if (!dsmark)
		return nl_errno(ENOMEM);

	if (tb[TCA_DSMARK_INDICES]) {
		dsmark->qdm_indices = nla_get_u16(tb[TCA_DSMARK_INDICES]);
		dsmark->qdm_mask |= SCH_DSMARK_ATTR_INDICES;
	}

	if (tb[TCA_DSMARK_DEFAULT_INDEX]) {
		dsmark->qdm_default_index =
				nla_get_u16(tb[TCA_DSMARK_DEFAULT_INDEX]);
		dsmark->qdm_mask |= SCH_DSMARK_ATTR_DEFAULT_INDEX;
	}

	if (tb[TCA_DSMARK_SET_TC_INDEX]) {
		dsmark->qdm_set_tc_index = 1;
		dsmark->qdm_mask |= SCH_DSMARK_ATTR_SET_TC_INDEX;
	}

	return 0;
}
static int cbq_msg_parser(struct rtnl_tca *tca)
{
	struct nlattr *tb[TCA_CBQ_MAX + 1];
	struct rtnl_cbq *cbq;
	int err;

	err = tca_parse(tb, TCA_CBQ_MAX, tca, cbq_policy);
	if (err < 0)
		return err;

	cbq = cbq_alloc(tca);
	if (!cbq)
		return nl_errno(ENOMEM);

	nla_memcpy(&cbq->cbq_lss, tb[TCA_CBQ_LSSOPT], sizeof(cbq->cbq_lss));
	nla_memcpy(&cbq->cbq_rate, tb[TCA_CBQ_RATE], sizeof(cbq->cbq_rate));
	nla_memcpy(&cbq->cbq_wrr, tb[TCA_CBQ_WRROPT], sizeof(cbq->cbq_wrr));
	nla_memcpy(&cbq->cbq_fopt, tb[TCA_CBQ_FOPT], sizeof(cbq->cbq_fopt));
	nla_memcpy(&cbq->cbq_ovl, tb[TCA_CBQ_OVL_STRATEGY],
		   sizeof(cbq->cbq_ovl));
	nla_memcpy(&cbq->cbq_police, tb[TCA_CBQ_POLICE],
		   sizeof(cbq->cbq_police));

	return 0;
}
/**
 * Get quantum of SFQ qdisc.
 * @arg qdisc		SFQ qdisc.
 * @return Quantum in bytes or a negative error code.
 */
int rtnl_sfq_get_quantum(struct rtnl_qdisc *qdisc)
{
	struct rtnl_sfq *sfq;

	sfq = sfq_qdisc(qdisc);
	if (sfq && sfq->qs_mask & SCH_SFQ_ATTR_QUANTUM)
		return sfq->qs_quantum;
	else
		return nl_errno(ENOENT);
}
/**
 * Get limit of a FIFO qdisc.
 * @arg qdisc		FIFO qdisc.
 * @return Numeric limit or a negative error code.
 */
int rtnl_qdisc_fifo_get_limit(struct rtnl_qdisc *qdisc)
{
	struct rtnl_fifo *fifo;

	fifo = fifo_qdisc(qdisc);
	if (fifo && fifo->qf_mask & SCH_FIFO_ATTR_LIMIT)
		return fifo->qf_limit;
	else
		return nl_errno(ENOENT);
}
/**
 * Get limit of SFQ qdisc.
 * @arg qdisc		SFQ qdisc.
 * @return Limit or a negative error code.
 */
int rtnl_sfq_get_limit(struct rtnl_qdisc *qdisc)
{
	struct rtnl_sfq *sfq;

	sfq = sfq_qdisc(qdisc);
	if (sfq && sfq->qs_mask & SCH_SFQ_ATTR_LIMIT)
		return sfq->qs_limit;
	else
		return nl_errno(ENOENT);
}
/**
 * Get perturbation interval of SFQ qdisc.
 * @arg qdisc		SFQ qdisc.
 * @return Perturbation interval in seconds or a negative error code.
 */
int rtnl_sfq_get_perturb(struct rtnl_qdisc *qdisc)
{
	struct rtnl_sfq *sfq;

	sfq = sfq_qdisc(qdisc);
	if (sfq && sfq->qs_mask & SCH_SFQ_ATTR_PERTURB)
		return sfq->qs_perturb;
	else
		return nl_errno(ENOENT);
}
/**
 * Get divisor of SFQ qdisc.
 * @arg qdisc		SFQ qdisc.
 * @return Divisor in number of entries or a negative error code.
 */
int rtnl_sfq_get_divisor(struct rtnl_qdisc *qdisc)
{
	struct rtnl_sfq *sfq;

	sfq = sfq_qdisc(qdisc);
	if (sfq && sfq->qs_mask & SCH_SFQ_ATTR_DIVISOR)
		return sfq->qs_divisor;
	else
		return nl_errno(ENOENT);
}
static int vlan_alloc(struct rtnl_link *link)
{
	struct vlan_info *vi;

	if ((vi = calloc(1, sizeof(*vi))) == NULL)
		return nl_errno(ENOMEM);

	link->l_info = vi;

	return 0;
}
/**
 * Send raw data over netlink socket.
 * @arg handle		Netlink handle.
 * @arg buf		Data buffer.
 * @arg size		Size of data buffer.
 * @return Number of bytes sent on success or a negative error code.
 */
int nl_sendto(struct nl_handle *handle, void *buf, size_t size)
{
	int ret;

	ret = sendto(handle->h_fd, buf, size, 0,
		     (struct sockaddr *) &handle->h_peer,
		     sizeof(handle->h_peer));
	if (ret < 0)
		return nl_errno(errno);

	return ret;
}
int genlmsg_parse(struct nlmsghdr *nlh, int hdrlen, struct nlattr *tb[],
		  int maxtype, struct nla_policy *policy)
{
	struct genlmsghdr *ghdr;

	if (!genlmsg_valid_hdr(nlh, hdrlen))
		return nl_errno(EINVAL);

	ghdr = nlmsg_data(nlh);
	return nla_parse(tb, maxtype, genlmsg_attrdata(ghdr, hdrlen),
			 genlmsg_attrlen(ghdr, hdrlen), policy);
}
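/*
 * Usage sketch (not part of the library): parse a generic netlink message
 * from within a valid-message callback. EXAMPLE_ATTR_* and the callback
 * wiring are made up for this example; NL_OK is spelled NL_PROCEED in
 * older trees.
 */
#define EXAMPLE_ATTR_FOO	1
#define EXAMPLE_ATTR_MAX	1

static int example_genl_valid_cb(struct nl_msg *msg, void *arg)
{
	struct nlattr *tb[EXAMPLE_ATTR_MAX + 1];

	/* hdrlen is 0: no family specific header follows the genlmsghdr. */
	if (genlmsg_parse(nlmsg_hdr(msg), 0, tb, EXAMPLE_ATTR_MAX, NULL) < 0)
		return NL_SKIP;

	if (tb[EXAMPLE_ATTR_FOO])
		*(uint32_t *) arg = nla_get_u32(tb[EXAMPLE_ATTR_FOO]);

	return NL_OK;
}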
/**
 * Set limit of FIFO qdisc.
 * @arg qdisc		FIFO qdisc to be modified.
 * @arg limit		New limit.
 * @return 0 on success or a negative error code.
 */
int rtnl_qdisc_fifo_set_limit(struct rtnl_qdisc *qdisc, int limit)
{
	struct rtnl_fifo *fifo;

	fifo = fifo_alloc(qdisc);
	if (!fifo)
		return nl_errno(ENOMEM);

	fifo->qf_limit = limit;
	fifo->qf_mask |= SCH_FIFO_ATTR_LIMIT;

	return 0;
}
/**
 * Set quantum of SFQ qdisc.
 * @arg qdisc		SFQ qdisc to be modified.
 * @arg quantum		New quantum in bytes.
 * @return 0 on success or a negative error code.
 */
int rtnl_sfq_set_quantum(struct rtnl_qdisc *qdisc, int quantum)
{
	struct rtnl_sfq *sfq;

	sfq = sfq_alloc(qdisc);
	if (!sfq)
		return nl_errno(ENOMEM);

	sfq->qs_quantum = quantum;
	sfq->qs_mask |= SCH_SFQ_ATTR_QUANTUM;

	return 0;
}
/**
 * Set limit of SFQ qdisc.
 * @arg qdisc		SFQ qdisc to be modified.
 * @arg limit		New limit in number of packets.
 * @return 0 on success or a negative error code.
 */
int rtnl_sfq_set_limit(struct rtnl_qdisc *qdisc, int limit)
{
	struct rtnl_sfq *sfq;

	sfq = sfq_alloc(qdisc);
	if (!sfq)
		return nl_errno(ENOMEM);

	sfq->qs_limit = limit;
	sfq->qs_mask |= SCH_SFQ_ATTR_LIMIT;

	return 0;
}
/**
 * Set perturbation interval of SFQ qdisc.
 * @arg qdisc		SFQ qdisc to be modified.
 * @arg perturb		New perturbation interval in seconds.
 * @note A value of 0 disables perturbation altogether.
 * @return 0 on success or a negative error code.
 */
int rtnl_sfq_set_perturb(struct rtnl_qdisc *qdisc, int perturb)
{
	struct rtnl_sfq *sfq;

	sfq = sfq_alloc(qdisc);
	if (!sfq)
		return nl_errno(ENOMEM);

	sfq->qs_perturb = perturb;
	sfq->qs_mask |= SCH_SFQ_ATTR_PERTURB;

	return 0;
}
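/*
 * Usage sketch (not part of the library): configure an SFQ qdisc locally and
 * submit it to the kernel. Assumes a handle connected via nl_connect() and
 * the generic qdisc API (rtnl_qdisc_alloc(), rtnl_qdisc_set_*(),
 * rtnl_qdisc_add(), rtnl_qdisc_put()).
 */
static int example_add_sfq(struct nl_handle *handle, int ifindex)
{
	struct rtnl_qdisc *qdisc;
	int err;

	if (!(qdisc = rtnl_qdisc_alloc()))
		return nl_errno(ENOMEM);

	rtnl_qdisc_set_ifindex(qdisc, ifindex);
	rtnl_qdisc_set_parent(qdisc, TC_H_ROOT);
	rtnl_qdisc_set_kind(qdisc, "sfq");

	/* Each setter flags its attribute in qs_mask so that only
	 * attributes touched here end up in the netlink message. */
	if ((err = rtnl_sfq_set_quantum(qdisc, 1514)) < 0 ||	/* one MTU */
	    (err = rtnl_sfq_set_limit(qdisc, 128)) < 0 ||	/* packets */
	    (err = rtnl_sfq_set_perturb(qdisc, 10)) < 0)	/* seconds */
		goto errout;

	err = rtnl_qdisc_add(handle, qdisc, NLM_F_CREATE);

errout:
	rtnl_qdisc_put(qdisc);
	return err;
}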
/**
 * Unregister a qdisc module
 * @arg qops		qdisc module operations
 * @return 0 on success or a negative error code.
 */
int rtnl_qdisc_unregister(struct rtnl_qdisc_ops *qops)
{
	struct rtnl_qdisc_ops *o, **op;

	for (op = &qdisc_ops_list; (o = *op) != NULL; op = &o->qo_next)
		if (!strcasecmp(qops->qo_kind, o->qo_kind))
			break;

	if (!o)
		return nl_errno(ENOENT);

	*op = qops->qo_next;

	return 0;
}
/**
 * Unregister a class module
 * @arg cops		class module operations
 * @return 0 on success or a negative error code.
 */
int rtnl_class_unregister(struct rtnl_class_ops *cops)
{
	struct rtnl_class_ops *o, **op;

	for (op = &class_ops_list; (o = *op) != NULL; op = &o->co_next)
		if (!strcasecmp(cops->co_kind, o->co_kind))
			break;

	if (!o)
		return nl_errno(ENOENT);

	*op = cops->co_next;

	return 0;
}
/**
 * Register a qdisc module
 * @arg qops		qdisc module operations
 * @return 0 on success or a negative error code.
 */
int rtnl_qdisc_register(struct rtnl_qdisc_ops *qops)
{
	struct rtnl_qdisc_ops *o, **op;

	if (!qops->qo_kind[0])
		BUG();

	for (op = &qdisc_ops_list; (o = *op) != NULL; op = &o->qo_next)
		if (!strcasecmp(qops->qo_kind, o->qo_kind))
			return nl_errno(EEXIST);

	qops->qo_next = NULL;
	*op = qops;

	return 0;
}
/**
 * Register a class module
 * @arg cops		class module operations
 * @return 0 on success or a negative error code.
 */
int rtnl_class_register(struct rtnl_class_ops *cops)
{
	struct rtnl_class_ops *o, **op;

	if (!cops->co_kind[0])
		BUG();

	for (op = &class_ops_list; (o = *op) != NULL; op = &o->co_next)
		if (!strcasecmp(cops->co_kind, o->co_kind))
			return nl_errno(EEXIST);

	cops->co_next = NULL;
	*op = cops;

	return 0;
}
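/*
 * Usage sketch (not part of the library): a qdisc (or class) module
 * registers its operations from a constructor and unregisters them from a
 * destructor, mirroring the vlan module further below. The ops struct is
 * trimmed to the lookup key; a real module also fills in its parser and
 * dumper hooks.
 */
static struct rtnl_qdisc_ops example_qdisc_ops = {
	.qo_kind		= "example",
};

static void __init example_init(void)
{
	rtnl_qdisc_register(&example_qdisc_ops);
}

static void __exit example_exit(void)
{
	rtnl_qdisc_unregister(&example_qdisc_ops);
}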
/**
 * Add a new rule
 * @arg handle		netlink handle
 * @arg tmpl		template with requested changes
 * @arg flags		additional netlink message flags
 *
 * Builds a netlink message by calling rtnl_rule_build_add_request(),
 * sends the request to the kernel and waits for the next ACK to be
 * received, i.e. blocks until the request has been fulfilled.
 *
 * @return 0 on success or a negative error code.
 */
int rtnl_rule_add(struct nl_handle *handle, struct rtnl_rule *tmpl, int flags)
{
	int err;
	struct nl_msg *msg;

	msg = rtnl_rule_build_add_request(tmpl, flags);
	if (!msg)
		return nl_errno(ENOMEM);

	err = nl_send_auto_complete(handle, msg);
	nlmsg_free(msg);
	if (err < 0)
		return err;

	return nl_wait_for_ack(handle);
}
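/*
 * Usage sketch (not part of the library): fill a rule template and hand it
 * to rtnl_rule_add(). Assumes the rule object accessors (rtnl_rule_alloc(),
 * rtnl_rule_set_family(), rtnl_rule_set_table(), rtnl_rule_put()).
 */
static int example_add_rule(struct nl_handle *handle)
{
	struct rtnl_rule *rule;
	int err;

	if (!(rule = rtnl_rule_alloc()))
		return nl_errno(ENOMEM);

	rtnl_rule_set_family(rule, AF_INET);
	rtnl_rule_set_table(rule, 100);	/* matching lookups use table 100 */

	/* Blocks until the kernel acknowledges the request. */
	err = rtnl_rule_add(handle, rule, NLM_F_CREATE);
	rtnl_rule_put(rule);

	return err;
}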
/**
 * Perform FIB Lookup
 * @arg handle		Netlink handle.
 * @arg req		Lookup request object.
 * @arg cache		Cache for result.
 *
 * Builds a netlink message to request a FIB lookup, waits for the
 * reply and adds the result to the specified cache.
 *
 * @return 0 on success or a negative error code.
 */
int flnl_lookup(struct nl_handle *handle, struct flnl_request *req,
		struct nl_cache *cache)
{
	struct nl_msg *msg;
	int err;

	msg = flnl_lookup_build_request(req, 0);
	if (!msg)
		return nl_errno(ENOMEM);

	err = nl_send_auto_complete(handle, msg);
	nlmsg_free(msg);
	if (err < 0)
		return err;

	return nl_cache_pickup(handle, cache);
}
static int fifo_msg_parser(struct rtnl_qdisc *qdisc)
{
	struct rtnl_fifo *fifo;
	struct tc_fifo_qopt *opt;

	if (qdisc->q_opts->d_size < sizeof(struct tc_fifo_qopt))
		return nl_error(EINVAL, "FIFO options size mismatch");

	fifo = fifo_alloc(qdisc);
	if (!fifo)
		return nl_errno(ENOMEM);

	opt = (struct tc_fifo_qopt *) qdisc->q_opts->d_data;
	fifo->qf_limit = opt->limit;
	fifo->qf_mask = SCH_FIFO_ATTR_LIMIT;

	return 0;
}
static int vlan_clone(struct rtnl_link *dst, struct rtnl_link *src)
{
	struct vlan_info *vdst, *vsrc = src->l_info;
	int err;

	dst->l_info = NULL;
	if ((err = rtnl_link_set_info_type(dst, "vlan")) < 0)
		return err;
	vdst = dst->l_info;

	vdst->vi_egress_qos = calloc(vsrc->vi_egress_size,
				     sizeof(struct vlan_map));
	if (!vdst->vi_egress_qos)
		return nl_errno(ENOMEM);

	memcpy(vdst->vi_egress_qos, vsrc->vi_egress_qos,
	       vsrc->vi_egress_size * sizeof(struct vlan_map));

	return 0;
}
static int tbf_msg_parser(struct rtnl_qdisc *q)
{
	int err;
	struct nlattr *tb[TCA_TBF_MAX + 1];
	struct rtnl_tbf *tbf;

	err = tca_parse(tb, TCA_TBF_MAX, (struct rtnl_tca *) q, tbf_policy);
	if (err < 0)
		return err;

	tbf = tbf_alloc(q);
	if (!tbf)
		return nl_errno(ENOMEM);

	if (tb[TCA_TBF_PARMS]) {
		struct tc_tbf_qopt opts;
		int bufsize;

		nla_memcpy(&opts, tb[TCA_TBF_PARMS], sizeof(opts));
		tbf->qt_limit = opts.limit;
		tbf->qt_mpu = opts.rate.mpu;

		rtnl_copy_ratespec(&tbf->qt_rate, &opts.rate);
		tbf->qt_rate_txtime = opts.buffer;
		bufsize = rtnl_tc_calc_bufsize(nl_ticks2us(opts.buffer),
					       opts.rate.rate);
		tbf->qt_rate_bucket = bufsize;

		rtnl_copy_ratespec(&tbf->qt_peakrate, &opts.peakrate);
		tbf->qt_peakrate_txtime = opts.mtu;
		bufsize = rtnl_tc_calc_bufsize(nl_ticks2us(opts.mtu),
					       opts.peakrate.rate);
		tbf->qt_peakrate_bucket = bufsize;

		tbf->qt_mask = (TBF_ATTR_LIMIT | TBF_ATTR_MPU | TBF_ATTR_RATE |
				TBF_ATTR_PEAKRATE);
	}

	return 0;
}
/**
 * Allocate and initialize new non-default netlink handle.
 * @arg kind		Kind of callback handler to use by default.
 *
 * Allocates and initializes a new netlink handle. The netlink process id
 * is set to the local process id, which may conflict if multiple handles
 * are created; in that case, overwrite it using nl_handle_set_pid(). The
 * initial sequence number is initialized to the current UNIX time.
 *
 * @return Newly allocated netlink handle or NULL.
 */
struct nl_handle *nl_handle_alloc_nondefault(enum nl_cb_kind kind)
{
	struct nl_handle *handle;

	handle = calloc(1, sizeof(*handle));
	if (!handle)
		goto errout;

	handle->h_cb = nl_cb_new(kind);
	if (!handle->h_cb)
		goto errout;

	handle->h_local.nl_family = AF_NETLINK;
	handle->h_peer.nl_family = AF_NETLINK;
	handle->h_local.nl_pid = getpid();
	handle->h_seq_expect = handle->h_seq_next = time(0);

	return handle;

errout:
	nl_handle_destroy(handle);
	nl_errno(ENOMEM);
	return NULL;
}
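/*
 * Usage sketch (not part of the library): allocate a second handle in the
 * same process and give it a distinct netlink pid as suggested above. The
 * pid scheme is an arbitrary example, not a library convention.
 */
static struct nl_handle *example_second_handle(void)
{
	struct nl_handle *handle;

	handle = nl_handle_alloc_nondefault(NL_CB_DEFAULT);
	if (!handle)
		return NULL;

	/* Avoid clashing with the first handle, which uses plain getpid(). */
	nl_handle_set_pid(handle, getpid() + (1 << 22));

	return handle;
}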
/**
 * Allocate a new callback handle
 * @arg kind		callback kind to be used for initialization
 * @return Newly allocated callback handle or NULL
 */
struct nl_cb *nl_cb_alloc(enum nl_cb_kind kind)
{
	int i;
	struct nl_cb *cb;

	if (kind < 0 || kind > NL_CB_KIND_MAX)
		return NULL;

	cb = calloc(1, sizeof(*cb));
	if (!cb) {
		nl_errno(ENOMEM);
		return NULL;
	}

	cb->cb_refcnt = 1;

	for (i = 0; i <= NL_CB_TYPE_MAX; i++)
		nl_cb_set(cb, i, kind, NULL, NULL);

	nl_cb_err(cb, kind, NULL, NULL);

	return cb;
}
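/*
 * Usage sketch (not part of the library): allocate a callback set based on
 * the default handlers and override only the valid-message hook.
 * example_valid_cb is hypothetical; NL_OK is spelled NL_PROCEED in older
 * trees.
 */
static int example_valid_cb(struct nl_msg *msg, void *arg)
{
	/* ... inspect the message ... */
	return NL_OK;
}

static struct nl_cb *example_cb_set(void)
{
	struct nl_cb *cb;

	if (!(cb = nl_cb_alloc(NL_CB_DEFAULT)))
		return NULL;

	/* All other callback types keep the NL_CB_DEFAULT handlers. */
	nl_cb_set(cb, NL_CB_VALID, NL_CB_CUSTOM, example_valid_cb, NULL);

	return cb;
}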
static int vlan_parse(struct rtnl_link *link, struct nlattr *data,
		      struct nlattr *xstats)
{
	struct nlattr *tb[IFLA_VLAN_MAX + 1];
	struct vlan_info *vi;
	int err;

	NL_DBG(3, "Parsing VLAN link info");

	if ((err = nla_parse_nested(tb, IFLA_VLAN_MAX, data, vlan_policy)) < 0)
		goto errout;

	if ((err = vlan_alloc(link)) < 0)
		goto errout;

	vi = link->l_info;

	if (tb[IFLA_VLAN_ID]) {
		vi->vi_vlan_id = nla_get_u16(tb[IFLA_VLAN_ID]);
		vi->vi_mask |= VLAN_HAS_ID;
	}

	if (tb[IFLA_VLAN_FLAGS]) {
		struct ifla_vlan_flags flags;

		nla_memcpy(&flags, tb[IFLA_VLAN_FLAGS], sizeof(flags));
		vi->vi_flags = flags.flags;
		vi->vi_mask |= VLAN_HAS_FLAGS;
	}

	if (tb[IFLA_VLAN_INGRESS_QOS]) {
		struct ifla_vlan_qos_mapping *map;
		struct nlattr *nla;
		int remaining;

		memset(vi->vi_ingress_qos, 0, sizeof(vi->vi_ingress_qos));

		nla_for_each_nested(nla, tb[IFLA_VLAN_INGRESS_QOS],
				    remaining) {
			if (nla_len(nla) < sizeof(*map))
				return nl_error(EINVAL, "Malformed mapping");

			map = nla_data(nla);
			if (map->from < 0 || map->from > VLAN_PRIO_MAX) {
				return nl_error(EINVAL, "VLAN prio %d out of "
						"range", map->from);
			}

			vi->vi_ingress_qos[map->from] = map->to;
		}

		vi->vi_mask |= VLAN_HAS_INGRESS_QOS;
	}

	if (tb[IFLA_VLAN_EGRESS_QOS]) {
		struct ifla_vlan_qos_mapping *map;
		struct nlattr *nla;
		int remaining, i = 0;

		nla_for_each_nested(nla, tb[IFLA_VLAN_EGRESS_QOS],
				    remaining) {
			if (nla_len(nla) < sizeof(*map))
				return nl_error(EINVAL, "Malformed mapping");
			i++;
		}

		/* align to have a little reserve */
		vi->vi_egress_size = (i + 32) & ~31;
		vi->vi_egress_qos = calloc(vi->vi_egress_size, sizeof(*map));
		if (vi->vi_egress_qos == NULL)
			return nl_errno(ENOMEM);

		i = 0;
		nla_for_each_nested(nla, tb[IFLA_VLAN_EGRESS_QOS],
				    remaining) {
			map = nla_data(nla);
			NL_DBG(4, "Assigning egress qos mapping %d\n", i);
			vi->vi_egress_qos[i].vm_from = map->from;
			vi->vi_egress_qos[i++].vm_to = map->to;
		}

		vi->vi_negress = i;
		vi->vi_mask |= VLAN_HAS_EGRESS_QOS;
	}

	err = 0;
errout:
	return err;
}
static int vlan_put_attrs(struct nl_msg *msg, struct rtnl_link *link)
{
	struct vlan_info *vi = link->l_info;
	struct nlattr *data;

	if (!(data = nla_nest_start(msg, IFLA_INFO_DATA)))
		return nl_errno(ENOBUFS);

	if (vi->vi_mask & VLAN_HAS_ID)
		NLA_PUT_U16(msg, IFLA_VLAN_ID, vi->vi_vlan_id);

	if (vi->vi_mask & VLAN_HAS_FLAGS) {
		struct ifla_vlan_flags flags = {
			.flags = vi->vi_flags,
			.mask = vi->vi_flags_mask,
		};

		NLA_PUT(msg, IFLA_VLAN_FLAGS, sizeof(flags), &flags);
	}

	if (vi->vi_mask & VLAN_HAS_INGRESS_QOS) {
		struct ifla_vlan_qos_mapping map;
		struct nlattr *qos;
		int i;

		if (!(qos = nla_nest_start(msg, IFLA_VLAN_INGRESS_QOS)))
			goto nla_put_failure;

		for (i = 0; i <= VLAN_PRIO_MAX; i++) {
			if (vi->vi_ingress_qos[i]) {
				map.from = i;
				map.to = vi->vi_ingress_qos[i];

				NLA_PUT(msg, i, sizeof(map), &map);
			}
		}

		nla_nest_end(msg, qos);
	}

	if (vi->vi_mask & VLAN_HAS_EGRESS_QOS) {
		struct ifla_vlan_qos_mapping map;
		struct nlattr *qos;
		int i;

		if (!(qos = nla_nest_start(msg, IFLA_VLAN_EGRESS_QOS)))
			goto nla_put_failure;

		for (i = 0; i < vi->vi_negress; i++) {
			map.from = vi->vi_egress_qos[i].vm_from;
			map.to = vi->vi_egress_qos[i].vm_to;

			NLA_PUT(msg, i, sizeof(map), &map);
		}

		nla_nest_end(msg, qos);
	}

	nla_nest_end(msg, data);

nla_put_failure:
	return 0;
}

static struct rtnl_link_info_ops vlan_info_ops = {
	.io_name		= "vlan",
	.io_alloc		= vlan_alloc,
	.io_parse		= vlan_parse,
	.io_dump[NL_DUMP_BRIEF]	= vlan_dump_brief,
	.io_dump[NL_DUMP_FULL]	= vlan_dump_full,
	.io_clone		= vlan_clone,
	.io_put_attrs		= vlan_put_attrs,
	.io_free		= vlan_free,
};

int rtnl_link_vlan_set_id(struct rtnl_link *link, int id)
{
	struct vlan_info *vi = link->l_info;

	if (link->l_info_ops != &vlan_info_ops || !link->l_info_ops)
		return nl_error(EOPNOTSUPP, "Not a VLAN link");

	vi->vi_vlan_id = id;
	vi->vi_mask |= VLAN_HAS_ID;

	return 0;
}

int rtnl_link_vlan_get_id(struct rtnl_link *link)
{
	struct vlan_info *vi = link->l_info;

	if (link->l_info_ops != &vlan_info_ops || !link->l_info_ops)
		return nl_error(EOPNOTSUPP, "Not a VLAN link");

	if (vi->vi_mask & VLAN_HAS_ID)
		return vi->vi_vlan_id;
	else
		return 0;
}

int rtnl_link_vlan_set_flags(struct rtnl_link *link, unsigned int flags)
{
	struct vlan_info *vi = link->l_info;

	if (link->l_info_ops != &vlan_info_ops || !link->l_info_ops)
		return nl_error(EOPNOTSUPP, "Not a VLAN link");

	vi->vi_flags_mask |= flags;
	vi->vi_flags |= flags;
	vi->vi_mask |= VLAN_HAS_FLAGS;

	return 0;
}

int rtnl_link_vlan_unset_flags(struct rtnl_link *link, unsigned int flags)
{
	struct vlan_info *vi = link->l_info;

	if (link->l_info_ops != &vlan_info_ops || !link->l_info_ops)
		return nl_error(EOPNOTSUPP, "Not a VLAN link");

	vi->vi_flags_mask |= flags;
	vi->vi_flags &= ~flags;
	vi->vi_mask |= VLAN_HAS_FLAGS;

	return 0;
}

unsigned int rtnl_link_vlan_get_flags(struct rtnl_link *link)
{
	struct vlan_info *vi = link->l_info;

	if (link->l_info_ops != &vlan_info_ops || !link->l_info_ops)
		return nl_error(EOPNOTSUPP, "Not a VLAN link");

	return vi->vi_flags;
}

int rtnl_link_vlan_set_ingress_map(struct rtnl_link *link, int from,
				   uint32_t to)
{
	struct vlan_info *vi = link->l_info;

	if (link->l_info_ops != &vlan_info_ops || !link->l_info_ops)
		return nl_error(EOPNOTSUPP, "Not a VLAN link");

	if (from < 0 || from > VLAN_PRIO_MAX)
		return nl_error(EINVAL, "Invalid vlan prio 0..%d",
				VLAN_PRIO_MAX);

	vi->vi_ingress_qos[from] = to;
	vi->vi_mask |= VLAN_HAS_INGRESS_QOS;

	return 0;
}

uint32_t *rtnl_link_vlan_get_ingress_map(struct rtnl_link *link)
{
	struct vlan_info *vi = link->l_info;

	if (link->l_info_ops != &vlan_info_ops || !link->l_info_ops) {
		nl_error(EOPNOTSUPP, "Not a VLAN link");
		return NULL;
	}

	if (vi->vi_mask & VLAN_HAS_INGRESS_QOS)
		return vi->vi_ingress_qos;
	else
		return NULL;
}

int rtnl_link_vlan_set_egress_map(struct rtnl_link *link, uint32_t from,
				  int to)
{
	struct vlan_info *vi = link->l_info;

	if (link->l_info_ops != &vlan_info_ops || !link->l_info_ops)
		return nl_error(EOPNOTSUPP, "Not a VLAN link");

	if (to < 0 || to > VLAN_PRIO_MAX)
		return nl_error(EINVAL, "Invalid vlan prio 0..%d",
				VLAN_PRIO_MAX);

	if (vi->vi_negress >= vi->vi_egress_size) {
		int new_size = vi->vi_egress_size + 32;
		void *ptr;

		/* Grow by whole map entries, not bytes. */
		ptr = realloc(vi->vi_egress_qos,
			      new_size * sizeof(struct vlan_map));
		if (!ptr)
			return nl_errno(ENOMEM);

		vi->vi_egress_qos = ptr;
		vi->vi_egress_size = new_size;
	}

	vi->vi_egress_qos[vi->vi_negress].vm_from = from;
	vi->vi_egress_qos[vi->vi_negress].vm_to = to;
	vi->vi_negress++;
	vi->vi_mask |= VLAN_HAS_EGRESS_QOS;

	return 0;
}

struct vlan_map *rtnl_link_vlan_get_egress_map(struct rtnl_link *link,
					       int *negress)
{
	struct vlan_info *vi = link->l_info;

	if (link->l_info_ops != &vlan_info_ops || !link->l_info_ops) {
		nl_error(EOPNOTSUPP, "Not a VLAN link");
		return NULL;
	}

	if (negress == NULL) {
		nl_error(EINVAL, "Require pointer to store negress");
		return NULL;
	}

	if (vi->vi_mask & VLAN_HAS_EGRESS_QOS) {
		*negress = vi->vi_negress;
		return vi->vi_egress_qos;
	} else {
		*negress = 0;
		return NULL;
	}
}

static void __init vlan_init(void)
{
	rtnl_link_register_info(&vlan_info_ops);
}

static void __exit vlan_exit(void)
{
	rtnl_link_unregister_info(&vlan_info_ops);
}
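/*
 * Usage sketch (not part of the library): turn a link object into a VLAN on
 * top of an existing device and fill in the VLAN attributes. Sending the
 * result to the kernel goes through the generic link request functions and
 * is omitted here; rtnl_link_set_link() is assumed from the link core.
 */
static int example_fill_vlan(struct rtnl_link *link, int master_ifindex)
{
	int err;

	rtnl_link_set_link(link, master_ifindex);	/* underlying device */

	/* Attaches vlan_info_ops and allocates l_info via vlan_alloc(). */
	if ((err = rtnl_link_set_info_type(link, "vlan")) < 0)
		return err;

	if ((err = rtnl_link_vlan_set_id(link, 10)) < 0)
		return err;

	/* Received frames with VLAN priority 2 get internal priority 3. */
	return rtnl_link_vlan_set_ingress_map(link, 2, 3);
}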
static void copy_cacheinfo_into_route(struct rta_cacheinfo *ci,
				      struct rtnl_route *route)
{
	struct rtnl_rtcacheinfo nci = {
		.rtci_clntref	= ci->rta_clntref,
		.rtci_last_use	= ci->rta_lastuse,
		.rtci_expires	= ci->rta_expires,
		.rtci_error	= ci->rta_error,
		.rtci_used	= ci->rta_used,
		.rtci_id	= ci->rta_id,
		.rtci_ts	= ci->rta_ts,
		.rtci_tsage	= ci->rta_tsage,
	};

	rtnl_route_set_cacheinfo(route, &nci);
}

static int route_msg_parser(struct nl_cache_ops *ops, struct sockaddr_nl *who,
			    struct nlmsghdr *nlh, struct nl_parser_param *pp)
{
	struct rtmsg *rtm;
	struct rtnl_route *route;
	struct nlattr *tb[RTA_MAX + 1];
	struct nl_addr *src = NULL, *dst = NULL, *addr;
	int err;

	route = rtnl_route_alloc();
	if (!route) {
		err = nl_errno(ENOMEM);
		goto errout;
	}

	route->ce_msgtype = nlh->nlmsg_type;

	err = nlmsg_parse(nlh, sizeof(struct rtmsg), tb, RTA_MAX,
			  route_policy);
	if (err < 0)
		goto errout;

	rtm = nlmsg_data(nlh);
	rtnl_route_set_family(route, rtm->rtm_family);
	rtnl_route_set_tos(route, rtm->rtm_tos);
	rtnl_route_set_table(route, rtm->rtm_table);
	rtnl_route_set_type(route, rtm->rtm_type);
	rtnl_route_set_scope(route, rtm->rtm_scope);
	rtnl_route_set_protocol(route, rtm->rtm_protocol);
	rtnl_route_set_flags(route, rtm->rtm_flags);

	if (tb[RTA_DST]) {
		dst = nla_get_addr(tb[RTA_DST], rtm->rtm_family);
		if (dst == NULL)
			goto errout_errno;
	} else {
		dst = nl_addr_alloc(0);
		nl_addr_set_family(dst, rtm->rtm_family);
	}

	nl_addr_set_prefixlen(dst, rtm->rtm_dst_len);
	err = rtnl_route_set_dst(route, dst);
	if (err < 0)
		goto errout;

	nl_addr_put(dst);

	if (tb[RTA_SRC]) {
		src = nla_get_addr(tb[RTA_SRC], rtm->rtm_family);
		if (src == NULL)
			goto errout_errno;
	} else if (rtm->rtm_src_len)
		src = nl_addr_alloc(0);

	if (src) {
		nl_addr_set_prefixlen(src, rtm->rtm_src_len);
		rtnl_route_set_src(route, src);
		nl_addr_put(src);
	}

	if (tb[RTA_IIF])
		rtnl_route_set_iif(route, nla_get_string(tb[RTA_IIF]));

	if (tb[RTA_OIF])
		rtnl_route_set_oif(route, nla_get_u32(tb[RTA_OIF]));

	if (tb[RTA_GATEWAY]) {
		addr = nla_get_addr(tb[RTA_GATEWAY], route->rt_family);
		if (addr == NULL)
			goto errout_errno;
		rtnl_route_set_gateway(route, addr);
		nl_addr_put(addr);
	}

	if (tb[RTA_PRIORITY])
		rtnl_route_set_prio(route, nla_get_u32(tb[RTA_PRIORITY]));

	if (tb[RTA_PREFSRC]) {
		addr = nla_get_addr(tb[RTA_PREFSRC], route->rt_family);
		if (addr == NULL)
			goto errout_errno;
		rtnl_route_set_pref_src(route, addr);
		nl_addr_put(addr);
	}

	if (tb[RTA_METRICS]) {
		struct nlattr *mtb[RTAX_MAX + 1];
		int i;

		err = nla_parse_nested(mtb, RTAX_MAX, tb[RTA_METRICS], NULL);
		if (err < 0)
			goto errout;

		for (i = 1; i <= RTAX_MAX; i++) {
			if (mtb[i] && nla_len(mtb[i]) >= sizeof(uint32_t)) {
				uint32_t m = nla_get_u32(mtb[i]);

				if (rtnl_route_set_metric(route, i, m) < 0)
					goto errout_errno;
			}
		}
	}

	if (tb[RTA_MULTIPATH]) {
		struct rtnl_nexthop *nh;
		struct rtnexthop *rtnh = nla_data(tb[RTA_MULTIPATH]);
		size_t tlen = nla_len(tb[RTA_MULTIPATH]);

		while (tlen >= sizeof(*rtnh) && tlen >= rtnh->rtnh_len) {
			nh = rtnl_route_nh_alloc();
			if (!nh)
				goto errout_errno;

			rtnl_route_nh_set_weight(nh, rtnh->rtnh_hops);
			rtnl_route_nh_set_ifindex(nh, rtnh->rtnh_ifindex);
			rtnl_route_nh_set_flags(nh, rtnh->rtnh_flags);

			if (rtnh->rtnh_len > sizeof(*rtnh)) {
				struct nlattr *ntb[RTA_MAX + 1];

				nla_parse(ntb, RTA_MAX,
					  (struct nlattr *) RTNH_DATA(rtnh),
					  rtnh->rtnh_len - sizeof(*rtnh),
					  route_policy);

				if (ntb[RTA_GATEWAY]) {
					nh->rtnh_gateway = nla_get_addr(
							ntb[RTA_GATEWAY],
							route->rt_family);
					nh->rtnh_mask = NEXTHOP_HAS_GATEWAY;
				}
			}

			rtnl_route_add_nexthop(route, nh);
			tlen -= RTNH_ALIGN(rtnh->rtnh_len);
			rtnh = RTNH_NEXT(rtnh);
		}
	}

	if (tb[RTA_FLOW])
		rtnl_route_set_realms(route, nla_get_u32(tb[RTA_FLOW]));

	if (tb[RTA_CACHEINFO])
		copy_cacheinfo_into_route(nla_data(tb[RTA_CACHEINFO]), route);

	if (tb[RTA_MP_ALGO])
		rtnl_route_set_mp_algo(route, nla_get_u32(tb[RTA_MP_ALGO]));

	err = pp->pp_cb((struct nl_object *) route, pp);
	if (err < 0)
		goto errout;

	err = P_ACCEPT;

errout:
	rtnl_route_put(route);
	return err;

errout_errno:
	err = nl_get_errno();
	goto errout;
}

static int route_request_update(struct nl_cache *c, struct nl_handle *h)
{
	return nl_rtgen_request(h, RTM_GETROUTE, AF_UNSPEC, NLM_F_DUMP);
}

/**
 * @name Cache Management
 * @{
 */

/**
 * Build a route cache holding all routes currently configured in the kernel
 * @arg handle		netlink handle
 *
 * Allocates a new cache, initializes it properly and updates it to
 * contain all routes currently configured in the kernel.
 *
 * @note The caller is responsible for destroying and freeing the
 *       cache after using it.
 * @return The cache or NULL if an error has occurred.
 */
struct nl_cache *rtnl_route_alloc_cache(struct nl_handle *handle)
{
	struct nl_cache *cache;

	cache = nl_cache_alloc(&rtnl_route_ops);
	if (!cache)
		return NULL;

	if (handle && nl_cache_refill(handle, cache) < 0) {
		nl_cache_free(cache);
		return NULL;
	}

	return cache;
}

/** @} */

/**
 * @name Route Addition
 * @{
 */

static struct nl_msg *build_route_msg(struct rtnl_route *tmpl, int cmd,
				      int flags)
{
	struct nl_msg *msg;
	struct nl_addr *addr;
	int scope, i, oif, nmetrics = 0;
	struct nlattr *metrics;
	struct rtmsg rtmsg = {
		.rtm_family = rtnl_route_get_family(tmpl),
		.rtm_dst_len = rtnl_route_get_dst_len(tmpl),
		.rtm_src_len = rtnl_route_get_src_len(tmpl),
		.rtm_tos = rtnl_route_get_tos(tmpl),
		.rtm_table = rtnl_route_get_table(tmpl),
		.rtm_type = rtnl_route_get_type(tmpl),
		.rtm_protocol = rtnl_route_get_protocol(tmpl),
		.rtm_flags = rtnl_route_get_flags(tmpl),
	};

	if (rtmsg.rtm_family == AF_UNSPEC) {
		nl_error(EINVAL, "Cannot build route message, address "
			 "family is unknown.");
		return NULL;
	}

	scope = rtnl_route_get_scope(tmpl);
	if (scope == RT_SCOPE_NOWHERE) {
		if (rtmsg.rtm_type == RTN_LOCAL)
			scope = RT_SCOPE_HOST;
		else {
			/* XXX Change to UNIVERSE if gw || nexthops */
			scope = RT_SCOPE_LINK;
		}
	}

	rtmsg.rtm_scope = scope;

	msg = nlmsg_alloc_simple(cmd, flags);
	if (msg == NULL)
		return NULL;

	if (nlmsg_append(msg, &rtmsg, sizeof(rtmsg), NLMSG_ALIGNTO) < 0)
		goto nla_put_failure;

	addr = rtnl_route_get_dst(tmpl);
	if (addr)
		NLA_PUT_ADDR(msg, RTA_DST, addr);

	addr = rtnl_route_get_src(tmpl);
	if (addr)
		NLA_PUT_ADDR(msg, RTA_SRC, addr);

	addr = rtnl_route_get_gateway(tmpl);
	if (addr)
		NLA_PUT_ADDR(msg, RTA_GATEWAY, addr);

	addr = rtnl_route_get_pref_src(tmpl);
	if (addr)
		NLA_PUT_ADDR(msg, RTA_PREFSRC, addr);

	NLA_PUT_U32(msg, RTA_PRIORITY, rtnl_route_get_prio(tmpl));

	oif = rtnl_route_get_oif(tmpl);
	if (oif != RTNL_LINK_NOT_FOUND)
		NLA_PUT_U32(msg, RTA_OIF, oif);

	for (i = 1; i <= RTAX_MAX; i++)
		if (rtnl_route_get_metric(tmpl, i) != UINT_MAX)
			nmetrics++;

	if (nmetrics > 0) {
		unsigned int val;

		metrics = nla_nest_start(msg, RTA_METRICS);
		if (metrics == NULL)
			goto nla_put_failure;

		for (i = 1; i <= RTAX_MAX; i++) {
			val = rtnl_route_get_metric(tmpl, i);
			if (val != UINT_MAX)
				NLA_PUT_U32(msg, i, val);
		}

		nla_nest_end(msg, metrics);
	}

#if 0
	RTA_IIF,
	RTA_MULTIPATH,
	RTA_PROTOINFO,
	RTA_FLOW,
	RTA_CACHEINFO,
	RTA_SESSION,
	RTA_MP_ALGO,
#endif

	return msg;

nla_put_failure:
	nlmsg_free(msg);
	return NULL;
}

struct nl_msg *rtnl_route_build_add_request(struct rtnl_route *tmpl, int flags)
{
	return build_route_msg(tmpl, RTM_NEWROUTE, NLM_F_CREATE | flags);
}

int rtnl_route_add(struct nl_handle *handle, struct rtnl_route *route,
		   int flags)
{
	struct nl_msg *msg;
	int err;

	msg = rtnl_route_build_add_request(route, flags);
	if (!msg)
		return nl_get_errno();

	err = nl_send_auto_complete(handle, msg);
	nlmsg_free(msg);
	if (err < 0)
		return err;

	return nl_wait_for_ack(handle);
}

struct nl_msg *rtnl_route_build_del_request(struct rtnl_route *tmpl, int flags)
{
	return build_route_msg(tmpl, RTM_DELROUTE, flags);
}

int rtnl_route_del(struct nl_handle *handle, struct rtnl_route *route,
		   int flags)
{
	struct nl_msg *msg;
	int err;

	msg = rtnl_route_build_del_request(route, flags);
	if (!msg)
		return nl_get_errno();

	err = nl_send_auto_complete(handle, msg);
	nlmsg_free(msg);
	if (err < 0)
		return err;

	return nl_wait_for_ack(handle);
}

/** @} */

static struct nl_af_group route_groups[] = {
	{ AF_INET,	RTNLGRP_IPV4_ROUTE },
	{ AF_INET6,	RTNLGRP_IPV6_ROUTE },
	{ AF_DECnet,	RTNLGRP_DECnet_ROUTE },
	{ END_OF_GROUP_LIST },
};

static struct nl_cache_ops rtnl_route_ops = {
	.co_name		= "route/route",
	.co_hdrsize		= sizeof(struct rtmsg),
	.co_msgtypes		= {
					{ RTM_NEWROUTE, NL_ACT_NEW, "new" },
					{ RTM_DELROUTE, NL_ACT_DEL, "del" },
					{ RTM_GETROUTE, NL_ACT_GET, "get" },
					END_OF_MSGTYPES_LIST,
				  },
	.co_protocol		= NETLINK_ROUTE,
	.co_groups		= route_groups,
	.co_request_update	= route_request_update,
	.co_msg_parser		= route_msg_parser,
	.co_obj_ops		= &route_obj_ops,
};

static void __init route_init(void)
{
	nl_cache_mngt_register(&rtnl_route_ops);
}

static void __exit route_exit(void)
{
	nl_cache_mngt_unregister(&rtnl_route_ops);
}
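/*
 * Usage sketch (not part of the library): add an IPv4 route through the
 * template API above. The addresses and the use of nl_addr_parse() are
 * example choices.
 */
static int example_add_route(struct nl_handle *handle)
{
	struct rtnl_route *route;
	struct nl_addr *dst = NULL, *gw = NULL;
	int err;

	if (!(route = rtnl_route_alloc()))
		return nl_errno(ENOMEM);

	if (!(dst = nl_addr_parse("192.168.1.0/24", AF_INET)) ||
	    !(gw = nl_addr_parse("10.0.0.1", AF_INET))) {
		err = nl_get_errno();
		goto errout;
	}

	rtnl_route_set_family(route, AF_INET);
	rtnl_route_set_dst(route, dst);
	rtnl_route_set_gateway(route, gw);

	/* Blocks until the kernel acknowledges the request. */
	err = rtnl_route_add(handle, route, 0);

errout:
	if (dst)
		nl_addr_put(dst);
	if (gw)
		nl_addr_put(gw);
	rtnl_route_put(route);
	return err;
}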
static int addr_msg_parser(struct sockaddr_nl *who, struct nlmsghdr *nlh,
			   void *arg)
{
	struct rtnl_addr *addr;
	struct nl_parser_param *pp = arg;
	struct ifaddrmsg *ifa;
	struct nlattr *tb[IFA_MAX + 1];
	int err = -ENOMEM, peer_prefix = 0;

	addr = rtnl_addr_alloc();
	if (!addr) {
		err = nl_errno(ENOMEM);
		goto errout;
	}

	addr->ce_msgtype = nlh->nlmsg_type;

	err = nlmsg_parse(nlh, sizeof(*ifa), tb, IFA_MAX, addr_policy);
	if (err < 0)
		goto errout_free;

	ifa = nlmsg_data(nlh);
	addr->a_family = ifa->ifa_family;
	addr->a_prefixlen = ifa->ifa_prefixlen;
	addr->a_flags = ifa->ifa_flags;
	addr->a_scope = ifa->ifa_scope;
	addr->a_ifindex = ifa->ifa_index;

	addr->a_mask = (ADDR_ATTR_FAMILY | ADDR_ATTR_PREFIXLEN |
			ADDR_ATTR_FLAGS | ADDR_ATTR_SCOPE |
			ADDR_ATTR_IFINDEX);

	if (tb[IFA_LABEL]) {
		nla_strlcpy(addr->a_label, tb[IFA_LABEL], IFNAMSIZ);
		addr->a_mask |= ADDR_ATTR_LABEL;
	}

	if (tb[IFA_CACHEINFO]) {
		struct ifa_cacheinfo *ca;

		ca = nla_data(tb[IFA_CACHEINFO]);
		addr->a_cacheinfo.aci_prefered = ca->ifa_prefered;
		addr->a_cacheinfo.aci_valid = ca->ifa_valid;
		addr->a_cacheinfo.aci_cstamp = ca->cstamp;
		addr->a_cacheinfo.aci_tstamp = ca->tstamp;
		addr->a_mask |= ADDR_ATTR_CACHEINFO;
	}

	if (tb[IFA_LOCAL]) {
		addr->a_local = nla_get_addr(tb[IFA_LOCAL], addr->a_family);
		if (!addr->a_local)
			goto errout_free;
		addr->a_mask |= ADDR_ATTR_LOCAL;
	}

	if (tb[IFA_ADDRESS]) {
		struct nl_addr *a;

		a = nla_get_addr(tb[IFA_ADDRESS], addr->a_family);
		if (!a)
			goto errout_free;

		/* IPv6 sends the local address as IFA_ADDRESS with
		 * no IFA_LOCAL, IPv4 sends both IFA_LOCAL and IFA_ADDRESS
		 * with IFA_ADDRESS being the peer address if they differ */
		if (!tb[IFA_LOCAL] || !nl_addr_cmp(a, addr->a_local)) {
			nl_addr_put(addr->a_local);
			addr->a_local = a;
			addr->a_mask |= ADDR_ATTR_LOCAL;
		} else {
			addr->a_peer = a;
			addr->a_mask |= ADDR_ATTR_PEER;
			peer_prefix = 1;
		}
	}

	nl_addr_set_prefixlen(peer_prefix ? addr->a_peer : addr->a_local,
			      addr->a_prefixlen);

	if (tb[IFA_BROADCAST]) {
		addr->a_bcast = nla_get_addr(tb[IFA_BROADCAST],
					     addr->a_family);
		if (!addr->a_bcast)
			goto errout_free;
		addr->a_mask |= ADDR_ATTR_BROADCAST;
	}

	if (tb[IFA_ANYCAST]) {
		addr->a_anycast = nla_get_addr(tb[IFA_ANYCAST],
					       addr->a_family);
		if (!addr->a_anycast)
			goto errout_free;
		addr->a_mask |= ADDR_ATTR_ANYCAST;
	}

	if (tb[IFA_MULTICAST]) {
		addr->a_multicast = nla_get_addr(tb[IFA_MULTICAST],
						 addr->a_family);
		if (!addr->a_multicast)
			goto errout_free;
		addr->a_mask |= ADDR_ATTR_MULTICAST;
	}

	err = pp->pp_cb((struct nl_object *) addr, pp);
	if (err < 0)
		goto errout_free;

	return P_ACCEPT;

errout_free:
	rtnl_addr_free(addr);
errout:
	return err;
}