/*
 * Create an HTB qdisc on the given link at (Parent, Handle).
 *
 * Allocates a qdisc object, binds it to the link's ifindex, sets the
 * discipline kind to "htb" with the given default class, and submits the
 * request to the kernel over the link's netlink socket.
 *
 * Returns TError::Success() on success, or an EError::Unknown error
 * carrying the libnl error string on failure.
 */
TError TNlHtb::Create(const TNlLink &link, uint32_t defaultClass) {
    TError error = TError::Success();
    int ret;
    struct rtnl_qdisc *qdisc;

    qdisc = rtnl_qdisc_alloc();
    if (!qdisc)
        return TError(EError::Unknown, std::string("Unable to allocate qdisc object"));

    rtnl_tc_set_ifindex(TC_CAST(qdisc), link.GetIndex());
    rtnl_tc_set_parent(TC_CAST(qdisc), Parent);
    rtnl_tc_set_handle(TC_CAST(qdisc), Handle);

    ret = rtnl_tc_set_kind(TC_CAST(qdisc), "htb");
    if (ret < 0) {
        error = TError(EError::Unknown, std::string("Unable to set qdisc type: ") + nl_geterror(ret));
        goto free_qdisc;
    }

    /* Only the minor id identifies a class within this qdisc. */
    rtnl_htb_set_defcls(qdisc, TC_H_MIN(defaultClass));
    /* rate-to-quantum divisor (r2q); 10 is the usual htb default. */
    rtnl_htb_set_rate2quantum(qdisc, 10);

    link.Dump("add", qdisc);
    ret = rtnl_qdisc_add(link.GetSock(), qdisc, NLM_F_CREATE);
    if (ret < 0)
        error = TError(EError::Unknown, std::string("Unable to add qdisc: ") + nl_geterror(ret));

free_qdisc:
    /* Drop our reference regardless of outcome. */
    rtnl_qdisc_put(qdisc);

    return error;
}
bool TNlCgFilter::Exists(const TNlLink &link) { int ret; struct nl_cache *clsCache; ret = rtnl_cls_alloc_cache(link.GetSock(), link.GetIndex(), Parent, &clsCache); if (ret < 0) { L_ERR() << "Can't allocate filter cache: " << nl_geterror(ret) << std::endl; return false; } link.LogCache(clsCache); struct CgFilterIter { uint32_t parent; uint32_t handle; bool exists; } data = { Parent, Handle, false }; nl_cache_foreach(clsCache, [](struct nl_object *obj, void *data) { CgFilterIter *p = (CgFilterIter *)data; if (rtnl_tc_get_handle(TC_CAST(obj)) == p->handle && rtnl_tc_get_parent(TC_CAST(obj)) == p->parent) p->exists = true; }, &data); nl_cache_free(clsCache); return data.exists; }
/*
 * Remove this cgroup classifier from the link.
 *
 * Builds a classifier object matching (FilterType, FilterPrio, Parent) on
 * the link's ifindex and asks the kernel to delete it.
 *
 * Returns TError::Success() on success, or an EError::Unknown error with
 * the libnl error string on failure.
 */
TError TNlCgFilter::Remove(const TNlLink &link) {
    TError error = TError::Success();
    struct rtnl_cls *cls;
    int ret;

    cls = rtnl_cls_alloc();
    if (!cls)
        return TError(EError::Unknown, std::string("Unable to allocate filter object"));

    rtnl_tc_set_ifindex(TC_CAST(cls), link.GetIndex());

    ret = rtnl_tc_set_kind(TC_CAST(cls), FilterType);
    if (ret < 0) {
        error = TError(EError::Unknown, std::string("Unable to set filter type: ") + nl_geterror(ret));
        goto free_cls;
    }

    rtnl_cls_set_prio(cls, FilterPrio);
    /* protocol 0 matches any ethertype for deletion purposes. */
    rtnl_cls_set_protocol(cls, 0);
    rtnl_tc_set_parent(TC_CAST(cls), Parent);

    link.Dump("remove", cls);
    ret = rtnl_cls_delete(link.GetSock(), cls, 0);
    if (ret < 0)
        error = TError(EError::Unknown, std::string("Unable to remove filter: ") + nl_geterror(ret));

free_cls:
    /* Drop our reference regardless of outcome. */
    rtnl_cls_put(cls);

    return error;
}
bool TNlHtb::Valid(const TNlLink &link, uint32_t defaultClass) { int ret; struct nl_cache *qdiscCache; bool valid = true; ret = rtnl_qdisc_alloc_cache(link.GetSock(), &qdiscCache); if (ret < 0) { L_ERR() << "can't alloc qdisc cache" << std::endl; return false; } struct rtnl_qdisc *qdisc = rtnl_qdisc_get(qdiscCache, link.GetIndex(), Handle); if (qdisc) { link.Dump("found", qdisc); if (rtnl_tc_get_ifindex(TC_CAST(qdisc)) != link.GetIndex()) valid = false; else if (rtnl_tc_get_parent(TC_CAST(qdisc)) != Parent) valid = false; else if (rtnl_tc_get_handle(TC_CAST(qdisc)) != Handle) valid = false; else if (rtnl_tc_get_kind(TC_CAST(qdisc)) != std::string("htb")) valid = false; else if (rtnl_htb_get_defcls(qdisc) != TC_H_MIN(defaultClass)) valid = false; } else { valid = false; } rtnl_qdisc_put(qdisc); nl_cache_free(qdiscCache); return valid; }
int netem_set_params(const char *iface, struct netem_params *params) { struct rtnl_link *link; struct rtnl_qdisc *qdisc; int err; pthread_mutex_lock(&nl_sock_mutex); /* filter link by name */ if ((link = rtnl_link_get_by_name(link_cache, iface)) == NULL) { fprintf(stderr, "unknown interface/link name.\n"); pthread_mutex_unlock(&nl_sock_mutex); return -1; } if (!(qdisc = rtnl_qdisc_alloc())) { /* OOM error */ fprintf(stderr, "couldn't alloc qdisc\n"); pthread_mutex_unlock(&nl_sock_mutex); return -1; } rtnl_tc_set_link(TC_CAST(qdisc), link); rtnl_tc_set_parent(TC_CAST(qdisc), TC_H_ROOT); rtnl_tc_set_kind(TC_CAST(qdisc), "netem"); rtnl_netem_set_delay(qdisc, params->delay * 1000); /* expects microseconds */ rtnl_netem_set_jitter(qdisc, params->jitter * 1000); /* params->loss is given in 10ths of a percent */ rtnl_netem_set_loss(qdisc, (params->loss * (UINT_MAX / 1000))); /* Submit request to kernel and wait for response */ err = rtnl_qdisc_add(sock, qdisc, NLM_F_CREATE | NLM_F_REPLACE); /* Return the qdisc object to free memory resources */ rtnl_qdisc_put(qdisc); if (err < 0) { fprintf(stderr, "Unable to add qdisc: %s\n", nl_geterror(err)); pthread_mutex_unlock(&nl_sock_mutex); return err; } if ((err = nl_cache_refill(sock, link_cache)) < 0) { fprintf(stderr, "Unable to resync link cache: %s\n", nl_geterror(err)); pthread_mutex_unlock(&nl_sock_mutex); return -1; } pthread_mutex_unlock(&nl_sock_mutex); return 0; }
// Decode an ingress queueing discipline from a libnl qdisc object.
// Returns None() if the qdisc is not the ingress discipline at the
// expected root parent and handle.
//
// Fix: rtnl_tc_get_kind() returns NULL when the kind attribute is unset;
// comparing a NULL char* against std::string is undefined behavior, so
// check for NULL before the comparison.
Result<ingress::Discipline> decode<ingress::Discipline>(
    const Netlink<struct rtnl_qdisc>& qdisc)
{
  const char* kind = rtnl_tc_get_kind(TC_CAST(qdisc.get()));

  if (kind == NULL ||
      kind != string("ingress") ||
      rtnl_tc_get_parent(TC_CAST(qdisc.get())) != INGRESS_ROOT.get() ||
      rtnl_tc_get_handle(TC_CAST(qdisc.get())) != ingress::HANDLE.get()) {
    return None();
  }

  return ingress::Discipline();
}
/*
 * Print all classes that are direct children of the given tc object.
 *
 * Builds a class object used purely as a cache filter: only classes whose
 * parent equals the tc object's handle on the same interface match, and
 * print_class() is invoked for each with the passed-through arg.
 */
static void print_tc_childs(struct rtnl_tc *tc, void *arg)
{
	struct rtnl_class *filter;

	/* NOTE(review): no NULL check here — nl_cli_class_alloc() presumably
	 * aborts on allocation failure like other nl_cli helpers; confirm. */
	filter = nl_cli_class_alloc();

	rtnl_tc_set_parent(TC_CAST(filter), rtnl_tc_get_handle(tc));
	rtnl_tc_set_ifindex(TC_CAST(filter), rtnl_tc_get_ifindex(tc));

	nl_cache_foreach_filter(class_cache, OBJ_CAST(filter), &print_class, arg);

	rtnl_class_put(filter);
}
/* * Function that adds a new filter and attach it to a hash table * and set next hash table link with hash mask * */ static int u32_add_filter_on_ht_with_hashmask(struct nl_sock *sock, struct rtnl_link *rtnlLink, uint32_t prio, uint32_t keyval, uint32_t keymask, int keyoff, int keyoffmask, uint32_t htid, uint32_t htlink, uint32_t hmask, uint32_t hoffset, struct rtnl_act *act, struct rtnl_act *act2) { struct rtnl_cls *cls; int err; cls=rtnl_cls_alloc(); if (!(cls)) { printf("Can not allocate classifier\n"); nl_socket_free(sock); exit(1); } rtnl_tc_set_link(TC_CAST(cls), rtnlLink); if ((err = rtnl_tc_set_kind(TC_CAST(cls), "u32"))) { printf("Can not set classifier as u32\n"); return 1; } rtnl_cls_set_prio(cls, prio); rtnl_cls_set_protocol(cls, ETH_P_IP); rtnl_tc_set_parent(TC_CAST(cls), TC_HANDLE(0xffff, 0)); if (htid) rtnl_u32_set_hashtable(cls, htid); rtnl_u32_add_key_uint32(cls, keyval, keymask, keyoff, keyoffmask); rtnl_u32_set_hashmask(cls, hmask, hoffset); rtnl_u32_set_link(cls, htlink); rtnl_u32_add_action(cls, act); rtnl_u32_add_action(cls, act2); if ((err = rtnl_cls_add(sock, cls, NLM_F_CREATE))) { printf("Can not add classifier: %s\n", nl_geterror(err)); return -1; } rtnl_cls_put(cls); return 0; }
/*
 * Cache update hook: ask the kernel to dump all qdiscs for the interface
 * stored in the cache's first integer argument (0 = all interfaces).
 */
static int qdisc_request_update(struct nl_cache *c, struct nl_sock *sk)
{
	struct tcmsg tchdr = {
		.tcm_family = AF_UNSPEC,
		.tcm_ifindex = c->c_iarg1,
	};

	return nl_send_simple(sk, RTM_GETQDISC, NLM_F_DUMP, &tchdr, sizeof(tchdr));
}

/**
 * @name Allocation/Freeing
 * @{
 */

/*
 * Allocate a new qdisc object with its tc type tagged as qdisc.
 * Returns NULL on allocation failure.
 */
struct rtnl_qdisc *rtnl_qdisc_alloc(void)
{
	struct rtnl_tc *tc;

	tc = TC_CAST(nl_object_alloc(&qdisc_obj_ops));
	if (tc)
		tc->tc_type = RTNL_TC_TYPE_QDISC;

	return (struct rtnl_qdisc *) tc;
}

/*
 * Release a reference on a qdisc object; the object is freed when the
 * last reference is dropped.
 */
void rtnl_qdisc_put(struct rtnl_qdisc *qdisc)
{
	nl_object_put((struct nl_object *) qdisc);
}
/*
 * Parse a classifier netlink message into a rtnl_cls object and hand it
 * to the cache parser callback.
 *
 * Returns the callback's result, a negative NLE error on parse failure,
 * or -NLE_NOMEM if the classifier cannot be allocated.
 */
static int cls_msg_parser(struct nl_cache_ops *ops, struct sockaddr_nl *who,
			  struct nlmsghdr *nlh, struct nl_parser_param *pp)
{
	struct rtnl_cls *cls;
	int err;

	if (!(cls = rtnl_cls_alloc()))
		return -NLE_NOMEM;

	if ((err = rtnl_tc_msg_parse(nlh, TC_CAST(cls))) < 0)
		goto errout;

	/* tcm_info packs the priority into the major part... */
	cls->c_prio = TC_H_MAJ(cls->c_info) >> 16;
	if (cls->c_prio)
		cls->ce_mask |= CLS_ATTR_PRIO;
	/* ...and the protocol (network byte order) into the minor part. */
	cls->c_protocol = ntohs(TC_H_MIN(cls->c_info));
	if (cls->c_protocol)
		cls->ce_mask |= CLS_ATTR_PROTOCOL;

	err = pp->pp_cb(OBJ_CAST(cls), pp);
errout:
	/* Drop our reference; the callback takes its own if it keeps it. */
	rtnl_cls_put(cls);
	return err;
}
/**
 * Append new 32-bit key to the selector
 *
 * @arg cls	classifier to be modifier
 * @arg val	value to be matched (network byte-order)
 * @arg mask	mask to be applied before matching (network byte-order)
 * @arg off	offset, in bytes, to start matching
 * @arg offmask	offset mask
 *
 * General selectors define the pattern, mask and offset the pattern will be
 * matched to the packet contents. Using the general selectors you can match
 * virtually any single bit in the IP (or upper layer) header.
 *
 * @return 0 on success or a negative error code.
 */
int rtnl_u32_add_key(struct rtnl_cls *cls, uint32_t val, uint32_t mask,
		     int off, int offmask)
{
	struct tc_u32_sel *sel;
	struct rtnl_u32 *u;
	int err;

	if (!(u = rtnl_tc_data(TC_CAST(cls))))
		return -NLE_NOMEM;

	sel = u32_selector_alloc(u);
	if (!sel)
		return -NLE_NOMEM;

	/* nkeys is a single byte in tc_u32_sel; refuse to overflow it. */
	if (sel->nkeys == UCHAR_MAX)
		return -NLE_NOMEM;

	/* Grow the selector data blob by one key slot. */
	err = nl_data_append(u->cu_selector, NULL, sizeof(struct tc_u32_key));
	if (err < 0)
		return err;

	/* the selector might have been moved by realloc */
	sel = u32_selector(u);

	sel->keys[sel->nkeys].mask = mask;
	/* Pre-mask the value so kernel comparison is a plain equality. */
	sel->keys[sel->nkeys].val = val & mask;
	sel->keys[sel->nkeys].off = off;
	sel->keys[sel->nkeys].offmask = offmask;
	sel->nkeys++;

	u->cu_mask |= U32_ATTR_SELECTOR;

	return 0;
}
/*
 * Load this tc class's attributes (Kind, and for htb/hfsc the rates) from
 * the kernel via a class cache on interface Index.
 *
 * Returns Success() on success, an error if the cache cannot be allocated
 * or the class with Handle is not found.
 *
 * Fix: rtnl_tc_get_kind() may return NULL (no kind attribute); assigning
 * a NULL const char* to std::string is undefined behavior. Guard it and
 * fall back to an empty Kind.
 */
TError TNlClass::Load(const TNl &nl) {
    struct nl_cache *cache;
    struct rtnl_class *tclass;

    int ret = rtnl_class_alloc_cache(nl.GetSock(), Index, &cache);
    if (ret < 0)
        return nl.Error(ret, "Cannot allocate class cache");

    tclass = rtnl_class_get(cache, Index, Handle);
    if (!tclass) {
        nl_cache_free(cache);
        return TError(EError::Unknown, "Can't find tc class");
    }

    const char *kind = rtnl_tc_get_kind(TC_CAST(tclass));
    Kind = kind ? kind : "";

    if (Kind == "htb") {
        Prio = rtnl_htb_get_prio(tclass);
        Rate = rtnl_htb_get_rate(tclass);
        Ceil = rtnl_htb_get_ceil(tclass);
    }

    if (Kind == "hfsc") {
        struct tc_service_curve sc;

        /* m2 is the long-term rate of the service curve. */
        if (!rtnl_class_hfsc_get_fsc(tclass, &sc))
            Rate = sc.m2;
        if (!rtnl_class_hfsc_get_usc(tclass, &sc))
            Ceil = sc.m2;
    }

    rtnl_class_put(tclass);
    nl_cache_free(cache);

    return TError::Success();
}
/*
 * Set the hash mask and offset on a u32 classifier's selector.
 *
 * @arg cls		u32 classifier to modify
 * @arg hashmask	hash mask (host byte order; converted here)
 * @arg offset		offset, in bytes, where hashing starts
 *
 * Returns 0 on success or a negative error code.
 */
int rtnl_u32_set_hashmask(struct rtnl_cls *cls, uint32_t hashmask, uint32_t offset)
{
	struct rtnl_u32 *u;
	struct tc_u32_sel *sel;
	int err;

	hashmask = htonl(hashmask);

	if (!(u = (struct rtnl_u32 *) rtnl_tc_data(TC_CAST(cls))))
		return -NLE_NOMEM;

	sel = u32_selector_alloc(u);
	if (!sel)
		return -NLE_NOMEM;

	/* NOTE(review): this grows the selector blob by one key slot without
	 * bumping nkeys — presumably mirroring how the kernel sizes the
	 * selector; confirm against rtnl_u32_add_key() usage. */
	err = nl_data_append(u->cu_selector, NULL, sizeof(struct tc_u32_key));
	if(err < 0)
		return err;

	/* re-fetch: the selector may have moved during realloc. */
	sel = u32_selector(u);

	sel->hmask = hashmask;
	sel->hoff = offset;
	return 0;
}
/*
 * Cache update hook: request a kernel dump of all qdiscs for the
 * interface recorded in the cache's first integer argument.
 */
static int qdisc_request_update(struct nl_cache *c, struct nl_sock *sk)
{
	struct tcmsg tchdr = {
		.tcm_family = AF_UNSPEC,
		.tcm_ifindex = c->c_iarg1,
	};

	return nl_send_simple(sk, RTM_GETQDISC, NLM_F_DUMP, &tchdr, sizeof(tchdr));
}

/**
 * @name QDisc Addition
 * @{
 */

/*
 * Build a netlink message for the qdisc with the given message type and
 * flags; thin wrapper around the generic tc message builder.
 */
static int qdisc_build(struct rtnl_qdisc *qdisc, int type, int flags,
		       struct nl_msg **result)
{
	return rtnl_tc_msg_build(TC_CAST(qdisc), type, flags, result);
#if 0
	/* Some qdiscs don't accept properly nested messages (e.g. netem). To
	 * accomodate for this, they can complete the message themselves.
	 */
	else if (qops && qops->qo_build_msg) {
		err = qops->qo_build_msg(qdisc, *result);
		if (err < 0)
			goto errout;
	}
#endif
}
/**
 * Set limit of TBF qdisc by latency.
 * @arg qdisc		TBF qdisc to be modified.
 * @arg latency		Latency in micro seconds.
 *
 * Calculates and sets the limit based on the desired latency and the
 * configured rate and peak rate. In order for this operation to succeed,
 * the rate and if required the peak rate must have been set in advance.
 *
 * @f[
 *   limit_n = \frac{{rate_n} \times {latency}}{10^6}+{bucketsize}_n
 * @f]
 * @f[
 *   limit = min(limit_{rate},limit_{peak})
 * @f]
 *
 * @return 0 on success or a negative error code.
 */
int rtnl_qdisc_tbf_set_limit_by_latency(struct rtnl_qdisc *qdisc, int latency)
{
	struct rtnl_tbf *tbf;
	double limit, limit2;

	if (!(tbf = rtnl_tc_data(TC_CAST(qdisc))))
		BUG();

	/* The rate must already be configured to derive a byte limit. */
	if (!(tbf->qt_mask & TBF_ATTR_RATE))
		return -NLE_MISSING_ATTR;

	limit = calc_limit(&tbf->qt_rate, latency, tbf->qt_rate_bucket);

	/* With a peak rate configured, the effective limit is the smaller
	 * of the two per-rate limits. */
	if (tbf->qt_mask & TBF_ATTR_PEAKRATE) {
		limit2 = calc_limit(&tbf->qt_peakrate, latency, tbf->qt_peakrate_bucket);

		if (limit2 < limit)
			limit = limit2;
	}

	rtnl_qdisc_tbf_set_limit(qdisc, (int) limit);

	return 0;
}
// Encode an ingress discipline onto a libnl qdisc object: kind "ingress",
// attached at the ingress root with the well-known ingress handle.
Try<Nothing> encode<ingress::Discipline>(
    const Netlink<struct rtnl_qdisc>& qdisc,
    const ingress::Discipline& discipline)
{
  struct rtnl_tc* tc = TC_CAST(qdisc.get());

  int error = rtnl_tc_set_kind(tc, "ingress");
  if (error != 0) {
    return Error(
        "Failed to set the kind of the queueing discipline: " +
        string(nl_geterror(error)));
  }

  rtnl_tc_set_parent(tc, INGRESS_ROOT.get());
  rtnl_tc_set_handle(tc, ingress::HANDLE.get());

  return Nothing();
}
/*
 * Return the target interface index of a mirred action, or 0 when the
 * action carries no mirred data.
 */
uint32_t rtnl_mirred_get_ifindex(struct rtnl_act *act)
{
	struct rtnl_mirred *mirred;

	mirred = (struct rtnl_mirred *) rtnl_tc_data(TC_CAST(act));
	if (!mirred)
		return 0;

	return mirred->m_parm.ifindex;
}
/*
 * Return the configured mirred action code, or -NLE_NOMEM when the
 * action's mirred data is unavailable.
 */
int rtnl_mirred_get_policy(struct rtnl_act *act)
{
	struct rtnl_mirred *mirred;

	mirred = (struct rtnl_mirred *) rtnl_tc_data(TC_CAST(act));
	if (!mirred)
		return -NLE_NOMEM;

	return mirred->m_parm.action;
}
/*
 * Return the configured skbedit action code, or -NLE_NOMEM when the
 * action's skbedit data is unavailable.
 */
int rtnl_skbedit_get_action(struct rtnl_act *act)
{
	struct rtnl_skbedit *skbedit;

	skbedit = (struct rtnl_skbedit *) rtnl_tc_data(TC_CAST(act));
	if (!skbedit)
		return -NLE_NOMEM;

	return skbedit->s_parm.action;
}
/*
 * Remove the qdisc attached at Parent on the given link.
 *
 * NOTE(review): the return value of rtnl_qdisc_delete() is ignored and
 * Success() is always returned — presumably deliberate so that removing
 * an already-absent qdisc is not treated as an error; confirm against
 * callers before changing.
 */
TError TNlHtb::Remove(const TNlLink &link) {
    struct rtnl_qdisc *qdisc;

    qdisc = rtnl_qdisc_alloc();
    if (!qdisc)
        return TError(EError::Unknown, std::string("Unable to allocate qdisc object"));

    rtnl_tc_set_ifindex(TC_CAST(qdisc), link.GetIndex());
    rtnl_tc_set_parent(TC_CAST(qdisc), Parent);
    link.Dump("remove", qdisc);
    rtnl_qdisc_delete(link.GetSock(), qdisc);
    /* Drop our reference. */
    rtnl_qdisc_put(qdisc);

    return TError::Success();
}
// Decode a "basic" classifier from a libnl classifier object. Returns
// None() if the classifier is of a different kind; otherwise wraps the
// classifier's protocol.
//
// Fix: rtnl_tc_get_kind() returns NULL when the kind attribute is unset;
// comparing a NULL char* against std::string is undefined behavior, so
// check for NULL first.
Result<basic::Classifier> decode<basic::Classifier>(
    const Netlink<struct rtnl_cls>& cls)
{
  const char* kind = rtnl_tc_get_kind(TC_CAST(cls.get()));

  if (kind == NULL || kind != string("basic")) {
    return None();
  }

  return basic::Classifier(rtnl_cls_get_protocol(cls.get()));
}
// Decode an HTB queueing discipline from a libnl qdisc object. Returns
// None() if the qdisc is not of the HTB kind.
//
// Fix: rtnl_tc_get_kind() returns NULL when the kind attribute is unset;
// comparing a NULL char* against a std::string constant is undefined
// behavior, so check for NULL first.
Result<htb::Config> decode<htb::Config>(
    const Netlink<struct rtnl_qdisc>& qdisc)
{
  const char* kind = rtnl_tc_get_kind(TC_CAST(qdisc.get()));

  if (kind == NULL || kind != htb::KIND) {
    return None();
  }

  return htb::Config();
}
/*
 * Return the ematch tree attached to a cgroup classifier. Calls BUG()
 * when the classifier carries no cgroup data.
 */
struct rtnl_ematch_tree *rtnl_cgroup_get_ematch(struct rtnl_cls *cls)
{
	struct rtnl_cgroup *data = rtnl_tc_data(TC_CAST(cls));

	if (!data)
		BUG();

	return data->cg_ematch;
}
/**
 * Set the byte limit of a TBF qdisc.
 * @arg qdisc		TBF qdisc to change.
 * @arg limit		Maximum queue size in bytes.
 */
void rtnl_qdisc_tbf_set_limit(struct rtnl_qdisc *qdisc, int limit)
{
	struct rtnl_tbf *data = rtnl_tc_data(TC_CAST(qdisc));

	if (!data)
		BUG();

	data->qt_limit = limit;
	data->qt_mask |= TBF_ATTR_LIMIT;
}
/**
 * Set the packet delay correlation probability of a netem qdisc.
 * @arg qdisc		netem qdisc to change.
 * @arg prob		Correlation probability for successive delays.
 */
void rtnl_netem_set_delay_correlation(struct rtnl_qdisc *qdisc, int prob)
{
	struct rtnl_netem *data = rtnl_tc_data(TC_CAST(qdisc));

	if (!data)
		BUG();

	data->qnm_corr.nmc_delay = prob;
	data->qnm_mask |= SCH_NETEM_ATTR_DELAY_CORR;
}
/**
 * Set the packet delay jitter of a netem qdisc.
 * @arg qdisc		netem qdisc to change.
 * @arg jitter		Delay jitter in microseconds (stored as ticks).
 */
void rtnl_netem_set_jitter(struct rtnl_qdisc *qdisc, int jitter)
{
	struct rtnl_netem *data = rtnl_tc_data(TC_CAST(qdisc));

	if (!data)
		BUG();

	data->qnm_jitter = nl_us2ticks(jitter);
	data->qnm_mask |= SCH_NETEM_ATTR_JITTER;
}
/**
 * Set the packet delay of a netem qdisc.
 * @arg qdisc		netem qdisc to change.
 * @arg delay		Delay in microseconds (stored as ticks).
 */
void rtnl_netem_set_delay(struct rtnl_qdisc *qdisc, int delay)
{
	struct rtnl_netem *data = rtnl_tc_data(TC_CAST(qdisc));

	if (!data)
		BUG();

	data->qnm_latency = nl_us2ticks(delay);
	data->qnm_mask |= SCH_NETEM_ATTR_LATENCY;
}
/**
 * Set the packet duplication probability of a netem qdisc.
 * @arg qdisc		netem qdisc to change.
 * @arg prob		Duplication probability.
 */
void rtnl_netem_set_duplicate(struct rtnl_qdisc *qdisc, int prob)
{
	struct rtnl_netem *data = rtnl_tc_data(TC_CAST(qdisc));

	if (!data)
		BUG();

	data->qnm_duplicate = prob;
	data->qnm_mask |= SCH_NETEM_ATTR_DUPLICATE;
}
/**
 * Set the packet loss probability of a netem qdisc.
 * @arg qdisc		netem qdisc to change.
 * @arg prob		Loss probability.
 */
void rtnl_netem_set_loss(struct rtnl_qdisc *qdisc, int prob)
{
	struct rtnl_netem *data = rtnl_tc_data(TC_CAST(qdisc));

	if (!data)
		BUG();

	data->qnm_loss = prob;
	data->qnm_mask |= SCH_NETEM_ATTR_LOSS;
}
/**
 * Set the corruption correlation probability of a netem qdisc.
 * @arg qdisc		netem qdisc to change.
 * @arg prob		Correlation probability for successive corruptions.
 */
void rtnl_netem_set_corruption_correlation(struct rtnl_qdisc *qdisc, int prob)
{
	struct rtnl_netem *data = rtnl_tc_data(TC_CAST(qdisc));

	if (!data)
		BUG();

	data->qnm_crpt.nmcr_correlation = prob;
	data->qnm_mask |= SCH_NETEM_ATTR_CORRUPT_CORR;
}