/**
 * Set hash mask and hash offset of a u32 classifier's selector.
 *
 * @arg cls		u32 classifier to be modified
 * @arg hashmask	hash mask in host byte-order (converted to network
 *			byte-order before being stored)
 * @arg offset		offset, in bytes, at which hashing starts
 *
 * @return 0 on success or a negative error code.
 */
int rtnl_u32_set_hashmask(struct rtnl_cls *cls, uint32_t hashmask, uint32_t offset)
{
	struct rtnl_u32 *u;
	struct tc_u32_sel *sel;
	int err;

	/* the selector stores the hash mask in network byte-order */
	hashmask = htonl(hashmask);

	if (!(u = (struct rtnl_u32 *) rtnl_tc_data(TC_CAST(cls))))
		return -NLE_NOMEM;

	sel = u32_selector_alloc(u);
	if (!sel)
		return -NLE_NOMEM;

	/* NOTE(review): this grows the selector data by one tc_u32_key even
	 * though no key is written here — presumably required by the way the
	 * selector buffer is laid out / serialized; verify against
	 * u32_selector_alloc() and the message fill code. */
	err = nl_data_append(u->cu_selector, NULL, sizeof(struct tc_u32_key));
	if(err < 0)
		return err;

	/* re-fetch the selector pointer: nl_data_append() may have
	 * realloc'ed the underlying buffer */
	sel = u32_selector(u);
	sel->hmask = hashmask;
	sel->hoff = offset;
	return 0;
}
/** * Append new 32-bit key to the selector * * @arg cls classifier to be modifier * @arg val value to be matched (network byte-order) * @arg mask mask to be applied before matching (network byte-order) * @arg off offset, in bytes, to start matching * @arg offmask offset mask * * General selectors define the pattern, mask and offset the pattern will be * matched to the packet contents. Using the general selectors you can match * virtually any single bit in the IP (or upper layer) header. * */ int rtnl_u32_add_key(struct rtnl_cls *cls, uint32_t val, uint32_t mask, int off, int offmask) { struct tc_u32_sel *sel; struct rtnl_u32 *u; int err; if (!(u = rtnl_tc_data(TC_CAST(cls)))) return -NLE_NOMEM; sel = u32_selector_alloc(u); if (!sel) return -NLE_NOMEM; if (sel->nkeys == UCHAR_MAX) return -NLE_NOMEM; err = nl_data_append(u->cu_selector, NULL, sizeof(struct tc_u32_key)); if (err < 0) return err; /* the selector might have been moved by realloc */ sel = u32_selector(u); sel->keys[sel->nkeys].mask = mask; sel->keys[sel->nkeys].val = val & mask; sel->keys[sel->nkeys].off = off; sel->keys[sel->nkeys].offmask = offmask; sel->nkeys++; u->cu_mask |= U32_ATTR_SELECTOR; return 0; }
/** * Set limit of TBF qdisc by latency. * @arg qdisc TBF qdisc to be modified. * @arg latency Latency in micro seconds. * * Calculates and sets the limit based on the desired latency and the * configured rate and peak rate. In order for this operation to succeed, * the rate and if required the peak rate must have been set in advance. * * @f[ * limit_n = \frac{{rate_n} \times {latency}}{10^6}+{bucketsize}_n * @f] * @f[ * limit = min(limit_{rate},limit_{peak}) * @f] * * @return 0 on success or a negative error code. */ int rtnl_qdisc_tbf_set_limit_by_latency(struct rtnl_qdisc *qdisc, int latency) { struct rtnl_tbf *tbf; double limit, limit2; if (!(tbf = rtnl_tc_data(TC_CAST(qdisc)))) BUG(); if (!(tbf->qt_mask & TBF_ATTR_RATE)) return -NLE_MISSING_ATTR; limit = calc_limit(&tbf->qt_rate, latency, tbf->qt_rate_bucket); if (tbf->qt_mask & TBF_ATTR_PEAKRATE) { limit2 = calc_limit(&tbf->qt_peakrate, latency, tbf->qt_peakrate_bucket); if (limit2 < limit) limit = limit2; } rtnl_qdisc_tbf_set_limit(qdisc, (int) limit); return 0; }
/**
 * Get the action code of a skbedit action.
 * @arg act		skbedit action.
 * @return Action code or a negative error code.
 */
int rtnl_skbedit_get_action(struct rtnl_act *act)
{
	struct rtnl_skbedit *skbedit;

	skbedit = (struct rtnl_skbedit *) rtnl_tc_data(TC_CAST(act));
	if (!skbedit)
		return -NLE_NOMEM;

	return skbedit->s_parm.action;
}
/**
 * Get the policy of a mirred action.
 * @arg act		mirred action.
 * @return The stored value or a negative error code.
 *
 * NOTE(review): this reads m_parm.action — confirm this field indeed
 * carries the "policy" value the corresponding setter stores.
 */
int rtnl_mirred_get_policy(struct rtnl_act *act)
{
	struct rtnl_mirred *mirred;

	mirred = (struct rtnl_mirred *) rtnl_tc_data(TC_CAST(act));
	if (!mirred)
		return -NLE_NOMEM;

	return mirred->m_parm.action;
}
/**
 * Get the target interface index of a mirred action.
 * @arg act		mirred action.
 * @return Interface index or 0 if the action data is unavailable.
 */
uint32_t rtnl_mirred_get_ifindex(struct rtnl_act *act)
{
	struct rtnl_mirred *mirred;

	mirred = (struct rtnl_mirred *) rtnl_tc_data(TC_CAST(act));
	if (!mirred)
		return 0;

	return mirred->m_parm.ifindex;
}
/**
 * Get the extended match tree attached to a cgroup classifier.
 * @arg cls		cgroup classifier.
 * @return Ematch tree pointer (may be NULL if none was set).
 */
struct rtnl_ematch_tree *rtnl_cgroup_get_ematch(struct rtnl_cls *cls)
{
	struct rtnl_cgroup *cgroup = rtnl_tc_data(TC_CAST(cls));

	if (!cgroup)
		BUG();

	return cgroup->cg_ematch;
}
/**
 * Set limit of netem qdisc.
 * @arg qdisc		Netem qdisc to be modified.
 * @arg limit		New limit in bytes.
 */
void rtnl_netem_set_limit(struct rtnl_qdisc *qdisc, int limit)
{
	struct rtnl_netem *netem = rtnl_tc_data(TC_CAST(qdisc));

	if (!netem)
		BUG();

	netem->qnm_limit = limit;
	netem->qnm_mask |= SCH_NETEM_ATTR_LIMIT;
}
/**
 * Get quantum of a fq_codel qdisc.
 * @arg qdisc		fq_codel qdisc.
 * @return Numeric quantum or zero.
 */
uint32_t rtnl_qdisc_fq_codel_get_quantum(struct rtnl_qdisc *qdisc)
{
	struct rtnl_fq_codel *fq_codel = rtnl_tc_data(TC_CAST(qdisc));

	if (!fq_codel || !(fq_codel->fq_mask & SCH_FQ_CODEL_ATTR_QUANTUM))
		return 0;

	return fq_codel->fq_quantum;
}
/**
 * Get interval of a fq_codel qdisc.
 * @arg qdisc		fq_codel qdisc.
 * @return Numeric interval or zero.
 */
uint32_t rtnl_qdisc_fq_codel_get_interval(struct rtnl_qdisc *qdisc)
{
	struct rtnl_fq_codel *fq_codel = rtnl_tc_data(TC_CAST(qdisc));

	if (!fq_codel || !(fq_codel->fq_mask & SCH_FQ_CODEL_ATTR_INTERVAL))
		return 0;

	return fq_codel->fq_interval;
}
/**
 * Get target of a fq_codel qdisc.
 * @arg qdisc		fq_codel qdisc.
 * @return Numeric target or zero.
 */
uint32_t rtnl_qdisc_fq_codel_get_target(struct rtnl_qdisc *qdisc)
{
	struct rtnl_fq_codel *fq_codel = rtnl_tc_data(TC_CAST(qdisc));

	if (!fq_codel || !(fq_codel->fq_mask & SCH_FQ_CODEL_ATTR_TARGET))
		return 0;

	return fq_codel->fq_target;
}
/**
 * Set limit of RED qdisc.
 * @arg qdisc		RED qdisc to be modified.
 * @arg limit		New limit in number of packets.
 */
void rtnl_red_set_limit(struct rtnl_qdisc *qdisc, int limit)
{
	struct rtnl_red *red = rtnl_tc_data(TC_CAST(qdisc));

	if (!red)
		BUG();

	red->qr_limit = limit;
	red->qr_mask |= RED_ATTR_LIMIT;
}
/**
 * Set the target interface index of a mirred action.
 * @arg act		mirred action to be modified.
 * @arg ifindex		Interface index of the mirror/redirect target.
 * @return 0 on success or a negative error code.
 */
int rtnl_mirred_set_ifindex(struct rtnl_act *act, uint32_t ifindex)
{
	struct rtnl_mirred *mirred;

	mirred = (struct rtnl_mirred *) rtnl_tc_data(TC_CAST(act));
	if (!mirred)
		return -NLE_NOMEM;

	mirred->m_parm.ifindex = ifindex;

	return 0;
}
/**
 * Set re-ordering gap of netem qdisc.
 * @arg qdisc		Netem qdisc to be modified.
 * @arg gap		New gap in number of packets.
 */
void rtnl_netem_set_gap(struct rtnl_qdisc *qdisc, int gap)
{
	struct rtnl_netem *netem = rtnl_tc_data(TC_CAST(qdisc));

	if (!netem)
		BUG();

	netem->qnm_gap = gap;
	netem->qnm_mask |= SCH_NETEM_ATTR_GAP;
}
/**
 * Set re-ordering probability of netem qdisc.
 * @arg qdisc		Netem qdisc to be modified.
 * @arg prob		New re-ordering probability.
 */
void rtnl_netem_set_reorder_probability(struct rtnl_qdisc *qdisc, int prob)
{
	struct rtnl_netem *netem = rtnl_tc_data(TC_CAST(qdisc));

	if (!netem)
		BUG();

	netem->qnm_ro.nmro_probability = prob;
	netem->qnm_mask |= SCH_NETEM_ATTR_RO_PROB;
}
/**
 * Set packet delay correlation probability of netem qdisc.
 * @arg qdisc		Netem qdisc to be modified.
 * @arg prob		New packet delay correlation probability.
 */
void rtnl_netem_set_delay_correlation(struct rtnl_qdisc *qdisc, int prob)
{
	struct rtnl_netem *netem = rtnl_tc_data(TC_CAST(qdisc));

	if (!netem)
		BUG();

	netem->qnm_corr.nmc_delay = prob;
	netem->qnm_mask |= SCH_NETEM_ATTR_DELAY_CORR;
}
/**
 * Set limit of TBF qdisc.
 * @arg qdisc		TBF qdisc to be modified.
 * @arg limit		New limit in bytes.
 */
void rtnl_qdisc_tbf_set_limit(struct rtnl_qdisc *qdisc, int limit)
{
	struct rtnl_tbf *tbf = rtnl_tc_data(TC_CAST(qdisc));

	if (!tbf)
		BUG();

	tbf->qt_limit = limit;
	tbf->qt_mask |= TBF_ATTR_LIMIT;
}
/**
 * Set packet delay of netem qdisc.
 * @arg qdisc		Netem qdisc to be modified.
 * @arg delay		New packet delay in micro seconds
 *			(converted to clock ticks internally).
 */
void rtnl_netem_set_delay(struct rtnl_qdisc *qdisc, int delay)
{
	struct rtnl_netem *netem = rtnl_tc_data(TC_CAST(qdisc));

	if (!netem)
		BUG();

	netem->qnm_latency = nl_us2ticks(delay);
	netem->qnm_mask |= SCH_NETEM_ATTR_LATENCY;
}
/**
 * Set packet delay jitter of netem qdisc.
 * @arg qdisc		Netem qdisc to be modified.
 * @arg jitter		New packet delay jitter in micro seconds
 *			(converted to clock ticks internally).
 */
void rtnl_netem_set_jitter(struct rtnl_qdisc *qdisc, int jitter)
{
	struct rtnl_netem *netem = rtnl_tc_data(TC_CAST(qdisc));

	if (!netem)
		BUG();

	netem->qnm_jitter = nl_us2ticks(jitter);
	netem->qnm_mask |= SCH_NETEM_ATTR_JITTER;
}
/**
 * Set packet duplication probability of netem qdisc.
 * @arg qdisc		Netem qdisc to be modified.
 * @arg prob		New packet duplication probability.
 */
void rtnl_netem_set_duplicate(struct rtnl_qdisc *qdisc, int prob)
{
	struct rtnl_netem *netem = rtnl_tc_data(TC_CAST(qdisc));

	if (!netem)
		BUG();

	netem->qnm_duplicate = prob;
	netem->qnm_mask |= SCH_NETEM_ATTR_DUPLICATE;
}
/**
 * Set packet loss probability of netem qdisc.
 * @arg qdisc		Netem qdisc to be modified.
 * @arg prob		New packet loss probability.
 */
void rtnl_netem_set_loss(struct rtnl_qdisc *qdisc, int prob)
{
	struct rtnl_netem *netem = rtnl_tc_data(TC_CAST(qdisc));

	if (!netem)
		BUG();

	netem->qnm_loss = prob;
	netem->qnm_mask |= SCH_NETEM_ATTR_LOSS;
}
/**
 * Set corruption correlation probability of netem qdisc.
 * @arg qdisc		Netem qdisc to be modified.
 * @arg prob		New corruption correlation probability.
 */
void rtnl_netem_set_corruption_correlation(struct rtnl_qdisc *qdisc, int prob)
{
	struct rtnl_netem *netem = rtnl_tc_data(TC_CAST(qdisc));

	if (!netem)
		BUG();

	netem->qnm_crpt.nmcr_correlation = prob;
	netem->qnm_mask |= SCH_NETEM_ATTR_CORRUPT_CORR;
}
/**
 * Set re-ordering correlation probability of netem qdisc.
 * @arg qdisc		Netem qdisc to be modified.
 * @arg prob		New re-ordering correlation probability.
 */
void rtnl_netem_set_reorder_correlation(struct rtnl_qdisc *qdisc, int prob)
{
	struct rtnl_netem *netem = rtnl_tc_data(TC_CAST(qdisc));

	if (!netem)
		BUG();

	netem->qnm_ro.nmro_correlation = prob;
	netem->qnm_mask |= SCH_NETEM_ATTR_RO_CORR;
}
/**
 * Set the hash table divisor of a u32 classifier.
 * @arg cls		u32 classifier to be modified.
 * @arg divisor		New divisor value.
 * @return 0 on success or a negative error code.
 */
int rtnl_u32_set_divisor(struct rtnl_cls *cls, uint32_t divisor)
{
	struct rtnl_u32 *u32;

	u32 = (struct rtnl_u32 *) rtnl_tc_data(TC_CAST(cls));
	if (!u32)
		return -NLE_NOMEM;

	u32->cu_divisor = divisor;
	u32->cu_mask |= U32_ATTR_DIVISOR;

	return 0;
}
/**
 * Set the priority rewritten by a skbedit action.
 * @arg act		skbedit action to be modified.
 * @arg prio		New priority value.
 * @return 0 on success or a negative error code.
 */
int rtnl_skbedit_set_priority(struct rtnl_act *act, uint32_t prio)
{
	struct rtnl_skbedit *skbedit;

	skbedit = (struct rtnl_skbedit *) rtnl_tc_data(TC_CAST(act));
	if (!skbedit)
		return -NLE_NOMEM;

	skbedit->s_prio = prio;
	skbedit->s_flags |= SKBEDIT_F_PRIORITY;

	return 0;
}
/**
 * Set the firewall mark rewritten by a skbedit action.
 * @arg act		skbedit action to be modified.
 * @arg mark		New mark value.
 * @return 0 on success or a negative error code.
 */
int rtnl_skbedit_set_mark(struct rtnl_act *act, uint32_t mark)
{
	struct rtnl_skbedit *skbedit;

	skbedit = (struct rtnl_skbedit *) rtnl_tc_data(TC_CAST(act));
	if (!skbedit)
		return -NLE_NOMEM;

	skbedit->s_mark = mark;
	skbedit->s_flags |= SKBEDIT_F_MARK;

	return 0;
}
/**
 * Set the queue mapping rewritten by a skbedit action.
 * @arg act		skbedit action to be modified.
 * @arg index		New queue index.
 * @return 0 on success or a negative error code.
 */
int rtnl_skbedit_set_queue_mapping(struct rtnl_act *act, uint16_t index)
{
	struct rtnl_skbedit *skbedit;

	skbedit = (struct rtnl_skbedit *) rtnl_tc_data(TC_CAST(act));
	if (!skbedit)
		return -NLE_NOMEM;

	skbedit->s_queue_mapping = index;
	skbedit->s_flags |= SKBEDIT_F_QUEUE_MAPPING;

	return 0;
}
/**
 * Set the link target of a u32 classifier.
 * @arg cls		u32 classifier to be modified.
 * @arg link		New link handle.
 * @return 0 on success or a negative error code.
 */
int rtnl_u32_set_link(struct rtnl_cls *cls, uint32_t link)
{
	struct rtnl_u32 *u32;

	u32 = (struct rtnl_u32 *) rtnl_tc_data(TC_CAST(cls));
	if (!u32)
		return -NLE_NOMEM;

	u32->cu_link = link;
	u32->cu_mask |= U32_ATTR_LINK;

	return 0;
}
/**
 * Set the hash table handle of a u32 classifier.
 * @arg cls		u32 classifier to be modified.
 * @arg ht		New hash table handle.
 * @return 0 on success or a negative error code.
 */
int rtnl_u32_set_hashtable(struct rtnl_cls *cls, uint32_t ht)
{
	struct rtnl_u32 *u32;

	u32 = (struct rtnl_u32 *) rtnl_tc_data(TC_CAST(cls));
	if (!u32)
		return -NLE_NOMEM;

	u32->cu_hash = ht;
	u32->cu_mask |= U32_ATTR_HASH;

	return 0;
}
/**
 * Get peak rate cell size of TBF qdisc.
 * @arg qdisc		TBF qdisc.
 * @return Size of peak rate cell in bytes, or -1 if no peak rate is set.
 */
int rtnl_qdisc_tbf_get_peakrate_cell(struct rtnl_qdisc *qdisc)
{
	struct rtnl_tbf *tbf = rtnl_tc_data(TC_CAST(qdisc));

	if (!tbf)
		BUG();

	if (!(tbf->qt_mask & TBF_ATTR_PEAKRATE))
		return -1;

	/* cell size is stored as a power of two (cell log) */
	return 1 << tbf->qt_peakrate.rs_cell_log;
}