/**
 * Add object to hashtable
 * @arg ht		Hashtable
 * @arg obj		Object to add
 *
 * Adds `obj` to the hashtable. Object type must support hashing, otherwise
 * all objects will be added to the chain `0`.
 *
 * @note The reference counter of the object is incremented.
 *
 * @return 0 on success or a negative error code
 * @retval -NLE_EXIST Identical object already present in hashtable
 */
int nl_hash_table_add(nl_hash_table_t *ht, struct nl_object *obj)
{
	nl_hash_node_t *entry;
	uint32_t hash = 0;

	nl_object_keygen(obj, &hash, ht->size);

	/* Scan the chain first and refuse duplicates. */
	for (entry = ht->nodes[hash]; entry; entry = entry->next) {
		if (nl_object_identical(entry->obj, obj)) {
			NL_DBG(2, "Warning: Add to hashtable found duplicate...\n");
			return -NLE_EXIST;
		}
	}

	NL_DBG (5, "adding cache entry of obj %p in table %p, with hash 0x%x\n",
		obj, ht, hash);

	entry = malloc(sizeof(nl_hash_node_t));
	if (!entry)
		return -NLE_NOMEM;

	/* The table keeps its own reference to the object. */
	nl_object_get(obj);
	entry->obj = obj;
	entry->key = hash;
	entry->key_size = sizeof(uint32_t);

	/* Insert at the head of the chain. */
	entry->next = ht->nodes[hash];
	ht->nodes[hash] = entry;

	return 0;
}
/** * Transmit Netlink message using sendmsg() * @arg sk Netlink socket (required) * @arg msg Netlink message to be sent (required) * @arg hdr sendmsg() message header (required) * * Transmits the message specified in \c hdr over the Netlink socket using the * sendmsg() system call. * * @attention * The `msg` argument will *not* be used to derive the message payload that * is being sent out. The `msg` argument is *only* passed on to the * `NL_CB_MSG_OUT` callback. The caller is responsible to initialize the * `hdr` struct properly and have it point to the message payload and * socket address. * * @note * This function uses `nlmsg_set_src()` to modify the `msg` argument prior to * invoking the `NL_CB_MSG_OUT` callback to provide the local port number. * * @callback This function triggers the `NL_CB_MSG_OUT` callback. * * @attention * Think twice before using this function. It provides a low level access to * the Netlink socket. Among other limitations, it does not add credentials * even if enabled or respect the destination address specified in the `msg` * object. * * @see nl_socket_set_local_port() * @see nl_send_auto() * @see nl_send_iovec() * * @return Number of bytes sent on success or a negative error code. * * @lowlevel */ int nl_sendmsg(struct nl_sock *sk, struct nl_msg *msg, struct msghdr *hdr) { struct nl_cb *cb; int ret; if (sk->s_fd < 0) return -NLE_BAD_SOCK; nlmsg_set_src(msg, &sk->s_local); cb = sk->s_cb; if (cb->cb_set[NL_CB_MSG_OUT]) if ((ret = nl_cb_call(cb, NL_CB_MSG_OUT, msg)) != NL_OK) return ret; ret = sendmsg(sk->s_fd, hdr, 0); if (ret < 0) { char errbuf[64]; NL_DBG(4, "nl_sendmsg(%p): sendmsg() failed with %d (%s)\n", sk, errno, strerror_r(errno, errbuf, sizeof(errbuf))); return -nl_syserr2nlerr(errno); } NL_DBG(4, "sent %d bytes\n", ret); return ret; }
/**
 * Remove ematch from the list of ematches it is linked to.
 * @arg ematch		ematch object
 */
void rtnl_ematch_unlink(struct rtnl_ematch *ematch)
{
	struct nl_list_head *link = &ematch->e_list;

	NL_DBG(2, "unlinked ematch %p from any lists\n", ematch);

	/* Unlinking a container that still has children orphans them. */
	if (!nl_list_empty(&ematch->e_childs))
		NL_DBG(1, "warning: ematch %p with childs was unlinked\n",
		       ematch);

	nl_list_del(link);
	nl_init_list_head(link);
}
/**
 * Remove object from hashtable
 * @arg ht		Hashtable
 * @arg obj		Object to remove
 *
 * Remove `obj` from hashtable if it exists.
 *
 * @note The reference counter of the stored object is decremented.
 *
 * @return 0 on success or a negative error code.
 * @retval -NLE_OBJ_NOTFOUND Object not present in hashtable.
 */
int nl_hash_table_del(nl_hash_table_t *ht, struct nl_object *obj)
{
	nl_hash_node_t *node, *prev;
	uint32_t key_hash;

	nl_object_keygen(obj, &key_hash, ht->size);
	prev = node = ht->nodes[key_hash];

	while (node) {
		if (nl_object_identical(node->obj, obj)) {
			/* Release the reference the table itself holds.
			 * Putting `obj` here instead (the previous code)
			 * leaked `node->obj` and dropped a reference the
			 * caller still owns whenever the two objects were
			 * identical but distinct instances. */
			nl_object_put(node->obj);

			NL_DBG (5, "deleting cache entry of obj %p in table %p, with"
				" hash 0x%x\n", obj, ht, key_hash);

			/* Unlink: chain head vs. interior node. */
			if (node == ht->nodes[key_hash])
				ht->nodes[key_hash] = node->next;
			else
				prev->next = node->next;

			free(node);
			return 0;
		}
		prev = node;
		node = node->next;
	}

	return -NLE_OBJ_NOTFOUND;
}
static int include_cb(struct nl_object *obj, struct nl_parser_param *p) { struct nl_cache_assoc *ca = p->pp_arg; struct nl_cache_ops *ops = ca->ca_cache->c_ops; NL_DBG(2, "Including object %p into cache %p\n", obj, ca->ca_cache); #ifdef NL_DEBUG if (nl_debug >= 4) nl_object_dump(obj, &nl_debug_dp); #endif if (ops->co_event_filter) if (ops->co_event_filter(ca->ca_cache, obj) != NL_OK) return 0; if (ops->co_include_event) return ops->co_include_event(ca->ca_cache, obj, ca->ca_change, ca->ca_change_v2, ca->ca_change_data); else { if (ca->ca_change_v2) return nl_cache_include_v2(ca->ca_cache, obj, ca->ca_change_v2, ca->ca_change_data); else return nl_cache_include(ca->ca_cache, obj, ca->ca_change, ca->ca_change_data); } }
/**
 * Create attribute index based on a stream of attributes.
 * @arg tb		Index array to be filled (maxtype+1 elements).
 * @arg maxtype		Maximum attribute type expected and accepted.
 * @arg head		Head of attribute stream.
 * @arg len		Length of attribute stream.
 * @arg policy		Attribute validation policy.
 *
 * Iterates over the stream of attributes and stores a pointer to each
 * attribute in the index array using the attribute type as index to
 * the array. Attribute with a type greater than the maximum type
 * specified will be silently ignored in order to maintain backwards
 * compatibility. If \a policy is not NULL, the attribute will be
 * validated using the specified policy.
 *
 * @see nla_validate
 * @return 0 on success or a negative error code.
 */
int nla_parse(struct nlattr *tb[], int maxtype, struct nlattr *head, int len,
	      struct nla_policy *policy)
{
	struct nlattr *nla;
	int rem, err;

	/* Start with an empty index: one slot per type 0..maxtype. */
	memset(tb, 0, sizeof(struct nlattr *) * (maxtype + 1));

	nla_for_each_attr(nla, head, len, rem) {
		int type = nla_type(nla);

		if (type == 0) {
			/* Type 0 is not a valid attribute type; skip it. */
			NL_DBG(1, "Illegal nla->nla_type == 0\n");
			continue;
		}

		/* Types above maxtype are ignored for backwards compat. */
		if (type <= maxtype) {
			if (policy) {
				err = validate_nla(nla, maxtype, policy);
				if (err < 0)
					goto errout;
			}

			tb[type] = nla;
		}
	}
/**
 * Allocate new cache manager
 * @arg sk              Netlink socket or NULL to auto allocate
 * @arg protocol        Netlink protocol this manager is used for
 * @arg flags           Flags (\c NL_AUTO_PROVIDE)
 * @arg result          Result pointer
 *
 * Allocates a new cache manager for the specified netlink protocol.
 *
 * 1. If sk is not specified (\c NULL) a netlink socket matching the
 *    specified protocol will be automatically allocated.
 *
 * 2. The socket will be put in non-blocking mode and sequence checking
 *    will be disabled regardless of whether the socket was provided by
 *    the caller or automatically allocated.
 *
 * 3. The socket will be connected.
 *
 * If the flag \c NL_AUTO_PROVIDE is specified, any cache added to the
 * manager will automatically be made available to other users using
 * nl_cache_mngt_provide().
 *
 * @note If the socket is provided by the caller, it is NOT recommended
 *       to use the socket for anything else besides receiving netlink
 *       notifications.
 *
 * @return 0 on success or a negative error code.
 */
int nl_cache_mngr_alloc(struct nl_sock *sk, int protocol, int flags,
			struct nl_cache_mngr **result)
{
	struct nl_cache_mngr *mngr;
	int err = -NLE_NOMEM;

	/* Catch abuse of flags: NL_ALLOCATED_SOCK is internal-only and set
	 * below when we allocate the socket ourselves. */
	if (flags & NL_ALLOCATED_SOCK)
		BUG();

	mngr = calloc(1, sizeof(*mngr));
	if (!mngr)
		return -NLE_NOMEM;

	if (!sk) {
		/* No caller-supplied socket: allocate one and remember that
		 * we own it so nl_cache_mngr_free() releases it. */
		if (!(sk = nl_socket_alloc()))
			goto errout;

		flags |= NL_ALLOCATED_SOCK;
	}

	mngr->cm_sock = sk;
	mngr->cm_nassocs = NASSOC_INIT;
	mngr->cm_protocol = protocol;
	mngr->cm_flags = flags;
	mngr->cm_assocs = calloc(mngr->cm_nassocs,
				 sizeof(struct nl_cache_assoc));
	if (!mngr->cm_assocs)
		goto errout;

	/* Required to receive async event notifications */
	nl_socket_disable_seq_check(mngr->cm_sock);

	if ((err = nl_connect(mngr->cm_sock, protocol)) < 0)
		goto errout;

	if ((err = nl_socket_set_nonblocking(mngr->cm_sock)) < 0)
		goto errout;

	/* Create and allocate socket for sync cache fills */
	mngr->cm_sync_sock = nl_socket_alloc();
	if (!mngr->cm_sync_sock) {
		err = -NLE_NOMEM;
		goto errout;
	}
	if ((err = nl_connect(mngr->cm_sync_sock, protocol)) < 0)
		goto errout_free_sync_sock;

	NL_DBG(1, "Allocated cache manager %p, protocol %d, %d caches\n",
	       mngr, protocol, mngr->cm_nassocs);

	*result = mngr;
	return 0;

	/* Error paths: the sync socket is freed explicitly because
	 * nl_cache_mngr_free() would also try to close it; everything
	 * else (assocs array, auto-allocated main socket, mngr itself)
	 * is released by nl_cache_mngr_free(). */
errout_free_sync_sock:
	nl_socket_free(mngr->cm_sync_sock);
errout:
	nl_cache_mngr_free(mngr);
	return err;
}
/** * Free cache manager and all caches. * @arg mngr Cache manager. * * Release all resources held by a cache manager. */ void nl_cache_mngr_free(struct nl_cache_mngr *mngr) { int i; if (!mngr) return; if (mngr->cm_sock) nl_close(mngr->cm_sock); if (mngr->cm_sync_sock) { nl_close(mngr->cm_sync_sock); nl_socket_free(mngr->cm_sync_sock); } if (mngr->cm_flags & NL_ALLOCATED_SOCK) nl_socket_free(mngr->cm_sock); for (i = 0; i < mngr->cm_nassocs; i++) { if (mngr->cm_assocs[i].ca_cache) { nl_cache_mngt_unprovide(mngr->cm_assocs[i].ca_cache); nl_cache_free(mngr->cm_assocs[i].ca_cache); } } free(mngr->cm_assocs); NL_DBG(1, "Cache manager %p freed\n", mngr); free(mngr); }
/**
 * Free ematch object
 * @arg ematch		ematch object, may be NULL
 *
 * Unlinks the ematch from any list it is on, releases its private data
 * area and frees the object itself.
 */
void rtnl_ematch_free(struct rtnl_ematch *ematch)
{
	/* Tolerate NULL for consistency with rtnl_ematch_tree_free();
	 * previously `ematch->e_data` was dereferenced unconditionally. */
	if (!ematch)
		return;

	NL_DBG(2, "freed ematch %p\n", ematch);
	rtnl_ematch_unlink(ematch);
	free(ematch->e_data);
	free(ematch);
}
/* Hash key generator for inet diag objects: the key is built from the
 * (family, state, sport, dport) tuple, packed to keep the hashed bytes
 * free of padding. */
static void idiagnl_keygen(struct nl_object *obj, uint32_t *hashkey,
			   uint32_t table_sz)
{
	struct idiagnl_hash_key {
		uint8_t  family;
		uint8_t  state;
		uint16_t sport;
		uint16_t dport;
	} __attribute__((packed)) key;
	struct idiagnl_msg *msg = (struct idiagnl_msg *) obj;
	unsigned int key_sz = sizeof(key);

	key.family = msg->idiag_family;
	key.state  = msg->idiag_state;
	key.sport  = msg->idiag_sport;
	key.dport  = msg->idiag_dport;

	*hashkey = nl_hash(&key, key_sz, 0) % table_sz;

	NL_DBG(5, "idiagnl %p key (fam %d state %d sport %d dport %d) keysz %d, hash 0x%x\n",
	       msg, key.family, key.state, key.sport, key.dport, key_sz,
	       *hashkey);
}
/**
 * Create attribute index based on a stream of attributes.
 * @arg tb		Index array to be filled (maxtype+1 elements).
 * @arg maxtype		Maximum attribute type expected and accepted.
 * @arg head		Head of attribute stream.
 * @arg len		Length of attribute stream.
 * @arg policy		Attribute validation policy.
 *
 * Iterates over the stream of attributes and stores a pointer to each
 * attribute in the index array using the attribute type as index to
 * the array. Attribute with a type greater than the maximum type
 * specified will be silently ignored in order to maintain backwards
 * compatibility. If \a policy is not NULL, the attribute will be
 * validated using the specified policy.
 *
 * @see nla_validate
 * @return 0 on success or a negative error code.
 */
int nla_parse(struct nlattr *tb[], int maxtype, struct nlattr *head, int len,
	      struct nla_policy *policy)
{
	struct nlattr *nla;
	int rem, err;

	/* Start with an empty index: one slot per type 0..maxtype. */
	memset(tb, 0, sizeof(struct nlattr *) * (maxtype + 1));

	nla_for_each_attr(nla, head, len, rem) {
		int type = nla_type(nla);

		/* Types above maxtype are ignored for backwards compat. */
		if (type > maxtype)
			continue;

		if (policy) {
			err = validate_nla(nla, maxtype, policy);
			if (err < 0)
				goto errout;
		}

		/* Last attribute of a given type wins. */
		if (tb[type])
			NL_DBG(1, "Attribute of type %#x found multiple times in message, "
				  "previous attribute is being ignored.\n", type);

		tb[type] = nla;
	}
/* Parse the IFLA_AF_SPEC payload of a bridge port: collects the
 * IFLA_BRIDGE_VLAN_INFO attributes (single VIDs and BEGIN/END ranges)
 * into the port's VLAN bitmaps in `data`. */
static int bridge_parse_af_full(struct rtnl_link *link, struct nlattr *attr_full,
                                void *data)
{
	struct bridge_data *bd = data;
	struct bridge_vlan_info *vinfo = NULL;
	uint16_t vid_range_start = 0;
	uint16_t vid_range_flags = -1; /* 0xffff == "no range open" sentinel */

	struct nlattr *attr;
	int remaining;

	nla_for_each_nested(attr, attr_full, remaining) {

		/* Only VLAN info entries are of interest here. */
		if (nla_type(attr) != IFLA_BRIDGE_VLAN_INFO)
			continue;

		if (nla_len(attr) != sizeof(struct bridge_vlan_info))
			return -EINVAL;

		vinfo = nla_data(attr);
		/* VID 0 is reserved; valid VIDs stay below VLAN_VID_MASK. */
		if (!vinfo->vid || vinfo->vid >= VLAN_VID_MASK)
			return -EINVAL;


		if (vinfo->flags & BRIDGE_VLAN_INFO_RANGE_BEGIN) {
			/* Remember range start and its flags (minus the
			 * BEGIN marker) until the matching END arrives. */
			vid_range_start = vinfo->vid;
			vid_range_flags = (vinfo->flags ^ BRIDGE_VLAN_INFO_RANGE_BEGIN);
			continue;
		}

		if (vinfo->flags & BRIDGE_VLAN_INFO_RANGE_END) {
			/* sanity check the range flags: the END entry must
			 * carry the same flags as the BEGIN entry did */
			if (vid_range_flags !=
			    (vinfo->flags ^ BRIDGE_VLAN_INFO_RANGE_END)) {
				NL_DBG(1, "VLAN range flags differ; can not handle it.\n");
				return -EINVAL;
			}
		} else {
			/* Stand-alone entry: treat as a one-VID "range". */
			vid_range_start = vinfo->vid;
		}

		/* Mark every VID of the range in the port's bitmaps. */
		for (; vid_range_start <= vinfo->vid; vid_range_start++) {
			if (vinfo->flags & BRIDGE_VLAN_INFO_PVID)
				bd->vlan_info.pvid = vinfo->vid;

			if (vinfo->flags & BRIDGE_VLAN_INFO_UNTAGGED)
				set_bit(vid_range_start,
					bd->vlan_info.untagged_bitmap);

			set_bit(vid_range_start, bd->vlan_info.vlan_bitmap);
			bd->ce_mask |= BRIDGE_ATTR_PORT_VLAN;
		}

		/* Reset the sentinel for the next (potential) range. */
		vid_range_flags = -1;
	}

	return 0;
}
/**
 * Free ematch tree object
 * @arg tree		ematch tree object, may be NULL
 *
 * Releases the tree and every ematch attached to it.
 */
void rtnl_ematch_tree_free(struct rtnl_ematch_tree *tree)
{
	if (tree == NULL)
		return;

	/* Free all ematches linked into the tree first. */
	free_ematch_list(&tree->et_list);

	NL_DBG(2, "Freed ematch tree %p\n", tree);

	free(tree);
}
/** * Register ematch module * @arg ops Module operations. * * This function must be called by each ematch module at initialization * time. It registers the calling module as available module. * * @return 0 on success or a negative error code. */ int rtnl_ematch_register(struct rtnl_ematch_ops *ops) { if (rtnl_ematch_lookup_ops(ops->eo_kind)) return -NLE_EXIST; NL_DBG(1, "ematch module \"%s\" registered\n", ops->eo_name); nl_list_add_tail(&ops->eo_list, &ematch_ops_list); return 0; }
/* Retrieves the configured HZ and ticks/us value in the kernel.
 * The value is cached. Supported ways of getting it:
 *
 * 1) environment variable
 * 2) /proc/net/psched and sysconf
 *
 * Supports the environment variables:
 *   PROC_NET_PSCHED - may point to psched file in /proc
 *   PROC_ROOT       - may point to /proc fs
 */
static void __init get_psched_settings(void)
{
	char name[FILENAME_MAX];
	FILE *fd;
	int got_hz = 0;

	if (getenv("HZ")) {
		long hz = strtol(getenv("HZ"), NULL, 0);

		/* Ignore values that over/underflowed strtol(). */
		if (LONG_MIN != hz && LONG_MAX != hz) {
			user_hz = hz;
			got_hz = 1;
		}
	}

	if (!got_hz)
		user_hz = sysconf(_SC_CLK_TCK);

	psched_hz = user_hz;

	if (getenv("TICKS_PER_USEC")) {
		double t = strtod(getenv("TICKS_PER_USEC"), NULL);

		ticks_per_usec = t;
	} else {
		/* Resolve the psched file location, honoring overrides. */
		if (getenv("PROC_NET_PSCHED"))
			snprintf(name, sizeof(name), "%s",
				 getenv("PROC_NET_PSCHED"));
		else if (getenv("PROC_ROOT"))
			snprintf(name, sizeof(name), "%s/net/psched",
				 getenv("PROC_ROOT"));
		else
			/* snprintf guarantees NUL termination, unlike the
			 * strncpy() it replaces, and matches the style of
			 * the branches above. */
			snprintf(name, sizeof(name), "%s",
				 "/proc/net/psched");

		if ((fd = fopen(name, "r"))) {
			unsigned int ns_per_usec, ns_per_tick, nom, denom;

			if (fscanf(fd, "%08x %08x %08x %08x",
			    &ns_per_usec, &ns_per_tick, &nom, &denom) != 4) {
				NL_DBG(1, "Fatal error: can not read psched settings from \"%s\". " \
					"Try to set TICKS_PER_USEC, PROC_NET_PSCHED or PROC_ROOT " \
					"environment variables\n", name);
				exit(1);
			}

			ticks_per_usec = (double) ns_per_usec /
					 (double) ns_per_tick;

			/* A nominator of 1 MHz means denom holds the real HZ. */
			if (nom == 1000000)
				psched_hz = denom;

			fclose(fd);
		}
	}
}
/**
 * Check for event notifications
 * @arg mngr		Cache Manager
 * @arg timeout		Upper limit poll() will block, in milliseconds.
 *
 * Calls poll() on the manager's socket and, if notifications are
 * pending, receives and handles them. Ideally called regularly during
 * an idle period.
 *
 * @return A positive value if at least one update was handled, 0
 *         for none, or a negative error code.
 */
int nl_cache_mngr_poll(struct nl_cache_mngr *mngr, int timeout)
{
	struct pollfd fds = {
		.fd = nl_socket_get_fd(mngr->cm_handle),
		.events = POLLIN,
	};
	int nready;

	NL_DBG(3, "Cache manager %p, poll() fd %d\n", mngr, fds.fd);
	nready = poll(&fds, 1, timeout);
	NL_DBG(3, "Cache manager %p, poll() returned %d\n", mngr, nready);

	if (nready < 0)
		return -nl_syserr2nlerr(errno);

	/* Timed out with nothing pending. */
	if (nready == 0)
		return 0;

	return nl_cache_mngr_data_ready(mngr);
}

/**
 * Receive available event notifications
 * @arg mngr		Cache manager
 *
 * May be called when the manager's socket has updates pending. Do not
 * combine with nl_cache_mngr_poll().
 *
 * @return A positive value if at least one update was handled, 0
 *         for none, or a negative error code.
 */
int nl_cache_mngr_data_ready(struct nl_cache_mngr *mngr)
{
	int err = nl_recvmsgs_default(mngr->cm_handle);

	return err < 0 ? err : 1;
}
/**
 * Add ematch to the end of the parent's list of children.
 * @arg parent		parent ematch object
 * @arg child		ematch object to be added to parent
 *
 * The parent must be a container ematch.
 *
 * @return 0 on success or a negative error code.
 * @retval -NLE_OPNOTSUPP Parent is not a container ematch.
 */
int rtnl_ematch_add_child(struct rtnl_ematch *parent,
			  struct rtnl_ematch *child)
{
	/* Only container ematches may carry children. */
	if (parent->e_kind != TCF_EM_CONTAINER)
		return -NLE_OPNOTSUPP;

	NL_DBG(2, "added ematch %p \"%s\" to container %p\n",
	       child, child->e_ops->eo_name, parent);

	nl_list_add_tail(&child->e_list, &parent->e_childs);

	return 0;
}
/* NL_CB_VALID callback: associates an incoming event message with the
 * cache whose operations claim its message type and feeds it through
 * the cache's inclusion logic (include_cb). */
static int event_input(struct nl_msg *msg, void *arg)
{
	struct nl_cache_mngr *mngr = arg;
	int protocol = nlmsg_get_proto(msg);
	int type = nlmsg_hdr(msg)->nlmsg_type;
	struct nl_cache_ops *ops;
	int i, n;
	struct nl_parser_param p = {
		.pp_cb = include_cb,
	};

	NL_DBG(2, "Cache manager %p, handling new message %p as event\n",
	       mngr, msg);
#ifdef NL_DEBUG
	if (nl_debug >= 4)
		nl_msg_dump(msg, stderr);
#endif

	/* The manager's socket is bound to exactly one protocol; a
	 * mismatch means the callback was wired up incorrectly. */
	if (mngr->cm_protocol != protocol)
		BUG();

	/* Find the first associated cache whose message-type table
	 * (terminated by mt_id < 0) contains this message type. */
	for (i = 0; i < mngr->cm_nassocs; i++) {
		if (mngr->cm_assocs[i].ca_cache) {
			ops = mngr->cm_assocs[i].ca_cache->c_ops;
			for (n = 0; ops->co_msgtypes[n].mt_id >= 0; n++)
				if (ops->co_msgtypes[n].mt_id == type)
					goto found;
		}
	}

	/* No cache is interested in this message type. */
	return NL_SKIP;

found:
	NL_DBG(2, "Associated message %p to cache %p\n",
	       msg, mngr->cm_assocs[i].ca_cache);
	p.pp_arg = &mngr->cm_assocs[i];

	return nl_cache_parse(ops, NULL, nlmsg_hdr(msg), &p);
}
/**
 * Allocate ematch tree object
 * @arg progid		program id
 *
 * @return New ematch tree object or NULL on allocation failure.
 */
struct rtnl_ematch_tree *rtnl_ematch_tree_alloc(uint16_t progid)
{
	struct rtnl_ematch_tree *tree = calloc(1, sizeof(*tree));

	if (tree == NULL)
		return NULL;

	NL_INIT_LIST_HEAD(&tree->et_list);
	tree->et_progid = progid;

	NL_DBG(2, "allocated new ematch tree %p, progid=%u\n", tree, progid);

	return tree;
}
/**
 * Allocate ematch object.
 *
 * Allocates and initializes an ematch object with empty sibling and
 * child lists.
 *
 * @return New ematch object or NULL.
 */
struct rtnl_ematch *rtnl_ematch_alloc(void)
{
	struct rtnl_ematch *e = calloc(1, sizeof(*e));

	if (e == NULL)
		return NULL;

	NL_DBG(2, "allocated ematch %p\n", e);

	NL_INIT_LIST_HEAD(&e->e_list);
	NL_INIT_LIST_HEAD(&e->e_childs);

	return e;
}
/* Append an nfgenmsg header (family, version, resource id) to a
 * netlink message. The resource id is stored in network byte order. */
static int nfnlmsg_append(struct nl_msg *msg, uint8_t family, uint16_t res_id)
{
	struct nfgenmsg *nfg = nlmsg_reserve(msg, sizeof(*nfg), NLMSG_ALIGNTO);

	if (nfg == NULL)
		return -NLE_NOMEM;

	nfg->nfgen_family = family;
	nfg->version = NFNETLINK_V0;
	nfg->res_id = htons(res_id);

	NL_DBG(2, "msg %p: Added nfnetlink header family=%d res_id=%d\n",
	       msg, family, res_id);

	return 0;
}
/** * Register a set of cache operations * @arg ops cache operations * * Called by users of caches to announce the avaibility of * a certain cache type. * * @return 0 on success or a negative error code. */ int nl_cache_mngt_register(struct nl_cache_ops *ops) { if (!ops->co_name || !ops->co_obj_ops) return -NLE_INVAL; if (nl_cache_ops_lookup(ops->co_name)) return -NLE_EXIST; ops->co_next = cache_ops; cache_ops = ops; NL_DBG(1, "Registered cache operations %s\n", ops->co_name); return 0; }
/**
 * Unregister a set of cache operations
 * @arg ops		cache operations
 *
 * Called by users of caches to announce a set of cache operations is
 * no longer available. The operations must have been registered
 * previously using nl_cache_mngt_register().
 *
 * @return 0 on success or a negative error code
 * @retval -NLE_NOCACHE Operations were not registered.
 */
int nl_cache_mngt_unregister(struct nl_cache_ops *ops)
{
	struct nl_cache_ops **link;

	/* Walk the pointer chain until the slot holding `ops` is found. */
	for (link = &cache_ops; *link != NULL; link = &(*link)->co_next)
		if (*link == ops)
			break;

	if (*link == NULL)
		return -NLE_NOCACHE;

	NL_DBG(1, "Unregistered cache operations %s\n", ops->co_name);

	/* Splice `ops` out of the list. */
	*link = ops->co_next;

	return 0;
}
/** * Allocate new cache manager * @arg sk Netlink socket. * @arg protocol Netlink Protocol this manager is used for * @arg flags Flags * @arg result Result pointer * * @return 0 on success or a negative error code. */ int nl_cache_mngr_alloc(struct nl_sock *sk, int protocol, int flags, struct nl_cache_mngr **result) { struct nl_cache_mngr *mngr; int err = -NLE_NOMEM; if (sk == NULL) BUG(); mngr = calloc(1, sizeof(*mngr)); if (!mngr) goto errout; mngr->cm_handle = sk; mngr->cm_nassocs = 32; mngr->cm_protocol = protocol; mngr->cm_flags = flags; mngr->cm_assocs = calloc(mngr->cm_nassocs, sizeof(struct nl_cache_assoc)); if (!mngr->cm_assocs) goto errout; nl_socket_modify_cb(mngr->cm_handle, NL_CB_VALID, NL_CB_CUSTOM, event_input, mngr); /* Required to receive async event notifications */ nl_socket_disable_seq_check(mngr->cm_handle); if ((err = nl_connect(mngr->cm_handle, protocol) < 0)) goto errout; if ((err = nl_socket_set_nonblocking(mngr->cm_handle) < 0)) goto errout; NL_DBG(1, "Allocated cache manager %p, protocol %d, %d caches\n", mngr, protocol, mngr->cm_nassocs); *result = mngr; return 0; errout: nl_cache_mngr_free(mngr); return err; }
/**
 * Free cache manager and all caches.
 * @arg mngr		Cache manager, may be NULL.
 *
 * Release all resources after usage of a cache manager: closes the
 * socket, frees every associated cache and the manager itself.
 */
void nl_cache_mngr_free(struct nl_cache_mngr *mngr)
{
	int i;

	if (!mngr)
		return;

	if (mngr->cm_handle)
		nl_close(mngr->cm_handle);

	for (i = 0; i < mngr->cm_nassocs; i++)
		if (mngr->cm_assocs[i].ca_cache)
			nl_cache_free(mngr->cm_assocs[i].ca_cache);

	free(mngr->cm_assocs);

	/* Log before releasing mngr: the previous order used the pointer
	 * value after free(), which is undefined behavior in C. */
	NL_DBG(1, "Cache manager %p freed\n", mngr);

	free(mngr);
}
/** * Add Generic Netlink headers to Netlink message * @arg msg Netlink message object * @arg port Netlink port or NL_AUTO_PORT * @arg seq Sequence number of message or NL_AUTO_SEQ * @arg family Numeric family identifier * @arg hdrlen Length of user header * @arg flags Additional Netlink message flags (optional) * @arg cmd Numeric command identifier * @arg version Interface version * * Calls nlmsg_put() on the specified message object to reserve space for * the Netlink header, the Generic Netlink header, and a user header of * specified length. Fills out the header fields with the specified * parameters. * * @par Example: * @code * struct nl_msg *msg; * struct my_hdr *user_hdr; * * if (!(msg = nlmsg_alloc())) * // ERROR * * user_hdr = genlmsg_put(msg, NL_AUTO_PORT, NL_AUTO_SEQ, family_id, * sizeof(struct my_hdr), 0, MY_CMD_FOO, 0); * if (!user_hdr) * // ERROR * @endcode * * @see nlmsg_put() * * Returns Pointer to user header or NULL if an error occurred. */ void *genlmsg_put(struct nl_msg *msg, uint32_t port, uint32_t seq, int family, int hdrlen, int flags, uint8_t cmd, uint8_t version) { struct nlmsghdr *nlh; struct genlmsghdr hdr = { .cmd = cmd, .version = version, }; nlh = nlmsg_put(msg, port, seq, family, GENL_HDRLEN + hdrlen, flags); if (nlh == NULL) return NULL; memcpy(nlmsg_data(nlh), &hdr, sizeof(hdr)); NL_DBG(2, "msg %p: Added generic netlink header cmd=%d version=%d\n", msg, cmd, version); return nlmsg_data(nlh) + GENL_HDRLEN; }
/** * Send netlink message with control over sendmsg() message header. * @arg sk Netlink socket. * @arg msg Netlink message to be sent. * @arg hdr Sendmsg() message header. * @return Number of characters sent on sucess or a negative error code. */ int nl_sendmsg(struct nl_sock *sk, struct nl_msg *msg, struct msghdr *hdr) { struct nl_cb *cb; int ret; nlmsg_set_src(msg, &sk->s_local); cb = sk->s_cb; if (cb->cb_set[NL_CB_MSG_OUT]) if ((ret = nl_cb_call(cb, NL_CB_MSG_OUT, msg)) != NL_OK) return ret; ret = sendmsg(sk->s_fd, hdr, 0); if (ret < 0) return -nl_syserr2nlerr(errno); NL_DBG(4, "sent %d bytes\n", ret); return ret; }
/* Parse IFLA_INFO_DATA for a veth link: extracts the embedded peer
 * device description (VETH_INFO_PEER) into link->l_info. */
static int veth_parse(struct rtnl_link *link, struct nlattr *data,
		      struct nlattr *xstats)
{
	struct nlattr *tb[VETH_INFO_MAX+1];
	struct nlattr *peer_tb[IFLA_MAX + 1];
	/* assumes l_info already holds a peer link object — TODO confirm
	 * the allocator for this link type guarantees that */
	struct rtnl_link *peer = link->l_info;
	int err;

	NL_DBG(3, "Parsing veth link info");

	if ((err = nla_parse_nested(tb, VETH_INFO_MAX, data, veth_policy)) < 0)
		goto errout;

	if (tb[VETH_INFO_PEER]) {
		struct nlattr *nla_peer;
		struct ifinfomsg *ifi;

		nla_peer = tb[VETH_INFO_PEER];
		ifi = nla_data(nla_peer);

		/* NOTE(review): nla_len(nla_peer) is not validated against
		 * sizeof(struct ifinfomsg) before the accesses below —
		 * relies on the kernel always emitting a full header. */
		peer->l_family = ifi->ifi_family;
		peer->l_arptype = ifi->ifi_type;
		peer->l_index = ifi->ifi_index;
		peer->l_flags = ifi->ifi_flags;
		peer->l_change = ifi->ifi_change;

		/* The peer's own IFLA_* attributes follow its ifinfomsg. */
		err = nla_parse(peer_tb, IFLA_MAX,
				nla_data(nla_peer) + sizeof(struct ifinfomsg),
				nla_len(nla_peer) - sizeof(struct ifinfomsg),
				link_policy);
		if (err < 0)
			goto errout;

		err = rtnl_link_info_parse(peer, peer_tb);
		if (err < 0)
			goto errout;
	}

	err = 0;
errout:
	return err;
}
/** * Transmit raw data over Netlink socket. * @arg sk Netlink socket (required) * @arg buf Buffer carrying data to send (required) * @arg size Size of buffer (required) * * Transmits "raw" data over the specified Netlink socket. Unlike the other * transmit functions it does not modify the data in any way. It directly * passes the buffer \c buf of \c size to sendto(). * * The message is addressed to the peer as specified in the socket by either * the nl_socket_set_peer_port() or nl_socket_set_peer_groups() function. * * @note Because there is no indication on the message boundaries of the data * being sent, the \c NL_CB_MSG_OUT callback handler will not be invoked * for data that is being sent using this function. * * @see nl_socket_set_peer_port() * @see nl_socket_set_peer_groups() * @see nl_sendmsg() * * @return Number of bytes sent or a negative error code. */ int nl_sendto(struct nl_sock *sk, void *buf, size_t size) { int ret; if (!buf) return -NLE_INVAL; if (sk->s_fd < 0) return -NLE_BAD_SOCK; ret = sendto(sk->s_fd, buf, size, 0, (struct sockaddr *) &sk->s_peer, sizeof(sk->s_peer)); if (ret < 0) { char errbuf[64]; NL_DBG(4, "nl_sendto(%p): sendto() failed with %d (%s)\n", sk, errno, strerror_r(errno, errbuf, sizeof(errbuf))); return -nl_syserr2nlerr(errno); } return ret; }
/* Parse IFLA_INFO_DATA for a VLAN link: extracts VLAN id, flags and the
 * ingress/egress priority mappings into the link's vlan_info. */
static int vlan_parse(struct rtnl_link *link, struct nlattr *data,
		      struct nlattr *xstats)
{
	struct nlattr *tb[IFLA_VLAN_MAX+1];
	struct vlan_info *vi;
	int err;

	NL_DBG(3, "Parsing VLAN link info");

	if ((err = nla_parse_nested(tb, IFLA_VLAN_MAX, data, vlan_policy)) < 0)
		goto errout;

	if ((err = vlan_alloc(link)) < 0)
		goto errout;

	vi = link->l_info;

	if (tb[IFLA_VLAN_ID]) {
		vi->vi_vlan_id = nla_get_u16(tb[IFLA_VLAN_ID]);
		vi->vi_mask |= VLAN_HAS_ID;
	}

	if (tb[IFLA_VLAN_FLAGS]) {
		struct ifla_vlan_flags flags;
		nla_memcpy(&flags, tb[IFLA_VLAN_FLAGS], sizeof(flags));

		vi->vi_flags = flags.flags;
		vi->vi_mask |= VLAN_HAS_FLAGS;
	}

	if (tb[IFLA_VLAN_INGRESS_QOS]) {
		struct ifla_vlan_qos_mapping *map;
		struct nlattr *nla;
		int remaining;

		/* Ingress mappings index directly by priority (from). */
		memset(vi->vi_ingress_qos, 0, sizeof(vi->vi_ingress_qos));

		nla_for_each_nested(nla, tb[IFLA_VLAN_INGRESS_QOS], remaining) {
			if (nla_len(nla) < sizeof(*map))
				return nl_error(EINVAL, "Malformed mapping");

			map = nla_data(nla);

			if (map->from < 0 || map->from > VLAN_PRIO_MAX) {
				return nl_error(EINVAL, "VLAN prio %d out of "
						"range", map->from);
			}

			vi->vi_ingress_qos[map->from] = map->to;
		}

		vi->vi_mask |= VLAN_HAS_INGRESS_QOS;
	}

	if (tb[IFLA_VLAN_EGRESS_QOS]) {
		struct ifla_vlan_qos_mapping *map;
		struct nlattr *nla;
		int remaining, i = 0;

		/* First pass: count the mappings to size the array. */
		nla_for_each_nested(nla, tb[IFLA_VLAN_EGRESS_QOS], remaining) {
			if (nla_len(nla) < sizeof(*map))
				return nl_error(EINVAL, "Malformed mapping");
			i++;
		}

		/* align to have a little reserve */
		vi->vi_egress_size = (i + 32) & ~31;
		vi->vi_egress_qos = calloc(vi->vi_egress_size, sizeof(*map));
		if (vi->vi_egress_qos == NULL)
			return nl_errno(ENOMEM);

		/* Second pass: copy the mappings. */
		i = 0;
		nla_for_each_nested(nla, tb[IFLA_VLAN_EGRESS_QOS], remaining) {
			map = nla_data(nla);
			NL_DBG(4, "Assigning egress qos mapping %d\n", i);
			vi->vi_egress_qos[i].vm_from = map->from;
			vi->vi_egress_qos[i++].vm_to = map->to;
		}

		vi->vi_negress = i;
		vi->vi_mask |= VLAN_HAS_EGRESS_QOS;
	}