/*
 * Set up dionaea's netlink runtime: open an rtnetlink socket, subscribe to
 * link/neighbour/address notifications, prime the neigh/link/addr caches,
 * and hook the socket into the libev loop.
 *
 * Note: g_error() aborts the process, so the failure branches below never
 * return; no explicit cleanup path is needed.
 *
 * @param d  dionaea instance (unused here; kept for the module interface)
 * @return   true on success (failures abort via g_error())
 */
static bool nl_new(struct dionaea *d)
{
	g_debug("%s", __PRETTY_FUNCTION__);

	/* One process-wide rtnetlink socket, stored in the module runtime. */
	nl_runtime.sock = nl_socket_alloc();
	struct nl_sock *sock = nl_runtime.sock;

	/* Notifications arrive unsolicited; sequence numbers do not apply. */
	nl_socket_disable_seq_check(sock);
	nl_socket_modify_cb(sock, NL_CB_VALID, NL_CB_CUSTOM, nl_event_input, NULL);
	nl_join_groups(sock, RTMGRP_LINK);

	int err;
	if ( (err = nl_connect(sock, NETLINK_ROUTE)) < 0) {
		g_error("Could not connect netlink (%s)", nl_geterror(err));
	}

	/* Subscribe to link, neighbour and IPv4/IPv6 address change events.
	 * NOTE(review): return values are not checked here — presumably
	 * acceptable because a failed membership only means missed events. */
	nl_socket_add_membership(sock, RTNLGRP_LINK);
	nl_socket_add_membership(sock, RTNLGRP_NEIGH);
	nl_socket_add_membership(sock, RTNLGRP_IPV4_IFADDR);
	nl_socket_add_membership(sock, RTNLGRP_IPV6_IFADDR);

	if( (err=rtnl_neigh_alloc_cache(sock, &nl_runtime.neigh_cache)) != 0 ) {
		g_error("Could not allocate neigh cache! (%s)", nl_geterror(err));
	}

	/* rtnl_link_alloc_cache() changed arity between libnl versions;
	 * the build system probes it into LIBNL_RTNL_LINK_ALLOC_CACHE_ARGC. */
#if LIBNL_RTNL_LINK_ALLOC_CACHE_ARGC == 3
	if( (err=rtnl_link_alloc_cache(sock, AF_UNSPEC, &nl_runtime.link_cache)) != 0 )
#elif LIBNL_RTNL_LINK_ALLOC_CACHE_ARGC == 2
	if( (err=rtnl_link_alloc_cache(sock, &nl_runtime.link_cache)) != 0 )
#endif
	{
		g_error("Could not allocate link cache! (%s)", nl_geterror(err));
	}

	if( (err=rtnl_addr_alloc_cache(sock, &nl_runtime.addr_cache)) != 0 ) {
		g_error("Could not allocate addr cache! (%s)", nl_geterror(err));
	}

	/* Make the caches globally resolvable for other libnl users. */
	nl_cache_mngt_provide(nl_runtime.neigh_cache);
	nl_cache_mngt_provide(nl_runtime.link_cache);
	nl_cache_mngt_provide(nl_runtime.addr_cache);

	/* React to accepted incoming connections. */
	nl_runtime.ihandler = ihandler_new("dionaea.connection.*.accept", nl_ihandler_cb, NULL);

	/* Watch the netlink fd for readability in the main event loop. */
	ev_io_init(&nl_runtime.io_in, nl_io_in_cb, nl_socket_get_fd(sock), EV_READ);
	ev_io_start(g_dionaea->loop, &nl_runtime.io_in);

	nl_runtime.link_addr_cache = g_hash_table_new(g_int_hash, g_int_equal);

	/* Seed the link/address state from the initial cache contents. */
	nl_cache_foreach(nl_runtime.link_cache, nl_obj_input, NULL);
	nl_cache_foreach(nl_runtime.addr_cache, nl_obj_input, NULL);

	return true;
}
static gboolean sync_connection_setup (NMNetlinkMonitor *self, GError **error) { NMNetlinkMonitorPrivate *priv = NM_NETLINK_MONITOR_GET_PRIVATE (self); #ifdef LIBNL_NEEDS_ADDR_CACHING_WORKAROUND struct nl_cache *addr_cache; #endif int err; /* Set up the event listener connection */ priv->nlh_sync = nl_socket_alloc (); if (!priv->nlh_sync) { g_set_error (error, NM_NETLINK_MONITOR_ERROR, NM_NETLINK_MONITOR_ERROR_NETLINK_ALLOC_HANDLE, _("unable to allocate netlink handle for monitoring link status: %s"), nl_geterror (ENOMEM)); goto error; } if (!nlh_setup (priv->nlh_sync, NULL, self, error)) goto error; #ifdef LIBNL_NEEDS_ADDR_CACHING_WORKAROUND /* Work around apparent libnl bug; rtnl_addr requires that all * addresses have the "peer" attribute set in order to be compared * for equality, but this attribute is not normally set. As a * result, most addresses will not compare as equal even to * themselves, busting caching. */ rtnl_addr_alloc_cache (priv->nlh_sync, &addr_cache); g_warn_if_fail (addr_cache != NULL); nl_cache_get_ops (addr_cache)->co_obj_ops->oo_id_attrs &= ~0x80; nl_cache_free (addr_cache); #endif err = rtnl_link_alloc_cache (priv->nlh_sync, AF_UNSPEC, &priv->link_cache); if (err < 0) { g_set_error (error, NM_NETLINK_MONITOR_ERROR, NM_NETLINK_MONITOR_ERROR_NETLINK_ALLOC_LINK_CACHE, _("unable to allocate netlink link cache for monitoring link status: %s"), nl_geterror (err)); goto error; } nl_cache_mngt_provide (priv->link_cache); return TRUE; error: if (priv->link_cache) { nl_cache_free (priv->link_cache); priv->link_cache = NULL; } if (priv->nlh_sync) { nl_socket_free (priv->nlh_sync); priv->nlh_sync = NULL; } return FALSE; }
/*
 * Allocate a routing-rule cache on the given socket and register it with
 * libnl's cache management. Fatal (process exit) on allocation failure.
 */
struct nl_cache *nl_cli_rule_alloc_cache(struct nl_sock *sk)
{
	struct nl_cache *rule_cache;
	int rc;

	rc = rtnl_rule_alloc_cache(sk, AF_UNSPEC, &rule_cache);
	if (rc < 0)
		nl_cli_fatal(rc, "Unable to allocate routing rule cache: %s\n",
			     nl_geterror(rc));

	nl_cache_mngt_provide(rule_cache);

	return rule_cache;
}
/*
 * Retrieve the neighbour cache via the given handle. On failure a message
 * is printed to stderr and NULL is returned; on success the cache is made
 * globally available before being returned.
 */
struct nl_cache *nltool_alloc_neigh_cache(struct nl_handle *nlh)
{
	struct nl_cache *neigh_cache = rtnl_neigh_alloc_cache(nlh);

	if (neigh_cache == NULL) {
		fprintf(stderr, "Unable to retrieve neighbour cache: %s\n",
			nl_geterror());
		return NULL;
	}

	nl_cache_mngt_provide(neigh_cache);
	return neigh_cache;
}
/*
 * Retrieve the generic-netlink family cache via the given handle. On failure
 * a message is printed to stderr and NULL is returned; on success the cache
 * is made globally available before being returned.
 */
struct nl_cache *nltool_alloc_genl_family_cache(struct nl_handle *nlh)
{
	struct nl_cache *family_cache = genl_ctrl_alloc_cache(nlh);

	if (family_cache == NULL) {
		fprintf(stderr, "Unable to retrieve genl family cache: %s\n",
			nl_geterror());
		return NULL;
	}

	nl_cache_mngt_provide(family_cache);
	return family_cache;
}
/*
 * Generic cache allocator for CLI tools: invoke the supplied allocation
 * callback, abort with a fatal error on failure, otherwise provide the
 * cache to libnl's global cache management and return it.
 *
 * @param sock  netlink socket passed through to the allocator
 * @param name  human-readable cache name, used in the error message
 * @param ac    allocator, e.g. rtnl_link_alloc_cache-style function
 */
struct nl_cache *nl_cli_alloc_cache(struct nl_sock *sock, const char *name,
				    int (*ac)(struct nl_sock *, struct nl_cache **))
{
	struct nl_cache *result;
	int rc;

	rc = ac(sock, &result);
	if (rc < 0)
		nl_cli_fatal(rc, "Unable to allocate %s cache: %s",
			     name, nl_geterror(rc));

	nl_cache_mngt_provide(result);

	return result;
}
/** * nm_netlink_find_address: * @ifindex: interface index * @family: address family, either AF_INET or AF_INET6 * @addr: binary address, either struct in_addr* or struct in6_addr* * @prefix: prefix length * * Searches for a matching address on the given interface. * * Returns: %TRUE if the given address was found on the interface, %FALSE if it * was not found or an error occurred. **/ gboolean nm_netlink_find_address (int ifindex, int family, void *addr, /* struct in_addr or struct in6_addr */ int prefix) { struct nl_sock *nlh = NULL; struct nl_cache *cache = NULL; FindAddrInfo info; g_return_val_if_fail (ifindex > 0, FALSE); g_return_val_if_fail (family == AF_INET || family == AF_INET6, FALSE); g_return_val_if_fail (addr != NULL, FALSE); g_return_val_if_fail (prefix >= 0, FALSE); memset (&info, 0, sizeof (info)); info.ifindex = ifindex; info.family = family; info.prefix = prefix; info.addr = addr; if (family == AF_INET) info.addrlen = sizeof (struct in_addr); else if (family == AF_INET6) info.addrlen = sizeof (struct in6_addr); else g_assert_not_reached (); nlh = nm_netlink_get_default_handle (); if (nlh) { rtnl_addr_alloc_cache(nlh, &cache); if (cache) { nl_cache_mngt_provide (cache); nl_cache_foreach (cache, find_one_address, &info); nl_cache_free (cache); } } return info.found; }
/**
 * Add cache to cache manager
 * @arg mngr		Cache manager.
 * @arg cache		Cache to be added to cache manager
 * @arg cb		Function to be called upon changes.
 * @arg data		Argument passed on to change callback
 *
 * Adds cache to the manager. The operation will trigger a full
 * dump request from the kernel to initially fill the contents
 * of the cache. The manager will subscribe to the notification group
 * of the cache and keep track of any further changes.
 *
 * The user is responsible for calling nl_cache_mngr_poll() or monitor
 * the socket and call nl_cache_mngr_data_ready() to allow the library
 * to process netlink notification events.
 *
 * @see nl_cache_mngr_poll()
 * @see nl_cache_mngr_data_ready()
 *
 * @return 0 on success or a negative error code.
 * @return -NLE_PROTO_MISMATCH Protocol mismatch between cache manager and
 * 			       cache type
 * @return -NLE_OPNOTSUPP Cache type does not support updates
 * @return -NLE_EXIST Cache of this type already being managed
 */
int nl_cache_mngr_add_cache(struct nl_cache_mngr *mngr, struct nl_cache *cache,
			    change_func_t cb, void *data)
{
	struct nl_cache_ops *ops;
	struct nl_af_group *grp;
	int err, i;

	ops = cache->c_ops;
	if (!ops)
		return -NLE_INVAL;

	if (ops->co_protocol != mngr->cm_protocol)
		return -NLE_PROTO_MISMATCH;

	if (ops->co_groups == NULL)
		return -NLE_OPNOTSUPP;

	/* Only one cache per type may be managed. */
	for (i = 0; i < mngr->cm_nassocs; i++)
		if (mngr->cm_assocs[i].ca_cache &&
		    mngr->cm_assocs[i].ca_cache->c_ops == ops)
			return -NLE_EXIST;

retry:
	/* Find a free association slot, growing the table when full. */
	for (i = 0; i < mngr->cm_nassocs; i++)
		if (!mngr->cm_assocs[i].ca_cache)
			break;

	if (i >= mngr->cm_nassocs) {
		struct nl_cache_assoc *assocs;

		/* FIX: do not assign realloc()'s result directly to
		 * mngr->cm_assocs — on failure the old table would be
		 * leaked and the manager left pointing at NULL. */
		assocs = realloc(mngr->cm_assocs,
				 (mngr->cm_nassocs + NASSOC_EXPAND) *
				 sizeof(struct nl_cache_assoc));
		if (assocs == NULL)
			return -NLE_NOMEM;

		mngr->cm_assocs = assocs;
		/* Zero the newly added slots so the retry scan above sees
		 * them as free. */
		memset(mngr->cm_assocs + mngr->cm_nassocs, 0,
		       NASSOC_EXPAND * sizeof(struct nl_cache_assoc));
		mngr->cm_nassocs += NASSOC_EXPAND;

		NL_DBG(1, "Increased capacity of cache manager %p " \
			  "to %d\n", mngr, mngr->cm_nassocs);
		goto retry;
	}

	/* Subscribe to every notification group of this cache type. */
	for (grp = ops->co_groups; grp->ag_group; grp++) {
		err = nl_socket_add_membership(mngr->cm_sock, grp->ag_group);
		if (err < 0)
			return err;
	}

	/* Initial full dump from the kernel to populate the cache. */
	err = nl_cache_refill(mngr->cm_sync_sock, cache);
	if (err < 0)
		goto errout_drop_membership;

	mngr->cm_assocs[i].ca_cache = cache;
	mngr->cm_assocs[i].ca_change = cb;
	mngr->cm_assocs[i].ca_change_data = data;

	if (mngr->cm_flags & NL_AUTO_PROVIDE)
		nl_cache_mngt_provide(cache);

	NL_DBG(1, "Added cache %p <%s> to cache manager %p\n",
	       cache, nl_cache_name(cache), mngr);

	return 0;

errout_drop_membership:
	for (grp = ops->co_groups; grp->ag_group; grp++)
		nl_socket_drop_membership(mngr->cm_sock, grp->ag_group);

	return err;
}
static void obj_input(struct nl_object *obj, void *arg) { struct nfnl_queue_msg *msg = (struct nfnl_queue_msg *) obj; struct nl_dump_params dp = { .dp_type = NL_DUMP_STATS, .dp_fd = stdout, .dp_dump_msgtype = 1, }; nfnl_queue_msg_set_verdict(msg, NF_ACCEPT); nl_object_dump(obj, &dp); nfnl_queue_msg_send_verdict(nfnlh, msg); } static int event_input(struct nl_msg *msg, void *arg) { if (nl_msg_parse(msg, &obj_input, NULL) < 0) fprintf(stderr, "<<EVENT>> Unknown message type\n"); /* Exit nl_recvmsgs_def() and return to the main select() */ return NL_STOP; } int main(int argc, char *argv[]) { struct nl_handle *rtnlh; struct nl_cache *link_cache; struct nfnl_queue *queue; enum nfnl_queue_copy_mode copy_mode; uint32_t copy_range; int err = 1; int family; if (nltool_init(argc, argv) < 0) return -1; nfnlh = nltool_alloc_handle(); if (nfnlh == NULL) return -1; nl_disable_sequence_check(nfnlh); nl_socket_modify_cb(nfnlh, NL_CB_VALID, NL_CB_CUSTOM, event_input, NULL); if ((argc > 1 && !strcasecmp(argv[1], "-h")) || argc < 3) { printf("Usage: nf-queue family group [ copy_mode ] " "[ copy_range ]\n"); return 2; } if (nfnl_connect(nfnlh) < 0) { fprintf(stderr, "%s\n", nl_geterror()); goto errout; } family = nl_str2af(argv[1]); if (family == AF_UNSPEC) { fprintf(stderr, "Unknown family: %s\n", argv[1]); goto errout; } nfnl_queue_pf_unbind(nfnlh, family); if (nfnl_queue_pf_bind(nfnlh, family) < 0) { fprintf(stderr, "%s\n", nl_geterror()); goto errout; } queue = nfnl_queue_alloc(); if (queue == NULL) { fprintf(stderr, "%s\n", nl_geterror()); goto errout; } nfnl_queue_set_group(queue, atoi(argv[2])); copy_mode = NFNL_QUEUE_COPY_PACKET; if (argc > 3) { copy_mode = nfnl_queue_str2copy_mode(argv[3]); if (copy_mode < 0) { fprintf(stderr, "%s\n", nl_geterror()); goto errout; } } nfnl_queue_set_copy_mode(queue, copy_mode); copy_range = 0xFFFF; if (argc > 4) copy_range = atoi(argv[4]); nfnl_queue_set_copy_range(queue, copy_range); if (nfnl_queue_create(nfnlh, queue) < 0) { fprintf(stderr, 
"%s\n", nl_geterror()); goto errout; } rtnlh = nltool_alloc_handle(); if (rtnlh == NULL) { goto errout_close; } if (nl_connect(rtnlh, NETLINK_ROUTE) < 0) { fprintf(stderr, "%s\n", nl_geterror()); goto errout; } if ((link_cache = rtnl_link_alloc_cache(rtnlh)) == NULL) { fprintf(stderr, "%s\n", nl_geterror()); goto errout_close; } nl_cache_mngt_provide(link_cache); while (1) { fd_set rfds; int nffd, rtfd, maxfd, retval; FD_ZERO(&rfds); maxfd = nffd = nl_socket_get_fd(nfnlh); FD_SET(nffd, &rfds); rtfd = nl_socket_get_fd(rtnlh); FD_SET(rtfd, &rfds); if (maxfd < rtfd) maxfd = rtfd; /* wait for an incoming message on the netlink socket */ retval = select(maxfd+1, &rfds, NULL, NULL, NULL); if (retval) { if (FD_ISSET(nffd, &rfds)) nl_recvmsgs_default(nfnlh); if (FD_ISSET(rtfd, &rfds)) nl_recvmsgs_default(rtnlh); } } nl_cache_mngt_unprovide(link_cache); nl_cache_free(link_cache); nfnl_queue_put(queue); nl_close(rtnlh); nl_handle_destroy(rtnlh); errout_close: nl_close(nfnlh); nl_handle_destroy(nfnlh); errout: return err; }
/**
 * Add cache responsibility to cache manager
 * @arg mngr		Cache manager.
 * @arg name		Name of cache to keep track of
 * @arg cb		Function to be called upon changes.
 * @arg data		Argument passed on to change callback
 * @arg result		Pointer to store added cache.
 *
 * Allocates a new cache of the specified type and adds it to the manager.
 * The operation will trigger a full dump request from the kernel to
 * initially fill the contents of the cache. The manager will subscribe
 * to the notification group of the cache to keep track of any further
 * changes.
 *
 * @return 0 on success or a negative error code.
 */
int nl_cache_mngr_add(struct nl_cache_mngr *mngr, const char *name,
		      change_func_t cb, void *data, struct nl_cache **result)
{
	struct nl_cache_ops *ops;
	struct nl_cache *cache;
	struct nl_af_group *grp;
	int err, i;

	ops = nl_cache_ops_lookup(name);
	if (!ops)
		return -NLE_NOCACHE;

	if (ops->co_protocol != mngr->cm_protocol)
		return -NLE_PROTO_MISMATCH;

	if (ops->co_groups == NULL)
		return -NLE_OPNOTSUPP;

	/* Only one cache per type may be managed. */
	for (i = 0; i < mngr->cm_nassocs; i++)
		if (mngr->cm_assocs[i].ca_cache &&
		    mngr->cm_assocs[i].ca_cache->c_ops == ops)
			return -NLE_EXIST;

retry:
	/* Find a free association slot, growing the table when full. */
	for (i = 0; i < mngr->cm_nassocs; i++)
		if (!mngr->cm_assocs[i].ca_cache)
			break;

	if (i >= mngr->cm_nassocs) {
		struct nl_cache_assoc *assocs;

		/* FIX: do not assign realloc()'s result directly to
		 * mngr->cm_assocs — on failure the old table would be
		 * leaked and the manager left pointing at NULL. */
		assocs = realloc(mngr->cm_assocs,
				 (mngr->cm_nassocs + 16) *
				 sizeof(struct nl_cache_assoc));
		if (assocs == NULL)
			return -NLE_NOMEM;

		mngr->cm_assocs = assocs;
		/* FIX: zero the newly added slots; previously the retry
		 * scan read uninitialized ca_cache pointers. */
		memset(mngr->cm_assocs + mngr->cm_nassocs, 0,
		       16 * sizeof(struct nl_cache_assoc));
		mngr->cm_nassocs += 16;

		NL_DBG(1, "Increased capacity of cache manager %p " \
			  "to %d\n", mngr, mngr->cm_nassocs);
		goto retry;
	}

	cache = nl_cache_alloc(ops);
	if (!cache)
		return -NLE_NOMEM;

	/* Subscribe to every notification group of this cache type. */
	for (grp = ops->co_groups; grp->ag_group; grp++) {
		err = nl_socket_add_membership(mngr->cm_handle, grp->ag_group);
		if (err < 0)
			goto errout_free_cache;
	}

	/* Initial full dump from the kernel to populate the cache. */
	err = nl_cache_refill(mngr->cm_handle, cache);
	if (err < 0)
		goto errout_drop_membership;

	mngr->cm_assocs[i].ca_cache = cache;
	mngr->cm_assocs[i].ca_change = cb;
	mngr->cm_assocs[i].ca_change_data = data;

	if (mngr->cm_flags & NL_AUTO_PROVIDE)
		nl_cache_mngt_provide(cache);

	NL_DBG(1, "Added cache %p <%s> to cache manager %p\n",
	       cache, nl_cache_name(cache), mngr);

	*result = cache;
	return 0;

errout_drop_membership:
	for (grp = ops->co_groups; grp->ag_group; grp++)
		nl_socket_drop_membership(mngr->cm_handle, grp->ag_group);

errout_free_cache:
	nl_cache_free(cache);

	return err;
}