Example #1
File: ring.c Project: chamaken/nurs
static struct mnl_ring *
mnl_socket_mmap(struct mnl_socket *nls, struct nl_mmap_req *req,
		int flags, int optname)
{
	struct mnl_ring *nlr = alloc_ring(req);

	if (nlr == NULL)
		return NULL;

	if (mnl_socket_setsockopt(nls, optname, req, sizeof(*req)) == -1)
		goto fail;

	nlr->ring = mmap(NULL, ring_size(nlr), PROT_READ | PROT_WRITE, flags,
			 mnl_socket_get_fd(nls), 0);
	if (nlr->ring == MAP_FAILED)
		goto fail;

	nlr->fd = mnl_socket_get_fd(nls);

	return nlr;

fail:
	free(nlr);
	return NULL;
}
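In the example above, the descriptor returned by mnl_socket_get_fd() is both passed to mmap() and cached in nlr->fd for later use. Below is a minimal sketch of how such a cached descriptor could be waited on with poll(2); wait_for_ring is a hypothetical helper name and only the fd member of struct mnl_ring is assumed from the example, so treat it as an illustration rather than code from the project.

#include <poll.h>

/* Hypothetical helper (not part of chamaken/nurs): block until the
 * memory-mapped netlink socket becomes readable or the timeout expires. */
static int wait_for_ring(struct mnl_ring *nlr, int timeout_ms)
{
	struct pollfd pfd = {
		.fd     = nlr->fd,	/* descriptor obtained via mnl_socket_get_fd() */
		.events = POLLIN,
	};

	/* >0: a frame is ready, 0: timeout, -1: error (errno is set) */
	return poll(&pfd, 1, timeout_ms);
}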
Example #2
/* method: get_fd */
static int mnl_socket__get_fd__meth(lua_State *L) {
  mnl_socket * this_idx1;
  int rc_mnl_socket_get_fd_idx1 = 0;
  this_idx1 = obj_type_mnl_socket_check(L,1);
  rc_mnl_socket_get_fd_idx1 = mnl_socket_get_fd(this_idx1);
  lua_pushinteger(L, rc_mnl_socket_get_fd_idx1);
  return 1;
}
Example #3
//Initialize the Linux-specific part of the context. Everything here relates to
//libmnl/netfilter
struct neat_ctx *nt_linux_init_ctx(struct neat_ctx *ctx)
{
    //TODO: Consider allocator function
    if ((ctx->mnl_rcv_buf = calloc(1, MNL_SOCKET_BUFFER_SIZE)) == NULL) {
        nt_log(ctx, NEAT_LOG_ERROR, "Failed to allocate netlink buffer", __func__);
        return NULL;
    }

    //Configure netlink and start requesting addresses
    if ((ctx->mnl_sock = mnl_socket_open(NETLINK_ROUTE)) == NULL) {
        nt_log(ctx, NEAT_LOG_ERROR, "Failed to allocate netlink socket", __func__);
        return NULL;
    }

    if (mnl_socket_bind(ctx->mnl_sock, (1 << (RTNLGRP_IPV4_IFADDR - 1)) |
                (1 << (RTNLGRP_IPV6_IFADDR - 1)), 0)) {
        nt_log(ctx, NEAT_LOG_ERROR, "Failed to bind netlink socket", __func__);
        return NULL;
    }

    //We need to build a list of all available source addresses as soon as
    //possible. It is started here
    if (neat_linux_request_addrs(ctx->mnl_sock) <= 0) {
        nt_log(ctx, NEAT_LOG_ERROR, "Failed to request addresses", __func__);
        return NULL;
    }

    //Add socket to event loop
    if (uv_udp_init(ctx->loop, &(ctx->uv_nl_handle))) {
        nt_log(ctx, NEAT_LOG_ERROR, "Failed to initialize uv UDP handle", __func__);
        return NULL;
    }

    //TODO: We could use offsetof, but libuv has a pointer so ...
    ctx->uv_nl_handle.data = ctx;

    if (uv_udp_open(&(ctx->uv_nl_handle), mnl_socket_get_fd(ctx->mnl_sock))) {
        nt_log(ctx, NEAT_LOG_ERROR, "Could not add netlink socket to uv", __func__);
        return NULL;
    }

    if (uv_udp_recv_start(&(ctx->uv_nl_handle), neat_linux_nl_alloc,
                nt_linux_nl_recv)) {
        nt_log(ctx, NEAT_LOG_ERROR, "Could not start receiving netlink packets", __func__);
        return NULL;
    }

    ctx->cleanup = nt_linux_cleanup;

#ifdef MPTCP_SUPPORT
    linux_read_sys_mptcp_enabled(ctx);
#endif // MPTCP_SUPPORT

    //Configure netlink socket, add to event loop and start dumping
    return ctx;
}
Example #4
//Initialize the Linux-specific part of the context. Everything here relates to
//libmnl/netfilter
struct neat_ctx *neat_linux_init_ctx(struct neat_ctx *nc)
{
    //TODO: Consider allocator function
    if ((nc->mnl_rcv_buf = calloc(MNL_SOCKET_BUFFER_SIZE, 1)) == NULL) {
        fprintf(stderr, "Failed to allocate netlink buffer\n");
        return NULL;
    }

    //Configure netlink and start requesting addresses
    if ((nc->mnl_sock = mnl_socket_open(NETLINK_ROUTE)) == NULL) {
        fprintf(stderr, "Failed to allocate netlink socket\n");
        return NULL;
    }

    if (mnl_socket_bind(nc->mnl_sock, (1 << (RTNLGRP_IPV4_IFADDR - 1)) |
                (1 << (RTNLGRP_IPV6_IFADDR - 1)), 0)) {
        fprintf(stderr, "Failed to bind netlink socket\n");
        return NULL;
    }
    
    //We need to build a list of all available source addresses as soon as
    //possible. It is started here
    if (neat_linux_request_addrs(nc->mnl_sock) <= 0) {
        fprintf(stderr, "Failed to request addresses\n");
        return NULL;
    }

    //Add socket to event loop
    if (uv_udp_init(nc->loop, &(nc->uv_nl_handle))) {
        fprintf(stderr, "Failed to initialize uv UDP handle\n");
        return NULL;
    }

    //TODO: We could use offsetof, but libuv has a pointer so ...
    nc->uv_nl_handle.data = nc;

    if (uv_udp_open(&(nc->uv_nl_handle), mnl_socket_get_fd(nc->mnl_sock))) {
        fprintf(stderr, "Could not add netlink socket to uv\n");
        return NULL;
    }

    if (uv_udp_recv_start(&(nc->uv_nl_handle), neat_linux_nl_alloc,
                neat_linux_nl_recv)) {
        fprintf(stderr, "Could not start receiving netlink packets\n");
        return NULL;
    }
     
    nc->cleanup = neat_linux_cleanup;

    //Configure netlink socket, add to event loop and start dumping
    return nc;
}
Example #5
File: nfq.c Project: chamaken/nurs
static enum nurs_return_t nfq_organize(struct nurs_producer *producer)
{
	struct nfq_priv *priv = nurs_producer_context(producer);

	if (nfq_common_organize(producer) != NURS_RET_OK)
		return NURS_RET_ERROR;

	priv->fd = nurs_fd_create(mnl_socket_get_fd(priv->nl),
				  NURS_FD_F_READ);
	if (!priv->fd)
		goto fail;
	return NURS_RET_OK;
fail:
#ifdef NLMMAP
	mnl_socket_unmap(priv->nlr);
#endif
	mnl_socket_close(priv->nl);
	return NURS_RET_ERROR;

}
Example #6
static int32_t multi_link_event_loop(struct multi_config *mc){
    struct multi_link_info *li;
    pthread_attr_t detach_attr;
    uint8_t buf[MAX_PIPE_MSG_LEN];
    uint8_t mnl_buf[MNL_SOCKET_BUFFER_SIZE];
    int32_t retval, numbytes;
    uint32_t i;
    int32_t mnl_sock_event, mnl_sock_set, mnl_sock_get;
    fd_set masterfds, readfds;
    int fdmax = 0;
    struct timeval tv;

    FD_ZERO(&masterfds);
    FD_ZERO(&readfds);

    //NETLINK_ROUTE is where I want to hook into the kernel
    if(!(multi_link_nl_request = mnl_socket_open(NETLINK_ROUTE))){
        MULTI_DEBUG_PRINT_SYSLOG(stderr, "Could not create mnl socket (request)\n");
        return EXIT_FAILURE;
    }

    if(!(multi_link_nl_set = mnl_socket_open(NETLINK_ROUTE))){
        MULTI_DEBUG_PRINT_SYSLOG(stderr, "Could not create mnl socket (set)\n");
        return EXIT_FAILURE;
    }

    if(!(multi_link_nl_event = mnl_socket_open(NETLINK_ROUTE))){
        MULTI_DEBUG_PRINT_SYSLOG(stderr, "Could not create mnl socket (event)\n");
        return EXIT_FAILURE;
    }

    if(mnl_socket_bind(multi_link_nl_request, 0, MNL_SOCKET_AUTOPID) < 0){
        MULTI_DEBUG_PRINT_SYSLOG(stderr, "Could not bind mnl event socket\n");
        mnl_socket_close(multi_link_nl_event);
        return EXIT_FAILURE;
    }

    if(mnl_socket_bind(multi_link_nl_set, 0, MNL_SOCKET_AUTOPID) < 0){
        MULTI_DEBUG_PRINT_SYSLOG(stderr, "Could not bind mnl event socket\n");
        mnl_socket_close(multi_link_nl_event);
        return EXIT_FAILURE;
    }

    if(mnl_socket_bind(multi_link_nl_event, 1 << (RTNLGRP_LINK - 1), MNL_SOCKET_AUTOPID) 
            < 0){
        MULTI_DEBUG_PRINT_SYSLOG(stderr, "Could not bind mnl event socket\n");
        mnl_socket_close(multi_link_nl_event);
        return EXIT_FAILURE;
    }

    if(pipe(multi_link_dhcp_pipes) < 0){
        //perror("Pipe failed\n");
        MULTI_DEBUG_PRINT_SYSLOG(stderr,"Pipe failed\n");
        return EXIT_FAILURE;
    }

    /* Find interfaces that are already up, remove their info and then rerun
     * DHCP (needs config) */
    multi_link_populate_links_list();

    /* Check if I have any PPP links. */
    //TODO: Give this one a better name since it is not only for PPP any more
    LIST_FOREACH_CB(&multi_link_links_2, next, multi_link_check_ppp, li, NULL);

    MULTI_DEBUG_PRINT_SYSLOG(stderr, "Done populating links list!\n");

    if(multi_link_flush_links() == EXIT_FAILURE)
        return EXIT_FAILURE;

    //Go through already seen interfaces and start DHCP as needed
    if(multi_link_num_links > 0){
        pthread_attr_init(&detach_attr);
        pthread_attr_setdetachstate(&detach_attr, PTHREAD_CREATE_DETACHED);

        for(li = multi_link_links_2.lh_first; li != NULL;
                li = li->next.le_next){
            /* Start DHCP */
            if(li->state == WAITING_FOR_DHCP){
                MULTI_DEBUG_PRINT_SYSLOG(stderr, "Starting DHCP for existing "
                        "interface %s\n", li->dev_name);
                pthread_create(&(li->dhcp_thread), &detach_attr, 
                        multi_dhcp_main, (void *) li);
            }
        }
    }

    /* Do a scan of the list here to check for links with static IP/PPP */
    LIST_FOREACH_CB(&multi_link_links_2, next, multi_link_check_link, li, mc);

    mnl_sock_event = mnl_socket_get_fd(multi_link_nl_event);
    mnl_sock_set = mnl_socket_get_fd(multi_link_nl_set);
    mnl_sock_get = mnl_socket_get_fd(multi_link_nl_request);

    FD_SET(mnl_sock_event, &masterfds); 
    fdmax = fdmax > mnl_sock_event ? fdmax : mnl_sock_event;
    FD_SET(mnl_sock_get, &masterfds);
    fdmax = fdmax > mnl_sock_get ? fdmax : mnl_sock_get;
    FD_SET(mnl_sock_set, &masterfds);
    fdmax = fdmax > mnl_sock_set ? fdmax : mnl_sock_set;
    FD_SET(multi_link_dhcp_pipes[0], &masterfds);
    fdmax = fdmax > multi_link_dhcp_pipes[0] ? fdmax : multi_link_dhcp_pipes[0];

    tv.tv_sec = 5;
    tv.tv_usec = 0;

    while(1){
        readfds = masterfds;

        retval = select(fdmax+1, &readfds, NULL, NULL, &tv);

        if(retval == 0){
            //Check for any PPP that is marked as down 
            LIST_FOREACH_CB(&multi_link_links_2, next, multi_link_check_ppp,
                    li, NULL);
            LIST_FOREACH_CB(&multi_link_links_2, next, multi_link_check_link,
                    li, mc);

            tv.tv_sec = 5;
            tv.tv_usec = 0;
            continue;
        }

        //TODO: Rewrite this so I only call the callbacks at the end, not per
        //message
        for(i=0; i<=fdmax; i++){
            if(FD_ISSET(i, &readfds)){
                if (i == mnl_sock_event){
                    numbytes = mnl_socket_recvfrom(multi_link_nl_event, 
                            mnl_buf, sizeof(mnl_buf));
                    mnl_cb_run(mnl_buf, numbytes, 0, 0, 
                            multi_link_parse_netlink, mc);
                    LIST_FOREACH_CB(&multi_link_links_2, next,
                            multi_link_check_link, li, mc);
                } else if (i == mnl_sock_set){
                    numbytes = mnl_socket_recvfrom(multi_link_nl_set, mnl_buf, 
                            sizeof(mnl_buf));
                } else if (i == mnl_sock_get){
                    numbytes = mnl_socket_recvfrom(multi_link_nl_request, 
                            mnl_buf, sizeof(mnl_buf));
                } else if (i == multi_link_dhcp_pipes[0]){
                    numbytes = read(i, buf, MAX_PIPE_MSG_LEN);
                    LIST_FOREACH_CB(&multi_link_links_2, next,
                            multi_link_check_link, li, mc);
                    multi_link_clean_links();
                }
            } 
        }
    }
}
Example #7
//Test function which just generates some netlink messages that are sent to our
//group
static void test_netlink(uint32_t packets)
{
    struct mnl_socket *mnl_sock = NULL;
    struct sockaddr_nl netlink_addr;
    uint8_t snd_buf[MNL_SOCKET_BUFFER_SIZE];
    struct nlmsghdr *netlink_hdr;
    uint32_t i = 0;
    struct json_object *obj_to_send = NULL;
    struct timeval tv;

    gettimeofday(&tv, NULL);

    mnl_sock = nlhelper_create_socket(NETLINK_USERSOCK, 0);

    if (mnl_sock == NULL) {
        fprintf(stderr, "Could not create netlink socket used for testing\n");
        return;
    }

    memset(&netlink_addr, 0, sizeof(netlink_addr));
    memset(snd_buf, 0, sizeof(snd_buf));

    netlink_hdr = mnl_nlmsg_put_header(snd_buf);
    netlink_hdr->nlmsg_type = 1;

    netlink_addr.nl_family = AF_NETLINK;

    //A message is broadcast (multicast) to all members of the group, except
    //the one where portid equals nl_pid (if any). Then it is unicast to the
    //socket where portid equals nl_pid (if any). See af_netlink.c and
    //netlink_unicast()/netlink_broadcast().
    //
    //When testing, there is no need to multicast. We can just send to the PID
    netlink_addr.nl_pid = getpid();

    //TODO: Specify number of packets from command line
    while(1) {
#if 0
        if (i == 0)
            obj_to_send = create_fake_conn_obj(1, 2, CONN_EVENT_META_UPDATE, "1,2,1,", i+1);
        else
            obj_to_send = create_fake_conn_obj(2, 3, CONN_EVENT_META_UPDATE, "1,2,1,4", i+1);
#endif

        if (i < 4)
            obj_to_send = create_fake_conn_obj(1, 2, CONN_EVENT_L3_UP, "1,2,1", i+1);
        else
            obj_to_send = create_fake_conn_obj(1, 2, CONN_EVENT_META_UPDATE, "1,2,1,4", tv.tv_sec);

        if (!obj_to_send)
            continue;

        send_netlink_json(snd_buf, obj_to_send, mnl_socket_get_fd(mnl_sock),
                (struct sockaddr*) &netlink_addr);
        json_object_put(obj_to_send);

#if 0
        obj_to_send = create_fake_gps_gga_obj();
        send_netlink_json(snd_buf, obj_to_send, mnl_socket_get_fd(mnl_sock),
                (struct sockaddr*) &netlink_addr);
        json_object_put(obj_to_send);

        obj_to_send = create_fake_gps_rmc_obj();
        send_netlink_json(snd_buf, obj_to_send, mnl_socket_get_fd(mnl_sock),
                (struct sockaddr*) &netlink_addr);
        json_object_put(obj_to_send);

#endif
        test_modem_metadata(snd_buf, mnl_socket_get_fd(mnl_sock),
                (struct sockaddr*) &netlink_addr);
        if (packets && (++i >= packets))
            break;

        usleep(1000000);
    }
}
Example #8
static void
send_batch(struct mnl_socket *nl, struct mnl_nlmsg_batch *b, int portid)
{
	int ret, fd = mnl_socket_get_fd(nl);
	size_t len = mnl_nlmsg_batch_size(b);
	char rcv_buf[MNL_SOCKET_BUFFER_SIZE];

	ret = mnl_socket_sendto(nl, mnl_nlmsg_batch_head(b), len);
	if (ret == -1) {
		perror("mnl_socket_recvfrom");
		exit(EXIT_FAILURE);
	}

	/* receive and digest all the acknowledgments from the kernel. */
	struct timeval tv = {
		.tv_sec		= 0,
		.tv_usec	= 0
	};
	fd_set readfds;
	FD_ZERO(&readfds);
	FD_SET(fd, &readfds);

	ret = select(fd+1, &readfds, NULL, NULL, &tv);
	if (ret == -1) {
		perror("select");
		exit(EXIT_FAILURE);
	}
	while (ret > 0 && FD_ISSET(fd, &readfds)) {
		ret = mnl_socket_recvfrom(nl, rcv_buf, sizeof(rcv_buf));
		if (ret == -1) {
			perror("mnl_socket_recvfrom");
			exit(EXIT_FAILURE);
		}

		ret = mnl_cb_run2(rcv_buf, ret, 0, portid,
				  NULL, NULL, cb_ctl_array,
				  MNL_ARRAY_SIZE(cb_ctl_array));
		if (ret == -1) {
			perror("mnl_cb_run");
			exit(EXIT_FAILURE);
		}

		ret = select(fd+1, &readfds, NULL, NULL, &tv);
		if (ret == -1) {
			perror("select");
			exit(EXIT_FAILURE);
		}
		FD_ZERO(&readfds);
		FD_SET(fd, &readfds);
	}
}

int main(void)
{
	struct mnl_socket *nl;
	char snd_buf[MNL_SOCKET_BUFFER_SIZE*2];
	struct mnl_nlmsg_batch *b;
	int j;
	unsigned int seq, portid;
	uint16_t i;

	nl = mnl_socket_open(NETLINK_NETFILTER);
	if (nl == NULL) {
		perror("mnl_socket_open");
		exit(EXIT_FAILURE);
	}
	if (mnl_socket_bind(nl, 0, MNL_SOCKET_AUTOPID) < 0) {
		perror("mnl_socket_bind");
		exit(EXIT_FAILURE);
	}
	portid = mnl_socket_get_portid(nl);

	/* The buffer that we use to batch messages is MNL_SOCKET_BUFFER_SIZE
	 * multiplied by 2 bytes long, but we limit the batch to half of it,
	 * since the last message that does not fit in the batch goes over the
	 * upper boundary. If you break this rule, expect memory corruption. */
	b = mnl_nlmsg_batch_start(snd_buf, MNL_SOCKET_BUFFER_SIZE);
	if (b == NULL) {
		perror("mnl_nlmsg_batch_start");
		exit(EXIT_FAILURE);
	}

	seq = time(NULL);
	for (i=1024, j=0; i<65535; i++, j++) {
		put_msg(mnl_nlmsg_batch_current(b), i, seq+j);

		/* is there room for more messages in this batch?
		 * if so, continue. */
		if (mnl_nlmsg_batch_next(b))
			continue;

		send_batch(nl, b, portid);

		/* this moves the last message that did not fit into the
		 * batch to the head of it. */
		mnl_nlmsg_batch_reset(b);
	}

	/* check if there is any message in the batch not sent yet. */
	if (!mnl_nlmsg_batch_is_empty(b))
		send_batch(nl, b, portid);

	mnl_nlmsg_batch_stop(b);
	mnl_socket_close(nl);

	return 0;
}
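All of the examples above call mnl_socket_get_fd() for the same reason: to expose the raw netlink descriptor so it can be multiplexed alongside other file descriptors, whether via select(), poll(), libuv or nurs_fd_create(). The following self-contained sketch condenses that recurring pattern using plain poll(2); the RTNLGRP_LINK subscription is an arbitrary illustrative choice and error handling is kept minimal, so this is a sketch of the pattern rather than code from any of the projects above.

#include <libmnl/libmnl.h>
#include <linux/rtnetlink.h>
#include <poll.h>
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	struct mnl_socket *nl;
	struct pollfd pfd;
	char buf[MNL_SOCKET_BUFFER_SIZE];
	ssize_t len;

	/* Open and bind a rtnetlink socket, subscribing to link events */
	nl = mnl_socket_open(NETLINK_ROUTE);
	if (nl == NULL) {
		perror("mnl_socket_open");
		exit(EXIT_FAILURE);
	}
	if (mnl_socket_bind(nl, 1 << (RTNLGRP_LINK - 1), MNL_SOCKET_AUTOPID) < 0) {
		perror("mnl_socket_bind");
		exit(EXIT_FAILURE);
	}

	/* mnl_socket_get_fd() exposes the underlying descriptor so it can be
	 * registered with an external event loop */
	pfd.fd = mnl_socket_get_fd(nl);
	pfd.events = POLLIN;

	if (poll(&pfd, 1, -1) > 0 && (pfd.revents & POLLIN)) {
		len = mnl_socket_recvfrom(nl, buf, sizeof(buf));
		if (len > 0)
			printf("received %zd bytes of netlink data\n", len);
	}

	mnl_socket_close(nl);
	return 0;
}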