void* manage_netw(void* shared_void){ SharedVars* shared = (SharedVars*)shared_void; pthread_t order_handling_threads[255]; pthread_create(&order_handling_threads[MY_ID], NULL, &report_events_master, shared); while (1){ struct timeval timeout; timeout.tv_sec = 0; timeout.tv_usec = 0.4*SEC_TO_USEC; RecvMsg recv_msg; fd_set read_fd_set = file_descriptor_setup(&shared->netw_membs[BROADCAST].sock_fd, 1); if (select(shared->netw_membs[BROADCAST].sock_fd + 1, &read_fd_set, NULL, NULL, &timeout) > 0){ recv_msg = shared->netw_membs[BROADCAST].recv(); // Do this everywhere: recv_msg } switch (recv_msg.MSG_ID){ case HEARTBEAT: determine_master(); break; case CONNECT: char send_msg = ACCEPT_CON; //Ack shared->netw_membs[ip_to_id(recv_msg.sender_ip)] = NetwMemb(recv_msg.sender_ip, recv_msg.sender_ip, ELEV_INFO_PORT, SLAVE_ROLE); shared->netw_membs[ip_to_id(recv_msg.sender_ip)].send(&send_msg); shared->slave_id = ip_to_id(recv_msg.sender_ip); // Need to use semaphore to keep this alive pthread_create(&order_handling_threads[ip_to_id(recv_msg.sender_ip)], NULL, &handle_orders, shared); break; } shared->netw_membs[BROADCAST].send_heartbeat(); } }
void* manage_netw(){ pthread_t elev_managing_threads[255]; pthread_create(&elev_managing_threads[MY_ID], NULL, &handle_local_events_master, NULL); while (1){ struct timeval timeout; timeout.tv_sec = 0; timeout.tv_usec = 0.4*SEC_TO_USEC; fd_set read_fd_set; file_descriptor_setup(&shared->netw_membs[BROADCAST].sock_fd, &read_fd_set); if (select(shared->netw_membs[BROADCAST].sock_fd + 1, &read_fd_set, NULL, NULL, &timeout) > 0){ struct RecvMsg recv_msg = shared->netw_membs[BROADCAST].recv(); if(recv_msg.MSG_ID == CONNECT){ int* slave_id = new int; *slave_id = ip_to_id(recv_msg.sender_ip); char send_msg[BUFF_SIZE]; send_msg[0] = ACKNOWLEDGE; shared->netw_membs[*slave_id] = NetwMemb(*slave_id, *slave_id, ELEV_INFO_PORT, SLAVE_ROLE); shared->netw_membs[*slave_id].send(&send_msg); pthread_create(&elev_managing_threads[*slave_id], NULL, &manage_slave, slave_id); } } shared->netw_membs[BROADCAST].send_heartbeat(shared->netw_master_q); } }
void* netw_fsm(void* shared_void){ SharedVars* shared = (SharedVars*)shared_void; shared->netw_membs[BROADCAST] = NetwMemb(INADDR_ANY, id_to_ip(BROADCAST), NETW_INFO_PORT, BROADCAST_ROLE); shared->netw_fsm_state = FSM_FIND_NETWORK; while (1){ switch (shared->netw_fsm_state){ case FSM_FIND_NETWORK: //Test connection shared->master_ip = find_master(shared->netw_membs); if (!shared->master_ip) shared->netw_fsm_state = FSM_MASTER; else shared->netw_fsm_state = FSM_SLAVE; break; case FSM_MASTER: pthread_t master_threads[2]; pthread_create(&master_threads[0], NULL, &manage_netw, shared); //Investigate how sockets works as shared variables pthread_create(&master_threads[1], NULL, &manage_backup, shared); sleep(2); if (shared->netw_membs[MY_ID].role == BACKUP_ROLE){ /*Look up own role in NetwMembs*/ follow_up_ext_orders(shared); } break; case FSM_SLAVE: shared->netw_membs[ip_to_id(shared->master_ip)] = NetwMemb(shared->master_ip, shared->master_ip, ELEV_INFO_PORT, MASTER_ROLE); serve(shared); break; } } }
/*
 * Test membership of @ip in a bitmap:ip set.
 * Returns 1 if set, 0 if clear, -ERANGE if @ip lies outside the set's range.
 */
static inline int ipmap_test(const struct ip_set *set, ip_set_ip_t ip)
{
	const struct ip_set_ipmap *ipmap = set->data;
	int in_range = ip >= ipmap->first_ip && ip <= ipmap->last_ip;

	if (!in_range)
		return -ERANGE;

	DP("set: %s, ip: %pI4h", set->name, &ip);
	return test_bit(ip_to_id(ipmap, ip), ipmap->members) ? 1 : 0;
}
/*
 * Test membership of @ip (masked by the set's netmask) in a bitmap:ip set.
 * Stores the masked address in *hash_ip.
 * Returns 1 if set, 0 if clear, -ERANGE if @ip lies outside the set's range.
 */
static inline int __testip(struct ip_set *set, ip_set_ip_t ip, ip_set_ip_t *hash_ip)
{
	struct ip_set_ipmap *ipmap = (struct ip_set_ipmap *) set->data;

	if (ip > ipmap->last_ip || ip < ipmap->first_ip)
		return -ERANGE;

	*hash_ip = ip & ipmap->netmask;
	DP("set: %s, ip:%u.%u.%u.%u, %u.%u.%u.%u", set->name, HIPQUAD(ip), HIPQUAD(*hash_ip));
	return test_bit(ip_to_id(ipmap, *hash_ip), ipmap->members) ? 1 : 0;
}
struct RecvMsg NetwMemb::recv(){ struct RecvMsg msg; struct sockaddr_in sender_addr; unsigned int sender_size = sizeof(sender_addr); if (recvfrom(this->sock_fd, msg.content, BUFF_SIZE, 0, (struct sockaddr*) &sender_addr, &sender_size) < 0){ perror("Error receiving message.\n"); msg.MSG_ID = ERROR; } printf("Nudes: %i from _ %i\n", msg.MSG_ID, ip_to_id(msg.sender_ip)); msg.sender_ip = 0xFFFFFFFF & ntohl(sender_addr.sin_addr.s_addr); return msg; }
/*
 * Delete @ip from a bitmap:ip set.
 * Returns 0 on success, -ERANGE if @ip lies outside the set's range,
 * -EEXIST if the entry was not present.
 */
static inline int ipmap_del(struct ip_set *set, ip_set_ip_t ip)
{
	struct ip_set_ipmap *ipmap = set->data;

	if (!(ip >= ipmap->first_ip && ip <= ipmap->last_ip))
		return -ERANGE;

	DP("set: %s, ip: %pI4h", set->name, &ip);

	if (test_and_clear_bit(ip_to_id(ipmap, ip), ipmap->members))
		return 0;
	return -EEXIST;
}
/*
 * Add @ip (masked by the set's netmask) to a bitmap:ip set.
 * Stores the masked address in *hash_ip.
 * Returns 0 on success, -ERANGE if @ip lies outside the set's range,
 * -EEXIST if the entry was already present.
 */
static inline int __addip(struct ip_set *set, ip_set_ip_t ip, ip_set_ip_t *hash_ip)
{
	struct ip_set_ipmap *ipmap = (struct ip_set_ipmap *) set->data;

	if (ip > ipmap->last_ip || ip < ipmap->first_ip)
		return -ERANGE;

	*hash_ip = ip & ipmap->netmask;
	DP("%u.%u.%u.%u, %u.%u.%u.%u", HIPQUAD(ip), HIPQUAD(*hash_ip));
	return test_and_set_bit(ip_to_id(ipmap, *hash_ip), ipmap->members) ? -EEXIST : 0;
}
/*
 * Kernel-side add/del/test entry for bitmap:ip (legacy API).
 * Extracts the IPv4 address from the skb, validates it against the set's
 * range, converts it to a bitmap id and dispatches to the variant handler.
 */
static int bitmap_ip_kadt(struct ip_set *set, const struct sk_buff *skb, enum ipset_adt adt, u8 pf, u8 dim, u8 flags)
{
	struct bitmap_ip *map = set->data;
	ipset_adtfn adtfn = set->variant->adt[adt];
	u32 addr = ntohl(ip4addr(skb, flags & IPSET_DIM_ONE_SRC));

	if (addr < map->first_ip || addr > map->last_ip)
		return -IPSET_ERR_BITMAP_RANGE;

	addr = ip_to_id(map, addr);
	return adtfn(set, &addr, map->timeout);
}
static int bitmap_ip_kadt(struct ip_set *set, const struct sk_buff *skb, const struct xt_action_param *par, enum ipset_adt adt, struct ip_set_adt_opt *opt) { struct bitmap_ip *map = set->data; ipset_adtfn adtfn = set->variant->adt[adt]; struct bitmap_ip_adt_elem e = { }; struct ip_set_ext ext = IP_SET_INIT_KEXT(skb, opt, set); u32 ip; ip = ntohl(ip4addr(skb, opt->flags & IPSET_DIM_ONE_SRC)); if (ip < map->first_ip || ip > map->last_ip) return -IPSET_ERR_BITMAP_RANGE; e.id = ip_to_id(map, ip); return adtfn(set, &e, &ext, &opt->ext, opt->cmdflags); }
/*
 * Construct a network member: set up the bind/send addresses and the socket.
 *
 * bind_ip / send_ip: addresses in host byte order.
 * port:              UDP port for both bind and send.
 * init_role:         initial network role (MASTER/SLAVE/BROADCAST...).
 */
NetwMemb::NetwMemb(unsigned long bind_ip, unsigned long send_ip, int port, int init_role){
    //Create socket
    int broad = 0;
    if((bind_ip & 0xFF) == 255){ /* .255 host byte => broadcast address */
        broad = 1;
    }

    this->bind_addr.sin_family = AF_INET;
    this->bind_addr.sin_port = htons(port);
    printf("Our newest member: %lu\nNow known as: %i\n", 0xFFFFFFFF & bind_ip, ip_to_id(bind_ip));
    /* BUGFIX: the original assigned sin_addr only in the broadcast branch,
     * leaving non-broadcast sockets to bind to an indeterminate address.
     * NOTE(review): if unicast members were meant to bind to INADDR_ANY
     * instead of bind_ip, adjust here — confirm against sock_setup(). */
    this->bind_addr.sin_addr.s_addr = 0xFFFFFFFF & htonl(bind_ip);
    this->sock_setup(broad);

    //Store sender
    this->send_addr.sin_family = AF_INET;
    this->send_addr.sin_port = htons(port);
    this->send_addr.sin_addr.s_addr = 0xFFFFFFFF & htonl(send_ip);

    this->netw_role = init_role;
}
/*
 * Kernel-side add/del/test entry for bitmap:ip,mac.
 * Takes the IPv4 address and the source/destination MAC from the skb,
 * validates range and MAC header bounds, then dispatches to the variant
 * handler.
 */
static int bitmap_ipmac_kadt(struct ip_set *set, const struct sk_buff *skb, const struct xt_action_param *par, enum ipset_adt adt, struct ip_set_adt_opt *opt)
{
	struct bitmap_ipmac *map = set->data;
	ipset_adtfn adtfn = set->variant->adt[adt];
	struct bitmap_ipmac_adt_elem e = { .id = 0, .add_mac = 1 };
	struct ip_set_ext ext = IP_SET_INIT_KEXT(skb, opt, set);
	u32 ip;

	ip = ntohl(ip4addr(skb, opt->flags & IPSET_DIM_ONE_SRC));
	if (ip < map->first_ip || ip > map->last_ip)
		return -IPSET_ERR_BITMAP_RANGE;

	/* Backward compatibility: we don't check the second flag */
	if (skb_mac_header(skb) < skb->head || (skb_mac_header(skb) + ETH_HLEN) > skb->data)
		return -EINVAL; /* no complete MAC header available */

	e.id = ip_to_id(map, ip);
	/* Pick source or destination MAC depending on the dimension flag. */
	if (opt->flags & IPSET_DIM_ONE_SRC)
		ether_addr_copy(e.ether, eth_hdr(skb)->h_source);
	else
		ether_addr_copy(e.ether, eth_hdr(skb)->h_dest);

	if (is_zero_ether_addr(e.ether))
		return -EINVAL;

	return adtfn(set, &e, &ext, &opt->ext, opt->cmdflags);
}

/*
 * Userspace (netlink) add/del/test entry for bitmap:ip,mac.
 * Parses IP (mandatory) and MAC (optional) attributes, validates the range
 * and dispatches to the variant handler.
 */
static int bitmap_ipmac_uadt(struct ip_set *set, struct nlattr *tb[], enum ipset_adt adt, u32 *lineno, u32 flags, bool retried)
{
	const struct bitmap_ipmac *map = set->data;
	ipset_adtfn adtfn = set->variant->adt[adt];
	struct bitmap_ipmac_adt_elem e = { .id = 0 };
	struct ip_set_ext ext = IP_SET_INIT_UEXT(set);
	u32 ip = 0;
	int ret = 0;

	if (tb[IPSET_ATTR_LINENO])
		*lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]);

	if (unlikely(!tb[IPSET_ATTR_IP]))
		return -IPSET_ERR_PROTOCOL;

	ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP], &ip);
	if (ret)
		return ret;

	ret = ip_set_get_extensions(set, tb, &ext);
	if (ret)
		return ret;

	if (ip < map->first_ip || ip > map->last_ip)
		return -IPSET_ERR_BITMAP_RANGE;

	e.id = ip_to_id(map, ip);
	/* MAC is optional from userspace; only mark add_mac when supplied. */
	if (tb[IPSET_ATTR_ETHER]) {
		if (nla_len(tb[IPSET_ATTR_ETHER]) != ETH_ALEN)
			return -IPSET_ERR_PROTOCOL;
		memcpy(e.ether, nla_data(tb[IPSET_ATTR_ETHER]), ETH_ALEN);
		e.add_mac = 1;
	}
	ret = adtfn(set, &e, &ext, &ext, flags);

	/* -EEXIST is not an error when the force flag is set. */
	return ip_set_eexist(ret, flags) ? 0 : ret;
}

/* Two bitmap:ip,mac sets are "the same" when range and per-set options match. */
static bool bitmap_ipmac_same_set(const struct ip_set *a, const struct ip_set *b)
{
	const struct bitmap_ipmac *x = a->data;
	const struct bitmap_ipmac *y = b->data;

	return x->first_ip == y->first_ip && x->last_ip == y->last_ip && a->timeout == b->timeout && a->extensions == b->extensions;
}

/* Plain variant */

#include "ip_set_bitmap_gen.h"

/* Create bitmap:ip,mac type of sets */

/* Initialize the freshly-allocated map and link it into the set. */
static bool init_map_ipmac(struct ip_set *set, struct bitmap_ipmac *map, u32 first_ip, u32 last_ip, u32 elements)
{
	map->members = ip_set_alloc(map->memsize);
	if (!map->members)
		return false;
	map->first_ip = first_ip;
	map->last_ip = last_ip;
	map->elements = elements;
	set->timeout = IPSET_NO_TIMEOUT;
	map->set = set;
	set->data = map;
	set->family = NFPROTO_IPV4;
	return true;
}

/*
 * Create a bitmap:ip,mac set from netlink attributes.
 * The range is given either as IP..IP_TO or as IP/CIDR; its size is bounded
 * by IPSET_BITMAP_MAX_RANGE.
 */
static int bitmap_ipmac_create(struct net *net, struct ip_set *set, struct nlattr *tb[], u32 flags)
{
	u32 first_ip = 0, last_ip = 0;
	u64 elements;
	struct bitmap_ipmac *map;
	int ret;

	if (unlikely(!tb[IPSET_ATTR_IP] || !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT) || !ip_set_optattr_netorder(tb, IPSET_ATTR_CADT_FLAGS)))
		return -IPSET_ERR_PROTOCOL;

	ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP], &first_ip);
	if (ret)
		return ret;

	if (tb[IPSET_ATTR_IP_TO]) {
		ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP_TO], &last_ip);
		if (ret)
			return ret;
		/* Accept the range in either order. */
		if (first_ip > last_ip)
			swap(first_ip, last_ip);
	} else if (tb[IPSET_ATTR_CIDR]) {
		u8 cidr = nla_get_u8(tb[IPSET_ATTR_CIDR]);

		if (cidr >= HOST_MASK)
			return -IPSET_ERR_INVALID_CIDR;
		ip_set_mask_from_to(first_ip, last_ip, cidr);
	} else {
		return -IPSET_ERR_PROTOCOL;
	}

	/* u64 arithmetic: the full IPv4 range would overflow u32. */
	elements = (u64)last_ip - first_ip + 1;
	if (elements > IPSET_BITMAP_MAX_RANGE + 1)
		return -IPSET_ERR_BITMAP_RANGE_SIZE;

	set->dsize = ip_set_elem_len(set, tb, sizeof(struct bitmap_ipmac_elem), __alignof__(struct bitmap_ipmac_elem));
	map = ip_set_alloc(sizeof(*map) + elements * set->dsize);
	if (!map)
		return -ENOMEM;

	map->memsize = bitmap_bytes(0, elements - 1);
	set->variant = &bitmap_ipmac;
	if (!init_map_ipmac(set, map, first_ip, last_ip, elements)) {
		kfree(map);
		return -ENOMEM;
	}
	if (tb[IPSET_ATTR_TIMEOUT]) {
		set->timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]);
		/* Timed-out entries are reaped by the garbage collector. */
		bitmap_ipmac_gc_init(set, bitmap_ipmac_gc);
	}
	return 0;
}
/*
 * Userspace (netlink) add/del/test entry for bitmap:ip (legacy, pre-extension
 * API). Parses the IP (single address, IP..IP_TO range, or IP/CIDR block) and
 * an optional timeout, then dispatches each covered address to the variant
 * handler.
 */
static int bitmap_ip_uadt(struct ip_set *set, struct nlattr *tb[], enum ipset_adt adt, u32 *lineno, u32 flags)
{
	struct bitmap_ip *map = set->data;
	ipset_adtfn adtfn = set->variant->adt[adt];
	u32 timeout = map->timeout; /* default: the set-wide timeout */
	u32 ip, ip_to, id;
	int ret = 0;

	if (unlikely(!tb[IPSET_ATTR_IP] || !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT)))
		return -IPSET_ERR_PROTOCOL;

	if (tb[IPSET_ATTR_LINENO])
		*lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]);

	ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP], &ip);
	if (ret)
		return ret;

	if (ip < map->first_ip || ip > map->last_ip)
		return -IPSET_ERR_BITMAP_RANGE;

	/* Per-element timeout is only valid on sets created with a timeout. */
	if (tb[IPSET_ATTR_TIMEOUT]) {
		if (!with_timeout(map->timeout))
			return -IPSET_ERR_TIMEOUT;
		timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]);
	}

	/* TEST is always a single address; skip range parsing. */
	if (adt == IPSET_TEST) {
		id = ip_to_id(map, ip);
		return adtfn(set, &id, timeout);
	}

	if (tb[IPSET_ATTR_IP_TO]) {
		ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP_TO], &ip_to);
		if (ret)
			return ret;
		/* Accept the range in either order, then re-check the low end. */
		if (ip > ip_to) {
			swap(ip, ip_to);
			if (ip < map->first_ip)
				return -IPSET_ERR_BITMAP_RANGE;
		}
	} else if (tb[IPSET_ATTR_CIDR]) {
		u8 cidr = nla_get_u8(tb[IPSET_ATTR_CIDR]);

		if (cidr > 32)
			return -IPSET_ERR_INVALID_CIDR;
		/* Expand the CIDR block to [network, broadcast]. */
		ip &= ip_set_hostmask(cidr);
		ip_to = ip | ~ip_set_hostmask(cidr);
	} else
		ip_to = ip;

	if (ip_to > map->last_ip)
		return -IPSET_ERR_BITMAP_RANGE;

	/* Walk the range in steps of map->hosts (netmask granularity). */
	for (; !before(ip_to, ip); ip += map->hosts) {
		id = ip_to_id(map, ip);
		ret = adtfn(set, &id, timeout);;
		/* -EEXIST is tolerated when the force flag is set. */
		if (ret && !ip_set_eexist(ret, flags))
			return ret;
		else
			ret = 0;
	}
	return ret;
}
/*
 * Userspace (netlink) add/del/test entry for bitmap:ip (extension-aware API).
 * Parses the IP (single address, IP..IP_TO range, or IP/CIDR block) plus
 * extension attributes (timeout/counters), then dispatches each covered
 * address to the variant handler.
 */
static int bitmap_ip_uadt(struct ip_set *set, struct nlattr *tb[], enum ipset_adt adt, u32 *lineno, u32 flags, bool retried)
{
	struct bitmap_ip *map = set->data;
	ipset_adtfn adtfn = set->variant->adt[adt];
	u32 ip = 0, ip_to = 0;
	struct bitmap_ip_adt_elem e = { };
	struct ip_set_ext ext = IP_SET_INIT_UEXT(set);
	int ret = 0;

	if (unlikely(!tb[IPSET_ATTR_IP] || !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT) || !ip_set_optattr_netorder(tb, IPSET_ATTR_PACKETS) || !ip_set_optattr_netorder(tb, IPSET_ATTR_BYTES)))
		return -IPSET_ERR_PROTOCOL;

	if (tb[IPSET_ATTR_LINENO])
		*lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]);

	/* Both calls return 0 on success, so || folds the two error paths. */
	ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP], &ip) || ip_set_get_extensions(set, tb, &ext);
	if (ret)
		return ret;

	if (ip < map->first_ip || ip > map->last_ip)
		return -IPSET_ERR_BITMAP_RANGE;

	/* TEST is always a single address; skip range parsing. */
	if (adt == IPSET_TEST) {
		e.id = ip_to_id(map, ip);
		return adtfn(set, &e, &ext, &ext, flags);
	}

	if (tb[IPSET_ATTR_IP_TO]) {
		ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP_TO], &ip_to);
		if (ret)
			return ret;
		/* Accept the range in either order, then re-check the low end. */
		if (ip > ip_to) {
			swap(ip, ip_to);
			if (ip < map->first_ip)
				return -IPSET_ERR_BITMAP_RANGE;
		}
	} else if (tb[IPSET_ATTR_CIDR]) {
		u8 cidr = nla_get_u8(tb[IPSET_ATTR_CIDR]);

		/* cidr 0 would cover the whole address space — reject it. */
		if (!cidr || cidr > 32)
			return -IPSET_ERR_INVALID_CIDR;
		ip_set_mask_from_to(ip, ip_to, cidr);
	} else
		ip_to = ip;

	if (ip_to > map->last_ip)
		return -IPSET_ERR_BITMAP_RANGE;

	/* Walk the range in steps of map->hosts (netmask granularity). */
	for (; !before(ip_to, ip); ip += map->hosts) {
		e.id = ip_to_id(map, ip);
		ret = adtfn(set, &e, &ext, &ext, flags);
		/* -EEXIST is tolerated when the force flag is set. */
		if (ret && !ip_set_eexist(ret, flags))
			return ret;
		else
			ret = 0;
	}
	return ret;
}