/**
 * Mapping from local network to loopback for outbound connections.
 *
 * Copy "src" to "dst" with ip6_addr_set(dst, src), but if "src" is a
 * local network address that maps host's loopback address, copy IPv6
 * loopback address to "dst".
 *
 * @param dst  destination address (filled in)
 * @param src  address as seen from the guest
 * @return PXREMAP_MAPPED if remapped to loopback, PXREMAP_ASIS otherwise.
 */
int
pxremap_outbound_ip6(ip6_addr_t *dst, ip6_addr_t *src)
{
    struct netif *netif;
    int i;

    LWIP_ASSERT1(dst != NULL);
    LWIP_ASSERT1(src != NULL);

    for (netif = netif_list; netif != NULL; netif = netif->next) {
        if (!netif_is_up(netif) /* || this is not a proxy netif */) {
            continue;
        }

        for (i = 0; i < LWIP_IPV6_NUM_ADDRESSES; ++i) {
            if (ip6_addr_ispreferred(netif_ip6_addr_state(netif, i))
                && ip6_addr_isuniquelocal(netif_ip6_addr(netif, i)))
            {
                ip6_addr_t *ifaddr = netif_ip6_addr(netif, i);
                /*
                 * The mapped address is the interface's unique-local
                 * address with its last byte incremented by one (the
                 * inverse of pxremap_inbound_ip6): compare everything
                 * but the final byte, then check that final byte.
                 */
                if (memcmp(src, ifaddr, sizeof(ip6_addr_t) - 1) == 0
                    && ((IP6_ADDR_BLOCK8(src) & 0xff)
                        == (IP6_ADDR_BLOCK8(ifaddr) & 0xff) + 1))
                {
                    ip6_addr_set_loopback(dst);
                    return PXREMAP_MAPPED;
                }
            }
        }
    }

    /* not remapped, just copy src */
    ip6_addr_set(dst, src);
    return PXREMAP_ASIS;
}
/**
 * Change the poll(2) event mask of an already registered dynamic slot.
 */
void
pollmgr_update_events(int slot, int events)
{
    struct pollfd *fdp;

    LWIP_ASSERT1(slot >= POLLMGR_SLOT_FIRST_DYNAMIC);
    LWIP_ASSERT1((nfds_t)slot < pollmgr.nfds);

    fdp = &pollmgr.fds[slot];
    fdp->events = events;
}
/**
 * New proxied UDP conversation created.
 * Global callback for udp_proxy_accept().
 *
 * Creates the pxudp state, opens a host socket connected to the
 * (possibly loopback-remapped) destination, hands the socket to the
 * poll manager and forwards the first datagram.  On any failure the
 * pcb and the pbuf are released here.
 */
static void
pxudp_pcb_accept(void *arg, struct udp_pcb *newpcb, struct pbuf *p,
                 ip_addr_t *addr, u16_t port)
{
    struct pxudp *pxudp;
    ipX_addr_t dst_addr;
    int mapping;
    int sdom;
    SOCKET sock;

    LWIP_ASSERT1(newpcb != NULL);
    LWIP_ASSERT1(p != NULL);
    LWIP_UNUSED_ARG(arg);

    pxudp = pxudp_allocate();
    if (pxudp == NULL) {
        DPRINTF(("pxudp_allocate: failed\n"));
        udp_remove(newpcb);
        pbuf_free(p);
        return;
    }

    sdom = PCB_ISIPV6(newpcb) ? PF_INET6 : PF_INET;
    /* NB: "mapping" is only consulted by the disabled hack below */
    mapping = pxremap_outbound_ipX(PCB_ISIPV6(newpcb), &dst_addr, &newpcb->local_ip);

#if 0 /* XXX: DNS IPv6->IPv4 remapping hack */
    if (mapping == PXREMAP_MAPPED
        && newpcb->local_port == 53
        && PCB_ISIPV6(newpcb))
    {
        /*
         * "Remap" DNS over IPv6 to IPv4 since Ubuntu dnsmasq does not
         * listen on IPv6.
         */
        sdom = PF_INET;
        ipX_addr_set_loopback(0, &dst_addr);
    }
#endif /* DNS IPv6->IPv4 remapping hack */

    sock = proxy_connected_socket(sdom, SOCK_DGRAM, &dst_addr, newpcb->local_port);
    if (sock == INVALID_SOCKET) {
        udp_remove(newpcb);
        pbuf_free(p);
        return;
    }

    pxudp->sock = sock;
    pxudp->pcb = newpcb;
    udp_recv(newpcb, pxudp_pcb_recv, pxudp);

    /* register the socket with the poll manager thread */
    pxudp->pmhdl.callback = pxudp_pmgr_pump;
    pxudp_chan_send(POLLMGR_CHAN_PXUDP_ADD, pxudp);

    /* dispatch directly instead of calling pxudp_pcb_recv() */
    pxudp_pcb_forward_outbound(pxudp, p, addr, port);
}
/**
 * Try to get the pointer from implicitly weak reference we've got
 * from a channel.
 *
 * If we detect that the object is still strongly referenced, but no
 * longer registered with the poll manager we abort strengthening
 * conversion here b/c lwip thread callback is already scheduled to
 * destruct the object.
 *
 * @return the handler, or NULL if the object is gone or being torn down.
 */
struct pollmgr_handler *
pollmgr_refptr_get(struct pollmgr_refptr *rp)
{
    struct pollmgr_handler *handler;
    size_t weak;

    sys_mutex_lock(&rp->lock);
    LWIP_ASSERT1(rp->weak > 0);

    /* consume the weak reference added for the channel send */
    weak = --rp->weak;

    handler = rp->ptr;
    if (handler == NULL) {
        LWIP_ASSERT1(rp->strong == 0);
        sys_mutex_unlock(&rp->lock);
        if (weak == 0) {
            /* we dropped the last reference of any kind */
            pollmgr_refptr_delete(rp);
        }
        return NULL;
    }

    LWIP_ASSERT1(rp->strong == 1);

    /*
     * Here we would do:
     *
     *   ++rp->strong;
     *
     * and then, after channel handler is done, we would decrement it
     * back.
     *
     * Instead we check that the object is still registered with poll
     * manager.  If it is, there's no race with lwip thread trying to
     * drop its strong reference, as lwip thread callback to destruct
     * the object is always scheduled by its poll manager callback.
     *
     * Conversely, if we detect that the object is no longer registered
     * with poll manager, we immediately abort.  Since channel handler
     * can't do anything useful anyway and would have to return
     * immediately.
     *
     * Since channel handler would always find rp->strong as it had
     * left it, just elide extra strong reference creation to avoid
     * the whole back-and-forth.
     */
    if (handler->slot < 0) { /* no longer polling */
        sys_mutex_unlock(&rp->lock);
        return NULL;
    }

    sys_mutex_unlock(&rp->lock);
    return handler;
}
/**
 * Destroy a DNS request.  The request must already be unlinked from
 * both the hash chain and the timeout list.
 */
static void
pxdns_request_free(struct request *req)
{
    struct pbuf *reply;

    LWIP_ASSERT1(req->pprev_hash == NULL);
    LWIP_ASSERT1(req->pprev_timeout == NULL);

    reply = req->reply;
    if (reply != NULL) {
        pbuf_free(reply);
    }
    free(req);
}
/**
 * Add weak reference before "rp" is sent over a poll manager channel.
 *
 * Caller must still hold the strong reference, hence the asserts; the
 * receiver converts the weak reference back via pollmgr_refptr_get().
 */
void
pollmgr_refptr_weak_ref(struct pollmgr_refptr *rp)
{
    sys_mutex_lock(&rp->lock);

    LWIP_ASSERT1(rp->ptr != NULL);
    LWIP_ASSERT1(rp->strong > 0);

    ++rp->weak;

    sys_mutex_unlock(&rp->lock);
}
/**
 * Release the refptr itself once both the strong and all weak
 * references are gone.  NULL is accepted and ignored.
 */
static void
pollmgr_refptr_delete(struct pollmgr_refptr *rp)
{
    if (rp != NULL) {
        LWIP_ASSERT1(rp->strong == 0);
        LWIP_ASSERT1(rp->weak == 0);

        sys_mutex_free(&rp->lock);
        free(rp);
    }
}
/**
 * Remove (the only) strong reference.
 *
 * If it were real strong/weak pointers, we should also call
 * destructor for the referenced object, but
 */
void
pollmgr_refptr_unref(struct pollmgr_refptr *rp)
{
    size_t weak;

    sys_mutex_lock(&rp->lock);

    LWIP_ASSERT1(rp->strong == 1);
    if (--rp->strong > 0) {
        sys_mutex_unlock(&rp->lock);
        return;
    }

    /* last strong reference is gone */
    /* void *ptr = rp->ptr; */
    rp->ptr = NULL;
    /* delete ptr; // see doc comment */

    weak = rp->weak;
    sys_mutex_unlock(&rp->lock);

    if (weak == 0) {
        pollmgr_refptr_delete(rp);
    }
}
/**
 * Receive a pointer sent over poll manager channel.
 *
 * Any channel failure is fatal since the poll manager cannot operate
 * without its control channels, hence the errx/err exits.
 */
void *
pollmgr_chan_recv_ptr(struct pollmgr_handler *handler, SOCKET fd, int revents)
{
    ssize_t nbytes;
    void *ptr;

    if (revents & POLLNVAL) {
        errx(EXIT_FAILURE, "chan %d: fd invalid", (int)handler->slot);
        /* NOTREACHED */
    }

    if (revents & (POLLERR | POLLHUP)) {
        errx(EXIT_FAILURE, "chan %d: fd error", (int)handler->slot);
        /* NOTREACHED */
    }

    LWIP_ASSERT1(revents & POLLIN);

    nbytes = recv(fd, (char *)&ptr, sizeof(ptr), 0);
    if (nbytes == SOCKET_ERROR) {
        err(EXIT_FAILURE, "chan %d: recv", (int)handler->slot);
        /* NOTREACHED */
    }
    if (nbytes != sizeof(ptr)) {
        errx(EXIT_FAILURE, "chan %d: recv: read %d bytes",
             (int)handler->slot, (int)nbytes);
        /* NOTREACHED */
    }

    return ptr;
}
/**
 * Check if "dst" is an IPv4 address that proxy remaps to host's
 * loopback.  If so and "lo" is not NULL, the loopback address it maps
 * to is stored there.
 */
static int
proxy_ip4_is_mapped_loopback(struct netif *netif, const ip_addr_t *dst, ip_addr_t *lo)
{
    const struct ip4_lomap *lm;
    u32_t host_off;
    size_t i;

    LWIP_ASSERT1(dst != NULL);

    if (g_proxy_options->lomap_desc == NULL) {
        return 0;
    }

    /* must be on this netif's network at all */
    if (!ip_addr_netcmp(dst, &netif->ip_addr, &netif->netmask)) {
        return 0;
    }

    /* XXX: TODO: check netif is a proxying netif! */

    /* host part of the address, in host byte order */
    host_off = ntohl(ip4_addr_get_u32(dst) & ~ip4_addr_get_u32(&netif->netmask));

    lm = g_proxy_options->lomap_desc->lomap;
    for (i = 0; i < g_proxy_options->lomap_desc->num_lomap; ++i, ++lm) {
        if (host_off == lm->off) {
            if (lo != NULL) {
                ip_addr_copy(*lo, lm->loaddr);
            }
            return 1;
        }
    }

    return 0;
}
/** * Lwip thread callback invoked via fwudp::msg_delete */ static void fwudp_pcb_delete(void *arg) { struct fwudp *fwudp = (struct fwudp *)arg; struct udp_pcb *pcb; struct udp_pcb **pprev; LWIP_ASSERT1(fwudp->inbuf.unsent == fwudp->inbuf.vacant); pprev = &udp_proxy_pcbs; pcb = udp_proxy_pcbs; while (pcb != NULL) { if (pcb->recv_arg != fwudp) { pprev = &pcb->next; pcb = pcb->next; } else { struct udp_pcb *dead = pcb; pcb = pcb->next; *pprev = pcb; memp_free(MEMP_UDP_PCB, dead); } } closesocket(fwudp->sock); free(fwudp->inbuf.buf); free(fwudp); }
/*
 * Check if "dst" is an IPv6 address that the proxy remaps to the
 * host's loopback (cf. pxremap_outbound_ip6, which uses the same
 * comparison): a unique-local interface address with its last byte
 * incremented by one.
 */
static int
proxy_ip6_is_mapped_loopback(struct netif *netif, ip6_addr_t *dst)
{
    int i;

    /* XXX: TODO: check netif is a proxying netif! */

    LWIP_ASSERT1(dst != NULL);

    for (i = 0; i < LWIP_IPV6_NUM_ADDRESSES; ++i) {
        if (ip6_addr_ispreferred(netif_ip6_addr_state(netif, i))
            && ip6_addr_isuniquelocal(netif_ip6_addr(netif, i)))
        {
            ip6_addr_t *ifaddr = netif_ip6_addr(netif, i);
            /* all but the final byte equal, final byte one greater */
            if (memcmp(dst, ifaddr, sizeof(ip6_addr_t) - 1) == 0
                && ((IP6_ADDR_BLOCK8(dst) & 0xff)
                    == (IP6_ADDR_BLOCK8(ifaddr) & 0xff) + 1))
            {
                return 1;
            }
        }
    }

    return 0;
}
/**
 * udp_recv() callback.
 *
 * A NULL pbuf signals conversation timeout; otherwise forward the
 * datagram to the host side.
 */
static void
pxudp_pcb_recv(void *arg, struct udp_pcb *pcb, struct pbuf *p,
               ip_addr_t *addr, u16_t port)
{
    struct pxudp *pxudp = (struct pxudp *)arg;

    LWIP_ASSERT1(pxudp != NULL);
    LWIP_ASSERT1(pcb == pxudp->pcb);
    LWIP_UNUSED_ARG(pcb);

    if (p == NULL) {
        pxudp_pcb_expired(pxudp);
        return;
    }

    pxudp_pcb_forward_outbound(pxudp, p, addr, port);
}
/** * Forward request to the req::residx resolver in the pxdns::resolvers * array of upstream resolvers. * * Returns 1 on success, 0 on failure. */ static int pxdns_forward_outbound(struct pxdns *pxdns, struct request *req) { union sockaddr_inet *resolver; ssize_t nsent; DPRINTF2(("%s: req %p: sending to resolver #%lu\n", __func__, (void *)req, (unsigned long)req->residx)); LWIP_ASSERT1(req->generation == pxdns->generation); LWIP_ASSERT1(req->residx < pxdns->nresolvers); resolver = &pxdns->resolvers[req->residx]; if (resolver->sa.sa_family == AF_INET) { nsent = sendto(pxdns->sock4, req->data, req->size, 0, &resolver->sa, sizeof(resolver->sin)); } else if (resolver->sa.sa_family == AF_INET6) { if (pxdns->sock6 != INVALID_SOCKET) { nsent = sendto(pxdns->sock6, req->data, req->size, 0, &resolver->sa, sizeof(resolver->sin6)); } else { /* shouldn't happen, we should have weeded out IPv6 resolvers */ return 0; } } else { /* shouldn't happen, we should have weeded out unsupported families */ return 0; } if ((size_t)nsent == req->size) { return 1; /* sent */ } if (nsent < 0) { DPRINTF2(("%s: send: %R[sockerr]\n", __func__, SOCKERRNO())); } else { DPRINTF2(("%s: sent only %lu of %lu\n", __func__, (unsigned long)nsent, (unsigned long)req->size)); } return 0; /* not sent, caller will retry as necessary */ }
/*
 * Unlink "req" from its timeout list.  If its slot's list becomes
 * empty, clear the slot's bit in the timeout mask so pxdns_timer()
 * can stop rearming itself when no requests remain.
 */
static void
pxdns_timeout_del(struct pxdns *pxdns, struct request *req)
{
    LWIP_ASSERT1(req->pprev_timeout != NULL);
    LWIP_ASSERT1(req->timeout_slot < TIMEOUT);

    /* intrusive doubly-linked list: fix up neighbor back-pointer */
    if (req->next_timeout != NULL) {
        req->next_timeout->pprev_timeout = req->pprev_timeout;
    }
    *req->pprev_timeout = req->next_timeout;
    req->pprev_timeout = NULL;
    req->next_timeout = NULL;

    if (pxdns->timeout_list[req->timeout_slot] == NULL) {
        pxdns->timeout_mask &= ~(1U << req->timeout_slot);
        /* may be on pollmgr thread so no sys_untimeout */
    }
}
/**
 * Schedule deletion of a dynamic slot: mark its fd invalid and let
 * the poll loop reclaim the slot (see poll loop).
 */
void
pollmgr_del_slot(int slot)
{
    LWIP_ASSERT1(slot >= POLLMGR_SLOT_FIRST_DYNAMIC);

    DPRINTF2(("%s(%d): fd %d ! DELETED\n",
              __func__, slot, pollmgr.fds[slot].fd));

    pollmgr.fds[slot].fd = INVALID_SOCKET; /* see poll loop */
}
/*
 * Called on the lwip thread (aka tcpip thread) from tcpip_init() via
 * its "tcpip_init_done" callback.  Raw API is ok to use here
 * (e.g. rtadvd), but netconn API is not.
 */
void
proxy_init(struct netif *proxy_netif, struct proxy_options *opts)
{
    int status;

    LWIP_ASSERT1(opts != NULL);
    LWIP_UNUSED_ARG(proxy_netif);

    /* custom %R[sockerr] format for socket error reporting */
    status = RTStrFormatTypeRegister("sockerr", proxy_sockerr_rtstrfmt, NULL);
    AssertRC(status);

    g_proxy_options = opts;
    g_proxy_netif = proxy_netif;

#if 1
    proxy_rtadvd_start(proxy_netif);
#endif

    /*
     * XXX: We use stateless DHCPv6 only to report IPv6 address(es) of
     * nameserver(s).  Since we don't yet support IPv6 addresses in
     * HostDnsService, there's no point in running DHCPv6.
     */
#if 0
    dhcp6ds_init(proxy_netif);
#endif

    if (opts->tftp_root != NULL) {
        tftpd_init(proxy_netif, opts->tftp_root);
    }

    /* the poll manager must exist before any proxy module registers with it */
    status = pollmgr_init();
    if (status < 0) {
        errx(EXIT_FAILURE, "failed to initialize poll manager");
        /* NOTREACHED */
    }

    pxtcp_init();
    pxudp_init();

    portfwd_init();

    pxdns_init(proxy_netif);

    pxping_init(proxy_netif, opts->icmpsock4, opts->icmpsock6);

    /* start polling only after all modules have initialized */
    pollmgr_tid = sys_thread_new("pollmgr_thread",
                                 pollmgr_thread, NULL,
                                 DEFAULT_THREAD_STACKSIZE,
                                 DEFAULT_THREAD_PRIO);
    if (!pollmgr_tid) {
        errx(EXIT_FAILURE, "failed to create poll manager thread");
        /* NOTREACHED */
    }
}
/**
 * Mapping from loopback to local network for inbound (port-forwarded)
 * connections.
 *
 * Copy "src" to "dst" with ip_addr_set(dst, src), but if "src" is a
 * host's loopback address, copy local network address that maps it to
 * "dst".
 *
 * @return PXREMAP_MAPPED, PXREMAP_ASIS, or PXREMAP_FAILED when "src"
 *         is loopback but no mapping is configured for it.
 */
int
pxremap_inbound_ip4(ip_addr_t *dst, ip_addr_t *src)
{
    struct netif *netif;
    const struct ip4_lomap *lomap;
    unsigned int i;

    /* 127/8 check: only loopback addresses are candidates for mapping */
    if (ip4_addr1(src) != IP_LOOPBACKNET) {
        ip_addr_set(dst, src);
        return PXREMAP_ASIS;
    }

    if (g_proxy_options->lomap_desc == NULL) {
        return PXREMAP_FAILED;
    }

#if 0 /* ?TODO: with multiple interfaces we need to consider fwspec::dst */
    netif = ip_route(target);
    if (netif == NULL) {
        return PXREMAP_FAILED;
    }
#else
    /* single-netif assumption, asserted below */
    netif = netif_list;
    LWIP_ASSERT1(netif != NULL);
    LWIP_ASSERT1(netif->next == NULL);
#endif

    lomap = g_proxy_options->lomap_desc->lomap;
    for (i = 0; i < g_proxy_options->lomap_desc->num_lomap; ++i) {
        if (ip_addr_cmp(src, &lomap[i].loaddr)) {
            ip_addr_t net;
            /* mapped address = network address + configured host offset */
            ip_addr_get_network(&net, &netif->ip_addr, &netif->netmask);
            ip4_addr_set_u32(dst,
                htonl(ntohl(ip4_addr_get_u32(&net)) + lomap[i].off));
            return PXREMAP_MAPPED;
        }
    }

    return PXREMAP_FAILED;
}
/**
 * Mapping from local network to loopback for outbound connections.
 *
 * Copy "src" to "dst" with ip_addr_set(dst, src), but if "src" is a
 * local network address that maps host's loopback address, copy
 * loopback address to "dst".
 */
int
pxremap_outbound_ip4(ip_addr_t *dst, ip_addr_t *src)
{
    struct netif *netif;

    LWIP_ASSERT1(dst != NULL);
    LWIP_ASSERT1(src != NULL);

    for (netif = netif_list; netif != NULL; netif = netif->next) {
        if (!netif_is_up(netif) /* || this is not a proxy netif */) {
            continue;
        }
        if (proxy_ip4_is_mapped_loopback(netif, src, dst)) {
            return PXREMAP_MAPPED;
        }
    }

    /* not remapped, just copy src */
    ip_addr_set(dst, src);
    return PXREMAP_ASIS;
}
/** * Send static callback message from poll manager thread to lwip * thread, scheduling a function call in lwip thread context. * * XXX: Existing lwip api only provides non-blocking version for this. * It may fail when lwip thread is not running (mbox invalid) or if * post failed (mbox full). How to handle these? */ void proxy_lwip_post(struct tcpip_msg *msg) { struct tcpip_callback_msg *m; err_t error; LWIP_ASSERT1(msg != NULL); /* * lwip plays games with fake incomplete struct tag to enforce API */ m = (struct tcpip_callback_msg *)msg; error = tcpip_callbackmsg(m); if (error == ERR_VAL) { /* XXX: lwip thread is not running (mbox invalid) */ LWIP_ASSERT1(error != ERR_VAL); } LWIP_ASSERT1(error == ERR_OK); }
/**
 * Callback from poll manager to trigger sending to guest.
 */
static void
pxudp_pcb_write_inbound(void *ctx)
{
    struct pxudp *pxudp = (struct pxudp *)ctx;
    LWIP_ASSERT1(pxudp != NULL);

    /* nothing to do if the pcb is already gone */
    if (pxudp->pcb != NULL) {
        pxudp_pcb_forward_inbound(pxudp);
    }
}
/*
 * Unlink "req" from its hash chain and account for the completed
 * query.
 */
static void
pxdns_hash_del(struct pxdns *pxdns, struct request *req)
{
    struct request *next;

    LWIP_ASSERT1(req->pprev_hash != NULL);
    --pxdns->active_queries;

    next = req->next_hash;
    if (next != NULL) {
        next->pprev_hash = req->pprev_hash;
    }
    *req->pprev_hash = next;

    req->pprev_hash = NULL;
    req->next_hash = NULL;
}
/**
 * Retransmit of g/c expired requests and move timeout slot forward.
 *
 * Runs once a second (self-rearming via sys_timeout) as long as any
 * timeout slot has pending requests, tracked by pxdns::timeout_mask.
 */
static void
pxdns_timer(void *arg)
{
    struct pxdns *pxdns = (struct pxdns *)arg;
    struct request **chain, *req;
    u32_t mask;

    sys_mutex_lock(&pxdns->lock);

    /*
     * Move timeout slot first.  New slot points to the list of
     * expired requests.  If any expired request is retransmitted, we
     * keep it on the list (that is now current), effectively
     * resetting the timeout.
     */
    LWIP_ASSERT1(pxdns->timeout_slot < TIMEOUT);
    if (++pxdns->timeout_slot == TIMEOUT) {
        pxdns->timeout_slot = 0;
    }

    chain = &pxdns->timeout_list[pxdns->timeout_slot];
    req = *chain;
    while (req != NULL) {
        struct request *expired = req;
        req = req->next_timeout;

        if (pxdns_rexmit(pxdns, expired)) {
            /* retransmitted: stays on the (now current) slot's list */
            continue;
        }

        /* give up on this request: unlink and destroy */
        pxdns_hash_del(pxdns, expired);
        pxdns_timeout_del(pxdns, expired);
        ++pxdns->expired_queries;

        pxdns_request_free(expired);
    }

    /* update this slot's bit to reflect whether its list is empty */
    if (pxdns->timeout_list[pxdns->timeout_slot] == NULL) {
        pxdns->timeout_mask &= ~(1U << pxdns->timeout_slot);
    }
    else {
        pxdns->timeout_mask |= 1U << pxdns->timeout_slot;
    }
    mask = pxdns->timeout_mask;

    sys_mutex_unlock(&pxdns->lock);

    /* rearm only while some slot still has pending requests */
    if (mask != 0) {
        sys_timeout(1 * 1000, pxdns_timer, pxdns);
    }
}
/*
 * Compare two forwarding specifications for equality: same domain,
 * socket type, and same source/destination address:port pairs.
 */
int
fwspec_equal(struct fwspec *a, struct fwspec *b)
{
    LWIP_ASSERT1(a != NULL);
    LWIP_ASSERT1(b != NULL);

    if (a->sdom != b->sdom || a->stype != b->stype) {
        return 0;
    }

    if (a->sdom == PF_INET) {
        if (a->src.sin.sin_port != b->src.sin.sin_port
            || a->dst.sin.sin_port != b->dst.sin.sin_port)
        {
            return 0;
        }
        return a->src.sin.sin_addr.s_addr == b->src.sin.sin_addr.s_addr
            && a->dst.sin.sin_addr.s_addr == b->dst.sin.sin_addr.s_addr;
    }

    /* PF_INET6 */
    if (a->src.sin6.sin6_port != b->src.sin6.sin6_port
        || a->dst.sin6.sin6_port != b->dst.sin6.sin6_port)
    {
        return 0;
    }
    return IN6_ARE_ADDR_EQUAL(&a->src.sin6.sin6_addr, &b->src.sin6.sin6_addr)
        && IN6_ARE_ADDR_EQUAL(&a->dst.sin6.sin6_addr, &b->dst.sin6.sin6_addr);
}
/**
 * POLLMGR_CHAN_PXUDP_ADD handler.
 *
 * Get new pxudp from lwip thread and start polling its socket.
 */
static int
pxudp_pmgr_chan_add(struct pollmgr_handler *handler, SOCKET fd, int revents)
{
    struct pxudp *pxudp;
    int status;

    pxudp = pxudp_chan_recv(handler, fd, revents);
    LWIP_ASSERT1(pxudp != NULL);

    /* moved after the NULL assert so we don't dereference first */
    DPRINTF(("pxudp_add: new pxudp %p; pcb %p\n",
             (void *)pxudp, (void *)pxudp->pcb));

    LWIP_ASSERT1(pxudp->pmhdl.callback != NULL);
    /* was "=" (assignment): always true and lost when asserts are compiled out */
    LWIP_ASSERT1(pxudp->pmhdl.data == (void *)pxudp);
    LWIP_ASSERT1(pxudp->pmhdl.slot < 0);

    status = pollmgr_add(&pxudp->pmhdl, pxudp->sock, POLLIN);
    if (status < 0) {
        pxudp_schedule_delete(pxudp);
    }

    return POLLIN;
}
/*
 * Link "req" at the head of its hash chain, keyed by the DNS query id.
 */
static void
pxdns_hash_add(struct pxdns *pxdns, struct request *req)
{
    struct request **chain;

    LWIP_ASSERT1(req->pprev_hash == NULL);

    chain = &pxdns->request_hash[HASH(req->id)];

    req->next_hash = *chain;
    if (req->next_hash != NULL) {
        req->next_hash->pprev_hash = &req->next_hash;
        ++pxdns->hash_collisions;
    }

    req->pprev_hash = chain;
    *chain = req;
}
/*
 * Final destruction of a TCP forwarder.  Its connection mbox must
 * already be drained, which we assert by trying one more fetch.
 */
static void
fwtcp_pcb_delete(void *arg)
{
    struct fwtcp *fwtcp = (struct fwtcp *)arg;
    u32_t timo;
    void *data;

    timo = sys_mbox_tryfetch(&fwtcp->connmbox, &data);
    LWIP_ASSERT1(timo == SYS_MBOX_EMPTY);
    LWIP_UNUSED_ARG(timo); /* only in assert */

    sys_mbox_free(&fwtcp->connmbox);
    free(fwtcp);
}
/**
 * Mapping from loopback to local network for inbound (port-forwarded)
 * connections.
 *
 * Copy "src" to "dst" with ip6_addr_set(dst, src), but if "src" is a
 * host's loopback address, copy local network address that maps it to
 * "dst".
 *
 * @return PXREMAP_MAPPED, PXREMAP_ASIS, or PXREMAP_FAILED when "src"
 *         is ::1 but the netif has no suitable unique-local address.
 */
int
pxremap_inbound_ip6(ip6_addr_t *dst, ip6_addr_t *src)
{
    ip6_addr_t loopback;
    struct netif *netif;
    int i;

    ip6_addr_set_loopback(&loopback);
    if (!ip6_addr_cmp(src, &loopback)) {
        /* not ::1, pass through unchanged */
        ip6_addr_set(dst, src);
        return PXREMAP_ASIS;
    }

#if 0 /* ?TODO: with multiple interfaces we need to consider fwspec::dst */
    netif = ip6_route_fwd(target);
    if (netif == NULL) {
        return PXREMAP_FAILED;
    }
#else
    /* single-netif assumption, asserted below */
    netif = netif_list;
    LWIP_ASSERT1(netif != NULL);
    LWIP_ASSERT1(netif->next == NULL);
#endif

    for (i = 0; i < LWIP_IPV6_NUM_ADDRESSES; ++i) {
        ip6_addr_t *ifaddr = netif_ip6_addr(netif, i);
        if (ip6_addr_ispreferred(netif_ip6_addr_state(netif, i))
            && ip6_addr_isuniquelocal(ifaddr))
        {
            ip6_addr_set(dst, ifaddr);
            /*
             * Increment the last byte of the 128-bit address
             * (addr[3] is the last u32; byte [3] of it is the final
             * byte in network byte order — the inverse of the check
             * in pxremap_outbound_ip6).
             */
            ++((u8_t *)&dst->addr[3])[3];
            return PXREMAP_MAPPED;
        }
    }

    return PXREMAP_FAILED;
}
/*
 * Fill in a forwarding specification from numeric address strings and
 * host-order port numbers.
 *
 * Returns 0 on success, -1 if either address string fails to parse.
 */
int
fwspec_set(struct fwspec *fwspec, int sdom, int stype,
           const char *src_addr_str, uint16_t src_port,
           const char *dst_addr_str, uint16_t dst_port)
{
    struct addrinfo hints;
    struct addrinfo *ai;
    int rc;

    LWIP_ASSERT1(sdom == PF_INET || sdom == PF_INET6);
    LWIP_ASSERT1(stype == SOCK_STREAM || stype == SOCK_DGRAM);

    fwspec->sdom = sdom;
    fwspec->stype = stype;

    /* addresses must be numeric; no resolver lookups */
    memset(&hints, 0, sizeof(hints));
    hints.ai_family = (sdom == PF_INET) ? AF_INET : AF_INET6;
    hints.ai_socktype = stype;
    hints.ai_flags = AI_NUMERICHOST;

    rc = getaddrinfo(src_addr_str, NULL, &hints, &ai);
    if (rc != 0) {
        LogRel(("\"%s\": %s\n", src_addr_str, gai_strerror(rc)));
        return -1;
    }
    LWIP_ASSERT1(ai != NULL);
    LWIP_ASSERT1(ai->ai_addrlen <= sizeof(fwspec->src));
    memcpy(&fwspec->src, ai->ai_addr, ai->ai_addrlen);
    freeaddrinfo(ai);
    ai = NULL;

    rc = getaddrinfo(dst_addr_str, NULL, &hints, &ai);
    if (rc != 0) {
        LogRel(("\"%s\": %s\n", dst_addr_str, gai_strerror(rc)));
        return -1;
    }
    LWIP_ASSERT1(ai != NULL);
    LWIP_ASSERT1(ai->ai_addrlen <= sizeof(fwspec->dst));
    memcpy(&fwspec->dst, ai->ai_addr, ai->ai_addrlen);
    freeaddrinfo(ai);
    ai = NULL;

    /* ports are not part of the getaddrinfo lookup; set them here */
    if (sdom == PF_INET) {
        fwspec->src.sin.sin_port = htons(src_port);
        fwspec->dst.sin.sin_port = htons(dst_port);
    }
    else { /* PF_INET6 */
        fwspec->src.sin6.sin6_port = htons(src_port);
        fwspec->dst.sin6.sin6_port = htons(dst_port);
    }

    return 0;
}
/** * Create strongly held refptr. */ struct pollmgr_refptr * pollmgr_refptr_create(struct pollmgr_handler *ptr) { struct pollmgr_refptr *rp; LWIP_ASSERT1(ptr != NULL); rp = (struct pollmgr_refptr *)malloc(sizeof (*rp)); if (rp == NULL) { return NULL; } sys_mutex_new(&rp->lock); rp->ptr = ptr; rp->strong = 1; rp->weak = 0; return rp; }