/*
 * Split a torrent: either create/accept a new network connection, or
 * modify an existing one, returning the new torrent descriptor.
 *
 * Three cases, selected by (tid, param):
 *  - tid == td_root: create a fresh TCP connection bound to event evtid.
 *  - len == 0 or param contains "accept": accept a pending connection on
 *    the listening torrent tid (may return -EAGAIN if none is pending).
 *  - otherwise: look up the existing torrent and apply param as a
 *    connection-modification string (bind/listen) via modify_connection().
 *
 * Returns the new torrent descriptor (t->td) on create/accept success, or
 * a negative errno on failure.
 *
 * NOTE(review): on the modify-existing path, ret is never set to a
 * success value — a successful modify returns the initial -EINVAL.
 * Looks unintended (callers presumably expect >= 0); confirm against the
 * torrent interface contract before changing.
 *
 * Locking: net_lock is held across lookup/alloc; it is dropped around
 * modify_connection() because that function takes the lock itself.
 * ERR_THROW(e, l) assigns e to `ret` and jumps to label l.
 */
td_t tsplit(spdid_t spdid, td_t tid, char *param, int len, tor_flags_t tflags, long evtid)
{
	td_t ret = -EINVAL;
	struct torrent *t;
	net_connection_t nc = 0;
	int accept = 0;

	if (tor_isnull(tid)) return -EINVAL;

	NET_LOCK_TAKE();
	/* creating a new connection */
	if (tid == td_root || len == 0 || strstr(param, "accept")) {
		if (tid == td_root) { /* new connection */
			nc = net_create_tcp_connection(spdid, cos_get_thd_id(), evtid);
			if (nc <= 0) ERR_THROW(-ENOMEM, done);
		} else { /* len == 0 || strstr(param, "accept"), accept on connection */
			t = tor_lookup(tid);
			if (!t) goto done; /* ret still -EINVAL */
			nc = net_accept(spdid, (net_connection_t)t->data);
			if (nc == -EAGAIN) {
				/* printc("net accept return EAGAIN\n"); */
				ERR_THROW(-EAGAIN, done);
			}
			if (nc < 0) ERR_THROW(-EINVAL, done);
			/* attach the event id to the accepted connection */
			if (0 < net_accept_data(spdid, nc, evtid)) BUG();
			accept = 1;
		}
		/* wrap the raw connection id in a torrent descriptor */
		t = tor_alloc((void*)nc, tflags);
		if (!t) ERR_THROW(-ENOMEM, free); /* must close nc: see free: */
		ret = t->td;
	} else { /* modifying an existing connection */
		t = tor_lookup(tid);
		if (!t) goto done;
		nc = (net_connection_t)t->data;
	}
	/* accepted connections take no modification string; others do */
	if (!accept && len != 0) {
		int r;
		/* modify_connection takes net_lock itself — drop it here */
		NET_LOCK_RELEASE();
		r = modify_connection(spdid, nc, param, len);
		if (r < 0) ret = r;
		NET_LOCK_TAKE();
	}
done:
	NET_LOCK_RELEASE();
	assert(lock_contested(&net_lock) != cos_get_thd_id());
	return ret;
free:
	/* torrent allocation failed after the connection was created */
	net_close(spdid, nc);
	goto done;
}
/*
 * Read up to sz bytes from torrent td into the shared buffer named by
 * cbid.  Returns the byte count from net_recv() on success, -EINVAL for
 * a bad buffer or descriptor, -EACCES if td lacks read permission.
 * ERR_THROW(e, l) assigns e to `ret` and jumps to label l.
 */
int tread(spdid_t spdid, td_t td, int cbid, int sz)
{
	int ret;
	char *data;
	struct torrent *ot;
	net_connection_t conn;

	/* Resolve the caller-shared buffer and validate the descriptor
	 * before touching any shared state. */
	data = cbuf2buf(cbid, sz);
	if (!data) return -EINVAL;
	if (tor_isnull(td)) return -EINVAL;

	NET_LOCK_TAKE();
	ot = tor_lookup(td);
	if (!ot) ERR_THROW(-EINVAL, done);
	if (!(ot->flags & TOR_READ)) ERR_THROW(-EACCES, done);
	assert(ot->data);
	conn = (net_connection_t)ot->data;
	ret = net_recv(spdid, conn, data, sz);
done:
	NET_LOCK_RELEASE();
	assert(lock_contested(&net_lock) != cos_get_thd_id());
	return ret;
}
static int init(void) { unsigned short int i; void *b; lock_static_init(&netif_lock); NET_LOCK_TAKE(); cos_vect_init_static(&tmap); rb_init(&rb1_md_wildcard, &rb1); rb_init(&rb2_md, &rb2); /* Setup the region from which headers will be transmitted. */ if (cos_buff_mgmt(COS_BM_XMIT_REGION, &xmit_headers, sizeof(xmit_headers), 0)) { prints("net: error setting up xmit region."); } /* Wildcard upcall */ if (cos_net_create_net_brand(0, &rb1_md_wildcard)) BUG(); for (i = 0 ; i < NUM_WILDCARD_BUFFS ; i++) { if(!(b = alloc_rb_buff(&rb1_md_wildcard))) { prints("net: could not allocate the ring buffer."); } if(rb_add_buff(&rb1_md_wildcard, b, MTU)) { prints("net: could not populate the ring with buffer"); } } NET_LOCK_RELEASE(); return 0; }
/*
 * Apply textual connection-modification operations from `ops` to
 * connection nc.  Recognized operations:
 *   "bind:<ip-hex>:<port>"  — bind the connection to an address/port
 *   "listen:<queue-len>"    — put the connection into listening mode
 *
 * Both may appear in one string; each is applied under net_lock.
 * Returns the result of the last operation applied, or -EINVAL if no
 * recognized/parsable operation was found or the connection is invalid.
 */
static int modify_connection(spdid_t spdid, net_connection_t nc, char *ops, int len)
{
	struct intern_connection *ic;
	char *prop;
	int ret = -EINVAL;

	prop = strstr(ops, "bind");
	if (prop) {
		u32_t ip;
		/* BUG FIX: port was u32_t scanned with %d (int*) — a C
		 * format/type mismatch (UB).  Use int to match %d. */
		int port;
		int r;

		NET_LOCK_TAKE();
		/* validate nc before acting on it */
		ic = net_conn_get_internal(nc);
		if (NULL == ic) goto release;
		r = sscanf(prop, "bind:%x:%d", &ip, &port);
		if (r != 2) goto release;
		port &= 0xFFFF; /* clamp to a valid 16-bit port */
		ret = net_bind(spdid, nc, ip, (u32_t)port);
		NET_LOCK_RELEASE();
	}
	prop = strstr(ops, "listen");
	if (prop) {
		int r;
		unsigned int q;

		NET_LOCK_TAKE();
		ic = net_conn_get_internal(nc);
		if (NULL == ic) goto release;
		/* BUG FIX: %d with unsigned int* is a format/type
		 * mismatch; %u matches q's type. */
		r = sscanf(prop, "listen:%u", &q);
		if (r != 1) goto release;
		printc("net_listen q %u\n", q);
		ret = net_listen(spdid, nc, q);
		NET_LOCK_RELEASE();
	}
done:
	return ret;
release:
	/* error exit from inside a locked section */
	NET_LOCK_RELEASE();
	goto done;
}
static int init(void) { int cnt = 0; #ifdef LWIP_STATS int stats_cnt = 0; #endif lock_static_init(&net_lock); /* printc("netlock id %d\n", net_lock.lock_id); */ NET_LOCK_TAKE(); torlib_init(); net_conn_init(); cos_net_create_netif_thd(); init_lwip(); NET_LOCK_RELEASE(); /* Start the tcp timer */ while (1) { /* Sleep for a quarter of seconds as prescribed by lwip */ NET_LOCK_TAKE(); if (++cnt == 4) { #ifdef TEST_TIMING timing_output(); #endif } #ifdef LWIP_STATS if (++stats_cnt == 20) { stats_cnt = 0; stats_display(); } #endif tcp_tmr(); NET_LOCK_RELEASE(); timed_event_block(cos_spd_id(), 25); /* expressed in ticks currently */ /* printc("use timer to tcp debug thread here...\n"); */ cos_mpd_update(); } prints("net: Error -- returning from init!!!"); BUG(); return 0; }
/*
 * Detach the calling thread from the event map.  The wildcard brand
 * must already exist.  Always returns 0.
 */
int netif_event_release(spdid_t spdid)
{
	unsigned short int tid = cos_get_thd_id();

	assert(wildcard_brand_id > 0);

	NET_LOCK_TAKE();
	rem_thd_map(tid);
	NET_LOCK_RELEASE();

	return 0;
}
/*
 * Transmit sz bytes from mem.  Rejects non-positive or over-MTU sizes
 * with -EINVAL; otherwise returns the result of __netif_xmit().
 */
int netif_event_xmit(spdid_t spdid, char *mem, int sz)
{
	int ret;

	/* bound the size before taking the lock */
	if (sz <= 0 || sz > MTU) return -EINVAL;

	NET_LOCK_TAKE();
	ret = __netif_xmit(mem, (unsigned int)sz);
	NET_LOCK_RELEASE();

	return ret;
}
/*
 * Transmit the payload carried in cos_array d.  Rejects arrays that are
 * not inside the argument region, and non-positive or over-MTU sizes,
 * with -EINVAL; otherwise returns the result of __netif_xmit().
 */
int netif_event_xmit(spdid_t spdid, struct cos_array *d)
{
	int ret;

	/* validate the argument-region array, then bound its size */
	if (!cos_argreg_arr_intern(d)) return -EINVAL;
	if (d->sz <= 0 || d->sz > MTU) return -EINVAL;

	NET_LOCK_TAKE();
	ret = __netif_xmit(d->mem, (unsigned int)d->sz);
	NET_LOCK_RELEASE();

	return ret;
}
/*
 * Register the calling thread as an event upcall thread.
 * Currently, this only adds to the wildcard brand.
 * Always returns 0 (failure to join the brand is a BUG()).
 */
int netif_event_create(spdid_t spdid)
{
	unsigned short int thd = cos_get_thd_id();

	assert(wildcard_brand_id > 0);

	NET_LOCK_TAKE();
	if (sched_add_thd_to_brand(cos_spd_id(), wildcard_brand_id, thd)) BUG();
	/* map this thread onto the wildcard (port 0) ring buffer */
	add_thd_map(thd, &rb1_md_wildcard);
	NET_LOCK_RELEASE();
	printc("created net uc %d associated with brand %d\n", thd, wildcard_brand_id);

	return 0;
}
/*
 * Block until a network interrupt arrives, then copy the pending packet
 * data into d->mem and set d->sz to the number of bytes produced.
 * Always returns 0.
 *
 * NOTE(review): argument-region validation of d (cos_argreg_arr_intern,
 * size >= MTU) is deliberately disabled in the original — confirm the
 * caller guarantees a valid, MTU-sized array.
 */
int netif_event_wait(spdid_t spdid, struct cos_array *d)
{
	int amnt = 0;

	interrupt_wait();

	NET_LOCK_TAKE();
	if (interrupt_process(d->mem, d->sz, &amnt)) BUG();
	NET_LOCK_RELEASE();

	d->sz = amnt;
	return 0;
}
/*
 * Register the calling thread as an event upcall thread.
 * Currently, this only adds to the wildcard acap.
 * Always returns 0 (acap creation failure is a BUG()).
 */
int netif_event_create(spdid_t spdid)
{
	unsigned short int thd = cos_get_thd_id();

	NET_LOCK_TAKE();
	/* Wildcard upcall */
	if (cos_net_create_net_acap(0, &rb1_md_wildcard)) BUG();
	assert(wildcard_acap_id > 0);
	/* map this thread onto the wildcard (port 0) ring buffer */
	add_thd_map(thd, &rb1_md_wildcard);
	NET_LOCK_RELEASE();
	printc("created net uc %d associated with acap %d\n", thd, wildcard_acap_id);

	return 0;
}
/*
 * Connect connection nc to the remote address ip:port on behalf of the
 * calling thread.  The connection must be valid, have internal state,
 * and be owned by the caller (ic->tid), else -EPERM.
 *
 * UDP: a simple non-blocking udp_connect; -EISCONN on lwIP failure.
 * TCP: initiates tcp_connect and blocks the calling thread
 *      (sched_block) until cos_net_lwip_tcp_connected wakes it; the
 *      thread's status transitions ACTIVE -> CONNECTING -> ACTIVE.
 *      -ENOMEM if tcp_connect itself fails.
 * TCP_CLOSED: -EPIPE.
 *
 * Locking: net_lock is held for all lwIP calls, and released before
 * sched_block() so other threads can run while we wait.
 * NOTE(review): the post-wakeup assert reads ic->thd_status without
 * re-taking net_lock — presumably safe because only the waker mutates
 * it; confirm against cos_net_lwip_tcp_connected.
 */
static int __net_connect(spdid_t spdid, net_connection_t nc, struct ip_addr *ip, u16_t port)
{
	struct intern_connection *ic;
	u16_t tid = cos_get_thd_id();

	NET_LOCK_TAKE();
	/* ownership/validity checks: only the owning thread may connect */
	if (!net_conn_valid(nc)) goto perm_err;
	ic = net_conn_get_internal(nc);
	if (NULL == ic) goto perm_err;
	if (tid != ic->tid) goto perm_err;
	assert(ACTIVE == ic->thd_status);

	switch (ic->conn_type) {
	case UDP:
	{
		struct udp_pcb *up;

		up = ic->conn.up;
		if (ERR_OK != udp_connect(up, ip, port)) {
			NET_LOCK_RELEASE();
			return -EISCONN;
		}
		break;
	}
	case TCP:
	{
		struct tcp_pcb *tp;

		tp = ic->conn.tp;
		/* mark ourselves blocked-on-connect before starting */
		ic->thd_status = CONNECTING;
		if (ERR_OK != tcp_connect(tp, ip, port, cos_net_lwip_tcp_connected)) {
			ic->thd_status = ACTIVE; /* roll back on failure */
			NET_LOCK_RELEASE();
			return -ENOMEM;
		}
		/* drop the lock so the connected-callback can run */
		NET_LOCK_RELEASE();
		if (sched_block(cos_spd_id(), 0) < 0) BUG();
		assert(ACTIVE == ic->thd_status);
		/* When we wake up, we should be connected. */
		return 0;
	}
	case TCP_CLOSED:
//		__net_close(ic);
		NET_LOCK_RELEASE();
		return -EPIPE;
	default:
		BUG();
	}
	NET_LOCK_RELEASE();
	return 0;
perm_err:
	NET_LOCK_RELEASE();
	return -EPERM;
}
/*
 * Release a user-defined torrent: close its network connection (if one
 * is attached) and free the torrent structure.  Non-user-defined
 * descriptors are ignored.
 */
void trelease(spdid_t spdid, td_t td)
{
	struct torrent *tor;
	net_connection_t conn;

	if (!tor_is_usrdef(td)) return;

	NET_LOCK_TAKE();
	tor = tor_lookup(td);
	if (tor) {
		conn = (net_connection_t)tor->data;
		if (conn) net_close(spdid, conn);
		tor_free(tor);
	}
	NET_LOCK_RELEASE();
	assert(lock_contested(&net_lock) != cos_get_thd_id());
}
/*
 * Block until a network interrupt arrives, then copy the pending packet
 * into mem (which must hold at least MTU bytes, else -EINVAL).  Returns
 * the number of bytes written.  (Timing instrumentation around
 * interrupt_wait() is intentionally disabled.)
 */
int netif_event_wait(spdid_t spdid, char *mem, int sz)
{
	int amnt = 0;

	if (sz < MTU) return -EINVAL;

	interrupt_wait();

	NET_LOCK_TAKE();
	if (interrupt_process(mem, sz, &amnt)) BUG();
	interrupt_process_cnt++;
	NET_LOCK_RELEASE();

	return amnt;
}
/*
 * Send sz bytes of data on connection nc.  Returns sz on success, 0 if
 * the TCP send buffer cannot hold sz bytes (caller should retry),
 * -EINVAL/-EPERM/-EMSGSIZE/-ENOMEM/-ENOTCONN/-EPIPE on failure.
 *
 * UDP: wrap data in a PBUF_ROM pbuf and udp_send it (non-blocking).
 * TCP (TCP_SEND_COPY, the compiled path): copy data into a freshly
 *      malloc'd packet_queue so the ring buffer can be freed, tcp_write
 *      it with no-copy flags, then tcp_output immediately (no Nagle).
 *
 * NOTE(review): NET_LOCK_TAKE/RELEASE are commented out here —
 * presumably the caller already holds net_lock around all lwIP calls;
 * confirm before calling this from a new path.
 */
int net_send(spdid_t spdid, net_connection_t nc, void *data, int sz)
{
	struct intern_connection *ic;
	u16_t tid = cos_get_thd_id();
	int ret = sz;

//	if (!cos_argreg_buff_intern(data, sz)) return -EFAULT;
	if (!net_conn_valid(nc)) return -EINVAL;
	if (sz > MAX_SEND) return -EMSGSIZE;

//	NET_LOCK_TAKE();
	/* only the owning thread may send on a connection */
	ic = net_conn_get_internal(nc);
	if (NULL == ic) {
		ret = -EINVAL;
		goto err;
	}
	if (tid != ic->tid) {
		ret = -EPERM;
		goto err;
	}

	switch (ic->conn_type) {
	case UDP:
	{
		struct udp_pcb *up;
		struct pbuf *p;

		/* There's no blocking in the UDP case, so this is simple */
		up = ic->conn.up;
		p = pbuf_alloc(PBUF_TRANSPORT, sz, PBUF_ROM);
		if (NULL == p) {
			ret = -ENOMEM;
			goto err;
		}
		p->payload = data;
		if (ERR_OK != udp_send(up, p)) {
			pbuf_free(p);
			/* IP/port must not be set */
			ret = -ENOTCONN;
			goto err;
		}
		pbuf_free(p);
		break;
	}
	case TCP:
	{
		struct tcp_pcb *tp;
#define TCP_SEND_COPY
#ifdef TCP_SEND_COPY
		void *d;
		struct packet_queue *pq;
#endif
		tp = ic->conn.tp;
		/* back-pressure: tell the caller to retry later */
		if (tcp_sndbuf(tp) < sz) {
			ret = 0;
			break;
		}
#ifdef TCP_SEND_COPY
		/* copy into heap storage so the caller's buffer can be
		 * reused immediately; lwIP frees pq when acked */
		pq = malloc(sizeof(struct packet_queue) + sz);
		if (unlikely(NULL == pq)) {
			ret = -ENOMEM;
			goto err;
		}
#ifdef TEST_TIMING
		pq->ts_start = timing_record(APP_PROC, ic->ts_start);
#endif
		pq->headers = NULL;
		d = net_packet_data(pq);
		memcpy(d, data, sz);
		if (ERR_OK != (ret = tcp_write(tp, d, sz, 0))) {
#else
		if (ERR_OK != (ret = tcp_write(tp, data, sz, TCP_WRITE_FLAG_COPY))) {
#endif
			/* sndbuf was checked above, so failure here is fatal */
			free(pq);
			printc("tcp_write returned %d (sz %d, tcp_sndbuf %d, ERR_MEM: %d)", ret, sz, tcp_sndbuf(tp), ERR_MEM);
			BUG();
		}
		/* No implementation of nagle's algorithm yet.  Send
		 * out the packet immediately if possible. */
		if (ERR_OK != (ret = tcp_output(tp))) {
			printc("tcp_output returned %d, ERR_MEM: %d", ret, ERR_MEM);
			BUG();
		}
		ret = sz;
		break;
	}
	case TCP_CLOSED:
		ret = -EPIPE;
		break;
	default:
		BUG();
	}
err:
//	NET_LOCK_RELEASE();
	return ret;
}

/************************ LWIP integration: **************************/

/* lwIP interface configuration: our address, netmask, and gateway. */
struct ip_addr ip, mask, gw;
struct netif   cos_if;

/*
 * Interrupt-path packet reception: validate the raw IPv4 packet,
 * copy it into a freshly malloc'd packet_queue (so the ring buffer
 * slot can be recycled and deallocation is uniform — see comment
 * below), wrap it in a PBUF_ROM pbuf, and hand it to lwIP's input.
 * Runs with net_lock held; all failures drop the packet silently
 * after logging.
 */
static void cos_net_interrupt(char *packet, int sz)
{
	void *d;
	int len;
	struct pbuf *p;
	struct ip_hdr *ih;
	struct packet_queue *pq;
#ifdef TEST_TIMING
	unsigned long long ts;
#endif

//	printc(">>> %d\n", net_lock.lock_id);
	NET_LOCK_TAKE();
//	printc("<<< %d\n", net_lock.lock_id);
	assert(packet);
	ih = (struct ip_hdr*)packet;
	/* only IPv4 is handled; drop anything else */
	if (unlikely(4 != IPH_V(ih))) goto done;
	len = ntohs(IPH_LEN(ih));
	/* header length must agree with the received size and fit the MTU */
	if (unlikely(len != sz || len > MTU)) {
		printc("len %d != %d or > %d", len, sz, MTU);
		goto done;
	}
	p = pbuf_alloc(PBUF_IP, len, PBUF_ROM);
	if (unlikely(!p)) {
		prints("OOM in interrupt: allocation of pbuf failed.\n");
		goto done;
	}

	/* For now, we're going to do an additional copy.  Currently,
	 * packets should be small, so this shouldn't hurt that badly.
	 * This is done because 1) we are freeing the packet
	 * elsewhere, 2) we want to malloc some (small) packets to
	 * save space and free up the ring buffers, 3) it is difficult
	 * to know in (1) which deallocation method (free or return to
	 * ring buff) to use */
	pq = malloc(len + sizeof(struct packet_queue));
	if (unlikely(NULL == pq)) {
		printc("OOM in interrupt: allocation of packet data (%d bytes) failed.\n", len);
		pbuf_free(p);
		goto done;
	}
	pq->headers = d = net_packet_data(pq);
#ifdef TEST_TIMING
#ifdef TCP_SEND_COPY
	ts = pq->ts_start = timing_timestamp();
#endif
#endif
	memcpy(d, packet, len);
	p->payload = p->alloc_track = d;
	/* hand off packet ownership here... */
	if (ERR_OK != cos_if.input(p, &cos_if)) {
		prints("net: failure in IP input.");
		pbuf_free(p);
		goto done;
	}
#ifdef TEST_TIMING
	timing_record(UPCALL_PROC, ts);
#endif
done:
	NET_LOCK_RELEASE();
	return;
}