/*
 * lwIP accept callback: a new TCP connection has arrived on a
 * listening pcb.  Wrap the new pcb in an internal connection,
 * append it to the listening connection's accept queue (FIFO,
 * linked through ->next, tail tracked in ->accepted_last), and
 * notify the listener through its event id.
 *
 * arg    - the listening socket's struct intern_connection
 * new_tp - the freshly accepted lwIP tcp pcb
 * err    - lwIP status; currently unused
 *
 * Returns ERR_OK; any internal failure is fatal (BUG()).
 */
static err_t
cos_net_lwip_tcp_accept(void *arg, struct tcp_pcb *new_tp, err_t err)
{
	struct intern_connection *ic = arg, *ica;
	net_connection_t nc;
	u16_t new_port;

	assert(ic);
	/* this is here to have the same properties as if we were
	 * calling the portmgr for each accept call.  Really, this
	 * call should be in the lwip stack.  The returned port is
	 * intentionally unused -- the call is made only for its
	 * cost/side effects in the port manager. */
	new_port = portmgr_new(cos_spd_id());

	if (0 > (nc = __net_create_tcp_connection(ic->spdid, ic->tid, new_tp, -1))) BUG();
	ica = net_conn_get_internal(nc);
	if (NULL == ica) BUG();
	/* BUGFIX: was `ic->next = NULL;`, which clobbered the
	 * *listener's* link field and left the new connection's
	 * ->next uninitialized before it became the queue tail. */
	ica->next = NULL;
	if (NULL == ic->accepted_ic) {
		assert(NULL == ic->accepted_last);
		ic->accepted_ic   = ica;
		ic->accepted_last = ica;
	} else {
		assert(NULL != ic->accepted_last);
		ic->accepted_last->next = ica;
		ic->accepted_last       = ica;
	}
	/* The listener must already have an event id associated
	 * (see net_accept_data) before connections can arrive. */
	assert(-1 != ic->data);
	if (evt_trigger(cos_spd_id(), ic->data)) BUG();
	tcp_accept_cnt++;

	return ERR_OK;
}
/*
 * Split a torrent: resolve path `param` (length `len`) relative to
 * the filesystem object backing torrent `td`, allocate a new torrent
 * for the resolved object with flags `tflags`, and trigger an event
 * on `evtid` so the client learns data is available.
 *
 * Returns the new torrent descriptor, or a negative errno:
 *   -EINVAL  td is null or does not name a live torrent
 *   -ENOENT  the path does not resolve to an object
 *   -ENOMEM  torrent allocation failed
 *
 * NOTE(review): `spdid` is unused here -- presumably retained for
 * interface symmetry/access control; confirm before removing.
 */
td_t
tsplit(spdid_t spdid, td_t td, char *param, int len,
       tor_flags_t tflags, long evtid)
{
	td_t ret = -1;
	struct torrent *t, *nt;
	struct fsobj *fso, *fsc, *parent; /* obj, child, and parent */
	char *subpath;

	if (tor_isnull(td)) return -EINVAL;

	LOCK();
	t = tor_lookup(td);
	if (!t) ERR_THROW(-EINVAL, done);
	fso = t->data;

	fsc = fsobj_path2obj(param, len, fso, &parent, &subpath);
	/* BUGFIX: previously `return -ENOENT;` here returned while
	 * still holding the lock taken by LOCK() above; route through
	 * `done:` like every other error path so we UNLOCK(). */
	if (!fsc) ERR_THROW(-ENOENT, done);
	fsobj_take(fsc);
	nt = tor_alloc(fsc, tflags);
	/* NOTE(review): on this failure path the reference taken by
	 * fsobj_take(fsc) above is not released -- verify whether a
	 * matching release is required here. */
	if (!nt) ERR_THROW(-ENOMEM, done);
	ret = nt->td;
	/* If we created the torrent, then trigger an event as we have data! */
	evt_trigger(cos_spd_id(), evtid);
done:
	UNLOCK();
	return ret;
}
/*
 * Server-3 pass-through for the event-chain test: forward event `id`
 * to the event component and report success unconditionally.
 */
int
ec3_ser3_pass(long id)
{
	evt_trigger(cos_spd_id(), id);

	return 0;
}
/*
 * High-priority thread body on a secondary core: spawns one HIGH_PRIO
 * and one LOW_PRIO thread, then loops forever validating values the
 * other core wrote into shared_mem and triggering event `evt` n_wait
 * times per iteration.  Never returns.
 *
 * NOTE(review): shared_mem/shared_ret/n_wait/evt are globals defined
 * elsewhere; the reads below are unsynchronized cross-core accesses
 * and the asserted values (2,4,6,8) encode the test protocol with the
 * producer core -- confirm against the writer before changing.
 */
void core1_high()
{
	/* NOTE(review): thd id printed with %d while other call sites
	 * use %ld for cos_get_thd_id() -- verify the return type. */
	printc("core %ld high prio thd %d running.\n", cos_cpuid(), cos_get_thd_id());
	create_thd(0, HIGH_PRIO);
	create_thd(1, LOW_PRIO);
	/* Brand operations removed.  Add acap creation here. */
	/* received_ipi, s, e, iter are only used by the commented-out
	 * instrumentation below; kept for when it is re-enabled. */
	int received_ipi = 0;
	int param[4];
	u64_t s, e;
	int iter = 0;
	while (1) {
		int ret = 0;
		/* printc("core %ld going to wait, thd %d\n", cos_cpuid(), cos_get_thd_id()); */
		/* if (-1 == (ret = cos_ainv_wait(...))) BUG(); */
		/* printc("core %ld, rec %d\n", cos_cpuid(), ++received_ipi); */
		/* Snapshot the producer's values, then validate them. */
		param[0] = shared_mem[0];
		param[1] = shared_mem[1];
		param[2] = shared_mem[2];
		param[3] = shared_mem[3];
		assert(param[0] == 2);
		assert(param[1] == 4);
		assert(param[2] == 6);
		assert(param[3] == 8);
		/* rdtscll(e); */
		/* data[iter++] = e - c1_tsc; */
		int i;
		for (i = 0; i < n_wait; i++) {
			delay(20);
			/* printc("core %d triggering evt %d, i %d....\n", cos_cpuid(), evt, i); */
			shared_ret = 10;
			/* rdtscll(s); */
			evt_trigger(cos_spd_id(), evt);
			/* rdtscll(e); */
			/* data[iter++] = e - s; */
			/* printc("core %d triggerred evt %d, i %d....\n", cos_cpuid(), evt, i); */
		}
	}
}
/*
 * Initialize transfer `channel` (acap variant): query its direction,
 * map its shared ring buffer into this component page by page, and --
 * for Linux-to-Composite channels -- create an async cap and loop
 * forever converting ainv activations into event triggers on the
 * channel's torrent.
 *
 * Returns 0 for nonexistent channels or after setup of a channel in
 * the other direction; for COS_TRANS_DIR_LTOC the tail of the
 * function is an infinite service loop and never returns.
 */
static int channel_init(int channel)
{
	char *addr, *start;
	unsigned long i, sz;
	int acap, srv_acap;
	int direction;

	direction = cos_trans_cntl(COS_TRANS_DIRECTION, channel, 0, 0);
	if (direction < 0) {
		channels[channel].exists = 0;
		return 0;
	}
	channels[channel].exists = 1;
	channels[channel].direction = direction;

	sz = cos_trans_cntl(COS_TRANS_MAP_SZ, channel, 0, 0);
	/* NOTE(review): the assert enforces 4MB; an older comment here
	 * claimed an 8MB max -- the assert is authoritative. */
	assert(sz <= (4*1024*1024));
	start = valloc_alloc(cos_spd_id(), cos_spd_id(), sz/PAGE_SIZE);
	assert(start);
	/* Map the channel's backing memory a page at a time. */
	for (i = 0, addr = start ; i < sz ; i += PAGE_SIZE, addr += PAGE_SIZE) {
		assert(!cos_trans_cntl(COS_TRANS_MAP, channel, (unsigned long)addr, i));
	}
	cringbuf_init(&channels[channel].rb, start, sz);

	if (direction == COS_TRANS_DIR_LTOC) {
		acap = cos_async_cap_cntl(COS_ACAP_CREATE, cos_spd_id(), cos_spd_id(),
					  cos_get_thd_id() << 16 | cos_get_thd_id());
		/* NOTE(review): assert(acap) also passes for a negative
		 * error return -- confirm cos_async_cap_cntl's failure
		 * convention. */
		assert(acap);
		/* cli acap not used.  Linux thread will be triggering the
		 * acap.  We set the cli acap owner to the current thread for
		 * access control only. */
		srv_acap = acap & 0xFFFF; /* server half lives in the low 16 bits */
		cos_trans_cntl(COS_TRANS_ACAP, channel, srv_acap, 0);
		while (1) {
			int ret;
			/* Block until the Linux side invokes the acap,
			 * then wake whoever waits on this channel's event. */
			if (-1 == (ret = cos_ainv_wait(srv_acap))) BUG();
			assert(channels[channel].t);
			evt_trigger(cos_spd_id(), channels[channel].t->evtid);
		}
	}
	return 0;
}
static int channel_init(int channel) { char *addr, *start; unsigned long i, sz; unsigned short int bid; int direction; direction = cos_trans_cntl(COS_TRANS_DIRECTION, channel, 0, 0); if (direction < 0) { channels[channel].exists = 0; return 0; } channels[channel].exists = 1; channels[channel].direction = direction; sz = cos_trans_cntl(COS_TRANS_MAP_SZ, channel, 0, 0); assert(sz <= (4*1024*1024)); /* current 8MB max */ start = valloc_alloc(cos_spd_id(), cos_spd_id(), sz/PAGE_SIZE); assert(start); for (i = 0, addr = start ; i < sz ; i += PAGE_SIZE, addr += PAGE_SIZE) { assert(!cos_trans_cntl(COS_TRANS_MAP, channel, (unsigned long)addr, i)); } cringbuf_init(&channels[channel].rb, start, sz); if (direction == COS_TRANS_DIR_LTOC) { bid = cos_brand_cntl(COS_BRAND_CREATE, 0, 0, cos_spd_id()); assert(bid > 0); assert(!cos_trans_cntl(COS_TRANS_BRAND, channel, bid, 0)); if (sched_add_thd_to_brand(cos_spd_id(), bid, cos_get_thd_id())) BUG(); while (1) { int ret; if (-1 == (ret = cos_brand_wait(bid))) BUG(); assert(channels[channel].t); evt_trigger(cos_spd_id(), channels[channel].t->evtid); } } return 0; }
/*
 * lwIP error callback.  This should be called every time that a tcp
 * connection is closed (aborted or reset).  Marks the internal
 * connection TCP_CLOSED, drops the pcb pointer, frees any queued
 * packet data, and notifies the client via the connection's event id
 * so it can observe the closure through the API.
 *
 * arg - the connection's struct intern_connection
 * err - the lwIP error code that triggered the callback
 */
static void
cos_net_lwip_tcp_err(void *arg, err_t err)
{
	struct intern_connection *ic = arg;
	assert(ic);

	switch(err) {
	case ERR_ABRT:
	case ERR_RST:
		assert(ic->conn_type == TCP);
		/* NOTE(review): redundant given the assert above (a
		 * conn_type equal to TCP cannot also be TCP_CLOSED). */
		assert(ic->conn_type != TCP_CLOSED);
		/* Only notify if an event id has been associated. */
		if (-1 != ic->data &&
		    evt_trigger(cos_spd_id(), ic->data)) BUG();
		ic->conn_type = TCP_CLOSED;
		ic->conn.tp = NULL;
		net_conn_free_packet_data(ic);
		break;
	default:
		printc("TCP error #%d: don't really have docs to know what this means.", err);
	}

	return;
}
/*
 * After a connection has been accepted, we need to associate it with
 * its "event" data, or the scalar to pass to the event component.
 * That is what this call does.
 *
 * Returns 0 on success, -1 if the connection is unknown, already has
 * event data associated, or the deferred event trigger fails.
 */
int net_accept_data(spdid_t spdid, net_connection_t nc, long data)
{
	struct intern_connection *ic;
	int ret;

	//NET_LOCK_TAKE();
	ic = net_verify_tcp_connection(nc, &ret);
	/* Reject unknown connections and double-association. */
	if (NULL == ic || -1 != ic->data) goto err;
	ic->data = data;
	/* If data has already arrived, but couldn't trigger the event
	 * because ->data was not set, trigger the event now. */
	/* NOTE(review): if evt_trigger fails we return -1 with ->data
	 * already set, so a caller retry will then fail the
	 * double-association check above -- confirm intended. */
	if (0 < ic->incoming_size && evt_trigger(cos_spd_id(), data)) goto err;
	/* NOTE(review): "accetp" is a typo, but the counter is declared
	 * elsewhere under this name -- renaming must happen there. */
	net_accetp_cnt++;
	//NET_LOCK_RELEASE();
	return 0;
err:
	//NET_LOCK_RELEASE();
	return -1;
}
/*
 * lwIP receive callback: payload has arrived on connection `ic`
 * (passed as `arg`) as a pbuf chain `p`.  Each pbuf's payload is
 * wrapped in a packet_queue node (carved out of the packet's own
 * header area) and appended to the connection's incoming FIFO, then
 * the client is notified via the connection's event id.
 *
 * A NULL `p` is lwIP's signal that the remote side closed; we abort
 * the pcb (which synchronously runs cos_net_lwip_tcp_err, marking the
 * connection TCP_CLOSED) and return ERR_CLSD.
 *
 * Returns ERR_OK after queueing, ERR_CLSD on remote close.  Internal
 * invariant violations are fatal (BUG()/assert).
 */
static err_t
cos_net_lwip_tcp_recv(void *arg, struct tcp_pcb *tp, struct pbuf *p, err_t err)
{
	struct intern_connection *ic;
	struct packet_queue *pq, *last;
	void *headers;
	struct pbuf *first;

	ic = (struct intern_connection*)arg;
	assert(NULL != ic);
	assert(TCP == ic->conn_type);
	if (NULL == p) {
		assert(ic->conn.tp == tp);
		/*
		 * This should call our registered error function
		 * above with ERR_ABRT, which will make progress
		 * towards closing the connection.
		 *
		 * Later, when the app calls some function in the API,
		 * TCP_CLOSED will be seen and the internal connection
		 * will be deallocated, and the application notified.
		 */
		tcp_abort(tp);
		/* The err callback runs synchronously inside tcp_abort,
		 * so the connection must already be closed here. */
		assert(ic->conn_type == TCP_CLOSED && NULL == ic->conn.tp);
		/* tcp_close(tp); // Jiguo: aggressive close */
		return ERR_CLSD;
	}

	first = p;
	/* Walk the pbuf chain, queueing each segment's payload. */
	while (p) {
		struct pbuf *q;

		if (p->ref != 1) printc("pbuf with len %d, totlen %d and refcnt %d",
					p->len, p->tot_len, p->ref);
		assert(p->len > 0);
		/* Our pbufs reference externally owned payload memory. */
		assert(p->type == PBUF_ROM || p->type == PBUF_REF);
		/* The packet_queue node lives in the packet's header
		 * region, in front of the payload. */
		headers = cos_net_header_start(p, TCP);
		assert (NULL != headers);
		pq = net_packet_pq(headers);
		pq->data = p->payload;
		pq->len = p->len;
		pq->next = NULL;
#ifdef TEST_TIMING
		pq->ts_start = timing_record(RECV, pq->ts_start);
#endif
		/* head and tail must be NULL together or non-NULL together */
		assert((NULL == ic->incoming) == (NULL == ic->incoming_last));
		/* Is the queue empty? */
		if (NULL == ic->incoming) {
			assert(NULL == ic->incoming_last);
			ic->incoming = ic->incoming_last = pq;
		} else {
			last = ic->incoming_last;
			last->next = pq;
			ic->incoming_last = pq;
		}
		ic->incoming_size += p->len;
		//assert(1 == p->ref);
		q = p->next;
		/* The payload now belongs to the queue node; detach it
		 * from the pbuf before the pbuf is freed below. */
		p->payload = p->alloc_track = NULL;
		assert(NULL != q || p->len == p->tot_len);
		assert(p->ref == 1);
		p = q;
	}
	/* Just make sure lwip is doing what we think its doing */
	assert(first->ref == 1);
	/* This should deallocate the entire chain */
	pbuf_free(first);
	/* Notify only if an event id has been associated yet (see
	 * net_accept_data for the deferred-trigger case). */
	if (-1 != ic->data && evt_trigger(cos_spd_id(), ic->data)) BUG();
	tcp_recv_cnt++;

	/* /\* If the thread blocked waiting for a packet, wake it up *\/ */
	/* if (RECVING == ic->thd_status) { */
	/* 	ic->thd_status = ACTIVE; */
	/* 	assert(ic->thd_status == ACTIVE); /\* Detect races *\/ */
	/* 	if (sched_wakeup(cos_spd_id(), ic->tid)) BUG(); */
	/* } */

	return ERR_OK;
}