static void if_tx_thread(void *args) { ifnet *i = args; cbuf *buf; ssize_t len; t_current_set_name("IF Xmit"); #if IF_PRIO thread_set_priority(i->tx_thread, THREAD_MAX_RT_PRIORITY - 2); #endif //if(i->fd < 0) return -1; //printf("if %x tx thread inited", i); for(;;) { sem_acquire(i->tx_queue_sem); //printf("if %x tx thread gogo\n", i); for(;;) { // pull a packet out of the queue mutex_lock(&i->tx_queue_lock); buf = fixed_queue_dequeue(&i->tx_queue); mutex_unlock(&i->tx_queue_lock); if(!buf) break; #if LOSE_TX_PACKETS if(rand() % 100 < LOSE_TX_PERCENTAGE) { cbuf_free_chain(buf); continue; } #endif // put the cbuf chain into a flat buffer len = cbuf_get_len(buf); cbuf_memcpy_from_chain(i->tx_buf, buf, 0, len); cbuf_free_chain(buf); #if 0||NET_CHATTY dprintf("if_tx_thread: sending packet size %ld\n", (long)len); #endif //sys_write(i->fd, i->tx_buf, 0, len); i->dev->dops.write(i->dev, i->tx_buf, len); } } }
static int if_tx_thread(void *args) { ifnet *i = args; cbuf *buf; ssize_t len; if(i->fd < 0) return -1; for(;;) { sem_acquire(i->tx_queue_sem, 1); for(;;) { // pull a packet out of the queue mutex_lock(&i->tx_queue_lock); buf = fixed_queue_dequeue(&i->tx_queue); mutex_unlock(&i->tx_queue_lock); if(!buf) break; #if LOSE_TX_PACKETS if(rand() % 100 < LOSE_TX_PERCENTAGE) { cbuf_free_chain(buf); continue; } #endif // put the cbuf chain into a flat buffer len = cbuf_get_len(buf); cbuf_memcpy_from_chain(i->tx_buf, buf, 0, len); cbuf_free_chain(buf); #if NET_CHATTY dprintf("if_tx_thread: sending packet size %Ld\n", (long long)len); #endif sys_write(i->fd, i->tx_buf, 0, len); } } }
ssize_t port_read_etc(port_id id, int32 *msg_code, void *msg_buffer, size_t buffer_size, uint32 flags, bigtime_t timeout) { int slot; sem_id cached_semid; size_t siz; int res; int t; cbuf* msg_store; int32 code; int err; if(ports_active == false) return ERR_PORT_NOT_ACTIVE; if(id < 0) return ERR_INVALID_HANDLE; if(msg_code == NULL) return ERR_INVALID_ARGS; if((msg_buffer == NULL) && (buffer_size > 0)) return ERR_INVALID_ARGS; if (timeout < 0) return ERR_INVALID_ARGS; flags = flags & (PORT_FLAG_USE_USER_MEMCPY | PORT_FLAG_INTERRUPTABLE | PORT_FLAG_TIMEOUT); slot = id % MAX_PORTS; int_disable_interrupts(); GRAB_PORT_LOCK(ports[slot]); if(ports[slot].id != id) { RELEASE_PORT_LOCK(ports[slot]); int_restore_interrupts(); dprintf("read_port_etc: invalid port_id %d\n", id); return ERR_INVALID_HANDLE; } // store sem_id in local variable cached_semid = ports[slot].read_sem; // unlock port && enable ints/ RELEASE_PORT_LOCK(ports[slot]); int_restore_interrupts(); // XXX -> possible race condition if port gets deleted (->sem deleted too), therefore // sem_id is cached in local variable up here // get 1 entry from the queue, block if needed res = sem_acquire_etc(cached_semid, 1, flags, timeout, NULL); // XXX: possible race condition if port read by two threads... // both threads will read in 2 different slots allocated above, simultaneously // slot is a thread-local variable if (res == ERR_SEM_DELETED) { // somebody deleted the port return ERR_PORT_DELETED; } if (res == ERR_INTERRUPTED) { // XXX: somebody signaled the process the port belonged to, deleting the sem ? 
return ERR_INTERRUPTED; } if (res == ERR_SEM_TIMED_OUT) { // timed out, or, if timeout=0, 'would block' return ERR_PORT_TIMED_OUT; } if (res != NO_ERROR) { dprintf("write_port_etc: res unknown error %d\n", res); return res; } int_disable_interrupts(); GRAB_PORT_LOCK(ports[slot]); t = ports[slot].tail; if (t < 0) panic("port %id: tail < 0", ports[slot].id); if (t > ports[slot].capacity) panic("port %id: tail > cap %d", ports[slot].id, ports[slot].capacity); ports[slot].tail = (ports[slot].tail + 1) % ports[slot].capacity; msg_store = ports[slot].msg_queue[t].data_cbuf; code = ports[slot].msg_queue[t].msg_code; // mark queue entry unused ports[slot].msg_queue[t].data_cbuf = NULL; // check output buffer size siz = min(buffer_size, ports[slot].msg_queue[t].data_len); cached_semid = ports[slot].write_sem; RELEASE_PORT_LOCK(ports[slot]); int_restore_interrupts(); // copy message *msg_code = code; if (siz > 0) { if (flags & PORT_FLAG_USE_USER_MEMCPY) { if ((err = cbuf_user_memcpy_from_chain(msg_buffer, msg_store, 0, siz) < 0)) { // leave the port intact, for other threads that might not crash cbuf_free_chain(msg_store); sem_release(cached_semid, 1); return err; } } else cbuf_memcpy_from_chain(msg_buffer, msg_store, 0, siz); } // free the cbuf cbuf_free_chain(msg_store); // make one spot in queue available again for write sem_release(cached_semid, 1); return siz; }
/*
 * Receive one datagram from a UDP endpoint.
 *
 * prot_data - the udp_endpoint to read from
 * buf       - out: datagram payload, truncated to len bytes
 * len       - capacity of buf
 * saddr     - out (optional): source IPv4 address/port of the datagram
 * flags     - SOCK_FLAG_TIMEOUT enables the timeout argument
 * timeout   - acquire timeout when SOCK_FLAG_TIMEOUT is set
 *
 * Returns the datagram's full length (qe->len, which may exceed the bytes
 * actually copied if len was smaller), or a negative error code.
 *
 * Fixes vs. original: the "UDP read retry" printf was guarded by
 * "#if 1||NET_CHATTY" (always on — debug leftover) and lacked a newline;
 * it is now NET_CHATTY-only. Dead "#if 1" scaffolding and commented-out
 * code removed. No behavioral change otherwise.
 */
ssize_t udp_recvfrom( void *prot_data, void *buf, ssize_t len, i4sockaddr *saddr, int flags, bigtime_t timeout)
{
	udp_endpoint *e = prot_data;
	udp_queue_elem *qe;
	int err;
	ssize_t ret;

retry:
	/* block until a datagram arrives, honoring the timeout flag */
	if(flags & SOCK_FLAG_TIMEOUT)
		err = hal_sem_acquire_etc( &e->blocking_sem, 1, SEM_FLAG_TIMEOUT, timeout );
	else
		err = sem_acquire(e->blocking_sem);
	if(err)
		return -err;

	// pop an item off the list, if there are any
	mutex_lock(&e->lock);
	qe = udp_queue_pop(&e->q);
	mutex_unlock(&e->lock);

	if(!qe) {
		/* spurious wakeup: the semaphore fired but another reader
		 * drained the queue first — wait again */
#if NET_CHATTY
		printf("UDP read retry\n");
#endif
		goto retry;
	}

	// we have the data, copy it out
	err = cbuf_memcpy_from_chain(buf, qe->buf, 0, min(qe->len, len));
	if(err < 0) {
		ret = err;
		goto out;
	}
	ret = qe->len;

	// copy the address out
	if(saddr) {
		saddr->addr.len = 4;
		saddr->addr.type = ADDR_TYPE_IP;
		NETADDR_TO_IPV4(saddr->addr) = qe->src_address;
		saddr->port = qe->src_port;
	}

out:
	// free this queue entry
	cbuf_free_chain(qe->buf);
	kfree(qe);

	return ret;
}