/****************************************************************************
 * Name: usrsock_setsockopt
 *
 * Description:
 *   Forward a setsockopt() request for 'conn' to the user-space usrsock
 *   daemon and wait for the daemon's response.
 *
 * Input Parameters:
 *   conn      - usrsock connection instance
 *   level     - protocol level of the option
 *   option    - option identifier
 *   value     - option value buffer
 *   value_len - length of 'value' in bytes
 *
 * Returned Value:
 *   The daemon's result (>= 0) on success; a negated errno on failure:
 *   -EPIPE if the connection was aborted by the daemon, -ECONNRESET if
 *   the connection is uninitialized, or the setup/request error code.
 *
 ****************************************************************************/

int usrsock_setsockopt(FAR struct usrsock_conn_s *conn, int level,
                       int option, FAR const void *value,
                       FAR socklen_t value_len)
{
  struct usrsock_reqstate_s state = {};
  net_lock_t save;
  ssize_t ret;

  DEBUGASSERT(conn);

  save = net_lock();

  if (conn->state == USRSOCK_CONN_STATE_UNINITIALIZED ||
      conn->state == USRSOCK_CONN_STATE_ABORTED)
    {
      /* Invalid state or closed by daemon. */

      nlldbg("usockid=%d; setsockopt() with uninitialized usrsock.\n",
             conn->usockid);

      ret = (conn->state == USRSOCK_CONN_STATE_ABORTED) ? -EPIPE :
            -ECONNRESET;
      goto errout_unlock;
    }

  /* Set up event callback for usrsock. */

  ret = usrsock_setup_request_callback(conn, &state, setsockopt_event,
                                       USRSOCK_EVENT_ABORT |
                                       USRSOCK_EVENT_REQ_COMPLETE);
  if (ret < 0)
    {
      ndbg("usrsock_setup_request_callback failed: %d\n", ret);
      goto errout_unlock;
    }

  /* Request the user-space daemon to set the socket option. */

  ret = do_setsockopt_request(conn, level, option, value, value_len);
  if (ret >= 0)
    {
      /* Wait for completion of request.  net_lockedwait() may be
       * interrupted by a signal; only EINTR is expected here.
       */

      while (net_lockedwait(&state.recvsem) != OK)
        {
          DEBUGASSERT(*get_errno_ptr() == EINTR);
        }

      ret = state.result;
    }

  usrsock_teardown_request_callback(&state);

errout_unlock:
  net_unlock(save);
  return ret;
}
/* Release one reference on the network library; tear down Winsock when the
 * last reference is dropped.  Guarded against over-release so init_cnt can
 * never go negative.
 */
void net_cleanup()
{
    net_lock(&init_lock);

    /* Only the final user actually shuts Winsock down. */
    if (1 == init_cnt) {
        WSACleanup();
    }

    /* Do not underflow if cleanup is called more times than init. */
    if (init_cnt > 0) {
        --init_cnt;
    }

    net_unlock(&init_lock);
}
/****************************************************************************
 * Name: netdev_unregister
 *
 * Description:
 *   Remove 'dev' from the list of registered network devices.  If the
 *   device is not in the list, the list is left unchanged.
 *
 * Returned Value:
 *   OK if 'dev' is non-NULL; -EINVAL if 'dev' is NULL.
 *
 ****************************************************************************/

int netdev_unregister(FAR struct net_driver_s *dev)
{
  struct net_driver_s *prev;
  struct net_driver_s *curr;
  net_lock_t save;

  if (dev)
    {
      save = net_lock();

      /* Find the device in the list of known network devices */

      for (prev = NULL, curr = g_netdevices;
           curr && curr != dev;
           prev = curr, curr = curr->flink);

      /* Remove the device from the list of known network devices */

      if (curr)
        {
          /* Where was the entry */

          if (prev)
            {
              /* The entry was in the middle or at the end of the list */

              prev->flink = curr->flink;
            }
          else
            {
              /* The entry was at the beginning of the list */

              g_netdevices = curr->flink;
            }

          curr->flink = NULL;
        }

      net_unlock(save);

#ifdef CONFIG_NET_ETHERNET
      ninfo("Unregistered MAC: %02x:%02x:%02x:%02x:%02x:%02x as dev: %s\n",
            dev->d_mac.ether_addr_octet[0], dev->d_mac.ether_addr_octet[1],
            dev->d_mac.ether_addr_octet[2], dev->d_mac.ether_addr_octet[3],
            dev->d_mac.ether_addr_octet[4], dev->d_mac.ether_addr_octet[5],
            dev->d_ifname);
#else
      ninfo("Unregistered dev: %s\n", dev->d_ifname);
#endif
      return OK;
    }

  return -EINVAL;
}
/* Worker-queue entry point: run the TX-available poll with the network
 * locked.  'arg' is the driver state cast to void *.
 */
static void skel_txavail_work(FAR void *arg)
{
  FAR struct skel_driver_s *skel = (FAR struct skel_driver_s *)arg;
  net_lock_t lockstate;

  lockstate = net_lock();
  skel_txavail_process(skel);
  net_unlock(lockstate);
}
/* Worker-queue entry point: handle a TX timeout with the network locked.
 * 'arg' is the driver state cast to void *.
 */
static void skel_txtimeout_work(FAR void *arg)
{
  FAR struct skel_driver_s *skel = (FAR struct skel_driver_s *)arg;
  net_lock_t lockstate;

  lockstate = net_lock();
  skel_txtimeout_process(skel);
  net_unlock(lockstate);
}
int netdev_count(void) { struct net_driver_s *dev; net_lock_t save; int ndev; save = net_lock(); for (dev = g_netdevices, ndev = 0; dev; dev = dev->flink, ndev++); net_unlock(save); return ndev; }
/* Final teardown of a session: remove it from the pending-close tree,
 * detach it from epoll, close its descriptor, and free it.
 */
void release_rest_session(struct net_service* service, struct net_session* session)
{
    /* Drop the session from the close tree under the close lock. */
    net_lock(&service->close_lock);
    sb_tree_delete(&service->close_root, session->id);
    net_unlock(&service->close_lock);

    /* Now release the OS-level resources and the session object itself. */
    clean_epoll_op(service, session);
    net_close_fd(session->fd);
    release_net_session(session);
}
/* Atomically schedule the IGMP message 'msgid' for transmission on
 * 'group'.  Asserts that no other message is already pending.
 */
void igmp_schedmsg(FAR struct igmp_group_s *group, uint8_t msgid)
{
  net_lock_t lockstate;

  lockstate = net_lock();

  DEBUGASSERT(!IS_SCHEDMSG(group->flags));
  group->msgid = msgid;
  SET_SCHEDMSG(group->flags);

  net_unlock(lockstate);
}
/* Remove file 'fn' on the dc-tool host.  Returns the sc_unlink() result. */
int dcload_unlink(vfs_handler_t * dummy, const char *fn)
{
    int oldirq = 0;   /* consumed by the STOPIRQ/STARTIRQ macros */
    int rv;

    net_lock();
    STOPIRQ;
    rv = sc_unlink(fn);
    STARTIRQ;
    net_unlock();

    return rv;
}
/* Allocate an IGMP group structure for multicast address 'addr' and link
 * it into the group list of device 'dev'.  From interrupt context the
 * structure comes from the pre-allocated pool (if configured, otherwise
 * allocation fails); from task context it comes from the heap.
 *
 * Returns the new group, or NULL on allocation failure.
 */
FAR struct igmp_group_s *igmp_grpalloc(FAR struct net_driver_s *dev,
                                       FAR const in_addr_t *addr)
{
  FAR struct igmp_group_s *group;
  net_lock_t flags;

  nllvdbg("addr: %08x dev: %p\n", *addr, dev);
  if (up_interrupt_context())
    {
#if CONFIG_PREALLOC_IGMPGROUPS > 0
      grplldbg("Use a pre-allocated group entry\n");
      group = igmp_grpprealloc();
#else
      grplldbg("Cannot allocate from interrupt handler\n");
      group = NULL;
#endif
    }
  else
    {
      grplldbg("Allocate from the heap\n");
      group = igmp_grpheapalloc();
    }

  grplldbg("group: %p\n", group);

  /* Check if we successfully allocated a group structure */

  if (group)
    {
      /* Initialize the non-zero elements of the group structure */

      net_ipv4addr_copy(group->grpaddr, *addr);

      /* Semaphore starts at zero: used to wait for group events. */

      sem_init(&group->sem, 0, 0);

      /* Initialize the group timer (but don't start it yet) */

      group->wdog = wd_create();
      DEBUGASSERT(group->wdog);

      /* Interrupts must be disabled in order to modify the group list */

      flags = net_lock();

      /* Add the group structure to the list in the device structure */

      sq_addfirst((FAR sq_entry_t *)group, &dev->grplist);
      net_unlock(flags);
    }

  return group;
}
/* Close a dcload file or directory handle.  No-op when no dc-tool host
 * is attached (tool_ip unset).
 */
void dcload_close(uint32 hnd)
{
    if (!tool_ip)
        return;

    net_lock();
#ifndef BENPATCH
    /* Handles above 100 denote directories (hack); plain file handles
     * are stored offset by one.
     */
    if (hnd > 100)
        sc_closedir(hnd);
    else
        sc_close(hnd - 1);
#else
    dcload_close_handler(hnd);
#endif
    net_unlock();
}
/* Worker-queue entry point: service pending Ethernet interrupts with the
 * network locked, then re-enable the Ethernet interrupt.
 */
static void skel_interrupt_work(FAR void *arg)
{
  FAR struct skel_driver_s *skel = (FAR struct skel_driver_s *)arg;
  net_lock_t lockstate;

  lockstate = net_lock();
  skel_interrupt_process(skel);
  net_unlock(lockstate);

  /* Interrupt servicing is complete; allow further interrupts. */

  up_enable_irq(CONFIG_skeleton_IRQ);
}
/* Read the next directory entry from dcload directory handle 'hnd'.
 * Fills the (static) dirent with name/size/time by stat()ing the entry on
 * the host side.  Returns NULL at end of directory or on invalid handle.
 *
 * Fix: the malloc() for the stat path was previously unchecked; a failed
 * allocation would have crashed in strcpy().  Size/time now stay at their
 * defaults when the path cannot be built.
 */
dirent_t *dcload_readdir(uint32 hnd)
{
    int oldirq = 0;   /* consumed by the STOPIRQ/STARTIRQ macros */
    dirent_t *rv = NULL;
    dcload_dirent_t *dcld;
    dcload_stat_t filestat;
    char *fn;

#ifdef BENPATCH
    hnd = dcload_get_handler(hnd);
#endif
    if (hnd < 100) return NULL; /* hack: directory handles are >= 100 */

    net_lock();
    STOPIRQ;
    dcld = (dcload_dirent_t *)sc_readdir(hnd);
    STARTIRQ;

    if (dcld) {
        rv = &dirent;
        strcpy(rv->name, dcld->d_name);
        rv->size = 0;
        rv->time = 0;
        rv->attr = 0; /* what the hell is attr supposed to be anyways? */

        /* Build "<dcload_path><name>" to stat the entry on the host. */
        fn = malloc(strlen(dcload_path) + strlen(dcld->d_name) + 1);
        if (fn) {
            strcpy(fn, dcload_path);
            strcat(fn, dcld->d_name);

            STOPIRQ;
            if (!sc_stat(fn, &filestat)) {
                /* Directories are reported with size -1 by convention. */
                if (filestat.st_mode & S_IFDIR)
                    rv->size = -1;
                else
                    rv->size = filestat.st_size;
                rv->time = filestat.st_mtime;
            }
            STARTIRQ;

            free(fn);
        }
    }

    net_unlock();
    return rv;
}
/* React to a lost TCP connection: mark the connection closed per 'flags'
 * and stop the network monitor attached to it.
 */
void net_lostconnection(FAR struct socket *psock, uint16_t flags)
{
  net_lock_t lockstate;

  DEBUGASSERT(psock != NULL && psock->s_conn != NULL);

  lockstate = net_lock();

  /* Mark the connection as closed, then tear down its monitor. */

  connection_closed(psock, flags);
  net_stopmonitor((FAR struct tcp_conn_s *)psock->s_conn);

  net_unlock(lockstate);
}
/* Worker-queue entry point for a TX timeout: account the timeout in the
 * statistics, then poll the network to restart transmission.
 */
static void misoc_net_txtimeout_work(FAR void *arg)
{
  FAR struct misoc_net_driver_s *self = (FAR struct misoc_net_driver_s *)arg;

  net_lock();

  /* Increment statistics and dump debug info */

  NETDEV_TXTIMEOUTS(self->misoc_net_dev);

  /* Then poll the network for new XMIT data */

  (void)devif_poll(&self->misoc_net_dev, misoc_net_txpoll);

  net_unlock();
}
/* Accept a pending connection on listening session 'nd'.
 * Returns the id of the newly created session, or 0 on any failure
 * (bad arguments, stale/non-listening session, accept() error, or
 * resource exhaustion).
 */
NET_API net_socket net_accept(struct net_service* service, net_socket nd)
{
    unsigned short index;
    struct net_session* session;
    struct net_session* new_session;
    int fd;
    ffid_vtype id;

    if(!service || !nd)
        return 0;

    index = ffid_index(service->socket_ids, nd);

    /* Validate the listening session under its slot lock.  The id
     * comparison guards against the slot having been reused for a
     * different session since the caller obtained 'nd'.
     */
    net_lock(&service->session_lock[index]);
    session = service->sessions[index];
    if(!session || !session->lsession || session->id != nd)
    {
        net_unlock(&service->session_lock[index]);
        return 0;
    }

    fd = accept(session->fd, 0, 0);
    if(fd < 0)
    {
        net_unlock(&service->session_lock[index]);
        return 0;
    }

    /* Switch the accepted descriptor to non-blocking mode. */
    ctl_socket_async(fd);

    new_session = create_net_session();
    if(!new_session)
    {
        net_close_fd(fd);
        net_unlock(&service->session_lock[index]);
        return 0;
    }

    /* The listener is no longer needed; drop its lock before
     * registering the new session.
     */
    net_unlock(&service->session_lock[index]);

    new_session->fd = fd;
    id = add_net_session(service, new_session);
    if(!id)
    {
        /* Registration failed: release everything we created. */
        release_net_session(new_session);
        net_close_fd(fd);
        return 0;
    }

    return id;
}
/* Map an interface name to its interface index.  On lookup failure the
 * return value is -ENODEV converted to unsigned (matching the original
 * contract; callers are expected to test for it).
 */
unsigned int netdev_nametoindex(FAR const char *ifname)
{
  FAR struct net_driver_s *dev;
  unsigned int idx = -ENODEV;

  /* Look the driver up by name under the network lock. */

  net_lock();
  dev = netdev_findbyname(ifname);
  if (dev != NULL)
    {
      idx = dev->d_ifindex;
    }

  net_unlock();
  return idx;
}
/* Release IGMP group 'group': cancel and delete its watchdog, unlink it
 * from the device's group list, destroy its semaphore, and return the
 * memory either to the pre-allocated free list or to the heap.
 *
 * Note: the network lock is released at a different point in each branch
 * below — it must still be held while touching g_freelist, but is dropped
 * before the (potentially slower) deallocation.
 */
void igmp_grpfree(FAR struct net_driver_s *dev,
                  FAR struct igmp_group_s *group)
{
  net_lock_t flags;

  grplldbg("Free: %p flags: %02x\n", group, group->flags);

  /* Cancel the wdog */

  flags = net_lock();
  wd_cancel(group->wdog);

  /* Remove the group structure from the group list in the device structure */

  sq_rem((FAR sq_entry_t *)group, &dev->grplist);

  /* Destroy the wait semaphore */

  (void)sem_destroy(&group->sem);

  /* Destroy the wdog */

  wd_delete(group->wdog);

  /* Then release the group structure resources.  Check first if this is one
   * of the pre-allocated group structures that we will retain in a free list.
   */

#if CONFIG_PREALLOC_IGMPGROUPS > 0
  if (IS_PREALLOCATED(group->flags))
    {
      grplldbg("Put back on free list\n");
      sq_addlast((FAR sq_entry_t *)group, &g_freelist);
      net_unlock(flags);
    }
  else
#endif
    {
      /* No.. deallocate the group structure.  Use sched_kfree() just in case
       * this function is executing within an interrupt handler.
       */

      net_unlock(flags);
      grplldbg("Call sched_kfree()\n");
      sched_kfree(group);
    }
}
/* Rename 'fn1' to 'fn2' on the dc-tool host.  dcload has no native
 * rename(), so this is emulated as link-then-unlink.
 */
int dcload_rename(vfs_handler_t * dummy, const char *fn1, const char *fn2)
{
    int oldirq = 0;   /* consumed by the STOPIRQ/STARTIRQ macros */
    int rv;

    net_lock();

    /* really stupid hack, since I didn't put rename() in dcload */
    STOPIRQ;
    rv = sc_link(fn1, fn2);
    if (!rv)
        rv = sc_unlink(fn1);
    STARTIRQ;

    net_unlock();
    return rv;
}
/* Worker-queue entry point for a TX-available notification: if the
 * interface is up and the hardware can take a frame, poll the network
 * for new transmit data.
 */
static void misoc_net_txavail_work(FAR void *arg)
{
  FAR struct misoc_net_driver_s *self = (FAR struct misoc_net_driver_s *)arg;

  net_lock();

  /* Ignore the notification if the interface is not yet up */

  if (self->misoc_net_bifup)
    {
      /* Poll only when the SRAM reader can accept another outgoing
       * packet (ready bit clear in the original logic).
       */

      if (!ethmac_sram_reader_ready_read())
        {
          (void)devif_poll(&self->misoc_net_dev, misoc_net_txpoll);
        }
    }

  net_unlock();
}
/* Stop monitoring TCP connection 'conn': free its device event callback
 * (if any) and clear all monitor-related fields.
 */
void net_stopmonitor(FAR struct tcp_conn_s *conn)
{
  net_lock_t lockstate;

  DEBUGASSERT(conn);

  lockstate = net_lock();

  /* Release the allocated callback structure, if there is one. */

  if (conn->connection_devcb)
    {
      tcp_monitor_callback_free(conn, conn->connection_devcb);
    }

  /* Wipe every piece of connection event state. */

  conn->connection_private = NULL;
  conn->connection_devcb   = NULL;
  conn->connection_event   = NULL;

  net_unlock(lockstate);
}
/* Periodic poll worker: run a TX timer poll over the network stack and
 * re-arm the poll watchdog.  Skips the poll entirely (and does NOT re-arm
 * the watchdog) when no TX frame can be allocated.
 */
static void bcmf_poll_work(FAR void *arg)
{
  // wlinfo("Entry\n");

  FAR struct bcmf_dev_s *priv = (FAR struct bcmf_dev_s *)arg;

  /* Lock the network and serialize driver operations if necessary.
   * NOTE: Serialization is only required in the case where the driver work
   * is performed on an LP worker thread and where more than one LP worker
   * thread has been configured.
   */

  net_lock();

  /* Perform the poll */

  /* Check if there is room in the send another TX packet.  We cannot perform
   * the TX poll if he are unable to accept another packet for transmission.
   */

  if (bcmf_netdev_alloc_tx_frame(priv))
    {
      /* No frame available — bail out without polling or re-arming.
       * NOTE(review): this leaves the poll watchdog stopped until some
       * other path restarts it — confirm that is intentional.
       */

      goto exit_unlock;
    }

  /* If so, update TCP timing states and poll the network for new XMIT data.
   * Hmmm.. might be bug here.  Does this mean if there is a transmit in
   * progress, we will missing TCP time state updates?
   */

  priv->bc_dev.d_buf = priv->cur_tx_frame->data;
  priv->bc_dev.d_len = 0;
  (void)devif_timer(&priv->bc_dev, bcmf_txpoll);

  /* Setup the watchdog poll timer again */

  (void)wd_start(priv->bc_txpoll, BCMF_WDDELAY, bcmf_poll_expiry, 1,
                 (wdparm_t)priv);

exit_unlock:
  net_unlock();
}
/* Worker-queue entry point for the loopback TX-available notification.
 * While the interface is up, keep polling the network for XMIT data as
 * long as each pass actually transmitted something (lo_txdone set by the
 * poll callback).
 */
static void lo_txavail_work(FAR void *arg)
{
  FAR struct lo_driver_s *self = (FAR struct lo_driver_s *)arg;
  net_lock_t lockstate;

  lockstate = net_lock();

  /* Nothing to do unless the interface is up. */

  if (self->lo_bifup)
    {
      do
        {
          self->lo_txdone = false;
          (void)devif_poll(&self->lo_dev, lo_txpoll);
        }
      while (self->lo_txdone);
    }

  net_unlock(lockstate);
}
/* Completion handler for an asynchronous read.
 *   ret/err  - completion status from the I/O layer
 *   rsession - the read-operation bookkeeping record (may be NULL)
 *   bytes    - bytes transferred (currently unused here)
 *
 * Validates that the owning session still exists, re-arms the next read,
 * and pushes a Read (possibly with Error) event onto the service queue.
 */
void handle_read(struct net_service* service, int ret, int err,
                 struct read_session* rsession, size_t bytes)
{
    struct net_session* session;
    unsigned short index;
    unsigned int events;

    if(!rsession)
    {
        return;
    }

    /* id == 0 means the session was already torn down; just discard. */
    if(rsession->id == 0)
    {
        release_read_session(rsession);
        return;
    }

    index = ffid_index(service->socket_ids, rsession->id);

    /* Re-validate the session under its slot lock; the id comparison
     * guards against slot reuse.
     */
    net_lock(&service->session_lock[index]);
    session = service->sessions[index];
    if(!session || session->id != rsession->id)
    {
        release_read_session(rsession);
        net_unlock(&service->session_lock[index]);
        return;
    }

    /* Mark the read operation complete before queueing the next one. */
    rsession->op = OP_NET_NONE;
    events = Eve_Read;

    /* Flag an error if the completion failed or the follow-up read
     * could not be posted.
     */
    if((!ret && err) || post_read(service, session) )
    {
        events |= Eve_Error;
        print_error();
    }

    push_queue(service, session, events);
    net_unlock(&service->session_lock[index]);
}
/* Tear down the poll/select state previously set up on a TCP socket:
 * free the devif callback, clear the fds->priv slot, and free the info
 * container.  Returns OK, or -EINVAL (debug builds only) on bad state.
 */
int tcp_pollteardown(FAR struct socket *psock, FAR struct pollfd *fds)
{
  FAR struct tcp_conn_s *conn = psock->s_conn;
  FAR struct tcp_poll_s *info;
  net_lock_t flags;

  /* Sanity check */

#ifdef CONFIG_DEBUG
  if (!conn || !fds->priv)
    {
      return -EINVAL;
    }
#endif

  /* Recover the socket descriptor poll state info from the poll structure */

  info = (FAR struct tcp_poll_s *)fds->priv;
  DEBUGASSERT(info && info->fds && info->cb);
  if (info)
    {
      /* Release the callback */

      flags = net_lock();
      tcp_callback_free(conn, info->cb);
      net_unlock(flags);

      /* Release the poll/select data slot */

      info->fds->priv = NULL;

      /* Then free the poll info container */

      kmm_free(info);
    }

  return OK;
}
/* RX poll entry point: process received frames with the network locked.
 * 'arg' is the device state cast to void *.
 */
static void bcmf_rxpoll(FAR void *arg)
{
  // wlinfo("Entry\n");

  FAR struct bcmf_dev_s *self = (FAR struct bcmf_dev_s *)arg;

  /* Lock the network and serialize driver operations if necessary.
   * NOTE: Serialization is only required in the case where the driver work
   * is performed on an LP worker thread and where more than one LP worker
   * thread has been configured.
   */

  net_lock();

  bcmf_receive(self);

  /* Check if a packet transmission just completed.  If so, call bcmf_txdone.
   * This may disable further Tx interrupts if there are no pending
   * transmissions.
   */

  // bcmf_txdone(priv);

  net_unlock();
}
/* Report the total size of the file behind dcload handle 'hnd' by
 * seeking to the end and back.  Returns (size_t)-1 for an invalid
 * handle (matching the original contract).
 */
size_t dcload_total(uint32 hnd)
{
    int oldirq = 0;   /* consumed by the STOPIRQ/STARTIRQ macros */
    ssize_t total = -1;
    size_t saved_pos;

    net_lock();
#ifdef BENPATCH
    hnd = dcload_get_handler(hnd);
#endif
    if (hnd) {
        hnd--; /* KOS uses 0 for error, not -1 */

        /* Remember the current position, measure the end, restore. */
        STOPIRQ;
        saved_pos = sc_lseek(hnd, 0, SEEK_CUR);
        total = sc_lseek(hnd, 0, SEEK_END);
        sc_lseek(hnd, saved_pos, SEEK_SET);
        STARTIRQ;
    }
    net_unlock();

    return total;
}
/* Write 'cnt' bytes from 'buf' to dcload file handle 'hnd'.
 * Returns the sc_write() result, or -1 for an invalid (zero) handle.
 * With BENPATCH the public handle is first translated to the raw dcload
 * handle.
 */
ssize_t dcload_write(uint32 hnd, const void *buf, size_t cnt)
{
    int oldirq = 0;   /* consumed by the STOPIRQ/STARTIRQ macros */
    ssize_t ret = -1;

    net_lock();
#ifndef BENPATCH
    if (hnd)
        ret = sc_write(hnd-1, buf, cnt);
#else
    hnd = dcload_get_handler(hnd);
    if (hnd)
    {
        hnd--; /* KOS uses 0 for error, not -1 */
        STOPIRQ;
        ret = sc_write(hnd, buf, cnt);
        STARTIRQ;
    }
#endif
    net_unlock();

    return ret;
}
/* Bind UDP connection 'conn' to the local port in 'addr'.  A zero port
 * requests an ephemeral port chosen by udp_select_port(); otherwise the
 * port is taken only if no other UDP connection already holds it.
 * Returns OK on success or -EADDRINUSE if the port is taken.
 */
int udp_bind(FAR struct udp_conn_s *conn,
             FAR const struct sockaddr_in *addr)
#endif /* NOTE(review): matching #if (alternate IPv6 signature?) is above
        * this chunk — confirm against the full file. */
{
  int ret = -EADDRINUSE;
  net_lock_t flags;

  /* Is the user requesting to bind to any port? */

  if (!addr->sin_port)
    {
      /* Yes.. Find an unused local port number */

      conn->lport = htons(udp_select_port());
      ret = OK;
    }
  else
    {
      /* Interrupts must be disabled while access the UDP connection list */

      flags = net_lock();

      /* Is any other UDP connection bound to this port? */

      if (!udp_find_conn(addr->sin_port))
        {
          /* No.. then bind the socket to the port */

          conn->lport = addr->sin_port;
          ret = OK;
        }

      net_unlock(flags);
    }

  return ret;
}
/* Read 'cnt' bytes into 'buf' from dcload handle 'hnd'.
 * Without BENPATCH this is a straight pass-through to the host.
 * With BENPATCH a per-handle read-ahead buffer (dh) is consulted:
 * large reads drain the buffer and go straight to the host; small reads
 * are satisfied from the buffer, refilling it from the host as needed.
 * Returns the number of bytes read, a negative host error, or -1 for an
 * invalid handle.
 */
ssize_t dcload_read(uint32 hnd, void *buf, size_t cnt)
{
    int oldirq = 0;   /* consumed by STOPIRQ/STARTIRQ elsewhere; unused here */
    ssize_t ret = -1;

    net_lock();
#ifndef BENPATCH
    if (hnd)
        ret = dcload_read_buffer(hnd-1, buf, cnt);
#else
    if (hnd) {
        dcload_handler_t * dh = dcload_get_buffer_handler(hnd);
        if (!dh || cnt > dcload_buffering) {
            /* Unbuffered path (no buffer record, or the request is too
             * large to be worth buffering).  First drain whatever the
             * buffer still holds, then read the remainder directly.
             */
            ssize_t n = 0;
            if (dh) {
                hnd = dh->hdl;
                n = dh->cnt - dh->cur;
                if (n > 0) {
                    memcpy(buf, dh->buffer+dh->cur, n);
                    buf = (void *)((int8 *)buf + n);
                    cnt -= n;
                } else
                    n = 0;
                /* Invalidate the buffer; tell = -1 marks position unknown. */
                dh->cur = dh->cnt = 0;
                dh->tell = -1;
            }
            ret = dcload_read_buffer(hnd-1, buf, cnt) + n;
        } else {
            /* Buffered path: serve from dh->buffer, refilling from the
             * host until 'cnt' is satisfied or EOF/error.
             */
            int eof = 0;
            hnd = dh->hdl-1;
            ret = 0;
            while (cnt) {
                ssize_t n;

                /* Bytes still available in the buffer. */
                n = dh->cnt - dh->cur;
                if (n <= 0) {
                    /* Buffer exhausted: refill unless EOF already seen. */
                    n = dh->cnt = dh->cur = 0;
                    if (!eof) {
                        n = dcload_read_buffer(hnd, dh->buffer, dh->max);
                        /* A short read means end of file. */
                        eof = n != dh->max;
                        if (n < 0) { /* $$$ Try */
                            /* Propagate the error only if nothing was
                             * delivered yet.
                             */
                            if (!ret) ret = n;
                            break;
                        }
                    }
                    dh->cnt = n;
                }
                if (!n) {
                    break;
                }
                retell(dh,n);
                if (n > cnt) {
                    n = cnt;
                }
                /* Fast copy */
                memcpy(buf, dh->buffer+dh->cur, n);
                dh->cur += n;
                cnt -= n;
                ret += n;
                buf = (void *)((int8 *)buf + n);
            }
        }
    }
#endif
    net_unlock();
    return ret;
}