/* Deferred (work queue) handler for a TX timeout: run the timeout
 * processing with the network locked.
 */

static void skel_txtimeout_work(FAR void *arg)
{
  FAR struct skel_driver_s *priv = (FAR struct skel_driver_s *)arg;
  net_lock_t lockstate;

  /* Serialize with the rest of the network stack while the timeout
   * recovery runs.
   */

  lockstate = net_lock();
  skel_txtimeout_process(priv);
  net_unlock(lockstate);
}
/* Fully tear down SESSION: remove it from SERVICE's close-pending tree
 * (under close_lock), detach it from epoll, close its file descriptor and
 * free the session object.
 * NOTE(review): assumes the caller holds the last reference to SESSION and
 * will not touch it afterwards -- confirm against call sites.
 */
void release_rest_session(struct net_service* service, struct net_session* session)
{
    /* Only the tree removal needs the close_lock */
    net_lock(&service->close_lock);
    sb_tree_delete(&service->close_root, session->id);
    net_unlock(&service->close_lock);

    /* fd/epoll teardown happens outside the lock */
    clean_epoll_op(service, session);
    net_close_fd(session->fd);
    release_net_session(session);
}
/* Schedule IGMP message MSGID for transmission on GROUP.  The flag and
 * message-id update must be atomic with respect to the network, so the
 * whole update is performed under the network lock.
 */
void igmp_schedmsg(FAR struct igmp_group_s *group, uint8_t msgid)
{
  net_lock_t save;

  save = net_lock();

  /* Only one message may be pending per group at a time */

  DEBUGASSERT(!IS_SCHEDMSG(group->flags));

  SET_SCHEDMSG(group->flags);
  group->msgid = msgid;

  net_unlock(save);
}
/* Delete file FN on the dc-tool host.  DUMMY is the unused VFS handler
 * argument.  Returns the sc_unlink() status from the host.
 */
int dcload_unlink(vfs_handler_t * dummy, const char *fn)
{
    int oldirq = 0;   /* used implicitly by the STOPIRQ/STARTIRQ macros */
    int ret;

    net_lock();
    STOPIRQ;
    ret = sc_unlink(fn);
    STARTIRQ;
    net_unlock();

    return ret;
}
/* Allocate an IGMP group structure for multicast address ADDR on device
 * DEV and add it to the device's group list.  Returns the new group, or
 * NULL on allocation failure.
 *
 * May be called from interrupt context, in which case the heap cannot be
 * used and the group must come from the pre-allocated pool (if any).
 */
FAR struct igmp_group_s *igmp_grpalloc(FAR struct net_driver_s *dev, FAR const in_addr_t *addr)
{
  FAR struct igmp_group_s *group;
  net_lock_t flags;

  nllvdbg("addr: %08x dev: %p\n", *addr, dev);

  if (up_interrupt_context())
    {
#if CONFIG_PREALLOC_IGMPGROUPS > 0
      grplldbg("Use a pre-allocated group entry\n");
      group = igmp_grpprealloc();
#else
      /* No pre-allocated pool configured: allocation is impossible here */
      grplldbg("Cannot allocate from interrupt handler\n");
      group = NULL;
#endif
    }
  else
    {
      grplldbg("Allocate from the heap\n");
      group = igmp_grpheapalloc();
    }

  grplldbg("group: %p\n", group);

  /* Check if we successfully allocated a group structure */

  if (group)
    {
      /* Initialize the non-zero elements of the group structure */

      net_ipv4addr_copy(group->grpaddr, *addr);
      sem_init(&group->sem, 0, 0);

      /* Initialize the group timer (but don't start it yet).
       * NOTE(review): wd_create() failure is only caught by DEBUGASSERT;
       * in release builds a NULL wdog would go unnoticed.
       */

      group->wdog = wd_create();
      DEBUGASSERT(group->wdog);

      /* Interrupts must be disabled in order to modify the group list */

      flags = net_lock();

      /* Add the group structure to the list in the device structure */

      sq_addfirst((FAR sq_entry_t *)group, &dev->grplist);
      net_unlock(flags);
    }

  return group;
}
/* Completion handler for an asynchronous read.  RET/ERR carry the
 * operation status; RSESSION identifies the read request; BYTES is the
 * transfer size (currently unused here -- presumably consumed by
 * post_read/push_queue paths; confirm).
 *
 * Validates that the session the read was issued against still exists and
 * has the same id (the slot may have been reused), then re-arms the read
 * and queues Eve_Read (plus Eve_Error on failure) for the session.
 */
void handle_read(struct net_service* service, int ret, int err, struct read_session* rsession, size_t bytes)
{
    struct net_session* session;
    unsigned short index;
    unsigned int events;

    if(!rsession) {
        return;
    }

    /* id 0 means the read session was orphaned; just recycle it */
    if(rsession->id == 0) {
        release_read_session(rsession);
        return;
    }

    index = ffid_index(service->socket_ids, rsession->id);
    net_lock(&service->session_lock[index]);
    session = service->sessions[index];

    /* Slot may hold a different (reused) session by now */
    if(!session || session->id != rsession->id) {
        release_read_session(rsession);
        net_unlock(&service->session_lock[index]);
        return;
    }

    rsession->op = OP_NET_NONE;
    events = Eve_Read;

    /* Zero-byte completion with an error code, or failure to re-post the
     * next read, marks the session as errored.
     */
    if((!ret && err) || post_read(service, session) ) {
        events |= Eve_Error;
        print_error();
    }

    push_queue(service, session, events);
    net_unlock(&service->session_lock[index]);
}
/* Close a dcload file or directory handle HND.  No-op when no dc-tool
 * host is attached.
 */
void dcload_close(uint32 hnd)
{
    if (!tool_ip)
        return;

    net_lock();

#ifndef BENPATCH
    /* Handles >= 100 are directory handles (see dcload_open); file handles
     * are biased by +1 because KOS uses 0 (not -1) for errors.
     */
    if (hnd > 100) /* hack */
        sc_closedir(hnd);
    else
        sc_close(hnd-1);
#else
    dcload_close_handler(hnd);
#endif

    net_unlock();
}
/* Deferred (work queue) interrupt handler: process pending Ethernet
 * interrupts with the network locked, then re-enable the IRQ that was
 * disabled by the top-half handler.
 */

static void skel_interrupt_work(FAR void *arg)
{
  FAR struct skel_driver_s *priv = (FAR struct skel_driver_s *)arg;
  net_lock_t lockstate;

  /* Handle the pending interrupt events while serialized with the stack */

  lockstate = net_lock();
  skel_interrupt_process(priv);
  net_unlock(lockstate);

  /* Interrupt processing is done; allow the hardware to interrupt again */

  up_enable_irq(CONFIG_skeleton_IRQ);
}
/* Read the next directory entry from dcload directory handle HND.
 * Returns a pointer to the (static) dirent, or NULL at end of directory
 * or on error.  The entry is stat()ed on the host to fill in size/time;
 * directories are reported with size -1.
 *
 * FIX: the malloc() of the stat path was used unchecked; on allocation
 * failure we now skip the stat (entry keeps size/time 0) instead of
 * dereferencing NULL.
 */
dirent_t *dcload_readdir(uint32 hnd)
{
    int oldirq = 0;   /* used implicitly by STOPIRQ/STARTIRQ */
    dirent_t *rv = NULL;
    dcload_dirent_t *dcld;
    dcload_stat_t filestat;
    char *fn;

#ifdef BENPATCH
    hnd = dcload_get_handler(hnd);
#endif

    /* Directory handles are >= 100 (see dcload_open) */
    if (hnd < 100)
        return NULL; /* hack */

    net_lock();

    STOPIRQ;
    dcld = (dcload_dirent_t *)sc_readdir(hnd);
    STARTIRQ;

    if (dcld) {
        rv = &dirent;
        /* NOTE(review): rv->name is a fixed-size buffer; assumes host
         * names fit -- confirm upstream guarantees.
         */
        strcpy(rv->name, dcld->d_name);
        rv->size = 0;
        rv->time = 0;
        rv->attr = 0; /* what the hell is attr supposed to be anyways? */

        /* Build "<dcload_path><name>" to stat the entry on the host.
         * dcload_path always ends in '/', so no separator is needed.
         */
        fn = malloc(strlen(dcload_path) + strlen(dcld->d_name) + 1);
        if (fn) {
            strcpy(fn, dcload_path);
            strcat(fn, dcld->d_name);

            STOPIRQ;
            if (!sc_stat(fn, &filestat)) {
                if (filestat.st_mode & S_IFDIR)
                    rv->size = -1;
                else
                    rv->size = filestat.st_size;
                rv->time = filestat.st_mtime;
            }
            STARTIRQ;

            free(fn);
        }
    }

    net_unlock();

    return rv;
}
/* Deferred TX timeout handler: account the timeout and restart
 * transmission by polling the network for new TX data, all under the
 * network lock.
 */

static void misoc_net_txtimeout_work(FAR void *arg)
{
  FAR struct misoc_net_driver_s *drvr = (FAR struct misoc_net_driver_s *)arg;

  net_lock();

  /* Record the timeout in the device statistics */

  NETDEV_TXTIMEOUTS(drvr->misoc_net_dev);

  /* Then reset the hardware */

  /* Kick the network to hand us the next packet to transmit */

  (void)devif_poll(&drvr->misoc_net_dev, misoc_net_txpoll);

  net_unlock();
}
/* Handle the loss of a TCP connection on PSOCK: mark the socket closed
 * (per FLAGS) and tear down the connection monitor.  Runs under the
 * network lock so the two steps are atomic.
 */
void net_lostconnection(FAR struct socket *psock, uint16_t flags)
{
  net_lock_t lockstate;

  DEBUGASSERT(psock != NULL && psock->s_conn != NULL);

  lockstate = net_lock();

  /* Update the socket state to reflect the lost connection */

  connection_closed(psock, flags);

  /* Then stop watching for further connection events */

  net_stopmonitor((FAR struct tcp_conn_s *)psock->s_conn);

  net_unlock(lockstate);
}
/* Map interface name IFNAME to its interface index.
 *
 * Returns the device's d_ifindex on success.  On failure -ENODEV is
 * returned through the unsigned return type, i.e. as a very large value.
 * NOTE(review): callers presumably cast back to a signed type or compare
 * against (unsigned int)-ENODEV -- confirm against call sites.
 */
unsigned int netdev_nametoindex(FAR const char *ifname)
{
  FAR struct net_driver_s *dev;
  unsigned int ifindex = -ENODEV;

  /* The device list must not change while we search it */

  net_lock();
  dev = netdev_findbyname(ifname);
  if (dev != NULL)
    {
      ifindex = dev->d_ifindex;
    }

  net_unlock();
  return ifindex;
}
/* Rename FN1 to FN2 on the dc-tool host.  DUMMY is the unused VFS
 * handler argument.
 *
 * dcload has no rename primitive, so this is emulated as link + unlink;
 * NOTE(review): not atomic, and leaves the link in place if the unlink
 * fails.
 */
int dcload_rename(vfs_handler_t * dummy, const char *fn1, const char *fn2)
{
    int oldirq = 0;   /* used implicitly by STOPIRQ/STARTIRQ */
    int ret;

    net_lock();

    /* really stupid hack, since I didn't put rename() in dcload */

    STOPIRQ;
    ret = sc_link(fn1, fn2);
    if (!ret)
        ret = sc_unlink(fn1);
    STARTIRQ;

    net_unlock();

    return ret;
}
/* Stop monitoring network-down events for CONN: release the registered
 * device callback (if any) and clear all monitor state.  Performed under
 * the network lock.
 */
void net_stopmonitor(FAR struct tcp_conn_s *conn)
{
  net_lock_t lockstate;

  DEBUGASSERT(conn);

  lockstate = net_lock();

  /* Give back the device event callback, if one was allocated */

  if (conn->connection_devcb != NULL)
    {
      tcp_monitor_callback_free(conn, conn->connection_devcb);
    }

  /* Forget everything about the monitor */

  conn->connection_event   = NULL;
  conn->connection_devcb   = NULL;
  conn->connection_private = NULL;

  net_unlock(lockstate);
}
/* Deferred TX-available handler: if the interface is up and the hardware
 * can accept another outgoing frame, poll the network for new TX data.
 */

static void misoc_net_txavail_work(FAR void *arg)
{
  FAR struct misoc_net_driver_s *priv = (FAR struct misoc_net_driver_s *)arg;

  net_lock();

  /* Nothing to do unless the interface is up AND there is room in the
   * hardware for another outgoing packet.
   */

  if (priv->misoc_net_bifup && !ethmac_sram_reader_ready_read())
    {
      /* Poll the network for new XMIT data */

      (void)devif_poll(&priv->misoc_net_dev, misoc_net_txpoll);
    }

  net_unlock();
}
/* Periodic (watchdog-driven) poll work: run the network timer poll for the
 * bcmf device and re-arm the poll watchdog.
 */
static void bcmf_poll_work(FAR void *arg)
{
  // wlinfo("Entry\n");
  FAR struct bcmf_dev_s *priv = (FAR struct bcmf_dev_s *)arg;

  /* Lock the network and serialize driver operations if necessary.
   * NOTE: Serialization is only required in the case where the driver work
   * is performed on an LP worker thread and where more than one LP worker
   * thread has been configured.
   */

  net_lock();

  /* Perform the poll */

  /* We cannot perform the TX poll unless we can allocate a frame to accept
   * another packet for transmission; if allocation fails, skip this cycle.
   * NOTE(review): the poll watchdog is NOT re-armed on this path -- confirm
   * whether polling is intentionally stopped when no TX frame is available.
   */

  if (bcmf_netdev_alloc_tx_frame(priv))
    {
      goto exit_unlock;
    }

  /* If so, update TCP timing states and poll the network for new XMIT
   * data.  Hmmm.. might be bug here.  Does this mean if there is a
   * transmit in progress, we will miss TCP time state updates?
   */

  priv->bc_dev.d_buf = priv->cur_tx_frame->data;
  priv->bc_dev.d_len = 0;
  (void)devif_timer(&priv->bc_dev, bcmf_txpoll);

  /* Setup the watchdog poll timer again */

  (void)wd_start(priv->bc_txpoll, BCMF_WDDELAY, bcmf_poll_expiry, 1, (wdparm_t)priv);

exit_unlock:
  net_unlock();
}
/* Deferred TX-available handler for the loopback device: while the
 * interface is up, keep polling the network for TX data until a poll
 * cycle completes without transmitting anything.
 */

static void lo_txavail_work(FAR void *arg)
{
  FAR struct lo_driver_s *priv = (FAR struct lo_driver_s *)arg;
  net_lock_t save;

  save = net_lock();

  /* Nothing to do if the interface is down */

  if (priv->lo_bifup)
    {
      for (; ; )
        {
          /* lo_txpoll sets lo_txdone when it loops a packet back */

          priv->lo_txdone = false;
          (void)devif_poll(&priv->lo_dev, lo_txpoll);

          /* Stop once a poll pass produced no packet */

          if (!priv->lo_txdone)
            {
              break;
            }
        }
    }

  net_unlock(save);
}
/* Tear down the poll setup created by tcp_pollsetup() for PSOCK/FDS:
 * free the TCP callback (under the network lock), clear fds->priv, and
 * free the poll info container.  Returns OK, or -EINVAL on bad arguments
 * (debug builds only).
 */
int tcp_pollteardown(FAR struct socket *psock, FAR struct pollfd *fds)
{
  FAR struct tcp_conn_s *conn = psock->s_conn;
  FAR struct tcp_poll_s *info;
  net_lock_t flags;

  /* Sanity check */

#ifdef CONFIG_DEBUG
  if (!conn || !fds->priv)
    {
      return -EINVAL;
    }
#endif

  /* Recover the socket descriptor poll state info from the poll structure */

  info = (FAR struct tcp_poll_s *)fds->priv;
  DEBUGASSERT(info && info->fds && info->cb);

  /* The runtime NULL check matters in release builds where DEBUGASSERT
   * compiles away.
   */

  if (info)
    {
      /* Release the callback */

      flags = net_lock();
      tcp_callback_free(conn, info->cb);
      net_unlock(flags);

      /* Release the poll/select data slot */

      info->fds->priv = NULL;

      /* Then free the poll info container */

      kmm_free(info);
    }

  return OK;
}
/* Console output hook: when a dc-tool host is attached, forward LEN bytes
 * of DATA to the host's stdout (fd 1) via sc_write(); otherwise fall back
 * to the previous printk function, or pretend success.
 *
 * Returns the number of bytes written, -1 if the network lock could not
 * be taken, or 0 when called from interrupt context (writing over the
 * network from an IRQ is not possible).
 */
static int printk_func(const uint8 *data, int len, int xlat)
{
    if (tool_ip) {
        int oldirq = 0;   /* used implicitly by STOPIRQ/STARTIRQ */
        int res = -1;

        if (irq_inside_int())
            return 0;

        /* Use trylock: printk may be called from contexts where blocking
         * on the network lock would deadlock.
         */
        if (net_trylock()) {
            STOPIRQ;
            res = sc_write(1, data, len);
            STARTIRQ;
            net_unlock();
        }
        return res;
    } else {
        if (real_old_printk_func)
            return real_old_printk_func(data, len, xlat);
        else
            return len;
    }
}
/* Deferred RX handler: dispatch received frames to the network stack
 * under the network lock.
 */
static void bcmf_rxpoll(FAR void *arg)
{
  // wlinfo("Entry\n");
  FAR struct bcmf_dev_s *priv = (FAR struct bcmf_dev_s *)arg;

  /* Lock the network and serialize driver operations if necessary.
   * NOTE: Serialization is only required in the case where the driver work
   * is performed on an LP worker thread and where more than one LP worker
   * thread has been configured.
   */

  net_lock();

  bcmf_receive(priv);

  /* Check if a packet transmission just completed.  If so, call bcmf_txdone.
   * This may disable further Tx interrupts if there are no pending
   * transmissions.
   */

  // bcmf_txdone(priv);
  net_unlock();
}
/* Bind UDP connection CONN to the local address/port in ADDR.
 * (The matching #if with the alternate IPv6 prototype precedes this
 * chunk.)
 *
 * Returns OK on success or -EADDRINUSE if the requested port is already
 * bound by another UDP connection.
 */
int udp_bind(FAR struct udp_conn_s *conn, FAR const struct sockaddr_in *addr)
#endif
{
  int ret = -EADDRINUSE;
  net_lock_t flags;

  /* Is the user requesting to bind to any port? */

  if (!addr->sin_port)
    {
      /* Yes.. Find an unused local port number.
       * NOTE(review): udp_select_port() is called without holding the
       * network lock here -- confirm it performs its own locking.
       */

      conn->lport = htons(udp_select_port());
      ret         = OK;
    }
  else
    {
      /* Interrupts must be disabled while access the UDP connection list */

      flags = net_lock();

      /* Is any other UDP connection bound to this port? */

      if (!udp_find_conn(addr->sin_port))
        {
          /* No.. then bind the socket to the port */

          conn->lport = addr->sin_port;
          ret         = OK;
        }

      net_unlock(flags);
    }

  return ret;
}
/* Return the total size of the file behind dcload handle HND, preserving
 * the current file position (seek to end, then seek back).
 *
 * NOTE(review): on error the internal ssize_t -1 is returned through the
 * size_t return type, i.e. as (size_t)-1 -- confirm callers expect that.
 */
size_t dcload_total(uint32 hnd)
{
    int oldirq = 0;   /* used implicitly by STOPIRQ/STARTIRQ */
    ssize_t ret = -1;
    size_t cur;

    net_lock();

#ifdef BENPATCH
    hnd = dcload_get_handler(hnd);
#endif

    if (hnd) {
        hnd--; /* KOS uses 0 for error, not -1 */
        STOPIRQ;
        cur = sc_lseek(hnd, 0, SEEK_CUR);
        ret = sc_lseek(hnd, 0, SEEK_END);
        sc_lseek(hnd, cur, SEEK_SET);
        STARTIRQ;
    }

    net_unlock();

    return ret;
}
/* Write CNT bytes from BUF to the file behind dcload handle HND.
 * Returns the number of bytes written, or -1 on a bad handle.
 */
ssize_t dcload_write(uint32 hnd, const void *buf, size_t cnt)
{
    int oldirq = 0;   /* used implicitly by STOPIRQ/STARTIRQ */
    ssize_t ret = -1;

    net_lock();

#ifndef BENPATCH
    if (hnd)
        ret = sc_write(hnd-1, buf, cnt);
#else
    /* Resolve the buffered handle wrapper back to the raw dc-tool handle */
    hnd = dcload_get_handler(hnd);
    if (hnd) {
        hnd--; /* KOS uses 0 for error, not -1 */
        STOPIRQ;
        ret = sc_write(hnd, buf, cnt);
        STARTIRQ;
    }
#endif

    net_unlock();

    return ret;
}
/* Start monitoring connection-loss events for the TCP socket PSOCK.
 *
 * Registers a NETDEV_DOWN device callback and the connection event hook
 * on the connection.  Returns OK on success, or -ENOTCONN if the
 * connection was already closed before monitoring could begin (in which
 * case the TCP_CLOSE event is delivered immediately).
 */
int net_startmonitor(FAR struct socket *psock)
{
  FAR struct tcp_conn_s *conn;
  FAR struct devif_callback_s *cb;
  net_lock_t save;

  DEBUGASSERT(psock != NULL && psock->s_conn != NULL);
  conn = (FAR struct tcp_conn_s *)psock->s_conn;

  /* Check if the connection has already been closed before any callbacks
   * have been registered. (Maybe the connection is lost before accept has
   * registered the monitoring callback.)
   */

  save = net_lock();
  if (!(conn->tcpstateflags == TCP_ESTABLISHED || conn->tcpstateflags == TCP_SYN_RCVD))
    {
      /* Invoke the TCP_CLOSE connection event now */

      (void)connection_event(NULL, conn, psock, TCP_CLOSE);

      /* Make sure that the monitor is stopped */

      conn->connection_private = NULL;
      conn->connection_devcb   = NULL;
      conn->connection_event   = NULL;

      /* And return -ENOTCONN to indicate that the monitor was not started
       * because the socket was already disconnected.
       */

      net_unlock(save);
      return -ENOTCONN;
    }

  DEBUGASSERT(conn->connection_event == NULL && conn->connection_devcb == NULL);

  /* Allocate a callback structure that we will use to get callbacks if
   * the network goes down.
   * NOTE(review): allocation failure is tolerated -- the monitor is still
   * armed below with a NULL devcb; confirm that is intentional.
   */

  cb = tcp_monitor_callback_alloc(conn);
  if (cb != NULL)
    {
      cb->event = connection_event;
      cb->priv  = (void*)psock;
      cb->flags = NETDEV_DOWN;
    }

  conn->connection_devcb = cb;

  /* Set up to receive callbacks on connection-related events */

  conn->connection_private = (void*)psock;
  conn->connection_event   = connection_event;

  net_unlock(save);
  return OK;
}
/* Read up to CNT bytes from dcload handle HND into BUF.
 * Returns the number of bytes read, or a negative error.
 *
 * With BENPATCH, small reads on a buffered handle are served from the
 * handle's read-ahead buffer; large reads (or unbuffered handles) first
 * drain any buffered bytes and then read the remainder directly.  The
 * logic is position-state sensitive (dh->cur/cnt/tell), so it is left
 * untouched and only documented here.
 */
ssize_t dcload_read(uint32 hnd, void *buf, size_t cnt)
{
    int oldirq = 0;   /* used implicitly by STOPIRQ/STARTIRQ (via helpers) */
    ssize_t ret = -1;

    net_lock();

#ifndef BENPATCH
    if (hnd)
        ret = dcload_read_buffer(hnd-1, buf, cnt);
#else
    if (hnd) {
        dcload_handler_t * dh = dcload_get_buffer_handler(hnd);

        if (!dh || cnt > dcload_buffering) {
            /* Unbuffered handle, or request too large to buffer: drain
             * whatever is already buffered, then read the rest directly.
             */
            ssize_t n = 0;
            if (dh) {
                hnd = dh->hdl;
                n = dh->cnt - dh->cur;
                if (n > 0) {
                    memcpy(buf, dh->buffer+dh->cur, n);
                    buf = (void *)((int8 *)buf + n);
                    cnt -= n;
                } else
                    n = 0;
                /* Buffer is now empty and its file position is unknown */
                dh->cur = dh->cnt = 0;
                dh->tell = -1;
            }
            ret = dcload_read_buffer(hnd-1, buf, cnt) + n;
        } else {
            /* Buffered read: refill dh->buffer as needed and copy out */
            int eof = 0;
            hnd = dh->hdl-1;
            ret = 0;
            while (cnt) {
                ssize_t n;
                n = dh->cnt - dh->cur;
                if (n <= 0) {
                    n = dh->cnt = dh->cur = 0;
                    if (!eof) {
                        n = dcload_read_buffer(hnd, dh->buffer, dh->max);
                        /* A short refill means end of file */
                        eof = n != dh->max;
                        if (n < 0) { /* $$$ Try */
                            /* Report the error only if nothing was read */
                            if (!ret)
                                ret = n;
                            break;
                        }
                    }
                    dh->cnt = n;
                }
                if (!n) {
                    break;
                }
                /* Track the host-side file position of the buffer end */
                retell(dh,n);
                if (n > cnt) {
                    n = cnt;
                }
                /* Fast copy */
                memcpy(buf, dh->buffer+dh->cur, n);
                dh->cur += n;
                cnt -= n;
                ret += n;
                buf = (void *)((int8 *)buf + n);
            }
        }
    }
#endif

    net_unlock();

    return ret;
}
/* Open file or directory FN on the dc-tool host.  DUMMY is the unused VFS
 * handler argument; MODE is a KOS open mode (O_DIR selects opendir).
 * Returns a non-zero handle on success, 0 on error or when no host is
 * attached.  File handles are biased by +1 (KOS uses 0 for error);
 * buffered read handles (BENPATCH) are wrapped in a dcload_handler_t.
 *
 * FIX: the trailing-slash test read fn[strlen(fn)], which is always the
 * NUL terminator, so a '/' was appended even when FN already ended with
 * one; the test now looks at the last character, matching upstream KOS.
 * Also: the dcload_path mallocs were used unchecked; on allocation
 * failure the cached path is now left NULL instead of crashing.
 */
uint32 dcload_open(vfs_handler_t * dummy, const char *fn, int mode)
{
#ifdef BENPATCH
    dcload_handler_t * hdl = 0;
#endif
    int hnd = 0;
    int dcload_mode = 0;
    int oldirq = 0;     /* used implicitly by STOPIRQ/STARTIRQ */
    int max_buffer = 0;

    /* dbglog(DBG_DEBUG, */
    /*        "fs_dcload : open [%s,%d]\n",fn,mode); */

    if (!tool_ip)
        return 0;

    net_lock();

    if (mode & O_DIR) {
        if (fn[0] == '\0') {
            fn = "/";
        }

        STOPIRQ;
        hnd = (int) sc_opendir(fn);
        STARTIRQ;

        if (hnd) {
            /* Cache the directory path (with trailing '/') for readdir */
            if (dcload_path)
                free(dcload_path);
            if (fn[strlen(fn) - 1] == '/') {
                dcload_path = malloc(strlen(fn)+1);
                if (dcload_path)
                    strcpy(dcload_path, fn);
            } else {
                dcload_path = malloc(strlen(fn)+2);
                if (dcload_path) {
                    strcpy(dcload_path, fn);
                    strcat(dcload_path, "/");
                }
            }
        }
    } else { /* hack */
        if (mode == O_RDONLY) {
            /* Read-only files are eligible for read-ahead buffering */
            max_buffer = dcload_buffering;
            dcload_mode = 0;
        }
        else if (mode == O_RDWR)
            dcload_mode = 2 | 0x0200;
        else if (mode == O_WRONLY)
            dcload_mode = 1 | 0x0200;
        else if (mode == O_APPEND)
            dcload_mode = 2 | 8 | 0x0200;

        STOPIRQ;
        hnd = sc_open(fn, dcload_mode, 0644);
        STARTIRQ;

        hnd++; /* KOS uses 0 for error, not -1 */
    }

#ifdef BENPATCH
    hdl = 0;
    if (hnd > 0 && max_buffer) {
        hdl = dcload_new_handler(max_buffer);
        if (hdl) {
            hdl->hdl = hnd;
            hnd = (int) hdl;
        }
        /* if alloc failed, fallback to normal handle. */
    }
#endif

    net_unlock();

    /* dbglog(DBG_DEBUG, */
    /*        "fs_dcload : open handler = [%p]\n", hdl); */

    return hnd;
}
/* Send LEN bytes from BUF on socket PSOCK to the address TO/TOLEN.
 *
 * With a NULL/zero destination this degenerates to psock_send() (for
 * connected TCP sockets).  Otherwise the destination is validated, the
 * UDP connection is bound to it, and the transfer is driven by the
 * sendto_interrupt callback while this thread waits on st_sem.
 *
 * Returns the number of bytes sent, or ERROR with errno set.
 */
ssize_t psock_sendto(FAR struct socket *psock, FAR const void *buf, size_t len, int flags, FAR const struct sockaddr *to, socklen_t tolen)
{
#ifdef CONFIG_NET_UDP
  FAR struct udp_conn_s *conn;
#ifdef CONFIG_NET_IPv6
  FAR const struct sockaddr_in6 *into = (const struct sockaddr_in6 *)to;
#else
  FAR const struct sockaddr_in *into = (const struct sockaddr_in *)to;
#endif
  struct sendto_s state;
  net_lock_t save;
  int ret;
#endif
  int err;

  /* If to is NULL or tolen is zero, then this function is same as send (for
   * connected socket types)
   */

  if (!to || !tolen)
    {
#ifdef CONFIG_NET_TCP
      return psock_send(psock, buf, len, flags);
#else
      ndbg("ERROR: No to address\n");
      err = EINVAL;
      goto errout;
#endif
    }

  /* Verify that a valid address has been provided.
   * NOTE(review): EBADF is an odd errno for a bad address (EINVAL or
   * EAFNOSUPPORT would be conventional) -- confirm before changing.
   */

#ifdef CONFIG_NET_IPv6
  if (to->sa_family != AF_INET6 || tolen < sizeof(struct sockaddr_in6))
#else
  if (to->sa_family != AF_INET || tolen < sizeof(struct sockaddr_in))
#endif
    {
      ndbg("ERROR: Invalid address\n");
      err = EBADF;
      goto errout;
    }

  /* Verify that the psock corresponds to valid, allocated socket */

  if (!psock || psock->s_crefs <= 0)
    {
      ndbg("ERROR: Invalid socket\n");
      err = EBADF;
      goto errout;
    }

  /* If this is a connected socket, then return EISCONN */

  if (psock->s_type != SOCK_DGRAM)
    {
      ndbg("ERROR: Connected socket\n");
      err = EISCONN;
      goto errout;
    }

  /* Make sure that the IP address mapping is in the ARP table */

#ifdef CONFIG_NET_ARP_SEND
  ret = arp_send(into->sin_addr.s_addr);
  if (ret < 0)
    {
      ndbg("ERROR: Not reachable\n");
      err = ENETUNREACH;
      goto errout;
    }
#endif

  /* Perform the UDP sendto operation */

#ifdef CONFIG_NET_UDP
  /* Set the socket state to sending */

  psock->s_flags = _SS_SETSTATE(psock->s_flags, _SF_SEND);

  /* Initialize the state structure.  This is done with interrupts
   * disabled because we don't want anything to happen until we
   * are ready.
   */

  save = net_lock();
  memset(&state, 0, sizeof(struct sendto_s));
  sem_init(&state.st_sem, 0, 0);
  state.st_buflen = len;
  state.st_buffer = buf;

  /* Set the initial time for calculating timeouts */

#ifdef CONFIG_NET_SENDTO_TIMEOUT
  state.st_sock = psock;
  state.st_time = clock_systimer();
#endif

  /* Setup the UDP socket */

  conn = (FAR struct udp_conn_s *)psock->s_conn;
  ret = udp_connect(conn, into);
  if (ret < 0)
    {
      net_unlock(save);
      err = -ret;
      goto errout;
    }

  /* Set up the callback in the connection */

  state.st_cb = udp_callback_alloc(conn);
  if (state.st_cb)
    {
      state.st_cb->flags = UDP_POLL;
      state.st_cb->priv  = (void*)&state;
      state.st_cb->event = sendto_interrupt;

      /* Notify the device driver of the availabilty of TX data */

      netdev_txnotify(conn->ripaddr);

      /* Wait for either the receive to complete or for an error/timeout to
       * occur.  NOTES: (1) net_lockedwait will also terminate if a signal
       * is received, (2) interrupts may be disabled!  They will be
       * re-enabled while the task sleeps and automatically re-enabled when
       * the task restarts.
       */

      net_lockedwait(&state.st_sem);

      /* Make sure that no further interrupts are processed */

      udp_callback_free(conn, state.st_cb);
    }

  net_unlock(save);
  sem_destroy(&state.st_sem);

  /* Set the socket state to idle */

  psock->s_flags = _SS_SETSTATE(psock->s_flags, _SF_IDLE);

  /* Check for errors */

  if (state.st_sndlen < 0)
    {
      err = -state.st_sndlen;
      goto errout;
    }

  /* Success */

  return state.st_sndlen;
#else
  err = ENOSYS;
#endif

errout:
  set_errno(err);
  return ERROR;
}
/* Set up poll/select support for the TCP socket PSOCK on FDS.
 *
 * Allocates a poll info container and a TCP callback, wires the callback
 * to tcp_poll_interrupt, and immediately reports any events that are
 * already pending (readable data, backlog, or lost connection).
 * Returns OK, -ENOMEM, or -EBUSY if no callback could be allocated.
 */
int tcp_pollsetup(FAR struct socket *psock, FAR struct pollfd *fds)
{
  FAR struct tcp_conn_s *conn = psock->s_conn;
  FAR struct tcp_poll_s *info;
  FAR struct devif_callback_s *cb;
  net_lock_t flags;
  int ret;

  /* Sanity check */

#ifdef CONFIG_DEBUG
  if (!conn || !fds)
    {
      return -EINVAL;
    }
#endif

  /* Allocate a container to hold the poll information */

  info = (FAR struct tcp_poll_s *)kmm_malloc(sizeof(struct tcp_poll_s));
  if (!info)
    {
      return -ENOMEM;
    }

  /* Some of the following must be atomic */

  flags = net_lock();

  /* Allocate a TCP/IP callback structure */

  cb = tcp_callback_alloc(conn);
  if (!cb)
    {
      ret = -EBUSY;
      goto errout_with_lock;
    }

  /* Initialize the poll info container */

  info->psock = psock;
  info->fds   = fds;
  info->cb    = cb;

  /* Initialize the callback structure.  Save the reference to the info
   * structure as callback private data so that it will be available during
   * callback processing.
   */

  cb->flags = (TCP_NEWDATA | TCP_BACKLOG | TCP_POLL | TCP_CLOSE | TCP_ABORT | TCP_TIMEDOUT);
  cb->priv  = (FAR void *)info;
  cb->event = tcp_poll_interrupt;

  /* Save the reference in the poll info structure as fds private as well
   * for use during poll teardown as well.
   */

  fds->priv = (FAR void *)info;

#ifdef CONFIG_NET_TCPBACKLOG
  /* Check for read data or backlogged connection availability now */

  if (!IOB_QEMPTY(&conn->readahead) || tcp_backlogavailable(conn))
#else
  /* Check for read data availability now */

  if (!IOB_QEMPTY(&conn->readahead))
#endif
    {
      /* Normal data may be read without blocking. */

      fds->revents |= (POLLRDNORM & fds->events);
    }

  /* Check for a loss of connection events.  We need to be careful here.
   * There are four possibilities:
   *
   * 1) The socket is connected and we are waiting for data availability
   *    events.
   *
   *    __SS_ISCONNECTED(f) == true
   *    __SS_ISLISTENING(f) == false
   *    __SS_ISCLOSED(f)    == false
   *
   *    Action: Wait for data availability events
   *
   * 2) This is a listener socket that was never connected and we are
   *    waiting for connection events.
   *
   *    __SS_ISCONNECTED(f) == false
   *    __SS_ISLISTENING(f) == true
   *    __SS_ISCLOSED(f)    == false
   *
   *    Action: Wait for connection events
   *
   * 3) This socket was previously connected, but the peer has gracefully
   *    closed the connection.
   *
   *    __SS_ISCONNECTED(f) == false
   *    __SS_ISLISTENING(f) == false
   *    __SS_ISCLOSED(f)    == true
   *
   *    Action: Return with POLLHUP|POLLERR events
   *
   * 4) This socket was previously connected, but we lost the connection
   *    due to some exceptional event.
   *
   *    __SS_ISCONNECTED(f) == false
   *    __SS_ISLISTENING(f) == false
   *    __SS_ISCLOSED(f)    == false
   *
   *    Action: Return with POLLHUP|POLLERR events
   */

  if (!_SS_ISCONNECTED(psock->s_flags) && !_SS_ISLISTENING(psock->s_flags))
    {
      /* We were previously connected but lost the connection either due
       * to a graceful shutdown by the remote peer or because of some
       * exceptional event.
       */

      fds->revents |= (POLLERR | POLLHUP);
    }

  /* Check if any requested events are already in effect */

  if (fds->revents != 0)
    {
      /* Yes.. then signal the poll logic */

      sem_post(fds->sem);
    }

  net_unlock(flags);
  return OK;

errout_with_lock:
  kmm_free(info);
  net_unlock(flags);
  return ret;
}
/* Send LEN bytes from BUF on the connected TCP socket PSOCK.
 *
 * Validates the socket, ensures the peer's link-layer address mapping
 * exists (ARP / neighbor discovery), then registers a TCP callback and
 * blocks on snd_sem until the transfer completes or fails.
 *
 * Returns the number of bytes sent, or ERROR with errno set.
 */
ssize_t psock_tcp_send(FAR struct socket *psock, FAR const void *buf, size_t len)
{
  FAR struct tcp_conn_s *conn = (FAR struct tcp_conn_s *)psock->s_conn;
  struct send_s state;
  net_lock_t save;
  int err;
  int ret = OK;

  /* Verify that the sockfd corresponds to valid, allocated socket */

  if (!psock || psock->s_crefs <= 0)
    {
      ndbg("ERROR: Invalid socket\n");
      err = EBADF;
      goto errout;
    }

  /* If this is an un-connected socket, then return ENOTCONN */

  if (psock->s_type != SOCK_STREAM || !_SS_ISCONNECTED(psock->s_flags))
    {
      ndbg("ERROR: Not connected\n");
      err = ENOTCONN;
      goto errout;
    }

  /* Make sure that we have the IP address mapping */

  conn = (FAR struct tcp_conn_s *)psock->s_conn;
  DEBUGASSERT(conn);

#if defined(CONFIG_NET_ARP_SEND) || defined(CONFIG_NET_ICMPv6_NEIGHBOR)
#ifdef CONFIG_NET_ARP_SEND
#ifdef CONFIG_NET_ICMPv6_NEIGHBOR
  if (psock->s_domain == PF_INET)
#endif
    {
      /* Make sure that the IP address mapping is in the ARP table */

      ret = arp_send(conn->u.ipv4.raddr);
    }
#endif /* CONFIG_NET_ARP_SEND */

#ifdef CONFIG_NET_ICMPv6_NEIGHBOR
#ifdef CONFIG_NET_ARP_SEND
  else
#endif
    {
      /* Make sure that the IP address mapping is in the Neighbor Table */

      ret = icmpv6_neighbor(conn->u.ipv6.raddr);
    }
#endif /* CONFIG_NET_ICMPv6_NEIGHBOR */

  /* Did we successfully get the address mapping? */

  if (ret < 0)
    {
      ndbg("ERROR: Not reachable\n");
      err = ENETUNREACH;
      goto errout;
    }
#endif /* CONFIG_NET_ARP_SEND || CONFIG_NET_ICMPv6_NEIGHBOR */

  /* Set the socket state to sending */

  psock->s_flags = _SS_SETSTATE(psock->s_flags, _SF_SEND);

  /* Perform the TCP send operation */

  /* Initialize the state structure.  This is done with interrupts
   * disabled because we don't want anything to happen until we
   * are ready.
   */

  save = net_lock();
  memset(&state, 0, sizeof(struct send_s));
  (void)sem_init(&state.snd_sem, 0, 0); /* Doesn't really fail */
  state.snd_sock   = psock;             /* Socket descriptor to use */
  state.snd_buflen = len;               /* Number of bytes to send */
  state.snd_buffer = buf;               /* Buffer to send from */

  if (len > 0)
    {
      /* Allocate resources to receive a callback */

      state.snd_cb = tcp_callback_alloc(conn);
      if (state.snd_cb)
        {
          /* Get the initial sequence number that will be used */

          state.snd_isn = tcp_getsequence(conn->sndseq);

          /* There is no outstanding, unacknowledged data after this
           * initial sequence number.
           */

          conn->unacked = 0;

          /* Set the initial time for calculating timeouts */

#ifdef CONFIG_NET_SOCKOPTS
          state.snd_time = clock_systimer();
#endif

          /* Set up the callback in the connection */

          state.snd_cb->flags = (TCP_ACKDATA | TCP_REXMIT | TCP_POLL | TCP_DISCONN_EVENTS);
          state.snd_cb->priv  = (FAR void *)&state;
          state.snd_cb->event = tcpsend_interrupt;

          /* Notify the device driver of the availability of TX data */

          send_txnotify(psock, conn);

          /* Wait for the send to complete or an error to occur:  NOTES:
           * (1) net_lockedwait will also terminate if a signal is
           * received, (2) interrupts may be disabled!  They will be
           * re-enabled while the task sleeps and automatically re-enabled
           * when the task restarts.
           */

          ret = net_lockedwait(&state.snd_sem);

          /* Make sure that no further interrupts are processed */

          tcp_callback_free(conn, state.snd_cb);
        }
    }

  sem_destroy(&state.snd_sem);
  net_unlock(save);

  /* Set the socket state to idle */

  psock->s_flags = _SS_SETSTATE(psock->s_flags, _SF_IDLE);

  /* Check for a errors.  Errors are signalled by negative errno values
   * for the send length
   */

  if (state.snd_sent < 0)
    {
      err = state.snd_sent;
      goto errout;
    }

  /* If net_lockedwait failed, then we were probably reawakened by a
   * signal.  In this case, net_lockedwait will have set errno
   * appropriately.
   */

  if (ret < 0)
    {
      err = -ret;
      goto errout;
    }

  /* Return the number of bytes actually sent */

  return state.snd_sent;

errout:
  set_errno(err);
  return ERROR;
}
/* Seek within the file behind dcload handle HND.  Returns the new file
 * offset or -1 on error.
 *
 * With BENPATCH, seeks that land inside the handle's read-ahead buffer
 * are satisfied by adjusting the buffer cursor (no host round-trip);
 * other seeks go to the host and invalidate the buffer.  The sentinel
 * `skip` marks "already handled / give up" in the whence switch.
 */
off_t dcload_seek(uint32 hnd, off_t offset, int whence)
{
    int oldirq = 0;   /* used implicitly by STOPIRQ/STARTIRQ */
    off_t ret = -1;

    net_lock();

#ifndef BENPATCH
    if (hnd)
        ret = sc_lseek(hnd-1, offset, whence);
#else
    if (hnd) {
        dcload_handler_t * dh = dcload_get_buffer_handler(hnd);

        if (!dh) {
            /* Unbuffered handle: pass straight through to the host */
            hnd--; /* KOS uses 0 for error, not -1 */
            STOPIRQ;
            ret = sc_lseek(hnd, offset, whence);
            STARTIRQ;
        } else {
            const int skip = 0x666;
            off_t cur, start, end;

            hnd = dh->hdl-1;
            switch (whence) {
            case SEEK_END:
                /* File size unknown locally; must ask the host */
                break;
            case SEEK_SET:
            case SEEK_CUR:
                cur = retell(dh,0);
                if (cur == -1) {
                    /* Host position unknown: buffer is unusable */
                    whence = skip;
                    break;
                }
                /* end is the buffer end file position */
                end = cur;
                /* start is the buffer start file position */
                start = end - dh->cnt;
                /* cur is the buffer current file position */
                cur = start + dh->cur;
                /* offset is the ABSOLUTE seeking position */
                if (whence == SEEK_CUR) {
                    offset += cur;
                    whence = SEEK_SET;
                }
                if (offset >= start && offset <= end) {
                    /* Seeking in the buffer :) */
                    dh->cur = offset - start;
                    whence = skip;
                    ret = offset;
                }
                break;
            default:
                whence = skip;
                break;
            }

            if (whence != skip) {
                /* Seek on the host and invalidate the local buffer */
                STOPIRQ;
                ret = sc_lseek(hnd, offset, whence);
                STARTIRQ;
                dh->cur = dh->cnt = 0;
                dh->tell = ret;
            }
        }
    }
#endif

    net_unlock();

    return ret;
}