/***
 * rt_packet_recvmsg - receive one packet on an AF_PACKET rtsocket
 * (RTDM device-context API variant, semaphore-based wakeup).
 *
 * @sockctx:   RTDM device context; the rtsocket lives in its private area
 * @user_info: RTDM user-space caller info (unused in the visible part)
 * @msg:       message header; msg_iov receives the packet data
 * @msg_flags: MSG_* flags (MSG_DONTWAIT honored below)
 *
 * NOTE(review): this copy of the function is truncated in this file — it
 * ends right after the RTNET_ASSERT below with no closing brace. Only
 * comments were added here; every code token is unchanged.
 */
ssize_t rt_packet_recvmsg(struct rtdm_dev_context *sockctx,
                          rtdm_user_info_t *user_info, struct msghdr *msg,
                          int msg_flags)
{
    struct rtsocket *sock = (struct rtsocket *)&sockctx->dev_private;
    /* total capacity of the caller-supplied scatter/gather buffer */
    size_t len = rt_iovec_len(msg->msg_iov, msg->msg_iovlen);
    size_t copy_len;
    size_t real_len;
    struct rtskb *skb;
    struct ethhdr *eth;
    struct sockaddr_ll *sll;
    int ret;
    nanosecs_t timeout = sock->timeout;

    /* non-blocking receive? */
    if (testbits(msg_flags, MSG_DONTWAIT))
        timeout = -1;

    /* wait until a packet chain has been queued (counted by pending_sem) */
    ret = rtdm_sem_timeddown(&sock->pending_sem, timeout, NULL);
    if (unlikely(ret < 0)) {
        /* anything but would-block/timeout is mapped to -EBADF */
        if ((ret != -EWOULDBLOCK) && (ret != -ETIMEDOUT))
            ret = -EBADF; /* socket has been closed */
        return ret;
    }

    /* the semaphore was up, so a chain must be present */
    skb = rtskb_dequeue_chain(&sock->incoming);
    RTNET_ASSERT(skb != NULL, return -EFAULT;);
/***
 * rt_packet_recvmsg - receive one packet on an AF_PACKET rtsocket
 * (modern rtdm_fd API variant).
 *
 * @fd:        RTDM file descriptor; the rtsocket is its private data
 * @msg:       message header; msg_iov receives the packet data
 * @msg_flags: MSG_* flags (MSG_DONTWAIT honored below)
 *
 * NOTE(review): this copy of the function is truncated in this file — it
 * ends right after the RTNET_ASSERT below with no closing brace. Only
 * comments were added here; every code token is unchanged.
 */
static ssize_t rt_packet_recvmsg(struct rtdm_fd *fd, struct msghdr *msg,
                                 int msg_flags)
{
    struct rtsocket *sock = rtdm_fd_to_private(fd);
    /* total capacity of the caller-supplied scatter/gather buffer */
    size_t len = rt_iovec_len(msg->msg_iov, msg->msg_iovlen);
    size_t copy_len;
    size_t real_len;
    struct rtskb *rtskb;
    struct sockaddr_ll *sll;
    int ret;
    nanosecs_rel_t timeout = sock->timeout;

    /* non-blocking receive? */
    if (msg_flags & MSG_DONTWAIT)
        timeout = -1;

    /* wait until a packet chain has been queued (counted by pending_sem) */
    ret = rtdm_sem_timeddown(&sock->pending_sem, timeout, NULL);
    if (unlikely(ret < 0))
        switch (ret) {
        case -EWOULDBLOCK:
        case -ETIMEDOUT:
        case -EINTR:
            /* pass well-known wait results through unchanged */
            return ret;

        default:
            return -EBADF; /* socket has been closed */
        }

    /* the semaphore was up, so a chain must be present */
    rtskb = rtskb_dequeue_chain(&sock->incoming);
    RTNET_ASSERT(rtskb != NULL, return -EFAULT;);
/***
 * rt_udp_recvmsg - receive one UDP datagram on an rtsocket
 * (RTDM device-context API variant, semaphore-based wakeup).
 *
 * @context:   RTDM device context; the rtsocket lives in its private area
 * @user_info: RTDM user-space caller info (unused in the visible part)
 * @msg:       message header; msg_iov receives the datagram payload
 * @msg_flags: MSG_* flags (MSG_DONTWAIT honored below)
 *
 * NOTE(review): this copy of the function is truncated in this file — it
 * ends right after the RTNET_ASSERT below with no closing brace. Only
 * comments were added here; every code token is unchanged.
 */
ssize_t rt_udp_recvmsg(struct rtdm_dev_context *context,
                       rtdm_user_info_t *user_info, struct msghdr *msg,
                       int msg_flags)
{
    struct rtsocket *sock = (struct rtsocket *)&context->dev_private;
    /* total capacity of the caller-supplied scatter/gather buffer */
    size_t len = rt_iovec_len(msg->msg_iov, msg->msg_iovlen);
    struct rtskb *skb;
    struct rtskb *first_skb;
    size_t copied = 0;
    size_t block_size;
    size_t data_len;
    struct udphdr *uh;
    struct sockaddr_in *sin;
    nanosecs_t timeout = sock->timeout;
    int ret;

    /* non-blocking receive? */
    if (test_bit(RT_SOCK_NONBLOCK, &context->context_flags) ||
        (msg_flags & MSG_DONTWAIT))
        timeout = -1;

    /* wait until a datagram has been queued (counted by pending_sem) */
    ret = rtos_sem_down(&sock->pending_sem, timeout);
    if (unlikely(ret < 0)) {
        /* normalize the wait result to the socket-API error codes */
        if (ret == -EWOULDBLOCK)
            ret = -EAGAIN;
        else if (ret != -ETIMEDOUT)
            ret = -ENOTSOCK;
        return ret;
    }

    /* the semaphore was up, so a chain must be present */
    skb = rtskb_dequeue_chain(&sock->incoming);
    RTNET_ASSERT(skb != NULL, return -EFAULT;);
/***
 * rt_packet_recvmsg - receive one packet on an AF_PACKET rtsocket.
 *
 * @sock:  the rtsocket to receive on
 * @msg:   message header; msg_name (optional) receives a sockaddr_ll,
 *         msg_iov receives the packet data, msg_flags may get MSG_TRUNC
 * @len:   capacity of the caller's iovec buffer
 * @flags: MSG_* flags (MSG_DONTWAIT and MSG_PEEK honored)
 *
 * Returns the full packet length (even if truncated to @len), 0 when a
 * non-blocking call finds no packet, or a negative error code.
 *
 * Fix: the blocking wait had its timeout test inverted — it used the
 * bounded wait when sock->timeout was ZERO and the infinite wait when a
 * timeout was actually configured. The other receive paths in this file
 * treat a zero timeout as "wait forever" and a non-zero timeout as a
 * bounded wait; this now matches them.
 */
int rt_packet_recvmsg(struct rtsocket *sock, struct msghdr *msg, size_t len,
                      int flags)
{
    size_t copy_len, real_len;
    struct rtskb *skb;
    struct ethhdr *eth;
    struct sockaddr_ll *sll;
    int ret;

    /* block on receive event unless the socket or the call is non-blocking */
    if (((sock->flags & RT_SOCK_NONBLOCK) == 0) &&
        ((flags & MSG_DONTWAIT) == 0))
        while ((skb = rtskb_dequeue_chain(&sock->incoming)) == NULL) {
            if (!RTOS_TIME_IS_ZERO(&sock->timeout)) {
                /* bounded wait; zero timeout means "wait forever" */
                ret = rtos_event_wait_timeout(&sock->wakeup_event,
                                              &sock->timeout);
                if (ret == RTOS_EVENT_TIMEOUT)
                    return -ETIMEDOUT;
            } else
                ret = rtos_event_wait(&sock->wakeup_event);

            if (RTOS_EVENT_ERROR(ret))
                return -ENOTSOCK;
        }
    else {
        skb = rtskb_dequeue_chain(&sock->incoming);
        if (skb == NULL)
            /* NOTE(review): 0 is ambiguous with a zero-length packet; the
             * newer variants in this file return -EAGAIN here. Kept as 0
             * so existing callers are not broken — confirm before changing. */
            return 0;
    }

    eth = skb->mac.ethernet;
    sll = msg->msg_name;

    /* copy the address */
    msg->msg_namelen = sizeof(*sll);
    if (sll != NULL) {
        sll->sll_family = AF_PACKET;
        sll->sll_protocol = skb->protocol;
        sll->sll_ifindex = skb->rtdev->ifindex;
        sll->sll_pkttype = skb->pkt_type;

        /* Ethernet specific */
        sll->sll_hatype = ARPHRD_ETHER;
        sll->sll_halen = ETH_ALEN;
        memcpy(sll->sll_addr, eth->h_source, ETH_ALEN);
    }

    copy_len = real_len = skb->len;

    /* The data must not be longer than the available buffer size */
    if (copy_len > len) {
        copy_len = len;
        msg->msg_flags |= MSG_TRUNC;
    }

    /* copy the data */
    rt_memcpy_tokerneliovec(msg->msg_iov, skb->data, copy_len);

    /* normal read consumes the buffer; MSG_PEEK puts it back in front */
    if ((flags & MSG_PEEK) == 0) {
        rtdev_dereference(skb->rtdev);
        kfree_rtskb(skb);
    } else
        rtskb_queue_head(&sock->incoming, skb);

    return real_len;
}
/*** * rt_packet_recvmsg */ ssize_t rt_packet_recvmsg(struct rtdm_dev_context *context, int call_flags, struct msghdr *msg, int msg_flags) { struct rtsocket *sock = (struct rtsocket *)&context->dev_private; size_t len = rt_iovec_len(msg->msg_iov, msg->msg_iovlen); size_t copy_len; size_t real_len; struct rtskb *skb; struct ethhdr *eth; struct sockaddr_ll *sll; int ret; unsigned long flags; rtos_time_t timeout; /* block on receive event */ if (!test_bit(RT_SOCK_NONBLOCK, &context->context_flags) && ((msg_flags & MSG_DONTWAIT) == 0)) while ((skb = rtskb_dequeue_chain(&sock->incoming)) == NULL) { rtos_spin_lock_irqsave(&sock->param_lock, flags); memcpy(&timeout, &sock->timeout, sizeof(timeout)); rtos_spin_unlock_irqrestore(&sock->param_lock, flags); if (!RTOS_TIME_IS_ZERO(&timeout)) { ret = rtos_event_sem_wait_timed(&sock->wakeup_event, &timeout); if (ret == RTOS_EVENT_TIMEOUT) return -ETIMEDOUT; } else ret = rtos_event_sem_wait(&sock->wakeup_event); if (RTOS_EVENT_ERROR(ret)) return -ENOTSOCK; } else { skb = rtskb_dequeue_chain(&sock->incoming); if (skb == NULL) return -EAGAIN; } eth = skb->mac.ethernet; sll = msg->msg_name; /* copy the address */ msg->msg_namelen = sizeof(*sll); if (sll != NULL) { sll->sll_family = AF_PACKET; sll->sll_protocol = skb->protocol; sll->sll_ifindex = skb->rtdev->ifindex; sll->sll_pkttype = skb->pkt_type; /* Ethernet specific */ sll->sll_hatype = ARPHRD_ETHER; sll->sll_halen = ETH_ALEN; memcpy(sll->sll_addr, eth->h_source, ETH_ALEN); } copy_len = real_len = skb->len; /* The data must not be longer than the available buffer size */ if (copy_len > len) { copy_len = len; msg->msg_flags |= MSG_TRUNC; } /* copy the data */ rt_memcpy_tokerneliovec(msg->msg_iov, skb->data, copy_len); if ((msg_flags & MSG_PEEK) == 0) { rtdev_dereference(skb->rtdev); kfree_rtskb(skb); } else rtskb_queue_head(&sock->incoming, skb); return real_len; }
/***
 * rt_udp_recvmsg - receive one UDP datagram on an rtsocket
 * (RTDM device-context API variant, event-semaphore wakeup).
 *
 * @context:   RTDM device context; the rtsocket lives in its private area
 * @call_flags: RTDM call flags (unused here)
 * @msg:       message header; msg_name (optional) receives a sockaddr_in,
 *             msg_iov receives the payload, msg_flags may get MSG_TRUNC
 * @msg_flags: MSG_* flags (MSG_DONTWAIT and MSG_PEEK honored)
 *
 * Returns the number of payload bytes copied, -EAGAIN when a non-blocking
 * call finds no datagram, -ETIMEDOUT on receive timeout, or -ENOTSOCK
 * when the wait fails (socket closed).
 */
ssize_t rt_udp_recvmsg(struct rtdm_dev_context *context, int call_flags,
                       struct msghdr *msg, int msg_flags)
{
    struct rtsocket *sock = (struct rtsocket *)&context->dev_private;
    /* total capacity of the caller-supplied scatter/gather buffer */
    size_t len = rt_iovec_len(msg->msg_iov, msg->msg_iovlen);
    struct rtskb *skb;
    struct rtskb *first_skb;
    size_t copied = 0;
    size_t block_size;
    size_t data_len;
    struct udphdr *uh;
    struct sockaddr_in *sin;
    int ret;
    unsigned long flags;
    rtos_time_t timeout;

    /* block on receive event */
    if (!test_bit(RT_SOCK_NONBLOCK, &context->context_flags) &&
        ((msg_flags & MSG_DONTWAIT) == 0))
        while ((skb = rtskb_dequeue_chain(&sock->incoming)) == NULL) {
            /* snapshot the (concurrently settable) timeout under the lock */
            rtos_spin_lock_irqsave(&sock->param_lock, flags);
            memcpy(&timeout, &sock->timeout, sizeof(timeout));
            rtos_spin_unlock_irqrestore(&sock->param_lock, flags);

            /* non-zero timeout = bounded wait; zero = wait forever */
            if (!RTOS_TIME_IS_ZERO(&timeout)) {
                ret = rtos_event_sem_wait_timed(&sock->wakeup_event,
                                                &timeout);
                if (ret == RTOS_EVENT_TIMEOUT)
                    return -ETIMEDOUT;
            } else
                ret = rtos_event_sem_wait(&sock->wakeup_event);

            if (RTOS_EVENT_ERROR(ret))
                return -ENOTSOCK;
        }
    else {
        /* single dequeue attempt, never wait */
        skb = rtskb_dequeue_chain(&sock->incoming);
        if (skb == NULL)
            return -EAGAIN;
    }

    uh = skb->h.uh;
    /* payload length as announced by the UDP header */
    data_len = ntohs(uh->len) - sizeof(struct udphdr);
    sin = msg->msg_name;

    /* copy the address */
    msg->msg_namelen = sizeof(*sin);
    if (sin) {
        sin->sin_family = AF_INET;
        sin->sin_port = uh->source;
        sin->sin_addr.s_addr = skb->nh.iph->saddr;
    }

    /* remove the UDP header */
    __rtskb_pull(skb, sizeof(struct udphdr));

    /* keep the chain head for free / re-queue after the loop */
    first_skb = skb;

    /* iterate over all IP fragments */
    do {
        /* cap this fragment at the remaining announced payload */
        rtskb_trim(skb, data_len);

        block_size = skb->len;
        copied += block_size;
        data_len -= block_size;

        /* The data must not be longer than the available buffer size */
        if (copied > len) {
            /* shrink the final copy to what still fits, then stop */
            block_size -= copied - len;
            copied = len;
            msg->msg_flags |= MSG_TRUNC;

            /* copy the data */
            rt_memcpy_tokerneliovec(msg->msg_iov, skb->data, block_size);

            break;
        }

        /* copy the data */
        rt_memcpy_tokerneliovec(msg->msg_iov, skb->data, block_size);

        /* next fragment */
        skb = skb->next;
    } while (skb != NULL);

    /* did we copy all bytes? (fragments shorter than announced length) */
    if (data_len > 0)
        msg->msg_flags |= MSG_TRUNC;

    if ((msg_flags & MSG_PEEK) == 0)
        kfree_rtskb(first_skb);
    else {
        /* MSG_PEEK: restore the UDP header and re-queue the whole chain */
        __rtskb_push(first_skb, sizeof(struct udphdr));
        rtskb_queue_head(&sock->incoming, first_skb);
    }

    return copied;
}