/*
 * rtcan_socket_init - set up a freshly created RT-CAN socket.
 *
 * @fd: RTDM file descriptor whose private area holds the rtcan_socket.
 *
 * Initializes the receive ring indices and semaphore, marks the socket
 * unbound, clears filter list and error mask, arms infinite TX/RX
 * timeouts, and finally links the socket into the global socket list
 * under rtcan_recv_list_lock.
 */
void rtcan_socket_init(struct rtdm_fd *fd)
{
	struct rtcan_socket *sock = rtdm_fd_to_private(fd);
	rtdm_lockctx_t lock_ctx;

	/* Receiver blocks on this semaphore until a frame arrives. */
	rtdm_sem_init(&sock->recv_sem, 0);

	/* Empty receive ring buffer. */
	sock->recv_head = 0;
	sock->recv_tail = 0;
	atomic_set(&sock->ifindex, 0);

	/* Not bound to any interface / no reception filters yet. */
	sock->flistlen = RTCAN_SOCK_UNBOUND;
	sock->flist = NULL;
	sock->err_mask = 0;
	sock->rx_buf_full = 0;
#ifdef CONFIG_XENO_DRIVERS_CAN_LOOPBACK
	/* Loopback of own transmitted frames is on by default. */
	sock->loopback = 1;
#endif

	/* Block indefinitely on send/receive unless changed via ioctl. */
	sock->tx_timeout = RTDM_TIMEOUT_INFINITE;
	sock->rx_timeout = RTDM_TIMEOUT_INFINITE;

	INIT_LIST_HEAD(&sock->tx_wait_head);

	/* Publish the socket on the global list; IRQ-safe lock because
	   receive-side code walks this list from interrupt context. */
	rtdm_lock_get_irqsave(&rtcan_recv_list_lock, lock_ctx);
	list_add(&sock->socket_list, &rtcan_socket_list);
	rtdm_lock_put_irqrestore(&rtcan_recv_list_lock, lock_ctx);
}
/*** * rt_packet_socket - initialize a packet socket */ static int rt_packet_socket(struct rtdm_fd *fd, int protocol) { struct rtsocket *sock = rtdm_fd_to_private(fd); int ret; if ((ret = rt_socket_init(fd, protocol)) != 0) return ret; sock->prot.packet.packet_type.type = protocol; sock->prot.packet.ifindex = 0; sock->prot.packet.packet_type.trylock = rt_packet_trylock; sock->prot.packet.packet_type.unlock = rt_packet_unlock; /* if protocol is non-zero, register the packet type */ if (protocol != 0) { sock->prot.packet.packet_type.handler = rt_packet_rcv; sock->prot.packet.packet_type.err_handler = NULL; if ((ret = rtdev_add_pack(&sock->prot.packet.packet_type)) < 0) { rt_socket_cleanup(fd); return ret; } } return 0; }
/***
 * rt_packet_recvmsg
 *
 * Receive path of a packet socket. Blocks (up to the socket timeout,
 * or not at all with MSG_DONTWAIT) until a frame is pending, then
 * dequeues it from the incoming queue.
 *
 * NOTE(review): this definition is truncated in the visible chunk;
 * the copy-out of the dequeued rtskb follows beyond this excerpt.
 */
static ssize_t rt_packet_recvmsg(struct rtdm_fd *fd, struct msghdr *msg,
				 int msg_flags)
{
	struct rtsocket *sock = rtdm_fd_to_private(fd);
	/* Total capacity of the caller-supplied scatter/gather buffers. */
	size_t len = rt_iovec_len(msg->msg_iov, msg->msg_iovlen);
	size_t copy_len;
	size_t real_len;
	struct rtskb *rtskb;
	struct sockaddr_ll *sll;
	int ret;
	nanosecs_rel_t timeout = sock->timeout;

	/* non-blocking receive? */
	if (msg_flags & MSG_DONTWAIT)
		timeout = -1;

	/* Wait until the receive path signals a pending frame. */
	ret = rtdm_sem_timeddown(&sock->pending_sem, timeout, NULL);
	if (unlikely(ret < 0))
		switch (ret) {
		case -EWOULDBLOCK:
		case -ETIMEDOUT:
		case -EINTR:
			return ret;
		default:
			return -EBADF; /* socket has been closed */
		}

	/* Semaphore guaranteed a frame; dequeue must succeed. */
	rtskb = rtskb_dequeue_chain(&sock->incoming);
	RTNET_ASSERT(rtskb != NULL, return -EFAULT;);
/*** * rt_socket_init - initialises a new socket structure */ int __rt_socket_init(struct rtdm_fd *fd, unsigned short protocol, struct module *module) { struct rtsocket *sock = rtdm_fd_to_private(fd); unsigned int pool_size; sock->flags = 0; sock->callback_func = NULL; rtskb_queue_init(&sock->incoming); sock->timeout = 0; rtdm_lock_init(&sock->param_lock); rtdm_sem_init(&sock->pending_sem, 0); pool_size = __rt_bare_socket_init(fd, protocol, RTSKB_PRIO_VALUE(SOCK_DEF_PRIO, RTSKB_DEF_RT_CHANNEL), socket_rtskbs, module); sock->pool_size = pool_size; mutex_init(&sock->pool_nrt_lock); if (pool_size < socket_rtskbs) { /* fix statistics */ if (pool_size == 0) rtskb_pools--; rt_socket_cleanup(fd); return -ENOMEM; } return 0; }
/* Open handler of the TDMA device: start with no task waiting for a
 * cycle event. Always succeeds. */
static int tdma_dev_open(struct rtdm_fd *fd, int oflags)
{
	struct tdma_dev_ctx *dev_ctx = rtdm_fd_to_private(fd);

	dev_ctx->cycle_waiter = NULL;

	return 0;
}
/* Close handler of the TDMA device: if a task is still blocked waiting
 * for a cycle event, kick it out of its sleep so the fd can go away.
 *
 * NOTE(review): this definition is truncated in the visible chunk;
 * its closing brace lies beyond this excerpt.
 */
static void tdma_dev_close(struct rtdm_fd *fd)
{
	struct tdma_dev_ctx *ctx = rtdm_fd_to_private(fd);

	/* Test-and-unblock must be atomic against the waiter itself. */
	RTDM_EXECUTE_ATOMICALLY(
		if (ctx->cycle_waiter)
			rtdm_task_unblock(ctx->cycle_waiter);
	);
/*
 * rt_socket_select_bind - bind a socket to a selector for select().
 *
 * @fd:       RTDM file descriptor; private area holds the rtsocket.
 * @selector: selector to bind to.
 * @type:     event class; only XNSELECT_READ is supported.
 * @fd_index: index of this fd within the select set.
 *
 * Returns the result of rtdm_sem_select() for read binding, or
 * -EBADF for any other event class.
 *
 * Fix: dropped the trailing "return -EINVAL;" — every switch path
 * already returns, so that statement was unreachable dead code.
 */
int rt_socket_select_bind(struct rtdm_fd *fd, rtdm_selector_t *selector,
			  enum rtdm_selecttype type, unsigned fd_index)
{
	struct rtsocket *sock = rtdm_fd_to_private(fd);

	switch (type) {
	case XNSELECT_READ:
		/* Readability is signalled via the pending-frame semaphore. */
		return rtdm_sem_select(&sock->pending_sem, selector,
				       XNSELECT_READ, fd_index);
	default:
		return -EBADF;
	}
}
/***
 * rt_socket_cleanup - releases resources allocated for the socket
 *
 * @fd: RTDM file descriptor; private area holds the rtsocket.
 *
 * Teardown order matters: the pending semaphore is destroyed first,
 * then the rtskb pool is released under pool_nrt_lock with the
 * SKB_POOL_CLOSED flag set (so concurrent EXTPOOL ioctls bail out),
 * and the module reference is dropped last.
 */
void rt_socket_cleanup(struct rtdm_fd *fd)
{
	struct rtsocket *sock = rtdm_fd_to_private(fd);

	rtdm_sem_destroy(&sock->pending_sem);

	mutex_lock(&sock->pool_nrt_lock);

	/* Forbid further pool extension/shrink operations from racing
	   with the release below. */
	set_bit(SKB_POOL_CLOSED, &sock->flags);

	/* Pool may never have been populated (pool_size == 0 on a
	   partially failed init). */
	if (sock->pool_size > 0)
		rtskb_pool_release(&sock->skb_pool);

	mutex_unlock(&sock->pool_nrt_lock);

	/* Drop the reference taken in __rt_bare_socket_init(). */
	module_put(sock->owner);
}
/*
 * rtcan_socket_cleanup - tear down an RT-CAN socket on close.
 *
 * @fd: RTDM file descriptor whose private area holds the rtcan_socket.
 *
 * First wakes every sender still blocked on this socket — one per
 * atomic section so woken tasks can run and remove themselves — then
 * destroys the receive semaphore and unlinks the socket from the
 * global list.
 */
void rtcan_socket_cleanup(struct rtdm_fd *fd)
{
	struct rtcan_socket *sock = rtdm_fd_to_private(fd);
	struct tx_wait_queue *tx_waiting;
	rtdm_lockctx_t lock_ctx;
	int tx_list_empty;

	/* Wake up sleeping senders. This is re-entrant-safe. */
	do {
		cobalt_atomic_enter(lock_ctx);

		/* Is someone there? */
		if (list_empty(&sock->tx_wait_head))
			tx_list_empty = 1;
		else {
			tx_list_empty = 0;

			/* Get next entry pointing to a waiting task */
			tx_waiting = list_entry(sock->tx_wait_head.next,
						struct tx_wait_queue,
						tx_wait_list);

			/* Remove it from list */
			list_del(&tx_waiting->tx_wait_list);

			/* Wake task up (atomic section is left implicitly) */
			rtdm_task_unblock(tx_waiting->rt_task);
		}

		cobalt_atomic_leave(lock_ctx);
		/* Loop until no waiter is left; each pass wakes one task. */
	} while (!tx_list_empty);

	rtdm_sem_destroy(&sock->recv_sem);

	rtdm_lock_get_irqsave(&rtcan_recv_list_lock, lock_ctx);

	/* A non-NULL next pointer marks the socket as still linked;
	   clearing it makes this unlink idempotent. */
	if (sock->socket_list.next) {
		list_del(&sock->socket_list);
		sock->socket_list.next = NULL;
	}

	rtdm_lock_put_irqrestore(&rtcan_recv_list_lock, lock_ctx);
}
/*** * rt_packet_ioctl */ static int rt_packet_ioctl(struct rtdm_fd *fd, unsigned int request, void *arg) { struct rtsocket *sock = rtdm_fd_to_private(fd); struct _rtdm_setsockaddr_args *setaddr = arg; struct _rtdm_getsockaddr_args *getaddr = arg; /* fast path for common socket IOCTLs */ if (_IOC_TYPE(request) == RTIOC_TYPE_NETWORK) return rt_socket_common_ioctl(fd, request, arg); switch (request) { case _RTIOC_BIND: return rt_packet_bind(sock, setaddr->addr, setaddr->addrlen); case _RTIOC_GETSOCKNAME: return rt_packet_getsockname(sock, getaddr->addr, getaddr->addrlen); default: return rt_socket_if_ioctl(fd, request, arg); } }
/*
 * __rt_bare_socket_init - minimal common setup for an rtsocket.
 *
 * Takes a reference on @module, builds the socket's rtskb pool and
 * records protocol, priority and owner.
 *
 * Returns -EAFNOSUPPORT if the module reference cannot be taken, a
 * negative error from rtskb_pool_init() on pool failure, otherwise
 * the (non-negative) number of rtskbs placed in the pool.
 */
int __rt_bare_socket_init(struct rtdm_fd *fd, unsigned short protocol,
			  unsigned int priority, unsigned int pool_size,
			  struct module *module)
{
	struct rtsocket *sock = rtdm_fd_to_private(fd);
	int ret;

	if (!try_module_get(module))
		return -EAFNOSUPPORT;

	ret = rtskb_pool_init(&sock->skb_pool, pool_size, NULL, fd);
	if (ret < 0) {
		/* Pool setup failed: give the module reference back. */
		module_put(module);
		return ret;
	}

	sock->protocol = protocol;
	sock->priority = priority;
	sock->owner = module;

	/* Propagate the pool_init count to the caller. */
	return ret;
}
/*** * rt_packet_close */ static void rt_packet_close(struct rtdm_fd *fd) { struct rtsocket *sock = rtdm_fd_to_private(fd); struct rtpacket_type *pt = &sock->prot.packet.packet_type; struct rtskb *del; rtdm_lockctx_t context; rtdm_lock_get_irqsave(&sock->param_lock, context); if (pt->type != 0) { rtdev_remove_pack(pt); pt->type = 0; } rtdm_lock_put_irqrestore(&sock->param_lock, context); /* free packets in incoming queue */ while ((del = rtskb_dequeue(&sock->incoming)) != NULL) { kfree_rtskb(del); } rt_socket_cleanup(fd); }
/*** * rt_socket_common_ioctl */ int rt_socket_common_ioctl(struct rtdm_fd *fd, int request, void *arg) { struct rtsocket *sock = rtdm_fd_to_private(fd); int ret = 0; struct rtnet_callback *callback = arg; unsigned int rtskbs; rtdm_lockctx_t context; switch (request) { case RTNET_RTIOC_XMITPARAMS: sock->priority = *(unsigned int *)arg; break; case RTNET_RTIOC_TIMEOUT: sock->timeout = *(nanosecs_rel_t *)arg; break; case RTNET_RTIOC_CALLBACK: if (rtdm_fd_is_user(fd)) return -EACCES; rtdm_lock_get_irqsave(&sock->param_lock, context); sock->callback_func = callback->func; sock->callback_arg = callback->arg; rtdm_lock_put_irqrestore(&sock->param_lock, context); break; case RTNET_RTIOC_EXTPOOL: rtskbs = *(unsigned int *)arg; if (rtdm_in_rt_context()) return -ENOSYS; mutex_lock(&sock->pool_nrt_lock); if (test_bit(SKB_POOL_CLOSED, &sock->flags)) { mutex_unlock(&sock->pool_nrt_lock); return -EBADF; } ret = rtskb_pool_extend(&sock->skb_pool, rtskbs); sock->pool_size += ret; mutex_unlock(&sock->pool_nrt_lock); if (ret == 0 && rtskbs > 0) ret = -ENOMEM; break; case RTNET_RTIOC_SHRPOOL: rtskbs = *(unsigned int *)arg; if (rtdm_in_rt_context()) return -ENOSYS; mutex_lock(&sock->pool_nrt_lock); ret = rtskb_pool_shrink(&sock->skb_pool, rtskbs); sock->pool_size -= ret; mutex_unlock(&sock->pool_nrt_lock); if (ret == 0 && rtskbs > 0) ret = -EBUSY; break; default: ret = -EOPNOTSUPP; break; } return ret; }