/***
 * rt_socket_cleanup - releases resources allocated for the socket
 *
 * Destroys the socket's wakeup event, marks the private rtskb pool as
 * closed, and tries to shrink and release that pool.  Uses the *_rt pool
 * helpers when the context was created in real-time context, the plain
 * helpers otherwise.
 *
 * @context: RTDM device context embedding the rtsocket in dev_private
 *
 * Returns 0 on full cleanup, -EAGAIN while rtskbs are still outstanding
 * (pool could not be fully shrunk yet); the caller is expected to retry.
 */
int rt_socket_cleanup(struct rtdm_dev_context *context)
{
    struct rtsocket *sock = (struct rtsocket *)&context->dev_private;
    unsigned int    rtskbs;
    unsigned long   flags;


    rtos_event_sem_delete(&sock->wakeup_event);

    /* Close the pool against further RTIOC_EXTPOOL/SHRPOOL requests and
     * sample the current pool size atomically under param_lock. */
    rtos_spin_lock_irqsave(&sock->param_lock, flags);

    set_bit(SKB_POOL_CLOSED, &context->context_flags);
    rtskbs = atomic_read(&sock->pool_size);

    rtos_spin_unlock_irqrestore(&sock->param_lock, flags);

    if (rtskbs > 0) {
        if (test_bit(RTDM_CREATED_IN_NRT, &context->context_flags)) {
            /* Non-RT path: shrink returns how many rtskbs were actually
             * reclaimed; subtract only that from the accounted size. */
            rtskbs = rtskb_pool_shrink(&sock->skb_pool, rtskbs);
            atomic_sub(rtskbs, &sock->pool_size);
            /* Some buffers are still in flight - ask the caller to retry. */
            if (atomic_read(&sock->pool_size) > 0)
                return -EAGAIN;
            rtskb_pool_release(&sock->skb_pool);
        } else {
            /* RT path: same logic via the RT-safe pool helpers. */
            rtskbs = rtskb_pool_shrink_rt(&sock->skb_pool, rtskbs);
            atomic_sub(rtskbs, &sock->pool_size);
            if (atomic_read(&sock->pool_size) > 0)
                return -EAGAIN;
            rtskb_pool_release_rt(&sock->skb_pool);
        }
    }

    return 0;
}
/*** * rt_socket_cleanup - releases resources allocated for the socket */ int rt_socket_cleanup(struct rtdm_dev_context *sockctx) { struct rtsocket *sock = (struct rtsocket *)&sockctx->dev_private; int ret = 0; rtdm_sem_destroy(&sock->pending_sem); mutex_lock(&sock->pool_nrt_lock); set_bit(SKB_POOL_CLOSED, &sockctx->context_flags); if (sock->pool_size > 0) { sock->pool_size -= rtskb_pool_shrink(&sock->skb_pool, sock->pool_size); if (sock->pool_size > 0) ret = -EAGAIN; else rtskb_pool_release(&sock->skb_pool); } mutex_unlock(&sock->pool_nrt_lock); return ret; }
/***
 * rtdev_free
 *
 * Returns the device's extra rtskbs to the global pool, drops its
 * transmit lock, and frees the device structure.  Passing NULL is a
 * no-op.
 */
void rtdev_free (struct rtnet_device *rtdev)
{
    if (rtdev == NULL)
        return;

    rtskb_pool_shrink(&global_pool, rtdev->add_rtskbs);
    rtdev->stack_event = NULL;
/*  rtdev->rtdev_mbx = NULL; */
    rtos_res_lock_delete(&rtdev->xmit_lock);
    kfree(rtdev);
}
/***
 * rt_socket_common_ioctl - ioctl handler shared by all RTnet socket types
 *
 * Handles priority, timeout, callback registration, non-blocking mode,
 * and per-socket rtskb pool extension/shrinking.
 *
 * @context:    RTDM device context embedding the rtsocket in dev_private
 * @call_flags: caller context flags (RTDM_NRT_CALL etc.)
 * @request:    RTNET_RTIOC_* request code
 * @arg:        request-specific in-kernel argument pointer
 *
 * Returns 0 or a positive pool-operation count on success, negative
 * errno on failure.
 */
int rt_socket_common_ioctl(struct rtdm_dev_context *context, int call_flags,
                           int request, void *arg)
{
    struct rtsocket         *sock = (struct rtsocket *)&context->dev_private;
    int                     ret = 0;
    struct rtnet_callback   *callback = arg;
    unsigned int            rtskbs;
    unsigned long           flags;


    switch (request) {
        case RTNET_RTIOC_PRIORITY:
            /* NOTE(review): written without taking param_lock - presumably
             * a single aligned store is considered atomic here; confirm. */
            sock->priority = *(unsigned int *)arg;
            break;

        case RTNET_RTIOC_TIMEOUT:
            rtos_spin_lock_irqsave(&sock->param_lock, flags);

            rtos_nanosecs_to_time(*(nanosecs_t *)arg, &sock->timeout);

            rtos_spin_unlock_irqrestore(&sock->param_lock, flags);
            break;

        case RTNET_RTIOC_CALLBACK:
            /* Callbacks may only be installed from kernel space. */
            if (test_bit(RTDM_USER_MODE_CALL, &context->context_flags))
                return -EACCES;

            rtos_spin_lock_irqsave(&sock->param_lock, flags);

            sock->callback_func = callback->func;
            sock->callback_arg  = callback->arg;

            rtos_spin_unlock_irqrestore(&sock->param_lock, flags);
            break;

        case RTNET_RTIOC_NONBLOCK:
            if (*(unsigned int *)arg != 0)
                set_bit(RT_SOCK_NONBLOCK, &context->context_flags);
            else
                clear_bit(RT_SOCK_NONBLOCK, &context->context_flags);
            break;

        case RTNET_RTIOC_EXTPOOL:
            rtskbs = *(unsigned int *)arg;

            /* Reserve the requested size in the accounting first, then
             * perform the actual extension outside the spinlock and give
             * back the part that could not be allocated (rtskbs - ret). */
            rtos_spin_lock_irqsave(&sock->param_lock, flags);

            if (test_bit(SKB_POOL_CLOSED, &context->context_flags)) {
                rtos_spin_unlock_irqrestore(&sock->param_lock, flags);
                return -EBADF;
            }
            atomic_add(rtskbs, &sock->pool_size);

            rtos_spin_unlock_irqrestore(&sock->param_lock, flags);

            if (test_bit(RTDM_CREATED_IN_NRT, &context->context_flags)) {
                /* NOTE(review): this early return leaves pool_size inflated
                 * by rtskbs although nothing was extended - looks like an
                 * accounting leak; verify against later versions. */
                if (!(call_flags & RTDM_NRT_CALL))
                    return -EACCES;
                ret = rtskb_pool_extend(&sock->skb_pool, rtskbs);
            } else
                ret = rtskb_pool_extend_rt(&sock->skb_pool, rtskbs);
            atomic_sub(rtskbs-ret, &sock->pool_size);
            break;

        case RTNET_RTIOC_SHRPOOL:
            rtskbs = *(unsigned int *)arg;

            /* Mirror image of EXTPOOL: debit the accounting up front and
             * credit back whatever could not be shrunk (rtskbs - ret). */
            rtos_spin_lock_irqsave(&sock->param_lock, flags);

            if (test_bit(SKB_POOL_CLOSED, &context->context_flags)) {
                rtos_spin_unlock_irqrestore(&sock->param_lock, flags);
                return -EBADF;
            }
            atomic_sub(rtskbs, &sock->pool_size);

            rtos_spin_unlock_irqrestore(&sock->param_lock, flags);

            if (test_bit(RTDM_CREATED_IN_NRT, &context->context_flags)) {
                /* NOTE(review): same suspected accounting leak as in
                 * EXTPOOL when returning -EACCES here. */
                if (!(call_flags & RTDM_NRT_CALL))
                    return -EACCES;
                ret = rtskb_pool_shrink(&sock->skb_pool, *(unsigned int *)arg);
            } else
                ret = rtskb_pool_shrink_rt(&sock->skb_pool, *(unsigned int *)arg);
            atomic_add(rtskbs-ret, &sock->pool_size);
            break;

        default:
            ret = -EOPNOTSUPP;
            break;
    }

    return ret;
}
/***
 * rt_socket_common_ioctl - ioctl handler shared by all RTnet socket types
 *
 * Handles transmit parameters, timeout, callback registration, and
 * per-socket rtskb pool extension/shrinking.  Pool resizing is only
 * available from non-RT context and is serialized via pool_nrt_lock.
 *
 * Returns 0 or a positive pool-operation count on success, negative
 * errno on failure.
 */
int rt_socket_common_ioctl(struct rtdm_dev_context *sockctx,
                           rtdm_user_info_t *user_info, int request, void *arg)
{
    struct rtsocket         *sock = (struct rtsocket *)&sockctx->dev_private;
    struct rtnet_callback   *cb = arg;
    unsigned int            req_skbs;
    int                     moved;
    rtdm_lockctx_t          lock_ctx;
    int                     err = 0;


    switch (request) {
    case RTNET_RTIOC_XMITPARAMS:
        sock->priority = *(unsigned int *)arg;
        break;

    case RTNET_RTIOC_TIMEOUT:
        sock->timeout = *(nanosecs_rel_t *)arg;
        break;

    case RTNET_RTIOC_CALLBACK:
        /* Callbacks may only be installed from kernel space. */
        if (user_info)
            return -EACCES;

        rtdm_lock_get_irqsave(&sock->param_lock, lock_ctx);
        sock->callback_func = cb->func;
        sock->callback_arg  = cb->arg;
        rtdm_lock_put_irqrestore(&sock->param_lock, lock_ctx);
        break;

    case RTNET_RTIOC_EXTPOOL:
        req_skbs = *(unsigned int *)arg;

        /* Pool resizing may sleep - refuse in real-time context. */
        if (rtdm_in_rt_context())
            return -ENOSYS;

        mutex_lock(&sock->pool_nrt_lock);
        if (test_bit(SKB_POOL_CLOSED, &sockctx->context_flags)) {
            mutex_unlock(&sock->pool_nrt_lock);
            return -EBADF;
        }
        moved = rtskb_pool_extend(&sock->skb_pool, req_skbs);
        sock->pool_size += moved;
        mutex_unlock(&sock->pool_nrt_lock);

        /* A non-zero request that added nothing means out of memory;
         * otherwise report how many rtskbs were added. */
        if (moved == 0 && req_skbs > 0)
            err = -ENOMEM;
        else
            err = moved;
        break;

    case RTNET_RTIOC_SHRPOOL:
        req_skbs = *(unsigned int *)arg;

        /* Pool resizing may sleep - refuse in real-time context. */
        if (rtdm_in_rt_context())
            return -ENOSYS;

        mutex_lock(&sock->pool_nrt_lock);
        moved = rtskb_pool_shrink(&sock->skb_pool, req_skbs);
        sock->pool_size -= moved;
        mutex_unlock(&sock->pool_nrt_lock);

        /* A non-zero request that freed nothing means the buffers are
         * still busy; otherwise report how many were removed. */
        if (moved == 0 && req_skbs > 0)
            err = -EBUSY;
        else
            err = moved;
        break;

    default:
        err = -EOPNOTSUPP;
        break;
    }

    return err;
}