/***
 * rt_socket_init - initialises a new socket structure
 */
int rt_socket_init(struct rtdm_dev_context *context)
{
    struct rtsocket *sock = (struct rtsocket *)&context->dev_private;
    unsigned int    pool_size;

    sock->priority      = SOCK_DEF_PRIO;
    sock->callback_func = NULL;

    rtskb_queue_init(&sock->incoming);

    rtos_nanosecs_to_time(0, &sock->timeout);

    rtos_spin_lock_init(&sock->param_lock);
    rtos_event_sem_init(&sock->wakeup_event);

    if (test_bit(RTDM_CREATED_IN_NRT, &context->context_flags))
        pool_size = rtskb_pool_init(&sock->skb_pool, socket_rtskbs);
    else
        pool_size = rtskb_pool_init_rt(&sock->skb_pool, socket_rtskbs);
    atomic_set(&sock->pool_size, pool_size);

    if (pool_size < socket_rtskbs) {
        /* fix statistics */
        if (pool_size == 0)
            rtskb_pools--;

        rt_socket_cleanup(context);
        return -ENOMEM;
    }

    return 0;
}
/***
 * rt_socket_init - initialises a new socket structure
 */
int rt_socket_init(struct rtdm_dev_context *sockctx, unsigned short protocol)
{
    struct rtsocket *sock = (struct rtsocket *)&sockctx->dev_private;
    unsigned int    pool_size;

    sock->callback_func = NULL;

    rtskb_queue_init(&sock->incoming);
    sock->timeout = 0;

    rtdm_lock_init(&sock->param_lock);
    rtdm_sem_init(&sock->pending_sem, 0);

    pool_size = rt_bare_socket_init(sock, protocol,
                                    RTSKB_PRIO_VALUE(SOCK_DEF_PRIO,
                                                     RTSKB_DEF_RT_CHANNEL),
                                    socket_rtskbs);
    sock->pool_size = pool_size;
    mutex_init(&sock->pool_nrt_lock);

    if (pool_size < socket_rtskbs) {
        /* fix statistics */
        if (pool_size == 0)
            rtskb_pools--;

        rt_socket_cleanup(sockctx);
        return -ENOMEM;
    }

    return 0;
}
int __init rtcfg_init_frames(void)
{
    int ret;

    rtskb_queue_init(&rx_queue);
    rtos_event_sem_init(&rx_event);

    if (rtskb_pool_init(&rtcfg_pool, num_rtskbs) < num_rtskbs) {
        ret = -ENOMEM;
        goto error1;
    }

    ret = rtos_task_init(&rx_task, rtcfg_rx_task, 0, RTOS_LOWEST_RT_PRIORITY);
    if (ret < 0)
        goto error1;

    ret = rtdev_add_pack(&rtcfg_packet_type);
    if (ret < 0)
        goto error2;

    return 0;

  error2:
    rtos_task_delete(&rx_task);

  error1:
    rtos_event_sem_delete(&rx_event);
    rtskb_pool_release(&rtcfg_pool);

    return ret;
}
/***
 * rt_socket_init - initialises a new socket structure
 */
int __rt_socket_init(struct rtdm_fd *fd, unsigned short protocol,
                     struct module *module)
{
    struct rtsocket *sock = rtdm_fd_to_private(fd);
    unsigned int    pool_size;

    sock->flags = 0;
    sock->callback_func = NULL;

    rtskb_queue_init(&sock->incoming);
    sock->timeout = 0;

    rtdm_lock_init(&sock->param_lock);
    rtdm_sem_init(&sock->pending_sem, 0);

    pool_size = __rt_bare_socket_init(fd, protocol,
                                      RTSKB_PRIO_VALUE(SOCK_DEF_PRIO,
                                                       RTSKB_DEF_RT_CHANNEL),
                                      socket_rtskbs, module);
    sock->pool_size = pool_size;
    mutex_init(&sock->pool_nrt_lock);

    if (pool_size < socket_rtskbs) {
        /* fix statistics */
        if (pool_size == 0)
            rtskb_pools--;

        rt_socket_cleanup(fd);
        return -ENOMEM;
    }

    return 0;
}
int __init rtcfg_init_frames(void)
{
    int ret;

    if (rtskb_pool_init(&rtcfg_pool, num_rtskbs) < num_rtskbs) {
        /* release the partially allocated pool before failing */
        ret = -ENOMEM;
        goto error1;
    }

    rtskb_queue_init(&rx_queue);
    rtdm_event_init(&rx_event, 0);

    ret = rtdm_task_init(&rx_task, "rtcfg-rx", rtcfg_rx_task, 0,
                         RTDM_TASK_LOWEST_PRIORITY, 0);
    if (ret < 0) {
        rtdm_event_destroy(&rx_event);
        goto error1;
    }

    ret = rtdev_add_pack(&rtcfg_packet_type);
    if (ret < 0)
        goto error2;

    return 0;

  error2:
    rtdm_event_destroy(&rx_event);
    rtdm_task_join_nrt(&rx_task, 100);

  error1:
    rtskb_pool_release(&rtcfg_pool);

    return ret;
}
/***
 * rt_socket_init - initialises a new socket structure
 */
int rt_socket_init(struct rtdm_dev_context *context)
{
    struct rtsocket *sock = (struct rtsocket *)&context->dev_private;
    unsigned int    pool_size;

    sock->priority      = RTSKB_PRIO_VALUE(SOCK_DEF_PRIO, RTSKB_DEF_RT_CHANNEL);
    sock->callback_func = NULL;

    rtskb_queue_init(&sock->incoming);
    sock->timeout = 0;

    rtos_spin_lock_init(&sock->param_lock);
    rtos_sem_init(&sock->pending_sem);

    if (test_bit(RTDM_CREATED_IN_NRT, &context->context_flags))
        pool_size = rtskb_pool_init(&sock->skb_pool, socket_rtskbs);
    else
        pool_size = rtskb_pool_init_rt(&sock->skb_pool, socket_rtskbs);
    atomic_set(&sock->pool_size, pool_size);

    if (pool_size < socket_rtskbs) {
        /* fix statistics */
        if (pool_size == 0)
            rtskb_pools--;

        rt_socket_cleanup(context);
        return -ENOMEM;
    }

    return 0;
}
int tdma_attach(struct rtnet_device *rtdev, void *priv)
{
    struct rtmac_tdma *tdma = (struct rtmac_tdma *)priv;

    rt_printk("RTmac: tdma1: init time division multiple access (tdma) "
              "for realtime stations\n");

    memset(tdma, 0, sizeof(struct rtmac_tdma));

    spin_lock_init(&tdma->delta_t_lock);

    tdma->rtdev = rtdev;

    /*
     * init semaphores; they implement a producer/consumer scheme between
     * the sending real-time task and the driver task
     */
    rt_sem_init(&tdma->client_tx, 0);

    /*
     * init tx queue
     */
    rtskb_prio_queue_init(&tdma->tx_queue);

    /*
     * init rt stuff
     * - timer
     * - list heads
     */

    /* generic */

    /* master */
    init_timer(&tdma->rt_add_timer);
    INIT_LIST_HEAD(&tdma->rt_add_list);
    INIT_LIST_HEAD(&tdma->rt_list);
    INIT_LIST_HEAD(&tdma->rt_list_rate);

    init_timer(&tdma->task_change_timer);
    init_timer(&tdma->master_wait_timer);
    init_timer(&tdma->master_sent_conf_timer);
    init_timer(&tdma->master_sent_test_timer);

    rtskb_queue_init(&tdma->master_queue);

    /* client */
    init_timer(&tdma->client_sent_ack_timer);

    /*
     * start timer
     */
    rt_set_oneshot_mode();
    start_rt_timer(0);

    return 0;
}
int tdma_attach(struct rtnet_device *rtdev, void *priv)
{
    struct rtmac_tdma *tdma = (struct rtmac_tdma *)priv;

    memset(tdma, 0, sizeof(struct rtmac_tdma));

    tdma->magic = TDMA_MAGIC;

    rtos_spin_lock_init(&tdma->delta_t_lock);

    tdma->flags.mac_active = 1;

    tdma->rtdev = rtdev;

    /*
     * init event; it is set to the signaled state when the SOF
     * arrives at the client.
     */
    rtos_event_init(&tdma->client_tx);

    /*
     * init tx queue
     */
    rtskb_prio_queue_init(&tdma->tx_queue);

    /*
     * init rt stuff
     * - timer
     * - list heads
     */

    /* generic */

    /* master */
    init_timer(&tdma->rt_add_timer);
    INIT_LIST_HEAD(&tdma->rt_add_list);
    INIT_LIST_HEAD(&tdma->rt_list);
    INIT_LIST_HEAD(&tdma->rt_list_rate);

    init_timer(&tdma->task_change_timer);
    init_timer(&tdma->master_wait_timer);
    init_timer(&tdma->master_sent_conf_timer);
    init_timer(&tdma->master_sent_test_timer);

    rtskb_queue_init(&tdma->master_queue);

    /* client */
    init_timer(&tdma->client_sent_ack_timer);

    return tdma_dev_init(rtdev, tdma);
}
int __init nomac_proto_init(void)
{
    int ret;

    rtskb_queue_init(&nrt_rtskb_queue);
    rtos_event_init(&wakeup_sem);

    ret = rtos_task_init(&wrapper_task, nrt_xmit_task, 0,
                         RTOS_LOWEST_RT_PRIORITY);
    if (ret < 0) {
        rtos_event_delete(&wakeup_sem);
        return ret;
    }

    return 0;
}
int __init rtcap_init(void)
{
    struct rtnet_device *rtdev;
    struct net_device   *dev;
    int                 ret;
    int                 devices = 0;
    int                 i;
    unsigned long       flags;

    printk("RTcap: real-time capturing interface\n");

#if defined(CONFIG_RTAI_24) || defined(CONFIG_RTAI_30) || defined(CONFIG_RTAI_31)
    if (start_timer) {
        rt_set_oneshot_mode();
        start_rt_timer(0);
    }
#endif

    rtskb_queue_init(&cap_queue);

    ret = rtos_nrt_signal_init(&cap_signal, rtcap_signal_handler);
    if (ret < 0)
        goto error1;

    for (i = 0; i < MAX_RT_DEVICES; i++) {
        tap_device[i].present = 0;

        rtdev = rtdev_get_by_index(i);
        if (rtdev != NULL) {
            down(&rtdev->nrt_sem);

            if (test_bit(PRIV_FLAG_UP, &rtdev->priv_flags)) {
                up(&rtdev->nrt_sem);
                printk("RTcap: %s busy, skipping device!\n", rtdev->name);
                rtdev_dereference(rtdev);
                continue;
            }

            if (rtdev->mac_priv != NULL) {
                up(&rtdev->nrt_sem);
                printk("RTcap: RTmac discipline already active on device %s. "
                       "Load RTcap before RTmac!\n", rtdev->name);
                rtdev_dereference(rtdev);
                continue;
            }

            memset(&tap_device[i].tap_dev_stats, 0,
                   sizeof(struct net_device_stats));

            dev = &tap_device[i].tap_dev;
            memset(dev, 0, sizeof(struct net_device));
            dev->init = tap_dev_init;
            dev->priv = rtdev;
            strncpy(dev->name, rtdev->name, IFNAMSIZ-1);
            dev->name[IFNAMSIZ-1] = 0;

            ret = register_netdev(dev);
            if (ret < 0) {
                up(&rtdev->nrt_sem);
                rtdev_dereference(rtdev);
                printk("RTcap: unable to register %s!\n", dev->name);
                goto error2;
            }
            tap_device[i].present = TAP_DEV;

            tap_device[i].orig_xmit = rtdev->hard_start_xmit;

            if ((rtdev->flags & IFF_LOOPBACK) == 0) {
                dev = &tap_device[i].rtmac_tap_dev;
                memset(dev, 0, sizeof(struct net_device));
                dev->init = tap_dev_init;
                dev->priv = rtdev;
                strncpy(dev->name, rtdev->name, IFNAMSIZ-1);
                dev->name[IFNAMSIZ-1] = 0;
                strncat(dev->name, "-mac", IFNAMSIZ-strlen(dev->name));

                ret = register_netdev(dev);
                if (ret < 0) {
                    up(&rtdev->nrt_sem);
                    rtdev_dereference(rtdev);
                    printk("RTcap: unable to register %s!\n", dev->name);
                    goto error2;
                }
                tap_device[i].present |= RTMAC_TAP_DEV;

                rtdev->hard_start_xmit = rtcap_xmit_hook;
            } else
                rtdev->hard_start_xmit = rtcap_loopback_xmit_hook;

            /* If the device requires no xmit_lock, start_xmit equals
             * hard_start_xmit, so we have to update it as well. */
            if (rtdev->features & RTNETIF_F_NON_EXCLUSIVE_XMIT)
                rtdev->start_xmit = rtdev->hard_start_xmit;

            tap_device[i].present |= XMIT_HOOK;
            __MOD_INC_USE_COUNT(rtdev->owner);

            up(&rtdev->nrt_sem);

            devices++;
        }
    }

    if (devices == 0) {
        printk("RTcap: no real-time devices found!\n");
        ret = -ENODEV;
        goto error2;
    }

    if (rtskb_pool_init(&cap_pool, rtcap_rtskbs * devices) <
        rtcap_rtskbs * devices) {
        rtskb_pool_release(&cap_pool);
        ret = -ENOMEM;
        goto error2;
    }

    /* register capturing handlers with RTnet core */
    rtos_spin_lock_irqsave(&rtcap_lock, flags);
    rtcap_handler = rtcap_rx_hook;
    rtos_spin_unlock_irqrestore(&rtcap_lock, flags);

    return 0;

  error2:
    cleanup_tap_devices();
    rtos_nrt_signal_delete(&cap_signal);

  error1:
#if defined(CONFIG_RTAI_24) || defined(CONFIG_RTAI_30) || defined(CONFIG_RTAI_31)
    if (start_timer)
        stop_rt_timer();
#endif

    return ret;
}