/*
 * Set up RTcfg frame handling: the rtskb pool, the RX queue/event, the
 * RX task and the packet hook.
 *
 * Returns 0 on success or a negative error code; on failure every
 * partially acquired resource is released again.
 */
int __init rtcfg_init_frames(void)
{
    int ret;

    /* rtskb_pool_init() may succeed only partially; release whatever it
     * did allocate before bailing out (the original leaked this). */
    if (rtskb_pool_init(&rtcfg_pool, num_rtskbs) < num_rtskbs) {
        ret = -ENOMEM;
        goto error1;
    }

    rtskb_queue_init(&rx_queue);
    rtdm_event_init(&rx_event, 0);

    ret = rtdm_task_init(&rx_task, "rtcfg-rx", rtcfg_rx_task, 0,
                         RTDM_TASK_LOWEST_PRIORITY, 0);
    if (ret < 0) {
        rtdm_event_destroy(&rx_event);
        goto error1;
    }

    ret = rtdev_add_pack(&rtcfg_packet_type);
    if (ret < 0)
        goto error2;

    return 0;

error2:
    /* Destroying the event wakes the RX task so the join can complete. */
    rtdm_event_destroy(&rx_event);
    rtdm_task_join_nrt(&rx_task, 100);

error1:
    rtskb_pool_release(&rtcfg_pool);
    return ret;
}
/*
 * Bring up the LEDs, then start the periodic heartbeat task.
 * Returns 0 on success or the first negative error code encountered.
 */
int __init init_heartbeat(void)
{
    int ret = leds_init();

    if (ret != 0)
        return ret;

    return rtdm_task_init(&heartbeat_task, "heartbeat", heartbeat, NULL,
                          99, HEARTBEAT_PERIOD);
}
int setup_timer_handler(void) { int ret = 0; priv = netdev_priv(get_vlc_dev()); //Get the first packets in the packet ring rx_packet = get_rx_packet(); tx_packet = get_tx_packet(); //Init semas rtdm_sem_init(&tx_sem, 1); rtdm_sem_init(&rx_sem, 1); //Store the current time rx_sleep_slot = rtdm_clock_read_monotonic(); tx_sleep_slot = rx_sleep_slot + sleep_increment / 2; early_late_slot = do_div(sleep_increment, 5); //Return an error if either is NULL if(!rx_packet || !tx_packet) goto error; //Start the rx and tx handler tasks rtdm_task_init(&rx_handler_task, "VLC rx handler", rx_handler, NULL, RTDM_TASK_HIGHEST_PRIORITY, 0); if(ret) goto error; rtdm_task_init(&tx_handler_task, "VLC tx handler", tx_handler, NULL, RTDM_TASK_HIGHEST_PRIORITY, 0); if(ret) goto error; return 0; error: rtdm_task_destroy(&rx_handler_task); rtdm_task_destroy(&tx_handler_task); return -1; }
/* ************************************************************************
 * ************************************************************************
 *   I N I T
 * ************************************************************************
 * ************************************************************************ */

/*
 * Module init: set up the proxy's rtskb pool, register the pseudo network
 * device, clear the ring buffers, start the transmission task and hook
 * into RTnet's IP fallback path.
 *
 * Returns 0 on success or a negative error code; acquired resources are
 * released on every failure path (the original ignored the
 * rtdm_task_init() return value and could leave the module loaded in a
 * half-initialized state).
 */
static int __init rtnetproxy_init_module(void)
{
    int err;

    /* Initialize the proxy's rtskb pool (JK).  A partial init still
     * allocates buffers, so release them before failing. */
    if (rtskb_pool_init(&rtskb_pool, proxy_rtskbs) < proxy_rtskbs) {
        err = -ENOMEM;
        goto err_pool;
    }

    dev_rtnetproxy.init = rtnetproxy_init;
    SET_MODULE_OWNER(&dev_rtnetproxy);

    /* Define the name for this unit */
    err = dev_alloc_name(&dev_rtnetproxy, "rtproxy");
    if (err < 0)
        goto err_pool;

    err = register_netdev(&dev_rtnetproxy);
    if (err < 0)
        goto err_pool;

    /* Initialize the ringbuffers: */
    memset(&ring_rtskb_kernel_rtnet, 0, sizeof(ring_rtskb_kernel_rtnet));
    memset(&ring_rtskb_rtnet_kernel, 0, sizeof(ring_rtskb_rtnet_kernel));
    memset(&ring_skb_kernel_rtnet, 0, sizeof(ring_skb_kernel_rtnet));
    memset(&ring_skb_rtnet_kernel, 0, sizeof(ring_skb_rtnet_kernel));

    /* Init the task for transmission */
    rtdm_sem_init(&rtnetproxy_sem, 0);
    err = rtdm_task_init(&rtnetproxy_thread, "rtnetproxy",
                         rtnetproxy_transmit_thread, 0,
                         RTDM_TASK_LOWEST_PRIORITY, 0);
    if (err < 0)
        goto err_netdev;

    /* Register srq */
    rtdm_nrtsig_init(&rtnetproxy_signal, rtnetproxy_signal_handler);

    /* rtNet stuff: */
    rt_ip_register_fallback(rtnetproxy_recv);

    printk("rtnetproxy installed as \"%s\"\n", dev_rtnetproxy.name);

    return 0;

err_netdev:
    rtdm_sem_destroy(&rtnetproxy_sem);
    unregister_netdev(&dev_rtnetproxy);
err_pool:
    rtskb_pool_release(&rtskb_pool);
    return err;
}
static void rtcfg_client_recv_stage_2_cfg(int ifindex, struct rtskb *rtskb) { struct rtcfg_frm_stage_2_cfg *stage_2_cfg; struct rtcfg_device *rtcfg_dev = &device[ifindex]; size_t data_len; int ret; if (rtskb->len < sizeof(struct rtcfg_frm_stage_2_cfg)) { rtdm_mutex_unlock(&rtcfg_dev->dev_mutex); RTCFG_DEBUG(1, "RTcfg: received invalid stage_2_cfg frame\n"); kfree_rtskb(rtskb); return; } stage_2_cfg = (struct rtcfg_frm_stage_2_cfg *)rtskb->data; __rtskb_pull(rtskb, sizeof(struct rtcfg_frm_stage_2_cfg)); if (stage_2_cfg->heartbeat_period) { ret = rtdm_task_init(&rtcfg_dev->timer_task, "rtcfg-timer", rtcfg_timer, (void *)(long)ifindex, RTDM_TASK_LOWEST_PRIORITY, (nanosecs_rel_t)ntohs(stage_2_cfg->heartbeat_period) * 1000000); if (ret < 0) /*ERRMSG*/rtdm_printk("RTcfg: unable to create timer task\n"); else rtcfg_dev->flags |= FLAG_TIMER_STARTED; } /* add server to station list */ if (rtcfg_add_to_station_list(rtcfg_dev, rtskb->mac.ethernet->h_source, stage_2_cfg->flags) < 0) { rtdm_mutex_unlock(&rtcfg_dev->dev_mutex); RTCFG_DEBUG(1, "RTcfg: unable to process stage_2_cfg frage\n"); kfree_rtskb(rtskb); return; } rtcfg_dev->other_stations = ntohl(stage_2_cfg->stations); rtcfg_dev->spec.clt.cfg_len = ntohl(stage_2_cfg->cfg_len); data_len = MIN(rtcfg_dev->spec.clt.cfg_len, rtskb->len); if (((rtcfg_dev->flags & RTCFG_FLAG_STAGE_2_DATA) != 0) && (data_len > 0)) { rtcfg_client_queue_frag(ifindex, rtskb, data_len); rtskb = NULL; if (rtcfg_dev->stations_found == rtcfg_dev->other_stations) rtcfg_next_main_state(ifindex, RTCFG_MAIN_CLIENT_ALL_KNOWN); } else { if (rtcfg_dev->stations_found == rtcfg_dev->other_stations) { rtcfg_complete_cmd(ifindex, RTCFG_CMD_ANNOUNCE, 0); rtcfg_next_main_state(ifindex, ((rtcfg_dev->flags & RTCFG_FLAG_READY) != 0) ? RTCFG_MAIN_CLIENT_READY : RTCFG_MAIN_CLIENT_2); } else rtcfg_next_main_state(ifindex, RTCFG_MAIN_CLIENT_ALL_FRAMES); rtcfg_send_ack(ifindex); } rtdm_mutex_unlock(&rtcfg_dev->dev_mutex); if (rtskb != NULL) kfree_rtskb(rtskb); }
/*
 * IOCTL dispatcher for the RTDM self-test device: exercises semaphores,
 * events, mutexes, non-RT signals and task management according to
 * @request.  Config-carrying requests copy the user's
 * struct rttst_rtdmtest_config in (and back out for GETSTAT).
 *
 * Returns 0 on success, a negative error code otherwise.
 */
static int rtdmtest_ioctl(struct rtdm_dev_context *context,
                          rtdm_user_info_t *user_info,
                          unsigned int request, void *arg)
{
    struct rtdmtest_context *ctx;
    struct rttst_rtdmtest_config config_buf, *config;
    rtdm_toseq_t toseq_local, *toseq = NULL;
    int i, err = 0;

    ctx = (struct rtdmtest_context *)context->dev_private;

    switch (request) {
    case RTTST_RTIOC_RTDMTEST_SEM_TIMEDDOWN:
    case RTTST_RTIOC_RTDMTEST_EVENT_TIMEDWAIT:
    case RTTST_RTIOC_RTDMTEST_MUTEX_TIMEDTEST:
    case RTTST_RTIOC_RTDMTEST_MUTEX_TEST:
        /* These requests carry a config struct; copy it in when the
         * caller is a user-space task. */
        config = arg;
        if (user_info) {
            if (rtdm_safe_copy_from_user(user_info, &config_buf, arg,
                    sizeof(struct rttst_rtdmtest_config)) < 0)
                return -EFAULT;
            config = &config_buf;
        }
        if (!config->seqcount)
            config->seqcount = 1;
        /* A timeout sequence only makes sense for repeated waits. */
        if (config->timeout && config->seqcount > 1) {
            toseq = &toseq_local;
            rtdm_toseq_init(toseq, config->timeout);
        }
        switch (request) {
        case RTTST_RTIOC_RTDMTEST_SEM_TIMEDDOWN:
            for (i = 0; i < config->seqcount; i++) {
                err = rtdm_sem_timeddown(&ctx->sem, config->timeout, toseq);
                if (err)
                    break;
            }
            break;
        case RTTST_RTIOC_RTDMTEST_EVENT_TIMEDWAIT:
            for (i = 0; i < config->seqcount; i++) {
                err = rtdm_event_timedwait(&ctx->event, config->timeout,
                                           toseq);
                if (err)
                    break;
            }
            break;
        case RTTST_RTIOC_RTDMTEST_MUTEX_TIMEDTEST:
            for (i = 0; i < config->seqcount; i++) {
                err = rtdm_mutex_timedlock(&ctx->mutex, config->timeout,
                                           toseq);
                if (err)
                    break;
                /* Optionally hold the mutex for a while to provoke
                 * contention/timeouts in other waiters. */
                if (config->delay_jiffies) {
                    __set_current_state(TASK_INTERRUPTIBLE);
                    schedule_timeout(config->delay_jiffies);
                }
                rtdm_lock_count++;
                rtdm_mutex_unlock(&ctx->mutex);
            }
            break;
        case RTTST_RTIOC_RTDMTEST_MUTEX_TEST:
            for (i = 0; i < config->seqcount; i++) {
                if ((err = rtdm_mutex_lock(&ctx->mutex)))
                    break;
                rtdm_lock_count++;
                rtdm_mutex_unlock(&ctx->mutex);
            }
            break;
        }
        break;

    case RTTST_RTIOC_RTDMTEST_SEM_DOWN:
        err = rtdm_sem_down(&ctx->sem);
        break;
    case RTTST_RTIOC_RTDMTEST_SEM_UP:
        rtdm_sem_up(&ctx->sem);
        break;
    case RTTST_RTIOC_RTDMTEST_SEM_DESTROY:
        rtdm_sem_destroy(&ctx->sem);
        break;
    case RTTST_RTIOC_RTDMTEST_EVENT_WAIT:
        err = rtdm_event_wait(&ctx->event);
        break;
    case RTTST_RTIOC_RTDMTEST_EVENT_SIGNAL:
        rtdm_event_signal(&ctx->event);
        break;
    case RTTST_RTIOC_RTDMTEST_EVENT_DESTROY:
        rtdm_event_destroy(&ctx->event);
        break;
    case RTTST_RTIOC_RTDMTEST_MUTEX_DESTROY:
        rtdm_mutex_destroy(&ctx->mutex);
        break;
    case RTTST_RTIOC_RTDMTEST_MUTEX_GETSTAT:
        printk("RTTST_RTIOC_RTDMTEST_MUTEX_GETSTAT\n");
        if (user_info)
            config = &config_buf;
        else
            config = arg;
        /* Report the number of successful lock acquisitions so far. */
        config->seqcount = rtdm_lock_count;
        if (user_info) {
            if (rtdm_safe_copy_to_user(user_info, arg, &config_buf,
                    sizeof(struct rttst_rtdmtest_config)) < 0)
                return -EFAULT;
        }
        break;
    case RTTST_RTIOC_RTDMTEST_NRTSIG_PEND:
        rtdm_nrtsig_pend(&ctx->nrtsig);
        break;

    case RTTST_RTIOC_RTDMTEST_TASK_CREATE:
    case RTTST_RTIOC_RTDMTEST_TASK_SET_PRIO:
        config = arg;
        if (user_info) {
            if (rtdm_safe_copy_from_user(user_info, &config_buf, arg,
                    sizeof(struct rttst_rtdmtest_config)) < 0)
                return -EFAULT;
            config = &config_buf;
        }
        if (request == RTTST_RTIOC_RTDMTEST_TASK_CREATE) {
            /* BUGFIX: `config` may point at config_buf on this stack
             * frame, so the spawned task would read dead stack memory
             * once this ioctl returns.  Keep a copy with static storage
             * duration that outlives the call. */
            static struct rttst_rtdmtest_config task_config;

            task_config = *config;
            task_period = config->timeout;
            rtdm_task_init(&task, "RTDMTEST", rtdmtest_task,
                           (void *)&task_config, config->priority, 0);
        } else {
            rtdm_task_set_priority(&task, config->priority);
        }
        break;
    case RTTST_RTIOC_RTDMTEST_TASK_DESTROY:
        rtdm_task_destroy(&task);
        rtdm_task_join_nrt(&task, 100);
        break;

    default:
        printk("request=%d\n", request);
        err = -ENOTTY;
    }

    return err;
}
static void sync_task_func(void *arg) { int ret; rtdm_lockctx_t lock_ctx; nanosecs_abs_t timestamp; nanosecs_abs_t timestamp_master; rtser_event_t ser_rx_event; can_frame_t can_frame = { .can_id = clock_sync_can_id, .can_dlc = sizeof(timestamp), }; struct iovec iov = { .iov_base = &can_frame, .iov_len = sizeof(can_frame_t), }; struct msghdr msg = { .msg_name = NULL, .msg_namelen = 0, .msg_iov = &iov, .msg_iovlen = 1, .msg_control = NULL, .msg_controllen = 0, }; if (clock_sync_mode == SYNC_CAN_SLAVE) { msg.msg_control = ×tamp; msg.msg_controllen = sizeof(timestamp); } while (1) { switch (clock_sync_mode) { case SYNC_SER_MASTER: timestamp = cpu_to_be64(rtdm_clock_read()); ret = sync_dev_ctx->ops->write_rt(sync_dev_ctx, NULL, ×tamp, sizeof(timestamp)); if (ret != sizeof(timestamp)) { tims_error("[CLOCK SYNC]: can't write serial time stamp, " "code = %d\n", ret); goto exit_task; } rtdm_task_wait_period(); break; case SYNC_SER_SLAVE: ret = sync_dev_ctx->ops->ioctl_rt(sync_dev_ctx, NULL, RTSER_RTIOC_WAIT_EVENT, &ser_rx_event); if (ret < 0) { tims_error("[CLOCK SYNC]: can't read serial time stamp, " "code = %d\n", ret); goto exit_task; } ret = sync_dev_ctx->ops->read_rt(sync_dev_ctx, NULL, ×tamp_master, sizeof(timestamp_master)); if (ret != sizeof(timestamp_master)) { tims_error("[CLOCK SYNC]: can't read serial time stamp, " "code = %d\n", ret); goto exit_task; } timestamp_master = be64_to_cpu(timestamp_master); rtdm_lock_get_irqsave(&sync_lock, lock_ctx); clock_offset = timestamp_master - ser_rx_event.rxpend_timestamp; rtdm_lock_put_irqrestore(&sync_lock, lock_ctx); break; case SYNC_CAN_MASTER: // workaround for kernel working on user data iov.iov_len = sizeof(can_frame_t); iov.iov_base = &can_frame; // workaround end *(nanosecs_abs_t *)can_frame.data = cpu_to_be64(rtdm_clock_read()); ret = sync_dev_ctx->ops->sendmsg_rt(sync_dev_ctx, NULL, &msg, 0); if (ret < 0) { tims_error("[CLOCK SYNC]: can't send CAN time stamp, " "code = %d\n", ret); goto exit_task; } 
rtdm_task_wait_period(); break; case SYNC_CAN_SLAVE: // workaround for kernel working on user data iov.iov_len = sizeof(can_frame_t); iov.iov_base = &can_frame; // workaround end ret = sync_dev_ctx->ops->recvmsg_rt(sync_dev_ctx, NULL, &msg, 0); if (ret < 0) { tims_error("[CLOCK SYNC]: can't receive CAN time stamp, " "code = %d\n", ret); return; } timestamp_master = be64_to_cpu(*(nanosecs_abs_t *)can_frame.data); rtdm_lock_get_irqsave(&sync_lock, lock_ctx); clock_offset = timestamp_master - timestamp; rtdm_lock_put_irqrestore(&sync_lock, lock_ctx); break; } } exit_task: rtdm_context_unlock(sync_dev_ctx); } static __initdata char *mode_str[] = { "Local Clock", "RTnet", "CAN Master", "CAN Slave", "Serial Master", "Serial Slave" }; static __initdata struct rtser_config sync_serial_config = { .config_mask = RTSER_SET_BAUD | RTSER_SET_FIFO_DEPTH | RTSER_SET_TIMESTAMP_HISTORY | RTSER_SET_EVENT_MASK, .baud_rate = 115200, .fifo_depth = RTSER_FIFO_DEPTH_8, .timestamp_history = RTSER_RX_TIMESTAMP_HISTORY, .event_mask = RTSER_EVENT_RXPEND, }; int __init tims_clock_init(void) { struct can_filter filter; int nr_filters = 1; struct ifreq can_ifr; struct sockaddr_can can_addr; int ret; if (clock_sync_mode < SYNC_NONE || clock_sync_mode > SYNC_SER_SLAVE) { tims_error("invalid clock_sync_mode %d", clock_sync_mode); return -EINVAL; } printk("TIMS: clock sync mode is %s\n", mode_str[clock_sync_mode]); printk("TIMS: clock sync dev is %s\n", clock_sync_dev); rtdm_lock_init(&sync_lock); switch(clock_sync_mode) { case SYNC_NONE: return 0; case SYNC_RTNET: sync_dev_fd = rt_dev_open(clock_sync_dev, O_RDONLY); if (sync_dev_fd < 0) goto sync_dev_error; set_bit(TIMS_INIT_BIT_SYNC_DEV, &init_flags); break; case SYNC_CAN_MASTER: case SYNC_CAN_SLAVE: sync_dev_fd = rt_dev_socket(PF_CAN, SOCK_RAW, 0); if (sync_dev_fd < 0) { tims_error("[CLOCK SYNC]: error opening CAN socket: %d\n", sync_dev_fd); return sync_dev_fd; } set_bit(TIMS_INIT_BIT_SYNC_DEV, &init_flags); strcpy(can_ifr.ifr_name, 
clock_sync_dev); ret = rt_dev_ioctl(sync_dev_fd, SIOCGIFINDEX, &can_ifr); if (ret) { tims_info("[CLOCK SYNC]: error resolving CAN interface: %d\n", ret); return ret; } if (clock_sync_mode == SYNC_CAN_MASTER) nr_filters = 0; else { filter.can_id = clock_sync_can_id; filter.can_mask = 0xFFFFFFFF; } ret = rt_dev_setsockopt(sync_dev_fd, SOL_CAN_RAW, CAN_RAW_FILTER, &filter, nr_filters*sizeof(can_filter_t)); if (ret < 0) goto config_error; /* Bind socket to default CAN ID */ can_addr.can_family = AF_CAN; can_addr.can_ifindex = can_ifr.ifr_ifindex; ret = rt_dev_bind(sync_dev_fd, (struct sockaddr *)&can_addr, sizeof(can_addr)); if (ret < 0) goto config_error; /* Enable timestamps for incoming packets */ ret = rt_dev_ioctl(sync_dev_fd, RTCAN_RTIOC_TAKE_TIMESTAMP, RTCAN_TAKE_TIMESTAMPS); if (ret < 0) goto config_error; /* Calculate transmission delay */ ret = rt_dev_ioctl(sync_dev_fd, SIOCGCANBAUDRATE, &can_ifr); if (ret < 0) goto config_error; /* (47+64 bit) * 1.000.000.000 (ns/sec) / baudrate (bit/s) */ sync_delay = 1000 * (111000000 / can_ifr.ifr_ifru.ifru_ivalue); break; case SYNC_SER_MASTER: case SYNC_SER_SLAVE: sync_dev_fd = rt_dev_open(clock_sync_dev, O_RDWR); if (sync_dev_fd < 0) goto sync_dev_error; set_bit(TIMS_INIT_BIT_SYNC_DEV, &init_flags); ret = rt_dev_ioctl(sync_dev_fd, RTSER_RTIOC_SET_CONFIG, &sync_serial_config); if (ret < 0) goto config_error; /* (80 bit) * 1.000.000.000 (ns/sec) / baudrate (bit/s) */ sync_delay = 1000 * (80000000 / sync_serial_config.baud_rate); break; } sync_dev_ctx = rtdm_context_get(sync_dev_fd); if (clock_sync_mode != SYNC_RTNET) { ret = rtdm_task_init(&sync_task, "TIMSClockSync", sync_task_func, NULL, CLOCK_SYNC_PRIORITY, CLOCK_SYNC_PERIOD); if (ret < 0) return ret; set_bit(TIMS_INIT_BIT_SYNC_TASK, &init_flags); } return 0; sync_dev_error: tims_error("[CLOCK SYNC]: cannot open %s\n", clock_sync_dev); return sync_dev_fd; config_error: tims_info("[CLOCK SYNC]: error configuring sync device: %d\n", ret); return ret; } void 
tims_clock_cleanup(void) { if (test_and_clear_bit(TIMS_INIT_BIT_SYNC_DEV, &init_flags)) rt_dev_close(sync_dev_fd); if (test_and_clear_bit(TIMS_INIT_BIT_SYNC_TASK, &init_flags)) rtdm_task_join_nrt(&sync_task, 100); }