Example #1
static void do_slot_job(struct tdma_priv *tdma, struct tdma_slot *job,
                        rtdm_lockctx_t lockctx)
{
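    /* entered and left with tdma->lock held; the lock is dropped only
       around the sleep and transmit phases below */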
    struct rtskb *rtskb;

    if ((job->period != 1) &&
        (tdma->current_cycle % job->period != job->phasing))
        return;

    rtdm_lock_put_irqrestore(&tdma->lock, lockctx);

    /* wait for slot begin, then send one pending packet */
    rtdm_task_sleep_abs(tdma->current_cycle_start + SLOT_JOB(job)->offset,
                        RTDM_TIMERMODE_REALTIME);

    rtdm_lock_get_irqsave(&tdma->lock, lockctx);
    rtskb = __rtskb_prio_dequeue(SLOT_JOB(job)->queue);
    if (!rtskb)
        return;
    rtdm_lock_put_irqrestore(&tdma->lock, lockctx);

    rtmac_xmit(rtskb);

    rtdm_lock_get_irqsave(&tdma->lock, lockctx);
}
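All of these examples pair the same two RTDM primitives: rtdm_lock_get_irqsave() takes a spinlock with local interrupts disabled and records the previous interrupt state in an rtdm_lockctx_t, and rtdm_lock_put_irqrestore() releases the lock and restores that state. A minimal, self-contained sketch of the pattern (demo_lock, shared_counter, and demo_increment are illustrative names, not taken from any example here; assuming the Xenomai RTDM driver header these drivers build against):

#include <rtdm/rtdm_driver.h>

static rtdm_lock_t demo_lock;   /* set up once via rtdm_lock_init(&demo_lock) */
static int shared_counter;

static int demo_increment(void)
{
    rtdm_lockctx_t ctx;
    int value;

    /* enter the critical section: take the spinlock with local IRQs
       disabled, saving the previous IRQ state in ctx */
    rtdm_lock_get_irqsave(&demo_lock, ctx);
    value = ++shared_counter;
    /* leave the critical section: release the lock and restore the
       IRQ state recorded in ctx */
    rtdm_lock_put_irqrestore(&demo_lock, ctx);

    return value;
}

Note that ctx is passed to the lock macros by name, not by address, exactly as in the examples.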
Example #2
/***
 *  rt_loopback_xmit - begin packet transmission
 *  @skb: packet to be sent
 *  @dev: network device to which packet is sent
 *
 */
static int rt_loopback_xmit(struct rtskb *skb, struct rtnet_device *rtdev)
{
    unsigned short          hash;
    struct rtpacket_type    *pt_entry;
    rtdm_lockctx_t          context;


    /* write transmission stamp - in case any protocol ever gets the idea to
       ask the loopback device for this service... */
    if (skb->xmit_stamp)
        *skb->xmit_stamp = cpu_to_be64(rtdm_clock_read() + *skb->xmit_stamp);

    /* make sure that critical fields are re-initialised */
    skb->chain_end = skb;

    /* parse the Ethernet header as usual */
    skb->protocol = rt_eth_type_trans(skb, rtdev);
    skb->nh.raw   = skb->data;

    rtdev_reference(rtdev);

    rtcap_report_incoming(skb);

    hash = ntohs(skb->protocol) & RTPACKET_HASH_KEY_MASK;

    rtdm_lock_get_irqsave(&rt_packets_lock, context);

    list_for_each_entry(pt_entry, &rt_packets[hash], list_entry)
        if (pt_entry->type == skb->protocol) {
            pt_entry->refcount++;
            rtdm_lock_put_irqrestore(&rt_packets_lock, context);

            pt_entry->handler(skb, pt_entry);

            rtdm_lock_get_irqsave(&rt_packets_lock, context);
            pt_entry->refcount--;
            rtdm_lock_put_irqrestore(&rt_packets_lock, context);

            goto out;
        }

    rtdm_lock_put_irqrestore(&rt_packets_lock, context);

    /* don't warn if running in promiscuous mode (RTcap...?) */
    if ((rtdev->flags & IFF_PROMISC) == 0)
        rtdm_printk("RTnet: unknown layer-3 protocol\n");

    kfree_rtskb(skb);

  out:
    rtdev_dereference(rtdev);
    return 0;
}
Example #3
void tdma_worker(void *arg)
{
    struct tdma_priv    *tdma = (struct tdma_priv *)arg;
    struct tdma_job     *job;
    rtdm_lockctx_t      lockctx;


    rtdm_event_wait(&tdma->worker_wakeup);

    rtdm_lock_get_irqsave(&tdma->lock, lockctx);

    job = tdma->first_job;
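    /* each job handler below is entered with tdma->lock held and must
       return with it held; blocking work happens with the lock dropped */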

    while (!test_bit(TDMA_FLAG_SHUTDOWN, &tdma->flags)) {
        job->ref_count++;
        switch (job->id) {
            case WAIT_ON_SYNC:
                rtdm_lock_put_irqrestore(&tdma->lock, lockctx);
                rtdm_event_wait(&tdma->sync_event);
                rtdm_lock_get_irqsave(&tdma->lock, lockctx);
                break;

            case XMIT_REQ_CAL:
                job = do_request_cal_job(tdma, REQUEST_CAL_JOB(job), lockctx);
                break;

#ifdef CONFIG_RTNET_TDMA_MASTER
            case XMIT_SYNC:
                do_xmit_sync_job(tdma, lockctx);
                break;

            case BACKUP_SYNC:
                do_backup_sync_job(tdma, lockctx);
                break;

            case XMIT_RPL_CAL:
                job = do_reply_cal_job(tdma, REPLY_CAL_JOB(job), lockctx);
                break;
#endif /* CONFIG_RTNET_TDMA_MASTER */

            default:
                do_slot_job(tdma, SLOT_JOB(job), lockctx);
                break;
        }
        job->ref_count--;

        job = tdma->current_job =
            list_entry(job->entry.next, struct tdma_job, entry);
    }

    rtdm_lock_put_irqrestore(&tdma->lock, lockctx);
}
Example #4
static void do_xmit_sync_job(struct tdma_priv *tdma, rtdm_lockctx_t lockctx)
{
    rtdm_lock_put_irqrestore(&tdma->lock, lockctx);

    /* wait for beginning of next cycle, then send sync */
    rtdm_task_sleep_abs(tdma->current_cycle_start + tdma->cycle_period,
                        RTDM_TIMERMODE_REALTIME);
    rtdm_lock_get_irqsave(&tdma->lock, lockctx);
    tdma->current_cycle++;
    tdma->current_cycle_start += tdma->cycle_period;
    rtdm_lock_put_irqrestore(&tdma->lock, lockctx);

    tdma_xmit_sync_frame(tdma);

    rtdm_lock_get_irqsave(&tdma->lock, lockctx);
}
Example #5
int tims_clock_ioctl(rtdm_user_info_t *user_info, unsigned int request,
                     void *arg)
{
    rtdm_lockctx_t lock_ctx;
    nanosecs_rel_t result = 0;

    switch(clock_sync_mode) {
        case SYNC_RTNET:
            sync_dev_ctx->ops->ioctl_rt(sync_dev_ctx, NULL,
                                        RTMAC_RTIOC_TIMEOFFSET, &result);
            break;

        case SYNC_CAN_SLAVE:
        case SYNC_SER_SLAVE:
            rtdm_lock_get_irqsave(&sync_lock, lock_ctx);
            result = clock_offset;
            rtdm_lock_put_irqrestore(&sync_lock, lock_ctx);
            break;
    }

    result += sync_delay;

    if (request == TIMS_RTIOC_GETTIME)
        result += rtdm_clock_read();

    return rtdm_safe_copy_to_user(user_info, arg, &result, sizeof(result));
}
Example #6
static int32_t cfgChnSet(
    struct rtdm_dev_context * ctx,
    enum xspiChn        chn) {

    struct devCtx *     devCtx;
    rtdm_lockctx_t      lockCtx;

    LOG_DBG("CFG: set current channel to %d", chn);

    if (!CFG_ARG_IS_VALID(chn, XSPI_CHN_0, XSPI_CHN_3)) {

        return (-EINVAL);
    }

    if (FALSE == portChnIsOnline(ctx->device, chn)) {

        return (-EIDRM);
    }
    devCtx = getDevCtx(ctx);
    rtdm_lock_get_irqsave(&devCtx->lock, lockCtx);
    devCtx->cfg.chn = chn;
    rtdm_lock_put_irqrestore(&devCtx->lock, lockCtx);

    return (0);
}
Example #7
/***
 *  rt_packet_rcv
 */
int rt_packet_rcv(struct rtskb *skb, struct rtpacket_type *pt)
{
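    /* recover the owning socket from the embedded packet-type member:
       this open-codes a container_of()-style back-pointer computation */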
    struct rtsocket *sock = (struct rtsocket *)(((u8 *)pt) -
                                ((u8 *)&((struct rtsocket *)0)->prot.packet));
    int             ifindex = sock->prot.packet.ifindex;
    void            (*callback_func)(struct rtdm_dev_context *, void *);
    void            *callback_arg;
    rtdm_lockctx_t  context;


    if (((ifindex != 0) && (ifindex != skb->rtdev->ifindex)) ||
        (rtskb_acquire(skb, &sock->skb_pool) != 0))
        kfree_rtskb(skb);
    else {
        rtdev_reference(skb->rtdev);
        rtskb_queue_tail(&sock->incoming, skb);
        rtdm_sem_up(&sock->pending_sem);

        rtdm_lock_get_irqsave(&sock->param_lock, context);
        callback_func = sock->callback_func;
        callback_arg  = sock->callback_arg;
        rtdm_lock_put_irqrestore(&sock->param_lock, context);

        if (callback_func)
            callback_func(rt_socket_context(sock), callback_arg);
    }
    return 0;
}
Example #8
static int32_t cfgChnTransferModeSet(
    struct rtdm_dev_context * ctx,
    enum xspiTransferMode transferMode) {

    struct devCtx *     devCtx;
    rtdm_lockctx_t      lockCtx;

    LOG_DBG("CFG: set transfer mode to %d", transferMode);

    if (!CFG_ARG_IS_VALID(transferMode, XSPI_TRANSFER_MODE_TX_AND_RX, XSPI_TRANSFER_MODE_TX_ONLY)) {

        return (-EINVAL);
    }
    devCtx = getDevCtx(ctx);
    rtdm_lock_get_irqsave(&devCtx->lock, lockCtx);

    if (XSPI_ACTIVITY_RUNNIG == devCtx->actvCnt) {
        rtdm_lock_put_irqrestore(&devCtx->lock, lockCtx);

        return (-EAGAIN);
    }
    devCtx->chn[devCtx->cfg.chn].cfg.transferMode = transferMode;
    lldChnTransferModeSet(
        ctx->device,
        devCtx->cfg.chn,
        (uint32_t)transferMode);
    rtdm_lock_put_irqrestore(&devCtx->lock, lockCtx);

    return (0);
}
Example #9
static int32_t cfgChnPinLayoutSet(
    struct rtdm_dev_context * ctx,
    enum xspiPinLayout  pinLayout) {

    struct devCtx *     devCtx;
    rtdm_lockctx_t      lockCtx;

    LOG_DBG("CFG: set pin layout to %d", pinLayout);

    if (!CFG_ARG_IS_VALID(pinLayout, XSPI_PIN_LAYOUT_TX_RX, XSPI_PIN_LAYOUT_RX_TX)) {

        return (-EINVAL);
    }
    devCtx = getDevCtx(ctx);
    rtdm_lock_get_irqsave(&devCtx->lock, lockCtx);

    if (XSPI_ACTIVITY_RUNNIG == devCtx->actvCnt) {
        rtdm_lock_put_irqrestore(&devCtx->lock, lockCtx);

        return (-EAGAIN);
    }
    devCtx->chn[devCtx->cfg.chn].cfg.pinLayout = pinLayout;
    lldChnPinLayoutSet(
        ctx->device,
        devCtx->cfg.chn,
        (uint32_t)pinLayout);
    rtdm_lock_put_irqrestore(&devCtx->lock, lockCtx);

    return (0);
}
Example #10
/***
 *  rt_udp_close
 */
int rt_udp_close(struct rtdm_dev_context *sockctx,
                 rtdm_user_info_t *user_info)
{
    struct rtsocket *sock = (struct rtsocket *)&sockctx->dev_private;
    struct rtskb    *del;
    int             port;
    rtdm_lockctx_t  context;


    rtdm_lock_get_irqsave(&udp_socket_base_lock, context);

    sock->prot.inet.state = TCP_CLOSE;

    if (sock->prot.inet.reg_index >= 0) {
        port = sock->prot.inet.reg_index;
        clear_bit(port % 32, &port_bitmap[port / 32]);

        free_ports++;

        sock->prot.inet.reg_index = -1;
    }

    rtdm_lock_put_irqrestore(&udp_socket_base_lock, context);

    /* cleanup already collected fragments */
    rt_ip_frag_invalidate_socket(sock);

    /* free packets in incoming queue */
    while ((del = rtskb_dequeue(&sock->incoming)) != NULL)
        kfree_rtskb(del);

    return rt_socket_cleanup(sockctx);
}
Example #11
static int32_t cfgInitialDelaySet(
    struct rtdm_dev_context * ctx,
    enum xspiInitialDelay delay) {

    struct devCtx *     devCtx;
    rtdm_lockctx_t      lockCtx;

    LOG_DBG("CFG: set initial delay to %d", delay);

    if (!CFG_ARG_IS_VALID(delay, XSPI_INITIAL_DELAY_0, XSPI_INITIAL_DELAY_32)) {

        return (-EINVAL);
    }
    devCtx = getDevCtx(ctx);
    rtdm_lock_get_irqsave(&devCtx->lock, lockCtx);

    if (XSPI_ACTIVITY_RUNNIG == devCtx->actvCnt) {
        rtdm_lock_put_irqrestore(&devCtx->lock, lockCtx);

        return (-EAGAIN);
    }
    devCtx->cfg.delay = delay;
    lldInitialDelaySet(
        ctx->device,
        (uint32_t)delay);
    rtdm_lock_put_irqrestore(&devCtx->lock, lockCtx);

    return (0);
}
Example #12
File: route.c  Project: hiddeate2m/rtnet
/***
 *  rt_ip_route_del_host - deletes specified host route
 */
int rt_ip_route_del_host(u32 addr, struct rtnet_device *rtdev)
{
    rtdm_lockctx_t      context;
    struct host_route   *rt;
    struct host_route   **last_ptr;
    unsigned int        key;


    key = ntohl(addr) & HOST_HASH_KEY_MASK;
    last_ptr = &host_hash_tbl[key];

    rtdm_lock_get_irqsave(&host_table_lock, context);

    rt = host_hash_tbl[key];
    while (rt != NULL) {
        if ((rt->dest_host.ip == addr) &&
            (!rtdev || (rt->dest_host.rtdev->local_ip == rtdev->local_ip))) {
            *last_ptr = rt->next;

            rt_free_host_route(rt);

            rtdm_lock_put_irqrestore(&host_table_lock, context);

            return 0;
        }

        last_ptr = &rt->next;
        rt = rt->next;
    }

    rtdm_lock_put_irqrestore(&host_table_lock, context);

    return -ENOENT;
}
Example #13
static int32_t cfgModeSet(
    struct rtdm_dev_context * ctx,
    enum xspiMode       mode) {

    struct devCtx *     devCtx;
    rtdm_lockctx_t      lockCtx;

    LOG_DBG("CFG: set SPI mode to %d", mode);

    if (!CFG_ARG_IS_VALID(mode, XSPI_MODE_MASTER, XSPI_MODE_SLAVE)) {

        return (-EINVAL);
    }
    devCtx = getDevCtx(ctx);
    rtdm_lock_get_irqsave(&devCtx->lock, lockCtx);

    if (XSPI_ACTIVITY_RUNNIG == devCtx->actvCnt) {
        rtdm_lock_put_irqrestore(&devCtx->lock, lockCtx);

        return (-EAGAIN);
    }
    devCtx->cfg.mode = mode;
    cfgApply(ctx);
    rtdm_lock_put_irqrestore(&devCtx->lock, lockCtx);

    return (0);
}
Example #14
struct rtcan_device *rtcan_dev_get_by_index(int ifindex)
{
    struct rtcan_device *dev;
#ifdef RTCAN_USE_REFCOUNT
    rtdm_lockctx_t context;
#endif


    if ((ifindex <= 0) || (ifindex > RTCAN_MAX_DEVICES))
        return NULL;

#ifdef RTCAN_USE_REFCOUNT
    rtdm_lock_get_irqsave(&rtcan_devices_rt_lock, context);
#endif

    dev = __rtcan_dev_get_by_index(ifindex);

#ifdef RTCAN_USE_REFCOUNT
    if (dev != NULL)
        atomic_inc(&dev->refcount);
    rtdm_lock_put_irqrestore(&rtcan_devices_rt_lock, context);
#endif

    return dev;
}
Example #15
static int32_t cfgChnCsPolaritySet(
    struct rtdm_dev_context * ctx,
    enum xspiCsPolarity csPolarity) {

    struct devCtx *     devCtx;
    rtdm_lockctx_t      lockCtx;

    LOG_DBG("CFG: set CS polarity to %d", csPolarity);

    if (!CFG_ARG_IS_VALID(csPolarity, XSPI_CS_POLARITY_ACTIVE_HIGH, XSPI_CS_POLAROTY_ACTIVE_LOW)) {

        return (-EINVAL);
    }
    devCtx = getDevCtx(ctx);
    rtdm_lock_get_irqsave(&devCtx->lock, lockCtx);

    if (XSPI_ACTIVITY_RUNNIG == devCtx->actvCnt) {
        rtdm_lock_put_irqrestore(&devCtx->lock, lockCtx);

        return (-EAGAIN);
    }
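    /* note: the polarity value lands in .cfg.wordLength below, which looks
       like a copy-paste slip from cfgChnWordLengthSet (Example #21) */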
    devCtx->chn[devCtx->cfg.chn].cfg.wordLength = csPolarity;
    lldChnCsPolaritySet(
        ctx->device,
        devCtx->cfg.chn,
        (uint32_t)csPolarity);
    rtdm_lock_put_irqrestore(&devCtx->lock, lockCtx);

    return (0);
}
Example #16
File: route.c  Project: hiddeate2m/rtnet
/***
 *  rt_ip_route_get_host - check if specified host route is resolved
 */
int rt_ip_route_get_host(u32 addr, char *if_name, unsigned char *dev_addr,
                         struct rtnet_device *rtdev)
{
    rtdm_lockctx_t      context;
    struct host_route   *rt;
    unsigned int        key;


    key = ntohl(addr) & HOST_HASH_KEY_MASK;

    rtdm_lock_get_irqsave(&host_table_lock, context);

    rt = host_hash_tbl[key];
    while (rt != NULL) {
        if ((rt->dest_host.ip == addr) &&
            (!rtdev || rt->dest_host.rtdev->local_ip == rtdev->local_ip)) {
            memcpy(dev_addr, rt->dest_host.dev_addr,
                   rt->dest_host.rtdev->addr_len);
            strncpy(if_name, rt->dest_host.rtdev->name, IFNAMSIZ);

            rtdm_lock_put_irqrestore(&host_table_lock, context);
            return 0;
        }

        rt = rt->next;
    }

    rtdm_lock_put_irqrestore(&host_table_lock, context);

    return -ENOENT;
}
Example #17
void rtcan_socket_init(struct rtdm_dev_context *context)
{
    struct rtcan_socket *sock = (struct rtcan_socket *)&context->dev_private;
    rtdm_lockctx_t lock_ctx;


    rtdm_sem_init(&sock->recv_sem, 0);

    sock->recv_head = 0;
    sock->recv_tail = 0;
    atomic_set(&sock->ifindex, 0);
    sock->flistlen = RTCAN_SOCK_UNBOUND;
    sock->flist = NULL;
    sock->err_mask = 0;
    sock->rx_buf_full = 0;
#ifdef CONFIG_XENO_DRIVERS_CAN_LOOPBACK
    sock->loopback = 1;
#endif

    sock->tx_timeout = RTDM_TIMEOUT_INFINITE;
    sock->rx_timeout = RTDM_TIMEOUT_INFINITE;

    INIT_LIST_HEAD(&sock->tx_wait_head);

    rtdm_lock_get_irqsave(&rtcan_recv_list_lock, lock_ctx);
    list_add(&sock->socket_list, &rtcan_socket_list);
    rtdm_lock_put_irqrestore(&rtcan_recv_list_lock, lock_ctx);
}
Example #18
int cpdma_chan_start(struct cpdma_chan *chan)
{
	struct cpdma_ctlr	*ctlr = chan->ctlr;
	struct cpdma_desc_pool	*pool = ctlr->pool;
	rtdm_lockctx_t		context;

	rtdm_lock_get_irqsave(&chan->lock, context);
	if (chan->state != CPDMA_STATE_IDLE) {
		rtdm_lock_put_irqrestore(&chan->lock, context);
		return -EBUSY;
	}
	if (ctlr->state != CPDMA_STATE_ACTIVE) {
		rtdm_lock_put_irqrestore(&chan->lock, context);
		return -EINVAL;
	}
	dma_reg_write(ctlr, chan->int_set, chan->mask);
	chan->state = CPDMA_STATE_ACTIVE;
	if (chan->head) {
		chan_write(chan, hdp, desc_phys(pool, chan->head));
		if (chan->rxfree)
			chan_write(chan, rxfree, chan->count);
	}

	rtdm_lock_put_irqrestore(&chan->lock, context);
	return 0;
}
Example #19
int cpdma_ctlr_stop(struct cpdma_ctlr *ctlr)
{
	rtdm_lockctx_t context;
	int i;

	rtdm_lock_get_irqsave(&ctlr->lock, context);
	if (ctlr->state != CPDMA_STATE_ACTIVE) {
		rtdm_lock_put_irqrestore(&ctlr->lock, context);
		return -EINVAL;
	}

	ctlr->state = CPDMA_STATE_TEARDOWN;

	for (i = 0; i < ARRAY_SIZE(ctlr->channels); i++) {
		if (ctlr->channels[i])
			cpdma_chan_stop(ctlr->channels[i]);
	}

	dma_reg_write(ctlr, CPDMA_RXINTMASKCLEAR, 0xffffffff);
	dma_reg_write(ctlr, CPDMA_TXINTMASKCLEAR, 0xffffffff);

	dma_reg_write(ctlr, CPDMA_TXCONTROL, 0);
	dma_reg_write(ctlr, CPDMA_RXCONTROL, 0);

	ctlr->state = CPDMA_STATE_IDLE;

	rtdm_lock_put_irqrestore(&ctlr->lock, context);
	return 0;
}
Example #20
/***
 *  rt_packet_close
 */
int rt_packet_close(struct rtdm_dev_context *sockctx,
                    rtdm_user_info_t *user_info)
{
    struct rtsocket         *sock = (struct rtsocket *)&sockctx->dev_private;
    struct rtpacket_type    *pt = &sock->prot.packet.packet_type;
    struct rtskb            *del;
    int                     ret = 0;
    rtdm_lockctx_t          context;


    rtdm_lock_get_irqsave(&sock->param_lock, context);

    if ((pt->type != 0) && ((ret = rtdev_remove_pack(pt)) == 0))
        pt->type = 0;

    rtdm_lock_put_irqrestore(&sock->param_lock, context);

    /* free packets in incoming queue */
    while ((del = rtskb_dequeue(&sock->incoming)) != NULL) {
        rtdev_dereference(del->rtdev);
        kfree_rtskb(del);
    }

    if (ret == 0)
        ret = rt_socket_cleanup(sockctx);

    return ret;
}
Example #21
static int32_t cfgChnWordLengthSet(
    struct rtdm_dev_context * ctx,
    uint32_t            length) {

    struct devCtx *     devCtx;
    rtdm_lockctx_t      lockCtx;

    LOG_DBG("CFG: set word length to %d", length);

    if (!CFG_ARG_IS_VALID(length, 4u, 32u)) {

        return (-EINVAL);
    }
    devCtx = getDevCtx(ctx);
    rtdm_lock_get_irqsave(&devCtx->lock, lockCtx);

    if (XSPI_ACTIVITY_RUNNIG == devCtx->actvCnt) {
        rtdm_lock_put_irqrestore(&devCtx->lock, lockCtx);

        return (-EAGAIN);
    }
    devCtx->chn[devCtx->cfg.chn].cfg.wordLength = length;
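    /* note: a word-length setter invoking lldChnCsDelaySet looks like a
       copy-paste slip from cfgChnCsDelaySet (Example #23) */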
    lldChnCsDelaySet(
        ctx->device,
        devCtx->cfg.chn,
        (uint32_t)length);
    rtdm_lock_put_irqrestore(&devCtx->lock, lockCtx);

    return (0);
}
Example #22
static struct tdma_job *do_reply_cal_job(struct tdma_priv *tdma,
                                         struct tdma_reply_cal *job,
                                         rtdm_lockctx_t lockctx)
{
    struct tdma_job *prev_job;

    if (job->reply_cycle > tdma->current_cycle)
        return &job->head;

    /* remove the job */
    __list_del(job->head.entry.prev, job->head.entry.next);
    job->head.ref_count--;
    prev_job = tdma->current_job =
        list_entry(job->head.entry.prev, struct tdma_job, entry);
    prev_job->ref_count++;
    tdma->job_list_revision++;

    rtdm_lock_put_irqrestore(&tdma->lock, lockctx);

    if (job->reply_cycle == tdma->current_cycle) {
        /* send reply in the assigned slot */
        rtdm_task_sleep_abs(tdma->current_cycle_start + job->reply_offset,
                            RTDM_TIMERMODE_REALTIME);
        rtmac_xmit(job->reply_rtskb);
    } else {
        /* cleanup if cycle already passed */
        kfree_rtskb(job->reply_rtskb);
    }

    rtdm_lock_get_irqsave(&tdma->lock, lockctx);

    return prev_job;
}
Example #23
static int32_t cfgChnCsDelaySet(
    struct rtdm_dev_context * ctx,
    enum xspiCsDelay    delay) {

    struct devCtx *     devCtx;
    rtdm_lockctx_t      lockCtx;

    LOG_DBG("CFG: set CS delay to %d", delay);

    if (!CFG_ARG_IS_VALID(delay, XSPI_CS_DELAY_0_5, XSPI_CS_DELAY_3_5)) {

        return (-EINVAL);
    }
    devCtx = getDevCtx(ctx);
    rtdm_lock_get_irqsave(&devCtx->lock, lockCtx);

    if (XSPI_ACTIVITY_RUNNIG == devCtx->actvCnt) {
        rtdm_lock_put_irqrestore(&devCtx->lock, lockCtx);

        return (-EAGAIN);
    }
    devCtx->chn[devCtx->cfg.chn].cfg.csDelay = delay;
    lldChnCsDelaySet(
        ctx->device,
        devCtx->cfg.chn,
        (uint32_t)delay);
    rtdm_lock_put_irqrestore(&devCtx->lock, lockCtx);

    return (0);
}
Example #24
static int32_t cfgCsModeSet(
    struct rtdm_dev_context * ctx,
    enum xspiCsMode     csMode) {

    struct devCtx *     devCtx;
    rtdm_lockctx_t      lockCtx;

    LOG_DBG("CFG: set CS mode to %d", csMode);

    if (!CFG_ARG_IS_VALID(csMode, XSPI_CS_MODE_ENABLED, XSPI_CS_MODE_DISABLED)) {

        return (-EINVAL);
    }
    devCtx = getDevCtx(ctx);
    rtdm_lock_get_irqsave(&devCtx->lock, lockCtx);

    if (XSPI_ACTIVITY_RUNNIG == devCtx->actvCnt) {
        rtdm_lock_put_irqrestore(&devCtx->lock, lockCtx);

        return (-EAGAIN);
    }
    devCtx->cfg.csMode = csMode;
    lldCsModeSet(
        ctx->device,
        (int32_t)csMode);
    rtdm_lock_put_irqrestore(&devCtx->lock, lockCtx);

    return (0);
}
Example #25
static int32_t cfgChnCsStateSet(
    struct rtdm_dev_context * ctx,
    enum xspiCsState    state) {

    struct devCtx *     devCtx;
    rtdm_lockctx_t      lockCtx;
    int32_t             ret;

    LOG_DBG("CFG: set CS state to %d", state);

    if (!CFG_ARG_IS_VALID(state, XSPI_CS_STATE_INACTIVE, XSPI_CS_STATE_ACTIVE)) {

        return (-EINVAL);
    }
    devCtx = getDevCtx(ctx);
    rtdm_lock_get_irqsave(&devCtx->lock, lockCtx);

    if (XSPI_ACTIVITY_RUNNIG == devCtx->actvCnt) {
        rtdm_lock_put_irqrestore(&devCtx->lock, lockCtx);

        return (-EAGAIN);
    }
    devCtx->chn[devCtx->cfg.chn].cfg.csState = state;
    ret = lldChnCsStateSet(
        ctx->device,
        devCtx->cfg.chn,
        (uint32_t)state);
    rtdm_lock_put_irqrestore(&devCtx->lock, lockCtx);

    return (ret);
}
Example #26
static int32_t cfgChannelModeSet(
    struct rtdm_dev_context * ctx,
    enum xspiChannelMode channelMode) {

    struct devCtx *     devCtx;
    rtdm_lockctx_t      lockCtx;

    LOG_DBG("CFG: set initial delay to %d", channelMode);

    if (!CFG_ARG_IS_VALID(channelMode, XSPI_CHANNEL_MODE_MULTI, XSPI_CHANNEL_MODE_SINGLE)) {

        return (-EINVAL);
    }
    devCtx = getDevCtx(ctx);
    rtdm_lock_get_irqsave(&devCtx->lock, lockCtx);

    if (XSPI_ACTIVITY_RUNNIG == devCtx->actvCnt) {
        rtdm_lock_put_irqrestore(&devCtx->lock, lockCtx);

        return (-EAGAIN);
    }
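    /* note: the channel mode is stored in .cfg.delay below, apparently a
       copy-paste slip from cfgInitialDelaySet (Example #11) */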
    devCtx->cfg.delay = channelMode;
    lldChannelModeSet(
        ctx->device,
        (uint32_t)channelMode);
    rtdm_lock_put_irqrestore(&devCtx->lock, lockCtx);

    return (0);
}
Example #27
/***
 *  rt_udp_connect
 */
int rt_udp_connect(struct rtsocket *sock, const struct sockaddr *serv_addr,
                   socklen_t addrlen)
{
    struct sockaddr_in  *usin = (struct sockaddr_in *) serv_addr;
    rtdm_lockctx_t      context;
    int                 index;


    if (usin->sin_family == AF_UNSPEC) {
        if ((index = sock->prot.inet.reg_index) < 0)
            /* socket is being closed */
            return -EBADF;

        rtdm_lock_get_irqsave(&udp_socket_base_lock, context);

        sock->prot.inet.saddr = INADDR_ANY;
        /* Note: The following line differs from standard stacks, and we also
                 don't remove the socket from the port list. Might get fixed in
                 the future... */
        sock->prot.inet.sport = index + auto_port_start;
        sock->prot.inet.daddr = INADDR_ANY;
        sock->prot.inet.dport = 0;
        sock->prot.inet.state = TCP_CLOSE;

        rtdm_lock_put_irqrestore(&udp_socket_base_lock, context);
    } else {
        if ((addrlen < (int)sizeof(struct sockaddr_in)) ||
            (usin->sin_family != AF_INET))
            return -EINVAL;

        rtdm_lock_get_irqsave(&udp_socket_base_lock, context);

        if (sock->prot.inet.state != TCP_CLOSE) {
            rtdm_lock_put_irqrestore(&udp_socket_base_lock, context);
            return -EINVAL;
        }

        sock->prot.inet.state = TCP_ESTABLISHED;
        sock->prot.inet.daddr = usin->sin_addr.s_addr;
        sock->prot.inet.dport = usin->sin_port;

        rtdm_lock_put_irqrestore(&udp_socket_base_lock, context);
    }

    return 0;
}
Example #28
void rt_icmp_queue_echo_request(struct rt_proc_call *call)
{
    rtdm_lockctx_t  context;


    rtdm_lock_get_irqsave(&echo_calls_lock, context);
    list_add_tail(&call->list_entry, &echo_calls);
    rtdm_lock_put_irqrestore(&echo_calls_lock, context);
}
Example #29
void rtskb_pool_queue_tail(struct rtskb_pool *pool, struct rtskb *skb)
{
    struct rtskb_queue *queue = &pool->queue;
    rtdm_lockctx_t context;

    rtdm_lock_get_irqsave(&queue->lock, context);
    __rtskb_pool_queue_tail(pool, skb);
    rtdm_lock_put_irqrestore(&queue->lock, context);
}
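Note the convention in this example and in Example #1: the double-underscore variants (__rtskb_pool_queue_tail, __rtskb_prio_dequeue) expect the caller to hold the queue lock already, while the plain wrappers acquire and release it themselves.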
Example #30
static struct tdma_job *do_request_cal_job(struct tdma_priv *tdma,
                                           struct tdma_request_cal *job,
                                           rtdm_lockctx_t lockctx)
{
    struct rt_proc_call *call;
    struct tdma_job     *prev_job;
    int                 err;

    if ((job->period != 1) &&
        (tdma->current_cycle % job->period != job->phasing))
        return &job->head;

    /* remove job until we get a reply */
    __list_del(job->head.entry.prev, job->head.entry.next);
    job->head.ref_count--;
    prev_job = tdma->current_job =
        list_entry(job->head.entry.prev, struct tdma_job, entry);
    prev_job->ref_count++;
    tdma->job_list_revision++;

    rtdm_lock_put_irqrestore(&tdma->lock, lockctx);

    rtdm_task_sleep_abs(tdma->current_cycle_start + job->offset,
                        RTDM_TIMERMODE_REALTIME);
    err = tdma_xmit_request_cal_frame(tdma,
            tdma->current_cycle + job->period, job->offset);

    rtdm_lock_get_irqsave(&tdma->lock, lockctx);

    /* terminate call on error */
    if (err < 0) {
        call = tdma->calibration_call;
        tdma->calibration_call = NULL;

        if (call) {
            rtdm_lock_put_irqrestore(&tdma->lock, lockctx);
            rtpc_complete_call(call, err);
            rtdm_lock_get_irqsave(&tdma->lock, lockctx);
        }
    }

    return prev_job;
}