Example #1
0
/***
 *  rt_ip_route_del_host - deletes specified host route
 *
 *  Removes the first entry matching @addr (and, when @rtdev is non-NULL,
 *  the device's local IP) from the host hash table.
 *  Returns 0 on success, -ENOENT if no matching route exists.
 */
int rt_ip_route_del_host(u32 addr, struct rtnet_device *rtdev)
{
    rtdm_lockctx_t      context;
    struct host_route   *rt;
    struct host_route   **link;
    unsigned int        key = ntohl(addr) & HOST_HASH_KEY_MASK;


    rtdm_lock_get_irqsave(&host_table_lock, context);

    /* walk the chain through the link pointer so unlinking needs no extra
       predecessor bookkeeping */
    for (link = &host_hash_tbl[key]; (rt = *link) != NULL; link = &rt->next) {
        if ((rt->dest_host.ip != addr) ||
            (rtdev && (rt->dest_host.rtdev->local_ip != rtdev->local_ip)))
            continue;

        *link = rt->next;
        rt_free_host_route(rt);

        rtdm_lock_put_irqrestore(&host_table_lock, context);
        return 0;
    }

    rtdm_lock_put_irqrestore(&host_table_lock, context);
    return -ENOENT;
}
Example #2
0
/***
 *  rt_ip_route_get_host - check if specified host route is resolved
 *
 *  On success copies the hardware address into @dev_addr and the interface
 *  name into @if_name and returns 0; returns -ENOENT when no matching
 *  entry is found.
 */
int rt_ip_route_get_host(u32 addr, char *if_name, unsigned char *dev_addr,
                         struct rtnet_device *rtdev)
{
    rtdm_lockctx_t      context;
    struct host_route   *entry;
    unsigned int        key = ntohl(addr) & HOST_HASH_KEY_MASK;


    rtdm_lock_get_irqsave(&host_table_lock, context);

    for (entry = host_hash_tbl[key]; entry != NULL; entry = entry->next) {
        if ((entry->dest_host.ip != addr) ||
            (rtdev && entry->dest_host.rtdev->local_ip != rtdev->local_ip))
            continue;

        /* copy results while the table lock still protects the entry */
        memcpy(dev_addr, entry->dest_host.dev_addr,
               entry->dest_host.rtdev->addr_len);
        strncpy(if_name, entry->dest_host.rtdev->name, IFNAMSIZ);

        rtdm_lock_put_irqrestore(&host_table_lock, context);
        return 0;
    }

    rtdm_lock_put_irqrestore(&host_table_lock, context);
    return -ENOENT;
}
/*
 * cpdma_chan_start - activate an idle DMA channel.
 * Returns 0 on success, -EBUSY if the channel is not idle, -EINVAL if the
 * controller is not active.
 */
int cpdma_chan_start(struct cpdma_chan *chan)
{
	struct cpdma_ctlr	*ctlr = chan->ctlr;
	struct cpdma_desc_pool	*pool = ctlr->pool;
	rtdm_lockctx_t		context;
	int			ret = 0;

	rtdm_lock_get_irqsave(&chan->lock, context);

	if (chan->state != CPDMA_STATE_IDLE) {
		ret = -EBUSY;
		goto unlock;
	}
	if (ctlr->state != CPDMA_STATE_ACTIVE) {
		ret = -EINVAL;
		goto unlock;
	}

	/* unmask the channel interrupt, then mark the channel active */
	dma_reg_write(ctlr, chan->int_set, chan->mask);
	chan->state = CPDMA_STATE_ACTIVE;

	/* if descriptors are already queued, kick the hardware */
	if (chan->head) {
		chan_write(chan, hdp, desc_phys(pool, chan->head));
		if (chan->rxfree)
			chan_write(chan, rxfree, chan->count);
	}

unlock:
	rtdm_lock_put_irqrestore(&chan->lock, context);
	return ret;
}
Example #4
0
/* Set the initial SPI delay; rejected while a transfer is running. */
static int32_t cfgInitialDelaySet(
    struct rtdm_dev_context * ctx,
    enum xspiInitialDelay delay) {

    struct devCtx *     devCtx;
    rtdm_lockctx_t      lockCtx;

    LOG_DBG("CFG: set initial delay to %d", delay);

    /* reject values outside the supported delay range */
    if (!CFG_ARG_IS_VALID(delay, XSPI_INITIAL_DELAY_0, XSPI_INITIAL_DELAY_32)) {

        return (-EINVAL);
    }
    devCtx = getDevCtx(ctx);

    rtdm_lock_get_irqsave(&devCtx->lock, lockCtx);

    /* configuration must not change while activity is in progress */
    if (devCtx->actvCnt == XSPI_ACTIVITY_RUNNIG) {
        rtdm_lock_put_irqrestore(&devCtx->lock, lockCtx);

        return (-EAGAIN);
    }
    devCtx->cfg.delay = delay;
    lldInitialDelaySet(ctx->device, (uint32_t)delay);
    rtdm_lock_put_irqrestore(&devCtx->lock, lockCtx);

    return (0);
}
Example #5
0
/* Set the SPI channel mode; rejected while a transfer is running. */
static int32_t cfgChannelModeSet(
    struct rtdm_dev_context * ctx,
    enum xspiChannelMode channelMode) {

    struct devCtx *     devCtx;
    rtdm_lockctx_t      lockCtx;

    /* BUG FIX: log text said "initial delay" (copy/paste from
       cfgInitialDelaySet) */
    LOG_DBG("CFG: set channel mode to %d", channelMode);

    if (!CFG_ARG_IS_VALID(channelMode, XSPI_CHANNEL_MODE_MULTI, XSPI_CHANNEL_MODE_SINGLE)) {

        return (-EINVAL);
    }
    devCtx = getDevCtx(
        ctx);
    rtdm_lock_get_irqsave(&devCtx->lock, lockCtx);

    if (XSPI_ACTIVITY_RUNNIG == devCtx->actvCnt) {
        rtdm_lock_put_irqrestore(&devCtx->lock, lockCtx);

        return (-EAGAIN);
    }
    /* BUG FIX: was `devCtx->cfg.delay = channelMode;`, which clobbered the
       initial-delay setting instead of recording the channel mode.
       NOTE(review): assumes cfg has a channelMode member, mirroring the
       other cfg*Set() siblings -- verify against struct devCtx. */
    devCtx->cfg.channelMode = channelMode;
    lldChannelModeSet(
        ctx->device,
        (uint32_t)channelMode);
    rtdm_lock_put_irqrestore(&devCtx->lock, lockCtx);

    return (0);
}
Example #6
0
/* Enable or disable chip-select handling; rejected while running. */
static int32_t cfgCsModeSet(
    struct rtdm_dev_context * ctx,
    enum xspiCsMode     csMode) {

    struct devCtx *     devCtx;
    rtdm_lockctx_t      lockCtx;

    LOG_DBG("CFG: set CS mode to %d", csMode);

    /* reject values outside the CS mode range */
    if (!CFG_ARG_IS_VALID(csMode, XSPI_CS_MODE_ENABLED, XSPI_CS_MODE_DISABLED)) {

        return (-EINVAL);
    }
    devCtx = getDevCtx(ctx);

    rtdm_lock_get_irqsave(&devCtx->lock, lockCtx);

    /* configuration must not change while activity is in progress */
    if (devCtx->actvCnt == XSPI_ACTIVITY_RUNNIG) {
        rtdm_lock_put_irqrestore(&devCtx->lock, lockCtx);

        return (-EAGAIN);
    }
    devCtx->cfg.csMode = csMode;
    lldCsModeSet(ctx->device, (int32_t)csMode);
    rtdm_lock_put_irqrestore(&devCtx->lock, lockCtx);

    return (0);
}
Example #7
0
/*
 * do_slot_job - transmit one pending packet in this cycle's time slot.
 *
 * Called from tdma_worker() with tdma->lock held; returns with the lock
 * held again. The lock is dropped around the absolute sleep and around
 * the actual transmission.
 *
 * NOTE(review): @lockctx is passed by value, so re-acquisitions here do
 * not update the caller's copy -- this matches the other job helpers but
 * relies on rtdm_lockctx_t being restorable from a stale copy; confirm.
 */
static void do_slot_job(struct tdma_priv *tdma, struct tdma_slot *job,
                        rtdm_lockctx_t lockctx)
{
    struct rtskb *rtskb;

    /* skip slots that are not scheduled for the current cycle */
    if ((job->period != 1) &&
        (tdma->current_cycle % job->period != job->phasing))
        return;

    rtdm_lock_put_irqrestore(&tdma->lock, lockctx);

    /* wait for slot begin, then send one pending packet */
    rtdm_task_sleep_abs(tdma->current_cycle_start + SLOT_JOB(job)->offset,
                        RTDM_TIMERMODE_REALTIME);

    rtdm_lock_get_irqsave(&tdma->lock, lockctx);
    rtskb = __rtskb_prio_dequeue(SLOT_JOB(job)->queue);
    if (!rtskb)
        return; /* nothing queued; lock is held as the caller expects */
    rtdm_lock_put_irqrestore(&tdma->lock, lockctx);

    rtmac_xmit(rtskb);

    rtdm_lock_get_irqsave(&tdma->lock, lockctx);
}
Example #8
0
/* Select master/slave SPI mode and re-apply the configuration. */
static int32_t cfgModeSet(
    struct rtdm_dev_context * ctx,
    enum xspiMode       mode) {

    struct devCtx *     devCtx;
    rtdm_lockctx_t      lockCtx;

    LOG_DBG("CFG: set SPI mode to %d", mode);

    /* reject values outside the mode range */
    if (!CFG_ARG_IS_VALID(mode, XSPI_MODE_MASTER, XSPI_MODE_SLAVE)) {

        return (-EINVAL);
    }
    devCtx = getDevCtx(ctx);

    rtdm_lock_get_irqsave(&devCtx->lock, lockCtx);

    /* configuration must not change while activity is in progress */
    if (devCtx->actvCnt == XSPI_ACTIVITY_RUNNIG) {
        rtdm_lock_put_irqrestore(&devCtx->lock, lockCtx);

        return (-EAGAIN);
    }
    devCtx->cfg.mode = mode;
    cfgApply(ctx);
    rtdm_lock_put_irqrestore(&devCtx->lock, lockCtx);

    return (0);
}
Example #9
0
/* Set the word length (4..32 bits) of the currently selected channel. */
static int32_t cfgChnWordLengthSet(
    struct rtdm_dev_context * ctx,
    uint32_t            length) {

    struct devCtx *     devCtx;
    rtdm_lockctx_t      lockCtx;

    /* use %u: length is unsigned */
    LOG_DBG("CFG: set word length to %u", length);

    if (!CFG_ARG_IS_VALID(length, 4u, 32u)) {

        return (-EINVAL);
    }
    devCtx = getDevCtx(
        ctx);
    rtdm_lock_get_irqsave(&devCtx->lock, lockCtx);

    if (XSPI_ACTIVITY_RUNNIG == devCtx->actvCnt) {
        rtdm_lock_put_irqrestore(&devCtx->lock, lockCtx);

        return (-EAGAIN);
    }
    devCtx->chn[devCtx->cfg.chn].cfg.wordLength = length;
    /* BUG FIX: was lldChnCsDelaySet() (copy/paste from cfgChnCsDelaySet),
       so the word length was never programmed into the hardware.
       NOTE(review): assumes lldChnWordLengthSet() exists alongside the
       other lldChn*Set() helpers -- verify against the LLD interface. */
    lldChnWordLengthSet(
        ctx->device,
        devCtx->cfg.chn,
        (uint32_t)length);
    rtdm_lock_put_irqrestore(&devCtx->lock, lockCtx);

    return (0);
}
Example #10
0
/* Select the TX/RX pin layout of the currently selected channel. */
static int32_t cfgChnPinLayoutSet(
    struct rtdm_dev_context * ctx,
    enum xspiPinLayout  pinLayout) {

    struct devCtx *     devCtx;
    rtdm_lockctx_t      lockCtx;

    LOG_DBG("CFG: set pin layout to %d", pinLayout);

    /* reject values outside the pin layout range */
    if (!CFG_ARG_IS_VALID(pinLayout, XSPI_PIN_LAYOUT_TX_RX, XSPI_PIN_LAYOUT_RX_TX)) {

        return (-EINVAL);
    }
    devCtx = getDevCtx(ctx);

    rtdm_lock_get_irqsave(&devCtx->lock, lockCtx);

    /* configuration must not change while activity is in progress */
    if (devCtx->actvCnt == XSPI_ACTIVITY_RUNNIG) {
        rtdm_lock_put_irqrestore(&devCtx->lock, lockCtx);

        return (-EAGAIN);
    }
    devCtx->chn[devCtx->cfg.chn].cfg.pinLayout = pinLayout;
    lldChnPinLayoutSet(ctx->device, devCtx->cfg.chn, (uint32_t)pinLayout);
    rtdm_lock_put_irqrestore(&devCtx->lock, lockCtx);

    return (0);
}
Example #11
0
/* Set the chip-select polarity of the currently selected channel. */
static int32_t cfgChnCsPolaritySet(
    struct rtdm_dev_context * ctx,
    enum xspiCsPolarity csPolarity) {

    struct devCtx *     devCtx;
    rtdm_lockctx_t      lockCtx;

    LOG_DBG("CFG: set CS polarity to %d", csPolarity);

    if (!CFG_ARG_IS_VALID(csPolarity, XSPI_CS_POLARITY_ACTIVE_HIGH, XSPI_CS_POLAROTY_ACTIVE_LOW)) {

        return (-EINVAL);
    }
    devCtx = getDevCtx(
        ctx);
    rtdm_lock_get_irqsave(&devCtx->lock, lockCtx);

    if (XSPI_ACTIVITY_RUNNIG == devCtx->actvCnt) {
        rtdm_lock_put_irqrestore(&devCtx->lock, lockCtx);

        return (-EAGAIN);
    }
    /* BUG FIX: was `...cfg.wordLength = csPolarity;` (copy/paste), which
       clobbered the channel's word length and never recorded the polarity.
       NOTE(review): assumes cfg has a csPolarity member, mirroring the
       other per-channel setters -- verify against the channel cfg struct. */
    devCtx->chn[devCtx->cfg.chn].cfg.csPolarity = csPolarity;
    lldChnCsPolaritySet(
        ctx->device,
        devCtx->cfg.chn,
        (uint32_t)csPolarity);
    rtdm_lock_put_irqrestore(&devCtx->lock, lockCtx);

    return (0);
}
Example #12
0
/* Set the chip-select delay of the currently selected channel. */
static int32_t cfgChnCsDelaySet(
    struct rtdm_dev_context * ctx,
    enum xspiCsDelay    delay) {

    struct devCtx *     devCtx;
    rtdm_lockctx_t      lockCtx;

    LOG_DBG("CFG: set CS delay to %d", delay);

    /* reject values outside the CS delay range */
    if (!CFG_ARG_IS_VALID(delay, XSPI_CS_DELAY_0_5, XSPI_CS_DELAY_3_5)) {

        return (-EINVAL);
    }
    devCtx = getDevCtx(ctx);

    rtdm_lock_get_irqsave(&devCtx->lock, lockCtx);

    /* configuration must not change while activity is in progress */
    if (devCtx->actvCnt == XSPI_ACTIVITY_RUNNIG) {
        rtdm_lock_put_irqrestore(&devCtx->lock, lockCtx);

        return (-EAGAIN);
    }
    devCtx->chn[devCtx->cfg.chn].cfg.csDelay = delay;
    lldChnCsDelaySet(ctx->device, devCtx->cfg.chn, (uint32_t)delay);
    rtdm_lock_put_irqrestore(&devCtx->lock, lockCtx);

    return (0);
}
Example #13
0
/* Set the transfer mode (TX+RX / TX only) of the selected channel. */
static int32_t cfgChnTransferModeSet(
    struct rtdm_dev_context * ctx,
    enum xspiTransferMode transferMode) {

    struct devCtx *     devCtx;
    rtdm_lockctx_t      lockCtx;

    LOG_DBG("CFG: set transfer mode to %d", transferMode);

    /* reject values outside the transfer mode range */
    if (!CFG_ARG_IS_VALID(transferMode, XSPI_TRANSFER_MODE_TX_AND_RX, XSPI_TRANSFER_MODE_TX_ONLY)) {

        return (-EINVAL);
    }
    devCtx = getDevCtx(ctx);

    rtdm_lock_get_irqsave(&devCtx->lock, lockCtx);

    /* configuration must not change while activity is in progress */
    if (devCtx->actvCnt == XSPI_ACTIVITY_RUNNIG) {
        rtdm_lock_put_irqrestore(&devCtx->lock, lockCtx);

        return (-EAGAIN);
    }
    devCtx->chn[devCtx->cfg.chn].cfg.transferMode = transferMode;
    lldChnTransferModeSet(ctx->device, devCtx->cfg.chn,
                          (uint32_t)transferMode);
    rtdm_lock_put_irqrestore(&devCtx->lock, lockCtx);

    return (0);
}
/*
 * cpdma_chan_stop - tear down an active DMA channel.
 *
 * Masks the channel interrupt, triggers a hardware teardown, polls (up to
 * ~100 ms) for its completion, processes packets completed before the
 * teardown, and frees every descriptor still queued.
 * Returns 0 on success, -EINVAL if the channel was not active.
 *
 * NOTE(review): the completion poll below runs with chan->lock held and
 * interrupts disabled for up to 100 ms -- confirm this latency is
 * acceptable in this RTDM context.
 */
int cpdma_chan_stop(struct cpdma_chan *chan)
{
	struct cpdma_ctlr	*ctlr = chan->ctlr;
	struct cpdma_desc_pool	*pool = ctlr->pool;
	rtdm_lockctx_t		context;
	int			ret;
	unsigned long		timeout;

	rtdm_lock_get_irqsave(&chan->lock, context);
	if (chan->state != CPDMA_STATE_ACTIVE) {
		rtdm_lock_put_irqrestore(&chan->lock, context);
		return -EINVAL;
	}

	/* TEARDOWN state stops __cpdma_chan_process submissions racing us */
	chan->state = CPDMA_STATE_TEARDOWN;
	dma_reg_write(ctlr, chan->int_clear, chan->mask);

	/* trigger teardown */
	dma_reg_write(ctlr, chan->td, chan_linear(chan));

	/* wait for teardown complete */
	timeout = jiffies + HZ/10;	/* 100 msec */
	while (time_before(jiffies, timeout)) {
		u32 cp = chan_read(chan, cp);
		if ((cp & CPDMA_TEARDOWN_VALUE) == CPDMA_TEARDOWN_VALUE)
			break;
		cpu_relax();
	}
	/* a timeout means the hardware never acknowledged the teardown */
	WARN_ON(!time_before(jiffies, timeout));
	chan_write(chan, cp, CPDMA_TEARDOWN_VALUE);

	/* handle completed packets */
	rtdm_lock_put_irqrestore(&chan->lock, context);
	do {
		ret = __cpdma_chan_process(chan);
		if (ret < 0)
			break;
	} while ((ret & CPDMA_DESC_TD_COMPLETE) == 0);
	rtdm_lock_get_irqsave(&chan->lock, context);

	/* remaining packets haven't been tx/rx'ed, clean them up */
	while (chan->head) {
		struct cpdma_desc __iomem *desc = chan->head;
		dma_addr_t next_dma;

		next_dma = desc_read(desc, hw_next);
		chan->head = desc_from_phys(pool, next_dma);
		chan->count--;
		chan->stats.teardown_dequeue++;

		/* issue callback without locks held */
		rtdm_lock_put_irqrestore(&chan->lock, context);
		__cpdma_chan_free(chan, desc, 0, -ENOSYS);
		rtdm_lock_get_irqsave(&chan->lock, context);
	}

	chan->state = CPDMA_STATE_IDLE;
	rtdm_lock_put_irqrestore(&chan->lock, context);
	return 0;
}
Example #15
0
/* Force the chip-select state of the currently selected channel.
 * Unlike the other setters this forwards the LLD return code. */
static int32_t cfgChnCsStateSet(
    struct rtdm_dev_context * ctx,
    enum xspiCsState    state) {

    struct devCtx *     devCtx;
    rtdm_lockctx_t      lockCtx;
    int32_t             ret;

    LOG_DBG("CFG: set CS state to %d", state);

    /* reject values outside the CS state range */
    if (!CFG_ARG_IS_VALID(state, XSPI_CS_STATE_INACTIVE, XSPI_CS_STATE_ACTIVE)) {

        return (-EINVAL);
    }
    devCtx = getDevCtx(ctx);

    rtdm_lock_get_irqsave(&devCtx->lock, lockCtx);

    /* configuration must not change while activity is in progress */
    if (devCtx->actvCnt == XSPI_ACTIVITY_RUNNIG) {
        rtdm_lock_put_irqrestore(&devCtx->lock, lockCtx);

        return (-EAGAIN);
    }
    devCtx->chn[devCtx->cfg.chn].cfg.csState = state;
    ret = lldChnCsStateSet(ctx->device, devCtx->cfg.chn, (uint32_t)state);
    rtdm_lock_put_irqrestore(&devCtx->lock, lockCtx);

    return (ret);
}
/*
 * cpdma_ctlr_stop - tear down every channel and halt the DMA controller.
 * Returns 0 on success, -EINVAL if the controller is not active.
 *
 * NOTE(review): cpdma_chan_stop() is invoked while ctlr->lock is held
 * with interrupts disabled, and it can busy-wait up to 100 ms per channel
 * -- confirm this latency is acceptable and that the nested chan->lock
 * acquisition cannot deadlock against other lock orderings.
 */
int cpdma_ctlr_stop(struct cpdma_ctlr *ctlr)
{
	rtdm_lockctx_t context;
	int i;

	rtdm_lock_get_irqsave(&ctlr->lock, context);
	if (ctlr->state != CPDMA_STATE_ACTIVE) {
		rtdm_lock_put_irqrestore(&ctlr->lock, context);
		return -EINVAL;
	}

	ctlr->state = CPDMA_STATE_TEARDOWN;

	/* stop all allocated channels first */
	for (i = 0; i < ARRAY_SIZE(ctlr->channels); i++) {
		if (ctlr->channels[i])
			cpdma_chan_stop(ctlr->channels[i]);
	}

	/* mask all interrupts ... */
	dma_reg_write(ctlr, CPDMA_RXINTMASKCLEAR, 0xffffffff);
	dma_reg_write(ctlr, CPDMA_TXINTMASKCLEAR, 0xffffffff);

	/* ... and disable both DMA engines */
	dma_reg_write(ctlr, CPDMA_TXCONTROL, 0);
	dma_reg_write(ctlr, CPDMA_RXCONTROL, 0);

	ctlr->state = CPDMA_STATE_IDLE;

	rtdm_lock_put_irqrestore(&ctlr->lock, context);
	return 0;
}
/*
 * cpdma_chan_create - allocate and register DMA channel @chan_num.
 *
 * The channel starts in IDLE state; completions are reported through
 * @handler. Returns the new channel, NULL when @chan_num exceeds the
 * controller's channel count, or ERR_PTR(-ENOMEM/-EBUSY) on failure.
 *
 * NOTE(review): callers must check both NULL and IS_ERR() on the result.
 */
struct cpdma_chan *cpdma_chan_create(struct cpdma_ctlr *ctlr, int chan_num,
				     cpdma_handler_fn handler)
{
	struct cpdma_chan *chan;
	/* each channel owns a 4-byte slot in the hdp/cp/rxfree register banks */
	int ret, offset = (chan_num % CPDMA_MAX_CHANNELS) * 4;
	rtdm_lockctx_t context;

	if (__chan_linear(chan_num) >= ctlr->num_chan)
		return NULL;

	ret = -ENOMEM;
	chan = kzalloc(sizeof(*chan), GFP_KERNEL);
	if (!chan)
		goto err_chan_alloc;

	rtdm_lock_get_irqsave(&ctlr->lock, context);
	ret = -EBUSY;
	if (ctlr->channels[chan_num])
		goto err_chan_busy;

	chan->ctlr	= ctlr;
	chan->state	= CPDMA_STATE_IDLE;
	chan->chan_num	= chan_num;
	chan->handler	= handler;

	/* rx and tx channels use different register banks, teardown
	   registers and DMA directions */
	if (is_rx_chan(chan)) {
		chan->hdp	= ctlr->params.rxhdp + offset;
		chan->cp	= ctlr->params.rxcp + offset;
		chan->rxfree	= ctlr->params.rxfree + offset;
		chan->int_set	= CPDMA_RXINTMASKSET;
		chan->int_clear	= CPDMA_RXINTMASKCLEAR;
		chan->td	= CPDMA_RXTEARDOWN;
		chan->dir	= DMA_FROM_DEVICE;
	} else {
		chan->hdp	= ctlr->params.txhdp + offset;
		chan->cp	= ctlr->params.txcp + offset;
		chan->int_set	= CPDMA_TXINTMASKSET;
		chan->int_clear	= CPDMA_TXINTMASKCLEAR;
		chan->td	= CPDMA_TXTEARDOWN;
		chan->dir	= DMA_TO_DEVICE;
	}
	chan->mask = BIT(chan_linear(chan));

	rtdm_lock_init(&chan->lock);

	ctlr->channels[chan_num] = chan;
	rtdm_lock_put_irqrestore(&ctlr->lock, context);
	return chan;

err_chan_busy:
	rtdm_lock_put_irqrestore(&ctlr->lock, context);
	kfree(chan);
err_chan_alloc:
	return ERR_PTR(ret);
}
Example #18
0
/*
 * rt_host_route_read_proc - /proc dump of the host route hash table.
 *
 * Printing may block, so the table lock cannot be held across it. Instead,
 * for each entry the chain is re-walked from its head up to @index while
 * the lock is held, the entry is copied out, and the lock is dropped for
 * the actual print. This is O(n^2) per chain but tolerates concurrent
 * table modifications between iterations.
 */
static int rt_host_route_read_proc(char *buf, char **start, off_t offset,
                                   int count, int *eof, void *data)
{
    struct host_route   *entry_ptr;
    struct dest_route   dest_host;
    unsigned int        key;
    unsigned int        index;
    unsigned int        i;
    rtdm_lockctx_t      context;
    int                 res;
    RTNET_PROC_PRINT_VARS_EX(80);


    if (!RTNET_PROC_PRINT_EX("Hash\tDestination\tHW Address\t\tDevice\n"))
        goto done;

    for (key = 0; key < HOST_HASH_TBL_SIZE; key++) {
        index = 0;
        while (1) {
            rtdm_lock_get_irqsave(&host_table_lock, context);

            /* re-walk the chain to the index-th entry under the lock */
            entry_ptr = host_hash_tbl[key];

            for (i = 0; (i < index) && (entry_ptr != NULL); i++)
                entry_ptr = entry_ptr->next;

            if (entry_ptr == NULL) {
                rtdm_lock_put_irqrestore(&host_table_lock, context);
                break;
            }

            /* snapshot the entry and pin its device so the lock can be
               released while printing */
            memcpy(&dest_host, &entry_ptr->dest_host,
                   sizeof(struct dest_route));
            rtdev_reference(dest_host.rtdev);

            rtdm_lock_put_irqrestore(&host_table_lock, context);

            res = RTNET_PROC_PRINT_EX("%02X\t%u.%u.%u.%-3u\t"
                    "%02X:%02X:%02X:%02X:%02X:%02X\t%s\n",
                    key, NIPQUAD(dest_host.ip),
                    dest_host.dev_addr[0], dest_host.dev_addr[1],
                    dest_host.dev_addr[2], dest_host.dev_addr[3],
                    dest_host.dev_addr[4], dest_host.dev_addr[5],
                    dest_host.rtdev->name);
            rtdev_dereference(dest_host.rtdev);
            if (!res)
                goto done;

            index++;
        }
    }

  done:
    RTNET_PROC_PRINT_DONE_EX;
}
Example #19
0
/***
 *  rt_loopback_xmit - begin packet transmission
 *  @skb: packet to be sent
 *  @dev: network device to which packet is sent
 *
 *  Loops the packet straight back into the receive path: the matching
 *  layer-3 handler is looked up in rt_packets[] and invoked directly.
 *  Unmatched packets are dropped (with a warning unless the device is in
 *  promiscuous mode). Always returns 0.
 */
static int rt_loopback_xmit(struct rtskb *skb, struct rtnet_device *rtdev)
{
    unsigned short          hash;
    struct rtpacket_type    *pt_entry;
    rtdm_lockctx_t          context;


    /* write transmission stamp - in case any protocol ever gets the idea to
       ask the lookback device for this service... */
    if (skb->xmit_stamp)
        *skb->xmit_stamp = cpu_to_be64(rtdm_clock_read() + *skb->xmit_stamp);

    /* make sure that critical fields are re-intialised */
    skb->chain_end = skb;

    /* parse the Ethernet header as usual */
    skb->protocol = rt_eth_type_trans(skb, rtdev);
    skb->nh.raw   = skb->data;

    /* pin the device until the handler (or drop path) is done with skb */
    rtdev_reference(rtdev);

    rtcap_report_incoming(skb);

    hash = ntohs(skb->protocol) & RTPACKET_HASH_KEY_MASK;

    rtdm_lock_get_irqsave(&rt_packets_lock, context);

    /* bump refcount so the handler entry cannot vanish while the lock is
       dropped around the (potentially long-running) handler call */
    list_for_each_entry(pt_entry, &rt_packets[hash], list_entry)
        if (pt_entry->type == skb->protocol) {
            pt_entry->refcount++;
            rtdm_lock_put_irqrestore(&rt_packets_lock, context);

            pt_entry->handler(skb, pt_entry);

            rtdm_lock_get_irqsave(&rt_packets_lock, context);
            pt_entry->refcount--;
            rtdm_lock_put_irqrestore(&rt_packets_lock, context);

            goto out;
        }

    rtdm_lock_put_irqrestore(&rt_packets_lock, context);

    /* don't warn if running in promiscuous mode (RTcap...?) */
    if ((rtdev->flags & IFF_PROMISC) == 0)
        rtdm_printk("RTnet: unknown layer-3 protocol\n");

    kfree_rtskb(skb);

  out:
    rtdev_dereference(rtdev);
    return 0;
}
Example #20
0
/*
 * tdma_worker - main TDMA job loop, run as an RTDM task.
 *
 * After the wakeup event, cycles through the job ring starting at
 * tdma->first_job until TDMA_FLAG_SHUTDOWN is set. tdma->lock is held
 * while walking the ring; each job helper may temporarily drop and
 * re-acquire it (they receive @lockctx for exactly that purpose).
 * ref_count marks the job as in-use across such lock gaps; the cal-job
 * helpers return the next job to continue from.
 */
void tdma_worker(void *arg)
{
    struct tdma_priv    *tdma = (struct tdma_priv *)arg;
    struct tdma_job     *job;
    rtdm_lockctx_t      lockctx;


    rtdm_event_wait(&tdma->worker_wakeup);

    rtdm_lock_get_irqsave(&tdma->lock, lockctx);

    job = tdma->first_job;

    while (!test_bit(TDMA_FLAG_SHUTDOWN, &tdma->flags)) {
        job->ref_count++;
        switch (job->id) {
            case WAIT_ON_SYNC:
                /* sleep without the lock until a sync frame arrives */
                rtdm_lock_put_irqrestore(&tdma->lock, lockctx);
                rtdm_event_wait(&tdma->sync_event);
                rtdm_lock_get_irqsave(&tdma->lock, lockctx);
                break;

            case XMIT_REQ_CAL:
                job = do_request_cal_job(tdma, REQUEST_CAL_JOB(job), lockctx);
                break;

#ifdef CONFIG_RTNET_TDMA_MASTER
            case XMIT_SYNC:
                do_xmit_sync_job(tdma, lockctx);
                break;

            case BACKUP_SYNC:
                do_backup_sync_job(tdma, lockctx);
                break;

            case XMIT_RPL_CAL:
                job = do_reply_cal_job(tdma, REPLY_CAL_JOB(job), lockctx);
                break;
#endif /* CONFIG_RTNET_TDMA_MASTER */

            default:
                /* any other id is a time slot */
                do_slot_job(tdma, SLOT_JOB(job), lockctx);
                break;
        }
        job->ref_count--;

        /* advance to the next job in the ring (lock is held here) */
        job = tdma->current_job =
            list_entry(job->entry.next, struct tdma_job, entry);
    }

    rtdm_lock_put_irqrestore(&tdma->lock, lockctx);
}
/*
 * __cpdma_chan_process - reap one completed descriptor from the channel.
 *
 * Returns the descriptor's EOQ/TD_COMPLETE status bits on success,
 * -ENOENT when the queue is empty, or -EBUSY when the head descriptor is
 * still owned by the hardware. The completion callback is issued via
 * __cpdma_chan_free() after the channel lock has been released.
 */
static int __cpdma_chan_process(struct cpdma_chan *chan)
{
	struct cpdma_ctlr		*ctlr = chan->ctlr;
	struct cpdma_desc __iomem	*desc;
	int				status, outlen;
	struct cpdma_desc_pool		*pool = ctlr->pool;
	dma_addr_t			desc_dma;
	rtdm_lockctx_t			context;

	rtdm_lock_get_irqsave(&chan->lock, context);
	desc = chan->head;
	if (!desc) {
		chan->stats.empty_dequeue++;
		status = -ENOENT;
		goto unlock_ret;
	}
	desc_dma = desc_phys(pool, desc);

	/* low 11 bits of hw_mode carry the buffer length */
	status	= __raw_readl(&desc->hw_mode);
	outlen	= status & 0x7ff;
	if (status & CPDMA_DESC_OWNER) {
		/* hardware has not released this descriptor yet */
		chan->stats.busy_dequeue++;
		status = -EBUSY;
		goto unlock_ret;
	}
	status	= status & (CPDMA_DESC_EOQ | CPDMA_DESC_TD_COMPLETE);

	/* pop the descriptor and acknowledge it to the hardware */
	chan->head = desc_from_phys(pool, desc_read(desc, hw_next));
	chan_write(chan, cp, desc_dma);
	chan->count--;
	chan->stats.good_dequeue++;

	/* end-of-queue: restart the DMA engine at the new head */
	if (status & CPDMA_DESC_EOQ) {
		chan->stats.requeue++;
		chan_write(chan, hdp, desc_phys(pool, chan->head));
	}

	rtdm_lock_put_irqrestore(&chan->lock, context);

	/* issue the completion callback without the lock held */
	__cpdma_chan_free(chan, desc, outlen, status);
	return status;
unlock_ret:

	rtdm_lock_put_irqrestore(&chan->lock, context);

	return status;
}
Example #22
0
/***
 *  rt_udp_socket - create a new UDP-Socket
 *  @s: socket
 *
 *  Initialises the socket, allocates an automatic local port from the
 *  global bitmap and registers the socket in the port registry.
 *  Returns 0 on success, -EAGAIN when all UDP ports are in use, or the
 *  error from rt_socket_init().
 */
int rt_udp_socket(struct rtdm_dev_context *sockctx,
                  rtdm_user_info_t *user_info)
{
    struct rtsocket *sock = (struct rtsocket *)&sockctx->dev_private;
    int             ret;
    int             word;
    int             index;
    rtdm_lockctx_t  context;


    ret = rt_socket_init(sockctx);
    if (ret != 0)
        return ret;

    sock->protocol        = IPPROTO_UDP;
    sock->prot.inet.saddr = INADDR_ANY;
    sock->prot.inet.state = TCP_CLOSE;
#ifdef CONFIG_RTNET_RTDM_SELECT
    sock->wakeup_select   = NULL;
#endif /* CONFIG_RTNET_RTDM_SELECT */

    rtdm_lock_get_irqsave(&udp_socket_base_lock, context);

    /* enforce maximum number of UDP sockets */
    if (free_ports == 0) {
        rtdm_lock_put_irqrestore(&udp_socket_base_lock, context);
        rt_socket_cleanup(sockctx);
        return -EAGAIN;
    }
    free_ports--;

    /* locate a 32-bit word with at least one unused auto-port
       (free_ports > 0 guarantees one exists) */
    for (word = 0; word < sizeof(port_bitmap)/4; word++)
        if (port_bitmap[word] != 0xFFFFFFFF)
            break;
    index = ffz(port_bitmap[word]);
    set_bit(index, &port_bitmap[word]);
    index += word*32;
    sock->prot.inet.reg_index = index;
    sock->prot.inet.sport     = index + auto_port_start;

    /* register UDP socket */
    port_registry[index].sport = sock->prot.inet.sport;
    port_registry[index].saddr = INADDR_ANY;
    port_registry[index].sock  = sock;

    rtdm_lock_put_irqrestore(&udp_socket_base_lock, context);

    return 0;
}
Example #23
0
/***
 *  rt_packet_rcv
 *
 *  Delivers an incoming packet to the owning packet socket and invokes
 *  the socket's notification callback (if set) outside the parameter
 *  lock. Always returns 0.
 */
int rt_packet_rcv(struct rtskb *skb, struct rtpacket_type *pt)
{
    /* recover the enclosing socket from the embedded packet_type member */
    struct rtsocket *sock = (struct rtsocket *)(((u8 *)pt) -
                                ((u8 *)&((struct rtsocket *)0)->prot.packet));
    int             ifindex = sock->prot.packet.ifindex;
    void            (*callback_func)(struct rtdm_dev_context *, void *);
    void            *callback_arg;
    rtdm_lockctx_t  context;


    /* drop packets bound to another interface or when no buffer can be
       acquired from the socket's pool */
    if (((ifindex != 0) && (ifindex != skb->rtdev->ifindex)) ||
        (rtskb_acquire(skb, &sock->skb_pool) != 0)) {
        kfree_rtskb(skb);
        return 0;
    }

    rtdev_reference(skb->rtdev);
    rtskb_queue_tail(&sock->incoming, skb);
    rtdm_sem_up(&sock->pending_sem);

    /* snapshot the callback under the lock, invoke it without it */
    rtdm_lock_get_irqsave(&sock->param_lock, context);
    callback_func = sock->callback_func;
    callback_arg  = sock->callback_arg;
    rtdm_lock_put_irqrestore(&sock->param_lock, context);

    if (callback_func)
        callback_func(rt_socket_context(sock), callback_arg);

    return 0;
}
Example #24
0
/***
 *  rt_packet_close
 *
 *  Unregisters the packet type, drains the receive queue and releases the
 *  socket. Returns 0 on success or the error from rtdev_remove_pack() /
 *  rt_socket_cleanup().
 */
int rt_packet_close(struct rtdm_dev_context *sockctx,
                    rtdm_user_info_t *user_info)
{
    struct rtsocket         *sock = (struct rtsocket *)&sockctx->dev_private;
    struct rtpacket_type    *pt = &sock->prot.packet.packet_type;
    struct rtskb            *skb;
    int                     ret = 0;
    rtdm_lockctx_t          context;


    rtdm_lock_get_irqsave(&sock->param_lock, context);

    /* unhook the packet type if it is still registered */
    if (pt->type != 0) {
        ret = rtdev_remove_pack(pt);
        if (ret == 0)
            pt->type = 0;
    }

    rtdm_lock_put_irqrestore(&sock->param_lock, context);

    /* free packets in incoming queue */
    while ((skb = rtskb_dequeue(&sock->incoming)) != NULL) {
        rtdev_dereference(skb->rtdev);
        kfree_rtskb(skb);
    }

    if (ret == 0)
        ret = rt_socket_cleanup(sockctx);

    return ret;
}
Example #25
0
/*
 * tims_clock_ioctl - report the clock offset (or absolute time for
 * TIMS_RTIOC_GETTIME) to user space.
 */
int tims_clock_ioctl(rtdm_user_info_t *user_info, unsigned int request,
                     void *arg)
{
    rtdm_lockctx_t lock_ctx;
    nanosecs_rel_t result = 0;

    switch (clock_sync_mode) {
        case SYNC_RTNET:
            /* query the time offset directly from the RTmac device */
            sync_dev_ctx->ops->ioctl_rt(sync_dev_ctx, NULL,
                                        RTMAC_RTIOC_TIMEOFFSET, &result);
            break;

        case SYNC_CAN_SLAVE:
        case SYNC_SER_SLAVE:
            /* clock_offset is updated concurrently; read under the lock */
            rtdm_lock_get_irqsave(&sync_lock, lock_ctx);
            result = clock_offset;
            rtdm_lock_put_irqrestore(&sync_lock, lock_ctx);
            break;

        default:
            /* other sync modes: offset remains 0 */
            break;
    }

    result += sync_delay;

    /* GETTIME additionally folds in the current local clock */
    if (request == TIMS_RTIOC_GETTIME)
        result += rtdm_clock_read();

    return rtdm_safe_copy_to_user(user_info, arg, &result, sizeof(result));
}
Example #26
0
/***
 *  rt_udp_close
 *
 *  Marks the socket closed, returns its auto-assigned port to the global
 *  bitmap, discards pending fragments and queued packets, and releases
 *  the socket.
 */
int rt_udp_close(struct rtdm_dev_context *sockctx,
                 rtdm_user_info_t *user_info)
{
    struct rtsocket *sock = (struct rtsocket *)&sockctx->dev_private;
    struct rtskb    *skb;
    int             index;
    rtdm_lockctx_t  context;


    rtdm_lock_get_irqsave(&udp_socket_base_lock, context);

    sock->prot.inet.state = TCP_CLOSE;

    index = sock->prot.inet.reg_index;
    if (index >= 0) {
        /* give the port back to the allocator */
        clear_bit(index % 32, &port_bitmap[index / 32]);

        free_ports++;

        sock->prot.inet.reg_index = -1;
    }

    rtdm_lock_put_irqrestore(&udp_socket_base_lock, context);

    /* cleanup already collected fragments */
    rt_ip_frag_invalidate_socket(sock);

    /* free packets in incoming queue */
    while ((skb = rtskb_dequeue(&sock->incoming)) != NULL)
        kfree_rtskb(skb);

    return rt_socket_cleanup(sockctx);
}
/*
 * cpdma_chan_int_ctrl - mask or unmask the channel's interrupt.
 * Returns -EINVAL when the channel is not active, 0 otherwise.
 */
int cpdma_chan_int_ctrl(struct cpdma_chan *chan, bool enable)
{
	rtdm_lockctx_t	context;
	int		ret = 0;

	rtdm_lock_get_irqsave(&chan->lock, context);

	if (chan->state == CPDMA_STATE_ACTIVE)
		dma_reg_write(chan->ctlr,
			      enable ? chan->int_set : chan->int_clear,
			      chan->mask);
	else
		ret = -EINVAL;

	rtdm_lock_put_irqrestore(&chan->lock, context);
	return ret;
}
Example #28
0
/*
 * rtcan_socket_init - reset a freshly created CAN socket to its defaults
 * and link it into the global socket list.
 */
void rtcan_socket_init(struct rtdm_dev_context *context)
{
    struct rtcan_socket *sock = (struct rtcan_socket *)&context->dev_private;
    rtdm_lockctx_t lock_ctx;


    rtdm_sem_init(&sock->recv_sem, 0);

    /* empty receive ring, unbound interface, no filters, no errors */
    sock->recv_head   = 0;
    sock->recv_tail   = 0;
    atomic_set(&sock->ifindex, 0);
    sock->flistlen    = RTCAN_SOCK_UNBOUND;
    sock->flist       = NULL;
    sock->err_mask    = 0;
    sock->rx_buf_full = 0;
#ifdef CONFIG_XENO_DRIVERS_CAN_LOOPBACK
    sock->loopback    = 1;
#endif

    /* block forever by default in both directions */
    sock->tx_timeout = RTDM_TIMEOUT_INFINITE;
    sock->rx_timeout = RTDM_TIMEOUT_INFINITE;

    INIT_LIST_HEAD(&sock->tx_wait_head);

    /* publish the socket on the global list */
    rtdm_lock_get_irqsave(&rtcan_recv_list_lock, lock_ctx);
    list_add(&sock->socket_list, &rtcan_socket_list);
    rtdm_lock_put_irqrestore(&rtcan_recv_list_lock, lock_ctx);
}
Example #29
0
/*
 * rtcan_dev_get_by_index - look up a CAN device by interface index.
 *
 * Returns the device or NULL when @ifindex is out of range or unknown.
 * With RTCAN_USE_REFCOUNT the lookup runs under the device-table lock and
 * takes a reference that the caller must drop; without it the raw lookup
 * is returned unprotected.
 */
struct rtcan_device *rtcan_dev_get_by_index(int ifindex)
{
    struct rtcan_device *dev;
#ifdef RTCAN_USE_REFCOUNT
    rtdm_lockctx_t context;
#endif


    /* valid indices are 1..RTCAN_MAX_DEVICES */
    if ((ifindex <= 0) || (ifindex > RTCAN_MAX_DEVICES))
        return NULL;

#ifdef RTCAN_USE_REFCOUNT
    rtdm_lock_get_irqsave(&rtcan_devices_rt_lock, context);
#endif

    dev = __rtcan_dev_get_by_index(ifindex);

#ifdef RTCAN_USE_REFCOUNT
    if (dev != NULL)
        atomic_inc(&dev->refcount);
    rtdm_lock_put_irqrestore(&rtcan_devices_rt_lock, context);
#endif

    return dev;
}
Example #30
0
/* Select the channel that subsequent per-channel setters operate on. */
static int32_t cfgChnSet(
    struct rtdm_dev_context * ctx,
    enum xspiChn        chn) {

    struct devCtx *     devCtx;
    rtdm_lockctx_t      lockCtx;

    LOG_DBG("CFG: set current channel to %d", chn);

    /* reject channels outside the supported range */
    if (!CFG_ARG_IS_VALID(chn, XSPI_CHN_0, XSPI_CHN_3)) {

        return (-EINVAL);
    }

    /* the channel must actually be wired up on this port */
    if (portChnIsOnline(ctx->device, chn) == FALSE) {

        return (-EIDRM);
    }
    devCtx = getDevCtx(ctx);

    rtdm_lock_get_irqsave(&devCtx->lock, lockCtx);
    devCtx->cfg.chn = chn;
    rtdm_lock_put_irqrestore(&devCtx->lock, lockCtx);

    return (0);
}