/* Attach (or detach, when mite_chan is NULL) a mite dma channel to the
 * counter.  The counter spinlock serializes this against readers of
 * counter->mite_chan in interrupt context. */
void ni_tio_set_mite_channel(struct ni_gpct *counter,
	struct mite_channel *mite_chan)
{
	unsigned long irq_state;

	comedi_spin_lock_irqsave(&counter->lock, irq_state);
	counter->mite_chan = mite_chan;
	comedi_spin_unlock_irqrestore(&counter->lock, irq_state);
}
/* Return nonzero once the dma transfer on this channel has completed.
 * mite_get_status() is called first so that the soft done flag is
 * refreshed from hardware before it is sampled under the mite lock. */
int mite_done(struct mite_channel *mite_chan)
{
	struct mite_struct *mite = mite_chan->mite;
	unsigned long irq_state;
	int transfer_done;

	mite_get_status(mite_chan);
	comedi_spin_lock_irqsave(&mite->lock, irq_state);
	transfer_done = mite_chan->done;
	comedi_spin_unlock_irqrestore(&mite->lock, irq_state);
	return transfer_done;
}
/* Cancel a counter command in progress: disarm the counter, disarm any
 * associated mite dma channel, tear down the dma configuration and mask
 * the gate interrupt.  Always returns 0. */
int ni_tio_cancel(struct ni_gpct *counter)
{
	unsigned long flags;

	/* disarm the counter itself before touching dma */
	ni_tio_arm(counter, 0, 0);
	/* counter->lock protects counter->mite_chan against concurrent
	 * ni_tio_set_mite_channel() */
	comedi_spin_lock_irqsave(&counter->lock, flags);
	if (counter->mite_chan) {
		mite_dma_disarm(counter->mite_chan);
	}
	comedi_spin_unlock_irqrestore(&counter->lock, flags);
	ni_tio_configure_dma(counter, 0, 0);
	/* mask the gate interrupt for this counter */
	ni_tio_set_bits(counter,
		NITIO_Gi_Interrupt_Enable_Reg(counter->counter_index),
		Gi_Gate_Interrupt_Enable_Bit(counter->counter_index), 0x0);
	return 0;
}
/* Read the mite channel status register (CHSR) and return it.  If the
 * DONE bit is set, latch it into mite_chan->done and acknowledge the
 * condition in hardware via CHOR_CLRDONE -- both under the mite lock so
 * the soft flag and the hardware ack stay consistent. */
unsigned mite_get_status(struct mite_channel *mite_chan)
{
	struct mite_struct *mite = mite_chan->mite;
	unsigned status;
	unsigned long flags;

	comedi_spin_lock_irqsave(&mite->lock, flags);
	status = readl(mite->mite_io_addr + MITE_CHSR(mite_chan->channel));
	if (status & CHSR_DONE) {
		mite_chan->done = 1;
		/* clear the done condition in the channel operation register */
		writel(CHOR_CLRDONE,
			mite->mite_io_addr + MITE_CHOR(mite_chan->channel));
	}
	/* order the mmio write before the spinlock release */
	mmiowb();
	comedi_spin_unlock_irqrestore(&mite->lock, flags);
	return status;
}
/* Interrupt service for a counter subdevice: acknowledge/confirm the
 * counter's interrupt conditions, report errors to the comedi core via
 * the async event flags, then service the associated mite dma channel
 * (if one is attached). */
void ni_tio_handle_interrupt(struct ni_gpct *counter, comedi_subdevice * s)
{
	unsigned gpct_mite_status;
	unsigned long flags;
	int gate_error;
	int tc_error;
	int perm_stale_data;

	ni_tio_acknowledge_and_confirm(counter, &gate_error, &tc_error,
		&perm_stale_data, NULL);
	if (gate_error) {
		rt_printk("%s: Gi_Gate_Error detected.\n", __FUNCTION__);
		s->async->events |= COMEDI_CB_OVERFLOW;
	}
	if (perm_stale_data) {
		s->async->events |= COMEDI_CB_ERROR;
	}
	/* note: tc_error is retrieved above but not acted upon here */
	switch (counter->counter_dev->variant) {
	case ni_gpct_variant_m_series:
	case ni_gpct_variant_660x:
		/* these variants have a dma status register that can flag
		 * dma request (DRQ) errors */
		if (read_register(counter,
				NITIO_Gi_DMA_Status_Reg(counter->
					counter_index)) & Gi_DRQ_Error_Bit) {
			rt_printk("%s: Gi_DRQ_Error detected.\n", __FUNCTION__);
			s->async->events |= COMEDI_CB_OVERFLOW;
		}
		break;
	case ni_gpct_variant_e_series:
		break;
	}
	/* counter->lock protects counter->mite_chan for the rest of the
	 * handler */
	comedi_spin_lock_irqsave(&counter->lock, flags);
	if (counter->mite_chan == NULL) {
		comedi_spin_unlock_irqrestore(&counter->lock, flags);
		return;
	}
	gpct_mite_status = mite_get_status(counter->mite_chan);
	if (gpct_mite_status & CHSR_LINKC) {
		/* acknowledge the link-complete condition */
		writel(CHOR_CLRLC,
			counter->mite_chan->mite->mite_io_addr +
			MITE_CHOR(counter->mite_chan->channel));
	}
	mite_sync_input_dma(counter->mite_chan, s->async);
	comedi_spin_unlock_irqrestore(&counter->lock, flags);
}
/* Arm the mite dma channel so it starts transferring.  The done flag is
 * cleared and CHOR_START written to the channel operation register under
 * the mite lock. */
void mite_dma_arm(struct mite_channel *mite_chan)
{
	struct mite_struct *mite = mite_chan->mite;
	int chor;
	unsigned long flags;

	/* fix: debug printout referenced undeclared identifier 'channel';
	 * the channel number lives in mite_chan->channel */
	MDPRINTK("mite_dma_arm ch%i\n", mite_chan->channel);
	/* memory barrier is intended to insure any twiddling with the buffer
	   is done before writing to the mite to arm dma transfer */
	smp_mb();
	/* arm */
	chor = CHOR_START;
	comedi_spin_lock_irqsave(&mite->lock, flags);
	mite_chan->done = 0;
	writel(chor, mite->mite_io_addr + MITE_CHOR(mite_chan->channel));
	/* order the mmio write before the spinlock release */
	mmiowb();
	comedi_spin_unlock_irqrestore(&mite->lock, flags);
	// mite_dma_tcr(mite, channel);
}
/* Drop one real-time reference on the device's irq handler; when the
 * last rt user goes away the irq is handed back to the normal (non-rt)
 * comedi handler. */
void comedi_switch_to_non_rt(struct comedi_device *dev)
{
	struct comedi_irq_struct *it;
	unsigned long flags;

	it = comedi_irqs[dev->irq];
	/* nothing to do if no irq handler is registered for this irq */
	if (it == NULL)
		return;
	comedi_spin_lock_irqsave(&dev->spinlock, flags);
	dev->rt--;
	if (!dev->rt)
		comedi_rt_release_irq(it);
	/* NOTE(review): it->rt is cleared even when dev->rt is still
	 * nonzero, while comedi_switch_to_rt() sets it unconditionally --
	 * confirm this asymmetry is intended for nested rt users */
	it->rt = 0;
	comedi_spin_unlock_irqrestore(&dev->spinlock, flags);
}
struct mite_channel *mite_request_channel_in_range(struct mite_struct *mite, struct mite_dma_descriptor_ring *ring, unsigned min_channel, unsigned max_channel) { int i; unsigned long flags; struct mite_channel *channel = NULL; // spin lock so mite_release_channel can be called safely from interrupts comedi_spin_lock_irqsave(&mite->lock, flags); for (i = min_channel; i <= max_channel; ++i) { if (mite->channel_allocated[i] == 0) { mite->channel_allocated[i] = 1; channel = &mite->channels[i]; channel->ring = ring; break; } } comedi_spin_unlock_irqrestore(&mite->lock, flags); return channel; }
/* Return a previously requested mite dma channel to the free pool:
 * disarm and reset it, mask all of its interrupt sources, then mark it
 * unallocated.  Safe to call on an already-released channel (the
 * allocated flag guards the teardown). */
void mite_release_channel(struct mite_channel *mite_chan)
{
	struct mite_struct *mite = mite_chan->mite;
	unsigned long flags;

	// spin lock to prevent races with mite_request_channel
	comedi_spin_lock_irqsave(&mite->lock, flags);
	if (mite->channel_allocated[mite_chan->channel]) {
		mite_dma_disarm(mite_chan);
		mite_dma_reset(mite_chan);
		/* disable all channel's interrupts (do it after disarm/reset so
		   MITE_CHCR reg isn't changed while dma is still active!) */
		writel(CHCR_CLR_DMA_IE | CHCR_CLR_LINKP_IE |
			CHCR_CLR_SAR_IE | CHCR_CLR_DONE_IE |
			CHCR_CLR_MRDY_IE | CHCR_CLR_DRDY_IE |
			CHCR_CLR_LC_IE | CHCR_CLR_CONT_RB_IE,
			mite->mite_io_addr + MITE_CHCR(mite_chan->channel));
		mite->channel_allocated[mite_chan->channel] = 0;
		mite_chan->ring = NULL;
		/* order the mmio writes before the spinlock release */
		mmiowb();
	}
	comedi_spin_unlock_irqrestore(&mite->lock, flags);
}
/* Take one real-time reference on the device's irq handler; the first
 * rt user switches the irq over to the rt handler.  Returns 0 on
 * success, -1 when no irq handler is registered for this device. */
int comedi_switch_to_rt(struct comedi_device *dev)
{
	struct comedi_irq_struct *it;
	unsigned long flags;

	it = comedi_irqs[dev->irq];
	/* drivers might not be using an interrupt for commands,
	   or we might not have been able to get an unshared irq */
	if (it == NULL)
		return -1;
	comedi_spin_lock_irqsave(&dev->spinlock, flags);
	/* only grab the irq on the first rt user */
	if (!dev->rt)
		comedi_rt_get_irq(it);
	dev->rt++;
	it->rt = 1;
	comedi_spin_unlock_irqrestore(&dev->spinlock, flags);
	return 0;
}
/* Internal-trigger handler for buffered counter input: arms the dma
 * channel and then the counter itself.  Only trigger number 0 is
 * accepted; fails with -EIO if no mite channel is attached.  Clears
 * s->async->inttrig so the trigger fires at most once. */
static int ni_tio_input_inttrig(comedi_device * dev, comedi_subdevice * s,
	unsigned int trignum)
{
	struct ni_gpct *counter = s->private;
	unsigned long irq_state;
	int ret = 0;

	BUG_ON(counter == NULL);
	if (trignum != 0)
		return -EINVAL;
	/* counter->lock protects counter->mite_chan */
	comedi_spin_lock_irqsave(&counter->lock, irq_state);
	if (counter->mite_chan == NULL)
		ret = -EIO;
	else
		mite_dma_arm(counter->mite_chan);
	comedi_spin_unlock_irqrestore(&counter->lock, irq_state);
	if (ret < 0)
		return ret;
	ret = ni_tio_arm(counter, 1, NI_GPCT_ARM_IMMEDIATE);
	s->async->inttrig = NULL;
	return ret;
}
/* During buffered input counter operation for e-series, the gate interrupt is acked automatically by the dma controller, due to the Gi_Read/Write_Acknowledges_IRQ bits in the input select register. */ static int should_ack_gate(struct ni_gpct *counter) { unsigned long flags; int retval = 0; switch (counter->counter_dev->variant) { case ni_gpct_variant_m_series: case ni_gpct_variant_660x: // not sure if 660x really supports gate interrupts (the bits are not listed in register-level manual) return 1; break; case ni_gpct_variant_e_series: comedi_spin_lock_irqsave(&counter->lock, flags); { if (counter->mite_chan == NULL || counter->mite_chan->dir != COMEDI_INPUT || (mite_done(counter->mite_chan))) { retval = 1; } } comedi_spin_unlock_irqrestore(&counter->lock, flags); break; } return retval; }
/* Start a streaming (command) acquisition on the counter.  Requires a
 * mite dma channel to be attached; dispatches to the output or input
 * command path based on the CMDF_WRITE flag.  Returns 0 on success or a
 * negative errno. */
int ni_tio_cmd(struct ni_gpct *counter, comedi_async * async)
{
	comedi_cmd *cmd = &async->cmd;
	int retval = 0;
	unsigned long flags;

	/* counter->lock protects counter->mite_chan for the whole setup */
	comedi_spin_lock_irqsave(&counter->lock, flags);
	if (counter->mite_chan == NULL) {
		rt_printk
			("ni_tio: commands only supported with DMA. Interrupt-driven commands not yet implemented.\n");
		retval = -EIO;
	} else {
		retval = ni_tio_cmd_setup(counter, async);
		if (retval == 0) {
			if (cmd->flags & CMDF_WRITE) {
				retval = ni_tio_output_cmd(counter, async);
			} else {
				retval = ni_tio_input_cmd(counter, async);
			}
		}
	}
	comedi_spin_unlock_irqrestore(&counter->lock, flags);
	return retval;
}