int pxa2xx_spi_dma_prepare(struct driver_data *drv_data, u32 dma_burst)
{
	u32 dma_width;

	switch (drv_data->n_bytes) {
	case 1:
		dma_width = DCMD_WIDTH1;
		break;
	case 2:
		dma_width = DCMD_WIDTH2;
		break;
	default:
		dma_width = DCMD_WIDTH4;
		break;
	}

	/* Setup rx DMA Channel */
	DCSR(drv_data->rx_channel) = RESET_DMA_CHANNEL;
	DSADR(drv_data->rx_channel) = drv_data->ssdr_physical;
	DTADR(drv_data->rx_channel) = drv_data->rx_dma;
	if (drv_data->rx == drv_data->null_dma_buf)
		/* No target address increment */
		DCMD(drv_data->rx_channel) = DCMD_FLOWSRC
						| dma_width
						| dma_burst
						| drv_data->len;
	else
		DCMD(drv_data->rx_channel) = DCMD_INCTRGADDR
						| DCMD_FLOWSRC
						| dma_width
						| dma_burst
						| drv_data->len;

	/* Setup tx DMA Channel */
	DCSR(drv_data->tx_channel) = RESET_DMA_CHANNEL;
	DSADR(drv_data->tx_channel) = drv_data->tx_dma;
	DTADR(drv_data->tx_channel) = drv_data->ssdr_physical;
	if (drv_data->tx == drv_data->null_dma_buf)
		/* No source address increment */
		DCMD(drv_data->tx_channel) = DCMD_FLOWTRG
						| dma_width
						| dma_burst
						| drv_data->len;
	else
		DCMD(drv_data->tx_channel) = DCMD_INCSRCADDR
						| DCMD_FLOWTRG
						| dma_width
						| dma_burst
						| drv_data->len;

	/* Enable dma end irqs on SSP to detect end of transfer */
	if (drv_data->ssp_type == PXA25x_SSP)
		DCMD(drv_data->tx_channel) |= DCMD_ENDIRQEN;

	return 0;
}
static void pxa250_start_rx_dma(struct net_device *dev)
{
	struct pxa250_irda *si = dev->priv;
	int ch = si->rxdma_ch;

	if (!si->rxskb) {
		DBG("rx buffer went missing\n");
		/* return; */
	}

	DCSR(ch) = 0;
	DCSR(ch) = DCSR_NODESC;
	DSADR(ch) = __PREG(ICDR);
	DTADR(ch) = si->rxbuf_dma;	/* physical address */

	/* We should never do END_IRQ. !!! */
	DCMD(ch) = DCMD_ENDIRQEN | DCMD_INCTRGADDR | DCMD_FLOWSRC |
		   DCMD_BURST8 | DCMD_WIDTH1 | HPSIR_MAX_RXLEN;

	/*
	 * All RX information will be available as soon as we set the RXE flag.
	 */
	DCSR(ch) = DCSR_ENDINTR | DCSR_BUSERR;
	DCSR(ch) = DCSR_RUN | DCSR_NODESC;
}
static inline void pxa_irda_fir_dma_tx_start(struct pxa_irda *si)
{
	DCSR(si->txdma) = DCSR_NODESC;
	DSADR(si->txdma) = si->dma_tx_buff_phy;
	DTADR(si->txdma) = __PREG(ICDR);
	DCMD(si->txdma) = DCMD_INCSRCADDR | DCMD_FLOWTRG | DCMD_ENDIRQEN |
			  DCMD_WIDTH1 | DCMD_BURST32 | si->dma_tx_buff_len;
	DCSR(si->txdma) |= DCSR_RUN;
}
static inline void pxa_irda_fir_dma_rx_start(struct pxa_irda *si)
{
	DCSR(si->rxdma) = DCSR_NODESC;
	DSADR(si->rxdma) = __PREG(ICDR);
	DTADR(si->rxdma) = si->dma_rx_buff_phy;
	DCMD(si->rxdma) = DCMD_INCTRGADDR | DCMD_FLOWSRC | DCMD_WIDTH1 |
			  DCMD_BURST32 | IRDA_FRAME_SIZE_LIMIT;
	DCSR(si->rxdma) |= DCSR_RUN;
}
static snd_pcm_uframes_t pxa2xx_pcm_pointer(struct snd_pcm_substream *substream)
{
	struct snd_pcm_runtime *runtime = substream->runtime;
	struct pxa2xx_runtime_data *rtd = runtime->private_data;

	dma_addr_t ptr = (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) ?
			 DSADR(rtd->dma_ch) : DTADR(rtd->dma_ch);
	snd_pcm_uframes_t x = bytes_to_frames(runtime, ptr - runtime->dma_addr);

	if (x == runtime->buffer_size)
		x = 0;
	return x;
}
/* EIF(Error in FIFO/End in Frame) handler for FIR */
static void pxa_irda_fir_irq_eif(struct pxa_irda *si, struct net_device *dev, int icsr0)
{
	unsigned int len, stat, data;

	/* Get the current data position. */
	len = DTADR(si->rxdma) - si->dma_rx_buff_phy;

	do {
		/* Read Status, and then Data. */
		stat = ICSR1;
		rmb();
		data = ICDR;

		if (stat & (ICSR1_CRE | ICSR1_ROR)) {
			si->stats.rx_errors++;
			if (stat & ICSR1_CRE) {
				printk(KERN_DEBUG "pxa_ir: fir receive CRC error\n");
				si->stats.rx_crc_errors++;
			}
			if (stat & ICSR1_ROR) {
				printk(KERN_DEBUG "pxa_ir: fir receive overrun\n");
				si->stats.rx_over_errors++;
			}
		} else {
			si->dma_rx_buff[len++] = data;
		}
		/* If we hit the end of frame, there's no point in continuing. */
		if (stat & ICSR1_EOF)
			break;
	} while (ICSR0 & ICSR0_EIF);

	if (stat & ICSR1_EOF) {
		/* end of frame. */
		struct sk_buff *skb;

		if (icsr0 & ICSR0_FRE) {
			printk(KERN_ERR "pxa_ir: dropping erroneous frame\n");
			si->stats.rx_dropped++;
			return;
		}

		skb = alloc_skb(len + 1, GFP_ATOMIC);
		if (!skb) {
			printk(KERN_ERR "pxa_ir: fir out of memory for receive skb\n");
			si->stats.rx_dropped++;
			return;
		}

		/* Align IP header to 20 bytes */
		skb_reserve(skb, 1);
		skb_copy_to_linear_data(skb, si->dma_rx_buff, len);
		skb_put(skb, len);

		/* Feed it to IrLAP */
		skb->dev = dev;
		skb_reset_mac_header(skb);
		skb->protocol = htons(ETH_P_IRDA);
		netif_rx(skb);

		si->stats.rx_packets++;
		si->stats.rx_bytes += len;

		dev->last_rx = jiffies;
	}
}
static int pxa250_irda_hard_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct pxa250_irda *si = dev->priv;
	int speed = irda_get_next_speed(skb);
	int mtt;

	__ECHO_IN;

	/*
	 * Does this packet contain a request to change the interface
	 * speed?  If so, remember it until we complete the transmission
	 * of this frame.
	 */
	if (speed != si->speed && speed != -1)
		si->newspeed = speed;

	/*
	 * If this is an empty frame, we can bypass a lot.
	 */
	if (skb->len == 0) {
		if (si->newspeed) {
			si->newspeed = 0;
			pxa250_irda_set_speed(dev, speed);
		}
		dev_kfree_skb(skb);
		return 0;
	}

	DBG("stop queue\n");
	netif_stop_queue(dev);

	if (!IS_FIR(si)) {
		si->tx_buff.data = si->tx_buff.head;
		si->tx_buff.len = async_wrap_skb(skb, si->tx_buff.data,
						 si->tx_buff.truesize);

		pxa250_sir_transmit(dev);

		dev_kfree_skb(skb);

		dev->trans_start = jiffies;

		return 0;
	} else {
		/* FIR */
		DBG("Enter FIR transmit\n");

		/*
		 * We must not be transmitting...
		 */
		if (si->txskb)
			BUG();

		disable_irq(si->fir_irq);

		netif_stop_queue(dev);
		DBG("queue stopped\n");

		si->txskb = skb;

		/*
		 * We cannot simply map the skb: skb->data may not be
		 * DMA capable, so copy it into our own DMA buffer. -Sed-
		 */
		if (skb->len > TXBUFF_MAX_SIZE) {
			printk(KERN_ERR "skb data too large\n");
			printk(KERN_ERR "len=%d\n", skb->len);
			BUG();
		}

		DBG("gonna copy %d bytes to txbuf\n", skb->len);
		memcpy(si->txbuf_dma_virt, skb->data, skb->len);

		/* Actual sending; must not be receiving !!! */

		/* Write data and source address */
		DBG("ICSR1 & RNE = %d\n", (ICSR1 & ICSR1_RNE) ? 1 : 0);

		/* Disable receiver and enable transmitter */
		ICCR0 &= ~ICCR0_RXE;

		if (ICSR1 & ICSR1_TBY)
			BUG();

		ICCR0 |= ICCR0_TXE;

		DBG("FICP status %x\n", ICSR0);

		if (0) {
			int i;

			DBG("sending packet\n");
			for (i = 0; i < skb->len; i++)
				(i % 64) ? printk("%2x ", skb->data[i]) :
					   printk("%2x \n", skb->data[i]);
			DBG(" done\n");
		}

		/*
		 * If we have a mean turn-around time, impose the specified
		 * delay.  We could shorten this by timing from the point we
		 * received the packet.
		 */
		mtt = irda_get_mtt(skb);
		if (mtt)
			udelay(mtt);

		DCSR(si->txdma_ch) = 0;
		DCSR(si->txdma_ch) = DCSR_NODESC;
		DSADR(si->txdma_ch) = si->txbuf_dma;	/* physical address */
		DTADR(si->txdma_ch) = __PREG(ICDR);

		DCMD(si->txdma_ch) = DCMD_ENDIRQEN | DCMD_INCSRCADDR |
				     DCMD_FLOWTRG | DCMD_BURST8 |
				     DCMD_WIDTH1 | skb->len;

		DCSR(si->txdma_ch) = DCSR_ENDINTR | DCSR_BUSERR;
		DCSR(si->txdma_ch) = DCSR_RUN | DCSR_NODESC;

		DBG("FICP status %x\n", ICSR0);
		return 0;
	}
}
static int audio_ioctl(struct inode *inode, struct file *file,
		       uint cmd, ulong arg)
{
	audio_state_t *state = file->private_data;
	audio_stream_t *os = state->output_stream;
	audio_stream_t *is = state->input_stream;
	long val;

	switch (cmd) {
	case OSS_GETVERSION:
		return put_user(SOUND_VERSION, (int *)arg);

	case SNDCTL_DSP_GETBLKSIZE:
		if (file->f_mode & FMODE_WRITE)
			return put_user(os->fragsize, (int *)arg);
		else
			return put_user(is->fragsize, (int *)arg);

	case SNDCTL_DSP_GETCAPS:
		val = DSP_CAP_REALTIME | DSP_CAP_TRIGGER | DSP_CAP_MMAP;
		if (is && os)
			val |= DSP_CAP_DUPLEX;
		return put_user(val, (int *)arg);

	case SNDCTL_DSP_SETFRAGMENT:
		if (get_user(val, (long *)arg))
			return -EFAULT;
		if (file->f_mode & FMODE_READ) {
			int ret = audio_set_fragments(is, val);
			if (ret < 0)
				return ret;
			ret = put_user(ret, (int *)arg);
			if (ret)
				return ret;
		}
		if (file->f_mode & FMODE_WRITE) {
			int ret = audio_set_fragments(os, val);
			if (ret < 0)
				return ret;
			ret = put_user(ret, (int *)arg);
			if (ret)
				return ret;
		}
		return 0;

	case SNDCTL_DSP_SYNC:
		return audio_sync(file);

	case SNDCTL_DSP_SETDUPLEX:
		return 0;

	case SNDCTL_DSP_POST:
		return 0;

	case SNDCTL_DSP_GETTRIGGER:
		val = 0;
		if (file->f_mode & FMODE_READ && DCSR(is->dma_ch) & DCSR_RUN)
			val |= PCM_ENABLE_INPUT;
		if (file->f_mode & FMODE_WRITE && DCSR(os->dma_ch) & DCSR_RUN)
			val |= PCM_ENABLE_OUTPUT;
		return put_user(val, (int *)arg);

	case SNDCTL_DSP_SETTRIGGER:
		if (get_user(val, (int *)arg))
			return -EFAULT;
		if (file->f_mode & FMODE_READ) {
			if (val & PCM_ENABLE_INPUT) {
				if (!is->buffers && audio_setup_buf(is))
					return -ENOMEM;
				if (!(DCSR(is->dma_ch) & DCSR_RUN)) {
					audio_buf_t *b = &is->buffers[is->dma_frag];
					DDADR(is->dma_ch) = b->dma_desc->ddadr;
					DCSR(is->dma_ch) = DCSR_RUN;
				}
			} else {
				DCSR(is->dma_ch) = 0;
			}
		}
		if (file->f_mode & FMODE_WRITE) {
			if (val & PCM_ENABLE_OUTPUT) {
				if (!os->buffers && audio_setup_buf(os))
					return -ENOMEM;
				if (!(DCSR(os->dma_ch) & DCSR_RUN)) {
					audio_buf_t *b = &os->buffers[os->dma_frag];
					DDADR(os->dma_ch) = b->dma_desc->ddadr;
					DCSR(os->dma_ch) = DCSR_RUN;
				}
			} else {
				DCSR(os->dma_ch) = 0;
			}
		}
		return 0;

	case SNDCTL_DSP_GETOSPACE:
	case SNDCTL_DSP_GETISPACE:
	    {
		audio_buf_info inf = { 0, };
		audio_stream_t *s = (cmd == SNDCTL_DSP_GETOSPACE) ? os : is;

		if ((s == is && !(file->f_mode & FMODE_READ)) ||
		    (s == os && !(file->f_mode & FMODE_WRITE)))
			return -EINVAL;
		if (!s->buffers && audio_setup_buf(s))
			return -ENOMEM;
		inf.bytes = atomic_read(&s->sem.count) * s->fragsize;
		inf.bytes -= s->buffers[s->usr_frag].offset;
		inf.fragments = inf.bytes / s->fragsize;
		inf.fragsize = s->fragsize;
		inf.fragstotal = s->nbfrags;
		return copy_to_user((void *)arg, &inf, sizeof(inf));
	    }

	case SNDCTL_DSP_GETOPTR:
	case SNDCTL_DSP_GETIPTR:
	    {
		count_info inf = { 0, };
		audio_stream_t *s = (cmd == SNDCTL_DSP_GETOPTR) ? os : is;
		dma_addr_t ptr;
		int bytecount, offset, flags;

		if ((s == is && !(file->f_mode & FMODE_READ)) ||
		    (s == os && !(file->f_mode & FMODE_WRITE)))
			return -EINVAL;
		if (DCSR(s->dma_ch) & DCSR_RUN) {
			audio_buf_t *b;
			save_flags_cli(flags);
			ptr = (s->output) ? DSADR(s->dma_ch) : DTADR(s->dma_ch);
			b = &s->buffers[s->dma_frag];
			offset = ptr - b->dma_desc->dsadr;
			if (offset >= s->fragsize)
				offset = s->fragsize - 4;
		} else {
			save_flags(flags);
			offset = 0;
		}
		inf.ptr = s->dma_frag * s->fragsize + offset;
		bytecount = s->bytecount + offset;
		s->bytecount = -offset;
		inf.blocks = s->fragcount;
		s->fragcount = 0;
		restore_flags(flags);
		if (bytecount < 0)
			bytecount = 0;
		inf.bytes = bytecount;
		return copy_to_user((void *)arg, &inf, sizeof(inf));
	    }

	case SNDCTL_DSP_NONBLOCK:
		file->f_flags |= O_NONBLOCK;
		return 0;

	case SNDCTL_DSP_RESET:
		if (file->f_mode & FMODE_WRITE)
			audio_clear_buf(os);
		if (file->f_mode & FMODE_READ)
			audio_clear_buf(is);
		return 0;

	default:
		return state->client_ioctl(inode, file, cmd, arg);
	}

	return 0;
}
static void pxa_irda_fir_irq_eif(struct pxa_irda *si, struct net_device *dev, int icsr0)
{
	unsigned int len, stat, data;

	len = DTADR(si->rxdma) - si->dma_rx_buff_phy;

	do {
		stat = ICSR1;
		rmb();
		data = ICDR;

		if (stat & (ICSR1_CRE | ICSR1_ROR)) {
			dev->stats.rx_errors++;
			if (stat & ICSR1_CRE) {
				printk(KERN_DEBUG "pxa_ir: fir receive CRC error\n");
				dev->stats.rx_crc_errors++;
			}
			if (stat & ICSR1_ROR) {
				printk(KERN_DEBUG "pxa_ir: fir receive overrun\n");
				dev->stats.rx_over_errors++;
			}
		} else {
			si->dma_rx_buff[len++] = data;
		}
		if (stat & ICSR1_EOF)
			break;
	} while (ICSR0 & ICSR0_EIF);

	if (stat & ICSR1_EOF) {
		struct sk_buff *skb;

		if (icsr0 & ICSR0_FRE) {
			printk(KERN_ERR "pxa_ir: dropping erroneous frame\n");
			dev->stats.rx_dropped++;
			return;
		}

		skb = alloc_skb(len + 1, GFP_ATOMIC);
		if (!skb) {
			printk(KERN_ERR "pxa_ir: fir out of memory for receive skb\n");
			dev->stats.rx_dropped++;
			return;
		}

		skb_reserve(skb, 1);
		skb_copy_to_linear_data(skb, si->dma_rx_buff, len);
		skb_put(skb, len);

		skb->dev = dev;
		skb_reset_mac_header(skb);
		skb->protocol = htons(ETH_P_IRDA);
		netif_rx(skb);

		dev->stats.rx_packets++;
		dev->stats.rx_bytes += len;
	}
}