/*
 * s3c_dma_trigger - ALSA trigger callback: start or stop the stream's
 * DMA channel in response to the trigger command.
 *
 * Runs under prtd->lock so the ST_RUNNING flag and the channel
 * operation stay consistent.  Returns 0, or -EINVAL for an unhandled
 * command.
 */
static int s3c_dma_trigger(struct snd_pcm_substream *substream, int cmd)
{
	struct s3c24xx_runtime_data *prtd = substream->runtime->private_data;
	int ret = 0;

	pr_debug("Entered %s\n", __func__);

	spin_lock(&prtd->lock);

	switch (cmd) {
	case SNDRV_PCM_TRIGGER_RESUME:
		/* re-queue the buffers before restarting the channel */
		s3c_dma_enqueue(substream);
		/* fall through */
	case SNDRV_PCM_TRIGGER_START:
	case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
		prtd->state |= ST_RUNNING;
		s3c2410_dma_ctrl(prtd->params->channel, S3C2410_DMAOP_START);
		break;

	case SNDRV_PCM_TRIGGER_SUSPEND:
		/* drop one loaded buffer; it is re-queued on RESUME above */
		if (prtd->dma_loaded)
			prtd->dma_loaded--;
		/* fall through */
	case SNDRV_PCM_TRIGGER_STOP:
	case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
		prtd->state &= ~ST_RUNNING;
		s3c2410_dma_ctrl(prtd->params->channel, S3C2410_DMAOP_STOP);
		break;

	default:
		ret = -EINVAL;
		break;
	}

	spin_unlock(&prtd->lock);

	return ret;
}
/*
 * s3c_audio_suspend - suspend handler for the audio device.
 *
 * On a power-down/disable level, stop and flush the DMA channel of each
 * stream while preserving its "stopped" flag, then shut the codec
 * hardware down if the device is still active.  Always returns 0.
 */
int s3c_audio_suspend(audio_state_t *s, u32 state, u32 level)
{
	audio_stream_t *stream;
	int saved_stopped;

	if (level != SUSPEND_DISABLE && level != SUSPEND_POWER_DOWN)
		return 0;

	/* input path: halt DMA but remember whether it was user-stopped */
	stream = s->input_stream;
	if (stream) {
		saved_stopped = stream->stopped;
		audio_stop_dma(stream);
		s3c2410_dma_ctrl(stream->dma, S3C2410_DMAOP_FLUSH);
		stream->stopped = saved_stopped;
	}

	/* output path: same treatment */
	stream = s->output_stream;
	if (stream) {
		saved_stopped = stream->stopped;
		audio_stop_dma(stream);
		s3c2410_dma_ctrl(stream->dma, S3C2410_DMAOP_FLUSH);
		stream->stopped = saved_stopped;
	}

	if (AUDIO_ACTIVE(s) && s->hw_shutdown)
		s->hw_shutdown(s->data);

	return 0;
}
static int s3c24xx_pcm_trigger(struct snd_pcm_substream *substream, int cmd) { struct s3c24xx_runtime_data *prtd = substream->runtime->private_data; int ret = 0; s3cdbg("Entered %s\n", __FUNCTION__); spin_lock(&prtd->lock); switch (cmd) { case SNDRV_PCM_TRIGGER_START: case SNDRV_PCM_TRIGGER_RESUME: case SNDRV_PCM_TRIGGER_PAUSE_RELEASE: prtd->state |= ST_RUNNING; s3c2410_dma_ctrl(prtd->params->channel, S3C2410_DMAOP_START); #if !defined (CONFIG_CPU_S3C6400) && !defined (CONFIG_CPU_S3C6410) s3c2410_dma_ctrl(prtd->params->channel, S3C2410_DMAOP_STARTED); #endif break; case SNDRV_PCM_TRIGGER_STOP: case SNDRV_PCM_TRIGGER_SUSPEND: case SNDRV_PCM_TRIGGER_PAUSE_PUSH: prtd->state &= ~ST_RUNNING; s3c2410_dma_ctrl(prtd->params->channel, S3C2410_DMAOP_STOP); break; default: ret = -EINVAL; break; } spin_unlock(&prtd->lock); return ret; }
/*
 * s3c2410_dma_flush - stop the channel and release every queued buffer.
 *
 * Each buffer still on the channel (the one in flight, plus anything
 * queued behind it) is completed with S3C2410_RES_ABORT so its owner
 * learns the transfer was cancelled, then freed.  Runs with IRQs
 * disabled so the buffer chain cannot change underneath us.
 * Returns 0.
 */
static int s3c2410_dma_flush(struct s3c2410_dma_chan *chan)
{
	struct s3c2410_dma_buf *buf, *next;
	unsigned long flags;

	pr_debug("%s: chan %p (%d)\n", __func__, chan, chan->number);

	dbg_showchan(chan);

	local_irq_save(flags);

	if (chan->state != S3C2410_DMA_IDLE) {
		pr_debug("%s: stopping channel...\n", __func__ );
		s3c2410_dma_ctrl(chan->number, S3C2410_DMAOP_STOP);
	}

	/* start from the in-flight buffer; fall back to the queued one */
	buf = chan->curr;
	if (buf == NULL)
		buf = chan->next;

	chan->curr = chan->next = chan->end = NULL;

	if (buf != NULL) {
		for ( ; buf != NULL; buf = next) {
			next = buf->next;

			pr_debug("%s: free buffer %p, next %p\n",
				 __func__, buf, buf->next);

			/* abort-complete, then free the descriptor */
			s3c2410_dma_buffdone(chan, buf, S3C2410_RES_ABORT);
			s3c2410_dma_freebuf(buf);
		}
	}

	dbg_showregs(chan);
	/* wait for the controller to go quiescent before returning */
	s3c2410_dma_waitforstop(chan);
	dbg_showregs(chan);

	local_irq_restore(flags);

	return 0;
}
/*
 * s3c24xx_pcm_prepare - ALSA prepare callback: (re)configure the DMA
 * channel for the stream direction, flush stale transfers, and queue a
 * fresh set of buffers.  Returns 0; a bufferless link (no prtd->params)
 * returns immediately.
 */
static int s3c24xx_pcm_prepare(struct snd_pcm_substream *substream)
{
	struct s3c24xx_runtime_data *prtd = substream->runtime->private_data;
	int ret = 0;

	DBG("Entered %s\n", __func__);

	/* return if this is a bufferless transfer e.g.
	 * codec <--> BT codec or GSM modem -- lg FIXME */
	if (!prtd->params)
		return 0;

	/* channel needs configuring for mem=>device, increment memory addr,
	 * sync to pclk, half-word transfers to the IIS-FIFO. */
	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
		/* playback: memory is the source, incrementing APB access */
		s3c2410_dma_devconfig(prtd->params->channel,
				      S3C2410_DMASRC_MEM,
				      S3C2410_DISRCC_INC | S3C2410_DISRCC_APB,
				      prtd->params->dma_addr);

		s3c2410_dma_config(prtd->params->channel,
				   prtd->params->dma_size,
				   S3C2410_DCON_SYNC_PCLK | S3C2410_DCON_HANDSHAKE);
	} else {
		/* capture: peripheral is the source (config first here;
		 * order differs from the playback branch) */
		s3c2410_dma_config(prtd->params->channel,
				   prtd->params->dma_size,
				   S3C2410_DCON_HANDSHAKE | S3C2410_DCON_SYNC_PCLK);

		s3c2410_dma_devconfig(prtd->params->channel,
				      S3C2410_DMASRC_HW, 0x3,
				      prtd->params->dma_addr);
	}

	/* flush the DMA channel */
	s3c2410_dma_ctrl(prtd->params->channel, S3C2410_DMAOP_FLUSH);
	prtd->dma_loaded = 0;
	prtd->dma_pos = prtd->dma_start;

	/* enqueue dma buffers */
	s3c24xx_pcm_enqueue(substream);

	return ret;
}
static int s3c24xx_i2s_trigger(struct snd_pcm_substream *substream, int cmd, struct snd_soc_dai *dai) { int ret = 0; struct snd_soc_pcm_runtime *rtd = substream->private_data; struct s3c_dma_params *dma_data = snd_soc_dai_get_dma_data(rtd->dai->cpu_dai, substream); pr_debug("Entered %s\n", __func__); switch (cmd) { case SNDRV_PCM_TRIGGER_START: case SNDRV_PCM_TRIGGER_RESUME: case SNDRV_PCM_TRIGGER_PAUSE_RELEASE: if (!s3c24xx_snd_is_clkmaster()) { ret = s3c24xx_snd_lrsync(); if (ret) goto exit_err; } if (substream->stream == SNDRV_PCM_STREAM_CAPTURE) s3c24xx_snd_rxctrl(1); else s3c24xx_snd_txctrl(1); s3c2410_dma_ctrl(dma_data->channel, S3C2410_DMAOP_STARTED); break; case SNDRV_PCM_TRIGGER_STOP: case SNDRV_PCM_TRIGGER_SUSPEND: case SNDRV_PCM_TRIGGER_PAUSE_PUSH: if (substream->stream == SNDRV_PCM_STREAM_CAPTURE) s3c24xx_snd_rxctrl(0); else s3c24xx_snd_txctrl(0); break; default: ret = -EINVAL; break; } exit_err: return ret; }
/* s3c2410_dma_free
 *
 * release the given channel back to the system, will stop and flush
 * any outstanding transfers, and ensure the channel is ready for the
 * next claimant.
 *
 * Note, although a warning is currently printed if the freeing client
 * info is not the same as the registrant's client info, the free is still
 * allowed to go through.
 *
 * Fix: the NULL check on 'chan' now happens before the debug print that
 * dereferences it (the old code read chan->number first, oopsing on an
 * invalid channel), and __FUNCTION__ is replaced by standard __func__.
 */
int s3c2410_dma_free(dmach_t channel, struct s3c2410_dma_client *client)
{
	unsigned long flags;
	struct s3c2410_dma_chan *chan = lookup_dma_channel(channel);

	if (chan == NULL)
		return -EINVAL;

	pr_debug("%s: DMA channel %d will be stopped\n",
		 __func__, chan->number);

	local_irq_save(flags);

	if (chan->client != client) {
		printk(KERN_WARNING "DMA CH %d: possible free from different client (channel %p, passed %p)\n",
		       channel, chan->client, client);
	}

	/* sort out stopping and freeing the channel */

	if (chan->state != S3C_DMA_IDLE) {
		pr_debug("%s: need to stop dma channel %p\n",
			 __func__, chan);

		/* possibly flush the channel */
		s3c2410_dma_ctrl(channel, S3C2410_DMAOP_STOP);
	}

	chan->client = NULL;
	chan->in_use = 0;
	chan->dma_con->in_use--;

	if (chan->irq_claimed)
		free_irq(chan->irq, (void *)chan->dma_con);

	chan->irq_claimed = 0;

	if (!(channel & DMACH_LOW_LEVEL))
		dma_chan_map[channel] = NULL;

	local_irq_restore(flags);

	return 0;
}
/*
 * s3c_dma_prepare - ALSA prepare callback: (re)configure the DMA
 * channel for the stream direction, reset the ring-buffer bookkeeping
 * for playback, flush stale transfers and queue fresh buffers.
 * Returns 0; a bufferless link (no prtd->params) returns immediately.
 */
static int s3c_dma_prepare(struct snd_pcm_substream *substream)
{
	struct s3c24xx_runtime_data *prtd = substream->runtime->private_data;
	int ret = 0;

	pr_debug("Entered %s\n", __func__);

	/* return if this is a bufferless transfer e.g.
	 * codec <--> BT codec or GSM modem -- lg FIXME */
	if (!prtd->params)
		return 0;

	/* channel needs configuring for mem=>device, increment memory addr,
	 * sync to pclk, half-word transfers to the IIS-FIFO. */
	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
		/* playback: memory is the DMA source */
		s3c2410_dma_devconfig(prtd->params->channel,
				      S3C2410_DMASRC_MEM,
				      prtd->params->dma_addr);
	} else {
		/* capture: peripheral is the DMA source */
		s3c2410_dma_devconfig(prtd->params->channel,
				      S3C2410_DMASRC_HW,
				      prtd->params->dma_addr);
	}

	s3c2410_dma_config(prtd->params->channel,
			   prtd->params->dma_size);

	/* To fix the pcm buffer underrun in case of high busy state
	 * (ex. starting browser) 2010.06.22 */
	if(substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
		/* restart ring-buffer bookkeeping from the first period */
		ring_buf_index = 0;
		period_index = 0;
	}

	/* flush the DMA channel */
	s3c2410_dma_ctrl(prtd->params->channel, S3C2410_DMAOP_FLUSH);
	prtd->dma_loaded = 0;
	prtd->dma_pos = prtd->dma_start;

	/* enqueue dma buffers */
	s3c_dma_enqueue(substream);

	return ret;
}
/*
 * s3c2410_dma_flush - stop the channel and abort/free every queued
 * buffer.
 *
 * Each remaining buffer is completed with S3C2410_RES_ABORT so its
 * owner learns the transfer was cancelled, then freed.  Runs with IRQs
 * disabled so the buffer chain cannot change underneath us.  Returns 0.
 *
 * Fix: replace the deprecated GNU __FUNCTION__ alias with standard C99
 * __func__, matching the other DMA code in this file.
 */
static int s3c2410_dma_flush(s3c2410_dma_chan_t *chan)
{
	s3c2410_dma_buf_t *buf, *next;
	unsigned long flags;

	pr_debug("%s:\n", __func__);

	local_irq_save(flags);

	if (chan->state != S3C2410_DMA_IDLE) {
		pr_debug("%s: stopping channel...\n", __func__);
		s3c2410_dma_ctrl(chan->number, S3C2410_DMAOP_STOP);
	}

	/* start from the in-flight buffer; fall back to the queued one */
	buf = chan->curr;
	if (buf == NULL)
		buf = chan->next;

	chan->curr = chan->next = chan->end = NULL;

	if (buf != NULL) {
		for ( ; buf != NULL; buf = next) {
			next = buf->next;

			pr_debug("%s: free buffer %p, next %p\n",
				 __func__, buf, buf->next);

			s3c2410_dma_buffdone(chan, buf, S3C2410_RES_ABORT);
			s3c2410_dma_freebuf(buf);
		}
	}

	local_irq_restore(flags);

	return 0;
}
/*
 * s3c_mem_ioctl - ioctl entry point for the s3c memory allocator device.
 *
 * Allocates (plain / cacheable / shared) DMA-able memory by mmap()ing
 * through this driver, and frees it again.  The file-scope 'flag'
 * selects the allocation mode for the driver's mmap handler, and
 * 'physical_address' carries the resulting (or, for SHARE, the caller
 * supplied) physical address; both are serialised by the per-command
 * mutexes taken below.
 *
 * Returns 0 on success, -EFAULT on user-copy or mmap failure, -EINVAL
 * on munmap failure or an unknown command.
 *
 * NOTE(review): S3C_MEM_FREE passes a user-supplied phy_addr through
 * phys_to_virt() straight into kfree() - a hostile caller could free
 * arbitrary kernel memory.  Confirm the device node's permissions
 * restrict this to trusted users.
 */
int s3c_mem_ioctl(struct inode *inode, struct file *file,
		  unsigned int cmd, unsigned long arg)
{
	unsigned long *virt_addr;
	struct mm_struct *mm = current->mm;
	struct s3c_mem_alloc param;
	struct s3c_mem_dma_param dma_param;

	switch (cmd) {
	case S3C_MEM_ALLOC:
		mutex_lock(&mem_alloc_lock);
		if(copy_from_user(&param, (struct s3c_mem_alloc *)arg,
				  sizeof(struct s3c_mem_alloc))){
			mutex_unlock(&mem_alloc_lock);
			return -EFAULT;
		}
		/* tell the mmap handler: plain (non-cacheable) allocation */
		flag = MEM_ALLOC;
		param.vir_addr = do_mmap(file, 0, param.size,
					 PROT_READ|PROT_WRITE, MAP_SHARED, 0);
		DEBUG("param.vir_addr = %08x, %d\n", param.vir_addr, __LINE__);
		if(param.vir_addr == -EINVAL) {
			printk("S3C_MEM_ALLOC FAILED\n");
			flag = 0;
			mutex_unlock(&mem_alloc_lock);
			return -EFAULT;
		}
		/* physical_address was filled in by the mmap handler */
		param.phy_addr = physical_address;
		DEBUG("KERNEL MALLOC : param.phy_addr = 0x%X \t size = %d \t param.vir_addr = 0x%X, %d\n", param.phy_addr, param.size, param.vir_addr, __LINE__);
		if(copy_to_user((struct s3c_mem_alloc *)arg, &param,
				sizeof(struct s3c_mem_alloc))){
			flag = 0;
			mutex_unlock(&mem_alloc_lock);
			return -EFAULT;
		}
		flag = 0;
		mutex_unlock(&mem_alloc_lock);
		break;

	case S3C_MEM_CACHEABLE_ALLOC:
		/* identical to S3C_MEM_ALLOC, but requests cacheable pages */
		mutex_lock(&mem_cacheable_alloc_lock);
		if(copy_from_user(&param, (struct s3c_mem_alloc *)arg,
				  sizeof(struct s3c_mem_alloc))){
			mutex_unlock(&mem_cacheable_alloc_lock);
			return -EFAULT;
		}
		flag = MEM_ALLOC_CACHEABLE;
		param.vir_addr = do_mmap(file, 0, param.size,
					 PROT_READ|PROT_WRITE, MAP_SHARED, 0);
		DEBUG("param.vir_addr = %08x, %d\n", param.vir_addr, __LINE__);
		if(param.vir_addr == -EINVAL) {
			printk("S3C_MEM_ALLOC FAILED\n");
			flag = 0;
			mutex_unlock(&mem_cacheable_alloc_lock);
			return -EFAULT;
		}
		param.phy_addr = physical_address;
		DEBUG("KERNEL MALLOC : param.phy_addr = 0x%X \t size = %d \t param.vir_addr = 0x%X, %d\n", param.phy_addr, param.size, param.vir_addr, __LINE__);
		if(copy_to_user((struct s3c_mem_alloc *)arg, &param,
				sizeof(struct s3c_mem_alloc))){
			flag = 0;
			mutex_unlock(&mem_cacheable_alloc_lock);
			return -EFAULT;
		}
		flag = 0;
		mutex_unlock(&mem_cacheable_alloc_lock);
		break;

	case S3C_MEM_SHARE_ALLOC:
		/* map an existing allocation: the caller supplies phy_addr */
		mutex_lock(&mem_share_alloc_lock);
		if(copy_from_user(&param, (struct s3c_mem_alloc *)arg,
				  sizeof(struct s3c_mem_alloc))){
			mutex_unlock(&mem_share_alloc_lock);
			return -EFAULT;
		}
		flag = MEM_ALLOC_SHARE;
		physical_address = param.phy_addr;
		DEBUG("param.phy_addr = %08x, %d\n", physical_address, __LINE__);
		param.vir_addr = do_mmap(file, 0, param.size,
					 PROT_READ|PROT_WRITE, MAP_SHARED, 0);
		DEBUG("param.vir_addr = %08x, %d\n", param.vir_addr, __LINE__);
		if(param.vir_addr == -EINVAL) {
			printk("S3C_MEM_SHARE_ALLOC FAILED\n");
			flag = 0;
			mutex_unlock(&mem_share_alloc_lock);
			return -EFAULT;
		}
		DEBUG("MALLOC_SHARE : param.phy_addr = 0x%X \t size = %d \t param.vir_addr = 0x%X, %d\n", param.phy_addr, param.size, param.vir_addr, __LINE__);
		if(copy_to_user((struct s3c_mem_alloc *)arg, &param,
				sizeof(struct s3c_mem_alloc))){
			flag = 0;
			mutex_unlock(&mem_share_alloc_lock);
			return -EFAULT;
		}
		flag = 0;
		mutex_unlock(&mem_share_alloc_lock);
		break;

	case S3C_MEM_CACHEABLE_SHARE_ALLOC:
		/* shared mapping of an existing allocation, cacheable */
		mutex_lock(&mem_cacheable_share_alloc_lock);
		if(copy_from_user(&param, (struct s3c_mem_alloc *)arg,
				  sizeof(struct s3c_mem_alloc))){
			mutex_unlock(&mem_cacheable_share_alloc_lock);
			return -EFAULT;
		}
		flag = MEM_ALLOC_CACHEABLE_SHARE;
		physical_address = param.phy_addr;
		DEBUG("param.phy_addr = %08x, %d\n", physical_address, __LINE__);
		param.vir_addr = do_mmap(file, 0, param.size,
					 PROT_READ|PROT_WRITE, MAP_SHARED, 0);
		DEBUG("param.vir_addr = %08x, %d\n", param.vir_addr, __LINE__);
		if(param.vir_addr == -EINVAL) {
			printk("S3C_MEM_SHARE_ALLOC FAILED\n");
			flag = 0;
			mutex_unlock(&mem_cacheable_share_alloc_lock);
			return -EFAULT;
		}
		DEBUG("MALLOC_SHARE : param.phy_addr = 0x%X \t size = %d \t param.vir_addr = 0x%X, %d\n", param.phy_addr, param.size, param.vir_addr, __LINE__);
		if(copy_to_user((struct s3c_mem_alloc *)arg, &param,
				sizeof(struct s3c_mem_alloc))){
			flag = 0;
			mutex_unlock(&mem_cacheable_share_alloc_lock);
			return -EFAULT;
		}
		flag = 0;
		mutex_unlock(&mem_cacheable_share_alloc_lock);
		break;

	case S3C_MEM_FREE:
		mutex_lock(&mem_free_lock);
		if(copy_from_user(&param, (struct s3c_mem_alloc *)arg,
				  sizeof(struct s3c_mem_alloc))){
			mutex_unlock(&mem_free_lock);
			return -EFAULT;
		}

		DEBUG("KERNEL FREE : param.phy_addr = 0x%X \t size = %d \t param.vir_addr = 0x%X, %d\n", param.phy_addr, param.size, param.vir_addr, __LINE__);

		if (do_munmap(mm, param.vir_addr, param.size) < 0) {
			printk("do_munmap() failed !!\n");
			mutex_unlock(&mem_free_lock);
			return -EINVAL;
		}
		/* see NOTE(review) above: phy_addr comes from userspace */
		virt_addr = (unsigned long *)phys_to_virt(param.phy_addr);
		kfree(virt_addr);
		param.size = 0;
		DEBUG("do_munmap() succeed !!\n");

		if(copy_to_user((struct s3c_mem_alloc *)arg, &param,
				sizeof(struct s3c_mem_alloc))){
			mutex_unlock(&mem_free_lock);
			return -EFAULT;
		}

		mutex_unlock(&mem_free_lock);
		break;

	case S3C_MEM_SHARE_FREE:
		/* only unmap; the backing memory belongs to the original
		 * allocation and is not kfree'd here */
		mutex_lock(&mem_share_free_lock);
		if(copy_from_user(&param, (struct s3c_mem_alloc *)arg,
				  sizeof(struct s3c_mem_alloc))){
			mutex_unlock(&mem_share_free_lock);
			return -EFAULT;
		}

		DEBUG("MEM_SHARE_FREE : param.phy_addr = 0x%X \t size = %d \t param.vir_addr = 0x%X, %d\n", param.phy_addr, param.size, param.vir_addr, __LINE__);

		if (do_munmap(mm, param.vir_addr, param.size) < 0) {
			printk("do_munmap() failed - MEM_SHARE_FREE!!\n");
			mutex_unlock(&mem_share_free_lock);
			return -EINVAL;
		}

		param.vir_addr = 0;
		DEBUG("do_munmap() succeed !! - MEM_SHARE_FREE\n");

		if(copy_to_user((struct s3c_mem_alloc *)arg, &param,
				sizeof(struct s3c_mem_alloc))){
			mutex_unlock(&mem_share_free_lock);
			return -EFAULT;
		}

		mutex_unlock(&mem_share_free_lock);
		break;

#if 0 /* IOCTL for Old PL-080 codes (dma-pl080.c) */
	case S3C_MEM_DMA_COPY:
		if(copy_from_user(&dma_param, (struct s3c_mem_dma_param *)arg,
				  sizeof(struct s3c_mem_dma_param))) {
			return -EFAULT;
		}
		if (s3c2410_dma_request(DMACH_3D_M2M, &s3c_m2m_dma_client, NULL)) {
			printk(KERN_WARNING "Unable to get DMA channel.\n");
			return -1;
		}
		s3c2410_dma_set_buffdone_fn(DMACH_3D_M2M, s3c_m2m_dma_finish);

		/* Source address */
		s3c2410_dma_devconfig(DMACH_3D_M2M, S3C_DMA_MEM2MEM,
				      dma_param.src_addr);
		s3c2410_dma_config(DMACH_3D_M2M, 8);

		/* Destination address : Data buffer address */
		s3c2410_dma_enqueue(DMACH_3D_M2M, 0,
				    dma_param.dst_addr, dma_param.size);
		s3c2410_dma_ctrl(DMACH_3D_M2M, S3C2410_DMAOP_START);

		wait_for_completion(&s3c_m2m_dma_complete);
#if 0 /* Test code with hard coding */
		/* Destination address : Data buffer address */
		s3c2410_dma_enqueue(DMACH_3D_M2M, 0, 0x27a00000, 0x4000);
		s3c2410_dma_enqueue(DMACH_3D_M2M, 0, 0x27a00000+0x10000, 0x4000);
		s3c2410_dma_enqueue(DMACH_3D_M2M, 0, 0x27a00000+0x20000, 0x4000);
		s3c2410_dma_ctrl(DMACH_3D_M2M, S3C2410_DMAOP_START);
		wait_for_completion(&s3c_m2m_dma_complete);
		s3c2410_dma_enqueue(DMACH_3D_M2M, 0, 0x27a00000+0x30000, 0x4000);
		s3c2410_dma_enqueue(DMACH_3D_M2M, 0, 0x27a00000+0x40000, 0x4000);
		s3c2410_dma_ctrl(DMACH_3D_M2M, S3C2410_DMAOP_START);
		wait_for_completion(&s3c_m2m_dma_complete);
		s3c2410_dma_enqueue(DMACH_3D_M2M, 0, 0x27a00000+0x50000, 0x4000);
		s3c2410_dma_ctrl(DMACH_3D_M2M, S3C2410_DMAOP_START);
		wait_for_completion(&s3c_m2m_dma_complete);
#endif /* Test code with hard coding */
		s3c2410_dma_free(DMACH_3D_M2M, &s3c_m2m_dma_client);

		if(copy_to_user((struct s3c_mem_dma_param *)arg, &dma_param,
				sizeof(struct s3c_mem_dma_param))) {
			return -EFAULT;
		}
		break;

	case S3C_MEM_DMA_SET:
		if(copy_from_user(&dma_param, (struct s3c_mem_dma_param *)arg,
				  sizeof(struct s3c_mem_dma_param))) {
			return -EFAULT;
		}
		if (s3c2410_dma_request(DMACH_3D_M2M, &s3c_m2m_dma_client, NULL)) {
			printk(KERN_WARNING "Unable to get DMA channel.\n");
			return -1;
		}
		s3c2410_dma_set_buffdone_fn(DMACH_3D_M2M, s3c_m2m_dma_finish);

		/* Source address */
		s3c2410_dma_devconfig(DMACH_3D_M2M, S3C_DMA_MEM2MEM_SET,
				      dma_param.src_addr);
		s3c2410_dma_config(DMACH_3D_M2M, 8);

		/* Destination address : Data buffer address */
		s3c2410_dma_enqueue(DMACH_3D_M2M, 0,
				    dma_param.dst_addr, dma_param.size);
		s3c2410_dma_ctrl(DMACH_3D_M2M, S3C2410_DMAOP_START);

		wait_for_completion(&s3c_m2m_dma_complete);

		s3c2410_dma_free(DMACH_3D_M2M, &s3c_m2m_dma_client);

		if(copy_to_user((struct s3c_mem_dma_param *)arg, &dma_param,
				sizeof(struct s3c_mem_dma_param))) {
			return -EFAULT;
		}
		break;
#endif

	default:
		DEBUG("s3c_mem_ioctl() : default !!\n");
		return -EINVAL;
	}

	return 0;
}
/* Start transfers on DMA channel @ch; returns the s3c2410_dma_ctrl result. */
static inline int s3c_dma_trigger(unsigned ch)
{
	return s3c2410_dma_ctrl(ch, S3C2410_DMAOP_START);
}
/* s3c2410_dma_enqueue
 *
 * Queue the given buffer for DMA transfer on @channel.  The buffer is
 * appended to the channel's list and loaded into the controller while
 * there is room; if the channel is idle and was claimed with
 * S3C2410_DMAF_AUTOSTART, the transfer is started immediately.
 *
 * @id:   the driver's cookie, passed back in the buffer-done callback
 * @data: physical address of the data
 * @size: transfer size in bytes
 *
 * Safe to call from atomic context (GFP_ATOMIC allocation, IRQs
 * disabled around list manipulation).  Returns 0 on success, -EINVAL
 * for a bad channel or a load timeout, -ENOMEM if no buffer descriptor
 * could be allocated.
 *
 * Cleanup: removed the vacuous "&& 1" in the load-state test and the
 * commented-out debug statements.
 */
int s3c2410_dma_enqueue(unsigned int channel, void *id,
			dma_addr_t data, int size)
{
	struct s3c2410_dma_chan *chan = lookup_dma_channel(channel);
	struct s3c2410_dma_buf *buf;
	unsigned long flags;

	if (chan == NULL)
		return -EINVAL;

	pr_debug("%s: id=%p, data=%08x, size=%d\n",
		 __func__, id, (unsigned int)data, size);

	buf = kmem_cache_alloc(dma_kmem, GFP_ATOMIC);
	if (buf == NULL) {
		pr_debug("%s: out of memory (%ld alloc)\n",
			 __func__, (long)sizeof(*buf));
		return -ENOMEM;
	}

	buf->next = NULL;
	buf->data = buf->ptr = data;
	buf->size = size;
	buf->id = id;
	buf->magic = BUF_MAGIC;

	local_irq_save(flags);

	if (chan->curr == NULL) {
		/* we've got nothing loaded... */

		pr_debug("%s: buffer %p queued onto empty channel\n",
			 __func__, buf);

		chan->curr = buf;
		chan->end = buf;
		chan->next = NULL;
	} else {
		pr_debug("dma%d: %s: buffer %p queued onto non-empty channel\n",
			 chan->number, __func__, buf);

		if (chan->end == NULL)
			pr_debug("dma%d: %s: %p not empty, and chan->end==NULL?\n",
				 chan->number, __func__, chan);

		chan->end->next = buf;
		chan->end = buf;
	}

	/* if necessary, update the next buffer field */
	if (chan->next == NULL)
		chan->next = buf;

	/* check to see if we can load a buffer */
	if (chan->state == S3C2410_DMA_RUNNING) {
		if (chan->load_state == S3C2410_DMALOAD_1LOADED) {
			if (s3c2410_dma_waitforload(chan, __LINE__) == 0) {
				printk(KERN_ERR "dma%d: loadbuffer:"
				       "timeout loading buffer\n",
				       chan->number);
				dbg_showchan(chan);
				local_irq_restore(flags);
				return -EINVAL;
			}
		}

		while (s3c2410_dma_canload(chan) && chan->next != NULL) {
			s3c2410_dma_loadbuffer(chan, chan->next);
		}
	} else if (chan->state == S3C2410_DMA_IDLE) {
		if (chan->flags & S3C2410_DMAF_AUTOSTART) {
			s3c2410_dma_ctrl(chan->number | DMACH_LOW_LEVEL,
					 S3C2410_DMAOP_START);
		}
	}

	local_irq_restore(flags);

	return 0;
}
static void finalize_request(struct s3cmci_host *host) { struct mmc_request *mrq = host->mrq; struct mmc_command *cmd = host->cmd_is_stop?mrq->stop:mrq->cmd; int debug_as_failure = 0; if (host->complete_what != COMPLETION_FINALIZE) return; if (!mrq) return; // if (cmd->data && (cmd->error == MMC_ERR_NONE) && // (cmd->data->error == MMC_ERR_NONE)) { if (cmd->data && (cmd->error == 0) && (cmd->data->error == 0)) { if (host->dodma && (!host->dma_complete)) { dbg(host, dbg_dma, "DMA Missing!\n"); return; } } // Read response cmd->resp[0] = readl(host->base + S3C2410_SDIRSP0); cmd->resp[1] = readl(host->base + S3C2410_SDIRSP1); cmd->resp[2] = readl(host->base + S3C2410_SDIRSP2); cmd->resp[3] = readl(host->base + S3C2410_SDIRSP3); // reset clock speed, as it could still be set low for writel(host->prescaler, host->base + S3C2410_SDIPRE); if (cmd->error) debug_as_failure = 1; if (cmd->data && cmd->data->error) debug_as_failure = 1; //if(cmd->flags & MMC_RSP_MAYFAIL) debug_as_failure = 0; #ifdef CONFIG_MMC_DEBUG dbg_dumpcmd(host, cmd, debug_as_failure); #endif //Cleanup controller writel(0, host->base + S3C2410_SDICMDARG); writel(0, host->base + S3C2410_SDIDCON); writel(0, host->base + S3C2410_SDICMDCON); writel(0, host->base + host->sdiimsk); if (cmd->data && cmd->error) cmd->data->error = cmd->error; if (cmd->data && cmd->data->stop && (!host->cmd_is_stop)) { host->cmd_is_stop = 1; s3cmci_send_request(host->mmc); return; } // If we have no data transfer we are finished here if (!mrq->data) goto request_done; // Calulate the amout of bytes transfer, but only if there was // no error // if (mrq->data->error == MMC_ERR_NONE) { if (mrq->data->error == 0) { mrq->data->bytes_xfered = (mrq->data->blocks * mrq->data->blksz); } else { mrq->data->bytes_xfered = 0; } // If we had an error while transfering data we flush the // DMA channel and the fifo to clear out any garbage // if (mrq->data->error != MMC_ERR_NONE) { if (mrq->data->error != 0) { if (host->dodma) 
s3c2410_dma_ctrl(host->dma, S3C2410_DMAOP_FLUSH); if (host->is2440) { //Clear failure register and reset fifo writel(S3C2440_SDIFSTA_FIFORESET | S3C2440_SDIFSTA_FIFOFAIL, host->base + S3C2410_SDIFSTA); } else { u32 mci_con; //reset fifo mci_con = readl(host->base + S3C2410_SDICON); mci_con|= S3C2410_SDICON_FIFORESET; writel(mci_con, host->base + S3C2410_SDICON); } } request_done: host->complete_what = COMPLETION_NONE; host->mrq = NULL; mmc_request_done(host->mmc, mrq); }
/* s3c2410_dma_enqueue
 *
 * queue a given buffer for dma transfer.
 *
 * id the device driver's id information for this buffer
 * data the physical address of the buffer data
 * size the size of the buffer in bytes
 *
 * If the channel is not running, then the flag S3C2410_DMAF_AUTOSTART
 * is checked, and if set, the channel is started. If this flag isn't set,
 * then an error will be returned.
 *
 * It is possible to queue more than one DMA buffer onto a channel at
 * once, and the code will deal with the re-loading of the next buffer
 * when necessary.
 *
 * Fixes: on micro-code allocation failure the old code returned with
 * IRQs still disabled and leaked the buffer descriptor; 'chan' was not
 * checked for NULL; __FUNCTION__ replaced by standard __func__; the
 * vacuous "&& 1" removed.
 */
int s3c2410_dma_enqueue(unsigned int channel, void *id,
			dma_addr_t data, int size)
{
	struct s3c2410_dma_chan *chan = lookup_dma_channel(channel);
	struct s3c_dma_buf *buf;
	unsigned long flags;

	if (chan == NULL)
		return -EINVAL;

	pr_debug("%s: id=%p, data=%08x, size=%d\n",
		 __func__, id, (unsigned int) data, size);

	buf = kmem_cache_alloc(dma_kmem, GFP_ATOMIC);
	if (buf == NULL) {
		printk(KERN_ERR "dma <%d> no memory for buffer\n", channel);
		return -ENOMEM;
	}

	pr_debug("%s: new buffer %p\n", __func__, buf);

	buf->next = NULL;
	buf->data = buf->ptr = data;
	buf->size = size;
	buf->id = id;
	buf->magic = BUF_MAGIC;

	local_irq_save(flags);

	buf->mcptr_cpu = dma_alloc_coherent(NULL, SIZE_OF_MICRO_CODES,
					    &buf->mcptr, GFP_ATOMIC);
	if (buf->mcptr_cpu == NULL) {
		printk(KERN_ERR "%s: failed to allocate memory for micro codes\n",
		       __func__);
		/* re-enable IRQs and release the descriptor before failing */
		local_irq_restore(flags);
		kmem_cache_free(dma_kmem, buf);
		return -ENOMEM;
	}

	if (chan->curr == NULL) {
		/* we've got nothing loaded... */
		pr_debug("%s: buffer %p queued onto empty channel\n",
			 __func__, buf);

		chan->curr = buf;
		chan->end = buf;
		chan->next = NULL;
	} else {
		pr_debug("dma CH %d: %s: buffer %p queued onto non-empty channel\n",
			 chan->number, __func__, buf);

		if (chan->end == NULL) /* In case of flushing */
			pr_debug("dma CH %d: %s: %p not empty, and chan->end==NULL?\n",
				 chan->number, __func__, chan);
		else {
			chan->end->next = buf;
			chan->end = buf;
		}
	}

	/* if necessary, update the next buffer field */
	if (chan->next == NULL)
		chan->next = buf;

	/* check to see if we can load a buffer */
	if (chan->state == S3C_DMA_RUNNING) {
		if (chan->load_state == S3C_DMALOAD_1LOADED) {
			if (s3c_dma_waitforload(chan, __LINE__) == 0) {
				printk(KERN_ERR "dma CH %d: loadbuffer:"
				       "timeout loading buffer\n",
				       chan->number);
				dbg_showchan(chan);
				local_irq_restore(flags);
				return -EINVAL;
			}
		}
	} else if (chan->state == S3C_DMA_IDLE) {
		if (chan->flags & S3C2410_DMAF_AUTOSTART) {
			s3c2410_dma_ctrl(channel, S3C2410_DMAOP_START);
		} else {
			pr_debug("loading onto stopped channel\n");
		}
	}

	local_irq_restore(flags);

	return 0;
}
/* Notify the DMA core that channel @ch has started; returns the ctrl result. */
static inline int s3c_dma_started(unsigned ch)
{
	return s3c2410_dma_ctrl(ch, S3C2410_DMAOP_STARTED);
}
/*
 * s3c_dma_irq - interrupt handler for one multi-channel DMA controller.
 *
 * Reads the controller's interrupt status and walks it one bit (one
 * channel) at a time: advances the channel's load state, completes and
 * frees the finished buffer, then either loads the next queued buffer
 * and restarts the engine, or - when nothing is left - performs the
 * last-transfer handling and stops the channel.
 *
 * NOTE(review): the error paths 'goto next_channel' skip the
 * s3c_clear_interrupts() call for that channel - confirm the pending
 * interrupt cannot be left latched.
 * NOTE(review): 'tmp' is unsigned long but printed with %x - harmless
 * on 32-bit ARM, but strictly a format mismatch.
 */
static irqreturn_t s3c_dma_irq(int irq, void *devpw)
{
	unsigned int channel = 0, dcon_num, i;
	unsigned long tmp;
	s3c_dma_controller_t *dma_controller = (s3c_dma_controller_t *) devpw;

	struct s3c2410_dma_chan *chan=NULL;
	struct s3c_dma_buf *buf;

	dcon_num = dma_controller->number;
	/* one status bit per channel on this controller */
	tmp = dma_rdreg(dma_controller, S3C_DMAC_INTSTATUS);

	pr_debug("# s3c_dma_irq: IRQ status : 0x%x\n", tmp);

	if (tmp == 0)
		return IRQ_HANDLED;

	for (i = 0; i < S3C_CHANNELS_PER_DMA; i++) {
		if (tmp & 0x01) {
			pr_debug("# DMAC %d: requestor %d\n", dcon_num, i);
			channel = i;
			chan = &s3c_dma_chans[channel + dcon_num * S3C_CHANNELS_PER_DMA];
			pr_debug("# DMA CH:%d, index:%d load_state:%d\n",
				 chan->number, chan->index, chan->load_state);
			buf = chan->curr;

			dbg_showchan(chan);

			/* modify the channel state */
			switch (chan->load_state) {
			case S3C_DMALOAD_1RUNNING:
				/* TODO - if we are running only one buffer, we probably
				 * want to reload here, and then worry about the buffer
				 * callback */
				chan->load_state = S3C_DMALOAD_NONE;
				break;

			case S3C_DMALOAD_1LOADED:
				/* iirc, we should go back to NONE loaded here, we
				 * had a buffer, and it was never verified as being
				 * loaded. */
				chan->load_state = S3C_DMALOAD_NONE;
				break;

			case S3C_DMALOAD_1LOADED_1RUNNING:
				/* we'll worry about checking to see if another buffer is
				 * ready after we've called back the owner. This should
				 * ensure we do not wait around too long for the DMA
				 * engine to start the next transfer */
				chan->load_state = S3C_DMALOAD_1LOADED;
				break;

			case S3C_DMALOAD_NONE:
				printk(KERN_ERR "dma%d: IRQ with no loaded buffer?\n",
				       chan->number);
				break;

			default:
				printk(KERN_ERR "dma%d: IRQ in invalid load_state %d\n",
				       chan->number, chan->load_state);
				break;
			}

			if (buf != NULL) {
				/* update the chain to make sure that if we load any more
				 * buffers when we call the callback function, things should
				 * work properly */
				chan->curr = buf->next;
				buf->next = NULL;

				if (buf->magic != BUF_MAGIC) {
					printk(KERN_ERR "dma CH %d: %s: buf %p incorrect magic\n",
					       chan->number, __FUNCTION__, buf);
					goto next_channel;
				}

				s3c_dma_buffdone(chan, buf, S3C2410_RES_OK);

				/* free resouces */
				s3c_dma_freebuf(buf);
			} else {
				/* state machine advanced without a buffer; nothing to free */
			}

			/* only reload if the channel is still running... our buffer done
			 * routine may have altered the state by requesting the dma channel
			 * to stop or shutdown... */
			if (chan->next != NULL && chan->state != S3C_DMA_IDLE) {
				unsigned long flags;

				switch (chan->load_state) {
				case S3C_DMALOAD_1RUNNING:
					/* don't need to do anything for this state */
					break;

				case S3C_DMALOAD_NONE:
					/* can load buffer immediately */
					break;

				case S3C_DMALOAD_1LOADED:
					if (s3c_dma_waitforload(chan, __LINE__) == 0) {
						/* flag error? */
						printk(KERN_ERR "dma CH %d: timeout waiting for load\n",
						       chan->number);
						goto next_channel;
					}
					break;

				case S3C_DMALOAD_1LOADED_1RUNNING:
					goto next_channel;

				default:
					printk(KERN_ERR "dma CH %d: unknown load_state in irq, %d\n",
					       chan->number, chan->load_state);
					goto next_channel;
				}

				/* load the next queued buffer and kick the engine */
				local_irq_save(flags);
				s3c_dma_loadbuffer(chan, chan->next);
				start_DMA_channel(dma_regaddr(chan->dma_con, S3C_DMAC_DBGSTATUS),
						  chan->number, chan->curr->mcptr,
						  PL330_NON_SECURE_DMA);
				local_irq_restore(flags);
			} else {
				s3c_dma_lastxfer(chan);

				/* see if we can stop this channel.. */
				if (chan->load_state == S3C_DMALOAD_NONE) {
					pr_debug("# DMA CH %d - (index:%d): end of transfer, stopping channel (%ld)\n",
						 chan->number, chan->index, jiffies);
					s3c2410_dma_ctrl(chan->index | DMACH_LOW_LEVEL,
							 S3C2410_DMAOP_STOP);
				}
			}

			s3c_clear_interrupts(chan->dma_con->number, chan->number);
		}
next_channel:
		/* shift to the next channel's status bit */
		tmp >>= 1;
	}

	return IRQ_HANDLED;
}
/* Flush all queued buffers on DMA channel @ch; returns the ctrl result. */
static inline int s3c_dma_flush(unsigned ch)
{
	return s3c2410_dma_ctrl(ch, S3C2410_DMAOP_FLUSH);
}
/*
 * s3c2412_i2s_trigger - gate the IISv2 transmit or receive path in step
 * with the ALSA trigger commands.  FIFO flushing on START runs before
 * the channel is enabled; rx/tx control is done with IRQs disabled so
 * the register update is not interleaved.  Returns 0, an LR-sync error,
 * or -EINVAL for an unhandled command.
 */
static int s3c2412_i2s_trigger(struct snd_pcm_substream *substream, int cmd,
			       struct snd_soc_dai *dai)
{
	struct snd_soc_pcm_runtime *rtd = substream->private_data;
	struct s3c_i2sv2_info *i2s = to_info(rtd->cpu_dai);
	int capture = (substream->stream == SNDRV_PCM_STREAM_CAPTURE);
	unsigned long irqs;
	int ret = 0;
	struct s3c_dma_params *dma_data =
		snd_soc_dai_get_dma_data(rtd->cpu_dai, substream);

	pr_debug("Entered %s\n", __func__);

	switch (cmd) {
	case SNDRV_PCM_TRIGGER_START:
		/* On start, ensure that the FIFOs are cleared and reset. */
		writel(capture ? S3C2412_IISFIC_RXFLUSH : S3C2412_IISFIC_TXFLUSH,
		       i2s->regs + S3C2412_IISFIC);

		/* clear again, just in case */
		writel(0x0, i2s->regs + S3C2412_IISFIC);

		/* fall through */
	case SNDRV_PCM_TRIGGER_RESUME:
	case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
		if (!i2s->master) {
			ret = s3c2412_snd_lrsync(i2s);
			if (ret)
				goto exit_err;
		}

		local_irq_save(irqs);

		if (capture)
			s3c2412_snd_rxctrl(i2s, 1);
		else
			s3c2412_snd_txctrl(i2s, 1);

		local_irq_restore(irqs);

		/*
		 * Load the next buffer to DMA to meet the requirement
		 * of the auto reload mechanism of S3C24XX.
		 * This call won't bother S3C64XX.
		 */
		s3c2410_dma_ctrl(dma_data->channel, S3C2410_DMAOP_STARTED);

		break;

	case SNDRV_PCM_TRIGGER_STOP:
	case SNDRV_PCM_TRIGGER_SUSPEND:
	case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
		local_irq_save(irqs);

		if (capture)
			s3c2412_snd_rxctrl(i2s, 0);
		else
			s3c2412_snd_txctrl(i2s, 0);

		local_irq_restore(irqs);
		break;

	default:
		ret = -EINVAL;
		break;
	}

exit_err:
	return ret;
}
/* Stop transfers on DMA channel @ch; returns the s3c2410_dma_ctrl result. */
static inline int s3c_dma_stop(unsigned ch)
{
	return s3c2410_dma_ctrl(ch, S3C2410_DMAOP_STOP);
}
/*
 * s3c2410_dma_irq - per-channel DMA interrupt handler.
 *
 * Advances the channel's load-state machine, completes and frees the
 * buffer that just finished, then either loads the next queued buffer
 * or - when nothing remains - performs last-transfer handling and stops
 * the channel.  Always returns IRQ_HANDLED.
 */
static irqreturn_t s3c2410_dma_irq(int irq, void *devpw)
{
	struct s3c2410_dma_chan *chan = (struct s3c2410_dma_chan *)devpw;
	struct s3c2410_dma_buf *buf;

	buf = chan->curr;

	dbg_showchan(chan);

	/* modify the channel state */

	switch (chan->load_state) {
	case S3C2410_DMALOAD_1RUNNING:
		/* TODO - if we are running only one buffer, we probably
		 * want to reload here, and then worry about the buffer
		 * callback */

		chan->load_state = S3C2410_DMALOAD_NONE;
		break;

	case S3C2410_DMALOAD_1LOADED:
		/* iirc, we should go back to NONE loaded here, we
		 * had a buffer, and it was never verified as being
		 * loaded.
		 */

		chan->load_state = S3C2410_DMALOAD_NONE;
		break;

	case S3C2410_DMALOAD_1LOADED_1RUNNING:
		/* we'll worry about checking to see if another buffer is
		 * ready after we've called back the owner. This should
		 * ensure we do not wait around too long for the DMA
		 * engine to start the next transfer
		 */

		chan->load_state = S3C2410_DMALOAD_1LOADED;
		break;

	case S3C2410_DMALOAD_NONE:
		printk(KERN_ERR "dma%d: IRQ with no loaded buffer?\n",
		       chan->number);
		break;

	default:
		printk(KERN_ERR "dma%d: IRQ in invalid load_state %d\n",
		       chan->number, chan->load_state);
		break;
	}

	if (buf != NULL) {
		/* update the chain to make sure that if we load any more
		 * buffers when we call the callback function, things should
		 * work properly */

		chan->curr = buf->next;
		buf->next = NULL;

		if (buf->magic != BUF_MAGIC) {
			printk(KERN_ERR "dma%d: %s: buf %p incorrect magic\n",
			       chan->number, __func__, buf);
			return IRQ_HANDLED;
		}

		s3c2410_dma_buffdone(chan, buf, S3C2410_RES_OK);

		/* free resouces */
		s3c2410_dma_freebuf(buf);
	} else {
		/* no current buffer; state machine advanced without one */
	}

	/* only reload if the channel is still running... our buffer done
	 * routine may have altered the state by requesting the dma channel
	 * to stop or shutdown... */

	/* todo: check that when the channel is shut-down from inside this
	 * function, we cope with unsetting reload, etc */

	if (chan->next != NULL && chan->state != S3C2410_DMA_IDLE) {
		unsigned long flags;

		switch (chan->load_state) {
		case S3C2410_DMALOAD_1RUNNING:
			/* don't need to do anything for this state */
			break;

		case S3C2410_DMALOAD_NONE:
			/* can load buffer immediately */
			break;

		case S3C2410_DMALOAD_1LOADED:
			if (s3c2410_dma_waitforload(chan, __LINE__) == 0) {
				/* flag error? */
				printk(KERN_ERR "dma%d: timeout waiting for load (%s)\n",
				       chan->number, __func__);
				return IRQ_HANDLED;
			}

			break;

		case S3C2410_DMALOAD_1LOADED_1RUNNING:
			goto no_load;

		default:
			printk(KERN_ERR "dma%d: unknown load_state in irq, %d\n",
			       chan->number, chan->load_state);
			return IRQ_HANDLED;
		}

		/* hand the next queued buffer to the controller */
		local_irq_save(flags);
		s3c2410_dma_loadbuffer(chan, chan->next);
		local_irq_restore(flags);
	} else {
		s3c2410_dma_lastxfer(chan);

		/* see if we can stop this channel.. */
		if (chan->load_state == S3C2410_DMALOAD_NONE) {
			pr_debug("dma%d: end of transfer, stopping channel (%ld)\n",
				 chan->number, jiffies);
			s3c2410_dma_ctrl(chan->number | DMACH_LOW_LEVEL,
					 S3C2410_DMAOP_STOP);
		}
	}

no_load:
	return IRQ_HANDLED;
}
static void finalize_request(struct s3cmci_host *host) { struct mmc_request *mrq = host->mrq; struct mmc_command *cmd = host->cmd_is_stop ? mrq->stop : mrq->cmd; int debug_as_failure = 0; if (host->complete_what != COMPLETION_FINALIZE) return; if (!mrq) return; if (cmd->data && (cmd->error == 0) && (cmd->data->error == 0)) { if (host->dodma && (!host->dma_complete)) { dbg(host, dbg_dma, "DMA Missing!\n"); return; } } /* Read response from controller. */ cmd->resp[0] = readl(host->base + S3C2410_SDIRSP0); cmd->resp[1] = readl(host->base + S3C2410_SDIRSP1); cmd->resp[2] = readl(host->base + S3C2410_SDIRSP2); cmd->resp[3] = readl(host->base + S3C2410_SDIRSP3); writel(host->prescaler, host->base + S3C2410_SDIPRE); if (cmd->error) debug_as_failure = 1; if (cmd->data && cmd->data->error) debug_as_failure = 1; dbg_dumpcmd(host, cmd, debug_as_failure); /* Cleanup controller */ writel(0, host->base + S3C2410_SDICMDARG); writel(S3C2410_SDIDCON_STOP, host->base + S3C2410_SDIDCON); writel(0, host->base + S3C2410_SDICMDCON); writel(0, host->base + host->sdiimsk); if (cmd->data && cmd->error) cmd->data->error = cmd->error; if (cmd->data && cmd->data->stop && (!host->cmd_is_stop)) { host->cmd_is_stop = 1; s3cmci_send_request(host->mmc); return; } /* If we have no data transfer we are finished here */ if (!mrq->data) goto request_done; /* Calulate the amout of bytes transfer if there was no error */ if (mrq->data->error == 0) { mrq->data->bytes_xfered = (mrq->data->blocks * mrq->data->blksz); } else { mrq->data->bytes_xfered = 0; } /* If we had an error while transfering data we flush the * DMA channel and the fifo to clear out any garbage. */ if (mrq->data->error != 0) { if (host->dodma) s3c2410_dma_ctrl(host->dma, S3C2410_DMAOP_FLUSH); if (host->is2440) { /* Clear failure register and reset fifo. 
*/ writel(S3C2440_SDIFSTA_FIFORESET | S3C2440_SDIFSTA_FIFOFAIL, host->base + S3C2410_SDIFSTA); } else { u32 mci_con; /* reset fifo */ mci_con = readl(host->base + S3C2410_SDICON); mci_con |= S3C2410_SDICON_FIFORESET; writel(mci_con, host->base + S3C2410_SDICON); } } request_done: host->complete_what = COMPLETION_NONE; host->mrq = NULL; mmc_request_done(host->mmc, mrq); }