/***************************************************************************************
 *
 * Clear any pending transfers
 *
 **************************************************************************************/
void omap_clear_sound_dma(audio_stream_t *s)
{
	FN_IN;
	omap_clear_dma(s->lch[s->dma_q_head]);
	FN_OUT(0);
	return;
}
/*
 * Clear any pending transfers
 */
void omap_clear_alsa_sound_dma(struct audio_stream *s)
{
	FN_IN;
	omap_clear_dma(s->lch[s->dma_q_head]);
	FN_OUT(0);
	return;
}
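/*
 * Illustrative sketch only, not part of the driver: both clear helpers
 * above act on the logical channel at the head of the stream's DMA queue
 * (s->lch[s->dma_q_head]). A minimal stop path would halt that channel
 * with the real plat-omap omap_stop_dma() API before clearing it.
 * audio_stop_hypothetical() is a made-up caller name.
 */
static void audio_stop_hypothetical(struct audio_stream *s)
{
	/* Halt the in-flight transfer on the head channel... */
	omap_stop_dma(s->lch[s->dma_q_head]);
	/* ...then drop whatever was still queued on it. */
	omap_clear_alsa_sound_dma(s);
}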
void omap_free_dma(int lch)
{
	unsigned long flags;

	if (dma_chan[lch].dev_id == -1) {
		printk(KERN_ERR "omap_dma: trying to free unallocated DMA channel %d\n",
		       lch);
		return;
	}

	if (cpu_class_is_omap1()) {
		/* Disable all DMA interrupts for the channel. */
		dma_write(0, CICR(lch));
		/* Make sure the DMA transfer is stopped. */
		dma_write(0, CCR(lch));
	}

	if (cpu_class_is_omap2()) {
		u32 val;

		spin_lock_irqsave(&dma_chan_lock, flags);

		/* Disable interrupts */
		val = dma_read(IRQENABLE_L0);
		val &= ~(1 << lch);
		dma_write(val, IRQENABLE_L0);

		spin_unlock_irqrestore(&dma_chan_lock, flags);

		/* Clear the CSR register and IRQ status register */
		dma_write(OMAP2_DMA_CSR_CLEAR_MASK, CSR(lch));
		dma_write(1 << lch, IRQSTATUS_L0);

		/* Disable all DMA interrupts for the channel. */
		dma_write(0, CICR(lch));

		/* Make sure the DMA transfer is stopped. */
		dma_write(0, CCR(lch));
		omap_clear_dma(lch);
	}

	spin_lock_irqsave(&dma_chan_lock, flags);
	dma_chan[lch].dev_id = -1;
	dma_chan[lch].next_lch = -1;
	dma_chan[lch].callback = NULL;
	spin_unlock_irqrestore(&dma_chan_lock, flags);
}
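/*
 * Illustrative sketch only: omap_free_dma() expects a channel previously
 * allocated with omap_request_dma() (see below); freeing an unallocated
 * channel only logs an error. A typical teardown stops the transfer
 * first, then releases the logical channel. teardown_channel_hypothetical()
 * is a made-up name; omap_stop_dma() is the real plat-omap API.
 */
static void teardown_channel_hypothetical(int lch)
{
	omap_stop_dma(lch);	/* halt any transfer still in flight */
	omap_free_dma(lch);	/* disable IRQs and return the channel */
}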
/*
 * Static function; performs AES encryption/decryption using the DMA for
 * data transfer.
 *
 * inputs: src : pointer to the input data to process
 *         nb_blocks : number of blocks to process
 *         ctrl : AES control word for the operation
 *         is_kernel : true if src/dest are kernel pointers, false if they
 *                     are user-space pointers
 * output: dest : pointer to the output data (may be equal to src)
 */
static bool tf_aes_update_dma(u8 *src, u8 *dest, u32 nb_blocks,
			      u32 ctrl, bool is_kernel)
{
	/*
	 * Note: the DMA only sees physical addresses!
	 */
	int dma_ch0;
	int dma_ch1;
	struct omap_dma_channel_params ch0_parameters;
	struct omap_dma_channel_params ch1_parameters;
	u32 length = nb_blocks * AES_BLOCK_SIZE;
	u32 length_loop = 0;
	u32 nb_blocksLoop = 0;
	struct tf_device *dev = tf_get_device();

	dprintk(KERN_INFO "%s: In=0x%08x, Out=0x%08x, Len=%u\n",
		__func__,
		(unsigned int)src,
		(unsigned int)dest,
		(unsigned int)length);

	/* Lock the DMA */
	while (!mutex_trylock(&dev->sm.dma_mutex))
		cpu_relax();

	if (tf_dma_request(&dma_ch0) != PUBLIC_CRYPTO_OPERATION_SUCCESS) {
		mutex_unlock(&dev->sm.dma_mutex);
		return false;
	}
	if (tf_dma_request(&dma_ch1) != PUBLIC_CRYPTO_OPERATION_SUCCESS) {
		omap_free_dma(dma_ch0);
		mutex_unlock(&dev->sm.dma_mutex);
		return false;
	}

	while (length > 0) {
		/*
		 * At this point we are sure that the DMA channels are
		 * available and not used by another public crypto operation.
		 */

		/* DMA used for input and output */
		OUTREG32(&paes_reg->AES_SYSCONFIG,
			INREG32(&paes_reg->AES_SYSCONFIG)
			| AES_SYSCONFIG_DMA_REQ_OUT_EN_BIT
			| AES_SYSCONFIG_DMA_REQ_IN_EN_BIT);

		/* Check the length */
		if (length <= dev->dma_buffer_length)
			length_loop = length;
		else
			length_loop = dev->dma_buffer_length;

		/* The length is always a multiple of the block size */
		nb_blocksLoop = length_loop / AES_BLOCK_SIZE;

		/*
		 * Copy the data from the user input buffer into a
		 * preallocated buffer which has correct properties for
		 * efficient DMA transfers.
		 */
		if (!is_kernel) {
			if (copy_from_user(dev->dma_buffer, src,
					   length_loop)) {
				omap_free_dma(dma_ch0);
				omap_free_dma(dma_ch1);
				mutex_unlock(&dev->sm.dma_mutex);
				return false;
			}
		} else {
			memcpy(dev->dma_buffer, src, length_loop);
		}

		/* DMA1: Mem -> AES */
		tf_dma_set_channel_common_params(&ch0_parameters,
			nb_blocksLoop,
			DMA_CEN_Elts_per_Frame_AES,
			AES1_REGS_HW_ADDR + 0x60,
			(u32)dev->dma_buffer_phys,
			OMAP44XX_DMA_AES1_P_DATA_IN_REQ);

		ch0_parameters.src_amode = OMAP_DMA_AMODE_POST_INC;
		ch0_parameters.dst_amode = OMAP_DMA_AMODE_CONSTANT;
		ch0_parameters.src_or_dst_synch = OMAP_DMA_DST_SYNC;

		dprintk(KERN_INFO "%s: omap_set_dma_params(ch0)\n", __func__);
		omap_set_dma_params(dma_ch0, &ch0_parameters);

		omap_set_dma_src_burst_mode(dma_ch0, OMAP_DMA_DATA_BURST_8);
		omap_set_dma_dest_burst_mode(dma_ch0, OMAP_DMA_DATA_BURST_8);
		omap_set_dma_src_data_pack(dma_ch0, 1);

		/* DMA2: AES -> Mem */
		tf_dma_set_channel_common_params(&ch1_parameters,
			nb_blocksLoop,
			DMA_CEN_Elts_per_Frame_AES,
			(u32)dev->dma_buffer_phys,
			AES1_REGS_HW_ADDR + 0x60,
			OMAP44XX_DMA_AES1_P_DATA_OUT_REQ);

		ch1_parameters.src_amode = OMAP_DMA_AMODE_CONSTANT;
		ch1_parameters.dst_amode = OMAP_DMA_AMODE_POST_INC;
		ch1_parameters.src_or_dst_synch = OMAP_DMA_SRC_SYNC;

		dprintk(KERN_INFO "%s: omap_set_dma_params(ch1)\n", __func__);
		omap_set_dma_params(dma_ch1, &ch1_parameters);

		omap_set_dma_src_burst_mode(dma_ch1, OMAP_DMA_DATA_BURST_8);
		omap_set_dma_dest_burst_mode(dma_ch1, OMAP_DMA_DATA_BURST_8);
		omap_set_dma_dest_data_pack(dma_ch1, 1);

		wmb();

		dprintk(KERN_INFO "%s: Start DMA channel %d\n",
			__func__, (unsigned int)dma_ch1);
		tf_dma_start(dma_ch1, OMAP_DMA_BLOCK_IRQ);

		dprintk(KERN_INFO "%s: Start DMA channel %d\n",
			__func__, (unsigned int)dma_ch0);
		tf_dma_start(dma_ch0, OMAP_DMA_BLOCK_IRQ);

		dprintk(KERN_INFO "%s: Waiting for IRQ\n", __func__);
		tf_dma_wait(2);

		/* Unset the DMA synchronisation requests */
		OUTREG32(&paes_reg->AES_SYSCONFIG,
			INREG32(&paes_reg->AES_SYSCONFIG)
			& (~AES_SYSCONFIG_DMA_REQ_OUT_EN_BIT)
			& (~AES_SYSCONFIG_DMA_REQ_IN_EN_BIT));

		omap_clear_dma(dma_ch0);
		omap_clear_dma(dma_ch1);

		/*
		 * The DMA transfer is complete.
		 */
		pr_info("%s completing\n", __func__);

#ifdef CONFIG_TF_DRIVER_FAULT_INJECTION
		tf_aes_fault_injection(ctrl, dev->dma_buffer);
#endif

		/*
		 * The DMA output is in the preallocated aligned buffer
		 * and needs to be copied to the output buffer.
		 */
		if (!is_kernel) {
			if (copy_to_user(dest, dev->dma_buffer,
					 length_loop)) {
				omap_free_dma(dma_ch0);
				omap_free_dma(dma_ch1);
				mutex_unlock(&dev->sm.dma_mutex);
				return false;
			}
		} else {
			memcpy(dest, dev->dma_buffer, length_loop);
		}

		src += length_loop;
		dest += length_loop;
		length -= length_loop;
	}

	/* For safety reasons, clean the working buffer */
	memset(dev->dma_buffer, 0, length_loop);

	/* Release the DMA */
	omap_free_dma(dma_ch0);
	omap_free_dma(dma_ch1);
	mutex_unlock(&dev->sm.dma_mutex);

	dprintk(KERN_INFO "%s: Success\n", __func__);

	return true;
}
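/*
 * Illustrative sketch only, not part of the driver: a minimal kernel-side
 * caller of tf_aes_update_dma(). The ctrl word is assumed to have been
 * prepared by the surrounding crypto state; building it is out of scope
 * here. aes_process_kernel_buf_hypothetical() is a made-up name. The
 * length must be a whole number of AES blocks, matching the assumption
 * inside tf_aes_update_dma().
 */
static bool aes_process_kernel_buf_hypothetical(u8 *buf, u32 len, u32 ctrl)
{
	if (len % AES_BLOCK_SIZE != 0)
		return false;	/* the DMA path only handles whole blocks */

	/* In-place operation: dest may equal src, per the function header. */
	return tf_aes_update_dma(buf, buf, len / AES_BLOCK_SIZE, ctrl, true);
}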
int omap_request_dma(int dev_id, const char *dev_name,
		     void (*callback)(int lch, u16 ch_status, void *data),
		     void *data, int *dma_ch_out)
{
	int ch, free_ch = -1;
	unsigned long flags;
	struct omap_dma_lch *chan;

	spin_lock_irqsave(&dma_chan_lock, flags);
	for (ch = 0; ch < dma_chan_count; ch++) {
		if (free_ch == -1 && dma_chan[ch].dev_id == -1) {
			free_ch = ch;
			if (dev_id == 0)
				break;
		}
	}
	if (free_ch == -1) {
		spin_unlock_irqrestore(&dma_chan_lock, flags);
		return -EBUSY;
	}
	chan = dma_chan + free_ch;
	chan->dev_id = dev_id;

	if (cpu_class_is_omap1())
		clear_lch_regs(free_ch);

	if (cpu_class_is_omap2())
		omap_clear_dma(free_ch);

	spin_unlock_irqrestore(&dma_chan_lock, flags);

	chan->dev_name = dev_name;
	chan->callback = callback;
	chan->data = data;
	chan->flags = 0;

#ifndef CONFIG_ARCH_OMAP1
	if (cpu_class_is_omap2()) {
		chan->chain_id = -1;
		chan->next_linked_ch = -1;
	}
#endif

	chan->enabled_irqs = OMAP_DMA_DROP_IRQ | OMAP_DMA_BLOCK_IRQ;

	if (cpu_class_is_omap1())
		chan->enabled_irqs |= OMAP1_DMA_TOUT_IRQ;
	else if (cpu_class_is_omap2())
		chan->enabled_irqs |= OMAP2_DMA_MISALIGNED_ERR_IRQ |
			OMAP2_DMA_TRANS_ERR_IRQ;

	if (cpu_is_omap16xx()) {
		/* If the sync device is set, configure it dynamically. */
		if (dev_id != 0) {
			set_gdma_dev(free_ch + 1, dev_id);
			dev_id = free_ch + 1;
		}
		/*
		 * Disable the 1510 compatibility mode and set the sync device
		 * id.
		 */
		dma_write(dev_id | (1 << 10), CCR(free_ch));
	} else if (cpu_is_omap7xx() || cpu_is_omap15xx()) {
		dma_write(dev_id, CCR(free_ch));
	}

	if (cpu_class_is_omap2()) {
		omap2_enable_irq_lch(free_ch);
		omap_enable_channel_irq(free_ch);
		/* Clear the CSR register and IRQ status register */
		dma_write(OMAP2_DMA_CSR_CLEAR_MASK, CSR(free_ch));
		dma_write(1 << free_ch, IRQSTATUS_L0);
	}

	*dma_ch_out = free_ch;

	return 0;
}
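/*
 * Illustrative sketch only: typical use of omap_request_dma(). The
 * callback runs from the DMA interrupt with the channel's status bits in
 * ch_status. The names my_dma_callback_hypothetical() and
 * setup_channel_hypothetical() are placeholders; a real driver would pass
 * its own sync device id instead of 0 and program source/destination
 * parameters before starting the channel. Assumes <linux/completion.h>
 * is available in this file.
 */
static void my_dma_callback_hypothetical(int lch, u16 ch_status, void *data)
{
	/* Block-complete bit set: signal whoever is waiting on us. */
	if (ch_status & OMAP_DMA_BLOCK_IRQ)
		complete((struct completion *)data);
}

static int setup_channel_hypothetical(struct completion *done)
{
	int lch, ret;

	/* dev_id 0 means "no sync device": take the first free channel. */
	ret = omap_request_dma(0, "example", my_dma_callback_hypothetical,
			       done, &lch);
	if (ret)
		return ret;	/* -EBUSY when all channels are taken */

	/* ... program src/dst params, then omap_start_dma(lch) ... */
	return lch;
}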