/*
 * Configure the source side of a DMA transfer on logical channel lch:
 * port (omap1 only), addressing mode, start address and the element/frame
 * indexes used for indexed addressing.
 * Note that src_port is only for omap1.
 */
void omap_set_dma_src_params(int lch, int src_port, int src_amode,
			     unsigned long src_start,
			     int src_ei, int src_fi)
{
	u32 l;

	if (cpu_class_is_omap1()) {
		u16 w;

		/* Source port lives in bits 2..6 of CSDP on omap1 */
		w = dma_read(CSDP(lch));
		w &= ~(0x1f << 2);
		w |= src_port << 2;
		dma_write(w, CSDP(lch));
	}

	/* Source addressing mode is bits 12..13 of CCR */
	l = dma_read(CCR(lch));
	l &= ~(0x03 << 12);
	l |= src_amode << 12;
	dma_write(l, CCR(lch));

	if (cpu_class_is_omap1()) {
		/* omap1 splits the 32-bit address over two 16-bit registers */
		dma_write(src_start >> 16, CSSA_U(lch));
		dma_write((u16)src_start, CSSA_L(lch));
	}

	if (cpu_class_is_omap2())
		dma_write(src_start, CSSA(lch));

	/* Element and frame indexes for double-indexed transfers */
	dma_write(src_ei, CSEI(lch));
	dma_write(src_fi, CSFI(lch));
}
/*
 * Once the DMA queue is stopped, we can destroy it.
 *
 * Unlink lch_queue from lch_head.  Both channels must be stopped first;
 * on 1510 hardware only "self chaining" (head == queue) is supported.
 */
void omap_dma_unlink_lch(int lch_head, int lch_queue)
{
	if (omap_dma_in_1510_mode()) {
		if (lch_head == lch_queue) {
			/* Clear the 1510 auto-init/repeat bits (CCR 8..9) */
			dma_write(dma_read(CCR(lch_head)) & ~(3 << 8),
								CCR(lch_head));
			return;
		}
		printk("DMA linking is not supported in 1510 mode");
		BUG();
		return;
	}

	/* Refuse to unlink channels that were never linked this way */
	if (dma_chan[lch_head].next_lch != lch_queue ||
	    dma_chan[lch_head].next_lch == -1) {
		printk("omap_dma: trying to unlink "
		       "non linked channels");
		BUG();
	}

	/*
	 * Fix: the second operand previously tested lch_head a second
	 * time, so an ACTIVE lch_queue could be unlinked while running.
	 */
	if ((dma_chan[lch_head].flags & OMAP_DMA_ACTIVE) ||
	    (dma_chan[lch_queue].flags & OMAP_DMA_ACTIVE)) {
		printk("omap_dma: You need to stop the DMA channels "
		       "before unlinking");
		BUG();
	}

	dma_chan[lch_head].next_lch = -1;
}
/*
 * Configure constant-fill / transparent-copy colour handling for a DMA
 * channel.  Not available in 1510 mode (hence the BUG_ON).
 *
 * lch   - logical channel number
 * mode  - OMAP_DMA_COLOR_DIS, OMAP_DMA_CONSTANT_FILL or
 *         OMAP_DMA_TRANSPARENT_COPY
 * color - colour value written to the colour registers (24 bits on omap2)
 */
void omap_set_dma_color_mode(int lch, enum omap_dma_color_mode mode, u32 color)
{
	BUG_ON(omap_dma_in_1510_mode());

	if (cpu_class_is_omap1()) {
		u16 w;

		/* Colour mode is the low two bits of CCR2 on omap1 */
		w = dma_read(CCR2(lch));
		w &= ~0x03;
		switch (mode) {
		case OMAP_DMA_CONSTANT_FILL:
			w |= 0x01;
			break;
		case OMAP_DMA_TRANSPARENT_COPY:
			w |= 0x02;
			break;
		case OMAP_DMA_COLOR_DIS:
			break;
		default:
			BUG();
		}
		dma_write(w, CCR2(lch));

		w = dma_read(LCH_CTRL(lch));
		w &= ~0x0f;
		/* Default is channel type 2D */
		if (mode) {
			/* 32-bit colour split over two 16-bit registers */
			dma_write((u16)color, COLOR_L(lch));
			dma_write((u16)(color >> 16), COLOR_U(lch));
			w |= 1;		/* Channel type G */
		}
		dma_write(w, LCH_CTRL(lch));
	}

	if (cpu_class_is_omap2()) {
		u32 val;

		/* Bits 16/17 of CCR select the colour mode on omap2 */
		val = dma_read(CCR(lch));
		val &= ~((1 << 17) | (1 << 16));
		switch (mode) {
		case OMAP_DMA_CONSTANT_FILL:
			val |= 1 << 16;
			break;
		case OMAP_DMA_TRANSPARENT_COPY:
			val |= 1 << 17;
			break;
		case OMAP_DMA_COLOR_DIS:
			break;
		default:
			BUG();
		}
		dma_write(val, CCR(lch));

		/* Only 24 bits of colour are significant */
		color &= 0xffffff;
		dma_write(color, COLOR(lch));
	}
}
/*
 * Program the transfer parameters of a logical channel: element size
 * (data_type), element/frame counts and the synchronization mode and
 * hardware trigger.
 */
void omap_set_dma_transfer_params(int lch, int data_type, int elem_count,
				  int frame_count, int sync_mode,
				  int dma_trigger, int src_or_dst_synch)
{
	u32 l;

	/* Element width is the low two bits of CSDP */
	l = dma_read(CSDP(lch));
	l &= ~0x03;
	l |= data_type;
	dma_write(l, CSDP(lch));

	if (cpu_class_is_omap1()) {
		u16 ccr;

		/* Frame sync -> CCR bit 5 */
		ccr = dma_read(CCR(lch));
		ccr &= ~(1 << 5);
		if (sync_mode == OMAP_DMA_SYNC_FRAME)
			ccr |= 1 << 5;
		dma_write(ccr, CCR(lch));

		/* Block sync -> CCR2 bit 2 */
		ccr = dma_read(CCR2(lch));
		ccr &= ~(1 << 2);
		if (sync_mode == OMAP_DMA_SYNC_BLOCK)
			ccr |= 1 << 2;
		dma_write(ccr, CCR2(lch));
	}

	if (cpu_class_is_omap2() && dma_trigger) {
		u32 val;

		val = dma_read(CCR(lch));

		/* DMA_SYNCHRO_CONTROL_UPPER depends on the channel number */
		val &= ~((3 << 19) | 0x1f);
		/* Bits above the low 5 of the trigger land at bit 19+ */
		val |= (dma_trigger & ~0x1f) << 14;
		val |= dma_trigger & 0x1f;

		if (sync_mode & OMAP_DMA_SYNC_FRAME)
			val |= 1 << 5;
		else
			val &= ~(1 << 5);

		if (sync_mode & OMAP_DMA_SYNC_BLOCK)
			val |= 1 << 18;
		else
			val &= ~(1 << 18);

		if (src_or_dst_synch)
			val |= 1 << 24;		/* source synch */
		else
			val &= ~(1 << 24);	/* dest synch */

		dma_write(val, CCR(lch));
	}

	dma_write(elem_count, CEN(lch));
	dma_write(frame_count, CFN(lch));
}
/**
 * @brief omap_stop_dma_chain_transfers - Stop the dma transfer of a chain.
 *
 * @param chain_id
 *
 * @return - Success : 0
 *	     Failure : EINVAL
 */
int omap_stop_dma_chain_transfers(int chain_id)
{
	int *channels;
	u32 l, i;
	u32 sys_cf;

	/* Check for input params */
	if (unlikely((chain_id < 0 || chain_id >= dma_lch_count))) {
		IOLog("Invalid chain id\n");
		return -EINVAL;
	}

	/* Check if the chain exists */
	if (dma_linked_lch[chain_id].linked_dmach_q == NULL) {
		IOLog("Chain doesn't exists\n");
		return -EINVAL;
	}
	channels = dma_linked_lch[chain_id].linked_dmach_q;

	/*
	 * DMA Errata:
	 * Special programming model needed to disable DMA before end of block
	 */
	/* Save OCP_SYSCONFIG so it can be restored once all channels stop */
	sys_cf = dma_read(OCP_SYSCONFIG);
	l = sys_cf;
	/* Middle mode reg set no Standby */
	l &= ~((1 << 12)|(1 << 13));
	dma_write(l, OCP_SYSCONFIG);

	for (i = 0; i < dma_linked_lch[chain_id].no_of_lchs_linked; i++) {

		/* Stop the Channel transmission */
		l = dma_read(CCR(channels[i]));
		l &= ~(1 << 7);		/* clear the channel-enable bit */
		dma_write(l, CCR(channels[i]));

		/* Disable the link in all the channels */
		disable_lnk(channels[i]);
		dma_chan[channels[i]].state = DMA_CH_NOTSTARTED;

	}
	dma_linked_lch[chain_id].chain_state = DMA_CHAIN_NOTSTARTED;

	/* Reset the Queue pointers */
	OMAP_DMA_CHAIN_QINIT(chain_id);

	/* Errata - put in the old value */
	dma_write(sys_cf, OCP_SYSCONFIG);

	return 0;
}
void omap_start_dma(int lch) { u32 l; if (!omap_dma_in_1510_mode() && dma_chan[lch].next_lch != -1) { int next_lch, cur_lch; char dma_chan_link_map[OMAP_DMA4_LOGICAL_DMA_CH_COUNT]; dma_chan_link_map[lch] = 1; /* Set the link register of the first channel */ enable_lnk(lch); memset(dma_chan_link_map, 0, sizeof(dma_chan_link_map)); cur_lch = dma_chan[lch].next_lch; do { next_lch = dma_chan[cur_lch].next_lch; /* The loop case: we've been here already */ if (dma_chan_link_map[cur_lch]) break; /* Mark the current channel */ dma_chan_link_map[cur_lch] = 1; enable_lnk(cur_lch); omap_enable_channel_irq(cur_lch); cur_lch = next_lch; } while (next_lch != -1); } else if (cpu_is_omap242x() || (cpu_is_omap243x() && omap_type() <= OMAP2430_REV_ES1_0)) { /* Errata: Need to write lch even if not using chaining */ dma_write(lch, CLNK_CTRL(lch)); } omap_enable_channel_irq(lch); l = dma_read(CCR(lch)); /* * Errata: On ES2.0 BUFFERING disable must be set. * This will always fail on ES1.0 */ if (cpu_is_omap24xx()) l |= OMAP_DMA_CCR_EN; l |= OMAP_DMA_CCR_EN; dma_write(l, CCR(lch)); dma_chan[lch].flags |= OMAP_DMA_ACTIVE; }
/*
 * CameraFilesystem set_info handler: apply permission changes to the
 * image on the card; renaming is not supported by the camera.
 */
static int
set_info_func (CameraFilesystem *fs, const char *folder, const char *file,
	       CameraFileInfo info, void *data, GPContext *context)
{
	Camera *camera = data;
	char id_buf[7];
	unsigned long image_id;
	int protect;
	KncCamRes cr;

	/* Permissions? */
	if (info.file.fields & GP_FILE_INFO_PERMISSIONS) {
		/* The first six characters of the name are the image id. */
		strncpy (id_buf, file, 6);
		id_buf[6] = '\0';
		image_id = atol (id_buf);

		/* Protection is the inverse of the delete permission. */
		protect = (info.file.permissions & GP_FILE_PERM_DELETE) ?
								FALSE : TRUE;
		CR (knc_set_prot (camera->pl->c, &cr, image_id,
				  KNC_SOURCE_CARD, protect), context);
		CCR (cr, context);
	}

	/* Name? */
	if (info.file.fields & GP_FILE_INFO_NAME) {
		gp_context_error (context, _("Your camera does not support "
					     "changing filenames."));
		return (GP_ERROR_NOT_SUPPORTED);
	}

	return (GP_OK);
}
/*
 * Fill summary->text with model, serial-number and version information
 * queried from the camera.
 */
static int
camera_summary (Camera* camera, CameraText* summary, GPContext *context)
{
	KncInfo info;
	KncCamRes cr;

	/* CR/CCR return early on transport or camera-reported errors. */
	CR (knc_get_info (camera->pl->c, &cr, &info), context);
	CCR (cr, context);

	snprintf (summary->text, sizeof (summary->text),
		  _("Model: %s\n"
		    "Serial Number: %s,\n"
		    "Hardware Version: %i.%i\n"
		    "Software Version: %i.%i\n"
		    "Testing Software Version: %i.%i\n"
		    "Name: %s,\n"
		    "Manufacturer: %s\n"),
		  info.model, info.serial_number,
		  info.hardware.major, info.hardware.minor,
		  info.software.major, info.software.minor,
		  info.testing.major, info.testing.minor,
		  info.name, info.manufacturer);

	return (GP_OK);
}
/*
 * Switch the serial link to the requested speed (bit/s).  A speed of 0
 * selects the highest bit rate the camera reports as supported.  Does
 * nothing if the port is already at the requested speed or at 115200.
 */
static int
set_speed (Camera *camera, int speed, GPContext *context)
{
	GPPortSettings s;
	KncCamRes cr;
	KncBitRate br;
	KncBitFlag bf;
	int i;
	int speeds[] = {300, 600, 1200, 2400, 4800, 9600, 19200, 38400,
			57600, 115200};

	C (gp_port_get_settings (camera->port, &s));
	if ((s.serial.speed == speed) || (s.serial.speed == 115200))
		return (GP_OK);

	if (speed == 0) {
		/* Pick the fastest rate the camera advertises. */
		CR (knc_get_io_pref (camera->pl->c, &cr, &br, &bf), context);
		CCR (cr, context);
		for (i = 9; i >= 0; i--)
			if ((1 << i) & br)
				break;
		if (i < 0)
			return (GP_ERROR_IO_SERIAL_SPEED);
		speed = speeds[i];
		br = 1 << i;
	} else {
		/* Map the requested speed onto its bit-rate flag. */
		for (i = 0; i < 10; i++)
			if (speeds[i] == speed)
				break;
		if (i == 10)
			return (GP_ERROR_IO_SERIAL_SPEED);
		br = 1 << i;
	}

	/* Request the new speed */
	bf = KNC_BIT_FLAG_8_BITS;
	CR (knc_set_io_pref (camera->pl->c, &cr, &br, &bf), context);
	CCR (cr, context);

	s.serial.speed = speed;
	C (gp_port_set_settings (camera->port, s));

	return (GP_OK);
}
/*
 * Release a previously requested DMA channel: silence its interrupts,
 * stop any transfer in progress and return it to the free pool.
 */
void omap_free_dma(int lch)
{
	unsigned long flags;

	if (dma_chan[lch].dev_id == -1) {
		IOLog("omap_dma: trying to free unallocated DMA channel %d\n",
		      lch);
		return;
	}

	if (cpu_class_is_omap1()) {
		/* Disable all DMA interrupts for the channel. */
		dma_write(0, CICR(lch));
		/* Make sure the DMA transfer is stopped. */
		dma_write(0, CCR(lch));
	}

	if (cpu_class_is_omap2()) {
		u32 val;

		spin_lock_irqsave(&dma_chan_lock, flags);
		/* Disable interrupts */
		val = dma_read(IRQENABLE_L0);
		val &= ~(1 << lch);
		dma_write(val, IRQENABLE_L0);
		spin_unlock_irqrestore(&dma_chan_lock, flags);

		/* Clear the CSR register and IRQ status register */
		dma_write(OMAP2_DMA_CSR_CLEAR_MASK, CSR(lch));
		dma_write(1 << lch, IRQSTATUS_L0);

		/* Disable all DMA interrupts for the channel. */
		dma_write(0, CICR(lch));

		/* Make sure the DMA transfer is stopped. */
		dma_write(0, CCR(lch));
		omap_clear_dma(lch);
	}

	/* Mark the channel free under the channel lock */
	spin_lock_irqsave(&dma_chan_lock, flags);
	dma_chan[lch].dev_id = -1;
	dma_chan[lch].next_lch = -1;
	dma_chan[lch].callback = NULL;
	spin_unlock_irqrestore(&dma_chan_lock, flags);
}
/** * @brief omap_start_dma_chain_transfers - Start the chain * * @param chain_id * * @return - Success : 0 * Failure : -EINVAL/-EBUSY */ int omap_start_dma_chain_transfers(int chain_id) { int *channels; u32 l, i; if (unlikely((chain_id < 0 || chain_id >= dma_lch_count))) { IOLog("Invalid chain id\n"); return -EINVAL; } channels = dma_linked_lch[chain_id].linked_dmach_q; if (dma_linked_lch[channels[0]].chain_state == DMA_CHAIN_STARTED) { IOLog("Chain is already started\n"); return -EBUSY; } if (dma_linked_lch[chain_id].chain_mode == OMAP_DMA_STATIC_CHAIN) { for (i = 0; i < dma_linked_lch[chain_id].no_of_lchs_linked; i++) { enable_lnk(channels[i]); omap_enable_channel_irq(channels[i]); } } else { omap_enable_channel_irq(channels[0]); } l = dma_read(CCR(channels[0])); l |= (1 << 7); dma_linked_lch[chain_id].chain_state = DMA_CHAIN_STARTED; dma_chan[channels[0]].state = DMA_CH_STARTED; if ((0 == (l & (1 << 24)))) l &= ~(1 << 25); else l |= (1 << 25); dma_write(l, CCR(channels[0])); dma_chan[channels[0]].flags |= OMAP_DMA_ACTIVE; return 0; }
/*
 * CameraFilesystem get_file handler: download either the thumbnail
 * (GP_FILE_TYPE_PREVIEW) or the full EXIF image (GP_FILE_TYPE_NORMAL)
 * for the picture whose id is encoded in the first six characters of
 * the filename.  Other file types are not supported.
 */
static int
get_file_func (CameraFilesystem *fs, const char *folder, const char *filename,
	       CameraFileType type, CameraFile *file, void *data,
	       GPContext *context)
{
	Camera *camera = data;
	unsigned long image_id;
	char image_id_string[] = {0, 0, 0, 0, 0, 0, 0};
	CameraFileInfo info;
	KncCamRes cr;
	KncCntrlRes cntrl_res = KNC_CNTRL_OK;

	if (strlen (filename) != 11)
		return (GP_ERROR_FILE_NOT_FOUND);
	if (strcmp (folder, "/"))
		return (GP_ERROR_DIRECTORY_NOT_FOUND);

	/* Check if we can get the image id from the filename. */
	strncpy (image_id_string, filename, 6);
	image_id = atol (image_id_string);

	/* Get information about the image (also validates it exists). */
	C (gp_filesystem_get_info (camera->fs, folder, filename, &info,
				   context));

	/*
	 * Remove the timeout, get the image and start the timeout
	 * afterwards.
	 */
	gp_camera_stop_timeout (camera, camera->pl->timeout);
	knc_cntrl_set_func_data (camera->pl->c, data_func, file);
	switch (type) {
	case GP_FILE_TYPE_PREVIEW:
		cntrl_res = knc_get_image (camera->pl->c, &cr, image_id,
					   KNC_SOURCE_CARD, KNC_IMAGE_THUMB);
		break;
	case GP_FILE_TYPE_NORMAL:
		cntrl_res = knc_get_image (camera->pl->c, &cr, image_id,
					   KNC_SOURCE_CARD, KNC_IMAGE_EXIF);
		break;
	default:
		/*
		 * Fix: the error code was stored in a local and never
		 * returned, so unsupported types fell through and
		 * reported GP_OK.  Restart the timeout and fail.
		 */
		camera->pl->timeout = gp_camera_start_timeout (camera,
						PING_TIMEOUT, timeout_func);
		return (GP_ERROR_NOT_SUPPORTED);
	}
	camera->pl->timeout = gp_camera_start_timeout (camera, PING_TIMEOUT,
						       timeout_func);
	CR (cntrl_res, context);
	CCR (cr, context);

	C (gp_file_set_mime_type (file, GP_MIME_JPEG));

	return (GP_OK);
}
/**
 * @brief omap_dma_set_prio_lch : Set channel wise priority settings
 *
 * @param lch
 * @param read_prio - Read priority
 * @param write_prio - Write priority
 * Both of the above can be set with one of the following values :
 * DMA_CH_PRIO_HIGH/DMA_CH_PRIO_LOW
 */
int
omap_dma_set_prio_lch(int lch, unsigned char read_prio,
		      unsigned char write_prio)
{
	u32 ccr;

	if (unlikely((lch < 0 || lch >= dma_lch_count))) {
		IOLog("Invalid channel id\n");
		return -EINVAL;
	}

	/* Read priority is CCR bit 6; write priority is bit 26. */
	ccr = dma_read(CCR(lch));
	ccr &= ~((1 << 6) | (1 << 26));

	ccr |= (read_prio & 0x1) << 6;
	/* The write-priority bit only exists on 2430/34xx/44xx parts. */
	if (cpu_is_omap2430() || cpu_is_omap34xx() || cpu_is_omap44xx())
		ccr |= (write_prio & 0x1) << 26;

	dma_write(ccr, CCR(lch));
	return 0;
}
void omap_set_dma_priority(int lch, int dst_port, int priority) { unsigned long reg; u32 l; if (cpu_class_is_omap1()) { switch (dst_port) { case OMAP_DMA_PORT_OCP_T1: /* FFFECC00 */ reg = OMAP_TC_OCPT1_PRIOR; break; case OMAP_DMA_PORT_OCP_T2: /* FFFECCD0 */ reg = OMAP_TC_OCPT2_PRIOR; break; case OMAP_DMA_PORT_EMIFF: /* FFFECC08 */ reg = OMAP_TC_EMIFF_PRIOR; break; case OMAP_DMA_PORT_EMIFS: /* FFFECC04 */ reg = OMAP_TC_EMIFS_PRIOR; break; default: BUG(); return; } l = omap_readl(reg); l &= ~(0xf << 8); l |= (priority & 0xf) << 8; omap_writel(l, reg); } if (cpu_class_is_omap2()) { u32 ccr; ccr = dma_read(CCR(lch)); if (priority) ccr |= (1 << 6); else ccr &= ~(1 << 6); dma_write(ccr, CCR(lch)); } }
/*
 * lch_queue DMA will start right after lch_head one is finished.
 * For this DMA link to start, you still need to start (see omap_start_dma)
 * the first one. That will fire up the entire queue.
 */
void omap_dma_link_lch(int lch_head, int lch_queue)
{
	if (omap_dma_in_1510_mode()) {
		if (lch_head == lch_queue) {
			/* 1510 only supports self-chaining via CCR bits 8-9 */
			dma_write(dma_read(CCR(lch_head)) | (3 << 8),
								CCR(lch_head));
			return;
		}
		IOLog("DMA linking is not supported in 1510 mode\n");
		BUG();
		return;
	}

	/* Both channels must have been requested before they are linked */
	if ((dma_chan[lch_head].dev_id == -1) ||
	    (dma_chan[lch_queue].dev_id == -1)) {
		IOLog("omap_dma: trying to link "
		      "non requested channels\n");
		BUG();
	}

	dma_chan[lch_head].next_lch = lch_queue;
}
/*
 * CameraFilesystem file-list handler: enumerate all pictures on the
 * card and register them (with their info and thumbnails) directly in
 * the filesystem cache.
 */
static int
file_list_func (CameraFilesystem *fs, const char *folder, CameraList *list,
		void *data, GPContext *context)
{
	CameraFile *file;
	CameraFileInfo info;
	KncStatus status;
	unsigned int i, id;
	Camera *camera = data;
	int result;
	KncCamRes cr;

	/*
	 * We can't get the filename from the camera.
	 * But we decide to call the images "%6i.jpeg", with the image id as
	 * parameter. Therefore, let's get the image ids.
	 */
	CR (knc_get_status (camera->pl->c, &cr, &status), context);
	CCR (cr, context);

	id = gp_context_progress_start (context, status.pictures,
					_("Getting file list..."));
	for (i = 0; i < status.pictures; i++) {

		/* Get information */
		gp_file_new (&file);
		result = get_info (camera, i + 1, &info, file, context);
		if (result < 0) {
			gp_file_unref (file);
			return (result);
		}

		/*
		 * Append directly to the filesystem instead of to the list,
		 * because we have additional information.
		 */
		gp_filesystem_append (camera->fs, folder, info.file.name,
				      context);
		gp_filesystem_set_info_noop (camera->fs, folder, info,
					     context);
		gp_filesystem_set_file_noop (camera->fs, folder, file,
					     context);
		gp_file_unref (file);

		gp_context_idle (context);
		gp_context_progress_update (context, id, i + 1);
		if (gp_context_cancel (context) == GP_CONTEXT_FEEDBACK_CANCEL)
			return (GP_ERROR_CANCEL);
	}
	gp_context_progress_stop (context, id);

	return (GP_OK);
}
int omap_dma_running(void) { int lch; #if 0 if (cpu_class_is_omap1()) if (omap_lcd_dma_running()) return 1; #endif for (lch = 0; lch < dma_chan_count; lch++) if (dma_read(CCR(lch)) & OMAP_DMA_CCR_EN) return 1; return 0; }
/*
 * CameraFilesystem delete-all handler: erase every unprotected picture
 * on the card.  Fails (and resets the filesystem cache) if any picture
 * was protected and therefore not erased.
 */
static int
delete_all_func (CameraFilesystem *fs, const char* folder, void *data,
		 GPContext *context)
{
	Camera *camera = data;
	unsigned int not_erased = 0;
	KncCamRes cr;

	/* Only the root folder is supported. */
	if (strcmp (folder, "/"))
		return (GP_ERROR_DIRECTORY_NOT_FOUND);

	/*
	 * Fix: the address-of operator had been mangled into a U+00AC
	 * NOT SIGN ("¬_erased"), which does not compile; pass
	 * &not_erased so the camera can report the protected count.
	 */
	CR (knc_erase_all (camera->pl->c, &cr, KNC_SOURCE_CARD,
			   &not_erased), context);
	CCR (cr, context);

	if (not_erased) {
		gp_context_error (context, _("%i pictures could not be "
			"deleted because they are protected"), not_erased);
		gp_filesystem_reset (camera->fs);
		return (GP_ERROR);
	}

	return (GP_OK);
}
static int delete_file_func (CameraFilesystem *fs, const char *folder, const char *filename, void *data, GPContext *context) { Camera *camera = data; char tmp[] = {0, 0, 0, 0, 0, 0, 0}; unsigned long image_id; KncCamRes cr; C_NULL (camera && folder && filename); /* We don't support folders */ if (strcmp (folder, "/")) return (GP_ERROR_DIRECTORY_NOT_FOUND); /* Extract the image id from the filename */ strncpy (tmp, filename, 6); image_id = atol (tmp); CR (knc_erase_image (camera->pl->c, &cr, image_id, KNC_SOURCE_CARD), context); CCR (cr, context); return (GP_OK); }
/*
 * Request a free logical DMA channel for device dev_id.  On success the
 * channel number is stored in *dma_ch_out and 0 is returned; -EBUSY is
 * returned when no channel is free.  The callback is invoked from the
 * DMA interrupt handler with the channel status.
 */
int omap_request_dma(int dev_id, const char *dev_name,
		     void (*callback)(int lch, u16 ch_status, void *data),
		     void *data, int *dma_ch_out)
{
	int ch, free_ch = -1;
	unsigned long flags;
	struct omap_dma_lch *chan;

	spin_lock_irqsave(&dma_chan_lock, flags);
	for (ch = 0; ch < dma_chan_count; ch++) {
		if (free_ch == -1 && dma_chan[ch].dev_id == -1) {
			free_ch = ch;
			/* dev_id 0 means "any channel": take the first */
			if (dev_id == 0)
				break;
		}
	}
	if (free_ch == -1) {
		spin_unlock_irqrestore(&dma_chan_lock, flags);
		return -EBUSY;
	}
	chan = dma_chan + free_ch;
	/* Claim the channel before dropping the lock */
	chan->dev_id = dev_id;

	if (cpu_class_is_omap1())
		clear_lch_regs(free_ch);

	if (cpu_class_is_omap2())
		omap_clear_dma(free_ch);

	spin_unlock_irqrestore(&dma_chan_lock, flags);

	chan->dev_name = dev_name;
	chan->callback = callback;
	chan->data = data;
	chan->flags = 0;

#ifndef CONFIG_ARCH_OMAP1
	if (cpu_class_is_omap2()) {
		chan->chain_id = -1;
		chan->next_linked_ch = -1;
	}
#endif

	chan->enabled_irqs = OMAP_DMA_DROP_IRQ | OMAP_DMA_BLOCK_IRQ;

	if (cpu_class_is_omap1())
		chan->enabled_irqs |= OMAP1_DMA_TOUT_IRQ;
	else if (cpu_class_is_omap2())
		chan->enabled_irqs |= OMAP2_DMA_MISALIGNED_ERR_IRQ |
			OMAP2_DMA_TRANS_ERR_IRQ;

	if (cpu_is_omap16xx()) {
		/* If the sync device is set, configure it dynamically. */
		if (dev_id != 0) {
			set_gdma_dev(free_ch + 1, dev_id);
			dev_id = free_ch + 1;
		}
		/*
		 * Disable the 1510 compatibility mode and set the sync device
		 * id.
		 */
		dma_write(dev_id | (1 << 10), CCR(free_ch));
	} else if (cpu_is_omap7xx() || cpu_is_omap15xx()) {
		dma_write(dev_id, CCR(free_ch));
	}

	if (cpu_class_is_omap2()) {
		omap2_enable_irq_lch(free_ch);
		omap_enable_channel_irq(free_ch);
		/* Clear the CSR register and IRQ status register */
		dma_write(OMAP2_DMA_CSR_CLEAR_MASK, CSR(free_ch));
		dma_write(1 << free_ch, IRQSTATUS_L0);
	}

	*dma_ch_out = free_ch;

	return 0;
}
/**
 * @brief omap_dma_chain_a_transfer - Get a free channel from a chain,
 * set the params and start the transfer.
 *
 * @param chain_id
 * @param src_start - buffer start address
 * @param dest_start - Dest address
 * @param elem_count
 * @param frame_count
 * @param callbk_data - channel callback parameter data.
 *
 * @return - Success : 0
 *	     Failure: -EINVAL/-EBUSY
 */
int omap_dma_chain_a_transfer(int chain_id, int src_start, int dest_start,
			int elem_count, int frame_count, void *callbk_data)
{
	int *channels;
	u32 l, lch;
	int start_dma = 0;

	/*
	 * if buffer size is less than 1 then there is
	 * no use of starting the chain
	 */
	if (elem_count < 1) {
		IOLog("Invalid buffer size\n");
		return -EINVAL;
	}

	/* Check for input params */
	if (unlikely((chain_id < 0 || chain_id >= dma_lch_count))) {
		IOLog("Invalid chain id\n");
		return -EINVAL;
	}

	/* Check if the chain exists */
	if (dma_linked_lch[chain_id].linked_dmach_q == NULL) {
		IOLog("Chain doesn't exist\n");
		return -EINVAL;
	}

	/* Check if all the channels in chain are in use */
	if (OMAP_DMA_CHAIN_QFULL(chain_id))
		return -EBUSY;

	/* Frame count may be negative in case of indexed transfers */
	channels = dma_linked_lch[chain_id].linked_dmach_q;

	/* Get a free channel */
	lch = channels[dma_linked_lch[chain_id].q_tail];

	/* Store the callback data */
	dma_chan[lch].data = callbk_data;

	/* Increment the q_tail */
	OMAP_DMA_CHAIN_INCQTAIL(chain_id);

	/* Set the params to the free channel */
	if (src_start != 0)
		dma_write(src_start, CSSA(lch));
	if (dest_start != 0)
		dma_write(dest_start, CDSA(lch));

	/* Write the buffer size */
	dma_write(elem_count, CEN(lch));
	dma_write(frame_count, CFN(lch));

	/*
	 * If the chain is dynamically linked,
	 * then we may have to start the chain if its not active
	 */
	if (dma_linked_lch[chain_id].chain_mode == OMAP_DMA_DYNAMIC_CHAIN) {

		/*
		 * In Dynamic chain, if the chain is not started,
		 * queue the channel
		 */
		if (dma_linked_lch[chain_id].chain_state ==
						DMA_CHAIN_NOTSTARTED) {
			/* Enable the link in previous channel */
			if (dma_chan[dma_chan[lch].prev_linked_ch].state ==
								DMA_CH_QUEUED)
				enable_lnk(dma_chan[lch].prev_linked_ch);
			dma_chan[lch].state = DMA_CH_QUEUED;
		}

		/*
		 * Chain is already started, make sure its active,
		 * if not then start the chain
		 */
		else {
			start_dma = 1;

			if (dma_chan[dma_chan[lch].prev_linked_ch].state ==
							DMA_CH_STARTED) {
				/* Previous channel still running: link onto
				 * it and only restart if it has stopped. */
				enable_lnk(dma_chan[lch].prev_linked_ch);
				dma_chan[lch].state = DMA_CH_QUEUED;
				start_dma = 0;
				if (0 == ((1 << 7) & dma_read(
					CCR(dma_chan[lch].prev_linked_ch)))) {
					disable_lnk(dma_chan[lch].
						    prev_linked_ch);
					IOLog("\n prev ch is stopped\n");
					start_dma = 1;
				}
			} else if (dma_chan[dma_chan[lch].prev_linked_ch].state
							== DMA_CH_QUEUED) {
				enable_lnk(dma_chan[lch].prev_linked_ch);
				dma_chan[lch].state = DMA_CH_QUEUED;
				start_dma = 0;
			}
			omap_enable_channel_irq(lch);

			l = dma_read(CCR(lch));

			/*
			 * NOTE(review): mirrors the src/dest-synch
			 * selection (bit 24) into bit 25 -- confirm the
			 * exact bit semantics against the TRM.
			 */
			if ((0 == (l & (1 << 24))))
				l &= ~(1 << 25);
			else
				l |= (1 << 25);
			if (start_dma == 1) {
				/* Start only if not already enabled */
				if (0 == (l & (1 << 7))) {
					l |= (1 << 7);
					dma_chan[lch].state = DMA_CH_STARTED;
					IOLog("starting %d\n", lch);
					dma_write(l, CCR(lch));
				} else
					start_dma = 0;
			} else {
				if (0 == (l & (1 << 7)))
					dma_write(l, CCR(lch));
			}
			dma_chan[lch].flags |= OMAP_DMA_ACTIVE;
		}
	}

	return 0;
}
/*
 * Prepare the SDHC controller and the i.MX DMA engine for a data
 * transfer.  Transfers shorter than 512 bytes are done by the CPU
 * (PIO); everything else is mapped for scatter-gather DMA.
 */
static void imxmci_setup_data(struct imxmci_host *host, struct mmc_data *data)
{
	unsigned int nob = data->blocks;
	unsigned int blksz = data->blksz;
	unsigned int datasz = nob * blksz;
	int i;

	if (data->flags & MMC_DATA_STREAM)
		nob = 0xffff;

	host->data = data;
	data->bytes_xfered = 0;

	MMC_NOB = nob;
	MMC_BLK_LEN = blksz;

	/*
	 * DMA cannot be used for small block sizes, we have to use CPU
	 * driven transfers otherwise.
	 * We are in big trouble for non-512 byte transfers according to
	 * the note in paragraph 20.6.7 of the User Manual anyway, but we
	 * need to be able to transfer SCR at least.
	 * The situation is even more complex in reality. The SDHC is not
	 * able to handle well partial FIFO fills and reads. The length
	 * has to be rounded up to burst size multiple.
	 * This is required for SCR read at least.
	 */
	if (datasz < 512) {
		host->dma_size = datasz;
		if (data->flags & MMC_DATA_READ) {
			host->dma_dir = DMA_FROM_DEVICE;

			/* Hack to enable read SCR */
			MMC_NOB = 1;
			MMC_BLK_LEN = 512;
		} else {
			host->dma_dir = DMA_TO_DEVICE;
		}

		/* Convert back to virtual address */
		host->data_ptr = (u16*)(page_address(data->sg->page) +
					data->sg->offset);
		host->data_cnt = 0;

		/* CPU-driven path: flag for the tasklet/IRQ handler */
		clear_bit(IMXMCI_PEND_DMA_DATA_b, &host->pending_events);
		set_bit(IMXMCI_PEND_CPU_DATA_b, &host->pending_events);

		return;
	}

	if (data->flags & MMC_DATA_READ) {
		host->dma_dir = DMA_FROM_DEVICE;
		host->dma_nents = dma_map_sg(mmc_dev(host->mmc), data->sg,
					     data->sg_len, host->dma_dir);

		imx_dma_setup_sg(host->dma, data->sg, data->sg_len, datasz,
				 host->res->start + MMC_BUFFER_ACCESS_OFS,
				 DMA_MODE_READ);

		/*imx_dma_setup_mem2dev_ccr(host->dma, DMA_MODE_READ, IMX_DMA_WIDTH_16, CCR_REN);*/
		CCR(host->dma) = CCR_DMOD_LINEAR | CCR_DSIZ_32 |
				 CCR_SMOD_FIFO | CCR_SSIZ_16 | CCR_REN;
	} else {
		host->dma_dir = DMA_TO_DEVICE;

		host->dma_nents = dma_map_sg(mmc_dev(host->mmc), data->sg,
					     data->sg_len, host->dma_dir);

		imx_dma_setup_sg(host->dma, data->sg, data->sg_len, datasz,
				 host->res->start + MMC_BUFFER_ACCESS_OFS,
				 DMA_MODE_WRITE);

		/*imx_dma_setup_mem2dev_ccr(host->dma, DMA_MODE_WRITE, IMX_DMA_WIDTH_16, CCR_REN);*/
		CCR(host->dma) = CCR_SMOD_LINEAR | CCR_SSIZ_32 |
				 CCR_DMOD_FIFO | CCR_DSIZ_16 | CCR_REN;
	}

#if 1	/* This code is there only for consistency checking and can be disabled in future */
	host->dma_size = 0;
	for(i=0; i<host->dma_nents; i++)
		host->dma_size+=data->sg[i].length;

	if (datasz > host->dma_size) {
		dev_err(mmc_dev(host->mmc),
			"imxmci_setup_data datasz 0x%x > 0x%x dm_size\n",
			datasz, host->dma_size);
	}
#endif

	host->dma_size = datasz;

	/* Make sure descriptors are visible before enabling the engine */
	wmb();

	if(host->actual_bus_width == MMC_BUS_WIDTH_4)
		BLR(host->dma) = 0;	/* burst 64 byte read / 64 bytes write */
	else
		BLR(host->dma) = 16;	/* burst 16 byte read / 16 bytes write */

	RSSR(host->dma) = DMA_REQ_SDHC;

	set_bit(IMXMCI_PEND_DMA_DATA_b, &host->pending_events);
	clear_bit(IMXMCI_PEND_CPU_DATA_b, &host->pending_events);

	/* start DMA engine for read, write is delayed after initial response */
	if (host->dma_dir == DMA_FROM_DEVICE) {
		imx_dma_enable(host->dma);
	}
}
/*
 * Tasklet (bottom half) of the SDHC driver: handles command completion,
 * CPU-driven data, DMA completion, card-change events and watchdog
 * timeouts signalled via host->pending_events / host->stuck_timeout.
 */
static void imxmci_tasklet_fnc(unsigned long data)
{
	struct imxmci_host *host = (struct imxmci_host *)data;
	u32 stat;
	unsigned int data_dir_mask = 0;	/* STATUS_WR_CRC_ERROR_CODE_MASK */
	int timeout = 0;

	if(atomic_read(&host->stuck_timeout) > 4) {
		char *what;
		timeout = 1;
		stat = MMC_STATUS;
		host->status_reg = stat;
		/*
		 * Work out which stage got stuck for the diagnostic
		 * below.  NOTE: the unbraced else branches bind to the
		 * nearest if (dangling-else) -- formatting reflects
		 * the actual parse.
		 */
		if (test_bit(IMXMCI_PEND_WAIT_RESP_b, &host->pending_events))
			if (test_bit(IMXMCI_PEND_DMA_DATA_b, &host->pending_events))
				what = "RESP+DMA";
			else
				what = "RESP";
		else if (test_bit(IMXMCI_PEND_DMA_DATA_b, &host->pending_events))
			if(test_bit(IMXMCI_PEND_DMA_END_b, &host->pending_events))
				what = "DATA";
			else
				what = "DMA";
		else
			what = "???";

		dev_err(mmc_dev(host->mmc),
			"%s TIMEOUT, hardware stucked STATUS = 0x%04x IMASK = 0x%04x\n",
			what, stat, MMC_INT_MASK);
		dev_err(mmc_dev(host->mmc),
			"CMD_DAT_CONT = 0x%04x, MMC_BLK_LEN = 0x%04x, MMC_NOB = 0x%04x, DMA_CCR = 0x%08x\n",
			MMC_CMD_DAT_CONT, MMC_BLK_LEN, MMC_NOB, CCR(host->dma));
		dev_err(mmc_dev(host->mmc),
			"CMD%d, prevCMD%d, bus %d-bit, dma_size = 0x%x\n",
			host->cmd?host->cmd->opcode:0, host->prev_cmd_code,
			1<<host->actual_bus_width, host->dma_size);
	}

	/* Card gone or stuck: synthesize timeout/CRC error status */
	if(!host->present || timeout)
		host->status_reg = STATUS_TIME_OUT_RESP | STATUS_TIME_OUT_READ |
				   STATUS_CRC_READ_ERR | STATUS_CRC_WRITE_ERR;

	if(test_bit(IMXMCI_PEND_IRQ_b, &host->pending_events) || timeout) {
		clear_bit(IMXMCI_PEND_IRQ_b, &host->pending_events);

		stat = MMC_STATUS;
		/*
		 * This is not required in theory, but there is chance to miss
		 * some flag which clears automatically by mask write,
		 * FreeScale original code keeps stat from IRQ time so do I
		 */
		stat |= host->status_reg;

		if(test_bit(IMXMCI_PEND_WAIT_RESP_b, &host->pending_events)) {
			imxmci_busy_wait_for_status(host, &stat,
					STATUS_END_CMD_RESP | STATUS_ERR_MASK,
					20, "imxmci_tasklet_fnc resp (ERRATUM #4)");
		}

		if(stat & (STATUS_END_CMD_RESP | STATUS_ERR_MASK)) {
			if(test_and_clear_bit(IMXMCI_PEND_WAIT_RESP_b,
					      &host->pending_events))
				imxmci_cmd_done(host, stat);
			if(host->data && (stat & STATUS_ERR_MASK))
				imxmci_data_done(host, stat);
		}

		if(test_bit(IMXMCI_PEND_CPU_DATA_b, &host->pending_events)) {
			stat |= MMC_STATUS;
			if(imxmci_cpu_driven_data(host, &stat)){
				if(test_and_clear_bit(IMXMCI_PEND_WAIT_RESP_b,
						      &host->pending_events))
					imxmci_cmd_done(host, stat);
				atomic_clear_mask(IMXMCI_PEND_IRQ_m|IMXMCI_PEND_CPU_DATA_m,
						  &host->pending_events);
				imxmci_data_done(host, stat);
			}
		}
	}

	if(test_bit(IMXMCI_PEND_DMA_END_b, &host->pending_events) &&
	   !test_bit(IMXMCI_PEND_WAIT_RESP_b, &host->pending_events)) {

		stat = MMC_STATUS;
		/* Same as above */
		stat |= host->status_reg;

		if(host->dma_dir == DMA_TO_DEVICE) {
			data_dir_mask = STATUS_WRITE_OP_DONE;
		} else {
			data_dir_mask = STATUS_DATA_TRANS_DONE;
		}

		if(stat & data_dir_mask) {
			clear_bit(IMXMCI_PEND_DMA_END_b, &host->pending_events);
			imxmci_data_done(host, stat);
		}
	}

	if(test_and_clear_bit(IMXMCI_PEND_CARD_XCHG_b, &host->pending_events)) {
		/* Card changed: fail outstanding work and re-probe */
		if(host->cmd)
			imxmci_cmd_done(host, STATUS_TIME_OUT_RESP);

		if(host->data)
			imxmci_data_done(host, STATUS_TIME_OUT_READ |
					 STATUS_CRC_READ_ERR | STATUS_CRC_WRITE_ERR);

		if(host->req)
			imxmci_finish_request(host, host->req);

		mmc_detect_change(host->mmc, msecs_to_jiffies(100));
	}
}
int omap_get_dma_active_status(int lch) { return (dma_read(CCR(lch)) & OMAP_DMA_CCR_EN) != 0; }