/*
 * Allocate DMA channel resources for a slave channel.
 *
 * chan->private must carry a struct imx_dma_data describing the
 * peripheral type, DMA request line and desired priority class;
 * returns -EINVAL when it is missing.
 *
 * Returns 0 on success or a negative error code from the channel
 * request / priority setup.
 */
static int sdma_alloc_chan_resources(struct dma_chan *chan)
{
	struct sdma_channel *sdmac = to_sdma_chan(chan);
	struct imx_dma_data *data = chan->private;
	int prio, ret;

	if (!data)
		return -EINVAL;

	/* Map the generic priority class onto the SDMA 1..3 range. */
	switch (data->priority) {
	case DMA_PRIO_HIGH:
		prio = 3;
		break;
	case DMA_PRIO_MEDIUM:
		prio = 2;
		break;
	case DMA_PRIO_LOW:
	default:
		prio = 1;
		break;
	}

	sdmac->peripheral_type = data->peripheral_type;
	sdmac->event_id0 = data->dma_request;

	/*
	 * Request the channel before touching its priority so that we
	 * never configure a channel we do not yet own.  (This matches
	 * the ordering used by the other sdma_alloc_chan_resources
	 * variant in this file; the old code set the priority first.)
	 */
	ret = sdma_request_channel(sdmac);
	if (ret)
		return ret;

	ret = sdma_set_channel_priority(sdmac, prio);
	if (ret)
		return ret;

	return 0;
}
static int setup_fpga_interface(struct sdma_engine *sdma) { const int channel = 1; struct sdma_channel *sdmac = &sdma->channel[channel]; const u32 sdma_code[24] = { 0x6c20672b, 0x07647d02, 0x04007cfa, 0x612b622b, 0x662b0762, 0x7d0e6900, 0x6d0406da, 0x7d056e18, 0x000002a6, 0x0e087cf8, 0x6a186c14, 0x6c2b7ce8, 0x7de7632b, 0x6904008f, 0x38037802, 0x6b290312, 0x4a007d09, 0x6d0006da, 0x7d056e18, 0x000002a6, 0x0e087cf8, 0x6a180763, 0x7ce80300, 0x7de60000, }; const int origin = 0xe00; /* In data space terms (32 bits/address) */ struct sdma_context_data *context = sdma->context; int ret; ret = eim_init(); if (ret) { printk(KERN_ERR THIS "Failed to initialize EIM bus\n"); return ret; } sdma_write_datamem(sdma, (void *) sdma_code, sizeof(sdma_code), origin); ret = sdma_request_channel(sdmac); if (ret) { printk(KERN_ERR "Failed to request channel\n"); return ret; } sdma_disable_channel(sdmac); /* Don't let events run yet: */ sdma_config_ownership(sdmac, true, true, false); memset(context, 0, sizeof(*context)); context->channel_state.pc = origin * 2; /* In program space addressing */ context->gReg[4] = MX51_CS2_BASE_ADDR + 0x80; /* Request region */ context->gReg[5] = MX51_CS2_BASE_ADDR + 0x8000; /* Data region */ ret = sdma_write_datamem(sdma, (void *) context, sizeof(*context), 0x800 + (sizeof(*context) / 4) * channel); if (ret) { printk(KERN_ERR "Failed to load context\n"); return ret; } sdmac->desc.callback = sdma_irq_callback; sdmac->desc.callback_param = NULL; xillybus_sdmac = sdmac; return 0; /* Success! */ }
int dma_test_thread( void *data ) { unsigned long test = (unsigned long)data; int devIdx; int rc; printk( "dma_test_thread( %lu ) called\n", test ); for ( devIdx = 0; devIdx < DMA_NUM_DEVICE_ENTRIES; devIdx++ ) { gTestHandle[devIdx] = SDMA_INVALID_HANDLE; } for ( devIdx = 0; devIdx < DMA_NUM_DEVICE_ENTRIES; devIdx++ ) { if ( devIdx == DMA_DEVICE_NAND_MEM_TO_MEM ) { printk( "Skipping NAND device %d\n", devIdx ); continue; } if ( test == 1 ) { printk( "About to request channel for device %d ...\n", devIdx ); if (( gTestHandle[ devIdx ] = sdma_request_channel( devIdx )) < 0 ) { printk( "Call to sdma_request_channel failed: %d\n", gTestHandle[ devIdx ] ); continue; } printk( " request completed, handle = 0x%04x\n", gTestHandle[ devIdx ] ); } else { if ( gTestHandle[ devIdx ] != SDMA_INVALID_HANDLE ) { printk( "About to relase channel for device %d, handle 0x%04x\n", devIdx, gTestHandle[ devIdx ] ); if (( rc = sdma_free_channel( gTestHandle[ devIdx ])) < 0 ) { printk( "Call to sdma_free_channel failed: %d\n", rc ); } msleep( 33 ); } } } printk( "test thread %ld exiting\n", test ); gDmaTestRunning[ test - 1 ] = 0; return 0; }
static int sdma_alloc_chan_resources(struct dma_chan *chan) { struct sdma_channel *sdmac = to_sdma_chan(chan); struct imx_dma_data *data = chan->private; int prio, ret; if (!data) return -EINVAL; switch (data->priority) { case DMA_PRIO_HIGH: prio = 3; break; case DMA_PRIO_MEDIUM: prio = 2; break; case DMA_PRIO_LOW: default: prio = 1; break; } sdmac->peripheral_type = data->peripheral_type; sdmac->event_id0 = data->dma_request; if (data->dma_request_p2p > 0) sdmac->event_id1 = data->dma_request_p2p; else sdmac->event_id1 = 0; ret = sdma_request_channel(sdmac); if (ret) return ret; ret = sdma_set_channel_priority(sdmac, prio); if (ret) return ret; dma_async_tx_descriptor_init(&sdmac->desc, chan); sdmac->desc.tx_submit = sdma_tx_submit; /* txd.flags will be overwritten in prep funcs */ sdmac->desc.flags = DMA_CTRL_ACK; return 0; }
/*
 * Kernel thread that processes the update requests.
 *
 * Runs one DMA transfer per loop iteration between the buffers described
 * by gCfg: fills the source buffer with an incrementing byte pattern,
 * clears the destination, maps the memory, performs the transfer via the
 * SDMA engine (synopsys DMA path is compiled out), waits for completion
 * and byte-compares src against dst.  Loops forever until kill_thread is
 * set, a signal arrives, any step fails, or a miscompare is found; then
 * tears everything down via the goto-cleanup ladder at the bottom.
 */
static void test_thread(void)
{
	struct test_cfg *cfg = &gCfg;
	unsigned int i;
	int rc;
	void *virt_addr;
	enum dma_data_direction dir;
	dma_addr_t devPhysAddr;
	unsigned long time_left;
	unsigned long trials = 0;   /* successful iterations so far */

	if (!cfg->setup_is_done) {
		printk(KERN_ERR "Need to set up parameters before running the test\n");
		return;
	}

	cfg->kill_thread = 0;
	cfg->in_use = 1;

	/* allocate memory for destination and source */
	cfg->src_ptr = alloc_mem(&cfg->src_addr, cfg->len, cfg->src_type);
	if (cfg->src_ptr == NULL) {
		printk(KERN_ERR "alloc_mem for src failed\n");
		goto exit_free_mem;
	}
	cfg->dst_ptr = alloc_mem(&cfg->dst_addr, cfg->len, cfg->dst_type);
	if (cfg->dst_ptr == NULL) {
		printk(KERN_ERR "alloc_mem for dst failed\n");
		goto exit_free_mem;
	}

	/* init mmap */
	rc = dma_mmap_init_map(&cfg->mmap_cfg);
	if (rc < 0) {
		printk(KERN_ERR "dma_mmap_init_map failed\n");
		goto exit_free_mem;
	}

	/*
	 * Decide which side is system memory (to be mapped) and which is
	 * the device address, based on the transfer direction flag.
	 */
	if (cfg->dir) {
		/* mem-to-dev, map src memory */
		virt_addr = cfg->src_ptr;
		dir = DMA_TO_DEVICE;
		devPhysAddr = cfg->dst_addr;
	} else {
		/* dev-to-mem, map dst memory */
		virt_addr = cfg->dst_ptr;
		dir = DMA_FROM_DEVICE;
		devPhysAddr = cfg->src_addr;
	}

	daemonize(MODULE_NAME);
	allow_signal(SIGKILL);

	for (;;) {
		/* driver shutting down... let's quit the kthread */
		if (cfg->kill_thread)
			goto exit_term_mmap;
		if (signal_pending(current))
			goto exit_term_mmap;

		/* write some values into the source buffer */
		/* NOTE(review): cnt is not declared in this function —
		 * presumably a module-level counter; confirm. */
		for (i = 0; i < cfg->len; i++) {
			((unsigned char *)cfg->src_ptr)[i] = cnt++;
		}

		/* clear the destination buffer */
		memset(cfg->dst_ptr, 0, cfg->len);

		/* map memories */
		rc = dma_mmap_map(&cfg->mmap_cfg, virt_addr, cfg->len, dir);
		if (rc < 0) {
			printk(KERN_ERR "dma_mmap_map failed\n");
			goto exit_term_mmap;
		}

		/* reserve the DMA channel and set up descriptors */
		if (cfg->dma_type == DMA_TYPE_SDMA) {
			cfg->sdma_hdl = sdma_request_channel(cfg->device);
			if (cfg->sdma_hdl < 0) {
				printk(KERN_ERR "sdma_request_channel failed\n");
				goto exit_unmap;
			}
			rc = sdma_map_create_descriptor_ring(cfg->sdma_hdl,
					&cfg->mmap_cfg,
					devPhysAddr,
					DMA_UPDATE_MODE_INC);
			if (rc < 0) {
				printk(KERN_ERR "create desc ring failed\n");
				goto exit_free_dma_channel;
			}
		} else {
			/* Non-SDMA engines are not wired up yet. */
			printk(KERN_ERR "only support SDMA for now\n");
			rc = -EINVAL;
			goto exit_unmap;
#if 0
			cfg->dma_hdl = dma_request_channel(cfg->device);
			if (cfg->dma_hdl < 0) {
				printk(KERN_ERR "dma_request_channel failed\n");
				goto exit_unmap;
			}
			rc = dma_map_create_descriptor_ring(cfg->device,
					&cfg->mmap_cfg,
					devPhysAddr,
					DMA_UPDATE_MODE_INC);
			if (rc < 0) {
				printk(KERN_ERR "create desc ring failed\n");
				goto exit_free_dma_channel;
			}
#endif
		}

		/* set DMA interrupt handler */
		rc = set_dev_handler(cfg);
		if (rc < 0) {
			printk(KERN_ERR "set_dev_handler failed\n");
			goto exit_free_dma_channel;
		}

		/* Re-arm the completion before starting the transfer so a
		 * fast IRQ cannot be missed. */
		INIT_COMPLETION(cfg->dma_done);

		if (cfg->dma_type == DMA_TYPE_SDMA) {
			rc = sdma_start_transfer(cfg->sdma_hdl);
			if (rc < 0) {
				printk(KERN_ERR "sdma_transfer failed\n");
				goto exit_free_dma_channel;
			}
		}
#if 0
		else {
			/* synopsys DMA */
			rc = dma_start_transfer(cfg->dma_hdl, cfg->len);
			if (rc < 0) {
				printk(KERN_ERR "dma_start_transfer failed\n");
				goto exit_free_dma_channel;
			}
		}
#endif

		time_left = wait_for_completion_timeout(&cfg->dma_done,
				TIMEOUT_TIME);
		if (time_left == 0) {
			printk(KERN_ERR "DMA MMAP test timeout after %lu trials\n",
					trials);
			goto exit_free_dma_channel;
		}

		/* free DMA channel and unmap memory */
		if (cfg->dma_type == DMA_TYPE_SDMA)
			sdma_free_channel(cfg->sdma_hdl);
#if 0
		else
			dma_free_channel(cfg->dma_hdl);
#endif
		dma_mmap_unmap(&cfg->mmap_cfg, 0);

		/* verify the result */
		for (i = 0; i < cfg->len; i++) {
			if (((unsigned char *)cfg->src_ptr)[i] !=
			    ((unsigned char *)cfg->dst_ptr)[i]) {
				printk(KERN_ERR "src[%u]=%u != dst[%u]=%u trials=%lu\n",
						i,
						((unsigned char *)cfg->src_ptr)[i],
						i,
						((unsigned char *)cfg->dst_ptr)[i],
						trials);
				goto exit_term_mmap;
			}
		}

		trials++;
	}

	/* Cleanup ladder: each label undoes one acquisition stage and
	 * falls through to the stages below it. */
exit_free_dma_channel:
	if (cfg->dma_type == DMA_TYPE_SDMA)
		sdma_free_channel(cfg->sdma_hdl);
#if 0
	else
		dma_free_channel(cfg->dma_hdl);
#endif
exit_unmap:
	dma_mmap_unmap(&cfg->mmap_cfg, 0);
exit_term_mmap:
	dma_mmap_term_map(&cfg->mmap_cfg);
exit_free_mem:
	if (cfg->src_ptr) {
		free_mem(cfg->src_ptr, cfg->src_addr, cfg->len, cfg->src_type);
		cfg->src_ptr = NULL;
	}
	if (cfg->dst_ptr) {
		free_mem(cfg->dst_ptr, cfg->dst_addr, cfg->len, cfg->dst_type);
		cfg->dst_ptr = NULL;
	}

	/* Mark the thread as gone so a new test run can be started. */
	cfg->thread = NULL;
	cfg->in_use = 0;
	cfg->kill_thread = 0;
	printk(KERN_INFO "Quitted test thread\n");
}
/*
 * One-time initialisation of the SDMA engine: allocate the channel
 * control block (CCB) array plus a scratch context area, reset channel
 * enables and priorities, claim the command channel (channel 0) and
 * point the controller at the CCB array.
 *
 * Returns 0 on success, -ENODEV for an unknown hardware version,
 * -ENOMEM when the coherent allocation fails, or the error from the
 * channel-0 request.
 */
static int __init sdma_init(struct sdma_engine *sdma)
{
	int i, ret;
	dma_addr_t ccb_phys;
	/* One CCB per channel plus a single context scratch area at the end. */
	const size_t ccb_size =
		MAX_DMA_CHANNELS * sizeof(struct sdma_channel_control) +
		sizeof(struct sdma_context_data);

	switch (sdma->version) {
	case 1:
		sdma->num_events = 32;
		break;
	case 2:
		sdma->num_events = 48;
		break;
	default:
		dev_err(sdma->dev, "Unknown version %d. aborting\n",
			sdma->version);
		return -ENODEV;
	}

	clk_enable(sdma->clk);

	/* Be sure SDMA has not started yet */
	__raw_writel(0, sdma->regs + SDMA_H_C0PTR);

	sdma->channel_control = dma_alloc_coherent(NULL, ccb_size,
						   &ccb_phys, GFP_KERNEL);
	if (!sdma->channel_control) {
		ret = -ENOMEM;
		goto err_dma_alloc;
	}

	/* The context area lives right after the CCB array. */
	sdma->context = (void *)sdma->channel_control +
		MAX_DMA_CHANNELS * sizeof(struct sdma_channel_control);
	sdma->context_phys = ccb_phys +
		MAX_DMA_CHANNELS * sizeof(struct sdma_channel_control);

	/* Zero-out the CCB structures array just allocated */
	memset(sdma->channel_control, 0,
	       MAX_DMA_CHANNELS * sizeof(struct sdma_channel_control));

	/* disable all channels */
	for (i = 0; i < sdma->num_events; i++)
		__raw_writel(0, sdma->regs + chnenbl_ofs(sdma, i));

	/* All channels have priority 0 */
	for (i = 0; i < MAX_DMA_CHANNELS; i++)
		__raw_writel(0, sdma->regs + SDMA_CHNPRI_0 + i * 4);

	ret = sdma_request_channel(&sdma->channel[0]);
	if (ret)
		/*
		 * Bug fix: this path used to jump to err_dma_alloc and
		 * leaked the coherent CCB allocation above.
		 */
		goto err_free_ccb;

	sdma_config_ownership(&sdma->channel[0], false, true, false);

	/* Set Command Channel (Channel Zero) */
	__raw_writel(0x4050, sdma->regs + SDMA_CHN0ADDR);

	/* Set bits of CONFIG register but with static context switching */
	/* FIXME: Check whether to set ACR bit depending on clock ratios */
	__raw_writel(0, sdma->regs + SDMA_H_CONFIG);

	__raw_writel(ccb_phys, sdma->regs + SDMA_H_C0PTR);

	/* Set bits of CONFIG register with given context switching mode */
	__raw_writel(SDMA_H_CONFIG_CSM, sdma->regs + SDMA_H_CONFIG);

	/* Initializes channel's priorities */
	sdma_set_channel_priority(&sdma->channel[0], 7);

	clk_disable(sdma->clk);

	return 0;

err_free_ccb:
	dma_free_coherent(NULL, ccb_size, sdma->channel_control, ccb_phys);
	sdma->channel_control = NULL;
err_dma_alloc:
	clk_disable(sdma->clk);
	dev_err(sdma->dev, "initialisation failed with %d\n", ret);
	return ret;
}
/*
 * Run mem-to-mem DMA copy tests on every SDMA channel for the given
 * number of iterations.  Each iteration requests one channel per device,
 * runs two back-to-back transfer/verify passes (the second reusing the
 * same DMA descriptor), then frees the channels.
 *
 * Returns 0 when every pass verifies, -ENOMEM on allocation failure, a
 * negative handle value on channel-request failure, or the non-zero
 * result of dma_test_check_mem() on a miscompare.
 *
 * NOTE(review): run_one_test() is a GCC nested function (non-standard C)
 * that captures the outer chan/rc/i variables.  Also, every early return
 * in the bodies below leaks previously allocated src/dst buffers and any
 * already-requested channels — acceptable for throwaway test code, but
 * worth fixing if this is kept.
 */
static int memcpy_tests( long iterations )
{
    int rc;
    int i;
    dma_mem_t src[SDMA_NUM_CHANNELS];
    dma_mem_t dst[SDMA_NUM_CHANNELS];
    SDMA_Handle_t dmaHandle[SDMA_NUM_CHANNELS];
    int chan;

    /* Fire a transfer on every channel, then wait for and verify each. */
    int run_one_test( int test_number )
    {
        for ( chan = 0; chan < SDMA_NUM_CHANNELS; chan++ )
        {
            /* Start DMA operation */
            sdma_transfer_mem_to_mem(dmaHandle[chan],
                                     src[chan].physPtr,
                                     dst[chan].physPtr,
                                     ALLOC_SIZE );
        }

        for ( chan = 0; chan < SDMA_NUM_CHANNELS; chan++ )
        {
            /* Wait for DMA completion */
            sdma_test_wait( chan );

            /* Verify transfer */
            if (( rc = dma_test_check_mem( &dst[chan], channel_to_start( chan ))) != 0 )
            {
                printk( KERN_ERR ": ========== DMA memcpy test%d channel %d failed [%03d] ==========\n\n",
                        test_number, chan, i );
                return rc;
            }
            printk( "\n========== DMA memcpy test%d channel %d passed [%03d] ==========\n\n",
                    test_number, chan, i );

            /* Reset destination memory */
            memset( dst[chan].virtPtr, 0, dst[chan].numBytes );
        }
        return 0;
    }

    printk( "\n::Entering into test thread::\n\n");

    for ( chan = 0; chan < SDMA_NUM_CHANNELS; chan++ )
    {
        /* Allocate contiguous source memory */
        if ( alloc_mem( &src[chan], ALLOC_SIZE ) == NULL )
        {
            return -ENOMEM;
        }

        /* Allocate contiguous destination memory */
        if ( alloc_mem( &dst[chan], ALLOC_SIZE ) == NULL )
        {
            return -ENOMEM;
        }

        /* Init test settings */
        sdma_test_set( chan );

        /* initialize the source memory */
        dma_test_init_mem( &src[chan], channel_to_start( chan ));
    }

    for ( i = 0; i < iterations; i++ )
    {
        for ( chan = 0; chan < SDMA_NUM_CHANNELS; chan++ )
        {
            /* Acquire a DMA channel */
            dmaHandle[chan] = sdma_request_channel( device[chan] );
            if ( dmaHandle[chan] < 0 )
            {
                return (int)dmaHandle[chan];
            }
        }

        if (( rc = run_one_test( 1 )) != 0 )
        {
            return rc;
        }

        /*
         * Now test to see if we can do back-to-back DMA transfers
         * and reuse the same DMA descriptor
         */
        if (( rc = run_one_test( 2 )) != 0 )
        {
            return rc;
        }

        for ( chan = 0; chan < SDMA_NUM_CHANNELS; chan++ )
        {
            sdma_free_channel( dmaHandle[chan] );
        }
    }

    for ( chan = 0; chan < SDMA_NUM_CHANNELS; chan++ )
    {
        /* Free test memory */
        free_mem( &src[chan] );
        free_mem( &dst[chan] );
    }

    printk( "\n ::Exiting from test thread:: \n\n");

    return 0;
}