/**
 * Verify that a BAM device is enabled and gather its hardware
 * configuration (revision and pipe count).
 *
 * @base - BAM virtual base address
 * @version - [out] BAM hardware revision
 * @num_pipes - [out] raw NUM_PIPES register value
 *
 * @return 0 on success, -ENODEV if disabled or revision unsupported
 */
int bam_check(void *base, u32 *version, u32 *num_pipes)
{
	u32 revision;

	SPS_DBG2("sps:%s:bam=0x%x(va).", __func__, (u32) base);

	/* The BAM must already be enabled before it can be probed */
	if (bam_read_reg_field(base, CTRL, BAM_EN) == 0) {
		SPS_ERR("sps:%s:bam 0x%x(va) is not enabled.\n",
				__func__, (u32) base);
		return -ENODEV;
	}

	revision = bam_read_reg(base, REVISION) & BAM_REVISION;

	/*
	 * Discover the hardware version number and the number of pipes
	 * supported by this BAM
	 */
	*num_pipes = bam_read_reg(base, NUM_PIPES);
	*version = revision;

	/* Reject hardware revisions outside the supported window */
	if (revision < BAM_MIN_VERSION || revision > BAM_MAX_VERSION) {
		SPS_ERR("sps:%s:bam 0x%x(va) Invalid BAM version 0x%x.\n",
				__func__, (u32) base, revision);
		return -ENODEV;
	}

	return 0;
}
/**
 * Enable a BAM DMA pipe.  When the pipe is the dest/input/write side of
 * a channel, the owning DMA channel is configured and enabled as well.
 */
int sps_dma_pipe_enable(void *bam_arg, u32 pipe_index)
{
	struct sps_bam *bam = bam_arg;
	struct bamdma_device *dev;
	struct bamdma_chan *chan_cfg;
	u32 ch;
	int result = SPS_ERROR;

	SPS_DBG2("sps:sps_dma_pipe_enable.pipe %d", pipe_index);

	mutex_lock(&bam_dma_lock);

	dev = sps_dma_find_device((u32) bam);
	if (!dev) {
		SPS_ERR("sps:BAM-DMA: invalid BAM");
		goto exit_err;
	}

	/* Validate pipe index and its current state */
	if (pipe_index >= dev->num_pipes) {
		SPS_ERR("sps:BAM-DMA: BAM %x invalid pipe: %d",
			bam->props.phys_addr, pipe_index);
		goto exit_err;
	}

	if (dev->pipes[pipe_index] != PIPE_ACTIVE) {
		SPS_ERR("sps:BAM-DMA: BAM %x pipe %d not active",
			bam->props.phys_addr, pipe_index);
		goto exit_err;
	}

	/*
	 * The channel must be enabled when the dest/input/write pipe
	 * is enabled
	 */
	if (DMA_PIPE_IS_DEST(pipe_index)) {
		/* Pipes are paired: channel index is half the pipe index */
		ch = pipe_index / 2;
		chan_cfg = &dev->chans[ch];

		/* Apply non-default threshold/weight before enabling */
		if (chan_cfg->threshold != SPS_DMA_THRESHOLD_DEFAULT)
			dma_write_reg_field(dev->virt_addr,
					    DMA_CHNL_CONFIG(ch),
					    DMA_CHNL_ACT_THRESH,
					    chan_cfg->thresh);
		if (chan_cfg->priority != SPS_DMA_PRI_DEFAULT)
			dma_write_reg_field(dev->virt_addr,
					    DMA_CHNL_CONFIG(ch),
					    DMA_CHNL_WEIGHT,
					    chan_cfg->weight);

		dma_write_reg_field(dev->virt_addr, DMA_CHNL_CONFIG(ch),
				    DMA_CHNL_ENABLE, 1);
	}

	result = 0;
exit_err:
	mutex_unlock(&bam_dma_lock);

	return result;
}
/**
 * Deactivate a BAM DMA pipe
 *
 * This function deactivates a BAM DMA pipe.
 *
 * @dev - pointer to BAM-DMA device descriptor
 *
 * @bam - pointer to BAM device descriptor
 *
 * @pipe_index - pipe index
 *
 * @return 0 on success, negative value on error
 *
 */
static int sps_dma_deactivate_pipe_atomic(struct bamdma_device *dev,
					  struct sps_bam *bam,
					  u32 pipe_index)
{
	u32 channel;

	/* Validate ownership, range, and current pipe state */
	if (dev->bam != bam)
		return SPS_ERROR;
	if (pipe_index >= dev->num_pipes)
		return SPS_ERROR;
	if (dev->pipes[pipe_index] != PIPE_ACTIVE)
		return SPS_ERROR;	/* Pipe is not active */

	SPS_DBG2("sps:BAM-DMA: deactivate pipe %d", pipe_index);

	/* Mark pipe inactive */
	dev->pipes[pipe_index] = PIPE_INACTIVE;

	/*
	 * Channel must be disabled when either pipe is disabled, so just
	 * always disable regardless of other pipe's state.
	 * NOTE(review): the write below clears DMA_CHNL_ENABLE; whether
	 * this constitutes a full channel "reset" is not visible here —
	 * confirm against hardware documentation.
	 */
	channel = pipe_index / 2;
	dma_write_reg_field(dev->virt_addr, DMA_CHNL_CONFIG(channel),
			    DMA_CHNL_ENABLE, 0);

	/*
	 * If the peer pipe is also inactive, release the channel back to
	 * the free pool when it was allocated internally.
	 */
	if (sps_dma_check_pipes(dev, pipe_index) == DMA_PIPES_BOTH_DISABLED) {
		/* Free channel if allocated internally */
		if (dev->chans[channel].state == DMA_CHAN_STATE_ALLOC_INT)
			dev->chans[channel].state = DMA_CHAN_STATE_FREE;
	}

	return 0;
}
/**
 * Verify that a BAM device is enabled and gather its hardware
 * configuration.
 *
 * @base - BAM virtual base address
 * @version - [out] BAM hardware revision
 * @num_pipes - [out] raw NUM_PIPES register value
 *
 * @return 0 on success, -ENODEV if disabled or revision unsupported
 */
int bam_check(void *base, u32 *version, u32 *num_pipes)
{
	u32 ver = 0;

	SPS_DBG2("sps:%s:bam=0x%x(va).", __func__, (u32) base);

	/* The BAM must already be enabled before it can be probed */
	if (!bam_read_reg_field(base, CTRL, BAM_EN)) {
		SPS_ERR("sps:%s:bam 0x%x(va) is not enabled.\n",
			__func__, (u32) base);
		return -ENODEV;
	}

	ver = bam_read_reg(base, REVISION) & BAM_REVISION;

	/* Note: outputs are populated before the revision check below */
	*num_pipes = bam_read_reg(base, NUM_PIPES);
	*version = ver;

	/* Reject hardware revisions outside the supported window */
	if ((ver < BAM_MIN_VERSION) || (ver > BAM_MAX_VERSION)) {
		SPS_ERR("sps:%s:bam 0x%x(va) Invalid BAM version 0x%x.\n",
			__func__, (u32) base, ver);
		return -ENODEV;
	}

	return 0;
}
/**
 * Set up EE (execution environment) and VMID restrictions for a BAM.
 *
 * For every pipe selected in @pipe_mask, assign the pipe to @ee/@vmid.
 * Bit 31 of @pipe_mask selects the BAM's top-level (non-pipe) interface.
 *
 * @base - BAM virtual base address
 * @ee - execution environment index
 * @vmid - virtual master identifier
 * @pipe_mask - bitmask of pipes to restrict (bit 31 = BAM itself)
 *
 * @return 0 on success, -ENODEV when unsupported or misconfigured
 */
int bam_security_init(void *base, u32 ee, u32 vmid, u32 pipe_mask)
{
	u32 version;
	u32 num_pipes;
	u32 mask;
	u32 pipe;

	SPS_DBG2("sps:%s:bam=0x%x(va).", __func__, (u32) base);

	/*
	 * Discover the hardware version number and the number of pipes
	 * supported by this BAM
	 */
	version = bam_read_reg_field(base, REVISION, BAM_REVISION);
	num_pipes = bam_read_reg_field(base, NUM_PIPES, BAM_NUM_PIPES);

	/* Security is only supported on revisions 3..0x1F */
	if (version < 3 || version > 0x1F) {
		/*
		 * Fix: add the missing space between the adjacent string
		 * literals ("...this" "BAM..." printed as "thisBAM").
		 */
		SPS_ERR("sps:bam 0x%x(va) security is not supported for this "
			"BAM version 0x%x.\n", (u32) base, version);
		return -ENODEV;
	}

	if (num_pipes > BAM_MAX_PIPES) {
		SPS_ERR("sps:bam 0x%x(va) the number of pipes is more than "
			"the maximum number allowed.", (u32) base);
		return -ENODEV;
	}

	/* Assign each selected pipe to the requested EE/VMID */
	for (pipe = 0, mask = 1; pipe < num_pipes; pipe++, mask <<= 1)
		if ((mask & pipe_mask) != 0)
			bam_pipe_set_ee(base, pipe, ee, vmid);

	/* If MSB of pipe_mask is set, assign the top-level interface too */
	mask = 1UL << 31;
	if ((mask & pipe_mask) != 0)
		bam_set_ee(base, ee, vmid, BAM_NONSECURE_RESET_ENABLE);

	return 0;
}
/*
 * Atomically deactivate a BAM DMA pipe.  Disables the owning channel and,
 * once both pipes of the pair are inactive, releases an internally
 * allocated channel back to the free pool.
 */
static int sps_dma_deactivate_pipe_atomic(struct bamdma_device *dev,
					  struct sps_bam *bam,
					  u32 pipe_index)
{
	u32 ch;

	/* Validate ownership and pipe index */
	if (dev->bam != bam || pipe_index >= dev->num_pipes)
		return SPS_ERROR;

	/* Only an active pipe can be deactivated */
	if (dev->pipes[pipe_index] != PIPE_ACTIVE)
		return SPS_ERROR;

	SPS_DBG2("sps:BAM-DMA: deactivate pipe %d", pipe_index);

	/* Record the pipe as inactive */
	dev->pipes[pipe_index] = PIPE_INACTIVE;

	/* Disable the channel that owns this pipe pair */
	ch = pipe_index / 2;
	dma_write_reg_field(dev->virt_addr, DMA_CHNL_CONFIG(ch),
			    DMA_CHNL_ENABLE, 0);

	/* Free an internal allocation once both pipes are down */
	if (sps_dma_check_pipes(dev, pipe_index) == DMA_PIPES_BOTH_DISABLED &&
	    dev->chans[ch].state == DMA_CHAN_STATE_ALLOC_INT)
		dev->chans[ch].state = DMA_CHAN_STATE_FREE;

	return 0;
}
/**
 * Configure interrupt for a BAM pipe
 *
 * @base - BAM virtual base address
 * @pipe - pipe index
 * @irq_en - enable/disable the pipe's bit in the EE interrupt mask
 * @src_mask - interrupt source mask written to the pipe's P_IRQ_EN
 * @ee - execution environment index selecting the IRQ_SRCS_MSK register
 */
void bam_pipe_set_irq(void *base, u32 pipe, enum bam_enable irq_en,
		      u32 src_mask, u32 ee)
{
	SPS_DBG2("sps:%s:bam=0x%x(va).pipe=%d.", __func__, (u32) base, pipe);

	/* Select which events this pipe raises... */
	bam_write_reg(base, P_IRQ_EN(pipe), src_mask);
	/* ...then gate the pipe's interrupt at the BAM/EE level */
	bam_write_reg_field(base, IRQ_SRCS_MSK_EE(ee), (1 << pipe), irq_en);
}
/**
 * Initialize a BAM device: validate the hardware revision, soft-reset the
 * core, then program the descriptor threshold, config bits, and top-level
 * interrupt mask.  Reports revision and pipe count back to the caller.
 */
int bam_init(void *base, u32 ee, u16 summing_threshold, u32 irq_mask,
	     u32 *version, u32 *num_pipes)
{
	/* disable bit#11 because of HW bug */
	u32 cfg_bits = 0xffffffff & ~(1 << 11);
	u32 revision;

	SPS_DBG2("sps:%s:bam=0x%x(va).ee=%d.", __func__, (u32) base, ee);

	/* Validate the hardware revision before touching anything else */
	revision = bam_read_reg_field(base, REVISION, BAM_REVISION);
	if (revision < BAM_MIN_VERSION || revision > BAM_MAX_VERSION) {
		SPS_ERR("sps:bam 0x%x(va) Invalid BAM REVISION 0x%x.\n",
				(u32) base, revision);
		return -ENODEV;
	} else {
		SPS_INFO("sps:REVISION of BAM 0x%x is 0x%x.\n",
				(u32) base, revision);
	}

	/* A zero summing threshold is invalid; fall back to 4 */
	if (summing_threshold == 0) {
		summing_threshold = 4;
		SPS_ERR("sps:bam 0x%x(va) summing_threshold is zero , "
				"use default 4.\n", (u32) base);
	}

	/* Soft-reset the BAM, then enable it */
	bam_write_reg_field(base, CTRL, BAM_SW_RST, 1);
	/* No delay needed */
	bam_write_reg_field(base, CTRL, BAM_SW_RST, 0);
	bam_write_reg_field(base, CTRL, BAM_EN, 1);

#ifdef CONFIG_SPS_SUPPORT_NDP_BAM
	bam_write_reg_field(base, CTRL, CACHE_MISS_ERR_RESP_EN, 1);
	bam_write_reg_field(base, CTRL, LOCAL_CLK_GATING, 1);
#endif

	bam_write_reg(base, DESC_CNT_TRSHLD, summing_threshold);
	bam_write_reg(base, CNFG_BITS, cfg_bits);

	/*
	 * Enable Global BAM Interrupt - for error reasons,
	 * filter with mask.
	 * Note: Pipes interrupts are disabled until BAM_P_IRQ_enn is set
	 */
	bam_write_reg_field(base, IRQ_SRCS_MSK_EE(ee), BAM_IRQ, 1);
	bam_write_reg(base, IRQ_EN, irq_mask);

	/* Report the hardware configuration back to the caller */
	*num_pipes = bam_read_reg_field(base, NUM_PIPES, BAM_NUM_PIPES);
	*version = revision;

	return 0;
}
/**
 * Disable a BAM device
 *
 * @base - BAM virtual base address
 * @ee - execution environment index whose interrupt mask is cleared
 */
void bam_exit(void *base, u32 ee)
{
	SPS_DBG2("sps:%s:bam=0x%x(va).ee=%d.", __func__, (u32) base, ee);

	/* Mask the global BAM interrupt for this EE and drop all sources */
	bam_write_reg_field(base, IRQ_SRCS_MSK_EE(ee), BAM_IRQ, 0);

	bam_write_reg(base, IRQ_EN, 0);

	/* Disable the BAM */
	bam_write_reg_field(base, CTRL, BAM_EN, 0);
}
/**
 * Disable a BAM pipe and silence its interrupts
 *
 * @base - BAM virtual base address
 * @pipe - pipe index
 * @ee - execution environment index whose interrupt mask is cleared
 */
void bam_pipe_exit(void *base, u32 pipe, u32 ee)
{
	SPS_DBG2("sps:%s:bam=0x%x(va).pipe=%d.", __func__, (u32) base, pipe);

	/* Clear all interrupt sources for this pipe */
	bam_write_reg(base, P_IRQ_EN(pipe), 0);

	/* Disable the Pipe Interrupt at the BAM level */
	bam_write_reg_field(base, IRQ_SRCS_MSK_EE(ee), (1 << pipe), 0);

	/* Pipe Disable */
	bam_write_reg_field(base, P_CTRL(pipe), P_EN, 0);
}
/**
 * Disable a BAM pipe and silence its interrupts
 *
 * @base - BAM virtual base address
 * @pipe - pipe index
 * @ee - execution environment index whose interrupt mask is cleared
 */
void bam_pipe_exit(void *base, u32 pipe, u32 ee)
{
	SPS_DBG2("sps:%s:bam=0x%x(va).pipe=%d.", __func__, (u32) base, pipe);

	/* Clear all interrupt sources for this pipe */
	bam_write_reg(base, P_IRQ_EN(pipe), 0);

	/* Mask the pipe's interrupt at the BAM/EE level */
	bam_write_reg_field(base, IRQ_SRCS_MSK_EE(ee), (1 << pipe), 0);

	/* Finally, disable the pipe itself */
	bam_write_reg_field(base, P_CTRL(pipe), P_EN, 0);
}
/**
 * Initialize a BAM device
 *
 * @base - BAM virtual base address
 * @ee - execution environment index for the global interrupt mask
 * @summing_threshold - descriptor count threshold (0 falls back to 4)
 * @irq_mask - top-level interrupt enable mask
 * @version - [out] BAM hardware revision
 * @num_pipes - [out] number of pipes supported by the hardware
 *
 * @return 0 on success, -ENODEV for an unsupported revision
 */
int bam_init(void *base, u32 ee, u16 summing_threshold, u32 irq_mask,
	     u32 *version, u32 *num_pipes)
{
	/* bit#11 of CNFG_BITS is kept cleared (HW bug workaround) */
	u32 cfg_bits = 0xffffffff & ~(1 << 11);
	u32 ver = 0;

	SPS_DBG2("sps:%s:bam=0x%x(va).ee=%d.", __func__, (u32) base, ee);

	/* Validate the hardware revision before touching anything else */
	ver = bam_read_reg_field(base, REVISION, BAM_REVISION);
	if ((ver < BAM_MIN_VERSION) || (ver > BAM_MAX_VERSION)) {
		SPS_ERR("sps:bam 0x%x(va) Invalid BAM REVISION 0x%x.\n",
			(u32) base, ver);
		return -ENODEV;
	} else
		SPS_INFO("sps:REVISION of BAM 0x%x is 0x%x.\n",
			 (u32) base, ver);

	/* A zero summing threshold is invalid; fall back to 4 */
	if (summing_threshold == 0) {
		summing_threshold = 4;
		SPS_ERR("sps:bam 0x%x(va) summing_threshold is zero , "
			"use default 4.\n", (u32) base);
	}

	/* Soft-reset the BAM (no delay needed), then enable it */
	bam_write_reg_field(base, CTRL, BAM_SW_RST, 1);
	bam_write_reg_field(base, CTRL, BAM_SW_RST, 0);
	bam_write_reg_field(base, CTRL, BAM_EN, 1);

#ifdef CONFIG_SPS_SUPPORT_NDP_BAM
	bam_write_reg_field(base, CTRL, CACHE_MISS_ERR_RESP_EN, 1);
	bam_write_reg_field(base, CTRL, LOCAL_CLK_GATING, 1);
#endif

	bam_write_reg(base, DESC_CNT_TRSHLD, summing_threshold);
	bam_write_reg(base, CNFG_BITS, cfg_bits);

	/*
	 * Enable the global BAM interrupt for this EE, filtered by the
	 * caller's irq_mask.  Per-pipe interrupts stay disabled until
	 * P_IRQ_EN is programmed for each pipe.
	 */
	bam_write_reg_field(base, IRQ_SRCS_MSK_EE(ee), BAM_IRQ, 1);
	bam_write_reg(base, IRQ_EN, irq_mask);

	/* Report the hardware configuration back to the caller */
	*num_pipes = bam_read_reg_field(base, NUM_PIPES, BAM_NUM_PIPES);
	*version = ver;

	return 0;
}
/**
 * Free I/O (pipe) memory previously returned by sps_mem_alloc_io.
 *
 * @phys_addr - physical address of the block to free
 * @bytes - size of the block in bytes
 *
 * NOTE(review): u32 phys_addr truncates on systems with >32-bit physical
 * addresses; a phys_addr_t variant exists — confirm which one is built.
 */
void sps_mem_free_io(u32 phys_addr, u32 bytes)
{
	u32 virt_addr = 0;

	/* Translate the physical address back to its mapped VA */
	iomem_offset = phys_addr - iomem_phys;
	virt_addr = (u32) iomem_virt + iomem_offset;

	SPS_DBG2("sps:sps_mem_free_io.phys=0x%x.virt=0x%x.size=0x%x.",
		 phys_addr, virt_addr, bytes);

	gen_pool_free(pool, virt_addr, bytes);
	total_free += bytes;
}
/**
 * Free I/O memory
 *
 * Returns a block of I/O (pipe) memory to the pool.
 *
 * @phys_addr - physical address of the block to free
 * @bytes - size of the block in bytes
 */
void sps_mem_free_io(phys_addr_t phys_addr, u32 bytes)
{
	unsigned long virt_addr;

	/* Translate the physical address back to its mapped VA */
	iomem_offset = phys_addr - iomem_phys;
	virt_addr = (uintptr_t) iomem_virt + iomem_offset;

	SPS_DBG2("sps:sps_mem_free_io.phys=%pa.virt=0x%lx.size=0x%x.",
		 &phys_addr, virt_addr, bytes);

	gen_pool_free(pool, virt_addr, bytes);
	total_free += bytes;
}
/**
 * Initialize a BAM pipe
 *
 * Resets the pipe, enables its interrupt at the BAM level, programs
 * direction/mode, thresholds, and FIFO addresses, then enables the pipe.
 *
 * @base - BAM virtual base address
 * @pipe - pipe index
 * @param - pipe configuration parameters
 * @ee - execution environment index for the interrupt mask
 *
 * @return 0 (always succeeds)
 */
int bam_pipe_init(void *base, u32 pipe, struct bam_pipe_parameters *param,
		  u32 ee)
{
	/* Reset the BAM pipe */
	bam_write_reg(base, P_RST(pipe), 1);
	/* No delay needed */
	bam_write_reg(base, P_RST(pipe), 0);

	/* Enable the Pipe Interrupt at the BAM level */
	bam_write_reg_field(base, IRQ_SRCS_MSK_EE(ee), (1 << pipe), 1);

	bam_write_reg(base, P_IRQ_EN(pipe), param->pipe_irq_mask);

	/* Program transfer direction, mode, and thresholds */
	bam_write_reg_field(base, P_CTRL(pipe), P_DIRECTION, param->dir);
	bam_write_reg_field(base, P_CTRL(pipe), P_SYS_MODE, param->mode);

	bam_write_reg(base, P_EVNT_GEN_TRSHLD(pipe), param->event_threshold);

	/* Descriptor FIFO location and size */
	bam_write_reg(base, P_DESC_FIFO_ADDR(pipe), param->desc_base);
	bam_write_reg_field(base, P_FIFO_SIZES(pipe), P_DESC_FIFO_SIZE,
			    param->desc_size);

	bam_write_reg_field(base, P_CTRL(pipe), P_SYS_STRM,
			    param->stream_mode);

	if (param->mode == BAM_PIPE_MODE_BAM2BAM) {
		/* Point event notifications at the peer BAM's event reg */
		u32 peer_dest_addr = param->peer_phys_addr +
				      P_EVNT_REG(param->peer_pipe);

		/* BAM-to-BAM also needs a data FIFO */
		bam_write_reg(base, P_DATA_FIFO_ADDR(pipe),
			      param->data_base);
		bam_write_reg_field(base, P_FIFO_SIZES(pipe),
				    P_DATA_FIFO_SIZE, param->data_size);

		bam_write_reg(base, P_EVNT_DEST_ADDR(pipe), peer_dest_addr);

		SPS_DBG2("sps:bam=0x%x(va).pipe=%d.peer_bam=0x%x."
			"peer_pipe=%d.\n",
			(u32) base, pipe,
			(u32) param->peer_phys_addr,
			param->peer_pipe);
	}

	/* Pipe Enable - at last */
	bam_write_reg_field(base, P_CTRL(pipe), P_EN, 1);

	return 0;
}
/**
 * Allocate I/O (pipe) memory from the pool.
 *
 * @bytes - number of bytes to allocate
 *
 * @return physical address of the block, or SPS_ADDR_INVALID on failure
 *
 * NOTE(review): returning the physical address as u32 truncates on
 * systems with >32-bit physical addresses; a phys_addr_t variant
 * exists — confirm which one is built.
 */
u32 sps_mem_alloc_io(u32 bytes)
{
	u32 phys_addr = SPS_ADDR_INVALID;
	u32 virt_addr = 0;

	virt_addr = gen_pool_alloc(pool, bytes);
	if (virt_addr) {
		/* Translate the pool VA to its physical address */
		iomem_offset = virt_addr - (u32) iomem_virt;
		phys_addr = iomem_phys + iomem_offset;
		total_alloc += bytes;
	} else {
		SPS_ERR("sps:gen_pool_alloc %d bytes fail.", bytes);
		return SPS_ADDR_INVALID;
	}

	SPS_DBG2("sps:sps_mem_alloc_io.phys=0x%x.virt=0x%x.size=0x%x.",
		 phys_addr, virt_addr, bytes);

	return phys_addr;
}
/**
 * Allocate I/O (pipe) memory
 *
 * @bytes - number of bytes to allocate
 *
 * @return physical address of the block, or SPS_ADDR_INVALID on failure
 */
phys_addr_t sps_mem_alloc_io(u32 bytes)
{
	phys_addr_t phys_addr;
	unsigned long virt_addr;

	virt_addr = gen_pool_alloc(pool, bytes);
	if (!virt_addr) {
		SPS_ERR("sps:gen_pool_alloc %d bytes fail.", bytes);
		return SPS_ADDR_INVALID;
	}

	/* Translate the pool VA to its physical address */
	iomem_offset = virt_addr - (uintptr_t) iomem_virt;
	phys_addr = iomem_phys + iomem_offset;
	total_alloc += bytes;

	SPS_DBG2("sps:sps_mem_alloc_io.phys=%pa.virt=0x%lx.size=0x%x.",
		 &phys_addr, virt_addr, bytes);

	return phys_addr;
}
/**
 * Initialize the BAM DMA device.
 *
 * Claims the single BAM-DMA slot, maps the device registers if needed,
 * enables the device, and (when locally controlled) disables all of its
 * DMA channels so pipes start from a known state.
 *
 * @h - SPS handle of the BAM that hosts the DMA device
 *
 * @return 0 on success, SPS_ERROR on error
 */
int sps_dma_device_init(u32 h)
{
	struct bamdma_device *dev;
	struct sps_bam_props *props;
	u32 chan;
	int result = SPS_ERROR;

	mutex_lock(&bam_dma_lock);

	/* Only one BAM-DMA device slot is supported */
	dev = NULL;
	if (bam_dma_dev[0].bam != NULL) {
		SPS_ERR("sps:BAM-DMA BAM device is already initialized.");
		goto exit_err;
	} else {
		dev = &bam_dma_dev[0];
	}

	/* Record the owning BAM */
	memset(dev, 0, sizeof(*dev));
	dev->h = h;
	dev->bam = sps_h2bam(h);
	if (dev->bam == NULL) {
		SPS_ERR("sps:BAM-DMA BAM device is not found "
			"from the handle.");
		goto exit_err;
	}

	/* Map the BAM DMA device into virtual space, if necessary */
	props = &dev->bam->props;
	dev->phys_addr = props->periph_phys_addr;
	if (props->periph_virt_addr != NULL) {
		/* Caller already supplied a mapping */
		dev->virt_addr = props->periph_virt_addr;
		dev->virtual_mapped = false;
	} else {
		if (props->periph_virt_size == 0) {
			SPS_ERR("sps:Unable to map BAM DMA IO memory: %x %x",
				dev->phys_addr, props->periph_virt_size);
			goto exit_err;
		}

		dev->virt_addr = ioremap(dev->phys_addr,
					 props->periph_virt_size);
		if (dev->virt_addr == NULL) {
			SPS_ERR("sps:Unable to map BAM DMA IO memory: %x %x",
				dev->phys_addr, props->periph_virt_size);
			goto exit_err;
		}
		/* Remember to iounmap on teardown/error */
		dev->virtual_mapped = true;
	}
	dev->hwio = (void *) dev->virt_addr;

	/* Is the BAM-DMA device locally controlled? */
	if ((props->manage & SPS_BAM_MGR_DEVICE_REMOTE) == 0) {
		SPS_DBG2("sps:BAM-DMA is controlled locally: %x",
			 dev->phys_addr);
		dev->local = true;
	} else {
		SPS_DBG2("sps:BAM-DMA is controlled remotely: %x",
			 dev->phys_addr);
		dev->local = false;
	}

	/* Enable the device and discover its pipe count */
	if (sps_dma_device_enable(dev))
		goto exit_err;

	dev->num_pipes = dev->bam->props.num_pipes;

	/* Disable every channel (one per pipe pair) when locally managed */
	if (dev->local)
		for (chan = 0; chan < (dev->num_pipes / 2); chan++) {
			dma_write_reg_field(dev->virt_addr,
					    DMA_CHNL_CONFIG(chan),
					    DMA_CHNL_ENABLE, 0);
		}

	result = 0;
exit_err:
	/* On failure, undo the mapping and release the device slot */
	if (result) {
		if (dev != NULL) {
			if (dev->virtual_mapped)
				iounmap(dev->virt_addr);

			dev->bam = NULL;
		}
	}

	mutex_unlock(&bam_dma_lock);

	return result;
}
/**
 * Initialize BAM DMA device
 *
 * Claims the single BAM-DMA slot, maps the device registers if needed,
 * and enables the device.
 *
 * @h - SPS handle of the BAM that hosts the DMA device
 *
 * @return 0 on success, SPS_ERROR on error
 */
int sps_dma_device_init(u32 h)
{
	struct bamdma_device *dev;
	struct sps_bam_props *props;
	int result = SPS_ERROR;

	mutex_lock(&bam_dma_lock);

	/* Find a free BAM-DMA device slot (only one slot is supported) */
	dev = NULL;
	if (bam_dma_dev[0].bam != NULL) {
		SPS_ERR("sps:BAM-DMA BAM device is already initialized.");
		goto exit_err;
	} else {
		dev = &bam_dma_dev[0];
	}

	/* Record BAM */
	memset(dev, 0, sizeof(*dev));
	dev->h = h;
	dev->bam = sps_h2bam(h);
	if (dev->bam == NULL) {
		SPS_ERR("sps:BAM-DMA BAM device is not found "
			"from the handle.");
		goto exit_err;
	}

	/* Map the BAM DMA device into virtual space, if necessary */
	props = &dev->bam->props;
	dev->phys_addr = props->periph_phys_addr;
	if (props->periph_virt_addr != NULL) {
		/* Caller already supplied a mapping */
		dev->virt_addr = props->periph_virt_addr;
		dev->virtual_mapped = false;
	} else {
		if (props->periph_virt_size == 0) {
			SPS_ERR("sps:Unable to map BAM DMA IO memory: %x %x",
				dev->phys_addr, props->periph_virt_size);
			goto exit_err;
		}

		dev->virt_addr = ioremap(dev->phys_addr,
					 props->periph_virt_size);
		if (dev->virt_addr == NULL) {
			SPS_ERR("sps:Unable to map BAM DMA IO memory: %x %x",
				dev->phys_addr, props->periph_virt_size);
			goto exit_err;
		}
		/* Remember to iounmap on teardown/error */
		dev->virtual_mapped = true;
	}
	dev->hwio = (void *) dev->virt_addr;

	/* Is the BAM-DMA device locally controlled? */
	if ((props->manage & SPS_BAM_MGR_DEVICE_REMOTE) == 0) {
		SPS_DBG2("sps:BAM-DMA is controlled locally: %x",
			 dev->phys_addr);
		dev->local = true;
	} else {
		SPS_DBG2("sps:BAM-DMA is controlled remotely: %x",
			 dev->phys_addr);
		dev->local = false;
	}

	/*
	 * Enable the BAM DMA and determine the number of pipes/channels.
	 * Leave the BAM-DMA enabled, since it is always a shared device.
	 */
	if (sps_dma_device_enable(dev))
		goto exit_err;

	dev->num_pipes = dev->bam->props.num_pipes;

	result = 0;
exit_err:
	/* On failure, undo the mapping and release the device slot */
	if (result) {
		if (dev != NULL) {
			if (dev->virtual_mapped)
				iounmap(dev->virt_addr);

			dev->bam = NULL;
		}
	}

	mutex_unlock(&bam_dma_lock);

	return result;
}
/**
 * Initialize a BAM pipe
 *
 * Resets the pipe, enables its interrupt at the BAM level, programs
 * direction/mode, thresholds, and FIFO addresses, then enables the pipe.
 * Supports NDP-specific fields (lock group, WRITE_NWD) when built with
 * CONFIG_SPS_SUPPORT_NDP_BAM.
 *
 * @base - BAM virtual base address
 * @pipe - pipe index
 * @param - pipe configuration parameters
 * @ee - execution environment index for the interrupt mask
 *
 * @return 0 (always succeeds)
 */
int bam_pipe_init(void *base, u32 pipe, struct bam_pipe_parameters *param,
		  u32 ee)
{
	SPS_DBG2("sps:%s:bam=0x%x(va).pipe=%d.", __func__, (u32) base, pipe);

	/* Reset the BAM pipe */
	bam_write_reg(base, P_RST(pipe), 1);
	/* No delay needed */
	bam_write_reg(base, P_RST(pipe), 0);

	/* Enable the Pipe Interrupt at the BAM level */
	bam_write_reg_field(base, IRQ_SRCS_MSK_EE(ee), (1 << pipe), 1);

	bam_write_reg(base, P_IRQ_EN(pipe), param->pipe_irq_mask);

	/* Program transfer direction, mode, and thresholds */
	bam_write_reg_field(base, P_CTRL(pipe), P_DIRECTION, param->dir);
	bam_write_reg_field(base, P_CTRL(pipe), P_SYS_MODE, param->mode);

	bam_write_reg(base, P_EVNT_GEN_TRSHLD(pipe), param->event_threshold);

	/* Descriptor FIFO location and size */
	bam_write_reg(base, P_DESC_FIFO_ADDR(pipe), param->desc_base);
	bam_write_reg_field(base, P_FIFO_SIZES(pipe), P_DESC_FIFO_SIZE,
			    param->desc_size);

	bam_write_reg_field(base, P_CTRL(pipe), P_SYS_STRM,
			    param->stream_mode);

#ifdef CONFIG_SPS_SUPPORT_NDP_BAM
	bam_write_reg_field(base, P_CTRL(pipe), P_LOCK_GROUP,
			    param->lock_group);

	SPS_DBG("sps:bam=0x%x(va).pipe=%d.lock_group=%d.\n",
		(u32) base, pipe, param->lock_group);
#endif

	if (param->mode == BAM_PIPE_MODE_BAM2BAM) {
		/* Point event notifications at the peer BAM's event reg */
		u32 peer_dest_addr = param->peer_phys_addr +
				      P_EVNT_REG(param->peer_pipe);

		/* BAM-to-BAM also needs a data FIFO */
		bam_write_reg(base, P_DATA_FIFO_ADDR(pipe),
			      param->data_base);
		bam_write_reg_field(base, P_FIFO_SIZES(pipe),
				    P_DATA_FIFO_SIZE, param->data_size);

		bam_write_reg(base, P_EVNT_DEST_ADDR(pipe), peer_dest_addr);

		SPS_DBG2("sps:bam=0x%x(va).pipe=%d.peer_bam=0x%x."
			"peer_pipe=%d.\n",
			(u32) base, pipe,
			(u32) param->peer_phys_addr,
			param->peer_pipe);

#ifdef CONFIG_SPS_SUPPORT_NDP_BAM
		bam_write_reg_field(base, P_CTRL(pipe), P_WRITE_NWD,
				    param->write_nwd);

		SPS_DBG("sps:%s WRITE_NWD bit for this bam2bam pipe.",
			param->write_nwd ? "Set" : "Do not set");
#endif
	}

	/* Pipe Enable - at last */
	bam_write_reg_field(base, P_CTRL(pipe), P_EN, 1);

	return 0;
}
/** * Allocate a BAM DMA channel * */ int sps_alloc_dma_chan(const struct sps_alloc_dma_chan *alloc, struct sps_dma_chan *chan_info) { struct bamdma_device *dev; struct bamdma_chan *chan; u32 pipe_index; enum bam_dma_thresh_dma thresh = (enum bam_dma_thresh_dma) 0; enum bam_dma_weight_dma weight = (enum bam_dma_weight_dma) 0; int result = SPS_ERROR; if (alloc == NULL || chan_info == NULL) { SPS_ERR("sps:sps_alloc_dma_chan. invalid parameters"); return SPS_ERROR; } /* Translate threshold and priority to hwio values */ if (alloc->threshold != SPS_DMA_THRESHOLD_DEFAULT) { if (alloc->threshold >= 512) thresh = BAM_DMA_THRESH_512; else if (alloc->threshold >= 256) thresh = BAM_DMA_THRESH_256; else if (alloc->threshold >= 128) thresh = BAM_DMA_THRESH_128; else thresh = BAM_DMA_THRESH_64; } weight = alloc->priority; if ((u32)alloc->priority > (u32)BAM_DMA_WEIGHT_HIGH) { SPS_ERR("sps:BAM-DMA: invalid priority: %x", alloc->priority); return SPS_ERROR; } mutex_lock(&bam_dma_lock); dev = sps_dma_find_device(alloc->dev); if (dev == NULL) { SPS_ERR("sps:BAM-DMA: invalid BAM handle: %x", alloc->dev); goto exit_err; } /* Search for a free set of pipes */ for (pipe_index = 0, chan = dev->chans; pipe_index < dev->num_pipes; pipe_index += 2, chan++) { if (chan->state == DMA_CHAN_STATE_FREE) { /* Just check pipes for safety */ if (dev->pipes[pipe_index] != PIPE_INACTIVE || dev->pipes[pipe_index + 1] != PIPE_INACTIVE) { SPS_ERR("sps:BAM-DMA: channel %d state " "error:%d %d", pipe_index / 2, dev->pipes[pipe_index], dev->pipes[pipe_index + 1]); goto exit_err; } break; /* Found free pipe */ } } if (pipe_index >= dev->num_pipes) { SPS_ERR("sps:BAM-DMA: no free channel. 
num_pipes = %d", dev->num_pipes); goto exit_err; } chan->state = DMA_CHAN_STATE_ALLOC_EXT; /* Store config values for use when pipes are activated */ chan = &dev->chans[pipe_index / 2]; chan->threshold = alloc->threshold; chan->thresh = thresh; chan->priority = alloc->priority; chan->weight = weight; SPS_DBG2("sps:sps_alloc_dma_chan. pipe %d.\n", pipe_index); /* Report allocated pipes to client */ chan_info->dev = dev->h; /* Dest/input/write pipex */ chan_info->dest_pipe_index = pipe_index; /* Source/output/read pipe */ chan_info->src_pipe_index = pipe_index + 1; result = 0; exit_err: mutex_unlock(&bam_dma_lock); return result; }
/**
 * Allocate a BAM DMA channel
 *
 * Finds a free channel (an even/odd pipe pair) on the BAM-DMA device,
 * records the requested threshold/priority for use when the pipes are
 * activated, and reports the dest and source pipe indexes to the client.
 *
 * @alloc - allocation request (device handle, threshold, priority)
 * @chan_info - [out] device handle and allocated pipe indexes
 *
 * @return 0 on success, SPS_ERROR on error
 */
int sps_alloc_dma_chan(const struct sps_alloc_dma_chan *alloc,
		       struct sps_dma_chan *chan_info)
{
	struct bamdma_device *dev;
	struct bamdma_chan *chan;
	u32 pipe_index;
	enum bam_dma_thresh_dma thresh = (enum bam_dma_thresh_dma) 0;
	enum bam_dma_weight_dma weight = (enum bam_dma_weight_dma) 0;
	int result = SPS_ERROR;

	if (alloc == NULL || chan_info == NULL) {
		SPS_ERR("sps:sps_alloc_dma_chan. invalid parameters");
		return SPS_ERROR;
	}

	/* Translate the byte threshold to its hwio encoding */
	if (alloc->threshold != SPS_DMA_THRESHOLD_DEFAULT) {
		if (alloc->threshold >= 512)
			thresh = BAM_DMA_THRESH_512;
		else if (alloc->threshold >= 256)
			thresh = BAM_DMA_THRESH_256;
		else if (alloc->threshold >= 128)
			thresh = BAM_DMA_THRESH_128;
		else
			thresh = BAM_DMA_THRESH_64;
	}

	weight = alloc->priority;

	/* Reject priorities above the highest supported weight */
	if ((u32)alloc->priority > (u32)BAM_DMA_WEIGHT_HIGH) {
		SPS_ERR("sps:BAM-DMA: invalid priority: %x",
			alloc->priority);
		return SPS_ERROR;
	}

	mutex_lock(&bam_dma_lock);

	dev = sps_dma_find_device(alloc->dev);
	if (dev == NULL) {
		SPS_ERR("sps:BAM-DMA: invalid BAM handle: %x", alloc->dev);
		goto exit_err;
	}

	/* Search for a free channel; pipes come in even/odd pairs */
	for (pipe_index = 0, chan = dev->chans;
	     pipe_index < dev->num_pipes; pipe_index += 2, chan++) {
		if (chan->state == DMA_CHAN_STATE_FREE) {
			/* Sanity-check that both pipes are really idle */
			if (dev->pipes[pipe_index] != PIPE_INACTIVE ||
			    dev->pipes[pipe_index + 1] != PIPE_INACTIVE) {
				SPS_ERR("sps:BAM-DMA: channel %d state "
					"error:%d %d",
					pipe_index / 2,
					dev->pipes[pipe_index],
					dev->pipes[pipe_index + 1]);
				goto exit_err;
			}
			break;	/* Found a free channel */
		}
	}

	if (pipe_index >= dev->num_pipes) {
		SPS_ERR("sps:BAM-DMA: no free channel. num_pipes = %d",
			dev->num_pipes);
		goto exit_err;
	}

	chan->state = DMA_CHAN_STATE_ALLOC_EXT;

	/* Store config values for use when the pipes are activated */
	chan = &dev->chans[pipe_index / 2];
	chan->threshold = alloc->threshold;
	chan->thresh = thresh;
	chan->priority = alloc->priority;
	chan->weight = weight;

	SPS_DBG2("sps:sps_alloc_dma_chan. pipe %d.\n", pipe_index);

	/* Report allocated pipes to the client */
	chan_info->dev = dev->h;
	/* Dest/input/write pipe */
	chan_info->dest_pipe_index = pipe_index;
	/* Source/output/read pipe */
	chan_info->src_pipe_index = pipe_index + 1;

	result = 0;
exit_err:
	mutex_unlock(&bam_dma_lock);

	return result;
}
/**
 * Disable a BAM pipe
 *
 * @base - BAM virtual base address
 * @pipe - pipe index
 */
void bam_pipe_disable(void *base, u32 pipe)
{
	SPS_DBG2("sps:%s:bam=0x%x(va).pipe=%d.", __func__, (u32) base, pipe);

	/* Clear the pipe-enable bit; interrupts are left as-is */
	bam_write_reg_field(base, P_CTRL(pipe), P_EN, 0);
}