/**
 * Initialize BAM device security execution environment
 *
 * @base - virtual base address of the BAM registers
 * @ee - execution environment index to assign
 * @vmid - virtual master identifier for the EE
 * @pipe_mask - bitmask of pipes to assign to this EE; MSbit (bit 31)
 *	additionally assigns the top-level BAM interrupt to the EE
 *
 * @return 0 on success, -ENODEV on unsupported hardware
 */
int bam_security_init(void *base, u32 ee, u32 vmid, u32 pipe_mask)
{
	u32 version;
	u32 num_pipes;
	u32 mask;
	u32 pipe;

	SPS_DBG2("sps:%s:bam=0x%x(va).", __func__, (u32) base);

	/*
	 * Discover the hardware version number and the number of pipes
	 * supported by this BAM
	 */
	version = bam_read_reg_field(base, REVISION, BAM_REVISION);
	num_pipes = bam_read_reg_field(base, NUM_PIPES, BAM_NUM_PIPES);

	/* Security configuration is only supported on BAM revisions 3..0x1F */
	if (version < 3 || version > 0x1F) {
		/* Fix: the concatenated literal was missing a space
		 * ("thisBAM version") — message now reads correctly.
		 */
		SPS_ERR("sps:bam 0x%x(va) security is not supported for this "
			"BAM version 0x%x.\n", (u32) base, version);
		return -ENODEV;
	}

	if (num_pipes > BAM_MAX_PIPES) {
		SPS_ERR("sps:bam 0x%x(va) the number of pipes is more than "
			"the maximum number allowed.", (u32) base);
		return -ENODEV;
	}

	/* Assign each pipe selected in pipe_mask to the given EE/VMID */
	for (pipe = 0, mask = 1; pipe < num_pipes; pipe++, mask <<= 1)
		if ((mask & pipe_mask) != 0)
			bam_pipe_set_ee(base, pipe, ee, vmid);

	/* If MSbit is set, assign top-level interrupt to this EE */
	mask = 1UL << 31;
	if ((mask & pipe_mask) != 0)
		bam_set_ee(base, ee, vmid, BAM_NONSECURE_RESET_ENABLE);

	return 0;
}
/*
 * Translate a physical pipe-memory address to its kernel virtual address.
 * Addresses outside the mapped pipe-memory window are logged as invalid
 * and translated through the kernel linear mapping as a fallback.
 */
void *spsi_get_mem_ptr(u32 phys_addr)
{
	/* Inside the ioremap'd pipe-memory window: offset into the mapping */
	if (phys_addr >= iomem_phys &&
	    phys_addr < (iomem_phys + iomem_size))
		return (u8 *) iomem_virt + (phys_addr - iomem_phys);

	SPS_ERR("sps:spsi_get_mem_ptr.invalid phys addr=0x%x.",
		phys_addr);
	/* Best-effort fallback via the kernel linear mapping */
	return phys_to_virt(phys_addr);
}
/**
 * Initialize driver memory module
 *
 * @pipemem_phys_base - physical base address of the pipe memory region
 * @pipemem_size - size in bytes of the pipe memory region
 *
 * @return 0 on success, negative value on error
 */
int sps_mem_init(u32 pipemem_phys_base, u32 pipemem_size)
{
#ifndef CONFIG_SPS_SUPPORT_NDP_BAM
	int res;
#endif
	/* 2^8 = 256 bytes: minimum desc-fifo/data-fifo allocation unit */
	int min_alloc_order = 8;

#ifndef CONFIG_SPS_SUPPORT_NDP_BAM
	iomem_phys = pipemem_phys_base;
	iomem_size = pipemem_size;

	if (iomem_phys == 0) {
		SPS_ERR("sps:Invalid Pipe-Mem address");
		return SPS_ERROR;
	} else {
		iomem_virt = ioremap(iomem_phys, iomem_size);
		if (!iomem_virt) {
			SPS_ERR("sps:Failed to IO map pipe memory.\n");
			return -ENOMEM;
		}
	}

	iomem_offset = 0;
	SPS_DBG("sps:sps_mem_init.iomem_phys=0x%x,iomem_virt=0x%x.",
		iomem_phys, (u32) iomem_virt);
#endif

	pool = gen_pool_create(min_alloc_order, nid);
	/*
	 * Fix: gen_pool_create() can return NULL on allocation failure;
	 * previously this went unchecked and was dereferenced later.
	 */
	if (!pool) {
		SPS_ERR("sps:Failed to create a memory pool.\n");
#ifndef CONFIG_SPS_SUPPORT_NDP_BAM
		iounmap(iomem_virt);
		iomem_virt = NULL;
#endif
		return -ENOMEM;
	}

#ifndef CONFIG_SPS_SUPPORT_NDP_BAM
	res = gen_pool_add(pool, (u32) iomem_virt, iomem_size, nid);
	if (res)
		return res;
#endif

	return 0;
}
/*
 * Tear down the driver memory module: destroy the allocation pool and
 * unmap pipe memory. Reports an error if outstanding allocations remain.
 */
int sps_mem_de_init(void)
{
	if (iomem_virt != NULL) {
		gen_pool_destroy(pool);
		pool = NULL;
		iounmap(iomem_virt);
		iomem_virt = NULL;
	}

	/* Every byte allocated must have been freed */
	if (total_alloc != total_free) {
		SPS_ERR("sps:sps_mem_de_init:some memory not free");
		return SPS_ERROR;
	}

	return 0;
}
/* Enable a BAM-DMA device (idempotent) and its underlying BAM. */
static int sps_dma_device_enable(struct bamdma_device *dev)
{
	/* Already enabled: nothing to do */
	if (dev->enabled)
		return 0;

	/* A locally controlled DMA block must be enabled by this driver */
	if (dev->local)
		dma_write_reg(dev->virt_addr, DMA_ENBL, 1);

	/* The associated BAM must come up as well */
	if (sps_bam_enable(dev->bam)) {
		SPS_ERR("sps:Failed to enable BAM DMA's BAM: %x",
			dev->phys_addr);
		return SPS_ERROR;
	}

	dev->enabled = true;

	return 0;
}
/*
 * Allocate I/O (pipe) memory.
 * Returns the physical address of the allocation, or SPS_ADDR_INVALID
 * when the pool cannot satisfy the request.
 */
u32 sps_mem_alloc_io(u32 bytes)
{
	u32 virt_addr;
	u32 phys_addr;

	virt_addr = gen_pool_alloc(pool, bytes);
	if (!virt_addr) {
		SPS_ERR("sps:gen_pool_alloc %d bytes fail.", bytes);
		return SPS_ADDR_INVALID;
	}

	/* Convert pool (virtual) address back to its physical counterpart */
	iomem_offset = virt_addr - (u32) iomem_virt;
	phys_addr = iomem_phys + iomem_offset;
	total_alloc += bytes;

	SPS_DBG2("sps:sps_mem_alloc_io.phys=0x%x.virt=0x%x.size=0x%x.",
		 phys_addr, virt_addr, bytes);

	return phys_addr;
}
/**
 * Allocate I/O (pipe) memory
 *
 * Returns the physical address of the allocation, or SPS_ADDR_INVALID
 * when the pool cannot satisfy the request.
 */
phys_addr_t sps_mem_alloc_io(u32 bytes)
{
	unsigned long virt_addr;
	phys_addr_t phys_addr;

	virt_addr = gen_pool_alloc(pool, bytes);
	if (!virt_addr) {
		SPS_ERR("sps:gen_pool_alloc %d bytes fail.", bytes);
		return SPS_ADDR_INVALID;
	}

	/* Convert pool (virtual) address back to its physical counterpart */
	iomem_offset = virt_addr - (uintptr_t) iomem_virt;
	phys_addr = iomem_phys + iomem_offset;
	total_alloc += bytes;

	SPS_DBG2("sps:sps_mem_alloc_io.phys=%pa.virt=0x%lx.size=0x%x.",
		 &phys_addr, virt_addr, bytes);

	return phys_addr;
}
/**
 * Free a BAM DMA pipe
 *
 * Deactivates the given pipe on the BAM-DMA device that owns @bam_arg.
 */
int sps_dma_pipe_free(void *bam_arg, u32 pipe_index)
{
	int result = SPS_ERROR;
	struct sps_bam *bam = bam_arg;
	struct bamdma_device *dev;

	mutex_lock(&bam_dma_lock);

	dev = sps_dma_find_device((u32) bam);
	if (dev != NULL)
		result = sps_dma_deactivate_pipe_atomic(dev, bam, pipe_index);
	else
		SPS_ERR("BAM-DMA: invalid BAM");

	mutex_unlock(&bam_dma_lock);

	return result;
}
/** * BAM DMA device enable * * This function enables a BAM DMA device and the associated BAM. * * @dev - pointer to BAM DMA device context * * @return 0 on success, negative value on error * */ static int sps_dma_device_enable(struct bamdma_device *dev) { if (dev->enabled) return 0; /* * If the BAM-DMA device is locally controlled then enable BAM-DMA * device */ if (dev->local) dma_write_reg(dev->virt_addr, DMA_ENBL, 1); /* Enable BAM device */ if (sps_bam_enable(dev->bam)) { SPS_ERR("Failed to enable BAM DMA's BAM: %x", dev->phys_addr); return SPS_ERROR; } dev->enabled = true; return 0; }
/**
 * Get BAM IRQ source and clear global IRQ status
 *
 * Reads the per-EE IRQ source register; when the top-level (bit 31)
 * interrupt is flagged, reads and acknowledges the global IRQ status.
 * Returns the source word filtered through @mask.
 */
u32 bam_check_irq_source(void *base, u32 ee, u32 mask)
{
	u32 source = bam_read_reg(base, IRQ_SRCS_EE(ee));

	/* Bit 31 indicates a global (top-level) BAM interrupt */
	if (source & (1UL << 31)) {
		u32 status = bam_read_reg(base, IRQ_STTS);

		/* Acknowledge everything that is currently pending */
		bam_write_reg(base, IRQ_CLR, status);

		if (printk_ratelimit()) {
			if (status & IRQ_STTS_BAM_ERROR_IRQ)
				SPS_ERR("sps:bam 0x%x(va);bam irq status="
					"0x%x.\nsps: BAM_ERROR_IRQ\n",
					(u32) base, status);
			else
				SPS_INFO("sps:bam 0x%x(va);bam irq status="
					 "0x%x.", (u32) base, status);
		}
	}

	return source & mask;
}
/**
 * Initialize a BAM device
 *
 * Resets and enables the BAM, programs the descriptor-count summing
 * threshold and configuration bits, and unmasks the global (error) BAM
 * interrupt for the given execution environment. Per-pipe interrupts
 * stay disabled until BAM_P_IRQ_enn is programmed elsewhere.
 *
 * @base - virtual base address of the BAM registers
 * @ee - execution environment index
 * @summing_threshold - DESC_CNT_TRSHLD value; 0 falls back to 4
 * @irq_mask - IRQ_EN mask to program
 * @version - out: BAM hardware revision
 * @num_pipes - out: number of pipes the BAM supports
 * @p_rst - selects the CNFG_BITS variant (see note below)
 *
 * @return 0 on success, -ENODEV on unsupported hardware revision
 */
int bam_init(void *base, u32 ee, u16 summing_threshold, u32 irq_mask,
	     u32 *version, u32 *num_pipes, u32 p_rst)
{
	u32 cfg_bits;
	u32 ver = 0;

	SPS_DBG2("sps:%s:bam=0x%x(va).ee=%d.", __func__, (u32) base, ee);

	ver = bam_read_reg_field(base, REVISION, BAM_REVISION);

	if ((ver < BAM_MIN_VERSION) || (ver > BAM_MAX_VERSION)) {
		SPS_ERR("sps:bam 0x%x(va) Invalid BAM REVISION 0x%x.\n",
			(u32) base, ver);
		return -ENODEV;
	} else
		SPS_DBG2("sps:REVISION of BAM 0x%x is 0x%x.\n",
			 (u32) base, ver);

	/* A zero threshold would be meaningless; substitute the default */
	if (summing_threshold == 0) {
		summing_threshold = 4;
		SPS_ERR("sps:bam 0x%x(va) summing_threshold is zero , "
			"use default 4.\n", (u32) base);
	}

	/*
	 * All-ones with bit 11 (and also bit 12 when p_rst is set) cleared.
	 * NOTE(review): the meaning of CNFG_BITS bits 11/12 is not visible
	 * in this file — confirm against the BAM register description.
	 */
	if (p_rst)
		cfg_bits = 0xffffffff & ~(3 << 11);
	else
		cfg_bits = 0xffffffff & ~(1 << 11);

	/* Pulse the software reset before enabling the BAM */
	bam_write_reg_field(base, CTRL, BAM_SW_RST, 1);
	/* No delay needed */
	bam_write_reg_field(base, CTRL, BAM_SW_RST, 0);

	bam_write_reg_field(base, CTRL, BAM_EN, 1);
#ifdef CONFIG_SPS_SUPPORT_NDP_BAM
	bam_write_reg_field(base, CTRL, CACHE_MISS_ERR_RESP_EN, 1);
	bam_write_reg_field(base, CTRL, LOCAL_CLK_GATING, 1);
#endif
	bam_write_reg(base, DESC_CNT_TRSHLD, summing_threshold);
	bam_write_reg(base, CNFG_BITS, cfg_bits);

	/*
	 * Enable Global BAM Interrupt - for error reasons ,
	 * filter with mask.
	 * Note: Pipes interrupts are disabled until BAM_P_IRQ_enn is set
	 */
	bam_write_reg_field(base, IRQ_SRCS_MSK_EE(ee), BAM_IRQ, 1);
	bam_write_reg(base, IRQ_EN, irq_mask);

	/* Report discovered hardware capabilities back to the caller */
	*num_pipes = bam_read_reg_field(base, NUM_PIPES, BAM_NUM_PIPES);
	*version = ver;

	return 0;
}
/** * Allocate a BAM DMA channel * */ int sps_alloc_dma_chan(const struct sps_alloc_dma_chan *alloc, struct sps_dma_chan *chan_info) { struct bamdma_device *dev; struct bamdma_chan *chan; u32 pipe_index; enum bam_dma_thresh_dma thresh = (enum bam_dma_thresh_dma) 0; enum bam_dma_weight_dma weight = (enum bam_dma_weight_dma) 0; int result = SPS_ERROR; if (alloc == NULL || chan_info == NULL) { SPS_ERR("sps_alloc_dma_chan. invalid parameters"); return SPS_ERROR; } /* Translate threshold and priority to hwio values */ if (alloc->threshold != SPS_DMA_THRESHOLD_DEFAULT) { if (alloc->threshold >= 512) thresh = BAM_DMA_THRESH_512; else if (alloc->threshold >= 256) thresh = BAM_DMA_THRESH_256; else if (alloc->threshold >= 128) thresh = BAM_DMA_THRESH_128; else thresh = BAM_DMA_THRESH_64; } weight = alloc->priority; if ((u32)alloc->priority > (u32)BAM_DMA_WEIGHT_HIGH) { SPS_ERR("BAM-DMA: invalid priority: %x", alloc->priority); return SPS_ERROR; } mutex_lock(&bam_dma_lock); dev = sps_dma_find_device(alloc->dev); if (dev == NULL) { SPS_ERR("BAM-DMA: invalid BAM handle: %x", alloc->dev); goto exit_err; } /* Search for a free set of pipes */ for (pipe_index = 0, chan = dev->chans; pipe_index < dev->num_pipes; pipe_index += 2, chan++) { if (chan->state == DMA_CHAN_STATE_FREE) { /* Just check pipes for safety */ if (dev->pipes[pipe_index] != PIPE_INACTIVE || dev->pipes[pipe_index + 1] != PIPE_INACTIVE) { SPS_ERR("BAM-DMA: channel %d state error:%d %d", pipe_index / 2, dev->pipes[pipe_index], dev->pipes[pipe_index + 1]); goto exit_err; } break; /* Found free pipe */ } } if (pipe_index >= dev->num_pipes) { SPS_ERR("BAM-DMA: no free channel. num_pipes = %d", dev->num_pipes); goto exit_err; } chan->state = DMA_CHAN_STATE_ALLOC_EXT; /* Store config values for use when pipes are activated */ chan = &dev->chans[pipe_index / 2]; chan->threshold = alloc->threshold; chan->thresh = thresh; chan->priority = alloc->priority; chan->weight = weight; SPS_DBG("sps_alloc_dma_chan. 
pipe %d.\n", pipe_index); /* Report allocated pipes to client */ chan_info->dev = dev->h; /* Dest/input/write pipex */ chan_info->dest_pipe_index = pipe_index; /* Source/output/read pipe */ chan_info->src_pipe_index = pipe_index + 1; result = 0; exit_err: mutex_unlock(&bam_dma_lock); return result; }
/**
 * Initialize BAM DMA device
 *
 * Claims the single BAM-DMA device slot, resolves the BAM from the
 * handle, maps the peripheral registers if no virtual address was
 * supplied, enables the device, and disables all DMA channels when the
 * device is locally controlled. Cleans up the mapping on any failure.
 *
 * @h - BAM device handle
 *
 * @return 0 on success, SPS_ERROR on failure
 */
int sps_dma_device_init(u32 h)
{
	struct bamdma_device *dev;
	struct sps_bam_props *props;
	u32 chan;
	int result = SPS_ERROR;

	mutex_lock(&bam_dma_lock);

	/* Find a free BAM-DMA device slot (only one slot exists) */
	dev = NULL;
	if (bam_dma_dev[0].bam != NULL) {
		SPS_ERR("BAM-DMA BAM device already initialized.");
		goto exit_err;
	} else {
		dev = &bam_dma_dev[0];
	}

	/* Record BAM */
	memset(dev, 0, sizeof(*dev));
	dev->h = h;
	dev->bam = sps_h2bam(h);
	if (dev->bam == NULL) {
		SPS_ERR("BAM-DMA BAM device is not found from the handle.");
		goto exit_err;
	}

	/* Map the BAM DMA device into virtual space, if necessary */
	props = &dev->bam->props;
	dev->phys_addr = props->periph_phys_addr;
	if (props->periph_virt_addr != NULL) {
		/* Caller already mapped the registers; do not unmap later */
		dev->virt_addr = props->periph_virt_addr;
		dev->virtual_mapped = false;
	} else {
		if (props->periph_virt_size == 0) {
			SPS_ERR("Unable to map BAM DMA IO memory: %x %x",
				dev->phys_addr, props->periph_virt_size);
			goto exit_err;
		}

		dev->virt_addr = ioremap(dev->phys_addr,
					 props->periph_virt_size);
		if (dev->virt_addr == NULL) {
			SPS_ERR("Unable to map BAM DMA IO memory: %x %x",
				dev->phys_addr, props->periph_virt_size);
			goto exit_err;
		}
		/* This mapping is ours: remember to iounmap on error */
		dev->virtual_mapped = true;
	}
	dev->hwio = (void *) dev->virt_addr;

	/* Is the BAM-DMA device locally controlled? */
	if ((props->manage & SPS_BAM_MGR_DEVICE_REMOTE) == 0) {
		SPS_DBG("BAM-DMA is controlled locally: %x",
			dev->phys_addr);
		dev->local = true;
	} else {
		SPS_DBG("BAM-DMA is controlled remotely: %x",
			dev->phys_addr);
		dev->local = false;
	}

	/*
	 * Enable the BAM DMA and determine the number of pipes/channels.
	 * Leave the BAM-DMA enabled, since it is always a shared device.
	 */
	if (sps_dma_device_enable(dev))
		goto exit_err;

	dev->num_pipes = dev->bam->props.num_pipes;

	/* Disable all channels (two pipes per channel) */
	if (dev->local)
		for (chan = 0; chan < (dev->num_pipes / 2); chan++) {
			dma_write_reg_field(dev->virt_addr,
					    DMA_CHNL_CONFIG(chan),
					    DMA_CHNL_ENABLE, 0);
		}

	result = 0;
exit_err:
	/* On failure, release the mapping we created and free the slot */
	if (result) {
		if (dev != NULL) {
			if (dev->virtual_mapped)
				iounmap(dev->virt_addr);

			dev->bam = NULL;
		}
	}

	mutex_unlock(&bam_dma_lock);

	return result;
}
int sps_map_find(struct sps_connect *connect) { const struct sps_map *map; u32 i; void *desc; void *data; if (sps_maps.num_maps == 0) return SPS_ERROR; for (i = sps_maps.num_maps, map = sps_maps.maps; i > 0; i--, map++) if (map->src.periph_class == (u32) connect->source && map->dest.periph_class == (u32) connect->destination && map->config == (u32) connect->config) break; if (i == 0) return SPS_ERROR; desc = spsi_get_mem_ptr(map->desc_base); if (desc == NULL) { SPS_ERR("sps:Cannot get virt addr for I/O buffer: 0x%x", map->desc_base); return SPS_ERROR; } if (map->data_size > 0 && map->data_base != SPS_ADDR_INVALID) { data = spsi_get_mem_ptr(map->data_base); if (data == NULL) { SPS_ERR("sps:Can't get virt addr for I/O buffer: 0x%x", map->data_base); return SPS_ERROR; } } else { data = NULL; } if (connect->source != SPS_DEV_HANDLE_MEM) connect->src_pipe_index = map->src.pipe_index; if (connect->destination != SPS_DEV_HANDLE_MEM) connect->dest_pipe_index = map->dest.pipe_index; if (connect->mode == SPS_MODE_SRC) connect->event_thresh = map->src.event_thresh; else connect->event_thresh = map->dest.event_thresh; connect->desc.size = map->desc_size; connect->desc.phys_base = map->desc_base; connect->desc.base = desc; if (map->data_size > 0 && map->data_base != SPS_ADDR_INVALID) { connect->data.size = map->data_size; connect->data.phys_base = map->data_base; connect->data.base = data; } return 0; }
/*
 * Allocate a BAM DMA channel: find a free pipe pair, record the
 * requested threshold/priority for later activation, and report the
 * pipe indices to the client. Returns 0 on success, SPS_ERROR on
 * failure.
 */
int sps_alloc_dma_chan(const struct sps_alloc_dma_chan *alloc,
		       struct sps_dma_chan *chan_info)
{
	struct bamdma_device *dev;
	struct bamdma_chan *chan;
	u32 pipe_index;
	enum bam_dma_thresh_dma thresh = (enum bam_dma_thresh_dma) 0;
	enum bam_dma_weight_dma weight = (enum bam_dma_weight_dma) 0;
	int result = SPS_ERROR;

	if (alloc == NULL || chan_info == NULL) {
		SPS_ERR("sps:sps_alloc_dma_chan. invalid parameters");
		return SPS_ERROR;
	}

	/* Translate the byte threshold to its hardware encoding */
	if (alloc->threshold != SPS_DMA_THRESHOLD_DEFAULT) {
		if (alloc->threshold >= 512)
			thresh = BAM_DMA_THRESH_512;
		else if (alloc->threshold >= 256)
			thresh = BAM_DMA_THRESH_256;
		else if (alloc->threshold >= 128)
			thresh = BAM_DMA_THRESH_128;
		else
			thresh = BAM_DMA_THRESH_64;
	}

	weight = alloc->priority;

	if ((u32)alloc->priority > (u32)BAM_DMA_WEIGHT_HIGH) {
		SPS_ERR("sps:BAM-DMA: invalid priority: %x", alloc->priority);
		return SPS_ERROR;
	}

	mutex_lock(&bam_dma_lock);

	dev = sps_dma_find_device(alloc->dev);
	if (dev == NULL) {
		SPS_ERR("sps:BAM-DMA: invalid BAM handle: %x", alloc->dev);
		goto exit_err;
	}

	/* Scan channels (one per pipe pair) for a free one */
	for (pipe_index = 0, chan = dev->chans;
	     pipe_index < dev->num_pipes; pipe_index += 2, chan++) {
		if (chan->state == DMA_CHAN_STATE_FREE) {
			/* Sanity check: both pipes must be inactive */
			if (dev->pipes[pipe_index] != PIPE_INACTIVE ||
			    dev->pipes[pipe_index + 1] != PIPE_INACTIVE) {
				SPS_ERR("sps:BAM-DMA: channel %d state "
					"error:%d %d",
					pipe_index / 2,
					dev->pipes[pipe_index],
					dev->pipes[pipe_index + 1]);
				goto exit_err;
			}
			break;
		}
	}

	if (pipe_index >= dev->num_pipes) {
		SPS_ERR("sps:BAM-DMA: no free channel. num_pipes = %d",
			dev->num_pipes);
		goto exit_err;
	}

	chan->state = DMA_CHAN_STATE_ALLOC_EXT;

	/*
	 * Store config values for use when the pipes are activated.
	 * (chan already points at dev->chans[pipe_index / 2]; the
	 * reassignment below is redundant but harmless.)
	 */
	chan = &dev->chans[pipe_index / 2];
	chan->threshold = alloc->threshold;
	chan->thresh = thresh;
	chan->priority = alloc->priority;
	chan->weight = weight;

	SPS_DBG2("sps:sps_alloc_dma_chan. pipe %d.\n", pipe_index);

	/* Report the allocated pipe pair to the client */
	chan_info->dev = dev->h;
	/* Dest/input/write pipe */
	chan_info->dest_pipe_index = pipe_index;
	/* Source/output/read pipe */
	chan_info->src_pipe_index = pipe_index + 1;

	result = 0;
exit_err:
	mutex_unlock(&bam_dma_lock);

	return result;
}
/*
 * Initialize the BAM DMA device: claim the single device slot, resolve
 * the BAM from the handle, map the peripheral registers if needed,
 * enable the device, and disable all channels when locally controlled.
 * The register mapping is released on any failure. Returns 0 on
 * success, SPS_ERROR on failure.
 */
int sps_dma_device_init(u32 h)
{
	struct bamdma_device *dev;
	struct sps_bam_props *props;
	u32 chan;
	int result = SPS_ERROR;

	mutex_lock(&bam_dma_lock);

	/* Only one BAM-DMA device slot exists */
	dev = NULL;
	if (bam_dma_dev[0].bam != NULL) {
		SPS_ERR("sps:BAM-DMA BAM device is already initialized.");
		goto exit_err;
	} else {
		dev = &bam_dma_dev[0];
	}

	/* Record the owning BAM */
	memset(dev, 0, sizeof(*dev));
	dev->h = h;
	dev->bam = sps_h2bam(h);
	if (dev->bam == NULL) {
		SPS_ERR("sps:BAM-DMA BAM device is not found "
			"from the handle.");
		goto exit_err;
	}

	/* Map the BAM DMA registers into virtual space, if necessary */
	props = &dev->bam->props;
	dev->phys_addr = props->periph_phys_addr;
	if (props->periph_virt_addr != NULL) {
		/* Caller supplied the mapping; do not unmap it later */
		dev->virt_addr = props->periph_virt_addr;
		dev->virtual_mapped = false;
	} else {
		if (props->periph_virt_size == 0) {
			SPS_ERR("sps:Unable to map BAM DMA IO memory: %x %x",
				dev->phys_addr, props->periph_virt_size);
			goto exit_err;
		}

		dev->virt_addr = ioremap(dev->phys_addr,
					 props->periph_virt_size);
		if (dev->virt_addr == NULL) {
			SPS_ERR("sps:Unable to map BAM DMA IO memory: %x %x",
				dev->phys_addr, props->periph_virt_size);
			goto exit_err;
		}
		/* This mapping is ours: iounmap on the error path */
		dev->virtual_mapped = true;
	}
	dev->hwio = (void *) dev->virt_addr;

	/* Determine whether this driver controls the BAM-DMA device */
	if ((props->manage & SPS_BAM_MGR_DEVICE_REMOTE) == 0) {
		SPS_DBG2("sps:BAM-DMA is controlled locally: %x",
			 dev->phys_addr);
		dev->local = true;
	} else {
		SPS_DBG2("sps:BAM-DMA is controlled remotely: %x",
			 dev->phys_addr);
		dev->local = false;
	}

	/* Enable the device; it stays enabled since it is shared */
	if (sps_dma_device_enable(dev))
		goto exit_err;

	dev->num_pipes = dev->bam->props.num_pipes;

	/* Disable all channels (two pipes per channel) */
	if (dev->local)
		for (chan = 0; chan < (dev->num_pipes / 2); chan++) {
			dma_write_reg_field(dev->virt_addr,
					    DMA_CHNL_CONFIG(chan),
					    DMA_CHNL_ENABLE, 0);
		}

	result = 0;
exit_err:
	/* On failure, release our mapping and free the device slot */
	if (result) {
		if (dev != NULL) {
			if (dev->virtual_mapped)
				iounmap(dev->virt_addr);

			dev->bam = NULL;
		}
	}

	mutex_unlock(&bam_dma_lock);

	return result;
}