/**
 * Deactivate a BAM DMA pipe
 *
 * Marks the pipe inactive, disables its DMA channel in hardware, and
 * releases an internally-allocated channel once both pipes of the pair
 * are inactive. Caller is expected to hold the module lock (function is
 * named *_atomic and takes no lock itself — see callers).
 *
 * @dev - pointer to BAM-DMA device descriptor
 *
 * @bam - pointer to BAM device descriptor
 *
 * @pipe_index - pipe index
 *
 * @return 0 on success, negative value on error
 *
 */
static int sps_dma_deactivate_pipe_atomic(struct bamdma_device *dev,
					  struct sps_bam *bam,
					  u32 pipe_index)
{
	u32 chan_index;

	/* Validate ownership, range, and that the pipe is currently active */
	if (dev->bam != bam || pipe_index >= dev->num_pipes ||
	    dev->pipes[pipe_index] != PIPE_ACTIVE)
		return SPS_ERROR;

	SPS_DBG("BAM-DMA: deactivate pipe %d", pipe_index);

	/* Mark pipe inactive */
	dev->pipes[pipe_index] = PIPE_INACTIVE;

	/*
	 * Two pipes share one channel (channel = pipe / 2); the channel is
	 * unconditionally disabled whenever either pipe goes down.
	 */
	chan_index = pipe_index / 2;
	dma_write_reg_field(dev->virt_addr, DMA_CHNL_CONFIG(chan_index),
			    DMA_CHNL_ENABLE, 0);

	/*
	 * Once both pipes of the pair are disabled, an internally-allocated
	 * channel can be returned to the free pool.
	 */
	if (sps_dma_check_pipes(dev, pipe_index) == DMA_PIPES_BOTH_DISABLED &&
	    dev->chans[chan_index].state == DMA_CHAN_STATE_ALLOC_INT)
		dev->chans[chan_index].state = DMA_CHAN_STATE_FREE;

	return 0;
}
/**
 * Enable a BAM DMA pipe
 *
 * Validates the BAM handle and pipe state, then — only for the
 * dest/input/write pipe of a channel pair — programs the stored
 * threshold/weight configuration and enables the DMA channel.
 *
 * @bam_arg - opaque BAM device pointer (struct sps_bam *)
 * @pipe_index - pipe index
 *
 * @return 0 on success, SPS_ERROR on error
 */
int sps_dma_pipe_enable(void *bam_arg, u32 pipe_index)
{
	struct sps_bam *bam = bam_arg;
	struct bamdma_device *dev;
	struct bamdma_chan *chan_cfg;
	u32 chan_index;
	int rc = SPS_ERROR;

	SPS_DBG("sps_dma_pipe_enable.pipe %d", pipe_index);

	mutex_lock(&bam_dma_lock);

	/* The BAM pointer doubles as the device lookup key */
	dev = sps_dma_find_device((u32) bam);
	if (dev == NULL) {
		SPS_ERR("BAM-DMA: invalid BAM");
		goto exit_err;
	}

	if (pipe_index >= dev->num_pipes) {
		SPS_ERR("BAM-DMA: BAM %x invalid pipe: %d",
			bam->props.phys_addr, pipe_index);
		goto exit_err;
	}

	if (dev->pipes[pipe_index] != PIPE_ACTIVE) {
		SPS_ERR("BAM-DMA: BAM %x pipe %d not active",
			bam->props.phys_addr, pipe_index);
		goto exit_err;
	}

	/*
	 * The channel must be enabled when the dest/input/write pipe
	 * is enabled
	 */
	if (DMA_PIPE_IS_DEST(pipe_index)) {
		chan_index = pipe_index / 2;
		chan_cfg = &dev->chans[chan_index];

		/* Only write non-default threshold/weight values */
		if (chan_cfg->threshold != SPS_DMA_THRESHOLD_DEFAULT)
			dma_write_reg_field(dev->virt_addr,
					    DMA_CHNL_CONFIG(chan_index),
					    DMA_CHNL_ACT_THRESH,
					    chan_cfg->thresh);
		if (chan_cfg->priority != SPS_DMA_PRI_DEFAULT)
			dma_write_reg_field(dev->virt_addr,
					    DMA_CHNL_CONFIG(chan_index),
					    DMA_CHNL_WEIGHT,
					    chan_cfg->weight);

		/* Enable the channel last, after it is fully configured */
		dma_write_reg_field(dev->virt_addr,
				    DMA_CHNL_CONFIG(chan_index),
				    DMA_CHNL_ENABLE, 1);
	}

	rc = 0;
exit_err:
	mutex_unlock(&bam_dma_lock);

	return rc;
}
/**
 * Read register with debug info.
 *
 * @base - bam base virtual address.
 * @offset - register offset.
 *
 * @return u32 - value read from the register
 */
static inline u32 bam_read_reg(void *base, u32 offset)
{
	u32 regval;

	regval = ioread32(base + offset);
	/* NOTE(review): (u32) pointer cast truncates on 64-bit builds */
	SPS_DBG("sps:bam 0x%x(va) read reg 0x%x r_val 0x%x.\n",
		(u32) base, offset, regval);
	return regval;
}
/**
 * Initialize driver memory module
 *
 * Maps the pipe memory region and creates the gen_pool allocator that
 * backs sps_mem_alloc_io()/sps_mem_free_io().
 *
 * @pipemem_phys_base - physical base address of pipe memory
 * @pipemem_size - size of pipe memory in bytes
 *
 * @return 0 on success, SPS_ERROR or negative errno on error
 */
int sps_mem_init(u32 pipemem_phys_base, u32 pipemem_size)
{
	int res;
	/* 2^8=128. The desc-fifo and data-fifo minimal allocation. */
	int min_alloc_order = 8;

	iomem_phys = pipemem_phys_base;
	iomem_size = pipemem_size;

	if (iomem_phys == 0) {
		SPS_ERR("sps:Invalid Pipe-Mem address");
		return SPS_ERROR;
	} else {
		iomem_virt = ioremap(iomem_phys, iomem_size);
		if (!iomem_virt) {
			SPS_ERR("sps:Failed to IO map pipe memory.\n");
			return -ENOMEM;
		}
	}

	iomem_offset = 0;
	SPS_DBG("sps:sps_mem_init.iomem_phys=0x%x,iomem_virt=0x%x.",
		iomem_phys, (u32) iomem_virt);

	/*
	 * gen_pool_create() can fail; passing a NULL pool to gen_pool_add()
	 * would oops, so check it (the other sps_mem_init variants do).
	 */
	pool = gen_pool_create(min_alloc_order, nid);
	if (!pool) {
		SPS_ERR("sps:Failed to create a new memory pool.\n");
		return -ENOMEM;
	}

	res = gen_pool_add(pool, (u32) iomem_virt, iomem_size, nid);
	if (res)
		return res;

	return 0;
}
/** * Initialize BAM DMA module * */ int sps_dma_init(const struct sps_bam_props *bam_props) { struct sps_bam_props props; const struct sps_bam_props *bam_reg; u32 h; /* Init local data */ memset(&bam_dma_dev, 0, sizeof(bam_dma_dev)); num_bams = 0; memset(bam_handles, 0, sizeof(bam_handles)); /* Create a mutex to control access to the BAM-DMA devices */ mutex_init(&bam_dma_lock); /* Are there any BAM DMA devices? */ if (bam_props == NULL) return 0; /* * Registers all BAMs in the BSP properties, but only uses the first * BAM-DMA device for allocations. */ if (bam_props->phys_addr) { /* Force multi-EE option for all BAM-DMAs */ bam_reg = bam_props; if ((bam_props->options & SPS_BAM_OPT_BAMDMA) && (bam_props->manage & SPS_BAM_MGR_MULTI_EE) == 0) { SPS_DBG("sps:Setting multi-EE options for BAM-DMA: %x", bam_props->phys_addr); props = *bam_props; props.manage |= SPS_BAM_MGR_MULTI_EE; bam_reg = &props; } /* Register the BAM */ if (sps_register_bam_device(bam_reg, &h)) { SPS_ERR("sps:Fail to register BAM-DMA BAM device: " "phys 0x%0x", bam_props->phys_addr); return SPS_ERROR; } /* Record the BAM so that it may be deregistered later */ if (num_bams < MAX_BAM_DMA_BAMS) { bam_handles[num_bams] = h; num_bams++; } else { SPS_ERR("sps:BAM-DMA: BAM limit exceeded: %d", num_bams); return SPS_ERROR; } } else { SPS_ERR("sps:BAM-DMA phys_addr is zero."); return SPS_ERROR; } return 0; }
/**
 * Read register masked field with debug info.
 *
 * Reads the register, isolates the bits selected by @mask, and shifts
 * the field down to bit 0.
 *
 * @base - bam base virtual address.
 * @offset - register offset.
 * @mask - register bitmask (must be non-zero; a zero mask would make
 *         find_first_bit return 32 and the shift undefined).
 *
 * @return u32 - extracted field value
 */
static inline u32 bam_read_reg_field(void *base, u32 offset, const u32 mask)
{
	u32 regval;
	u32 shift;

	shift = find_first_bit((void *)&mask, 32);
	regval = ioread32(base + offset);
	regval = (regval & mask) >> shift;
	SPS_DBG("sps:bam 0x%x(va) read reg 0x%x mask 0x%x r_val 0x%x.\n",
		(u32) base, offset, mask, regval);
	return regval;
}
/**
 * Write register masked field with debug info.
 *
 * Read-modify-write: clears the bits selected by @mask and writes
 * @val shifted into the field position.
 *
 * @base - bam base virtual address.
 * @offset - register offset.
 * @mask - register bitmask (must be non-zero; see bam_read_reg_field).
 * @val - field value to write (not pre-shifted).
 *
 */
static inline void dma_write_reg_field(void *base, u32 offset,
				       const u32 mask, u32 val)
{
	u32 shift = find_first_bit((void *)&mask, 32);
	u32 newval;

	newval = ioread32(base + offset);
	newval &= ~mask;	/* clear written bits */
	newval |= val << shift;
	iowrite32(newval, base + offset);
	SPS_DBG("bamdma: write reg 0x%x w_val 0x%x.", offset, newval);
}
/**
 * Initialize BAM DMA module
 *
 * Resets module state, registers the BAM described by @bam_props
 * (forcing the multi-EE option for BAM-DMA devices), and records its
 * handle for later deregistration.
 *
 * @bam_props - properties of the BAM to register, or NULL for none
 *
 * @return 0 on success, SPS_ERROR on error
 */
int sps_dma_init(const struct sps_bam_props *bam_props)
{
	struct sps_bam_props props;
	const struct sps_bam_props *bam_reg;
	u32 h;

	/* Init local data */
	memset(&bam_dma_dev, 0, sizeof(bam_dma_dev));
	num_bams = 0;
	memset(bam_handles, 0, sizeof(bam_handles));

	/* Create a mutex to control access to the BAM-DMA devices */
	mutex_init(&bam_dma_lock);

	/* Are there any BAM DMA devices? */
	if (bam_props == NULL)
		return 0;

	if (bam_props->phys_addr) {
		/*
		 * Force multi-EE option for all BAM-DMAs; registration uses
		 * a local copy so the caller's props are not modified.
		 */
		bam_reg = bam_props;
		if ((bam_props->options & SPS_BAM_OPT_BAMDMA) &&
		    (bam_props->manage & SPS_BAM_MGR_MULTI_EE) == 0) {
			SPS_DBG("sps:Setting multi-EE options for BAM-DMA: %x",
				bam_props->phys_addr);
			props = *bam_props;
			props.manage |= SPS_BAM_MGR_MULTI_EE;
			bam_reg = &props;
		}

		/* Register the BAM */
		if (sps_register_bam_device(bam_reg, &h)) {
			SPS_ERR("sps:Fail to register BAM-DMA BAM device: "
				"phys 0x%0x", bam_props->phys_addr);
			return SPS_ERROR;
		}

		/* Record the BAM so that it may be deregistered later */
		if (num_bams < MAX_BAM_DMA_BAMS) {
			bam_handles[num_bams] = h;
			num_bams++;
		} else {
			SPS_ERR("sps:BAM-DMA: BAM limit exceeded: %d",
				num_bams);
			return SPS_ERROR;
		}
	} else {
		SPS_ERR("sps:BAM-DMA phys_addr is zero.");
		return SPS_ERROR;
	}

	return 0;
}
/** * Free a BAM DMA channel * */ int sps_free_dma_chan(struct sps_dma_chan *chan) { struct bamdma_device *dev; u32 pipe_index; int result = 0; if (chan == NULL) { SPS_ERR("sps_free_dma_chan. chan is NULL"); return SPS_ERROR; } mutex_lock(&bam_dma_lock); dev = sps_dma_find_device(chan->dev); if (dev == NULL) { SPS_ERR("BAM-DMA: invalid BAM handle: %x", chan->dev); result = SPS_ERROR; goto exit_err; } /* Verify the pipe indices */ pipe_index = chan->dest_pipe_index; if (pipe_index >= dev->num_pipes || ((pipe_index & 1)) || (pipe_index + 1) != chan->src_pipe_index) { SPS_ERR("sps_free_dma_chan. Invalid pipe indices"); SPS_DBG("num_pipes=%d.dest=%d.src=%d.", dev->num_pipes, chan->dest_pipe_index, chan->src_pipe_index); result = SPS_ERROR; goto exit_err; } /* Are both pipes inactive? */ if (dev->chans[pipe_index / 2].state != DMA_CHAN_STATE_ALLOC_EXT || dev->pipes[pipe_index] != PIPE_INACTIVE || dev->pipes[pipe_index + 1] != PIPE_INACTIVE) { SPS_ERR("BAM-DMA: attempt to free active chan %d: %d %d", pipe_index / 2, dev->pipes[pipe_index], dev->pipes[pipe_index + 1]); result = SPS_ERROR; goto exit_err; } /* Free the channel */ dev->chans[pipe_index / 2].state = DMA_CHAN_STATE_FREE; exit_err: mutex_unlock(&bam_dma_lock); return result; }
/**
 * Free I/O memory
 *
 * Translates the physical address back to the pipe-memory virtual
 * address and returns the range to the gen_pool allocator.
 *
 * @phys_addr - physical address previously returned by sps_mem_alloc_io()
 * @bytes - size of the allocation in bytes
 *
 */
void sps_mem_free_io(u32 phys_addr, u32 bytes)
{
	u32 virt_addr;

	/*
	 * NOTE(review): iomem_offset is a module-level global used here as
	 * scratch for the phys->virt translation; preserved as-is.
	 */
	iomem_offset = phys_addr - iomem_phys;
	virt_addr = iomem_offset + (u32) iomem_virt;

	SPS_DBG("sps:sps_mem_free_io.phys=0x%x.virt=0x%x.size=0x%x.",
		phys_addr, virt_addr, bytes);

	gen_pool_free(pool, virt_addr, bytes);
	total_free += bytes;
}
/**
 * Initialize a BAM pipe
 *
 * Resets the pipe, programs its interrupt mask, direction, mode,
 * event threshold and descriptor FIFO, then enables it. The register
 * write order follows the hardware bring-up sequence; do not reorder.
 *
 * @base - BAM base virtual address
 * @pipe - pipe index
 * @param - pipe configuration parameters
 * @ee - execution environment index whose IRQ mask gains this pipe
 *
 * @return 0
 */
int bam_pipe_init(void *base, u32 pipe, struct bam_pipe_parameters *param,
		  u32 ee)
{
	/* Reset the BAM pipe */
	bam_write_reg(base, P_RST(pipe), 1);
	/* No delay needed */
	bam_write_reg(base, P_RST(pipe), 0);

	/* Enable the Pipe Interrupt at the BAM level */
	bam_write_reg_field(base, IRQ_SRCS_MSK_EE(ee), (1 << pipe), 1);

	/* Per-pipe interrupt sources selected by the caller */
	bam_write_reg(base, P_IRQ_EN(pipe), param->pipe_irq_mask);

	bam_write_reg_field(base, P_CTRL(pipe), P_DIRECTION, param->dir);
	bam_write_reg_field(base, P_CTRL(pipe), P_SYS_MODE, param->mode);

	bam_write_reg(base, P_EVNT_GEN_TRSHLD(pipe), param->event_threshold);

	/* Descriptor FIFO location and size */
	bam_write_reg(base, P_DESC_FIFO_ADDR(pipe), param->desc_base);
	bam_write_reg_field(base, P_FIFO_SIZES(pipe), P_DESC_FIFO_SIZE,
			    param->desc_size);

	bam_write_reg_field(base, P_CTRL(pipe), P_SYS_STRM,
			    param->stream_mode);

	/* BAM-to-BAM pipes also need a data FIFO and a peer event address */
	if (param->mode == BAM_PIPE_MODE_BAM2BAM) {
		/* Event register of the peer pipe, in the peer BAM */
		u32 peer_dest_addr = param->peer_phys_addr +
				     P_EVNT_REG(param->peer_pipe);

		bam_write_reg(base, P_DATA_FIFO_ADDR(pipe),
			      param->data_base);
		bam_write_reg_field(base, P_FIFO_SIZES(pipe),
				    P_DATA_FIFO_SIZE, param->data_size);

		bam_write_reg(base, P_EVNT_DEST_ADDR(pipe), peer_dest_addr);

		SPS_DBG("sps:bam=0x%x(va).pipe=%d.peer_bam=0x%x."
			"peer_pipe=%d.\n",
			(u32) base, pipe,
			(u32) param->peer_phys_addr,
			param->peer_pipe);
	}

	/* Pipe Enable - at last */
	bam_write_reg_field(base, P_CTRL(pipe), P_EN, 1);

	return 0;
}
/**
 * Initialize driver memory module
 *
 * When the target uses pipe memory (d_type 0 or 2, or imem set), maps
 * the pipe-memory region; always creates the gen_pool allocator, and
 * seeds it with the mapped region when pipe memory is in use.
 *
 * @pipemem_phys_base - physical base address of pipe memory
 * @pipemem_size - size of pipe memory in bytes
 *
 * @return 0 on success, SPS_ERROR or negative errno on error
 */
int sps_mem_init(phys_addr_t pipemem_phys_base, u32 pipemem_size)
{
	/* Pipe memory is used on d_type 0/2 targets or when imem is set */
	bool use_pipe_mem = (d_type == 0) || (d_type == 2) || imem;
	/* 2^8=128. The desc-fifo and data-fifo minimal allocation. */
	int min_alloc_order = 8;
	int res;

	if (use_pipe_mem) {
		iomem_phys = pipemem_phys_base;
		iomem_size = pipemem_size;

		if (iomem_phys == 0) {
			SPS_ERR("sps:Invalid Pipe-Mem address");
			return SPS_ERROR;
		}

		iomem_virt = ioremap(iomem_phys, iomem_size);
		if (!iomem_virt) {
			SPS_ERR("sps:Failed to IO map pipe memory.\n");
			return -ENOMEM;
		}

		iomem_offset = 0;
		SPS_DBG("sps:sps_mem_init.iomem_phys=%pa,iomem_virt=0x%p.",
			&iomem_phys, iomem_virt);
	}

	pool = gen_pool_create(min_alloc_order, nid);
	if (!pool) {
		SPS_ERR("sps:Failed to create a new memory pool.\n");
		/* NOTE(review): iomem_virt is not unmapped here on failure */
		return -ENOMEM;
	}

	if (use_pipe_mem) {
		res = gen_pool_add(pool, (uintptr_t)iomem_virt, iomem_size,
				   nid);
		if (res)
			return res;
	}

	return 0;
}
/**
 * Initialize driver memory module
 *
 * When the target uses pipe memory (d_type 0 or 2), maps the
 * pipe-memory region; always creates the gen_pool allocator, and seeds
 * it with the mapped region when pipe memory is in use.
 *
 * @pipemem_phys_base - physical base address of pipe memory
 * @pipemem_size - size of pipe memory in bytes
 *
 * @return 0 on success, SPS_ERROR or negative errno on error
 */
int sps_mem_init(u32 pipemem_phys_base, u32 pipemem_size)
{
	int res;
	/* 2^8=128. The desc-fifo and data-fifo minimal allocation. */
	int min_alloc_order = 8;

	if ((d_type == 0) || (d_type == 2)) {
		iomem_phys = pipemem_phys_base;
		iomem_size = pipemem_size;

		if (iomem_phys == 0) {
			SPS_ERR("sps:Invalid Pipe-Mem address");
			return SPS_ERROR;
		} else {
			iomem_virt = ioremap(iomem_phys, iomem_size);
			if (!iomem_virt) {
				SPS_ERR("sps:Failed to IO map pipe memory.\n");
				return -ENOMEM;
			}
		}

		iomem_offset = 0;
		SPS_DBG("sps:sps_mem_init.iomem_phys=0x%x,iomem_virt=0x%x.",
			iomem_phys, (u32) iomem_virt);
	}

	pool = gen_pool_create(min_alloc_order, nid);
	if (!pool) {
		SPS_ERR("sps:Failed to create a new memory pool.\n");
		return -ENOMEM;
	}

	if ((d_type == 0) || (d_type == 2)) {
		res = gen_pool_add(pool, (u32) iomem_virt, iomem_size, nid);
		if (res)
			return res;
	}

	return 0;
}
/**
 * Allocate I/O (pipe) memory
 *
 * Allocates @bytes from the pipe-memory gen_pool and translates the
 * resulting virtual address to its physical address.
 *
 * @bytes - number of bytes to allocate
 *
 * @return physical address on success, SPS_ADDR_INVALID on failure
 */
u32 sps_mem_alloc_io(u32 bytes)
{
	u32 virt_addr;
	u32 phys_addr;

	virt_addr = gen_pool_alloc(pool, bytes);
	if (!virt_addr) {
		SPS_ERR("sps:gen_pool_alloc %d bytes fail.", bytes);
		return SPS_ADDR_INVALID;
	}

	/* Translate virt -> phys via the module-level mapping offset */
	iomem_offset = virt_addr - (u32) iomem_virt;
	phys_addr = iomem_phys + iomem_offset;
	total_alloc += bytes;

	SPS_DBG("sps:sps_mem_alloc_io.phys=0x%x.virt=0x%x.size=0x%x.",
		phys_addr, virt_addr, bytes);

	return phys_addr;
}
/**
 * Initialize the connection mapping table
 *
 * Records the BSP-supplied mapping table and counts its entries. The
 * table must be terminated by an entry whose source has both
 * SPS_CLASS_INVALID and SPS_ADDR_INVALID.
 *
 * @map_props - pointer to the mapping table, or NULL for none
 * @options - mapping options to record
 *
 * @return 0
 */
int sps_map_init(const struct sps_map *map_props, u32 options)
{
	const struct sps_map *cur;

	/* Are there any connection mappings? */
	memset(&sps_maps, 0, sizeof(sps_maps));
	if (map_props == NULL)
		return 0;

	/* Init the map table */
	sps_maps.maps = map_props;
	sps_maps.options = options;

	/* Count entries up to (not including) the terminator */
	cur = sps_maps.maps;
	while (!(cur->src.periph_class == SPS_CLASS_INVALID &&
		 cur->src.periph_phy_addr == SPS_ADDR_INVALID)) {
		cur++;
		sps_maps.num_maps++;
	}

	SPS_DBG("sps: %d mappings", sps_maps.num_maps);

	return 0;
}
/**
 * Write register with debug info.
 *
 * @base - bam base virtual address.
 * @offset - register offset.
 * @val - value to write.
 *
 */
static inline void bam_write_reg(void *base, u32 offset, u32 val)
{
	iowrite32(val, base + offset);
	/* NOTE(review): (u32) pointer cast truncates on 64-bit builds */
	SPS_DBG("sps:bam 0x%x(va) write reg 0x%x w_val 0x%x.\n",
		(u32) base, offset, val);
}
/**
 * Initialize BAM DMA device
 *
 * Claims the single BAM-DMA device slot, resolves the BAM from the
 * handle, maps its register space if the BSP did not provide a virtual
 * address, enables the device, and (when locally controlled) disables
 * all DMA channels. On any failure the slot is released and any mapping
 * created here is undone.
 *
 * @h - BAM device handle (as returned by sps_register_bam_device)
 *
 * @return 0 on success, SPS_ERROR on error
 */
int sps_dma_device_init(u32 h)
{
	struct bamdma_device *dev;
	struct sps_bam_props *props;
	u32 chan;
	int result = SPS_ERROR;

	mutex_lock(&bam_dma_lock);

	/* Find a free BAM-DMA device slot (only slot 0 is supported) */
	dev = NULL;
	if (bam_dma_dev[0].bam != NULL) {
		SPS_ERR("BAM-DMA BAM device already initialized.");
		goto exit_err;
	} else {
		dev = &bam_dma_dev[0];
	}

	/* Record BAM */
	memset(dev, 0, sizeof(*dev));
	dev->h = h;
	dev->bam = sps_h2bam(h);
	if (dev->bam == NULL) {
		SPS_ERR("BAM-DMA BAM device is not found from the handle.");
		goto exit_err;
	}

	/* Map the BAM DMA device into virtual space, if necessary */
	props = &dev->bam->props;
	dev->phys_addr = props->periph_phys_addr;
	if (props->periph_virt_addr != NULL) {
		/* BSP already mapped it; do not iounmap on cleanup */
		dev->virt_addr = props->periph_virt_addr;
		dev->virtual_mapped = false;
	} else {
		if (props->periph_virt_size == 0) {
			SPS_ERR("Unable to map BAM DMA IO memory: %x %x",
				dev->phys_addr, props->periph_virt_size);
			goto exit_err;
		}

		dev->virt_addr = ioremap(dev->phys_addr,
					 props->periph_virt_size);
		if (dev->virt_addr == NULL) {
			SPS_ERR("Unable to map BAM DMA IO memory: %x %x",
				dev->phys_addr, props->periph_virt_size);
			goto exit_err;
		}
		/* Remember that cleanup must iounmap this mapping */
		dev->virtual_mapped = true;
	}
	dev->hwio = (void *) dev->virt_addr;

	/* Is the BAM-DMA device locally controlled? */
	if ((props->manage & SPS_BAM_MGR_DEVICE_REMOTE) == 0) {
		SPS_DBG("BAM-DMA is controlled locally: %x",
			dev->phys_addr);
		dev->local = true;
	} else {
		SPS_DBG("BAM-DMA is controlled remotely: %x",
			dev->phys_addr);
		dev->local = false;
	}

	/*
	 * Enable the BAM DMA and determine the number of pipes/channels.
	 * Leave the BAM-DMA enabled, since it is always a shared device.
	 */
	if (sps_dma_device_enable(dev))
		goto exit_err;

	dev->num_pipes = dev->bam->props.num_pipes;

	/* Disable all channels (two pipes per channel) */
	if (dev->local)
		for (chan = 0; chan < (dev->num_pipes / 2); chan++) {
			dma_write_reg_field(dev->virt_addr,
					    DMA_CHNL_CONFIG(chan),
					    DMA_CHNL_ENABLE, 0);
		}

	result = 0;
exit_err:
	/* On failure: undo our mapping (if any) and release the slot */
	if (result) {
		if (dev != NULL) {
			if (dev->virtual_mapped)
				iounmap(dev->virt_addr);

			dev->bam = NULL;
		}
	}

	mutex_unlock(&bam_dma_lock);

	return result;
}
/**
 * Allocate a BAM DMA channel
 *
 * Translates the requested threshold/priority to hardware values, finds
 * a free channel (a pair of adjacent pipes: even = dest, odd = src),
 * marks it externally allocated, stores the configuration for later
 * activation, and reports the pipe indices to the client.
 *
 * @alloc - allocation request (device handle, threshold, priority)
 * @chan_info - output: device handle and dest/src pipe indices
 *
 * @return 0 on success, SPS_ERROR on error
 */
int sps_alloc_dma_chan(const struct sps_alloc_dma_chan *alloc,
		       struct sps_dma_chan *chan_info)
{
	struct bamdma_device *dev;
	struct bamdma_chan *chan;
	u32 pipe_index;
	enum bam_dma_thresh_dma thresh = (enum bam_dma_thresh_dma) 0;
	enum bam_dma_weight_dma weight = (enum bam_dma_weight_dma) 0;
	int result = SPS_ERROR;

	if (alloc == NULL || chan_info == NULL) {
		SPS_ERR("sps_alloc_dma_chan. invalid parameters");
		return SPS_ERROR;
	}

	/* Translate threshold and priority to hwio values */
	if (alloc->threshold != SPS_DMA_THRESHOLD_DEFAULT) {
		/* Round the byte threshold down to a supported step */
		if (alloc->threshold >= 512)
			thresh = BAM_DMA_THRESH_512;
		else if (alloc->threshold >= 256)
			thresh = BAM_DMA_THRESH_256;
		else if (alloc->threshold >= 128)
			thresh = BAM_DMA_THRESH_128;
		else
			thresh = BAM_DMA_THRESH_64;
	}

	weight = alloc->priority;

	if ((u32)alloc->priority > (u32)BAM_DMA_WEIGHT_HIGH) {
		SPS_ERR("BAM-DMA: invalid priority: %x", alloc->priority);
		return SPS_ERROR;
	}

	mutex_lock(&bam_dma_lock);

	dev = sps_dma_find_device(alloc->dev);
	if (dev == NULL) {
		SPS_ERR("BAM-DMA: invalid BAM handle: %x", alloc->dev);
		goto exit_err;
	}

	/*
	 * Search for a free set of pipes. `chan` advances in lockstep with
	 * pipe_index (one channel per two pipes); on break it points at
	 * the free slot.
	 */
	for (pipe_index = 0, chan = dev->chans;
	     pipe_index < dev->num_pipes; pipe_index += 2, chan++) {
		if (chan->state == DMA_CHAN_STATE_FREE) {
			/* Just check pipes for safety */
			if (dev->pipes[pipe_index] != PIPE_INACTIVE ||
			    dev->pipes[pipe_index + 1] != PIPE_INACTIVE) {
				SPS_ERR("BAM-DMA: channel %d state error:%d %d",
					pipe_index / 2,
					dev->pipes[pipe_index],
					dev->pipes[pipe_index + 1]);
				goto exit_err;
			}
			break;	/* Found free pipe */
		}
	}

	if (pipe_index >= dev->num_pipes) {
		SPS_ERR("BAM-DMA: no free channel. num_pipes = %d",
			dev->num_pipes);
		goto exit_err;
	}

	chan->state = DMA_CHAN_STATE_ALLOC_EXT;

	/* Store config values for use when pipes are activated */
	/* (chan already equals &dev->chans[pipe_index / 2] here) */
	chan = &dev->chans[pipe_index / 2];
	chan->threshold = alloc->threshold;
	chan->thresh = thresh;
	chan->priority = alloc->priority;
	chan->weight = weight;

	SPS_DBG("sps_alloc_dma_chan. pipe %d.\n", pipe_index);

	/* Report allocated pipes to client */
	chan_info->dev = dev->h;
	/* Dest/input/write pipex */
	chan_info->dest_pipe_index = pipe_index;
	/* Source/output/read pipe */
	chan_info->src_pipe_index = pipe_index + 1;

	result = 0;
exit_err:
	mutex_unlock(&bam_dma_lock);

	return result;
}
/**
 * Initialize a BAM pipe
 *
 * Resets the pipe, programs its interrupt mask, direction, mode,
 * event threshold, descriptor FIFO (and, for BAM-to-BAM mode, data
 * FIFO and peer event address), then enables it. The register write
 * order follows the hardware bring-up sequence; do not reorder.
 *
 * @base - BAM base virtual address
 * @pipe - pipe index
 * @param - pipe configuration parameters
 * @ee - execution environment index whose IRQ mask gains this pipe
 *
 * @return 0
 */
int bam_pipe_init(void *base, u32 pipe, struct bam_pipe_parameters *param,
		  u32 ee)
{
	SPS_DBG2("sps:%s:bam=0x%x(va).pipe=%d.", __func__, (u32) base, pipe);

	/* Reset the BAM pipe */
	bam_write_reg(base, P_RST(pipe), 1);
	/* No delay needed */
	bam_write_reg(base, P_RST(pipe), 0);

	/* Enable the Pipe Interrupt at the BAM level */
	bam_write_reg_field(base, IRQ_SRCS_MSK_EE(ee), (1 << pipe), 1);

	/* Per-pipe interrupt sources selected by the caller */
	bam_write_reg(base, P_IRQ_EN(pipe), param->pipe_irq_mask);

	bam_write_reg_field(base, P_CTRL(pipe), P_DIRECTION, param->dir);
	bam_write_reg_field(base, P_CTRL(pipe), P_SYS_MODE, param->mode);

	bam_write_reg(base, P_EVNT_GEN_TRSHLD(pipe), param->event_threshold);

	/* Descriptor FIFO location and size */
	bam_write_reg(base, P_DESC_FIFO_ADDR(pipe), param->desc_base);
	bam_write_reg_field(base, P_FIFO_SIZES(pipe), P_DESC_FIFO_SIZE,
			    param->desc_size);

	bam_write_reg_field(base, P_CTRL(pipe), P_SYS_STRM,
			    param->stream_mode);

#ifdef CONFIG_SPS_SUPPORT_NDP_BAM
	/* NDP BAMs additionally support pipe lock groups */
	bam_write_reg_field(base, P_CTRL(pipe), P_LOCK_GROUP,
			    param->lock_group);

	SPS_DBG("sps:bam=0x%x(va).pipe=%d.lock_group=%d.\n",
		(u32) base, pipe, param->lock_group);
#endif

	/* BAM-to-BAM pipes also need a data FIFO and a peer event address */
	if (param->mode == BAM_PIPE_MODE_BAM2BAM) {
		/* Event register of the peer pipe, in the peer BAM */
		u32 peer_dest_addr = param->peer_phys_addr +
				     P_EVNT_REG(param->peer_pipe);

		bam_write_reg(base, P_DATA_FIFO_ADDR(pipe),
			      param->data_base);
		bam_write_reg_field(base, P_FIFO_SIZES(pipe),
				    P_DATA_FIFO_SIZE, param->data_size);

		bam_write_reg(base, P_EVNT_DEST_ADDR(pipe), peer_dest_addr);

		SPS_DBG2("sps:bam=0x%x(va).pipe=%d.peer_bam=0x%x."
			"peer_pipe=%d.\n",
			(u32) base, pipe,
			(u32) param->peer_phys_addr,
			param->peer_pipe);

#ifdef CONFIG_SPS_SUPPORT_NDP_BAM
		bam_write_reg_field(base, P_CTRL(pipe), P_WRITE_NWD,
				    param->write_nwd);

		SPS_DBG("sps:%s WRITE_NWD bit for this bam2bam pipe.",
			param->write_nwd ? "Set" : "Do not set");
#endif
	}

	/* Pipe Enable - at last */
	bam_write_reg_field(base, P_CTRL(pipe), P_EN, 1);

	return 0;
}
/**
 * Write register with debug info.
 *
 * @base - bam base virtual address.
 * @offset - register offset.
 * @val - value to write.
 *
 */
static inline void dma_write_reg(void *base, u32 offset, u32 val)
{
	iowrite32(val, base + offset);
	SPS_DBG("bamdma: write reg 0x%x w_val 0x%x.", offset, val);
}