static int rsnd_ssi_quit(struct rsnd_mod *mod,
			 struct rsnd_dai_stream *io,
			 struct rsnd_priv *priv)
{
	struct rsnd_ssi *ssi = rsnd_mod_to_ssi(mod);
	struct device *dev = rsnd_priv_to_dev(priv);

	if (!rsnd_ssi_is_run_mods(mod, io))
		return 0;

	if (!ssi->usrcnt) {
		dev_err(dev, "%s[%d] usrcnt error\n",
			rsnd_mod_name(mod), rsnd_mod_id(mod));
		return -EIO;
	}

	if (!rsnd_ssi_is_parent(mod, io))
		ssi->cr_own = 0;

	rsnd_ssi_master_clk_stop(mod, io);

	rsnd_mod_power_off(mod);

	ssi->usrcnt--;

	return 0;
}
static int rsnd_ssi_common_probe(struct rsnd_mod *mod,
				 struct rsnd_dai_stream *io,
				 struct rsnd_priv *priv)
{
	struct device *dev = rsnd_priv_to_dev(priv);
	struct rsnd_ssi *ssi = rsnd_mod_to_ssi(mod);
	int ret;

	/*
	 * SSIP/SSIU/IRQ are not needed on
	 * SSI Multi slaves
	 */
	if (rsnd_ssi_is_multi_slave(mod, io))
		return 0;

	/*
	 * It can't judge ssi parent at this point
	 * see rsnd_ssi_pcm_new()
	 */

	ret = rsnd_ssiu_attach(io, mod);
	if (ret < 0)
		return ret;

	ret = devm_request_irq(dev, ssi->irq,
			       rsnd_ssi_interrupt,
			       IRQF_SHARED,
			       dev_name(dev), mod);

	return ret;
}
int rsnd_dma_probe(struct platform_device *pdev,
		   const struct rsnd_of_data *of_data,
		   struct rsnd_priv *priv)
{
	struct device *dev = rsnd_priv_to_dev(priv);
	struct rsnd_dma_ctrl *dmac;
	struct resource *res;

	/*
	 * for Gen1
	 */
	if (rsnd_is_gen1(priv))
		return 0;

	/*
	 * for Gen2
	 */
	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "audmapp");
	dmac = devm_kzalloc(dev, sizeof(*dmac), GFP_KERNEL);
	if (!dmac || !res) {
		dev_err(dev, "dma allocate failed\n");
		return 0; /* it will be PIO mode */
	}

	dmac->dmapp_num = 0;
	dmac->base = devm_ioremap_resource(dev, res);
	if (IS_ERR(dmac->base))
		return PTR_ERR(dmac->base);

	priv->dma = dmac;

	return 0;
}
static dma_addr_t rsnd_gen2_dma_addr(struct rsnd_dai_stream *io,
				     struct rsnd_mod *mod,
				     int is_play, int is_from)
{
	struct rsnd_priv *priv = rsnd_io_to_priv(io);
	struct device *dev = rsnd_priv_to_dev(priv);
	phys_addr_t ssi_reg = rsnd_gen_get_phy_addr(priv, RSND_GEN2_SSI);
	phys_addr_t src_reg = rsnd_gen_get_phy_addr(priv, RSND_GEN2_SCU);
	int is_ssi = !!(rsnd_io_to_mod_ssi(io) == mod);
	int use_src = !!rsnd_io_to_mod_src(io);
	int use_dvc = !!rsnd_io_to_mod_dvc(io);
	int id = rsnd_mod_id(mod);
	struct dma_addr {
		dma_addr_t out_addr;
		dma_addr_t in_addr;
	} dma_addrs[3][2][3] = {
		/* SRC */
		/* Capture */
		{{{ 0,				0 },
		  { RDMA_SRC_O_N(src, id),	RDMA_SRC_I_P(src, id) },
		  { RDMA_CMD_O_N(src, id),	RDMA_SRC_I_P(src, id) } },
		 /* Playback */
		 {{ 0,				0, },
		  { RDMA_SRC_O_P(src, id),	RDMA_SRC_I_N(src, id) },
		  { RDMA_CMD_O_P(src, id),	RDMA_SRC_I_N(src, id) } }
		},
		/* SSI */
		/* Capture */
		{{{ RDMA_SSI_O_N(ssi, id),	0 },
		  { RDMA_SSIU_O_P(ssi, id),	0 },
		  { RDMA_SSIU_O_P(ssi, id),	0 } },
		 /* Playback */
		 {{ 0,				RDMA_SSI_I_N(ssi, id) },
		  { 0,				RDMA_SSIU_I_P(ssi, id) },
		  { 0,				RDMA_SSIU_I_P(ssi, id) } }
		},
		/* SSIU */
		/* Capture */
		{{{ RDMA_SSIU_O_N(ssi, id),	0 },
		  { RDMA_SSIU_O_P(ssi, id),	0 },
		  { RDMA_SSIU_O_P(ssi, id),	0 } },
		 /* Playback */
		 {{ 0,				RDMA_SSIU_I_N(ssi, id) },
		  { 0,				RDMA_SSIU_I_P(ssi, id) },
		  { 0,				RDMA_SSIU_I_P(ssi, id) } }
		},
	};

	/* it shouldn't happen */
	if (use_dvc && !use_src)
		dev_err(dev, "DVC is selected without SRC\n");

	/* use SSIU or SSI ? */
	if (is_ssi && rsnd_ssi_use_busif(io, mod))
		is_ssi++;

	return (is_from) ?
		dma_addrs[is_ssi][is_play][use_src + use_dvc].out_addr :
		dma_addrs[is_ssi][is_play][use_src + use_dvc].in_addr;
}
static int rsnd_dmaen_init(struct rsnd_dai_stream *io, struct rsnd_dma *dma, int id,
			   struct rsnd_mod *mod_from, struct rsnd_mod *mod_to)
{
	struct rsnd_dmaen *dmaen = rsnd_dma_to_dmaen(dma);
	struct rsnd_priv *priv = rsnd_io_to_priv(io);
	struct device *dev = rsnd_priv_to_dev(priv);
	struct dma_slave_config cfg = {};
	int is_play = rsnd_io_is_play(io);
	int ret;

	if (dmaen->chan) {
		dev_err(dev, "it already has dma channel\n");
		return -EIO;
	}

	if (dev->of_node) {
		dmaen->chan = rsnd_dmaen_request_channel(io, mod_from, mod_to);
	} else {
		dma_cap_mask_t mask;

		dma_cap_zero(mask);
		dma_cap_set(DMA_SLAVE, mask);

		dmaen->chan = dma_request_channel(mask, shdma_chan_filter,
						  (void *)id);
	}
	if (IS_ERR_OR_NULL(dmaen->chan)) {
		dmaen->chan = NULL;
		dev_err(dev, "can't get dma channel\n");
		goto rsnd_dma_channel_err;
	}

	cfg.direction	= is_play ? DMA_MEM_TO_DEV : DMA_DEV_TO_MEM;
	cfg.src_addr	= dma->src_addr;
	cfg.dst_addr	= dma->dst_addr;
	cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;

	dev_dbg(dev, "dma : %pad -> %pad\n",
		&cfg.src_addr, &cfg.dst_addr);

	ret = dmaengine_slave_config(dmaen->chan, &cfg);
	if (ret < 0)
		goto rsnd_dma_init_err;

	return 0;

rsnd_dma_init_err:
	rsnd_dma_quit(io, dma);
rsnd_dma_channel_err:

	/*
	 * DMA failed. try to PIO mode
	 * see
	 *	rsnd_ssi_fallback()
	 *	rsnd_rdai_continuance_probe()
	 */
	return -EAGAIN;
}
void rsnd_dma_start(struct rsnd_dma *dma)
{
	struct rsnd_mod *mod = rsnd_dma_to_mod(dma);
	struct rsnd_priv *priv = rsnd_mod_to_priv(mod);
	struct rsnd_dai_stream *io = rsnd_mod_to_io(mod);
	struct snd_pcm_substream *substream = io->substream;
	struct device *dev = rsnd_priv_to_dev(priv);
	struct dma_async_tx_descriptor *desc;

	desc = dmaengine_prep_dma_cyclic(dma->chan,
					 (dma->addr) ? dma->addr :
					 substream->runtime->dma_addr,
					 snd_pcm_lib_buffer_bytes(substream),
					 snd_pcm_lib_period_bytes(substream),
					 dma->dir,
					 DMA_PREP_INTERRUPT | DMA_CTRL_ACK);

	if (!desc) {
		dev_err(dev, "dmaengine_prep_dma_cyclic() fail\n");
		return;
	}

	desc->callback		= rsnd_dma_complete;
	desc->callback_param	= dma;

	if (dmaengine_submit(desc) < 0) {
		dev_err(dev, "dmaengine_submit() fail\n");
		return;
	}

	dma_async_issue_pending(dma->chan);
}
int rsnd_ssiu_probe(struct rsnd_priv *priv)
{
	struct device *dev = rsnd_priv_to_dev(priv);
	struct rsnd_ssiu *ssiu;
	static struct rsnd_mod_ops *ops;
	int i, nr, ret;

	/* same number as SSI */
	nr	= priv->ssi_nr;
	ssiu	= devm_kzalloc(dev, sizeof(*ssiu) * nr, GFP_KERNEL);
	if (!ssiu)
		return -ENOMEM;

	priv->ssiu	= ssiu;
	priv->ssiu_nr	= nr;

	if (rsnd_is_gen1(priv))
		ops = &rsnd_ssiu_ops_gen1;
	else
		ops = &rsnd_ssiu_ops_gen2;

	for_each_rsnd_ssiu(ssiu, priv, i) {
		ret = rsnd_mod_init(priv, rsnd_mod_get(ssiu),
				    ops, NULL, rsnd_mod_get_status,
				    RSND_MOD_SSIU, i);
		if (ret)
			return ret;
	}

	return 0;
}
static void rsnd_adg_get_clkin(struct rsnd_priv *priv,
			       struct rsnd_adg *adg)
{
	struct device *dev = rsnd_priv_to_dev(priv);
	struct clk *clk;
	static const char * const clk_name[] = {
		[CLKA]	= "clk_a",
		[CLKB]	= "clk_b",
		[CLKC]	= "clk_c",
		[CLKI]	= "clk_i",
	};
	int i, ret;

	for (i = 0; i < CLKMAX; i++) {
		clk = devm_clk_get(dev, clk_name[i]);
		adg->clk[i] = IS_ERR(clk) ? NULL : clk;
	}

	for_each_rsnd_clk(clk, adg, i) {
		ret = clk_prepare_enable(clk);
		if (ret < 0)
			dev_warn(dev, "can't use clk %d\n", i);

		dev_dbg(dev, "clk %d : %p : %ld\n", i, clk, clk_get_rate(clk));
	}
}
static u32 rsnd_dmapp_get_id(struct rsnd_dai_stream *io,
			     struct rsnd_mod *mod)
{
	struct rsnd_mod *ssi = rsnd_io_to_mod_ssi(io);
	struct rsnd_mod *src = rsnd_io_to_mod_src(io);
	struct rsnd_mod *dvc = rsnd_io_to_mod_dvc(io);
	const u8 *entry = NULL;
	int id = rsnd_mod_id(mod);
	int size = 0;

	if (mod == ssi) {
		entry = gen2_id_table_ssiu;
		size = ARRAY_SIZE(gen2_id_table_ssiu);
	} else if (mod == src) {
		entry = gen2_id_table_scu;
		size = ARRAY_SIZE(gen2_id_table_scu);
	} else if (mod == dvc) {
		entry = gen2_id_table_cmd;
		size = ARRAY_SIZE(gen2_id_table_cmd);
	}

	if ((!entry) || (size <= id)) {
		struct device *dev = rsnd_priv_to_dev(rsnd_io_to_priv(io));

		dev_err(dev, "unknown connection (%s[%d])\n",
			rsnd_mod_name(mod), rsnd_mod_id(mod));

		/* use non-prohibited SRS number as error */
		return 0x00; /* SSI00 */
	}

	return entry[id];
}
static int rsnd_src_probe_gen2(struct rsnd_mod *mod,
			       struct rsnd_dai *rdai,
			       struct rsnd_dai_stream *io)
{
	struct rsnd_priv *priv = rsnd_mod_to_priv(mod);
	struct rcar_snd_info *info = rsnd_priv_to_info(priv);
	struct rsnd_src *src = rsnd_mod_to_src(mod);
	struct rsnd_mod *ssi = rsnd_ssi_mod_get(priv, rsnd_mod_id(mod));
	struct device *dev = rsnd_priv_to_dev(priv);
	int ret;
	int is_play;

	if (info->dai_info)
		is_play = rsnd_info_is_playback(priv, src);
	else
		is_play = rsnd_ssi_is_play(ssi);

	ret = rsnd_dma_init(priv,
			    rsnd_mod_to_dma(mod),
			    is_play,
			    src->info->dma_id);
	if (ret < 0)
		dev_err(dev, "SRC DMA failed\n");

	return ret;
}
static void rsnd_dma_do_work(struct work_struct *work)
{
	struct rsnd_dma *dma = container_of(work, struct rsnd_dma, work);
	struct rsnd_priv *priv = dma->priv;
	struct device *dev = rsnd_priv_to_dev(priv);
	struct dma_async_tx_descriptor *desc;
	dma_addr_t buf;
	size_t len;
	int i;

	for (i = 0; i < dma->submit_loop; i++) {

		if (dma->inquiry(dma, &buf, &len) < 0)
			return;

		desc = dmaengine_prep_slave_single(
			dma->chan, buf, len, dma->dir,
			DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
		if (!desc) {
			dev_err(dev, "dmaengine_prep_slave_single() fail\n");
			return;
		}

		desc->callback		= rsnd_dma_complete;
		desc->callback_param	= dma;

		if (dmaengine_submit(desc) < 0) {
			dev_err(dev, "dmaengine_submit() fail\n");
			return;
		}

		dma_async_issue_pending(dma->chan);
	}
}
void __iomem *rsnd_gen_reg_get(struct rsnd_priv *priv,
			       struct rsnd_mod *mod,
			       enum rsnd_reg reg)
{
	struct rsnd_gen *gen = rsnd_priv_to_gen(priv);
	struct device *dev = rsnd_priv_to_dev(priv);
	int index;
	u32 offset_id, offset_adr;

	if (reg >= RSND_REG_MAX) {
		dev_err(dev, "rsnd_reg reg error\n");
		return NULL;
	}

	index		= gen->reg_map[reg].index;
	offset_id	= gen->reg_map[reg].offset_id;
	offset_adr	= gen->reg_map[reg].offset_adr;

	if (index < 0) {
		dev_err(dev, "unsupported reg access %d\n", reg);
		return NULL;
	}

	if (offset_id && mod)
		offset_id *= rsnd_mod_id(mod);

	/*
	 * index/offset were set on gen1/gen2
	 */
	return gen->base[index] + offset_id + offset_adr;
}
int rsnd_gen_probe(struct platform_device *pdev,
		   struct rcar_snd_info *info,
		   struct rsnd_priv *priv)
{
	struct device *dev = rsnd_priv_to_dev(priv);
	struct rsnd_gen *gen;
	int i;

	gen = devm_kzalloc(dev, sizeof(*gen), GFP_KERNEL);
	if (!gen) {
		dev_err(dev, "GEN allocate failed\n");
		return -ENOMEM;
	}

	priv->gen = gen;

	/*
	 * see
	 *	rsnd_reg_get()
	 *	rsnd_gen_probe()
	 */
	for (i = 0; i < RSND_REG_MAX; i++)
		gen->reg_map[i].index = -1;

	/*
	 * init each module
	 */
	if (rsnd_is_gen1(priv))
		return rsnd_gen1_probe(pdev, info, priv);

	dev_err(dev, "unknown generation R-Car sound device\n");

	return -ENODEV;
}
/*
 *	Gen
 */
int rsnd_gen_probe(struct rsnd_priv *priv)
{
	struct device *dev = rsnd_priv_to_dev(priv);
	struct rsnd_gen *gen;
	int ret;

	gen = devm_kzalloc(dev, sizeof(*gen), GFP_KERNEL);
	if (!gen) {
		dev_err(dev, "GEN allocate failed\n");
		return -ENOMEM;
	}

	priv->gen = gen;

	ret = -ENODEV;
	if (rsnd_is_gen1(priv))
		ret = rsnd_gen1_probe(priv);
	else if (rsnd_is_gen2(priv))
		ret = rsnd_gen2_probe(priv);

	if (ret < 0)
		dev_err(dev, "unknown generation R-Car sound device\n");

	return ret;
}
static void __rsnd_adg_get_timesel_ratio(struct rsnd_priv *priv,
					 struct rsnd_dai_stream *io,
					 unsigned int target_rate,
					 unsigned int *target_val,
					 unsigned int *target_en)
{
	struct rsnd_adg *adg = rsnd_priv_to_adg(priv);
	struct device *dev = rsnd_priv_to_dev(priv);
	int idx, sel, div, step;
	unsigned int val, en;
	unsigned int min, diff;
	unsigned int sel_rate[] = {
		clk_get_rate(adg->clk[CLKA]),	/* 0000: CLKA */
		clk_get_rate(adg->clk[CLKB]),	/* 0001: CLKB */
		clk_get_rate(adg->clk[CLKC]),	/* 0010: CLKC */
		adg->rbga_rate_for_441khz,	/* 0011: RBGA */
		adg->rbgb_rate_for_48khz,	/* 0100: RBGB */
	};

	min = ~0;
	val = 0;
	en = 0;
	for (sel = 0; sel < ARRAY_SIZE(sel_rate); sel++) {
		idx = 0;
		step = 2;

		if (!sel_rate[sel])
			continue;

		for (div = 2; div <= 98304; div += step) {
			diff = abs(target_rate - sel_rate[sel] / div);
			if (min > diff) {
				val = (sel << 8) | idx;
				min = diff;
				en = 1 << (sel + 1); /* fixme */
			}

			/*
			 * step of 0_0000 / 0_0001 / 0_1101
			 * are out of order
			 */
			if ((idx > 2) && (idx % 2))
				step *= 2;
			if (idx == 0x1c) {
				div += step;
				step *= 2;
			}
			idx++;
		}
	}

	if (min == ~0) {
		dev_err(dev, "no Input clock\n");
		return;
	}

	*target_val = val;
	if (target_en)
		*target_en = en;
}
static int rsnd_ssi_dma_probe(struct rsnd_mod *mod,
			      struct rsnd_dai *rdai,
			      struct rsnd_dai_stream *io)
{
	struct rsnd_priv *priv = rsnd_mod_to_priv(mod);
	struct rsnd_ssi *ssi = rsnd_mod_to_ssi(mod);
	struct rcar_snd_info *info = rsnd_priv_to_info(priv);
	struct device *dev = rsnd_priv_to_dev(priv);
	int dma_id = ssi->info->dma_id;
	int is_play;
	int ret;

	if (info->dai_info)
		is_play = rsnd_info_is_playback(priv, ssi);
	else
		is_play = rsnd_ssi_is_play(&ssi->mod);

	ret = rsnd_dma_init(
		priv, rsnd_mod_to_dma(mod),
		is_play,
		dma_id);
	if (ret < 0)
		dev_err(dev, "SSI DMA failed\n");

	return ret;
}
int rsnd_scu_probe(struct platform_device *pdev,
		   struct rcar_snd_info *info,
		   struct rsnd_priv *priv)
{
	struct device *dev = rsnd_priv_to_dev(priv);
	struct rsnd_scu *scu;
	int i, nr;

	/*
	 * init SCU
	 */
	nr	= info->scu_info_nr;
	scu	= devm_kzalloc(dev, sizeof(*scu) * nr, GFP_KERNEL);
	if (!scu) {
		dev_err(dev, "SCU allocate failed\n");
		return -ENOMEM;
	}

	priv->scu_nr	= nr;
	priv->scu	= scu;

	for_each_rsnd_scu(scu, priv, i) {
		rsnd_mod_init(priv, &scu->mod,
			      &rsnd_scu_ops, i);
		scu->info = &info->scu_info[i];

		dev_dbg(dev, "SCU%d probed\n", i);
	}

	return 0;
}
static int rsnd_dmaen_start(struct rsnd_mod *mod,
			    struct rsnd_dai_stream *io,
			    struct rsnd_priv *priv)
{
	struct rsnd_dma *dma = rsnd_mod_to_dma(mod);
	struct rsnd_dmaen *dmaen = rsnd_dma_to_dmaen(dma);
	struct snd_pcm_substream *substream = io->substream;
	struct device *dev = rsnd_priv_to_dev(priv);
	struct dma_async_tx_descriptor *desc;
	int is_play = rsnd_io_is_play(io);

	desc = dmaengine_prep_dma_cyclic(dmaen->chan,
					 substream->runtime->dma_addr,
					 snd_pcm_lib_buffer_bytes(substream),
					 snd_pcm_lib_period_bytes(substream),
					 is_play ? DMA_MEM_TO_DEV : DMA_DEV_TO_MEM,
					 DMA_PREP_INTERRUPT | DMA_CTRL_ACK);

	if (!desc) {
		dev_err(dev, "dmaengine_prep_dma_cyclic() fail\n");
		return -EIO;
	}

	desc->callback		= rsnd_dmaen_complete;
	desc->callback_param	= rsnd_mod_get(dma);

	if (dmaengine_submit(desc) < 0) {
		dev_err(dev, "dmaengine_submit() fail\n");
		return -EIO;
	}

	dma_async_issue_pending(dmaen->chan);

	return 0;
}
static void rsnd_ssi_hw_start(struct rsnd_ssi *ssi,
			      struct rsnd_dai *rdai,
			      struct rsnd_dai_stream *io)
{
	struct rsnd_priv *priv = rsnd_mod_to_priv(&ssi->mod);
	struct device *dev = rsnd_priv_to_dev(priv);
	u32 cr;

	if (0 == ssi->usrcnt) {
		clk_enable(ssi->clk);

		if (rsnd_rdai_is_clk_master(rdai)) {
			if (rsnd_ssi_clk_from_parent(ssi))
				rsnd_ssi_hw_start(ssi->parent, rdai, io);
			else
				rsnd_ssi_master_clk_start(ssi, io);
		}
	}

	cr  =	ssi->cr_own	|
		ssi->cr_clk	|
		ssi->cr_etc	|
		EN;

	rsnd_mod_write(&ssi->mod, SSICR, cr);

	ssi->usrcnt++;

	dev_dbg(dev, "ssi%d hw started\n", rsnd_mod_id(&ssi->mod));
}
int rsnd_adg_ssi_clk_try_start(struct rsnd_mod *mod, unsigned int rate)
{
	struct rsnd_priv *priv = rsnd_mod_to_priv(mod);
	struct rsnd_adg *adg = rsnd_priv_to_adg(priv);
	struct device *dev = rsnd_priv_to_dev(priv);
	struct clk *clk;
	int i;
	u32 data;
	int sel_table[] = {
		[CLKA] = 0x1,
		[CLKB] = 0x2,
		[CLKC] = 0x3,
		[CLKI] = 0x0,
	};

	dev_dbg(dev, "request clock = %d\n", rate);

	/*
	 * find suitable clock from
	 * AUDIO_CLKA/AUDIO_CLKB/AUDIO_CLKC/AUDIO_CLKI.
	 */
	data = 0;
	for_each_rsnd_clk(clk, adg, i) {
		if (rate == clk_get_rate(clk)) {
			data = sel_table[i];
			goto found_clock;
		}
	}

	/*
	 * find divided clock from BRGA/BRGB
	 */
	if (rate == adg->rbga_rate_for_441khz) {
		data = 0x10;
		goto found_clock;
	}

	if (rate == adg->rbgb_rate_for_48khz) {
		data = 0x20;
		goto found_clock;
	}

	return -EIO;

found_clock:

	/*
	 * This "mod" = "ssi" here.
	 * we can get "ssi id" from mod
	 */
	rsnd_adg_set_ssi_clk(mod, data);

	dev_dbg(dev, "ADG: %s[%d] selects 0x%x for %d\n",
		rsnd_mod_name(mod), rsnd_mod_id(mod),
		data, rate);

	return 0;
}
int rsnd_mix_probe(struct rsnd_priv *priv)
{
	struct device_node *node;
	struct device_node *np;
	struct device *dev = rsnd_priv_to_dev(priv);
	struct rsnd_mix *mix;
	struct clk *clk;
	char name[MIX_NAME_SIZE];
	int i, nr, ret;

	/* This driver doesn't support Gen1 at this point */
	if (rsnd_is_gen1(priv))
		return 0;

	node = rsnd_mix_of_node(priv);
	if (!node)
		return 0; /* not used is not error */

	nr = of_get_child_count(node);
	if (!nr) {
		ret = -EINVAL;
		goto rsnd_mix_probe_done;
	}

	mix = devm_kcalloc(dev, nr, sizeof(*mix), GFP_KERNEL);
	if (!mix) {
		ret = -ENOMEM;
		goto rsnd_mix_probe_done;
	}

	priv->mix_nr	= nr;
	priv->mix	= mix;

	i = 0;
	ret = 0;
	for_each_child_of_node(node, np) {
		mix = rsnd_mix_get(priv, i);

		snprintf(name, MIX_NAME_SIZE, "%s.%d",
			 MIX_NAME, i);

		clk = devm_clk_get(dev, name);
		if (IS_ERR(clk)) {
			ret = PTR_ERR(clk);
			of_node_put(np);
			goto rsnd_mix_probe_done;
		}

		ret = rsnd_mod_init(priv, rsnd_mod_get(mix), &rsnd_mix_ops,
				    clk, rsnd_mod_get_status, RSND_MOD_MIX, i);
		if (ret) {
			of_node_put(np);
			goto rsnd_mix_probe_done;
		}

		i++;
	}

rsnd_mix_probe_done:
	of_node_put(node);

	return ret;
}
int rsnd_src_probe(struct platform_device *pdev,
		   const struct rsnd_of_data *of_data,
		   struct rsnd_priv *priv)
{
	struct rcar_snd_info *info = rsnd_priv_to_info(priv);
	struct device *dev = rsnd_priv_to_dev(priv);
	struct rsnd_src *src;
	struct rsnd_mod_ops *ops;
	struct clk *clk;
	char name[RSND_SRC_NAME_SIZE];
	int i, nr;

	rsnd_of_parse_src(pdev, of_data, priv);

	/*
	 * init SRC
	 */
	nr	= info->src_info_nr;
	if (!nr)
		return 0;

	src	= devm_kzalloc(dev, sizeof(*src) * nr, GFP_KERNEL);
	if (!src) {
		dev_err(dev, "SRC allocate failed\n");
		return -ENOMEM;
	}

	priv->src_nr	= nr;
	priv->src	= src;

	for_each_rsnd_src(src, priv, i) {
		snprintf(name, RSND_SRC_NAME_SIZE, "src.%d", i);

		clk = devm_clk_get(dev, name);
		if (IS_ERR(clk)) {
			snprintf(name, RSND_SRC_NAME_SIZE, "scu.%d", i);
			clk = devm_clk_get(dev, name);
		}
		if (IS_ERR(clk))
			return PTR_ERR(clk);

		src->info = &info->src_info[i];
		src->clk = clk;

		ops = &rsnd_src_non_ops;
		if (rsnd_src_hpbif_is_enable(src)) {
			if (rsnd_is_gen1(priv))
				ops = &rsnd_src_gen1_ops;
			if (rsnd_is_gen2(priv))
				ops = &rsnd_src_gen2_ops;
		}

		rsnd_mod_init(priv, &src->mod, ops, RSND_MOD_SRC, i);

		dev_dbg(dev, "SRC%d probed\n", i);
	}

	return 0;
}
/*
 *	SSI mod common functions
 */
static int rsnd_ssi_init(struct rsnd_mod *mod,
			 struct rsnd_dai *rdai,
			 struct rsnd_dai_stream *io)
{
	struct rsnd_ssi *ssi = rsnd_mod_to_ssi(mod);
	struct rsnd_priv *priv = rsnd_mod_to_priv(mod);
	struct device *dev = rsnd_priv_to_dev(priv);
	struct snd_pcm_runtime *runtime = rsnd_io_to_runtime(io);
	u32 cr;

	cr = FORCE;

	/*
	 * always use 32bit system word for easy clock calculation.
	 * see also rsnd_ssi_master_clk_enable()
	 */
	cr |= SWL_32;

	/*
	 * init clock settings for SSICR
	 */
	switch (runtime->sample_bits) {
	case 16:
		cr |= DWL_16;
		break;
	case 32:
		cr |= DWL_24;
		break;
	default:
		return -EIO;
	}

	if (rdai->bit_clk_inv)
		cr |= SCKP;
	if (rdai->frm_clk_inv)
		cr |= SWSP;
	if (rdai->data_alignment)
		cr |= SDTA;
	if (rdai->sys_delay)
		cr |= DEL;
	if (rsnd_dai_is_play(rdai, io))
		cr |= TRMD;

	/*
	 * set ssi parameter
	 */
	ssi->rdai	= rdai;
	ssi->io		= io;
	ssi->cr_own	= cr;
	ssi->err	= -1; /* ignore 1st error */

	rsnd_ssi_mode_set(priv, rdai, ssi);

	dev_dbg(dev, "%s.%d init\n", rsnd_mod_name(mod), rsnd_mod_id(mod));

	return 0;
}
void rsnd_mod_make_sure(struct rsnd_mod *mod, enum rsnd_mod_type type)
{
	if (mod->type != type) {
		struct rsnd_priv *priv = rsnd_mod_to_priv(mod);
		struct device *dev = rsnd_priv_to_dev(priv);

		dev_warn(dev, "%s[%d] is not your expected module\n",
			 rsnd_mod_name(mod), rsnd_mod_id(mod));
	}
}
static int rsnd_src_probe_gen1(struct rsnd_mod *mod,
			       struct rsnd_priv *priv)
{
	struct device *dev = rsnd_priv_to_dev(priv);

	dev_dbg(dev, "%s[%d] (Gen1) is probed\n",
		rsnd_mod_name(mod), rsnd_mod_id(mod));

	return 0;
}
static int rsnd_src_probe_gen1(struct rsnd_mod *mod,
			       struct rsnd_dai *rdai)
{
	struct rsnd_priv *priv = rsnd_mod_to_priv(mod);
	struct device *dev = rsnd_priv_to_dev(priv);

	dev_dbg(dev, "%s (Gen1) is probed\n", rsnd_mod_name(mod));

	return 0;
}
int rsnd_dma_init(struct rsnd_priv *priv, struct rsnd_dma *dma,
		  int is_play, int id)
{
	struct device *dev = rsnd_priv_to_dev(priv);
	struct dma_slave_config cfg;
	struct rsnd_mod *mod_from;
	struct rsnd_mod *mod_to;
	char dma_name[DMA_NAME_SIZE];
	dma_cap_mask_t mask;
	int ret;

	if (dma->chan) {
		dev_err(dev, "it already has dma channel\n");
		return -EIO;
	}

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	rsnd_dma_of_path(dma, is_play, &mod_from, &mod_to);
	rsnd_dma_of_name(mod_from, mod_to, dma_name);

	cfg.slave_id	= id;
	cfg.direction	= is_play ? DMA_MEM_TO_DEV : DMA_DEV_TO_MEM;
	cfg.src_addr	= rsnd_gen_dma_addr(priv, mod_from, is_play, 1);
	cfg.dst_addr	= rsnd_gen_dma_addr(priv, mod_to, is_play, 0);
	cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;

	dev_dbg(dev, "dma : %s %pad -> %pad\n",
		dma_name, &cfg.src_addr, &cfg.dst_addr);

	dma->chan = dma_request_slave_channel_compat(mask, shdma_chan_filter,
						     (void *)id, dev,
						     dma_name);
	if (!dma->chan) {
		dev_err(dev, "can't get dma channel\n");
		return -EIO;
	}

	ret = dmaengine_slave_config(dma->chan, &cfg);
	if (ret < 0)
		goto rsnd_dma_init_err;

	dma->addr = is_play ? cfg.src_addr : cfg.dst_addr;
	dma->dir = is_play ? DMA_MEM_TO_DEV : DMA_DEV_TO_MEM;

	return 0;

rsnd_dma_init_err:
	rsnd_dma_quit(priv, dma);

	return ret;
}
int rsnd_dvc_probe(struct rsnd_priv *priv)
{
	struct device_node *node;
	struct device_node *np;
	struct device *dev = rsnd_priv_to_dev(priv);
	struct rsnd_dvc *dvc;
	struct clk *clk;
	char name[RSND_DVC_NAME_SIZE];
	int i, nr, ret;

	/* This driver doesn't support Gen1 at this point */
	if (rsnd_is_gen1(priv))
		return 0;

	node = rsnd_dvc_of_node(priv);
	if (!node)
		return 0; /* not used is not error */

	nr = of_get_child_count(node);
	if (!nr) {
		ret = -EINVAL;
		goto rsnd_dvc_probe_done;
	}

	dvc = devm_kzalloc(dev, sizeof(*dvc) * nr, GFP_KERNEL);
	if (!dvc) {
		ret = -ENOMEM;
		goto rsnd_dvc_probe_done;
	}

	priv->dvc_nr	= nr;
	priv->dvc	= dvc;

	i = 0;
	ret = 0;
	for_each_child_of_node(node, np) {
		dvc = rsnd_dvc_get(priv, i);

		snprintf(name, RSND_DVC_NAME_SIZE, "%s.%d",
			 DVC_NAME, i);

		clk = devm_clk_get(dev, name);
		if (IS_ERR(clk)) {
			ret = PTR_ERR(clk);
			goto rsnd_dvc_probe_done;
		}

		ret = rsnd_mod_init(priv, rsnd_mod_get(dvc), &rsnd_dvc_ops,
				    clk, rsnd_mod_get_status, RSND_MOD_DVC, i);
		if (ret)
			goto rsnd_dvc_probe_done;

		i++;
	}

rsnd_dvc_probe_done:
	of_node_put(node);

	return ret;
}
static void rsnd_dmapp_write(struct rsnd_dma *dma, u32 data, u32 reg)
{
	struct rsnd_mod *mod = rsnd_mod_get(dma);
	struct rsnd_priv *priv = rsnd_mod_to_priv(mod);
	struct rsnd_dma_ctrl *dmac = rsnd_priv_to_dmac(priv);
	struct device *dev = rsnd_priv_to_dev(priv);

	dev_dbg(dev, "w %p : %08x\n", rsnd_dmapp_addr(dmac, dma, reg), data);

	iowrite32(data, rsnd_dmapp_addr(dmac, dma, reg));
}
/*
 *	Non SSI
 */
static int rsnd_ssi_non(struct rsnd_mod *mod,
			struct rsnd_dai *rdai,
			struct rsnd_dai_stream *io)
{
	struct rsnd_priv *priv = rsnd_mod_to_priv(mod);
	struct device *dev = rsnd_priv_to_dev(priv);

	dev_dbg(dev, "%s\n", __func__);

	return 0;
}