/*
 * IS_ERR() - test whether a pointer encodes an error code.
 *
 * Casts the pointer to a signed long and defers to IS_ERR_VALUE(),
 * i.e. the kernel-style ERR_PTR encoding where the top page of the
 * address space represents negative errno values.
 * Returns non-zero when @ptr is an encoded error, 0 otherwise.
 */
long IS_ERR(const void *ptr) { return IS_ERR_VALUE((long)ptr); }
static int mdss_dsi_cmd_dma_tx(struct mdss_dsi_ctrl_pdata *ctrl, struct dsi_buf *tp) { int len, ret = 0; int domain = MDSS_IOMMU_DOMAIN_UNSECURE; char *bp; unsigned long size, addr; bp = tp->data; len = ALIGN(tp->len, 4); size = ALIGN(tp->len, SZ_4K); if (is_mdss_iommu_attached()) { int ret = msm_iommu_map_contig_buffer(tp->dmap, mdss_get_iommu_domain(domain), 0, size, SZ_4K, 0, &(addr)); if (IS_ERR_VALUE(ret)) { pr_err("unable to map dma memory to iommu(%d)\n", ret); return -ENOMEM; } } else { addr = tp->dmap; } INIT_COMPLETION(ctrl->dma_comp); if (ctrl->shared_pdata.broadcast_enable) if ((ctrl->ndx == DSI_CTRL_1) && (left_ctrl_pdata != NULL)) { MIPI_OUTP(left_ctrl_pdata->ctrl_base + 0x048, addr); MIPI_OUTP(left_ctrl_pdata->ctrl_base + 0x04c, len); } MIPI_OUTP((ctrl->ctrl_base) + 0x048, addr); MIPI_OUTP((ctrl->ctrl_base) + 0x04c, len); wmb(); if (ctrl->shared_pdata.broadcast_enable) if ((ctrl->ndx == DSI_CTRL_1) && (left_ctrl_pdata != NULL)) { MIPI_OUTP(left_ctrl_pdata->ctrl_base + 0x090, 0x01); } MIPI_OUTP((ctrl->ctrl_base) + 0x090, 0x01); /* trigger */ wmb(); ret = wait_for_completion_timeout(&ctrl->dma_comp, msecs_to_jiffies(DMA_TX_TIMEOUT)); if (ret == 0) ret = -ETIMEDOUT; else ret = tp->len; if (is_mdss_iommu_attached()) msm_iommu_unmap_contig_buffer(addr, mdss_get_iommu_domain(domain), 0, size); return ret; }
/*
 * afe_port_start_nowait() - configure and start an AFE port without
 * waiting for the DSP to acknowledge either command (non-blocking).
 *
 * @port_id:    AFE port to configure and start (proxy IDs remapped).
 * @afe_config: port configuration copied into the config command.
 * @rate:       sample rate passed in the start command.
 *
 * Returns 0 on success, -EINVAL for bad arguments/unsupported ports or
 * send failures, -ENODEV when the AFE APR handle is not registered.
 *
 * Fix vs. original: the second apr_send_pkt() result was checked with
 * IS_ERR_VALUE(ret) while the first used `ret < 0`; both now use the
 * plain signed comparison for consistency.
 */
int afe_port_start_nowait(u16 port_id, union afe_port_config *afe_config,
			  u32 rate)
{
	struct afe_port_start_command start;
	struct afe_audioif_config_command config;
	int ret;

	if (!afe_config) {
		pr_err("%s: Error, no configuration data\n", __func__);
		ret = -EINVAL;
		return ret;
	}
	pr_info("%s: %d %d\n", __func__, port_id, rate);

	/* These proxy ports are not supported by this entry point */
	if ((port_id == RT_PROXY_DAI_001_RX) ||
	    (port_id == RT_PROXY_DAI_002_TX))
		return -EINVAL;
	/* Remap the virtual proxy IDs onto real AFE port IDs */
	if ((port_id == RT_PROXY_DAI_002_RX) ||
	    (port_id == RT_PROXY_DAI_001_TX))
		port_id = VIRTUAL_ID_TO_PORTID(port_id);

	if (this_afe.apr == NULL) {
		pr_err("%s: AFE APR is not registered\n", __func__);
		ret = -ENODEV;
		return ret;
	}

	/* Build and send the audio-interface config command */
	config.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
				APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
	config.hdr.pkt_size = afe_sizeof_cfg_cmd(port_id);
	config.hdr.src_port = 0;
	config.hdr.dest_port = 0;
	config.hdr.token = 0;
	config.hdr.opcode = AFE_PORT_AUDIO_IF_CONFIG;

	if (afe_validate_port(port_id) < 0) {
		pr_err("%s: Failed : Invalid Port id = %d\n", __func__,
				port_id);
		ret = -EINVAL;
		goto fail_cmd;
	}

	config.port_id = port_id;
	config.port = *afe_config;

	ret = apr_send_pkt(this_afe.apr, (uint32_t *) &config);
	if (ret < 0) {
		pr_err("%s: AFE enable for port %d failed\n", __func__,
				port_id);
		ret = -EINVAL;
		goto fail_cmd;
	}

	/* send AFE cal */
	afe_send_cal(port_id);

	/* Build and send the port start command */
	start.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
				APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
	start.hdr.pkt_size = sizeof(start);
	start.hdr.src_port = 0;
	start.hdr.dest_port = 0;
	start.hdr.token = 0;
	start.hdr.opcode = AFE_PORT_CMD_START;
	start.port_id = port_id;
	start.gain = 0x2000;	/* unity gain in Q13 - TODO confirm */
	start.sample_rate = rate;

	ret = apr_send_pkt(this_afe.apr, (uint32_t *) &start);
	if (ret < 0) {
		pr_err("%s: AFE enable for port %d failed\n", __func__,
				port_id);
		ret = -EINVAL;
		goto fail_cmd;
	}

	/* Remember which task started the port (used for debug prints) */
	if (this_afe.task != current)
		this_afe.task = current;

	pr_debug("task_name = %s pid = %d\n",
			this_afe.task->comm, this_afe.task->pid);
	return 0;

fail_cmd:
	return ret;
}
/*
 * debugfs_afe_loopback() - enable or disable a full codec RX<->TX AFE
 * loopback path for debugging.
 *
 * @loop: non-zero enables the loopback (clocks, LPA, ADIE RX/TX paths,
 *        AFE RX/TX at 8 kHz mono), zero tears it all down in reverse.
 *
 * The statement order mirrors the hardware bring-up sequence and must
 * not be reordered. Errors are logged but not propagated (void return,
 * debugfs-only path).
 */
static void debugfs_afe_loopback(u32 loop)
{
	int trc;
	struct msm_afe_config afe_config;
	struct snddev_icodec_drv_state *drv = &snddev_icodec_drv;
	struct lpa_codec_config lpa_config;

	if (loop) {
		/* Vote for SMPS mode */
		pmapp_smps_mode_vote(SMPS_AUDIO_PLAYBACK_ID,
				PMAPP_VREG_S4, PMAPP_SMPS_MODE_VOTE_PWM);

		/* enable MI2S RX master block */
		/* enable MI2S RX bit clock */
		trc = clk_set_rate(drv->rx_mclk,
				SNDDEV_ICODEC_CLK_RATE(8000));
		if (IS_ERR_VALUE(trc))
			MM_ERR("failed to set clk rate\n");
		clk_enable(drv->rx_mclk);
		clk_enable(drv->rx_sclk);
		clk_enable(drv->lpa_p_clk);
		clk_enable(drv->lpa_codec_clk);
		clk_enable(drv->lpa_core_clk);

		/* Enable LPA sub system */
		drv->lpa = lpa_get();
		if (!drv->lpa)
			MM_ERR("failed to enable lpa\n");
		lpa_config.sample_rate = 8000;
		lpa_config.sample_width = 16;
		lpa_config.output_interface = LPA_OUTPUT_INTF_WB_CODEC;
		lpa_config.num_channels = 1;
		lpa_cmd_codec_config(drv->lpa, &lpa_config);

		/* Set audio interconnect reg to LPA */
		audio_interct_codec(AUDIO_INTERCT_LPA);
		mi2s_set_codec_output_path(MI2S_CHAN_MONO_PACKED, WT_16_BIT);

		MM_INFO("configure ADIE RX path\n");
		/* Configure ADIE */
		adie_codec_open(&debug_rx_profile, &debugfs_rx_adie);
		adie_codec_setpath(debugfs_rx_adie, 8000, 256);
		lpa_cmd_enable_codec(drv->lpa, 1);

		/* Start AFE for RX; sample_rate field takes kHz (0x8 = 8) */
		afe_config.sample_rate = 0x8;
		afe_config.channel_mode = 1;
		afe_config.volume = AFE_VOLUME_UNITY;
		MM_INFO("enable afe\n");
		trc = afe_enable(AFE_HW_PATH_CODEC_RX, &afe_config);
		if (IS_ERR_VALUE(trc))
			MM_ERR("fail to enable afe RX\n");
		adie_codec_proceed_stage(debugfs_rx_adie,
				ADIE_CODEC_DIGITAL_READY);
		adie_codec_proceed_stage(debugfs_rx_adie,
				ADIE_CODEC_DIGITAL_ANALOG_READY);

		/* Vote for PWM mode */
		pmapp_smps_mode_vote(SMPS_AUDIO_RECORD_ID,
				PMAPP_VREG_S4, PMAPP_SMPS_MODE_VOTE_PWM);

		MM_INFO("Enable Handset Mic bias\n");
		pmic_hsed_enable(PM_HSED_CONTROLLER_0,
				PM_HSED_ENABLE_PWM_TCXO);

		/* enable MI2S TX master block */
		/* enable MI2S TX bit clock */
		/* NOTE(review): unlike the RX path above, this
		 * clk_set_rate() return value is not checked. */
		clk_set_rate(drv->tx_mclk, SNDDEV_ICODEC_CLK_RATE(8000));
		clk_enable(drv->tx_mclk);
		clk_enable(drv->tx_sclk);

		/* Set MI2S */
		mi2s_set_codec_input_path(MI2S_CHAN_MONO_PACKED, WT_16_BIT);

		MM_INFO("configure ADIE TX path\n");
		/* Configure ADIE */
		adie_codec_open(&debug_tx_profile, &debugfs_tx_adie);
		adie_codec_setpath(debugfs_tx_adie, 8000, 256);
		adie_codec_proceed_stage(debugfs_tx_adie,
				ADIE_CODEC_DIGITAL_READY);
		adie_codec_proceed_stage(debugfs_tx_adie,
				ADIE_CODEC_DIGITAL_ANALOG_READY);

		/* Start AFE for TX */
		afe_config.sample_rate = 0x8;
		afe_config.channel_mode = 1;
		afe_config.volume = AFE_VOLUME_UNITY;
		trc = afe_enable(AFE_HW_PATH_CODEC_TX, &afe_config);
		if (IS_ERR_VALUE(trc))
			MM_ERR("failed to enable AFE TX\n");

		/* Set the volume level to non unity, to avoid loopback
		 * effect (feedback howl) */
		afe_device_volume_ctrl(AFE_HW_PATH_CODEC_RX, 0x0500);

		/* enable afe loopback */
		afe_loopback(1);
		MM_INFO("AFE loopback enabled\n");
	} else {
		/* disable afe loopback */
		afe_loopback(0);
		/* Remove the vote for SMPS mode */
		pmapp_smps_mode_vote(SMPS_AUDIO_PLAYBACK_ID,
				PMAPP_VREG_S4, PMAPP_SMPS_MODE_VOTE_DONTCARE);
		/* Disable ADIE */
		adie_codec_proceed_stage(debugfs_rx_adie,
				ADIE_CODEC_DIGITAL_OFF);
		adie_codec_close(debugfs_rx_adie);
		/* Disable AFE for RX */
		afe_disable(AFE_HW_PATH_CODEC_RX);

		/* Disable LPA Sub system */
		lpa_cmd_enable_codec(drv->lpa, 0);
		lpa_put(drv->lpa);

		/* Disable LPA clocks */
		clk_disable(drv->lpa_p_clk);
		clk_disable(drv->lpa_codec_clk);
		clk_disable(drv->lpa_core_clk);

		/* Disable MI2S RX master block */
		/* Disable MI2S RX bit clock */
		clk_disable(drv->rx_sclk);
		clk_disable(drv->rx_mclk);

		pmapp_smps_mode_vote(SMPS_AUDIO_RECORD_ID,
				PMAPP_VREG_S4, PMAPP_SMPS_MODE_VOTE_DONTCARE);

		/* Disable AFE for TX */
		afe_disable(AFE_HW_PATH_CODEC_TX);

		/* Disable ADIE */
		adie_codec_proceed_stage(debugfs_tx_adie,
				ADIE_CODEC_DIGITAL_OFF);
		adie_codec_close(debugfs_tx_adie);

		/* Disable MI2S TX master block */
		/* Disable MI2S TX bit clock */
		clk_disable(drv->tx_sclk);
		clk_disable(drv->tx_mclk);
		pmic_hsed_enable(PM_HSED_CONTROLLER_0, PM_HSED_ENABLE_OFF);
		MM_INFO("AFE loopback disabled\n");
	}
}
/*
 * msm_hdmi_audio_codec_rx_dai_hw_params() - program the HDMI core with
 * the stream's channel count, rate and CEA channel allocation.
 *
 * Checks cable status, maps the channel count (2-8) to a default
 * channel-allocation code, then hands the parameters to the HDMI core
 * via audio_info_setup().
 *
 * Returns the HDMI-core status codes, or -EINVAL for an unsupported
 * channel count.
 *
 * NOTE(review): a later variant of this function in this file also
 * rejects a cable-status return of 0 (no cable); this one only checks
 * IS_ERR_VALUE(rv) - confirm whether rv == 0 should be treated as
 * "not ready" here too.
 */
static int msm_hdmi_audio_codec_rx_dai_hw_params(
	struct snd_pcm_substream *substream,
	struct snd_pcm_hw_params *params,
	struct snd_soc_dai *dai)
{
	u32 channel_allocation = 0;
	u32 level_shift = 0; /* 0dB */
	bool down_mix = 0;
	u32 num_channels = params_channels(params);
	int rv = 0;
	struct msm_hdmi_audio_codec_rx_data *codec_data =
			dev_get_drvdata(dai->codec->dev);

	rv = codec_data->hdmi_ops.hdmi_cable_status(
			codec_data->hdmi_core_pdev, 1);
	if (IS_ERR_VALUE(rv)) {
		dev_err(dai->dev,
			"%s() HDMI core is not ready\n", __func__);
		return rv;
	}

	/* Default speaker placements for each supported channel count */
	switch (num_channels) {
	case 2:
		channel_allocation = 0;
		break;
	case 3:
		channel_allocation = 0x02; /* default to FL/FR/FC */
		break;
	case 4:
		channel_allocation = 0x06; /* default to FL/FR/FC/RC */
		break;
	case 5:
		channel_allocation = 0x0A; /* default to FL/FR/FC/RR/RL */
		break;
	case 6:
		channel_allocation = 0x0B;
		break;
	case 7:
		channel_allocation = 0x12; /* default to FL/FR/FC/RL/RR/RRC/RLC */
		break;
	case 8:
		channel_allocation = 0x13;
		break;
	default:
		dev_err(dai->dev, "invalid Channels = %u\n", num_channels);
		return -EINVAL;
	}

	dev_dbg(dai->dev,
		"%s() num_ch %u samplerate %u channel_allocation = %u\n",
		__func__, num_channels, params_rate(params),
		channel_allocation);

	rv = codec_data->hdmi_ops.audio_info_setup(
			codec_data->hdmi_core_pdev,
			params_rate(params), num_channels,
			channel_allocation, level_shift, down_mix);
	if (IS_ERR_VALUE(rv)) {
		dev_err(dai->dev,
			"%s() HDMI core is not ready\n", __func__);
	}

	return rv;
}
/*
 * snddev_icodec_init() - module init: register the platform driver,
 * acquire all MI2S/LPA clocks, create debugfs loopback files and
 * initialize driver state.
 *
 * On any failure the already-acquired clocks are released in reverse
 * order through the chained error labels, and -ENODEV is returned.
 *
 * NOTE(review): the debugfs files are created with S_IWUGO, i.e.
 * world-writable - any user can toggle the loopback paths. Confirm
 * this is intended for a debug build only.
 */
static int __init snddev_icodec_init(void)
{
	s32 rc;
	struct snddev_icodec_drv_state *icodec_drv = &snddev_icodec_drv;

	rc = platform_driver_register(&snddev_icodec_driver);
	if (IS_ERR_VALUE(rc))
		goto error_platform_driver;

	/* Acquire clocks in a fixed order; each failure unwinds the
	 * ones acquired so far via the labels below. */
	icodec_drv->rx_mclk = clk_get(NULL, "mi2s_codec_rx_m_clk");
	if (IS_ERR(icodec_drv->rx_mclk))
		goto error_rx_mclk;
	icodec_drv->rx_sclk = clk_get(NULL, "mi2s_codec_rx_s_clk");
	if (IS_ERR(icodec_drv->rx_sclk))
		goto error_rx_sclk;
	icodec_drv->tx_mclk = clk_get(NULL, "mi2s_codec_tx_m_clk");
	if (IS_ERR(icodec_drv->tx_mclk))
		goto error_tx_mclk;
	icodec_drv->tx_sclk = clk_get(NULL, "mi2s_codec_tx_s_clk");
	if (IS_ERR(icodec_drv->tx_sclk))
		goto error_tx_sclk;
	icodec_drv->lpa_codec_clk = clk_get(NULL, "lpa_codec_clk");
	if (IS_ERR(icodec_drv->lpa_codec_clk))
		goto error_lpa_codec_clk;
	icodec_drv->lpa_core_clk = clk_get(NULL, "lpa_core_clk");
	if (IS_ERR(icodec_drv->lpa_core_clk))
		goto error_lpa_core_clk;
	icodec_drv->lpa_p_clk = clk_get(NULL, "lpa_pclk");
	if (IS_ERR(icodec_drv->lpa_p_clk))
		goto error_lpa_p_clk;

#ifdef CONFIG_DEBUG_FS
	debugfs_sdev_dent = debugfs_create_dir("snddev_icodec", 0);
	if (debugfs_sdev_dent) {
		debugfs_afelb = debugfs_create_file("afe_loopback",
			S_IFREG | S_IWUGO, debugfs_sdev_dent,
			(void *) "afe_loopback", &snddev_icodec_debug_fops);
		debugfs_adielb = debugfs_create_file("adie_loopback",
			S_IFREG | S_IWUGO, debugfs_sdev_dent,
			(void *) "adie_loopback", &snddev_icodec_debug_fops);
	}
#endif

	mutex_init(&icodec_drv->rx_lock);
	mutex_init(&icodec_drv->tx_lock);
	icodec_drv->rx_active = 0;
	icodec_drv->tx_active = 0;
	icodec_drv->lpa = NULL;
	wake_lock_init(&icodec_drv->tx_idlelock, WAKE_LOCK_IDLE,
			"snddev_tx_idle");
	wake_lock_init(&icodec_drv->rx_idlelock, WAKE_LOCK_IDLE,
			"snddev_rx_idle");
	return 0;

	/* Unwind in reverse acquisition order */
error_lpa_p_clk:
	clk_put(icodec_drv->lpa_core_clk);
error_lpa_core_clk:
	clk_put(icodec_drv->lpa_codec_clk);
error_lpa_codec_clk:
	clk_put(icodec_drv->tx_sclk);
error_tx_sclk:
	clk_put(icodec_drv->tx_mclk);
error_tx_mclk:
	clk_put(icodec_drv->rx_sclk);
error_rx_sclk:
	clk_put(icodec_drv->rx_mclk);
error_rx_mclk:
	platform_driver_unregister(&snddev_icodec_driver);
error_platform_driver:
	MM_ERR("encounter error\n");
	return -ENODEV;
}
static int snddev_icodec_open_tx(struct snddev_icodec_state *icodec) { int trc; int i, err; struct msm_afe_config afe_config; struct snddev_icodec_drv_state *drv = &snddev_icodec_drv;; wake_lock(&drv->tx_idlelock); /* Vote for PWM mode*/ err = pmapp_smps_mode_vote(SMPS_AUDIO_RECORD_ID, PMAPP_VREG_S4, PMAPP_SMPS_MODE_VOTE_PWM); if (err != 0) MM_ERR("pmapp_smps_mode_vote error %d\n", err); /* Reuse pamp_on for TX platform-specific setup */ if (icodec->data->pamp_on) icodec->data->pamp_on(); for (i = 0; i < icodec->data->pmctl_id_sz; i++) { pmic_hsed_enable(icodec->data->pmctl_id[i], PM_HSED_ENABLE_PWM_TCXO); } /* enable MI2S TX master block */ /* enable MI2S TX bit clock */ trc = clk_set_rate(drv->tx_mclk, SNDDEV_ICODEC_CLK_RATE(icodec->sample_rate)); if (IS_ERR_VALUE(trc)) goto error_invalid_freq; clk_enable(drv->tx_mclk); clk_enable(drv->tx_sclk); /* Set MI2S */ mi2s_set_codec_input_path((icodec->data->channel_mode == REAL_STEREO_CHANNEL_MODE ? MI2S_CHAN_STEREO : (icodec->data->channel_mode == 2 ? 
MI2S_CHAN_STEREO : MI2S_CHAN_MONO_RAW)), WT_16_BIT); /* Configure ADIE */ trc = adie_codec_open(icodec->data->profile, &icodec->adie_path); if (IS_ERR_VALUE(trc)) goto error_adie; /* Enable ADIE */ adie_codec_setpath(icodec->adie_path, icodec->sample_rate, 256); adie_codec_proceed_stage(icodec->adie_path, ADIE_CODEC_DIGITAL_READY); adie_codec_proceed_stage(icodec->adie_path, ADIE_CODEC_DIGITAL_ANALOG_READY); /* Start AFE */ afe_config.sample_rate = icodec->sample_rate / 1000; afe_config.channel_mode = icodec->data->channel_mode; afe_config.volume = AFE_VOLUME_UNITY; trc = afe_enable(AFE_HW_PATH_CODEC_TX, &afe_config); if (IS_ERR_VALUE(trc)) goto error_afe; icodec->enabled = 1; wake_unlock(&drv->tx_idlelock); return 0; error_afe: adie_codec_close(icodec->adie_path); icodec->adie_path = NULL; error_adie: clk_disable(drv->tx_sclk); clk_disable(drv->tx_mclk); error_invalid_freq: /* Disable mic bias */ for (i = 0; i < icodec->data->pmctl_id_sz; i++) { pmic_hsed_enable(icodec->data->pmctl_id[i], PM_HSED_ENABLE_OFF); } if (icodec->data->pamp_off) icodec->data->pamp_off(); MM_ERR("encounter error\n"); wake_unlock(&drv->tx_idlelock); return -ENODEV; }
/*
 * snddev_ecodec_open_rx() - bring up the external-codec (AUX PCM) RX
 * path.
 *
 * When the TX side is not already active this also requests the AUX
 * PCM GPIOs, routes the interconnect to ADSP_A and sends the AUX
 * codec config to the AFE; otherwise that shared setup is assumed to
 * be in place. Finally enables the AFE AUXPCM RX port.
 *
 * Returns 0 on success, a negative errno from GPIO/AFE setup
 * otherwise.
 */
static int snddev_ecodec_open_rx(struct snddev_ecodec_state *ecodec)
{
	int rc = 0;
	struct snddev_ecodec_drv_state *drv = &snddev_ecodec_drv;
	struct msm_afe_config afe_config;

	MM_DBG("snddev_ecodec_open_rx\n");
	MM_INFO("snddev_ecodec_open_rx : name = %s\n", ecodec->data->name);

	if (!drv->tx_active) {
		/* request GPIO */
		rc = aux_pcm_gpios_request();
		if (rc) {
			MM_ERR("GPIO enable failed\n");
			goto done;
		}
		/* config clocks: lpa_core_clk only needs to be on while
		 * the interconnect registers are programmed */
		clk_enable(drv->lpa_core_clk);

		/* enable ecodec clk */
		clk_enable(drv->ecodec_clk);

		/* let ADSP configure AUX PCM regs */
		aux_codec_adsp_codec_ctl_en(ADSP_CTL);

		/* let adsp configure pcm path */
		aux_codec_pcm_path_ctl_en(ADSP_CTL);

		/* choose ADSP_A */
		audio_interct_aux_regsel(AUDIO_ADSP_A);
		audio_interct_tpcm_source(AUDIO_ADSP_A);
		audio_interct_rpcm_source(AUDIO_ADSP_A);

		clk_disable(drv->lpa_core_clk);

		/* send AUX_CODEC_CONFIG to AFE */
		rc = afe_config_aux_codec(ecodec->data->conf_pcm_ctl_val,
				ecodec->data->conf_aux_codec_intf,
				ecodec->data->conf_data_format_padding_val);
		if (IS_ERR_VALUE(rc))
			goto error;
	}
	/* send CODEC CONFIG to AFE (sample_rate field is in kHz) */
	afe_config.sample_rate = ecodec->sample_rate / 1000;
	afe_config.channel_mode = ecodec->data->channel_mode;
	afe_config.volume = AFE_VOLUME_UNITY;
	rc = afe_enable(AFE_HW_PATH_AUXPCM_RX, &afe_config);
	if (IS_ERR_VALUE(rc)) {
		/* only tear down the shared resources if we set them up */
		if (!drv->tx_active) {
			aux_pcm_gpios_free();
			clk_disable(drv->ecodec_clk);
		}
		goto done;
	}

	ecodec->enabled = 1;
	return 0;

error:
	aux_pcm_gpios_free();
	clk_disable(drv->ecodec_clk);
done:
	return rc;
}
/*
 * fimg2d4x_bitblt() - drain the command queue and run each blit on the
 * FIMG2D 4.x hardware.
 *
 * For every queued command: configure the engine, enable the system
 * MMU (or IOVMM) for user-space addresses, run the blit, wait for
 * completion, then unmap/disable the MMU and delete the command.
 * Per-command failures jump to fail_n_del so the command is always
 * unlocked and deleted; the loop then continues with the next command.
 *
 * Returns the status of the last processed command (0 if the queue
 * was empty).
 *
 * NOTE(review): `prefbuf` passed to vma_lock_mapping() is not declared
 * in this function - presumably a file-scope buffer list; confirm its
 * lifetime matches this loop.
 */
int fimg2d4x_bitblt(struct fimg2d_control *ctrl)
{
	int ret = 0;
	enum addr_space addr_type;
	struct fimg2d_context *ctx;
	struct fimg2d_bltcmd *cmd;
	unsigned long *pgd;

	fimg2d_debug("%s : enter blitter\n", __func__);

	while (1) {
		cmd = fimg2d_get_command(ctrl);
		if (!cmd)
			break;	/* queue drained */

		ctx = cmd->ctx;
		ctx->state = CTX_READY;
#ifdef CONFIG_PM_RUNTIME
		if (fimg2d4x_get_clk_cnt(ctrl->clock) == false)
			fimg2d_err("2D clock is not set\n");
#endif
		atomic_set(&ctrl->busy, 1);

		perf_start(cmd, PERF_SFR);
		ret = ctrl->configure(ctrl, cmd);
		perf_end(cmd, PERF_SFR);
		if (IS_ERR_VALUE(ret)) {
			fimg2d_err("failed to configure\n");
			ctx->state = CTX_ERROR;
			goto fail_n_del;
		}

		addr_type = cmd->image[IDST].addr.type;

		ctx->vma_lock = vma_lock_mapping(ctx->mm, prefbuf,
				MAX_IMAGES - 1);
		if (fimg2d_check_pgd(ctx->mm, cmd)) {
			ret = -EFAULT;
			goto fail_n_del;
		}

		if (addr_type == ADDR_USER || addr_type == ADDR_USER_CONTIG) {
			/* user-space addresses need a valid page table */
			if (!ctx->mm || !ctx->mm->pgd) {
				atomic_set(&ctrl->busy, 0);
				fimg2d_err("ctx->mm:0x%p or ctx->mm->pgd:0x%p\n",
						ctx->mm,
						(ctx->mm) ? ctx->mm->pgd : NULL);
				ret = -EPERM;
				goto fail_n_del;
			}
			pgd = (unsigned long *)ctx->mm->pgd;
#ifdef CONFIG_EXYNOS7_IOMMU
			if (iovmm_activate(ctrl->dev)) {
				fimg2d_err("failed to iovmm activate\n");
				ret = -EPERM;
				goto fail_n_del;
			}
#else
			if (exynos_sysmmu_enable(ctrl->dev,
					(unsigned long)virt_to_phys(pgd))) {
				fimg2d_err("failed to sysmme enable\n");
				ret = -EPERM;
				goto fail_n_del;
			}
#endif
			fimg2d_debug("%s : sysmmu enable: pgd %p ctx %p seq_no(%u)\n",
					__func__, pgd, ctx, cmd->blt.seq_no);
			//exynos_sysmmu_set_pbuf(ctrl->dev, nbufs, prefbuf);
			fimg2d_debug("%s : set smmu prefbuf\n", __func__);
		}

		fimg2d4x_pre_bitblt(ctrl, cmd);

		perf_start(cmd, PERF_BLIT);
		/* start blit */
		fimg2d_debug("%s : start blit\n", __func__);
		ctrl->run(ctrl);
		ret = fimg2d4x_blit_wait(ctrl, cmd);
		perf_end(cmd, PERF_BLIT);

		perf_start(cmd, PERF_UNMAP);
		if (addr_type == ADDR_USER || addr_type == ADDR_USER_CONTIG) {
#ifdef CONFIG_EXYNOS7_IOMMU
			iovmm_deactivate(ctrl->dev);

			/* unmap every plane that was actually mapped */
			if (cmd->dma[ISRC].base.size > 0) {
				exynos_sysmmu_unmap_user_pages(ctrl->dev,
						ctx->mm,
						cmd->dma[ISRC].base.addr,
						cmd->dma[ISRC].base.size);
			}
			if (cmd->dma[ISRC].plane2.size > 0) {
				exynos_sysmmu_unmap_user_pages(ctrl->dev,
						ctx->mm,
						cmd->dma[ISRC].plane2.addr,
						cmd->dma[ISRC].plane2.size);
			}
			if (cmd->dma[IMSK].base.size > 0) {
				exynos_sysmmu_unmap_user_pages(ctrl->dev,
						ctx->mm,
						cmd->dma[IMSK].base.addr,
						cmd->dma[IMSK].base.size);
			}
			if (cmd->dma[IDST].base.size > 0) {
				exynos_sysmmu_unmap_user_pages(ctrl->dev,
						ctx->mm,
						cmd->dma[IDST].base.addr,
						cmd->dma[IDST].base.size);
			}
			if (cmd->dma[IDST].plane2.size > 0) {
				exynos_sysmmu_unmap_user_pages(ctrl->dev,
						ctx->mm,
						cmd->dma[IDST].plane2.addr,
						cmd->dma[IDST].plane2.size);
			}
#else
			exynos_sysmmu_disable(ctrl->dev);
#endif
			fimg2d_debug("sysmmu disable\n");
		}
		perf_end(cmd, PERF_UNMAP);
fail_n_del:
		/* always release the VMA lock and retire the command */
		vma_unlock_mapping(ctx->vma_lock);
		fimg2d_del_command(ctrl, cmd);
	}

	fimg2d_debug("%s : exit blitter\n", __func__);
	return ret;
}
/*
 * virtualblockdevice_init() - module init: register the block device,
 * set up its request queue (switching the elevator to noop), allocate
 * and populate the gendisk, and add it to the kernel.
 *
 * Returns 0 on success; on failure unwinds the completed steps via
 * the chained labels and returns a negative errno.
 *
 * NOTE(review): register_blkdev() returns 0 (not the major) when a
 * non-zero major is requested, so `virtualblockdevice_major = ret`
 * assumes the module always requests a dynamic major (0) - confirm
 * the default value of virtualblockdevice_major.
 */
static int __init virtualblockdevice_init(void)
{
	int ret;
	elevator_t *oldelev;

	printk(KERN_ALERT "VirtualBlockDevice: Entry virtualblockdevice_init !\n");

	ret = register_blkdev( virtualblockdevice_major, VIRTUALBLOCKDEVICE_NAME );
	if( 0 > ret ) {
		printk(KERN_ALERT "VirtualBlockDevice: Failure to register block device: virtualblockdevice ! Major: %d\tErrno: %d !\n", virtualblockdevice_major, ret);
		goto failure_register_blkdev;
	}
	virtualblockdevice_major = ret;
	printk(KERN_ALERT "VirtualBlockDevice: Success to register block device: virtualblockdevice ! Major: %d !\n", virtualblockdevice_major);

	// get request_queue
	virtualblockdevice_queue = blk_init_queue( virtualblockdevice_do_request, NULL );
	if( !virtualblockdevice_queue ) {
		printk(KERN_ALERT "VirtualBlockDevice: Failure to init request_queue !\n");
		ret = -ENOMEM;
		goto failure_init_queue;
	}
	printk(KERN_ALERT "VirtualBlockDevice: Success to init request_queue !\n");

	// switch elevator: keep the old one alive until the new one is in
	// place; only free it after a successful elevator_init()
	oldelev = virtualblockdevice_queue->elevator;
	if( IS_ERR_VALUE( elevator_init( virtualblockdevice_queue, "noop" ) ) ) {
		printk(KERN_ALERT "VirtualBlockDevice: Failure to switch elevator to noop, continue to use old one !\n");
	} else {
		printk(KERN_ALERT "VirtualBlockDevice: Success to switch elevator to noop !\n");
		elevator_exit( oldelev );
	}

	// get gendisk
	virtualblockdevice_disk = alloc_disk( 1 );
	if( !virtualblockdevice_disk ) {
		printk(KERN_ALERT "VirtualBlockDevice: Failure to allocate gendisk !\n");
		ret = -ENOMEM;
		goto failure_alloc_disk;
	}
	printk(KERN_ALERT "VirtualBlockDevice: Success to allocate gendisk !\n");

	// initialize gendisk
	strcpy( virtualblockdevice_disk->disk_name, VIRTUALBLOCKDEVICE_NAME );
	virtualblockdevice_disk->major = virtualblockdevice_major;
	virtualblockdevice_disk->first_minor = virtualblockdevice_minor;
	virtualblockdevice_disk->fops = &virtualblockdevice_fops;
	virtualblockdevice_disk->queue = virtualblockdevice_queue;
	// capacity is in 512-byte sectors
	set_capacity( virtualblockdevice_disk, ( VIRTUALBLOCKDEVICE_DISK_CAPACITY >> 9 ) );

	// add gendisk to kernel
	add_disk( virtualblockdevice_disk );

	return 0;

failure_alloc_disk:
	blk_cleanup_queue( virtualblockdevice_queue );
failure_init_queue:
	unregister_blkdev( virtualblockdevice_major, VIRTUALBLOCKDEVICE_NAME );
failure_register_blkdev:
	return ret;
}
static int msm_hdmi_audio_codec_rx_dai_hw_params( struct snd_pcm_substream *substream, struct snd_pcm_hw_params *params, struct snd_soc_dai *dai) { u32 channel_allocation = 0; u32 level_shift = 0; bool down_mix = 0; u32 num_channels = params_channels(params); int rc = 0; struct msm_hdmi_audio_codec_rx_data *codec_data = dev_get_drvdata(dai->codec->dev); rc = codec_data->hdmi_ops.hdmi_cable_status( codec_data->hdmi_core_pdev, 1); pr_info("%s: hdmi status %d\n",__func__,rc); if (IS_ERR_VALUE(rc) || rc == 0) { rc = -EINVAL; dev_err(dai->dev, "%s() HDMI core is not ready 1\n", __func__); return rc; } if (IS_ERR_VALUE(msm_hdmi_audio_codec_return_value)) { dev_err(dai->dev, "%s() HDMI core is not ready 2\n", __func__); return msm_hdmi_audio_codec_return_value; } switch (num_channels) { case 2: channel_allocation = 0; break; case 3: channel_allocation = 0x02; break; case 4: channel_allocation = 0x06; break; case 5: channel_allocation = 0x0A; break; case 6: channel_allocation = 0x0B; break; case 7: channel_allocation = 0x12; break; case 8: channel_allocation = 0x13; break; default: dev_err(dai->dev, "invalid Channels = %u\n", num_channels); return -EINVAL; } dev_dbg(dai->dev, "%s() num_ch %u samplerate %u channel_allocation = %u\n", __func__, num_channels, params_rate(params), channel_allocation); rc = codec_data->hdmi_ops.audio_info_setup( codec_data->hdmi_core_pdev, params_rate(params), num_channels, channel_allocation, level_shift, down_mix); if (IS_ERR_VALUE(rc)) { dev_err(dai->dev, "%s() HDMI core is not ready 3\n", __func__); } return rc; }
static int snddev_mi2s_open(struct msm_snddev_info *dev_info) { int rc = 0; union afe_port_config afe_config; u8 channels; u8 num_of_sd_lines = 0; struct snddev_mi2s_drv_state *drv = &snddev_mi2s_drv; struct snddev_mi2s_data *snddev_mi2s_data = dev_info->private_data; if (!dev_info) { pr_err("%s: msm_snddev_info is null\n", __func__); return -EINVAL; } /* set up osr clk */ drv->tx_osrclk = clk_get_sys(NULL, "mi2s_osr_clk"); if (IS_ERR(drv->tx_osrclk)) pr_err("%s master clock Error\n", __func__); rc = clk_set_rate(drv->tx_osrclk, SNDDEV_MI2S_CLK_RATE(dev_info->sample_rate)); if (IS_ERR_VALUE(rc)) { pr_err("ERROR setting osr clock\n"); return -ENODEV; } clk_prepare_enable(drv->tx_osrclk); /* set up bit clk */ drv->tx_bitclk = clk_get_sys(NULL, "mi2s_bit_clk"); if (IS_ERR(drv->tx_bitclk)) pr_err("%s clock Error\n", __func__); rc = clk_set_rate(drv->tx_bitclk, 8); if (IS_ERR_VALUE(rc)) { pr_err("ERROR setting bit clock\n"); clk_disable_unprepare(drv->tx_osrclk); return -ENODEV; } clk_prepare_enable(drv->tx_bitclk); afe_config.mi2s.bitwidth = 16; if (snddev_mi2s_data->channel_mode == 1) channels = AFE_MI2S_MONO; else if (snddev_mi2s_data->channel_mode == 2) channels = AFE_MI2S_STEREO; else if (snddev_mi2s_data->channel_mode == 4) channels = AFE_MI2S_4CHANNELS; else if (snddev_mi2s_data->channel_mode == 6) channels = AFE_MI2S_6CHANNELS; else if (snddev_mi2s_data->channel_mode == 8) channels = AFE_MI2S_8CHANNELS; else { pr_err("ERROR: Invalid MI2S channel mode\n"); goto error_invalid_data; } num_of_sd_lines = num_of_bits_set(snddev_mi2s_data->sd_lines); switch (num_of_sd_lines) { case 1: switch (snddev_mi2s_data->sd_lines) { case MI2S_SD0: afe_config.mi2s.line = AFE_I2S_SD0; break; case MI2S_SD1: afe_config.mi2s.line = AFE_I2S_SD1; break; case MI2S_SD2: afe_config.mi2s.line = AFE_I2S_SD2; break; case MI2S_SD3: afe_config.mi2s.line = AFE_I2S_SD3; break; default: pr_err("%s: invalid SD line\n", __func__); goto error_invalid_data; } if (channels != AFE_MI2S_STEREO && 
channels != AFE_MI2S_MONO) { pr_err("%s: for one SD line, channel " "must be 1 or 2\n", __func__); goto error_invalid_data; } afe_config.mi2s.channel = channels; break; case 2: switch (snddev_mi2s_data->sd_lines) { case MI2S_SD0 | MI2S_SD1: afe_config.mi2s.line = AFE_I2S_QUAD01; break; case MI2S_SD2 | MI2S_SD3: afe_config.mi2s.line = AFE_I2S_QUAD23; break; default: pr_err("%s: invalid SD line\n", __func__); goto error_invalid_data; } if (channels != AFE_MI2S_4CHANNELS) { pr_err("%s: for two SD lines, channel " "must be 1 and 2 or 3 and 4\n", __func__); goto error_invalid_data; } break; case 3: switch (snddev_mi2s_data->sd_lines) { case MI2S_SD0 | MI2S_SD1 | MI2S_SD2: afe_config.mi2s.line = AFE_I2S_6CHS; break; default: pr_err("%s: invalid SD lines\n", __func__); goto error_invalid_data; } if (channels != AFE_MI2S_6CHANNELS) { pr_err("%s: for three SD lines, lines " "must be 1, 2, and 3\n", __func__); goto error_invalid_data; } break; case 4: switch (snddev_mi2s_data->sd_lines) { case MI2S_SD0 | MI2S_SD1 | MI2S_SD2 | MI2S_SD3: afe_config.mi2s.line = AFE_I2S_8CHS; break; default: pr_err("%s: invalid SD lines\n", __func__); goto error_invalid_data; } if (channels != AFE_MI2S_8CHANNELS) { pr_err("%s: for four SD lines, lines " "must be 1, 2, 3, and 4\n", __func__); goto error_invalid_data; } break; default: pr_err("%s: invalid SD lines\n", __func__); goto error_invalid_data; } afe_config.mi2s.ws = 1; afe_config.mi2s.format = MSM_AFE_I2S_FORMAT_LPCM; rc = afe_open(snddev_mi2s_data->copp_id, &afe_config, dev_info->sample_rate); if (rc < 0) { pr_err("%s: afe_open failed\n", __func__); goto error_invalid_data; } /*enable fm gpio here*/ rc = mi2s_gpios_request(); if (rc < 0) { pr_err("%s: GPIO request failed\n", __func__); return rc; } pr_info("%s: afe_open done\n", __func__); return rc; error_invalid_data: clk_disable_unprepare(drv->tx_bitclk); clk_disable_unprepare(drv->tx_osrclk); return -EINVAL; }
static int fimc_is_isp_video_s_ctrl(struct file *file, void *priv, struct v4l2_control *ctrl) { int ret = 0; int i2c_clk; struct fimc_is_video *video; struct fimc_is_video_ctx *vctx = file->private_data; struct fimc_is_device_ischain *device; struct fimc_is_core *core; BUG_ON(!vctx); BUG_ON(!vctx->device); BUG_ON(!vctx->video); dbg_isp("%s\n", __func__); device = vctx->device; video = vctx->video; core = container_of(video, struct fimc_is_core, video_isp); if (core->resourcemgr.dvfs_ctrl.cur_int_qos == DVFS_L0) i2c_clk = I2C_L0; else i2c_clk = I2C_L1; switch (ctrl->id) { case V4L2_CID_IS_DEBUG_DUMP: info("Print fimc-is info dump by HAL"); if (device != NULL) { fimc_is_hw_logdump(device->interface); fimc_is_hw_regdump(device->interface); CALL_POPS(device, print_clk, device->pdev); } if (ctrl->value) { err("BUG_ON from HAL"); BUG(); } break; case V4L2_CID_IS_DEBUG_SYNC_LOG: fimc_is_logsync(device->interface, ctrl->value, IS_MSG_TEST_SYNC_LOG); break; case V4L2_CID_IS_HAL_VERSION: if (ctrl->value < 0 || ctrl->value >= IS_HAL_VER_MAX) { merr("hal version(%d) is invalid", vctx, ctrl->value); ret = -EINVAL; goto p_err; } core->resourcemgr.hal_version = ctrl->value; break; case V4L2_CID_IS_G_CAPABILITY: ret = fimc_is_ischain_g_capability(device, ctrl->value); dbg_isp("V4L2_CID_IS_G_CAPABILITY : %X\n", ctrl->value); break; case V4L2_CID_IS_FORCE_DONE: set_bit(FIMC_IS_GROUP_REQUEST_FSTOP, &device->group_isp.state); break; case V4L2_CID_IS_DVFS_LOCK: ret = fimc_is_itf_i2c_lock(device, I2C_L0, true); if (ret) { err("fimc_is_itf_i2_clock fail\n"); break; } pm_qos_add_request(&device->user_qos, PM_QOS_DEVICE_THROUGHPUT, ctrl->value); ret = fimc_is_itf_i2c_lock(device, I2C_L0, false); if (ret) { err("fimc_is_itf_i2c_unlock fail\n"); break; } dbg_isp("V4L2_CID_IS_DVFS_LOCK : %d\n", ctrl->value); break; case V4L2_CID_IS_DVFS_UNLOCK: ret = fimc_is_itf_i2c_lock(device, i2c_clk, true); if (ret) { err("fimc_is_itf_i2_clock fail\n"); break; } pm_qos_remove_request(&device->user_qos); 
ret = fimc_is_itf_i2c_lock(device, i2c_clk, false); if (ret) { err("fimc_is_itf_i2c_unlock fail\n"); break; } dbg_isp("V4L2_CID_IS_DVFS_UNLOCK : %d I2C(%d)\n", ctrl->value, i2c_clk); break; case V4L2_CID_IS_SET_SETFILE: if (test_bit(FIMC_IS_SUBDEV_START, &device->group_isp.leader.state)) { err("Setting setfile is only avaiable before starting device!! (0x%08x)", ctrl->value); ret = -EINVAL; } else { device->setfile = ctrl->value; minfo("[ISP:V] setfile: 0x%08X\n", vctx, ctrl->value); } break; case V4L2_CID_IS_COLOR_RANGE: if (test_bit(FIMC_IS_SUBDEV_START, &device->group_isp.leader.state)) { err("failed to change color range: device started already (0x%08x)", ctrl->value); ret = -EINVAL; } else { device->color_range &= ~FIMC_IS_ISP_CRANGE_MASK; if (ctrl->value) device->color_range |= (FIMC_IS_CRANGE_LIMITED << FIMC_IS_ISP_CRANGE_SHIFT); } break; case V4L2_CID_IS_MAP_BUFFER: { /* hack for 64bit addr */ ulong value_to_addr; struct fimc_is_queue *queue; struct fimc_is_framemgr *framemgr; struct fimc_is_frame *frame; struct dma_buf *dmabuf; struct dma_buf_attachment *attachment; dma_addr_t dva; struct v4l2_buffer *buf; struct v4l2_plane *planes; size_t size; u32 write, plane, group_id; size = sizeof(struct v4l2_buffer); buf = kmalloc(size, GFP_KERNEL); if (!buf) { merr("kmalloc is fail", vctx); ret = -EINVAL; goto p_err; } /* hack for 64bit addr */ value_to_addr = ctrl->value; ret = copy_from_user(buf, (void __user *)value_to_addr, size); if (ret) { merr("copy_from_user is fail(%d)", vctx, ret); kfree(buf); ret = -EINVAL; goto p_err; } if (!V4L2_TYPE_IS_MULTIPLANAR(buf->type)) { merr("single plane is not supported", vctx); kfree(buf); ret = -EINVAL; goto p_err; } if (buf->index >= FRAMEMGR_MAX_REQUEST) { merr("buffer index is invalid(%d)", vctx, buf->index); kfree(buf); ret = -EINVAL; goto p_err; } if (buf->length > VIDEO_MAX_PLANES) { merr("planes[%d] is invalid", vctx, buf->length); kfree(buf); ret = -EINVAL; goto p_err; } queue = GET_QUEUE(vctx, buf->type); if 
(queue->vbq->memory != V4L2_MEMORY_DMABUF) { merr("memory type(%d) is not supported", vctx, queue->vbq->memory); kfree(buf); ret = -EINVAL; goto p_err; } size = sizeof(struct v4l2_plane) * buf->length; planes = kmalloc(size, GFP_KERNEL); if (IS_ERR(planes)) { merr("kmalloc is fail(%p)", vctx, planes); kfree(buf); ret = -EINVAL; goto p_err; } ret = copy_from_user(planes, (void __user *)buf->m.planes, size); if (ret) { merr("copy_from_user is fail(%d)", vctx, ret); kfree(planes); kfree(buf); ret = -EINVAL; goto p_err; } framemgr = &queue->framemgr; frame = &framemgr->frame[buf->index]; if (test_bit(FRAME_MAP_MEM, &frame->memory)) { merr("this buffer(%d) is already mapped", vctx, buf->index); kfree(planes); kfree(buf); ret = -EINVAL; goto p_err; } /* only last buffer need to map */ if (buf->length >= 1) { plane = buf->length - 1; } else { merr("buffer length is not correct(%d)", vctx, buf->length); kfree(planes); kfree(buf); ret = -EINVAL; goto p_err; } dmabuf = dma_buf_get(planes[plane].m.fd); if (IS_ERR(dmabuf)) { merr("dma_buf_get is fail(%p)", vctx, dmabuf); kfree(planes); kfree(buf); ret = -EINVAL; goto p_err; } attachment = dma_buf_attach(dmabuf, &device->pdev->dev); if (IS_ERR(attachment)) { merr("dma_buf_attach is fail(%p)", vctx, attachment); kfree(planes); kfree(buf); dma_buf_put(dmabuf); ret = -EINVAL; goto p_err; } write = !V4L2_TYPE_IS_OUTPUT(buf->type); dva = ion_iovmm_map(attachment, 0, dmabuf->size, write, plane); if (IS_ERR_VALUE(dva)) { merr("ion_iovmm_map is fail(%pa)", vctx, &dva); kfree(planes); kfree(buf); dma_buf_detach(dmabuf, attachment); dma_buf_put(dmabuf); ret = -EINVAL; goto p_err; } group_id = GROUP_ID(device->group_isp.id); ret = fimc_is_itf_map(device, group_id, dva, dmabuf->size); if (ret) { merr("fimc_is_itf_map is fail(%d)", vctx, ret); kfree(planes); kfree(buf); dma_buf_detach(dmabuf, attachment); dma_buf_put(dmabuf); goto p_err; } minfo("[ISP:V] buffer%d.plane%d mapping\n", vctx, buf->index, plane); set_bit(FRAME_MAP_MEM, 
&frame->memory); dma_buf_detach(dmabuf, attachment); dma_buf_put(dmabuf); kfree(planes); kfree(buf); } break; default: err("unsupported ioctl(%d)\n", ctrl->id); ret = -EINVAL; break; } p_err: return ret; }
/* This function sends multi-channel HDMI configuration command and AFE
 * calibration which is only supported by QDSP6 on 8960 and onward.
 */
/*
 * afe_port_start() - blocking variant of the AFE port bring-up:
 * selects the right config opcode for the port type, sends the config
 * command, waits for the DSP ack, sends calibration, then sends the
 * start command and waits again.
 *
 * Returns 0 on success, -EINVAL on invalid input, send failure,
 * timeout or a DSP-reported error status.
 */
int afe_port_start(u16 port_id, union afe_port_config *afe_config,
	u32 rate)
{
	struct afe_port_start_command start;
	struct afe_audioif_config_command config;
	int ret;

	if (!afe_config) {
		pr_err("%s: Error, no configuration data\n", __func__);
		ret = -EINVAL;
		return ret;
	}
	pr_debug("%s: %d %d\n", __func__, port_id, rate);

	/* proxy port handling mirrors afe_port_start_nowait() */
	if ((port_id == RT_PROXY_DAI_001_RX) ||
		(port_id == RT_PROXY_DAI_002_TX))
		return -EINVAL;
	if ((port_id == RT_PROXY_DAI_002_RX) ||
		(port_id == RT_PROXY_DAI_001_TX))
		port_id = VIRTUAL_ID_TO_PORTID(port_id);

	ret = afe_q6_interface_prepare();
	if (IS_ERR_VALUE(ret))
		return ret;

	if (port_id == HDMI_RX) {
		/* HDMI uses the dedicated multi-channel config opcode */
		config.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
				APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
		config.hdr.pkt_size = afe_sizeof_cfg_cmd(port_id);
		config.hdr.src_port = 0;
		config.hdr.dest_port = 0;
		config.hdr.token = 0;
		config.hdr.opcode = AFE_PORT_MULTI_CHAN_HDMI_AUDIO_IF_CONFIG;
	} else {
		config.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
				APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
		config.hdr.pkt_size = afe_sizeof_cfg_cmd(port_id);
		config.hdr.src_port = 0;
		config.hdr.dest_port = 0;
		config.hdr.token = 0;

		switch (port_id) {
		case SLIMBUS_0_RX:
		case SLIMBUS_0_TX:
		case SLIMBUS_1_RX:
		case SLIMBUS_1_TX:
		case SLIMBUS_2_RX:
		case SLIMBUS_2_TX:
		case SLIMBUS_3_RX:
		case SLIMBUS_3_TX:
		case SLIMBUS_4_RX:
		case SLIMBUS_4_TX:
			config.hdr.opcode = AFE_PORT_AUDIO_SLIM_SCH_CONFIG;
			break;
		case MI2S_TX:
		case MI2S_RX:
		case SECONDARY_I2S_RX:
		case SECONDARY_I2S_TX:
		case PRIMARY_I2S_RX:
		case PRIMARY_I2S_TX:
			/* AFE_PORT_CMD_I2S_CONFIG command is not supported
			 * in the LPASS EL 1.0. So we have to distinguish
			 * which AFE command, AFE_PORT_CMD_I2S_CONFIG or
			 * AFE_PORT_AUDIO_IF_CONFIG to use. If the format
			 * is L-PCM, the AFE_PORT_AUDIO_IF_CONFIG is used
			 * to make the backward compatible.
			 */
			pr_debug("%s: afe_config->mi2s.format = %d\n",
					__func__, afe_config->mi2s.format);
			if (afe_config->mi2s.format == MSM_AFE_I2S_FORMAT_LPCM)
				config.hdr.opcode = AFE_PORT_AUDIO_IF_CONFIG;
			else
				config.hdr.opcode = AFE_PORT_CMD_I2S_CONFIG;
			break;
		default:
			config.hdr.opcode = AFE_PORT_AUDIO_IF_CONFIG;
			break;
		}
	}

	if (afe_validate_port(port_id) < 0) {
		pr_err("%s: Failed : Invalid Port id = %d\n", __func__,
				port_id);
		ret = -EINVAL;
		goto fail_cmd;
	}

	config.port_id = port_id;
	config.port = *afe_config;

	/* state is cleared by the APR response handler */
	atomic_set(&this_afe.state, 1);
	atomic_set(&this_afe.status, 0);
	ret = apr_send_pkt(this_afe.apr, (uint32_t *) &config);
	if (ret < 0) {
		pr_err("%s: AFE enable for port %d failed\n", __func__,
				port_id);
		ret = -EINVAL;
		goto fail_cmd;
	}

	ret = wait_event_timeout(this_afe.wait,
			(atomic_read(&this_afe.state) == 0),
			msecs_to_jiffies(TIMEOUT_MS));
	if (!ret) {
		pr_err("%s: wait_event timeout IF CONFIG\n", __func__);
		ret = -EINVAL;
		goto fail_cmd;
	}
	if (atomic_read(&this_afe.status) != 0) {
		pr_err("%s: config cmd failed\n", __func__);
		ret = -EINVAL;
		goto fail_cmd;
	}

	/* send AFE cal */
	afe_send_cal(port_id);

	start.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
				APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
	start.hdr.pkt_size = sizeof(start);
	start.hdr.src_port = 0;
	start.hdr.dest_port = 0;
	start.hdr.token = 0;
	start.hdr.opcode = AFE_PORT_CMD_START;
	start.port_id = port_id;
	start.gain = 0x2000;	/* presumably unity gain - TODO confirm */
	start.sample_rate = rate;

	atomic_set(&this_afe.state, 1);
	ret = apr_send_pkt(this_afe.apr, (uint32_t *) &start);
	if (IS_ERR_VALUE(ret)) {
		pr_err("%s: AFE enable for port %d failed\n", __func__,
				port_id);
		ret = -EINVAL;
		goto fail_cmd;
	}

	ret = wait_event_timeout(this_afe.wait,
			(atomic_read(&this_afe.state) == 0),
			msecs_to_jiffies(TIMEOUT_MS));
	if (!ret) {
		pr_err("%s: wait_event timeout PORT START\n", __func__);
		ret = -EINVAL;
		goto fail_cmd;
	}

	/* Remember the starting task for debug prints */
	if (this_afe.task != current)
		this_afe.task = current;

	pr_debug("task_name = %s pid = %d\n",
			this_afe.task->comm, this_afe.task->pid);
	return 0;

fail_cmd:
	return ret;
}
/*
 * mdp3_ctrl_ioctl_handler() - dispatch framebuffer ioctls for an MDP3 session.
 * @mfd:  framebuffer device data; its mdp.private1 holds the session.
 * @cmd:  MSMFB_* ioctl command code.
 * @argp: user-space argument pointer for the command payload.
 *
 * Returns 0 on success or a negative errno.  Fixes versus the previous
 * revision: copy_from_user()/copy_to_user() return the number of bytes
 * NOT copied (a positive count, not an errno), so failures are now
 * converted to -EFAULT instead of leaking a positive value to user space;
 * OVERLAY_UNSET previously tested the copy result with IS_ERR_VALUE(),
 * which is always false for a small positive count, so a failed copy
 * went on to use an uninitialized 'val'.
 */
static int mdp3_ctrl_ioctl_handler(struct msm_fb_data_type *mfd,
					u32 cmd, void __user *argp)
{
	int rc = -EINVAL;
	struct mdp3_session_data *mdp3_session;
	struct msmfb_metadata metadata;
	struct mdp_overlay req;
	struct msmfb_overlay_data ov_data;
	int val;

	pr_debug("mdp3_ctrl_ioctl_handler\n");

	mdp3_session = (struct mdp3_session_data *)mfd->mdp.private1;
	if (!mdp3_session)
		return -ENODEV;

	/* all commands below require the display to be on */
	if (!mdp3_session->status) {
		pr_err("mdp3_ctrl_ioctl_handler, display off!\n");
		return -EINVAL;
	}

	switch (cmd) {
	case MSMFB_VSYNC_CTRL:
	case MSMFB_OVERLAY_VSYNC_CTRL:
		if (!copy_from_user(&val, argp, sizeof(val))) {
			rc = mdp3_ctrl_vsync_enable(mfd, val);
		} else {
			pr_err("MSMFB_OVERLAY_VSYNC_CTRL failed\n");
			rc = -EFAULT;
		}
		break;
	case MSMFB_BLIT:
		rc = mdp3_ctrl_blit_req(mfd, argp);
		break;
	case MSMFB_METADATA_GET:
		/* convert the positive "bytes missed" count to -EFAULT */
		if (copy_from_user(&metadata, argp, sizeof(metadata))) {
			rc = -EFAULT;
			break;
		}
		rc = mdp3_get_metadata(mfd, &metadata);
		if (!rc && copy_to_user(argp, &metadata, sizeof(metadata)))
			rc = -EFAULT;
		break;
	case MSMFB_OVERLAY_GET:
		if (copy_from_user(&req, argp, sizeof(req))) {
			rc = -EFAULT;
		} else {
			rc = mdp3_overlay_get(mfd, &req);
			if (!IS_ERR_VALUE(rc) &&
			    copy_to_user(argp, &req, sizeof(req)))
				rc = -EFAULT;
		}
		if (rc)
			pr_err("OVERLAY_GET failed (%d)\n", rc);
		break;
	case MSMFB_OVERLAY_SET:
		if (copy_from_user(&req, argp, sizeof(req))) {
			rc = -EFAULT;
		} else {
			rc = mdp3_overlay_set(mfd, &req);
			if (!IS_ERR_VALUE(rc) &&
			    copy_to_user(argp, &req, sizeof(req)))
				rc = -EFAULT;
		}
		if (rc)
			pr_err("OVERLAY_SET failed (%d)\n", rc);
		break;
	case MSMFB_OVERLAY_UNSET:
		/* check the copy result directly; IS_ERR_VALUE() on the
		 * byte count never detected a failed copy, leaving 'val'
		 * uninitialized.
		 */
		if (!copy_from_user(&val, argp, sizeof(val)))
			rc = mdp3_overlay_unset(mfd, val);
		else
			rc = -EFAULT;
		break;
	case MSMFB_OVERLAY_PLAY:
		if (copy_from_user(&ov_data, argp, sizeof(ov_data)))
			rc = -EFAULT;
		else
			rc = mdp3_overlay_play(mfd, &ov_data);
		if (rc)
			pr_err("OVERLAY_PLAY failed (%d)\n", rc);
		break;
	default:
		break;
	}
	return rc;
}
/*
 * get a romfs inode based on its position in the image (which doubles as the
 * inode number)
 */
static struct inode *romfs_iget(struct super_block *sb, unsigned long pos)
{
	struct romfs_inode_info *inode;
	struct romfs_inode ri;
	struct inode *i;
	unsigned long nlen;
	unsigned nextfh;
	int ret;
	umode_t mode;

	/* we might have to traverse a chain of "hard link" file entries to get
	 * to the actual file */
	for (;;) {
		ret = romfs_dev_read(sb, pos, &ri, sizeof(ri));
		if (ret < 0)
			goto error;

		/* XXX: do romfs_checksum here too (with name) */

		nextfh = be32_to_cpu(ri.next);
		if ((nextfh & ROMFH_TYPE) != ROMFH_HRD)
			break;

		/* hard link: follow the spec field to the real entry */
		pos = be32_to_cpu(ri.spec) & ROMFH_MASK;
	}

	/* determine the length of the filename; romfs_dev_strnlen() encodes
	 * failure as an IS_ERR_VALUE() unsigned long */
	nlen = romfs_dev_strnlen(sb, pos + ROMFH_SIZE, ROMFS_MAXFN);
	if (IS_ERR_VALUE(nlen))
		goto eio;

	/* get an inode for this image position */
	i = iget_locked(sb, pos);
	if (!i)
		return ERR_PTR(-ENOMEM);

	/* inode already cached and initialized — return it as-is */
	if (!(i->i_state & I_NEW))
		return i;

	/* precalculate the data offset: header + NUL-terminated, padded name */
	inode = ROMFS_I(i);
	inode->i_metasize = (ROMFH_SIZE + nlen + 1 + ROMFH_PAD) & ROMFH_MASK;
	inode->i_dataoffset = pos + inode->i_metasize;

	i->i_nlink = 1;		/* Hard to decide.. */
	i->i_size = be32_to_cpu(ri.size);
	i->i_mtime.tv_sec = i->i_atime.tv_sec = i->i_ctime.tv_sec = 0;
	i->i_mtime.tv_nsec = i->i_atime.tv_nsec = i->i_ctime.tv_nsec = 0;

	/* set up mode and ops */
	mode = romfs_modemap[nextfh & ROMFH_TYPE];

	switch (nextfh & ROMFH_TYPE) {
	case ROMFH_DIR:
		i->i_size = ROMFS_I(i)->i_metasize;
		i->i_op = &romfs_dir_inode_operations;
		i->i_fop = &romfs_dir_operations;
		if (nextfh & ROMFH_EXEC)
			mode |= S_IXUGO;
		break;
	case ROMFH_REG:
		i->i_fop = &romfs_ro_fops;
		i->i_data.a_ops = &romfs_aops;
		if (i->i_sb->s_mtd)
			i->i_data.backing_dev_info =
				i->i_sb->s_mtd->backing_dev_info;
		if (nextfh & ROMFH_EXEC)
			mode |= S_IXUGO;
		break;
	case ROMFH_SYM:
		i->i_op = &page_symlink_inode_operations;
		i->i_data.a_ops = &romfs_aops;
		mode |= S_IRWXUGO;
		break;
	default:
		/* depending on MBZ for sock/fifos */
		nextfh = be32_to_cpu(ri.spec);
		init_special_inode(i, mode, MKDEV(nextfh >> 16,
						  nextfh & 0xffff));
		break;
	}

	i->i_mode = mode;

	unlock_new_inode(i);
	return i;

eio:
	ret = -EIO;
error:
	printk(KERN_ERR "ROMFS: read error for inode 0x%lx\n", pos);
	return ERR_PTR(ret);
}
int mdp3_ctrl_init(struct msm_fb_data_type *mfd) { struct device *dev = mfd->fbi->dev; struct msm_mdp_interface *mdp3_interface = &mfd->mdp; struct mdp3_session_data *mdp3_session = NULL; u32 intf_type = MDP3_DMA_OUTPUT_SEL_DSI_VIDEO; int rc; pr_debug("mdp3_ctrl_init\n"); mdp3_interface->on_fnc = mdp3_ctrl_on; mdp3_interface->off_fnc = mdp3_ctrl_off; mdp3_interface->do_histogram = NULL; mdp3_interface->cursor_update = NULL; mdp3_interface->dma_fnc = mdp3_ctrl_pan_display; mdp3_interface->ioctl_handler = mdp3_ctrl_ioctl_handler; mdp3_interface->kickoff_fnc = mdp3_ctrl_display_commit_kickoff; mdp3_session = kmalloc(sizeof(struct mdp3_session_data), GFP_KERNEL); if (!mdp3_session) { pr_err("fail to allocate mdp3 private data structure"); return -ENOMEM; } memset(mdp3_session, 0, sizeof(struct mdp3_session_data)); mutex_init(&mdp3_session->lock); init_completion(&mdp3_session->vsync_comp); spin_lock_init(&mdp3_session->vsync_lock); mdp3_session->dma = mdp3_get_dma_pipe(MDP3_DMA_CAP_ALL); if (!mdp3_session->dma) { rc = -ENODEV; goto init_done; } intf_type = mdp3_ctrl_get_intf_type(mfd); mdp3_session->intf = mdp3_get_display_intf(intf_type); if (!mdp3_session->intf) { rc = -ENODEV; goto init_done; } mdp3_session->panel = dev_get_platdata(&mfd->pdev->dev); mdp3_session->status = 0; mdp3_session->overlay.id = MSMFB_NEW_REQUEST; mdp3_bufq_init(&mdp3_session->bufq_in); mdp3_bufq_init(&mdp3_session->bufq_out); mfd->mdp.private1 = mdp3_session; rc = sysfs_create_group(&dev->kobj, &vsync_fs_attr_group); if (rc) { pr_err("vsync sysfs group creation failed, ret=%d\n", rc); goto init_done; } kobject_uevent(&dev->kobj, KOBJ_ADD); pr_debug("vsync kobject_uevent(KOBJ_ADD)\n"); init_done: if (IS_ERR_VALUE(rc)) kfree(mdp3_session); return rc; }
/*
 * wl1271_probe() - SPI probe for the wl12xx WLAN glue layer.
 *
 * Allocates the glue structure, fetches the "vwlan" regulator, reads
 * device-tree parameters, configures the SPI word size, then creates and
 * registers a "wl12xx" platform device carrying the IRQ resource and
 * platform data.  Returns 0 on success or a negative errno; the platform
 * device is released on any failure after its allocation.
 */
static int wl1271_probe(struct spi_device *spi)
{
	struct wl12xx_spi_glue *glue;
	struct wlcore_platdev_data pdev_data;
	struct resource res[1];
	int ret;

	memset(&pdev_data, 0x00, sizeof(pdev_data));
	pdev_data.if_ops = &spi_ops;

	glue = devm_kzalloc(&spi->dev, sizeof(*glue), GFP_KERNEL);
	if (!glue) {
		dev_err(&spi->dev, "can't allocate glue\n");
		return -ENOMEM;
	}

	glue->dev = &spi->dev;

	spi_set_drvdata(spi, glue);

	/* This is the only SPI value that we need to set here, the rest
	 * comes from the board-peripherals file */
	spi->bits_per_word = 32;

	glue->reg = devm_regulator_get(&spi->dev, "vwlan");
	/* propagate probe deferral so we are retried once vwlan appears */
	if (PTR_ERR(glue->reg) == -EPROBE_DEFER)
		return -EPROBE_DEFER;
	if (IS_ERR(glue->reg)) {
		dev_err(glue->dev, "can't get regulator\n");
		return PTR_ERR(glue->reg);
	}

	ret = wlcore_probe_of(spi, glue, &pdev_data);
	if (IS_ERR_VALUE(ret)) {
		dev_err(glue->dev,
			"can't get device tree parameters (%d)\n", ret);
		return ret;
	}

	ret = spi_setup(spi);
	if (ret < 0) {
		dev_err(glue->dev, "spi_setup failed\n");
		return ret;
	}

	glue->core = platform_device_alloc("wl12xx", PLATFORM_DEVID_AUTO);
	if (!glue->core) {
		dev_err(glue->dev, "can't allocate platform_device\n");
		return -ENOMEM;
	}

	glue->core->dev.parent = &spi->dev;

	/* single IRQ resource, trigger type taken from the SPI IRQ line */
	memset(res, 0x00, sizeof(res));

	res[0].start = spi->irq;
	res[0].flags = IORESOURCE_IRQ | irq_get_trigger_type(spi->irq);
	res[0].name = "irq";

	ret = platform_device_add_resources(glue->core, res, ARRAY_SIZE(res));
	if (ret) {
		dev_err(glue->dev, "can't add resources\n");
		goto out_dev_put;
	}

	ret = platform_device_add_data(glue->core, &pdev_data,
				       sizeof(pdev_data));
	if (ret) {
		dev_err(glue->dev, "can't add platform data\n");
		goto out_dev_put;
	}

	ret = platform_device_add(glue->core);
	if (ret) {
		dev_err(glue->dev, "can't register platform device\n");
		goto out_dev_put;
	}

	return 0;

out_dev_put:
	platform_device_put(glue->core);
	return ret;
}
/*
 * snddev_icodec_open_rx() - bring up the internal codec RX (playback) path.
 *
 * Sequence: vote SMPS mode, set and enable the MI2S/LPA clocks, acquire and
 * configure the LPA block, route the audio interconnect, configure MI2S and
 * the ADIE codec path, start the AFE, then enable the codec stages and the
 * power amplifier.  A wake lock is held for the whole bring-up.  On failure
 * the already-enabled resources are unwound in reverse via the goto chain
 * and -ENODEV is returned.
 */
static int snddev_icodec_open_rx(struct snddev_icodec_state *icodec)
{
	int trc, err;
	int smps_mode = PMAPP_SMPS_MODE_VOTE_PWM;
	struct msm_afe_config afe_config;
	struct snddev_icodec_drv_state *drv = &snddev_icodec_drv;
	struct lpa_codec_config lpa_config;

	wake_lock(&drv->rx_idlelock);

	if ((icodec->data->acdb_id == ACDB_ID_HEADSET_SPKR_MONO) ||
		(icodec->data->acdb_id == ACDB_ID_HEADSET_SPKR_STEREO)) {
		/* Vote PMAPP_SMPS_MODE_VOTE_PFM for headset */
		smps_mode = PMAPP_SMPS_MODE_VOTE_PFM;
		MM_DBG("snddev_icodec_open_rx: PMAPP_SMPS_MODE_VOTE_PFM \n");
	} else
		MM_DBG("snddev_icodec_open_rx: PMAPP_SMPS_MODE_VOTE_PWM \n");

	/* Vote for SMPS mode*/
	err = pmapp_smps_mode_vote(SMPS_AUDIO_PLAYBACK_ID,
			PMAPP_VREG_S4, smps_mode);
	if (err != 0)
		/* non-fatal: log and continue with the default mode */
		MM_ERR("pmapp_smps_mode_vote error %d\n", err);

	/* enable MI2S RX master block */
	/* enable MI2S RX bit clock */
	trc = clk_set_rate(drv->rx_mclk,
		SNDDEV_ICODEC_CLK_RATE(icodec->sample_rate));
	if (IS_ERR_VALUE(trc))
		goto error_invalid_freq;
	clk_enable(drv->rx_mclk);
	clk_enable(drv->rx_sclk);
	/* clk_set_rate(drv->lpa_codec_clk, 1); */ /* Remove if use pcom */
	clk_enable(drv->lpa_p_clk);
	clk_enable(drv->lpa_codec_clk);
	clk_enable(drv->lpa_core_clk);

	/* Enable LPA sub system */
	drv->lpa = lpa_get();
	if (!drv->lpa)
		goto error_lpa;
	lpa_config.sample_rate = icodec->sample_rate;
	lpa_config.sample_width = 16;
	lpa_config.output_interface = LPA_OUTPUT_INTF_WB_CODEC;
	lpa_config.num_channels = icodec->data->channel_mode;
	lpa_cmd_codec_config(drv->lpa, &lpa_config);

	/* Set audio interconnect reg to LPA */
	audio_interct_codec(AUDIO_INTERCT_LPA);

	/* Set MI2S */
	mi2s_set_codec_output_path((icodec->data->channel_mode == 2 ?
	MI2S_CHAN_STEREO : MI2S_CHAN_MONO_PACKED), WT_16_BIT);

	if (icodec->data->voltage_on)
		icodec->data->voltage_on();

	/* Configure ADIE */
	trc = adie_codec_open(icodec->data->profile, &icodec->adie_path);
	if (IS_ERR_VALUE(trc))
		goto error_adie;
	/* OSR default to 256, can be changed for power optimization
	 * If OSR is to be changed, need clock API for setting the divider
	 */
	adie_codec_setpath(icodec->adie_path, icodec->sample_rate, 256);

	/* Start AFE */
	afe_config.sample_rate = icodec->sample_rate / 1000;
	afe_config.channel_mode = icodec->data->channel_mode;
	afe_config.volume = AFE_VOLUME_UNITY;
	trc = afe_enable(AFE_HW_PATH_CODEC_RX, &afe_config);
	if (IS_ERR_VALUE(trc))
		goto error_afe;

	lpa_cmd_enable_codec(drv->lpa, 1);

	/* Enable ADIE */
	adie_codec_proceed_stage(icodec->adie_path, ADIE_CODEC_DIGITAL_READY);
	adie_codec_proceed_stage(icodec->adie_path,
					ADIE_CODEC_DIGITAL_ANALOG_READY);

	/* Enable power amplifier */
	if (icodec->data->pamp_on)
		icodec->data->pamp_on();

	icodec->enabled = 1;

	wake_unlock(&drv->rx_idlelock);
	return 0;

/* unwind in reverse order of acquisition */
error_afe:
	adie_codec_close(icodec->adie_path);
	icodec->adie_path = NULL;
error_adie:
	lpa_put(drv->lpa);
error_lpa:
	clk_disable(drv->lpa_p_clk);
	clk_disable(drv->lpa_codec_clk);
	clk_disable(drv->lpa_core_clk);
	clk_disable(drv->rx_sclk);
	clk_disable(drv->rx_mclk);
error_invalid_freq:
	MM_ERR("encounter error\n");
	wake_unlock(&drv->rx_idlelock);
	return -ENODEV;
}
/*
 * mdp3_ctrl_ioctl_handler() - dispatch framebuffer ioctls for an MDP3 session.
 * @mfd:  framebuffer device data; its mdp.private1 holds the session.
 * @cmd:  MSMFB_* ioctl command code.
 * @argp: user-space argument pointer for the command payload.
 *
 * Most commands require the display to be on; METADATA_GET and
 * HISTOGRAM_STOP are allowed with the display off.  Overlay get/set use the
 * session-owned req_overlay scratch structure.
 *
 * Returns 0 on success or a negative errno.  Fixes: copy_from_user()/
 * copy_to_user() return the positive number of bytes NOT copied, so those
 * failures are now converted to -EFAULT instead of being returned verbatim;
 * OVERLAY_UNSET previously tested the copy result with IS_ERR_VALUE(),
 * which is always false for a small positive count, so a failed copy went
 * on to use an uninitialized 'val'.
 */
static int mdp3_ctrl_ioctl_handler(struct msm_fb_data_type *mfd,
					u32 cmd, void __user *argp)
{
	int rc = -EINVAL;
	struct mdp3_session_data *mdp3_session;
	struct msmfb_metadata metadata;
	struct mdp_overlay *req = NULL;
	struct msmfb_overlay_data ov_data;
	int val;

	mdp3_session = (struct mdp3_session_data *)mfd->mdp.private1;
	if (!mdp3_session)
		return -ENODEV;

	req = &mdp3_session->req_overlay;

	if (!mdp3_session->status && cmd != MSMFB_METADATA_GET &&
		cmd != MSMFB_HISTOGRAM_STOP) {
		pr_err("mdp3_ctrl_ioctl_handler, display off!\n");
		return -EPERM;
	}

	switch (cmd) {
	case MSMFB_MDP_PP:
		rc = mdp3_pp_ioctl(mfd, argp);
		break;
	case MSMFB_HISTOGRAM_START:
	case MSMFB_HISTOGRAM_STOP:
	case MSMFB_HISTOGRAM:
		rc = mdp3_histo_ioctl(mfd, cmd, argp);
		break;
	case MSMFB_VSYNC_CTRL:
	case MSMFB_OVERLAY_VSYNC_CTRL:
		if (!copy_from_user(&val, argp, sizeof(val))) {
			mutex_lock(&mdp3_session->lock);
			mdp3_session->vsync_enabled = val;
			rc = mdp3_ctrl_vsync_enable(mfd, val);
			mutex_unlock(&mdp3_session->lock);
		} else {
			pr_err("MSMFB_OVERLAY_VSYNC_CTRL failed\n");
			rc = -EFAULT;
		}
		break;
	case MSMFB_ASYNC_BLIT:
		rc = mdp3_ctrl_async_blit_req(mfd, argp);
		break;
	case MSMFB_BLIT:
		rc = mdp3_ctrl_blit_req(mfd, argp);
		break;
	case MSMFB_METADATA_GET:
		/* convert the positive "bytes missed" count to -EFAULT */
		rc = copy_from_user(&metadata, argp, sizeof(metadata)) ?
			-EFAULT : 0;
		if (!rc)
			rc = mdp3_get_metadata(mfd, &metadata);
		if (!rc && copy_to_user(argp, &metadata, sizeof(metadata)))
			rc = -EFAULT;
		if (rc)
			pr_err("mdp3_get_metadata failed (%d)\n", rc);
		break;
	case MSMFB_METADATA_SET:
		rc = copy_from_user(&metadata, argp, sizeof(metadata)) ?
			-EFAULT : 0;
		if (!rc)
			rc = mdp3_set_metadata(mfd, &metadata);
		if (rc)
			pr_err("mdp3_set_metadata failed (%d)\n", rc);
		break;
	case MSMFB_OVERLAY_GET:
		rc = copy_from_user(req, argp, sizeof(*req)) ? -EFAULT : 0;
		if (!rc) {
			rc = mdp3_overlay_get(mfd, req);
			if (!IS_ERR_VALUE(rc) &&
			    copy_to_user(argp, req, sizeof(*req)))
				rc = -EFAULT;
		}
		if (rc)
			pr_err("OVERLAY_GET failed (%d)\n", rc);
		break;
	case MSMFB_OVERLAY_SET:
		rc = copy_from_user(req, argp, sizeof(*req)) ? -EFAULT : 0;
		if (!rc) {
			rc = mdp3_overlay_set(mfd, req);
			if (!IS_ERR_VALUE(rc) &&
			    copy_to_user(argp, req, sizeof(*req)))
				rc = -EFAULT;
		}
		if (rc)
			pr_err("OVERLAY_SET failed (%d)\n", rc);
		break;
	case MSMFB_OVERLAY_UNSET:
		/* check the copy result directly; IS_ERR_VALUE() never
		 * caught a failed copy here */
		if (!copy_from_user(&val, argp, sizeof(val)))
			rc = mdp3_overlay_unset(mfd, val);
		else
			rc = -EFAULT;
		break;
	case MSMFB_OVERLAY_PLAY:
		rc = copy_from_user(&ov_data, argp, sizeof(ov_data)) ?
			-EFAULT : 0;
		if (!rc)
			rc = mdp3_overlay_play(mfd, &ov_data);
		if (rc)
			pr_err("OVERLAY_PLAY failed (%d)\n", rc);
		break;
	case MSMFB_OVERLAY_PREPARE:
		rc = mdp3_overlay_prepare(mfd, argp);
		break;
	default:
		break;
	}
	return rc;
}
static int snddev_icodec_open(struct msm_snddev_info *dev_info) { int rc = 0; struct snddev_icodec_state *icodec; struct snddev_icodec_drv_state *drv = &snddev_icodec_drv; if (!dev_info) { rc = -EINVAL; goto error; } icodec = dev_info->private_data; if (icodec->data->capability & SNDDEV_CAP_RX) { mutex_lock(&drv->rx_lock); if (drv->rx_active) { mutex_unlock(&drv->rx_lock); rc = -EBUSY; goto error; } rc = snddev_icodec_open_rx(icodec); if (!IS_ERR_VALUE(rc)) { drv->rx_active = 1; if ((icodec->data->dev_vol_type & ( SNDDEV_DEV_VOL_DIGITAL | SNDDEV_DEV_VOL_ANALOG))) rc = snddev_icodec_set_device_volume_impl( dev_info, dev_info->dev_volume); if (IS_ERR_VALUE(rc)) { MM_ERR("Failed to set device volume" " impl for rx device\n"); snddev_icodec_close(dev_info); mutex_unlock(&drv->rx_lock); goto error; } } mutex_unlock(&drv->rx_lock); } else { mutex_lock(&drv->tx_lock); if (drv->tx_active) { mutex_unlock(&drv->tx_lock); rc = -EBUSY; goto error; } rc = snddev_icodec_open_tx(icodec); if (!IS_ERR_VALUE(rc)) { drv->tx_active = 1; if ((icodec->data->dev_vol_type & ( SNDDEV_DEV_VOL_DIGITAL | SNDDEV_DEV_VOL_ANALOG))) rc = snddev_icodec_set_device_volume_impl( dev_info, dev_info->dev_volume); if (IS_ERR_VALUE(rc)) { MM_ERR("Failed to set device volume" " impl for tx device\n"); snddev_icodec_close(dev_info); mutex_unlock(&drv->tx_lock); goto error; } } mutex_unlock(&drv->tx_lock); } error: return rc; }
int mdp3_ctrl_init(struct msm_fb_data_type *mfd) { struct device *dev = mfd->fbi->dev; struct msm_mdp_interface *mdp3_interface = &mfd->mdp; struct mdp3_session_data *mdp3_session = NULL; u32 intf_type = MDP3_DMA_OUTPUT_SEL_DSI_VIDEO; int rc; int splash_mismatch = 0; pr_debug("mdp3_ctrl_init\n"); rc = mdp3_parse_dt_splash(mfd); if (rc) splash_mismatch = 1; mdp3_interface->on_fnc = mdp3_ctrl_on; mdp3_interface->off_fnc = mdp3_ctrl_off; mdp3_interface->do_histogram = NULL; mdp3_interface->cursor_update = NULL; mdp3_interface->dma_fnc = mdp3_ctrl_pan_display; mdp3_interface->ioctl_handler = mdp3_ctrl_ioctl_handler; mdp3_interface->kickoff_fnc = mdp3_ctrl_display_commit_kickoff; mdp3_interface->lut_update = mdp3_ctrl_lut_update; mdp3_session = kmalloc(sizeof(struct mdp3_session_data), GFP_KERNEL); if (!mdp3_session) { pr_err("fail to allocate mdp3 private data structure"); return -ENOMEM; } memset(mdp3_session, 0, sizeof(struct mdp3_session_data)); mutex_init(&mdp3_session->lock); INIT_WORK(&mdp3_session->clk_off_work, mdp3_dispatch_clk_off); INIT_WORK(&mdp3_session->dma_done_work, mdp3_dispatch_dma_done); atomic_set(&mdp3_session->vsync_countdown, 0); mutex_init(&mdp3_session->histo_lock); mdp3_session->dma = mdp3_get_dma_pipe(MDP3_DMA_CAP_ALL); if (!mdp3_session->dma) { rc = -ENODEV; goto init_done; } rc = mdp3_dma_init(mdp3_session->dma); if (rc) { pr_err("fail to init dma\n"); goto init_done; } intf_type = mdp3_ctrl_get_intf_type(mfd); mdp3_session->intf = mdp3_get_display_intf(intf_type); if (!mdp3_session->intf) { rc = -ENODEV; goto init_done; } rc = mdp3_intf_init(mdp3_session->intf); if (rc) { pr_err("fail to init interface\n"); goto init_done; } mdp3_session->dma->output_config.out_sel = intf_type; mdp3_session->mfd = mfd; mdp3_session->panel = dev_get_platdata(&mfd->pdev->dev); mdp3_session->status = mdp3_session->intf->active; mdp3_session->overlay.id = MSMFB_NEW_REQUEST; mdp3_bufq_init(&mdp3_session->bufq_in); mdp3_bufq_init(&mdp3_session->bufq_out); 
mdp3_session->histo_status = 0; mdp3_session->lut_sel = 0; BLOCKING_INIT_NOTIFIER_HEAD(&mdp3_session->notifier_head); init_timer(&mdp3_session->vsync_timer); mdp3_session->vsync_timer.function = mdp3_vsync_timer_func; mdp3_session->vsync_timer.data = (u32)mdp3_session; mdp3_session->vsync_period = 1000 / mfd->panel_info->mipi.frame_rate; mfd->mdp.private1 = mdp3_session; init_completion(&mdp3_session->dma_completion); if (intf_type != MDP3_DMA_OUTPUT_SEL_DSI_VIDEO) mdp3_session->wait_for_dma_done = mdp3_wait_for_dma_done; rc = sysfs_create_group(&dev->kobj, &vsync_fs_attr_group); if (rc) { pr_err("vsync sysfs group creation failed, ret=%d\n", rc); goto init_done; } mdp3_session->vsync_event_sd = sysfs_get_dirent(dev->kobj.sd, NULL, "vsync_event"); if (!mdp3_session->vsync_event_sd) { pr_err("vsync_event sysfs lookup failed\n"); rc = -ENODEV; goto init_done; } rc = mdp3_create_sysfs_link(dev); if (rc) pr_warn("problem creating link to mdp sysfs\n"); kobject_uevent(&dev->kobj, KOBJ_ADD); pr_debug("vsync kobject_uevent(KOBJ_ADD)\n"); if (mdp3_get_cont_spash_en()) { mdp3_session->clk_on = 1; mdp3_session->in_splash_screen = 1; mdp3_ctrl_notifier_register(mdp3_session, &mdp3_session->mfd->mdp_sync_pt_data.notifier); } if (splash_mismatch) { pr_err("splash memory mismatch, stop splash\n"); mdp3_ctrl_off(mfd); } mdp3_session->vsync_before_commit = true; init_done: if (IS_ERR_VALUE(rc)) kfree(mdp3_session); return rc; }
/*
 * arch_get_unmapped_area_topdown() - PaX-aware top-down mmap address search.
 *
 * Searches from mmap_base downward for a hole of @len bytes, honouring the
 * PaX SEGMEXEC/PAGEEXEC/RANDMMAP flags, and falls back to the bottom-up
 * allocator when no hole is found.  Returns the chosen address or -ENOMEM.
 */
unsigned long
arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
			  const unsigned long len, const unsigned long pgoff,
			  const unsigned long flags)
{
	struct vm_area_struct *vma;
	struct mm_struct *mm = current->mm;
	unsigned long base = mm->mmap_base, addr = addr0, pax_task_size = TASK_SIZE;

#ifdef CONFIG_PAX_SEGMEXEC
	/* SEGMEXEC halves the usable address space */
	if (mm->pax_flags & MF_PAX_SEGMEXEC)
		pax_task_size = SEGMEXEC_TASK_SIZE;
#endif

	pax_task_size -= PAGE_SIZE;

	/* requested length too big for entire address space */
	if (len > pax_task_size)
		return -ENOMEM;

	if (flags & MAP_FIXED)
		return addr;

#ifdef CONFIG_PAX_PAGEEXEC
	/* without hardware NX, executable mappings go bottom-up */
	if (!(__supported_pte_mask & _PAGE_NX) && (mm->pax_flags & MF_PAX_PAGEEXEC) && (flags & MAP_EXECUTABLE))
		goto bottomup;
#endif

#ifdef CONFIG_PAX_RANDMMAP
	/* NOTE: this #ifdef deliberately guards the following if (addr)
	 * block — address hints are ignored when RANDMMAP is active */
	if (!(mm->pax_flags & MF_PAX_RANDMMAP))
#endif

	/* requesting a specific address */
	if (addr) {
		addr = PAGE_ALIGN(addr);
		if (pax_task_size - len >= addr) {
			vma = find_vma(mm, addr);
			if (check_heap_stack_gap(vma, addr, len))
				return addr;
		}
	}

	/* check if free_area_cache is useful for us */
	if (len <= mm->cached_hole_size) {
		mm->cached_hole_size = 0;
		mm->free_area_cache = mm->mmap_base;
	}

	/* either no address requested or can't fit in requested address hole */
	addr = mm->free_area_cache;

	/* make sure it can fit in the remaining address space */
	if (addr > len) {
		vma = find_vma(mm, addr-len);
		if (check_heap_stack_gap(vma, addr - len, len))
			/* remember the address as a hint for next time */
			return (mm->free_area_cache = addr-len);
	}

	if (mm->mmap_base < len)
		goto bottomup;

	addr = mm->mmap_base-len;

	do {
		/*
		 * Lookup failure means no vma is above this address,
		 * else if new region fits below vma->vm_start,
		 * return with success:
		 */
		vma = find_vma(mm, addr);
		if (check_heap_stack_gap(vma, addr, len))
			/* remember the address as a hint for next time */
			return (mm->free_area_cache = addr);

		/* remember the largest hole we saw so far */
		if (addr + mm->cached_hole_size < vma->vm_start)
			mm->cached_hole_size = vma->vm_start - addr;

		/* try just below the current vma->vm_start */
		addr = skip_heap_stack_gap(vma, len);
	} while (!IS_ERR_VALUE(addr));

bottomup:
	/*
	 * A failed mmap() very likely causes application failure,
	 * so fall back to the bottom-up function here. This scenario
	 * can happen with large stack limits and large mmap()
	 * allocations.
	 */

#ifdef CONFIG_PAX_SEGMEXEC
	if (mm->pax_flags & MF_PAX_SEGMEXEC)
		mm->mmap_base = SEGMEXEC_TASK_UNMAPPED_BASE;
	else
#endif

	mm->mmap_base = TASK_UNMAPPED_BASE;

#ifdef CONFIG_PAX_RANDMMAP
	if (mm->pax_flags & MF_PAX_RANDMMAP)
		mm->mmap_base += mm->delta_mmap;
#endif

	mm->free_area_cache = mm->mmap_base;
	mm->cached_hole_size = ~0UL;
	addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
	/*
	 * Restore the topdown base:
	 */
	mm->mmap_base = base;
	mm->free_area_cache = base;
	mm->cached_hole_size = ~0UL;

	return addr;
}
/*
 * tegra_mmc_probe() - U-Boot driver-model probe for the Tegra MMC controller.
 *
 * Reads DT properties into the mmc_config (bus width, voltages, caps,
 * frequency window), acquires the "sdhci" reset and first clock, sequences
 * reset-assert / clock-enable / rate-set / reset-deassert, requests the
 * optional card-detect, write-protect and power GPIOs, and finishes with
 * tegra_mmc_init().  Returns 0 on success or a negative errno.
 */
static int tegra_mmc_probe(struct udevice *dev)
{
	struct mmc_uclass_priv *upriv = dev_get_uclass_priv(dev);
	struct tegra_mmc_plat *plat = dev_get_platdata(dev);
	struct tegra_mmc_priv *priv = dev_get_priv(dev);
	struct mmc_config *cfg = &plat->cfg;
	int bus_width, ret;

	cfg->name = dev->name;

	bus_width = dev_read_u32_default(dev, "bus-width", 1);

	cfg->voltages = MMC_VDD_32_33 | MMC_VDD_33_34 | MMC_VDD_165_195;
	cfg->host_caps = 0;
	if (bus_width == 8)
		cfg->host_caps |= MMC_MODE_8BIT;
	if (bus_width >= 4)
		cfg->host_caps |= MMC_MODE_4BIT;
	cfg->host_caps |= MMC_MODE_HS_52MHz | MMC_MODE_HS;

	/*
	 * min freq is for card identification, and is the highest
	 * low-speed SDIO card frequency (actually 400KHz)
	 * max freq is highest HS eMMC clock as per the SD/MMC spec
	 * (actually 52MHz)
	 */
	cfg->f_min = 375000;
	cfg->f_max = 48000000;

	cfg->b_max = CONFIG_SYS_MMC_MAX_BLK_COUNT;

	priv->reg = (void *)dev_read_addr(dev);

	ret = reset_get_by_name(dev, "sdhci", &priv->reset_ctl);
	if (ret) {
		debug("reset_get_by_name() failed: %d\n", ret);
		return ret;
	}
	ret = clk_get_by_index(dev, 0, &priv->clk);
	if (ret) {
		debug("clk_get_by_index() failed: %d\n", ret);
		return ret;
	}

	ret = reset_assert(&priv->reset_ctl);
	if (ret)
		return ret;
	ret = clk_enable(&priv->clk);
	if (ret)
		return ret;
	/* clk_set_rate() returns the new rate on success or an
	 * error-encoded value; IS_ERR_VALUE() detects the latter */
	ret = clk_set_rate(&priv->clk, 20000000);
	if (IS_ERR_VALUE(ret))
		return ret;
	ret = reset_deassert(&priv->reset_ctl);
	if (ret)
		return ret;

	/* These GPIOs are optional */
	gpio_request_by_name(dev, "cd-gpios", 0, &priv->cd_gpio,
			     GPIOD_IS_IN);
	gpio_request_by_name(dev, "wp-gpios", 0, &priv->wp_gpio,
			     GPIOD_IS_IN);
	gpio_request_by_name(dev, "power-gpios", 0, &priv->pwr_gpio,
			     GPIOD_IS_OUT);
	/* drive the power GPIO high only if it was actually provided */
	if (dm_gpio_is_valid(&priv->pwr_gpio))
		dm_gpio_set_value(&priv->pwr_gpio, 1);

	upriv->mmc = &plat->mmc;

	return tegra_mmc_init(dev);
}
/*
 * mdss_dsi_cmds_rx() - send a DSI read command and collect the response.
 * @ctrl: DSI controller state.
 * @cmds: read command descriptor to transmit.
 * @rlen: expected number of response bytes.
 *
 * In broadcast mode only the second (DSI_CTRL_1) controller performs the
 * read; command mode is temporarily forced on for video-mode panels and
 * restored at the end.  Returns the number of bytes placed in ctrl->rx_buf
 * (0 on failure).
 */
int mdss_dsi_cmds_rx(struct mdss_dsi_ctrl_pdata *ctrl,
			struct dsi_cmd_desc *cmds, int rlen)
{
	int cnt, len, diff, pkt_size, ret = 0;
	struct dsi_buf *tp, *rp;
	int no_max_pkt_size;
	char cmd;
	u32 dsi_ctrl, data;
	int video_mode;
	u32 left_dsi_ctrl = 0;
	bool left_ctrl_restore = false;
	/* NOTE(review): rx_flags is always 0 here, so no_max_pkt_size is
	 * always false below — looks like a flags parameter was dropped;
	 * confirm against callers. */
	int rx_flags = 0;
	bool long_rd_rsp_chk = false;

	if (ctrl->shared_pdata.broadcast_enable) {
		if (ctrl->ndx == DSI_CTRL_0) {
			pr_debug("%s: Broadcast mode. 1st ctrl\n",
				 __func__);
			return 0;
		}
	}

	/* force command mode on the left controller too, if present */
	if (ctrl->shared_pdata.broadcast_enable) {
		if ((ctrl->ndx == DSI_CTRL_1)
		  && (left_ctrl_pdata != NULL)) {
			left_dsi_ctrl = MIPI_INP(left_ctrl_pdata->ctrl_base
								+ 0x0004);
			video_mode = left_dsi_ctrl & 0x02; /* VIDEO_MODE_EN */
			if (video_mode) {
				data = left_dsi_ctrl | 0x04; /* CMD_MODE_EN */
				MIPI_OUTP(left_ctrl_pdata->ctrl_base + 0x0004,
						data);
				left_ctrl_restore = true;
			}
		}
	}

	/* turn on cmd mode
	 * for video mode, do not send cmds more than
	 * one pixel line, since it only transmit it
	 * during BLLP.
	 */
	dsi_ctrl = MIPI_INP((ctrl->ctrl_base) + 0x0004);
	video_mode = dsi_ctrl & 0x02; /* VIDEO_MODE_EN */
	if (video_mode) {
		data = dsi_ctrl | 0x04; /* CMD_MODE_EN */
		MIPI_OUTP((ctrl->ctrl_base) + 0x0004, data);
	}

	no_max_pkt_size = rx_flags & CMD_REQ_NO_MAX_PKT_SIZE;
	if (no_max_pkt_size)
		rlen = ALIGN(rlen, 4); /* Only support rlen = 4*n */

	len = rlen;
	diff = 0;

	if (len < 2)
		cnt = 4;	/* short read */
	else if (len == 2) {
		/* Response could be a short or long read */
		cnt = 8;
		long_rd_rsp_chk = true;
	} else {
		if (len > MDSS_DSI_LEN)
			len = MDSS_DSI_LEN;	/* 8 bytes at most */
		len = ALIGN(len, 4); /* len 4 bytes align */
		diff = len - rlen;
		/*
		 * add extra 2 bytes to len to have overall
		 * packet size is multipe by 4. This also make
		 * sure 4 bytes dcs headerlocates within a
		 * 32 bits register after shift in.
		 * after all, len should be either 6 or 10.
		 */
		len += 2;
		cnt = len + 6; /* 4 bytes header + 2 bytes crc */
	}

	tp = &ctrl->tx_buf;
	rp = &ctrl->rx_buf;

	if (!no_max_pkt_size) {
		/* packet size need to be set at every read */
		pkt_size = len;
		max_pktsize[0] = pkt_size;
		mdss_dsi_buf_init(tp);
		ret = mdss_dsi_cmd_dma_add(tp, &pkt_size_cmd);
		if (!ret) {
			pr_err("%s: failed to call\n",
				__func__);
			rp->len = 0;
			goto end;
		}
		mdss_dsi_wait4video_eng_busy(ctrl);
		mdss_dsi_enable_irq(ctrl, DSI_CMD_TERM);
		ret = mdss_dsi_cmd_dma_tx(ctrl, tp);
		if (IS_ERR_VALUE(ret)) {
			mdss_dsi_disable_irq(ctrl, DSI_CMD_TERM);
			pr_err("%s: failed to call\n",
				__func__);
			rp->len = 0;
			goto end;
		}
		pr_debug("%s: Max packet size sent\n",
			__func__);
	}

	mdss_dsi_buf_init(tp);
	ret = mdss_dsi_cmd_dma_add(tp, cmds);
	if (!ret) {
		pr_err("%s: failed to call cmd_dma_add for cmd = 0x%x\n",
			__func__,  cmds->payload[0]);
		rp->len = 0;
		goto end;
	}

	mdss_dsi_wait4video_eng_busy(ctrl);
	mdss_dsi_enable_irq(ctrl, DSI_CMD_TERM);
	/* transmit read comamnd to client */
	ret = mdss_dsi_cmd_dma_tx(ctrl, tp);
	if (IS_ERR_VALUE(ret)) {
		mdss_dsi_disable_irq(ctrl, DSI_CMD_TERM);
		pr_err("%s: failed to call\n",
			__func__);
		rp->len = 0;
		goto end;
	}

	/*
	 * once cmd_dma_done interrupt received,
	 * return data from client is ready and stored
	 * at RDBK_DATA register already
	 */
	mdss_dsi_buf_init(rp);
	if (no_max_pkt_size) {
		/*
		 * expect rlen = n * 4
		 * short alignement for start addr
		 */
		rp->data += 2;
	}

	mdss_dsi_cmd_dma_rx(ctrl, rp, cnt);

	if (no_max_pkt_size) {
		/*
		 * remove extra 2 bytes from previous
		 * rx transaction at shift register
		 * which was inserted during copy
		 * shift registers to rx buffer
		 * rx payload start from long alignment addr
		 */
		rp->data += 2;
	}

	/* ambiguous 2-byte read: skip the 4-byte header when the response
	 * turned out to be a short read */
	if (long_rd_rsp_chk && rp->data[0] != DTYPE_GEN_LREAD_RESP &&
		rp->data[0] != DTYPE_DCS_LREAD_RESP)
		rp->data += 4;

	cmd = rp->data[0];
	switch (cmd) {
	case DTYPE_ACK_ERR_RESP:
		pr_debug("%s: rx ACK_ERR_PACLAGE\n", __func__);
		rp->len = 0;
		/* fallthrough — parsed like a short read 1 response */
	case DTYPE_GEN_READ1_RESP:
	case DTYPE_DCS_READ1_RESP:
		mdss_dsi_short_read1_resp(rp);
		break;
	case DTYPE_GEN_READ2_RESP:
	case DTYPE_DCS_READ2_RESP:
		mdss_dsi_short_read2_resp(rp);
		break;
	case DTYPE_GEN_LREAD_RESP:
	case DTYPE_DCS_LREAD_RESP:
		mdss_dsi_long_read_resp(rp);
		if (!long_rd_rsp_chk) {
			rp->len -= 2; /* extra 2 bytes added */
			rp->len -= diff; /* align bytes */
		}
		break;
	default:
		pr_warning("%s:Invalid response cmd\n", __func__);
		rp->len = 0;
	}
end:
	if (left_ctrl_restore)
		MIPI_OUTP(left_ctrl_pdata->ctrl_base + 0x0004,
					left_dsi_ctrl); /*restore */
	if (video_mode)
		MIPI_OUTP((ctrl->ctrl_base) + 0x0004,
					dsi_ctrl); /* restore */

	return rp->len;
}
/*
 * mdss_dsi_cmds_rx() - dcs read from panel
 * @ctrl: dsi controller
 * @cmds: read command descriptor
 * @len: number of bytes to read back
 *
 * controller have 4 registers can hold 16 bytes of rxed data
 * dcs packet: 4 bytes header + payload + 2 bytes crc
 * 2 padding bytes add to payload to have payload length is mutipled by 4
 * 1st read: 4 bytes header + 8 bytes payload + 2 padding + 2 crc
 * 2nd read: 12 bytes payload + 2 padding + 2 crc
 * 3rd read: 12 bytes payload + 2 padding + 2 crc
 *
 */
int mdss_dsi_cmds_rx(struct mdss_dsi_ctrl_pdata *ctrl,
			struct dsi_cmd_desc *cmds, int rlen)
{
	int data_byte, rx_byte, dlen, end;
	int short_response, diff, pkt_size, ret = 0;
	struct dsi_buf *tp, *rp;
	char cmd;
	u32 dsi_ctrl, data;
	int video_mode;
	u32 left_dsi_ctrl = 0;
	bool left_ctrl_restore = false;

	/* broadcast mode: only the second controller performs the read */
	if (ctrl->shared_pdata.broadcast_enable) {
		if (ctrl->ndx == DSI_CTRL_0) {
			pr_debug("%s: Broadcast mode. 1st ctrl\n",
				 __func__);
			return 0;
		}
	}

	/* force command mode on the left controller too, if present */
	if (ctrl->shared_pdata.broadcast_enable) {
		if ((ctrl->ndx == DSI_CTRL_1)
		  && (left_ctrl_pdata != NULL)) {
			left_dsi_ctrl = MIPI_INP(left_ctrl_pdata->ctrl_base
								+ 0x0004);
			video_mode = left_dsi_ctrl & 0x02; /* VIDEO_MODE_EN */
			if (video_mode) {
				data = left_dsi_ctrl | 0x04; /* CMD_MODE_EN */
				MIPI_OUTP(left_ctrl_pdata->ctrl_base + 0x0004,
						data);
				left_ctrl_restore = true;
			}
		}
	}

	/* turn on cmd mode
	 * for video mode, do not send cmds more than
	 * one pixel line, since it only transmit it
	 * during BLLP.
	 */
	dsi_ctrl = MIPI_INP((ctrl->ctrl_base) + 0x0004);
	video_mode = dsi_ctrl & 0x02; /* VIDEO_MODE_EN */
	if (video_mode) {
		data = dsi_ctrl | 0x04; /* CMD_MODE_EN */
		MIPI_OUTP((ctrl->ctrl_base) + 0x0004, data);
	}

	if (rlen == 0) {
		short_response = 1;
		rx_byte = 4;
	} else {
		short_response = 0;
		data_byte = 8;	/* first read */
		/*
		 * add extra 2 padding bytes to have overall
		 * packet size is multipe by 4. This also make
		 * sure 4 bytes dcs headerlocates within a
		 * 32 bits register after shift in.
		 */
		pkt_size = data_byte + 2;
		rx_byte = data_byte + 8; /* 4 header + 2 crc  + 2 padding*/
	}

	tp = &ctrl->tx_buf;
	rp = &ctrl->rx_buf;

	end = 0;
	mdss_dsi_buf_init(rp);
	/* loop: read the response in fifo-sized (16 byte) chunks */
	while (!end) {
		pr_debug("%s:  rlen=%d pkt_size=%d rx_byte=%d\n",
				__func__, rlen, pkt_size, rx_byte);
		if (!short_response) {
			/* tell the panel the max return size for this pass */
			max_pktsize[0] = pkt_size;
			mdss_dsi_buf_init(tp);
			ret = mdss_dsi_cmd_dma_add(tp, &pkt_size_cmd);
			if (!ret) {
				pr_err("%s: failed to add max_pkt_size\n",
					__func__);
				rp->len = 0;
				goto end;
			}

			mdss_dsi_wait4video_eng_busy(ctrl);

			mdss_dsi_enable_irq(ctrl, DSI_CMD_TERM);
			ret = mdss_dsi_cmd_dma_tx(ctrl, tp);
			if (IS_ERR_VALUE(ret)) {
				mdss_dsi_disable_irq(ctrl, DSI_CMD_TERM);
				pr_err("%s: failed to tx max_pkt_size\n",
					__func__);
				rp->len = 0;
				goto end;
			}
			pr_debug("%s: max_pkt_size=%d sent\n",
						__func__, pkt_size);
		}

		mdss_dsi_buf_init(tp);
		ret = mdss_dsi_cmd_dma_add(tp, cmds);
		if (!ret) {
			pr_err("%s: failed to add cmd = 0x%x\n",
				__func__,  cmds->payload[0]);
			rp->len = 0;
			goto end;
		}

		mdss_dsi_wait4video_eng_busy(ctrl);	/* video mode only */
		mdss_dsi_enable_irq(ctrl, DSI_CMD_TERM);
		/* transmit read comamnd to client */
		ret = mdss_dsi_cmd_dma_tx(ctrl, tp);
		if (IS_ERR_VALUE(ret)) {
			mdss_dsi_disable_irq(ctrl, DSI_CMD_TERM);
			pr_err("%s: failed to tx cmd = 0x%x\n",
				__func__,  cmds->payload[0]);
			rp->len = 0;
			goto end;
		}
		/*
		 * once cmd_dma_done interrupt received,
		 * return data from client is ready and stored
		 * at RDBK_DATA register already
		 * since rx fifo is 16 bytes, dcs header is kept at first loop,
		 * after that dcs header lost during shift into registers
		 */
		dlen = mdss_dsi_cmd_dma_rx(ctrl, rp, rx_byte);

		if (short_response)
			break;

		if (rlen <= data_byte) {
			diff = data_byte - rlen;
			end = 1;
		} else {
			diff = 0;
			rlen -= data_byte;
		}

		dlen -= 2; /* 2 padding bytes */
		dlen -= 2; /* 2 crc */
		dlen -= diff;
		rp->data += dlen;	/* next start position */
		rp->len += dlen;
		data_byte = 12;	/* NOT first read */
		pkt_size += data_byte;
		pr_debug("%s: rp data=%x len=%d dlen=%d diff=%d\n",
			__func__, (int)rp->data, rp->len, dlen, diff);
	}

	rp->data = rp->start;	/* move back to start position */
	cmd = rp->data[0];
	switch (cmd) {
	case DTYPE_ACK_ERR_RESP:
		pr_debug("%s: rx ACK_ERR_PACLAGE\n", __func__);
		rp->len = 0;
		/* fallthrough — parsed like a short read 1 response */
	case DTYPE_GEN_READ1_RESP:
	case DTYPE_DCS_READ1_RESP:
		mdss_dsi_short_read1_resp(rp);
		break;
	case DTYPE_GEN_READ2_RESP:
	case DTYPE_DCS_READ2_RESP:
		mdss_dsi_short_read2_resp(rp);
		break;
	case DTYPE_GEN_LREAD_RESP:
	case DTYPE_DCS_LREAD_RESP:
		mdss_dsi_long_read_resp(rp);
		break;
	default:
		pr_warning("%s:Invalid response cmd\n", __func__);
		rp->len = 0;
	}
end:
	if (left_ctrl_restore)
		MIPI_OUTP(left_ctrl_pdata->ctrl_base + 0x0004,
					left_dsi_ctrl); /*restore */
	if (video_mode)
		MIPI_OUTP((ctrl->ctrl_base) + 0x0004,
					dsi_ctrl); /* restore */

	return rp->len;
}
/*
 * mdss_mdp_probe() - platform probe for the MDSS MDP block (device tree only).
 *
 * Allocates the driver data, maps the "mdp_phys" and "vbif_phys" register
 * regions, records the IRQ, then initializes MDP resources and registers
 * bus scaling.  The global mdss_res singleton is set on entry and cleared
 * again on any failure.  Returns 0 on success or a negative errno.
 */
static int mdss_mdp_probe(struct platform_device *pdev)
{
	struct resource *res;
	int rc;
	struct mdss_data_type *mdata;

	if (!pdev->dev.of_node) {
		pr_err("MDP driver only supports device tree probe\n");
		return -ENOTSUPP;
	}

	/* singleton: a second probe is rejected */
	if (mdss_res) {
		pr_err("MDP already initialized\n");
		return -EINVAL;
	}

	mdata = devm_kzalloc(&pdev->dev, sizeof(*mdata), GFP_KERNEL);
	if (mdata == NULL)
		return -ENOMEM;

	pdev->id = 0;
	mdata->pdev = pdev;
	platform_set_drvdata(pdev, mdata);
	mdss_res = mdata;

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "mdp_phys");
	if (!res) {
		pr_err("unable to get MDP base address\n");
		rc = -ENOMEM;
		goto probe_done;
	}

	mdata->mdp_base = devm_ioremap(&pdev->dev, res->start,
				       resource_size(res));
	if (unlikely(!mdata->mdp_base)) {
		pr_err("unable to map MDP base\n");
		rc = -ENOMEM;
		goto probe_done;
	}
	pr_info("MDP HW Base phy_Address=0x%x virt=0x%x\n",
		(int) res->start,
		(int) mdata->mdp_base);

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "vbif_phys");
	if (!res) {
		pr_err("unable to get MDSS VBIF base address\n");
		rc = -ENOMEM;
		goto probe_done;
	}

	mdata->vbif_base = devm_ioremap(&pdev->dev, res->start,
					resource_size(res));
	if (unlikely(!mdata->vbif_base)) {
		pr_err("unable to map MDSS VBIF base\n");
		rc = -ENOMEM;
		goto probe_done;
	}
	pr_info("MDSS VBIF HW Base phy_Address=0x%x virt=0x%x\n",
		(int) res->start,
		(int) mdata->vbif_base);

	res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
	if (!res) {
		pr_err("unable to get MDSS irq\n");
		rc = -ENOMEM;
		goto probe_done;
	}
	mdata->irq = res->start;

	rc = mdss_mdp_res_init(mdata);
	if (rc) {
		pr_err("unable to initialize mdss mdp resources\n");
		goto probe_done;
	}

	rc = mdss_mdp_bus_scale_register(mdata);
probe_done:
	/* clear the singleton so a later probe attempt may succeed;
	 * mdata itself is devm-managed and freed automatically */
	if (IS_ERR_VALUE(rc))
		mdss_res = NULL;

	return rc;
}
/*
 * dra7xx_pcie_probe() - probe the TI DRA7xx PCIe controller
 * @pdev: platform device for the PCIe instance
 *
 * Requests the main interrupt, maps the "ti_conf" register space,
 * initializes and powers on every PHY listed in "phy-names", brings
 * the device out of LTSSM, and registers the PCIe port.
 *
 * Return: 0 on success, negative errno on failure; all PHYs that were
 * powered on are powered off and exited on the error path.
 */
static int __init dra7xx_pcie_probe(struct platform_device *pdev)
{
	u32 reg;
	int ret;
	int irq;
	int i;
	int phy_count;
	struct phy **phy;
	void __iomem *base;
	struct resource *res;
	struct dra7xx_pcie *dra7xx;
	struct device *dev = &pdev->dev;
	struct device_node *np = dev->of_node;
	char name[10];

	dra7xx = devm_kzalloc(dev, sizeof(*dra7xx), GFP_KERNEL);
	if (!dra7xx)
		return -ENOMEM;

	irq = platform_get_irq(pdev, 0);
	if (irq < 0) {
		dev_err(dev, "missing IRQ resource\n");
		return -EINVAL;
	}

	ret = devm_request_irq(dev, irq, dra7xx_pcie_irq_handler,
			       IRQF_SHARED, "dra7xx-pcie-main", dra7xx);
	if (ret) {
		dev_err(dev, "failed to request irq\n");
		return ret;
	}

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "ti_conf");
	/* BUG FIX: res was previously dereferenced without a NULL check */
	if (!res) {
		dev_err(dev, "missing ti_conf memory resource\n");
		return -ENODEV;
	}

	base = devm_ioremap_nocache(dev, res->start, resource_size(res));
	if (!base)
		return -ENOMEM;

	phy_count = of_property_count_strings(np, "phy-names");
	if (phy_count < 0) {
		dev_err(dev, "unable to find the strings\n");
		return phy_count;
	}

	phy = devm_kzalloc(dev, sizeof(*phy) * phy_count, GFP_KERNEL);
	if (!phy)
		return -ENOMEM;

	for (i = 0; i < phy_count; i++) {
		snprintf(name, sizeof(name), "pcie-phy%d", i);
		phy[i] = devm_phy_get(dev, name);
		if (IS_ERR(phy[i]))
			return PTR_ERR(phy[i]);

		ret = phy_init(phy[i]);
		if (ret < 0)
			goto err_phy;

		ret = phy_power_on(phy[i]);
		if (ret < 0) {
			phy_exit(phy[i]);
			goto err_phy;
		}
	}

	dra7xx->base = base;
	dra7xx->phy = phy;
	dra7xx->dev = dev;
	dra7xx->phy_count = phy_count;

	pm_runtime_enable(dev);
	ret = pm_runtime_get_sync(dev);
	/*
	 * BUG FIX: IS_ERR_VALUE() is only defined for the -MAX_ERRNO..-1
	 * range of an unsigned long; for a plain int return code the
	 * correct check is a negative test.  Also jump through the
	 * put/disable path — the old code went straight to err_phy,
	 * leaking the usage count taken by pm_runtime_get_sync() and
	 * leaving runtime PM enabled.
	 */
	if (ret < 0) {
		dev_err(dev, "pm_runtime_get_sync failed\n");
		goto err_add_port;
	}

	/* Hold the link-training state machine off until the port is set up. */
	reg = dra7xx_pcie_readl(dra7xx, PCIECTRL_DRA7XX_CONF_DEVICE_CMD);
	reg &= ~LTSSM_EN;
	dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_DEVICE_CMD, reg);

	platform_set_drvdata(pdev, dra7xx);

	ret = add_pcie_port(dra7xx, pdev);
	if (ret < 0)
		goto err_add_port;

	return 0;

err_add_port:
	pm_runtime_put(dev);
	pm_runtime_disable(dev);

err_phy:
	/* i == phy_count here on post-loop failures: unwinds every PHY. */
	while (--i >= 0) {
		phy_power_off(phy[i]);
		phy_exit(phy[i]);
	}

	return ret;
}
/*
 * gdsc_probe() - register a Qualcomm GDSC power domain as a regulator
 * @pdev: platform device described by a qcom GDSC device-tree node
 *
 * Maps the GDSC control register, looks up the optional clocks listed in
 * "qcom,clock-names", programs the state-machine wait times, applies the
 * memory/peripheral retention policy from DT, optionally powers the
 * domain on permanently ("qcom,skip-logic-collapse"), and registers a
 * voltage regulator exposing the domain.
 *
 * Return: 0 on success, negative errno on failure.  All allocations and
 * mappings are devm-managed, so no explicit unwind is needed.
 */
static int __devinit gdsc_probe(struct platform_device *pdev)
{
	/* Monotonic regulator-id source shared by every GDSC instance. */
	static atomic_t gdsc_count = ATOMIC_INIT(-1);
	struct regulator_init_data *init_data;
	struct resource *res;
	struct gdsc *sc;
	uint32_t regval;
	bool retain_mem, retain_periph;
	int i, ret;

	sc = devm_kzalloc(&pdev->dev, sizeof(struct gdsc), GFP_KERNEL);
	if (sc == NULL)
		return -ENOMEM;

	init_data = of_get_regulator_init_data(&pdev->dev, pdev->dev.of_node);
	if (init_data == NULL)
		return -ENOMEM;

	/* Chain to a parent supply when the DT node declares one. */
	if (of_get_property(pdev->dev.of_node, "parent-supply", NULL))
		init_data->supply_regulator = "parent";

	ret = of_property_read_string(pdev->dev.of_node, "regulator-name",
				      &sc->rdesc.name);
	if (ret)
		return ret;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (res == NULL)
		return -EINVAL;
	sc->gdscr = devm_ioremap(&pdev->dev, res->start, resource_size(res));
	if (sc->gdscr == NULL)
		return -ENOMEM;

	/* "qcom,clock-names" is optional: -EINVAL means it is absent. */
	sc->clock_count = of_property_count_strings(pdev->dev.of_node,
						    "qcom,clock-names");
	if (sc->clock_count == -EINVAL) {
		sc->clock_count = 0;
	} else if (IS_ERR_VALUE(sc->clock_count)) {
		dev_err(&pdev->dev, "Failed to get clock names\n");
		return -EINVAL;
	}

	sc->clocks = devm_kzalloc(&pdev->dev,
			sizeof(struct clk *) * sc->clock_count, GFP_KERNEL);
	if (!sc->clocks)
		return -ENOMEM;
	for (i = 0; i < sc->clock_count; i++) {
		const char *clock_name;
		of_property_read_string_index(pdev->dev.of_node,
					      "qcom,clock-names", i,
					      &clock_name);
		sc->clocks[i] = devm_clk_get(&pdev->dev, clock_name);
		if (IS_ERR(sc->clocks[i])) {
			int rc = PTR_ERR(sc->clocks[i]);
			/* Probe-defer is expected; only log real failures. */
			if (rc != -EPROBE_DEFER)
				dev_err(&pdev->dev, "Failed to get %s\n",
					clock_name);
			return rc;
		}
	}

	sc->rdesc.id = atomic_inc_return(&gdsc_count);
	sc->rdesc.ops = &gdsc_ops;
	sc->rdesc.type = REGULATOR_VOLTAGE;
	sc->rdesc.owner = THIS_MODULE;
	platform_set_drvdata(pdev, sc);

	/*
	 * Disable HW trigger: collapse/restore occur based on registers
	 * writes.  Disable SW override: use the hardware state-machine
	 * for sequencing.
	 */
	regval = readl_relaxed(sc->gdscr);
	regval &= ~(HW_CONTROL_MASK | SW_OVERRIDE_MASK);

	/* Configure wait time between states. */
	regval &= ~(EN_REST_WAIT_MASK | EN_FEW_WAIT_MASK | CLK_DIS_WAIT_MASK);
	regval |= EN_REST_WAIT_VAL | EN_FEW_WAIT_VAL | CLK_DIS_WAIT_VAL;
	writel_relaxed(regval, sc->gdscr);

	retain_mem = of_property_read_bool(pdev->dev.of_node,
					   "qcom,retain-mem");
	retain_periph = of_property_read_bool(pdev->dev.of_node,
					      "qcom,retain-periph");
	/*
	 * regval still carries the PWR_ON_MASK bit from the read above
	 * (the masks cleared only control/wait fields), so a domain that
	 * is already powered keeps retention until first toggle.
	 */
	for (i = 0; i < sc->clock_count; i++) {
		if (retain_mem || (regval & PWR_ON_MASK))
			clk_set_flags(sc->clocks[i], CLKFLAG_RETAIN_MEM);
		else
			clk_set_flags(sc->clocks[i], CLKFLAG_NORETAIN_MEM);

		if (retain_periph || (regval & PWR_ON_MASK))
			clk_set_flags(sc->clocks[i], CLKFLAG_RETAIN_PERIPH);
		else
			clk_set_flags(sc->clocks[i], CLKFLAG_NORETAIN_PERIPH);
	}
	sc->toggle_mem = !retain_mem;
	sc->toggle_periph = !retain_periph;
	sc->toggle_logic = !of_property_read_bool(pdev->dev.of_node,
						"qcom,skip-logic-collapse");
	if (!sc->toggle_logic) {
		/* Power the domain on now; it will never collapse again. */
		regval &= ~SW_COLLAPSE_MASK;
		writel_relaxed(regval, sc->gdscr);

		ret = readl_tight_poll_timeout(sc->gdscr, regval,
					regval & PWR_ON_MASK, TIMEOUT_US);
		if (ret) {
			dev_err(&pdev->dev, "%s enable timed out\n",
				sc->rdesc.name);
			return ret;
		}
	}

	sc->rdev = regulator_register(&sc->rdesc, &pdev->dev, init_data, sc,
				      pdev->dev.of_node);
	if (IS_ERR(sc->rdev)) {
		dev_err(&pdev->dev, "regulator_register(\"%s\") failed.\n",
			sc->rdesc.name);
		return PTR_ERR(sc->rdev);
	}

	return 0;
}
int rmnet_usb_ctrl_init(void) { struct rmnet_ctrl_dev *dev; int n; int status; for (n = 0; n < NUM_CTRL_CHANNELS; ++n) { dev = kzalloc(sizeof(*dev), GFP_KERNEL); if (!dev) { status = -ENOMEM; goto error0; } /*for debug purpose*/ snprintf(dev->name, CTRL_DEV_MAX_LEN, "hsicctl%d", n); mutex_init(&dev->dev_lock); spin_lock_init(&dev->rx_lock); init_waitqueue_head(&dev->read_wait_queue); init_waitqueue_head(&dev->open_wait_queue); INIT_LIST_HEAD(&dev->rx_list); init_usb_anchor(&dev->tx_submitted); status = rmnet_usb_ctrl_alloc_rx(dev); if (status < 0) { kfree(dev); goto error0; } ctrl_dev[n] = dev; } status = alloc_chrdev_region(&ctrldev_num, 0, NUM_CTRL_CHANNELS, DEVICE_NAME); if (IS_ERR_VALUE(status)) { pr_err("ERROR:%s: alloc_chrdev_region() ret %i.\n", __func__, status); goto error0; } ctrldev_classp = class_create(THIS_MODULE, DEVICE_NAME); if (IS_ERR(ctrldev_classp)) { pr_err("ERROR:%s: class_create() ENOMEM\n", __func__); status = -ENOMEM; goto error1; } for (n = 0; n < NUM_CTRL_CHANNELS; ++n) { cdev_init(&ctrl_dev[n]->cdev, &ctrldev_fops); ctrl_dev[n]->cdev.owner = THIS_MODULE; status = cdev_add(&ctrl_dev[n]->cdev, (ctrldev_num + n), 1); if (IS_ERR_VALUE(status)) { pr_err("%s: cdev_add() ret %i\n", __func__, status); kfree(ctrl_dev[n]); goto error2; } ctrl_dev[n]->devicep = device_create(ctrldev_classp, NULL, (ctrldev_num + n), NULL, DEVICE_NAME "%d", n); if (IS_ERR(ctrl_dev[n]->devicep)) { pr_err("%s: device_create() ENOMEM\n", __func__); status = -ENOMEM; cdev_del(&ctrl_dev[n]->cdev); kfree(ctrl_dev[n]); goto error2; } /*create /sys/class/hsicctl/hsicctlx/modem_wait*/ status = device_create_file(ctrl_dev[n]->devicep, &dev_attr_modem_wait); if (status) { device_destroy(ctrldev_classp, MKDEV(MAJOR(ctrldev_num), n)); cdev_del(&ctrl_dev[n]->cdev); kfree(ctrl_dev[n]); goto error2; } dev_set_drvdata(ctrl_dev[n]->devicep, ctrl_dev[n]); } rmnet_usb_ctrl_debugfs_init(); pr_info("rmnet usb ctrl Initialized.\n"); return 0; error2: while (--n >= 0) { 
cdev_del(&ctrl_dev[n]->cdev); device_destroy(ctrldev_classp, MKDEV(MAJOR(ctrldev_num), n)); } class_destroy(ctrldev_classp); n = NUM_CTRL_CHANNELS; error1: unregister_chrdev_region(MAJOR(ctrldev_num), NUM_CTRL_CHANNELS); error0: while (--n >= 0) kfree(ctrl_dev[n]); return status; }