static void __exit iommu_exit(void)
{
	int i;

	/* Common ctx_devs */
	for (i = 0; i < ARRAY_SIZE(msm_iommu_common_ctx_devs); i++)
		platform_device_unregister(msm_iommu_common_ctx_devs[i]);

	/* Common devs. */
	for (i = 0; i < ARRAY_SIZE(msm_iommu_common_devs); ++i)
		platform_device_unregister(msm_iommu_common_devs[i]);

	if (cpu_is_msm8x60() || cpu_is_msm8960()) {
		for (i = 0; i < ARRAY_SIZE(msm_iommu_gfx2d_ctx_devs); i++)
			platform_device_unregister(msm_iommu_gfx2d_ctx_devs[i]);

		for (i = 0; i < ARRAY_SIZE(msm_iommu_jpegd_ctx_devs); i++)
			platform_device_unregister(msm_iommu_jpegd_ctx_devs[i]);

		for (i = 0; i < ARRAY_SIZE(msm_iommu_gfx2d_devs); i++)
			platform_device_unregister(msm_iommu_gfx2d_devs[i]);

		for (i = 0; i < ARRAY_SIZE(msm_iommu_jpegd_devs); i++)
			platform_device_unregister(msm_iommu_jpegd_devs[i]);
	}

	if (cpu_is_apq8064() || cpu_is_apq8064ab()) {
		for (i = 0; i < ARRAY_SIZE(msm_iommu_vcap_ctx_devs); i++)
			platform_device_unregister(msm_iommu_vcap_ctx_devs[i]);
	}

	if (cpu_is_apq8064() || cpu_is_msm8960ab() || cpu_is_apq8064ab()) {
		for (i = 0; i < ARRAY_SIZE(msm_iommu_adreno3xx_ctx_devs); i++)
			platform_device_unregister(
					msm_iommu_adreno3xx_ctx_devs[i]);

		for (i = 0; i < ARRAY_SIZE(msm_iommu_jpegd_ctx_devs); i++)
			platform_device_unregister(
					msm_iommu_jpegd_ctx_devs[i]);

		if (cpu_is_apq8064() || cpu_is_apq8064ab()) {
			for (i = 0; i < ARRAY_SIZE(msm_iommu_vcap_devs); i++)
				platform_device_unregister(
						msm_iommu_vcap_devs[i]);
		}

		for (i = 0; i < ARRAY_SIZE(msm_iommu_adreno3xx_gfx_devs); i++)
			platform_device_unregister(
					msm_iommu_adreno3xx_gfx_devs[i]);

		for (i = 0; i < ARRAY_SIZE(msm_iommu_jpegd_devs); i++)
			platform_device_unregister(msm_iommu_jpegd_devs[i]);
	}

	platform_device_unregister(&msm_root_iommu_dev);
}
static int __init iommu_init(void)
{
	int ret;

	if (!msm_soc_version_supports_iommu_v1()) {
		pr_err("IOMMU v1 is not supported on this SoC version.\n");
		return -ENODEV;
	}

	ret = platform_device_register(&msm_root_iommu_dev);
	if (ret != 0) {
		pr_err("Failed to register root IOMMU device!\n");
		goto failure;
	}

	platform_add_devices(msm_iommu_common_devs,
			     ARRAY_SIZE(msm_iommu_common_devs));

	if (cpu_is_msm8x60() || cpu_is_msm8960()) {
		platform_add_devices(msm_iommu_jpegd_devs,
				     ARRAY_SIZE(msm_iommu_jpegd_devs));
		platform_add_devices(msm_iommu_gfx2d_devs,
				     ARRAY_SIZE(msm_iommu_gfx2d_devs));
	}

	if (cpu_is_apq8064() || cpu_is_apq8064ab()) {
		platform_add_devices(msm_iommu_jpegd_devs,
				     ARRAY_SIZE(msm_iommu_jpegd_devs));
		platform_add_devices(msm_iommu_8064_devs,
				     ARRAY_SIZE(msm_iommu_8064_devs));
	}

	ret = platform_add_devices(msm_iommu_common_ctx_devs,
				   ARRAY_SIZE(msm_iommu_common_ctx_devs));

	if (cpu_is_msm8x60() || cpu_is_msm8960()) {
		platform_add_devices(msm_iommu_jpegd_ctx_devs,
				     ARRAY_SIZE(msm_iommu_jpegd_ctx_devs));
		platform_add_devices(msm_iommu_gfx2d_ctx_devs,
				     ARRAY_SIZE(msm_iommu_gfx2d_ctx_devs));
	}

	if (cpu_is_apq8064() || cpu_is_apq8064ab()) {
		platform_add_devices(msm_iommu_jpegd_ctx_devs,
				     ARRAY_SIZE(msm_iommu_jpegd_ctx_devs));
		platform_add_devices(msm_iommu_8064_ctx_devs,
				     ARRAY_SIZE(msm_iommu_8064_ctx_devs));
	}

	return 0;

failure:
	return ret;
}
/*
 * Unregister a notification.
 *
 * Note: the function may sleep and must be called in a task context.
 *
 * n: the notification object that was registered previously.
 *
 * Return value:
 *   0: success
 *   -ENODEV: RPM driver not initialized
 */
int msm_rpm_unregister_notification(struct msm_rpm_notification *n)
{
	unsigned long flags;
	unsigned int ctx;
	struct msm_rpm_notif_config cfg;
	int rc = 0;
	int i;

	if (!msm_rpm_platform) {
		if (cpu_is_apq8064())
			return 0;
		else
			return -ENODEV;
	}

	mutex_lock(&msm_rpm_mutex);

	ctx = MSM_RPM_CTX_SET_0;
	cfg = msm_rpm_notif_cfgs[ctx];

	for (i = 0; i < MSM_RPM_SEL_MASK_SIZE; i++)
		registered_iv(&cfg)[i].value = 0;

	spin_lock_irqsave(&msm_rpm_irq_lock, flags);

	list_del(&n->list);
	list_for_each_entry(n, &msm_rpm_notifications, list)
		for (i = 0; i < MSM_RPM_SEL_MASK_SIZE; i++)
			registered_iv(&cfg)[i].value |= n->sel_masks[i];

	spin_unlock_irqrestore(&msm_rpm_irq_lock, flags);

	msm_rpm_update_notification(ctx, &msm_rpm_notif_cfgs[ctx], &cfg);
	mutex_unlock(&msm_rpm_mutex);

	return rc;
}
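/*
 * Illustrative usage sketch (not part of the driver): tearing down an RPM
 * notification that was registered earlier with
 * msm_rpm_register_notification() (shown further below).  The notification
 * object name is hypothetical; the call must run in task context because
 * it may sleep.
 */
static struct msm_rpm_notification example_notif_to_drop;	/* assumed object */

static void example_stop_listening(void)
{
	int rc;

	rc = msm_rpm_unregister_notification(&example_notif_to_drop);
	if (rc)
		pr_err("unregister of RPM notification failed: %d\n", rc);
}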
static void *msm_ipc_router_load_modem(void)
{
	void *pil = NULL;
	int rc;

	/* Load GNSS for Standalone 8064 but not for Fusion 3 */
	if (cpu_is_apq8064()) {
		if (socinfo_get_platform_subtype() == 0x0)
			pil = pil_get("gss");
	} else {
		pil = pil_get("modem");
	}

	if (IS_ERR(pil) || !pil) {
		pr_debug("%s: modem load failed\n", __func__);
		pil = NULL;
	} else {
		rc = wait_for_completion_interruptible_timeout(
					&msm_ipc_remote_router_up,
					MODEM_LOAD_TIMEOUT);
		if (!rc)
			rc = -ETIMEDOUT;
		if (rc < 0) {
			pr_err("%s: wait for remote router failed %d\n",
				__func__, rc);
			msm_ipc_router_unload_modem(pil);
			pil = NULL;
		}
	}

	return pil;
}
int msm_spm_turn_on_cpu_rail(unsigned int cpu)
{
	uint32_t val = 0;
	uint32_t timeout = 0;
	void *reg = NULL;
	void *saw_bases[] = {
		0,
		MSM_SAW1_BASE,
		MSM_SAW2_BASE,
		MSM_SAW3_BASE
	};

	if (cpu == 0 || cpu >= num_possible_cpus())
		return -EINVAL;

	reg = saw_bases[cpu];

	if (cpu_is_msm8960() || cpu_is_msm8930() || cpu_is_msm8930aa() ||
	    cpu_is_apq8064() || cpu_is_msm8627() || cpu_is_msm8960ab() ||
	    cpu_is_apq8064ab()) {
		val = 0xA4;
		reg += 0x14;
		timeout = 512;
	} else {
		return -ENOSYS;
	}

	writel_relaxed(val, reg);
	mb();
	udelay(timeout);

	return 0;
}
static int tz_init(struct kgsl_device *device, struct kgsl_pwrscale *pwrscale)
{
	struct tz_priv *priv;
	int ret;

	if (!(cpu_is_msm8x60() || cpu_is_msm8960() || cpu_is_apq8064() ||
	      cpu_is_msm8930() || cpu_is_msm8930aa() || cpu_is_msm8627()))
		return -EINVAL;

	priv = pwrscale->priv = kzalloc(sizeof(struct tz_priv), GFP_KERNEL);
	if (pwrscale->priv == NULL)
		return -ENOMEM;

	priv->governor = TZ_GOVERNOR_ONDEMAND;
	spin_lock_init(&tz_lock);
	kgsl_pwrscale_policy_add_files(device, pwrscale, &tz_attr_group);

	ret = __secure_tz_entry(TZ_CMD_ID, 0, PARAM_INDEX_WRITE_ALGORITHM);
	if (ret == 1)
		pr_info("Using HTC GPU DCVS algorithm\n");
	else
		pr_info("Using QCT GPU DCVS algorithm\n");

	return 0;
}
/*
 * Read the specified status registers and return their values.
 *
 * status: array of id-value pairs.  Each <id> specifies a status register,
 *   i.e., one of MSM_RPM_STATUS_ID_xxxx.  Upon return, each <value> will
 *   contain the value of the status register.
 * count: number of id-value pairs in the array
 *
 * Return value:
 *   0: success
 *   -EBUSY: RPM is updating the status page; values across different
 *           registers may not be consistent
 *   -EINVAL: invalid id in <status> array
 *   -ENODEV: RPM driver not initialized
 */
int msm_rpm_get_status(struct msm_rpm_iv_pair *status, int count)
{
	uint32_t seq_begin;
	uint32_t seq_end;
	int rc;
	int i;

	if (!msm_rpm_platform) {
		if (cpu_is_apq8064())
			return 0;
		else
			return -ENODEV;
	}

	seq_begin = msm_rpm_read(MSM_RPM_PAGE_STATUS,
				MSM_RPM_STATUS_ID_SEQUENCE);

	for (i = 0; i < count; i++) {
		if (status[i].id > MSM_RPM_STATUS_ID_LAST) {
			rc = -EINVAL;
			goto get_status_exit;
		}

		status[i].value = msm_rpm_read(MSM_RPM_PAGE_STATUS,
						status[i].id);
	}

	seq_end = msm_rpm_read(MSM_RPM_PAGE_STATUS,
				MSM_RPM_STATUS_ID_SEQUENCE);

	rc = (seq_begin != seq_end || (seq_begin & 0x01)) ? -EBUSY : 0;

get_status_exit:
	return rc;
}
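/*
 * Illustrative usage sketch (not part of the driver): reading two RPM
 * status registers with msm_rpm_get_status().  The VERSION ids are the
 * same ones the init path below reads directly with msm_rpm_read();
 * -EBUSY means the RPM was rewriting the status page while we read it,
 * so the values are simply re-read.
 */
static void example_read_rpm_version(void)
{
	struct msm_rpm_iv_pair status[] = {
		{ .id = MSM_RPM_STATUS_ID_VERSION_MAJOR },
		{ .id = MSM_RPM_STATUS_ID_VERSION_MINOR },
	};
	int rc;

	do {
		rc = msm_rpm_get_status(status, ARRAY_SIZE(status));
	} while (rc == -EBUSY);

	if (!rc)
		pr_info("RPM firmware %u.%u\n",
			status[0].value, status[1].value);
}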
static void __exit msm_audio_exit(void)
{
	if (!cpu_is_apq8064()) {
		pr_err("%s: Not the right machine type\n", __func__);
		return;
	}

	msm_free_headset_mic_gpios();
	platform_device_unregister(msm_snd_device);
}
int __init msm_rpm_init(struct msm_rpm_platform_data *data)
{
	unsigned int irq;
	int rc;

	if (cpu_is_apq8064())
		return 0;

	msm_rpm_platform = data;

	fw_major = msm_rpm_read(MSM_RPM_PAGE_STATUS,
					MSM_RPM_STATUS_ID_VERSION_MAJOR);
	fw_minor = msm_rpm_read(MSM_RPM_PAGE_STATUS,
					MSM_RPM_STATUS_ID_VERSION_MINOR);
	fw_build = msm_rpm_read(MSM_RPM_PAGE_STATUS,
					MSM_RPM_STATUS_ID_VERSION_BUILD);
	pr_info("%s: RPM firmware %u.%u.%u\n", __func__,
			fw_major, fw_minor, fw_build);

	if (fw_major != RPM_MAJOR_VER) {
		pr_err("%s: RPM version %u.%u.%u incompatible with "
				"this driver version %u.%u.%u\n", __func__,
				fw_major, fw_minor, fw_build,
				RPM_MAJOR_VER, RPM_MINOR_VER, RPM_BUILD_VER);
		return -EFAULT;
	}

	msm_rpm_write(MSM_RPM_PAGE_CTRL,
			MSM_RPM_CTRL_VERSION_MAJOR, RPM_MAJOR_VER);
	msm_rpm_write(MSM_RPM_PAGE_CTRL,
			MSM_RPM_CTRL_VERSION_MINOR, RPM_MINOR_VER);
	msm_rpm_write(MSM_RPM_PAGE_CTRL,
			MSM_RPM_CTRL_VERSION_BUILD, RPM_BUILD_VER);

	irq = msm_rpm_platform->irq_ack;

	rc = request_irq(irq, msm_rpm_ack_interrupt,
			IRQF_TRIGGER_RISING | IRQF_NO_SUSPEND,
			"rpm_drv", msm_rpm_ack_interrupt);
	if (rc) {
		pr_err("%s: failed to request irq %d: %d\n",
			__func__, irq, rc);
		return rc;
	}

	rc = irq_set_irq_wake(irq, 1);
	if (rc) {
		pr_err("%s: failed to set wakeup irq %u: %d\n",
			__func__, irq, rc);
		return rc;
	}

	msm_rpm_populate_map();

	return platform_driver_register(&msm_rpm_platform_driver);
}
static void __exit msm_audio_exit(void)
{
	if (!cpu_is_apq8064() || (socinfo_get_id() == 130)) {
		pr_err("%s: Not the right machine type\n", __func__);
		return;
	}

	msm_free_headset_mic_gpios();
	platform_device_unregister(msm_snd_device);
	kfree(mbhc_cfg.calibration);
}
/*
 * Return value:
 *   0: success
 *   -EINVAL: invalid <ctx> or invalid id in <req> array
 *   -ENODEV: RPM driver not initialized.
 */
static int msm_rpm_clear_common(
	int ctx, struct msm_rpm_iv_pair *req, int count, bool noirq)
{
	uint32_t sel_masks[MSM_RPM_SEL_MASK_SIZE] = {};
	struct msm_rpm_iv_pair r[MSM_RPM_SEL_MASK_SIZE];
	int rc;
	int i;

	if (!msm_rpm_platform) {
		if (cpu_is_apq8064())
			return 0;
		else
			return -ENODEV;
	}

	if (ctx >= MSM_RPM_CTX_SET_COUNT) {
		rc = -EINVAL;
		goto clear_common_exit;
	}

	rc = msm_rpm_fill_sel_masks(sel_masks, req, count);
	if (rc)
		goto clear_common_exit;

	for (i = 0; i < ARRAY_SIZE(r); i++) {
		r[i].id = MSM_RPM_ID_INVALIDATE_0 + i;
		r[i].value = sel_masks[i];
	}

	memset(sel_masks, 0, sizeof(sel_masks));
	sel_masks[msm_rpm_get_sel_mask_reg(MSM_RPM_SEL_INVALIDATE)] |=
		msm_rpm_get_sel_mask(MSM_RPM_SEL_INVALIDATE);

	if (noirq) {
		unsigned long flags;

		spin_lock_irqsave(&msm_rpm_lock, flags);
		rc = msm_rpm_set_exclusive_noirq(ctx, sel_masks, r,
						ARRAY_SIZE(r));
		spin_unlock_irqrestore(&msm_rpm_lock, flags);
		BUG_ON(rc);
	} else {
		mutex_lock(&msm_rpm_mutex);
		rc = msm_rpm_set_exclusive(ctx, sel_masks, r, ARRAY_SIZE(r));
		mutex_unlock(&msm_rpm_mutex);
		BUG_ON(rc);
	}

clear_common_exit:
	return rc;
}
/**
 * rpm_vreg_set_frequency - sets the frequency of a switching regulator
 * @vreg_id: ID for regulator
 * @freq: enum corresponding to desired frequency
 *
 * Returns 0 on success or errno.
 */
int rpm_vreg_set_frequency(int vreg_id, enum rpm_vreg_freq freq)
{
	unsigned int mask[2] = {0}, val[2] = {0};
	struct vreg *vreg;
	int rc;

	/*
	 * HACK: make this function a no-op for 8064 so that it can be called
	 * by consumers on 8064 before RPM capabilities are present.
	 */
	if (cpu_is_apq8064())
		return 0;

	if (!config) {
		pr_err("rpm-regulator driver has not probed yet.\n");
		return -ENODEV;
	}

	if (vreg_id < config->vreg_id_min || vreg_id > config->vreg_id_max) {
		pr_err("invalid regulator id=%d\n", vreg_id);
		return -EINVAL;
	}

	vreg = &config->vregs[vreg_id];

	if (freq < 0 || freq > RPM_VREG_FREQ_1p20) {
		vreg_err(vreg, "invalid frequency=%d\n", freq);
		return -EINVAL;
	}
	if (!vreg->pdata.sleep_selectable) {
		vreg_err(vreg, "regulator is not marked sleep selectable\n");
		return -EINVAL;
	}
	if (!vreg->part->freq.mask) {
		vreg_err(vreg, "frequency not supported\n");
		return -EINVAL;
	}

	val[vreg->part->freq.word] = freq << vreg->part->freq.shift;
	mask[vreg->part->freq.word] = vreg->part->freq.mask;

	rc = vreg_set_noirq(vreg, RPM_VREG_VOTER_REG_FRAMEWORK, 1, mask[0],
			    val[0], mask[1], val[1], vreg->part->request_len,
			    0);
	if (rc)
		vreg_err(vreg, "vreg_set failed, rc=%d\n", rc);

	return rc;
}
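/*
 * Illustrative usage sketch (not part of the driver): voting for a
 * switching regulator's clock frequency.  The regulator id constant below
 * is an assumption standing in for whatever RPM_VREG_ID_* value the board
 * actually uses; RPM_VREG_FREQ_1p20 is the upper bound checked by the
 * function above.
 */
static void example_set_smps_freq(void)
{
	int rc;

	rc = rpm_vreg_set_frequency(RPM_VREG_ID_PM8921_S4 /* assumed id */,
				    RPM_VREG_FREQ_1p20);
	if (rc)
		pr_err("could not set SMPS switching frequency, rc=%d\n", rc);
}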
static int __cpuinit release_secondary(unsigned int cpu)
{
	BUG_ON(cpu >= get_core_count());

	if (cpu_is_msm8x60())
		return scorpion_release_secondary();

	if (machine_is_msm8960_sim() || machine_is_msm8960_rumi3() ||
	    machine_is_apq8064_sim())
		return krait_release_secondary_sim(cpu);

	if (cpu_is_msm8960() || cpu_is_msm8930() || cpu_is_apq8064())
		return krait_release_secondary(cpu);

	WARN(1, "unknown CPU case in release_secondary\n");
	return -EINVAL;
}
static struct mdp4_platform_config *mdp4_get_config(struct platform_device *dev)
{
	static struct mdp4_platform_config config = {};

#ifdef CONFIG_OF
	/* TODO */
	config.max_clk = 266667000;
	config.iommu = iommu_domain_alloc(&platform_bus_type);
#else
	if (cpu_is_apq8064())
		config.max_clk = 266667000;
	else
		config.max_clk = 200000000;

	config.iommu = msm_get_iommu_domain(DISPLAY_READ_DOMAIN);
#endif

	return &config;
}
static int __init scm_pas_init(void)
{
	/* TODO: Remove once bus scaling driver is in place */
	if (!cpu_is_apq8064())
		scm_perf_client = msm_bus_scale_register_client(
				&scm_pas_bus_pdata);
	if (!scm_perf_client)
		pr_warn("unable to register bus client\n");

	scm_bus_clk = clk_get_sys("scm", "bus_clk");
	if (!IS_ERR(scm_bus_clk)) {
		clk_set_rate(scm_bus_clk, 64000000);
	} else {
		scm_bus_clk = NULL;
		pr_warn("unable to get bus clock\n");
	}

	return 0;
}
/**
 * msm_xo_get() - Get a voting handle for an XO
 * @xo_id: XO identifier
 * @voter: Debug string to identify users
 *
 * XO voters vote for OFF by default.  On success this function returns a
 * voting handle; on failure it returns an ERR_PTR().  If XO voting is
 * disabled, %NULL is returned.
 */
struct msm_xo_voter *msm_xo_get(enum msm_xo_ids xo_id, const char *voter)
{
	int ret;
	unsigned long flags;
	struct msm_xo_voter *xo_voter;

	/*
	 * TODO: Remove early return for 8064 once RPM XO voting support
	 * is available.
	 */
	if (cpu_is_apq8064())
		return NULL;

	if (xo_id >= NUM_MSM_XO_IDS) {
		ret = -EINVAL;
		goto err;
	}

	xo_voter = kzalloc(sizeof(*xo_voter), GFP_KERNEL);
	if (!xo_voter) {
		ret = -ENOMEM;
		goto err;
	}

	xo_voter->name = kstrdup(voter, GFP_KERNEL);
	if (!xo_voter->name) {
		ret = -ENOMEM;
		goto err_name;
	}

	xo_voter->xo = &msm_xo_sources[xo_id];

	/* Voters vote for OFF by default */
	spin_lock_irqsave(&msm_xo_lock, flags);
	xo_voter->xo->votes[MSM_XO_MODE_OFF]++;
	list_add(&xo_voter->list, &xo_voter->xo->voters);
	spin_unlock_irqrestore(&msm_xo_lock, flags);

	return xo_voter;

err_name:
	kfree(xo_voter);
err:
	return ERR_PTR(ret);
}
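/*
 * Illustrative usage sketch (not part of the driver): acquiring an XO
 * voting handle.  MSM_XO_TCXO_D0 is an assumed enum msm_xo_ids value.
 * Callers must treat a NULL return (XO voting disabled, e.g. the 8064
 * early return above) as "nothing to vote on" rather than as an error.
 */
static struct msm_xo_voter *example_xo;

static int example_get_xo(void)
{
	example_xo = msm_xo_get(MSM_XO_TCXO_D0 /* assumed id */, "example");
	if (IS_ERR(example_xo))
		return PTR_ERR(example_xo);

	/* NULL means XO voting is disabled on this target; not an error. */
	return 0;
}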
void __init deluxe_ub1_init_gpu(void)
{
	unsigned int version = socinfo_get_version();

	if (cpu_is_apq8064())
		kgsl_3d0_pdata.pwrlevel[0].gpu_freq = 450000000;

	if (SOCINFO_VERSION_MAJOR(version) == 2) {
		kgsl_3d0_pdata.chipid = ADRENO_CHIPID(3, 2, 0, 2);
	} else {
		if ((SOCINFO_VERSION_MAJOR(version) == 1) &&
		    (SOCINFO_VERSION_MINOR(version) == 1))
			kgsl_3d0_pdata.chipid = ADRENO_CHIPID(3, 2, 0, 1);
		else
			kgsl_3d0_pdata.chipid = ADRENO_CHIPID(3, 2, 0, 0);
	}

	platform_device_register(&device_kgsl_3d0);
}
/*
 * Register for RPM notification.  When the specified resources
 * change their status on RPM, RPM sends out notifications and the
 * driver will "up" the semaphore in struct msm_rpm_notification.
 *
 * Note: the function may sleep and must be called in a task context.
 *
 * Memory for <n> must not be freed until the notification is
 * unregistered.  Memory for <req> can be freed after this
 * function returns.
 *
 * n: the notification object.  Caller should initialize only the
 *    semaphore field.  When a notification arrives later, the
 *    semaphore will be "up"ed.
 * req: array of id-value pairs.  Each <id> specifies a status register,
 *    i.e., one of MSM_RPM_STATUS_ID_xxxx.  <value>'s are ignored.
 * count: number of id-value pairs in the array
 *
 * Return value:
 *   0: success
 *   -EINVAL: invalid id in <req> array
 *   -ENODEV: RPM driver not initialized
 */
int msm_rpm_register_notification(struct msm_rpm_notification *n,
	struct msm_rpm_iv_pair *req, int count)
{
	unsigned long flags;
	unsigned int ctx;
	struct msm_rpm_notif_config cfg;
	int rc;
	int i;

	if (!msm_rpm_platform) {
		if (cpu_is_apq8064())
			return 0;
		else
			return -ENODEV;
	}

	INIT_LIST_HEAD(&n->list);
	rc = msm_rpm_fill_sel_masks(n->sel_masks, req, count);
	if (rc)
		goto register_notification_exit;

	mutex_lock(&msm_rpm_mutex);

	if (!msm_rpm_init_notif_done) {
		msm_rpm_initialize_notification();
		msm_rpm_init_notif_done = true;
	}

	spin_lock_irqsave(&msm_rpm_irq_lock, flags);
	list_add(&n->list, &msm_rpm_notifications);
	spin_unlock_irqrestore(&msm_rpm_irq_lock, flags);

	ctx = MSM_RPM_CTX_SET_0;
	cfg = msm_rpm_notif_cfgs[ctx];

	for (i = 0; i < MSM_RPM_SEL_MASK_SIZE; i++)
		registered_iv(&cfg)[i].value |= n->sel_masks[i];

	msm_rpm_update_notification(ctx, &msm_rpm_notif_cfgs[ctx], &cfg);
	mutex_unlock(&msm_rpm_mutex);

register_notification_exit:
	return rc;
}
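/*
 * Illustrative usage sketch (not part of the driver): registering for an
 * RPM notification and blocking until it fires.  This assumes the
 * semaphore member of struct msm_rpm_notification is named 'sem'; that,
 * and the caller-supplied rpm_id, are assumptions about the platform
 * headers, not taken from the code above.
 */
static struct msm_rpm_notification example_notif;

static int example_wait_for_rpm_change(uint32_t rpm_id)
{
	struct msm_rpm_iv_pair req = { .id = rpm_id, .value = 0 };
	int rc;

	/* Per the comment above, the caller initializes only the semaphore. */
	sema_init(&example_notif.sem, 0);

	rc = msm_rpm_register_notification(&example_notif, &req, 1);
	if (rc)
		return rc;

	down(&example_notif.sem);	/* "up"ed when RPM notifies */

	return msm_rpm_unregister_notification(&example_notif);
}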
static int tz_init(struct kgsl_device *device, struct kgsl_pwrscale *pwrscale)
{
	struct tz_priv *priv;

	/* Trustzone is only valid for some SOCs */
	if (!(cpu_is_msm8x60() || cpu_is_msm8960() || cpu_is_apq8064() ||
	      cpu_is_msm8930() || cpu_is_msm8627()))
		return -EINVAL;

	priv = pwrscale->priv = kzalloc(sizeof(struct tz_priv), GFP_KERNEL);
	if (pwrscale->priv == NULL)
		return -ENOMEM;

	priv->governor = TZ_GOVERNOR_ONDEMAND;
	spin_lock_init(&tz_lock);
	kgsl_pwrscale_policy_add_files(device, pwrscale, &tz_attr_group);

	return 0;
}
static void __exit iommu_exit(void)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(msm_iommu_common_ctx_devs); i++)
		platform_device_unregister(msm_iommu_common_ctx_devs[i]);

	for (i = 0; i < ARRAY_SIZE(msm_iommu_common_devs); ++i)
		platform_device_unregister(msm_iommu_common_devs[i]);

	if (cpu_is_msm8x60() || cpu_is_msm8960()) {
		for (i = 0; i < ARRAY_SIZE(msm_iommu_gfx2d_ctx_devs); i++)
			platform_device_unregister(msm_iommu_gfx2d_ctx_devs[i]);

		for (i = 0; i < ARRAY_SIZE(msm_iommu_jpegd_ctx_devs); i++)
			platform_device_unregister(msm_iommu_jpegd_ctx_devs[i]);

		for (i = 0; i < ARRAY_SIZE(msm_iommu_gfx2d_devs); i++)
			platform_device_unregister(msm_iommu_gfx2d_devs[i]);

		for (i = 0; i < ARRAY_SIZE(msm_iommu_jpegd_devs); i++)
			platform_device_unregister(msm_iommu_jpegd_devs[i]);
	}

	if (cpu_is_apq8064() || cpu_is_apq8064ab()) {
		for (i = 0; i < ARRAY_SIZE(msm_iommu_8064_ctx_devs); i++)
			platform_device_unregister(msm_iommu_8064_ctx_devs[i]);

		for (i = 0; i < ARRAY_SIZE(msm_iommu_jpegd_ctx_devs); i++)
			platform_device_unregister(msm_iommu_jpegd_ctx_devs[i]);

		for (i = 0; i < ARRAY_SIZE(msm_iommu_8064_devs); i++)
			platform_device_unregister(msm_iommu_8064_devs[i]);

		for (i = 0; i < ARRAY_SIZE(msm_iommu_jpegd_devs); i++)
			platform_device_unregister(msm_iommu_jpegd_devs[i]);
	}

	platform_device_unregister(&msm_root_iommu_dev);
}
static int release_secondary(unsigned int cpu)
{
	BUG_ON(cpu >= get_core_count());

	if (cpu_is_msm8x60())
		return scorpion_release_secondary();

	if (machine_is_msm8974_sim())
		return krait_release_secondary_sim(0xf9088000, cpu);

	if (cpu_is_msm8960() || cpu_is_msm8930() || cpu_is_msm8930aa() ||
	    cpu_is_apq8064() || cpu_is_msm8627() || cpu_is_msm8960ab())
		return krait_release_secondary(0x02088000, cpu);

	if (cpu_is_msm8974())
		return krait_release_secondary_p3(0xf9088000, cpu);

	WARN(1, "unknown CPU case in release_secondary\n");
	return -EINVAL;
}
/*
 * Upon return, the <req> array will contain values from the ack page.
 *
 * Return value:
 *   0: success
 *   -EINVAL: invalid <ctx> or invalid id in <req> array
 *   -ENOSPC: request rejected
 *   -ENODEV: RPM driver not initialized
 */
static int msm_rpm_set_common(
	int ctx, struct msm_rpm_iv_pair *req, int count, bool noirq)
{
	uint32_t sel_masks[MSM_RPM_SEL_MASK_SIZE] = {};
	int rc;

	if (!msm_rpm_platform) {
		if (cpu_is_apq8064())
			return 0;
		else
			return -ENODEV;
	}

	if (ctx >= MSM_RPM_CTX_SET_COUNT) {
		rc = -EINVAL;
		goto set_common_exit;
	}

	rc = msm_rpm_fill_sel_masks(sel_masks, req, count);
	if (rc)
		goto set_common_exit;

	if (noirq) {
		unsigned long flags;

		spin_lock_irqsave(&msm_rpm_lock, flags);
		rc = msm_rpm_set_exclusive_noirq(ctx, sel_masks, req, count);
		spin_unlock_irqrestore(&msm_rpm_lock, flags);
	} else {
		mutex_lock(&msm_rpm_mutex);
		rc = msm_rpm_set_exclusive(ctx, sel_masks, req, count);
		mutex_unlock(&msm_rpm_mutex);
	}

set_common_exit:
	return rc;
}
static int __init gss_8064_init(void)
{
	int ret;

	if (!cpu_is_apq8064())
		return -ENODEV;

	ret = smsm_state_cb_register(SMSM_MODEM_STATE, SMSM_RESET,
		smsm_state_cb, 0);
	if (ret < 0)
		pr_err("%s: Unable to register SMSM callback! (%d)\n",
				__func__, ret);

	ret = request_irq(GSS_A5_WDOG_EXPIRED, gss_wdog_bite_irq,
			IRQF_TRIGGER_RISING, "gss_a5_wdog", NULL);
	if (ret < 0) {
		pr_err("%s: Unable to request gss watchdog IRQ. (%d)\n",
				__func__, ret);
		disable_irq_nosync(GSS_A5_WDOG_EXPIRED);
		goto out;
	}

	ret = gss_subsystem_restart_init();
	if (ret < 0) {
		pr_err("%s: Unable to reg with subsystem restart. (%d)\n",
				__func__, ret);
		goto out;
	}

	gss_data.gss_dev.minor = MISC_DYNAMIC_MINOR;
	gss_data.gss_dev.name = "gss";
	gss_data.gss_dev.fops = &gss_file_ops;

	ret = misc_register(&gss_data.gss_dev);
	if (ret) {
		pr_err("%s: misc_register failed for %s (%d)\n", __func__,
				gss_data.gss_dev.name, ret);
		goto out;
	}

	gss_data.gss_ramdump_dev = create_ramdump_device("gss");
	if (!gss_data.gss_ramdump_dev) {
		pr_err("%s: Unable to create gss ramdump device. (%d)\n",
				__func__, -ENOMEM);
		ret = -ENOMEM;
		goto out;
	}

	gss_data.smem_ramdump_dev = create_ramdump_device("smem-gss");
	if (!gss_data.smem_ramdump_dev) {
		pr_err("%s: Unable to create smem ramdump device. (%d)\n",
				__func__, -ENOMEM);
		ret = -ENOMEM;
		goto out;
	}

	pr_info("%s: gss fatal driver init'ed.\n", __func__);
out:
	return ret;
}
static int hdmi_bind(struct device *dev, struct device *master, void *data)
{
	struct drm_device *drm = dev_get_drvdata(master);
	struct msm_drm_private *priv = drm->dev_private;
	static struct hdmi_platform_config *hdmi_cfg;
	struct hdmi *hdmi;
#ifdef CONFIG_OF
	struct device_node *of_node = dev->of_node;
	const struct of_device_id *match;

	match = of_match_node(dt_match, of_node);
	if (match && match->data) {
		hdmi_cfg = (struct hdmi_platform_config *)match->data;
		DBG("hdmi phy: %s", match->compatible);
	} else {
		dev_err(dev, "unknown phy: %s\n", of_node->name);
		return -ENXIO;
	}

	hdmi_cfg->mmio_name = "core_physical";
	hdmi_cfg->qfprom_mmio_name = "qfprom_physical";
	hdmi_cfg->ddc_clk_gpio = get_gpio(dev, of_node, "qcom,hdmi-tx-ddc-clk");
	hdmi_cfg->ddc_data_gpio = get_gpio(dev, of_node, "qcom,hdmi-tx-ddc-data");
	hdmi_cfg->hpd_gpio = get_gpio(dev, of_node, "qcom,hdmi-tx-hpd");
	hdmi_cfg->mux_en_gpio = get_gpio(dev, of_node, "qcom,hdmi-tx-mux-en");
	hdmi_cfg->mux_sel_gpio = get_gpio(dev, of_node, "qcom,hdmi-tx-mux-sel");
	hdmi_cfg->mux_lpm_gpio = get_gpio(dev, of_node, "qcom,hdmi-tx-mux-lpm");
#else
	static struct hdmi_platform_config config = {};
	static const char *hpd_clk_names[] = {
			"core_clk", "master_iface_clk", "slave_iface_clk",
	};
	if (cpu_is_apq8064()) {
		static const char *hpd_reg_names[] = {"8921_hdmi_mvs"};
		config.phy_init      = hdmi_phy_8960_init;
		config.hpd_reg_names = hpd_reg_names;
		config.hpd_reg_cnt   = ARRAY_SIZE(hpd_reg_names);
		config.hpd_clk_names = hpd_clk_names;
		config.hpd_clk_cnt   = ARRAY_SIZE(hpd_clk_names);
		config.ddc_clk_gpio  = 70;
		config.ddc_data_gpio = 71;
		config.hpd_gpio      = 72;
		config.mux_en_gpio   = -1;
		config.mux_sel_gpio  = -1;
	} else if (cpu_is_msm8960() || cpu_is_msm8960ab()) {
		static const char *hpd_reg_names[] = {"8921_hdmi_mvs"};
		config.phy_init      = hdmi_phy_8960_init;
		config.hpd_reg_names = hpd_reg_names;
		config.hpd_reg_cnt   = ARRAY_SIZE(hpd_reg_names);
		config.hpd_clk_names = hpd_clk_names;
		config.hpd_clk_cnt   = ARRAY_SIZE(hpd_clk_names);
		config.ddc_clk_gpio  = 100;
		config.ddc_data_gpio = 101;
		config.hpd_gpio      = 102;
		config.mux_en_gpio   = -1;
		config.mux_sel_gpio  = -1;
	} else if (cpu_is_msm8x60()) {
		static const char *hpd_reg_names[] = {
				"8901_hdmi_mvs", "8901_mpp0"
		};
		config.phy_init      = hdmi_phy_8x60_init;
		config.hpd_reg_names = hpd_reg_names;
		config.hpd_reg_cnt   = ARRAY_SIZE(hpd_reg_names);
		config.hpd_clk_names = hpd_clk_names;
		config.hpd_clk_cnt   = ARRAY_SIZE(hpd_clk_names);
		config.ddc_clk_gpio  = 170;
		config.ddc_data_gpio = 171;
		config.hpd_gpio      = 172;
		config.mux_en_gpio   = -1;
		config.mux_sel_gpio  = -1;
	}
	config.mmio_name = "hdmi_msm_hdmi_addr";
	config.qfprom_mmio_name = "hdmi_msm_qfprom_addr";

	hdmi_cfg = &config;
#endif
	dev->platform_data = hdmi_cfg;

	hdmi = hdmi_init(to_platform_device(dev));
	if (IS_ERR(hdmi))
		return PTR_ERR(hdmi);
	priv->hdmi = hdmi;

	msm_hdmi_register_audio_driver(hdmi, dev);

	return 0;
}
/**
 * rpm_vreg_set_voltage - vote for a min_uV value of a specified regulator
 * @vreg_id: ID for regulator
 * @voter: ID for the voter
 * @min_uV: minimum acceptable voltage (in uV) that is voted for
 * @max_uV: maximum acceptable voltage (in uV) that is voted for
 * @sleep_also: 0 for active set only, non-0 for active set and sleep set
 *
 * Returns 0 on success or errno.
 *
 * This function is used to vote for the voltage of a regulator without
 * using the regulator framework.  It is needed by consumers which hold spin
 * locks or have interrupts disabled because the regulator framework can sleep.
 * It is also needed by consumers which wish to only vote for active set
 * regulator voltage.
 *
 * If sleep_also == 0, then a sleep-set value of 0V will be voted for.
 *
 * This function may only be called for regulators which have the sleep flag
 * specified in their private data.
 *
 * Consumers can vote to disable a regulator with this function by passing
 * min_uV = 0 and max_uV = 0.
 */
int rpm_vreg_set_voltage(int vreg_id, enum rpm_vreg_voter voter, int min_uV,
			 int max_uV, int sleep_also)
{
	unsigned int mask[2] = {0}, val[2] = {0};
	struct vreg_range *range;
	struct vreg *vreg;
	int uV = min_uV;
	int lim_min_uV, lim_max_uV, i, rc;

	/*
	 * HACK: make this function a no-op for 8064 so that it can be called
	 * by consumers on 8064 before RPM capabilities are present. (needed
	 * for acpuclock driver)
	 */
	if (cpu_is_apq8064())
		return 0;

	if (!config) {
		pr_err("rpm-regulator driver has not probed yet.\n");
		return -ENODEV;
	}

	if (vreg_id < config->vreg_id_min || vreg_id > config->vreg_id_max) {
		pr_err("invalid regulator id=%d\n", vreg_id);
		return -EINVAL;
	}

	vreg = &config->vregs[vreg_id];
	range = &vreg->set_points->range[0];

	if (!vreg->pdata.sleep_selectable) {
		vreg_err(vreg, "regulator is not marked sleep selectable\n");
		return -EINVAL;
	}

	/* Allow min_uV == max_uV == 0 to represent a disable request. */
	if (min_uV != 0 || max_uV != 0) {
		/*
		 * Check if request voltage is outside of allowed range. The
		 * regulator core has already checked that constraint range
		 * is inside of the physically allowed range.
		 */
		lim_min_uV = vreg->pdata.init_data.constraints.min_uV;
		lim_max_uV = vreg->pdata.init_data.constraints.max_uV;

		if (uV < lim_min_uV && max_uV >= lim_min_uV)
			uV = lim_min_uV;

		if (uV < lim_min_uV || uV > lim_max_uV) {
			vreg_err(vreg, "request v=[%d, %d] is outside allowed "
				"v=[%d, %d]\n", min_uV, max_uV, lim_min_uV,
				lim_max_uV);
			return -EINVAL;
		}

		/* Find the range which uV is inside of. */
		for (i = vreg->set_points->count - 1; i > 0; i--) {
			if (uV > vreg->set_points->range[i - 1].max_uV) {
				range = &vreg->set_points->range[i];
				break;
			}
		}

		/*
		 * Force uV to be an allowed set point and apply a ceiling
		 * function to non-set point values.
		 */
		uV = (uV - range->min_uV + range->step_uV - 1)
			/ range->step_uV;
		uV = uV * range->step_uV + range->min_uV;

		if (uV > max_uV) {
			vreg_err(vreg,
				"request v=[%d, %d] cannot be met by any set "
				"point; next set point: %d\n",
				min_uV, max_uV, uV);
			return -EINVAL;
		}
	}

	if (vreg->part->uV.mask) {
		val[vreg->part->uV.word] = uV << vreg->part->uV.shift;
		mask[vreg->part->uV.word] = vreg->part->uV.mask;
	} else {
		val[vreg->part->mV.word] =
			MICRO_TO_MILLI(uV) << vreg->part->mV.shift;
		mask[vreg->part->mV.word] = vreg->part->mV.mask;
	}

	rc = vreg_set_noirq(vreg, voter, sleep_also, mask[0], val[0], mask[1],
			    val[1], vreg->part->request_len, 1);
	if (rc)
		vreg_err(vreg, "vreg_set_noirq failed, rc=%d\n", rc);

	return rc;
}
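/*
 * Illustrative usage sketch (not part of the driver): voting for an
 * active-set-only voltage without going through the regulator framework.
 * The regulator id and voter enum value are assumptions (any valid
 * RPM_VREG_ID_* / RPM_VREG_VOTER* pair would do); passing 0 for both
 * voltage bounds would instead vote to disable the regulator.
 */
static int example_vote_active_voltage(void)
{
	/* 1.05 V min, 1.15 V max, active set only (sleep_also == 0) */
	return rpm_vreg_set_voltage(RPM_VREG_ID_PM8921_L24 /* assumed id */,
				    RPM_VREG_VOTER3 /* assumed voter */,
				    1050000, 1150000, 0);
}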
static int __init modem_8960_init(void)
{
	int ret;

	if (cpu_is_apq8064() || cpu_is_apq8064ab())
		return -ENODEV;

	ret = smsm_state_cb_register(SMSM_MODEM_STATE, SMSM_RESET,
		smsm_state_cb, 0);
	if (ret < 0)
		pr_err("%s: Unable to register SMSM callback! (%d)\n",
				__func__, ret);

	ret = request_irq(Q6FW_WDOG_EXPIRED_IRQ, modem_wdog_bite_irq,
			IRQF_TRIGGER_RISING, "modem_wdog_fw", NULL);
	if (ret < 0) {
		pr_err("%s: Unable to request q6fw watchdog IRQ. (%d)\n",
				__func__, ret);
		goto out;
	}

	ret = request_irq(Q6SW_WDOG_EXPIRED_IRQ, modem_wdog_bite_irq,
			IRQF_TRIGGER_RISING, "modem_wdog_sw", NULL);
	if (ret < 0) {
		pr_err("%s: Unable to request q6sw watchdog IRQ. (%d)\n",
				__func__, ret);
		disable_irq_nosync(Q6FW_WDOG_EXPIRED_IRQ);
		goto out;
	}

	ret = modem_subsystem_restart_init();
	if (ret < 0) {
		pr_err("%s: Unable to reg with subsystem restart. (%d)\n",
				__func__, ret);
		goto out;
	}

	modemfw_ramdump_dev = create_ramdump_device("modem_fw");
	if (!modemfw_ramdump_dev) {
		pr_err("%s: Unable to create modem fw ramdump device. (%d)\n",
				__func__, -ENOMEM);
		ret = -ENOMEM;
		goto out;
	}

	modemsw_ramdump_dev = create_ramdump_device("modem_sw");
	if (!modemsw_ramdump_dev) {
		pr_err("%s: Unable to create modem sw ramdump device. (%d)\n",
				__func__, -ENOMEM);
		ret = -ENOMEM;
		goto out;
	}

	smem_ramdump_dev = create_ramdump_device("smem-modem");
	if (!smem_ramdump_dev) {
		pr_err("%s: Unable to create smem ramdump device. (%d)\n",
				__func__, -ENOMEM);
		ret = -ENOMEM;
		goto out;
	}

	ret = modem_debugfs_init();

	pr_info("%s: modem fatal driver init'ed.\n", __func__);
out:
	return ret;
}
static int msm_configure_headset_mic_gpios(void)
{
	int ret;
	struct pm_gpio param = {
		.direction      = PM_GPIO_DIR_OUT,
		.output_buffer  = PM_GPIO_OUT_BUF_CMOS,
		.output_value   = 1,
		.pull           = PM_GPIO_PULL_NO,
		.vin_sel        = PM_GPIO_VIN_S4,
		.out_strength   = PM_GPIO_STRENGTH_MED,
		.function       = PM_GPIO_FUNC_NORMAL,
	};

	ret = gpio_request(PM8921_GPIO_PM_TO_SYS(23), "AV_SWITCH");
	if (ret) {
		pr_err("%s: Failed to request gpio %d\n", __func__,
			PM8921_GPIO_PM_TO_SYS(23));
		return ret;
	}

	ret = pm8xxx_gpio_config(PM8921_GPIO_PM_TO_SYS(23), &param);
	if (ret)
		pr_err("%s: Failed to configure gpio %d\n", __func__,
			PM8921_GPIO_PM_TO_SYS(23));
	else
		gpio_direction_output(PM8921_GPIO_PM_TO_SYS(23), 0);

	ret = gpio_request(PM8921_GPIO_PM_TO_SYS(35), "US_EURO_SWITCH");
	if (ret) {
		pr_err("%s: Failed to request gpio %d\n", __func__,
			PM8921_GPIO_PM_TO_SYS(35));
		gpio_free(PM8921_GPIO_PM_TO_SYS(23));
		return ret;
	}
	ret = pm8xxx_gpio_config(PM8921_GPIO_PM_TO_SYS(35), &param);
	if (ret)
		pr_err("%s: Failed to configure gpio %d\n", __func__,
			PM8921_GPIO_PM_TO_SYS(35));
	else
		gpio_direction_output(PM8921_GPIO_PM_TO_SYS(35), 0);

	return 0;
}

static void msm_free_headset_mic_gpios(void)
{
	if (msm_headset_gpios_configured) {
		gpio_free(PM8921_GPIO_PM_TO_SYS(23));
		gpio_free(PM8921_GPIO_PM_TO_SYS(35));
	}
}

static int __init msm_audio_init(void)
{
	int ret;

	if (!cpu_is_apq8064() || (socinfo_get_id() == 130)) {
		pr_err("%s: Not the right machine type\n", __func__);
		return -ENODEV;
	}

	mbhc_cfg.calibration = def_tabla_mbhc_cal();
	if (!mbhc_cfg.calibration) {
		pr_err("Calibration data allocation failed\n");
		return -ENOMEM;
	}

	msm_snd_device = platform_device_alloc("soc-audio", 0);
	if (!msm_snd_device) {
		pr_err("Platform device allocation failed\n");
		kfree(mbhc_cfg.calibration);
		return -ENOMEM;
	}

	platform_set_drvdata(msm_snd_device, &snd_soc_card_msm);
	ret = platform_device_add(msm_snd_device);
	if (ret) {
		platform_device_put(msm_snd_device);
		kfree(mbhc_cfg.calibration);
		return ret;
	}

	if (msm_configure_headset_mic_gpios()) {
		pr_err("%s Fail to configure headset mic gpios\n", __func__);
		msm_headset_gpios_configured = 0;
	} else
		msm_headset_gpios_configured = 1;

	return ret;
}
static int hdmi_bind(struct device *dev, struct device *master, void *data)
{
	struct drm_device *drm = dev_get_drvdata(master);
	struct msm_drm_private *priv = drm->dev_private;
	static struct hdmi_platform_config config = {};
	struct hdmi *hdmi;
#ifdef CONFIG_OF
	struct device_node *of_node = dev->of_node;

	if (of_device_is_compatible(of_node, "qcom,hdmi-tx-8074")) {
		static const char *hpd_reg_names[] = {"hpd-gdsc", "hpd-5v"};
		static const char *pwr_reg_names[] = {"core-vdda", "core-vcc"};
		static const char *hpd_clk_names[] = {
				"iface_clk", "core_clk", "mdp_core_clk"};
		static unsigned long hpd_clk_freq[] = {0, 19200000, 0};
		static const char *pwr_clk_names[] = {
				"extp_clk", "alt_iface_clk"};

		config.phy_init      = hdmi_phy_8x74_init;
		config.hpd_reg_names = hpd_reg_names;
		config.hpd_reg_cnt   = ARRAY_SIZE(hpd_reg_names);
		config.pwr_reg_names = pwr_reg_names;
		config.pwr_reg_cnt   = ARRAY_SIZE(pwr_reg_names);
		config.hpd_clk_names = hpd_clk_names;
		config.hpd_freq      = hpd_clk_freq;
		config.hpd_clk_cnt   = ARRAY_SIZE(hpd_clk_names);
		config.pwr_clk_names = pwr_clk_names;
		config.pwr_clk_cnt   = ARRAY_SIZE(pwr_clk_names);
	} else if (of_device_is_compatible(of_node, "qcom,hdmi-tx-8960")) {
		static const char *hpd_clk_names[] = {
				"core_clk", "master_iface_clk",
				"slave_iface_clk"};
		static const char *hpd_reg_names[] = {"core-vdda", "hdmi-mux"};

		config.phy_init      = hdmi_phy_8960_init;
		config.hpd_reg_names = hpd_reg_names;
		config.hpd_reg_cnt   = ARRAY_SIZE(hpd_reg_names);
		config.hpd_clk_names = hpd_clk_names;
		config.hpd_clk_cnt   = ARRAY_SIZE(hpd_clk_names);
	} else if (of_device_is_compatible(of_node, "qcom,hdmi-tx-8660")) {
		config.phy_init = hdmi_phy_8x60_init;
	} else {
		dev_err(dev, "unknown phy: %s\n", of_node->name);
	}

	config.mmio_name = "core_physical";
	config.ddc_clk_gpio = get_gpio(dev, of_node, "qcom,hdmi-tx-ddc-clk");
	config.ddc_data_gpio = get_gpio(dev, of_node, "qcom,hdmi-tx-ddc-data");
	config.hpd_gpio = get_gpio(dev, of_node, "qcom,hdmi-tx-hpd");
	config.mux_en_gpio = get_gpio(dev, of_node, "qcom,hdmi-tx-mux-en");
	config.mux_sel_gpio = get_gpio(dev, of_node, "qcom,hdmi-tx-mux-sel");
	config.mux_lpm_gpio = get_gpio(dev, of_node, "qcom,hdmi-tx-mux-lpm");
#else
	static const char *hpd_clk_names[] = {
			"core_clk", "master_iface_clk", "slave_iface_clk",
	};
	if (cpu_is_apq8064()) {
		static const char *hpd_reg_names[] = {"8921_hdmi_mvs"};
		config.phy_init      = hdmi_phy_8960_init;
		config.mmio_name     = "hdmi_msm_hdmi_addr";
		config.hpd_reg_names = hpd_reg_names;
		config.hpd_reg_cnt   = ARRAY_SIZE(hpd_reg_names);
		config.hpd_clk_names = hpd_clk_names;
		config.hpd_clk_cnt   = ARRAY_SIZE(hpd_clk_names);
		config.ddc_clk_gpio  = 70;
		config.ddc_data_gpio = 71;
		config.hpd_gpio      = 72;
		config.mux_en_gpio   = -1;
		config.mux_sel_gpio  = -1;
	} else if (cpu_is_msm8960() || cpu_is_msm8960ab()) {
		static const char *hpd_reg_names[] = {"8921_hdmi_mvs"};
		config.phy_init      = hdmi_phy_8960_init;
		config.mmio_name     = "hdmi_msm_hdmi_addr";
		config.hpd_reg_names = hpd_reg_names;
		config.hpd_reg_cnt   = ARRAY_SIZE(hpd_reg_names);
		config.hpd_clk_names = hpd_clk_names;
		config.hpd_clk_cnt   = ARRAY_SIZE(hpd_clk_names);
		config.ddc_clk_gpio  = 100;
		config.ddc_data_gpio = 101;
		config.hpd_gpio      = 102;
		config.mux_en_gpio   = -1;
		config.mux_sel_gpio  = -1;
	} else if (cpu_is_msm8x60()) {
		static const char *hpd_reg_names[] = {
				"8901_hdmi_mvs", "8901_mpp0"
		};
		config.phy_init      = hdmi_phy_8x60_init;
		config.mmio_name     = "hdmi_msm_hdmi_addr";
		config.hpd_reg_names = hpd_reg_names;
		config.hpd_reg_cnt   = ARRAY_SIZE(hpd_reg_names);
		config.hpd_clk_names = hpd_clk_names;
		config.hpd_clk_cnt   = ARRAY_SIZE(hpd_clk_names);
		config.ddc_clk_gpio  = 170;
		config.ddc_data_gpio = 171;
		config.hpd_gpio      = 172;
		config.mux_en_gpio   = -1;
		config.mux_sel_gpio  = -1;
	}
#endif
	dev->platform_data = &config;

	hdmi = hdmi_init(to_platform_device(dev));
	if (IS_ERR(hdmi))
		return PTR_ERR(hdmi);
	priv->hdmi = hdmi;

	return 0;
}