/*
 * camera_v4l2_close - release one handle on the MSM camera video node.
 *
 * When the open count is already 0 the whole session is torn down: the
 * imaging server is asked to delete the stream and the session, the
 * command-ack queue and session bookkeeping are destroyed, and the wakeup
 * reference on the video device is dropped.  Otherwise only this handle's
 * stream and its command-ack queue are removed.
 *
 * Always returns 0; event-post failures are only logged because the
 * imaging server may already have crashed.
 */
static int camera_v4l2_close(struct file *filep)
{
    int rc = 0;
    int ret = 0;
    struct v4l2_event event;
    struct msm_video_device *pvdev = video_drvdata(filep);
    struct camera_v4l2_private *sp = fh_to_private(filep->private_data);

    /*
     * NOTE(review): the open-count decrement is commented out, so this
     * close path never lowers pvdev->opened itself.  Presumably it is
     * decremented elsewhere - confirm, otherwise the "last close" branch
     * below is unreachable.
     */
    // atomic_sub_return(1, &pvdev->opened);
    if (atomic_read(&pvdev->opened) == 0) {

        camera_pack_event(filep, MSM_CAMERA_SET_PARM,
            MSM_CAMERA_PRIV_DEL_STREAM, -1, &event);

        /* Donot wait, imaging server may have crashed */
        ret = msm_post_event(&event, MSM_POST_EVT_TIMEOUT);
        if(ret < 0){
            pr_err("%s:%d camera_v4l2_close_1 failed\n", __func__, __LINE__);
        }

        camera_pack_event(filep, MSM_CAMERA_DEL_SESSION, 0, -1, &event);

        /* Donot wait, imaging server may have crashed */
        msm_post_event(&event, -1);
        msm_delete_command_ack_q(pvdev->vdev->num, 0);

        /* This should take care of both normal close
         * and application crashes */
        msm_destroy_session(pvdev->vdev->num);

        pm_relax(&pvdev->vdev->dev);
        atomic_set(&pvdev->stream_cnt, 0);
    } else {
        camera_pack_event(filep, MSM_CAMERA_SET_PARM,
            MSM_CAMERA_PRIV_DEL_STREAM, -1, &event);

        /* Donot wait, imaging server may have crashed */
        ret = msm_post_event(&event, MSM_POST_EVT_TIMEOUT);
        if(ret < 0){
            pr_err("%s:%d camera_v4l2_close_2 failed\n", __func__, __LINE__);
        }

        msm_delete_command_ack_q(pvdev->vdev->num, sp->stream_id);
        msm_delete_stream(pvdev->vdev->num, sp->stream_id);
    }

    camera_v4l2_vb2_q_release(filep);
    camera_v4l2_fh_release(filep);

    return rc;
}
/*
 * bcmsdh_dev_relax - drop the "stay awake" request held for SDIO activity.
 *
 * Only compiled in when Android wakelocks are not used and the kernel is
 * newer than 2.6.36 (where the wakeup-source API this calls exists);
 * otherwise the function body is empty.
 */
void bcmsdh_dev_relax(bcmsdh_info_t *bcmsdh)
{
#if !defined(CONFIG_HAS_WAKELOCK) && (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 36))
    bcmsdh_os_info_t *bcmsdh_osinfo = bcmsdh->os_cxt;

    pm_relax(bcmsdh_osinfo->dev);
#endif /* !defined(CONFIG_HAS_WAKELOCK) && (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 36)) */
}
/*
 * switch to host: -> MTU3_VBUS_OFF --> MTU3_ID_GROUND
 * switch to device: -> MTU3_ID_FLOAT --> MTU3_VBUS_VALID
 */
static void ssusb_set_mailbox(struct otg_switch_mtk *otg_sx,
    enum mtu3_vbus_id_state status)
{
    struct ssusb_mtk *ssusb =
        container_of(otg_sx, struct ssusb_mtk, otg_switch);
    struct mtu3 *mtu = ssusb->u3d;

    dev_dbg(ssusb->dev, "mailbox state(%d)\n", status);

    switch (status) {
    case MTU3_ID_GROUND:
        /* ID grounded: hand the port to the host, then raise VBUS */
        switch_port_to_host(ssusb);
        ssusb_set_vbus(otg_sx, 1);
        ssusb->is_host = true;
        break;
    case MTU3_ID_FLOAT:
        /* reverse order of the host path: drop VBUS before switching
         * the port back to the device controller */
        ssusb->is_host = false;
        ssusb_set_vbus(otg_sx, 0);
        switch_port_to_device(ssusb);
        break;
    case MTU3_VBUS_OFF:
        /* VBUS gone: stop the device controller and allow suspend */
        mtu3_stop(mtu);
        pm_relax(ssusb->dev);
        break;
    case MTU3_VBUS_VALID:
        /* avoid suspend when works as device */
        pm_stay_awake(ssusb->dev);
        mtu3_start(mtu);
        break;
    default:
        dev_err(ssusb->dev, "invalid state\n");
    }
}
void wcd9xxx_spmi_unlock_sleep() { mutex_lock(&map.pm_lock); if (--map.wlock_holders == 0) { pr_debug("%s: releasing wake lock pm_state %d -> %d\n", __func__, map.pm_state, WCD9XXX_PM_SLEEPABLE); /* * if wcd9xxx_spmi_lock_sleep failed, pm_state would be still * WCD9XXX_PM_ASLEEP, don't overwrite */ if (likely(map.pm_state == WCD9XXX_PM_AWAKE)) map.pm_state = WCD9XXX_PM_SLEEPABLE; pm_qos_update_request(&map.pm_qos_req, PM_QOS_DEFAULT_VALUE); #ifdef VENDOR_EDIT //[email protected], 2015/03/19, Add for Qcom patch, //Headset sometime not detected when phone is sleep pm_relax(&map.spmi[0]->dev); #endif /* VENDOR_EDIT */ } mutex_unlock(&map.pm_lock); pr_debug("%s: wake lock counter %d\n", __func__, map.wlock_holders); pr_debug("%s: map.pm_state = %d\n", __func__, map.pm_state); wake_up_all(&map.pm_wq); }
static irqreturn_t titsc_irq(int irq, void *dev) { struct titsc *ts_dev = dev; struct input_dev *input_dev = ts_dev->input; unsigned int fsm, status, irqclr = 0; unsigned int x = 0, y = 0; unsigned int z1, z2, z; status = titsc_readl(ts_dev, REG_RAWIRQSTATUS); if (status & IRQENB_HW_PEN) { ts_dev->pen_down = true; irqclr |= IRQENB_HW_PEN; pm_stay_awake(ts_dev->mfd_tscadc->dev); } if (status & IRQENB_PENUP) { fsm = titsc_readl(ts_dev, REG_ADCFSM); if (fsm == ADCFSM_STEPID) { ts_dev->pen_down = false; input_report_key(input_dev, BTN_TOUCH, 0); input_report_abs(input_dev, ABS_PRESSURE, 0); input_sync(input_dev); pm_relax(ts_dev->mfd_tscadc->dev); } else { ts_dev->pen_down = true; } irqclr |= IRQENB_PENUP; } if (status & IRQENB_EOS) irqclr |= IRQENB_EOS; /* * ADC and touchscreen share the IRQ line. * FIFO1 interrupts are used by ADC. Handle FIFO0 IRQs here only */ if (status & IRQENB_FIFO0THRES) { titsc_read_coordinates(ts_dev, &x, &y, &z1, &z2); if (ts_dev->pen_down && z1 != 0 && z2 != 0) { /* * Calculate pressure using formula * Resistance(touch) = x plate resistance * * x postion/4096 * ((z2 / z1) - 1) */ z = z1 - z2; z *= x; z *= ts_dev->x_plate_resistance; z /= z2; z = (z + 2047) >> 12; if (z <= MAX_12BIT) { input_report_abs(input_dev, ABS_X, x); input_report_abs(input_dev, ABS_Y, y); input_report_abs(input_dev, ABS_PRESSURE, z); input_report_key(input_dev, BTN_TOUCH, 1); input_sync(input_dev); } }
/*
 * camera_v4l2_close - release one handle on the MSM camera video node.
 *
 * Decrements the open count; on the last close the whole session is torn
 * down (stream + session deletion events, command-ack queue, session
 * bookkeeping, wakeup reference) after arming a short wakelock so the
 * teardown can complete before the system suspends.  Otherwise only this
 * handle's stream and command-ack queue are removed.  Always returns 0.
 */
static int camera_v4l2_close(struct file *filep)
{
    int rc = 0;
    struct v4l2_event event;
    struct msm_video_device *pvdev = video_drvdata(filep);
    struct camera_v4l2_private *sp = fh_to_private(filep->private_data);

    BUG_ON(!pvdev);

    atomic_sub_return(1, &pvdev->opened);
    if (atomic_read(&pvdev->opened) == 0) {
        /* Hold a 500 ms wakelock only if the wakelock was initialized
         * and is not already active. */
        if(1 == cam_wakelock_init && !wake_lock_active(&cam_wakelock)) {
            hw_camera_log_info("%s: start camera wake_lock_timeout!\n",__func__);
            //wake lock 500ms for camera exit
            wake_lock_timeout(&cam_wakelock, HZ/2);
        } else {
            hw_camera_log_info("%s: do not need wake_lock now, cam_wakelock_init = %d\n", __func__, cam_wakelock_init);
        }

        camera_pack_event(filep, MSM_CAMERA_SET_PARM,
            MSM_CAMERA_PRIV_DEL_STREAM, -1, &event);

        /* Donot wait, imaging server may have crashed */
        msm_post_event(&event, MSM_POST_EVT_TIMEOUT);

        camera_pack_event(filep, MSM_CAMERA_DEL_SESSION, 0, -1, &event);

        /* Donot wait, imaging server may have crashed */
        msm_post_event(&event, -1);
        msm_delete_command_ack_q(pvdev->vdev->num, 0);

        /* This should take care of both normal close
         * and application crashes */
        msm_destroy_session(pvdev->vdev->num);

        pm_relax(&pvdev->vdev->dev);
        atomic_set(&pvdev->stream_cnt, 0);
    } else {
        camera_pack_event(filep, MSM_CAMERA_SET_PARM,
            MSM_CAMERA_PRIV_DEL_STREAM, -1, &event);

        /* Donot wait, imaging server may have crashed */
        msm_post_event(&event, MSM_POST_EVT_TIMEOUT);
        msm_delete_command_ack_q(pvdev->vdev->num, sp->stream_id);
        msm_delete_stream(pvdev->vdev->num, sp->stream_id);
    }

    camera_v4l2_vb2_q_release(filep);
    camera_v4l2_fh_release(filep);

    return rc;
}
/*
 * gpio_keys_gpio_work_func - debounce-delayed worker for one GPIO button.
 *
 * Reports the button's current state to the input subsystem and, for
 * wakeup-capable buttons, drops the parent device's wakeup reference
 * (presumably taken in the interrupt path - confirm against the ISR).
 */
static void gpio_keys_gpio_work_func(struct work_struct *work)
{
    struct gpio_button_data *bdata =
        container_of(work, struct gpio_button_data, work.work);

    gpio_keys_gpio_report_event(bdata);

    if (bdata->button->wakeup)
        pm_relax(bdata->input->dev.parent);
}
/*
 * mv_otg_disable - quiesce the OTG controller.
 *
 * Gates the internal hardware when clock gating is in use, returns the
 * cpuidle QoS request to its default (non-blocking) value, and drops the
 * wakeup reference on the platform device so the system may suspend.
 */
static void mv_otg_disable(struct mv_otg *mvotg)
{
    if (mvotg->clock_gating)
        mv_otg_disable_internal(mvotg);

    pm_qos_update_request(&mvotg->qos_idle,
        PM_QOS_CPUIDLE_BLOCK_DEFAULT_VALUE);
    pm_relax(&mvotg->pdev->dev);
}
/*
 * camera_v4l2_close - release one handle on the MSM camera video node.
 *
 * Clears this handle's bit in the pvdev->opened bitmask.  When no bits
 * remain, the whole session is torn down (stream + session deletion
 * events, command-ack queue, session bookkeeping), power-collapse latency
 * voting is re-enabled and the wakeup reference is dropped.  Otherwise
 * only this handle's stream and command-ack queue are removed.
 * Always returns 0.
 */
static int camera_v4l2_close(struct file *filep)
{
    int rc = 0;
    struct v4l2_event event;
    struct msm_video_device *pvdev = video_drvdata(filep);
    struct camera_v4l2_private *sp = fh_to_private(filep->private_data);
    unsigned int opn_idx, mask;

    BUG_ON(!pvdev);

    /*
     * NOTE(review): this read-modify-write of pvdev->opened is not one
     * atomic operation (atomic_read + atomic_set); confirm close() calls
     * on this node are serialized, otherwise two closers can race here.
     */
    opn_idx = atomic_read(&pvdev->opened);
    pr_debug("%s: close stream_id=%d\n", __func__, sp->stream_id);
    mask = (1 << sp->stream_id);
    opn_idx &= ~mask;
    atomic_set(&pvdev->opened, opn_idx);

    if (atomic_read(&pvdev->opened) == 0) {

        camera_pack_event(filep, MSM_CAMERA_SET_PARM,
            MSM_CAMERA_PRIV_DEL_STREAM, -1, &event);

        /* Donot wait, imaging server may have crashed */
        msm_post_event(&event, MSM_POST_EVT_TIMEOUT);

        camera_pack_event(filep, MSM_CAMERA_DEL_SESSION, 0, -1, &event);

        /* Donot wait, imaging server may have crashed */
        msm_post_event(&event, -1);
        msm_delete_command_ack_q(pvdev->vdev->num, 0);

        /* This should take care of both normal close
         * and application crashes */
        msm_destroy_session(pvdev->vdev->num);

        /* Enable power collapse latency */
        msm_pm_qos_update_request(CAMERA_ENABLE_PC_LATENCY);

        pm_relax(&pvdev->vdev->dev);
    } else {
        camera_pack_event(filep, MSM_CAMERA_SET_PARM,
            MSM_CAMERA_PRIV_DEL_STREAM, -1, &event);

        /* Donot wait, imaging server may have crashed */
        msm_post_event(&event, MSM_POST_EVT_TIMEOUT);
        msm_delete_command_ack_q(pvdev->vdev->num, sp->stream_id);
        msm_delete_stream(pvdev->vdev->num, sp->stream_id);
    }

    camera_v4l2_vb2_q_release(filep);
    camera_v4l2_fh_release(filep);

    return rc;
}
/*
 * qpnp_iadc_vadc_sync_read - synchronized current (IADC) + voltage (VADC)
 * channel read.
 *
 * Serializes on the iadc/vadc lock, optionally holds a wakeup reference
 * while the driver polls for end-of-conversion, checks the PMIC die
 * temperature, then arms the VADC, reads the IADC channel and completes
 * the VADC conversion.  Results land in @i_result and @v_result.
 *
 * Returns 0 on success, -EPROBE_DEFER if the driver is not ready, or a
 * negative error from the underlying steps.
 */
int32_t qpnp_iadc_vadc_sync_read(
    enum qpnp_iadc_channels i_channel, struct qpnp_iadc_result *i_result,
    enum qpnp_vadc_channels v_channel, struct qpnp_vadc_result *v_result)
{
    struct qpnp_iadc_drv *iadc = qpnp_iadc;
    int rc = 0;

    if (!iadc || !iadc->iadc_initialized)
        return -EPROBE_DEFER;

    mutex_lock(&iadc->iadc_vadc_lock);

    if (iadc->iadc_poll_eoc) {
        pr_debug("acquiring iadc eoc wakelock\n");
        pm_stay_awake(iadc->dev);
    }

    rc = qpnp_check_pmic_temp();
    if (rc) {
        pr_err("PMIC die temp check failed\n");
        goto fail;
    }

    /* Flag VADC that an IADC conversion will run in lock-step with it. */
    iadc->iadc_mode_sel = true;

    rc = qpnp_vadc_iadc_sync_request(v_channel);
    if (rc) {
        pr_err("Configuring VADC failed\n");
        goto fail;
    }

    rc = qpnp_iadc_read(i_channel, i_result);
    if (rc)
        pr_err("Configuring IADC failed\n");
    /* Intentional fall through to release VADC */

    rc = qpnp_vadc_iadc_sync_complete_request(v_channel, v_result);
    if (rc)
        pr_err("Releasing VADC failed\n");

fail:
    iadc->iadc_mode_sel = false;

    if (iadc->iadc_poll_eoc) {
        pr_debug("releasing iadc eoc wakelock\n");
        pm_relax(iadc->dev);
    }
    mutex_unlock(&iadc->iadc_vadc_lock);

    return rc;
}
/*
 * smb349_pm_relax - clear one wakeup reason from the charger's mask.
 *
 * Removes @reason from chip->wake_reasons under pm_lock.  If that clears
 * the last outstanding reason, the device's wakeup reference is released
 * so the system may suspend again.
 */
static void smb349_pm_relax(struct smb349_dual_charger *chip, int reason)
{
    int remaining;

    mutex_lock(&chip->pm_lock);

    remaining = chip->wake_reasons & ~reason;

    /* Last outstanding reason just went away: allow sleep. */
    if (chip->wake_reasons != 0 && remaining == 0) {
        dev_dbg(chip->dev, "relaxing: 0x%02x (bit %d)\n",
            remaining, reason);
        pm_relax(chip->dev);
    }

    chip->wake_reasons = remaining;
    mutex_unlock(&chip->pm_lock);
}
static irqreturn_t wake_timer_irq(int irq, void *dev_id) { struct wake_timer *tm = dev_id; int stat = 0; if(irq == tm->irq[0]) { /*clear interrupt*/ readl(tm->mmio + EOI); pm_wakeup_event(&tm->pdev->dev, 0); pm_relax(&tm->pdev->dev); #ifdef CONFIG_PM_WAKEUP_DEVICE_AUTO_TEST_SUSPEND input_report_key(tm->input_dev, KEY_POWER, 1); input_sync(tm->input_dev); input_report_key(tm->input_dev, KEY_POWER, 0); input_sync(tm->input_dev); #endif #ifdef MANU_UNLOCK wake_lock(&tm->wake_lock); #else wake_lock_timeout(&tm->wake_lock, tm->wake_ms/1000*HZ); #endif } else if(irq == tm->irq[1]) { /*clear interrupt*/ readl(tm->mmio + EOI + OFFSET); if(tm->stat == STAT_ON) { stat = pm_runtime_put(&tm->pdev->dev); } else { stat = pm_runtime_get(&tm->pdev->dev); } if(stat) dev_err(&tm->pdev->dev, "pm runtime ret %d", stat); #ifdef MANU_UNLOCK wake_unlock(&tm->wake_lock); #endif } /*dev_info(&tm->pdev->dev, "irq %d", irq);*/ return IRQ_HANDLED; }
/*
 * wcd9xxx_spmi_unlock_sleep - drop one holder of the codec wake state.
 *
 * Decrements the wake-lock holder count under map.pm_lock.  When the last
 * holder goes away: pm_state moves from WCD9XXX_PM_AWAKE back to
 * WCD9XXX_PM_SLEEPABLE (left untouched if a failed lock_sleep left it at
 * WCD9XXX_PM_ASLEEP), the PM-QoS request returns to the default value and
 * the wakeup reference on the SPMI device is released.  Finally wakes any
 * waiter on map.pm_wq.
 *
 * Fix: declare the parameter list as (void) - an empty () is an
 * old-style unprototyped declaration in C and defeats type checking.
 */
void wcd9xxx_spmi_unlock_sleep(void)
{
    mutex_lock(&map.pm_lock);
    if (--map.wlock_holders == 0) {
        pr_debug("%s: releasing wake lock pm_state %d -> %d\n",
             __func__, map.pm_state, WCD9XXX_PM_SLEEPABLE);
        /*
         * if wcd9xxx_spmi_lock_sleep failed, pm_state would be still
         * WCD9XXX_PM_ASLEEP, don't overwrite
         */
        if (likely(map.pm_state == WCD9XXX_PM_AWAKE))
            map.pm_state = WCD9XXX_PM_SLEEPABLE;
        pm_qos_update_request(&map.pm_qos_req, PM_QOS_DEFAULT_VALUE);
        pm_relax(&map.spmi[0]->dev);
    }
    mutex_unlock(&map.pm_lock);
    /* Read outside pm_lock: the values below are debug-only snapshots. */
    pr_debug("%s: wake lock counter %d\n", __func__, map.wlock_holders);
    pr_debug("%s: map.pm_state = %d\n", __func__, map.pm_state);
    wake_up_all(&map.pm_wq);
}
static void imx_imx_snvs_check_for_events(struct timer_list *t) { struct pwrkey_drv_data *pdata = from_timer(pdata, t, check_timer); struct input_dev *input = pdata->input; u32 state; regmap_read(pdata->snvs, SNVS_HPSR_REG, &state); state = state & SNVS_HPSR_BTN ? 1 : 0; /* only report new event if status changed */ if (state ^ pdata->keystate) { pdata->keystate = state; input_event(input, EV_KEY, pdata->keycode, state); input_sync(input); pm_relax(pdata->input->dev.parent); } /* repeat check if pressed long */ if (state) { mod_timer(&pdata->check_timer, jiffies + msecs_to_jiffies(REPEAT_INTERVAL)); } }
/*
 * bu21150_ioctl_set_scan_mode - SET_SCAN_MODE ioctl backend.
 *
 * Copies the requested 16-bit scan mode from userspace into the driver
 * state, then, unless the panel must stay awake for self-capacitance
 * gesture scanning, drops the wakeup reference held on its behalf.
 *
 * Returns 0 on success or -EFAULT if the userspace copy fails.
 */
static long bu21150_ioctl_set_scan_mode(unsigned long arg)
{
    struct bu21150_data *ts = spi_get_drvdata(g_client_bu21150);
    void __user *argp = (void __user *)arg;

    /*
     * NOTE(review): ts->scan_mode is written before mutex_wake is taken;
     * confirm no concurrent reader relies on seeing updates only under
     * the lock.
     */
    if (copy_from_user(&ts->scan_mode, argp, sizeof(u16))) {
        pr_err("%s: Failed to copy_from_user().\n", __func__);
        return -EFAULT;
    }

    mutex_lock(&ts->mutex_wake);
    if (ts->stay_awake && ts->wake_up &&
        ts->scan_mode != AFE_SCAN_GESTURE_SELF_CAP) {
        pm_relax(&ts->client->dev);
        ts->stay_awake = false;
    }
    mutex_unlock(&ts->mutex_wake);

    return 0;
}
int32_t qpnp_iadc_vadc_sync_read(struct qpnp_iadc_chip *iadc, enum qpnp_iadc_channels i_channel, struct qpnp_iadc_result *i_result, enum qpnp_vadc_channels v_channel, struct qpnp_vadc_result *v_result) { int rc = 0, mode_sel = 0, num = 0, rsense_n_ohms = 0, sign = 0; uint16_t raw_data; int32_t rsense_u_ohms = 0; int64_t result_current; if (qpnp_iadc_is_valid(iadc) < 0) return -EPROBE_DEFER; mutex_lock(&iadc->adc->adc_lock); if (iadc->iadc_poll_eoc) { pr_debug("acquiring iadc eoc wakelock\n"); pm_stay_awake(iadc->dev); } iadc->iadc_mode_sel = true; rc = qpnp_vadc_iadc_sync_request(iadc->vadc_dev, v_channel); if (rc) { pr_err("Configuring VADC failed\n"); goto fail; } rc = qpnp_iadc_configure(iadc, i_channel, &raw_data, mode_sel); if (rc < 0) { pr_err("qpnp adc result read failed with %d\n", rc); goto fail_release_vadc; } rc = qpnp_iadc_get_rsense(iadc, &rsense_n_ohms); pr_debug("current raw:0%x and rsense:%d\n", raw_data, rsense_n_ohms); rsense_u_ohms = rsense_n_ohms/1000; num = raw_data - iadc->adc->calib.offset_raw; if (num < 0) { sign = 1; num = -num; } i_result->result_uv = (num * QPNP_ADC_GAIN_NV)/ (iadc->adc->calib.gain_raw - iadc->adc->calib.offset_raw); result_current = i_result->result_uv; result_current *= QPNP_IADC_NANO_VOLTS_FACTOR; /* Intentional fall through. Process the result w/o comp */ do_div(result_current, rsense_u_ohms); if (sign) { i_result->result_uv = -i_result->result_uv; result_current = -result_current; } result_current *= -1; rc = qpnp_iadc_comp_result(iadc, &result_current); if (rc < 0) pr_err("Error during compensating the IADC\n"); rc = 0; result_current *= -1; i_result->result_ua = (int32_t) result_current; fail_release_vadc: rc = qpnp_vadc_iadc_sync_complete_request(iadc->vadc_dev, v_channel, v_result); if (rc) pr_err("Releasing VADC failed\n"); fail: iadc->iadc_mode_sel = false; if (iadc->iadc_poll_eoc) { pr_debug("releasing iadc eoc wakelock\n"); pm_relax(iadc->dev); } mutex_unlock(&iadc->adc->adc_lock); return rc; }
int32_t qpnp_iadc_read(struct qpnp_iadc_chip *iadc, enum qpnp_iadc_channels channel, struct qpnp_iadc_result *result) { int32_t rc, rsense_n_ohms, sign = 0, num, mode_sel = 0; int32_t rsense_u_ohms = 0; int64_t result_current; uint16_t raw_data; if (qpnp_iadc_is_valid(iadc) < 0) return -EPROBE_DEFER; if ((iadc->adc->calib.gain_raw - iadc->adc->calib.offset_raw) == 0) { pr_err("raw offset errors! run iadc calibration again\n"); return -EINVAL; } rc = qpnp_check_pmic_temp(iadc); if (rc) { pr_err("Error checking pmic therm temp\n"); return rc; } mutex_lock(&iadc->adc->adc_lock); if (iadc->iadc_poll_eoc) { pr_debug("acquiring iadc eoc wakelock\n"); pm_stay_awake(iadc->dev); } rc = qpnp_iadc_configure(iadc, channel, &raw_data, mode_sel); if (rc < 0) { pr_err("qpnp adc result read failed with %d\n", rc); goto fail; } rc = qpnp_iadc_get_rsense(iadc, &rsense_n_ohms); pr_debug("current raw:0%x and rsense:%d\n", raw_data, rsense_n_ohms); rsense_u_ohms = rsense_n_ohms/1000; num = raw_data - iadc->adc->calib.offset_raw; if (num < 0) { sign = 1; num = -num; } result->result_uv = (num * QPNP_ADC_GAIN_NV)/ (iadc->adc->calib.gain_raw - iadc->adc->calib.offset_raw); result_current = result->result_uv; result_current *= QPNP_IADC_NANO_VOLTS_FACTOR; /* Intentional fall through. Process the result w/o comp */ do_div(result_current, rsense_u_ohms); if (sign) { result->result_uv = -result->result_uv; result_current = -result_current; } result_current *= -1; rc = qpnp_iadc_comp_result(iadc, &result_current); if (rc < 0) pr_err("Error during compensating the IADC\n"); rc = 0; result_current *= -1; result->result_ua = (int32_t) result_current; fail: if (iadc->iadc_poll_eoc) { pr_debug("releasing iadc eoc wakelock\n"); pm_relax(iadc->dev); } mutex_unlock(&iadc->adc->adc_lock); return rc; }
/*
 * imx_keypad_check_for_events is the timer handler.
 *
 * Debounces the key matrix: rescans quickly until the matrix has been
 * stable for IMX_KEYPAD_SCANS_FOR_STABILITY consecutive scans, fires
 * input events exactly once per stable state, then either re-enables the
 * key-depress interrupt (all keys up, wakeup reference released) or keeps
 * polling with the key-release interrupt armed (some key still down).
 */
static void imx_keypad_check_for_events(unsigned long data)
{
    struct imx_keypad *keypad = (struct imx_keypad *) data;
    unsigned short matrix_volatile_state[MAX_MATRIX_KEY_COLS];
    unsigned short reg_val;
    bool state_changed, is_zero_matrix;
    int i;

    memset(matrix_volatile_state, 0, sizeof(matrix_volatile_state));

    imx_keypad_scan_matrix(keypad, matrix_volatile_state);

    state_changed = false;
    for (i = 0; i < MAX_MATRIX_KEY_COLS; i++) {
        if ((keypad->cols_en_mask & (1 << i)) == 0)
            continue;

        if (keypad->matrix_unstable_state[i] ^ matrix_volatile_state[i]) {
            state_changed = true;
            break;
        }
    }

    /*
     * If the matrix state is changed from the previous scan
     *   (Re)Begin the debouncing process, saving the new state in
     *   keypad->matrix_unstable_state.
     * else
     *   Increase the count of number of scans with a stable state.
     */
    if (state_changed) {
        memcpy(keypad->matrix_unstable_state, matrix_volatile_state,
            sizeof(matrix_volatile_state));
        keypad->stable_count = 0;
    } else
        keypad->stable_count++;

    /*
     * If the matrix is not as stable as we want reschedule scan
     * in the near future.
     */
    if (keypad->stable_count < IMX_KEYPAD_SCANS_FOR_STABILITY) {
        mod_timer(&keypad->check_matrix_timer,
              jiffies + msecs_to_jiffies(10));
        return;
    }

    /*
     * If the matrix state is stable, fire the events and save the new
     * stable state. Note, if the matrix is kept stable for longer
     * (keypad->stable_count > IMX_KEYPAD_SCANS_FOR_STABILITY) all
     * events have already been generated.
     */
    if (keypad->stable_count == IMX_KEYPAD_SCANS_FOR_STABILITY) {
        imx_keypad_fire_events(keypad, matrix_volatile_state);

        memcpy(keypad->matrix_stable_state, matrix_volatile_state,
            sizeof(matrix_volatile_state));
    }

    is_zero_matrix = true;
    for (i = 0; i < MAX_MATRIX_KEY_COLS; i++) {
        if (matrix_volatile_state[i] != 0) {
            is_zero_matrix = false;
            break;
        }
    }

    if (is_zero_matrix) {
        /*
         * All keys have been released. Enable only the KDI
         * interrupt for future key presses (clear the KDI
         * status bit and its sync chain before that).
         */
        reg_val = readw(keypad->mmio_base + KPSR);
        reg_val |= KBD_STAT_KPKD | KBD_STAT_KDSC;
        writew(reg_val, keypad->mmio_base + KPSR);

        reg_val = readw(keypad->mmio_base + KPSR);
        reg_val |= KBD_STAT_KDIE;
        reg_val &= ~KBD_STAT_KRIE;
        writew(reg_val, keypad->mmio_base + KPSR);

        /* No key held: the keypad no longer needs to block suspend. */
        pm_relax(keypad->input_dev->dev.parent);
    } else {
        /*
         * Some keys are still pressed. Schedule a rescan in
         * attempt to detect multiple key presses and enable
         * the KRI interrupt to react quickly to key release
         * event.
         */
        mod_timer(&keypad->check_matrix_timer,
              jiffies + msecs_to_jiffies(60));

        reg_val = readw(keypad->mmio_base + KPSR);
        reg_val |= KBD_STAT_KPKR | KBD_STAT_KRSS;
        writew(reg_val, keypad->mmio_base + KPSR);
    }
}
/*
 * camera_v4l2_open - open one handle on the MSM camera video node.
 *
 * Allocates a file handle and its vb2 queue.  The first opener also holds
 * a wakeup reference, creates the session and its command-ack queue, and
 * posts a NEW_SESSION event to the imaging server (waiting for the ack).
 * Later openers only create a per-stream command-ack queue.  On success
 * the open and stream counters are bumped.
 *
 * Returns 0 on success or a negative error, unwinding what was set up.
 */
static int camera_v4l2_open(struct file *filep)
{
    int rc = 0;
    struct v4l2_event event;
    struct msm_video_device *pvdev = video_drvdata(filep);

    BUG_ON(!pvdev);

    rc = camera_v4l2_fh_open(filep);
    if (rc < 0) {
        pr_err("%s : camera_v4l2_fh_open failed Line %d rc %d\n",
            __func__, __LINE__, rc);
        goto fh_open_fail;
    }

    /* every stream has a vb2 queue */
    rc = camera_v4l2_vb2_q_init(filep);
    if (rc < 0) {
        pr_err("%s : vb2 queue init fails Line %d rc %d\n",
            __func__, __LINE__, rc);
        goto vb2_q_fail;
    }

    if (!atomic_read(&pvdev->opened)) {
        /* First opener: keep the device awake for the session. */
        pm_stay_awake(&pvdev->vdev->dev);

        /* create a new session when first opened */
        rc = msm_create_session(pvdev->vdev->num, pvdev->vdev);
        if (rc < 0) {
            pr_err("%s : session creation failed Line %d rc %d\n",
                __func__, __LINE__, rc);
            goto session_fail;
        }

        rc = msm_create_command_ack_q(pvdev->vdev->num, 0);
        if (rc < 0) {
            pr_err("%s : creation of command_ack queue failed\n",
                __func__);
            pr_err("%s : Line %d rc %d\n", __func__, __LINE__, rc);
            goto command_ack_q_fail;
        }

        camera_pack_event(filep, MSM_CAMERA_NEW_SESSION, 0, -1, &event);
        rc = msm_post_event(&event, MSM_POST_EVT_TIMEOUT);
        if (rc < 0) {
            pr_err("%s : posting of NEW_SESSION event failed\n",
                __func__);
            pr_err("%s : Line %d rc %d\n", __func__, __LINE__, rc);
            goto post_fail;
        }

        rc = camera_check_event_status(&event);
        if (rc < 0) {
            pr_err("%s : checking event status fails Line %d rc %d\n",
                __func__, __LINE__, rc);
            goto post_fail;
        }
    } else {
        /*
         * NOTE(review): a failure here jumps to session_fail, which
         * calls pm_relax() even though pm_stay_awake() was not taken
         * in this branch - confirm the wakeup-source accounting
         * tolerates the unbalanced relax.
         */
        rc = msm_create_command_ack_q(pvdev->vdev->num,
            atomic_read(&pvdev->stream_cnt));
        if (rc < 0) {
            pr_err("%s : creation of command_ack queue failed Line %d rc %d\n",
                __func__, __LINE__, rc);
            goto session_fail;
        }
    }

    atomic_add(1, &pvdev->opened);
    atomic_add(1, &pvdev->stream_cnt);
    return rc;

post_fail:
    msm_delete_command_ack_q(pvdev->vdev->num, 0);
command_ack_q_fail:
    msm_destroy_session(pvdev->vdev->num);
session_fail:
    pm_relax(&pvdev->vdev->dev);
    camera_v4l2_vb2_q_release(filep);
vb2_q_fail:
    camera_v4l2_fh_release(filep);
fh_open_fail:
    return rc;
}
/*
 * qpnp_iadc_calibrate_for_trim - recalibrate IADC gain/offset and program
 * the offset trim registers.
 *
 * Under the ADC lock (and, if polling for end-of-conversion, a wakeup
 * reference): reads the gain reference channel, reads the offset channel
 * appropriate for an external or internal sense resistor, converts the
 * raw offsets to voltages, then writes the offset MSB/LSB trim registers
 * (each preceded by the required secure-access unlock write).
 *
 * Returns 0 on success, -EPROBE_DEFER if the driver is not initialized,
 * or a negative error code from any step.
 */
int32_t qpnp_iadc_calibrate_for_trim(void)
{
    struct qpnp_iadc_drv *iadc = qpnp_iadc;
    uint8_t rslt_lsb, rslt_msb;
    int32_t rc = 0;
    uint16_t raw_data;
    uint32_t mode_sel = 0;

    if (!iadc || !iadc->iadc_initialized)
        return -EPROBE_DEFER;

    mutex_lock(&iadc->adc->adc_lock);

    if (iadc->iadc_poll_eoc) {
        pr_debug("acquiring iadc eoc wakelock\n");
        pm_stay_awake(iadc->dev);
    }

    /* Exported symbol may be called from outside this driver.
     * Ensure this driver is ready (probed) before supporting
     * calibration. */
    rc = qpnp_iadc_is_ready();
    if (rc < 0)
        goto fail;

    rc = qpnp_iadc_configure(GAIN_CALIBRATION_17P857MV,
        &raw_data, mode_sel);
    if (rc < 0) {
        pr_err("qpnp adc result read failed with %d\n", rc);
        goto fail;
    }

    iadc->adc->calib.gain_raw = raw_data;

    if (iadc->external_rsense) {
        /* external offset calculation */
        rc = qpnp_iadc_configure(OFFSET_CALIBRATION_CSP_CSN,
            &raw_data, mode_sel);
        if (rc < 0) {
            pr_err("qpnp adc result read failed with %d\n", rc);
            goto fail;
        }
    } else {
        /* internal offset calculation */
        rc = qpnp_iadc_configure(OFFSET_CALIBRATION_CSP2_CSN2,
            &raw_data, mode_sel);
        if (rc < 0) {
            pr_err("qpnp adc result read failed with %d\n", rc);
            goto fail;
        }
    }

    iadc->adc->calib.offset_raw = raw_data;
    if (rc < 0) {
        pr_err("qpnp adc offset/gain calculation failed\n");
        goto fail;
    }

    pr_debug("raw gain:0x%x, raw offset:0x%x\n",
        iadc->adc->calib.gain_raw, iadc->adc->calib.offset_raw);

    rc = qpnp_convert_raw_offset_voltage();
    if (rc < 0) {
        pr_err("qpnp raw_voltage conversion failed\n");
        goto fail;
    }

    /* Split the raw offset into the two 8-bit trim register values. */
    rslt_msb = (raw_data & QPNP_RAW_CODE_16_BIT_MSB_MASK) >>
        QPNP_BIT_SHIFT_8;
    rslt_lsb = raw_data & QPNP_RAW_CODE_16_BIT_LSB_MASK;

    pr_debug("trim values:lsb:0x%x and msb:0x%x\n", rslt_lsb, rslt_msb);

    /* Each trim write needs a secure-access unlock immediately before. */
    rc = qpnp_iadc_write_reg(QPNP_IADC_SEC_ACCESS,
        QPNP_IADC_SEC_ACCESS_DATA);
    if (rc < 0) {
        pr_err("qpnp iadc configure error for sec access\n");
        goto fail;
    }

    rc = qpnp_iadc_write_reg(QPNP_IADC_MSB_OFFSET, rslt_msb);
    if (rc < 0) {
        pr_err("qpnp iadc configure error for MSB write\n");
        goto fail;
    }

    rc = qpnp_iadc_write_reg(QPNP_IADC_SEC_ACCESS,
        QPNP_IADC_SEC_ACCESS_DATA);
    if (rc < 0) {
        pr_err("qpnp iadc configure error for sec access\n");
        goto fail;
    }

    rc = qpnp_iadc_write_reg(QPNP_IADC_LSB_OFFSET, rslt_lsb);
    if (rc < 0) {
        pr_err("qpnp iadc configure error for LSB write\n");
        goto fail;
    }
fail:
    if (iadc->iadc_poll_eoc) {
        pr_debug("releasing iadc eoc wakelock\n");
        pm_relax(iadc->dev);
    }
    mutex_unlock(&iadc->adc->adc_lock);
    return rc;
}
/*
 * mxhci_hsic_suspend - put the HSIC controller into low power mode.
 *
 * Aborts with -EBUSY if a remote wakeup is pending, the port is already
 * resuming, or the PHY fails to reach LPM within PHY_LPM_WAIT_TIMEOUT_MS.
 * Otherwise gates all controller clocks, minimizes the VDDCX rail,
 * removes the bus bandwidth vote, arms the wakeup IRQ and drops the
 * wakeup reference.  Returns 0 once the controller is in LPM.
 */
static int mxhci_hsic_suspend(struct mxhci_hsic_hcd *mxhci)
{
    struct usb_hcd *hcd = hsic_to_hcd(mxhci);
    int ret;

    if (mxhci->in_lpm) {
        dev_dbg(mxhci->dev, "%s called in lpm\n", __func__);
        return 0;
    }

    disable_irq(hcd->irq);

    /* make sure we don't race against a remote wakeup */
    if (test_bit(HCD_FLAG_WAKEUP_PENDING, &hcd->flags) ||
        (readl_relaxed(MSM_HSIC_PORTSC) & PORT_PLS_MASK) == XDEV_RESUME) {
        dev_dbg(mxhci->dev, "wakeup pending, aborting suspend\n");
        enable_irq(hcd->irq);
        return -EBUSY;
    }

    /* make sure HSIC phy is in LPM */
    ret = wait_for_completion_timeout(
        &mxhci->phy_in_lpm,
        msecs_to_jiffies(PHY_LPM_WAIT_TIMEOUT_MS));
    if (!ret) {
        dev_err(mxhci->dev, "HSIC phy failed to enter lpm\n");
        init_completion(&mxhci->phy_in_lpm);
        enable_irq(hcd->irq);
        return -EBUSY;
    }

    /* Re-arm the completion for the next suspend cycle. */
    init_completion(&mxhci->phy_in_lpm);

    clk_disable_unprepare(mxhci->core_clk);
    clk_disable_unprepare(mxhci->utmi_clk);
    clk_disable_unprepare(mxhci->hsic_clk);
    clk_disable_unprepare(mxhci->cal_clk);
    clk_disable_unprepare(mxhci->system_clk);

    ret = regulator_set_voltage(mxhci->hsic_vddcx, mxhci->vdd_no_vol_level,
        mxhci->vdd_high_vol_level);
    if (ret < 0)
        dev_err(mxhci->dev, "unable to set vddcx voltage for VDD MIN\n");

    if (mxhci->bus_perf_client) {
        mxhci->bus_vote = false;
        queue_work(mxhci->wq, &mxhci->bus_vote_w);
    }

    mxhci->in_lpm = 1;

    enable_irq(hcd->irq);

    if (mxhci->wakeup_irq) {
        mxhci->wakeup_irq_enabled = 1;
        enable_irq_wake(mxhci->wakeup_irq);
        enable_irq(mxhci->wakeup_irq);
    }

    /* disable force-on mode for periph_on */
    clk_set_flags(mxhci->system_clk, CLKFLAG_NORETAIN_PERIPH);

    pm_relax(mxhci->dev);

    dev_dbg(mxhci->dev, "HSIC-USB in low power mode\n");
    xhci_dbg_log_event(&dbg_hsic, NULL, "Controller suspended", 0);

    return 0;
}
/*
 * camera_v4l2_open - open one handle on the MSM camera video node.
 *
 * Each handle occupies one bit of the pvdev->opened bitmask.  The first
 * opener holds a wakeup reference, disables power-collapse latency,
 * creates the session and its command-ack queue, and posts a NEW_SESSION
 * event to the imaging server (waiting for the ack).  Later openers only
 * create a per-stream command-ack queue.  On success the handle's bit is
 * merged into the open mask.
 *
 * Returns 0 on success or a negative error, unwinding what was set up.
 */
static int camera_v4l2_open(struct file *filep)
{
    int rc = 0;
    struct v4l2_event event;
    struct msm_video_device *pvdev = video_drvdata(filep);
    unsigned int opn_idx, idx;

    BUG_ON(!pvdev);

    rc = camera_v4l2_fh_open(filep);
    if (rc < 0) {
        pr_err("%s : camera_v4l2_fh_open failed Line %d rc %d\n",
            __func__, __LINE__, rc);
        goto fh_open_fail;
    }

    opn_idx = atomic_read(&pvdev->opened);
    idx = opn_idx;

    /* every stream has a vb2 queue */
    rc = camera_v4l2_vb2_q_init(filep);
    if (rc < 0) {
        pr_err("%s : vb2 queue init fails Line %d rc %d\n",
            __func__, __LINE__, rc);
        goto vb2_q_fail;
    }

    if (!atomic_read(&pvdev->opened)) {
        /* First opener: keep the device awake for the session. */
        pm_stay_awake(&pvdev->vdev->dev);

        /* Disable power collapse latency */
        msm_pm_qos_update_request(CAMERA_DISABLE_PC_LATENCY);

        /* create a new session when first opened */
        rc = msm_create_session(pvdev->vdev->num, pvdev->vdev);
        if (rc < 0) {
            pr_err("%s : session creation failed Line %d rc %d\n",
                __func__, __LINE__, rc);
            goto session_fail;
        }

        rc = msm_create_command_ack_q(pvdev->vdev->num,
            find_first_zero_bit((const unsigned long *)&opn_idx,
                MSM_CAMERA_STREAM_CNT_BITS));
        if (rc < 0) {
            pr_err("%s : creation of command_ack queue failed\n",
                __func__);
            pr_err("%s : Line %d rc %d\n", __func__, __LINE__, rc);
            goto command_ack_q_fail;
        }

        camera_pack_event(filep, MSM_CAMERA_NEW_SESSION, 0, -1, &event);
        rc = msm_post_event(&event, MSM_POST_EVT_TIMEOUT);
        if (rc < 0) {
            pr_err("%s : posting of NEW_SESSION event failed\n",
                __func__);
            pr_err("%s : Line %d rc %d\n", __func__, __LINE__, rc);
            goto post_fail;
        }

        rc = camera_check_event_status(&event);
        if (rc < 0) {
            pr_err("%s : checking event status fails Line %d rc %d\n",
                __func__, __LINE__, rc);
            goto post_fail;
        }
    } else {
        rc = msm_create_command_ack_q(pvdev->vdev->num,
            find_first_zero_bit((const unsigned long *)&opn_idx,
                MSM_CAMERA_STREAM_CNT_BITS));
        if (rc < 0) {
            pr_err("%s : creation of command_ack queue failed Line %d rc %d\n",
                __func__, __LINE__, rc);
            goto session_fail;
        }
    }

    /*
     * Claim the first free stream bit for this handle.
     * NOTE(review): the atomic_cmpxchg() result is ignored - if another
     * opener changed the mask since opn_idx was sampled, this handle's
     * bit is silently lost; confirm opens are serialized.
     */
    idx |= (1 << find_first_zero_bit((const unsigned long *)&opn_idx,
            MSM_CAMERA_STREAM_CNT_BITS));
    atomic_cmpxchg(&pvdev->opened, opn_idx, idx);
    return rc;

post_fail:
    msm_delete_command_ack_q(pvdev->vdev->num, 0);
command_ack_q_fail:
    msm_destroy_session(pvdev->vdev->num);
session_fail:
    pm_relax(&pvdev->vdev->dev);
    camera_v4l2_vb2_q_release(filep);
vb2_q_fail:
    camera_v4l2_fh_release(filep);
fh_open_fail:
    return rc;
}
/*
 * usb_tx_work - delayed worker draining the link's TX queues over USB.
 *
 * Drains sk_fmt_tx_q and sk_raw_tx_q alternately (one skb from each per
 * loop iteration, for fair flow control) while holding a runtime-PM
 * reference on the USB device.  Any transient failure requeues the skb
 * and reschedules this work in 20 ms; -ENODEV/-ENOENT means the device
 * is gone and the work exits.  When both queues empty, the TX wakeup
 * hold (wakelock or wakeup source, depending on config) is released.
 */
static void usb_tx_work(struct work_struct *work)
{
    int ret = 0;
    struct link_device *ld =
        container_of(work, struct link_device, tx_delayed_work.work);
    struct usb_link_device *usb_ld = to_usb_link_device(ld);
    struct sk_buff *skb;
    struct link_pm_data *pm_data = usb_ld->link_pm_data;

    if (!usb_ld->usbdev) {
        mif_info("usbdev is invalid\n");
        return;
    }

    pm_data->tx_cnt++;

    while (ld->sk_fmt_tx_q.qlen || ld->sk_raw_tx_q.qlen) {
        /* request and check usb runtime pm first */
        ret = link_pm_runtime_get_active(pm_data);
        if (ret < 0) {
            if (ret == -ENODEV) {
                mif_err("link not avail, retry reconnect.\n");
                goto exit;
            }
            goto retry_tx_work;
        }

        /* If AP try to tx when interface disconnect->reconnect probe,
         * usbdev was created but one of interface channel device are
         * probing, _usb_tx_work return to -ENOENT then runtime usage
         * count allways positive and never enter to L2
         */
        if (!usb_ld->if_usb_connected) {
            mif_info("link is available, but if was not readey\n");
            goto retry_tx_work;
        }

        pm_runtime_get_sync(&usb_ld->usbdev->dev);

        ret = 0;
        /* send skb from fmt_txq and raw_txq,*/
        /* one by one for fair flow control */
        skb = skb_dequeue(&ld->sk_fmt_tx_q);
        if (skb)
            ret = _usb_tx_work(skb);

        if (ret) {
            mif_err("usb_tx_urb_with_skb for fmt_q %d\n", ret);
            /* put the failed skb back at the head of the queue */
            skb_queue_head(&ld->sk_fmt_tx_q, skb);

            if (ret == -ENODEV || ret == -ENOENT)
                goto exit;

            /* tx fail and usbdev alived, retry tx work */
            pm_runtime_put(&usb_ld->usbdev->dev);
            goto retry_tx_work;
        }

        skb = skb_dequeue(&ld->sk_raw_tx_q);
        if (skb)
            ret = _usb_tx_work(skb);

        if (ret) {
            mif_err("usb_tx_urb_with_skb for raw_q %d\n", ret);
            skb_queue_head(&ld->sk_raw_tx_q, skb);

            if (ret == -ENODEV || ret == -ENOENT)
                goto exit;

            pm_runtime_put(&usb_ld->usbdev->dev);
            goto retry_tx_work;
        }

        pm_runtime_put(&usb_ld->usbdev->dev);
    }

#ifdef CONFIG_HAS_WAKELOCK
    wake_unlock(&pm_data->tx_async_wake);
#else
    pm_relax(pm_data->miscdev.this_device);
#endif
exit:
    return;

retry_tx_work:
    queue_delayed_work(ld->tx_wq, &ld->tx_delayed_work,
        msecs_to_jiffies(20));
    return;
}
static int sdio_irq_thread(void *_host) { struct mmc_host *host = _host; struct sched_param param = { .sched_priority = 1 }; unsigned long period, idle_period; int ret; bool ws; sched_setscheduler(current, SCHED_FIFO, ¶m); /* * We want to allow for SDIO cards to work even on non SDIO * aware hosts. One thing that non SDIO host cannot do is * asynchronous notification of pending SDIO card interrupts * hence we poll for them in that case. */ idle_period = msecs_to_jiffies(10); period = (host->caps & MMC_CAP_SDIO_IRQ) ? MAX_SCHEDULE_TIMEOUT : idle_period; pr_debug("%s: IRQ thread started (poll period = %lu jiffies)\n", mmc_hostname(host), period); do { /* * We claim the host here on drivers behalf for a couple * reasons: * * 1) it is already needed to retrieve the CCCR_INTx; * 2) we want the driver(s) to clear the IRQ condition ASAP; * 3) we need to control the abort condition locally. * * Just like traditional hard IRQ handlers, we expect SDIO * IRQ handlers to be quick and to the point, so that the * holding of the host lock does not cover too much work * that doesn't require that lock to be held. */ ret = __mmc_claim_host(host, &host->sdio_irq_thread_abort); if (ret) break; ws = false; /* * prevent suspend if it has started when scheduled; * 100 msec (approx. value) should be enough for the system to * resume and attend to the card's request */ if ((host->dev_status == DEV_SUSPENDING) || (host->dev_status == DEV_SUSPENDED)) { pm_wakeup_event(&host->card->dev, 100); ws = true; } ret = process_sdio_pending_irqs(host); host->sdio_irq_pending = false; mmc_release_host(host); /* * Give other threads a chance to run in the presence of * errors. */ if (ret < 0) { set_current_state(TASK_INTERRUPTIBLE); if (!kthread_should_stop()) schedule_timeout(HZ); set_current_state(TASK_RUNNING); } /* * Adaptive polling frequency based on the assumption * that an interrupt will be closely followed by more. * This has a substantial benefit for network devices. 
*/ if (!(host->caps & MMC_CAP_SDIO_IRQ)) { if (ret > 0) period /= 2; else { period++; if (period > idle_period) period = idle_period; } } set_current_state(TASK_INTERRUPTIBLE); if (host->caps & MMC_CAP_SDIO_IRQ) { mmc_host_clk_hold(host); host->ops->enable_sdio_irq(host, 1); mmc_host_clk_release(host); } /* * function drivers would have processed the event from card * unless suspended, hence release wake source */ if (ws && (host->dev_status == DEV_RESUMED)) pm_relax(&host->card->dev); if (!kthread_should_stop()) schedule_timeout(period); set_current_state(TASK_RUNNING); } while (!kthread_should_stop()); if (host->caps & MMC_CAP_SDIO_IRQ) { mmc_host_clk_hold(host); host->ops->enable_sdio_irq(host, 0); mmc_host_clk_release(host); } pr_debug("%s: IRQ thread exiting with code %d\n", mmc_hostname(host), ret); return ret; } static int sdio_card_irq_get(struct mmc_card *card) { struct mmc_host *host = card->host; WARN_ON(!host->claimed); if (!host->sdio_irqs++) { atomic_set(&host->sdio_irq_thread_abort, 0); host->sdio_irq_thread = kthread_run(sdio_irq_thread, host, "ksdioirqd/%s", mmc_hostname(host)); if (IS_ERR(host->sdio_irq_thread)) { int err = PTR_ERR(host->sdio_irq_thread); host->sdio_irqs--; return err; } } return 0; } static int sdio_card_irq_put(struct mmc_card *card) { struct mmc_host *host = card->host; WARN_ON(!host->claimed); BUG_ON(host->sdio_irqs < 1); if (!--host->sdio_irqs) { atomic_set(&host->sdio_irq_thread_abort, 1); kthread_stop(host->sdio_irq_thread); } return 0; } /* If there is only 1 function registered set sdio_single_irq */ static void sdio_single_irq_set(struct mmc_card *card) { struct sdio_func *func; int i; card->sdio_single_irq = NULL; if ((card->host->caps & MMC_CAP_SDIO_IRQ) && card->host->sdio_irqs == 1) for (i = 0; i < card->sdio_funcs; i++) { func = card->sdio_func[i]; if (func && func->irq_handler) { card->sdio_single_irq = func; break; } } } /** * sdio_claim_irq - claim the IRQ for a SDIO function * @func: SDIO function * @handler: 
IRQ handler callback * * Claim and activate the IRQ for the given SDIO function. The provided * handler will be called when that IRQ is asserted. The host is always * claimed already when the handler is called so the handler must not * call sdio_claim_host() nor sdio_release_host(). */ int sdio_claim_irq(struct sdio_func *func, sdio_irq_handler_t *handler) { int ret; unsigned char reg; BUG_ON(!func); BUG_ON(!func->card); pr_debug("SDIO: Enabling IRQ for %s...\n", sdio_func_id(func)); if (func->irq_handler) { pr_debug("SDIO: IRQ for %s already in use.\n", sdio_func_id(func)); return -EBUSY; } ret = mmc_io_rw_direct(func->card, 0, 0, SDIO_CCCR_IENx, 0, ®); if (ret) return ret; reg |= 1 << func->num; reg |= 1; /* Master interrupt enable */ ret = mmc_io_rw_direct(func->card, 1, 0, SDIO_CCCR_IENx, reg, NULL); if (ret) return ret; func->irq_handler = handler; ret = sdio_card_irq_get(func->card); if (ret) func->irq_handler = NULL; sdio_single_irq_set(func->card); return ret; }
static void link_pm_runtime_work(struct work_struct *work) { int ret; struct link_pm_data *pm_data = container_of(work, struct link_pm_data, link_pm_work.work); struct device *dev = &pm_data->usb_ld->usbdev->dev; if (!pm_data->usb_ld->if_usb_connected || pm_data->dpm_suspending) return; if (pm_data->usb_ld->ld.com_state == COM_NONE) return; mif_debug("for dev 0x%p : current %d\n", dev, dev->power.runtime_status); switch (dev->power.runtime_status) { case RPM_ACTIVE: pm_data->resume_retry_cnt = 0; pm_data->resume_requested = false; complete(&pm_data->active_done); return; case RPM_SUSPENDED: if (pm_data->resume_requested) break; pm_data->resume_requested = true; #ifdef CONFIG_HAS_WAKELOCK wake_lock(&pm_data->rpm_wake); #else pm_stay_awake(pm_data->miscdev.this_device); #endif ret = link_pm_slave_wake(pm_data); if (ret < 0) { mif_err("slave wake fail\n"); #ifdef CONFIG_HAS_WAKELOCK wake_unlock(&pm_data->rpm_wake); #else pm_relax(pm_data->miscdev.this_device); #endif break; } if (!pm_data->usb_ld->if_usb_connected) { #ifdef CONFIG_HAS_WAKELOCK wake_unlock(&pm_data->rpm_wake); #else pm_relax(pm_data->miscdev.this_device); #endif return; } ret = pm_runtime_resume(dev); if (ret < 0) { mif_err("resume error(%d)\n", ret); if (!pm_data->usb_ld->if_usb_connected) { #ifdef CONFIG_HAS_WAKELOCK wake_unlock(&pm_data->rpm_wake); #else pm_relax(pm_data->miscdev.this_device); #endif return; } /* force to go runtime idle before retry resume */ if (dev->power.timer_expires == 0 && !dev->power.request_pending) { mif_debug("run time idle\n"); pm_runtime_idle(dev); } } #ifdef CONFIG_HAS_WAKELOCK wake_unlock(&pm_data->rpm_wake); #else pm_relax(pm_data->miscdev.this_device); #endif break; case RPM_SUSPENDING: /* Checking the usb_runtime_suspend running time.*/ mif_info("rpm_states=%d", dev->power.runtime_status); msleep(20); break; default: break; } pm_data->resume_requested = false; /* check until runtime_status goes to active */ /* attemp 10 times, or re-establish modem-link */ /* if 
pm_runtime_resume run properly, rpm status must be in ACTIVE */ if (dev->power.runtime_status == RPM_ACTIVE) { pm_data->resume_retry_cnt = 0; complete(&pm_data->active_done); } else if (pm_data->resume_retry_cnt++ > 10) { mif_err("runtime_status(%d), retry_cnt(%d)\n", dev->power.runtime_status, pm_data->resume_retry_cnt); link_pm_change_modem_state(pm_data, STATE_CRASH_RESET); } else queue_delayed_work(pm_data->wq, &pm_data->link_pm_work, msecs_to_jiffies(20)); }
/*
 * Put the MSM EHCI controller and its ULPI PHY into low power mode:
 * drive PHCD, gate clocks, lower VDD, arm the wakeup IRQs and release
 * the wake source.  Returns 0 on success, -EBUSY if a remote wakeup is
 * pending, -ETIMEDOUT if the PHY refuses to suspend (a PHY/link reset
 * is then scheduled).  The exact ordering of register writes, mb() and
 * IRQ enable/disable below is load-bearing — do not reorder.
 */
static int msm_ehci_suspend(struct msm_hcd *mhcd)
{
	struct usb_hcd *hcd = mhcd_to_hcd(mhcd);
	unsigned long timeout;
	u32 portsc;
	const struct msm_usb_host_platform_data *pdata;
	u32 func_ctrl;

	if (atomic_read(&mhcd->in_lpm)) {
		dev_dbg(mhcd->dev, "%s called in lpm\n", __func__);
		return 0;
	}

	disable_irq(hcd->irq);

	/* make sure we don't race against a remote wakeup */
	if (test_bit(HCD_FLAG_WAKEUP_PENDING, &hcd->flags) ||
			readl_relaxed(USB_PORTSC) & PORT_RESUME) {
		dev_dbg(mhcd->dev, "wakeup pending, aborting suspend\n");
		enable_irq(hcd->irq);
		return -EBUSY;
	}

	pdata = mhcd->dev->platform_data;
	if (pdata && pdata->is_uicc) {
		/* put the controller in non-driving mode */
		func_ctrl = msm_ulpi_read(mhcd, ULPI_FUNC_CTRL);
		func_ctrl &= ~ULPI_FUNC_CTRL_OPMODE_MASK;
		func_ctrl |= ULPI_FUNC_CTRL_OPMODE_NONDRIVING;
		msm_ulpi_write(mhcd, func_ctrl, ULPI_FUNC_CTRL);
	}

	/* If port is enabled wait 5ms for PHCD to come up. Reset PHY
	 * and link if it fails to do so.
	 * If port is not enabled set the PHCD bit and poll for it to
	 * come up with in 500ms. Reset phy and link if it fails to do so.
	 */
	portsc = readl_relaxed(USB_PORTSC);
	if (portsc & PORT_PE) {
		usleep_range(5000, 5000);
		if (!(readl_relaxed(USB_PORTSC) & PORTSC_PHCD)) {
			dev_err(mhcd->dev,
				"Unable to suspend PHY. portsc: %8x\n",
				readl_relaxed(USB_PORTSC));
			goto reset_phy_and_link;
		}
	} else {
		writel_relaxed(portsc | PORTSC_PHCD, USB_PORTSC);

		timeout = jiffies + msecs_to_jiffies(PHY_SUSP_TIMEOUT_MSEC);
		while (!(readl_relaxed(USB_PORTSC) & PORTSC_PHCD)) {
			if (time_after(jiffies, timeout)) {
				dev_err(mhcd->dev,
					"Unable to suspend PHY. portsc: %8x\n",
					readl_relaxed(USB_PORTSC));
				goto reset_phy_and_link;
			}
			usleep_range(10000, 10000);
		}
	}

	/*
	 * PHY has capability to generate interrupt asynchronously in low
	 * power mode (LPM). This interrupt is level triggered. So USB IRQ
	 * line must be disabled till async interrupt enable bit is cleared
	 * in USBCMD register. Assert STP (ULPI interface STOP signal) to
	 * block data communication from PHY. Enable asynchronous interrupt
	 * only when wakeup gpio IRQ is not present.
	 */
	if (mhcd->wakeup_irq)
		writel_relaxed(readl_relaxed(USB_USBCMD) | ULPI_STP_CTRL,
				USB_USBCMD);
	else
		writel_relaxed(readl_relaxed(USB_USBCMD) | ASYNC_INTR_CTRL |
				ULPI_STP_CTRL, USB_USBCMD);

	/*
	 * Ensure that hardware is put in low power mode before
	 * clocks are turned OFF and VDD is allowed to minimize.
	 */
	mb();

	clk_disable_unprepare(mhcd->iface_clk);
	clk_disable_unprepare(mhcd->core_clk);

	/* usb phy does not require TCXO clock, hence vote for TCXO disable */
	if (mhcd->xo_clk)
		clk_disable_unprepare(mhcd->xo_clk);

	clear_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);

	msm_ehci_config_vddcx(mhcd, 0);

	atomic_set(&mhcd->in_lpm, 1);
	enable_irq(hcd->irq);

	/* Arm every available wakeup source for LPM exit. */
	if (mhcd->wakeup_irq) {
		mhcd->wakeup_irq_enabled = 1;
		enable_irq_wake(mhcd->wakeup_irq);
		enable_irq(mhcd->wakeup_irq);
	}

	if (mhcd->pmic_gpio_dp_irq) {
		mhcd->pmic_gpio_dp_irq_enabled = 1;
		enable_irq_wake(mhcd->pmic_gpio_dp_irq);
		enable_irq(mhcd->pmic_gpio_dp_irq);
	}
	if (mhcd->async_irq) {
		mhcd->async_irq_enabled = 1;
		enable_irq_wake(mhcd->async_irq);
		enable_irq(mhcd->async_irq);
	}
	/* Controller is down; allow the system to suspend again. */
	pm_relax(mhcd->dev);

	dev_info(mhcd->dev, "EHCI USB in low power mode\n");

	return 0;

reset_phy_and_link:
	schedule_work(&mhcd->phy_susp_fail_work);
	return -ETIMEDOUT;
}
int32_t qpnp_iadc_vadc_sync_read(struct qpnp_iadc_chip *iadc, enum qpnp_iadc_channels i_channel, struct qpnp_iadc_result *i_result, enum qpnp_vadc_channels v_channel, struct qpnp_vadc_result *v_result) { int rc = 0, mode_sel = 0, num = 0, rsense_n_ohms = 0, sign = 0; int dt_index = 0; uint16_t raw_data; int32_t rsense_u_ohms = 0; int64_t result_current; if (qpnp_iadc_is_valid(iadc) < 0) return -EPROBE_DEFER; if ((iadc->adc->calib.gain_raw - iadc->adc->calib.offset_raw) == 0) { pr_err("raw offset errors! run iadc calibration again\n"); return -EINVAL; } mutex_lock(&iadc->adc->adc_lock); if (iadc->iadc_poll_eoc) { pr_debug("acquiring iadc eoc wakelock\n"); pm_stay_awake(iadc->dev); } iadc->iadc_mode_sel = true; rc = qpnp_vadc_iadc_sync_request(iadc->vadc_dev, v_channel); if (rc) { pr_err("Configuring VADC failed\n"); goto fail; } while (((enum qpnp_iadc_channels) iadc->adc->adc_channels[dt_index].channel_num != i_channel) && (dt_index < iadc->max_channels_available)) dt_index++; if (dt_index >= iadc->max_channels_available) { pr_err("not a valid IADC channel\n"); rc = -EINVAL; goto fail; } iadc->adc->amux_prop->decimation = iadc->adc->adc_channels[dt_index].adc_decimation; iadc->adc->amux_prop->fast_avg_setup = iadc->adc->adc_channels[dt_index].fast_avg_setup; rc = qpnp_iadc_configure(iadc, i_channel, &raw_data, mode_sel); if (rc < 0) { pr_err("qpnp adc result read failed with %d\n", rc); goto fail_release_vadc; } rc = qpnp_iadc_get_rsense(iadc, &rsense_n_ohms); pr_debug("current raw:0%x and rsense:%d\n", raw_data, rsense_n_ohms); rsense_u_ohms = rsense_n_ohms/1000; num = raw_data - iadc->adc->calib.offset_raw; if (num < 0) { sign = 1; num = -num; } i_result->result_uv = (num * QPNP_ADC_GAIN_NV)/ (iadc->adc->calib.gain_raw - iadc->adc->calib.offset_raw); result_current = i_result->result_uv; result_current *= QPNP_IADC_NANO_VOLTS_FACTOR; /* Intentional fall through. 
Process the result w/o comp */ if (!rsense_u_ohms) { pr_err("rsense error=%d\n", rsense_u_ohms); goto fail_release_vadc; } do_div(result_current, rsense_u_ohms); if (sign) { i_result->result_uv = -i_result->result_uv; result_current = -result_current; } result_current *= -1; rc = qpnp_iadc_comp_result(iadc, &result_current); if (rc < 0) pr_err("Error during compensating the IADC\n"); rc = 0; result_current *= -1; i_result->result_ua = (int32_t) result_current; fail_release_vadc: rc = qpnp_vadc_iadc_sync_complete_request(iadc->vadc_dev, v_channel, v_result); if (rc) pr_err("Releasing VADC failed\n"); fail: iadc->iadc_mode_sel = false; if (iadc->iadc_poll_eoc) { pr_debug("releasing iadc eoc wakelock\n"); pm_relax(iadc->dev); } mutex_unlock(&iadc->adc->adc_lock); return rc; }
/*
 * Drop a reference on one FIMC-IS resource; the last user of a given
 * resource powers it down ("local update") and the last user overall
 * tears down QoS votes and releases the wake source ("global update").
 *
 * NOTE(review): this chunk is truncated — the p_err label targeted by
 * the gotos, the final rsccount decrement and the return statement are
 * not visible here; do not assume this is the whole function.
 */
int fimc_is_resource_put(struct fimc_is_resourcemgr *resourcemgr, u32 rsc_type)
{
	int ret = 0;
	u32 rsccount;
	struct fimc_is_resource *resource;
	struct fimc_is_core *core;

	BUG_ON(!resourcemgr);
	BUG_ON(!resourcemgr->private_data);
	BUG_ON(rsc_type >= RESOURCE_TYPE_MAX);

	resource = GET_RESOURCE(resourcemgr, rsc_type);
	core = (struct fimc_is_core *)resourcemgr->private_data;
	rsccount = atomic_read(&core->rsccount);

	if (!core->pdev) {
		err("[RSC] pdev is NULL");
		ret = -EMFILE;
		goto p_err;
	}

	if (rsccount == 0) {
		err("[RSC] Invalid rsccount(%d)\n", rsccount);
		ret = -EMFILE;
		goto p_err;
	}

	/* local update */
	if (atomic_read(&resource->rsccount) == 1) {
		/* clear hal version, default 1.0 */
		resourcemgr->hal_version = IS_HAL_VER_1_0;
		switch (rsc_type) {
		case RESOURCE_TYPE_COMPANION:
#if defined(CONFIG_PM_RUNTIME)
			pm_runtime_put_sync(&resource->pdev->dev);
#else
			fimc_is_companion_runtime_suspend(&resource->pdev->dev);
#endif
			clear_bit(FIMC_IS_RM_COM_POWER_ON, &resourcemgr->state);
			break;
		case RESOURCE_TYPE_SENSOR0:
#if defined(CONFIG_PM_RUNTIME)
			pm_runtime_put_sync(&resource->pdev->dev);
#else
			fimc_is_sensor_runtime_suspend(&resource->pdev->dev);
#endif
			clear_bit(FIMC_IS_RM_SS0_POWER_ON, &resourcemgr->state);
			break;
		case RESOURCE_TYPE_SENSOR1:
#if defined(CONFIG_PM_RUNTIME)
			pm_runtime_put_sync(&resource->pdev->dev);
#else
			fimc_is_sensor_runtime_suspend(&resource->pdev->dev);
#endif
			clear_bit(FIMC_IS_RM_SS1_POWER_ON, &resourcemgr->state);
			break;
		case RESOURCE_TYPE_ISCHAIN:
			/* Failures below are logged but do not abort the
			 * teardown sequence. */
			ret = fimc_is_itf_power_down(&core->interface);
			if (ret)
				err("power down cmd is fail(%d)", ret);

			ret = fimc_is_ischain_power(&core->ischain[0], 0);
			if (ret)
				err("fimc_is_ischain_power is fail(%d)", ret);

			ret = fimc_is_interface_close(&core->interface);
			if (ret)
				err("fimc_is_interface_close is fail(%d)", ret);

			ret = fimc_is_debug_close();
			if (ret)
				err("fimc_is_debug_close is fail(%d)", ret);

#ifndef ENABLE_RESERVED_MEM
			ret = fimc_is_resourcemgr_deinitmem(resourcemgr);
			if (ret)
				err("fimc_is_resourcemgr_deinitmem is fail(%d)", ret);
#endif

			clear_bit(FIMC_IS_RM_ISC_POWER_ON, &resourcemgr->state);
			break;
		default:
			err("[RSC] resource type(%d) is invalid", rsc_type);
			BUG();
			break;
		}
	}

	/* global update */
	if (atomic_read(&core->rsccount) == 1) {
		u32 current_min, current_max;

		/* Remove any leftover cluster min/max frequency QoS votes. */
		current_min = (resourcemgr->cluster0 & CLUSTER_MIN_MASK) >> CLUSTER_MIN_SHIFT;
		current_max = (resourcemgr->cluster0 & CLUSTER_MAX_MASK) >> CLUSTER_MAX_SHIFT;
		if (current_min) {
			C0MIN_QOS_DEL();
			warn("[RSC] cluster0 minfreq is not removed(%dMhz)\n", current_min);
		}

		if (current_max) {
			C0MAX_QOS_DEL();
			warn("[RSC] cluster0 maxfreq is not removed(%dMhz)\n", current_max);
		}

		current_min = (resourcemgr->cluster1 & CLUSTER_MIN_MASK) >> CLUSTER_MIN_SHIFT;
		current_max = (resourcemgr->cluster1 & CLUSTER_MAX_MASK) >> CLUSTER_MAX_SHIFT;
		if (current_min) {
			C1MIN_QOS_DEL();
			warn("[RSC] cluster1 minfreq is not removed(%dMhz)\n", current_min);
		}

		if (current_max) {
			C1MAX_QOS_DEL();
			warn("[RSC] cluster1 maxfreq is not removed(%dMhz)\n", current_max);
		}
		resourcemgr->cluster0 = 0;
		resourcemgr->cluster1 = 0;

		ret = fimc_is_runtime_suspend_post(NULL);
		if (ret)
			err("fimc_is_runtime_suspend_post is fail(%d)", ret);

		/* All resources released: allow system suspend again. */
		pm_relax(&core->pdev->dev);

		clear_bit(FIMC_IS_RM_POWER_ON, &resourcemgr->state);
	}
/*
 * Run an IADC gain/offset calibration cycle and program the resulting
 * offset trim into the PMIC (secure-access MSB/LSB register writes).
 * @batfet_closed selects the offset channel on affected PM8941 revs.
 * Returns 0 on success or a negative errno.  Register writes below are
 * order-sensitive (SEC_ACCESS must precede each trim write).
 */
int32_t qpnp_iadc_calibrate_for_trim(struct qpnp_iadc_chip *iadc,
							bool batfet_closed)
{
	uint8_t rslt_lsb, rslt_msb;
	int32_t rc = 0, version = 0;
	uint16_t raw_data;
	uint32_t mode_sel = 0;
	bool iadc_offset_ch_batfet_check;

	if (qpnp_iadc_is_valid(iadc) < 0)
		return -EPROBE_DEFER;

	mutex_lock(&iadc->adc->adc_lock);

	if (iadc->iadc_poll_eoc) {
		pr_debug("acquiring iadc eoc wakelock\n");
		pm_stay_awake(iadc->dev);
	}

	iadc->adc->amux_prop->decimation = DECIMATION_TYPE1;
	iadc->adc->amux_prop->fast_avg_setup = ADC_FAST_AVG_SAMPLE_1;

	rc = qpnp_iadc_configure(iadc, GAIN_CALIBRATION_17P857MV,
						&raw_data, mode_sel);
	if (rc < 0) {
		pr_err("qpnp adc result read failed with %d\n", rc);
		goto fail;
	}

	iadc->adc->calib.gain_raw = raw_data;

	/*
	 * there is a features on PM8941 in the BMS where if the batfet is
	 * opened the BMS reads from INTERNAL_RSENSE (channel 0) actually go to
	 * OFFSET_CALIBRATION_CSP_CSN (channel 5). Hence if batfet is opened
	 * we have to calibrate based on OFFSET_CALIBRATION_CSP_CSN even for
	 * internal rsense.
	 */
	version = qpnp_adc_get_revid_version(iadc->dev);
	if ((version == QPNP_REV_ID_8941_3_1) ||
			(version == QPNP_REV_ID_8941_3_0) ||
			(version == QPNP_REV_ID_8941_2_0))
		iadc_offset_ch_batfet_check = true;
	else
		iadc_offset_ch_batfet_check = false;

	if ((iadc_offset_ch_batfet_check && !batfet_closed) ||
						(iadc->external_rsense)) {
		/* external offset calculation */
		rc = qpnp_iadc_configure(iadc, OFFSET_CALIBRATION_CSP_CSN,
						&raw_data, mode_sel);
		if (rc < 0) {
			pr_err("qpnp adc result read failed with %d\n", rc);
			goto fail;
		}
	} else {
		/* internal offset calculation */
		rc = qpnp_iadc_configure(iadc, OFFSET_CALIBRATION_CSP2_CSN2,
						&raw_data, mode_sel);
		if (rc < 0) {
			pr_err("qpnp adc result read failed with %d\n", rc);
			goto fail;
		}
	}

	iadc->adc->calib.offset_raw = raw_data;
	/* NOTE(review): rc was already checked in both branches above, so
	 * this test is dead code; kept for fidelity. */
	if (rc < 0) {
		pr_err("qpnp adc offset/gain calculation failed\n");
		goto fail;
	}

	/* PM8026 rev 2/3 workaround: derive gain from offset + ideal span. */
	if (iadc->iadc_comp.revision_dig_major == QPNP_IADC_PM8026_2_REV2
		&& iadc->iadc_comp.revision_ana_minor == QPNP_IADC_PM8026_2_REV3)
		iadc->adc->calib.gain_raw =
			iadc->adc->calib.offset_raw + IADC_IDEAL_RAW_GAIN;

	pr_debug("raw gain:0x%x, raw offset:0x%x\n",
		iadc->adc->calib.gain_raw, iadc->adc->calib.offset_raw);

	rc = qpnp_convert_raw_offset_voltage(iadc);
	if (rc < 0) {
		pr_err("qpnp raw_voltage conversion failed\n");
		goto fail;
	}

	rslt_msb = (raw_data & QPNP_RAW_CODE_16_BIT_MSB_MASK) >>
							QPNP_BIT_SHIFT_8;
	rslt_lsb = raw_data & QPNP_RAW_CODE_16_BIT_LSB_MASK;

	pr_debug("trim values:lsb:0x%x and msb:0x%x\n", rslt_lsb, rslt_msb);

	rc = qpnp_iadc_write_reg(iadc, QPNP_IADC_SEC_ACCESS,
					QPNP_IADC_SEC_ACCESS_DATA);
	if (rc < 0) {
		pr_err("qpnp iadc configure error for sec access\n");
		goto fail;
	}

	rc = qpnp_iadc_write_reg(iadc, QPNP_IADC_MSB_OFFSET,
						rslt_msb);
	if (rc < 0) {
		pr_err("qpnp iadc configure error for MSB write\n");
		goto fail;
	}

	rc = qpnp_iadc_write_reg(iadc, QPNP_IADC_SEC_ACCESS,
					QPNP_IADC_SEC_ACCESS_DATA);
	if (rc < 0) {
		pr_err("qpnp iadc configure error for sec access\n");
		goto fail;
	}

	rc = qpnp_iadc_write_reg(iadc, QPNP_IADC_LSB_OFFSET,
						rslt_lsb);
	if (rc < 0) {
		pr_err("qpnp iadc configure error for LSB write\n");
		goto fail;
	}
fail:
	if (iadc->iadc_poll_eoc) {
		pr_debug("releasing iadc eoc wakelock\n");
		pm_relax(iadc->dev);
	}
	mutex_unlock(&iadc->adc->adc_lock);
	return rc;
}
/*
 * Variant copy of qpnp_iadc_calibrate_for_trim() (this chunk is a
 * concatenation of fragments from different trees): identical to the
 * copy above except it does not program decimation/fast-average before
 * the gain conversion.  Calibrates IADC gain/offset and writes the
 * offset trim through the secure-access registers.  Returns 0 or a
 * negative errno.  Register-write order is hardware-mandated.
 */
int32_t qpnp_iadc_calibrate_for_trim(struct qpnp_iadc_chip *iadc,
							bool batfet_closed)
{
	uint8_t rslt_lsb, rslt_msb;
	int32_t rc = 0, version = 0;
	uint16_t raw_data;
	uint32_t mode_sel = 0;
	bool iadc_offset_ch_batfet_check;

	if (qpnp_iadc_is_valid(iadc) < 0)
		return -EPROBE_DEFER;

	mutex_lock(&iadc->adc->adc_lock);

	if (iadc->iadc_poll_eoc) {
		pr_debug("acquiring iadc eoc wakelock\n");
		pm_stay_awake(iadc->dev);
	}

	rc = qpnp_iadc_configure(iadc, GAIN_CALIBRATION_17P857MV,
						&raw_data, mode_sel);
	if (rc < 0) {
		pr_err("qpnp adc result read failed with %d\n", rc);
		goto fail;
	}

	iadc->adc->calib.gain_raw = raw_data;

	/* On PM8941 2.0/3.0/3.1 with the batfet open, the internal-rsense
	 * offset must be measured on the CSP/CSN channel instead. */
	version = qpnp_adc_get_revid_version(iadc->dev);
	if ((version == QPNP_REV_ID_8941_3_1) ||
			(version == QPNP_REV_ID_8941_3_0) ||
			(version == QPNP_REV_ID_8941_2_0))
		iadc_offset_ch_batfet_check = true;
	else
		iadc_offset_ch_batfet_check = false;

	if ((iadc_offset_ch_batfet_check && !batfet_closed) ||
						(iadc->external_rsense)) {
		/* external offset calculation */
		rc = qpnp_iadc_configure(iadc, OFFSET_CALIBRATION_CSP_CSN,
						&raw_data, mode_sel);
		if (rc < 0) {
			pr_err("qpnp adc result read failed with %d\n", rc);
			goto fail;
		}
	} else {
		/* internal offset calculation */
		rc = qpnp_iadc_configure(iadc, OFFSET_CALIBRATION_CSP2_CSN2,
						&raw_data, mode_sel);
		if (rc < 0) {
			pr_err("qpnp adc result read failed with %d\n", rc);
			goto fail;
		}
	}

	iadc->adc->calib.offset_raw = raw_data;
	/* NOTE(review): dead check — rc already verified above. */
	if (rc < 0) {
		pr_err("qpnp adc offset/gain calculation failed\n");
		goto fail;
	}

	/* PM8026 rev 2/3 workaround: derive gain from offset + ideal span. */
	if (iadc->iadc_comp.revision_dig_major == QPNP_IADC_PM8026_2_REV2
		&& iadc->iadc_comp.revision_ana_minor == QPNP_IADC_PM8026_2_REV3)
		iadc->adc->calib.gain_raw =
			iadc->adc->calib.offset_raw + IADC_IDEAL_RAW_GAIN;

	pr_debug("raw gain:0x%x, raw offset:0x%x\n",
		iadc->adc->calib.gain_raw, iadc->adc->calib.offset_raw);

	rc = qpnp_convert_raw_offset_voltage(iadc);
	if (rc < 0) {
		pr_err("qpnp raw_voltage conversion failed\n");
		goto fail;
	}

	rslt_msb = (raw_data & QPNP_RAW_CODE_16_BIT_MSB_MASK) >>
							QPNP_BIT_SHIFT_8;
	rslt_lsb = raw_data & QPNP_RAW_CODE_16_BIT_LSB_MASK;

	pr_debug("trim values:lsb:0x%x and msb:0x%x\n", rslt_lsb, rslt_msb);

	rc = qpnp_iadc_write_reg(iadc, QPNP_IADC_SEC_ACCESS,
					QPNP_IADC_SEC_ACCESS_DATA);
	if (rc < 0) {
		pr_err("qpnp iadc configure error for sec access\n");
		goto fail;
	}

	rc = qpnp_iadc_write_reg(iadc, QPNP_IADC_MSB_OFFSET,
						rslt_msb);
	if (rc < 0) {
		pr_err("qpnp iadc configure error for MSB write\n");
		goto fail;
	}

	rc = qpnp_iadc_write_reg(iadc, QPNP_IADC_SEC_ACCESS,
					QPNP_IADC_SEC_ACCESS_DATA);
	if (rc < 0) {
		pr_err("qpnp iadc configure error for sec access\n");
		goto fail;
	}

	rc = qpnp_iadc_write_reg(iadc, QPNP_IADC_LSB_OFFSET,
						rslt_lsb);
	if (rc < 0) {
		pr_err("qpnp iadc configure error for LSB write\n");
		goto fail;
	}
fail:
	if (iadc->iadc_poll_eoc) {
		pr_debug("releasing iadc eoc wakelock\n");
		pm_relax(iadc->dev);
	}
	mutex_unlock(&iadc->adc->adc_lock);
	return rc;
}