static int32_t qpnp_convert_raw_offset_voltage(struct qpnp_iadc_chip *iadc)
{
    s64 numerator;

    if ((iadc->adc->calib.gain_raw - iadc->adc->calib.offset_raw) == 0) {
        pr_err("raw offset errors! raw_gain:0x%x and raw_offset:0x%x\n",
               iadc->adc->calib.gain_raw, iadc->adc->calib.offset_raw);
        return -EINVAL;
    }

    numerator = iadc->adc->calib.offset_raw - IADC_CENTER;
    numerator *= IADC_READING_RESOLUTION_N;
    iadc->adc->calib.offset_uv = div_s64(numerator,
                                         IADC_READING_RESOLUTION_D);

    numerator = iadc->adc->calib.gain_raw - iadc->adc->calib.offset_raw;
    numerator *= IADC_READING_RESOLUTION_N;

    iadc->adc->calib.gain_uv = div_s64(numerator,
                                       IADC_READING_RESOLUTION_D);

    pr_debug("gain_uv:%d offset_uv:%d\n",
             iadc->adc->calib.gain_uv, iadc->adc->calib.offset_uv);
    return 0;
}
Example #2
static void __update_writeback_rate(struct cached_dev *dc)
{
	/*
	 * PI controller:
	 * Figures out the amount that should be written per second.
	 *
	 * First, the error (number of sectors that are dirty beyond our
	 * target) is calculated.  The error is accumulated (numerically
	 * integrated).
	 *
	 * Then, the proportional value and integral value are scaled
	 * based on configured values.  These are stored as inverses to
	 * avoid fixed point math and to make configuration easy-- e.g.
	 * the default value of 40 for writeback_rate_p_term_inverse
	 * attempts to write at a rate that would retire all the dirty
	 * blocks in 40 seconds.
	 *
	 * The writeback_rate_i_inverse value of 10000 means that 1/10000th
	 * of the error is accumulated in the integral term per second.
	 * This acts as a slow, long-term average that is not subject to
	 * variations in usage like the p term.
	 */
	int64_t target = __calc_target_rate(dc);
	int64_t dirty = bcache_dev_sectors_dirty(&dc->disk);
	int64_t error = dirty - target;
	int64_t proportional_scaled =
		div_s64(error, dc->writeback_rate_p_term_inverse);
	int64_t integral_scaled;
	uint32_t new_rate;

	if ((error < 0 && dc->writeback_rate_integral > 0) ||
	    (error > 0 && time_before64(local_clock(),
			 dc->writeback_rate.next + NSEC_PER_MSEC))) {
		/*
		 * Only decrease the integral term if it's more than
		 * zero.  Only increase the integral term if the device
		 * is keeping up.  (Don't wind up the integral
		 * ineffectively in either case).
		 *
		 * It's necessary to scale this by
		 * writeback_rate_update_seconds to keep the integral
		 * term dimensioned properly.
		 */
		dc->writeback_rate_integral += error *
			dc->writeback_rate_update_seconds;
	}

	integral_scaled = div_s64(dc->writeback_rate_integral,
			dc->writeback_rate_i_term_inverse);

	new_rate = clamp_t(int32_t, (proportional_scaled + integral_scaled),
			dc->writeback_rate_minimum, NSEC_PER_SEC);

	dc->writeback_rate_proportional = proportional_scaled;
	dc->writeback_rate_integral_scaled = integral_scaled;
	dc->writeback_rate_change = new_rate -
			atomic_long_read(&dc->writeback_rate.rate);
	atomic_long_set(&dc->writeback_rate.rate, new_rate);
	dc->writeback_rate_target = target;
}
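
The controller arithmetic above is easy to check in isolation. A minimal standalone sketch with hypothetical numbers (plain C; div_s64 reduces to ordinary signed division here, and the clamp step is omitted):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* hypothetical snapshot of the controller state */
	int64_t error = 10000;          /* sectors dirty beyond target  */
	int64_t integral = 250000;      /* accumulated error * seconds  */
	int64_t p_term_inverse = 40;    /* retire dirty data in ~40 s   */
	int64_t i_term_inverse = 10000; /* 1/10000 of error per second  */

	int64_t proportional_scaled = error / p_term_inverse; /* 250 */
	int64_t integral_scaled = integral / i_term_inverse;  /*  25 */

	printf("new rate = %lld sectors/s\n",
	       (long long)(proportional_scaled + integral_scaled)); /* 275 */
	return 0;
}
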
Example #3
bool tegra_dc_does_vsync_separate(struct tegra_dc *dc, s64 new_ts, s64 old_ts)
{
	BUG_ON(!dc->frametime_ns);
	return (((new_ts - old_ts) > dc->frametime_ns)
		|| (div_s64((new_ts - dc->frame_end_timestamp), dc->frametime_ns)
			!= div_s64((old_ts - dc->frame_end_timestamp),
				dc->frametime_ns)));
}
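
A quick standalone check of the same test with made-up numbers (~60 Hz frame time): two timestamps fall on separate vsyncs when their frame indices relative to frame_end_timestamp differ.

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	int64_t frametime_ns = 16666667;       /* ~60 Hz                   */
	int64_t frame_end = 1000000000;        /* last frame-end timestamp */
	int64_t old_ts = frame_end + 5000000;  /*  5 ms in -> frame 0      */
	int64_t new_ts = frame_end + 20000000; /* 20 ms in -> frame 1      */

	int separate = (new_ts - old_ts) > frametime_ns ||
		       (new_ts - frame_end) / frametime_ns !=
		       (old_ts - frame_end) / frametime_ns;

	printf("separate vsyncs: %d\n", separate); /* prints 1 */
	return 0;
}
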
Example #4
void __timecompare_update(struct timecompare *sync,
			  u64 source_tstamp)
{
	s64 offset;
	u64 average_time;

	if (!timecompare_offset(sync, &offset, &average_time))
		return;

	if (!sync->last_update) {
		sync->last_update = average_time;
		sync->offset = offset;
		sync->skew = 0;
	} else {
		s64 delta_nsec = average_time - sync->last_update;

		/* avoid division by negative or small deltas */
		if (delta_nsec >= 10000) {
			s64 delta_offset_nsec = offset - sync->offset;
			s64 skew; /* delta_offset_nsec *
				     TIMECOMPARE_SKEW_RESOLUTION /
				     delta_nsec */
			u64 divisor;

			/* div_s64() is limited to 32 bit divisor */
			skew = delta_offset_nsec * TIMECOMPARE_SKEW_RESOLUTION;
			divisor = delta_nsec;
			while (unlikely(divisor >= ((s64)1) << 32)) {
				/* divide both by 2; beware, right shift
				   of negative value has undefined
				   behavior and can only be used for
				   the positive divisor */
				skew = div_s64(skew, 2);
				divisor >>= 1;
			}
			skew = div_s64(skew, divisor);

			/*
			 * Calculate new overall skew as 4/16 the
			 * old value and 12/16 the new one. This is
			 * a rather arbitrary tradeoff between
			 * only using the latest measurement (0/16 and
			 * 16/16) and even more weight on past measurements.
			 */
#define TIMECOMPARE_NEW_SKEW_PER_16 12
			sync->skew =
				div_s64((16 - TIMECOMPARE_NEW_SKEW_PER_16) *
					sync->skew +
					TIMECOMPARE_NEW_SKEW_PER_16 * skew,
					16);
			sync->last_update = average_time;
			sync->offset = offset;
		}
	}
}
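
The 4/16-old, 12/16-new blend at the end is a plain exponentially weighted moving average. A standalone sketch with made-up skew samples shows how quickly it tracks recent measurements:

#include <stdio.h>
#include <stdint.h>

#define NEW_SKEW_PER_16 12

int main(void)
{
	int64_t skew = 0;                       /* running estimate   */
	int64_t samples[] = { 800, 1000, 900 }; /* hypothetical skews */
	unsigned int i;

	for (i = 0; i < sizeof(samples) / sizeof(samples[0]); i++) {
		skew = ((16 - NEW_SKEW_PER_16) * skew +
			NEW_SKEW_PER_16 * samples[i]) / 16;
		printf("after sample %u: skew = %lld\n", i, (long long)skew);
	}
	/* prints 600, 900, 900: heavily weighted toward recent samples */
	return 0;
}
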
Example #5
static void __update_writeback_rate(struct cached_dev *dc)
{
	struct cache_set *c = dc->disk.c;
	uint64_t cache_sectors = c->nbuckets * c->sb.bucket_size;
	uint64_t cache_dirty_target =
		div_u64(cache_sectors * dc->writeback_percent, 100);

	int64_t target = div64_u64(cache_dirty_target * bdev_sectors(dc->bdev),
				   c->cached_dev_sectors);

	/* PD controller */

	int64_t dirty = bcache_dev_sectors_dirty(&dc->disk);
	int64_t derivative = dirty - dc->disk.sectors_dirty_last;
	int64_t proportional = dirty - target;
	int64_t change;

	dc->disk.sectors_dirty_last = dirty;

	/* Scale to sectors per second */

	proportional *= dc->writeback_rate_update_seconds;
	proportional = div_s64(proportional, dc->writeback_rate_p_term_inverse);

	derivative = div_s64(derivative, dc->writeback_rate_update_seconds);

	derivative = ewma_add(dc->disk.sectors_dirty_derivative, derivative,
			      (dc->writeback_rate_d_term /
			       dc->writeback_rate_update_seconds) ?: 1, 0);

	derivative *= dc->writeback_rate_d_term;
	derivative = div_s64(derivative, dc->writeback_rate_p_term_inverse);

	change = proportional + derivative;

	/* Don't increase writeback rate if the device isn't keeping up */
	if (change > 0 &&
	    time_after64(local_clock(),
			 dc->writeback_rate.next + NSEC_PER_MSEC))
		change = 0;

	dc->writeback_rate.rate =
		clamp_t(int64_t, (int64_t) dc->writeback_rate.rate + change,
			1, NSEC_PER_MSEC);

	dc->writeback_rate_proportional = proportional;
	dc->writeback_rate_derivative = derivative;
	dc->writeback_rate_change = change;
	dc->writeback_rate_target = target;
}
Example #6
static void mdss_dsi_ssc_param_calc(struct mdss_dsi_pll_config *pd,
	struct mdss_dsi_vco_calc *vco_calc)
{
	uint64_t ppm_freq, incr, spread_freq, div_rf, frac_n_value;
	uint32_t rem;

	vco_calc->ssc.kdiv = div_round_closest_unsigned(VCO_REF_CLOCK_RATE,
		1000000) - 1;
	vco_calc->ssc.triang_steps = div_round_closest_unsigned(
		VCO_REF_CLOCK_RATE, pd->ssc_freq * (vco_calc->ssc.kdiv + 1));
	ppm_freq = (vco_calc->gen_vco_clk * pd->ssc_ppm) / 1000000;
	incr = (ppm_freq * 65536) / (VCO_REF_CLOCK_RATE * 2 *
		vco_calc->ssc.triang_steps);

	vco_calc->ssc.triang_inc_7_0 = incr & 0xff;
	vco_calc->ssc.triang_inc_9_8 = (incr >> 8) & 0x3;

	if (!pd->is_center_spread)
		spread_freq = vco_calc->gen_vco_clk - ppm_freq;
	else
		spread_freq = vco_calc->gen_vco_clk - (ppm_freq / 2);

	div_rf = spread_freq / (2 * VCO_REF_CLOCK_RATE);
	vco_calc->ssc.dc_offset = (div_rf - 1);

	div_s64(spread_freq, 2 * VCO_REF_CLOCK_RATE, &rem);
	frac_n_value = ((uint64_t) rem * 65536) / (2 * VCO_REF_CLOCK_RATE);

	vco_calc->ssc.freq_seed_7_0 = frac_n_value & 0xff;
	vco_calc->ssc.freq_seed_15_8 = (frac_n_value >> 8) & 0xff;
}
Example #7
static int adxl345_write_raw(struct iio_dev *indio_dev,
			     struct iio_chan_spec const *chan,
			     int val, int val2, long mask)
{
	struct adxl345_data *data = iio_priv(indio_dev);
	s64 n;

	switch (mask) {
	case IIO_CHAN_INFO_CALIBBIAS:
		/*
		 * 8-bit resolution at +/- 2g, that is 4x accel data scale
		 * factor
		 */
		return regmap_write(data->regmap,
				    ADXL345_REG_OFS_AXIS(chan->address),
				    val / 4);
	case IIO_CHAN_INFO_SAMP_FREQ:
		n = div_s64(val * NHZ_PER_HZ + val2, ADXL345_BASE_RATE_NANO_HZ);

		return regmap_update_bits(data->regmap, ADXL345_REG_BW_RATE,
					  ADXL345_BW_RATE,
					  clamp_val(ilog2(n), 0,
						    ADXL345_BW_RATE));
	}

	return -EINVAL;
}
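
The BW_RATE code is just the log2 ratio of the requested frequency to the device base rate. A standalone check, assuming the upstream constant ADXL345_BASE_RATE_NANO_HZ = 97656250 (0.09765625 Hz):

#include <stdio.h>
#include <stdint.h>

#define NHZ_PER_HZ        1000000000LL
#define BASE_RATE_NANO_HZ 97656250LL /* 0.09765625 Hz; assumed from upstream */

static int ilog2_64(int64_t n) /* floor(log2(n)) for n > 0 */
{
	int l = -1;

	while (n) {
		n >>= 1;
		l++;
	}
	return l;
}

int main(void)
{
	int val = 25, val2 = 0; /* request 25.000000000 Hz */
	int64_t n = ((int64_t)val * NHZ_PER_HZ + val2) / BASE_RATE_NANO_HZ;

	printf("n = %lld, rate code = %d\n", (long long)n, ilog2_64(n));
	/* n = 256, rate code = 8: 0.09765625 Hz * 2^8 = 25 Hz */
	return 0;
}
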
Example #8
/**
 * cmm_get_mpp - Read memory performance parameters
 *
 * Makes hcall to query the current page loan request from the hypervisor.
 *
 * Return value:
 * 	nothing
 **/
static void cmm_get_mpp(void)
{
	int rc;
	struct hvcall_mpp_data mpp_data;
	unsigned long active_pages_target;
	signed long page_loan_request;

	rc = h_get_mpp(&mpp_data);

	if (rc != H_SUCCESS)
		return;

	page_loan_request = div_s64((s64)mpp_data.loan_request, PAGE_SIZE);
	loaned_pages_target = page_loan_request + loaned_pages;
	if (loaned_pages_target > oom_freed_pages)
		loaned_pages_target -= oom_freed_pages;
	else
		loaned_pages_target = 0;

	active_pages_target = totalram_pages + loaned_pages - loaned_pages_target;

	if ((min_mem_mb * 1024 * 1024) > (active_pages_target * PAGE_SIZE))
		loaned_pages_target = totalram_pages + loaned_pages -
			((min_mem_mb * 1024 * 1024) / PAGE_SIZE);

	cmm_dbg("delta = %ld, loaned = %lu, target = %lu, oom = %lu, totalram = %lu\n",
		page_loan_request, loaned_pages, loaned_pages_target,
		oom_freed_pages, totalram_pages);
}
Example #9
static int mtk_rtc_read_time(struct device *dev, struct rtc_time *tm)
{
	time64_t time;
	struct mt6397_rtc *rtc = dev_get_drvdata(dev);
	int days, sec, ret;

	do {
		ret = __mtk_rtc_read_time(rtc, tm, &sec);
		if (ret < 0)
			goto exit;
	} while (sec < tm->tm_sec);

	/* HW registers use 7 bits to store the year; RTC_MIN_YEAR_OFFSET is
	 * subtracted before the year is written to the register, and added
	 * back after the year is read out
	 */
	tm->tm_year += RTC_MIN_YEAR_OFFSET;

	/* HW registers count months from one, but tm_mon counts from zero. */
	tm->tm_mon--;
	time = rtc_tm_to_time64(tm);

	/* rtc_tm_to_time64 converts a Gregorian date to seconds since
	 * 01-01-1970 00:00:00, and that date was a Thursday.
	 */
	days = div_s64(time, 86400);
	tm->tm_wday = (days + 4) % 7;

exit:
	return ret;
}
Example #10
static unsigned long __init boottime_get_time(void)
{
	return div_s64(clocksource_cyc2ns(clocksource_dbx500_prcmu.read(
						  &clocksource_dbx500_prcmu),
					  clocksource_dbx500_prcmu.mult,
					  clocksource_dbx500_prcmu.shift),
		       1000);
}
Example #11
static s64 cc_adjust_for_gain(s64 uv, uint16_t gain)
{
	s64 result_uv;

	pr_debug("adjusting_uv = %lld\n", uv);
	if (gain == 0) {
		pr_debug("gain is %d, not adjusting\n", gain);
		return uv;
	}
	pr_debug("adjusting by factor: %lld/%hu = %lld%%\n",
			QPNP_ADC_GAIN_IDEAL, gain,
			div_s64(QPNP_ADC_GAIN_IDEAL * 100LL, (s64)gain));

	result_uv = div_s64(uv * QPNP_ADC_GAIN_IDEAL, (s64)gain);
	pr_debug("result_uv = %lld\n", result_uv);
	return result_uv;
}
Example #12
static int32_t mdss_dsi_pll_vco_rate_calc(struct mdss_dsi_pll_config *pd,
	struct mdss_dsi_vco_calc *vco_calc)
{
	uint8_t i;
	int8_t rc = NO_ERROR;
	uint32_t rem;
	uint64_t frac_n_mode = 0, ref_doubler_en_b = 0;
	uint64_t div_fb = 0, frac_n_value = 0, ref_clk_to_pll = 0;

	/* Configure the Loop filter resistance */
	for (i = 0; i < LPFR_LUT_SIZE; i++)
		if (pd->vco_clock <= lpfr_lut[i].vco_rate)
			break;
	if (i == LPFR_LUT_SIZE) {
		dprintf(INFO, "unable to get loop filter resistance. vco=%d\n",
			lpfr_lut[i - 1].vco_rate);
		rc = ERROR;
		return rc;
	}
	vco_calc->lpfr_lut_res = lpfr_lut[i].resistance;

	rem = pd->vco_clock % VCO_REF_CLOCK_RATE;
	if (rem) {
		vco_calc->refclk_cfg = 0x1;
		frac_n_mode = 1;
		ref_doubler_en_b = 0;
	} else {
		vco_calc->refclk_cfg = 0x0;
		frac_n_mode = 0;
		ref_doubler_en_b = 1;
	}

	ref_clk_to_pll = (VCO_REF_CLOCK_RATE * 2 * vco_calc->refclk_cfg)
			  + (ref_doubler_en_b * VCO_REF_CLOCK_RATE);
	div_fb = div_s64(pd->vco_clock, ref_clk_to_pll, &rem);
	frac_n_value = ((uint64_t) rem * (1 << 16)) / ref_clk_to_pll;
	vco_calc->gen_vco_clk = pd->vco_clock;

	if (frac_n_mode) {
		vco_calc->sdm_cfg0 = 0x0;
		vco_calc->sdm_cfg1 = (div_fb & 0x3f) - 1;
		vco_calc->sdm_cfg3 = frac_n_value / 256;
		vco_calc->sdm_cfg2 = frac_n_value % 256;
	} else {
		vco_calc->sdm_cfg0 = (0x1 << 5);
		vco_calc->sdm_cfg0 |= (div_fb & 0x3f) - 1;
		vco_calc->sdm_cfg1 = 0x0;
		vco_calc->sdm_cfg2 = 0;
		vco_calc->sdm_cfg3 = 0;
	}

	vco_calc->cal_cfg11 = vco_calc->gen_vco_clk / 256000000;
	vco_calc->cal_cfg10 = (vco_calc->gen_vco_clk % 256000000) / 1000000;

	return NO_ERROR;
}
Example #13
static void msm_otg_del_timer(struct msm_otg *dev)
{
	int bit = dev->active_tmout;

	pr_debug("deleting %s timer. remaining %lld msec \n", timer_string(bit),
			div_s64(ktime_to_us(hrtimer_get_remaining(&dev->timer)),
					1000));
	hrtimer_cancel(&dev->timer);
	clear_bit(bit, &dev->tmouts);
}
Example #14
static s64 calc_frametime_ns(const struct tegra_dc_mode *m)
{
	long h_total, v_total;
	h_total = m->h_active + m->h_front_porch + m->h_back_porch +
		m->h_sync_width;
	v_total = m->v_active + m->v_front_porch + m->v_back_porch +
		m->v_sync_width;
	return (!m->pclk) ? 0 : (s64)(div_s64(((s64)h_total * v_total *
					1000000000ULL), m->pclk));
}
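
Plugging in a representative 1080p timing (2200 x 1125 total pixels at a 148.5 MHz pixel clock; hypothetical numbers) reproduces the expected ~16.7 ms frame time:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	int64_t h_total = 2200, v_total = 1125; /* 1080p total timing */
	int64_t pclk = 148500000;               /* 148.5 MHz          */

	int64_t frametime_ns = h_total * v_total * 1000000000LL / pclk;

	printf("frametime = %lld ns (~%lld Hz)\n", (long long)frametime_ns,
	       (long long)(1000000000LL / frametime_ns)); /* 16666666 ns, 60 Hz */
	return 0;
}
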
Example #15
static int rcu_debugfs_show(struct seq_file *m, void *unused)
{
	int cpu, q, s[2], msecs;

	raw_local_irq_disable();
	msecs = div_s64(sched_clock() - rcu_timestamp, NSEC_PER_MSEC);
	raw_local_irq_enable();

	seq_printf(m, "%14u: #batches seen\n",
		rcu_stats.nbatches);
	seq_printf(m, "%14u: #barriers seen\n",
		atomic_read(&rcu_stats.nbarriers));
	seq_printf(m, "%14llu: #callbacks invoked\n",
		rcu_stats.ninvoked);
	seq_printf(m, "%14u: #callbacks left to invoke\n",
		atomic_read(&rcu_stats.nleft));
	seq_printf(m, "%14u: #msecs since last end-of-batch\n",
		msecs);
	seq_printf(m, "%14u: #passes forced (0 is best)\n",
		rcu_stats.nforced);
	seq_printf(m, "\n");

	for_each_online_cpu(cpu)
		seq_printf(m, "%4d ", cpu);
	seq_printf(m, "  CPU\n");

	s[1] = s[0] = 0;
	for_each_online_cpu(cpu) {
		struct rcu_data *rd = &rcu_data[cpu];
		int w = ACCESS_ONCE(rd->which) & 1;
		seq_printf(m, "%c%c%c%d ",
			'-',
			idle_cpu(cpu) ? 'I' : '-',
			rd->wait ? 'W' : '-',
			w);
		s[w]++;
	}
	seq_printf(m, "  FLAGS\n");

	for (q = 0; q < 2; q++) {
		for_each_online_cpu(cpu) {
			struct rcu_data *rd = &rcu_data[cpu];
			struct rcu_list *l = &rd->cblist[q];
			seq_printf(m, "%4d ", l->count);
		}
		seq_printf(m, "  Q%d%c\n", q, " *"[s[q] > s[q^1]]);
	}
	seq_printf(m, "\nFLAGS:\n");
	seq_printf(m, "  I - cpu idle, 0|1 - Q0 or Q1 is current Q, other is previous Q,\n");
	seq_printf(m, "  W - cpu does not permit current batch to end (waiting),\n");
	seq_printf(m, "  * - marks the Q that is current for most CPUs.\n");

	return 0;
}
Example #16
/**
 * convert_regval2uc
 * 1 bit = 2.345*11*10^(-6) C = 2.5795*10^(-5) C = 25795 / 1000 uC
 * convert regval to uC
 */
u64 coul_convert_regval2uc(unsigned short reg_val)
{
    int ret;
    s64 temp;

    ret = reg_val;
    temp = (s64)(ret) * (s64)(25795);
    temp = div_s64(temp, 1000);

    return temp;
}
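
In other words, one LSB is 25.795 uC. A hypothetical reading makes the scaling concrete:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	unsigned short reg_val = 1000;                /* hypothetical raw value */
	int64_t uc = (int64_t)reg_val * 25795 / 1000; /* same scaling as above  */

	printf("%hu LSB -> %lld uC\n", reg_val, (long long)uc); /* 25795 uC */
	return 0;
}
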
Example #17
/* Adjust the HW clock by a rate given in parts-per-billion (ppb) units.
 * FW/HW accepts the adjustment value in terms of 3 parameters:
 *   Drift period - adjustment happens once in certain number of nano seconds.
 *   Drift value - time is adjusted by a certain value, for example by 5 ns.
 *   Drift direction - add or subtract the adjustment value.
 * The routine translates ppb into the adjustment triplet in an optimal manner.
 */
static int qed_ptp_hw_adjfreq(struct qed_dev *cdev, s32 ppb)
{
	s64 best_val = 0, val, best_period = 0, period, approx_dev, dif, dif2;
	struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
	struct qed_ptt *p_ptt = p_hwfn->p_ptp_ptt;
	u32 drift_ctr_cfg = 0, drift_state;
	int drift_dir = 1;

	if (ppb < 0) {
		ppb = -ppb;
		drift_dir = 0;
	}

	if (ppb > 1) {
		s64 best_dif = ppb, best_approx_dev = 1;

		/* Adjustment value is up to +/-7ns, find an optimal value in
		 * this range.
		 */
		for (val = 7; val > 0; val--) {
			period = div_s64(val * 1000000000, ppb);
			period -= 8;
			period >>= 4;
			if (period < 1)
				period = 1;
			if (period > 0xFFFFFFE)
				period = 0xFFFFFFE;

			/* Check both rounding ends for approximate error */
			approx_dev = period * 16 + 8;
			dif = ppb * approx_dev - val * 1000000000;
			dif2 = dif + 16 * ppb;

			if (dif < 0)
				dif = -dif;
			if (dif2 < 0)
				dif2 = -dif2;

			/* Determine which end gives better approximation */
			if (dif * (approx_dev + 16) > dif2 * approx_dev) {
				period++;
				approx_dev += 16;
				dif = dif2;
			}

			/* Track best approximation found so far */
			if (best_dif * approx_dev > dif * best_approx_dev) {
				best_dif = dif;
				best_val = val;
				best_period = period;
				best_approx_dev = approx_dev;
			}
		}
	} else if (ppb == 1) {
		/*
		 * ppb == 1 is a special case: the search above does not
		 * apply, and a setting that reproduces 1 ppb almost exactly
		 * is known in advance - adjust by 4 ns once every
		 * 0xee6b27f * 16 + 8 = 3999999992 ns.
		 */
		best_val = 4;
		best_period = 0xee6b27f;
	}

	/* The rest of the routine is truncated in this listing; it programs
	 * the (best_period, best_val, drift_dir) triplet into the drift
	 * counter configuration via drift_state and drift_ctr_cfg.
	 */
}
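
The period encoding is easy to verify in isolation: for a 100 ppb request and the maximum 7 ns adjustment value, the stored period reproduces the requested rate almost exactly (standalone sketch, hypothetical input):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	int64_t ppb = 100, val = 7; /* request 100 ppb, adjust by 7 ns */

	/* HW waits period * 16 + 8 ns between adjustments */
	int64_t period = (val * 1000000000LL / ppb - 8) >> 4;
	int64_t interval_ns = period * 16 + 8;

	printf("period = %lld, interval = %lld ns, achieved ~%lld ppb\n",
	       (long long)period, (long long)interval_ns,
	       (long long)(val * 1000000000LL / interval_ns));
	/* period = 4374999, interval = 69999992 ns, ~100 ppb */
	return 0;
}
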
Example #18
/*
 * Assumes that IIO and hwmon operate in the same base units.
 * This is supposed to be true, but needs verification for
 * new channel types.
 */
static ssize_t iio_hwmon_read_val(struct device *dev,
				  struct device_attribute *attr,
				  char *buf)
{
	long result;
	int val, ret, scaleint, scalepart;
	struct sensor_device_attribute *sattr = to_sensor_dev_attr(attr);
	struct iio_hwmon_state *state = dev_get_drvdata(dev);

	/*
	 * No locking between this pair, so theoretically possible
	 * the scale has changed.
	 */
	ret = iio_st_read_channel_raw(&state->channels[sattr->index],
				      &val);
	if (ret < 0)
		return ret;

	ret = iio_st_read_channel_scale(&state->channels[sattr->index],
					&scaleint, &scalepart);
	if (ret < 0)
		return ret;
	switch (ret) {
	case IIO_VAL_INT:
		result = val * scaleint;
		break;
	case IIO_VAL_INT_PLUS_MICRO:
		result = (s64)val * (s64)scaleint +
			div_s64((s64)val * (s64)scalepart, 1000000LL);
		break;
	case IIO_VAL_INT_PLUS_NANO:
		result = (s64)val * (s64)scaleint +
			div_s64((s64)val * (s64)scalepart, 1000000000LL);
		break;
	default:
		return -EINVAL;
	}
	return sprintf(buf, "%ld\n", result);
}
Example #19
static int zopt2201_read_raw(struct iio_dev *indio_dev,
				struct iio_chan_spec const *chan,
				int *val, int *val2, long mask)
{
	struct zopt2201_data *data = iio_priv(indio_dev);
	u64 tmp;
	int ret;

	switch (mask) {
	case IIO_CHAN_INFO_RAW:
		ret = zopt2201_read(data, chan->address);
		if (ret < 0)
			return ret;
		*val = ret;
		return IIO_VAL_INT;
	case IIO_CHAN_INFO_PROCESSED:
		ret = zopt2201_read(data, ZOPT2201_UVB_DATA);
		if (ret < 0)
			return ret;
		*val = ret * 18 *
			(1 << (20 - zopt2201_resolution[data->res].bits)) /
			zopt2201_gain_uvb[data->gain].gain;
		return IIO_VAL_INT;
	case IIO_CHAN_INFO_SCALE:
		switch (chan->address) {
		case ZOPT2201_ALS_DATA:
			*val = zopt2201_gain_als[data->gain].scale;
			break;
		case ZOPT2201_UVB_DATA:
			*val = zopt2201_gain_uvb[data->gain].scale;
			break;
		default:
			return -EINVAL;
		}

		*val2 = 1000000;
		*val2 *= (1 << (zopt2201_resolution[data->res].bits - 13));
		tmp = div_s64(*val * 1000000ULL, *val2);
		*val = div_s64_rem(tmp, 1000000, val2);

		return IIO_VAL_INT_PLUS_MICRO;
	case IIO_CHAN_INFO_INT_TIME:
		*val = 0;
		*val2 = zopt2201_resolution[data->res].us;
		return IIO_VAL_INT_PLUS_MICRO;
	default:
		return -EINVAL;
	}
}
Example #20
/*
 * The Mali DDK uses s64 types to contain software counter values, but gator
 * can only use a maximum of 32 bits. This function scales a software counter
 * to an appropriate range.
 */
static u32 scale_sw_counter_value(unsigned int event_id, signed long long value)
{
    u32 scaled_value;

    switch (event_id) {
        case COUNTER_GLES_UPLOAD_TEXTURE_TIME:
        case COUNTER_GLES_UPLOAD_VBO_TIME:
            scaled_value = (u32)div_s64(value, 1000000);
            break;
        default:
            scaled_value = (u32)value;
            break;
    }

    return scaled_value;
}
Example #21
static int palmas_gpadc_get_calibrated_code(struct palmas_gpadc *adc,
						int adc_chan, int val)
{
	s64 code = val * PRECISION_MULTIPLIER;

	if ((code - adc->adc_info[adc_chan].offset) < 0) {
		dev_err(adc->dev, "No Input Connected\n");
		return 0;
	}

	if (!(adc->adc_info[adc_chan].is_correct_code)) {
		code -= adc->adc_info[adc_chan].offset;
		code = div_s64(code, adc->adc_info[adc_chan].gain);
		return code;
	}

	return val;
}
Example #22
/**
 * convert_regval2ua
 * 10 mohm resistance: 1 bit = 5/10661 A = 5*1000*1000 / 10661 uA
 * 20 mohm resistance: 1 bit = 10 mohm / 2
 * 30 mohm resistance: 1 bit = 10 mohm / 3
 * ...
 * high bit = 0 is in, 1 is out
 * convert regval to ua
 */
s64 coul_convert_regval2ua(short reg_val)
{
    int ret;
    s64 temp;

    ret = reg_val;
    temp = (s64)(ret) * (s64)(1000 * 1000 * 5);
    temp = div_s64(temp, 10661);

    ret = temp / (R_COUL_MOHM/10);

    ret = -ret; /* what does this mean? */

#if 1 /* for debug */
    temp = (s64) coul_cali_config.c_offset_a *ret;
 /*   ret = div_s64(temp, 1000000);*/
    ret += coul_cali_config.c_offset_b;
#endif

    return ret;
}
Example #23
/**
 * cmm_get_mpp - Read memory performance parameters
 *
 * Makes hcall to query the current page loan request from the hypervisor.
 *
 * Return value:
 * 	nothing
 **/
static void cmm_get_mpp(void)
{
	int rc;
	struct hvcall_mpp_data mpp_data;
	signed long active_pages_target, page_loan_request, target;
	signed long total_pages = totalram_pages + loaned_pages;
	signed long min_mem_pages = (min_mem_mb * 1024 * 1024) / PAGE_SIZE;

	rc = h_get_mpp(&mpp_data);

	if (rc != H_SUCCESS)
		return;

	page_loan_request = div_s64((s64)mpp_data.loan_request, PAGE_SIZE);
	target = page_loan_request + (signed long)loaned_pages;

	if (target < 0 || total_pages < min_mem_pages)
		target = 0;

	if (target > oom_freed_pages)
		target -= oom_freed_pages;
	else
		target = 0;

	active_pages_target = total_pages - target;

	if (min_mem_pages > active_pages_target)
		target = total_pages - min_mem_pages;

	if (target < 0)
		target = 0;

	loaned_pages_target = target;

	cmm_dbg("delta = %ld, loaned = %lu, target = %lu, oom = %lu, totalram = %lu\n",
		page_loan_request, loaned_pages, loaned_pages_target,
		oom_freed_pages, totalram_pages);
}
Example #24
static int cc_reading_to_uv(int16_t reading)
{
	return div_s64(reading * CC_READING_RESOLUTION_N,
					CC_READING_RESOLUTION_D);
}
Example #25
int timecompare_offset(struct timecompare *sync,
		       s64 *offset,
		       u64 *source_tstamp)
{
	u64 start_source = 0, end_source = 0;
	struct {
		s64 offset;
		s64 duration_target;
	} buffer[10], sample, *samples;
	int counter = 0, i;
	int used;
	int index;
	int num_samples = sync->num_samples;

	if (num_samples > sizeof(buffer)/sizeof(buffer[0])) {
		samples = kmalloc(sizeof(*samples) * num_samples, GFP_ATOMIC);
		if (!samples) {
			samples = buffer;
			num_samples = sizeof(buffer)/sizeof(buffer[0]);
		}
	} else {
		samples = buffer;
	}

	/* run until we have enough valid samples, but do not try forever */
	i = 0;
	counter = 0;
	while (1) {
		u64 ts;
		ktime_t start, end;

		start = sync->target();
		ts = timecounter_read(sync->source);
		end = sync->target();

		if (!i)
			start_source = ts;

		/* ignore negative durations */
		sample.duration_target = ktime_to_ns(ktime_sub(end, start));
		if (sample.duration_target >= 0) {
			/*
			 * assume symmetric delay to and from source:
			 * average target time corresponds to measured
			 * source time
			 */
			sample.offset =
				(ktime_to_ns(end) + ktime_to_ns(start)) / 2 -
				ts;

			/* simple insertion sort based on duration */
			index = counter - 1;
			while (index >= 0) {
				if (samples[index].duration_target <
				    sample.duration_target)
					break;
				samples[index + 1] = samples[index];
				index--;
			}
			samples[index + 1] = sample;
			counter++;
		}

		i++;
		if (counter >= num_samples || i >= 100000) {
			end_source = ts;
			break;
		}
	}

	*source_tstamp = (end_source + start_source) / 2;

	/* remove outliers by only using 75% of the samples */
	used = counter * 3 / 4;
	if (!used)
		used = counter;
	if (used) {
		/* calculate average */
		s64 off = 0;
		for (index = 0; index < used; index++)
			off += samples[index].offset;
		*offset = div_s64(off, used);
	}

	if (samples && samples != buffer)
		kfree(samples);

	return used;
}
Example #26
/**
 * div_frac() - divide two fixed-point numbers
 * @x:	the dividend
 * @y:	the divisor
 *
 * Return: the result of dividing two fixed-point numbers.  The
 * result is also a fixed-point number.
 */
static inline s64 div_frac(s64 x, s64 y)
{
	return div_s64(x << FRAC_BITS, y);
}
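
div_frac (and div_fp below) implement fixed-point division: pre-shifting the dividend keeps the quotient in the same QN format as the operands. A standalone sketch, assuming FRAC_BITS = 10 (the actual value is defined by each kernel user):

#include <stdio.h>
#include <stdint.h>

#define FRAC_BITS 10 /* assumed; each kernel user defines its own */
#define int_to_frac(x) ((int64_t)(x) << FRAC_BITS)

static int64_t div_frac(int64_t x, int64_t y)
{
	return (x << FRAC_BITS) / y; /* div_s64(x << FRAC_BITS, y) in-kernel */
}

int main(void)
{
	int64_t q = div_frac(int_to_frac(3), int_to_frac(2));

	/* 3/2 in Q10 is 1536 = 1.5 * 2^10 */
	printf("raw = %lld, value = %lld.%03lld\n", (long long)q,
	       (long long)(q >> FRAC_BITS),
	       (long long)(((q & ((1 << FRAC_BITS) - 1)) * 1000) >> FRAC_BITS));
	return 0;
}
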
Example #27
static inline int32_t div_fp(int32_t x, int32_t y)
{
	return div_s64((int64_t)x << FRAC_BITS, (int64_t)y);
}
Example #28
static int vco_set_rate(struct clk *c, unsigned long rate)
{
	s64 vco_clk_rate = rate;
	s32 rem;
	s64 refclk_cfg, frac_n_mode, ref_doubler_en_b;
	s64 ref_clk_to_pll, div_fbx1000, frac_n_value;
	s64 sdm_cfg0, sdm_cfg1, sdm_cfg2, sdm_cfg3;
	s64 gen_vco_clk, cal_cfg10, cal_cfg11;
	u32 res;
	int i, rc = 0;
	struct dsi_pll_vco_clk *vco = to_vco_clk(c);

	clk_prepare_enable(mdss_dsi_ahb_clk);

	/* Configure the Loop filter resistance */
	for (i = 0; i < vco->lpfr_lut_size; i++)
		if (vco_clk_rate <= vco->lpfr_lut[i].vco_rate)
			break;
	if (i == vco->lpfr_lut_size) {
		pr_err("%s: unable to get loop filter resistance. vco=%ld\n",
			__func__, rate);
		rc = -EINVAL;
		goto error;
	}
	res = vco->lpfr_lut[i].r;
	DSS_REG_W(mdss_dsi_base, DSI_0_PHY_PLL_UNIPHY_PLL_LPFR_CFG, res);

	/* Loop filter capacitance values : c1 and c2 */
	DSS_REG_W(mdss_dsi_base, DSI_0_PHY_PLL_UNIPHY_PLL_LPFC1_CFG, 0x70);
	DSS_REG_W(mdss_dsi_base, DSI_0_PHY_PLL_UNIPHY_PLL_LPFC2_CFG, 0x15);

	div_s64_rem(vco_clk_rate, vco->ref_clk_rate, &rem);
	if (rem) {
		refclk_cfg = 0x1;
		frac_n_mode = 1;
		ref_doubler_en_b = 0;
	} else {
		refclk_cfg = 0x0;
		frac_n_mode = 0;
		ref_doubler_en_b = 1;
	}

	pr_debug("%s:refclk_cfg = %lld\n", __func__, refclk_cfg);

	ref_clk_to_pll = ((vco->ref_clk_rate * 2 * (refclk_cfg))
			  + (ref_doubler_en_b * vco->ref_clk_rate));
	div_fbx1000 = div_s64((vco_clk_rate * 1000), ref_clk_to_pll);

	div_s64_rem(div_fbx1000, 1000, &rem);
	frac_n_value = div_s64((rem * (1 << 16)), 1000);
	gen_vco_clk = div_s64(div_fbx1000 * ref_clk_to_pll, 1000);

	pr_debug("%s:ref_clk_to_pll = %lld\n", __func__, ref_clk_to_pll);
	pr_debug("%s:div_fb = %lld\n", __func__, div_fbx1000);
	pr_debug("%s:frac_n_value = %lld\n", __func__, frac_n_value);

	pr_debug("%s:Generated VCO Clock: %lld\n", __func__, gen_vco_clk);
	rem = 0;
	if (frac_n_mode) {
		sdm_cfg0 = (0x0 << 5);
		sdm_cfg0 |= (0x0 & 0x3f);
		sdm_cfg1 = (div_s64(div_fbx1000, 1000) & 0x3f) - 1;
		sdm_cfg3 = div_s64_rem(frac_n_value, 256, &rem);
		sdm_cfg2 = rem;
	} else {
		sdm_cfg0 = (0x1 << 5);
		sdm_cfg0 |= (div_s64(div_fbx1000, 1000) & 0x3f) - 1;
		sdm_cfg1 = (0x0 & 0x3f);
		sdm_cfg2 = 0;
		sdm_cfg3 = 0;
	}

	pr_debug("%s: sdm_cfg0=%lld\n", __func__, sdm_cfg0);
	pr_debug("%s: sdm_cfg1=%lld\n", __func__, sdm_cfg1);
	pr_debug("%s: sdm_cfg2=%lld\n", __func__, sdm_cfg2);
	pr_debug("%s: sdm_cfg3=%lld\n", __func__, sdm_cfg3);

	cal_cfg11 = div_s64_rem(gen_vco_clk, 256 * 1000000, &rem);
	cal_cfg10 = rem / 1000000;
	pr_debug("%s: cal_cfg10=%lld, cal_cfg11=%lld\n", __func__,
		cal_cfg10, cal_cfg11);

	DSS_REG_W(mdss_dsi_base, DSI_0_PHY_PLL_UNIPHY_PLL_CHGPUMP_CFG, 0x02);
	DSS_REG_W(mdss_dsi_base, DSI_0_PHY_PLL_UNIPHY_PLL_CAL_CFG3, 0x2b);
	DSS_REG_W(mdss_dsi_base, DSI_0_PHY_PLL_UNIPHY_PLL_CAL_CFG4, 0x66);
	DSS_REG_W(mdss_dsi_base, DSI_0_PHY_PLL_UNIPHY_PLL_LKDET_CFG2, 0x05);

	DSS_REG_W(mdss_dsi_base, DSI_0_PHY_PLL_UNIPHY_PLL_SDM_CFG1,
		(u32)(sdm_cfg1 & 0xff));
	DSS_REG_W(mdss_dsi_base, DSI_0_PHY_PLL_UNIPHY_PLL_SDM_CFG2,
		(u32)(sdm_cfg2 & 0xff));
	DSS_REG_W(mdss_dsi_base, DSI_0_PHY_PLL_UNIPHY_PLL_SDM_CFG3,
		(u32)(sdm_cfg3 & 0xff));
	DSS_REG_W(mdss_dsi_base, DSI_0_PHY_PLL_UNIPHY_PLL_SDM_CFG4, 0x00);

	/* Add hardware recommended delay for correct PLL configuration */
	udelay(1000);

	DSS_REG_W(mdss_dsi_base, DSI_0_PHY_PLL_UNIPHY_PLL_REFCLK_CFG,
		(u32)refclk_cfg);
	DSS_REG_W(mdss_dsi_base, DSI_0_PHY_PLL_UNIPHY_PLL_PWRGEN_CFG, 0x00);
	DSS_REG_W(mdss_dsi_base, DSI_0_PHY_PLL_UNIPHY_PLL_VCOLPF_CFG, 0x71);
	DSS_REG_W(mdss_dsi_base, DSI_0_PHY_PLL_UNIPHY_PLL_SDM_CFG0,
		(u32)sdm_cfg0);
	DSS_REG_W(mdss_dsi_base, DSI_0_PHY_PLL_UNIPHY_PLL_CAL_CFG0, 0x0a);
	DSS_REG_W(mdss_dsi_base, DSI_0_PHY_PLL_UNIPHY_PLL_CAL_CFG6, 0x30);
	DSS_REG_W(mdss_dsi_base, DSI_0_PHY_PLL_UNIPHY_PLL_CAL_CFG7, 0x00);
	DSS_REG_W(mdss_dsi_base, DSI_0_PHY_PLL_UNIPHY_PLL_CAL_CFG8, 0x60);
	DSS_REG_W(mdss_dsi_base, DSI_0_PHY_PLL_UNIPHY_PLL_CAL_CFG9, 0x00);
	DSS_REG_W(mdss_dsi_base, DSI_0_PHY_PLL_UNIPHY_PLL_CAL_CFG10,
		(u32)(cal_cfg10 & 0xff));
	DSS_REG_W(mdss_dsi_base, DSI_0_PHY_PLL_UNIPHY_PLL_CAL_CFG11,
		(u32)(cal_cfg11 & 0xff));
	DSS_REG_W(mdss_dsi_base, DSI_0_PHY_PLL_UNIPHY_PLL_EFUSE_CFG, 0x20);

error:
	clk_disable_unprepare(mdss_dsi_ahb_clk);
	return rc;
}
Example #29
static int cros_ec_sensors_read(struct iio_dev *indio_dev,
			  struct iio_chan_spec const *chan,
			  int *val, int *val2, long mask)
{
	struct cros_ec_sensors_state *st = iio_priv(indio_dev);
	s16 data = 0;
	s64 val64;
	int i;
	int ret;
	int idx = chan->scan_index;

	mutex_lock(&st->core.cmd_lock);

	switch (mask) {
	case IIO_CHAN_INFO_RAW:
		ret = st->core.read_ec_sensors_data(indio_dev, 1 << idx, &data);
		if (ret < 0)
			break;
		ret = IIO_VAL_INT;
		*val = data;
		break;
	case IIO_CHAN_INFO_CALIBBIAS:
		st->core.param.cmd = MOTIONSENSE_CMD_SENSOR_OFFSET;
		st->core.param.sensor_offset.flags = 0;

		ret = cros_ec_motion_send_host_cmd(&st->core, 0);
		if (ret < 0)
			break;

		/* Save values */
		for (i = CROS_EC_SENSOR_X; i < CROS_EC_SENSOR_MAX_AXIS; i++)
			st->core.calib[i] =
				st->core.resp->sensor_offset.offset[i];
		ret = IIO_VAL_INT;
		*val = st->core.calib[idx];
		break;
	case IIO_CHAN_INFO_SCALE:
		st->core.param.cmd = MOTIONSENSE_CMD_SENSOR_RANGE;
		st->core.param.sensor_range.data = EC_MOTION_SENSE_NO_VALUE;

		ret = cros_ec_motion_send_host_cmd(&st->core, 0);
		if (ret < 0)
			break;

		val64 = st->core.resp->sensor_range.ret;
		switch (st->core.type) {
		case MOTIONSENSE_TYPE_ACCEL:
			/*
			 * EC returns data in g, iio expects m/s^2.
			 * Do not use IIO_G_TO_M_S_2 to avoid precision loss.
			 */
			*val = div_s64(val64 * 980665, 10);
			*val2 = 10000 << (CROS_EC_SENSOR_BITS - 1);
			ret = IIO_VAL_FRACTIONAL;
			break;
		case MOTIONSENSE_TYPE_GYRO:
			/*
			 * EC returns data in dps, iio expects rad/s.
			 * Do not use IIO_DEGREE_TO_RAD to avoid precision
			 * loss. Round to the nearest integer.
			 */
			*val = div_s64(val64 * 314159 + 9000000ULL, 1000);
			*val2 = 18000 << (CROS_EC_SENSOR_BITS - 1);
			ret = IIO_VAL_FRACTIONAL;
			break;
		case MOTIONSENSE_TYPE_MAG:
			/*
			 * EC returns data in 16LSB / uT,
			 * iio expects Gauss
			 */
			*val = val64;
			*val2 = 100 << (CROS_EC_SENSOR_BITS - 1);
			ret = IIO_VAL_FRACTIONAL;
			break;
		default:
			ret = -EINVAL;
		}
		break;
	default:
		ret = cros_ec_sensors_core_read(&st->core, chan, val, val2,
						mask);
		break;
	}
	mutex_unlock(&st->core.cmd_lock);

	return ret;
}
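
The IIO_VAL_FRACTIONAL pairs can be checked by hand. Assuming CROS_EC_SENSOR_BITS = 16 as in the upstream driver, a reported +/-2g accelerometer range yields the expected ~0.0005985 m/s^2 per LSB:

#include <stdio.h>
#include <stdint.h>

#define CROS_EC_SENSOR_BITS 16 /* assumed, as in the upstream driver */

int main(void)
{
	int64_t range_g = 2; /* EC-reported range: +/- 2 g */

	int64_t val = range_g * 980665 / 10;                 /* 196133    */
	int64_t val2 = 10000LL << (CROS_EC_SENSOR_BITS - 1); /* 327680000 */

	/* IIO reports scale = val / val2, in m/s^2 per LSB */
	printf("scale = %lld / %lld = %.7f m/s^2 per LSB\n",
	       (long long)val, (long long)val2, (double)val / (double)val2);
	return 0;
}
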
Example #30
static void msm_bus_bimc_update_bw(struct msm_bus_inode_info *hop,
	struct msm_bus_inode_info *info,
	struct msm_bus_fabric_registration *fab_pdata,
	void *sel_cdata, int *master_tiers,
	int64_t add_bw)
{
	struct msm_bus_bimc_info *binfo;
	struct msm_bus_bimc_qos_bw qbw;
	int i;
	int64_t bw;
	int ports = info->node_info->num_mports;
	struct msm_bus_bimc_commit *sel_cd =
		(struct msm_bus_bimc_commit *)sel_cdata;

	MSM_BUS_DBG("BIMC: Update bw for ID %d, with IID: %d: %lld\n",
		info->node_info->id, info->node_info->priv_id, add_bw);

	binfo = (struct msm_bus_bimc_info *)fab_pdata->hw_data;

	if (info->node_info->num_mports == 0) {
		MSM_BUS_DBG("BIMC: Skip Master BW\n");
		goto skip_mas_bw;
	}

	ports = info->node_info->num_mports;
	bw = INTERLEAVED_BW(fab_pdata, add_bw, ports);

	for (i = 0; i < ports; i++) {
		sel_cd->mas[info->node_info->masterp[i]].bw += bw;
		sel_cd->mas[info->node_info->masterp[i]].hw_id =
			info->node_info->mas_hw_id;
		MSM_BUS_DBG("BIMC: Update mas_bw for ID: %d -> %llu\n",
			info->node_info->priv_id,
			sel_cd->mas[info->node_info->masterp[i]].bw);
		if (info->node_info->hw_sel == MSM_BUS_RPM)
			sel_cd->mas[info->node_info->masterp[i]].dirty = 1;
		else {
			if (!info->node_info->qport) {
				MSM_BUS_DBG("No qos ports to update!\n");
				break;
			}
			if (!((info->node_info->mode == BIMC_QOS_MODE_REGULATOR)
					|| (info->node_info->mode ==
						BIMC_QOS_MODE_LIMITER))) {
				MSM_BUS_DBG("Skip QoS reg programming\n");
				break;
			}

			MSM_BUS_DBG("qport: %d\n", info->node_info->qport[i]);
			qbw.bw = sel_cd->mas[info->node_info->masterp[i]].bw;
			qbw.ws = info->node_info->ws;
			/* Threshold low = 90% of bw */
			qbw.thl = div_s64((90 * bw), 100);
			/* Threshold medium = bw */
			qbw.thm = bw;
			/* Threshold high = 10% more than bw */
			qbw.thh = div_s64((110 * bw), 100);
			/* Check if info is a shared master.
			 * If it is, mark it dirty
			 * If it isn't, then set QOS Bandwidth.
			 * Also if dual-conf is set, don't program bw regs.
			 **/
			if (!info->node_info->dual_conf &&
			((info->node_info->mode == BIMC_QOS_MODE_LIMITER) ||
			(info->node_info->mode == BIMC_QOS_MODE_REGULATOR)))
				msm_bus_bimc_set_qos_bw(binfo->base,
					binfo->qos_freq,
					info->node_info->qport[i], &qbw);
		}
	}

skip_mas_bw:
	ports = hop->node_info->num_sports;
	MSM_BUS_DBG("BIMC: ID: %d, Sports: %d\n", hop->node_info->priv_id,
		ports);

	for (i = 0; i < ports; i++) {
		sel_cd->slv[hop->node_info->slavep[i]].bw += add_bw;
		sel_cd->slv[hop->node_info->slavep[i]].hw_id =
			hop->node_info->slv_hw_id;
		MSM_BUS_DBG("BIMC: Update slave_bw: ID: %d -> %llu\n",
			hop->node_info->priv_id,
			sel_cd->slv[hop->node_info->slavep[i]].bw);
		MSM_BUS_DBG("BIMC: Update slave_bw: index: %d\n",
			hop->node_info->slavep[i]);
		/* Check if hop is a shared slave.
		 * If it is, mark it dirty
		 * If it isn't, then nothing to be done as the
		 * slaves are in bypass mode.
		 **/
		if (hop->node_info->hw_sel == MSM_BUS_RPM) {
			MSM_BUS_DBG("Slave dirty: %d, slavep: %d\n",
				hop->node_info->priv_id,
				hop->node_info->slavep[i]);
			sel_cd->slv[hop->node_info->slavep[i]].dirty = 1;
		}
	}
}