Example 1
static int xgbe_adjfreq(struct ptp_clock_info *info, s32 delta)
{
	struct xgbe_prv_data *pdata = container_of(info,
						   struct xgbe_prv_data,
						   ptp_clock_info);
	unsigned long flags;
	u64 adjust;
	u32 addend, diff;
	unsigned int neg_adjust = 0;

	if (delta < 0) {
		neg_adjust = 1;
		delta = -delta;
	}

	adjust = pdata->tstamp_addend;
	adjust *= delta;
	diff = div_u64(adjust, 1000000000UL);

	addend = (neg_adjust) ? pdata->tstamp_addend - diff :
				pdata->tstamp_addend + diff;

	spin_lock_irqsave(&pdata->tstamp_lock, flags);

	pdata->hw_if.update_tstamp_addend(pdata, addend);

	spin_unlock_irqrestore(&pdata->tstamp_lock, flags);

	return 0;
}
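
The adjustment arithmetic here (and in the other adjfreq callbacks below) follows one pattern: scale the nominal addend by |delta| / 10^9 using a 64-bit multiply followed by div_u64, then add or subtract the result. A minimal user-space sketch of that arithmetic, with a made-up nominal addend:

#include <stdint.h>
#include <stdio.h>

/* Stand-alone model of the addend scaling used above; the nominal
 * addend value is invented for illustration. */
static uint32_t scale_addend(uint32_t addend, int32_t delta_ppb)
{
	int neg = delta_ppb < 0;
	uint64_t adjust = (uint64_t)addend * (neg ? -delta_ppb : delta_ppb);
	uint32_t diff = (uint32_t)(adjust / 1000000000ULL);

	return neg ? addend - diff : addend + diff;
}

int main(void)
{
	/* +100 ppb on a nominal addend of 0x80000000: diff = 214 */
	printf("%u\n", scale_addend(0x80000000u, 100));
	return 0;
}
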
Example 2
static int mlx5e_ptp_adjfreq(struct ptp_clock_info *ptp, s32 delta)
{
	u64 adj;
	u32 diff;
	unsigned long flags;
	int neg_adj = 0;
	struct mlx5e_tstamp *tstamp = container_of(ptp, struct mlx5e_tstamp,
						  ptp_info);

	if (delta < 0) {
		neg_adj = 1;
		delta = -delta;
	}

	adj = tstamp->nominal_c_mult;
	adj *= delta;
	diff = div_u64(adj, 1000000000ULL);

	write_lock_irqsave(&tstamp->lock, flags);
	timecounter_read(&tstamp->clock);
	tstamp->cycles.mult = neg_adj ? tstamp->nominal_c_mult - diff :
					tstamp->nominal_c_mult + diff;
	write_unlock_irqrestore(&tstamp->lock, flags);

	return 0;
}
Example 3
int psi_show(struct seq_file *m, struct psi_group *group, enum psi_res res)
{
	int full;

	if (static_branch_likely(&psi_disabled))
		return -EOPNOTSUPP;

	update_stats(group);

	for (full = 0; full < 2 - (res == PSI_CPU); full++) {
		unsigned long avg[3];
		u64 total;
		int w;

		for (w = 0; w < 3; w++)
			avg[w] = group->avg[res * 2 + full][w];
		total = div_u64(group->total[res * 2 + full], NSEC_PER_USEC);

		seq_printf(m, "%s avg10=%lu.%02lu avg60=%lu.%02lu avg300=%lu.%02lu total=%llu\n",
			   full ? "full" : "some",
			   LOAD_INT(avg[0]), LOAD_FRAC(avg[0]),
			   LOAD_INT(avg[1]), LOAD_FRAC(avg[1]),
			   LOAD_INT(avg[2]), LOAD_FRAC(avg[2]),
			   total);
	}

	return 0;
}
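
LOAD_INT()/LOAD_FRAC() are the kernel's load-average fixed-point helpers; assuming the usual FSHIFT of 11, they split a fixed-point average into an integer part and a two-digit fraction, while the total is converted from nanoseconds to microseconds with div_u64. A stand-alone sketch of that formatting, with an invented average value:

#include <stdint.h>
#include <stdio.h>

/* Assumes the conventional kernel definitions: FSHIFT = 11,
 * FIXED_1 = 1 << FSHIFT; the avg value below is invented. */
#define FSHIFT 11
#define FIXED_1 (1 << FSHIFT)
#define LOAD_INT(x)  ((x) >> FSHIFT)
#define LOAD_FRAC(x) LOAD_INT(((x) & (FIXED_1 - 1)) * 100)

int main(void)
{
	unsigned long avg10 = 3 * FIXED_1 + FIXED_1 / 4;	/* 3.25 */
	uint64_t total_ns = 1234567890ULL;

	printf("some avg10=%lu.%02lu total=%llu\n",
	       LOAD_INT(avg10), LOAD_FRAC(avg10),
	       (unsigned long long)(total_ns / 1000));	/* ns -> us */
	return 0;
}
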
Example 4
static int mlx4_en_dcbnl_ieee_setmaxrate(struct net_device *dev,
		struct ieee_maxrate *maxrate)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	u16 tmp[IEEE_8021QAZ_MAX_TCS];
	int i, err;

	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		/* Convert from Kbps into HW units, rounding result up.
		 * Setting to 0, means unlimited BW.
		 */

		tmp[i] = div_u64(maxrate->tc_maxrate[i] +
				 MLX4_RATELIMIT_UNITS_IN_KB - 1,
				 MLX4_RATELIMIT_UNITS_IN_KB);
	}

	err = mlx4_en_config_port_scheduler(priv, NULL, tmp);
	if (err)
		return err;

	memcpy(priv->maxrate, tmp, sizeof(priv->maxrate));

	return 0;
}
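
The expression above is the standard round-up integer division, div_u64(x + unit - 1, unit): any nonzero remainder pushes the result to the next hardware unit, and 0 stays 0 ("unlimited"). A quick stand-alone check, using a made-up unit size rather than the real MLX4_RATELIMIT_UNITS_IN_KB:

#include <stdint.h>
#include <stdio.h>

/* Illustrative only: 100 is an invented unit size, not the driver constant. */
static uint16_t kbps_to_units(uint64_t kbps, uint32_t unit_kb)
{
	return (uint16_t)((kbps + unit_kb - 1) / unit_kb);
}

int main(void)
{
	printf("%u\n", kbps_to_units(250, 100));	/* prints 3 */
	printf("%u\n", kbps_to_units(0, 100));		/* 0 still means "unlimited" */
	return 0;
}
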
Example 5
/**
 * mlx4_en_phc_adjfreq - adjust the frequency of the hardware clock
 * @ptp: ptp clock structure
 * @delta: Desired frequency change in parts per billion
 *
 * Adjust the frequency of the PHC cycle counter by the indicated delta from
 * the base frequency.
 **/
static int mlx4_en_phc_adjfreq(struct ptp_clock_info *ptp, s32 delta)
{
	u64 adj;
	u32 diff, mult;
	int neg_adj = 0;
	unsigned long flags;
	struct mlx4_en_dev *mdev = container_of(ptp, struct mlx4_en_dev,
						ptp_clock_info);

	if (delta < 0) {
		neg_adj = 1;
		delta = -delta;
	}
	mult = mdev->nominal_c_mult;
	adj = mult;
	adj *= delta;
	diff = div_u64(adj, 1000000000ULL);

	write_lock_irqsave(&mdev->clock_lock, flags);
	timecounter_read(&mdev->clock);
	mdev->cycles.mult = neg_adj ? mult - diff : mult + diff;
	write_unlock_irqrestore(&mdev->clock_lock, flags);

	return 0;
}
Example 6
static int gpio_interrupt(rtdm_irq_t *irq_handle)
{
	RTIME 				temp_time;
	static SRTIME		curr_time;
	static SRTIME		prev_time;
	static SRTIME		diff_time;
	static int			freq;

	temp_time = rt_timer_tsc();

	curr_time = rt_timer_tsc2ns(temp_time);
	diff_time = curr_time - prev_time;
	prev_time = curr_time;

	//Get frequency
	freq = (int)div_u64(100000000000ULL, (u32)diff_time);

	//rtdm_printk("F: %u \n", freq);

	set_next_tooth(curr_time);

	//freq = 100000000000 / diff_time;  //Diff_time is in ns, freq = times*1000

	return RTDM_IRQ_HANDLED;
}
Example 7
/**
 * stmmac_adjust_freq
 *
 * @ptp: pointer to ptp_clock_info structure
 * @ppb: desired frequency change in parts per billion
 *
 * Description: this function adjusts the frequency of the hardware clock.
 */
static int stmmac_adjust_freq(struct ptp_clock_info *ptp, s32 ppb)
{
	struct stmmac_priv *priv =
	    container_of(ptp, struct stmmac_priv, ptp_clock_ops);
	unsigned long flags;
	u32 diff, addend;
	int neg_adj = 0;
	u64 adj;

	if (ppb < 0) {
		neg_adj = 1;
		ppb = -ppb;
	}

	addend = priv->default_addend;
	adj = addend;
	adj *= ppb;
	diff = div_u64(adj, 1000000000ULL);
	addend = neg_adj ? (addend - diff) : (addend + diff);

	spin_lock_irqsave(&priv->ptp_lock, flags);

	priv->hw->ptp->config_addend(priv->ptpaddr, addend);

	spin_unlock_irqrestore(&priv->ptp_lock, flags);

	return 0;
}
Example 8
static int igb_ptp_adjfine_82580(struct ptp_clock_info *ptp, long scaled_ppm)
{
	struct igb_adapter *igb = container_of(ptp, struct igb_adapter,
					       ptp_caps);
	struct e1000_hw *hw = &igb->hw;
	int neg_adj = 0;
	u64 rate;
	u32 inca;

	if (scaled_ppm < 0) {
		neg_adj = 1;
		scaled_ppm = -scaled_ppm;
	}
	rate = scaled_ppm;
	rate <<= 13;
	rate = div_u64(rate, 15625);

	inca = rate & INCVALUE_MASK;
	if (neg_adj)
		inca |= ISGN;

	wr32(E1000_TIMINCA, inca);

	return 0;
}
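
adjfine receives the adjustment as parts per million with a 16-bit binary fraction (scaled_ppm), so ppb = scaled_ppm * 1000 / 2^16. Because 10^6 = 15625 << 6, the expression (scaled_ppm << 13) / 15625 equals scaled_ppm * 2^19 / 10^6, which is the same quantity the older adjfreq variant for this hardware (Example 15) computes as (ppb << 26) / 1953125; pre-reducing the constants also keeps the divisor within the 32-bit limit of div_u64. A numeric check of that equivalence (the TIMINCA field semantics themselves are hardware-specific and not modelled here):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	long scaled_ppm = 65536;	/* exactly 1 ppm */
	uint64_t rate = (uint64_t)scaled_ppm << 13;

	rate /= 15625;			/* same scaling as div_u64(rate, 15625) */
	/* 1 ppm = 1000 ppb, and (1000ULL << 26) / 1953125 gives the same value */
	printf("%llu %llu\n",
	       (unsigned long long)rate,
	       (1000ULL << 26) / 1953125);
	return 0;
}
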
Example 9
/* Rate limiting */
static uint64_t __calc_target_rate(struct cached_dev *dc)
{
	struct cache_set *c = dc->disk.c;

	/*
	 * This is the size of the cache, minus the amount used for
	 * flash-only devices
	 */
	uint64_t cache_sectors = c->nbuckets * c->sb.bucket_size -
				atomic_long_read(&c->flash_dev_dirty_sectors);

	/*
	 * Unfortunately there is no control of global dirty data.  If the
	 * user states that they want 10% dirty data in the cache, and has,
	 * e.g., 5 backing volumes of equal size, we try and ensure each
	 * backing volume uses about 2% of the cache for dirty data.
	 */
	uint32_t bdev_share =
		div64_u64(bdev_sectors(dc->bdev) << WRITEBACK_SHARE_SHIFT,
				c->cached_dev_sectors);

	uint64_t cache_dirty_target =
		div_u64(cache_sectors * dc->writeback_percent, 100);

	/* Ensure each backing dev gets at least one dirty share */
	if (bdev_share < 1)
		bdev_share = 1;

	return (cache_dirty_target * bdev_share) >> WRITEBACK_SHARE_SHIFT;
}
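
Plugging the comment's own scenario into the arithmetic: with writeback_percent = 10 and five equal backing volumes, each device's share of the dirty target works out to roughly 2% of the cache. A rough stand-alone rendition, assuming the WRITEBACK_SHARE_SHIFT value of 14 used by bcache and invented sector counts:

#include <stdint.h>
#include <stdio.h>

#define WRITEBACK_SHARE_SHIFT 14	/* value used by bcache; an assumption here */

int main(void)
{
	uint64_t cache_sectors = 1000000;	/* invented cache size */
	uint64_t cached_dev_sectors = 5000000;	/* five equal backing devices */
	uint64_t bdev_sectors = 1000000;	/* this device's share: 1/5 */
	unsigned int writeback_percent = 10;

	uint64_t bdev_share =
		(bdev_sectors << WRITEBACK_SHARE_SHIFT) / cached_dev_sectors;
	uint64_t cache_dirty_target = cache_sectors * writeback_percent / 100;
	uint64_t target = (cache_dirty_target * bdev_share) >> WRITEBACK_SHARE_SHIFT;

	/* 10% of the cache split five ways: target is ~2% of cache_sectors */
	printf("share=%llu target=%llu\n",
	       (unsigned long long)bdev_share, (unsigned long long)target);
	return 0;
}
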
Example 10
/*
 * Convert non-negative integer string representation in explicitly given radix
 * to an integer.
 * Return number of characters consumed maybe or-ed with overflow bit.
 * If overflow occurs, result integer (incorrect) is still returned.
 *
 * Don't you dare use this function.
 */
unsigned int _parse_integer(const char *s, unsigned int base, unsigned long long *p)
{
	unsigned long long res;
	unsigned int rv;

	res = 0;
	rv = 0;
	while (*s) {
		unsigned int val;

		if ('0' <= *s && *s <= '9')
			val = *s - '0';
		else if ('a' <= _tolower(*s) && _tolower(*s) <= 'f')
			val = _tolower(*s) - 'a' + 10;
		else
			break;

		if (val >= base)
			break;
		/*
		 * Check for overflow only if we are within range of
		 * it in the max base we support (16)
		 */
		if (unlikely(res & (~0ull << 60))) {
			if (res > div_u64(ULLONG_MAX - val, base))
				rv |= KSTRTOX_OVERFLOW;
		}
		res = res * base + val;
		rv++;
		s++;
	}
	*p = res;
	return rv;
}
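
As the comment says, the return value is the number of characters consumed, possibly OR-ed with KSTRTOX_OVERFLOW; a caller checks that bit and then inspects the character following the consumed prefix. A user-space sketch of the same parsing loop (an illustration, not the kernel function):

#include <ctype.h>
#include <stdio.h>

/* User-space re-implementation for illustration; overflow checking is
 * omitted here for brevity. */
static unsigned int parse_uint(const char *s, unsigned int base,
			       unsigned long long *res)
{
	unsigned int rv = 0;

	*res = 0;
	while (*s) {
		unsigned int val;

		if (isdigit((unsigned char)*s))
			val = *s - '0';
		else if (isxdigit((unsigned char)*s))
			val = tolower((unsigned char)*s) - 'a' + 10;
		else
			break;
		if (val >= base)
			break;
		*res = *res * base + val;
		rv++;
		s++;
	}
	return rv;
}

int main(void)
{
	unsigned long long v;
	unsigned int used = parse_uint("ff zzz", 16, &v);

	printf("used=%u value=%llu\n", used, v);	/* used=2 value=255 */
	return 0;
}
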
Example 11
static int wfq_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
	struct wfq_class *cl = NULL;
	unsigned int len = skb_size(skb);
	struct wfq_sched_data *q = qdisc_priv(sch);
	int ret, weight;


	cl = wfq_classify(skb, sch);
	/* No appropriate queue or the switch buffer is overfilled */
	if (unlikely(!cl) || wfq_buffer_overfill(len, cl, q))
	{
		qdisc_qstats_drop(sch);
		if (cl)
			qdisc_qstats_drop(cl->qdisc);
		kfree_skb(skb);
		return NET_XMIT_DROP;
	}

	ret = qdisc_enqueue(skb, cl->qdisc);
	if (unlikely(ret != NET_XMIT_SUCCESS))
	{
		if (likely(net_xmit_drop_count(ret)))
		{
			qdisc_qstats_drop(sch);
			qdisc_qstats_drop(cl->qdisc);
		}
		return ret;
	}

	/* If the queue is empty, calculate its head finish time */
	if (cl->qdisc->q.qlen == 1)
	{
		weight = wfq_queue_weight[cl->id];
		/* We only change the priority when the queue is empty */
		cl->prio = (u8)wfq_queue_prio[cl->id];

		if (likely(weight > 0))
		{
			cl->head_fin_time = div_u64((u64)len, (u32)weight) +
					    q->virtual_time[cl->prio];
			q->virtual_time[cl->prio] = cl->head_fin_time;
		}
	}

	/* Update queue sizes */
	sch->q.qlen++;
	q->sum_len_bytes += len;
	cl->len_bytes += len;
	q->prio_len_bytes[cl->prio] += len;

	/* sojourn time based ECN marking: TCN and CoDel */
	if (wfq_ecn_scheme == wfq_tcn || wfq_ecn_scheme == wfq_codel)
		skb->tstamp = ktime_get();
	/* enqueue queue length based ECN marking */
	else if (wfq_enable_dequeue_ecn == wfq_disable)
		wfq_qlen_marking(skb, q, cl);

	return ret;
}
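
The block guarded by qlen == 1 is standard weighted-fair-queueing bookkeeping: a packet arriving at an empty queue gets a head finish time of the per-priority virtual time plus len/weight, and the virtual time advances to that finish time. A tiny numeric sketch with invented lengths and weights:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t virtual_time = 0;
	unsigned int len[] = { 1500, 300 };	/* bytes, invented */
	unsigned int weight[] = { 3, 1 };	/* invented per-queue weights */

	for (int i = 0; i < 2; i++) {
		/* finish time of the packet now at the head of queue i */
		uint64_t fin = virtual_time + len[i] / weight[i];

		printf("queue %d head_fin_time=%llu\n",
		       i, (unsigned long long)fin);
		virtual_time = fin;
	}
	return 0;
}
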
Example 12
u64 met_usecs_to_cputime64(u64 n)
{
#if (NSEC_PER_SEC % HZ) == 0
	/* Common case, HZ = 100, 128, 200, 250, 256, 500, 512, 1000 etc. */
	return div_u64(n, NSEC_PER_SEC / HZ);
#elif (HZ % 512) == 0
	/* overflow after 292 years if HZ = 1024 */
	return div_u64(n * HZ / 512, NSEC_PER_SEC / 512);
#else
	/*
	 * Generic case - optimized for cases where HZ is a multiple of 3.
	 * overflow after 64.99 years, exact for HZ = 60, 72, 90, 120 etc.
	 */
	return div_u64(n * 9, (9ull * NSEC_PER_SEC + HZ / 2) / HZ);
#endif
}
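
Despite the usecs in the name, the arithmetic divides n by the tick length expressed in nanoseconds (NSEC_PER_SEC / HZ in the common case), the same pattern as the kernel's nsecs-to-jiffies helpers. For instance, with HZ = 250 the divisor is 4,000,000, so 10^9 maps to 250 ticks. A quick check of the common-case branch:

#include <stdint.h>
#include <stdio.h>

#define NSEC_PER_SEC 1000000000ULL
#define HZ 250			/* example configuration; HZ is build-dependent */

int main(void)
{
	uint64_t n = NSEC_PER_SEC;	/* one second worth of nanoseconds */

	/* common-case branch: NSEC_PER_SEC % HZ == 0 */
	printf("%llu ticks\n", (unsigned long long)(n / (NSEC_PER_SEC / HZ)));
	return 0;
}
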
Example 13
static int sst_cdev_tstamp(struct device *dev, unsigned int str_id,
		struct snd_compr_tstamp *tstamp)
{
	struct snd_sst_tstamp fw_tstamp = {0,};
	struct stream_info *stream;
	struct intel_sst_drv *ctx = dev_get_drvdata(dev);

	memcpy_fromio(&fw_tstamp,
		((void *)(ctx->mailbox + ctx->tstamp)
		+(str_id * sizeof(fw_tstamp))),
		sizeof(fw_tstamp));

	stream = get_stream_info(ctx, str_id);
	if (!stream)
		return -EINVAL;
	dev_dbg(dev, "rb_counter %llu in bytes\n", fw_tstamp.ring_buffer_counter);

	tstamp->copied_total = fw_tstamp.ring_buffer_counter;
	tstamp->pcm_frames = fw_tstamp.frames_decoded;
	tstamp->pcm_io_frames = div_u64(fw_tstamp.hardware_counter,
			(u64)((stream->num_ch) * SST_GET_BYTES_PER_SAMPLE(24)));
	tstamp->sampling_rate = fw_tstamp.sampling_frequency;

	dev_dbg(dev, "PCM  = %u\n", tstamp->pcm_io_frames);
	dev_dbg(dev, "Ptr Query on strid = %d  copied_total %d, decodec %d\n",
		str_id, tstamp->copied_total, tstamp->pcm_frames);
	dev_dbg(dev, "rendered %d\n", tstamp->pcm_io_frames);

	return 0;
}
Example 14
static ssize_t show_pw20_wait_time(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	u32 value;
	u64 tb_cycle = 1;
	u64 time;

	unsigned int cpu = dev->id;

	if (!pw20_wt) {
		smp_call_function_single(cpu, do_show_pwrmgtcr0, &value, 1);
		value = (value & PWRMGTCR0_PW20_ENT) >>
					PWRMGTCR0_PW20_ENT_SHIFT;

		tb_cycle = (tb_cycle << (MAX_BIT - value + 1));
		/* convert ms to ns */
		if (tb_ticks_per_usec > 1000) {
			time = div_u64(tb_cycle, tb_ticks_per_usec / 1000);
		} else {
			u32 rem_us;

			time = div_u64_rem(tb_cycle, tb_ticks_per_usec,
						&rem_us);
			time = time * 1000 + rem_us * 1000 / tb_ticks_per_usec;
		}
	} else {
Example 15
static int igb_ptp_adjfreq_82580(struct ptp_clock_info *ptp, s32 ppb)
{
	struct igb_adapter *igb = container_of(ptp, struct igb_adapter,
					       ptp_caps);
	struct e1000_hw *hw = &igb->hw;
	int neg_adj = 0;
	u64 rate;
	u32 inca;

	if (ppb < 0) {
		neg_adj = 1;
		ppb = -ppb;
	}
	rate = ppb;
	rate <<= 26;
	rate = div_u64(rate, 1953125);

	inca = rate & INCVALUE_MASK;
	if (neg_adj)
		inca |= ISGN;

	wr32(E1000_TIMINCA, inca);

	return 0;
}
Example 16
static int skl_get_time_info(struct snd_pcm_substream *substream,
			struct timespec *system_ts, struct timespec *audio_ts,
			struct snd_pcm_audio_tstamp_config *audio_tstamp_config,
			struct snd_pcm_audio_tstamp_report *audio_tstamp_report)
{
	struct hdac_ext_stream *sstream = get_hdac_ext_stream(substream);
	struct hdac_stream *hstr = hdac_stream(sstream);
	u64 nsec;

	if ((substream->runtime->hw.info & SNDRV_PCM_INFO_HAS_LINK_ATIME) &&
		(audio_tstamp_config->type_requested == SNDRV_PCM_AUDIO_TSTAMP_TYPE_LINK)) {

		snd_pcm_gettime(substream->runtime, system_ts);

		nsec = timecounter_read(&hstr->tc);
		nsec = div_u64(nsec, 3); /* can be optimized */
		if (audio_tstamp_config->report_delay)
			nsec = skl_adjust_codec_delay(substream, nsec);

		*audio_ts = ns_to_timespec(nsec);

		audio_tstamp_report->actual_type = SNDRV_PCM_AUDIO_TSTAMP_TYPE_LINK;
		audio_tstamp_report->accuracy_report = 1; /* rest of struct is valid */
		audio_tstamp_report->accuracy = 42; /* 24MHzWallClk == 42ns resolution */

	} else {
		audio_tstamp_report->actual_type = SNDRV_PCM_AUDIO_TSTAMP_TYPE_DEFAULT;
	}

	return 0;
}
Example 17
int nfp_flower_metadata_init(struct nfp_app *app, u64 host_ctx_count,
			     unsigned int host_num_mems)
{
	struct nfp_flower_priv *priv = app->priv;
	int err, stats_size;

	hash_init(priv->mask_table);

	err = rhashtable_init(&priv->flow_table, &nfp_flower_table_params);
	if (err)
		return err;

	get_random_bytes(&priv->mask_id_seed, sizeof(priv->mask_id_seed));

	/* Init ring buffer and unallocated mask_ids. */
	priv->mask_ids.mask_id_free_list.buf =
		kmalloc_array(NFP_FLOWER_MASK_ENTRY_RS,
			      NFP_FLOWER_MASK_ELEMENT_RS, GFP_KERNEL);
	if (!priv->mask_ids.mask_id_free_list.buf)
		goto err_free_flow_table;

	priv->mask_ids.init_unallocated = NFP_FLOWER_MASK_ENTRY_RS - 1;

	/* Init timestamps for mask id*/
	priv->mask_ids.last_used =
		kmalloc_array(NFP_FLOWER_MASK_ENTRY_RS,
			      sizeof(*priv->mask_ids.last_used), GFP_KERNEL);
	if (!priv->mask_ids.last_used)
		goto err_free_mask_id;

	/* Init ring buffer and unallocated stats_ids. */
	priv->stats_ids.free_list.buf =
		vmalloc(array_size(NFP_FL_STATS_ELEM_RS,
				   priv->stats_ring_size));
	if (!priv->stats_ids.free_list.buf)
		goto err_free_last_used;

	priv->stats_ids.init_unalloc = div_u64(host_ctx_count, host_num_mems);

	stats_size = FIELD_PREP(NFP_FL_STAT_ID_STAT, host_ctx_count) |
		     FIELD_PREP(NFP_FL_STAT_ID_MU_NUM, host_num_mems - 1);
	priv->stats = kvmalloc_array(stats_size, sizeof(struct nfp_fl_stats),
				     GFP_KERNEL);
	if (!priv->stats)
		goto err_free_ring_buf;

	spin_lock_init(&priv->stats_lock);

	return 0;

err_free_ring_buf:
	vfree(priv->stats_ids.free_list.buf);
err_free_last_used:
	kfree(priv->mask_ids.last_used);
err_free_mask_id:
	kfree(priv->mask_ids.mask_id_free_list.buf);
err_free_flow_table:
	rhashtable_destroy(&priv->flow_table);
	return -ENOMEM;
}
Example 18
static int btrfs_dev_replace_kthread(void *data)
{
	struct btrfs_fs_info *fs_info = data;
	struct btrfs_dev_replace *dev_replace = &fs_info->dev_replace;
	struct btrfs_ioctl_dev_replace_args *status_args;
	u64 progress;

	status_args = kzalloc(sizeof(*status_args), GFP_KERNEL);
	if (status_args) {
		btrfs_dev_replace_status(fs_info, status_args);
		progress = status_args->status.progress_1000;
		kfree(status_args);
		progress = div_u64(progress, 10);
		btrfs_info_in_rcu(fs_info,
			"continuing dev_replace from %s (devid %llu) to %s @%u%%",
			dev_replace->srcdev->missing ? "<missing disk>" :
			rcu_str_deref(dev_replace->srcdev->name),
			dev_replace->srcdev->devid,
			dev_replace->tgtdev ?
			rcu_str_deref(dev_replace->tgtdev->name) :
			"<missing target disk>",
			(unsigned int)progress);
	}
	btrfs_dev_replace_continue_on_mount(fs_info);
	atomic_set(&fs_info->mutually_exclusive_operation_running, 0);

	return 0;
}
Example 19
static int igb_ptp_adjfreq_82576(struct ptp_clock_info *ptp, s32 ppb)
{
	struct igb_adapter *igb = container_of(ptp, struct igb_adapter,
					       ptp_caps);
	struct e1000_hw *hw = &igb->hw;
	int neg_adj = 0;
	u64 rate;
	u32 incvalue;

	if (ppb < 0) {
		neg_adj = 1;
		ppb = -ppb;
	}
	rate = ppb;
	rate <<= 14;
	rate = div_u64(rate, 1953125);

	incvalue = 16 << IGB_82576_TSYNC_SHIFT;

	if (neg_adj)
		incvalue -= rate;
	else
		incvalue += rate;

	E1000_WRITE_REG(hw, E1000_TIMINCA, INCPERIOD_82576
			| (incvalue & INCVALUE_82576_MASK));

	return 0;
}
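
Here the ppb scaling is folded into pre-reduced constants so the divisor fits div_u64's 32-bit limit: the base increment is 16 << IGB_82576_TSYNC_SHIFT (2^23, assuming the driver's shift value of 19), and since 10^9 = 1953125 << 9, (ppb << 14) / 1953125 is exactly 2^23 * ppb / 10^9, i.e. the base increment scaled by ppb parts per billion. A numeric check under that assumption:

#include <stdint.h>
#include <stdio.h>

#define IGB_82576_TSYNC_SHIFT 19	/* value used by the igb driver; an assumption here */

int main(void)
{
	uint64_t base = 16ULL << IGB_82576_TSYNC_SHIFT;	/* 2^23 */
	int32_t ppb = 10000;

	uint64_t rate = ((uint64_t)ppb << 14) / 1953125;
	uint64_t direct = base * (uint64_t)ppb / 1000000000ULL;

	/* both print 83: the reduced constants and the direct form agree */
	printf("rate=%llu direct=%llu\n",
	       (unsigned long long)rate, (unsigned long long)direct);
	return 0;
}
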
Example 20
/*
 * Convert non-negative integer string representation in explicitly given radix
 * to an integer.
 * Return number of characters consumed maybe or-ed with overflow bit.
 * If overflow occurs, result integer (incorrect) is still returned.
 *
 * Don't you dare use this function.
 */
unsigned int _parse_integer(const char *s, unsigned int base, unsigned long long *res)
{
	unsigned int rv;
	int overflow;

	*res = 0;
	rv = 0;
	overflow = 0;
	while (*s) {
		unsigned int val;

		if ('0' <= *s && *s <= '9')
			val = *s - '0';
		else if ('a' <= _tolower(*s) && _tolower(*s) <= 'f')
			val = _tolower(*s) - 'a' + 10;
		else
			 break;

		if (val >= base)
			break;
		if (*res > div_u64(ULLONG_MAX - val, base))
			overflow = 1;
		*res = *res * base + val;
		rv++;
		s++;
	}
	if (overflow)
		rv |= KSTRTOX_OVERFLOW;
	return rv;
}
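
This variant runs the div_u64-based overflow test on every digit; the version in Example 10 instead guards it with res & (~0ull << 60), because only once res reaches 2^60 can res * base + val wrap for the largest supported base: (2^60 - 1) * 16 + 15 = 2^64 - 1 still fits in 64 bits, so the expensive division is skipped on the fast path. A small check of that guard:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	unsigned long long below = (1ULL << 60) - 1;	/* cannot overflow: *16 + 15 < 2^64 */
	unsigned long long above = 1ULL << 60;

	printf("%d %d\n",
	       (below & (~0ULL << 60)) != 0,	/* 0: skip the division */
	       (above & (~0ULL << 60)) != 0);	/* 1: do the full check */
	return 0;
}
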
Example 21
static int gpu_dvfs_update_perf(struct kbase_device *kbdev)
{
	unsigned long flags;
	unsigned int pmcnt;
	u64 perfmon;
	struct exynos_context *platform = (struct exynos_context *) kbdev->platform_context;

	DVFS_ASSERT(platform);

	if (!platform->perf_gathering_status)
		return 0;

	if (!gpu_control_is_power_on(kbdev))
		return 0;

	exynos_gpu_perf_update(&pmcnt);
	exynos_gpu_perf_reset();
	perfmon = div_u64((u64)pmcnt*1000,  platform->cur_clock);

	spin_lock_irqsave(&platform->gpu_dvfs_spinlock, flags);
	platform->env_data.perf = perfmon;
	spin_unlock_irqrestore(&platform->gpu_dvfs_spinlock, flags);

	GPU_LOG(DVFS_INFO, DUMMY, 0u, 0u, "Current PERF: %d\n", platform->env_data.perf);

	return 0;
}
Example 22
/**
 * __add_badblock_range() - Convert a physical address range to bad sectors
 * @bb:		badblocks instance to populate
 * @ns_offset:	namespace offset where the error range begins (in bytes)
 * @len:	number of bytes of poison to be added
 *
 * This assumes that the range provided with (ns_offset, len) is within
 * the bounds of physical addresses for this namespace, i.e. lies in the
 * interval [ns_start, ns_start + ns_size)
 */
static void __add_badblock_range(struct badblocks *bb, u64 ns_offset, u64 len)
{
	const unsigned int sector_size = 512;
	sector_t start_sector;
	u64 num_sectors;
	u32 rem;

	start_sector = div_u64(ns_offset, sector_size);
	num_sectors = div_u64_rem(len, sector_size, &rem);
	if (rem)
		num_sectors++;

	if (unlikely(num_sectors > (u64)INT_MAX)) {
		u64 remaining = num_sectors;
		sector_t s = start_sector;

		while (remaining) {
			int done = min_t(u64, remaining, INT_MAX);

			set_badblock(bb, s, done);
			remaining -= done;
			s += done;
		}
	} else
		set_badblock(bb, start_sector, num_sectors);
}
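
Worked through with concrete numbers: a poison range at ns_offset = 4096 with len = 1025 bytes starts at sector 8, and since 1025 = 2 * 512 + 1 leaves a remainder, it covers num_sectors = 3. A stand-alone check of that arithmetic (the offsets are invented):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	const unsigned int sector_size = 512;
	uint64_t ns_offset = 4096, len = 1025;	/* invented range */

	uint64_t start_sector = ns_offset / sector_size;
	uint64_t num_sectors = len / sector_size;
	uint32_t rem = len % sector_size;

	if (rem)
		num_sectors++;	/* a partial trailing sector is still bad */

	printf("start=%llu count=%llu\n",
	       (unsigned long long)start_sector,
	       (unsigned long long)num_sectors);	/* start=8 count=3 */
	return 0;
}
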
Example 23
int kbase_platform_dvfs_power_to_freq(int power)
{
#ifdef CONFIG_MALI_DVFS
	int level, freq;
	unsigned int vol;
	u64 _power;
	struct kbase_device *kbdev = pkbdev;
	struct exynos_context *platform = (struct exynos_context *) kbdev->platform_context;

	if (!platform) {
		GPU_LOG(DVFS_ERROR, DUMMY, 0u, 0u, "%s: platform context (0x%p) is not initialized\n", __func__, platform);
		return -1;
	}

	for (level = gpu_dvfs_get_level(platform->gpu_min_clock); level >= gpu_dvfs_get_level(platform->gpu_max_clock); level--) {
		vol = platform->table[level].voltage / 10000;
		freq = platform->table[level].clock;
		_power = div_u64((u64)platform->ipa_power_coeff_gpu * freq * vol * vol, 100000);
		if ((int)_power >= power)
			break;
	}

	return platform->table[level].clock;
#else
	return 0;
#endif /* CONFIG_MALI_DVFS */
}
int kbase_platform_dvfs_freq_to_power(int freq)
{
	int level;
	unsigned int vol;
	unsigned long flags;
	unsigned long long power;
	struct kbase_device *kbdev = pkbdev;
	struct exynos_context *platform = (struct exynos_context *) kbdev->platform_context;
	gpu_dvfs_info *dvfs_status;

	if (!platform) {
		GPU_LOG(DVFS_ERROR, "platform context (0x%p) is not initialized within %s\n", platform, __FUNCTION__);
		return -1;
	}

	dvfs_status = platform->table;

	if (0 == freq) {
		spin_lock_irqsave(&platform->gpu_dvfs_spinlock, flags);
		power = platform->power;
		spin_unlock_irqrestore(&platform->gpu_dvfs_spinlock, flags);
	} else {
		level = platform->step;
		if (level >= 0) {
			vol = dvfs_status->voltage/10000;
			power = div_u64((u64)POWER_COEFF_GPU * freq * vol * vol, 100000);
		} else {
			power = 0;
		}
	}

	return (int)power;
}
Example 25
/**
 * last_lba(): return number of last logical block of device
 * @bdev: block device
 * 
 * Description: Returns last LBA value on success, 0 on error.
 * This is stored (by sd and ide-geometry) in
 *  the part[0] entry for this disk, and is the number of
 *  physical sectors available on the disk.
 */
static u64 last_lba(struct block_device *bdev)
{
	if (!bdev || !bdev->bd_inode)
		return 0;
	return div_u64(bdev->bd_inode->i_size,
		       bdev_logical_block_size(bdev)) - 1ULL;
}
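
For example, a 1 TiB device with 512-byte logical blocks has i_size = 1099511627776, so the division yields 2147483648 blocks and the last LBA is 2147483647. A quick stand-alone check with that invented device size:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t i_size = 1099511627776ULL;	/* 1 TiB, invented device size */
	uint32_t block_size = 512;

	printf("last LBA = %llu\n",
	       (unsigned long long)(i_size / block_size - 1ULL));
	return 0;
}
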
int kbase_platform_dvfs_power_to_freq(int power)
{
	int level, freq;
	unsigned int vol;
	u64 _power;
	struct kbase_device *kbdev = pkbdev;
	struct exynos_context *platform = (struct exynos_context *) kbdev->platform_context;
	gpu_dvfs_info *dvfs_status;
	if (!platform) {
		GPU_LOG(DVFS_ERROR, "platform context (0x%p) is not initialized within %s\n", platform, __FUNCTION__);
		return -1;
	}

	dvfs_status = platform->table;
	level = 0;
	while (level < (platform->table_size))
	{
		vol = dvfs_status->voltage/10000;
		freq = dvfs_status->clock;
		_power = div_u64((u64)POWER_COEFF_GPU * freq * vol * vol, 100000);
		if ((int)_power > power)
			break;
		level++;
	}

	return platform->table[level].clock;
}
Example 27
/**
 * fec_ptp_adjfreq - adjust ptp cycle frequency
 * @ptp: the ptp clock structure
 * @ppb: parts per billion adjustment from base
 *
 * Adjust the frequency of the ptp cycle counter by the
 * indicated ppb from the base frequency.
 *
 * Because ENET hardware frequency adjust is complex,
 * using software method to do that.
 */
static int fec_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
{
	u64 diff;
	unsigned long flags;
	int neg_adj = 0;
	u32 mult = FEC_CC_MULT;

	struct fec_enet_private *fep =
	    container_of(ptp, struct fec_enet_private, ptp_caps);

	if (ppb < 0) {
		ppb = -ppb;
		neg_adj = 1;
	}

	diff = mult;
	diff *= ppb;
	diff = div_u64(diff, 1000000000ULL);

	spin_lock_irqsave(&fep->tmreg_lock, flags);
	/*
	 * dummy read to set cycle_last in tc to now.
	 * So use adjusted mult to calculate when next call
	 * timercounter_read.
	 */
	timecounter_read(&fep->tc);

	fep->cc.mult = neg_adj ? mult - diff : mult + diff;

	spin_unlock_irqrestore(&fep->tmreg_lock, flags);

	return 0;
}
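
The timecounter_read() before changing cc.mult is not a stray read, as the comment notes: a timecounter converts deltas of raw cycles using the current mult, so the time accumulated so far has to be folded in at the old rate before the rate changes. A toy model of that ordering (the struct and numbers are invented, not the kernel timecounter API):

#include <stdint.h>
#include <stdio.h>

/* Toy model of a timecounter: nsec accumulates at the mult valid for
 * the interval being folded in; shift is 0 to keep the numbers simple. */
struct toy_tc {
	uint64_t cycle_last;
	uint64_t nsec;
	uint32_t mult;
};

static void toy_read(struct toy_tc *tc, uint64_t now_cycles)
{
	tc->nsec += (now_cycles - tc->cycle_last) * tc->mult;
	tc->cycle_last = now_cycles;
}

int main(void)
{
	struct toy_tc tc = { .cycle_last = 0, .nsec = 0, .mult = 10 };

	/* fold in 100 cycles at the old rate, then slow the clock down */
	toy_read(&tc, 100);	/* nsec = 1000 */
	tc.mult = 9;
	toy_read(&tc, 200);	/* nsec = 1000 + 100 * 9 = 1900 */

	printf("nsec=%llu\n", (unsigned long long)tc.nsec);
	return 0;
}
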
Example 28
void mlx4_en_init_timestamp(struct mlx4_en_dev *mdev)
{
	struct mlx4_dev *dev = mdev->dev;
	u64 temp_mult;

	memset(&mdev->cycles, 0, sizeof(mdev->cycles));
	mdev->cycles.read = mlx4_en_read_clock;
	mdev->cycles.mask = CLOCKSOURCE_MASK(48);

	/*
	 * we have hca_core_clock in MHz, so to translate cycles to nsecs
	 * we need to divide cycles by freq and multiply by 1000;
	 * in order to get precise result we shift left the value,
	 * since we don't have floating point there;
	 * at the end shift result back
	 */
	temp_mult = div_u64(((1ull * 1000) << 29), dev->caps.hca_core_clock);
	mdev->cycles.mult = (u32)temp_mult;
	mdev->cycles.shift = 29;

	timecounter_init(&mdev->clock, &mdev->cycles,
			 ktime_to_ns(ktime_get_real()));

	memset(&mdev->compare, 0, sizeof(mdev->compare));
	mdev->compare.source = &mdev->clock;
	mdev->compare.target = ktime_get_real;
	mdev->compare.num_samples = 10;
	timecompare_update(&mdev->compare, 0);
}
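
The comment describes a fixed-point reciprocal: with hca_core_clock in MHz, one cycle lasts 1000 / hca_core_clock nanoseconds, so mult = (1000 << 29) / hca_core_clock and later conversions compute ns = (cycles * mult) >> 29. A quick check with a hypothetical 427 MHz core clock:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t hca_core_clock = 427;	/* MHz, invented for illustration */
	uint32_t shift = 29;

	uint32_t mult = (uint32_t)(((1ULL * 1000) << shift) / hca_core_clock);
	uint64_t cycles = 427000000ULL;	/* one second worth of cycles */

	/* prints roughly 1000000000 ns */
	printf("mult=%u ns=%llu\n", mult,
	       (unsigned long long)((cycles * mult) >> shift));
	return 0;
}
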
Example 29
/**
 * i40e_ptp_adjfreq - Adjust the PHC frequency
 * @ptp: The PTP clock structure
 * @ppb: Parts per billion adjustment from the base
 *
 * Adjust the frequency of the PHC by the indicated parts per billion from the
 * base frequency.
 **/
static int i40e_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
{
	struct i40e_pf *pf = container_of(ptp, struct i40e_pf, ptp_caps);
	struct i40e_hw *hw = &pf->hw;
	u64 adj, freq, diff;
	int neg_adj = 0;

	if (ppb < 0) {
		neg_adj = 1;
		ppb = -ppb;
	}

	smp_mb(); /* Force any pending update before accessing. */
	adj = ACCESS_ONCE(pf->ptp_base_adj);

	freq = adj;
	freq *= ppb;
	diff = div_u64(freq, 1000000000ULL);

	if (neg_adj)
		adj -= diff;
	else
		adj += diff;

	wr32(hw, I40E_PRTTSYN_INC_L, adj & 0xFFFFFFFF);
	wr32(hw, I40E_PRTTSYN_INC_H, adj >> 32);

	return 0;
}
/**
 * ubifs_calc_min_idx_lebs - calculate amount of eraseblocks for the index.
 * @c: UBIFS file-system description object
 *
 * This function calculates and returns the number of eraseblocks which should
 * be kept for index usage.
 */
int ubifs_calc_min_idx_lebs(struct ubifs_info *c)
{
    int idx_lebs, eff_leb_size = c->leb_size - c->max_idx_node_sz;
    long long idx_size;

    idx_size = c->old_idx_sz + c->budg_idx_growth + c->budg_uncommitted_idx;

    /* And make sure we have thrice the index size of space reserved */
    idx_size = idx_size + (idx_size << 1);

    /*
     * We do not maintain 'old_idx_size' as 'old_idx_lebs'/'old_idx_bytes'
     * pair, nor similarly the two variables for the new index size, so we
     * have to do this costly 64-bit division on fast-path.
     */
    idx_size += eff_leb_size - 1;
    idx_lebs = div_u64(idx_size, eff_leb_size);
    /*
     * The index head is not available for the in-the-gaps method, so add an
     * extra LEB to compensate.
     */
    idx_lebs += 1;
    if (idx_lebs < MIN_INDEX_LEBS)
        idx_lebs = MIN_INDEX_LEBS;
    return idx_lebs;
}
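
Worked through with round (invented) numbers: if old_idx_sz plus the growth budgets comes to 6 MiB and eff_leb_size is 126 KiB, tripling gives 18 MiB, the round-up division yields 147 LEBs, and one more is added for the in-the-gaps method, giving 148 (subject to the MIN_INDEX_LEBS floor):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	long long idx_size = 6 * 1024 * 1024;	/* invented index size */
	int eff_leb_size = 126 * 1024;		/* invented usable LEB size */
	int idx_lebs;

	idx_size = idx_size + (idx_size << 1);		/* reserve 3x */
	idx_size += eff_leb_size - 1;			/* round up */
	idx_lebs = (int)(idx_size / eff_leb_size) + 1;	/* + in-the-gaps LEB */

	printf("idx_lebs=%d\n", idx_lebs);	/* 148 with these numbers */
	return 0;
}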