void btrfs_dev_replace_status(struct btrfs_fs_info *fs_info,
			      struct btrfs_ioctl_dev_replace_args *args)
{
	struct btrfs_dev_replace *dev_replace = &fs_info->dev_replace;

	btrfs_dev_replace_lock(dev_replace);
	/* even if !dev_replace_is_valid, the values are good enough for
	 * the replace_status ioctl */
	args->result = BTRFS_IOCTL_DEV_REPLACE_RESULT_NO_ERROR;
	args->status.replace_state = dev_replace->replace_state;
	args->status.time_started = dev_replace->time_started;
	args->status.time_stopped = dev_replace->time_stopped;
	args->status.num_write_errors =
		atomic64_read(&dev_replace->num_write_errors);
	args->status.num_uncorrectable_read_errors =
		atomic64_read(&dev_replace->num_uncorrectable_read_errors);
	switch (dev_replace->replace_state) {
	case BTRFS_IOCTL_DEV_REPLACE_STATE_NEVER_STARTED:
	case BTRFS_IOCTL_DEV_REPLACE_STATE_CANCELED:
		args->status.progress_1000 = 0;
		break;
	case BTRFS_IOCTL_DEV_REPLACE_STATE_FINISHED:
		args->status.progress_1000 = 1000;
		break;
	case BTRFS_IOCTL_DEV_REPLACE_STATE_STARTED:
	case BTRFS_IOCTL_DEV_REPLACE_STATE_SUSPENDED:
		args->status.progress_1000 = div64_u64(dev_replace->cursor_left,
			div64_u64(dev_replace->srcdev->total_bytes, 1000));
		break;
	}
	btrfs_dev_replace_unlock(dev_replace);
}
/*
 * The multipliers scale up all calculated values so that decimal places
 * can be represented.
 */
void Statistics_Calculate(const StatisticsType *Instance,
			  unsigned int MeanMultiplier, uint64_t *Mean,
			  unsigned int SDMultiplier, uint64_t *StandardDeviation,
			  unsigned int *Minimum, unsigned int *Maximum,
			  unsigned int *Samples)
{
	uint64_t Sum, Sum2, StandardDeviation2n2, StandardDeviationMn;

	*Samples = Instance->Samples;
	*Minimum = Instance->Minimum;
	*Maximum = Instance->Maximum;

	if (Instance->Samples == 0)
		return;

	Sum = Instance->Sum;
	*Mean = div64_u64((uint64_t)MeanMultiplier * Sum +
			  (uint64_t)(Instance->Samples / 2),
			  (uint64_t)Instance->Samples);

	Sum2 = Instance->Sum2;
	StandardDeviation2n2 = (uint64_t)Instance->Samples * Sum2 - Sum * Sum;
	StandardDeviationMn =
		SquareRoot(SDMultiplier * SDMultiplier * StandardDeviation2n2);
	*StandardDeviation = div64_u64(StandardDeviationMn +
				       (uint64_t)(Instance->Samples / 2),
				       (uint64_t)Instance->Samples);
}
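
/*
 * Hedged sketch (not part of the code above): the "+ Samples / 2" terms
 * implement round-to-nearest integer division.  Plain C, with native 64-bit
 * division standing in for div64_u64; the numbers are made up.
 */
#include <stdint.h>
#include <stdio.h>

static uint64_t div_round_nearest(uint64_t num, uint64_t den)
{
	/* Add half the divisor before dividing to round to nearest. */
	return (num + den / 2) / den;
}

int main(void)
{
	/* 7 samples summing to 256, mean scaled by 10: 2560/7 = 365.71...,
	 * which rounds to 366 where truncating division would give 365. */
	printf("%llu\n", (unsigned long long)div_round_nearest(10 * 256, 7));
	return 0;
}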
/*
 * Perform (stime * rtime) / total with reduced chances
 * of multiplication overflows by using smaller factors
 * like quotient and remainders of divisions between
 * rtime and total.
 */
static cputime_t scale_stime(u64 stime, u64 rtime, u64 total)
{
	u64 rem, res, scaled;

	if (rtime >= total) {
		/*
		 * Scale up to rtime / total then add
		 * the remainder scaled to stime / total.
		 */
		res = div64_u64_rem(rtime, total, &rem);
		scaled = stime * res;
		scaled += div64_u64(stime * rem, total);
	} else {
		/*
		 * Same in reverse: scale down to total / rtime
		 * then subtract that result scaled to
		 * the remaining part.
		 */
		res = div64_u64_rem(total, rtime, &rem);
		scaled = div64_u64(stime, res);
		scaled -= div64_u64(scaled * rem, total);
	}

	return (__force cputime_t) scaled;
}
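
/*
 * Hedged sketch (assumption, not kernel code): the quotient/remainder
 * decomposition above for the rtime >= total branch, written with native
 * 64-bit operators in place of div64_u64/div64_u64_rem.
 */
#include <stdint.h>
#include <stdio.h>

static uint64_t scale_stime_sketch(uint64_t stime, uint64_t rtime, uint64_t total)
{
	uint64_t q = rtime / total;	/* quotient  */
	uint64_t r = rtime % total;	/* remainder */

	/* stime * (q + r/total) == stime * rtime / total, but the largest
	 * intermediate product is stime * r rather than stime * rtime. */
	return stime * q + (stime * r) / total;
}

int main(void)
{
	/* 600 * 10000 / 1500 == 4000 */
	printf("%llu\n",
	       (unsigned long long)scale_stime_sketch(600, 10000, 1500));
	return 0;
}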
static unsigned int calc_v_total_from_duration(
		const struct dc_stream_state *stream,
		const struct mod_vrr_params *vrr,
		unsigned int duration_in_us)
{
	unsigned int v_total = 0;

	if (duration_in_us < vrr->min_duration_in_us)
		duration_in_us = vrr->min_duration_in_us;

	if (duration_in_us > vrr->max_duration_in_us)
		duration_in_us = vrr->max_duration_in_us;

	v_total = div64_u64(div64_u64(((unsigned long long)(
			duration_in_us) * stream->timing.pix_clk_khz),
			stream->timing.h_total), 1000);

	/* v_total cannot be less than nominal */
	if (v_total < stream->timing.v_total) {
		ASSERT(v_total < stream->timing.v_total);
		v_total = stream->timing.v_total;
	}

	return v_total;
}
static int perf_move_data(struct pthr_ctx *pctx, char __iomem *dst, char *src,
			  u64 buf_size, u64 win_size, u64 total)
{
	int chunks, total_chunks, i;
	int copied_chunks = 0;
	u64 copied = 0, result;
	char __iomem *tmp = dst;
	u64 perf, diff_us;
	ktime_t kstart, kstop, kdiff;

	chunks = div64_u64(win_size, buf_size);
	total_chunks = div64_u64(total, buf_size);
	kstart = ktime_get();

	for (i = 0; i < total_chunks; i++) {
		result = perf_copy(pctx, tmp, src, buf_size);
		copied += result;
		copied_chunks++;
		if (copied_chunks == chunks) {
			tmp = dst;
			copied_chunks = 0;
		} else
			tmp += buf_size;

		/* Probably should schedule every 4GB to prevent soft hang. */
		if (((copied % SZ_4G) == 0) && !use_dma) {
			set_current_state(TASK_INTERRUPTIBLE);
			schedule_timeout(1);
		}
	}

	if (use_dma) {
		pr_info("%s: All DMA descriptors submitted\n", current->comm);
		while (atomic_read(&pctx->dma_sync) != 0)
			msleep(20);
	}

	kstop = ktime_get();
	kdiff = ktime_sub(kstop, kstart);
	diff_us = ktime_to_us(kdiff);

	pr_info("%s: copied %llu bytes\n", current->comm, copied);
	pr_info("%s: lasted %llu usecs\n", current->comm, diff_us);

	perf = div64_u64(copied, diff_us);

	pr_info("%s: MBytes/s: %llu\n", current->comm, perf);

	return 0;
}
static inline unsigned long offset_to_bit(u64 bitmap_start, u64 sectorsize,
					  u64 offset)
{
	BUG_ON(offset < bitmap_start);
	offset -= bitmap_start;
	return (unsigned long)(div64_u64(offset, sectorsize));
}
static unsigned int get_cb_cost(struct f2fs_sb_info *sbi, unsigned int segno)
{
	struct sit_info *sit_i = SIT_I(sbi);
	unsigned int secno = GET_SECNO(sbi, segno);
	unsigned int start = secno * sbi->segs_per_sec;
	unsigned long long mtime = 0;
	unsigned int vblocks;
	unsigned char age = 0;
	unsigned char u;
	unsigned int i;

	for (i = 0; i < sbi->segs_per_sec; i++)
		mtime += get_seg_entry(sbi, start + i)->mtime;
	vblocks = get_valid_blocks(sbi, segno, sbi->segs_per_sec);

	mtime = div_u64(mtime, sbi->segs_per_sec);
	vblocks = div_u64(vblocks, sbi->segs_per_sec);

	u = (vblocks * 100) >> sbi->log_blocks_per_seg;

	/* Handle if the system time is changed by user */
	if (mtime < sit_i->min_mtime)
		sit_i->min_mtime = mtime;
	if (mtime > sit_i->max_mtime)
		sit_i->max_mtime = mtime;
	if (sit_i->max_mtime != sit_i->min_mtime)
		age = 100 - div64_u64(100 * (mtime - sit_i->min_mtime),
				sit_i->max_mtime - sit_i->min_mtime);

	return UINT_MAX - ((100 * (100 - u) * age) / (100 + u));
}
/*
 * lcdc fps manager: set or get the lcdc fps
 * set: 0 = get, 1 = set
 */
static int rk3066b_lcdc_fps_mgr(struct rk_lcdc_device_driver *dev_drv, int fps,
				bool set)
{
	struct rk3066b_lcdc_device *lcdc_dev =
		container_of(dev_drv, struct rk3066b_lcdc_device, driver);
	rk_screen *screen = dev_drv->cur_screen;
	u64 ft = 0;
	u32 dotclk;
	int ret;

	if (set) {
		ft = div_u64(1000000000000llu, fps);
		dev_drv->pixclock = div_u64(ft,
			(screen->upper_margin + screen->lower_margin +
			 screen->y_res + screen->vsync_len) *
			(screen->left_margin + screen->right_margin +
			 screen->x_res + screen->hsync_len));
		dotclk = div_u64(1000000000000llu, dev_drv->pixclock);
		ret = clk_set_rate(lcdc_dev->dclk, dotclk);
		if (ret) {
			printk(KERN_ERR ">>>>>> set lcdc%d dclk failed\n",
			       lcdc_dev->id);
		}
		dev_drv->pixclock = lcdc_dev->pixclock =
			div_u64(1000000000000llu, clk_get_rate(lcdc_dev->dclk));
	}

	/* one frame time, in picoseconds */
	ft = (u64)(screen->upper_margin + screen->lower_margin +
		   screen->y_res + screen->vsync_len) *
	     (screen->left_margin + screen->right_margin +
	      screen->x_res + screen->hsync_len) *
	     (dev_drv->pixclock);
	fps = div64_u64(1000000000000llu, ft);
	screen->ft = 1000 / fps;	/* one frame time in ms */

	return fps;
}
/**
 * sched_get_nr_running_avg
 * @return: Average nr_running value since the last poll.
 *	    Returns the avg * 100 to return up to two decimal points
 *	    of accuracy.
 *
 * Obtains the average nr_running value since the last poll.
 * This function may not be called concurrently with itself.
 */
void sched_get_nr_running_avg(int *avg)
{
	int cpu;
	u64 curr_time = sched_clock();
	u64 diff = curr_time - last_get_time;
	u64 tmp_avg = 0;

	*avg = 0;

	if (!diff)
		return;

	last_get_time = curr_time;

	/* read and reset nr_running counts */
	for_each_possible_cpu(cpu) {
		unsigned long flags;

		spin_lock_irqsave(&per_cpu(nr_lock, cpu), flags);
		tmp_avg += per_cpu(nr_prod_sum, cpu);
		tmp_avg += per_cpu(nr, cpu) *
			   (curr_time - per_cpu(last_time, cpu));
		per_cpu(last_time, cpu) = curr_time;
		per_cpu(nr_prod_sum, cpu) = 0;
		spin_unlock_irqrestore(&per_cpu(nr_lock, cpu), flags);
	}

	*avg = (int)div64_u64(tmp_avg * 100, diff);
}
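
/*
 * Hedged sketch (assumption, plain C): the tmp_avg accumulation above is a
 * time-weighted average of nr_running, scaled by 100 to keep two decimal
 * places in an integer.  Per-CPU bookkeeping is reduced to constants here.
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* nr_running was 2 for 30 ms and then 5 for 70 ms of a 100 ms window:
	 * avg * 100 = (2*30 + 5*70) * 100 / 100 = 410, i.e. 4.10 tasks. */
	uint64_t prod_sum = 2 * 30 + 5 * 70;	/* sum of nr * dt */
	uint64_t diff = 100;			/* polling window */
	int avg = (int)((prod_sum * 100) / diff);

	printf("%d\n", avg);	/* 410 */
	return 0;
}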
int64_t __aeabi_ldivmod(int64_t n, int64_t d)
{
	int c = 0;
	s64 res;

	if (n < 0LL) {
		c = ~c;
		n = -n;
	}
	if (d < 0LL) {
		c = ~c;
		d = -d;
	}
	if (unlikely(d & 0xffffffff00000000ULL)) {
		pr_warn("TE: 64-bit/64-bit div loses precision (0x%llx/0x%llx)\n",
			n, d);
		BUG();
	}
	res = div64_u64(n, d);
	if (c)
		res = -res;

	return res;
}
/* Rate limiting */
static uint64_t __calc_target_rate(struct cached_dev *dc)
{
	struct cache_set *c = dc->disk.c;

	/*
	 * This is the size of the cache, minus the amount used for
	 * flash-only devices
	 */
	uint64_t cache_sectors = c->nbuckets * c->sb.bucket_size -
				atomic_long_read(&c->flash_dev_dirty_sectors);

	/*
	 * Unfortunately there is no control of global dirty data.  If the
	 * user states that they want 10% dirty data in the cache, and has,
	 * e.g., 5 backing volumes of equal size, we try and ensure each
	 * backing volume uses about 2% of the cache for dirty data.
	 */
	uint32_t bdev_share =
		div64_u64(bdev_sectors(dc->bdev) << WRITEBACK_SHARE_SHIFT,
			  c->cached_dev_sectors);

	uint64_t cache_dirty_target =
		div_u64(cache_sectors * dc->writeback_percent, 100);

	/* Ensure each backing dev gets at least one dirty share */
	if (bdev_share < 1)
		bdev_share = 1;

	return (cache_dirty_target * bdev_share) >> WRITEBACK_SHARE_SHIFT;
}
static int snd_compr_write_data(struct snd_compr_stream *stream,
				const char __user *buf, size_t count)
{
	void *dstn;
	size_t copy;
	struct snd_compr_runtime *runtime = stream->runtime;
	/* 64-bit Modulus */
	u64 app_pointer = div64_u64(runtime->total_bytes_available,
				    runtime->buffer_size);
	app_pointer = runtime->total_bytes_available -
		      (app_pointer * runtime->buffer_size);

	dstn = runtime->buffer + app_pointer;
	pr_debug("copying %zu at %lld\n", count, app_pointer);
	if (count < runtime->buffer_size - app_pointer) {
		if (copy_from_user(dstn, buf, count))
			return -EFAULT;
	} else {
		copy = runtime->buffer_size - app_pointer;
		if (copy_from_user(dstn, buf, copy))
			return -EFAULT;
		if (copy_from_user(runtime->buffer, buf + copy, count - copy))
			return -EFAULT;
	}
	/* if DSP cares, let it know data has been written */
	if (stream->ops->ack)
		stream->ops->ack(stream, count);
	return count;
}
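
/*
 * Hedged sketch (assumption, plain C): the "64-bit Modulus" above is built
 * from a divide, a multiply and a subtract because no direct 64-by-64 modulo
 * helper is used; native division stands in for div64_u64 here.
 */
#include <stdint.h>
#include <assert.h>

static uint64_t mod_via_div(uint64_t total, uint64_t buffer_size)
{
	uint64_t q = total / buffer_size;

	return total - q * buffer_size;	/* == total % buffer_size */
}

int main(void)
{
	assert(mod_via_div(1000036, 65536) == 1000036 % 65536);
	return 0;
}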
/* Number of consecutive frames to check before entering/exiting fixed refresh */
#define FIXED_REFRESH_ENTER_FRAME_COUNT 5
#define FIXED_REFRESH_EXIT_FRAME_COUNT 5

struct core_freesync {
	struct mod_freesync public;
	struct dc *dc;
};

#define MOD_FREESYNC_TO_CORE(mod_freesync)\
	container_of(mod_freesync, struct core_freesync, public)

struct mod_freesync *mod_freesync_create(struct dc *dc)
{
	struct core_freesync *core_freesync =
		kzalloc(sizeof(struct core_freesync), GFP_KERNEL);

	if (core_freesync == NULL)
		goto fail_alloc_context;

	if (dc == NULL)
		goto fail_construct;

	core_freesync->dc = dc;
	return &core_freesync->public;

fail_construct:
	kfree(core_freesync);

fail_alloc_context:
	return NULL;
}

void mod_freesync_destroy(struct mod_freesync *mod_freesync)
{
	struct core_freesync *core_freesync = NULL;

	if (mod_freesync == NULL)
		return;
	core_freesync = MOD_FREESYNC_TO_CORE(mod_freesync);
	kfree(core_freesync);
}

#if 0 /* unused currently */
static unsigned int calc_refresh_in_uhz_from_duration(
		unsigned int duration_in_ns)
{
	unsigned int refresh_in_uhz =
		((unsigned int)(div64_u64((1000000000ULL * 1000000),
				duration_in_ns)));

	return refresh_in_uhz;
}
/**
 * sched_get_nr_running_avg
 * @return: Average nr_running and iowait value since last poll.
 *	    Returns the avg * 100 to return up to two decimal points
 *	    of accuracy.
 *
 * Obtains the average nr_running value since the last poll.
 * This function may not be called concurrently with itself.
 */
void sched_get_nr_running_avg(int *avg, int *iowait_avg)
{
	int cpu;
	u64 curr_time;
	u64 diff_sgnra_last;
	u64 diff_last;
	u32 faultyclk_cpumask = 0;
	u64 tmp;

	*avg = 0;
	*iowait_avg = 0;

	/* read and reset nr_running counts */
	for_each_possible_cpu(cpu) {
		unsigned long flags;

		spin_lock_irqsave(&per_cpu(nr_lock, cpu), flags);
		curr_time = sched_clock();

		/* error handling for problematic clock violation */
		if (curr_time > per_cpu(sgnra_last_time, cpu) &&
		    curr_time >= per_cpu(last_time, cpu)) {
			diff_last = curr_time - per_cpu(last_time, cpu);
			diff_sgnra_last =
				curr_time - per_cpu(sgnra_last_time, cpu);

			tmp = per_cpu(nr, cpu) * diff_last;
			tmp += per_cpu(nr_prod_sum, cpu);
			*avg += (int)div64_u64(tmp * 100, diff_sgnra_last);

			tmp = nr_iowait_cpu(cpu) * diff_last;
			tmp += per_cpu(iowait_prod_sum, cpu);
			*iowait_avg +=
				(int)div64_u64(tmp * 100, diff_sgnra_last);
		} else {
			faultyclk_cpumask |= 1 << cpu;
			pr_warn("[%s]**** (curr_time %lld), (per_cpu(sgnra_last_time, %d), %lld), (per_cpu(last_time, %d), %lld)\n",
				__func__, curr_time,
				cpu, per_cpu(sgnra_last_time, cpu),
				cpu, per_cpu(last_time, cpu));
		}

		per_cpu(sgnra_last_time, cpu) = curr_time;
		per_cpu(last_time, cpu) = curr_time;
		per_cpu(nr_prod_sum, cpu) = 0;
		per_cpu(iowait_prod_sum, cpu) = 0;
		spin_unlock_irqrestore(&per_cpu(nr_lock, cpu), flags);
	}

	/* error handling for problematic clock violation */
	if (faultyclk_cpumask) {
		*avg = 0;
		*iowait_avg = 0;
		pr_warn("[%s]**** CPU (%d) clock may unstable !!\n",
			__func__, faultyclk_cpumask);
		return;
	}

	WARN(*avg < 0, "[sched_get_nr_running_avg] avg:%d", *avg);
	WARN(*iowait_avg < 0,
	     "[sched_get_nr_running_avg] iowait_avg:%d", *iowait_avg);
}
static unsigned int calc_duration_in_us_from_refresh_in_uhz(
		unsigned int refresh_in_uhz)
{
	unsigned int duration_in_us =
		((unsigned int)(div64_u64((1000000000ULL * 1000),
				refresh_in_uhz)));

	return duration_in_us;
}
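
/*
 * Hedged arithmetic check (assumption, plain C): the conversion above is
 * duration_us = 10^12 / refresh_uhz, e.g. 60 Hz = 60,000,000 uHz -> 16666 us.
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t refresh_in_uhz = 60000000ULL;	/* 60 Hz */
	uint64_t duration_in_us = (1000000000ULL * 1000) / refresh_in_uhz;

	printf("%llu us\n", (unsigned long long)duration_in_us);	/* 16666 */
	return 0;
}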
uint64_t __aeabi_uldivmod(uint64_t n, uint64_t d)
{
	if (unlikely(d & 0xffffffff00000000ULL)) {
		pr_warn("TE: 64-bit/64-bit div loses precision (0x%llx/0x%llx)\n",
			n, d);
		BUG();
	}
	return div64_u64(n, d);
}
static void nft_limit_pkt_bytes_eval(const struct nft_expr *expr,
				     struct nft_regs *regs,
				     const struct nft_pktinfo *pkt)
{
	struct nft_limit *priv = nft_expr_priv(expr);
	u64 cost = div64_u64(priv->nsecs * pkt->skb->len, priv->rate);

	if (nft_limit_eval(priv, cost))
		regs->verdict.code = NFT_BREAK;
}
static unsigned int calc_duration_in_us_from_v_total(
		const struct dc_stream_state *stream,
		const struct mod_vrr_params *in_vrr,
		unsigned int v_total)
{
	unsigned int duration_in_us =
		(unsigned int)(div64_u64(((unsigned long long)(v_total) * 1000) *
			stream->timing.h_total, stream->timing.pix_clk_khz));

	return duration_in_us;
}
/* Borrow from ptb */
static inline void precompute_ratedata(struct wfq_rate_cfg *r)
{
	r->shift = 0;
	r->mult = 1;

	if (r->rate_bps > 0) {
		r->shift = 15;
		r->mult = div64_u64(8LLU * NSEC_PER_SEC * (1 << r->shift),
				    r->rate_bps);
	}
}
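
/*
 * Hedged sketch (assumption): the consuming code is not part of this snippet,
 * but in the ptb-style pattern the precomputed mult/shift pair is used to turn
 * a byte count into a transmission time in nanoseconds without a per-packet
 * division: ns = (len * mult) >> shift.  Plain C, made-up rate.
 */
#include <stdint.h>
#include <stdio.h>

#define NSEC_PER_SEC 1000000000ULL

struct rate_cfg {
	uint64_t rate_bps;	/* bits per second */
	uint64_t mult;
	int shift;
};

static void precompute(struct rate_cfg *r)
{
	r->shift = 15;
	r->mult = (8ULL * NSEC_PER_SEC * (1ULL << r->shift)) / r->rate_bps;
}

static uint64_t len_to_ns(const struct rate_cfg *r, uint64_t len_bytes)
{
	return (len_bytes * r->mult) >> r->shift;
}

int main(void)
{
	struct rate_cfg r = { .rate_bps = 1000000000ULL };	/* 1 Gbit/s */

	precompute(&r);
	/* 1500-byte frame at 1 Gbit/s ~= 12000 ns */
	printf("%llu ns\n", (unsigned long long)len_to_ns(&r, 1500));
	return 0;
}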
static cputime_t scale_utime(cputime_t utime, cputime_t rtime, cputime_t total)
{
	u64 temp = (__force u64) rtime;

	temp *= (__force u64) utime;

	if (sizeof(cputime_t) == 4)
		temp = div_u64(temp, (__force u32) total);
	else
		temp = div64_u64(temp, (__force u64) total);

	return (__force cputime_t) temp;
}
/**
 * sched_get_nr_running_avg
 * @return: Average nr_running and iowait value since last poll.
 *	    Returns the avg * 100 to return up to two decimal points
 *	    of accuracy.
 *
 * Obtains the average nr_running value since the last poll.
 * This function may not be called concurrently with itself.
 */
void sched_get_nr_running_avg(int *avg, int *iowait_avg)
{
	int cpu;
	u64 curr_time = sched_clock();
	u64 diff = curr_time - last_get_time;
	u64 tmp_avg = 0, tmp_iowait = 0;

	*avg = 0;
	*iowait_avg = 0;

	if (!diff)
		return;

	last_get_time = curr_time;

	/* read and reset nr_running counts */
	for_each_possible_cpu(cpu) {
		unsigned long flags;

		spin_lock_irqsave(&per_cpu(nr_lock, cpu), flags);
		tmp_avg += per_cpu(nr_prod_sum, cpu);
		tmp_avg += per_cpu(nr, cpu) *
			   (curr_time - per_cpu(last_time, cpu));
		tmp_iowait = per_cpu(iowait_prod_sum, cpu);
		tmp_iowait += nr_iowait_cpu(cpu) *
			      (curr_time - per_cpu(last_time, cpu));
		per_cpu(last_time, cpu) = curr_time;
		per_cpu(nr_prod_sum, cpu) = 0;
		per_cpu(iowait_prod_sum, cpu) = 0;
		spin_unlock_irqrestore(&per_cpu(nr_lock, cpu), flags);
	}

	*avg = (int)div64_u64(tmp_avg * 100, diff);
	*iowait_avg = (int)div64_u64(tmp_iowait * 100, diff);

	BUG_ON(*avg < 0);
	pr_debug("%s - avg:%d\n", __func__, *avg);
	BUG_ON(*iowait_avg < 0);
	pr_debug("%s - avg:%d\n", __func__, *iowait_avg);
}
static int msm_compr_pointer(struct snd_compr_stream *cstream,
			     struct snd_compr_tstamp *arg)
{
	struct snd_compr_runtime *runtime = cstream->runtime;
	struct msm_compr_audio *prtd = runtime->private_data;
	struct snd_compr_tstamp tstamp;
	uint64_t timestamp = 0;
	int rc = 0, first_buffer;
	unsigned long flags;

	pr_debug("%s\n", __func__);
	memset(&tstamp, 0x0, sizeof(struct snd_compr_tstamp));

	spin_lock_irqsave(&prtd->lock, flags);
	tstamp.sampling_rate = prtd->sample_rate;
	tstamp.byte_offset = prtd->byte_offset;
	tstamp.copied_total = prtd->copied_total;
	first_buffer = prtd->first_buffer;
	if (atomic_read(&prtd->error)) {
		pr_err("%s Got RESET EVENTS notification, return error",
		       __func__);
		tstamp.pcm_io_frames = 0;
		memcpy(arg, &tstamp, sizeof(struct snd_compr_tstamp));
		spin_unlock_irqrestore(&prtd->lock, flags);
		return -EINVAL;
	}
	spin_unlock_irqrestore(&prtd->lock, flags);

	/*
	 * Query timestamp from DSP if some data is with it.
	 * This prevents timeouts.
	 */
	if (!first_buffer) {
		rc = q6asm_get_session_time(prtd->audio_client, &timestamp);
		if (rc < 0) {
			pr_err("%s: Get Session Time return value =%lld\n",
			       __func__, timestamp);
			return -EAGAIN;
		}
	}

	/* DSP returns timestamp in usec */
	pr_debug("%s: timestamp = %lld usec\n", __func__, timestamp);
	timestamp *= prtd->sample_rate;
	tstamp.pcm_io_frames = (snd_pcm_uframes_t)div64_u64(timestamp, 1000000);
	memcpy(arg, &tstamp, sizeof(struct snd_compr_tstamp));

	return 0;
}
static int compr_pointer(struct snd_compr_stream *cstream,
			 struct snd_compr_tstamp *tstamp)
{
	struct snd_compr_runtime *runtime = cstream->runtime;
	struct runtime_data *prtd = runtime->private_data;
	struct snd_compr_tstamp timestamp;
	unsigned long flags;
	int pcm_size, bytes_available;
	int num_channel;

	pr_debug("%s\n", __func__);
#ifdef AUDIO_PERF
	prtd->start_time[POINTER_T] = sched_clock();
#endif
	memset(&timestamp, 0x0, sizeof(struct snd_compr_tstamp));

	spin_lock_irqsave(&prtd->lock, flags);
	timestamp.sampling_rate = prtd->ap->sample_rate;
	timestamp.byte_offset = prtd->byte_offset;
	timestamp.copied_total = prtd->copied_total;
	pcm_size = esa_compr_pcm_size();

	/* set the number of channels */
	if (prtd->ap->num_channels == 1 || prtd->ap->num_channels == 2)
		num_channel = 1;
	else if (prtd->ap->num_channels == 3)
		num_channel = 2;
	else
		num_channel = 2;
	spin_unlock_irqrestore(&prtd->lock, flags);

	if (pcm_size) {
		bytes_available = prtd->received_total - prtd->copied_total;
		timestamp.pcm_io_frames =
			(snd_pcm_uframes_t)div64_u64(pcm_size, 2 * num_channel);
		pr_debug("%s: pcm_size(%u), frame_count(%u), copied_total(%llu), \
			free_size(%llu)\n", __func__, pcm_size,
			timestamp.pcm_io_frames, prtd->copied_total,
			runtime->buffer_size - bytes_available);
	}

	memcpy(tstamp, &timestamp, sizeof(struct snd_compr_tstamp));

#ifdef AUDIO_PERF
	prtd->end_time[POINTER_T] = sched_clock();
	prtd->total_time[POINTER_T] +=
		prtd->end_time[POINTER_T] - prtd->start_time[POINTER_T];
#endif
	return 0;
}
static void __update_writeback_rate(struct cached_dev *dc)
{
	struct cache_set *c = dc->disk.c;
	uint64_t cache_sectors = c->nbuckets * c->sb.bucket_size;
	uint64_t cache_dirty_target =
		div_u64(cache_sectors * dc->writeback_percent, 100);
	int64_t target = div64_u64(cache_dirty_target * bdev_sectors(dc->bdev),
				   c->cached_dev_sectors);

	/* PD controller */
	int64_t dirty = bcache_dev_sectors_dirty(&dc->disk);
	int64_t derivative = dirty - dc->disk.sectors_dirty_last;
	int64_t proportional = dirty - target;
	int64_t change;

	dc->disk.sectors_dirty_last = dirty;

	/* Scale to sectors per second */
	proportional *= dc->writeback_rate_update_seconds;
	proportional = div_s64(proportional, dc->writeback_rate_p_term_inverse);

	derivative = div_s64(derivative, dc->writeback_rate_update_seconds);
	derivative = ewma_add(dc->disk.sectors_dirty_derivative, derivative,
			      (dc->writeback_rate_d_term /
			       dc->writeback_rate_update_seconds) ?: 1, 0);
	derivative *= dc->writeback_rate_d_term;
	derivative = div_s64(derivative, dc->writeback_rate_p_term_inverse);

	change = proportional + derivative;

	/* Don't increase writeback rate if the device isn't keeping up */
	if (change > 0 &&
	    time_after64(local_clock(),
			 dc->writeback_rate.next + NSEC_PER_MSEC))
		change = 0;

	dc->writeback_rate.rate =
		clamp_t(int64_t, (int64_t) dc->writeback_rate.rate + change,
			1, NSEC_PER_MSEC);

	dc->writeback_rate_proportional = proportional;
	dc->writeback_rate_derivative = derivative;
	dc->writeback_rate_change = change;
	dc->writeback_rate_target = target;
}
static unsigned int calc_v_total_from_refresh(
		const struct dc_stream_state *stream,
		unsigned int refresh_in_uhz)
{
	unsigned int v_total = stream->timing.v_total;
	unsigned int frame_duration_in_ns;

	frame_duration_in_ns =
		((unsigned int)(div64_u64((1000000000ULL * 1000000),
				refresh_in_uhz)));

	v_total = div64_u64(div64_u64(((unsigned long long)(
			frame_duration_in_ns) * stream->timing.pix_clk_khz),
			stream->timing.h_total), 1000000);

	/* v_total cannot be less than nominal */
	if (v_total < stream->timing.v_total) {
		ASSERT(v_total < stream->timing.v_total);
		v_total = stream->timing.v_total;
	}

	return v_total;
}
static int nft_limit_pkts_init(const struct nft_ctx *ctx,
			       const struct nft_expr *expr,
			       const struct nlattr * const tb[])
{
	struct nft_limit_pkts *priv = nft_expr_priv(expr);
	int err;

	err = nft_limit_init(&priv->limit, tb);
	if (err < 0)
		return err;

	priv->cost = div64_u64(priv->limit.nsecs, priv->limit.rate);
	return 0;
}
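
/*
 * Hedged arithmetic check (assumption, plain C): the per-packet cost above is
 * the limit window in nanoseconds divided by the allowed rate, and the
 * byte-based variant earlier charges len * nsecs / rate per packet.
 */
#include <stdint.h>
#include <stdio.h>

#define NSEC_PER_SEC 1000000000ULL

int main(void)
{
	uint64_t nsecs = NSEC_PER_SEC;			/* 1 second window   */
	uint64_t rate_pkts = 10;			/* 10 packets/second */
	uint64_t cost_per_pkt = nsecs / rate_pkts;	/* 100,000,000       */

	uint64_t rate_bytes = 125000;			/* 1 Mbit/s in bytes/s */
	uint64_t cost_1500b = (1500ULL * nsecs) / rate_bytes;	/* 12,000,000 */

	printf("%llu %llu\n", (unsigned long long)cost_per_pkt,
	       (unsigned long long)cost_1500b);
	return 0;
}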
static inline u64 offset_to_bitmap(struct btrfs_block_group_cache *block_group,
				   u64 offset)
{
	u64 bitmap_start;
	u64 bytes_per_bitmap;

	bytes_per_bitmap = BITS_PER_BITMAP * block_group->sectorsize;
	bitmap_start = offset - block_group->key.objectid;
	bitmap_start = div64_u64(bitmap_start, bytes_per_bitmap);
	bitmap_start *= bytes_per_bitmap;
	bitmap_start += block_group->key.objectid;

	return bitmap_start;
}
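
/*
 * Hedged sketch (assumption, plain C): offset_to_bitmap() above uses the usual
 * divide-then-multiply trick to snap an offset down to a bitmap boundary.
 * Numbers below are made up.
 */
#include <stdint.h>
#include <assert.h>

static uint64_t round_down_to_bitmap(uint64_t start, uint64_t offset,
				     uint64_t bytes_per_bitmap)
{
	uint64_t rel = offset - start;

	rel = (rel / bytes_per_bitmap) * bytes_per_bitmap;	/* round down */
	return start + rel;
}

int main(void)
{
	/* 128 KiB per bitmap: an offset 300000 bytes past a start of 4096
	 * snaps to 4096 + 2 * 131072 = 266240. */
	assert(round_down_to_bitmap(4096, 4096 + 300000, 131072) == 266240);
	return 0;
}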
static ssize_t show_screen_info(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct fb_info *fbi = dev_get_drvdata(dev);
	struct rk_lcdc_driver *dev_drv = (struct rk_lcdc_driver *)fbi->par;
	struct rk_screen *screen = dev_drv->screen0;
	int fps;
	u32 x = (screen->mode.left_margin + screen->mode.right_margin +
		 screen->mode.xres + screen->mode.hsync_len);
	u32 y = (screen->mode.upper_margin + screen->mode.lower_margin +
		 screen->mode.yres + screen->mode.vsync_len);
	/* one frame time, in picoseconds */
	u64 ft = (u64)x * y * (dev_drv->pixclock);

	fps = div64_u64(1000000000000llu, ft);

	return snprintf(buf, PAGE_SIZE, "xres:%d\nyres:%d\nfps:%d\n",
			screen->mode.xres, screen->mode.yres, fps);
}
/*
 * Returns true if we're sure to have found the definitive divider (ie
 * deviation == 0).
 */
static bool skl_wrpll_try_divider(struct skl_wrpll_context *ctx,
				  uint64_t central_freq,
				  uint64_t dco_freq,
				  unsigned int divider)
{
	uint64_t deviation;
	bool found = false;

	deviation = div64_u64(10000 * abs_diff(dco_freq, central_freq),
			      central_freq);

	/* positive deviation */
	if (dco_freq >= central_freq) {
		if (deviation < SKL_MAX_PDEVIATION &&
		    deviation < ctx->min_deviation) {
			ctx->min_deviation = deviation;
			ctx->central_freq = central_freq;
			ctx->dco_freq = dco_freq;
			ctx->p = divider;
#if 0
			found = true;
#endif
		}

		/* we can't improve a 0 deviation */
		if (deviation == 0)
			return true;
	/* negative deviation */
	} else if (deviation < SKL_MAX_NDEVIATION &&
		   deviation < ctx->min_deviation) {
		ctx->min_deviation = deviation;
		ctx->central_freq = central_freq;
		ctx->dco_freq = dco_freq;
		ctx->p = divider;
#if 0
		found = true;
#endif
	}

	if (found) {
		printf("Divider %d\n", divider);
		printf("Deviation %"PRIu64"\n", deviation);
		printf("dco_freq: %"PRIu64", dco_central_freq %"PRIu64"\n",
		       dco_freq, central_freq);
	}

	return false;
}
static ssize_t debugfs_run_read(struct file *filp, char __user *ubuf,
				size_t count, loff_t *offp)
{
	struct perf_ctx *perf = filp->private_data;
	char *buf;
	ssize_t ret, out_off = 0;
	struct pthr_ctx *pctx;
	int i;
	u64 rate;

	if (!perf)
		return 0;

	buf = kmalloc(1024, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	if (mutex_is_locked(&perf->run_mutex)) {
		out_off = scnprintf(buf, 64, "running\n");
		goto read_from_buf;
	}

	for (i = 0; i < MAX_THREADS; i++) {
		pctx = &perf->pthr_ctx[i];

		if (pctx->status == -ENODATA)
			break;

		if (pctx->status) {
			out_off += scnprintf(buf + out_off, 1024 - out_off,
					     "%d: error %d\n", i, pctx->status);
			continue;
		}

		rate = div64_u64(pctx->copied, pctx->diff_us);
		out_off += scnprintf(buf + out_off, 1024 - out_off,
				     "%d: copied %llu bytes in %llu usecs, %llu MBytes/s\n",
				     i, pctx->copied, pctx->diff_us, rate);
	}

read_from_buf:
	ret = simple_read_from_buffer(ubuf, count, offp, buf, out_off);
	kfree(buf);

	return ret;
}