int register_memory(void)
{
	int			result;
	int			i;
	ion_phys_addr_t		paddr;
	void                    *kvptr;
	unsigned long		kvaddr;
	unsigned long		mem_len;
	pr_debug("%s\n", __func__);

	mutex_lock(&acdb_data.acdb_mutex);
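	/* pre-allocate a column-data buffer per vocproc type and record its kernel virtual address */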
	for (i = 0; i < MAX_VOCPROC_TYPES; i++) {
		acdb_data.col_data[i] = kmalloc(MAX_COL_SIZE, GFP_KERNEL);
		atomic_set(&acdb_data.vocproc_col_cal[i].cal_kvaddr,
			(uint32_t)acdb_data.col_data[i]);
	}

	result = msm_audio_ion_import("audio_acdb_client",
				&acdb_data.ion_client,
				&acdb_data.ion_handle,
				atomic_read(&acdb_data.map_handle),
				NULL, 0,
				&paddr, (size_t *)&mem_len, &kvptr);
	if (result) {
		pr_err("%s: audio ION import failed, rc = %d\n",
			__func__, result);
		goto err_ion_handle;
	}
	kvaddr = (unsigned long)kvptr;
	atomic64_set(&acdb_data.paddr, paddr);
	atomic64_set(&acdb_data.kvaddr, kvaddr);
	atomic64_set(&acdb_data.mem_len, mem_len);
	mutex_unlock(&acdb_data.acdb_mutex);

	pr_debug("%s done! paddr = 0x%lx, kvaddr = 0x%lx, len = x%lx\n",
		 __func__,
		(long)atomic64_read(&acdb_data.paddr),
		(long)atomic64_read(&acdb_data.kvaddr),
		(long)atomic64_read(&acdb_data.mem_len));

	return result;
err_ion_handle:
	msm_audio_ion_free(acdb_data.ion_client, acdb_data.ion_handle);

	atomic64_set(&acdb_data.mem_len, 0);
	mutex_unlock(&acdb_data.acdb_mutex);
	return result;
}
Example #2
static int total_acct_packets(struct nf_conn *ct)
{
#if LINUX_VERSION_CODE <= KERNEL_VERSION(2, 6, 26)
	BUG_ON(ct == NULL);
	return (ct->counters[IP_CT_DIR_ORIGINAL].packets + ct->counters[IP_CT_DIR_REPLY].packets);
#else
	struct nf_conn_counter *acct;

	BUG_ON(ct == NULL);
	acct = nf_conn_acct_find(ct);
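	/* connection accounting may be disabled; report zero packets in that case */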
	if (!acct)
		return 0;
	return (atomic64_read(&acct[IP_CT_DIR_ORIGINAL].packets) + atomic64_read(&acct[IP_CT_DIR_REPLY].packets));
#endif
}
Example #3
/*
 * Call this when reinitializing a CPU.  It fixes the following potential
 * problems:
 *
 * - The ASID changed from what cpu_tlbstate thinks it is (most likely
 *   because the CPU was taken down and came back up with CR3's PCID
 *   bits clear.  CPU hotplug can do this.
 *
 * - The TLB contains junk in slots corresponding to inactive ASIDs.
 *
 * - The CPU went so far out to lunch that it may have missed a TLB
 *   flush.
 */
void initialize_tlbstate_and_flush(void)
{
	int i;
	struct mm_struct *mm = this_cpu_read(cpu_tlbstate.loaded_mm);
	u64 tlb_gen = atomic64_read(&init_mm.context.tlb_gen);
	unsigned long cr3 = __read_cr3();

	/* Assert that CR3 already references the right mm. */
	WARN_ON((cr3 & CR3_ADDR_MASK) != __pa(mm->pgd));

	/*
	 * Assert that CR4.PCIDE is set if needed.  (CR4.PCIDE initialization
	 * doesn't work like other CR4 bits because it can only be set from
	 * long mode.)
	 */
	WARN_ON(boot_cpu_has(X86_FEATURE_PCID) &&
		!(cr4_read_shadow() & X86_CR4_PCIDE));

	/* Force ASID 0 and force a TLB flush. */
	write_cr3(build_cr3(mm->pgd, 0));

	/* Reinitialize tlbstate. */
	this_cpu_write(cpu_tlbstate.last_user_mm_ibpb, LAST_USER_MM_IBPB);
	this_cpu_write(cpu_tlbstate.loaded_mm_asid, 0);
	this_cpu_write(cpu_tlbstate.next_asid, 1);
	this_cpu_write(cpu_tlbstate.ctxs[0].ctx_id, mm->context.ctx_id);
	this_cpu_write(cpu_tlbstate.ctxs[0].tlb_gen, tlb_gen);

	for (i = 1; i < TLB_NR_DYN_ASIDS; i++)
		this_cpu_write(cpu_tlbstate.ctxs[i].ctx_id, 0);
}
Example #4
static int radeon_debugfs_fence_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *)m->private;
	struct drm_device *dev = node->minor->dev;
	struct radeon_device *rdev = dev->dev_private;
	int i, j;

	for (i = 0; i < RADEON_NUM_RINGS; ++i) {
		if (!rdev->fence_drv[i].initialized)
			continue;

		radeon_fence_process(rdev, i);

		seq_printf(m, "--- ring %d ---\n", i);
		seq_printf(m, "Last signaled fence 0x%016llx\n",
			   (unsigned long long)atomic64_read(&rdev->fence_drv[i].last_seq));
		seq_printf(m, "Last emitted        0x%016"PRIx64"\n",
			   rdev->fence_drv[i].sync_seq[i]);

		for (j = 0; j < RADEON_NUM_RINGS; ++j) {
			if (i != j && rdev->fence_drv[j].initialized)
				seq_printf(m, "Last sync to ring %d 0x%016"PRIx64"\n",
					   j, rdev->fence_drv[i].sync_seq[j]);
		}
	}
	return 0;
}
Example #5
/**
 * radeon_fence_check_signaled - callback from fence_queue
 *
 * this function is called with fence_queue lock held, which is also used
 * for the fence locking itself, so unlocked variants are used for
 * fence_signal, and remove_wait_queue.
 */
static int radeon_fence_check_signaled(wait_queue_t *wait, unsigned mode, int flags, void *key)
{
	struct radeon_fence *fence;
	u64 seq;

	fence = container_of(wait, struct radeon_fence, fence_wake);

	/*
	 * We cannot use radeon_fence_process here because we're already
	 * in the waitqueue, in a call from wake_up_all.
	 */
	seq = atomic64_read(&fence->rdev->fence_drv[fence->ring].last_seq);
	if (seq >= fence->seq) {
		int ret = fence_signal_locked(&fence->base);

		if (!ret)
			FENCE_TRACE(&fence->base, "signaled from irq context\n");
		else
			FENCE_TRACE(&fence->base, "was already signaled\n");

		radeon_irq_kms_sw_irq_put(fence->rdev, fence->ring);
		__remove_wait_queue(&fence->rdev->fence_queue, &fence->fence_wake);
		fence_put(&fence->base);
	} else
		FENCE_TRACE(&fence->base, "pending\n");
	return 0;
}
Example #6
void bg_progress_get( bg_progress * self, uint64_t * value )
{
    if ( self != NULL && value != NULL )
    {
        *value = atomic64_read( &self -> value );
    }
}
Example #7
static int nft_quota_do_dump(struct sk_buff *skb, struct nft_quota *priv,
			     bool reset)
{
	u64 consumed, consumed_cap;
	u32 flags = priv->flags;

	/* Since we unconditionally increment consumed quota for each packet
	 * that we see, don't go over the quota boundary in what we send to
	 * userspace.
	 */
	consumed = atomic64_read(&priv->consumed);
	if (consumed >= priv->quota) {
		consumed_cap = priv->quota;
		flags |= NFT_QUOTA_F_DEPLETED;
	} else {
		consumed_cap = consumed;
	}

	if (nla_put_be64(skb, NFTA_QUOTA_BYTES, cpu_to_be64(priv->quota),
			 NFTA_QUOTA_PAD) ||
	    nla_put_be64(skb, NFTA_QUOTA_CONSUMED, cpu_to_be64(consumed_cap),
			 NFTA_QUOTA_PAD) ||
	    nla_put_be32(skb, NFTA_QUOTA_FLAGS, htonl(flags)))
		goto nla_put_failure;

	if (reset) {
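		/* subtract only the snapshot we reported so packets counted in the meantime are preserved */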
		atomic64_sub(consumed, &priv->consumed);
		clear_bit(NFT_QUOTA_DEPLETED_BIT, &priv->flags);
	}
	return 0;

nla_put_failure:
	return -1;
}
Example #8
void sync_sensor_state(struct ssp_data *data)
{
	unsigned char uBuf[9] = {0,};
	unsigned int uSensorCnt;
	int iRet = 0;

	iRet = set_gyro_cal(data);
	if (iRet < 0)
		ssp_errf("set_gyro_cal failed");

	iRet = set_accel_cal(data);
	if (iRet < 0)
		ssp_errf("set_accel_cal failed");

	udelay(10);

	for (uSensorCnt = 0; uSensorCnt < SENSOR_MAX; uSensorCnt++) {
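		/* re-send ADD_SENSOR with the saved delay and batching settings for every sensor that is currently enabled */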
		if (atomic64_read(&data->aSensorEnable) & (1 << uSensorCnt)) {
			s32 dMsDelay
				= get_msdelay(data->adDelayBuf[uSensorCnt]);
			memcpy(&uBuf[0], &dMsDelay, 4);
			memcpy(&uBuf[4], &data->batchLatencyBuf[uSensorCnt], 4);
			uBuf[8] = data->batchOptBuf[uSensorCnt];
			send_instruction(data, ADD_SENSOR, uSensorCnt, uBuf, 9);
			udelay(10);
		}
	}

	if (data->bProximityRawEnabled == true) {
		s32 dMsDelay = 20;
		memcpy(&uBuf[0], &dMsDelay, 4);
		send_instruction(data, ADD_SENSOR, PROXIMITY_RAW, uBuf, 4);
	}

	set_proximity_threshold(data, data->uProxHiThresh, data->uProxLoThresh);
	data->buf[PROXIMITY_SENSOR].prox = 0;
	report_sensordata(data, PROXIMITY_SENSOR, &data->buf[PROXIMITY_SENSOR]);

#if 1
	if (sec_debug_get_debug_level() > 0) {
		data->bMcuDumpMode = true;
		ssp_info("Mcu Dump Enabled");
	}

	iRet = ssp_send_cmd(data, MSG2SSP_AP_MCU_SET_DUMPMODE,
			data->bMcuDumpMode);
	if (iRet < 0)
		ssp_errf("MSG2SSP_AP_MCU_SET_DUMPMODE failed");

#else
#ifdef CONFIG_SEC_DEBUG
	data->bMcuDumpMode = sec_debug_is_enabled();
	iRet = ssp_send_cmd(data, MSG2SSP_AP_MCU_SET_DUMPMODE,
			data->bMcuDumpMode);
	if (iRet < 0)
		ssp_errf("MSG2SSP_AP_MCU_SET_DUMPMODE failed");
#endif
#endif
}
Example #9
void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
{
	s64 MIN_vruntime = -1, min_vruntime, max_vruntime = -1,
		spread, rq0_min_vruntime, spread0;
	struct rq *rq = cpu_rq(cpu);
	struct sched_entity *last;
	unsigned long flags;

#ifdef CONFIG_FAIR_GROUP_SCHED
	SEQ_printf(m, "\ncfs_rq[%d]:%s\n", cpu, task_group_path(cfs_rq->tg));
#else
	SEQ_printf(m, "\ncfs_rq[%d]:\n", cpu);
#endif
	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "exec_clock",
			SPLIT_NS(cfs_rq->exec_clock));

	raw_spin_lock_irqsave(&rq->lock, flags);
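	/* sample the leftmost/last entity vruntimes and min_vruntime under the runqueue lock */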
	if (cfs_rq->rb_leftmost)
		MIN_vruntime = (__pick_first_entity(cfs_rq))->vruntime;
	last = __pick_last_entity(cfs_rq);
	if (last)
		max_vruntime = last->vruntime;
	min_vruntime = cfs_rq->min_vruntime;
	rq0_min_vruntime = cpu_rq(0)->cfs.min_vruntime;
	raw_spin_unlock_irqrestore(&rq->lock, flags);
	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "MIN_vruntime",
			SPLIT_NS(MIN_vruntime));
	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "min_vruntime",
			SPLIT_NS(min_vruntime));
	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "max_vruntime",
			SPLIT_NS(max_vruntime));
	spread = max_vruntime - MIN_vruntime;
	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "spread",
			SPLIT_NS(spread));
	spread0 = min_vruntime - rq0_min_vruntime;
	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "spread0",
			SPLIT_NS(spread0));
	SEQ_printf(m, "  .%-30s: %d\n", "nr_spread_over",
			cfs_rq->nr_spread_over);
	SEQ_printf(m, "  .%-30s: %d\n", "nr_running", cfs_rq->nr_running);
	SEQ_printf(m, "  .%-30s: %ld\n", "load", cfs_rq->load.weight);
#ifdef CONFIG_FAIR_GROUP_SCHED
#ifdef CONFIG_SMP
	SEQ_printf(m, "  .%-30s: %lld\n", "runnable_load_avg",
			cfs_rq->runnable_load_avg);
	SEQ_printf(m, "  .%-30s: %lld\n", "blocked_load_avg",
			cfs_rq->blocked_load_avg);
	SEQ_printf(m, "  .%-30s: %lld\n", "tg_load_avg",
			(unsigned long long)atomic64_read(&cfs_rq->tg->load_avg));
	SEQ_printf(m, "  .%-30s: %lld\n", "tg_load_contrib",
			cfs_rq->tg_load_contrib);
	SEQ_printf(m, "  .%-30s: %d\n", "tg_runnable_contrib",
			cfs_rq->tg_runnable_contrib);
	SEQ_printf(m, "  .%-30s: %d\n", "tg->runnable_avg",
			atomic_read(&cfs_rq->tg->runnable_avg));
#endif

	print_cfs_group_stats(m, cpu, cfs_rq->tg);
#endif
}
Example #10
static int merge_dup(struct tracing_map_sort_entry **sort_entries,
		     unsigned int target, unsigned int dup)
{
	struct tracing_map_elt *target_elt, *elt;
	bool first_dup = (target - dup) == 1;
	int i;

	if (first_dup) {
		elt = sort_entries[target]->elt;
		target_elt = copy_elt(elt);
		if (!target_elt)
			return -ENOMEM;
		sort_entries[target]->elt = target_elt;
		sort_entries[target]->elt_copied = true;
	} else
		target_elt = sort_entries[target]->elt;

	elt = sort_entries[dup]->elt;

	for (i = 0; i < elt->map->n_fields; i++)
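		/* fold the duplicate entry's per-field sums into the surviving entry */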
		atomic64_add(atomic64_read(&elt->fields[i].sum),
			     &target_elt->fields[i].sum);

	sort_entries[dup]->dup = true;

	return 0;
}
Example #11
static int pci_perf_show(struct seq_file *m, void *v)
{
	struct zpci_dev *zdev = m->private;
	u64 *stat;
	int i;

	if (!zdev)
		return 0;
	if (!zdev->fmb)
		return seq_printf(m, "FMB statistics disabled\n");

	/* header */
	seq_printf(m, "FMB @ %p\n", zdev->fmb);
	seq_printf(m, "Update interval: %u ms\n", zdev->fmb_update);
	seq_printf(m, "Samples: %u\n", zdev->fmb->samples);
	seq_printf(m, "Last update TOD: %Lx\n", zdev->fmb->last_update);

	/* hardware counters */
	stat = (u64 *) &zdev->fmb->ld_ops;
	for (i = 0; i < 4; i++)
		seq_printf(m, "%26s:\t%llu\n",
			   pci_perf_names[i], *(stat + i));
	if (zdev->fmb->dma_valid)
		for (i = 4; i < 6; i++)
			seq_printf(m, "%26s:\t%llu\n",
				   pci_perf_names[i], *(stat + i));
	/* software counters */
	for (i = 6; i < ARRAY_SIZE(pci_perf_names); i++)
		seq_printf(m, "%26s:\t%llu\n",
			   pci_perf_names[i],
			   atomic64_read((atomic64_t *) (stat + i)));

	return 0;
}
Example #12
/**
 * radeon_fence_driver_start_ring - make the fence driver
 * ready for use on the requested ring.
 *
 * @rdev: radeon device pointer
 * @ring: ring index to start the fence driver on
 *
 * Make the fence driver ready for processing (all asics).
 * Not all asics have all rings, so each asic will only
 * start the fence driver on the rings it has.
 * Returns 0 for success, errors for failure.
 */
int radeon_fence_driver_start_ring(struct radeon_device *rdev, int ring)
{
	uint64_t index;
	int r;

	radeon_scratch_free(rdev, rdev->fence_drv[ring].scratch_reg);
	if (rdev->wb.use_event || !radeon_ring_supports_scratch_reg(rdev, &rdev->ring[ring])) {
		rdev->fence_drv[ring].scratch_reg = 0;
		index = R600_WB_EVENT_OFFSET + ring * 4;
	} else {
		r = radeon_scratch_get(rdev, &rdev->fence_drv[ring].scratch_reg);
		if (r) {
			dev_err(rdev->dev, "fence failed to get scratch register\n");
			return r;
		}
		index = RADEON_WB_SCRATCH_OFFSET +
			rdev->fence_drv[ring].scratch_reg -
			rdev->scratch.reg_base;
	}
	rdev->fence_drv[ring].cpu_addr = &rdev->wb.wb[index/4];
	rdev->fence_drv[ring].gpu_addr = rdev->wb.gpu_addr + index;
	radeon_fence_write(rdev, atomic64_read(&rdev->fence_drv[ring].last_seq), ring);
	rdev->fence_drv[ring].initialized = true;
	dev_info(rdev->dev, "fence driver on ring %d use gpu addr 0x%016llx and cpu addr 0x%p\n",
		 ring, rdev->fence_drv[ring].gpu_addr, rdev->fence_drv[ring].cpu_addr);
	return 0;
}
Example #13
static int deregister_memory(void)
{
	int	result = 0;
	int	i;
	pr_debug("%s\n", __func__);

	mutex_lock(&acdb_data.acdb_mutex);
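	/* release the Tx/Rx hardware-delay tables before tearing down the calibration memory */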
	kfree(acdb_data.hw_delay_tx.delay_info);
	kfree(acdb_data.hw_delay_rx.delay_info);

	if (atomic64_read(&acdb_data.mem_len)) {
		/* unmap all cal data */
		result = unmap_cal_tables();
		if (result < 0)
			pr_err("%s: unmap_cal_tables failed, err = %d\n",
				__func__, result);

		atomic64_set(&acdb_data.mem_len, 0);

		for (i = 0; i < MAX_VOCPROC_TYPES; i++) {
			kfree(acdb_data.col_data[i]);
			acdb_data.col_data[i] = NULL;
		}
		msm_audio_ion_free(acdb_data.ion_client, acdb_data.ion_handle);
		acdb_data.ion_client = NULL;
		acdb_data.ion_handle = NULL;
	}
	mutex_unlock(&acdb_data.acdb_mutex);
	return 0;
}
Example #14
/**
 * radeon_fence_process - process a fence
 *
 * @rdev: radeon_device pointer
 * @ring: ring index the fence is associated with
 *
 * Checks the current fence value and wakes the fence queue
 * if the sequence number has increased (all asics).
 */
void radeon_fence_process(struct radeon_device *rdev, int ring)
{
	uint64_t seq, last_seq, last_emitted;
	unsigned count_loop = 0;
	bool wake = false;

	/* Note there is a scenario here for an infinite loop but it's
	 * very unlikely to happen. For it to happen, the current polling
	 * process needs to be interrupted by another process, and that
	 * other process needs to update last_seq between the atomic read
	 * and the xchg of the current process.
	 *
	 * Moreover, for this to loop forever there would need to be a
	 * continuous stream of newly signaled fences, i.e. radeon_fence_read
	 * would need to return a different value each time for both the
	 * currently polling process and the other process that updates
	 * last_seq between our atomic read and xchg. And the value the
	 * other process sets as last_seq must be higher than the seq value
	 * we just read, which means the current process would need to be
	 * interrupted after radeon_fence_read and before the atomic xchg.
	 *
	 * To be even safer we count the number of times we loop and bail
	 * out after 10 iterations, accepting the fact that we might have
	 * temporarily set last_seq to an older value than the true last
	 * seq signaled by the hardware.
	 */
	last_seq = atomic64_read(&rdev->fence_drv[ring].last_seq);
	do {
		last_emitted = rdev->fence_drv[ring].sync_seq[ring];
		seq = radeon_fence_read(rdev, ring);
		seq |= last_seq & 0xffffffff00000000LL;
		if (seq < last_seq) {
			seq &= 0xffffffff;
			seq |= last_emitted & 0xffffffff00000000LL;
		}

		if (seq <= last_seq || seq > last_emitted) {
			break;
		}
		/* If we loop again we don't want to return without
		 * checking whether a fence has signaled, since it means
		 * the seq we just read is different from the previous one.
		 */
		wake = true;
		last_seq = seq;
		if ((count_loop++) > 10) {
			/* We looped too many times; leave, accepting that
			 * we might have recorded an older fence seq than
			 * the real last seq signaled by the hw.
			 */
			break;
		}
	} while (atomic64_xchg(&rdev->fence_drv[ring].last_seq, seq) > seq);

	if (wake)
		wake_up_all(&rdev->fence_queue);
}
Example #15
static int acdb_mmap(struct file *file, struct vm_area_struct *vma)
{
	int result = 0;
	int size = vma->vm_end - vma->vm_start;

	pr_debug("%s\n", __func__);

	if (atomic64_read(&acdb_data.mem_len)) {
		if (size <= atomic64_read(&acdb_data.mem_len)) {
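			/* map the calibration region into userspace as uncached memory */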
			vma->vm_page_prot = pgprot_noncached(
						vma->vm_page_prot);
			result = remap_pfn_range(vma,
				vma->vm_start,
				atomic64_read(&acdb_data.paddr) >> PAGE_SHIFT,
				size,
				vma->vm_page_prot);
		} else {
			pr_err("%s: size %d is greater than mem_len %ld\n",
				__func__, size,
				(long)atomic64_read(&acdb_data.mem_len));
			result = -ENOMEM;
		}
	} else {
		pr_err("%s: memory is not allocated yet\n", __func__);
		result = -ENODEV;
	}

	return result;
}
Example #16
static unsigned long get_purr(void)
{
	atomic64_t purr = ATOMIC64_INIT(0);

	on_each_cpu(cpu_get_purr, &purr, 1);

	return atomic64_read(&purr);
}
Example #17
unsigned int
seq_print_acct(struct seq_file *s, const struct nf_conn *ct, int dir)
{
	struct nf_conn_acct *acct;
	struct nf_conn_counter *counter;

	acct = nf_conn_acct_find(ct);
	if (!acct)
		return 0;

	counter = acct->counter;
	seq_printf(s, "packets=%llu bytes=%llu ",
		   (unsigned long long)atomic64_read(&counter[dir].packets),
		   (unsigned long long)atomic64_read(&counter[dir].bytes));

	return 0;
};
Example #18
/*
 * CPER record ID need to be unique even after reboot, because record
 * ID is used as index for ERST storage, while CPER records from
 * multiple boot may co-exist in ERST.
 */
u64 cper_next_record_id(void)
{
	static atomic64_t seq;

	if (!atomic64_read(&seq))
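		/* first use: seed the counter with the current time in the upper 32 bits so IDs stay unique across reboots */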
		atomic64_set(&seq, ((u64)get_seconds()) << 32);

	return atomic64_inc_return(&seq);
}
Example #19
void btrfs_dev_replace_status(struct btrfs_fs_info *fs_info,
			      struct btrfs_ioctl_dev_replace_args *args)
{
	struct btrfs_dev_replace *dev_replace = &fs_info->dev_replace;

	down_read(&dev_replace->rwsem);
	/* even if !dev_replace_is_valid, the values are good enough for
	 * the replace_status ioctl */
	args->result = BTRFS_IOCTL_DEV_REPLACE_RESULT_NO_ERROR;
	args->status.replace_state = dev_replace->replace_state;
	args->status.time_started = dev_replace->time_started;
	args->status.time_stopped = dev_replace->time_stopped;
	args->status.num_write_errors =
		atomic64_read(&dev_replace->num_write_errors);
	args->status.num_uncorrectable_read_errors =
		atomic64_read(&dev_replace->num_uncorrectable_read_errors);
	args->status.progress_1000 = btrfs_dev_replace_progress(fs_info);
	up_read(&dev_replace->rwsem);
}
Example #20
static void pci_sw_counter_show(struct seq_file *m)
{
	struct zpci_dev *zdev = m->private;
	atomic64_t *counter = &zdev->allocated_pages;
	int i;

	for (i = 0; i < ARRAY_SIZE(pci_sw_names); i++, counter++)
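		/* the software counters are consecutive atomic64_t fields starting at allocated_pages */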
		seq_printf(m, "%26s:\t%llu\n", pci_sw_names[i],
			   atomic64_read(counter));
}
Example #21
/*
 * Set cputime to sum_cputime if sum_cputime > cputime. Use cmpxchg
 * to avoid race conditions with concurrent updates to cputime.
 */
static inline void __update_gt_cputime(atomic64_t *cputime, u64 sum_cputime)
{
	u64 curr_cputime;
retry:
	curr_cputime = atomic64_read(cputime);
	if (sum_cputime > curr_cputime) {
		if (atomic64_cmpxchg(cputime, curr_cputime, sum_cputime) != curr_cputime)
			goto retry;
	}
}
Example #22
static ssize_t nx842_timehist_show(struct device *dev,
		struct device_attribute *attr, char *buf) {
	char *p = buf;
	struct nx842_devdata *local_devdata;
	atomic64_t *times;
	int bytes_remain = PAGE_SIZE;
	int bytes;
	int i;

	rcu_read_lock();
	local_devdata = rcu_dereference(devdata);
	if (!local_devdata) {
		rcu_read_unlock();
		return 0;
	}

	if (attr == &dev_attr_comp_times)
		times = local_devdata->counters->comp_times;
	else if (attr == &dev_attr_decomp_times)
		times = local_devdata->counters->decomp_times;
	else {
		rcu_read_unlock();
		return 0;
	}

	for (i = 0; i < (NX842_HIST_SLOTS - 2); i++) {
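		/* slot i counts latencies from 2^i us (0 for slot 0) up to (2^(i+1))-1 us */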
		bytes = snprintf(p, bytes_remain, "%u-%uus:\t%ld\n",
			       i ? (2<<(i-1)) : 0, (2<<i)-1,
			       atomic64_read(&times[i]));
		bytes_remain -= bytes;
		p += bytes;
	}
	/* The last bucket holds everything over
	 * 2<<(NX842_HIST_SLOTS - 2) us */
	bytes = snprintf(p, bytes_remain, "%uus - :\t%ld\n",
			2<<(NX842_HIST_SLOTS - 2),
			atomic64_read(&times[(NX842_HIST_SLOTS - 1)]));
	p += bytes;

	rcu_read_unlock();
	return p - buf;
}
Example #23
static ssize_t adc_data_read(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	bool bSuccess = false;
	u8 chTempbuf[4] = { 0 };
	s16 iSensorBuf[3] = {0, };
	int retries = 10;
	struct ssp_data *data = dev_get_drvdata(dev);
	s32 dMsDelay = 20;
	memcpy(&chTempbuf[0], &dMsDelay, 4);

	data->buf[GEOMAGNETIC_SENSOR].x = 0;
	data->buf[GEOMAGNETIC_SENSOR].y = 0;
	data->buf[GEOMAGNETIC_SENSOR].z = 0;

	if (!(atomic64_read(&data->aSensorEnable) & (1 << GEOMAGNETIC_SENSOR)))
		send_instruction(data, ADD_SENSOR, GEOMAGNETIC_SENSOR,
			chTempbuf, 4);

	do {
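		/* wait for the sensor hub to deliver a sample, then validate it */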
		msleep(60);
		if (check_data_spec(data, GEOMAGNETIC_SENSOR) == SUCCESS)
			break;
	} while (--retries);

	if (retries > 0)
		bSuccess = true;

	iSensorBuf[0] = data->buf[GEOMAGNETIC_SENSOR].x;
	iSensorBuf[1] = data->buf[GEOMAGNETIC_SENSOR].y;
	iSensorBuf[2] = data->buf[GEOMAGNETIC_SENSOR].z;

	if (!(atomic64_read(&data->aSensorEnable) & (1 << GEOMAGNETIC_SENSOR)))
		send_instruction(data, REMOVE_SENSOR, GEOMAGNETIC_SENSOR,
			chTempbuf, 4);

	pr_info("[SSP] %s - x = %d, y = %d, z = %d\n", __func__,
		iSensorBuf[0], iSensorBuf[1], iSensorBuf[2]);

	return sprintf(buf, "%s,%d,%d,%d\n", (bSuccess ? "OK" : "NG"),
		iSensorBuf[0], iSensorBuf[1], iSensorBuf[2]);
}
Example #24
static int
virtio_gpu_debugfs_irq_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct virtio_gpu_device *vgdev = node->minor->dev->dev_private;

	seq_printf(m, "fence %llu %lld\n",
		   (u64)atomic64_read(&vgdev->fence_drv.last_seq),
		   vgdev->fence_drv.sync_seq);
	return 0;
}
Example #25
void store_anc_cal(struct cal_block *cal_block)
{
	pr_debug("%s,\n", __func__);

	if (cal_block->cal_offset > atomic64_read(&acdb_data.mem_len)) {
		pr_err("%s: offset %d is > mem_len %ld\n",
			__func__, cal_block->cal_offset,
			(long)atomic64_read(&acdb_data.mem_len));
		goto done;
	}

	atomic_set(&acdb_data.anc_cal.cal_kvaddr,
		cal_block->cal_offset + atomic64_read(&acdb_data.kvaddr));
	atomic_set(&acdb_data.anc_cal.cal_paddr,
		cal_block->cal_offset + atomic64_read(&acdb_data.paddr));
	atomic_set(&acdb_data.anc_cal.cal_size,
		cal_block->cal_size);
done:
	return;
}
Example #26
/* Obtain the CPU-measurement alerts for the counter facility */
unsigned long kernel_cpumcf_alert(int clear)
{
	struct cpu_cf_events *cpuhw = this_cpu_ptr(&cpu_cf_events);
	unsigned long alert;

	alert = atomic64_read(&cpuhw->alert);
	if (clear)
		atomic64_set(&cpuhw->alert, 0);

	return alert;
}
Example #27
static int deregister_memory(void)
{
	if (atomic64_read(&acdb_data.mem_len)) {
		mutex_lock(&acdb_data.acdb_mutex);
		ion_unmap_kernel(acdb_data.ion_client, acdb_data.ion_handle);
		ion_free(acdb_data.ion_client, acdb_data.ion_handle);
		ion_client_destroy(acdb_data.ion_client);
		mutex_unlock(&acdb_data.acdb_mutex);
		atomic64_set(&acdb_data.mem_len, 0);
	}
	return 0;
}
Example #28
/**
 * radeon_fence_wait_next - wait for the next fence to signal
 *
 * @rdev: radeon device pointer
 * @ring: ring index the fence is associated with
 *
 * Wait for the next fence on the requested ring to signal (all asics).
 * Returns 0 if the next fence has passed, error for all other cases.
 * Caller must hold ring lock.
 */
int radeon_fence_wait_next(struct radeon_device *rdev, int ring)
{
	uint64_t seq[RADEON_NUM_RINGS] = {};

	seq[ring] = atomic64_read(&rdev->fence_drv[ring].last_seq) + 1ULL;
	if (seq[ring] >= rdev->fence_drv[ring].sync_seq[ring]) {
		/* nothing to wait for, last_seq is
		   already the last emitted fence */
		return -ENOENT;
	}
	return radeon_fence_wait_seq(rdev, seq, false);
}
Example #29
/**
 * radeon_fence_wait_next_locked - wait for the next fence to signal
 *
 * @rdev: radeon device pointer
 * @ring: ring index the fence is associated with
 *
 * Wait for the next fence on the requested ring to signal (all asics).
 * Returns 0 if the next fence has passed, error for all other cases.
 * Caller must hold ring lock.
 */
int radeon_fence_wait_next_locked(struct radeon_device *rdev, int ring)
{
	uint64_t seq;

	seq = atomic64_read(&rdev->fence_drv[ring].last_seq) + 1ULL;
	if (seq >= rdev->fence_drv[ring].sync_seq[ring]) {
		/* nothing to wait for, last_seq is
		   already the last emitted fence */
		return -ENOENT;
	}
	return radeon_fence_wait_seq(rdev, seq, ring, false, false);
}
Example #30
/*
 * snic_reset_stats_write - Write to reset_stats debugfs file
 * @filp: The file pointer to write from
 * @ubuf: The buffer to copy the data from.
 * @cnt: The number of bytes to write.
 * @ppos: The position in the file to start writing to.
 *
 * Description:
 * This routine writes data from user buffer @ubuf to buffer @buf and
 * resets cumulative stats of snic.
 *
 * Returns:
 * This function returns the amount of data that was written.
 */
static ssize_t
snic_reset_stats_write(struct file *filp,
		       const char __user *ubuf,
		       size_t cnt,
		       loff_t *ppos)
{
	struct snic *snic = (struct snic *) filp->private_data;
	struct snic_stats *stats = &snic->s_stats;
	u64 *io_stats_p = (u64 *) &stats->io;
	u64 *fw_stats_p = (u64 *) &stats->fw;
	char buf[64];
	unsigned long val;
	int ret;

	if (cnt >= sizeof(buf))
		return -EINVAL;

	if (copy_from_user(&buf, ubuf, cnt))
		return -EFAULT;

	buf[cnt] = '\0';

	ret = kstrtoul(buf, 10, &val);
	if (ret < 0)
		return ret;

	snic->reset_stats = val;

	if (snic->reset_stats) {
		/* The skip variable is used to avoid discrepancies between the
		 * Num IOs and IO Completions stats: don't count completions
		 * for IOs that were still active when the stats were reset.
		 */
		atomic64_set(&snic->io_cmpl_skip,
			     atomic64_read(&stats->io.active));
		memset(&stats->abts, 0, sizeof(struct snic_abort_stats));
		memset(&stats->reset, 0, sizeof(struct snic_reset_stats));
		memset(&stats->misc, 0, sizeof(struct snic_misc_stats));
		memset(io_stats_p+1,
			0,
			sizeof(struct snic_io_stats) - sizeof(u64));
		memset(fw_stats_p+1,
			0,
			sizeof(struct snic_fw_stats) - sizeof(u64));
	}

	(*ppos)++;

	SNIC_HOST_INFO(snic->shost, "Reset Op: Driver statistics.\n");

	return cnt;
}