Example #1
static int register_pmem(void)
{
	int result;
	unsigned long paddr;
	unsigned long kvaddr;
	unsigned long pmem_len;

	mutex_lock(&acdb_data.acdb_mutex);
	result = get_pmem_file(atomic_read(&acdb_data.pmem_fd),
				&paddr, &kvaddr, &pmem_len,
				&acdb_data.file);
	mutex_unlock(&acdb_data.acdb_mutex);
	if (result != 0) {
		atomic_set(&acdb_data.pmem_fd, 0);
		atomic64_set(&acdb_data.pmem_len, 0);
		pr_err("%s: Could not register PMEM!!!\n", __func__);
		goto done;
	}

	atomic64_set(&acdb_data.paddr, paddr);
	atomic64_set(&acdb_data.kvaddr, kvaddr);
	atomic64_set(&acdb_data.pmem_len, pmem_len);
	pr_debug("AUDIO_REGISTER_PMEM done! paddr = 0x%lx, "
		"kvaddr = 0x%lx, len = x%lx\n",
		(long)atomic64_read(&acdb_data.paddr),
		(long)atomic64_read(&acdb_data.kvaddr),
		(long)atomic64_read(&acdb_data.pmem_len));

done:
	return result;
}
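For reference, a minimal self-contained sketch of the pattern these examples share: atomic64_set() publishes a 64-bit value that other contexts later observe with atomic64_read(). All names below are hypothetical.

	#include <linux/atomic.h>
	#include <linux/types.h>

	static atomic64_t demo_bytes = ATOMIC64_INIT(0);

	static void demo_reset(void)
	{
		/* the store is atomic: a concurrent atomic64_read() observes
		 * either the old value or 0, never a torn half-word
		 */
		atomic64_set(&demo_bytes, 0);
	}

	static u64 demo_snapshot(void)
	{
		return (u64)atomic64_read(&demo_bytes);
	}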
Example #2
static int register_memory(void)
{
	int			result;
	unsigned long		paddr;
	unsigned long		kvaddr;
	unsigned long		mem_len;

	mutex_lock(&acdb_data.acdb_mutex);
	acdb_data.ion_client =
		msm_ion_client_create(UINT_MAX, "audio_acdb_client");
	if (IS_ERR_OR_NULL(acdb_data.ion_client)) {
		pr_err("%s: Could not register ION client!!!\n", __func__);
		result = PTR_ERR(acdb_data.ion_client);
		goto err;
	}

	acdb_data.ion_handle = ion_import_fd(acdb_data.ion_client,
		atomic_read(&acdb_data.map_handle));
	if (IS_ERR_OR_NULL(acdb_data.ion_handle)) {
		pr_err("%s: Could not import map handle!!!\n", __func__);
		result = PTR_ERR(acdb_data.ion_handle);
		goto err_ion_client;
	}

	result = ion_phys(acdb_data.ion_client, acdb_data.ion_handle,
				&paddr, (size_t *)&mem_len);
	if (result != 0) {
		pr_err("%s: Could not get phys addr!!!\n", __func__);
		goto err_ion_handle;
	}

	kvaddr = (unsigned long)ion_map_kernel(acdb_data.ion_client,
		acdb_data.ion_handle, 0);
	if (IS_ERR_OR_NULL((void *)kvaddr)) {
		pr_err("%s: Could not get kernel virt addr!!!\n", __func__);
		result = -EINVAL;
		goto err_ion_handle;
	}
	mutex_unlock(&acdb_data.acdb_mutex);

	atomic64_set(&acdb_data.paddr, paddr);
	atomic64_set(&acdb_data.kvaddr, kvaddr);
	atomic64_set(&acdb_data.mem_len, mem_len);
	pr_debug("%s done! paddr = 0x%lx, "
		"kvaddr = 0x%lx, len = x%lx\n",
		 __func__,
		(long)atomic64_read(&acdb_data.paddr),
		(long)atomic64_read(&acdb_data.kvaddr),
		(long)atomic64_read(&acdb_data.mem_len));

	return result;
err_ion_handle:
	ion_free(acdb_data.ion_client, acdb_data.ion_handle);
err_ion_client:
	ion_client_destroy(acdb_data.ion_client);
err:
	atomic64_set(&acdb_data.mem_len, 0);
	mutex_unlock(&acdb_data.acdb_mutex);
	return result;
}
Example #3
File: iova.c Project: mkrufky/linux
int init_iova_flush_queue(struct iova_domain *iovad,
			  iova_flush_cb flush_cb, iova_entry_dtor entry_dtor)
{
	int cpu;

	atomic64_set(&iovad->fq_flush_start_cnt,  0);
	atomic64_set(&iovad->fq_flush_finish_cnt, 0);

	iovad->fq = alloc_percpu(struct iova_fq);
	if (!iovad->fq)
		return -ENOMEM;

	iovad->flush_cb   = flush_cb;
	iovad->entry_dtor = entry_dtor;

	for_each_possible_cpu(cpu) {
		struct iova_fq *fq;

		fq = per_cpu_ptr(iovad->fq, cpu);
		fq->head = 0;
		fq->tail = 0;

		spin_lock_init(&fq->lock);
	}

	setup_timer(&iovad->fq_timer, fq_flush_timeout, (unsigned long)iovad);
	atomic_set(&iovad->fq_timer_on, 0);

	return 0;
}
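The snippet above uses the old setup_timer() interface, which passed the callback context as an unsigned long. For comparison, a sketch of the same initialization against the modern timer_setup()/from_timer() API, assuming fq_flush_timeout is adapted to the struct timer_list * callback signature (mainline later converted this code the same way):

	static void fq_flush_timeout(struct timer_list *t)
	{
		/* recover the containing iova_domain from its timer field */
		struct iova_domain *iovad = from_timer(iovad, t, fq_timer);

		/* ... flush-queue timeout work ... */
	}

	/* in init_iova_flush_queue(), replacing the setup_timer() call: */
	timer_setup(&iovad->fq_timer, fq_flush_timeout, 0);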
Example #4
static int __init ddump_mt_init(void)
{
	//int	ret;
	//int	index;

	ddump_nr_cpus = num_online_cpus();
	printk("James: cpu # is %d\n", ddump_nr_cpus);
	ddump_current_cpu = smp_processor_id();
	printk("James: current_cpu # is %d\n", ddump_current_cpu);
	printk("James: end_pfn is %lu, mem size is %lu\n", end_pfn, end_pfn * 4096);

	// init
	atomic64_set(&app_zero_pages, 0);
	atomic64_set(&app_non_zero_pages, 0);
	atomic64_set(&app_other_pages, 0);

	// create helper
	ddump_percpu_thread_create();

	// set up the target
	if (target_pid == -1) {
		printk("No pid addigned\n");
		return -1;
	}

	ddump_tp = find_task_by_pid_type(PIDTYPE_PID, target_pid);
	if (!ddump_tp) {
		printk("James: pid %d not found\n", target_pid);
		return -1;
	}
	printk("James: panic_thread is %p, pid %d\n", ddump_tp, ddump_tp->pid);

	run_master_thread();

	return 0;
}
Example #5
int nilfs_attach_checkpoint(struct super_block *sb, __u64 cno, int curr_mnt,
			    struct nilfs_root **rootp)
{
	struct the_nilfs *nilfs = sb->s_fs_info;
	struct nilfs_root *root;
	struct nilfs_checkpoint *raw_cp;
	struct buffer_head *bh_cp;
	int err = -ENOMEM;

	root = nilfs_find_or_create_root(
		nilfs, curr_mnt ? NILFS_CPTREE_CURRENT_CNO : cno);
	if (!root)
		return err;

	if (root->ifile)
		goto reuse; /* already attached checkpoint */

	down_read(&nilfs->ns_segctor_sem);
	err = nilfs_cpfile_get_checkpoint(nilfs->ns_cpfile, cno, 0, &raw_cp,
					  &bh_cp);
	up_read(&nilfs->ns_segctor_sem);
	if (unlikely(err)) {
		if (err == -ENOENT || err == -EINVAL) {
			printk(KERN_ERR
			       "NILFS: Invalid checkpoint "
			       "(checkpoint number=%llu)\n",
			       (unsigned long long)cno);
			err = -EINVAL;
		}
		goto failed;
	}

	err = nilfs_ifile_read(sb, root, nilfs->ns_inode_size,
			       &raw_cp->cp_ifile_inode, &root->ifile);
	if (err)
		goto failed_bh;

	atomic64_set(&root->inodes_count,
			le64_to_cpu(raw_cp->cp_inodes_count));
	atomic64_set(&root->blocks_count,
			le64_to_cpu(raw_cp->cp_blocks_count));

	nilfs_cpfile_put_checkpoint(nilfs->ns_cpfile, cno, bh_cp);

 reuse:
	*rootp = root;
	return 0;

 failed_bh:
	nilfs_cpfile_put_checkpoint(nilfs->ns_cpfile, cno, bh_cp);
 failed:
	nilfs_put_root(root);

	return err;
}
Example #6
/**
 * tracing_map_clear - Clear a tracing_map
 * @map: The tracing_map to clear
 *
 * Resets the tracing map to a cleared or initial state.  The
 * tracing_map_elts are all cleared, and the array of struct
 * tracing_map_entry is reset to an initialized state.
 *
 * Callers should make sure there are no writers actively inserting
 * into the map before calling this.
 */
void tracing_map_clear(struct tracing_map *map)
{
	unsigned int i;

	atomic_set(&map->next_elt, -1);
	atomic64_set(&map->hits, 0);
	atomic64_set(&map->drops, 0);

	tracing_map_array_clear(map->map);

	for (i = 0; i < map->max_elts; i++)
		tracing_map_elt_clear(*(TRACING_MAP_ELT(map->elts, i)));
}
Example #7
static int register_memory(void)
{
	int			result;
	int			i;
	ion_phys_addr_t		paddr;
	void                    *kvptr;
	unsigned long		kvaddr;
	unsigned long		mem_len;
	pr_debug("%s\n", __func__);

	mutex_lock(&acdb_data.acdb_mutex);
	allocate_hw_delay_entries();
	for (i = 0; i < MAX_VOCPROC_TYPES; i++) {
		acdb_data.col_data[i] = kmalloc(MAX_COL_SIZE, GFP_KERNEL);
		if (!acdb_data.col_data[i]) {
			result = -ENOMEM;
			goto err_col_data;
		}
		atomic_set(&acdb_data.vocproc_col_cal[i].cal_kvaddr,
			(uint32_t)acdb_data.col_data[i]);
	}

	result = msm_audio_ion_import("audio_acdb_client",
				&acdb_data.ion_client,
				&acdb_data.ion_handle,
				atomic_read(&acdb_data.map_handle),
				NULL, 0,
				&paddr, (size_t *)&mem_len, &kvptr);
	if (result) {
		pr_err("%s: audio ION import failed, rc = %d\n",
			__func__, result);
		/* result already holds the error; a failed import leaves
		 * no ION handle to free
		 */
		goto err_col_data;
	}
	kvaddr = (unsigned long)kvptr;
	atomic64_set(&acdb_data.paddr, paddr);
	atomic64_set(&acdb_data.kvaddr, kvaddr);
	atomic64_set(&acdb_data.mem_len, mem_len);
	mutex_unlock(&acdb_data.acdb_mutex);

	pr_debug("%s done! paddr = 0x%lx, kvaddr = 0x%lx, len = x%lx\n",
		 __func__,
		(long)atomic64_read(&acdb_data.paddr),
		(long)atomic64_read(&acdb_data.kvaddr),
		(long)atomic64_read(&acdb_data.mem_len));

	return result;

err_col_data:
	for (i = 0; i < MAX_VOCPROC_TYPES; i++) {
		kfree(acdb_data.col_data[i]);
		acdb_data.col_data[i] = NULL;
	}
	atomic64_set(&acdb_data.mem_len, 0);
	mutex_unlock(&acdb_data.acdb_mutex);
	return result;
}
Example #8
static int nft_quota_do_init(const struct nlattr * const tb[],
			     struct nft_quota *priv)
{
	unsigned long flags = 0;
	u64 quota, consumed = 0;

	if (!tb[NFTA_QUOTA_BYTES])
		return -EINVAL;

	quota = be64_to_cpu(nla_get_be64(tb[NFTA_QUOTA_BYTES]));
	if (quota > S64_MAX)
		return -EOVERFLOW;

	if (tb[NFTA_QUOTA_CONSUMED]) {
		consumed = be64_to_cpu(nla_get_be64(tb[NFTA_QUOTA_CONSUMED]));
		if (consumed > quota)
			return -EINVAL;
	}

	if (tb[NFTA_QUOTA_FLAGS]) {
		flags = ntohl(nla_get_be32(tb[NFTA_QUOTA_FLAGS]));
		if (flags & ~NFT_QUOTA_F_INV)
			return -EINVAL;
		if (flags & NFT_QUOTA_F_DEPLETED)
			return -EOPNOTSUPP;
	}

	priv->quota = quota;
	priv->flags = flags;
	atomic64_set(&priv->consumed, consumed);

	return 0;
}
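For context, the consuming side that pairs with this initializer accumulates packet sizes into priv->consumed with a single atomic read-modify-write, so the quota check never loses updates between concurrent packets. A hedged sketch modeled on the matching eval path of nft_quota:

	static inline bool nft_overquota(struct nft_quota *priv,
					 const struct sk_buff *skb)
	{
		/* add this packet's bytes and test the new total atomically */
		return atomic64_add_return(skb->len, &priv->consumed) >=
		       priv->quota;
	}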
Example #9
static int deregister_memory(void)
{
	int	result = 0;
	int	i;
	pr_debug("%s\n", __func__);

	mutex_lock(&acdb_data.acdb_mutex);
	kfree(acdb_data.hw_delay_tx.delay_info);
	kfree(acdb_data.hw_delay_rx.delay_info);

	if (atomic64_read(&acdb_data.mem_len)) {
		/* unmap all cal data */
		result = unmap_cal_tables();
		if (result < 0)
			pr_err("%s: unmap_cal_tables failed, err = %d\n",
				__func__, result);

		atomic64_set(&acdb_data.mem_len, 0);

		for (i = 0; i < MAX_VOCPROC_TYPES; i++) {
			kfree(acdb_data.col_data[i]);
			acdb_data.col_data[i] = NULL;
		}
		msm_audio_ion_free(acdb_data.ion_client, acdb_data.ion_handle);
		acdb_data.ion_client = NULL;
		acdb_data.ion_handle = NULL;
	}
	mutex_unlock(&acdb_data.acdb_mutex);
	return 0;
}
Example #10
static int nft_quota_init(const struct nft_ctx *ctx,
                          const struct nft_expr *expr,
                          const struct nlattr * const tb[])
{
    struct nft_quota *priv = nft_expr_priv(expr);
    u32 flags = 0;
    u64 quota;

    if (!tb[NFTA_QUOTA_BYTES])
        return -EINVAL;

    quota = be64_to_cpu(nla_get_be64(tb[NFTA_QUOTA_BYTES]));
    if (quota > S64_MAX)
        return -EOVERFLOW;

    if (tb[NFTA_QUOTA_FLAGS]) {
        flags = ntohl(nla_get_be32(tb[NFTA_QUOTA_FLAGS]));
        if (flags & ~NFT_QUOTA_F_INV)
            return -EINVAL;
    }

    priv->quota = quota;
    priv->invert = (flags & NFT_QUOTA_F_INV) ? true : false;
    atomic64_set(&priv->remain, quota);

    return 0;
}
Example #11
static int alloc_pending_queues(struct pending_qinfo *pqinfo, u32 qlen,
				u32 nr_queues)
{
	u32 i;
	size_t size;
	int ret;
	struct pending_queue *queue = NULL;

	pqinfo->nr_queues = nr_queues;
	pqinfo->qlen = qlen;

	size = (qlen * sizeof(struct pending_entry));

	for_each_pending_queue(pqinfo, queue, i) {
		queue->head = kzalloc((size), GFP_KERNEL);
		if (!queue->head) {
			ret = -ENOMEM;
			goto pending_qfail;
		}

		queue->front = 0;
		queue->rear = 0;
		atomic64_set((&queue->pending_count), (0));

		/* init queue spin lock */
		spin_lock_init(&queue->lock);
	}

	return 0;

pending_qfail:
	/* minimal reconstruction of the truncated error path: release any
	 * queues allocated before the failure (kfree(NULL) is a no-op)
	 */
	for_each_pending_queue(pqinfo, queue, i) {
		kfree(queue->head);
		queue->head = NULL;
	}

	return ret;
}
Example #12
/*
 * CPER record ID need to be unique even after reboot, because record
 * ID is used as index for ERST storage, while CPER records from
 * multiple boot may co-exist in ERST.
 */
u64 cper_next_record_id(void)
{
	static atomic64_t seq;

	if (!atomic64_read(&seq))
		atomic64_set(&seq, ((u64)get_seconds()) << 32);

	return atomic64_inc_return(&seq);
}
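A hedged usage sketch: whoever creates a CPER record stamps its header with the ID before handing it to ERST. struct cper_record_header and its record_id field come from <linux/cper.h>; the helper name is hypothetical.

	#include <linux/cper.h>

	static void stamp_record(struct cper_record_header *hdr)
	{
		/* unique and monotonically increasing, even across reboots */
		hdr->record_id = cper_next_record_id();
	}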
Example #13
static void symmetric_key_init(struct noise_symmetric_key *key)
{
	spin_lock_init(&key->counter.receive.lock);
	atomic64_set(&key->counter.counter, 0);
	memset(key->counter.receive.backtrack, 0,
	       sizeof(key->counter.receive.backtrack));
	key->birthdate = ktime_get_boot_fast_ns();
	key->is_valid = true;
}
Example #14
/* Obtain the CPU-measurement alerts for the counter facility */
unsigned long kernel_cpumcf_alert(int clear)
{
	struct cpu_cf_events *cpuhw = this_cpu_ptr(&cpu_cf_events);
	unsigned long alert;

	alert = atomic64_read(&cpuhw->alert);
	if (clear)
		atomic64_set(&cpuhw->alert, 0);

	return alert;
}
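Note the window between the atomic64_read() and the atomic64_set(): an alert raised in between is silently dropped. A hedged sketch of a race-free read-and-clear variant using atomic64_xchg(), keeping the same shape (the _xchg name is hypothetical):

	unsigned long kernel_cpumcf_alert_xchg(int clear)
	{
		struct cpu_cf_events *cpuhw = this_cpu_ptr(&cpu_cf_events);

		if (clear)
			/* swap in 0 and return the old alerts atomically */
			return atomic64_xchg(&cpuhw->alert, 0);

		return atomic64_read(&cpuhw->alert);
	}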
Example #15
unsigned long long notrace sched_clock(void)
{
	u32 cyc = read_sched_clock();
#if defined(CONFIG_QC_ABNORMAL_DEBUG_CODE)
	u64 local = cyc_to_sched_clock(cyc, sched_clock_mask);
	atomic64_set(&last_ns, local);
	return local;
#else
	return cyc_to_sched_clock(cyc, sched_clock_mask);
#endif
}
Example #16
static void tracing_map_elt_clear(struct tracing_map_elt *elt)
{
	unsigned i;

	for (i = 0; i < elt->map->n_fields; i++)
		if (elt->fields[i].cmp_fn == tracing_map_cmp_atomic64)
			atomic64_set(&elt->fields[i].sum, 0);

	if (elt->map->ops && elt->map->ops->elt_clear)
		elt->map->ops->elt_clear(elt);
}
Example #17
int init_some_parameters(void)
{	
	int cpu;

	atomic64_set(&save_num, 0L);
	atomic64_set(&sum_num, 0L);
	atomic64_set(&skb_num, 0L);
	atomic64_set(&rdl, 0L);
	atomic64_set(&rdf, 0L);
	
	skb_wq = create_workqueue("kread_queue");
	if (!skb_wq)
		return -ENOMEM;
	
	for_each_online_cpu(cpu) {
		INIT_LIST_HEAD(&per_cpu(skb_list, cpu));
	}
	
	return 0;
}
Example #18
static int deregister_memory(void)
{
	if (atomic64_read(&acdb_data.mem_len)) {
		mutex_lock(&acdb_data.acdb_mutex);
		ion_unmap_kernel(acdb_data.ion_client, acdb_data.ion_handle);
		ion_free(acdb_data.ion_client, acdb_data.ion_handle);
		ion_client_destroy(acdb_data.ion_client);
		mutex_unlock(&acdb_data.acdb_mutex);
		atomic64_set(&acdb_data.mem_len, 0);
	}
	return 0;
}
Example #19
/**
 * radeon_fence_driver_init_ring - init the fence driver
 * for the requested ring.
 *
 * @rdev: radeon device pointer
 * @ring: ring index to start the fence driver on
 *
 * Init the fence driver for the requested ring (all asics).
 * Helper function for radeon_fence_driver_init().
 */
static void radeon_fence_driver_init_ring(struct radeon_device *rdev, int ring)
{
	int i;

	rdev->fence_drv[ring].scratch_reg = -1;
	rdev->fence_drv[ring].cpu_addr = NULL;
	rdev->fence_drv[ring].gpu_addr = 0;
	for (i = 0; i < RADEON_NUM_RINGS; ++i)
		rdev->fence_drv[ring].sync_seq[i] = 0;
	atomic64_set(&rdev->fence_drv[ring].last_seq, 0);
	rdev->fence_drv[ring].initialized = false;
}
Example #20
/*
 * snic_reset_stats_write - Write to reset_stats debugfs file
 * @filp: The file pointer to write from
 * @ubuf: The buffer to copy the data from.
 * @cnt: The number of bytes to write.
 * @ppos: The position in the file to start writing to.
 *
 * Description:
 * This routine writes data from user buffer @ubuf to buffer @buf and
 * resets cumulative stats of snic.
 *
 * Returns:
 * This function returns the amount of data that was written.
 */
static ssize_t
snic_reset_stats_write(struct file *filp,
		       const char __user *ubuf,
		       size_t cnt,
		       loff_t *ppos)
{
	struct snic *snic = (struct snic *) filp->private_data;
	struct snic_stats *stats = &snic->s_stats;
	u64 *io_stats_p = (u64 *) &stats->io;
	u64 *fw_stats_p = (u64 *) &stats->fw;
	char buf[64];
	unsigned long val;
	int ret;

	if (cnt >= sizeof(buf))
		return -EINVAL;

	if (copy_from_user(&buf, ubuf, cnt))
		return -EFAULT;

	buf[cnt] = '\0';

	ret = kstrtoul(buf, 10, &val);
	if (ret < 0)
		return ret;

	snic->reset_stats = val;

	if (snic->reset_stats) {
		/* The skip counter avoids discrepancies between the Num IOs
		 * and IO Completions stats: completions are not counted for
		 * IOs that were already active when the stats were reset.
		 */
		atomic64_set(&snic->io_cmpl_skip,
			     atomic64_read(&stats->io.active));
		memset(&stats->abts, 0, sizeof(struct snic_abort_stats));
		memset(&stats->reset, 0, sizeof(struct snic_reset_stats));
		memset(&stats->misc, 0, sizeof(struct snic_misc_stats));
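		/* the +1 skips the leading u64 (the active I/O counter) so
		 * that in-flight accounting survives the reset
		 */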
		memset(io_stats_p+1,
			0,
			sizeof(struct snic_io_stats) - sizeof(u64));
		memset(fw_stats_p+1,
			0,
			sizeof(struct snic_fw_stats) - sizeof(u64));
	}

	(*ppos)++;

	SNIC_HOST_INFO(snic->shost, "Reset Op: Driver statistics.\n");

	return cnt;
}
Example #21
/* Returns the last monitor_clock value if less than monitor_interval has
 * passed since then; otherwise advances the clock toward "now" in whole
 * steps of monitor_interval and returns the new value. */
static inline psched_time_t clock_step (psched_time_t now) {
	/* the atomic64 lock seems redundant. TODO: remove it */
	psched_time_t clock = (u64) atomic64_read(&monitor_clock);

	if (likely(clock + monitor_interval > now))
		return clock;

	while (clock + monitor_interval < now)
		clock += monitor_interval;

	atomic64_set(&monitor_clock, clock);
	return clock;
}
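The comment's TODO aside, note that the read-modify-write above is not atomic as a whole: two callers can both read the same stale clock and race on the set. A hedged sketch of a lock-free variant using atomic64_cmpxchg(), reusing the names above (the _cmpxchg suffix is hypothetical):

	static inline psched_time_t clock_step_cmpxchg(psched_time_t now)
	{
		u64 old = atomic64_read(&monitor_clock);
		u64 clock = old;

		if (likely(clock + monitor_interval > now))
			return clock;

		while (clock + monitor_interval < now)
			clock += monitor_interval;

		/* publish only if nobody advanced the clock meanwhile;
		 * a losing caller still returns its locally stepped value
		 */
		atomic64_cmpxchg(&monitor_clock, old, clock);
		return clock;
	}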
Example #22
/*
 * CPER record ID need to be unique even after reboot, because record
 * ID is used as index for ERST storage, while CPER records from
 * multiple boot may co-exist in ERST.
 */
u64 cper_next_record_id(void)
{
	static atomic64_t seq;

	if (!atomic64_read(&seq)) {
		time64_t time = ktime_get_real_seconds();

		/*
		 * This code is unlikely to still be needed in year 2106,
		 * but just in case, let's use a few more bits for timestamps
		 * after y2038 to be sure they keep increasing monotonically
		 * for the next few hundred years...
		 */
		if (time < 0x80000000)
			atomic64_set(&seq, (ktime_get_real_seconds()) << 32);
		else
			atomic64_set(&seq, 0x8000000000000000ull |
					   ktime_get_real_seconds() << 24);
	}

	return atomic64_inc_return(&seq);
}
Example #23
/*
    allocate memory for fragment mapping table, set LUN size.
 */
int _lun_init(sce_t * sce, lun_t * lun, sector_t nr_sctr)
{
	uint32_t nr_frag;
	int err = 0;

	ASSERT(sce);
	ASSERT(lun);
	ASSERT(!lun->fragmap);
	ASSERT(nr_sctr > 0);

	/* set sce handle */
	lun->scehndl = (sce_hndl_t) sce;

	spin_lock_init(&lun->lock);

	/* set LUN size */
	lun->nr_sctr = nr_sctr;
	lun->nr_frag = nr_frag =
			(uint32_t) ((nr_sctr + SCE_SCTRPERFRAG - 1) / SCE_SCTRPERFRAG);

	/* allocate memory for fragmap */
	lun->fragmap = vmalloc(lun->nr_frag * sizeof(fragdesc_t));
	if (!lun->fragmap)
	{
		err = ENOMEM;
		goto out;
	}
	/* initialize every byte to 0x55 (binary 01010101) */
	memset(lun->fragmap, 0x55, sizeof(fragdesc_t) * lun->nr_frag);

	atomic64_set(&lun->stats.alloc_sctrs, 0);
	atomic64_set(&lun->stats.valid_sctrs, 0);
	atomic64_set(&lun->stats.populations, 0);
	atomic64_set(&lun->stats.reads, 0);
	atomic64_set(&lun->stats.read_sctrs, 0);
	atomic64_set(&lun->stats.read_hits, 0);
	atomic64_set(&lun->stats.writes, 0);
	atomic64_set(&lun->stats.write_sctrs, 0);
	atomic64_set(&lun->stats.write_hits, 0);

out:
	return err;
}
Example #24
/*
 * fnic_reset_stats_write - Write to reset_stats debugfs file
 * @filp: The file pointer to write from.
 * @ubuf: The buffer to copy the data from.
 * @cnt: The number of bytes to write.
 * @ppos: The position in the file to start writing to.
 *
 * Description:
 * This routine writes data from user buffer @ubuf to buffer @buf and
 * resets cumulative stats of fnic.
 *
 * Returns:
 * This function returns the amount of data that was written.
 */
static ssize_t fnic_reset_stats_write(struct file *file,
					const char __user *ubuf,
					size_t cnt, loff_t *ppos)
{
	struct stats_debug_info *debug = file->private_data;
	struct fnic *fnic = (struct fnic *)debug->i_private;
	struct fnic_stats *stats = &fnic->fnic_stats;
	u64 *io_stats_p = (u64 *)&stats->io_stats;
	u64 *fw_stats_p = (u64 *)&stats->fw_stats;
	char buf[64];
	unsigned long val;
	int ret;

	if (cnt >= sizeof(buf))
		return -EINVAL;

	if (copy_from_user(&buf, ubuf, cnt))
		return -EFAULT;

	buf[cnt] = 0;

	ret = kstrtoul(buf, 10, &val);
	if (ret < 0)
		return ret;

	fnic->reset_stats = val;

	if (fnic->reset_stats) {
		/* The skip counter avoids discrepancies between the Num IOs
		 * and IO Completions stats: completions are not counted for
		 * IOs that were already active when the stats were reset.
		 */
		atomic64_set(&fnic->io_cmpl_skip,
			atomic64_read(&stats->io_stats.active_ios));
		memset(&stats->abts_stats, 0, sizeof(struct abort_stats));
		memset(&stats->term_stats, 0,
			sizeof(struct terminate_stats));
		memset(&stats->reset_stats, 0, sizeof(struct reset_stats));
		memset(&stats->misc_stats, 0, sizeof(struct misc_stats));
		memset(&stats->vlan_stats, 0, sizeof(struct vlan_stats));
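		/* the +1 skips the leading u64 (the active I/O counter) so
		 * that in-flight accounting survives the reset
		 */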
		memset(io_stats_p+1, 0,
			sizeof(struct io_path_stats) - sizeof(u64));
		memset(fw_stats_p+1, 0,
			sizeof(struct fw_stats) - sizeof(u64));
		ktime_get_real_ts64(&stats->stats_timestamps.last_reset_time);
	}

	(*ppos)++;
	return cnt;
}
Example #25
static int deregister_memory(void)
{
	int i;

	if (atomic64_read(&acdb_data.mem_len)) {
		mutex_lock(&acdb_data.acdb_mutex);
		atomic64_set(&acdb_data.mem_len, 0);

		for (i = 0; i < MAX_VOCPROC_TYPES; i++) {
			kfree(acdb_data.col_data[i]);
			acdb_data.col_data[i] = NULL;
		}
		msm_audio_ion_free(acdb_data.ion_client, acdb_data.ion_handle);
		mutex_unlock(&acdb_data.acdb_mutex);
	}
	return 0;
}
Example #26
void quadd_hrt_stop(void)
{
	struct quadd_ctx *ctx = hrt.quadd_ctx;

	pr_info("Stop hrt, number of samples: %llu\n",
		atomic64_read(&hrt.counter_samples));

	if (ctx->pl310)
		ctx->pl310->stop();

	quadd_ma_stop(&hrt);

	hrt.active = 0;

	atomic64_set(&hrt.counter_samples, 0);

	/* reset_cpu_ctx(); */
}
Example #27
rc_t bg_progress_make( bg_progress ** bgp, uint64_t max_value, uint32_t sleep_time, uint32_t digits )
{
    rc_t rc = 0;
    bg_progress * p = calloc( 1, sizeof *p );
    if ( p == NULL )
        rc = RC( rcVDB, rcNoTarg, rcConstructing, rcMemory, rcExhausted );
    else
    {
        atomic64_set( &p -> max_value, max_value );
        p -> sleep_time = sleep_time == 0 ? 200 : sleep_time;
        p -> digits = digits == 0 ? 2 : digits;
        rc = KThreadMake( & p -> thread, bg_progress_thread_func, p );
        if ( rc == 0 )
            *bgp = p;
        else
            free( p );
    }
    return rc;
}
Example #28
static int deregister_memory(void)
{
	mutex_lock(&acdb_data.acdb_mutex);
	kfree(acdb_data.hw_delay_tx.delay_info);
	kfree(acdb_data.hw_delay_rx.delay_info);
	mutex_unlock(&acdb_data.acdb_mutex);

	if (atomic64_read(&acdb_data.mem_len)) {
		mutex_lock(&acdb_data.acdb_mutex);
		atomic_set(&acdb_data.vocstrm_total_cal_size, 0);
		atomic_set(&acdb_data.vocproc_total_cal_size, 0);
		atomic_set(&acdb_data.vocvol_total_cal_size, 0);
		atomic64_set(&acdb_data.mem_len, 0);
		ion_unmap_kernel(acdb_data.ion_client, acdb_data.ion_handle);
		ion_free(acdb_data.ion_client, acdb_data.ion_handle);
		ion_client_destroy(acdb_data.ion_client);
		mutex_unlock(&acdb_data.acdb_mutex);
	}
	return 0;
}
Example #29
struct quadd_hrt_ctx *quadd_hrt_init(struct quadd_ctx *ctx)
{
	int cpu_id;
	u64 period;
	long freq;
	struct quadd_cpu_context *cpu_ctx;

	hrt.quadd_ctx = ctx;
	hrt.active = 0;

	freq = ctx->param.freq;
	freq = max_t(long, QUADD_HRT_MIN_FREQ, freq);
	period = NSEC_PER_SEC / freq;
	hrt.sample_period = period;

	if (ctx->param.ma_freq > 0)
		hrt.ma_period = MSEC_PER_SEC / ctx->param.ma_freq;
	else
		hrt.ma_period = 0;

	atomic64_set(&hrt.counter_samples, 0);
	init_arch_timer();

	hrt.cpu_ctx = alloc_percpu(struct quadd_cpu_context);
	if (!hrt.cpu_ctx)
		return ERR_PTR(-ENOMEM);

	for (cpu_id = 0; cpu_id < nr_cpu_ids; cpu_id++) {
		cpu_ctx = per_cpu_ptr(hrt.cpu_ctx, cpu_id);

		atomic_set(&cpu_ctx->nr_active, 0);

		cpu_ctx->active_thread.pid = -1;
		cpu_ctx->active_thread.tgid = -1;

		init_hrtimer(cpu_ctx);
	}

	return &hrt;
}
Example #30
static void __ice_agent_initialize(struct ice_agent *ag) {
	struct call_media *media = ag->media;
	struct call *call = ag->call;

	ag->candidate_hash = g_hash_table_new(__cand_hash, __cand_equal);
	ag->pair_hash = g_hash_table_new(__pair_hash, __pair_equal);
	ag->transaction_hash = g_hash_table_new(__trans_hash, __trans_equal);
	ag->foundation_hash = g_hash_table_new(__found_hash, __found_equal);
	ag->agent_flags = 0;
	bf_copy(&ag->agent_flags, ICE_AGENT_CONTROLLING, &media->media_flags, MEDIA_FLAG_ICE_CONTROLLING);
	ag->local_interface = media->interface;
	ag->desired_family = media->desired_family;
	ag->nominated_pairs = g_tree_new(__pair_prio_cmp);
	ag->valid_pairs = g_tree_new(__pair_prio_cmp);
	ag->succeeded_pairs = g_tree_new(__pair_prio_cmp);
	ag->all_pairs = g_tree_new(__pair_prio_cmp);

	create_random_ice_string(call, &ag->ufrag[1], 8);
	create_random_ice_string(call, &ag->pwd[1], 26);

	atomic64_set(&ag->last_activity, poller_now);
}