Example #1
/*
 * Free all kernel contexts that are not currently in use.
 *   Returns 0 if all were freed, else the number of in-use contexts.
 */
static int gru_free_kernel_contexts(void)
{
	struct gru_blade_state *bs;
	struct gru_thread_state *kgts;
	int bid, ret = 0;

	for (bid = 0; bid < GRU_MAX_BLADES; bid++) {
		bs = gru_base[bid];
		if (!bs)
			continue;

		/* Ignore busy contexts. Don't want to block here.  */
		if (down_write_trylock(&bs->bs_kgts_sema)) {
			kgts = bs->bs_kgts;
			if (kgts && kgts->ts_gru)
				gru_unload_context(kgts, 0);
			bs->bs_kgts = NULL;
			up_write(&bs->bs_kgts_sema);
			kfree(kgts);
		} else {
			ret++;
		}
	}
	return ret;
}
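A caller typically treats a non-zero return as "some contexts are still busy" and either retries or gives up. A minimal sketch under that assumption (gru_teardown() and the retry count are illustrative, not from the original source):

static int gru_teardown(void)
{
	int busy, retries = 5;

	/* gru_free_kernel_contexts() skips busy contexts, so retry a few times */
	while ((busy = gru_free_kernel_contexts()) && --retries)
		msleep(10);

	return busy ? -EBUSY : 0;
}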
Example #2
static int
process_set(struct request_state *req)
{
	/* Spin until the write lock is taken; down_write_trylock() returns 0 on failure */
	while (!down_write_trylock(&rwlock))
		continue;
	req->err = ub_cache_replace(req->key, req->len_key, req->data, req->len_data);
	up_write(&rwlock);

	/*
	 * TODO: this need not generate a new skb on every run, but for now
	 * it's simpler to do it this way.
	 */
	req->skb_tx = ub_skb_set_up(32);
	if (req->err == 0)
		ub_push_data_to_skb(req->skb_tx, "STORED\r\n", strlen("STORED\r\n"));
	else if (req->err == -ENOMEM)
		ub_push_data_to_skb(req->skb_tx, "NOT_STORED\r\n", strlen("NOT_STORED\r\n"));
	else {
		char errstring[32];	/* large enough for "SERVER ERROR %d\r\n" with any int */
		int len_errstring = snprintf(errstring, sizeof(errstring),
			"SERVER ERROR %d\r\n", req->err);

		ub_push_data_to_skb(req->skb_tx, errstring, len_errstring);
	}

	return 0;
}
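The loop above busy-waits on down_write_trylock() until it succeeds. If sleeping is acceptable in this context, the same critical section can simply block on the lock; a minimal alternative sketch, assuming nothing else about the surrounding code:

	down_write(&rwlock);
	req->err = ub_cache_replace(req->key, req->len_key,
				    req->data, req->len_data);
	up_write(&rwlock);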
Example #3
File: pm.c Project: avagin/linux
static int wil_suspend_radio_off(struct wil6210_priv *wil)
{
	int rc = 0;
	bool active_ifaces;

	wil_dbg_pm(wil, "suspend radio off\n");

	rc = down_write_trylock(&wil->mem_lock);
	if (!rc) {
		wil_err(wil,
			"device is busy. down_write_trylock failed, returned (0x%x)\n",
			rc);
		wil->suspend_stats.rejected_by_host++;
		return -EBUSY;
	}

	set_bit(wil_status_suspending, wil->status);
	up_write(&wil->mem_lock);

	/* if netif up, hardware is alive, shut it down */
	mutex_lock(&wil->vif_mutex);
	active_ifaces = wil_has_active_ifaces(wil, true, false);
	mutex_unlock(&wil->vif_mutex);

	if (active_ifaces) {
		rc = wil_down(wil);
		if (rc) {
			wil_err(wil, "wil_down : %d\n", rc);
			wil->suspend_stats.r_off.failed_suspends++;
			goto out;
		}
	}

	/* Disable PCIe IRQ to prevent sporadic IRQs when PCIe is suspending */
	wil_dbg_pm(wil, "Disabling PCIe IRQ before suspending\n");
	wil_disable_irq(wil);

	if (wil->platform_ops.suspend) {
		rc = wil->platform_ops.suspend(wil->platform_handle, false);
		if (rc) {
			wil_enable_irq(wil);
			wil->suspend_stats.r_off.failed_suspends++;
			goto out;
		}
	}

	set_bit(wil_status_suspended, wil->status);

out:
	clear_bit(wil_status_suspending, wil->status);
	wil_dbg_pm(wil, "suspend radio off: %d\n", rc);

	return rc;
}
Example #4
static ssize_t foo_store(struct kobject *kobj, struct kobj_attribute *attr,
	const char *buf, size_t count)
{
	/* down_write_trylock returns zero on failure */
	if (!down_write_trylock(&rw_sem))
		return -ERESTARTSYS;

	/* sysfs gives us a NUL-terminated buf; foo_kbuff is assumed to be large enough */
	memcpy(foo_kbuff, buf, strlen(buf) + 1);
	up_write(&rw_sem);

	return count;
}
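A read-side counterpart follows the same pattern with down_read_trylock(), which likewise returns zero on failure. A minimal foo_show() sketch, assuming foo_kbuff holds a NUL-terminated string (the function is illustrative, not part of the original example):

static ssize_t foo_show(struct kobject *kobj, struct kobj_attribute *attr,
	char *buf)
{
	ssize_t len;

	/* down_read_trylock() also returns zero on failure */
	if (!down_read_trylock(&rw_sem))
		return -ERESTARTSYS;

	len = scnprintf(buf, PAGE_SIZE, "%s", foo_kbuff);
	up_read(&rw_sem);

	return len;
}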
Example #5
static int osd_object_write_locked(const struct lu_env *env,
				   struct dt_object *dt)
{
	struct osd_object *obj = osd_dt_obj(dt);
	int rc = 1;

	LASSERT(osd_invariant(obj));

	if (down_write_trylock(&obj->oo_sem)) {
		rc = 0;
		up_write(&obj->oo_sem);
	}
	return rc;
}
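Note the inverted sense: the function reports 1 when the semaphore is already write-locked (the trylock fails) and 0 when it is free. It is typically used as an assertion that the caller already holds the write lock; a minimal sketch (osd_object_update() is illustrative, not from the original source):

static void osd_object_update(const struct lu_env *env, struct dt_object *dt)
{
	/* the object must already be write-locked by the caller */
	LASSERT(osd_object_write_locked(env, dt));

	/* ... modify the object under the write lock ... */
}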
Example #6
/*
 * Validate and return the priority_cached flag.  We know if it's zero
 * that we don't need to scan, since we immediately set it non-zero
 * when we first consider a MAP_CACHE_PRIORITY mapping.
 *
 * We only _try_ to acquire the mmap_sem semaphore; if we can't acquire it,
 * since we're in an interrupt context (servicing switch_mm) we don't
 * worry about it and don't unset the "priority_cached" field.
 * Presumably we'll come back later and have more luck and clear
 * the value then; for now we'll just keep the cache marked for priority.
 */
static unsigned int update_priority_cached(struct mm_struct *mm)
{
	if (mm->context.priority_cached && down_write_trylock(&mm->mmap_sem)) {
		struct vm_area_struct *vm;
		for (vm = mm->mmap; vm; vm = vm->vm_next) {
			if (hv_pte_get_cached_priority(vm->vm_page_prot))
				break;
		}
		if (vm == NULL)
			mm->context.priority_cached = 0;
		up_write(&mm->mmap_sem);
	}
	return mm->context.priority_cached;
}
Example #7
struct vm_area_struct *our_vma(struct mm_struct *mm, unsigned long addr,
			       size_t count)
{
	struct vm_area_struct *vma, *ret = NULL;

	/* mmap_sem must have been held by caller. */
	LASSERT(!down_write_trylock(&mm->mmap_sem));

	for (vma = find_vma(mm, addr);
	    vma && vma->vm_start < (addr + count); vma = vma->vm_next) {
		if (vma->vm_ops && vma->vm_ops == &ll_file_vm_ops &&
		    vma->vm_flags & VM_SHARED) {
			ret = vma;
			break;
		}
	}
	return ret;
}
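Here down_write_trylock() serves purely as an assertion: if the trylock succeeds, nobody held mmap_sem and the locking contract was violated. A caller is therefore expected to look roughly like the following sketch (the surrounding code is illustrative; holding mmap_sem for read is enough to make the write trylock fail):

	down_read(&mm->mmap_sem);
	vma = our_vma(mm, addr, count);
	if (vma) {
		/* ... use vma while mmap_sem is still held ... */
	}
	up_read(&mm->mmap_sem);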
Example #8
int __init down_write_trylock_init(void)
{
	int ret;

	init_rwsem(&rwsem);			/* initialize the read-write semaphore */
	printk("<0>after init_rwsem, count: %ld\n", rwsem.count);

	if (EXEC_DOWN_READ)
		down_read(&rwsem);		/* a reader acquires the semaphore */

	ret = down_write_trylock(&rwsem);	/* the writer tries to acquire it */
	if (ret) {
		printk("<0>after down_write_trylock, count: 0x%0lx\n", rwsem.count);

		up_write(&rwsem);		/* the writer releases the semaphore */
		printk("<0>after up_write, count: %ld\n", rwsem.count);
	} else {
		printk("<0>down_write_trylock failed!\n");
	}

	return 0;
}
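When EXEC_DOWN_READ is set, the reader taken in the init function is never released there, so the trylock is expected to fail. A matching cleanup sketch would drop that reader on module exit (down_write_trylock_exit() is illustrative, not part of the original example):

void __exit down_write_trylock_exit(void)
{
	if (EXEC_DOWN_READ)
		up_read(&rwsem);	/* release the reader acquired in init */
}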
Example #9
/**
 * leb_write_lock - lock logical eraseblock for writing.
 * @ubi: UBI device description object
 * @vol_id: volume ID
 * @lnum: logical eraseblock number
 *
 * This function locks a logical eraseblock for writing if there is no
 * contention and does nothing if there is contention. Returns %0 in case of
 * success, %1 in case of contention, and a negative error code in case of
 * failure.
 */
static int leb_write_trylock(struct ubi_device *ubi, int vol_id, int lnum)
{
	struct ubi_ltree_entry *le;

	le = ltree_add_entry(ubi, vol_id, lnum);
	if (IS_ERR(le))
		return PTR_ERR(le);
	if (down_write_trylock(&le->mutex))
		return 0;

	/* Contention, cancel */
	spin_lock(&ubi->ltree_lock);
	le->users -= 1;
	ubi_assert(le->users >= 0);
	if (le->users == 0) {
		rb_erase(&le->rb, &ubi->ltree);
		kfree(le);
	}
	spin_unlock(&ubi->ltree_lock);

	return 1;
}
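A caller has to distinguish the three outcomes documented above; a minimal sketch of that dispatch (the -EAGAIN mapping is illustrative, not taken from the UBI code):

	err = leb_write_trylock(ubi, vol_id, lnum);
	if (err < 0)
		return err;		/* real failure */
	if (err == 1)
		return -EAGAIN;		/* contention, caller may retry */

	/* eraseblock is locked for writing; release it with leb_write_unlock() when done */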
Example #10
static void ocfs2_clear_inode(struct inode *inode)
{
	int status;
	struct ocfs2_inode_info *oi = OCFS2_I(inode);

	clear_inode(inode);
	trace_ocfs2_clear_inode((unsigned long long)oi->ip_blkno,
				inode->i_nlink);

	mlog_bug_on_msg(OCFS2_SB(inode->i_sb) == NULL,
			"Inode=%lu\n", inode->i_ino);

	dquot_drop(inode);

	/* To prevent remote deletes we hold the open lock before; now it
	 * is time to unlock the PR and EX open locks. */
	ocfs2_open_unlock(inode);

	/* Do these before all the other work so that we don't bounce
	 * the downconvert thread while waiting to destroy the locks. */
	ocfs2_mark_lockres_freeing(&oi->ip_rw_lockres);
	ocfs2_mark_lockres_freeing(&oi->ip_inode_lockres);
	ocfs2_mark_lockres_freeing(&oi->ip_open_lockres);

	ocfs2_resv_discard(&OCFS2_SB(inode->i_sb)->osb_la_resmap,
			   &oi->ip_la_data_resv);
	ocfs2_resv_init_once(&oi->ip_la_data_resv);

	/* We may very well get a clear_inode before all of an inode's
	 * metadata has hit disk. Of course, we can't drop any cluster
	 * locks until the journal has finished with it. The only
	 * exception here are successfully wiped inodes - their
	 * metadata can now be considered to be part of the system
	 * inodes from which it came. */
	if (!(OCFS2_I(inode)->ip_flags & OCFS2_INODE_DELETED))
		ocfs2_checkpoint_inode(inode);

	mlog_bug_on_msg(!list_empty(&oi->ip_io_markers),
			"Clear inode of %llu, inode has io markers\n",
			(unsigned long long)oi->ip_blkno);

	ocfs2_extent_map_trunc(inode, 0);

	status = ocfs2_drop_inode_locks(inode);
	if (status < 0)
		mlog_errno(status);

	ocfs2_lock_res_free(&oi->ip_rw_lockres);
	ocfs2_lock_res_free(&oi->ip_inode_lockres);
	ocfs2_lock_res_free(&oi->ip_open_lockres);

	ocfs2_metadata_cache_exit(INODE_CACHE(inode));

	mlog_bug_on_msg(INODE_CACHE(inode)->ci_num_cached,
			"Clear inode of %llu, inode has %u cache items\n",
			(unsigned long long)oi->ip_blkno,
			INODE_CACHE(inode)->ci_num_cached);

	mlog_bug_on_msg(!(INODE_CACHE(inode)->ci_flags & OCFS2_CACHE_FL_INLINE),
			"Clear inode of %llu, inode has a bad flag\n",
			(unsigned long long)oi->ip_blkno);

	mlog_bug_on_msg(spin_is_locked(&oi->ip_lock),
			"Clear inode of %llu, inode is locked\n",
			(unsigned long long)oi->ip_blkno);

	mlog_bug_on_msg(!mutex_trylock(&oi->ip_io_mutex),
			"Clear inode of %llu, io_mutex is locked\n",
			(unsigned long long)oi->ip_blkno);
	mutex_unlock(&oi->ip_io_mutex);

	/*
	 * down_trylock() returns 0 on success, while down_write_trylock()
	 * returns 1 on success, hence the inverted check below.
	 */
	mlog_bug_on_msg(!down_write_trylock(&oi->ip_alloc_sem),
			"Clear inode of %llu, alloc_sem is locked\n",
			(unsigned long long)oi->ip_blkno);
	up_write(&oi->ip_alloc_sem);

	mlog_bug_on_msg(oi->ip_open_count,
			"Clear inode of %llu has open count %d\n",
			(unsigned long long)oi->ip_blkno, oi->ip_open_count);

	/* Clear all other flags. */
	oi->ip_flags = 0;
	oi->ip_dir_start_lookup = 0;
	oi->ip_blkno = 0ULL;

	/*
	 * ip_jinode is used to track txns against this inode. We ensure that
	 * the journal is flushed before journal shutdown. Thus it is safe to
	 * have inodes get cleaned up after journal shutdown.
	 */
	jbd2_journal_release_jbd_inode(OCFS2_SB(inode->i_sb)->journal->j_journal,
				       &oi->ip_jinode);
}
Example #11
void ocfs2_clear_inode(struct inode *inode)
{
	int status;
	struct ocfs2_inode_info *oi = OCFS2_I(inode);

	mlog_entry_void();

	if (!inode)
		goto bail;

	mlog(0, "Clearing inode: %llu, nlink = %u\n",
	     (unsigned long long)OCFS2_I(inode)->ip_blkno, inode->i_nlink);

	mlog_bug_on_msg(OCFS2_SB(inode->i_sb) == NULL,
			"Inode=%lu\n", inode->i_ino);

	/* For the remote delete_inode vote, we hold the open lock before;
	 * now it is time to unlock the PR and EX open locks. */
	ocfs2_open_unlock(inode);

	/* Do these before all the other work so that we don't bounce
	 * the vote thread while waiting to destroy the locks. */
	ocfs2_mark_lockres_freeing(&oi->ip_rw_lockres);
	ocfs2_mark_lockres_freeing(&oi->ip_meta_lockres);
	ocfs2_mark_lockres_freeing(&oi->ip_data_lockres);
	ocfs2_mark_lockres_freeing(&oi->ip_open_lockres);

	/* We may very well get a clear_inode before all of an inode's
	 * metadata has hit disk. Of course, we can't drop any cluster
	 * locks until the journal has finished with it. The only
	 * exception here are successfully wiped inodes - their
	 * metadata can now be considered to be part of the system
	 * inodes from which it came. */
	if (!(OCFS2_I(inode)->ip_flags & OCFS2_INODE_DELETED))
		ocfs2_checkpoint_inode(inode);

	mlog_bug_on_msg(!list_empty(&oi->ip_io_markers),
			"Clear inode of %llu, inode has io markers\n",
			(unsigned long long)oi->ip_blkno);

	ocfs2_extent_map_trunc(inode, 0);

	status = ocfs2_drop_inode_locks(inode);
	if (status < 0)
		mlog_errno(status);

	ocfs2_lock_res_free(&oi->ip_rw_lockres);
	ocfs2_lock_res_free(&oi->ip_meta_lockres);
	ocfs2_lock_res_free(&oi->ip_data_lockres);
	ocfs2_lock_res_free(&oi->ip_open_lockres);

	ocfs2_metadata_cache_purge(inode);

	mlog_bug_on_msg(oi->ip_metadata_cache.ci_num_cached,
			"Clear inode of %llu, inode has %u cache items\n",
			(unsigned long long)oi->ip_blkno, oi->ip_metadata_cache.ci_num_cached);

	mlog_bug_on_msg(!(oi->ip_flags & OCFS2_INODE_CACHE_INLINE),
			"Clear inode of %llu, inode has a bad flag\n",
			(unsigned long long)oi->ip_blkno);

	mlog_bug_on_msg(spin_is_locked(&oi->ip_lock),
			"Clear inode of %llu, inode is locked\n",
			(unsigned long long)oi->ip_blkno);

	mlog_bug_on_msg(!mutex_trylock(&oi->ip_io_mutex),
			"Clear inode of %llu, io_mutex is locked\n",
			(unsigned long long)oi->ip_blkno);
	mutex_unlock(&oi->ip_io_mutex);

	/*
	 * down_trylock() returns 0 on success, while down_write_trylock()
	 * returns 1 on success, hence the inverted check below.
	 */
	mlog_bug_on_msg(!down_write_trylock(&oi->ip_alloc_sem),
			"Clear inode of %llu, alloc_sem is locked\n",
			(unsigned long long)oi->ip_blkno);
	up_write(&oi->ip_alloc_sem);

	mlog_bug_on_msg(oi->ip_open_count,
			"Clear inode of %llu has open count %d\n",
			(unsigned long long)oi->ip_blkno, oi->ip_open_count);

	/* Clear all other flags. */
	oi->ip_flags = OCFS2_INODE_CACHE_INLINE;
	oi->ip_created_trans = 0;
	oi->ip_last_trans = 0;
	oi->ip_dir_start_lookup = 0;
	oi->ip_blkno = 0ULL;

bail:
	mlog_exit_void();
}
Example #12
File: pm.c Project: avagin/linux
static int wil_suspend_keep_radio_on(struct wil6210_priv *wil)
{
	int rc = 0;
	unsigned long data_comp_to;

	wil_dbg_pm(wil, "suspend keep radio on\n");

	/* Prevent handling of new tx and wmi commands */
	rc = down_write_trylock(&wil->mem_lock);
	if (!rc) {
		wil_err(wil,
			"device is busy. down_write_trylock failed, returned (0x%x)\n",
			rc);
		wil->suspend_stats.rejected_by_host++;
		return -EBUSY;
	}

	set_bit(wil_status_suspending, wil->status);
	up_write(&wil->mem_lock);

	wil_pm_stop_all_net_queues(wil);

	if (!wil_is_tx_idle(wil)) {
		wil_dbg_pm(wil, "Pending TX data, reject suspend\n");
		wil->suspend_stats.rejected_by_host++;
		goto reject_suspend;
	}

	if (!wil->txrx_ops.is_rx_idle(wil)) {
		wil_dbg_pm(wil, "Pending RX data, reject suspend\n");
		wil->suspend_stats.rejected_by_host++;
		goto reject_suspend;
	}

	if (!wil_is_wmi_idle(wil)) {
		wil_dbg_pm(wil, "Pending WMI events, reject suspend\n");
		wil->suspend_stats.rejected_by_host++;
		goto reject_suspend;
	}

	/* Send WMI suspend request to the device */
	rc = wmi_suspend(wil);
	if (rc) {
		wil_dbg_pm(wil, "wmi_suspend failed, reject suspend (%d)\n",
			   rc);
		goto reject_suspend;
	}

	/* Wait for completion of the pending RX packets */
	data_comp_to = jiffies + msecs_to_jiffies(WIL_DATA_COMPLETION_TO_MS);
	if (test_bit(wil_status_napi_en, wil->status)) {
		while (!wil->txrx_ops.is_rx_idle(wil)) {
			if (time_after(jiffies, data_comp_to)) {
				if (wil->txrx_ops.is_rx_idle(wil))
					break;
				wil_err(wil,
					"TO waiting for idle RX, suspend failed\n");
				wil->suspend_stats.r_on.failed_suspends++;
				goto resume_after_fail;
			}
			wil_dbg_ratelimited(wil, "rx vring is not empty -> NAPI\n");
			napi_synchronize(&wil->napi_rx);
			msleep(20);
		}
	}

	/* In case of pending WMI events, reject the suspend
	 * and resume the device.
	 * This can happen if the device sent the WMI events before
	 * approving the suspend.
	 */
	if (!wil_is_wmi_idle(wil)) {
		wil_err(wil, "suspend failed due to pending WMI events\n");
		wil->suspend_stats.r_on.failed_suspends++;
		goto resume_after_fail;
	}

	wil_mask_irq(wil);

	/* Disable device reset on PERST */
	wil_s(wil, RGF_USER_CLKS_CTL_0, BIT_USER_CLKS_RST_PWGD);

	if (wil->platform_ops.suspend) {
		rc = wil->platform_ops.suspend(wil->platform_handle, true);
		if (rc) {
			wil_err(wil, "platform device failed to suspend (%d)\n",
				rc);
			wil->suspend_stats.r_on.failed_suspends++;
			wil_c(wil, RGF_USER_CLKS_CTL_0, BIT_USER_CLKS_RST_PWGD);
			wil_unmask_irq(wil);
			goto resume_after_fail;
		}
	}

	/* Save the current bus request to return to the same in resume */
	wil->bus_request_kbps_pre_suspend = wil->bus_request_kbps;
	wil6210_bus_request(wil, 0);

	set_bit(wil_status_suspended, wil->status);
	clear_bit(wil_status_suspending, wil->status);

	return rc;

resume_after_fail:
	set_bit(wil_status_resuming, wil->status);
	clear_bit(wil_status_suspending, wil->status);
	rc = wmi_resume(wil);
	/* if resume succeeded, reject the suspend */
	if (!rc) {
		rc = -EBUSY;
		wil_pm_wake_connected_net_queues(wil);
	}
	return rc;

reject_suspend:
	clear_bit(wil_status_suspending, wil->status);
	wil_pm_wake_connected_net_queues(wil);
	return -EBUSY;
}
Example #13
static long fimg2d_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	int ret = 0;
	struct fimg2d_context *ctx;
	struct fimg2d_platdata *pdata;
	struct fimg2d_blit blit;
	struct fimg2d_version ver;
	struct fimg2d_image dst;

	ctx = file->private_data;
	if (!ctx) {
		printk(KERN_ERR "[%s] missing ctx\n", __func__);
		return -EFAULT;
	}

	switch (cmd) {
	case FIMG2D_BITBLT_BLIT:
		if (info->secure)
			return -EFAULT;
		if (copy_from_user(&blit, (void *)arg, sizeof(blit)))
			return -EFAULT;
		if (blit.dst)
			if (copy_from_user(&dst, (void *)blit.dst, sizeof(dst)))
				return -EFAULT;

#ifdef CONFIG_BUSFREQ_OPP
#if defined(CONFIG_CPU_EXYNOS4212) || defined(CONFIG_CPU_EXYNOS4412)
		dev_lock(info->bus_dev, info->dev, 160160);
#endif
#endif
		if ((blit.dst) && (dst.addr.type == ADDR_USER))
			if (!down_write_trylock(&page_alloc_slow_rwsem))
				ret = -EAGAIN;

		if (ret != -EAGAIN)
			ret = fimg2d_add_command(info, ctx, &blit, dst.addr.type);

		if (!ret) {
			fimg2d_request_bitblt(ctx);
		}

#ifdef PERF_PROFILE
		perf_print(ctx, blit.seq_no);
		perf_clear(ctx);
#endif
		if ((blit.dst) && (dst.addr.type == ADDR_USER) && ret != -EAGAIN)
			up_write(&page_alloc_slow_rwsem);

#ifdef CONFIG_BUSFREQ_OPP
#if defined(CONFIG_CPU_EXYNOS4212) || defined(CONFIG_CPU_EXYNOS4412)
		dev_unlock(info->bus_dev, info->dev);
#endif
#endif
		break;

	case FIMG2D_BITBLT_SYNC:
		fimg2d_debug("FIMG2D_BITBLT_SYNC ctx: %p\n", ctx);
		/* FIXME: */
		break;

	case FIMG2D_BITBLT_VERSION:
		pdata = to_fimg2d_plat(info->dev);
		ver.hw = pdata->hw_ver;
		ver.sw = 0;
		fimg2d_debug("fimg2d version, hw: 0x%x sw: 0x%x\n",
				ver.hw, ver.sw);
		if (copy_to_user((void *)arg, &ver, sizeof(ver)))
			return -EFAULT;
		break;

	case FIMG2D_BITBLT_SECURE:
		if (copy_from_user(&info->secure,
				   (unsigned int *)arg,
				   sizeof(unsigned int))) {
			printk(KERN_ERR
				"[%s] FIMG2D_BITBLT_SECURE failed: copy_from_user error\n",
				__func__);
			return -EFAULT;
		}

		while (1) {
			if (fimg2d_queue_is_empty(&info->cmd_q))
				break;
			mdelay(2);
		}

		break;

	default:
		printk(KERN_ERR "[%s] unknown ioctl\n", __func__);
		ret = -EFAULT;
		break;
	}

	return ret;
}
Example #14
/**
 * cppc_set_perf - Set a CPUs performance controls.
 * @cpu: CPU for which to set performance controls.
 * @perf_ctrls: ptr to cppc_perf_ctrls. See cppc_acpi.h
 *
 * Return: 0 for success, -ERRNO otherwise.
 */
int cppc_set_perf(int cpu, struct cppc_perf_ctrls *perf_ctrls)
{
	struct cpc_desc *cpc_desc = per_cpu(cpc_desc_ptr, cpu);
	struct cpc_register_resource *desired_reg;
	int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpu);
	struct cppc_pcc_data *pcc_ss_data;
	int ret = 0;

	if (!cpc_desc || pcc_ss_id < 0) {
		pr_debug("No CPC descriptor for CPU:%d\n", cpu);
		return -ENODEV;
	}

	pcc_ss_data = pcc_data[pcc_ss_id];
	desired_reg = &cpc_desc->cpc_regs[DESIRED_PERF];

	/*
	 * This is Phase-I where we want to write to CPC registers
	 * -> We want all CPUs to be able to execute this phase in parallel
	 *
	 * Since read_lock can be acquired by multiple CPUs simultaneously we
	 * achieve that goal here
	 */
	if (CPC_IN_PCC(desired_reg)) {
		down_read(&pcc_ss_data->pcc_lock); /* BEGIN Phase-I */
		if (pcc_ss_data->platform_owns_pcc) {
			ret = check_pcc_chan(pcc_ss_id, false);
			if (ret) {
				up_read(&pcc_ss_data->pcc_lock);
				return ret;
			}
		}
		/*
		 * Update the pending_write to make sure a PCC CMD_READ will not
		 * arrive and steal the channel during the switch to write lock
		 */
		pcc_ss_data->pending_pcc_write_cmd = true;
		cpc_desc->write_cmd_id = pcc_ss_data->pcc_write_cnt;
		cpc_desc->write_cmd_status = 0;
	}

	/*
	 * Skip writing MIN/MAX until Linux knows how to come up with
	 * useful values.
	 */
	cpc_write(cpu, desired_reg, perf_ctrls->desired_perf);

	if (CPC_IN_PCC(desired_reg))
		up_read(&pcc_ss_data->pcc_lock);	/* END Phase-I */
	/*
	 * This is Phase-II where we transfer the ownership of PCC to Platform
	 *
	 * Short Summary: Think of a group of cppc_set_perf requests that
	 * happened in a short overlapping interval. The last CPU to come out
	 * of Phase-I will enter Phase-II and ring the doorbell.
	 *
	 * We have the following requirements for Phase-II:
	 *     1. We want to execute Phase-II only when there are no CPUs
	 * currently executing in Phase-I
	 *     2. Once we start Phase-II we want to avoid all other CPUs from
	 * entering Phase-I.
	 *     3. We want only one CPU among all those who went through Phase-I
	 * to run phase-II
	 *
	 * If write_trylock fails to get the lock and doesn't transfer the
	 * PCC ownership to the platform, then one of the following will be TRUE
	 *     1. There is at-least one CPU in Phase-I which will later execute
	 * write_trylock, so the CPUs in Phase-I will be responsible for
	 * executing the Phase-II.
	 *     2. Some other CPU has beaten this CPU to successfully execute the
	 * write_trylock and has already acquired the write_lock. We know for a
	 * fact it(other CPU acquiring the write_lock) couldn't have happened
	 * before this CPU's Phase-I as we held the read_lock.
	 *     3. Some other CPU executing pcc CMD_READ has stolen the
	 * down_write, in which case, send_pcc_cmd will check for pending
	 * CMD_WRITE commands by checking pending_pcc_write_cmd, so this CPU
	 * can be certain that its request will be delivered.
	 *    So in all cases, this CPU knows that its request will be delivered
	 * by another CPU and can return.
	 *
	 * After getting the down_write we still need to check for
	 * pending_pcc_write_cmd to take care of the following scenario
	 *    The thread running this code could be scheduled out between
	 * Phase-I and Phase-II. Before it is scheduled back on, another CPU
	 * could have delivered the request to Platform by triggering the
	 * doorbell and transferred the ownership of PCC to platform. So this
	 * avoids triggering an unnecessary doorbell and more importantly before
	 * triggering the doorbell it makes sure that the PCC channel ownership
	 * is still with OSPM.
	 *   pending_pcc_write_cmd can also be cleared by a different CPU, if
	 * there was a pcc CMD_READ waiting on down_write and it steals the lock
	 * before the pcc CMD_WRITE is completed. pcc_send_cmd checks for this
	 * case during a CMD_READ and if there are pending writes it delivers
	 * the write command before servicing the read command
	 */
	if (CPC_IN_PCC(desired_reg)) {
		if (down_write_trylock(&pcc_ss_data->pcc_lock)) {/* BEGIN Phase-II */
			/* Update only if there are pending write commands */
			if (pcc_ss_data->pending_pcc_write_cmd)
				send_pcc_cmd(pcc_ss_id, CMD_WRITE);
			up_write(&pcc_ss_data->pcc_lock);	/* END Phase-II */
		} else
			/* Wait until pcc_write_cnt is updated by send_pcc_cmd */
			wait_event(pcc_ss_data->pcc_write_wait_q,
				   cpc_desc->write_cmd_id != pcc_ss_data->pcc_write_cnt);

		/* send_pcc_cmd updates the status in case of failure */
		ret = cpc_desc->write_cmd_status;
	}
	return ret;
}