Example 1
/*
 * For each partition that XPC has established communications with, there is
 * a minimum of one kernel thread assigned to perform any operation that
 * may potentially sleep or block (basically the callouts to the asynchronous
 * functions registered via xpc_connect()).
 *
 * Additional kthreads are created and destroyed by XPC as the workload
 * demands.
 *
 * A kthread is assigned to one of the active channels that exist for a
 * given partition.
 */
void
xpc_create_kthreads(struct xpc_channel *ch, int needed,
		    int ignore_disconnecting)
{
	unsigned long irq_flags;
	u64 args = XPC_PACK_ARGS(ch->partid, ch->number);
	struct xpc_partition *part = &xpc_partitions[ch->partid];
	struct task_struct *kthread;
	void (*indicate_partition_disengaged) (struct xpc_partition *) =
		xpc_arch_ops.indicate_partition_disengaged;

	while (needed-- > 0) {

		/*
		 * The following is done on behalf of the newly created
		 * kthread. That kthread is responsible for doing the
		 * counterpart to the following before it exits.
		 */
		if (ignore_disconnecting) {
			if (!atomic_inc_not_zero(&ch->kthreads_assigned)) {
				/* kthreads assigned had gone to zero */
				BUG_ON(!(ch->flags &
					 XPC_C_DISCONNECTINGCALLOUT_MADE));
				break;
			}

		} else if (ch->flags & XPC_C_DISCONNECTING) {
			break;

		} else if (atomic_inc_return(&ch->kthreads_assigned) == 1 &&
			   atomic_inc_return(&part->nchannels_engaged) == 1) {
			xpc_arch_ops.indicate_partition_engaged(part);
		}
		(void)xpc_part_ref(part);
		xpc_msgqueue_ref(ch);

		kthread = kthread_run(xpc_kthread_start, (void *)args,
				      "xpc%02dc%d", ch->partid, ch->number);
		if (IS_ERR(kthread)) {
			/* the fork failed */

			/*
			 * NOTE: if (ignore_disconnecting &&
			 * !(ch->flags & XPC_C_DISCONNECTINGCALLOUT)) is true,
			 * then we'll deadlock if all other kthreads assigned
			 * to this channel are blocked in the channel's
			 * registerer, because the only thing that will unblock
			 * them is the xpDisconnecting callout that this
			 * failed kthread_run() would have made.
			 */

			if (atomic_dec_return(&ch->kthreads_assigned) == 0 &&
			    atomic_dec_return(&part->nchannels_engaged) == 0) {
				indicate_partition_disengaged(part);
			}
			xpc_msgqueue_deref(ch);
			xpc_part_deref(part);

			if (atomic_read(&ch->kthreads_assigned) <
			    ch->kthreads_idle_limit) {
				/*
				 * Flag this as an error only if we have an
				 * insufficient #of kthreads for the channel
				 * to function.
				 */
				spin_lock_irqsave(&ch->lock, irq_flags);
				XPC_DISCONNECT_CHANNEL(ch, xpLackOfResources,
						       &irq_flags);
				spin_unlock_irqrestore(&ch->lock, irq_flags);
			}
			break;
		}
	}
}
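A minimal userspace sketch of the engage/disengage pattern above, assuming only C11 atomics; the names mirror the XPC ones, but everything here is illustrative rather than the kernel implementation. The first assignment on a channel engages the partition, and the last release disengages it, with the short-circuit && ensuring nchannels_engaged only moves on a kthreads_assigned edge:

#include <stdatomic.h>
#include <stdio.h>

static atomic_int kthreads_assigned;
static atomic_int nchannels_engaged;

void indicate_partition_engaged(void)    { puts("partition engaged"); }
void indicate_partition_disengaged(void) { puts("partition disengaged"); }

void assign_kthread(void)
{
	/* atomic_fetch_add() returns the old value, so old + 1 is the
	 * new count; only the first assignment engages the partition */
	if (atomic_fetch_add(&kthreads_assigned, 1) + 1 == 1 &&
	    atomic_fetch_add(&nchannels_engaged, 1) + 1 == 1)
		indicate_partition_engaged();
}

void unassign_kthread(void)
{
	/* mirror image: only the last release disengages */
	if (atomic_fetch_sub(&kthreads_assigned, 1) - 1 == 0 &&
	    atomic_fetch_sub(&nchannels_engaged, 1) - 1 == 0)
		indicate_partition_disengaged();
}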
u32 _mali_osk_atomic_dec_return(_mali_osk_atomic_t *atom)
{
	return atomic_dec_return((atomic_t *)&atom->u.val);
}
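For comparison, a hedged sketch of what a portability wrapper like the Mali one does, using C11 atomics and hypothetical type and function names; atomic_fetch_sub() returns the value before the subtraction, so the wrapper subtracts one more to match atomic_dec_return() semantics:

#include <stdatomic.h>

typedef struct { atomic_uint val; } my_atomic_t;

/* return the value *after* the decrement, like atomic_dec_return() */
static inline unsigned int my_atomic_dec_return(my_atomic_t *atom)
{
	return atomic_fetch_sub(&atom->val, 1) - 1;
}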
static int if_usb_resume(struct usb_interface *intf)
{
	int i, ret;
	struct sk_buff *skb;
	struct usb_link_device *usb_ld = usb_get_intfdata(intf);
	struct if_usb_devdata *pipe;
	struct urb *urb;

	spin_lock_irq(&usb_ld->lock);
	if (!atomic_dec_return(&usb_ld->suspend_count)) {
		spin_unlock_irq(&usb_ld->lock);

		mif_debug("\n");
		wake_lock(&usb_ld->susplock);

		/* HACK: runtime PM does not allow requesting autosuspend from
		 * the resume callback, so it is deferred until after resume */
		queue_delayed_work(system_nrt_wq, &usb_ld->runtime_pm_work,
							msecs_to_jiffies(50));

		for (i = 0; i < IF_USB_DEVNUM_MAX; i++) {
			pipe = &usb_ld->devdata[i];
			while ((urb = usb_get_from_anchor(&pipe->urbs))) {
				ret = usb_rx_submit(pipe, urb, GFP_KERNEL);
				if (ret < 0) {
					usb_put_urb(urb);
					mif_err("usb_rx_submit error (%d)\n",
						ret);
					return ret;
				}
				usb_put_urb(urb);
			}
		}

		while ((urb = usb_get_from_anchor(&usb_ld->deferred))) {
			mif_debug("got urb (0x%p) from anchor & resubmit\n",
					urb);
			ret = usb_submit_urb(urb, GFP_KERNEL);
			if (ret < 0) {
				mif_err("resubmit failed\n");
				skb = urb->context;
				dev_kfree_skb_any(skb);
				usb_free_urb(urb);
				ret = pm_runtime_put_autosuspend(
						&usb_ld->usbdev->dev);
				if (ret < 0 && ret != -EAGAIN)
					mif_debug("pm_runtime_put_autosuspend "
							"failed: %d\n", ret);
			}
		}
		SET_SLAVE_WAKEUP(usb_ld->pdata, 1);
		udelay(100);
		SET_SLAVE_WAKEUP(usb_ld->pdata, 0);

		/* if_usb_resume() is atomic; post_resume_work acts as a
		 * bottom half
		 */
		queue_delayed_work(system_nrt_wq, &usb_ld->post_resume_work, 0);

		return 0;
	}

	spin_unlock_irq(&usb_ld->lock);
	return 0;
}
Example 4
static int
xpc_kthread_start(void *args)
{
	short partid = XPC_UNPACK_ARG1(args);
	u16 ch_number = XPC_UNPACK_ARG2(args);
	struct xpc_partition *part = &xpc_partitions[partid];
	struct xpc_channel *ch;
	int n_needed;
	unsigned long irq_flags;
	int (*n_of_deliverable_payloads) (struct xpc_channel *) =
		xpc_arch_ops.n_of_deliverable_payloads;

	dev_dbg(xpc_chan, "kthread starting, partid=%d, channel=%d\n",
		partid, ch_number);

	ch = &part->channels[ch_number];

	if (!(ch->flags & XPC_C_DISCONNECTING)) {

		/* let registerer know that connection has been established */

		spin_lock_irqsave(&ch->lock, irq_flags);
		if (!(ch->flags & XPC_C_CONNECTEDCALLOUT)) {
			ch->flags |= XPC_C_CONNECTEDCALLOUT;
			spin_unlock_irqrestore(&ch->lock, irq_flags);

			xpc_connected_callout(ch);

			spin_lock_irqsave(&ch->lock, irq_flags);
			ch->flags |= XPC_C_CONNECTEDCALLOUT_MADE;
			spin_unlock_irqrestore(&ch->lock, irq_flags);

			/*
			 * It is possible that while the callout was being
			 * made that the remote partition sent some messages.
			 * If that is the case, we may need to activate
			 * additional kthreads to help deliver them. We only
			 * need one less than total #of messages to deliver.
			 */
			n_needed = n_of_deliverable_payloads(ch) - 1;
			if (n_needed > 0 && !(ch->flags & XPC_C_DISCONNECTING))
				xpc_activate_kthreads(ch, n_needed);

		} else {
			spin_unlock_irqrestore(&ch->lock, irq_flags);
		}

		xpc_kthread_waitmsgs(part, ch);
	}

	/* let registerer know that connection is disconnecting */

	spin_lock_irqsave(&ch->lock, irq_flags);
	if ((ch->flags & XPC_C_CONNECTEDCALLOUT_MADE) &&
	    !(ch->flags & XPC_C_DISCONNECTINGCALLOUT)) {
		ch->flags |= XPC_C_DISCONNECTINGCALLOUT;
		spin_unlock_irqrestore(&ch->lock, irq_flags);

		xpc_disconnect_callout(ch, xpDisconnecting);

		spin_lock_irqsave(&ch->lock, irq_flags);
		ch->flags |= XPC_C_DISCONNECTINGCALLOUT_MADE;
	}
	spin_unlock_irqrestore(&ch->lock, irq_flags);

	if (atomic_dec_return(&ch->kthreads_assigned) == 0 &&
	    atomic_dec_return(&part->nchannels_engaged) == 0) {
		xpc_arch_ops.indicate_partition_disengaged(part);
	}

	xpc_msgqueue_deref(ch);

	dev_dbg(xpc_chan, "kthread exiting, partid=%d, channel=%d\n",
		partid, ch_number);

	xpc_part_deref(part);
	return 0;
}
Example 5
void mmc_rescan(struct work_struct *work)
{
	struct mmc_host *host =
		container_of(work, struct mmc_host, detect.work);
	u32 ocr;
	int err;
	unsigned long flags;
	int extend_wakelock = 0;

	spin_lock_irqsave(&host->lock, flags);

	if (host->rescan_disable) {
		spin_unlock_irqrestore(&host->lock, flags);
		if (atomic_dec_return(&wakelock_refs) > 0) {
			printk(KERN_DEBUG "Another host wants the wakelock: %d\n", atomic_read(&wakelock_refs));
		} else {
			printk(KERN_DEBUG "unlock case1: mmc%d: wake_lock_timeout 0.5 sec %d\n", host->index, atomic_read(&wakelock_refs));
			wake_lock_timeout(&mmc_delayed_work_wake_lock, msecs_to_jiffies(500));
		}
		return;
	}

	spin_unlock_irqrestore(&host->lock, flags);

	/* NAGSM_Android_HDLNC_SDcard_shinjonghyun_20100504: mutual exclusion
	 * between MoviNAND and SD card when using this function (disabled):
	 * mutex_lock(&host->carddetect_lock);
	 */

	mmc_bus_get(host);

	/* if there is a card registered, check whether it is still present */
	if ((host->bus_ops != NULL) && host->bus_ops->detect && !host->bus_dead) {
		if (host->ops->get_cd && host->ops->get_cd(host) == 0) {
			if (host->bus_ops->remove)
				host->bus_ops->remove(host);

			mmc_claim_host(host);
			mmc_detach_bus(host);
			mmc_release_host(host);
		} else {
			host->bus_ops->detect(host);
		}
	}

	/* If the card was removed the bus will be marked
	 * as dead - extend the wakelock so userspace
	 * can respond */
	if (host->bus_dead)
		extend_wakelock = 1;

	mmc_bus_put(host);


	mmc_bus_get(host);

	printk(KERN_DEBUG "*** DEBUG : start %s (mmc%d)***\n", __func__, host->index);

	/* if there still is a card present, stop here */
	if (host->bus_ops != NULL) {
		mmc_bus_put(host);
		goto out;
	}

	/* detect a newly inserted card */

	/*
	 * Only we can add a new handler, so it's safe to
	 * release the lock here.
	 */
	mmc_bus_put(host);

	if (host->ops->get_cd && host->ops->get_cd(host) == 0)
		goto out;

	mmc_claim_host(host);

	mmc_power_up(host);
	sdio_reset(host);
	mmc_go_idle(host);

	mmc_send_if_cond(host, host->ocr_avail);

	/*
	 * First we search for SDIO...
	 */
	printk(KERN_DEBUG "*** DEBUG : First we search for SDIO...(%d)***\n", host->index);
	err = mmc_send_io_op_cond(host, 0, &ocr);
	if (!err) {
		if (mmc_attach_sdio(host, ocr))
			mmc_power_off(host);
		extend_wakelock = 1;
		goto out;
	}

	/*
	 * ...then normal SD...
	 */
	printk(KERN_DEBUG "*** DEBUG : ...then normal SD...(%d) ***\n", host->index);
	err = mmc_send_app_op_cond(host, 0, &ocr);
	if (!err) {
		if (mmc_attach_sd(host, ocr))
			mmc_power_off(host);
		extend_wakelock = 1;
		goto out;
	}

	/*
	 * ...and finally MMC.
	 */
	printk(KERN_DEBUG "*** DEBUG : ...and finally MMC. (%d)***\n", host->index);
	err = mmc_send_op_cond(host, 0, &ocr);
	if (!err) {
		if (mmc_attach_mmc(host, ocr))
			mmc_power_off(host);
		extend_wakelock = 1;
		goto out;
	}

	printk(KERN_DEBUG "*** DEBUG : end %s (mmc%d)***\n", __func__, host->index);

	mmc_release_host(host);
	mmc_power_off(host);

out:
#if 0
	if (extend_wakelock)
		wake_lock_timeout(&mmc_delayed_work_wake_lock, HZ / 2);
	else
		wake_unlock(&mmc_delayed_work_wake_lock);
#else
	if (atomic_dec_return(&wakelock_refs) > 0) {
		printk(KERN_DEBUG "Another host wants the wakelock: %d\n", atomic_read(&wakelock_refs));
	} else {
		printk(KERN_DEBUG "unlock case2: mmc%d: wake_lock_timeout 0.5 sec %d\n", host->index, atomic_read(&wakelock_refs));
		wake_lock_timeout(&mmc_delayed_work_wake_lock, msecs_to_jiffies(500));
	}
#endif

	if (host->caps & MMC_CAP_NEEDS_POLL)
		mmc_schedule_delayed_work(&host->detect, HZ);
	/* NAGSM_Android_HDLNC_SDcard_shinjonghyun_20100504: mutual exclusion
	 * between MoviNAND and SD card when using this function (disabled):
	 * mutex_unlock(&host->carddetect_lock);
	 */

}
static void nfs_dec_sillycount(struct inode *dir)
{
	struct nfs_inode *nfsi = NFS_I(dir);
	if (atomic_dec_return(&nfsi->silly_count) == 1)
		wake_up(&nfsi->waitqueue);
}
static inline void jz4740_adc_clk_disable(struct jz4740_adc *adc)
{
	if (atomic_dec_return(&adc->clk_ref) == 0)
		clk_disable(adc->clk);
}
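The two helpers above show the same primitive driving two conventions: jz4740_adc_clk_disable() releases the resource when the count drops to 0, while nfs_dec_sillycount() wakes waiters when it drops to 1, which only makes sense if silly_count is biased, i.e. presumably initialized to 1 by its owner so it cannot reach 0 while the owner still holds its reference. A small illustrative sketch with C11 atomics:

#include <stdatomic.h>
#include <stdio.h>

/* Plain refcount: starts at 0; the release that brings it back to 0
 * turns the resource off, as in jz4740_adc_clk_disable() above. */
static atomic_int clk_ref;

void clk_put(void)
{
	if (atomic_fetch_sub(&clk_ref, 1) - 1 == 0)
		puts("last user gone: disable clock");
}

/* Biased refcount: initialized to 1; transient users increment before
 * use and decrement back *to* 1 afterwards, which is the point where
 * waiters are woken, as in nfs_dec_sillycount() above. */
static atomic_int silly_count = 1;

void silly_put(void)
{
	if (atomic_fetch_sub(&silly_count, 1) - 1 == 1)
		puts("all transient users gone: wake waiters");
}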
Example 8
inline int ATOMIC_DEC_RETURN(ATOMIC_T *v)
{
#ifdef PLATFORM_LINUX
	return atomic_dec_return(v);
#else
#error "ATOMIC_DEC_RETURN: missing implementation for this platform"
#endif
}
Example 9
void _brick_block_free(void *data, int len, int cline)
{
	int order;
#ifdef CONFIG_MARS_DEBUG_MEM_STRONG
	struct mem_block_info *inf;
	char *real_data;
#endif
#ifdef BRICK_DEBUG_MEM
	int prev_line = 0;
#ifdef BRICK_DEBUG_ORDER0
	const int plus0 = PAGE_SIZE;
#else
	const int plus0 = 0;
#endif
	const int plus = len <= PAGE_SIZE ? plus0 : PAGE_SIZE * 2;
#else
	const int plus = 0;
#endif

	order = len2order(len + plus);
#ifdef CONFIG_MARS_DEBUG_MEM_STRONG
	real_data = data;
	if (order > 1)
		real_data -= PAGE_SIZE;
	inf = _find_block_info(real_data, false);
	if (likely(inf)) {
		prev_line = inf->inf_line;
		if (unlikely(inf->inf_len != (PAGE_SIZE << order))) {
			BRICK_ERR("line %d: address %p: bad freeing size %d (correct should be %d, previous line = %d)\n",
				  cline, data, (int)(PAGE_SIZE << order), inf->inf_len, prev_line);
			return;
		}
		if (unlikely(!inf->inf_used)) {
			BRICK_ERR("line %d: address %p: double freeing (previous line = %d)\n", cline, data, prev_line);
			return;
		}
		inf->inf_line = cline;
		inf->inf_used = false;
	} else {
		BRICK_ERR("line %d: trying to free non-existent address %p (order = %d)\n", cline, data, order);
		return;
	}
#endif
#ifdef BRICK_DEBUG_MEM
	if (order > 1) {
		void *test = data - PAGE_SIZE;
		int magic = INT_ACCESS(test, 0);
		int line = INT_ACCESS(test, sizeof(int));
		int oldlen = INT_ACCESS(test, sizeof(int)*2);
		int magic1 = INT_ACCESS(data, -1 * sizeof(int));
		int magic2;

		if (unlikely(magic1 != MAGIC_BLOCK)) {
			BRICK_ERR("line %d memory corruption: %p magix1 %08x != %08x (previous line = %d)\n", cline, data, magic1, MAGIC_BLOCK, prev_line);
			return;
		}
		if (unlikely(magic != MAGIC_BLOCK)) {
			BRICK_ERR("line %d memory corruption: %p magix %08x != %08x (previous line = %d)\n", cline, data, magic, MAGIC_BLOCK, prev_line);
			return;
		}
		if (unlikely(line < 0 || line >= BRICK_DEBUG_MEM)) {
			BRICK_ERR("line %d memory corruption %p: alloc line = %d (previous line = %d)\n", cline, data, line, prev_line);
			return;
		}
		if (unlikely(oldlen != len)) {
			BRICK_ERR("line %d memory corruption %p: len != oldlen (%d != %d, previous line = %d))\n", cline, data, len, oldlen, prev_line);
			return;
		}
		magic2 = INT_ACCESS(data, len);
		if (unlikely(magic2 != MAGIC_BEND)) {
			BRICK_ERR("line %d memory corruption %p: magic %08x != %08x (previous line = %d)\n", cline, data, magic2, MAGIC_BEND, prev_line);
			return;
		}
		INT_ACCESS(test, 0) = 0xffffffff;
		INT_ACCESS(data, len) = 0xffffffff;
		data = test;
		atomic_dec(&block_count[line]);
		atomic_inc(&block_free[line]);
	} else if (order == 1) {
		void *test = data + PAGE_SIZE;
		int magic  = INT_ACCESS(test, 0 * sizeof(int));
		int line   = INT_ACCESS(test, 1 * sizeof(int));
		int oldlen = INT_ACCESS(test, 2 * sizeof(int));

		if (unlikely(magic != MAGIC_BLOCK)) {
			BRICK_ERR("line %d memory corruption %p: magix %08x != %08x (previous line = %d)\n", cline, data, magic, MAGIC_BLOCK, prev_line);
			return;
		}
		if (unlikely(line < 0 || line >= BRICK_DEBUG_MEM)) {
			BRICK_ERR("line %d memory corruption %p: alloc line = %d (previous line = %d)\n", cline, data, line, prev_line);
			return;
		}
		if (unlikely(oldlen != len)) {
			BRICK_ERR("line %d memory corruption %p: len != oldlen (%d != %d, previous line = %d))\n", cline, data, len, oldlen, prev_line);
			return;
		}
		atomic_dec(&block_count[line]);
		atomic_inc(&block_free[line]);
	}
#endif
#ifdef CONFIG_MARS_MEM_PREALLOC
	if (order > 0 && brick_allow_freelist && atomic_read(&freelist_count[order]) <= brick_mem_freelist_max[order]) {
		_put_free(data, order);
	} else
#endif
		__brick_block_free(data, order, cline);
	
#ifdef CONFIG_MARS_MEM_PREALLOC
	brick_mem_alloc_count[order] = atomic_dec_return(&_alloc_count[order]);
#endif
}
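A compact userspace sketch of the guard-word idea behind BRICK_DEBUG_MEM, under an assumed, hypothetical layout (a magic word and the length before the payload, a magic word after it); the real code additionally tracks allocation lines and page order:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define MAGIC_HEAD 0x5ca1ab1e
#define MAGIC_TAIL 0xdeadbeef

/* layout: [head magic][len][payload...][tail magic] */
void *guarded_alloc(size_t len)
{
	unsigned char *p = malloc(2 * sizeof(unsigned) + sizeof(size_t) + len);

	if (!p)
		return NULL;
	memcpy(p, &(unsigned){MAGIC_HEAD}, sizeof(unsigned));
	memcpy(p + sizeof(unsigned), &len, sizeof(size_t));
	memcpy(p + sizeof(unsigned) + sizeof(size_t) + len,
	       &(unsigned){MAGIC_TAIL}, sizeof(unsigned));
	return p + sizeof(unsigned) + sizeof(size_t);
}

/* verify both guards and the recorded length before freeing */
int guarded_free(void *data, size_t len)
{
	unsigned char *base = (unsigned char *)data - sizeof(size_t) - sizeof(unsigned);
	unsigned head, tail;
	size_t oldlen;

	memcpy(&head, base, sizeof(unsigned));
	memcpy(&oldlen, base + sizeof(unsigned), sizeof(size_t));
	memcpy(&tail, (unsigned char *)data + len, sizeof(unsigned));

	if (head != MAGIC_HEAD || tail != MAGIC_TAIL || oldlen != len) {
		fprintf(stderr, "memory corruption near %p\n", data);
		return -1;
	}
	free(base);
	return 0;
}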
/*****************************************************************************
* FUNCTION
*  hal_btif_dma_clk_ctrl
* DESCRIPTION
*  control clock output enable/disable of DMA module
* PARAMETERS
*  p_dma_info   [IN]        pointer to BTIF DMA channel's information
*  flag         [IN]        CLK_OUT_ENABLE or CLK_OUT_DISABLE
* RETURNS
*  0 means success, negative means fail
*****************************************************************************/
int hal_btif_dma_clk_ctrl(P_MTK_DMA_INFO_STR p_dma_info, ENUM_CLOCK_CTRL flag)
{
/* In the MTK DMA BTIF channel there is only one global CG bit on AP_DMA;
 * there is no per-subchannel CG bit.
 * According to Artis's comment, the clocks of DMA and BTIF are off by
 * default, so we assume they start disabled.
 */
	int i_ret = 0;
	unsigned long irq_flag = 0;

#if MTK_BTIF_ENABLE_CLK_REF_COUNTER
	static atomic_t s_clk_ref = ATOMIC_INIT(0);
#else
	static ENUM_CLOCK_CTRL status = CLK_OUT_DISABLE;
#endif
	spin_lock_irqsave(&(g_clk_cg_spinlock), irq_flag);

#if MTK_BTIF_ENABLE_CLK_CTL

#if MTK_BTIF_ENABLE_CLK_REF_COUNTER

	if (CLK_OUT_ENABLE == flag) {
		if (1 == atomic_inc_return(&s_clk_ref)) {
			i_ret =
			    enable_clock(MTK_BTIF_APDMA_CLK_CG, DMA_USER_ID);
			if (i_ret) {
				BTIF_WARN_FUNC
				    ("enable_clock for MTK_BTIF_APDMA_CLK_CG failed, ret:%d",
				     i_ret);
			}
		}
	} else if (CLK_OUT_DISABLE == flag) {
		if (0 == atomic_dec_return(&s_clk_ref)) {
			i_ret =
			    disable_clock(MTK_BTIF_APDMA_CLK_CG, DMA_USER_ID);
			if (i_ret) {
				BTIF_WARN_FUNC
				    ("disable_clock for MTK_BTIF_APDMA_CLK_CG failed, ret:%d",
				     i_ret);
			}
		}
	} else {
		i_ret = ERR_INVALID_PAR;
		BTIF_ERR_FUNC("invalid  clock ctrl flag (%d)\n", flag);
	}

#else

	if (status == flag) {
		i_ret = 0;
		BTIF_DBG_FUNC("dma clock already %s\n",
			      CLK_OUT_ENABLE ==
			      status ? "enabled" : "disabled");
	} else {
		if (CLK_OUT_ENABLE == flag) {
			i_ret =
			    enable_clock(MTK_BTIF_APDMA_CLK_CG, DMA_USER_ID);
			status = (0 == i_ret) ? flag : status;
			if (i_ret) {
				BTIF_WARN_FUNC
				    ("enable_clock for MTK_BTIF_APDMA_CLK_CG failed, ret:%d",
				     i_ret);
			}
		} else if (CLK_OUT_DISABLE == flag) {
			i_ret =
			    disable_clock(MTK_BTIF_APDMA_CLK_CG, DMA_USER_ID);
			status = (0 == i_ret) ? flag : status;
			if (i_ret) {
				BTIF_WARN_FUNC
				    ("disable_clock for MTK_BTIF_APDMA_CLK_CG failed, ret:%d",
				     i_ret);
			}
		} else {
			i_ret = ERR_INVALID_PAR;
			BTIF_ERR_FUNC("invalid  clock ctrl flag (%d)\n", flag);
		}
	}
#endif

#else /* !MTK_BTIF_ENABLE_CLK_CTL */

#if !MTK_BTIF_ENABLE_CLK_REF_COUNTER
	status = flag;
#endif

	i_ret = 0;
#endif

	spin_unlock_irqrestore(&(g_clk_cg_spinlock), irq_flag);

	if (0 == i_ret) {
		BTIF_DBG_FUNC("dma clock %s\n",
			      CLK_OUT_ENABLE == flag ? "enabled" : "disabled");
	} else {
		BTIF_ERR_FUNC("%s dma clock failed, ret(%d)\n",
			      CLK_OUT_ENABLE == flag ? "enable" : "disable",
			      i_ret);
	}
#if MTK_BTIF_ENABLE_CLK_CTL
	BTIF_DBG_FUNC("DMA's clock is %s\n",
		      (0 == clock_is_on(MTK_BTIF_APDMA_CLK_CG)) ? "off" : "on");
#endif
	return i_ret;
}
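The MTK_BTIF_ENABLE_CLK_REF_COUNTER branch above is a ref-counted gate: only the 0 to 1 transition of atomic_inc_return() enables the hardware clock, and only the 1 to 0 transition of atomic_dec_return() disables it. A minimal sketch of that transition logic with C11 atomics and stand-in hardware hooks (hw_clock_enable/hw_clock_disable are illustrative, not the MTK API):

#include <stdatomic.h>
#include <stdio.h>

static atomic_int s_clk_ref;

/* stand-ins for enable_clock()/disable_clock() */
int hw_clock_enable(void)  { puts("CG on");  return 0; }
int hw_clock_disable(void) { puts("CG off"); return 0; }

int clk_ctrl(int enable)
{
	if (enable) {
		/* only the 0 -> 1 transition touches the hardware */
		if (atomic_fetch_add(&s_clk_ref, 1) + 1 == 1)
			return hw_clock_enable();
	} else {
		/* only the 1 -> 0 transition touches the hardware */
		if (atomic_fetch_sub(&s_clk_ref, 1) - 1 == 0)
			return hw_clock_disable();
	}
	return 0;
}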
Example 11
static int cpufreq_governor_interactivex(struct cpufreq_policy *new_policy,
		unsigned int event)
{
	int rc;
	unsigned int min_freq = ~0;
	unsigned int max_freq = 0;
	unsigned int i;
	struct cpufreq_frequency_table *freq_table;

	switch (event) {
	case CPUFREQ_GOV_START:
		if (!cpu_online(new_policy->cpu))
			return -EINVAL;

		/*
		 * Do not register the idle hook and create sysfs
		 * entries if we have already done so.
		 */
		if (atomic_inc_return(&active_count) > 1)
			return 0;

		rc = sysfs_create_group(cpufreq_global_kobject,
				&interactivex_attr_group);
		if (rc)
			return rc;

		pm_idle_old = pm_idle;
		pm_idle = cpufreq_idle;
		policy = new_policy;
		enabled = 1;
		register_early_suspend(&interactivex_power_suspend);
		pr_info("[imoseyon] interactiveX active\n");
		freq_table = cpufreq_frequency_get_table(new_policy->cpu);
		for (i = 0; (freq_table[i].frequency != CPUFREQ_TABLE_END); i++) {
			unsigned int freq = freq_table[i].frequency;
			if (freq == CPUFREQ_ENTRY_INVALID) {
				continue;
			}
			if (freq < min_freq)	
				min_freq = freq;
			if (freq > max_freq)
				max_freq = freq;
		}
		/* pick a value in the midrange of the available CPU
		 * frequencies if enough frequency bins are available */
		resum_speed = freq_table[(i - 1) / 2].frequency > min_freq ?
				freq_table[(i - 1) / 2].frequency : max_freq;
		freq_threshld = max_freq;
		break;

	case CPUFREQ_GOV_STOP:
		if (atomic_dec_return(&active_count) > 1)
			return 0;

		sysfs_remove_group(cpufreq_global_kobject,
				&interactivex_attr_group);

		pm_idle = pm_idle_old;
		del_timer(&per_cpu(cpu_timer, new_policy->cpu));
		enabled = 0;
		unregister_early_suspend(&interactivex_power_suspend);
		pr_info("[imoseyon] interactiveX inactive\n");
		break;

	case CPUFREQ_GOV_LIMITS:
		if (new_policy->max < new_policy->cur)
			__cpufreq_driver_target(new_policy,
					new_policy->max, CPUFREQ_RELATION_H);
		else if (new_policy->min > new_policy->cur)
			__cpufreq_driver_target(new_policy,
					new_policy->min, CPUFREQ_RELATION_L);
		break;
	}
	return 0;
}
Example 12
static void netvsc_send_completion(struct netvsc_device *net_device,
                                   struct vmbus_channel *incoming_channel,
				   struct hv_device *device,
				   struct vmpacket_descriptor *packet)
{
	struct nvsp_message *nvsp_packet;
	struct hv_netvsc_packet *nvsc_packet;
	struct net_device *ndev;
	u32 send_index;
	struct sk_buff *skb;

	ndev = net_device->ndev;

	nvsp_packet = (struct nvsp_message *)((unsigned long)packet +
			(packet->offset8 << 3));

	if ((nvsp_packet->hdr.msg_type == NVSP_MSG_TYPE_INIT_COMPLETE) ||
	    (nvsp_packet->hdr.msg_type ==
	     NVSP_MSG1_TYPE_SEND_RECV_BUF_COMPLETE) ||
	    (nvsp_packet->hdr.msg_type ==
	     NVSP_MSG1_TYPE_SEND_SEND_BUF_COMPLETE) ||
	    (nvsp_packet->hdr.msg_type ==
	     NVSP_MSG5_TYPE_SUBCHANNEL)) {
		/* Copy the response back */
		memcpy(&net_device->channel_init_pkt, nvsp_packet,
		       sizeof(struct nvsp_message));
		complete(&net_device->channel_init_wait);
	} else if (nvsp_packet->hdr.msg_type ==
		   NVSP_MSG1_TYPE_SEND_RNDIS_PKT_COMPLETE) {
		int num_outstanding_sends;
		u16 q_idx = 0;
		struct vmbus_channel *channel = device->channel;
		int queue_sends;

		/* Get the send context */
		skb = (struct sk_buff *)(unsigned long)packet->trans_id;
	
		/* Notify the layer above us */
		if (skb) {
			nvsc_packet = (struct hv_netvsc_packet *) skb->cb;
			send_index = nvsc_packet->send_buf_index;
			if (send_index != NETVSC_INVALID_INDEX)
				netvsc_free_send_slot(net_device, send_index);
			q_idx = nvsc_packet->q_idx;
			channel = incoming_channel;
			dev_kfree_skb_any(skb);
		}

		num_outstanding_sends =
			atomic_dec_return(&net_device->num_outstanding_sends);
		queue_sends = atomic_dec_return(&net_device->
						queue_sends[q_idx]);

		if (net_device->destroy && num_outstanding_sends == 0)
			wake_up(&net_device->wait_drain);

		if (netif_tx_queue_stopped(netdev_get_tx_queue(ndev, q_idx)) &&
		    !net_device->start_remove &&
		    (hv_ringbuf_avail_percent(&channel->outbound) >
		     RING_AVAIL_PERCENT_HIWATER || queue_sends < 1))
				netif_tx_wake_queue(netdev_get_tx_queue(
						    ndev, q_idx));
	} else {
		netdev_err(ndev, "Unknown send completion packet type- "
			   "%d received!!\n", nvsp_packet->hdr.msg_type);
	}

}
Example 13
/**
 * kdbus_node_release() - Release an active ref on a node
 * @node:	The node
 *
 * This releases an active reference that was previously acquired via
 * kdbus_node_acquire(). See kdbus_node_acquire() for details.
 */
void kdbus_node_release(struct kdbus_node *node)
{
	if (node && atomic_dec_return(&node->active) == KDBUS_NODE_BIAS)
		wake_up(&node->waitq);
}
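kdbus compares the decremented count against a bias rather than against zero: the drain path adds a large negative KDBUS_NODE_BIAS so that, once every outstanding active reference is released, the counter comes to rest exactly at the bias and the drainer can be woken. A hedged sketch of that technique with C11 atomics; the bias value and both function bodies are illustrative, not kdbus internals:

#include <stdatomic.h>
#include <stdio.h>

#define NODE_BIAS (-(1 << 20))	/* illustrative; not kdbus' actual value */

static atomic_int active;

/* refuse new active refs once the bias marks the node as draining */
int node_acquire(void)
{
	int v = atomic_load(&active);

	while (v >= 0) {
		if (atomic_compare_exchange_weak(&active, &v, v + 1))
			return 1;
	}
	return 0;
}

/* the drainer has added NODE_BIAS; once every acquired ref is gone,
 * the counter comes to rest exactly at NODE_BIAS */
void node_release(void)
{
	if (atomic_fetch_sub(&active, 1) - 1 == NODE_BIAS)
		puts("drained: wake the waiter");
}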
Example 14
static int msm_hsphy_set_suspend(struct usb_phy *uphy, int suspend)
{
	struct msm_hsphy *phy = container_of(uphy, struct msm_hsphy, phy);
	bool host = uphy->flags & PHY_HOST_MODE;
	bool chg_connected = uphy->flags & PHY_CHARGER_CONNECTED;
	int i, count;

	if (!!suspend == phy->suspended) {
		dev_dbg(uphy->dev, "%s\n", suspend ? "already suspended"
						   : "already resumed");
		return 0;
	}

	if (suspend) {
		for (i = 0; i < phy->num_ports; i++) {
			/* Clear interrupt latch register */
			writel_relaxed(ALT_INTERRUPT_MASK,
				phy->base + HS_PHY_IRQ_STAT_REG(i));

			/* Enable DP and DM HV interrupts */
			if (phy->core_ver >= MSM_CORE_VER_120)
				msm_usb_write_readback(phy->base,
						ALT_INTERRUPT_EN_REG(i),
						(LINESTATE_INTEN |
						DPINTEN | DMINTEN),
						(LINESTATE_INTEN |
						DPINTEN | DMINTEN));
			else
				msm_usb_write_readback(phy->base,
						ALT_INTERRUPT_EN_REG(i),
						DPDMHV_INT_MASK,
						DPDMHV_INT_MASK);
			if (!host) {
				/* set the following:
				 * OTGDISABLE0=1
				 * USB2_SUSPEND_N_SEL=1, USB2_SUSPEND_N=0
				 */
				if (phy->core_ver >= MSM_CORE_VER_120)
					msm_usb_write_readback(phy->base,
							HS_PHY_CTRL_COMMON_REG,
							COMMON_OTGDISABLE0,
							COMMON_OTGDISABLE0);
				else
					msm_usb_write_readback(phy->base,
						HS_PHY_CTRL_REG(i),
						OTGDISABLE0, OTGDISABLE0);

				msm_usb_write_readback(phy->base,
					HS_PHY_CTRL_REG(i),
					(USB2_SUSPEND_N_SEL | USB2_SUSPEND_N),
					USB2_SUSPEND_N_SEL);
			}

			if (!phy->ext_vbus_id)
				/* Enable PHY-based IDHV and
				 * OTGSESSVLD HV interrupts
				 */
				msm_usb_write_readback(phy->base,
					HS_PHY_CTRL_REG(i),
					(OTGSESSVLDHV_INTEN | IDHV_INTEN),
					(OTGSESSVLDHV_INTEN | IDHV_INTEN));
		}

		/* Enable PHY retention */
		if (!host && !chg_connected) {
			if (phy->core_ver == MSM_CORE_VER_120 &&
					phy->set_pllbtune)
				/*
				 * On this particular revision the PLLITUNE[1]
				 * bit acts as the control for the RETENABLEN
				 * PHY signal.
				 */
				msm_usb_write_readback(phy->base,
					HS_PHY_CTRL_COMMON_REG,
					COMMON_PLLITUNE_1, COMMON_PLLITUNE_1);
			else if (phy->core_ver >= MSM_CORE_VER_120)
				msm_usb_write_readback(phy->base,
					HS_PHY_CTRL_COMMON_REG,
					COMMON_RETENABLEN, 0);
			else
				msm_usb_write_readback(phy->base,
					HS_PHY_CTRL_REG(0),
					RETENABLEN, 0);

			if (phy->csr) {
				/* switch PHY control to USB2PHY CSRs */
				msm_usb_write_readback(phy->csr,
						USB2PHY_USB_PHY_CFG0,
						USB2PHY_OVERRIDE_EN,
						USB2PHY_OVERRIDE_EN);
				/* clear suspend_n */
				msm_usb_write_readback(phy->csr,
						USB2PHY_HS_PHY_CTRL2,
						USB2PHY_SUSPEND_N_SEL |
						USB2PHY_SUSPEND_N,
						USB2PHY_SUSPEND_N_SEL);
				/* enable retention */
				msm_usb_write_readback(phy->csr,
						USB2PHY_HS_PHY_CTRL_COMMON0,
						USB2PHY_COMMONONN |
						USB2PHY_RETENABLEN, 0);
				/* disable internal ref clock buffer */
				msm_usb_write_readback(phy->csr,
						USB2PHY_USB_PHY_REFCLK_CTRL,
						REFCLK_RXTAP_EN, 0);
				/* power down PHY */
				msm_usb_write_readback(phy->csr,
						USB2PHY_USB_PHY_PWRDOWN_CTRL,
						PWRDN_B, 0);
			}

			phy->lpm_flags |= PHY_RETENTIONED;
		}

		/* can turn off regulators if disconnected in device mode */
		if (phy->lpm_flags & PHY_RETENTIONED && !phy->cable_connected) {
			if (phy->ext_vbus_id) {
				msm_hsusb_ldo_enable(phy, 0);
				phy->lpm_flags |= PHY_PWR_COLLAPSED;
			}
			msm_hsusb_config_vdd(phy, 0);
		}

		count = atomic_dec_return(&hsphy_active_count);
		if (count < 0) {
			dev_WARN(uphy->dev, "hsphy_active_count=%d, something wrong?\n",
					count);
			atomic_set(&hsphy_active_count, 0);
		}
	} else {
		atomic_inc(&hsphy_active_count);
		if (phy->lpm_flags & PHY_RETENTIONED && !phy->cable_connected) {
			msm_hsusb_config_vdd(phy, 1);
			if (phy->ext_vbus_id) {
				msm_hsusb_ldo_enable(phy, 1);
				phy->lpm_flags &= ~PHY_PWR_COLLAPSED;
			}

			if (phy->csr) {
				/* power on PHY */
				msm_usb_write_readback(phy->csr,
						USB2PHY_USB_PHY_PWRDOWN_CTRL,
						PWRDN_B, PWRDN_B);
				/* enable internal ref clock buffer */
				msm_usb_write_readback(phy->csr,
						USB2PHY_USB_PHY_REFCLK_CTRL,
						REFCLK_RXTAP_EN,
						REFCLK_RXTAP_EN);
				/* disable retention */
				msm_usb_write_readback(phy->csr,
						USB2PHY_HS_PHY_CTRL_COMMON0,
						USB2PHY_COMMONONN |
						USB2PHY_RETENABLEN,
						USB2PHY_COMMONONN |
						USB2PHY_RETENABLEN);
				/* switch suspend_n_sel back to HW */
				msm_usb_write_readback(phy->csr,
						USB2PHY_HS_PHY_CTRL2,
						USB2PHY_SUSPEND_N_SEL |
						USB2PHY_SUSPEND_N, 0);
				msm_usb_write_readback(phy->csr,
						USB2PHY_USB_PHY_CFG0,
						USB2PHY_OVERRIDE_EN, 0);
			}

			/* Disable PHY retention */
			if (phy->core_ver == MSM_CORE_VER_120 &&
					phy->set_pllbtune)
				msm_usb_write_readback(phy->base,
					HS_PHY_CTRL_COMMON_REG,
					COMMON_PLLITUNE_1, 0);
			else if (phy->core_ver >= MSM_CORE_VER_120)
				msm_usb_write_readback(phy->base,
					HS_PHY_CTRL_COMMON_REG,
					COMMON_RETENABLEN, COMMON_RETENABLEN);
			else
				msm_usb_write_readback(phy->base,
					HS_PHY_CTRL_REG(0),
					RETENABLEN, RETENABLEN);
			phy->lpm_flags &= ~PHY_RETENTIONED;
		}

		if (phy->core_ver >= MSM_CORE_VER_120) {
			if (phy->set_pllbtune) {
				msm_usb_write_readback(phy->base,
						HS_PHY_CTRL_COMMON_REG,
						FSEL_MASK, 0);
			} else {
				msm_usb_write_readback(phy->base,
						HS_PHY_CTRL_COMMON_REG,
						FSEL_MASK, FSEL_DEFAULT);
			}
		}

		for (i = 0; i < phy->num_ports; i++) {
			if (!phy->ext_vbus_id)
				/* Disable HV interrupts */
				msm_usb_write_readback(phy->base,
					HS_PHY_CTRL_REG(i),
					(OTGSESSVLDHV_INTEN | IDHV_INTEN),
					0);

			/* Clear interrupt latch register */
			writel_relaxed(ALT_INTERRUPT_MASK,
				phy->base + HS_PHY_IRQ_STAT_REG(i));
			/* Disable DP and DM HV interrupt */
			if (phy->core_ver >= MSM_CORE_VER_120)
				msm_usb_write_readback(phy->base,
						ALT_INTERRUPT_EN_REG(i),
						LINESTATE_INTEN, 0);
			else
				msm_usb_write_readback(phy->base,
						ALT_INTERRUPT_EN_REG(i),
						DPDMHV_INT_MASK, 0);
			if (!host) {
				/* Bring PHY out of suspend */
				msm_usb_write_readback(phy->base,
						HS_PHY_CTRL_REG(i),
						USB2_SUSPEND_N_SEL, 0);

				if (phy->core_ver >= MSM_CORE_VER_120)
					msm_usb_write_readback(phy->base,
							HS_PHY_CTRL_COMMON_REG,
							COMMON_OTGDISABLE0,
							0);
				else
					msm_usb_write_readback(phy->base,
							HS_PHY_CTRL_REG(i),
							OTGDISABLE0, 0);
			}
		}
		msm_hsphy_set_params(uphy);
	}

	phy->suspended = !!suspend; /* double-NOT coerces to bool value */
	return 0;
}
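One caveat in the suspend path above: decrementing hsphy_active_count and then resetting it to 0 if the result went negative can race with a concurrent atomic_inc() on the resume path. A sketch of a decrement that refuses to go below zero in the first place, using a C11 compare-and-swap loop (dec_if_positive is an illustrative name; the kernel offers a similar atomic_dec_if_positive() helper):

#include <stdatomic.h>

static atomic_int hsphy_active_count;

/* decrement, but never below zero; returns the resulting value */
int dec_if_positive(atomic_int *v)
{
	int old = atomic_load(v);

	while (old > 0) {
		/* on failure, old is reloaded and the bound re-checked */
		if (atomic_compare_exchange_weak(v, &old, old - 1))
			return old - 1;
	}
	return old;	/* already <= 0: left unchanged */
}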
Example 15
void s5p_mfc_clock_off(struct s5p_mfc_dev *dev)
{
	int state, val;
	unsigned long timeout, flags;
	int ret = 0;

	dev->pm.clock_off_steps = 1;

	MFC_TRACE_DEV("++ clock_off\n");
	if (IS_MFCV6(dev)) {
		spin_lock_irqsave(&dev->pm.clklock, flags);
		dev->pm.clock_off_steps = 2;
		if ((atomic_dec_return(&dev->clk_ref) == 0) &&
				FW_HAS_BUS_RESET(dev)) {
			s5p_mfc_write_reg(dev, 0x1, S5P_FIMV_MFC_BUS_RESET_CTRL);

			timeout = jiffies + msecs_to_jiffies(MFC_BW_TIMEOUT);
			/* Check bus status */
			do {
				if (time_after(jiffies, timeout)) {
					mfc_err_dev("Timeout while resetting MFC.\n");
					break;
				}
				val = s5p_mfc_read_reg(dev,
						S5P_FIMV_MFC_BUS_RESET_CTRL);
			} while ((val & 0x2) == 0);
			dev->pm.clock_off_steps = 3;
		}
		spin_unlock_irqrestore(&dev->pm.clklock, flags);
	} else {
		atomic_dec_return(&dev->clk_ref);
	}

	dev->pm.clock_off_steps = 4;
	state = atomic_read(&dev->clk_ref);
	if (state < 0) {
		mfc_err_dev("Clock state is wrong(%d)\n", state);
		atomic_set(&dev->clk_ref, 0);
		dev->pm.clock_off_steps = 5;
	} else {
		if (dev->curr_ctx_drm && dev->is_support_smc) {
			mfc_debug(3, "Begin: disable protection\n");
			spin_lock_irqsave(&dev->pm.clklock, flags);
			dev->pm.clock_off_steps = 6;
			ret = exynos_smc(SMC_PROTECTION_SET, 0,
					dev->id, SMC_PROTECTION_DISABLE);
			if (!ret) {
				printk(KERN_ERR "Protection Disable failed! ret(%u)\n", ret);
				spin_unlock_irqrestore(&dev->pm.clklock, flags);
				clk_disable(dev->pm.clock);
				return;
			}
			mfc_debug(3, "End: disable protection\n");
			dev->pm.clock_off_steps = 7;
			spin_unlock_irqrestore(&dev->pm.clklock, flags);
		} else {
			dev->pm.clock_off_steps = 8;
			s5p_mfc_mem_suspend(dev->alloc_ctx[0]);
			dev->pm.clock_off_steps = 9;
		}
		dev->pm.clock_off_steps = 10;
		clk_disable(dev->pm.clock);
	}
	mfc_debug(2, "- %d\n", state);
	MFC_TRACE_DEV("-- clock_off: ref state(%d)\n", state);
	dev->pm.clock_off_steps = 11;
}