Code Example #1
static void pm_callback_runtime_off(kbase_device *kbdev)
{
	sec_debug_aux_log(SEC_DEBUG_AUXLOG_CPU_BUS_CLOCK_CHANGE,
		"g3d turn off++++");
	kbase_platform_clock_off(kbdev);
#ifdef CONFIG_MALI_T6XX_DVFS
	//if (kbase_platform_dvfs_enable(false, MALI_DVFS_CURRENT_FREQ) != MALI_TRUE)
	//	printk("[err] disabling dvfs failed\n");
#endif
	sec_debug_aux_log(SEC_DEBUG_AUXLOG_CPU_BUS_CLOCK_CHANGE,
		"g3d turn off---");
}
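
All of the examples on this page funnel into the same auxiliary log buffer. As a reference point, here is a minimal sketch of the interface these call sites assume, modeled on the sec_debug.h headers found in Samsung trees; the exact prototype and config guard vary by kernel version, so treat it as an assumption rather than the canonical declaration:

#ifdef CONFIG_SEC_DEBUG_AUXILIARY_LOG
extern void sec_debug_aux_log(int idx, char *fmt, ...);	/* printf-style, tagged by log index */
#else
#define sec_debug_aux_log(idx, ...) do { } while (0)	/* compiled away when disabled */
#endif

With the config option off, the macro makes every call site vanish, which is why these examples can afford aux-log calls in hot paths such as IRQ handlers and DVFS transitions.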
Code Example #2
static int pm_callback_runtime_on(kbase_device *kbdev)
{
	int ret;
	struct clk *mout_vpll = NULL, *aclk_g3d_sw = NULL, *aclk_g3d_dout = NULL;
	struct device *dev = kbdev->osdev.dev;
	struct exynos_context *platform = (struct exynos_context *) kbdev->platform_context;

	sec_debug_aux_log(SEC_DEBUG_AUXLOG_CPU_BUS_CLOCK_CHANGE,
		"g3d turn on++++");

	kbase_platform_clock_on(kbdev);
#ifdef CONFIG_MALI_T6XX_DVFS
	//if (kbase_platform_dvfs_enable(true, MALI_DVFS_START_FREQ) != MALI_TRUE)
	//	return -EPERM;
#endif
	mout_vpll = clk_get(dev, "mout_vpll");
	if (IS_ERR(mout_vpll)) {
		KBASE_DEBUG_PRINT_ERROR(KBASE_CORE, "failed to clk_get [mout_vpll]\n");
		return 0;
	}
	aclk_g3d_dout = clk_get(dev, "aclk_g3d_dout");
	if (IS_ERR(aclk_g3d_dout)) {
		KBASE_DEBUG_PRINT_ERROR(KBASE_CORE, "failed to clk_get [aclk_g3d_dout]\n");
		return 0;
	}
	aclk_g3d_sw = clk_get(dev, "aclk_g3d_sw");
	if (IS_ERR(aclk_g3d_sw)) {
		KBASE_DEBUG_PRINT_ERROR(KBASE_CORE, "failed to clk_get [aclk_g3d_sw]\n");
		return 0;
	}

	ret = clk_set_parent(platform->aclk_g3d, aclk_g3d_sw);
	if (ret < 0) {
		KBASE_DEBUG_PRINT_ERROR(KBASE_CORE, "failed to clk_set_parent [platform->aclk_g3d]\n");
		return 0;
	}
	ret = clk_set_parent(aclk_g3d_sw, aclk_g3d_dout);
	if (ret < 0) {
		KBASE_DEBUG_PRINT_ERROR(KBASE_CORE, "failed to clk_set_parent [aclk_g3d_sw]\n");
		return 0;
	}
	ret = clk_set_parent(aclk_g3d_dout, mout_vpll);
	if (ret < 0) {
		KBASE_DEBUG_PRINT_ERROR(KBASE_CORE, "failed to clk_set_parent [aclk_g3d_dout]\n");
		return 0;
	}

	sec_debug_aux_log(SEC_DEBUG_AUXLOG_CPU_BUS_CLOCK_CHANGE,
		"g3d turn on---");

	return 0;
}
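
Note that each early return above leaks the struct clk references already taken with clk_get() and still reports success (0) to the runtime-PM core. A hypothetical cleanup for one of those paths, using the standard clk API:

	aclk_g3d_sw = clk_get(dev, "aclk_g3d_sw");
	if (IS_ERR(aclk_g3d_sw)) {
		KBASE_DEBUG_PRINT_ERROR(KBASE_CORE, "failed to clk_get [aclk_g3d_sw]\n");
		clk_put(aclk_g3d_dout);		/* release the references taken earlier */
		clk_put(mout_vpll);
		return PTR_ERR(aclk_g3d_sw);	/* propagate the real error code */
	}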
Code Example #3
static irqreturn_t exynos_tmu_irq(int irq, void *id)
{
	struct exynos_tmu_data *data = id;
	unsigned int status = 0;

	pr_debug("[TMUIRQ] irq = %d\n", irq);
	sec_debug_aux_log(SEC_DEBUG_AUXLOG_THERMAL_CHANGE, "[IRQ] %d", irq);

	if (data->soc == SOC_ARCH_EXYNOS4) {
		status = readl(data->base[0] + EXYNOS_TMU_REG_INTSTAT);
		writel(EXYNOS4270_TMU_CLEAR_RISE_INT|EXYNOS4270_TMU_CLEAR_FALL_INT,
				data->base[0] + EXYNOS_TMU_REG_INTCLEAR);

		if (status & (1 << FALL_LEVEL1_SHIFT))
			writel(EXYNOS4270_TMU_DISABLE_FALL1, data->base[0] + EXYNOS_TMU_REG_INTEN);
		else if (status & (1 << FALL_LEVEL0_SHIFT))
			writel(EXYNOS4270_TMU_DISABLE_FALL0, data->base[0] + EXYNOS_TMU_REG_INTEN);
		else if (status & (1 << RISE_LEVEL1_SHIFT))
			writel(EXYNOS4270_TMU_DISABLE_RISE1, data->base[0] + EXYNOS_TMU_REG_INTEN);
		else if (status & 1)
			writel(EXYNOS4270_TMU_DISABLE_RISE0, data->base[0] + EXYNOS_TMU_REG_INTEN);
	}
	schedule_work(&data->irq_work);

	return IRQ_HANDLED;
}
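
The hard-IRQ half above only latches the status, clears the pending bits, and masks the level that fired; everything slow is deferred through schedule_work(). The pairing is established once at probe time with the standard workqueue API, roughly as follows (exynos_tmu_work names the worker in the mainline driver, but take it as illustrative here):

	INIT_WORK(&data->irq_work, exynos_tmu_work);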
Code Example #4
static int exynos_tmu_read(struct exynos_tmu_data *data)
{
	u8 temp_code;
	int temp, i;
	int cold_event = old_cold;
	int hot_event = old_hot;
	int alltemp[EXYNOS_TMU_COUNT] = {0,};

	if (!th_zone || !th_zone->therm_dev)
		return -EPERM;

	mutex_lock(&data->lock);
	clk_enable(data->clk);

	for (i = 0; i < EXYNOS_TMU_COUNT; i++) {
		temp_code = readb(data->base[i] + EXYNOS_TMU_REG_CURRENT_TEMP);
		temp = code_to_temp(data, temp_code);
		alltemp[i] = temp;
	}

	clk_disable(data->clk);
	mutex_unlock(&data->lock);

	if (temp <= THRESH_MEM_TEMP0)
		cold_event = TMU_COLD;
	else
		cold_event = TMU_NORMAL;

	if (old_hot != TMU_THR_LV3 && temp >= THRESH_MEM_TEMP2)
		hot_event = TMU_THR_LV3;
	else if (old_hot != TMU_THR_LV2 && (temp >= THRESH_MEM_TEMP1 && temp < THRESH_MEM_TEMP2))
		hot_event = TMU_THR_LV2;
	else if (old_hot != TMU_THR_LV1 && (temp >= THRESH_MEM_TEMP0 && temp < THRESH_MEM_TEMP1))
		hot_event = TMU_THR_LV1;

	sec_debug_aux_log(SEC_DEBUG_AUXLOG_THERMAL_CHANGE, "[TMU] %d", temp);
	//printk("[TMU] %s : Thermal Read %d C", __func__, temp); Found it!
#ifdef CONFIG_EXYNOS4_EXPORT_TEMP
	tmu_curr_temperature = temp;
#endif
	th_zone->therm_dev->last_temperature = temp;
	exynos_tmu_call_notifier(cold_event, hot_event);
	sec_debug_aux_log(SEC_DEBUG_AUXLOG_THERMAL_CHANGE, "[TMU] alltemp[0]: %d", alltemp[0]);

	return temp;
}
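
One quirk: after the read loop, temp holds whatever the last sensor returned, and both threshold ladders above compare against that single value even though alltemp[] captured every sensor. A hypothetical variant that keys the cold/hot events off the hottest reading instead:

	int max_temp = alltemp[0];
	for (i = 1; i < EXYNOS_TMU_COUNT; i++)
		if (alltemp[i] > max_temp)	/* track the hottest sensor */
			max_temp = alltemp[i];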
Code Example #5
void kbase_platform_dvfs_set_level(kbase_device *kbdev, int level)
{
	static int prev_level = -1;
	int mif_qos, int_qos, cpu_qos;

#ifdef MALI_DEBUG
	printk(KERN_INFO "\n[mali_devfreq]dvfs level:%d\n", level);
#endif
	if (level == prev_level)
		return;

	if (WARN_ON((level >= MALI_DVFS_STEP) || (level < 0)))
		panic("invalid level");

#ifdef CONFIG_MALI_T6XX_DVFS
	mutex_lock(&mali_set_clock_lock);
#endif

	sec_debug_aux_log(SEC_DEBUG_AUXLOG_CPU_BUS_CLOCK_CHANGE,
			"old:%7d new:%7d (G3D)",
			mali_dvfs_infotbl[prev_level].clock*1000,
			mali_dvfs_infotbl[level].clock*1000);

	mif_qos = mali_dvfs_infotbl[level].mem_freq;
	int_qos = mali_dvfs_infotbl[level].int_freq;
	cpu_qos = mali_dvfs_infotbl[level].cpu_freq;

	if (level > prev_level) {
#if defined(CONFIG_ARM_EXYNOS5420_BUS_DEVFREQ)
		pm_qos_update_request(&exynos5_g3d_mif_qos, mif_qos);
		pm_qos_update_request(&exynos5_g3d_int_qos, int_qos);
		pm_qos_update_request(&exynos5_g3d_cpu_qos, cpu_qos);
#endif
		kbase_platform_dvfs_set_vol(mali_dvfs_infotbl[level].voltage + gpu_voltage_margin);
		kbase_platform_dvfs_set_clock(kbdev, mali_dvfs_infotbl[level].clock);
		bts_change_g3d_state(mali_dvfs_infotbl[level].clock);
	} else {
		bts_change_g3d_state(mali_dvfs_infotbl[level].clock);
		kbase_platform_dvfs_set_clock(kbdev, mali_dvfs_infotbl[level].clock);
		kbase_platform_dvfs_set_vol(mali_dvfs_infotbl[level].voltage + gpu_voltage_margin);
#if defined(CONFIG_ARM_EXYNOS5420_BUS_DEVFREQ)
		pm_qos_update_request(&exynos5_g3d_mif_qos, mif_qos);
		pm_qos_update_request(&exynos5_g3d_int_qos, int_qos);
		pm_qos_update_request(&exynos5_g3d_cpu_qos, cpu_qos);
#endif
	}
#if defined(CONFIG_MALI_T6XX_DEBUG_SYS) && defined(CONFIG_MALI_T6XX_DVFS)
	update_time_in_state(prev_level);
#endif
	prev_level = level;
#ifdef CONFIG_MALI_T6XX_DVFS
	mutex_unlock(&mali_set_clock_lock);
#endif
}
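
On the very first call prev_level is still -1, so the aux-log statement indexes mali_dvfs_infotbl[-1]; the WARN_ON() only bounds-checks level. A defensive variant of that call (hypothetical fix, same format string):

	if (prev_level >= 0)	/* no "old" frequency to report yet */
		sec_debug_aux_log(SEC_DEBUG_AUXLOG_CPU_BUS_CLOCK_CHANGE,
				"old:%7d new:%7d (G3D)",
				mali_dvfs_infotbl[prev_level].clock * 1000,
				mali_dvfs_infotbl[level].clock * 1000);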
Code Example #6
File: softirq.c Project: awehoky/Googy-Max-N4-Kernel
static void tasklet_hi_action(struct softirq_action *a)
{
	struct tasklet_struct *list;

	local_irq_disable();
	list = __this_cpu_read(tasklet_hi_vec.head);
	__this_cpu_write(tasklet_hi_vec.head, NULL);
	__this_cpu_write(tasklet_hi_vec.tail, &__get_cpu_var(tasklet_hi_vec).head);
	local_irq_enable();

	while (list) {
		struct tasklet_struct *t = list;

		list = list->next;

		if (tasklet_trylock(t)) {
			if (!atomic_read(&t->count)) {
#ifdef CONFIG_SEC_DEBUG_RT_THROTTLE_ACTIVE
				unsigned long long start_time, end_time;

				start_time = sec_debug_clock();
#endif
				if (!test_and_clear_bit(TASKLET_STATE_SCHED, &t->state))
					BUG();
				sec_debug_softirq_log(9998, t->func, 7);
				t->func(t->data);
				sec_debug_softirq_log(9998, t->func, 8);
#ifdef CONFIG_SEC_DEBUG_RT_THROTTLE_ACTIVE
				end_time = sec_debug_clock();
				if (start_time + 950000000 < end_time) {
					sec_debug_aux_log(SEC_DEBUG_AUXLOG_IRQ, "TH:%llu %pf", start_time, t->func);
				}
#endif
				tasklet_unlock(t);
				continue;
			}
			tasklet_unlock(t);
		}

		local_irq_disable();
		t->next = NULL;
		*__this_cpu_read(tasklet_hi_vec.tail) = t;
		__this_cpu_write(tasklet_hi_vec.tail, &(t->next));
		__raise_softirq_irqoff(HI_SOFTIRQ);
		local_irq_enable();
	}
}
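
The start_time/end_time bracket around t->func() is a latency watchdog: sec_debug_clock() returns a nanosecond timestamp in these trees, so the start_time + 950000000 < end_time test flags any tasklet that ran for more than roughly 950 ms. The same bracket recurs around the softirq actions in example #8 and the IRQ handlers in example #9. Stripped to a skeleton (the helper and its arguments are illustrative, not part of the original code):

static void traced_call(void (*fn)(unsigned long), unsigned long data)
{
	unsigned long long t0 = sec_debug_clock();	/* ns timestamp */

	fn(data);					/* the traced handler */
	if (sec_debug_clock() - t0 > 950000000ULL)	/* ran longer than ~950 ms */
		sec_debug_aux_log(SEC_DEBUG_AUXLOG_IRQ, "TH:%llu %pf", t0, fn);
}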
Code Example #7
static int gpu_set_clock(struct exynos_context *platform, int clk)
{
	long g3d_rate_prev = -1;
	unsigned long g3d_rate = clk * MHZ;
	int ret = 0;

	if (aclk_g3d == 0)
		return -1;

#ifdef CONFIG_MALI_RT_PM
	if (platform->exynos_pm_domain)
		mutex_lock(&platform->exynos_pm_domain->access_lock);
#endif /* CONFIG_MALI_RT_PM */

	if (!gpu_is_power_on()) {
		ret = -1;
		GPU_LOG(DVFS_INFO, DUMMY, 0u, 0u, "%s: can't set clock in the power-off state!\n", __func__);
		goto err;
	}

	if (!gpu_is_clock_on()) {
		ret = -1;
		GPU_LOG(DVFS_INFO, DUMMY, 0u, 0u, "%s: can't set clock in the clock-off state! %d\n", __func__, __raw_readl(EXYNOS5430_ENABLE_ACLK_G3D));
		goto err;
	}

	g3d_rate_prev = clk_get_rate(aclk_g3d);

	/* if changed the VPLL rate, set rate for VPLL and wait for lock time */
	if (g3d_rate != g3d_rate_prev) {
		/*change here for future stable clock changing*/
		ret = clk_set_parent(mout_g3d_pll, fin_pll);
		if (ret < 0) {
			GPU_LOG(DVFS_ERROR, DUMMY, 0u, 0u, "%s: failed to clk_set_parent [mout_g3d_pll]\n", __func__);
			goto err;
		}

		if (g3d_rate_prev != GPU_OSC_CLK)
			sec_debug_aux_log(SEC_DEBUG_AUXLOG_CPU_BUS_CLOCK_CHANGE,
				"[GPU] %7d <= %7d", g3d_rate / 1000, g3d_rate_prev / 1000);

		/*change g3d pll*/
		ret = clk_set_rate(fout_g3d_pll, g3d_rate);
		if (ret < 0) {
			GPU_LOG(DVFS_ERROR, DUMMY, 0u, 0u, "%s: failed to clk_set_rate [fout_g3d_pll]\n", __func__);
			goto err;
		}

		/*restore parent*/
		ret = clk_set_parent(mout_g3d_pll, fout_g3d_pll);
		if (ret < 0) {
			GPU_LOG(DVFS_ERROR, DUMMY, 0u, 0u, "%s: failed to clk_set_parent [mout_g3d_pll]\n", __func__);
			goto err;
		}
	}

	platform->cur_clock = gpu_get_cur_clock(platform);

	if (platform->cur_clock != clk_get_rate(fout_g3d_pll)/MHZ)
		GPU_LOG(DVFS_ERROR, DUMMY, 0u, 0u, "clock value is wrong (aclk_g3d: %d, fout_g3d_pll: %d)\n",
				platform->cur_clock, (int) clk_get_rate(fout_g3d_pll)/MHZ);

	if (g3d_rate != g3d_rate_prev)
		GPU_LOG(DVFS_DEBUG, LSI_CLOCK_VALUE, g3d_rate/MHZ, platform->cur_clock, "clock set: %d, clock get: %d\n", (int) g3d_rate/MHZ, platform->cur_clock);
err:
#ifdef CONFIG_MALI_RT_PM
	if (platform->exynos_pm_domain)
		mutex_unlock(&platform->exynos_pm_domain->access_lock);
#endif /* CONFIG_MALI_RT_PM */
	return ret;
}
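
The reparent/relock/reparent sequence in the middle is the standard way to change a PLL rate without glitching its consumers: mout_g3d_pll is first parked on fin_pll (the external oscillator input, the rate GPU_OSC_CLK appears to refer to) so the G3D domain keeps a valid clock while fout_g3d_pll relocks at the new rate, and only then is the mux switched back to the PLL output.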
Code Example #8
File: softirq.c Project: awehoky/Googy-Max-N4-Kernel
asmlinkage void __do_softirq(void)
{
	struct softirq_action *h;
	__u32 pending;
	unsigned long end = jiffies + MAX_SOFTIRQ_TIME;
	int cpu;
	unsigned long old_flags = current->flags;
	int max_restart = MAX_SOFTIRQ_RESTART;

	/*
	 * Mask out PF_MEMALLOC as the current task context is borrowed for
	 * the softirq. A softirq handler such as network RX might set
	 * PF_MEMALLOC again if the socket is related to swap.
	 */
	current->flags &= ~PF_MEMALLOC;

	pending = local_softirq_pending();
	account_irq_enter_time(current);

	__local_bh_disable((unsigned long)__builtin_return_address(0),
				SOFTIRQ_OFFSET);
	lockdep_softirq_enter();

	cpu = smp_processor_id();
restart:
	/* Reset the pending bitmask before enabling irqs */
	set_softirq_pending(0);

	local_irq_enable();

	h = softirq_vec;

	do {
		if (pending & 1) {
			unsigned int vec_nr = h - softirq_vec;
			int prev_count = preempt_count();
#ifdef CONFIG_SEC_DEBUG_RT_THROTTLE_ACTIVE
			unsigned long long start_time, end_time;

			start_time = sec_debug_clock();
#endif
			kstat_incr_softirqs_this_cpu(vec_nr);

			trace_softirq_entry(vec_nr);
			sec_debug_softirq_log(9999, h->action, 7);
			h->action(h);
			sec_debug_softirq_log(9999, h->action, 8);
			trace_softirq_exit(vec_nr);
#ifdef CONFIG_SEC_DEBUG_RT_THROTTLE_ACTIVE
			end_time = sec_debug_clock();
			if (start_time + 950000000 < end_time) {
				sec_debug_aux_log(SEC_DEBUG_AUXLOG_IRQ, "S:%llu %pf", start_time, h->action);
			}
#endif
			if (unlikely(prev_count != preempt_count())) {
				printk(KERN_ERR "huh, entered softirq %u %s %p"
				       "with preempt_count %08x,"
				       " exited with %08x?\n", vec_nr,
				       softirq_to_name[vec_nr], h->action,
				       prev_count, preempt_count());
				preempt_count() = prev_count;
			}

			rcu_bh_qs(cpu);
		}
		h++;
		pending >>= 1;
	} while (pending);

	local_irq_disable();

	pending = local_softirq_pending();
	if (pending) {
		if (time_before(jiffies, end) && !need_resched() &&
		    --max_restart)
			goto restart;

		wakeup_softirqd();
	}

	lockdep_softirq_exit();

	account_irq_exit_time(current);
	__local_bh_enable(SOFTIRQ_OFFSET);
	tsk_restore_flags(current, old_flags, PF_MEMALLOC);
}
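
A historical note on the recovery path: preempt_count() = prev_count; compiles only on kernels of this vintage, where preempt_count() expanded to the lvalue current_thread_info()->preempt_count; later kernels made it a plain read and introduced preempt_count_set() for exactly this fixup.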
Code Example #9
irqreturn_t
handle_irq_event_percpu(struct irq_desc *desc, struct irqaction *action)
{
	irqreturn_t retval = IRQ_NONE;
	unsigned int flags = 0, irq = desc->irq_data.irq;

	do {
		irqreturn_t res;
#ifdef CONFIG_SEC_DEBUG_RT_THROTTLE_ACTIVE
		unsigned long long start_time, end_time;

		start_time = sec_debug_clock();
#endif
		sec_debug_irq_log(irq, (void *)action->handler, 1);
		trace_irq_handler_entry(irq, action);
		exynos_ss_irq(irq, (void *)action->handler, (int)irqs_disabled(), ESS_FLAG_IN);
		res = action->handler(irq, action->dev_id);
		exynos_ss_irq(irq, (void *)action->handler, (int)irqs_disabled(), ESS_FLAG_OUT);
		trace_irq_handler_exit(irq, action, res);
		sec_debug_irq_log(irq, (void *)action->handler, 2);
#ifdef CONFIG_SEC_DEBUG_RT_THROTTLE_ACTIVE
		end_time = sec_debug_clock();
		if (start_time + 950000000 < end_time) {
			sec_debug_aux_log(SEC_DEBUG_AUXLOG_IRQ, "I:%llu %pf", start_time, action->handler);
		}
#endif

		if (WARN_ONCE(!irqs_disabled(), "irq %u handler %pF enabled interrupts\n",
			      irq, action->handler))
			local_irq_disable();

		switch (res) {
		case IRQ_WAKE_THREAD:
			/*
			 * Catch drivers which return WAKE_THREAD but
			 * did not set up a thread function
			 */
			if (unlikely(!action->thread_fn)) {
				warn_no_thread(irq, action);
				break;
			}

			irq_wake_thread(desc, action);

			/* Fall through to add to randomness */
		case IRQ_HANDLED:
			flags |= action->flags;
			break;

		default:
			break;
		}

		retval |= res;
		action = action->next;
	} while (action);

	add_interrupt_randomness(irq, flags);

	if (!noirqdebug)
		note_interrupt(irq, desc, retval);
	return retval;
}
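
The missing break after irq_wake_thread() is deliberate, as the comment notes: a handler returning IRQ_WAKE_THREAD falls through into the IRQ_HANDLED case so its action->flags still accumulate into flags and feed add_interrupt_randomness() after the loop.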
Code Example #10
File: hrtimer.c Project: halaszk/SM-V700
/*
 * High resolution timer interrupt
 * Called with interrupts disabled
 */
void hrtimer_interrupt(struct clock_event_device *dev)
{
	struct hrtimer_cpu_base *cpu_base = &__get_cpu_var(hrtimer_bases);
	ktime_t expires_next, now, entry_time, delta;
	int i, retries = 0;

	BUG_ON(!cpu_base->hres_active);
	cpu_base->nr_events++;
	dev->next_event.tv64 = KTIME_MAX;

	entry_time = now = ktime_get();
retry:
	sec_debug_aux_log(SEC_DEBUG_AUXLOG_HRTIMER_CHANGE,
			"hrtimer_interrupt now:%lld, retry:%d", now.tv64, retries);

	expires_next.tv64 = KTIME_MAX;

	raw_spin_lock(&cpu_base->lock);
	/*
	 * We set expires_next to KTIME_MAX here with cpu_base->lock
	 * held to prevent that a timer is enqueued in our queue via
	 * the migration code. This does not affect enqueueing of
	 * timers which run their callback and need to be requeued on
	 * this CPU.
	 */
	cpu_base->expires_next.tv64 = KTIME_MAX;

	for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++) {
		struct hrtimer_clock_base *base;
		struct timerqueue_node *node;
		ktime_t basenow;

		if (!(cpu_base->active_bases & (1 << i)))
			continue;

		base = cpu_base->clock_base + i;
		basenow = ktime_add(now, base->offset);

		while ((node = timerqueue_getnext(&base->active))) {
			struct hrtimer *timer;

			timer = container_of(node, struct hrtimer, node);

			/*
			 * The immediate goal for using the softexpires is
			 * minimizing wakeups, not running timers at the
			 * earliest interrupt after their soft expiration.
			 * This allows us to avoid using a Priority Search
			 * Tree, which can answer a stabbing query for
			 * overlapping intervals and instead use the simple
			 * BST we already have.
			 * We don't add extra wakeups by delaying timers that
			 * are right-of a not yet expired timer, because that
			 * timer will have to trigger a wakeup anyway.
			 */

			sec_debug_hrtimer_log(timer, &basenow.tv64, timer->function, 0);
			sec_debug_aux_log(SEC_DEBUG_AUXLOG_HRTIMER_CHANGE,
					"i:%d, now:%lld, basenow:%lld, _softexpire:%lld",
					i, now.tv64, basenow.tv64,
					hrtimer_get_softexpires_tv64(timer));

			if (basenow.tv64 < hrtimer_get_softexpires_tv64(timer)) {
				ktime_t expires;

				expires = ktime_sub(hrtimer_get_expires(timer),
						    base->offset);
				if (expires.tv64 < expires_next.tv64)
					expires_next = expires;
				break;
			}

			__run_hrtimer(timer, &basenow);
		}
	}

	/*
	 * Store the new expiry value so the migration code can verify
	 * against it.
	 */
	cpu_base->expires_next = expires_next;
	raw_spin_unlock(&cpu_base->lock);

	/* Reprogramming necessary ? */
	if (expires_next.tv64 == KTIME_MAX ||
	    !tick_program_event(expires_next, 0)) {
		cpu_base->hang_detected = 0;
		sec_debug_aux_log(SEC_DEBUG_AUXLOG_HRTIMER_CHANGE,
				"hrtimer_interrupt exit now:%lld, retry:%d",
				now.tv64, retries);
		return;
	}

	/*
	 * The next timer was already expired due to:
	 * - tracing
	 * - long lasting callbacks
	 * - being scheduled away when running in a VM
	 *
	 * We need to prevent that we loop forever in the hrtimer
	 * interrupt routine. We give it 3 attempts to avoid
	 * overreacting on some spurious event.
	 */
	now = ktime_get();
	cpu_base->nr_retries++;
	if (++retries < 3)
		goto retry;
	/*
	 * Give the system a chance to do something else than looping
	 * here. We stored the entry time, so we know exactly how long
	 * we spent here. We schedule the next event this amount of
	 * time away.
	 */
	cpu_base->nr_hangs++;
	cpu_base->hang_detected = 1;
	delta = ktime_sub(now, entry_time);
	if (delta.tv64 > cpu_base->max_hang_time.tv64)
		cpu_base->max_hang_time = delta;
	/*
	 * Limit it to a sensible value as we enforce a longer
	 * delay. Give the CPU at least 100ms to catch up.
	 */
	if (delta.tv64 > 100 * NSEC_PER_MSEC)
		expires_next = ktime_add_ns(now, 100 * NSEC_PER_MSEC);
	else
		expires_next = ktime_add(now, delta);
	tick_program_event(expires_next, 1);
	printk_once(KERN_WARNING "hrtimer: interrupt took %llu ns\n",
		    ktime_to_ns(delta));
}
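
The tail implements the hang recovery described in the comments: up to three immediate retries, after which the function gives up, records the hang, and programs the next event one delta away, capped at 100 ms (100 * NSEC_PER_MSEC), so the CPU gets room to drain the backlog; the printk_once() line then reports how long the interrupt handler ran.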