Example #1
static int fb_counter_event(struct notifier_block *self, unsigned long cmd,
			    void *args)
{
	int ret = NOTIFY_OK;
	unsigned int cpu;
	struct fblock *fb;
	struct fb_counter_priv __percpu *fb_priv;

	rcu_read_lock();
	fb = rcu_dereference_raw(container_of(self, struct fblock_notifier, nb)->self);
	fb_priv = (struct fb_counter_priv __percpu *) rcu_dereference_raw(fb->private_data);
	rcu_read_unlock();

	switch (cmd) {
	case FBLOCK_BIND_IDP: {
		int bound = 0;
		struct fblock_bind_msg *msg = args;
		get_online_cpus();
		for_each_online_cpu(cpu) {
			struct fb_counter_priv *fb_priv_cpu;
			fb_priv_cpu = per_cpu_ptr(fb_priv, cpu);
			if (fb_priv_cpu->port[msg->dir] == IDP_UNKNOWN) {
				write_seqlock(&fb_priv_cpu->lock);
				fb_priv_cpu->port[msg->dir] = msg->idp;
				write_sequnlock(&fb_priv_cpu->lock);
				bound = 1;
			} else {
				ret = NOTIFY_BAD;
				break;
			}
		}
		put_online_cpus();
		if (bound)
			printk(KERN_INFO "[%s::%s] port %s bound to IDP%u\n",
			       fb->name, fb->factory->type,
			       path_names[msg->dir], msg->idp);
		} break;
	case FBLOCK_UNBIND_IDP: {
		int unbound = 0;
		struct fblock_bind_msg *msg = args;
		get_online_cpus();
		for_each_online_cpu(cpu) {
			struct fb_counter_priv *fb_priv_cpu;
			fb_priv_cpu = per_cpu_ptr(fb_priv, cpu);
			if (fb_priv_cpu->port[msg->dir] == msg->idp) {
				write_seqlock(&fb_priv_cpu->lock);
				fb_priv_cpu->port[msg->dir] = IDP_UNKNOWN;
				write_sequnlock(&fb_priv_cpu->lock);
				unbound = 1;
			} else {
				ret = NOTIFY_BAD;
				break;
			}
		}
		put_online_cpus();
		if (unbound)
			printk(KERN_INFO "[%s::%s] port %s unbound\n",
			       fb->name, fb->factory->type,
			       path_names[msg->dir]);
		} break;
	case FBLOCK_SET_OPT: {
		struct fblock_opt_msg *msg = args;
		printk("Set option %s to %s!\n", msg->key, msg->val);
		} break;
	default:
		break;
	}

	return ret;
}
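
The handler above publishes port bindings with write_seqlock()/write_sequnlock(). For context, here is a minimal sketch of the matching lockless read side that a per-CPU datapath routine could use; the struct layout and the u32 IDP type are assumptions taken from the fields the notifier touches, not code from the original module.

static inline u32 fb_counter_peek_port(struct fb_counter_priv *fb_priv_cpu,
				       int dir)
{
	u32 idp;
	unsigned int seq;

	do {
		/* Retry if a concurrent bind/unbind update raced with us. */
		seq = read_seqbegin(&fb_priv_cpu->lock);
		idp = fb_priv_cpu->port[dir];
	} while (read_seqretry(&fb_priv_cpu->lock, seq));

	return idp;
}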
Example #2
int imx7d_lowpower_idle(uint32_t power_state __unused,
			uintptr_t entry __unused,
			uint32_t context_id __unused,
			struct sm_nsec_ctx *nsec)
{
	struct imx7_pm_info *p;
	uint32_t cpuidle_ocram_base;
	static uint32_t gic_inited;
	int ret;

	uint32_t cpu_id __maybe_unused = get_core_pos();
	uint32_t type = (power_state & PSCI_POWER_STATE_TYPE_MASK) >>
		PSCI_POWER_STATE_TYPE_SHIFT;
	uint32_t cpu = get_core_pos();

	cpuidle_ocram_base = core_mmu_get_va(TRUSTZONE_OCRAM_START +
					     LOWPOWER_IDLE_OCRAM_OFFSET,
					     MEM_AREA_TEE_COHERENT);
	p = (struct imx7_pm_info *)cpuidle_ocram_base;

	imx_pen_lock(cpu);

	if (!lowpoweridle_init) {
		imx7d_cpuidle_init();
		lowpoweridle_init = 1;
	}

	if (type != PSCI_POWER_STATE_TYPE_POWER_DOWN)
		panic();

	p->num_online_cpus = get_online_cpus();
	p->num_lpi_cpus++;

	sm_save_unbanked_regs(&nsec->ub_regs);

	ret = sm_pm_cpu_suspend((uint32_t)p, (int (*)(uint32_t))
				(cpuidle_ocram_base + sizeof(*p)));

	/*
	 * Sometimes cpu_suspend may not actually suspend; check its return
	 * value to decide whether registers need to be restored.
	 */
	if (ret < 0) {
		p->num_lpi_cpus--;
		imx_pen_unlock(cpu);
		DMSG("=== Not suspended, GPC IRQ Pending === %d\n", cpu_id);
		return 0;
	}

	/*
	 * Restore the registers of the different secure-world modes.
	 * When the CPU powers up it is in secure SVC mode after ROM
	 * init, so the monitor registers must be restored first.
	 */
	sm_restore_unbanked_regs(&nsec->ub_regs);

	p->num_lpi_cpus--;
	/* Back to Linux */
	nsec->mon_lr = (uint32_t)entry;

	if (gic_inited == 0) {
		/*
		 * TODO: Call the Wakeup Late function to restore some
		 * HW configuration (e.g. TZASC)
		 */
		plat_cpu_reset_late();

		main_init_gic();
		gic_inited = 1;
		DMSG("=== Back from Suspended ===\n");
	} else {
		main_secondary_init_gic();
		gic_inited = 0;
	}

	imx_pen_unlock(cpu);

	return 0;
}
int
CommOS_StartIO(const char *dispatchTaskName,    // IN
               CommOSDispatchFunc dispatchFunc, // IN
               unsigned int intervalMillis,     // IN
               unsigned int maxCycles,          // IN
               const char *aioTaskName)         // IN
{
   int cpu;

   if (running) {
      CommOS_Debug(("%s: I/O tasks already running.\n", __FUNCTION__));
      return 0;
   }

   /*
    * OK, let's test the handler against NULL. Though, the whole concept
    * of checking for NULL pointers, outside cases where NULL is meaningful
    * to the implementation, is relatively useless: garbage, random pointers
    * rarely happen to be all-zeros.
    */

   if (!dispatchFunc) {
      CommOS_Log(("%s: a NULL Dispatch handler was passed.\n", __FUNCTION__));
      return -1;
   }
   dispatch = dispatchFunc;

   if (intervalMillis == 0) {
      intervalMillis = 4;
   }
   if ((dispatchInterval = msecs_to_jiffies(intervalMillis)) < 1) {
      dispatchInterval = 1;
   }
   if (maxCycles > DISPATCH_MAX_CYCLES) {
      dispatchMaxCycles = DISPATCH_MAX_CYCLES;
   } else if (maxCycles > 0) {
      dispatchMaxCycles = maxCycles;
   }
   CommOS_Debug(("%s: Interval millis %u (jif:%u).\n", __FUNCTION__,
                 intervalMillis, dispatchInterval));
   CommOS_Debug(("%s: Max cycles %u.\n", __FUNCTION__, dispatchMaxCycles));

   dispatchWQ = CreateWorkqueue(dispatchTaskName);
   if (!dispatchWQ) {
      CommOS_Log(("%s: Couldn't create %s task(s).\n", __FUNCTION__,
                  dispatchTaskName));
      return -1;
   }

   if (aioTaskName) {
      aioWQ = CreateWorkqueue(aioTaskName);
      if (!aioWQ) {
         CommOS_Log(("%s: Couldn't create %s task(s).\n", __FUNCTION__,
                     aioTaskName));
         DestroyWorkqueue(dispatchWQ);
         return -1;
      }
   } else {
      aioWQ = NULL;
   }

   running = 1;
   for_each_possible_cpu(cpu) {
      CommOS_InitWork(&dispatchWorksNow[cpu], DispatchWrapper);
      CommOS_InitWork(&dispatchWorks[cpu], DispatchWrapper);
   }

#ifdef CONFIG_HOTPLUG_CPU
   register_hotcpu_notifier(&CpuNotifier);
#endif

   get_online_cpus();
   for_each_online_cpu(cpu) {
      QueueDelayedWorkOn(cpu, dispatchWQ,
                         &dispatchWorks[cpu],
                         dispatchInterval);
   }
   put_online_cpus();
   CommOS_Log(("%s: Created I/O task(s) successfully.\n", __FUNCTION__));
   return 0;
}
Example #4
static int clamp_thread(void *arg)
{
	int cpunr = (unsigned long)arg;
	DEFINE_TIMER(wakeup_timer, noop_timer, 0, 0);
	static const struct sched_param param = {
		.sched_priority = MAX_USER_RT_PRIO/2,
	};
	unsigned int count = 0;
	unsigned int target_ratio;

	set_bit(cpunr, cpu_clamping_mask);
	set_freezable();
	init_timer_on_stack(&wakeup_timer);
	sched_setscheduler(current, SCHED_FIFO, &param);

	while (true == clamping && !kthread_should_stop() &&
		cpu_online(cpunr)) {
		int sleeptime;
		unsigned long target_jiffies;
		unsigned int guard;
		unsigned int compensation = 0;
		int interval; /* jiffies to sleep for each attempt */
		unsigned int duration_jiffies = msecs_to_jiffies(duration);
		unsigned int window_size_now;

		try_to_freeze();
		/*
		 * make sure user selected ratio does not take effect until
		 * the next round. adjust target_ratio if user has changed
		 * target such that we can converge quickly.
		 */
		target_ratio = set_target_ratio;
		guard = 1 + target_ratio/20;
		window_size_now = window_size;
		count++;

		/*
		 * systems may have different ability to enter package level
		 * c-states, thus we need to compensate the injected idle ratio
		 * to achieve the actual target reported by the HW.
		 */
		compensation = get_compensation(target_ratio);
		interval = duration_jiffies*100/(target_ratio+compensation);

		/* align idle time */
		target_jiffies = roundup(jiffies, interval);
		sleeptime = target_jiffies - jiffies;
		if (sleeptime <= 0)
			sleeptime = 1;
		schedule_timeout_interruptible(sleeptime);
		/*
		 * only elected controlling cpu can collect stats and update
		 * control parameters.
		 */
		if (cpunr == control_cpu && !(count%window_size_now)) {
			should_skip =
				powerclamp_adjust_controls(target_ratio,
							guard, window_size_now);
			smp_mb();
		}

		if (should_skip)
			continue;

		target_jiffies = jiffies + duration_jiffies;
		mod_timer(&wakeup_timer, target_jiffies);
		if (unlikely(local_softirq_pending()))
			continue;
		/*
		 * stop tick sched during idle time, interrupts are still
		 * allowed. thus jiffies are updated properly.
		 */
		preempt_disable();
		/* mwait until target jiffies is reached */
		while (time_before(jiffies, target_jiffies)) {
			unsigned long ecx = 1;
			unsigned long eax = target_mwait;

			/*
			 * REVISIT: may call enter_idle() to notify drivers who
			 * can save power during cpu idle. same for exit_idle()
			 */
			local_touch_nmi();
			stop_critical_timings();
			mwait_idle_with_hints(eax, ecx);
			start_critical_timings();
			atomic_inc(&idle_wakeup_counter);
		}
		preempt_enable();
	}
	del_timer_sync(&wakeup_timer);
	clear_bit(cpunr, cpu_clamping_mask);

	return 0;
}

/*
 * 1 HZ polling while clamping is active, useful for userspace
 * to monitor actual idle ratio.
 */
static void poll_pkg_cstate(struct work_struct *dummy);
static DECLARE_DELAYED_WORK(poll_pkg_cstate_work, poll_pkg_cstate);
static void poll_pkg_cstate(struct work_struct *dummy)
{
	static u64 msr_last;
	static u64 tsc_last;
	static unsigned long jiffies_last;

	u64 msr_now;
	unsigned long jiffies_now;
	u64 tsc_now;
	u64 val64;

	msr_now = pkg_state_counter();
	rdtscll(tsc_now);
	jiffies_now = jiffies;

	/* calculate pkg cstate vs tsc ratio */
	if (!msr_last || !tsc_last)
		pkg_cstate_ratio_cur = 1;
	else {
		if (tsc_now - tsc_last) {
			val64 = 100 * (msr_now - msr_last);
			do_div(val64, (tsc_now - tsc_last));
			pkg_cstate_ratio_cur = val64;
		}
	}

	/* update record */
	msr_last = msr_now;
	jiffies_last = jiffies_now;
	tsc_last = tsc_now;

	if (true == clamping)
		schedule_delayed_work(&poll_pkg_cstate_work, HZ);
}

static int start_power_clamp(void)
{
	unsigned long cpu;
	struct task_struct *thread;

	/* check if pkg cstate counter is completely 0, abort in this case */
	if (!has_pkg_state_counter()) {
		pr_err("pkg cstate counter not functional, abort\n");
		return -EINVAL;
	}

	set_target_ratio = clamp(set_target_ratio, 0U, MAX_TARGET_RATIO - 1);
	/* prevent cpu hotplug */
	get_online_cpus();

	/* prefer BSP */
	control_cpu = 0;
	if (!cpu_online(control_cpu))
		control_cpu = smp_processor_id();

	clamping = true;
	schedule_delayed_work(&poll_pkg_cstate_work, 0);

	/* start one thread per online cpu */
	for_each_online_cpu(cpu) {
		struct task_struct **p =
			per_cpu_ptr(powerclamp_thread, cpu);

		thread = kthread_create_on_node(clamp_thread,
						(void *) cpu,
						cpu_to_node(cpu),
						"kidle_inject/%ld", cpu);
		/* bind to cpu here */
		if (likely(!IS_ERR(thread))) {
			kthread_bind(thread, cpu);
			wake_up_process(thread);
			*p = thread;
		}

	}
	put_online_cpus();

	return 0;
}
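
start_power_clamp() leaves one injection thread per online CPU, tracked by cpu_clamping_mask and the powerclamp_thread per-CPU pointer. A hedged sketch of the matching teardown (not quoted from intel_powerclamp.c; it only reuses the variables visible above):

static void stop_power_clamp_threads(void)
{
	int i;
	struct task_struct *thread;

	/* Ask every clamp_thread() to leave its main loop. */
	clamping = false;
	smp_mb();

	/* Reap each thread whose bit was set in clamp_thread(). */
	for_each_set_bit(i, cpu_clamping_mask, num_possible_cpus()) {
		thread = *per_cpu_ptr(powerclamp_thread, i);
		kthread_stop(thread);
	}
}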
Example #5
File: vmstat.c Project: 3null/linux
/*
 * Accumulate the vm event counters across all CPUs.
 * The result is unavoidably approximate - it can change
 * during and after execution of this function.
*/
void all_vm_events(unsigned long *ret)
{
	get_online_cpus();
	sum_vm_events(ret);
	put_online_cpus();
}
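
Nearly every example in this collection is a variation of the same idiom: pin the set of online CPUs, walk per-CPU state, release the lock. A self-contained sketch of that bare pattern (the demo counter is illustrative only, not part of vmstat.c):

static DEFINE_PER_CPU(unsigned long, demo_event_count);

static unsigned long demo_sum_events(void)
{
	unsigned long sum = 0;
	int cpu;

	/* Keep CPUs from coming or going while we accumulate. */
	get_online_cpus();
	for_each_online_cpu(cpu)
		sum += per_cpu(demo_event_count, cpu);
	put_online_cpus();

	return sum;
}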
Example #6
int mtrr_add_page(unsigned long base, unsigned long size, 
		  unsigned int type, bool increment)
{
	int i, replace, error;
	mtrr_type ltype;
	unsigned long lbase, lsize;

	if (!mtrr_if)
		return -ENXIO;

	if ((error = mtrr_if->validate_add_page(base, size, type)))
		return error;

	if (type >= MTRR_NUM_TYPES) {
		printk(KERN_WARNING "mtrr: type: %u invalid\n", type);
		return -EINVAL;
	}

	/*  If the type is WC, check that this processor supports it  */
	if ((type == MTRR_TYPE_WRCOMB) && !have_wrcomb()) {
		printk(KERN_WARNING
		       "mtrr: your processor doesn't support write-combining\n");
		return -ENOSYS;
	}

	if (!size) {
		printk(KERN_WARNING "mtrr: zero sized request\n");
		return -EINVAL;
	}

	if (base & size_or_mask || size & size_or_mask) {
		printk(KERN_WARNING "mtrr: base or size exceeds the MTRR width\n");
		return -EINVAL;
	}

	error = -EINVAL;
	replace = -1;

	/* No CPU hotplug when we change MTRR entries */
	get_online_cpus();
	/*  Search for existing MTRR  */
	mutex_lock(&mtrr_mutex);
	for (i = 0; i < num_var_ranges; ++i) {
		mtrr_if->get(i, &lbase, &lsize, &ltype);
		if (!lsize || base > lbase + lsize - 1 || base + size - 1 < lbase)
			continue;
		/*  At this point we know there is some kind of overlap/enclosure  */
		if (base < lbase || base + size - 1 > lbase + lsize - 1) {
			if (base <= lbase && base + size - 1 >= lbase + lsize - 1) {
				/*  New region encloses an existing region  */
				if (type == ltype) {
					replace = replace == -1 ? i : -2;
					continue;
				}
				else if (types_compatible(type, ltype))
					continue;
			}
			printk(KERN_WARNING
			       "mtrr: 0x%lx000,0x%lx000 overlaps existing"
			       " 0x%lx000,0x%lx000\n", base, size, lbase,
			       lsize);
			goto out;
		}
		/*  New region is enclosed by an existing region  */
		if (ltype != type) {
			if (types_compatible(type, ltype))
				continue;
			printk (KERN_WARNING "mtrr: type mismatch for %lx000,%lx000 old: %s new: %s\n",
			     base, size, mtrr_attrib_to_str(ltype),
			     mtrr_attrib_to_str(type));
			goto out;
		}
		if (increment)
			++mtrr_usage_table[i];
		error = i;
		goto out;
	}
	/*  Search for an empty MTRR  */
	i = mtrr_if->get_free_region(base, size, replace);
	if (i >= 0) {
		set_mtrr(i, base, size, type);
		if (likely(replace < 0)) {
			mtrr_usage_table[i] = 1;
		} else {
			mtrr_usage_table[i] = mtrr_usage_table[replace];
			if (increment)
				mtrr_usage_table[i]++;
			if (unlikely(replace != i)) {
				set_mtrr(replace, 0, 0, 0);
				mtrr_usage_table[replace] = 0;
			}
		}
	} else
		printk(KERN_INFO "mtrr: no more MTRRs available\n");
	error = i;
 out:
	mutex_unlock(&mtrr_mutex);
	put_online_cpus();
	return error;
}
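
Note that mtrr_add_page() works in units of pages. A hedged sketch of the byte-granular wrapper callers normally go through (the real mtrr_add() performs the same alignment check and shift; this is not a verbatim copy):

int mtrr_add(unsigned long base, unsigned long size, unsigned int type,
	     bool increment)
{
	/* MTRRs cover whole pages, so byte addresses must be page aligned. */
	if ((base & (PAGE_SIZE - 1)) || (size & (PAGE_SIZE - 1))) {
		printk(KERN_WARNING "mtrr: size and base must be multiples of 4 kiB\n");
		return -EINVAL;
	}
	return mtrr_add_page(base >> PAGE_SHIFT, size >> PAGE_SHIFT, type,
			     increment);
}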
static int boost_mig_sync_thread(void *data)
{
	int dest_cpu = (int)data;
	int src_cpu, ret;
	struct boost_policy *b = &per_cpu(boost_info, dest_cpu);
	struct cpufreq_policy dest_policy;
	struct cpufreq_policy src_policy;
	unsigned long flags;
	unsigned int req_freq;

	while (1) {
		wait_event_interruptible(b->sync_wq, b->pending ||
					kthread_should_stop());

		if (kthread_should_stop())
			break;

		spin_lock_irqsave(&b->lock, flags);
		b->pending = false;
		src_cpu = b->src_cpu;
		spin_unlock_irqrestore(&b->lock, flags);

		ret = cpufreq_get_policy(&src_policy, src_cpu);
		if (ret)
			continue;

		ret = cpufreq_get_policy(&dest_policy, dest_cpu);
		if (ret)
			continue;

		req_freq = max((dest_policy.max * b->task_load) / 100,
							src_policy.cur);

		if (req_freq <= dest_policy.cpuinfo.min_freq) {
			pr_debug("No sync. Sync Freq:%u\n", req_freq);
			continue;
		}

		if (sync_threshold)
			req_freq = min(sync_threshold, req_freq);

		cancel_delayed_work_sync(&b->mig_boost_rem);

		b->migration_freq = req_freq;

		/* Force policy re-evaluation to trigger adjust notifier. */
		get_online_cpus();
		if (cpu_online(src_cpu))
			/*
			 * Send an unchanged policy update to the source
			 * CPU. Even though the policy isn't changed from
			 * its existing boosted or non-boosted state
			 * notifying the source CPU will let the governor
			 * know a boost happened on another CPU and that it
			 * should re-evaluate the frequency at the next timer
			 * event without interference from a min sample time.
			 */
			cpufreq_update_policy(src_cpu);
		if (cpu_online(dest_cpu)) {
			cpufreq_update_policy(dest_cpu);
			queue_delayed_work_on(dest_cpu, boost_wq,
				&b->mig_boost_rem, msecs_to_jiffies(migration_boost_ms));
		} else
			b->migration_freq = 0;
		put_online_cpus();
	}

	return 0;
}
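
The sync thread above sleeps until b->pending is set. A hedged sketch of the producer side that a migration notifier could use to wake it; the boost_info fields are the ones used above, but the function name and its caller are assumptions rather than the original driver code:

static void boost_notify_migration(int src_cpu, int dest_cpu,
				   unsigned int task_load)
{
	struct boost_policy *b = &per_cpu(boost_info, dest_cpu);
	unsigned long flags;

	spin_lock_irqsave(&b->lock, flags);
	b->pending = true;
	b->src_cpu = src_cpu;
	b->task_load = task_load;
	spin_unlock_irqrestore(&b->lock, flags);

	/* Kick the per-CPU thread parked in wait_event_interruptible(). */
	wake_up_interruptible(&b->sync_wq);
}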
static int boost_mig_sync_thread(void *data)
{
	int dest_cpu = (int) data;
	int src_cpu, ret;
	struct cpu_sync *s = &per_cpu(sync_info, dest_cpu);
	struct cpufreq_policy dest_policy;
	struct cpufreq_policy src_policy;
	unsigned long flags;
	unsigned int req_freq;

	while (1) {
		wait_event(s->sync_wq, s->pending || kthread_should_stop());
#ifdef CONFIG_IRLED_GPIO
		if (unlikely(gir_boost_disable)) {
			pr_debug("[GPIO_IR][%s] continue~!(cpu:%d)\n", 
				__func__, raw_smp_processor_id());
			continue;
		}
#endif

		if (kthread_should_stop())
			break;

		spin_lock_irqsave(&s->lock, flags);
		s->pending = false;
		src_cpu = s->src_cpu;
		spin_unlock_irqrestore(&s->lock, flags);

		ret = cpufreq_get_policy(&src_policy, src_cpu);
		if (ret)
			continue;

		ret = cpufreq_get_policy(&dest_policy, dest_cpu);
		if (ret)
			continue;

		if (s->task_load < migration_load_threshold)
			continue;

		req_freq = load_based_syncs ?
			(dest_policy.max * s->task_load) / 100 : src_policy.cur;

		if (req_freq <= dest_policy.cpuinfo.min_freq) {
			pr_debug("No sync. Sync Freq:%u\n", req_freq);
			continue;
		}

		if (sync_threshold)
			req_freq = min(sync_threshold, req_freq);

		cancel_delayed_work_sync(&s->boost_rem);

#ifdef CONFIG_CPUFREQ_HARDLIMIT
		s->boost_min = check_cpufreq_hardlimit(req_freq);
#else
#ifdef CONFIG_CPUFREQ_LIMIT
		s->boost_min = check_cpufreq_limit(req_freq);
#else
		s->boost_min = req_freq;
#endif
#endif

		/* Force policy re-evaluation to trigger adjust notifier. */
		get_online_cpus();
		if (cpu_online(src_cpu))
			/*
			 * Send an unchanged policy update to the source
			 * CPU. Even though the policy isn't changed from
			 * its existing boosted or non-boosted state
			 * notifying the source CPU will let the governor
			 * know a boost happened on another CPU and that it
			 * should re-evaluate the frequency at the next timer
			 * event without interference from a min sample time.
			 */
			cpufreq_update_policy(src_cpu);
		if (cpu_online(dest_cpu)) {
			cpufreq_update_policy(dest_cpu);
			queue_delayed_work_on(dest_cpu, cpu_boost_wq,
				&s->boost_rem, msecs_to_jiffies(boost_ms));
		} else {
			s->boost_min = 0;
		}
		put_online_cpus();
	}

	return 0;
}
Example #9
static void run_boost_migration(unsigned int cpu)
{
	int dest_cpu = cpu;
	int src_cpu, ret;
	struct cpu_sync *s = &per_cpu(sync_info, dest_cpu);
	struct cpufreq_policy dest_policy;
	struct cpufreq_policy src_policy;
	unsigned long flags;
	unsigned int req_freq;

	spin_lock_irqsave(&s->lock, flags);
	s->pending = false;
	src_cpu = s->src_cpu;
	spin_unlock_irqrestore(&s->lock, flags);

	ret = cpufreq_get_policy(&src_policy, src_cpu);
	if (ret)
		return;

	ret = cpufreq_get_policy(&dest_policy, dest_cpu);
	if (ret)
		return;

	req_freq = load_based_syncs ?
		(dest_policy.max * s->task_load) / 100 :
						src_policy.cur;

	if (req_freq <= dest_policy.cpuinfo.min_freq) {
		pr_debug("No sync. Sync Freq:%u\n", req_freq);
		return;
	}

	if (sync_threshold)
		req_freq = min(sync_threshold, req_freq);

	cancel_delayed_work_sync(&s->boost_rem);

	s->boost_min = req_freq;

	/* Force policy re-evaluation to trigger adjust notifier. */
	get_online_cpus();
	if (cpu_online(src_cpu))
		/*
		 * Send an unchanged policy update to the source
		 * CPU. Even though the policy isn't changed from
		 * its existing boosted or non-boosted state
		 * notifying the source CPU will let the governor
		 * know a boost happened on another CPU and that it
		 * should re-evaluate the frequency at the next timer
		 * event without interference from a min sample time.
		 */
		cpufreq_update_policy(src_cpu);
	if (cpu_online(dest_cpu)) {
		cpufreq_update_policy(dest_cpu);
		queue_delayed_work_on(dest_cpu, cpu_boost_wq,
			&s->boost_rem, msecs_to_jiffies(boost_ms));
	} else {
		s->boost_min = 0;
	}
	put_online_cpus();
}
/**
 * channel_backend_init - initialize a channel backend
 * @chanb: channel backend
 * @name: channel name
 * @config: client ring buffer configuration
 * @priv: client private data
 * @subbuf_size: size of sub-buffers (> PAGE_SIZE, power of 2)
 * @num_subbuf: number of sub-buffers (power of 2)
 *
 * Returns 0 on success, a negative error code otherwise.
 *
 * Creates per-cpu channel buffers using the sizes and attributes
 * specified.  The created channel buffer files will be named
 * name_0...name_N-1.  File permissions will be %S_IRUSR.
 *
 * Called with CPU hotplug disabled.
 */
int channel_backend_init(struct channel_backend *chanb,
			 const char *name,
			 const struct lib_ring_buffer_config *config,
			 void *priv, size_t subbuf_size, size_t num_subbuf)
{
	struct channel *chan = container_of(chanb, struct channel, backend);
	unsigned int i;
	int ret;

	if (!name)
		return -EPERM;

	/* Check that the subbuffer size is larger than a page. */
	if (subbuf_size < PAGE_SIZE)
		return -EINVAL;

	/*
	 * Make sure the number of subbuffers and subbuffer size are
	 * power of 2 and nonzero.
	 */
	if (!subbuf_size || (subbuf_size & (subbuf_size - 1)))
		return -EINVAL;
	if (!num_subbuf || (num_subbuf & (num_subbuf - 1)))
		return -EINVAL;
	/*
	 * Overwrite mode buffers require at least 2 subbuffers per
	 * buffer.
	 */
	if (config->mode == RING_BUFFER_OVERWRITE && num_subbuf < 2)
		return -EINVAL;

	ret = subbuffer_id_check_index(config, num_subbuf);
	if (ret)
		return ret;

	chanb->priv = priv;
	chanb->buf_size = num_subbuf * subbuf_size;
	chanb->subbuf_size = subbuf_size;
	chanb->buf_size_order = get_count_order(chanb->buf_size);
	chanb->subbuf_size_order = get_count_order(subbuf_size);
	chanb->num_subbuf_order = get_count_order(num_subbuf);
	chanb->extra_reader_sb =
			(config->mode == RING_BUFFER_OVERWRITE) ? 1 : 0;
	chanb->num_subbuf = num_subbuf;
	strlcpy(chanb->name, name, NAME_MAX);
	memcpy(&chanb->config, config, sizeof(chanb->config));

	if (config->alloc == RING_BUFFER_ALLOC_PER_CPU) {
		if (!zalloc_cpumask_var(&chanb->cpumask, GFP_KERNEL))
			return -ENOMEM;
	}

	if (config->alloc == RING_BUFFER_ALLOC_PER_CPU) {
		/* Allocating the buffer per-cpu structures */
		chanb->buf = alloc_percpu(struct lib_ring_buffer);
		if (!chanb->buf)
			goto free_cpumask;

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0))
		chanb->cpuhp_prepare.component = LTTNG_RING_BUFFER_BACKEND;
		ret = cpuhp_state_add_instance(lttng_rb_hp_prepare,
			&chanb->cpuhp_prepare.node);
		if (ret)
			goto free_bufs;
#else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */

		{
			/*
			 * In the non-hotplug case, if the ring buffer is
			 * allocated from an early initcall, it will not be
			 * notified of secondary CPUs, so allocate for all
			 * possible CPUs instead.
			 */
#ifdef CONFIG_HOTPLUG_CPU
			/*
			 * buf->backend.allocated test takes care of concurrent CPU
			 * hotplug.
			 * Priority higher than frontend, so we create the ring buffer
			 * before we start the timer.
			 */
			chanb->cpu_hp_notifier.notifier_call =
					lib_ring_buffer_cpu_hp_callback;
			chanb->cpu_hp_notifier.priority = 5;
			register_hotcpu_notifier(&chanb->cpu_hp_notifier);

			get_online_cpus();
			for_each_online_cpu(i) {
				ret = lib_ring_buffer_create(per_cpu_ptr(chanb->buf, i),
							 chanb, i);
				if (ret)
					goto free_bufs;	/* cpu hotplug locked */
			}
			put_online_cpus();
#else
			for_each_possible_cpu(i) {
				ret = lib_ring_buffer_create(per_cpu_ptr(chanb->buf, i),
							 chanb, i);
				if (ret)
					goto free_bufs;
			}
#endif
		}
#endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */
	} else {
Example #11
static void __init smp_detect_cpus(void)
{
	unsigned int cpu, c_cpus, s_cpus;
	struct sclp_cpu_info *info;
	u16 boot_cpu_addr, cpu_addr;

	c_cpus = 1;
	s_cpus = 0;
	boot_cpu_addr = __cpu_logical_map[0];
	info = kmalloc(sizeof(*info), GFP_KERNEL);
	if (!info)
		panic("smp_detect_cpus failed to allocate memory\n");
#ifdef CONFIG_CRASH_DUMP
	if (OLDMEM_BASE && !is_kdump_kernel()) {
		struct save_area *save_area;

		save_area = kmalloc(sizeof(*save_area), GFP_KERNEL);
		if (!save_area)
			panic("could not allocate memory for save area\n");
		copy_oldmem_page(1, (void *) save_area, sizeof(*save_area),
				 0x200, 0);
		zfcpdump_save_areas[0] = save_area;
	}
#endif
	/* Use sigp detection algorithm if sclp doesn't work. */
	if (sclp_get_cpu_info(info)) {
		smp_use_sigp_detection = 1;
		for (cpu = 0; cpu <= MAX_CPU_ADDRESS; cpu++) {
			if (cpu == boot_cpu_addr)
				continue;
			if (!raw_cpu_stopped(cpu))
				continue;
			smp_get_save_area(c_cpus, cpu);
			c_cpus++;
		}
		goto out;
	}

	if (info->has_cpu_type) {
		for (cpu = 0; cpu < info->combined; cpu++) {
			if (info->cpu[cpu].address == boot_cpu_addr) {
				smp_cpu_type = info->cpu[cpu].type;
				break;
			}
		}
	}

	for (cpu = 0; cpu < info->combined; cpu++) {
		if (info->has_cpu_type && info->cpu[cpu].type != smp_cpu_type)
			continue;
		cpu_addr = info->cpu[cpu].address;
		if (cpu_addr == boot_cpu_addr)
			continue;
		if (!raw_cpu_stopped(cpu_addr)) {
			s_cpus++;
			continue;
		}
		smp_get_save_area(c_cpus, cpu_addr);
		c_cpus++;
	}
out:
	kfree(info);
	pr_info("%d configured CPUs, %d standby CPUs\n", c_cpus, s_cpus);
	get_online_cpus();
	__smp_rescan_cpus();
	put_online_cpus();
}
Example #12
static struct fblock *fb_crr_rx_ctor(char *name)
{
	int ret = 0;
	unsigned int cpu, *tmp_rx_bitstream;
	unsigned char *tmp_expected_seq_nr, *tmp_rx_win_nr;
	struct sk_buff_head *tmp_list;
	struct fblock *fb;
	struct fb_crr_rx_priv __percpu *fb_priv;
	rwlock_t *tmp_rx_lock;

	fb = alloc_fblock(GFP_ATOMIC);
	if (!fb)
		return NULL;

	fb_priv = alloc_percpu(struct fb_crr_rx_priv);
	if (!fb_priv)
		goto err;

	if (unlikely((tmp_rx_bitstream = kzalloc(sizeof(unsigned int), GFP_ATOMIC)) == NULL))
		goto err_;

	if (unlikely((tmp_rx_win_nr = kzalloc(sizeof(unsigned char), GFP_ATOMIC)) == NULL))
		goto err__;

	if (unlikely((tmp_rx_lock = kzalloc(sizeof(rwlock_t), GFP_ATOMIC)) == NULL))
		goto err0;

	if (unlikely((tmp_list = kzalloc(sizeof(struct sk_buff_head), GFP_ATOMIC)) == NULL))
		goto err1;

	if (unlikely((tmp_expected_seq_nr = kzalloc(sizeof(unsigned char), GFP_ATOMIC)) == NULL))
		goto err1a;

	rwlock_init(tmp_rx_lock);

	*tmp_rx_bitstream = 0;
	*tmp_rx_win_nr = 0;
	*tmp_expected_seq_nr = 1;

	skb_queue_head_init(tmp_list);

	get_online_cpus();
	for_each_online_cpu(cpu) {
		struct fb_crr_rx_priv *fb_priv_cpu;
		fb_priv_cpu = per_cpu_ptr(fb_priv, cpu);
		seqlock_init(&fb_priv_cpu->lock);
		//rwlock_init(&fb_priv_cpu->rx_lock);
		fb_priv_cpu->rx_lock = tmp_rx_lock;
		fb_priv_cpu->port[0] = IDP_UNKNOWN;
		fb_priv_cpu->port[1] = IDP_UNKNOWN;
		fb_priv_cpu->rx_seq_nr = tmp_expected_seq_nr;
		fb_priv_cpu->list = tmp_list;
		fb_priv_cpu->rx_bitstream = tmp_rx_bitstream;
		fb_priv_cpu->rx_win_nr = tmp_rx_win_nr;
	}
	put_online_cpus();

	ret = init_fblock(fb, name, fb_priv);
	if (ret)
		goto err2;
	fb->netfb_rx = fb_crr_rx_netrx;
	fb->event_rx = fb_crr_rx_event;
	ret = register_fblock_namespace(fb);
	if (ret)
		goto err3;
	__module_get(THIS_MODULE);
	printk(KERN_ERR "[CRR_RX] Initialization passed!\n");
	return fb;
err3:
	cleanup_fblock_ctor(fb);
err2:
	kfree(tmp_expected_seq_nr);
err1a:
	kfree(tmp_list);
err1:
	kfree(tmp_rx_lock);
err0:
	kfree(tmp_rx_win_nr);
err__:
	kfree(tmp_rx_bitstream);
err_:
	free_percpu(fb_priv);
err:
	kfree_fblock(fb);
	printk(KERN_ERR "[CRR_RX] Initialization failed!\n");
	return NULL;
}
Example #13
int aed_get_process_bt(struct aee_process_bt *bt)
{
    int nr_cpus, err;
    struct bt_sync s;
    struct task_struct *task;
    int timeout_max = 500000;

    if (down_interruptible(&process_bt_sem) < 0) {
        return -ERESTARTSYS;
    }

    err = 0;
    if (bt->pid > 0) {
        task = find_task_by_vpid(bt->pid);
        if (task == NULL) {
            err = -EINVAL;
            goto exit;
        }
    } else {
        err = -EINVAL;
        goto exit;
    }

    err = mutex_lock_killable(&task->signal->cred_guard_mutex);
    if (err)
        goto exit;
    if (!ptrace_may_access(task, PTRACE_MODE_ATTACH)) {
        mutex_unlock(&task->signal->cred_guard_mutex);
        err = -EPERM;
        goto exit;
    }

    get_online_cpus();
    preempt_disable();

    nr_cpus = num_online_cpus();
    atomic_set(&s.cpus_report, nr_cpus - 1);
    atomic_set(&s.cpus_lock, 1);

    smp_call_function(per_cpu_get_bt, &s, 0);

    while (atomic_read(&s.cpus_report) != 0) {
        if (timeout_max-- > 0) {
            udelay(1);
        } else {
            break;
        }
    }

    aed_get_bt(task, bt);

    atomic_set(&s.cpus_report, nr_cpus - 1);
    atomic_set(&s.cpus_lock, 0);
    timeout_max = 500000;
    while (atomic_read(&s.cpus_report) != 0) {
        if (timeout_max-- > 0) {
            udelay(1);
        } else {
            break;
        }
    }

    preempt_enable();
    put_online_cpus();

    mutex_unlock(&task->signal->cred_guard_mutex);

exit:
    up(&process_bt_sem);
    return err;
}
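
One plausible shape for per_cpu_get_bt(), inferred from the two cpus_report phases and the cpus_lock toggle above; this is a reconstruction for illustration, not the original MediaTek implementation:

static void per_cpu_get_bt(void *info)
{
    struct bt_sync *s = info;

    /* Phase 1: report that this CPU is parked in the IPI handler. */
    atomic_dec(&s->cpus_report);

    /* Spin while the initiating CPU walks the target task. */
    while (atomic_read(&s->cpus_lock))
        cpu_relax();

    /* Phase 2: report that this CPU has been released. */
    atomic_dec(&s->cpus_report);
}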
Example #14
static int etm_probe(struct amba_device *adev, const struct amba_id *id)
{
	int ret;
	void __iomem *base;
	struct device *dev = &adev->dev;
	struct coresight_platform_data *pdata = NULL;
	struct etm_drvdata *drvdata;
	struct resource *res = &adev->res;
	struct coresight_desc *desc;
	struct device_node *np = adev->dev.of_node;

	desc = devm_kzalloc(dev, sizeof(*desc), GFP_KERNEL);
	if (!desc)
		return -ENOMEM;

	drvdata = devm_kzalloc(dev, sizeof(*drvdata), GFP_KERNEL);
	if (!drvdata)
		return -ENOMEM;

	if (np) {
		pdata = of_get_coresight_platform_data(dev, np);
		if (IS_ERR(pdata))
			return PTR_ERR(pdata);

		adev->dev.platform_data = pdata;
		drvdata->use_cp14 = of_property_read_bool(np, "arm,cp14");
	}

	drvdata->dev = &adev->dev;
	dev_set_drvdata(dev, drvdata);

	/* Validity for the resource is already checked by the AMBA core */
	base = devm_ioremap_resource(dev, res);
	if (IS_ERR(base))
		return PTR_ERR(base);

	drvdata->base = base;

	spin_lock_init(&drvdata->spinlock);

	drvdata->atclk = devm_clk_get(&adev->dev, "atclk"); /* optional */
	if (!IS_ERR(drvdata->atclk)) {
		ret = clk_prepare_enable(drvdata->atclk);
		if (ret)
			return ret;
	}

	drvdata->cpu = pdata ? pdata->cpu : 0;

	get_online_cpus();
	etmdrvdata[drvdata->cpu] = drvdata;

	if (!smp_call_function_single(drvdata->cpu, etm_os_unlock, drvdata, 1))
		drvdata->os_unlock = true;

	if (smp_call_function_single(drvdata->cpu,
				     etm_init_arch_data,  drvdata, 1))
		dev_err(dev, "ETM arch init failed\n");

	if (!etm_count++)
		register_hotcpu_notifier(&etm_cpu_notifier);

	put_online_cpus();

	if (etm_arch_supported(drvdata->arch) == false) {
		ret = -EINVAL;
		goto err_arch_supported;
	}
	etm_init_default_data(drvdata);

	desc->type = CORESIGHT_DEV_TYPE_SOURCE;
	desc->subtype.source_subtype = CORESIGHT_DEV_SUBTYPE_SOURCE_PROC;
	desc->ops = &etm_cs_ops;
	desc->pdata = pdata;
	desc->dev = dev;
	desc->groups = coresight_etm_groups;
	drvdata->csdev = coresight_register(desc);
	if (IS_ERR(drvdata->csdev)) {
		ret = PTR_ERR(drvdata->csdev);
		goto err_arch_supported;
	}

	pm_runtime_put(&adev->dev);
	dev_info(dev, "%s initialized\n", (char *)id->data);

	if (boot_enable) {
		coresight_enable(drvdata->csdev);
		drvdata->boot_enable = true;
	}

	return 0;

err_arch_supported:
	if (--etm_count == 0)
		unregister_hotcpu_notifier(&etm_cpu_notifier);
	return ret;
}
static int boost_mig_sync_thread(void *data)
{
	int dest_cpu = (int) data;
	int src_cpu, ret;
	struct cpu_sync *s = &per_cpu(sync_info, dest_cpu);
	struct cpufreq_policy dest_policy;
	struct cpufreq_policy src_policy;
	unsigned long flags;

	while (1) {
		wait_event(s->sync_wq, s->pending || kthread_should_stop());

		if (kthread_should_stop())
			break;

		spin_lock_irqsave(&s->lock, flags);
		s->pending = false;
		src_cpu = s->src_cpu;
		spin_unlock_irqrestore(&s->lock, flags);

		ret = cpufreq_get_policy(&src_policy, src_cpu);
		if (ret)
			continue;

		ret = cpufreq_get_policy(&dest_policy, dest_cpu);
		if (ret)
			continue;

		if (src_policy.min == src_policy.cur &&
				src_policy.min <= dest_policy.min) {
			continue;
		}

		cancel_delayed_work_sync(&s->boost_rem);
		if (sync_threshold) {
			if (src_policy.cur >= sync_threshold)
				s->boost_min = sync_threshold;
			else
				s->boost_min = src_policy.cur;
		} else {
			s->boost_min = src_policy.cur;
		}
		/* Force policy re-evaluation to trigger adjust notifier. */
		cpufreq_update_policy(dest_cpu);
		/* Notify source CPU of policy change */
		cpufreq_update_policy(src_cpu);
#if defined(CONFIG_ARCH_MSM8974) || defined(CONFIG_ARCH_MSM8974PRO)
		get_online_cpus();
		if (cpu_online(dest_cpu))
			queue_delayed_work_on(dest_cpu, cpu_boost_wq,
				&s->boost_rem, msecs_to_jiffies(boost_ms));
		put_online_cpus();
#else
		queue_delayed_work_on(s->cpu, cpu_boost_wq,
			&s->boost_rem, msecs_to_jiffies(boost_ms));
#endif
	}

	return 0;
}
Example #16
static int etm4_probe(struct amba_device *adev, const struct amba_id *id)
{
	int ret;
	void __iomem *base;
	struct device *dev = &adev->dev;
	struct coresight_platform_data *pdata = NULL;
	struct etmv4_drvdata *drvdata;
	struct resource *res = &adev->res;
	struct coresight_desc *desc;
	struct device_node *np = adev->dev.of_node;

	desc = devm_kzalloc(dev, sizeof(*desc), GFP_KERNEL);
	if (!desc)
		return -ENOMEM;

	drvdata = devm_kzalloc(dev, sizeof(*drvdata), GFP_KERNEL);
	if (!drvdata)
		return -ENOMEM;

	if (np) {
		pdata = of_get_coresight_platform_data(dev, np);
		if (IS_ERR(pdata))
			return PTR_ERR(pdata);
		adev->dev.platform_data = pdata;
	}

	drvdata->dev = &adev->dev;
	dev_set_drvdata(dev, drvdata);

	/* Validity for the resource is already checked by the AMBA core */
	base = devm_ioremap_resource(dev, res);
	if (IS_ERR(base))
		return PTR_ERR(base);

	drvdata->base = base;

	spin_lock_init(&drvdata->spinlock);

	drvdata->cpu = pdata ? pdata->cpu : 0;

	get_online_cpus();
	etmdrvdata[drvdata->cpu] = drvdata;

	if (smp_call_function_single(drvdata->cpu,
				etm4_init_arch_data,  drvdata, 1))
		dev_err(dev, "ETM arch init failed\n");

	if (!etm4_count++) {
		cpuhp_setup_state_nocalls(CPUHP_AP_ARM_CORESIGHT4_STARTING,
					  "AP_ARM_CORESIGHT4_STARTING",
					  etm4_starting_cpu, etm4_dying_cpu);
		ret = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN,
						"AP_ARM_CORESIGHT4_ONLINE",
						etm4_online_cpu, NULL);
		if (ret < 0)
			goto err_arch_supported;
		hp_online = ret;
	}

	put_online_cpus();

	if (etm4_arch_supported(drvdata->arch) == false) {
		ret = -EINVAL;
		goto err_arch_supported;
	}

	etm4_init_trace_id(drvdata);
	etm4_set_default(&drvdata->config);

	desc->type = CORESIGHT_DEV_TYPE_SOURCE;
	desc->subtype.source_subtype = CORESIGHT_DEV_SUBTYPE_SOURCE_PROC;
	desc->ops = &etm4_cs_ops;
	desc->pdata = pdata;
	desc->dev = dev;
	desc->groups = coresight_etmv4_groups;
	drvdata->csdev = coresight_register(desc);
	if (IS_ERR(drvdata->csdev)) {
		ret = PTR_ERR(drvdata->csdev);
		goto err_arch_supported;
	}

	ret = etm_perf_symlink(drvdata->csdev, true);
	if (ret) {
		coresight_unregister(drvdata->csdev);
		goto err_arch_supported;
	}

	pm_runtime_put(&adev->dev);
	dev_info(dev, "%s initialized\n", (char *)id->data);

	if (boot_enable) {
		coresight_enable(drvdata->csdev);
		drvdata->boot_enable = true;
	}

	return 0;

err_arch_supported:
	if (--etm4_count == 0) {
		cpuhp_remove_state_nocalls(CPUHP_AP_ARM_CORESIGHT4_STARTING);
		if (hp_online)
			cpuhp_remove_state_nocalls(hp_online);
	}
	return ret;
}
Example #17
/*
 * Allow secondary CPU threads to come online again
 */
void uninhibit_secondary_onlining(void)
{
	get_online_cpus();
	atomic_dec(&secondary_inhibit_count);
	put_online_cpus();
}
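
For reference, a sketch of the counterpart that takes the inhibit reference, assumed to mirror the function above rather than quoted from the original source:

/*
 * Prevent secondary CPU threads from coming online while a count is held.
 */
void inhibit_secondary_onlining(void)
{
	/* Bump the count under the hotplug lock so racing onlines see it. */
	get_online_cpus();
	atomic_inc(&secondary_inhibit_count);
	put_online_cpus();
}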
static void trace_start(void)
{
	int i;
	int pwr_down;

	if (tracer.state == TRACE_STATE_TRACING) {
		pr_info("ETM trace is already running\n");
		return;
	}

	get_online_cpus();

	mutex_lock(&tracer.mutex);
  
	/* AHBAP_EN to enable master port, then ETR could write the trace to bus */
	__raw_writel(DEM_UNLOCK_MAGIC, DEM_UNLOCK);
	mt65xx_reg_sync_writel(AHB_EN, AHBAP_EN);
  
	etb_unlock(&tracer);

	for (i = 0; i < tracer.nr_etm_regs; i++) {
		if (tracer.etm_info[i].pwr_down == NULL) {
			pwr_down = 0;
		} else {
			pwr_down = *(tracer.etm_info[i].pwr_down);
		}
		if (!pwr_down) {
			cs_cpu_unlock(tracer.etm_regs[i]);
		}
	}

	cs_cpu_unlock(tracer.tpiu_regs);
	cs_cpu_unlock(tracer.funnel_regs);
	cs_cpu_unlock(tracer.etb_regs);
  
	cs_cpu_funnel_setup();
	cs_cpu_etb_setup();  
  
	/* Power-up TMs */
	for (i = 0; i < tracer.nr_etm_regs; i++) {
		if (tracer.etm_info[i].pwr_down == NULL) {
			pwr_down = 0;
		} else {
			pwr_down = *(tracer.etm_info[i].pwr_down);
		}
		if (!pwr_down) {
			cs_cpu_ptm_powerup(tracer.etm_regs[i]);
		}
	}

	/* Disable TMs so that they can be set up safely */
	for (i = 0; i < tracer.nr_etm_regs; i++) {
		if (tracer.etm_info[i].pwr_down == NULL) {
			pwr_down = 0;
		} else {
			pwr_down = *(tracer.etm_info[i].pwr_down);
		}
		if (!pwr_down) {
			cs_cpu_ptm_progbit(tracer.etm_regs[i]);
		}
	}

	/* Set up TMs */
	for (i = 0; i < tracer.nr_etm_regs; i++) {
		if (tracer.etm_info[i].pwr_down == NULL) {
			pwr_down = 0;
		} else {
			pwr_down = *(tracer.etm_info[i].pwr_down);
		}
		if (!pwr_down) {
			cs_cpu_test_common_ptm_setup(tracer.etm_regs[i], i);
		}
	}
  
	/* Set up CoreSightTraceID */
	for (i = 0; i < tracer.nr_etm_regs; i++) {
		if (tracer.etm_info[i].pwr_down == NULL) {
			pwr_down = 0;
		} else {
			pwr_down = *(tracer.etm_info[i].pwr_down);
		}
		if (!pwr_down) {
			cs_cpu_write(tracer.etm_regs[i], 0x200, i + 1);  
		}
	}

	/* update the ETMCR and ETMCCER */
	for (i = 0; i < tracer.nr_etm_regs; i++) {
		if (tracer.etm_info[i].pwr_down == NULL) {
			pwr_down = 0;
		} else {
			pwr_down = *(tracer.etm_info[i].pwr_down);
		}
		if (!pwr_down) {
			tracer.etm_info[i].etmcr = etm_readl(&tracer, i, ETMCR);
			tracer.etm_info[i].etmccer = etm_readl(&tracer, i, ETMCCER);
		}
	}
  
	/* Enable TMs now everything has been set up */
	for (i = 0; i < tracer.nr_etm_regs; i++) {
		if (tracer.etm_info[i].pwr_down == NULL) {
			pwr_down = 0;
		} else {
			pwr_down = *(tracer.etm_info[i].pwr_down);
		}
		if (!pwr_down) {
			cs_cpu_ptm_clear_progbit(tracer.etm_regs[i]);
		}
	}

	/* Avoid DBG_sys being reset */
	__raw_writel(DEM_UNLOCK_MAGIC, DEM_UNLOCK);
	__raw_writel(POWER_ON_RESET, DBGRST_ALL);
	__raw_writel(BUSCLK_EN, DBGBUSCLK_EN);
	mt65xx_reg_sync_writel(SYSCLK_EN, DBGSYSCLK_EN);
  
	tracer.state = TRACE_STATE_TRACING;
  
	etb_lock(&tracer);

	mutex_unlock(&tracer.mutex);

	put_online_cpus();
}
static int boost_mig_sync_thread(void *data)
{
	int dest_cpu = (int) data;
	int src_cpu, ret;
	struct cpu_sync *s = &per_cpu(sync_info, dest_cpu);
	struct cpufreq_policy dest_policy;
	struct cpufreq_policy src_policy;
	unsigned long flags;
	unsigned int req_freq;

	while (1) {
		wait_event_interruptible(s->sync_wq, s->pending ||
					kthread_should_stop());

		if (kthread_should_stop())
			break;

		spin_lock_irqsave(&s->lock, flags);
		s->pending = false;
		src_cpu = s->src_cpu;
		spin_unlock_irqrestore(&s->lock, flags);

		ret = cpufreq_get_policy(&src_policy, src_cpu);
		if (ret)
			continue;

		ret = cpufreq_get_policy(&dest_policy, dest_cpu);
		if (ret)
			continue;

		if (dest_policy.cur >= src_policy.cur) {
			pr_debug("No sync. CPU%d@%dKHz >= CPU%d@%dKHz\n",
				 dest_cpu, dest_policy.cur, src_cpu, src_policy.cur);
			continue;
		}

		if (sync_threshold && (dest_policy.cur >= sync_threshold))
			continue;

		cancel_delayed_work_sync(&s->boost_rem);
		if (sync_threshold) {
			if (src_policy.cur >= sync_threshold)
				s->boost_min = sync_threshold;
			else
				s->boost_min = src_policy.cur;
		} else {
			s->boost_min = src_policy.cur;
		}
		/* Force policy re-evaluation to trigger adjust notifier. */
		get_online_cpus();
		if (cpu_online(dest_cpu)) {
			cpufreq_update_policy(dest_cpu);
			queue_delayed_work_on(dest_cpu, cpu_boost_wq,
				&s->boost_rem, msecs_to_jiffies(boost_ms));
		} else {
			s->boost_min = 0;
		}
		put_online_cpus();
	}

	return 0;
}
static void trace_stop(void)
{
	int i;
	int pwr_down;

	if (tracer.state == TRACE_STATE_STOP) {
		pr_info("ETM trace is already stopped!\n");
		return;
	}
  
	get_online_cpus();

	mutex_lock(&tracer.mutex);
  
	etb_unlock(&tracer);
  
	/* "Trace program done" */
	/* "Disable trace components" */
	for (i = 0; i < tracer.nr_etm_regs; i++) {
		if (tracer.etm_info[i].pwr_down == NULL) {
			pwr_down = 0;
		} else {
			pwr_down = *(tracer.etm_info[i].pwr_down);
		}
		if (!pwr_down) {
			cs_cpu_ptm_progbit(tracer.etm_regs[i]);
		}
	}

#if 0
	cs_cpu_flushandstop(tracer.tpiu_regs);
#endif
  
	/* Disable ETB capture (ETB_CTL bit0 = 0x0) */
	cs_cpu_write(tracer.etb_regs, 0x20, 0x0);
	/* Reset ETB RAM Read Data Pointer (ETB_RRP = 0x0) */
	/* no need to reset RRP */
#if 0
	cs_cpu_write (tracer.etb_regs, 0x14, 0x0);
#endif

	/* power down */
	for (i = 0; i < tracer.nr_etm_regs; i++) {
		if (tracer.etm_info[i].pwr_down == NULL) {
			pwr_down = 0;
		} else {
			pwr_down = *(tracer.etm_info[i].pwr_down);
		}
		if (!pwr_down) {
			cs_cpu_write(tracer.etm_regs[i], 0x0, 0x1);
		}
	}

	dsb();

	tracer.state = TRACE_STATE_STOP;

	etb_lock(&tracer);

	mutex_unlock(&tracer.mutex);

	put_online_cpus();
}
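
trace_start() and trace_stop() repeat the same pwr_down check around every per-ETM operation. A hedged refactoring sketch (not part of the original driver) that folds the check into one helper:

/* True if ETM i is powered and may be programmed. */
static bool etm_is_powered(int i)
{
	if (tracer.etm_info[i].pwr_down == NULL)
		return true;
	return !*(tracer.etm_info[i].pwr_down);
}

Each loop body above then reduces to a single line of the form: if (etm_is_powered(i)) cs_cpu_ptm_progbit(tracer.etm_regs[i]);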
Example #21
/*
 * mipsmt_sys_sched_setaffinity - set the cpu affinity of a process
 */
asmlinkage long mipsmt_sys_sched_setaffinity(pid_t pid, unsigned int len,
				      unsigned long __user *user_mask_ptr)
{
	cpumask_var_t cpus_allowed, new_mask, effective_mask;
	struct thread_info *ti;
	struct task_struct *p;
	int retval;

	if (len < sizeof(new_mask))
		return -EINVAL;

	if (copy_from_user(&new_mask, user_mask_ptr, sizeof(new_mask)))
		return -EFAULT;

	get_online_cpus();
	rcu_read_lock();

	p = find_process_by_pid(pid);
	if (!p) {
		rcu_read_unlock();
		put_online_cpus();
		return -ESRCH;
	}

	/* Prevent p going away */
	get_task_struct(p);
	rcu_read_unlock();

	if (!alloc_cpumask_var(&cpus_allowed, GFP_KERNEL)) {
		retval = -ENOMEM;
		goto out_put_task;
	}
	if (!alloc_cpumask_var(&new_mask, GFP_KERNEL)) {
		retval = -ENOMEM;
		goto out_free_cpus_allowed;
	}
	if (!alloc_cpumask_var(&effective_mask, GFP_KERNEL)) {
		retval = -ENOMEM;
		goto out_free_new_mask;
	}
	retval = -EPERM;
	if (!check_same_owner(p) && !capable(CAP_SYS_NICE))
		goto out_unlock;

	retval = security_task_setscheduler(p, 0, NULL);
	if (retval)
		goto out_unlock;

	/* Record new user-specified CPU set for future reference */
	cpumask_copy(&p->thread.user_cpus_allowed, new_mask);

 again:
	/* Compute new global allowed CPU set if necessary */
	ti = task_thread_info(p);
	if (test_ti_thread_flag(ti, TIF_FPUBOUND) &&
	    cpus_intersects(*new_mask, mt_fpu_cpumask)) {
		cpus_and(*effective_mask, *new_mask, mt_fpu_cpumask);
		retval = set_cpus_allowed_ptr(p, effective_mask);
	} else {
		cpumask_copy(effective_mask, new_mask);
		clear_ti_thread_flag(ti, TIF_FPUBOUND);
		retval = set_cpus_allowed_ptr(p, new_mask);
	}

	if (!retval) {
		cpuset_cpus_allowed(p, cpus_allowed);
		if (!cpumask_subset(effective_mask, cpus_allowed)) {
			/*
			 * We must have raced with a concurrent cpuset
			 * update. Just reset the cpus_allowed to the
			 * cpuset's cpus_allowed
			 */
			cpumask_copy(new_mask, cpus_allowed);
			goto again;
		}
	}
out_unlock:
	free_cpumask_var(effective_mask);
out_free_new_mask:
	free_cpumask_var(new_mask);
out_free_cpus_allowed:
	free_cpumask_var(cpus_allowed);
out_put_task:
	put_task_struct(p);
	put_online_cpus();
	return retval;
}
Example #22
void kvm_hv_vm_deactivated(void)
{
	get_online_cpus();
	atomic_dec(&hv_vm_count);
	put_online_cpus();
}
static int boost_mig_sync_thread(void *data)
{
	int dest_cpu = (int) data;
	int src_cpu, ret;
	struct cpu_sync *s = &per_cpu(sync_info, dest_cpu);
	struct cpufreq_policy dest_policy;
	struct cpufreq_policy src_policy;
	unsigned long flags;

	while (1) {
		wait_event_interruptible(s->sync_wq,
					s->pending || kthread_should_stop());

		if (kthread_should_stop())
			break;

		spin_lock_irqsave(&s->lock, flags);
		s->pending = false;
		src_cpu = s->src_cpu;
		spin_unlock_irqrestore(&s->lock, flags);

		ret = cpufreq_get_policy(&src_policy, src_cpu);
		if (ret)
			continue;

		ret = cpufreq_get_policy(&dest_policy, dest_cpu);
		if (ret)
			continue;

		if (src_policy.cur == src_policy.cpuinfo.min_freq) {
			pr_debug("No sync. Source CPU%d@%dKHz at min freq\n",
				 src_cpu, src_policy.cur);
			continue;
		}

		cancel_delayed_work_sync(&s->boost_rem);
		if (sync_threshold) {
			if (src_policy.cur >= sync_threshold)
				s->boost_min = sync_threshold;
			else
				s->boost_min = src_policy.cur;
		} else {
			s->boost_min = src_policy.cur;
		}
		/* Force policy re-evaluation to trigger adjust notifier. */
		get_online_cpus();
		if (cpu_online(src_cpu))
			/*
			 * Send an unchanged policy update to the source
			 * CPU. Even though the policy isn't changed from
			 * its existing boosted or non-boosted state
			 * notifying the source CPU will let the governor
			 * know a boost happened on another CPU and that it
			 * should re-evaluate the frequency at the next timer
			 * event without interference from a min sample time.
			 */
			cpufreq_update_policy(src_cpu);
		if (cpu_online(dest_cpu)) {
			cpufreq_update_policy(dest_cpu);
			queue_delayed_work_on(dest_cpu, cpu_boost_wq,
				&s->boost_rem, msecs_to_jiffies(boost_ms));
		} else {
			s->boost_min = 0;
		}
		put_online_cpus();
	}

	return 0;
}
void op_block_hotplug(void)
{
	get_online_cpus();
}
Example #25
/*H:020
 * Now the Switcher is mapped and everything else is ready, we need to do
 * some more i386-specific initialization.
 */
void __init lguest_arch_host_init(void)
{
	int i;

	/*
	 * Most of the x86/switcher_32.S doesn't care that it's been moved; on
	 * Intel, jumps are relative, and it doesn't access any references to
	 * external code or data.
	 *
	 * The only exception is the interrupt handlers in switcher.S: their
	 * addresses are placed in a table (default_idt_entries), so we need to
	 * update the table with the new addresses.  switcher_offset() is a
	 * convenience function which returns the distance between the
	 * compiled-in switcher code and the high-mapped copy we just made.
	 */
	for (i = 0; i < IDT_ENTRIES; i++)
		default_idt_entries[i] += switcher_offset();

	/*
	 * Set up the Switcher's per-cpu areas.
	 *
	 * Each CPU gets two pages of its own within the high-mapped region
	 * (aka. "struct lguest_pages").  Much of this can be initialized now,
	 * but some depends on what Guest we are running (which is set up in
	 * copy_in_guest_info()).
	 */
	for_each_possible_cpu(i) {
		/* lguest_pages() returns this CPU's two pages. */
		struct lguest_pages *pages = lguest_pages(i);
		/* This is a convenience pointer to make the code neater. */
		struct lguest_ro_state *state = &pages->state;

		/*
		 * The Global Descriptor Table: the Host has a different one
		 * for each CPU.  We keep a descriptor for the GDT which says
		 * where it is and how big it is (the size is actually the last
		 * byte, not the size, hence the "-1").
		 */
		state->host_gdt_desc.size = GDT_SIZE-1;
		state->host_gdt_desc.address = (long)get_cpu_gdt_table(i);

		/*
		 * All CPUs on the Host use the same Interrupt Descriptor
		 * Table, so we just use store_idt(), which gets this CPU's IDT
		 * descriptor.
		 */
		store_idt(&state->host_idt_desc);

		/*
		 * The descriptors for the Guest's GDT and IDT can be filled
		 * out now, too.  We copy the GDT & IDT into ->guest_gdt and
		 * ->guest_idt before actually running the Guest.
		 */
		state->guest_idt_desc.size = sizeof(state->guest_idt)-1;
		state->guest_idt_desc.address = (long)&state->guest_idt;
		state->guest_gdt_desc.size = sizeof(state->guest_gdt)-1;
		state->guest_gdt_desc.address = (long)&state->guest_gdt;

		/*
		 * We know where we want the stack to be when the Guest enters
		 * the Switcher: in pages->regs.  The stack grows upwards, so
		 * we start it at the end of that structure.
		 */
		state->guest_tss.sp0 = (long)(&pages->regs + 1);
		/*
		 * And this is the GDT entry to use for the stack: we keep a
		 * couple of special LGUEST entries.
		 */
		state->guest_tss.ss0 = LGUEST_DS;

		/*
		 * x86 can have a fine-grained bitmap which indicates what I/O
		 * ports the process can use.  We set it to the end of our
		 * structure, meaning "none".
		 */
		state->guest_tss.io_bitmap_base = sizeof(state->guest_tss);

		/*
		 * Some GDT entries are the same across all Guests, so we can
		 * set them up now.
		 */
		setup_default_gdt_entries(state);
		/* Most IDT entries are the same for all Guests, too.*/
		setup_default_idt_entries(state, default_idt_entries);

		/*
		 * The Host needs to be able to use the LGUEST segments on this
		 * CPU, too, so put them in the Host GDT.
		 */
		get_cpu_gdt_table(i)[GDT_ENTRY_LGUEST_CS] = FULL_EXEC_SEGMENT;
		get_cpu_gdt_table(i)[GDT_ENTRY_LGUEST_DS] = FULL_SEGMENT;
	}

	/*
	 * In the Switcher, we want the %cs segment register to use the
	 * LGUEST_CS GDT entry: we've put that in the Host and Guest GDTs, so
	 * it will be undisturbed when we switch.  To change %cs and jump we
	 * need this structure to feed to Intel's "lcall" instruction.
	 */
	lguest_entry.offset = (long)switch_to_guest + switcher_offset();
	lguest_entry.segment = LGUEST_CS;

	/*
	 * Finally, we need to turn off "Page Global Enable".  PGE is an
	 * optimization where page table entries are specially marked to show
	 * they never change.  The Host kernel marks all the kernel pages this
	 * way because it's always present, even when userspace is running.
	 *
	 * Lguest breaks this: unbeknownst to the rest of the Host kernel, we
	 * switch to the Guest kernel.  If you don't disable this on all CPUs,
	 * you'll get really weird bugs that you'll chase for two days.
	 *
	 * I used to turn PGE off every time we switched to the Guest and back
	 * on when we return, but that slowed the Switcher down noticeably.
	 */

	/*
	 * We don't need the complexity of CPUs coming and going while we're
	 * doing this.
	 */
	get_online_cpus();
	if (cpu_has_pge) { /* We have a broader idea of "global". */
		/* Remember that this was originally set (for cleanup). */
		cpu_had_pge = 1;
		/*
		 * adjust_pge is a helper function which sets or unsets the PGE
		 * bit on its CPU, depending on the argument (0 == unset).
		 */
		on_each_cpu(adjust_pge, (void *)0, 1);
		/* Turn off the feature in the global feature set. */
		clear_cpu_cap(&boot_cpu_data, X86_FEATURE_PGE);
	}
	put_online_cpus();
}
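
The comment above notes that cpu_had_pge is remembered "for cleanup". A hedged sketch of that cleanup on host teardown, assumed to mirror the enable path rather than quoted from the lguest sources:

void lguest_arch_host_fini(void)
{
	/* If we disabled PGE at init time, restore it on every CPU. */
	get_online_cpus();
	if (cpu_had_pge) {
		set_cpu_cap(&boot_cpu_data, X86_FEATURE_PGE);
		/* adjust_pge's argument of 1 means "set the PGE bit". */
		on_each_cpu(adjust_pge, (void *)1, 1);
	}
	put_online_cpus();
}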
Example #26
/*
 * mipsmt_sys_sched_setaffinity - set the cpu affinity of a process
 */
asmlinkage long mipsmt_sys_sched_setaffinity(pid_t pid, unsigned int len,
				      unsigned long __user *user_mask_ptr)
{
	cpumask_t new_mask;
	cpumask_t effective_mask;
	int retval;
	struct task_struct *p;
	struct thread_info *ti;
	uid_t euid;

	if (len < sizeof(new_mask))
		return -EINVAL;

	if (copy_from_user(&new_mask, user_mask_ptr, sizeof(new_mask)))
		return -EFAULT;

	get_online_cpus();
	read_lock(&tasklist_lock);

	p = find_process_by_pid(pid);
	if (!p) {
		read_unlock(&tasklist_lock);
		put_online_cpus();
		return -ESRCH;
	}

	/*
	 * It is not safe to call set_cpus_allowed with the
	 * tasklist_lock held.  We will bump the task_struct's
	 * usage count and drop tasklist_lock before invoking
	 * set_cpus_allowed.
	 */
	get_task_struct(p);

	euid = current_euid();
	retval = -EPERM;
	if (euid != p->cred->euid && euid != p->cred->uid &&
	    !capable(CAP_SYS_NICE)) {
		read_unlock(&tasklist_lock);
		goto out_unlock;
	}

	retval = security_task_setscheduler(p, 0, NULL);
	if (retval)
		goto out_unlock;

	/* Record new user-specified CPU set for future reference */
	p->thread.user_cpus_allowed = new_mask;

	/* Unlock the task list */
	read_unlock(&tasklist_lock);

	/* Compute new global allowed CPU set if necessary */
	ti = task_thread_info(p);
	if (test_ti_thread_flag(ti, TIF_FPUBOUND) &&
	    cpus_intersects(new_mask, mt_fpu_cpumask)) {
		cpus_and(effective_mask, new_mask, mt_fpu_cpumask);
		retval = set_cpus_allowed(p, effective_mask);
	} else {
		clear_ti_thread_flag(ti, TIF_FPUBOUND);
		retval = set_cpus_allowed(p, new_mask);
	}

out_unlock:
	put_task_struct(p);
	put_online_cpus();
	return retval;
}