Example #1
static int rtasd(void *unused)
{
	unsigned int err_type;
	int cpu = 0;
	int event_scan = rtas_token("event-scan");
	cpumask_t all = CPU_MASK_ALL;
	int rc;

	daemonize("rtasd");

	if (event_scan == RTAS_UNKNOWN_SERVICE || get_eventscan_parms() == -1)
		goto error;

	rtas_log_buf = vmalloc(rtas_error_log_buffer_max*LOG_NUMBER);
	if (!rtas_log_buf) {
		printk(KERN_ERR "rtasd: no memory\n");
		goto error;
	}

	/* We can use rtas_log_buf now */
	no_more_logging = 0;

	printk(KERN_ERR "RTAS daemon started\n");

	DEBUG("will sleep for %d jiffies\n", (HZ*60/rtas_event_scan_rate) / 2);

	/* See if we have any error stored in NVRAM */
	memset(logdata, 0, rtas_error_log_max);

	rc = nvram_read_error_log(logdata, rtas_error_log_max, &err_type);
	if (!rc) {
		if (err_type != ERR_FLAG_ALREADY_LOGGED) {
			pSeries_log_error(logdata, err_type | ERR_FLAG_BOOT, 0);
		}
	}

	/* First pass. */
	lock_cpu_hotplug();
	for_each_online_cpu(cpu) {
		DEBUG("scheduling on %d\n", cpu);
		set_cpus_allowed(current, cpumask_of_cpu(cpu));
		DEBUG("watchdog scheduled on cpu %d\n", smp_processor_id());

		do_event_scan(event_scan);
		set_current_state(TASK_INTERRUPTIBLE);
		schedule_timeout(HZ);
	}
	unlock_cpu_hotplug();

	if (surveillance_timeout != -1) {
		DEBUG("enabling surveillance\n");
		enable_surveillance(surveillance_timeout);
		DEBUG("surveillance enabled\n");
	}

	lock_cpu_hotplug();
	cpu = first_cpu_const(mk_cpumask_const(cpu_online_map));
	for (;;) {
		set_cpus_allowed(current, cpumask_of_cpu(cpu));
		do_event_scan(event_scan);
		set_cpus_allowed(current, all);

		/* Drop hotplug lock, and sleep for a bit (at least
		 * one second since some machines have problems if we
		 * call event-scan too quickly). */
		unlock_cpu_hotplug();
		set_current_state(TASK_INTERRUPTIBLE);
		schedule_timeout((HZ*60/rtas_event_scan_rate) / 2);
		lock_cpu_hotplug();

		cpu = next_cpu_const(cpu, mk_cpumask_const(cpu_online_map));
		if (cpu == NR_CPUS)
			cpu = first_cpu_const(mk_cpumask_const(cpu_online_map));
	}

error:
	/* Should delete proc entries */
	return -EINVAL;
}
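Example #1's rtasd() runs as a standalone kernel daemon. For context, a daemon like this is typically launched from an __init routine via kernel_thread(); the sketch below is illustrative only (the launcher name, clone flags, and error message are assumptions, not quoted from the original file).

static int __init rtas_daemon_init(void)
{
	/* Hypothetical launcher: spawn rtasd() as a kernel thread.
	 * kernel_thread() returns a pid, or a negative value on failure. */
	if (kernel_thread(rtasd, NULL, CLONE_FS) < 0)
		printk(KERN_ERR "rtasd: failed to start RTAS daemon\n");

	return 0;
}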
Example #2
cpumask_t target_cpus_x2apic(void)
{
    /* Deliver interrupts only to CPU0 for now */
    return cpumask_of_cpu(0);
}
/*
 * Check, if the new registered device should be used.
 */
static int tick_check_new_device(struct clock_event_device *newdev)
{
	struct clock_event_device *curdev;
	struct tick_device *td;
	int cpu, ret = NOTIFY_OK;
	unsigned long flags;
	cpumask_t cpumask;

	spin_lock_irqsave(&tick_device_lock, flags);

	cpu = smp_processor_id();
	if (!cpu_isset(cpu, newdev->cpumask))
		goto out;

	td = &per_cpu(tick_cpu_device, cpu);
	curdev = td->evtdev;
	cpumask = cpumask_of_cpu(cpu);

	/* cpu local device ? */
	if (!cpus_equal(newdev->cpumask, cpumask)) {

		/*
		 * If the cpu affinity of the device interrupt can not
		 * be set, ignore it.
		 */
		if (!irq_can_set_affinity(newdev->irq))
			goto out_bc;

		/*
		 * If we have a cpu local device already, do not replace it
		 * by a non cpu local device
		 */
		if (curdev && cpus_equal(curdev->cpumask, cpumask))
			goto out_bc;
	}

	/*
	 * If we have an active device, then check the rating and the oneshot
	 * feature.
	 */
	if (curdev) {
		/*
		 * Prefer one shot capable devices !
		 */
		if ((curdev->features & CLOCK_EVT_FEAT_ONESHOT) &&
		    !(newdev->features & CLOCK_EVT_FEAT_ONESHOT))
			goto out_bc;
		/*
		 * Check the rating
		 */
		if (curdev->rating >= newdev->rating)
			goto out_bc;
	}

	/*
	 * Replace the eventually existing device by the new
	 * device. If the current device is the broadcast device, do
	 * not give it back to the clockevents layer !
	 */
	if (tick_is_broadcast_device(curdev)) {
		clockevents_set_mode(curdev, CLOCK_EVT_MODE_SHUTDOWN);
		curdev = NULL;
	}
	clockevents_exchange_device(curdev, newdev);
	tick_setup_device(td, newdev, cpu, cpumask);
	if (newdev->features & CLOCK_EVT_FEAT_ONESHOT)
		tick_oneshot_notify();

	spin_unlock_irqrestore(&tick_device_lock, flags);
	return NOTIFY_STOP;

out_bc:
	/*
	 * Can the new device be used as a broadcast device ?
	 */
	if (tick_check_broadcast_device(newdev))
		ret = NOTIFY_STOP;
out:
	spin_unlock_irqrestore(&tick_device_lock, flags);

	return ret;
}
Example #4
File: rtasd.c Project: sarnobat/knoppix
static int rtasd(void *unused)
{
	int cpu = 0;
	int error;
	int first_pass = 1;
	int event_scan = rtas_token("event-scan");

	if (event_scan == RTAS_UNKNOWN_SERVICE || get_eventscan_parms() == -1)
		goto error;

	rtas_log_buf = vmalloc(rtas_error_log_max*LOG_NUMBER);
	if (!rtas_log_buf) {
		printk(KERN_ERR "rtasd: no memory\n");
		goto error;
	}

	DEBUG("will sleep for %d jiffies\n", (HZ*60/rtas_event_scan_rate) / 2);

	daemonize("rtasd");

#if 0
	/* Rusty unreal time task */
	current->policy = SCHED_FIFO;
	current->nice = sys_sched_get_priority_max(SCHED_FIFO) + 1;
#endif

repeat:
	for (cpu = 0; cpu < NR_CPUS; cpu++) {
		if (!cpu_online(cpu))
			continue;

		DEBUG("scheduling on %d\n", cpu);
		set_cpus_allowed(current, cpumask_of_cpu(cpu));
		DEBUG("watchdog scheduled on cpu %d\n", smp_processor_id());

		do {
			memset(logdata, 0, rtas_error_log_max);
			error = rtas_call(event_scan, 4, 1, NULL,
					EVENT_SCAN_ALL_EVENTS, 0,
					__pa(logdata), rtas_error_log_max);
			if (error == -1) {
				printk(KERN_ERR "event-scan failed\n");
				break;
			}

			if (error == 0)
				log_rtas(logdata);

		} while(error == 0);

		/*
		 * Check all cpus for pending events quickly, sleeping for
		 * at least one second since some machines have problems
		 * if we call event-scan too quickly
		 */
		set_current_state(TASK_INTERRUPTIBLE);
		schedule_timeout(first_pass ? HZ : (HZ*60/rtas_event_scan_rate) / 2);
	}

	if (first_pass && surveillance_requested) {
		DEBUG("enabling surveillance\n");
		if (enable_surveillance())
			goto error_vfree;
		DEBUG("surveillance enabled\n");
	}

	first_pass = 0;
	goto repeat;

error_vfree:
	vfree(rtas_log_buf);
error:
	/* Should delete proc entries */
	return -EINVAL;
}
Example #5
/**
 * kthread_create - create a kthread.
 * @threadfn: the function to run until signal_pending(current).
 * @data: data ptr for @threadfn.
 * @namefmt: printf-style name for the thread.
 *
 * Description: This helper function creates and names a kernel
 * thread.  The thread will be stopped: use wake_up_process() to start
 * it.  See also kthread_run(), kthread_create_on_cpu().
 *
 * When woken, the thread will run @threadfn() with @data as its
 * argument. @threadfn() can either call do_exit() directly if it is a
 * standalone thread for which no one will call kthread_stop(), or
 * return when 'kthread_should_stop()' is true (which means
 * kthread_stop() has been called).  The return value should be zero
 * or a negative error number; it will be passed to kthread_stop().
 *
 * Returns a task_struct or ERR_PTR(-ENOMEM).
 */
struct task_struct *kthread_create(int (*threadfn)(void *data),
				   void *data,
				   const char namefmt[],
				   ...)
{
	struct kthread_create_info create;

	create.threadfn = threadfn;
	create.data = data;
	init_completion(&create.started);
	init_completion(&create.done);

	spin_lock(&kthread_create_lock);
	list_add_tail(&create.list, &kthread_create_list);
	spin_unlock(&kthread_create_lock);

	wake_up_process(kthreadd_task);
	wait_for_completion(&create.done);

	if (!IS_ERR(create.result)) {
		struct sched_param param = { .sched_priority = 0 };
		va_list args;

		va_start(args, namefmt);
		vsnprintf(create.result->comm, sizeof(create.result->comm),
			  namefmt, args);
		va_end(args);
		/*
		 * root may have changed our (kthreadd's) priority or CPU mask.
		 * The kernel thread should not inherit these properties.
		 */
		sched_setscheduler_nocheck(create.result, SCHED_NORMAL, &param);
		set_user_nice(create.result, KTHREAD_NICE_LEVEL);
		set_cpus_allowed_ptr(create.result, cpu_all_mask);
	}
	return create.result;
}
EXPORT_SYMBOL(kthread_create);

/**
 * kthread_bind - bind a just-created kthread to a cpu.
 * @k: thread created by kthread_create().
 * @cpu: cpu (might not be online, must be possible) for @k to run on.
 *
 * Description: This function is equivalent to set_cpus_allowed(),
 * except that @cpu doesn't need to be online, and the thread must be
 * stopped (i.e., just returned from kthread_create()).
 */
void kthread_bind(struct task_struct *k, unsigned int cpu)
{
	/* Must have done schedule() in kthread() before we set_task_cpu */
	if (!wait_task_inactive(k, TASK_UNINTERRUPTIBLE)) {
		WARN_ON(1);
		return;
	}
	set_task_cpu(k, cpu);
	k->cpus_allowed = cpumask_of_cpu(cpu);
	k->rt.nr_cpus_allowed = 1;
	k->flags |= PF_THREAD_BOUND;
}
EXPORT_SYMBOL(kthread_bind);

/**
 * kthread_stop - stop a thread created by kthread_create().
 * @k: thread created by kthread_create().
 *
 * Sets kthread_should_stop() for @k to return true, wakes it, and
 * waits for it to exit.  Your threadfn() must not call do_exit()
 * itself if you use this function!  This can also be called after
 * kthread_create() instead of calling wake_up_process(): the thread
 * will exit without calling threadfn().
 *
 * Returns the result of threadfn(), or %-EINTR if wake_up_process()
 * was never called.
 */
int kthread_stop(struct task_struct *k)
{
	int ret;

	mutex_lock(&kthread_stop_lock);

	/* It could exit after stop_info.k set, but before wake_up_process. */
	get_task_struct(k);

	trace_sched_kthread_stop(k);

	/* Must init completion *before* thread sees kthread_stop_info.k */
	init_completion(&kthread_stop_info.done);
	smp_wmb();

	/* Now set kthread_should_stop() to true, and wake it up. */
	kthread_stop_info.k = k;
	wake_up_process(k);

	/* Once it dies, reset stop ptr, gather result and we're done. */
	wait_for_completion(&kthread_stop_info.done);
	kthread_stop_info.k = NULL;
	ret = kthread_stop_info.err;
	put_task_struct(k);
	mutex_unlock(&kthread_stop_lock);

	trace_sched_kthread_stop_ret(ret);

	return ret;
}
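The kerneldoc above spells out the kthread_create()/kthread_bind()/kthread_stop() contract. The snippet below is a minimal usage sketch, not taken from any of the quoted files: the worker function and its name are hypothetical; the thread is created stopped, bound to a CPU, woken, and can later be stopped with kthread_stop().

/* Hypothetical worker: sleep in a loop until kthread_stop() is called. */
static int my_worker(void *data)
{
	while (!kthread_should_stop())
		schedule_timeout_interruptible(HZ);
	return 0;
}

static struct task_struct *start_pinned_worker(unsigned int cpu)
{
	struct task_struct *tsk;

	tsk = kthread_create(my_worker, NULL, "my_worker/%u", cpu);
	if (IS_ERR(tsk))
		return tsk;

	kthread_bind(tsk, cpu);		/* must be done while the thread is still stopped */
	wake_up_process(tsk);		/* now let it run, pinned to @cpu */
	return tsk;
}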
/* HTC: Lloyd for throughput */
static void sdio_mux_affinity(struct work_struct *work)
{
	if ( sched_setaffinity(current->pid,&cpumask_of_cpu(0)) < 0)
		printk(KERN_ERR "sched_setaffinity fail - %s %s\n",__func__, current->comm);
}
Example #7
static cpumask_t x2apic_target_cpus(void)
{
	return cpumask_of_cpu(0);
}
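Later kernels replaced the by-value cpumask_t helpers seen throughout these examples with pointer-based ones; under that assumption, the equivalent of the callback above would return a mask pointer (comparison sketch only, not part of the quoted file):

static const struct cpumask *x2apic_target_cpus_ptr(void)
{
	/* cpumask_of(cpu) is the pointer-based counterpart of
	 * cpumask_of_cpu(cpu): a constant single-CPU mask. */
	return cpumask_of(0);
}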
Example #8
void __init time_init_hook(void)
{
	irq0.mask = cpumask_of_cpu(safe_smp_processor_id());
	setup_irq(0, &irq0);
}
Example #9
/* Result code is:
 *  0 - handled
 *  1 - normal page fault
 * -1 - critical hash insertion error
 */
int hash_page(unsigned long ea, unsigned long access, unsigned long trap)
{
	void *pgdir;
	unsigned long vsid;
	struct mm_struct *mm;
	pte_t *ptep;
	cpumask_t tmp;
	int rc, user_region = 0, local = 0;
	int psize;

	DBG_LOW("hash_page(ea=%016lx, access=%lx, trap=%lx\n",
		ea, access, trap);

	if ((ea & ~REGION_MASK) >= PGTABLE_RANGE) {
		DBG_LOW(" out of pgtable range !\n");
 		return 1;
	}

	/* Get region & vsid */
 	switch (REGION_ID(ea)) {
	case USER_REGION_ID:
		user_region = 1;
		mm = current->mm;
		if (! mm) {
			DBG_LOW(" user region with no mm !\n");
			return 1;
		}
		vsid = get_vsid(mm->context.id, ea);
		psize = mm->context.user_psize;
		break;
	case VMALLOC_REGION_ID:
		mm = &init_mm;
		vsid = get_kernel_vsid(ea);
		if (ea < VMALLOC_END)
			psize = mmu_vmalloc_psize;
		else
			psize = mmu_io_psize;
		break;
	default:
		/* Not a valid range
		 * Send the problem up to do_page_fault 
		 */
		return 1;
	}
	DBG_LOW(" mm=%p, mm->pgdir=%p, vsid=%016lx\n", mm, mm->pgd, vsid);

	/* Get pgdir */
	pgdir = mm->pgd;
	if (pgdir == NULL)
		return 1;

	/* Check CPU locality */
	tmp = cpumask_of_cpu(smp_processor_id());
	if (user_region && cpus_equal(mm->cpu_vm_mask, tmp))
		local = 1;

	/* Handle hugepage regions */
	if (unlikely(in_hugepage_area(mm->context, ea))) {
		DBG_LOW(" -> huge page !\n");
		return hash_huge_page(mm, access, ea, vsid, local, trap);
	}

	/* Get PTE and page size from page tables */
	ptep = find_linux_pte(pgdir, ea);
	if (ptep == NULL || !pte_present(*ptep)) {
		DBG_LOW(" no PTE !\n");
		return 1;
	}

#ifndef CONFIG_PPC_64K_PAGES
	DBG_LOW(" i-pte: %016lx\n", pte_val(*ptep));
#else
	DBG_LOW(" i-pte: %016lx %016lx\n", pte_val(*ptep),
		pte_val(*(ptep + PTRS_PER_PTE)));
#endif
	/* Pre-check access permissions (will be re-checked atomically
	 * in __hash_page_XX but this pre-check is a fast path
	 */
	if (access & ~pte_val(*ptep)) {
		DBG_LOW(" no access !\n");
		return 1;
	}

	/* Do actual hashing */
#ifndef CONFIG_PPC_64K_PAGES
	rc = __hash_page_4K(ea, access, vsid, ptep, trap, local);
#else
	if (mmu_ci_restrictions) {
		/* If this PTE is non-cacheable, switch to 4k */
		if (psize == MMU_PAGE_64K &&
		    (pte_val(*ptep) & _PAGE_NO_CACHE)) {
			if (user_region) {
				psize = MMU_PAGE_4K;
				mm->context.user_psize = MMU_PAGE_4K;
				mm->context.sllp = SLB_VSID_USER |
					mmu_psize_defs[MMU_PAGE_4K].sllp;
			} else if (ea < VMALLOC_END) {
				/*
				 * some driver did a non-cacheable mapping
				 * in vmalloc space, so switch vmalloc
				 * to 4k pages
				 */
				printk(KERN_ALERT "Reducing vmalloc segment "
				       "to 4kB pages because of "
				       "non-cacheable mapping\n");
				psize = mmu_vmalloc_psize = MMU_PAGE_4K;
			}
		}
		if (user_region) {
			if (psize != get_paca()->context.user_psize) {
				get_paca()->context = mm->context;
				slb_flush_and_rebolt();
			}
		} else if (get_paca()->vmalloc_sllp !=
			   mmu_psize_defs[mmu_vmalloc_psize].sllp) {
			get_paca()->vmalloc_sllp =
				mmu_psize_defs[mmu_vmalloc_psize].sllp;
			slb_flush_and_rebolt();
		}
	}
	if (psize == MMU_PAGE_64K)
		rc = __hash_page_64K(ea, access, vsid, ptep, trap, local);
	else
		rc = __hash_page_4K(ea, access, vsid, ptep, trap, local);
#endif /* CONFIG_PPC_64K_PAGES */

#ifndef CONFIG_PPC_64K_PAGES
	DBG_LOW(" o-pte: %016lx\n", pte_val(*ptep));
#else
	DBG_LOW(" o-pte: %016lx %016lx\n", pte_val(*ptep),
		pte_val(*(ptep + PTRS_PER_PTE)));
#endif
	DBG_LOW(" -> rc=%d\n", rc);
	return rc;
}
Example #10
File: smp.c Project: 3sOx/asuswrt-merlin
/*
 * this function sends a 'reschedule' IPI to another CPU.
 * it goes straight through and wastes no time serializing
 * anything. Worst case is that we lose a reschedule ...
 */
static void native_smp_send_reschedule(int cpu)
{
	WARN_ON(cpu_is_offline(cpu));
	send_IPI_mask(cpumask_of_cpu(cpu), RESCHEDULE_VECTOR);
}
Example #11
/**
 * kthread_create - create a kthread.
 * @threadfn: the function to run until signal_pending(current).
 * @data: data ptr for @threadfn.
 * @namefmt: printf-style name for the thread.
 *
 * Description: This helper function creates and names a kernel
 * thread.  The thread will be stopped: use wake_up_process() to start
 * it.  See also kthread_run(), kthread_create_on_cpu().
 *
 * When woken, the thread will run @threadfn() with @data as its
 * argument. @threadfn() can either call do_exit() directly if it is a
 * standalone thread for which no one will call kthread_stop(), or
 * return when 'kthread_should_stop()' is true (which means
 * kthread_stop() has been called).  The return value should be zero
 * or a negative error number; it will be passed to kthread_stop().
 *
 * Returns a task_struct or ERR_PTR(-ENOMEM).
 */
struct task_struct *kthread_create(int (*threadfn)(void *data),
				   void *data,
				   const char namefmt[],
				   ...)
{
	struct kthread_create_info create;

	create.threadfn = threadfn;
	create.data = data;
	init_completion(&create.done);

	spin_lock(&kthread_create_lock);
	list_add_tail(&create.list, &kthread_create_list);
	spin_unlock(&kthread_create_lock);

	wake_up_process(kthreadd_task);
	wait_for_completion(&create.done);

	if (!IS_ERR(create.result)) {
		struct sched_param param = { .sched_priority = 0 };
		va_list args;

		va_start(args, namefmt);
		vsnprintf(create.result->comm, sizeof(create.result->comm),
			  namefmt, args);
		va_end(args);

		int policy = SCHED_NORMAL;
#ifdef CONFIG_TIVO
		int  i;
		int bFound = 0;
		for (i=0; i<sizeof(s_tvKthreadInfoTable)/sizeof(TvKthreadInfo); i++)
		{
			if (!strcmp(s_tvKthreadInfoTable[i].name, create.result->comm))
			{
				if (s_tvKthreadInfoTable[i].policy != -1)
				{
					policy = s_tvKthreadInfoTable[i].policy;
					param.sched_priority = s_tvKthreadInfoTable[i].rt_priority;
				}
				bFound = 1;
				break;
			}
		}
		if (!bFound)
		{
		    printk("--- Unknown kthread %s is lanched?\n", create.result->comm);
		}
#endif

		/*
		 * root may have changed our (kthreadd's) priority or CPU mask.
		 * The kernel thread should not inherit these properties, and should
		 * use specific RT priorities for some threads.
		 */
		sched_setscheduler_nocheck(create.result, policy, &param);
		set_user_nice(create.result, KTHREAD_NICE_LEVEL);
		set_cpus_allowed_ptr(create.result, cpu_all_mask);
	}
	return create.result;
}
EXPORT_SYMBOL(kthread_create);
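kthread_run(), referenced in the kerneldoc for kthread_create() above, is the conventional wrapper that creates the thread and wakes it immediately; in mainline it is roughly the following macro:

#define kthread_run(threadfn, data, namefmt, ...)			   \
({									   \
	struct task_struct *__k						   \
		= kthread_create(threadfn, data, namefmt, ## __VA_ARGS__); \
	if (!IS_ERR(__k))						   \
		wake_up_process(__k);					   \
	__k;								   \
})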

/**
 * kthread_bind - bind a just-created kthread to a cpu.
 * @k: thread created by kthread_create().
 * @cpu: cpu (might not be online, must be possible) for @k to run on.
 *
 * Description: This function is equivalent to set_cpus_allowed(),
 * except that @cpu doesn't need to be online, and the thread must be
 * stopped (i.e., just returned from kthread_create()).
 */
void kthread_bind(struct task_struct *k, unsigned int cpu)
{
	/* Must have done schedule() in kthread() before we set_task_cpu */
	if (!wait_task_inactive(k, TASK_UNINTERRUPTIBLE)) {
		WARN_ON(1);
		return;
	}
	set_task_cpu(k, cpu);
	k->cpus_allowed = cpumask_of_cpu(cpu);
	k->rt.nr_cpus_allowed = 1;
	k->flags |= PF_THREAD_BOUND;
}
EXPORT_SYMBOL(kthread_bind);

/**
 * kthread_stop - stop a thread created by kthread_create().
 * @k: thread created by kthread_create().
 *
 * Sets kthread_should_stop() for @k to return true, wakes it, and
 * waits for it to exit. This can also be called after kthread_create()
 * instead of calling wake_up_process(): the thread will exit without
 * calling threadfn().
 *
 * If threadfn() may call do_exit() itself, the caller must ensure
 * task_struct can't go away.
 *
 * Returns the result of threadfn(), or %-EINTR if wake_up_process()
 * was never called.
 */
int kthread_stop(struct task_struct *k)
{
	struct kthread *kthread;
	int ret;

	trace_sched_kthread_stop(k);
	get_task_struct(k);

	kthread = to_kthread(k);
	barrier(); /* it might have exited */
	if (k->vfork_done != NULL) {
		kthread->should_stop = 1;
		wake_up_process(k);
		wait_for_completion(&kthread->exited);
	}
	ret = k->exit_code;

	put_task_struct(k);
	trace_sched_kthread_stop_ret(ret);

	return ret;
}
EXPORT_SYMBOL(kthread_stop);

int kthreadd(void *unused)
{
	struct task_struct *tsk = current;

	/* Setup a clean context for our children to inherit. */
	set_task_comm(tsk, "kthreadd");
	ignore_signals(tsk);
	set_user_nice(tsk, KTHREAD_NICE_LEVEL);
	set_cpus_allowed_ptr(tsk, cpu_all_mask);
	set_mems_allowed(node_possible_map);

	current->flags |= PF_NOFREEZE | PF_FREEZER_NOSIG;

	for (;;) {
		set_current_state(TASK_INTERRUPTIBLE);
		if (list_empty(&kthread_create_list))
			schedule();
		__set_current_state(TASK_RUNNING);

		spin_lock(&kthread_create_lock);
		while (!list_empty(&kthread_create_list)) {
			struct kthread_create_info *create;

			create = list_entry(kthread_create_list.next,
					    struct kthread_create_info, list);
			list_del_init(&create->list);
			spin_unlock(&kthread_create_lock);

			create_kthread(create);

			spin_lock(&kthread_create_lock);
		}
		spin_unlock(&kthread_create_lock);
	}

	return 0;
}
Example #12
void xnsched_init(struct xnsched *sched, int cpu)
{
	char rrbtimer_name[XNOBJECT_NAME_LEN];
	char htimer_name[XNOBJECT_NAME_LEN];
	char root_name[XNOBJECT_NAME_LEN];
	union xnsched_policy_param param;
	struct xnthread_init_attr attr;
	struct xnsched_class *p;

#ifdef CONFIG_SMP
	sched->cpu = cpu;
	ksformat(htimer_name, sizeof(htimer_name), "[host-timer/%u]", cpu);
	ksformat(rrbtimer_name, sizeof(rrbtimer_name), "[rrb-timer/%u]", cpu);
	ksformat(root_name, sizeof(root_name), "ROOT/%u", cpu);
	cpus_clear(sched->resched);
#else
	strcpy(htimer_name, "[host-timer]");
	strcpy(rrbtimer_name, "[rrb-timer]");
	strcpy(root_name, "ROOT");
#endif
	for_each_xnsched_class(p) {
		if (p->sched_init)
			p->sched_init(sched);
	}

	sched->status = 0;
	sched->lflags = 0;
	sched->inesting = 0;
	sched->curr = &sched->rootcb;

	attr.flags = XNROOT | XNFPU;
	attr.name = root_name;
	attr.personality = &xenomai_personality;
	attr.affinity = cpumask_of_cpu(cpu);
	param.idle.prio = XNSCHED_IDLE_PRIO;

	__xnthread_init(&sched->rootcb, &attr,
			sched, &xnsched_class_idle, &param);

	/*
	 * No direct handler here since the host timer processing is
	 * postponed to xnintr_irq_handler(), as part of the interrupt
	 * exit code.
	 */
	xntimer_init(&sched->htimer, &nkclock, NULL,
		     sched, XNTIMER_IGRAVITY);
	xntimer_set_priority(&sched->htimer, XNTIMER_LOPRIO);
	xntimer_set_name(&sched->htimer, htimer_name);
	xntimer_init(&sched->rrbtimer, &nkclock, roundrobin_handler,
		     sched, XNTIMER_IGRAVITY);
	xntimer_set_name(&sched->rrbtimer, rrbtimer_name);
	xntimer_set_priority(&sched->rrbtimer, XNTIMER_LOPRIO);

	xnstat_exectime_set_current(sched, &sched->rootcb.stat.account);
#ifdef CONFIG_XENO_ARCH_FPU
	sched->fpuholder = &sched->rootcb;
#endif /* CONFIG_XENO_ARCH_FPU */

	xnthread_init_root_tcb(&sched->rootcb);
	list_add_tail(&sched->rootcb.glink, &nkthreadq);
	cobalt_nrthreads++;

#ifdef CONFIG_XENO_OPT_WATCHDOG
	xntimer_init(&sched->wdtimer, &nkclock, watchdog_handler,
		     sched, XNTIMER_NOBLCK|XNTIMER_IGRAVITY);
	xntimer_set_name(&sched->wdtimer, "[watchdog]");
	xntimer_set_priority(&sched->wdtimer, XNTIMER_LOPRIO);
#endif /* CONFIG_XENO_OPT_WATCHDOG */
}
Example #13
File: xpc_main.c Project: 274914765/C
/*
 * This thread is responsible for nearly all of the partition
 * activation/deactivation.
 */
static int
xpc_hb_checker(void *ignore)
{
    int last_IRQ_count = 0;
    int new_IRQ_count;
    int force_IRQ = 0;

    /* this thread was marked active by xpc_hb_init() */

    set_cpus_allowed(current, cpumask_of_cpu(XPC_HB_CHECK_CPU));

    /* set our heartbeating to other partitions into motion */
    xpc_hb_check_timeout = jiffies + (xpc_hb_check_interval * HZ);
    xpc_hb_beater(0);

    while (!xpc_exiting) {

        dev_dbg(xpc_part, "woke up with %d ticks rem; %d IRQs have "
                "been received\n",
                (int)(xpc_hb_check_timeout - jiffies),
                atomic_read(&xpc_act_IRQ_rcvd) - last_IRQ_count);

        /* checking of remote heartbeats is skewed by IRQ handling */
        if (time_after_eq(jiffies, xpc_hb_check_timeout)) {
            dev_dbg(xpc_part, "checking remote heartbeats\n");
            xpc_check_remote_hb();

            /*
             * We need to periodically recheck to ensure no
             * IPI/AMO pairs have been missed.  That check
             * must always reset xpc_hb_check_timeout.
             */
            force_IRQ = 1;
        }

        /* check for outstanding IRQs */
        new_IRQ_count = atomic_read(&xpc_act_IRQ_rcvd);
        if (last_IRQ_count < new_IRQ_count || force_IRQ != 0) {
            force_IRQ = 0;

            dev_dbg(xpc_part, "found an IRQ to process; will be "
                    "resetting xpc_hb_check_timeout\n");

            last_IRQ_count += xpc_identify_act_IRQ_sender();
            if (last_IRQ_count < new_IRQ_count) {
                /* retry once to help avoid missing AMO */
                (void)xpc_identify_act_IRQ_sender();
            }
            last_IRQ_count = new_IRQ_count;

            xpc_hb_check_timeout = jiffies +
                                   (xpc_hb_check_interval * HZ);
        }

        /* wait for IRQ or timeout */
        (void)wait_event_interruptible(xpc_act_IRQ_wq,
                                       (last_IRQ_count <
                                        atomic_read(&xpc_act_IRQ_rcvd)
                                        || time_after_eq(jiffies,
                                                xpc_hb_check_timeout) ||
                                        xpc_exiting));
    }

    dev_dbg(xpc_part, "heartbeat checker is exiting\n");

    /* mark this thread as having exited */
    complete(&xpc_hb_checker_exited);
    return 0;
}
Example #14
static void create_kthread(struct kthread_create_info *create)
{
	int pid;

	/* We want our own signal handler (we take no signals by default). */
	pid = kernel_thread(kthread, create, CLONE_FS | CLONE_FILES | SIGCHLD);
	if (pid < 0) {
		create->result = ERR_PTR(pid);
	} else {
		struct sched_param param = { .sched_priority = 0 };
		wait_for_completion(&create->started);
		read_lock(&tasklist_lock);
		create->result = find_task_by_pid_ns(pid, &init_pid_ns);
		read_unlock(&tasklist_lock);
		/*
		 * root may have changed our (kthreadd's) priority or CPU mask.
		 * The kernel thread should not inherit these properties.
		 */
		sched_setscheduler(create->result, SCHED_NORMAL, &param);
		set_user_nice(create->result, KTHREAD_NICE_LEVEL);
		set_cpus_allowed_ptr(create->result, CPU_MASK_ALL_PTR);
	}
	complete(&create->done);
}

/**
 * kthread_create - create a kthread.
 * @threadfn: the function to run until signal_pending(current).
 * @data: data ptr for @threadfn.
 * @namefmt: printf-style name for the thread.
 *
 * Description: This helper function creates and names a kernel
 * thread.  The thread will be stopped: use wake_up_process() to start
 * it.  See also kthread_run(), kthread_create_on_cpu().
 *
 * When woken, the thread will run @threadfn() with @data as its
 * argument. @threadfn() can either call do_exit() directly if it is a
 * standalone thread for which no one will call kthread_stop(), or
 * return when 'kthread_should_stop()' is true (which means
 * kthread_stop() has been called).  The return value should be zero
 * or a negative error number; it will be passed to kthread_stop().
 *
 * Returns a task_struct or ERR_PTR(-ENOMEM).
 */
struct task_struct *kthread_create(int (*threadfn)(void *data),
				   void *data,
				   const char namefmt[],
				   ...)
{
	struct kthread_create_info create;

	create.threadfn = threadfn;
	create.data = data;
	init_completion(&create.started);
	init_completion(&create.done);

	spin_lock(&kthread_create_lock);
	list_add_tail(&create.list, &kthread_create_list);
	spin_unlock(&kthread_create_lock);

	wake_up_process(kthreadd_task);
	wait_for_completion(&create.done);

	if (!IS_ERR(create.result)) {
		va_list args;
		va_start(args, namefmt);
		vsnprintf(create.result->comm, sizeof(create.result->comm),
			  namefmt, args);
		va_end(args);
	}
	return create.result;
}
EXPORT_SYMBOL(kthread_create);

/**
 * kthread_bind - bind a just-created kthread to a cpu.
 * @k: thread created by kthread_create().
 * @cpu: cpu (might not be online, must be possible) for @k to run on.
 *
 * Description: This function is equivalent to set_cpus_allowed(),
 * except that @cpu doesn't need to be online, and the thread must be
 * stopped (i.e., just returned from kthread_create()).
 */
void kthread_bind(struct task_struct *k, unsigned int cpu)
{
	/* Must have done schedule() in kthread() before we set_task_cpu */
	if (!wait_task_inactive(k, TASK_UNINTERRUPTIBLE)) {
		WARN_ON(1);
		return;
	}
	set_task_cpu(k, cpu);
	k->cpus_allowed = cpumask_of_cpu(cpu);
	k->flags |= PF_THREAD_BOUND;
}
EXPORT_SYMBOL(kthread_bind);

/**
 * kthread_stop - stop a thread created by kthread_create().
 * @k: thread created by kthread_create().
 *
 * Sets kthread_should_stop() for @k to return true, wakes it, and
 * waits for it to exit.  Your threadfn() must not call do_exit()
 * itself if you use this function!  This can also be called after
 * kthread_create() instead of calling wake_up_process(): the thread
 * will exit without calling threadfn().
 *
 * Returns the result of threadfn(), or %-EINTR if wake_up_process()
 * was never called.
 */
int kthread_stop(struct task_struct *k)
{
	int ret;

	mutex_lock(&kthread_stop_lock);

	/* It could exit after stop_info.k set, but before wake_up_process. */
	get_task_struct(k);

	trace_sched_kthread_stop(k);

	/* Must init completion *before* thread sees kthread_stop_info.k */
	init_completion(&kthread_stop_info.done);
	smp_wmb();

	/* Now set kthread_should_stop() to true, and wake it up. */
	kthread_stop_info.k = k;
	wake_up_process(k);
	put_task_struct(k);

	/* Once it dies, reset stop ptr, gather result and we're done. */
	wait_for_completion(&kthread_stop_info.done);
	kthread_stop_info.k = NULL;
	ret = kthread_stop_info.err;
	mutex_unlock(&kthread_stop_lock);

	trace_sched_kthread_stop_ret(ret);

	return ret;
}
Example #15
/*==========================================================================*
 * Name:         smp_send_reschedule
 *
 * Description:  This routine requests other CPU to execute rescheduling.
 *               1.Send 'RESCHEDULE_IPI' to other CPU.
 *                 Request other CPU to execute 'smp_reschedule_interrupt()'.
 *
 * Born on Date: 2002.02.05
 *
 * Arguments:    cpu_id - Target CPU ID
 *
 * Returns:      void (cannot fail)
 *
 * Modification log:
 * Date       Who Description
 * ---------- --- --------------------------------------------------------
 *
 *==========================================================================*/
void smp_send_reschedule(int cpu_id)
{
	WARN_ON(cpu_is_offline(cpu_id));
	send_IPI_mask(cpumask_of_cpu(cpu_id), RESCHEDULE_IPI, 1);
}
Example #16
/*==========================================================================*
 * Name:         do_boot_cpu
 *
 * Description:  This routine boot up one AP.
 *
 * Born on Date: 2002.02.05
 *
 * Arguments:    phys_id - Target CPU physical ID
 *
 * Returns:      void (cannot fail)
 *
 * Modification log:
 * Date       Who Description
 * ---------- --- --------------------------------------------------------
 * 2003-06-24 hy  modify for linux-2.5.69
 *
 *==========================================================================*/
static void __init do_boot_cpu(int phys_id)
{
	struct task_struct *idle;
	unsigned long send_status, boot_status;
	int timeout, cpu_id;

	cpu_id = ++cpucount;

	/*
	 * We can't use kernel_thread since we must avoid
	 * rescheduling the child.
	 */
	idle = fork_idle(cpu_id);
	if (IS_ERR(idle))
		panic("failed fork for CPU#%d.", cpu_id);

	idle->thread.lr = (unsigned long)start_secondary;

	map_cpu_to_physid(cpu_id, phys_id);

	/* So we see what's up   */
	printk("Booting processor %d/%d\n", phys_id, cpu_id);
	stack_start.spi = (void *)idle->thread.sp;
	task_thread_info(idle)->cpu = cpu_id;

	/*
	 * Send Startup IPI
	 *   1.IPI received by CPU#(phys_id).
	 *   2.CPU#(phys_id) enter startup_AP (arch/m32r/kernel/head.S)
	 *   3.CPU#(phys_id) enter start_secondary()
	 */
	send_status = 0;
	boot_status = 0;

	cpu_set(phys_id, cpu_bootout_map);

	/* Send Startup IPI */
	send_IPI_mask_phys(cpumask_of_cpu(phys_id), CPU_BOOT_IPI, 0);

	Dprintk("Waiting for send to finish...\n");
	timeout = 0;

	/* Wait 100[ms] */
	do {
		Dprintk("+");
		udelay(1000);
		send_status = !cpu_isset(phys_id, cpu_bootin_map);
	} while (send_status && (timeout++ < 100));

	Dprintk("After Startup.\n");

	if (!send_status) {
		/*
		 * allow APs to start initializing.
		 */
		Dprintk("Before Callout %d.\n", cpu_id);
		cpu_set(cpu_id, cpu_callout_map);
		Dprintk("After Callout %d.\n", cpu_id);

		/*
		 * Wait 5s total for a response
		 */
		for (timeout = 0; timeout < 5000; timeout++) {
			if (cpu_isset(cpu_id, cpu_callin_map))
				break;	/* It has booted */
			udelay(1000);
		}

		if (cpu_isset(cpu_id, cpu_callin_map)) {
			/* number CPUs logically, starting from 1 (BSP is 0) */
			Dprintk("OK.\n");
		} else {
			boot_status = 1;
			printk("Not responding.\n");
		}
	} else
		printk("IPI never delivered???\n");

	if (send_status || boot_status) {
		unmap_cpu_to_physid(cpu_id, phys_id);
		cpu_clear(cpu_id, cpu_callout_map);
		cpu_clear(cpu_id, cpu_callin_map);
		cpu_clear(cpu_id, cpu_initialized);
		cpucount--;
	}
}
Example #17
void arch_send_call_function_single_ipi(int cpu)
{
	send_IPI_mask(cpumask_of_cpu(cpu), CALL_FUNC_SINGLE_IPI, 0);
}
Example #18
/**
 * kthread_create - create a kthread.
 * @threadfn: the function to run until signal_pending(current).
 * @data: data ptr for @threadfn.
 * @namefmt: printf-style name for the thread.
 *
 * Description: This helper function creates and names a kernel
 * thread.  The thread will be stopped: use wake_up_process() to start
 * it.  See also kthread_run().
 *
 * When woken, the thread will run @threadfn() with @data as its
 * argument. @threadfn() can either call do_exit() directly if it is a
 * standalone thread for which no one will call kthread_stop(), or
 * return when 'kthread_should_stop()' is true (which means
 * kthread_stop() has been called).  The return value should be zero
 * or a negative error number; it will be passed to kthread_stop().
 *
 * Returns a task_struct or ERR_PTR(-ENOMEM).
 */
struct task_struct *kthread_create(int (*threadfn)(void *data),
				   void *data,
				   const char namefmt[],
				   ...)
{
	struct kthread_create_info create;

	//TODO:RAWLINSON
	struct cpumask cpu_padrao = cpumask_of_cpu(CPUID_PADRAO);

	create.threadfn = threadfn;
	create.data = data;
	init_completion(&create.done);

	spin_lock(&kthread_create_lock);
	list_add_tail(&create.list, &kthread_create_list);
	spin_unlock(&kthread_create_lock);

	wake_up_process(kthreadd_task);
	wait_for_completion(&create.done);

	if (!IS_ERR(create.result)) {
		static const struct sched_param param = { .sched_priority = 0 };
		va_list args;

		va_start(args, namefmt);
		vsnprintf(create.result->comm, sizeof(create.result->comm),
			  namefmt, args);
		va_end(args);
		/*
		 * root may have changed our (kthreadd's) priority or CPU mask.
		 * The kernel thread should not inherit these properties.
		 */
		sched_setscheduler_nocheck(create.result, SCHED_NORMAL, &param);

		//TODO:RAWLINSON
		//set_cpus_allowed_ptr(create.result, cpu_all_mask); //TODO:RAWLINSON - original code...
		create.result->cpus_allowed = cpu_padrao;
		set_cpus_allowed_ptr(create.result, &cpu_padrao);
	}
	return create.result;
}
EXPORT_SYMBOL(kthread_create);

/**
 * kthread_bind - bind a just-created kthread to a cpu.
 * @p: thread created by kthread_create().
 * @cpu: cpu (might not be online, must be possible) for @p to run on.
 *
 * Description: This function is equivalent to set_cpus_allowed(),
 * except that @cpu doesn't need to be online, and the thread must be
 * stopped (i.e., just returned from kthread_create()).
 */
void kthread_bind(struct task_struct *p, unsigned int cpu)
{
	/* Must have done schedule() in kthread() before we set_task_cpu */
	if (!wait_task_inactive(p, TASK_UNINTERRUPTIBLE)) {
		WARN_ON(1);
		return;
	}

	p->cpus_allowed = cpumask_of_cpu(cpu);
	p->rt.nr_cpus_allowed = 1;
	p->flags |= PF_THREAD_BOUND;
}
EXPORT_SYMBOL(kthread_bind);

/**
 * kthread_stop - stop a thread created by kthread_create().
 * @k: thread created by kthread_create().
 *
 * Sets kthread_should_stop() for @k to return true, wakes it, and
 * waits for it to exit. This can also be called after kthread_create()
 * instead of calling wake_up_process(): the thread will exit without
 * calling threadfn().
 *
 * If threadfn() may call do_exit() itself, the caller must ensure
 * task_struct can't go away.
 *
 * Returns the result of threadfn(), or %-EINTR if wake_up_process()
 * was never called.
 */
int kthread_stop(struct task_struct *k)
{
	struct kthread *kthread;
	int ret;

	trace_sched_kthread_stop(k);
	get_task_struct(k);

	kthread = to_kthread(k);
	barrier(); /* it might have exited */
	if (k->vfork_done != NULL) {
		kthread->should_stop = 1;
		wake_up_process(k);
		wait_for_completion(&kthread->exited);
	}
	ret = k->exit_code;

	put_task_struct(k);
	trace_sched_kthread_stop_ret(ret);

	return ret;
}
EXPORT_SYMBOL(kthread_stop);

int kthreadd(void *unused)
{
	struct task_struct *tsk = current;

	//TODO:RAWLINSON
	struct cpumask cpu_padrao = cpumask_of_cpu(CPUID_PADRAO);

	/* Setup a clean context for our children to inherit. */
	set_task_comm(tsk, "kthreadd");
	ignore_signals(tsk);

	//TODO:RAWLINSON
	//set_cpus_allowed_ptr(tsk, cpu_all_mask); //TODO:RAWLINSON - original code...
	tsk->cpus_allowed = cpu_padrao;
	set_cpus_allowed_ptr(tsk, &cpu_padrao);

	set_mems_allowed(node_states[N_HIGH_MEMORY]);

	current->flags |= PF_NOFREEZE | PF_FREEZER_NOSIG;

	for (;;) {
		set_current_state(TASK_INTERRUPTIBLE);
		if (list_empty(&kthread_create_list))
			schedule();
		__set_current_state(TASK_RUNNING);

		spin_lock(&kthread_create_lock);
		while (!list_empty(&kthread_create_list)) {
			struct kthread_create_info *create;

			create = list_entry(kthread_create_list.next,
					    struct kthread_create_info, list);
			list_del_init(&create->list);
			spin_unlock(&kthread_create_lock);

			create_kthread(create);

			spin_lock(&kthread_create_lock);
		}
		spin_unlock(&kthread_create_lock);
	}

	return 0;
}

void __init_kthread_worker(struct kthread_worker *worker,
				const char *name,
				struct lock_class_key *key)
{
	spin_lock_init(&worker->lock);
	lockdep_set_class_and_name(&worker->lock, key, name);
	INIT_LIST_HEAD(&worker->work_list);
	worker->task = NULL;
}
Example #19
int __cpuinit mips_clockevent_init(void)
{
	uint64_t mips_freq = mips_hpt_frequency;
	unsigned int cpu = smp_processor_id();
	struct clock_event_device *cd;
	unsigned int irq;

	if (!cpu_has_counter || !mips_hpt_frequency)
		return -ENXIO;

#ifdef CONFIG_MIPS_MT_SMTC
	setup_smtc_dummy_clockevent_device();

	/*
	 * On SMTC we only register VPE0's compare interrupt as clockevent
	 * device.
	 */
	if (cpu)
		return 0;
#endif

	if (!c0_compare_int_usable())
		return -ENXIO;

	/*
	 * With vectored interrupts things are getting platform specific.
	 * get_c0_compare_int is a hook to allow a platform to return the
	 * interrupt number of its liking.
	 */
	irq = MIPS_CPU_IRQ_BASE + cp0_compare_irq;
	if (get_c0_compare_int)
		irq = get_c0_compare_int();

	cd = &per_cpu(mips_clockevent_device, cpu);

	cd->name		= "MIPS";
	cd->features		= CLOCK_EVT_FEAT_ONESHOT;

	/* Calculate the min / max delta */
	cd->mult	= div_sc((unsigned long) mips_freq, NSEC_PER_SEC, 32);
	cd->shift		= 32;
	cd->max_delta_ns	= clockevent_delta2ns(0x7fffffff, cd);
	cd->min_delta_ns	= clockevent_delta2ns(0x300, cd);

	cd->rating		= 300;
	cd->irq			= irq;
#ifdef CONFIG_MIPS_MT_SMTC
	cd->cpumask		= CPU_MASK_ALL;
#else
	cd->cpumask		= cpumask_of_cpu(cpu);
#endif
	cd->set_next_event	= mips_next_event;
	cd->set_mode		= mips_set_mode;
	cd->event_handler	= mips_event_handler;

	clockevents_register_device(cd);

	if (cp0_timer_irq_installed)
		return 0;

	cp0_timer_irq_installed = 1;

#ifdef CONFIG_MIPS_MT_SMTC
#define CPUCTR_IMASKBIT (0x100 << cp0_compare_irq)
	setup_irq_smtc(irq, &c0_compare_irqaction, CPUCTR_IMASKBIT);
#else
	setup_irq(irq, &c0_compare_irqaction);
#endif

	return 0;
}
Example #20
static cpumask_t vector_allocation_domain(int cpu)
{
	if (vector_domain_type == VECTOR_DOMAIN_PERCPU)
		return cpumask_of_cpu(cpu);
	return CPU_MASK_ALL;
}
/*
 * Why isn't there a function to route an IRQ to a specific CPU in
 * genirq?
 */
static void em_route_irq(int irq, unsigned int cpu)
{
	irq_desc[irq].affinity = cpumask_of_cpu(cpu);
	irq_desc[irq].chip->set_affinity(irq, cpumask_of_cpu(cpu));
}
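The question in the comment above is answered in later kernels by the genirq helper irq_set_affinity(); assuming such a kernel, a rough equivalent of em_route_irq() would be:

static void em_route_irq_genirq(int irq, unsigned int cpu)
{
	/* Ask the genirq core to move the interrupt to the given CPU. */
	irq_set_affinity(irq, cpumask_of(cpu));
}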
Example #22
/*
 * this function sends a 'reschedule' IPI to another CPU.
 * it goes straight through and wastes no time serializing
 * anything. Worst case is that we lose a reschedule ...
 */
void smp_send_reschedule(int cpu)
{
	send_IPI_mask(cpumask_of_cpu(cpu), RESCHEDULE_VECTOR);
}
Example #23
static int rtswitch_create_ktask(rtswitch_context_t *ctx,
				 struct rttst_swtest_task *ptask)
{
	union xnsched_policy_param param;
	struct xnthread_start_attr sattr;
	struct xnthread_init_attr iattr;
	rtswitch_task_t *task;
	xnflags_t init_flags;
	struct taskarg arg;
	char name[30];
	int err;

	/*
	 * Silently disable FP tests in kernel if FPU is not supported
	 * there. Typical case is math emulation support: we can use
	 * it from userland as a synthetic FPU, but there is no sane
	 * way to use it from kernel-based threads (Xenomai or Linux).
	 */
	if (!fp_kernel_supported())
		ptask->flags &= ~RTTST_SWTEST_USE_FPU;
		
	ptask->flags |= RTSWITCH_KERNEL;
	err = rtswitch_register_task(ctx, ptask);

	if (err)
		return err;

	snprintf(name, sizeof(name), "rtk%d/%u", ptask->index, ctx->cpu);

	task = &ctx->tasks[ptask->index];

	arg.ctx = ctx;
	arg.task = task;

	init_flags = (ptask->flags & RTTST_SWTEST_FPU) ? XNFPU : 0;

	/*
	 * Migrate the calling thread to the same CPU as the created
	 * task, in order to be sure that the created task is
	 * suspended when this function returns. This also allows us to
	 * use the stack to pass the parameters to the created
	 * task.
	 */
	set_cpus_allowed(current, cpumask_of_cpu(ctx->cpu));

	iattr.tbase = rtdm_tbase;
	iattr.name = name;
	iattr.flags = init_flags;
	iattr.ops = NULL;
	iattr.stacksize = 0;
	param.rt.prio = 1;

	err = xnpod_init_thread(&task->ktask,
				&iattr, &xnsched_class_rt, &param);
	if (!err) {
		sattr.mode = 0;
		sattr.imask = 0;
		sattr.affinity = xnarch_cpumask_of_cpu(ctx->cpu);
		sattr.entry = rtswitch_ktask;
		sattr.cookie = &arg;
		err = xnpod_start_thread(&task->ktask, &sattr);
	} else
		/*
		 * In order to avoid calling xnpod_delete_thread with
		 * invalid thread.
		 */
		task->base.flags = 0;
	/*
	 * Putting the argument on stack is safe, because the new
	 * thread will preempt the current thread immediately, and
	 * will suspend only once the arguments on stack are used.
	 */

	return err;
}
Example #24
File: setup.c Project: AmesianX/winkvm
/**
 * time_init_hook - do any specific initialisations for the system timer.
 *
 * Description:
 *	Must plug the system timer interrupt source at HZ into the IRQ listed
 *	in irq_vectors.h:TIMER_IRQ
 **/
void __init time_init_hook(void)
{
	irq0.mask = cpumask_of_cpu(0);
	setup_irq(0, &irq0);
}
Example #25
static void __init smp_boot_cpus(unsigned int max_cpus)
{
	unsigned apicid, cpu, bit, kicked;

	nmi_watchdog_default();

	/*
	 * Setup boot CPU information
	 */
	smp_store_cpu_info(0); /* Final full version of the data */
	printk(KERN_INFO "CPU%d: ", 0);
	print_cpu_info(&cpu_data[0]);

	current_thread_info()->cpu = 0;
	smp_tune_scheduling();

	if (!physid_isset(hard_smp_processor_id(), phys_cpu_present_map)) {
		printk("weird, boot CPU (#%d) not listed by the BIOS.\n",
		       hard_smp_processor_id());
		physid_set(hard_smp_processor_id(), phys_cpu_present_map);
	}

	/*
	 * If we couldn't find an SMP configuration at boot time,
	 * get out of here now!
	 */
	if (!smp_found_config) {
		printk(KERN_NOTICE "SMP motherboard not detected.\n");
		io_apic_irqs = 0;
		cpu_online_map = cpumask_of_cpu(0);
		cpu_set(0, cpu_sibling_map[0]);
		phys_cpu_present_map = physid_mask_of_physid(0);
		if (APIC_init_uniprocessor())
			printk(KERN_NOTICE "Local APIC not detected."
					   " Using dummy APIC emulation.\n");
		goto smp_done;
	}

	/*
	 * Should not be necessary because the MP table should list the boot
	 * CPU too, but we do it for the sake of robustness anyway.
	 */
	if (!physid_isset(boot_cpu_id, phys_cpu_present_map)) {
		printk(KERN_NOTICE "weird, boot CPU (#%d) not listed by the BIOS.\n",
								 boot_cpu_id);
		physid_set(hard_smp_processor_id(), phys_cpu_present_map);
	}

	/*
	 * If we couldn't find a local APIC, then get out of here now!
	 */
	if (APIC_INTEGRATED(apic_version[boot_cpu_id]) && !cpu_has_apic) {
		printk(KERN_ERR "BIOS bug, local APIC #%d not detected!...\n",
			boot_cpu_id);
		printk(KERN_ERR "... forcing use of dummy APIC emulation. (tell your hw vendor)\n");
		io_apic_irqs = 0;
		cpu_online_map = cpumask_of_cpu(0);
		cpu_set(0, cpu_sibling_map[0]);
		phys_cpu_present_map = physid_mask_of_physid(0);
		disable_apic = 1;
		goto smp_done;
	}

	verify_local_APIC();

	/*
	 * If SMP should be disabled, then really disable it!
	 */
	if (!max_cpus) {
		smp_found_config = 0;
		printk(KERN_INFO "SMP mode deactivated, forcing use of dummy APIC emulation.\n");
		io_apic_irqs = 0;
		cpu_online_map = cpumask_of_cpu(0);
		cpu_set(0, cpu_sibling_map[0]);
		phys_cpu_present_map = physid_mask_of_physid(0);
		disable_apic = 1;
		goto smp_done;
	}

	connect_bsp_APIC();
	setup_local_APIC();

	if (GET_APIC_ID(apic_read(APIC_ID)) != boot_cpu_id)
		BUG();

	x86_cpu_to_apicid[0] = boot_cpu_id;

	/*
	 * Now scan the CPU present map and fire up the other CPUs.
	 */
	Dprintk("CPU present map: %lx\n", physids_coerce(phys_cpu_present_map));

	kicked = 1;
	for (bit = 0; kicked < NR_CPUS && bit < MAX_APICS; bit++) {
		apicid = cpu_present_to_apicid(bit);
		/*
		 * Don't even attempt to start the boot CPU!
		 */
		if (apicid == boot_cpu_id || (apicid == BAD_APICID))
			continue;

		if (!physid_isset(apicid, phys_cpu_present_map))
			continue;
		if ((max_cpus >= 0) && (max_cpus <= cpucount+1))
			continue;

		do_boot_cpu(apicid);
		++kicked;
	}

	/*
	 * Cleanup possible dangling ends...
	 */
	{
		/*
		 * Install writable page 0 entry to set BIOS data area.
		 */
		local_flush_tlb();

		/*
		 * Paranoid:  Set warm reset code and vector here back
		 * to default values.
		 */
		CMOS_WRITE(0, 0xf);

		*((volatile int *) phys_to_virt(0x467)) = 0;
	}

	/*
	 * Allow the user to impress friends.
	 */

	Dprintk("Before bogomips.\n");
	if (!cpucount) {
		printk(KERN_INFO "Only one processor found.\n");
	} else {
		unsigned long bogosum = 0;
		for (cpu = 0; cpu < NR_CPUS; cpu++)
			if (cpu_isset(cpu, cpu_callout_map))
				bogosum += cpu_data[cpu].loops_per_jiffy;
		printk(KERN_INFO "Total of %d processors activated (%lu.%02lu BogoMIPS).\n",
			cpucount+1,
			bogosum/(500000/HZ),
			(bogosum/(5000/HZ))%100);
		Dprintk("Before bogocount - setting activated=1.\n");
	}

	/*
	 * Construct cpu_sibling_map[], so that we can tell the
	 * sibling CPU efficiently.
	 */
	for (cpu = 0; cpu < NR_CPUS; cpu++)
		cpus_clear(cpu_sibling_map[cpu]);

	for (cpu = 0; cpu < NR_CPUS; cpu++) {
		int siblings = 0;
		int i;
		if (!cpu_isset(cpu, cpu_callout_map))
			continue;

		if (smp_num_siblings > 1) {
			for (i = 0; i < NR_CPUS; i++) {
				if (!cpu_isset(i, cpu_callout_map))
					continue;
				if (phys_proc_id[cpu] == phys_proc_id[i]) {
					siblings++;
					cpu_set(i, cpu_sibling_map[cpu]);
				}
			}
		} else { 
			siblings++;
			cpu_set(cpu, cpu_sibling_map[cpu]);
		}

		if (siblings != smp_num_siblings) {
			printk(KERN_WARNING 
	       "WARNING: %d siblings found for CPU%d, should be %d\n", 
			       siblings, cpu, smp_num_siblings);
			smp_num_siblings = siblings;
		}       
	}

	Dprintk("Boot done.\n");

	/*
	 * Here we can be sure that there is an IO-APIC in the system. Let's
	 * go and set it up:
	 */
	if (!skip_ioapic_setup && nr_ioapics)
		setup_IO_APIC();
	else
		nr_ioapics = 0;

	setup_boot_APIC_clock();

	/*
	 * Synchronize the TSC with the AP
	 */
	if (cpu_has_tsc && cpucount)
		synchronize_tsc_bp();

 smp_done:
	time_init_smp();
}