Example #1
/*
 * Detect available CPUs, populate cpu_possible_map before smp_init
 *
 * We don't want to start the secondary CPU yet nor do we have a nice probing
 * feature in PMON so we just assume presence of the secondary core.
 */
static void __init yos_smp_setup(void)
{
	int i;

	cpus_clear(cpu_possible_map);

	for (i = 0; i < 2; i++) {
		cpu_set(i, cpu_possible_map);
		__cpu_number_map[i]	= i;
		__cpu_logical_map[i]	= i;
	}
}
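For reference, the legacy cpus_clear()/cpu_set() calls above map onto the current cpumask API roughly as follows. This is a minimal sketch, assuming a modern kernel that provides init_cpu_possible(), cpu_none_mask and set_cpu_possible(); it is not part of the original source.

/* Hypothetical modern rewrite of yos_smp_setup(); illustration only. */
static void __init yos_smp_setup_modern(void)
{
	int i;

	init_cpu_possible(cpu_none_mask);	/* replaces cpus_clear(cpu_possible_map) */

	for (i = 0; i < 2; i++) {
		set_cpu_possible(i, true);	/* replaces cpu_set(i, cpu_possible_map) */
		__cpu_number_map[i] = i;
		__cpu_logical_map[i] = i;
	}
}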
Example #2
/*
 * Cycle through the APs sending Wakeup IPIs to boot each.
 */
void __init
smp_prepare_cpus (unsigned int max_cpus)
{
    int boot_cpu_id = hard_smp_processor_id();

    /*
     * Initialize the per-CPU profiling counter/multiplier
     */

    smp_setup_percpu_timer();

    /*
     * We have the boot CPU online for sure.
     */
    cpu_set(0, cpu_online_map);
    cpu_set(0, cpu_callin_map);

    local_cpu_data->loops_per_jiffy = loops_per_jiffy;
    ia64_cpu_to_sapicid[0] = boot_cpu_id;

    printk(KERN_INFO "Boot processor id 0x%x/0x%x\n", 0, boot_cpu_id);

    current_thread_info()->cpu = 0;

    /*
     * If SMP should be disabled, then really disable it!
     */
    if (!max_cpus) {
        printk(KERN_INFO "SMP mode deactivated.\n");
        cpus_clear(cpu_online_map);
        cpus_clear(cpu_present_map);
        cpus_clear(cpu_possible_map);
        cpu_set(0, cpu_online_map);
        cpu_set(0, cpu_present_map);
        cpu_set(0, cpu_possible_map);
        return;
    }
}
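The !max_cpus branch above is the legacy way of collapsing every topology map back to just the boot CPU. As a rough sketch of the same effect with the newer accessors (assuming init_cpu_online()/init_cpu_present()/init_cpu_possible() and cpumask_of() are available; not taken from the original file):

/* Sketch only: force "boot CPU only" with the struct-cpumask API. */
static void smp_force_boot_cpu_only(void)
{
	init_cpu_online(cpumask_of(0));
	init_cpu_present(cpumask_of(0));
	init_cpu_possible(cpumask_of(0));
}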
Example #3
static void __init jzsoc_smp_setup(void)
{
	int i, num;
	scpu_pwc = cpm_pwc_get(PWC_SCPU);
	cpus_clear(cpu_possible_map);
	cpus_clear(cpu_present_map);

	cpu_set(0, cpu_possible_map);
	cpu_set(0, cpu_present_map);

	__cpu_number_map[0] = 0;
	__cpu_logical_map[0] = 0;

	for (i = 1, num = 0; i < NR_CPUS; i++) {
		cpu_set(i, cpu_possible_map);
		cpu_set(i, cpu_present_map);

		__cpu_number_map[i] = ++num;
		__cpu_logical_map[num] = i;
	}

	pr_info("[SMP] Slave CPU(s) %i available.\n", num);
}
Example #4
/**
 * percpu_populate_mask - populate per-cpu data for more CPUs
 * @__pdata: per-cpu data to populate further
 * @size: size of per-cpu object
 * @gfp: may sleep or not etc.
 * @mask: populate per-cpu data for CPUs selected through mask bits
 *
 * Per-cpu objects are populated with zeroed buffers.
 */
int __percpu_populate_mask(void *__pdata, size_t size, gfp_t gfp,
			   cpumask_t *mask)
{
	cpumask_t populated;
	int cpu;

	cpus_clear(populated);
	for_each_cpu_mask(cpu, *mask)
		if (unlikely(!percpu_populate(__pdata, size, gfp, cpu))) {
			__percpu_depopulate_mask(__pdata, &populated);
			return -ENOMEM;
		} else
			cpu_set(cpu, populated);
	return 0;
}
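The interesting part of __percpu_populate_mask() is the rollback: the populated mask records every CPU that succeeded, so a failure can undo exactly that set and nothing more. A stripped-down sketch of the same pattern, using hypothetical setup_one()/teardown_one() helpers instead of the real percpu_populate()/__percpu_depopulate_mask():

/* Generic "accumulate, then roll back on failure" sketch.
 * setup_one() and teardown_one() are placeholders, not real kernel calls. */
static int setup_on_mask(void *data, const cpumask_t *mask)
{
	cpumask_t done;
	int cpu, undo;

	cpus_clear(done);
	for_each_cpu_mask(cpu, *mask) {
		if (setup_one(data, cpu)) {
			/* Undo only the CPUs that were actually set up. */
			for_each_cpu_mask(undo, done)
				teardown_one(data, undo);
			return -ENOMEM;
		}
		cpu_set(cpu, done);
	}
	return 0;
}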
Example #5
/**
 * @fn int xnsched_run(void)
 * @brief The rescheduling procedure.
 *
 * This is the central rescheduling routine which should be called to
 * validate and apply changes which have previously been made to the
 * nucleus scheduling state, such as suspending, resuming or changing
 * the priority of threads.  This call performs context switches as
 * needed. xnsched_run() schedules out the current thread if:
 *
 * - the current thread is about to block.
 * - a runnable thread from a higher priority scheduling class is
 * waiting for the CPU.
 * - the current thread does not lead the runnable threads from its
 * own scheduling class (i.e. round-robin).
 *
 * The Cobalt core implements a lazy rescheduling scheme so that most
 * of the services affecting the threads state MUST be followed by a
 * call to the rescheduling procedure for the new scheduling state to
 * be applied.
 *
 * In other words, multiple changes on the scheduler state can be done
 * in a row, waking threads up, blocking others, without being
 * immediately translated into the corresponding context switches.
 * When all changes have been applied, xnsched_run() should be called
 * for considering those changes, and possibly switching context.
 *
 * As a notable exception to the previous principle however, every
 * action which ends up suspending the current thread begets an
 * implicit call to the rescheduling procedure on behalf of the
 * blocking service.
 *
 * Typically, self-suspension or sleeping on a synchronization object
 * automatically leads to a call to the rescheduling procedure,
 * therefore the caller does not need to explicitly issue
 * xnsched_run() after such operations.
 *
 * The rescheduling procedure always leads to a null-effect if it is
 * called on behalf of an interrupt service routine. Any outstanding
 * scheduler lock held by the outgoing thread will be restored when
 * the thread is scheduled back in.
 *
 * Calling this procedure with no applicable context switch pending is
 * harmless and simply leads to a null-effect.
 *
 * @return Non-zero is returned if a context switch actually happened,
 * otherwise zero if the current thread was left running.
 *
 * @coretags{unrestricted}
 */
static inline int test_resched(struct xnsched *sched)
{
	int resched = xnsched_resched_p(sched);
#ifdef CONFIG_SMP
	/* Send resched IPI to remote CPU(s). */
	if (unlikely(!cpus_empty(sched->resched))) {
		smp_mb();
		ipipe_send_ipi(IPIPE_RESCHEDULE_IPI, sched->resched);
		cpus_clear(sched->resched);
	}
#endif
	sched->status &= ~XNRESCHED;

	return resched;
}
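The documentation above boils down to a simple calling convention: batch the scheduler-state changes, then commit them with a single xnsched_run(). A minimal sketch of that pattern, assuming Cobalt's xnthread_resume() entry point and the XNSUSP state bit (the thread pointers and the surrounding nklock protection are elided):

/* Sketch: two state changes, one rescheduling point. Illustration only. */
static void wake_pair_and_commit(struct xnthread *t1, struct xnthread *t2)
{
	xnthread_resume(t1, XNSUSP);	/* state change #1, no switch yet */
	xnthread_resume(t2, XNSUSP);	/* state change #2, no switch yet */
	xnsched_run();			/* apply both changes, switch if needed */
}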
Example #6
/**
 * build_cpu_to_node_map - setup cpu to node and node to cpumask arrays
 *
 * Build cpu to node mapping and initialize the per node cpu masks using
 * info from the node_cpuid array handed to us by ACPI.
 */
void __init build_cpu_to_node_map(void)
{
	int cpu, i, node;

	for(node=0; node < MAX_NUMNODES; node++)
		cpus_clear(node_to_cpu_mask[node]);

	for_each_possible_early_cpu(cpu) {
		node = -1;
		for (i = 0; i < NR_CPUS; ++i)
			if (cpu_physical_id(cpu) == node_cpuid[i].phys_id) {
				node = node_cpuid[i].nid;
				break;
			}
		map_cpu_to_node(cpu, node);
	}
}
Example #7
void enable_nonboot_cpus(void)
{
    int cpu, error;

    printk("Enabling non-boot CPUs  ...\n");

    for_each_cpu_mask ( cpu, frozen_cpus )
    {
        if ( (error = cpu_up(cpu)) )
        {
            BUG_ON(error == -EBUSY);
            printk("Error taking CPU%d up: %d\n", cpu, error);
        }
    }

    cpus_clear(frozen_cpus);
}
Example #8
void move_masked_irq(int irq)
{
	struct irq_desc *desc = irq_desc + irq;
	cpumask_t tmp;

	if (likely(!(desc->status & IRQ_MOVE_PENDING)))
		return;

	/*
	 * Paranoia: cpu-local interrupts shouldn't be calling in here anyway.
	 */
	if (CHECK_IRQ_PER_CPU(desc->status)) {
		WARN_ON(1);
		return;
	}

	desc->status &= ~IRQ_MOVE_PENDING;

	if (unlikely(cpus_empty(irq_desc[irq].pending_mask)))
		return;

	if (!desc->chip->set_affinity)
		return;

	assert_spin_locked(&desc->lock);

	cpus_and(tmp, irq_desc[irq].pending_mask, cpu_online_map);

	/*
	 * If there was a valid mask to work with, please
	 * do the disable, re-program, enable sequence.
	 * This is *not* particularly important for level triggered
	 * but in an edge trigger case, we might be setting rte
	 * when an active trigger is coming in. This could
	 * cause some ioapics to malfunction.
	 * Being paranoid, I guess!
	 *
	 * For correct operation this depends on the caller
	 * masking the irqs.
	 */
	if (likely(!cpus_empty(tmp))) {
		desc->chip->set_affinity(irq,tmp);
	}
	cpus_clear(irq_desc[irq].pending_mask);
}
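As the comment stresses, move_masked_irq() only works if the caller has already masked the interrupt. In kernels of this vintage the typical caller followed roughly the shape below (a simplified sketch in the spirit of move_native_irq(), not a verbatim copy):

/* Sketch of the expected calling pattern: mask, retarget, unmask. */
static void move_irq_while_masked(int irq)
{
	struct irq_desc *desc = irq_desc + irq;

	if (likely(!(desc->status & IRQ_MOVE_PENDING)))
		return;

	desc->chip->mask(irq);		/* the caller-side masking the comment relies on */
	move_masked_irq(irq);		/* safe to rewrite the routing entry now */
	desc->chip->unmask(irq);
}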
Example #9
/**
 * Runs a function on the specified CPU.
 */
static void 
palacios_xcall(
	int			cpu_id, 
	void			(*fn)(void *arg),
	void *			arg
)
{
	cpumask_t cpu_mask;

	cpus_clear(cpu_mask);
	cpu_set(cpu_id, cpu_mask);

	printk(KERN_DEBUG
		"Palacios making xcall to cpu %d from cpu %d.\n",
		cpu_id, current->cpu_id);

	xcall_function(cpu_mask, fn, arg, 1);
}
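Palacios builds a one-bit mask by hand because Kitten's xcall_function() takes a cpumask. Under Linux the same single-CPU cross call is normally one helper; a minimal sketch, assuming the post-2.6.27 four-argument smp_call_function_single():

/* Sketch: Linux-style equivalent of the single-CPU xcall above. */
static void linux_style_xcall(int cpu_id, void (*fn)(void *arg), void *arg)
{
	smp_call_function_single(cpu_id, fn, arg, 1);	/* wait == 1, as in xcall_function(..., 1) */
}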
Example #10
/*
 * Use CFE to find out how many CPUs are available, setting up
 * phys_cpu_present_map and the logical/physical mappings.
 * XXXKW will the boot CPU ever not be physical 0?
 *
 * Common setup before any secondaries are started
 */
void __init plat_smp_setup(void)
{
	int i, num;

	cpus_clear(phys_cpu_present_map);
	cpu_set(0, phys_cpu_present_map);
	__cpu_number_map[0] = 0;
	__cpu_logical_map[0] = 0;

	for (i = 1, num = 0; i < NR_CPUS; i++) {
		if (cfe_cpu_stop(i) == 0) {
			cpu_set(i, phys_cpu_present_map);
			__cpu_number_map[i] = ++num;
			__cpu_logical_map[num] = i;
		}
	}
	printk(KERN_INFO "Detected %i available secondary CPU(s)\n", num);
}
Example #11
/*
 * Detect available CPUs, populate phys_cpu_present_map before smp_init
 *
 * We don't want to start the secondary CPU yet nor do we have a nice probing
 * feature in PMON so we just assume presence of the secondary core.
 */
void prom_prepare_cpus(unsigned int max_cpus)
{
	cpus_clear(phys_cpu_present_map);

	/*
	 * The boot CPU
	 */
	cpu_set(0, phys_cpu_present_map);
	__cpu_number_map[0]	= 0;
	__cpu_logical_map[0]	= 0;

	/*
	 * The secondary core
	 */
	cpu_set(1, phys_cpu_present_map);
	__cpu_number_map[1]	= 1;
	__cpu_logical_map[1]	= 1;
}
Example #12
File: smp.c Project: mobilipia/iods
void __init xen_smp_prepare_cpus(unsigned int max_cpus)
{
	unsigned cpu;

	for_each_possible_cpu(cpu) {
		cpus_clear(per_cpu(cpu_sibling_map, cpu));
		/*
		 * cpu_core_map will be zeroed when the per
		 * cpu area is allocated.
		 *
		 * cpus_clear(per_cpu(cpu_core_map, cpu));
		 */
	}

	smp_store_cpu_info(0);
	set_cpu_sibling_map(0);

	if (xen_smp_intr_init(0))
		BUG();

	cpu_initialized_map = cpumask_of_cpu(0);

	/* Restrict the possible_map according to max_cpus. */
	while ((num_possible_cpus() > 1) && (num_possible_cpus() > max_cpus)) {
		for (cpu = NR_CPUS-1; !cpu_isset(cpu, cpu_possible_map); cpu--)
			continue;
		cpu_clear(cpu, cpu_possible_map);
	}

	for_each_possible_cpu (cpu) {
		struct task_struct *idle;

		if (cpu == 0)
			continue;

		idle = fork_idle(cpu);
		if (IS_ERR(idle))
			panic("failed fork for CPU %d", cpu);

		cpu_set(cpu, cpu_present_map);
	}

	//init_xenbus_allowed_cpumask();
}
Example #13
/**
 * build_cpu_to_node_map - setup cpu to node and node to cpumask arrays
 *
 * Build cpu to node mapping and initialize the per node cpu masks using
 * info from the node_cpuid array handed to us by ACPI.
 */
void __init build_cpu_to_node_map(void)
{
	int cpu, i, node;

	for(node=0; node < MAX_NUMNODES; node++)
		cpus_clear(node_to_cpu_mask[node]);

	for(cpu = 0; cpu < NR_CPUS; ++cpu) {
		node = -1;
		for (i = 0; i < NR_CPUS; ++i)
			if (cpu_physical_id(cpu) == node_cpuid[i].phys_id) {
				node = node_cpuid[i].nid;
				break;
			}
		cpu_to_node_map[cpu] = (node >= 0) ? node : 0;
		if (node >= 0)
			cpu_set(cpu, node_to_cpu_mask[node]);
	}
}
Example #14
static int __init mips_smp_init(void)
{
    cpumask_t forbidden;
    unsigned int cpu;

    cpus_clear(forbidden);
#ifdef CONFIG_SMP
    smp_call_function(mips_setup_cpu_mask, &forbidden, 1);
#endif
    mips_setup_cpu_mask(&forbidden);
    if (cpus_empty(forbidden))
        return 0;
    perfctr_cpus_forbidden_mask = forbidden;
    for(cpu = 0; cpu < NR_CPUS; ++cpu)
        if (cpu_isset(cpu, forbidden))
            printk(" %u", cpu);
    printk("\n");
    return 0;
}
Example #15
void disable_nonboot_cpus(void)
{
	int cpu, error;

	error = 0;
	cpus_clear(frozen_cpus);
	printk("Freezing cpus ...\n");
	for_each_online_cpu(cpu) {
		if (cpu == 0)
			continue;
		error = cpu_down(cpu);
		if (!error) {
			cpu_set(cpu, frozen_cpus);
			printk("CPU%d is down\n", cpu);
			continue;
		}
		printk("Error taking cpu %d down: %d\n", cpu, error);
	}
	BUG_ON(raw_smp_processor_id() != 0);
	if (error)
		panic("cpus not sleeping");
}
Example #16
File: smp.c Project: mobilipia/iods
void __init xen_smp_prepare_boot_cpu(void)
{
	int cpu;

	BUG_ON(smp_processor_id() != 0);
	native_smp_prepare_boot_cpu();

	/* We've switched to the "real" per-cpu gdt, so make sure the
	   old memory can be recycled */
	make_lowmem_page_readwrite(&per_cpu__gdt_page);

	for_each_possible_cpu(cpu) {
		cpus_clear(per_cpu(cpu_sibling_map, cpu));
		/*
		 * cpu_core_map lives in a per cpu area that is cleared
		 * when the per cpu array is allocated.
		 *
		 * cpus_clear(per_cpu(cpu_core_map, cpu));
		 */
	}

	xen_setup_vcpu_info_placement();
}
Example #17
/*
 * Initialize the logical CPU number to SAPICID mapping
 */
void __init
smp_build_cpu_map (void)
{
	int sapicid, cpu, i;
	int boot_cpu_id = hard_smp_processor_id();

	for (cpu = 0; cpu < NR_CPUS; cpu++) {
		ia64_cpu_to_sapicid[cpu] = -1;
	}

	ia64_cpu_to_sapicid[0] = boot_cpu_id;
	cpus_clear(cpu_present_map);
	cpu_set(0, cpu_present_map);
	cpu_set(0, cpu_possible_map);
	for (cpu = 1, i = 0; i < smp_boot_data.cpu_count; i++) {
		sapicid = smp_boot_data.cpu_phys_id[i];
		if (sapicid == boot_cpu_id)
			continue;
		cpu_set(cpu, cpu_present_map);
		cpu_set(cpu, cpu_possible_map);
		ia64_cpu_to_sapicid[cpu] = sapicid;
		cpu++;
	}
}
Example #18
void __init setup_replication_mask(void)
{
	/* Set only the master cnode's bit.  The master cnode is always 0. */
	cpus_clear(ktext_repmask);
	cpu_set(0, ktext_repmask);

#ifdef CONFIG_REPLICATE_KTEXT
#ifndef CONFIG_MAPPED_KERNEL
#error Kernel replication works with mapped kernel support. No calias support.
#endif
	{
		cnodeid_t	cnode;

		for_each_online_node(cnode) {
			if (cnode == 0)
				continue;
			/* Advertise that we have a copy of the kernel */
			cpu_set(cnode, ktext_repmask);
		}
	}
#endif
	/* Set up a GDA pointer to the replication mask. */
	GDA->g_ktext_repmask = &ktext_repmask;
}
Example #19
static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
{
	struct vm_area_struct *mpnt, *tmp, **pprev;
	struct rb_node **rb_link, *rb_parent;
	int retval;
	unsigned long charge;
	struct mempolicy *pol;

	down_write(&oldmm->mmap_sem);
	flush_cache_dup_mm(oldmm);
	/*
	 * Not linked in yet - no deadlock potential:
	 */
	down_write_nested(&mm->mmap_sem, SINGLE_DEPTH_NESTING);

	mm->locked_vm = 0;
	mm->mmap = NULL;
	mm->mmap_cache = NULL;
	mm->free_area_cache = oldmm->mmap_base;
	mm->cached_hole_size = ~0UL;
	mm->map_count = 0;
	cpus_clear(mm->cpu_vm_mask);
	mm->mm_rb = RB_ROOT;
	rb_link = &mm->mm_rb.rb_node;
	rb_parent = NULL;
	pprev = &mm->mmap;

	for (mpnt = oldmm->mmap; mpnt; mpnt = mpnt->vm_next) {
		struct file *file;

		if (mpnt->vm_flags & VM_DONTCOPY) {
			long pages = vma_pages(mpnt);
			mm->total_vm -= pages;
			vm_stat_account(mm, mpnt->vm_flags, mpnt->vm_file,
								-pages);
			continue;
		}
		charge = 0;
		if (mpnt->vm_flags & VM_ACCOUNT) {
			unsigned int len = (mpnt->vm_end - mpnt->vm_start) >> PAGE_SHIFT;
			if (security_vm_enough_memory(len))
				goto fail_nomem;
			charge = len;
		}
		tmp = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
		if (!tmp)
			goto fail_nomem;
		*tmp = *mpnt;
		pol = mpol_dup(vma_policy(mpnt));
		retval = PTR_ERR(pol);
		if (IS_ERR(pol))
			goto fail_nomem_policy;
		vma_set_policy(tmp, pol);
		tmp->vm_flags &= ~VM_LOCKED;
		tmp->vm_mm = mm;
		tmp->vm_next = NULL;
		anon_vma_link(tmp);
		file = tmp->vm_file;
		if (file) {
			struct inode *inode = file->f_path.dentry->d_inode;
			struct address_space *mapping = file->f_mapping;

			get_file(file);
			if (tmp->vm_flags & VM_DENYWRITE)
				atomic_dec(&inode->i_writecount);
			spin_lock(&mapping->i_mmap_lock);
			if (tmp->vm_flags & VM_SHARED)
				mapping->i_mmap_writable++;
			tmp->vm_truncate_count = mpnt->vm_truncate_count;
			flush_dcache_mmap_lock(mapping);
			/* insert tmp into the share list, just after mpnt */
			vma_prio_tree_add(tmp, mpnt);
			flush_dcache_mmap_unlock(mapping);
			spin_unlock(&mapping->i_mmap_lock);
		}

		/*
		 * Clear hugetlb-related page reserves for children. This only
		 * affects MAP_PRIVATE mappings. Faults generated by the child
		 * are not guaranteed to succeed, even if read-only
		 */
		if (is_vm_hugetlb_page(tmp))
			reset_vma_resv_huge_pages(tmp);

		/*
		 * Link in the new vma and copy the page table entries.
		 */
		*pprev = tmp;
		pprev = &tmp->vm_next;

		__vma_link_rb(mm, tmp, rb_link, rb_parent);
		rb_link = &tmp->vm_rb.rb_right;
		rb_parent = &tmp->vm_rb;

		mm->map_count++;
		retval = copy_page_range(mm, oldmm, mpnt);

		if (tmp->vm_ops && tmp->vm_ops->open)
			tmp->vm_ops->open(tmp);

		if (retval)
			goto out;
	}
Example #20
void
dtrace_xcall2(processorid_t cpu, dtrace_xcall_t func, void *arg)
{	int	c;
	int	cpu_id = smp_processor_id();
	int	cpus_todo = 0;
# if LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 24)
typedef struct cpumask cpumask_t;
//#define cpu_set(c, mask) cpumask_set_cpu(c, &(mask))
//#define cpus_clear(mask) cpumask_clear(&mask)
# endif
	cpumask_t mask;

	/***********************************************/
	/*   If  we had an internal panic, stop doing  */
	/*   xcalls.   Shouldnt  happen,  but  useful  */
	/*   during debugging so we can diagnose what  */
	/*   caused the panic.			       */
	/***********************************************/
	if (dtrace_shutdown)
		return;
	/***********************************************/
	/*   Special case - just 'us'.		       */
	/***********************************************/
	cnt_xcall1++;
	if (cpu_id == cpu) {
		local_irq_disable();
//dtrace_printf("[%d] sole cnt=%lu\n", smp_processor_id(), cnt_xcall1);
		func(arg);
		local_irq_enable();
		return;
	}

	/***********************************************/
	/*   Set  up  the  cpu  mask  to  do just the  */
	/*   relevant cpu.			       */
	/***********************************************/
	if (cpu != DTRACE_CPUALL) {
//dtrace_printf("just me %d %d\n", cpu_id, cpu);
		cpu = 1 << cpu;
	}


//dtrace_printf("xcall %d f=%p\n", cpu_id, func);
	cnt_xcall2++;
	if (xcall_levels[cpu_id]++)
		cnt_xcall3++;
	/***********************************************/
	/*   Set  up  the  rendezvous  with the other  */
	/*   targetted  cpus.  We use a nearly square  */
	/*   NCPU*NCPU matrix to allow for any cpu to  */
	/*   wait  for  any  other. We have two slots  */
	/*   per  cpu  -  because  we  may  be  in an  */
	/*   interrupt.				       */
	/*   					       */
	/*   The  interrupt  slave  will  service all  */
	/*   queued  calls  -  sometimes  it  will be  */
	/*   lucky and see multiple, especially if we  */
	/*   are heavily loaded.		       */
	/***********************************************/
	cpus_clear(mask);
	for (c = 0; c < nr_cpus; c++) {
		struct xcalls *xc = &xcalls[cpu_id][c];
		unsigned int cnt;

		/***********************************************/
		/*   Dont  set  ourselves  - we dont want our  */
		/*   cpu  to  be  taking an IPI interrupt and  */
		/*   doing   the   work   twice.   We  inline  */
		/*   ourselves below.			       */
		/***********************************************/
		if ((cpu & (1 << c)) == 0 || c == cpu_id) {
			continue;
		}

		/***********************************************/
		/*   Is  this  safe?  We want to avoid an IPI  */
		/*   call  if the other cpu is idle/not doing  */
		/*   dtrace  work.  If  thats the case and we  */
		/*   are  calling  dtrace_sync,  then  we can  */
		/*   avoid the xcall.			       */
		/***********************************************/
		if ((void *) func == (void *) dtrace_sync_func &&
		    cpu_core[c].cpuc_probe_level == 0) {
			cpu &= ~(1 << c);
			cnt_xcall7++;
			continue;
		}
//dtrace_printf("xcall %p\n", func);

		xc->xc_func = func;
		xc->xc_arg = arg;
		/***********************************************/
		/*   Spinlock  in  case  the  interrupt hasnt  */
		/*   fired.  This  should  be  very rare, and  */
		/*   when it happens, we would be hanging for  */
		/*   100m  iterations  (about  1s). We reduce  */
		/*   the   chance  of  a  hit  by  using  the  */
		/*   NCPU*NCPU*2 array approach. These things  */
		/*   happen  when buffers are full or user is  */
		/*   ^C-ing dtrace.			       */
		/***********************************************/
		for (cnt = 0; dtrace_cas32((void *) &xc->xc_state, XC_WORKING, XC_WORKING) == XC_WORKING; cnt++) {
			/***********************************************/
			/*   Avoid noise for tiny windows.	       */
			/***********************************************/
			if ((cnt == 0 && xcall_debug) || !(xcall_debug && cnt == 50)) {
				dtrace_printf("[%d] cpu%d in wrong state (state=%d)\n",
					smp_processor_id(), c, xc->xc_state);
			}
//			xcall_slave2();
			if (cnt == 100 * 1000 * 1000) {
				dtrace_printf("[%d] cpu%d - busting lock\n",
					smp_processor_id(), c);
				break;
			}
		}
		if ((cnt && xcall_debug) || (!xcall_debug && cnt > 50)) {
			dtrace_printf("[%d] cpu%d in wrong state (state=%d) %u cycles\n",
				smp_processor_id(), c, xc->xc_state, cnt);
		}
		/***********************************************/
		/*   As  soon  as  we set xc_state and BEFORE  */
		/*   the  apic  call,  the  cpu  may  see the  */
		/*   change  since  it  may  be taking an IPI  */
		/*   interrupt  for  someone else. We need to  */
		/*   be  careful  with  barriers  (I  think -  */
		/*   although    the   clflush/wmb   may   be  */
		/*   redundant).			       */
		/***********************************************/
		xc->xc_state = XC_WORKING;
//		clflush(&xc->xc_state);
//		smp_wmb();
		cpu_set(c, mask);
		cpus_todo++;
	}

	smp_mb();

	/***********************************************/
	/*   Now tell the other cpus to do some work.  */
	/***********************************************/
	if (cpus_todo)
		send_ipi_interrupt(&mask, ipi_vector);

	/***********************************************/
	/*   Check for ourselves.		       */
	/***********************************************/
	if (cpu & (1 << cpu_id)) {
		func(arg);
	}

	if (xcall_debug)
		dtrace_printf("[%d] getting ready.... (%ld) mask=%x func=%p\n", smp_processor_id(), cnt_xcall1, *(int *) &mask, func);

	/***********************************************/
	/*   Wait for the cpus we invoked the IPI on.  */
	/*   Cycle  thru  the  cpus,  to avoid mutual  */
	/*   deadlock  between one cpu trying to call  */
	/*   us whilst we are calling them.	       */
	/***********************************************/
	while (cpus_todo > 0) {
		for (c = 0; c < nr_cpus && cpus_todo > 0; c++) {
			xcall_slave2();
			if (c == cpu_id || (cpu & (1 << c)) == 0)
				continue;

			/***********************************************/
			/*   Wait  a  little  while  for  this cpu to  */
			/*   respond before going on to the next one.  */
			/***********************************************/
			if (ack_wait(c, 100)) {
				cpus_todo--;
				cpu &= ~(1 << c);
			}
		}
	}
//	smp_mb();

	xcall_levels[cpu_id]--;
}
Example #21
int
ack_wait(int c, int attempts)
{
	unsigned long cnt = 0;
	int	cnt1 = 0;
	volatile struct xcalls *xc = &xcalls[smp_processor_id()][c];

	/***********************************************/
	/*   Avoid holding on to a stale cache line.   */
	/***********************************************/
	while (dtrace_cas32((void *) &xc->xc_state, XC_WORKING, XC_WORKING) != XC_IDLE) {
		if (attempts-- <= 0)
			return 0;

		barrier();

		/***********************************************/
		/*   Be HT friendly.			       */
		/***********************************************/
//		smt_pause();

		cnt_xcall6++;
		/***********************************************/
		/*   Keep track of the max.		       */
		/***********************************************/
		if (cnt > cnt_xcall5)
			cnt_xcall5 = cnt;

		/***********************************************/
		/*   On  my  Dual Core 2.26GHz system, we are  */
		/*   seeing counters in the range of hundreds  */
		/*   to  maybe  2,000,000  for  more  extreme  */
		/*   cases.  (This  is  inside  a VM). During  */
		/*   debugging,  we  found  problems with the  */
		/*   two  cores  not  seeing  each  other  --  */
		/*   possibly because I wasnt doing the right  */
		/*   things to ensure memory barriers were in  */
		/*   place.				       */
		/*   					       */
		/*   We  dont  want  to  wait forever because  */
		/*   that  will  crash/hang your machine, but  */
		/*   we  do  need to give up if its taken far  */
		/*   too long.				       */
		/***********************************************/
//		if (cnt++ == 50 * 1000 * 1000UL) {
		if (cnt++ == 1 * 1000 * 1000UL) {
			cnt = 0;
			cnt_xcall4++;

			if (cnt1 == 0) {
				/***********************************************/
				/*   Looks like we are having trouble getting  */
				/*   the interrupt, so try for an NMI.	       */
				/***********************************************/
				cpumask_t mask;
				cpus_clear(mask);
				cpu_set(c, mask);
//				nmi_masks[c] = 1;
//				send_ipi_interrupt(&mask, 2); //NMI_VECTOR);
			}

			if (1) {
//				set_console_on(1);
				dtrace_printf("ack_wait cpu=%d xcall %staking too long! c=%d [xcall1=%lu]\n", 
					smp_processor_id(), 
					cnt1 ? "STILL " : "",
					c, cnt_xcall1);
				//dump_stack();
//				set_console_on(0);
			}

			if (cnt1++ > 3) {
				dump_xcalls();
				dtrace_linux_panic("xcall taking too long");
				break;
			}
		}
	}

	if (xcall_debug) {
		dtrace_printf("[%d] ack_wait finished c=%d cnt=%lu (max=%lu)\n", smp_processor_id(), c, cnt, cnt_xcall5);
	}
	return 1;
}
Example #22
int acpi_processor_preregister_performance(
		struct acpi_processor_performance **performance)
{
	int count, count_target;
	int retval = 0;
	unsigned int i, j;
	cpumask_t covered_cpus;
	struct acpi_processor *pr;
	struct acpi_psd_package *pdomain;
	struct acpi_processor *match_pr;
	struct acpi_psd_package *match_pdomain;

	mutex_lock(&performance_mutex);

	retval = 0;

	/* Call _PSD for all CPUs */
	for_each_possible_cpu(i) {
		pr = processors[i];
		if (!pr) {
			/* Look only at processors in ACPI namespace */
			continue;
		}

		if (pr->performance) {
			retval = -EBUSY;
			continue;
		}

		if (!performance || !performance[i]) {
			retval = -EINVAL;
			continue;
		}

		pr->performance = performance[i];
		cpu_set(i, pr->performance->shared_cpu_map);
		if (acpi_processor_get_psd(pr)) {
			retval = -EINVAL;
			continue;
		}
	}
	if (retval)
		goto err_ret;

	/*
	 * Now that we have _PSD data from all CPUs, let's set up P-state
	 * domain info.
	 */
	for_each_possible_cpu(i) {
		pr = processors[i];
		if (!pr)
			continue;

		/* Basic validity check for domain info */
		pdomain = &(pr->performance->domain_info);
		if ((pdomain->revision != ACPI_PSD_REV0_REVISION) ||
		    (pdomain->num_entries != ACPI_PSD_REV0_ENTRIES)) {
			retval = -EINVAL;
			goto err_ret;
		}
		if (pdomain->coord_type != DOMAIN_COORD_TYPE_SW_ALL &&
		    pdomain->coord_type != DOMAIN_COORD_TYPE_SW_ANY &&
		    pdomain->coord_type != DOMAIN_COORD_TYPE_HW_ALL) {
			retval = -EINVAL;
			goto err_ret;
		}
	}

	cpus_clear(covered_cpus);
	for_each_possible_cpu(i) {
		pr = processors[i];
		if (!pr)
			continue;

		if (cpu_isset(i, covered_cpus))
			continue;

		pdomain = &(pr->performance->domain_info);
		cpu_set(i, pr->performance->shared_cpu_map);
		cpu_set(i, covered_cpus);
		if (pdomain->num_processors <= 1)
			continue;

		/* Validate the Domain info */
		count_target = pdomain->num_processors;
		count = 1;
		if (pdomain->coord_type == DOMAIN_COORD_TYPE_SW_ALL)
			pr->performance->shared_type = CPUFREQ_SHARED_TYPE_ALL;
		else if (pdomain->coord_type == DOMAIN_COORD_TYPE_HW_ALL)
			pr->performance->shared_type = CPUFREQ_SHARED_TYPE_HW;
		else if (pdomain->coord_type == DOMAIN_COORD_TYPE_SW_ANY)
			pr->performance->shared_type = CPUFREQ_SHARED_TYPE_ANY;

		for_each_possible_cpu(j) {
			if (i == j)
				continue;

			match_pr = processors[j];
			if (!match_pr)
				continue;

			match_pdomain = &(match_pr->performance->domain_info);
			if (match_pdomain->domain != pdomain->domain)
				continue;

			/* Here i and j are in the same domain */

			if (match_pdomain->num_processors != count_target) {
				retval = -EINVAL;
				goto err_ret;
			}

			if (pdomain->coord_type != match_pdomain->coord_type) {
				retval = -EINVAL;
				goto err_ret;
			}

			cpu_set(j, covered_cpus);
			cpu_set(j, pr->performance->shared_cpu_map);
			count++;
		}

		for_each_possible_cpu(j) {
			if (i == j)
				continue;

			match_pr = processors[j];
			if (!match_pr)
				continue;

			match_pdomain = &(match_pr->performance->domain_info);
			if (match_pdomain->domain != pdomain->domain)
				continue;

			match_pr->performance->shared_type = 
					pr->performance->shared_type;
			match_pr->performance->shared_cpu_map =
				pr->performance->shared_cpu_map;
		}
	}

err_ret:
	for_each_possible_cpu(i) {
		pr = processors[i];
		if (!pr || !pr->performance)
			continue;

		/* Assume no coordination on any error parsing domain info */
		if (retval) {
			cpus_clear(pr->performance->shared_cpu_map);
			cpu_set(i, pr->performance->shared_cpu_map);
			pr->performance->shared_type = CPUFREQ_SHARED_TYPE_ALL;
		}
		pr->performance = NULL; /* Will be set for real in register */
	}

	mutex_unlock(&performance_mutex);
	return retval;
}
Example #23
static int cachemiss_thread (void *data)
{
	tux_req_t *req;
	struct k_sigaction *ka;
	DECLARE_WAITQUEUE(wait, current);
	iothread_t *iot = data;
	int nr = iot->ti->cpu, wake_up;

	Dprintk("iot %p/%p got started.\n", iot, current);
	drop_permissions();

	spin_lock(&iot->async_lock);
	iot->threads++;
	sprintf(current->comm, "async IO %d/%d", nr, iot->threads);


	spin_lock_irq(&current->sighand->siglock);
	ka = current->sighand->action + SIGCHLD-1;
	ka->sa.sa_handler = SIG_IGN;
	siginitsetinv(&current->blocked, sigmask(SIGCHLD));
	recalc_sigpending();
	spin_unlock_irq(&current->sighand->siglock);

	spin_unlock(&iot->async_lock);
#ifdef CONFIG_SMP
	{
		cpumask_t mask;

		if (cpu_isset(nr, cpu_online_map)) {
			cpus_clear(mask);
			cpu_set(nr, mask);
			set_cpus_allowed(current, mask);
		}

	}
#endif

	add_wait_queue_exclusive(&iot->async_sleep, &wait);

	for (;;) {
		while (!list_empty(&iot->async_queue) &&
				(req = get_cachemiss(iot))) {

			if (!req->atom_idx) {
				add_tux_atom(req, flush_request);
				add_req_to_workqueue(req);
				continue;
			}
			tux_schedule_atom(req, 1);
			if (signal_pending(current))
				flush_all_signals();
		}
		if (signal_pending(current))
			flush_all_signals();
		if (!list_empty(&iot->async_queue))
			continue;
		if (iot->shutdown) {
			Dprintk("iot %p/%p got shutdown!\n", iot, current);
			break;
		}
		__set_current_state(TASK_INTERRUPTIBLE);
		if (list_empty(&iot->async_queue)) {
			Dprintk("iot %p/%p going to sleep.\n", iot, current);
			schedule();
			Dprintk("iot %p/%p got woken up.\n", iot, current);
		}
		__set_current_state(TASK_RUNNING);
	}

	remove_wait_queue(&iot->async_sleep, &wait);

	wake_up = 0;
	spin_lock(&iot->async_lock);
	if (!--iot->threads)
		wake_up = 1;
	spin_unlock(&iot->async_lock);
	Dprintk("iot %p/%p has finished shutdown!\n", iot, current);
	if (wake_up) {
		Dprintk("iot %p/%p waking up master.\n", iot, current);
		wake_up(&iot->wait_shutdown);
	}

	return 0;
}
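The CONFIG_SMP block in cachemiss_thread() pins the thread to its designated CPU by building a one-bit cpumask_t on the stack. On later kernels the temporary mask is unnecessary; a short sketch of the same pinning, assuming set_cpus_allowed_ptr() and cpumask_of() are available:

/* Sketch: modern equivalent of the affinity-pinning block above. */
static void pin_current_to_cpu(int nr)
{
	if (cpu_online(nr))
		set_cpus_allowed_ptr(current, cpumask_of(nr));
}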
Example #24
RTDECL(int) RTMpOnPair(RTCPUID idCpu1, RTCPUID idCpu2, uint32_t fFlags, PFNRTMPWORKER pfnWorker, void *pvUser1, void *pvUser2)
{
    IPRT_LINUX_SAVE_EFL_AC();
    int rc;
    RTTHREADPREEMPTSTATE PreemptState = RTTHREADPREEMPTSTATE_INITIALIZER;

    AssertReturn(idCpu1 != idCpu2, VERR_INVALID_PARAMETER);
    AssertReturn(!(fFlags & RTMPON_F_VALID_MASK), VERR_INVALID_FLAGS);

    /*
     * Check that both CPUs are online before doing the broadcast call.
     */
    RTThreadPreemptDisable(&PreemptState);
    if (   RTMpIsCpuOnline(idCpu1)
        && RTMpIsCpuOnline(idCpu2))
    {
        /*
         * Use the smp_call_function variant taking a cpu mask where available,
         * falling back on broadcast with filter.  Slight snag if one of the
         * CPUs is the one we're running on, we must do the call and the post
         * call wait ourselves.
         */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)
        cpumask_t   DstCpuMask;
#endif
        RTCPUID     idCpuSelf = RTMpCpuId();
        bool const  fCallSelf = idCpuSelf == idCpu1 || idCpuSelf == idCpu2;
        RTMPARGS    Args;
        Args.pfnWorker = pfnWorker;
        Args.pvUser1 = pvUser1;
        Args.pvUser2 = pvUser2;
        Args.idCpu   = idCpu1;
        Args.idCpu2  = idCpu2;
        Args.cHits   = 0;

#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 28)
        cpumask_clear(&DstCpuMask);
        cpumask_set_cpu(idCpu1, &DstCpuMask);
        cpumask_set_cpu(idCpu2, &DstCpuMask);
#elif LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)
        cpus_clear(DstCpuMask);
        cpu_set(idCpu1, DstCpuMask);
        cpu_set(idCpu2, DstCpuMask);
#endif

#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 29)
        smp_call_function_many(&DstCpuMask, rtmpLinuxWrapperPostInc, &Args, !fCallSelf /* wait */);
        rc = 0;
#elif LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 28)
        rc = smp_call_function_many(&DstCpuMask, rtmpLinuxWrapperPostInc, &Args, !fCallSelf /* wait */);
#elif LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)
        rc = smp_call_function_mask(DstCpuMask, rtmpLinuxWrapperPostInc, &Args, !fCallSelf /* wait */);
#else /* older kernels */
        rc = smp_call_function(rtMpLinuxOnPairWrapper, &Args, 0 /* retry */, !fCallSelf /* wait */);
#endif /* older kernels */
        Assert(rc == 0);

        /* Call ourselves if necessary and wait for the other party to be done. */
        if (fCallSelf)
        {
            uint32_t cLoops = 0;
            rtmpLinuxWrapper(&Args);
            while (ASMAtomicReadU32(&Args.cHits) < 2)
            {
                if ((cLoops & 0x1ff) == 0 && !RTMpIsCpuOnline(idCpuSelf == idCpu1 ? idCpu2 : idCpu1))
                    break;
                cLoops++;
                ASMNopPause();
            }
        }

        Assert(Args.cHits <= 2);
        if (Args.cHits == 2)
            rc = VINF_SUCCESS;
        else if (Args.cHits == 1)
            rc = VERR_NOT_ALL_CPUS_SHOWED;
        else if (Args.cHits == 0)
            rc = VERR_CPU_OFFLINE;
        else
            rc = VERR_CPU_IPE_1;
    }
    /*
     * A CPU must be present to be considered just offline.
     */
    else if (   RTMpIsCpuPresent(idCpu1)
             && RTMpIsCpuPresent(idCpu2))
        rc = VERR_CPU_OFFLINE;
    else
        rc = VERR_CPU_NOT_FOUND;
    RTThreadPreemptRestore(&PreemptState);
    IPRT_LINUX_RESTORE_EFL_AC();
    return rc;
}
Example #25
unsigned long ipipe_critical_enter(void (*syncfn)(void))
{
	int cpu __maybe_unused, n __maybe_unused;
	unsigned long flags, loops __maybe_unused;
	cpumask_t allbutself __maybe_unused;

	flags = hard_local_irq_save();

	if (num_online_cpus() == 1)
		return flags;

#ifdef CONFIG_SMP

	cpu = ipipe_processor_id();
	if (!cpu_test_and_set(cpu, __ipipe_cpu_lock_map)) {
		while (test_and_set_bit(0, &__ipipe_critical_lock)) {
			n = 0;
			hard_local_irq_enable();

			do
				cpu_relax();
			while (++n < cpu);

			hard_local_irq_disable();
		}
restart:
		spin_lock(&__ipipe_cpu_barrier);

		__ipipe_cpu_sync = syncfn;

		cpus_clear(__ipipe_cpu_pass_map);
		cpu_set(cpu, __ipipe_cpu_pass_map);

		/*
		 * Send the sync IPI to all processors but the current
		 * one.
		 */
		cpus_andnot(allbutself, cpu_online_map, __ipipe_cpu_pass_map);
		ipipe_send_ipi(IPIPE_CRITICAL_IPI, allbutself);
		loops = IPIPE_CRITICAL_TIMEOUT;

		while (!cpus_equal(__ipipe_cpu_sync_map, allbutself)) {
			if (--loops > 0) {
				cpu_relax();
				continue;
			}
			/*
			 * We ran into a deadlock due to a contended
			 * rwlock. Cancel this round and retry.
			 */
			__ipipe_cpu_sync = NULL;

			spin_unlock(&__ipipe_cpu_barrier);
			/*
			 * Ensure all CPUs consumed the IPI to avoid
			 * running __ipipe_cpu_sync prematurely. This
			 * usually resolves the deadlock reason too.
			 */
			while (!cpus_equal(cpu_online_map, __ipipe_cpu_pass_map))
				cpu_relax();

			goto restart;
		}
	}

	atomic_inc(&__ipipe_critical_count);

#endif	/* CONFIG_SMP */

	return flags;
}
Example #26
static void __init smp_boot_cpus(unsigned int max_cpus)
{
	int apicid, cpu, bit, kicked;
	unsigned long bogosum = 0;

	/*
	 * Setup boot CPU information
	 */
	smp_store_cpu_info(0); /* Final full version of the data */
	printk("CPU%d: ", 0);
	print_cpu_info(&cpu_data[0]);

	boot_cpu_physical_apicid = GET_APIC_ID(apic_read(APIC_ID));
	boot_cpu_logical_apicid = logical_smp_processor_id();
	x86_cpu_to_apicid[0] = boot_cpu_physical_apicid;

	current_thread_info()->cpu = 0;
	smp_tune_scheduling();
	cpus_clear(cpu_sibling_map[0]);
	cpu_set(0, cpu_sibling_map[0]);

	/*
	 * If we couldn't find an SMP configuration at boot time,
	 * get out of here now!
	 */
	if (!smp_found_config && !acpi_lapic) {
		printk(KERN_NOTICE "SMP motherboard not detected.\n");
		smpboot_clear_io_apic_irqs();
		phys_cpu_present_map = physid_mask_of_physid(0);
		if (APIC_init_uniprocessor())
			printk(KERN_NOTICE "Local APIC not detected."
					   " Using dummy APIC emulation.\n");
		map_cpu_to_logical_apicid();
		return;
	}

	/*
	 * Should not be necessary because the MP table should list the boot
	 * CPU too, but we do it for the sake of robustness anyway.
	 * Makes no sense to do this check in clustered apic mode, so skip it
	 */
	if (!check_phys_apicid_present(boot_cpu_physical_apicid)) {
		printk("weird, boot CPU (#%d) not listed by the BIOS.\n",
				boot_cpu_physical_apicid);
		physid_set(hard_smp_processor_id(), phys_cpu_present_map);
	}

	/*
	 * If we couldn't find a local APIC, then get out of here now!
	 */
	if (APIC_INTEGRATED(apic_version[boot_cpu_physical_apicid]) && !cpu_has_apic) {
		printk(KERN_ERR "BIOS bug, local APIC #%d not detected!...\n",
			boot_cpu_physical_apicid);
		printk(KERN_ERR "... forcing use of dummy APIC emulation. (tell your hw vendor)\n");
		smpboot_clear_io_apic_irqs();
		phys_cpu_present_map = physid_mask_of_physid(0);
		return;
	}

	verify_local_APIC();

	/*
	 * If SMP should be disabled, then really disable it!
	 */
	if (!max_cpus) {
		smp_found_config = 0;
		printk(KERN_INFO "SMP mode deactivated, forcing use of dummy APIC emulation.\n");
		smpboot_clear_io_apic_irqs();
		phys_cpu_present_map = physid_mask_of_physid(0);
		return;
	}

	connect_bsp_APIC();
	setup_local_APIC();
	map_cpu_to_logical_apicid();


	setup_portio_remap();

	/*
	 * Scan the CPU present map and fire up the other CPUs via do_boot_cpu
	 *
	 * In clustered apic mode, phys_cpu_present_map is constructed thus:
	 * bits 0-3 are quad0, 4-7 are quad1, etc. A perverse twist on the 
	 * clustered apic ID.
	 */
	Dprintk("CPU present map: %lx\n", physids_coerce(phys_cpu_present_map));

	kicked = 1;
	for (bit = 0; kicked < NR_CPUS && bit < MAX_APICS; bit++) {
		apicid = cpu_present_to_apicid(bit);
		/*
		 * Don't even attempt to start the boot CPU!
		 */
		if ((apicid == boot_cpu_apicid) || (apicid == BAD_APICID))
			continue;

		if (!check_apicid_present(bit))
			continue;
		if (max_cpus <= cpucount+1)
			continue;

		if (do_boot_cpu(apicid))
			printk("CPU #%d not responding - cannot use it.\n",
								apicid);
		else
			++kicked;
	}

	/*
	 * Cleanup possible dangling ends...
	 */
	smpboot_restore_warm_reset_vector();

	/*
	 * Allow the user to impress friends.
	 */
	Dprintk("Before bogomips.\n");
	for (cpu = 0; cpu < NR_CPUS; cpu++)
		if (cpu_isset(cpu, cpu_callout_map))
			bogosum += cpu_data[cpu].loops_per_jiffy;
	printk(KERN_INFO
		"Total of %d processors activated (%lu.%02lu BogoMIPS).\n",
		cpucount+1,
		bogosum/(500000/HZ),
		(bogosum/(5000/HZ))%100);
	
	Dprintk("Before bogocount - setting activated=1.\n");

	if (smp_b_stepping)
		printk(KERN_WARNING "WARNING: SMP operation may be unreliable with B stepping processors.\n");

	/*
	 * Don't taint if we are running SMP kernel on a single non-MP
	 * approved Athlon
	 */
	if (tainted & TAINT_UNSAFE_SMP) {
		if (cpucount)
			printk (KERN_INFO "WARNING: This combination of AMD processors is not suitable for SMP.\n");
		else
			tainted &= ~TAINT_UNSAFE_SMP;
	}

	Dprintk("Boot done.\n");

	/*
	 * construct cpu_sibling_map[], so that we can tell sibling CPUs
	 * efficiently.
	 */
	for (cpu = 0; cpu < NR_CPUS; cpu++)
		cpus_clear(cpu_sibling_map[cpu]);

	for (cpu = 0; cpu < NR_CPUS; cpu++) {
		int siblings = 0;
		int i;
		if (!cpu_isset(cpu, cpu_callout_map))
			continue;

		if (smp_num_siblings > 1) {
			for (i = 0; i < NR_CPUS; i++) {
				if (!cpu_isset(i, cpu_callout_map))
					continue;
				if (phys_proc_id[cpu] == phys_proc_id[i]) {
					siblings++;
					cpu_set(i, cpu_sibling_map[cpu]);
				}
			}
		} else {
			siblings++;
			cpu_set(cpu, cpu_sibling_map[cpu]);
		}

		if (siblings != smp_num_siblings)
			printk(KERN_WARNING "WARNING: %d siblings found for CPU%d, should be %d\n", siblings, cpu, smp_num_siblings);
	}

	if (nmi_watchdog == NMI_LOCAL_APIC)
		check_nmi_watchdog();

	smpboot_setup_io_apic();

	setup_boot_APIC_clock();

	/*
	 * Synchronize the TSC with the AP
	 */
	if (cpu_has_tsc && cpucount && cpu_khz)
		synchronize_tsc_bp();
}
Example #27
static int
start_thread(int id, id_t cpu)
{
        int status;
        id_t aspace_id;
        vaddr_t stack_ptr;
        user_cpumask_t cpumask;

        aspace_get_myid(&aspace_id);
        stack_ptr = (vaddr_t)malloc(THREAD_STACK_SIZE);
        cpus_clear(cpumask);
        cpu_set(cpu, cpumask);

        start_state_t start_state = {
                .task_id	= id,
		.user_id        = 1,
                .group_id       = 1,
                .aspace_id      = aspace_id,
                .entry_point    = (vaddr_t)&simple_task,
                .stack_ptr      = stack_ptr + THREAD_STACK_SIZE,
                .cpu_id         = cpu,
		.edf.slice	= SLICE,
		.edf.period     = PERIOD,
        };

        sprintf(start_state.task_name, "cpu%u-task%d", cpu, id);

	printf(" *** Creating Task %s (S: %llu, T: %llu) on cpu %d\n",
	       start_state.task_name,
	       (unsigned long long)start_state.edf.slice,
	       (unsigned long long)start_state.edf.period,
	       cpu);

	status = task_create(&start_state, NULL);
	if (status) {
		printf("ERROR: failed to create thread (status=%d) on cpu %d.\n",
		       status,
		       cpu);
		return -1;
        }

        return 0;
}



static int
edf_sched_test(void)
{
	int status;
	unsigned i;
	id_t my_id;
	cpu_set_t cpu_mask;
	pthread_attr_t attr;

	printf("\n EDF TEST BEGIN (Running for %d seconds)\n",TEST_TIME);

	status = aspace_get_myid(&my_id);
	if (status) {
		printf("ERROR: task_get_myid() status=%d\n", status);
		return -1;
	}

	pthread_attr_init(&attr);
	pthread_attr_setstacksize(&attr, 1024 * 32);

	printf("  My Linux process id (PID) is: %d\n",  getpid());

	status = pthread_getaffinity_np(pthread_self(), sizeof(cpu_mask), &cpu_mask);
	if (status) {
		printf("    ERROR: pthread_getaffinity_np() status=%d\n", status);
		return -1;
	}

	printf("  My task ID is %u\n", my_id);
	printf("  The following CPUs are in the cpumask:\n    ");
	for (i = CPU_MIN_ID; i <= CPU_MAX_ID; i++) {
		if (CPU_ISSET(i, &cpu_mask))
			printf("%d ", i);
	}
	printf("\n");

	printf("Creating two EDF threads on each CPU:\n");
	for (i = CPU_MIN_ID; i <= CPU_MAX_ID; i++) {
		if (CPU_ISSET(i, &cpu_mask)) {
			printf("CPU %d\n", i);
			start_thread(0x1000+i,i);
			start_thread(0x1100+i,i);
		}
	}

	printf("\n");
	return 0;
}
Example #28
void __init prom_prepare_cpus(unsigned int max_cpus)
{
	cpus_clear(phys_cpu_present_map);
}
Example #29
static inline int dup_mmap(struct mm_struct * mm, struct mm_struct * oldmm)
{
	struct vm_area_struct * mpnt, *tmp, **pprev;
	struct rb_node **rb_link, *rb_parent;
	int retval;
	unsigned long charge;
	struct mempolicy *pol;

	down_write(&oldmm->mmap_sem);
	flush_cache_mm(current->mm);
	mm->locked_vm = 0;
	mm->mmap = NULL;
	mm->mmap_cache = NULL;
	mm->free_area_cache = oldmm->mmap_base;
	mm->map_count = 0;
	mm->rss = 0;
	mm->anon_rss = 0;
	cpus_clear(mm->cpu_vm_mask);
	mm->mm_rb = RB_ROOT;
	rb_link = &mm->mm_rb.rb_node;
	rb_parent = NULL;
	pprev = &mm->mmap;

	for (mpnt = current->mm->mmap ; mpnt ; mpnt = mpnt->vm_next) {
		struct file *file;

		if (mpnt->vm_flags & VM_DONTCOPY) {
			__vm_stat_account(mm, mpnt->vm_flags, mpnt->vm_file,
							-vma_pages(mpnt));
			continue;
		}
		charge = 0;
		if (mpnt->vm_flags & VM_ACCOUNT) {
			unsigned int len = (mpnt->vm_end - mpnt->vm_start) >> PAGE_SHIFT;
			if (security_vm_enough_memory(len))
				goto fail_nomem;
			charge = len;
		}
		tmp = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
		if (!tmp)
			goto fail_nomem;
		*tmp = *mpnt;
		pol = mpol_copy(vma_policy(mpnt));
		retval = PTR_ERR(pol);
		if (IS_ERR(pol))
			goto fail_nomem_policy;
		vma_set_policy(tmp, pol);
		tmp->vm_flags &= ~VM_LOCKED;
		tmp->vm_mm = mm;
		tmp->vm_next = NULL;
		anon_vma_link(tmp);
		file = tmp->vm_file;
		if (file) {
			struct inode *inode = file->f_dentry->d_inode;
			get_file(file);
			if (tmp->vm_flags & VM_DENYWRITE)
				atomic_dec(&inode->i_writecount);
      
			/* insert tmp into the share list, just after mpnt */
			spin_lock(&file->f_mapping->i_mmap_lock);
			tmp->vm_truncate_count = mpnt->vm_truncate_count;
			flush_dcache_mmap_lock(file->f_mapping);
			vma_prio_tree_add(tmp, mpnt);
			flush_dcache_mmap_unlock(file->f_mapping);
			spin_unlock(&file->f_mapping->i_mmap_lock);
		}

		/*
		 * Link in the new vma and copy the page table entries:
		 * link in first so that swapoff can see swap entries,
		 * and try_to_unmap_one's find_vma find the new vma.
		 */
		spin_lock(&mm->page_table_lock);
		*pprev = tmp;
		pprev = &tmp->vm_next;

		__vma_link_rb(mm, tmp, rb_link, rb_parent);
		rb_link = &tmp->vm_rb.rb_right;
		rb_parent = &tmp->vm_rb;

		mm->map_count++;
		retval = copy_page_range(mm, current->mm, tmp);
		spin_unlock(&mm->page_table_lock);

		if (tmp->vm_ops && tmp->vm_ops->open)
			tmp->vm_ops->open(tmp);

		if (retval)
			goto out;
	}
Example #30
static void vector_allocation_domain(int cpu, cpumask_t *retmask)
{
	cpus_clear(*retmask);
	cpu_set(cpu, *retmask);
}
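Example #30 is the smallest cpus_clear()/cpu_set() idiom of the whole set: build a mask containing exactly one CPU. With the struct-cpumask API this collapses to a single copy; a minimal sketch assuming cpumask_copy() and cpumask_of():

/* Sketch: the same single-CPU allocation domain with the newer API. */
static void vector_allocation_domain_modern(int cpu, struct cpumask *retmask)
{
	cpumask_copy(retmask, cpumask_of(cpu));
}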