Example No. 1
static void __cpuinit srat_detect_node(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_NUMA
	unsigned node;
	int cpu = smp_processor_id();

	/* Don't do the funky fallback heuristics the AMD version employs
	   for now. */
	node = numa_cpu_node(cpu);
	if (node == NUMA_NO_NODE || !node_online(node)) {
		/* reuse the value from init_cpu_to_node() */
		node = cpu_to_node(cpu);
	}
	numa_set_node(cpu, node);
#endif
}
Example No. 2
/*! 2017. 5. 6 study -ing */
struct task_struct *kthread_create_on_cpu(int (*threadfn)(void *data),
					  void *data, unsigned int cpu,
					  const char *namefmt)
{
	struct task_struct *p;

	p = kthread_create_on_node(threadfn, data, cpu_to_node(cpu), namefmt,
				   cpu);
	if (IS_ERR(p))
		return p;
	set_bit(KTHREAD_IS_PER_CPU, &to_kthread(p)->flags);
	to_kthread(p)->cpu = cpu;
	/* Park the thread to get it out of TASK_UNINTERRUPTIBLE state */
	kthread_park(p);
	return p;
}
Example No. 3
File: en_main.c Project: Dyoed/ath
static int mlx5e_create_sq(struct mlx5e_channel *c,
			   int tc,
			   struct mlx5e_sq_param *param,
			   struct mlx5e_sq *sq)
{
	struct mlx5e_priv *priv = c->priv;
	struct mlx5_core_dev *mdev = priv->mdev;

	void *sqc = param->sqc;
	void *sqc_wq = MLX5_ADDR_OF(sqc, sqc, wq);
	int err;

	err = mlx5_alloc_map_uar(mdev, &sq->uar);
	if (err)
		return err;

	err = mlx5_wq_cyc_create(mdev, &param->wq, sqc_wq, &sq->wq,
				 &sq->wq_ctrl);
	if (err)
		goto err_unmap_free_uar;

	sq->wq.db       = &sq->wq.db[MLX5_SND_DBR];
	sq->uar_map     = sq->uar.map;
	sq->bf_buf_size = (1 << MLX5_CAP_GEN(mdev, log_bf_reg_size)) / 2;

	err = mlx5e_alloc_sq_db(sq, cpu_to_node(c->cpu));
	if (err)
		goto err_sq_wq_destroy;

	sq->txq = netdev_get_tx_queue(priv->netdev,
				      c->ix + tc * priv->params.num_channels);

	sq->pdev    = c->pdev;
	sq->mkey_be = c->mkey_be;
	sq->channel = c;
	sq->tc      = tc;

	return 0;

err_sq_wq_destroy:
	mlx5_wq_destroy(&sq->wq_ctrl);

err_unmap_free_uar:
	mlx5_unmap_free_uar(mdev, &sq->uar);

	return err;
}
Example No. 4
static int dtl_enable(struct dtl *dtl)
{
	unsigned long addr;
	int ret, hwcpu;

	/* only allow one reader */
	if (dtl->buf)
		return -EBUSY;

	/* we need to store the original allocation size for use during read */
	dtl->buf_entries = dtl_buf_entries;

	dtl->buf = kmalloc_node(dtl->buf_entries * sizeof(struct dtl_entry),
			GFP_KERNEL, cpu_to_node(dtl->cpu));
	if (!dtl->buf) {
		printk(KERN_WARNING "%s: buffer alloc failed for cpu %d\n",
				__func__, dtl->cpu);
		return -ENOMEM;
	}

	/* Register our dtl buffer with the hypervisor. The HV expects the
	 * buffer size to be passed in the second word of the buffer */
	((u32 *)dtl->buf)[1] = dtl->buf_entries * sizeof(struct dtl_entry);

	hwcpu = get_hard_smp_processor_id(dtl->cpu);
	addr = __pa(dtl->buf);
	ret = register_dtl(hwcpu, addr);
	if (ret) {
		printk(KERN_WARNING "%s: DTL registration for cpu %d (hw %d) "
		       "failed with %d\n", __func__, dtl->cpu, hwcpu, ret);
		kfree(dtl->buf);
		return -EIO;
	}

	/* set our initial buffer indices */
	dtl->last_idx = lppaca[dtl->cpu].dtl_idx = 0;

	/* ensure that our updates to the lppaca fields have occurred before
	 * we actually enable the logging */
	smp_wmb();

	/* enable event logging */
	lppaca[dtl->cpu].dtl_enable_mask = dtl_event_mask;

	return 0;
}
Example No. 5
/*
 * register_cpu - Setup a driverfs device for a CPU.
 * @cpu - Callers can set the cpu->no_control field to 1, to indicate not to
 *		  generate a control file in sysfs for this CPU.
 * @num - CPU number to use when creating the device.
 *
 * Initialize and register the CPU device.
 */
int __init register_cpu(struct cpu *cpu, int num, struct node *root)
{
	int error;

	cpu->node_id = cpu_to_node(num);
	cpu->sysdev.id = num;
	cpu->sysdev.cls = &cpu_sysdev_class;

	error = sysdev_register(&cpu->sysdev);
	if (!error && root)
		error = sysfs_create_link(&root->sysdev.kobj,
					  &cpu->sysdev.kobj,
					  kobject_name(&cpu->sysdev.kobj));
	if (!error && !cpu->no_control)
		register_cpu_control(cpu);
	return error;
}
Example No. 6
static int intr_connect_level(int cpu, int bit)
{
	nasid_t nasid = COMPACT_TO_NASID_NODEID(cpu_to_node(cpu));
	struct slice_data *si = cpu_data[cpu].data;

	set_bit(bit, si->irq_enable_mask);

	if (!cputoslice(cpu)) {
		REMOTE_HUB_S(nasid, PI_INT_MASK0_A, si->irq_enable_mask[0]);
		REMOTE_HUB_S(nasid, PI_INT_MASK1_A, si->irq_enable_mask[1]);
	} else {
		REMOTE_HUB_S(nasid, PI_INT_MASK0_B, si->irq_enable_mask[0]);
		REMOTE_HUB_S(nasid, PI_INT_MASK1_B, si->irq_enable_mask[1]);
	}

	return 0;
}
Example No. 7
static void __cpuinit srat_detect_node(struct cpuinfo_x86 *c)
{
#if defined(CONFIG_NUMA) && defined(CONFIG_X86_64)
	unsigned node;
	int cpu = smp_processor_id();
	int apicid = cpu_has_apic ? hard_smp_processor_id() : c->apicid;

	/* Don't do the funky fallback heuristics the AMD version employs
	   for now. */
	node = apicid_to_node[apicid];
	if (node == NUMA_NO_NODE || !node_online(node)) {
		/* reuse the value from init_cpu_to_node() */
		node = cpu_to_node(cpu);
	}
	numa_set_node(cpu, node);
#endif
}
Example No. 8
/**
 * percpu_populate - populate per-cpu data for given cpu
 * @__pdata: per-cpu data to populate further
 * @size: size of per-cpu object
 * @gfp: may sleep or not etc.
 * @cpu: populate per-data for this cpu
 *
 * Populating per-cpu data for a cpu coming online would be a typical
 * use case. You need to register a cpu hotplug handler for that purpose.
 * Per-cpu object is populated with zeroed buffer.
 */
void *percpu_populate(void *__pdata, size_t size, gfp_t gfp, int cpu)
{
	struct percpu_data *pdata = __percpu_disguise(__pdata);
	int node = cpu_to_node(cpu);

	/*
	 * We should make sure each CPU gets private memory.
	 */
	size = roundup(size, cache_line_size());

	BUG_ON(pdata->ptrs[cpu]);
	if (node_online(node))
		pdata->ptrs[cpu] = kmalloc_node(size, gfp|__GFP_ZERO, node);
	else
		pdata->ptrs[cpu] = kzalloc(size, gfp);
	return pdata->ptrs[cpu];
}
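The kernel-doc above points at CPU hotplug as the typical caller of percpu_populate(). As a rough illustration only, a notifier written in the hotplug style of that same kernel era might look like the sketch below; my_pdata and MY_OBJ_SIZE are hypothetical placeholders for per-cpu data set up elsewhere, not names taken from the listing.

/*
 * Illustrative sketch, not from the listing: populate the per-cpu slot
 * for a CPU coming online, and drop it again if the bring-up fails or
 * the CPU goes away.  my_pdata and MY_OBJ_SIZE are assumed to exist.
 */
static int my_percpu_cpu_callback(struct notifier_block *nb,
				  unsigned long action, void *hcpu)
{
	int cpu = (long)hcpu;

	switch (action) {
	case CPU_UP_PREPARE:
		if (!percpu_populate(my_pdata, MY_OBJ_SIZE, GFP_KERNEL, cpu))
			return NOTIFY_BAD;	/* node-local allocation failed */
		break;
	case CPU_UP_CANCELED:
	case CPU_DEAD:
		percpu_depopulate(my_pdata, cpu);
		break;
	}
	return NOTIFY_OK;
}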
Example No. 9
void top_obj_parse(struct s* s) {
   struct symbol *sym = get_function(s);
   if(strstr(sym->function_name, "plt")) {
      nb_plt++;
   }
   else
   {
      nb_non_plt++;
      struct symbol *ob = get_object(s);
      struct dyn_lib* ob3 = sample_to_mmap(s);
      char *obj = NULL;
      if(ob)
         obj = ob->object_name;
      if(!obj && strstr(sym->function_name, "@plt"))
         obj = sym->function_name;
      if(!obj && !strcmp(sym->function_name, "[vdso]"))
         obj = sym->function_name;
      if(!obj && ob3)
         obj = ob3->name;
      struct value *value = rbtree_lookup(r, obj, cmp);
      if(!value) {
         value = calloc(1, sizeof(*value));
         value->from_accesses = calloc(max_node, sizeof(*value->from_accesses));
         value->to_accesses = calloc(max_node, sizeof(*value->to_accesses));
         rbtree_insert(r, obj, value, cmp);
      }
      value->accesses++;
      value->dist_accesses += is_distant(s);
      value->from_accesses[cpu_to_node(s->cpu)]++;
      value->to_accesses[get_addr_node(s)]++;
      if(ob) {
         value->dist_by_allocator += (is_distant(s) && (get_tid(s) == ob->allocator_tid));
         value->dist_by_allocator_remote_cpu += (is_distant(s) && (get_tid(s) == ob->allocator_tid) && (ob->allocator_cpu != s->cpu));
         value->dist_by_allocator_alloc_cpu += (is_distant(s) && (get_tid(s) == ob->allocator_tid) && (ob->allocator_cpu == s->cpu));
         value->dist_for_obj += (is_distant(s));
   
         value->by_allocator += ((get_tid(s) == ob->allocator_tid));
         value->by_everybody += ((get_tid(s) != ob->allocator_tid));

         value->by_allocator_before_everybody += (value->by_everybody == 0);
         value->uid = ob->uid;
      }
      nb_total_access++;
   }
}
Example No. 10
void init_espfix_ap(int cpu)
{
	unsigned int page;
	unsigned long addr;
	pud_t pud, *pud_p;
	pmd_t pmd, *pmd_p;
	pte_t pte, *pte_p;
	int n, node;
	void *stack_page;
	pteval_t ptemask;

	/* We only have to do this once... */
	if (likely(per_cpu(espfix_stack, cpu)))
		return;		/* Already initialized */

	addr = espfix_base_addr(cpu);
	page = cpu/ESPFIX_STACKS_PER_PAGE;

	/* Did another CPU already set this up? */
	stack_page = ACCESS_ONCE(espfix_pages[page]);
	if (likely(stack_page))
		goto done;

	mutex_lock(&espfix_init_mutex);

	/* Did we race on the lock? */
	stack_page = ACCESS_ONCE(espfix_pages[page]);
	if (stack_page)
		goto unlock_done;

	node = cpu_to_node(cpu);
	ptemask = __supported_pte_mask;

	pud_p = &espfix_pud_page[pud_index(addr)];
	pud = *pud_p;
	if (!pud_present(pud)) {
		if (cpu)
			pmd_p = page_address(alloc_pages_node(node, PGALLOC_GFP, 0));
		else
			pmd_p = espfix_pmd_page;
		pud = __pud(__pa(pmd_p) | (PGTABLE_PROT & ptemask));
		paravirt_alloc_pmd(&init_mm, __pa(pmd_p) >> PAGE_SHIFT);
		for (n = 0; n < ESPFIX_PUD_CLONES; n++)
			set_pud(&pud_p[n], pud);
	} else
Example No. 11
File: en_main.c Project: Dyoed/ath
static int mlx5e_create_cq(struct mlx5e_channel *c,
			   struct mlx5e_cq_param *param,
			   struct mlx5e_cq *cq)
{
	struct mlx5e_priv *priv = c->priv;
	struct mlx5_core_dev *mdev = priv->mdev;
	struct mlx5_core_cq *mcq = &cq->mcq;
	int eqn_not_used;
	int irqn;
	int err;
	u32 i;

	param->wq.numa = cpu_to_node(c->cpu);
	param->eq_ix   = c->ix;

	err = mlx5_cqwq_create(mdev, &param->wq, param->cqc, &cq->wq,
			       &cq->wq_ctrl);
	if (err)
		return err;

	mlx5_vector2eqn(mdev, param->eq_ix, &eqn_not_used, &irqn);

	cq->napi        = &c->napi;

	mcq->cqe_sz     = 64;
	mcq->set_ci_db  = cq->wq_ctrl.db.db;
	mcq->arm_db     = cq->wq_ctrl.db.db + 1;
	*mcq->set_ci_db = 0;
	*mcq->arm_db    = 0;
	mcq->vector     = param->eq_ix;
	mcq->comp       = mlx5e_completion_event;
	mcq->event      = mlx5e_cq_error_event;
	mcq->irqn       = irqn;
	mcq->uar        = &priv->cq_uar;

	for (i = 0; i < mlx5_cqwq_get_size(&cq->wq); i++) {
		struct mlx5_cqe64 *cqe = mlx5_cqwq_get_wqe(&cq->wq, i);

		cqe->op_own = 0xf1;
	}

	cq->channel = c;

	return 0;
}
Example No. 12
void init_kstat_irqs(struct irq_desc *desc, int cpu, int nr)
{
	int node;
	void *ptr;

	node = cpu_to_node(cpu);
	ptr = kzalloc_node(nr * sizeof(*desc->kstat_irqs), GFP_ATOMIC, node);

	/*
	 * Don't overwrite if we can not get a new one;
	 * init_copy_kstat_irqs() could still use the old one.
	 */
	if (ptr) {
		printk(KERN_DEBUG "  alloc kstat_irqs on cpu %d node %d\n",
			 cpu, node);
		desc->kstat_irqs = ptr;
	}
}
static struct amd_nb *amd_alloc_nb(int cpu)
{
	struct amd_nb *nb;
	int i;

	nb = kmalloc_node(sizeof(struct amd_nb), GFP_KERNEL | __GFP_ZERO,
			  cpu_to_node(cpu));
	if (!nb)
		return NULL;

	nb->nb_id = -1;

	for (i = 0; i < x86_pmu.num_counters; i++) {
		__set_bit(i, nb->event_constraints[i].idxmsk);
		nb->event_constraints[i].weight = 1;
	}
	return nb;
}
Example No. 14
static int alloc_descs(unsigned int start, unsigned int cnt, int node,
		       const struct cpumask *affinity, struct module *owner)
{
	const struct cpumask *mask = NULL;
	struct irq_desc *desc;
	unsigned int flags;
	int i;

	/* Validate affinity mask(s) */
	if (affinity) {
		for (i = 0, mask = affinity; i < cnt; i++, mask++) {
			if (cpumask_empty(mask))
				return -EINVAL;
		}
	}

	flags = affinity ? IRQD_AFFINITY_MANAGED : 0;
	mask = NULL;

	for (i = 0; i < cnt; i++) {
		if (affinity) {
			node = cpu_to_node(cpumask_first(affinity));
			mask = affinity;
			affinity++;
		}
		desc = alloc_desc(start + i, node, flags, mask, owner);
		if (!desc)
			goto err;
		mutex_lock(&sparse_irq_lock);
		irq_insert_desc(start + i, desc);
		irq_sysfs_add(start + i, desc);
		mutex_unlock(&sparse_irq_lock);
	}
	return start;

err:
	for (i--; i >= 0; i--)
		free_desc(start + i);

	mutex_lock(&sparse_irq_lock);
	bitmap_clear(allocated_irqs, start, cnt);
	mutex_unlock(&sparse_irq_lock);
	return -ENOMEM;
}
static int __init create_hash_tables(void)
{
	int cpu;

	for_each_online_cpu(cpu) {
		int node = cpu_to_node(cpu);
		struct page *page;

		page = alloc_pages_node(node,
				GFP_KERNEL | __GFP_ZERO | GFP_THISNODE,
				0);
		if (!page)
			goto out_cleanup;
		per_cpu(cpu_profile_hits, cpu)[1]
				= (struct profile_hit *)page_address(page);
		page = alloc_pages_node(node,
				GFP_KERNEL | __GFP_ZERO | GFP_THISNODE,
				0);
		if (!page)
			goto out_cleanup;
		per_cpu(cpu_profile_hits, cpu)[0]
				= (struct profile_hit *)page_address(page);
	}
	return 0;
out_cleanup:
	immediate_set_early(prof_on, 0);
	smp_mb();
	on_each_cpu(profile_nop, NULL, 0, 1);
	for_each_online_cpu(cpu) {
		struct page *page;

		if (per_cpu(cpu_profile_hits, cpu)[0]) {
			page = virt_to_page(per_cpu(cpu_profile_hits, cpu)[0]);
			per_cpu(cpu_profile_hits, cpu)[0] = NULL;
			__free_page(page);
		}
		if (per_cpu(cpu_profile_hits, cpu)[1]) {
			page = virt_to_page(per_cpu(cpu_profile_hits, cpu)[1]);
			per_cpu(cpu_profile_hits, cpu)[1] = NULL;
			__free_page(page);
		}
	}
	return -1;
}
Example No. 16
static unsigned int startup_bridge_irq(struct irq_data *d)
{
	struct hub_irq_data *hd = irq_data_get_irq_chip_data(d);
	struct bridge_controller *bc;
	nasid_t nasid;
	u32 device;
	int pin;

	if (!hd)
		return -EINVAL;

	pin = hd->pin;
	bc = hd->bc;

	nasid = COMPACT_TO_NASID_NODEID(cpu_to_node(hd->cpu));
	bridge_write(bc, b_int_addr[pin].addr,
		     (0x20000 | hd->bit | (nasid << 8)));
	bridge_set(bc, b_int_enable, (1 << pin));
	bridge_set(bc, b_int_enable, 0x7ffffe00); /* more stuff in int_enable */

	/*
	 * Enable sending of an interrupt clear packet to the hub on a high to
	 * low transition of the interrupt pin.
	 *
	 * IRIX sets additional bits in the address which are documented as
	 * reserved in the bridge docs.
	 */
	bridge_set(bc, b_int_mode, (1UL << pin));

	/*
	 * We assume the bridge to have a 1:1 mapping between devices
	 * (slots) and intr pins.
	 */
	device = bridge_read(bc, b_int_device);
	device &= ~(7 << (pin*3));
	device |= (pin << (pin*3));
	bridge_write(bc, b_int_device, device);

	bridge_read(bc, b_wid_tflush);

	enable_hub_irq(d);

	return 0;	/* Never anything pending.  */
}
Example No. 17
void __init init_se7724_IRQ(void)
{
	int i, nid = cpu_to_node(boot_cpu_data);

	__raw_writew(0xffff, IRQ0_MR);  /* mask all */
	__raw_writew(0xffff, IRQ1_MR);  /* mask all */
	__raw_writew(0xffff, IRQ2_MR);  /* mask all */
	__raw_writew(0x0000, IRQ0_SR);  /* clear status */
	__raw_writew(0x0000, IRQ1_SR);  /* clear status */
	__raw_writew(0x0000, IRQ2_SR);  /* clear status */
	__raw_writew(0x002a, IRQ_MODE); /* set IRQ mode */

	for (i = 0; i < SE7724_FPGA_IRQ_NR; i++) {
		int irq, wanted;

		wanted = SE7724_FPGA_IRQ_BASE + i;

		irq = create_irq_nr(wanted, nid);
		if (unlikely(irq == 0)) {
			pr_err("%s: failed hooking irq %d for FPGA\n",
			       __func__, wanted);
			return;
		}

		if (unlikely(irq != wanted)) {
			pr_err("%s: got irq %d but wanted %d, bailing.\n",
			       __func__, irq, wanted);
			destroy_irq(irq);
			return;
		}

		irq_set_chip_and_handler_name(irq, &se7724_irq_chip,
					      handle_level_irq, "level");
	}

	irq_set_chained_handler(IRQ0_IRQ, se7724_irq_demux);
	irq_set_irq_type(IRQ0_IRQ, IRQ_TYPE_LEVEL_LOW);

	irq_set_chained_handler(IRQ1_IRQ, se7724_irq_demux);
	irq_set_irq_type(IRQ1_IRQ, IRQ_TYPE_LEVEL_LOW);

	irq_set_chained_handler(IRQ2_IRQ, se7724_irq_demux);
	irq_set_irq_type(IRQ2_IRQ, IRQ_TYPE_LEVEL_LOW);
}
Example No. 18
static int alloc_descs(unsigned int start, unsigned int cnt, int node,
		       const struct irq_affinity_desc *affinity,
		       struct module *owner)
{
	struct irq_desc *desc;
	int i;

	/* Validate affinity mask(s) */
	if (affinity) {
		for (i = 0; i < cnt; i++) {
			if (cpumask_empty(&affinity[i].mask))
				return -EINVAL;
		}
	}

	for (i = 0; i < cnt; i++) {
		const struct cpumask *mask = NULL;
		unsigned int flags = 0;

		if (affinity) {
			if (affinity->is_managed) {
				flags = IRQD_AFFINITY_MANAGED |
					IRQD_MANAGED_SHUTDOWN;
			}
			mask = &affinity->mask;
			node = cpu_to_node(cpumask_first(mask));
			affinity++;
		}

		desc = alloc_desc(start + i, node, flags, mask, owner);
		if (!desc)
			goto err;
		irq_insert_desc(start + i, desc);
		irq_sysfs_add(start + i, desc);
		irq_add_debugfs_entry(start + i, desc);
	}
	bitmap_set(allocated_irqs, start, cnt);
	return start;

err:
	for (i--; i >= 0; i--)
		free_desc(start + i);
	return -ENOMEM;
}
Example No. 19
int arch_register_cpu(int num)
{
	struct node *parent = NULL;
	
#ifdef CONFIG_NUMA
	parent = &sysfs_nodes[cpu_to_node(num)];
#endif /* CONFIG_NUMA */

#ifdef CONFIG_ACPI_BOOT
	/*
	 * If CPEI cannot be re-targetted, and this is
	 * CPEI target, then don't create the control file
	 */
	if (!can_cpei_retarget() && is_cpu_cpei_target(num))
		sysfs_cpus[num].cpu.no_control = 1;
#endif

	return register_cpu(&sysfs_cpus[num].cpu, num, parent);
}
Example No. 20
static void setup_hub_mask(struct hub_irq_data *hd, const struct cpumask *mask)
{
	nasid_t nasid;
	int cpu;

	cpu = cpumask_first_and(mask, cpu_online_mask);
	nasid = COMPACT_TO_NASID_NODEID(cpu_to_node(cpu));
	hd->cpu = cpu;
	if (!cputoslice(cpu)) {
		hd->irq_mask[0] = REMOTE_HUB_PTR(nasid, PI_INT_MASK0_A);
		hd->irq_mask[1] = REMOTE_HUB_PTR(nasid, PI_INT_MASK1_A);
	} else {
		hd->irq_mask[0] = REMOTE_HUB_PTR(nasid, PI_INT_MASK0_B);
		hd->irq_mask[1] = REMOTE_HUB_PTR(nasid, PI_INT_MASK1_B);
	}

	/* Make sure it's not already pending when we connect it. */
	REMOTE_HUB_CLR_INTR(nasid, hd->bit);
}
static void acpi_processor_remove(struct acpi_device *device)
{
	struct acpi_processor *pr;

	if (!device || !acpi_driver_data(device))
		return;

	pr = acpi_driver_data(device);
	if (pr->id >= nr_cpu_ids)
		goto out;

	/*
	 * The only reason why we ever get here is CPU hot-removal.  The CPU is
	 * already offline and the ACPI device removal locking prevents it from
	 * being put back online at this point.
	 *
	 * Unbind the driver from the processor device and detach it from the
	 * ACPI companion object.
	 */
	device_release_driver(pr->dev);
	acpi_unbind_one(pr->dev);

	/* Clean up. */
	per_cpu(processor_device_array, pr->id) = NULL;
	per_cpu(processors, pr->id) = NULL;

	cpu_maps_update_begin();
	cpu_hotplug_begin();

	/* Remove the CPU. */
	arch_unregister_cpu(pr->id);
	acpi_unmap_lsapic(pr->id);

	cpu_hotplug_done();
	cpu_maps_update_done();

	try_offline_node(cpu_to_node(pr->id));

 out:
	free_cpumask_var(pr->throttling.shared_cpu_map);
	kfree(pr);
}
Example No. 22
static int __init topology_init(void)
{
	int cpu;
	struct node *parent = NULL;

	register_nodes();

	register_cpu_notifier(&sysfs_cpu_nb);

	for_each_cpu(cpu) {
		struct cpu *c = &per_cpu(cpu_devices, cpu);

#ifdef CONFIG_NUMA
		/* The node to which a cpu belongs can't be known
		 * until the cpu is made present.
		 */
		parent = NULL;
		if (cpu_present(cpu))
			parent = &node_devices[cpu_to_node(cpu)];
#endif
		/*
		 * For now, we just see if the system supports making
		 * the RTAS calls for CPU hotplug.  But, there may be a
		 * more comprehensive way to do this for an individual
		 * CPU.  For instance, the boot cpu might never be valid
		 * for hotplugging.
		 */
		if (!ppc_md.cpu_die)
			c->no_control = 1;

		if (cpu_online(cpu) || (c->no_control == 0)) {
			register_cpu(c, cpu, parent);

			sysdev_create_file(&c->sysdev, &attr_physical_id);
		}

		if (cpu_online(cpu))
			register_cpu_online(cpu);
	}

	return 0;
}
Example No. 23
static int fb_bpf_init_filter(struct fb_bpf_priv __percpu *fb_priv_cpu,
			      struct sock_fprog_kern *fprog, unsigned int cpu)
{
	int err;
	struct sk_filter *sf, *sfold;
	unsigned int fsize;
	unsigned long flags;

	if (fprog->filter == NULL)
		return -EINVAL;

	fsize = sizeof(struct sock_filter) * fprog->len;

	sf = kmalloc_node(fsize + sizeof(*sf), GFP_KERNEL, cpu_to_node(cpu));
	if (!sf)
		return -ENOMEM;

	memcpy(sf->insns, fprog->filter, fsize);
	atomic_set(&sf->refcnt, 1);
	sf->len = fprog->len;
	sf->bpf_func = sk_run_filter;

	err = sk_chk_filter(sf->insns, sf->len);
	if (err) {
		kfree(sf);
		return err;
	}

	fb_bpf_jit_compile(sf);

	spin_lock_irqsave(&fb_priv_cpu->flock, flags);
	sfold = fb_priv_cpu->filter;
	fb_priv_cpu->filter = sf;
	spin_unlock_irqrestore(&fb_priv_cpu->flock, flags);

	if (sfold) {
		fb_bpf_jit_free(sfold);
		kfree(sfold);
	}

	return 0;
}
Example No. 24
static int tangier_probe(struct platform_device *pdev)
{
	int gsi;
	struct irq_alloc_info info;
	struct intel_mid_wdt_pdata *pdata = pdev->dev.platform_data;

	if (!pdata)
		return -EINVAL;

	/* IOAPIC builds identity mapping between GSI and IRQ on MID */
	gsi = pdata->irq;
	ioapic_set_alloc_attr(&info, cpu_to_node(0), 1, 0);
	if (mp_map_gsi_to_irq(gsi, IOAPIC_MAP_ALLOC, &info) <= 0) {
		dev_warn(&pdev->dev, "cannot find interrupt %d in ioapic\n",
			 gsi);
		return -EINVAL;
	}

	return 0;
}
Example No. 25
static void *etb_alloc_buffer(struct coresight_device *csdev, int cpu,
			      void **pages, int nr_pages, bool overwrite)
{
	int node;
	struct cs_buffers *buf;

	if (cpu == -1)
		cpu = smp_processor_id();
	node = cpu_to_node(cpu);

	buf = kzalloc_node(sizeof(struct cs_buffers), GFP_KERNEL, node);
	if (!buf)
		return NULL;

	buf->snapshot = overwrite;
	buf->nr_pages = nr_pages;
	buf->data_pages = pages;

	return buf;
}
Example No. 26
File: numa.c Project: Gaffey/linux
static void verify_cpu_node_mapping(int cpu, int node)
{
	int base, sibling, i;

	/* Verify that all the threads in the core belong to the same node */
	base = cpu_first_thread_sibling(cpu);

	for (i = 0; i < threads_per_core; i++) {
		sibling = base + i;

		if (sibling == cpu || cpu_is_offline(sibling))
			continue;

		if (cpu_to_node(sibling) != node) {
			WARN(1, "CPU thread siblings %d and %d don't belong"
				" to the same node!\n", cpu, sibling);
			break;
		}
	}
}
Example No. 27
void __init setup_per_cpu_areas(void)
{
	int i;
	unsigned long size;
	char *ptr;

	/* Copy section for each CPU (we discard the original) */
	size = ALIGN(__per_cpu_end - __per_cpu_start, PAGE_SIZE);
#ifdef CONFIG_MODULES
	if (size < PERCPU_ENOUGH_ROOM)
		size = PERCPU_ENOUGH_ROOM;
#endif

	for_each_possible_cpu(i) {
		ptr = alloc_bootmem_pages_node(NODE_DATA(cpu_to_node(i)), size);

		paca[i].data_offset = ptr - __per_cpu_start;
		memcpy(ptr, __per_cpu_start, __per_cpu_end - __per_cpu_start);
	}
}
Example No. 28
/*
 * Assume __initcall executes before all user space. Hopefully kmod
 * doesn't violate that. We'll find out if it does.
 */
static void vsyscall_set_cpu(int cpu)
{
	unsigned long d;
	unsigned long node = 0;
#ifdef CONFIG_NUMA
	node = cpu_to_node(cpu);
#endif
	if (cpu_has(&cpu_data(cpu), X86_FEATURE_RDTSCP))
		write_rdtscp_aux((node << 12) | cpu);

	/*
	 * Store cpu number in limit so that it can be loaded quickly
	 * in user space in vgetcpu. (12 bits for the CPU and 8 bits for the node)
	 */
	d = 0x0f40000000000ULL;
	d |= cpu;
	d |= (node & 0xf) << 12;
	d |= (node >> 4) << 48;

	write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_PER_CPU, &d, DESCTYPE_S);
}
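The comment above describes the packing that the old vgetcpu path relies on: the descriptor limit read back with LSL carries the CPU number in bits 0-11 and the node number in bits 12-19. A minimal user-space decode sketch (an assumption written in the spirit of vgetcpu, not copied from the listing) could look like this:

/*
 * Illustrative sketch only: read back the values packed by
 * vsyscall_set_cpu().  LSL on the GDT_ENTRY_PER_CPU selector (RPL 3)
 * returns the segment limit, i.e. cpu in bits 0-11, node in bits 12-19.
 */
static inline void vgetcpu_sketch(unsigned int *cpu, unsigned int *node)
{
	unsigned int p;

	asm("lsl %1,%0" : "=r" (p)
	    : "r" ((unsigned int)(GDT_ENTRY_PER_CPU * 8 + 3)));
	if (cpu)
		*cpu = p & 0xfff;	/* low 12 bits: CPU number */
	if (node)
		*node = p >> 12;	/* next 8 bits: NUMA node */
}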
Example No. 29
static void *tmc_alloc_etf_buffer(struct coresight_device *csdev, int cpu,
				  void **pages, int nr_pages, bool overwrite)
{
	int node;
	struct cs_buffers *buf;

	if (cpu == -1)
		cpu = smp_processor_id();
	node = cpu_to_node(cpu);

	/* Allocate memory structure for interaction with Perf */
	buf = kzalloc_node(sizeof(struct cs_buffers), GFP_KERNEL, node);
	if (!buf)
		return NULL;

	buf->snapshot = overwrite;
	buf->nr_pages = nr_pages;
	buf->data_pages = pages;

	return buf;
}
Example No. 30
struct irq_desc *irq_to_desc_alloc_cpu(unsigned int irq, int cpu)
{
	struct irq_desc *desc;
	unsigned long flags;
	int node;

	if (irq >= nr_irqs) {
		WARN(1, "irq (%d) >= nr_irqs (%d) in irq_to_desc_alloc\n",
			irq, nr_irqs);
		return NULL;
	}

	desc = irq_desc_ptrs[irq];
	if (desc)
		return desc;

	spin_lock_irqsave(&sparse_irq_lock, flags);

	/* We have to check it to avoid races with another CPU */
	desc = irq_desc_ptrs[irq];
	if (desc)
		goto out_unlock;

	node = cpu_to_node(cpu);
	desc = kzalloc_node(sizeof(*desc), GFP_ATOMIC, node);
	printk(KERN_DEBUG "  alloc irq_desc for %d on cpu %d node %d\n",
		 irq, cpu, node);
	if (!desc) {
		printk(KERN_ERR "can not alloc irq_desc\n");
		BUG_ON(1);
	}
	init_one_irq_desc(irq, desc, cpu);

	irq_desc_ptrs[irq] = desc;

out_unlock:
	spin_unlock_irqrestore(&sparse_irq_lock, flags);

	return desc;
}