static void
do_something(void)
{
	void *p;
	struct kmem_cache* mem_cache = kmem_cache_create("kedr_cache", 32, 32, 0, NULL);

	if(mem_cache != NULL)
	{
#if defined(CONFIG_SLAB)
#  if LINUX_VERSION_CODE < KERNEL_VERSION(3, 7, 0)
		p = kmem_cache_alloc_node_trace(32, mem_cache, GFP_KERNEL, numa_node_id());
#  else
		p = kmem_cache_alloc_node_trace(mem_cache, GFP_KERNEL, numa_node_id(), 32);
#  endif /* LINUX_VERSION_CODE < KERNEL_VERSION(3, 7, 0) */

#elif defined(CONFIG_SLUB)
		p = kmem_cache_alloc_node_trace(mem_cache, GFP_KERNEL, numa_node_id(), 32);
#else
#  error "kmem_cache_alloc_node_trace() should not be defined"
#endif
		if(p != NULL)
		{
			kmem_cache_free(mem_cache, p);
		}
		else
		{
			printk(KERN_INFO "Cannot allocate object in own kmem_cache.");
		}
		kmem_cache_destroy(mem_cache);
	}
	else
	{
		printk(KERN_INFO "Cannot create kmem_cache.");
	}
}
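
A note on the snippet above: it has to pick between two kmem_cache_alloc_node_trace() signatures because the argument order differs between SLAB before 3.7 and the later SLAB/SLUB variants. For comparison, here is a minimal sketch that uses the stable kmem_cache_alloc_node() API and therefore needs no version checks; the function name do_something_simple() is a placeholder, not part of the original example.

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/topology.h>

/* Sketch only: node-local allocation from a private cache without the
 * tracing variant, so no SLAB/SLUB or kernel-version checks are needed. */
static void do_something_simple(void)
{
	void *p;
	struct kmem_cache *mem_cache =
		kmem_cache_create("kedr_cache", 32, 32, 0, NULL);

	if (mem_cache == NULL) {
		printk(KERN_INFO "Cannot create kmem_cache.\n");
		return;
	}

	p = kmem_cache_alloc_node(mem_cache, GFP_KERNEL, numa_node_id());
	if (p != NULL)
		kmem_cache_free(mem_cache, p);
	else
		printk(KERN_INFO "Cannot allocate object in own kmem_cache.\n");

	kmem_cache_destroy(mem_cache);
}
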
Example 2
void
tfw_cache_req_process(TfwHttpReq *req, tfw_http_req_cache_cb_t action,
		      void *data)
{
	int node;
	unsigned long key;

	if (!tfw_cfg.cache)
		return;

	key = tfw_cache_key_calc(req);

	node = tfw_cache_key_node(key);
	if (node != numa_node_id()) {
		/* Schedule the cache entry to the right node. */
		TfwCWork *cw = kmem_cache_alloc(c_cache, GFP_ATOMIC);
		if (!cw)
			goto process_locally;
		INIT_WORK(&cw->work, tfw_cache_req_process_node);
		cw->cw_req = req;
		cw->cw_act = action;
		cw->cw_data = data;
		cw->cw_key = key;
		queue_work_on(tfw_cache_sched_work_cpu(node), cache_wq,
			      (struct work_struct *)cw);
		return;
	}

process_locally:
	__cache_req_process_node(req, key, action, data);
}
Example 3
/*
 * mspec_nopfn
 *
 * Creates a mspec page and maps it to user space.
 */
static unsigned long
mspec_nopfn(struct vm_area_struct *vma, unsigned long address)
{
    unsigned long paddr, maddr;
    unsigned long pfn;
    int index;
    struct vma_data *vdata = vma->vm_private_data;

    index = (address - vma->vm_start) >> PAGE_SHIFT;
    maddr = (volatile unsigned long) vdata->maddr[index];
    if (maddr == 0) {
        maddr = uncached_alloc_page(numa_node_id());
        if (maddr == 0)
            return NOPFN_OOM;

        spin_lock(&vdata->lock);
        if (vdata->maddr[index] == 0) {
            vdata->count++;
            vdata->maddr[index] = maddr;
        } else {
            uncached_free_page(maddr);
            maddr = vdata->maddr[index];
        }
        spin_unlock(&vdata->lock);
    }

    if (vdata->type == MSPEC_FETCHOP)
        paddr = TO_AMO(maddr);
    else
        paddr = maddr & ~__IA64_UNCACHED_OFFSET;

    pfn = paddr >> PAGE_SHIFT;

    return pfn;
}
Example 4
void ovs_flow_stats_update(struct sw_flow *flow, __be16 tcp_flags,
			   const struct sk_buff *skb)
{
	struct flow_stats *stats;
	int node = numa_node_id();

	stats = rcu_dereference(flow->stats[node]);

	/* Check if already have node-specific stats. */
	if (likely(stats)) {
		spin_lock(&stats->lock);
		/* Mark if we write on the pre-allocated stats. */
		if (node == 0 && unlikely(flow->stats_last_writer != node))
			flow->stats_last_writer = node;
	} else {
		stats = rcu_dereference(flow->stats[0]); /* Pre-allocated. */
		spin_lock(&stats->lock);

		/* If the current NUMA-node is the only writer on the
		 * pre-allocated stats, keep using them.
		 */
		if (unlikely(flow->stats_last_writer != node)) {
			/* A previous locker may have already allocated the
			 * stats, so we need to check again.  If node-specific
			 * stats were already allocated, we update the pre-
			 * allocated stats as we have already locked them.
			 */
			if (likely(flow->stats_last_writer != NUMA_NO_NODE)
			    && likely(!rcu_access_pointer(flow->stats[node]))) {
				/* Try to allocate node-specific stats. */
				struct flow_stats *new_stats;

				new_stats =
					kmem_cache_alloc_node(flow_stats_cache,
							      GFP_THISNODE |
							      __GFP_NOMEMALLOC,
							      node);
				if (likely(new_stats)) {
					new_stats->used = jiffies;
					new_stats->packet_count = 1;
					new_stats->byte_count = skb->len;
					new_stats->tcp_flags = tcp_flags;
					spin_lock_init(&new_stats->lock);

					rcu_assign_pointer(flow->stats[node],
							   new_stats);
					goto unlock;
				}
			}
			flow->stats_last_writer = node;
		}
	}

	stats->used = jiffies;
	stats->packet_count++;
	stats->byte_count += skb->len;
	stats->tcp_flags |= tcp_flags;
unlock:
	spin_unlock(&stats->lock);
}
Example 5
static void __init mx31ads_init_expio(void)
{
	int irq_base;
	int i, irq;

	printk(KERN_INFO "MX31ADS EXPIO(CPLD) hardware\n");

	/*
	 * Configure INT line as GPIO input
	 */
	mxc_iomux_alloc_pin(IOMUX_MODE(MX31_PIN_GPIO1_4, IOMUX_CONFIG_GPIO), "expio");

	/* disable the interrupt and clear the status */
	__raw_writew(0xFFFF, PBC_INTMASK_CLEAR_REG);
	__raw_writew(0xFFFF, PBC_INTSTATUS_REG);

	irq_base = irq_alloc_descs(-1, 0, MXC_MAX_EXP_IO_LINES, numa_node_id());
	WARN_ON(irq_base < 0);

	domain = irq_domain_add_legacy(NULL, MXC_MAX_EXP_IO_LINES, irq_base, 0,
				       &irq_domain_simple_ops, NULL);
	WARN_ON(!domain);

	for (i = irq_base; i < irq_base + MXC_MAX_EXP_IO_LINES; i++) {
		irq_set_chip_and_handler(i, &expio_irq_chip, handle_level_irq);
		set_irq_flags(i, IRQF_VALID);
	}
	irq = gpio_to_irq(IOMUX_TO_GPIO(MX31_PIN_GPIO1_4));
	irq_set_irq_type(irq, IRQ_TYPE_LEVEL_HIGH);
	irq_set_chained_handler(irq, mx31ads_expio_irq_handler);
}
Example 6
void __init sh7372_init_irq(void)
{
	void __iomem *intevtsa;
	int n;

	intcs_ffd2 = ioremap_nocache(0xffd20000, PAGE_SIZE);
	intevtsa = intcs_ffd2 + 0x100;
	intcs_ffd5 = ioremap_nocache(0xffd50000, PAGE_SIZE);

	register_intc_controller(&intca_desc);
	register_intc_controller(&intca_irq_pins_lo_desc);
	register_intc_controller(&intca_irq_pins_hi_desc);
	register_intc_controller(&intcs_desc);

	/* setup dummy cascade chip for INTCS */
	n = evt2irq(0xf80);
	irq_alloc_desc_at(n, numa_node_id());
	irq_set_chip_and_handler_name(n, &dummy_irq_chip,
				      handle_level_irq, "level");
	set_irq_flags(n, IRQF_VALID); /* yuck */

	/* demux using INTEVTSA */
	irq_set_handler_data(n, (void *)intevtsa);
	irq_set_chained_handler(n, intcs_demux);

	/* unmask INTCS in INTAMASK */
	iowrite16(0, intcs_ffd2 + 0x104);
}
Example 7
/*
 * The shift value is now the number of bits to shift, not the number of
 * bits/4. This is to make it easier to read the value directly from the
 * datasheets. The IPR address is calculated using the ipr_offset table.
 */
void register_ipr_controller(struct ipr_desc *desc)
{
	int i;

	desc->chip.mask = disable_ipr_irq;
	desc->chip.unmask = enable_ipr_irq;
	desc->chip.mask_ack = disable_ipr_irq;

	for (i = 0; i < desc->nr_irqs; i++) {
		struct ipr_data *p = desc->ipr_data + i;
		struct irq_desc *irq_desc;

		BUG_ON(p->ipr_idx >= desc->nr_offsets);
		BUG_ON(!desc->ipr_offsets[p->ipr_idx]);

		irq_desc = irq_to_desc_alloc_node(p->irq, numa_node_id());
		if (unlikely(!irq_desc)) {
			printk(KERN_INFO "can not get irq_desc for %d\n",
			       p->irq);
			continue;
		}

		disable_irq_nosync(p->irq);
		set_irq_chip_and_handler_name(p->irq, &desc->chip,
				      handle_level_irq, "level");
		set_irq_chip_data(p->irq, p);
		disable_ipr_irq(p->irq);
	}
}
Example 8
static int __init topology_init(void)
{
	int i, ret;

#ifdef CONFIG_NEED_MULTIPLE_NODES
	for_each_online_node(i)
		register_one_node(i);
#endif

	for_each_present_cpu(i) {
		ret = register_cpu(&per_cpu(cpu_devices, i), i);
		if (unlikely(ret))
			printk(KERN_WARNING "%s: register_cpu %d failed (%d)\n",
			       __FUNCTION__, i, ret);
	}

#if defined(CONFIG_NUMA) && !defined(CONFIG_SMP)
	/*
	 * In the UP case, make sure the CPU association is still
	 * registered under each node. Without this, sysfs fails
	 * to make the connection between nodes other than node0
	 * and cpu0.
	 */
	for_each_online_node(i)
		if (i != numa_node_id())
			register_cpu_under_node(raw_smp_processor_id(), i);
#endif

	return 0;
}
Example 9
/*
 * uncached_alloc_page
 *
 * @starting_nid: node id of node to start with, or -1
 * @n_pages: number of contiguous pages to allocate
 *
 * Allocate the specified number of contiguous uncached pages on the
 * requested node. If not enough contiguous uncached pages are available
 * on the requested node, roundrobin starting with the next higher node.
 */
unsigned long uncached_alloc_page(int starting_nid, int n_pages)
{
	unsigned long uc_addr;
	struct uncached_pool *uc_pool;
	int nid;

	if (unlikely(starting_nid >= MAX_NUMNODES))
		return 0;

	if (starting_nid < 0)
		starting_nid = numa_node_id();
	nid = starting_nid;

	do {
		if (!node_state(nid, N_HIGH_MEMORY))
			continue;
		uc_pool = &uncached_pools[nid];
		if (uc_pool->pool == NULL)
			continue;
		do {
			uc_addr = gen_pool_alloc(uc_pool->pool,
						 n_pages * PAGE_SIZE);
			if (uc_addr != 0)
				return uc_addr;
		} while (uncached_add_chunk(uc_pool, nid) == 0);

	} while ((nid = (nid + 1) % MAX_NUMNODES) != starting_nid);

	return 0;
}
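
The comment above spells out the contract: pass -1 as starting_nid to start from the current node, and the allocator round-robins across nodes when the requested one has no contiguous uncached pages left. A minimal caller sketch follows (uncached_demo() is a placeholder name); the matching release call is uncached_free_page(), as the mspec fault handler further below uses.

/* Sketch only: allocate one uncached page near the current node and
 * free it again with the matching uncached_free_page() call. */
static int uncached_demo(void)
{
	unsigned long uc_addr = uncached_alloc_page(-1, 1);

	if (uc_addr == 0)
		return -ENOMEM;

	/* ... use the uncached mapping at uc_addr ... */

	uncached_free_page(uc_addr, 1);
	return 0;
}
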
Example 10
/*
 * This can be refined. Currently it tries to do round robin; instead it
 * should do a concentric-circle search, starting from the current node.
 */
struct page * alloc_pages(int gfp_mask, unsigned long order)
{
	struct page *ret = 0;
	pg_data_t *start, *temp;
#ifndef CONFIG_NUMA
	unsigned long flags;
	static pg_data_t *next = 0;
#endif

	if (order >= MAX_ORDER)
		return NULL;
#ifdef CONFIG_NUMA
	temp = NODE_DATA(numa_node_id());
#else
	spin_lock_irqsave(&node_lock, flags);
	if (!next) next = pgdat_list;
	temp = next;
	next = next->node_next;
	spin_unlock_irqrestore(&node_lock, flags);
#endif
	start = temp;
	while (temp) {
		if ((ret = alloc_pages_pgdat(temp, gfp_mask, order)))
			return(ret);
		temp = temp->node_next;
	}
	temp = pgdat_list;
	while (temp != start) {
		if ((ret = alloc_pages_pgdat(temp, gfp_mask, order)))
			return(ret);
		temp = temp->node_next;
	}
	return(0);
}
Example 11
/*
 * The shift value is now the number of bits to shift, not the number of
 * bits/4. This is to make it easier to read the value directly from the
 * datasheets. The IPR address is calculated using the ipr_offset table.
 */
void register_ipr_controller(struct ipr_desc *desc)
{
	int i;

	desc->chip.irq_mask = disable_ipr_irq;
	desc->chip.irq_unmask = enable_ipr_irq;

	for (i = 0; i < desc->nr_irqs; i++) {
		struct ipr_data *p = desc->ipr_data + i;
		int res;

		BUG_ON(p->ipr_idx >= desc->nr_offsets);
		BUG_ON(!desc->ipr_offsets[p->ipr_idx]);

		res = irq_alloc_desc_at(p->irq, numa_node_id());
		if (unlikely(res != p->irq && res != -EEXIST)) {
			printk(KERN_INFO "can not get irq_desc for %d\n",
			       p->irq);
			continue;
		}

		disable_irq_nosync(p->irq);
		irq_set_chip_and_handler_name(p->irq, &desc->chip,
					      handle_level_irq, "level");
		irq_set_chip_data(p->irq, p);
		disable_ipr_irq(irq_get_irq_data(p->irq));
	}
}
Example 12
static int __init topology_init(void)
{
	int i, ret;

#ifdef CONFIG_NEED_MULTIPLE_NODES
	for_each_online_node(i)
		register_one_node(i);
#endif

	for_each_present_cpu(i) {
		struct cpu *c = &per_cpu(cpu_devices, i);

		c->hotpluggable = 1;

		ret = register_cpu(c, i);
		if (unlikely(ret))
			printk(KERN_WARNING "%s: register_cpu %d failed (%d)\n",
			       __func__, i, ret);
	}

#if defined(CONFIG_NUMA) && !defined(CONFIG_SMP)
	for_each_online_node(i)
		if (i != numa_node_id())
			register_cpu_under_node(raw_smp_processor_id(), i);
#endif

	return 0;
}
Example 13
static int __devinit em_gio_irq_domain_init(struct em_gio_priv *p)
{
	struct platform_device *pdev = p->pdev;
	struct gpio_em_config *pdata = pdev->dev.platform_data;

	p->irq_base = irq_alloc_descs(pdata->irq_base, 0,
				      pdata->number_of_pins, numa_node_id());
	if (IS_ERR_VALUE(p->irq_base)) {
		dev_err(&pdev->dev, "cannot get irq_desc\n");
		return -ENXIO;
	}
	pr_debug("gio: hw base = %d, nr = %d, sw base = %d\n",
		 pdata->gpio_base, pdata->number_of_pins, p->irq_base);

	p->irq_domain = irq_domain_add_legacy(pdev->dev.of_node,
					      pdata->number_of_pins,
					      p->irq_base, 0,
					      &em_gio_irq_domain_ops, p);
	if (!p->irq_domain) {
		irq_free_descs(p->irq_base, pdata->number_of_pins);
		return -ENXIO;
	}

	return 0;
}
Example 14
/**
 * pcpu_alloc_pages - allocates pages for @chunk
 * @chunk: target chunk
 * @pages: array to put the allocated pages into, indexed by pcpu_page_idx()
 * @populated: populated bitmap
 * @page_start: page index of the first page to be allocated
 * @page_end: page index of the last page to be allocated + 1
 *
 * Allocate pages [@page_start,@page_end) into @pages for all units.
 * The allocation is for @chunk.  Percpu core doesn't care about the
 * content of @pages and will pass it verbatim to pcpu_map_pages().
 */
static int pcpu_alloc_pages(struct pcpu_chunk *chunk,
			    struct page **pages, unsigned long *populated,
			    int page_start, int page_end)
{
	const gfp_t gfp = GFP_KERNEL | __GFP_HIGHMEM | __GFP_COLD;
	unsigned int cpu;
	int nid;
	int i;

	for_each_possible_cpu(cpu) {
		for (i = page_start; i < page_end; i++) {
			struct page **pagep = &pages[pcpu_page_idx(cpu, i)];

			//*pagep = alloc_pages_node(cpu_to_node(cpu), gfp, 0);
			nid = cpu_to_node(cpu);

			if((nid == -1) || !(node_zonelist(nid, GFP_KERNEL)->_zonerefs->zone))
				nid = numa_node_id();

			*pagep = alloc_pages_node(nid, gfp, 0);
			
			if (!*pagep) {
				pcpu_free_pages(chunk, pages, populated,
						page_start, page_end);
				return -ENOMEM;
			}
		}
	}
	return 0;
}
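
The interesting part of this variant is the fallback: when cpu_to_node() reports no node, or a node whose zonelist has no usable zone, the page is allocated on the current node instead. The same check could live in a small helper; the sketch below is illustrative only (cpu_to_mem_node() is a made-up name, not part of the percpu code).

/* Illustrative helper only: resolve a CPU to a node that actually has
 * usable memory, falling back to the node of the running CPU. */
static int cpu_to_mem_node(unsigned int cpu)
{
	int nid = cpu_to_node(cpu);

	if (nid == NUMA_NO_NODE ||
	    !node_zonelist(nid, GFP_KERNEL)->_zonerefs->zone)
		nid = numa_node_id();

	return nid;
}
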
Example 15
int create_irq(void)
{
	int irq = irq_alloc_desc(numa_node_id());
	if (irq >= 0)
		activate_irq(irq);

	return irq;
}
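
create_irq() here simply grabs the next free descriptor on the current node and activates it. A hedged usage sketch follows, assuming the generic request_irq()/irq_free_desc() API; the demo_* names are placeholders.

/* Sketch only: allocate a dynamic IRQ, wire a handler to it, and give
 * the descriptor back if request_irq() fails. */
static int demo_setup_dynamic_irq(irq_handler_t handler, void *dev)
{
	int irq = create_irq();

	if (irq < 0)
		return irq;

	if (request_irq(irq, handler, 0, "demo", dev)) {
		irq_free_desc(irq);
		return -EBUSY;
	}

	return irq;
}
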
Example 16
/* Called from do_fork() to get the NUMA node for the task about to be created. */
int tsk_fork_get_node(struct task_struct *tsk)
{
#ifdef CONFIG_NUMA
	if (tsk == kthreadd_task)
		return tsk->pref_node_fork;
#endif
	return numa_node_id();
}
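
tsk_fork_get_node() picks the NUMA node on which fork should place the new task: kthreadd can steer the placement through pref_node_fork, every other caller simply gets the current node. The sketch below shows the consumer side in spirit only; task_cache and the function name are placeholders, not the real fork-path identifiers.

/* Sketch only: place the new task_struct on the node chosen by
 * tsk_fork_get_node(); "task_cache" is a placeholder cache. */
static struct task_struct *alloc_task_on_pref_node(struct task_struct *orig)
{
	int node = tsk_fork_get_node(orig);

	return kmem_cache_alloc_node(task_cache, GFP_KERNEL, node);
}
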
Example 17
static unsigned long max_pages(unsigned long min_pages)
{
	unsigned long node_free_pages, max;

	node_free_pages = node_page_state(numa_node_id(),
			NR_FREE_PAGES);
	max = node_free_pages / FRACTION_OF_NODE_MEM;
	return max(max, min_pages);
}
Example 18
static int pcifront_enable_irq(struct pci_dev *dev)
{
	u8 irq;
	pci_read_config_byte(dev, PCI_INTERRUPT_LINE, &irq);
	irq_to_desc_alloc_node(irq, numa_node_id());
	evtchn_register_pirq(irq);
	dev->irq = irq;

	return 0;
}
Example 19
void __init arch_init_irq(void)
{
	int corehi_irq;

	/*
	 * Preallocate the i8259's expected virq's here. Since irqchip_init()
	 * will probe the irqchips in hierarchical order, i8259 is probed last.
	 * If anything allocates a virq before the i8259 is probed, it will
	 * be given one of the i8259's expected range and consequently setup
	 * of the i8259 will fail.
	 */
	WARN(irq_alloc_descs(I8259A_IRQ_BASE, I8259A_IRQ_BASE,
			    16, numa_node_id()) < 0,
		"Cannot reserve i8259 virqs at IRQ%d\n", I8259A_IRQ_BASE);

	i8259_set_poll(mips_pcibios_iack);
	irqchip_init();

	switch (mips_revision_sconid) {
	case MIPS_REVISION_SCON_SOCIT:
	case MIPS_REVISION_SCON_ROCIT:
		if (cpu_has_veic)
			init_msc_irqs(MIPS_MSC01_IC_REG_BASE,
					MSC01E_INT_BASE, msc_eicirqmap,
					msc_nr_eicirqs);
		else
			init_msc_irqs(MIPS_MSC01_IC_REG_BASE,
					MSC01C_INT_BASE, msc_irqmap,
					msc_nr_irqs);
		break;

	case MIPS_REVISION_SCON_SOCITSC:
	case MIPS_REVISION_SCON_SOCITSCP:
		if (cpu_has_veic)
			init_msc_irqs(MIPS_SOCITSC_IC_REG_BASE,
					MSC01E_INT_BASE, msc_eicirqmap,
					msc_nr_eicirqs);
		else
			init_msc_irqs(MIPS_SOCITSC_IC_REG_BASE,
					MSC01C_INT_BASE, msc_irqmap,
					msc_nr_irqs);
	}

	if (gic_present) {
		corehi_irq = MIPS_CPU_IRQ_BASE + MIPSCPU_INT_COREHI;
	} else if (cpu_has_veic) {
		set_vi_handler(MSC01E_INT_COREHI, corehi_irqdispatch);
		corehi_irq = MSC01E_INT_BASE + MSC01E_INT_COREHI;
	} else {
		corehi_irq = MIPS_CPU_IRQ_BASE + MIPSCPU_INT_COREHI;
	}

	setup_irq(corehi_irq, &corehi_irqaction);
}
Example 20
static unsigned long max_pages(unsigned long min_pages)
{
    unsigned long node_free_pages, max;
    struct zone *zones = NODE_DATA(numa_node_id())->node_zones;

    node_free_pages =
#ifdef CONFIG_ZONE_DMA
        zone_page_state(&zones[ZONE_DMA], NR_FREE_PAGES) +
#endif
#ifdef CONFIG_ZONE_DMA32
        zone_page_state(&zones[ZONE_DMA32], NR_FREE_PAGES) +
#endif
        zone_page_state(&zones[ZONE_NORMAL], NR_FREE_PAGES);

    max = node_free_pages / FRACTION_OF_NODE_MEM;
    return max(max, min_pages);
}
Example 21
static struct page *dequeue_huge_page(void)
{
	int nid = numa_node_id();
	struct page *page = NULL;

	if (list_empty(&hugepage_freelists[nid])) {
		for (nid = 0; nid < MAX_NUMNODES; ++nid)
			if (!list_empty(&hugepage_freelists[nid]))
				break;
	}
	if (nid >= 0 && nid < MAX_NUMNODES &&
	    !list_empty(&hugepage_freelists[nid])) {
		page = list_entry(hugepage_freelists[nid].next,
				  struct page, lru);
		list_del(&page->lru);
		free_huge_pages--;
		free_huge_pages_node[nid]--;
	}
	return page;
}
Example 22
/* Allocate DMA memory on node near device */
noinline static void *
dma_alloc_pages(struct device *dev, gfp_t gfp, unsigned order)
{
	struct page *page;
	int node;
#ifdef CONFIG_PCI
	if (dev->bus == &pci_bus_type)
		node = pcibus_to_node(to_pci_dev(dev)->bus);
	else
#endif
		node = numa_node_id();

	if (node < first_node(node_online_map))
		node = first_node(node_online_map);

	page = alloc_pages_node(node, gfp, order);
	return page ? page_address(page) : NULL;
}
Example 23
void
tfw_cache_add(TfwHttpResp *resp, TfwHttpReq *req)
{
	TfwCWork *cw;
	TfwCacheEntry *ce, cdata = {{}};
	unsigned long key;
	size_t len = sizeof(cdata);

	if (!tfw_cfg.cache)
		goto out;

	key = tfw_cache_key_calc(req);

	/* TODO copy at least first part of URI here. */

	ce = (TfwCacheEntry *)tdb_entry_create(db, key, &cdata, &len);
	BUG_ON(len != sizeof(cdata));
	if (!ce)
		goto out;

	ce->resp = resp;

	/*
	 * We must write the entry key now because the request dies
	 * when the function finishes.
	 */
	if (tfw_cache_entry_key_copy(ce, req))
		goto out;

	cw = kmem_cache_alloc(c_cache, GFP_ATOMIC);
	if (!cw)
		goto out;
	INIT_WORK(&cw->work, tfw_cache_copy_resp);
	cw->cw_ce = ce;
	queue_work_on(tfw_cache_sched_work_cpu(numa_node_id()), cache_wq,
		      (struct work_struct *)cw);

out:
	/* Now we don't need the request and the response anymore. */
	tfw_http_msg_free((TfwHttpMsg *)req);
	tfw_http_msg_free((TfwHttpMsg *)resp);
}
Example 24
/*
 * mspec_fault
 *
 * Creates a mspec page and maps it to user space.
 */
static int
mspec_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	unsigned long paddr, maddr;
	unsigned long pfn;
	pgoff_t index = vmf->pgoff;
	struct vma_data *vdata = vma->vm_private_data;

	maddr = (volatile unsigned long) vdata->maddr[index];
	if (maddr == 0) {
		maddr = uncached_alloc_page(numa_node_id(), 1);
		if (maddr == 0)
			return VM_FAULT_OOM;

		spin_lock(&vdata->lock);
		if (vdata->maddr[index] == 0) {
			vdata->count++;
			vdata->maddr[index] = maddr;
		} else {
			uncached_free_page(maddr, 1);
			maddr = vdata->maddr[index];
		}
		spin_unlock(&vdata->lock);
	}

	if (vdata->type == MSPEC_FETCHOP)
		paddr = TO_AMO(maddr);
	else
		paddr = maddr & ~__IA64_UNCACHED_OFFSET;

	pfn = paddr >> PAGE_SHIFT;

	/*
	 * vm_insert_pfn can fail with -EBUSY, but in that case it will
	 * be because another thread has installed the pte first, so it
	 * is no problem.
	 */
	vm_insert_pfn(vma, vmf->address, pfn);

	return VM_FAULT_NOPAGE;
}
Example 25
static unsigned long max_pages(unsigned long min_pages)
{
	unsigned long node_free_pages, max;
	int node = numa_node_id();
	struct zone *zones = NODE_DATA(node)->node_zones;
	int num_cpus_on_node;

	node_free_pages =
#ifdef CONFIG_ZONE_DMA
		zone_page_state(&zones[ZONE_DMA], NR_FREE_PAGES) +
#endif
#ifdef CONFIG_ZONE_DMA32
		zone_page_state(&zones[ZONE_DMA32], NR_FREE_PAGES) +
#endif
		zone_page_state(&zones[ZONE_NORMAL], NR_FREE_PAGES);

	max = node_free_pages / FRACTION_OF_NODE_MEM;

	num_cpus_on_node = cpumask_weight(cpumask_of_node(node));
	max /= num_cpus_on_node;

	return max(max, min_pages);
}
Example 26
static void
do_something(void)
{
	void *p;
	struct kmem_cache* mem_cache = kmem_cache_create("kedr_cache", 32, 32, 0, NULL);

	if(mem_cache != NULL)
	{
		p = kmem_cache_alloc_node_notrace(mem_cache, GFP_KERNEL, numa_node_id());
		if(p != NULL)
		{
			kmem_cache_free(mem_cache, p);
		}
		else
		{
			printk(KERN_INFO "Cannot allocate object in own kmem_cache.");
		}
		kmem_cache_destroy(mem_cache);
	}
	else
	{
		printk(KERN_INFO "Cannot create kmem_cache.");
	}
}
Example 27
	/*
	 * Note: pageblock_nr_pages != MAX_ORDER, so chunks of free pages
	 * are not necessarily aligned to pageblock_nr_pages.
	 * Check the migratetype first.
	 */
	for (pfn = start_pfn; pfn < end_pfn; pfn += pageblock_nr_pages) {
		page = __first_valid_page(pfn, pageblock_nr_pages);
		if (page && !is_migrate_isolate_page(page))
			break;
	}
	page = __first_valid_page(start_pfn, end_pfn - start_pfn);
	if ((pfn < end_pfn) || !page)
		return -EBUSY;
	/* Check all pages are free or marked as ISOLATED */
	zone = page_zone(page);
	spin_lock_irqsave(&zone->lock, flags);
	pfn = __test_page_isolated_in_pageblock(start_pfn, end_pfn,
						skip_hwpoisoned_pages);
	spin_unlock_irqrestore(&zone->lock, flags);

	trace_test_pages_isolated(start_pfn, end_pfn, pfn);

	return pfn < end_pfn ? -EBUSY : 0;
}

struct page *alloc_migrate_target(struct page *page, unsigned long private,
				  int **resultp)
{
	return new_page_nodemask(page, numa_node_id(), &node_states[N_MEMORY]);
}
Example 28
void ovs_flow_stats_update(struct sw_flow *flow, struct sk_buff *skb)
{
	struct flow_stats *stats;
	__be16 tcp_flags = 0;
	int node = numa_node_id();

	stats = rcu_dereference(flow->stats[node]);

	if ((flow->key.eth.type == htons(ETH_P_IP) ||
	     flow->key.eth.type == htons(ETH_P_IPV6)) &&
	    flow->key.ip.frag != OVS_FRAG_TYPE_LATER &&
	    flow->key.ip.proto == IPPROTO_TCP &&
	    likely(skb->len >= skb_transport_offset(skb) + sizeof(struct tcphdr))) {
		tcp_flags = TCP_FLAGS_BE16(tcp_hdr(skb));
	}

	/* Check if already have node-specific stats. */
	if (likely(stats)) {
		spin_lock(&stats->lock);
		/* Mark if we write on the pre-allocated stats. */
		if (node == 0 && unlikely(flow->stats_last_writer != node))
			flow->stats_last_writer = node;
	} else {
		stats = rcu_dereference(flow->stats[0]); /* Pre-allocated. */
		spin_lock(&stats->lock);

		/* If the current NUMA-node is the only writer on the
		 * pre-allocated stats, keep using them.
		 */
		if (unlikely(flow->stats_last_writer != node)) {
			/* A previous locker may have already allocated the
			 * stats, so we need to check again.  If node-specific
			 * stats were already allocated, we update the pre-
			 * allocated stats as we have already locked them.
			 */
			if (likely(flow->stats_last_writer != NUMA_NO_NODE)
			    && likely(!rcu_dereference(flow->stats[node]))) {
				/* Try to allocate node-specific stats. */
				struct flow_stats *new_stats;

				new_stats =
					kmem_cache_alloc_node(flow_stats_cache,
							      GFP_THISNODE |
							      __GFP_NOMEMALLOC,
							      node);
				if (likely(new_stats)) {
					new_stats->used = jiffies;
					new_stats->packet_count = 1;
					new_stats->byte_count = skb->len;
					new_stats->tcp_flags = tcp_flags;
					spin_lock_init(&new_stats->lock);

					rcu_assign_pointer(flow->stats[node],
							   new_stats);
					goto unlock;
				}
			}
			flow->stats_last_writer = node;
		}
	}

	stats->used = jiffies;
	stats->packet_count++;
	stats->byte_count += skb->len;
	stats->tcp_flags |= tcp_flags;
unlock:
	spin_unlock(&stats->lock);
}
Example 29
/**
 * sn_cpu_init - initialize per-cpu data areas
 * @cpuid: cpuid of the caller
 *
 * Called during cpu initialization on each cpu as it starts.
 * Currently, initializes the per-cpu data area for SNIA.
 * Also sets up a few fields in the nodepda.  Also known as
 * platform_cpu_init() by the ia64 machvec code.
 */
void __cpuinit sn_cpu_init(void)
{
	int cpuid;
	int cpuphyid;
	int nasid;
	int subnode;
	int slice;
	int cnode;
	int i;
	static int wars_have_been_checked;

	cpuid = smp_processor_id();
	if (cpuid == 0 && IS_MEDUSA()) {
		if (ia64_sn_is_fake_prom())
			sn_prom_type = 2;
		else
			sn_prom_type = 1;
		printk(KERN_INFO "Running on medusa with %s PROM\n",
		       (sn_prom_type == 1) ? "real" : "fake");
	}

	memset(pda, 0, sizeof(pda));
	if (ia64_sn_get_sn_info(0, &sn_hub_info->shub2,
				&sn_hub_info->nasid_bitmask,
				&sn_hub_info->nasid_shift,
				&sn_system_size, &sn_sharing_domain_size,
				&sn_partition_id, &sn_coherency_id,
				&sn_region_size))
		BUG();
	sn_hub_info->as_shift = sn_hub_info->nasid_shift - 2;

	/*
	 * Don't check status. The SAL call is not supported on all PROMs
	 * but a failure is harmless.
	 */
	(void) ia64_sn_set_cpu_number(cpuid);

	/*
	 * The boot cpu makes this call again after platform initialization is
	 * complete.
	 */
	if (nodepdaindr[0] == NULL)
		return;

	for (i = 0; i < MAX_PROM_FEATURE_SETS; i++)
		if (ia64_sn_get_prom_feature_set(i, &sn_prom_features[i]) != 0)
			break;

	cpuphyid = get_sapicid();

	if (ia64_sn_get_sapic_info(cpuphyid, &nasid, &subnode, &slice))
		BUG();

	for (i=0; i < MAX_NUMNODES; i++) {
		if (nodepdaindr[i]) {
			nodepdaindr[i]->phys_cpuid[cpuid].nasid = nasid;
			nodepdaindr[i]->phys_cpuid[cpuid].slice = slice;
			nodepdaindr[i]->phys_cpuid[cpuid].subnode = subnode;
		}
	}

	cnode = nasid_to_cnodeid(nasid);

	sn_nodepda = nodepdaindr[cnode];

	pda->led_address =
	    (typeof(pda->led_address)) (LED0 + (slice << LED_CPU_SHIFT));
	pda->led_state = LED_ALWAYS_SET;
	pda->hb_count = HZ / 2;
	pda->hb_state = 0;
	pda->idle_flag = 0;

	if (cpuid != 0) {
		/* copy cpu 0's sn_cnodeid_to_nasid table to this cpu's */
		memcpy(sn_cnodeid_to_nasid,
		       (&per_cpu(__sn_cnodeid_to_nasid, 0)),
		       sizeof(__ia64_per_cpu_var(__sn_cnodeid_to_nasid)));
	}

	/*
	 * Check for WARs.
	 * Only needs to be done once, on BSP.
	 * Has to be done after loop above, because it uses this cpu's
	 * sn_cnodeid_to_nasid table which was just initialized if this
	 * isn't cpu 0.
	 * Has to be done before assignment below.
	 */
	if (!wars_have_been_checked) {
		sn_check_for_wars();
		wars_have_been_checked = 1;
	}
	sn_hub_info->shub_1_1_found = shub_1_1_found;

	/*
	 * Set up addresses of PIO/MEM write status registers.
	 */
	{
		u64 pio1[] = {SH1_PIO_WRITE_STATUS_0, 0, SH1_PIO_WRITE_STATUS_1, 0};
		u64 pio2[] = {SH2_PIO_WRITE_STATUS_0, SH2_PIO_WRITE_STATUS_2,
			SH2_PIO_WRITE_STATUS_1, SH2_PIO_WRITE_STATUS_3};
		u64 *pio;
		pio = is_shub1() ? pio1 : pio2;
		pda->pio_write_status_addr =
		   (volatile unsigned long *)GLOBAL_MMR_ADDR(nasid, pio[slice]);
		pda->pio_write_status_val = is_shub1() ? SH_PIO_WRITE_STATUS_PENDING_WRITE_COUNT_MASK : 0;
	}

	/*
	 * WAR addresses for SHUB 1.x.
	 */
	if (local_node_data->active_cpu_count++ == 0 && is_shub1()) {
		int buddy_nasid;
		buddy_nasid =
		    cnodeid_to_nasid(numa_node_id() ==
				     num_online_nodes() - 1 ? 0 : numa_node_id() + 1);
		pda->pio_shub_war_cam_addr =
		    (volatile unsigned long *)GLOBAL_MMR_ADDR(nasid,
							      SH1_PI_CAM_CONTROL);
	}
}
Example 30
/**
 * sn_cpu_init - initialize per-cpu data areas
 * @cpuid: cpuid of the caller
 *
 * Called during cpu initialization on each cpu as it starts.
 * Currently, initializes the per-cpu data area for SNIA.
 * Also sets up a few fields in the nodepda.  Also known as
 * platform_cpu_init() by the ia64 machvec code.
 */
void __init sn_cpu_init(void)
{
	int cpuid;
	int cpuphyid;
	int nasid;
	int subnode;
	int slice;
	int cnode;
	int i;
	static int wars_have_been_checked;

	memset(pda, 0, sizeof(pda));
	if (ia64_sn_get_sn_info(0, &sn_hub_info->shub2, &sn_hub_info->nasid_bitmask, &sn_hub_info->nasid_shift,
				&sn_system_size, &sn_sharing_domain_size, &sn_partition_id,
				&sn_coherency_id, &sn_region_size))
		BUG();
	sn_hub_info->as_shift = sn_hub_info->nasid_shift - 2;

	/*
	 * The boot cpu makes this call again after platform initialization is
	 * complete.
	 */
	if (nodepdaindr[0] == NULL)
		return;

	cpuid = smp_processor_id();
	cpuphyid = get_sapicid();

	if (ia64_sn_get_sapic_info(cpuphyid, &nasid, &subnode, &slice))
		BUG();

	for (i=0; i < MAX_NUMNODES; i++) {
		if (nodepdaindr[i]) {
			nodepdaindr[i]->phys_cpuid[cpuid].nasid = nasid;
			nodepdaindr[i]->phys_cpuid[cpuid].slice = slice;
			nodepdaindr[i]->phys_cpuid[cpuid].subnode = subnode;
		}
	}

	cnode = nasid_to_cnodeid(nasid);

	pda->p_nodepda = nodepdaindr[cnode];
	pda->led_address =
	    (typeof(pda->led_address)) (LED0 + (slice << LED_CPU_SHIFT));
	pda->led_state = LED_ALWAYS_SET;
	pda->hb_count = HZ / 2;
	pda->hb_state = 0;
	pda->idle_flag = 0;

	if (cpuid != 0) {
		memcpy(pda->cnodeid_to_nasid_table,
		       pdacpu(0)->cnodeid_to_nasid_table,
		       sizeof(pda->cnodeid_to_nasid_table));
	}

	/*
	 * Check for WARs.
	 * Only needs to be done once, on BSP.
	 * Has to be done after loop above, because it uses pda.cnodeid_to_nasid_table[i].
	 * Has to be done before assignment below.
	 */
	if (!wars_have_been_checked) {
		sn_check_for_wars();
		wars_have_been_checked = 1;
	}
	sn_hub_info->shub_1_1_found = shub_1_1_found;

	/*
	 * Set up addresses of PIO/MEM write status registers.
	 */
	{
		u64 pio1[] = {SH1_PIO_WRITE_STATUS_0, 0, SH1_PIO_WRITE_STATUS_1, 0};
		u64 pio2[] = {SH2_PIO_WRITE_STATUS_0, SH2_PIO_WRITE_STATUS_1, 
			SH2_PIO_WRITE_STATUS_2, SH2_PIO_WRITE_STATUS_3};
		u64 *pio;
		pio = is_shub1() ? pio1 : pio2;
		pda->pio_write_status_addr = (volatile unsigned long *) LOCAL_MMR_ADDR(pio[slice]);
		pda->pio_write_status_val = is_shub1() ? SH_PIO_WRITE_STATUS_PENDING_WRITE_COUNT_MASK : 0;
	}

	/*
	 * WAR addresses for SHUB 1.x.
	 */
	if (local_node_data->active_cpu_count++ == 0 && is_shub1()) {
		int buddy_nasid;
		buddy_nasid =
		    cnodeid_to_nasid(numa_node_id() ==
				     num_online_nodes() - 1 ? 0 : numa_node_id() + 1);
		pda->pio_shub_war_cam_addr =
		    (volatile unsigned long *)GLOBAL_MMR_ADDR(nasid,
							      SH1_PI_CAM_CONTROL);
	}
}