Example #1
/*
 * uncached_alloc_page
 *
 * @starting_nid: node id of node to start with, or -1
 * @n_pages: number of contiguous pages to allocate
 *
 * Allocate the specified number of contiguous uncached pages on the
 * requested node. If not enough contiguous uncached pages are available
 * on the requested node, round-robin starting with the next higher node.
 */
unsigned long uncached_alloc_page(int starting_nid, int n_pages)
{
	unsigned long uc_addr;
	struct uncached_pool *uc_pool;
	int nid;

	if (unlikely(starting_nid >= MAX_NUMNODES))
		return 0;

	if (starting_nid < 0)
		starting_nid = numa_node_id();
	nid = starting_nid;

	do {
		if (!node_state(nid, N_HIGH_MEMORY))
			continue;
		uc_pool = &uncached_pools[nid];
		if (uc_pool->pool == NULL)
			continue;
		/*
		 * Try to satisfy the request from this node's pool; on
		 * failure, pull another chunk of uncached memory into
		 * the pool and retry.
		 */
		do {
			uc_addr = gen_pool_alloc(uc_pool->pool,
						 n_pages * PAGE_SIZE);
			if (uc_addr != 0)
				return uc_addr;
		} while (uncached_add_chunk(uc_pool, nid) == 0);

	} while ((nid = (nid + 1) % MAX_NUMNODES) != starting_nid);

	return 0;
}
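A minimal caller sketch for the allocator above, assuming the matching
uncached_free_page() release helper from the same ia64 uncached allocator;
the two-page request and the doorbell-page use are illustrative only.

/* Hypothetical caller: grab two contiguous uncached pages on the local
 * node (or on whichever node the round-robin reaches). */
static int use_uncached_pages_sketch(void)
{
	unsigned long uc = uncached_alloc_page(-1, 2);

	if (uc == 0)
		return -ENOMEM;	/* no uncached memory on any node */

	/* ... use the uncached range, e.g. as a device doorbell page ... */

	uncached_free_page(uc, 2);	/* release both pages */
	return 0;
}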
Example #2
int __meminit online_page_cgroup(unsigned long start_pfn,
			unsigned long nr_pages,
			int nid)
{
	unsigned long start, end, pfn;
	int fail = 0;

	start = SECTION_ALIGN_DOWN(start_pfn);
	end = SECTION_ALIGN_UP(start_pfn + nr_pages);

	if (nid == -1) {
		nid = pfn_to_nid(start_pfn);
		VM_BUG_ON(!node_state(nid, N_ONLINE));
	}

	for (pfn = start; !fail && pfn < end; pfn += PAGES_PER_SECTION) {
		if (!pfn_present(pfn))
			continue;
		fail = init_section_page_cgroup(pfn, nid);
	}
	if (!fail)
		return 0;

	/* rollback */
	for (pfn = start; pfn < end; pfn += PAGES_PER_SECTION)
		__free_page_cgroup(pfn);

	return -ENOMEM;
}
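The pfn range above is widened to whole memory sections before anything is
initialized. For reference, a sketch of the two alignment macros as defined
in include/linux/mmzone.h, where PAGE_SECTION_MASK is ~(PAGES_PER_SECTION - 1):

/* Round a pfn down/up to a memory-section boundary. */
#define SECTION_ALIGN_UP(pfn)	(((pfn) + PAGES_PER_SECTION - 1) & PAGE_SECTION_MASK)
#define SECTION_ALIGN_DOWN(pfn)	((pfn) & PAGE_SECTION_MASK)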
Example #3
static int __meminit online_page_ext(unsigned long start_pfn,
				unsigned long nr_pages,
				int nid)
{
	unsigned long start, end, pfn;
	int fail = 0;

	start = SECTION_ALIGN_DOWN(start_pfn);
	end = SECTION_ALIGN_UP(start_pfn + nr_pages);

	if (nid == -1) {
		/*
		 * In this case, "nid" already exists and contains valid memory.
		 * "start_pfn" passed to us is a pfn which is an arg for
		 * online_pages(), and start_pfn should exist.
		 */
		nid = pfn_to_nid(start_pfn);
		VM_BUG_ON(!node_state(nid, N_ONLINE));
	}

	for (pfn = start; !fail && pfn < end; pfn += PAGES_PER_SECTION) {
		if (!pfn_present(pfn))
			continue;
		fail = init_section_page_ext(pfn, nid);
	}
	if (!fail)
		return 0;

	/* rollback */
	for (pfn = start; pfn < end; pfn += PAGES_PER_SECTION)
		__free_page_ext(pfn);

	return -ENOMEM;
}
Example #4
File: node.c Project: jthatch12/STi
static inline bool hugetlb_register_node(struct node *node)
{
	if (__hugetlb_register_node &&
			node_state(node->dev.id, N_MEMORY)) {
		__hugetlb_register_node(node);
		return true;
	}
	return false;
}
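A hedged sketch of how this helper might be driven for every online node.
for_each_online_node() is a real kernel iterator, but the loop itself is
illustrative, and the node_devices[] layout (array of pointers vs. array of
structs) varies across kernel versions.

static void hugetlb_register_all_nodes_sketch(void)
{
	int nid;

	/* Nodes without memory are filtered by the N_MEMORY check
	 * inside hugetlb_register_node() itself. */
	for_each_online_node(nid)
		hugetlb_register_node(node_devices[nid]);
}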
Example #5
/*
%F Calls the user callback associated with the event of closing a folder.
%i h     : tree's handle.
%o returns IUP_DEFAULT to close the branch (also when the callback is not
           defined), or IUP_IGNORE to keep the branch open.
*/
int treecallBranchCloseCb(Ihandle* h, Node n)
{
  IFni cb = (IFni)IupGetCallback(h,IUP_BRANCHCLOSE_CB);
  if (cb && node_kind(n) == BRANCH && 
            node_state(n) == EXPANDED)
  {
    int id = treefindNodeId(h, n);
    return cb(h,id);
  }

  return IUP_DEFAULT;
}
Example #6
/*
%F Calls the user callback associated with the event of opening a folder.
%i h     : tree's handle.
%o returns IUP_DEFAULT to open the branch (also when the callback is not
           defined), or IUP_IGNORE to keep the branch closed.
*/
int treecallBranchOpenCb(Ihandle* h, Node n)
{
  IFni cb = (IFni)IupGetCallback(h,IUP_BRANCHOPEN_CB);
  if (cb && node_kind(n) == BRANCH && 
            node_state(n) == COLLAPSED)
  {
    int id = treefindNodeId(h, n);
    return cb(h,id);
  }

  return IUP_DEFAULT;
}
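Both helpers above fetch the user callback with IupGetCallback(); on the
application side the callback would be installed with IupSetCallback(). A
minimal, hypothetical handler (the tree handle and the veto logic are
assumptions, not part of the original source):

/* Hypothetical user callback: veto closing of branch 0. */
static int my_branchclose_cb(Ihandle *ih, int id)
{
  if (id == 0)
    return IUP_IGNORE;   /* keep branch 0 open */
  return IUP_DEFAULT;    /* let any other branch close */
}

/* after the tree control has been created: */
IupSetCallback(tree, "BRANCHCLOSE_CB", (Icallback)my_branchclose_cb);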
Example #7
static void *__meminit alloc_page_cgroup(size_t size, int nid)
{
	void *addr = NULL;

	addr = alloc_pages_exact_nid(nid, size, GFP_KERNEL | __GFP_NOWARN);
	if (addr)
		return addr;

	if (node_state(nid, N_HIGH_MEMORY))
		addr = vmalloc_node(size, nid);
	else
		addr = vmalloc(size);

	return addr;
}
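Because the allocation can come either from the page allocator or from the
vmalloc fallback, the matching release path has to distinguish the two. A
sketch along the lines of the kernel's free path, assuming is_vmalloc_addr()
as the discriminator:

static void free_page_cgroup_sketch(void *addr, size_t size)
{
	/* The vmalloc fallback above means the release side must test
	 * which allocator actually provided the range. */
	if (is_vmalloc_addr(addr))
		vfree(addr);
	else
		free_pages_exact(addr, size);
}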
Example #8
/* __alloc_bootmem...() is protected by !slab_available() */
static int __init_refok init_section_page_cgroup(unsigned long pfn)
{
	struct mem_section *section = __pfn_to_section(pfn);
	struct page_cgroup *base, *pc;
	unsigned long table_size;
	int nid, index;

	if (!section->page_cgroup) {
		nid = page_to_nid(pfn_to_page(pfn));
		table_size = sizeof(struct page_cgroup) * PAGES_PER_SECTION;
		VM_BUG_ON(!slab_is_available());
		if (node_state(nid, N_HIGH_MEMORY)) {
			base = kmalloc_node(table_size,
				GFP_KERNEL | __GFP_NOWARN, nid);
			if (!base)
				base = vmalloc_node(table_size, nid);
		} else {
			base = kmalloc(table_size, GFP_KERNEL | __GFP_NOWARN);
			if (!base)
				base = vmalloc(table_size);
		}
	} else {
		/*
		 * We don't have to allocate page_cgroup again, but the
		 * address of the memmap may have changed, so we have to
		 * initialize it again.
		 */
		base = section->page_cgroup + pfn;
		table_size = 0;
		/* check whether the address of the memmap has changed */
		if (base->page == pfn_to_page(pfn))
			return 0;
	}

	if (!base) {
		printk(KERN_ERR "page cgroup allocation failure\n");
		return -ENOMEM;
	}

	for (index = 0; index < PAGES_PER_SECTION; index++) {
		pc = base + index;
		__init_page_cgroup(pc, pfn + index);
	}

	section->page_cgroup = base - pfn;
	total_usage += table_size;
	return 0;
}
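Storing base - pfn (rather than base) makes the later lookup a single
addition with the absolute pfn. A sketch of the corresponding lookup,
modeled on the kernel's lookup_page_cgroup():

static struct page_cgroup *lookup_page_cgroup_sketch(struct page *page)
{
	unsigned long pfn = page_to_pfn(page);
	struct mem_section *section = __pfn_to_section(pfn);

	/* section->page_cgroup holds "base - pfn", so indexing it with
	 * the absolute pfn lands directly on this page's descriptor. */
	if (!section->page_cgroup)
		return NULL;
	return section->page_cgroup + pfn;
}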
Example #9
static void *__meminit alloc_page_ext(size_t size, int nid)
{
	gfp_t flags = GFP_KERNEL | __GFP_ZERO | __GFP_NOWARN;
	void *addr = NULL;

	addr = alloc_pages_exact_nid(nid, size, flags);
	if (addr) {
		kmemleak_alloc(addr, size, 1, flags);
		return addr;
	}

	if (node_state(nid, N_HIGH_MEMORY))
		addr = vzalloc_node(size, nid);
	else
		addr = vzalloc(size);

	return addr;
}
Example #10
void * __meminit vmemmap_alloc_block(unsigned long size, int node)
{
	/* If the main allocator is up use that, fallback to bootmem. */
	if (slab_is_available()) {
		struct page *page;

		if (node_state(node, N_HIGH_MEMORY))
			page = alloc_pages_node(node,
				GFP_KERNEL | __GFP_ZERO, get_order(size));
		else
			page = alloc_pages(GFP_KERNEL | __GFP_ZERO,
				get_order(size));
		if (page)
			return page_address(page);
		return NULL;
	} else
		return __earlyonly_bootmem_alloc(node, size, size,
				__pa(MAX_DMA_ADDRESS));
}
Example #11
static void votequorum_notification_fn(
	votequorum_handle_t handle,
	uint64_t context,
	uint32_t quorate,
	uint32_t node_list_entries,
	votequorum_node_t node_list[]
	)
{
	int i;

	printf("votequorum notification called \n");
	printf("  quorate         = %d\n", quorate);
	printf("  number of nodes = %d\n", node_list_entries);

	for (i = 0; i< node_list_entries; i++) {
		printf("      %d: %s\n", node_list[i].nodeid, node_state(node_list[i].state));
	}
	printf("\n");
}
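To actually receive these notifications, the callback has to be registered
and a dispatch loop run. A hedged sketch against the corosync votequorum
API; the callback struct member follows the older API (newer corosync
renames it votequorum_quorum_notify_fn, so check your votequorum.h), and
error handling is elided:

static void track_quorum_sketch(void)
{
	votequorum_callbacks_t callbacks = {
		.votequorum_notify_fn = votequorum_notification_fn,
	};
	votequorum_handle_t handle;

	if (votequorum_initialize(&handle, &callbacks) != CS_OK)
		return;
	if (votequorum_trackstart(handle, 0 /* context */, CS_TRACK_CHANGES) != CS_OK)
		return;
	votequorum_dispatch(handle, CS_DISPATCH_BLOCKING);	/* run until error */
}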