/*
 * CPU hotplug notifier: allocate or release the per-CPU profile hit
 * pages and keep prof_cpu_mask in sync as CPUs come and go.
 */
static int profile_cpu_callback(struct notifier_block *info,
					unsigned long action, void *__cpu)
{
	int node, cpu = (unsigned long)__cpu;
	struct page *page;

	switch (action) {
	case CPU_UP_PREPARE:
	case CPU_UP_PREPARE_FROZEN:
		node = cpu_to_mem(cpu);
		per_cpu(cpu_profile_flip, cpu) = 0;
		if (!per_cpu(cpu_profile_hits, cpu)[1]) {
			page = alloc_pages_exact_node(node,
					GFP_KERNEL | __GFP_ZERO,
					0);
			if (!page)
				return notifier_from_errno(-ENOMEM);
			per_cpu(cpu_profile_hits, cpu)[1] = page_address(page);
		}
		if (!per_cpu(cpu_profile_hits, cpu)[0]) {
			page = alloc_pages_exact_node(node,
					GFP_KERNEL | __GFP_ZERO,
					0);
			if (!page)
				goto out_free;
			per_cpu(cpu_profile_hits, cpu)[0] = page_address(page);
		}
		break;
out_free:
		page = virt_to_page(per_cpu(cpu_profile_hits, cpu)[1]);
		per_cpu(cpu_profile_hits, cpu)[1] = NULL;
		__free_page(page);
		return notifier_from_errno(-ENOMEM);
	case CPU_ONLINE:
	case CPU_ONLINE_FROZEN:
		if (prof_cpu_mask != NULL)
			cpumask_set_cpu(cpu, prof_cpu_mask);
		break;
	case CPU_UP_CANCELED:
	case CPU_UP_CANCELED_FROZEN:
	case CPU_DEAD:
	case CPU_DEAD_FROZEN:
		if (prof_cpu_mask != NULL)
			cpumask_clear_cpu(cpu, prof_cpu_mask);
		if (per_cpu(cpu_profile_hits, cpu)[0]) {
			page = virt_to_page(per_cpu(cpu_profile_hits, cpu)[0]);
			per_cpu(cpu_profile_hits, cpu)[0] = NULL;
			__free_page(page);
		}
		if (per_cpu(cpu_profile_hits, cpu)[1]) {
			page = virt_to_page(per_cpu(cpu_profile_hits, cpu)[1]);
			per_cpu(cpu_profile_hits, cpu)[1] = NULL;
			__free_page(page);
		}
		break;
	}
	return NOTIFY_OK;
}
/*
 * Allocate the two per-CPU profile hit pages for every CPU that is
 * online when profiling starts; on failure, turn profiling off and
 * free whatever was already allocated.
 */
static int create_hash_tables(void)
{
	int cpu;

	for_each_online_cpu(cpu) {
		int node = cpu_to_mem(cpu);
		struct page *page;

		page = alloc_pages_exact_node(node,
				GFP_KERNEL | __GFP_ZERO | __GFP_THISNODE,
				0);
		if (!page)
			goto out_cleanup;
		per_cpu(cpu_profile_hits, cpu)[1]
				= (struct profile_hit *)page_address(page);
		page = alloc_pages_exact_node(node,
				GFP_KERNEL | __GFP_ZERO | __GFP_THISNODE,
				0);
		if (!page)
			goto out_cleanup;
		per_cpu(cpu_profile_hits, cpu)[0]
				= (struct profile_hit *)page_address(page);
	}
	return 0;
out_cleanup:
	prof_on = 0;
	smp_mb();
	on_each_cpu(profile_nop, NULL, 1);
	for_each_online_cpu(cpu) {
		struct page *page;

		if (per_cpu(cpu_profile_hits, cpu)[0]) {
			page = virt_to_page(per_cpu(cpu_profile_hits, cpu)[0]);
			per_cpu(cpu_profile_hits, cpu)[0] = NULL;
			__free_page(page);
		}
		if (per_cpu(cpu_profile_hits, cpu)[1]) {
			page = virt_to_page(per_cpu(cpu_profile_hits, cpu)[1]);
			per_cpu(cpu_profile_hits, cpu)[1] = NULL;
			__free_page(page);
		}
	}
	return -1;
}
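/*
 * Sketch (illustrative, not taken verbatim from kernel/profile.c): how the
 * two helpers above are typically tied together at init time.  The CPUs
 * that are already online get their hit pages from create_hash_tables(),
 * and profile_cpu_callback() is then registered as a CPU-hotplug notifier
 * so CPUs brought up later are covered too.  The function name below is an
 * assumption; the registration itself uses the hotcpu_notifier() interface
 * of this kernel era.
 */
static int profile_hotplug_setup(void)
{
	if (!prof_on)
		return 0;

	/* per-CPU hit pages for the CPUs that are already online */
	if (create_hash_tables())
		return -ENOMEM;

	/* allocate/free hit pages as CPUs come and go from now on */
	hotcpu_notifier(profile_cpu_callback, 0);
	return 0;
}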
/*
 * Back-end page allocator for SLOB: honour an explicit NUMA node when one
 * is requested, otherwise fall back to the default allocation policy.
 */
static void *slob_new_pages(gfp_t gfp, int order, int node)
{
	void *page;

#ifdef CONFIG_NUMA
	if (node != NUMA_NO_NODE)
		page = alloc_pages_exact_node(node, gfp, order);
	else
#endif
		page = alloc_pages(gfp, order);

	if (!page)
		return NULL;

	return page_address(page);
}
/*
 * Instrumented variant of slob_new_pages() that additionally accounts
 * the size of each allocation in total_alloc (in SLOB units).
 */
static void *slob_new_pages(gfp_t gfp, int order, int node)
{
	void *page;

#ifdef CONFIG_NUMA
	if (node != -1)
		page = alloc_pages_exact_node(node, gfp, order);
	else
#endif
		page = alloc_pages(gfp, order);

	if (!page)
		return NULL;

	/* add the size of the allocated pages to total_alloc */
	total_alloc = total_alloc + SLOB_UNITS(PAGE_SIZE << order);

	return page_address(page);
}
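/*
 * For reference, the matching free path in mainline mm/slob.c of this era
 * looks roughly like the sketch below.  A build that maintains the
 * total_alloc counter, as in the variant above, would presumably subtract
 * the freed size here in the same units; that bookkeeping is omitted from
 * this sketch.
 */
static void slob_free_pages(void *b, int order)
{
	if (current->reclaim_state)
		current->reclaim_state->reclaimed_slab += 1 << order;
	free_pages((unsigned long)b, order);
}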
/*
 * Add a new chunk of uncached memory pages to the specified pool.
 *
 * @uc_pool: pool to add new chunk of uncached memory to
 * @nid: node id of node to allocate memory from, or -1
 *
 * This is accomplished by first allocating a granule of cached memory pages
 * and then converting them to uncached memory pages.
 */
static int uncached_add_chunk(struct uncached_pool *uc_pool, int nid)
{
	struct page *page;
	int status, i, nchunks_added = uc_pool->nchunks_added;
	unsigned long c_addr, uc_addr;

	if (mutex_lock_interruptible(&uc_pool->add_chunk_mutex) != 0)
		return -1;	/* interrupted by a signal */

	if (uc_pool->nchunks_added > nchunks_added) {
		/* someone added a new chunk while we were waiting */
		mutex_unlock(&uc_pool->add_chunk_mutex);
		return 0;
	}

	if (uc_pool->nchunks_added >= MAX_CONVERTED_CHUNKS_PER_NODE) {
		mutex_unlock(&uc_pool->add_chunk_mutex);
		return -1;
	}

	/* attempt to allocate a granule's worth of cached memory pages */

	page = alloc_pages_exact_node(nid,
				GFP_KERNEL | __GFP_ZERO | GFP_THISNODE,
				IA64_GRANULE_SHIFT-PAGE_SHIFT);
	if (!page) {
		mutex_unlock(&uc_pool->add_chunk_mutex);
		return -1;
	}

	/* convert the memory pages from cached to uncached */

	c_addr = (unsigned long)page_address(page);
	uc_addr = c_addr - PAGE_OFFSET + __IA64_UNCACHED_OFFSET;

	/*
	 * There's a small race here where it's possible for someone to
	 * access the page through /dev/mem halfway through the conversion
	 * to uncached - not sure it's really worth bothering about
	 */
	for (i = 0; i < (IA64_GRANULE_SIZE / PAGE_SIZE); i++)
		SetPageUncached(&page[i]);

	flush_tlb_kernel_range(uc_addr, uc_addr + IA64_GRANULE_SIZE);

	status = ia64_pal_prefetch_visibility(PAL_VISIBILITY_PHYSICAL);
	if (status == PAL_VISIBILITY_OK_REMOTE_NEEDED) {
		atomic_set(&uc_pool->status, 0);
		status = smp_call_function(uncached_ipi_visibility, uc_pool, 1);
		if (status || atomic_read(&uc_pool->status))
			goto failed;
	} else if (status != PAL_VISIBILITY_OK)
		goto failed;

	preempt_disable();

	if (ia64_platform_is("sn2"))
		sn_flush_all_caches(uc_addr, IA64_GRANULE_SIZE);
	else
		flush_icache_range(uc_addr, uc_addr + IA64_GRANULE_SIZE);

	/* flush the just introduced uncached translation from the TLB */
	local_flush_tlb_all();

	preempt_enable();

	status = ia64_pal_mc_drain();
	if (status != PAL_STATUS_SUCCESS)
		goto failed;
	atomic_set(&uc_pool->status, 0);
	status = smp_call_function(uncached_ipi_mc_drain, uc_pool, 1);
	if (status || atomic_read(&uc_pool->status))
		goto failed;

	/*
	 * The chunk of memory pages has been converted to uncached so now we
	 * can add it to the pool.
	 */
	status = gen_pool_add(uc_pool->pool, uc_addr, IA64_GRANULE_SIZE, nid);
	if (status)
		goto failed;

	uc_pool->nchunks_added++;
	mutex_unlock(&uc_pool->add_chunk_mutex);
	return 0;

	/* failed to convert or add the chunk so give it back to the kernel */
failed:
	for (i = 0; i < (IA64_GRANULE_SIZE / PAGE_SIZE); i++)
		ClearPageUncached(&page[i]);

	free_pages(c_addr, IA64_GRANULE_SHIFT-PAGE_SHIFT);
	mutex_unlock(&uc_pool->add_chunk_mutex);
	return -1;
}
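/*
 * Sketch of the consumer side, assuming the standard genalloc API: once
 * uncached_add_chunk() has populated uc_pool->pool, callers carve uncached
 * pages out of it with gen_pool_alloc().  The helper name and the single
 * retry below are illustrative only, not the driver's actual code.
 */
static unsigned long uncached_get_page(struct uncached_pool *uc_pool, int nid)
{
	unsigned long uc_addr;

	uc_addr = gen_pool_alloc(uc_pool->pool, PAGE_SIZE);
	if (uc_addr == 0 && uncached_add_chunk(uc_pool, nid) == 0)
		uc_addr = gen_pool_alloc(uc_pool->pool, PAGE_SIZE);

	return uc_addr;	/* 0 means the pool is exhausted */
}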
static struct xpc_gru_mq_uv *
xpc_create_gru_mq_uv(unsigned int mq_size, int cpu, char *irq_name,
		     irq_handler_t irq_handler)
{
	enum xp_retval xp_ret;
	int ret;
	int nid;
	int nasid;
	int pg_order;
	struct page *page;
	struct xpc_gru_mq_uv *mq;
	struct uv_IO_APIC_route_entry *mmr_value;

	mq = kmalloc(sizeof(struct xpc_gru_mq_uv), GFP_KERNEL);
	if (mq == NULL) {
		dev_err(xpc_part, "xpc_create_gru_mq_uv() failed to kmalloc() "
			"a xpc_gru_mq_uv structure\n");
		ret = -ENOMEM;
		goto out_0;
	}

	mq->gru_mq_desc = kzalloc(sizeof(struct gru_message_queue_desc),
				  GFP_KERNEL);
	if (mq->gru_mq_desc == NULL) {
		dev_err(xpc_part, "xpc_create_gru_mq_uv() failed to kmalloc() "
			"a gru_message_queue_desc structure\n");
		ret = -ENOMEM;
		goto out_1;
	}

	pg_order = get_order(mq_size);
	mq->order = pg_order + PAGE_SHIFT;
	mq_size = 1UL << mq->order;

	mq->mmr_blade = uv_cpu_to_blade_id(cpu);

	nid = cpu_to_node(cpu);
	page = alloc_pages_exact_node(nid,
				      GFP_KERNEL | __GFP_ZERO | GFP_THISNODE,
				      pg_order);
	if (page == NULL) {
		dev_err(xpc_part, "xpc_create_gru_mq_uv() failed to alloc %d "
			"bytes of memory on nid=%d for GRU mq\n", mq_size, nid);
		ret = -ENOMEM;
		goto out_2;
	}
	mq->address = page_address(page);

	/* enable generation of irq when GRU mq operation occurs to this mq */
	ret = xpc_gru_mq_watchlist_alloc_uv(mq);
	if (ret != 0)
		goto out_3;

	ret = xpc_get_gru_mq_irq_uv(mq, cpu, irq_name);
	if (ret != 0)
		goto out_4;

	ret = request_irq(mq->irq, irq_handler, 0, irq_name, NULL);
	if (ret != 0) {
		dev_err(xpc_part, "request_irq(irq=%d) returned error=%d\n",
			mq->irq, -ret);
		goto out_5;
	}

	nasid = UV_PNODE_TO_NASID(uv_cpu_to_pnode(cpu));

	mmr_value = (struct uv_IO_APIC_route_entry *)&mq->mmr_value;
	ret = gru_create_message_queue(mq->gru_mq_desc, mq->address, mq_size,
				       nasid, mmr_value->vector,
				       mmr_value->dest);
	if (ret != 0) {
		dev_err(xpc_part, "gru_create_message_queue() returned "
			"error=%d\n", ret);
		ret = -EINVAL;
		goto out_6;
	}

	/* allow other partitions to access this GRU mq */
	xp_ret = xp_expand_memprotect(xp_pa(mq->address), mq_size);
	if (xp_ret != xpSuccess) {
		ret = -EACCES;
		goto out_6;
	}

	return mq;

	/* something went wrong */
out_6:
	free_irq(mq->irq, NULL);
out_5:
	xpc_release_gru_mq_irq_uv(mq);
out_4:
	xpc_gru_mq_watchlist_free_uv(mq);
out_3:
	free_pages((unsigned long)mq->address, pg_order);
out_2:
	kfree(mq->gru_mq_desc);
out_1:
	kfree(mq);
out_0:
	return ERR_PTR(ret);
}
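/*
 * Common pattern shared by all of the call sites above, shown as a
 * self-contained sketch: allocate page(s) on a specific NUMA node with
 * alloc_pages_exact_node(), work with the kernel virtual address from
 * page_address(), and hand the pages back with free_pages().  The helper
 * names are illustrative and not part of any of the files above.
 */
static void *alloc_node_buffer(int nid, size_t size, unsigned int *order)
{
	struct page *page;

	*order = get_order(size);
	page = alloc_pages_exact_node(nid, GFP_KERNEL | __GFP_ZERO, *order);
	if (!page)
		return NULL;

	return page_address(page);	/* kernel virtual address */
}

static void free_node_buffer(void *addr, unsigned int order)
{
	if (addr)
		free_pages((unsigned long)addr, order);
}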