/**
 * deactivate_page - forcefully deactivate a page
 * @page: page to deactivate
 *
 * This function hints the VM that @page is a good reclaim candidate,
 * for example if its invalidation fails due to the page being dirty
 * or under writeback.
 */
void deactivate_page(struct page *page)
{
	/*
	 * In a workload with many unevictable pages (e.g. one using mprotect),
	 * deactivating unevictable pages to accelerate reclaim is pointless.
	 */
	if (PageUnevictable(page))
		return;

	if (likely(get_page_unless_zero(page))) {
		struct pagevec *pvec = &get_cpu_var(lru_deactivate_pvecs);

		if (!pagevec_add(pvec, page))
			pagevec_lru_move_fn(pvec, lru_deactivate_fn, NULL);
		put_cpu_var(lru_deactivate_pvecs);
	}
}
static void nmi_shutdown(void)
{
	struct op_msrs *msrs;

	get_online_cpus();
	unregister_cpu_notifier(&oprofile_cpu_nb);
	on_each_cpu(nmi_cpu_shutdown, NULL, 1);
	nmi_enabled = 0;
	ctr_running = 0;
	put_online_cpus();
	/* make variables visible to the nmi handler: */
	smp_mb();
	unregister_nmi_handler(NMI_LOCAL, "oprofile");
	msrs = &get_cpu_var(cpu_msrs);
	model->shutdown(msrs);
	free_msrs();
	put_cpu_var(cpu_msrs);
}
static int __init per_cpu_fixed_init(void)
{
	int proc_id;
	int *cnt;

	preempt_disable();
	proc_id = smp_processor_id();
	cnt = &get_cpu_var(my_counter);
	preempt_enable();

	printk("=====INIT=====\n");
	printk("Howdy from CPU #%d\n", proc_id);

	*cnt += proc_id;
	printk("my_counter = %d\n", *cnt);
	put_cpu_var(my_counter);

	return 0;
}
/*
 * Queue the entry and raise the IPI if needed.
 */
static void __irq_work_queue(struct irq_work *entry)
{
	struct irq_work **head, *next;

	head = &get_cpu_var(irq_work_list);

	do {
		next = *head;
		/* Can assign non-atomic because we keep the flags set. */
		entry->next = next_flags(next, IRQ_WORK_FLAGS);
	} while (cmpxchg(head, next, entry) != next);

	/* The list was empty, raise self-interrupt to start processing. */
	if (!irq_work_next(entry))
		arch_irq_work_raise();

	put_cpu_var(irq_work_list);
}
/*
 * The ETR get_clock function. It will write the current clock value
 * to the clock pointer and return 0 if the clock is in sync with the
 * external time source. If the clock mode is local it will return
 * -ENOSYS, and -EAGAIN if the clock is not in sync with the external
 * reference. This function is what ETR is all about.
 */
int get_sync_clock(unsigned long long *clock)
{
	atomic_t *sw_ptr;
	unsigned int sw0, sw1;

	sw_ptr = &get_cpu_var(etr_sync_word);
	sw0 = atomic_read(sw_ptr);
	*clock = get_clock();
	sw1 = atomic_read(sw_ptr);
	put_cpu_var(etr_sync_word);
	if (sw0 == sw1 && (sw0 & 0x80000000U))
		/* Success: time is in sync. */
		return 0;
	if (test_bit(ETR_FLAG_ENOSYS, &etr_flags))
		return -ENOSYS;
	if (test_bit(ETR_FLAG_EACCES, &etr_flags))
		return -EACCES;
	return -EAGAIN;
}
static void tlb_batch_add_one(struct mm_struct *mm, unsigned long vaddr,
			      bool exec, unsigned int hugepage_shift)
{
	struct tlb_batch *tb = &get_cpu_var(tlb_batch);
	unsigned long nr;

	vaddr &= PAGE_MASK;
	if (exec)
		vaddr |= 0x1UL;

	nr = tb->tlb_nr;

	if (unlikely(nr != 0 && mm != tb->mm)) {
		flush_tlb_pending();
		nr = 0;
	}

	if (!tb->active) {
		flush_tsb_user_page(mm, vaddr, hugepage_shift);
		global_flush_tlb_page(mm, vaddr);
		goto out;
	}

	if (nr == 0) {
		tb->mm = mm;
		tb->hugepage_shift = hugepage_shift;
	}

	if (tb->hugepage_shift != hugepage_shift) {
		flush_tlb_pending();
		tb->hugepage_shift = hugepage_shift;
		nr = 0;
	}

	tb->vaddrs[nr] = vaddr;
	tb->tlb_nr = ++nr;
	if (nr >= TLB_BATCH_NR)
		flush_tlb_pending();

out:
	put_cpu_var(tlb_batch);
}
void flush_tlb_pending(void)
{
	struct mmu_gather *mp = &get_cpu_var(mmu_gathers);

	if (mp->tlb_nr) {
		flush_tsb_user(mp);

		if (CTX_VALID(mp->mm->context)) {
#ifdef CONFIG_SMP
			smp_flush_tlb_pending(mp->mm, mp->tlb_nr,
					      &mp->vaddrs[0]);
#else
			__flush_tlb_pending(CTX_HWBITS(mp->mm->context),
					    mp->tlb_nr, &mp->vaddrs[0]);
#endif
		}
		mp->tlb_nr = 0;
	}

	put_cpu_var(mmu_gathers);
}
void quicklist_trim(int nr, void (*dtor)(void *),
		    unsigned long min_pages, unsigned long max_free)
{
	long pages_to_free;
	struct quicklist *q;

	q = &get_cpu_var(quicklist)[nr];
	if (q->nr_pages > min_pages) {
		pages_to_free = min_pages_to_free(q, min_pages, max_free);

		while (pages_to_free > 0) {
			void *p = quicklist_alloc(nr, 0, NULL);

			if (dtor)
				dtor(p);
			free_page((unsigned long)p);
			pages_to_free--;
		}
	}
	put_cpu_var(quicklist);
}
/*
 * The synchronous get_clock function. It will write the current clock
 * value to the clock pointer and return 0 if the clock is in sync with
 * the external time source. If the clock mode is local it will return
 * -ENOSYS, and -EAGAIN if the clock is not in sync with the external
 * reference.
 */
int get_sync_clock(unsigned long long *clock)
{
	atomic_t *sw_ptr;
	unsigned int sw0, sw1;

	sw_ptr = &get_cpu_var(clock_sync_word);
	sw0 = atomic_read(sw_ptr);
	*clock = get_clock();
	sw1 = atomic_read(sw_ptr);
	put_cpu_var(clock_sync_word);
	if (sw0 == sw1 && (sw0 & 0x80000000U))
		/* Success: time is in sync. */
		return 0;
	if (!test_bit(CLOCK_SYNC_HAS_ETR, &clock_sync_flags) &&
	    !test_bit(CLOCK_SYNC_HAS_STP, &clock_sync_flags))
		return -ENOSYS;
	if (!test_bit(CLOCK_SYNC_ETR, &clock_sync_flags) &&
	    !test_bit(CLOCK_SYNC_STP, &clock_sync_flags))
		return -EACCES;
	return -EAGAIN;
}
void flush_tlb_pending(void)
{
	struct tlb_batch *tb = &get_cpu_var(tlb_batch);

	if (tb->tlb_nr) {
		flush_tsb_user(tb);

		if (CTX_VALID(tb->mm->context)) {
#ifdef CONFIG_SMP
			smp_flush_tlb_pending(tb->mm, tb->tlb_nr,
					      &tb->vaddrs[0]);
#else
			__flush_tlb_pending(CTX_HWBITS(tb->mm->context),
					    tb->tlb_nr, &tb->vaddrs[0]);
#endif
		}
		tb->tlb_nr = 0;
	}

	put_cpu_var(tlb_batch);
}
static void free_fdtable_rcu(struct rcu_head *rcu)
{
	struct fdtable *fdt = container_of(rcu, struct fdtable, rcu);
	struct fdtable_defer *fddef;

	BUG_ON(!fdt);
	BUG_ON(fdt->max_fds <= NR_OPEN_DEFAULT);

	if (!is_vmalloc_addr(fdt->fd) && !is_vmalloc_addr(fdt->open_fds)) {
		kfree(fdt->fd);
		kfree(fdt->open_fds);
		kfree(fdt);
	} else {
		fddef = &get_cpu_var(fdtable_defer_list);
		spin_lock(&fddef->lock);
		fdt->next = fddef->next;
		fddef->next = fdt;
		/* vmallocs are handled from the workqueue context */
		schedule_work(&fddef->wq);
		spin_unlock(&fddef->lock);
		put_cpu_var(fdtable_defer_list);
	}
}
static int restart_trace(struct notifier_block *nfb,
			 unsigned long action, void *hcpu)
{
	unsigned int cpu = (unsigned long)hcpu;
	unsigned int cpu_ix;
	volatile int *pwr_down;

	switch (action) {
	case CPU_STARTING:
		switch (cpu) {
		case 1:
		case 2:
		case 3:
		case 4:
		case 5:
		case 6:
		case 7:
			pwr_down = &get_cpu_var(trace_pwr_down);
			if (*pwr_down) {
				trace_start_by_cpus(cpumask_of(cpu), 0);
				*pwr_down = 0;
			}
			put_cpu_var(trace_pwr_down);
			break;
		default:
			break;
		}
		break;
	case CPU_DYING:
		switch (cpu) {
		case 4:
		case 5:
		case 6:
		case 7:
			if (num_possible_cpus() > 4) {
				if (!cpu_online(4) && !cpu_online(5) &&
				    !cpu_online(6) && !cpu_online(7)) {
					/* num_possible_cpus() could be 4, 6 or 8 */
					for (cpu_ix = 4;
					     cpu_ix < num_possible_cpus();
					     cpu_ix++)
						per_cpu(trace_pwr_down, cpu_ix) = 1;
				}
			}
			break;
		default:
			break;
		}
		break;
	default:
		break;
	}

	return NOTIFY_OK;
}
/* attempts to compress and store a single page */
static int zswap_frontswap_store(unsigned type, pgoff_t offset,
				 struct page *page)
{
	struct zswap_tree *tree = zswap_trees[type];
	struct zswap_entry *entry, *dupentry;
	int ret;
	unsigned int dlen = PAGE_SIZE, len;
	unsigned long handle;
	char *buf;
	u8 *src, *dst;
	struct zswap_header *zhdr;

	if (!tree) {
		ret = -ENODEV;
		goto reject;
	}

	/* reclaim space if needed */
	if (zswap_is_full()) {
		zswap_pool_limit_hit++;
		if (zbud_reclaim_page(tree->pool, 8)) {
			zswap_reject_reclaim_fail++;
			ret = -ENOMEM;
			goto reject;
		}
	}

	/* allocate entry */
	entry = zswap_entry_cache_alloc(GFP_KERNEL);
	if (!entry) {
		zswap_reject_kmemcache_fail++;
		ret = -ENOMEM;
		goto reject;
	}

	/* compress */
	dst = get_cpu_var(zswap_dstmem);
	src = kmap_atomic(page);
	ret = zswap_comp_op(ZSWAP_COMPOP_COMPRESS, src, PAGE_SIZE, dst, &dlen);
	kunmap_atomic(src);
	if (ret) {
		ret = -EINVAL;
		goto freepage;
	}

	/* store */
	len = dlen + sizeof(struct zswap_header);
	ret = zbud_alloc(tree->pool, len, __GFP_NORETRY | __GFP_NOWARN,
			 &handle);
	if (ret == -ENOSPC) {
		zswap_reject_compress_poor++;
		goto freepage;
	}
	if (ret) {
		zswap_reject_alloc_fail++;
		goto freepage;
	}
	zhdr = zbud_map(tree->pool, handle);
	zhdr->swpentry = swp_entry(type, offset);
	buf = (u8 *)(zhdr + 1);
	memcpy(buf, dst, dlen);
	zbud_unmap(tree->pool, handle);
	put_cpu_var(zswap_dstmem);

	/* populate entry */
	entry->offset = offset;
	entry->handle = handle;
	entry->length = dlen;

	/* map */
	spin_lock(&tree->lock);
	do {
		ret = zswap_rb_insert(&tree->rbroot, entry, &dupentry);
		if (ret == -EEXIST) {
			zswap_duplicate_entry++;
			/* remove from rbtree */
			zswap_rb_erase(&tree->rbroot, dupentry);
			zswap_entry_put(tree, dupentry);
		}
	} while (ret == -EEXIST);
	spin_unlock(&tree->lock);

	/* update stats */
	atomic_inc(&zswap_stored_pages);
	zswap_pool_pages = zbud_get_pool_size(tree->pool);

	return 0;

freepage:
	put_cpu_var(zswap_dstmem);
	zswap_entry_cache_free(entry);
reject:
	return ret;
}
static inline void get_seq(__u32 *ts, int *cpu)
{
	*ts = get_cpu_var(proc_event_counts)++;
	*cpu = smp_processor_id();
	put_cpu_var(proc_event_counts);
}
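/*
 * For context, a minimal sketch (not taken from the snippet above) of how a
 * per-CPU counter such as proc_event_counts is typically declared and bumped
 * with the get_cpu_var()/put_cpu_var() pair. The variable and function names
 * here are illustrative only.
 */
static DEFINE_PER_CPU(__u32, example_event_count);

static __u32 example_next_count(void)
{
	__u32 val;

	/* get_cpu_var() disables preemption and returns this CPU's copy. */
	val = get_cpu_var(example_event_count)++;
	/*
	 * put_cpu_var() re-enables preemption; no lock is needed because
	 * only this CPU touches its own copy inside this window.
	 */
	put_cpu_var(example_event_count);
	return val;
}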
void tlb_batch_add(struct mm_struct *mm, unsigned long vaddr,
		   pte_t *ptep, pte_t orig, int fullmm)
{
	struct tlb_batch *tb = &get_cpu_var(tlb_batch);
	unsigned long nr;

	vaddr &= PAGE_MASK;
	if (pte_exec(orig))
		vaddr |= 0x1UL;

	if (tlb_type != hypervisor && pte_dirty(orig)) {
		unsigned long paddr, pfn = pte_pfn(orig);
		struct address_space *mapping;
		struct page *page;

		if (!pfn_valid(pfn))
			goto no_cache_flush;

		page = pfn_to_page(pfn);
		if (PageReserved(page))
			goto no_cache_flush;

		/* A real file page? */
		mapping = page_mapping(page);
		if (!mapping)
			goto no_cache_flush;

		paddr = (unsigned long) page_address(page);
		if ((paddr ^ vaddr) & (1 << 13))
			flush_dcache_page_all(mm, page);
	}

no_cache_flush:
	if (fullmm) {
		put_cpu_var(tlb_batch);
		return;
	}

	nr = tb->tlb_nr;

	if (unlikely(nr != 0 && mm != tb->mm)) {
		flush_tlb_pending();
		nr = 0;
	}

	if (!tb->active) {
		global_flush_tlb_page(mm, vaddr);
		flush_tsb_user_page(mm, vaddr);
		goto out;
	}

	if (nr == 0)
		tb->mm = mm;

	tb->vaddrs[nr] = vaddr;
	tb->tlb_nr = ++nr;
	if (nr >= TLB_BATCH_NR)
		flush_tlb_pending();

out:
	put_cpu_var(tlb_batch);
}
/* attempts to compress and store a single page */
static int zswap_frontswap_store(unsigned type, pgoff_t offset,
				 struct page *page)
{
	struct zswap_tree *tree = zswap_trees[type];
	struct zswap_entry *entry, *dupentry;
	int ret;
	unsigned int dlen = PAGE_SIZE, len;
	unsigned long handle;
	char *buf;
	u8 *src, *dst;
	struct zswap_header *zhdr;

	if (!tree) {
		ret = -ENODEV;
		goto reject;
	}

	/* if this page got EIO on pageout before, give up immediately */
	if (PageError(page)) {
		ret = -ENOMEM;
		goto reject;
	}

	/* reclaim space if needed */
	if (zswap_is_full()) {
		zswap_pool_limit_hit++;
		if (zpool_shrink(zswap_pool, 1, NULL)) {
			zswap_reject_reclaim_fail++;
			ret = -ENOMEM;
			goto reject;
		}
	}

	/* allocate entry */
	entry = zswap_entry_cache_alloc(GFP_KERNEL);
	if (!entry) {
		zswap_reject_kmemcache_fail++;
		ret = -ENOMEM;
		goto reject;
	}

	/* compress */
	src = kmap_atomic(page);
	if (page_zero_filled(src)) {
		atomic_inc(&zswap_zero_pages);
		entry->zero_flag = 1;
		kunmap_atomic(src);

		handle = 0;
		dlen = PAGE_SIZE;
		goto zeropage_out;
	}
	dst = get_cpu_var(zswap_dstmem);
	ret = zswap_comp_op(ZSWAP_COMPOP_COMPRESS, src, PAGE_SIZE, dst, &dlen);
	kunmap_atomic(src);
	if (ret) {
		ret = -EINVAL;
		goto freepage;
	}

	/* store */
	len = dlen + sizeof(struct zswap_header);
	ret = zpool_malloc(zswap_pool, len, __GFP_NORETRY | __GFP_NOWARN,
			   &handle);
	if (ret == -ENOSPC) {
		zswap_reject_compress_poor++;
		goto freepage;
	}
	if (ret) {
		zswap_reject_alloc_fail++;
		goto freepage;
	}
	zhdr = zpool_map_handle(zswap_pool, handle, ZPOOL_MM_RW);
	zhdr->swpentry = swp_entry(type, offset);
	buf = (u8 *)(zhdr + 1);
	memcpy(buf, dst, dlen);
	zpool_unmap_handle(zswap_pool, handle);
	put_cpu_var(zswap_dstmem);

zeropage_out:
	/* populate entry */
	entry->offset = offset;
	entry->handle = handle;
	entry->length = dlen;

	/* map */
	spin_lock(&tree->lock);
	do {
		ret = zswap_rb_insert(&tree->rbroot, entry, &dupentry);
		if (ret == -EEXIST) {
			zswap_duplicate_entry++;
			/* remove from rbtree */
			zswap_rb_erase(&tree->rbroot, dupentry);
			zswap_entry_put(tree, dupentry);
		}
	} while (ret == -EEXIST);
	spin_unlock(&tree->lock);

	/* update stats */
	atomic_inc(&zswap_stored_pages);
	zswap_pool_total_size = zpool_get_total_size(zswap_pool);
	zswap_pool_pages = zswap_pool_total_size >> PAGE_SHIFT;

	return 0;

freepage:
	put_cpu_var(zswap_dstmem);
	zswap_entry_cache_free(entry);
reject:
	return ret;
}
static inline void put_affine_portal(void)
{
	put_cpu_var(bman_affine_portal);
}
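/*
 * put_affine_portal() above only makes sense together with a matching
 * get-side helper. A minimal sketch of what that counterpart would look
 * like follows, assuming a per-CPU bman_affine_portal variable; this is
 * illustrative rather than code copied from the driver.
 */
static inline struct bman_portal *get_affine_portal_sketch(void)
{
	/* Disables preemption until the corresponding put_affine_portal(). */
	return &get_cpu_var(bman_affine_portal);
}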
struct caam_drv_ctx *caam_drv_ctx_init(struct device *qidev, int *cpu,
				       u32 *sh_desc)
{
	size_t size;
	u32 num_words;
	dma_addr_t hwdesc;
	struct caam_drv_ctx *drv_ctx;
	const cpumask_t *cpus = qman_affine_cpus();
	static DEFINE_PER_CPU(int, last_cpu);

	num_words = desc_len(sh_desc);
	if (num_words > MAX_SDLEN) {
		dev_err(qidev, "Invalid descriptor len: %d words\n", num_words);
		return ERR_PTR(-EINVAL);
	}

	drv_ctx = kzalloc(sizeof(*drv_ctx), GFP_ATOMIC);
	if (!drv_ctx)
		return ERR_PTR(-ENOMEM);

	/*
	 * Initialise pre-header - set RSLS and SDLEN - and shared descriptor
	 * and dma-map them.
	 */
	drv_ctx->prehdr[0] = cpu_to_caam32((1 << PREHDR_RSLS_SHIFT) |
					   num_words);
	memcpy(drv_ctx->sh_desc, sh_desc, desc_bytes(sh_desc));
	size = sizeof(drv_ctx->prehdr) + sizeof(drv_ctx->sh_desc);
	hwdesc = dma_map_single(qidev, drv_ctx->prehdr, size,
				DMA_BIDIRECTIONAL);
	if (dma_mapping_error(qidev, hwdesc)) {
		dev_err(qidev, "DMA map error for preheader + shdesc\n");
		kfree(drv_ctx);
		return ERR_PTR(-ENOMEM);
	}
	drv_ctx->context_a = hwdesc;

	/* If given CPU does not own the portal, choose another one that does */
	if (!cpumask_test_cpu(*cpu, cpus)) {
		int *pcpu = &get_cpu_var(last_cpu);

		*pcpu = cpumask_next(*pcpu, cpus);
		if (*pcpu >= nr_cpu_ids)
			*pcpu = cpumask_first(cpus);
		*cpu = *pcpu;

		put_cpu_var(last_cpu);
	}
	drv_ctx->cpu = *cpu;

	/* Find response FQ hooked with this CPU */
	drv_ctx->rsp_fq = per_cpu(pcpu_qipriv.rsp_fq, drv_ctx->cpu);

	/* Attach request FQ */
	drv_ctx->req_fq = create_caam_req_fq(qidev, drv_ctx->rsp_fq, hwdesc,
					     QMAN_INITFQ_FLAG_SCHED);
	if (unlikely(IS_ERR_OR_NULL(drv_ctx->req_fq))) {
		dev_err(qidev, "create_caam_req_fq failed\n");
		dma_unmap_single(qidev, hwdesc, size, DMA_BIDIRECTIONAL);
		kfree(drv_ctx);
		return ERR_PTR(-ENOMEM);
	}

	drv_ctx->qidev = qidev;
	return drv_ctx;
}
/*
 * A linux PTE was changed and the corresponding hash table entry
 * needs to be flushed. This function will either perform the flush
 * immediately or will batch it up if the current CPU has an active
 * batch on it.
 */
void hpte_need_flush(struct mm_struct *mm, unsigned long addr,
		     pte_t *ptep, unsigned long pte, int huge)
{
	unsigned long vpn;
	struct ppc64_tlb_batch *batch = &get_cpu_var(ppc64_tlb_batch);
	unsigned long vsid;
	unsigned int psize;
	int ssize;
	real_pte_t rpte;
	int i;

	i = batch->index;

	/* Get page size (maybe move back to caller).
	 *
	 * NOTE: when using special 64K mappings in 4K environment like
	 * for SPEs, we obtain the page size from the slice, which thus
	 * must still exist (and thus the VMA not reused) at the time
	 * of this call
	 */
	if (huge) {
#ifdef CONFIG_HUGETLB_PAGE
		psize = get_slice_psize(mm, addr);
		/* Mask the address for the correct page size */
		addr &= ~((1UL << mmu_psize_defs[psize].shift) - 1);
#else
		BUG();
		psize = pte_pagesize_index(mm, addr, pte); /* shutup gcc */
#endif
	} else {
		psize = pte_pagesize_index(mm, addr, pte);
		/* Mask the address for the standard page size.  If we
		 * have a 64k page kernel, but the hardware does not
		 * support 64k pages, this might be different from the
		 * hardware page size encoded in the slice table.
		 */
		addr &= PAGE_MASK;
	}

	/* Build full vaddr */
	if (!is_kernel_addr(addr)) {
		ssize = user_segment_size(addr);
		vsid = get_vsid(mm->context.id, addr, ssize);
	} else {
		vsid = get_kernel_vsid(addr, mmu_kernel_ssize);
		ssize = mmu_kernel_ssize;
	}
	WARN_ON(vsid == 0);
	vpn = hpt_vpn(addr, vsid, ssize);
	rpte = __real_pte(__pte(pte), ptep);

	/*
	 * Check if we have an active batch on this CPU. If not, just
	 * flush now and return. For now, we do global invalidates
	 * in that case, might be worth testing the mm cpu mask though
	 * and deciding to use local invalidates instead...
	 */
	if (!batch->active) {
		flush_hash_page(vpn, rpte, psize, ssize, 0);
		put_cpu_var(ppc64_tlb_batch);
		return;
	}

	/*
	 * This can happen when we are in the middle of a TLB batch and
	 * we encounter memory pressure (eg copy_page_range when it tries
	 * to allocate a new pte). If we have to reclaim memory and end
	 * up scanning and resetting referenced bits then our batch context
	 * will change mid stream.
	 *
	 * We also need to ensure only one page size is present in a given
	 * batch
	 */
	if (i != 0 && (mm != batch->mm || batch->psize != psize ||
		       batch->ssize != ssize)) {
		__flush_tlb_pending(batch);
		i = 0;
	}
	if (i == 0) {
		batch->mm = mm;
		batch->psize = psize;
		batch->ssize = ssize;
	}
	batch->pte[i] = rpte;
	batch->vpn[i] = vpn;
	batch->index = ++i;
	if (i >= PPC64_TLB_BATCH_NR)
		__flush_tlb_pending(batch);
	put_cpu_var(ppc64_tlb_batch);
}
void net_srandom(unsigned long entropy)
{
	struct nrnd_state *state = &get_cpu_var(net_rand_state);

	__net_srandom(state, state->s1 ^ entropy);
	put_cpu_var(net_rand_state);
}
/*
 * Add an irq to the disabled mask. We disable the HW interrupt
 * immediately so that there's no possibility of it firing. If we're
 * in an interrupt context, the return path is careful to avoid
 * unmasking a newly disabled interrupt.
 */
void disable_percpu_irq(unsigned int irq)
{
	get_cpu_var(irq_disable_mask) |= (1UL << irq);
	mask_irqs(1UL << irq);
	put_cpu_var(irq_disable_mask);
}
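/*
 * The disable path above pairs with an enable path that clears the same
 * per-CPU bit and unmasks the interrupt. A minimal sketch follows, assuming
 * an unmask_irqs() helper that mirrors mask_irqs(); this is illustrative,
 * not the exact enable routine from this architecture.
 */
void enable_percpu_irq_sketch(unsigned int irq)
{
	/* Clear the bit in this CPU's disabled mask, then unmask the IRQ. */
	get_cpu_var(irq_disable_mask) &= ~(1UL << irq);
	unmask_irqs(1UL << irq);
	put_cpu_var(irq_disable_mask);
}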
inline struct sk_buff *skb_recycler_alloc(struct net_device *dev,
					  unsigned int length)
{
	unsigned long flags;
	struct sk_buff_head *h;
	struct sk_buff *skb = NULL;

	if (unlikely(length > SKB_RECYCLE_SIZE))
		return NULL;

	h = &get_cpu_var(recycle_list);
	local_irq_save(flags);
	skb = __skb_dequeue(h);
#ifdef CONFIG_SKB_RECYCLER_MULTI_CPU
	if (unlikely(!skb)) {
		uint8_t head;

		spin_lock(&glob_recycler.lock);
		/* If global recycle list is not empty, use global buffers */
		head = glob_recycler.head;
		if (likely(head != glob_recycler.tail)) {
			/* Move SKBs from global list to CPU pool */
			skb_queue_splice_init(&glob_recycler.pool[head], h);
			head = (head + 1) & SKB_RECYCLE_MAX_SHARED_POOLS_MASK;
			glob_recycler.head = head;
			spin_unlock(&glob_recycler.lock);
			/* We have refilled the CPU pool - dequeue */
			skb = __skb_dequeue(h);
		} else {
			spin_unlock(&glob_recycler.lock);
		}
	}
#endif
	local_irq_restore(flags);
	put_cpu_var(recycle_list);

	if (likely(skb)) {
		struct skb_shared_info *shinfo;

		/*
		 * We're about to write a large amount to the skb to
		 * zero most of the structure so prefetch the start
		 * of the shinfo region now so it's in the D-cache
		 * before we start to write that.
		 */
		shinfo = skb_shinfo(skb);
		prefetchw(shinfo);

		zero_struct(skb, offsetof(struct sk_buff, tail));
		skb->data = skb->head;
		skb_reset_tail_pointer(skb);
#ifdef NET_SKBUFF_DATA_USES_OFFSET
		skb->mac_header = ~0U;
#endif
		zero_struct(shinfo, offsetof(struct skb_shared_info, dataref));
		atomic_set(&shinfo->dataref, 1);

		skb_reserve(skb, NET_SKB_PAD);

		if (dev)
			skb->dev = dev;
	}

	return skb;
}
/*
 * Add an irq to the disabled mask. We disable the HW interrupt
 * immediately so that there's no possibility of it firing. If we're
 * in an interrupt context, the return path is careful to avoid
 * unmasking a newly disabled interrupt.
 */
static void tile_irq_chip_disable(struct irq_data *d)
{
	get_cpu_var(irq_disable_mask) |= (1UL << d->irq);
	mask_irqs(1UL << d->irq);
	put_cpu_var(irq_disable_mask);
}