Code Example #1
File: tlb.c  Project: romanbb/android_kernel_lge_d851
void flush_tlb_pending(void)
{
	struct tlb_batch *tb = &get_cpu_var(tlb_batch);

	if (tb->tlb_nr) {
		flush_tsb_user(tb);

		if (CTX_VALID(tb->mm->context)) {
#ifdef CONFIG_SMP
			smp_flush_tlb_pending(tb->mm, tb->tlb_nr,
					      &tb->vaddrs[0]);
#else
			__flush_tlb_pending(CTX_HWBITS(tb->mm->context),
					    tb->tlb_nr, &tb->vaddrs[0]);
#endif
		}
		tb->tlb_nr = 0;
	}

	put_cpu_var(tlb_batch);
}
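
Every example on this page revolves around the same per-CPU access pattern: get_cpu_var() disables preemption and evaluates to the calling CPU's instance of a variable declared with DEFINE_PER_CPU, and put_cpu_var() (which takes the variable name, not a pointer) re-enables preemption. A minimal sketch of the idiom, using a hypothetical per-CPU counter:

#include <linux/percpu.h>

/* Hypothetical per-CPU variable, shown only to illustrate the idiom. */
static DEFINE_PER_CPU(unsigned long, my_counter);

static void bump_my_counter(void)
{
	unsigned long *ctr = &get_cpu_var(my_counter);	/* preemption now off */

	(*ctr)++;			/* safe: we cannot migrate to another CPU here */
	put_cpu_var(my_counter);	/* preemption back on */
}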
Code Example #2
/*
 * The synchronous get_clock function. It will write the current clock
 * value to the clock pointer and return 0 if the clock is in sync with
 * the external time source. It returns -ENOSYS if no external time source
 * is available (local clock mode), -EACCES if an external reference is
 * present but not currently in use, and -EAGAIN if the clock is temporarily
 * not in sync with the external reference.
 */
int get_sync_clock(unsigned long long *clock)
{
	atomic_t *sw_ptr;
	unsigned int sw0, sw1;

	sw_ptr = &get_cpu_var(clock_sync_word);
	sw0 = atomic_read(sw_ptr);
	*clock = get_clock();
	sw1 = atomic_read(sw_ptr);
	put_cpu_var(clock_sync_word);
	if (sw0 == sw1 && (sw0 & 0x80000000U))
		/* Success: time is in sync. */
		return 0;
	if (!test_bit(CLOCK_SYNC_HAS_ETR, &clock_sync_flags) &&
	    !test_bit(CLOCK_SYNC_HAS_STP, &clock_sync_flags))
		return -ENOSYS;
	if (!test_bit(CLOCK_SYNC_ETR, &clock_sync_flags) &&
	    !test_bit(CLOCK_SYNC_STP, &clock_sync_flags))
		return -EACCES;
	return -EAGAIN;
}
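
A hedged sketch of how a caller might consume these return codes (use_timestamp() is a hypothetical placeholder, not part of the original source): -EAGAIN means the clock is only temporarily out of sync and the call can be retried, while -ENOSYS and -EACCES mean there is no usable external reference.

static int sample_synced_clock(void)
{
	unsigned long long clk;
	int rc;

	rc = get_sync_clock(&clk);
	if (rc == -EAGAIN)
		return rc;	/* temporarily out of sync, caller may retry */
	if (rc)
		return rc;	/* -ENOSYS or -EACCES: no usable external reference */
	use_timestamp(clk);	/* hypothetical consumer of the synced value */
	return 0;
}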
Code Example #3
void quicklist_trim(int nr, void (*dtor)(void *),
	unsigned long min_pages, unsigned long max_free)
{
	long pages_to_free;
	struct quicklist *q;

	q = &get_cpu_var(quicklist)[nr];
	if (q->nr_pages > min_pages) {
		pages_to_free = min_pages_to_free(q, min_pages, max_free);

		while (pages_to_free > 0) {
			void *p = quicklist_alloc(nr, 0, NULL);

			if (dtor)
				dtor(p);
			free_page((unsigned long)p);
			pages_to_free--;
		}
	}
	put_cpu_var(quicklist);
}
Code Example #4
File: capability.c  Project: Anjali05/linux
/**
 * audit_caps - audit a capability
 * @sa: audit data
 * @profile: profile being tested for confinement (NOT NULL)
 * @cap: capability tested
 * @error: error code returned by test
 *
 * Do auditing of the capability and handle audit/complain/kill mode
 * switching and duplicate message elimination.
 *
 * Returns: 0 or sa->error on success, error code on failure
 */
static int audit_caps(struct common_audit_data *sa, struct aa_profile *profile,
		      int cap, int error)
{
	struct audit_cache *ent;
	int type = AUDIT_APPARMOR_AUTO;

	aad(sa)->error = error;

	if (likely(!error)) {
		/* test if auditing is being forced */
		if (likely((AUDIT_MODE(profile) != AUDIT_ALL) &&
			   !cap_raised(profile->caps.audit, cap)))
			return 0;
		type = AUDIT_APPARMOR_AUDIT;
	} else if (KILL_MODE(profile) ||
		   cap_raised(profile->caps.kill, cap)) {
		type = AUDIT_APPARMOR_KILL;
	} else if (cap_raised(profile->caps.quiet, cap) &&
		   AUDIT_MODE(profile) != AUDIT_NOQUIET &&
		   AUDIT_MODE(profile) != AUDIT_ALL) {
		/* quiet auditing */
		return error;
	}

	/* Do simple duplicate message elimination */
	ent = &get_cpu_var(audit_cache);
	if (profile == ent->profile && cap_raised(ent->caps, cap)) {
		put_cpu_var(audit_cache);
		if (COMPLAIN_MODE(profile))
			return complain_error(error);
		return error;
	} else {
		aa_put_profile(ent->profile);
		ent->profile = aa_get_profile(profile);
		cap_raise(ent->caps, cap);
	}
	put_cpu_var(audit_cache);

	return aa_audit(type, profile, sa, audit_cb);
}
Code Example #5
File: file.c  Project: robcore/Alucard-Kernel-jfltexx
static void free_fdtable_rcu(struct rcu_head *rcu)
{
	struct fdtable *fdt = container_of(rcu, struct fdtable, rcu);
	struct fdtable_defer *fddef;

	BUG_ON(!fdt);
	BUG_ON(fdt->max_fds <= NR_OPEN_DEFAULT);

	if (!is_vmalloc_addr(fdt->fd) && !is_vmalloc_addr(fdt->open_fds)) {
		kfree(fdt->fd);
		kfree(fdt->open_fds);
		kfree(fdt);
	} else {
		fddef = &get_cpu_var(fdtable_defer_list);
		spin_lock(&fddef->lock);
		fdt->next = fddef->next;
		fddef->next = fdt;
		/* vmallocs are handled from the workqueue context */
		schedule_work(&fddef->wq);
		spin_unlock(&fddef->lock);
		put_cpu_var(fdtable_defer_list);
	}
}
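
The vmalloc'ed tables queued above are freed later from process context, because vfree() cannot run from the RCU (softirq) callback. A hedged sketch of what the deferred worker could look like; the field names follow the struct used above, and free_one_fdtable() stands in for the actual vfree-based free helper:

static void fdtable_defer_work(struct work_struct *work)
{
	struct fdtable_defer *fddef =
		container_of(work, struct fdtable_defer, wq);
	struct fdtable *fdt;

	/* Detach the per-CPU deferred list under the lock ... */
	spin_lock_bh(&fddef->lock);
	fdt = fddef->next;
	fddef->next = NULL;
	spin_unlock_bh(&fddef->lock);

	/* ... then free the tables outside of it. */
	while (fdt) {
		struct fdtable *next = fdt->next;

		free_one_fdtable(fdt);	/* stand-in for the vfree-based free */
		fdt = next;
	}
}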
Code Example #6
static void nmi_shutdown(void)
{
	struct op_msrs *msrs;

	cpu_notifier_register_begin();

	/* Use get/put_online_cpus() to protect 'nmi_enabled' & 'ctr_running' */
	get_online_cpus();
	on_each_cpu(nmi_cpu_shutdown, NULL, 1);
	nmi_enabled = 0;
	ctr_running = 0;
	__unregister_cpu_notifier(&oprofile_cpu_nb);
	put_online_cpus();

	cpu_notifier_register_done();

	/* make variables visible to the nmi handler: */
	smp_mb();
	unregister_nmi_handler(NMI_LOCAL, "oprofile");
	msrs = &get_cpu_var(cpu_msrs);
	model->shutdown(msrs);
	free_msrs();
	put_cpu_var(cpu_msrs);
}
Code Example #7
int runqueues_ok_cpu(unsigned cpu)
{
  int q, l = 0;
  register struct proc *xp;
  struct proc **rdy_head, **rdy_tail;

  rdy_head = get_cpu_var(cpu, run_q_head);
  rdy_tail = get_cpu_var(cpu, run_q_tail);

  for (xp = BEG_PROC_ADDR; xp < END_PROC_ADDR; ++xp) {
	xp->p_found = 0;
	if (l++ > MAX_LOOP) panic("check error");
  }

  for (q=l=0; q < NR_SCHED_QUEUES; q++) {
    if (rdy_head[q] && !rdy_tail[q]) {
	printf("head but no tail in %d\n", q);
	return 0;
    }
    if (!rdy_head[q] && rdy_tail[q]) {
	printf("tail but no head in %d\n", q);
	return 0;
    }
    if (rdy_tail[q] && rdy_tail[q]->p_nextready) {
	printf("tail and tail->next not null in %d\n", q);
	return 0;
    }
    for(xp = rdy_head[q]; xp; xp = xp->p_nextready) {
	const vir_bytes vxp = (vir_bytes) xp;
	vir_bytes dxp;
	if(vxp < (vir_bytes) BEG_PROC_ADDR || vxp >= (vir_bytes) END_PROC_ADDR) {
  		printf("xp out of range\n");
		return 0;
	}
	dxp = vxp - (vir_bytes) BEG_PROC_ADDR;
	if(dxp % sizeof(struct proc)) {
  		printf("xp not a real pointer");
		return 0;
	}
	if(!proc_ptr_ok(xp)) {
  		printf("xp bogus pointer");
		return 0;
	}
	if (RTS_ISSET(xp, RTS_SLOT_FREE)) {
		printf("scheduling error: dead proc q %d %d\n",
			q, xp->p_endpoint);
		return 0;
	}
        if (!proc_is_runnable(xp)) {
		printf("scheduling error: unready on runq %d proc %d\n",
			q, xp->p_nr);
		return 0;
        }
        if (xp->p_priority != q) {
		printf("scheduling error: wrong priority q %d proc %d ep %d name %s\n",
			q, xp->p_nr, xp->p_endpoint, xp->p_name);
		return 0;
	}
	if (xp->p_found) {
		printf("scheduling error: double sched q %d proc %d\n",
			q, xp->p_nr);
		return 0;
	}
	xp->p_found = 1;
	if (!xp->p_nextready && rdy_tail[q] != xp) {
		printf("sched err: last element not tail q %d proc %d\n",
			q, xp->p_nr);
		return 0;
	}
	if (l++ > MAX_LOOP) {
		printf("loop in schedule queue?");
		return 0;
	}
    }
  }	

  for (xp = BEG_PROC_ADDR; xp < END_PROC_ADDR; ++xp) {
	if(!proc_ptr_ok(xp)) {
		printf("xp bogus pointer in proc table\n");
		return 0;
	}
	if (isemptyp(xp))
		continue;
	if(proc_is_runnable(xp) && !xp->p_found) {
		printf("sched error: ready proc %d not on queue\n", xp->p_nr);
		return 0;
	}
  }

  /* All is ok. */
  return 1;
}
Code Example #8
File: irq.c  Project: CSCLOG/beaglebone
/*
 * Add an irq to the disabled mask.  We disable the HW interrupt
 * immediately so that there's no possibility of it firing.  If we're
 * in an interrupt context, the return path is careful to avoid
 * unmasking a newly disabled interrupt.
 */
void disable_percpu_irq(unsigned int irq)
{
	get_cpu_var(irq_disable_mask) |= (1UL << irq);
	mask_irqs(1UL << irq);
	put_cpu_var(irq_disable_mask);
}
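
For context, a sketch of the symmetric enable path implied by the comment above, assuming unmask_irqs() is the inverse of mask_irqs(); the helper names are taken from the snippet, but the function itself is a sketch rather than code from the original file:

void enable_percpu_irq(unsigned int irq)
{
	/* Sketch of a possible counterpart; not taken from the original file. */
	get_cpu_var(irq_disable_mask) &= ~(1UL << irq);
	unmask_irqs(1UL << irq);
	put_cpu_var(irq_disable_mask);
}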
Code Example #9
File: skbuff_recycle.c  Project: darcyg/ap_project_v2
inline struct sk_buff *skb_recycler_alloc(struct net_device *dev,
					  unsigned int length)
{
	unsigned long flags;
	struct sk_buff_head *h;
	struct sk_buff *skb = NULL;

	if (unlikely(length > SKB_RECYCLE_SIZE)) {
		return NULL;
	}

	h = &get_cpu_var(recycle_list);
	local_irq_save(flags);
	skb = __skb_dequeue(h);
#ifdef CONFIG_SKB_RECYCLER_MULTI_CPU
	if (unlikely(!skb)) {
		uint8_t head;
		spin_lock(&glob_recycler.lock);
		/* If global recycle list is not empty, use global buffers */
		head = glob_recycler.head;
		if (likely(head != glob_recycler.tail)) {
			/* Move SKBs from global list to CPU pool */
			skb_queue_splice_init(&glob_recycler.pool[head], h);
			head = (head + 1) & SKB_RECYCLE_MAX_SHARED_POOLS_MASK;
			glob_recycler.head = head;
			spin_unlock(&glob_recycler.lock);
			/* We have refilled the CPU pool - dequeue */
			skb = __skb_dequeue(h);
		} else {
			spin_unlock(&glob_recycler.lock);
		}
	}
#endif
	local_irq_restore(flags);
	put_cpu_var(recycle_list);

	if (likely(skb)) {
		struct skb_shared_info *shinfo;

		/*
		 * We're about to write a large amount to the skb to
		 * zero most of the structure so prefetch the start
		 * of the shinfo region now so it's in the D-cache
		 * before we start to write that.
		 */
		shinfo = skb_shinfo(skb);
		prefetchw(shinfo);

		zero_struct(skb, offsetof(struct sk_buff, tail));
		skb->data = skb->head;
		skb_reset_tail_pointer(skb);
#ifdef NET_SKBUFF_DATA_USES_OFFSET
		skb->mac_header = ~0U;
#endif
		zero_struct(shinfo, offsetof(struct skb_shared_info, dataref));
		atomic_set(&shinfo->dataref, 1);

		skb_reserve(skb, NET_SKB_PAD);

		if (dev) {
			skb->dev = dev;
		}
	}

	return skb;
}
Code Example #10
/*
 * A linux PTE was changed and the corresponding hash table entry
 * needs to be flushed. This function will either perform the flush
 * immediately or will batch it up if the current CPU has an active
 * batch on it.
 */
void hpte_need_flush(struct mm_struct *mm, unsigned long addr,
		     pte_t *ptep, unsigned long pte, int huge)
{
	struct ppc64_tlb_batch *batch = &get_cpu_var(ppc64_tlb_batch);
	unsigned long vsid, vaddr;
	unsigned int psize;
	int ssize;
	real_pte_t rpte;
	int i;

	i = batch->index;

	/* Get page size (maybe move back to caller).
	 *
	 * NOTE: when using special 64K mappings in 4K environment like
	 * for SPEs, we obtain the page size from the slice, which thus
Code Example #11
File: efi_64.c  Project: 3null/fastsocket
void efi_call_phys_prelog_in_physmode(void)
{
	local_irq_save(get_cpu_var(efi_flags));
	get_cpu_var(save_cr3) = read_cr3();
	write_cr3(virt_to_phys(efi_pgd));
}
Code Example #12
void tlb_batch_add(struct mm_struct *mm, unsigned long vaddr,
		   pte_t *ptep, pte_t orig, int fullmm)
{
	struct tlb_batch *tb = &get_cpu_var(tlb_batch);
	unsigned long nr;

	vaddr &= PAGE_MASK;
	if (pte_exec(orig))
		vaddr |= 0x1UL;

	if (tlb_type != hypervisor &&
	    pte_dirty(orig)) {
		unsigned long paddr, pfn = pte_pfn(orig);
		struct address_space *mapping;
		struct page *page;

		if (!pfn_valid(pfn))
			goto no_cache_flush;

		page = pfn_to_page(pfn);
		if (PageReserved(page))
			goto no_cache_flush;

		/* A real file page? */
		mapping = page_mapping(page);
		if (!mapping)
			goto no_cache_flush;

		paddr = (unsigned long) page_address(page);
		if ((paddr ^ vaddr) & (1 << 13))
			flush_dcache_page_all(mm, page);
	}

no_cache_flush:

	if (fullmm) {
		put_cpu_var(tlb_batch);
		return;
	}

	nr = tb->tlb_nr;

	if (unlikely(nr != 0 && mm != tb->mm)) {
		flush_tlb_pending();
		nr = 0;
	}

	if (!tb->active) {
		global_flush_tlb_page(mm, vaddr);
		flush_tsb_user_page(mm, vaddr);
		goto out;
	}

	if (nr == 0)
		tb->mm = mm;

	tb->vaddrs[nr] = vaddr;
	tb->tlb_nr = ++nr;
	if (nr >= TLB_BATCH_NR)
		flush_tlb_pending();

out:
	put_cpu_var(tlb_batch);
}
Code Example #13
struct caam_drv_ctx *caam_drv_ctx_init(struct device *qidev,
				       int *cpu,
				       u32 *sh_desc)
{
	size_t size;
	u32 num_words;
	dma_addr_t hwdesc;
	struct caam_drv_ctx *drv_ctx;
	const cpumask_t *cpus = qman_affine_cpus();
	static DEFINE_PER_CPU(int, last_cpu);

	num_words = desc_len(sh_desc);
	if (num_words > MAX_SDLEN) {
		dev_err(qidev, "Invalid descriptor len: %d words\n",
			num_words);
		return ERR_PTR(-EINVAL);
	}

	drv_ctx = kzalloc(sizeof(*drv_ctx), GFP_ATOMIC);
	if (!drv_ctx)
		return ERR_PTR(-ENOMEM);

	/*
	 * Initialise pre-header - set RSLS and SDLEN - and shared descriptor
	 * and dma-map them.
	 */
	drv_ctx->prehdr[0] = cpu_to_caam32((1 << PREHDR_RSLS_SHIFT) |
					   num_words);
	memcpy(drv_ctx->sh_desc, sh_desc, desc_bytes(sh_desc));
	size = sizeof(drv_ctx->prehdr) + sizeof(drv_ctx->sh_desc);
	hwdesc = dma_map_single(qidev, drv_ctx->prehdr, size,
				DMA_BIDIRECTIONAL);
	if (dma_mapping_error(qidev, hwdesc)) {
		dev_err(qidev, "DMA map error for preheader + shdesc\n");
		kfree(drv_ctx);
		return ERR_PTR(-ENOMEM);
	}
	drv_ctx->context_a = hwdesc;

	/* If given CPU does not own the portal, choose another one that does */
	if (!cpumask_test_cpu(*cpu, cpus)) {
		int *pcpu = &get_cpu_var(last_cpu);

		*pcpu = cpumask_next(*pcpu, cpus);
		if (*pcpu >= nr_cpu_ids)
			*pcpu = cpumask_first(cpus);
		*cpu = *pcpu;

		put_cpu_var(last_cpu);
	}
	drv_ctx->cpu = *cpu;

	/* Find response FQ hooked with this CPU */
	drv_ctx->rsp_fq = per_cpu(pcpu_qipriv.rsp_fq, drv_ctx->cpu);

	/* Attach request FQ */
	drv_ctx->req_fq = create_caam_req_fq(qidev, drv_ctx->rsp_fq, hwdesc,
					     QMAN_INITFQ_FLAG_SCHED);
	if (unlikely(IS_ERR_OR_NULL(drv_ctx->req_fq))) {
		dev_err(qidev, "create_caam_req_fq failed\n");
		dma_unmap_single(qidev, hwdesc, size, DMA_BIDIRECTIONAL);
		kfree(drv_ctx);
		return ERR_PTR(-ENOMEM);
	}

	drv_ctx->qidev = qidev;
	return drv_ctx;
}
Code Example #14
static int
restart_trace(struct notifier_block *nfb, unsigned long action, void *hcpu)
{
	unsigned int cpu = (unsigned long)hcpu;
	unsigned int cpu_ix = 0;
	volatile int *pwr_down;

	switch (action) {
	case CPU_STARTING:
		switch (cpu) {
		case 1:
		case 2:
		case 3:
		case 4:
		case 5:
		case 6:
		case 7:
			pwr_down = &get_cpu_var(trace_pwr_down);
			if (*pwr_down) {
				trace_start_by_cpus(cpumask_of(cpu), 0);
				*pwr_down = 0;
			}
			put_cpu_var(trace_pwr_down);
			break;

		default:
			break;
		}

		break;

	case CPU_DYING:
		switch (cpu) {
		case 4:
		case 5:
		case 6:
		case 7:
			if (num_possible_cpus() > 4) {
				if (!cpu_online(4) && !cpu_online(5) &&
				    !cpu_online(6) && !cpu_online(7)) {
					/* num_possible_cpus() could be 4, 6 or 8 */
					for (cpu_ix = 4; cpu_ix < num_possible_cpus(); cpu_ix++)
						per_cpu(trace_pwr_down, cpu_ix) = 1;
				}
			}
			break;

		default:
			break;
		}

		break;

	default:
		break;
	}

	return NOTIFY_OK;
}
Code Example #15
File: efi_64.c  Project: 3null/fastsocket
void efi_call_phys_epilog_in_physmode(void)
{
	write_cr3(get_cpu_var(save_cr3));
	local_irq_restore(get_cpu_var(efi_flags));
}
Code Example #16
File: bman.c  Project: AshishNamdev/linux
static inline struct bman_portal *get_affine_portal(void)
{
	return &get_cpu_var(bman_affine_portal);
}
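
The matching release helper is, as one would expect, a one-line put_cpu_var() wrapper; shown here as a sketch based on the usual get/put pairing in this driver:

static inline void put_affine_portal(void)
{
	put_cpu_var(bman_affine_portal);
}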
Code Example #17
/* attempts to compress and store a single page */
static int zswap_frontswap_store(unsigned type, pgoff_t offset,
				struct page *page)
{
	struct zswap_tree *tree = zswap_trees[type];
	struct zswap_entry *entry, *dupentry;
	int ret;
	unsigned int dlen = PAGE_SIZE, len;
	unsigned long handle;
	char *buf;
	u8 *src, *dst;
	struct zswap_header *zhdr;

	if (!tree) {
		ret = -ENODEV;
		goto reject;
	}

	/* reclaim space if needed */
	if (zswap_is_full()) {
		zswap_pool_limit_hit++;
		if (zbud_reclaim_page(tree->pool, 8)) {
			zswap_reject_reclaim_fail++;
			ret = -ENOMEM;
			goto reject;
		}
	}

	/* allocate entry */
	entry = zswap_entry_cache_alloc(GFP_KERNEL);
	if (!entry) {
		zswap_reject_kmemcache_fail++;
		ret = -ENOMEM;
		goto reject;
	}

	/* compress */
	dst = get_cpu_var(zswap_dstmem);
	src = kmap_atomic(page);
	ret = zswap_comp_op(ZSWAP_COMPOP_COMPRESS, src, PAGE_SIZE, dst, &dlen);
	kunmap_atomic(src);
	if (ret) {
		ret = -EINVAL;
		goto freepage;
	}

	/* store */
	len = dlen + sizeof(struct zswap_header);
	ret = zbud_alloc(tree->pool, len, __GFP_NORETRY | __GFP_NOWARN,
		&handle);
	if (ret == -ENOSPC) {
		zswap_reject_compress_poor++;
		goto freepage;
	}
	if (ret) {
		zswap_reject_alloc_fail++;
		goto freepage;
	}
	zhdr = zbud_map(tree->pool, handle);
	zhdr->swpentry = swp_entry(type, offset);
	buf = (u8 *)(zhdr + 1);
	memcpy(buf, dst, dlen);
	zbud_unmap(tree->pool, handle);
	put_cpu_var(zswap_dstmem);

	/* populate entry */
	entry->offset = offset;
	entry->handle = handle;
	entry->length = dlen;

	/* map */
	spin_lock(&tree->lock);
	do {
		ret = zswap_rb_insert(&tree->rbroot, entry, &dupentry);
		if (ret == -EEXIST) {
			zswap_duplicate_entry++;
			/* remove from rbtree */
			zswap_rb_erase(&tree->rbroot, dupentry);
			zswap_entry_put(tree, dupentry);
		}
	} while (ret == -EEXIST);
	spin_unlock(&tree->lock);

	/* update stats */
	atomic_inc(&zswap_stored_pages);
	zswap_pool_pages = zbud_get_pool_size(tree->pool);

	return 0;

freepage:
	put_cpu_var(zswap_dstmem);
	zswap_entry_cache_free(entry);
reject:
	return ret;
}
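
The zswap_dstmem scratch buffer used above is a per-CPU pointer to a preallocated compression destination. A sketch of the assumed declaration and per-CPU setup follows; the helper name and the two-page buffer size are assumptions, the extra page leaving headroom for data that compresses poorly:

static DEFINE_PER_CPU(u8 *, zswap_dstmem);

/* Hypothetical per-CPU setup helper, e.g. run from a CPU hotplug callback. */
static int zswap_dstmem_prepare(unsigned int cpu)
{
	u8 *dst = kmalloc(PAGE_SIZE * 2, GFP_KERNEL);

	if (!dst)
		return -ENOMEM;
	per_cpu(zswap_dstmem, cpu) = dst;
	return 0;
}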
Code Example #18
File: tlb_hash64.c  Project: quadcores/cbs_4.2.4
/*
 * A linux PTE was changed and the corresponding hash table entry
 * needs to be flushed. This function will either perform the flush
 * immediately or will batch it up if the current CPU has an active
 * batch on it.
 */
void hpte_need_flush(struct mm_struct *mm, unsigned long addr,
                     pte_t *ptep, unsigned long pte, int huge)
{
    unsigned long vpn;
    struct ppc64_tlb_batch *batch = &get_cpu_var(ppc64_tlb_batch);
    unsigned long vsid;
    unsigned int psize;
    int ssize;
    real_pte_t rpte;
    int i;

    i = batch->index;

    /* Get page size (maybe move back to caller).
     *
     * NOTE: when using special 64K mappings in 4K environment like
     * for SPEs, we obtain the page size from the slice, which thus
     * must still exist (and thus the VMA not reused) at the time
     * of this call
     */
    if (huge) {
#ifdef CONFIG_HUGETLB_PAGE
        psize = get_slice_psize(mm, addr);
        /* Mask the address for the correct page size */
        addr &= ~((1UL << mmu_psize_defs[psize].shift) - 1);
#else
        BUG();
        psize = pte_pagesize_index(mm, addr, pte); /* shutup gcc */
#endif
    } else {
        psize = pte_pagesize_index(mm, addr, pte);
        /* Mask the address for the standard page size.  If we
         * have a 64k page kernel, but the hardware does not
         * support 64k pages, this might be different from the
         * hardware page size encoded in the slice table. */
        addr &= PAGE_MASK;
    }


    /* Build full vaddr */
    if (!is_kernel_addr(addr)) {
        ssize = user_segment_size(addr);
        vsid = get_vsid(mm->context.id, addr, ssize);
    } else {
        vsid = get_kernel_vsid(addr, mmu_kernel_ssize);
        ssize = mmu_kernel_ssize;
    }
    WARN_ON(vsid == 0);
    vpn = hpt_vpn(addr, vsid, ssize);
    rpte = __real_pte(__pte(pte), ptep);

    /*
     * Check if we have an active batch on this CPU. If not, just
     * flush now and return. For now, we do global invalidates
     * in that case, might be worth testing the mm cpu mask though
     * and decide to use local invalidates instead...
     */
    if (!batch->active) {
        flush_hash_page(vpn, rpte, psize, ssize, 0);
        put_cpu_var(ppc64_tlb_batch);
        return;
    }

    /*
     * This can happen when we are in the middle of a TLB batch and
     * we encounter memory pressure (eg copy_page_range when it tries
     * to allocate a new pte). If we have to reclaim memory and end
     * up scanning and resetting referenced bits then our batch context
     * will change mid stream.
     *
     * We also need to ensure only one page size is present in a given
     * batch
     */
    if (i != 0 && (mm != batch->mm || batch->psize != psize ||
                   batch->ssize != ssize)) {
        __flush_tlb_pending(batch);
        i = 0;
    }
    if (i == 0) {
        batch->mm = mm;
        batch->psize = psize;
        batch->ssize = ssize;
    }
    batch->pte[i] = rpte;
    batch->vpn[i] = vpn;
    batch->index = ++i;
    if (i >= PPC64_TLB_BATCH_NR)
        __flush_tlb_pending(batch);
    put_cpu_var(ppc64_tlb_batch);
}
Code Example #19
File: utils.c  Project: BackupTheBerlios/arp2-svn
void net_srandom(unsigned long entropy)
{
	struct nrnd_state *state = &get_cpu_var(net_rand_state);
	__net_srandom(state, state->s1^entropy);
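	/*
	 * Note: put_cpu_var() only re-enables preemption; it is conventionally
	 * passed the per-CPU variable name (net_rand_state), but the argument
	 * is effectively ignored, so passing the local pointer still compiles.
	 */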
	put_cpu_var(state);
}
Code Example #20
File: arch_clock.c  Project: CesarBallardini/minix3
PUBLIC int init_local_timer(unsigned freq)
{
#ifdef USE_APIC
	/* if we know the address, lapic is enabled and we should use it */
	if (lapic_addr) {
		unsigned cpu = cpuid;
		tsc_per_ms[cpu] = div64u(cpu_get_freq(cpu), 1000);
		lapic_set_timer_one_shot(1000000/system_hz);
	} else
	{
		BOOT_VERBOSE(printf("Initiating legacy i8253 timer\n"));
#else
	{
#endif
		init_8253A_timer(freq);
		estimate_cpu_freq();
		/* always only 1 cpu in the system */
		tsc_per_ms[0] = div64u(cpu_get_freq(0), 1000);
	}

	return 0;
}

PUBLIC void stop_local_timer(void)
{
#ifdef USE_APIC
	if (lapic_addr) {
		lapic_stop_timer();
		apic_eoi();
	} else
#endif
	{
		stop_8253A_timer();
	}
}

PUBLIC void restart_local_timer(void)
{
#ifdef USE_APIC
	if (lapic_addr) {
		lapic_restart_timer();
	}
#endif
}

PUBLIC int register_local_timer_handler(const irq_handler_t handler)
{
#ifdef USE_APIC
	if (lapic_addr) {
		/* Using APIC, it is configured in apic_idt_init() */
		BOOT_VERBOSE(printf("Using LAPIC timer as tick source\n"));
	} else
#endif
	{
		/* Using PIC, Initialize the CLOCK's interrupt hook. */
		pic_timer_hook.proc_nr_e = NONE;
		pic_timer_hook.irq = CLOCK_IRQ;

		put_irq_handler(&pic_timer_hook, CLOCK_IRQ, handler);
	}

	return 0;
}

PUBLIC void cycles_accounting_init(void)
{
#ifdef CONFIG_SMP
	unsigned cpu = cpuid;
#endif

	read_tsc_64(get_cpu_var_ptr(cpu, tsc_ctr_switch));

	make_zero64(get_cpu_var(cpu, cpu_last_tsc));
	make_zero64(get_cpu_var(cpu, cpu_last_idle));
}
Code Example #21
static inline void get_seq(__u32 *ts, int *cpu)
{
	*ts = get_cpu_var(proc_event_counts)++;
	*cpu = smp_processor_id();
	put_cpu_var(proc_event_counts);
}
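
The read-modify-write above relies on a per-CPU sequence counter; a sketch of the assumed backing declaration (the zero initializer is an assumption). Because each CPU keeps its own counter, no lock is needed, and the (sequence, cpu) pair still orders events uniquely.

/* One event sequence number per CPU, so get_seq() needs no lock. */
static DEFINE_PER_CPU(__u32, proc_event_counts) = 0;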
Code Example #22
/* attempts to compress and store a single page */
static int zswap_frontswap_store(unsigned type, pgoff_t offset,
				struct page *page)
{
	struct zswap_tree *tree = zswap_trees[type];
	struct zswap_entry *entry, *dupentry;
	int ret;
	unsigned int dlen = PAGE_SIZE, len;
	unsigned long handle;
	char *buf;
	u8 *src, *dst;
	struct zswap_header *zhdr;

	if (!tree) {
		ret = -ENODEV;
		goto reject;
	}

	/* if this page got EIO on pageout before, give up immediately */
	if (PageError(page)) {
		ret = -ENOMEM;
		goto reject;
	}

	/* reclaim space if needed */
	if (zswap_is_full()) {
		zswap_pool_limit_hit++;
		if (zpool_shrink(zswap_pool, 1, NULL)) {
			zswap_reject_reclaim_fail++;
			ret = -ENOMEM;
			goto reject;
		}
	}

	/* allocate entry */
	entry = zswap_entry_cache_alloc(GFP_KERNEL);
	if (!entry) {
		zswap_reject_kmemcache_fail++;
		ret = -ENOMEM;
		goto reject;
	}

	/* compress */
	src = kmap_atomic(page);
	if (page_zero_filled(src)) {
		atomic_inc(&zswap_zero_pages);
		entry->zero_flag = 1;
		kunmap_atomic(src);

		handle = 0;
		dlen = PAGE_SIZE;
		goto zeropage_out;
	}
	dst = get_cpu_var(zswap_dstmem);

	ret = zswap_comp_op(ZSWAP_COMPOP_COMPRESS, src, PAGE_SIZE, dst, &dlen);
	kunmap_atomic(src);
	if (ret) {
		ret = -EINVAL;
		goto freepage;
	}

	/* store */
	len = dlen + sizeof(struct zswap_header);
	ret = zpool_malloc(zswap_pool, len, __GFP_NORETRY | __GFP_NOWARN,
		&handle);
	if (ret == -ENOSPC) {
		zswap_reject_compress_poor++;
		goto freepage;
	}
	if (ret) {
		zswap_reject_alloc_fail++;
		goto freepage;
	}
	zhdr = zpool_map_handle(zswap_pool, handle, ZPOOL_MM_RW);
	zhdr->swpentry = swp_entry(type, offset);
	buf = (u8 *)(zhdr + 1);
	memcpy(buf, dst, dlen);
	zpool_unmap_handle(zswap_pool, handle);
	put_cpu_var(zswap_dstmem);

zeropage_out:
	/* populate entry */
	entry->offset = offset;
	entry->handle = handle;
	entry->length = dlen;

	/* map */
	spin_lock(&tree->lock);
	do {
		ret = zswap_rb_insert(&tree->rbroot, entry, &dupentry);
		if (ret == -EEXIST) {
			zswap_duplicate_entry++;
			/* remove from rbtree */
			zswap_rb_erase(&tree->rbroot, dupentry);
			zswap_entry_put(tree, dupentry);
		}
	} while (ret == -EEXIST);
	spin_unlock(&tree->lock);

	/* update stats */
	atomic_inc(&zswap_stored_pages);
	zswap_pool_total_size = zpool_get_total_size(zswap_pool);
	zswap_pool_pages = zswap_pool_total_size >> PAGE_SHIFT;

	return 0;

freepage:
	put_cpu_var(zswap_dstmem);
	zswap_entry_cache_free(entry);
reject:
	return ret;
}
Code Example #23
/*
 * Add an irq to the disabled mask.  We disable the HW interrupt
 * immediately so that there's no possibility of it firing.  If we're
 * in an interrupt context, the return path is careful to avoid
 * unmasking a newly disabled interrupt.
 */
static void tile_irq_chip_disable(struct irq_data *d)
{
    get_cpu_var(irq_disable_mask) |= (1UL << d->irq);
    mask_irqs(1UL << d->irq);
    put_cpu_var(irq_disable_mask);
}