Example #1
void vfp_sync_hwstate(struct thread_info *thread)
{
	unsigned int cpu = get_cpu();

	/*
	 * If the thread we're interested in is the current owner of the
	 * hardware VFP state, then we need to save its state.
	 */
	if (vfp_current_hw_state[cpu] == &thread->vfpstate) {
		u32 fpexc = fmrx(FPEXC);

		/*
		 * Save the last VFP state on this CPU.
		 */
		fmxr(FPEXC, fpexc | FPEXC_EN);
		vfp_save_state(&thread->vfpstate, fpexc | FPEXC_EN);
		fmxr(FPEXC, fpexc);
	}

	put_cpu();
}
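Example #2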
static int od_init(struct dbs_data *dbs_data, bool notify)
{
	struct od_dbs_tuners *tuners;
	u64 idle_time;
	int cpu;

	tuners = kzalloc(sizeof(*tuners), GFP_KERNEL);
	if (!tuners) {
		pr_err("%s: kzalloc failed\n", __func__);
		return -ENOMEM;
	}

	cpu = get_cpu();
	idle_time = get_cpu_idle_time_us(cpu, NULL);
	put_cpu();
	if (idle_time != -1ULL) {
		/* Idle micro accounting is supported. Use finer thresholds */
		tuners->up_threshold = MICRO_FREQUENCY_UP_THRESHOLD;
		/*
		 * In nohz/micro accounting case we set the minimum frequency
		 * not depending on HZ, but fixed (very low). The deferred
		 * timer might skip some samples if idle/sleeping as needed.
		 */
		dbs_data->min_sampling_rate = MICRO_FREQUENCY_MIN_SAMPLE_RATE;
	} else {
		tuners->up_threshold = DEF_FREQUENCY_UP_THRESHOLD;

		/* For correct statistics, we need 10 ticks for each measure */
		dbs_data->min_sampling_rate = MIN_SAMPLING_RATE_RATIO *
					      jiffies_to_usecs(10);
	}

	tuners->sampling_down_factor = DEF_SAMPLING_DOWN_FACTOR;
	tuners->ignore_nice_load = 0;
	tuners->powersave_bias = default_powersave_bias;
	tuners->io_is_busy = should_io_be_busy();

	dbs_data->tuners = tuners;
	return 0;
}
Example #3
irqreturn_t
handle_IPI (int irq, void *dev_id)
{
	int this_cpu = get_cpu();
	unsigned long *pending_ipis = &__ia64_per_cpu_var(ipi_operation);
	unsigned long ops;

	mb();	/* Order interrupt and bit testing. */
	while ((ops = xchg(pending_ipis, 0)) != 0) {
		mb();	/* Order bit clearing and data access. */
		do {
			unsigned long which;

			which = ffz(~ops);
			ops &= ~(1 << which);

			switch (which) {
			case IPI_CALL_FUNC:
				handle_call_data();
				break;

			case IPI_CPU_STOP:
				stop_this_cpu();
				break;
#ifdef CONFIG_KEXEC
			case IPI_KDUMP_CPU_STOP:
				unw_init_running(kdump_cpu_freeze, NULL);
				break;
#endif
			default:
				printk(KERN_CRIT "Unknown IPI on CPU %d: %lu\n",
						this_cpu, which);
				break;
			}
		} while (ops);
		mb();	/* Order data access and bit testing. */
	}
	put_cpu();
	return IRQ_HANDLED;
}
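Example #4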
static void msm_spm_smp_set_vdd(void *data)
{
	struct msm_spm_device *dev;
	struct msm_spm_vdd_info *info = (struct msm_spm_vdd_info *)data;

	if (msm_spm_L2_apcs_master)
		dev = &msm_spm_l2_device;
	else
		dev = &per_cpu(msm_cpu_spm_device, info->cpu);

	if (!dev->initialized)
		return;

	if (msm_spm_L2_apcs_master)
		get_cpu();	/* used only to disable preemption; the CPU id is unused */

	dev->cpu_vdd = info->vlevel;
	info->err = msm_spm_drv_set_vdd(&dev->reg_data, info->vlevel);

	if (msm_spm_L2_apcs_master)
		put_cpu();
}
Example #5
void ovs_flow_stats_clear(struct sw_flow *flow)
{
	int cpu, cur_cpu;

	if (!flow->stats.is_percpu) {
		stats_reset(flow->stats.stat);
	} else {
		cur_cpu = get_cpu();

		for_each_possible_cpu(cpu) {

			if (cpu == cur_cpu)
				local_bh_disable();

			stats_reset(per_cpu_ptr(flow->stats.cpu_stats, cpu));

			if (cpu == cur_cpu)
				local_bh_enable();
		}
		put_cpu();
	}
}
Example #6
/*
 * this changes the io permissions bitmap in the current task.
 */
asmlinkage long sys_ioperm(unsigned long from, unsigned long num, int turn_on)
{
	struct thread_struct * t = &current->thread;
	struct tss_struct * tss;
	unsigned long *bitmap;

	if ((from + num <= from) || (from + num > IO_BITMAP_BITS))
		return -EINVAL;
	if (turn_on && !capable(CAP_SYS_RAWIO))
		return -EPERM;

	/*
	 * If it's the first ioperm() call in this thread's lifetime, set the
	 * IO bitmap up. ioperm() is much less timing critical than clone(),
	 * this is why we delay this operation until now:
	 */
	if (!t->io_bitmap_ptr) {
		bitmap = kmalloc(IO_BITMAP_BYTES, GFP_KERNEL);
		if (!bitmap)
			return -ENOMEM;

		memset(bitmap, 0xff, IO_BITMAP_BYTES);
		t->io_bitmap_ptr = bitmap;
	}

	/*
	 * do it in the per-thread copy and in the TSS ...
	 */
	set_bitmap(t->io_bitmap_ptr, from, num, !turn_on);
	tss = init_tss + get_cpu();
	if (tss->io_bitmap_base == IO_BITMAP_OFFSET) { /* already active? */
		set_bitmap(tss->io_bitmap, from, num, !turn_on);
	} else {
		memcpy(tss->io_bitmap, t->io_bitmap_ptr, IO_BITMAP_BYTES);
		tss->io_bitmap_base = IO_BITMAP_OFFSET; /* Activate it in the TSS */
	}
	put_cpu();
	return 0;
}
Example #7
static void profile_flip_buffers(void)
{
	int i, j, cpu;

	down(&profile_flip_mutex);
	j = per_cpu(cpu_profile_flip, get_cpu());
	put_cpu();
	on_each_cpu(__profile_flip_buffers, NULL, 0, 1);
	for_each_online_cpu(cpu) {
		struct profile_hit *hits = per_cpu(cpu_profile_hits, cpu)[j];
		for (i = 0; i < NR_PROFILE_HIT; ++i) {
			if (!hits[i].hits) {
				if (hits[i].pc)
					hits[i].pc = 0;
				continue;
			}
			atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]);
			hits[i].hits = hits[i].pc = 0;
		}
	}
	up(&profile_flip_mutex);
}
Example #8
void vfp_flush_hwstate(struct thread_info *thread)
{
	unsigned int cpu = get_cpu();

	/*
	 * If the thread we're interested in is the current owner of the
	 * hardware VFP state, then we need to invalidate that state.
	 */
	if (last_VFP_context[cpu] == &thread->vfpstate) {
		u32 fpexc = fmrx(FPEXC);

		fmxr(FPEXC, fpexc & ~FPEXC_EN);

		/*
		 * Set the context to NULL to force a reload the next time
		 * the thread uses the VFP.
		 */
		last_VFP_context[cpu] = NULL;
	}

	put_cpu();
}
Example #9
static void raise_mce(struct mce *m)
{
	int context = MCJ_CTX(m->inject_flags);

	inject_mce(m);

	if (context == MCJ_CTX_RANDOM)
		return;

#ifdef CONFIG_X86_LOCAL_APIC
	if (m->inject_flags & MCJ_NMI_BROADCAST) {
		unsigned long start;
		int cpu;
		get_online_cpus();
		mce_inject_cpumask = cpu_online_map;
		cpu_clear(get_cpu(), mce_inject_cpumask);
		for_each_online_cpu(cpu) {
			struct mce *mcpu = &per_cpu(injectm, cpu);
			if (!mcpu->finished ||
			    MCJ_CTX(mcpu->inject_flags) != MCJ_CTX_RANDOM)
				cpu_clear(cpu, mce_inject_cpumask);
		}
		if (!cpus_empty(mce_inject_cpumask))
			apic->send_IPI_mask(&mce_inject_cpumask, NMI_VECTOR);
		start = jiffies;
		while (!cpus_empty(mce_inject_cpumask)) {
			if (!time_before(jiffies, start + 2*HZ)) {
				printk(KERN_ERR
				"Timeout waiting for mce inject NMI %lx\n",
					*cpus_addr(mce_inject_cpumask));
				break;
			}
			cpu_relax();
		}
		raise_local();
		put_cpu();
		put_online_cpus();
	} else
#endif
		raise_local();
}
Example #10
static inline int zcache_comp_op(enum comp_op op,
				const u8 *src, unsigned int slen,
				u8 *dst, unsigned int *dlen)
{
	struct crypto_comp *tfm;
	int ret = -1;

	BUG_ON(!zcache_comp_pcpu_tfms);
	tfm = *per_cpu_ptr(zcache_comp_pcpu_tfms, get_cpu());
	BUG_ON(!tfm);
	switch (op) {
	case ZCACHE_COMPOP_COMPRESS:
		ret = crypto_comp_compress(tfm, src, slen, dst, dlen);
		break;
	case ZCACHE_COMPOP_DECOMPRESS:
		ret = crypto_comp_decompress(tfm, src, slen, dst, dlen);
		break;
	default:
		ret = -EINVAL;
	}
	put_cpu();
	return ret;
}
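Example #11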
/*
 * Free current thread data structures etc..
 */
void exit_thread(void)
{
	struct task_struct *me = current;
	struct thread_struct *t = &me->thread;
	unsigned long *bp = t->io_bitmap_ptr;
	struct fpu *fpu = &t->fpu;

	if (bp) {
		struct tss_struct *tss = &per_cpu(cpu_tss, get_cpu());

		t->io_bitmap_ptr = NULL;
		clear_thread_flag(TIF_IO_BITMAP);
		/*
		 * Careful, clear this in the TSS too:
		 */
		memset(tss->io_bitmap, 0xff, t->io_bitmap_max);
		t->io_bitmap_max = 0;
		put_cpu();
		kfree(bp);
	}

	fpu__drop(fpu);
}
Example #12
void __flush_tlb_pending(struct ppc64_tlb_batch *batch)
{
	int i;
	int cpu;
	cpumask_t tmp;
	int local = 0;

	BUG_ON(in_interrupt());

	cpu = get_cpu();
	i = batch->index;
	tmp = cpumask_of_cpu(cpu);
	if (cpus_equal(batch->mm->cpu_vm_mask, tmp))
		local = 1;

	if (i == 1)
		flush_hash_page(batch->context, batch->addr[0], batch->pte[0],
				local);
	else
		flush_hash_range(batch->context, i, local);
	batch->index = 0;
	put_cpu();
}
Example #13
/*
 * Free current thread data structures etc..
 */
void exit_thread(void)
{
	/* The process may have allocated an io port bitmap... nuke it. */
	if (unlikely(test_thread_flag(TIF_IO_BITMAP))) {
		struct task_struct *tsk = current;
		struct thread_struct *t = &tsk->thread;
		int cpu = get_cpu();
		struct tss_struct *tss = &per_cpu(init_tss, cpu);

		kfree(t->io_bitmap_ptr);
		t->io_bitmap_ptr = NULL;
		clear_thread_flag(TIF_IO_BITMAP);
		/*
		 * Careful, clear this in the TSS too:
		 */
		memset(tss->io_bitmap, 0xff, tss->io_bitmap_max);
		t->io_bitmap_max = 0;
		tss->io_bitmap_owner = NULL;
		tss->io_bitmap_max = 0;
		tss->x86_tss.io_bitmap_base = INVALID_IO_BITMAP_OFFSET;
		put_cpu();
	}
}
Example #14
void ovs_flow_stats_clear(struct sw_flow *flow)
{
	int cpu, cur_cpu;

	cur_cpu = get_cpu();
	for_each_possible_cpu(cpu) {
		struct sw_flow_stats *stats = &flow->stats[cpu];

		if (cpu == cur_cpu)
			local_bh_disable();

		spin_lock(&stats->lock);
		stats->used = 0;
		stats->packet_count = 0;
		stats->byte_count = 0;
		stats->tcp_flags = 0;
		spin_unlock(&stats->lock);

		if (cpu == cur_cpu)
			local_bh_enable();
	}
	put_cpu();
}
Example #15
void vfp_pm_save_context(void)
{
	u32 fpexc = fmrx(FPEXC);
	unsigned int cpu = get_cpu();

	/* Save last_VFP_context if needed */
	if (last_VFP_context[cpu]) {
		/* Enable vfp to save context */
		if (!(fpexc & FPEXC_EN)) {
			vfp_enable(NULL);
			fmxr(FPEXC, fpexc | FPEXC_EN);
		}

		vfp_save_state(last_VFP_context[cpu], fpexc);

		/* disable, just in case */
		fmxr(FPEXC, fpexc & ~FPEXC_EN);

		last_VFP_context[cpu] = NULL;
	}

	put_cpu();
}
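Example #16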
static void set_tls_desc(struct task_struct *p, int idx,
			 const struct user_desc *info, int n)
{
	struct thread_struct *t = &p->thread;
	struct desc_struct *desc = &t->tls_array[idx - GDT_ENTRY_TLS_MIN];
	int cpu;

	cpu = get_cpu();

	while (n-- > 0) {
		if (LDT_empty(info))
			desc->a = desc->b = 0;
		else
			fill_ldt(desc, info);
		++info;
		++desc;
	}

	if (t == &current->thread)
		load_TLS(t, cpu);

	put_cpu();
}
Example #17
static enum hrtimer_restart msm_idle_stats_timer(struct hrtimer *timer)
{
	struct msm_idle_stats_device *stats_dev;
	unsigned int cpu;
	int64_t now;
	int64_t interval;

	stats_dev = container_of(timer, struct msm_idle_stats_device, timer);
	cpu = get_cpu();

	if (cpu != stats_dev->cpu) {
		if (msm_idle_stats_debug_mask & MSM_IDLE_STATS_DEBUG_MIGRATION)
			pr_info("%s: timer migrated from cpu%u to cpu%u\n",
				__func__, stats_dev->cpu, cpu);

		stats_dev->stats.event = MSM_IDLE_STATS_EVENT_TIMER_MIGRATED;
		goto timer_exit;
	}

	now = ktime_to_us(ktime_get());
	interval = now - stats_dev->stats.last_busy_start;

	if (stats_dev->stats.busy_timer > 0 &&
			interval >= stats_dev->stats.busy_timer - 1)
		stats_dev->stats.event =
			MSM_IDLE_STATS_EVENT_BUSY_TIMER_EXPIRED;
	else
		stats_dev->stats.event =
			MSM_IDLE_STATS_EVENT_COLLECTION_TIMER_EXPIRED;

timer_exit:
	atomic_set(&stats_dev->collecting, 0);
	wake_up_interruptible(&stats_dev->wait_q);

	put_cpu();
	return HRTIMER_NORESTART;
}
Example #18
static void __init
check_sal_cache_flush (void)
{
	unsigned long flags;
	int cpu;
	u64 vector;

	cpu = get_cpu();
	local_irq_save(flags);

	/*
	 * Schedule a timer interrupt, wait until it's reported, and see if
	 * SAL_CACHE_FLUSH drops it.
	 */
	ia64_set_itv(IA64_TIMER_VECTOR);
	ia64_set_itm(ia64_get_itc() + 1000);

	while (!ia64_get_irr(IA64_TIMER_VECTOR))
		cpu_relax();

	ia64_sal_cache_flush(3);

	if (ia64_get_irr(IA64_TIMER_VECTOR)) {
		vector = ia64_get_ivr();
		ia64_eoi();
		WARN_ON(vector != IA64_TIMER_VECTOR);
	} else {
		sal_cache_flush_drops_interrupts = 1;
		printk(KERN_ERR "SAL: SAL_CACHE_FLUSH drops interrupts; "
			"PAL_CACHE_FLUSH will be used instead\n");
		ia64_eoi();
	}

	local_irq_restore(flags);
	put_cpu();
}
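Example #19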
static int od_init(struct dbs_data *dbs_data)
{
	struct od_dbs_tuners *tuners;
	u64 idle_time;
	int cpu;

	tuners = kzalloc(sizeof(*tuners), GFP_KERNEL);
	if (!tuners) {
		pr_err("%s: kzalloc failed\n", __func__);
		return -ENOMEM;
	}

	cpu = get_cpu();
	idle_time = get_cpu_idle_time_us(cpu, NULL);
	put_cpu();
	if (idle_time != -1ULL) {
		/* Idle micro accounting is supported. Use finer thresholds */
		tuners->up_threshold = MICRO_FREQUENCY_UP_THRESHOLD;
		dbs_data->min_sampling_rate = MICRO_FREQUENCY_MIN_SAMPLE_RATE;
	} else {
		tuners->up_threshold = DEF_FREQUENCY_UP_THRESHOLD;

		/* For correct statistics, we need 10 ticks for each measure */
		dbs_data->min_sampling_rate = MIN_SAMPLING_RATE_RATIO *
			jiffies_to_usecs(10);
	}

	tuners->sampling_down_factor = DEF_SAMPLING_DOWN_FACTOR;
	tuners->ignore_nice_load = 0;
	tuners->powersave_bias = default_powersave_bias;
	tuners->io_is_busy = should_io_be_busy();

	dbs_data->tuners = tuners;
	mutex_init(&dbs_data->mutex);
	return 0;
}
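Example #20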
void
start_thread(struct pt_regs *regs, unsigned long new_ip, unsigned long new_sp)
{
	int cpu;

	set_user_gs(regs, 0);

	regs->fs		= 0;
	regs->ds		= __USER_DS;
	regs->es		= __USER_DS;
	regs->ss		= __USER_DS;
	regs->cs		= __USER_CS;
	regs->ip		= new_ip;
	regs->sp		= new_sp;

	cpu = get_cpu();
	load_user_cs_desc(cpu, current->mm);
	put_cpu();

	/*
	 * Free the old FP and other extended state
	 */
	free_thread_xstate(current);
}
Example #21
int __init __module_ref_addr_init(void)
{
	local_t *addr;
	unsigned int cpu = get_cpu();	/* get the current CPU id */

	/* addr points to this module's reference counter */
	addr = __module_ref_addr(THIS_MODULE, cpu);
	printk("<0>addr: %lx\n", (unsigned long)addr);

	/* print the module's initial reference count */
	printk("<0>originally,\n");
	printk("<0>refs of this module is: %d\n", module_refcount(THIS_MODULE));

	local_inc(addr);	/* atomically add 1 to the value addr points to */
	printk("<0>after calling local_inc,\n");
	printk("<0>refs of this module is: %d\n", module_refcount(THIS_MODULE));

	local_dec(addr);	/* atomically subtract 1 from the value addr points to */
	printk("<0>after calling local_dec,\n");
	printk("<0>refs of this module is: %d\n", module_refcount(THIS_MODULE));

	put_cpu();	/* re-enable preemption, i.e. preempt_enable() */

	return 0;
}
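Example #22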
static void kexec_prepare_cpus(void)
{
	wake_offline_cpus();
	smp_call_function(kexec_smp_down, NULL, /* wait */0);
	local_irq_disable();
	mb(); /* make sure IRQs are disabled before we say they are */
	get_paca()->kexec_state = KEXEC_STATE_IRQS_OFF;

	kexec_prepare_cpus_wait(KEXEC_STATE_IRQS_OFF);
	/* we are sure every CPU has IRQs off at this point */
	kexec_all_irq_disabled = 1;

	/* after we tell the others to go down */
	if (ppc_md.kexec_cpu_down)
		ppc_md.kexec_cpu_down(0, 0);

	/*
	 * Before removing MMU mappings make sure all CPUs have entered real
	 * mode:
	 */
	kexec_prepare_cpus_wait(KEXEC_STATE_REAL_MODE);

	put_cpu();
}
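Example #23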
static int __kprobes hw_breakpoint_handler(struct die_args *args)
{
	int cpu, i, rc = NOTIFY_STOP;
	struct perf_event *bp;
	unsigned int cmf, resume_mask;

	cmf = sh_ubc->triggered_mask();
	if (unlikely(!cmf))
		return NOTIFY_DONE;

	resume_mask = sh_ubc->active_mask();

	sh_ubc->disable_all();

	cpu = get_cpu();
	for (i = 0; i < sh_ubc->num_events; i++) {
		unsigned long event_mask = (1 << i);

		if (likely(!(cmf & event_mask)))
			continue;

		rcu_read_lock();

		bp = per_cpu(bp_per_reg[i], cpu);
		if (bp)
			rc = NOTIFY_DONE;

		sh_ubc->clear_triggered_mask(event_mask);

		if (!bp) {
			rcu_read_unlock();
			break;
		}

		if (bp->overflow_handler == ptrace_triggered)
			resume_mask &= ~(1 << i);

		perf_bp_event(bp, args->regs);

		/* Deliver the signal to userspace */
		if (!arch_check_bp_in_kernelspace(bp)) {
			siginfo_t info;

			info.si_signo = args->signr;
			info.si_errno = notifier_to_errno(rc);
			info.si_code = TRAP_HWBKPT;

			force_sig_info(args->signr, &info, current);
		}

		rcu_read_unlock();
	}

	if (cmf == 0)
		rc = NOTIFY_DONE;

	sh_ubc->enable_all(resume_mask);

	put_cpu();

	return rc;
}
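Example #24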
void dump_trace(struct task_struct *task, struct pt_regs *regs,
		unsigned long *stack,
		const struct stacktrace_ops *ops, void *data)
{
	const unsigned cpu = get_cpu();
	unsigned long *irq_stack_end =
		(unsigned long *)per_cpu(irq_stack_ptr, cpu);
	unsigned used = 0;
	struct thread_info *tinfo;
	int graph = 0;
	unsigned long bp;

	if (!task)
		task = current;

	if (!stack) {
		unsigned long dummy;
		stack = &dummy;
		if (task && task != current)
			stack = (unsigned long *)task->thread.sp;
	}

	bp = stack_frame(task, regs);
	/*
	 * Print function call entries in all stacks, starting at the
	 * current stack address. If the stacks consist of nested
	 * exceptions, the loop below unwinds each in turn before
	 * falling through to the process stack.
	 */
	tinfo = task_thread_info(task);
	for (;;) {
		char *id;
		unsigned long *estack_end;
		estack_end = in_exception_stack(cpu, (unsigned long)stack,
						&used, &id);

		if (estack_end) {
			if (ops->stack(data, id) < 0)
				break;

			bp = ops->walk_stack(tinfo, stack, bp, ops,
					     data, estack_end, &graph);
			ops->stack(data, "<EOE>");
			/*
			 * We link to the next stack via the
			 * second-to-last pointer (index -2 to end) in the
			 * exception stack:
			 */
			stack = (unsigned long *) estack_end[-2];
			continue;
		}
		if (irq_stack_end) {
			unsigned long *irq_stack;
			irq_stack = irq_stack_end -
				(IRQ_STACK_SIZE - 64) / sizeof(*irq_stack);

			if (in_irq_stack(stack, irq_stack, irq_stack_end)) {
				if (ops->stack(data, "IRQ") < 0)
					break;
				bp = ops->walk_stack(tinfo, stack, bp,
					ops, data, irq_stack_end, &graph);
				/*
				 * We link to the next stack (which would be
				 * the process stack normally) the last
				 * pointer (index -1 to end) in the IRQ stack:
				 */
				stack = (unsigned long *) (irq_stack_end[-1]);
				bp = fixup_bp_irq_link(bp, stack, irq_stack,
						       irq_stack_end);
				irq_stack_end = NULL;
				ops->stack(data, "EOI");
				continue;
			}
		}
		break;
	}

	/*
	 * This handles the process stack:
	 */
	bp = ops->walk_stack(tinfo, stack, bp, ops, data, NULL, &graph);
	put_cpu();
}
Example #25
/*
 * this changes the io permissions bitmap in the current task.
 */
asmlinkage long sys_ioperm(unsigned long from, unsigned long num, int turn_on)
{
	struct thread_struct *t = &current->thread;
	struct tss_struct *tss;
	unsigned int i, max_long, bytes, bytes_updated;

	if ((from + num <= from) || (from + num > IO_BITMAP_BITS))
		return -EINVAL;
#if defined(CONFIG_SCHED_BFS_AUTOISO)
	if (turn_on) {
		struct sched_param param = { .sched_priority = 0 };
		if (!capable(CAP_SYS_RAWIO))
			return -EPERM;
		/* Start X as SCHED_ISO */
		sched_setscheduler_nocheck(current, SCHED_ISO, &param);
	}
#else
	if (turn_on && !capable(CAP_SYS_RAWIO))
		return -EPERM;
#endif

	/*
	 * If it's the first ioperm() call in this thread's lifetime, set the
	 * IO bitmap up. ioperm() is much less timing critical than clone(),
	 * this is why we delay this operation until now:
	 */
	if (!t->io_bitmap_ptr) {
		unsigned long *bitmap = kmalloc(IO_BITMAP_BYTES, GFP_KERNEL);

		if (!bitmap)
			return -ENOMEM;

		memset(bitmap, 0xff, IO_BITMAP_BYTES);
		t->io_bitmap_ptr = bitmap;
		set_thread_flag(TIF_IO_BITMAP);
	}

	/*
	 * do it in the per-thread copy and in the TSS ...
	 *
	 * Disable preemption via get_cpu() - we must not switch away
	 * because the ->io_bitmap_max value must match the bitmap
	 * contents:
	 */
	tss = &per_cpu(init_tss, get_cpu());

	if (turn_on)
		bitmap_clear(t->io_bitmap_ptr, from, num);
	else
		bitmap_set(t->io_bitmap_ptr, from, num);

	/*
	 * Search for a (possibly new) maximum. This is simple and stupid,
	 * to keep it obviously correct:
	 */
	max_long = 0;
	for (i = 0; i < IO_BITMAP_LONGS; i++)
		if (t->io_bitmap_ptr[i] != ~0UL)
			max_long = i;

	bytes = (max_long + 1) * sizeof(unsigned long);
	bytes_updated = max(bytes, t->io_bitmap_max);

	t->io_bitmap_max = bytes;

	/* Update the TSS: */
	memcpy(tss->io_bitmap, t->io_bitmap_ptr, bytes_updated);

	put_cpu();

	return 0;
}

/*
 * sys_iopl has to be used when you want to access the IO ports
 * beyond the 0x3ff range: to get the full 65536 ports bitmapped
 * you'd need 8kB of bitmaps/process, which is a bit excessive.
 *
 * Here we just change the flags value on the stack: we allow
 * only the super-user to do it. This depends on the stack-layout
 * on system-call entry - see also fork() and the signal handling
 * code.
 */
long sys_iopl(unsigned int level, struct pt_regs *regs)
{
	unsigned int old = (regs->flags >> 12) & 3;
	struct thread_struct *t = &current->thread;

	if (level > 3)
		return -EINVAL;
	/* Trying to gain more privileges? */
	if (level > old) {
#if defined(CONFIG_SCHED_BFS_AUTOISO)
		struct sched_param param = { .sched_priority = 0 };
		if (!capable(CAP_SYS_RAWIO))
			return -EPERM;
		/* Start X as SCHED_ISO */
		sched_setscheduler_nocheck(current, SCHED_ISO, &param);
#else
		if (!capable(CAP_SYS_RAWIO))
			return -EPERM;
#endif
	}
	regs->flags = (regs->flags & ~X86_EFLAGS_IOPL) | (level << 12);
	t->iopl = level << 12;
	set_iopl_mask(t->iopl);

	return 0;
}
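Example #26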
/*
 * Copy memory by briefly enabling incoherent cacheline-at-a-time mode.
 *
 * We set up our own source and destination PTEs that we fully control.
 * This is the only way to guarantee that we don't race with another
 * thread that is modifying the PTE; we can't afford to try the
 * copy_{to,from}_user() technique of catching the interrupt, since
 * we must run with interrupts disabled to avoid the risk of some
 * other code seeing the incoherent data in our cache.  (Recall that
 * our cache is indexed by PA, so even if the other code doesn't use
 * our kmap_atomic virtual addresses, they'll still hit in cache using
 * the normal VAs that aren't supposed to hit in cache.)
 */
static void memcpy_multicache(void *dest, const void *source,
			      pte_t dst_pte, pte_t src_pte, int len)
{
	int idx;
	unsigned long flags, newsrc, newdst;
	pmd_t *pmdp;
	pte_t *ptep;
	int type0, type1;
	int cpu = get_cpu();

	/*
	 * Disable interrupts so that we don't recurse into memcpy()
	 * in an interrupt handler, nor accidentally reference
	 * the PA of the source from an interrupt routine.  Also
	 * notify the simulator that we're playing games so we don't
	 * generate spurious coherency warnings.
	 */
	local_irq_save(flags);
	sim_allow_multiple_caching(1);

	/* Set up the new dest mapping */
	type0 = kmap_atomic_idx_push();
	idx = FIX_KMAP_BEGIN + (KM_TYPE_NR * cpu) + type0;
	newdst = __fix_to_virt(idx) + ((unsigned long)dest & (PAGE_SIZE-1));
	pmdp = pmd_offset(pud_offset(pgd_offset_k(newdst), newdst), newdst);
	ptep = pte_offset_kernel(pmdp, newdst);
	if (pte_val(*ptep) != pte_val(dst_pte)) {
		set_pte(ptep, dst_pte);
		local_flush_tlb_page(NULL, newdst, PAGE_SIZE);
	}

	/* Set up the new source mapping */
	type1 = kmap_atomic_idx_push();
	idx += (type0 - type1);
	src_pte = hv_pte_set_nc(src_pte);
	src_pte = hv_pte_clear_writable(src_pte);  /* be paranoid */
	newsrc = __fix_to_virt(idx) + ((unsigned long)source & (PAGE_SIZE-1));
	pmdp = pmd_offset(pud_offset(pgd_offset_k(newsrc), newsrc), newsrc);
	ptep = pte_offset_kernel(pmdp, newsrc);
	__set_pte(ptep, src_pte);   /* set_pte() would be confused by this */
	local_flush_tlb_page(NULL, newsrc, PAGE_SIZE);

	/* Actually move the data. */
	__memcpy_asm((void *)newdst, (const void *)newsrc, len);

	/*
	 * Remap the source as locally-cached and not OLOC'ed so that
	 * we can inval without also invaling the remote cpu's cache.
	 * This also avoids known errata with inv'ing cacheable oloc data.
	 */
	src_pte = hv_pte_set_mode(src_pte, HV_PTE_MODE_CACHE_NO_L3);
	src_pte = hv_pte_set_writable(src_pte); /* need write access for inv */
	__set_pte(ptep, src_pte);   /* set_pte() would be confused by this */
	local_flush_tlb_page(NULL, newsrc, PAGE_SIZE);

	/*
	 * Do the actual invalidation, covering the full L2 cache line
	 * at the end since __memcpy_asm() is somewhat aggressive.
	 */
	__inv_buffer((void *)newsrc, len);

	/*
	 * We're done: notify the simulator that all is back to normal,
	 * and re-enable interrupts and pre-emption.
	 */
	kmap_atomic_idx_pop();
	kmap_atomic_idx_pop();
	sim_allow_multiple_caching(0);
	local_irq_restore(flags);
	put_cpu();
}
Example #27
/*
 * this changes the io permissions bitmap in the current task.
 */
asmlinkage long sys_ioperm(unsigned long from, unsigned long num, int turn_on)
{
	struct thread_struct *t = &current->thread;
	struct tss_struct *tss;
	unsigned int i, max_long, bytes, bytes_updated;

	if ((from + num <= from) || (from + num > IO_BITMAP_BITS))
		return -EINVAL;
	if (turn_on && !capable(CAP_SYS_RAWIO))
		return -EPERM;

	if (!l4x_x86_handle_user_port_request(current, from, num))
		return -EPERM;

	/*
	 * If it's the first ioperm() call in this thread's lifetime, set the
	 * IO bitmap up. ioperm() is much less timing critical than clone(),
	 * this is why we delay this operation until now:
	 */
	if (!t->io_bitmap_ptr) {
		unsigned long *bitmap = kmalloc(IO_BITMAP_BYTES, GFP_KERNEL);

		if (!bitmap)
			return -ENOMEM;

		memset(bitmap, 0xff, IO_BITMAP_BYTES);
		t->io_bitmap_ptr = bitmap;
		set_thread_flag(TIF_IO_BITMAP);
	}

	/*
	 * do it in the per-thread copy and in the TSS ...
	 *
	 * Disable preemption via get_cpu() - we must not switch away
	 * because the ->io_bitmap_max value must match the bitmap
	 * contents:
	 */
	tss = &per_cpu(init_tss, get_cpu());

	if (turn_on)
		bitmap_clear(t->io_bitmap_ptr, from, num);
	else
		bitmap_set(t->io_bitmap_ptr, from, num);

	/*
	 * Search for a (possibly new) maximum. This is simple and stupid,
	 * to keep it obviously correct:
	 */
	max_long = 0;
	for (i = 0; i < IO_BITMAP_LONGS; i++)
		if (t->io_bitmap_ptr[i] != ~0UL)
			max_long = i;

	bytes = (max_long + 1) * sizeof(unsigned long);
	bytes_updated = max(bytes, t->io_bitmap_max);

	t->io_bitmap_max = bytes;

	/* Update the TSS: */
	memcpy(tss->io_bitmap, t->io_bitmap_ptr, bytes_updated);

	put_cpu();

	return 0;
}
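Example #28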
/*
 * Queue up a socket with data pending. If there are idle nfsd
 * processes, wake 'em up.
 *
 */
static void
svc_sock_enqueue(struct svc_sock *svsk)
{
	struct svc_serv	*serv = svsk->sk_server;
	struct svc_pool *pool;
	struct svc_rqst	*rqstp;
	int cpu;

	if (!(svsk->sk_flags &
	      ( (1<<SK_CONN)|(1<<SK_DATA)|(1<<SK_CLOSE)|(1<<SK_DEFERRED)) ))
		return;
	if (test_bit(SK_DEAD, &svsk->sk_flags))
		return;

	cpu = get_cpu();
	pool = svc_pool_for_cpu(svsk->sk_server, cpu);
	put_cpu();

	spin_lock_bh(&pool->sp_lock);

	if (!list_empty(&pool->sp_threads) &&
	    !list_empty(&pool->sp_sockets))
		printk(KERN_ERR
			"svc_sock_enqueue: threads and sockets both waiting??\n");

	if (test_bit(SK_DEAD, &svsk->sk_flags)) {
		/* Don't enqueue dead sockets */
		dprintk("svc: socket %p is dead, not enqueued\n", svsk->sk_sk);
		goto out_unlock;
	}

	/* Mark socket as busy. It will remain in this state until the
	 * server has processed all pending data and put the socket back
	 * on the idle list.  We update SK_BUSY atomically because
	 * it also guards against trying to enqueue the svc_sock twice.
	 */
	if (test_and_set_bit(SK_BUSY, &svsk->sk_flags)) {
		/* Don't enqueue socket while already enqueued */
		dprintk("svc: socket %p busy, not enqueued\n", svsk->sk_sk);
		goto out_unlock;
	}
	BUG_ON(svsk->sk_pool != NULL);
	svsk->sk_pool = pool;

	set_bit(SOCK_NOSPACE, &svsk->sk_sock->flags);
	if (((atomic_read(&svsk->sk_reserved) + serv->sv_max_mesg)*2
	     > svc_sock_wspace(svsk))
	    && !test_bit(SK_CLOSE, &svsk->sk_flags)
	    && !test_bit(SK_CONN, &svsk->sk_flags)) {
		/* Don't enqueue while not enough space for reply */
		dprintk("svc: socket %p  no space, %d*2 > %ld, not enqueued\n",
			svsk->sk_sk, atomic_read(&svsk->sk_reserved)+serv->sv_max_mesg,
			svc_sock_wspace(svsk));
		svsk->sk_pool = NULL;
		clear_bit(SK_BUSY, &svsk->sk_flags);
		goto out_unlock;
	}
	clear_bit(SOCK_NOSPACE, &svsk->sk_sock->flags);


	if (!list_empty(&pool->sp_threads)) {
		rqstp = list_entry(pool->sp_threads.next,
				   struct svc_rqst,
				   rq_list);
		dprintk("svc: socket %p served by daemon %p\n",
			svsk->sk_sk, rqstp);
		svc_thread_dequeue(pool, rqstp);
		if (rqstp->rq_sock)
			printk(KERN_ERR
				"svc_sock_enqueue: server %p, rq_sock=%p!\n",
				rqstp, rqstp->rq_sock);
		rqstp->rq_sock = svsk;
		atomic_inc(&svsk->sk_inuse);
		rqstp->rq_reserved = serv->sv_max_mesg;
		atomic_add(rqstp->rq_reserved, &svsk->sk_reserved);
		BUG_ON(svsk->sk_pool != pool);
		wake_up(&rqstp->rq_wait);
	} else {
		dprintk("svc: socket %p put into queue\n", svsk->sk_sk);
		list_add_tail(&svsk->sk_ready, &pool->sp_sockets);
	}

out_unlock:
	spin_unlock_bh(&pool->sp_lock);
}
Example #29
static int msm_idle_stats_collect(struct file *filp,
				  unsigned int cmd, unsigned long arg)
{
	struct msm_idle_stats_device *stats_dev;
	struct msm_idle_stats *stats;
	int rc;

	stats_dev = (struct msm_idle_stats_device *) filp->private_data;
	stats = &stats_dev->stats;

	rc = mutex_lock_interruptible(&stats_dev->mutex);
	if (rc) {
		if (msm_idle_stats_debug_mask & MSM_IDLE_STATS_DEBUG_SIGNAL)
			pr_info("%s: interrupted while waiting on device "
				"mutex\n", __func__);

		rc = -EINTR;
		goto collect_exit;
	}

	if (atomic_read(&stats_dev->collecting)) {
		pr_err("%s: inconsistent state\n", __func__);
		rc = -EBUSY;
		goto collect_unlock_exit;
	}

	rc = copy_from_user(stats, (void *)arg, sizeof(*stats));
	if (rc) {
		rc = -EFAULT;
		goto collect_unlock_exit;
	}

	if (stats->nr_collected >= MSM_IDLE_STATS_NR_MAX_INTERVALS ||
			stats->busy_timer > MSM_IDLE_STATS_MAX_TIMER ||
			stats->collection_timer > MSM_IDLE_STATS_MAX_TIMER) {
		rc = -EINVAL;
		goto collect_unlock_exit;
	}

	if (get_cpu() != stats_dev->cpu) {
		put_cpu();
		rc = -EACCES;
		goto collect_unlock_exit;
	}

	stats_dev->collection_expiration =
		ktime_to_us(ktime_get()) + stats->collection_timer;

	atomic_set(&stats_dev->collecting, 1);

	if (stats->busy_timer > 0) {
		rc = hrtimer_start(&stats_dev->timer,
			ktime_set(0, stats->busy_timer * 1000),
			HRTIMER_MODE_REL_PINNED);
		WARN_ON(rc);
	}

	put_cpu();
	if (wait_event_interruptible(stats_dev->wait_q,
			!atomic_read(&stats_dev->collecting))) {
		if (msm_idle_stats_debug_mask & MSM_IDLE_STATS_DEBUG_SIGNAL)
			pr_info("%s: interrupted while waiting on "
				"collection\n", __func__);

		hrtimer_cancel(&stats_dev->timer);
		atomic_set(&stats_dev->collecting, 0);

		rc = -EINTR;
		goto collect_unlock_exit;
	}

	stats->return_timestamp = ktime_to_us(ktime_get());

	rc = copy_to_user((void *)arg, stats, sizeof(*stats));
	if (rc) {
		rc = -EFAULT;
		goto collect_unlock_exit;
	}

collect_unlock_exit:
	mutex_unlock(&stats_dev->mutex);

collect_exit:
	return rc;
}
Example #30
void lru_add_drain(void)
{
	lru_add_drain_cpu(get_cpu());
	put_cpu();
}
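Every example above follows the same contract: get_cpu() returns the id of the CPU the caller is running on and disables preemption, so per-CPU data indexed by that id cannot be pulled out from under the caller by a task migration, and each get_cpu() is balanced by exactly one put_cpu() on every exit path. Below is a minimal sketch of that pattern; the per-CPU variable my_counter and the function touch_this_cpu_counter are hypothetical names used only for illustration.

#include <linux/percpu.h>
#include <linux/smp.h>

/* Hypothetical per-CPU counter, one instance per possible CPU. */
static DEFINE_PER_CPU(unsigned long, my_counter);

static void touch_this_cpu_counter(void)
{
	int cpu = get_cpu();	/* disable preemption, get this CPU's id */

	/* Safe: we cannot migrate to another CPU until put_cpu(). */
	per_cpu(my_counter, cpu)++;

	put_cpu();		/* re-enable preemption */
}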