Code example #1
void __tasklet_hi_schedule(struct tasklet_struct *t)
{
	unsigned long flags;

	local_irq_save(flags);
	t->next = NULL;
	*__this_cpu_read(tasklet_hi_vec.tail) = t;
	__this_cpu_write(tasklet_hi_vec.tail,  &(t->next));
	raise_softirq_irqoff(HI_SOFTIRQ);
	local_irq_restore(flags);
}
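For context, a minimal sketch of the producer side that ends up in __tasklet_hi_schedule(): a driver declares a tasklet and schedules it from its interrupt handler. This assumes the pre-5.9 tasklet API (callback takes an unsigned long cookie); my_tasklet_fn, my_dev_isr and the cookie value are illustrative names, not part of the code above.

#include <linux/kernel.h>
#include <linux/interrupt.h>

/* Illustrative handler; runs later in HI_SOFTIRQ context. */
static void my_tasklet_fn(unsigned long data)
{
	pr_info("high-priority tasklet ran, cookie=%lu\n", data);
}

static DECLARE_TASKLET(my_tasklet, my_tasklet_fn, 0);

/* Illustrative interrupt handler that defers its work to the tasklet. */
static irqreturn_t my_dev_isr(int irq, void *dev_id)
{
	tasklet_hi_schedule(&my_tasklet);	/* reaches __tasklet_hi_schedule() */
	return IRQ_HANDLED;
}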
Code example #2
File: fakekey.c Project: SantoshShilimkar/linux
/*
 * Send a simulated down-arrow to the application.
 */
void speakup_fake_down_arrow(void)
{
	unsigned long flags;

	/* disable keyboard interrupts */
	local_irq_save(flags);
	/* don't change CPU */
	preempt_disable();

	__this_cpu_write(reporting_keystroke, true);
	input_report_key(virt_keyboard, KEY_DOWN, PRESSED);
	input_report_key(virt_keyboard, KEY_DOWN, RELEASED);
	input_sync(virt_keyboard);
	__this_cpu_write(reporting_keystroke, false);

	/* reenable preemption */
	preempt_enable();
	/* reenable keyboard interrupts */
	local_irq_restore(flags);
}
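For reference, a hedged sketch of the per-CPU flag this function toggles. The declaration and the read-side helper below are reconstructed from the usage above rather than quoted from fakekey.c; the intent is that the keyboard notifier can tell a keystroke injected on this CPU from a real one.

#include <linux/percpu.h>

static DEFINE_PER_CPU(bool, reporting_keystroke);

/* Read side (illustrative): true only while this CPU is injecting a key. */
bool speakup_fake_key_pressed(void)
{
	return this_cpu_read(reporting_keystroke);
}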
Code example #3
File: cio.c Project: 119-org/hi3518-osdrv
/*
 * do_IRQ() handles all normal I/O device IRQ's (the special
 *	    SMP cross-CPU interrupts have their own specific
 *	    handlers).
 *
 */
void __irq_entry do_IRQ(struct pt_regs *regs)
{
	struct tpi_info *tpi_info;
	struct subchannel *sch;
	struct irb *irb;
	struct pt_regs *old_regs;

	old_regs = set_irq_regs(regs);
	s390_idle_check(regs, S390_lowcore.int_clock,
			S390_lowcore.async_enter_timer);
	irq_enter();
	__this_cpu_write(s390_idle.nohz_delay, 1);
	if (S390_lowcore.int_clock >= S390_lowcore.clock_comparator)
		/* Serve timer interrupts first. */
		clock_comparator_work();
	/*
	 * Get interrupt information from lowcore
	 */
	tpi_info = (struct tpi_info *)&S390_lowcore.subchannel_id;
	irb = (struct irb *)&S390_lowcore.irb;
	do {
		kstat_cpu(smp_processor_id()).irqs[IO_INTERRUPT]++;
		if (tpi_info->adapter_IO) {
			do_adapter_IO(tpi_info->isc);
			continue;
		}
		sch = (struct subchannel *)(unsigned long)tpi_info->intparm;
		if (!sch) {
			/* Clear pending interrupt condition. */
			tsch(tpi_info->schid, irb);
			continue;
		}
		spin_lock(sch->lock);
		/* Store interrupt response block to lowcore. */
		if (tsch(tpi_info->schid, irb) == 0) {
			/* Keep subchannel information word up to date. */
			memcpy (&sch->schib.scsw, &irb->scsw,
				sizeof (irb->scsw));
			/* Call interrupt handler if there is one. */
			if (sch->driver && sch->driver->irq)
				sch->driver->irq(sch);
		}
		spin_unlock(sch->lock);
		/*
		 * Are more interrupts pending?
		 * If so, the tpi instruction will update the lowcore
		 * to hold the info for the next interrupt.
		 * We don't do this for VM because a tpi drops the cpu
		 * out of the sie which costs more cycles than it saves.
		 */
	} while (MACHINE_IS_LPAR && tpi(NULL) != 0);
	irq_exit();
	set_irq_regs(old_regs);
}
Code example #4
File: softirq.c Project: Pafcholini/Beta_TW
static void tasklet_action(struct softirq_action *a)
{
	struct tasklet_struct *list;

	local_irq_disable();
	list = __this_cpu_read(tasklet_vec.head);
	__this_cpu_write(tasklet_vec.head, NULL);
	__this_cpu_write(tasklet_vec.tail, &__get_cpu_var(tasklet_vec).head);
	local_irq_enable();

	while (list) {
		struct tasklet_struct *t = list;

		list = list->next;

		if (tasklet_trylock(t)) {
			if (!atomic_read(&t->count)) {
				if (!test_and_clear_bit(TASKLET_STATE_SCHED, &t->state))
					BUG();
#ifdef CONFIG_SEC_DEBUG
				sec_debug_irq_sched_log(-1, t->func, 3);
				t->func(t->data);
				sec_debug_irq_sched_log(-1, t->func, 4);
#else
				t->func(t->data);
#endif

				tasklet_unlock(t);
				continue;
			}
			tasklet_unlock(t);
		}

		local_irq_disable();
		t->next = NULL;
		*__this_cpu_read(tasklet_vec.tail) = t;
		__this_cpu_write(tasklet_vec.tail, &(t->next));
		__raise_softirq_irqoff(TASKLET_SOFTIRQ);
		local_irq_enable();
	}
}
Code example #5
File: mce_intel.c Project: 03199618/linux
unsigned long mce_intel_adjust_timer(unsigned long interval)
{
	int r;

	if (interval < CMCI_POLL_INTERVAL)
		return interval;

	switch (__this_cpu_read(cmci_storm_state)) {
	case CMCI_STORM_ACTIVE:
		/*
		 * We switch back to interrupt mode once the poll timer has
		 * silenced itself. That means no events recorded and the
		 * timer interval is back to our poll interval.
		 */
		__this_cpu_write(cmci_storm_state, CMCI_STORM_SUBSIDED);
		r = atomic_sub_return(1, &cmci_storm_on_cpus);
		if (r == 0)
			pr_notice("CMCI storm subsided: switching to interrupt mode\n");
		/* FALLTHROUGH */

	case CMCI_STORM_SUBSIDED:
		/*
		 * We wait for all cpus to go back to SUBSIDED
		 * state. When that happens we switch back to
		 * interrupt mode.
		 */
		if (!atomic_read(&cmci_storm_on_cpus)) {
			__this_cpu_write(cmci_storm_state, CMCI_STORM_NONE);
			cmci_reenable();
			cmci_recheck();
		}
		return CMCI_POLL_INTERVAL;
	default:
		/*
		 * We have shiny weather. Let the poll do whatever it
		 * thinks.
		 */
		return interval;
	}
}
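The switch above is driven by a per-CPU storm state and a global count of CPUs currently in storm mode. A hedged reconstruction of those declarations (inferred from the usage here, not quoted from mce_intel.c) looks roughly like this:

#include <linux/percpu.h>
#include <linux/atomic.h>

/* Reconstructed for illustration only. */
enum {
	CMCI_STORM_NONE,
	CMCI_STORM_ACTIVE,
	CMCI_STORM_SUBSIDED,
};

static DEFINE_PER_CPU(unsigned int, cmci_storm_state);
static atomic_t cmci_storm_on_cpus;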
Code example #6
File: core.c Project: garyvan/openwrt-1.6
/*S:010
 * We approach the Switcher.
 *
 * Remember that each CPU has two pages which are visible to the Guest when it
 * runs on that CPU.  This has to contain the state for that Guest: we copy the
 * state in just before we run the Guest.
 *
 * Each Guest has "changed" flags which indicate what has changed in the Guest
 * since it last ran.  We saw this set in interrupts_and_traps.c and
 * segments.c.
 */
static void copy_in_guest_info(struct lg_cpu *cpu, struct lguest_pages *pages)
{
	/*
	 * Copying all this data can be quite expensive.  We usually run the
	 * same Guest we ran last time (and that Guest hasn't run anywhere else
	 * meanwhile).  If that's not the case, we pretend everything in the
	 * Guest has changed.
	 */
	if (__this_cpu_read(lg_last_cpu) != cpu || cpu->last_pages != pages) {
		__this_cpu_write(lg_last_cpu, cpu);
		cpu->last_pages = pages;
		cpu->changed = CHANGED_ALL;
	}

	/*
	 * These copies are pretty cheap, so we do them unconditionally.
	 * Save the current Host top-level page directory.
	 */

#ifdef CONFIG_PAX_PER_CPU_PGD
	pages->state.host_cr3 = read_cr3();
#else
	pages->state.host_cr3 = __pa(current->mm->pgd);
#endif

	/*
	 * Set up the Guest's page tables to see this CPU's pages (and no
	 * other CPU's pages).
	 */
	map_switcher_in_guest(cpu, pages);
	/*
	 * Set up the two "TSS" members which tell the CPU what stack to use
	 * for traps which go directly into the Guest (ie. traps at privilege
	 * level 1).
	 */
	pages->state.guest_tss.sp1 = cpu->esp1;
	pages->state.guest_tss.ss1 = cpu->ss1;

	/* Copy direct-to-Guest trap entries. */
	if (cpu->changed & CHANGED_IDT)
		copy_traps(cpu, pages->state.guest_idt, default_idt_entries);

	/* Copy all GDT entries which the Guest can change. */
	if (cpu->changed & CHANGED_GDT)
		copy_gdt(cpu, pages->state.guest_gdt);
	/* If only the TLS entries have changed, copy them. */
	else if (cpu->changed & CHANGED_GDT_TLS)
		copy_gdt_tls(cpu, pages->state.guest_gdt);

	/* Mark the Guest as unchanged for next time. */
	cpu->changed = 0;
}
Code example #7
/* Callback function for perf event subsystem */
static void watchdog_overflow_callback(struct perf_event *event,
		 struct perf_sample_data *data,
		 struct pt_regs *regs)
{
	/* Ensure the watchdog never gets throttled */
	event->hw.interrupts = 0;

	if (__this_cpu_read(watchdog_nmi_touch) == true) {
		__this_cpu_write(watchdog_nmi_touch, false);
		return;
	}

	/* check for a hardlockup
	 * This is done by making sure our timer interrupt
	 * is incrementing.  The timer interrupt should have
	 * fired multiple times before we overflow'd.  If it hasn't
	 * then this is a good indication the cpu is stuck
	 */
	if (is_hardlockup()) {
		int this_cpu = smp_processor_id();

		/* only print hardlockups once */
		if (__this_cpu_read(hard_watchdog_warn) == true)
			return;

		if (hardlockup_panic) {
			trigger_all_cpu_backtrace();
			panic("Watchdog detected hard LOCKUP on cpu %d", this_cpu);
		}
		else
			WARN(1, "Watchdog detected hard LOCKUP on cpu %d", this_cpu);

		__this_cpu_write(hard_watchdog_warn, true);
		return;
	}

	__this_cpu_write(hard_watchdog_warn, false);
	return;
}
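For context, a hedged sketch of the watchdog_nmi_touch flag the callback checks first: code that knows the next perf NMI overflow is expected and harmless sets the flag so exactly one overflow is ignored. The declaration and helper below are illustrative, loosely modelled on touch_nmi_watchdog(), not quoted from watchdog.c.

#include <linux/percpu.h>

static DEFINE_PER_CPU(bool, watchdog_nmi_touch);

/* Illustrative "touch" helper: the next overflow on this CPU is ignored. */
static void my_touch_nmi_watchdog(void)
{
	this_cpu_write(watchdog_nmi_touch, true);
}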
Code example #8
static void do_stolen_accounting(void)
{
	struct vcpu_runstate_info state;
	struct vcpu_runstate_info *snap;
	s64 blocked, runnable, offline, stolen;
	cputime_t ticks;

	get_runstate_snapshot(&state);

	WARN_ON(state.state != RUNSTATE_running);

	snap = &__get_cpu_var(xen_runstate_snapshot);

	/* Work out how much time the VCPU has not been running. */
	blocked = state.time[RUNSTATE_blocked] - snap->time[RUNSTATE_blocked];
	runnable = state.time[RUNSTATE_runnable] - snap->time[RUNSTATE_runnable];
	offline = state.time[RUNSTATE_offline] - snap->time[RUNSTATE_offline];

	*snap = state;

	stolen = runnable + offline + __this_cpu_read(xen_residual_stolen);

	if (stolen < 0)
		stolen = 0;

	ticks = iter_div_u64_rem(stolen, NS_PER_TICK, &stolen);
	__this_cpu_write(xen_residual_stolen, stolen);
	account_steal_ticks(ticks);

	blocked += __this_cpu_read(xen_residual_blocked);

	if (blocked < 0)
		blocked = 0;

	ticks = iter_div_u64_rem(blocked, NS_PER_TICK, &blocked);
	__this_cpu_write(xen_residual_blocked, blocked);
	account_idle_ticks(ticks);
}
Code example #9
File: softirq.c Project: qkdang/m462
static void tasklet_hi_action(struct softirq_action *a)
{
	struct tasklet_struct *list;

	local_irq_disable();
	list = __this_cpu_read(tasklet_hi_vec.head);
	__this_cpu_write(tasklet_hi_vec.head, NULL);
	__this_cpu_write(tasklet_hi_vec.tail, &__get_cpu_var(tasklet_hi_vec).head);
	local_irq_enable();

	while (list) {
		struct tasklet_struct *t = list;

		list = list->next;

		if (tasklet_trylock(t)) {
			if (!atomic_read(&t->count)) {
				if (!test_and_clear_bit(TASKLET_STATE_SCHED, &t->state))
					BUG();
				exynos_ss_softirq(ESS_FLAG_SOFTIRQ_HI_TASKLET,
							t->func, irqs_disabled(), ESS_FLAG_IN);
				t->func(t->data);
				exynos_ss_softirq(ESS_FLAG_SOFTIRQ_HI_TASKLET,
							t->func, irqs_disabled(), ESS_FLAG_OUT);
				tasklet_unlock(t);
				continue;
			}
			tasklet_unlock(t);
		}

		local_irq_disable();
		t->next = NULL;
		*__this_cpu_read(tasklet_hi_vec.tail) = t;
		__this_cpu_write(tasklet_hi_vec.tail, &(t->next));
		__raise_softirq_irqoff(HI_SOFTIRQ);
		local_irq_enable();
	}
}
Code example #10
File: irq-xtensa-mx.c Project: asmalldev/linux
static void xtensa_mx_irq_unmask(struct irq_data *d)
{
	unsigned int mask = 1u << d->hwirq;

	if (mask & (XCHAL_INTTYPE_MASK_EXTERN_EDGE |
				XCHAL_INTTYPE_MASK_EXTERN_LEVEL)) {
		set_er(1u << (xtensa_get_ext_irq_no(d->hwirq) -
					HW_IRQ_MX_BASE), MIENGSET);
	} else {
		mask |= __this_cpu_read(cached_irq_mask);
		__this_cpu_write(cached_irq_mask, mask);
		set_sr(mask, intenable);
	}
}
Code example #11
/*
 * Optimized increment and decrement functions.
 *
 * These are only for a single page and therefore can take a struct page *
 * argument instead of struct zone *. This allows the inclusion of the code
 * generated for page_zone(page) into the optimized functions.
 *
 * No overflow check is necessary and therefore the differential can be
 * incremented or decremented in place which may allow the compilers to
 * generate better code.
 * The increment or decrement is known and therefore one boundary check can
 * be omitted.
 *
 * NOTE: These functions are very performance sensitive. Change only
 * with care.
 *
 * Some processors have inc/dec instructions that are atomic vs an interrupt.
 * However, the code must first determine the differential location in a zone
 * based on the processor number and then inc/dec the counter. There is no
 * guarantee without disabling preemption that the processor will not change
 * in between and therefore the atomicity vs. interrupt cannot be exploited
 * in a useful way here.
 */
void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
{
	struct per_cpu_pageset __percpu *pcp = zone->pageset;
	s8 __percpu *p = pcp->vm_stat_diff + item;
	s8 v, t;

	v = __this_cpu_inc_return(*p);
	t = __this_cpu_read(pcp->stat_threshold);
	if (unlikely(v > t)) {
		s8 overstep = t >> 1;

		zone_page_state_add(v + overstep, zone, item);
		__this_cpu_write(*p, -overstep);
	}
}
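The threshold logic above is easier to see in isolation. Below is a self-contained sketch of the same differential-counter idea (a small per-CPU delta accumulated until it crosses a threshold, then folded into the global counter with some slack so the next fold happens later); every name in it is invented for illustration.

/* Standalone illustration of the fold-back logic in __inc_zone_state(). */
struct demo_counter {
	long global;		/* shared counter, updated rarely */
	signed char diff;	/* per-CPU delta, updated cheaply */
	signed char threshold;	/* fold-back point */
};

static void demo_inc(struct demo_counter *c)
{
	signed char v = ++c->diff;

	if (v > c->threshold) {
		signed char overstep = c->threshold >> 1;

		/* Fold the delta plus half a threshold of slack into the
		 * global count, then leave the delta negative so roughly
		 * threshold + overstep increments pass before the next fold. */
		c->global += v + overstep;
		c->diff = -overstep;
	}
}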
Code example #12
File: ftrace.c Project: 03199618/linux
static int __skip_singlestep(struct kprobe *p, struct pt_regs *regs,
			     struct kprobe_ctlblk *kcb)
{
	/*
	 * Emulate singlestep (and also recover regs->ip)
	 * as if there is a 5byte nop
	 */
	regs->ip = (unsigned long)p->addr + MCOUNT_INSN_SIZE;
	if (unlikely(p->post_handler)) {
		kcb->kprobe_status = KPROBE_HIT_SSDONE;
		p->post_handler(p, regs, 0);
	}
	__this_cpu_write(current_kprobe, NULL);
	return 1;
}
Code example #13
File: kprobes.c Project: EMCAntimatter/linux
/*
 * Singlestep is implemented by disabling the current kprobe and setting one
 * on the next instruction, following branches. Two probes are set if the
 * branch is conditional.
 */
static void __kprobes prepare_singlestep(struct kprobe *p, struct pt_regs *regs)
{
	__this_cpu_write(saved_current_opcode.addr, (kprobe_opcode_t *)regs->pc);

	if (p != NULL) {
		struct kprobe *op1, *op2;

		arch_disarm_kprobe(p);

		op1 = this_cpu_ptr(&saved_next_opcode);
		op2 = this_cpu_ptr(&saved_next_opcode2);

		if (OPCODE_JSR(p->opcode) || OPCODE_JMP(p->opcode)) {
			unsigned int reg_nr = ((p->opcode >> 8) & 0x000F);
			op1->addr = (kprobe_opcode_t *) regs->regs[reg_nr];
		} else if (OPCODE_BRA(p->opcode) || OPCODE_BSR(p->opcode)) {
Code example #14
File: boot.c Project: LeeDroid-/bricked-endeavoru
/*
 * This sets up the Interrupt Descriptor Table (IDT) entry for each hardware
 * interrupt (except 128, which is used for system calls), and then tells the
 * Linux infrastructure that each interrupt is controlled by our level-based
 * lguest interrupt controller.
 */
static void __init lguest_init_IRQ(void)
{
	unsigned int i;

	for (i = FIRST_EXTERNAL_VECTOR; i < NR_VECTORS; i++) {
		/* Some systems map "vectors" to interrupts weirdly.  Not us! */
		__this_cpu_write(vector_irq[i], i - FIRST_EXTERNAL_VECTOR);
		if (i != SYSCALL_VECTOR)
			set_intr_gate(i, interrupt[i - FIRST_EXTERNAL_VECTOR]);
	}

	/*
	 * This call is required to set up for 4k stacks, where we have
	 * separate stacks for hard and soft interrupts.
	 */
	irq_ctx_init(smp_processor_id());
}
Code example #15
void __irq_entry do_IRQ(struct pt_regs *regs)
{
	struct tpi_info *tpi_info;
	struct subchannel *sch;
	struct irb *irb;
	struct pt_regs *old_regs;

	old_regs = set_irq_regs(regs);
	irq_enter();
	__this_cpu_write(s390_idle.nohz_delay, 1);
	if (S390_lowcore.int_clock >= S390_lowcore.clock_comparator)
		/* Serve timer interrupts first. */
		clock_comparator_work();
	tpi_info = (struct tpi_info *)&S390_lowcore.subchannel_id;
	irb = (struct irb *)&S390_lowcore.irb;
	do {
		kstat_cpu(smp_processor_id()).irqs[IO_INTERRUPT]++;
		if (tpi_info->adapter_IO) {
			do_adapter_IO(tpi_info->isc);
			continue;
		}
		sch = (struct subchannel *)(unsigned long)tpi_info->intparm;
		if (!sch) {
			/* Clear pending interrupt condition. */
			kstat_cpu(smp_processor_id()).irqs[IOINT_CIO]++;
			tsch(tpi_info->schid, irb);
			continue;
		}
		spin_lock(sch->lock);
		/* Store interrupt response block to lowcore. */
		if (tsch(tpi_info->schid, irb) == 0) {
			/* Keep subchannel information word up to date. */
			memcpy (&sch->schib.scsw, &irb->scsw,
				sizeof (irb->scsw));
			/* Call interrupt handler if there is one. */
			if (sch->driver && sch->driver->irq)
				sch->driver->irq(sch);
			else
				kstat_cpu(smp_processor_id()).irqs[IOINT_CIO]++;
		} else
			kstat_cpu(smp_processor_id()).irqs[IOINT_CIO]++;
		spin_unlock(sch->lock);
	} while (MACHINE_IS_LPAR && tpi(NULL) != 0);
	irq_exit();
	set_irq_regs(old_regs);
}
Code example #16
File: irq_work.c Project: gundal/nobleltehk
static void __irq_work_run(void)
{
	unsigned long flags;
	struct irq_work *work;
	struct llist_head *this_list;
	struct llist_node *llnode;


	/*
	 * Reset the "raised" state right before we check the list because
	 * an NMI may enqueue after we find the list empty from the runner.
	 */
	__this_cpu_write(irq_work_raised, 0);
	barrier();

	this_list = &__get_cpu_var(irq_work_list);
	if (llist_empty_relaxed(this_list))
		return;

	BUG_ON(!irqs_disabled());

	llnode = llist_del_all(this_list);
	while (llnode != NULL) {
		work = llist_entry(llnode, struct irq_work, llnode);

		llnode = llist_next(llnode);

		/*
		 * Clear the PENDING bit, after this point the @work
		 * can be re-used.
		 * Make it immediately visible so that other CPUs trying
		 * to claim that work don't rely on us to handle their data
		 * while we are in the middle of the func.
		 */
		flags = work->flags & ~IRQ_WORK_PENDING;
		xchg(&work->flags, flags);

		work->func(work);
		/*
		 * Clear the BUSY bit and return to the free state if
		 * no-one else claimed it meanwhile.
		 */
		(void)cmpxchg(&work->flags, flags, flags & ~IRQ_WORK_BUSY);
	}
}
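For reference, a hedged sketch of the producer side that feeds this runner: a piece of code (possibly in NMI context) queues an irq_work item whose func is later called from the loop above. The names my_irq_work_fn/my_work/my_setup/my_nmi_path are illustrative; init_irq_work() and irq_work_queue() are the stock kernel helpers.

#include <linux/irq_work.h>
#include <linux/printk.h>

/* Illustrative callback; runs from __irq_work_run() with IRQs disabled. */
static void my_irq_work_fn(struct irq_work *work)
{
	pr_info("deferred irq_work ran\n");
}

static struct irq_work my_work;

static void my_setup(void)
{
	init_irq_work(&my_work, my_irq_work_fn);
}

/* Can be called from NMI or hard-IRQ context; it only queues and raises. */
static void my_nmi_path(void)
{
	irq_work_queue(&my_work);
}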
Code example #17
/*
 * For use when we know that interrupts are disabled,
 * or when we know that preemption is disabled and that
 * particular counter cannot be updated from interrupt context.
 */
void __mod_zone_page_state(struct zone *zone, enum zone_stat_item item,
			   long delta)
{
	struct per_cpu_pageset __percpu *pcp = zone->pageset;
	s8 __percpu *p = pcp->vm_stat_diff + item;
	long x;
	long t;

	x = delta + __this_cpu_read(*p);

	t = __this_cpu_read(pcp->stat_threshold);

	if (unlikely(x > t || x < -t)) {
		zone_page_state_add(x, zone, item);
		x = 0;
	}
	__this_cpu_write(*p, x);
}
Code example #18
File: vmstat.c Project: forgivemyheart/linux
void __mod_node_page_state(struct pglist_data *pgdat, enum node_stat_item item,
				long delta)
{
	struct per_cpu_nodestat __percpu *pcp = pgdat->per_cpu_nodestats;
	s8 __percpu *p = pcp->vm_node_stat_diff + item;
	long x;
	long t;

	x = delta + __this_cpu_read(*p);

	t = __this_cpu_read(pcp->stat_threshold);

	if (unlikely(x > t || x < -t)) {
		node_page_state_add(x, pgdat, item);
		x = 0;
	}
	__this_cpu_write(*p, x);
}
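A hedged usage sketch for the node-level counter path above: a caller that already has interrupts off (or knows the item is never touched from interrupt context) adjusts a node counter through the page's pglist_data. The helper name and the choice of NR_FILE_PAGES are assumptions for illustration, not taken from the file above.

#include <linux/mm.h>
#include <linux/vmstat.h>

/* Hypothetical helper: caller guarantees IRQs are off. */
static void demo_account_file_pages(struct page *page, long nr)
{
	__mod_node_page_state(page_pgdat(page), NR_FILE_PAGES, nr);
}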
Code example #19
File: irq.c Project: EMFPGA/linux_media
/*
 * do_IRQ handles all normal device IRQ's (the special
 * SMP cross-CPU interrupts have their own specific
 * handlers).
 */
__visible unsigned int __irq_entry do_IRQ(struct pt_regs *regs)
{
	struct pt_regs *old_regs = set_irq_regs(regs);
	struct irq_desc * desc;
	/* high bit used in ret_from_ code  */
	unsigned vector = ~regs->orig_ax;

	/*
	 * NB: Unlike exception entries, IRQ entries do not reliably
	 * handle context tracking in the low-level entry code.  This is
	 * because syscall entries execute briefly with IRQs on before
	 * updating context tracking state, so we can take an IRQ from
	 * kernel mode with CONTEXT_USER.  The low-level entry code only
	 * updates the context if we came from user mode, so we won't
	 * switch to CONTEXT_KERNEL.  We'll fix that once the syscall
	 * code is cleaned up enough that we can cleanly defer enabling
	 * IRQs.
	 */

	entering_irq();

	/* entering_irq() tells RCU that we're not quiescent.  Check it. */
	RCU_LOCKDEP_WARN(!rcu_is_watching(), "IRQ failed to wake up RCU");

	desc = __this_cpu_read(vector_irq[vector]);

	if (!handle_irq(desc, regs)) {
		ack_APIC_irq();

		if (desc != VECTOR_RETRIGGERED) {
			pr_emerg_ratelimited("%s: %d.%d No irq handler for vector\n",
					     __func__, smp_processor_id(),
					     vector);
		} else {
			__this_cpu_write(vector_irq[vector], VECTOR_UNUSED);
		}
	}

	exiting_irq();

	set_irq_regs(old_regs);
	return 1;
}
Code example #20
File: irq.c Project: tolimit/linux-comment-cn
/*
 * do_IRQ handles all normal device IRQ's (the special
 * SMP cross-CPU interrupts have their own specific
 * handlers).
 */
__visible unsigned int __irq_entry do_IRQ(struct pt_regs *regs)
{
	/* Save the stack-top register frame into the global __irq_regs; old_regs keeps the previous __irq_regs value */
	struct pt_regs *old_regs = set_irq_regs(regs);

	/* Get the interrupt vector number; it is stored negated in orig_ax, so negate it again here */
	unsigned vector = ~regs->orig_ax;
	/* Interrupt number */
	unsigned irq;

	/* Increment the hardirq counter, which is kept in preempt_count */
	irq_enter();
	/* From this point scheduling is disabled, because preempt_count is non-zero */

	/* Leave the idle state (if the current task is the idle task) */
	exit_idle();

	/* Look up the interrupt number for this vector */
	irq = __this_cpu_read(vector_irq[vector]);

	/* The main work is handle_irq(), which runs the interrupt service routine */
	if (!handle_irq(irq, regs)) {
		/* Acknowledge the APIC (EOI) */
		ack_APIC_irq();

		/* This vector has not been marked as retriggered */
		if (irq != VECTOR_RETRIGGERED) {
			pr_emerg_ratelimited("%s: %d.%d No irq handler for vector (irq %d)\n",
					     __func__, smp_processor_id(),
					     vector, irq);
		} else {
			/* Mark this vector's vector_irq entry as undefined */
			__this_cpu_write(vector_irq[vector], VECTOR_UNDEFINED);
		}
	}
	/* Decrement the hardirq counter and check for pending softirqs; if any, they run with interrupts enabled */
	irq_exit();
	/* Scheduling is allowed again from here */

	/* Restore the previous __irq_regs value */
	set_irq_regs(old_regs);
	return 1;
}
Code example #21
File: smpboot.c Project: fhgwright/bb-linux
/*
 * Activate a secondary processor.  Very minimal; don't add anything
 * to this path without knowing what you're doing, since SMP booting
 * is pretty fragile.
 */
static void start_secondary(void)
{
	int cpuid;

	preempt_disable();

	cpuid = smp_processor_id();

	/* Set our thread pointer appropriately. */
	set_my_cpu_offset(__per_cpu_offset[cpuid]);

	/*
	 * In large machines even this will slow us down, since we
	 * will be contending for the printk spinlock.
	 */
	/* printk(KERN_DEBUG "Initializing CPU#%d\n", cpuid); */

	/* Initialize the current asid for our first page table. */
	__this_cpu_write(current_asid, min_asid);

	/* Set up this thread as another owner of the init_mm */
	mmgrab(&init_mm);
	current->active_mm = &init_mm;
	if (current->mm)
		BUG();
	enter_lazy_tlb(&init_mm, current);

	/* Allow hypervisor messages to be received */
	init_messaging();
	local_irq_enable();

	/* Indicate that we're ready to come up. */
	/* Must not do this before we're ready to receive messages */
	if (cpumask_test_and_set_cpu(cpuid, &cpu_started)) {
		pr_warn("CPU#%d already started!\n", cpuid);
		for (;;)
			local_irq_enable();
	}

	smp_nap();
}
Code example #22
File: kprobes-ftrace.c Project: CCNITSilchar/linux
/* Ftrace callback handler for kprobes */
void kprobe_ftrace_handler(unsigned long nip, unsigned long parent_nip,
			   struct ftrace_ops *ops, struct pt_regs *regs)
{
	struct kprobe *p;
	struct kprobe_ctlblk *kcb;

	preempt_disable();

	p = get_kprobe((kprobe_opcode_t *)nip);
	if (unlikely(!p) || kprobe_disabled(p))
		goto end;

	kcb = get_kprobe_ctlblk();
	if (kprobe_running()) {
		kprobes_inc_nmissed_count(p);
	} else {
		unsigned long orig_nip = regs->nip;

		/*
		 * On powerpc, NIP is *before* this instruction for the
		 * pre handler
		 */
		regs->nip -= MCOUNT_INSN_SIZE;

		__this_cpu_write(current_kprobe, p);
		kcb->kprobe_status = KPROBE_HIT_ACTIVE;
		if (!p->pre_handler || !p->pre_handler(p, regs))
			__skip_singlestep(p, regs, kcb, orig_nip);
		else {
			/*
			 * If pre_handler returns !0, it sets regs->nip and
			 * resets current kprobe. In this case, we should not
			 * re-enable preemption.
			 */
			return;
		}
	}
end:
	preempt_enable_no_resched();
}
Code example #23
static void takeover_tasklets(unsigned int cpu)
{
	/* CPU is dead, so no lock needed. */
	local_irq_disable();

	/* Find end, append list for that CPU. */
	if (&per_cpu(tasklet_vec, cpu).head != per_cpu(tasklet_vec, cpu).tail) {
		*__this_cpu_read(tasklet_vec.tail) = per_cpu(tasklet_vec, cpu).head;
		this_cpu_write(tasklet_vec.tail, per_cpu(tasklet_vec, cpu).tail);
		per_cpu(tasklet_vec, cpu).head = NULL;
		per_cpu(tasklet_vec, cpu).tail = &per_cpu(tasklet_vec, cpu).head;
	}
	raise_softirq_irqoff(TASKLET_SOFTIRQ);

	if (&per_cpu(tasklet_hi_vec, cpu).head != per_cpu(tasklet_hi_vec, cpu).tail) {
		*__this_cpu_read(tasklet_hi_vec.tail) = per_cpu(tasklet_hi_vec, cpu).head;
		__this_cpu_write(tasklet_hi_vec.tail, per_cpu(tasklet_hi_vec, cpu).tail);
		per_cpu(tasklet_hi_vec, cpu).head = NULL;
		per_cpu(tasklet_hi_vec, cpu).tail = &per_cpu(tasklet_hi_vec, cpu).head;
	}
	raise_softirq_irqoff(HI_SOFTIRQ);

	local_irq_enable();
}
Code example #24
File: watchdog.c Project: hexianren/linux-3.7-Read
/*
 * The watchdog thread function - touches the timestamp.
 *
 * It only runs once every get_sample_period() seconds (4 seconds by
 * default) to reset the softlockup timestamp. If this gets delayed
 * for more than 2*watchdog_thresh seconds then the debug-printout
 * triggers in watchdog_timer_fn().
 */
static void watchdog(unsigned int cpu)
{
	__this_cpu_write(soft_lockup_hrtimer_cnt,
			 __this_cpu_read(hrtimer_interrupts));
	__touch_watchdog();
}
Code example #25
/* Commands for resetting the watchdog */
static void __touch_watchdog(void)
{
	__this_cpu_write(watchdog_touch_ts, get_timestamp());
}
Code example #26
File: watchdog.c Project: hexianren/linux-3.7-Read
/* Commands for resetting the watchdog */
static void __touch_watchdog(void)
{
	int this_cpu = smp_processor_id();

	__this_cpu_write(watchdog_touch_ts, get_timestamp(this_cpu));
}
Code example #27
static void __kprobes set_current_kprobe(struct kprobe *p)
{
	__this_cpu_write(current_kprobe, p);
}
Code example #28
static void __kprobes restore_previous_kprobe(struct kprobe_ctlblk *kcb)
{
	__this_cpu_write(current_kprobe, kcb->prev_kprobe.kp);
	kcb->kprobe_status = kcb->prev_kprobe.status;
}
Code example #29
File: watchdog.c Project: hexianren/linux-3.7-Read
void touch_softlockup_watchdog(void)
{
	__this_cpu_write(watchdog_touch_ts, 0);
}
Code example #30
File: watchdog.c Project: hexianren/linux-3.7-Read
/* watchdog kicker functions */
static enum hrtimer_restart watchdog_timer_fn(struct hrtimer *hrtimer)
{
	unsigned long touch_ts = __this_cpu_read(watchdog_touch_ts);
	struct pt_regs *regs = get_irq_regs();
	int duration;

	/* kick the hardlockup detector */
	watchdog_interrupt_count();

	/* kick the softlockup detector */
	wake_up_process(__this_cpu_read(softlockup_watchdog));

	/* .. and repeat */
	hrtimer_forward_now(hrtimer, ns_to_ktime(get_sample_period()));

	if (touch_ts == 0) {
		if (unlikely(__this_cpu_read(softlockup_touch_sync))) {
			/*
			 * If the time stamp was touched atomically
			 * make sure the scheduler tick is up to date.
			 */
			__this_cpu_write(softlockup_touch_sync, false);
			sched_clock_tick();
		}

		/* Clear the guest paused flag on watchdog reset */
		kvm_check_and_clear_guest_paused();
		__touch_watchdog();
		return HRTIMER_RESTART;
	}

	/* check for a softlockup
	 * This is done by making sure a high priority task is
	 * being scheduled.  The task touches the watchdog to
	 * indicate it is getting cpu time.  If it hasn't then
	 * this is a good indication some task is hogging the cpu
	 */
	duration = is_softlockup(touch_ts);
	if (unlikely(duration)) {
		/*
		 * If a virtual machine is stopped by the host it can look to
		 * the watchdog like a soft lockup, check to see if the host
		 * stopped the vm before we issue the warning
		 */
		if (kvm_check_and_clear_guest_paused())
			return HRTIMER_RESTART;

		/* only warn once */
		if (__this_cpu_read(soft_watchdog_warn) == true)
			return HRTIMER_RESTART;

		printk(KERN_EMERG "BUG: soft lockup - CPU#%d stuck for %us! [%s:%d]\n",
			smp_processor_id(), duration,
			current->comm, task_pid_nr(current));
		print_modules();
		print_irqtrace_events(current);
		if (regs)
			show_regs(regs);
		else
			dump_stack();

		if (softlockup_panic)
			panic("softlockup: hung tasks");
		__this_cpu_write(soft_watchdog_warn, true);
	} else
		__this_cpu_write(soft_watchdog_warn, false);

	return HRTIMER_RESTART;
}
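Finally, the watchdog examples above all revolve around a handful of per-CPU variables. A hedged reconstruction of their declarations (inferred from the reads and writes shown here, not quoted from kernel/watchdog.c) is roughly:

#include <linux/percpu.h>
#include <linux/sched.h>

/* Reconstructed for illustration only. */
static DEFINE_PER_CPU(unsigned long, watchdog_touch_ts);	/* last softlockup touch */
static DEFINE_PER_CPU(unsigned long, hrtimer_interrupts);	/* timer tick counter */
static DEFINE_PER_CPU(unsigned long, soft_lockup_hrtimer_cnt);
static DEFINE_PER_CPU(bool, softlockup_touch_sync);
static DEFINE_PER_CPU(bool, soft_watchdog_warn);
static DEFINE_PER_CPU(bool, hard_watchdog_warn);
static DEFINE_PER_CPU(bool, watchdog_nmi_touch);
static DEFINE_PER_CPU(struct task_struct *, softlockup_watchdog);	/* woken each tick */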