Example #1
File: irq.c Project: fgeraci/cs518-sched
asmlinkage void do_IRQ(int irq, struct pt_regs * regs)
{
	struct irqaction *action;
	int do_random, cpu;

	cpu = smp_processor_id();
	irq_enter(cpu);
	kstat.irqs[cpu][irq]++;

	action = irq_action[irq];
	if (action) {
		if (!(action->flags & SA_INTERRUPT))
			__sti();
		action = irq_action[irq];
		do_random = 0;
		do {
			do_random |= action->flags;
			action->handler(irq, action->dev_id, regs);
			action = action->next;
		} while (action);
		if (do_random & SA_SAMPLE_RANDOM)
			add_interrupt_randomness(irq);
		__cli();
	}
	irq_exit(cpu);

	if (softirq_pending(cpu))
		do_softirq();

	/* unmasking and bottom half handling is done magically for us. */
}
Example #2
/*
 * This should really return information about whether
 * we should do bottom half handling etc. Right now we
 * end up _always_ checking the bottom half, which is a
 * waste of time and is not what some drivers would
 * prefer.
 */
int handle_IRQ_event(unsigned int irq, struct pt_regs * regs, struct irqaction * action)
{
	int status;
	int cpu = smp_processor_id();

	irq_enter(cpu, irq);

	status = 1;	/* Force the "do bottom halves" bit */

	if (!(action->flags & SA_INTERRUPT))
		__sti();

	do {
		status |= action->flags;
		action->handler(irq, action->dev_id, regs);
		action = action->next;
	} while (action);
	if (status & SA_SAMPLE_RANDOM)
		add_interrupt_randomness(irq);
	__cli();

	irq_exit(cpu, irq);

	return status;
}
Example #3
File: smp.c Project: fgeraci/cs518-sched
void __init smp_callin(void)
{
	int cpu = current->processor;

	smp_store_cpu_info(cpu);
	set_dec(tb_ticks_per_jiffy);
	cpu_callin_map[cpu] = 1;

	smp_ops->setup_cpu(cpu);

	init_idle();

	/*
	 * This cpu is now "online".  Only set them online
	 * before they enter the loop below since write access
	 * to the below variable is _not_ guaranteed to be
	 * atomic.
	 *   -- Cort <*****@*****.**>
	 */
	cpu_online_map |= 1UL << smp_processor_id();
	
	while(!smp_commenced)
		barrier();

	/* see smp_commence for more info */
	if (!smp_tb_synchronized && smp_num_cpus == 2) {
		smp_software_tb_sync(cpu);
	}
	__sti();
}
Example #4
void do_IRQ (int irq /* currently not passed: struct pt_regs * regs */)
/*
 * Note: the code in "interrupt-entry.s" that calls this function
 * leaves IRQs disabled until this function returns. This function
 * is therefore entered with the IRQ still disabled; you may enable
 * it here.
 */
{
  struct irqdesc * desc;
  struct irqaction * action;

  if (irq >= NR_IRQS) {
    // spurious intr
    return;
  }
  desc = irq_desc + irq;
  if (desc->mask_ack) {
    desc->mask_ack(irq);
    action = desc->action;
    if (action) {
      if (desc->nomask) {
        desc->unmask(irq);
      }
      __sti(); // enable IRQs
      do {
        action->handler(irq, action->dev_id, 0);
        action = action->next;
      } while (action);
      __cli(); // disable IRQs so that unmask() can be called safely
      if (!desc->nomask && desc->enabled) {
        desc->unmask(irq);
      }
    }
  }
}
Example #5
File: irq.c Project: TKr/Wive-ng-rt8186
static inline int handle_IRQ_event(unsigned int irq, struct pt_regs * regs, struct irqaction * action)
{
	int status;
	int cpu = smp_processor_id();

	irq_enter(cpu, irq);

	status = 1;	/* Force the "do bottom halves" bit */
#if 0
	if (!(action->flags & SA_INTERRUPT))
		__sti();
#endif
//sc_yang => open shared IRQ
#if 1
//#ifndef JACKSON_NET_WORK	
	do {
		status |= action->flags;
		action->handler(irq, action->dev_id, regs);
		action = action->next;
	} while (action);
	if (status & SA_SAMPLE_RANDOM)
		add_interrupt_randomness(irq);

#else
	action->handler(irq, action->dev_id, regs);
#endif	
	//__cli();

	irq_exit(cpu, irq);

	return status;
}
Example #6
/*
 * do_IRQ handles IRQ's that have been installed without the
 * SA_INTERRUPT flag: it uses the full signal-handling return
 * and runs with other interrupts enabled. All relatively slow
 * IRQ's should use this format: notably the keyboard/timer
 * routines.
 */
static void do_IRQ(int irq, struct pt_regs * regs)
{
	struct irqaction *action;
	int do_random, cpu;

	cpu = smp_processor_id();
	irq_enter(cpu);
	kstat.irqs[cpu][irq]++;

	mask_irq(irq);
	action = *(irq + irq_action);
	if (action) {
		if (!(action->flags & SA_INTERRUPT))
			__sti();
		action = *(irq + irq_action);
		do_random = 0;
		do {
			do_random |= action->flags;
			action->handler(irq, action->dev_id, regs);
			action = action->next;
		} while (action);
		if (do_random & SA_SAMPLE_RANDOM)
			add_interrupt_randomness(irq);
		__cli();
	} else {
		printk("do_IRQ: Unregistered IRQ (0x%X) occurred\n", irq);
	}
	unmask_irq(irq);
	irq_exit(cpu);

	/* unmasking and bottom half handling is done magically for us. */
}
Example #7
void __global_sti(void)
{
	int cpu = smp_processor_id();

	if (!local_irq_count(cpu))
		release_irqlock(cpu);
	__sti();
}
Example #8
void smp_send_stop(void)
{
	smp_call_function(stop_this_cpu, NULL, 1, 0);
	smp_num_cpus = 1;

	__cli();
	disable_local_APIC();
	__sti();
}
Example #9
static void stop_this_cpu(void *dummy)
{
	/*
	 * Remove this CPU:
	 */
	clear_bit(smp_processor_id(), &cpu_online_map);
	/* May need to service _machine_restart IPI */
	__sti();
	/* XXXKW wait if available? */
	for (;;);
}
Example #10
void __global_sti(void)
{
	int cpu;

	preempt_disable();
	cpu = smp_processor_id();
	if (!local_irq_count(cpu))
		release_irqlock(cpu);
	preempt_enable();
	__sti();
}
Example #11
File: kernel.c Project: gandro/rumprun
/*
 * INITIAL C ENTRY POINT.
 */
void _minios_start_kernel(start_info_t *si)
{

    bmk_printf_init(minios_putc, NULL);
    bmk_core_init(STACK_SIZE_PAGE_ORDER, PAGE_SHIFT);

    arch_init(si);
    trap_init();
    bmk_sched_init();

    /* print out some useful information  */
    minios_printk("  start_info: %p(VA)\n", si);
    minios_printk("    nr_pages: 0x%lx\n", si->nr_pages);
    minios_printk("  shared_inf: 0x%08lx(MA)\n", si->shared_info);
    minios_printk("     pt_base: %p(VA)\n", (void *)si->pt_base); 
    minios_printk("nr_pt_frames: 0x%lx\n", si->nr_pt_frames);
    minios_printk("    mfn_list: %p(VA)\n", (void *)si->mfn_list); 
    minios_printk("   mod_start: 0x%lx(VA)\n", si->mod_start);
    minios_printk("     mod_len: %lu\n", si->mod_len); 
    minios_printk("       flags: 0x%x\n", (unsigned int)si->flags);
    minios_printk("    cmd_line: %s\n",  
           si->cmd_line ? (const char *)si->cmd_line : "NULL");

    /* Set up events. */
    init_events();
    
    /* ENABLE EVENT DELIVERY. This is disabled at start of day. */
    __sti();

    arch_print_info();

    setup_xen_features();

    /* Init memory management. */
    init_mm();

    /* Init time and timers. */
    init_time();

    /* Init the console driver. */
    init_console();

    /* Init grant tables */
    init_gnttab();
 
    /* Init XenBus */
    init_xenbus();

    /* Init scheduler. */
    bmk_sched_startmain(_app_main, &start_info);
    bmk_platform_halt("unreachable");
}
Example #12
void setup_APIC_timer(void * data)
{
	unsigned int clocks = (unsigned int) data, slice, t0, t1;
	unsigned long flags;
	int delta;

	__save_flags(flags);
	__sti();
	/*
	 * ok, Intel has some smart code in their APIC that knows
	 * if a CPU was in 'hlt' lowpower mode, and this increases
	 * its APIC arbitration priority. To avoid the external timer
	 * IRQ APIC event being in sync with the APIC clock, we
	 * introduce an interrupt skew to spread out timer events.
	 *
	 * The number of slices within a 'big' timeslice is smp_num_cpus+1
	 */

	slice = clocks / (smp_num_cpus+1);
	printk("cpu: %d, clocks: %d, slice: %d\n",
		smp_processor_id(), clocks, slice);

	/*
	 * Wait for IRQ0's slice:
	 */
	wait_8254_wraparound();

	__setup_APIC_LVTT(clocks);

	t0 = apic_read(APIC_TMICT)*APIC_DIVISOR;
	/* Wait till TMCCT gets reloaded from TMICT... */
	do {
		t1 = apic_read(APIC_TMCCT)*APIC_DIVISOR;
		delta = (int)(t0 - t1 - slice*(smp_processor_id()+1));
	} while (delta >= 0);
	/* Now wait for our slice for real. */
	do {
		t1 = apic_read(APIC_TMCCT)*APIC_DIVISOR;
		delta = (int)(t0 - t1 - slice*(smp_processor_id()+1));
	} while (delta < 0);

	__setup_APIC_LVTT(clocks);

	printk("CPU%d<T0:%d,T1:%d,D:%d,S:%d,C:%d>\n",
			smp_processor_id(), t0, t1, delta, slice, clocks);

	__restore_flags(flags);
}
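Note: setup_APIC_timer() above brackets its work with the local flag-save
pattern. A minimal usage sketch, assuming the 2.4-era i386 macros (where
__save_flags()/__restore_flags() take the flags variable directly):

	unsigned long flags;

	__save_flags(flags);	/* snapshot the flag register into 'flags' */
	__sti();		/* force interrupts on for the timed section */
	/* ... work that must run with IRQs enabled ... */
	__restore_flags(flags);	/* put the interrupt flag back as the caller left it */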
Example #13
void __init setup_APIC_clocks (void)
{
	printk("Using local APIC timer interrupts.\n");
	using_apic_timer = 1;

	__cli();

	calibration_result = calibrate_APIC_clock();
	/*
	 * Now set up the timer for real.
	 */
	setup_APIC_timer((void *)calibration_result);

	__sti();

	/* and update all other cpus */
	smp_call_function(setup_APIC_timer, (void *)calibration_result, 1, 1);
}
Example #14
void __global_restore_flags(unsigned long flags)
{
	switch (flags) {
		case 0:
			__global_cli();
			break;
		case 1:
			__global_sti();
			break;
		case 2:
			__cli();
			break;
		case 3:
			__sti();
			break;
		default:
			printk("global_restore_flags: %08lx\n", flags);
	}
}
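For context, a hedged usage sketch: in the 2.4-era kernels these examples come
from, __global_save_flags() returns the 0..3 encoding that the switch above
dispatches on (0 = global cli, 1 = global sti, 2 = local cli, 3 = local sti).
Assuming those definitions:

	unsigned long flags;

	flags = __global_save_flags();	/* capture global/local IRQ state as 0..3 */
	__global_cli();			/* take the global IRQ lock, IRQs off */
	/* ... cross-CPU critical section ... */
	__global_restore_flags(flags);	/* re-dispatch through the switch above */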
Example #15
/*
 * INITIAL C ENTRY POINT.
 */
void start_kernel(start_info_t *si)
{
    static char hello[] = "Bootstrapping...\n";

    (void)HYPERVISOR_console_io(CONSOLEIO_write, strlen(hello), hello);

    arch_init(si);

    trap_init();

    /* print out some useful information  */
    printk("Mirage OS!\n");
    printk("  start_info: %p(VA)\n", si);
    printk("    nr_pages: 0x%lx\n", si->nr_pages);
    printk("  shared_inf: 0x%08lx(MA)\n", si->shared_info);
    printk("     pt_base: %p(VA)\n", (void *)si->pt_base); 
    printk("nr_pt_frames: 0x%lx\n", si->nr_pt_frames);
    printk("    mfn_list: %p(VA)\n", (void *)si->mfn_list); 
    printk("   mod_start: 0x%lx(VA)\n", si->mod_start);
    printk("     mod_len: %lu\n", si->mod_len); 
    printk("       flags: 0x%x\n", (unsigned int)si->flags);
    printk("    cmd_line: %s\n",  
           si->cmd_line ? (const char *)si->cmd_line : "NULL");

    /* Set up events. */
    init_events();
    
    /* ENABLE EVENT DELIVERY. This is disabled at start of day. */
    __sti();

    arch_print_info();

    setup_xen_features();

    /* Init memory management. */
    init_mm();

    /* Init time and timers. */
    init_time();

    /* Call (possibly overridden) app_main() */
    app_main(&start_info);
}
Example #16
void __init smp_callin(void)
{
	int cpu = current->processor;

	smp_store_cpu_info(cpu);
	set_dec(paca[cpu].default_decr);
	cpu_callin_map[cpu] = 1;

	ppc_md.smp_setup_cpu(cpu);

	init_idle();

	set_bit(smp_processor_id(), &cpu_online_map);
	
	while(!smp_commenced) {
		barrier();
	}
	__sti();
}
Example #17
File: smp.c Project: dduval/kernel-rhel3
void __init smp_callin(void)
{
	int cpu = current->processor;

	smp_store_cpu_info(cpu);
	smp_ops->setup_cpu(cpu);
	set_dec(tb_ticks_per_jiffy);
	cpu_online_map |= 1UL << cpu;
	mb();
	cpu_callin_map[cpu] = 1;
	
	while(!smp_commenced)
		barrier();

	/* see smp_commence for more info */
	if (!smp_tb_synchronized && smp_num_cpus == 2) {
		smp_software_tb_sync(cpu);
	}
	__sti();
}
Example #18
void __init setup_APIC_clocks (void)
{
	/* Disabled by DMI scan or kernel option? */
	if (dont_use_local_apic_timer)
		return;

	printk("Using local APIC timer interrupts.\n");
	using_apic_timer = 1;

	__cli();

	calibration_result = calibrate_APIC_clock();
	/*
	 * Now set up the timer for real.
	 */
	setup_APIC_timer((void *)(u64)calibration_result);

	__sti();

	/* and update all other cpus */
	smp_call_function(setup_APIC_timer, (void *)(u64)calibration_result, 1, 1);
}
Example #19
static inline void wait_on_irq(int cpu)
{
	int count = MAXCOUNT;

	for (;;) {

		/*
		 * Wait until all interrupts are gone. Wait
		 * for bottom half handlers unless we're
		 * already executing in one..
		 */
		if (!irqs_running())
			if (local_bh_count(cpu) || !spin_is_locked(&global_bh_lock))
				break;

		/* Duh, we have to loop. Release the lock to avoid deadlocks */
		spin_unlock(&global_irq_lock);

		for (;;) {
			if (!--count) {
				printk("Count spun out.  Huh?\n");
				count = ~0;
			}
			__sti();
			SYNC_OTHER_CORES(cpu);
			__cli();
			if (irqs_running())
				continue;
			if (spin_is_locked(&global_irq_lock))
				continue;
			if (!local_bh_count(cpu) && spin_is_locked(&global_bh_lock))
				continue;
			if (spin_trylock(&global_irq_lock))
				break;
		}
	}
}
Example #20
asmlinkage void i8259_do_irq(int irq, struct pt_regs *regs)
{
	struct irqaction *action;
	int do_random, cpu;

	cpu = smp_processor_id();
	hardirq_enter(cpu);

	if (irq >= 16)
		goto out;

	i8259_mask_and_ack_irq(irq);

	kstat.irqs[cpu][irq]++;

	action = *(irq + irq_action);
	if (!action)
		goto out;

	if (!(action->flags & SA_INTERRUPT))
		__sti();
	action = *(irq + irq_action);
	do_random = 0;
	do {
		do_random |= action->flags;
		action->handler(irq, action->dev_id, regs);
		action = action->next;
	} while (action);
	if (do_random & SA_SAMPLE_RANDOM)
		add_interrupt_randomness(irq);
	__cli();
	unmask_irq (irq);

out:
	hardirq_exit(cpu);
}
Example #21
File: fault.c Project: TitaniumBoy/lin
/*
 * This routine handles page faults.  It determines the address,
 * and the problem, and then passes it off to one of the appropriate
 * routines.
 *
 * error_code:
 *             ****0004       Protection             ->  Write-Protection  (suppression)
 *             ****0010       Segment translation    ->  Not present       (nullification)
 *             ****0011       Page translation       ->  Not present       (nullification)
 *             ****003B       Region third exception ->  Not present       (nullification)
 */
asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long error_code)
{
        struct task_struct *tsk;
        struct mm_struct *mm;
        struct vm_area_struct * vma;
        unsigned long address;
        unsigned long fixup;
        int write;
	int si_code = SEGV_MAPERR;
	int kernel_address = 0;

        tsk = current;
        mm = tsk->mm;
	
	/*
	 * Check for low-address protection.  This needs to be treated
	 * as a special case because the translation exception code
	 * field is not guaranteed to contain valid data in this case.
	 */
	if ((error_code & 0xff) == 4 && !(S390_lowcore.trans_exc_code & 4)) {

		/* Low-address protection hit in kernel mode means 
		   NULL pointer write access in kernel mode.  */
 		if (!(regs->psw.mask & PSW_PROBLEM_STATE)) {
			address = 0;
			kernel_address = 1;
			goto no_context;
		}

		/* Low-address protection hit in user mode 'cannot happen'.  */
		die ("Low-address protection", regs, error_code);
        	do_exit(SIGKILL);
	}

        /*
         * Get the failing address; more specifically, the segment
         * and page table portion of the address.
         */
        address = S390_lowcore.trans_exc_code & -4096L;

	/*
	 * Check which address space the address belongs to
	 */
	switch (S390_lowcore.trans_exc_code & 3)
	{
	case 0: /* Primary Segment Table Descriptor */
		kernel_address = 1;
		goto no_context;

	case 1: /* STD determined via access register */
		if (S390_lowcore.exc_access_id == 0)
		{
			kernel_address = 1;
			goto no_context;
		}
		if (regs && S390_lowcore.exc_access_id < NUM_ACRS)
		{
			if (regs->acrs[S390_lowcore.exc_access_id] == 0)
			{
				kernel_address = 1;
				goto no_context;
			}
			if (regs->acrs[S390_lowcore.exc_access_id] == 1)
			{
				/* user space address */
				break;
			}
		}
		die("page fault via unknown access register", regs, error_code);
        	do_exit(SIGKILL);
		break;

	case 2: /* Secondary Segment Table Descriptor */
	case 3: /* Home Segment Table Descriptor */
		/* user space address */
		break;
	}

	/*
	 * Check whether we have a user MM in the first place.
	 */
        if (in_interrupt() || !mm || !(regs->psw.mask & _PSW_IO_MASK_BIT))
                goto no_context;

	/*
	 * When we get here, the fault happened in the current
	 * task's user address space, so we can switch on the
	 * interrupts again and then search the VMAs
	 */

	__sti();

        down_read(&mm->mmap_sem);

        vma = find_vma(mm, address);
        if (!vma)
                goto bad_area;
        if (vma->vm_start <= address) 
                goto good_area;
        if (!(vma->vm_flags & VM_GROWSDOWN))
                goto bad_area;
        if (expand_stack(vma, address))
                goto bad_area;
/*
 * Ok, we have a good vm_area for this memory access, so
 * we can handle it..
 */
good_area:
        write = 0;
	si_code = SEGV_ACCERR;

        switch (error_code & 0xFF) {
                case 0x04:                                /* write, present*/
                        write = 1;
                        break;
                case 0x10:                                   /* not present*/
                case 0x11:                                   /* not present*/
                case 0x3B:                                   /* not present*/
                        if (!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE)))
                                goto bad_area;
                        break;
                default:
                       printk("code should be 4, 10 or 11 (%lX) \n",error_code&0xFF);  
                       goto bad_area;
        }

 survive:
	/*
	 * If for any reason at all we couldn't handle the fault,
	 * make sure we exit gracefully rather than endlessly redo
	 * the fault.
	 */
	switch (handle_mm_fault(mm, vma, address, write)) {
	case 1:
		tsk->min_flt++;
		break;
	case 2:
		tsk->maj_flt++;
		break;
	case 0:
		goto do_sigbus;
	default:
		goto out_of_memory;
	}

        up_read(&mm->mmap_sem);
        return;

/*
 * Something tried to access memory that isn't in our memory map..
 * Fix it, but check if it's kernel or user first..
 */
bad_area:
        up_read(&mm->mmap_sem);

        /* User mode accesses just cause a SIGSEGV */
        if (regs->psw.mask & PSW_PROBLEM_STATE) {
                tsk->thread.prot_addr = address;
                tsk->thread.trap_no = error_code;
#ifndef CONFIG_SYSCTL
#ifdef CONFIG_PROCESS_DEBUG
                printk("User process fault: interruption code 0x%lX\n",error_code);
                printk("failing address: %lX\n",address);
		show_regs(regs);
#endif
#else
		if (sysctl_userprocess_debug) {
			printk("User process fault: interruption code 0x%lX\n",
			       error_code);
			printk("failing address: %lX\n", address);
			show_regs(regs);
		}
#endif

		force_sigsegv(tsk, si_code, (void *)address);
                return;
	}

no_context:
        /* Are we prepared to handle this kernel fault?  */
        if ((fixup = search_exception_table(regs->psw.addr)) != 0) {
                regs->psw.addr = fixup;
                return;
        }

/*
 * Oops. The kernel tried to access some bad page. We'll have to
 * terminate things with extreme prejudice.
 */

        if (kernel_address)
                printk(KERN_ALERT "Unable to handle kernel pointer dereference"
        	       " at virtual kernel address %016lx\n", address);
        else
                printk(KERN_ALERT "Unable to handle kernel paging request"
		       " at virtual user address %016lx\n", address);

        die("Oops", regs, error_code);
        do_exit(SIGKILL);


/*
 * We ran out of memory, or some other thing happened to us that made
 * us unable to handle the page fault gracefully.
 */
out_of_memory:
	up_read(&mm->mmap_sem);
	if (tsk->pid == 1) {
		tsk->policy |= SCHED_YIELD;
		schedule();
		down_read(&mm->mmap_sem);
		goto survive;
	}
	printk("VM: killing process %s\n", tsk->comm);
	if (regs->psw.mask & PSW_PROBLEM_STATE)
		do_exit(SIGKILL);
	goto no_context;

do_sigbus:
	up_read(&mm->mmap_sem);

	/*
	 * Send a sigbus, regardless of whether we were in kernel
	 * or user mode.
	 */
        tsk->thread.prot_addr = address;
        tsk->thread.trap_no = error_code;
	force_sig(SIGBUS, tsk);

	/* Kernel mode? Handle exceptions or die */
	if (!(regs->psw.mask & PSW_PROBLEM_STATE))
		goto no_context;
}
Example #22
/*
 * This routine handles page faults.  It determines the address,
 * and the problem, and then passes it off to one of the appropriate
 * routines.
 *
 * error_code:
 *   04       Protection           ->  Write-Protection  (suppression)
 *   10       Segment translation  ->  Not present       (nullification)
 *   11       Page translation     ->  Not present       (nullification)
 *   3b       Region third trans.  ->  Not present       (nullification)
 */
extern inline void do_exception(struct pt_regs *regs, unsigned long error_code)
{
        struct task_struct *tsk;
        struct mm_struct *mm;
        struct vm_area_struct * vma;
        unsigned long address;
	int user_address;
        unsigned long fixup;
	int si_code = SEGV_MAPERR;

        tsk = current;
        mm = tsk->mm;
	
	/*
	 * Check for low-address protection.  This needs to be treated
	 * as a special case because the translation exception code
	 * field is not guaranteed to contain valid data in this case.
	 */
	if (error_code == 4 && !(S390_lowcore.trans_exc_code & 4)) {

		/* Low-address protection hit in kernel mode means 
		   NULL pointer write access in kernel mode.  */
 		if (!(regs->psw.mask & PSW_PROBLEM_STATE)) {
			address = 0;
			user_address = 0;
			goto no_context;
		}

		/* Low-address protection hit in user mode 'cannot happen'.  */
		die ("Low-address protection", regs, error_code);
        	do_exit(SIGKILL);
	}

        /*
         * Get the failing address; more specifically, the segment
         * and page table portion of the address.
         */
        address = S390_lowcore.trans_exc_code & -4096L;
	user_address = check_user_space(regs, error_code);

	/*
	 * Verify that the fault happened in user space, that
	 * we are not in an interrupt and that there is a 
	 * user context.
	 */
        if (user_address == 0 || in_interrupt() || !mm)
                goto no_context;

	/*
	 * When we get here, the fault happened in the current
	 * task's user address space, so we can switch on the
	 * interrupts again and then search the VMAs
	 */
	__sti();

        down_read(&mm->mmap_sem);

        vma = find_vma(mm, address);
        if (!vma)
                goto bad_area;
        if (vma->vm_start <= address) 
                goto good_area;
        if (!(vma->vm_flags & VM_GROWSDOWN))
                goto bad_area;
        if (expand_stack(vma, address))
                goto bad_area;
/*
 * Ok, we have a good vm_area for this memory access, so
 * we can handle it..
 */
good_area:
	si_code = SEGV_ACCERR;
	if (error_code != 4) {
		/* page not present, check vm flags */
		if (!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE)))
			goto bad_area;
	} else {
		if (!(vma->vm_flags & VM_WRITE))
			goto bad_area;
	}

survive:
	/*
	 * If for any reason at all we couldn't handle the fault,
	 * make sure we exit gracefully rather than endlessly redo
	 * the fault.
	 */
	switch (handle_mm_fault(mm, vma, address, error_code == 4)) {
	case 1:
		tsk->min_flt++;
		break;
	case 2:
		tsk->maj_flt++;
		break;
	case 0:
		goto do_sigbus;
	default:
		goto out_of_memory;
	}

        up_read(&mm->mmap_sem);
        return;

/*
 * Something tried to access memory that isn't in our memory map..
 * Fix it, but check if it's kernel or user first..
 */
bad_area:
        up_read(&mm->mmap_sem);

        /* User mode accesses just cause a SIGSEGV */
        if (regs->psw.mask & PSW_PROBLEM_STATE) {
                tsk->thread.prot_addr = address;
                tsk->thread.trap_no = error_code;
		force_sigsegv(regs, error_code, si_code, address);
                return;
	}

no_context:
        /* Are we prepared to handle this kernel fault?  */
        if ((fixup = search_exception_table(regs->psw.addr)) != 0) {
                regs->psw.addr = fixup;
                return;
        }

/*
 * Oops. The kernel tried to access some bad page. We'll have to
 * terminate things with extreme prejudice.
 */
        if (user_address == 0)
                printk(KERN_ALERT "Unable to handle kernel pointer dereference"
        	       " at virtual kernel address %016lx\n", address);
        else
                printk(KERN_ALERT "Unable to handle kernel paging request"
		       " at virtual user address %016lx\n", address);

        die("Oops", regs, error_code);
        do_exit(SIGKILL);


/*
 * We ran out of memory, or some other thing happened to us that made
 * us unable to handle the page fault gracefully.
 */
out_of_memory:
	if (tsk->pid == 1) {
		yield();
		goto survive;
	}
	up_read(&mm->mmap_sem);
	printk("VM: killing process %s\n", tsk->comm);
	if (regs->psw.mask & PSW_PROBLEM_STATE)
		do_exit(SIGKILL);
	goto no_context;

do_sigbus:
	up_read(&mm->mmap_sem);

	/*
	 * Send a sigbus, regardless of whether we were in kernel
	 * or user mode.
	 */
        tsk->thread.prot_addr = address;
        tsk->thread.trap_no = error_code;
	force_sig(SIGBUS, tsk);

	/* Kernel mode? Handle exceptions or die */
	if (!(regs->psw.mask & PSW_PROBLEM_STATE))
		goto no_context;
}
Example #23
/*
 * do_IRQ handles all normal device IRQ's
 */
asmlinkage void do_IRQ(int irq, struct pt_regs * regs)
{
	struct irqdesc * desc;
	struct irqaction * action;
	int cpu;

	irq = fixup_irq(irq);

	/*
	 * Some hardware gives randomly wrong interrupts.  Rather
	 * than crashing, do something sensible.
	 */
	if (irq >= NR_IRQS)
		goto bad_irq;

	desc = irq_desc + irq;

	spin_lock(&irq_controller_lock);
	desc->mask_ack(irq);
	spin_unlock(&irq_controller_lock);

	cpu = smp_processor_id();
	irq_enter(cpu, irq);
	kstat.irqs[cpu][irq]++;
	desc->triggered = 1;

	/* Return with this interrupt masked if no action */
	action = desc->action;

	if (action) {
		int status = 0;

		if (desc->nomask) {
			spin_lock(&irq_controller_lock);
			desc->unmask(irq);
			spin_unlock(&irq_controller_lock);
		}

		if (!(action->flags & SA_INTERRUPT))
			__sti();

		do {
			status |= action->flags;
			action->handler(irq, action->dev_id, regs);
			action = action->next;
		} while (action);

		if (status & SA_SAMPLE_RANDOM)
			add_interrupt_randomness(irq);
		__cli();

		if (!desc->nomask && desc->enabled) {
			spin_lock(&irq_controller_lock);
			desc->unmask(irq);
			spin_unlock(&irq_controller_lock);
		}
	}

	/*
	 * Debug measure - hopefully we can continue if an
	 * IRQ lockup problem occurs...
	 */
	check_irq_lock(desc, irq, regs);

	irq_exit(cpu, irq);

	if (softirq_active(cpu) & softirq_mask(cpu))
		do_softirq();
	return;

bad_irq:
	irq_err_count += 1;
	printk(KERN_ERR "IRQ: spurious interrupt %d\n", irq);
	return;
}
Example #24
/*
 * INITIAL C ENTRY POINT.
 */
void start_kernel(start_info_t *si)
{
    static char hello[] = "Bootstrapping...\n";

    (void)HYPERVISOR_console_io(CONSOLEIO_write, strlen(hello), hello);

    setup_xen_features();

    pvh_early_init();

    arch_init(si);

    trap_init();

    /* print out some useful information  */
    printk("Xen Minimal OS!\n");
    printk("  start_info: %p(VA)\n", si);
    printk("    nr_pages: 0x%lx\n", si->nr_pages);
    printk("  shared_inf: 0x%08lx(MA)\n", si->shared_info);
    printk("     pt_base: %p(VA)\n", (void *)si->pt_base); 
    printk("nr_pt_frames: 0x%lx\n", si->nr_pt_frames);
    printk("    mfn_list: %p(VA)\n", (void *)si->mfn_list); 
    printk("   mod_start: 0x%lx(VA)\n", si->mod_start);
    printk("     mod_len: %lu\n", si->mod_len); 
    printk("       flags: 0x%x\n", (unsigned int)si->flags);
    printk("    cmd_line: %s\n",  
           si->cmd_line ? (const char *)si->cmd_line : "NULL");

    /* Set up events. */
    init_events();
    
    /* ENABLE EVENT DELIVERY. This is disabled at start of day. */
    __sti();

    arch_print_info();

    /* Init memory management. */
    init_mm();

    /* Init time and timers. */
    init_time();

    /* Init the console driver. */
    init_console();

    /* Init grant tables */
    init_gnttab();
    
    /* Init scheduler. */
    init_sched();
 
    /* Init XenBus */
    init_xenbus();

#ifdef CONFIG_XENBUS
    /* Init shutdown thread */
    init_shutdown(si);
#endif

    /* Call (possibly overridden) app_main() */
    app_main(&start_info);

    /* Everything initialised, start idle thread */
    run_idle_thread();
}
Example #25
/*
 * do_IRQ handles all normal device IRQ's (the special
 * SMP cross-CPU interrupts have their own specific
 * handlers).
 */
asmlinkage unsigned int __noinstrument do_IRQ(int irq, struct pt_regs *regs)
{
	/* 
	 * We ack quickly, we don't want the irq controller
	 * thinking we're snobs just because some other CPU has
	 * disabled global interrupts (we have already done the
	 * INT_ACK cycles, it's too late to try to pretend to the
	 * controller that we aren't taking the interrupt).
	 *
	 * 0 return value means that this irq is already being
	 * handled by some other CPU. (or is disabled)
	 */
	int cpu = smp_processor_id();
	irq_desc_t *desc = irq_desc + irq;
	struct irqaction * action;
	unsigned int status;
#ifdef CONFIG_ILATENCY
	{
		extern void interrupt_overhead_start(void);

		interrupt_overhead_start();
	}
#endif /* CONFIG_ILATENCY */

	preempt_disable();

	TRACE_IRQ_ENTRY(irq, !user_mode(regs));

	kstat.irqs[cpu][irq]++;
	spin_lock(&desc->lock);
	desc->handler->ack(irq);
	/*
	 * REPLAY is when Linux resends an IRQ that was dropped earlier.
	 * WAITING is used by probe to mark irqs that are being tested.
	 */
	status = desc->status & ~(IRQ_REPLAY | IRQ_WAITING);
	status |= IRQ_PENDING; /* we _want_ to handle it */

	/*
	 * If the IRQ is disabled for whatever reason, we cannot
	 * use the action we have.
	 */
	action = NULL;
	if (!(status & (IRQ_DISABLED | IRQ_INPROGRESS))) {
		action = desc->action;
		status &= ~IRQ_PENDING; /* we commit to handling */
		status |= IRQ_INPROGRESS; /* we are handling it */
	}
	desc->status = status;

	/*
	 * If there is no IRQ handler or it was disabled, exit early.
	 * Since we set PENDING, if another processor is handling a
	 * different instance of this same irq, the other processor
	 * will take care of it.
	 */
	if (!action)
		goto out;

	/*
	 * Edge triggered interrupts need to remember
	 * pending events.
	 * This applies to any hw interrupts that allow a second
	 * instance of the same irq to arrive while we are in do_IRQ
	 * or in the handler. But the code here only handles the _second_
	 * instance of the irq, not the third or fourth. So it is mostly
	 * useful for irq hardware that does not mask cleanly in an
	 * SMP environment.
	 */
#ifdef CONFIG_ILATENCY
	{
		extern void interrupt_overhead_stop(void);

		interrupt_overhead_stop();
	}
#endif /* CONFIG_ILATENCY */

	for (;;) {
		spin_unlock(&desc->lock);
		handle_IRQ_event(irq, regs, action);
		spin_lock(&desc->lock);

		if (!(desc->status & IRQ_PENDING))
			break;
		desc->status &= ~IRQ_PENDING;
	}
	desc->status &= ~IRQ_INPROGRESS;
out:
	/*
	 * The ->end() handler has to deal with interrupts which got
	 * disabled while the handler was running.
	 */
	desc->handler->end(irq);
	spin_unlock(&desc->lock);

	TRACE_IRQ_EXIT();

	if (softirq_pending(cpu))
		do_softirq();

#if defined(CONFIG_PREEMPT)
	for(;;) {
		preempt_enable_no_resched();
		if (preempt_is_disabled() || !need_resched())
			break;

		db_assert(intr_off());
		db_assert(!in_interrupt());

		preempt_disable();
		__sti();
		preempt_schedule();
		__cli();
	}
#endif
#ifdef CONFIG_ILATENCY
        intr_ret_from_exception();
#endif

	return 1;
}
Example #26
/* Stopping processors. */
void smp_stop_cpu_irq(void)
{
	__sti();
	while(1)
		barrier();
}
Example #27
static int __do_suspend(void *ignore)
{
	int i, j, k, fpp, err;

	extern unsigned long max_pfn;
	extern unsigned long *pfn_to_mfn_frame_list_list;
	extern unsigned long *pfn_to_mfn_frame_list[];

	extern void time_resume(void);

	BUG_ON(smp_processor_id() != 0);
	BUG_ON(in_interrupt());

	if (xen_feature(XENFEAT_auto_translated_physmap)) {
		printk(KERN_WARNING "Cannot suspend in "
		       "auto_translated_physmap mode.\n");
		return -EOPNOTSUPP;
	}

	err = smp_suspend();
	if (err)
		return err;

	xenbus_suspend();

	preempt_disable();

#ifdef __i386__
	kmem_cache_shrink(pgd_cache);
#endif
	mm_pin_all();

	__cli();
	preempt_enable();

	gnttab_suspend();

	HYPERVISOR_shared_info = (shared_info_t *)empty_zero_page;
	clear_fixmap(FIX_SHARED_INFO);

	xen_start_info->store_mfn = mfn_to_pfn(xen_start_info->store_mfn);
	xen_start_info->console_mfn = mfn_to_pfn(xen_start_info->console_mfn);

	/*
	 * We'll stop somewhere inside this hypercall. When it returns,
	 * we'll start resuming after the restore.
	 */
	HYPERVISOR_suspend(virt_to_mfn(xen_start_info));

	shutting_down = SHUTDOWN_INVALID;

	set_fixmap(FIX_SHARED_INFO, xen_start_info->shared_info);

	HYPERVISOR_shared_info = (shared_info_t *)fix_to_virt(FIX_SHARED_INFO);

	memset(empty_zero_page, 0, PAGE_SIZE);

	HYPERVISOR_shared_info->arch.pfn_to_mfn_frame_list_list =
		virt_to_mfn(pfn_to_mfn_frame_list_list);

	fpp = PAGE_SIZE/sizeof(unsigned long);
	for (i = 0, j = 0, k = -1; i < max_pfn; i += fpp, j++) {
		if ((j % fpp) == 0) {
			k++;
			pfn_to_mfn_frame_list_list[k] =
				virt_to_mfn(pfn_to_mfn_frame_list[k]);
			j = 0;
		}
		pfn_to_mfn_frame_list[k][j] =
			virt_to_mfn(&phys_to_machine_mapping[i]);
	}
	HYPERVISOR_shared_info->arch.max_pfn = max_pfn;

	gnttab_resume();

	irq_resume();

	time_resume();

	switch_idle_mm();

	__sti();

	xencons_resume();

	xenbus_resume();

	smp_resume();

	return err;
}
Example #28
File: xen_intr.c Project: ryo/netbsd-src
void
x86_enable_intr(void)
{
	__sti();
}
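Closing note: every call site above treats __sti() as "enable interrupts on
the local CPU" (and __cli() as the inverse). On classic x86 kernels these are
one-instruction inline-asm wrappers; a sketch of what the macros commonly
expand to (assumption: the exact form varies by architecture and kernel
version, e.g. Xen guests toggle event delivery instead of the hardware flag):

	#define __sti() __asm__ __volatile__("sti" : : : "memory")
	#define __cli() __asm__ __volatile__("cli" : : : "memory")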