void ttyinit(int dev) { int baseport, ret; /* initialize baseport and return val variable for queue operations*/ struct tty *tty; /* ptr to tty software params/data block */ baseport = devtab[dev].dvbaseport; /* pick up hardware addr */ tty = (struct tty *)devtab[dev].dvdata; /* and software params struct */ cli(); if (baseport == COM1_BASE) { /* arm interrupts by installing interrupt vector address */ set_intr_gate(COM1_IRQ+IRQ_TO_INT_N_SHIFT, &irq4inthand); pic_enable_irq(COM1_IRQ); } else if (baseport == COM2_BASE) { /* arm interrupts by installing interrupt vector address */ set_intr_gate(COM2_IRQ+IRQ_TO_INT_N_SHIFT, &irq3inthand); pic_enable_irq(COM2_IRQ); } else { kprintf("Bad TTY device table entry, dev %d\n", dev); return; /* give up */ } tty->echoflag = 1; /* default to echoing */ ret = init_queue((tty->rqu), MAXQ); /* init RX queue */ ret = init_queue((tty->tqu), MAXQ); /* init TX queue */ outpt(baseport+UART_IER, UART_IER_RDI|UART_IER_THRI); /* enable interrupts on receiver and transmitter */ }
/*
 * init_irq -- program the PIC and build the IRQ dispatch table.
 * Every slot in irqhandler[] starts out pointing at the no-op stub
 * _ignore_irq; only the IDT gates for IRQ0 and IRQ1 are installed so
 * far (the rest are kept below as a commented-out placeholder).
 */
void init_irq(void)
{
    int i;

    init_pic();
    /* default every IRQ to the do-nothing handler */
    for( i = 0; i < MAX_IRQ; i++ )
        irqhandler[i] = _ignore_irq;
    set_intr_gate(IRQ_VECTOR+0, _irq_0);
    set_intr_gate(IRQ_VECTOR+1, _irq_1);
    // IRQ2~15 is implemented later
    /*set_intr_gate(IRQ_VECTOR+2, _irq_2);
    set_intr_gate(IRQ_VECTOR+3, _irq_3);
    set_intr_gate(IRQ_VECTOR+4, _irq_4);
    set_intr_gate(IRQ_VECTOR+5, _irq_5);
    set_intr_gate(IRQ_VECTOR+6, _irq_6);
    set_intr_gate(IRQ_VECTOR+7, _irq_7);
    set_intr_gate(IRQ_VECTOR+8, _irq_8);
    set_intr_gate(IRQ_VECTOR+9, _irq_9);
    set_intr_gate(IRQ_VECTOR+10, _irq_10);
    set_intr_gate(IRQ_VECTOR+11, _irq_11);
    set_intr_gate(IRQ_VECTOR+12, _irq_12);
    set_intr_gate(IRQ_VECTOR+13, _irq_13);
    set_intr_gate(IRQ_VECTOR+14, _irq_14);
    set_intr_gate(IRQ_VECTOR+15, _irq_15);*/
}
/*
 * irqaction -- register an interrupt handler for legacy PIC line `irq`
 * (0-15).  Copies *new_sa into the per-IRQ slot, installs the fast or
 * slow IDT stub depending on SA_INTERRUPT, and unmasks the line at the
 * 8259A.  Returns 0 on success, -EINVAL on bad arguments, -EBUSY if the
 * slot is already taken.  (Header comment translated from Chinese.)
 */
int irqaction(unsigned int irq, struct sigaction * new_sa)
{
    struct sigaction * sa;
    unsigned long flags;

    if (irq > 15)
        return -EINVAL;                 /* only the 16 legacy PIC lines */
    sa = irq + irq_sigaction;           /* slot for this IRQ */
    if (sa->sa_mask)
        return -EBUSY;                  /* sa_mask doubles as an "in use" flag */
    if (!new_sa->sa_handler)
        return -EINVAL;
    save_flags(flags);
    cli();                              /* no interrupts while rewiring */
    *sa = *new_sa;
    sa->sa_mask = 1;                    /* mark the slot occupied */
    /* SA_INTERRUPT selects the "fast" stub variant */
    if (sa->sa_flags & SA_INTERRUPT)
        set_intr_gate(0x20+irq,fast_interrupt[irq]);
    else
        set_intr_gate(0x20+irq,interrupt[irq]);
    /* unmask the line; IRQs 8-15 also need the cascade (IRQ2) open */
    if (irq < 8) {
        cache_21 &= ~(1<<irq);
        outb(cache_21,0x21);
    } else {
        cache_21 &= ~(1<<2);
        cache_A1 &= ~(1<<(irq-8));
        outb(cache_21,0x21);
        outb(cache_A1,0xA1);
    }
    restore_flags(flags);
    return 0;
}
void rs_init(void) { set_intr_gate(0x24,rs1_interrupt); // set up serial port 1 interruption set_intr_gate(0x23,rs2_interrupt); // serial port 2 interruption init(tty_table[1].read_q.data); // initialize serial port 1 init(tty_table[2].read_q.data); // initialize serial port 2 outb(inb_p(0x21)&0xE7,0x21); // Allow IRQ3 and IRQ4 }
/* Initialize the IDT table */ void idt_init() { /* Initialize Exception Gates */ set_trap_gate(0, ÷_error_wrapper); set_trap_gate(1, &debug); set_trap_gate(2, &nmi); set_system_gate(3, &int3); set_system_gate(4, &overflow); set_system_gate(5, &bounds); set_trap_gate(6, &invalid_op); set_trap_gate(7, &device_not_available); set_trap_gate(8, &double_fault); set_trap_gate(9, &coprocessor_segment_overrun); set_trap_gate(10, &invalid_TSS); set_trap_gate(11, &segment_not_present); set_trap_gate(12, &stack_segment); set_trap_gate(13, &general_protection); set_trap_gate(14, &page_fault_wrapper); set_trap_gate(16, &coprocessor_error); set_trap_gate(17, &alignment_check); /* Initialize Interrupt Gates */ // refer to student-notes.pdf P29 set_intr_gate(0x20, &PIT_wrapper); set_intr_gate(0x21, &keyboard_wrapper); // Initialize keyboard entry set_intr_gate(0x28, &rtc_wrapper); // Initialize the RTC entry /* Initialize system call gates*/ set_system_gate(0x80, &syscall_linkage); }
/*
 * rs_init -- initialize the two on-board serial ports and allow their
 * interrupts through the master PIC.
 */
void rs_init(void)
{
    int line;

    set_intr_gate(0x24,rs1_interrupt);   /* rs1: UART at 0x3f8, IRQ4 */
    set_intr_gate(0x23,rs2_interrupt);   /* rs2: UART at 0x2f8, IRQ3 */

    /* tty_table[1] and [2] are the serial lines; .data holds the base port */
    for (line = 1; line <= 2; line++)
        init(tty_table[line].read_q.data);

    outb(inb_p(0x21) & 0xE7, 0x21);      /* unmask IRQ3 and IRQ4 */
}
//// 初始化串行中断程序和串行接口。 void rs_init (void) { set_intr_gate (0x24, rs1_interrupt); // 设置串行口1 的中断门向量(硬件IRQ4 信号)。 set_intr_gate (0x23, rs2_interrupt); // 设置串行口2 的中断门向量(硬件IRQ3 信号)。 init (tty_table[1].read_q.data); // 初始化串行口1(.data 是端口号)。 init (tty_table[2].read_q.data); // 初始化串行口2。 outb (inb_p (0x21) & 0xE7, 0x21); // 允许主8259A 芯片的IRQ3,IRQ4 中断信号请求。 }
/*
 * setup_idt -- point every exception vector at its stub, add the
 * halt-CPU IPI gate, then hand the finished table to the CPU.
 */
static void setup_idt(void)
{
    int vec;

    for (vec = 0; vec < EXCEPTION_GATES; vec++) {
        set_intr_gate(vec, &idt_exception_stubs[vec]);
    }

    set_intr_gate(HALT_CPU_IPI_VECTOR, halt_cpu_ipi_handler);
    load_idt(&idtdesc);
}
/*
 * traps_init -- fill the entire 256-entry IDT from isr_table.  The
 * handler address for vector v lives in the odd slot isr_table[2v+1].
 */
void traps_init(void)
{
    int vec = 0;

    /* gates for the implemented service routines */
    while (vec < VALID_ISR + 2) {
        set_intr_gate(vec, (unsigned int)isr_table[(vec << 1) + 1]);
        ++vec;
    }

    /* the remaining vectors, up to 255, get the same treatment */
    while (vec < 256) {
        set_intr_gate(vec, (unsigned int)isr_table[(vec << 1) + 1]);
        ++vec;
    }
}
/* Set up IDT gates for all external interrupt vectors not already
 * claimed by exceptions/traps/syscall.  (Comments translated from
 * Chinese.  NOTE(review): this block appears truncated — the closing
 * brace of the function lies beyond the visible chunk.) */
void __init native_init_IRQ(void)
{
    int i;

    /* Execute any quirks before the call gates are initialised: */
    /* x86_init.irqs.pre_vector_init points at init_ISA_irqs by default */
    x86_init.irqs.pre_vector_init();

    /* install the interrupt-controller-specific gates in the IDT */
    apic_intr_init();

    /*
     * Cover the whole vector space, no vector can escape
     * us. (some of these will be overridden and become
     * 'special' SMP interrupts)
     */
    /* first external interrupt vector, normally 32 */
    i = FIRST_EXTERNAL_VECTOR;

    /* trap_init() set bits in used_vectors for every exception, trap
     * and the syscall vector; each still-clear bit is a plain external
     * interrupt, so give it a gate here.  interrupt[] is a table of
     * stubs that all funnel into common_interrupt. */
    for_each_clear_bit_from(i, used_vectors, NR_VECTORS) {
        /* IA32_SYSCALL_VECTOR could be used in trap_init already. */
        set_intr_gate(i, interrupt[i - FIRST_EXTERNAL_VECTOR]);
    }
void free_irq(unsigned int irq) { struct irqaction * action = irq + irq_action; unsigned long flags; if (irq > 15) { printk("Trying to free IRQ%d\n",irq); return; } if (!action->handler) { printk("Trying to free free IRQ%d\n",irq); return; } save_flags(flags); cli(); if (irq < 8) { cache_21 |= 1 << irq; outb(cache_21,0x21); } else { cache_A1 |= 1 << (irq-8); outb(cache_A1,0xA1); } set_intr_gate(0x20+irq,bad_interrupt[irq]); action->handler = NULL; action->flags = 0; action->mask = 0; action->name = NULL; restore_flags(flags); }
/*
 * free_irq -- release legacy IRQ line `irq` (0-15), sigaction variant:
 * mask the line at the PIC, restore the bad-interrupt IDT stub, and
 * clear the sigaction slot (sa_mask doubles as the "in use" flag).
 */
void free_irq(unsigned int irq)
{
    struct sigaction * sa = irq + irq_sigaction;
    unsigned long flags;

    if (irq > 15) {
        printk("Trying to free IRQ%d\n",irq);
        return;
    }
    if (!sa->sa_mask) {
        /* slot was never claimed */
        printk("Trying to free free IRQ%d\n",irq);
        return;
    }
    save_flags(flags);
    cli();
    /* mask the line on whichever 8259A owns it */
    if (irq < 8) {
        cache_21 |= 1 << irq;
        outb(cache_21,0x21);
    } else {
        cache_A1 |= 1 << (irq-8);
        outb(cache_A1,0xA1);
    }
    /* stray interrupts now land in the complaint stub */
    set_intr_gate(0x20+irq,bad_interrupt[irq]);
    sa->sa_handler = NULL;
    sa->sa_flags = 0;
    sa->sa_mask = 0;
    sa->sa_restorer = NULL;
    restore_flags(flags);
}
/* Install the gates for the SMP inter-processor interrupts (IPIs).
 * Compiles to nothing on non-SMP builds. */
static void __init smp_intr_init(void)
{
#ifdef CONFIG_SMP
    /*
     * The reschedule interrupt is a CPU-to-CPU reschedule-helper
     * IPI, driven by wakeup.
     */
    alloc_intr_gate(RESCHEDULE_VECTOR, reschedule_interrupt);

    /* IPIs for invalidation -- one vector per sender slot (0-7) */
    alloc_intr_gate(INVALIDATE_TLB_VECTOR_START+0, invalidate_interrupt0);
    alloc_intr_gate(INVALIDATE_TLB_VECTOR_START+1, invalidate_interrupt1);
    alloc_intr_gate(INVALIDATE_TLB_VECTOR_START+2, invalidate_interrupt2);
    alloc_intr_gate(INVALIDATE_TLB_VECTOR_START+3, invalidate_interrupt3);
    alloc_intr_gate(INVALIDATE_TLB_VECTOR_START+4, invalidate_interrupt4);
    alloc_intr_gate(INVALIDATE_TLB_VECTOR_START+5, invalidate_interrupt5);
    alloc_intr_gate(INVALIDATE_TLB_VECTOR_START+6, invalidate_interrupt6);
    alloc_intr_gate(INVALIDATE_TLB_VECTOR_START+7, invalidate_interrupt7);

    /* IPI for generic function call */
    alloc_intr_gate(CALL_FUNCTION_VECTOR, call_function_interrupt);

    /* IPI for generic single function call */
    alloc_intr_gate(CALL_FUNCTION_SINGLE_VECTOR, call_function_single_interrupt);

    /* Low priority IPI to cleanup after moving an irq */
    set_intr_gate(IRQ_MOVE_CLEANUP_VECTOR, irq_move_cleanup_interrupt);
    /* NOTE(review): unlike the alloc_intr_gate() calls above, this
     * vector is recorded in used_vectors by hand -- presumably because
     * set_intr_gate() does not mark it; confirm. */
    set_bit(IRQ_MOVE_CLEANUP_VECTOR, used_vectors);
#endif
}
/*
 * Early 64-bit C entry point: clear .bss, remove the identity mapping,
 * point every IDT entry at the early fault handler, set up the per-CPU
 * PDA pointers, copy the boot data out of the real-mode area, then
 * enter the generic start_kernel().
 */
void __init x86_64_start_kernel(char * real_mode_data)
{
    int i;

    /* clear bss before set_intr_gate with early_idt_handler */
    clear_bss();

    /* Make NULL pointers segfault */
    zap_identity_mappings();

    /* every vector funnels into the single early fault handler for now */
    for (i = 0; i < IDT_ENTRIES; i++)
        set_intr_gate(i, early_idt_handler);
    load_idt((const struct desc_ptr *)&idt_descr);

    early_printk("Kernel alive\n");

    /* per-CPU data areas; only CPU 0 is initialized here */
    for (i = 0; i < NR_CPUS; i++)
        cpu_pda(i) = &boot_cpu_pda[i];

    pda_init(0);
    /* boot data still lives in low memory; copy via its virtual alias */
    copy_bootdata(__va(real_mode_data));
#ifdef CONFIG_SMP
    cpu_set(0, cpu_online_map);
#endif

    start_kernel();
}
/*
 * Build IDT entries for all external interrupts.  Exceptions, traps
 * and the syscall gate were installed earlier (their vectors are
 * flagged in used_vectors), so only still-free vectors receive the
 * common external-interrupt stubs here.
 */
void __init native_init_IRQ(void)
{
    int i;

    /* Execute any quirks before the call gates are initialised: */
    x86_init.irqs.pre_vector_init();

    /* local APIC / SMP IPI vectors */
    apic_intr_init();

    /*
     * Cover the whole vector space, no vector can escape
     * us. (some of these will be overridden and become
     * 'special' SMP interrupts)
     */
    for (i = FIRST_EXTERNAL_VECTOR; i < NR_VECTORS; i++) {
        /* IA32_SYSCALL_VECTOR could be used in trap_init already. */
        if (!test_bit(i, used_vectors))
            set_intr_gate(i, interrupt[i-FIRST_EXTERNAL_VECTOR]);
    }

    /* the cascade (IRQ2) needs a handler when no IO-APIC claims it */
    if (!acpi_ioapic && !of_ioapic)
        setup_irq(2, &irq2);

#ifdef CONFIG_X86_32
    /*
     * External FPU? Set up irq13 if so, for
     * original braindamaged IBM FERR coupling.
     */
    if (boot_cpu_data.hard_math && !cpu_has_fpu)
        setup_irq(FPU_IRQ, &fpu_irq);

    irq_ctx_init(smp_processor_id());
#endif
}
/*
 * hd_init -- hard-disk driver bring-up: register the block-device
 * request handler, hook the IRQ14 gate (vector 0x2E), then open the
 * cascade line on the master PIC and IRQ14 on the slave.
 */
void hd_init(void)
{
    unsigned char master_mask;
    unsigned char slave_mask;

    blk_dev[MAJOR_NR].request_fn = DEVICE_REQUEST;
    set_intr_gate(0x2E, &hd_interrupt);

    master_mask = inb_p(0x21) & 0xfb;   /* clear bit 2: enable cascade */
    outb_p(master_mask, 0x21);

    slave_mask = inb_p(0xA1) & 0xbf;    /* clear bit 6: enable IRQ14 */
    outb(slave_mask, 0xA1);
}
/* Install the gates for the SMP inter-processor interrupts; only built
 * when SMP with either x86-64 or a local APIC is configured. */
static void __init smp_intr_init(void)
{
#ifdef CONFIG_SMP
#if defined(CONFIG_X86_64) || defined(CONFIG_X86_LOCAL_APIC)
    /*
     * The reschedule interrupt is a CPU-to-CPU reschedule-helper
     * IPI, driven by wakeup.
     */
    alloc_intr_gate(RESCHEDULE_VECTOR, reschedule_interrupt);

    /* IPI for generic function call */
    alloc_intr_gate(CALL_FUNCTION_VECTOR, call_function_interrupt);

    /* IPI for generic single function call */
    alloc_intr_gate(CALL_FUNCTION_SINGLE_VECTOR, call_function_single_interrupt);

    /* Low priority IPI to cleanup after moving an irq */
    set_intr_gate(IRQ_MOVE_CLEANUP_VECTOR, irq_move_cleanup_interrupt);
    /* NOTE(review): recorded in used_vectors by hand, unlike the
     * alloc_intr_gate() vectors above -- presumably set_intr_gate()
     * does not mark it; confirm. */
    set_bit(IRQ_MOVE_CLEANUP_VECTOR, used_vectors);

    /* IPI used for rebooting/stopping */
    alloc_intr_gate(REBOOT_VECTOR, reboot_interrupt);
#endif
#endif /* CONFIG_SMP */
}
/* Install IDT gates for the interrupts generated by the local APIC
 * itself (timer, spurious, error, optional thermal), after wiring the
 * SMP IPI vectors. */
void __init apic_intr_init(void)
{
#ifdef CONFIG_SMP
    smp_intr_init();
#endif
    /* self generated IPI for local APIC timer */
    set_intr_gate(LOCAL_TIMER_VECTOR, apic_timer_interrupt);

    /* IPI vectors for APIC spurious and error interrupts */
    set_intr_gate(SPURIOUS_APIC_VECTOR, spurious_interrupt);
    set_intr_gate(ERROR_APIC_VECTOR, error_interrupt);

    /* thermal monitor LVT interrupt (P4-class thermal throttling) */
#ifdef CONFIG_X86_MCE_P4THERMAL
    set_intr_gate(THERMAL_APIC_VECTOR, thermal_interrupt);
#endif
}
/*
 * get_new_vector -- allocate an MSI vector and, on success, wire its
 * IDT gate to the common interrupt stub.  Returns the vector number,
 * or assign_msi_vector()'s non-positive result on failure.
 */
static int get_new_vector(void)
{
    int vec = assign_msi_vector();

    if (vec <= 0)
        return vec;         /* allocation failed: propagate as-is */

    set_intr_gate(vec, interrupt[vec]);
    return vec;
}
/*
 * Early 64-bit C entry point: verify the virtual-memory layout at
 * build time, clear .bss, drop the identity mapping, install the early
 * exception handlers, then continue boot via
 * x86_64_start_reservations().
 */
void __init x86_64_start_kernel(char * real_mode_data)
{
    int i;

    /*
     * Build-time sanity checks on the kernel image and module
     * area mappings. (these are purely build-time and produce no code)
     */
    BUILD_BUG_ON(MODULES_VADDR < KERNEL_IMAGE_START);
    BUILD_BUG_ON(MODULES_VADDR-KERNEL_IMAGE_START < KERNEL_IMAGE_SIZE);
    BUILD_BUG_ON(MODULES_LEN + KERNEL_IMAGE_SIZE > 2*PUD_SIZE);
    BUILD_BUG_ON((KERNEL_IMAGE_START & ~PMD_MASK) != 0);
    BUILD_BUG_ON((MODULES_VADDR & ~PMD_MASK) != 0);
    BUILD_BUG_ON(!(MODULES_VADDR > __START_KERNEL));
    BUILD_BUG_ON(!(((MODULES_END - 1) & PGDIR_MASK) == (__START_KERNEL & PGDIR_MASK)));
    BUILD_BUG_ON(__fix_to_virt(__end_of_fixed_addresses) <= MODULES_END);

    /* clear bss before set_intr_gate with early_idt_handler */
    clear_bss();

    /* Make NULL pointers segfault */
    zap_identity_mappings();

    /* Cleanup the over mapped high alias */
    cleanup_highmap();

    /* only the exception vectors matter this early */
    for (i = 0; i < NUM_EXCEPTION_VECTORS; i++) {
#ifdef CONFIG_EARLY_PRINTK
        /* per-vector stubs give nicer early diagnostics */
        set_intr_gate(i, &early_idt_handlers[i]);
#else
        set_intr_gate(i, early_idt_handler);
#endif
    }
    load_idt((const struct desc_ptr *)&idt_descr);

    early_printk("Kernel alive\n");

    x86_64_init_pda();

    early_printk("Kernel really alive\n");

    x86_64_start_reservations(real_mode_data);
}
/*
 * Early 64-bit C entry point: build-time layout checks, clear .bss,
 * install the early exception handlers, then continue boot via
 * x86_64_start_reservations().  (Comments translated from Korean.)
 */
void __init x86_64_start_kernel(char * real_mode_data)
{
    int i;

    /*
     * Build-time sanity checks on the kernel image and module
     * area mappings. (these are purely build-time and produce no code)
     */
    /* BUILD_BUG_ON turns a true condition into a compile-time error. */
    /* e.g. fail the build if the kernel image is so large it would
     * overlap the module mapping area. */
    BUILD_BUG_ON(MODULES_VADDR < KERNEL_IMAGE_START);
    BUILD_BUG_ON(MODULES_VADDR-KERNEL_IMAGE_START < KERNEL_IMAGE_SIZE);
    BUILD_BUG_ON(MODULES_LEN + KERNEL_IMAGE_SIZE > 2*PUD_SIZE);
    BUILD_BUG_ON((KERNEL_IMAGE_START & ~PMD_MASK) != 0);
    BUILD_BUG_ON((MODULES_VADDR & ~PMD_MASK) != 0);
    BUILD_BUG_ON(!(MODULES_VADDR > __START_KERNEL));
    BUILD_BUG_ON(!(((MODULES_END - 1) & PGDIR_MASK) == (__START_KERNEL & PGDIR_MASK)));
    BUILD_BUG_ON(__fix_to_virt(__end_of_fixed_addresses) <= MODULES_END);

    /* clear bss before set_intr_gate with early_idt_handler */
    /* zero the bss (__bss_start through __bss_stop) */
    clear_bss();

    /* Make NULL pointers segfault */
    zap_identity_mappings();

    /* highest mapped page frame number (KERNEL_IMAGE_SIZE / 4K pages) */
    max_pfn_mapped = KERNEL_IMAGE_SIZE >> PAGE_SHIFT;

    /* set up the exception vectors */
    for (i = 0; i < NUM_EXCEPTION_VECTORS; i++) {
#ifdef CONFIG_EARLY_PRINTK
        set_intr_gate(i, &early_idt_handlers[i]);   /* per-vector early stubs */
#else
        set_intr_gate(i, early_idt_handler);
#endif
    }
    load_idt((const struct desc_ptr *)&idt_descr);  /* lidt: activate the IDT */

    if (console_loglevel == 10)
        early_printk("Kernel alive\n");

    x86_64_start_reservations(real_mode_data);
}
/* Hard-disk driver bring-up: register the request handler and enable
 * its interrupt path through both 8259As. */
void hd_init(void)
{
    /* setup hd handler function */
    blk_dev[MAJOR_NR].request_fn = DEVICE_REQUEST; /* do_hd_request */
    /* enable hd interrupt (IRQ14) */
    set_intr_gate(0x2e, &hd_interrupt);
    outb_p(inb_p(0x21) & 0xfb, 0x21);  /* master PIC: clear bit 2, open the cascade */
    outb(inb_p(0xa1) & 0xbf, 0xa1);    /* slave PIC: clear bit 6, unmask IRQ14 */
}
/* Wire the fixed SMP interrupt vectors into the IDT. */
void __init smp_intr_init(void)
{
    /*
     * IRQ0 must be given a fixed assignment and initialized,
     * because it's used before the IO-APIC is set up.
     */
    set_intr_gate(FIRST_DEVICE_VECTOR, interrupt[0]);

    /*
     * The reschedule interrupt is a CPU-to-CPU reschedule-helper
     * IPI, driven by wakeup.
     */
    set_intr_gate(RESCHEDULE_VECTOR, reschedule_interrupt);

    /* IPI for invalidation (TLB shootdown) */
    set_intr_gate(INVALIDATE_TLB_VECTOR, invalidate_interrupt);

    /* IPI for generic function call */
    set_intr_gate(CALL_FUNCTION_VECTOR, call_function_interrupt);
}
/*
 * setup_idt -- build the 64-bit IDT: early assembly stubs for all
 * exception and IRQ vectors, default C handlers for each, then the
 * page-fault, double-fault and PIC-spurious overrides, and finally
 * lidt.  Returns 0 on success, -1 if a handler registration fails.
 */
int setup_idt (void)
{
    uint_t i;
    /* NOTE(review): the i*16 stride below assumes each early stub is
     * exactly 16 bytes long -- confirm against the assembly source. */
    ulong_t irq_start = (ulong_t)&early_irq_handlers;
    ulong_t excp_start = (ulong_t)&early_excp_handlers;

    // clear the IDT out
    memset(&idt64, 0, sizeof(struct gate_desc64) * NUM_IDT_ENTRIES);

    /* vectors 0..NUM_EXCEPTIONS-1: CPU exceptions */
    for (i = 0; i < NUM_EXCEPTIONS; i++) {
        set_intr_gate(idt64, i, (void*)(excp_start + i*16));
        idt_assign_entry(i, (ulong_t)null_excp_handler);
    }

    /* vectors 32 and up: hardware/software interrupts */
    for (i = 32; i < NUM_IDT_ENTRIES; i++) {
        set_intr_gate(idt64, i, (void*)(irq_start + (i-32)*16));
        idt_assign_entry(i, (ulong_t)null_irq_handler);
    }

    if (idt_assign_entry(PF_EXCP, (ulong_t)nk_pf_handler) < 0) {
        ERROR_PRINT("Couldn't assign page fault handler\n");
        return -1;
    }

    if (idt_assign_entry(DF_EXCP, (ulong_t)df_handler) < 0) {
        ERROR_PRINT("Couldn't assign double fault handler\n");
        return -1;
    }

    /* NOTE(review): 0xf looks like the spurious-IRQ7 slot of an
     * unremapped legacy PIC -- verify this vector choice. */
    if (idt_assign_entry(0xf, (ulong_t)pic_spur_int_handler) < 0) {
        ERROR_PRINT("Couldn't assign PIC spur int handler\n");
        return -1;
    }

    lidt(&idt_descriptor);

    return 0;
}
/**
 * Initialize the Interrupt Descriptor Table (IDT).
 *
 * All entries in asm_idtvec_table (the hardware IDT) point to
 * an entry in asm_handler[].  Set the C entry handlers to
 * call the appropriate hardware exception or default to
 * do_unhandled_idt_vector().
 */
void __init interrupts_init(void)
{
    int vector;

    /* NOTE(review): the vector*16 stride assumes each entry stub in
     * asm_idtvec_table is 16 bytes -- confirm against the assembly. */
    for (vector = 0; vector < NUM_IDT_ENTRIES; vector++) {
        void *asm_handler = (void *) ( (uintptr_t)(&asm_idtvec_table) + (vector * 16) );
        set_intr_gate(vector, asm_handler);
        set_idtvec_handler(vector, &do_unhandled_idt_vector);
    }

    /*
     * Register handlers for the standard x86_64 interrupts & exceptions.
     */
    set_idtvec_handler( DIVIDE_ERROR_VECTOR,            &do_divide_error            );
    set_idtvec_handler( DEBUG_VECTOR,                   &do_debug                   );
    set_idtvec_handler( NMI_VECTOR,                     &do_nmi                     );
    set_idtvec_handler( INT3_VECTOR,                    &do_int3                    );
    set_idtvec_handler( OVERFLOW_VECTOR,                &do_overflow                );
    set_idtvec_handler( BOUNDS_VECTOR,                  &do_bounds                  );
    set_idtvec_handler( INVALID_OP_VECTOR,              &do_invalid_op              );
    set_idtvec_handler( DEVICE_NOT_AVAILABLE_VECTOR,    &do_device_not_available    );
    set_idtvec_handler( DOUBLE_FAULT_VECTOR,            &do_double_fault            );
    set_idtvec_handler( COPROC_SEGMENT_OVERRUN_VECTOR,  &do_coproc_segment_overrun  );
    set_idtvec_handler( INVALID_TSS_VECTOR,             &do_invalid_tss             );
    set_idtvec_handler( SEGMENT_NOT_PRESENT_VECTOR,     &do_segment_not_present     );
    set_idtvec_handler( STACK_SEGMENT_VECTOR,           &do_stack_segment           );
    set_idtvec_handler( GENERAL_PROTECTION_VECTOR,      &do_general_protection      );
    set_idtvec_handler( PAGE_FAULT_VECTOR,              &do_page_fault              );
    set_idtvec_handler( SPURIOUS_INTERRUPT_BUG_VECTOR,  &do_spurious_interrupt_bug  );
    set_idtvec_handler( COPROCESSOR_ERROR_VECTOR,       &do_coprocessor_error       );
    set_idtvec_handler( ALIGNMENT_CHECK_VECTOR,         &do_alignment_check         );
    set_idtvec_handler( MACHINE_CHECK_VECTOR,           &do_machine_check           );
    set_idtvec_handler( SIMD_COPROCESSOR_ERROR_VECTOR,  &do_simd_coprocessor_error  );

    /*
     * Register handlers for all of the local APIC vectors.
     */
    set_idtvec_handler( APIC_TIMER_VECTOR,        &do_apic_timer        );
    set_idtvec_handler( APIC_PERF_COUNTER_VECTOR, &do_apic_perf_counter );
    set_idtvec_handler( APIC_THERMAL_VECTOR,      &do_apic_thermal      );
    set_idtvec_handler( APIC_ERROR_VECTOR,        &do_apic_error        );
    set_idtvec_handler( APIC_SPURIOUS_VECTOR,     &do_apic_spurious     );

    /*
     * Register handlers for inter-CPU interrupts (cross calls).
     */
    set_idtvec_handler( LWK_XCALL_FUNCTION_VECTOR,   &arch_xcall_function_interrupt   );
    set_idtvec_handler( LWK_XCALL_RESCHEDULE_VECTOR, &arch_xcall_reschedule_interrupt );
}
/*
 * request_irq -- claim legacy PIC line `irq` (0-15) for `handler`.
 * Fills the irq_action slot, installs the fast or slow IDT stub (unless
 * probing), and unmasks the line at the 8259A.  Returns 0 on success,
 * -EINVAL on bad arguments, -EBUSY if the line is already claimed.
 */
int request_irq(unsigned int irq, void (*handler)(int, struct pt_regs *),
    unsigned long irqflags, const char * devname)
{
    struct irqaction * action;
    unsigned long flags;

    if (irq > 15)
        return -EINVAL;                 /* only the 16 legacy PIC lines */
    action = irq + irq_action;          /* slot for this IRQ */
    if (action->handler)
        return -EBUSY;                  /* line already claimed */
    if (!handler)
        return -EINVAL;
    save_flags(flags);
    cli();                              /* no interrupts while rewiring */
    action->handler = handler;
    action->flags = irqflags;
    action->mask = 0;
    action->name = devname;
    /* NOTE(review): original comment said "SA_ONESHOT is used by
     * probing" but the code tests SA_PROBE -- probing skips installing
     * a real IDT gate. */
    if (!(action->flags & SA_PROBE)) {
        /* SA_INTERRUPT selects the "fast" stub variant */
        if (action->flags & SA_INTERRUPT)
            set_intr_gate(0x20+irq,fast_interrupt[irq]);
        else
            set_intr_gate(0x20+irq,interrupt[irq]);
    }
    /* unmask the line; IRQs 8-15 also need the cascade (IRQ2) open */
    if (irq < 8) {
        cache_21 &= ~(1<<irq);
        outb(cache_21,0x21);
    } else {
        cache_21 &= ~(1<<2);
        cache_A1 &= ~(1<<(irq-8));
        outb(cache_21,0x21);
        outb(cache_A1,0xA1);
    }
    restore_flags(flags);
    return 0;
}
/* This sets up the Interrupt Descriptor Table (IDT) entry for each hardware
 * interrupt (except 128, which is used for system calls), and then tells the
 * Linux infrastructure that each interrupt is controlled by our level-based
 * lguest interrupt controller. */
static void __init lguest_init_IRQ(void)
{
    unsigned int i;

    for (i = 0; i < LGUEST_IRQS; i++) {
        int vector = FIRST_EXTERNAL_VECTOR + i;
        /* Some systems map "vectors" to interrupts weirdly.  Lguest has
         * a straightforward 1 to 1 mapping, so force that here. */
        __get_cpu_var(vector_irq)[vector] = i;
        /* vector 0x80 stays reserved for the system-call gate */
        if (vector != SYSCALL_VECTOR)
            set_intr_gate(vector, interrupt[i]);
    }
    /* This call is required to set up for 4k stacks, where we have
     * separate stacks for hard and soft interrupts. */
    irq_ctx_init(smp_processor_id());
}
bool init_pit(void) { uint32_t freq = 100; // Hz uint16_t count = (uint16_t)(DEF_PIT_CLOCK / freq); uint8_t command = DEF_PIT_COM_MODE_SQUAREWAVE | DEF_PIT_COM_RL_DATA | DEF_PIT_COM_COUNTER0; outb(PIT_REG_CONTROL, command); outb(PIT_REG_COUNTER0, (uint8_t)(count & 0xff)); outb(PIT_REG_COUNTER0, (uint8_t)((count >> 8) & 0xff)); set_intr_gate(IDT_ENTRY_PIC_TIMER, &timer_handler); return true; }
/*
 * run_ipi_test_tasklet -- drive one step of an IPI round-trip latency
 * benchmark.  The first invocation installs the test vector and picks
 * the first CPU pair; later invocations report the timing of the pair
 * just finished and advance (y, then x) through cpu_online_map until
 * every ordered pair has been measured, then reboots the machine.
 * The ping-pong itself is started either directly (when we are the
 * sender CPU) or by sending the test IPI to it.
 */
static void run_ipi_test_tasklet(unsigned long ignore)
{
    cpumask_t mask;
    BUG_ON(!local_irq_is_enabled());

    if (!done_initialisation) {
        /* one-time setup: hook the test vector, start with pair (0,1) */
        printk("Running initialisation; x2 apic enabled %d\n", x2apic_enabled);
        set_intr_gate(IPI_TEST_VECTOR, ipi_test_interrupt);
        test_cpu_x = 0;
        test_cpu_y = 1;
        done_initialisation = 1;
    } else {
        /* report the pair that just completed; the first
         * INITIAL_DISCARD trips are excluded as warm-up */
        unsigned long time_taken = finish_time - start_time;
        printk("CPUs %d -> %d took %ld nanoseconds to perform %ld round trips; RTT %ldns\n",
               test_cpu_x, test_cpu_y, time_taken, nr_trips - INITIAL_DISCARD,
               time_taken / (nr_trips - INITIAL_DISCARD));
        printk("%d -> %d send IPI time %ld nanoseconds (%ld each)\n",
               test_cpu_x, test_cpu_y, send_ipi_time,
               send_ipi_time / (nr_trips - INITIAL_DISCARD));
        nr_trips = 0;
        /* advance y, skipping x; when y wraps, advance x */
        test_cpu_y = next_cpu(test_cpu_y, cpu_online_map);
        if (test_cpu_y == test_cpu_x)
            test_cpu_y = next_cpu(test_cpu_y, cpu_online_map);
        if (test_cpu_y == NR_CPUS) {
            test_cpu_x = next_cpu(test_cpu_x, cpu_online_map);
            if (test_cpu_x == NR_CPUS) {
                /* all pairs measured: done */
                printk("Finished test\n");
                machine_restart(0);
            }
            test_cpu_y = 0;
        }
    }

    BUG_ON(test_cpu_x == test_cpu_y);

    if (test_cpu_x == smp_processor_id()) {
        /* we are the sender: kick off the ping-pong locally */
        local_irq_disable();
        __smp_ipi_test_interrupt();
        local_irq_enable();
    } else {
        /* otherwise poke the sender CPU with the test IPI */
        mask = cpumask_of_cpu(test_cpu_x);
        send_IPI_mask(&mask, IPI_TEST_VECTOR);
    }
}
/*
 * lguest_init_IRQ -- install IDT gates for every external interrupt
 * (except the system-call vector) and record lguest's flat 1:1
 * vector -> irq mapping in the per-CPU vector_irq table.
 */
static void __init lguest_init_IRQ(void)
{
    unsigned int irq;

    for (irq = 0; irq < NR_VECTORS - FIRST_EXTERNAL_VECTOR; irq++) {
        unsigned int vector = FIRST_EXTERNAL_VECTOR + irq;

        /* Some systems map "vectors" to interrupts weirdly. Not us! */
        __get_cpu_var(vector_irq)[vector] = irq;
        if (vector != SYSCALL_VECTOR)
            set_intr_gate(vector, interrupt[irq]);
    }

    /*
     * This call is required to set up for 4k stacks, where we have
     * separate stacks for hard and soft interrupts.
     */
    irq_ctx_init(smp_processor_id());
}