Example #1
static void __save_processor_state(struct saved_context *ctxt)
{
	mtrr_save_fixed_ranges(NULL);
	kernel_fpu_begin();

	/*
	 * descriptor tables
	 */
	store_gdt(&ctxt->gdt);
	store_idt(&ctxt->idt);
	store_tr(ctxt->tr);

	/*
	 * segment registers
	 */
	savesegment(es, ctxt->es);
	savesegment(fs, ctxt->fs);
	savesegment(gs, ctxt->gs);
	savesegment(ss, ctxt->ss);

	/*
	 * control registers
	 */
	ctxt->cr0 = read_cr0();
	ctxt->cr2 = read_cr2();
	ctxt->cr3 = read_cr3();
	ctxt->cr4 = read_cr4_safe();
}
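For reference, the save path above is mirrored on resume. A minimal sketch of the counterpart (not the kernel's full __restore_processor_state; it assumes only the saved_context fields captured above, and it skips TR, which cannot simply be reloaded until the TSS busy bit is cleared):

static void __restore_processor_state(struct saved_context *ctxt)
{
	/* control registers first, so paging/protection state is back */
	write_cr4(ctxt->cr4);
	write_cr3(ctxt->cr3);
	write_cr2(ctxt->cr2);
	write_cr0(ctxt->cr0);

	/* descriptor tables: reload what store_gdt()/store_idt() captured */
	load_gdt(&ctxt->gdt);
	load_idt(&ctxt->idt);

	/* segment registers */
	loadsegment(es, ctxt->es);
	loadsegment(fs, ctxt->fs);
	loadsegment(gs, ctxt->gs);
	loadsegment(ss, ctxt->ss);

	kernel_fpu_end();
}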
Example #2
static int isr_hooker_init(void)
{
	gate_desc *old_idt, *new_idt;
	unsigned long new_idt_page;

	pr_info("%s\n", __func__);

	/* obtain IDT descriptor */
	store_idt(&old_idtr);
	old_idt = (gate_desc *)old_idtr.address;

	/* prepare new IDT */
	new_idt_page = __get_free_page(GFP_KERNEL);
	if (!new_idt_page)
		return -ENOMEM;

	new_idtr.address = new_idt_page;
	new_idtr.size = old_idtr.size;
	new_idt = (gate_desc *)new_idtr.address;

	/* the limit in old_idtr.size is the last valid byte, so copy size + 1 */
	memcpy(new_idt, old_idt, old_idtr.size + 1);

	/* modify the target entry */
	orig_isr = gate_offset(new_idt[TRAP_NR]);
	pr_info("orig_isr@%p\n", (void*)orig_isr);
	pack_gate(&new_idt[TRAP_NR], GATE_INTERRUPT, (unsigned long)stub, 0, 0, __KERNEL_CS);

	/* setup new entry */
	load_idt((void *)&new_idtr);
	smp_call_function((smp_call_func_t)load_idt, &new_idtr, 1);

	return 0;
}
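Note that isr_hooker_init() has no visible counterpart that undoes the hook. A minimal exit-path sketch, assuming old_idtr and new_idtr are the same module-level globals used above:

static void isr_hooker_exit(void)
{
	/* re-point this CPU, then every other CPU, at the original IDT */
	load_idt((void *)&old_idtr);
	smp_call_function((smp_call_func_t)load_idt, &old_idtr, 1);

	/* no CPU references the copied table any more; release its page */
	free_page(new_idtr.address);
}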
Example #3
void unregister_my_page_fault_handler(void){
    struct desc_ptr idtr;
    store_idt(&idtr);
    //if the current idt is not the default one, restore the default one
    if(idtr.address != default_idtr.address || idtr.size != default_idtr.size){
        load_idt(&default_idtr);
        smp_call_function(my_load_idt, (void *)&default_idtr, 1);
        free_page(new_idt_table_page);
    }
}
Example #4
void unregisterPageFaultListener(void){
    struct desc_ptr idtr;
    store_idt(&idtr);
    //if the current idt is not the default one, restore the default one
    if(idtr.address != defaultIDTR.address || idtr.size != defaultIDTR.size){
        load_idt(&defaultIDTR);
        smp_call_function(loadMyIDTTable, (void *)&defaultIDTR, 1);
        free_page(newIDTTablePage);
    }
    debugfs_remove(file);
}
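Neither my_load_idt() nor loadMyIDTTable() is shown in this listing; they exist because load_idt() itself does not match the void (*)(void *) signature that smp_call_function() expects. A plausible one-liner, assuming nothing beyond the calls above:

//wrapper: smp_call_function() wants void (*)(void *), so the IDT
//descriptor is passed through the opaque 'info' argument
static void loadMyIDTTable(void *info)
{
    load_idt((struct desc_ptr *)info);
}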
Example #5
void restore_nmi(void)
{
	struct _descr descr;

	smp_call_function(mask_lvtpc, NULL, 0, 1);
	mask_lvtpc(NULL);

	store_idt(descr);
	descr.base[NMI_VECTOR_NUM] = kernel_nmi;

	smp_call_function(unmask_lvtpc, NULL, 0, 1);
	unmask_lvtpc(NULL);
}
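This example (like install_nmi in Example #7 below) predates struct desc_ptr: here store_idt is an old macro that expands to a sidt instruction writing straight into its argument, which is why it is called as store_idt(descr) rather than store_idt(&descr), and why smp_call_function() still takes four arguments. A sketch of the era's definitions (struct _idt_gate is a hypothetical name; the exact historical declarations may differ):

struct _idt_gate {
	unsigned long a, b;		/* one 8-byte i386 gate descriptor */
};

struct _descr {
	unsigned short limit;		/* sidt stores the 16-bit limit... */
	struct _idt_gate *base;		/* ...then the 32-bit linear base */
} __attribute__((packed));

/* a macro, not a function: it writes into the struct itself */
#define store_idt(addr) \
	do { asm volatile("sidt %0" : "=m" (addr) : : "memory"); } while (0)

Under these definitions, kernel_nmi in Examples #5 and #7 would be a struct _idt_gate holding the saved NMI entry.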
Example #6
void __init lguest_arch_host_init(void)
{
	int i;

	for (i = 0; i < IDT_ENTRIES; i++)
		default_idt_entries[i] += switcher_offset();

	for_each_possible_cpu(i) {
		struct lguest_pages *pages = lguest_pages(i);
		struct lguest_ro_state *state = &pages->state;

		state->host_gdt_desc.size = GDT_SIZE-1;
		state->host_gdt_desc.address = (long)get_cpu_gdt_table(i);

		store_idt(&state->host_idt_desc);

		state->guest_idt_desc.size = sizeof(state->guest_idt)-1;
		state->guest_idt_desc.address = (long)&state->guest_idt;
		state->guest_gdt_desc.size = sizeof(state->guest_gdt)-1;
		state->guest_gdt_desc.address = (long)&state->guest_gdt;

		state->guest_tss.sp0 = (long)(&pages->regs + 1);
		state->guest_tss.ss0 = LGUEST_DS;

		state->guest_tss.io_bitmap_base = sizeof(state->guest_tss);

		setup_default_gdt_entries(state);
		setup_default_idt_entries(state, default_idt_entries);

		get_cpu_gdt_table(i)[GDT_ENTRY_LGUEST_CS] = FULL_EXEC_SEGMENT;
		get_cpu_gdt_table(i)[GDT_ENTRY_LGUEST_DS] = FULL_SEGMENT;
	}

	lguest_entry.offset = (long)switch_to_guest + switcher_offset();
	lguest_entry.segment = LGUEST_CS;

	get_online_cpus();
	if (cpu_has_pge) {
		cpu_had_pge = 1;
		on_each_cpu(adjust_pge, (void *)0, 1);
		clear_cpu_cap(&boot_cpu_data, X86_FEATURE_PGE);
	}
	put_online_cpus();
}
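lguest_entry, filled in near the end of the function, is the far-pointer operand handed to the lcall instruction that enters the Switcher; in the lguest source it is declared as a bare offset/segment pair, roughly:

static struct {
	unsigned long offset;
	unsigned short segment;
} lguest_entry;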
Example #7
void install_nmi(void)
{
	struct _descr descr;

	/* NMI handler is at idt_table[IDT_VECTOR_NUMBER]            */
	/* see Intel Vol.3 Figure 5-2, interrupt gate                */

	smp_call_function(mask_lvtpc, NULL, 0, 1);
	mask_lvtpc(NULL);

	store_idt(descr);
	kernel_nmi = descr.base[NMI_VECTOR_NUM];
	SET_NMI_GATE;

	smp_call_function(unmask_lvtpc, NULL, 0, 1);
	unmask_lvtpc(NULL);
}
Example #8
int registerPageFaultListener(void){
    struct desc_ptr idtr;
    gate_desc *old_idt, *new_idt;
    int retval;

    //first, do some initialization work.
    retval = initMyFault();
    if(retval)
        return retval;

    //record the default idtr
    store_idt(&defaultIDTR);

    //get the address of the old IDT table from the descriptor we just stored
    old_idt = (gate_desc *)defaultIDTR.address;

    //allocate a page to store the new IDT table
    printk(KERN_INFO "Page fault Listener: alloc a page to store new idt table.\n");
    newIDTTablePage = __get_free_page(GFP_KERNEL);
    if(!newIDTTablePage)
        return -ENOMEM;

    idtr.address = newIDTTablePage;
    idtr.size = defaultIDTR.size;
    
    //copy the old idt table to the new one
    new_idt = (gate_desc *)idtr.address;
    memcpy(new_idt, old_idt, idtr.size + 1); //the size field is a limit (last valid byte), so the table is size + 1 bytes
    pack_gate(&new_idt[PGFAULT_NR], GATE_INTERRUPT, (unsigned long)customPageFault, 0, 0, __KERNEL_CS);
    
    //load idt for all the processors
    printk(KERN_INFO "Page fault Listener: Load the new idt table.\n");
    load_idt(&idtr);
    
    printk(KERN_INFO "Page fault Listener: new idt table loaded.\n");
    smp_call_function(loadMyIDTTable, (void *)&idtr, 1); //wait till all are finished
    printk(KERN_INFO "Page fault Listener: all CPUs have loaded the new idt table.\n");
    
    return 0;
}
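Every desc_ptr-based example above manipulates the same two-field structure from arch/x86/include/asm/desc_defs.h, which mirrors the memory operand of sidt/lidt. Because size is a limit (the offset of the last valid byte), copying a whole table takes size + 1 bytes, as in the memcpy() calls above:

struct desc_ptr {
	unsigned short size;	/* the limit: bytes in the table minus one */
	unsigned long address;	/* linear base address of the table */
} __attribute__((packed));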
Example #9
/*H:020
 * Now that the Switcher is mapped and everything else is ready, we need to do
 * some more i386-specific initialization.
 */
void __init lguest_arch_host_init(void)
{
	int i;

	/*
	 * Most of the x86/switcher_32.S doesn't care that it's been moved; on
	 * Intel, jumps are relative, and it doesn't access any references to
	 * external code or data.
	 *
	 * The only exception is the interrupt handlers in switcher.S: their
	 * addresses are placed in a table (default_idt_entries), so we need to
	 * update the table with the new addresses.  switcher_offset() is a
	 * convenience function which returns the distance between the
	 * compiled-in switcher code and the high-mapped copy we just made.
	 */
	for (i = 0; i < IDT_ENTRIES; i++)
		default_idt_entries[i] += switcher_offset();

	/*
	 * Set up the Switcher's per-cpu areas.
	 *
	 * Each CPU gets two pages of its own within the high-mapped region
	 * (aka. "struct lguest_pages").  Much of this can be initialized now,
	 * but some depends on what Guest we are running (which is set up in
	 * copy_in_guest_info()).
	 */
	for_each_possible_cpu(i) {
		/* lguest_pages() returns this CPU's two pages. */
		struct lguest_pages *pages = lguest_pages(i);
		/* This is a convenience pointer to make the code neater. */
		struct lguest_ro_state *state = &pages->state;

		/*
		 * The Global Descriptor Table: the Host has a different one
		 * for each CPU.  We keep a descriptor for the GDT which says
		 * where it is and how big it is (the size field actually holds
		 * the offset of the last byte, i.e. the limit, hence the "-1").
		 */
		state->host_gdt_desc.size = GDT_SIZE-1;
		state->host_gdt_desc.address = (long)get_cpu_gdt_table(i);

		/*
		 * All CPUs on the Host use the same Interrupt Descriptor
		 * Table, so we just use store_idt(), which gets this CPU's IDT
		 * descriptor.
		 */
		store_idt(&state->host_idt_desc);

		/*
		 * The descriptors for the Guest's GDT and IDT can be filled
		 * out now, too.  We copy the GDT & IDT into ->guest_gdt and
		 * ->guest_idt before actually running the Guest.
		 */
		state->guest_idt_desc.size = sizeof(state->guest_idt)-1;
		state->guest_idt_desc.address = (long)&state->guest_idt;
		state->guest_gdt_desc.size = sizeof(state->guest_gdt)-1;
		state->guest_gdt_desc.address = (long)&state->guest_gdt;

		/*
		 * We know where we want the stack to be when the Guest enters
		 * the Switcher: in pages->regs.  The stack grows downwards, so
		 * we start it at the end of that structure.
		 */
		state->guest_tss.sp0 = (long)(&pages->regs + 1);
		/*
		 * And this is the GDT entry to use for the stack: we keep a
		 * couple of special LGUEST entries.
		 */
		state->guest_tss.ss0 = LGUEST_DS;

		/*
		 * x86 can have a fine-grained bitmap which indicates what I/O
		 * ports the process can use.  We set it to the end of our
		 * structure, meaning "none".
		 */
		state->guest_tss.io_bitmap_base = sizeof(state->guest_tss);

		/*
		 * Some GDT entries are the same across all Guests, so we can
		 * set them up now.
		 */
		setup_default_gdt_entries(state);
		/* Most IDT entries are the same for all Guests, too.*/
		setup_default_idt_entries(state, default_idt_entries);

		/*
		 * The Host needs to be able to use the LGUEST segments on this
		 * CPU, too, so put them in the Host GDT.
		 */
		get_cpu_gdt_table(i)[GDT_ENTRY_LGUEST_CS] = FULL_EXEC_SEGMENT;
		get_cpu_gdt_table(i)[GDT_ENTRY_LGUEST_DS] = FULL_SEGMENT;
	}

	/*
	 * In the Switcher, we want the %cs segment register to use the
	 * LGUEST_CS GDT entry: we've put that in the Host and Guest GDTs, so
	 * it will be undisturbed when we switch.  To change %cs and jump we
	 * need this structure to feed to Intel's "lcall" instruction.
	 */
	lguest_entry.offset = (long)switch_to_guest + switcher_offset();
	lguest_entry.segment = LGUEST_CS;

	/*
	 * Finally, we need to turn off "Page Global Enable".  PGE is an
	 * optimization where page table entries are specially marked to show
	 * they never change.  The Host kernel marks all the kernel pages this
	 * way because it's always present, even when userspace is running.
	 *
	 * Lguest breaks this: unbeknownst to the rest of the Host kernel, we
	 * switch to the Guest kernel.  If you don't disable this on all CPUs,
	 * you'll get really weird bugs that you'll chase for two days.
	 *
	 * I used to turn PGE off every time we switched to the Guest and back
	 * on when we return, but that slowed the Switcher down noticeably.
	 */

	/*
	 * We don't need the complexity of CPUs coming and going while we're
	 * doing this.
	 */
	get_online_cpus();
	if (boot_cpu_has(X86_FEATURE_PGE)) { /* We have a broader idea of "global". */
		/* Remember that this was originally set (for cleanup). */
		cpu_had_pge = 1;
		/*
		 * adjust_pge is a helper function which sets or unsets the PGE
		 * bit on its CPU, depending on the argument (0 == unset).
		 */
		on_each_cpu(adjust_pge, (void *)0, 1);
		/* Turn off the feature in the global feature set. */
		clear_cpu_cap(&boot_cpu_data, X86_FEATURE_PGE);
	}
	put_online_cpus();
}
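The adjust_pge helper itself is not part of this listing; a minimal sketch consistent with the comment above (argument 0 clears CR4.PGE on the executing CPU, non-zero sets it):

static void adjust_pge(void *on)
{
	if (on)
		write_cr4(read_cr4() | X86_CR4_PGE);
	else
		write_cr4(read_cr4() & ~X86_CR4_PGE);
}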