/*
 * set_gdtr - build the three-entry GDT, point the GDTR at it, and load it.
 *
 * Installs a null descriptor plus one code and one data descriptor
 * (field meanings follow set_gdt()'s parameter list — confirm against
 * its definition), then reloads the GDTR and flushes the pipeline so
 * the new table takes effect.
 */
void set_gdtr(void)
{
	/* Entry 0: mandatory null descriptor. */
	set_gdt(&gdt[0], 0, 0, 0, 0, 0, 0, 0, 0, 0);
	/* Entry 1: flat segment, limit 0xFFFFF — code (type field 10). */
	set_gdt(&gdt[1], 0, 0x0FFFFF, 1, 0, 1, 10, 1, 1, 1);
	/* Entry 2: flat segment, limit 0xFFFFF — data (type field 2). */
	set_gdt(&gdt[2], 0, 0x0FFFFF, 1, 0, 1, 2, 1, 1, 1);

	/* Describe the table to the CPU and activate it. */
	gdtr.size = NUM_GDT * sizeof(SEGMENT_DESCRIPTOR);
	gdtr.base = (SEGMENT_DESCRIPTOR *)gdt;
	load_gdt();
	flush_pipeline();
}
static void seg_init(){ g_ptr.limit = (sizeof(struct w_gdte) * 5) - 1; g_ptr.base = (w_uint32)&gdt_entries; w_uint32 tmp_access; w_uint32 tmp_gran; /* Segment 0 - Null Segment */ set_gdt(0, 0, 0, 0, 0); /* Segment 1 - Kernel Code Segment */ tmp_access = SEG_P | SEG_W | SEG_SEG | SEG_KERN | SEG_X; tmp_gran = SEG_32 | SEG_GPAGE; set_gdt(1, 0, 0xFFFFFFFF, tmp_access, tmp_gran); /* Segment 2 - Kernel Data Segment */ tmp_access = SEG_P | SEG_W | SEG_SEG | SEG_KERN; tmp_gran = SEG_32 | SEG_GPAGE; set_gdt(2, 0, 0xFFFFFFFF, tmp_access, tmp_gran); /* Segment 3 User Code Segment */ tmp_access = SEG_P | SEG_W | SEG_SEG | SEG_USER | SEG_X; tmp_gran = SEG_32 | SEG_GPAGE | SEG_AVAIL; set_gdt(3, 0, 0xFFFFFFFF, tmp_access, tmp_gran); /* Segment 4 - User Data Segment */ tmp_access = SEG_P | SEG_W | SEG_SEG | SEG_USER; tmp_gran = SEG_32 | SEG_GPAGE | SEG_AVAIL; set_gdt(4, 0, 0xFFFFFFFF, tmp_access, tmp_gran); /* Segment 5 - User TLS Segment */ tmp_access = SEG_P | SEG_W | SEG_SEG | SEG_USER; tmp_gran = SEG_32 | SEG_GPAGE | SEG_AVAIL; set_gdt(5, 0, 0xFFFFFFFF, tmp_access, tmp_gran); /* Segment 6 - TSS Segment */ tmp_access = SEG_P | SEG_KERN | SEG_X | SEG_TSS; tmp_gran = SEG_32 | SEG_GBYTE; w_uint32 addr = (w_uint32)¤t_tss; set_gdt(6, addr, sizeof(struct w_tss), tmp_access, tmp_gran); gdt_flush(&g_ptr); }
PUBLIC void cstart() { showMsg();//set gs first, or you will get error when you show msg disp_str("\ncstart-start"); disp_str("\n"); set_gdt(0, 0x0000, 0x0000, 0x0000, 0x0000); set_gdt(1, 0x0FFF, 0x0000, 0x9A00, 0x00C0); // code set_gdt(2, 0x0FFF, 0x0000, 0x9200, 0x00C0); // data segment set_gdt(3, 0xFFFF, 0x8000, 0x920B|0x6000, 0x00C0); // GS, SET DPL = 3 disp_int(0x67AB); init_gptr(); init_iptr(); init_prot(); disp_str("\ncse"); }
/*
 * create_task - install the pair of GDT descriptors a task needs:
 * its TSS descriptor and its LDT descriptor, both accessible at ring 3.
 * Each task consumes two consecutive GDT slots past GDT_USEABLE_NUM.
 */
void create_task(unsigned int task_id,TSS_t *TSS,unsigned int LDT_id){
	unsigned int slot = GDT_USEABLE_NUM + task_id * 2;
	uint32_t ldt_base = (uint32_t)(LDT_BASE_ADDRESS + LDT_id * LDT_SIZE);

	/* TSS descriptor: limit 103 = 104-byte hardware TSS minus one. */
	set_gdt(slot, (uint32_t)TSS, 103, SEG_TSS_GATE | SEG_RING3);
	/* LDT descriptor covering this task's LDT region. */
	set_gdt(slot + 1, ldt_base, LDT_SIZE - 1, SEG_LDT | SEG_RING3);
}
int emu_0f(unsigned char *lina) { unsigned char *orig_lina = lina; switch (lina[1]) { case 0x00: /* lldt, ltr */ { switch (REG_OPCODE(lina[2])) { case 2: /* 0F 00 /2 LLDT r/m16 Load segment selector r/m16 into LDTR */ { u_int addr, is_addr, reg, selector; trace("lldt\n"); if (!opa.pe) { set_guest_exc(EXC_UD, 0); goto exc; } if (opa.cpl != 0) { set_guest_exc(EXC_GP, 0); goto exc; } lina += 2; /* move up to RM byte */ decode_rm16(&addr, &is_addr, ®, &lina); if (emu_get(is_addr, addr, &selector, 2)) goto exc; if (load_ldtr(selector)) goto exc; break; } case 3: /* 0F 00 /3 LTR r/m16 Load r/m16 into TR */ { u_int addr, is_addr, reg; Bit32u dtr; trace("ltr\n"); if (!opa.pe) { set_guest_exc(EXC_UD, 0); goto exc; } if (opa.cpl != 0) { set_guest_exc(EXC_GP, 0); goto exc; } lina += 2; /* move up to RM byte */ decode_rm16(&addr, &is_addr, ®, &lina); if (emu_get(is_addr, addr, &dtr, 2)) goto exc; if (load_tr(dtr)) goto exc; break; } default: unknown(lina); } break; } case 0x01: /* invlpg, lgdt, lidt, lmsw */ { int reg_op = REG_OPCODE(lina[2]); switch (reg_op) { case 2: /* lgdt */ case 3: /* lidt */ { u_int addr, is_addr, reg, limit, base; lina += 2; /* move up to RM byte */ decode_rm(&addr, &is_addr, ®, &lina); ASSERT(is_addr); /* addr now points to the m16&32; lina is at next instr */ if (get_memory(opa.seg, addr, &limit, 2) != 0 || get_memory(opa.seg, addr+2, &base, opa.opb==4 ? 4 : 3) != 0) goto exc; /* by definition of lgdt/lidt, base is a linear address already. */ if (reg_op == 2) { set_gdt(base, limit); } else { set_idt(base, limit); } debug_mem("loaded %cdt from 0x%08x\n", reg_op==2 ? 
'g' : 'i', base); break; } case 6: /* 0F 01 /6 LMSW r/m16 Loads r/m16 in msw of CR0 */ { u_int addr, is_addr, reg, val; trace("lmsw\n"); if (opa.pe && opa.cpl!=0) { set_guest_exc(13, 0); goto exc; } lina += 2; /* move up to RM byte */ decode_rm16(&addr, &is_addr, ®, &lina); if (emu_get(is_addr, addr, &val, 2)) goto exc; if (vmstate.cr[0] & 1 && !(val & 1)) val |= 1; /* can't clear PE with lmsw */ mov_to_cr(0, val, 0x0000000f); /* only PE, MP, EM, and TS can be affected */ break; } case 7: /* invlpg */ { Bit32u ptr; debug("invlpg\n"); lina += 2; /* move up to memory operand */ if (opa.opb==4) { ptr = *(Bit32u*)lina; } else { ptr = *(Bit16u*)lina; } lina += opa.opb; if (vmstate.cr[0]&PG_MASK) { /* Modify a pte with itself. This should have the desired side effect of flushing that TLB entry. */ sys_self_mod_pte_range(0, 0, /* add no flag bits */ 0, /* remove no flag bits */ ptr, 1); } break; } default: unknown(lina); } break; } case 0x06: /* clts 0F 06 */ { if (opa.cpl!=0) { set_guest_exc(13, 0); goto exc; } else { vmstate.cr[0] &= ~TS_MASK; lina += 2; } break; } case 0x08: /* invd 0F 08 */ case 0x09: /* wbinvd 0F 09 */ { if (opa.cpl!=0) { set_guest_exc(13, 0); goto exc; } else { /* will not implement */ lina += 2; } break; } case 0x0b: /* UD2 */ { set_guest_exc(6, 0); goto exc; } case 0x20: /* MOV r <- CRx */ { int cr = REG_OPCODE(lina[2]); int reg = RM(lina[2]); ASSERT(cr<5); set_reg(reg, vmstate.cr[cr], 4); lina += 3; break; } case 0x21: /* MOV r <- DRx */ { int dr = REG_OPCODE(lina[2]); int reg = RM(lina[2]); set_reg(reg, vmstate.dr[dr], 4); lina += 3; break; } case 0x22: /* MOV CRx <- r */ { int cr = REG_OPCODE(lina[2]); ASSERT(cr<5); if (opa.pe && opa.cpl!=0) { set_guest_exc(13, 0); goto exc; } mov_to_cr(cr, get_reg(RM(lina[2]), 4), 0xffffffff); lina += 3; break; } case 0x23: /* MOV DRx <- r */ { int dr = REG_OPCODE(lina[2]); debug("mov dr%d <- r%d\n", dr, RM(lina[2])); if (opa.pe && opa.cpl!=0) { set_guest_exc(13, 0); goto exc; } vmstate.dr[dr] = 
get_reg(RM(lina[2]), 4); lina += 3; break; } case 0x30: /* wrmsr */ { int ctr = 0; if (REG(ecx) == P6MSR_CTRSEL0) ctr = 0; else if (REG(ecx) == P6MSR_CTRSEL1) ctr = 1; else unknown(lina); /* only performance counters are implemented */ sys_pctr(ctr==0 ? PCIOCS0 : PCIOCS1, 0, ®(eax)); lina += 2; break; } case 0x32: /* rdmsr */ { struct pctrst p; int ctr = 0; if (REG(ecx) == P6MSR_CTR0) ctr = 0; else if (REG(ecx) == P6MSR_CTR1) ctr = 1; else unknown(lina); /* only performance counters are implemented */ sys_pctr(PCIOCRD, 0, &p); REG(eax) = p.pctr_hwc[ctr]; REG(edx) = p.pctr_hwc[ctr] >> 32; lina += 2; break; } #if 0 case 0x33: /* rdpmc */ { struct pctrst p; /* or cpl!=0 and cr4 ... */ if (REG(ecx)>1) { set_guest_exc(EXC_GP, 0); goto exc; } sys_pctr(PCIOCRD, 0, &p); REG(eax) = p.pctr_hwc[REG(ecx)]; REG(edx) = p.pctr_hwc[REG(ecx)] >> 32; lina += 2; break; } #endif case 0xa2: /* cpuid */ { /* cpuid may trap on a Cyrix. I don't care. */ leaveemu(ERR_UNIMPL); break; } case 0xb2: /* lss */ case 0xb4: /* lfs */ case 0xb5: /* lgs */ { int seg; if (lina[1]==0xb2) { seg = REGNO_SS; } else if (lina[1]==0xb4) { seg = REGNO_FS; } else seg = REGNO_GS; if (load_far_pointer(&lina, seg)) goto exc; break; } case 0x31: /* rdtsc ... should be enabled in xok */ case 0xc8: /* bswap should not trap */ default: unknown(lina); } REG(eip) += lina-orig_lina; return 0; exc: return -1; }
// Hand this object's descriptor table to the CPU: the table spans
// GDT_SIZE 8-byte entries.
void install()
{
    const size_t table_bytes = sizeof(uint64_t) * GDT_SIZE;
    set_gdt(this->gdt, table_bytes);
}