// Initialise global descriptor table with flat segments and all segmentes needed // for the advanced power management system void init_GDT(multiboot_info_t *mbi) { uns2 seg_apm; apm_table_t *apm; memset(&gdt, 0, sizeof(descriptor_t) * GDT_SIZE); gdt_count = 0; set_descriptor(0, 0, 0, 0, 0); // NULL entry set_descriptor(1, 0, 0xfffff, 0x9a, 0xc0); // kernel code segment set_descriptor(2, 0, 0xfffff, 0x92, 0xc0); // kernel data segment set_descriptor(3, 0, 0xbffff, 0xfa, 0xc0); // user code segment set_descriptor(4, 0, 0xbffff, 0xf2, 0xc0); // user data segment // Build descriptors that are needed by APM system if (CHECK_FLAG(mbi->flags, 10)) { apm = (apm_table_t*)mbi->apm_table; seg_apm = add_descriptor(apm->cseg << 4, apm->cseg_len - 1, 0x9a, 0x40); // apm 32-bit code add_descriptor(apm->cseg_16 << 4, apm->cseg_16_len - 1, 0x9a, 0x00); // apm 16-bit code add_descriptor(apm->dseg << 4, apm->dseg_len - 1, 0x92, 0x00); // apm 16-bit data // Build FAR pointer to call into APM system apm_call_ptr[0] = apm->offset & 0xffff; apm_call_ptr[1] = apm->offset >> 16; apm_call_ptr[2] = seg_apm; } else {
// Build the minimal flat GDT (null entry, kernel code/data, user code/data)
// and hand it to the CPU via reload_gdt().
void init_gdt(void) {
    // Access bytes, in slot order: null, kernel code, kernel data,
    // user code, user data.
    static const int access[] = { 0x00, 0x9A, 0x92, 0xFA, 0xF2 };

    for (int slot = 0; slot < 5; slot++)
        set_descriptor(slot, 0,
                       slot ? MEMMAX : 0,  // null entry has a zero limit
                       access[slot],
                       slot ? 0xC0 : 0);   // 4K granularity, 32-bit segments

    // GDTR covers exactly the five descriptors installed above.
    gdt_ptr.limit = sizeof(segment_descriptor_t) * 5 - 1;
    gdt_ptr.base = &gdt;
    reload_gdt(&gdt_ptr);
}
// Allocate a boxed object of type `t` on the pony heap and attach its
// descriptor. Chooses the small- or large-object allocator — with or
// without a finaliser — based on the type's ABI size.
LLVMValueRef gencall_allocstruct(compile_t* c, reach_type_t* t)
{
  // We explicitly want a boxed version.
  LLVMValueRef args[3];
  args[0] = codegen_ctx(c);

  // A zero-sized type still occupies one byte on the heap.
  size_t size = t->abi_size;
  if(size == 0)
    size = 1;

  const char* alloc_fn;

  if(size <= HEAP_MAX)
  {
    // Small objects are allocated by sizeclass; pass the class index.
    args[1] = LLVMConstInt(c->i32, ponyint_heap_index(size), false);
    alloc_fn = (t->final_fn == NULL) ? "pony_alloc_small"
                                     : "pony_alloc_small_final";
  } else {
    // Large objects are allocated by raw byte size.
    args[1] = LLVMConstInt(c->intptr, size, false);
    alloc_fn = (t->final_fn == NULL) ? "pony_alloc_large"
                                     : "pony_alloc_large_final";
  }

  LLVMValueRef object = gencall_runtime(c, alloc_fn, args, 2, "");

  // View the raw allocation as the boxed structure and set its descriptor.
  object = LLVMBuildBitCast(c->builder, object, t->structure_ptr, "");
  set_descriptor(c, t, object);
  return object;
}
/**
 * Initialise the descriptor table: install null, kernel code/data and TSS
 * descriptors, load GDTR with LGDT, then load the task register with the
 * TSS selector.
 */
void init_gdt(void) {
    set_descriptor(gdt + 0, 0, 0, 0, 0); // NULL
    set_descriptor(gdt + 1, STA_X | STA_R, 0, 0, 0xffffffff); // KERNEL CODE
    set_descriptor(gdt + 2, STA_W | STA_R, 0, 0, 0xffffffff); // KERNEL DATA
    // Type 9 = available 32-bit TSS; base is the address of the kernel TSS.
    set_descriptor(gdt + 3, 9, 0, (uint32_t)&tss, sizeof(tss) - 1); // TSS
    gdt[3].sd_s = 0; // Required by TSS: system descriptor, not code/data
    memset(&tss, 0, sizeof(tss));
    // Not check I/O Permission Map: an I/O map base at/past the TSS limit
    // disables the permission bitmap.
    // NOTE(review): 104 is presumably sizeof(tss) — confirm it matches.
    tss.ts_iomb = 104;
    DTR gdtr = {
        .limit = sizeof(gdt) - 1,
        .base = (uint32_t)gdt,
    };
    asm volatile ("lgdt %0"::"m"(gdtr));
    // Selector 3 << 3 addresses GDT entry 3 (the TSS descriptor), RPL 0.
    asm volatile ("ltr %%ax"::"a"(3 << 3));
}
static void init_gdt(void) { int i = 0; gdt_ptr_t gdt_ptr = { 0 }; /* zero */ for (i = 0; i < gdt_size; i++) t_memset(&g_gdt[i], 0, sizeof(descriptor_t)); /* set */ set_descriptor(&g_gdt[ring0_code_index], 0, 0xfffff, ring0_code_attr); set_descriptor(&g_gdt[ring0_data_index], 0, 0xfffff, ring0_data_attr); set_descriptor(&g_gdt[ring3_code_index], 0, 0xfffff, ring3_code_attr); set_descriptor(&g_gdt[ring3_data_index], 0, 0xfffff, ring3_data_attr); set_descriptor(&g_gdt[tss_index], (u32_t)(void*)&g_tss, sizeof(tss_t) - 1, tss_attr); /* set ptr */ gdt_ptr.address = (u32_t)(void*)&g_gdt; gdt_ptr.limit = sizeof(descriptor_t) * gdt_size - 1; __asm lgdt[gdt_ptr] }
/*
 * Check whether the set identified by `setid` is a pipe-typed set.
 * Returns ERROR_NONE when it is, ERROR_UNKNOWN otherwise — including
 * when the descriptor lookup itself fails.
 */
t_error set_type_pipe(i_set setid)
{
  o_set* descriptor;

  SET_ENTER(set);

  if (set_descriptor(setid, &descriptor) != ERROR_NONE)
    SET_LEAVE(set, ERROR_UNKNOWN);

  if (descriptor->type != SET_TYPE_PIPE)
    SET_LEAVE(set, ERROR_UNKNOWN);

  SET_LEAVE(set, ERROR_NONE);
}
// Produce the receiver for a constructor call: when the constructor
// initialises an embedded field, the receiver is a pointer to that field
// (with its descriptor set); otherwise a fresh object is allocated.
static LLVMValueRef gen_constructor_receiver(compile_t* c, reach_type_t* t,
  ast_t* call)
{
  ast_t* fieldref = find_embed_constructor_receiver(call);

  if(fieldref == NULL)
    return gencall_alloc(c, t);

  LLVMValueRef receiver = gen_fieldptr(c, fieldref);
  set_descriptor(c, t, receiver);
  return receiver;
}
// Generate code for a method/behaviour/constructor call expression.
// Resolves the target method, generates arguments then receiver (in that
// order, since argument codegen may affect the receiver expression),
// and emits either a direct/virtual call or an actor message send.
//
// Fixes in this revision:
//  - removed the duplicate `cap`/`m` declarations inside the message-send
//    branch that shadowed the identical outer declarations;
//  - the unreachable-postfix default path now frees the `args` pool
//    allocation before returning (it leaked in release builds where
//    pony_assert is a no-op).
LLVMValueRef gen_call(compile_t* c, ast_t* ast)
{
  // Special case calls.
  LLVMValueRef special;

  if(special_case_call(c, ast, &special))
    return special;

  AST_GET_CHILDREN(ast, positional, named, postfix);
  AST_GET_CHILDREN(postfix, receiver, method);
  ast_t* typeargs = NULL;

  // Dig through function qualification.
  switch(ast_id(receiver))
  {
    case TK_NEWREF:
    case TK_NEWBEREF:
    case TK_BEREF:
    case TK_FUNREF:
    case TK_BECHAIN:
    case TK_FUNCHAIN:
      typeargs = method;
      AST_GET_CHILDREN_NO_DECL(receiver, receiver, method);
      break;

    default: {}
  }

  // Get the receiver type.
  const char* method_name = ast_name(method);
  ast_t* type = ast_type(receiver);
  reach_type_t* t = reach_type(c->reach, type);
  pony_assert(t != NULL);

  // Generate the arguments. Slot 0 is reserved for the receiver.
  size_t count = ast_childcount(positional) + 1;
  size_t buf_size = count * sizeof(void*);
  LLVMValueRef* args = (LLVMValueRef*)ponyint_pool_alloc_size(buf_size);
  ast_t* arg = ast_child(positional);
  int i = 1;

  while(arg != NULL)
  {
    LLVMValueRef value = gen_expr(c, arg);

    if(value == NULL)
    {
      ponyint_pool_free_size(buf_size, args);
      return NULL;
    }

    args[i] = value;
    arg = ast_sibling(arg);
    i++;
  }

  bool is_new_call = false;

  // Generate the receiver. Must be done after the arguments because the args
  // could change things in the receiver expression that must be accounted for.
  if(call_needs_receiver(postfix, t))
  {
    switch(ast_id(postfix))
    {
      case TK_NEWREF:
      case TK_NEWBEREF:
      {
        // Walk up through enclosing tuples to find the assignment (if any)
        // this construction feeds, recording tuple element indices on the way.
        call_tuple_indices_t tuple_indices = {NULL, 0, 4};
        tuple_indices.data =
          (size_t*)ponyint_pool_alloc_size(4 * sizeof(size_t));

        ast_t* current = ast;
        ast_t* parent = ast_parent(current);

        while((parent != NULL) && (ast_id(parent) != TK_ASSIGN) &&
          (ast_id(parent) != TK_CALL))
        {
          if(ast_id(parent) == TK_TUPLE)
          {
            size_t index = 0;
            ast_t* child = ast_child(parent);

            while(current != child)
            {
              ++index;
              child = ast_sibling(child);
            }

            tuple_indices_push(&tuple_indices, index);
          }

          current = parent;
          parent = ast_parent(current);
        }

        // If we're constructing an embed field, pass a pointer to the field
        // as the receiver. Otherwise, allocate an object.
        if((parent != NULL) && (ast_id(parent) == TK_ASSIGN))
        {
          size_t index = 1;
          current = ast_childidx(parent, 1);

          while((ast_id(current) == TK_TUPLE) || (ast_id(current) == TK_SEQ))
          {
            parent = current;

            if(ast_id(current) == TK_TUPLE)
            {
              // If there are no indices left, we're destructuring a tuple.
              // Errors in those cases have already been caught by the expr
              // pass.
              if(tuple_indices.count == 0)
                break;

              index = tuple_indices_pop(&tuple_indices);
              current = ast_childidx(parent, index);
            } else {
              current = ast_childlast(parent);
            }
          }

          if(ast_id(current) == TK_EMBEDREF)
          {
            args[0] = gen_fieldptr(c, current);
            set_descriptor(c, t, args[0]);
          } else {
            args[0] = gencall_alloc(c, t);
          }
        } else {
          args[0] = gencall_alloc(c, t);
        }

        is_new_call = true;
        ponyint_pool_free_size(tuple_indices.alloc * sizeof(size_t),
          tuple_indices.data);
        break;
      }

      case TK_BEREF:
      case TK_FUNREF:
      case TK_BECHAIN:
      case TK_FUNCHAIN:
        args[0] = gen_expr(c, receiver);
        break;

      default:
        pony_assert(0);
        // Don't leak the argument buffer when asserts are compiled out.
        ponyint_pool_free_size(buf_size, args);
        return NULL;
    }
  } else {
    // Use a null for the receiver type.
    args[0] = LLVMConstNull(t->use_type);
  }

  // Static or virtual dispatch.
  token_id cap = cap_dispatch(type);
  reach_method_t* m = reach_method(t, cap, method_name, typeargs);
  LLVMValueRef func = dispatch_function(c, t, m, args[0]);

  bool is_message = false;

  if((ast_id(postfix) == TK_NEWBEREF) || (ast_id(postfix) == TK_BEREF) ||
    (ast_id(postfix) == TK_BECHAIN))
  {
    switch(t->underlying)
    {
      case TK_ACTOR:
        is_message = true;
        break;

      case TK_UNIONTYPE:
      case TK_ISECTTYPE:
      case TK_INTERFACE:
      case TK_TRAIT:
        if(m->cap == TK_TAG)
          is_message = can_inline_message_send(t, m, method_name);
        break;

      default: {}
    }
  }

  // Cast the arguments to the parameter types.
  LLVMTypeRef f_type = LLVMGetElementType(LLVMTypeOf(func));
  LLVMTypeRef* params = (LLVMTypeRef*)ponyint_pool_alloc_size(buf_size);
  LLVMGetParamTypes(f_type, params);

  arg = ast_child(positional);
  i = 1;

  LLVMValueRef r = NULL;

  if(is_message)
  {
    // If we're sending a message, trace and send here instead of calling the
    // sender to trace the most specific types possible.
    LLVMValueRef* cast_args =
      (LLVMValueRef*)ponyint_pool_alloc_size(buf_size);
    cast_args[0] = args[0];

    while(arg != NULL)
    {
      cast_args[i] = gen_assign_cast(c, params[i], args[i], ast_type(arg));
      arg = ast_sibling(arg);
      i++;
    }

    // `m` was already resolved above for dispatch; reuse it here rather
    // than recomputing (the previous code shadowed `cap` and `m` with
    // identical duplicates).
    codegen_debugloc(c, ast);
    gen_send_message(c, m, args, cast_args, positional);
    codegen_debugloc(c, NULL);

    switch(ast_id(postfix))
    {
      case TK_NEWREF:
      case TK_NEWBEREF:
        r = args[0];
        break;

      default:
        r = c->none_instance;
        break;
    }

    ponyint_pool_free_size(buf_size, cast_args);
  } else {
    while(arg != NULL)
    {
      args[i] = gen_assign_cast(c, params[i], args[i], ast_type(arg));
      arg = ast_sibling(arg);
      i++;
    }

    if(func != NULL)
    {
      // If we can error out and we have an invoke target, generate an invoke
      // instead of a call.
      codegen_debugloc(c, ast);

      if(ast_canerror(ast) && (c->frame->invoke_target != NULL))
        r = invoke_fun(c, func, args, i, "", true);
      else
        r = codegen_call(c, func, args, i);

      if(is_new_call)
      {
        LLVMValueRef md = LLVMMDNodeInContext(c->context, NULL, 0);
        LLVMSetMetadataStr(r, "pony.newcall", md);
      }

      codegen_debugloc(c, NULL);
    }
  }

  // Class constructors return void, expression result is the receiver.
  if(((ast_id(postfix) == TK_NEWREF) || (ast_id(postfix) == TK_NEWBEREF)) &&
    (t->underlying == TK_CLASS))
    r = args[0];

  // Chained methods forward their receiver.
  if((ast_id(postfix) == TK_BECHAIN) || (ast_id(postfix) == TK_FUNCHAIN))
    r = args[0];

  ponyint_pool_free_size(buf_size, args);
  ponyint_pool_free_size(buf_size, params);
  return r;
}
// Add a new segment to GDT, return corresponding segment selector int add_descriptor(uns4 base, uns4 len, int flags1, int flags2) { int seg = gdt_count * 8; set_descriptor(gdt_count, base, len, flags1, flags2); return seg; }