void MethodBuilder::setup() { std::vector<Type*> ftypes; ftypes.push_back(ls_->ptr_type("VM")); ftypes.push_back(ls_->ptr_type("CallFrame")); ftypes.push_back(ls_->ptr_type("Executable")); ftypes.push_back(ls_->ptr_type("Module")); ftypes.push_back(ls_->ptr_type("Arguments")); FunctionType* ft = FunctionType::get(ls_->ptr_type("Object"), ftypes, false); std::ostringstream ss; ss << std::string("_X_") << ls_->enclosure_name(info_.method()) << "#" << ls_->symbol_debug_str(info_.method()->name()) << "@" << ls_->add_jitted_method(); llvm::Function* func = Function::Create(ft, GlobalValue::ExternalLinkage, ss.str().c_str(), ls_->module()); Function::arg_iterator ai = func->arg_begin(); llvm::Value* vm = ai++; vm->setName("state"); llvm::Value* prev = ai++; prev->setName("previous"); exec = ai++; exec->setName("exec"); module = ai++; module->setName("mod"); llvm::Value* args = ai++; args->setName("args"); BasicBlock* block = BasicBlock::Create(ls_->ctx(), "entry", func); builder_.SetInsertPoint(block); info_.context().set_function(func); info_.set_vm(vm); info_.set_args(args); info_.set_previous(prev); info_.set_entry(block); alloc_frame("method_body"); check_arity(); // check_self_type(); initialize_frame(vmm_->stack_size); nil_stack(vmm_->stack_size, constant(cNil, obj_type)); import_args(); import_args_ = b().GetInsertBlock(); b().CreateBr(body_); b().SetInsertPoint(body_); }
void initialize_all_frames(void) { intmask mask; mask = disable(); int frame = 0; fifo_head = NULL; frame_t *frameptr = NULL; for (frame = 0; frame < NFRAMES; ++frame) { frameptr = &frames[frame]; initialize_frame(frameptr); frameptr->id = frame; } restore(mask); return; }
// Build the LLVM scaffolding for a jitted Ruby block: declare the
// function, name its arguments, hand-allocate CallFrame / operand-stack /
// locals storage from allocas, and wire up the optional profiling hook
// before branching into the block body.
void BlockBuilder::setup() {
  // Jitted block signature:
  //   Object* (VM*, CallFrame*, BlockEnvironment*, Arguments*, BlockInvocation*)
  std::vector<const Type*> ftypes;
  ftypes.push_back(ls_->ptr_type("VM"));
  ftypes.push_back(ls_->ptr_type("CallFrame"));
  ftypes.push_back(ls_->ptr_type("BlockEnvironment"));
  ftypes.push_back(ls_->ptr_type("Arguments"));
  ftypes.push_back(ls_->ptr_type("BlockInvocation"));

  FunctionType* ft = FunctionType::get(ls_->ptr_type("Object"), ftypes, false);

  // Unique symbol name: _X_<enclosure>#<method>$block@<jit id>
  std::stringstream ss;
  ss << std::string("_X_")
     << ls_->enclosure_name(info_.method()) << "#"
     << ls_->symbol_cstr(info_.method()->name())
     << "$block@" << ls_->add_jitted_method();

  func = Function::Create(ft, GlobalValue::ExternalLinkage,
                          ss.str().c_str(), ls_->module());

  // Name the incoming arguments so IR dumps are readable.
  Function::arg_iterator ai = func->arg_begin();
  vm = ai++; vm->setName("state");
  prev = ai++; prev->setName("previous");
  block_env = ai++; block_env->setName("env");
  args = ai++; args->setName("args");
  block_inv = ai++; block_inv->setName("invocation");

  BasicBlock* block = BasicBlock::Create(ls_->ctx(), "entry", func);
  b().SetInsertPoint(block);

  info_.set_function(func);
  info_.set_vm(vm);
  info_.set_args(args);
  info_.set_previous(prev);
  info_.set_entry(block);

  BasicBlock* body = BasicBlock::Create(ls_->ctx(), "block_body", func);
  // Pre-pass over the method before any code is emitted into `body`.
  pass_one(body);

  info_.set_counter(b().CreateAlloca(ls_->Int32Ty, 0, "counter_alloca"));
  counter2_ = b().CreateAlloca(ls_->Int32Ty, 0, "counter2");

  // The 3 here is because we store {ip, sp, type} per unwind.
  info_.set_unwind_info(b().CreateAlloca(ls_->Int32Ty,
      ConstantInt::get(ls_->Int32Ty, rubinius::kMaxUnwindInfos * 3),
      "unwind_info"));

  valid_flag = b().CreateAlloca(ls_->Int1Ty, 0, "valid_flag");

  // One contiguous alloca holds the CallFrame header followed by the
  // operand stack; the front of it is then viewed as a CallFrame*.
  Value* cfstk = b().CreateAlloca(obj_type,
      ConstantInt::get(ls_->Int32Ty,
        (sizeof(CallFrame) / sizeof(Object*)) + vmm_->stack_size),
      "cfstk");

  call_frame = b().CreateBitCast(
      cfstk,
      llvm::PointerType::getUnqual(cf_type), "call_frame");

  info_.set_out_args(b().CreateAlloca(ls_->type("Arguments"), 0, "out_args"));

  if(ls_->include_profiling()) {
    // Scratch space for the profiler's per-entry record.
    method_entry_ = b().CreateAlloca(ls_->Int8Ty,
        ConstantInt::get(ls_->Int32Ty, sizeof(tooling::MethodEntry)),
        "method_entry");
    info_.set_profiling_entry(method_entry_);
  }

  info_.set_call_frame(call_frame);

  // The operand stack starts right after the CallFrame header.
  stk = b().CreateConstGEP1_32(cfstk, sizeof(CallFrame) / sizeof(Object*), "stack");
  info_.set_stack(stk);

  // Same layout trick for locals: StackVariables header + the block's locals.
  Value* var_mem = b().CreateAlloca(obj_type,
      ConstantInt::get(ls_->Int32Ty,
        (sizeof(StackVariables) / sizeof(Object*)) + vmm_->number_of_locals),
      "var_mem");

  vars = b().CreateBitCast(
      var_mem,
      llvm::PointerType::getUnqual(stack_vars_type), "vars");

  info_.set_variables(vars);

  initialize_frame(vmm_->stack_size);
  nil_stack(vmm_->stack_size, constant(Qnil, obj_type));

  setup_block_scope();

  // Ruby 1.9+ mode uses its own argument-import path.
  if(ls_->config().version >= 19) {
    import_args_19_style();
  }

  if(ls_->include_profiling()) {
    // Load the global profiling flag at runtime; only call the profiler
    // entry hook when profiling is actually enabled.
    Value* test = b().CreateLoad(ls_->profiling(), "profiling");
    BasicBlock* setup_profiling = BasicBlock::Create(ls_->ctx(), "setup_profiling", func);
    BasicBlock* cont = BasicBlock::Create(ls_->ctx(), "continue", func);

    b().CreateCondBr(test, setup_profiling, cont);
    b().SetInsertPoint(setup_profiling);

    Signature sig(ls_, ls_->VoidTy);
    sig << "VM";
    sig << llvm::PointerType::getUnqual(ls_->Int8Ty);
    sig << "BlockEnvironment";
    sig << "Module";
    sig << "CompiledMethod";

    Value* call_args[] = { vm, method_entry_, block_env, module_, method };
    sig.call("rbx_begin_profiling_block", call_args, 5, "", b());

    b().CreateBr(cont);
    b().SetInsertPoint(cont);
  }

  b().CreateBr(body);
  b().SetInsertPoint(body);
}
void BlockBuilder::setup() { std::vector<const Type*> ftypes; ftypes.push_back(ls_->ptr_type("VM")); ftypes.push_back(ls_->ptr_type("CallFrame")); ftypes.push_back(ls_->ptr_type("BlockEnvironment")); ftypes.push_back(ls_->ptr_type("Arguments")); ftypes.push_back(ls_->ptr_type("BlockInvocation")); FunctionType* ft = FunctionType::get(ls_->ptr_type("Object"), ftypes, false); std::ostringstream ss; ss << std::string("_X_") << ls_->enclosure_name(info_.method()) << "#" << ls_->symbol_debug_str(info_.method()->name()) << "$block@" << ls_->add_jitted_method(); llvm::Function* func = Function::Create(ft, GlobalValue::ExternalLinkage, ss.str().c_str(), ls_->module()); Function::arg_iterator ai = func->arg_begin(); llvm::Value* vm = ai++; vm->setName("state"); llvm::Value* prev = ai++; prev->setName("previous"); block_env = ai++; block_env->setName("env"); llvm::Value* args = ai++; args->setName("args"); block_inv = ai++; block_inv->setName("invocation"); BasicBlock* block = BasicBlock::Create(ls_->ctx(), "entry", func); b().SetInsertPoint(block); info_.context().set_function(func); info_.set_vm(vm); info_.set_args(args); info_.set_previous(prev); info_.set_entry(block); alloc_frame("block_body"); initialize_frame(vmm_->stack_size); nil_stack(vmm_->stack_size, constant(Qnil, obj_type)); setup_block_scope(); if(ls_->config().version >= 19) { import_args_19_style(); } if(ls_->include_profiling()) { Value* test = b().CreateLoad(ls_->profiling(), "profiling"); BasicBlock* setup_profiling = BasicBlock::Create(ls_->ctx(), "setup_profiling", func); BasicBlock* cont = BasicBlock::Create(ls_->ctx(), "continue", func); b().CreateCondBr(test, setup_profiling, cont); b().SetInsertPoint(setup_profiling); Signature sig(ls_, ls_->VoidTy); sig << "VM"; sig << llvm::PointerType::getUnqual(ls_->Int8Ty); sig << "BlockEnvironment"; sig << "Module"; sig << "CompiledMethod"; Value* call_args[] = { vm, method_entry_, block_env, module_, method }; sig.call("rbx_begin_profiling_block", call_args, 
5, "", b()); b().CreateBr(cont); b().SetInsertPoint(cont); } b().CreateBr(body_); b().SetInsertPoint(body_); }
/* Allocate a frame of the requested type from the inverted page table.
 *
 * type   - kind of frame wanted (GPTBL frames come only from the reserved
 *          low range [1, NUM_GLOBAL_PAGE_TABLES+1]; everything else may
 *          use the whole table)
 * return - pointer to the claimed frame, or NULL if no frame was free and
 *          the replacement policy could not produce a victim.
 *
 * Fixes vs. previous revision: misspelled FRAM_COUNT_UPPER_BOUND renamed,
 * unused `tmp` local removed, ALL_CAPS names dropped for plain locals.
 * Behavior is unchanged.
 */
frame_t * retrieve_new_frame(frame_type type) {
	int frame;
	frame_t *frameptr = NULL;
	frame_t *available_frame = NULL;
	intmask mask = disable();

	/* 1. Search the inverted page table for an empty frame. */
	int lower_bound = (type == GPTBL) ? 1 : 0;
	int upper_bound = (type == GPTBL) ? NUM_GLOBAL_PAGE_TABLES + 1 : NFRAMES - 1;
	for (frame = lower_bound; frame <= upper_bound; frame++) {
		frameptr = &frames[frame];
		if (frameptr->type == FREE) {
			available_frame = frameptr;
			break;
		}
	}

	/* 2. Nothing free: evict a victim using the current replacement policy. */
	if (available_frame == NULL) {
		if (policy == FIFO)
			available_frame = evict_frame_using_fifo();
		else if (policy == AGING)
			available_frame = evict_frame_using_aging();
	}

	if (available_frame != NULL) {
		initialize_frame(available_frame);
		available_frame->type = type;
		available_frame->pid = currpid;

		/* Append the claimed frame to the tail of the FIFO queue.
		 * NOTE(review): this walk is O(n); cache a tail pointer if
		 * allocation ever shows up in profiles. */
		if (fifo_head == NULL) {
			fifo_head = available_frame;
		} else {
			frame_t *tail = fifo_head;
			while (tail->next != NULL)
				tail = tail->next;
			tail->next = available_frame;
		}
	}

	restore(mask);
	return available_frame;
}
/* Release a frame back to the inverted page table, unmapping and (if
 * dirty) writing back the page it held.
 *
 * frame  - frame to release; may be of type PAGE, VPTBL or DIR.
 * return - OK on success or for no-op cases (already FREE, reserved id);
 *          SYSERR for NULL/invalid frames or missing structures.
 *
 * Fixes vs. previous revision:
 *  - NULL check now happens BEFORE the first dereference of `frame`
 *    (the old code read frame->id first: a NULL-pointer dereference).
 *  - A step-11 comment that spilled onto a new line without `//` (a
 *    compile error) has been folded back into a proper comment.
 *  - The profane debug message was replaced with an informative one.
 */
int free_frame(frame_t * frame) {
	intmask mask = disable();

	if (frame == NULL) {
		restore(mask);
		return SYSERR;
	}

	/* Low frames are reserved and must never be released.
	 * TODO(review): replace the magic 5 with a named constant. */
	if (frame->id < 5) {
		LOG(" free_frame called on reserved frame %d (type %d)", frame->id, frame->type);
		restore(mask);
		return OK;
	}

	if (!FRAMEID_IS_VALID(frame->id)) {
		restore(mask);
		return SYSERR;
	} else if (frame->type == FREE) {
		/* Already free: nothing to do. */
		restore(mask);
		return OK;
	} else if (frame->type == PAGE) {
		/* 3. Using the inverted page table, get vp, the virtual page
		 *    number of the page to be replaced. */
		uint32 vp = frame->vp_no;

		/* 4. Let a be vp*4096 (the first virtual address on page vp). */
		hook_pswap_out(vp, frame->id + FRAME0);
		uint32 a = vp * PAGE_SIZE;
		virtual_addr * virt = (virtual_addr *) &a;

		/* 5. Let p be the high 10 bits of a; q be bits [21:12] of a. */
		uint32 p = virt->page_directory_offset;
		uint32 q = virt->page_table_offset;

		/* 6. pid owns vp.  7. pd points to pid's page directory. */
		pid32 pid = frame->pid;
		struct procent *prptr = &proctab[pid];
		pd_t * pd = prptr->pagedir;
		if (pd == NULL) {
			LOG(" pd doesn't exist ");
			restore(mask);
			return SYSERR;
		}

		/* Determine whether the mapping is actually present (and still
		 * points at this frame) and whether it is dirty. */
		bool8 pt_pres = (bool8) pd[p].pd_pres;
		bool8 pg_pres = FALSE;
		bool8 dirty = FALSE;
		if (pt_pres) {
			/* 8. Let pt point to pid's p-th page table. */
			pt_t * pt = (pt_t *) ((pd[p].pd_base) * PAGE_SIZE);
			pg_pres = (bool8) pt[q].pt_pres;
			uint32 pg_base = (uint32) pt[q].pt_base;
			if (pg_pres) {
				if ((uint32) FRAMEID_TO_VPAGE(frame->id) == pg_base) {
					pg_pres = TRUE;
					dirty = pt[q].pt_dirty;
				} else {
					pg_pres = FALSE;
				}
			}
		}

		if (pg_pres) {
			frame_t * pt_frame = &frames[(pd[p].pd_base) - FRAME0];
			pt_t * pt = (pt_t *) ((pd[p].pd_base) * PAGE_SIZE);

			/* 9. Mark the appropriate entry of pt as not present. */
			pt[q].pt_pres = 0;
			if (pt_frame->type == VPTBL) {
				/* 11. In the inverted page table, decrement the reference
				 * count of the frame occupied by pt.  When it reaches
				 * zero, mark pd's entry not present and free the page
				 * table itself — this conserves frames by keeping only
				 * page tables which are necessary. */
				decr_frame_refcount(pt_frame);
				if (pt_frame->refcount == 0) {
					pd[p].pd_pres = 0;
					free_frame(pt_frame);
				}
				bzero((char *)&pt[q], sizeof(pt_t));
			} else if (pt_frame->type == GPTBL) {
				/* Global page tables are shared; never torn down here. */
			}

			/* If vp was dirty, find its backing store via the backing
			 * store map and write the page back.  A failed lookup is
			 * fatal for the owning process. */
			if (dirty) {
				bsd_t bs_store_id;
				int bs_store_page_offset;
				if (SYSERR == bs_map_check(pid, vp, &bs_store_id, &bs_store_page_offset)) {
					kprintf("FATAL :Can't find the bs_map");
					restore(mask);
					kill(currpid);
					return SYSERR;
				}
				if (BACKSTORE_ID_IS_VALID(frame->backstore)
						&& BACKSTORE_OFFSET_IS_VALID(frame->backstore_offset)
						&& frame->backstore == bs_store_id
						&& frame->backstore_offset == bs_store_page_offset) {
					open_bs(frame->backstore);
					write_bs(FRAMEID_TO_PHYSICALADDR(frame->id), frame->backstore, frame->backstore_offset);
					close_bs(frame->backstore);
				}
			}
		}

		/* 10. Flush stale translations and mark the frame free. */
		enable_paging();
		initialize_frame(frame);
	} else if (frame->type == VPTBL) {
		evict_from_fifo_list(frame);
		hook_ptable_delete(frame->id + FRAME0);
		enable_paging();
		initialize_frame(frame);
	} else if (frame->type == DIR) {
		/* Releasing a page directory: switch the current process back to
		 * the null process's directory first so paging always has a
		 * valid root. */
		struct procent * prptrNULL = &proctab[NULLPROC];
		pd_t * null_pg_dir = prptrNULL->pagedir;
		struct procent *prptr = &proctab[currpid];
		if (prptr->pagedir != null_pg_dir) {
			evict_from_fifo_list(frame);
			prptr->pagedir = prptrNULL->pagedir;
			switch_page_directory(prptr->pagedir);
			enable_paging();
			initialize_frame(frame);
		}
	}

	restore(mask);
	return OK;
}