/* Build the kernel page table: an identity mapping from kernel
 * virtual memory (region 0) onto physical memory. */
void init_kernel_page_table() {
    kernel_page_table = (pte_t*) malloc(sizeof(pte_t) * GET_PAGE_NUMBER(VMEM_0_SIZE));

    // Map the text segment: read + execute
    TracePrintf(0, "Text Start=%p, End=%p\n", kernel_memory.text_low, kernel_memory.data_low);
    write_kernel_pte(kernel_memory.text_low, kernel_memory.data_low, _VALID, PROT_READ | PROT_EXEC);

    // Map the data segment: read + write
    TracePrintf(0, "Data Start=%p, End=%p\n", kernel_memory.data_low, kernel_memory.brk_low);
    write_kernel_pte(kernel_memory.data_low, kernel_memory.brk_low, _VALID, PROT_READ | PROT_WRITE);

    // Map the kernel stack; note that the stack space stays reserved even while unused
    TracePrintf(0, "Stack Start=%p, End=%p\n", KERNEL_STACK_BASE, KERNEL_STACK_LIMIT);
    write_kernel_pte(KERNEL_STACK_BASE, KERNEL_STACK_LIMIT, _VALID, PROT_READ | PROT_WRITE);

    int i;

    // Add the frames between the kernel heap and the kernel stack to the free list
    int start_pfn = GET_PAGE_NUMBER(UP_TO_PAGE(kernel_memory.brk_high));
    int end_pfn = GET_PAGE_NUMBER(DOWN_TO_PAGE(KERNEL_STACK_BASE));
    for(i = start_pfn; i < end_pfn; i++) {
        add_tail_available_frame(i);
    }

    // Add the frames above kernel virtual space to the free list.
    // Note: PAGE_SIZE holds the total number of physical frames (set in KernelStart).
    start_pfn = GET_PAGE_NUMBER(UP_TO_PAGE(VMEM_0_LIMIT));
    end_pfn = GET_PAGE_NUMBER(DOWN_TO_PAGE(PMEM_BASE)) + PAGE_SIZE;
    for(i = start_pfn; i < end_pfn; i++) {
        add_tail_available_frame(i);
    }
}
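/* For reference, a minimal sketch of the paging macros this file leans on.
 * PAGESIZE and PAGESHIFT come from the Yalnix hardware.h; GET_PAGE_NUMBER is
 * this kernel's own helper, and the exact rounding below is an assumption
 * inferred from the call sites, not the original definitions. */
#define GET_PAGE_NUMBER(addr) ((uint32)(addr) >> PAGESHIFT)
#define UP_TO_PAGE(addr)      (((uint32)(addr) + PAGESIZE - 1) & ~(PAGESIZE - 1))
#define DOWN_TO_PAGE(addr)    ((uint32)(addr) & ~(PAGESIZE - 1))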
/* Kernel-context bootstrap function: used only the first time a process's
 * kernel context and kernel stack need to be initialized. */
KernelContext *init_newbie_kernel(KernelContext *kernel_context, void *_prev_pcb, void *_next_pcb) {
    pcb_t *next_proc = (pcb_t *) _next_pcb;
    //log_info("First time to init PID(%d) kernel stack!", next_proc->pid);
    if(next_proc->kernel_stack_pages == NULL) {
        log_err("Init kernel stack failed: pcb->kernel_stack_pages has not been allocated yet");
        Halt();
    }
    next_proc->kernel_context = *kernel_context;
    int rc = alloc_frame_and_copy(next_proc->kernel_stack_pages, kernel_page_table,
                                  GET_PAGE_NUMBER(KERNEL_STACK_BASE),
                                  GET_PAGE_NUMBER(KERNEL_STACK_LIMIT),
                                  kernel_memory.swap_addr);
    if(rc) {
        log_err("PID(%d) kernel stack cannot init", next_proc->pid);
        return NULL;
    }
    next_proc->init_done = 1;
    //print_page_table(kernel_page_table, 120, GET_PAGE_NUMBER(VMEM_0_LIMIT));
    //print_page_table(next_proc->kernel_stack_pages, 0, 2);
    //log_info("First time to init PID(%d) kernel stack done", next_proc->pid);
    return kernel_context;
}
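/* Hedged usage sketch (assumption, not in the original source): a function with
 * this signature is normally handed to Yalnix's KernelContextSwitch() primitive.
 * A hypothetical helper would run it once when the first process starts: */
static void bootstrap_first_process(pcb_t *init_proc) {
    if (KernelContextSwitch(init_newbie_kernel, NULL, (void *) init_proc) != 0) {
        log_err("KernelContextSwitch failed for PID(%d)", init_proc->pid);
        Halt();
    }
}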
/*
 * The kernel page table must mirror physical memory exactly, so the PTEs are
 * written manually here: virtual page i maps straight to physical frame i.
 */
void write_kernel_pte(uint32 addr_low, uint32 addr_high, int isValid, int prot) {
    int i;
    for(i = GET_PAGE_NUMBER(addr_low); i < GET_PAGE_NUMBER(addr_high); i++) {
        kernel_page_table[i].valid = isValid;
        kernel_page_table[i].prot = prot;
        kernel_page_table[i].pfn = i;
    }
}
/*
 * Not necessary in checkpoint 2.
 */
int SetKernelBrk(void *addr) {
    TracePrintf(0, "SetKernelBrk Start = %p, End = %p, New Addr = %p\n",
                kernel_memory.brk_low, kernel_memory.brk_high, addr);
    int page_cnt, rc;
    uint32 new_addr = (uint32)addr;
    uint32 new_page_bound = UP_TO_PAGE(new_addr);
    uint32 current_page_bound = UP_TO_PAGE(kernel_memory.brk_high);

    // Boundary check: the new break must not run into the kernel stack
    if(new_addr > kernel_memory.stack_low) {
        TracePrintf(0, "Kernel Break Warning: Trying to Access Stack Addr = %p\n", new_addr);
        return _FAILURE;
    }
    // ...and must not drop below the bottom of the kernel heap
    else if(new_addr < kernel_memory.brk_low) {
        TracePrintf(0, "Kernel Break Warning: Trying to Access Text Addr = %p\n", addr);
        return _FAILURE;
    }

    // Before virtual memory is enabled, just record the new break
    if(!ReadRegister(REG_VM_ENABLE)) {
        kernel_memory.brk_high = new_addr;
        return _SUCCESS;
    }

    TracePrintf(0, "SetKernelBrk CHECK DONE = %p, End = %p, New Addr = %p\n",
                kernel_memory.brk_low, kernel_memory.brk_high, addr);

    // Grow or shrink the heap mapping
    if(new_addr > kernel_memory.brk_high) {
        TracePrintf(0, "SetKernelBrk ADD = %p, End = %p, New Addr = %p\n",
                    kernel_memory.brk_low, kernel_memory.brk_high, addr);
        page_cnt = GET_PAGE_NUMBER(new_page_bound) - GET_PAGE_NUMBER(current_page_bound);
        rc = map_page_to_frame(kernel_page_table, GET_PAGE_NUMBER(current_page_bound),
                               page_cnt, PROT_READ | PROT_WRITE);
        if(rc) {
            TracePrintf(0, "Kernel Break Warning: Not enough physical memory\n");
            return _FAILURE;
        }
    } else if (new_page_bound < current_page_bound) {
        TracePrintf(0, "SetKernelBrk RM = %p, End = %p, New Addr = %p\n",
                    kernel_memory.brk_low, kernel_memory.brk_high, addr);
        page_cnt = GET_PAGE_NUMBER(current_page_bound) - GET_PAGE_NUMBER(new_page_bound);
        rc = unmap_page_to_frame(kernel_page_table, GET_PAGE_NUMBER(new_page_bound), page_cnt);
        if(rc) {
            TracePrintf(0, "Kernel Break Warning: Not able to release pages\n");
            return _FAILURE;
        }
    }

    TracePrintf(0, "SetKernelBrk DONE = %p, End = %p, New Addr = %p\n",
                kernel_memory.brk_low, kernel_memory.brk_high, addr);
    kernel_memory.brk_high = new_addr;
    return _SUCCESS;
}
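/* Hedged sketch (assumption, inferred from the call sites here and in
 * init_idle_proc/free_proc, not the original implementation): map_page_to_frame
 * takes a starting virtual page number plus a page count, pulling frames from
 * the available_frames list: */
int map_page_to_frame(pte_t *page_table, int start_vpn, int page_cnt, int prot) {
    int vpn;
    for (vpn = start_vpn; vpn < start_vpn + page_cnt; vpn++) {
        void *frame = list_rm_head(available_frames);   /* assumed free-list helper */
        if (frame == NULL) return 1;                    /* out of physical memory */
        page_table[vpn].valid = _VALID;
        page_table[vpn].prot = prot;
        page_table[vpn].pfn = frame_get_pfn(frame);
    }
    return 0;
}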
/* Hand-craft the idle environment: map one page at the bottom of region 1
 * and point the user context's pc/sp at the idle code and stack. */
void Cooking(pte_t *user_page_table, UserContext* uctxt) {
    // map_page_to_frame(user_page_table, GET_PAGE_NUMBER(VMEM_1_BASE), 1, PROT_READ | PROT_WRITE);
    user_page_table[0].valid = _VALID;
    user_page_table[0].prot = PROT_READ | PROT_WRITE;
    user_page_table[0].pfn = frame_get_pfn(list_rm_head(available_frames));
    uctxt->pc = &DoIdle;
    uctxt->sp = (void*)(VMEM_1_BASE + PAGESIZE - 4);
}
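/* Hedged sketch (assumption): DoIdle is taken to be the usual Yalnix idle
 * loop; Pause() parks the CPU until the next interrupt arrives. */
void DoIdle(void) {
    while(1) {
        TracePrintf(1, "DoIdle\n");
        Pause();
    }
}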
/* Kernel context switch magic function:
 * 1. Back up the current kernel context.
 * 2. Restore the next process's kernel stack and flush the corresponding TLB entries.
 *
 * @param kernel_context: the mysterious kernel context
 * @param _prev_pcb: the process to be switched out
 * @param _next_pcb: the process to be switched in
 * @return: kernel context
 */
KernelContext *kernel_context_switch(KernelContext *kernel_context, void *_prev_pcb, void *_next_pcb) {
    pcb_t *prev_proc = (pcb_t *) _prev_pcb;
    pcb_t *next_proc = (pcb_t *) _next_pcb;

    // Back up the current kernel context, and set the next running process
    if(is_proc_active(prev_proc)) {
        //log_info("Backup kernel context for PID(%d)", prev_proc->pid);
        memcpy(&prev_proc->kernel_context, kernel_context, sizeof(KernelContext));
    }
    running_proc = next_proc;
    running_proc->state = RUN;

    if(next_proc->init_done == 0) {
        // Just created (e.g. freshly forked): init the context and kernel stack
        next_proc->kernel_context = *kernel_context;
        int rc = alloc_frame_and_copy(next_proc->kernel_stack_pages, kernel_page_table,
                                      GET_PAGE_NUMBER(KERNEL_STACK_BASE),
                                      GET_PAGE_NUMBER(KERNEL_STACK_LIMIT),
                                      kernel_memory.swap_addr);
        if(rc) {
            log_err("PID(%d) kernel stack cannot init", next_proc->pid);
            return NULL;
        }
        //log_info("Init kernel context for PID(%d) done", next_proc->pid);
        next_proc->init_done = 1;
    }

    // Load the kernel stack of the next process and flush the corresponding TLB entries
    memcpy(&kernel_page_table[GET_PAGE_NUMBER(KERNEL_STACK_BASE)],
           next_proc->kernel_stack_pages,
           sizeof(pte_t) * KERNEL_STACK_MAXSIZE / PAGESIZE);
    WriteRegister(REG_TLB_FLUSH, TLB_FLUSH_0);
    //int addr;
    //for(addr = KERNEL_STACK_BASE; addr < KERNEL_STACK_LIMIT; addr += PAGESIZE) {
    //    WriteRegister(REG_TLB_FLUSH, addr);
    //}

    log_info("Magic kernel switch done from PID(%d) to PID(%d)", prev_proc->pid, next_proc->pid);
    *kernel_context = next_proc->kernel_context;
    return kernel_context;
}
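/* Hedged usage sketch (assumption, hypothetical helper): this switch function
 * is presumably driven through Yalnix's KernelContextSwitch() whenever the
 * scheduler picks a new process to run: */
static void switch_to(pcb_t *prev, pcb_t *next) {
    if (KernelContextSwitch(kernel_context_switch, (void *) prev, (void *) next) != 0) {
        log_err("Context switch from PID(%d) to PID(%d) failed", prev->pid, next->pid);
        Halt();
    }
}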
int Y_Brk(uint32 addr) {
    int page_cnt, rc;
    uint32 new_addr = (uint32)addr;
    uint32 new_page_bound = UP_TO_PAGE(new_addr);
    uint32 current_page_bound = UP_TO_PAGE(user_memory.brk_high);

    // Boundary check: the new break must stay below the red zone under the stack
    if(new_addr > user_memory.stack_low - PAGESIZE) {
        log_err("New addr trespasses into the red zone below the stack!");
        return _FAILURE;
    }
    // ...and must not drop below the bottom of the user heap
    else if(new_addr < user_memory.brk_low) {
        log_err("New addr trespasses into the data area!");
        return _FAILURE;
    }

    // Grow or shrink the heap mapping
    if(new_addr > user_memory.brk_high) {
        page_cnt = GET_PAGE_NUMBER(new_page_bound) - GET_PAGE_NUMBER(current_page_bound);
        rc = map_page_to_frame(user_page_table, GET_PAGE_NUMBER(current_page_bound),
                               page_cnt, PROT_READ | PROT_WRITE);
        if(rc) {
            log_err("User Break Warning: Not enough physical memory\n");
            return _FAILURE;
        }
        log_info("Y_Brk ADD DONE = %p, End = %p, New Addr = %p",
                 user_memory.brk_low, user_memory.brk_high, addr);
    } else if (new_page_bound < current_page_bound) {
        page_cnt = GET_PAGE_NUMBER(current_page_bound) - GET_PAGE_NUMBER(new_page_bound);
        rc = unmap_page_to_frame(user_page_table, GET_PAGE_NUMBER(new_page_bound), page_cnt);
        if(rc) {
            log_err("User Break Warning: Not able to release pages\n");
            return _FAILURE;
        }
    }

    user_memory.brk_high = new_addr;
    log_info("PID %d user brk done, new addr at %p", running_proc->pid, new_addr);
    return _SUCCESS;
}
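/* Hedged usage sketch (assumption, hypothetical dispatch): Y_Brk is presumably
 * reached from the TRAP_KERNEL handler on a YALNIX_BRK syscall; the convention
 * assumed here is that the requested address arrives in regs[0] and the result
 * is written back to regs[0]: */
void handle_brk(UserContext *user_context) {
    user_context->regs[0] = Y_Brk((uint32) user_context->regs[0]);
}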
/* Init a dummy idle proc */
void init_idle_proc() {
    idle_proc = (pcb_t*) malloc(sizeof(pcb_t));
    if(!idle_proc) {
        log_err("Cannot malloc idle proc!");
        return;
    }
    bzero(idle_proc, sizeof(pcb_t));
    idle_proc->user_context.pc = DoDoIdle;
    idle_proc->user_context.sp = (void *)kernel_memory.stack_low;
    //idle_proc->user_context.ebp = (void *)kernel_memory.stack_low;
    //idle_proc->user_context.code = YALNIX_NOP;
    //idle_proc->user_context.vector = TRAP_KERNEL;
    idle_proc->page_table = (pte_t*) malloc(sizeof(pte_t) * GET_PAGE_NUMBER(VMEM_1_SIZE));
    idle_proc->kernel_stack_pages = (pte_t*) malloc(sizeof(pte_t) * KERNEL_STACK_MAXSIZE / PAGESIZE);
    map_page_to_frame(idle_proc->page_table, 0, GET_PAGE_NUMBER(VMEM_1_SIZE), PROT_READ);
    idle_proc->pid = get_next_pid();
    idle_proc->state = READY;
    idle_proc->init_done = 0;
    //init_process_kernel(idle_proc);
    return;
}
/* A general function to initialize a user proc.
 *
 * @param parent: the parent pcb, or NULL for the first process
 * @return: a pointer to the newly created pcb;
 *          NULL if creation fails
 */
pcb_t *init_user_proc(pcb_t* parent) {
    // Create the pcb
    pcb_t *proc = (pcb_t*) malloc(sizeof(pcb_t));
    if(!proc) {
        log_err("Cannot malloc user proc!");
        return NULL;
    }
    bzero(proc, sizeof(pcb_t));

    // Create the region-1 page table
    proc->page_table = (pte_t*) malloc(sizeof(pte_t) * GET_PAGE_NUMBER(VMEM_1_SIZE));
    if(!proc->page_table) {
        log_err("proc->page_table cannot be malloc'ed!");
        free(proc);
        return NULL;
    }
    bzero(proc->page_table, sizeof(pte_t) * GET_PAGE_NUMBER(VMEM_1_SIZE));

    // Create the kernel stack page table
    proc->kernel_stack_pages = (pte_t*) malloc(sizeof(pte_t) * KERNEL_STACK_MAXSIZE / PAGESIZE);
    if(!proc->kernel_stack_pages) {
        log_err("proc->kernel_stack_pages cannot be malloc'ed!");
        free(proc->page_table);
        free(proc);
        return NULL;
    }
    bzero(proc->kernel_stack_pages, sizeof(pte_t) * KERNEL_STACK_MAXSIZE / PAGESIZE);

    // Init vitals
    proc->init_done = 0;
    proc->parent = (struct y_PBC*)parent;
    proc->children = dlist_init();
    proc->zombie = dlist_init();
    proc->pid = get_next_pid();
    if(parent) {
        dlist_add_tail(parent->children, proc);
    }
    proc->state = READY;
    proc->wait_zombie = 0;
    return proc;
}
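/* Hedged usage sketch (assumption, hypothetical helper): a Fork-style path
 * would pair init_user_proc with copy_user_runtime (defined below): */
int copy_user_runtime(pcb_t *dest_proc, pcb_t *src_proc, UserContext *user_context);

pcb_t *fork_proc(UserContext *user_context) {
    pcb_t *child = init_user_proc(running_proc);
    if (child == NULL || copy_user_runtime(child, running_proc, user_context) != 0) {
        return NULL;   /* creation or copy failed */
    }
    return child;      /* caller is assumed to enqueue the child as READY */
}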
/* Copy runtime info.
 *
 * @param dest_proc: process to copy to
 * @param src_proc: process to copy from
 * @param user_context: user context
 */
int copy_user_runtime(pcb_t *dest_proc, pcb_t *src_proc, UserContext *user_context) {
    save_user_runtime(src_proc, user_context);
    dest_proc->user_context = src_proc->user_context;
    int rc = alloc_frame_and_copy(dest_proc->page_table, src_proc->page_table,
                                  0, GET_PAGE_NUMBER(VMEM_1_SIZE),
                                  kernel_memory.swap_addr);
    if(rc) {
        log_err("PID(%d) cannot alloc or copy data from PID(%d) page table",
                dest_proc->pid, src_proc->pid);
        return 1;
    }
    dest_proc->mm = src_proc->mm;
    return 0;
}
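/* Hedged sketch (assumption, not the original source): alloc_frame_and_copy is
 * assumed to clone pages by mapping each newly allocated frame at a reserved
 * kernel address (swap_addr) so the copy can go through virtual memory. The
 * vpn indexing below glosses over the region-relative offsets the real code
 * must handle; it illustrates the copy-through-a-temporary-mapping technique: */
int alloc_frame_and_copy(pte_t *dest, pte_t *src, int start_vpn, int end_vpn, uint32 swap_addr) {
    pte_t *swap_pte = &kernel_page_table[GET_PAGE_NUMBER(swap_addr)];
    int vpn;
    for (vpn = start_vpn; vpn < end_vpn; vpn++) {
        if (!src[vpn].valid) continue;
        void *frame = list_rm_head(available_frames);
        if (frame == NULL) return 1;
        dest[vpn] = src[vpn];
        dest[vpn].pfn = frame_get_pfn(frame);
        /* Map the fresh frame at swap_addr, copy one page through it, unmap */
        swap_pte->valid = _VALID;
        swap_pte->prot = PROT_READ | PROT_WRITE;
        swap_pte->pfn = dest[vpn].pfn;
        WriteRegister(REG_TLB_FLUSH, swap_addr);
        memcpy((void *) swap_addr, (void *)(vpn << PAGESHIFT), PAGESIZE);
        swap_pte->valid = 0;
        WriteRegister(REG_TLB_FLUSH, swap_addr);
    }
    return 0;
}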
/* Completely free the PCB block */
int free_proc(pcb_t *proc) {
    log_info("Going to destroy process PID(%d)", proc->pid);
    int rc, pid = proc->pid;
    rc = unmap_page_to_frame(proc->page_table, 0, GET_PAGE_NUMBER(VMEM_1_SIZE));
    if(rc) {
        log_err("Unable to free frames of PID(%d) page table", proc->pid);
        return 1;
    }
    free(proc->page_table);
    rc = unmap_page_to_frame(proc->kernel_stack_pages, 0, GET_PAGE_NUMBER(KERNEL_STACK_MAXSIZE));
    if(rc) {
        log_err("Unable to free frames of PID(%d) kernel stack page table", proc->pid);
        return 1;
    }
    free(proc->kernel_stack_pages);
    free(proc);
    id_generator_push(pid_list, pid);
    log_info("PID(%d) is freed", pid);
    return 0;
}
void KernelStart(char* cmd_args[], unsigned int pmem_size, UserContext* uctxt) {
    TracePrintf(0, "Start the kernel\n");

    // Initialize the vector table mapping each interrupt/exception/trap to a handler function
    init_trap_vector();

    // REG_VECTOR_BASE points to the vector table
    WriteRegister(REG_VECTOR_BASE, (uint32)interrupt_vector);

    // Memory management: a linked list of free physical frames.
    // Note: despite its name, PAGE_SIZE stores the total number of physical frames.
    available_frames = list_init();
    PAGE_SIZE = GET_PAGE_NUMBER(pmem_size);
    init_kernel_page_table();
    user_page_table = init_user_page_table();
    if(!kernel_page_table || !user_page_table) {
        _debug("Cannot allocate memory for page tables.\n");
        return;
    }

    // Register the page tables via REG_PTBR0/1 and REG_PTLR0/1
    WriteRegister(REG_PTBR0, (uint32)kernel_page_table);
    WriteRegister(REG_PTLR0, GET_PAGE_NUMBER(VMEM_0_SIZE));
    WriteRegister(REG_PTBR1, (uint32)user_page_table);
    WriteRegister(REG_PTLR1, GET_PAGE_NUMBER(VMEM_1_SIZE));

    // Enable virtual memory
    WriteRegister(REG_VM_ENABLE, _ENABLE);

    // Create the idle proc
    Cooking(user_page_table, uctxt);

    // Load the init process (in checkpoint 3)
    TracePrintf(0, "Leave the kernel\n");
    return;
}
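/* Hedged sketch (assumption): init_trap_vector is taken to populate the
 * interrupt_vector table indexed by the Yalnix trap codes. The handler names
 * below are hypothetical placeholders, not the original ones: */
void (*interrupt_vector[TRAP_VECTOR_SIZE])(UserContext *);

void init_trap_vector() {
    int i;
    for(i = 0; i < TRAP_VECTOR_SIZE; i++) {
        interrupt_vector[i] = trap_not_handled;   /* hypothetical default handler */
    }
    interrupt_vector[TRAP_KERNEL] = trap_kernel;  /* hypothetical handlers */
    interrupt_vector[TRAP_CLOCK]  = trap_clock;
    interrupt_vector[TRAP_MEMORY] = trap_memory;
}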
/**************************************************************************************************
 * @fn      sbCmnd
 *
 * @brief   Act on the SB command and received buffer.
 *
 * input parameters
 *
 * @param   sbCmd - Received SBL command.
 * @param   payload_len - Length of command payload
 *
 * output parameters
 *
 * None.
 *
 * @return  TRUE to indicate that the SB_ENABLE_CMD command was successful; FALSE otherwise.
 **************************************************************************************************
 */
static uint8 sbCmnd(uint8 sbCmd, uint32 payload_len)
{
  uint32 firstAddr;
  uint32 lastAddr;
  uint32 operationLength;
  uint32 writeLength;
  uint32 respPayloadLen = 0;
  uint32 pageNumber;
  uint32 i;
  uint32 actual_number_of_data_bytes_to_send;
  uint8 paddingLength;
  uint8 rsp = SB_SUCCESS;
  uint8 imageEnabledSuccessfully = FALSE;
  uint8 *pBuf;

  pBuf = sbBuf;

  switch (sbCmd)
  {
    case SB_HANDSHAKE_CMD:
      /* Mark all pages as not-deleted-yet */
      memset(pageDeleted, 0, sizeof(pageDeleted));

      UINT32_TO_BUF_LITTLE_ENDIAN(pBuf, SB_BOOTLOADER_REVISION);
      *pBuf++ = SB_DEVICE_TYPE_2538;
      UINT32_TO_BUF_LITTLE_ENDIAN(pBuf, SB_RW_BUF_LEN);
      UINT32_TO_BUF_LITTLE_ENDIAN(pBuf, SB_DEVICE_PAGE_SIZE);
      respPayloadLen = pBuf - sbBuf;
      break;

    case SB_WRITE_CMD:
      firstAddr = BUF_TO_UINT32_LITTLE_ENDIAN(pBuf);
      operationLength = BUF_TO_UINT32_LITTLE_ENDIAN(pBuf);

      /* The payload_len includes the addr_offset and the operationLength
       * fields. The value (pBuf - sbBuf) gives the number of bytes used by
       * those fields. The remaining bytes are the actual data bytes to be
       * written.
       */
      writeLength = payload_len - (pBuf - sbBuf);
      lastAddr = firstAddr + operationLength - 1;

      if ((firstAddr < FLASH_BASE) || (lastAddr > CC2538_CODE_FLASH_END_ADDRESS)
          || (writeLength > operationLength))
      {
        rsp = SB_FAILURE;
        break;
      }

      /* Before writing to a flash page for the first time during a bootloading session, the
       * page must be erased. The following section makes sure that every page being written
       * to has already been erased; otherwise, it erases it (before writing to it).
       * Note that the write command may span more than a single page.
       */
      for (pageNumber = GET_PAGE_NUMBER(firstAddr); pageNumber <= GET_PAGE_NUMBER(lastAddr); pageNumber++)
      {
        if (!IS_PAGE_ERASED(pageNumber))
        {
          if (FlashMainPageErase(GET_PAGE_ADDRESS(pageNumber)) != 0)
          {
            rsp = SB_FAILURE;
            break;
          }
          MARK_PAGE_ERASED(pageNumber);
        }
      }

      /* Note that the start address (firstAddr) and the byte count (writeLength) must be
       * word aligned. The start address is expected to be already aligned (by the SBL server),
       * since aligning it here would require padding the buffer's start, which would require
       * shifting the buffer content (the buffer is passed as (uint32_t *pui32Data), so it
       * should be aligned by itself). The byte count is aligned below.
       */
      paddingLength = ((4 - (writeLength & 0x00000003)) % 4);
      for (i = 0; i < paddingLength; i++)
      {
        pBuf[writeLength + i] = 0xFF;
      }
      writeLength += paddingLength;

      /* If the page was successfully erased (or was previously erased), perform the write action.
       * Note that pBuf must point to a uint32-aligned address, as required by FlashMainPageProgram().
       * This is the case now (the prefixing fields total 8 bytes), and _sbBuf is 32-bit aligned.
       */
      if ((rsp == SB_SUCCESS) && (writeLength > 0)
          && (FlashMainPageProgram((uint32_t *)(pBuf), firstAddr, writeLength) != 0))
      {
        rsp = SB_FAILURE;
      }
      break;

    case SB_READ_CMD:
      firstAddr = BUF_TO_UINT32_LITTLE_ENDIAN(pBuf);
      operationLength = BUF_TO_UINT32_LITTLE_ENDIAN(pBuf);
      lastAddr = firstAddr + operationLength - 1;

      if ((firstAddr < FLASH_BASE) || (lastAddr > CC2538_CODE_FLASH_END_ADDRESS)
          || (operationLength > sizeof(_sbBuf)))
      {
        rsp = SB_FAILURE;
        break;
      }

#if !MT_SYS_OSAL_NV_READ_CERTIFICATE_DATA
#if (HAL_IMG_A_BEG > HAL_NV_START_ADDR)
#warning This check assumes NV PAGES located at the end of the program flash memory
#endif
      if (GET_PAGE_NUMBER(lastAddr) >= HAL_NV_PAGE_BEG)
      {
        rsp = SB_FAILURE;
        break;
      }
#endif

      /* If the end of the buffer is made only of 0xFF characters, there is no
       * need to send them. Find out the number of bytes that actually need to
       * be sent:
       */
      for (actual_number_of_data_bytes_to_send = operationLength;
           (actual_number_of_data_bytes_to_send > 0)
           && ((*(uint8 *)(firstAddr + actual_number_of_data_bytes_to_send - 1)) == 0xFF);
           actual_number_of_data_bytes_to_send--);

      /* As a future upgrade, the memcpy could be avoided by passing a pointer
       * to the actual flash address instead.
       */
      (void)memcpy(pBuf, (const void *)firstAddr, actual_number_of_data_bytes_to_send);
      respPayloadLen = (pBuf - sbBuf) + actual_number_of_data_bytes_to_send;
      break;

    case SB_ENABLE_CMD:
      if (enableImg())
      {
        imageEnabledSuccessfully = TRUE;
      }
      else
      {
        rsp = SB_VALIDATE_FAILED;
      }
      break;

    default:
      break;
  }

  sbResp(sbCmd, rsp, respPayloadLen);
  return imageEnabledSuccessfully;
}
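/* Hedged sketch (assumption, not from the TI source): pageDeleted is taken to
 * be a bitmap over flash pages, with the erase-tracking macros used above
 * behaving roughly as below; SB_FLASH_PAGE_COUNT is a hypothetical constant: */
static uint8 pageDeleted[(SB_FLASH_PAGE_COUNT + 7) / 8];
#define IS_PAGE_ERASED(page)    (pageDeleted[(page) / 8] & (1 << ((page) % 8)))
#define MARK_PAGE_ERASED(page)  (pageDeleted[(page) / 8] |= (1 << ((page) % 8)))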
/*
 * A dummy user page table for the init process.
 * Use calloc so every PTE starts out zeroed, i.e. invalid.
 */
pte_t *init_user_page_table() {
    return (pte_t*) calloc(GET_PAGE_NUMBER(VMEM_1_SIZE), sizeof(pte_t));
}