int mcount_arch_enable_event(struct mcount_event_info *mei) { static bool sdt_handler_set = false; if (!sdt_handler_set) { struct sigaction act = { .sa_flags = SA_SIGINFO, .sa_sigaction = sdt_handler, }; sigemptyset(&act.sa_mask); sigaction(SIGILL, &act, NULL); sdt_handler_set = true; } if (mprotect(PAGE_ADDR(mei->addr), PAGE_SIZE, PROT_READ | PROT_WRITE)) { pr_dbg("cannot enable event due to protection: %m\n"); return -1; } /* replace NOP to an invalid OP so that it can catch SIGILL */ memset((void *)mei->addr, INVALID_OPCODE, 1); if (mprotect(PAGE_ADDR(mei->addr), PAGE_SIZE, PROT_EXEC)) pr_err("cannot setup event due to protection"); return 0; }
// Migrate the contents of the block containing `offset` (which just failed)
// into a freshly-found replacement block. Pages before the failing page are
// copied from flash; the failing page itself is written from the caller's
// write_dat/write_oob. Returns the replacement block index, or 0 on failure.
// NOTE(review): only pages [0, error_page) are copied here — pages after the
// failing one are not migrated; confirm that is intended for this driver
// (the sibling variant copies all pages != error_page).
static int migrate_from_bad(int offset, u8 *write_dat, u8 *write_oob)
{
	int page;
	int error_block = offset / BLOCK_SIZE_BMT;                  // failing block number
	int error_page = (offset / PAGE_SIZE_BMT) % page_per_block; // failing page within that block
	int to_index;

	to_index = find_available_block(false);
	if (!to_index) {
		MSG("Cannot find an available block for BMT\n");
		return 0;
	}

	// Copy every page preceding the failing one into the replacement block.
	for (page = 0; page < error_page; page++) {
		nand_read_page_bmt(PAGE_ADDR(error_block) + page, dat_buf, oob_buf);
		// Record the original block index in OOB so the mapping can be
		// reconstructed later (system-area blocks only).
		if (error_block < system_block_count) {
			set_bad_index_to_oob(oob_buf, error_block);
		}
		if (nand_write_page_bmt(PAGE_ADDR(to_index) + page, dat_buf, oob_buf)) {
			MSG("Write page %d fail\n", PAGE_ADDR(to_index) + page);
			mark_block_bad_bmt(OFFSET(to_index), BMT_BADBLOCK_GENERATE_LATER);
			// The chosen replacement block failed too — retry with another.
			return migrate_from_bad(offset, write_dat, write_oob);
		}
	}

	// Write the caller's data for the failing page itself.
	{
		memset(oob_buf, 0xFF, sizeof(oob_buf));
#if defined(TCSUPPORT_CPU_MT7510)||defined(TCSUPPORT_CPU_MT7520)
		memcpy(oob_buf, write_oob, mtd_bmt->oobsize);
#else
		memcpy(oob_buf, write_oob, 1 << nand_chip_bmt->flash->oob_shift);
#endif
		if (error_block < system_block_count)
			set_bad_index_to_oob(oob_buf, error_block);	// if error_block is already a mapped block, original mapping index is in OOB.

		if (nand_write_page_bmt(PAGE_ADDR(to_index) + error_page, write_dat, oob_buf)) {
			MSG("Write page %d fail\n", PAGE_ADDR(to_index) + error_page);
			mark_block_bad_bmt(OFFSET(to_index), BMT_BADBLOCK_GENERATE_LATER);
			return migrate_from_bad(offset, write_dat, write_oob);
		}
	}

	MSG("Migrate from %d to %d done!\n",error_block, to_index);

	return to_index;
}
/* Read one PMIC register over I2C; the 16-bit `reg` encodes page + offset.
 * Returns 0 on success, -1 on I2C failure (after logging). */
int pmic_read_reg(unsigned bus, uint16_t reg, uint8_t *data)
{
	int ret = i2c_readb(bus, PAGE_ADDR(reg), PAGE_OFFSET(reg), data);

	if (ret == 0)
		return 0;

	printk(BIOS_ERR, "%s: page = 0x%02X, reg = 0x%02X failed!\n",
	       __func__, PAGE_ADDR(reg), PAGE_OFFSET(reg));
	return -1;
}
/* Write one PMIC register over I2C; the 16-bit `reg` encodes page + offset.
 * On failure the SoC is reset; on success an optional settle delay runs. */
void pmic_write_reg(unsigned bus, uint16_t reg, uint8_t val, int delay)
{
	int ret = i2c_writeb(bus, PAGE_ADDR(reg), PAGE_OFFSET(reg), val);

	if (ret) {
		printk(BIOS_ERR, "%s: page = 0x%02X, reg = 0x%02X, "
		       "value = 0x%02X failed!\n",
		       __func__, PAGE_ADDR(reg), PAGE_OFFSET(reg), val);
		/* Reset the SoC on any PMIC write error */
		cpu_reset();
		return;
	}

	if (delay)
		udelay(500);
}
// return valid index if found BMT, else return 0 static int load_bmt_data(int start, int pool_size) { int bmt_index = start + pool_size - 1; // find from the end phys_bmt_struct phys_table; int i; MSG(INIT, "[%s]: begin to search BMT from block 0x%x\n", __FUNCTION__, bmt_index); for (bmt_index = start + pool_size - 1; bmt_index >= start; bmt_index--) { if (nand_block_bad_bmt(OFFSET(bmt_index))) { MSG(INIT, "Skip bad block: %d\n", bmt_index); continue; } if (!nand_read_page_bmt(PAGE_ADDR(bmt_index), dat_buf, oob_buf)) { MSG(INIT, "Error found when read block %d\n", bmt_index); continue; } if (!match_bmt_signature(dat_buf, oob_buf)) { continue; } MSG(INIT, "Match bmt signature @ block: 0x%x\n", bmt_index); memcpy(&phys_table, dat_buf + MAIN_SIGNATURE_OFFSET, sizeof(phys_table)); if (!valid_bmt_data(&phys_table)) { MSG(INIT, "BMT data is not correct %d\n", bmt_index); continue; } else { bmt.mapped_count = phys_table.header.mapped_count; bmt.version = phys_table.header.version; // bmt.bad_count = phys_table.header.bad_count; memcpy(bmt.table, phys_table.table, bmt.mapped_count * sizeof(bmt_entry)); MSG(INIT, "bmt found at block: %d, mapped block: %d\n", bmt_index, bmt.mapped_count); for (i = 0; i < bmt.mapped_count; i++) { if (!nand_block_bad_bmt(OFFSET(bmt.table[i].bad_index))) { MSG(INIT, "block 0x%x is not mark bad, should be power lost last time\n", bmt.table[i].bad_index); mark_block_bad_bmt(OFFSET(bmt.table[i].bad_index)); } } return bmt_index; } } MSG(INIT, "bmt block not found!\n"); return 0; }
// Обработчик #PF для A.OUT-процессов // Аргумент - адрес, по которому программа попыталась обратиться // Результат - адрес загруженной страницы + флаги, или 0, если процесс "ошибся адресом" ulong aout_pf(uint address) { TaskStruct *task = Task[Current]; // Наибольшая страница, которая будет загружаться из файла // (я все время предполагаю, что размеры секций кратны странице) uint filepages = task->header.a_text + task->header.a_data; // Объем АП процесса uint maxpage = filepages + PAGE_ADDR(task->header.a_bss + 0xfff) + USER_STACK_PAGES * PAGE_SIZE; bool ok = 0; // Преобразуем адрес в адрес страницы ulong pageaddr = PAGE_ADDR(address); ulong *tmppage = 0; // Если это страница должна грузиться из файла if (address < filepages) { // Выделяем страницу... tmppage = (ulong*)alloc_first_page(); // ... и загружаем ее. Пользуемся тем, что LoadPart остановится // на конце файла. LoadPart(&task->file, tmppage, pageaddr+N_TXTOFF(task->header), PAGE_SIZE); ok = 1; } // Если это страница bss или стека if (address >= filepages && address < maxpage) { // Выделяем страницу... tmppage = (ulong*)alloc_first_page(); // ... и обнуляем ее memset(tmppage, 0, PAGE_SIZE); ok = 1; } if (ok) return (ulong)tmppage + PAGE_ATTR; else return 0; }
// return valid index if found BMT, else return 0 static int load_bmt_data(int start, int pool_size) { int bmt_index = start + pool_size - 1; // find from the end int i; MSG("begin to search BMT from block %d \n", bmt_index); for (bmt_index = start + pool_size - 1; bmt_index >= start; bmt_index--) { if (nand_block_bad_bmt(OFFSET(bmt_index), BAD_BLOCK_RAW) || nand_block_bad_bmt(OFFSET(bmt_index), BMT_BADBLOCK_GENERATE_LATER)) { MSG("Skip bad block: %d \n", bmt_index); continue; } if (nand_read_page_bmt(PAGE_ADDR(bmt_index), dat_buf, oob_buf)) { MSG("Error found when read block: %d\n", bmt_index); continue; } if (!match_bmt_signature(dat_buf, oob_buf)) { continue; } MSG("Match bmt signature @ block: %d\n", bmt_index); memcpy(&lbd_phys_table, dat_buf + MAIN_SIGNATURE_OFFSET, sizeof(lbd_phys_table)); if (!valid_bmt_data(&lbd_phys_table)) { MSG("BMT data is not correct: %d\n", bmt_index); continue; } else { bmt.mapped_count = lbd_phys_table.header.mapped_count; bmt.version = lbd_phys_table.header.version; memcpy(bmt.table, lbd_phys_table.table, bmt.mapped_count * sizeof(bmt_entry)); MSG("bmt found at block: %d, mapped block: %d\n", bmt_index, bmt.mapped_count); for (i = 0; i < bmt.mapped_count; i++) { if (!nand_block_bad_bmt(OFFSET(bmt.table[i].bad_index), BAD_BLOCK_RAW)) { MSG("block %d is not mark bad, should be power lost last time\n", bmt.table[i].bad_index); mark_block_bad_bmt(OFFSET(bmt.table[i].bad_index), BAD_BLOCK_RAW); } } return bmt_index; } } MSG("bmt not found!\n"); return 0; }
// Функция убивает процесс, заданный идентификатором // При этом освобождаются все занятые им страницы, в т.ч. страницы, // содержащие его каталог страниц и TSS void scheduler_kill(ulong pid) { ulong i; for (i = 0; i < NTasks; i++) { if (Task[i]->pid == pid) { uint pagecount = 0; TaskStruct* task = Task[i]; int j, k; // Проходим по каталогу страниц задачи и ищем непустые записи // Для каждой записи проходим по соответствующей таблице страниц // и освобождаем выделенные страницы. По пути считаем их количество. ulong *pg_dir = (ulong*)PAGE_ADDR(task->tss.cr3); for (j = 0; j < 512; j++) if ((ulong)pg_dir[j] & 0x1) { for (k = 0; k < 1024; k++) { addr_t page = ((ulong*)PAGE_ADDR(pg_dir[j]))[k]; if ((page & PA_P) && ((page & PA_NONFREE) == 0)) { pagecount++; free_page(page); } } pagecount++; free_page(pg_dir[j]); } pagecount+=2; free_page(task->tss.cr3); free_page((ulong)task); printf_color(0x4, "\n%d pages freed\n", pagecount); if (NTasks <= 1) // Система осталась без процессов :( return panic("Heh... Last process has died...\n"); Task[i] = Task[NTasks-1]; NTasks--; if (Current >= NTasks) Current = 0; // Вызываем смену задачи, на случай, если убили текущий процесс CALL_SCHEDULER; // Над этим надо еще подумать return; } } }
// Scan the pool [start, start+pool_size) for a block carrying a valid BBT
// (bad-block table) image and copy it into *init_bbt.
// Returns the block index holding the BBT, or 0 if none was found.
static int load_bbt_data(int start, int pool_size, init_bbt_struct *init_bbt)
{
	int i;
	int ret = 0;
	int bbt_index = start;

	for (; bbt_index < (start + pool_size); bbt_index++) {
		// Skip blocks marked bad either in raw flash or by the BMT.
		if (nand_block_bad_bmt(OFFSET(bbt_index), BAD_BLOCK_RAW) ||
		    nand_block_bad_bmt(OFFSET(bbt_index), BMT_BADBLOCK_GENERATE_LATER)) {
			MSG("Skip bad block: %d\n", bbt_index);
			continue;
		}

		// Non-zero return means the read failed.
		if (nand_read_page_bmt(PAGE_ADDR(bbt_index), dat_buf, oob_buf)) {
			MSG("Error found when read block %d\n", bbt_index);
			continue;
		}

		if (!match_bbt_signature(dat_buf, oob_buf)) {
			continue;
		}

		MSG("Match bbt signature \n");

		memcpy(&lbd_init_table, dat_buf + BBT_SIGNATURE_OFFSET, sizeof(lbd_init_table));

		if (!valid_bbt_data(&lbd_init_table)) {
			MSG("BBT data is not correct \n");
			continue;
		} else {
			// Valid image: publish it into the caller's table.
			init_bbt->badblock_count = lbd_init_table.header.badblock_count;
			init_bbt->version = lbd_init_table.header.version;
			// Entries are 2 bytes each (u16 block indices).
			memcpy(init_bbt->badblock_table, lbd_init_table.badblock_table,
			       (init_bbt->badblock_count) * 2);

			// FIX: log the table that was just loaded into init_bbt;
			// the previous code printed the unrelated lbd_init_bbt object.
			MSG("bbt found, bad block count: %d\n", init_bbt->badblock_count);
			for (i = 0; i < init_bbt->badblock_count; i++) {
				MSG("init_bbt->badblock_table[%d]: %d \n", i, init_bbt->badblock_table[i]);
			}
			return bbt_index;
		}
	}
	return ret;
}
/*******************************************************************
 * Reconstruct bmt, called when found bmt info doesn't match bad
 * block info in flash.
 *
 * Return NULL for failure
 *******************************************************************/
bmt_struct *reconstruct_bmt(bmt_struct * bmt)
{
	int i;
	int index = system_block_count;	// first block of the replace pool
	unsigned short bad_index;

	// init everything in BMT struct
	bmt->version = BMT_VERSION;
	bmt->bad_count = 0;
	bmt->mapped_count = 0;

	memset(bmt->table, 0, bmt_block_count * sizeof(bmt_entry));

	// Walk the whole replace pool and rebuild the mapping from the
	// bad-index value each replacement block stored in its OOB area.
	for (i = 0; i < bmt_block_count; i++, index++) {
		if (nand_block_bad_bmt(OFFSET(index), BAD_BLOCK_RAW) ||
		    nand_block_bad_bmt(OFFSET(index), BMT_BADBLOCK_GENERATE_LATER)) {
			MSG("Skip bad block: %d \n", index);
			continue;
		}

		nand_read_page_bmt(PAGE_ADDR(index), dat_buf, oob_buf);

		// An index at/above system_block_count is not a valid mapping;
		// 0xFFFF just means "unused" (erased OOB) and is not an error.
		if ((bad_index = get_bad_index_from_oob(oob_buf)) >= system_block_count) {
			MSG("get bad index: 0x%x \n", bad_index);
			if (bad_index != 0xFFFF)
				MSG("Invalid bad index found in block: %d \n", index);
			continue;
		}

		MSG("Block 0x%x is mapped to bad block: 0x%x\n", index, bad_index);

		// The mapped-out block should carry a raw bad marker; a power
		// loss can remove it, so restore it here.
		if (!nand_block_bad_bmt(OFFSET(bad_index), BAD_BLOCK_RAW)) {
			mark_block_bad_bmt(OFFSET(bad_index), BAD_BLOCK_RAW);
			MSG("block %d is not marked as bad, mark it\n", bad_index);
		}

		{	// add mapping to BMT
			bmt->table[bmt->mapped_count].bad_index = bad_index;
			bmt->table[bmt->mapped_count].mapped_index = index;
			bmt->mapped_count++;
		}

		MSG("Add mapping: %d -> %d to BMT\n", bad_index, index);
	}

	MSG("Scan replace pool done, mapped block: %d\n", bmt->mapped_count);

	return bmt;
}
/* Reset the per-console screen state: nothing mapped, no VT negotiation
 * in progress, page 0 at the default text address; console 0 is current. */
void scr_state_init(void)
{
	int con;

	for (con = 0; con < MAX_OSS; con++) {
		scr_state[con].mapped = 0;
		scr_state[con].pageno = 0;
		scr_state[con].vt_allow = 0;
		scr_state[con].vt_requested = 0;
		scr_state[con].virt_address = PAGE_ADDR(0);
	}

	current_console = 0;
}
// Печатает информацию о том, сколько страниц занимает процесс // К пользовательским страницам относятся те, которые заняты // кодом и данными процесса, а к системным - содержащие каталог // и таблицы страниц, TSS void scheduler_pages(ulong pid) { ulong i; for (i = 0; i < NTasks; i++) { if (Task[i]->pid == pid) { TaskStruct* task = Task[i]; int j, k; ulong *pg_dir = (ulong*)PAGE_ADDR(task->tss.cr3); uint user = 0, sys= 0, nf = 0; for (j = 0; j < 512; j++) if ((ulong)pg_dir[j] & 0x1) { sys++; for (k = 0; k < 1024; k++) { addr_t page = ((ulong*)PAGE_ADDR(pg_dir[j]))[k]; if (page & PA_P) { if (page & PA_NONFREE) nf++; else user++; } } } printf("User pages:\t%d\n", user); sys += 2; // Еще TSS и pg_dir printf("System pages:\t%d\n", sys); printf("System pages\nin user space:\t%d\n", nf); printf("=================\n"); printf("Total pages:\t%d\n", user+sys+nf); printf("Process size:\t%d\n", user+sys); } } printf("\n"); }
// Write the prepared BMT image (dat + oob) into the reserved BMT block.
// If no block index is cached, a fresh (already-erased) block is obtained
// from find_available_block(); otherwise the cached block is erased first.
// On erase/write failure the block is marked bad and the whole procedure
// retries recursively with a new block. Returns true on success.
static bool write_bmt_to_flash(u8 *dat, u8 *oob)
{
	bool need_erase = true;
	MSG(INIT, "Try to write BMT\n");
	MSG(INIT, "bmt_block_index = 0x%x\n", bmt_block_index);

	if (bmt_block_index == 0) {
		// if we don't have index, we don't need to erase found block as it has been erased in find_available_block()
		need_erase = false;
		MSG(INIT, "set need_erase = 0x%x\n", need_erase);
		if ( !(bmt_block_index = find_available_block(true)) ) {
			MSG(INIT, "Cannot find an available block for BMT\n");
			return false;
		}
	}
	MSG(INIT, "Find BMT block: 0x%x\n", bmt_block_index);
	MSG(INIT, "need_erase = 0x%x\n", need_erase);

	// write bmt to flash
	if (need_erase) {
		// NOTE(review): this variant treats a zero return from
		// nand_erase_bmt as failure; the sibling variant treats
		// non-zero as failure — confirm this driver's convention.
		if (!nand_erase_bmt(OFFSET(bmt_block_index))) {
			MSG(INIT, "BMT block erase fail, mark bad: 0x%x\n", bmt_block_index);
			mark_block_bad_bmt(OFFSET(bmt_block_index));
			// bmt.bad_count++;
			bmt_block_index = 0;
			return write_bmt_to_flash(dat, oob);	// recursive call
		}
	}

	if ( !nand_write_page_bmt(PAGE_ADDR(bmt_block_index), dat, oob) ) {
		MSG(INIT, "Write BMT data fail, need to write again\n");
		mark_block_bad_bmt(OFFSET(bmt_block_index));
		// bmt.bad_count++;
		bmt_block_index = 0;
		return write_bmt_to_flash(dat, oob);	// recursive call
	}

	MSG(INIT, "Write BMT data to block 0x%x success\n", bmt_block_index);
	return true;
}
// Record a store into a traced page: look up the Trace_Block covering
// `addr`, consume the page's "used" flag so it is only counted once,
// then charge one page allocation to the block and all of its parents.
static VG_REGPARM(2) void trace_store(Addr addr, SizeT size)
{
	Trace_Block *block;

#if VG_WORDSIZE == 4
	// 32 bit: a single global section covers the whole address space.
	addr = PAGE_OFFSET_DOWN(addr);

	if (!mmap_section.used_blocks[addr])
		return;

	block = mmap_section.trace_blocks[addr];
	// Clear the flag so this page is not charged again.
	mmap_section.used_blocks[addr] = '\0';
#else
	// 64 bit: sections form a linked list keyed by page_addr; a
	// one-entry cache short-circuits repeated hits on the same section.
	Addr page_addr = PAGE_ADDR(addr);
	Mmap_Section* mmap_section;

	if (mmap_section_cache->page_addr == page_addr)
		mmap_section = mmap_section_cache;
	else {
		mmap_section = mmap_sections;
		while (1) {
			if (mmap_section->page_addr == page_addr)
				break;
			mmap_section = mmap_section->next;
			if (!mmap_section)
				return;	// address is not tracked at all
		}
		mmap_section_cache = mmap_section;
	}

	addr = PAGE_OFFSET_DOWN(addr);

	if (!mmap_section->used_blocks[addr])
		return;

	block = mmap_section->trace_blocks[addr];
	// Clear the flag so this page is not charged again.
	mmap_section->used_blocks[addr] = '\0';
#endif

	tl_assert(block);

	// Charge one page to this block and every ancestor, updating peaks.
	do {
		block->allocs ++;
		block->total += PAGE_SIZE;
		block->current += PAGE_SIZE;
		if (block->peak < block->current)
			block->peak = block->current;
		block = block->parent;
	} while(block);
}
// Associate every page in [addr, end_addr) with block_arg so later
// stores in that range are charged to it (see trace_store).
static __inline__ void mem_map(Addr addr, Addr end_addr, Trace_Block *block_arg)
{
	// Must be strictly greater
	tl_assert(end_addr > addr);

#if VG_WORDSIZE == 4
	// 32 bit: one global section covers the whole address space.
	mark_blocks(
		mmap_section.trace_blocks + PAGE_OFFSET_DOWN(addr),
		mmap_section.trace_blocks + page_offset_up(end_addr),
		mmap_section.used_blocks + PAGE_OFFSET_DOWN(addr),
		block_arg
	);
#else
	// 64 bit: walk the range in chunks, one Mmap_Section per chunk,
	// creating missing sections on demand at the end of the list.
	Addr current_end_addr;
	Mmap_Section* mmap_section;
	Addr page_addr;

	while (addr < end_addr) {
		page_addr = PAGE_ADDR(addr);
		// Each section spans 4 GiB starting at its page_addr.
		current_end_addr = page_addr + 0x100000000ll;
		if (current_end_addr > end_addr)
			current_end_addr = end_addr;

		mmap_section = mmap_sections;
		while (1) {
			if (mmap_section->page_addr == page_addr)
				break;
			if (!mmap_section->next) {
				// Not found: append a fresh zero-initialized section.
				mmap_section->next = VG_(calloc)("freya.mem_map.1", 1, sizeof(Mmap_Section));
				mmap_section = mmap_section->next;
				mmap_section->next = NULL;
				mmap_section->page_addr = page_addr;
				mmap_section->trace_blocks = VG_(calloc)("freya.mem_map.2", PAGE_NUMBER, sizeof(Trace_Block*));
				mmap_section->used_blocks = VG_(calloc)("freya.fr_post_clo_init.3", PAGE_NUMBER, sizeof(Char));
				break;
			}
			mmap_section = mmap_section->next;
		}

		// Tag the pages of this chunk with block_arg.
		mark_blocks(
			mmap_section->trace_blocks + PAGE_OFFSET_DOWN(addr),
			mmap_section->trace_blocks + page_offset_up(current_end_addr),
			mmap_section->used_blocks + PAGE_OFFSET_DOWN(addr),
			block_arg
		);
		addr = current_end_addr;
	}
#endif
}
// Write the prepared BMT image (dat + oob) to flash. If no BMT block is
// cached, take a fresh, already-erased block from find_available_block();
// otherwise erase the cached one first. On erase/write failure the block
// is marked bad and the write retries recursively with a new block.
static bool write_bmt_to_flash(u8 *dat, u8 *oob)
{
	bool need_erase = true;
	MSG("Try to write BMT\n");
	if (bmt_block_index == 0) {
		// if we don't have index, we don't need to erase found block as it has been erased in find_available_block()
		need_erase = false;
		if ( !(bmt_block_index = find_available_block(true)) ) {
			MSG("Cannot find an available block for BMT\n");
			return false;
		}
	}

	// write bmt to flash
	if (need_erase) {
		// Non-zero return means the erase failed.
		if (nand_erase_bmt(OFFSET(bmt_block_index))) {
			MSG("BMT block erase fail, mark bad: 0x%x\n", bmt_block_index);
			mark_block_bad_bmt(OFFSET(bmt_block_index), BMT_BADBLOCK_GENERATE_LATER);
			bmt_block_index = 0;
			return write_bmt_to_flash(dat, oob);	// recursive call
		}
	}

	// Non-zero return means the write failed.
	if ( nand_write_page_bmt(PAGE_ADDR(bmt_block_index), dat, oob) ) {
		MSG("Write BMT data fail \n");
		mark_block_bad_bmt(OFFSET(bmt_block_index), BMT_BADBLOCK_GENERATE_LATER);
		bmt_block_index = 0;
		return write_bmt_to_flash(dat, oob);	// recursive call
	}

	MSG("Write BMT to block %d success\n", bmt_block_index);
	return true;
}
// Выводит справку о запущенных процессах. Для каждого процесса печатается его // идентификатор, номер сегмента TSS и адрес каталога страниц. void scheduler_ps() { printf("PID\tFILENAME\tTSSs\tPG_DIR\n"); uint i; uchar name83[12]; name83[11] = 0; for (i = 0; i < NTasks; i++) { memcpy(&name83, &Task[i]->file.Name, 11); int j; for (j = 0; j < 11; j++) if (name83[j] == 0) name83[j] = ' '; printf("%d\t%s\t%xh\t%xh\n", Task[i]->pid, &name83, Task[i]->tsss, PAGE_ADDR(Task[i]->tss.cr3)); } }
// Persist the prepared BBT image (dat + oob) to flash. Keeps trying
// fresh blocks — marking failed ones bad — until a write succeeds or
// no available block remains.
static bool write_bbt_to_flash(u8 *dat, u8 *oob)
{
	for (;;) {
		bbt_block_index = find_available_block(false);
		if (!bbt_block_index) {
			MSG("Cannot find an available block for BBT\n");
			return false;
		}

		// Zero return means the page write succeeded.
		if (!nand_write_page_bmt(PAGE_ADDR(bbt_block_index), dat, oob))
			break;

		MSG("Write BBT data fail \n");
		mark_block_bad_bmt(OFFSET(bbt_block_index), BMT_BADBLOCK_GENERATE_LATER);
		bbt_block_index = 0;
	}

	MSG("Write BBT to block %d success\n", bbt_block_index);
	return true;
}
void put_video_ram(void) { char *putbuf = (char *) malloc(TEXT_SIZE); char *graph_mem; if (SCR_STATE.mapped) { debug_vid("put_video_ram called\n"); if (config.vga) { if (memmap(graph_mem = (void*)GRAPH_BASE, GRAPH_SIZE, PG_U | PG_W | PG_P, (void *)GRAPH_BASE) != 0) { error("put_video_ram: memmap failed for 0x%08x\n", GRAPH_BASE); leaveemu(ERR_PT); } if (dosemu_regs->mem && READ_BYTE(BIOS_VIDEO_MODE) == 3 && READ_BYTE(BIOS_CURRENT_SCREEN_PAGE) < 8) { memcpy((caddr_t) PAGE_ADDR(0), dosemu_regs->mem, dosemu_regs->save_mem_size[0]); } } else { memcpy(putbuf, SCR_STATE.virt_address, TEXT_SIZE); if (memmap(graph_mem = (void*)SCR_STATE.virt_address, TEXT_SIZE, PG_U | PG_W | PG_P, (void *)SCR_STATE.virt_address) != 0) { error("put_video_ram: memmap failed for 0x%08x\n", (u_int)SCR_STATE.virt_address); leaveemu(ERR_PT); } memcpy(SCR_STATE.virt_address, putbuf, TEXT_SIZE); } giveup_permissions(); SCR_STATE.mapped = 0; } else warn("VID: put_video-ram but not mapped!\n"); if (putbuf) free(putbuf); debug_vid("put_video_ram completed\n"); }
/*******************************************************************
 * Reconstruct bmt, called when found bmt info doesn't match bad
 * block info in flash.
 *
 * Return NULL for failure
 *******************************************************************/
bmt_struct *reconstruct_bmt(bmt_struct * bmt)
{
	int i;
	int index = system_block_count;	// first block of the replace pool
	unsigned short bad_index;
	int mapped;

	// Reset the in-RAM table before rebuilding it from flash.
	bmt->version = BMT_VERSION;
	bmt->bad_count = 0;
	bmt->mapped_count = 0;

	memset(bmt->table, 0, bmt_block_count * sizeof(bmt_entry));

	// Walk the replace pool and rebuild the mapping from the bad-index
	// value each replacement block stored in its OOB area.
	for (i = 0; i < bmt_block_count; i++, index++) {
		if (nand_block_bad_bmt(OFFSET(index))) {
			MSG(INFO, "Skip bad block: 0x%x\n", index);
			continue;
		}

		nand_read_page_bmt(PAGE_ADDR(index), dat_buf, oob_buf);

		// An index at/above system_block_count is not a valid mapping;
		// 0xFFFF just means "unused" (erased OOB) and is not an error.
		if ((bad_index = get_bad_index_from_oob(oob_buf)) >= system_block_count) {
			MSG(INIT, "get bad index: 0x%x\n", bad_index);
			if (bad_index != 0xFFFF)
				MSG(INIT, "Invalid bad index found in block 0x%x, bad index 0x%x\n", index, bad_index);
			continue;
		}

		MSG(INIT, "Block 0x%x is mapped to bad block: 0x%x\n", index, bad_index);

		// A mapping to a block that is not actually marked bad is bogus.
		if (!nand_block_bad_bmt(OFFSET(bad_index))) {
			MSG(INIT, "\tbut block 0x%x is not marked as bad, invalid mapping\n", bad_index);
			continue;	// no need to erase here, it will be erased later when trying to write BMT
		}

		if ((mapped = is_block_mapped(bad_index)) >= 0) {
			// The bad block already has a mapping (likely a power loss
			// mid-migration): point it at the newer replacement block.
			MSG(INIT, "bad block 0x%x is mapped to 0x%x, should be caused by power lost, replace with one\n", bmt->table[mapped].bad_index, bmt->table[mapped].mapped_index);
			bmt->table[mapped].mapped_index = index;	// use new one instead.
		} else {
			bmt->table[bmt->mapped_count].bad_index = bad_index;
			bmt->table[bmt->mapped_count].mapped_index = index;
			bmt->mapped_count++;
		}

		MSG(INIT, "Add mapping: 0x%x -> 0x%x to BMT\n", bad_index, index);
	}

	MSG(INIT, "Scan replace pool done, mapped block: %d\n", bmt->mapped_count);

	// Persist the rebuilt table back to flash.
	memset(oob_buf, 0xFF, sizeof(oob_buf));
	fill_nand_bmt_buffer(bmt, dat_buf, oob_buf);

	if (!write_bmt_to_flash(dat_buf, oob_buf)) {
		MSG(INIT, "TRAGEDY: cannot find a place to write BMT!!!!\n");
	}

	return bmt;
}
// Migrate the contents of the failing block containing `offset` into a
// freshly-found replacement block. The failing page is written first from
// the caller's buffers (reading it back from flash if write_dat is NULL);
// every other in-use page is then copied over. Returns the replacement
// block index, or 0 on failure.
static int migrate_from_bad(int offset, u8 * write_dat, u8 * write_oob)
{
	int page;
	int error_block = offset / BLOCK_SIZE_BMT;                  // failing block number
	int error_page = (offset / PAGE_SIZE_BMT) % page_per_block; // failing page within that block

	int to_index;

	memcpy(oob_buf, write_oob, MAX_OOB_SIZE);

	to_index = find_available_block(false);

	if (!to_index) {
		MSG(INIT, "Cannot find an available block for BMT\n");
		return 0;
	}

	{	// migrate error page first
		MSG(INIT, "Write error page: 0x%x\n", error_page);
		if (!write_dat) {
			// Caller supplied no data for the failing page: read it back.
			nand_read_page_bmt(PAGE_ADDR(error_block) + error_page, dat_buf, NULL);
			write_dat = dat_buf;
		}
		// memcpy(oob_buf, write_oob, MAX_OOB_SIZE);

		if (error_block < system_block_count)
			set_bad_index_to_oob(oob_buf, error_block);	// if error_block is already a mapped block, original mapping index is in OOB.

		if (!nand_write_page_bmt(PAGE_ADDR(to_index) + error_page, write_dat, oob_buf)) {
			MSG(INIT, "Write to page 0x%x fail\n", PAGE_ADDR(to_index) + error_page);
			mark_block_bad_bmt(OFFSET(to_index));
			// The chosen replacement failed too — retry with another block.
			return migrate_from_bad(offset, write_dat, write_oob);
		}
	}

	// Copy every other in-use page of the failing block.
	for (page = 0; page < page_per_block; page++) {
		if (page != error_page) {
			nand_read_page_bmt(PAGE_ADDR(error_block) + page, dat_buf, oob_buf);
			if (is_page_used(dat_buf, oob_buf)) {
				if (error_block < system_block_count) {
					set_bad_index_to_oob(oob_buf, error_block);
				}
				MSG(INIT, "\tmigrate page 0x%x to page 0x%x\n", PAGE_ADDR(error_block) + page, PAGE_ADDR(to_index) + page);
				if (!nand_write_page_bmt(PAGE_ADDR(to_index) + page, dat_buf, oob_buf)) {
					MSG(INIT, "Write to page 0x%x fail\n", PAGE_ADDR(to_index) + page);
					mark_block_bad_bmt(OFFSET(to_index));
					return migrate_from_bad(offset, write_dat, write_oob);
				}
			}
		}
	}

	MSG(INIT, "Migrate from 0x%x to 0x%x done!\n", error_block, to_index);

	return to_index;
}
// Launch the executable named Name (see comments in binfmt.c).
// Loads the A.OUT header, builds the page directory, TaskStruct and TSS
// for the new process, and registers it with the scheduler. Section pages
// themselves are loaded lazily by the page-fault handler (aout_pf).
void aout_load(char *Name)
{
	char name83[11];
	DirEntry Entry;
	Make83Name(Name, name83);
	if (FindEntry(0, name83, &Entry) == (uint)-1) {
		printf("Cannot open file '%s'!\n", Name);
		return;
	}
	// Load the A.OUT header.
	Exec exec;
	LoadPart(&Entry, &exec, 0, sizeof(Exec));
	// Check that the file is usable.
	if (N_MID(exec) != M_386) {
		printf("Only 386's binaries are supported\n");
		return;
	}
	if (N_MAGIC(exec) != ZMAGIC) {
		printf("Not-ZMAGIC binaries are not supported yet\n");
		return;
	}
	// Page count of each section (rounded up to whole pages).
	ushort TextPages = (exec.a_text + 0xfff) / PAGE_SIZE;
	ushort DataPages = (exec.a_data + 0xfff) / PAGE_SIZE;
	ushort BSSPages = (exec.a_bss + 0xfff) / PAGE_SIZE;
	// Create the page directory...
	ulong *pg_dir = (ulong*)alloc_first_page();
	// ...and install the system (kernel) page tables into it.
	memset(pg_dir, 0, PAGE_SIZE);
	pg_dir[0x200] = 0x3000 + SYS_PAGE_ATTR;
	pg_dir[0x201] = 0x4000 + SYS_PAGE_ATTR;
	// Create the TaskStruct for the process.
	TaskStruct *task = (TaskStruct*)alloc_first_page();
	// Fetch the GDT address and size.
	GDTDescriptor GDT;
	__asm__("sgdt %0":: "m" (GDT));
	ushort desc_count = (GDT.Size + 1) >> 3;
	// The new TSS descriptor will be appended at this slot.
	ushort tssn = desc_count;
	// Fill in the task attributes.
	task->pid = CurPID++;
	task->tsss = tssn << 3;
	task->BinFormat = BIN_AOUT;
	// Copy the DirEntry and the A.OUT header into the task.
	memcpy(&task->file, &Entry, sizeof(DirEntry));
	memcpy(&task->header, &exec, sizeof(Exec));
	// Fill in the TSS.
	task->tss.tl = 0;
	task->tss.esp0 = (ulong)&task->syscall_stack + sizeof(task->syscall_stack); // Stack for system calls
	task->tss.ss0 = KERNEL_DS;
	task->tss.cr3 = (ulong)pg_dir;
	task->tss.eip = exec.a_entry;
	task->tss.eflags = 0x200; // IF only
	task->tss.eax = task->tss.ebx = task->tss.ecx = task->tss.edx = task->tss.esi = task->tss.edi = 0;
	// The stack follows immediately after the other sections.
	task->tss.esp = task->tss.ebp = (TextPages+DataPages+BSSPages + USER_STACK_PAGES) * PAGE_SIZE - 4; // 4 bytes for the return address
	task->tss.cs = USER_CS;
	task->tss.es = task->tss.ss = task->tss.ds = task->tss.fs = task->tss.gs = USER_DS;
	task->tss.ldt = 0;
	task->tss.iomap_trace = 0;
	// So the process can terminate normally we must provide a return
	// address on its stack; the process jumps there when it returns from
	// main(). For that we map the page holding user_exit_code (head.S)
	// into the process address space, placed right after the stack
	// (FIXME: this lays a trap for future dynamic linking...).
	addr_t exit_page = (TextPages+DataPages+BSSPages+USER_STACK_PAGES) * PAGE_SIZE;
	// FIXME: A.OUT process termination has not been tested.
	addr_t stack_page = alloc_first_page();
	map_page(stack_page, task, PAGE_ADDR(task->tss.esp), PAGE_ATTR);
	*(ulong*)(stack_page+0xffc) = exit_page;
	map_page((addr_t)&user_exit_code, task, exit_page, PA_USER | PA_P | PA_NONFREE);
	// Create a GDT descriptor for the TSS.
	// FIXME: these must be removed when the process exits!
	// The TSS is addressed through high memory.
	ulong tss_addr = (ulong)&task->tss + 0x80000000;
	GDT.Addr = (Descriptor*)((ulong)GDT.Addr - 0x80000000);
	GDT.Addr[tssn].a = (tss_addr<<16)|0x0067;
	GDT.Addr[tssn].b = (tss_addr&0xff000000)|0x00408b00|((tss_addr>>16)&0xff);
	GDT.Addr = (Descriptor*)((ulong)GDT.Addr + 0x80000000);
	GDT.Size += 8; // One descriptor added
	__asm__("lgdt %0"::"m"(GDT));
	// Done — register the task with the scheduler.
	Task[NTasks] = task;
	NTasks++;
}
/* allows remapping even if memory is mapped in...this is useful, as it
 * remembers where the video mem *was* mapped, unmaps from that, and then
 * remaps it to where the text page number says it should be */
void get_video_ram(int waitflag)
{
	char *graph_mem;
	char *sbase;
	size_t ssize;
	char *textbuf = NULL, *vgabuf = NULL;

	debug_vid("get_video_ram STARTED\n");

	/* Pick the region to remap: full VGA graphics memory, or just the
	 * text page the BIOS says is current. */
	if (config.vga) {
		ssize = GRAPH_SIZE;
		sbase = (char *) GRAPH_BASE;
	}
	else {
		ssize = TEXT_SIZE;
		sbase = PAGE_ADDR(READ_BYTE(BIOS_CURRENT_SCREEN_PAGE));
	}

#if 0
	if (waitflag == WAIT) {
		config.console_video = 0;
		debug_vid("VID: get_video_ram WAITING\n");
		/* XXX - wait until our console is current (mixed signal functions) */
		do {
			if (!wait_vc_active ())
				break;
			debug_vid("Keeps waiting...And\n");
		} while (errno == EINTR);
	}
#endif

	if (config.vga) {
		debug("config.vga\n");
		/* Save all 8 text pages before the remap (text mode 3 only). */
		if (READ_BYTE(BIOS_VIDEO_MODE) == 3 && READ_BYTE(BIOS_CURRENT_SCREEN_PAGE) < 8) {
			textbuf = malloc(TEXT_SIZE * 8);
			if (!textbuf)
				leaveemu(ERR_MEM);
			memcpy(textbuf, PAGE_ADDR(0), TEXT_SIZE * 8);
		}
		if (SCR_STATE.mapped) {
			/* Copy VGA memory out, redo the mapping, copy it back. */
			vgabuf = (char *)malloc(GRAPH_SIZE);
			if (!vgabuf)
				leaveemu(ERR_MEM);
			memcpy(vgabuf, (caddr_t) GRAPH_BASE, GRAPH_SIZE);
			if (memmap(graph_mem = (void *)GRAPH_BASE, GRAPH_SIZE, PG_U | PG_W | PG_P, (void *)GRAPH_BASE) != 0) {
				error("memmap failed for 0x%08x\n", GRAPH_BASE);
				leaveemu(ERR_PT);
			}
			memcpy((caddr_t) GRAPH_BASE, vgabuf, GRAPH_SIZE);
		}
	}
	else {
		/* Text mode: save the current text page, then redo its mapping. */
		textbuf = (char *)malloc(TEXT_SIZE);
		if (!textbuf)
			leaveemu(ERR_MEM);
		memcpy(textbuf, SCR_STATE.virt_address, TEXT_SIZE);

		if (SCR_STATE.mapped) {
			if (memmap(graph_mem = SCR_STATE.virt_address, TEXT_SIZE, PG_U | PG_W | PG_P, SCR_STATE.virt_address) != 0) {
				error("memmap failed for 0x%08x\n", (u_int)SCR_STATE.virt_address);
				leaveemu(ERR_PT);
			}
			memcpy(SCR_STATE.virt_address, textbuf, TEXT_SIZE);
		}
	}

	SCR_STATE.mapped = 0;

	if (config.vga) {
		if (READ_BYTE(BIOS_VIDEO_MODE) == 3) {
			if (dosemu_regs->mem && textbuf)
				memcpy(dosemu_regs->mem, textbuf, dosemu_regs->save_mem_size[0]);
			/* else
			   error("ERROR: no dosemu_regs->mem!\n"); */
		}
		debug("mapping GRAPH_BASE\n");
		if (memmap(graph_mem = (void *) GRAPH_BASE, GRAPH_SIZE, PG_U | PG_W | PG_P, (void *) GRAPH_BASE) != 0) {
			error("memmap failed for 0x%08x\n", GRAPH_BASE);
			leaveemu(ERR_PT);
		}
		/* the code below is done by the video save/restore code */
		get_permissions();
	}
	else {
		/* this is used for page switching */
		if (PAGE_ADDR(READ_BYTE(BIOS_CURRENT_SCREEN_PAGE)) != SCR_STATE.virt_address)
			memcpy(textbuf, PAGE_ADDR(READ_BYTE(BIOS_CURRENT_SCREEN_PAGE)), TEXT_SIZE);

		debug("mapping PAGE_ADDR\n");

		if (memmap(graph_mem = PAGE_ADDR(READ_BYTE(BIOS_CURRENT_SCREEN_PAGE)), TEXT_SIZE, PG_U | PG_W | PG_P, (void*)phys_text_base) != 0) {
			error("memmap failed for 0x%08x\n", GRAPH_BASE);
			leaveemu(ERR_PT);
		}

#if 0
		/* Map CGA, etc text memory to HGA memory.
		   Useful for debugging systems with HGA or MDA cards. */
		graph_mem = (char *) mmap((caddr_t) 0xb8000, TEXT_SIZE,
		                          PROT_READ | PROT_WRITE,
		                          MAP_SHARED | MAP_FIXED,
		                          mem_fd, phys_text_base);
		if ((long) graph_mem < 0) {
			error("ERROR: mmap error in get_video_ram (text): %x, errno %d\n", (Bit32u) graph_mem, errno);
			return;
		}
		else
#endif
		debug_vid("CONSOLE VIDEO address: %p %p %p\n", (void *) graph_mem, (void *) phys_text_base, (void *) PAGE_ADDR(READ_BYTE(BIOS_CURRENT_SCREEN_PAGE)));

		get_permissions();
		/* copy contents of page onto video RAM */
		memcpy((caddr_t) PAGE_ADDR(READ_BYTE(BIOS_CURRENT_SCREEN_PAGE)), textbuf, TEXT_SIZE);
	}

	if (vgabuf)
		free(vgabuf);
	if (textbuf)
		free(textbuf);

	/* Record where the video memory now lives. */
	SCR_STATE.pageno = READ_BYTE(BIOS_CURRENT_SCREEN_PAGE);
	SCR_STATE.virt_address = PAGE_ADDR(READ_BYTE(BIOS_CURRENT_SCREEN_PAGE));
	SCR_STATE.phys_address = graph_mem;
	SCR_STATE.mapped = 1;
}
/*******************************************************************
 * Reconstruct bmt, called when found bmt info doesn't match bad
 * block info in flash.
 *
 * Return NULL for failure
 *******************************************************************/
bmt_struct *reconstruct_bmt(bmt_struct * bmt)
{
	int i;
	int index = system_block_count;	// first block of the replace pool
	unsigned short bad_index;
	int mapped;

	// init everything in BMT struct
	bmt->version = BMT_VERSION;
	bmt->bad_count = 0;
	bmt->mapped_count = 0;

	memset(bmt->table, 0, bmt_block_count * sizeof(bmt_entry));

	// Scan the replace pool and rebuild the mapping from the bad-index
	// value each replacement block stored in its OOB area.
	for (i = 0; i < bmt_block_count; i++, index++) {
		if (nand_block_bad_bmt(OFFSET(index))) {
			// MSG(INIT, "Skip bad block: 0x%x\n", index);
			// bmt->bad_count++;
			continue;
		}

		// MSG(INIT, "read page: 0x%x\n", PAGE_ADDR(index));
		nand_read_page_bmt(PAGE_ADDR(index), dat_buf, oob_buf);
		/* if (mt6573_nand_read_page_hw(PAGE_ADDR(index), dat_buf))
		   {
		       MSG(INIT, "Error when read block %d\n", bmt_block_index);
		       continue;
		   } */

		// An index at/above system_block_count is not a valid mapping;
		// 0xFFFF just means "unused" (erased OOB) and is not an error.
		if ((bad_index = get_bad_index_from_oob(oob_buf)) >= system_block_count) {
			// MSG(INIT, "get bad index: 0x%x\n", bad_index);
			if (bad_index != 0xFFFF)
				MSG(INIT, "warning @ 0x%x\n", index);
			continue;
		}

		// MSG(INIT, "Block 0x%x is mapped to bad block: 0x%x\n", index, bad_index);
		// A mapping to a block not actually marked bad is bogus.
		if (!nand_block_bad_bmt(OFFSET(bad_index))) {
			// MSG(INIT, "\tbut block 0x%x is not marked as bad, invalid mapping\n", bad_index);
			continue;	// no need to erase here, it will be erased later when trying to write BMT
		}

		if ( (mapped = is_block_mapped(bad_index)) >= 0) {
			// The bad block already has a mapping (likely a power loss
			// mid-migration): point it at the newer replacement block.
			// MSG(INIT, "bad block 0x%x is mapped to 0x%x, should be caused by power lost, replace with one\n",
			//      bmt->table[mapped].bad_index, bmt->table[mapped].mapped_index);
			bmt->table[mapped].mapped_index = index;	// use new one instead.
		} else {
			// add mapping to BMT
			bmt->table[bmt->mapped_count].bad_index = bad_index;
			bmt->table[bmt->mapped_count].mapped_index = index;
			bmt->mapped_count++;
		}

		MSG(INIT, "Add mapping: 0x%x -> 0x%x to BMT\n", bad_index, index);
	}

	MSG(INIT, "Scan replace pool done, mapped block: %d\n", bmt->mapped_count);
	// dump_bmt_info(bmt);

	// fill NAND BMT buffer
	memset(oob_buf, 0xFF, sizeof(oob_buf));
	fill_nand_bmt_buffer(bmt, dat_buf, oob_buf);

	// write BMT back
	if (!write_bmt_to_flash(dat_buf, oob_buf)) {
		MSG(INIT, "TRAGEDY\n");
	}

	return bmt;
}