// Set up a two-level page table:
//    kern_pgdir is the linear (virtual) address of the root.
//
// This function only sets up the kernel part of the address space
// (ie. addresses >= UTOP).  The user part of the address space
// will be set up later.
//
// From UTOP to ULIM, the user is allowed to read but not write.
// Above ULIM the user cannot read or write.
void
mem_init(void)
{
    uint32_t cr0, cr4;
    size_t n, i;

    // Find out how much memory the machine has (npages & npages_basemem).
    i386_detect_memory();

    // Remove this line when you're ready to test this function.

    //////////////////////////////////////////////////////////////////////
    // create initial page directory.
    kern_pgdir = (pde_t *) boot_alloc(PGSIZE);
    memset(kern_pgdir, 0, PGSIZE);

    //////////////////////////////////////////////////////////////////////
    // Recursively insert PD in itself as a page table, to form
    // a virtual page table at virtual address UVPT.
    // (For now, you don't have to understand the greater purpose of the
    // following line.)
    // Permissions: kernel R, user R
    kern_pgdir[PDX(UVPT)] = PADDR(kern_pgdir) | PTE_U | PTE_P;

    //////////////////////////////////////////////////////////////////////
    // Allocate an array of npages 'struct PageInfo's and store it in 'pages'.
    // The kernel uses this array to keep track of physical pages: for
    // each physical page, there is a corresponding struct PageInfo in this
    // array.  'npages' is the number of physical pages in memory.  Use memset
    // to initialize all fields of each struct PageInfo to 0.
    // Your code goes here:
    pages = (struct PageInfo *) boot_alloc(npages * sizeof(struct PageInfo));
    memset(pages, 0, npages * sizeof(struct PageInfo));

    //////////////////////////////////////////////////////////////////////
    // Make 'envs' point to an array of size 'NENV' of 'struct Env'.
    // LAB 3: Your code here.
    envs = (struct Env *) boot_alloc(NENV * sizeof(struct Env));
    memset(envs, 0, NENV * sizeof(struct Env));

    // LAB 6: allocate memory for the transmit descriptor array.
    //tx_desc = (struct e1000_tx_desc *)boot_alloc(sizeof(struct e1000_tx_desc)*E1000_TXD_TOTAL);
    //memset(tx_desc, 0, sizeof(struct e1000_tx_desc)*E1000_TXD_TOTAL);

    // Allocate a buffer for each transmit descriptor.
    // for (i = 0; i < E1000_TXD_TOTAL; i++)
    // {
    //     txd_buffer[i] = (char *)boot_alloc(sizeof(char)*E1000_TXD_BUFFER_SIZE);
    //     memset(txd_buffer[i], 0, E1000_TXD_BUFFER_SIZE);
    // }

    // rx_desc = (struct e1000_rx_desc *)boot_alloc(sizeof(struct e1000_rx_desc)*E1000_RXD_TOTAL);
    // //memset(rx_desc, 0, sizeof(struct e1000_rx_desc)*E1000_RXD_TOTAL);
    // // Allocate a buffer for each receive descriptor.
    // for (i = 0; i < E1000_RXD_TOTAL; i++)
    // {
    //     rxd_buffer[i] = (char *)boot_alloc(sizeof(char)*E1000_RXD_BUFFER_SIZE);
    //     memset(rxd_buffer[i], 0, E1000_RXD_BUFFER_SIZE);
    // }

    //////////////////////////////////////////////////////////////////////
    // Now that we've allocated the initial kernel data structures, we set
    // up the list of free physical pages.  Once we've done so, all further
    // memory management will go through the page_* functions.  In
    // particular, we can now map memory using boot_map_region
    // or page_insert.
    page_init();

    check_page_free_list(1);
    check_page_alloc();
    check_page();

    //////////////////////////////////////////////////////////////////////
    // Now we set up virtual memory

    //////////////////////////////////////////////////////////////////////
    // Map 'pages' read-only by the user at linear address UPAGES, i.e. map
    // the physical address of the 'pages' array at UPAGES.
    // Permissions:
    //    - the new image at UPAGES -- kernel R, user R
    //      (ie. perm = PTE_U | PTE_P)
    //    - pages itself -- kernel RW, user NONE
    // Your code goes here:
    boot_map_region(kern_pgdir, UPAGES,
        ROUNDUP(sizeof(struct PageInfo) * npages, PGSIZE),
        PADDR(pages), PTE_P | PTE_U);

    //////////////////////////////////////////////////////////////////////
    // Map the 'envs' array read-only by the user at linear address UENVS
    // (ie. perm = PTE_U | PTE_P).
    // Permissions:
    //    - the new image at UENVS  -- kernel R, user R
    //    - envs itself -- kernel RW, user NONE
    // LAB 3: Your code here.
    boot_map_region(kern_pgdir, UENVS,
        ROUNDUP(sizeof(struct Env) * NENV, PGSIZE),
        PADDR(envs), PTE_P | PTE_U);

    //////////////////////////////////////////////////////////////////////
    // Use the physical memory that 'bootstack' refers to as the kernel
    // stack.  The kernel stack grows down from virtual address KSTACKTOP.
    // We consider the entire range from [KSTACKTOP-PTSIZE, KSTACKTOP)
    // to be the kernel stack, but break this into two pieces:
    //     * [KSTACKTOP-KSTKSIZE, KSTACKTOP) -- backed by physical memory
    //     * [KSTACKTOP-PTSIZE, KSTACKTOP-KSTKSIZE) -- not backed; so if
    //       the kernel overflows its stack, it will fault rather than
    //       overwrite memory.  Known as a "guard page".
    //     Permissions: kernel RW, user NONE
    // Your code goes here:
    boot_map_region(kern_pgdir, KSTACKTOP - KSTKSIZE, KSTKSIZE,
        PADDR(bootstack), PTE_P | PTE_W);

    //////////////////////////////////////////////////////////////////////
    // Map all of physical memory at KERNBASE.
    // Ie.  the VA range [KERNBASE, 2^32) should map to
    //      the PA range [0, 2^32 - KERNBASE)
    // We might not have 2^32 - KERNBASE bytes of physical memory, but
    // we just set up the mapping anyway.
    // Permissions: kernel RW, user NONE
    // Your code goes here:

    // Initialize the SMP-related parts of the memory map
    mem_init_mp();

    boot_map_region(kern_pgdir, KERNBASE, ~0x0 - KERNBASE, 0, PTE_P | PTE_W);

    // Check that the initial page directory has been set up correctly.
    check_kern_pgdir();

    // Switch from the minimal entry page directory to the full kern_pgdir
    // page table we just created.  Our instruction pointer should be
    // somewhere between KERNBASE and KERNBASE+4MB right now, which is
    // mapped the same way by both page tables.
    //
    // If the machine reboots at this point, you've probably set up your
    // kern_pgdir wrong.
    //cprintf("\nnow kernel virtual address\n");
    //print_kerndir(kern_pgdir);
    lcr3(PADDR(kern_pgdir));
    //print_kerndir((pde_t *)UVPT);

    check_page_free_list(0);

    // entry.S set the really important flags in cr0 (including enabling
    // paging).  Here we configure the rest of the flags that we care about.
    cr0 = rcr0();
    cr0 |= CR0_PE|CR0_PG|CR0_AM|CR0_WP|CR0_NE|CR0_MP;
    cr0 &= ~(CR0_TS|CR0_EM);
    lcr0(cr0);

    // Some more checks, only possible after kern_pgdir is installed.
    check_page_installed_pgdir();
}
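/*
 * mem_init() above relies on boot_map_region() but does not show it.  The
 * following is a minimal sketch of how such a helper is commonly written in
 * JOS-style kernels; it assumes a pgdir_walk(pgdir, va, create) helper that
 * returns a pointer to the PTE for 'va', allocating a page table when
 * 'create' is set.  That helper and the exact permission handling are
 * assumptions, not taken from the code above.
 */
static void
boot_map_region(pde_t *pgdir, uintptr_t va, size_t size, physaddr_t pa, int perm)
{
    size_t off;
    pte_t *pte;

    // Walk the region one page at a time and install va+off -> pa+off.
    for (off = 0; off < size; off += PGSIZE) {
        pte = pgdir_walk(pgdir, (void *)(va + off), 1);
        if (pte == NULL)
            panic("boot_map_region: out of memory");
        *pte = (pa + off) | perm | PTE_P;
    }
}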
// Set up a two-level page table:
//    kern_pgdir is the linear (virtual) address of the root.
//
// This function only sets up the kernel part of the address space
// (ie. addresses >= UTOP).  The user part of the address space
// will be set up later.
//
// From UTOP to ULIM, the user is allowed to read but not write.
// Above ULIM the user cannot read or write.
void
mem_init(void)
{
    uint32_t cr0;
    size_t n;

    // Find out how much memory the machine has (npages & npages_basemem).
    i386_detect_memory();

    // Remove this line when you're ready to test this function.
    //panic("mem_init: This function is not finished\n");

    //////////////////////////////////////////////////////////////////////
    // create initial page directory.
    kern_pgdir = (pde_t *) boot_alloc(PGSIZE);
    memset(kern_pgdir, 0, PGSIZE);

    //////////////////////////////////////////////////////////////////////
    // Recursively insert PD in itself as a page table, to form
    // a virtual page table at virtual address UVPT.
    // (For now, you don't have to understand the greater purpose of the
    // following line.)
    // Permissions: kernel R, user R
    kern_pgdir[PDX(UVPT)] = PADDR(kern_pgdir) | PTE_U | PTE_P;

    //////////////////////////////////////////////////////////////////////
    // Allocate an array of npages 'struct PageInfo's and store it in 'pages'.
    // The kernel uses this array to keep track of physical pages: for
    // each physical page, there is a corresponding struct PageInfo in this
    // array.  'npages' is the number of physical pages in memory.
    // Your code goes here:
    // SUNUS, 22, October, 2013
    pages = boot_alloc(sizeof(struct PageInfo) * npages);

    //////////////////////////////////////////////////////////////////////
    // Now that we've allocated the initial kernel data structures, we set
    // up the list of free physical pages.  Once we've done so, all further
    // memory management will go through the page_* functions.  In
    // particular, we can now map memory using boot_map_region
    // or page_insert.
    page_init();

    check_page_free_list(1);
    check_page_alloc();
    check_page();

    //////////////////////////////////////////////////////////////////////
    // Now we set up virtual memory

    //////////////////////////////////////////////////////////////////////
    // Map 'pages' read-only by the user at linear address UPAGES
    // Permissions:
    //    - the new image at UPAGES -- kernel R, user R
    //      (ie. perm = PTE_U | PTE_P)
    //    - pages itself -- kernel RW, user NONE
    // Your code goes here:
    // SUNUS, Nov 26, 2013
    boot_map_region(kern_pgdir, UPAGES, PTSIZE, PADDR(pages), PTE_U|PTE_P);

    //////////////////////////////////////////////////////////////////////
    // Use the physical memory that 'bootstack' refers to as the kernel
    // stack.  The kernel stack grows down from virtual address KSTACKTOP.
    // We consider the entire range from [KSTACKTOP-PTSIZE, KSTACKTOP)
    // to be the kernel stack, but break this into two pieces:
    //     * [KSTACKTOP-KSTKSIZE, KSTACKTOP) -- backed by physical memory
    //     * [KSTACKTOP-PTSIZE, KSTACKTOP-KSTKSIZE) -- not backed; so if
    //       the kernel overflows its stack, it will fault rather than
    //       overwrite memory.  Known as a "guard page".
    //     Permissions: kernel RW, user NONE
    // Your code goes here:
    // SUNUS, Nov 27, 2013
    boot_map_region(kern_pgdir, KSTACKTOP - KSTKSIZE, KSTKSIZE,
        PADDR(bootstack), PTE_P|PTE_W);
    // The rest of the stack region is left as the guard area.
    boot_map_region(kern_pgdir, KSTACKTOP - PTSIZE, PTSIZE - KSTKSIZE, 0, 0);

    //////////////////////////////////////////////////////////////////////
    // Map all of physical memory at KERNBASE.
    // Ie.  the VA range [KERNBASE, 2^32) should map to
    //      the PA range [0, 2^32 - KERNBASE)
    // We might not have 2^32 - KERNBASE bytes of physical memory, but
    // we just set up the mapping anyway.
    // Permissions: kernel RW, user NONE
    // Your code goes here:
    boot_map_region(kern_pgdir, KERNBASE, 0x10000000, 0, PTE_P|PTE_W);

    // Check that the initial page directory has been set up correctly.
    check_kern_pgdir();

    // Switch from the minimal entry page directory to the full kern_pgdir
    // page table we just created.  Our instruction pointer should be
    // somewhere between KERNBASE and KERNBASE+4MB right now, which is
    // mapped the same way by both page tables.
    //
    // If the machine reboots at this point, you've probably set up your
    // kern_pgdir wrong.
    lcr3(PADDR(kern_pgdir));

    check_page_free_list(0);

    // entry.S set the really important flags in cr0 (including enabling
    // paging).  Here we configure the rest of the flags that we care about.
    cr0 = rcr0();
    cr0 |= CR0_PE|CR0_PG|CR0_AM|CR0_WP|CR0_NE|CR0_MP;
    cr0 &= ~(CR0_TS|CR0_EM);
    lcr0(cr0);

    // Some more checks, only possible after kern_pgdir is installed.
    check_page_installed_pgdir();
}
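/*
 * Both mem_init() versions above bootstrap their data structures with
 * boot_alloc().  This is a sketch of the usual JOS-style bump allocator; it
 * assumes the linker-provided 'end' symbol marking the end of the kernel's
 * .bss, and it is only meant to be used before page_init() hands memory
 * management over to the page_* functions.
 */
static void *
boot_alloc(uint32_t n)
{
    static char *nextfree;  // virtual address of the next byte of free memory
    char *result;

    // On first use, start allocating just past the loaded kernel image.
    if (nextfree == NULL) {
        extern char end[];
        nextfree = ROUNDUP((char *) end, PGSIZE);
    }

    // Hand out the current position, then bump it by n, page-aligned.
    result = nextfree;
    nextfree = ROUNDUP(nextfree + n, PGSIZE);
    return result;
}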
int
main(int argc, char *argv[])
{
    WT_SESSION *session;
    clock_t ce, cs;
    pthread_t idlist[100];
    uint64_t i, id;
    char buf[100];

    /* Bypass this test for valgrind */
    if (testutil_is_flag_set("TESTUTIL_BYPASS_VALGRIND"))
        return (EXIT_SUCCESS);

    opts = &_opts;
    memset(opts, 0, sizeof(*opts));
    opts->table_type = TABLE_ROW;
    opts->n_append_threads = N_APPEND_THREADS;
    opts->nrecords = N_RECORDS;
    testutil_check(testutil_parse_opts(argc, argv, opts));
    testutil_make_work_dir(opts->home);

    testutil_check(__wt_snprintf(buf, sizeof(buf),
        "create,"
        "cache_size=%s,"
        "eviction=(threads_max=5),"
        "statistics=(fast)",
        opts->table_type == TABLE_FIX ? "500MB" : "2GB"));
    testutil_check(wiredtiger_open(opts->home, NULL, buf, &opts->conn));
    testutil_check(
        opts->conn->open_session(opts->conn, NULL, NULL, &session));
    testutil_check(__wt_snprintf(buf, sizeof(buf),
        "key_format=r,value_format=%s,"
        "allocation_size=4K,leaf_page_max=64K",
        opts->table_type == TABLE_FIX ? "8t" : "S"));
    testutil_check(session->create(session, opts->uri, buf));
    testutil_check(session->close(session, NULL));

    page_init(5000);

    /* Force to disk and re-open. */
    testutil_check(opts->conn->close(opts->conn, NULL));
    testutil_check(wiredtiger_open(opts->home, NULL, NULL, &opts->conn));

    (void)signal(SIGINT, onsig);

    cs = clock();
    id = 0;
    for (i = 0; i < opts->n_append_threads; ++i, ++id) {
        printf("append: %" PRIu64 "\n", id);
        testutil_check(
            pthread_create(&idlist[id], NULL, thread_append, opts));
    }

    for (i = 0; i < id; ++i)
        testutil_check(pthread_join(idlist[i], NULL));

    ce = clock();
    printf("%" PRIu64 "M records: %.2lf processor seconds\n",
        opts->max_inserted_id / MILLION,
        (ce - cs) / (double)CLOCKS_PER_SEC);

    testutil_cleanup(opts);
    return (EXIT_SUCCESS);
}
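/*
 * The test above spawns n_append_threads workers running thread_append(),
 * which is not shown here.  This is a hypothetical sketch of such a worker
 * built on WiredTiger's append-cursor API; the TEST_OPTS fields it uses
 * (conn, uri, nrecords, max_inserted_id) come from the code above, while the
 * value written and the bookkeeping details are assumptions.
 */
static void *
thread_append(void *arg)
{
    TEST_OPTS *opts;
    WT_CURSOR *cursor;
    WT_SESSION *session;
    uint64_t i, recno;

    opts = (TEST_OPTS *)arg;
    testutil_check(
        opts->conn->open_session(opts->conn, NULL, NULL, &session));
    /* An "append" cursor on a key_format=r table allocates new record numbers. */
    testutil_check(
        session->open_cursor(session, opts->uri, NULL, "append", &cursor));

    for (i = 0; i < opts->nrecords; ++i) {
        cursor->set_value(cursor, "hello, world");  /* assumes value_format=S */
        testutil_check(cursor->insert(cursor));
        testutil_check(cursor->get_key(cursor, &recno));
        if (recno > opts->max_inserted_id)
            opts->max_inserted_id = recno;  /* racy, fine for a sketch */
    }

    testutil_check(cursor->close(cursor));
    testutil_check(session->close(session, NULL));
    return (NULL);
}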
const char *
convert_gpdb4_heap_file(const char *src, const char *dst,
                        bool has_numerics, AttInfo *atts, int natts)
{
    int         src_fd;
    int         dstfd;
    int         blkno;
    char        buf[BLCKSZ];
    ssize_t     bytesRead;
    const char *msg = NULL;

    curr_hasnumerics = has_numerics;
    curr_atts = atts;
    curr_natts = natts;

    page_init(overflow_buf);
    overflow_blkno = 0;

    if ((src_fd = open(src, O_RDONLY, 0)) < 0)
        return "can't open source file";

    if ((dstfd = open(dst, O_RDWR | O_CREAT | O_EXCL, S_IRUSR | S_IWUSR)) < 0)
    {
        close(src_fd);
        return "can't create destination file";
    }

    blkno = 0;
    curr_dstfd = dstfd;

    while ((bytesRead = read(src_fd, buf, BLCKSZ)) == BLCKSZ)
    {
        msg = convert_gpdb4_heap_page(buf);
        if (msg)
            break;

        /*
         * GPDB 4.x doesn't support checksums so we don't need to worry about
         * retaining an existing checksum like for upgrades from 5.x.  If we're
         * not adding them, we want a zeroed-out portion in the header.
         */
        if (user_opts.checksum_mode == CHECKSUM_ADD)
            ((PageHeader) buf)->pd_checksum = pg_checksum_page(buf, blkno);
        else
            memset(&(((PageHeader) buf)->pd_checksum), 0, sizeof(uint16));

        if (write(dstfd, buf, BLCKSZ) != BLCKSZ)
        {
            msg = "can't write new page to destination";
            break;
        }

        blkno++;
    }

    flush_overflow_page();

    close(src_fd);
    close(dstfd);

    if (msg)
        return msg;
    else if (bytesRead != 0)
        return "found partial page in source file";
    else
        return NULL;
}
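/*
 * convert_gpdb4_heap_file() prepares an overflow page with page_init() before
 * converting blocks.  This is a hypothetical sketch of such a helper, assuming
 * it simply wraps PostgreSQL's PageInit() to lay out an empty BLCKSZ heap page
 * (header initialized, no line pointers, no special space); the real helper in
 * the conversion code may do more.
 */
static void
page_init(char *page)
{
    /* PageInit zeroes the buffer and sets up pd_lower, pd_upper and pd_special. */
    PageInit((Page) page, BLCKSZ, 0);
}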
/**
 * This is the first real C function ever called.  It performs a lot of
 * hardware-specific initialization, then creates a pseudo-context to
 * execute the bootstrap function in.
 */
void
kmain()
{
    GDB_CALL_HOOK(boot);

    dbg_init();
    dbgq(DBG_CORE, "Kernel binary:\n");
    dbgq(DBG_CORE, "  text: 0x%p-0x%p\n", &kernel_start_text, &kernel_end_text);
    dbgq(DBG_CORE, "  data: 0x%p-0x%p\n", &kernel_start_data, &kernel_end_data);
    dbgq(DBG_CORE, "  bss:  0x%p-0x%p\n", &kernel_start_bss, &kernel_end_bss);

    page_init();

    pt_init();
    slab_init();
    pframe_init();

    acpi_init();
    apic_init();
    pci_init();
    intr_init();
    gdt_init();

    /* initialize slab allocators */
#ifdef __VM__
    anon_init();
    shadow_init();
#endif
    vmmap_init();
    proc_init();
    kthread_init();

#ifdef __DRIVERS__
    bytedev_init();
    blockdev_init();
#endif

    void *bstack = page_alloc();
    pagedir_t *bpdir = pt_get();
    KASSERT(NULL != bstack && "Ran out of memory while booting.");

    /* This little loop gives gdb a place to synch up with weenix.  In the
     * past the weenix command started qemu with -S, which allowed gdb to
     * connect and start before the boot loader ran, but since then a bug
     * has appeared where breakpoints fail if gdb connects before the boot
     * loader runs.  See
     *
     *   https://bugs.launchpad.net/qemu/+bug/526653
     *
     * This loop (along with an additional command in init.gdb setting
     * gdb_wait to 0) sticks weenix at a known place so gdb can join a
     * running weenix, set gdb_wait to zero and catch the breakpoint in
     * bootstrap below.  See Config.mk for how to set GDBWAIT correctly.
     *
     * DANGER: if GDBWAIT != 0, and gdb is not running, this loop will never
     * exit and weenix will not run.  Make SURE GDBWAIT is set the way
     * you expect. */
    while (gdb_wait)
        ;

    context_setup(&bootstrap_context, bootstrap, 0, NULL, bstack,
                  PAGE_SIZE, bpdir);
    context_make_active(&bootstrap_context);

    panic("\nReturned to kmain()!!!\n");
}
/*
 * Determine the version of the OS from a memory image:
 *  1. Load the signatures of several OS versions; each signature is the MD5
 *     of a page of kernel code.
 *  2. Scan all pages of the input memory image, generate the MD5 checksum of
 *     each candidate page, and compare it against all signatures; if they
 *     match, report the version of the OS.
 *  3. Done.
 *  4. (abandoned)
 */
void
determineOsVersion2(Mem *mem)
{
    // get signatures
    int osNumber = initDb();
    int pageSize = 4 * 1024;                           // 4 KB
    int totalPageNumber = mem->mem_size / (4 * 1024);  // assume every page is 4 KB

    // record when two pages have different page indexes but the same content
    int calledPages[totalPageNumber];
    int dsmPages[totalPageNumber];
    // record virtual addresses
    int i;
    unsigned virtualAddrs[totalPageNumber];

    for (i = 0; i < totalPageNumber; i++) {
        calledPages[i] = 0;
        dsmPages[i] = 0;
        virtualAddrs[i] = 0;
    }

    // start address
    unsigned start_vaddr = KERNEL_START_ADDRESS;
    unsigned vaddr = start_vaddr;
    int matchCount = 0;
    int matchPageIndex = 0;
    int availableOs[FINGERPRINT_NO];

    for (i = 0; i < FINGERPRINT_NO; i++)
        availableOs[i] = 1;

    // scan from the kernel start address until the 32-bit address space wraps
    for (; vaddr > start_vaddr - 1; vaddr += 0x1000) {
        int rw = 0, us = 0, g = 0, ps = 0;    // page size 4M or 4k
        unsigned pAddr = vtopPageProperty(mem->mem, mem->mem_size, mem->pgd,
                                          vaddr, &rw, &us, &g, &ps);
        if (pAddr == -1 || pAddr > mem->mem_size)
            continue;

        int pageIndex = pAddr / pageSize;
        if (us == 0 && g == 256 &&
            is_code_page(mem, vaddr, pageSize, virtualAddrs) == 0) {
            page_init(mem, pageIndex, pageSize, dsmPages, 0, vaddr, calledPages);
            if (dsmPages[pageIndex] != 1)
                continue;

            void *startAdress = (void *) ((unsigned) mem->mem + pageIndex * pageSize);
            unsigned char md5digest[16];
            MDMem(startAdress, pageSize, md5digest);
            MDPrint(md5digest);
            printf("\n");

            // search the hash table
            int ret = match(osNumber, md5digest, &matchPageIndex, availableOs);
            while (ret == 2) {
                matchPageIndex++;
                ret = match(osNumber, md5digest, &matchPageIndex, availableOs);
            }
            if (ret >= 0) {
                matchPageIndex++;
                matchCount++;
                if (ret == 1)
                    break;
            }
        }
    }

    if (matchCount == 0)
        puts("Unknown OS!");
    printf("match Count:%d\n", matchCount);
}
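/*
 * The scan above identifies an OS by comparing each code page's MD5 against a
 * table of per-OS fingerprints via match().  The return-code protocol of the
 * real match() (0/1/2 plus the availableOs bookkeeping) is not reproduced
 * here; this is only a hypothetical illustration of the core comparison,
 * assuming a fingerprint table of 16-byte digests loaded by initDb().
 */
typedef struct {
    const char *osName;
    int pageCount;
    unsigned char (*digests)[16];   /* pageCount MD5 digests of kernel code pages */
} OsFingerprint;

/* Return the index of the first fingerprint page matching 'digest', or -1. */
static int
fingerprint_find(const OsFingerprint *fp, const unsigned char digest[16])
{
    int i;

    for (i = 0; i < fp->pageCount; i++)
        if (memcmp(fp->digests[i], digest, 16) == 0)
            return i;
    return -1;
}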
// Set up a four-level page table:
//    boot_pml4e is the linear (virtual) address of the root.
//
// This function only sets up the kernel part of the address space
// (ie. addresses >= UTOP).  The user part of the address space
// will be set up later.
//
// From UTOP to ULIM, the user is allowed to read but not write.
// Above ULIM the user cannot read or write.
void
x64_vm_init(void)
{
    pml4e_t *pml4e;
    uint32_t cr0;
    int i;
    size_t n;
    int r;
    struct Env *env;

    i386_detect_memory();
    //panic("i386_vm_init: This function is not finished\n");

    //////////////////////////////////////////////////////////////////////
    // create initial page directory.
    ///panic("x64_vm_init: this function is not finished\n");
    pml4e = boot_alloc(PGSIZE);
    memset(pml4e, 0, PGSIZE);
    boot_pml4e = pml4e;
    boot_cr3 = PADDR(pml4e);

    //////////////////////////////////////////////////////////////////////
    // Allocate an array of npages 'struct Page's and store it in 'pages'.
    // The kernel uses this array to keep track of physical pages: for
    // each physical page, there is a corresponding struct Page in this
    // array.  'npages' is the number of physical pages in memory.
    // User-level programs will get read-only access to the array as well.
    // Your code goes here:
    pages = boot_alloc(npages * sizeof(struct Page));

    //////////////////////////////////////////////////////////////////////
    // Make 'envs' point to an array of size 'NENV' of 'struct Env'.
    // LAB 3: Your code here.
    envs = boot_alloc(NENV * sizeof(struct Env));

    //////////////////////////////////////////////////////////////////////
    // Now that we've allocated the initial kernel data structures, we set
    // up the list of free physical pages.  Once we've done so, all further
    // memory management will go through the page_* functions.  In
    // particular, we can now map memory using boot_map_segment or page_insert.
    page_init();

    check_page_free_list(1);
    check_page_alloc();
    page_check();

    //////////////////////////////////////////////////////////////////////
    // Now we set up virtual memory

    //////////////////////////////////////////////////////////////////////
    // Map 'pages' read-only by the user at linear address UPAGES
    // Permissions:
    //    - the new image at UPAGES -- kernel R, user R
    //      (ie. perm = PTE_U | PTE_P)
    //    - pages itself -- kernel RW, user NONE
    // Your code goes here:

    //////////////////////////////////////////////////////////////////////
    // Map the 'envs' array read-only by the user at linear address UENVS
    // (ie. perm = PTE_U | PTE_P).
    // Permissions:
    //    - the new image at UENVS  -- kernel R, user R
    //    - envs itself -- kernel RW, user NONE
    // LAB 3: Your code here.
    boot_map_segment(boot_pml4e, UPAGES,
        ROUNDUP(npages * sizeof(struct Page), PGSIZE),
        PADDR(pages), PTE_U | PTE_P);
    boot_map_segment(boot_pml4e, (uintptr_t) pages,
        ROUNDUP(npages * sizeof(struct Page), PGSIZE),
        PADDR(pages), PTE_P | PTE_W);
    boot_map_segment(boot_pml4e, UENVS,
        ROUNDUP(NENV * sizeof(struct Env), PGSIZE),
        PADDR(envs), PTE_U | PTE_P);
    boot_map_segment(boot_pml4e, (uintptr_t) envs,
        ROUNDUP(NENV * sizeof(struct Env), PGSIZE),
        PADDR(envs), PTE_P | PTE_W);

    //////////////////////////////////////////////////////////////////////
    // Use the physical memory that 'bootstack' refers to as the kernel
    // stack.  The kernel stack grows down from virtual address KSTACKTOP.
    // We consider the entire range from [KSTACKTOP-PTSIZE, KSTACKTOP)
    // to be the kernel stack, but break this into two pieces:
    //     * [KSTACKTOP-KSTKSIZE, KSTACKTOP) -- backed by physical memory
    //     * [KSTACKTOP-PTSIZE, KSTACKTOP-KSTKSIZE) -- not backed; so if
    //       the kernel overflows its stack, it will fault rather than
    //       overwrite memory.  Known as a "guard page".
    //     Permissions: kernel RW, user NONE
    // Your code goes here:
    boot_map_segment(boot_pml4e, KSTACKTOP - KSTKSIZE, KSTKSIZE,
        PADDR(bootstack), PTE_P | PTE_W);

    //////////////////////////////////////////////////////////////////////
    // Map all of physical memory at KERNBASE.
    // Ie.  the VA range [KERNBASE, 2^32) should map to
    //      the PA range [0, 2^32 - KERNBASE)
    // We might not have 2^32 - KERNBASE bytes of physical memory, but
    // we just set up the mapping anyway.
    // Permissions: kernel RW, user NONE
    // Your code goes here:
    boot_map_segment(boot_pml4e, KERNBASE, ~(uint32_t)0 - KERNBASE + 1,
        0, PTE_P | PTE_W);

    // Check that the initial page directory has been set up correctly.

    // Initialize the SMP-related parts of the memory map
    mem_init_mp();

    check_boot_pml4e(boot_pml4e);

    //////////////////////////////////////////////////////////////////////
    // Permissions: kernel RW, user NONE
    pdpe_t *pdpe = KADDR(PTE_ADDR(pml4e[0]));
    pde_t *pgdir = KADDR(PTE_ADDR(pdpe[3]));
    lcr3(boot_cr3);

    check_page_free_list(0);
}
// Set up a two-level page table:
//    kern_pgdir is the linear (virtual) address of the root.
// Then turn on paging.  Then effectively turn off segmentation.
// (i.e., the segment base addrs are set to zero).
//
// This function only sets up the kernel part of the address space
// (ie. addresses >= UTOP).  The user part of the address space
// will be set up later.
//
// From UTOP to ULIM, the user is allowed to read but not write.
// Above ULIM the user cannot read or write.
void
mem_init(void)
{
    uint32_t cr0;
    size_t n;

    // Ensure user & kernel struct Pages agree.
    static_assert(sizeof(struct Page) == sizeof(struct UserPage));

    // Find out how much memory the machine has (npages & npages_basemem).
    i386_detect_memory();

    // Remove this line when you're ready to test this function.
    //panic("mem_init: This function is not finished\n");

    //////////////////////////////////////////////////////////////////////
    // create initial page directory.
    kern_pgdir = (pde_t *) boot_alloc(PGSIZE);
    memset(kern_pgdir, 0, PGSIZE);

    //////////////////////////////////////////////////////////////////////
    // Recursively insert PD in itself as a page table, to form
    // a virtual page table at virtual address UVPT.
    // (For now, you don't have to understand the greater purpose of the
    // following line.)
    // Permissions: kernel R, user R
    kern_pgdir[PDX(UVPT)] = PADDR(kern_pgdir) | PTE_U | PTE_P;

    //////////////////////////////////////////////////////////////////////
    // Allocate an array of npages 'struct Page's and store it in 'pages'.
    // The kernel uses this array to keep track of physical pages: for
    // each physical page, there is a corresponding struct Page in this
    // array.  'npages' is the number of physical pages in memory.
    pages = (Page *) boot_alloc(npages * sizeof(struct Page));

    //////////////////////////////////////////////////////////////////////
    // Make 'envs' point to an array of size 'NENV' of 'struct Env'.
    // LAB 3: Your code here.
    envs = (Env *) boot_alloc(NENV * sizeof(struct Env));

    //////////////////////////////////////////////////////////////////////
    // Now that we've allocated the initial kernel data structures, we set
    // up the list of free physical pages.  Once we've done so, all further
    // memory management will go through the page_* functions.  In
    // particular, we can now map memory using page_map_segment
    // or page_insert.
    page_init();

    check_page_free_list(true);
    check_page_alloc();
    check_page();

    //////////////////////////////////////////////////////////////////////
    // Now we set up virtual memory

    //////////////////////////////////////////////////////////////////////
    // Use the physical memory that 'entry_stack' refers to as the kernel
    // stack.  The kernel stack grows down from virtual address KSTACKTOP.
    // We consider the entire range from [KSTACKTOP-PTSIZE, KSTACKTOP)
    // to be the kernel stack, but break this into two pieces:
    //     * [KSTACKTOP-KSTKSIZE, KSTACKTOP) -- backed by physical memory
    //     * [KSTACKTOP-PTSIZE, KSTACKTOP-KSTKSIZE) -- not backed; so if
    //       the kernel overflows its stack, it will fault rather than
    //       overwrite memory.  Known as a "guard page".
    //     Permissions: kernel RW, user NONE
    Page *pp = pa2page((physaddr_t) entry_stack - KERNBASE);
    for (uintptr_t ptr = KSTACKTOP - KSTKSIZE; ptr < KSTACKTOP; ptr += PGSIZE) {
        if (page_insert(kern_pgdir, pp, ptr, PTE_W | PTE_P) < 0)
            panic("Couldn't create page table entries for stack.\n");
        pp++;
    }

    //////////////////////////////////////////////////////////////////////
    // Map all of physical memory at KERNBASE.
    // Ie.  the VA range [KERNBASE, 2^32) should map to
    //      the PA range [0, 2^32 - KERNBASE)
    // We might not have 2^32 - KERNBASE bytes of physical memory, but
    // we just set up the mapping anyway.
    // Permissions: kernel RW, user NONE
    page_map_segment(kern_pgdir, KERNBASE, 0xFFFFFFFF - KERNBASE, 0x0,
        PTE_W | PTE_P);
    //print_page_table(kern_pgdir, false, false);

    //////////////////////////////////////////////////////////////////////
    // Map the 'envs' array read-only by the user at linear address UENVS.
    // Permissions: kernel R, user R
    // (That's the UENVS version; 'envs' itself is kernel RW, user NONE.)
    // LAB 3: Your code here.
    page_map_segment(kern_pgdir, (uintptr_t) UENVS,
        ROUNDUP(NENV * sizeof(struct Env), PGSIZE),
        PADDR(envs), PTE_U | PTE_P);

    //////////////////////////////////////////////////////////////////////
    // Map 'pages' read-only by the user at linear address UPAGES.
    // Permissions: kernel R, user R
    // (That's the UPAGES version; 'pages' itself is kernel RW, user NONE.)
    // LAB 3: Your code here.
    page_map_segment(kern_pgdir, UPAGES,
        ROUNDUP(npages * sizeof(struct Page), PGSIZE),
        PADDR(pages), PTE_U | PTE_P);

    // Check that the initial page directory has been set up correctly.
    check_kern_pgdir();

    // Switch from the minimal entry page directory to the full kern_pgdir
    // page table we just created.  Our instruction pointer should be
    // somewhere between KERNBASE and KERNBASE+4MB right now, which is
    // mapped the same way by both page tables.
    //
    // If the machine reboots at this point, you've probably set up your
    // kern_pgdir wrong.
    lcr3(PADDR(kern_pgdir));

    // entry.S set the really important flags in cr0 (including enabling
    // paging).  Here we configure the rest of the flags we need.
    cr0 = rcr0();
    cr0 |= CR0_PE|CR0_PG|CR0_AM|CR0_WP|CR0_NE|CR0_MP;
    cr0 &= ~(CR0_TS|CR0_EM);
    lcr0(cr0);

    // Some more checks, only possible after kern_pgdir is installed.
    check_page_installed_pgdir();
}
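/*
 * Each of the mem_init()/x64_vm_init() variants above calls page_init() to
 * build the free-page list.  This is a minimal sketch of the usual JOS Lab 2
 * logic, assuming the standard npages_basemem, IOPHYSMEM/EXTPHYSMEM layout and
 * that boot_alloc(0) returns the first address beyond the kernel's own data;
 * the exact reserved ranges differ between course variants (e.g. the SMP
 * versions also reserve MPENTRY_PADDR).
 */
void
page_init(void)
{
    size_t i;
    size_t first_free = PADDR(boot_alloc(0)) / PGSIZE;

    page_free_list = NULL;
    for (i = 0; i < npages; i++) {
        // Page 0 (real-mode IDT and BIOS structures), the IO hole, and
        // everything the kernel has already taken stay off the free list.
        if (i == 0 || (i >= IOPHYSMEM / PGSIZE && i < first_free)) {
            pages[i].pp_ref = 1;
            pages[i].pp_link = NULL;
        } else {
            pages[i].pp_ref = 0;
            pages[i].pp_link = page_free_list;
            page_free_list = &pages[i];
        }
    }
}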
int
wmllogin(char *buf)
{
    char id[IDLEN + 2], pw[20];
    struct userec *x;
    struct user_info *uol[MULTI_LOGINS];
    char buf2[256], filename[256];
    int i, kick;

    page_init(NULL);
    strncpy(id, getparm("id"), IDLEN + 1);
    strncpy(pw, getparm("pw"), 19);
    if (!*pw) {
        strncpy(pw, getparm("pw2"), 19);
    }
    kick = atoi(getparm("kick")) - 1;

    if (!strcasecmp(id, "SYSOP")) {
        strcpy(buf, "用户SYSOP登录受限。");    /* "SYSOP logins are restricted." */
        return -65536;
    }
    if (file_has_word(".bad_host", fromhost)) {
        /* "Sorry, this site does not accept logins from [%s]. Contact SYSOP if in doubt." */
        sprintf(buf, "对不起, 本站不欢迎来自 [%s] 的登录。 若有疑问, 请与SYSOP联系,", fromhost);
        return -256;
    }
    if (loginok && strcasecmp(id, currentuser.userid)) {
        /* "Account %s is already logged in from this computer; please log out first." */
        sprintf(buf, "系统检测到目前你的计算机上已经登录有一个帐号 %s,请先退出。", currentuser.userid);
        return 1;
    }
    x = getuser(id);
    if (!x) {
        strcpy(buf, "错误的使用者帐号");    /* "Invalid user account." */
        return -1;
    }
    sprintf(buf2, "home/%c/%s/badhost", toupper(x->userid[0]), x->userid);
    if (bad_host(fromhost, buf2)) {
        /* "Sorry, this account may not log in from [%s]." */
        sprintf(buf, "对不起,此帐号已被设定为不可从 [%s] 登录本站。", fromhost);
        return -257;
    }
    if (strcasecmp(id, "guest")) {
        if (!checkpasswd(x->passwd, pw)) {
            if (*pw) {
                sleep(2);
                getdatestring(time(0), NA);
                sprintf(buf2, "%-12.12s %-30s %s[Wap]\n", id, datestring, fromhost);
                sprintf(filename, "home/%c/%s/logins.bad", toupper(x->userid[0]), x->userid);
                f_append(filename, buf2);
            }
            strcpy(buf, "密码错误");    /* "Wrong password." */
            return -2;
        }
        if (check_login_limit(x)) {
            /* "This ID has logged in too many times in the past 24 hours; try again later." */
            strcpy(buf, "此ID在24小时内上站次数过多,请稍候再来。");
            return -4;
        }
        if (!user_perm(x, PERM_BASIC)) {
            /* "This account has been suspended. If in doubt, ask on the sysop board with another account." */
            strcpy(buf, "此帐号已被停机。若有疑问,请用其他帐号在sysop版询问。");
            return -5;
        }
        if (check_multi_d(x, uol, kick)) {
            wml_httpheader();
            wml_head();
            printf("<card title=\"登录 -- %s\">", BBSNAME);    /* "Login -- %s" */
            /* "User %s already has %d sessions on this site; kick one to log in." */
            printf("<p>用户%s已经在本站登录了%d个线程,你需要踢掉一个才能登录。<br />",
                   x->userid, MULTI_LOGINS);
            for (i = 0; i < MULTI_LOGINS; i++) {
                /* "#%d <host> <mode> idle %d minutes" */
                printf("#%d %s %s%s 发呆%d分<br />", i, uol[i]->from,
                       uol[i]->mode >= 20000 ? "@" : "",
                       ModeType(uol[i]->mode >= 20000 ? uol[i]->mode - 20000 : uol[i]->mode),
                       (time(0) - uol[i]->idle_time) / 60);
            }
            printf("踢掉哪个:<select name=\"inp_kick\">");    /* "Which one to kick:" */
            for (i = 0; i < MULTI_LOGINS; i++) {
                printf("<option value=\"%d\">%d</option>", i + 1, i + 1);
            }
            printf("</select><br />");
            /* "Your password:" */
            printf("您的密码:<input type=\"password\" maxlength=\"8\" name=\"inp_pw\" /><br />");
            /* "Log in" */
            printf("<anchor><go href=\"login.wml?id=%s\" method=\"post\">"
                   "<postfield name=\"pw\" value=\"$(inp_pw)\" />"
                   "<postfield name=\"kick\" value=\"$(inp_kick)\" /></go>登录</anchor></p>",
                   x->userid);
            return 0;
        }
        x->lastlogin = time(0);
        x->numlogins++;
        strsncpy(x->lasthost, fromhost, 17);
        save_user_data(x);
        currentuser = *x;
    }
    report("WapEnter");

    int iutmpnum, iutmpkey;
    if (!wwwlogin(x, &iutmpnum, &iutmpkey)) {    /* 0: succeed */
        encodingtest();
        sprintf(buf2, "%d", iutmpnum);
        headerCookie("utmpnum", buf2);
        sprintf(buf2, "%d", iutmpkey);
        headerCookie("utmpkey", buf2);
        headerCookie("utmpuserid", currentuser.userid);
        wml_httpheader();
    } else {
        /* "Sorry, too many users are logged in; please try again later :(" */
        strcpy(buf, "抱歉,登录人数太多,请稍候再来:(");
        return -65537;
    }
    sprintf(buf, "用户 %s 登录成功。", x->userid);    /* "User %s logged in successfully." */
    wml_head();
    printf("<card title=\"登录 -- %s\" ontimer=\"%s\">", BBSNAME, "bbsboa.wml");
    printf("<timer value=\"50\" />");
    printf("<p>");
    w_hprintf(buf);
    printf("</p>");
    printf("<p>跳转中……</p>");    /* "Redirecting..." */
    /* "If you are not redirected automatically, use this link." */
    printf("<p><anchor><go href=\"%s\" />如果不能自动跳转,请使用此链接。</anchor></p>", "bbsboa.wml");
    return 0;
}
void
shim_init(void)
{
    for (int i = 1; i <= SIZES; i++) {
        page_init(&pages[i], 1 << i);
    }
}
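/*
 * shim_init() above initializes one pool per power-of-two size class.  The
 * declarations below are a hypothetical sketch of what 'pages', SIZES and
 * page_init() might look like in such a size-class shim; in the real code the
 * definitions (and the actual value of SIZES) live elsewhere and would appear
 * before shim_init().
 */
#define SIZES 12                     /* assumed value, for illustration only */

struct page_pool {
    size_t block_size;               /* size of each block handed out from this pool */
    void *free_list;                 /* singly linked list of free blocks */
};

static struct page_pool pages[SIZES + 1];    /* index 0 unused; classes 1..SIZES */

/* Prepare an empty pool for blocks of 'block_size' bytes. */
static void
page_init(struct page_pool *pool, size_t block_size)
{
    pool->block_size = block_size;
    pool->free_list = NULL;
}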