Example #1
File: ram.c  Project: lekkerbit/os161
/*
 * Called very early in system boot to figure out how much physical
 * RAM is available.
 */
void
ram_bootstrap(void)
{
	size_t ramsize;

	/* Get size of RAM. */
	ramsize = mainbus_ramsize();

	/*
	 * This is the same as the last physical address, as long as
	 * we have less than 512 megabytes of memory. If we had more,
	 * we wouldn't be able to access it all through kseg0 and
	 * everything would get a lot more complicated. This is not a
	 * case we are going to worry about.
	 */
	if (ramsize > 512*1024*1024) {
		ramsize = 512*1024*1024;
	}

	lastpaddr = ramsize;

	/*
	 * Get first free virtual address from where start.S saved it.
	 * Convert to physical address.
	 */
	firstpaddr = firstfree - MIPS_KSEG0;

	kprintf("%uk physical memory available\n",
		(lastpaddr-firstpaddr)/1024);
}
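
For context, the firstpaddr and lastpaddr values computed above are what the early page-stealing allocator draws from before the VM system is bootstrapped. A minimal sketch of that allocator, modeled on the stock OS/161 ram.c (this project's version may differ):

paddr_t
ram_stealmem(unsigned long npages)
{
	size_t size;
	paddr_t paddr;

	size = npages * PAGE_SIZE;

	/* Refuse the request if it would run past the end of physical RAM. */
	if (firstpaddr + size > lastpaddr) {
		return 0;
	}

	/* Hand out pages from the bottom of the free region and bump the cursor. */
	paddr = firstpaddr;
	firstpaddr += size;

	return paddr;
}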
Example #2
/*
 * coremap_init: set up the physical-memory bookkeeping. mem_map records
 * which pages are in use; coremap records the physical address backing
 * each tracked page.
 */
void coremap_init() {
	/* Pages we can manage: RAM size minus what the kernel has already
	   claimed (ram_stealmem(0) reports the first free physical address),
	   less one page of slack. */
	int v_size = ( mainbus_ramsize()-(ram_stealmem(0) + PAGE_SIZE) )/PAGE_SIZE;
	mem_map = bitmap_create(v_size);
	coremap = (struct page_table_entry*)kmalloc(v_size * sizeof(struct page_table_entry));
	/* First physical address handed out by the coremap. */
	coremap_start = ram_stealmem(0);
	for(int i = 0; i < v_size; i++) {
		coremap[i].paddr = (coremap_start + (i * PAGE_SIZE));
	}
	coremap_size = v_size;
	kprintf("COREMAP INIT: %d %d\n",coremap_start,coremap_size);
}
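
A hypothetical companion to the initializer above, showing how mem_map and coremap would typically be consulted to hand out a page. coremap_alloc_page is not part of the example; it assumes OS/161's bitmap_alloc, which finds and sets the first clear bit and fails when the map is full:

/* Sketch only: allocate one free physical page tracked by the coremap. */
paddr_t coremap_alloc_page(void) {
	unsigned idx;

	/* Find and mark the first free page in the usage bitmap. */
	if (bitmap_alloc(mem_map, &idx)) {
		return 0;	/* no free physical pages left */
	}

	return coremap[idx].paddr;
}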
Example #3
File: swap.c  Project: script3r/os161
void
swap_bootstrap() {
	char		sdevice[64];
	int		res;
	size_t		ram_size;
	size_t		swap_size;

	//get the ram size.
	ram_size = ROUNDUP( mainbus_ramsize(), PAGE_SIZE );

	//prepare to open the swap device.
	strcpy( sdevice, SWAP_DEVICE );

	//open.
	res = vfs_open( sdevice, O_RDWR, 0, &vn_sw );
	if( res )
		panic( "swap_bootstrap: could not open swapping partition." );
	
	//make sure it is of sufficient size.
	if( !swap_device_suficient( ram_size, &swap_size ) )
		panic( "swap_bootstrap: the swap partition is not large enough." );
	
	//init the stats.
	swap_init_stats( swap_size );

	//create the bitmap to manage the swap partition.
	bm_sw = bitmap_create( ss_sw.ss_total );
	if( bm_sw == NULL ) 
		panic( "swap_bootstrap: could not create the swap bitmap." );

	lk_sw = lock_create( "lk_sw" );
	if( lk_sw == NULL )
		panic( "swap_bootstrap: could not create the swap lock." );

	//reserve the first page of swap.
	bitmap_mark( bm_sw, 0 );

	//update stats.
	--ss_sw.ss_free;
}
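
As a hedged illustration of how the pieces created here fit together, a slot allocator might look like the sketch below. swap_alloc_slot is not part of the example; it only uses the bm_sw, lk_sw, and ss_sw objects from swap_bootstrap plus OS/161's bitmap_alloc:

//sketch only: reserve one swap slot and return its index.
static unsigned
swap_alloc_slot() {
	unsigned slot;

	lock_acquire( lk_sw );

	//bitmap_alloc finds and marks the first clear bit.
	if( bitmap_alloc( bm_sw, &slot ) )
		panic( "swap_alloc_slot: out of swap space." );

	//update stats.
	--ss_sw.ss_free;
	lock_release( lk_sw );

	return slot;
}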
Example #4
File: swap.c  Project: Adam-Koza/A3
/*
 * swap_bootstrap: Initializes swap information and finishes
 * bootstrapping the VM so that processes can use it.
 *
 * Synchronization: none (runs during boot before anyone else uses VM)
 */
void
swap_bootstrap(void)
{
	int rv;
	struct stat st;
	char path[sizeof(swapfilename)];
	off_t minsize;
	size_t pmemsize;

	pmemsize = mainbus_ramsize();

	strcpy(path, swapfilename);
	rv = vfs_open(path, O_RDWR, 0, &swapstore);
	if (rv) {
		kprintf("swap: Error %d opening swapfile %s\n", rv, 
			swapfilename);
		kprintf("swap: Please create swapfile/swapdisk.\n");
		panic("swap: Unable to continue.\n");
	}

	minsize = pmemsize*20;

	VOP_STAT(swapstore, &st);
	if (st.st_size < minsize) {
		kprintf("swap: swapfile %s is only %lu bytes.\n", swapfilename,
			(unsigned long) st.st_size);
		kprintf("swap: with %lu bytes of physical memory it should "
			"be at least\n", (unsigned long) pmemsize);
		kprintf("      %lu bytes (%lu blocks), perhaps larger.\n", 
			(unsigned long) minsize, 
			(unsigned long) minsize / 512);
		kprintf("swap: Because we conservatively reserve swap, a "
			"large amount may be\n");
		kprintf("      needed to run large workloads.\n");
		kprintf("swap: Please extend it.\n");
		panic("swap: Unable to continue.\n");
	}

	kprintf("swap: swapping to %s (%lu bytes; %lu pages)\n", swapfilename,
		(unsigned long) st.st_size, 
		(unsigned long) st.st_size / PAGE_SIZE);

	swap_total_pages = st.st_size / PAGE_SIZE;
	swap_free_pages = swap_total_pages;
	swap_reserved_pages = 0;

	swapmap = bitmap_create(st.st_size/PAGE_SIZE);
	DEBUG(DB_VM, "creating swap map with %lld entries\n",
			st.st_size/PAGE_SIZE);
	if (swapmap == NULL) {
		panic("swap: No memory for swap bitmap\n");
	}

	swaplock = lock_create("swaplock");
	if (swaplock == NULL) {
		panic("swap: No memory for swap lock\n");
	}

	/* mark the first page of swap used so we can check for errors */
	bitmap_mark(swapmap, 0);
	swap_free_pages--;
}
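
For completeness, a hedged sketch of the I/O path this bootstrap code sets up. swap_write_page is hypothetical (it is not in the example); it relies on the standard OS/161 uio_kinit/VOP_WRITE kernel interfaces and the swapstore vnode opened above:

/* Sketch only: write one page of kernel memory to swap slot 'index'. */
static void
swap_write_page(void *kvaddr, unsigned index)
{
	struct iovec iov;
	struct uio ku;
	int result;

	/* Build a kernel uio describing PAGE_SIZE bytes at the slot's offset. */
	uio_kinit(&iov, &ku, kvaddr, PAGE_SIZE,
		  (off_t)index * PAGE_SIZE, UIO_WRITE);

	result = VOP_WRITE(swapstore, &ku);
	if (result) {
		panic("swap: write error %d on slot %u\n", result, index);
	}
}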
Example #5
File: kmalloctest.c  Project: riaz/os161
/*
 * Allocate and free all physical memory a number of times. Along the way, we
 * check coremap_used_bytes to make sure it's reporting the number we're
 * expecting.
 */
int
kmalloctest5(int nargs, char **args)
{
#define KM5_ITERATIONS 5

	// We're expecting an even number of arguments, less arg[0].
	if (nargs > 5 || (nargs % 2) == 0) {
		km5_usage();
		return 0;
	}

	unsigned avail_page_slack = 0, kernel_page_limit = 0;
	int arg = 1;

	while (arg < nargs) {
		if (strcmp(args[arg], "--avail") == 0) {
			arg++;
			avail_page_slack = atoi(args[arg++]);
		} else if (strcmp(args[arg], "--kernel") == 0) {
			arg++;
			kernel_page_limit = atoi(args[arg++]);
		} else {
			km5_usage();
			return 0;
		}
	}

#if OPT_DUMBVM
	kprintf("(This test will not work with dumbvm)\n");
#endif

	// First, we need to figure out how much memory we're running with and how
	// much space it will take up if we maintain a pointer to each allocated
	// page. We do something similar to km3 - for 32 bit systems with
	// PAGE_SIZE == 4096, we can store 1024 pointers on a page. We keep an array
	// of page size blocks of pointers which in total can hold enough pointers
	// for each page of available physical memory.
	unsigned orig_used, ptrs_per_page, num_ptr_blocks, max_pages;
	unsigned total_ram, avail_ram, magic, orig_magic, known_pages;

	ptrs_per_page = PAGE_SIZE / sizeof(void *);
	total_ram = mainbus_ramsize();
	avail_ram = total_ram - (uint32_t)(firstfree - MIPS_KSEG0);
	max_pages = (avail_ram + PAGE_SIZE-1) / PAGE_SIZE;
	num_ptr_blocks = (max_pages + ptrs_per_page-1) / ptrs_per_page;

	// The array can go on the stack; we won't have that many
	// (sys161 16M max => 4 blocks)
	void **ptrs[num_ptr_blocks];

	for (unsigned i = 0; i < num_ptr_blocks; i++) {
		ptrs[i] = kmalloc(PAGE_SIZE);
		if (ptrs[i] == NULL) {
			panic("Can't allocate ptr page!");
		}
		bzero(ptrs[i], PAGE_SIZE);
	}

	kprintf("km5 --> phys ram: %uk avail ram: %uk  (%u pages) ptr blocks: %u\n", total_ram/1024,
		avail_ram/1024, max_pages, num_ptr_blocks);

	// Initially, there must be at least 1 page allocated for each thread stack,
	// one page for kmalloc for this thread struct, plus what we just allocated.
	// This probably isn't the GLB, but it's a decent lower bound.
	orig_used = coremap_used_bytes();
	known_pages = num_cpus + num_ptr_blocks + 1;
	if (orig_used < known_pages * PAGE_SIZE) {
		panic ("Not enough pages initially allocated");
	}
	if ((orig_used % PAGE_SIZE) != 0) {
		panic("Coremap used bytes should be a multiple of PAGE_SIZE");
	}

	// Test for kernel bloat.
	if (kernel_page_limit > 0) {
		uint32_t kpages = (total_ram - avail_ram + PAGE_SIZE) / PAGE_SIZE;
		if (kpages > kernel_page_limit) {
			panic("You're kernel is bloated! Max allowed pages: %d, used pages: %d",
				kernel_page_limit, kpages);
		}
	}

	orig_magic = magic = random();

	for (int i = 0; i < KM5_ITERATIONS; i++) {
		// Step 1: allocate all physical memory, with checks along the way
		unsigned int block, pos, oom, pages, used, prev;
		void *page;

		block = pos = oom = pages = used = 0;
		prev = orig_used;

		while (pages < max_pages+1) {
			PROGRESS(pages);
			page = kmalloc(PAGE_SIZE);
			if (page == NULL) {
				oom = 1;
				break;
			}

			// Make sure we can write to the page
			*(uint32_t *)page = magic++;

			// Make sure the number of used bytes is going up, and by increments of PAGE_SIZE
			used = coremap_used_bytes();
			if (used != prev + PAGE_SIZE) {
				panic("Allocation not equal to PAGE_SIZE. prev: %u used: %u", prev, used);
			}
			prev = used;

			ptrs[block][pos] = page;
			pos++;
			if (pos >= ptrs_per_page) {
				pos = 0;
				block++;
			}
			pages++;
		}

		// Step 2: Check that we were able to allocate a reasonable number of pages
		unsigned expected;
		if (avail_page_slack > 0) {
			// max avail pages, minus what we know is already allocated, minus some slack
			expected = max_pages - (known_pages + avail_page_slack);
		} else {
			// At the very least, just so we know things are working.
			expected = 3;
		}

		if (pages < expected) {
			panic("Expected to allocate at least %d pages, only allocated %d",
				expected, pages);
		}

		// We tried to allocate 1 more page than is available in physical memory. That
		// should fail unless you're swapping out kernel pages, which you should
		// probably not be doing.
		if (!oom) {
			panic("Allocated more pages than physical memory. Are you swapping kernel pages?");
		}

		// Step 3: free everything and check that we're back to where we started
		for (block = 0; block < num_ptr_blocks; block++) {
			for (pos = 0; pos < ptrs_per_page; pos++) {
				if (ptrs[block][pos] != NULL) {
					// Make sure we got unique addresses
					if ((*(uint32_t *)ptrs[block][pos]) != orig_magic++) {
						panic("km5: expected %u got %u - your VM is broken!",
							orig_magic-1, (*(uint32_t *)ptrs[block][pos]));
					}
					kfree(ptrs[block][pos]);
				}
			}
		}

		// Check that we're back to where we started
		used = coremap_used_bytes();
		if (used != orig_used) {
			panic("orig (%u) != used (%u)", orig_used, used);
		}
	}

	// Clean up the pointer blocks.
	for (unsigned i = 0; i < num_ptr_blocks; i++) {
		kfree(ptrs[i]);
	}

	kprintf("\n");
	success(TEST161_SUCCESS, SECRET, "km5");

	return 0;
}
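
As a usage note (the exact menu wiring depends on how the project registers its kmalloc tests), km5 is normally run from the kernel menu, optionally with both checks enabled; the slack and limit values here are purely illustrative:

	OS/161 kernel [? for menu]: km5 --avail 8 --kernel 96

--avail bounds how many fewer pages than the theoretical maximum the test will tolerate allocating, and --kernel caps how many pages the kernel image and early allocations may occupy before the test declares the kernel bloated.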