Example #1
status_t
PrecacheIO::Prepare(vm_page_reservation* reservation)
{
	if (fPageCount == 0)
		return B_BAD_VALUE;

	fPages = new(std::nothrow) vm_page*[fPageCount];
	if (fPages == NULL)
		return B_NO_MEMORY;

	fVecs = new(std::nothrow) generic_io_vec[fPageCount];
	if (fVecs == NULL)
		return B_NO_MEMORY;

	// allocate pages for the cache and mark them busy
	uint32 i = 0;
	for (generic_size_t pos = 0; pos < fSize; pos += B_PAGE_SIZE) {
		vm_page* page = vm_page_allocate_page(reservation,
			PAGE_STATE_CACHED | VM_PAGE_ALLOC_BUSY);

		fCache->InsertPage(page, fOffset + pos);

		add_to_iovec(fVecs, fVecCount, fPageCount,
			page->physical_page_number * B_PAGE_SIZE, B_PAGE_SIZE);
		fPages[i++] = page;
	}

#if DEBUG_PAGE_ACCESS
	fAllocatingThread = find_thread(NULL);
#endif

	return B_OK;
}
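A minimal sketch of the add_to_iovec() helper used here (and again in Examples #8 and #9), inferred from its call sites; this is an assumption, not the verbatim Haiku implementation. It coalesces physically contiguous ranges into a single generic_io_vec and refuses to overflow the vector array.

static void
add_to_iovec(generic_io_vec* vecs, uint32& index, uint32 max,
	generic_addr_t address, generic_size_t size)
{
	// extend the previous vector if the new range is physically contiguous
	if (index > 0 && vecs[index - 1].base + vecs[index - 1].length == address) {
		vecs[index - 1].length += size;
		return;
	}

	// otherwise start a new vector; the caller must have sized the array
	if (index == max)
		panic("add_to_iovec(): no space left for another vector");

	vecs[index].base = address;
	vecs[index].length = size;
	index++;
}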
Example #2
int vm_translation_map_create(vm_translation_map *new_map, bool kernel)
{
	ASSERT(new_map);

	// initialize the new object
	new_map->ops = &tmap_ops;
	new_map->map_count = 0;
	if(recursive_lock_create(&new_map->lock) < 0)
		return ERR_NO_MEMORY;

	new_map->arch_data = kmalloc(sizeof(vm_translation_map_arch_info));
	if(new_map->arch_data == NULL) {
		recursive_lock_destroy(&new_map->lock);
		return ERR_NO_MEMORY;
	}

	if (!kernel) {
		// user
		vm_page *page = vm_page_allocate_page(PAGE_STATE_CLEAR);
		list_add_head(&new_map->arch_data->pagetable_list, &page->queue_node);

		new_map->arch_data->pgdir_phys = page->ppn * PAGE_SIZE;
		get_physical_page_tmap(page->ppn * PAGE_SIZE, (addr_t *)&new_map->arch_data->pgdir_virt, PHYSICAL_PAGE_NO_WAIT);

		// copy the kernel mappings into this one (the upper 256 entries)
		memcpy(new_map->arch_data->pgdir_virt + 256, (unsigned long *)kernel_pgdir_virt + 256, sizeof(unsigned long) * 256);
	} else {
		// kernel top level page dir is already allocated
		new_map->arch_data->pgdir_phys = kernel_pgdir_phys;
		new_map->arch_data->pgdir_virt = (unsigned long *)kernel_pgdir_virt;

		vm_page *page = vm_lookup_page(kernel_pgdir_phys / PAGE_SIZE);
		TMAP_TRACE("page %p, state %d\n", page, page->state);
		list_add_head(&new_map->arch_data->pagetable_list, &page->queue_node);

		// zero out the bottom of it, where user space mappings would go
		memset(new_map->arch_data->pgdir_virt, 0, sizeof(unsigned long) * 256);

		// XXX account for pre-existing kernel page tables
	}

	return 0;
}
Example #3
extern "C" status_t
file_cache_init(void)
{
	// allocate a clean page we can use for writing zeroes
	vm_page_reservation reservation;
	vm_page_reserve_pages(&reservation, 1, VM_PRIORITY_SYSTEM);
	vm_page* page = vm_page_allocate_page(&reservation,
		PAGE_STATE_WIRED | VM_PAGE_ALLOC_CLEAR);
	vm_page_unreserve_pages(&reservation);

	sZeroPage = (phys_addr_t)page->physical_page_number * B_PAGE_SIZE;

	for (uint32 i = 0; i < kZeroVecCount; i++) {
		sZeroVecs[i].base = sZeroPage;
		sZeroVecs[i].length = B_PAGE_SIZE;
	}

	register_generic_syscall(CACHE_SYSCALLS, file_cache_control, 1, 0);
	return B_OK;
}
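The sequence above is the core allocation pattern the other examples build on: reserve pages up front, draw them from the reservation with vm_page_allocate_page(), then release whatever was not consumed. A condensed sketch of just that pattern, using only the calls shown in this example (the helper name is illustrative):

static phys_addr_t
allocate_wired_clear_page()
{
	vm_page_reservation reservation;

	// reserve one page so the allocation below cannot fail
	vm_page_reserve_pages(&reservation, 1, VM_PRIORITY_SYSTEM);

	// take a wired, zero-filled page out of the reservation
	vm_page* page = vm_page_allocate_page(&reservation,
		PAGE_STATE_WIRED | VM_PAGE_ALLOC_CLEAR);

	// return the unused part of the reservation to the system
	vm_page_unreserve_pages(&reservation);

	return (phys_addr_t)page->physical_page_number * B_PAGE_SIZE;
}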
Example #4
static int map_tmap(vm_translation_map *map, addr_t va, addr_t pa, unsigned int attributes)
{
	addr_t pgtable_phys;
	unsigned long *pgtable;
	int index;
	vm_page *page;

	TMAP_TRACE("map_tmap: va 0x%lx pa 0x%lx, attributes 0x%x\n", va, pa, attributes);

	// look up and dereference the first entry
	pgtable = map->arch_data->pgdir_virt;
	ASSERT(pgtable);
	TMAP_TRACE("map_tmap top level pgdir virt %p\n", pgtable);
	index = PGTABLE0_ENTRY(va);
	if (!PGENT_PRESENT(pgtable[index])) {
		page = vm_page_allocate_page(PAGE_STATE_CLEAR);
		pgtable_phys = page->ppn * PAGE_SIZE;
		list_add_head(&map->arch_data->pagetable_list, &page->queue_node);

		pgtable[index] = pgtable_phys | (PT_PRESENT|PT_WRITE|PT_USER);
		map->map_count++;

		TMAP_TRACE("map_tmap: had to allocate level 1: paddr 0x%lx, ent @ %p = 0x%lx\n", pgtable_phys, &pgtable[index], pgtable[index]);
	} else {
		pgtable_phys = PGENT_TO_ADDR(pgtable[index]);
		TMAP_TRACE("map_tmap level 1: paddr 0x%lx\n", pgtable_phys);
	}

	// level 2
	pgtable = phys_to_virt(pgtable_phys);
	index = PGTABLE1_ENTRY(va);
	if (!PGENT_PRESENT(pgtable[index])) {
		page = vm_page_allocate_page(PAGE_STATE_CLEAR);
		pgtable_phys = page->ppn * PAGE_SIZE;
		list_add_head(&map->arch_data->pagetable_list, &page->queue_node);

		pgtable[index] = pgtable_phys | (PT_PRESENT|PT_WRITE|PT_USER);
		map->map_count++;
		
		TMAP_TRACE("map_tmap: had to allocate level 2: paddr 0x%lx, ent @ %p = 0x%lx\n", pgtable_phys, &pgtable[index], pgtable[index]);
	} else {
		pgtable_phys = PGENT_TO_ADDR(pgtable[index]);
		TMAP_TRACE("map_tmap level 2: paddr 0x%lx\n", pgtable_phys);
	}

	// level 3
	pgtable = phys_to_virt(pgtable_phys);
	index = PGTABLE2_ENTRY(va);
	if (!PGENT_PRESENT(pgtable[index])) {
		page = vm_page_allocate_page(PAGE_STATE_CLEAR);
		pgtable_phys = page->ppn * PAGE_SIZE;
		list_add_head(&map->arch_data->pagetable_list, &page->queue_node);

		pgtable[index] = pgtable_phys | (PT_PRESENT|PT_WRITE|PT_USER);
		map->map_count++;

		TMAP_TRACE("map_tmap: had to allocate level 3: paddr 0x%lx, ent @ %p = 0x%lx\n", pgtable_phys, &pgtable[index], pgtable[index]);
	} else {
		pgtable_phys = PGENT_TO_ADDR(pgtable[index]);
		TMAP_TRACE("map_tmap level 3: paddr 0x%lx\n", pgtable_phys);
	}

	// map the page
	pgtable = phys_to_virt(pgtable_phys);
	index = PGTABLE3_ENTRY(va);
	pa = ROUNDOWN(pa, PAGE_SIZE);
	pgtable[index] = pa 
		| ((attributes & LOCK_RW) ? PT_WRITE : 0) 
		| ((attributes & LOCK_KERNEL) ? 0 : PT_USER) 
		| PT_PRESENT;
	map->map_count++;

	TMAP_TRACE("map_tmap: ent @ %p = 0x%lx\n", &pgtable[index], pgtable[index]);

	return 0;
}
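The three level blocks above repeat the same allocate-if-absent logic. A hypothetical refactoring sketch that factors it into one helper, built only from the calls and macros already used in this example (the helper itself is not part of the original source):

static addr_t
get_or_alloc_pgtable(vm_translation_map *map, unsigned long *pgtable, int index)
{
	addr_t pgtable_phys;

	if (!PGENT_PRESENT(pgtable[index])) {
		// allocate a zeroed page for the next-level table and track it
		vm_page *page = vm_page_allocate_page(PAGE_STATE_CLEAR);
		pgtable_phys = page->ppn * PAGE_SIZE;
		list_add_head(&map->arch_data->pagetable_list, &page->queue_node);

		pgtable[index] = pgtable_phys | (PT_PRESENT | PT_WRITE | PT_USER);
		map->map_count++;
	} else {
		pgtable_phys = PGENT_TO_ADDR(pgtable[index]);
	}

	return pgtable_phys;
}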
Example #5
/*!	Iteratively correct the reported capacity by trying to read from the device
	close to its end.
*/
static status_t
test_capacity(cd_driver_info *info)
{
	static const size_t kMaxEntries = 4;
	const uint32 blockSize = info->block_size;
	const size_t kBufferSize = blockSize * 4;

	TRACE("test_capacity: read with buffer size %" B_PRIuSIZE ", block size %"
		B_PRIu32", capacity %llu\n", kBufferSize, blockSize,
		info->original_capacity);

	info->capacity = info->original_capacity;

	size_t numBlocks = B_PAGE_SIZE / blockSize;
	uint64 offset = info->original_capacity;
	if (offset <= numBlocks)
		return B_OK;

	offset -= numBlocks;

	scsi_ccb *request = info->scsi->alloc_ccb(info->scsi_device);
	if (request == NULL)
		return B_NO_MEMORY;

	// Allocate buffer

	physical_entry entries[4];
	size_t numEntries = 0;

	vm_page_reservation reservation;
	vm_page_reserve_pages(&reservation,
		(kBufferSize - 1 + B_PAGE_SIZE) / B_PAGE_SIZE, VM_PRIORITY_SYSTEM);

	for (size_t left = kBufferSize; numEntries < kMaxEntries && left > 0;
			numEntries++) {
		size_t bytes = std::min(left, (size_t)B_PAGE_SIZE);

		vm_page* page = vm_page_allocate_page(&reservation,
			PAGE_STATE_WIRED | VM_PAGE_ALLOC_BUSY);

		entries[numEntries].address = page->physical_page_number * B_PAGE_SIZE;
		entries[numEntries].size = bytes;

		left -= bytes;
	}

	vm_page_unreserve_pages(&reservation);

	// Read close to the end of the device to find out its real end

	// Only try 1 second before the end (= 75 blocks)
	while (offset > info->original_capacity - 75) {
		size_t bytesTransferred;
		status_t status = sSCSIPeripheral->read_write(info->scsi_periph_device,
			request, offset, numBlocks, entries, numEntries, false,
			&bytesTransferred);

		TRACE("test_capacity: read from offset %llu: %s\n", offset,
			strerror(status));

		if (status == B_OK || (request->sense[0] & 0x7f) != 0x70)
			break;

		switch (request->sense[2]) {
			case SCSIS_KEY_MEDIUM_ERROR:
			case SCSIS_KEY_ILLEGAL_REQUEST:
			case SCSIS_KEY_VOLUME_OVERFLOW:
			{
				// find out the problematic sector
				uint32 errorBlock = (request->sense[3] << 24U)
					| (request->sense[4] << 16U) | (request->sense[5] << 8U)
					| request->sense[6];
				if (errorBlock >= offset)
					info->capacity = errorBlock;
				break;
			}

			default:
				break;
		}

		if (numBlocks > offset)
			break;

		offset -= numBlocks;
	}

	info->scsi->free_ccb(request);

	for (size_t i = 0; i < numEntries; i++) {
		vm_page_set_state(vm_lookup_page(entries[i].address / B_PAGE_SIZE),
			PAGE_STATE_FREE);
	}

	if (info->capacity != info->original_capacity) {
		dprintf("scsi_cd: adjusted capacity from %llu to %llu blocks.\n",
			info->original_capacity, info->capacity);
	}

	return B_OK;
}
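To make the correction loop concrete (numbers are illustrative): with a 2048-byte block size and a 4096-byte B_PAGE_SIZE, numBlocks = 4096 / 2048 = 2, so probing starts at offset original_capacity - 2 and steps back two blocks per attempt, never going more than 75 blocks (one second of CD audio) below the reported end. If a disc reports 333,000 blocks and the read at offset 332,998 fails with a medium error whose sense data names block 332,999, the capacity is corrected to 332,999.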
Example #6
status_t
ARMVMTranslationMap32Bit::Map(addr_t va, phys_addr_t pa, uint32 attributes,
	uint32 memoryType, vm_page_reservation* reservation)
{
	TRACE("map_tmap: entry pa 0x%lx va 0x%lx\n", pa, va);

/*
	dprintf("pgdir at 0x%x\n", pgdir);
	dprintf("index is %d\n", va / B_PAGE_SIZE / 1024);
	dprintf("final at 0x%x\n", &pgdir[va / B_PAGE_SIZE / 1024]);
	dprintf("value is 0x%x\n", *(int *)&pgdir[va / B_PAGE_SIZE / 1024]);
	dprintf("present bit is %d\n", pgdir[va / B_PAGE_SIZE / 1024].present);
	dprintf("addr is %d\n", pgdir[va / B_PAGE_SIZE / 1024].addr);
*/
	page_directory_entry* pd = fPagingStructures->pgdir_virt;

	// check to see if a page table exists for this range
	uint32 index = VADDR_TO_PDENT(va);
	if ((pd[index] & ARM_PDE_TYPE_MASK) == 0) {
		phys_addr_t pgtable;
		vm_page *page;

		// we need to allocate a pgtable
		page = vm_page_allocate_page(reservation,
			PAGE_STATE_WIRED | VM_PAGE_ALLOC_CLEAR);

		DEBUG_PAGE_ACCESS_END(page);

		pgtable = (phys_addr_t)page->physical_page_number * B_PAGE_SIZE;

		TRACE("map_tmap: asked for free page for pgtable. 0x%lx\n", pgtable);

		// put it in the pgdir
		ARMPagingMethod32Bit::PutPageTableInPageDir(&pd[index], pgtable,
			attributes
				| ((attributes & B_USER_PROTECTION) != 0
						? B_WRITE_AREA : B_KERNEL_WRITE_AREA));

		// update any other page directories, if it maps kernel space
		if (index >= FIRST_KERNEL_PGDIR_ENT
			&& index < (FIRST_KERNEL_PGDIR_ENT + NUM_KERNEL_PGDIR_ENTS)) {
			ARMPagingStructures32Bit::UpdateAllPageDirs(index, pd[index]);
		}

		fMapCount++;
	}

	// now, fill in the pentry
	Thread* thread = thread_get_current_thread();
	ThreadCPUPinner pinner(thread);

	page_table_entry* pt = (page_table_entry*)fPageMapper->GetPageTableAt(
		pd[index] & ARM_PDE_ADDRESS_MASK);
	index = VADDR_TO_PTENT(va);

	ASSERT_PRINT((pt[index] & ARM_PTE_TYPE_MASK) == 0,
		"virtual address: %#" B_PRIxADDR ", existing pte: %#" B_PRIx32, va,
		pt[index]);

	ARMPagingMethod32Bit::PutPageTableEntryInTable(&pt[index], pa, attributes,
		memoryType, fIsKernelMap);

	pinner.Unlock();

	// Note: We don't need to invalidate the TLB for this address, as previously
	// the entry was not present and the TLB doesn't cache those entries.

	fMapCount++;

	return 0;
}
Example #7
status_t
M68KVMTranslationMap040::Map(addr_t va, phys_addr_t pa, uint32 attributes,
	uint32 memoryType, vm_page_reservation* reservation)
{
	TRACE("M68KVMTranslationMap040::Map: entry pa 0x%lx va 0x%lx\n", pa, va);

/*
	dprintf("pgdir at 0x%x\n", pgdir);
	dprintf("index is %d\n", va / B_PAGE_SIZE / 1024);
	dprintf("final at 0x%x\n", &pgdir[va / B_PAGE_SIZE / 1024]);
	dprintf("value is 0x%x\n", *(int *)&pgdir[va / B_PAGE_SIZE / 1024]);
	dprintf("present bit is %d\n", pgdir[va / B_PAGE_SIZE / 1024].present);
	dprintf("addr is %d\n", pgdir[va / B_PAGE_SIZE / 1024].addr);
*/
	page_root_entry *pr = fPagingStructures->pgroot_virt;
	page_directory_entry *pd;
	page_table_entry *pt;
	addr_t pd_pg, pt_pg;
	uint32 rindex, dindex, pindex;


	// check to see if a page directory exists for this range
	rindex = VADDR_TO_PRENT(va);
	if (PRE_TYPE(pr[rindex]) != DT_ROOT) {
		phys_addr_t pgdir;
		vm_page *page;
		uint32 i;

		// we need to allocate a pgdir group
		page = vm_page_allocate_page(reservation,
			PAGE_STATE_WIRED | VM_PAGE_ALLOC_CLEAR);

		DEBUG_PAGE_ACCESS_END(page);

		pgdir = (phys_addr_t)page->physical_page_number * B_PAGE_SIZE;

		TRACE("::Map: asked for free page for pgdir. 0x%lx\n", pgdir);

		// for each pgdir on the allocated page:
		for (i = 0; i < NUM_DIRTBL_PER_PAGE; i++) {
			uint32 aindex = rindex & ~(NUM_DIRTBL_PER_PAGE-1); /* aligned */
			page_root_entry *apr = &pr[aindex + i];

			// put in the pgroot
			M68KPagingMethod040::PutPageDirInPageRoot(apr, pgdir, attributes
				| ((attributes & B_USER_PROTECTION) != 0
						? B_WRITE_AREA : B_KERNEL_WRITE_AREA));

			// update any other page roots, if it maps kernel space
			//XXX: suboptimal, should batch them
			if ((aindex+i) >= FIRST_KERNEL_PGDIR_ENT && (aindex+i)
					< (FIRST_KERNEL_PGDIR_ENT + NUM_KERNEL_PGDIR_ENTS))
				M68KPagingStructures040::UpdateAllPageDirs((aindex+i),
					pr[aindex+i]);

			pgdir += SIZ_DIRTBL;
		}
		fMapCount++;
	}
	// now, fill in the pentry
	//XXX: is this required?
	Thread* thread = thread_get_current_thread();
	ThreadCPUPinner pinner(thread);

	pd = (page_directory_entry*)MapperGetPageTableAt(
		PRE_TO_PA(pr[rindex]));

	//pinner.Unlock();

	// we want the table at rindex, not at rindex%(tbl/page)
	//pd += (rindex % NUM_DIRTBL_PER_PAGE) * NUM_DIRENT_PER_TBL;

	// check to see if a page table exists for this range
	dindex = VADDR_TO_PDENT(va);
	if (PDE_TYPE(pd[dindex]) != DT_DIR) {
		phys_addr_t pgtable;
		vm_page *page;
		uint32 i;

		// we need to allocate a pgtable group
		page = vm_page_allocate_page(reservation,
			PAGE_STATE_WIRED | VM_PAGE_ALLOC_CLEAR);

		DEBUG_PAGE_ACCESS_END(page);

		pgtable = (phys_addr_t)page->physical_page_number * B_PAGE_SIZE;

		TRACE("::Map: asked for free page for pgtable. 0x%lx\n", pgtable);

		// for each pgtable on the allocated page:
		for (i = 0; i < NUM_PAGETBL_PER_PAGE; i++) {
			uint32 aindex = dindex & ~(NUM_PAGETBL_PER_PAGE-1); /* aligned */
			page_directory_entry *apd = &pd[aindex + i];

			// put in the pgdir
			M68KPagingMethod040::PutPageTableInPageDir(apd, pgtable, attributes
				| ((attributes & B_USER_PROTECTION) != 0
						? B_WRITE_AREA : B_KERNEL_WRITE_AREA));

			// no need to update other page directories for kernel space;
			// the root level already points to us.

			pgtable += SIZ_PAGETBL;
		}

#warning M68K: really mean map_count++ ??
		fMapCount++;
	}

	// now, fill in the pentry
	//ThreadCPUPinner pinner(thread);

	pt = (page_table_entry*)MapperGetPageTableAt(PDE_TO_PA(pd[dindex]));
	// we want the table at rindex, not at rindex%(tbl/page)
	//pt += (dindex % NUM_PAGETBL_PER_PAGE) * NUM_PAGEENT_PER_TBL;

	pindex = VADDR_TO_PTENT(va);

	ASSERT_PRINT((PTE_TYPE(pt[pindex]) != DT_INVALID) == 0,
		"virtual address: %#" B_PRIxADDR ", existing pte: %#" B_PRIx32, va,
		pt[pindex]);

	M68KPagingMethod040::PutPageTableEntryInTable(&pt[pindex], pa, attributes,
		memoryType, fIsKernelMap);

	pinner.Unlock();

	// Note: We don't need to invalidate the TLB for this address, as previously
	// the entry was not present and the TLB doesn't cache those entries.

	fMapCount++;

	return B_OK;
}
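Both Map() implementations expect the caller to have reserved pages beforehand, because every vm_page_allocate_page() call above draws from the reservation that is passed in. A hypothetical caller-side sketch (the helper name and the locking details are illustrative, and VMTranslationMap::MaxPagesNeededToMap() is assumed from the wider Haiku API rather than shown on this page; only the vm_page_* calls and the Map() signature are taken from these examples):

status_t
map_one_page(VMTranslationMap* map, addr_t va, phys_addr_t pa,
	uint32 attributes, uint32 memoryType)
{
	vm_page_reservation reservation;

	// reserve a worst-case number of pages for any page tables Map()
	// may need to allocate for this address
	vm_page_reserve_pages(&reservation, map->MaxPagesNeededToMap(va, va),
		VM_PRIORITY_SYSTEM);

	// the real callers also hold the translation map's lock around Map()
	status_t status = map->Map(va, pa, attributes, memoryType, &reservation);

	vm_page_unreserve_pages(&reservation);
	return status;
}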
Example #8
/*!	Like read_into_cache() but writes data into the cache.
	To preserve data consistency, it might also read pages into the cache
	if only a partial page gets written.
	The same restrictions apply.
*/
static status_t
write_to_cache(file_cache_ref* ref, void* cookie, off_t offset,
	int32 pageOffset, addr_t buffer, size_t bufferSize, bool useBuffer,
	vm_page_reservation* reservation, size_t reservePages)
{
	// TODO: We're using way too much stack! Rather allocate a sufficiently
	// large chunk on the heap.
	generic_io_vec vecs[MAX_IO_VECS];
	uint32 vecCount = 0;
	generic_size_t numBytes = PAGE_ALIGN(pageOffset + bufferSize);
	vm_page* pages[MAX_IO_VECS];
	int32 pageIndex = 0;
	status_t status = B_OK;

	// ToDo: this should be settable somewhere
	bool writeThrough = false;

	// allocate pages for the cache and mark them busy
	for (generic_size_t pos = 0; pos < numBytes; pos += B_PAGE_SIZE) {
		// TODO: if space is becoming tight, and this cache is already grown
		//	big - shouldn't we better steal the pages directly in that case?
		//	(a working set like approach for the file cache)
		// TODO: the pages we allocate here should have been reserved upfront
		//	in cache_io()
		vm_page* page = pages[pageIndex++] = vm_page_allocate_page(
			reservation,
			(writeThrough ? PAGE_STATE_CACHED : PAGE_STATE_MODIFIED)
				| VM_PAGE_ALLOC_BUSY);

		page->modified = !writeThrough;

		ref->cache->InsertPage(page, offset + pos);

		add_to_iovec(vecs, vecCount, MAX_IO_VECS,
			page->physical_page_number * B_PAGE_SIZE, B_PAGE_SIZE);
	}

	push_access(ref, offset, bufferSize, true);
	ref->cache->Unlock();
	vm_page_unreserve_pages(reservation);

	// copy contents (and read in partially written pages first)

	if (pageOffset != 0) {
		// This is only a partial write, so we have to read the rest of the page
		// from the file to have consistent data in the cache
		generic_io_vec readVec = { vecs[0].base, B_PAGE_SIZE };
		generic_size_t bytesRead = B_PAGE_SIZE;

		status = vfs_read_pages(ref->vnode, cookie, offset, &readVec, 1,
			B_PHYSICAL_IO_REQUEST, &bytesRead);
		// ToDo: handle errors for real!
		if (status < B_OK)
			panic("1. vfs_read_pages() failed: %s!\n", strerror(status));
	}

	size_t lastPageOffset = (pageOffset + bufferSize) % B_PAGE_SIZE;
	if (lastPageOffset != 0) {
		// get the last page in the I/O vectors
		generic_addr_t last = vecs[vecCount - 1].base
			+ vecs[vecCount - 1].length - B_PAGE_SIZE;

		if ((off_t)(offset + pageOffset + bufferSize) == ref->cache->virtual_end) {
			// the space in the page after this write action needs to be cleaned
			vm_memset_physical(last + lastPageOffset, 0,
				B_PAGE_SIZE - lastPageOffset);
		} else {
			// the end of this write does not happen on a page boundary, so we
			// need to fetch the last page before we can update it
			generic_io_vec readVec = { last, B_PAGE_SIZE };
			generic_size_t bytesRead = B_PAGE_SIZE;

			status = vfs_read_pages(ref->vnode, cookie,
				PAGE_ALIGN(offset + pageOffset + bufferSize) - B_PAGE_SIZE,
				&readVec, 1, B_PHYSICAL_IO_REQUEST, &bytesRead);
			// ToDo: handle errors for real!
			if (status < B_OK)
				panic("vfs_read_pages() failed: %s!\n", strerror(status));

			if (bytesRead < B_PAGE_SIZE) {
				// the space beyond the file size needs to be cleaned
				vm_memset_physical(last + bytesRead, 0,
					B_PAGE_SIZE - bytesRead);
			}
		}
	}

	for (uint32 i = 0; i < vecCount; i++) {
		generic_addr_t base = vecs[i].base;
		generic_size_t bytes = min_c((generic_size_t)bufferSize,
			generic_size_t(vecs[i].length - pageOffset));

		if (useBuffer) {
			// copy data from user buffer
			vm_memcpy_to_physical(base + pageOffset, (void*)buffer, bytes,
				IS_USER_ADDRESS(buffer));
		} else {
			// clear buffer instead
			vm_memset_physical(base + pageOffset, 0, bytes);
		}

		bufferSize -= bytes;
		if (bufferSize == 0)
			break;

		buffer += bytes;
		pageOffset = 0;
	}

	if (writeThrough) {
		// write cached pages back to the file if we were asked to do that
		status_t status = vfs_write_pages(ref->vnode, cookie, offset, vecs,
			vecCount, B_PHYSICAL_IO_REQUEST, &numBytes);
		if (status < B_OK) {
			// ToDo: remove allocated pages, ...?
			panic("file_cache: remove allocated pages! write pages failed: %s\n",
				strerror(status));
		}
	}

	if (status == B_OK)
		reserve_pages(ref, reservation, reservePages, true);

	ref->cache->Lock();

	// make the pages accessible in the cache
	for (int32 i = pageIndex; i-- > 0;) {
		ref->cache->MarkPageUnbusy(pages[i]);

		DEBUG_PAGE_ACCESS_END(pages[i]);
	}

	return status;
}
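A concrete instance of the partial-page handling above (assuming a 4096-byte B_PAGE_SIZE): a write with pageOffset = 512 and bufferSize = 5000 gives numBytes = PAGE_ALIGN(512 + 5000) = 8192, i.e. two cache pages. Because pageOffset is non-zero, the first page is read back from the file before being partially overwritten, and since lastPageOffset = (512 + 5000) % 4096 = 1416, the tail of the second page beyond byte 1416 is either zeroed (when the write ends at the cache's virtual_end) or fetched from the file as well.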
Example #9
/*!	Reads the requested amount of data into the cache, and allocates
	pages needed to fulfill that request. This function is called by cache_io().
	It can only handle a limited number of bytes at a time, and the caller
	must make sure the request stays within that limit.
	The cache_ref lock must be held when calling this function; during
	operation it will unlock the cache, though.
*/
static status_t
read_into_cache(file_cache_ref* ref, void* cookie, off_t offset,
	int32 pageOffset, addr_t buffer, size_t bufferSize, bool useBuffer,
	vm_page_reservation* reservation, size_t reservePages)
{
	TRACE(("read_into_cache(offset = %Ld, pageOffset = %ld, buffer = %#lx, "
		"bufferSize = %lu\n", offset, pageOffset, buffer, bufferSize));

	VMCache* cache = ref->cache;

	// TODO: We're using way too much stack! Rather allocate a sufficiently
	// large chunk on the heap.
	generic_io_vec vecs[MAX_IO_VECS];
	uint32 vecCount = 0;

	generic_size_t numBytes = PAGE_ALIGN(pageOffset + bufferSize);
	vm_page* pages[MAX_IO_VECS];
	int32 pageIndex = 0;

	// allocate pages for the cache and mark them busy
	for (generic_size_t pos = 0; pos < numBytes; pos += B_PAGE_SIZE) {
		vm_page* page = pages[pageIndex++] = vm_page_allocate_page(
			reservation, PAGE_STATE_CACHED | VM_PAGE_ALLOC_BUSY);

		cache->InsertPage(page, offset + pos);

		add_to_iovec(vecs, vecCount, MAX_IO_VECS,
			page->physical_page_number * B_PAGE_SIZE, B_PAGE_SIZE);
			// TODO: check if the array is large enough (currently panics)!
	}

	push_access(ref, offset, bufferSize, false);
	cache->Unlock();
	vm_page_unreserve_pages(reservation);

	// read file into reserved pages
	status_t status = read_pages_and_clear_partial(ref, cookie, offset, vecs,
		vecCount, B_PHYSICAL_IO_REQUEST, &numBytes);
	if (status != B_OK) {
		// reading failed, free allocated pages

		dprintf("file_cache: read pages failed: %s\n", strerror(status));

		cache->Lock();

		for (int32 i = 0; i < pageIndex; i++) {
			cache->NotifyPageEvents(pages[i], PAGE_EVENT_NOT_BUSY);
			cache->RemovePage(pages[i]);
			vm_page_set_state(pages[i], PAGE_STATE_FREE);
		}

		return status;
	}

	// copy the pages if needed and unmap them again

	for (int32 i = 0; i < pageIndex; i++) {
		if (useBuffer && bufferSize != 0) {
			size_t bytes = min_c(bufferSize, (size_t)B_PAGE_SIZE - pageOffset);

			vm_memcpy_from_physical((void*)buffer,
				pages[i]->physical_page_number * B_PAGE_SIZE + pageOffset,
				bytes, IS_USER_ADDRESS(buffer));

			buffer += bytes;
			bufferSize -= bytes;
			pageOffset = 0;
		}
	}

	reserve_pages(ref, reservation, reservePages, false);
	cache->Lock();

	// make the pages accessible in the cache
	for (int32 i = pageIndex; i-- > 0;) {
		DEBUG_PAGE_ACCESS_END(pages[i]);

		cache->MarkPageUnbusy(pages[i]);
	}

	return B_OK;
}
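The size limit mentioned in the function's comment comes from the fixed-size pages[MAX_IO_VECS] and vecs[MAX_IO_VECS] arrays: each B_PAGE_SIZE chunk of PAGE_ALIGN(pageOffset + bufferSize) consumes one slot. A hypothetical caller-side guard that expresses this limit (in Haiku the actual splitting is done by the caller, cache_io(); this helper is only a sketch):

static bool
fits_into_one_pass(int32 pageOffset, size_t bufferSize)
{
	// read_into_cache() uses one pages[]/vecs[] slot per page of the request
	return PAGE_ALIGN(pageOffset + bufferSize) <= MAX_IO_VECS * B_PAGE_SIZE;
}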