Example 1
static void
reserve_pages(file_cache_ref* ref, vm_page_reservation* reservation,
	size_t reservePages, bool isWrite)
{
	if (low_resource_state(B_KERNEL_RESOURCE_PAGES) != B_NO_LOW_RESOURCE) {
		VMCache* cache = ref->cache;
		cache->Lock();

		if (cache->consumers.IsEmpty() && cache->areas == NULL
			&& access_is_sequential(ref)) {
			// we are not mapped, and we're accessed sequentially

			if (isWrite) {
				// Just write some pages back, and actually wait until they
				// have been written back in order to relieve the page pressure
				// a bit.
				int32 index = ref->last_access_index;
				int32 previous = index - 1;
				if (previous < 0)
					previous = LAST_ACCESSES - 1;

				vm_page_write_modified_page_range(cache,
					ref->LastAccessPageOffset(previous, true),
					ref->LastAccessPageOffset(index, true));
			} else {
				// free some pages from our cache
				// TODO: start with oldest
				uint32 left = reservePages;
				vm_page* page;
				for (VMCachePagesTree::Iterator it = cache->pages.GetIterator();
						(page = it.Next()) != NULL && left > 0;) {
					if (page->State() == PAGE_STATE_CACHED && !page->busy) {
						DEBUG_PAGE_ACCESS_START(page);
						ASSERT(!page->IsMapped());
						ASSERT(!page->modified);
						cache->RemovePage(page);
						vm_page_set_state(page, PAGE_STATE_FREE);
						left--;
					}
				}
			}
		}
		cache->Unlock();
	}

	vm_page_reserve_pages(reservation, reservePages, VM_PRIORITY_USER);
}
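For context, here is a minimal sketch of how a reservation made by reserve_pages is typically consumed afterwards. The caller below is hypothetical (ref, offset and reservePages are placeholder names), but the reserve/allocate/insert/unreserve sequence mirrors the pattern visible in the next example and in the rest of this section.

// Hypothetical caller, for illustration only: reserve up front, allocate
// cache pages against the reservation, then return whatever was not used.
static void
consume_reservation_sketch(file_cache_ref* ref, off_t offset,
	size_t reservePages)
{
	vm_page_reservation reservation;
	reserve_pages(ref, &reservation, reservePages, false);

	ref->cache->Lock();
	for (size_t i = 0; i < reservePages; i++) {
		vm_page* page = vm_page_allocate_page(&reservation,
			PAGE_STATE_CACHED | VM_PAGE_ALLOC_BUSY);
		ref->cache->InsertPage(page, offset + i * B_PAGE_SIZE);
	}
	ref->cache->Unlock();

	// ... perform the actual I/O into the new pages, then mark them
	// non-busy ...

	vm_page_unreserve_pages(&reservation);
}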
Example 2
extern "C" status_t
file_cache_init(void)
{
	// allocate a clean page we can use for writing zeroes
	vm_page_reservation reservation;
	vm_page_reserve_pages(&reservation, 1, VM_PRIORITY_SYSTEM);
	vm_page* page = vm_page_allocate_page(&reservation,
		PAGE_STATE_WIRED | VM_PAGE_ALLOC_CLEAR);
	vm_page_unreserve_pages(&reservation);

	sZeroPage = (phys_addr_t)page->physical_page_number * B_PAGE_SIZE;

	for (uint32 i = 0; i < kZeroVecCount; i++) {
		sZeroVecs[i].base = sZeroPage;
		sZeroVecs[i].length = B_PAGE_SIZE;
	}

	register_generic_syscall(CACHE_SYSCALLS, file_cache_control, 1, 0);
	return B_OK;
}
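The zero page and vector array set up here back scatter/gather writes of zeroes in the file cache's write path, which is not part of this excerpt. Below is a minimal, hypothetical sketch of how they might be consumed; write_zeros_sketch is an invented name, and passing B_PHYSICAL_IO_REQUEST to vfs_write_pages is an assumption about the physical-I/O interface rather than code taken from these examples.

// Hypothetical helper, for illustration only: write up to kZeroVecCount
// pages of zeroes to a vnode in one physical scatter/gather request.
// Assumes vfs_write_pages() accepts physical vectors when given
// B_PHYSICAL_IO_REQUEST; partial pages are ignored in this sketch.
static status_t
write_zeros_sketch(struct vnode* vnode, void* cookie, off_t offset,
	generic_size_t* _length)
{
	size_t count = min_c(*_length / B_PAGE_SIZE,
		(generic_size_t)kZeroVecCount);
	return vfs_write_pages(vnode, cookie, offset, sZeroVecs, count,
		B_PHYSICAL_IO_REQUEST, _length);
}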
Example 3
/*!	Iteratively correct the reported capacity by trying to read from the device
	close to its end.
*/
static status_t
test_capacity(cd_driver_info *info)
{
	static const size_t kMaxEntries = 4;
	const uint32 blockSize = info->block_size;
	const size_t kBufferSize = blockSize * 4;

	TRACE("test_capacity: read with buffer size %" B_PRIuSIZE ", block size %"
		B_PRIu32", capacity %llu\n", kBufferSize, blockSize,
		info->original_capacity);

	info->capacity = info->original_capacity;

	size_t numBlocks = B_PAGE_SIZE / blockSize;
	uint64 offset = info->original_capacity;
	if (offset <= numBlocks)
		return B_OK;

	offset -= numBlocks;

	scsi_ccb *request = info->scsi->alloc_ccb(info->scsi_device);
	if (request == NULL)
		return B_NO_MEMORY;

	// Allocate buffer

	physical_entry entries[4];
	size_t numEntries = 0;

	vm_page_reservation reservation;
	vm_page_reserve_pages(&reservation,
		(kBufferSize - 1 + B_PAGE_SIZE) / B_PAGE_SIZE, VM_PRIORITY_SYSTEM);

	for (size_t left = kBufferSize; numEntries < kMaxEntries && left > 0;
			numEntries++) {
		size_t bytes = std::min(left, (size_t)B_PAGE_SIZE);

		vm_page* page = vm_page_allocate_page(&reservation,
			PAGE_STATE_WIRED | VM_PAGE_ALLOC_BUSY);

		entries[numEntries].address
			= (phys_addr_t)page->physical_page_number * B_PAGE_SIZE;
		entries[numEntries].size = bytes;

		left -= bytes;
	}

	vm_page_unreserve_pages(&reservation);

	// Read close to the end of the device to find out its real end

	// Only try 1 second before the end (= 75 blocks)
	while (offset > info->original_capacity - 75) {
		size_t bytesTransferred;
		status_t status = sSCSIPeripheral->read_write(info->scsi_periph_device,
			request, offset, numBlocks, entries, numEntries, false,
			&bytesTransferred);

		TRACE("test_capacity: read from offset %llu: %s\n", offset,
			strerror(status));

		if (status == B_OK || (request->sense[0] & 0x7f) != 0x70)
			break;

		switch (request->sense[2]) {
			case SCSIS_KEY_MEDIUM_ERROR:
			case SCSIS_KEY_ILLEGAL_REQUEST:
			case SCSIS_KEY_VOLUME_OVERFLOW:
			{
				// find out the problematic sector
				uint32 errorBlock = (request->sense[3] << 24U)
					| (request->sense[4] << 16U) | (request->sense[5] << 8U)
					| request->sense[6];
				if (errorBlock >= offset)
					info->capacity = errorBlock;
				break;
			}

			default:
				break;
		}

		if (numBlocks > offset)
			break;

		offset -= numBlocks;
	}

	info->scsi->free_ccb(request);

	for (size_t i = 0; i < numEntries; i++) {
		vm_page_set_state(vm_lookup_page(entries[i].address / B_PAGE_SIZE),
			PAGE_STATE_FREE);
	}

	if (info->capacity != info->original_capacity) {
		dprintf("scsi_cd: adjusted capacity from %llu to %llu blocks.\n",
			info->original_capacity, info->capacity);
	}

	return B_OK;
}
Example 4
extern "C" void
cache_prefetch_vnode(struct vnode* vnode, off_t offset, size_t size)
{
	if (size == 0)
		return;

	VMCache* cache;
	if (vfs_get_vnode_cache(vnode, &cache, false) != B_OK)
		return;

	file_cache_ref* ref = ((VMVnodeCache*)cache)->FileCacheRef();
	off_t fileSize = cache->virtual_end;

	if ((off_t)(offset + size) > fileSize)
		size = fileSize - offset;

	// round "offset" and "size" so that both are aligned to B_PAGE_SIZE
	offset = ROUNDDOWN(offset, B_PAGE_SIZE);
	size = ROUNDUP(size, B_PAGE_SIZE);

	size_t reservePages = size / B_PAGE_SIZE;

	// Don't do anything if we don't have the resources left, or the cache
	// already contains more than 2/3 of its pages
	if (offset >= fileSize || vm_page_num_unused_pages() < 2 * reservePages
		|| 3 * cache->page_count > 2 * fileSize / B_PAGE_SIZE) {
		cache->ReleaseRef();
		return;
	}

	size_t bytesToRead = 0;
	off_t lastOffset = offset;

	vm_page_reservation reservation;
	vm_page_reserve_pages(&reservation, reservePages, VM_PRIORITY_USER);

	cache->Lock();

	while (true) {
		// check if this page is already in memory
		if (size > 0) {
			vm_page* page = cache->LookupPage(offset);

			offset += B_PAGE_SIZE;
			size -= B_PAGE_SIZE;

			if (page == NULL) {
				bytesToRead += B_PAGE_SIZE;
				continue;
			}
		}
		if (bytesToRead != 0) {
			// read the part before the current page (or the end of the request)
			PrecacheIO* io = new(std::nothrow) PrecacheIO(ref, lastOffset,
				bytesToRead);
			if (io == NULL || io->Prepare(&reservation) != B_OK) {
				delete io;
				break;
			}

			// we must not have the cache locked during I/O
			cache->Unlock();
			io->ReadAsync();
			cache->Lock();

			bytesToRead = 0;
		}

		if (size == 0) {
			// we have reached the end of the request
			break;
		}

		lastOffset = offset;
	}

	cache->ReleaseRefAndUnlock();
	vm_page_unreserve_pages(&reservation);
}
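Taken together, the four examples share the same reservation lifecycle. The condensed sketch below restates that pattern using only the calls already shown above; pageCount and the chosen priority are placeholders.

// Condensed reservation lifecycle shared by the examples above: reserve,
// allocate against the reservation, and always release what is left over.
vm_page_reservation reservation;
vm_page_reserve_pages(&reservation, pageCount, VM_PRIORITY_USER);

vm_page* page = vm_page_allocate_page(&reservation,
	PAGE_STATE_CACHED | VM_PAGE_ALLOC_BUSY);
// ... use the page (insert it into a VMCache, perform I/O, etc.) ...

vm_page_unreserve_pages(&reservation);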