Example #1
/*!	Reads from the file into the given I/O vectors and zeroes whatever
	trailing portion the read (or the end of the file) left untouched, so
	that short reads never leave stale data in cache pages.
*/
static inline status_t
read_pages_and_clear_partial(file_cache_ref* ref, void* cookie, off_t offset,
	const generic_io_vec* vecs, size_t count, uint32 flags,
	generic_size_t* _numBytes)
{
	generic_size_t bytesUntouched = *_numBytes;

	status_t status = vfs_read_pages(ref->vnode, cookie, offset, vecs, count,
		flags, _numBytes);

	// Valid data ends where the read stopped, clamped to the file size;
	// everything between that point and the requested length is cleared
	// below.
	generic_size_t bytesEnd = *_numBytes;

	if (offset + (off_t)bytesEnd > ref->cache->virtual_end)
		bytesEnd = ref->cache->virtual_end - offset;

	if (status == B_OK && bytesEnd < bytesUntouched) {
		// Clear out any leftovers that were not touched by the above read.
		// We're doing this here so that not every file system/device has to
		// implement this.
		bytesUntouched -= bytesEnd;

		for (int32 i = count; i-- > 0 && bytesUntouched != 0; ) {
			generic_size_t length = min_c(bytesUntouched, vecs[i].length);
			vm_memset_physical(vecs[i].base + vecs[i].length - length, 0,
				length);

			bytesUntouched -= length;
		}
	}

	return status;
}
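
The backward loop is the core of this helper: it walks the I/O vectors from
the last one toward the first, zeroing tails until the untouched byte count is
exhausted. Below is a minimal userland analogue of that loop, with memset()
standing in for vm_memset_physical() and a hypothetical io_vec struct in place
of the kernel's generic_io_vec; none of these names are the actual kernel API.

#include <stddef.h>
#include <string.h>

struct io_vec {
	unsigned char*	base;
	size_t			length;
};

// Zero the final `untouched` bytes spanned by `vecs`, walking the vectors
// back to front, mirroring the loop in read_pages_and_clear_partial().
static void
clear_tail(struct io_vec* vecs, size_t count, size_t untouched)
{
	for (size_t i = count; i-- > 0 && untouched != 0; ) {
		size_t length
			= untouched < vecs[i].length ? untouched : vecs[i].length;
		memset(vecs[i].base + vecs[i].length - length, 0, length);
		untouched -= length;
	}
}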
Example #2
/*!	Reads the requested chunk directly from the file into \a buffer,
	bypassing page allocation. The cache lock and the page reservation are
	dropped around the blocking I/O and re-established afterwards.
*/
static status_t
read_from_file(file_cache_ref* ref, void* cookie, off_t offset,
	int32 pageOffset, addr_t buffer, size_t bufferSize, bool useBuffer,
	vm_page_reservation* reservation, size_t reservePages)
{
	TRACE(("read_from_file(offset = %Ld, pageOffset = %ld, buffer = %#lx, "
		"bufferSize = %lu)\n", offset, pageOffset, buffer, bufferSize));

	if (!useBuffer)
		return B_OK;

	generic_io_vec vec;
	vec.base = buffer;
	vec.length = bufferSize;

	push_access(ref, offset, bufferSize, false);
	ref->cache->Unlock();
	vm_page_unreserve_pages(reservation);

	generic_size_t toRead = bufferSize;
	status_t status = vfs_read_pages(ref->vnode, cookie, offset + pageOffset,
		&vec, 1, 0, &toRead);

	if (status == B_OK)
		reserve_pages(ref, reservation, reservePages, false);

	ref->cache->Lock();

	return status;
}
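
Worth noting is the locking discipline around the blocking read: the cache
lock is dropped and the page reservation released before vfs_read_pages() goes
to the device, and the pages for the next chunk are re-reserved before the
lock is re-acquired. Keeping the cache locked across blocking I/O would stall
every other thread that touches this cache.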
Example #3
static fssh_status_t
read_from_file(file_cache_ref *ref, void *cookie, fssh_off_t offset,
               int32_t pageOffset, fssh_addr_t buffer, fssh_size_t bufferSize)
{
    fssh_iovec vec;
    vec.iov_base = (void *)buffer;
    vec.iov_len = bufferSize;

    fssh_mutex_unlock(&ref->lock);

    fssh_status_t status = vfs_read_pages(ref->node, cookie,
                                          offset + pageOffset, &vec, 1, &bufferSize);

    fssh_mutex_lock(&ref->lock);

    return status;
}
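
This variant is the fs_shell build of the same helper, used to run Haiku file
system code in userland: types and functions carry the fssh_ prefix, a plain
mutex stands in for the cache lock, and there is no page reservation to
juggle. Note that bufferSize itself is passed as the in/out byte count to
vfs_read_pages().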
Example #4
extern "C" status_t
file_cache_read(void* _cacheRef, void* cookie, off_t offset, void* buffer,
	size_t* _size)
{
	file_cache_ref* ref = (file_cache_ref*)_cacheRef;

	TRACE(("file_cache_read(ref = %p, offset = %Ld, buffer = %p, size = %lu)\n",
		ref, offset, buffer, *_size));

	if (ref->disabled_count > 0) {
		// Caching is disabled -- read directly from the file.
		generic_io_vec vec;
		vec.base = (addr_t)buffer;
		generic_size_t size = vec.length = *_size;
		status_t error = vfs_read_pages(ref->vnode, cookie, offset, &vec, 1, 0,
			&size);
		*_size = size;
		return error;
	}

	return cache_io(ref, cookie, offset, (addr_t)buffer, _size, false);
}
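
Here *_size is an in/out parameter: it carries the requested byte count in
and the number of bytes actually read out, on both the cached and the direct
path. A short usage sketch from a caller's point of view (the variable names
are illustrative, not from any particular caller):

	size_t bytesRead = bufferLength;
	status_t status = file_cache_read(cacheRef, cookie, position, buffer,
		&bytesRead);
	if (status == B_OK && bytesRead < bufferLength) {
		// short read: we hit the end of the file; only bytesRead
		// bytes of the buffer are valid
	}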
Example #5
/*!	Like read_into_cache() but writes data into the cache.
	To preserve data consistency, it might also read pages into the cache
	if only a partial page gets written.
	The same restrictions apply.
*/
static status_t
write_to_cache(file_cache_ref* ref, void* cookie, off_t offset,
	int32 pageOffset, addr_t buffer, size_t bufferSize, bool useBuffer,
	vm_page_reservation* reservation, size_t reservePages)
{
	// TODO: We're using way too much stack! Rather allocate a sufficiently
	// large chunk on the heap.
	generic_io_vec vecs[MAX_IO_VECS];
	uint32 vecCount = 0;
	generic_size_t numBytes = PAGE_ALIGN(pageOffset + bufferSize);
	vm_page* pages[MAX_IO_VECS];
	int32 pageIndex = 0;
	status_t status = B_OK;

	// TODO: this should be settable somewhere
	bool writeThrough = false;

	// allocate pages for the cache and mark them busy
	for (generic_size_t pos = 0; pos < numBytes; pos += B_PAGE_SIZE) {
		// TODO: if space is becoming tight, and this cache is already grown
		//	big - shouldn't we better steal the pages directly in that case?
		//	(a working set like approach for the file cache)
		// TODO: the pages we allocate here should have been reserved upfront
		//	in cache_io()
		vm_page* page = pages[pageIndex++] = vm_page_allocate_page(
			reservation,
			(writeThrough ? PAGE_STATE_CACHED : PAGE_STATE_MODIFIED)
				| VM_PAGE_ALLOC_BUSY);

		page->modified = !writeThrough;

		ref->cache->InsertPage(page, offset + pos);

		add_to_iovec(vecs, vecCount, MAX_IO_VECS,
			page->physical_page_number * B_PAGE_SIZE, B_PAGE_SIZE);
	}

	push_access(ref, offset, bufferSize, true);
	ref->cache->Unlock();
	vm_page_unreserve_pages(reservation);

	// copy contents (and read in partially written pages first)

	if (pageOffset != 0) {
		// This is only a partial write, so we have to read the rest of the page
		// from the file to have consistent data in the cache
		generic_io_vec readVec = { vecs[0].base, B_PAGE_SIZE };
		generic_size_t bytesRead = B_PAGE_SIZE;

		status = vfs_read_pages(ref->vnode, cookie, offset, &readVec, 1,
			B_PHYSICAL_IO_REQUEST, &bytesRead);
		// TODO: handle errors for real!
		if (status < B_OK)
			panic("1. vfs_read_pages() failed: %s!\n", strerror(status));
	}

	size_t lastPageOffset = (pageOffset + bufferSize) % B_PAGE_SIZE;
	if (lastPageOffset != 0) {
		// get the last page in the I/O vectors
		generic_addr_t last = vecs[vecCount - 1].base
			+ vecs[vecCount - 1].length - B_PAGE_SIZE;

		if ((off_t)(offset + pageOffset + bufferSize) == ref->cache->virtual_end) {
			// the space in the page after this write action needs to be cleaned
			vm_memset_physical(last + lastPageOffset, 0,
				B_PAGE_SIZE - lastPageOffset);
		} else {
			// the end of this write does not happen on a page boundary, so we
			// need to fetch the last page before we can update it
			generic_io_vec readVec = { last, B_PAGE_SIZE };
			generic_size_t bytesRead = B_PAGE_SIZE;

			status = vfs_read_pages(ref->vnode, cookie,
				PAGE_ALIGN(offset + pageOffset + bufferSize) - B_PAGE_SIZE,
				&readVec, 1, B_PHYSICAL_IO_REQUEST, &bytesRead);
			// TODO: handle errors for real!
			if (status < B_OK)
				panic("vfs_read_pages() failed: %s!\n", strerror(status));

			if (bytesRead < B_PAGE_SIZE) {
				// the space beyond the file size needs to be cleaned
				vm_memset_physical(last + bytesRead, 0,
					B_PAGE_SIZE - bytesRead);
			}
		}
	}

	for (uint32 i = 0; i < vecCount; i++) {
		generic_addr_t base = vecs[i].base;
		generic_size_t bytes = min_c((generic_size_t)bufferSize,
			generic_size_t(vecs[i].length - pageOffset));

		if (useBuffer) {
			// copy data from user buffer
			vm_memcpy_to_physical(base + pageOffset, (void*)buffer, bytes,
				IS_USER_ADDRESS(buffer));
		} else {
			// clear buffer instead
			vm_memset_physical(base + pageOffset, 0, bytes);
		}

		bufferSize -= bytes;
		if (bufferSize == 0)
			break;

		buffer += bytes;
		pageOffset = 0;
	}

	if (writeThrough) {
		// Write the cached pages back to the file if we were asked to.
		// Assign to the outer `status` instead of shadowing it, so that a
		// write failure is propagated to the caller.
		status = vfs_write_pages(ref->vnode, cookie, offset, vecs,
			vecCount, B_PHYSICAL_IO_REQUEST, &numBytes);
		if (status < B_OK) {
			// TODO: remove allocated pages, ...?
			panic("file_cache: remove allocated pages! write pages failed: %s\n",
				strerror(status));
		}
	}

	if (status == B_OK)
		reserve_pages(ref, reservation, reservePages, true);

	ref->cache->Lock();

	// make the pages accessible in the cache
	for (int32 i = pageIndex; i-- > 0;) {
		ref->cache->MarkPageUnbusy(pages[i]);

		DEBUG_PAGE_ACCESS_END(pages[i]);
	}

	return status;
}
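
The two read-before-write fixups above come down to modular arithmetic on
where the write starts and ends within its first and last pages. A standalone
sketch of just that arithmetic (the page-size constant and all names here are
illustrative; B_PAGE_SIZE plays this role in the kernel code):

#include <stdbool.h>
#include <stddef.h>

#define MY_PAGE_SIZE 4096

// For a write of `bufferSize` bytes starting `pageOffset` bytes into its
// first page, decide which boundary pages need attention before copying.
static void
partial_page_fixups(size_t pageOffset, size_t bufferSize,
	bool* _readFirstPage, size_t* _lastPageOffset)
{
	// The first page must be read in whenever the write starts mid-page.
	*_readFirstPage = pageOffset != 0;

	// If the write ends mid-page, write_to_cache() either zeroes the tail
	// (when the write ends exactly at the end of the file) or reads the
	// last page in first; a result of 0 means no fixup is needed.
	*_lastPageOffset = (pageOffset + bufferSize) % MY_PAGE_SIZE;
}

For example, pageOffset = 50 and bufferSize = 100 triggers both fixups: the
(single) page is read in first, and the write ends 150 bytes into it, leaving
a 3946-byte tail to resolve.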