static fssh_status_t
cache_io(void *_cacheRef, void *cookie, fssh_off_t offset, fssh_addr_t buffer,
         fssh_size_t *_size, bool doWrite)
{
    if (_cacheRef == NULL)
        fssh_panic("cache_io() called with NULL ref!\n");

    file_cache_ref *ref = (file_cache_ref *)_cacheRef;
    fssh_off_t fileSize = ref->virtual_size;

    TRACE(("cache_io(ref = %p, offset = %Ld, buffer = %p, size = %u, %s)\n",
           ref, offset, (void *)buffer, *_size, doWrite ? "write" : "read"));

    // out of bounds access?
    if (offset >= fileSize || offset < 0) {
        *_size = 0;
        return FSSH_B_OK;
    }

    int32_t pageOffset = offset & (FSSH_B_PAGE_SIZE - 1);
    fssh_size_t size = *_size;
    offset -= pageOffset;
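    // (e.g. with 4096-byte pages, a request at offset 5000 now has
    // pageOffset = 904 and a page-aligned offset of 4096)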

    if ((uint64_t)offset + pageOffset + size > (uint64_t)fileSize) {
        // adapt size to be within the file's offsets
        size = fileSize - pageOffset - offset;
        *_size = size;
    }
    if (size == 0)
        return FSSH_B_OK;

    // This fs_shell variant has no page cache to consult: both hooks go
    // straight to the file, and the loop below merely splits the request
    // into page-aligned chunks.
    cache_func function;
    if (doWrite)
        function = write_to_file;
    else
        function = read_from_file;

    // "offset" and "lastOffset" are always aligned to B_PAGE_SIZE,
    // the "last*" variables always point to the end of the last
    // satisfied request part

    const uint32_t kMaxChunkSize = MAX_IO_VECS * FSSH_B_PAGE_SIZE;
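    // (each I/O issued by the loop below therefore spans at most
    // MAX_IO_VECS pages)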
    fssh_size_t bytesLeft = size, lastLeft = size;
    int32_t lastPageOffset = pageOffset;
    fssh_addr_t lastBuffer = buffer;
    fssh_off_t lastOffset = offset;

    MutexLocker locker(ref->lock);

    while (bytesLeft > 0) {
        // how much of the request falls within the current page
        fssh_size_t bytesInPage = fssh_min_c(
            fssh_size_t(FSSH_B_PAGE_SIZE - pageOffset), bytesLeft);

        if (bytesLeft <= bytesInPage)
            break;

        buffer += bytesInPage;
        bytesLeft -= bytesInPage;
        pageOffset = 0;
        offset += FSSH_B_PAGE_SIZE;

        if (buffer - lastBuffer + lastPageOffset >= kMaxChunkSize) {
            fssh_status_t status = satisfy_cache_io(ref, cookie, function,
                offset, buffer, pageOffset, bytesLeft, lastOffset,
                lastBuffer, lastPageOffset, lastLeft);
            if (status != FSSH_B_OK)
                return status;
        }
    }

    // fill the last remaining bytes of the request (either write or read)

    return function(ref, cookie, lastOffset, lastPageOffset, lastBuffer,
                    lastLeft);
}
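
The loop above is pure partitioning arithmetic: it advances page by page and hands a chunk to the I/O hook whenever the run since the last hand-off reaches kMaxChunkSize. The standalone sketch below restates just that logic; it is a minimal illustration, not Haiku code, and kPageSize, kMaxIoVecs, and emitChunk are made-up stand-ins for FSSH_B_PAGE_SIZE, MAX_IO_VECS, and the read/write hooks.

#include <algorithm>
#include <cstdint>
#include <cstdio>

static const uint32_t kPageSize = 4096;      // stand-in for FSSH_B_PAGE_SIZE
static const uint32_t kMaxIoVecs = 32;       // stand-in for MAX_IO_VECS
static const uint32_t kMaxChunkSize = kMaxIoVecs * kPageSize;

// Stand-in for the read/write hook: just report what would be transferred.
static void
emitChunk(int64_t offset, int32_t pageOffset, size_t length)
{
    printf("chunk: page-aligned offset %lld, +%d into page, %zu bytes\n",
        (long long)offset, (int)pageOffset, length);
}

// Mirrors the loop structure of cache_io(): walk the request page by page
// and emit a chunk whenever kMaxChunkSize bytes have accumulated.
static void
partitionRequest(int64_t offset, size_t size)
{
    int32_t pageOffset = int32_t(offset & (kPageSize - 1));
    offset -= pageOffset;

    size_t bytesLeft = size;
    int64_t lastOffset = offset;
    int32_t lastPageOffset = pageOffset;
    size_t processed = 0;    // plays the role of buffer - lastBuffer

    while (bytesLeft > 0) {
        size_t bytesInPage
            = std::min(size_t(kPageSize - pageOffset), bytesLeft);
        if (bytesLeft <= bytesInPage)
            break;

        processed += bytesInPage;
        bytesLeft -= bytesInPage;
        pageOffset = 0;
        offset += kPageSize;

        if (processed + lastPageOffset >= kMaxChunkSize) {
            emitChunk(lastOffset, lastPageOffset, processed);
            lastOffset = offset;
            lastPageOffset = 0;
            processed = 0;
        }
    }

    // the remainder of the request (cache_io()'s final call to "function")
    emitChunk(lastOffset, lastPageOffset, processed + bytesLeft);
}

int
main()
{
    partitionRequest(5000, 300 * 1024);    // a 300 KiB request at offset 5000
    return 0;
}

For the 300 KiB request at offset 5000 this prints chunks of 130168, 131072, and 45960 bytes (summing to 307200), which is exactly how cache_io() would carve the same request before handing each piece to read_from_file() or write_to_file().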
Example #2 (the full kernel version, backed by an actual VMCache):
static status_t
cache_io(void* _cacheRef, void* cookie, off_t offset, addr_t buffer,
	size_t* _size, bool doWrite)
{
	if (_cacheRef == NULL)
		panic("cache_io() called with NULL ref!\n");

	file_cache_ref* ref = (file_cache_ref*)_cacheRef;
	VMCache* cache = ref->cache;
	off_t fileSize = cache->virtual_end;
	bool useBuffer = buffer != 0;

	TRACE(("cache_io(ref = %p, offset = %Ld, buffer = %p, size = %lu, %s)\n",
		ref, offset, (void*)buffer, *_size, doWrite ? "write" : "read"));

	// out of bounds access?
	if (offset >= fileSize || offset < 0) {
		*_size = 0;
		return B_OK;
	}

	int32 pageOffset = offset & (B_PAGE_SIZE - 1);
	size_t size = *_size;
	offset -= pageOffset;

	if ((off_t)(offset + pageOffset + size) > fileSize) {
		// adapt size to be within the file's offsets
		size = fileSize - pageOffset - offset;
		*_size = size;
	}
	if (size == 0)
		return B_OK;

	// "offset" and "lastOffset" are always aligned to B_PAGE_SIZE,
	// the "last*" variables always point to the end of the last
	// satisfied request part

	const uint32 kMaxChunkSize = MAX_IO_VECS * B_PAGE_SIZE;
	size_t bytesLeft = size, lastLeft = size;
	int32 lastPageOffset = pageOffset;
	addr_t lastBuffer = buffer;
	off_t lastOffset = offset;
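	// number of pages the remaining request spans, capped at one chunk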
	size_t lastReservedPages = min_c(MAX_IO_VECS, (pageOffset + bytesLeft
		+ B_PAGE_SIZE - 1) >> PAGE_SHIFT);
	size_t reservePages = 0;
	size_t pagesProcessed = 0;
	cache_func function = NULL;

	vm_page_reservation reservation;
	reserve_pages(ref, &reservation, lastReservedPages, doWrite);
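	// Reserving up front lets the read/write hooks later allocate pages from
	// the reservation without blocking while the cache lock is held.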

	AutoLocker<VMCache> locker(cache);

	while (bytesLeft > 0) {
		// Periodically reevaluate the low memory situation and select the
		// read/write hook accordingly
		if (pagesProcessed % 32 == 0) {
			if (size >= BYPASS_IO_SIZE
				&& low_resource_state(B_KERNEL_RESOURCE_PAGES)
					!= B_NO_LOW_RESOURCE) {
				// In low memory situations we bypass the cache beyond a
				// certain I/O size.
				function = doWrite ? write_to_file : read_from_file;
			} else
				function = doWrite ? write_to_cache : read_into_cache;
		}

		// check if this page is already in memory
		vm_page* page = cache->LookupPage(offset);
		if (page != NULL) {
			// The page may be busy - since we need to unlock the cache sometime
			// in the near future, we need to satisfy the request for the pages
			// we didn't get yet (to make sure no one else interferes in the
			// meantime).
			status_t status = satisfy_cache_io(ref, cookie, function, offset,
				buffer, useBuffer, pageOffset, bytesLeft, reservePages,
				lastOffset, lastBuffer, lastPageOffset, lastLeft,
				lastReservedPages, &reservation);
			if (status != B_OK)
				return status;

			// Since satisfy_cache_io() unlocks the cache, we need to look up
			// the page again.
			page = cache->LookupPage(offset);
			if (page != NULL && page->busy) {
				cache->WaitForPageEvents(page, PAGE_EVENT_NOT_BUSY, true);
				continue;
			}
		}

		size_t bytesInPage = min_c(size_t(B_PAGE_SIZE - pageOffset), bytesLeft);

		TRACE(("lookup page from offset %Ld: %p, size = %lu, pageOffset "
			"= %lu\n", offset, page, bytesLeft, pageOffset));

		if (page != NULL) {
			if (doWrite || useBuffer) {
				// Since the following user_mem{cpy,set}() might cause a page
				// fault, which in turn might cause pages to be reserved, we
				// need to unlock the cache temporarily to avoid a potential
				// deadlock. To make sure that our page doesn't go away, we mark
				// it busy for the time being.
				page->busy = true;
				locker.Unlock();

				// copy the contents of the page already in memory
				phys_addr_t pageAddress
					= (phys_addr_t)page->physical_page_number * B_PAGE_SIZE
						+ pageOffset;
				bool userBuffer = IS_USER_ADDRESS(buffer);
				if (doWrite) {
					if (useBuffer) {
						vm_memcpy_to_physical(pageAddress, (void*)buffer,
							bytesInPage, userBuffer);
					} else {
						vm_memset_physical(pageAddress, 0, bytesInPage);
					}
				} else if (useBuffer) {
					vm_memcpy_from_physical((void*)buffer, pageAddress,
						bytesInPage, userBuffer);
				}

				locker.Lock();

				if (doWrite) {
					DEBUG_PAGE_ACCESS_START(page);

					page->modified = true;

					if (page->State() != PAGE_STATE_MODIFIED)
						vm_page_set_state(page, PAGE_STATE_MODIFIED);

					DEBUG_PAGE_ACCESS_END(page);
				}

				cache->MarkPageUnbusy(page);
			}

			// If the page is only cached, requeue it so that its queue stays
			// roughly sorted LRU-first.
			if (page->State() == PAGE_STATE_CACHED
					|| page->State() == PAGE_STATE_MODIFIED) {
				DEBUG_PAGE_ACCESS_START(page);
				vm_page_requeue(page, true);
				DEBUG_PAGE_ACCESS_END(page);
			}

			if (bytesLeft <= bytesInPage) {
				// we've read the last page, so we're done!
				locker.Unlock();
				vm_page_unreserve_pages(&reservation);
				return B_OK;
			}

			// prepare a potential gap request
			lastBuffer = buffer + bytesInPage;
			lastLeft = bytesLeft - bytesInPage;
			lastOffset = offset + B_PAGE_SIZE;
			lastPageOffset = 0;
		}

		if (bytesLeft <= bytesInPage)
			break;

		buffer += bytesInPage;
		bytesLeft -= bytesInPage;
		pageOffset = 0;
		offset += B_PAGE_SIZE;
		pagesProcessed++;

		if (buffer - lastBuffer + lastPageOffset >= kMaxChunkSize) {
			status_t status = satisfy_cache_io(ref, cookie, function, offset,
				buffer, useBuffer, pageOffset, bytesLeft, reservePages,
				lastOffset, lastBuffer, lastPageOffset, lastLeft,
				lastReservedPages, &reservation);
			if (status != B_OK)
				return status;
		}
	}

	// fill the last remaining bytes of the request (either write or read)

	return function(ref, cookie, lastOffset, lastPageOffset, lastBuffer,
		lastLeft, useBuffer, &reservation, 0);
}
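
A closing note on the trickiest part of Example #2, the copy path for a page that is already cached: the page is marked busy, the cache lock is dropped for the copy (which may fault and re-enter the VM), and the lock is retaken before the busy flag is cleared. The sketch below restates that protocol with a plain std::mutex and condition variable; Page and Cache are hypothetical stand-ins for vm_page and VMCache, so treat it as an illustration of the pattern rather than Haiku code.

#include <condition_variable>
#include <cstring>
#include <mutex>

// Hypothetical stand-ins for vm_page and VMCache, reduced to the pieces
// the pattern needs.
struct Page {
	bool busy = false;
	char data[4096];
};

struct Cache {
	std::mutex lock;
	std::condition_variable pageEvent;	// signaled when a page stops being busy
};

// Copy "size" bytes (assumed <= sizeof(Page::data)) into a cached page
// without holding the cache lock across the copy.
void
writeToCachedPage(Cache& cache, Page* page, const char* buffer, size_t size)
{
	std::unique_lock<std::mutex> locker(cache.lock);

	// If someone else owns the page, wait until it is no longer busy,
	// the analogue of WaitForPageEvents(page, PAGE_EVENT_NOT_BUSY, true).
	cache.pageEvent.wait(locker, [page] { return !page->busy; });

	// Mark the page busy so it cannot be evicted or modified by others,
	// then drop the lock for the potentially slow or faulting copy.
	page->busy = true;
	locker.unlock();

	memcpy(page->data, buffer, size);

	// Retake the lock, clear the busy flag, and wake any waiters,
	// the analogue of MarkPageUnbusy().
	locker.lock();
	page->busy = false;
	cache.pageEvent.notify_all();
}

The busy flag is what makes dropping the lock safe: other threads that find the page block on the event instead of touching it, exactly as cache_io() itself does when LookupPage() returns a busy page.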