Example No. 1
static int __init mymodule(void) {
  int error = 1;
  int free_pages = nr_free_pages(); // get the number of free pages

#ifdef DEBUG_NOT_NOW
  printk(KERN_ALERT "Nr of Inactive Clean pages=%d\n", count_free_pages());
#endif

  if (free_pages > TARGET_PAGES) { // if there are available free pages
    if (!(fill_contiguous(TARGET_PAGES)))
      goto out_free;
#ifdef OOPS
    if (nr_r_contiguous)
      my_free(index); // free off the non-contiguous part
#endif
    reserve_pages(r_contiguous_array, nr_r_contiguous, 1); // reserve

    if (allocator_initialise(r_contiguous_array, nr_r_contiguous)) {
#ifdef DEBUG
      printk(KERN_ALERT "Failed to initialise the allocator device:\n");
#endif
      goto out_free_allocator;
    }
  }
  error = 0;
  goto out;

out_free_allocator:
  allocator_cleanup(); // clean up the allocator
out_free:
  destroy_contiguous(&v_contiguous_list);
out:
  return error;
}
Example No. 2
static void __exit mymodule_cleanup(void) {
  allocator_cleanup(); // clean up the allocator
  reserve_pages(r_contiguous_array, nr_r_contiguous, 0); // unreserve
  destroy_contiguous(&v_contiguous_list);
  my_free_pages(vmalloc_addr);
}
Example No. 3
static status_t
read_from_file(file_cache_ref* ref, void* cookie, off_t offset,
	int32 pageOffset, addr_t buffer, size_t bufferSize, bool useBuffer,
	vm_page_reservation* reservation, size_t reservePages)
{
	TRACE(("read_from_file(offset = %Ld, pageOffset = %ld, buffer = %#lx, "
		"bufferSize = %lu\n", offset, pageOffset, buffer, bufferSize));

	if (!useBuffer)
		return B_OK;

	generic_io_vec vec;
	vec.base = buffer;
	vec.length = bufferSize;

	push_access(ref, offset, bufferSize, false);
	ref->cache->Unlock();
	vm_page_unreserve_pages(reservation);

	generic_size_t toRead = bufferSize;
	status_t status = vfs_read_pages(ref->vnode, cookie, offset + pageOffset,
		&vec, 1, 0, &toRead);

	if (status == B_OK)
		reserve_pages(ref, reservation, reservePages, false);

	ref->cache->Lock();

	return status;
}
Example No. 4
static status_t
write_to_file(file_cache_ref* ref, void* cookie, off_t offset, int32 pageOffset,
	addr_t buffer, size_t bufferSize, bool useBuffer,
	vm_page_reservation* reservation, size_t reservePages)
{
	push_access(ref, offset, bufferSize, true);
	ref->cache->Unlock();
	vm_page_unreserve_pages(reservation);

	status_t status = B_OK;

	if (!useBuffer) {
		while (bufferSize > 0) {
			generic_size_t written = min_c(bufferSize, kZeroVecSize);
			status = vfs_write_pages(ref->vnode, cookie, offset + pageOffset,
				sZeroVecs, kZeroVecCount, B_PHYSICAL_IO_REQUEST, &written);
			if (status != B_OK)
				return status;
			if (written == 0)
				return B_ERROR;

			bufferSize -= written;
			pageOffset += written;
		}
	} else {
		generic_io_vec vec;
		vec.base = buffer;
		vec.length = bufferSize;
		generic_size_t toWrite = bufferSize;
		status = vfs_write_pages(ref->vnode, cookie, offset + pageOffset,
			&vec, 1, 0, &toWrite);
	}

	if (status == B_OK)
		reserve_pages(ref, reservation, reservePages, true);

	ref->cache->Lock();

	return status;
}
Example No. 5
void setup_page_directory(struct Process* process, int kernel) {

    struct ProcessMemory* mm = &process->mm;
    if (kernel) {
        mm->directory = NULL;
        return;
    }

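    // reserve a single physical page to hold this process's page directory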
    struct Pages* pages = reserve_pages(process, 1);
    assert(pages != NULL);

    struct PageDirectory* directory = pages->start;
    mm_pagination_clear_directory(directory);
    process->mm.directory = directory;

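    // identity-map the first 1024 pages (low memory) for this process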
    mm_pagination_map(process, 0, 0, 1024, 1, 1);

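    // identity-map the pages holding the IDT, GDT, and task state so they remain reachable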
    unsigned int idt = idt_page_address();
    mm_pagination_map(process, idt, idt, 1, 0, 0);

    unsigned int gdt = gdt_page_address();
    mm_pagination_map(process, gdt, gdt, 1, 0, 0);

    unsigned int ts = task_page_address();
    mm_pagination_map(process, ts, ts, 1, 0, 0);

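    // identity-map the pages backing the process's kernel-mode stack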
    unsigned int kernelStackBottom = (unsigned int) mm->kernelStack - mm->pagesInKernelStack * PAGE_SIZE;
    mm_pagination_map(process, kernelStackBottom, kernelStackBottom, mm->pagesInKernelStack, 0, 1);

    if (mm->mallocContext) {
        mm_pagination_map(process, mm->mallocContext, mm->mallocContext, mm->pagesInHeap, 1, 1);
    }

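    // map the user stack pages so the stack's top sits at STACK_TOP_MAPPING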
    if (mm->esp) {
        unsigned int stackBottom = (unsigned int) mm->esp - mm->pagesInStack * PAGE_SIZE;
        mm_pagination_map(process, stackBottom, STACK_TOP_MAPPING - mm->pagesInStack * PAGE_SIZE, mm->pagesInStack, 1, 1);
    }
}
Example No. 6
void createProcess(struct Process* process, EntryPoint entryPoint, struct Process* parent, char* args, int terminal, int kernel) {

    process->pid = ++pid;
    process->kernel = !!kernel;
    process->terminal = terminal;
    process->active = 0;

    process->parent = parent;
    process->firstChild = NULL;

    process->cycles = 0;
    process->curr_cycles = 0;
    process->prev_cycles = 0;
    process->timeStart = _time(NULL);

    process->uid = 0;
    process->gid = 0;
    if (parent != NULL) {
        process->uid = parent->uid;
        process->gid = parent->gid;
    }

    process->prev = NULL;
    if (parent == NULL) {
        process->ppid = 0;
        process->next = NULL;

        process->cwd = kalloc(2 * sizeof(char));
        strcpy(process->cwd, "/");
    } else {
        process->ppid = parent->pid;

        process->next = parent->firstChild;
        if (parent->firstChild) {
            parent->firstChild->prev = process;
        }

        parent->firstChild = process;

        process->cwd = kalloc(strlen(parent->cwd) + 1);
        strcpy(process->cwd, parent->cwd);
    }

    process->entryPoint = entryPoint;
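    // copy at most ARGV_SIZE - 1 bytes of the argument string and NUL-terminate it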
    if (args == NULL) {
        process->args[0] = 0;
    } else {
        int i;
        for (i = 0; *(args + i) && i < ARGV_SIZE - 1; i++) {
            process->args[i] = args[i];
        }
        process->args[i] = 0;
    }

    process->schedule.priority = 2;
    process->schedule.status = StatusReady;
    process->schedule.inWait = 0;
    process->schedule.ioWait = 0;
    process->schedule.done = 0;

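    // inherit the parent's open files: duplicate every descriptor that has an inode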
    for (size_t i = 0; i < MAX_OPEN_FILES; i++) {

        if (parent && parent->fdTable[i].inode) {
            fs_dup(&process->fdTable[i], parent->fdTable[i]);
        } else {
            process->fdTable[i].inode = NULL;
        }
    }

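    // reserve the kernel-mode stack; esp0 starts at its top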
    {
        process->mm.pagesInKernelStack = KERNEL_STACK_PAGES;
        struct Pages* mem = reserve_pages(process, process->mm.pagesInKernelStack);
        assert(mem != NULL);

        process->mm.esp0 = (char*)mem->start + PAGE_SIZE * process->mm.pagesInKernelStack;
        process->mm.kernelStack = process->mm.esp0;
    }

    if (kernel) {
        process->mm.pagesInHeap = 0;
        process->mm.mallocContext = NULL;
    } else {
        process->mm.pagesInHeap = 256;
        struct Pages* mem = reserve_pages(process, process->mm.pagesInHeap);
        assert(mem != NULL);

        process->mm.mallocContext = mm_create_context(mem->start, process->mm.pagesInHeap * PAGE_SIZE);
        mem_check();
    }

    if (!kernel) {
        process->mm.pagesInStack = 16;
        struct Pages* mem = reserve_pages(process, process->mm.pagesInStack);
        assert(mem != NULL);

        process->mm.esp = (char*)mem->start + PAGE_SIZE * process->mm.pagesInStack;
    } else {
        process->mm.pagesInStack = 0;
        process->mm.esp = NULL;
    }

    setup_page_directory(process, kernel);

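    // user processes get one extra page mapped at STACK_TOP_MAPPING, seeded with FILE entries for fds 0-2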
    if (!kernel) {
        struct Pages* ungetPage = reserve_pages(process, 1);
        assert(ungetPage != NULL);
        mm_pagination_map(process, (unsigned int)ungetPage->start, (unsigned int)STACK_TOP_MAPPING, 1, 1, 1);
        FILE* files;
        
        for (int i = 0; i < 3; i++) {
            files = ungetPage->start + i * sizeof(FILE);
            files->fd = i;
            files->flag = 0;
            files->unget = 0;
        }
    }

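    // build the initial stack frame (segments, flags, entry point) that is restored when the process first runs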
    int codeSegment, dataSegment;
    if (kernel) {
        codeSegment = KERNEL_CODE_SEGMENT;
        dataSegment = KERNEL_DATA_SEGMENT;

        char* esp0 = (char*) process->mm.esp0 - ARGV_SIZE;
        for (size_t i = 0; i < ARGV_SIZE; i++) {
            esp0[i] = process->args[i];
        }
        process->mm.esp0 = esp0;

        push((unsigned int**) &process->mm.esp0, (unsigned int) process->mm.esp0);
        push((unsigned int**) &process->mm.esp0, (unsigned int) exit);
        push((unsigned int**) &process->mm.esp0, 0x202);
    } else {
        codeSegment = USER_CODE_SEGMENT;
        dataSegment = USER_DATA_SEGMENT;

        char* esp = (char*) process->mm.esp - ARGV_SIZE;
        for (size_t i = 0; i < ARGV_SIZE; i++) {
            esp[i] = process->args[i];
        }
        process->mm.esp = esp;

        push((unsigned int**) &process->mm.esp, STACK_TOP_MAPPING - ARGV_SIZE);
        push((unsigned int**) &process->mm.esp, (unsigned int) exit);

        push((unsigned int**) &process->mm.esp0, dataSegment);
        push((unsigned int**) &process->mm.esp0, STACK_TOP_MAPPING - 2 * sizeof(int) - ARGV_SIZE);
        push((unsigned int**) &process->mm.esp0, 0x3202);
    }

    push((unsigned int**) &process->mm.esp0, codeSegment);
    push((unsigned int**) &process->mm.esp0, (unsigned int) entryPoint);
    push((unsigned int**) &process->mm.esp0, dataSegment);
    push((unsigned int**) &process->mm.esp0, dataSegment);
    push((unsigned int**) &process->mm.esp0, dataSegment);
    push((unsigned int**) &process->mm.esp0, dataSegment);
    push((unsigned int**) &process->mm.esp0, (unsigned int) _interruptEnd);
    push((unsigned int**) &process->mm.esp0, (unsigned int) signalPIC);
    push((unsigned int**) &process->mm.esp0, 0);
}
Example No. 7
static status_t
cache_io(void* _cacheRef, void* cookie, off_t offset, addr_t buffer,
	size_t* _size, bool doWrite)
{
	if (_cacheRef == NULL)
		panic("cache_io() called with NULL ref!\n");

	file_cache_ref* ref = (file_cache_ref*)_cacheRef;
	VMCache* cache = ref->cache;
	off_t fileSize = cache->virtual_end;
	bool useBuffer = buffer != 0;

	TRACE(("cache_io(ref = %p, offset = %Ld, buffer = %p, size = %lu, %s)\n",
		ref, offset, (void*)buffer, *_size, doWrite ? "write" : "read"));

	// out of bounds access?
	if (offset >= fileSize || offset < 0) {
		*_size = 0;
		return B_OK;
	}

	int32 pageOffset = offset & (B_PAGE_SIZE - 1);
	size_t size = *_size;
	offset -= pageOffset;

	if ((off_t)(offset + pageOffset + size) > fileSize) {
		// adapt size to be within the file's offsets
		size = fileSize - pageOffset - offset;
		*_size = size;
	}
	if (size == 0)
		return B_OK;

	// "offset" and "lastOffset" are always aligned to B_PAGE_SIZE,
	// the "last*" variables always point to the end of the last
	// satisfied request part

	const uint32 kMaxChunkSize = MAX_IO_VECS * B_PAGE_SIZE;
	size_t bytesLeft = size, lastLeft = size;
	int32 lastPageOffset = pageOffset;
	addr_t lastBuffer = buffer;
	off_t lastOffset = offset;
	size_t lastReservedPages = min_c(MAX_IO_VECS, (pageOffset + bytesLeft
		+ B_PAGE_SIZE - 1) >> PAGE_SHIFT);
	size_t reservePages = 0;
	size_t pagesProcessed = 0;
	cache_func function = NULL;

	vm_page_reservation reservation;
	reserve_pages(ref, &reservation, lastReservedPages, doWrite);

	AutoLocker<VMCache> locker(cache);

	while (bytesLeft > 0) {
		// Periodically reevaluate the low memory situation and select the
		// read/write hook accordingly
		if (pagesProcessed % 32 == 0) {
			if (size >= BYPASS_IO_SIZE
				&& low_resource_state(B_KERNEL_RESOURCE_PAGES)
					!= B_NO_LOW_RESOURCE) {
				// In low memory situations we bypass the cache beyond a
				// certain I/O size.
				function = doWrite ? write_to_file : read_from_file;
			} else
				function = doWrite ? write_to_cache : read_into_cache;
		}

		// check if this page is already in memory
		vm_page* page = cache->LookupPage(offset);
		if (page != NULL) {
			// The page may be busy - since we need to unlock the cache sometime
			// in the near future, we need to satisfy the request of the pages
			// we didn't get yet (to make sure no one else interferes in the
			// meantime).
			status_t status = satisfy_cache_io(ref, cookie, function, offset,
				buffer, useBuffer, pageOffset, bytesLeft, reservePages,
				lastOffset, lastBuffer, lastPageOffset, lastLeft,
				lastReservedPages, &reservation);
			if (status != B_OK)
				return status;

			// Since satisfy_cache_io() unlocks the cache, we need to look up
			// the page again.
			page = cache->LookupPage(offset);
			if (page != NULL && page->busy) {
				cache->WaitForPageEvents(page, PAGE_EVENT_NOT_BUSY, true);
				continue;
			}
		}

		size_t bytesInPage = min_c(size_t(B_PAGE_SIZE - pageOffset), bytesLeft);

		TRACE(("lookup page from offset %Ld: %p, size = %lu, pageOffset "
			"= %lu\n", offset, page, bytesLeft, pageOffset));

		if (page != NULL) {
			if (doWrite || useBuffer) {
				// Since the following user_mem{cpy,set}() might cause a page
				// fault, which in turn might cause pages to be reserved, we
				// need to unlock the cache temporarily to avoid a potential
				// deadlock. To make sure that our page doesn't go away, we mark
				// it busy for the time.
				page->busy = true;
				locker.Unlock();

				// copy the contents of the page already in memory
				phys_addr_t pageAddress
					= (phys_addr_t)page->physical_page_number * B_PAGE_SIZE
						+ pageOffset;
				bool userBuffer = IS_USER_ADDRESS(buffer);
				if (doWrite) {
					if (useBuffer) {
						vm_memcpy_to_physical(pageAddress, (void*)buffer,
							bytesInPage, userBuffer);
					} else {
						vm_memset_physical(pageAddress, 0, bytesInPage);
					}
				} else if (useBuffer) {
					vm_memcpy_from_physical((void*)buffer, pageAddress,
						bytesInPage, userBuffer);
				}

				locker.Lock();

				if (doWrite) {
					DEBUG_PAGE_ACCESS_START(page);

					page->modified = true;

					if (page->State() != PAGE_STATE_MODIFIED)
						vm_page_set_state(page, PAGE_STATE_MODIFIED);

					DEBUG_PAGE_ACCESS_END(page);
				}

				cache->MarkPageUnbusy(page);
			}

			// If it is cached only, requeue the page, so the respective queue
			// roughly remains LRU first sorted.
			if (page->State() == PAGE_STATE_CACHED
					|| page->State() == PAGE_STATE_MODIFIED) {
				DEBUG_PAGE_ACCESS_START(page);
				vm_page_requeue(page, true);
				DEBUG_PAGE_ACCESS_END(page);
			}

			if (bytesLeft <= bytesInPage) {
				// we've read the last page, so we're done!
				locker.Unlock();
				vm_page_unreserve_pages(&reservation);
				return B_OK;
			}

			// prepare a potential gap request
			lastBuffer = buffer + bytesInPage;
			lastLeft = bytesLeft - bytesInPage;
			lastOffset = offset + B_PAGE_SIZE;
			lastPageOffset = 0;
		}

		if (bytesLeft <= bytesInPage)
			break;

		buffer += bytesInPage;
		bytesLeft -= bytesInPage;
		pageOffset = 0;
		offset += B_PAGE_SIZE;
		pagesProcessed++;

		if (buffer - lastBuffer + lastPageOffset >= kMaxChunkSize) {
			status_t status = satisfy_cache_io(ref, cookie, function, offset,
				buffer, useBuffer, pageOffset, bytesLeft, reservePages,
				lastOffset, lastBuffer, lastPageOffset, lastLeft,
				lastReservedPages, &reservation);
			if (status != B_OK)
				return status;
		}
	}

	// fill the last remaining bytes of the request (either write or read)

	return function(ref, cookie, lastOffset, lastPageOffset, lastBuffer,
		lastLeft, useBuffer, &reservation, 0);
}
Example No. 8
/*!	Like read_into_cache() but writes data into the cache.
	To preserve data consistency, it might also read pages into the cache,
	though, if only a partial page gets written.
	The same restrictions apply.
*/
static status_t
write_to_cache(file_cache_ref* ref, void* cookie, off_t offset,
	int32 pageOffset, addr_t buffer, size_t bufferSize, bool useBuffer,
	vm_page_reservation* reservation, size_t reservePages)
{
	// TODO: We're using way too much stack! Rather allocate a sufficiently
	// large chunk on the heap.
	generic_io_vec vecs[MAX_IO_VECS];
	uint32 vecCount = 0;
	generic_size_t numBytes = PAGE_ALIGN(pageOffset + bufferSize);
	vm_page* pages[MAX_IO_VECS];
	int32 pageIndex = 0;
	status_t status = B_OK;

	// ToDo: this should be settable somewhere
	bool writeThrough = false;

	// allocate pages for the cache and mark them busy
	for (generic_size_t pos = 0; pos < numBytes; pos += B_PAGE_SIZE) {
		// TODO: if space is becoming tight, and this cache is already grown
		//	big - shouldn't we better steal the pages directly in that case?
		//	(a working set like approach for the file cache)
		// TODO: the pages we allocate here should have been reserved upfront
		//	in cache_io()
		vm_page* page = pages[pageIndex++] = vm_page_allocate_page(
			reservation,
			(writeThrough ? PAGE_STATE_CACHED : PAGE_STATE_MODIFIED)
				| VM_PAGE_ALLOC_BUSY);

		page->modified = !writeThrough;

		ref->cache->InsertPage(page, offset + pos);

		add_to_iovec(vecs, vecCount, MAX_IO_VECS,
			page->physical_page_number * B_PAGE_SIZE, B_PAGE_SIZE);
	}

	push_access(ref, offset, bufferSize, true);
	ref->cache->Unlock();
	vm_page_unreserve_pages(reservation);

	// copy contents (and read in partially written pages first)

	if (pageOffset != 0) {
		// This is only a partial write, so we have to read the rest of the page
		// from the file to have consistent data in the cache
		generic_io_vec readVec = { vecs[0].base, B_PAGE_SIZE };
		generic_size_t bytesRead = B_PAGE_SIZE;

		status = vfs_read_pages(ref->vnode, cookie, offset, &readVec, 1,
			B_PHYSICAL_IO_REQUEST, &bytesRead);
		// ToDo: handle errors for real!
		if (status < B_OK)
			panic("1. vfs_read_pages() failed: %s!\n", strerror(status));
	}

	size_t lastPageOffset = (pageOffset + bufferSize) % B_PAGE_SIZE;
	if (lastPageOffset != 0) {
		// get the last page in the I/O vectors
		generic_addr_t last = vecs[vecCount - 1].base
			+ vecs[vecCount - 1].length - B_PAGE_SIZE;

		if ((off_t)(offset + pageOffset + bufferSize) == ref->cache->virtual_end) {
			// the space in the page after this write action needs to be cleaned
			vm_memset_physical(last + lastPageOffset, 0,
				B_PAGE_SIZE - lastPageOffset);
		} else {
			// the end of this write does not happen on a page boundary, so we
			// need to fetch the last page before we can update it
			generic_io_vec readVec = { last, B_PAGE_SIZE };
			generic_size_t bytesRead = B_PAGE_SIZE;

			status = vfs_read_pages(ref->vnode, cookie,
				PAGE_ALIGN(offset + pageOffset + bufferSize) - B_PAGE_SIZE,
				&readVec, 1, B_PHYSICAL_IO_REQUEST, &bytesRead);
			// ToDo: handle errors for real!
			if (status < B_OK)
				panic("vfs_read_pages() failed: %s!\n", strerror(status));

			if (bytesRead < B_PAGE_SIZE) {
				// the space beyond the file size needs to be cleaned
				vm_memset_physical(last + bytesRead, 0,
					B_PAGE_SIZE - bytesRead);
			}
		}
	}

	for (uint32 i = 0; i < vecCount; i++) {
		generic_addr_t base = vecs[i].base;
		generic_size_t bytes = min_c((generic_size_t)bufferSize,
			generic_size_t(vecs[i].length - pageOffset));

		if (useBuffer) {
			// copy data from user buffer
			vm_memcpy_to_physical(base + pageOffset, (void*)buffer, bytes,
				IS_USER_ADDRESS(buffer));
		} else {
			// clear buffer instead
			vm_memset_physical(base + pageOffset, 0, bytes);
		}

		bufferSize -= bytes;
		if (bufferSize == 0)
			break;

		buffer += bytes;
		pageOffset = 0;
	}

	if (writeThrough) {
		// write cached pages back to the file if we were asked to do that
		status_t status = vfs_write_pages(ref->vnode, cookie, offset, vecs,
			vecCount, B_PHYSICAL_IO_REQUEST, &numBytes);
		if (status < B_OK) {
			// ToDo: remove allocated pages, ...?
			panic("file_cache: remove allocated pages! write pages failed: %s\n",
				strerror(status));
		}
	}

	if (status == B_OK)
		reserve_pages(ref, reservation, reservePages, true);

	ref->cache->Lock();

	// make the pages accessible in the cache
	for (int32 i = pageIndex; i-- > 0;) {
		ref->cache->MarkPageUnbusy(pages[i]);

		DEBUG_PAGE_ACCESS_END(pages[i]);
	}

	return status;
}
Example No. 9
/*!	Reads the requested amount of data into the cache, and allocates
	pages needed to fulfill that request. This function is called by cache_io().
	It can only handle a certain amount of bytes, and the caller must make
	sure that it matches that criterion.
	The cache_ref lock must be held when calling this function; during
	operation it will unlock the cache, though.
*/
static status_t
read_into_cache(file_cache_ref* ref, void* cookie, off_t offset,
	int32 pageOffset, addr_t buffer, size_t bufferSize, bool useBuffer,
	vm_page_reservation* reservation, size_t reservePages)
{
	TRACE(("read_into_cache(offset = %Ld, pageOffset = %ld, buffer = %#lx, "
		"bufferSize = %lu\n", offset, pageOffset, buffer, bufferSize));

	VMCache* cache = ref->cache;

	// TODO: We're using way too much stack! Rather allocate a sufficiently
	// large chunk on the heap.
	generic_io_vec vecs[MAX_IO_VECS];
	uint32 vecCount = 0;

	generic_size_t numBytes = PAGE_ALIGN(pageOffset + bufferSize);
	vm_page* pages[MAX_IO_VECS];
	int32 pageIndex = 0;

	// allocate pages for the cache and mark them busy
	for (generic_size_t pos = 0; pos < numBytes; pos += B_PAGE_SIZE) {
		vm_page* page = pages[pageIndex++] = vm_page_allocate_page(
			reservation, PAGE_STATE_CACHED | VM_PAGE_ALLOC_BUSY);

		cache->InsertPage(page, offset + pos);

		add_to_iovec(vecs, vecCount, MAX_IO_VECS,
			page->physical_page_number * B_PAGE_SIZE, B_PAGE_SIZE);
			// TODO: check if the array is large enough (currently panics)!
	}

	push_access(ref, offset, bufferSize, false);
	cache->Unlock();
	vm_page_unreserve_pages(reservation);

	// read file into reserved pages
	status_t status = read_pages_and_clear_partial(ref, cookie, offset, vecs,
		vecCount, B_PHYSICAL_IO_REQUEST, &numBytes);
	if (status != B_OK) {
		// reading failed, free allocated pages

		dprintf("file_cache: read pages failed: %s\n", strerror(status));

		cache->Lock();

		for (int32 i = 0; i < pageIndex; i++) {
			cache->NotifyPageEvents(pages[i], PAGE_EVENT_NOT_BUSY);
			cache->RemovePage(pages[i]);
			vm_page_set_state(pages[i], PAGE_STATE_FREE);
		}

		return status;
	}

	// copy the pages if needed and unmap them again

	for (int32 i = 0; i < pageIndex; i++) {
		if (useBuffer && bufferSize != 0) {
			size_t bytes = min_c(bufferSize, (size_t)B_PAGE_SIZE - pageOffset);

			vm_memcpy_from_physical((void*)buffer,
				pages[i]->physical_page_number * B_PAGE_SIZE + pageOffset,
				bytes, IS_USER_ADDRESS(buffer));

			buffer += bytes;
			bufferSize -= bytes;
			pageOffset = 0;
		}
	}

	reserve_pages(ref, reservation, reservePages, false);
	cache->Lock();

	// make the pages accessible in the cache
	for (int32 i = pageIndex; i-- > 0;) {
		DEBUG_PAGE_ACCESS_END(pages[i]);

		cache->MarkPageUnbusy(pages[i]);
	}

	return B_OK;
}