Code example #1
File: memdev.c    Project: 0xffea/xnu
static int mdevrw(dev_t dev, struct uio *uio, __unused int ioflag) {
	int 			status;
	addr64_t		mdata;
	int 			devid;
	enum uio_seg 	saveflag;

	devid = minor(dev);									/* Get minor device number */

	if (devid > 16) return (ENXIO);						/* Not valid */
	if (!(mdev[devid].mdFlags & mdInited))  return (ENXIO);	/* Have we actually been defined yet? */

	mdata = ((addr64_t)mdev[devid].mdBase << 12) + uio->uio_offset;	/* Point to the area in "file" */
	
	saveflag = uio->uio_segflg;							/* Remember what the request is */
#if LP64_DEBUG
	if (UIO_IS_USER_SPACE(uio) == 0 && UIO_IS_SYS_SPACE(uio) == 0) {
	  panic("mdevrw - invalid uio_segflg\n"); 
	}
#endif /* LP64_DEBUG */
	/* Make sure we are moving from physical ram if physical device */
	if (mdev[devid].mdFlags & mdPhys) {
		if (uio->uio_segflg == UIO_USERSPACE64) 
			uio->uio_segflg = UIO_PHYS_USERSPACE64;	
		else if (uio->uio_segflg == UIO_USERSPACE32)
			uio->uio_segflg = UIO_PHYS_USERSPACE32;	
		else
			uio->uio_segflg = UIO_PHYS_USERSPACE;	
	}
	status = uiomove64(mdata, uio_resid(uio), uio);		/* Move the data */
	uio->uio_segflg = saveflag;							/* Restore the flag */

	return (status);
}
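
The key detail in mdevrw is that mdBase holds a page frame number rather than a byte address, so the shift by 12 converts it to bytes (assuming 4 KiB pages) before uio_offset is added. Below is a minimal standalone sketch of that arithmetic, with hypothetical names; it is illustrative only, not part of memdev.c.

/*
 * Minimal sketch (hypothetical names): converting a page frame number plus a
 * byte offset into a flat 64-bit address, as mdevrw does with mdBase above.
 * Assumes 4 KiB pages, hence the shift by 12.
 */
#include <stdint.h>
#include <stdio.h>

#define EXAMPLE_PAGE_SHIFT 12

static uint64_t frame_to_addr(uint64_t page_frame, uint64_t byte_offset)
{
	return (page_frame << EXAMPLE_PAGE_SHIFT) + byte_offset;
}

int main(void)
{
	/* Page frame 0x1234 at offset 0x56 maps to address 0x1234056. */
	printf("%#llx\n", (unsigned long long)frame_to_addr(0x1234, 0x56));
	return 0;
}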
Code example #2
File: chipsec.cpp    Project: abazhaniuk/chipsec
// Copy the requested amount to userspace if it doesn't cross page boundaries
// or memory mapped io. If it does, stop at the boundary. Will copy zeroes
// if the given physical address is not backed by physical memory.
//
// args: uio is the userspace io request object
// return: number of bytes copied successfully
//
static uint64_t pmem_partial_read(struct uio *uio, addr64_t start_addr,
                                  addr64_t end_addr) {
    // Separate page and offset
    uint64_t page_offset = start_addr & PAGE_MASK;
    addr64_t page = trunc_page_64(start_addr);
    // don't copy across page boundaries
    uint32_t chunk_len = (uint32_t)MIN(PAGE_SIZE - page_offset,
                                       end_addr - start_addr);
    // Prepare the page for IOKit
    IOMemoryDescriptor *page_desc = (
                                     IOMemoryDescriptor::withPhysicalAddress(page, PAGE_SIZE, kIODirectionIn));
    if (page_desc == NULL) {
        pmem_error("Can't read from %#016llx, address not in physical memory range",
                   start_addr);
        // Skip this range as it is not even in the physical address space
        return chunk_len;
    } else {
        // Map the page containing address into kernel address space.
        IOMemoryMap *page_map = (
                                 page_desc->createMappingInTask(kernel_task, 0, kIODirectionIn, 0, 0));
        // Check if the mapping succeeded.
        if (!page_map) {
            pmem_error("page %#016llx could not be mapped into the kernel, "
                       "zero padding return buffer", page);
            // Zero pad this chunk, as it is not inside a valid page frame.
            uiomove64((addr64_t)pmem_zero_page + page_offset,
                      (uint32_t)chunk_len, uio);
        } else {
            // Successfully mapped page, copy contents...
            pmem_log("partial_read");
            log_addr(page_map->getAddress(), 64, "page_map->getAddress()");
            log_addr(page_offset, 64, "page_offset");
            uiomove64(page_map->getAddress() + page_offset, (uint32_t)chunk_len, uio);
            page_map->release();
        }
        page_desc->release();
    }
    return chunk_len;
}
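
Because pmem_partial_read never copies across a page boundary, the device's read handler has to call it in a loop until the uio is drained. The following is a hypothetical caller sketch, not the chipsec driver's actual read routine; it assumes it sits in the same file as pmem_partial_read and uses XNU's uio_offset()/uio_resid() accessors from <sys/uio.h>.

/*
 * Hypothetical caller sketch: drain the uio one page-bounded chunk at a time
 * using pmem_partial_read() from the example above.
 */
static int pmem_read_sketch(__unused dev_t dev, struct uio *uio, __unused int rw)
{
	while (uio_resid(uio) > 0) {
		addr64_t start = (addr64_t)uio_offset(uio);
		addr64_t end   = start + (addr64_t)uio_resid(uio);
		user_ssize_t before = uio_resid(uio);

		/* Each call stops at the next page boundary and advances the
		 * uio through uiomove64(), except for the unbacked-range case,
		 * which leaves the uio untouched -- detect that and stop. */
		if (pmem_partial_read(uio, start, end) == 0 ||
		    uio_resid(uio) == before)
			break;
	}
	return 0;
}

Checking that uio_resid() actually shrank covers the branch above where chunk_len is returned without the uio being advanced, so the loop cannot spin on an address that is not in the physical memory range.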
Code example #3
File: pmem.cpp    Project: KarlVogel/rekall
// Copy the requested amount to userspace if it doesn't cross page boundaries
// or memory mapped io. If it does, stop at the boundary. Will copy zeroes
// if the given physical address is not backed by physical memory.
//
// args: uio is the userspace io request object
// return: number of bytes copied successfully
//
static uint64_t pmem_partial_read(struct uio *uio, addr64_t start_addr,
                                  addr64_t end_addr) {
  void *vaddr_page = NULL;
  // Separate page and offset
  uint64_t page_offset = start_addr & PAGE_MASK;
  addr64_t page = trunc_page_64(start_addr);
  // don't copy across page boundaries
  uint32_t chunk_len = (uint32_t)MIN(PAGE_SIZE - page_offset,
                                     end_addr - start_addr);
  if (pmem_map_physical_page(page, &vaddr_page) != KERN_SUCCESS) {
    pmem_error("page %#016llx could not be mapped into the kernel, "
               "zero padding return buffer", page);
    // Zero pad this chunk, as it is not inside a valid page frame.
    uiomove64((addr64_t)pmem_zero_page + page_offset,
              (uint32_t)chunk_len, uio);
  } else {
    // Successfully mapped page, copy contents...
    uiomove64((reinterpret_cast<uint64_t>(vaddr_page) + page_offset),
              (uint32_t)chunk_len, uio);
  }

  return chunk_len;
}
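
This variant differs from the chipsec one mainly in that the IOKit mapping is factored into a pmem_map_physical_page() helper; both rely on a preallocated pmem_zero_page to pad reads of unbacked ranges. The sketch below shows one hypothetical way such a zero page could be set up and torn down in an XNU kernel extension; names are illustrative and this is not the drivers' actual initialization code.

/*
 * Hypothetical setup sketch for a zero page used to pad reads of unbacked
 * ranges, as pmem_zero_page is used in the two examples above.
 */
#include <IOKit/IOLib.h>	/* IOMallocAligned / IOFreeAligned */
#include <mach/vm_param.h>	/* PAGE_SIZE */
#include <string.h>		/* bzero */

static void *pmem_zero_page_sketch = NULL;

static int
pmem_zero_page_init(void)
{
	pmem_zero_page_sketch = IOMallocAligned(PAGE_SIZE, PAGE_SIZE);
	if (pmem_zero_page_sketch == NULL)
		return -1;
	bzero(pmem_zero_page_sketch, PAGE_SIZE);
	return 0;
}

static void
pmem_zero_page_cleanup(void)
{
	if (pmem_zero_page_sketch != NULL) {
		IOFreeAligned(pmem_zero_page_sketch, PAGE_SIZE);
		pmem_zero_page_sketch = NULL;
	}
}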
Code example #4
/*
 * Returns:	0			Success
 *	uiomove64:EFAULT
 *
 * Notes:	The first argument should be a caddr_t, but const poisoning
 *		for typedef'ed types doesn't work in gcc.
 */
int
uiomove(const char * cp, int n, uio_t uio)
{
	return uiomove64((const addr64_t)((const unsigned int)cp), n, uio);
}
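
Note that the (const unsigned int) cast above keeps only the low 32 bits of the pointer, which is only safe on an ILP32 kernel; the later variant in code example #7 goes through uintptr_t instead. A small userspace illustration of the difference (sketch only, not kernel code):

/*
 * Userspace illustration: casting a pointer through unsigned int truncates it
 * on LP64, while the uintptr_t route used in code example #7 preserves the
 * full value.
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	char buf[1];
	uint64_t full      = (uint64_t)(uintptr_t)&buf[0];
	uint64_t truncated = (uint64_t)(unsigned int)(uintptr_t)&buf[0];

	printf("full=%#llx truncated=%#llx\n",
	       (unsigned long long)full, (unsigned long long)truncated);
	return 0;
}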
Code example #5
int
memory_object_control_uiomove(
	memory_object_control_t	control,
	memory_object_offset_t	offset,
	void		*	uio,
	int			start_offset,
	int			io_requested,
	int			mark_dirty,
	int			take_reference)
{
	vm_object_t		object;
	vm_page_t		dst_page;
	int			xsize;
	int			retval = 0;
	int			cur_run;
	int			cur_needed;
	int			i;
	int			orig_offset;
	boolean_t		make_lru = FALSE;
	vm_page_t		page_run[MAX_RUN];

	object = memory_object_control_to_vm_object(control);
	if (object == VM_OBJECT_NULL) {
		return (0);
	}
	assert(!object->internal);

	vm_object_lock(object);

	if (mark_dirty && object->copy != VM_OBJECT_NULL) {
		/*
		 * We can't modify the pages without honoring
		 * copy-on-write obligations first, so fall off
		 * this optimized path and fall back to the regular
		 * path.
		 */
		vm_object_unlock(object);
		return 0;
	}
	orig_offset = start_offset;
	    
	while (io_requested && retval == 0) {

		cur_needed = (start_offset + io_requested + (PAGE_SIZE - 1)) / PAGE_SIZE;

		if (cur_needed > MAX_RUN)
		        cur_needed = MAX_RUN;

		for (cur_run = 0; cur_run < cur_needed; ) {

		        if ((dst_page = vm_page_lookup(object, offset)) == VM_PAGE_NULL)
			        break;
			/*
			 * Sync up on getting the busy bit
			 */
			if ((dst_page->busy || dst_page->cleaning)) {
			        /*
				 * someone else is playing with the page... if we've
				 * already collected pages into this run, go ahead
				 * and process now, we can't block on this
				 * page while holding other pages in the BUSY state
				 * otherwise we will wait
				 */
			        if (cur_run)
				        break;
			        PAGE_SLEEP(object, dst_page, THREAD_UNINT);
				continue;
			}
			/*
			 * this routine is only called when copying
			 * to/from real files... no need to consider
			 * encrypted swap pages
			 */
			assert(!dst_page->encrypted);

		        if (mark_dirty) {
			        dst_page->dirty = TRUE;
				if (dst_page->cs_validated) {
					/*
					 * CODE SIGNING:
					 * We're modifying a code-signed
					 * page:  assume that it is now tainted.
					 */
					dst_page->cs_tainted = TRUE;
					vm_cs_tainted_forces++;
				}
			}
			dst_page->busy = TRUE;

			page_run[cur_run++] = dst_page;

			offset += PAGE_SIZE_64;
		}
		if (cur_run == 0)
		        /*
			 * we hit a 'hole' in the cache
			 * we bail at this point
			 * we'll unlock the object below
			 */
		        break;
		vm_object_unlock(object);

		for (i = 0; i < cur_run; i++) {
		  
		        dst_page = page_run[i];

			if ((xsize = PAGE_SIZE - start_offset) > io_requested)
			        xsize = io_requested;

			if ( (retval = uiomove64((addr64_t)(((addr64_t)(dst_page->phys_page) << 12) + start_offset), xsize, uio)) )
			        break;

			io_requested -= xsize;
			start_offset = 0;
		}
		vm_object_lock(object);

		/*
		 * if we have more than 1 page to work on
		 * in the current run, or the original request
		 * started at offset 0 of the page, or we're
		 * processing multiple batches, we will move
		 * the pages to the tail of the inactive queue
		 * to implement an LRU for read/write accesses
		 *
		 * the check for orig_offset == 0 is there to 
		 * mitigate the cost of small (< page_size) requests
		 * to the same page (this way we only move it once)
		 */
		if (take_reference && (cur_run > 1 || orig_offset == 0)) {
			vm_page_lockspin_queues();
			make_lru = TRUE;
		}
		for (i = 0; i < cur_run; i++) {
		        dst_page = page_run[i];

			/*
			 * someone is explicitly referencing this page...
			 * update clustered and speculative state
			 * 
			 */
			VM_PAGE_CONSUME_CLUSTERED(dst_page);

			if (make_lru == TRUE)
				vm_page_lru(dst_page);

			PAGE_WAKEUP_DONE(dst_page);
		}
		if (make_lru == TRUE) {
			vm_page_unlock_queues();
			make_lru = FALSE;
		}
		orig_offset = 0;
	}
	vm_object_unlock(object);

	return (retval);
}
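
The run length here is derived from how many pages the remaining request touches, starting start_offset bytes into the first page and capped at MAX_RUN. A standalone sketch of that arithmetic follows; the constants are hypothetical, since the real MAX_RUN is defined elsewhere in bsd_vm.c.

/*
 * Standalone sketch of the batching arithmetic above: number of pages touched
 * by a request of io_requested bytes starting start_offset bytes into a page,
 * capped at the run limit.
 */
#include <stdio.h>

#define SKETCH_PAGE_SIZE 4096
#define SKETCH_MAX_RUN   8

static int pages_needed(int start_offset, int io_requested)
{
	int n = (start_offset + io_requested + (SKETCH_PAGE_SIZE - 1)) / SKETCH_PAGE_SIZE;
	return n > SKETCH_MAX_RUN ? SKETCH_MAX_RUN : n;
}

int main(void)
{
	/* 100 bytes starting 4000 bytes into a page crosses one boundary: 2 pages. */
	printf("%d\n", pages_needed(4000, 100));
	return 0;
}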
Code example #6
File: bsd_vm.c    Project: SbIm/xnu-env
int
memory_object_control_uiomove(
	memory_object_control_t	control,
	memory_object_offset_t	offset,
	void		*	uio,
	int			start_offset,
	int			io_requested,
	int			mark_dirty,
	int			take_reference)
{
	vm_object_t		object;
	vm_page_t		dst_page;
	int			xsize;
	int			retval = 0;
	int			cur_run;
	int			cur_needed;
	int			i;
	int			orig_offset;
	vm_page_t		page_run[MAX_RUN];

	object = memory_object_control_to_vm_object(control);
	if (object == VM_OBJECT_NULL) {
		return (0);
	}
	assert(!object->internal);

	vm_object_lock(object);

	if (mark_dirty && object->copy != VM_OBJECT_NULL) {
		/*
		 * We can't modify the pages without honoring
		 * copy-on-write obligations first, so fall off
		 * this optimized path and fall back to the regular
		 * path.
		 */
		vm_object_unlock(object);
		return 0;
	}
	orig_offset = start_offset;
	    
	while (io_requested && retval == 0) {

		cur_needed = (start_offset + io_requested + (PAGE_SIZE - 1)) / PAGE_SIZE;

		if (cur_needed > MAX_RUN)
		        cur_needed = MAX_RUN;

		for (cur_run = 0; cur_run < cur_needed; ) {

		        if ((dst_page = vm_page_lookup(object, offset)) == VM_PAGE_NULL)
			        break;

			/*
			 * if we're in this routine, we are inside a filesystem's
			 * locking model, so we don't ever want to wait for pages that have
			 * list_req_pending == TRUE since it means that the
			 * page is a candidate for some type of I/O operation,
			 * but that it has not yet been gathered into a UPL...
			 * this implies that it is still outside the domain
			 * of the filesystem and that whoever is responsible for
			 * grabbing it into a UPL may be stuck behind the filesystem
			 * lock this thread owns, or trying to take a lock exclusively
			 * and waiting for the readers to drain from a rw lock...
			 * if we block in those cases, we will deadlock
			 */
			if (dst_page->list_req_pending) {

				if (dst_page->absent) {
					/*
					 * this is the list_req_pending | absent | busy case
					 * which originates from vm_fault_page... we want
					 * to fall out of the fast path and go back
					 * to the caller which will gather this page
					 * into a UPL and issue the I/O if no one
					 * else beats us to it
					 */
					break;
				}
				if (dst_page->pageout || dst_page->cleaning) {
					/*
					 * this is the list_req_pending | pageout | busy case
					 * or the list_req_pending | cleaning case...
					 * which originate from the pageout_scan and
					 * msync worlds for the pageout case and the hibernate
					 * pre-cleaning world for the cleaning case...
					 * we need to reset the state of this page to indicate
					 * it should stay in the cache marked dirty... nothing else we
					 * can do at this point... we can't block on it, we can't busy
					 * it and we can't clean it from this routine.
					 */
					vm_page_lockspin_queues();

					vm_pageout_queue_steal(dst_page, TRUE); 
					vm_page_deactivate(dst_page);

					vm_page_unlock_queues();
				}
				/*
				 * this is the list_req_pending | cleaning case...
				 * we can go ahead and deal with this page since
				 * its ok for us to mark this page busy... if a UPL
				 * tries to gather this page, it will block until the
				 * busy is cleared, thus allowing us safe use of the page
				 * when we're done with it, we will clear busy and wake
				 * up anyone waiting on it, thus allowing the UPL creation
				 * to finish
				 */

			} else if (dst_page->busy || dst_page->cleaning) {
				/*
				 * someone else is playing with the page... if we've
				 * already collected pages into this run, go ahead
				 * and process now, we can't block on this
				 * page while holding other pages in the BUSY state
				 * otherwise we will wait
				 */
				if (cur_run)
					break;
				PAGE_SLEEP(object, dst_page, THREAD_UNINT);
				continue;
			}

			/*
			 * this routine is only called when copying
			 * to/from real files... no need to consider
			 * encrypted swap pages
			 */
			assert(!dst_page->encrypted);

		        if (mark_dirty) {
			        dst_page->dirty = TRUE;
				if (dst_page->cs_validated && 
				    !dst_page->cs_tainted) {
					/*
					 * CODE SIGNING:
					 * We're modifying a code-signed
					 * page: force revalidate
					 */
					dst_page->cs_validated = FALSE;
#if DEVELOPMENT || DEBUG
                                        vm_cs_validated_resets++;
#endif
					pmap_disconnect(dst_page->phys_page);
				}
			}
			dst_page->busy = TRUE;

			page_run[cur_run++] = dst_page;

			offset += PAGE_SIZE_64;
		}
		if (cur_run == 0)
		        /*
			 * we hit a 'hole' in the cache or
			 * a page we don't want to try to handle,
			 * so bail at this point
			 * we'll unlock the object below
			 */
		        break;
		vm_object_unlock(object);

		for (i = 0; i < cur_run; i++) {
		  
		        dst_page = page_run[i];

			if ((xsize = PAGE_SIZE - start_offset) > io_requested)
			        xsize = io_requested;

			if ( (retval = uiomove64((addr64_t)(((addr64_t)(dst_page->phys_page) << 12) + start_offset), xsize, uio)) )
			        break;

			io_requested -= xsize;
			start_offset = 0;
		}
		vm_object_lock(object);

		/*
		 * if we have more than 1 page to work on
		 * in the current run, or the original request
		 * started at offset 0 of the page, or we're
		 * processing multiple batches, we will move
		 * the pages to the tail of the inactive queue
		 * to implement an LRU for read/write accesses
		 *
		 * the check for orig_offset == 0 is there to 
		 * mitigate the cost of small (< page_size) requests
		 * to the same page (this way we only move it once)
		 */
		if (take_reference && (cur_run > 1 || orig_offset == 0)) {

			vm_page_lockspin_queues();

			for (i = 0; i < cur_run; i++)
				vm_page_lru(page_run[i]);

			vm_page_unlock_queues();
		}
		for (i = 0; i < cur_run; i++) {
		        dst_page = page_run[i];

			/*
			 * someone is explicitly referencing this page...
			 * update clustered and speculative state
			 * 
			 */
			VM_PAGE_CONSUME_CLUSTERED(dst_page);

			PAGE_WAKEUP_DONE(dst_page);
		}
		orig_offset = 0;
	}
	vm_object_unlock(object);

	return (retval);
}
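
For context, this routine is normally reached from XNU's cluster-layer UBC copy path rather than called directly. The sketch below is modeled loosely on that path and shows how a caller might obtain the file's pager control object and hand the uio to memory_object_control_uiomove(); the required declarations live in XNU-private headers (e.g. sys/ubc_internal.h), and error handling is trimmed, so treat it as illustration only.

/*
 * Hypothetical caller sketch: hand a file's UBC pager object and a uio to
 * memory_object_control_uiomove(). Not the actual cluster-layer code.
 */
static int
copy_via_ubc_sketch(vnode_t vp, struct uio *uio, int io_size, int mark_dirty)
{
	memory_object_control_t control = ubc_getobject(vp, UBC_FLAGS_NONE);

	if (control == MEMORY_OBJECT_CONTROL_NULL)
		return 0;	/* no pager: caller falls back to the regular path */

	/* start_offset is where the request begins within its first page. */
	int start_offset = (int)(uio_offset(uio) & (PAGE_SIZE - 1));

	return memory_object_control_uiomove(control,
	                                     uio_offset(uio) - start_offset,
	                                     uio, start_offset, io_size,
	                                     mark_dirty, 1 /* take_reference */);
}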
Code example #7
File: kern_subr.c    Project: Apple-FOSS-Mirror/xnu
/*
 * Returns:	0			Success
 *	uiomove64:EFAULT
 *
 * Notes:	The first argument should be a caddr_t, but const poisoning
 *		for typedef'ed types doesn't work in gcc.
 */
int
uiomove(const char * cp, int n, uio_t uio)
{
	return uiomove64((const addr64_t)(uintptr_t)cp, n, uio);
}
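
For completeness, here is a minimal hypothetical character-device read handler that returns a static kernel buffer through uiomove(), which in turn forwards to uiomove64() as shown above. It assumes standard BSD/XNU uio accessors and is a sketch, not code from any of the projects quoted here.

/*
 * Minimal hypothetical read handler: copy a static kernel buffer out to
 * userspace via uiomove().
 */
#include <sys/param.h>	/* MIN, basic types */
#include <sys/uio.h>

static const char greeting[] = "hello from the kernel\n";

static int
hello_read_sketch(__unused dev_t dev, struct uio *uio, __unused int ioflag)
{
	off_t off = uio_offset(uio);

	if (off < 0 || (size_t)off >= sizeof(greeting))
		return 0;		/* EOF */

	int len = (int)MIN(sizeof(greeting) - (size_t)off, (size_t)uio_resid(uio));

	/* uiomove() advances uio_offset/uio_resid and returns 0 or EFAULT. */
	return uiomove(greeting + off, len, uio);
}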