Example #1
OSData * IOMapper::
NewARTTable(IOByteCount size, void ** virtAddrP, ppnum_t *physAddrP)
{
    if (!virtAddrP || !physAddrP)
        return 0;

    kern_return_t kr;
    vm_address_t address;

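    /* Round the request up to whole pages and grab physically contiguous,
       wired kernel memory for the table */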
    size = round_page(size);
    kr = kmem_alloc_contig(kernel_map, &address, size, PAGE_MASK, 0 /*max_pnum*/, 0 /*pnum_mask*/, false);
    if (kr)
        return 0;

    ppnum_t pagenum = pmap_find_phys(kernel_pmap, (addr64_t) address);
    if (pagenum)
        *physAddrP = pagenum;
    else {
        /* No physical page behind the allocation; release it and fail */
        FreeARTTable((OSData *) address, size);
        address = 0;
    }

    *virtAddrP = (void *) address;

    return (OSData *) address;
}
Example #2
static inline kern_return_t chudxnu_private_task_read_bytes(task_t task, vm_offset_t addr, int size, void *data)
{
    kern_return_t ret;

    if(task==kernel_task) {
        if(size==sizeof(unsigned int)) {
            addr64_t phys_addr;
            ppnum_t pp;

            pp = pmap_find_phys(kernel_pmap, addr);			/* Get the page number */
            if(!pp) return KERN_FAILURE;				/* Not mapped... */

            phys_addr = ((addr64_t)pp << 12) | (addr & 0x0000000000000FFFULL);	/* Shove in the page offset */

            if(phys_addr < mem_actual) {				/* Sanity check: is it in memory? */
                *((uint32_t *)data) = ml_phys_read_64(phys_addr);
                return KERN_SUCCESS;
            }
            return KERN_FAILURE;					/* Page lies beyond physical memory */
        } else {
            return KERN_FAILURE;					/* Only word-sized kernel reads are supported */
        }
    } else {
        ret = KERN_SUCCESS;						/* Assume everything worked */
        if(copyin((void *)addr, data, size)) ret = KERN_FAILURE;	/* Get memory, if non-zero rc, it didn't work */
        return ret;
    }
}
Example #3
boolean_t
db_check_access(
	vm_offset_t	addr,
	int		size,
	task_t		task)
{
	register int	n;

	if (task == kernel_task) {
	    if (kernel_task == TASK_NULL)  return(TRUE);
	} else if (task == TASK_NULL) {
	    if (current_act() == THR_ACT_NULL) return(FALSE);
	    task = current_act()->task;
	}

	while (size > 0) {
	    if (!pmap_find_phys(task->map->pmap, (addr64_t)addr))
		return (FALSE);					/* Fail if page not mapped */
	    n = trunc_page_32(addr+PPC_PGBYTES) - addr;		/* Bytes left in this page */
	    if (n > size)
		n = size;
	    size -= n;
	    addr += n;
	}
	return(TRUE);
}
Example #4
kern_return_t IOProtectCacheMode(vm_map_t map, mach_vm_address_t va,
					mach_vm_size_t length, unsigned int options)
{
    mach_vm_size_t off;
    vm_prot_t	   prot;
    unsigned int   flags;
    pmap_t 	   pmap = map->pmap;
    pmap_flush_context	pmap_flush_context_storage;
    boolean_t		delayed_pmap_flush = FALSE;

    prot = (options & kIOMapReadOnly)
		? VM_PROT_READ : (VM_PROT_READ|VM_PROT_WRITE);

    switch (options & kIOMapCacheMask)
    {
	// what cache mode do we need?
	case kIOMapDefaultCache:
	default:
	    return (KERN_INVALID_ARGUMENT);

	case kIOMapInhibitCache:
	    flags = VM_WIMG_IO;
	    break;

	case kIOMapWriteThruCache:
	    flags = VM_WIMG_WTHRU;
	    break;

	case kIOMapWriteCombineCache:
	    flags = VM_WIMG_WCOMB;
	    break;

	case kIOMapCopybackCache:
	    flags = VM_WIMG_COPYBACK;
	    break;
    }

    pmap_flush_context_init(&pmap_flush_context_storage);

    //  enter each page's physical address in the target map
    for (off = 0; off < length; off += page_size)
    {
	ppnum_t ppnum = pmap_find_phys(pmap, va + off);
	if (ppnum) {
		pmap_enter_options(pmap, va + off, ppnum, prot, VM_PROT_NONE, flags, TRUE, 
				   PMAP_OPTIONS_NOFLUSH, (void *)&pmap_flush_context_storage);
		delayed_pmap_flush = TRUE;
	}
    }
    if (delayed_pmap_flush == TRUE)
	    pmap_flush(&pmap_flush_context_storage);

    return (KERN_SUCCESS);
}
Example #5
File: pmem.cpp  Project: KarlVogel/rekall
// Converts a given kernel virtual address to an actual physical address.
// The page needs to be mapped into the kernel_map for this to work.
//
// args: addr is the kernel virtual address
// return: physical address if successful, otherwise 0
//
// This was adapted from xnu/osfmk/i386/phys.c to work from within the
// restricted symbol set available to a kext.
static addr64_t pmem_kernel_virt_to_phys(addr64_t addr) {
  addr64_t phys_addr;

  // pmap_find_phys returns a page frame number, so shift it up to get the
  // physical address; widen to addr64_t first so the shift can't overflow.
  phys_addr = ((addr64_t)pmap_find_phys(kernel_pmap, addr)) << PAGE_SHIFT;
  if (phys_addr != 0) {
    // Add the offset back in
    phys_addr |= (addr & PAGE_MASK);
  }
  return phys_addr;
}
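
A minimal usage sketch of the helper above (the caller pmem_buffer_is_mapped is hypothetical, not part of the project):

// Check that a kernel buffer is backed by a physical page before using
// its physical address, e.g. in a dump routine.
static bool pmem_buffer_is_mapped(void *buf) {
  addr64_t pa = pmem_kernel_virt_to_phys((addr64_t)(uintptr_t)buf);
  return pa != 0;  // 0 means the page is not mapped in kernel_map
}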
Example #6
File: phys.c  Project: aglab2/darwin-xnu
/*
 *	kvtophys(addr)
 *
 *	Convert a kernel virtual address to a physical address
 */
addr64_t
kvtophys(
	vm_offset_t addr)
{
	pmap_paddr_t pa;

	pa = ((pmap_paddr_t)pmap_find_phys(kernel_pmap, addr)) << INTEL_PGSHIFT;
	if (pa)
		pa |= (addr & INTEL_OFFMASK);

	return ((addr64_t)pa);
}
Example #7
static addr64_t
kdp_vtophys(
	pmap_t pmap,
	addr64_t va)
{
	addr64_t    pa;
	ppnum_t pp;

	pp = pmap_find_phys(pmap, va);				/* Get the page number */
	if(!pp) return 0;							/* Just return if no translation */
	
	pa = ((addr64_t)pp << 12) | (va & 0x0000000000000FFFULL);	/* Shove in the page offset */
	return(pa);
}
Example #8
int pmap_traverse_present_mappings(pmap_t pmap, vm_map_offset_t start, vm_map_offset_t end, pmap_traverse_callback callback, void *context)
{
    int ret = KERN_SUCCESS;
    vm_map_offset_t vcurstart, vcur;
    boolean_t lastvavalid = FALSE;

    /* Assumes pmap is locked, or being called from the kernel debugger */

    if (start > end) {
        return (KERN_INVALID_ARGUMENT);
    }

    if (start & PAGE_MASK_64) {
        return (KERN_INVALID_ARGUMENT);
    }

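    /* Walk the range page by page, coalescing mapped pages into runs and
       invoking the callback once per run */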
    for (vcur = vcurstart = start; (ret == KERN_SUCCESS) && (vcur < end); ) {
        ppnum_t ppn = pmap_find_phys(pmap, vcur);

        if (ppn != 0 && !pmap_valid_page(ppn)) {
            /* not something we want */
            ppn = 0;
        }

        if (ppn != 0) {
            if (!lastvavalid) {
                /* Start of a new virtual region */
                vcurstart = vcur;
                lastvavalid = TRUE;
            }
        } else {
            if (lastvavalid) {
                /* end of a virtual region */
                ret = callback(vcurstart, vcur, context);
                lastvavalid = FALSE;
            }
        }

        vcur += PAGE_SIZE;
    }

    if ((ret == KERN_SUCCESS) && lastvavalid) {
        /* send previous run */
        ret = callback(vcurstart, vcur, context);
    }
    return (ret);
}
Example #9
addr64_t db_vtophys(
	pmap_t pmap,
	vm_offset_t va)
{
	ppnum_t pp;
	addr64_t pa;

	pp = pmap_find_phys(pmap, (addr64_t)va);

	if (pp == 0) return(0);					/* Couldn't find it */
	
	pa = ((addr64_t)pp << 12) | (addr64_t)(va & 0xFFF);	/* Get physical address */

	return(pa);
}
Example #10
/*
 *	Routine:	cpu_per_proc_register
 *	Function:	Register a per_proc_info area and assign it a cpu number
 */
kern_return_t
cpu_per_proc_register(
	struct per_proc_info	*proc_info
)
{
	int						cpu;

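	/* Serialize cpu slot assignment with any concurrent registrations */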
	mutex_lock(&ppt_lock);
	if (real_ncpus >= max_ncpus) {
		mutex_unlock(&ppt_lock);
		return KERN_FAILURE;
	}
	cpu = real_ncpus;
	proc_info->cpu_number = cpu;
	PerProcTable[cpu].ppe_vaddr = proc_info;
	PerProcTable[cpu].ppe_paddr = (addr64_t)pmap_find_phys(kernel_pmap,
				(addr64_t)(unsigned int)proc_info) << PAGE_SHIFT;
	eieio();
	real_ncpus++;
	mutex_unlock(&ppt_lock);
	return KERN_SUCCESS;
}
Example #11
/*
 *	Routine:	cpu_per_proc_alloc
 *	Function:	Allocate and initialize a per_proc_info area and its stacks
 */
struct per_proc_info *
cpu_per_proc_alloc(
		void)
{
	struct per_proc_info	*proc_info = NULL;
	void			*interrupt_stack = NULL;
	void			*debugger_stack = NULL;

	if ((proc_info = (struct per_proc_info*)kalloc(sizeof(struct per_proc_info))) == (struct per_proc_info*)0)
		return (struct per_proc_info *)NULL;
	if ((interrupt_stack = kalloc(INTSTACK_SIZE)) == 0) {
		kfree(proc_info, sizeof(struct per_proc_info));
		return (struct per_proc_info *)NULL;
	}

	if ((debugger_stack = kalloc(KERNEL_STACK_SIZE)) == 0) {
		kfree(proc_info, sizeof(struct per_proc_info));
		kfree(interrupt_stack, INTSTACK_SIZE);
		return (struct per_proc_info *)NULL;
	}

	bzero((void *)proc_info, sizeof(struct per_proc_info));

	/* Set physical address of the second page */
	proc_info->pp2ndPage = (addr64_t)pmap_find_phys(kernel_pmap,
				((addr64_t)(unsigned int)proc_info) + 0x1000)
			       << PAGE_SHIFT;
	proc_info->next_savearea = (uint64_t)save_get_init();
	proc_info->pf = BootProcInfo.pf;
	proc_info->istackptr = (vm_offset_t)interrupt_stack + INTSTACK_SIZE - FM_SIZE;
	proc_info->intstack_top_ss = proc_info->istackptr;
	proc_info->debstackptr = (vm_offset_t)debugger_stack + KERNEL_STACK_SIZE - FM_SIZE;
	proc_info->debstack_top_ss = proc_info->debstackptr;

	return proc_info;
}
Example #12
File: memdev.c  Project: 0xffea/xnu
static void mdevstrategy(struct buf *bp) {
	unsigned int left, lop, csize;
	vm_offset_t vaddr, blkoff;
	int devid;
	addr64_t paddr, fvaddr;
	ppnum_t pp;

	devid = minor(buf_device(bp));							/* Get minor device number */

	if ((mdev[devid].mdFlags & mdInited) == 0) {		/* Have we actually been defined yet? */
	        buf_seterror(bp, ENXIO);
		buf_biodone(bp);
		return;
	}

	buf_setresid(bp, buf_count(bp));						/* Set byte count */
	
	blkoff = buf_blkno(bp) * mdev[devid].mdSecsize;		/* Get offset into file */

/*
 *	Note that reading past end is an error, but reading at end is an EOF.  For these
 *	we just return with resid == count.
 */

	if (blkoff >= (mdev[devid].mdSize << 12)) {			/* Are they trying to read/write at/after end? */
		if(blkoff != (mdev[devid].mdSize << 12)) {		/* Are we trying to read after EOF? */
		        buf_seterror(bp, EINVAL);						/* Yeah, this is an error */
		}
		buf_biodone(bp);								/* Return */
		return;
	}

	if ((blkoff + buf_count(bp)) > (mdev[devid].mdSize << 12)) {		/* Will this read go past end? */
		buf_setcount(bp, ((mdev[devid].mdSize << 12) - blkoff));	/* Yes, trim to max */
	}
	/*
	 * make sure the buffer's data area is
	 * accessible
	 */
	if (buf_map(bp, (caddr_t *)&vaddr))
	        panic("mdevstrategy: buf_map failed\n");

	fvaddr = (mdev[devid].mdBase << 12) + blkoff;		/* Point to offset into ram disk */
	
	if (buf_flags(bp) & B_READ) {					/* Is this a read? */
		if(!(mdev[devid].mdFlags & mdPhys)) {			/* Physical mapped disk? */
			bcopy((void *)((uintptr_t)fvaddr),
				(void *)vaddr, (size_t)buf_count(bp));	/* This is virtual, just get the data */
		}
		else {
			left = buf_count(bp);						/* Init the amount left to copy */
			while(left) {								/* Go until it is all copied */
				
				lop = min((4096 - (vaddr & 4095)), (4096 - (fvaddr & 4095)));	/* Get smallest amount left on sink and source */
				csize = min(lop, left);					/* Don't move more than we need to */
				
				pp = pmap_find_phys(kernel_pmap, (addr64_t)((uintptr_t)vaddr));	/* Get the sink physical address */
				if(!pp) {								/* Not found, what gives? */
					panic("mdevstrategy: sink address %016llX not mapped\n", (addr64_t)((uintptr_t)vaddr));
				}
				paddr = (addr64_t)(((addr64_t)pp << 12) | (addr64_t)(vaddr & 4095));	/* Get actual address */
				bcopy_phys(fvaddr, paddr, csize);		/* Copy this on in */
				mapping_set_mod(paddr >> 12);			/* Make sure we know that it is modified */
				
				left = left - csize;					/* Calculate what is left */
				vaddr = vaddr + csize;					/* Move to next sink address */
				fvaddr = fvaddr + csize;				/* Bump to next physical address */
			}
		}
	}
	else {												/* This is a write */
		if(!(mdev[devid].mdFlags & mdPhys)) {			/* Physical mapped disk? */
			bcopy((void *)vaddr, (void *)((uintptr_t)fvaddr),
				(size_t)buf_count(bp));					/* This is virtual, just put the data */
		}
		else {
			left = buf_count(bp);						/* Init the amount left to copy */
			while(left) {								/* Go until it is all copied */

				lop = min((4096 - (vaddr & 4095)), (4096 - (fvaddr & 4095)));	/* Get smallest amount left on sink and source */
				csize = min(lop, left);					/* Don't move more than we need to */

				pp = pmap_find_phys(kernel_pmap, (addr64_t)((uintptr_t)vaddr));	/* Get the source physical address */
				if(!pp) {								/* Not found, what gives? */
					panic("mdevstrategy: source address %016llX not mapped\n", (addr64_t)((uintptr_t)vaddr));
				}
				paddr = (addr64_t)(((addr64_t)pp << 12) | (addr64_t)(vaddr & 4095));	/* Get actual address */
				bcopy_phys(paddr, fvaddr, csize);		/* Copy this on out */

				left = left - csize;					/* Calculate what is left */
				vaddr = vaddr + csize;					/* Move to next source address */
				fvaddr = fvaddr + csize;				/* Bump to next physical address */
			}
		}
	}

	buf_setresid(bp, 0);								/* Indicate no more bytes left to do */
	buf_unmap(bp);										/* We are all done with the buffer */
	buf_biodone(bp);									/* Tell them we are done */
}
Example #13
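/* Translate a kernel virtual address to a physical address. Note that
   ptoa_64() only converts the page number to a page base, so the offset
   of va within its page is discarded. */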
addr64_t
vmx_paddr(void *va)
{
	return (ptoa_64(pmap_find_phys(kernel_pmap, (addr64_t)(uintptr_t)va)));
}