Example #1
int
fill_procregioninfo(task_t task, uint64_t arg, struct proc_regioninfo_internal *pinfo, uint32_t *vnodeaddr, uint32_t *vid)
{
	vm_map_t                       map;
	vm_map_offset_t                address = (vm_map_offset_t)arg;
	vm_map_entry_t                 tmp_entry;
	vm_map_entry_t                 entry;
	vm_map_offset_t                start;
	vm_region_extended_info_data_t extended;
	vm_region_top_info_data_t      top;

	/*
	 * Take our own reference on the task's map under the task lock,
	 * so the map cannot go away while we examine it.
	 */
	task_lock(task);
	map = task->map;
	if (map == VM_MAP_NULL) {
		task_unlock(task);
		return(0);
	}
	vm_map_reference(map);
	task_unlock(task);

	vm_map_lock_read(map);

	/*
	 * Find the entry containing the address; if no entry contains it,
	 * fall forward to the next entry, failing once we run off the end
	 * of the map.
	 */
	start = address;
	if (!vm_map_lookup_entry(map, start, &tmp_entry)) {
		if ((entry = tmp_entry->vme_next) == vm_map_to_entry(map)) {
			vm_map_unlock_read(map);
			vm_map_deallocate(map);
			return(0);
		}
	} else {
		entry = tmp_entry;
	}

	start = entry->vme_start;

	/* Copy the entry's basic attributes. */
	pinfo->pri_offset = entry->offset;
	pinfo->pri_protection = entry->protection;
	pinfo->pri_max_protection = entry->max_protection;
	pinfo->pri_inheritance = entry->inheritance;
	pinfo->pri_behavior = entry->behavior;
	pinfo->pri_user_wired_count = entry->user_wired_count;
	pinfo->pri_user_tag = entry->alias;

	if (entry->is_sub_map) {
		pinfo->pri_flags |= PROC_REGION_SUBMAP;
	} else {
		if (entry->is_shared)
			pinfo->pri_flags |= PROC_REGION_SHARED;
	}

	/* Walk the region to gather page-level statistics. */
	extended.protection = entry->protection;
	extended.user_tag = entry->alias;
	extended.pages_resident = 0;
	extended.pages_swapped_out = 0;
	extended.pages_shared_now_private = 0;
	extended.pages_dirtied = 0;
	extended.external_pager = 0;
	extended.shadow_depth = 0;

	vm_map_region_walk(map, start, entry, entry->offset, entry->vme_end - start, &extended);

	/*
	 * Report a pager-backed object that has only these two references
	 * as private rather than shared.
	 */
	if (extended.external_pager && extended.ref_count == 2 && extended.share_mode == SM_SHARED)
		extended.share_mode = SM_PRIVATE;

	top.private_pages_resident = 0;
	top.shared_pages_resident = 0;
	vm_map_region_top_walk(entry, &top);

	pinfo->pri_pages_resident = extended.pages_resident;
	pinfo->pri_pages_shared_now_private = extended.pages_shared_now_private;
	pinfo->pri_pages_swapped_out = extended.pages_swapped_out;
	pinfo->pri_pages_dirtied = extended.pages_dirtied;
	pinfo->pri_ref_count = extended.ref_count;
	pinfo->pri_shadow_depth = extended.shadow_depth;
	pinfo->pri_share_mode = extended.share_mode;

	pinfo->pri_private_pages_resident = top.private_pages_resident;
	pinfo->pri_shared_pages_resident = top.shared_pages_resident;
	pinfo->pri_obj_id = top.obj_id;

	pinfo->pri_address = (uint64_t)start;
	pinfo->pri_size = (uint64_t)(entry->vme_end - start);
	pinfo->pri_depth = 0;

	/* If requested, resolve the backing vnode for mapped-file regions. */
	if ((vnodeaddr != 0) && (entry->is_sub_map == 0)) {
		*vnodeaddr = (uint32_t)0;

		if (fill_vnodeinfoforaddr(entry, vnodeaddr, vid) == 0) {
			vm_map_unlock_read(map);
			vm_map_deallocate(map);
			return(1);
		}
	}

	vm_map_unlock_read(map);
	vm_map_deallocate(map);
	return(1);
}
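
A minimal sketch of how a caller might use fill_procregioninfo() to enumerate every region of a task, advancing the probe address past each region as it is returned. The helper walk_task_regions() is hypothetical; only fill_procregioninfo() and the pri_* fields come from the code above.

static void
walk_task_regions(task_t task)
{
	struct proc_regioninfo_internal pinfo;
	uint32_t vnodeaddr, vid;
	uint64_t addr = 0;

	for (;;) {
		bzero(&pinfo, sizeof(pinfo));
		vnodeaddr = 0;
		vid = 0;

		/* Returns 1 and fills pinfo for the region at or after addr,
		 * 0 once the end of the map is reached. */
		if (fill_procregioninfo(task, addr, &pinfo, &vnodeaddr, &vid) == 0)
			break;

		printf("region 0x%llx-0x%llx prot 0x%x share_mode %d\n",
		    pinfo.pri_address, pinfo.pri_address + pinfo.pri_size,
		    pinfo.pri_protection, pinfo.pri_share_mode);

		/* Continue with the first address past this region. */
		addr = pinfo.pri_address + pinfo.pri_size;
	}
}

Note that the caller zeroes pinfo itself: fill_procregioninfo() only ORs bits into pri_flags and never clears it.
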
bool IOBufferMemoryDescriptor::initWithOptions(
    IOOptionBits options,
    vm_size_t    capacity,
    vm_offset_t  alignment,
    task_t       inTask)
{
    vm_map_t map = 0;
    IOOptionBits iomdOptions = kIOMemoryAsReference | kIOMemoryTypeVirtual;

    if (!capacity)
        return false;

    _options      = options;
    _capacity     = capacity;
    _physAddrs    = 0;
    _physSegCount = 0;
    _buffer       = 0;

    // Grab the direction and the Auto Prepare bits from the Buffer MD options
    iomdOptions  |= options & (kIOMemoryDirectionMask | kIOMemoryAutoPrepare);

    if ((options & kIOMemorySharingTypeMask) && (alignment < page_size))
        alignment = page_size;

    // Only pageable buffers may be allocated on behalf of a non-kernel task;
    // the wired allocations below always come from kernel memory.
    if ((inTask != kernel_task) && !(options & kIOMemoryPageable))
        return false;

    _alignment = alignment;
    if (options & kIOMemoryPageable)
    {
        iomdOptions |= kIOMemoryBufferPageable;
        if (inTask == kernel_task)
        {
            /* Allocate some kernel address space. */
            _buffer = IOMallocPageable(capacity, alignment);
            if (_buffer)
                map = IOPageableMapForAddress((vm_address_t) _buffer);
        }
        else
        {
            kern_return_t kr;

            if( !reserved) {
                reserved = IONew( ExpansionData, 1 );
                if( !reserved)
                    return( false );
            }
            map = get_task_map(inTask);
            vm_map_reference(map);
            reserved->map = map;
            kr = vm_allocate( map, (vm_address_t *) &_buffer, round_page_32(capacity),
                              VM_FLAGS_ANYWHERE | VM_MAKE_TAG(VM_MEMORY_IOKIT) );
            if( KERN_SUCCESS != kr)
                return( false );

            // Make sure these pages don't get copied on fork().
            kr = vm_inherit( map, (vm_address_t) _buffer, round_page_32(capacity), VM_INHERIT_NONE);
            if( KERN_SUCCESS != kr)
                return( false );
        }
    }
    else
    {
        // @@@ gvdl: Need to remove this.
        // Buffers should never auto-prepare; they should be prepared
        // explicitly. But since that was never enforced, keep the old
        // behavior for now.
        iomdOptions |= kIOMemoryAutoPrepare;

        /* Allocate a wired-down buffer inside kernel space. */
        if (options & kIOMemoryPhysicallyContiguous)
            _buffer = IOMallocContiguous(capacity, alignment, 0);
        else if (alignment > 1)
            _buffer = IOMallocAligned(capacity, alignment);
        else
            _buffer = IOMalloc(capacity);
    }

    if (!_buffer)
        return false;

    _singleRange.v.address = (vm_address_t) _buffer;
    _singleRange.v.length  = capacity;

    if (!super::initWithOptions(&_singleRange.v, 1, 0,
                                inTask, iomdOptions, /* System mapper */ 0))
        return false;

    if (options & kIOMemoryPageable) {
        kern_return_t kr;
        ipc_port_t sharedMem = (ipc_port_t) _memEntry;
        vm_size_t size = round_page_32(_ranges.v[0].length);

        // must create the entry before any pages are allocated
        if( 0 == sharedMem) {

            // set memory entry cache
            vm_prot_t memEntryCacheMode = VM_PROT_READ | VM_PROT_WRITE;
            switch (options & kIOMapCacheMask)
            {
            case kIOMapInhibitCache:
                SET_MAP_MEM(MAP_MEM_IO, memEntryCacheMode);
                break;

            case kIOMapWriteThruCache:
                SET_MAP_MEM(MAP_MEM_WTHRU, memEntryCacheMode);
                break;

            case kIOMapWriteCombineCache:
                SET_MAP_MEM(MAP_MEM_WCOMB, memEntryCacheMode);
                break;

            case kIOMapCopybackCache:
                SET_MAP_MEM(MAP_MEM_COPYBACK, memEntryCacheMode);
                break;

            case kIOMapDefaultCache:
            default:
                SET_MAP_MEM(MAP_MEM_NOOP, memEntryCacheMode);
                break;
            }

            kr = mach_make_memory_entry( map,
                                         &size, _ranges.v[0].address,
                                         memEntryCacheMode, &sharedMem,
                                         NULL );

            if( (KERN_SUCCESS == kr) && (size != round_page_32(_ranges.v[0].length))) {
                ipc_port_release_send( sharedMem );
                kr = kIOReturnVMError;
            }
            if( KERN_SUCCESS != kr)
                sharedMem = 0;
            _memEntry = (void *) sharedMem;
        }
    }

    setLength(capacity);

    return true;
}
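
A minimal sketch of how a driver might use this class, assuming the withOptions() factory that wraps initWithOptions() with inTask = kernel_task; the helper makeDMABuffer() is hypothetical, and everything else follows from the code above.

#include <IOKit/IOBufferMemoryDescriptor.h>

static IOBufferMemoryDescriptor *
makeDMABuffer(vm_size_t size)
{
    // kIOMemoryPhysicallyContiguous selects the IOMallocContiguous() path
    // in initWithOptions() above, so the buffer comes back wired and
    // physically contiguous (and, per the @@@ gvdl note, auto-prepared).
    IOBufferMemoryDescriptor *buf = IOBufferMemoryDescriptor::withOptions(
        kIODirectionInOut | kIOMemoryPhysicallyContiguous,
        size, page_size);
    if (!buf)
        return 0;

    bzero(buf->getBytesNoCopy(), size);   // zero-fill before handing to hardware
    return buf;
}

The caller owns the returned descriptor and drops it with release() once the DMA is complete.
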