Code Example #1
File: OSSerialize.cpp  Project: Apple-FOSS-Mirror/xnu
bool OSSerialize::initWithCapacity(unsigned int inCapacity)
{
    if (!super::init())
            return false;

    tags = OSDictionary::withCapacity(32);
    if (!tags) {
        return false;
    }

    tag = 0;
    length = 1;
    capacity = (inCapacity) ? round_page_32(inCapacity) : round_page_32(1);
    capacityIncrement = capacity;

    // allocate from the kernel map so that we can safely map this data
    // into user space (the primary use of the OSSerialize object)
    
    kern_return_t rc = kmem_alloc(kernel_map, (vm_offset_t *)&data, capacity);
    if (rc) {
        tags->release();
        tags = 0;
        return false;
    }
    bzero((void *)data, capacity);


    ACCUMSIZE(capacity);

    return true;
}
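
For reference, round_page_32() rounds a byte count up to the next multiple of the VM page size, so the capacity allocated above is always a whole number of pages. The following is a minimal user-space sketch of that rounding; PAGE_SIZE and ROUND_PAGE here are illustrative stand-ins, not the XNU macros.

#include <stdio.h>

#define PAGE_SIZE 4096u                               /* illustrative page size */
#define ROUND_PAGE(x) (((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))

int main(void)
{
    unsigned int inCapacity = 100;                    /* caller-requested capacity */
    unsigned int capacity = inCapacity ? ROUND_PAGE(inCapacity) : ROUND_PAGE(1);

    printf("requested %u bytes, allocating %u\n", inCapacity, capacity);  /* 100 -> 4096 */
    return 0;
}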
Code Example #2
File: OSSerialize.cpp  Project: Apple-FOSS-Mirror/xnu
unsigned int OSSerialize::ensureCapacity(unsigned int newCapacity)
{
	char *newData;

	if (newCapacity <= capacity)
		return capacity;

	// round up
	newCapacity = round_page_32(newCapacity);

	kern_return_t rc = kmem_realloc(kernel_map,
					(vm_offset_t)data,
					capacity,
					(vm_offset_t *)&newData,
					newCapacity);
	if (!rc) {
	    ACCUMSIZE(newCapacity);

	    // kmem realloc does not free the old address range
	    kmem_free(kernel_map, (vm_offset_t)data, capacity); 
	    ACCUMSIZE(-capacity);
	    
	    // kmem realloc does not zero out the new memory
	    // and this could end up going to user land
	    bzero(&newData[capacity], newCapacity - capacity);
		
	    data = newData;
	    capacity = newCapacity;
	}

	return capacity;
}
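
The pattern above (grow the buffer, zero the newly exposed tail so stale kernel memory can never reach user space, then retire the old range) can be sketched in ordinary C as follows. malloc/realloc stand in for kmem_alloc/kmem_realloc, and note that realloc, unlike kmem_realloc, releases the old range itself, so this is an illustration of the idea rather than the kernel code path.

#include <stdlib.h>
#include <string.h>

/* Grow a buffer to at least new_capacity bytes, zeroing the added tail.
 * Returns the resulting capacity; on failure the old buffer and capacity
 * are left untouched. */
static unsigned int ensure_capacity(char **data, unsigned int capacity,
                                    unsigned int new_capacity)
{
    char *new_data;

    if (new_capacity <= capacity)
        return capacity;

    new_data = realloc(*data, new_capacity);   /* analogue of kmem_realloc */
    if (new_data == NULL)
        return capacity;

    /* zero the newly exposed bytes before they can reach a consumer */
    memset(new_data + capacity, 0, new_capacity - capacity);

    *data = new_data;
    return new_capacity;
}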
Code Example #3
File: kmod.cpp  Project: OpenDarwin-CVS/SEDarwin
/*********************************************************************
* This function is registered before kmod_load_from_memory() is
* invoked to actually load a new kmod. It rounds up the header and
* total sizes and vm_allocates a buffer for the kmod. Now, KLD doesn't
* enforce any alignment of headers or segments, and we want to make
* sure that the executable code of the kmod lies on a page boundary.
* to do so, this function figures the pad between the actual header
* size and the page-rounded header size, and returns that offset into
* the allocated buffer. After kmod_load_from_memory() returns, its
* caller will move the mach_header struct back to the beginning of the
* allocated buffer so that the kmod_info_t structure contains the
* correct address.
*********************************************************************/
static
unsigned long alloc_for_kmod(
    unsigned long size,
    unsigned long headers_size) {

    vm_address_t  buffer = 0;
    kern_return_t k_result;

    unsigned long round_headers_size;
    unsigned long round_segments_size;
    unsigned long round_size;
    unsigned long headers_pad;

    round_headers_size  = round_page_32(headers_size);
    round_segments_size = round_page_32(size - headers_size);
    round_size  = round_headers_size + round_segments_size;
    headers_pad = round_headers_size - headers_size;

    k_result = vm_allocate(kernel_map, (vm_offset_t *)&buffer,
        round_size, TRUE);
    if (k_result != KERN_SUCCESS) {
        IOLog("alloc_for_kmod(): Can't allocate memory.\n");
        LOG_DELAY();
        link_buffer_address = 0;  // make sure it's clear
        link_load_address = 0;    // error sentinel for kld client
        return 0;
    }

    link_load_size = size;

    link_buffer_address = buffer;
    link_buffer_size = round_size;
    link_header_size = headers_size; // NOT rounded!

    link_load_address = link_buffer_address + headers_pad;

    return link_load_address;
}
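
The padding arithmetic described in the comment above boils down to a few lines: round the header size up to a page, and the difference between the rounded and actual sizes is the offset at which the headers are placed, so that the first segment after them lands on a page boundary. A minimal sketch with illustrative values (ROUND_PAGE is a stand-in for round_page_32):

#include <stdio.h>

#define PAGE_SIZE 4096ul
#define ROUND_PAGE(x) (((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))

int main(void)
{
    unsigned long headers_size = 0x1234;   /* hypothetical Mach-O header size */
    unsigned long round_headers_size = ROUND_PAGE(headers_size);
    unsigned long headers_pad = round_headers_size - headers_size;

    /* Loading the headers at (buffer + headers_pad) puts the end of the
     * headers, and therefore the first segment, on a page boundary. */
    printf("rounded headers = 0x%lx, pad = 0x%lx\n",
           round_headers_size, headers_pad);        /* 0x2000, 0xdcc */
    return 0;
}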
Code Example #4
void IOHIDEventQueue::start() 
{
    if ( _lock )
        IOLockLock(_lock);

    if ( _state & kHIDQueueStarted )
        goto START_END;

    if ( _currentEntrySize != _maxEntrySize )
    {
        mach_port_t port = notifyMsg ? ((mach_msg_header_t *)notifyMsg)->msgh_remote_port : MACH_PORT_NULL;
        
        // Free the existing queue data
        if (dataQueue) {
            IOFreeAligned(dataQueue, round_page_32(getQueueSize() + DATA_QUEUE_MEMORY_HEADER_SIZE));
            dataQueue = NULL;
        }
        
        if (_descriptor) {
            _descriptor->release();
            _descriptor = 0;
        }
        
        // init the queue again.  This will allocate the appropriate data.
        if ( !initWithEntries(_numEntries, _maxEntrySize) ) {
            goto START_END;
        }
        
        _currentEntrySize = _maxEntrySize;
        
        // RY: since we are initing the queue, we should reset the port as well
        if ( port ) 
            setNotificationPort(port);
    }
    else if ( dataQueue )
    {
        dataQueue->head = 0;
        dataQueue->tail = 0;
    }

    _state |= kHIDQueueStarted;

START_END:
    if ( _lock )
        IOLockUnlock(_lock);

}
Code Example #5
/*
 * free:
 *
 * Free resources
 */
void IOBufferMemoryDescriptor::free()
{
    // Cache all of the relevant information on the stack for use
    // after we call super::free()!
    IOOptionBits options   = _options;
    vm_size_t    size	   = _capacity;
    void *       buffer	   = _buffer;
    vm_map_t	 map	   = 0;
    vm_offset_t  alignment = _alignment;

    if (reserved)
    {
        map = reserved->map;
        IODelete( reserved, ExpansionData, 1 );
    }

    /* super::free may unwire - deallocate buffer afterwards */
    super::free();

    if (buffer)
    {
        if (options & kIOMemoryPageable)
        {
            if (map)
                vm_deallocate(map, (vm_address_t) buffer, round_page_32(size));
            else
                IOFreePageable(buffer, size);
        }
        else
        {
            if (options & kIOMemoryPhysicallyContiguous)
                IOFreeContiguous(buffer, size);
            else if (alignment > 1)
                IOFreeAligned(buffer, size);
            else
                IOFree(buffer, size);
        }
    }
    if (map)
        vm_map_deallocate(map);
}
Code Example #6
File: kmod.cpp  Project: OpenDarwin-CVS/SEDarwin
/*********************************************************************
* This function is registered before kmod_load_from_memory() is
* invoked to build symbol table entries for an already-loaded
* kmod. This function just checks the g_current_kmod_info variable
* to gets its load address, and futzes it by the header offset (pad).
* See lower comments for more info on load address futzing.
*********************************************************************/
static
unsigned long address_for_loaded_kmod(
    unsigned long size,
    unsigned long headers_size) {

    unsigned long round_headers_size;
    unsigned long headers_pad;

    if (!g_current_kmod_info) {
        IOLog("address_for_loaded_kmod(): No current kmod.\n");
        LOG_DELAY();
        link_load_address = 0;  // error sentinel for kld client
        return 0;
    }

    round_headers_size = round_page_32(headers_size);
    headers_pad = round_headers_size - headers_size;

    link_load_address = (unsigned long)g_current_kmod_info->address +
        headers_pad;

    return link_load_address;
}
Code Example #7
void
vnode_pager_cluster_write(
	vnode_pager_t		vnode_object,
	vm_object_offset_t	offset,
	vm_size_t		cnt,
	vm_object_offset_t   *	resid_offset,
	int		     *  io_error,
	int			upl_flags)
{
        vm_size_t       size;
	upl_t	        upl = NULL;
	int             request_flags;
	int		errno;

	if (upl_flags & UPL_MSYNC) {

	        upl_flags |= UPL_VNODE_PAGER;

		if ( (upl_flags & UPL_IOSYNC) && io_error)
		        upl_flags |= UPL_KEEPCACHED;

	        while (cnt) {
		        kern_return_t   kr;

			size = (cnt < (PAGE_SIZE * MAX_UPL_TRANSFER)) ? cnt : (PAGE_SIZE * MAX_UPL_TRANSFER); /* effective max */

			request_flags = UPL_RET_ONLY_DIRTY | UPL_COPYOUT_FROM | UPL_CLEAN_IN_PLACE |
			                UPL_SET_INTERNAL | UPL_SET_LITE;

			kr = memory_object_upl_request(vnode_object->control_handle, 
						       offset, size, &upl, NULL, NULL, request_flags);
			if (kr != KERN_SUCCESS)
			        panic("vnode_pager_cluster_write: upl request failed\n");

			vnode_pageout(vnode_object->vnode_handle, 
				      upl, (vm_offset_t)0, offset, size, upl_flags, &errno);

			if ( (upl_flags & UPL_KEEPCACHED) ) {
			        if ( (*io_error = errno) )
				        break;
			}
			cnt    -= size;
			offset += size;
		}
		if (resid_offset)
			*resid_offset = offset;

	} else {
	        vm_object_offset_t      vnode_size;
	        vm_object_offset_t	base_offset;
		vm_object_t             object;

	        /*
		 * this is the pageout path
		 */
		vnode_size = vnode_pager_get_filesize(vnode_object->vnode_handle);

		if (vnode_size > (offset + PAGE_SIZE)) {
		        /*
			 * preset the maximum size of the cluster
			 * and put us on a nice cluster boundary...
			 * and then clip the size to insure we
			 * don't request past the end of the underlying file
			 */
		        size = PAGE_SIZE * MAX_UPL_TRANSFER;
		        base_offset = offset & ~((signed)(size - 1));

			if ((base_offset + size) > vnode_size)
			        size = round_page_32(((vm_size_t)(vnode_size - base_offset)));
		} else {
		        /*
			 * we've been requested to page out a page beyond the current
			 * end of the 'file'... don't try to cluster in this case...
			 * we still need to send this page through because it might
			 * be marked precious and the underlying filesystem may need
			 * to do something with it (besides page it out)...
			 */
		        base_offset = offset;
			size = PAGE_SIZE;
		}
		object = memory_object_control_to_vm_object(vnode_object->control_handle);

		if (object == VM_OBJECT_NULL)
		        panic("vnode_pager_cluster_write: NULL vm_object in control handle\n");

		request_flags = UPL_NOBLOCK | UPL_FOR_PAGEOUT | UPL_CLEAN_IN_PLACE |
		                UPL_RET_ONLY_DIRTY | UPL_COPYOUT_FROM |
		                UPL_SET_INTERNAL | UPL_SET_LITE;

		vm_object_upl_request(object, base_offset, size,
				      &upl, NULL, NULL, request_flags);
		if (upl == NULL)
		        panic("vnode_pager_cluster_write: upl request failed\n");

	        vnode_pageout(vnode_object->vnode_handle,
			       upl, (vm_offset_t)0, upl->offset, upl->size, UPL_VNODE_PAGER, NULL);
	}
}
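
The cluster sizing in the pageout path above amounts to: align the offset down to a cluster boundary, then clip the cluster so it never extends past the end of the file, rounding the clipped size back up to a whole page. A small self-contained sketch of that arithmetic (CLUSTER stands in for PAGE_SIZE * MAX_UPL_TRANSFER and the sizes are made up):

#include <stdio.h>

#define PAGE_SIZE 4096ul
#define CLUSTER   (PAGE_SIZE * 8)   /* stand-in for PAGE_SIZE * MAX_UPL_TRANSFER */
#define ROUND_PAGE(x) (((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))

int main(void)
{
    unsigned long long vnode_size = 100000;   /* hypothetical file size */
    unsigned long long offset     = 98304;    /* page being paged out */
    unsigned long      size       = CLUSTER;

    /* put the cluster on a nice boundary... */
    unsigned long long base_offset = offset & ~(unsigned long long)(size - 1);

    /* ...and clip it so we never request past the end of the file */
    if (base_offset + size > vnode_size)
        size = ROUND_PAGE((unsigned long)(vnode_size - base_offset));

    printf("base_offset=%llu size=%lu\n", base_offset, size);   /* 98304 4096 */
    return 0;
}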
Code Example #8
File: kmod.cpp  Project: OpenDarwin-CVS/SEDarwin
/*********************************************************************
* This function takes a dependency list containing a series of
* already-loaded module names, followed by a single name for a module
* that hasn't yet been loaded. It invokes kld_load_from_memory() to
* build symbol info for the already-loaded modules, and then finally
* loads the actually requested module.
*********************************************************************/
static
kern_return_t load_kmod(OSArray * dependencyList) {
    kern_return_t result = KERN_SUCCESS;

    unsigned int  num_dependencies = 0;
    kmod_info_t ** kmod_dependencies = NULL;
    unsigned int  i;
    OSString    * requestedKmodName;   // don't release
    const char  * requested_kmod_name;
    OSString    * currentKmodName;     // don't release
    char        * kmod_address;
    unsigned long kmod_size;
    struct mach_header * kmod_header;
    unsigned long kld_result;
    int           do_kld_unload = 0;
    kmod_info_t * kmod_info_freeme = 0;
    kmod_info_t * kmod_info = 0;
    kmod_t        kmod_id;


   /* Separate the requested kmod from its dependencies.
    */
    i = dependencyList->getCount();
    if (i == 0) {
        IOLog("load_kmod(): Called with empty list.\n");
        LOG_DELAY();
        result = KERN_FAILURE;
        goto finish;
    } else {
        i--;  // make i be the index of the last entry
    }

    requestedKmodName = OSDynamicCast(OSString, dependencyList->getObject(i));
    if (!requestedKmodName) {
        IOLog("load_kmod(): Called with invalid list of kmod names.\n");
        LOG_DELAY();
        result = KERN_FAILURE;
        goto finish;
    }
    requested_kmod_name = requestedKmodName->getCStringNoCopy();
    dependencyList->removeObject(i);

   /* If the requested kmod is already loaded, there's no work to do.
    */
    kmod_info_freeme = kmod_lookupbyname_locked(requested_kmod_name);
    if (kmod_info_freeme) {
        // FIXME: Need to check for version mismatch if already loaded.
        result = KERN_SUCCESS;
        goto finish;
    }


   /* Do the KLD loads for the already-loaded modules in order to get
    * their symbols.
    */
    kld_address_func(&address_for_loaded_kmod);

    num_dependencies = dependencyList->getCount();
    kmod_dependencies = (kmod_info_t **)kalloc(num_dependencies *
        sizeof(kmod_info_t *));
    if (!kmod_dependencies) {
        IOLog("load_kmod(): Failed to allocate memory for dependency array "
            "during load of kmod \"%s\".\n", requested_kmod_name);
        LOG_DELAY();
        result = KERN_FAILURE;
        goto finish;
    }

    bzero(kmod_dependencies, num_dependencies *
        sizeof(kmod_info_t *));

    for (i = 0; i < num_dependencies; i++) {

        currentKmodName = OSDynamicCast(OSString,
            dependencyList->getObject(i));

        if (!currentKmodName) {
            IOLog("load_kmod(): Invalid dependency name at index %d for "
                "kmod \"%s\".\n", i, requested_kmod_name);
            LOG_DELAY();
            result = KERN_FAILURE;
            goto finish;
        }

        const char * current_kmod_name = currentKmodName->getCStringNoCopy();

        // These globals are needed by the kld_address functions
        g_current_kmod_info = kmod_lookupbyname_locked(current_kmod_name);
        g_current_kmod_name = current_kmod_name;

        if (!g_current_kmod_info) {
            IOLog("load_kmod(): Missing dependency \"%s\".\n",
                current_kmod_name);
            LOG_DELAY();
            result = KERN_FAILURE;
            goto finish;
        }

       /* Record the current kmod as a dependency of the requested
        * one. This will be used in building references after the
        * load is complete.
        */
        kmod_dependencies[i] = g_current_kmod_info;

        /* If the current kmod's size is zero it means that we have a
         * fake in-kernel dependency.  If so then don't have to arrange
         * for its symbol table to be reloaded as it is
         * part of the kernel's symbol table..
         */ 
        if (!g_current_kmod_info->size)
            continue;

	if (!kld_file_merge_OSObjects(current_kmod_name)) {
            IOLog("load_kmod(): Can't merge OSObjects \"%s\".\n",
		current_kmod_name);
            LOG_DELAY();
            result = KERN_FAILURE;
            goto finish;
        }

	kmod_address = (char *)
	    kld_file_getaddr(current_kmod_name, (long *) &kmod_size);
        if (!kmod_address) {

            IOLog("load_kmod() failed for dependency kmod "
                "\"%s\".\n", current_kmod_name);
            LOG_DELAY();
            result = KERN_FAILURE;
            goto finish;
        }

        kld_result = kld_load_from_memory(&kmod_header,
            current_kmod_name, kmod_address, kmod_size);

        if (kld_result) {
            do_kld_unload = 1;
        }

        if (!kld_result || !link_load_address) {
            IOLog("kld_load_from_memory() failed for dependency kmod "
                "\"%s\".\n", current_kmod_name);
            LOG_DELAY();
            result = KERN_FAILURE;
            goto finish;
        }

        kld_forget_symbol("_kmod_info");
    }

   /*****
    * Now that we've done all the dependencies, which should have already
    * been loaded, we do the last requested module, which should not have
    * already been loaded.
    */
    kld_address_func(&alloc_for_kmod);

    g_current_kmod_name = requested_kmod_name;
    g_current_kmod_info = 0;  // there is no kmod yet

    if (!map_and_patch(requested_kmod_name)) {
	IOLog("load_kmod: map_and_patch() failed for "
	    "kmod \"%s\".\n", requested_kmod_name);
	LOG_DELAY();
	result = KERN_FAILURE;
	goto finish;
    }

    kmod_address = (char *)
	kld_file_getaddr(requested_kmod_name, (long *) &kmod_size);
    if (!kmod_address) {
        IOLog("load_kmod: kld_file_getaddr()  failed internal error "
            "on \"%s\".\n", requested_kmod_name);
        LOG_DELAY();
        result = KERN_FAILURE;
        goto finish;
    }

    kld_result = kld_load_from_memory(&kmod_header,
			    requested_kmod_name, kmod_address, kmod_size);

    if (kld_result) {
        do_kld_unload = 1;
    }

    if (!kld_result || !link_load_address) {
        IOLog("load_kmod(): kld_load_from_memory() failed for "
            "kmod \"%s\".\n", requested_kmod_name);
        LOG_DELAY();
        result = KERN_FAILURE;
        goto finish;
    }


   /* Copy the linked header and image into the vm_allocated buffer.
    * Move each onto the appropriate page-aligned boundary as given
    * by the global link_... variables.
    */
    bzero((char *)link_buffer_address, link_buffer_size);
    // bcopy() is (from, to, length)
    bcopy((char *)kmod_header, (char *)link_buffer_address, link_header_size);
    bcopy((char *)kmod_header + link_header_size,
        (char *)link_buffer_address + round_page_32(link_header_size),
        link_load_size - link_header_size);


   /* Get the kmod_info struct for the newly-loaded kmod.
    */
    if (!kld_lookup("_kmod_info", (unsigned long *)&kmod_info)) {
        IOLog("kld_lookup() of \"_kmod_info\" failed for "
            "kmod \"%s\".\n", requested_kmod_name);
        LOG_DELAY();
        result = KERN_FAILURE;
        goto finish;
    }


    if (!stamp_kmod(requested_kmod_name, kmod_info)) {
        // stamp_kmod() logs a meaningful message
        result = KERN_FAILURE;
        goto finish;
    }


   /* kld_lookup of _kmod_info yielded the actual linked address,
    * so now that we've copied the data into its real place,
    * we can set this stuff.
    */
    kmod_info->address = link_buffer_address;
    kmod_info->size = link_buffer_size;
    kmod_info->hdr_size = round_page_32(link_header_size);

   /* We've written data and instructions, so *flush* the data cache
    * and *invalidate* the instruction cache.
    */
    flush_dcache64((addr64_t)link_buffer_address, link_buffer_size, false);
    invalidate_icache64((addr64_t)link_buffer_address, link_buffer_size, false);


   /* Register the new kmod with the kernel proper.
    */
    if (kmod_create_internal(kmod_info, &kmod_id) != KERN_SUCCESS) {
        IOLog("load_kmod(): kmod_create() failed for "
            "kmod \"%s\".\n", requested_kmod_name);
        LOG_DELAY();
        result = KERN_FAILURE;
        goto finish;
    }

#if DEBUG
    IOLog("kmod id %d successfully created at 0x%lx, size %ld.\n",
        (unsigned int)kmod_id, link_buffer_address, link_buffer_size);
    LOG_DELAY();
#endif /* DEBUG */

   /* Record dependencies for the newly-loaded kmod.
    */
    for (i = 0; i < num_dependencies; i++) {
        kmod_info_t * cur_dependency_info;
        kmod_t packed_id;
        cur_dependency_info = kmod_dependencies[i];
        packed_id = KMOD_PACK_IDS(kmod_id, cur_dependency_info->id);
        if (kmod_retain(packed_id) != KERN_SUCCESS) {
            IOLog("load_kmod(): kmod_retain() failed for "
                "kmod \"%s\".\n", requested_kmod_name);
            LOG_DELAY();
            kmod_destroy_internal(kmod_id);
            result = KERN_FAILURE;
            goto finish;
        }
    }

   /* Start the kmod (which invokes constructors for I/O Kit
    * drivers.
    */
    // kmod_start_or_stop(id, start?, user data, datalen)
    if (kmod_start_or_stop(kmod_id, 1, 0, 0) != KERN_SUCCESS) {
        IOLog("load_kmod(): kmod_start_or_stop() failed for "
            "kmod \"%s\".\n", requested_kmod_name);
        LOG_DELAY();
        kmod_destroy_internal(kmod_id);
        result = KERN_FAILURE;
        goto finish;
    }

finish:

    if (kmod_info_freeme) {
        kfree((unsigned int)kmod_info_freeme, sizeof(kmod_info_t));
    }

   /* Only do a kld_unload_all() if at least one load happened.
    */
    if (do_kld_unload) {
        kld_unload_all(/* deallocate sets */ 1);
    }

   /* If the link failed, blow away the allocated link buffer.
    */
    if (result != KERN_SUCCESS && link_buffer_address) {
        vm_deallocate(kernel_map, link_buffer_address, link_buffer_size);
    }

    if (kmod_dependencies) {
        for (i = 0; i < num_dependencies; i++) {
            if (kmod_dependencies[i]) {
                kfree((unsigned int)kmod_dependencies[i], sizeof(kmod_info_t));
            }
        }
        kfree((unsigned int)kmod_dependencies,
            num_dependencies * sizeof(kmod_info_t *));
    }

   /* Reset these static global variables for the next call.
    */
    g_current_kmod_name = NULL;
    g_current_kmod_info = NULL;
    link_buffer_address = 0;
    link_load_address = 0;
    link_load_size = 0;
    link_buffer_size = 0;
    link_header_size = 0;

    return result;
}
Code Example #9
File: kalloc.c  Project: OpenDarwin-CVS/SEDarwin
void
krealloc(
	vm_offset_t	*addrp,
	vm_size_t	old_size,
	vm_size_t	new_size,
	simple_lock_t	lock)
{
	register int zindex;
	register vm_size_t allocsize;
	vm_offset_t naddr;

	/* can only be used for increasing allocation size */

	assert(new_size > old_size);

	/* if old_size is zero, then we are simply allocating */

	if (old_size == 0) {
		simple_unlock(lock);
		naddr = kalloc(new_size);
		simple_lock(lock);
		*addrp = naddr;
		return;
	}

	/* if old block was kmem_alloc'd, then use kmem_realloc if necessary */

	if (old_size >= kalloc_max_prerounded) {
		old_size = round_page_32(old_size);
		new_size = round_page_32(new_size);
		if (new_size > old_size) {

			if (kmem_realloc(kalloc_map, *addrp, old_size, &naddr,
					 new_size) != KERN_SUCCESS) {
				panic("krealloc: kmem_realloc");
				naddr = 0;
			}

			simple_lock(lock);
			*addrp = naddr;

			/* kmem_realloc() doesn't free old page range. */
			kmem_free(kalloc_map, *addrp, old_size);

			kalloc_large_total += (new_size - old_size);

			if (kalloc_large_total > kalloc_large_max)
			        kalloc_large_max = kalloc_large_total;
		}
		return;
	}

	/* compute the size of the block that we actually allocated */

	allocsize = KALLOC_MINSIZE;
	zindex = first_k_zone;
	while (allocsize < old_size) {
		allocsize <<= 1;
		zindex++;
	}

	/* if new size fits in old block, then return */

	if (new_size <= allocsize) {
		return;
	}

	/* if new size does not fit in zone, kmem_alloc it, else zalloc it */

	simple_unlock(lock);
	if (new_size >= kalloc_max_prerounded) {
		if (kmem_alloc(kalloc_map, &naddr, new_size) != KERN_SUCCESS) {
			panic("krealloc: kmem_alloc");
			simple_lock(lock);
			*addrp = 0;
			return;
		}
		kalloc_large_inuse++;
		kalloc_large_total += new_size;

		if (kalloc_large_total > kalloc_large_max)
		        kalloc_large_max = kalloc_large_total;
	} else {
		register int new_zindex;

		allocsize <<= 1;
		new_zindex = zindex + 1;
		while (allocsize < new_size) {
			allocsize <<= 1;
			new_zindex++;
		}
		naddr = zalloc(k_zone[new_zindex]);
	}
	simple_lock(lock);

	/* copy existing data */

	bcopy((const char *)*addrp, (char *)naddr, old_size);

	/* free old block, and return */

	zfree(k_zone[zindex], *addrp);

	/* set up new address */

	*addrp = naddr;
}
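
The zone sizing used above is a simple doubling search: start at the minimum kalloc block size and keep doubling (advancing one zone per doubling) until the block fits. A sketch of that computation, with KALLOC_MINSIZE and FIRST_K_ZONE as illustrative constants rather than the kernel's:

#include <stdio.h>
#include <stddef.h>

#define KALLOC_MINSIZE 16u   /* illustrative minimum block size */
#define FIRST_K_ZONE   0     /* illustrative index of the smallest zone */

/* Return the power-of-two block size that holds `size` bytes and store
 * the index of the corresponding zone in *zindex. */
static unsigned int zone_block_size(size_t size, int *zindex)
{
    unsigned int allocsize = KALLOC_MINSIZE;
    int idx = FIRST_K_ZONE;

    while (allocsize < size) {
        allocsize <<= 1;
        idx++;
    }
    *zindex = idx;
    return allocsize;
}

int main(void)
{
    int zindex;
    unsigned int allocsize = zone_block_size(100, &zindex);

    printf("100 bytes -> %u-byte block in zone %d\n", allocsize, zindex);  /* 128, zone 3 */
    return 0;
}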
Code Example #10
bool IOBufferMemoryDescriptor::initWithOptions(
    IOOptionBits options,
    vm_size_t    capacity,
    vm_offset_t  alignment,
    task_t	    inTask)
{
    vm_map_t map = 0;
    IOOptionBits iomdOptions = kIOMemoryAsReference | kIOMemoryTypeVirtual;

    if (!capacity)
        return false;

    _options   	  = options;
    _capacity     = capacity;
    _physAddrs    = 0;
    _physSegCount = 0;
    _buffer	  = 0;

    // Grab the direction and the Auto Prepare bits from the Buffer MD options
    iomdOptions  |= options & (kIOMemoryDirectionMask | kIOMemoryAutoPrepare);

    if ((options & kIOMemorySharingTypeMask) && (alignment < page_size))
        alignment = page_size;

    if ((inTask != kernel_task) && !(options & kIOMemoryPageable))
        return false;

    _alignment = alignment;
    if (options & kIOMemoryPageable)
    {
        iomdOptions |= kIOMemoryBufferPageable;
        if (inTask == kernel_task)
        {
            /* Allocate some kernel address space. */
            _buffer = IOMallocPageable(capacity, alignment);
            if (_buffer)
                map = IOPageableMapForAddress((vm_address_t) _buffer);
        }
        else
        {
            kern_return_t kr;

            if( !reserved) {
                reserved = IONew( ExpansionData, 1 );
                if( !reserved)
                    return( false );
            }
            map = get_task_map(inTask);
            vm_map_reference(map);
            reserved->map = map;
            kr = vm_allocate( map, (vm_address_t *) &_buffer, round_page_32(capacity),
                              VM_FLAGS_ANYWHERE | VM_MAKE_TAG(VM_MEMORY_IOKIT) );
            if( KERN_SUCCESS != kr)
                return( false );

            // we have to make sure that these pages don't get copied on fork.
            kr = vm_inherit( map, (vm_address_t) _buffer, round_page_32(capacity), VM_INHERIT_NONE);
            if( KERN_SUCCESS != kr)
                return( false );
        }
    }
    else
    {
        // @@@ gvdl: Need to remove this
        // Buffer should never auto prepare they should be prepared explicitly
        // But it never was enforced so what are you going to do?
        iomdOptions |= kIOMemoryAutoPrepare;

        /* Allocate a wired-down buffer inside kernel space. */
        if (options & kIOMemoryPhysicallyContiguous)
            _buffer = IOMallocContiguous(capacity, alignment, 0);
        else if (alignment > 1)
            _buffer = IOMallocAligned(capacity, alignment);
        else
            _buffer = IOMalloc(capacity);
    }

    if (!_buffer)
        return false;

    _singleRange.v.address = (vm_address_t) _buffer;
    _singleRange.v.length  = capacity;

    if (!super::initWithOptions(&_singleRange.v, 1, 0,
                                inTask, iomdOptions, /* System mapper */ 0))
        return false;

    if (options & kIOMemoryPageable) {
        kern_return_t kr;
        ipc_port_t sharedMem = (ipc_port_t) _memEntry;
        vm_size_t size = round_page_32(_ranges.v[0].length);

        // must create the entry before any pages are allocated
        if( 0 == sharedMem) {

            // set memory entry cache
            vm_prot_t memEntryCacheMode = VM_PROT_READ | VM_PROT_WRITE;
            switch (options & kIOMapCacheMask)
            {
            case kIOMapInhibitCache:
                SET_MAP_MEM(MAP_MEM_IO, memEntryCacheMode);
                break;

            case kIOMapWriteThruCache:
                SET_MAP_MEM(MAP_MEM_WTHRU, memEntryCacheMode);
                break;

            case kIOMapWriteCombineCache:
                SET_MAP_MEM(MAP_MEM_WCOMB, memEntryCacheMode);
                break;

            case kIOMapCopybackCache:
                SET_MAP_MEM(MAP_MEM_COPYBACK, memEntryCacheMode);
                break;

            case kIOMapDefaultCache:
            default:
                SET_MAP_MEM(MAP_MEM_NOOP, memEntryCacheMode);
                break;
            }

            kr = mach_make_memory_entry( map,
                                         &size, _ranges.v[0].address,
                                         memEntryCacheMode, &sharedMem,
                                         NULL );

            if( (KERN_SUCCESS == kr) && (size != round_page_32(_ranges.v[0].length))) {
                ipc_port_release_send( sharedMem );
                kr = kIOReturnVMError;
            }
            if( KERN_SUCCESS != kr)
                sharedMem = 0;
            _memEntry = (void *) sharedMem;
        }
    }

    setLength(capacity);

    return true;
}
Code Example #11
void
bsd_startupearly()
{
	vm_offset_t		firstaddr;
	vm_size_t		size;
	kern_return_t	ret;

	if (nbuf == 0)
		nbuf = atop_64(sane_size / 100); /* Get 1% of ram, but no more than we can map */
	if (nbuf > 8192)
		nbuf = 8192;
	if (nbuf < 256)
		nbuf = 256;

	if (niobuf == 0)
		niobuf = nbuf;
	if (niobuf > 4096)
		niobuf = 4096;
	if (niobuf < 128)
		niobuf = 128;

	size = (nbuf + niobuf) * sizeof (struct buf);
	size = round_page_32(size);

	ret = kmem_suballoc(kernel_map,
			&firstaddr,
			size,
			FALSE,
			TRUE,
			&bufferhdr_map);

	if (ret != KERN_SUCCESS) 
		panic("Failed to create bufferhdr_map");
	
	ret = kernel_memory_allocate(bufferhdr_map,
			&firstaddr,
			size,
			0,
			KMA_HERE | KMA_KOBJECT);

	if (ret != KERN_SUCCESS)
		panic("Failed to allocate bufferhdr_map");

	buf = (struct buf * )firstaddr;
	bzero(buf,size);

	if ((sane_size > (64 * 1024 * 1024)) || ncl) {
		int scale;
		extern u_long tcp_sendspace;
		extern u_long tcp_recvspace;

		if ((nmbclusters = ncl) == 0) {
			if ((nmbclusters = ((sane_size / 16) / MCLBYTES)) > 16384)
				nmbclusters = 16384;
		}
		if ((scale = nmbclusters / NMBCLUSTERS) > 1) {
			tcp_sendspace *= scale;
			tcp_recvspace *= scale;

			if (tcp_sendspace > (32 * 1024))
				tcp_sendspace = 32 * 1024;
			if (tcp_recvspace > (32 * 1024))
				tcp_recvspace = 32 * 1024;
		}
	}
}
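
The buffer-count sizing at the top of this function takes roughly 1% of physical memory, expressed in pages, and clamps it to the range [256, 8192] for nbuf (and [128, 4096] for niobuf). A quick sketch of that clamp with an illustrative page size and RAM figure:

#include <stdio.h>

#define PAGE_SIZE 4096ull   /* illustrative page size */

static unsigned long clamp(unsigned long v, unsigned long lo, unsigned long hi)
{
    if (v > hi) return hi;
    if (v < lo) return lo;
    return v;
}

int main(void)
{
    unsigned long long sane_size = 512ull * 1024 * 1024;      /* pretend 512 MB of RAM */
    unsigned long nbuf   = clamp((sane_size / 100) / PAGE_SIZE, 256, 8192);
    unsigned long niobuf = clamp(nbuf, 128, 4096);

    printf("nbuf=%lu niobuf=%lu\n", nbuf, niobuf);   /* 1310, 1310 */
    return 0;
}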
Code Example #12
File: mach_port.c  Project: OpenDarwin-CVS/SEDarwin
kern_return_t
mach_port_names(
	ipc_space_t		space,
	mach_port_name_t	**namesp,
	mach_msg_type_number_t	*namesCnt,
	mach_port_type_t	**typesp,
	mach_msg_type_number_t	*typesCnt)
{
	ipc_entry_bits_t *capability;
	ipc_tree_entry_t tentry;
	ipc_entry_t table;
	ipc_entry_num_t tsize;
	mach_port_index_t index;
	ipc_entry_num_t actual;	/* this many names */
	ipc_port_timestamp_t timestamp;	/* logical time of this operation */
	mach_port_name_t *names;
	mach_port_type_t *types;
	kern_return_t kr;

	vm_size_t size;		/* size of allocated memory */
	vm_offset_t addr1;	/* allocated memory, for names */
	vm_offset_t addr2;	/* allocated memory, for types */
	vm_map_copy_t memory1;	/* copied-in memory, for names */
	vm_map_copy_t memory2;	/* copied-in memory, for types */

	/* safe simplifying assumption */
	assert_static(sizeof(mach_port_name_t) == sizeof(mach_port_type_t));

	if (space == IS_NULL)
		return KERN_INVALID_TASK;

	size = 0;

	for (;;) {
		ipc_entry_num_t bound;
		vm_size_t size_needed;

		is_read_lock(space);
		if (!space->is_active) {
			is_read_unlock(space);
			if (size != 0) {
				kmem_free(ipc_kernel_map, addr1, size);
				kmem_free(ipc_kernel_map, addr2, size);
			}
			return KERN_INVALID_TASK;
		}

		/* upper bound on number of names in the space */

		bound = space->is_table_size + space->is_tree_total;
		size_needed = round_page_32(bound * sizeof(mach_port_name_t));

		if (size_needed <= size)
			break;

		is_read_unlock(space);

		if (size != 0) {
			kmem_free(ipc_kernel_map, addr1, size);
			kmem_free(ipc_kernel_map, addr2, size);
		}
		size = size_needed;

		kr = vm_allocate(ipc_kernel_map, &addr1, size, TRUE);
		if (kr != KERN_SUCCESS)
			return KERN_RESOURCE_SHORTAGE;

		kr = vm_allocate(ipc_kernel_map, &addr2, size, TRUE);
		if (kr != KERN_SUCCESS) {
			kmem_free(ipc_kernel_map, addr1, size);
			return KERN_RESOURCE_SHORTAGE;
		}

		/* can't fault while we hold locks */

		kr = vm_map_wire(ipc_kernel_map, addr1, addr1 + size,
				     VM_PROT_READ|VM_PROT_WRITE, FALSE);
		if (kr != KERN_SUCCESS) {
			kmem_free(ipc_kernel_map, addr1, size);
			kmem_free(ipc_kernel_map, addr2, size);
			return KERN_RESOURCE_SHORTAGE;
		}

		kr = vm_map_wire(ipc_kernel_map, addr2, addr2 + size,
				     VM_PROT_READ|VM_PROT_WRITE, FALSE);
		if (kr != KERN_SUCCESS) {
			kmem_free(ipc_kernel_map, addr1, size);
			kmem_free(ipc_kernel_map, addr2, size);
			return KERN_RESOURCE_SHORTAGE;
		}

	}
	/* space is read-locked and active */

	names = (mach_port_name_t *) addr1;
	types = (mach_port_type_t *) addr2;
	actual = 0;

	timestamp = ipc_port_timestamp();

	table = space->is_table;
	tsize = space->is_table_size;

	for (index = 0; index < tsize; index++) {
		ipc_entry_t entry = &table[index];
		ipc_entry_bits_t bits = entry->ie_bits;

		if (IE_BITS_TYPE(bits) != MACH_PORT_TYPE_NONE) {
			mach_port_name_t name;

			name = MACH_PORT_MAKE(index, IE_BITS_GEN(bits));
			mach_port_names_helper(timestamp, entry, name, names,
					       types, &actual, space);
		}
	}

	for (tentry = ipc_splay_traverse_start(&space->is_tree);
	    tentry != ITE_NULL;
	    tentry = ipc_splay_traverse_next(&space->is_tree, FALSE)) {
		ipc_entry_t entry = &tentry->ite_entry;
		mach_port_name_t name = tentry->ite_name;

		assert(IE_BITS_TYPE(tentry->ite_bits) != MACH_PORT_TYPE_NONE);
		mach_port_names_helper(timestamp, entry, name, names,
				       types, &actual, space);
	}
	ipc_splay_traverse_finish(&space->is_tree);
	is_read_unlock(space);

	if (actual == 0) {
		memory1 = VM_MAP_COPY_NULL;
		memory2 = VM_MAP_COPY_NULL;

		if (size != 0) {
			kmem_free(ipc_kernel_map, addr1, size);
			kmem_free(ipc_kernel_map, addr2, size);
		}
	} else {
		vm_size_t size_used;
		vm_size_t vm_size_used;

		size_used = actual * sizeof(mach_port_name_t);
		vm_size_used = round_page_32(size_used);

		/*
		 *	Make used memory pageable and get it into
		 *	copied-in form.  Free any unused memory.
		 */

		kr = vm_map_unwire(ipc_kernel_map,
				     addr1, addr1 + vm_size_used, FALSE);
		assert(kr == KERN_SUCCESS);

		kr = vm_map_unwire(ipc_kernel_map,
				     addr2, addr2 + vm_size_used, FALSE);
		assert(kr == KERN_SUCCESS);

		kr = vm_map_copyin(ipc_kernel_map, addr1, size_used,
				   TRUE, &memory1);
		assert(kr == KERN_SUCCESS);

		kr = vm_map_copyin(ipc_kernel_map, addr2, size_used,
				   TRUE, &memory2);
		assert(kr == KERN_SUCCESS);

		if (vm_size_used != size) {
			kmem_free(ipc_kernel_map,
				  addr1 + vm_size_used, size - vm_size_used);
			kmem_free(ipc_kernel_map,
				  addr2 + vm_size_used, size - vm_size_used);
		}
	}

	*namesp = (mach_port_name_t *) memory1;
	*namesCnt = actual;
	*typesp = (mach_port_type_t *) memory2;
	*typesCnt = actual;
	return KERN_SUCCESS;
}
Code Example #13
File: mach_port.c  Project: OpenDarwin-CVS/SEDarwin
kern_return_t
mach_port_get_set_status(
	ipc_space_t			space,
	mach_port_name_t		name,
	mach_port_name_t		**members,
	mach_msg_type_number_t		*membersCnt)
{
	ipc_entry_num_t actual;		/* this many members */
	ipc_entry_num_t maxnames;	/* space for this many members */
	kern_return_t kr;

	vm_size_t size;		/* size of allocated memory */
	vm_offset_t addr;	/* allocated memory */
	vm_map_copy_t memory;	/* copied-in memory */

	if (space == IS_NULL)
		return KERN_INVALID_TASK;

	if (!MACH_PORT_VALID(name))
		return KERN_INVALID_RIGHT;

	size = PAGE_SIZE;	/* initial guess */

	for (;;) {
		ipc_tree_entry_t tentry;
		ipc_entry_t entry, table;
		ipc_entry_num_t tsize;
		mach_port_index_t index;
		mach_port_name_t *names;
		ipc_pset_t pset;

		kr = vm_allocate(ipc_kernel_map, &addr, size, TRUE);
		if (kr != KERN_SUCCESS)
			return KERN_RESOURCE_SHORTAGE;

		/* can't fault while we hold locks */

		kr = vm_map_wire(ipc_kernel_map, addr, addr + size,
				     VM_PROT_READ|VM_PROT_WRITE, FALSE);
		assert(kr == KERN_SUCCESS);

		kr = ipc_right_lookup_read(space, name, &entry);
		if (kr != KERN_SUCCESS) {
			kmem_free(ipc_kernel_map, addr, size);
			return kr;
		}
		/* space is read-locked and active */

		if (IE_BITS_TYPE(entry->ie_bits) != MACH_PORT_TYPE_PORT_SET) {
			is_read_unlock(space);
			kmem_free(ipc_kernel_map, addr, size);
			return KERN_INVALID_RIGHT;
		}

		pset = (ipc_pset_t) entry->ie_object;
		assert(pset != IPS_NULL);
		/* the port set must be active */

		names = (mach_port_name_t *) addr;
		maxnames = size / sizeof(mach_port_name_t);
		actual = 0;

		table = space->is_table;
		tsize = space->is_table_size;

		for (index = 0; index < tsize; index++) {
			ipc_entry_t ientry = &table[index];

			if (ientry->ie_bits & MACH_PORT_TYPE_RECEIVE) {
				ipc_port_t port =
					(ipc_port_t) ientry->ie_object;

				mach_port_gst_helper(pset, port,
						     maxnames, names, &actual);
			}
		}

		for (tentry = ipc_splay_traverse_start(&space->is_tree);
		    tentry != ITE_NULL;
		    tentry = ipc_splay_traverse_next(&space->is_tree,FALSE)) {
			ipc_entry_bits_t bits = tentry->ite_bits;

			assert(IE_BITS_TYPE(bits) != MACH_PORT_TYPE_NONE);

			if (bits & MACH_PORT_TYPE_RECEIVE) {
			    ipc_port_t port = (ipc_port_t) tentry->ite_object;

			    mach_port_gst_helper(pset, port, maxnames,
						 names, &actual);
			}
		}
		ipc_splay_traverse_finish(&space->is_tree);
		is_read_unlock(space);

		if (actual <= maxnames)
			break;

		/* didn't have enough memory; allocate more */

		kmem_free(ipc_kernel_map, addr, size);
		size = round_page_32(actual * sizeof(mach_port_name_t)) + PAGE_SIZE;
	}

	if (actual == 0) {
		memory = VM_MAP_COPY_NULL;

		kmem_free(ipc_kernel_map, addr, size);
	} else {
		vm_size_t size_used;
		vm_size_t vm_size_used;

		size_used = actual * sizeof(mach_port_name_t);
		vm_size_used = round_page_32(size_used);

		/*
		 *	Make used memory pageable and get it into
		 *	copied-in form.  Free any unused memory.
		 */

		kr = vm_map_unwire(ipc_kernel_map,
				     addr, addr + vm_size_used, FALSE);
		assert(kr == KERN_SUCCESS);

		kr = vm_map_copyin(ipc_kernel_map, addr, size_used,
				   TRUE, &memory);
		assert(kr == KERN_SUCCESS);

		if (vm_size_used != size)
			kmem_free(ipc_kernel_map,
				  addr + vm_size_used, size - vm_size_used);
	}

	*members = (mach_port_name_t *) memory;
	*membersCnt = actual;
	return KERN_SUCCESS;
}
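
Both mach_port examples above use the same guess-and-retry sizing loop: allocate a buffer from an initial guess, walk the tables, and if the real count did not fit, free the buffer and retry with a size rounded up from the observed count. The loop structure can be sketched in plain C like this (fill_names() is a hypothetical stand-in for the table walk, and malloc replaces the kernel's vm_allocate/kmem_free):

#include <stdio.h>
#include <stdlib.h>

#define PAGE_SIZE 4096u
#define ROUND_PAGE(x) (((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1u))

/* Hypothetical producer: there are `total` names; copy at most max_names
 * of them and report the total so the caller can tell whether it fit. */
static unsigned int fill_names(unsigned int *names, unsigned int max_names)
{
    unsigned int total = 2000;              /* pretend the set has 2000 members */
    unsigned int n = (total < max_names) ? total : max_names;
    unsigned int i;

    for (i = 0; i < n; i++)
        names[i] = i;
    return total;
}

int main(void)
{
    unsigned int size = PAGE_SIZE;          /* initial guess, as in the kernel code */
    unsigned int *names;
    unsigned int actual;

    for (;;) {
        names = malloc(size);
        if (names == NULL)
            return 1;

        actual = fill_names(names, size / sizeof(*names));
        if (actual <= size / sizeof(*names))
            break;                          /* everything fit */

        /* too small: free and retry with a buffer sized from the real count */
        free(names);
        size = ROUND_PAGE(actual * sizeof(*names)) + PAGE_SIZE;
    }

    printf("collected %u names in a %u-byte buffer\n", actual, size);
    free(names);
    return 0;
}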