Example No. 1
void IOFreePhysical(mach_vm_address_t address, mach_vm_size_t size)
{
    if (gIOCopyMapper)
    {
        // Convert the mapped address to its starting page number and the
        // page-rounded size to a page count, then release the IOVM pages.
        gIOCopyMapper->iovmFree(atop_64(address), atop_64(round_page(size)));
    }
}
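All of these examples rely on the same page/byte conversions: atop_64() turns a byte address into a page number, ptoa_64() is its inverse, and round_page() rounds a byte count up to a page boundary. A minimal userspace sketch of that arithmetic, using stand-in macros and an assumed 4 KiB page size rather than the kernel's real PAGE_SHIFT:

#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-ins only; the real macros come from the Mach headers
 * and use the platform's PAGE_SHIFT. A 4 KiB page size is assumed here. */
#define MY_PAGE_SHIFT  12
#define MY_PAGE_SIZE   (1ULL << MY_PAGE_SHIFT)
#define my_atop_64(x)     ((uint64_t)(x) >> MY_PAGE_SHIFT)
#define my_ptoa_64(x)     ((uint64_t)(x) << MY_PAGE_SHIFT)
#define my_round_page(x)  (((uint64_t)(x) + MY_PAGE_SIZE - 1) & ~(MY_PAGE_SIZE - 1))

int main(void)
{
    /* 10000 bytes round up to 3 pages, so iovmFree() would get a count of 3. */
    printf("%llu\n", (unsigned long long) my_atop_64(my_round_page(10000)));

    /* Converting an address to a page number and back drops the page offset. */
    printf("0x%llx\n", (unsigned long long) my_ptoa_64(my_atop_64(0x12345678)));
    return 0;
}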
Example No. 2
kern_return_t IOMapPages(vm_map_t map, mach_vm_address_t va, mach_vm_address_t pa,
			mach_vm_size_t length, unsigned int options)
{
    vm_prot_t	 prot;
    unsigned int flags;
    ppnum_t	 pagenum;
    pmap_t 	 pmap = map->pmap;

    prot = (options & kIOMapReadOnly)
		? VM_PROT_READ : (VM_PROT_READ|VM_PROT_WRITE);

    pagenum = (ppnum_t)atop_64(pa);

    switch (options & kIOMapCacheMask) {		/* What cache mode do we need? */

	case kIOMapDefaultCache:
	default:
	    flags = IODefaultCacheBits(pa);
	    break;

	case kIOMapInhibitCache:
	    flags = VM_WIMG_IO;
	    break;

	case kIOMapWriteThruCache:
	    flags = VM_WIMG_WTHRU;
	    break;

	case kIOMapWriteCombineCache:
	    flags = VM_WIMG_WCOMB;
	    break;

	case kIOMapCopybackCache:
	    flags = VM_WIMG_COPYBACK;
	    break;

	case kIOMapCopybackInnerCache:
	    flags = VM_WIMG_INNERWBACK;
	    break;
    }

    // Apply the chosen WIMG caching attributes to the physical page and note
    // on the mapping at va that it carries non-default cache attributes.
    pmap_set_cache_attributes(pagenum, flags);

    vm_map_set_cache_attr(map, (vm_map_offset_t)va);

    // Set up a block mapped area
    pmap_map_block(pmap, va, pagenum, (uint32_t) atop_64(round_page_64(length)), prot, 0, 0);

    return( KERN_SUCCESS );
}
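A hedged usage sketch of the mapper above: the virtual and physical addresses below are made-up values, and the call simply maps one page of hypothetical device registers uncached and read-only.

// Sketch only: the addresses are illustrative, not real hardware values.
static kern_return_t
map_device_registers(vm_map_t map)
{
    mach_vm_address_t regs_va = 0xffffff8012340000ULL;   // assumed free kernel VA
    mach_vm_address_t regs_pa = 0x00000000d0000000ULL;   // assumed register physical address

    return IOMapPages(map, regs_va, regs_pa, PAGE_SIZE,
                      kIOMapInhibitCache | kIOMapReadOnly);
}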
Example No. 3
mach_vm_address_t IOMallocPhysical(mach_vm_size_t size, mach_vm_address_t mask)
{
    mach_vm_address_t address = 0;
    if (gIOCopyMapper)
    {
        // iovmAlloc() takes a page count and returns the first allocated page
        // number; ptoa_64() converts that back to a byte address.
        address = ptoa_64(gIOCopyMapper->iovmAlloc(atop_64(round_page(size))));
    }
    return (address);
}
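Taken together with Example No. 1, allocation and release mirror each other: the byte address returned by IOMallocPhysical() is what IOFreePhysical() expects back, along with the same size. A brief sketch of the pairing (the 64 KiB length is arbitrary):

// Sketch only: pair the two helpers shown above.
mach_vm_size_t    len  = 64 * 1024;
mach_vm_address_t addr = IOMallocPhysical(len, 0 /* mask is unused in this path */);
if (addr)
{
    /* ... program the DMA engine with addr ... */
    IOFreePhysical(addr, len);
}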
Example No. 4
void
hibernate_page_list_setall_machine(hibernate_page_list_t * page_list,
                                    hibernate_page_list_t * page_list_wired,
                                    uint32_t * pagesOut)
{
    uint32_t page, count, PCAsize;

    /* Get total size of PCA table */
    PCAsize = round_page((hash_table_size / PerProcTable[0].ppe_vaddr->pf.pfPTEG) 
                          * sizeof(PCA_t));

    /* The PCA sits immediately below the hash table, so the range to discard
       starts PCAsize bytes below hash_table_base and spans both regions. */
    page = atop_64(hash_table_base - PCAsize);
    count = atop_64(hash_table_size + PCAsize);

    hibernate_set_page_state(page_list, page_list_wired, page, count, 0);
    *pagesOut -= count;

    HIBLOG("removed hash, pca: %d pages\n", count);

    save_snapshot();
}
Example No. 5
__BEGIN_DECLS

// These are C accessors to the system mapper for non-IOKit clients
ppnum_t IOMapperIOVMAlloc(unsigned pages)
{
    IOReturn ret;
    uint64_t dmaAddress, dmaLength;

    IOMapper::checkForSystemMapper();

    ret = kIOReturnUnsupported;
    if (IOMapper::gSystem)
    {
        ret = IOMapper::gSystem->iovmMapMemory(
        		NULL, 0, ptoa_64(pages), 
        		(kIODMAMapReadAccess | kIODMAMapWriteAccess),
        		NULL, NULL, NULL,
        		&dmaAddress, &dmaLength);
    }

    // The legacy interface returns a page number, so convert the bus address
    // handed back by the mapper with atop_64().
    if (kIOReturnSuccess == ret) return (atop_64(dmaAddress));
    return (0);
}
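A hedged usage sketch for the accessor above: a non-IOKit client asks for a run of IOVM pages and converts the returned page number back to a bus address with ptoa_64(). The page count is arbitrary, and releasing the range (through the mapper's corresponding free path) is not shown.

// Sketch only: reserve 16 IOVM pages through the system mapper accessor.
ppnum_t first = IOMapperIOVMAlloc(16);
if (first)
{
    uint64_t busAddr = ptoa_64(first);   // bus address of the first page
    /* ... hand busAddr to the device's DMA engine ... */
}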
Example No. 6
mach_vm_address_t
IOKernelAllocateWithPhysicalRestrict(mach_vm_size_t size, mach_vm_address_t maxPhys, 
			                mach_vm_size_t alignment, bool contiguous)
{
    kern_return_t	kr;
    mach_vm_address_t	address;
    mach_vm_address_t	allocationAddress;
    mach_vm_size_t	adjustedSize;
    mach_vm_address_t	alignMask;

    if (size == 0)
	return (0);
    if (alignment == 0) 
        alignment = 1;

    alignMask = alignment - 1;
    adjustedSize = (2 * size) + sizeof(mach_vm_size_t) + sizeof(mach_vm_address_t);

    contiguous = (contiguous && (adjustedSize > page_size))
                   || (alignment > page_size);

    if (contiguous || maxPhys)
    {
        int options = 0;
	vm_offset_t virt;

	adjustedSize = size;
        contiguous = (contiguous && (adjustedSize > page_size))
                           || (alignment > page_size);

	if (!contiguous)
	{
	    if (maxPhys <= 0xFFFFFFFF)
	    {
		maxPhys = 0;
		options |= KMA_LOMEM;
	    }
	    else if (gIOLastPage && (atop_64(maxPhys) > gIOLastPage))
	    {
		maxPhys = 0;
	    }
	}
	if (contiguous || maxPhys)
	{
	    kr = kmem_alloc_contig(kernel_map, &virt, size,
				   alignMask, atop(maxPhys), atop(alignMask), 0);
	}
	else
	{
	    kr = kernel_memory_allocate(kernel_map, &virt,
					size, alignMask, options);
	}
	if (KERN_SUCCESS == kr)
	    address = virt;
	else
	    address = 0;
    }
    else
    {
        adjustedSize += alignMask;
        allocationAddress = (mach_vm_address_t) kalloc(adjustedSize);

        if (allocationAddress) {

            // Align up past the two bookkeeping words reserved below the
            // address that will be handed back to the caller.
            address = (allocationAddress + alignMask
                    + (sizeof(mach_vm_size_t) + sizeof(mach_vm_address_t)))
                    & (~alignMask);

            // If the block would span a page boundary, move it up to the next
            // page boundary (the 2 * size slack reserved above leaves room).
            if (atop_32(address) != atop_32(address + size - 1))
                address = round_page(address);

            // Stash the kalloc'd size and the original allocation address just
            // below the returned address so the matching free can recover them.
            *((mach_vm_size_t *)(address - sizeof(mach_vm_size_t)
                            - sizeof(mach_vm_address_t))) = adjustedSize;
            *((mach_vm_address_t *)(address - sizeof(mach_vm_address_t)))
                            = allocationAddress;
        } else
            address = 0;
    }

    if (address) {
        IOStatisticsAlloc(kIOStatisticsMallocContiguous, size);
#if IOALLOCDEBUG
        debug_iomalloc_size += size;
#endif
    }

    return (address);
}
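The non-contiguous branch above hides two bookkeeping words directly below the address it returns, so a matching free has to read them back before releasing the kalloc block. A sketch of just that branch (the kernel's real free routine also handles the kmem-allocated case, which is omitted here):

// Sketch only: undo the kalloc path of the allocator above.
static void
free_with_hidden_header(mach_vm_address_t address)
{
    mach_vm_size_t    adjustedSize;
    mach_vm_address_t allocationAddress;

    // Recover the values stashed just below the aligned address.
    adjustedSize = *((mach_vm_size_t *)(address - sizeof(mach_vm_size_t)
                        - sizeof(mach_vm_address_t)));
    allocationAddress = *((mach_vm_address_t *)(address - sizeof(mach_vm_address_t)));

    kfree((void *) allocationAddress, adjustedSize);
}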
Example No. 7
void
bsd_startupearly()
{
	vm_offset_t		firstaddr;
	vm_size_t		size;
	kern_return_t	ret;

	if (nbuf == 0)
		nbuf = atop_64(sane_size / 100); /* Get 1% of ram, but no more than we can map */
	if (nbuf > 8192)
		nbuf = 8192;
	if (nbuf < 256)
		nbuf = 256;

	if (niobuf == 0)
		niobuf = nbuf;
	if (niobuf > 4096)
		niobuf = 4096;
	if (niobuf < 128)
		niobuf = 128;

	size = (nbuf + niobuf) * sizeof (struct buf);
	size = round_page_32(size);

	ret = kmem_suballoc(kernel_map,
			&firstaddr,
			size,
			FALSE,
			TRUE,
			&bufferhdr_map);

	if (ret != KERN_SUCCESS) 
		panic("Failed to create bufferhdr_map");
	
	ret = kernel_memory_allocate(bufferhdr_map,
			&firstaddr,
			size,
			0,
			KMA_HERE | KMA_KOBJECT);

	if (ret != KERN_SUCCESS)
		panic("Failed to allocate bufferhdr_map");

	buf = (struct buf * )firstaddr;
	bzero(buf,size);

	if ((sane_size > (64 * 1024 * 1024)) || ncl) {
		int scale;
		extern u_long tcp_sendspace;
		extern u_long tcp_recvspace;

		if ((nmbclusters = ncl) == 0) {
			if ((nmbclusters = ((sane_size / 16) / MCLBYTES)) > 16384)
				nmbclusters = 16384;
		}
		if ((scale = nmbclusters / NMBCLUSTERS) > 1) {
			tcp_sendspace *= scale;
			tcp_recvspace *= scale;

			if (tcp_sendspace > (32 * 1024))
				tcp_sendspace = 32 * 1024;
			if (tcp_recvspace > (32 * 1024))
				tcp_recvspace = 32 * 1024;
		}
	}
}
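The buffer sizing heuristic is easy to check by hand. A small userspace sketch of the same clamping logic, assuming 4 KiB pages and 512 MiB of usable RAM (both values are illustrative):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint64_t sane_size = 512ULL * 1024 * 1024;   /* assumed usable RAM */
    uint64_t nbuf = (sane_size / 100) >> 12;     /* ~1% of RAM, in 4 KiB pages */
    uint64_t niobuf;

    if (nbuf > 8192) nbuf = 8192;
    if (nbuf < 256)  nbuf = 256;

    niobuf = nbuf;
    if (niobuf > 4096) niobuf = 4096;
    if (niobuf < 128)  niobuf = 128;

    /* With these inputs: nbuf = 1310, niobuf = 1310. */
    printf("nbuf=%llu niobuf=%llu\n",
           (unsigned long long) nbuf, (unsigned long long) niobuf);
    return 0;
}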