Example #1
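/* Allocate sz bytes of zero-filled memory that child processes can share
 * with the parent. The region is prefixed with a small header recording
 * the allocation size; the caller receives the address just past it. */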
static void *__shm_cmem_alloc(unsigned int sz)
{
  void *mem = ((void *)0);
  sz += sizeof(void *); /* reserve space for the size header */
  #if 0 /* may be needed in some places */
  {
    unsigned int pgsize = getpagesize();
    if ((sz % pgsize) != 0)
      sz += pgsize - (sz % pgsize);
  }
  #endif
  #if (CLIENT_OS == OS_NEXTSTEP)
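  /* Mach VM: allocate zero-filled pages, then mark them VM_INHERIT_SHARE
     so that tasks created by fork() share the same physical pages. */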
  if (vm_allocate(task_self(), (vm_address_t *)&mem,
                  sz, TRUE) != KERN_SUCCESS)
    return NULL;

  if (vm_inherit(task_self(), (vm_address_t)mem, sz,
                 VM_INHERIT_SHARE) != KERN_SUCCESS) {
    vm_deallocate(task_self(), (vm_address_t)mem, sz);
    return NULL;
  }
  #elif defined(USE_SYSV_SHM)
  {
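    /* System V shared memory: create a private segment and attach it.
       Marking it IPC_RMID right away makes the kernel remove the segment
       automatically once the last process detaches. */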
    int shmid = shmget(IPC_PRIVATE, sz, 0600 );
    if (shmid != -1)
    {
      mem = (void *)shmat(shmid, 0, 0 );
      if (mem == ((void *)-1)) /* shmat() returns (void *)-1 on failure */
        mem = (void *)0;
      shmctl( shmid, IPC_RMID, 0);
    }
    TRACE_OUT((0,"shmat(%d)=>%p\n", shmid, mem));
  }
  #elif defined(MAP_ANON) /* BSD style */
  {
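    /* BSD-style anonymous mapping: MAP_ANON needs no backing file, and
       MAP_SHARED keeps the pages shared across fork(). */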
    mem = mmap( 0, sz, PROT_READ|PROT_WRITE, MAP_ANON|MAP_SHARED, -1, 0);
    if (mem == ((void *)-1)) /* MAP_FAILED */
      mem = (void *)0;
    TRACE_OUT((0,"mmap(0, %u, ..., MAP_ANON|MAP_SHARED, -1, 0)=%p\n%s\n",
               sz, mem, ((mem) ? ("") : (strerror(errno))) ));
  }
  #else /* SysV style */
  {
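    /* Classic SysV/SVR4 fallback: a shared mapping of /dev/zero yields
       zero-filled pages without MAP_ANON; the fd can be closed once the
       mapping exists. */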
    int fd = open("/dev/zero", O_RDWR);
    if (fd != -1)
    {
      mem = mmap( 0, sz, PROT_READ|PROT_WRITE, MAP_SHARED, fd, 0);
      if (mem == ((void *)-1)) /* MAP_FAILED */
        mem = (void *)0;
      close(fd);
    }
    TRACE_OUT((0, "mmap(0, %u, ..., MAP_SHARED, fd=%d, 0)=%p\n%s\n",
               sz, fd, mem, ((mem) ? ("") : (strerror(errno))) ));
  }
  #endif
  if (mem)
  {
    char *p = (char *)mem;
    memset( mem, 0, sz );
    *((unsigned int *)mem) = sz; /* record the total size in the header */
    p += sizeof(void *);         /* skip the header slot (pointer-sized for alignment) */
    mem = (void *)p;
  }
  return mem;
}
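
/* A minimal sketch, not part of the original listing, of the matching
 * free routine implied by the size header above: step back over the
 * header, read the recorded size, and release the region with the
 * platform call that mirrors the allocator. The name __shm_cmem_free
 * is assumed for illustration. */
#if 0 /* illustrative only */
static void __shm_cmem_free(void *mem)
{
  if (mem)
  {
    char *p = ((char *)mem) - sizeof(void *);
    unsigned int sz = *((unsigned int *)p);
    #if (CLIENT_OS == OS_NEXTSTEP)
    vm_deallocate(task_self(), (vm_address_t)p, sz);
    #elif defined(USE_SYSV_SHM)
    shmdt((void *)p);      /* segment was marked IPC_RMID at creation */
    #else
    munmap((void *)p, sz); /* covers both mmap() paths */
    #endif
  }
}
#endif
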
Example #2
/* Create the backing store for an IOBufferMemoryDescriptor. Pageable
 * buffers are allocated out of a VM map (the kernel pageable map, or the
 * client task's map); everything else comes from the wired kernel
 * allocators (IOMallocContiguous/IOMallocAligned/IOMalloc). */
bool IOBufferMemoryDescriptor::initWithOptions(
    IOOptionBits options,
    vm_size_t    capacity,
    vm_offset_t  alignment,
    task_t       inTask)
{
    vm_map_t map = 0;
    IOOptionBits iomdOptions = kIOMemoryAsReference | kIOMemoryTypeVirtual;

    if (!capacity)
        return false;

    _options      = options;
    _capacity     = capacity;
    _physAddrs    = 0;
    _physSegCount = 0;
    _buffer       = 0;

    // Grab the direction and the Auto Prepare bits from the Buffer MD options
    iomdOptions  |= options & (kIOMemoryDirectionMask | kIOMemoryAutoPrepare);

    // Shared buffers must be at least page aligned so whole pages can be
    // mapped into another task.
    if ((options & kIOMemorySharingTypeMask) && (alignment < page_size))
        alignment = page_size;

    // Only pageable buffers may be allocated on behalf of a non-kernel task.
    if ((inTask != kernel_task) && !(options & kIOMemoryPageable))
        return false;

    _alignment = alignment;
    if (options & kIOMemoryPageable)
    {
        iomdOptions |= kIOMemoryBufferPageable;
        if (inTask == kernel_task)
        {
            /* Allocate some kernel address space. */
            _buffer = IOMallocPageable(capacity, alignment);
            if (_buffer)
                map = IOPageableMapForAddress((vm_address_t) _buffer);
        }
        else
        {
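            // Client-task path: allocate the pages directly in the task's
            // own VM map so the buffer lives in the client's address space.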
            kern_return_t kr;

            if( !reserved) {
                reserved = IONew( ExpansionData, 1 );
                if( !reserved)
                    return( false );
            }
            map = get_task_map(inTask);
            vm_map_reference(map);  // hold a reference so the map outlives us
            reserved->map = map;
            kr = vm_allocate( map, (vm_address_t *) &_buffer, round_page_32(capacity),
                              VM_FLAGS_ANYWHERE | VM_MAKE_TAG(VM_MEMORY_IOKIT) );
            if( KERN_SUCCESS != kr)
                return( false );

            // we have to make sure that these pages don't get copied on fork.
            kr = vm_inherit( map, (vm_address_t) _buffer, round_page_32(capacity), VM_INHERIT_NONE);
            if( KERN_SUCCESS != kr)
                return( false );
        }
    }
    else
    {
        // @@@ gvdl: Need to remove this
        // Buffers should never auto-prepare; they should be prepared
        // explicitly. But it never was enforced, so what are you going to do?
        iomdOptions |= kIOMemoryAutoPrepare;

        /* Allocate a wired-down buffer inside kernel space. */
        if (options & kIOMemoryPhysicallyContiguous)
            _buffer = IOMallocContiguous(capacity, alignment, 0);
        else if (alignment > 1)
            _buffer = IOMallocAligned(capacity, alignment);
        else
            _buffer = IOMalloc(capacity);
    }

    if (!_buffer)
        return false;

    _singleRange.v.address = (vm_address_t) _buffer;
    _singleRange.v.length  = capacity;

    if (!super::initWithOptions(&_singleRange.v, 1, 0,
                                inTask, iomdOptions, /* System mapper */ 0))
        return false;

    if (options & kIOMemoryPageable) {
        kern_return_t kr;
        ipc_port_t sharedMem = (ipc_port_t) _memEntry;
        vm_size_t size = round_page_32(_ranges.v[0].length);

        // must create the entry before any pages are allocated
        if( 0 == sharedMem) {

            // set memory entry cache
            vm_prot_t memEntryCacheMode = VM_PROT_READ | VM_PROT_WRITE;
            switch (options & kIOMapCacheMask)
            {
            case kIOMapInhibitCache:
                SET_MAP_MEM(MAP_MEM_IO, memEntryCacheMode);
                break;

            case kIOMapWriteThruCache:
                SET_MAP_MEM(MAP_MEM_WTHRU, memEntryCacheMode);
                break;

            case kIOMapWriteCombineCache:
                SET_MAP_MEM(MAP_MEM_WCOMB, memEntryCacheMode);
                break;

            case kIOMapCopybackCache:
                SET_MAP_MEM(MAP_MEM_COPYBACK, memEntryCacheMode);
                break;

            case kIOMapDefaultCache:
            default:
                SET_MAP_MEM(MAP_MEM_NOOP, memEntryCacheMode);
                break;
            }

            // Wrap the range in a Mach memory entry so it can later be
            // mapped into another address space with the chosen cache mode.
            kr = mach_make_memory_entry( map,
                                         &size, _ranges.v[0].address,
                                         memEntryCacheMode, &sharedMem,
                                         NULL );

            // If the entry came back a different size than requested,
            // treat it as a failure and drop the send right.
            if( (KERN_SUCCESS == kr) && (size != round_page_32(_ranges.v[0].length))) {
                ipc_port_release_send( sharedMem );
                kr = kIOReturnVMError;
            }
            if( KERN_SUCCESS != kr)
                sharedMem = 0;
            _memEntry = (void *) sharedMem;
        }
    }

    setLength(capacity);

    return true;
}
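
/* A minimal usage sketch, assuming the standard IOKit factory
 * IOBufferMemoryDescriptor::inTaskWithOptions() that wraps the
 * initWithOptions() shown above. The capacity, option values, and the
 * caller function name are illustrative, not from the original source. */
#if 0 /* illustrative only */
static void exampleUse(void) /* hypothetical caller */
{
    IOBufferMemoryDescriptor *buf =
        IOBufferMemoryDescriptor::inTaskWithOptions(
            kernel_task,       /* kernel task => wired IOMalloc* path */
            kIODirectionInOut, /* direction bits land in iomdOptions */
            4096,              /* capacity */
            page_size);        /* alignment */
    if (buf) {
        void *p = buf->getBytesNoCopy(); /* the _buffer allocated above */
        IOLog("buffer at %p\n", p);
        buf->release();
    }
}
#endif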