bool TPCircularBufferInit(TPCircularBuffer *buffer, int length) {
    // Keep trying until we get our buffer; needed to handle race conditions
    while(1) {
        buffer->length = round_page(length);    // We need whole page sizes
        // Temporarily allocate twice the length, so we have the contiguous address space to
        // support a second instance of the buffer directly after
        vm_address_t bufferAddress;
        if ( !checkResult(vm_allocate(mach_task_self(),
                                      &bufferAddress,
                                      buffer->length * 2,
                                      VM_FLAGS_ANYWHERE), // allocate anywhere it'll fit
                          "Buffer allocation") ) {
            // try again if we fail
            continue;
        }
        
        // Now replace the second half of the allocation with a virtual copy of the first half. Deallocate the second half...
        if ( !checkResult(vm_deallocate(mach_task_self(),
                                        bufferAddress + buffer->length,
                                        buffer->length),
                          "Buffer deallocation") ) {
            // if this fails somehow, deallocate the whole region and try again
            vm_deallocate(mach_task_self(), bufferAddress, buffer->length);
            continue;
        }
        
        // Re-map the buffer to the address space immediately after the buffer
        vm_address_t virtualAddress = bufferAddress + buffer->length;
        vm_prot_t cur_prot, max_prot;
        if(!checkResult(vm_remap(mach_task_self(),
                                 &virtualAddress, // mirror target
                                 buffer->length, // size of mirror
                                 0, // auto alignment
                                 0, // force remapping to virtualAddress
                                 mach_task_self(), // same task
                                 bufferAddress, // mirror source
                                 0, // MAP READ-WRITE, NOT COPY
                                 &cur_prot, // current protection (unused output)
                                 &max_prot, // maximum protection (unused output)
                                 VM_INHERIT_DEFAULT), "Remap buffer memory")) {
            // if this remap failed, we hit a race condition, so deallocate and try again
            vm_deallocate(mach_task_self(), bufferAddress, buffer->length);
            continue;
        }
        
        if ( virtualAddress != bufferAddress+buffer->length ) {
            // if the memory is not contiguous, clean up both allocated buffers and try again
            printf("Couldn't map buffer memory to end of buffer\n");
            vm_deallocate(mach_task_self(), virtualAddress, buffer->length);
            vm_deallocate(mach_task_self(), bufferAddress, buffer->length);
            continue;
        }
        
        buffer->buffer = (void*)bufferAddress;
        buffer->fillCount = 0;
        buffer->head = buffer->tail = 0;
        
        return true;
    }
}
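
The payoff of this mirrored mapping is that a producer or consumer never has to split an operation at the wrap point: a single memcpy that runs past the end of the first mapping lands harmlessly in the mirror, which aliases the start of the buffer. A minimal sketch of a producer built on the struct above (produceBytes is a hypothetical helper, not part of the library, and the real library updates fillCount atomically):

#include <string.h>

// Hypothetical producer: one memcpy, no wrap-point splitting.
static bool produceBytes(TPCircularBuffer *buffer, const void *src, int32_t len) {
    if ( len > buffer->length - buffer->fillCount ) return false; // no room
    // head+len may run past buffer->length; the excess bytes land in the
    // mirror pages, which alias the start of the real buffer
    memcpy((char*)buffer->buffer + buffer->head, src, len);
    buffer->head = (buffer->head + len) % buffer->length;
    buffer->fillCount += len; // NB: non-atomic; fine only for single-threaded use
    return true;
}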
Example #2
static ffi_trampoline_table *
ffi_trampoline_table_alloc (void)
{
  ffi_trampoline_table *table;
  vm_address_t config_page;
  vm_address_t trampoline_page;
  vm_address_t trampoline_page_template;
  vm_prot_t cur_prot;
  vm_prot_t max_prot;
  kern_return_t kt;
  uint16_t i;

  /* Allocate two pages -- a config page and a placeholder page */
  config_page = 0x0;
  kt = vm_allocate (mach_task_self (), &config_page, PAGE_MAX_SIZE * 2,
		    VM_FLAGS_ANYWHERE);
  if (kt != KERN_SUCCESS)
    return NULL;

  /* Remap the trampoline table on top of the placeholder page */
  trampoline_page = config_page + PAGE_MAX_SIZE;
  trampoline_page_template = (vm_address_t)&ffi_closure_trampoline_table_page;
#ifdef __arm__
  /* ffi_closure_trampoline_table_page can be thumb-biased on some ARM archs */
  trampoline_page_template &= ~1UL;
#endif
  kt = vm_remap (mach_task_self (), &trampoline_page, PAGE_MAX_SIZE, 0x0,
		 VM_FLAGS_OVERWRITE, mach_task_self (), trampoline_page_template,
		 FALSE, &cur_prot, &max_prot, VM_INHERIT_SHARE);
  if (kt != KERN_SUCCESS)
    {
      vm_deallocate (mach_task_self (), config_page, PAGE_MAX_SIZE * 2);
      return NULL;
    }

  /* We have valid trampoline and config pages */
  table = calloc (1, sizeof (ffi_trampoline_table));
  table->free_count = FFI_TRAMPOLINE_COUNT;
  table->config_page = config_page;
  table->trampoline_page = trampoline_page;

  /* Create and initialize the free list */
  table->free_list_pool =
    calloc (FFI_TRAMPOLINE_COUNT, sizeof (ffi_trampoline_table_entry));

  for (i = 0; i < table->free_count; i++)
    {
      ffi_trampoline_table_entry *entry = &table->free_list_pool[i];
      entry->trampoline =
	(void *) (table->trampoline_page + (i * FFI_TRAMPOLINE_SIZE));

      if (i < table->free_count - 1)
	entry->next = &table->free_list_pool[i + 1];
    }

  table->free_list = table->free_list_pool;

  return table;
}
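
Because vm_remap() placed the trampoline page exactly PAGE_MAX_SIZE after the config page, a trampoline at offset o in its page can locate per-closure data at the same offset o in the page before it. A sketch of how an allocator built on this table might hand out a slot (trampoline_take and the one-pointer-per-slot config layout are assumptions, not part of the code above):

/* Hypothetical: pop a trampoline from the free list and store its closure
   data at the parallel offset on the config page.  Assumes one pointer of
   config data per slot; the real layout is defined by the trampoline code. */
static void *
trampoline_take (ffi_trampoline_table *table, void *closure_data)
{
  ffi_trampoline_table_entry *entry = table->free_list;
  if (entry == NULL)
    return NULL;		/* table exhausted */
  table->free_list = entry->next;
  table->free_count--;

  uintptr_t offset = (uintptr_t) entry->trampoline - table->trampoline_page;
  *(void **) (table->config_page + offset) = closure_data;

  return entry->trampoline;
}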
Example #3
/*
 * Maps into the server the address range specified by user_addr and size
 * in the task specified by task.  The range need not be page aligned.
 * Returns a newly allocated shared memory region that is locked.  Does not
 * enter the region into the share cache or initialize any of its list links.
 */
user_memory_t
map_region(
	struct task_struct	*task,
	vm_address_t		user_addr,
	vm_size_t		size,
	vm_prot_t		prot)
{
	kern_return_t	ret;
	vm_address_t	svr_addr;
	vm_address_t	aligned_user_addr;
	user_memory_t	region_p;
	vm_prot_t	cur_prot;
	vm_prot_t	max_prot;

	svr_addr = 0;
	aligned_user_addr = ALIGN_ADDR_DOWN(user_addr);

	debug(0, ++user_memory_num_maps);

	size = ALIGN_ADDR_UP(user_addr + size) - aligned_user_addr;
	user_addr = aligned_user_addr;

	server_thread_blocking(FALSE);
	ret = vm_remap(mach_task_self(),
		       &svr_addr,
		       size,
		       0,		/* alignment */
		       TRUE,		/* anywhere */
		       task->osfmach3.task->mach_task_port, /* from_task */
		       user_addr,
		       FALSE,		/* copy */
		       &cur_prot,
		       &max_prot,
		       VM_INHERIT_NONE);
	server_thread_unblocking(FALSE);

	if (ret != KERN_SUCCESS) {
		debug_prf(1, ("%s to map addr %x, len %x of task %p, because\n",
			  "map_region: failed", user_addr, size, task));
		debug_prf(1,
			  ("           vm_remap failed. (Returned %x)\n", ret));
		return NULL;
	}
	debug_prf(2, ("map_region: mapped addr %x, len %x of task %p.\n",
				user_addr, size, task));

	region_p = (user_memory_t) kmalloc(sizeof (*region_p), GFP_KERNEL);
	if (!region_p)
		panic("map_region: kmalloc failed for user_memory elt\n");

	region_p->task = task;
	region_p->user_page = ADDR_TO_PAGENUM(user_addr);
	region_p->svr_addr = svr_addr;
	region_p->size = size;
	region_p->prot = cur_prot;
	region_p->ref_count = 0;

	return region_p;
}
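
Because map_region() records the page-aligned user base (as a page number) and the server alias, translating any user address inside the region is pure offset arithmetic. A sketch (region_to_svr_addr is hypothetical; PAGENUM_TO_ADDR is assumed to invert the ADDR_TO_PAGENUM used above):

/*
 * Hypothetical helper: translate a user address covered by region_p into
 * the server-side alias established by vm_remap() above.  The caller must
 * ensure user_addr lies inside the region.
 */
vm_address_t
region_to_svr_addr(
	user_memory_t	region_p,
	vm_address_t	user_addr)
{
	vm_address_t	user_base = PAGENUM_TO_ADDR(region_p->user_page);

	return region_p->svr_addr + (user_addr - user_base);
}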
Example #4
prange_t pdup(prange_t range, size_t newsize, size_t offset) {
    if(newsize < offset + range.size) {
        die("pdup: newsize=%zu < offset=%zu + range.size=%zu", newsize, offset, range.size);
    }
    void *buf = mmap(NULL, newsize, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON, -1, 0);
    if(buf == MAP_FAILED) {
        edie("pdup: could not mmap");
    }
#ifdef __APPLE__
    munmap(buf + offset, range.size);
    vm_prot_t cur, max;
    vm_address_t addr = (vm_address_t) (buf + offset);
    kern_return_t kr = vm_remap(mach_task_self(), &addr, range.size, 0xfff, 0, mach_task_self(), (vm_address_t) range.start, true, &cur, &max, VM_INHERIT_NONE);
    if(kr) {
        die("pdup: kr = %d", (int) kr);
    }
#else
    memcpy(buf + offset, range.start, range.size);
#endif
    return (prange_t) {buf, newsize};
}
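
One subtlety worth noting: on the Darwin path both munmap() and the fixed-address vm_remap() operate at page granularity, so offset (and range.start) need to be page-aligned for that branch to behave; the memcpy() fallback has no such constraint. A usage sketch (input is a hypothetical, already-populated prange_t; fields inferred from the return statement above):

// Hypothetical usage: duplicate `input` into a fresh page-rounded mapping.
// On Darwin the payload is duplicated via a copy-on-write vm_remap() rather
// than a byte copy; elsewhere it is copied with memcpy().
size_t rounded = (input.size + 0xfff) & ~(size_t)0xfff; // assumes 4 KiB pages
prange_t grown = pdup(input, rounded, 0);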
Example #5
/*===========================================================================*
 *				do_shmat		     		     *
 *===========================================================================*/
PUBLIC int do_shmat(message *m)
{
	int id, flag;
	vir_bytes addr;
	void *ret;
	struct shm_struct *shm;

	id = m->SHMAT_ID;
	addr = (vir_bytes) m->SHMAT_ADDR;
	flag = m->SHMAT_FLAG;

	if (addr && (addr % I386_PAGE_SIZE)) {
		if (flag & SHM_RND)
			addr -= (addr % I386_PAGE_SIZE);
		else
			return EINVAL;
	}

	if (!(shm = shm_find_id(id)))
		return EINVAL;

	if (flag & SHM_RDONLY)
		flag = 0444;
	else
		flag = 0666;
	if (!check_perm(&shm->shmid_ds.shm_perm, who_e, flag))
		return EACCES;

	ret = vm_remap(who_e, SELF_E, (void *)addr, (void *)shm->page,
			shm->shmid_ds.shm_segsz);
	if (ret == MAP_FAILED)
		return ENOMEM;

	shm->shmid_ds.shm_atime = time(NULL);
	shm->shmid_ds.shm_lpid = getnpid(who_e);
	/* nattach is updated lazily */

	m->SHMAT_RETADDR = (long) ret;
	return OK;
}
Example #6
File: shm.c  Project: Hooman3/minix
/*===========================================================================*
 *				do_shmat		     		     *
 *===========================================================================*/
int do_shmat(message *m)
{
	int id, flag;
	vir_bytes addr;
	void *ret;
	struct shm_struct *shm;

	id = m->m_lc_ipc_shmat.id;
	addr = (vir_bytes) m->m_lc_ipc_shmat.addr;
	flag = m->m_lc_ipc_shmat.flag;

	if (addr && (addr % PAGE_SIZE)) {
		if (flag & SHM_RND)
			addr -= (addr % PAGE_SIZE);
		else
			return EINVAL;
	}

	if (!(shm = shm_find_id(id)))
		return EINVAL;

	if (flag & SHM_RDONLY)
		flag = 0444;
	else
		flag = 0666;
	if (!check_perm(&shm->shmid_ds.shm_perm, who_e, flag))
		return EACCES;

	ret = vm_remap(who_e, sef_self(), (void *)addr, (void *)shm->page,
			shm->shmid_ds.shm_segsz);
	if (ret == MAP_FAILED)
		return ENOMEM;

	shm->shmid_ds.shm_atime = time(NULL);
	shm->shmid_ds.shm_lpid = getnpid(who_e);
	/* nattach is updated lazily */

	m->m_lc_ipc_shmat.retaddr = ret;
	return OK;
}
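
For a concrete instance of the SHM_RND branch, the rounding is plain modular arithmetic (a sketch assuming a 4 KiB PAGE_SIZE):

	vir_bytes addr = 0x1234;		/* not page aligned */
	if (addr % PAGE_SIZE)
		addr -= (addr % PAGE_SIZE);	/* SHM_RND: rounds down to 0x1000 */
	/* without SHM_RND, do_shmat() returns EINVAL for 0x1234 */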
Example #7
bool BufferInit(Buffer *buffer, int length) {

    // Keep trying until we get our buffer; needed to handle race conditions
    int retries = 3;
    while ( true ) {

        buffer->length = round_page(length);    // get whole page size

        // Temporarily allocate twice the length, so we have the contiguous address space to
        // support a second instance of the buffer directly after
        vm_address_t bufferAddress;
        kern_return_t result = vm_allocate(mach_task_self(),
                                           &bufferAddress,
                                           buffer->length * 2,
                                           VM_FLAGS_ANYWHERE); // allocate anywhere it'll fit
        if ( result != ERR_SUCCESS ) {
            if ( retries-- == 0 ) {
                Flag(result, "Buffer allocation");
                return false;
            }
            // Try again if we fail
            continue;
        }
        
        // Now replace the second half of the allocation with a virtual copy of the first half. Deallocate the second half...
        result = vm_deallocate(mach_task_self(),
                               bufferAddress + buffer->length,
                               buffer->length);
        if ( result != ERR_SUCCESS ) {
            if ( retries-- == 0 ) {
                Flag(result, "Buffer deallocation");
                return false;
            }
            // If at first you don't succeed, try again
            vm_deallocate(mach_task_self(), bufferAddress, buffer->length);
            continue;
        }
        
        // Re-map the buffer to the address space immediately after the buffer
        vm_address_t virtualAddress = bufferAddress + buffer->length;
        vm_prot_t cur_prot, max_prot;
        result = vm_remap(mach_task_self(),
                          &virtualAddress,   // mirror target
                          buffer->length,    // size of mirror
                          0,                 // auto alignment
                          0,                 // force remapping to virtualAddress
                          mach_task_self(),  // same task
                          bufferAddress,     // mirror source
                          0,                 // MAP READ-WRITE, NOT COPY
                          &cur_prot,         // current protection (unused output)
                          &max_prot,         // maximum protection (unused output)
                          VM_INHERIT_DEFAULT);
        if ( result != ERR_SUCCESS ) {
            if ( retries-- == 0 ) {
                Flag(result, "Remap buffer memory");
                return false;
            }
            // If this remap failed, we hit a race condition, so deallocate and try again
            vm_deallocate(mach_task_self(), bufferAddress, buffer->length);
            continue;
        }
        
        if ( virtualAddress != bufferAddress+buffer->length ) {
            // If the memory is not contiguous, clean up both buffers and try again
            if ( retries-- == 0 ) {
                printf("Couldn't map buffer memory to end of buffer\n");
                return false;
            }

            vm_deallocate(mach_task_self(), virtualAddress, buffer->length);
            vm_deallocate(mach_task_self(), bufferAddress, buffer->length);
            continue;
        }
        
        buffer->buffer = (void*)bufferAddress;
        buffer->fillCount = 0;
        buffer->head = buffer->tail = 0;
        
        return true;
    }
    return false;
}
Example #8
static int get_trampoline(void *func, void *arg1, void *arg2, void *tramp_ptr) {
    int ret, rerrno = 0;
    pthread_mutex_lock(&tramp_mutex);

    struct tramp_info_page_header *header = LIST_FIRST(&tramp_free_page_list);
    if (!header) {
        if (PAGE_SIZE > _PAGE_SIZE)
            substitute_panic("%s: strange PAGE_SIZE %lx\n",
                             __func__, (long) PAGE_SIZE);
        void *new_pages = mmap(NULL, _PAGE_SIZE * 2, PROT_READ | PROT_WRITE,
                               MAP_SHARED | MAP_ANON, -1, 0);
        if (new_pages == MAP_FAILED) {
            ret = SUBSTITUTE_ERR_OOM;
            rerrno = errno;
            goto out;
        }
        vm_address_t tramp_page = (vm_address_t) new_pages;
        vm_prot_t cur_prot, max_prot;
        kern_return_t kr = vm_remap(
            mach_task_self(),
            &tramp_page,
            _PAGE_SIZE,
            _PAGE_SIZE - 1,
            VM_FLAGS_OVERWRITE | VM_FLAGS_FIXED,
            mach_task_self(),
            (vm_address_t) remap_start,
            FALSE, /* copy */
            &cur_prot,
            &max_prot,
            VM_INHERIT_NONE);
        if (kr != KERN_SUCCESS || tramp_page != (vm_address_t) new_pages) {
            ret = SUBSTITUTE_ERR_VM;
            goto out;
        }
        header = new_pages + _PAGE_SIZE * 2 - sizeof(*header);
        header->magic = TRAMP_MAGIC;
        header->version = TRAMP_VERSION;
        header->first_free = NULL;
        header->nfree = TRAMPOLINES_PER_PAGE;
        LIST_INSERT_HEAD(&tramp_free_page_list, header, free_pages);
    }

    void *page = (void *) (((uintptr_t) header) & ~(_PAGE_SIZE - 1));
    struct tramp_info_page_entry *entries = page;
    struct tramp_info_page_entry *entry = header->first_free;
    if (entry == NULL) {
        entry = &entries[TRAMPOLINES_PER_PAGE - header->nfree];
        entry->next_free = NULL;
    }

    header->first_free = entry->next_free;
    if (--header->nfree == 0)
        LIST_REMOVE(header, free_pages);

    entry->func = func;
    entry->arg1 = arg1;
    entry->arg2 = arg2;
    void *tramp = (page - _PAGE_SIZE) + (entry - entries) * TRAMPOLINE_SIZE;
#ifdef __arm__
    tramp += 1;
#endif
    *(void **) tramp_ptr = tramp;
    ret = SUBSTITUTE_OK;
out:
    pthread_mutex_unlock(&tramp_mutex);
    errno = rerrno;
    return ret;
}
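
Because the entry page always sits exactly _PAGE_SIZE after the remapped code page, the trampoline-to-entry mapping can be inverted with the same arithmetic, for instance by a hypothetical deallocator (entry_for_tramp is not part of the code above; void-pointer arithmetic follows the GNU extension already used in this file):

/* Hypothetical inverse of the lookup in get_trampoline(): recover the
 * bookkeeping entry for a trampoline pointer handed out above. */
static struct tramp_info_page_entry *entry_for_tramp(void *tramp) {
#ifdef __arm__
    tramp -= 1; /* undo the Thumb bias applied above */
#endif
    void *code_page = (void *) (((uintptr_t) tramp) & ~(_PAGE_SIZE - 1));
    struct tramp_info_page_entry *entries = code_page + _PAGE_SIZE;
    return &entries[(tramp - code_page) / TRAMPOLINE_SIZE];
}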
Example #9
static ffi_trampoline_table *
ffi_trampoline_table_alloc ()
{
  ffi_trampoline_table *table = NULL;

  /* Loop until we can allocate two contiguous pages */
  while (table == NULL)
    {
      vm_address_t config_page = 0x0;
      kern_return_t kt;

      /* Try to allocate two pages */
      kt =
	vm_allocate (mach_task_self (), &config_page, PAGE_MAX_SIZE * 2,
		     VM_FLAGS_ANYWHERE);
      if (kt != KERN_SUCCESS)
	{
	  fprintf (stderr, "vm_allocate() failure: %d at %s:%d\n", kt,
		   __FILE__, __LINE__);
	  break;
	}

      /* Now drop the second half of the allocation to make room for the trampoline table */
      vm_address_t trampoline_page = config_page + PAGE_MAX_SIZE;
      kt = vm_deallocate (mach_task_self (), trampoline_page, PAGE_MAX_SIZE);
      if (kt != KERN_SUCCESS)
	{
	  fprintf (stderr, "vm_deallocate() failure: %d at %s:%d\n", kt,
		   __FILE__, __LINE__);
	  break;
	}

      /* Remap the trampoline table to directly follow the config page */
      vm_prot_t cur_prot;
      vm_prot_t max_prot;

      vm_address_t trampoline_page_template = (vm_address_t)&ffi_closure_trampoline_table_page;
#ifdef __arm__
      /* ffi_closure_trampoline_table_page can be thumb-biased on some ARM archs */
      trampoline_page_template &= ~1UL;
#endif

      kt =
	vm_remap (mach_task_self (), &trampoline_page, PAGE_MAX_SIZE, 0x0, FALSE,
		  mach_task_self (), trampoline_page_template, FALSE,
		  &cur_prot, &max_prot, VM_INHERIT_SHARE);

      /* If the remap failed (for instance because we lost a race for the target page), drop our config allocation and retry */
      if (kt != KERN_SUCCESS)
	{
	  /* Log unexpected failures */
	  if (kt != KERN_NO_SPACE)
	    {
	      fprintf (stderr, "vm_remap() failure: %d at %s:%d\n", kt,
		       __FILE__, __LINE__);
	    }

	  vm_deallocate (mach_task_self (), config_page, PAGE_MAX_SIZE);
	  continue;
	}

      /* We have valid trampoline and config pages */
      table = calloc (1, sizeof (ffi_trampoline_table));
      table->free_count = FFI_TRAMPOLINE_COUNT;
      table->config_page = config_page;
      table->trampoline_page = trampoline_page;

      /* Create and initialize the free list */
      table->free_list_pool =
	calloc (FFI_TRAMPOLINE_COUNT, sizeof (ffi_trampoline_table_entry));

      uint16_t i;
      for (i = 0; i < table->free_count; i++)
	{
	  ffi_trampoline_table_entry *entry = &table->free_list_pool[i];
	  entry->trampoline =
	    (void *) (table->trampoline_page + (i * FFI_TRAMPOLINE_SIZE));

	  if (i < table->free_count - 1)
	    entry->next = &table->free_list_pool[i + 1];
	}

      table->free_list = table->free_list_pool;
    }

  return table;
}
Example #10
bool _TPCircularBufferInit(TPCircularBuffer *buffer, int32_t length, size_t structSize) {
    
    assert(length > 0);
    
    if ( structSize != sizeof(TPCircularBuffer) ) {
        fprintf(stderr, "TPCircularBuffer: Header version mismatch. Check for old versions of TPCircularBuffer in your project\n");
        abort();
    }
    
    // Keep trying until we get our buffer, needed to handle race conditions
    int retries = 3;
    while ( true ) {
        
        buffer->length = (int32_t)round_page(length);    // We need whole page sizes
        
        // Temporarily allocate twice the length, so we have the contiguous address space to
        // support a second instance of the buffer directly after
        vm_address_t bufferAddress;
        kern_return_t result = vm_allocate(mach_task_self(),
                                           &bufferAddress,
                                           buffer->length * 2,
                                           VM_FLAGS_ANYWHERE); // allocate anywhere it'll fit
        if ( result != ERR_SUCCESS ) {
            if ( retries-- == 0 ) {
                reportResult(result, "Buffer allocation");
                return false;
            }
            // Try again if we fail
            continue;
        }
        
        // Now replace the second half of the allocation with a virtual copy of the first half. Deallocate the second half...
        result = vm_deallocate(mach_task_self(),
                               bufferAddress + buffer->length,
                               buffer->length);
        if ( result != ERR_SUCCESS ) {
            if ( retries-- == 0 ) {
                reportResult(result, "Buffer deallocation");
                return false;
            }
            // If this fails somehow, deallocate the whole region and try again
            vm_deallocate(mach_task_self(), bufferAddress, buffer->length);
            continue;
        }
        
        // Re-map the buffer to the address space immediately after the buffer
        vm_address_t virtualAddress = bufferAddress + buffer->length;
        vm_prot_t cur_prot, max_prot;
        result = vm_remap(mach_task_self(),
                          &virtualAddress,   // mirror target
                          buffer->length,    // size of mirror
                          0,                 // auto alignment
                          0,                 // force remapping to virtualAddress
                          mach_task_self(),  // same task
                          bufferAddress,     // mirror source
                          0,                 // MAP READ-WRITE, NOT COPY
                          &cur_prot,         // current protection (unused output)
                          &max_prot,         // maximum protection (unused output)
                          VM_INHERIT_DEFAULT);
        if ( result != ERR_SUCCESS ) {
            if ( retries-- == 0 ) {
                reportResult(result, "Remap buffer memory");
                return false;
            }
            // If this remap failed, we hit a race condition, so deallocate and try again
            vm_deallocate(mach_task_self(), bufferAddress, buffer->length);
            continue;
        }
        
        if ( virtualAddress != bufferAddress+buffer->length ) {
            // If the memory is not contiguous, clean up both allocated buffers and try again
            if ( retries-- == 0 ) {
                printf("Couldn't map buffer memory to end of buffer\n");
                return false;
            }
            
            vm_deallocate(mach_task_self(), virtualAddress, buffer->length);
            vm_deallocate(mach_task_self(), bufferAddress, buffer->length);
            continue;
        }
        
        buffer->buffer = (void*)bufferAddress;
        buffer->fillCount = 0;
        buffer->head = buffer->tail = 0;
        buffer->atomic = true;
        
        return true;
    }
    return false;
}
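
In the shipping library the structSize argument is supplied by a wrapper macro, so the version check above trips whenever a caller was compiled against a different header layout. A sketch of the wrapper and a call site (treat the exact macro form as an assumption and check TPCircularBuffer.h):

// Assumed wrapper: callers never pass structSize themselves, so the
// sizeof() baked in at their compile time is what gets verified above.
#define TPCircularBufferInit(buffer, length) \
    _TPCircularBufferInit((buffer), (length), sizeof(TPCircularBuffer))

static bool setupBuffer(TPCircularBuffer *buffer) {
    // 16000 bytes requested; rounded up to whole pages internally
    return TPCircularBufferInit(buffer, 16000);
}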