Example #1
static auto_ptr<ArgumentDecoder> createArgumentDecoder(mach_msg_header_t* header)
{
    if (!(header->msgh_bits & MACH_MSGH_BITS_COMPLEX)) {
        // We have a simple message.
        size_t bodySize = header->msgh_size - sizeof(mach_msg_header_t);
        uint8_t* body = reinterpret_cast<uint8_t*>(header + 1);
        
        return auto_ptr<ArgumentDecoder>(new ArgumentDecoder(body, bodySize));
    }

    bool messageBodyIsOOL = header->msgh_id & MessageBodyIsOOL;

    mach_msg_body_t* body = reinterpret_cast<mach_msg_body_t*>(header + 1);
    mach_msg_size_t numDescriptors = body->msgh_descriptor_count;
    ASSERT(numDescriptors);

    // Build attachment list
    Deque<Attachment> attachments;
    uint8_t* descriptorData = reinterpret_cast<uint8_t*>(body + 1);

    // If the message body was sent out-of-line, don't treat the last descriptor
    // as an attachment, since it is really the message body.
    if (messageBodyIsOOL)
        --numDescriptors;

    for (mach_msg_size_t i = 0; i < numDescriptors; ++i) {
        mach_msg_descriptor_t* descriptor = reinterpret_cast<mach_msg_descriptor_t*>(descriptorData);

        switch (descriptor->type.type) {
        case MACH_MSG_PORT_DESCRIPTOR:
            attachments.append(Attachment(descriptor->port.name, descriptor->port.disposition));
            descriptorData += sizeof(mach_msg_port_descriptor_t);
            break;
        case MACH_MSG_OOL_DESCRIPTOR:
            attachments.append(Attachment(descriptor->out_of_line.address, descriptor->out_of_line.size,
                                          descriptor->out_of_line.copy, descriptor->out_of_line.deallocate));
            descriptorData += sizeof(mach_msg_ool_descriptor_t);
            break;
        default:
            ASSERT(false && "Unhandled descriptor type");
        }
    }

    if (messageBodyIsOOL) {
        mach_msg_descriptor_t* descriptor = reinterpret_cast<mach_msg_descriptor_t*>(descriptorData);
        ASSERT(descriptor->type.type == MACH_MSG_OOL_DESCRIPTOR);
        Attachment messageBodyAttachment(descriptor->out_of_line.address, descriptor->out_of_line.size,
                                         descriptor->out_of_line.copy, descriptor->out_of_line.deallocate);

        uint8_t* messageBody = static_cast<uint8_t*>(messageBodyAttachment.address());
        size_t messageBodySize = messageBodyAttachment.size();

        ArgumentDecoder* argumentDecoder;

        if (attachments.isEmpty())
            argumentDecoder = new ArgumentDecoder(messageBody, messageBodySize);
        else
            argumentDecoder = new ArgumentDecoder(messageBody, messageBodySize, attachments);

        vm_deallocate(mach_task_self(), reinterpret_cast<vm_address_t>(messageBodyAttachment.address()), messageBodyAttachment.size());

        return auto_ptr<ArgumentDecoder>(argumentDecoder);
    }

    uint8_t* messageBody = descriptorData;
    size_t messageBodySize = header->msgh_size - (descriptorData - reinterpret_cast<uint8_t*>(header));

    return auto_ptr<ArgumentDecoder>(new ArgumentDecoder(messageBody, messageBodySize, attachments));
}
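A hypothetical usage sketch (not from the original source): receive a Mach message into a local buffer and hand its header to createArgumentDecoder(). The buffer size and the serverPort name are illustrative assumptions.

char buffer[4096];
mach_msg_header_t* header = reinterpret_cast<mach_msg_header_t*>(buffer);
// Block until a message arrives on serverPort (a receive right we are assumed to own).
kern_return_t kr = mach_msg(header, MACH_RCV_MSG, 0, sizeof(buffer), serverPort,
                            MACH_MSG_TIMEOUT_NONE, MACH_PORT_NULL);
if (kr == MACH_MSG_SUCCESS)
    auto_ptr<ArgumentDecoder> decoder = createArgumentDecoder(header);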
Example #2
SharedMemory::Handle::~Handle()
{
    if (m_port)
        mach_port_deallocate(mach_task_self(), m_port);
}
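A hypothetical companion helper (not taken from the source above): release the right early and clear the member so the destructor's mach_port_deallocate() does not run a second time.

void SharedMemory::Handle::clear()
{
    if (m_port)
        mach_port_deallocate(mach_task_self(), m_port);
    m_port = MACH_PORT_NULL;
}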
static void
i386_macosx_dr_set (int regnum, uint64_t value)
{
  thread_t current_thread;
  x86_debug_state_t dr_regs;
  unsigned int dr_count = x86_DEBUG_STATE_COUNT;
  kern_return_t ret;
  thread_array_t thread_list;
  unsigned int nthreads;
  int i;

  gdb_assert (regnum >= 0 && regnum <= DR_CONTROL);

  /* We have to set the watchpoint value in all the threads.  */
  ret = task_threads (macosx_status->task, &thread_list, &nthreads);
  if (ret != KERN_SUCCESS)
    {
      printf_unfiltered ("Error getting the task threads for task: 0x%x.\n",
			 (int) macosx_status->task);
      MACH_CHECK_ERROR (ret);
    }

  for (i = 0; i < nthreads; i++)
    {
      current_thread = thread_list[i];

      if (TARGET_OSABI == GDB_OSABI_DARWIN64)
        {
          dr_regs.dsh.flavor = x86_DEBUG_STATE64;
          dr_regs.dsh.count = x86_DEBUG_STATE64_COUNT;
          ret = thread_get_state (current_thread, x86_DEBUG_STATE,
                                  (thread_state_t) &dr_regs, &dr_count);

          if (ret != KERN_SUCCESS)
            {
              printf_unfiltered ("Error reading debug registers thread 0x%x via thread_get_state\n", (int) current_thread);
              MACH_CHECK_ERROR (ret);
            }
          
          switch (regnum) 
            {
            case 0:
              dr_regs.uds.ds64.__dr0 = value;
              break;
            case 1:
              dr_regs.uds.ds64.__dr1 = value;
              break;
            case 2:
              dr_regs.uds.ds64.__dr2 = value;
              break;
            case 3:
              dr_regs.uds.ds64.__dr3 = value;
              break;
            case 4:
              dr_regs.uds.ds64.__dr4 = value;
              break;
            case 5:
              dr_regs.uds.ds64.__dr5 = value;
              break;
            case 6:
              dr_regs.uds.ds64.__dr6 = value;
              break;
            case 7:
              dr_regs.uds.ds64.__dr7 = value;
              break;
            }
          
          ret = thread_set_state (current_thread, x86_DEBUG_STATE,
                                  (thread_state_t) &dr_regs, dr_count);

          if (ret != KERN_SUCCESS)
            {
              printf_unfiltered ("Error writing debug registers thread "
                                 "0x%x via thread_set_state\n", 
                                 (int) current_thread);
              MACH_CHECK_ERROR (ret);
            }
        }
      else
        {
          uint32_t val_32 = value & 0xffffffff;

          dr_regs.dsh.flavor = x86_DEBUG_STATE32;
          dr_regs.dsh.count = x86_DEBUG_STATE32_COUNT;
          dr_count = x86_DEBUG_STATE_COUNT;
          ret = thread_get_state (current_thread, x86_DEBUG_STATE, 
                                  (thread_state_t) &dr_regs, &dr_count);
          
          if (ret != KERN_SUCCESS)
            {
              printf_unfiltered ("Error reading debug registers thread 0x%x via thread_get_state\n", (int) current_thread);
              MACH_CHECK_ERROR (ret);
            }
          
          switch (regnum) 
            {
            case 0:
              dr_regs.uds.ds32.__dr0 = val_32;
              break;
            case 1:
              dr_regs.uds.ds32.__dr1 = val_32;
              break;
            case 2:
              dr_regs.uds.ds32.__dr2 = val_32;
              break;
            case 3:
              dr_regs.uds.ds32.__dr3 = val_32;
              break;
            case 4:
              dr_regs.uds.ds32.__dr4 = val_32;
              break;
            case 5:
              dr_regs.uds.ds32.__dr5 = val_32;
              break;
            case 6:
              dr_regs.uds.ds32.__dr6 = val_32;
              break;
            case 7:
              dr_regs.uds.ds32.__dr7 = val_32;
              break;
            }
          
          ret = thread_set_state (current_thread, x86_DEBUG_STATE, 
                                  (thread_state_t) &dr_regs, dr_count);

          if (ret != KERN_SUCCESS)
            {
              printf_unfiltered ("Error writing debug registers thread "
                                 "0x%x via thread_set_state\n", 
                                 (int) current_thread);
              MACH_CHECK_ERROR (ret);
            }
        }
#if HAVE_TASK_SET_STATE          
      /* Now call task_set_state with the values of the last thread we
         set -- gdb doesn't support putting watchpoints on individual threads
         so it doesn't matter which one we use.  The task_set_state call here
         will make the kernel set the watchpoints on any newly-created 
         threads.  */

      ret = task_set_state (macosx_status->task, x86_DEBUG_STATE, 
                              (thread_state_t) &dr_regs, dr_count);
      if (ret != KERN_SUCCESS)
        {
          printf_unfiltered ("Error writing debug registers task "
                             "0x%x via task_set_state\n", 
                             (int) macosx_status->task);
          MACH_CHECK_ERROR (ret);
        }
#endif
    }
  ret = vm_deallocate (mach_task_self (), (vm_address_t) thread_list, 
			(nthreads * sizeof (int)));
}
Semaphore::~Semaphore()
{
    assert(!mValid && "Semaphore not closed before deletion");
    semaphore_destroy(mach_task_self(), mInternal);
}
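For context, a minimal sketch of a matching constructor, assuming the mValid/mInternal members used by the destructor above; semaphore_create() is the Mach call that semaphore_destroy() later balances.

Semaphore::Semaphore() : mValid(false), mInternal(MACH_PORT_NULL)
{
    // SYNC_POLICY_FIFO with an initial count of 0 is the common default.
    kern_return_t kr = semaphore_create(mach_task_self(), &mInternal, SYNC_POLICY_FIFO, 0);
    mValid = (kr == KERN_SUCCESS);
}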
Example #5
BOOL CloseHandle(HANDLE hObject)
{
	int i;
	ULONG Type;
	PVOID Object;

	if (!winpr_Handle_GetInfo(hObject, &Type, &Object))
		return FALSE;

	if (pthread_once(&_HandleCloseCbsInitialized, _HandleCloseCbsInit) != 0)
	{
		return FALSE;
	}

	if (_HandleCloseCbs == NULL)
	{
		return FALSE;
	}

	EnterCriticalSection(&_HandleCloseCbsLock);

	for (i=0; _HandleCloseCbs[i] != NULL; i++)
	{
		HANDLE_CLOSE_CB* close_cb = (HANDLE_CLOSE_CB*)_HandleCloseCbs[i];

		if (close_cb && close_cb->IsHandled(hObject))
		{
			BOOL result = close_cb->CloseHandle(hObject);
			LeaveCriticalSection(&_HandleCloseCbsLock);
			return result;
		}
	}

	LeaveCriticalSection(&_HandleCloseCbsLock);

	if (Type == HANDLE_TYPE_MUTEX)
	{
		WINPR_MUTEX* mutex;
		mutex = (WINPR_MUTEX*) Object;
		pthread_mutex_destroy(&mutex->mutex);
		free(Object);
		return TRUE;
	}
	else if (Type == HANDLE_TYPE_EVENT)
	{
		WINPR_EVENT* event;
		event = (WINPR_EVENT*) Object;

		if (!event->bAttached)
		{
			if (event->pipe_fd[0] != -1)
			{
				close(event->pipe_fd[0]);
				event->pipe_fd[0] = -1;
			}

			if (event->pipe_fd[1] != -1)
			{
				close(event->pipe_fd[1]);
				event->pipe_fd[1] = -1;
			}
		}

		free(Object);
		return TRUE;
	}
	else if (Type == HANDLE_TYPE_SEMAPHORE)
	{
		WINPR_SEMAPHORE* semaphore;
		semaphore = (WINPR_SEMAPHORE*) Object;
#ifdef WINPR_PIPE_SEMAPHORE

		if (semaphore->pipe_fd[0] != -1)
		{
			close(semaphore->pipe_fd[0]);
			semaphore->pipe_fd[0] = -1;

			if (semaphore->pipe_fd[1] != -1)
			{
				close(semaphore->pipe_fd[1]);
				semaphore->pipe_fd[1] = -1;
			}
		}

#else
#if defined __APPLE__
		semaphore_destroy(mach_task_self(), *((winpr_sem_t*) semaphore->sem));
#else
		sem_destroy((winpr_sem_t*) semaphore->sem);
#endif
#endif
		free(Object);
		return TRUE;
	}
	else if (Type == HANDLE_TYPE_TIMER)
	{
		WINPR_TIMER* timer;
		timer = (WINPR_TIMER*) Object;
#ifdef __linux__

		if (timer->fd != -1)
			close(timer->fd);

#endif
		free(Object);
		return TRUE;
	}
	else if (Type == HANDLE_TYPE_ANONYMOUS_PIPE)
	{
		WINPR_PIPE* pipe;
		pipe = (WINPR_PIPE*) Object;

		if (pipe->fd != -1)
		{
			close(pipe->fd);
		}

		free(Object);
		return TRUE;
	}
	else if (Type == HANDLE_TYPE_NAMED_PIPE)
	{
		WINPR_NAMED_PIPE* pNamedPipe = (WINPR_NAMED_PIPE*) Object;

		if (pNamedPipe->clientfd != -1)
		{
			//WLOG_DBG(TAG, "closing clientfd %d", pNamedPipe->clientfd);
			close(pNamedPipe->clientfd);
		}

		if (pNamedPipe->serverfd != -1)
		{
			//WLOG_DBG(TAG, "closing serverfd %d", pNamedPipe->serverfd);
			close(pNamedPipe->serverfd);
		}

		if (pNamedPipe->pfnUnrefNamedPipe)
			pNamedPipe->pfnUnrefNamedPipe(pNamedPipe);

		free((void*)pNamedPipe->lpFileName);
		free((void*)pNamedPipe->lpFilePath);
		free((void*)pNamedPipe->name);
		free(pNamedPipe);
		return TRUE;
	}
	else if (Type == HANDLE_TYPE_ACCESS_TOKEN)
	{
		WINPR_ACCESS_TOKEN* token;
		token = (WINPR_ACCESS_TOKEN*) Object;

		if (token->Username)
			free(token->Username);

		if (token->Domain)
			free(token->Domain);

		free(token);
		return TRUE;
	}

	return FALSE;
}
/**
 * Map pages starting at @a task_addr from @a task into the current process. The mapping
 * will be copy-on-write, and will be checked to ensure a minimum protection value of
 * VM_PROT_READ.
 *
 * @param task The task from which the memory will be mapped.
 * @param task_addr The task-relative address of the memory to be mapped. This is not required to fall on a page boundary.
 * @param length The total size of the mapping to create.
 * @param require_full If false, short mappings will be permitted in the case where a memory object of the requested length
 * does not exist at the target address. It is the caller's responsibility to validate the resulting length of the
 * mapping, e.g., using plcrash_async_mobject_remap_address() and similar. If true, and the entire requested page range is
 * not valid, the mapping request will fail.
 * @param[out] result The in-process address at which the pages were mapped.
 * @param[out] result_length The total size, in bytes, of the mapped pages.
 *
 * @return On success, returns PLCRASH_ESUCCESS. On failure, one of the plcrash_error_t error values will be returned, and no
 * mapping will be performed.
 *
 * @note
 * This code previously used vm_remap() to perform atomic remapping of process memory. However, this appeared
 * to trigger a kernel bug (and resulting panic) on iOS 6.0 through 6.1.2, possibly fixed in 6.1.3. Note that
 * no stable release of PLCrashReporter shipped with the vm_remap() code.
 *
 * Investigation of the failure seems to show an over-release of the target vm_map and backing vm_object, leading to
 * NULL dereference, invalid memory references, and in some cases, deadlocks that result in watchdog timeouts.
 *
 * In one example case, the crash occurs in update_first_free_ll() as a NULL dereference of the vm_map_entry_t parameter.
 * Analysis of the limited reports shows that this is called via vm_map_store_update_first_free(). No backtrace is
 * available from the kernel panics, but analyzing the register state demonstrates:
 * - A reference to vm_map_store_update_first_free() remains in the link register.
 * - Of the following callers, one can be eliminated by register state:
 *     - vm_map_enter - not possible, r3 should be equal to r0
 *     - vm_map_clip_start - possible
 *     - vm_map_clip_unnest - possible
 *     - vm_map_clip_end - possible
 *
 * In the other panic seen in vm_object_reap_pages(), a value of 0x8008 is loaded and dereferenced from the next pointer
 * of an element within the vm_object's resident page queue (object->memq).
 *
 * Unfortunately, our ability to investigate has been extremely constrained by the following issues:
 * - The panic is not easily or reliably reproducible
 * - Apple does not support iOS kernel debugging
 * - There is no support for jailbreak kernel debugging against iOS 6.x devices at the time of writing.
 *
 * The work-around used here is to split the vm_remap() into distinct calls to mach_make_memory_entry_64() and
 * vm_map(); this follows a largely distinct code path from vm_remap(). In testing by a large-scale user of PLCrashReporter,
 * they were no longer able to reproduce the issue with this fix in place. Additionally, they've not been able to reproduce
 * the issue on 6.1.3 devices, or had any reports of the issue occurring on 6.1.3 devices.
 *
 * The mach_make_memory_entry_64() API may not actually return an entry for the full requested length; this requires
 * that we loop through the full range, requesting an entry for the remaining unallocated pages, and then mapping
 * the pages in question. Since this requires multiple calls to vm_map(), we pre-allocate a contiguous range of pages
 * for the target mappings into which we'll insert (via overwrite) our own mappings.
 *
 * @note
 * As a work-around for bugs in Apple's Mach-O/dyld implementation, we provide the @a require_full flag; if false,
 * a successful mapping that is smaller than the requested range may be made, and will not return an error. This is necessary
 * to allow our callers to work around bugs in update_dyld_shared_cache(1), which writes out a larger Mach-O VM segment
 * size value than is actually available and mappable. See the plcrash_async_macho_map_segment() API documentation for
 * more details. This bug has been reported to Apple as rdar://13707406.
 */
static plcrash_error_t plcrash_async_mobject_remap_pages_workaround (mach_port_t task,
                                                                     pl_vm_address_t task_addr,
                                                                     pl_vm_size_t length,
                                                                     bool require_full,
                                                                     pl_vm_address_t *result,
                                                                     pl_vm_size_t *result_length)
{
    kern_return_t kt;

    /* Compute the total required page size. */
    pl_vm_address_t base_addr = mach_vm_trunc_page(task_addr);
    pl_vm_size_t total_size = mach_vm_round_page(length + (task_addr - base_addr));
    
    /*
     * If short mappings are permitted, determine the actual mappable size of the target range. Due
     * to rdar://13707406 (update_dyld_shared_cache appears to write invalid LINKEDIT vmsize), an
     * LC_SEGMENT-reported VM size may be far larger than the actual mapped pages. This would result
     * in us making large (e.g., 36MB) allocations in cases where the mappable range is actually much
     * smaller, which can trigger out-of-memory conditions on smaller devices.
     */
    if (!require_full) {
        pl_vm_size_t verified_size = 0;
        
        while (verified_size < total_size) {            
            memory_object_size_t entry_length = total_size - verified_size;
            mach_port_t mem_handle;
            
            /* Fetch an entry reference */
            kt = mach_make_memory_entry_64(task, &entry_length, base_addr + verified_size, VM_PROT_READ, &mem_handle, MACH_PORT_NULL);
            if (kt != KERN_SUCCESS) {
                /* Once we hit an unmappable page, break */
                break;
            }
            
            /* Drop the reference */
            kt = mach_port_mod_refs(mach_task_self(), mem_handle, MACH_PORT_RIGHT_SEND, -1);
            if (kt != KERN_SUCCESS) {
                PLCF_DEBUG("mach_port_mod_refs(-1) failed: %d", kt);
            }

            /* Note the size */
            verified_size += entry_length;
        }

        /* No valid page found at the task_addr */
        if (verified_size == 0) {
            PLCF_DEBUG("No mappable pages found at 0x%" PRIx64, (uint64_t) task_addr);
            return PLCRASH_ENOMEM;
        }

        /* Reduce the total size to the verified size */
        if (verified_size < total_size)
            total_size = verified_size;
    }

    /*
     * Set aside a memory range large enough for the total requested number of pages. Ideally the kernel
     * will lazy-allocate the backing physical pages so that we don't waste actual memory on this
     * pre-emptive page range reservation.
     */
    pl_vm_address_t mapping_addr = 0x0;
    pl_vm_size_t mapped_size = 0;
#ifdef PL_HAVE_MACH_VM
    kt = mach_vm_allocate(mach_task_self(), &mapping_addr, total_size, VM_FLAGS_ANYWHERE);
#else
    kt = vm_allocate(mach_task_self(), &mapping_addr, total_size, VM_FLAGS_ANYWHERE);
#endif

    if (kt != KERN_SUCCESS) {
        PLCF_DEBUG("Failed to allocate a target page range for the page remapping: %d", kt);
        return PLCRASH_EINTERNAL;
    }

    /* Map the source pages into the allocated region, overwriting the existing page mappings */
    while (mapped_size < total_size) {
        /* Create a reference to the target pages. The returned entry may be smaller than the total length. */
        memory_object_size_t entry_length = total_size - mapped_size;
        mach_port_t mem_handle;
        kt = mach_make_memory_entry_64(task, &entry_length, base_addr + mapped_size, VM_PROT_READ, &mem_handle, MACH_PORT_NULL);
        if (kt != KERN_SUCCESS) {            
            /* No pages are found at the target. When validating the total length above, we already verified the
             * availability of the requested pages; if they've now disappeared, we can treat it as an error,
             * even if !require_full was specified */
            PLCF_DEBUG("mach_make_memory_entry_64() failed: %d", kt);
            
            /* Clean up the reserved pages */
            kt = vm_deallocate(mach_task_self(), mapping_addr, total_size);
            if (kt != KERN_SUCCESS) {
                PLCF_DEBUG("vm_deallocate() failed: %d", kt);
            }
            
            /* Return error */
            return PLCRASH_ENOMEM;
        }
        
        /* Map the pages into our local task, overwriting the allocation used to reserve the target space above. */
        pl_vm_address_t target_address = mapping_addr + mapped_size;
#ifdef PL_HAVE_MACH_VM
        kt = mach_vm_map(mach_task_self(), &target_address, entry_length, 0x0, VM_FLAGS_FIXED|VM_FLAGS_OVERWRITE, mem_handle, 0x0, TRUE, VM_PROT_READ, VM_PROT_READ, VM_INHERIT_COPY);
#else
        kt = vm_map(mach_task_self(), &target_address, entry_length, 0x0, VM_FLAGS_FIXED|VM_FLAGS_OVERWRITE, mem_handle, 0x0, TRUE, VM_PROT_READ, VM_PROT_READ, VM_INHERIT_COPY);
#endif /* !PL_HAVE_MACH_VM */
        
        if (kt != KERN_SUCCESS) {
            PLCF_DEBUG("vm_map() failure: %d", kt);

            /* Clean up the reserved pages */
            kt = vm_deallocate(mach_task_self(), mapping_addr, total_size);
            if (kt != KERN_SUCCESS) {
                PLCF_DEBUG("vm_deallocate() failed: %d", kt);
            }

            /* Drop the memory handle */
            kt = mach_port_mod_refs(mach_task_self(), mem_handle, MACH_PORT_RIGHT_SEND, -1);
            if (kt != KERN_SUCCESS) {
                PLCF_DEBUG("mach_port_mod_refs(-1) failed: %d", kt);
            }
            
            return PLCRASH_ENOMEM;
        }

        /* Drop the memory handle */
        kt = mach_port_mod_refs(mach_task_self(), mem_handle, MACH_PORT_RIGHT_SEND, -1);
        if (kt != KERN_SUCCESS) {
            PLCF_DEBUG("mach_port_mod_refs(-1) failed: %d", kt);
        }
        
        /* Adjust the total mapping size */
        mapped_size += entry_length;
    }
    
    *result = mapping_addr;
    *result_length = mapped_size;

    return PLCRASH_ESUCCESS;
}
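A hedged usage sketch: map a range from another task read-only into the current process and release it afterwards. The task, remote_addr and remote_len values are placeholders.

pl_vm_address_t local_addr;
pl_vm_size_t local_len;
plcrash_error_t perr = plcrash_async_mobject_remap_pages_workaround(task, remote_addr, remote_len,
                                                                    false /* allow short mappings */,
                                                                    &local_addr, &local_len);
if (perr == PLCRASH_ESUCCESS) {
    /* ... read up to local_len bytes at local_addr ... */
    vm_deallocate(mach_task_self(), local_addr, local_len);
}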
Example #7
	mach_error_t
allocateBranchIsland(
		BranchIsland	**island,
		int				allocateHigh,
		void *originalFunctionAddress)
{
	assert( island );
	
	mach_error_t	err = err_none;
	
	if( allocateHigh ) {
		vm_size_t pageSize;
		err = host_page_size( mach_host_self(), &pageSize );
		if( !err ) {
			assert( sizeof( BranchIsland ) <= pageSize );
#if defined(__ppc__) || defined(__POWERPC__)
			vm_address_t first = 0xfeffffff;
			vm_address_t last = 0xfe000000 + pageSize;
#elif defined(__x86_64__)
			vm_address_t first = ((uint64_t)originalFunctionAddress & ~(uint64_t)(((uint64_t)1 << 31) - 1)) | ((uint64_t)1 << 31); // start in the middle of the page?
			vm_address_t last = 0x0;
#else
			vm_address_t first = 0xffc00000;
			vm_address_t last = 0xfffe0000;
#endif

			vm_address_t page = first;
			int allocated = 0;
			vm_map_t task_self = mach_task_self();
			
			while( !err && !allocated && page != last ) {

				err = vm_allocate( task_self, &page, pageSize, 0 );
				if( err == err_none )
					allocated = 1;
				else if( err == KERN_NO_SPACE ) {
#if defined(__x86_64__)
					page -= pageSize;
#else
					page += pageSize;
#endif
					err = err_none;
				}
			}
			if( allocated )
				*island = (BranchIsland*) page;
			else if( !allocated && !err )
				err = KERN_NO_SPACE;
		}
	} else {
		void *block = malloc( sizeof( BranchIsland ) );
		if( block )
			*island = block;
		else
			err = KERN_NO_SPACE;
	}
	if( !err )
		(**island).allocatedHigh = allocateHigh;
	
	return err;
}
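A plausible counterpart to allocateBranchIsland(), sketched here for illustration rather than copied from the project: it relies on the allocatedHigh flag set above to decide between vm_deallocate() and free().

	mach_error_t
freeBranchIsland(
		BranchIsland	*island )
{
	assert( island );

	mach_error_t err = err_none;
	if( island->allocatedHigh ) {
		// High islands were obtained with vm_allocate(), one page each.
		vm_size_t pageSize;
		err = host_page_size( mach_host_self(), &pageSize );
		if( !err )
			err = vm_deallocate( mach_task_self(), (vm_address_t) island, pageSize );
	} else {
		// Low islands came from malloc().
		free( island );
	}
	return err;
}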
/*
 * The meat of our exception handler. This thread waits for an exception
 * message, annotates the exception if needed, then forwards it to the
 * previously installed handler (which will likely terminate the process).
 */
static void
MachExceptionHandler()
{
    kern_return_t ret;
    MachExceptionParameters& current = sMachExceptionState.current;
    MachExceptionParameters& previous = sMachExceptionState.previous;

    // We use the simplest kind of 64-bit exception message here.
    ExceptionRequest64 request = {};
    request.header.msgh_local_port = current.port;
    request.header.msgh_size = static_cast<mach_msg_size_t>(sizeof(request));
    ret = mach_msg(&request.header, MACH_RCV_MSG, 0, request.header.msgh_size,
                   current.port, MACH_MSG_TIMEOUT_NONE, MACH_PORT_NULL);

    // Restore the previous handler. We're going to forward to it
    // anyway, and if we crash while doing so we don't want to hang.
    task_set_exception_ports(mach_task_self(), previous.mask, previous.port,
                             previous.behavior, previous.flavor);

    // If we failed even receiving the message, just give up.
    if (ret != MACH_MSG_SUCCESS)
        MOZ_CRASH("MachExceptionHandler: mach_msg failed to receive a message!");

    // Terminate the thread if we're shutting down.
    if (request.header.msgh_id == sIDQuit)
        return;

    // The only other valid message ID is the one associated with the
    // EXCEPTION_DEFAULT | MACH_EXCEPTION_CODES behavior we chose.
    if (request.header.msgh_id != sIDRequest64)
        MOZ_CRASH("MachExceptionHandler: Unexpected Message ID!");

    // Make sure we can understand the exception we received.
    if (request.exception != EXC_BAD_ACCESS || request.code_count != 2)
        MOZ_CRASH("MachExceptionHandler: Unexpected exception type!");

    // Get the address that the offending code tried to access.
    uintptr_t address = uintptr_t(request.code[1]);

    // If the faulting address is inside one of our protected regions, we
    // want to annotate the crash to make it stand out from the crowd.
    if (sProtectedRegions.isProtected(address)) {
        ReportCrashIfDebug("Hit MOZ_CRASH(Tried to access a protected region!)\n");
        MOZ_CRASH_ANNOTATE("MOZ_CRASH(Tried to access a protected region!)");
    }

    // Forward to the previous handler which may be a debugger, the unix
    // signal handler, the crash reporter or something else entirely.
    if (previous.port != MACH_PORT_NULL) {
        mach_msg_type_number_t stateCount;
        thread_state_data_t state;
        if ((uint32_t(previous.behavior) & ~MACH_EXCEPTION_CODES) != EXCEPTION_DEFAULT) {
            // If the previous handler requested thread state, get it here.
            stateCount = THREAD_STATE_MAX;
            ret = thread_get_state(request.thread.name, previous.flavor, state, &stateCount);
            if (ret != KERN_SUCCESS)
                MOZ_CRASH("MachExceptionHandler: Could not get the thread state to forward!");
        }

        // Depending on the behavior of the previous handler, the forwarded
        // exception message will have a different set of fields.
        // Of particular note is that exception handlers that lack
        // MACH_EXCEPTION_CODES will get 32-bit fields even on 64-bit
        // systems. It appears that OSX simply truncates these fields.
        ExceptionRequestUnion forward;
        switch (uint32_t(previous.behavior)) {
          case EXCEPTION_DEFAULT:
             CopyExceptionRequest32(request, forward.r32);
             break;
          case EXCEPTION_DEFAULT | MACH_EXCEPTION_CODES:
             CopyExceptionRequest64(request, forward.r64);
             break;
          case EXCEPTION_STATE:
             CopyExceptionRequestState32(request, forward.rs32,
                                         previous.flavor, stateCount, state);
             break;
          case EXCEPTION_STATE | MACH_EXCEPTION_CODES:
             CopyExceptionRequestState64(request, forward.rs64,
                                         previous.flavor, stateCount, state);
             break;
          case EXCEPTION_STATE_IDENTITY:
             CopyExceptionRequestStateIdentity32(request, forward.rsi32,
                                                 previous.flavor, stateCount, state);
             break;
          case EXCEPTION_STATE_IDENTITY | MACH_EXCEPTION_CODES:
             CopyExceptionRequestStateIdentity64(request, forward.rsi64,
                                                 previous.flavor, stateCount, state);
             break;
          default:
             MOZ_CRASH("MachExceptionHandler: Unknown previous handler behavior!");
        }

        // Forward the generated message to the old port. The local and remote
        // port fields *and their rights* are swapped on arrival, so we need to
        // swap them back first.
        forward.header.msgh_bits = (request.header.msgh_bits & ~MACH_MSGH_BITS_PORTS_MASK) |
            MACH_MSGH_BITS(MACH_MSGH_BITS_LOCAL(request.header.msgh_bits),
                           MACH_MSGH_BITS_REMOTE(request.header.msgh_bits));
        forward.header.msgh_local_port = forward.header.msgh_remote_port;
        forward.header.msgh_remote_port = previous.port;
        ret = mach_msg(&forward.header, MACH_SEND_MSG, forward.header.msgh_size, 0,
                       MACH_PORT_NULL, MACH_MSG_TIMEOUT_NONE, MACH_PORT_NULL);
        if (ret != MACH_MSG_SUCCESS)
            MOZ_CRASH("MachExceptionHandler: Failed to forward to the previous handler!");
    } else {
        // There was no previous task-level exception handler, so defer to the
        // host level one instead. We set the return code to KERN_FAILURE to
        // indicate that we did not handle the exception.
        // The reply message ID is always the request ID + 100.
        ExceptionReply reply = {};
        reply.header.msgh_bits =
            MACH_MSGH_BITS(MACH_MSGH_BITS_REMOTE(request.header.msgh_bits), 0);
        reply.header.msgh_size = static_cast<mach_msg_size_t>(sizeof(reply));
        reply.header.msgh_remote_port = request.header.msgh_remote_port;
        reply.header.msgh_local_port = MACH_PORT_NULL;
        reply.header.msgh_id = request.header.msgh_id + 100;
        reply.NDR = request.NDR;
        reply.RetCode = KERN_FAILURE;
        ret = mach_msg(&reply.header, MACH_SEND_MSG, reply.header.msgh_size, 0,
                       MACH_PORT_NULL, MACH_MSG_TIMEOUT_NONE, MACH_PORT_NULL);
        if (ret != MACH_MSG_SUCCESS)
            MOZ_CRASH("MachExceptionHandler: Failed to forward to the host level!");
    }
}
Example #9
/* Arrange for hurd_cancel to be called on RPC's thread if OBJECT gets notified
   that any of the things in COND have happened to PORT.  RPC should be an
   rpc on OBJECT.  */
error_t
ports_interrupt_rpc_on_notification (void *object,
				     struct rpc_info *rpc,
				     mach_port_t port, mach_msg_id_t what)
{
  int req_notify;
  struct ports_notify *pn;
  struct rpc_notify *new_req, *req;
  struct port_info *pi = object;

  pthread_mutex_lock (&_ports_lock);

  if (! MACH_PORT_VALID (port))
    /* PORT is already dead or bogus, so interrupt the rpc immediately.  */
    {
      hurd_thread_cancel (rpc->thread);
      pthread_mutex_unlock (&_ports_lock);
      return 0;
    }

  new_req = _ports_free_rpc_notifies;
  if (new_req)
    /* We got a req off the free list.  */
    _ports_free_rpc_notifies = new_req->next;
  else
    /* No free notify structs, allocate one; it's expected that 99% of the
       time we'll add a new structure, so we malloc while we don't have the
       lock, and free it if we're wrong.  */
    {
      pthread_mutex_unlock (&_ports_lock); /* Don't hold the lock during malloc. */
      new_req = malloc (sizeof (struct rpc_notify));
      if (! new_req)
	return ENOMEM;
      pthread_mutex_lock (&_ports_lock);
    }

  /* Find any existing entry for PORT/WHAT.  */
  for (pn = _ports_notifications; pn; pn = pn->next)
    if (pn->port == port && pn->what == what)
      break;

  if (! pn)
    /* A notification on a new port.  */
    {
      pn = _ports_free_ports_notifies;

      if (pn)
	_ports_free_ports_notifies = pn->next;
      else
	{
	  pn = malloc (sizeof (struct ports_notify));
	  if (! pn)
	    /* sigh.  Free what we've alloced and return.  */
	    {
	      new_req->next = _ports_free_rpc_notifies;
	      _ports_free_rpc_notifies = new_req;
	      pthread_mutex_unlock (&_ports_lock);
	      return ENOMEM;
	    }
	}

      pn->reqs = 0;
      pn->port = port;
      pn->what = what;
      pn->pending = 0;
      pthread_mutex_init (&pn->lock, NULL);

      pn->next = _ports_notifications;
      pn->prevp = &_ports_notifications;
      if (_ports_notifications)
	_ports_notifications->prevp = &pn->next;
      _ports_notifications = pn;
    }

  for (req = rpc->notifies; req; req = req->next)
    if (req->notify == pn)
      break;

  if (req)
    /* REQ is already pending for PORT/WHAT on RPC, so free NEW_REQ.  */
    {
      new_req->next = _ports_free_rpc_notifies;
      _ports_free_rpc_notifies = new_req;
    }
  else
    /* Add a new request for PORT/WHAT on RPC.  */
    {
      req = new_req;

      req->rpc = rpc;
      req->notify = pn;
      req->pending = 0;

      req->next_req = pn->reqs;
      req->prev_req_p = &pn->reqs;
      if (pn->reqs)
	pn->reqs->prev_req_p = &req->next_req;
      pn->reqs = req;

      req->next = rpc->notifies;
      rpc->notifies = req;
    }

  /* Make sure that this request results in an interrupt.  */
  req->pending++;

  /* Find out whether we should request a new notification (after we release
     _PORTS_LOCK) -- PN may be new, or left over after a previous
     notification (in which case our new request is likely to trigger an
     immediate notification).  */
  req_notify = !pn->pending;
  if (req_notify)
    pthread_mutex_lock (&pn->lock);

  pthread_mutex_unlock (&_ports_lock);

  if (req_notify)
    {
      mach_port_t old;
      error_t err =
	mach_port_request_notification (mach_task_self (), port,
					what, 1, pi->port_right,
					MACH_MSG_TYPE_MAKE_SEND_ONCE, &old);

      if (! err && old != MACH_PORT_NULL)
	mach_port_deallocate (mach_task_self (), old);

      pn->pending = 1;
      pthread_mutex_unlock (&pn->lock);

      return err;
    }
  else
    return 0;
}
Example #10
void
default_pager_initialize(
	mach_port_t host_port)
{
	kern_return_t		kr;
	static char		here[] = "default_pager_initialize";

	/* 
	 * Initial thread and task ports.
	 */
	default_pager_self = mach_task_self();
	default_pager_default_thread = mach_thread_self();

	PRINTF_LOCK_INIT();

	/*
	 * Make ourselves unswappable.
	 */
	kr = task_swappable(default_pager_host_port, default_pager_self, FALSE);
	if (kr != KERN_SUCCESS)
		dprintf(("task_swappable failed 0x%x %s\n",
			 kr, mach_error_string(kr)));

	/*
	 * Exported DMM port.
	 */
	kr = mach_port_allocate(default_pager_self, MACH_PORT_RIGHT_RECEIVE,
				&default_pager_default_port);
	if (kr != KERN_SUCCESS)
		Panic("default port");

	/*
	 * Port sets.
	 */
	kr = mach_port_allocate(default_pager_self, MACH_PORT_RIGHT_PORT_SET,
				&default_pager_internal_set);
	if (kr != KERN_SUCCESS)
		Panic("internal set");

	kr = mach_port_allocate(default_pager_self, MACH_PORT_RIGHT_PORT_SET,
				&default_pager_external_set);
	if (kr != KERN_SUCCESS)
		Panic("external set");

	/*
	 * Export pager interfaces.
	 */
#ifdef	USER_PAGER
	if ((kr = netname_check_in(name_server_port, "UserPager",
				   default_pager_self,
				   default_pager_default_port))
	    != KERN_SUCCESS) {
		dprintf(("netname_check_in returned 0x%x %s\n",
			 kr, mach_error_string(kr)));
		exit(1);
	}
#else	/* USER_PAGER */
	{
		int clsize;
		memory_object_t DMM;

		/* get a send right for vm_set_default_memory_manager */
		kr = mach_port_insert_right(default_pager_self,
					    default_pager_default_port,
					    default_pager_default_port,
					    MACH_MSG_TYPE_MAKE_SEND);
		DMM = default_pager_default_port;
		clsize = (vm_page_size << vstruct_def_clshift);

		kr = host_default_memory_manager(host_port, &DMM, clsize);
		if ((kr != KERN_SUCCESS) || (DMM != MACH_PORT_NULL))
			Panic("default memory manager");

		/* release the extra send right */
		(void) mach_port_mod_refs(default_pager_self,
					  default_pager_default_port,
					  MACH_PORT_RIGHT_SEND,
					  -1);
	}
#endif	/* USER_PAGER */

	kr = mach_port_allocate(default_pager_self, MACH_PORT_RIGHT_PORT_SET,
				&default_pager_default_set);
	if (kr != KERN_SUCCESS)
		Panic("default set");

	kr = mach_port_move_member(default_pager_self,
				   default_pager_default_port,
				   default_pager_default_set);
	if (kr != KERN_SUCCESS)
		Panic("set up default");

	/*
	 * Arrange for wiring privileges.
	 */
	wire_setup(host_port);

	/*
	 * Find out how many CPUs we have, to determine the number
	 * of threads to create.
	 */
	if (default_pager_internal_count == 0) {
		host_basic_info_data_t h_info;
		mach_msg_type_number_t h_info_count;

		h_info_count = HOST_BASIC_INFO_COUNT;
		(void) host_info(host_port, HOST_BASIC_INFO,
				(host_info_t) &h_info, &h_info_count);

		/*
		 * Random computation to get more parallelism on
		 * multiprocessors.
		 */
		default_pager_internal_count = ((h_info.avail_cpus > 32)
						? 32
						: h_info.avail_cpus) / 4 + 3;
	}

	/*
	 * Vm variables.
	 */
	vm_page_mask = vm_page_size - 1;
	vm_page_shift = log2(vm_page_size);

	/*
	 * List of all vstructs.
	 */
	VSL_LOCK_INIT();
	queue_init(&vstruct_list.vsl_queue);
	queue_init(&vstruct_list.vsl_leak_queue);
	vstruct_list.vsl_count = 0;

	VSTATS_LOCK_INIT(&global_stats.gs_lock);

	bs_initialize();
}
Example #11
/*
 * Initialize and Run the default pager
 */
void
default_pager(void)
{
	int			i, id;
	static char		here[] = "default_pager";
	mach_msg_options_t 	server_options;
	default_pager_thread_t	dpt;
	default_pager_thread_t	**dpt_array;

	default_pager_thread_privileges();

	/*
	 * Wire down code, data, stack
	 */
	wire_all_memory();

	/*
	 * Give me space for the thread array and zero it.
	 */
	i = default_pager_internal_count + default_pager_external_count + 1;
	dpt_array = (default_pager_thread_t **)
	    kalloc(i * sizeof(default_pager_thread_t *));
	memset(dpt_array, 0, i * sizeof(default_pager_thread_t *));

	/* Setup my thread structure.  */
	id = 0;
	dpt.dpt_thread = cthread_self();
	dpt.dpt_buffer = 0;
	dpt.dpt_internal = FALSE;
	dpt.dpt_id = id++;
	dpt.dpt_initialized_p = TRUE;
	cthread_set_data(cthread_self(), (char *) &dpt);
	dpt_array[0] = &dpt;

	/*
	 * Now we create the threads that will actually
	 * manage objects.
	 */

	for (i = 0; i < default_pager_internal_count; i++) {
		dpt_array[id] = start_default_pager_thread(id, TRUE);
		id++;
	 }

	for (i = 0; i < default_pager_external_count; i++) {
		dpt_array[id] = start_default_pager_thread(id, FALSE);
		id++;
	}

	/* Is everybody ready?  */
	for (i = 0; i < id; i++)
	    while (!dpt_array[i])
		cthread_yield();

	/* Tell the bootstrap process to go ahead.  */
	bootstrap_completed(bootstrap_port, mach_task_self());

	/* Start servicing requests.  */
	server_options = MACH_RCV_TRAILER_ELEMENTS(MACH_RCV_TRAILER_SEQNO);
	for (;;) {
		mach_msg_server(default_pager_demux_default,
				default_pager_msg_size,
				default_pager_default_set,
				server_options);
		Panic("default server");
	}
}
/* Implement the object termination call from the kernel as described
   in <mach/memory_object.defs>. */
kern_return_t
_pager_seqnos_memory_object_terminate (mach_port_t object, 
				       mach_port_seqno_t seqno,
				       mach_port_t control,
				       mach_port_t name)
{
  struct pager *p;
  
  p = ports_lookup_port (0, object, _pager_class);
  if (!p)
    return EOPNOTSUPP;

  mutex_lock (&p->interlock);
  _pager_wait_for_seqno (p, seqno);
  
  if (control != p->memobjcntl)
    {
      printf ("incg terminate: wrong control port");
      goto out;
    }
  if (name != p->memobjname)
    {
      printf ("incg terminate: wrong name port");
      goto out;
    }

  while (p->noterm)
    {
      p->termwaiting = 1;
      condition_wait (&p->wakeup, &p->interlock);
    }

  /* Destroy the ports we received; mark that in P so that it doesn't bother
     doing it again. */
  mach_port_destroy (mach_task_self (), control);
  mach_port_destroy (mach_task_self (), name);
  p->memobjcntl = p->memobjname = MACH_PORT_NULL;

  _pager_free_structure (p);

#ifdef KERNEL_INIT_RACE
  if (p->init_head)
    {
      struct pending_init *i = p->init_head;
      p->init_head = i->next;
      if (!i->next)
	p->init_tail = 0;
      p->memobjcntl = i->control;
      p->memobjname = i->name;
      memory_object_ready (i->control, p->may_cache, p->copy_strategy);
      p->pager_state = NORMAL;
      free (i);
    }
#endif

 out:
  _pager_release_seqno (p, seqno);
  mutex_unlock (&p->interlock);
  ports_port_deref (p);

  return 0;
}
Example #13
/*++
Function :
    SEHInitializeMachExceptions 

    Initialize all SEH-related stuff related to mach exceptions

    (no parameters)

Return value :
    TRUE  if SEH support initialization succeeded
    FALSE otherwise
--*/
BOOL SEHInitializeMachExceptions (void)
{
#if !DISABLE_EXCEPTIONS
    kern_return_t MachRet;
    int CreateRet;
    pthread_t exception_thread;

    // Allocate a mach port that will listen in on exceptions
    MachRet = mach_port_allocate(mach_task_self(),
                                 MACH_PORT_RIGHT_RECEIVE,
                                 &s_ExceptionPort);

    if (MachRet != KERN_SUCCESS)
    {
        UTIL_SetLastErrorFromMach(MachRet);
        return FALSE;
    }

    // Insert the send right into the task
    MachRet = mach_port_insert_right(mach_task_self(),
                                     s_ExceptionPort,
                                     s_ExceptionPort,
                                     MACH_MSG_TYPE_MAKE_SEND);

    if (MachRet != KERN_SUCCESS)
    {
        UTIL_SetLastErrorFromMach(MachRet);
        return FALSE;
    }

    MachRet = task_set_exception_ports(mach_task_self(),
                                       PAL_EXC_MASK ,
                                       s_ExceptionPort,
                                       EXCEPTION_DEFAULT,
                                       PPC_THREAD_STATE);

    if (MachRet != KERN_SUCCESS)
    {
        UTIL_SetLastErrorFromMach(MachRet);
        return FALSE;
    }

    // Create the thread that will listen to the exception for all threads
    CreateRet = pthread_create(&exception_thread, NULL, SEHExceptionThread, NULL);

    if ( CreateRet != 0 )
    {
        ERROR("pthread_create failed, error is %d (%s)\n", CreateRet, strerror(CreateRet));
        SetLastError(ERROR_NOT_ENOUGH_MEMORY);
        return FALSE;
    }
#endif // !DISABLE_EXCEPTIONS

    // Tell the system to ignore SIGPIPE signals rather than use the default
    // behavior of terminating the process. Ignoring SIGPIPE will cause
    // calls that would otherwise raise that signal to return EPIPE instead.
    // The PAL expects EPIPE from those functions and won't handle a
    // SIGPIPE signal.
    signal(SIGPIPE, SIG_IGN);

    // We're done
    return TRUE;
}
	mach_error_t
mach_inject(
		const mach_inject_entry	threadEntry,
		const void				*paramBlock,
		size_t					paramSize,
		pid_t					targetProcess,
		vm_size_t				stackSize ) {
	;//assertCodePtr( threadEntry );
	;//assertPtrIfNotNull( paramBlock );
	;//assertPositive( targetProcess );
	;//assertIsTrue( stackSize == 0 || stackSize > 1024 );
	
	//	Find the image.
	const void		*image;
	unsigned long	imageSize;
	mach_error_t	err = machImageForPointer( threadEntry, &image, &imageSize );
	
	//	Initialize stackSize to default if requested.
	if( stackSize == 0 )
		/** @bug We only want an 8K default, fix the plop-in-the-middle code below. */
		stackSize = 16 * 1024;
	
	//	Convert PID to Mach Task ref.
	mach_port_t	remoteTask = 0;
	if( !err )
		err = task_for_pid( mach_task_self(), targetProcess, &remoteTask );
	
	/** @todo	Would be nice to just allocate one block for both the remote stack
				*and* the remoteCode (including the parameter data block once that's
				written.
	*/
	
	//	Allocate the remoteStack.
	vm_address_t remoteStack = 0;
	if( !err )
		err = vm_allocate( remoteTask, &remoteStack, stackSize, 1 );
	
	//	Allocate the code.
	vm_address_t remoteCode = 0;
	if( !err )
		err = vm_allocate( remoteTask, &remoteCode, imageSize, 1 );
	if( !err ) {
		ASSERT_CAST( pointer_t, image );
		err = vm_write( remoteTask, remoteCode, (pointer_t) image, imageSize );
	}
	
	//	Allocate the paramBlock if specified.
	vm_address_t remoteParamBlock = 0;
	if( !err && paramBlock != NULL && paramSize ) {
		err = vm_allocate( remoteTask, &remoteParamBlock, paramSize, 1 );
		if( !err ) {
			ASSERT_CAST( pointer_t, paramBlock );
			err = vm_write( remoteTask, remoteParamBlock, (pointer_t) paramBlock, paramSize );
		}
	}
	
	//	Calculate offsets.
	ptrdiff_t	threadEntryOffset, imageOffset;
	if( !err ) {
		;//assertIsWithinRange( threadEntry, image, image+imageSize );
		ASSERT_CAST( void*, threadEntry );
		threadEntryOffset = ((void*) threadEntry) - image;
		
		ASSERT_CAST( void*, remoteCode );
		imageOffset = ((void*) remoteCode) - image;
	}
	
	//	Allocate the thread.
	thread_act_t remoteThread;
	if( !err ) {
		ppc_thread_state_t remoteThreadState;
		
		/** @bug Stack math should be more sophisticated than this (ala redzone). */
		remoteStack += stackSize / 2;
		
		bzero( &remoteThreadState, sizeof(remoteThreadState) );
		
		ASSERT_CAST( unsigned int, remoteCode );
		remoteThreadState.srr0 = (unsigned int) remoteCode;
		remoteThreadState.srr0 += threadEntryOffset;
		assert( remoteThreadState.srr0 < (remoteCode + imageSize) );
		
		ASSERT_CAST( unsigned int, remoteStack );
		remoteThreadState.r1 = (unsigned int) remoteStack;
		
		ASSERT_CAST( unsigned int, imageOffset );
		remoteThreadState.r3 = (unsigned int) imageOffset;
		
		ASSERT_CAST( unsigned int, remoteParamBlock );
		remoteThreadState.r4 = (unsigned int) remoteParamBlock;
		
		ASSERT_CAST( unsigned int, paramSize );
		remoteThreadState.r5 = (unsigned int) paramSize;
		
		ASSERT_CAST( unsigned int, 0xDEADBEEF );
		remoteThreadState.lr = (unsigned int) 0xDEADBEEF;
		
		//printf( "remoteCode start: %p\n", (void*) remoteCode );
		//printf( "remoteCode size: %ld\n", imageSize );
		//printf( "remoteCode pc: %p\n", (void*) remoteThreadState.srr0 );
		//printf( "remoteCode end: %p\n", (void*) (((char*)remoteCode)+imageSize) );
		fflush(0);
		
		err = thread_create_running( remoteTask, PPC_THREAD_STATE,
				(thread_state_t) &remoteThreadState, PPC_THREAD_STATE_COUNT,
				&remoteThread );
	}
Example #15
void Threading::Semaphore::Reset()
{
	MACH_CHECK(semaphore_destroy(mach_task_self(), (semaphore_t) m_sema));
	MACH_CHECK(semaphore_create(mach_task_self(), (semaphore_t *) &m_sema, SYNC_POLICY_FIFO, 0));
	__atomic_store_n(&m_counter, 0, __ATOMIC_SEQ_CST);
}
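Hedged sketches of the Post()/Wait() counterparts to Reset() above; m_sema, m_counter and MACH_CHECK come from the snippet, while the ordering of the counter updates is an assumption.

void Threading::Semaphore::Post()
{
	__atomic_add_fetch(&m_counter, 1, __ATOMIC_SEQ_CST);
	MACH_CHECK(semaphore_signal((semaphore_t) m_sema));
}

void Threading::Semaphore::Wait()
{
	MACH_CHECK(semaphore_wait((semaphore_t) m_sema));
	__atomic_sub_fetch(&m_counter, 1, __ATOMIC_SEQ_CST);
}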
Example #16
struct i_mem_object *
imo_create(
	struct inode	*inode,
	boolean_t	allocate_port)
{
	struct i_mem_object	*imo;
	kern_return_t		kr;

	imo = (struct i_mem_object *)
		kmalloc(sizeof (struct i_mem_object), GFP_KERNEL);
	if (imo == NULL)
		return MEMORY_OBJECT_NULL;

	if (inode->i_mem_object != NULL) {
		/*
		 * Somebody else beat us...
		 */
		kfree(imo);
		return inode->i_mem_object;
	}

	inode->i_count++;
	inode->i_mem_object = imo;

	imo->imo_mem_obj = MACH_PORT_NULL;
	imo->imo_mem_obj_control = MACH_PORT_NULL;
	imo->imo_refcnt = 0;
	imo->imo_cacheable = TRUE;
	imo->imo_attrchange = FALSE;
	imo->imo_attrchange_wait = NULL;
	imo->imo_copy_strategy = MEMORY_OBJECT_COPY_DELAY;
	imo->imo_errors = 0;
	imo->imo_inode = inode;
	imo->imo_urefs = 0;

	if (allocate_port) {
		/*
		 * Allocate a memory object port
		 */
		kr = serv_port_allocate_name(&imo->imo_mem_obj, imo);
		if (kr != KERN_SUCCESS) {
			panic("imo_create: can't allocate port");
		}

		/*
		 * Get a send right for this port
		 */
		kr = mach_port_insert_right(mach_task_self(),
					    imo->imo_mem_obj, imo->imo_mem_obj,
					    MACH_MSG_TYPE_MAKE_SEND);
		if (kr != KERN_SUCCESS) {
			MACH3_DEBUG(0, kr,
				    ("imo_create: mach_port_insert_right(0x%x)",
				     imo->imo_mem_obj));
			panic("imo_create: can't allocate send right");
		}

		/*
		 * Add the new memory_object port to the port set
		 */
		kr = mach_port_move_member(mach_task_self(),
					   imo->imo_mem_obj,
					   inode_pager_port_set);
		if (kr != KERN_SUCCESS) {
			MACH3_DEBUG(0, kr,
				    ("imo_create: mach_port_move_member(0x%x)",
				     imo->imo_mem_obj));
			panic("imo_create: can't add object to port set");
		}
	}

	return imo;
}
Example #17
static inline int sem_init(sem_t* sem, int pshared, unsigned int value) {
    return semaphore_create(mach_task_self(), sem, SYNC_POLICY_FIFO, value);
}
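Hedged sketches of the companion wrappers such a shim typically provides, mapping the remaining POSIX-style calls onto Mach semaphores; as in sem_init() above, sem_t is assumed to be a typedef for semaphore_t.

static inline int sem_wait(sem_t* sem) {
    return semaphore_wait(*sem);
}

static inline int sem_post(sem_t* sem) {
    return semaphore_signal(*sem);
}

static inline int sem_destroy(sem_t* sem) {
    return semaphore_destroy(mach_task_self(), *sem);
}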
Example #18
void *
inode_pager_thread(
	void	*arg)
{
	struct server_thread_priv_data	priv_data;
	kern_return_t		kr;
	mach_msg_header_t	*in_msg;
	mach_msg_header_t	*out_msg;

	cthread_set_name(cthread_self(), "inode pager thread");
	server_thread_set_priv_data(cthread_self(), &priv_data);
	/*
	 * The inode pager runs in its own Linux task...
	 */
	priv_data.current_task = &inode_pager_task;
#if 0
	inode_pager_task.osfmach3.thread->active_on_cthread = cthread_self();
#endif
	/*
	 * Allow this thread to preempt preemptible threads, to solve deadlocks
	 * where the server touches some data that is backed by the inode
	 * pager. See user_copy.c.
	 */
	priv_data.preemptive = TRUE;

	uniproc_enter();

	kr = vm_allocate(mach_task_self(),
			 (vm_offset_t *) &in_msg,
			 INODE_PAGER_MESSAGE_SIZE,
			 TRUE);
	if (kr != KERN_SUCCESS) {
		MACH3_DEBUG(0, kr, ("inode_pager_thread: vm_allocate"));
		panic("inode_pager_thread: can't allocate in_msg");
	}
	kr = vm_allocate(mach_task_self(),
			 (vm_offset_t *) &out_msg,
			 INODE_PAGER_MESSAGE_SIZE,
			 TRUE);
	if (kr != KERN_SUCCESS) {
		MACH3_DEBUG(0, kr, ("inode_pager_thread: vm_allocate"));
		panic("inode_pager_thread: can't allocate out_msg");
	}

	inode_pager_task.state = TASK_INTERRUPTIBLE;
	server_thread_blocking(FALSE);
	for (;;) {
		kr = mach_msg(in_msg, MACH_RCV_MSG, 0, INODE_PAGER_MESSAGE_SIZE,
			      inode_pager_port_set, MACH_MSG_TIMEOUT_NONE,
			      MACH_PORT_NULL);
		server_thread_unblocking(FALSE);	/* can preempt ! */
		inode_pager_task.state = TASK_RUNNING;
		if (kr != MACH_MSG_SUCCESS) {
			MACH3_DEBUG(1, kr,
				    ("inode_pager_thread: mach_msg(RCV)"));
			server_thread_blocking(FALSE);
			continue;
		}

		if (!inode_object_server(in_msg, out_msg)) {
			printk("inode_pager_thread: invalid msg id 0x%x\n",
			       in_msg->msgh_id);
		}

		inode_pager_task.state = TASK_INTERRUPTIBLE;
		server_thread_blocking(FALSE);
		
		if (MACH_PORT_VALID(out_msg->msgh_remote_port)) {
			kr = mach_msg(out_msg, MACH_SEND_MSG,
				      out_msg->msgh_size, 0, MACH_PORT_NULL,
				      MACH_MSG_TIMEOUT_NONE, MACH_PORT_NULL);
			if (kr != MACH_MSG_SUCCESS) {
				MACH3_DEBUG(1, kr, ("inode_pager_thread: mach_msg(SEND)"));
			}
		}
	}
}
Example #19
    mach_error_t
mach_override_ptr(
	void *originalFunctionAddress,
    const void *overrideFunctionAddress,
    void **originalFunctionReentryIsland )
{
	assert( originalFunctionAddress );
	assert( overrideFunctionAddress );
	
	// this addresses overriding such functions as AudioOutputUnitStart()
	// test with modified DefaultOutputUnit project
#if defined(__x86_64__)
    for(;;){
        if(*(uint16_t*)originalFunctionAddress==0x25FF)    // jmp qword near [rip+0x????????]
            originalFunctionAddress=*(void**)((char*)originalFunctionAddress+6+*(int32_t *)((uint16_t*)originalFunctionAddress+1));
        else break;
    }
#elif defined(__i386__)
    for(;;){
        if(*(uint16_t*)originalFunctionAddress==0x25FF)    // jmp *0x????????
            originalFunctionAddress=**(void***)((uint16_t*)originalFunctionAddress+1);
        else break;
    }
#endif
#ifdef DEBUG_DISASM
  {
    fprintf(stderr, "Replacing function at %p\n", originalFunctionAddress);
    fprintf(stderr, "First 16 bytes of the function: ");
    unsigned char *orig = (unsigned char *)originalFunctionAddress;
    int i;
    for (i = 0; i < 16; i++) {
       fprintf(stderr, "%x ", (unsigned int) orig[i]);
    }
    fprintf(stderr, "\n");
    fprintf(stderr, 
            "To disassemble, save the following function as disas.c"
            " and run:\n  gcc -c disas.c && gobjdump -d disas.o\n"
            "The first 16 bytes of the original function will start"
            " after four nop instructions.\n");
    fprintf(stderr, "\nvoid foo() {\n  asm volatile(\"nop;nop;nop;nop;\");\n");
    int j = 0;
    for (j = 0; j < 2; j++) {
      fprintf(stderr, "  asm volatile(\".byte ");
      for (i = 8 * j; i < 8 * (j+1) - 1; i++) {
        fprintf(stderr, "0x%x, ", (unsigned int) orig[i]);
      }
      fprintf(stderr, "0x%x;\");\n", (unsigned int) orig[8 * (j+1) - 1]);
    }
    fprintf(stderr, "}\n\n");
  }
#endif

	long	*originalFunctionPtr = (long*) originalFunctionAddress;
	mach_error_t	err = err_none;
	
#if defined(__ppc__) || defined(__POWERPC__)
	//	Ensure first instruction isn't 'mfctr'.
	#define	kMFCTRMask			0xfc1fffff
	#define	kMFCTRInstruction	0x7c0903a6
	
	long	originalInstruction = *originalFunctionPtr;
	if( !err && ((originalInstruction & kMFCTRMask) == kMFCTRInstruction) )
		err = err_cannot_override;
#elif defined(__i386__) || defined(__x86_64__)
	int eatenCount = 0;
	int originalInstructionCount = 0;
	char originalInstructions[kOriginalInstructionsSize];
	uint8_t originalInstructionSizes[kOriginalInstructionsSize];
	uint64_t jumpRelativeInstruction = 0; // JMP

	Boolean overridePossible = eatKnownInstructions ((unsigned char *)originalFunctionPtr, 
										&jumpRelativeInstruction, &eatenCount, 
										originalInstructions, &originalInstructionCount, 
										originalInstructionSizes );
#ifdef DEBUG_DISASM
  if (!overridePossible) fprintf(stderr, "overridePossible = false @%d\n", __LINE__);
#endif
	if (eatenCount > kOriginalInstructionsSize) {
#ifdef DEBUG_DISASM
		fprintf(stderr, "Too many instructions eaten\n");
#endif    
		overridePossible = false;
	}
	if (!overridePossible) err = err_cannot_override;
	if (err) fprintf(stderr, "err = %x %s:%d\n", err, __FILE__, __LINE__);
#endif
	
	//	Make the original function implementation writable.
	if( !err ) {
		err = vm_protect( mach_task_self(),
				(vm_address_t) originalFunctionPtr, 8, false,
				(VM_PROT_ALL | VM_PROT_COPY) );
		if( err )
			err = vm_protect( mach_task_self(),
					(vm_address_t) originalFunctionPtr, 8, false,
					(VM_PROT_DEFAULT | VM_PROT_COPY) );
	}
	if (err) fprintf(stderr, "err = %x %s:%d\n", err, __FILE__, __LINE__);
	
	//	Allocate and target the escape island to the overriding function.
	BranchIsland	*escapeIsland = NULL;
	if( !err )	
		err = allocateBranchIsland( &escapeIsland, kAllocateHigh, originalFunctionAddress );
		if (err) fprintf(stderr, "err = %x %s:%d\n", err, __FILE__, __LINE__);

	
#if defined(__ppc__) || defined(__POWERPC__)
	if( !err )
		err = setBranchIslandTarget( escapeIsland, overrideFunctionAddress, 0 );
	
	//	Build the branch absolute instruction to the escape island.
	long	branchAbsoluteInstruction = 0; // Set to 0 just to silence warning.
	if( !err ) {
		long escapeIslandAddress = ((long) escapeIsland) & 0x3FFFFFF;
		branchAbsoluteInstruction = 0x48000002 | escapeIslandAddress;
	}
#elif defined(__i386__) || defined(__x86_64__)
        if (err) fprintf(stderr, "err = %x %s:%d\n", err, __FILE__, __LINE__);

	if( !err )
		err = setBranchIslandTarget_i386( escapeIsland, overrideFunctionAddress, 0 );
 
	if (err) fprintf(stderr, "err = %x %s:%d\n", err, __FILE__, __LINE__);
	// Build the jump relative instruction to the escape island
#endif


#if defined(__i386__) || defined(__x86_64__)
	if (!err) {
		uint32_t addressOffset = ((char*)escapeIsland - (char*)originalFunctionPtr - 5);
		addressOffset = OSSwapInt32(addressOffset);
		
		jumpRelativeInstruction |= 0xE900000000000000LL; 
		jumpRelativeInstruction |= ((uint64_t)addressOffset & 0xffffffff) << 24;
		jumpRelativeInstruction = OSSwapInt64(jumpRelativeInstruction);		
	}
#endif
	
	//	Optionally allocate & return the reentry island. This may contain relocated
	//  jmp instructions and so has all the same addressing reachability requirements
	//  the escape island has to the original function, except the escape island is
	//  technically our original function.
	BranchIsland	*reentryIsland = NULL;
	if( !err && originalFunctionReentryIsland ) {
		err = allocateBranchIsland( &reentryIsland, kAllocateHigh, escapeIsland);
		if( !err )
			*originalFunctionReentryIsland = reentryIsland;
	}
	
#if defined(__ppc__) || defined(__POWERPC__)	
	//	Atomically:
	//	o If the reentry island was allocated:
	//		o Insert the original instruction into the reentry island.
	//		o Target the reentry island at the 2nd instruction of the
	//		  original function.
	//	o Replace the original instruction with the branch absolute.
	if( !err ) {
		int escapeIslandEngaged = false;
		do {
			if( reentryIsland )
				err = setBranchIslandTarget( reentryIsland,
						(void*) (originalFunctionPtr+1), originalInstruction );
			if( !err ) {
				escapeIslandEngaged = CompareAndSwap( originalInstruction,
										branchAbsoluteInstruction,
										(UInt32*)originalFunctionPtr );
				if( !escapeIslandEngaged ) {
					//	Someone replaced the instruction out from under us,
					//	re-read the instruction, make sure it's still not
					//	'mfctr' and try again.
					originalInstruction = *originalFunctionPtr;
					if( (originalInstruction & kMFCTRMask) == kMFCTRInstruction)
						err = err_cannot_override;
				}
			}
		} while( !err && !escapeIslandEngaged );
	}
#elif defined(__i386__) || defined(__x86_64__)
	// Atomically:
	//	o If the reentry island was allocated:
	//		o Insert the original instructions into the reentry island.
	//		o Target the reentry island at the first non-replaced 
	//        instruction of the original function.
	//	o Replace the original first instructions with the jump relative.
	//
	// Note that on i386, we do not support someone else changing the code under our feet
	if ( !err ) {
		fixupInstructions(originalFunctionPtr, reentryIsland, originalInstructions,
					originalInstructionCount, originalInstructionSizes );
	
		if( reentryIsland )
			err = setBranchIslandTarget_i386( reentryIsland,
										 (void*) ((char *)originalFunctionPtr+eatenCount), originalInstructions );
		// try making islands executable before planting the jmp
#if defined(__x86_64__) || defined(__i386__)
        if( !err )
            err = makeIslandExecutable(escapeIsland);
        if( !err && reentryIsland )
            err = makeIslandExecutable(reentryIsland);
#endif
		if ( !err )
			atomic_mov64((uint64_t *)originalFunctionPtr, jumpRelativeInstruction);
	}
#endif
	
	//	Clean up on error.
	if( err ) {
		if( reentryIsland )
			freeBranchIsland( reentryIsland );
		if( escapeIsland )
			freeBranchIsland( escapeIsland );
	}

#ifdef DEBUG_DISASM
  {
    fprintf(stderr, "First 16 bytes of the function after slicing: ");
    unsigned char *orig = (unsigned char *)originalFunctionAddress;
    int i;
    for (i = 0; i < 16; i++) {
       fprintf(stderr, "%x ", (unsigned int) orig[i]);
    }
    fprintf(stderr, "\n");
  }
#endif
	return err;
}
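The routine above patches the first instructions of the target with a jump to an escape island and, optionally, hands back a reentry island containing the displaced instructions. Purely as an illustrative sketch, assuming a mach_override_ptr-style wrapper around this routine (the header, function name and signature are assumptions, not taken from the code above):

#include <stdio.h>
#include <unistd.h>
#include "mach_override.h"   // assumed header declaring mach_override_ptr

static pid_t (*gOriginalGetpid)(void);   // reentry island: reaches the unpatched getpid

static pid_t my_getpid(void) {
	fprintf(stderr, "getpid() intercepted\n");
	return gOriginalGetpid();            // fall through to the original implementation
}

int main(void) {
	// Patch getpid's prologue with a jump to my_getpid; the reentry island
	// executes the displaced prologue instructions and jumps back.
	mach_error_t err = mach_override_ptr((void *)getpid,
	                                     (void *)my_getpid,
	                                     (void **)&gOriginalGetpid);
	if (err) {
		fprintf(stderr, "override failed: %x\n", err);
		return 1;
	}
	printf("pid = %d\n", getpid());      // now routed through my_getpid
	return 0;
}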
Beispiel #20
kern_return_t
inode_object_data_return(
	memory_object_t		mem_obj,
	memory_object_control_t	mem_obj_control,
	vm_offset_t		offset,
	vm_offset_t		data,
	vm_size_t		length,
	boolean_t		dirty,
	boolean_t		kernel_copy)
{
	struct i_mem_object	*imo;
	kern_return_t		kr;
	struct file		file;
	struct vm_area_struct	*vma;
	struct inode		*inode;
	struct task_struct	*tsk;
	struct osfmach3_mach_task_struct *mach_task;
	struct mm_struct	*mm;
	int			i;
	int			result;
	vm_offset_t		orig_data;
	vm_size_t		orig_length;

#ifdef	INODE_PAGER_DEBUG
	if (inode_pager_debug) {
		printk("inode_object_data_return: obj 0x%x control 0x%x "
		       "offset 0x%x length 0x%x\n",
		       mem_obj, mem_obj_control, offset, length);
	}
#endif	/* INODE_PAGER_DEBUG */

	imo = inode_pager_check_request(mem_obj, mem_obj_control);

	/*
	 * Flush the data to the disk.
	 */
	vma = imo->imo_inode->i_mmap;
	ASSERT(vma);
	if (!vma) {
#ifdef INODE_PAGER_DEBUG
		printk("inode_pager: return with no map!\n");
		inode = imo->imo_inode;
		file.f_op = inode->i_op->default_file_ops;
		printk("  inode: %ld, dev: %x, write: %x\n", 
		       inode->i_ino, (unsigned)inode->i_dev, (unsigned)file.f_op->write);
#endif
		orig_data = data;
		orig_length = length;
	} else {
	mm = vma->vm_mm;
	ASSERT(mm);
	mach_task = mm->mm_mach_task;
	ASSERT(mach_task);
	for (i = 0; i < NR_TASKS; i++) {
		if (task[i] &&
		    task[i]->osfmach3.task == mach_task) {
			break;
		}
	}
	if (i == NR_TASKS) {
		panic("inode_object_data_return: can't locate target task\n");
	}
	tsk = task[i];
	ASSERT(tsk);
	ASSERT(tsk->mm == mm || tsk->mm == &init_mm);

	/* XXX take the identity of the task that did the mapping */
	current->uid = tsk->uid;
	current->euid = tsk->euid;
	current->suid = tsk->suid;
	current->fsuid = tsk->fsuid;
	current->gid = tsk->gid;
	current->egid = tsk->egid;
	current->sgid = tsk->sgid;
	current->fsgid = tsk->fsgid;
	for (i = 0; i < NGROUPS; i++)
		current->groups[i] = tsk->groups[i];

	orig_data = data;
	orig_length = length;

	/*
	 * XXX: Code inspired directly from filemap_write_page, not generic!
	 */
	inode = imo->imo_inode;
	ASSERT(inode);
	file.f_op = inode->i_op->default_file_ops;
	if (file.f_op->write) {
		file.f_mode = 3;
		file.f_flags = 0;
		file.f_count = 1;
		file.f_inode = inode;
		file.f_pos = offset;
		file.f_reada = 0;

		down(&inode->i_sem);
		do {
			extern int do_write_page(struct inode * inode,
						 struct file * file,
						 const char * page,
						 unsigned long offset);

			result = do_write_page(inode, &file,
					       (const char *) data, offset);
			data += PAGE_SIZE;
			offset += PAGE_SIZE;
			length -= PAGE_SIZE;
		} while (length > 0);
		up(&inode->i_sem);
	} else {
		result = 0;
	}

	/* take back our (ghost) identity */
	current->uid = 0;
	current->euid = 0;
	current->suid = 0;
	current->fsuid = 0;
	current->gid = 0;
	current->egid = 0;
	current->sgid = 0;
	current->fsgid = 0;
	current->groups[0] = NOGROUP;
	
	if (result) {
		printk("inode_object_data_return: "
		       "do_write_page returned %d\n",
		       result);
	}
	} /* end of if (!vma) ... else */

	kr = vm_deallocate(mach_task_self(),
			   orig_data,
			   orig_length);
	if (kr != KERN_SUCCESS) {
		MACH3_DEBUG(1, kr,
			    ("inode_object_data_return: "
			     "vm_deallocate(0x%x,0x%x)",
			     orig_data, orig_length));
	}

	return KERN_SUCCESS;
}
Semaphore::Semaphore(long maxCount) : mValid(true)
{
    semaphore_create(mach_task_self(), &mInternal, SYNC_POLICY_FIFO, 0);
}
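The constructor above ignores maxCount and does not check the result of semaphore_create. For reference, a minimal sketch of the full Mach semaphore lifecycle (create, signal, wait, destroy); this is illustrative and not part of the Semaphore class:

#include <mach/mach.h>
#include <stdio.h>

int main(void) {
    semaphore_t sem;
    // Create a FIFO semaphore with an initial count of 0.
    kern_return_t kr = semaphore_create(mach_task_self(), &sem, SYNC_POLICY_FIFO, 0);
    if (kr != KERN_SUCCESS) {
        fprintf(stderr, "semaphore_create failed: %d\n", kr);
        return 1;
    }

    semaphore_signal(sem);   // count 0 -> 1
    semaphore_wait(sem);     // count 1 -> 0, returns immediately

    // Destroying the semaphore wakes any remaining waiters with KERN_TERMINATED.
    semaphore_destroy(mach_task_self(), sem);
    return 0;
}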
nsresult
nsPerformanceStatsService::GetResources(uint64_t* userTime,
                                        uint64_t* systemTime) const {
  MOZ_ASSERT(userTime);
  MOZ_ASSERT(systemTime);

#if defined(XP_MACOSX)
  // On Mac OS X, to get per-thread data, we need to
  // reach into the kernel.

  mach_msg_type_number_t count = THREAD_BASIC_INFO_COUNT;
  thread_basic_info_data_t info;
  mach_port_t port = mach_thread_self();
  kern_return_t err =
    thread_info(/* [in] targeted thread*/ port,
                /* [in] nature of information*/ THREAD_BASIC_INFO,
                /* [out] thread information */  (thread_info_t)&info,
                /* [inout] number of items */   &count);

  // We do not need the ability to communicate with the thread, so
  // let's release the port.
  mach_port_deallocate(mach_task_self(), port);

  if (err != KERN_SUCCESS)
    return NS_ERROR_FAILURE;

  *userTime = info.user_time.microseconds + info.user_time.seconds * 1000000;
  *systemTime = info.system_time.microseconds + info.system_time.seconds * 1000000;

#elif defined(XP_UNIX)
  struct rusage rusage;
#if defined(RUSAGE_THREAD)
  // Under Linux, we can obtain per-thread statistics
  int err = getrusage(RUSAGE_THREAD, &rusage);
#else
  // Under other Unices, we have to make do with noisier
  // per-process statistics.
  int err = getrusage(RUSAGE_SELF, &rusage);
#endif // defined(RUSAGE_THREAD)

  if (err)
    return NS_ERROR_FAILURE;

  *userTime = rusage.ru_utime.tv_usec + rusage.ru_utime.tv_sec * 1000000;
  *systemTime = rusage.ru_stime.tv_usec + rusage.ru_stime.tv_sec * 1000000;

#elif defined(XP_WIN)
  // Under Windows, we can obtain per-thread statistics. Experience
  // seems to suggest that they are not very accurate under Windows
  // XP, though.
  FILETIME creationFileTime; // Ignored
  FILETIME exitFileTime; // Ignored
  FILETIME kernelFileTime;
  FILETIME userFileTime;
  BOOL success = GetThreadTimes(GetCurrentThread(),
                                &creationFileTime, &exitFileTime,
                                &kernelFileTime, &userFileTime);

  if (!success)
    return NS_ERROR_FAILURE;

  ULARGE_INTEGER kernelTimeInt;
  kernelTimeInt.LowPart = kernelFileTime.dwLowDateTime;
  kernelTimeInt.HighPart = kernelFileTime.dwHighDateTime;
  // Convert 100 ns to 1 us.
  *systemTime = kernelTimeInt.QuadPart / 10;

  ULARGE_INTEGER userTimeInt;
  userTimeInt.LowPart = userFileTime.dwLowDateTime;
  userTimeInt.HighPart = userFileTime.dwHighDateTime;
  // Convert 100 ns to 1 us.
  *userTime = userTimeInt.QuadPart / 10;

#endif // defined(XP_MACOSX) || defined(XP_UNIX) || defined(XP_WIN)

  return NS_OK;
}
Beispiel #23
int main(int argc, char** argv){
  kern_return_t err;
  
  CFMutableDictionaryRef matching = IOServiceMatching("IntelAccelerator");
  if(!matching){
   printf("unable to create service matching dictionary\n");
   return 0;
  }

  io_iterator_t iterator;
  err = IOServiceGetMatchingServices(kIOMasterPortDefault, matching, &iterator);
  if (err != KERN_SUCCESS){
   printf("no matches\n");
   return 0;
  }

  io_service_t service = IOIteratorNext(iterator);
  
  if (service == IO_OBJECT_NULL){
   printf("unable to find service\n");
   return 0;
  }
  printf("got service: %x\n", service);

  io_connect_t conn = MACH_PORT_NULL;
  err = IOServiceOpen(service, mach_task_self(), 1, &conn); // type 1 == IGAccelGLContext
  if (err != KERN_SUCCESS){
   printf("unable to get user client connection\n");
   return 0;
  }

  printf("got userclient connection: %x\n", conn);
  
  uint64_t inputScalar[16];  
  uint64_t inputScalarCnt = 0;

  char inputStruct[4096];
  size_t inputStructCnt = 0;

  uint64_t outputScalar[16];
  uint32_t outputScalarCnt = 0;

  char outputStruct[4096];
  size_t outputStructCnt = 0;

  inputScalarCnt = 0;
  inputStructCnt = 0;

  outputScalarCnt = 0;
  outputStructCnt = 0;

  inputStructCnt = 0x30;

  err = IOConnectCallMethod(
   conn,
   0x205,                 //gst_operation
   inputScalar,
   inputScalarCnt,
   inputStruct,
   inputStructCnt,
   outputScalar,
   &outputScalarCnt,
   outputStruct,
   &outputStructCnt); 

  if (err != KERN_SUCCESS){
   printf("IOConnectCall error: %x\n", err);
   printf("that was an error in the first call, don't care!\n");
  }
  

  
  inputStructCnt = 0x1;

  err = IOConnectCallMethod(
   conn,
   0x206,                 //gst_configure
   inputScalar,
   inputScalarCnt,
   inputStruct,
   inputStructCnt,
   outputScalar,
   &outputScalarCnt,
   outputStruct,
   &outputStructCnt); 

  if (err != KERN_SUCCESS){
   printf("IOConnectCall error: %x\n", err);
   return 0;
  }
}
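Note that the proof-of-concept above never releases the iterator, the service, or the user client connection. A short, purely illustrative cleanup helper that could be called before the final return (the helper name is an assumption):

#include <IOKit/IOKitLib.h>

// Illustrative cleanup for the objects obtained in main() above.
static void cleanup(io_connect_t conn, io_service_t service, io_iterator_t iterator)
{
  IOServiceClose(conn);         // close the user client connection
  IOObjectRelease(service);     // release the matched service
  IOObjectRelease(iterator);    // release the matching iterator
}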
Beispiel #24
/** @name	getMemoryUsage
	@text	Get the current amount of memory used by MOAI and its subsystems. This will
			attempt to return reasonable estimates where exact values cannot be obtained.
			Some fields are informational only (i.e. not double counted in the
			total, but present to assist debugging) and may only be available on certain
			platforms (e.g. Windows). These fields begin with a '_' character.
 
	@out	table	usage		The breakdown of each subsystem's memory usage, in bytes. There is also a "total" field that contains the summed value.
*/
int MOAISim::_getMemoryUsage ( lua_State* L ) {
	
	float divisor = 1.0f;
	
	if( lua_type(L, 1) == LUA_TSTRING )
	{
		cc8* str = lua_tostring(L, 1);
		if( str[0] == 'k' || str[0] == 'K' )
			divisor = 1024.0f;
		else if( str[0] == 'm' || str[0] == 'M' )
			divisor = 1024.0f * 1024.0f;
		else if( str[0] == 'b' || str[0] == 'B' )
			divisor = 1.0f;
	}
	
	size_t total = 0;
	
	lua_newtable(L);
	
	size_t count;
	
	count = MOAILuaRuntime::Get().GetMemoryUsage ();
	lua_pushnumber(L, count / divisor);
	lua_setfield(L, -2, "lua");
	total += count;

	// This is informational only (i.e. don't double count with the previous field).
	// It doesn't actually seem to represent the real memory usage of Lua, but it
	// may still be of interest.
	lua_pushnumber ( L, lua_gc ( L, LUA_GCCOUNTB, 0 ) / divisor );
	lua_setfield ( L, -2, "_luagc_count" );
	
	count = MOAIGfxDevice::Get ().GetTextureMemoryUsage ();
	lua_pushnumber ( L, count / divisor );
	lua_setfield ( L, -2, "texture" );
	total += count;
	
#if defined(_WIN32)
    PROCESS_MEMORY_COUNTERS pmc;

    // Query the process memory counters.
    if ( GetProcessMemoryInfo( GetCurrentProcess(), &pmc, sizeof(pmc)) )
    {
		lua_pushnumber(L, pmc.PagefileUsage / divisor);
		lua_setfield(L, -2, "_sys_vs");
		lua_pushnumber(L, pmc.WorkingSetSize / divisor);
		lua_setfield(L, -2, "_sys_rss");
    }
#elif defined(__APPLE__) //&& defined(TARGET_IPHONE_SIMULATOR) 
	// Query Mach for the task's memory usage (task_info with TASK_BASIC_INFO).
	struct task_basic_info t_info;
	mach_msg_type_number_t t_info_count = TASK_BASIC_INFO_COUNT;
	kern_return_t kr = task_info(mach_task_self(),
								 TASK_BASIC_INFO,
								 reinterpret_cast<task_info_t>(&t_info),
								 &t_info_count);
	// Most likely cause for failure: |task| is a zombie.
	if( kr == KERN_SUCCESS )
	{
		lua_pushnumber(L, t_info.virtual_size / divisor);
		lua_setfield(L, -2, "_sys_vs");
		lua_pushnumber(L, t_info.resident_size / divisor);
		lua_setfield(L, -2, "_sys_rss");
	}
#endif
	
	lua_pushnumber(L, total / divisor);
	lua_setfield(L, -2, "total");
	
	return 1;
}
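The __APPLE__ branch above reads the process's virtual and resident sizes through task_info. The same query in isolation, as a small standalone sketch:

#include <mach/mach.h>
#include <stdio.h>

int main(void) {
	struct task_basic_info info;
	mach_msg_type_number_t count = TASK_BASIC_INFO_COUNT;
	kern_return_t kr = task_info(mach_task_self(), TASK_BASIC_INFO,
	                             (task_info_t)&info, &count);
	if (kr != KERN_SUCCESS) {
		fprintf(stderr, "task_info failed: %d\n", kr);
		return 1;
	}
	// virtual_size and resident_size are reported in bytes.
	printf("virtual:  %lu bytes\n", (unsigned long)info.virtual_size);
	printf("resident: %lu bytes\n", (unsigned long)info.resident_size);
	return 0;
}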
  io_service_t serviceObject;

  // Find a service object for the controller.
  serviceObject = IOServiceGetMatchingService(kIOMasterPortDefault,
    IOServiceMatching("AppleLMUController"));

  // Check a matching service object was found
  if (!serviceObject) {
    fprintf(stderr, "Failed to find service matching \"AppleLMUController\". "
        "Ending program.\n");
    exit(1);
  }
  
  // Open the matching service. The static variable conn is used to allow
  // communication with the IOConnect APIs.
  kr = IOServiceOpen(serviceObject, mach_task_self(), 0, &conn);

  // Release the service object and clean up, checking for errors.
  IOObjectRelease(serviceObject);
  if(kr != KERN_SUCCESS) {
    mach_error("IOServiceOpen: ", kr);
    exit(kr);
  }
}

/* Returns a value between 0 and 0xfff indicating the brightness of the
 * keyboard LED light.
 *
 *  UPDATE
 * Renamed by Flávio Caetano on 2015-06-24
 */
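A hypothetical sketch of a getter matching this description, assuming the conn connection opened above and an assumed selector value (the real selector depends on the AppleLMUController user client and is not taken from the code above):

#include <IOKit/IOKitLib.h>
#include <mach/mach_error.h>

// Hypothetical sketch; kGetLEDBrightnessID is an assumed selector value.
enum { kGetLEDBrightnessID = 1 };

static uint64_t getKeyboardLEDBrightness(io_connect_t conn)
{
  uint64_t in = 0;               // scalar input, unused by this call
  uint64_t out = 0;              // scalar output: brightness, 0..0xfff
  uint32_t outCnt = 1;
  kern_return_t kr = IOConnectCallScalarMethod(conn, kGetLEDBrightnessID,
                                               &in, 1, &out, &outCnt);
  if (kr != KERN_SUCCESS) {
    mach_error("IOConnectCallScalarMethod: ", kr);
    return 0;
  }
  return out;
}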
Beispiel #26
void uv_sem_destroy(uv_sem_t* sem) {
  if (semaphore_destroy(mach_task_self(), *sem))
    abort();
}
Beispiel #27
/* Fetch a directory, as for netfs_get_dirents.  */
static error_t
get_dirents (struct ftpfs_dir *dir,
	     int first_entry, int max_entries, char **data,
	     mach_msg_type_number_t *data_len,
	     vm_size_t max_data_len, int *data_entries)
{
  struct ftpfs_dir_entry *e;
  error_t err = 0;

  if (! dir)
    return ENOTDIR;

  e = dir->ordered;

  /* Find the first entry.  */
  while (first_entry-- > 0)
    if (! e)
      {
	max_entries = 0;
	break;
      }
    else
      e = e->ordered_next;

  if (max_entries != 0)
    {
      size_t size =
	(max_data_len == 0 || max_data_len > DIRENTS_CHUNK_SIZE
	 ? DIRENTS_CHUNK_SIZE
	 : max_data_len);

      *data = mmap (0, size, PROT_READ|PROT_WRITE,
				   MAP_ANON, 0, 0);
      err = ((void *) *data == (void *) -1) ? errno : 0;

      if (! err)
	{
	  char *p = *data;
	  int count = 0;

	  /* See how much space we need for the result.  */
	  while ((max_entries == -1 || count < max_entries) && e)
	    {
	      struct dirent hdr;
	      size_t name_len = strlen (e->name);
	      size_t sz = DIRENT_LEN (name_len);
	      int entry_type =
		e->stat_timestamp ? IFTODT (e->stat.st_mode) : DT_UNKNOWN;

	      if ((p - *data) + sz > size)
		{
		  if (max_data_len > 0)
		    break;
		  else
		    /* Try to grow our return buffer.  */
		    {
		      vm_address_t extension = (vm_address_t)(*data + size);
		      err = vm_allocate (mach_task_self (), &extension,
					 DIRENTS_CHUNK_SIZE, 0);
		      if (err)
			break;
		      size += DIRENTS_CHUNK_SIZE;
		    }
		}

	      hdr.d_namlen = name_len;
	      hdr.d_fileno = e->stat.st_ino;
	      hdr.d_reclen = sz;
	      hdr.d_type = entry_type;

	      memcpy (p, &hdr, DIRENT_NAME_OFFS);
	      strcpy (p + DIRENT_NAME_OFFS, e->name);
	      p += sz;

	      count++;
	      e = e->ordered_next;
	    }

	  if (err)
	    munmap (*data, size);
	  else
	    {
	      vm_address_t alloc_end = (vm_address_t)(*data + size);
	      vm_address_t real_end = round_page (p);
	      if (alloc_end > real_end)
		munmap ((caddr_t) real_end, alloc_end - real_end);
	      *data_len = p - *data;
	      *data_entries = count;
	    }
	}
    }
  else
    {
      *data_len = 0;
      *data_entries = 0;
    }

  return err;
}
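The buffer-growth path above is the part worth calling out: when the dirents overflow the initial mmap'd chunk, vm_allocate is asked for the page range immediately following it (anywhere == 0), so the buffer is extended in place without copying. A minimal sketch of that pattern on its own:

#include <mach/mach.h>
#include <sys/mman.h>
#include <stdio.h>

#define CHUNK (64 * 1024)

int main(void) {
  /* Start with one anonymous, writable chunk.  */
  char *data = mmap (0, CHUNK, PROT_READ|PROT_WRITE, MAP_ANON|MAP_PRIVATE, -1, 0);
  if (data == MAP_FAILED)
    return 1;
  vm_size_t size = CHUNK;

  /* Try to extend the mapping in place by allocating the pages just past
     its end; anywhere == 0 keeps the requested address fixed.  */
  vm_address_t extension = (vm_address_t) (data + size);
  kern_return_t kr = vm_allocate (mach_task_self (), &extension, CHUNK, 0);
  if (kr == KERN_SUCCESS)
    size += CHUNK;              /* the buffer is now contiguous over both chunks */
  else
    fprintf (stderr, "could not grow in place: %d\n", kr);  /* would fall back to copying */

  munmap (data, size);
  return 0;
}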
Beispiel #28
Threading::Semaphore::~Semaphore() throw()
{
	MACH_CHECK(semaphore_destroy(mach_task_self(), (semaphore_t) m_sema));
	__atomic_store_n(&m_counter, 0, __ATOMIC_SEQ_CST);
}
Beispiel #29
static void protCatchOne(void)
{
  protRequestStruct request;
  mach_msg_return_t mr;
  protReplyStruct reply;

  AVER(MACH_PORT_VALID(protExcPort));
  mr = mach_msg(&request.Head,
                /* option */ MACH_RCV_MSG,
                /* send_size */ 0,
                /* receive_limit */ sizeof(request),
                /* receive_name */ protExcPort,
                /* timeout */ MACH_MSG_TIMEOUT_NONE,
                /* notify */ MACH_PORT_NULL);
  AVER(mr == MACH_MSG_SUCCESS);
  if (mr != MACH_MSG_SUCCESS)
    mach_error("ERROR: MPS mach_msg recv\n", mr);  /* .trans.must */

  /* 2407 is the id for the 64-bit exception requests we asked for in
     ProtThreadRegister, with state and identity
     information, determined by experimentation and confirmed by 
     running mig on /usr/include/mach/mach_exc.defs */
  AVER(request.Head.msgh_id == 2407);
  AVER(request.Head.msgh_local_port == protExcPort);
  AVER(request.task.name == mach_task_self());
  AVER(request.exception == EXC_BAD_ACCESS);
  AVER(request.codeCnt == 2);
  AVER(request.old_stateCnt == THREAD_STATE_COUNT);
  AVER(request.flavor == THREAD_STATE_FLAVOR);
  
  /* TODO: This could dispatch to separate worker threads, in order to
     spread scanning work across several cores once the MPS can be
     re-entered. */

  if (request.code[0] == KERN_PROTECTION_FAILURE) {
    MutatorContextStruct context;

    /* The cast via Word suppresses "cast to pointer from integer of
       different size" warnings in GCC, for the  XCI3GC build. */
    MutatorContextInitFault(&context, (Addr)(Word)request.code[1],
                            (void *)request.old_state);
  
    if (ArenaAccess(context.address,
                    AccessREAD | AccessWRITE,
                    &context)) {
      /* Send a reply that will cause the thread to continue.
         Note that ArenaAccess may have updated request.old_state
         via context.thread_state, and that will get copied to the
         reply and affect the state the thread resumes in. */
      protBuildReply(&reply, &request, KERN_SUCCESS);
      protMustSend(&reply.Head);
      return;
    }
  }

  /* We didn't handle the exception -- it wasn't one of ours. */

  /* .assume.only-port: We assume that there was no previously installed
     exception port.  This is checked in ProtThreadRegister, and we don't
     check it again here to avoid the extra system call.  If there
     were, we must arrange to forward the exception message to the
     previous port.  This module used to do that because it installed a
     task-wide exception handler, but the code is pretty hairy and not
     necessary as long as the MPS is registering threads individually.
     If we ever need to reinstate that code, look at
     https://info.ravenbrook.com/project/mps/prototype/2013-06-24/machtest */
  
  protBuildReply(&reply, &request, KERN_FAILURE);
  protMustSend(&reply.Head);
}
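protCatchOne relies on ProtThreadRegister having routed the current thread's EXC_BAD_ACCESS exceptions to protExcPort with the state-and-identity, 64-bit-code behavior that produces msgh_id 2407. A hedged sketch of what such a registration call looks like (port allocation omitted; the helper name is an assumption, and the flavor argument stands for the architecture's thread state flavor, e.g. x86_THREAD_STATE64):

#include <mach/mach.h>

/* Sketch: route this thread's EXC_BAD_ACCESS exceptions to excPort.
   MACH_EXCEPTION_CODES requests 64-bit exception codes; EXCEPTION_STATE_IDENTITY
   asks for thread state plus task/thread identity in the request message, which
   together produce the mach_exception_raise_state_identity id (2407). */
static kern_return_t registerFaultHandler(mach_port_t excPort,
                                          thread_state_flavor_t flavor)
{
  thread_act_t self = mach_thread_self();
  kern_return_t kr = thread_set_exception_ports(self,
                                                EXC_MASK_BAD_ACCESS,
                                                excPort,
                                                EXCEPTION_STATE_IDENTITY | MACH_EXCEPTION_CODES,
                                                flavor);
  mach_port_deallocate(mach_task_self(), self);  /* drop the extra thread-port ref */
  return kr;
}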
Beispiel #30
static void ExceptionThread(mach_port_t port)
{
	Common::SetCurrentThreadName("Mach exception thread");
	#pragma pack(4)
	struct
	{
		mach_msg_header_t Head;
		NDR_record_t NDR;
		exception_type_t exception;
		mach_msg_type_number_t codeCnt;
		int64_t code[2];
		int flavor;
		mach_msg_type_number_t old_stateCnt;
		natural_t old_state[x86_THREAD_STATE64_COUNT];
		mach_msg_trailer_t trailer;
	} msg_in;

	struct
	{
		mach_msg_header_t Head;
		NDR_record_t NDR;
		kern_return_t RetCode;
		int flavor;
		mach_msg_type_number_t new_stateCnt;
		natural_t new_state[x86_THREAD_STATE64_COUNT];
	} msg_out;
	#pragma pack()
	memset(&msg_in, 0xee, sizeof(msg_in));
	memset(&msg_out, 0xee, sizeof(msg_out));
	mach_msg_header_t *send_msg = nullptr;
	mach_msg_size_t send_size = 0;
	mach_msg_option_t option = MACH_RCV_MSG;
	while (true)
	{
		// If this isn't the first run, send the reply message.  Then, receive
		// a message: either a mach_exception_raise_state RPC due to
		// thread_set_exception_ports, or MACH_NOTIFY_NO_SENDERS due to
		// mach_port_request_notification.
		CheckKR("mach_msg_overwrite", mach_msg_overwrite(send_msg, option, send_size, sizeof(msg_in), port, MACH_MSG_TIMEOUT_NONE, MACH_PORT_NULL, &msg_in.Head, 0));

		if (msg_in.Head.msgh_id == MACH_NOTIFY_NO_SENDERS)
		{
			// the other thread exited
			mach_port_destroy(mach_task_self(), port);
			return;
		}

		if (msg_in.Head.msgh_id != 2406)
		{
			PanicAlert("unknown message received");
			return;
		}

		if (msg_in.flavor != x86_THREAD_STATE64)
		{
			PanicAlert("unknown flavor %d (expected %d)", msg_in.flavor, x86_THREAD_STATE64);
			return;
		}

		x86_thread_state64_t *state = (x86_thread_state64_t *) msg_in.old_state;

		bool ok = JitInterface::HandleFault((uintptr_t) msg_in.code[1], state);

		// Set up the reply.
		msg_out.Head.msgh_bits = MACH_MSGH_BITS(MACH_MSGH_BITS_REMOTE(msg_in.Head.msgh_bits), 0);
		msg_out.Head.msgh_remote_port = msg_in.Head.msgh_remote_port;
		msg_out.Head.msgh_local_port = MACH_PORT_NULL;
		msg_out.Head.msgh_id = msg_in.Head.msgh_id + 100;
		msg_out.NDR = msg_in.NDR;
		if (ok)
		{
			msg_out.RetCode = KERN_SUCCESS;
			msg_out.flavor = x86_THREAD_STATE64;
			msg_out.new_stateCnt = x86_THREAD_STATE64_COUNT;
			memcpy(msg_out.new_state, msg_in.old_state, x86_THREAD_STATE64_COUNT * sizeof(natural_t));
		}
		else
		{
			// Pass the exception to the next handler (debugger or crash).
			msg_out.RetCode = KERN_FAILURE;
			msg_out.flavor = 0;
			msg_out.new_stateCnt = 0;
		}
		msg_out.Head.msgh_size = offsetof(__typeof__(msg_out), new_state) + msg_out.new_stateCnt * sizeof(natural_t);

		send_msg = &msg_out.Head;
		send_size = msg_out.Head.msgh_size;
		option |= MACH_SEND_MSG;
	}
}
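For completeness, a hedged sketch of the setup this loop expects: a receive right whose send right is installed as the watched thread's exception port, plus a no-senders notification so the loop above can exit when that thread goes away. Function and variable names here are illustrative, not taken from the code above:

#include <mach/mach.h>
#include <mach/notify.h>

// Illustrative setup for the exception-handling loop above.
static kern_return_t InstallExceptionPort(thread_act_t target, mach_port_t *out_port)
{
	mach_port_t port;
	mach_port_t previous = MACH_PORT_NULL;
	kern_return_t kr;

	// Receive right for the handler thread, plus a send right for the kernel.
	kr = mach_port_allocate(mach_task_self(), MACH_PORT_RIGHT_RECEIVE, &port);
	if (kr != KERN_SUCCESS) return kr;
	kr = mach_port_insert_right(mach_task_self(), port, port, MACH_MSG_TYPE_MAKE_SEND);
	if (kr != KERN_SUCCESS) return kr;

	// Deliver EXC_BAD_ACCESS as a state-carrying message with 64-bit codes,
	// i.e. the mach_exception_raise_state form (msgh_id 2406) handled above.
	kr = thread_set_exception_ports(target, EXC_MASK_BAD_ACCESS, port,
	                                EXCEPTION_STATE | MACH_EXCEPTION_CODES,
	                                x86_THREAD_STATE64);
	if (kr != KERN_SUCCESS) return kr;

	// Ask for MACH_NOTIFY_NO_SENDERS on the port so the handler loop can tear
	// itself down once the watched thread's send right disappears.
	kr = mach_port_request_notification(mach_task_self(), port,
	                                    MACH_NOTIFY_NO_SENDERS, 0, port,
	                                    MACH_MSG_TYPE_MAKE_SEND_ONCE, &previous);
	if (kr != KERN_SUCCESS) return kr;

	*out_port = port;
	return KERN_SUCCESS;
}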