Example #1
int
main(int argc, char **argv)
{
    kern_return_t          kr;
    host_priv_t            host_priv;
    processor_port_array_t processor_list;
    natural_t              processor_count;
    char                  *errmsg = PROGNAME;
   
    if (argc != 2) {
        fprintf(stderr,
                "usage: %s <cmd>, where <cmd> is \"exit\" or \"start\"\n",
                PROGNAME);
        exit(1);
    }
   
    kr = host_get_host_priv_port(mach_host_self(), &host_priv);
    EXIT_ON_MACH_ERROR("host_get_host_priv_port:", kr);   
   
    kr = host_processors(host_priv, &processor_list, &processor_count);
    EXIT_ON_MACH_ERROR("host_processors:", kr);
   
    // disable last processor on a multiprocessor system
    if (processor_count > 1) {
        if (*argv[1] == 'e') {
            kr = processor_exit(processor_list[processor_count - 1]);
            errmsg = "processor_exit:";
        } else if (*argv[1] == 's') {
            kr = processor_start(processor_list[processor_count - 1]);
            errmsg = "processor_start:";
        } else {
            kr = KERN_INVALID_ARGUMENT;
        }
    } else
        printf("Only one processor!\n");
   
    // this will deallocate while rounding up to page size
    (void)vm_deallocate(mach_task_self(), (vm_address_t)processor_list,
                        processor_count * sizeof(processor_t *));
    EXIT_ON_MACH_ERROR(errmsg, kr);
   
    fprintf(stderr, "%s successful\n", errmsg);
   
    exit(0);
}
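The final vm_deallocate in this example relies on the kernel rounding the freed region out to whole pages, as its comment notes. A minimal sketch of the same cleanup with the rounding written out explicitly, assuming only <mach/mach.h> (free_processor_list is a hypothetical helper, not part of the original program):

#include <mach/mach.h>

/* Hypothetical helper: release an out-of-line processor list returned by
 * host_processors(), rounding the byte count up to a whole page the same
 * way vm_deallocate() itself would. */
static void
free_processor_list(processor_port_array_t list, natural_t count)
{
    vm_size_t bytes = count * sizeof(processor_t);
    bytes = (bytes + vm_page_size - 1) & ~(vm_page_size - 1);  /* round up to page size */
    (void)vm_deallocate(mach_task_self(), (vm_address_t)list, bytes);
}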
Example #2
unsigned int
get_num_cpus (void)
{
    unsigned int num_cpus;
    processor_cpu_load_info_data_t *proc_info;
    mach_msg_type_number_t proc_info_size;

    if (host_processor_info(mach_host_self(),
                            PROCESSOR_CPU_LOAD_INFO,
                            &num_cpus,
                            (processor_info_array_t *) &proc_info,
                            &proc_info_size))
    {
        return 0;
    }
    vm_deallocate(mach_task_self(), (vm_address_t) proc_info, proc_info_size);
    return num_cpus;
}
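host_processor_info() reports the array length in units of integer_t rather than bytes, so the exact byte count for vm_deallocate() is proc_info_size * sizeof(integer_t); passing the raw count, as get_num_cpus above does, happens to work for small arrays only because vm_deallocate() rounds the region up to page granularity. A hedged sketch with the size spelled out (query_cpu_count is an illustrative name, not from the example):

#include <mach/mach.h>
#include <mach/processor_info.h>

/* Minimal sketch: fetch the per-CPU load counters, then release the
 * out-of-line array using the byte size computed from the returned
 * count, which is expressed in integer_t units. */
static natural_t
query_cpu_count(void)
{
    natural_t ncpu = 0;
    processor_info_array_t info;
    mach_msg_type_number_t info_count;

    if (host_processor_info(mach_host_self(), PROCESSOR_CPU_LOAD_INFO,
                            &ncpu, &info, &info_count) != KERN_SUCCESS)
        return 0;

    (void)vm_deallocate(mach_task_self(), (vm_address_t)info,
                        (vm_size_t)info_count * sizeof(integer_t));
    return ncpu;
}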
Example #3
void
free(void *data)
{
	register vm_size_t freesize;
	union header *fl;
	union header *addr = ((union header *)data) - 1;

	freesize = get_allocsize(addr->size, &fl);

	if (freesize < kalloc_max) {
	    addr->next = fl->next;
	    fl->next = addr;
	}
	else {
	    (void) vm_deallocate(mach_task_self(), (vm_offset_t)addr,
				 freesize);
	}
}
Example #4
int
macosx_thread_valid (task_t task, thread_t thread)
{
  thread_array_t thread_list;
  unsigned int thread_count = 0;
  kern_return_t kret;

  unsigned int found = 0;
  unsigned int i;

  CHECK_FATAL (task != TASK_NULL);

  kret = task_threads (task, &thread_list, &thread_count);
#ifdef DEBUG_MACOSX_MUTILS
  mutils_debug ("macosx_thread_valid - task_threads (%d, %p, %d) returned 0x%lx\n", task, &thread_list, thread_count, kret);
#endif
  if ((kret == KERN_INVALID_ARGUMENT)
      || (kret == MACH_SEND_INVALID_RIGHT) || (kret == MACH_RCV_INVALID_NAME))
    {
      return 0;
    }
  MACH_CHECK_ERROR (kret);

  for (i = 0; i < thread_count; i++)
    {
      if (thread_list[i] == thread)
        {
          found = 1;
        }
    }

  kret = vm_deallocate (mach_task_self (), (vm_address_t) thread_list,
                        (vm_size_t) (thread_count * sizeof (thread_t)));
  MACH_CHECK_ERROR (kret);

#ifdef DEBUG_MACOSX_MUTILS
  if (!found)
    {
      mutils_debug ("thread 0x%lx no longer valid for task 0x%lx\n",
                    (unsigned long) thread, (unsigned long) task);
    }
#endif
  return found;
}
Example #5
	mach_error_t
freeBranchIsland(
		BranchIsland	*island )
{
	assert( island );
	assert( (*(long*)&island->instructions[0]) == kIslandTemplate[0] );
	assert( island->allocatedHigh );
	
	mach_error_t	err = err_none;
	
	if( island->allocatedHigh ) {
		assert( sizeof( BranchIsland ) <= PAGE_SIZE );
		err = vm_deallocate(mach_task_self(), (vm_address_t) island, PAGE_SIZE );
	} else {
		free( island );
	}
	
	return err;
}
Example #6
UInt32 IOFireWireSBP2LibORB::release( void )
{
	UInt32 retVal = fRefCount;
	
	if( 1 == fRefCount-- ) 
	{
		if( fRangeScratch != NULL )
		{
			// delete it
			vm_deallocate( mach_task_self(), (vm_address_t)fRangeScratch, fRangeScratchLength );
			fRangeScratch = NULL;
			fRangeScratchLength = 0;
		}

		delete this;
    }
	
	return retVal;
}
Example #7
/*
 * fileport_walk
 *
 * Description: Invoke the action function on every fileport in the task.
 *
 *		This could be more efficient if we refactored mach_port_names()
 *		so that (a) it didn't compute the type information unless asked
 *		and (b) it could be asked to -not- unwire/copyout the memory
 *		and (c) if we could ask for port names by kobject type. Not
 *		clear that it's worth all that complexity, though.
 *
 * Parameters: 	task		The target task
 *		action		The function to invoke on each fileport
 *		arg		Anonymous pointer to caller state.
 */
kern_return_t
fileport_walk(task_t task,
	int (*action)(mach_port_name_t, struct fileglob *, void *arg),
	void *arg)
{
	mach_port_name_t *names;
	mach_msg_type_number_t ncnt, tcnt;
	vm_map_copy_t map_copy_names, map_copy_types;
	vm_map_address_t map_names;
	kern_return_t kr;
	uint_t i;
	int rval;

	/*
	 * mach_port_names returns the 'name' and 'types' in copied-in
	 * form.  Discard 'types' immediately, then copyout 'names'
	 * back into the kernel before walking the array.
	 */

	kr = mach_port_names(task->itk_space,
	    (mach_port_name_t **)&map_copy_names, &ncnt,
	    (mach_port_type_t **)&map_copy_types, &tcnt);
	if (kr != KERN_SUCCESS)
		return (kr);

	vm_map_copy_discard(map_copy_types);

	kr = vm_map_copyout(ipc_kernel_map, &map_names, map_copy_names);
	if (kr != KERN_SUCCESS) {
		vm_map_copy_discard(map_copy_names);
		return (kr);
	}
	names = (mach_port_name_t *)(uintptr_t)map_names;

	for (rval = 0, i = 0; i < ncnt; i++)
		if (fileport_invoke(task, names[i], action, arg,
		    &rval) == KERN_SUCCESS && -1 == rval)
			break;		/* early termination clause */

	vm_deallocate(ipc_kernel_map,
	    (vm_address_t)names, ncnt * sizeof (*names));
	return (KERN_SUCCESS);
}
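fileport_walk above runs in the kernel, but the same enumerate-then-free pattern is available to user programs through the public mach_port_names() call: both returned arrays arrive as out-of-line memory in the caller's map and are released with vm_deallocate(). A minimal user-space sketch under that assumption (count_ports is an illustrative helper, not part of the kernel code above):

#include <mach/mach.h>

/* Minimal user-space sketch: list a task's port names, report the count,
 * then free both out-of-line arrays that mach_port_names() allocated in
 * our address space. */
static kern_return_t
count_ports(mach_port_t task, mach_msg_type_number_t *out_count)
{
    mach_port_name_array_t names;
    mach_port_type_array_t types;
    mach_msg_type_number_t ncnt, tcnt;
    kern_return_t kr;

    kr = mach_port_names(task, &names, &ncnt, &types, &tcnt);
    if (kr != KERN_SUCCESS)
        return kr;

    *out_count = ncnt;
    (void)vm_deallocate(mach_task_self(), (vm_address_t)names,
                        ncnt * sizeof(*names));
    (void)vm_deallocate(mach_task_self(), (vm_address_t)types,
                        tcnt * sizeof(*types));
    return KERN_SUCCESS;
}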
Example #8
  /* Write uncompress data to our output buffer.  */
  void zwrite (const char *wbuf, size_t nwrite)
    {
      size_t old_buf_len = *buf_len;

      if (out_buf_offs + nwrite > old_buf_len)
	/* Have to grow the output buffer.  */
	{
	  void *old_buf = *buf;
	  void *new_buf = old_buf + old_buf_len; /* First try.  */
	  size_t new_buf_len = round_page (old_buf_len + old_buf_len + nwrite);

	  /* Try to grow the buffer.  */
	  zerr =
	    vm_allocate (mach_task_self (),
			 (vm_address_t *)&new_buf, new_buf_len - old_buf_len,
			 0);
	  if (zerr)
	    /* Can't do that, try to make a bigger buffer elsewhere.  */
	    {
	      new_buf = old_buf;
	      zerr =
		vm_allocate (mach_task_self (),
			     (vm_address_t *)&new_buf, new_buf_len, 1);
	      if (zerr)
		longjmp (zerr_jmp_buf, 1);

	      if (out_buf_offs > 0)
		/* Copy the old buffer into the start of the new & free it. */
		bcopy (old_buf, new_buf, out_buf_offs);

	      vm_deallocate (mach_task_self (),
			     (vm_address_t)old_buf, old_buf_len);

	      *buf = new_buf;
	    }

	  *buf_len = new_buf_len;
	}

      bcopy (wbuf, *buf + out_buf_offs, nwrite);
      out_buf_offs += nwrite;
    }
Example #9
int main() {
	natural_t cpuCount;
	processor_info_array_t infoArray;
	mach_msg_type_number_t infoCount;
	kern_return_t kr;
	processor_cpu_load_info_data_t* cpuLoadInfo;
	unsigned long old_ticks,new_ticks,old_totalTicks,new_totalTicks;
	int cpu,state;
	FILE *file=fopen("/Users/delphinus/.screen/.cpu.old","r");
	if (file == NULL) old_ticks = old_totalTicks = 0;
	else {
		fscanf(file,"%lu %lu",&old_ticks,&old_totalTicks);
		fclose(file);
	}

	/* get information */
	kr = host_processor_info(mach_host_self(),
			PROCESSOR_CPU_LOAD_INFO, &cpuCount, &infoArray, &infoCount);
	if (kr) {
		mach_error("host_processor_info error:", kr);
		return kr;
	}

	cpuLoadInfo = (processor_cpu_load_info_data_t*) infoArray;

	new_ticks = new_totalTicks = 0;
	for (cpu = 0; cpu<cpuCount; cpu++){
		/* state 0:user, 1:system, 2:idle, 3:nice */
		for (state = 0; state<CPU_STATE_MAX; state++) {
			if(state != 2)
				new_ticks += cpuLoadInfo[cpu].cpu_ticks[state];
			new_totalTicks += cpuLoadInfo[cpu].cpu_ticks[state];
		}
	}

	printf("%5.1lf\n",(double)(new_ticks - old_ticks)/(new_totalTicks - old_totalTicks)*100);
	if((file = fopen("/Users/delphinus/.screen/.cpu.old","w")) != NULL){
		fprintf(file,"%lu %lu",new_ticks,new_totalTicks);
		fclose(file);
	}
	vm_deallocate(mach_task_self(), (vm_address_t)infoArray, infoCount);

	return 0;
}
Example #10
static PyObject *MachTask_vm_read(PyObject *self, PyObject *args) {
    PyObject *buffer;
    kern_return_t err;
    unsigned int address, size;
    vm_offset_t data;
    vm_size_t data_count;

    if (!PyArg_ParseTuple(args, "II", &address, &size))
        return NULL;

    err = vm_read(((MachPort *)self)->port, address, size, &data, &data_count);
    if (err != KERN_SUCCESS) {
        PyErr_SetString(MachError, mach_error_string(err));
        return NULL;
    }

    buffer = PyString_FromStringAndSize((char *)data, data_count);
    vm_deallocate(mach_task_self(), data, data_count);
    return buffer;
}
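The same vm_read()/vm_deallocate() pairing in plain C, as a hedged sketch: the kernel places the out-of-line copy in the caller's address space, so the caller owns it and must release it after copying the bytes out (read_remote is an illustrative helper, not part of the Python binding above):

#include <mach/mach.h>
#include <string.h>

/* Minimal sketch: copy up to `size` bytes from `addr` in `task` into a
 * caller-supplied buffer, then free the out-of-line copy vm_read()
 * left in our map. */
static kern_return_t
read_remote(mach_port_t task, vm_address_t addr, void *dst, vm_size_t size)
{
    vm_offset_t data = 0;
    mach_msg_type_number_t data_count = 0;
    kern_return_t kr;

    kr = vm_read(task, addr, size, &data, &data_count);
    if (kr != KERN_SUCCESS)
        return kr;

    memcpy(dst, (const void *)data, data_count < size ? data_count : size);
    (void)vm_deallocate(mach_task_self(), data, data_count);
    return kr;
}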
Example #11
File: open.c Project: GNOME/libgtop
void
glibtop_open_p (glibtop *server, const char *program_name,
		const unsigned long features, const unsigned flags)
{
        processor_cpu_load_info_data_t *pinfo;
        mach_msg_type_number_t info_count;
        natural_t processor_count;

	/* !!! WE ARE ROOT HERE - CHANGE WITH CAUTION !!! */

	server->name = program_name;

	server->machine->uid = getuid ();
	server->machine->euid = geteuid ();
	server->machine->gid = getgid ();
	server->machine->egid = getegid ();

	/* Drop privileges. */

	if (setreuid (server->machine->euid, server->machine->uid))
		_exit (1);

	if (setregid (server->machine->egid, server->machine->gid))
		_exit (1);
	
	/* !!! END OF SUID ROOT PART !!! */

	/* Our effective uid is now that of the user invoking the server,
	 * so we no longer have any privileges. */

	if (host_processor_info (mach_host_self (),
		PROCESSOR_CPU_LOAD_INFO,
		&processor_count,
		(processor_info_array_t*)&pinfo,
		&info_count)) {
		glibtop_error_io_r (server, "host_processor_info");
	}
	server->ncpu = (processor_count <= GLIBTOP_NCPU) ?
		processor_count : GLIBTOP_NCPU;
	vm_deallocate (mach_task_self (), (vm_address_t) pinfo, info_count);
}
Example #12
int main() {
  kern_return_t err;
  // re map the null page rw
  int var = 0;
  err = vm_deallocate(mach_task_self(), 0x0, 0x1000);
  if (err != KERN_SUCCESS){
    printf("%x\n", err);
  }
  vm_address_t addr = 0;
  err = vm_allocate(mach_task_self(), &addr, 0x1000, 0);
  if (err != KERN_SUCCESS){
    if (err == KERN_INVALID_ADDRESS){
      printf("invalid address\n");
    }
    if (err == KERN_NO_SPACE){
      printf("no space\n");
    }
    printf("%x\n", err);
  }
  char* np = 0;
  for (int i = 0; i < 0x1000; i++){
    np[i] = 'A';
  }


  io_service_t service = IOServiceGetMatchingService(kIOMasterPortDefault, IOServiceMatching("IOSCSIPeripheralDeviceType00"));
  if (service == MACH_PORT_NULL) {
    printf("can't find service\n");
    return 0;
  }

  io_connect_t conn = MACH_PORT_NULL;
  IOServiceOpen(service, mach_task_self(), 12, &conn);  // <-- userclient type 12
  if (conn == MACH_PORT_NULL) {
    printf("can't connect to service\n");
    return 0;
  }

  printf("boom?\n");

  return 0;
}
Example #13
/*
 * free:
 *
 * Free resources
 */
void IOBufferMemoryDescriptor::free()
{
    // Cache all of the relevant information on the stack for use
    // after we call super::free()!
    IOOptionBits options   = _options;
    vm_size_t    size	   = _capacity;
    void *       buffer	   = _buffer;
    vm_map_t	 map	   = 0;
    vm_offset_t  alignment = _alignment;

    if (reserved)
    {
        map = reserved->map;
        IODelete( reserved, ExpansionData, 1 );
    }

    /* super::free may unwire - deallocate buffer afterwards */
    super::free();

    if (buffer)
    {
        if (options & kIOMemoryPageable)
        {
            if (map)
                vm_deallocate(map, (vm_address_t) buffer, round_page_32(size));
            else
                IOFreePageable(buffer, size);
        }
        else
        {
            if (options & kIOMemoryPhysicallyContiguous)
                IOFreeContiguous(buffer, size);
            else if (alignment > 1)
                IOFreeAligned(buffer, size);
            else
                IOFree(buffer, size);
        }
    }
    if (map)
        vm_map_deallocate(map);
}
Example #14
static int __shm_cmem_free(void *mem)
{
  {
    char *p = (char *)mem;
    p -= sizeof(void *);
    mem = (void *)p;
  }
  #if (CLIENT_OS == OS_NEXTSTEP)
  if (vm_deallocate(task_self(), (vm_address_t)mem,
                    (vm_size_t)*((unsigned int *)mem)) != KERN_SUCCESS)
    return -1;

  return 0;
  #elif defined(USE_SYSV_SHM)
  TRACE_OUT((0,"shmdt(%p)\n", mem));
  return shmdt((char *)mem);
  #else
  TRACE_OUT((0,"munmap(%p,%u)\n", mem, *((unsigned int *)mem)));
  return munmap((void *)mem, *((unsigned int *)mem));
  #endif
}
Example #15
int32_t
dns_async_receive(mach_port_t p, char **buf, uint32_t *len, struct sockaddr **from, uint32_t *fromlen)
{
	kern_return_t status;
	char *reply;
	uint32_t rlen;

	reply = NULL;
	rlen = 0;

	status = lu_async_receive(p, &reply, &rlen);
	if (status != KERN_SUCCESS) return NO_RECOVERY;

	status = dns_extract_data(reply, rlen, buf, len, from, fromlen);
	vm_deallocate(mach_task_self(), (vm_address_t)reply, rlen);
	if (status != KERN_SUCCESS) return NO_RECOVERY;

	if (*buf == NULL) return NO_DATA;

	return 0;
}
Example #16
void ksmach_init(void)
{
    kern_return_t kr;
    const task_t thisTask = mach_task_self();
    thread_act_array_t threads;
    mach_msg_type_number_t numThreads;

    if((kr = task_threads(thisTask, &threads, &numThreads)) != KERN_SUCCESS)
    {
        KSLOG_ERROR("task_threads: %s", mach_error_string(kr));
        return;
    }

    g_topThread = pthread_from_mach_thread_np(threads[0]);

    for(mach_msg_type_number_t i = 0; i < numThreads; i++)
    {
        mach_port_deallocate(thisTask, threads[i]);
    }
    vm_deallocate(thisTask, (vm_address_t)threads, sizeof(thread_t) * numThreads);
}
Example #17
int get_cpu_usage(cpu_usage_t **usage_array, int *count)
{
    *count = 0;
    *usage_array = 0;

  	natural_t cpu_count;
	processor_cpu_load_info_data_t *load_info;
	mach_msg_type_number_t info_count;

	kern_return_t error = host_processor_info(
            mach_host_self(),
            PROCESSOR_CPU_LOAD_INFO,
            &cpu_count,
            (processor_info_array_t *) &load_info,
            &info_count);

	if (error) {
		return error;
	}

    *count = cpu_count;
    *usage_array = (cpu_usage_t *) malloc(sizeof(cpu_usage_t) * cpu_count);

    int cpu;
	for (cpu = 0; cpu < cpu_count; cpu++) {
        unsigned int *ticks = load_info[cpu].cpu_ticks;

        (*usage_array)[cpu].user   = ticks[CPU_STATE_USER];
        (*usage_array)[cpu].system = ticks[CPU_STATE_SYSTEM];
        (*usage_array)[cpu].idle   = ticks[CPU_STATE_IDLE];
        (*usage_array)[cpu].nice   = ticks[CPU_STATE_NICE];
    }

	vm_deallocate(
            mach_task_self(),
            (vm_address_t) load_info,
            info_count);

	return 0;
}
Example #18
int main(int argc, char** argv){
  kern_return_t err;
  // re map the null page rw
  int var = 0;
  err = vm_deallocate(mach_task_self(), 0x0, 0x1000);
  if (err != KERN_SUCCESS){
    printf("%x\n", err);
  }
  vm_address_t addr = 0;
  err = vm_allocate(mach_task_self(), &addr, 0x1000, 0);
  if (err != KERN_SUCCESS){
    if (err == KERN_INVALID_ADDRESS){
      printf("invalid address\n");
    }
    if (err == KERN_NO_SPACE){
      printf("no space\n");
    }
    printf("%x\n", err);
  }
  char* np = 0;
  for (int i = 0; i < 0x1000; i++){
    np[i] = '\xff';
  }

  *((uint64_t*)0x28) = 0xffffff4141414141;


  OSSpinLockLock(&lock);

  pthread_t t;
  pthread_create(&t, NULL, thread_func, NULL);


  mach_port_t conn = get_user_client("IOAudioEngine", 0);
  
  set_params(conn);
  OSSpinLockUnlock(&lock);
  IOServiceClose(conn);

}
Example #19
/*
 * Return a Python list of tuple representing per-cpu times
 */
static PyObject*
get_system_per_cpu_times(PyObject* self, PyObject* args)
{
    natural_t cpu_count;
    processor_info_array_t info_array;
    mach_msg_type_number_t info_count;
    kern_return_t error;
    processor_cpu_load_info_data_t* cpu_load_info;
    PyObject* py_retlist = PyList_New(0);
    PyObject* py_cputime;
    int i, ret;

    error = host_processor_info(mach_host_self(), PROCESSOR_CPU_LOAD_INFO,
                                &cpu_count, &info_array, &info_count);
    if (error != KERN_SUCCESS) {
        return PyErr_Format(PyExc_RuntimeError,
              "Error in host_processor_info(): %s", mach_error_string(error));
    }

    cpu_load_info = (processor_cpu_load_info_data_t*) info_array;

    for (i = 0; i < cpu_count; i++) {
        py_cputime = Py_BuildValue("(dddd)",
               (double)cpu_load_info[i].cpu_ticks[CPU_STATE_USER] / CLK_TCK,
               (double)cpu_load_info[i].cpu_ticks[CPU_STATE_NICE] / CLK_TCK,
               (double)cpu_load_info[i].cpu_ticks[CPU_STATE_SYSTEM] / CLK_TCK,
               (double)cpu_load_info[i].cpu_ticks[CPU_STATE_IDLE] / CLK_TCK
              );
        PyList_Append(py_retlist, py_cputime);
        Py_XDECREF(py_cputime);
    }

    ret = vm_deallocate(mach_task_self(), (vm_address_t)info_array,
                        info_count * sizeof(int));
    if (ret != KERN_SUCCESS) {
        printf("vm_deallocate() failed\n");
    }
    return py_retlist;
}
Example #20
void VMemFree(const vm_size_t size, void *pointer)
{
	if ( ( pointer != NULL ) && ( size > 0 ) )
	{
		kern_return_t err = vm_deallocate(	(vm_map_t) mach_task_self(),
										  (vm_address_t)pointer,
										  size );
		
		// Check errors
		
		check(err == KERN_SUCCESS);
		
		if( err != KERN_SUCCESS)
		{
			NSLog(@">> ERROR: Failed to deallocate vm memory of size = %lu",size);
		} // if
	} // if
	else
	{
		NSLog(@">> ERROR: Can't free a NULL vm pointer!");
	} // else
} // VMemFree
Example #21
void
mportset_callback(void)
{
	mach_port_name_array_t members;
	mach_msg_type_number_t membersCnt;
	mach_port_status_t status;
	mach_msg_type_number_t statusCnt;
	struct kevent kev;
	unsigned int i;

	if (os_assumes_zero(mach_port_get_set_status(mach_task_self(), demand_port_set, &members, &membersCnt)) != 0) {
		return;
	}

	for (i = 0; i < membersCnt; i++) {
		statusCnt = MACH_PORT_RECEIVE_STATUS_COUNT;
		if (mach_port_get_attributes(mach_task_self(), members[i], MACH_PORT_RECEIVE_STATUS, (mach_port_info_t)&status,
					&statusCnt) != KERN_SUCCESS) {
			continue;
		}
		if (status.mps_msgcount) {
			EV_SET(&kev, members[i], EVFILT_MACHPORT, 0, 0, 0, job_find_by_service_port(members[i]));
#if 0
			if (kev.udata != NULL) {
#endif
				log_kevent_struct(LOG_DEBUG, &kev, 0);
				(*((kq_callback *)kev.udata))(kev.udata, &kev);
#if 0
			} else {
				log_kevent_struct(LOG_ERR, &kev, 0);
			}
#endif
			/* the callback may have tainted our ability to continue this for loop */
			break;
		}
	}

	(void)os_assumes_zero(vm_deallocate(mach_task_self(), (vm_address_t)members, (vm_size_t) membersCnt * sizeof(mach_port_name_t)));
}
Example #22
IOReturn IOUPSGetCapabilities(mach_port_t connect, int upsID, CFSetRef *capabilities)
{
    IOReturn 		ret;
    void *		buffer = NULL;
    IOByteCount		bufferSize;

    if (!connect || !capabilities)
        return kIOReturnBadArgument;

    ret = io_ups_get_capabilities(connect, upsID, 
                (vm_offset_t *)&buffer, 
                (mach_msg_type_number_t *)&bufferSize);
    
    if ( ret != kIOReturnSuccess )
        return ret;

    *capabilities = IOCFUnserialize(buffer, kCFAllocatorDefault, kNilOptions, NULL);

    vm_deallocate(mach_task_self(), (vm_address_t)buffer, bufferSize);

    return ret;
}
Example #23
PassOwnPtr<PurgeableBuffer> PurgeableBuffer::create(const char* data, size_t size)
{
    if (size < minPurgeableBufferSize)
        return PassOwnPtr<PurgeableBuffer>();

    vm_address_t buffer = 0;
    kern_return_t ret = vm_allocate(mach_task_self(), &buffer, size, VM_FLAGS_PURGABLE | VM_FLAGS_ANYWHERE | VM_TAG_FOR_WEBCORE_PURGEABLE_MEMORY);

    ASSERT(ret == KERN_SUCCESS);
    if (ret != KERN_SUCCESS)
        return PassOwnPtr<PurgeableBuffer>();

    ret = vm_copy(mach_task_self(), reinterpret_cast<vm_address_t>(data), size, buffer);

    ASSERT(ret == KERN_SUCCESS);
    if (ret != KERN_SUCCESS) {
        vm_deallocate(mach_task_self(), buffer, size);
        return PassOwnPtr<PurgeableBuffer>();
    }

    return adoptPtr(new PurgeableBuffer(reinterpret_cast<char*>(buffer), size));
}
Example #24
void ksmc_resumeEnvironment()
{
#if KSCRASH_HAS_THREADS_API
    KSLOG_DEBUG("Resuming environment.");
    kern_return_t kr;
    const task_t thisTask = mach_task_self();
    const thread_t thisThread = (thread_t)ksthread_self();
    thread_act_array_t threads;
    mach_msg_type_number_t numThreads;
    
    if((kr = task_threads(thisTask, &threads, &numThreads)) != KERN_SUCCESS)
    {
        KSLOG_ERROR("task_threads: %s", mach_error_string(kr));
        return;
    }
    
    for(mach_msg_type_number_t i = 0; i < numThreads; i++)
    {
        thread_t thread = threads[i];
        if(thread != thisThread && !isThreadInList(thread, g_reservedThreads, g_reservedThreadsCount))
        {
            if((kr = thread_resume(thread)) != KERN_SUCCESS)
            {
                // Record the error and keep going.
                KSLOG_ERROR("thread_resume (%08x): %s", thread, mach_error_string(kr));
            }
        }
    }
    
    for(mach_msg_type_number_t i = 0; i < numThreads; i++)
    {
        mach_port_deallocate(thisTask, threads[i]);
    }
    vm_deallocate(thisTask, (vm_address_t)threads, sizeof(thread_t) * numThreads);

    KSLOG_DEBUG("Resume complete.");
#endif
}
Example #25
QueryData genCpuTime(QueryContext& context) {
  QueryData results;

  natural_t processor_count;
  processor_cpu_load_info_data_t* processor_times;
  mach_port_t host = mach_host_self();
  mach_msg_type_number_t processor_msg_count;

  kern_return_t ret =
      host_processor_info(host,
                          PROCESSOR_CPU_LOAD_INFO,
                          &processor_count,
                          reinterpret_cast<processor_info_t*>(&processor_times),
                          &processor_msg_count);

  if (ret == KERN_SUCCESS) {
    // Loop through the cores and add rows for each core.
    for (unsigned int core = 0; core < processor_count; core++) {
      Row r;
      r["core"] = INTEGER(core);
      r["user"] = BIGINT(
          ticks_to_usecs(processor_times[core].cpu_ticks[CPU_STATE_USER]));
      r["idle"] = BIGINT(
          ticks_to_usecs(processor_times[core].cpu_ticks[CPU_STATE_IDLE]));
      r["system"] = BIGINT(
          ticks_to_usecs(processor_times[core].cpu_ticks[CPU_STATE_SYSTEM]));
      r["nice"] = BIGINT(
          ticks_to_usecs(processor_times[core].cpu_ticks[CPU_STATE_NICE]));

      results.push_back(r);
    }
    vm_deallocate(
        mach_task_self(),
        reinterpret_cast<vm_address_t>(processor_times),
        static_cast<vm_size_t>(processor_count * sizeof(*processor_times)));
  }
  return results;
}
Example #26
kern_return_t k5_ipc_client_reply (mach_port_t             in_reply_port,
                                   k5_ipc_inl_reply_t      in_inl_reply,
                                   mach_msg_type_number_t  in_inl_replyCnt,
                                   k5_ipc_ool_reply_t      in_ool_reply,
                                   mach_msg_type_number_t  in_ool_replyCnt)
{
    kern_return_t err = KERN_SUCCESS;
    k5_ipc_connection_info cinfo = NULL;
    
    if (!err) {
        err = CALL_INIT_FUNCTION (k5_cli_ipc_thread_init);
    }
    
    if (!err) {
        cinfo = k5_getspecific (K5_KEY_IPC_CONNECTION_INFO);
        if (!cinfo || !cinfo->reply_stream) { err = EINVAL; }
    }
    
    if (!err) {
        if (in_inl_replyCnt) {
            err = k5_ipc_stream_write (cinfo->reply_stream, 
                                       in_inl_reply, in_inl_replyCnt);
            
        } else if (in_ool_replyCnt) {
            err = k5_ipc_stream_write (cinfo->reply_stream, 
                                       in_ool_reply, in_ool_replyCnt);
            
        } else {
            err = EINVAL;
        }
    }
    
    if (in_ool_replyCnt) { vm_deallocate (mach_task_self (), 
                                          (vm_address_t) in_ool_reply, 
                                          in_ool_replyCnt); }
    
    return err;
}
Example #27
static void cheap_extend(NXStream *s, int desiredSize)
{
    int		    new_size = s->buf_size;
    char	   *new_addr;
    int		    cur_offset = 0;
    kern_return_t   ret;
    
    while (new_size < desiredSize)
	new_size = new_size * 2;
    if (new_size >= MAX_MALLOC && new_size < CHUNK_SIZE)
	new_size = CHUNK_SIZE;
    if (new_size < MAX_MALLOC)
	new_addr = malloc(new_size);
    else {
	new_size = round_page(new_size);
	ret = vm_allocate(mach_task_self(), (vm_address_t *)&new_addr, new_size, 
	    TRUE);
	if (ret != KERN_SUCCESS)
	    NX_RAISE(NX_streamVMError, s, (void *)ret);
    }
    if (s->buf_base) {
	cur_offset = s->buf_ptr - s->buf_base;
	betterCopy(s->buf_base, s->buf_size, new_addr, s);
	if (s->buf_size < MAX_MALLOC)
	    free(s->buf_base);
	else {
	    ret = vm_deallocate(mach_task_self(),
				(vm_offset_t)s->buf_base,
				(vm_size_t)s->buf_size);
	    if (ret != KERN_SUCCESS)
		NX_RAISE(NX_streamVMError, s, (void *)ret);
	}
    }
    s->buf_base = (unsigned char *)new_addr;
    s->buf_left += new_size - s->buf_size;
    s->buf_size = new_size;
    s->buf_ptr = s->buf_base + cur_offset;
}
Example #28
static void memory_extend(register NXStream *s, int size)
{
    vm_size_t       new_size;
    vm_offset_t     new_addr;
    int             cur_offset;
    kern_return_t   ret;

    new_size = (size + CHUNK_SIZE) & (~(vm_page_size - 1));
    ret = vm_allocate(mach_task_self(), &new_addr, new_size, TRUE);
    if (ret != KERN_SUCCESS)
	NX_RAISE(NX_streamVMError, s, (void *)ret);
    cur_offset = 0;
    if (s->buf_base) {
	int             copySize;

	copySize = s->buf_size;
	if (copySize % vm_page_size)
	    copySize += vm_page_size - (copySize % vm_page_size);
	ret = vm_copy(mach_task_self(),
		      (vm_offset_t)s->buf_base,
		      (vm_size_t)copySize,
		      (vm_offset_t)new_addr);
	if (ret != KERN_SUCCESS)
	    NX_RAISE(NX_streamVMError, s, (void *)ret);
	ret = vm_deallocate(mach_task_self(),
			    (vm_offset_t)s->buf_base,
			    (vm_size_t)s->buf_size);
	if (ret != KERN_SUCCESS)
	    NX_RAISE(NX_streamVMError, s, (void *)ret);
	cur_offset = s->buf_ptr - s->buf_base;
    }
    s->buf_base = (unsigned char *)new_addr;
    s->buf_size = new_size;
    s->buf_ptr = s->buf_base + cur_offset;
    s->buf_left = new_size - size;
    s->flags &= ~NX_USER_OWNS_BUF;
}
Example #29
static void
darwin_init_cpu_monitor(void)
{
  kern_return_t r;
  processor_info_t pinfo;
  mach_msg_type_number_t msg_count;

  p_sys = prop_create(prop_get_global(), "system");

  r = host_processor_info(mach_host_self (),
                          PROCESSOR_CPU_LOAD_INFO,
                          &cpu_count,
                          (processor_info_array_t *)&pinfo,
                          &msg_count);
  if(r != KERN_SUCCESS) {
    TRACE(TRACE_ERROR, "darwin",
          "host_processor_info(PROCESSOR_CPU_LOAD_INFO) failed %d", r);
    return;
  }
  
  p_cpu = calloc(cpu_count, sizeof(prop_t *));
  p_load  = calloc(cpu_count, sizeof(prop_t *));
  last_total = calloc(cpu_count, sizeof(unsigned int));
  last_idle = calloc(cpu_count, sizeof(unsigned int));
  
  prop_set_int(prop_create(prop_create(p_sys, "cpuinfo"), "available"), 1);
  p_cpuroot =  prop_create(prop_create(p_sys, "cpuinfo"), "cpus");
  
  vm_deallocate(mach_task_self(),
                (vm_address_t)pinfo,
                (vm_size_t)sizeof(*pinfo) * msg_count);
  
  cpu_monitor_do();
  mem_monitor_do();
  callout_arm(&timer, timercb, NULL, 1);
}
Example #30
void
osfmach3_exit_mmap(
	struct mm_struct * mm)
{
	struct osfmach3_mach_task_struct *mach_task;
	kern_return_t	kr;

	mach_task = mm->mm_mach_task;

	user_memory_flush_task(mach_task);

	/* flush the memory out of the kernel cache */
	server_thread_blocking(FALSE);
	kr = vm_msync(mach_task->mach_task_port,
		      VM_MIN_ADDRESS,
		      VM_MAX_ADDRESS - VM_MIN_ADDRESS,
		      VM_SYNC_SYNCHRONOUS);
	server_thread_unblocking(FALSE);
	if (kr != KERN_SUCCESS) {
		if (kr != MACH_SEND_INVALID_DEST &&
		    kr != KERN_INVALID_ARGUMENT) {
			MACH3_DEBUG(1, kr, ("osfmach3_exit_mmap: vm_msync"));
		}
	}

	kr = vm_deallocate(mach_task->mach_task_port,
			   VM_MIN_ADDRESS,
			   VM_MAX_ADDRESS - VM_MIN_ADDRESS);
	if (kr != KERN_SUCCESS) {
		if (kr != MACH_SEND_INVALID_DEST &&
		    kr != KERN_INVALID_ARGUMENT) {
			MACH3_DEBUG(1, kr,
				    ("osfmach3_exit_mmap: vm_deallocate"));
		}
	}
}