Example #1
/*
 * lck_rw_clear_promotion: Undo priority promotions when the last RW
 * lock is released by a thread (if a promotion was active)
 */
void lck_rw_clear_promotion(thread_t thread)
{
	assert(thread->rwlock_count == 0);

	/* Cancel any promotions if the thread had actually blocked while holding a RW lock */
	spl_t s = splsched();

	thread_lock(thread);

	if (thread->sched_flags & TH_SFLAG_RW_PROMOTED) {
		thread->sched_flags &= ~TH_SFLAG_RW_PROMOTED;

		if (thread->sched_flags & TH_SFLAG_PROMOTED) {
			/* Thread still has a mutex promotion */
		} else if (thread->sched_flags & TH_SFLAG_DEPRESSED_MASK) {
			KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_RW_DEMOTE) | DBG_FUNC_NONE,
			                      (uintptr_t)thread_tid(thread), thread->sched_pri, DEPRESSPRI, 0, 0);

			set_sched_pri(thread, DEPRESSPRI);
		} else {
			KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_RW_DEMOTE) | DBG_FUNC_NONE,
			                      (uintptr_t)thread_tid(thread), thread->sched_pri, thread->base_pri, 0, 0);

			thread_recompute_sched_pri(thread, FALSE);
		}
	}

	thread_unlock(thread);
	splx(s);
}
Example #2
/* just keep sending and receiving for a while */
THREAD_FUNC(pipe_child, varg) {
    pipe_child_arg_t *arg = (pipe_child_arg_t*)varg;
    int id = arg->id;
    sock_t fd = arg->sock;
    int i, n;
    char buf[CHILD_BUF_MAX];
    tid_t tid;
    tid = thread_tid();
    for(i=0; i<CHILD_CALL_MAX; i++) {
        /* buf is never initialized: only the byte counts matter in this test */
        n = rand_u32(CHILD_BUF_MAX);
        n = send(fd, buf, n, 0);
        printf("pipe_child[tid=%d]: send n=%d\n", tid, n);
        if( n <= 0 ) break;

        n = recv(fd, buf, sizeof(buf), 0);
        printf("pipe_child[tid=%d]: recv n=%d\n", tid, n);
        if( n <= 0 ) break;

        //usleep((double)rand() * CHILD_SLEEP_RAND / RAND_MAX);
    }
    sock_close(fd);
    free(arg);

    return 0;
}
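The types above (THREAD_FUNC, sock_t, tid_t, rand_u32) come from the example's test harness. A minimal self-contained sketch of the same send/recv ping-pong, assuming only POSIX sockets and pthreads (names here are illustrative, not the harness'):

/* Hypothetical stand-alone analogue of the ping-pong loop above,
 * using a POSIX socketpair instead of the harness' sock_t. */
#include <pthread.h>
#include <stdlib.h>
#include <string.h>
#include <sys/socket.h>
#include <unistd.h>

#define BUF_MAX  1024
#define CALL_MAX 16

static void *pingpong(void *arg)
{
    int fd = *(int *)arg;
    char buf[BUF_MAX];

    memset(buf, 'x', sizeof(buf));               /* payload content is irrelevant */
    for (int i = 0; i < CALL_MAX; i++) {
        ssize_t n = send(fd, buf, 1 + rand() % BUF_MAX, 0);
        if (n <= 0) break;
        n = recv(fd, buf, sizeof(buf), 0);
        if (n <= 0) break;
    }
    close(fd);
    return NULL;
}

int main(void)
{
    int fds[2];
    pthread_t tid;

    if (socketpair(AF_UNIX, SOCK_STREAM, 0, fds) != 0) return 1;
    pthread_create(&tid, NULL, pingpong, &fds[0]);

    /* echo everything back on the other end until the peer closes */
    char buf[BUF_MAX];
    ssize_t n;
    while ((n = recv(fds[1], buf, sizeof(buf), 0)) > 0)
        if (send(fds[1], buf, n, 0) <= 0) break;

    close(fds[1]);
    pthread_join(tid, NULL);
    return 0;
}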
Example #3
// dump status to stderr 
void dump_debug_info()
{
  output("\n\n-- Capriccio Status Dump --\n");
  output("Current thread %d  (%s)\n", 
         current_thread ? (int)thread_tid(current_thread) : -1,
         (current_thread && current_thread->name) ? current_thread->name : "noname");
  output("threads:    %d runnable    %d suspended    %d daemon\n", 
         num_runnable_threads, num_suspended_threads, num_daemon_threads);

  print_resource_stats();

  stack_report_usage_stats();
  stack_report_call_stats();

  {
    int i;
    output("thread locations:");
    for(i=0; i<bg_num_nodes; i++) {
      bg_node_t *node = bg_nodelist[i];
      if(node->num_here)
        output("  %d:%d", node->node_num, node->num_here);
    }
    output("\n");
  }

  output("\n\n");
}
Example #4
/*
 * Callout from context switch if the thread goes
 * off core with a positive rwlock_count
 *
 * Called at splsched with the thread locked
 */
void
lck_rw_set_promotion_locked(thread_t thread)
{
	if (LcksOpts & disLkRWPrio)
		return;

	integer_t priority;

	priority = thread->sched_pri;

	if (priority < thread->base_pri)
		priority = thread->base_pri;
	if (priority < BASEPRI_BACKGROUND)
		priority = BASEPRI_BACKGROUND;

	if ((thread->sched_pri < priority) ||
	    !(thread->sched_flags & TH_SFLAG_RW_PROMOTED)) {
		KERNEL_DEBUG_CONSTANT(
		        MACHDBG_CODE(DBG_MACH_SCHED, MACH_RW_PROMOTE) | DBG_FUNC_NONE,
		        (uintptr_t)thread_tid(thread), thread->sched_pri,
		        thread->base_pri, priority, 0);

		thread->sched_flags |= TH_SFLAG_RW_PROMOTED;

		if (thread->sched_pri < priority)
			set_sched_pri(thread, priority);
	}
}
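A user-space analogue may make the promotion floor computed above easier to see: the target priority is the maximum of the current priority, the base priority, and BASEPRI_BACKGROUND, and the thread is only ever boosted, never lowered. This is an illustrative sketch, not XNU code; the constant's value is assumed for the example only.

/* Illustrative user-space analogue of the RW promotion floor (not XNU code). */
#include <assert.h>

#define BASEPRI_BACKGROUND 46   /* assumed value, for this sketch only */

struct fake_thread {
    int sched_pri;
    int base_pri;
    unsigned promoted : 1;
};

static void rw_promote(struct fake_thread *t)
{
    int floor = t->sched_pri;

    if (floor < t->base_pri)        floor = t->base_pri;
    if (floor < BASEPRI_BACKGROUND) floor = BASEPRI_BACKGROUND;

    t->promoted = 1;
    if (t->sched_pri < floor)
        t->sched_pri = floor;       /* boost only, never demote */
}

int main(void)
{
    struct fake_thread t = { .sched_pri = 4, .base_pri = 31, .promoted = 0 };
    rw_promote(&t);
    assert(t.sched_pri == BASEPRI_BACKGROUND && t.promoted);
    return 0;
}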
Example #5
/* code to collect current thread info */
void
kperf_threadinfo_sample(struct threadinfo *ti, struct kperf_context *context)
{
	thread_t cur_thread = context->cur_thread;
	BUF_INFO1( PERF_TI_SAMPLE, (uintptr_t)cur_thread );

	// fill out the fields
	ti->pid = context->cur_pid;
	ti->tid = thread_tid(cur_thread);
	ti->dq_addr = thread_dispatchqaddr(cur_thread);
	ti->runmode = make_runmode(cur_thread);
}
Example #6
void dump_thread_state()
{
  void *bt[100];
  int count, i;
  linked_list_entry_t *e;

  output("\n-- State of all threads --\n");

  e = ll_view_head(threadlist);
  while (e) {
    char sleepstr[80];
    thread_t *t = (thread_t *) pl_get_pointer(e);

    output("Thread %2d (%s)  %s    %d file   %d sock    %ld KB heap   ? KB stack    %s%s%s\n", 
           thread_tid(t), t->name, THREAD_STATES[t->state], 
           t->curr_stats.files, 
           t->curr_stats.sockets, 
           t->curr_stats.heap / 1024,
           //(long)-1, // FIXME: add total stack numbers after incorporating Jeremy's code
           t->joinable ? "joinable  " : "",
           t->daemon ? "daemon  " : "",
           t->sleep > 0 ? (sprintf(sleepstr,"sleep=%lld  ",t->sleep), sleepstr) : "");

    if( conf_show_thread_stacks ) {
      count = co_backtrace(t->coro, bt, 100);
      if (count == 100)
        output("WARN: only output first 100 stack frames.\n");
      
#if (0)
      {
        void **frame;
        frame = bt;
        while( count-- )
          output("    %p\n",*(frame++));
      }
#else
      {
        // NOTE: backtrace_symbols_fd causes a loop w/ our IO functions.
        char **p = backtrace_symbols(bt, count);
        for (i = 0; i < count; i++)
          output("   %s\n", *(p+i));
        free(p);
      }
#endif
      
      output("\n");
    }
      
    e = ll_view_next(threadlist, e);
  }

  return;
}
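The #else branch above relies on glibc's backtrace_symbols(). A stand-alone sketch of that pattern (names here are illustrative, not Capriccio's):

/* Minimal sketch of the backtrace()/backtrace_symbols() pattern used above. */
#include <execinfo.h>
#include <stdio.h>
#include <stdlib.h>

static void print_backtrace(void)
{
    void *bt[100];
    int count = backtrace(bt, 100);
    if (count == 100)
        fprintf(stderr, "WARN: only printing first 100 stack frames.\n");

    char **symbols = backtrace_symbols(bt, count);  /* malloc'd array */
    if (symbols == NULL)
        return;
    for (int i = 0; i < count; i++)
        fprintf(stderr, "   %s\n", symbols[i]);
    free(symbols);   /* only the array is freed; the strings live inside it */
}

int main(void)
{
    print_backtrace();
    return 0;
}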
Example #7
void
kperf_on_cpu_internal(thread_t thread, thread_continue_t continuation,
                      uintptr_t *starting_fp)
{
	if (kperf_kdebug_cswitch) {
		/* trace the new thread's PID for Instruments */
		int pid = task_pid(get_threadtask(thread));

		BUF_DATA(PERF_TI_CSWITCH, thread_tid(thread), pid);
	}
	if (kperf_lightweight_pet_active) {
		kperf_pet_on_cpu(thread, continuation, starting_fp);
	}
}
Example #8
static void
syscall_handler (struct intr_frame *f) 
{
  
  printf ("In System call handler!\n");
  // Extracting Stack Pointer
  void * esp = f->esp;
  hex_dump(esp,esp,20,true);
  
  printf("Esp %p\n",esp); 
  // First 4 bytes represent sys
  int syscall_number = *((int *) esp);
  esp+= 4;


  
  
  //printf("Stack pointer %x\n",esp);
  char * buffer;
  int fd;
  unsigned length;
  switch (syscall_number)
  {
    case SYS_WRITE:
      fd = *((int *) esp);
      esp += 4;

      /* The stack slot holds a pointer to the user buffer, not the string itself */
      buffer = *((char **) esp);
      //buffer = user_to_kernel_ptr((const void *) buffer);
      esp += 4;
      length = *((unsigned *) esp);
      esp += 4;

      printf("SYS_WRITE requested\n");
      printf("Extracted Arguments:\n");
      printf("Buffer str: %s\n", buffer);
      printf("fd : %d\nbuffer: %p\nlength: %u\n", fd, (void *) buffer, length);
      break;
    default:
      printf("No match\n");
  }

  if (thread_tid () == child.id)
    child.flag = 1;
  thread_exit ();
}
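The handler above walks the user stack in 4-byte steps: the syscall number first, then each argument in order. Below is a stand-alone sketch of that pattern against a simulated stack; pop_u32() and the stack contents are hypothetical, and in the real handler the user buffer address must still be validated and translated (e.g. via user_to_kernel_ptr) before it is dereferenced.

/* Simulated 32-bit user stack: syscall number followed by three arguments.
 * pop_u32() is a hypothetical helper, not part of Pintos. */
#include <assert.h>
#include <stdint.h>
#include <string.h>

static uint32_t pop_u32(uint8_t **esp)
{
    uint32_t v;
    memcpy(&v, *esp, sizeof v);   /* memcpy avoids alignment assumptions */
    *esp += sizeof v;
    return v;
}

int main(void)
{
    /* Pretend layout for a write call: number, fd, user buffer address, length */
    uint32_t words[4] = { 9 /* assumed SYS_WRITE */, 1, 0xC0101000u, 5 };
    uint8_t stack[sizeof words];
    memcpy(stack, words, sizeof words);

    uint8_t *esp = stack;
    uint32_t nr        = pop_u32(&esp);
    uint32_t fd        = pop_u32(&esp);
    uint32_t buf_uaddr = pop_u32(&esp);  /* still a user-space address here */
    uint32_t length    = pop_u32(&esp);

    assert(nr == 9 && fd == 1 && buf_uaddr == 0xC0101000u && length == 5);
    return 0;
}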
Example #9
/*
 * Complete the shutdown and place the processor offline.
 *
 * Called at splsched in the shutdown context.
 * This performs a minimal thread_invoke() to the idle thread,
 * so it needs to be kept in sync with what thread_invoke() does.
 *
 * The onlining half of this is done in load_context().
 */
void
processor_offline(
	processor_t			processor)
{
	assert(processor == current_processor());
	assert(processor->active_thread == current_thread());

	thread_t old_thread = processor->active_thread;
	thread_t new_thread = processor->idle_thread;

	processor->active_thread = new_thread;
	processor->current_pri = IDLEPRI;
	processor->current_thmode = TH_MODE_NONE;
	processor->starting_pri = IDLEPRI;
	processor->current_sfi_class = SFI_CLASS_KERNEL;
	processor->deadline = UINT64_MAX;
	new_thread->last_processor = processor;

	uint64_t ctime = mach_absolute_time();

	processor->last_dispatch = ctime;
	old_thread->last_run_time = ctime;

	/* Update processor->thread_timer and ->kernel_timer to point to the new thread */
	thread_timer_event(ctime, &new_thread->system_timer);
	PROCESSOR_DATA(processor, kernel_timer) = &new_thread->system_timer;

	timer_stop(PROCESSOR_DATA(processor, current_state), ctime);

	KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
	                          MACHDBG_CODE(DBG_MACH_SCHED, MACH_SCHED) | DBG_FUNC_NONE,
	                          old_thread->reason, (uintptr_t)thread_tid(new_thread),
	                          old_thread->sched_pri, new_thread->sched_pri, 0);

	machine_set_current_thread(new_thread);

	thread_dispatch(old_thread, new_thread);

	PMAP_DEACTIVATE_KERNEL(processor->cpu_id);

	cpu_sleep();
	panic("zombie processor");
	/*NOTREACHED*/
}
Example #10
static void
thread_suspended(__unused void *parameter, wait_result_t result)
{
	thread_t thread = current_thread();

	thread_mtx_lock(thread);

	if (result == THREAD_INTERRUPTED)
		thread->suspend_parked = FALSE;
	else
		assert(thread->suspend_parked == FALSE);

	if (thread->suspend_count > 0) {
		thread_set_apc_ast(thread);
	} else {
		spl_t s = splsched();

		thread_lock(thread);
		if (thread->sched_flags & TH_SFLAG_DEPRESSED_MASK) {
			thread->sched_pri = DEPRESSPRI;
			thread->last_processor->current_pri = thread->sched_pri;
			thread->last_processor->current_perfctl_class = thread_get_perfcontrol_class(thread);

			KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_SCHED_CHANGE_PRIORITY),
			                      (uintptr_t)thread_tid(thread),
			                      thread->base_pri,
			                      thread->sched_pri,
			                      0, /* eventually, 'reason' */
			                      0);
		}
		thread_unlock(thread);
		splx(s);
	}

	thread_mtx_unlock(thread);

	thread_exception_return();
	/*NOTREACHED*/
}
Example #11
/*
 * Create a new thread.
 * Doesn't start the thread running.
 */
static kern_return_t
thread_create_internal(
	task_t					parent_task,
	integer_t				priority,
	thread_continue_t		continuation,
	int						options,
#define TH_OPTION_NONE		0x00
#define TH_OPTION_NOCRED	0x01
#define TH_OPTION_NOSUSP	0x02
	thread_t				*out_thread)
{
	thread_t				new_thread;
	static thread_t			first_thread = THREAD_NULL;

	/*
	 *	Allocate a thread and initialize static fields
	 */
	if (first_thread == THREAD_NULL)
		new_thread = first_thread = current_thread();
	else
		new_thread = (thread_t)zalloc(thread_zone);
	if (new_thread == THREAD_NULL)
		return (KERN_RESOURCE_SHORTAGE);

	if (new_thread != first_thread)
		*new_thread = thread_template;

#ifdef MACH_BSD
	new_thread->uthread = uthread_alloc(parent_task, new_thread, (options & TH_OPTION_NOCRED) != 0);
	if (new_thread->uthread == NULL) {
		zfree(thread_zone, new_thread);
		return (KERN_RESOURCE_SHORTAGE);
	}
#endif  /* MACH_BSD */

	if (machine_thread_create(new_thread, parent_task) != KERN_SUCCESS) {
#ifdef MACH_BSD
		void *ut = new_thread->uthread;

		new_thread->uthread = NULL;
		/* cred free may not be necessary */
		uthread_cleanup(parent_task, ut, parent_task->bsd_info);
		uthread_cred_free(ut);
		uthread_zone_free(ut);
#endif  /* MACH_BSD */

		zfree(thread_zone, new_thread);
		return (KERN_FAILURE);
	}

	new_thread->task = parent_task;

	thread_lock_init(new_thread);
	wake_lock_init(new_thread);

	lck_mtx_init(&new_thread->mutex, &thread_lck_grp, &thread_lck_attr);

	ipc_thread_init(new_thread);
	queue_init(&new_thread->held_ulocks);

	new_thread->continuation = continuation;

	lck_mtx_lock(&tasks_threads_lock);
	task_lock(parent_task);

	if (	!parent_task->active || parent_task->halting ||
			((options & TH_OPTION_NOSUSP) != 0 &&
			 	parent_task->suspend_count > 0)	||
			(parent_task->thread_count >= task_threadmax &&
				parent_task != kernel_task)		) {
		task_unlock(parent_task);
		lck_mtx_unlock(&tasks_threads_lock);

#ifdef MACH_BSD
		{
			void *ut = new_thread->uthread;

			new_thread->uthread = NULL;
			uthread_cleanup(parent_task, ut, parent_task->bsd_info);
			/* cred free may not be necessary */
			uthread_cred_free(ut);
			uthread_zone_free(ut);
		}
#endif  /* MACH_BSD */
		ipc_thread_disable(new_thread);
		ipc_thread_terminate(new_thread);
		lck_mtx_destroy(&new_thread->mutex, &thread_lck_grp);
		machine_thread_destroy(new_thread);
		zfree(thread_zone, new_thread);
		return (KERN_FAILURE);
	}

	/* New threads inherit any default state on the task */
	machine_thread_inherit_taskwide(new_thread, parent_task);

	task_reference_internal(parent_task);

	if (new_thread->task->rusage_cpu_flags & TASK_RUSECPU_FLAGS_PERTHR_LIMIT) {
		/*
		 * This task has a per-thread CPU limit; make sure this new thread
		 * gets its limit set too, before it gets out of the kernel.
		 */
		set_astledger(new_thread);
	}
	new_thread->t_threadledger = LEDGER_NULL;	/* per thread ledger is not inherited */
	new_thread->t_ledger = new_thread->task->ledger;
	if (new_thread->t_ledger)
		ledger_reference(new_thread->t_ledger);

	/* Cache the task's map */
	new_thread->map = parent_task->map;

	/* Chain the thread onto the task's list */
	queue_enter(&parent_task->threads, new_thread, thread_t, task_threads);
	parent_task->thread_count++;
	
	/* So terminating threads don't need to take the task lock to decrement */
	hw_atomic_add(&parent_task->active_thread_count, 1);

	/* Protected by the tasks_threads_lock */
	new_thread->thread_id = ++thread_unique_id;

	queue_enter(&threads, new_thread, thread_t, threads);
	threads_count++;

	timer_call_setup(&new_thread->wait_timer, thread_timer_expire, new_thread);
	timer_call_setup(&new_thread->depress_timer, thread_depress_expire, new_thread);

#if CONFIG_COUNTERS
	/*
	 * If parent task has any reservations, they need to be propagated to this
	 * thread.
	 */
	new_thread->t_chud = (TASK_PMC_FLAG == (parent_task->t_chud & TASK_PMC_FLAG)) ? 
		THREAD_PMC_FLAG : 0U;
#endif

	/* Set the thread's scheduling parameters */
	new_thread->sched_mode = SCHED(initial_thread_sched_mode)(parent_task);
	new_thread->sched_flags = 0;
	new_thread->max_priority = parent_task->max_priority;
	new_thread->task_priority = parent_task->priority;
	new_thread->priority = (priority < 0)? parent_task->priority: priority;
	if (new_thread->priority > new_thread->max_priority)
		new_thread->priority = new_thread->max_priority;
#if CONFIG_EMBEDDED 
	if (new_thread->priority < MAXPRI_THROTTLE) {
		new_thread->priority = MAXPRI_THROTTLE;
	}
#endif /* CONFIG_EMBEDDED */
	new_thread->importance =
					new_thread->priority - new_thread->task_priority;
#if CONFIG_EMBEDDED
	new_thread->saved_importance = new_thread->importance;
	/* apple ios daemon starts all threads in darwin background */
	if (parent_task->ext_appliedstate.apptype == PROC_POLICY_IOS_APPLE_DAEMON) {
		/* Cannot use generic routines here so apply darwin background directly */
		new_thread->policystate.hw_bg = TASK_POLICY_BACKGROUND_ATTRIBUTE_ALL;
		/* set thread self backgrounding */
		new_thread->appliedstate.hw_bg = new_thread->policystate.hw_bg;
		/* priority will get recomputed suitably a bit later */
		new_thread->importance = INT_MIN;
		/* to avoid changes to many pri compute routines, set the effect of those here */
		new_thread->priority = MAXPRI_THROTTLE;
	}
#endif /* CONFIG_EMBEDDED */

#if defined(CONFIG_SCHED_TRADITIONAL)
	new_thread->sched_stamp = sched_tick;
	new_thread->pri_shift = sched_pri_shift;
#endif
	SCHED(compute_priority)(new_thread, FALSE);

	new_thread->active = TRUE;

	*out_thread = new_thread;

	{
		long	dbg_arg1, dbg_arg2, dbg_arg3, dbg_arg4;

		kdbg_trace_data(parent_task->bsd_info, &dbg_arg2);

		KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, 
			TRACEDBG_CODE(DBG_TRACE_DATA, 1) | DBG_FUNC_NONE,
			(vm_address_t)(uintptr_t)thread_tid(new_thread), dbg_arg2, 0, 0, 0);

		kdbg_trace_string(parent_task->bsd_info,
							&dbg_arg1, &dbg_arg2, &dbg_arg3, &dbg_arg4);

		KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, 
			TRACEDBG_CODE(DBG_TRACE_STRING, 1) | DBG_FUNC_NONE,
			dbg_arg1, dbg_arg2, dbg_arg3, dbg_arg4, 0);
	}

	DTRACE_PROC1(lwp__create, thread_t, *out_thread);

	return (KERN_SUCCESS);
}
Example #12
#if 0

/* translated from the APF */

APTIAKernelEntry_t *threadInfo = (APTIAKernelEntry_t*)(threadInfos + account->offset);

context->timeStamp = mach_absolute_time();
context->cpuNum = chudxnu_cpu_number();

// record the process info from the callback context
context->pid = chudxnu_current_pid();
threadInfo->pid = context->generic->pid;

// thread_tid is a thread_t to ID function in the kernel
context->threadID = chudxnu_current_thread();
threadInfo->tid = thread_tid(context->generic->threadID);

// also a kernel function
threadInfo->dispatch_queue_addr = thread_dispatchqaddr(context->generic->threadID);

// see below
threadInfo->runMode = AppleProfileGetRunModeOfThread(context->generic->threadID);


/****** WTF is this?! *******/

/*!enum AppleProfileTriggerClientThreadRunMode
 *
 * Specifies the thread mode in which to record samples.
 */
typedef enum { // Target Thread State - can be OR'd
Example #13
/*
 *	thread_switch:
 *
 *	Context switch.  User may supply thread hint.
 */
kern_return_t
thread_switch(
	struct thread_switch_args *args)
{
	thread_t			thread = THREAD_NULL;
	thread_t			self = current_thread();
	mach_port_name_t		thread_name = args->thread_name;
	int						option = args->option;
	mach_msg_timeout_t		option_time = args->option_time;
	uint32_t				scale_factor = NSEC_PER_MSEC;
	boolean_t				reenable_workq_callback = FALSE;
	boolean_t				depress_option = FALSE;
	boolean_t				wait_option = FALSE;

    /*
     *	Validate and process option.
     */
    switch (option) {

	case SWITCH_OPTION_NONE:
		workqueue_thread_yielded();
		break;
	case SWITCH_OPTION_WAIT:
		wait_option = TRUE;
		workqueue_thread_yielded();
		break;
	case SWITCH_OPTION_DEPRESS:
		depress_option = TRUE;
		workqueue_thread_yielded();
		break;
	case SWITCH_OPTION_DISPATCH_CONTENTION:
		scale_factor = NSEC_PER_USEC;
		wait_option = TRUE;
		if (thread_switch_disable_workqueue_sched_callback())
			reenable_workq_callback = TRUE;
		break;
	case SWITCH_OPTION_OSLOCK_DEPRESS:
		depress_option = TRUE;
		if (thread_switch_disable_workqueue_sched_callback())
			reenable_workq_callback = TRUE;
		break;
	case SWITCH_OPTION_OSLOCK_WAIT:
		wait_option = TRUE;
		if (thread_switch_disable_workqueue_sched_callback())
			reenable_workq_callback = TRUE;
		break;
	default:
	    return (KERN_INVALID_ARGUMENT);
    }

	/*
	 * Translate the port name if supplied.
	 */
	if (thread_name != MACH_PORT_NULL) {
		ipc_port_t port;

		if (ipc_port_translate_send(self->task->itk_space,
		                            thread_name, &port) == KERN_SUCCESS) {
			ip_reference(port);
			ip_unlock(port);

			thread = convert_port_to_thread(port);
			ip_release(port);

			if (thread == self) {
				thread_deallocate(thread);
				thread = THREAD_NULL;
			}
		}
	}

	if (option == SWITCH_OPTION_OSLOCK_DEPRESS || option == SWITCH_OPTION_OSLOCK_WAIT) {
		if (thread != THREAD_NULL) {

			if (thread->task != self->task) {
				/*
				 * OSLock boosting only applies to other threads
				 * in your same task (even if you have a port for
				 * a thread in another task)
				 */

				thread_deallocate(thread);
				thread = THREAD_NULL;
			} else {
				/*
				 * Attempt to kick the lock owner up to our same IO throttling tier.
				 * If the thread is currently blocked in throttle_lowpri_io(),
				 * it will immediately break out.
				 *
				 * TODO: SFI break out?
				 */
				int new_policy = proc_get_effective_thread_policy(self, TASK_POLICY_IO);

				set_thread_iotier_override(thread, new_policy);
			}
		}
	}

	/*
	 * Try to handoff if supplied.
	 */
	if (thread != THREAD_NULL) {
		spl_t s = splsched();

		/* This may return a different thread if the target is pushing on something */
		thread_t pulled_thread = thread_run_queue_remove_for_handoff(thread);

		KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED,MACH_SCHED_THREAD_SWITCH)|DBG_FUNC_NONE,
				      thread_tid(thread), thread->state,
				      pulled_thread ? TRUE : FALSE, 0, 0);

		if (pulled_thread != THREAD_NULL) {
			/* We can't be dropping the last ref here */
			thread_deallocate_safe(thread);

			if (wait_option)
				assert_wait_timeout((event_t)assert_wait_timeout, THREAD_ABORTSAFE,
				                    option_time, scale_factor);
			else if (depress_option)
				thread_depress_ms(option_time);

			self->saved.swtch.option = option;
			self->saved.swtch.reenable_workq_callback = reenable_workq_callback;

			thread_run(self, (thread_continue_t)thread_switch_continue, NULL, pulled_thread);
			/* NOTREACHED */
			panic("returned from thread_run!");
		}

		splx(s);

		thread_deallocate(thread);
	}

	if (wait_option)
		assert_wait_timeout((event_t)assert_wait_timeout, THREAD_ABORTSAFE, option_time, scale_factor);
	else if (depress_option)
		thread_depress_ms(option_time);

	self->saved.swtch.option = option;
	self->saved.swtch.reenable_workq_callback = reenable_workq_callback;

	thread_block_reason((thread_continue_t)thread_switch_continue, NULL, AST_YIELD);

	if (depress_option)
		thread_depress_abort_internal(self);

	if (reenable_workq_callback)
		thread_switch_enable_workqueue_sched_callback();

    return (KERN_SUCCESS);
}
Example #14
/*
 * Create a new thread.
 * Doesn't start the thread running.
 */
static kern_return_t
thread_create_internal(
	task_t					parent_task,
	integer_t				priority,
	thread_continue_t		continuation,
	int						options,
#define TH_OPTION_NONE		0x00
#define TH_OPTION_NOCRED	0x01
#define TH_OPTION_NOSUSP	0x02
	thread_t				*out_thread)
{
	thread_t				new_thread;
	static thread_t			first_thread;

	/*
	 *	Allocate a thread and initialize static fields
	 */
	if (first_thread == THREAD_NULL)
		new_thread = first_thread = current_thread();
	else
		new_thread = (thread_t)zalloc(thread_zone);
	if (new_thread == THREAD_NULL)
		return (KERN_RESOURCE_SHORTAGE);

	if (new_thread != first_thread)
		*new_thread = thread_template;

#ifdef MACH_BSD
	new_thread->uthread = uthread_alloc(parent_task, new_thread, (options & TH_OPTION_NOCRED) != 0);
	if (new_thread->uthread == NULL) {
		zfree(thread_zone, new_thread);
		return (KERN_RESOURCE_SHORTAGE);
	}
#endif  /* MACH_BSD */

	if (machine_thread_create(new_thread, parent_task) != KERN_SUCCESS) {
#ifdef MACH_BSD
		void *ut = new_thread->uthread;

		new_thread->uthread = NULL;
		/* cred free may not be necessary */
		uthread_cleanup(parent_task, ut, parent_task->bsd_info);
		uthread_cred_free(ut);
		uthread_zone_free(ut);
#endif  /* MACH_BSD */

		zfree(thread_zone, new_thread);
		return (KERN_FAILURE);
	}

    new_thread->task = parent_task;

	thread_lock_init(new_thread);
	wake_lock_init(new_thread);

	lck_mtx_init(&new_thread->mutex, &thread_lck_grp, &thread_lck_attr);

	ipc_thread_init(new_thread);
	queue_init(&new_thread->held_ulocks);

	new_thread->continuation = continuation;

	lck_mtx_lock(&tasks_threads_lock);
	task_lock(parent_task);

	if (	!parent_task->active || parent_task->halting ||
			((options & TH_OPTION_NOSUSP) != 0 &&
			 	parent_task->suspend_count > 0)	||
			(parent_task->thread_count >= task_threadmax &&
				parent_task != kernel_task)		) {
		task_unlock(parent_task);
		lck_mtx_unlock(&tasks_threads_lock);

#ifdef MACH_BSD
		{
			void *ut = new_thread->uthread;

			new_thread->uthread = NULL;
			uthread_cleanup(parent_task, ut, parent_task->bsd_info);
			/* cred free may not be necessary */
			uthread_cred_free(ut);
			uthread_zone_free(ut);
		}
#endif  /* MACH_BSD */
		ipc_thread_disable(new_thread);
		ipc_thread_terminate(new_thread);
		lck_mtx_destroy(&new_thread->mutex, &thread_lck_grp);
		machine_thread_destroy(new_thread);
		zfree(thread_zone, new_thread);
		return (KERN_FAILURE);
	}

	/* New threads inherit any default state on the task */
	machine_thread_inherit_taskwide(new_thread, parent_task);

	task_reference_internal(parent_task);

	/* Cache the task's map */
	new_thread->map = parent_task->map;

	/* Chain the thread onto the task's list */
	queue_enter(&parent_task->threads, new_thread, thread_t, task_threads);
	parent_task->thread_count++;
	
	/* So terminating threads don't need to take the task lock to decrement */
	hw_atomic_add(&parent_task->active_thread_count, 1);

	/* Protected by the tasks_threads_lock */
	new_thread->thread_id = ++thread_unique_id;

	queue_enter(&threads, new_thread, thread_t, threads);
	threads_count++;

	timer_call_setup(&new_thread->wait_timer, thread_timer_expire, new_thread);
	timer_call_setup(&new_thread->depress_timer, thread_depress_expire, new_thread);

#if CONFIG_COUNTERS
	/*
	 * If parent task has any reservations, they need to be propagated to this
	 * thread.
	 */
	new_thread->t_chud = (TASK_PMC_FLAG == (parent_task->t_chud & TASK_PMC_FLAG)) ? 
		THREAD_PMC_FLAG : 0U;
#endif

	/* Set the thread's scheduling parameters */
	if (parent_task != kernel_task)
		new_thread->sched_mode |= TH_MODE_TIMESHARE;
	new_thread->max_priority = parent_task->max_priority;
	new_thread->task_priority = parent_task->priority;
	new_thread->priority = (priority < 0)? parent_task->priority: priority;
	if (new_thread->priority > new_thread->max_priority)
		new_thread->priority = new_thread->max_priority;
	new_thread->importance =
					new_thread->priority - new_thread->task_priority;
	new_thread->sched_stamp = sched_tick;
	new_thread->pri_shift = sched_pri_shift;
	compute_priority(new_thread, FALSE);

	new_thread->active = TRUE;

	*out_thread = new_thread;

	{
		long	dbg_arg1, dbg_arg2, dbg_arg3, dbg_arg4;

		kdbg_trace_data(parent_task->bsd_info, &dbg_arg2);

		KERNEL_DEBUG_CONSTANT(
					TRACEDBG_CODE(DBG_TRACE_DATA, 1) | DBG_FUNC_NONE,
							(vm_address_t)(uintptr_t)thread_tid(new_thread), dbg_arg2, 0, 0, 0);

		kdbg_trace_string(parent_task->bsd_info,
							&dbg_arg1, &dbg_arg2, &dbg_arg3, &dbg_arg4);

		KERNEL_DEBUG_CONSTANT(
					TRACEDBG_CODE(DBG_TRACE_STRING, 1) | DBG_FUNC_NONE,
							dbg_arg1, dbg_arg2, dbg_arg3, dbg_arg4, 0);
	}

	DTRACE_PROC1(lwp__create, thread_t, *out_thread);

	return (KERN_SUCCESS);
}
Example #15
static void
callstack_sample( struct callstack *cs, 
                  struct kperf_context *context,
                  uint32_t is_user )
{
	kern_return_t kr;
	mach_msg_type_number_t nframes; /* WTF with the type? */
	uint32_t code;

	if( is_user )
		code = PERF_CS_USAMPLE;
	else
		code = PERF_CS_KSAMPLE;

	BUF_INFO1( code, (uintptr_t)thread_tid(context->cur_thread) );

	/* fill out known flags */
	cs->flags = 0;
	if( !is_user )
	{
		cs->flags |= CALLSTACK_KERNEL;
#ifdef __LP64__
		cs->flags |= CALLSTACK_64BIT;
#endif
	}
	else
	{
		/* FIXME: detect 32 vs 64-bit? */
	}

	/* collect the callstack */
	nframes = MAX_CALLSTACK_FRAMES;
	kr = chudxnu_thread_get_callstack64_kperf( context->cur_thread, 
						   cs->frames, 
						   &nframes,
						   is_user );

	/* check for overflow */
	if( kr == KERN_SUCCESS )
	{
		cs->flags |= CALLSTACK_VALID;
		cs->nframes = nframes;
	}
	else if( kr == KERN_RESOURCE_SHORTAGE )
	{
		/* FIXME: more here */
		cs->flags |= CALLSTACK_TRUNCATED;
		cs->flags |= CALLSTACK_VALID;
		cs->nframes = nframes;
	}
	else
	{
		BUF_INFO2(PERF_CS_ERROR, ERR_GETSTACK, kr);
		cs->nframes = 0;
	}

	if( cs->nframes > MAX_CALLSTACK_FRAMES )
	{
		/* necessary? */
		BUF_INFO1(PERF_CS_ERROR, ERR_FRAMES);
		cs->nframes = 0;
	}

}
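The valid/truncated bookkeeping above has a simple shape: mark the sample valid on success, and flag it truncated when the frame buffer filled up. A user-space analogue using glibc's backtrace(); the flag names and struct are illustrative, not kperf's:

/* User-space analogue of the valid/truncated flags above (not kperf code). */
#include <execinfo.h>
#include <stdint.h>
#include <stdio.h>

#define MAX_FRAMES 8

#define CS_VALID     (1u << 0)
#define CS_TRUNCATED (1u << 1)

struct callstack_sketch {
    uint32_t flags;
    int      nframes;
    void    *frames[MAX_FRAMES];
};

static void capture(struct callstack_sketch *cs)
{
    cs->flags = 0;
    cs->nframes = backtrace(cs->frames, MAX_FRAMES);
    cs->flags |= CS_VALID;
    if (cs->nframes == MAX_FRAMES)   /* buffer filled: the stack may be deeper */
        cs->flags |= CS_TRUNCATED;
}

int main(void)
{
    struct callstack_sketch cs;
    capture(&cs);
    printf("frames=%d truncated=%d\n", cs.nframes,
           (cs.flags & CS_TRUNCATED) != 0);
    return 0;
}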