Example #1
error_t
setup_thread_target (void)
{
  error_t err;
  static task_t task;
  static thread_t thread;

  if (MACH_PORT_VALID (thread))
    {
      thread_terminate (thread);
      mach_port_deallocate (mach_task_self (), thread);
    }

  if (MACH_PORT_VALID (task))
    {
      task_terminate (task);
      mach_port_deallocate (mach_task_self (), task);
    }

  err = task_create (mach_task_self (), 0, &task);
  if (err)
    return err;

  err = thread_create (task, &thread);
  if (err)
    return err;

  return setup (thread, MACH_MSG_TYPE_COPY_SEND);
}
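A minimal usage sketch, assuming the Hurd-style convention that a nonzero error_t is an error code (the error() reporting call is just illustrative):

  error_t err = setup_thread_target ();
  if (err)
    error (1, err, "setup_thread_target");  /* report and exit */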
Example #2
void
_exit(int status)
{

	for (;;)
		task_terminate(task_self());
}
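Note the defensive loop: task_terminate() on the calling task should never return, so this _exit() simply retries forever rather than ever falling back to its caller.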
Example #3
int task_exit(void)
{
  FAR struct tcb_s *dtcb = (FAR struct tcb_s*)g_readytorun.head;
  FAR struct tcb_s *rtcb;
  int ret;

  /* Remove the TCB of the current task from the ready-to-run list.  A context
   * switch will definitely be necessary -- that must be done by the
   * architecture-specific logic.
   *
   * sched_removereadytorun will mark the task at the head of the ready-to-run
   * list with state == TSTATE_TASK_RUNNING.
   */

  (void)sched_removereadytorun(dtcb);
  rtcb = (FAR struct tcb_s*)g_readytorun.head;

  /* We are now in a bad state -- the head of the ready-to-run task list
   * does not correspond to the thread that is running.  We disable pre-
   * emption on this TCB and mark the new ready-to-run task as not
   * running (see, for example, get_errno_ptr()).
   *
   * We disable pre-emption here by directly incrementing the lockcount
   * (vs. calling sched_lock()).
   */

  rtcb->lockcount++;
  rtcb->task_state = TSTATE_TASK_READYTORUN;

  /* Move the TCB to the specified blocked task list and delete it.  Calling
   * task_terminate with non-blocking true will suppress atexit() and on-exit()
   * calls and will cause buffered I/O to fail to be flushed.  The former
   * is required _exit() behavior; the latter is optional _exit() behavior.
   */

  sched_addblocked(dtcb, TSTATE_TASK_INACTIVE);
  ret = task_terminate(dtcb->pid, true);
  rtcb->task_state = TSTATE_TASK_RUNNING;

  /* If there are any pending tasks, then add them to the ready-to-run
   * task list now
   */

  if (g_pendingtasks.head)
    {
      (void)sched_mergepending();
    }

  /* We can't use sched_unlock() to decrement the lock count because the
   * sched_mergepending() call above might have changed the task at the
   * head of the ready-to-run list.  Furthermore, we should not need to
   * perform the unlock action anyway because we know that the pending
   * task list is empty.  So all we really need to do is to decrement
   * the lockcount on rtcb.
   */

  rtcb->lockcount--;
  return ret;
}
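task_exit() only detaches the TCB; the required context switch is left to architecture-specific code. A rough sketch of the expected calling pattern, with up_fullcontextrestore() standing in for a hypothetical architecture-level register restore:

  void up_exit(int status)
  {
    /* Destroy the calling task.  Afterward, the head of the
     * ready-to-run list is the task to resume.
     */

    (void)task_exit();

    /* Hypothetical: restore the context of the new head task */

    up_fullcontextrestore(((FAR struct tcb_s *)g_readytorun.head)->xcp.regs);
  }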
Example #4
int
mach_init_doit(bool forkchild)
{
	/*
	 *	Get the important ports into the cached values,
	 *	as required by "mach_init.h".
	 */
	mach_task_self_ = task_self_trap();
	
	/*
	 *	Initialize the single mig reply port
	 */

	_pthread_set_self(0);
	_mig_init(0);

#if WE_REALLY_NEED_THIS_GDB_HACK
	/*
	 * Check to see if GDB wants us to stop
	 */
	{
	task_user_data_data_t	user_data;
	mach_msg_type_number_t	user_data_count = TASK_USER_DATA_COUNT;
	  
	user_data.user_data = 0;
	(void)task_info(mach_task_self_, TASK_USER_DATA,
		(task_info_t)&user_data, &user_data_count);
#define MACH_GDB_RUN_MAGIC_NUMBER 1
#ifdef	MACH_GDB_RUN_MAGIC_NUMBER	
	/* This magic number is set in Mach-aware gdb
	 *  by the RUN command to allow it to suspend the user's
	 *  executable (linked with this libmach!)
	 *  with the code below.
	 * This hack should disappear when gdb improves.
	 */
	if ((int)user_data.user_data == MACH_GDB_RUN_MAGIC_NUMBER) {
	    kern_return_t ret;
	    user_data.user_data = 0;
	    
	    ret = task_suspend(mach_task_self_);
	    if (ret != KERN_SUCCESS) {
			while (1) {
				(void)task_terminate(mach_task_self_);
			}
	    }
	}
#undef MACH_GDB_RUN_MAGIC_NUMBER  
#endif /* MACH_GDB_RUN_MAGIC_NUMBER */
	}
#endif /* WE_REALLY_NEED_THIS_GDB_HACK */

	return 0;
}
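Design note: the bracketed GDB hack polls TASK_USER_DATA for a magic value that a Mach-aware gdb plants before RUN; if the value matches, the process suspends itself so gdb can attach, and if even task_suspend() fails, it gives up by terminating itself in a loop, echoing the _exit() pattern in Example #2.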
Example #5
error_t
setup_task_target (void)
{
  error_t err;
  static task_t task;
  static mach_msg_type_name_t taskType = MACH_MSG_TYPE_COPY_SEND;

  if (MACH_PORT_VALID (task))
    {
      task_terminate (task);
      mach_port_deallocate (mach_task_self (), task);
    }

  err = task_create (mach_task_self (), 0, &task);
  if (err)
    return err;

  return setup (task, taskType);
}
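This is the task-level counterpart of setup_thread_target() in Example #1: any previous target task is terminated and its port deallocated before a fresh task is created and handed to setup().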
Example #6
int task_delete(pid_t pid)
{
  FAR struct tcb_s *rtcb;

  /* Check if the task to delete is the calling task */

  rtcb = (FAR struct tcb_s*)g_readytorun.head;
  if (pid == 0 || pid == rtcb->pid)
    {
      /* If it is, then what we really wanted to do was exit. Note that we
       * don't bother to unlock the TCB since it will be going away.
       */

      exit(EXIT_SUCCESS);
    }

  /* Then let task_terminate do the heavy lifting */

  return task_terminate(pid, false);
}
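A hypothetical caller, assuming the usual NuttX convention that OK (0) means success (worker_pid and the error message are illustrative):

  if (task_delete(worker_pid) != OK)
    {
      fprintf(stderr, "task_delete(%d) failed\n", (int)worker_pid);
    }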
Example #7
void
exception_no_server(void)
{
	ipc_thread_t self = current_thread();

	/*
	 *	If this thread is being terminated, cooperate.
	 */

	while (thread_should_halt(self))
		thread_halt_self(thread_exception_return);


#if 0
	if (thread_suspend (self) == KERN_SUCCESS)
	  thread_exception_return ();
#endif

#if	MACH_KDB
	if (debug_user_with_kdb) {
		/*
		 *	Debug the exception with kdb.
		 *	If kdb handles the exception,
		 *	then thread_kdb_return won't return.
		 */

		db_printf("No exception server, calling kdb...\n");
		thread_kdb_return();
	}
#endif	/* MACH_KDB */

	/*
	 *	All else failed; terminate task.
	 */

	(void) task_terminate(self->task);
	thread_halt_self(thread_exception_return);
	panic("terminating the task didn't kill us");
	/*NOTREACHED*/
}
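The structure here mirrors exception_triage() in Example #11 below: cooperate with a pending halt first, optionally hand the exception to kdb, and only when everything else fails terminate the whole task and halt the current thread.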
Example #8
/*
 * Panic system call.
 *
 * If the kernel is built with the debug option, sys_panic() displays
 * a panic message and stops the entire system.  Otherwise, it
 * terminates the task that called sys_panic().
 */
int
sys_panic(const char *str)
{
#ifdef DEBUG
    task_t self = cur_task();

    irq_lock();
    printf("\nUser mode panic: task:%s thread:%x\n",
           self->name != NULL ? self->name : "no name", cur_thread);

    sys_log(str);
    printf("\n");

    sched_lock();
    irq_unlock();

    for (;;);
#else
    task_terminate(cur_task());
#endif
    /* NOTREACHED */
    return 0;
}
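A hypothetical invocation from a user task; on a non-DEBUG kernel the call terminates the calling task and never returns:

  if (buf == NULL)
      sys_panic("out of memory");  /* does not return */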
Example #9
File: rztask.c Project: bronson/rzh
static void parse_typing(const char *buf, int len, void *refcon)
{
    int i;
    task_spec *spec = (task_spec*)refcon;

    for(i=0; i<len; i++) {
        switch(buf[i]) {
        case 3:		// ^C
        case 24: 	// ^X
        case 27:	// ESC
        case 'q':
        case 'Q':
            log_info("TYPING: Cancel!");
            task_terminate(spec->master);
            break;

        default:
            fprintf(stderr, "KEY: len=%d <<%.*s>>\r\n", len, len, buf);
            break;
        }
    }

    log_dbg("TYPING (%d chars): %.*s", len, len, buf);
}
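Design note: any of ^C, ^X, ESC, q, or Q cancels the transfer by terminating the master task; every other keystroke is merely reported on stderr and logged.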
Example #10
int task_restart(pid_t pid)
{
  FAR struct tcb_s *rtcb;
  FAR struct task_tcb_s *tcb;
  FAR dq_queue_t *tasklist;
  irqstate_t flags;
  int errcode;
#ifdef CONFIG_SMP
  int cpu;
#endif
  int ret;

  /* Check if the task to restart is the calling task */

  rtcb = this_task();
  if ((pid == 0) || (pid == rtcb->pid))
    {
      /* Not implemented */

      errcode = ENOSYS;
      goto errout;
    }

  /* We are restarting some task other than ourselves.  Make sure that the
   * task does not change its state while we are executing.  In the single
   * CPU case this could be done by disabling pre-emption.  But we need
   * slightly stronger medicine in the SMP case:  The task may be running
   * on another CPU.
   */

  flags = enter_critical_section();

  /* Find the TCB associated with the matching pid */

  tcb = (FAR struct task_tcb_s *)sched_gettcb(pid);
#ifndef CONFIG_DISABLE_PTHREAD
  if (!tcb || (tcb->cmn.flags & TCB_FLAG_TTYPE_MASK) == TCB_FLAG_TTYPE_PTHREAD)
#else
  if (!tcb)
#endif
    {
      /* There is no TCB with this pid or, if there is, it is not a task. */

      errcode = ESRCH;
      goto errout_with_lock;
    }

#ifdef CONFIG_SMP
  /* If the task is running on another CPU, then pause that CPU.  We can
   * then manipulate the TCB of the restarted task and, when we resume
   * that CPU, the restart will take effect.
   */

  cpu = sched_cpu_pause(&tcb->cmn);
#endif /* CONFIG_SMP */

  /* Try to recover from any bad states */

  task_recover((FAR struct tcb_s *)tcb);

  /* Kill any children of this thread */

#ifdef HAVE_GROUP_MEMBERS
  (void)group_killchildren(tcb);
#endif

  /* Remove the TCB from whatever list it is in.  After this point, the TCB
   * should no longer be accessible to the system
   */

#ifdef CONFIG_SMP
  tasklist = TLIST_HEAD(tcb->cmn.task_state, tcb->cmn.cpu);
#else
  tasklist = TLIST_HEAD(tcb->cmn.task_state);
#endif

  dq_rem((FAR dq_entry_t *)tcb, tasklist);
  tcb->cmn.task_state = TSTATE_TASK_INVALID;

  /* Deallocate anything left in the TCB's queues */

  sig_cleanup((FAR struct tcb_s *)tcb); /* Deallocate Signal lists */

  /* Reset the current task priority  */

  tcb->cmn.sched_priority = tcb->cmn.init_priority;

  /* The task should restart with pre-emption disabled and not in a critical
   * section.
   */

  tcb->cmn.lockcount = 0;
#ifdef CONFIG_SMP
  tcb->cmn.irqcount  = 0;
#endif

  /* Reset the base task priority and the number of pending reprioritizations */

#ifdef CONFIG_PRIORITY_INHERITANCE
  tcb->cmn.base_priority = tcb->cmn.init_priority;
#  if CONFIG_SEM_NNESTPRIO > 0
  tcb->cmn.npend_reprio = 0;
#  endif
#endif

  /* Re-initialize the processor-specific portion of the TCB.  This will
   * reset the entry point and the start-up parameters
   */

  up_initial_state((FAR struct tcb_s *)tcb);

  /* Add the task to the inactive task list */

  dq_addfirst((FAR dq_entry_t *)tcb, (FAR dq_queue_t *)&g_inactivetasks);
  tcb->cmn.task_state = TSTATE_TASK_INACTIVE;

#ifdef CONFIG_SMP
  /* Resume the paused CPU (if any) */

  if (cpu >= 0)
    {
      ret = up_cpu_resume(cpu);
      if (ret < 0)
        {
          errcode = -ret;
          goto errout_with_lock;
        }
    }
#endif /* CONFIG_SMP */

  leave_critical_section(flags);

  /* Activate the task. */

  ret = task_activate((FAR struct tcb_s *)tcb);
  if (ret != OK)
    {
      (void)task_terminate(pid, true);
      errcode = -ret;
      goto errout_with_lock;
    }

  return OK;

errout_with_lock:
  leave_critical_section(flags);
errout:
  set_errno(errcode);
  return ERROR;
}
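A usage sketch, assuming the convention documented above that ERROR (-1) is returned with errno set on failure:

  if (task_restart(pid) == ERROR)
    {
      fprintf(stderr, "task_restart(%d): errno %d\n", (int)pid, errno);
    }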
Example #11
/*
 *	Routine:	exception
 *	Purpose:
 *		The current thread caught an exception.
 *		We make an up-call to the thread's exception server.
 *	Conditions:
 *		Nothing locked and no resources held.
 *		Called from an exception context, so
 *		thread_exception_return and thread_kdb_return
 *		are possible.
 *	Returns:
 *		Doesn't return.
 */
void
exception_triage(
	exception_type_t	exception,
	mach_exception_data_t	code,
	mach_msg_type_number_t  codeCnt)
{
	thread_t		thread;
	task_t			task;
	host_priv_t		host_priv;
	struct exception_action *excp;
	lck_mtx_t			*mutex;
	kern_return_t		kr;

	assert(exception != EXC_RPC_ALERT);

	if (exception == KERN_SUCCESS)
		panic("exception");

	/*
	 * Try to raise the exception at the activation level.
	 */
	thread = current_thread();
	mutex = &thread->mutex;
	excp = &thread->exc_actions[exception];
	kr = exception_deliver(thread, exception, code, codeCnt, excp, mutex);
	if (kr == KERN_SUCCESS || kr == MACH_RCV_PORT_DIED)
		goto out;

	/*
	 * Maybe the task level will handle it.
	 */
	task = current_task();
	mutex = &task->lock;
	excp = &task->exc_actions[exception];
	kr = exception_deliver(thread, exception, code, codeCnt, excp, mutex);
	if (kr == KERN_SUCCESS || kr == MACH_RCV_PORT_DIED)
		goto out;

	/*
	 * How about at the host level?
	 */
	host_priv = host_priv_self();
	mutex = &host_priv->lock;
	excp = &host_priv->exc_actions[exception];
	kr = exception_deliver(thread, exception, code, codeCnt, excp, mutex);
	if (kr == KERN_SUCCESS || kr == MACH_RCV_PORT_DIED)
		goto out;

	/*
	 * Nobody handled it, terminate the task.
	 */

#if	MACH_KDB
	if (debug_user_with_kdb) {
		/*
		 *	Debug the exception with kdb.
		 *	If kdb handles the exception,
		 *	then thread_kdb_return won't return.
		 */
		db_printf("No exception server, calling kdb...\n");
		thread_kdb_return();
	}
#endif	/* MACH_KDB */

	(void) task_terminate(task);

out:
	if (exception != EXC_CRASH)
		thread_exception_return();
	return;
}
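Design note: delivery is attempted at three widening scopes -- the thread's exception actions, then the task's, then the host's -- and only when all three fail (and kdb, if configured, declines) is the task terminated. Note also that EXC_CRASH skips the final thread_exception_return().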
Example #12
kern_return_t
norma_task_common(
	task_t		parent_task,
	boolean_t	inherit_memory,
	boolean_t	clone,
	boolean_t	kill_parent,
	int		child_node,
	task_t		*child_task)
{
	ipc_port_t remote_task, remote_host;
	task_t new_task;
	kern_return_t kr;
	int vector_start;
	unsigned int entry_vector_count;
	ipc_port_t bootstrap;
	emulation_vector_t entry_vector;
	ipc_port_t registered[TASK_PORT_REGISTER_MAX];
	exception_mask_t exc_masks[EXC_TYPES_COUNT];
	ipc_port_t exc_ports[EXC_TYPES_COUNT];
	exception_behavior_t exc_behaviors[EXC_TYPES_COUNT];
	thread_state_flavor_t exc_flavors[EXC_TYPES_COUNT];
	unsigned count, exc_count;

#if	DIPC
	if (!dipc_node_is_valid (child_node)) {
		return KERN_INVALID_ARGUMENT;
	}

	if (child_node == dipc_node_self()) {
		if (clone) {
			/*
			 * Cloning: easy if kill_parent;
			 * (currently) impossible otherwise.
			 */
			if (kill_parent) {
				/*
				 * Just return the parent -- nothing says that
				 * the clone has to be a different task.
				 */
				*child_task = parent_task;
				return KERN_SUCCESS;
			} else {
				/*
				 * XXX
				 * There is no local call we can use with the
				 * same memory semantics -- we'd have to
				 * modify task_create_local, and vm_map_fork.
				 * Not hard, just probably unnecessary except
				 * for orthogonality.
				 */
				return KERN_INVALID_ARGUMENT;
			}
		} else {
			/*
			 * Not cloning: just use task_create_local,
			 * and task_terminate if appropriate.
			 */
			kr = task_create_local(parent_task, inherit_memory,
					       FALSE, child_task);
			if (kr != KERN_SUCCESS) {
				return kr;
			}
			if (kill_parent) {
				kr = task_terminate(parent_task);
				if (kr != KERN_SUCCESS) {
					/* cleanup */
					(void) task_terminate(*child_task);
					return kr;
				}
			}
			return KERN_SUCCESS;
		}
	}

	kr = task_get_emulation_vector(parent_task, &vector_start,
				       &entry_vector, &entry_vector_count);
	if (kr != KERN_SUCCESS) {
		printf("task_get_emulation_vector failed: kr %d %x\n", kr, kr);
		return kr;
	}

	kr = task_get_inherited_ports(parent_task, &bootstrap, registered,
				      &count, exc_masks, &exc_count,
				      exc_ports, exc_behaviors, exc_flavors);
	if (kr != KERN_SUCCESS) {
		printf("task_get_inherited_ports failed: kr %d %x\n", kr, kr);
		return kr;
	}

	remote_host = dipc_host_priv_port(child_node);
	if (remote_host == IP_NULL) {
		panic("norma_task_create:  no priv port for node %d\n",
		      child_node);
		return KERN_INVALID_ARGUMENT;
	}

	kr = r_norma_task_allocate(remote_host, vector_start, entry_vector,
				   entry_vector_count, bootstrap,
				   registered, count, exc_masks, exc_count,
				   exc_ports, exc_behaviors, exc_flavors,
				   &remote_task);
	if (kr != KERN_SUCCESS) {
		return kr;
	}

	if (inherit_memory) {
		task_copy_vm(remote_host, parent_task->map, clone,
			     kill_parent, remote_task);
	}

	if (kill_parent) {
		(void) task_terminate(parent_task);
	}

	/*
	 * Create a placeholder task for the benefit of convert_task_to_port.
	 * Set new_task->map to VM_MAP_NULL so that task_deallocate will
	 * know that this is only a placeholder task.
	 */
	new_task = (task_t) zalloc(task_zone);
	if (new_task == TASK_NULL) {
		panic("task_create: no memory for task structure");
	}

#if     MCMSG
	new_task->mcmsg_task = 0;
#endif  /* MCMSG */

	/* only one ref, for our caller */
	new_task->ref_count = 1;

	new_task->map = VM_MAP_NULL;
	new_task->itk_self = remote_task;
	mutex_init(&new_task->lock, ETAP_NORMA_TASK);
	itk_lock_init(new_task);

	*child_task = new_task;
	return(KERN_SUCCESS);
#else	/* DIPC */
	printf("norma_task_common:  no underlying transport!\n");
	return KERN_INVALID_ARGUMENT;
#endif	/* DIPC */
}
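Design note: the placeholder task created at the end exists only so that convert_task_to_port() has a local task structure to work with; setting new_task->map to VM_MAP_NULL is the agreed-upon marker that lets task_deallocate() recognize it as a stub.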
Example #13
void
boot_script_free_task (task_t task, int aborting)
{
  if (aborting)
    task_terminate (task);
}
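A hypothetical call from boot-script error handling: a task created while parsing the script is reclaimed only when the script is being aborted:

  boot_script_free_task (task, 1);  /* aborting != 0: terminate the task */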
Example #14
int generic_callback_9(exc_msg_t *info_struct){
    printf("OMG A CRASH CALLBACK\n");
    getchar();
    task_terminate(info_struct->task);
    exit(-1);
}
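Note that the callback terminates the task recorded in the exception message (info_struct->task), presumably the crashed task rather than the handler's own; the handler process then exits with status -1.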
Example #15
int pthread_cancel(pthread_t thread)
{
  FAR struct pthread_tcb_s *tcb;

  /* First, make sure that the handle references a valid thread */

  if (thread == 0)
    {
      /* pid == 0 is the IDLE task.  Callers cannot cancel the
       * IDLE task.
       */

      return ESRCH;
    }

  tcb = (FAR struct pthread_tcb_s *)sched_gettcb((pid_t)thread);
  if (tcb == NULL)
    {
      /* The pid does not correspond to any known thread.  The thread
       * has probably already exited.
       */

      return ESRCH;
    }

  /* Only pthreads should use this interface */

  DEBUGASSERT((tcb->cmn.flags & TCB_FLAG_TTYPE_MASK) == TCB_FLAG_TTYPE_PTHREAD);

  /* Check to see if this thread has the non-cancelable bit set in its
   * flags.  Suppress context changes for a bit so that the flags are stable.
   * (The flags should not change in interrupt handling.)
   */

  sched_lock();
  if ((tcb->cmn.flags & TCB_FLAG_NONCANCELABLE) != 0)
    {
      /* Then we cannot cancel the thread now.  Here is how this is
       * supposed to work:
       *
       * "When cancelability is disabled, all cancels are held pending
       *  in the target thread until the thread changes the cancelability.
       *  When cancelability is deferred, all cancels are held pending in
       *  the target thread until the thread changes the cancelability, calls
       *  a function which is a cancellation point or calls pthread_testcancel(),
       *  thus creating a cancellation point. When cancelability is asynchronous,
       *  all cancels are acted upon immediately, interrupting the thread with its
       *  processing."
       */

      tcb->cmn.flags |= TCB_FLAG_CANCEL_PENDING;
      sched_unlock();
      return OK;
    }

#ifdef CONFIG_CANCELLATION_POINTS
  /* Check if this thread supports deferred cancellation */

  if ((tcb->cmn.flags & TCB_FLAG_CANCEL_DEFERRED) != 0)
    {
      /* Then we cannot cancel the thread asynchronously.  Mark the cancellation
       * as pending.
       */

      tcb->cmn.flags |= TCB_FLAG_CANCEL_PENDING;

      /* If the thread is waiting at a cancellation point, then notify it of
       * the cancellation, thereby waking the task up with an ECANCELED error.
       *
       * REVISIT: Is locking the scheduler sufficient in SMP mode?
       */

      if (tcb->cmn.cpcount > 0)
        {
          notify_cancellation(&tcb->cmn);
        }

      sched_unlock();
      return OK;
    }
#endif

  /* Otherwise, perform the asynchronous cancellation */

  sched_unlock();

  /* Check to see if the ID refers to ourselves.  If so, this would be the
   * same as pthread_exit(PTHREAD_CANCELED).
   */

  if (tcb == (FAR struct pthread_tcb_s *)this_task())
    {
      pthread_exit(PTHREAD_CANCELED);
    }

#ifdef CONFIG_PTHREAD_CLEANUP
  /* Perform any stacked pthread clean-up callbacks.
   *
   * REVISIT: In this case, the clean-up callback will execute on the
   * thread of the caller of pthread cancel, not on the thread of
   * the thread-to-be-canceled.  Is that an issue?  Presumably they
   * are both within the same group and within the same process address
   * space.
   */

  pthread_cleanup_popall(tcb);
#endif

  /* Complete pending join operations */

  (void)pthread_completejoin((pid_t)thread, PTHREAD_CANCELED);

  /* Then let task_terminate do the real work */

  return task_terminate((pid_t)thread, false);
}
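A standard POSIX usage sketch; a joinable thread canceled this way reports PTHREAD_CANCELED to its joiner (worker is a hypothetical thread function):

  pthread_t tid;
  void *result;

  pthread_create(&tid, NULL, worker, NULL);
  pthread_cancel(tid);
  pthread_join(tid, &result);  /* result == PTHREAD_CANCELED */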