Example 1
/*
 * Set up the initial state of a MACH thread
 * so that it will invoke cthread_body(child)
 * when it is resumed.
 */
void
cproc_setup(register cproc_t child, thread_t thread, void (*routine)(cproc_t))
{
	extern unsigned int __hurd_threadvar_max; /* GNU */
	register int *top = (int *)
	  cproc_stack_base (child,
			    sizeof(ur_cthread_t *) +
			    /* Account for GNU per-thread variables.  */
			    __hurd_threadvar_max *
			    sizeof (long int));
	struct i386_thread_state state;
	register struct i386_thread_state *ts = &state;
	kern_return_t r;
	unsigned int count;

	/*
	 * Set up i386 call frame and registers.
	 * Read registers first to get correct segment values.
	 */
	count = i386_THREAD_STATE_COUNT;
	MACH_CALL(thread_get_state(thread,i386_THREAD_STATE,(thread_state_t) &state,&count),r);

	ts->eip = (int) routine;
	*--top = (int) child;	/* argument to function */
	*--top = 0;		/* fake return address */
	ts->uesp = (int) top;	/* set stack pointer */
	ts->ebp = 0;		/* clear frame pointer */

	MACH_CALL(thread_set_state(thread,i386_THREAD_STATE,(thread_state_t) &state,i386_THREAD_STATE_COUNT),r);
}
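A caller would pair cproc_setup() with Mach thread creation and a later resume. The sketch below is only illustrative (it is not part of the original source) and assumes the cproc_t child and its stack have already been prepared:

/*
 * Hypothetical caller: create a suspended Mach thread, point it at
 * cthread_body via cproc_setup(), then let it run.
 */
static void
cproc_start(cproc_t child)
{
	thread_t thread;
	kern_return_t r;

	MACH_CALL(thread_create(mach_task_self(), &thread), r);
	cproc_setup(child, thread, cthread_body);
	MACH_CALL(thread_resume(thread), r);
}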
Example 2
/*
 * Set up the initial state of a MACH thread
 */
void
_pthread_setup(pthread_t thread, 
	       void (*routine)(pthread_t), 
	       vm_address_t vsp)
{
	struct hp700_thread_state state;
	struct hp700_thread_state *ts = &state;
	kern_return_t r;
	mach_msg_type_number_t count;
	int *sp = (int *) vsp;

	/*
	 * Set up PA-RISC registers & function call.
	 */
	count = HP700_THREAD_STATE_COUNT;
	MACH_CALL(thread_get_state(thread->kernel_thread,
				   HP700_THREAD_STATE,
				   (thread_state_t) &state,
				   &count),
		  r);
	ts->iioq_head = (int) routine;
	ts->iioq_tail = (int) routine + 4;
	ts->arg0 = (int) thread;
	ts->sp = vsp;
	ts->dp = _dp();
	ts->rp = 0;
	ts->r3 = 0;
	sp[-1] = sp[-5] = 0;	/* Clear saved SP and RP in frame. */
	MACH_CALL(thread_set_state(thread->kernel_thread,
				   HP700_THREAD_STATE,
				   (thread_state_t) &state,
				   HP700_THREAD_STATE_COUNT),
		  r);
}
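None of these excerpts define the MACH_CALL macro itself. For the two-argument form used here, which evaluates a Mach call and stores its kern_return_t, one plausible definition (an assumption, not the original) would be:

/*
 * Assumed definition of the two-argument MACH_CALL: run the call, record
 * the result, and report anything other than KERN_SUCCESS.
 */
#include <mach/mach.h>
#include <mach/mach_error.h>

#define MACH_CALL(call, r)						\
	do {								\
		(r) = (call);						\
		if ((r) != KERN_SUCCESS)				\
			mach_error("MACH_CALL(" #call ")", (r));	\
	} while (0)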
Example 3
/*
 * Signal a condition variable, waking up all threads waiting for it.
 */
int       
pthread_cond_broadcast(pthread_cond_t *cond)
{
	kern_return_t kern_res;
	if (cond->sig == _PTHREAD_COND_SIG_init)
	{
		int res;
		if ((res = pthread_cond_init(cond, NULL)) != 0)
			return (res);
	}
	if (cond->sig == _PTHREAD_COND_SIG)
	{ 
		LOCK(cond->lock);
		if (cond->waiters == 0)
		{ /* Avoid kernel call since there are no waiters... */
			UNLOCK(cond->lock);	
			return (ESUCCESS);
		}
		UNLOCK(cond->lock);
		MACH_CALL(semaphore_signal_all(cond->sem), kern_res);
		if (kern_res == KERN_SUCCESS)
		{
			return (ESUCCESS);
		} else
		{
			return (EINVAL);
		}
	} else
		return (EINVAL); /* Not a condition variable */
}
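On the waiting side, this broadcast pairs with the usual predicate loop; the fragment below is a minimal illustration (not from the original source) using a shared flag guarded by one mutex:

#include <pthread.h>

static pthread_mutex_t m = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  c = PTHREAD_COND_INITIALIZER;
static int ready;

/* Waiters block until announce_ready() flips the flag and broadcasts. */
static void
wait_for_ready(void)
{
	pthread_mutex_lock(&m);
	while (!ready)
		pthread_cond_wait(&c, &m);
	pthread_mutex_unlock(&m);
}

static void
announce_ready(void)
{
	pthread_mutex_lock(&m);
	ready = 1;
	pthread_mutex_unlock(&m);
	pthread_cond_broadcast(&c);	/* wakes every waiter at once */
}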
Example 4
/*
 * Set up the initial state of a MACH thread
 */
void
_pthread_setup(pthread_t thread, 
	       void (*routine)(pthread_t), 
	       vm_address_t vsp)
{
	struct i386_thread_state state;
	struct i386_thread_state *ts = &state;
	kern_return_t r;
	unsigned int count;
	int *sp = (int *) vsp;

	/*
	 * Set up i386 registers & function call.
	 */
	count = i386_THREAD_STATE_COUNT;
	MACH_CALL(thread_get_state(thread->kernel_thread,
				   i386_THREAD_STATE,
				   (thread_state_t) &state,
				   &count),
		  r);
	ts->eip = (int) routine;
	*--sp = (int) thread;	/* argument to function */
	*--sp = 0;		/* fake return address */
	ts->uesp = (int) sp;	/* set stack pointer */
	ts->ebp = 0;		/* clear frame pointer */
	MACH_CALL(thread_set_state(thread->kernel_thread,
				   i386_THREAD_STATE,
				   (thread_state_t) &state,
				   i386_THREAD_STATE_COUNT),
		  r);
}
Example 5
static int
umb_setup(void)
{
  int i;
  size_t addr_start;
  int size, umb;

  for (i = 0; i < UMBS; i++) {
    umbs[i].in_use = FALSE;
  }

  memcheck_addtype('U', "Upper Memory Block (UMB, XMS 3.0)");

  addr_start = 0x00000;     /* start address */
  while ((size = memcheck_findhole(&addr_start, 1024, 0x100000)) != 0) {
    Debug0((dbg_fd, "findhole - from 0x%5.5zX, %dKb\n", addr_start, size/1024));
    memcheck_reserve('U', addr_start, size);

    if (addr_start == 0xa0000 && config.umb_a0 == 2) {
      // FreeDOS UMB bug, reserve 1 para
      const int rsv = 16;
      addr_start += rsv;
      size -= rsv;
    }
    umb = umb_find_unused();
    umbs[umb].in_use = TRUE;
    umbs[umb].free = TRUE;
    umbs[umb].addr = addr_start;
    umbs[umb].size = size;
  }

  for (i = 0; i < UMBS; i++) {
    if (umbs[i].in_use && umbs[i].free) {
      unsigned int addr = umbs[i].addr;
      vm_size_t size = umbs[i].size;
#if 0
      MACH_CALL((vm_deallocate(mach_task_self(),
			       addr,
			       (vm_size_t) size)), "vm_deallocate");
      MACH_CALL((vm_allocate(mach_task_self(), &addr,
			     (vm_size_t) size,
			     FALSE)),
		"vm_allocate of umb block.");
#else
      Debug0((dbg_fd, "umb_setup: addr %x size 0x%04zx\n",
	      addr, size));
#endif
    }
  }

  return 0;
}
Example 6
static void
rthreads_more_memory(int size, register free_list_t fl)
{
	register int amount;
	register int n;
	vm_address_t where;
	register header_t h;
	kern_return_t r;

	if (size <= vm_page_size) {
		amount = vm_page_size;
		n = vm_page_size / size;
		/*
		 * We lose vm_page_size - n*size bytes here.
		 */
	} else {
		amount = size;
		n = 1;
	}

	MACH_CALL(vm_allocate(mach_task_self(),
			      &where,
			      (vm_size_t) amount,
			      TRUE), r);

	/* We mustn't allocate at address 0, since programs will then see
	 * what appears to be a null pointer for valid data.
	 */

	if (r == KERN_SUCCESS && where == 0) {
		MACH_CALL(vm_allocate(mach_task_self(), &where,
				      (vm_size_t) amount, TRUE), r);

		if (r == KERN_SUCCESS) {
			MACH_CALL(vm_deallocate(mach_task_self(),
						(vm_address_t) 0,
						(vm_size_t) amount), r);
		}
	}

	h = (header_t) where;

	do {
		h->next = fl->head;
		fl->head = h;
		h = (header_t) ((char *) h + size);
	} while (--n != 0);
}
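rthreads_more_memory() only refills the free list; the allocation path that pops blocks from it is not shown. A hypothetical counterpart, assuming the header_t/free_list_t layout used above and omitting the locking a real allocator would need:

/*
 * Hypothetical allocation path (not in the original source): take one
 * block off the free list, refilling the list first if it is empty.
 */
static void *
rthreads_alloc(int size, register free_list_t fl)
{
	register header_t h;

	if ((h = fl->head) == 0) {
		rthreads_more_memory(size, fl);
		h = fl->head;
	}
	fl->head = h->next;
	return (void *) h;
}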
Example 7
reboot()
{
	if(!standalone)
		printf("reboot - This option is not available when you are running with the Unix server\n");
	else
		MACH_CALL(host_reboot, (privileged_host_port, 0));
}
Example 8
int
fprintf(FILE *fp, const char *fmt, ...)
{
	kern_return_t kern_res;
	va_list ap;
	int ret;
	if (!_init)
	{
		_init++;
		MACH_CALL(semaphore_create(mach_task_self(), 
					   &lock, 
					   SYNC_POLICY_FIFO,
					   1),
			  kern_res);
	}
	MACH_CALL(semaphore_wait(lock), kern_res);
	va_start(ap, fmt);
	ret = vfprintf(fp, fmt, ap);
	va_end(ap);
	MACH_CALL(semaphore_signal(lock), kern_res);
	return ret;
}
Example 9
int
fprintf(FILE *fp, const char *fmt, ...)
{
	kern_return_t kern_res;
	va_list ap;
	int ret;
	if (!_init)
	{
		_init++;
		MACH_CALL(lock_set_create(mach_task_self(), 
					  &lock, 
					  1,
					  SYNC_POLICY_FIFO),
			  kern_res);
	}
	MACH_CALL(lock_acquire(lock, 0), kern_res);
	va_start(ap, fmt);
	ret = vfprintf(fp, fmt, ap);
	va_end(ap);
	MACH_CALL(lock_release(lock, 0), kern_res);
	return ret;
}
Example 10
int
printf(const char *fmt, ...)
{
	kern_return_t kern_res;
	va_list ap;
	if (!_init)
	{
		_init++;
		MACH_CALL(semaphore_create(mach_task_self(), 
					   &lock, 
					   SYNC_POLICY_FIFO,
					   1),
			  kern_res);
	}
	MACH_CALL(semaphore_wait(lock), kern_res);
	va_start(ap, fmt);
	vprintf(fmt, ap);
	va_end(ap);
	MACH_CALL(semaphore_signal(lock), kern_res);
	if (kern_res == KERN_SUCCESS)
		return 0;
	else
		return -1;
}
Example 11
/*
 * Destroy a condition variable.
 */
int       
pthread_cond_destroy(pthread_cond_t *cond)
{
	kern_return_t kern_res;
	if (cond->sig == _PTHREAD_COND_SIG)
	{
		LOCK(cond->lock);
		if (cond->busy != (pthread_mutex_t *)NULL)
		{
			UNLOCK(cond->lock);
			return (EBUSY);
		} else
		{
			cond->sig = _PTHREAD_NO_SIG;
			MACH_CALL(semaphore_destroy(mach_task_self(),
						    cond->sem), kern_res);
			UNLOCK(cond->lock);
			return (ESUCCESS);
		}
	} else
		return (EINVAL); /* Not an initialized condition variable structure */
}
Example 12
/*
 * Initialize a condition variable.  Note: 'attr' is ignored.
 */
int       
pthread_cond_init(pthread_cond_t *cond,
		  const pthread_condattr_t *attr)
{
	kern_return_t kern_res;

	LOCK_INIT(cond->lock);
	cond->sig = _PTHREAD_COND_SIG;
	cond->next = (pthread_cond_t *)NULL;
	cond->prev = (pthread_cond_t *)NULL;
	cond->busy = (pthread_mutex_t *)NULL;
	cond->waiters = 0;
	MACH_CALL(semaphore_create(mach_task_self(), 
				   &cond->sem, 
				   SYNC_POLICY_FIFO, 
				   0), kern_res);
	if (kern_res != KERN_SUCCESS)
	{
		cond->sig = _PTHREAD_NO_SIG;  /* Not a valid condition variable */
		return (ENOMEM);
	} else
		return (ESUCCESS);
}
Example 13
/*
 * Suspend waiting for a condition variable.
 * Note: we have to keep a list of condition variables which are using
 * this same mutex variable so we can detect invalid 'destroy' sequences.
 */
static int       
_pthread_cond_wait(pthread_cond_t *cond, 
		   pthread_mutex_t *mutex,
		   const struct timespec *abstime)
{
	int res;
	kern_return_t kern_res;
	pthread_mutex_t *busy;
	tvalspec_t then;
	if (cond->sig == _PTHREAD_COND_SIG_init)
	{
		if ((res = pthread_cond_init(cond, NULL)) != 0)
			return (res);
	}
	if (cond->sig != _PTHREAD_COND_SIG)
		return (EINVAL); /* Not a condition variable */
	LOCK(cond->lock);
	busy = cond->busy;
	if ((busy != (pthread_mutex_t *)NULL) && (busy != mutex))
	{ /* Must always specify the same mutex! */
		UNLOCK(cond->lock);
		return (EINVAL);
	}
	cond->waiters++;
	if (cond->waiters == 1)
	{
		_pthread_cond_add(cond, mutex);
		cond->busy = mutex;
	}
	if ((res = pthread_mutex_unlock(mutex)) != ESUCCESS)
	{
		cond->waiters--;
		if (cond->waiters == 0)
		{
			_pthread_cond_remove(cond, mutex);
			cond->busy = (pthread_mutex_t *)NULL;
		}
		UNLOCK(cond->lock);
		return (res);
	}	
	UNLOCK(cond->lock);
	if (abstime)
	{
		struct timespec now;
		getclock(TIMEOFDAY, &now);
		/* Compute relative time to sleep */
		then.tv_nsec = abstime->tv_nsec - now.tv_nsec;
		then.tv_sec = abstime->tv_sec - now.tv_sec;
		if (then.tv_nsec < 0)
		{
			then.tv_nsec += 1000000000;  /* nsec/sec */
			then.tv_sec--;
		}
		if (((int)then.tv_sec < 0) ||
		    ((then.tv_sec == 0) && (then.tv_nsec == 0)))
		{
			kern_res = KERN_OPERATION_TIMED_OUT;
		} else
		{
			MACH_CALL(semaphore_timedwait(cond->sem, then),
				  kern_res);
		}
	} else
	{
		MACH_CALL(semaphore_wait(cond->sem), kern_res);
	}
	LOCK(cond->lock);
	cond->waiters--;
	if (cond->waiters == 0)
	{
		_pthread_cond_remove(cond, mutex);
		cond->busy = (pthread_mutex_t *)NULL;
	}
	UNLOCK(cond->lock);
	if ((res = pthread_mutex_lock(mutex)) != ESUCCESS)
	{
		return (res);
	}
	if (kern_res == KERN_SUCCESS)
	{
		return (ESUCCESS);
	} else
	if (kern_res == KERN_OPERATION_TIMED_OUT)
	{
		return (ETIMEDOUT);
	} else
	{
		return (EINVAL);
	}
}
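Because _pthread_cond_wait() turns abstime into a relative interval via getclock(TIMEOFDAY, ...), callers should build the deadline against that same clock. A minimal sketch (an assumption, presuming the usual pthread_cond_timedwait() wrapper around this routine) for a five-second timed wait:

/*
 * Illustrative caller: wait with an absolute deadline five seconds from
 * now, measured on the same TIMEOFDAY clock the wait routine subtracts
 * from.  The caller must already hold 'mutex'.
 */
static int
wait_up_to_five_seconds(pthread_cond_t *cond, pthread_mutex_t *mutex)
{
	struct timespec deadline;

	getclock(TIMEOFDAY, &deadline);
	deadline.tv_sec += 5;
	return pthread_cond_timedwait(cond, mutex, &deadline);
}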
Example 14
int
main(void)
{
	mach_port_t 	bootstrap_port;
	int	 	i;
	struct test_dir *td, **tds;
	int 		all;
	kern_return_t 	kr;
	boolean_t 	found;
	int 		argc = 0;
	char 		**argv;


	MACH_CALL(task_get_special_port, (mach_task_self(),
                                       TASK_BOOTSTRAP_PORT,
                                       &bootstrap_port));
	MACH_CALL(bootstrap_ports, (bootstrap_port,
					&privileged_host_port,
					&master_device_port,
					&root_ledger_wired,
					&root_ledger_paged,
					&security_port));

	MACH_FUNC(host_port, mach_host_self, ());

	standalone = is_standalone();

	threads_init();
	console_init();
	_printf_init();
	exception_init();

	printf("\n\n");
	version();
	kernel_version();
	if (standalone)
		printf("Standalone mode\n\n");

	vm_opt = 1;
	print_vm_stats();
	printf("\n");
	get_thread_sched_attr(mach_thread_self(),
			    (policy_state_t) 0,
			    TRUE);
	printf("\n");
	while(1) {
		mach_setjmp(&sa_jmp_buf);
		reset_options();
                /* synthetic benchmarks are not the default type */
                synthetic_fn = NULL;
		reset_more();
		if (!(argc = read_cmd(&argv, "mpts> ")))
			continue;
		for (i = 1; i < argc; i++)
			is_gen_opt(argc, argv, &i, 0, 0);
		if (!strcmp(argv[0],"on")) {
		  	shift_args(&argc, argv);
			if (remote_node(argc, argv)) {
				shift_args(&argc, argv);
			} else {
				interruptible_cmd(usage, 0, 0);
				continue;
			}
		} 
		if (!strcmp(argv[0],"more")) {
			shift_args(&argc, argv);
		} else
			disable_more();
		all = strcmp(argv[0],"*") ? 0 : 1;
		for (found = FALSE, tds = test_dirs; *tds && !found; tds++)
		    for (td = *tds; td->name && !found; td++)
			if ((all && td->is_a_test) 
			    || !strcmp(argv[0],td->name)) {
				if (td->is_a_test)
					printf("_______________________________________________________________________________\n");
			  	argv[0] = td->name;
				if (td->is_a_test)
					interruptible_cmd(td->func,
							  argc,
							  argv);
				else
					(*td->func) (argc, argv);
				if (!all)
					found = TRUE;
			}
		if ((!all) && (!found)) {
                    if (find_proc(argv[0]))
                        /* run synthetic benchmark if we have a proc name */
                        interruptible_cmd(synthetic,argc,argv);
                    else
			interruptible_cmd(usage, 0, 0);
		}
	}
	printf("done\n");
}
Example 15
kern_return_t
set_sched_attr(
	mach_port_t		task,
	mach_port_t		thread,
	struct policy_state 	*ps,
	boolean_t		print)
{
	processor_set_control_port_t 	pset;
	struct thread_basic_info 	thread_basic_info;
	mach_msg_type_number_t 		info_count;

	if ((task && thread) || ((!task) && (!thread)))
		test_error("set_sched_attr", "invalid arguments");


	MACH_FUNC(pset, get_processor_set, ());

	if (ps->policy == -1)		/* dont change it */
	  	return(KERN_SUCCESS);

	switch(ps->policy) {
	case POLICY_TIMESHARE:
		if (task) {
			MACH_CALL(task_set_policy,
				  (task,
				   pset,
				   ps->policy,
				   &ps->base.tr_base,
				   POLICY_TIMESHARE_BASE_COUNT,
				   &ps->limit.tr_limit,
				   POLICY_TIMESHARE_LIMIT_COUNT,
				   TRUE));
		} else {
			MACH_CALL(thread_set_policy,
				  (thread,
				   pset,
				   ps->policy,
				   &ps->base.tr_base,
				   POLICY_TIMESHARE_BASE_COUNT,
				   &ps->limit.tr_limit,
				   POLICY_TIMESHARE_LIMIT_COUNT));
		}
		break;
	case POLICY_RR:
		if (get_quantum(ps) == -1) {
			test_error("set_sched_attr", "invalid quantum value");
		}
		if (task) {
		  	MACH_CALL(task_set_policy,
				  (task,
				   pset,
				   ps->policy,
				   &ps->base.rr_base,
				   POLICY_RR_BASE_COUNT,
				   &ps->limit.rr_limit,
				   POLICY_RR_LIMIT_COUNT,
				   TRUE));
		} else {
		  	MACH_CALL(thread_set_policy,
				  (thread,
				   pset,
				   ps->policy,
				   &ps->base.rr_base,
				   POLICY_RR_BASE_COUNT,
				   &ps->limit.rr_limit,
				   POLICY_RR_LIMIT_COUNT));
		}
		break;
	case POLICY_FIFO:
		if (task) {
			MACH_CALL(task_set_policy,
				  (task,
				   pset,
				   ps->policy,
				   &ps->base.ff_base,
				   POLICY_FIFO_BASE_COUNT,
				   &ps->limit.ff_limit,
				   POLICY_FIFO_LIMIT_COUNT,
				   TRUE));
		} else {
			MACH_CALL(thread_set_policy,
				  (thread,
				   pset,
				   ps->policy,
				   &ps->base.ff_base,
				   POLICY_FIFO_BASE_COUNT,
				   &ps->limit.ff_limit,
				   POLICY_FIFO_LIMIT_COUNT));
		}
		break;
	}
	if (debug || print)
		print_policy(ps);
	return KERN_SUCCESS;
}
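A call supplies exactly one of the task or thread ports. The sketch below is hypothetical (not in the original source): it sets a timesharing base and maximum priority on the current thread only, using the struct policy_state fields referenced above and assuming the rest of that structure.

/*
 * Hypothetical usage: timeshare the calling thread at priority 'pri'.
 * MACH_PORT_NULL for the task selects the thread branch above.
 */
static kern_return_t
timeshare_self(int pri)
{
	struct policy_state ps;

	ps.policy = POLICY_TIMESHARE;
	ps.base.tr_base.base_priority = pri;
	ps.limit.tr_limit.max_priority = pri;
	return set_sched_attr(MACH_PORT_NULL, mach_thread_self(), &ps, FALSE);
}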
Example 16
kern_return_t
get_sched_attr(
	mach_port_t		task,
	mach_port_t		thread,
	struct policy_state 	*ps,
	boolean_t		print)
{
	struct thread_basic_info 	thread_basic_info;
	struct task_basic_info 		task_basic_info;
	mach_msg_type_number_t 		info_count;
	struct policy_state 		tmp_ps;

	if (!ps)
	  	ps = &tmp_ps;

	if ((task && thread) || ((!task) && (!thread)))
		test_error("get_sched_attr", "invalid arguments");

	if (task) {
		info_count = sizeof(struct task_basic_info);
		MACH_CALL(task_info, (task,
				      TASK_BASIC_INFO,
				      (task_info_t)&task_basic_info,
				      &info_count));
		ps->policy = task_basic_info.policy;
	} else {
		info_count = sizeof(struct thread_basic_info);
		MACH_CALL(thread_info, (thread,
					THREAD_BASIC_INFO,
					(thread_info_t)&thread_basic_info,
					&info_count));
		ps->policy = thread_basic_info.policy;
	}

	switch(ps->policy) {
	case POLICY_TIMESHARE: {
		if (task) {
			struct policy_timeshare_base time_share;
			info_count = POLICY_TIMESHARE_BASE_COUNT;

			MACH_CALL(task_info, (task,
					      TASK_SCHED_TIMESHARE_INFO,
					      (task_info_t)&time_share,
					      &info_count));
			ps->base.tr_base.base_priority =
			  			time_share.base_priority;
			ps->limit.tr_limit.max_priority = -1;
			  			
		} else {
			struct policy_timeshare_info time_share;

			info_count = POLICY_TIMESHARE_INFO_COUNT;
			MACH_CALL(thread_info, (thread,
						THREAD_SCHED_TIMESHARE_INFO,
						(thread_info_t)&time_share,
						&info_count));
			ps->base.tr_base.base_priority =
			  			time_share.base_priority;
			ps->limit.tr_limit.max_priority =
			  			time_share.max_priority;
		}
		break;
		}
	case POLICY_RR: {
		if (task) {
			struct policy_rr_base round_robin;

			info_count = POLICY_RR_BASE_COUNT;
			MACH_CALL(task_info, (task,
					      TASK_SCHED_RR_INFO,
					      (task_info_t)&round_robin,
					      &info_count));
			ps->base.rr_base.base_priority = 
			  			round_robin.base_priority;
			ps->base.rr_base.quantum = -1;
			ps->limit.rr_limit.max_priority = -1;
		} else {
			struct policy_rr_info round_robin;

			info_count = POLICY_RR_INFO_COUNT;
			MACH_CALL(thread_info, (thread,
						THREAD_SCHED_RR_INFO,
						(thread_info_t)&round_robin,
						&info_count));
			ps->base.rr_base.base_priority = 
			  			round_robin.base_priority;
			ps->base.rr_base.quantum = round_robin.quantum;
			ps->limit.rr_limit.max_priority =
						round_robin.max_priority;
		}
		break;
		}
	case POLICY_FIFO: {
		if (task) {
			struct policy_fifo_base fifo;

			info_count = POLICY_FIFO_BASE_COUNT;
			MACH_CALL(task_info, (task,
					      TASK_SCHED_FIFO_INFO,
					      (task_info_t)&fifo,
					      &info_count));
			ps->base.ff_base.base_priority = fifo.base_priority;
			ps->limit.ff_limit.max_priority = -1;
		} else {
			struct policy_fifo_info fifo;

			info_count = POLICY_FIFO_INFO_COUNT;
			MACH_CALL(thread_info, (thread,
						THREAD_SCHED_FIFO_INFO,
						(thread_info_t)&fifo,
						&info_count));
			ps->base.ff_base.base_priority = fifo.base_priority;
			ps->limit.ff_limit.max_priority = fifo.max_priority;
		}
		break;
		}
	default:
	  	printf("get_sched_attr: unknown policy 0x%x: ", ps->policy);
	}
	if (debug || print)
		print_policy(ps);
	return(KERN_SUCCESS);
}
Example 17
/*
 * Set up the initial state of a MACH thread so that it will invoke
 * routine(child) when it is resumed.
 */
void
rthread_setup(rthread_t child, thread_port_t thread, rthread_fn_t routine)
{
	struct hp700_thread_state state;
	kern_return_t r;
	unsigned size;

	/*
	 * Set up hp700 call frame and registers.
	 */
	size = HP700_THREAD_STATE_COUNT;
	MACH_CALL(thread_get_state(thread, HP700_THREAD_STATE, 
				   (thread_state_t) &state, &size), r);

	/*
	 * set the PC queue to point to routine.
	 */
	state.iioq_head = (unsigned)routine;
	state.iioq_tail = (unsigned)routine + 4;

	/*
	 * setup the first argument to routine to be the address of child.
	 */
	state.arg0 = (unsigned) child;

	state.dp = get_dp();

	/*
	 * establish a user stack for this thread. There is no guarantee
	 * that the stack is aligned correctly. We have to align it to
	 * a double word boundary.  (In fact, if RTHREAD_STACK_OFFSET
	 * is aligned, then so is the stack.  But little things can go
	 * wrong.)
	 */
	state.sp = (rthread_stack_base(child, RTHREAD_STACK_OFFSET) + 7)
			& 0xfffffff8;

	/*
	 * clear off the first 48 bytes of the stack, this is going to be 
	 * our fake stack marker, we don't want any routine to think there
	 * is a stack frame below us.
	 */
	bzero((char*)state.sp, 48);

	/*
	 * now set the stack pointer 48 bytes deeper, 32 for a frame marker 
	 * and 16 for the standard 4 arguments.
	 */
	state.sp += 48;

	/*
	 * Now we have to initialize the floating point status registers
	 * since they will be in a random state and could cause a trap.
	state.fr0 = 0.0;
	state.fr1 = 0.0;
	state.fr2 = 0.0;
	state.fr3 = 0.0;
	 */

	/*
	 * the psw is fine it is set up as CQPDI by the kernel. Set the
	 * registers into the saved state of the thread.
	 */
	MACH_CALL(thread_set_state(thread, HP700_THREAD_STATE,
				   (thread_state_t) &state,
				   HP700_THREAD_STATE_COUNT),r);
}