Example #1
void
exception_no_server(void)
{
	ipc_thread_t self = current_thread();

	/*
	 *	If this thread is being terminated, cooperate.
	 */

	while (thread_should_halt(self))
		thread_halt_self(thread_exception_return);


#if 0
	if (thread_suspend (self) == KERN_SUCCESS)
	  thread_exception_return ();
#endif

#if	MACH_KDB
	if (debug_user_with_kdb) {
		/*
		 *	Debug the exception with kdb.
		 *	If kdb handles the exception,
		 *	then thread_kdb_return won't return.
		 */

		db_printf("No exception server, calling kdb...\n");
		thread_kdb_return();
	}
#endif	/* MACH_KDB */

	/*
	 *	All else failed; terminate task.
	 */

	(void) task_terminate(self->task);
	thread_halt_self(thread_exception_return);
	panic("terminating the task didn't kill us");
	/*NOTREACHED*/
}
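All of the examples on this page revolve around the same idiom: a kernel thread that may be halted polls thread_should_halt() at a clean point and, when it returns true, yields through thread_halt_self(). A minimal sketch of such a cancellation point, assuming a hypothetical my_service_continue continuation (note that in some of the kernels below thread_halt_self() takes no continuation argument):

/* Hypothetical cancellation point; my_service_continue is an assumed
 * continuation routine.  If the thread is merely being suspended,
 * thread_halt_self() does not return here: execution resumes at the
 * continuation, so no live state may be kept on the stack across it. */
void
my_service_checkpoint(void)
{
	thread_t self = current_thread();

	while (thread_should_halt(self))
		thread_halt_self(my_service_continue);
}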
Example #2
File: ppp_comp.c Project: AllardJ/Tomato
/* thread for calling back to a compressor's memory allocator
 * Needed for Digital UNIX since its VM can't handle requests
 * for large amounts of memory without blocking.  The thread
 * provides a context in which we can call a memory allocator
 * that may block.
 */
static void
ppp_comp_alloc(comp_state_t *cp)
{
    int len, cmd;
    unsigned char *compressor_options;
    thread_t thread;
    void *(*comp_allocator)();


#if defined(MAJOR_VERSION) && (MAJOR_VERSION <= 2)

    /* In 2.x and earlier the argument gets passed
     * in the thread structure itself.  Yuck.
     */
    thread = current_thread();
    cp = thread->reply_port;
    thread->reply_port = PORT_NULL;

#endif

    for (;;) {
	assert_wait((vm_offset_t)&cp->memreq.thread_status, TRUE);
	thread_block();

	if (thread_should_halt(current_thread()))
	    thread_halt_self();
	cmd = cp->memreq.cmd;
	compressor_options = &cp->memreq.comp_opts[0];
	len = compressor_options[1];
	if (cmd == PPPIO_XCOMP) {
	    cp->memreq.returned_mem = cp->xcomp->comp_alloc(compressor_options, len);
	    if (!cp->memreq.returned_mem) {
		cp->memreq.thread_status = ENOSR;
	    } else {
		cp->memreq.thread_status = 0;
	    }
	} else {
	    cp->memreq.returned_mem = cp->rcomp->decomp_alloc(compressor_options, len);
	    if (!cp->memreq.returned_mem) {
	        cp->memreq.thread_status = ENOSR;
	    } else {
		cp->memreq.thread_status = 0;
	    }
	}
    }
}
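Only the allocator thread is shown here; the requesting side is implied by the event the thread sleeps on. A sketch of that side under the same comp_state_t layout follows; the function name and the EAGAIN "in progress" sentinel are assumptions, not code from the project:

/* Hypothetical request poster for the allocator thread above.  It
 * fills in memreq, wakes the thread on the event it sleeps on, and
 * returns; the caller retries until thread_status leaves EAGAIN
 * (the thread sets it to 0 or ENOSR when the allocation finishes). */
static int
post_alloc_request(comp_state_t *cp, int cmd, unsigned char *opts)
{
    int len = opts[1];			/* option block length, as in the loop */

    bcopy((char *) opts, (char *) cp->memreq.comp_opts, len);
    cp->memreq.cmd = cmd;
    cp->memreq.thread_status = EAGAIN;	/* assumed "in progress" marker */
    thread_wakeup((vm_offset_t) &cp->memreq.thread_status);
    return EAGAIN;
}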
Example #3
File: ast.c Project: quan-na/gnumach
void
ast_taken(void)
{
	thread_t self = current_thread();
	ast_t reasons;

	/*
	 *	Interrupts are still disabled.
	 *	We must clear need_ast and then enable interrupts.
	 */

	reasons = need_ast[cpu_number()];
	need_ast[cpu_number()] = AST_ZILCH;
	(void) spl0();

	/*
	 *	These actions must not block.
	 */

	if (reasons & AST_NETWORK)
		net_ast();

	/*
	 *	Make darn sure that we don't call thread_halt_self
	 *	or thread_block from the idle thread.
	 */

	if (self != current_processor()->idle_thread) {
#ifndef MIGRATING_THREADS
		while (thread_should_halt(self))
			thread_halt_self();
#endif

		/*
		 *	One of the previous actions might well have
		 *	woken a high-priority thread, so we use
		 *	csw_needed in addition to AST_BLOCK.
		 */

		if ((reasons & AST_BLOCK) ||
		    csw_needed(self, current_processor())) {
			counter(c_ast_taken_block++);
			thread_block(thread_exception_return);
		}
	}
}
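The other half of this handshake is posting the AST. A hedged sketch of what that amounts to, given the need_ast[]/AST_ZILCH conventions visible above (the helper name is made up; GNU Mach wraps this in its ast_on() macro):

/* Hypothetical AST-posting helper (name assumed).  ORs the reason
 * into the per-CPU word that ast_taken() consumes and resets to
 * AST_ZILCH.  On machines with a separate "AST pending" trap flag,
 * that flag must be raised here as well (aston()); omitted in this
 * sketch. */
void
post_ast(int mycpu, ast_t reason)
{
	need_ast[mycpu] |= reason;
}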
Example #4
void
profile_thread(void)
{
    spl_t	    s;
    buffer_t	    buf_entry;
    queue_entry_t   prof_queue_entry;
    prof_data_t	    pbuf;
    kern_return_t   kr;
    int		    j;

    thread_swappable(current_act(), FALSE);

    /* Initialise the queue header for the prof_queue */
    mpqueue_init(&prof_queue);

    while (TRUE) {

	/* Dequeue the first buffer. */
	s = splsched();
	mpdequeue_head(&prof_queue, &prof_queue_entry);
	splx(s);

	if ((buf_entry = (buffer_t) prof_queue_entry) == NULLPBUF) { 
	    assert_wait((event_t) profile_thread, FALSE);
	    thread_block((void (*)(void)) 0);
	    if (current_thread()->wait_result != THREAD_AWAKENED)
		break;
	} else 
#if DCI
	{
	    register int    sum_samples = 0;
	    int		    i;

	    pbuf = buf_entry->p_prof;
	    /*
	     * sum all the points from all the cpus on the machine.
	     */
	    for(i=0;i < NCPUS; i++)
		sum_samples += buf_entry->p_index[i];

	    kr = send_samples(pbuf->prof_port, (void *)buf_entry->p_zone,
			(mach_msg_type_number_t)sum_samples);
	    if (kr != KERN_SUCCESS)
	    {
		task_suspend(pbuf->task); /* suspend task */
		kr = send_notices(pbuf->prof_port, (void *)buf_entry->p_zone,
				  (mach_msg_type_number_t)sum_samples,
				  MACH_SEND_ALWAYS);
	    }
	    bzero((char *)buf_entry->p_zone, NCPUS*SIZE_PROF_BUFFER);
#else
	{
	    int		    dropped;

	    pbuf = buf_entry->p_prof;
	    kr = send_samples(pbuf->prof_port, (void *)buf_entry->p_zone,
			(mach_msg_type_number_t)buf_entry->p_index);
	    profile_sample_count += buf_entry->p_index;
	    if (kr != KERN_SUCCESS)
	      printf("send_samples(%x, %x, %d) error %x\n",
			pbuf->prof_port, buf_entry->p_zone, buf_entry->p_index, kr); 
	    dropped = buf_entry->p_dropped;
	    if (dropped > 0) {
		printf("kernel: profile dropped %d sample%s\n", dropped,
		       dropped == 1 ? "" : "s");
		buf_entry->p_dropped = 0;
	    }

#endif /* DCI */
	    /* Indicate you've finished the dirty job */
#if DCI
	    {
		int i;
		for(i=0;i<NCPUS;i++)
		    buf_entry->p_full[i] = FALSE;
	    }
#else
	    buf_entry->p_full = FALSE;
#endif /* DCI */
	    if (buf_entry->p_wakeme)
	      thread_wakeup((event_t) &buf_entry->p_wakeme);
	}

    }
    /* The profile thread has been signalled to exit.  Any threads waiting
       for the last buffer of samples to be acknowledged should be woken
       up now.  */
    profile_thread_id = THREAD_NULL;
    while (1) {
	s = splsched();
	mpdequeue_head(&prof_queue, &prof_queue_entry);
	splx(s);
	if ((buf_entry = (buffer_t) prof_queue_entry) == NULLPBUF)
	    break;
	if (buf_entry->p_wakeme)
	    thread_wakeup((event_t) &buf_entry->p_wakeme);
    }
#if 0	/* XXXXX */
    thread_halt_self();
#else
    panic("profile_thread(): halt_self");
#endif	/* XXXXX */
}

/*
 *****************************************************************************
 * send_last_sample_buf is the drain mechanism to allow partially filled
 * profile buffers to be sent to the receive_prof thread in the server.
 *****************************************************************************
 */

void
send_last_sample_buf(prof_data_t pbuf)
{
    spl_t    s;
    buffer_t buf_entry;

    if (pbuf == NULLPROFDATA)
	return;

    /* Ask for the sending of the last PC buffer.
     * Make a request to the profile_thread by inserting
     * the buffer in the send queue, and wake it up.
     * p_wakeme is set so the profile_thread wakes us
     * once the buffer has been sent.
     */
    buf_entry = pbuf->prof_area + pbuf->prof_index;
    buf_entry->p_prof = pbuf;

    /* 
       Watch out in case profile thread exits while we are about to
       queue data for it.
     */
    s = splsched();
    if (profile_thread_id == THREAD_NULL)
	splx(s);
    else {
	buf_entry->p_wakeme = 1;
	mpenqueue_tail(&prof_queue, &buf_entry->p_list);
	thread_wakeup((event_t) profile_thread);
	assert_wait((event_t) &buf_entry->p_wakeme, TRUE);
	splx(s); 
	thread_block((void (*)(void)) 0);
    }
}
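send_last_sample_buf() handles only the final, partially filled buffer; in the steady state it is the profiling interrupt path that queues full buffers. A sketch of that producer side under the same field names (the function name and spl handling are assumptions):

/* Hypothetical steady-state producer for profile_thread above.  Full
 * buffers are queued at the tail with p_wakeme clear, so the thread
 * sends them without waking anybody; only the final partial buffer
 * from send_last_sample_buf() sets p_wakeme and waits. */
void
queue_full_buffer(buffer_t buf_entry)
{
    spl_t s;

    buf_entry->p_wakeme = 0;
    s = splsched();
    mpenqueue_tail(&prof_queue, &buf_entry->p_list);
    splx(s);
    thread_wakeup((event_t) profile_thread);
}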
Example #5
void
exception_raise_continue_slow(
	mach_msg_return_t mr,
	ipc_kmsg_t 	  kmsg,
	mach_port_seqno_t seqno)
{
	ipc_thread_t self = current_thread();
	ipc_port_t reply_port = self->ith_port;
	ipc_mqueue_t reply_mqueue = &reply_port->ip_messages;

	while (mr == MACH_RCV_INTERRUPTED) {
		/*
		 *	Somebody is trying to force this thread
		 *	to a clean point.  We must cooperate
		 *	and then resume the receive.
		 */

		while (thread_should_halt(self)) {
			/* if thread is about to terminate, release the port */
			if (self->ast & AST_TERMINATE)
				ipc_port_release(reply_port);
			/*
			 *	Use the continuation to release the port in
			 *	case the thread is about to halt.
			 */
			thread_halt_self(thread_release_and_exception_return);
		}

		ip_lock(reply_port);
		if (!ip_active(reply_port)) {
			ip_unlock(reply_port);
			mr = MACH_RCV_PORT_DIED;
			break;
		}

		imq_lock(reply_mqueue);
		ip_unlock(reply_port);

		mr = ipc_mqueue_receive(reply_mqueue, MACH_MSG_OPTION_NONE,
					MACH_MSG_SIZE_MAX,
					MACH_MSG_TIMEOUT_NONE,
					FALSE, exception_raise_continue,
					&kmsg, &seqno);
		/* reply_mqueue is unlocked */
	}
	ipc_port_release(reply_port);

	assert((mr == MACH_MSG_SUCCESS) ||
	       (mr == MACH_RCV_PORT_DIED));

	if (mr == MACH_MSG_SUCCESS) {
		/*
		 *	Consume the reply message.
		 */

		ipc_port_release_sonce(reply_port);
		mr = exception_parse_reply(kmsg);
	}

	if ((mr == KERN_SUCCESS) ||
	    (mr == MACH_RCV_PORT_DIED)) {
		thread_exception_return();
		/*NOTREACHED*/
	}

	if (self->ith_exc != KERN_SUCCESS) {
		exception_try_task(self->ith_exc,
				   self->ith_exc_code,
				   self->ith_exc_subcode);
		/*NOTREACHED*/
	}

	exception_no_server();
	/*NOTREACHED*/
}
Example #6
File: profile.c Project: Prajna/mach
void profile_thread() 
{
	struct message {
		mach_msg_header_t	head;
		mach_msg_type_t		type;
		int			arg[SIZE_PROF_BUFFER+1];
	} msg;

	register spl_t	s;
	buf_to_send_t	buf_entry;
	queue_entry_t	prof_queue_entry;
	prof_data_t	pbuf;
	simple_lock_t 	lock;
	mach_msg_return_t mr;
	int		j;

	/* Initialise the queue header for the prof_queue */
	mpqueue_init(&prof_queue);

	/* Template initialisation of header and type structures */
	msg.head.msgh_bits = MACH_MSGH_BITS(MACH_MSG_TYPE_COPY_SEND, MACH_MSG_TYPE_MAKE_SEND_ONCE);
	msg.head.msgh_size = sizeof(msg); 
	msg.head.msgh_local_port = MACH_PORT_NULL;
	msg.head.msgh_kind = MACH_MSGH_KIND_NORMAL;
	msg.head.msgh_id = 666666;
	
	msg.type.msgt_name = MACH_MSG_TYPE_INTEGER_32;
	msg.type.msgt_size = 32;
	msg.type.msgt_number = SIZE_PROF_BUFFER+1;
	msg.type.msgt_inline = TRUE;
	msg.type.msgt_longform = FALSE;
	msg.type.msgt_deallocate = FALSE;
	msg.type.msgt_unused = 0;

	while (TRUE) {

	   /* Dequeue the first buffer. */
	   s = splsched();
	   mpdequeue_head(&prof_queue, &prof_queue_entry);
	   splx(s);

	   if ((buf_entry = (buf_to_send_t) prof_queue_entry) == NULLBTS) {
		thread_sleep((event_t) profile_thread, lock, TRUE);
		if (current_thread()->wait_result != THREAD_AWAKENED)
			break;
	   }
	   else {
		task_t		curr_task;
                thread_t	curr_th;
		register int 	*sample;
                int 		curr_buf;
		int 		imax;

                curr_th = (thread_t) buf_entry->thread;
                curr_buf = (int) buf_entry->number; 
		pbuf = curr_th->profil_buffer;

		/* Set the remote port */
		msg.head.msgh_remote_port = (mach_port_t) pbuf->prof_port;

                sample = pbuf->prof_area[curr_buf].p_zone;
	        imax = pbuf->prof_area[curr_buf].p_index;
	        for (j = 0; j < imax; j++, sample++)
		    msg.arg[j] = *sample;

	        /* Let hardclock() know you've finished the dirty job */
	        pbuf->prof_area[curr_buf].p_full = FALSE;

	        /*
		 * Store the number of samples actually sent 
	         * as the last element of the array.
		 */
	        msg.arg[SIZE_PROF_BUFFER] = imax;

	        mr = mach_msg(&(msg.head), MACH_SEND_MSG,
			      sizeof(struct message), 0,
			      MACH_PORT_NULL, MACH_MSG_TIMEOUT_NONE,
			      MACH_PORT_NULL);

		if (mr != MACH_MSG_SUCCESS) {
			printf("profile_thread: mach_msg failed, returned %x\n",
			       (int) mr);
		}

		if (buf_entry->wakeme)
			thread_wakeup((event_t) &buf_entry->wakeme);
		kmem_free(kernel_map, (vm_offset_t) buf_entry,
			  sizeof(struct buf_to_send));
	   }
	}
	/* The profile thread has been signalled to exit.  There may still
	   be sample data queued for us, which we must now throw away.
	   Once we set profile_thread_id to null, hardclock() will stop
	   queueing any additional samples, so we do not need to alter
	   the interrupt level.  */
	profile_thread_id = THREAD_NULL;
	while (1) {
		mpdequeue_head(&prof_queue, &prof_queue_entry);
		if ((buf_entry = (buf_to_send_t) prof_queue_entry) == NULLBTS)
			break;
		if (buf_entry->wakeme)
			thread_wakeup((event_t) &buf_entry->wakeme);
		kmem_free(kernel_map, (vm_offset_t) buf_entry,
					sizeof(struct buf_to_send));
	}

	thread_halt_self();
}
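Since this profile_thread() kmem_free()s every buf_to_send it dequeues, the producer has to allocate each entry from kernel_map. A sketch of that side, using the field names visible above (the list field, the function name, and the error handling are assumptions):

/* Hypothetical producer matching the consumer loop above: allocates a
 * buf_to_send from kernel_map (profile_thread frees it), fills in the
 * thread and buffer number, queues the entry, and wakes the thread. */
static void
queue_prof_buffer(thread_t th, int buf_number)
{
	buf_to_send_t entry;

	if (profile_thread_id == THREAD_NULL)
		return;		/* profiler has exited; stop queueing */
	if (kmem_alloc(kernel_map, (vm_offset_t *) &entry,
		       sizeof(struct buf_to_send)) != KERN_SUCCESS)
		return;		/* out of memory: drop this buffer */
	entry->thread = th;
	entry->number = buf_number;
	entry->wakeme = 0;
	mpenqueue_tail(&prof_queue, &entry->list);	/* "list" is assumed */
	thread_wakeup((event_t) profile_thread);
}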