Example #1
0
boolean_t
kbd_read_done(
	io_req_t	ior)
{
	register int	s, count;

	s = SPLKD();
	kdq_lock(&kbd_queue);
	if (kdq_empty(&kbd_queue)) {
	    /* Still no input: re-queue this request for the next wakeup. */
	    ior->io_done = kbd_read_done;
	    mpenqueue_tail(&kbd_read_queue, (queue_entry_t)ior);
	    kdq_unlock(&kbd_queue);
	    splx(s);
	    return (FALSE);
	}

	/* Copy out whole events while they fit in the caller's buffer. */
	count = 0;
	while (!kdq_empty(&kbd_queue) &&
	       count + sizeof(kd_event) <= ior->io_count) {
	    register kd_event *ev;

	    ev = kdq_get(&kbd_queue);
	    *(kd_event *)(&ior->io_data[count]) = *ev;
	    count += sizeof(kd_event);
	}
	kdq_unlock(&kbd_queue);
	splx(s);

	ior->io_residual = ior->io_count - count;
	ds_read_done(ior);

	return (TRUE);
}
Example #2
0
__private_extern__ kern_return_t
chudxnu_cpusig_send(int otherCPU, uint32_t request_code)
{
	int				thisCPU;
	kern_return_t			retval = KERN_FAILURE;
	chudcpu_signal_request_t	request;
	uint64_t			deadline;
	chudcpu_data_t			*target_chudp;
	boolean_t old_level;

	disable_preemption();
	// Force interrupts on for a cross-CPU signal.
	old_level = chudxnu_set_interrupts_enabled(TRUE);
	thisCPU = cpu_number();

	if ((unsigned) otherCPU < real_ncpus &&
	    thisCPU != otherCPU &&
	    cpu_data_ptr[otherCPU]->cpu_running) {

		target_chudp = (chudcpu_data_t *)
					cpu_data_ptr[otherCPU]->cpu_chud;

		/* Fill out request */
		request.req_sync = 0xFFFFFFFF;		/* set sync flag */
		//request.req_type = CPRQchud;		/* set request type */
		request.req_code = request_code;	/* set request */

		KERNEL_DEBUG_CONSTANT(
			MACHDBG_CODE(DBG_MACH_CHUD,
				     CHUD_CPUSIG_SEND) | DBG_FUNC_NONE,
			otherCPU, request_code, 0, 0, 0);

		/*
		 * Insert the new request in the target cpu's request queue
		 * and signal target cpu.
		 */
		mpenqueue_tail(&target_chudp->cpu_request_queue,
			       &request.req_entry);
		i386_signal_cpu(otherCPU, MP_CHUD, ASYNC);

		/* Wait for response or timeout */
		deadline = mach_absolute_time() + LockTimeOut;
		while (request.req_sync != 0) {
			if (mach_absolute_time() > deadline) {
				panic("chudxnu_cpusig_send(%d,%d) timed out\n",
					otherCPU, request_code);
			}
			cpu_pause();
		}
		retval = KERN_SUCCESS;
	} else {
		retval = KERN_INVALID_ARGUMENT;
	}

	chudxnu_set_interrupts_enabled(old_level);
	enable_preemption();
	return retval;
}
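For context, the sender above spins on request.req_sync until the target CPU
clears it.  A minimal sketch of the responder side is given below; it assumes
the target CPU's MP_CHUD handler drains cpu_request_queue, that req_entry is
the first field of chudcpu_signal_request_t (so the queue-entry cast is valid),
and that handle_cpusig_request() is a hypothetical dispatch routine, not part
of the code shown above.

static void
chud_cpusig_handler_sketch(void)
{
	chudcpu_data_t			*chudp;
	chudcpu_signal_request_t	*reqp;

	chudp = (chudcpu_data_t *) cpu_data_ptr[cpu_number()]->cpu_chud;
	for (;;) {
		/* Pop the next cross-CPU request, if any. */
		mpdequeue_head(&chudp->cpu_request_queue,
			       (queue_entry_t *) &reqp);
		if (reqp == NULL)
			break;
		handle_cpusig_request(reqp->req_code);	/* assumed dispatch */
		reqp->req_sync = 0;	/* release the spinning sender */
	}
}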
Example #3
0
void
profile(natural_t	pc,
	prof_data_t	pbuf)
{
    natural_t inout_val = pc; 
    buffer_t buf_entry;

    if (pbuf == NULLPROFDATA)
	return;
    
    /* Insert the PC value into the thread's buffer; the insertion
       status comes back through inout_val. */
    set_pbuf_value(pbuf, &inout_val); 
    switch((int)inout_val) {
    case 0:
	/* Buffer was already full; the sample was dropped. */
	if (profile_thread_id == THREAD_NULL) {
	  reset_pbuf_area(pbuf);
	}
	break;
    case 1: 
	/* Normal case, value successfully inserted */
	break;
    case 2:
	/*
	 * The value we have just inserted caused the
	 * buffer to be full, and ready to be sent.
	 * If profile_thread_id is null, the profile
	 * thread has been killed.  Since this generally
	 * happens only when the O/S server task of which
	 * it is a part is killed, it is not a great loss
	 * to throw away the data.
	 */
	if (profile_thread_id == THREAD_NULL) {
	  reset_pbuf_area(pbuf);
	  break;
	}

	buf_entry = (buffer_t) &pbuf->prof_area[pbuf->prof_index];
	buf_entry->p_prof = pbuf;
	mpenqueue_tail(&prof_queue, &buf_entry->p_list);
	
	/* Switch to another buffer */
	reset_pbuf_area(pbuf);
	
	/* Wake up the profile thread */
	if (profile_thread_id != THREAD_NULL)
	  thread_wakeup((event_t) profile_thread);
	break;
      
    default:
	printf("profile: unexpected case\n");
    }
}
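The switch above depends on the status codes that set_pbuf_value() hands back
through inout_val.  A plausible reconstruction of that contract is sketched
below; it is not the actual implementation, and it assumes the p_index,
p_full, and p_zone fields and the SIZE_PROF_BUFFER constant seen in the other
examples.

void
set_pbuf_value_sketch(prof_data_t pbuf, natural_t *val)
{
	buffer_t	bufp = (buffer_t) &pbuf->prof_area[pbuf->prof_index];

	if (bufp->p_full) {
		*val = 0;			/* already full: sample dropped */
		return;
	}
	bufp->p_zone[bufp->p_index++] = *val;	/* store the sample */
	if (bufp->p_index == SIZE_PROF_BUFFER) {
		bufp->p_full = TRUE;
		*val = 2;			/* this store filled the buffer */
	} else
		*val = 1;			/* normal case: sample stored */
}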
Example #4
0
int
kbdread(
	dev_t		dev,
	io_req_t	ior)
{
	register int	err, s, count;

	err = device_read_alloc(ior, (vm_size_t)ior->io_count);
	if (err != KERN_SUCCESS)
	    return (err);

	s = SPLKD();
	kdq_lock(&kbd_queue);
	if (kdq_empty(&kbd_queue)) {
	    /* No input buffered: fail immediately or queue the request. */
	    if (ior->io_mode & D_NOWAIT) {
		kdq_unlock(&kbd_queue);
		splx(s);
		return (D_WOULD_BLOCK);
	    }
	    ior->io_done = kbd_read_done;
	    mpenqueue_tail(&kbd_read_queue, (queue_entry_t) ior);
	    kdq_unlock(&kbd_queue);
	    splx(s);
	    return (D_IO_QUEUED);
	}
	/* Copy out whole events while they fit in the caller's buffer. */
	count = 0;
	while (!kdq_empty(&kbd_queue) &&
	       count + sizeof(kd_event) <= ior->io_count) {
	    register kd_event *ev;

	    ev = kdq_get(&kbd_queue);
	    *(kd_event *)(&ior->io_data[count]) = *ev;
	    count += sizeof(kd_event);
	}
	kdq_unlock(&kbd_queue);
	splx(s);
	ior->io_residual = ior->io_count - count;
	return (D_SUCCESS);
}
Example #5
0
void
DCIprofile(int cpunum,
	natural_t threadnum,
	natural_t sp,
	natural_t pc,
	prof_data_t	pbuf)
{
    natural_t inout_val1 = threadnum;
    natural_t inout_val2 = sp;
    natural_t inout_val3 = pc; 
    buffer_t buf_entry;

    if (pbuf == NULLPROFDATA)
	return;
    
    /* Insert the thread number, SP, and PC values into the CPU's buffer;
       the insertion status comes back through inout_val1. */
    DCIset_pbuf_value(pbuf, cpunum, &inout_val1, &inout_val2, &inout_val3); 
    switch(inout_val1) {
    case 0: 
	/*
	 * If we get here then we have wrapped around the zones.  This
	 * case should only happen under MACH_PROF, since that
	 * configuration has no throttling mechanism.
	 */
	if (profile_thread_id != THREAD_NULL)
	    panic("profile.c:thread died.\n");
	DCIreset_pbuf_area(pbuf, cpunum);
	break;
    case 1: 
	/*
	 * Normal case, value successfully inserted
	 */
	break;
    case 2:
	/*
	 * The value we have just inserted caused the
	 * buffer to be full, and ready to be sent.
	 * If profile_thread_id is null, the profile
	 * thread has been killed.  Since this generally
	 * happens only when the O/S server task of which
	 * it is a part is killed, it is not a great loss
	 * to throw away the data.
	 */
	if (profile_thread_id == THREAD_NULL) {
	  DCIreset_pbuf_area(pbuf, cpunum);
	  break;
	}

	buf_entry = (buffer_t) &pbuf->prof_area[pbuf->prof_index];
	buf_entry->p_prof = pbuf;
	mpenqueue_tail(&prof_queue, &buf_entry->p_list);
	
	/* Switch to another buffer */
	DCIreset_pbuf_area(pbuf, cpunum);
	
	/* Wake up the profile thread */
	if (profile_thread_id != THREAD_NULL)
	  thread_wakeup((event_t) profile_thread);
	break;
      
    default:
	panic("profile: unexpected case\n");
    }
}
Example #6
0
void
profile_thread(void)
{
    spl_t	    s;
    buffer_t	    buf_entry;
    queue_entry_t   prof_queue_entry;
    prof_data_t	    pbuf;
    kern_return_t   kr;
    int		    j;

    thread_swappable(current_act(), FALSE);

    /* Initialise the queue header for the prof_queue */
    mpqueue_init(&prof_queue);

    while (TRUE) {

	/* Dequeue the first buffer. */
	s = splsched();
	mpdequeue_head(&prof_queue, &prof_queue_entry);
	splx(s);

	if ((buf_entry = (buffer_t) prof_queue_entry) == NULLPBUF) { 
	    assert_wait((event_t) profile_thread, FALSE);
	    thread_block((void (*)(void)) 0);
	    if (current_thread()->wait_result != THREAD_AWAKENED)
		break;
	} else 
#if DCI
	{
	    register int    sum_samples = 0;
	    int		    i;

	    pbuf = buf_entry->p_prof;
	    /*
	     * Sum the samples from all the CPUs on the machine.
	     */
	    for (i = 0; i < NCPUS; i++)
		sum_samples += buf_entry->p_index[i];

	    kr = send_samples(pbuf->prof_port, (void *)buf_entry->p_zone,
			(mach_msg_type_number_t)sum_samples);
	    if (kr != KERN_SUCCESS)
	    {
		task_suspend(pbuf->task); /* suspend task */
		kr = send_notices(pbuf->prof_port, (void *)buf_entry->p_zone,
				  (mach_msg_type_number_t)sum_samples,
				  MACH_SEND_ALWAYS);
	    }
	    bzero((char *)buf_entry->p_zone, NCPUS*SIZE_PROF_BUFFER);
#else
	{
	    int		    dropped;

	    pbuf = buf_entry->p_prof;
	    kr = send_samples(pbuf->prof_port, (void *)buf_entry->p_zone,
			(mach_msg_type_number_t)buf_entry->p_index);
	    profile_sample_count += buf_entry->p_index;
	    if (kr != KERN_SUCCESS)
	      printf("send_samples(%x, %x, %d) error %x\n",
			pbuf->prof_port, buf_entry->p_zone, buf_entry->p_index, kr); 
	    dropped = buf_entry->p_dropped;
	    if (dropped > 0) {
		printf("kernel: profile dropped %d sample%s\n", dropped,
		       dropped == 1 ? "" : "s");
		buf_entry->p_dropped = 0;
	    }

#endif /* DCI */
	    /* Indicate that this buffer has been drained and can be refilled. */
#if DCI
	    {
		int i;
		for (i = 0; i < NCPUS; i++)
		    buf_entry->p_full[i] = FALSE;
	    }
#else
	    buf_entry->p_full = FALSE;
#endif /* DCI */
	    if (buf_entry->p_wakeme)
	      thread_wakeup((event_t) &buf_entry->p_wakeme);
	}

    }
    /* The profile thread has been signalled to exit.  Any threads waiting
       for the last buffer of samples to be acknowledged should be woken
       up now.  */
    profile_thread_id = THREAD_NULL;
    while (1) {
	s = splsched();
	mpdequeue_head(&prof_queue, &prof_queue_entry);
	splx(s);
	if ((buf_entry = (buffer_t) prof_queue_entry) == NULLPBUF)
	    break;
	if (buf_entry->p_wakeme)
	    thread_wakeup((event_t) &buf_entry->p_wakeme);
    }
#if 0	/* XXXXX */
    thread_halt_self();
#else
	panic("profile_thread(): halt_self");
#endif	/* XXXXX */
}

/*
 *****************************************************************************
 * send_last_sample_buf is the drain mechanism that allows partially filled
 * profile buffers to be sent to the receive_prof thread in the server.
 *****************************************************************************
 */

void
send_last_sample_buf(prof_data_t pbuf)
{
    spl_t    s;
    buffer_t buf_entry;

    if (pbuf == NULLPROFDATA)
	return;

    /*
     * Request that the last PC buffer be sent.  Hand the buffer to the
     * profile_thread by inserting it in the send queue and waking the
     * thread, so that the buffer is flushed promptly.
     */
    buf_entry = pbuf->prof_area + pbuf->prof_index;
    buf_entry->p_prof = pbuf;

    /*
     * Watch out in case the profile thread exits while we are about to
     * queue data for it.
     */
    s = splsched();
    if (profile_thread_id == THREAD_NULL)
	splx(s);
    else {
	buf_entry->p_wakeme = 1;
	mpenqueue_tail(&prof_queue, &buf_entry->p_list);
	thread_wakeup((event_t) profile_thread);
	assert_wait((event_t) &buf_entry->p_wakeme, TRUE);
	splx(s); 
	thread_block((void (*)(void)) 0);
    }
}
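All of the examples above share the same producer/consumer shape: producers
call mpenqueue_tail() and wake the consumer, which blocks until work arrives
and drains the queue with mpdequeue_head().  The following self-contained
user-space analogue illustrates that pattern with POSIX threads; every name
in it (mpq_t, mpq_enqueue_tail, and so on) is a stand-in invented for this
sketch, not a kernel API.

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

/* User-space stand-ins for the kernel's mpqueue primitives. */
typedef struct qnode {
	struct qnode	*next;
	int		payload;
} qnode_t;

typedef struct {
	pthread_mutex_t	lock;
	pthread_cond_t	wake;	/* stands in for thread_wakeup/assert_wait */
	qnode_t		*head;
	qnode_t		*tail;
} mpq_t;

static mpq_t work_queue = {
	PTHREAD_MUTEX_INITIALIZER, PTHREAD_COND_INITIALIZER, NULL, NULL
};

/* Append a node and wake the consumer: mpenqueue_tail + thread_wakeup. */
static void
mpq_enqueue_tail(mpq_t *q, qnode_t *n)
{
	pthread_mutex_lock(&q->lock);
	n->next = NULL;
	if (q->tail != NULL)
		q->tail->next = n;
	else
		q->head = n;
	q->tail = n;
	pthread_cond_signal(&q->wake);
	pthread_mutex_unlock(&q->lock);
}

/* Block until a node is available: assert_wait + thread_block. */
static qnode_t *
mpq_dequeue_head(mpq_t *q)
{
	qnode_t	*n;

	pthread_mutex_lock(&q->lock);
	while ((n = q->head) == NULL)
		pthread_cond_wait(&q->wake, &q->lock);
	if ((q->head = n->next) == NULL)
		q->tail = NULL;
	pthread_mutex_unlock(&q->lock);
	return (n);
}

static void *
consumer(void *arg)
{
	qnode_t	*n;

	(void) arg;
	for (;;) {
		n = mpq_dequeue_head(&work_queue);
		if (n->payload < 0) {		/* sentinel: time to exit */
			free(n);
			return (NULL);
		}
		printf("consumed %d\n", n->payload);
		free(n);
	}
}

int
main(void)
{
	pthread_t	tid;
	int		i;

	pthread_create(&tid, NULL, consumer, NULL);
	for (i = 0; i <= 3; i++) {
		qnode_t *n = malloc(sizeof(*n));
		n->payload = (i < 3) ? i : -1;	/* last node is the sentinel */
		mpq_enqueue_tail(&work_queue, n);
	}
	pthread_join(tid, NULL);
	return (0);
}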