void IOInterruptController::timeStampInterruptHandlerInternal(bool isStart, IOInterruptVectorNumber vectorNumber, IOInterruptVector *vector)
{
  uint64_t providerID = 0;
  vm_offset_t unslidHandler = 0;
  vm_offset_t unslidTarget = 0;

  IOService * provider = getProvider();

  if (provider) {
    providerID = provider->getRegistryEntryID();
  }

  if (vector) {
    unslidHandler = VM_KERNEL_UNSLIDE((vm_offset_t)vector->handler);
    unslidTarget = VM_KERNEL_UNSLIDE_OR_PERM((vm_offset_t)vector->target);
  }


  if (isStart) {
    IOTimeStampStartConstant(IODBG_INTC(IOINTC_HANDLER), (uintptr_t)vectorNumber, (uintptr_t)unslidHandler,
                           (uintptr_t)unslidTarget, (uintptr_t)providerID);
  } else {
    IOTimeStampEndConstant(IODBG_INTC(IOINTC_HANDLER), (uintptr_t)vectorNumber, (uintptr_t)unslidHandler,
                           (uintptr_t)unslidTarget, (uintptr_t)providerID);
  }
}
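Before the timestamps are emitted, both the handler and target pointers are unslid so the kdebug trace does not leak the kernel's KASLR slide. The sketch below only illustrates the general idea of unsliding for trace output; the slide variable and text range are hypothetical, and it is not xnu's actual VM_KERNEL_UNSLIDE definition.

/*
 * Simplified sketch of "unsliding" an address for trace output.
 * kernel_slide_for_illustration, slid_text_start and slid_text_end are
 * hypothetical; the real VM_KERNEL_UNSLIDE macro performs an equivalent
 * range check against the slid kernel image before subtracting the slide.
 */
#include <stdint.h>

typedef uintptr_t vm_offset_t;

static vm_offset_t kernel_slide_for_illustration;
static vm_offset_t slid_text_start, slid_text_end;

static vm_offset_t
unslide_for_trace(vm_offset_t addr)
{
    /* Only rebase addresses that actually live in the slid kernel text. */
    if (addr >= slid_text_start && addr < slid_text_end)
        return addr - kernel_slide_for_illustration;
    return addr;
}
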
void IOFilterInterruptEventSource::disableInterruptOccurred
    (void */*refcon*/, IOService *prov, int source)
{
    bool        filterRes;
    uint64_t    startTime = 0;
    uint64_t    endTime = 0;
    bool        trace = (gIOKitTrace & kIOTraceIntEventSource) ? true : false;

    if (trace)
        IOTimeStampStartConstant(IODBG_INTES(IOINTES_FILTER),
                                 VM_KERNEL_UNSLIDE(filterAction), (uintptr_t) owner, (uintptr_t) this, (uintptr_t) workLoop);

    if (IOInterruptEventSource::reserved->statistics) {
        if (IA_GET_STATISTIC_ENABLED(kInterruptAccountingFirstLevelTimeIndex)) {
            startTime = mach_absolute_time();
        }
    }
    
    // Call the filter.
    filterRes = (*filterAction)(owner, this);

    if (IOInterruptEventSource::reserved->statistics) {
        if (IA_GET_STATISTIC_ENABLED(kInterruptAccountingFirstLevelCountIndex)) {
            IA_ADD_VALUE(&IOInterruptEventSource::reserved->statistics->interruptStatistics[kInterruptAccountingFirstLevelCountIndex], 1);
        }

        if (IA_GET_STATISTIC_ENABLED(kInterruptAccountingFirstLevelTimeIndex)) {
            endTime = mach_absolute_time();
            IA_ADD_VALUE(&IOInterruptEventSource::reserved->statistics->interruptStatistics[kInterruptAccountingFirstLevelTimeIndex], endTime - startTime);
        }
    }

    if (trace)
        IOTimeStampEndConstant(IODBG_INTES(IOINTES_FILTER),
                               VM_KERNEL_UNSLIDE(filterAction), (uintptr_t) owner, (uintptr_t) this, (uintptr_t) workLoop);

    if (filterRes) {
        prov->disableInterrupt(source);	/* disable the interrupt */
        signalInterrupt();
    }
}
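For context, the filter called above is supplied by the driver when it creates the IOFilterInterruptEventSource; it runs at primary interrupt context and returns true to request that the interrupt be disabled at the provider and the gated handler be signalled. Below is a hedged sketch of such a driver-side pair; the register and bit test are hypothetical hardware details.

// Hedged sketch of the driver-supplied filter and handler this path invokes.
// statusRegister and the bit test are hypothetical; owner is whatever object
// the driver passed when creating the event source.
#include <IOKit/IOFilterInterruptEventSource.h>

static volatile uint32_t *statusRegister;   // hypothetical device register

// Filter: runs at primary interrupt context, must be short and non-blocking.
static bool myFilter(OSObject *owner, IOFilterInterruptEventSource *source)
{
    (void) owner; (void) source;
    return (*statusRegister & 0x1) != 0;    // true => disable + signal handler
}

// Handler: runs gated on the driver's work loop after signalInterrupt().
static void myHandler(OSObject *owner, IOInterruptEventSource *source, int count)
{
    (void) owner; (void) source; (void) count;
    // Second-level servicing; the event source typically re-enables the
    // provider interrupt once this returns.
}

Such a pair is typically installed with IOFilterInterruptEventSource::filterInterruptEventSource(owner, &myHandler, &myFilter, provider) and the resulting source added to the driver's work loop.
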
Example #3
IOReturn IOCommandGate::attemptAction(Action inAction,
                                      void *arg0, void *arg1,
                                      void *arg2, void *arg3)
{
    IOReturn res;
    IOWorkLoop * wl;

    if (!inAction)
        return kIOReturnBadArgument;
    if (!(wl = workLoop))
        return kIOReturnNotReady;

    // Try to close the gate; if we can't get it, return immediately.
    if (!wl->tryCloseGate())
        return kIOReturnCannotLock;

    // If the command gate is disabled and we aren't on the work-loop thread, fail rather than sleep.
    if (!wl->onThread() && !enabled)
        res = kIOReturnNotPermitted;
    else {
        bool trace = (gIOKitTrace & kIOTraceCommandGates) ? true : false;

        if (trace)
            IOTimeStampStartConstant(IODBG_CMDQ(IOCMDQ_ACTION),
                                     VM_KERNEL_UNSLIDE(inAction), (uintptr_t) owner);

        IOStatisticsActionCall();

        res = (*inAction)(owner, arg0, arg1, arg2, arg3);

        if (trace)
            IOTimeStampEndConstant(IODBG_CMDQ(IOCMDQ_ACTION),
                                   VM_KERNEL_UNSLIDE(inAction), (uintptr_t) owner);
    }

    wl->openGate();

    return res;
}
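Unlike runAction() (Example #11 below), attemptAction() uses tryCloseGate() and fails with kIOReturnCannotLock instead of blocking when another thread holds the gate. A hedged usage sketch follows; updateGated and the argument encoding are hypothetical, and the gate is assumed to have been created with IOCommandGate::commandGate() and added to a driver's work loop.

// Hedged usage sketch of attemptAction(); names below are illustrative only.
#include <IOKit/IOCommandGate.h>

static IOReturn updateGated(OSObject *owner, void *arg0, void *, void *, void *)
{
    // Executes with the work-loop gate closed; owner is the gate's owner object.
    (void) owner;
    (void) arg0;
    return kIOReturnSuccess;
}

static IOReturn tryUpdate(IOCommandGate *gate, int value)
{
    IOReturn ret = gate->attemptAction(&updateGated, (void *)(uintptr_t) value);
    if (ret == kIOReturnCannotLock) {
        // Gate was busy; the caller can retry later or use runAction() to block.
    }
    return ret;
}
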
Example #4
File: mach_debug.c  Project: Bitesher/xnu
kern_return_t
mach_port_kobject(
	ipc_space_t			space,
	mach_port_name_t		name,
	natural_t			*typep,
	mach_vm_address_t		*addrp)
{
	ipc_entry_t entry;
	ipc_port_t port;
	kern_return_t kr;
	mach_vm_address_t kaddr;

	if (space == IS_NULL)
		return KERN_INVALID_TASK;

	kr = ipc_right_lookup_read(space, name, &entry);
	if (kr != KERN_SUCCESS)
		return kr;
	/* space is read-locked and active */

	if ((entry->ie_bits & MACH_PORT_TYPE_SEND_RECEIVE) == 0) {
		is_read_unlock(space);
		return KERN_INVALID_RIGHT;
	}

	port = (ipc_port_t) entry->ie_object;
	assert(port != IP_NULL);

	ip_lock(port);
	is_read_unlock(space);

	if (!ip_active(port)) {
		ip_unlock(port);
		return KERN_INVALID_RIGHT;
	}

	*typep = (unsigned int) ip_kotype(port);
	kaddr = (mach_vm_address_t)port->ip_kobject;
	ip_unlock(port);

	if (0 != kaddr && is_ipc_kobject(*typep))
		*addrp = VM_KERNEL_ADDRPERM(VM_KERNEL_UNSLIDE(kaddr));
	else
		*addrp = 0;

	return KERN_SUCCESS;
}
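Note that the kobject address handed back is both unslid and permuted (VM_KERNEL_ADDRPERM of VM_KERNEL_UNSLIDE), so it is stable enough to compare ports against each other but is not a usable kernel pointer. Below is a hedged sketch of a user-space caller of the corresponding MIG routine; availability and the amount of information returned vary by OS release.

/*
 * Hedged sketch of a user-space caller; mach_port_kobject() is the MIG
 * routine backed by the kernel function above. Treat its availability on a
 * given release as an assumption.
 */
#include <mach/mach.h>
#include <mach/mach_error.h>
#include <stdio.h>

static void show_kobject(mach_port_name_t name)
{
    natural_t type = 0;
    mach_vm_address_t addr = 0;

    kern_return_t kr = mach_port_kobject(mach_task_self(), name, &type, &addr);
    if (kr != KERN_SUCCESS) {
        printf("mach_port_kobject failed: %s\n", mach_error_string(kr));
        return;
    }
    printf("port 0x%x: kotype %u, permuted kaddr 0x%llx\n",
           name, type, (unsigned long long) addr);
}
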
Example #5
uint64_t
timer_queue_expire_with_options(
	mpqueue_head_t		*queue,
	uint64_t		deadline,
	boolean_t		rescan)
{
	timer_call_t	call = NULL;
	uint32_t tc_iterations = 0;
	DBG("timer_queue_expire(%p,)\n", queue);

	uint64_t cur_deadline = deadline;
	timer_queue_lock_spin(queue);

	while (!queue_empty(&queue->head)) {
		/* Upon processing one or more timer calls, refresh the
		 * deadline to account for time elapsed in the callout
		 */
		if (++tc_iterations > 1)
			cur_deadline = mach_absolute_time();

		if (call == NULL)
			call = TIMER_CALL(queue_first(&queue->head));

		if (call->soft_deadline <= cur_deadline) {
			timer_call_func_t		func;
			timer_call_param_t		param0, param1;

			TCOAL_DEBUG(0xDDDD0000, queue->earliest_soft_deadline, call->soft_deadline, 0, 0, 0);
			TIMER_KDEBUG_TRACE(KDEBUG_TRACE, 
				DECR_TIMER_EXPIRE | DBG_FUNC_NONE,
				call,
				call->soft_deadline,
				CE(call)->deadline,
				CE(call)->entry_time, 0);

			/* Bit 0 of the "soft" deadline indicates that
			 * this particular timer call is rate-limited
			 * and hence shouldn't be processed before its
			 * hard deadline.
			 */
			if ((call->soft_deadline & 0x1) &&
			    (CE(call)->deadline > cur_deadline)) {
				if (rescan == FALSE)
					break;
			}

			if (!simple_lock_try(&call->lock)) {
				/* case (2b) lock inversion, dequeue and skip */
				timer_queue_expire_lock_skips++;
				timer_call_entry_dequeue_async(call);
				call = NULL;
				continue;
			}

			timer_call_entry_dequeue(call);

			func = CE(call)->func;
			param0 = CE(call)->param0;
			param1 = CE(call)->param1;

			simple_unlock(&call->lock);
			timer_queue_unlock(queue);

			TIMER_KDEBUG_TRACE(KDEBUG_TRACE, 
				DECR_TIMER_CALLOUT | DBG_FUNC_START,
				call, VM_KERNEL_UNSLIDE(func), param0, param1, 0);

#if CONFIG_DTRACE
			DTRACE_TMR7(callout__start, timer_call_func_t, func,
			    timer_call_param_t, param0, unsigned, call->flags,
			    0, (call->ttd >> 32),
			    (unsigned) (call->ttd & 0xFFFFFFFF), call);
#endif
			/* Maintain time-to-deadline in per-processor data
			 * structure for thread wakeup deadline statistics.
			 */
			uint64_t *ttdp = &(PROCESSOR_DATA(current_processor(), timer_call_ttd));
			*ttdp = call->ttd;
			(*func)(param0, param1);
			*ttdp = 0;
#if CONFIG_DTRACE
			DTRACE_TMR4(callout__end, timer_call_func_t, func,
			    param0, param1, call);
#endif

			TIMER_KDEBUG_TRACE(KDEBUG_TRACE, 
				DECR_TIMER_CALLOUT | DBG_FUNC_END,
				call, VM_KERNEL_UNSLIDE(func), param0, param1, 0);
			call = NULL;
			timer_queue_lock_spin(queue);
		} else {
			if (__probable(rescan == FALSE)) {
Example #6
/*
 * 	Event timer interrupt.
 *
 * XXX a drawback of this implementation is that events serviced earlier must not set deadlines
 *     that occur before the entire chain completes.
 *
 * XXX a better implementation would use a set of generic callouts and iterate over them
 */
void
timer_intr(int		user_mode,
           uint64_t	rip)
{
    uint64_t		abstime;
    rtclock_timer_t		*mytimer;
    cpu_data_t		*pp;
    int64_t			latency;
    uint64_t		pmdeadline;
    boolean_t		timer_processed = FALSE;

    pp = current_cpu_datap();

    SCHED_STATS_TIMER_POP(current_processor());

    abstime = mach_absolute_time();		/* Get the time now */

    /* has a pending clock timer expired? */
    mytimer = &pp->rtclock_timer;		/* Point to the event timer */

    if ((timer_processed = ((mytimer->deadline <= abstime) ||
                            (abstime >= (mytimer->queue.earliest_soft_deadline))))) {
        /*
         * Log the interrupt service latency (the tool expects a negative
         * value); a non-PM event is expected next.
         * The requested deadline may be earlier than when it was set,
         * so use MAX to avoid reporting bogus latencies.
         */
        latency = (int64_t) (abstime - MAX(mytimer->deadline,
                                           mytimer->when_set));
        /* Log zero timer latencies when opportunistically processing
         * coalesced timers.
         */
        if (latency < 0) {
            TCOAL_DEBUG(0xEEEE0000, abstime, mytimer->queue.earliest_soft_deadline, abstime - mytimer->queue.earliest_soft_deadline, 0, 0);
            latency = 0;
        }

        KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
                                  DECR_TRAP_LATENCY | DBG_FUNC_NONE,
                                  -latency,
                                  ((user_mode != 0) ? rip : VM_KERNEL_UNSLIDE(rip)),
                                  user_mode, 0, 0);

        mytimer->has_expired = TRUE;	/* Remember that we popped */
        mytimer->deadline = timer_queue_expire(&mytimer->queue, abstime);
        mytimer->has_expired = FALSE;

        /* Get the time again since we ran a bit */
        abstime = mach_absolute_time();
        mytimer->when_set = abstime;
    }

    /* is it time for power management state change? */
    if ((pmdeadline = pmCPUGetDeadline(pp)) && (pmdeadline <= abstime)) {
        KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
                                  DECR_PM_DEADLINE | DBG_FUNC_START,
                                  0, 0, 0, 0, 0);
        pmCPUDeadline(pp);
        KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
                                  DECR_PM_DEADLINE | DBG_FUNC_END,
                                  0, 0, 0, 0, 0);
        timer_processed = TRUE;
    }

    /* schedule our next deadline */
    x86_lcpu()->rtcDeadline = EndOfAllTime;
    timer_resync_deadlines();

    if (__improbable(timer_processed == FALSE))
        spurious_timers++;
}
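The latency bookkeeping above is easy to misread: the service latency is measured against the later of the programmed deadline and the time it was programmed, negative values (opportunistic servicing of coalesced timers) are clamped to zero, and the value is logged negated. A minimal sketch of that arithmetic with hypothetical sample values:

/* Minimal sketch of the latency computation above, using hypothetical values. */
#include <stdint.h>
#include <stdio.h>

#define MAX_U64(a, b) ((a) > (b) ? (a) : (b))

int main(void)
{
    uint64_t abstime  = 1000;   /* "now", in abstime units (hypothetical) */
    uint64_t deadline = 990;    /* when the timer was asked to fire */
    uint64_t when_set = 985;    /* when that deadline was programmed */

    int64_t latency = (int64_t)(abstime - MAX_U64(deadline, when_set));
    if (latency < 0)
        latency = 0;            /* coalesced timer serviced early */

    printf("logged value: %lld\n", (long long)-latency);  /* tool expects -ve */
    return 0;
}
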
Example #7
/*
 *	thread_call_thread:
 */
static void
thread_call_thread(
		thread_call_group_t		group,
		wait_result_t			wres)
{
	thread_t	self = current_thread();
	boolean_t	canwait;

	/*
	 * A wakeup with THREAD_INTERRUPTED indicates that 
	 * we should terminate.
	 */
	if (wres == THREAD_INTERRUPTED) {
		thread_terminate(self);

		/* NOTREACHED */
		panic("thread_terminate() returned?");
	}

	(void)disable_ints_and_lock();

	thread_sched_call(self, group->sched_call);

	while (group->pending_count > 0) {
		thread_call_t			call;
		thread_call_func_t		func;
		thread_call_param_t		param0, param1;

		call = TC(dequeue_head(&group->pending_queue));
		group->pending_count--;

		func = call->tc_call.func;
		param0 = call->tc_call.param0;
		param1 = call->tc_call.param1;

		call->tc_call.queue = NULL;

		_internal_call_release(call);

		/*
		 * Can only do wakeups for thread calls whose storage
		 * we control.
		 */
		if ((call->tc_flags & THREAD_CALL_ALLOC) != 0) {
			canwait = TRUE;
			call->tc_refs++;	/* Delay free until we're done */
		} else
			canwait = FALSE;

		enable_ints_and_unlock();

		KERNEL_DEBUG_CONSTANT(
				MACHDBG_CODE(DBG_MACH_SCHED,MACH_CALLOUT) | DBG_FUNC_NONE,
				VM_KERNEL_UNSLIDE(func), param0, param1, 0, 0);

		(*func)(param0, param1);

		if (get_preemption_level() != 0) {
			int pl = get_preemption_level();
			panic("thread_call_thread: preemption_level %d, last callout %p(%p, %p)",
					pl, (void *)VM_KERNEL_UNSLIDE(func), param0, param1);
		}

		(void)thread_funnel_set(self->funnel_lock, FALSE);		/* XXX */

		(void) disable_ints_and_lock();
		
		if (canwait) {
			/* Frees if so desired */
			thread_call_finish(call);
		}
	}

	thread_sched_call(self, NULL);
	group->active_count--;

	if (group_isparallel(group)) {
		/*
		 * For the new style of thread group, the thread always blocks.
		 * If we have more than the target number of threads, this is
		 * the first to block, and the deallocation timer isn't already
		 * active, set a timer for deallocating a thread if we continue
		 * to have a surplus.
		 */
		group->idle_count++;

		if (group->idle_count == 1) {
			group->idle_timestamp = mach_absolute_time();
		}   

		if (((group->flags & TCG_DEALLOC_ACTIVE) == 0) &&
				((group->active_count + group->idle_count) > group->target_thread_count)) {
			group->flags |= TCG_DEALLOC_ACTIVE;
			thread_call_start_deallocate_timer(group);
		}   

		/* Wait for more work (or termination) */
		wres = wait_queue_assert_wait(&group->idle_wqueue, NO_EVENT, THREAD_INTERRUPTIBLE, 0); 
		if (wres != THREAD_WAITING) {
			panic("kcall worker unable to assert wait?");
		}   

		enable_ints_and_unlock();

		thread_block_parameter((thread_continue_t)thread_call_thread, group);
	} else {
		if (group->idle_count < group->target_thread_count) {
			group->idle_count++;

			wait_queue_assert_wait(&group->idle_wqueue, NO_EVENT, THREAD_UNINT, 0); /* Interrupted means to exit */

			enable_ints_and_unlock();

			thread_block_parameter((thread_continue_t)thread_call_thread, group);
			/* NOTREACHED */
		}
	}

	enable_ints_and_unlock();

	thread_terminate(self);
	/* NOTREACHED */
}
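The worker above is the consumer end of the thread-call mechanism; the MACH_CALLOUT tracepoint records the unslid callout function. For context, here is a hedged sketch of the producer side using the thread_call KPI; my_callout and my_context are hypothetical.

/*
 * Hedged sketch of queueing work that a thread-call worker like the one
 * above will eventually run.
 */
#include <kern/thread_call.h>

static void
my_callout(thread_call_param_t param0, thread_call_param_t param1)
{
    /* Runs on a thread-call worker thread, outside interrupt context. */
    (void) param0;
    (void) param1;
}

static thread_call_t my_call;

static void
queue_my_work(void *my_context)
{
    if (my_call == NULL)
        my_call = thread_call_allocate(my_callout,
                                       (thread_call_param_t) my_context);
    thread_call_enter1(my_call, (thread_call_param_t) NULL);
}
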
Example #8
/*
 * On x86_64 systems, kernel extension text must remain within 2GB of the
 * kernel's text segment.  To ensure this happens, we snag 2GB of kernel VM
 * as early as possible for kext allocations.
 */
void 
kext_alloc_init(void)
{
#if CONFIG_KEXT_BASEMENT
    kern_return_t rval = 0;
    kernel_segment_command_t *text = NULL;
    kernel_segment_command_t *prelinkTextSegment = NULL;
    mach_vm_offset_t text_end, text_start;
    mach_vm_size_t text_size;
    mach_vm_size_t kext_alloc_size;

    /* Determine the start of the kernel's __TEXT segment and determine the
     * lower bound of the allocated submap for kext allocations.
     */

    text = getsegbyname(SEG_TEXT);
    text_start = vm_map_trunc_page(text->vmaddr,
				   VM_MAP_PAGE_MASK(kernel_map));
    text_start &= ~((512ULL * 1024 * 1024 * 1024) - 1);
    text_end = vm_map_round_page(text->vmaddr + text->vmsize,
				 VM_MAP_PAGE_MASK(kernel_map));
    text_size = text_end - text_start;

    kext_alloc_base = KEXT_ALLOC_BASE(text_end);
    kext_alloc_size = KEXT_ALLOC_SIZE(text_size);
    kext_alloc_max = kext_alloc_base + kext_alloc_size;
    
    /* Post boot kext allocation will start after the prelinked kexts */
    prelinkTextSegment = getsegbyname("__PRELINK_TEXT");
    if (prelinkTextSegment) {
        /* use kext_post_boot_base to start allocations past all the prelinked 
         * kexts
         */
        kext_post_boot_base = 
		vm_map_round_page(kext_alloc_base + prelinkTextSegment->vmsize,
				  VM_MAP_PAGE_MASK(kernel_map));
    }
    else {
        kext_post_boot_base = kext_alloc_base;
    }

    /* Allocate the sub block of the kernel map */
    rval = kmem_suballoc(kernel_map, (vm_offset_t *) &kext_alloc_base, 
			 kext_alloc_size, /* pageable */ TRUE,
			 VM_FLAGS_FIXED|VM_FLAGS_OVERWRITE,
			 &g_kext_map);
    if (rval != KERN_SUCCESS) {
	    panic("kext_alloc_init: kmem_suballoc failed 0x%x\n", rval);
    }

    if ((kext_alloc_base + kext_alloc_size) > kext_alloc_max) {
        panic("kext_alloc_init: failed to get first 2GB\n");
    }

    if (kernel_map->min_offset > kext_alloc_base) {
	    kernel_map->min_offset = kext_alloc_base;
    }

    printf("kext submap [0x%lx - 0x%lx], kernel text [0x%lx - 0x%lx]\n",
	   VM_KERNEL_UNSLIDE(kext_alloc_base),
	   VM_KERNEL_UNSLIDE(kext_alloc_max),
	   VM_KERNEL_UNSLIDE(text->vmaddr),
	   VM_KERNEL_UNSLIDE(text->vmaddr + text->vmsize));

#else
    g_kext_map = kernel_map;
    kext_alloc_base = VM_MIN_KERNEL_ADDRESS;
    kext_alloc_max = VM_MAX_KERNEL_ADDRESS;
#endif /* CONFIG_KEXT_BASEMENT */
}
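The 2GB constraint exists because kext code reaches kernel symbols through 32-bit RIP-relative branches and relocations, which can only span +/-2GB. The KEXT_ALLOC_BASE and KEXT_ALLOC_SIZE macros are expected to carve out the window that ends at the end of kernel __TEXT; the following is a hedged sketch of that arithmetic, not the macros' literal definitions.

/*
 * Hedged sketch of the basement arithmetic: reserve the 2GB window that ends
 * where kernel __TEXT ends, minus the space the kernel text itself occupies.
 */
#include <stdint.h>

#define TWO_GB (2ULL * 1024 * 1024 * 1024)

static uint64_t
kext_alloc_base_sketch(uint64_t text_end)
{
    return text_end - TWO_GB;           /* window starts 2GB below text end */
}

static uint64_t
kext_alloc_size_sketch(uint64_t text_size)
{
    return TWO_GB - text_size;          /* remainder is available for kexts */
}
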
Example #9
int
machine_trace_thread64(thread_t thread,
                       char * tracepos,
                       char * tracebound,
                       int nframes,
                       boolean_t user_p,
                       boolean_t trace_fp,
                       uint32_t * thread_trace_flags)
{
	uint64_t * tracebuf = (uint64_t *)tracepos;
	unsigned framesize  = (trace_fp ? 2 : 1) * sizeof(addr64_t);

	uint32_t fence             = 0;
	addr64_t stackptr          = 0;
	int framecount             = 0;
	addr64_t prev_rip          = 0;
	addr64_t prevsp            = 0;
	vm_offset_t kern_virt_addr = 0;
	vm_map_t bt_vm_map         = VM_MAP_NULL;

	if (thread->machine.iss == NULL) {
		// No register state to backtrace; the thread is probably terminating.
		return 0;
	}

	nframes = (tracebound > tracepos) ? MIN(nframes, (int)((tracebound - tracepos) / framesize)) : 0;

	if (user_p) {
		x86_saved_state64_t	*iss64;
		iss64 = USER_REGS64(thread);
		prev_rip = iss64->isf.rip;
		stackptr = iss64->rbp;
		bt_vm_map = thread->task->map;
	}
	else {
		stackptr = STACK_IKS(thread->kernel_stack)->k_rbp;
		prev_rip = STACK_IKS(thread->kernel_stack)->k_rip;
		prev_rip = VM_KERNEL_UNSLIDE(prev_rip);
		bt_vm_map = kernel_map;
	}

	for (framecount = 0; framecount < nframes; framecount++) {

		*tracebuf++ = prev_rip;
		if (trace_fp) {
			*tracebuf++ = stackptr;
		}

		if (!stackptr || (stackptr == fence)) {
			break;
		}
		if (stackptr & 0x0000007) {
			break;
		}
		if (stackptr <= prevsp) {
			break;
		}

		kern_virt_addr = machine_trace_thread_get_kva(stackptr + RETURN_OFFSET64, bt_vm_map, thread_trace_flags);
		if (!kern_virt_addr) {
			if (thread_trace_flags) {
				*thread_trace_flags |= kThreadTruncatedBT;
			}
			break;
		}

		prev_rip = *(uint64_t *)kern_virt_addr;
		if (!user_p) {
			prev_rip = VM_KERNEL_UNSLIDE(prev_rip);
		}

		prevsp = stackptr;

		kern_virt_addr = machine_trace_thread_get_kva(stackptr, bt_vm_map, thread_trace_flags);

		if (kern_virt_addr) {
			stackptr = *(uint64_t *)kern_virt_addr;
		} else {
			stackptr = 0;
			if (thread_trace_flags) {
				*thread_trace_flags |= kThreadTruncatedBT;
			}
		}
	}

	machine_trace_thread_clear_validation_cache();

	return (uint32_t) (((char *) tracebuf) - tracepos);
}
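The loop is a standard x86-64 frame-pointer walk: the saved return RIP sits at RBP + RETURN_OFFSET64 (8 bytes) and the caller's RBP at RBP + 0, with sanity checks for alignment and a monotonically increasing stack pointer. A hedged, user-space sketch of the same walk, assuming the compiler has not omitted frame pointers:

/*
 * Hedged user-space sketch of the frame-pointer walk above, assuming the
 * standard x86-64 frame layout: [rbp] = caller rbp, [rbp + 8] = return rip.
 */
#include <stdint.h>

struct frame {
    struct frame *prev;       /* saved caller RBP */
    uint64_t      return_rip; /* saved return address */
};

static int
walk_frames(struct frame *fp, uint64_t *out, int max_frames)
{
    int n = 0;
    uintptr_t prev_fp = 0;

    while (fp != NULL && n < max_frames) {
        if (((uintptr_t) fp & 0x7) || (uintptr_t) fp <= prev_fp)
            break;            /* misaligned or stack not growing upward */
        out[n++] = fp->return_rip;
        prev_fp = (uintptr_t) fp;
        fp = fp->prev;
    }
    return n;
}
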
Example #10
uint64_t
timer_queue_expire(
	mpqueue_head_t		*queue,
	uint64_t		deadline)
{
	timer_call_t	call;

	DBG("timer_queue_expire(%p,)\n", queue);

	timer_call_lock_spin(queue);

	while (!queue_empty(&queue->head)) {
		call = TIMER_CALL(queue_first(&queue->head));

		if (call->soft_deadline <= deadline) {
			timer_call_func_t		func;
			timer_call_param_t		param0, param1;

			if (!simple_lock_try(&call->lock)) {
				/* case (2b) lock inversion, dequeue and skip */
				timer_queue_expire_lock_skips++;
				(void) remque(qe(call));
				call->async_dequeue = TRUE;
				continue;
			}

			timer_call_entry_dequeue(call);

			func = CE(call)->func;
			param0 = CE(call)->param0;
			param1 = CE(call)->param1;

			simple_unlock(&call->lock);
			timer_call_unlock(queue);

			KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, 
				DECR_TIMER_CALLOUT | DBG_FUNC_START,
				VM_KERNEL_UNSLIDE(func), param0, param1, 0, 0);

#if CONFIG_DTRACE && (DEVELOPMENT || DEBUG )
			DTRACE_TMR3(callout__start, timer_call_func_t, func, 
										timer_call_param_t, param0, 
										timer_call_param_t, param1);
#endif

			(*func)(param0, param1);

#if CONFIG_DTRACE && (DEVELOPMENT || DEBUG )
			DTRACE_TMR3(callout__end, timer_call_func_t, func, 
										timer_call_param_t, param0, 
										timer_call_param_t, param1);
#endif

			KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, 
				DECR_TIMER_CALLOUT | DBG_FUNC_END,
				VM_KERNEL_UNSLIDE(func), param0, param1, 0, 0);

			timer_call_lock_spin(queue);
		}
		else
			break;
	}

	if (!queue_empty(&queue->head))
		deadline = CE(call)->deadline;
	else
		deadline = UINT64_MAX;

	timer_call_unlock(queue);

	return (deadline);
}
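Both timer_queue_expire() and the _with_options variant in Example #5 drain calls armed through the in-kernel timer_call interface. For context, a hedged sketch of arming such a call follows; my_timer_func and my_call are hypothetical, and the interface is kernel-private, so treat the exact headers as an assumption.

/*
 * Hedged sketch of arming a timer call that timer_queue_expire() will later
 * dequeue and invoke.
 */
#include <kern/timer_call.h>
#include <kern/clock.h>

static timer_call_data_t my_call;

static void
my_timer_func(timer_call_param_t param0, timer_call_param_t param1)
{
    /* Invoked from the timer-expiry path. */
    (void) param0;
    (void) param1;
}

static void
arm_one_second_timer(void *context)
{
    uint64_t interval, deadline;

    timer_call_setup(&my_call, my_timer_func, (timer_call_param_t) context);
    nanoseconds_to_absolutetime(1000ULL * 1000 * 1000, &interval);  /* 1 s */
    clock_absolutetime_interval_to_deadline(interval, &deadline);
    timer_call_enter(&my_call, deadline, 0);
}
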
Example #11
IOReturn IOCommandGate::runAction(Action inAction,
                                  void *arg0, void *arg1,
                                  void *arg2, void *arg3)
{
    IOWorkLoop * wl;
    uintptr_t  * sleepersP;

    if (!inAction)
        return kIOReturnBadArgument;
    if (!(wl = workLoop))
        return kIOReturnNotReady;

    // closeGate is recursive, so we needn't worry if we already hold the lock.
    wl->closeGate();
    sleepersP = (uintptr_t *) &reserved;

    // If the command gate is disabled and we aren't on the workloop thread
    // itself then sleep until we get enabled.
    IOReturn res;
    if (!wl->onThread())
    {
        while (!enabled)
        {
            IOReturn sleepResult = kIOReturnSuccess;
            if (workLoop)
            {
                *sleepersP |= kSleepersWaitEnabled;
                sleepResult = wl->sleepGate(&enabled, THREAD_ABORTSAFE);
                *sleepersP &= ~kSleepersWaitEnabled;
            }
            bool wakeupTearDown = (!workLoop || (0 != (*sleepersP & kSleepersRemoved)));
            if ((kIOReturnSuccess != sleepResult) || wakeupTearDown) {
                wl->openGate();

                if (wakeupTearDown)
                    wl->wakeupGate(sleepersP, false);   // No further resources used

                return kIOReturnAborted;
            }
        }
    }

    bool trace = (gIOKitTrace & kIOTraceCommandGates) ? true : false;

    if (trace) IOTimeStampStartConstant(IODBG_CMDQ(IOCMDQ_ACTION),
                                        VM_KERNEL_UNSLIDE(inAction), (uintptr_t) owner);

    IOStatisticsActionCall();

    // Must be gated and on the work loop or enabled

    *sleepersP += kSleepersActions;
    res = (*inAction)(owner, arg0, arg1, arg2, arg3);
    *sleepersP -= kSleepersActions;

    if (trace) IOTimeStampEndConstant(IODBG_CMDQ(IOCMDQ_ACTION),
                                      VM_KERNEL_UNSLIDE(inAction), (uintptr_t) owner);

    if (kSleepersRemoved == ((kSleepersActionsMask|kSleepersRemoved) & *sleepersP))
    {
        // no actions outstanding
        *sleepersP &= ~kSleepersRemoved;
        super::setWorkLoop(0);
    }

    wl->openGate();

    return res;
}
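The kSleepersWaitEnabled path above is what makes a disabled command gate block callers: runAction() sleeps in sleepGate(&enabled, ...) until the gate is re-enabled or torn down. A hedged sketch of the enable/disable pairing a driver might use:

// Hedged sketch of pausing and resuming gated commands. Threads that call
// runAction() while the gate is disabled block in sleepGate(), as shown above.
#include <IOKit/IOCommandGate.h>

static void pauseCommands(IOCommandGate *gate)
{
    gate->disable();    // later runAction() callers will sleep on the gate
}

static void resumeCommands(IOCommandGate *gate)
{
    gate->enable();     // wakes threads blocked inside runAction()
}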