Example #1
boolean_t
swtch_pri(
__unused	struct swtch_pri_args *args)
{
	register processor_t	myprocessor;
	boolean_t				result;

	disable_preemption();
	myprocessor = current_processor();
	if (SCHED(processor_queue_empty)(myprocessor) && rt_runq.count == 0) {
		mp_enable_preemption();

		return (FALSE);
	}
	enable_preemption();

	counter(c_swtch_pri_block++);

	thread_depress_abstime(thread_depress_time);

	thread_block_reason((thread_continue_t)swtch_pri_continue, NULL, AST_YIELD);

	thread_depress_abort_internal(current_thread());

	disable_preemption();
	myprocessor = current_processor();
	result = !SCHED(processor_queue_empty)(myprocessor) || rt_runq.count > 0;
	enable_preemption();

	return (result);
}
Example #2
void dispatcher_body()
{
  int err, t0, t1;
  task_t* next; // Task that will take over the processor.

  enable_preemption(0);

  // As long as there is some task in the ready list,
  // the while loop keeps running.
  while (list_size(ready_list) > 0) {
    t0 = systime();
    next = scheduler();

    if (next) {
      ticks = 20;
      enable_preemption(1);
      t1 = systime();
      curr_task->proc_time += t1 - t0; // charge the scheduling time to the current task
      
      t0 = systime();
      err = task_switch(next); 
      t1 = systime();
      
      next->proc_time += t1 - t0;
      
      if (err == -1) {
        perror("dispatcher_body: task_switch failed.\n");
        return;
      }
    }
  }

  // Finish the dispatcher, returning to main.
  task_exit(0);
}
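
A hedged sketch of how such a dispatcher is typically started (the task_create() name and signature below are assumptions for illustration only, not taken from the code above): create a dedicated dispatcher task during initialization and let dispatcher_body() run until the ready list is empty.

task_t dispatcher; // hypothetical global holding the dispatcher task

// Thin wrapper so the entry point matches a start function taking one argument.
void dispatcher_entry(void* arg)
{
  (void) arg;
  dispatcher_body();
}

void start_dispatcher()
{
  // Assumed signature: task_create(task, start_func, arg); adapt to the real API.
  if (task_create(&dispatcher, dispatcher_entry, NULL) < 0)
    perror("start_dispatcher: task_create failed.\n");
}
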
Example #3
void kmutex_lock(kmutex *m)
{
   disable_preemption();
   DEBUG_ONLY(check_not_in_irq_handler());

   if (!m->owner_task) {

      /* Nobody owns this mutex, just make this task own it */
      m->owner_task = get_curr_task();

      if (m->flags & KMUTEX_FL_RECURSIVE) {
         ASSERT(m->lock_count == 0);
         m->lock_count++;
      }

      enable_preemption();
      return;
   }

   if (m->flags & KMUTEX_FL_RECURSIVE) {

      ASSERT(m->lock_count > 0);

      if (kmutex_is_curr_task_holding_lock(m)) {
         m->lock_count++;
         enable_preemption();
         return;
      }

   } else {
      ASSERT(!kmutex_is_curr_task_holding_lock(m));
   }

#if KMUTEX_STATS_ENABLED
   m->num_waiters++;
   m->max_num_waiters = MAX(m->num_waiters, m->max_num_waiters);
#endif

   task_set_wait_obj(get_curr_task(), WOBJ_KMUTEX, m, &m->wait_list);
   enable_preemption();
   kernel_yield(); // Go to sleep until the owner releases the lock and wakes us up.

   /* ------------------- We've been woken up ------------------- */

#if KMUTEX_STATS_ENABLED
   m->num_waiters--;
#endif

   /* Now for sure this task should hold the mutex */
   ASSERT(kmutex_is_curr_task_holding_lock(m));

   /*
    * DEBUG check: if we went to sleep on a recursive mutex, its lock_count
    * must be exactly 1 now.
    */
   if (m->flags & KMUTEX_FL_RECURSIVE) {
      ASSERT(m->lock_count == 1);
   }
}
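
A minimal usage sketch for the blocking path above (hypothetical names counter_lock and shared_counter; the mutex is assumed to be initialized elsewhere). kmutex_lock() may sleep, so it must only be called from preemptible task context, as the check_not_in_irq_handler() debug check enforces.

static kmutex counter_lock;  /* hypothetical; assumed already initialized */
static int shared_counter;   /* hypothetical shared state */

void increment_shared_counter(void)
{
   kmutex_lock(&counter_lock);    /* may sleep: never call from an IRQ handler */
   shared_counter++;              /* critical section */
   kmutex_unlock(&counter_lock);  /* wakes up one waiter, if any (Example #9) */
}
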
Example #4
int
pthread_join(pthread_t tid, void **status)
{
	pthread_thread_t	*joinee, *joiner;

	if ((joinee = tidtothread(tid)) == NULL_THREADPTR)
		return EINVAL;
	
	joiner = CURPTHREAD();

	assert_preemption_enabled();
	disable_preemption();

	pthread_lock(&(joinee->lock));
	if (joinee->flags & THREAD_DETACHED) {
		pthread_unlock(&(joinee->lock));
		enable_preemption();
		return EINVAL;
	}
	pthread_unlock(&(joinee->lock));
	enable_preemption();

	/*
	 * Use a mutex here. This avoids specialized handling in the cancel
	 * and signal code. It works because the "dead" flag is independent,
	 * protected by a spinning mutex in the reaper code. 
	 */
	pthread_mutex_lock(&joinee->mutex);
	while (!joinee->dead) {
		/*
		 * join must be called with cancelation DEFERRED!
		 */
		pthread_testcancel();

		pthread_cond_wait(&joinee->cond, &joinee->mutex);
	}

	/*
	 * Not allowed to detach the target thread if this thread is canceled.
	 */
	pthread_testcancel();

	disable_preemption();
	if (status)
		*status = (void *) joinee->exitval;

	pthread_mutex_unlock(&joinee->mutex);
	pthread_destroy_internal(joinee);
	enable_preemption();

	return 0;
}
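
A usage sketch from the caller's point of view (hypothetical worker_main; pthread_create() is assumed to be available with its standard signature in this library): create a worker, then reclaim its exit value with pthread_join().

extern void *worker_main(void *arg);   /* hypothetical worker function */

void run_worker_and_wait(void)
{
	pthread_t tid;
	void *retval;

	if (pthread_create(&tid, NULL, worker_main, NULL) != 0)
		return;

	if (pthread_join(tid, &retval) == 0) {
		/* retval now holds the worker's return value / pthread_exit() argument */
	}
}
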
Example #5
/*
 * This is first routine called for every thread.
 */
void
pthread_start_thread(pthread_thread_t *pthread)
{
	/*
	 * Threads start with the current thread pointer not set yet.
	 */
	SETCURPTHREAD(pthread);

	/*
	 * All threads start with the schedlock locked.
	 */
	pthread_unlock(&pthread->schedlock);

	/*
	 * All threads start out with interrupts and preemptions blocked,
	 * which must be reset.
	 */
	enable_interrupts();
	enable_preemption();

	DPRINTF("(%d): P:%p, T:%d, F:%p A:%p\n", THISCPU,
		pthread, (int) pthread->tid, pthread->func, pthread->cookie);

	/*
	 * If the function returns a value, it is passed to pthread_exit.
	 */
	pthread_exit((*pthread->func)(pthread->cookie));

	/* NOTREACHED */
}
Example #6
/*
 *	Routine:	cpu_doshutdown
 *	Function:	Complete the shutdown of the current processor.
 */
void
cpu_doshutdown(
	void)
{
	enable_preemption();
	processor_offline(current_processor());
}
Example #7
bool kmutex_trylock(kmutex *m)
{
   bool success = false;

   disable_preemption();
   DEBUG_ONLY(check_not_in_irq_handler());

   if (!m->owner_task) {

      /* Nobody owns this mutex, just make this task own it */
      m->owner_task = get_curr_task();
      success = true;

      if (m->flags & KMUTEX_FL_RECURSIVE)
         m->lock_count++;

   } else {

      /*
       * There IS an owner task, but we can still acquire the mutex if:
       *    - the mutex is recursive
       *    - the task holding it is actually the current task
       */

      if (m->flags & KMUTEX_FL_RECURSIVE) {
         if (kmutex_is_curr_task_holding_lock(m)) {
            m->lock_count++;
            success = true;
         }
      }
   }

   enable_preemption();
   return success;
}
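
A hedged usage sketch (hypothetical caller): take the non-blocking fast path when the mutex is free, and fall back to the sleeping kmutex_lock() of Example #3 when it is contended.

void update_protected_state(kmutex *m)
{
   if (!kmutex_trylock(m)) {
      /* contended: we are in task context and allowed to sleep, take the slow path */
      kmutex_lock(m);
   }

   /* ... touch the state protected by m ... */

   kmutex_unlock(m);
}
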
Example #8
__private_extern__ kern_return_t
chudxnu_cpusig_send(int otherCPU, uint32_t request_code)
{
	int				thisCPU;
	kern_return_t			retval = KERN_FAILURE;
	chudcpu_signal_request_t	request;
	uint64_t			deadline;
	chudcpu_data_t			*target_chudp;
	boolean_t old_level;

	disable_preemption();
	// force interrupts on for a cross CPU signal.
	old_level = chudxnu_set_interrupts_enabled(TRUE);
	thisCPU = cpu_number();

	if ((unsigned) otherCPU < real_ncpus &&
	    thisCPU != otherCPU &&
	    cpu_data_ptr[otherCPU]->cpu_running) {

		target_chudp = (chudcpu_data_t *)
					cpu_data_ptr[otherCPU]->cpu_chud;

		/* Fill out request */
		request.req_sync = 0xFFFFFFFF;		/* set sync flag */
		//request.req_type = CPRQchud;		/* set request type */
		request.req_code = request_code;	/* set request */

		KERNEL_DEBUG_CONSTANT(
			MACHDBG_CODE(DBG_MACH_CHUD,
				     CHUD_CPUSIG_SEND) | DBG_FUNC_NONE,
			otherCPU, request_code, 0, 0, 0);

		/*
		 * Insert the new request in the target cpu's request queue
		 * and signal target cpu.
		 */
		mpenqueue_tail(&target_chudp->cpu_request_queue,
			       &request.req_entry);
		i386_signal_cpu(otherCPU, MP_CHUD, ASYNC);

		/* Wait for response or timeout */
		deadline = mach_absolute_time() + LockTimeOut;
		while (request.req_sync != 0) {
			if (mach_absolute_time() > deadline) {
				panic("chudxnu_cpusig_send(%d,%d) timed out\n",
					otherCPU, request_code);
			}
			cpu_pause();
		}
		retval = KERN_SUCCESS;
	} else {
		retval = KERN_INVALID_ARGUMENT;
	}

	chudxnu_set_interrupts_enabled(old_level);
	enable_preemption();
	return retval;
}
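
A usage sketch (hypothetical caller, not part of the CHUD code above): send the same request code to every other running CPU. Offline CPUs and the local CPU are rejected by chudxnu_cpusig_send() itself with KERN_INVALID_ARGUMENT, so the loop only skips the obvious local case.

static void
send_request_to_all_other_cpus(uint32_t request_code)
{
	unsigned int cpu;

	for (cpu = 0; cpu < real_ncpus; cpu++) {
		if ((int) cpu == cpu_number())
			continue;	/* skip the local CPU */

		/* each call spins until the target acknowledges, or panics on timeout */
		(void) chudxnu_cpusig_send((int) cpu, request_code);
	}
}
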
Example #9
void kmutex_unlock(kmutex *m)
{
   disable_preemption();

   DEBUG_ONLY(check_not_in_irq_handler());
   ASSERT(kmutex_is_curr_task_holding_lock(m));

   if (m->flags & KMUTEX_FL_RECURSIVE) {

      ASSERT(m->lock_count > 0);

      if (--m->lock_count > 0) {
         enable_preemption();
         return;
      }

      // m->lock_count == 0: we have to really unlock the mutex
   }

   m->owner_task = NULL;

   /* Unlock one task waiting to acquire the mutex 'm' (if any) */
   if (!list_is_empty(&m->wait_list)) {

      wait_obj *task_wo =
         list_first_obj(&m->wait_list, wait_obj, wait_list_node);

      task_info *ti = CONTAINER_OF(task_wo, task_info, wobj);

      m->owner_task = ti;

      if (m->flags & KMUTEX_FL_RECURSIVE)
         m->lock_count++;

      ASSERT(ti->state == TASK_STATE_SLEEPING);
      task_reset_wait_obj(ti);

   } // if (!list_is_empty(&m->wait_list))

   enable_preemption();
}
Example #10
/*
 *	Routine: hw_lock_unlock
 *
 *	Unconditionally release lock, release preemption level.
 */
void
hw_lock_unlock(hw_lock_t lock)
{
	__c11_atomic_store((_Atomic uintptr_t *)&lock->lock_data, 0, memory_order_release_smp);
#if __arm__ || __arm64__
	// ARM tests are only for open-source exclusion
	set_event();
#endif	// __arm__ || __arm64__
#if	CONFIG_DTRACE
	LOCKSTAT_RECORD(LS_LCK_SPIN_UNLOCK_RELEASE, lock, 0);
#endif /* CONFIG_DTRACE */
	enable_preemption();
}
Example #11
static void
swtch_continue(void)
{
	register processor_t	myprocessor;
	boolean_t				result;

	disable_preemption();
	myprocessor = current_processor();
	result = !SCHED(processor_queue_empty)(myprocessor) || rt_runq.count > 0;
	enable_preemption();

	thread_syscall_return(result);
	/*NOTREACHED*/
}
Example #12
void end_fault_handler_state(void)
{
   /*
    * Exit from the fault handler with the correct sequence:
    *
    *    - re-enable the preemption (the last thing disabled)
    *    - pop the last "nested interrupt" caused by the fault
    *    - re-enable the interrupts (disabled by the CPU as first thing)
    *
    * See soft_interrupt_entry() for more.
    */
   enable_preemption();
   pop_nested_interrupt();
   enable_interrupts_forced();
}
Example #13
void
log(__unused int level, char *fmt, ...)
{
	va_list	listp;

#ifdef lint
	level++;
#endif /* lint */
#ifdef	MACH_BSD
	disable_preemption();
	va_start(listp, fmt);
	_doprnt(fmt, &listp, conslog_putc, 0);
	va_end(listp);
	enable_preemption();
#endif
}
Example #14
/*
 *	Routine: hw_lock_try
 *
 *	returns with preemption disabled on success.
 */
unsigned int
hw_lock_try(hw_lock_t lock)
{
	thread_t	thread = current_thread();
	int		success = 0;
#if	LOCK_TRY_DISABLE_INT
	long		intmask;

	intmask = disable_interrupts();
#else
	disable_preemption_for_thread(thread);
#endif	// LOCK_TRY_DISABLE_INT

#if	__SMP__
#if	LOCK_PRETEST
	if (ordered_load_hw(lock))
		goto failed;
#endif	// LOCK_PRETEST
	success = atomic_compare_exchange(&lock->lock_data, 0, LCK_MTX_THREAD_TO_STATE(thread) | PLATFORM_LCK_ILOCK,
					memory_order_acquire_smp, FALSE);
#else
	if (lock->lock_data == 0) {
		lock->lock_data = LCK_MTX_THREAD_TO_STATE(thread) | PLATFORM_LCK_ILOCK;
		success = 1;
	}
#endif	// __SMP__

#if	LOCK_TRY_DISABLE_INT
	if (success)
		disable_preemption_for_thread(thread);
#if	LOCK_PRETEST
failed:
#endif	// LOCK_PRETEST
	restore_interrupts(intmask);
#else
#if	LOCK_PRETEST
failed:
#endif	// LOCK_PRETEST
	if (!success)
		enable_preemption();
#endif	// LOCK_TRY_DISABLE_INT
#if CONFIG_DTRACE
	if (success)
		LOCKSTAT_RECORD(LS_LCK_SPIN_LOCK_ACQUIRE, lock, 0);
#endif
	return success;
}
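
A hedged usage sketch (hypothetical caller): on success hw_lock_try() returns with preemption disabled, so the critical section must stay short and end with hw_lock_unlock() (Example #10), which both drops the lock and releases the preemption level.

unsigned int
try_bump_counter(hw_lock_t lock, unsigned int *counter)
{
	if (!hw_lock_try(lock))
		return 0;		/* contended: caller retries or takes a slow path */

	(*counter)++;			/* short critical section, preemption disabled */
	hw_lock_unlock(lock);		/* releases the lock and the preemption level */
	return 1;
}
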
Example #15
static void
panic_epilogue(spl_t	s)
{
	/*
	 * Release panicstr so that we can handle other panics normally.
	 */
	PANIC_LOCK();
	panicstr = (char *)0;
	PANIC_UNLOCK();

	if (return_on_panic) {
		panic_normal();
		enable_preemption();
		splx(s);
		return;
	}
	kdb_printf("panic: We are hanging here...\n");
	panic_stop();
	/* NOTREACHED */
}
Example #16
void soft_interrupt_entry(regs *r)
{
   const int int_num = regs_intnum(r);
   ASSERT(!are_interrupts_enabled());

   if (int_num == SYSCALL_SOFT_INTERRUPT)
      DEBUG_check_preemption_enabled_for_usermode();

   push_nested_interrupt(int_num);
   disable_preemption();

   if (int_num == SYSCALL_SOFT_INTERRUPT) {

      enable_interrupts_forced();
      {
         handle_syscall(r);
      }
      disable_interrupts_forced(); /* restore IF = 0 */

   } else {

      /*
       * General rule: fault handlers get control with interrupts disabled but
       * they are supposed to call enable_interrupts_forced() ASAP.
       */
      handle_fault(r);

      /* Faults are expected to return with interrupts disabled. */
      ASSERT(!are_interrupts_enabled());
   }

   enable_preemption();
   pop_nested_interrupt();

   if (int_num == SYSCALL_SOFT_INTERRUPT)
      DEBUG_check_preemption_enabled_for_usermode();
}
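
A hedged sketch of a fault handler honoring the contract stated above (hypothetical handler, not from these sources): it gets control with interrupts disabled, enables them as soon as possible, and disables them again before returning to soft_interrupt_entry().

static void handle_hypothetical_fault(regs *r)
{
   /* interrupts are still disabled here, exactly as soft_interrupt_entry() left them */
   enable_interrupts_forced();      /* enable interrupts ASAP, as the rule above says */

   /* ... resolve the fault, e.g. fix up the mapping for the faulting address ... */
   (void) r;

   disable_interrupts_forced();     /* faults must return with interrupts disabled */
}
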
Example #17
void
panic(const char *str, ...)
{
	va_list	listp;
	spl_t	s;
	thread_t thread;
	wait_queue_t wq;

#if	defined(__i386__) || defined(__x86_64__)
	/* Attempt to display the unparsed panic string */
	const char *tstr = str;

	kprintf("Panic initiated, string: ");
	while (tstr && *tstr)
		kprintf("%c", *tstr++);
	kprintf("\n");
#endif
	if (kdebug_enable)
		kdbg_dump_trace_to_file("/var/tmp/panic.trace");

	s = splhigh();
	disable_preemption();

	panic_safe();

	thread = current_thread();		/* Get failing thread */
	wq = thread->wait_queue;		/* Save the old value */
	thread->wait_queue = NULL;		/* Clear the wait so we do not get double panics when we try locks */

	if( logPanicDataToScreen )
		disable_debug_output = FALSE;
		
	debug_mode = TRUE;

	/* panic_caller is initialized to 0.  If set, don't change it */
	if ( ! panic_caller )
		panic_caller = (unsigned long)(char *)__builtin_return_address(0);
	
restart:
	PANIC_LOCK();
	if (panicstr) {
		if (cpu_number() != paniccpu) {
			PANIC_UNLOCK();
			/*
			 * Wait until message has been printed to identify correct
			 * cpu that made the first panic.
			 */
			while (panicwait)
				continue;
			goto restart;
		} else {
			nestedpanic += 1;
			PANIC_UNLOCK();
			Debugger("double panic");
			printf("double panic:  We are hanging here...\n");
			panic_stop();
			/* NOTREACHED */
		}
	}
	panicstr = str;
	paniccpu = cpu_number();
	panicwait = 1;

	PANIC_UNLOCK();
	kdb_printf("panic(cpu %d caller 0x%lx): ", (unsigned) paniccpu, panic_caller);
	if (str) {
		va_start(listp, str);
		_doprnt(str, &listp, consdebug_putc, 0);
		va_end(listp);
	}
	kdb_printf("\n");

	/*
	 * Release panicwait indicator so that other cpus may call Debugger().
	 */
	panicwait = 0;
	Debugger("panic");
	/*
	 * Release panicstr so that we can handle other panics normally.
	 */
	PANIC_LOCK();
	panicstr = (char *)0;
	PANIC_UNLOCK();
	thread->wait_queue = wq; 	/* Restore the wait queue */

	if (return_on_panic) {
		panic_normal();
		enable_preemption();
		splx(s);
		return;
	}

	kdb_printf("panic: We are hanging here...\n");
	panic_stop();
	/* NOTREACHED */
}
Example #18
struct savearea * interrupt(
        int type,
        struct savearea *ssp,
	unsigned int dsisr,
	unsigned int dar)
{
	int	current_cpu;
	struct per_proc_info	*proc_info;
	uint64_t		now;
	thread_t		thread;

	disable_preemption();

	perfCallback fn = perfIntHook;
	if(fn) {							/* Is there a hook? */
		if(fn(type, ssp, dsisr, dar) == KERN_SUCCESS) return ssp;	/* If it succeeds, we are done... */
	}
	
#if CONFIG_DTRACE
	if(tempDTraceIntHook) {							/* Is there a hook? */
		if(tempDTraceIntHook(type, ssp, dsisr, dar) == KERN_SUCCESS) return ssp;	/* If it succeeds, we are done... */
	}
#endif

#if 0
	{
		extern void fctx_test(void);
		fctx_test();
	}
#endif


	current_cpu = cpu_number();
	proc_info = getPerProc();

	switch (type) {

		case T_DECREMENTER:
			KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_EXCP_DECI, 0) | DBG_FUNC_NONE,
				  isync_mfdec(), (unsigned int)ssp->save_srr0, 0, 0, 0);
	
			now = mach_absolute_time();				/* Find out what time it is */
			
			if(now >= proc_info->pms.pmsPop) {		/* Is it time for power management state change? */
				pmsStep(1);							/* Yes, advance step */
				now = mach_absolute_time();			/* Get the time again since we ran a bit */
			}

			thread = current_thread();					/* Find ourselves */
			if(thread->machine.qactTimer != 0) {	/* Is the timer set? */
				if (thread->machine.qactTimer <= now) {	/* It is set, has it popped? */
					thread->machine.qactTimer = 0;		/* Clear single shot timer */
					if((unsigned int)thread->machine.vmmControl & 0xFFFFFFFE) {	/* Are there any virtual machines? */
						vmm_timer_pop(thread);			/* Yes, check out them out... */
					}
				}
			}

			etimer_intr(USER_MODE(ssp->save_srr1), ssp->save_srr0);	/* Handle event timer */
			break;
	
		case T_INTERRUPT:
			/* Call the platform interrupt routine */
			counter(c_incoming_interrupts++);
	
			KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_EXCP_INTR, 0) | DBG_FUNC_START,
			   current_cpu, (unsigned int)ssp->save_srr0, 0, 0, 0);
	
#if CONFIG_DTRACE && (DEVELOPMENT || DEBUG )
			DTRACE_INT5(interrupt__start, void *, proc_info->interrupt_nub, int, proc_info->interrupt_source, 
						void *, proc_info->interrupt_target, IOInterruptHandler, proc_info->interrupt_handler,
						void *, proc_info->interrupt_refCon);
#endif

			proc_info->interrupt_handler(
				proc_info->interrupt_target, 
				proc_info->interrupt_refCon,
				proc_info->interrupt_nub, 
				proc_info->interrupt_source);
	
#if CONFIG_DTRACE && (DEVELOPMENT || DEBUG )
			DTRACE_INT5(interrupt__complete, void *, proc_info->interrupt_nub, int, proc_info->interrupt_source, 
						void *, proc_info->interrupt_target, IOInterruptHandler, proc_info->interrupt_handler,
						void *, proc_info->interrupt_refCon);
#endif

			KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_EXCP_INTR, 0) | DBG_FUNC_END,
			   0, 0, 0, 0, 0);
	
			break;
	
		case T_SIGP:
			/* Did the other processor signal us? */ 
			cpu_signal_handler();
			break;
	
		case T_SHUTDOWN:
			cpu_doshutdown();
			panic("returning from cpu_doshutdown()\n");
			break;
	
				
		default:
			if (!Call_Debugger(type, ssp))
				unresolved_kernel_trap(type, ssp, dsisr, dar, NULL);
			break;
	}

	enable_preemption();
	return ssp;
}
Example #19
__private_extern__ void
chudxnu_enable_preemption(void)
{
	enable_preemption();
}