Example #1
static void
KALLOC_ZINFO_SFREE(vm_size_t bytes)
{
	thread_t thr = current_thread();
	task_t task;
	zinfo_usage_t zinfo;

	ledger_credit(thr->t_ledger, task_ledgers.tkm_shared, bytes);

	if (kalloc_fake_zone_index != -1 && 
	    (task = thr->task) != NULL && (zinfo = task->tkm_zinfo) != NULL)
		zinfo[kalloc_fake_zone_index].free += bytes;
}
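The allocation-side hook is not part of this listing; the following is a minimal sketch of what its counterpart presumably looks like, inferred purely by symmetry with KALLOC_ZINFO_SFREE above. The ledger_debit() call and the .alloc field are assumptions, not taken from this source.
static void
KALLOC_ZINFO_SALLOC(vm_size_t bytes)
{
	/* Hypothetical mirror of KALLOC_ZINFO_SFREE: charge the shared
	 * kalloc ledger and bump the fake zone's alloc counter. */
	thread_t thr = current_thread();
	task_t task;
	zinfo_usage_t zinfo;

	ledger_debit(thr->t_ledger, task_ledgers.tkm_shared, bytes);

	if (kalloc_fake_zone_index != -1 &&
	    (task = thr->task) != NULL && (zinfo = task->tkm_zinfo) != NULL)
		zinfo[kalloc_fake_zone_index].alloc += bytes;
}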
Example #2
nw_result mk_connection_accept(nw_ep ep, nw_buffer_t msg,
			       nw_ep_t new_epp) {
  nw_result rc;
  nw_pv_t pv;

  nw_lock();
  if (ep >= MAX_EP || (pv = hect[ep].pv) == NULL) {
    rc = NW_BAD_EP;
  } else {
    while (pv != NULL && pv->owner != current_task())
      pv = pv->next;
    if (pv == NULL) {
      rc = NW_PROT_VIOLATION;
    } else if ((char *) msg < pv->buf_start ||
	       (char *) msg + sizeof(nw_buffer_s) > pv->buf_end ||
	       !msg->buf_used ||
	       (char *) msg + msg->buf_length > pv->buf_end) {
      rc = NW_BAD_BUFFER;
    } else if (new_epp != NULL &&
	       (invalid_user_access(current_task()->map, (vm_offset_t) new_epp,
				   (vm_offset_t) new_epp + sizeof(nw_ep) - 1,
				   VM_PROT_READ | VM_PROT_WRITE) ||
		(*new_epp != 0 && *new_epp != ep))) {
      rc = NW_INVALID_ARGUMENT;
    } else {
      rc = (*(devct[NW_DEVICE(ect[ep].conn->peer.rem_addr_1)].entry->accept))
	      (ep, msg, new_epp);
      if (rc == NW_SYNCH) {
	hect[ep].sig_waiter = current_thread();
	assert_wait(0, TRUE);
	current_thread()->nw_ep_waited = NULL;
	simple_unlock(&nw_simple_lock);
	thread_block(mk_return);
      }
    }
  }
  nw_unlock();
  return rc;
}
Example #3
kern_return_t
act_set_state_from_user(
	thread_t				thread,
	int						flavor,
	thread_state_t			state,
	mach_msg_type_number_t	count)
{
    if (thread == current_thread())
	    return (KERN_INVALID_ARGUMENT);

    return (thread_set_state_from_user(thread, flavor, state, count));
    
}
Example #4
static void
panic_display_process_name(void) {
	/* Because of scoping issues, len(p_comm) from proc_t is hard-coded here */
	char proc_name[17] = "Unknown";
	task_t ctask = 0;
	void *cbsd_info = 0;

	if (ml_nofault_copy((vm_offset_t)&current_thread()->task, (vm_offset_t) &ctask, sizeof(task_t)) == sizeof(task_t))
		if(ml_nofault_copy((vm_offset_t)&ctask->bsd_info, (vm_offset_t)&cbsd_info, sizeof(cbsd_info)) == sizeof(cbsd_info))
			if (cbsd_info && (ml_nofault_copy((vm_offset_t) proc_name_address(cbsd_info), (vm_offset_t) &proc_name, sizeof(proc_name)) > 0))
				proc_name[sizeof(proc_name) - 1] = '\0';
	kdb_printf("\nBSD process name corresponding to current thread: %s\n", proc_name);
}
Example #5
void
mach_msg_continue(void)
{
	ipc_thread_t thread = current_thread();
	task_t task = thread->task;
	ipc_space_t space = task->itk_space;
	vm_map_t map = task->map;
	mach_msg_header_t *msg = thread->ith_msg;
	mach_msg_size_t rcv_size = thread->ith_rcv_size;
	ipc_object_t object = thread->ith_object;
	ipc_mqueue_t mqueue = thread->ith_mqueue;
	ipc_kmsg_t kmsg;
	mach_port_seqno_t seqno;
	mach_msg_return_t mr;

	mr = ipc_mqueue_receive(mqueue, MACH_MSG_OPTION_NONE,
				MACH_MSG_SIZE_MAX, MACH_MSG_TIMEOUT_NONE,
				TRUE, mach_msg_continue, &kmsg, &seqno);
	/* mqueue is unlocked */
	ipc_object_release(object);
	if (mr != MACH_MSG_SUCCESS) {
		thread_syscall_return(mr);
		/*NOTREACHED*/
	}

	kmsg->ikm_header.msgh_seqno = seqno;
	if (kmsg->ikm_header.msgh_size > rcv_size) {
		ipc_kmsg_copyout_dest(kmsg, space);
		(void) ipc_kmsg_put(msg, kmsg, sizeof *msg);
		thread_syscall_return(MACH_RCV_TOO_LARGE);
		/*NOTREACHED*/
	}

	mr = ipc_kmsg_copyout(kmsg, space, map, MACH_PORT_NULL);
	if (mr != MACH_MSG_SUCCESS) {
		if ((mr &~ MACH_MSG_MASK) == MACH_RCV_BODY_ERROR) {
			(void) ipc_kmsg_put(msg, kmsg,
					kmsg->ikm_header.msgh_size);
		} else {
			ipc_kmsg_copyout_dest(kmsg, space);
			(void) ipc_kmsg_put(msg, kmsg, sizeof *msg);
		}

		thread_syscall_return(mr);
		/*NOTREACHED*/
	}

	mr = ipc_kmsg_put(msg, kmsg, kmsg->ikm_header.msgh_size);
	thread_syscall_return(mr);
	/*NOTREACHED*/
}
Example #6
/*
 * Routine: 	lck_mtx_lock_acquire
 *
 * Invoked on acquiring the mutex when there is
 * contention.
 *
 * Returns the current number of waiters.
 *
 * Called with the interlock locked.
 */
int
lck_mtx_lock_acquire(
	lck_mtx_t		*lck)
{
	thread_t		thread = current_thread();
	lck_mtx_t		*mutex;

	if (lck->lck_mtx_tag != LCK_MTX_TAG_INDIRECT)
		mutex = lck;
	else
		mutex = &lck->lck_mtx_ptr->lck_mtx;

	if (thread->pending_promoter[thread->pending_promoter_index] == mutex) {
		thread->pending_promoter[thread->pending_promoter_index] = NULL;
		if (thread->pending_promoter_index > 0)
			thread->pending_promoter_index--;
		mutex->lck_mtx_waiters--;
	}

	if (mutex->lck_mtx_waiters > 0) {
		integer_t		priority = mutex->lck_mtx_pri;
		spl_t			s = splsched();

		thread_lock(thread);
		thread->promotions++;
		thread->sched_flags |= TH_SFLAG_PROMOTED;
		if (thread->sched_pri < priority) {
			KERNEL_DEBUG_CONSTANT(
				MACHDBG_CODE(DBG_MACH_SCHED,MACH_PROMOTE) | DBG_FUNC_NONE,
						thread->sched_pri, priority, 0, lck, 0);
			/* Do not promote past promotion ceiling */
			assert(priority <= MAXPRI_PROMOTE);
			set_sched_pri(thread, priority);
		}
		thread_unlock(thread);
		splx(s);
	}
	else
		mutex->lck_mtx_pri = 0;

#if CONFIG_DTRACE
	if (lockstat_probemap[LS_LCK_MTX_LOCK_ACQUIRE] || lockstat_probemap[LS_LCK_MTX_EXT_LOCK_ACQUIRE]) {
		if (lck->lck_mtx_tag != LCK_MTX_TAG_INDIRECT) {
			LOCKSTAT_RECORD(LS_LCK_MTX_LOCK_ACQUIRE, lck, 0);
		} else {
			LOCKSTAT_RECORD(LS_LCK_MTX_EXT_LOCK_ACQUIRE, lck, 0);
		}
	}
#endif	
	return (mutex->lck_mtx_waiters);
}
Example #7
int
dtrace_getustackdepth(void)
{
	thread_t thread = current_thread();
	x86_saved_state_t *regs;
	user_addr_t pc, sp, fp;
	int n = 0;
	boolean_t is64Bit = proc_is64bit(current_proc());

	if (thread == NULL)
		return 0;

	if (DTRACE_CPUFLAG_ISSET(CPU_DTRACE_FAULT))
		return (-1);

	regs = (x86_saved_state_t *)find_user_regs(thread);
	if (regs == NULL)
		return 0;

	if (is64Bit) {
		pc = regs->ss_64.isf.rip;
		sp = regs->ss_64.isf.rsp;
		fp = regs->ss_64.rbp;
	} else {
		pc = regs->ss_32.eip;
		sp = regs->ss_32.uesp;
		fp = regs->ss_32.ebp;
	}

	if (dtrace_adjust_stack(NULL, NULL, &pc, sp) == 1) {
	    /*
	     * we would have adjusted the stack if we had
	     * supplied one (that is what rc == 1 means).
	     * Also, as a side effect, the pc might have
	     * been fixed up, which is good for calling
	     * in to dtrace_getustack_common.
	     */
	    n++;
	}
	
	/*
	 * Note that unlike ppc, the x86 code does not use
	 * CPU_DTRACE_USTACK_FP. This is because x86 always
	 * traces from the fp, even in syscall/profile/fbt
	 * providers.
	 */

	n += dtrace_getustack_common(NULL, 0, pc, fp);

	return (n);
}
Example #8
/*
 * Set the Universal (POSIX) time. Privileged call.
 */
kern_return_t
host_set_time(
	host_t		host,
	time_value_t	new_time)
{
	spl_t	s;

	if (host == HOST_NULL)
		return(KERN_INVALID_HOST);

#if	NCPUS > 1
	thread_bind(current_thread(), master_processor);
	mp_disable_preemption();
	if (current_processor() != master_processor) {
		mp_enable_preemption();
		thread_block((void (*)(void)) 0);
	} else {
		mp_enable_preemption();
	}
#endif	/* NCPUS > 1 */

	s = splhigh();
	time = new_time;
	update_mapped_time(&time);
#if	PTIME_MACH_RT
	rtc_gettime_interrupts_disabled((tvalspec_t *)&last_utime_tick);
#endif	/* PTIME_MACH_RT */
#if 0
	(void)bbc_settime((time_value_t *)&time);
#endif
	splx(s);

#if	NCPUS > 1
	thread_bind(current_thread(), PROCESSOR_NULL);
#endif	/* NCPUS > 1 */

	return (KERN_SUCCESS);
}
Example #9
File: status.c Project: DiogoPC/xnu
/** 
 * act_thread_catt
 *
 * Restore the current thread context, used for the internal uthread structure.
 * (We should also restore VFP state...)
 */
void act_thread_catt(void *ctx)
{
    thread_t thr_act = current_thread();
    kern_return_t kret;

    if (ctx == (void *)NULL)
        return;

    struct arm_thread_state *ts = (struct arm_thread_state *)ctx;

    machine_thread_set_state(thr_act, ARM_THREAD_STATE, (thread_state_t)ts, 
                                    ARM_THREAD_STATE_COUNT);
    kfree(ts, sizeof(struct arm_thread_state));
}
Example #10
/*
 * Routine:	lck_mtx_sleep_deadline
 */
wait_result_t
lck_mtx_sleep_deadline(
        lck_mtx_t		*lck,
	lck_sleep_action_t	lck_sleep_action,
	event_t			event,
	wait_interrupt_t	interruptible,
	uint64_t		deadline)
{
	wait_result_t   res;
	thread_t		thread = current_thread();

	KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_MTX_SLEEP_DEADLINE_CODE) | DBG_FUNC_START,
		     VM_KERNEL_UNSLIDE_OR_PERM(lck), (int)lck_sleep_action, VM_KERNEL_UNSLIDE_OR_PERM(event), (int)interruptible, 0);

	if ((lck_sleep_action & ~LCK_SLEEP_MASK) != 0)
		panic("Invalid lock sleep action %x\n", lck_sleep_action);

	if (lck_sleep_action & LCK_SLEEP_PROMOTED_PRI) {
		/*
		 * See lck_mtx_sleep().
		 */
		thread->rwlock_count++;
	}

	res = assert_wait_deadline(event, interruptible, deadline);
	if (res == THREAD_WAITING) {
		lck_mtx_unlock(lck);
		res = thread_block(THREAD_CONTINUE_NULL);
		if (!(lck_sleep_action & LCK_SLEEP_UNLOCK)) {
			if ((lck_sleep_action & LCK_SLEEP_SPIN))
				lck_mtx_lock_spin(lck);
			else
				lck_mtx_lock(lck);
		}
	}
	else
	if (lck_sleep_action & LCK_SLEEP_UNLOCK)
		lck_mtx_unlock(lck);

	if (lck_sleep_action & LCK_SLEEP_PROMOTED_PRI) {
		if ((thread->rwlock_count-- == 1 /* field now 0 */) && (thread->sched_flags & TH_SFLAG_RW_PROMOTED)) {
			/* sched_flags checked without lock, but will be rechecked while clearing */
			lck_rw_clear_promotion(thread);
		}
	}

	KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_MTX_SLEEP_DEADLINE_CODE) | DBG_FUNC_END, (int)res, 0, 0, 0, 0);

	return res;
}
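A hedged usage sketch for this interface: the caller holds the mutex, then sleeps on a condition with a deadline, and lck_mtx_sleep_deadline drops and (without LCK_SLEEP_UNLOCK) re-takes the lock around the block. The mutex, flag, and 10 ms interval below are illustrative names, not from the source.
/* Sketch: wait (up to ~10 ms at a time) for a condition protected by a
 * mutex; my_lock and my_flag are hypothetical. */
static void
wait_for_flag_example(lck_mtx_t *my_lock, volatile boolean_t *my_flag)
{
	uint64_t	deadline;
	wait_result_t	wr;

	lck_mtx_lock(my_lock);
	while (!*my_flag) {
		clock_interval_to_deadline(10, NSEC_PER_MSEC, &deadline);
		wr = lck_mtx_sleep_deadline(my_lock, LCK_SLEEP_DEFAULT,
		    (event_t)my_flag, THREAD_UNINT, deadline);
		if (wr == THREAD_TIMED_OUT)
			break;		/* deadline passed without a wakeup */
		/* otherwise woken via thread_wakeup((event_t)my_flag) */
	}
	lck_mtx_unlock(my_lock);
}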
Example #11
mach_port_name_t
thread_self_trap(
	__unused struct thread_self_trap_args *args)
{
	thread_t  thread = current_thread();
	task_t task = thread->task;
	ipc_port_t sright;
	mach_port_name_t name;

	sright = retrieve_thread_self_fast(thread);
	name = ipc_port_copyout_send(sright, task->itk_space);
	return name;

}
Example #12
// ### fasl-sharp-dollar stream sub-char numarg => value
Value SYS_fasl_sharp_dollar(Value streamarg, Value subchar, Value numarg)
{
  Symbol * symbol = check_symbol(stream_read_symbol(streamarg, FASL_READTABLE));
  Thread * thread = current_thread();
  Value package = thread->symbol_value(S_fasl_anonymous_package);
  if (package == NIL)
    {
      package = make_value(new Package());
      thread->set_symbol_value(S_fasl_anonymous_package, package);
    }
  Value sym = check_package(package)->intern(symbol->name(), false);
  the_symbol(sym)->set_package(NIL);
  return sym;
}
Example #13
	/**
	 *  Unset the thread-specific value, if present
	 *
	 *  @return true on success
	 */
	bool erase() {
		auto currThread = current_thread();

		for (size_t i = 0; i < N; i++) {
			if (atomic_load_explicit(&threads[i], memory_order_acquire) == currThread) {
				values[i] = {};
				thread_t nullThread = nullptr;
				return atomic_compare_exchange_strong_explicit(&threads[i], &currThread,
					nullThread, memory_order_acq_rel, memory_order_acquire);
			}
		}

		return false;
	}
Example #14
boolean_t
db_phys_eq(
	task_t		task1,
	vm_offset_t	addr1,
	const task_t	task2,
	vm_offset_t	addr2)
{
	vm_offset_t	kern_addr1, kern_addr2;

	if (addr1 >= VM_MIN_KERNEL_ADDRESS || addr2 >= VM_MIN_KERNEL_ADDRESS)
	    return FALSE;
	if ((addr1 & (INTEL_PGBYTES-1)) != (addr2 & (INTEL_PGBYTES-1)))
	    return FALSE;
	if (task1 == TASK_NULL) {
	    if (current_thread() == THREAD_NULL)
		return FALSE;
	    task1 = current_thread()->task;
	}
	if (db_user_to_kernel_address(task1, addr1, &kern_addr1, 0) < 0
		|| db_user_to_kernel_address(task2, addr2, &kern_addr2, 0) < 0)
	    return FALSE;
	return(kern_addr1 == kern_addr2);
}
Example #15
/*
 * This method will bind a given thread to the requested CPU starting at the
 * next time quantum.  If the thread is the current thread, this method will
 * force a thread_block().  The result is that if you call this method on the
 * current thread, you will be on the requested CPU when this method returns.
 */
__private_extern__ kern_return_t
chudxnu_bind_thread(thread_t thread, int cpu, __unused int options)
{
    processor_t proc = NULL;

	if(cpu < 0 || (unsigned int)cpu >= real_ncpus) // sanity check
		return KERN_FAILURE;

	// temporary restriction until after phase 2 of the scheduler
	if(thread != current_thread())
		return KERN_FAILURE; 
	
	proc = cpu_to_processor(cpu);

	/* 
	 * Potentially racy, but mainly to prevent binding to a shutdown
	 * processor.
	 */
	if(proc && !(proc->state == PROCESSOR_OFF_LINE) &&
			!(proc->state == PROCESSOR_SHUTDOWN)) {
		
		thread_bind(proc);

		/*
		 * If we're trying to bind the current thread, and
		 * we're not on the target cpu, and not at interrupt
		 * context, block the current thread to force a
		 * reschedule on the target CPU.
		 */
		if(thread == current_thread() && 
			!ml_at_interrupt_context() && cpu_number() != cpu) {
			(void)thread_block(THREAD_CONTINUE_NULL);
		}
		return KERN_SUCCESS;
	}
    return KERN_FAILURE;
}
Example #16
void db_dr (
	int		num,
	vm_offset_t	linear_addr,
	int		type,
	int		len,
	int		persistence)
{
	int s = splhigh();
	unsigned long dr7;

	if (!kernel_dr) {
	    if (!linear_addr) {
		splx(s);
		return;
	    }
	    kernel_dr = TRUE;
	    /* Clear user debugging registers */
	    set_dr7(0);
	    set_dr0(0);
	    set_dr1(0);
	    set_dr2(0);
	    set_dr3(0);
	}

	ids.dr[num] = linear_addr;
	switch (num) {
	    case 0: set_dr0(linear_addr); break;
	    case 1: set_dr1(linear_addr); break;
	    case 2: set_dr2(linear_addr); break;
	    case 3: set_dr3(linear_addr); break;
	}

	/* Replace type/len/persistence for DRnum in dr7 */
	dr7 = get_dr7 ();
	dr7 &= ~(0xfUL << (4*num+16)) & ~(0x3UL << (2*num));
	dr7 |= (((len << 2) | type) << (4*num+16)) | (persistence << (2*num));
	set_dr7 (dr7);

	if (kernel_dr) {
	    if (!ids.dr[0] && !ids.dr[1] && !ids.dr[2] && !ids.dr[3]) {
		/* Not used any more, switch back to user debugging registers */
		set_dr7 (0);
		kernel_dr = FALSE;
		zero_dr = TRUE;
		db_load_context(current_thread()->pcb);
	    }
	}
	splx(s);
}
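To make the DR7 arithmetic concrete, assuming the standard x86 debug-register layout (a 4-bit R/W+LEN field per register starting at bit 16 + 4*num, and L/G enable bits at bit 2*num), a 4-byte write watchpoint on DR1 with local enable works out as below; the values are purely illustrative.
	/* Worked example: num = 1, type = 1 (data write), len = 3 (4 bytes),
	 * persistence = 1 (local enable):
	 *   (len << 2) | type              = 0xd
	 *   0xd << (4*1 + 16)              = 0x00d00000   (R/W1, LEN1 fields)
	 *   persistence << (2*1)           = 0x00000004   (L1 enable bit)
	 * so dr7 |= 0x00d00004 after the old DR1 fields are masked out.
	 */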
Example #17
/*
 * Routine:	lck_rw_sleep_deadline
 */
wait_result_t
lck_rw_sleep_deadline(
	lck_rw_t		*lck,
	lck_sleep_action_t	lck_sleep_action,
	event_t			event,
	wait_interrupt_t	interruptible,
	uint64_t		deadline)
{
	wait_result_t   res;
	lck_rw_type_t	lck_rw_type;
	thread_t		thread = current_thread();

	if ((lck_sleep_action & ~LCK_SLEEP_MASK) != 0)
		panic("Invalid lock sleep action %x\n", lck_sleep_action);

	if (lck_sleep_action & LCK_SLEEP_PROMOTED_PRI) {
		thread->rwlock_count++;
	}

	res = assert_wait_deadline(event, interruptible, deadline);
	if (res == THREAD_WAITING) {
		lck_rw_type = lck_rw_done(lck);
		res = thread_block(THREAD_CONTINUE_NULL);
		if (!(lck_sleep_action & LCK_SLEEP_UNLOCK)) {
			if (!(lck_sleep_action & (LCK_SLEEP_SHARED|LCK_SLEEP_EXCLUSIVE)))
				lck_rw_lock(lck, lck_rw_type);
			else if (lck_sleep_action & LCK_SLEEP_EXCLUSIVE)
				lck_rw_lock_exclusive(lck);
			else
				lck_rw_lock_shared(lck);
		}
	}
	else
	if (lck_sleep_action & LCK_SLEEP_UNLOCK)
		(void)lck_rw_done(lck);

	if (lck_sleep_action & LCK_SLEEP_PROMOTED_PRI) {
		if ((thread->rwlock_count-- == 1 /* field now 0 */) && (thread->sched_flags & TH_SFLAG_RW_PROMOTED)) {
			/* sched_flags checked without lock, but will be rechecked while clearing */

			/* Only if the caller wanted the lck_rw_t returned unlocked should we drop to 0 */
			assert(lck_sleep_action & LCK_SLEEP_UNLOCK);

			lck_rw_clear_promotion(thread);
		}
	}

	return res;
}
Example #18
kern_return_t
thread_get_state(
	register thread_t		thread,
	int						flavor,
	thread_state_t			state,			/* pointer to OUT array */
	mach_msg_type_number_t	*state_count)	/*IN/OUT*/
{
	kern_return_t		result = KERN_SUCCESS;

	if (thread == THREAD_NULL)
		return (KERN_INVALID_ARGUMENT);

	thread_mtx_lock(thread);

	if (thread->active) {
		if (thread != current_thread()) {
			thread_hold(thread);

			thread_mtx_unlock(thread);

			if (thread_stop(thread, FALSE)) {
				thread_mtx_lock(thread);
				result = machine_thread_get_state(
										thread, flavor, state, state_count);
				thread_unstop(thread);
			}
			else {
				thread_mtx_lock(thread);
				result = KERN_ABORTED;
			}

			thread_release(thread);
		}
		else
			result = machine_thread_get_state(
									thread, flavor, state, state_count);
	}
	else if (thread->inspection)
	{
		result = machine_thread_get_state(
									thread, flavor, state, state_count);
	}
	else
		result = KERN_TERMINATED;

	thread_mtx_unlock(thread);

	return (result);
}
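The same call is also exported to user space through the Mach thread API; a hedged sketch of a typical caller reading the x86-64 register state of another thread (the target thread port is assumed to have been obtained elsewhere, e.g. from task_threads()):
#include <mach/mach.h>
#include <stdio.h>

/* Sketch: print the instruction pointer of another thread in this task. */
static void
print_thread_rip(thread_act_t target)
{
	x86_thread_state64_t state;
	mach_msg_type_number_t count = x86_THREAD_STATE64_COUNT;

	if (thread_get_state(target, x86_THREAD_STATE64,
	    (thread_state_t)&state, &count) == KERN_SUCCESS)
		printf("rip = 0x%llx\n", state.__rip);
}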
Example #19
void BrcmPatchRAM::uploadFirmwareThread(void *arg, wait_result_t wait)
{
    DebugLog("sendFirmwareThread enter\n");

    BrcmPatchRAM* me = static_cast<BrcmPatchRAM*>(arg);
    me->resetDevice();
    IOSleep(20);
    me->uploadFirmware();
    me->publishPersonality();
    me->scheduleWork(kWorkFinished);

    DebugLog("sendFirmwareThread termination\n");
    thread_terminate(current_thread());
    DebugLog("!!! sendFirmwareThread post-terminate !!! should not be here\n");
}
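The surrounding pattern (spawn a one-shot kernel thread and let it end itself with thread_terminate(current_thread())) can be sketched with the generic XNU kernel-thread KPI; the worker body and do_work() below are hypothetical, not BrcmPatchRAM's actual code.
#include <kern/thread.h>

/* Hypothetical one-shot worker: do the work, then terminate this thread.
 * thread_terminate(current_thread()) does not return. */
static void
one_shot_worker(void *arg, __unused wait_result_t wr)
{
	do_work(arg);				/* hypothetical */
	thread_terminate(current_thread());
	/* not reached */
}

/* Spawning side: start the worker and drop the reference handed back. */
static void
start_one_shot_worker(void *context)
{
	thread_t worker;

	if (kernel_thread_start(one_shot_worker, context, &worker) == KERN_SUCCESS)
		thread_deallocate(worker);
}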
Example #20
int thread_yield_microseconds(unsigned microsecs)
{
	struct thread *current;
	struct timeout_callback tocb;

	current = current_thread();

	if (!thread_can_yield(current))
		return -1;

	if (thread_yield_timed_callback(&tocb, microsecs))
		return -1;

	return 0;
}
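A hedged sketch of how a coreboot driver might call this: poll a device while yielding the CPU to other coreboot threads, and fall back to a plain delay when no yieldable thread context is available. controller_ready() is a hypothetical status check.
#include <thread.h>
#include <delay.h>

/* Sketch: wait for a device without monopolizing the CPU. */
static void
wait_for_controller(void)
{
	while (!controller_ready()) {
		if (thread_yield_microseconds(100) < 0)
			udelay(100);	/* no yieldable thread context: busy-wait */
	}
}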
Example #21
static void schedule(struct thread *t)
{
	struct thread *current = current_thread();

	/* If t is NULL need to find new runnable thread. */
	if (t == NULL) {
		if (thread_list_empty(&runnable_threads))
			die("Runnable thread list is empty!\n");
		t = pop_runnable();
	} else {
		/* current is still runnable. */
		push_runnable(current);
	}
	switch_to_thread(t->stack_current, &current->stack_current);
}
Example #22
File: cpu.c Project: argp/xnu
/*
 *	Routine:	cpu_sleep
 *	Function:
 */
void
cpu_sleep(void)
{
	cpu_data_t     *cpu_data_ptr = getCpuDatap();
	pmap_switch_user_ttb(kernel_pmap);
	cpu_data_ptr->cpu_active_thread = current_thread();
	cpu_data_ptr->cpu_reset_handler = (vm_offset_t) start_cpu_paddr;
	cpu_data_ptr->cpu_flags |= SleepState;
	cpu_data_ptr->cpu_user_debug = NULL;

	CleanPoC_Dcache();

	PE_cpu_machine_quiesce(cpu_data_ptr->cpu_id);

}
Example #23
void
get_vfs_context(void)
{
    int isglock = ISAFS_GLOCK();

    if (!isglock)
	AFS_GLOCK();
    if (afs_osi_ctxtp_initialized) {
	if (!isglock)
	    AFS_GUNLOCK();
	return;
    }
    osi_Assert(vfs_context_owner != current_thread());
    if (afs_osi_ctxtp && current_proc() == vfs_context_curproc) {
	vfs_context_ref++;
	vfs_context_owner = current_thread();
	if (!isglock)
	    AFS_GUNLOCK();
	return;
    }
    while (afs_osi_ctxtp && vfs_context_ref) {
	afs_osi_Sleep(&afs_osi_ctxtp);
	if (afs_osi_ctxtp_initialized) {
	    if (!isglock)
		AFS_GUNLOCK();
	    return;
	}
    }
    vfs_context_rele(afs_osi_ctxtp);
    vfs_context_ref=1;
    afs_osi_ctxtp = vfs_context_create(NULL);
    vfs_context_owner = current_thread();
    vfs_context_curproc = current_proc();
    if (!isglock)
	AFS_GUNLOCK();
}
Example #24
AbstractString * StandardObject::write_to_string()
{
  if (CL_fboundp(S_print_object) != NIL)
    {
      Thread * const thread = current_thread();
      StringOutputStream * stream = new StringOutputStream(S_character);
      thread->execute(the_symbol(S_print_object)->function(),
                      make_value(this),
                      make_value(stream));
      AbstractString * s = stream->get_string();
      return s;
    }
  else
    return unreadable_string();
}
Example #25
struct trace_cpu_data *trace_get_tcd(void)
{
	struct trace_cpu_data *tcd;
	int nr_pages;
	struct list_head pages;

	/*
	 * XXX nikita: do NOT call libcfs_debug_msg() (CDEBUG/ENTRY/EXIT)
	 * from here: this will lead to infinite recursion.
	 */

	/*
	 * debugging check for recursive call to libcfs_debug_msg()
	 */
	if (trace_owner == current_thread()) {
		/*
		 * Cannot assert here.
		 */
		printk(KERN_EMERG "recursive call to %s", __FUNCTION__);
		/*
		 * "The death of God left the angels in a strange position."
		 */
		cfs_enter_debugger();
	}
	tcd = &trace_data[0].tcd;
	CFS_INIT_LIST_HEAD(&pages);
	if (get_preemption_level() == 0)
		nr_pages = trace_refill_stock(tcd, CFS_ALLOC_STD, &pages);
	else
		nr_pages = 0;
	spin_lock(&trace_cpu_serializer);
	trace_owner = current_thread();
	tcd->tcd_cur_stock_pages += nr_pages;
	list_splice(&pages, &tcd->tcd_stock_pages);
	return tcd;
}
Example #26
/*
 *	Routine:	semaphore_wait_continue
 *
 *	Common continuation routine after waiting on a semaphore.
 *	It returns directly to user space.
 */
void
semaphore_wait_continue(void)
{
	thread_t self = current_thread();
	int wait_result = self->wait_result;
	void (*caller_cont)(kern_return_t) = self->sth_continuation;

	assert(self->sth_waitsemaphore != SEMAPHORE_NULL);
	semaphore_dereference(self->sth_waitsemaphore);
	if (self->sth_signalsemaphore != SEMAPHORE_NULL)
		semaphore_dereference(self->sth_signalsemaphore);

	assert(caller_cont != (void (*)(kern_return_t))0);
	(*caller_cont)(semaphore_convert_wait_result(wait_result));
}
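For context, a heavily hedged sketch of the setup half of this continuation (in XNU it lives in the semaphore wait path): the waiter stashes its continuation and semaphore references in the thread fields read above, then blocks with semaphore_wait_continue as the resume point. Only the field names come from the routine above; everything else is an assumption, not verbatim source.
	/* Sketch of the wait side, just before blocking (assumed): */
	self->sth_continuation = caller_cont;		/* hypothetical locals */
	self->sth_waitsemaphore = semaphore;
	self->sth_signalsemaphore = SEMAPHORE_NULL;
	thread_block(semaphore_wait_continue);	/* resumes in the routine above */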
Example #27
static int
thread_switch_disable_workqueue_sched_callback(void)
{
	sched_call_t callback = workqueue_get_sched_callback();
	thread_t self = current_thread();
	if (!callback || self->sched_call != callback) {
		return FALSE;
	}
	spl_t s = splsched();
	thread_lock(self);
	thread_sched_call(self, NULL);
	thread_unlock(self);
	splx(s);
	return TRUE;
}
Example #28
kern_return_t
lock_release (lock_set_t lock_set, int lock_id)
{
	ulock_t	 ulock;

	if (lock_set == LOCK_SET_NULL)
		return KERN_INVALID_ARGUMENT;

	if (lock_id < 0 || lock_id >= lock_set->n_ulocks)
		return KERN_INVALID_ARGUMENT;

	ulock = (ulock_t) &lock_set->ulock_list[lock_id];

	return (ulock_release_internal(ulock, current_thread()));
}
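lock_release() above is the kernel side of the Mach lock-set interface; a hedged user-space sketch of the matching calls on systems that still support lock sets (the single-ulock count and FIFO policy are illustrative, and error handling is mostly elided):
#include <mach/mach.h>

/* Sketch: create a lock set with one ulock, take and release lock 0. */
static void
lock_set_example(void)
{
	lock_set_t locks;

	if (lock_set_create(mach_task_self(), &locks, 1, SYNC_POLICY_FIFO)
	    != KERN_SUCCESS)
		return;
	lock_acquire(locks, 0);
	/* ... critical section ... */
	lock_release(locks, 0);
	lock_set_destroy(mach_task_self(), locks);
}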
Example #29
nw_result mk_connection_open_internal(nw_ep local_ep, nw_address_1 rem_addr_1,
               		      nw_address_2 rem_addr_2, nw_ep remote_ep) {
  nw_result rc;
  
  rc = (*devct[NW_DEVICE(rem_addr_1)].entry->open) (local_ep,
						    rem_addr_1, rem_addr_2,
						    remote_ep);
  if (rc == NW_SYNCH) {
    hect[local_ep].sig_waiter = current_thread();
    assert_wait(0, TRUE);
    simple_unlock(&nw_simple_lock);
    thread_block((void (*)()) 0);
  }
  return rc;
}
Example #30
/*
 * Reset thread to default state in preparation for termination
 * Called with thread mutex locked
 *
 * Always called on current thread, so we don't need a run queue remove
 */
void
thread_policy_reset(
	thread_t		thread)
{
	spl_t		s;

	assert(thread == current_thread());

	s = splsched();
	thread_lock(thread);

	assert_thread_sched_count(thread);

	if (thread->sched_flags & TH_SFLAG_FAILSAFE)
		sched_thread_mode_undemote(thread, TH_SFLAG_FAILSAFE);

	assert_thread_sched_count(thread);

	if (thread->sched_flags & TH_SFLAG_THROTTLED)
		sched_set_thread_throttled(thread, FALSE);

	assert_thread_sched_count(thread);

	assert(thread->BG_COUNT == 0);

	/* At this point, the various demotions should be inactive */
	assert(!(thread->sched_flags & TH_SFLAG_DEMOTED_MASK));
	assert(!(thread->sched_flags & TH_SFLAG_THROTTLED));
	assert(!(thread->sched_flags & TH_SFLAG_DEPRESSED_MASK));

	/* Reset thread back to task-default basepri and mode  */
	sched_mode_t newmode = SCHED(initial_thread_sched_mode)(thread->task);

	sched_set_thread_mode(thread, newmode);

	thread->importance = 0;

	sched_set_thread_base_priority(thread, thread->task_priority);

	/* Prevent further changes to thread base priority or mode */
	thread->policy_reset = 1;

	assert(thread->BG_COUNT == 0);
	assert_thread_sched_count(thread);

	thread_unlock(thread);
	splx(s);
}