/*
 * clock_track_calend_nowait:
 *
 * Publish the current clock_calend snapshot into both flipflop slots.
 * Writers bracket each slot update with generation-count changes so an
 * asynchronous (no-wait) reader can detect a torn copy by comparing the
 * generation count before and after its read.
 */
static void
clock_track_calend_nowait(void)
{
	int		i;

	for (i = 0; i < 2; i++) {
		/* Take a local copy so both slots publish the same snapshot. */
		struct clock_calend tmp = clock_calend;

		/*
		 * Set the low bit of the generation count; since we use a
		 * barrier instruction to do this, we are guaranteed that this
		 * will flag an update in progress to an async caller trying
		 * to examine the contents.
		 */
		(void)hw_atomic_or(&flipflop[i].gen, 1);

		flipflop[i].calend = tmp;

		/*
		 * Increment the generation count to clear the low bit to
		 * signal completion.  If a caller compares the generation
		 * count after taking a copy while in progress, the count
		 * will be off by two.
		 */
		(void)hw_atomic_add(&flipflop[i].gen, 1);
	}
}
/*
 * uio_create - create an uio_t.
 * Space is allocated to hold up to a_iovcount number of iovecs. The uio_t
 * is not fully initialized until all iovecs are added using uio_addiov calls.
 * a_iovcount is the maximum number of iovecs you may add.
 * Returns NULL if the backing buffer cannot be allocated.
 */
uio_t uio_create( int a_iovcount,		/* number of iovecs */
		  off_t a_offset,		/* current offset */
		  int a_spacetype,		/* type of address space */
		  int a_iodirection )		/* read or write flag */
{
	void *		my_buf_p;
	int		my_size;
	uio_t		my_uio;

	my_size = UIO_SIZEOF(a_iovcount);
	my_buf_p = kalloc(my_size);
	if (my_buf_p == NULL) {
		/*
		 * kalloc can fail; bail out rather than handing a NULL
		 * buffer to uio_createwithbuffer.  Callers already must
		 * cope with a NULL return from this routine.
		 */
		return( NULL );
	}
	my_uio = uio_createwithbuffer( a_iovcount,
					 a_offset,
					 a_spacetype,
					 a_iodirection,
					 my_buf_p,
					 my_size );
	if (my_uio != 0) {
		/* leave a note that we allocated this uio_t */
		my_uio->uio_flags |= UIO_FLAGS_WE_ALLOCED;
#if DEBUG
		/* DEBUG-only census of live uio_t allocations. */
		(void)hw_atomic_add(&uio_t_count, 1);
#endif
	}

	return( my_uio );
}
/*
 *	processor_up:
 *
 *	Flag processor as up and running, and available
 *	for scheduling.
 */
void
processor_up(
	processor_t			processor)
{
	processor_set_t		pset;
	spl_t				s;

	/* Block interrupts while manipulating scheduler state. */
	s = splsched();
	init_ast_check(processor);
	pset = processor->processor_set;
	/* The pset lock protects the active queue and counters below. */
	pset_lock(pset);
	++pset->online_processor_count;
	/* Make the processor visible to the scheduler's placement logic. */
	enqueue_tail(&pset->active_queue, (queue_entry_t)processor);
	processor->state = PROCESSOR_RUNNING;
	/* Atomic: this counter is read without holding the pset lock. */
	(void)hw_atomic_add(&processor_avail_count, 1);
	commpage_update_active_cpus();
	pset_unlock(pset);
	ml_cpu_up();
	splx(s);

#if CONFIG_DTRACE
	/* Notify DTrace (if hooked) that this CPU came online. */
	if (dtrace_cpu_state_changed_hook)
		(*dtrace_cpu_state_changed_hook)(processor->cpu_id, TRUE);
#endif
}
/*
 * OSMalloc_Tagref:
 *
 * Take an additional reference on an OSMalloc tag.  Panics when the
 * tag's state word does not carry the expected validity pattern,
 * since that indicates a stale or corrupted tag.
 */
void
OSMalloc_Tagref(
	 OSMallocTag		tag)
{
	if ((tag->OSMT_state & OSMT_VALID_MASK) != OSMT_VALID)
		panic("OSMalloc_Tagref():'%s' has bad state 0x%08X\n",
		    tag->OSMT_name, tag->OSMT_state);

	/* Atomic bump; result intentionally discarded. */
	(void)hw_atomic_add(&tag->OSMT_refcnt, 1);
}
/*
 * vnode_pager_reference:
 *
 * Take an additional reference on the vnode pager backing the given
 * memory object.  Asserts the pager was already referenced (the new
 * count must exceed one).
 */
void
vnode_pager_reference(
	memory_object_t		mem_obj)
{
	vnode_pager_t		pager;
	unsigned int		refs;

	/* Resolve the memory object to its backing vnode pager. */
	pager = vnode_pager_lookup(mem_obj);
	refs = hw_atomic_add(&pager->ref_count, 1);
	assert(refs > 1);
}
/*
 * uio_duplicate - allocate a new uio and make a copy of the given uio_t.
 * may return NULL.
 */
uio_t uio_duplicate( uio_t a_uio )
{
	uio_t		my_uio;
	int			i;

	if (a_uio == NULL) {
		return(NULL);
	}

	/* Allocation failure here is treated as fatal, not recoverable. */
	my_uio = (uio_t) kalloc(a_uio->uio_size);
	if (my_uio == 0) {
		panic("%s :%d - allocation failed\n", __FILE__, __LINE__);
	}

	/* uio_size covers the struct and its trailing iovec array. */
	bcopy((void *)a_uio, (void *)my_uio, a_uio->uio_size);
	/* need to set our iovec pointer to point to first active iovec */
	if (my_uio->uio_max_iovs > 0) {
		/* Rebase into the copy; iovecs live immediately after struct uio. */
		my_uio->uio_iovs.uiovp = (struct user_iovec *)
			(((uint8_t *)my_uio) + sizeof(struct uio));

		/* advance to first nonzero iovec */
		if (my_uio->uio_iovcnt > 0) {
			for ( i = 0; i < my_uio->uio_max_iovs; i++ ) {
				if (UIO_IS_USER_SPACE(a_uio)) {
					/* user-space uio: walk the user_iovec view of the union */
					if (my_uio->uio_iovs.uiovp->iov_len != 0) {
						break;
					}
					my_uio->uio_iovs.uiovp++;
				}
				else {
					/* kernel-space uio: walk the kernel iovec view */
					if (my_uio->uio_iovs.kiovp->iov_len != 0) {
						break;
					}
					my_uio->uio_iovs.kiovp++;
				}
			}
		}
	}

	/* The copy is ours to free, independent of how a_uio was created. */
	my_uio->uio_flags = UIO_FLAGS_WE_ALLOCED | UIO_FLAGS_INITED;
#if DEBUG
	/* DEBUG-only census of live uio_t allocations. */
	(void)hw_atomic_add(&uio_t_count, 1);
#endif

	return(my_uio);
}
/*
 * lck_grp_lckcnt_incr:
 *
 * Atomically increment the per-type lock count for a lock group.
 * Panics on an unrecognized lock type.
 */
void
lck_grp_lckcnt_incr(
	lck_grp_t	*grp,
	lck_type_t	lck_type)
{
	unsigned int	*lckcnt;

	switch (lck_type) {
	case LCK_TYPE_SPIN:
		lckcnt = &grp->lck_grp_spincnt;
		break;
	case LCK_TYPE_MTX:
		lckcnt = &grp->lck_grp_mtxcnt;
		break;
	case LCK_TYPE_RW:
		lckcnt = &grp->lck_grp_rwcnt;
		break;
	default:
		/*
		 * panic() does not return.  A plain call (instead of
		 * `return panic(...)`) avoids returning a void expression
		 * from a void function, which ISO C forbids.
		 */
		panic("lck_grp_lckcnt_incr(): invalid lock type: %d\n", lck_type);
		return;
	}

	(void)hw_atomic_add(lckcnt, 1);
}
/* * Create a new thread. * Doesn't start the thread running. */ static kern_return_t thread_create_internal( task_t parent_task, integer_t priority, thread_continue_t continuation, int options, #define TH_OPTION_NONE 0x00 #define TH_OPTION_NOCRED 0x01 #define TH_OPTION_NOSUSP 0x02 thread_t *out_thread) { thread_t new_thread; static thread_t first_thread = THREAD_NULL; /* * Allocate a thread and initialize static fields */ if (first_thread == THREAD_NULL) new_thread = first_thread = current_thread(); new_thread = (thread_t)zalloc(thread_zone); if (new_thread == THREAD_NULL) return (KERN_RESOURCE_SHORTAGE); if (new_thread != first_thread) *new_thread = thread_template; #ifdef MACH_BSD new_thread->uthread = uthread_alloc(parent_task, new_thread, (options & TH_OPTION_NOCRED) != 0); if (new_thread->uthread == NULL) { zfree(thread_zone, new_thread); return (KERN_RESOURCE_SHORTAGE); } #endif /* MACH_BSD */ if (machine_thread_create(new_thread, parent_task) != KERN_SUCCESS) { #ifdef MACH_BSD void *ut = new_thread->uthread; new_thread->uthread = NULL; /* cred free may not be necessary */ uthread_cleanup(parent_task, ut, parent_task->bsd_info); uthread_cred_free(ut); uthread_zone_free(ut); #endif /* MACH_BSD */ zfree(thread_zone, new_thread); return (KERN_FAILURE); } new_thread->task = parent_task; thread_lock_init(new_thread); wake_lock_init(new_thread); lck_mtx_init(&new_thread->mutex, &thread_lck_grp, &thread_lck_attr); ipc_thread_init(new_thread); queue_init(&new_thread->held_ulocks); new_thread->continuation = continuation; lck_mtx_lock(&tasks_threads_lock); task_lock(parent_task); if ( !parent_task->active || parent_task->halting || ((options & TH_OPTION_NOSUSP) != 0 && parent_task->suspend_count > 0) || (parent_task->thread_count >= task_threadmax && parent_task != kernel_task) ) { task_unlock(parent_task); lck_mtx_unlock(&tasks_threads_lock); #ifdef MACH_BSD { void *ut = new_thread->uthread; new_thread->uthread = NULL; uthread_cleanup(parent_task, ut, 
parent_task->bsd_info); /* cred free may not be necessary */ uthread_cred_free(ut); uthread_zone_free(ut); } #endif /* MACH_BSD */ ipc_thread_disable(new_thread); ipc_thread_terminate(new_thread); lck_mtx_destroy(&new_thread->mutex, &thread_lck_grp); machine_thread_destroy(new_thread); zfree(thread_zone, new_thread); return (KERN_FAILURE); } /* New threads inherit any default state on the task */ machine_thread_inherit_taskwide(new_thread, parent_task); task_reference_internal(parent_task); if (new_thread->task->rusage_cpu_flags & TASK_RUSECPU_FLAGS_PERTHR_LIMIT) { /* * This task has a per-thread CPU limit; make sure this new thread * gets its limit set too, before it gets out of the kernel. */ set_astledger(new_thread); } new_thread->t_threadledger = LEDGER_NULL; /* per thread ledger is not inherited */ new_thread->t_ledger = new_thread->task->ledger; if (new_thread->t_ledger) ledger_reference(new_thread->t_ledger); /* Cache the task's map */ new_thread->map = parent_task->map; /* Chain the thread onto the task's list */ queue_enter(&parent_task->threads, new_thread, thread_t, task_threads); parent_task->thread_count++; /* So terminating threads don't need to take the task lock to decrement */ hw_atomic_add(&parent_task->active_thread_count, 1); /* Protected by the tasks_threads_lock */ new_thread->thread_id = ++thread_unique_id; queue_enter(&threads, new_thread, thread_t, threads); threads_count++; timer_call_setup(&new_thread->wait_timer, thread_timer_expire, new_thread); timer_call_setup(&new_thread->depress_timer, thread_depress_expire, new_thread); #if CONFIG_COUNTERS /* * If parent task has any reservations, they need to be propagated to this * thread. */ new_thread->t_chud = (TASK_PMC_FLAG == (parent_task->t_chud & TASK_PMC_FLAG)) ? 
THREAD_PMC_FLAG : 0U; #endif /* Set the thread's scheduling parameters */ new_thread->sched_mode = SCHED(initial_thread_sched_mode)(parent_task); new_thread->sched_flags = 0; new_thread->max_priority = parent_task->max_priority; new_thread->task_priority = parent_task->priority; new_thread->priority = (priority < 0)? parent_task->priority: priority; if (new_thread->priority > new_thread->max_priority) new_thread->priority = new_thread->max_priority; #if CONFIG_EMBEDDED if (new_thread->priority < MAXPRI_THROTTLE) { new_thread->priority = MAXPRI_THROTTLE; } #endif /* CONFIG_EMBEDDED */ new_thread->importance = new_thread->priority - new_thread->task_priority; #if CONFIG_EMBEDDED new_thread->saved_importance = new_thread->importance; /* apple ios daemon starts all threads in darwin background */ if (parent_task->ext_appliedstate.apptype == PROC_POLICY_IOS_APPLE_DAEMON) { /* Cannot use generic routines here so apply darwin bacground directly */ new_thread->policystate.hw_bg = TASK_POLICY_BACKGROUND_ATTRIBUTE_ALL; /* set thread self backgrounding */ new_thread->appliedstate.hw_bg = new_thread->policystate.hw_bg; /* priority will get recomputed suitably bit later */ new_thread->importance = INT_MIN; /* to avoid changes to many pri compute routines, set the effect of those here */ new_thread->priority = MAXPRI_THROTTLE; } #endif /* CONFIG_EMBEDDED */ #if defined(CONFIG_SCHED_TRADITIONAL) new_thread->sched_stamp = sched_tick; new_thread->pri_shift = sched_pri_shift; #endif SCHED(compute_priority)(new_thread, FALSE); new_thread->active = TRUE; *out_thread = new_thread; { long dbg_arg1, dbg_arg2, dbg_arg3, dbg_arg4; kdbg_trace_data(parent_task->bsd_info, &dbg_arg2); KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, TRACEDBG_CODE(DBG_TRACE_DATA, 1) | DBG_FUNC_NONE, (vm_address_t)(uintptr_t)thread_tid(new_thread), dbg_arg2, 0, 0, 0); kdbg_trace_string(parent_task->bsd_info, &dbg_arg1, &dbg_arg2, &dbg_arg3, &dbg_arg4); KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, 
TRACEDBG_CODE(DBG_TRACE_STRING, 1) | DBG_FUNC_NONE, dbg_arg1, dbg_arg2, dbg_arg3, dbg_arg4, 0); } DTRACE_PROC1(lwp__create, thread_t, *out_thread); return (KERN_SUCCESS); }
/*
 * Create a new thread.
 * Doesn't start the thread running.
 */
static kern_return_t
thread_create_internal(
	task_t				parent_task,
	integer_t			priority,
	thread_continue_t	continuation,
	thread_t			*out_thread)
{
	thread_t			new_thread;
	static thread_t		first_thread;

	/*
	 *	Allocate a thread and initialize static fields
	 */
	/* First call adopts the bootstrap (current) thread rather than allocating. */
	if (first_thread == NULL)
		new_thread = first_thread = current_thread();
	else
		new_thread = (thread_t)zalloc(thread_zone);
	if (new_thread == NULL)
		return (KERN_RESOURCE_SHORTAGE);

	/* The bootstrap thread is already live; don't overwrite with template. */
	if (new_thread != first_thread)
		*new_thread = thread_template;

#ifdef MACH_BSD
	{
		new_thread->uthread = uthread_alloc(parent_task, new_thread);
		if (new_thread->uthread == NULL) {
			zfree(thread_zone, new_thread);
			return (KERN_RESOURCE_SHORTAGE);
		}
	}
#endif  /* MACH_BSD */

	if (machine_thread_create(new_thread, parent_task) != KERN_SUCCESS) {
#ifdef MACH_BSD
		{
			void *ut = new_thread->uthread;

			new_thread->uthread = NULL;
			/* cred free may not be necessary */
			uthread_cleanup(parent_task, ut, parent_task->bsd_info);
			uthread_cred_free(ut);
			uthread_zone_free(ut);
		}
#endif  /* MACH_BSD */
		zfree(thread_zone, new_thread);
		return (KERN_FAILURE);
	}

	new_thread->task = parent_task;

	thread_lock_init(new_thread);
	wake_lock_init(new_thread);

	mutex_init(&new_thread->mutex, 0);

	ipc_thread_init(new_thread);
	queue_init(&new_thread->held_ulocks);

	new_thread->continuation = continuation;

	/* Lock order: tasks_threads_lock before the task lock. */
	mutex_lock(&tasks_threads_lock);
	task_lock(parent_task);

	/* Refuse to attach to an inactive task or one at its thread limit. */
	if (	!parent_task->active ||
			(parent_task->thread_count >= THREAD_MAX &&
			 parent_task != kernel_task)) {
		task_unlock(parent_task);
		mutex_unlock(&tasks_threads_lock);

#ifdef MACH_BSD
		{
			void *ut = new_thread->uthread;

			new_thread->uthread = NULL;
			uthread_cleanup(parent_task, ut, parent_task->bsd_info);
			/* cred free may not be necessary */
			uthread_cred_free(ut);
			uthread_zone_free(ut);
		}
#endif  /* MACH_BSD */
		ipc_thread_disable(new_thread);
		ipc_thread_terminate(new_thread);
		machine_thread_destroy(new_thread);
		zfree(thread_zone, new_thread);
		return (KERN_FAILURE);
	}

	task_reference_internal(parent_task);

	/* Cache the task's map */
	new_thread->map = parent_task->map;

	/* Chain the thread onto the task's list */
	queue_enter(&parent_task->threads, new_thread, thread_t, task_threads);
	parent_task->thread_count++;

	/* So terminating threads don't need to take the task lock to decrement */
	hw_atomic_add(&parent_task->active_thread_count, 1);

	queue_enter(&threads, new_thread, thread_t, threads);
	threads_count++;

	timer_call_setup(&new_thread->wait_timer, thread_timer_expire, new_thread);
	timer_call_setup(&new_thread->depress_timer, thread_depress_expire, new_thread);

	/* Set the thread's scheduling parameters */
	if (parent_task != kernel_task)
		new_thread->sched_mode |= TH_MODE_TIMESHARE;
	new_thread->max_priority = parent_task->max_priority;
	new_thread->task_priority = parent_task->priority;
	/* Negative priority means: inherit the task's priority. */
	new_thread->priority = (priority < 0)? parent_task->priority: priority;
	if (new_thread->priority > new_thread->max_priority)
		new_thread->priority = new_thread->max_priority;
	new_thread->importance =
					new_thread->priority - new_thread->task_priority;
	new_thread->sched_stamp = sched_tick;
	new_thread->pri_shift = sched_pri_shift;
	compute_priority(new_thread, FALSE);

	new_thread->active = TRUE;

	*out_thread = new_thread;

	{
		long	dbg_arg1, dbg_arg2, dbg_arg3, dbg_arg4;

		/* Emit kdebug trace records describing the new thread. */
		kdbg_trace_data(parent_task->bsd_info, &dbg_arg2);

		KERNEL_DEBUG_CONSTANT(
			TRACEDBG_CODE(DBG_TRACE_DATA, 1) | DBG_FUNC_NONE,
			(vm_address_t)new_thread, dbg_arg2, 0, 0, 0);

		kdbg_trace_string(parent_task->bsd_info,
							&dbg_arg1, &dbg_arg2, &dbg_arg3, &dbg_arg4);

		KERNEL_DEBUG_CONSTANT(
			TRACEDBG_CODE(DBG_TRACE_STRING, 1) | DBG_FUNC_NONE,
			dbg_arg1, dbg_arg2, dbg_arg3, dbg_arg4, 0);
	}

	DTRACE_PROC1(lwp__create, thread_t, *out_thread);

	return (KERN_SUCCESS);
}
/*
 * Create a new thread.
 * Doesn't start the thread running.
 */
static kern_return_t
thread_create_internal(
	task_t				parent_task,
	integer_t			priority,
	thread_continue_t	continuation,
	int					options,
#define TH_OPTION_NONE		0x00
#define TH_OPTION_NOCRED	0x01
#define TH_OPTION_NOSUSP	0x02
	thread_t			*out_thread)
{
	thread_t			new_thread;
	static thread_t		first_thread;

	/*
	 *	Allocate a thread and initialize static fields
	 */
	/* First call adopts the bootstrap (current) thread rather than allocating. */
	if (first_thread == THREAD_NULL)
		new_thread = first_thread = current_thread();
	else
		new_thread = (thread_t)zalloc(thread_zone);
	if (new_thread == THREAD_NULL)
		return (KERN_RESOURCE_SHORTAGE);

	/* The bootstrap thread is already live; don't overwrite with template. */
	if (new_thread != first_thread)
		*new_thread = thread_template;

#ifdef MACH_BSD
	new_thread->uthread = uthread_alloc(parent_task, new_thread, (options & TH_OPTION_NOCRED) != 0);
	if (new_thread->uthread == NULL) {
		zfree(thread_zone, new_thread);
		return (KERN_RESOURCE_SHORTAGE);
	}
#endif  /* MACH_BSD */

	if (machine_thread_create(new_thread, parent_task) != KERN_SUCCESS) {
#ifdef MACH_BSD
		void *ut = new_thread->uthread;

		new_thread->uthread = NULL;
		/* cred free may not be necessary */
		uthread_cleanup(parent_task, ut, parent_task->bsd_info);
		uthread_cred_free(ut);
		uthread_zone_free(ut);
#endif  /* MACH_BSD */
		zfree(thread_zone, new_thread);
		return (KERN_FAILURE);
	}

	new_thread->task = parent_task;

	thread_lock_init(new_thread);
	wake_lock_init(new_thread);

	lck_mtx_init(&new_thread->mutex, &thread_lck_grp, &thread_lck_attr);

	ipc_thread_init(new_thread);
	queue_init(&new_thread->held_ulocks);

	new_thread->continuation = continuation;

	/* Lock order: tasks_threads_lock before the task lock. */
	lck_mtx_lock(&tasks_threads_lock);
	task_lock(parent_task);

	/*
	 * Refuse to attach to a dying/halting task, a suspended task when
	 * TH_OPTION_NOSUSP is set, or a task at its thread limit.
	 */
	if (	!parent_task->active || parent_task->halting ||
			((options & TH_OPTION_NOSUSP) != 0 &&
			 	parent_task->suspend_count > 0) ||
			(parent_task->thread_count >= task_threadmax &&
				parent_task != kernel_task)		) {
		task_unlock(parent_task);
		lck_mtx_unlock(&tasks_threads_lock);

#ifdef MACH_BSD
		{
			void *ut = new_thread->uthread;

			new_thread->uthread = NULL;
			uthread_cleanup(parent_task, ut, parent_task->bsd_info);
			/* cred free may not be necessary */
			uthread_cred_free(ut);
			uthread_zone_free(ut);
		}
#endif  /* MACH_BSD */
		ipc_thread_disable(new_thread);
		ipc_thread_terminate(new_thread);
		lck_mtx_destroy(&new_thread->mutex, &thread_lck_grp);
		machine_thread_destroy(new_thread);
		zfree(thread_zone, new_thread);
		return (KERN_FAILURE);
	}

	/* New threads inherit any default state on the task */
	machine_thread_inherit_taskwide(new_thread, parent_task);

	task_reference_internal(parent_task);

	/* Cache the task's map */
	new_thread->map = parent_task->map;

	/* Chain the thread onto the task's list */
	queue_enter(&parent_task->threads, new_thread, thread_t, task_threads);
	parent_task->thread_count++;

	/* So terminating threads don't need to take the task lock to decrement */
	hw_atomic_add(&parent_task->active_thread_count, 1);

	/* Protected by the tasks_threads_lock */
	new_thread->thread_id = ++thread_unique_id;

	queue_enter(&threads, new_thread, thread_t, threads);
	threads_count++;

	timer_call_setup(&new_thread->wait_timer, thread_timer_expire, new_thread);
	timer_call_setup(&new_thread->depress_timer, thread_depress_expire, new_thread);

#if CONFIG_COUNTERS
	/*
	 * If parent task has any reservations, they need to be propagated to this
	 * thread.
	 */
	new_thread->t_chud = (TASK_PMC_FLAG == (parent_task->t_chud & TASK_PMC_FLAG)) ?
		THREAD_PMC_FLAG : 0U;
#endif

	/* Set the thread's scheduling parameters */
	if (parent_task != kernel_task)
		new_thread->sched_mode |= TH_MODE_TIMESHARE;
	new_thread->max_priority = parent_task->max_priority;
	new_thread->task_priority = parent_task->priority;
	/* Negative priority means: inherit the task's priority. */
	new_thread->priority = (priority < 0)?
		parent_task->priority: priority;
	if (new_thread->priority > new_thread->max_priority)
		new_thread->priority = new_thread->max_priority;
	new_thread->importance =
					new_thread->priority - new_thread->task_priority;
	new_thread->sched_stamp = sched_tick;
	new_thread->pri_shift = sched_pri_shift;
	compute_priority(new_thread, FALSE);

	new_thread->active = TRUE;

	*out_thread = new_thread;

	{
		long	dbg_arg1, dbg_arg2, dbg_arg3, dbg_arg4;

		/* Emit kdebug trace records describing the new thread. */
		kdbg_trace_data(parent_task->bsd_info, &dbg_arg2);

		KERNEL_DEBUG_CONSTANT(
			TRACEDBG_CODE(DBG_TRACE_DATA, 1) | DBG_FUNC_NONE,
			(vm_address_t)(uintptr_t)thread_tid(new_thread), dbg_arg2, 0, 0, 0);

		kdbg_trace_string(parent_task->bsd_info,
							&dbg_arg1, &dbg_arg2, &dbg_arg3, &dbg_arg4);

		KERNEL_DEBUG_CONSTANT(
			TRACEDBG_CODE(DBG_TRACE_STRING, 1) | DBG_FUNC_NONE,
			dbg_arg1, dbg_arg2, dbg_arg3, dbg_arg4, 0);
	}

	DTRACE_PROC1(lwp__create, thread_t, *out_thread);

	return (KERN_SUCCESS);
}
/*
 * lck_grp_reference:
 *
 * Take out an additional reference on a lock group so the group
 * structure stays alive while the caller uses it.
 */
void
lck_grp_reference(
	lck_grp_t	*grp)
{
	/* Atomic bump; the returned new count is intentionally ignored. */
	(void)hw_atomic_add(&grp->lck_grp_refcnt, 1);
}
/*
 *	Routine:	semaphore_reference
 *
 *	Take out a reference on a semaphore.  This keeps the data structure
 *	in existence (but the semaphore may be deactivated).
 */
void
semaphore_reference(
	semaphore_t		semaphore)
{
	/* Atomic bump; the returned new count is intentionally ignored. */
	(void)hw_atomic_add(&semaphore->ref_count, 1);
}
/*
 * pmap_reference
 *
 * Increment the reference count of the specified pmap.
 * A PMAP_NULL argument is a no-op.
 */
void
pmap_reference(pmap_t pmap)
{
	/* Guard clause: nothing to reference. */
	if (pmap == PMAP_NULL)
		return;

	(void)hw_atomic_add(&pmap->ref_count, 1);	/* Bump the count */
}