/*
 * Lazily initialize gIOOptionsEntry with the IODTNVRAM object found at
 * the "/options" path in the device-tree registry plane.
 *
 * Returns 0 on success (including when another thread already published
 * the entry first), -1 if the registry entry is missing or is not an
 * IODTNVRAM instance.
 *
 * Reference ownership: on the winning compare-and-swap the reference
 * obtained from fromPath() is intentionally kept alive inside
 * gIOOptionsEntry; on every other exit path it is released.
 */
inline static int init_gIOOptionsEntry(void)
{
    IORegistryEntry *entry;
    void *nvram_entry;
    volatile void **options;
    int ret = -1;

    /* Fast path: a previous call already published the entry. */
    if (gIOOptionsEntry)
        return 0;

    entry = IORegistryEntry::fromPath( "/options", gIODTPlane );
    if (!entry)
        return -1;

    /* Must actually be the NVRAM object, not just any registry entry. */
    nvram_entry = (void *) OSDynamicCast(IODTNVRAM, entry);
    if (!nvram_entry)
        goto release;

    options = (volatile void **) &gIOOptionsEntry;

    /*
     * Publish atomically.  If the swap fails, another thread won the
     * race and installed its own entry: drop our reference and still
     * report success (ret = 0).
     */
    if (!OSCompareAndSwapPtr(NULL, nvram_entry, options)) {
        ret = 0;
        goto release;
    }

    /* We won: the reference on 'entry' now lives in gIOOptionsEntry. */
    return 0;

release:
    entry->release();
    return ret;
}
/*
 * Execute every initializer queued via net_init_add(), exactly once.
 * The list head is atomically replaced with the LIST_RAN sentinel so
 * that later net_init_add() callers know initialization already ran.
 */
__private_extern__ void
net_init_run(void)
{
	struct init_list_entry *grabbed = 0;
	struct init_list_entry *reversed = 0;
	struct init_list_entry *entry = 0;

	/*
	 * Atomically detach the registration list, leaving LIST_RAN
	 * behind to mark that we've already run.
	 */
	do {
		grabbed = list_head;
	} while (!OSCompareAndSwapPtr(grabbed, LIST_RAN, &list_head));

	/* Entries were pushed LIFO; reverse to recover registration order. */
	while (grabbed != 0) {
		entry = grabbed;
		grabbed = entry->next;
		entry->next = reversed;
		reversed = entry;
	}

	/* Invoke each registered initializer and free its list node. */
	while (reversed != 0) {
		entry = reversed;
		reversed = entry->next;
		entry->func();
		kfree(entry, sizeof(*entry));
	}
}
/*
 * Initialize the initer.  Lazily creates the class-wide recursive lock
 * exactly once across all instances: each racer allocates a candidate
 * lock and tries to publish it with a compare-and-swap; losers free
 * their candidate allocation.
 */
bool IOFireWireUserClientIniter::init(OSDictionary * propTable)
{
	fProvider = NULL ;

	if( sIniterLock == NULL )
	{
		IORecursiveLock * candidate = IORecursiveLockAlloc();

		bool published = false;
		while( sIniterLock == NULL && published == false )
		{
			// Succeeds only if sIniterLock is still NULL; otherwise
			// another thread published first and the loop exits.
			published = OSCompareAndSwapPtr( NULL, candidate, (void * volatile *)&sIniterLock );
		}

		if( published == false )
		{
			// Lost the publication race — discard our allocation.
			IORecursiveLockFree( candidate );
		}
	}

	return super::init(propTable) ;
}
/*
 * Return the currently registered user client for log delivery, or NULL
 * when there is no client or an unregistration is in progress.  A
 * non-NULL return is protected by the clientInvocations counter bumped
 * here, so the caller MUST balance it with putClient().
 */
VFSFilter0UserClient* com_VFSFilter0::getClient()
/* if non NULL is returned the putClient() must be called */
{
    VFSFilter0UserClient* currentClient;

    //
    // if there is no user client, then nobody asked for logging
    //
    if( NULL == this->userClient || this->pendingUnregistration )
        return NULL;

    //
    // account for this invocation BEFORE re-reading the client pointer;
    // unregisterUserClient() waits for this counter to drain to zero
    //
    OSIncrementAtomic( &this->clientInvocations );

    currentClient = (VFSFilter0UserClient*)this->userClient;

    //
    // if the current client is NULL or can't be atomically exchanged
    // with the same value then the unregistration is in progress,
    // the call to OSCompareAndSwapPtr( NULL, NULL, &this->userClient )
    // checks the this->userClient for NULL atomically
    //
    if( !currentClient ||
        !OSCompareAndSwapPtr( currentClient, currentClient, &this->userClient ) ||
        OSCompareAndSwapPtr( NULL, NULL, &this->userClient ) ){

        //
        // the unregistration is in the progress and waiting for all
        // invocations to return
        //
        assert( this->pendingUnregistration );
        if( 0x1 == OSDecrementAtomic( &this->clientInvocations ) ){

            //
            // this was the last invocation — wake the unregistration
            // thread sleeping on the counter
            //
            wakeup( &this->clientInvocations );
        }

        return NULL;
    }

    return currentClient;
}
/*
 * Install a syscall callback.  Succeeds only when the slot still holds
 * the chud_null_syscall stub (i.e. no real callback is registered);
 * returns KERN_FAILURE if another callback is already in place.
 */
__private_extern__ kern_return_t
chudxnu_syscall_callback_enter(chudxnu_syscall_callback_func_t func)
{
	if (!OSCompareAndSwapPtr(chud_null_syscall, func,
	    (void * volatile *)&syscall_callback_fn)) {
		return KERN_FAILURE;
	}
	return KERN_SUCCESS;
}
/*
 * Install a kdebug callback.  Succeeds only when the slot still holds
 * the chud_null_kdebug stub; on success, enable the kdebug hook so the
 * private trampoline starts forwarding events.
 */
__private_extern__ kern_return_t
chudxnu_kdebug_callback_enter(chudxnu_kdebug_callback_func_t func)
{
	/* Atomically publish the callback; fail if one is already set. */
	if (!OSCompareAndSwapPtr(chud_null_kdebug, func,
	    (void * volatile *)&kdebug_callback_fn)) {
		return KERN_FAILURE;
	}

	kdbg_control_chud(TRUE, (void *)chudxnu_private_kdebug_callback);
	return KERN_SUCCESS;
}
/*
 * Remove the installed syscall callback by swapping the slot back to
 * the chud_null_syscall stub, retrying if the slot changes underneath
 * us.  Always returns KERN_SUCCESS.
 */
__private_extern__ kern_return_t
chudxnu_syscall_callback_cancel(void)
{
	for (;;) {
		chudxnu_syscall_callback_func_t current = syscall_callback_fn;
		if (OSCompareAndSwapPtr(current, chud_null_syscall,
		    (void * volatile *)&syscall_callback_fn)) {
			break;
		}
	}
	return KERN_SUCCESS;
}
/*
 * Routine:	convert_mig_object_to_port [interface]
 * Purpose:
 *	Base implementation of MIG outtrans routine to convert from
 *	a mig object reference to a new send right on the object's
 *	port.  The object reference is consumed.
 * Returns:
 *	IP_NULL - Null MIG object supplied
 *	Otherwise, a newly made send right for the port
 * Conditions:
 *	Nothing locked.
 */
ipc_port_t
convert_mig_object_to_port(
	mig_object_t	mig_object)
{
	ipc_port_t	port;
	boolean_t	deallocate = TRUE;

	if (mig_object == MIG_OBJECT_NULL)
		return IP_NULL;

	port = mig_object->port;
	while ((port == IP_NULL) ||
	       ((port = ipc_port_make_send(port)) == IP_NULL)) {
		ipc_port_t	previous;

		/*
		 * Either the port was never set up, or it was just
		 * deallocated out from under us by the no-senders
		 * processing.  In either case, we must:
		 *	Attempt to make one
		 *	Arrange for no senders
		 *	Try to atomically register it with the object
		 *	Destroy it if we are raced.
		 */
		port = ipc_port_alloc_kernel();
		ip_lock(port);
		ipc_kobject_set_atomically(port,
					   (ipc_kobject_t) mig_object,
					   IKOT_MIG);

		/* make a sonce right for the notification */
		port->ip_sorights++;
		ip_reference(port);
		ipc_port_nsrequest(port, 1, port, &previous);
		/* port unlocked */
		assert(previous == IP_NULL);

		/*
		 * Publish the new port.  Winning the swap transfers our
		 * reference (and the mig_object reference) to the port, so
		 * 'deallocate' is cleared.  Losing means another thread
		 * registered a port first: destroy ours, pick up theirs,
		 * and retry the make-send at the loop head.
		 */
		if (OSCompareAndSwapPtr((void *)IP_NULL, (void *)port,
					(void * volatile *)&mig_object->port)) {
			deallocate = FALSE;
		} else {
			ipc_port_dealloc_kernel(port);
			port = mig_object->port;
		}
	}

	/*
	 * Consume the caller's object reference unless ownership was
	 * donated to a freshly registered port above.
	 */
	if (deallocate)
		mig_object->pVtbl->Release((IMIGObject *)mig_object);

	return (port);
}
/*
 * Remove the installed kdebug callback.  The kdebug hook is disabled
 * first, then the callback slot is swapped back to the chud_null_kdebug
 * stub with a CAS retry loop.  Always returns KERN_SUCCESS.
 */
__private_extern__ kern_return_t
chudxnu_kdebug_callback_cancel(void)
{
	kdbg_control_chud(FALSE, NULL);

	for (;;) {
		chudxnu_kdebug_callback_func_t current = kdebug_callback_fn;
		if (OSCompareAndSwapPtr(current, chud_null_kdebug,
		    (void * volatile *)&kdebug_callback_fn)) {
			break;
		}
	}
	return KERN_SUCCESS;
}
/*
 * Routine:	convert_semaphore_to_port
 * Purpose:
 *	Convert a semaphore reference to a send right to a
 *	semaphore port.
 *
 *	Consumes the semaphore reference.  If the semaphore
 *	port currently has no send rights (or doesn't exist
 *	yet), the reference is donated to the port to represent
 *	all extant send rights collectively.
 */
ipc_port_t
convert_semaphore_to_port (semaphore_t semaphore)
{
	ipc_port_t port, send;

	if (semaphore == SEMAPHORE_NULL)
		return (IP_NULL);

	/* caller is donating a reference */
	port = semaphore->port;
	if (!IP_VALID(port)) {
		/* No port yet: create one and try to publish it. */
		port = ipc_port_alloc_kernel();
		assert(IP_VALID(port));
		ipc_kobject_set_atomically(port, (ipc_kobject_t) semaphore, IKOT_SEMAPHORE);

		/* If we lose the race, deallocate and pick up the other guy's port */
		if (!OSCompareAndSwapPtr(IP_NULL, port, &semaphore->port)) {
			ipc_port_dealloc_kernel(port);
			port = semaphore->port;
			assert(ip_kotype(port) == IKOT_SEMAPHORE);
			assert(port->ip_kobject == (ipc_kobject_t)semaphore);
		}
	}

	ip_lock(port);
	assert(ip_active(port));
	send = ipc_port_make_send_locked(port);

	if (1 == port->ip_srights) {
		ipc_port_t old_notify;

		/* transfer our ref to the port, and arm the no-senders notification */
		assert(IP_NULL == port->ip_nsrequest);
		ipc_port_nsrequest(port, port->ip_mscount, ipc_port_make_sonce_locked(port), &old_notify);
		/* port unlocked */
		assert(IP_NULL == old_notify);
	} else {
		/* piggyback on the existing port reference, so consume ours */
		ip_unlock(port);
		semaphore_dereference(semaphore);
	}

	return (send);
}
/*
 * Unregister the user client previously installed by registerUserClient().
 * Atomically clears this->userClient, then sleeps until every in-flight
 * getClient() invocation has drained (clientInvocations == 0) before
 * dropping the retain taken at registration time.
 *
 * Returns kIOReturnSuccess on success, kIOReturnError if 'client' is not
 * the currently registered client or the atomic swap fails.
 */
IOReturn
com_VFSFilter0::unregisterUserClient( __in VFSFilter0UserClient* client )
{
    bool unregistered;
    VFSFilter0UserClient* currentClient;

    currentClient = (VFSFilter0UserClient*)this->userClient;
    assert( currentClient == client );
    if( currentClient != client ){

        DBG_PRINT_ERROR(("currentClient != client\n"));
        return kIOReturnError;
    }

    /* Tell getClient() to start refusing new invocations. */
    this->pendingUnregistration = true;

    unregistered = OSCompareAndSwapPtr( (void*)currentClient, NULL, &this->userClient );
    assert( unregistered && NULL == this->userClient );
    if( !unregistered ){

        DBG_PRINT_ERROR(("!unregistered\n"));
        this->pendingUnregistration = false;
        return kIOReturnError;
    }

    do { // wait for any existing client invocations to return

        struct timespec ts = { 1, 0 };       // one second
        (void)msleep( &this->clientInvocations,                   // wait channel
                      NULL,                                       // mutex
                      PUSER,                                      // priority
                      "com_VFSFilter0::unregisterUserClient()",   // wait message
                      &ts );                                      // sleep interval

    } while( this->clientInvocations != 0 );

    /* Drop the reference taken by registerUserClient(). */
    currentClient->release();

    this->pendingUnregistration = false;

    return unregistered? kIOReturnSuccess: kIOReturnError;
}
/*
 * Called from kernel context to register a kdp event callout.
 * Allocates a record for (fn, arg) and pushes it onto the head of the
 * global callout list with a lock-free compare-and-swap loop; panics
 * if the record cannot be allocated.
 */
void
kdp_register_callout(
	kdp_callout_fn_t	fn,
	void			*arg)
{
	struct kdp_callout	*entry;
	struct kdp_callout	*head;

	entry = kalloc(sizeof(*entry));
	if (entry == NULL)
		panic("kdp_register_callout() kalloc failed");

	entry->callout_fn  = fn;
	entry->callout_arg = arg;

	/* Lock-less LIFO insertion: snapshot the head, link, then CAS. */
	for (;;) {
		head = kdp_callout_list;
		entry->callout_next = head;
		if (OSCompareAndSwapPtr(head, entry,
		    (void * volatile *)&kdp_callout_list)) {
			break;
		}
	}
}
/*
 * Register a user client for log delivery.  Publishes 'client' into the
 * (currently empty) userClient slot with a compare-and-swap and takes a
 * reference that unregisterUserClient() later releases.
 *
 * Returns kIOReturnSuccess on success, kIOReturnError if an
 * unregistration is pending or another client is already installed.
 */
IOReturn
com_VFSFilter0::registerUserClient( __in VFSFilter0UserClient* client )
{
    bool swappedIn;

    if( this->pendingUnregistration ){
        DBG_PRINT_ERROR(("com_VFSFilter0 : pendingUnregistration\n"));
        return kIOReturnError;
    }

    /* Publish only if the slot is currently empty. */
    swappedIn = OSCompareAndSwapPtr( NULL, (void*)client, &this->userClient );
    assert( swappedIn );
    if( !swappedIn ){
        DBG_PRINT_ERROR(("com_VFSFilter0 : a client was not registered\n"));
        return kIOReturnError;
    }

    /* Keep a reference for the lifetime of the registration. */
    client->retain();

    return kIOReturnSuccess;
}
/*
 * Queue init_func to run when net_init_run() executes.  Returns EINVAL
 * for a NULL function, EALREADY if initialization has already run (the
 * list head holds the LIST_RAN sentinel), ENOMEM on allocation failure,
 * 0 on success.
 */
errno_t
net_init_add(
	net_init_func_ptr init_func)
{
	struct init_list_entry	*new_entry;

	if (init_func == 0) {
		return EINVAL;
	}

	/* Fast-path check: initialization already happened. */
	if (list_head == LIST_RAN) {
		return EALREADY;
	}

	new_entry = kalloc(sizeof(*new_entry));
	if (new_entry == 0) {
		printf("net_init_add: no memory\n");
		return ENOMEM;
	}
	bzero(new_entry, sizeof(*new_entry));
	new_entry->func = init_func;

	/*
	 * Lock-less LIFO push.  Re-check for the LIST_RAN sentinel on
	 * every retry: the list may have run while we were working.
	 */
	do {
		new_entry->next = list_head;
		if (new_entry->next == LIST_RAN) {
			kfree(new_entry, sizeof(*new_entry));
			return EALREADY;
		}
	} while (!OSCompareAndSwapPtr(new_entry->next, new_entry, &list_head));

	return 0;
}
static struct kern_coredump_core * kern_register_coredump_helper_internal(int kern_coredump_config_vers, kern_coredump_callback_config *kc_callbacks, void *refcon, const char *core_description, boolean_t xnu_callback, boolean_t is64bit, uint32_t mh_magic, cpu_type_t cpu_type, cpu_subtype_t cpu_subtype) { struct kern_coredump_core *core_helper = NULL; kern_coredump_callback_config *core_callbacks = NULL; if (kern_coredump_config_vers < KERN_COREDUMP_MIN_CONFIG_VERSION) return NULL; if (kc_callbacks == NULL) return NULL;; if (core_description == NULL) return NULL; if (kc_callbacks->kcc_coredump_get_summary == NULL || kc_callbacks->kcc_coredump_save_segment_descriptions == NULL || kc_callbacks->kcc_coredump_save_segment_data == NULL || kc_callbacks->kcc_coredump_save_thread_state == NULL || kc_callbacks->kcc_coredump_save_sw_vers == NULL) return NULL; #if !defined(__LP64__) /* We don't support generating 64-bit cores on 32-bit platforms */ if (is64bit) return NULL; #endif core_helper = kalloc(sizeof(*core_helper)); core_helper->kcc_next = NULL; core_helper->kcc_refcon = refcon; if (xnu_callback) { snprintf((char *)&core_helper->kcc_corename, MACH_CORE_FILEHEADER_NAMELEN, "%s", core_description); } else { /* Make sure there's room for the -coproc suffix (16 - NULL char - strlen(-coproc)) */ snprintf((char *)&core_helper->kcc_corename, MACH_CORE_FILEHEADER_NAMELEN, "%.8s-coproc", core_description); } core_helper->kcc_is64bit = is64bit; core_helper->kcc_mh_magic = mh_magic; core_helper->kcc_cpu_type = cpu_type; core_helper->kcc_cpu_subtype = cpu_subtype; core_callbacks = &core_helper->kcc_cb; core_callbacks->kcc_coredump_init = kc_callbacks->kcc_coredump_init; core_callbacks->kcc_coredump_get_summary = kc_callbacks->kcc_coredump_get_summary; core_callbacks->kcc_coredump_save_segment_descriptions = kc_callbacks->kcc_coredump_save_segment_descriptions; core_callbacks->kcc_coredump_save_segment_data = kc_callbacks->kcc_coredump_save_segment_data; 
core_callbacks->kcc_coredump_save_thread_state = kc_callbacks->kcc_coredump_save_thread_state; core_callbacks->kcc_coredump_save_misc_data = kc_callbacks->kcc_coredump_save_misc_data; core_callbacks->kcc_coredump_save_sw_vers = kc_callbacks->kcc_coredump_save_sw_vers; if (xnu_callback) { assert(kernel_helper == NULL); kernel_helper = core_helper; } else { do { core_helper->kcc_next = kern_coredump_core_list; } while (!OSCompareAndSwapPtr(kern_coredump_core_list, core_helper, &kern_coredump_core_list)); } OSAddAtomic(1, &coredump_registered_count); kprintf("Registered coredump handler for %s\n", core_description); return core_helper; }