/* Allocate a condition. Return the condition pointer if successful or NULL if the allocation failed for any reason. */ objc_condition_t objc_condition_allocate(void) { objc_condition_t condition; /* Allocate the condition mutex structure */ if (!(condition = (objc_condition_t)objc_malloc(sizeof(struct objc_condition)))) return NULL; /* Call the backend to create the condition mutex */ if (__objc_condition_allocate(condition)) { /* failed! */ objc_free(condition); return NULL; } /* Success! */ return condition; }
/* Allocate a mutex. Return the mutex pointer if successful or NULL if the allocation failed for any reason. */ objc_mutex_t objc_mutex_allocate(void) { objc_mutex_t mutex; /* Allocate the mutex structure */ if (!(mutex = (objc_mutex_t)objc_malloc(sizeof(struct objc_mutex)))) return NULL; /* Call backend to create the mutex */ if (__objc_mutex_allocate(mutex)) { /* failed! */ objc_free(mutex); return NULL; } /* Initialize mutex */ mutex->owner = NULL; mutex->depth = 0; return mutex; }
/* Called at startup by init.c. */ void __objc_sync_init (void) { int i; for (i = 0; i < SYNC_NUMBER_OF_POOLS; i++) { lock_node_ptr new_node; /* Create a protection lock for each pool. */ sync_pool_protection_locks[i] = objc_mutex_allocate (); /* Preallocate a lock per pool. */ new_node = objc_malloc (sizeof (struct lock_node)); new_node->lock = objc_mutex_allocate (); new_node->object = nil; new_node->usage_count = 0; new_node->recursive_usage_count = 0; new_node->next = NULL; sync_pool_array[i] = new_node; } }
/* Register a selector for NAME without copying the name string
   (presumably the caller keeps NAME alive for the table's lifetime —
   verify against callers).  Returns the cached selector if NAME was
   registered before, otherwise registers and caches a new one.  */
SEL sel_registerSelectorNoCopyName(const char *name) {
#ifndef OBJC_TYPED_SELECTORS
   /* Untyped selectors: plain runtime registration is all we need.  */
   return sel_registerNameNoCopy(name);
#else
   SEL result;

   /* Lazily create the name -> selector hash table on first use.
      NOTE(review): this check-then-create is not obviously
      thread-safe — confirm first registration is serialized.  */
   if(nameToSelector == NULL)
    nameToSelector = OBJCCreateHashTable(INITIAL_SELECTOR_TABLE_SIZE);

   /* Fast path: the selector was registered before.  */
   result = (SEL)OBJCHashValueForKey(nameToSelector, name);

   if(result == NULL) {
    /* Slow path: build an internal record wrapping the runtime
       registration and cache it under NAME (key not copied).  */
    objc_selector_internal *ughp;

    ughp = objc_malloc(sizeof(objc_selector_internal));
    ughp->name = sel_registerNameNoCopy(name);
    result = (SEL)OBJCHashInsertValueForKey(nameToSelector, (char *)name, ughp);
   }

   return result;
#endif
}
/* sarray_at_put: store ELEMENT at INDEX in ARRAY, copying data in
   such a way as to be thread reader safe: shared (empty or
   stale-version) indices/buckets are never written in place; a
   private copy is made first and only then installed with a single
   pointer store, so a concurrent reader always sees either the old
   or the new structure, never a half-written one.  */
void
sarray_at_put (struct sarray *array, sidx index, void *element)
{
#ifdef OBJC_SPARSE3
  struct sindex **the_index;
  struct sindex *new_index;
#endif
  struct sbucket **the_bucket;
  struct sbucket *new_bucket;
#ifdef OBJC_SPARSE3
  size_t ioffset;
#endif
  size_t boffset;
  size_t eoffset;

  /* Split INDEX into per-level offsets, either via the precomputed
     bitfield union or by explicit division.  */
#ifdef PRECOMPUTE_SELECTORS
  union sofftype xx;
  xx.idx = index;
#ifdef OBJC_SPARSE3
  ioffset = xx.off.ioffset;
#endif
  boffset = xx.off.boffset;
  eoffset = xx.off.eoffset;
#else /* not PRECOMPUTE_SELECTORS */
#ifdef OBJC_SPARSE3
  ioffset = index/INDEX_CAPACITY;
  boffset = (index/BUCKET_SIZE)%INDEX_SIZE;
  eoffset = index%BUCKET_SIZE;
#else
  boffset = index/BUCKET_SIZE;
  eoffset = index%BUCKET_SIZE;
#endif
#endif /* not PRECOMPUTE_SELECTORS */

  assert (soffset_decode (index) < array->capacity); /* Range check */

  /* Locate the slot (bucket pointer) that INDEX maps to.  */
#ifdef OBJC_SPARSE3
  the_index = &(array->indices[ioffset]);
  the_bucket = &((*the_index)->buckets[boffset]);
#else
  the_bucket = &(array->buckets[boffset]);
#endif

  /* If the element is already there, no copy is needed at all.  */
  if ((*the_bucket)->elems[eoffset] == element)
    return; /* great! we just avoided a lazy copy */

#ifdef OBJC_SPARSE3
  /* First, perform lazy copy/allocation of the index if needed.  */
  if ((*the_index) == array->empty_index)
    {
      /* The index was previously empty (shared), allocate a new one
	 and stamp it with the array's current version.  */
      new_index = (struct sindex *) objc_malloc (sizeof (struct sindex));
      memcpy (new_index, array->empty_index, sizeof (struct sindex));
      new_index->version.version = array->version.version;
      *the_index = new_index; /* Prepared for install. */
      /* Re-resolve the bucket slot inside the fresh index.  */
      the_bucket = &((*the_index)->buckets[boffset]);
      nindices += 1;
    }
  else if ((*the_index)->version.version != array->version.version)
    {
      /* This index belongs to an older version of the array and must
	 be lazy copied before it can be written.  */
      struct sindex *old_index = *the_index;
      new_index = (struct sindex *) objc_malloc (sizeof (struct sindex));
      memcpy (new_index, old_index, sizeof (struct sindex));
      new_index->version.version = array->version.version;
      *the_index = new_index; /* Prepared for install. */
      the_bucket = &((*the_index)->buckets[boffset]);
      nindices += 1;
    }
#endif /* OBJC_SPARSE3 */

  /* Next, perform lazy allocation/copy of the bucket if needed.  */
  if ((*the_bucket) == array->empty_bucket)
    {
      /* The bucket was previously empty (shared); allocate a new one.
	 This is the effect of `lazy' allocation.  */
      new_bucket = (struct sbucket *) objc_malloc (sizeof (struct sbucket));
      memcpy ((void *) new_bucket, (const void *) array->empty_bucket,
	      sizeof (struct sbucket));
      new_bucket->version.version = array->version.version;
      *the_bucket = new_bucket; /* Prepared for install. */
      nbuckets += 1;
    }
  else if ((*the_bucket)->version.version != array->version.version)
    {
      /* Bucket from an older array version: perform lazy copy.  */
      struct sbucket *old_bucket = *the_bucket;
      new_bucket = (struct sbucket *) objc_malloc (sizeof (struct sbucket));
      memcpy (new_bucket, old_bucket, sizeof (struct sbucket));
      new_bucket->version.version = array->version.version;
      *the_bucket = new_bucket; /* Prepared for install. */
      nbuckets += 1;
    }

  /* The bucket is now private to this array version; store the
     element.  */
  (*the_bucket)->elems[eoffset] = element;
}
/* Grow ARRAY so that it can hold at least NEWSIZE elements.  The top
   level table is rebuilt in a freshly allocated block and swapped in
   with a single pointer store so concurrent readers keep seeing a
   consistent table; the old table is handed to sarray_free_garbage.
   Shrink requests (rounded NEWSIZE <= current capacity) are ignored.  */
void
sarray_realloc (struct sarray *array, int newsize)
{
#ifdef OBJC_SPARSE3
  size_t old_max_index = (array->capacity - 1)/INDEX_CAPACITY;
  size_t new_max_index = ((newsize - 1)/INDEX_CAPACITY);
  size_t rounded_size = (new_max_index + 1) * INDEX_CAPACITY;
  struct sindex **new_indices;
  struct sindex **old_indices;
#else /* OBJC_SPARSE2 */
  size_t old_max_index = (array->capacity - 1)/BUCKET_SIZE;
  size_t new_max_index = ((newsize - 1)/BUCKET_SIZE);
  size_t rounded_size = (new_max_index + 1) * BUCKET_SIZE;
  struct sbucket **new_buckets;
  struct sbucket **old_buckets;
#endif
  size_t counter;

  assert (newsize > 0);

  /* The size is the same, just ignore the request.  */
  if (rounded_size <= array->capacity)
    return;

  assert (array->ref_count == 1);	/* stop if lazy copied... */

  /* We are asked to extend the array -- allocate a new bucket table
     and insert empty_bucket in the newly allocated places.  */
  if (rounded_size > array->capacity)
    {
      /* Over-allocate by 4 extra top-level entries to amortize
	 future growth.  */
#ifdef OBJC_SPARSE3
      new_max_index += 4;
      rounded_size = (new_max_index + 1) * INDEX_CAPACITY;
#else /* OBJC_SPARSE2 */
      new_max_index += 4;
      rounded_size = (new_max_index + 1) * BUCKET_SIZE;
#endif

      /* Update capacity.  */
      array->capacity = rounded_size;

      /* Allocate a fresh table (rather than realloc in place) to
	 force re-read by any concurrent readers.  */
#ifdef OBJC_SPARSE3
      old_indices = array->indices;
      new_indices = (struct sindex **)
	objc_malloc ((new_max_index + 1) * sizeof (struct sindex *));
#else /* OBJC_SPARSE2 */
      old_buckets = array->buckets;
      new_buckets = (struct sbucket **)
	objc_malloc ((new_max_index + 1) * sizeof (struct sbucket *));
#endif

      /* Copy buckets below old_max_index (they are still valid).  */
      for (counter = 0; counter <= old_max_index; counter++ )
	{
#ifdef OBJC_SPARSE3
	  new_indices[counter] = old_indices[counter];
#else /* OBJC_SPARSE2 */
	  new_buckets[counter] = old_buckets[counter];
#endif
	}

#ifdef OBJC_SPARSE3
      /* Reset entries above old_max_index to the shared empty
	 index.  */
      for (counter = old_max_index + 1; counter <= new_max_index; counter++)
	new_indices[counter] = array->empty_index;
#else /* OBJC_SPARSE2 */
      /* Reset entries above old_max_index to the shared empty
	 bucket.  */
      for (counter = old_max_index + 1; counter <= new_max_index; counter++)
	new_buckets[counter] = array->empty_bucket;
#endif

      /* Install the new table with a single pointer store.  */
#ifdef OBJC_SPARSE3
      array->indices = new_indices;
#else /* OBJC_SPARSE2 */
      array->buckets = new_buckets;
#endif

      /* Retire the old table (deferred free, so in-flight readers
	 are not pulled out from under).  */
#ifdef OBJC_SPARSE3
      sarray_free_garbage (old_indices);
#else /* OBJC_SPARSE2 */
      sarray_free_garbage (old_buckets);
#endif

      /* Update the global top-level-entry statistics counter.  */
      idxsize += (new_max_index-old_max_index);
      return;
    }
}
/* Create a new sparse array able to hold SIZE entries, with every
   slot initially mapping to DEFAULT_ELEMENT.  All top-level entries
   point at one shared empty index/bucket, so the initial footprint
   is small; sarray_at_put lazily copies them on first write.  Also
   updates the module's global statistics counters (narrays, idxsize,
   nindices, nbuckets).  */
struct sarray *
sarray_new (int size, void *default_element)
{
  struct sarray *arr;
#ifdef OBJC_SPARSE3
  size_t num_indices = ((size - 1)/(INDEX_CAPACITY)) + 1;
  struct sindex **new_indices;
#else /* OBJC_SPARSE2 */
  size_t num_indices = ((size - 1)/BUCKET_SIZE) + 1;
  struct sbucket **new_buckets;
#endif
  size_t counter;

  assert (size > 0);

  /* Allocate core array.  */
  arr = (struct sarray *) objc_malloc (sizeof (struct sarray));
  arr->version.version = 0;

  /* Initialize members.  */
#ifdef OBJC_SPARSE3
  arr->capacity = num_indices*INDEX_CAPACITY;
  new_indices = (struct sindex **)
    objc_malloc (sizeof (struct sindex *) * num_indices);

  /* The single shared "empty" index all unwritten slots point to.  */
  arr->empty_index = (struct sindex *) objc_malloc (sizeof (struct sindex));
  arr->empty_index->version.version = 0;

  narrays += 1;
  idxsize += num_indices;
  nindices += 1;
#else /* OBJC_SPARSE2 */
  arr->capacity = num_indices*BUCKET_SIZE;
  new_buckets = (struct sbucket **)
    objc_malloc (sizeof (struct sbucket *) * num_indices);

  narrays += 1;
  idxsize += num_indices;
#endif

  /* The single shared "empty" bucket holding DEFAULT_ELEMENT.  */
  arr->empty_bucket = (struct sbucket *) objc_malloc (sizeof (struct sbucket));
  arr->empty_bucket->version.version = 0;

  nbuckets += 1;

  arr->ref_count = 1;
  arr->is_copy_of = (struct sarray *) 0;

  /* Fill the shared empty bucket with the default element.  */
  for (counter = 0; counter < BUCKET_SIZE; counter++)
    arr->empty_bucket->elems[counter] = default_element;

  /* Point every slot at the shared empty structures.  */
#ifdef OBJC_SPARSE3
  for (counter = 0; counter < INDEX_SIZE; counter++)
    arr->empty_index->buckets[counter] = arr->empty_bucket;

  for (counter = 0; counter < num_indices; counter++)
    new_indices[counter] = arr->empty_index;
#else /* OBJC_SPARSE2 */
  for (counter = 0; counter < num_indices; counter++)
    new_buckets[counter] = arr->empty_bucket;
#endif

  /* Install the top-level table.  */
#ifdef OBJC_SPARSE3
  arr->indices = new_indices;
#else /* OBJC_SPARSE2 */
  arr->buckets = new_buckets;
#endif

  return arr;
}
/* Add an instance variable named IVAR_NAME of SIZE bytes, aligned to
   2^LOG_2_OF_ALIGNMENT, with encoded TYPE, to CLASS_.  Only valid
   while the class is still in construction.  Returns NO if the
   arguments are invalid or an ivar of that name already exists in
   the class or any superclass; returns YES on success, after growing
   the ivar list and the class's instance_size.  The name and type
   strings are copied into freshly allocated storage.  */
BOOL
class_addIvar (Class class_, const char * ivar_name, size_t size,
	       unsigned char log_2_of_alignment, const char *type)
{
  struct objc_ivar_list *ivars;

  /* Validate: class must exist and still be in construction, and
     name/size/type must be non-empty.  */
  if (class_ == Nil
      || (! CLS_IS_IN_CONSTRUCTION (class_))
      || ivar_name == NULL
      || (strcmp (ivar_name, "") == 0)
      || size == 0
      || type == NULL)
    return NO;

  /* Check if the class has an instance variable with that name
     already.  */
  ivars = class_->ivars;
  if (ivars != NULL)
    {
      int i;

      for (i = 0; i < ivars->ivar_count; i++)
	{
	  struct objc_ivar *ivar = &(ivars->ivar_list[i]);

	  if (strcmp (ivar->ivar_name, ivar_name) == 0)
	    return NO;
	}
    }

  /* Ok, no direct ivars.  Check superclasses.  (While in
     construction, super_class holds the superclass name, hence the
     objc_getClass lookup.)  */
  if (class_getInstanceVariable (objc_getClass ((char *)(class_->super_class)),
				 ivar_name))
    return NO;

  /* Good.  Create space for the new instance variable.  */
  if (ivars)
    {
      /* Grow the existing list by one objc_ivar slot (the struct
	 already embeds one, hence the "- 1").  */
      int ivar_count = ivars->ivar_count + 1;
      int new_size = sizeof (struct objc_ivar_list)
	+ (ivar_count - 1) * sizeof (struct objc_ivar);

      ivars = (struct objc_ivar_list*) objc_realloc (ivars, new_size);
      ivars->ivar_count = ivar_count;
      class_->ivars = ivars;
    }
  else
    {
      /* First ivar: allocate a one-entry list.  */
      int new_size = sizeof (struct objc_ivar_list);

      ivars = (struct objc_ivar_list*) objc_malloc (new_size);
      ivars->ivar_count = 1;
      class_->ivars = ivars;
    }

  /* Now ivars is set to a list of instance variables of the right
     size.  Fill in the new (last) entry.  */
  {
    struct objc_ivar *ivar = &(ivars->ivar_list[ivars->ivar_count - 1]);
    unsigned int alignment = 1 << log_2_of_alignment;
    int misalignment;

    /* Copy the name and type strings; the class owns the copies.  */
    ivar->ivar_name = objc_malloc (strlen (ivar_name) + 1);
    strcpy ((char *)ivar->ivar_name, ivar_name);
    ivar->ivar_type = objc_malloc (strlen (type) + 1);
    strcpy ((char *)ivar->ivar_type, type);

    /* The new instance variable is placed at the end of the existing
       instance_size, at the first byte that is aligned with
       alignment.  */
    misalignment = class_->instance_size % alignment;

    if (misalignment == 0)
      ivar->ivar_offset = class_->instance_size;
    else
      ivar->ivar_offset = class_->instance_size - misalignment + alignment;

    class_->instance_size = ivar->ivar_offset + size;
  }
  return YES;
}
/* Implementation of @synchronized entry: acquire the recursive lock
   associated with OBJECT, creating it if needed.  Returns
   OBJC_SYNC_SUCCESS (a nil OBJECT succeeds trivially).  A per-thread
   cache of held locks is consulted first (unless compiled out with
   SYNC_CACHE_DISABLE); otherwise the lock is looked up in a hashed
   pool of lock nodes guarded by a per-pool protection mutex.  The
   pool protection mutex is always released before the object's lock
   is acquired, so a contended object lock never blocks the pool.  */
int
objc_sync_enter (id object)
{
#ifndef SYNC_CACHE_DISABLE
  int free_cache_slot;
#endif
  int hash;
  lock_node_ptr node;
  lock_node_ptr unused_node;

  /* @synchronized (nil) is a no-op that succeeds.  */
  if (object == nil)
    return OBJC_SYNC_SUCCESS;

#ifndef SYNC_CACHE_DISABLE
  if (lock_cache == NULL)
    {
      /* Note that this calloc only happen only once per thread, the
	 very first time a thread does a objc_sync_enter().  */
      lock_cache = objc_calloc (SYNC_CACHE_SIZE, sizeof (lock_node_ptr));
    }

  /* Check the cache to see if we have a record of having already
     locked the lock corresponding to this object.  While doing so,
     keep track of the first free cache node in case we need it
     later.  */
  node = NULL;
  free_cache_slot = -1;

  {
    int i;
    for (i = 0; i < SYNC_CACHE_SIZE; i++)
      {
	lock_node_ptr locked_node = lock_cache[i];

	if (locked_node == NULL)
	  {
	    if (free_cache_slot == -1)
	      free_cache_slot = i;
	  }
	else if (locked_node->object == object)
	  {
	    node = locked_node;
	    break;
	  }
      }
  }

  if (node != NULL)
    {
      /* We found the lock.  Increase recursive_usage_count, which is
	 protected by node->lock, which we already hold.  */
      node->recursive_usage_count++;

      /* There is no need to actually lock anything, since we already
	 hold the lock.  Correspondingly, objc_sync_exit() will just
	 decrease recursive_usage_count and do nothing to unlock.  */
      return OBJC_SYNC_SUCCESS;
    }
#endif /* SYNC_CACHE_DISABLE */

  /* The following is the standard lookup for the lock in the
     standard pool lock.  It requires a pool protection lock.  */
  hash = SYNC_OBJECT_HASH(object);

  /* Search for an existing lock for 'object'.  While searching, make
     note of any unused lock if we find any.  */
  unused_node = NULL;

  objc_mutex_lock (sync_pool_protection_locks[hash]);

  node = sync_pool_array[hash];

  while (node != NULL)
    {
      if (node->object == object)
	{
	  /* We found the lock.  Claim it (usage_count is protected
	     by the pool lock we hold), then drop the pool lock
	     before blocking on the object's lock.  */
	  node->usage_count++;
	  objc_mutex_unlock (sync_pool_protection_locks[hash]);

#ifndef SYNC_CACHE_DISABLE
	  /* Put it in the cache.  */
	  if (free_cache_slot != -1)
	    lock_cache[free_cache_slot] = node;
#endif

	  /* Lock it.  */
	  objc_mutex_lock (node->lock);

	  return OBJC_SYNC_SUCCESS;
	}

      if (unused_node == NULL  &&  node->usage_count == 0)
	{
	  /* We found the first unused node.  Record it.  */
	  unused_node = node;
	}

      node = node->next;
    }

  /* An existing lock for 'object' could not be found.  */
  if (unused_node != NULL)
    {
      /* But we found a unused lock; use it.  */
      unused_node->object = object;
      unused_node->usage_count = 1;
      unused_node->recursive_usage_count = 0;
      objc_mutex_unlock (sync_pool_protection_locks[hash]);

#ifndef SYNC_CACHE_DISABLE
      if (free_cache_slot != -1)
	lock_cache[free_cache_slot] = unused_node;
#endif

      objc_mutex_lock (unused_node->lock);
      return OBJC_SYNC_SUCCESS;
    }
  else
    {
      /* There are no unused nodes; allocate a new node.  */
      lock_node_ptr new_node;

      /* Create the node.  */
      new_node = objc_malloc (sizeof (struct lock_node));
      new_node->lock = objc_mutex_allocate ();
      new_node->object = object;
      new_node->usage_count = 1;
      new_node->recursive_usage_count = 0;

      /* Attach it at the beginning of the pool.  */
      new_node->next = sync_pool_array[hash];
      sync_pool_array[hash] = new_node;
      objc_mutex_unlock (sync_pool_protection_locks[hash]);

#ifndef SYNC_CACHE_DISABLE
      if (free_cache_slot != -1)
	lock_cache[free_cache_slot] = new_node;
#endif

      objc_mutex_lock (new_node->lock);
      return OBJC_SYNC_SUCCESS;
    }
}