void shutdown_osinet(void) { AFS_STATCNT(shutdown_osinet); #ifndef AFS_PRIVATE_OSI_ALLOCSPACES if (afs_cold_shutdown) { struct osi_packet *tp; while ((tp = freePacketList)) { freePacketList = tp->next; afs_osi_Free(tp, AFS_LRALLOCSIZ); #ifdef KERNEL_HAVE_PIN unpin(tp, AFS_LRALLOCSIZ); #endif } while ((tp = freeSmallList)) { freeSmallList = tp->next; afs_osi_Free(tp, AFS_SMALLOCSIZ); #ifdef KERNEL_HAVE_PIN unpin(tp, AFS_SMALLOCSIZ); #endif } LOCK_INIT(&osi_fsplock, "osi_fsplock"); LOCK_INIT(&osi_flplock, "osi_flplock"); } #endif /* AFS_PRIVATE_OSI_ALLOCSPACES */ if (afs_stats_cmperf.LargeBlocksActive || afs_stats_cmperf.SmallBlocksActive) { afs_warn("WARNING: not all blocks freed: large %d small %d\n", afs_stats_cmperf.LargeBlocksActive, afs_stats_cmperf.SmallBlocksActive); } }
/*
 * Create an event handle that multiplexes descriptors with select(2) and
 * runs its loop in a dedicated thread: every generic engine callback slot
 * is wired to the select/thread implementation, the read/write/exception
 * fd_set masks are cleared, and the two event mutexes are initialized.
 *
 * Returns the handle cast to the generic ACL_EVENT type.
 *
 * NOTE(review): the event_alloc() result is used without a NULL check;
 * presumably event_alloc aborts on allocation failure -- confirm.
 */
ACL_EVENT *event_new_select_thr(void)
{
	EVENT_SELECT_THR *event_thr;

	event_thr = (EVENT_SELECT_THR*) event_alloc(sizeof(EVENT_SELECT_THR));

	snprintf(event_thr->event.event.name,
	    sizeof(event_thr->event.event.name), "thread events - select");
	event_thr->event.event.event_mode = ACL_EVENT_SELECT;
	event_thr->event.event.use_thread = 1;

	/* engine entry points */
	event_thr->event.event.loop_fn = event_loop;
	event_thr->event.event.free_fn = event_free;
	event_thr->event.event.add_dog_fn = event_add_dog;

	/* descriptor enable/disable hooks */
	event_thr->event.event.enable_read_fn = event_enable_read;
	event_thr->event.event.enable_write_fn = event_enable_write;
	event_thr->event.event.enable_listen_fn = event_enable_listen;
	event_thr->event.event.disable_readwrite_fn = event_disable_readwrite;

	/* fd_set membership queries */
	event_thr->event.event.isrset_fn = event_isrset;
	event_thr->event.event.iswset_fn = event_iswset;
	event_thr->event.event.isxset_fn = event_isxset;

	/* timer operations (threaded variants) */
	event_thr->event.event.timer_request = event_timer_request_thr;
	event_thr->event.event.timer_cancel = event_timer_cancel_thr;
	event_thr->event.event.timer_keep = event_timer_keep_thr;
	event_thr->event.event.timer_ifkeep = event_timer_ifkeep_thr;

	FD_ZERO(&event_thr->rmask);
	FD_ZERO(&event_thr->wmask);
	FD_ZERO(&event_thr->xmask);

	LOCK_INIT(&event_thr->event.tm_mutex);
	LOCK_INIT(&event_thr->event.tb_mutex);

	return (ACL_EVENT *) event_thr;
}
/*
 * Allocate the in-memory and on-disk entity-list branches on @opt and put
 * each into a known-empty state: mutex_free cleared, the free/busy/loaded
 * lists emptied, and the branch lock plus "busy" condition variable
 * initialized.
 *
 * Returns 0 on success; the CHECK_ERR* macros handle the failure paths.
 *
 * NOTE(review): if the diskbranch allocation fails, the already-allocated
 * membranch is not released -- presumably acceptable at init time, but
 * worth confirming.
 */
int init_branches(struct opt_s *opt)
{
  int err;

  opt->membranch = (struct entity_list_branch *)malloc(sizeof(struct entity_list_branch));
  CHECK_ERR_NONNULL(opt->membranch, "membranch malloc");
  opt->diskbranch = (struct entity_list_branch *)malloc(sizeof(struct entity_list_branch));
  CHECK_ERR_NONNULL(opt->diskbranch, "diskbranch malloc");

  opt->diskbranch->mutex_free = 0;
  opt->membranch->mutex_free = 0;

  /* Both branches start with no entities on any list. */
  opt->membranch->freelist = NULL;
  opt->membranch->busylist = NULL;
  opt->membranch->loadedlist = NULL;
  opt->diskbranch->freelist = NULL;
  opt->diskbranch->busylist = NULL;
  opt->diskbranch->loadedlist = NULL;

  err = LOCK_INIT(&(opt->membranch->branchlock));
  CHECK_ERR("branchlock");
  err = LOCK_INIT(&(opt->diskbranch->branchlock));
  CHECK_ERR("branchlock");
  err = pthread_cond_init(&(opt->membranch->busysignal), NULL);
  CHECK_ERR("busysignal");
  err = pthread_cond_init(&(opt->diskbranch->busysignal), NULL);
  CHECK_ERR("busysignal");

  return 0;
}
/*
 * Allocate a booster fd table and grow it to its initial capacity.  The
 * expansion is performed under the table's freshly initialized lock.
 *
 * Returns the new table, or NULL if the allocation or the initial
 * expansion fails (the expansion failure is logged and the table freed).
 */
booster_fdtable_t *
booster_fdtable_alloc (void)
{
        booster_fdtable_t *fdtable = NULL;
        int32_t            ret = -1;

        fdtable = CALLOC (1, sizeof (*fdtable));
        GF_VALIDATE_OR_GOTO ("booster-fd", fdtable, out);

        LOCK_INIT (&fdtable->lock);

        LOCK (&fdtable->lock);
        {
                ret = booster_fdtable_expand (fdtable, 0);
        }
        UNLOCK (&fdtable->lock);

        if (ret == -1) {
                gf_log ("booster-fd", GF_LOG_ERROR,
                        "FD-table allocation "
                        "failed");
                FREE (fdtable);
                fdtable = NULL;
        }
out:
        return fdtable;
}
/*
 * Helper functions
 */

/*
 * Build a bare xlator_t for unit tests: a memory-accounting record array
 * with @num_types entries (each record's lock initialized) and an empty
 * glusterfs context.  Allocation or lock-init failures abort the test via
 * the assert_* checks.
 *
 * Returns the initialized xlator.
 */
static xlator_t *
helper_xlator_init(uint32_t num_types)
{
    xlator_t *xl;
    int i, ret;

    REQUIRE(num_types > 0);

    xl = test_calloc(1, sizeof(xlator_t));
    assert_non_null(xl);
    xl->mem_acct.num_types = num_types;
    xl->mem_acct.rec = test_calloc(num_types, sizeof(struct mem_acct_rec));
    assert_non_null(xl->mem_acct.rec);
    xl->ctx = test_calloc(1, sizeof(glusterfs_ctx_t));
    assert_non_null(xl->ctx);

    /* NOTE(review): `int i` vs. uint32_t num_types is a signed/unsigned
     * comparison; harmless for realistic type counts. */
    for (i = 0; i < num_types; i++) {
        ret = LOCK_INIT(&(xl->mem_acct.rec[i].lock));
        assert_int_equal(ret, 0);
    }

    ENSURE(num_types == xl->mem_acct.num_types);
    ENSURE(NULL != xl);

    return xl;
}
clienttable_t * gf_clienttable_alloc (void) { clienttable_t *clienttable = NULL; int result = 0; clienttable = GF_CALLOC (1, sizeof (clienttable_t), gf_common_mt_clienttable_t); if (!clienttable) return NULL; LOCK_INIT (&clienttable->lock); result = gf_client_clienttable_expand (clienttable, GF_CLIENTTABLE_INITIAL_SIZE); if (result != 0) { gf_msg ("client_t", GF_LOG_ERROR, 0, LG_MSG_EXPAND_CLIENT_TABLE_FAILED, "gf_client_clienttable_expand failed"); GF_FREE (clienttable); return NULL; } return clienttable; }
//****************************************************************************** // Function: iiEllisInit() // Parameters: None // // Returns: Nothing // // Description: // // This routine performs any required initialization of the iiEllis subsystem. // //****************************************************************************** static void iiEllisInit(void) { pDelayTimer = kmalloc ( sizeof (struct timer_list), GFP_KERNEL ); init_waitqueue_head(&pDelayWait); LOCK_INIT(&Dl_spinlock); }
/*
 * Create and register a new exporter of type @type.  The first call
 * initializes the afs_xexp lock.  A non-zero @size overrides the
 * allocation size (presumably for callers embedding struct afs_exporter
 * in a larger record -- confirm).  The new, zero-filled exporter is
 * appended to the tail of the root_exported list under the afs_xexp
 * write lock.
 *
 * Returns the initialized exporter.
 *
 * NOTE(review): ex->exp_next is stored after the write lock is dropped;
 * the earlier memset already cleared it, so the late store is redundant
 * rather than a race -- confirm.
 */
struct afs_exporter *
exporter_add(afs_int32 size, struct exporterops *ops, afs_int32 state,
	     afs_int32 type, char *data)
{
    struct afs_exporter *ex, *op;
    afs_int32 length;

    AFS_STATCNT(exporter_add);
    if (!init_xexported) {
	init_xexported = 1;
	LOCK_INIT(&afs_xexp, "afs_xexp");
    }
    length = (size ? size : sizeof(struct afs_exporter));
    ex = (struct afs_exporter *)afs_osi_Alloc(length);
    memset(ex, 0, length);

    /* find the list tail and append */
    ObtainWriteLock(&afs_xexp, 308);
    for (op = root_exported; op; op = op->exp_next) {
	if (!op->exp_next)
	    break;
    }
    if (op)
	op->exp_next = ex;
    else
	root_exported = ex;
    ReleaseWriteLock(&afs_xexp);

    ex->exp_next = 0;
    ex->exp_op = ops;
    ex->exp_states = state;
    ex->exp_data = data;
    ex->exp_type = type;
    return ex;
}
cevents *cevents_create() { cevents *evts; int len; len = sizeof(cevents); evts = (cevents *)jmalloc(len); memset((void *)evts, len, 0); evts->events = jmalloc(sizeof(cevent) * MAX_EVENTS); evts->fired = jmalloc(sizeof(cevent_fired) * MAX_EVENTS); evts->fired_queue = clist_create(); LOCK_INIT(&evts->qlock); LOCK_INIT(&evts->lock); cevents_create_priv_impl(evts); evts->poll_sec = 0; evts->poll_ms = 0; return evts; }
/*
 * Fill a freshly allocated glusterfs context with the CLI defaults:
 * memory accounting, a process uuid, iobuf and event pools, the call pool
 * with its frame/stack memory pools, a stub pool, the context mutex, and
 * an unlimited core-dump rlimit.
 *
 * Returns 0 on success, -1 on any allocation failure.
 *
 * NOTE(review): the later failure paths do not release the earlier
 * allocations (process_uuid, pools); presumably the caller exits on
 * failure -- confirm.
 */
static int
glusterfs_ctx_defaults_init (glusterfs_ctx_t *ctx)
{
        cmd_args_t    *cmd_args = NULL;
        struct rlimit  lim = {0, };
        call_pool_t   *pool = NULL;

        xlator_mem_acct_init (THIS, cli_mt_end);

        ctx->process_uuid = generate_uuid ();
        if (!ctx->process_uuid)
                return -1;

        ctx->page_size = 128 * GF_UNIT_KB;

        ctx->iobuf_pool = iobuf_pool_new (8 * GF_UNIT_MB, ctx->page_size);
        if (!ctx->iobuf_pool)
                return -1;

        ctx->event_pool = event_pool_new (DEFAULT_EVENT_POOL_SIZE);
        if (!ctx->event_pool)
                return -1;

        pool = GF_CALLOC (1, sizeof (call_pool_t), cli_mt_call_pool_t);
        if (!pool)
                return -1;

        /* frame_mem_pool size 112 * 16k */
        pool->frame_mem_pool = mem_pool_new (call_frame_t, 16384);
        if (!pool->frame_mem_pool)
                return -1;

        /* stack_mem_pool size 256 * 8k */
        pool->stack_mem_pool = mem_pool_new (call_stack_t, 8192);
        if (!pool->stack_mem_pool)
                return -1;

        ctx->stub_mem_pool = mem_pool_new (call_stub_t, 1024);
        if (!ctx->stub_mem_pool)
                return -1;

        INIT_LIST_HEAD (&pool->all_frames);
        LOCK_INIT (&pool->lock);
        ctx->pool = pool;

        pthread_mutex_init (&(ctx->lock), NULL);

        cmd_args = &ctx->cmd_args;
        INIT_LIST_HEAD (&cmd_args->xlator_options);

        /* allow full core dumps */
        lim.rlim_cur = RLIM_INFINITY;
        lim.rlim_max = RLIM_INFINITY;
        setrlimit (RLIMIT_CORE, &lim);

        return 0;
}
/*
 * Global one-time initialization of the queue subsystem: reserve shared
 * memory for the queue table, zero it, and give every entry an
 * initialized lock and its handle.
 *
 * Returns 0 on success, -1 if the shared-memory reservation yields no
 * usable address.
 */
int odp_queue_init_global(void)
{
	uint32_t i;
	odp_shm_t shm;

	ODP_DBG("Queue init ... ");

	shm = odp_shm_reserve("odp_queues",
			      sizeof(queue_table_t),
			      sizeof(queue_entry_t), 0);

	queue_tbl = odp_shm_addr(shm);

	if (queue_tbl == NULL)
		return -1;

	memset(queue_tbl, 0, sizeof(queue_table_t));

	for (i = 0; i < ODP_CONFIG_QUEUES; i++) {
		/* init locks */
		queue_entry_t *queue = get_qentry(i);
		LOCK_INIT(queue);
		queue->s.handle = queue_from_id(i);
	}

	ODP_DBG("done\n");
	ODP_DBG("Queue init global\n");
	ODP_DBG(" struct queue_entry_s size %zu\n",
		sizeof(struct queue_entry_s));
	ODP_DBG(" queue_entry_t size %zu\n", sizeof(queue_entry_t));
	ODP_DBG("\n");

	/* write memory barrier: publish the initialized table */
	__k1_wmb();
	return 0;
}
/***************sink operations *****************/

/*
 * Prepare a Sink for use: initialize its lock and mark it idle.
 * A NULL sink is silently ignored.
 */
void sink_init(Sink* sink)
{
	if (sink == NULL) {
		return;
	}

	LOCK_INIT (&sink->lock);
	sink->busy = 0;
}
/*
 * Initialize the venus buffer system with @abuffers directory buffers.
 * Idempotent: only the first call does anything.  With AFS_USEBUFFERS the
 * backing storage comes from unix buffers allocated NPB pages at a time
 * (abuffers is rounded up to a multiple of NPB); otherwise one contiguous
 * data area is allocated.  Every buffer is marked empty, the page hash
 * table is cleared, and the global and per-buffer locks are initialized.
 */
void
DInit(int abuffers)
{
    /* Initialize the venus buffer system. */
    register int i;
    register struct buffer *tb;
#if defined(AFS_USEBUFFERS)
    struct buf *tub;		/* unix buffer for allocation */
#endif

    AFS_STATCNT(DInit);
    if (dinit_flag)
	return;
    dinit_flag = 1;
#if defined(AFS_USEBUFFERS)
    /* round up to next multiple of NPB, since we allocate multiple pages per chunk */
    abuffers = ((abuffers - 1) | (NPB - 1)) + 1;
#endif
    LOCK_INIT(&afs_bufferLock, "afs_bufferLock");
    Buffers = (struct buffer *)afs_osi_Alloc(abuffers * sizeof(struct buffer));
#if !defined(AFS_USEBUFFERS)
    BufferData = (char *)afs_osi_Alloc(abuffers * AFS_BUFFER_PAGESIZE);
#endif
    timecounter = 1;
    afs_stats_cmperf.bufAlloced = nbuffers = abuffers;
    for (i = 0; i < PHSIZE; i++)
	phTable[i] = 0;
    for (i = 0; i < abuffers; i++) {
#if defined(AFS_USEBUFFERS)
	if ((i & (NPB - 1)) == 0) {
	    /* time to allocate a fresh buffer */
	    tub = geteblk(AFS_BUFFER_PAGESIZE * NPB);
	    BufferData = (char *)tub->b_un.b_addr;
	}
#endif
	/* Fill in each buffer with an empty indication. */
	tb = &Buffers[i];
	tb->fid = NULLIDX;
	tb->inode = 0;
	tb->accesstime = 0;
	tb->lockers = 0;
#if defined(AFS_USEBUFFERS)
	/* only the first buffer of each NPB-chunk owns the unix buffer */
	if ((i & (NPB - 1)) == 0)
	    tb->bufp = tub;
	else
	    tb->bufp = 0;
	tb->data = &BufferData[AFS_BUFFER_PAGESIZE * (i & (NPB - 1))];
#else
	tb->data = &BufferData[AFS_BUFFER_PAGESIZE * i];
#endif
	tb->hashIndex = 0;
	tb->dirty = 0;
	AFS_RWLOCK_INIT(&tb->lock, "buffer lock");
    }
    return;
}
/*
 * Find (or create) a free epoll slot for @fd.
 *
 * Scans the table registry for a table with spare capacity, allocating a
 * fresh table for an unused index when necessary, then claims the first
 * slot in that table whose fd is -1: the slot is wiped except for its
 * generation counter (bumped so stale references are detectable), its
 * lock is initialized, and the table's used-count incremented.
 *
 * Returns the global slot index (table_idx * EVENT_EPOLL_SLOTS + slot),
 * or -1 when every table is full or table allocation fails.
 *
 * NOTE(review): double-underscore naming suggests the caller holds the
 * event_pool lock -- confirm against callers.
 */
static int
__event_slot_alloc (struct event_pool *event_pool, int fd)
{
        int                      i = 0;
        int                      table_idx = -1;
        int                      gen = -1;
        struct event_slot_epoll *table = NULL;

        for (i = 0; i < EVENT_EPOLL_TABLES; i++) {
                switch (event_pool->slots_used[i]) {
                case EVENT_EPOLL_SLOTS:
                        /* table full, try the next one */
                        continue;
                case 0:
                        if (!event_pool->ereg[i]) {
                                table = __event_newtable (event_pool, i);
                                if (!table)
                                        return -1;
                        } else {
                                table = event_pool->ereg[i];
                        }
                        break;
                default:
                        table = event_pool->ereg[i];
                        break;
                }

                if (table)
                        /* break out of the loop */
                        break;
        }

        if (!table)
                return -1;

        table_idx = i;

        for (i = 0; i < EVENT_EPOLL_SLOTS; i++) {
                if (table[i].fd == -1) {
                        /* wipe everything except bump the generation */
                        gen = table[i].gen;
                        memset (&table[i], 0, sizeof (table[i]));
                        table[i].gen = gen + 1;

                        LOCK_INIT (&table[i].lock);

                        table[i].fd = fd;
                        event_pool->slots_used[table_idx]++;

                        break;
                }
        }

        return table_idx * EVENT_EPOLL_SLOTS + i;
}
/*
 * Init worker data: cache the worker array, the worker count, and the
 * thief limit from the node/threading layers, then initialize the idle
 * lock -- a plain lock for at most one thief, a counted lock otherwise.
 */
void SNetWorkerInit(void)
{
  snet_workers      = SNetNodeGetWorkers();
  snet_worker_count = SNetNodeGetWorkerCount();
  snet_thief_limit  = SNetThreadingThieves();

  if (snet_thief_limit <= 1) {
    LOCK_INIT(snet_idle_lock);
  } else {
    LOCK_INIT2(snet_idle_lock, snet_thief_limit);
  }
}
/*
 * Set up a reference counter: store the release callback and its opaque
 * data pointer, start the count at one, and (when the platform lacks
 * atomic refcounting) initialize the protecting lock.
 */
void
_gf_ref_init(gf_ref_t *ref, gf_ref_release_t release, void *data)
{
    GF_ASSERT(ref);

    ref->release = release;
    ref->data = data;
    ref->cnt = 1;
#ifdef REFCOUNT_NEEDS_LOCK
    LOCK_INIT(&ref->lk);
#endif
}
/*
 * Initialize a gid cache: set the entry timeout and the bucket count,
 * initialize the cache lock, and clear every cached gid list.
 *
 * Returns 0 on success, -1 when @cache is NULL.
 */
int
gid_cache_init(gid_cache_t *cache, uint32_t timeout)
{
	if (cache == NULL)
		return -1;

	LOCK_INIT(&cache->gc_lock);
	cache->gc_nbuckets = AUX_GID_CACHE_BUCKETS;
	cache->gc_max_age = timeout;
	memset(cache->gc_cache, 0, sizeof(gid_list_t) * AUX_GID_CACHE_SIZE);

	return 0;
}
/*
 * Internal constructor shared by the public hash allocators: builds a
 * hash with @size slots, the supplied hash function, and @resize_fill as
 * the fill fraction.  The element count starts at zero and the rwlock is
 * initialized before the hash is returned.
 */
static hash_type *
__hash_alloc(int size, double resize_fill, hashf_type *hashf)
{
  hash_type *h = util_malloc(sizeof *h);

  UTIL_TYPE_ID_INIT(h, HASH_TYPE_ID);
  h->size        = size;
  h->hashf       = hashf;
  h->table       = hash_sll_alloc_table(h->size);
  h->elements    = 0;
  h->resize_fill = resize_fill;
  LOCK_INIT(&h->rwlock);

  return h;
}
/*
 * One-time process initialization shared by the T() variants: exercise
 * the lock primitives once (init/lock/unlock), create the per-thread key
 * with T(clean_key) as destructor and clear its value, record the main
 * thread id, and fill the low guard area with '[' bytes.
 *
 * Return values are deliberately discarded via the (void) casts;
 * presumably failures here would surface in later checks -- confirm.
 */
static void
T(init_common)(void)
{
	(void) LOCK_INIT(&lock);
	(void) LOCK_LOCK(&lock);
	(void) LOCK_UNLOCK(&lock);
	(void) pthread_key_create(&thread_key, T(clean_key));
	(void) pthread_setspecific(thread_key, NULL);
	main_thread = pthread_self();
	(void) memset(&lo_guard, '[', sizeof (lo_guard));
}
cevents *cevents_create() { cevents *evts; int len; len = sizeof(cevents); evts = (cevents *)jmalloc(len); memset((void *)evts, 0, len); evts->events = jmalloc(sizeof(cevent) * MAX_EVENTS); memset(evts->events, 0, sizeof(cevent) * MAX_EVENTS); evts->fired_fds = jmalloc(sizeof(int) * MAX_EVENTS); for(size_t i = 0; i < MAX_EVENTS; i++) { evts->events[i].fired_queue = clist_create(); } evts->fired_fds = clist_create(); LOCK_INIT(&evts->qlock); LOCK_INIT(&evts->lock); cevents_create_priv_impl(evts); evts->poll_sec = 0; evts->poll_ms = 0; evts->timers = ctimer_base_create(); return evts; }
void Text::internalInit(int pixelSize) { pBitmap = NULL; pPixelSize = pixelSize; if(gFontInfo) return; LOCK_INIT(gFtLock); #ifdef IOS const char* font = get_asset_filepath( "Roboto-Regular.ttf" ); #else const char* font = "Roboto-Regular.ttf"; #endif /* load font file */ long size; long readed_size; unsigned char* fontBuffer; FILE* fontFile = fopen(font, "rb"); fseek(fontFile, 0, SEEK_END); size = ftell(fontFile); /* how long is the file ? */ fseek(fontFile, 0, SEEK_SET); /* reset */ fontBuffer = (unsigned char*)malloc(size); gFontInfo = (stbtt_fontinfo*)malloc(sizeof(stbtt_fontinfo)); readed_size = fread(fontBuffer, 1, size, fontFile); fclose(fontFile); // prepare font if (readed_size != size || !stbtt_InitFont(gFontInfo, fontBuffer, 0)) { LOGI("failed to create font\n"); if(gFontInfo) { free(gFontInfo); gFontInfo = 0; } if(fontBuffer) { free(fontBuffer); fontBuffer = 0; } } }
/*
 * pthread_cond_init(3): initialize condition variable @ocond with
 * attributes @attr.  The UNIX03 conformance flag selects strict POSIX
 * semantics in the shared _pthread_cond_init() implementation; the
 * condition's internal lock is initialized here before handing off.
 */
int
pthread_cond_init(pthread_cond_t *ocond, const pthread_condattr_t *attr)
{
	int conforming;

#if __DARWIN_UNIX03
	conforming = 1;
#else /* __DARWIN_UNIX03 */
	conforming = 0;
#endif /* __DARWIN_UNIX03 */

	_pthread_cond *cond = (_pthread_cond *)ocond;
	LOCK_INIT(cond->lock);
	return _pthread_cond_init(cond, attr, conforming);
}
/*
 * Release all memory-cache blocks at shutdown.  No-op unless the cache is
 * the in-memory type.  Each entry's lock is re-initialized before its
 * data block is freed, then the entry array itself is released and the
 * block count reset.
 *
 * NOTE(review): memCacheBlkSize is hard-reset to 8192 here -- presumably
 * restoring the default for a possible restart; confirm.
 */
void
shutdown_memcache(void)
{
    int index;

    if (cacheDiskType != AFS_FCACHE_TYPE_MEM)
	return;
    memCacheBlkSize = 8192;
    for (index = 0; index < memMaxBlkNumber; index++) {
	LOCK_INIT(&((memCache + index)->afs_memLock), "afs_memLock");
	afs_osi_Free((memCache + index)->data, (memCache + index)->dataSize);
    }
    afs_osi_Free((char *)memCache,
		 memMaxBlkNumber * sizeof(struct memCacheEntry));
    memMaxBlkNumber = 0;
}
/*
 * Generic osi shutdown.  On Darwin 8.0+ the cached vfs context is
 * released and the sleep machinery torn down.  On a cold shutdown the
 * afs_ftf lock is re-initialized so a later warm start begins clean.
 */
void
shutdown_osi(void)
{
    AFS_STATCNT(shutdown_osi);
#ifdef AFS_DARWIN80_ENV
    if (afs_osi_ctxtp_initialized && afs_osi_ctxtp) {
	vfs_context_rele(afs_osi_ctxtp);
	afs_osi_ctxtp = NULL;
	afs_osi_ctxtp_initialized = 0;
    }
    shutdown_osisleep();
#endif
    if (afs_cold_shutdown) {
	LOCK_INIT(&afs_ftf, "afs_ftf");
    }
}
/*
 * One-time setup for the background-request package: initialize the
 * request-queue lock, clear the request table, and mark the package
 * initialized.  On SGI with short stacks the first daemon is stolen to
 * run the delayed-DSlot flusher and never returns from here.
 */
static_inline void
afs_BackgroundDaemon_once(void)
{
    LOCK_INIT(&afs_xbrs, "afs_xbrs");
    memset(afs_brs, 0, sizeof(afs_brs));
    brsInit = 1;
#if defined (AFS_SGI_ENV) && defined(AFS_SGI_SHORTSTACK)
    /*
     * steal the first daemon for doing delayed DSlot flushing
     * (see afs_GetDownDSlot)
     */
    AFS_GUNLOCK();
    afs_sgidaemon();
    exit(CLD_EXITED, 0);
#endif
}
/*
 * Initialize the sequential-store-buffer (SSB) remembered-set
 * implementation: set up the global remset and its lock, allocate the
 * TLS key for per-thread remembered sets, register the statistics
 * counters (HEAVY_STATISTICS builds only), and fill @remset's vtable
 * with the SSB write-barrier, scanning, and thread-lifecycle entry
 * points.
 */
void
mono_sgen_ssb_init (SgenRemeberedSet *remset)
{
	LOCK_INIT (global_remset_mutex);

	global_remset = mono_sgen_alloc_remset (1024, NULL, FALSE);
	global_remset->next = NULL;

	mono_native_tls_alloc (&remembered_set_key, NULL);

#ifdef HEAVY_STATISTICS
	mono_counters_register ("WBarrier generic store stored", MONO_COUNTER_GC | MONO_COUNTER_INT, &stat_wbarrier_generic_store_remset);
	mono_counters_register ("Store remsets", MONO_COUNTER_GC | MONO_COUNTER_LONG, &stat_store_remsets);
	mono_counters_register ("Unique store remsets", MONO_COUNTER_GC | MONO_COUNTER_LONG, &stat_store_remsets_unique);
	mono_counters_register ("Saved remsets 1", MONO_COUNTER_GC | MONO_COUNTER_LONG, &stat_saved_remsets_1);
	mono_counters_register ("Saved remsets 2", MONO_COUNTER_GC | MONO_COUNTER_LONG, &stat_saved_remsets_2);
	mono_counters_register ("Non-global remsets processed", MONO_COUNTER_GC | MONO_COUNTER_LONG, &stat_local_remsets_processed);
	mono_counters_register ("Global remsets added", MONO_COUNTER_GC | MONO_COUNTER_LONG, &stat_global_remsets_added);
	mono_counters_register ("Global remsets re-added", MONO_COUNTER_GC | MONO_COUNTER_LONG, &stat_global_remsets_readded);
	mono_counters_register ("Global remsets processed", MONO_COUNTER_GC | MONO_COUNTER_LONG, &stat_global_remsets_processed);
	mono_counters_register ("Global remsets discarded", MONO_COUNTER_GC | MONO_COUNTER_LONG, &stat_global_remsets_discarded);
#endif

	/* write-barrier entry points */
	remset->wbarrier_set_field = mono_sgen_ssb_wbarrier_set_field;
	remset->wbarrier_set_arrayref = mono_sgen_ssb_wbarrier_set_arrayref;
	remset->wbarrier_arrayref_copy = mono_sgen_ssb_wbarrier_arrayref_copy;
	remset->wbarrier_value_copy = mono_sgen_ssb_wbarrier_value_copy;
	remset->wbarrier_object_copy = mono_sgen_ssb_wbarrier_object_copy;
	remset->wbarrier_generic_nostore = mono_sgen_ssb_wbarrier_generic_nostore;
	remset->record_pointer = mono_sgen_ssb_record_pointer;

	/* collection-time scanning hooks */
	remset->begin_scan_remsets = mono_sgen_ssb_begin_scan_remsets;
	remset->finish_scan_remsets = mono_sgen_ssb_finish_scan_remsets;

	/* thread lifecycle */
	remset->register_thread = mono_sgen_ssb_register_thread;
	remset->cleanup_thread = mono_sgen_ssb_cleanup_thread;
#ifdef HAVE_KW_THREAD
	remset->fill_thread_info_for_suspend = mono_sgen_ssb_fill_thread_info_for_suspend;
#endif
	remset->prepare_for_minor_collection = mono_sgen_ssb_prepare_for_minor_collection;
	remset->prepare_for_major_collection = mono_sgen_ssb_prepare_for_major_collection;
	remset->find_address = mono_sgen_ssb_find_address;
}
/*
 * Create a memory pool of @count objects, each @sizeof_type bytes plus
 * GF_MEM_POOL_PAD_BOUNDARY of padding.  The whole arena is allocated up
 * front and every padded element is threaded onto the pool's free list.
 *
 * Returns the pool, or NULL on invalid arguments or allocation failure.
 *
 * NOTE(review): `pool + (i * padded_sizeof_type)` is arithmetic on a
 * void pointer -- a GCC extension, not portable C.
 */
struct mem_pool *
mem_pool_new_fn (unsigned long sizeof_type,
                 unsigned long count)
{
        struct mem_pool  *mem_pool = NULL;
        unsigned long     padded_sizeof_type = 0;
        void             *pool = NULL;
        int               i = 0;
        struct list_head *list = NULL;

        if (!sizeof_type || !count) {
                gf_log ("mem-pool", GF_LOG_ERROR, "invalid argument");
                return NULL;
        }
        padded_sizeof_type = sizeof_type + GF_MEM_POOL_PAD_BOUNDARY;

        mem_pool = GF_CALLOC (sizeof (*mem_pool), 1, gf_common_mt_mem_pool);
        if (!mem_pool)
                return NULL;

        LOCK_INIT (&mem_pool->lock);
        INIT_LIST_HEAD (&mem_pool->list);

        mem_pool->padded_sizeof_type = padded_sizeof_type;
        mem_pool->cold_count = count;
        mem_pool->real_sizeof_type = sizeof_type;

        pool = GF_CALLOC (count, padded_sizeof_type, gf_common_mt_long);
        if (!pool) {
                GF_FREE (mem_pool);
                return NULL;
        }

        /* thread every element onto the free list */
        for (i = 0; i < count; i++) {
                list = pool + (i * (padded_sizeof_type));
                INIT_LIST_HEAD (list);
                list_add_tail (list, &mem_pool->list);
        }

        mem_pool->pool = pool;
        mem_pool->pool_end = pool + (count * (padded_sizeof_type));

        return mem_pool;
}
/*
 * Initialize the parallel GC worker infrastructure for @num_workers
 * threads.  No-op when the major collector is not parallel.  Sets up the
 * waiting/done semaphores, the shared distribute gray queue, the
 * stealable-stack state for the GC thread and for every worker, the job
 * queue lock and entry type, and the worker statistics counters.
 */
void
sgen_workers_init (int num_workers)
{
	int i;

	if (!sgen_get_major_collector ()->is_parallel)
		return;

	//g_print ("initing %d workers\n", num_workers);

	workers_num = num_workers;

	workers_data = sgen_alloc_internal_dynamic (sizeof (WorkerData) * num_workers, INTERNAL_MEM_WORKER_DATA, TRUE);
	memset (workers_data, 0, sizeof (WorkerData) * num_workers);

	MONO_SEM_INIT (&workers_waiting_sem, 0);
	MONO_SEM_INIT (&workers_done_sem, 0);

	sgen_gray_object_queue_init_with_alloc_prepare (&workers_distribute_gray_queue, workers_gray_queue_share_redirect, &workers_gc_thread_data);
	mono_mutex_init (&workers_gc_thread_data.stealable_stack_mutex, NULL);
	workers_gc_thread_data.stealable_stack_fill = 0;

	if (sgen_get_major_collector ()->alloc_worker_data)
		workers_gc_thread_data.major_collector_data = sgen_get_major_collector ()->alloc_worker_data ();

	for (i = 0; i < workers_num; ++i) {
		/* private gray queue is inited by the thread itself */
		mono_mutex_init (&workers_data [i].stealable_stack_mutex, NULL);
		workers_data [i].stealable_stack_fill = 0;

		if (sgen_get_major_collector ()->alloc_worker_data)
			workers_data [i].major_collector_data = sgen_get_major_collector ()->alloc_worker_data ();
	}

	LOCK_INIT (workers_job_queue_mutex);

	sgen_register_fixed_internal_mem_type (INTERNAL_MEM_JOB_QUEUE_ENTRY, sizeof (JobQueueEntry));

	mono_counters_register ("Stolen from self lock", MONO_COUNTER_GC | MONO_COUNTER_LONG, &stat_workers_stolen_from_self_lock);
	mono_counters_register ("Stolen from self no lock", MONO_COUNTER_GC | MONO_COUNTER_LONG, &stat_workers_stolen_from_self_no_lock);
	mono_counters_register ("Stolen from others", MONO_COUNTER_GC | MONO_COUNTER_LONG, &stat_workers_stolen_from_others);
	mono_counters_register ("# workers waited", MONO_COUNTER_GC | MONO_COUNTER_LONG, &stat_workers_num_waited);
}
/*
 * Generic osi shutdown (multi-platform variant).  On Darwin 8.0+ the
 * cached vfs context is released.  shutdown_osisleep() runs here except
 * on platforms that invoke it from their own cleanup path (see the Linux
 * note below).  On a cold shutdown the afs_ftf lock is re-initialized so
 * a later warm start begins clean.
 */
void
shutdown_osi(void)
{
    AFS_STATCNT(shutdown_osi);
#ifdef AFS_DARWIN80_ENV
    if (afs_osi_ctxtp_initialized && afs_osi_ctxtp) {
	vfs_context_rele(afs_osi_ctxtp);
	afs_osi_ctxtp = NULL;
	afs_osi_ctxtp_initialized = 0;
    }
#endif
#if !defined(AFS_HPUX_ENV) && !defined(UKERNEL) && !defined(AFS_DFBSD_ENV) && !defined(AFS_LINUX26_ENV)
    /* LINUX calls this from afs_cleanup() which hooks into module_exit */
    shutdown_osisleep();
#endif
    if (afs_cold_shutdown) {
	LOCK_INIT(&afs_ftf, "afs_ftf");
    }
}
/*
 * Install the split-nursery minor collector: populate the collector
 * vtable, expand the copy/scan entry points, and initialize the mutex
 * used when refilling the parallel promotion allocation buffer.
 */
void
sgen_split_nursery_init (SgenMinorCollector *collector)
{
	/* promotion allocation */
	collector->alloc_for_promotion = minor_alloc_for_promotion;
	collector->par_alloc_for_promotion = minor_par_alloc_for_promotion;

	/* space preparation and fragment management */
	collector->prepare_to_space = prepare_to_space;
	collector->clear_fragments = clear_fragments;
	collector->build_fragments_get_exclude_head = build_fragments_get_exclude_head;
	collector->build_fragments_release_exclude_head = build_fragments_release_exclude_head;
	collector->build_fragments_finish = build_fragments_finish;

	/* setup and configuration */
	collector->init_nursery = init_nursery;
	collector->handle_gc_param = handle_gc_param;
	collector->print_gc_param_usage = print_gc_param_usage;

	FILL_MINOR_COLLECTOR_COPY_OBJECT (collector);
	FILL_MINOR_COLLECTOR_SCAN_OBJECT (collector);

	LOCK_INIT (par_alloc_buffer_refill_mutex);
}