/* Remove the semaphore sets from the system.
 *
 * WARNING: the sets are system-wide; any other process still using them
 * will fail afterwards! Call only if it is for sure no other process lives.
 * (A sibling double-check is still missing here; install a signal handler.) */
void lock_cleanup()
{
	if (entry_semaphore != 0) {
		lock_set_destroy(entry_semaphore);
		lock_set_dealloc(entry_semaphore);
	}
	if (timer_semaphore != 0) {
		lock_set_destroy(timer_semaphore);
		lock_set_dealloc(timer_semaphore);
	}
	if (reply_semaphore != 0) {
		lock_set_destroy(reply_semaphore);
		lock_set_dealloc(reply_semaphore);
	}
	entry_semaphore = timer_semaphore = reply_semaphore = 0;

	if (timer_group_lock) {
		shm_free(timer_group_lock);
		timer_group_lock = 0; /* was left dangling before: avoid double free */
	}
}
/* Remove the semaphore sets from the system.
 * System-wide operation: all other processes still using these semaphores
 * will fail afterwards -- call only once it is certain no other process
 * lives. (A sibling double-check is still missing; install a signal
 * handler.) */
void lock_cleanup()
{
	if (entry_semaphore != 0) {
		lock_set_destroy(entry_semaphore);
		lock_set_dealloc(entry_semaphore);
	}
	if (reply_semaphore != 0) {
		lock_set_destroy(reply_semaphore);
		lock_set_dealloc(reply_semaphore);
	}
#ifdef ENABLE_ASYNC_MUTEX
	if (async_semaphore != 0) {
		lock_set_destroy(async_semaphore);
		lock_set_dealloc(async_semaphore);
	}
	async_semaphore = 0;
#endif
	entry_semaphore = reply_semaphore = 0;
}
void mod_destroy(void) { unsigned int i; if (rl_htable.maps) { for (i = 0; i < rl_htable.size; i++) map_destroy(rl_htable.maps[i], 0); shm_free(rl_htable.maps); rl_htable.maps = 0; rl_htable.size = 0; } if (rl_htable.locks) { lock_set_destroy(rl_htable.locks); lock_set_dealloc(rl_htable.locks); rl_htable.locks = 0; rl_htable.locks_no = 0; } if (rl_lock) { lock_destroy(rl_lock); lock_dealloc(rl_lock); } RL_SHM_FREE(rl_network_count); RL_SHM_FREE(rl_network_load); RL_SHM_FREE(rl_load_value); RL_SHM_FREE(pid_kp); RL_SHM_FREE(pid_ki); RL_SHM_FREE(pid_kd); RL_SHM_FREE(pid_setpoint); RL_SHM_FREE(drop_rate); RL_SHM_FREE(rl_feedback_limit); if (db_url.s && db_url.len) destroy_cachedb(); }
/* Allocate and initialize a lock set, probing successively smaller sizes
 * until one succeeds. On input *size must be a power of 2; on return it
 * holds the size actually obtained (0 if none).
 * Returns the initialized set, or 0 on failure. */
static gen_lock_set_t* init_lock_set(int *size)
{
	gen_lock_set_t *lset = 0; /* kill warnings */

	while (*size) {
		LM_INFO("probing %d set size\n", *size);
		/* try to create a lock set of the current size */
		lset = lock_set_alloc(*size);
		if (lset != 0) {
			if (lock_set_init(lset) != 0)
				break; /* alloc and init successful */
			LM_INFO("cannot init %d locks\n", *size);
			lock_set_dealloc(lset);
			lset = 0;
		} else {
			LM_INFO("cannot get %d locks\n", *size);
		}
		*size = (*size) >> 1; /* halve and retry */
	}

	if (*size == 0) {
		LM_ERR("cannot get a lock set\n");
		return 0;
	}
	return lset;
}
/* Destroy and release the shared-variable lock set.
 * Also resets shvar_locks: shvar_init_locks() treats a non-zero pointer
 * as "already initialized", so leaving it dangling would make a later
 * re-init silently reuse a freed set. */
void shvar_destroy_locks(void)
{
	if (shvar_locks != 0) {
		lock_set_destroy(shvar_locks);
		lock_set_dealloc(shvar_locks);
		shvar_locks = 0;
	}
}
void lua_sr_destroy(void) { if(_sr_L_env.L!=NULL) { lua_close(_sr_L_env.L); _sr_L_env.L = NULL; } if(_sr_L_env.LL!=NULL) { lua_close(_sr_L_env.LL); _sr_L_env.LL = NULL; } memset(&_sr_L_env, 0, sizeof(sr_lua_env_t)); if(sr_lua_script_ver!=NULL) { shm_free(sr_lua_script_ver->version); shm_free(sr_lua_script_ver); } if (sr_lua_locks!=NULL) { lock_set_destroy( sr_lua_locks ); lock_set_dealloc( sr_lua_locks ); sr_lua_locks = 0; } if(_app_lua_sv!=NULL) { pkg_free(_app_lua_sv); _app_lua_sv = 0; } }
/*! \brief
 * Initialize the usrloc lock set, retrying with progressively smaller
 * sizes until both allocation and initialization succeed.
 *
 * Fixes a hazard in the original do-while: with ul_locks_no <= 0 the
 * counter was decremented past zero and never matched the i==0 exit test,
 * looping forever.
 * \return 0 on success, -1 if no lock set could be obtained
 */
int ul_init_locks(void)
{
	int n;

	for (n = ul_locks_no; n > 0; n--) {
		ul_locks = lock_set_alloc(n);
		if (ul_locks != 0) {
			if (lock_set_init(ul_locks) != 0) {
				ul_locks_no = n;
				LM_INFO("locks array size %d\n", ul_locks_no);
				return 0;
			}
			/* allocated but could not be initialized: release and retry */
			lock_set_dealloc(ul_locks);
			ul_locks = 0;
		}
	}
	LM_ERR("failed to allocate locks\n");
	return -1;
}
/* Destroy and release the usrloc lock set.
 * Resets ul_locks so a stale pointer cannot be destroyed twice. */
void ul_destroy_locks(void)
{
	if (ul_locks != 0) {
		lock_set_destroy(ul_locks);
		lock_set_dealloc(ul_locks);
		ul_locks = 0;
	}
}
void free_cc_data(struct cc_data *data) { struct cc_flow *flow, *f_flow; struct cc_agent *agent,*f_agent; int i; if (data) { /* lock */ if (data->lock) { lock_destroy( data->lock ); lock_dealloc( data->lock ); } if (data->call_locks) { lock_set_destroy( data->call_locks ); lock_set_dealloc( data->call_locks ); } /* flows */ for( flow=data->flows ; flow ; ) { f_flow = flow; flow = flow->next; free_cc_flow( f_flow ); } /* agents */ for(i = 0; i< 2; i++) { for( agent=data->agents[i] ; agent ; ) { f_agent = agent; agent = agent->next; free_cc_agent( f_agent ); } } shm_free(data); } }
int tls_init_multithread(void) { /* init static locks support */ tls_static_locks_no = CRYPTO_num_locks(); if (tls_static_locks_no>0) { /* init a lock set & pass locking function to SSL */ tls_static_locks = lock_set_alloc(tls_static_locks_no); if (tls_static_locks == NULL) { LM_ERR("Failed to alloc static locks\n"); return -1; } if (lock_set_init(tls_static_locks)==0) { LM_ERR("Failed to init static locks\n"); lock_set_dealloc(tls_static_locks); return -1; } CRYPTO_set_locking_callback(tls_static_locks_ops); } CRYPTO_set_id_callback(tls_get_id); /* dynamic locks support*/ CRYPTO_set_dynlock_create_callback(tls_dyn_lock_create); CRYPTO_set_dynlock_lock_callback(tls_dyn_lock_ops); CRYPTO_set_dynlock_destroy_callback(tls_dyn_lock_destroy); return 0; }
/*!
 * \brief Destroy all locks on the list.
 * Also resets subs_locks so a stale pointer cannot be destroyed twice.
 */
void subs_destroy_locks(void)
{
	if (subs_locks != 0) {
		lock_set_destroy(subs_locks);
		lock_set_dealloc(subs_locks);
		subs_locks = 0;
	}
}
/*!
 * \brief Destroy the global dialog table: release its lock set,
 * free every dialog in every hash slot, then the table itself.
 * A no-op if the table was never created.
 */
void destroy_dlg_table(void)
{
	struct dlg_cell *cell, *next_cell;
	unsigned int slot;

	if (d_table == 0)
		return;

	if (d_table->locks) {
		lock_set_destroy(d_table->locks);
		lock_set_dealloc(d_table->locks);
	}

	for (slot = 0; slot < d_table->size; slot++) {
		for (cell = d_table->entries[slot].first; cell; cell = next_cell) {
			next_cell = cell->next;
			destroy_dlg(cell);
		}
	}

	shm_free(d_table);
	d_table = 0;
}
/*!
 * \brief Destroy the per user transaction table: release its lock set,
 * free every record in every hash slot, then the table itself.
 * A no-op if the table was never created.
 */
void destroy_ts_table(void)
{
	struct ts_urecord *rec, *next_rec;
	unsigned int slot;

	if (t_table == 0)
		return;

	if (t_table->locks) {
		lock_set_destroy(t_table->locks);
		lock_set_dealloc(t_table->locks);
	}

	for (slot = 0; slot < t_table->size; slot++) {
		for (rec = t_table->entries[slot].first; rec; rec = next_rec) {
			next_rec = rec->next;
			free_ts_urecord(rec);
		}
	}

	shm_free(t_table);
	t_table = 0;
}
/*
 * Initialize the shared-variable lock set, retrying with progressively
 * smaller sizes until both allocation and initialization succeed.
 *
 * Fixes a hazard in the original do-while: with shvar_locks_no <= 0 the
 * counter was decremented past zero and never matched the i==0 exit test,
 * looping forever.
 * Returns 0 on success (also when already initialized), -1 on failure.
 */
int shvar_init_locks(void)
{
	int i;

	/* already initialized */
	if (shvar_locks != 0)
		return 0;

	for (i = shvar_locks_no; i > 0; i--) {
		shvar_locks = lock_set_alloc(i);
		if (shvar_locks != 0) {
			if (lock_set_init(shvar_locks) != 0) {
				shvar_locks_no = i;
				LM_INFO("locks array size %d\n", shvar_locks_no);
				return 0;
			}
			/* allocated but could not be initialized: release and retry */
			lock_set_dealloc(shvar_locks);
			shvar_locks = 0;
		}
	}
	LM_ERR("failed to allocate locks\n");
	return -1;
}
/* Release the static OpenSSL lock set (if any) and reset the counters. */
void tls_destroy_locks()
{
	if (static_locks == 0)
		return;
	lock_set_destroy(static_locks);
	lock_set_dealloc(static_locks);
	static_locks = 0;
	n_static_locks = 0;
}
/* Destroy and release the postgres lock set, if one was allocated,
 * and reset the bookkeeping globals. */
void pg_destroy_lock_set(void)
{
	if (_pg_lock_set == NULL)
		return;
	lock_set_destroy(_pg_lock_set);
	lock_set_dealloc(_pg_lock_set);
	_pg_lock_set = NULL;
	_pg_lock_size = 0;
}
/* Install OpenSSL locking support for this process: allocate a lock set
 * sized by CRYPTO_num_locks() and register the static locking callback,
 * plus (pre-1.1.0 only) the dynamic-lock callbacks and the thread-id
 * callback.
 * returns -1 on error, 0 on success */
int tls_init_locks()
{
	/* init "static" tls locks */
	n_static_locks=CRYPTO_num_locks();
	if (n_static_locks<0){
		LM_CRIT("bad CRYPTO_num_locks %d\n", n_static_locks);
		n_static_locks=0;
	}
	if (n_static_locks){
		/* refuse to overwrite a locking callback some other library
		 * already installed */
		if (CRYPTO_get_locking_callback()!=NULL) {
			LM_CRIT("ssl locking callback already set\n");
			return -1;
		}
		static_locks=lock_set_alloc(n_static_locks);
		if (static_locks==0){
			LM_CRIT("could not allocate lockset with %d locks\n",
					n_static_locks);
			goto error;
		}
		if (lock_set_init(static_locks)==0){
			LM_CRIT("lock set init failed (%d locks)\n", n_static_locks);
			/* allocated but never initialized: only deallocate */
			lock_set_dealloc(static_locks);
			static_locks=0;
			n_static_locks=0;
			goto error;
		}
		CRYPTO_set_locking_callback(locking_f);
	}
	/* OpenSSL is thread-safe since 1.1.0 */
#if OPENSSL_VERSION_NUMBER < 0x10100000L
	/* set "dynamic" locks callbacks */
	CRYPTO_set_dynlock_create_callback(dyn_create_f);
	CRYPTO_set_dynlock_lock_callback(dyn_lock_f);
	CRYPTO_set_dynlock_destroy_callback(dyn_destroy_f);
#endif
	/* starting with v1.0.0 openssl does not use anymore getpid(), but address
	 * of errno which can point to same virtual address in a multi-process
	 * application
	 * - for reference http://www.openssl.org/docs/crypto/threads.html */
	CRYPTO_set_id_callback(sr_ssl_id_f);
	/* atomic add -- since for now we don't have atomic_add
	 * (only atomic_inc), fallback to the default use-locks mode
	 * CRYPTO_set_add_lock_callback(atomic_add_f);
	 */
	return 0;
error:
	tls_destroy_locks();
	return -1;
}
/* Tear down the destination blacklist: stop its timer, release the
 * locking scheme selected at compile time (per-bucket locks, a lock set,
 * or a single global lock), free every entry in every hash bucket, the
 * hash table itself, the memory-usage counter, and any hooks/stats. */
void destroy_dst_blacklist()
{
	int r;
	struct dst_blst_entry** crt;
	struct dst_blst_entry* e;

	/* stop and free the periodic cleanup timer */
	if (blst_timer_h){
		timer_del(blst_timer_h);
		timer_free(blst_timer_h);
		blst_timer_h=0;
	}
	/* release whichever locking variant was compiled in */
#ifdef BLST_LOCK_PER_BUCKET
	if (dst_blst_hash)
		for(r=0; r<DST_BLST_HASH_SIZE; r++)
			lock_destroy(&dst_blst_hash[r].lock);
#elif defined BLST_LOCK_SET
	if (blst_lock_set){
		lock_set_destroy(blst_lock_set);
		lock_set_dealloc(blst_lock_set);
		blst_lock_set=0;
	}
#else
	if (blst_lock){
		lock_destroy(blst_lock);
		lock_dealloc(blst_lock);
		blst_lock=0;
	}
#endif
	/* free all entries, unlinking each head in turn, then the table */
	if (dst_blst_hash){
		for(r=0; r<DST_BLST_HASH_SIZE; r++){
			crt=&dst_blst_hash[r].first;
			while(*crt){
				e=*crt;
				*crt=(*crt)->next;
				blst_destroy_entry(e);
			}
		}
		shm_free(dst_blst_hash);
		dst_blst_hash=0;
	}
	if (blst_mem_used){
		shm_free((void*)blst_mem_used);
		blst_mem_used=0;
	}
#ifdef DST_BLACKLIST_HOOKS
	destroy_blacklist_hooks();
#endif
#ifdef USE_DST_BLACKLIST_STATS
	if (dst_blacklist_stats)
		shm_free(dst_blacklist_stats);
#endif
}
/*! * \brief Initialize the per user transactions table * \param size size of the table * \return 0 on success, -1 on failure */ int init_ts_table(unsigned int size) { unsigned int n; unsigned int i; t_table = (struct ts_table*)shm_malloc( sizeof(struct ts_table)); if (t_table==0) { LM_ERR("no more shm mem (1)\n"); return -1; } memset( t_table, 0, sizeof(struct ts_table) ); t_table->size = size; n = (size<MAX_TS_LOCKS)?size:MAX_TS_LOCKS; for( ; n>=MIN_TS_LOCKS ; n-- ) { t_table->locks = lock_set_alloc(n); if (t_table->locks==0) continue; if (lock_set_init(t_table->locks)==0) { lock_set_dealloc(t_table->locks); t_table->locks = 0; continue; } t_table->locks_no = n; break; } if (t_table->locks==0) { LM_ERR("unable to allocted at least %d locks for the hash table\n", MIN_TS_LOCKS); goto error; } t_table->entries = (ts_entry_t*)shm_malloc(sizeof(ts_entry_t) * size); if (!t_table->entries) { LM_ERR("no more shm mem (2)\n"); goto error; } for( i=0 ; i<size; i++ ) { memset( &(t_table->entries[i]), 0, sizeof(struct ts_entry) ); t_table->entries[i].next_id = rand() % (3*size); t_table->entries[i].lock_idx = i % t_table->locks_no; } return 0; error: shm_free( t_table ); t_table = NULL; return -1; }
int lua_sr_alloc_script_ver(void) { int size = _sr_L_env.nload; sr_lua_script_ver = (sr_lua_script_ver_t *) shm_malloc(sizeof(sr_lua_script_ver_t)); if(sr_lua_script_ver==NULL) { LM_ERR("cannot allocate shm memory\n"); return -1; } sr_lua_script_ver->version = (unsigned int *) shm_malloc(sizeof(unsigned int)*size); if(sr_lua_script_ver->version==NULL) { LM_ERR("cannot allocate shm memory\n"); goto error; } memset(sr_lua_script_ver->version, 0, sizeof(unsigned int)*size); sr_lua_script_ver->len = size; if((sr_lua_locks=lock_set_alloc(size))==0) { LM_CRIT("failed to alloc lock set\n"); goto error; } if(lock_set_init(sr_lua_locks)==0 ) { LM_CRIT("failed to init lock set\n"); goto error; } return 0; error: if(sr_lua_script_ver!=NULL) { if(sr_lua_script_ver->version!=NULL) { shm_free(sr_lua_script_ver->version); sr_lua_script_ver->version = NULL; } shm_free(sr_lua_script_ver); sr_lua_script_ver = NULL; } if(sr_lua_locks!=NULL) { lock_set_destroy( sr_lua_locks ); lock_set_dealloc( sr_lua_locks ); sr_lua_locks = NULL; } return -1; }
/* Install OpenSSL locking support for this process: allocate a lock set
 * sized by CRYPTO_num_locks(), register the static locking callback,
 * the dynamic-lock callbacks and the thread-id callback.
 * returns -1 on error, 0 on success */
int tls_init_locks()
{
	/* init "static" tls locks */
	n_static_locks=CRYPTO_num_locks();
	if (n_static_locks<0){
		LOG(L_CRIT, "BUG: tls: tls_init_locking: bad CRYPTO_num_locks %d\n",
					n_static_locks);
		n_static_locks=0;
	}
	if (n_static_locks){
		static_locks=lock_set_alloc(n_static_locks);
		if (static_locks==0){
			LOG(L_CRIT, "ERROR: tls_init_locking: could not allocate lockset"
					" with %d locks\n", n_static_locks);
			goto error;
		}
		if (lock_set_init(static_locks)==0){
			LOG(L_CRIT, "ERROR: tls_init_locking: lock_set_init failed "
					"(%d locks)\n", n_static_locks);
			/* allocated but never initialized: only deallocate */
			lock_set_dealloc(static_locks);
			static_locks=0;
			n_static_locks=0;
			goto error;
		}
		CRYPTO_set_locking_callback(locking_f);
	}
	/* set "dynamic" locks callbacks */
	CRYPTO_set_dynlock_create_callback(dyn_create_f);
	CRYPTO_set_dynlock_lock_callback(dyn_lock_f);
	CRYPTO_set_dynlock_destroy_callback(dyn_destroy_f);
	/* starting with v1.0.0 openssl does not use anymore getpid(), but address
	 * of errno which can point to same virtual address in a multi-process
	 * application
	 * - for reference http://www.openssl.org/docs/crypto/threads.html */
	CRYPTO_set_id_callback(sr_ssl_id_f);
	/* atomic add -- since for now we don't have atomic_add
	 * (only atomic_inc), fallback to the default use-locks mode
	 * CRYPTO_set_add_lock_callback(atomic_add_f);
	 */
	return 0;
error:
	tls_destroy_locks();
	return -1;
}
/**
 * free jab_wlist and everything attached to it: per-worker id trees,
 * the alias table (domain, proxy, alias entries), the semaphore set
 * and the list structure itself.
 * - jwl : pointer to the workers list (NULL is a no-op)
 */
void xj_wlist_free(xj_wlist jwl)
{
	int idx;

#ifdef XJ_EXTRA_DEBUG
	LM_DBG("freeing 'xj_wlist' memory ...\n");
#endif
	if(jwl == NULL)
		return;

	/* per-worker sip id trees and the workers array */
	if(jwl->workers != NULL) {
		for(idx = 0; idx < jwl->len; idx++)
			free2tree234(jwl->workers[idx].sip_ids, xj_jkey_free_p);
		_M_SHM_FREE(jwl->workers);
	}

	/* alias table and all of its members */
	if(jwl->aliases != NULL) {
		if(jwl->aliases->d)
			_M_SHM_FREE(jwl->aliases->d);
		if(jwl->aliases->jdm != NULL) {
			_M_SHM_FREE(jwl->aliases->jdm->s);
			_M_SHM_FREE(jwl->aliases->jdm);
		}
		if(jwl->aliases->proxy != NULL) {
			_M_SHM_FREE(jwl->aliases->proxy->s);
			_M_SHM_FREE(jwl->aliases->proxy);
		}
		if(jwl->aliases->size > 0) {
			for(idx = 0; idx < jwl->aliases->size; idx++)
				_M_SHM_FREE(jwl->aliases->a[idx].s);
			_M_SHM_FREE(jwl->aliases->a);
		}
		_M_SHM_FREE(jwl->aliases);
		jwl->aliases = NULL;
	}

	/* semaphore set */
	if(jwl->sems != NULL) {
		lock_set_destroy(jwl->sems);
		lock_set_dealloc(jwl->sems);
	}

	_M_SHM_FREE(jwl);
}
int init_dlg_table(unsigned int size) { unsigned int n; unsigned int i; d_table = (struct dlg_table*)shm_malloc ( sizeof(struct dlg_table) + size*sizeof(struct dlg_entry)); if (d_table==0) { LM_ERR("no more shm mem (1)\n"); goto error0; } memset( d_table, 0, sizeof(struct dlg_table) ); d_table->size = size; d_table->entries = (struct dlg_entry*)(d_table+1); n = (size<MAX_LDG_LOCKS)?size:MAX_LDG_LOCKS; for( ; n>=MIN_LDG_LOCKS ; n-- ) { d_table->locks = lock_set_alloc(n); if (d_table->locks==0) continue; if (lock_set_init(d_table->locks)==0) { lock_set_dealloc(d_table->locks); d_table->locks = 0; continue; } d_table->locks_no = n; break; } if (d_table->locks==0) { LM_ERR("unable to allocted at least %d locks for the hash table\n", MIN_LDG_LOCKS); goto error1; } for( i=0 ; i<size; i++ ) { memset( &(d_table->entries[i]), 0, sizeof(struct dlg_entry) ); d_table->entries[i].next_id = rand(); d_table->entries[i].lock_idx = i % d_table->locks_no; } return 0; error1: shm_free( d_table ); error0: return -1; }
/* Module cleanup: release the shared probability/gflags values, the
 * gflags lock and the config lock set. */
static void mod_destroy(void)
{
	if (probability)
		shm_free(probability);
	if (gflags)
		shm_free(gflags);

	if (gflags_lock) {
		lock_destroy(gflags_lock);
		lock_dealloc(gflags_lock);
	}

	if (_cfg_lock_set != NULL) {
		lock_set_destroy(_cfg_lock_set);
		lock_set_dealloc(_cfg_lock_set);
	}
}
void destroy_script_locks(void) { static_lock *lock_entry; /* Free all static locks */ while (static_locks) { lock_entry = static_locks; static_locks = static_locks->next; lock_dealloc(lock_entry->lock); shm_free(lock_entry); } /* Free all dynamic locks */ lock_set_dealloc(dynamic_locks); }
/* Install OpenSSL locking support for this process: allocate a lock set
 * sized by CRYPTO_num_locks(), register the static locking callback and
 * the dynamic-lock callbacks.
 * returns -1 on error, 0 on success */
int tls_init_locks()
{
	/* init "static" tls locks */
	n_static_locks=CRYPTO_num_locks();
	if (n_static_locks<0){
		LOG(L_CRIT, "BUG: tls: tls_init_locking: bad CRYPTO_num_locks %d\n",
					n_static_locks);
		n_static_locks=0;
	}
	if (n_static_locks){
		static_locks=lock_set_alloc(n_static_locks);
		if (static_locks==0){
			LOG(L_CRIT, "ERROR: tls_init_locking: could not allocate lockset"
					" with %d locks\n", n_static_locks);
			goto error;
		}
		if (lock_set_init(static_locks)==0){
			LOG(L_CRIT, "ERROR: tls_init_locking: lock_set_init failed "
					"(%d locks)\n", n_static_locks);
			/* allocated but never initialized: only deallocate */
			lock_set_dealloc(static_locks);
			static_locks=0;
			n_static_locks=0;
			goto error;
		}
		CRYPTO_set_locking_callback(locking_f);
	}
	/* set "dynamic" locks callbacks */
	CRYPTO_set_dynlock_create_callback(dyn_create_f);
	CRYPTO_set_dynlock_lock_callback(dyn_lock_f);
	CRYPTO_set_dynlock_destroy_callback(dyn_destroy_f);
	/* thread id callback: not needed because ser doesn't use thread and
	 * openssl already uses getpid() (by default)
	 * CRYPTO_set_id_callback(id_f);
	 */
	/* atomic add -- since for now we don't have atomic_add
	 * (only atomic_inc), fallback to the default use-locks mode
	 * CRYPTO_set_add_lock_callback(atomic_add_f);
	 */
	return 0;
error:
	tls_destroy_locks();
	return -1;
}
/* Set up the fallback locks used to emulate memory barriers and atomic
 * operations on architectures without native support; which locks exist
 * depends on the compile-time configuration.
 * returns 0 on success, -1 on error */
int init_atomic_ops()
{
#ifdef MEMBAR_USES_LOCK
	if ((__membar_lock=lock_alloc())==0){
		goto error;
	}
	if (lock_init(__membar_lock)==0){
		lock_dealloc(__membar_lock);
		__membar_lock=0;
		goto error;
	}
	_membar_lock; /* start with the lock "taken" so that we can safely use
					 unlock/lock sequences on it later */
#endif
#ifdef ATOMIC_OPS_USE_LOCK_SET
	if ((_atomic_lock_set=lock_set_alloc(_ATOMIC_LS_SIZE))==0){
		goto error;
	}
	if (lock_set_init(_atomic_lock_set)==0){
		/* allocated but never initialized: only deallocate */
		lock_set_dealloc(_atomic_lock_set);
		_atomic_lock_set=0;
		goto error;
	}
#elif defined ATOMIC_OPS_USE_LOCK
	if ((_atomic_lock=lock_alloc())==0){
		goto error;
	}
	if (lock_init(_atomic_lock)==0){
		lock_dealloc(_atomic_lock);
		_atomic_lock=0;
		goto error;
	}
#endif /* ATOMIC_OPS_USE_LOCK_SET/ATOMIC_OPS_USE_LOCK */
	return 0;
	/* the error label exists only when at least one lock variant is
	 * compiled in; otherwise nothing above can jump to it */
#if defined MEMBAR_USES_LOCK || defined ATOMIC_OPS_USE_LOCK || \
		defined ATOMIC_OPS_USE_LOCK_SET
error:
	destroy_atomic_ops();
	return -1;
#endif
}
/* Release the fallback locks created by init_atomic_ops(); each pointer
 * is reset so a repeated call is safe. */
void destroy_atomic_ops()
{
#ifdef MEMBAR_USES_LOCK
	if (__membar_lock!=0){
		lock_destroy(__membar_lock);
		lock_dealloc(__membar_lock);
		__membar_lock=0;
	}
#endif
#ifdef ATOMIC_OPS_USE_LOCK_SET
	if (_atomic_lock_set!=0){
		lock_set_destroy(_atomic_lock_set);
		lock_set_dealloc(_atomic_lock_set);
		_atomic_lock_set=0;
	}
#elif defined ATOMIC_OPS_USE_LOCK
	if (_atomic_lock!=0){
		lock_destroy(_atomic_lock);
		lock_dealloc(_atomic_lock);
		_atomic_lock=0;
	}
#endif /* ATOMIC_OPS_USE_LOCK_SET / ATOMIC_OPS_USE_LOCK*/
}
/* Tear down the IP tree: release its entry lock set, free every branch
 * node, then the root itself. A no-op if the tree was never built. */
void destroy_ip_tree(void)
{
	int branch;

	if (root == 0)
		return;

	/* destroy and free the lock set */
	if (root->entry_lock_set) {
		lock_set_destroy(root->entry_lock_set);
		lock_set_dealloc(root->entry_lock_set);
	}

	/* destroy all the nodes */
	for (branch = 0; branch < MAX_IP_BRANCHES; branch++) {
		if (root->entries[branch].node)
			destroy_ip_node(root->entries[branch].node);
	}

	shm_free(root);
	root = 0;
}
/** * init a workers list * - pipes : communication pipes * - size : size of list - number of workers * - max : maximum number of jobs per worker * return : pointer to workers list or NULL on error */ xj_wlist xj_wlist_init(int **pipes, int size, int max, int cache_time, int sleep_time, int delay_time) { int i; xj_wlist jwl = NULL; if(pipes == NULL || size <= 0 || max <= 0) return NULL; #ifdef XJ_EXTRA_DEBUG LM_DBG("-----START-----\n"); #endif jwl = (xj_wlist)_M_SHM_MALLOC(sizeof(t_xj_wlist)); if(jwl == NULL) return NULL; jwl->len = size; jwl->maxj = max; jwl->cachet = cache_time; jwl->delayt = delay_time; jwl->sleept = sleep_time; jwl->aliases = NULL; jwl->sems = NULL; i = 0; /* alloc locks*/ if((jwl->sems = lock_set_alloc(size)) == NULL){ LM_CRIT("failed to alloc lock set\n"); goto clean; }; /* init the locks*/ if (lock_set_init(jwl->sems)==0){ LM_CRIT("failed to initialize the locks\n"); goto clean; }; jwl->workers = (xj_worker)_M_SHM_MALLOC(size*sizeof(t_xj_worker)); if(jwl->workers == NULL){ lock_set_destroy(jwl->sems); goto clean; } for(i = 0; i < size; i++) { jwl->workers[i].nr = 0; jwl->workers[i].pid = 0; jwl->workers[i].wpipe = pipes[i][1]; jwl->workers[i].rpipe = pipes[i][0]; if((jwl->workers[i].sip_ids = newtree234(xj_jkey_cmp)) == NULL){ lock_set_destroy(jwl->sems); goto clean; } } return jwl; clean: LM_DBG("error occurred -> cleaning\n"); if(jwl->sems != NULL) lock_set_dealloc(jwl->sems); if(jwl->workers != NULL) { while(i>=0) { if(jwl->workers[i].sip_ids == NULL) free2tree234(jwl->workers[i].sip_ids, xj_jkey_free_p); i--; } _M_SHM_FREE(jwl->workers); } _M_SHM_FREE(jwl); return NULL; }