/* Remove the semaphore sets from the system.
 *
 * WARNING: the sets are system-wide — any other process still using them
 * will fail afterwards.  Call only once it is certain no sibling process
 * is alive (a signal-handler based sibling double-check is still missing
 * here). */
void lock_cleanup()
{
	if (entry_semaphore != 0) {
		lock_set_destroy(entry_semaphore);
		lock_set_dealloc(entry_semaphore);
	}
	if (reply_semaphore != 0) {
		lock_set_destroy(reply_semaphore);
		lock_set_dealloc(reply_semaphore);
	}
#ifdef ENABLE_ASYNC_MUTEX
	if (async_semaphore != 0) {
		lock_set_destroy(async_semaphore);
		lock_set_dealloc(async_semaphore);
	}
	async_semaphore = 0;
#endif
	entry_semaphore = reply_semaphore = 0;
}
/* Remove the semaphore sets from the system.
 *
 * WARNING: the sets are system-wide — any other process still using them
 * will fail afterwards.  Call only once it is certain no sibling process
 * is alive (a signal-handler based sibling double-check is still missing
 * here). */
void lock_cleanup()
{
	if (entry_semaphore !=0){
		lock_set_destroy(entry_semaphore);
		lock_set_dealloc(entry_semaphore);
	}
	if (timer_semaphore !=0){
		lock_set_destroy(timer_semaphore);
		lock_set_dealloc(timer_semaphore);
	}
	if (reply_semaphore !=0) {
		lock_set_destroy(reply_semaphore);
		lock_set_dealloc(reply_semaphore);
	}
	entry_semaphore = timer_semaphore = reply_semaphore = 0;
	if (timer_group_lock) {
		shm_free(timer_group_lock);
		/* fix: reset the pointer like the semaphore globals above, so a
		 * repeated cleanup cannot double-free the shm block */
		timer_group_lock = 0;
	}
}
void free_cc_data(struct cc_data *data) { struct cc_flow *flow, *f_flow; struct cc_agent *agent,*f_agent; int i; if (data) { /* lock */ if (data->lock) { lock_destroy( data->lock ); lock_dealloc( data->lock ); } if (data->call_locks) { lock_set_destroy( data->call_locks ); lock_set_dealloc( data->call_locks ); } /* flows */ for( flow=data->flows ; flow ; ) { f_flow = flow; flow = flow->next; free_cc_flow( f_flow ); } /* agents */ for(i = 0; i< 2; i++) { for( agent=data->agents[i] ; agent ; ) { f_agent = agent; agent = agent->next; free_cc_agent( f_agent ); } } shm_free(data); } }
/* Destroy and release the shared-variable lock set.
 * The global pointer is reset afterwards so a second call is a no-op. */
void shvar_destroy_locks(void)
{
	if (shvar_locks != 0) {
		lock_set_destroy(shvar_locks);
		lock_set_dealloc(shvar_locks);
		shvar_locks = 0; /* fix: guard against double destroy/dealloc */
	}
}
/*!
 * \brief Tear down the per-user transaction table: release its lock
 * set, free every record in every hash slot and the table itself.
 */
void destroy_ts_table(void)
{
	struct ts_urecord *rec, *next;
	unsigned int slot;

	if (t_table == 0)
		return;

	if (t_table->locks) {
		lock_set_destroy(t_table->locks);
		lock_set_dealloc(t_table->locks);
	}

	for (slot = 0; slot < t_table->size; slot++) {
		for (rec = t_table->entries[slot].first; rec; rec = next) {
			next = rec->next;
			free_ts_urecord(rec);
		}
	}

	shm_free(t_table);
	t_table = 0;
}
/*!
 * \brief Tear down the global dialog table: release its lock set, free
 * every dialog in every hash slot and the table itself.
 */
void destroy_dlg_table(void)
{
	struct dlg_cell *cell, *next;
	unsigned int slot;

	if (d_table == 0)
		return;

	if (d_table->locks) {
		lock_set_destroy(d_table->locks);
		lock_set_dealloc(d_table->locks);
	}

	for (slot = 0; slot < d_table->size; slot++) {
		for (cell = d_table->entries[slot].first; cell; cell = next) {
			next = cell->next;
			destroy_dlg(cell);
		}
	}

	shm_free(d_table);
	d_table = 0;
}
/*!
 * \brief Destroy all locks on the list.
 * The global pointer is reset afterwards so a second call is a no-op.
 */
void subs_destroy_locks(void)
{
	if (subs_locks != 0) {
		lock_set_destroy(subs_locks);
		lock_set_dealloc(subs_locks);
		subs_locks = 0; /* fix: guard against double destroy/dealloc */
	}
}
/* Destroy and release the usrloc lock set.
 * The global pointer is reset afterwards so a second call is a no-op. */
void ul_destroy_locks(void)
{
	if (ul_locks != 0) {
		lock_set_destroy(ul_locks);
		lock_set_dealloc(ul_locks);
		ul_locks = 0; /* fix: guard against double destroy/dealloc */
	}
}
/* Module shutdown: close both Lua states, free the shared script
 * version table, its lock set and the per-process version cache. */
void lua_sr_destroy(void)
{
	if(_sr_L_env.L!=NULL) {
		lua_close(_sr_L_env.L);
		_sr_L_env.L = NULL;
	}
	if(_sr_L_env.LL!=NULL) {
		lua_close(_sr_L_env.LL);
		_sr_L_env.LL = NULL;
	}
	memset(&_sr_L_env, 0, sizeof(sr_lua_env_t));

	if(sr_lua_script_ver!=NULL) {
		shm_free(sr_lua_script_ver->version);
		shm_free(sr_lua_script_ver);
		/* fix: the pointer was left dangling after the free, inviting
		 * use-after-free / double-free on a repeated destroy */
		sr_lua_script_ver = NULL;
	}

	if (sr_lua_locks!=NULL) {
		lock_set_destroy( sr_lua_locks );
		lock_set_dealloc( sr_lua_locks );
		sr_lua_locks = 0;
	}

	if(_app_lua_sv!=NULL) {
		pkg_free(_app_lua_sv);
		_app_lua_sv = 0;
	}
}
void mod_destroy(void) { unsigned int i; if (rl_htable.maps) { for (i = 0; i < rl_htable.size; i++) map_destroy(rl_htable.maps[i], 0); shm_free(rl_htable.maps); rl_htable.maps = 0; rl_htable.size = 0; } if (rl_htable.locks) { lock_set_destroy(rl_htable.locks); lock_set_dealloc(rl_htable.locks); rl_htable.locks = 0; rl_htable.locks_no = 0; } if (rl_lock) { lock_destroy(rl_lock); lock_dealloc(rl_lock); } RL_SHM_FREE(rl_network_count); RL_SHM_FREE(rl_network_load); RL_SHM_FREE(rl_load_value); RL_SHM_FREE(pid_kp); RL_SHM_FREE(pid_ki); RL_SHM_FREE(pid_kd); RL_SHM_FREE(pid_setpoint); RL_SHM_FREE(drop_rate); RL_SHM_FREE(rl_feedback_limit); if (db_url.s && db_url.len) destroy_cachedb(); }
/* Tear down the static TLS lock set (if any) and reset the
 * bookkeeping globals. */
void tls_destroy_locks()
{
	if (static_locks == 0)
		return;
	lock_set_destroy(static_locks);
	lock_set_dealloc(static_locks);
	static_locks = 0;
	n_static_locks = 0;
}
/* Release the PostgreSQL connection lock set and reset the globals so
 * a repeated call is a no-op. */
void pg_destroy_lock_set(void)
{
	if (_pg_lock_set == NULL)
		return;
	lock_set_destroy(_pg_lock_set);
	lock_set_dealloc(_pg_lock_set);
	_pg_lock_set = NULL;
	_pg_lock_size = 0;
}
/* Tear down the destination blacklist: stop the expiry timer, release
 * the locking primitives (whichever locking variant was compiled in),
 * free every hash-bucket entry, the hash table itself and the
 * memory/statistics counters. */
void destroy_dst_blacklist()
{
	int r;
	struct dst_blst_entry** crt;
	struct dst_blst_entry* e;

	/* stop and free the expiry timer first */
	if (blst_timer_h){
		timer_del(blst_timer_h);
		timer_free(blst_timer_h);
		blst_timer_h=0;
	}
#ifdef BLST_LOCK_PER_BUCKET
	/* one lock embedded per hash bucket */
	if (dst_blst_hash)
		for(r=0; r<DST_BLST_HASH_SIZE; r++)
			lock_destroy(&dst_blst_hash[r].lock);
#elif defined BLST_LOCK_SET
	/* a shared lock set covering the buckets */
	if (blst_lock_set){
		lock_set_destroy(blst_lock_set);
		lock_set_dealloc(blst_lock_set);
		blst_lock_set=0;
	}
#else
	/* a single global lock */
	if (blst_lock){
		lock_destroy(blst_lock);
		lock_dealloc(blst_lock);
		blst_lock=0;
	}
#endif
	if (dst_blst_hash){
		/* walk every bucket chain, unlinking and destroying each entry */
		for(r=0; r<DST_BLST_HASH_SIZE; r++){
			crt=&dst_blst_hash[r].first;
			while(*crt){
				e=*crt;
				*crt=(*crt)->next;
				blst_destroy_entry(e);
			}
		}
		shm_free(dst_blst_hash);
		dst_blst_hash=0;
	}
	if (blst_mem_used){
		shm_free((void*)blst_mem_used);
		blst_mem_used=0;
	}
#ifdef DST_BLACKLIST_HOOKS
	destroy_blacklist_hooks();
#endif
#ifdef USE_DST_BLACKLIST_STATS
	/* NOTE(review): unlike the pointers above, dst_blacklist_stats is
	 * not reset to 0 after the free — confirm destroy is never invoked
	 * twice */
	if (dst_blacklist_stats)
		shm_free(dst_blacklist_stats);
#endif
}
/* Allocate the shared-memory script-version table (one slot per loaded
 * script, _sr_L_env.nload) and the lock set protecting it.
 * Returns 0 on success, -1 on error; on error everything allocated so
 * far is rolled back. */
int lua_sr_alloc_script_ver(void)
{
	int size = _sr_L_env.nload;

	sr_lua_script_ver = (sr_lua_script_ver_t *)
			shm_malloc(sizeof(sr_lua_script_ver_t));
	if(sr_lua_script_ver==NULL)
	{
		LM_ERR("cannot allocate shm memory\n");
		return -1;
	}

	sr_lua_script_ver->version = (unsigned int *)
			shm_malloc(sizeof(unsigned int)*size);
	if(sr_lua_script_ver->version==NULL)
	{
		LM_ERR("cannot allocate shm memory\n");
		goto error;
	}
	/* all script versions start at 0 */
	memset(sr_lua_script_ver->version, 0, sizeof(unsigned int)*size);
	sr_lua_script_ver->len = size;

	if((sr_lua_locks=lock_set_alloc(size))==0)
	{
		LM_CRIT("failed to alloc lock set\n");
		goto error;
	}
	if(lock_set_init(sr_lua_locks)==0 )
	{
		LM_CRIT("failed to init lock set\n");
		goto error;
	}

	return 0;

error:
	/* roll back whatever was allocated before the failure */
	if(sr_lua_script_ver!=NULL)
	{
		if(sr_lua_script_ver->version!=NULL)
		{
			shm_free(sr_lua_script_ver->version);
			sr_lua_script_ver->version = NULL;
		}
		shm_free(sr_lua_script_ver);
		sr_lua_script_ver = NULL;
	}
	/* NOTE(review): when lock_set_init() failed we still call
	 * lock_set_destroy() on a set that was never initialized — confirm
	 * the lock API tolerates destroy-after-failed-init (compare the
	 * alloc-failure path, which skips destroy entirely) */
	if(sr_lua_locks!=NULL)
	{
		lock_set_destroy( sr_lua_locks );
		lock_set_dealloc( sr_lua_locks );
		sr_lua_locks = NULL;
	}
	return -1;
}
/**
 * Free a jab_wlist and everything hanging off it.
 * - jwl : the workers list to release (NULL is a no-op)
 */
void xj_wlist_free(xj_wlist jwl)
{
	int idx;

#ifdef XJ_EXTRA_DEBUG
	LM_DBG("freeing 'xj_wlist' memory ...\n");
#endif
	if (jwl == NULL)
		return;

	/* per-worker SIP id trees */
	if (jwl->workers != NULL) {
		for (idx = 0; idx < jwl->len; idx++)
			free2tree234(jwl->workers[idx].sip_ids, xj_jkey_free_p);
		_M_SHM_FREE(jwl->workers);
	}

	/* alias table and its strings */
	if (jwl->aliases != NULL) {
		if (jwl->aliases->d)
			_M_SHM_FREE(jwl->aliases->d);
		if (jwl->aliases->jdm != NULL) {
			_M_SHM_FREE(jwl->aliases->jdm->s);
			_M_SHM_FREE(jwl->aliases->jdm);
		}
		if (jwl->aliases->proxy != NULL) {
			_M_SHM_FREE(jwl->aliases->proxy->s);
			_M_SHM_FREE(jwl->aliases->proxy);
		}
		if (jwl->aliases->size > 0) {
			for (idx = 0; idx < jwl->aliases->size; idx++)
				_M_SHM_FREE(jwl->aliases->a[idx].s);
			_M_SHM_FREE(jwl->aliases->a);
		}
		_M_SHM_FREE(jwl->aliases);
		jwl->aliases = NULL;
	}

	/* the worker lock set */
	if (jwl->sems != NULL) {
		lock_set_destroy(jwl->sems);
		lock_set_dealloc(jwl->sems);
	}

	_M_SHM_FREE(jwl);
}
/* Module shutdown: release the probability and gflags shared-memory
 * blocks together with the locks protecting them. */
static void mod_destroy(void)
{
	if (probability != NULL)
		shm_free(probability);
	if (gflags != NULL)
		shm_free(gflags);
	if (gflags_lock != NULL) {
		lock_destroy(gflags_lock);
		lock_dealloc(gflags_lock);
	}
	if (_cfg_lock_set != NULL) {
		lock_set_destroy(_cfg_lock_set);
		lock_set_dealloc(_cfg_lock_set);
	}
}
/* Release the fallback locking primitives used to emulate memory
 * barriers / atomic operations on platforms without native support.
 * Each pointer is reset after release so repeated calls are no-ops. */
void destroy_atomic_ops()
{
#ifdef MEMBAR_USES_LOCK
	if (__membar_lock != 0) {
		lock_destroy(__membar_lock);
		lock_dealloc(__membar_lock);
		__membar_lock = 0;
	}
#endif
#ifdef ATOMIC_OPS_USE_LOCK_SET
	if (_atomic_lock_set != 0) {
		lock_set_destroy(_atomic_lock_set);
		lock_set_dealloc(_atomic_lock_set);
		_atomic_lock_set = 0;
	}
#elif defined ATOMIC_OPS_USE_LOCK
	if (_atomic_lock != 0) {
		lock_destroy(_atomic_lock);
		lock_dealloc(_atomic_lock);
		_atomic_lock = 0;
	}
#endif /* ATOMIC_OPS_USE_LOCK_SET / ATOMIC_OPS_USE_LOCK */
}
/* Free the whole IP tree: first the entry lock set, then every
 * populated branch node, finally the root itself. */
void destroy_ip_tree(void)
{
	int branch;

	if (root == 0)
		return;

	/* destroy and free the lock set guarding the entries */
	if (root->entry_lock_set) {
		lock_set_destroy(root->entry_lock_set);
		lock_set_dealloc(root->entry_lock_set);
	}

	/* destroy all populated branches */
	for (branch = 0; branch < MAX_IP_BRANCHES; branch++) {
		if (root->entries[branch].node)
			destroy_ip_node(root->entries[branch].node);
	}

	shm_free(root);
	root = 0;
}
/** * init a workers list * - pipes : communication pipes * - size : size of list - number of workers * - max : maximum number of jobs per worker * return : pointer to workers list or NULL on error */ xj_wlist xj_wlist_init(int **pipes, int size, int max, int cache_time, int sleep_time, int delay_time) { int i; xj_wlist jwl = NULL; if(pipes == NULL || size <= 0 || max <= 0) return NULL; #ifdef XJ_EXTRA_DEBUG LM_DBG("-----START-----\n"); #endif jwl = (xj_wlist)_M_SHM_MALLOC(sizeof(t_xj_wlist)); if(jwl == NULL) return NULL; jwl->len = size; jwl->maxj = max; jwl->cachet = cache_time; jwl->delayt = delay_time; jwl->sleept = sleep_time; jwl->aliases = NULL; jwl->sems = NULL; i = 0; /* alloc locks*/ if((jwl->sems = lock_set_alloc(size)) == NULL){ LM_CRIT("failed to alloc lock set\n"); goto clean; }; /* init the locks*/ if (lock_set_init(jwl->sems)==0){ LM_CRIT("failed to initialize the locks\n"); goto clean; }; jwl->workers = (xj_worker)_M_SHM_MALLOC(size*sizeof(t_xj_worker)); if(jwl->workers == NULL){ lock_set_destroy(jwl->sems); goto clean; } for(i = 0; i < size; i++) { jwl->workers[i].nr = 0; jwl->workers[i].pid = 0; jwl->workers[i].wpipe = pipes[i][1]; jwl->workers[i].rpipe = pipes[i][0]; if((jwl->workers[i].sip_ids = newtree234(xj_jkey_cmp)) == NULL){ lock_set_destroy(jwl->sems); goto clean; } } return jwl; clean: LM_DBG("error occurred -> cleaning\n"); if(jwl->sems != NULL) lock_set_dealloc(jwl->sems); if(jwl->workers != NULL) { while(i>=0) { if(jwl->workers[i].sip_ids == NULL) free2tree234(jwl->workers[i].sip_ids, xj_jkey_free_p); i--; } _M_SHM_FREE(jwl->workers); } _M_SHM_FREE(jwl); return NULL; }
/* Initialize the locks; return 0 on success, -1 otherwise.
 *
 * When no native lock type is preferred (GEN_LOCK_T_PREFERED unset),
 * SysV semaphore sets are probed: start at SEM_MIN and grow towards
 * SEM_MAX; on the first allocation failure step one size back
 * (probe_run=1) and settle for the largest size the system accepts.
 * The reply set reuses the probed size; a reply/async failure shrinks
 * the size and restarts the probe via "goto again". */
int lock_initialize()
{
#ifndef GEN_LOCK_T_PREFERED
	int i;
	int probe_run;
#endif

	/* first try allocating semaphore sets with fixed number of semaphores */
	DBG("DEBUG: lock_initialize: lock initialization started\n");

#ifndef GEN_LOCK_T_PREFERED
	i=SEM_MIN;
	/* probing phase: 0=initial, 1=after the first failure */
	probe_run=0;
again:
	do {
		/* clean-up previous attempt before re-probing */
		if (entry_semaphore!=0){ /* clean-up previous attempt */
			lock_set_destroy(entry_semaphore);
			lock_set_dealloc(entry_semaphore);
		}
		if (reply_semaphore!=0){
			lock_set_destroy(reply_semaphore);
			lock_set_dealloc(reply_semaphore);
		}
#ifdef ENABLE_ASYNC_MUTEX
		if (async_semaphore!=0){
			lock_set_destroy(async_semaphore);
			lock_set_dealloc(async_semaphore);
		}
#endif
		if (i==0){
			LOG(L_CRIT, "lock_initialize: could not allocate semaphore"
					" sets\n");
			goto error;
		}

		if (((entry_semaphore=lock_set_alloc(i))==0)||
				(lock_set_init(entry_semaphore)==0)) {
			DBG("DEBUG: lock_initialize: entry semaphore "
					"initialization failure: %s\n", strerror( errno ) );
			if (entry_semaphore){
				lock_set_dealloc(entry_semaphore);
				entry_semaphore=0;
			}
			/* first time: step back and try again */
			if (probe_run==0) {
				DBG("DEBUG: lock_initialize: first time "
						"semaphore allocation failure\n");
				i--;
				probe_run=1;
				continue;
			/* failure after we stepped back; give up */
			} else {
				DBG("DEBUG: lock_initialize: second time semaphore"
						" allocation failure\n");
				goto error;
			}
		}
		/* allocation succeeded */
		if (probe_run==1) { /* if ok after we stepped back, we're done */
			break;
		} else { /* if ok otherwise, try again with larger set */
			if (i==SEM_MAX) break;
			else {
				i++;
				continue;
			}
		}
	} while(1);
	sem_nr=i;

	/* reply set: same probed size; on failure shrink and re-probe */
	if (((reply_semaphore=lock_set_alloc(i))==0)||
			(lock_set_init(reply_semaphore)==0)){
		if (reply_semaphore){
			lock_set_dealloc(reply_semaphore);
			reply_semaphore=0;
		}
		DBG("DEBUG:lock_initialize: reply semaphore initialization"
				" failure: %s\n", strerror(errno));
		probe_run=1;
		i--;
		goto again;
	}
#ifdef ENABLE_ASYNC_MUTEX
	/* async set is allocated one larger than the probed size */
	i++;
	if (((async_semaphore=lock_set_alloc(i))==0)||
			(lock_set_init(async_semaphore)==0)){
		if (async_semaphore){
			lock_set_dealloc(async_semaphore);
			async_semaphore=0;
		}
		DBG("DEBUG:lock_initialize: async semaphore initialization"
				" failure: %s\n", strerror(errno));
		probe_run=1;
		i--;
		goto again;
	}
#endif
	/* return success */
	LOG(L_INFO, "INFO: semaphore arrays of size %d allocated\n", sem_nr );
#endif /* GEN_LOCK_T_PREFERED*/
	return 0;
#ifndef GEN_LOCK_T_PREFERED
error:
	lock_cleanup();
	return -1;
#endif
}
/* Initialize the locks; return 0 on success, -1 otherwise.
 *
 * The TG_NR timer-group locks are always allocated from shared memory.
 * With GEN_LOCK_T_PREFERED they are initialized in place; otherwise a
 * SysV semaphore set backs the timer groups, and the entry/reply sets
 * are probed from SEM_MIN towards SEM_MAX — stepping one size back on
 * the first failure (probe_run=1) and settling for the largest size
 * the system accepts. */
int lock_initialize(void)
{
	int i;
#ifndef GEN_LOCK_T_PREFERED
	int probe_run;
#endif

	/* first try allocating semaphore sets with fixed number of semaphores */
	LM_DBG("lock initialization started\n");

	timer_group_lock=shm_malloc(TG_NR*sizeof(ser_lock_t));
	if (timer_group_lock==0){
		LM_CRIT("no more share mem\n");
		goto error;
	}
#ifdef GEN_LOCK_T_PREFERED
	for(i=0;i<TG_NR;i++) lock_init(&timer_group_lock[i]);
#else
	/* transaction timers */
	/* NOTE(review): on lock_set_init() failure the set is destroyed but
	 * never deallocated (leak), and destroy is called on a set whose
	 * init failed — confirm against the lock API before changing */
	if (((timer_semaphore= lock_set_alloc( TG_NR ) ) == 0)||
			(lock_set_init(timer_semaphore)==0)){
		if (timer_semaphore) lock_set_destroy(timer_semaphore);
		LM_CRIT("transaction timer semaphore initialization failure: %s\n",
				strerror(errno));
		goto error;
	}

	/* point each timer-group lock at its slot in the semaphore set */
	for (i=0; i<TG_NR; i++) {
		timer_group_lock[i].semaphore_set = timer_semaphore;
		timer_group_lock[i].semaphore_index = timer_group[ i ];
	}

	i=SEM_MIN;
	/* probing phase: 0=initial, 1=after the first failure */
	probe_run=0;
again:
	do {
		/* clean-up previous attempt before re-probing */
		if (entry_semaphore!=0){ /* clean-up previous attempt */
			lock_set_destroy(entry_semaphore);
			lock_set_dealloc(entry_semaphore);
		}
		if (reply_semaphore!=0){
			lock_set_destroy(reply_semaphore);
			lock_set_dealloc(reply_semaphore);
		}
		if (i==0){
			LM_CRIT("failed allocate semaphore sets\n");
			goto error;
		}

		if (((entry_semaphore=lock_set_alloc(i))==0)||
				(lock_set_init(entry_semaphore)==0)) {
			LM_DBG("entry semaphore initialization failure: %s\n",
					strerror( errno ) );
			if (entry_semaphore){
				lock_set_dealloc(entry_semaphore);
				entry_semaphore=0;
			}
			/* first time: step back and try again */
			if (probe_run==0) {
				LM_DBG("first time semaphore allocation failure\n");
				i--;
				probe_run=1;
				continue;
			/* failure after we stepped back; give up */
			} else {
				LM_DBG("second time semaphore allocation failure\n");
				goto error;
			}
		}
		/* allocation succeeded */
		if (probe_run==1) { /* if ok after we stepped back, we're done */
			break;
		} else { /* if ok otherwise, try again with larger set */
			if (i==SEM_MAX) break;
			else {
				i++;
				continue;
			}
		}
	} while(1);

	sem_nr=i;

	/* reply set: same probed size; on failure shrink and re-probe */
	if (((reply_semaphore=lock_set_alloc(i))==0)||
			(lock_set_init(reply_semaphore)==0)){
		if (reply_semaphore){
			lock_set_dealloc(reply_semaphore);
			reply_semaphore=0;
		}
		LM_DBG("reply semaphore initialization failure: %s\n",
				strerror(errno));
		probe_run=1;
		i--;
		goto again;
	}

	/* return success */
	LM_INFO("semaphore arrays of size %d allocated\n", sem_nr );
#endif /* GEN_LOCK_T_PREFERED*/
	return 0;

error:
	lock_cleanup();
	return -1;
}