/* * Initialize locks */
int shvar_init_locks(void)
{
	int n;

	/* nothing to do if the lock set already exists */
	if(shvar_locks!=0)
		return 0;

	/* try the configured size first, stepping down until a set fits */
	n = shvar_locks_no;
	for (;;) {
		shvar_locks = lock_set_alloc(n);
		if (shvar_locks!=0 && lock_set_init(shvar_locks)!=0) {
			/* success: remember how many locks we actually got */
			shvar_locks_no = n;
			LM_INFO("locks array size %d\n", shvar_locks_no);
			return 0;
		}
		if (shvar_locks) {
			/* allocated but could not be initialized */
			lock_set_dealloc(shvar_locks);
			shvar_locks = 0;
		}
		n--;
		if (n==0) {
			LM_ERR("failed to allocate locks\n");
			return -1;
		}
	}
}
/*! \brief
 * Initialize locks */
int ul_init_locks(void)
{
	int n;

	/* probe downwards from the configured size until a set fits */
	n = ul_locks_no;
	for (;;) {
		ul_locks = lock_set_alloc(n);
		if (ul_locks!=0 && lock_set_init(ul_locks)!=0) {
			/* success: record the size that actually worked */
			ul_locks_no = n;
			LM_INFO("locks array size %d\n", ul_locks_no);
			return 0;
		}
		if (ul_locks) {
			/* allocated but could not be initialized */
			lock_set_dealloc(ul_locks);
			ul_locks = 0;
		}
		n--;
		if (n==0) {
			LM_ERR("failed to allocate locks\n");
			return -1;
		}
	}
}
int tls_init_multithread(void) { /* init static locks support */ tls_static_locks_no = CRYPTO_num_locks(); if (tls_static_locks_no>0) { /* init a lock set & pass locking function to SSL */ tls_static_locks = lock_set_alloc(tls_static_locks_no); if (tls_static_locks == NULL) { LM_ERR("Failed to alloc static locks\n"); return -1; } if (lock_set_init(tls_static_locks)==0) { LM_ERR("Failed to init static locks\n"); lock_set_dealloc(tls_static_locks); return -1; } CRYPTO_set_locking_callback(tls_static_locks_ops); } CRYPTO_set_id_callback(tls_get_id); /* dynamic locks support*/ CRYPTO_set_dynlock_create_callback(tls_dyn_lock_create); CRYPTO_set_dynlock_lock_callback(tls_dyn_lock_ops); CRYPTO_set_dynlock_destroy_callback(tls_dyn_lock_destroy); return 0; }
/* size must be a power of 2 */
static gen_lock_set_t* init_lock_set(int *size)
{
	gen_lock_set_t *set = 0; /* kill warnings */

	/* halve the requested size until both alloc and init succeed */
	while (*size) {
		LM_INFO("probing %d set size\n", *size);
		set = lock_set_alloc( *size );
		if (set==0) {
			LM_INFO("cannot get %d locks\n", *size);
			*size = (*size)>>1;
			continue;
		}
		if (lock_set_init(set)==0) {
			LM_INFO("cannot init %d locks\n", *size);
			lock_set_dealloc( set );
			set = 0;
			*size = (*size)>>1;
			continue;
		}
		/* alloc and init successful */
		break;
	}
	if (*size==0) {
		LM_ERR("cannot get a lock set\n");
		return 0;
	}
	return set;
}
/* returns -1 on error, 0 on success */ int tls_init_locks() { /* init "static" tls locks */ n_static_locks=CRYPTO_num_locks(); if (n_static_locks<0){ LM_CRIT("bad CRYPTO_num_locks %d\n", n_static_locks); n_static_locks=0; } if (n_static_locks){ if (CRYPTO_get_locking_callback()!=NULL) { LM_CRIT("ssl locking callback already set\n"); return -1; } static_locks=lock_set_alloc(n_static_locks); if (static_locks==0){ LM_CRIT("could not allocate lockset with %d locks\n", n_static_locks); goto error; } if (lock_set_init(static_locks)==0){ LM_CRIT("lock set init failed (%d locks)\n", n_static_locks); lock_set_dealloc(static_locks); static_locks=0; n_static_locks=0; goto error; } CRYPTO_set_locking_callback(locking_f); } /* OpenSSL is thread-safe since 1.1.0 */ #if OPENSSL_VERSION_NUMBER < 0x10100000L /* set "dynamic" locks callbacks */ CRYPTO_set_dynlock_create_callback(dyn_create_f); CRYPTO_set_dynlock_lock_callback(dyn_lock_f); CRYPTO_set_dynlock_destroy_callback(dyn_destroy_f); #endif /* starting with v1.0.0 openssl does not use anymore getpid(), but address * of errno which can point to same virtual address in a multi-process * application * - for refrence http://www.openssl.org/docs/crypto/threads.html */ CRYPTO_set_id_callback(sr_ssl_id_f); /* atomic add -- since for now we don't have atomic_add * (only atomic_inc), fallback to the default use-locks mode * CRYPTO_set_add_lock_callback(atomic_add_f); */ return 0; error: tls_destroy_locks(); return -1; }
/*! * \brief Initialize the per user transactions table * \param size size of the table * \return 0 on success, -1 on failure */ int init_ts_table(unsigned int size) { unsigned int n; unsigned int i; t_table = (struct ts_table*)shm_malloc( sizeof(struct ts_table)); if (t_table==0) { LM_ERR("no more shm mem (1)\n"); return -1; } memset( t_table, 0, sizeof(struct ts_table) ); t_table->size = size; n = (size<MAX_TS_LOCKS)?size:MAX_TS_LOCKS; for( ; n>=MIN_TS_LOCKS ; n-- ) { t_table->locks = lock_set_alloc(n); if (t_table->locks==0) continue; if (lock_set_init(t_table->locks)==0) { lock_set_dealloc(t_table->locks); t_table->locks = 0; continue; } t_table->locks_no = n; break; } if (t_table->locks==0) { LM_ERR("unable to allocted at least %d locks for the hash table\n", MIN_TS_LOCKS); goto error; } t_table->entries = (ts_entry_t*)shm_malloc(sizeof(ts_entry_t) * size); if (!t_table->entries) { LM_ERR("no more shm mem (2)\n"); goto error; } for( i=0 ; i<size; i++ ) { memset( &(t_table->entries[i]), 0, sizeof(struct ts_entry) ); t_table->entries[i].next_id = rand() % (3*size); t_table->entries[i].lock_idx = i % t_table->locks_no; } return 0; error: shm_free( t_table ); t_table = NULL; return -1; }
/* Allocate and initialize the shared pool of dynamic locks.
 * Returns 0 on success, -1 on error. */
int create_dynamic_locks(void)
{
	dynamic_locks = lock_set_alloc(lock_pool_size);
	if (!dynamic_locks) {
		LM_ERR("SHM MEMORY depleted!\n");
		return -1;
	}
	/* fix: the return value of lock_set_init() was ignored; it returns
	 * 0 (NULL) on failure, which previously left an unusable lock set
	 * installed */
	if (lock_set_init(dynamic_locks)==0) {
		LM_ERR("failed to init dynamic locks\n");
		lock_set_dealloc(dynamic_locks);
		dynamic_locks = 0;
		return -1;
	}
	return 0;
}
int lua_sr_alloc_script_ver(void) { int size = _sr_L_env.nload; sr_lua_script_ver = (sr_lua_script_ver_t *) shm_malloc(sizeof(sr_lua_script_ver_t)); if(sr_lua_script_ver==NULL) { LM_ERR("cannot allocate shm memory\n"); return -1; } sr_lua_script_ver->version = (unsigned int *) shm_malloc(sizeof(unsigned int)*size); if(sr_lua_script_ver->version==NULL) { LM_ERR("cannot allocate shm memory\n"); goto error; } memset(sr_lua_script_ver->version, 0, sizeof(unsigned int)*size); sr_lua_script_ver->len = size; if((sr_lua_locks=lock_set_alloc(size))==0) { LM_CRIT("failed to alloc lock set\n"); goto error; } if(lock_set_init(sr_lua_locks)==0 ) { LM_CRIT("failed to init lock set\n"); goto error; } return 0; error: if(sr_lua_script_ver!=NULL) { if(sr_lua_script_ver->version!=NULL) { shm_free(sr_lua_script_ver->version); sr_lua_script_ver->version = NULL; } shm_free(sr_lua_script_ver); sr_lua_script_ver = NULL; } if(sr_lua_locks!=NULL) { lock_set_destroy( sr_lua_locks ); lock_set_dealloc( sr_lua_locks ); sr_lua_locks = NULL; } return -1; }
/* returns -1 on error, 0 on success */ int tls_init_locks() { /* init "static" tls locks */ n_static_locks=CRYPTO_num_locks(); if (n_static_locks<0){ LOG(L_CRIT, "BUG: tls: tls_init_locking: bad CRYPTO_num_locks %d\n", n_static_locks); n_static_locks=0; } if (n_static_locks){ static_locks=lock_set_alloc(n_static_locks); if (static_locks==0){ LOG(L_CRIT, "ERROR: tls_init_locking: could not allocate lockset" " with %d locks\n", n_static_locks); goto error; } if (lock_set_init(static_locks)==0){ LOG(L_CRIT, "ERROR: tls_init_locking: lock_set_init failed " "(%d locks)\n", n_static_locks); lock_set_dealloc(static_locks); static_locks=0; n_static_locks=0; goto error; } CRYPTO_set_locking_callback(locking_f); } /* set "dynamic" locks callbacks */ CRYPTO_set_dynlock_create_callback(dyn_create_f); CRYPTO_set_dynlock_lock_callback(dyn_lock_f); CRYPTO_set_dynlock_destroy_callback(dyn_destroy_f); /* starting with v1.0.0 openssl does not use anymore getpid(), but address * of errno which can point to same virtual address in a multi-process * application * - for refrence http://www.openssl.org/docs/crypto/threads.html */ CRYPTO_set_id_callback(sr_ssl_id_f); /* atomic add -- since for now we don't have atomic_add * (only atomic_inc), fallback to the default use-locks mode * CRYPTO_set_add_lock_callback(atomic_add_f); */ return 0; error: tls_destroy_locks(); return -1; }
int init_dlg_table(unsigned int size) { unsigned int n; unsigned int i; d_table = (struct dlg_table*)shm_malloc ( sizeof(struct dlg_table) + size*sizeof(struct dlg_entry)); if (d_table==0) { LM_ERR("no more shm mem (1)\n"); goto error0; } memset( d_table, 0, sizeof(struct dlg_table) ); d_table->size = size; d_table->entries = (struct dlg_entry*)(d_table+1); n = (size<MAX_LDG_LOCKS)?size:MAX_LDG_LOCKS; for( ; n>=MIN_LDG_LOCKS ; n-- ) { d_table->locks = lock_set_alloc(n); if (d_table->locks==0) continue; if (lock_set_init(d_table->locks)==0) { lock_set_dealloc(d_table->locks); d_table->locks = 0; continue; } d_table->locks_no = n; break; } if (d_table->locks==0) { LM_ERR("unable to allocted at least %d locks for the hash table\n", MIN_LDG_LOCKS); goto error1; } for( i=0 ; i<size; i++ ) { memset( &(d_table->entries[i]), 0, sizeof(struct dlg_entry) ); d_table->entries[i].next_id = rand(); d_table->entries[i].lock_idx = i % d_table->locks_no; } return 0; error1: shm_free( d_table ); error0: return -1; }
/*!
 * \brief init lock set used to implement SQL REPLACE via UPDATE/INSERT
 * \param sz power of two to compute the lock set size
 * \return 0 on success, -1 on error
 */
int pg_init_lock_set(int sz)
{
	if(sz>0 && sz<=10) {
		_pg_lock_size = 1<<sz;
	} else {
		/* out-of-range exponent: fall back to 2^4 = 16 locks */
		_pg_lock_size = 1<<4;
	}
	_pg_lock_set = lock_set_alloc(_pg_lock_size);
	if(_pg_lock_set==NULL) {
		LM_ERR("cannot initiate lock set\n");
		return -1;
	}
	if(lock_set_init(_pg_lock_set)==NULL) {
		/* fix: dealloc the allocated set on init failure instead of
		 * leaking it */
		LM_ERR("cannot initiate lock set\n");
		lock_set_dealloc(_pg_lock_set);
		_pg_lock_set = NULL;
		return -1;
	}
	return 0;
}
/* returns -1 on error, 0 on success */ int tls_init_locks() { /* init "static" tls locks */ n_static_locks=CRYPTO_num_locks(); if (n_static_locks<0){ LOG(L_CRIT, "BUG: tls: tls_init_locking: bad CRYPTO_num_locks %d\n", n_static_locks); n_static_locks=0; } if (n_static_locks){ static_locks=lock_set_alloc(n_static_locks); if (static_locks==0){ LOG(L_CRIT, "ERROR: tls_init_locking: could not allocate lockset" " with %d locks\n", n_static_locks); goto error; } if (lock_set_init(static_locks)==0){ LOG(L_CRIT, "ERROR: tls_init_locking: lock_set_init failed " "(%d locks)\n", n_static_locks); lock_set_dealloc(static_locks); static_locks=0; n_static_locks=0; goto error; } CRYPTO_set_locking_callback(locking_f); } /* set "dynamic" locks callbacks */ CRYPTO_set_dynlock_create_callback(dyn_create_f); CRYPTO_set_dynlock_lock_callback(dyn_lock_f); CRYPTO_set_dynlock_destroy_callback(dyn_destroy_f); /* thread id callback: not needed because ser doesn't use thread and * openssl already uses getpid() (by default) * CRYPTO_set_id_callback(id_f); */ /* atomic add -- since for now we don't have atomic_add * (only atomic_inc), fallback to the default use-locks mode * CRYPTO_set_add_lock_callback(atomic_add_f); */ return 0; error: tls_destroy_locks(); return -1; }
/* returns 0 on success, -1 on error */
int init_atomic_ops()
{
#ifdef MEMBAR_USES_LOCK
	/* fallback memory-barrier emulation: a global lock whose
	 * acquire/release provides the barrier */
	if ((__membar_lock=lock_alloc())==0){
		goto error;
	}
	if (lock_init(__membar_lock)==0){
		lock_dealloc(__membar_lock);
		__membar_lock=0;
		goto error;
	}
	_membar_lock; /* start with the lock "taken" so that we can safely use
					 unlock/lock sequences on it later */
#endif
#ifdef ATOMIC_OPS_USE_LOCK_SET
	/* emulate atomic ops with a set of _ATOMIC_LS_SIZE locks */
	if ((_atomic_lock_set=lock_set_alloc(_ATOMIC_LS_SIZE))==0){
		goto error;
	}
	if (lock_set_init(_atomic_lock_set)==0){
		lock_set_dealloc(_atomic_lock_set);
		_atomic_lock_set=0;
		goto error;
	}
#elif defined ATOMIC_OPS_USE_LOCK
	/* emulate atomic ops with a single global lock */
	if ((_atomic_lock=lock_alloc())==0){
		goto error;
	}
	if (lock_init(_atomic_lock)==0){
		lock_dealloc(_atomic_lock);
		_atomic_lock=0;
		goto error;
	}
#endif /* ATOMIC_OPS_USE_LOCK_SET/ATOMIC_OPS_USE_LOCK */
	return 0;
	/* the error label exists only when some lock-based emulation is
	 * compiled in; otherwise nothing above can fail */
#if defined MEMBAR_USES_LOCK || defined ATOMIC_OPS_USE_LOCK || \
	defined ATOMIC_OPS_USE_LOCK_SET
error:
	destroy_atomic_ops();
	return -1;
#endif
}
struct cc_data* init_cc_data(void)
{
	struct cc_data *data;

	data = (struct cc_data*) shm_malloc( sizeof(struct cc_data) );
	if (data==NULL) {
		LM_ERR("failed to allocate shm mem\n");
		return NULL;
	}
	memset( data, 0, sizeof(struct cc_data));

	/* the global data lock */
	data->lock = lock_alloc();
	if (data->lock==NULL) {
		LM_CRIT("failed to alloc lock\n");
		goto error;
	}
	if (lock_init(data->lock)==NULL) {
		LM_CRIT("failed to init lock\n");
		goto error;
	}

	/* a fixed pool of 512 locks shared by the calls */
	data->call_locks = lock_set_alloc(512);
	if (data->call_locks==NULL) {
		LM_CRIT("failed to alloc set of call locks\n");
		goto error;
	}
	if (lock_set_init(data->call_locks)==NULL) {
		LM_CRIT("failed to init set of call locks\n");
		goto error;
	}

	return data;
error:
	/* free_cc_data() releases whatever was allocated so far */
	free_cc_data(data);
	return NULL;
}
static int mod_init(void) { /* Register RPC commands */ if (rpc_register_array(rpc_cmds)!=0) { LM_ERR("failed to register RPC commands\n"); return -1; } if(register_mi_mod(exports.name, mi_cmds)!=0) { LM_ERR("failed to register MI commands\n"); return -1; } if (!hash_file) { LM_INFO("no hash_file given, disable hash functionality\n"); } else { if (MD5File(config_hash, hash_file) != 0) { LM_ERR("could not hash the config file"); return -1; } LM_DBG("config file hash is %.*s", MD5_LEN, config_hash); } if (initial_prob > 100) { LM_ERR("invalid probability <%d>\n", initial_prob); return -1; } LM_DBG("initial probability %d percent\n", initial_prob); probability=(int *) shm_malloc(sizeof(int)); if (!probability) { LM_ERR("no shmem available\n"); return -1; } *probability = initial_prob; gflags=(unsigned int *) shm_malloc(sizeof(unsigned int)); if (!gflags) { LM_ERR(" no shmem available\n"); return -1; } *gflags=initial_gflags; gflags_lock = lock_alloc(); if (gflags_lock==0) { LM_ERR("cannot allocate gflgas lock\n"); return -1; } if (lock_init(gflags_lock)==NULL) { LM_ERR("cannot initiate gflags lock\n"); lock_dealloc(gflags_lock); return -1; } if(_cfg_lock_size>0 && _cfg_lock_size<=10) { _cfg_lock_size = 1<<_cfg_lock_size; _cfg_lock_set = lock_set_alloc(_cfg_lock_size); if(_cfg_lock_set==NULL || lock_set_init(_cfg_lock_set)==NULL) { LM_ERR("cannot initiate lock set\n"); return -1; } } return 0; }
/** * init a workers list * - pipes : communication pipes * - size : size of list - number of workers * - max : maximum number of jobs per worker * return : pointer to workers list or NULL on error */ xj_wlist xj_wlist_init(int **pipes, int size, int max, int cache_time, int sleep_time, int delay_time) { int i; xj_wlist jwl = NULL; if(pipes == NULL || size <= 0 || max <= 0) return NULL; #ifdef XJ_EXTRA_DEBUG LM_DBG("-----START-----\n"); #endif jwl = (xj_wlist)_M_SHM_MALLOC(sizeof(t_xj_wlist)); if(jwl == NULL) return NULL; jwl->len = size; jwl->maxj = max; jwl->cachet = cache_time; jwl->delayt = delay_time; jwl->sleept = sleep_time; jwl->aliases = NULL; jwl->sems = NULL; i = 0; /* alloc locks*/ if((jwl->sems = lock_set_alloc(size)) == NULL){ LM_CRIT("failed to alloc lock set\n"); goto clean; }; /* init the locks*/ if (lock_set_init(jwl->sems)==0){ LM_CRIT("failed to initialize the locks\n"); goto clean; }; jwl->workers = (xj_worker)_M_SHM_MALLOC(size*sizeof(t_xj_worker)); if(jwl->workers == NULL){ lock_set_destroy(jwl->sems); goto clean; } for(i = 0; i < size; i++) { jwl->workers[i].nr = 0; jwl->workers[i].pid = 0; jwl->workers[i].wpipe = pipes[i][1]; jwl->workers[i].rpipe = pipes[i][0]; if((jwl->workers[i].sip_ids = newtree234(xj_jkey_cmp)) == NULL){ lock_set_destroy(jwl->sems); goto clean; } } return jwl; clean: LM_DBG("error occurred -> cleaning\n"); if(jwl->sems != NULL) lock_set_dealloc(jwl->sems); if(jwl->workers != NULL) { while(i>=0) { if(jwl->workers[i].sip_ids == NULL) free2tree234(jwl->workers[i].sip_ids, xj_jkey_free_p); i--; } _M_SHM_FREE(jwl->workers); } _M_SHM_FREE(jwl); return NULL; }
/* initialize the locks; return 0 on success, -1 otherwise */
int lock_initialize()
{
#ifndef GEN_LOCK_T_PREFERED
	int i;
	int probe_run;
#endif

	/* first try allocating semaphore sets with fixed number of semaphores */
	DBG("DEBUG: lock_initialize: lock initialization started\n");
#ifndef GEN_LOCK_T_PREFERED
	i=SEM_MIN;
	/* probing phase: 0=initial, 1=after the first failure */
	probe_run=0;
again:
	do {
		if (entry_semaphore!=0){ /* clean-up previous attempt */
			lock_set_destroy(entry_semaphore);
			lock_set_dealloc(entry_semaphore);
		}
		if (reply_semaphore!=0){
			lock_set_destroy(reply_semaphore);
			lock_set_dealloc(reply_semaphore);
		}
#ifdef ENABLE_ASYNC_MUTEX
		if (async_semaphore!=0){
			lock_set_destroy(async_semaphore);
			lock_set_dealloc(async_semaphore);
		}
#endif
		if (i==0){
			/* stepped all the way down without success */
			LOG(L_CRIT, "lock_initialize: could not allocate semaphore"
					" sets\n");
			goto error;
		}
		if (((entry_semaphore=lock_set_alloc(i))==0)||
				(lock_set_init(entry_semaphore)==0)) {
			DBG("DEBUG: lock_initialize: entry semaphore "
					"initialization failure: %s\n", strerror( errno ) );
			if (entry_semaphore){
				lock_set_dealloc(entry_semaphore);
				entry_semaphore=0;
			}
			/* first time: step back and try again */
			if (probe_run==0) {
				DBG("DEBUG: lock_initialize: first time "
						"semaphore allocation failure\n");
				i--;
				probe_run=1;
				continue;
				/* failure after we stepped back; give up */
			} else {
				DBG("DEBUG: lock_initialize: second time semaphore"
						" allocation failure\n");
				goto error;
			}
		}
		/* allocation succeeded */
		if (probe_run==1) { /* if ok after we stepped back, we're done */
			break;
		} else { /* if ok otherwise, try again with larger set */
			if (i==SEM_MAX) break;
			else {
				i++;
				continue;
			}
		}
	} while (1);
	sem_nr=i;

	/* reply semaphores: allocate a set of the size that just worked; on
	 * failure, step back by one and restart the whole probing ("again") */
	if (((reply_semaphore=lock_set_alloc(i))==0)||
			(lock_set_init(reply_semaphore)==0)){
		if (reply_semaphore){
			lock_set_dealloc(reply_semaphore);
			reply_semaphore=0;
		}
		DBG("DEBUG:lock_initialize: reply semaphore initialization"
				" failure: %s\n", strerror(errno));
		probe_run=1;
		i--;
		goto again;
	}
#ifdef ENABLE_ASYNC_MUTEX
	/* async semaphores: one larger than the reply set; same
	 * retry-by-restart strategy on failure */
	i++;
	if (((async_semaphore=lock_set_alloc(i))==0)||
			(lock_set_init(async_semaphore)==0)){
		if (async_semaphore){
			lock_set_dealloc(async_semaphore);
			async_semaphore=0;
		}
		DBG("DEBUG:lock_initialize: async semaphore initialization"
				" failure: %s\n", strerror(errno));
		probe_run=1;
		i--;
		goto again;
	}
#endif
	/* return success */
	LOG(L_INFO, "INFO: semaphore arrays of size %d allocated\n", sem_nr );
#endif /* GEN_LOCK_T_PREFERED*/
	return 0;
#ifndef GEN_LOCK_T_PREFERED
error:
	lock_cleanup();
	return -1;
#endif
}
/* set up the destination blacklist: shm bookkeeping, hash table, the
 * compile-time-selected locking scheme and the clean-up timer;
 * returns 0 on success, negative (E_OUT_OF_MEM or -1) on error */
int init_dst_blacklist()
{
	int ret;
#ifdef BLST_LOCK_PER_BUCKET
	int r;
#endif

	if (dst_blacklist_init==0) {
		/* the dst blacklist is turned off */
		default_core_cfg.use_dst_blacklist=0;
		return 0;
	}
	ret=-1;
#ifdef DST_BLACKLIST_HOOKS
	if (init_blacklist_hooks()!=0){
		ret=E_OUT_OF_MEM;
		goto error;
	}
#endif
	/* shared counter tracking the memory used by blacklist entries */
	blst_mem_used=shm_malloc(sizeof(*blst_mem_used));
	if (blst_mem_used==0){
		ret=E_OUT_OF_MEM;
		goto error;
	}
	*blst_mem_used=0;
	dst_blst_hash=shm_malloc(sizeof(struct dst_blst_lst_head) *
								DST_BLST_HASH_SIZE);
	if (dst_blst_hash==0){
		ret=E_OUT_OF_MEM;
		goto error;
	}
	memset(dst_blst_hash, 0, sizeof(struct dst_blst_lst_head) *
								DST_BLST_HASH_SIZE);
	/* locking scheme chosen at compile time: per-bucket locks,
	 * a shared lock set, or one global lock */
#ifdef BLST_LOCK_PER_BUCKET
	for (r=0; r<DST_BLST_HASH_SIZE; r++){
		if (lock_init(&dst_blst_hash[r].lock)==0){
			ret=-1;
			goto error;
		}
	}
#elif defined BLST_LOCK_SET
	blst_lock_set=lock_set_alloc(DST_BLST_HASH_SIZE);
	if (blst_lock_set==0){
		ret=E_OUT_OF_MEM;
		goto error;
	}
	if (lock_set_init(blst_lock_set)==0){
		lock_set_dealloc(blst_lock_set);
		blst_lock_set=0;
		ret=-1;
		goto error;
	}
#else /* BLST_ONE_LOCK */
	blst_lock=lock_alloc();
	if (blst_lock==0){
		ret=E_OUT_OF_MEM;
		goto error;
	}
	if (lock_init(blst_lock)==0){
		lock_dealloc(blst_lock);
		blst_lock=0;
		ret=-1;
		goto error;
	}
#endif /* BLST*LOCK*/
	blst_timer_h=timer_alloc();
	if (blst_timer_h==0){
		ret=E_OUT_OF_MEM;
		goto error;
	}
	/* fix options */
	default_core_cfg.blst_max_mem<<=10; /* in Kb */ /* TODO: test with 0 */
	if (blst_timer_interval){
		/* periodic clean-up runs on the slow timer */
		timer_init(blst_timer_h, blst_timer, 0 ,0); /* slow timer */
		if (timer_add(blst_timer_h, S_TO_TICKS(blst_timer_interval))<0){
			LOG(L_CRIT, "BUG: init_dst_blacklist: failed to add the timer\n");
			timer_free(blst_timer_h);
			blst_timer_h=0;
			goto error;
		}
	}
	return 0;
error:
	destroy_dst_blacklist();
	return ret;
}
/* initialize the locks; return 0 on success, -1 otherwise */
int lock_initialize(void)
{
	int i;
#ifndef GEN_LOCK_T_PREFERED
	int probe_run;
#endif

	/* first try allocating semaphore sets with fixed number of semaphores */
	LM_DBG("lock initialization started\n");

	/* one lock per timer group */
	timer_group_lock=shm_malloc(TG_NR*sizeof(ser_lock_t));
	if (timer_group_lock==0){
		LM_CRIT("no more share mem\n");
		goto error;
	}
#ifdef GEN_LOCK_T_PREFERED
	for(i=0;i<TG_NR;i++)
		lock_init(&timer_group_lock[i]);
#else
	/* transaction timers */
	if (((timer_semaphore= lock_set_alloc( TG_NR ) ) == 0)||
			(lock_set_init(timer_semaphore)==0)){
		if (timer_semaphore)
			lock_set_destroy(timer_semaphore);
		LM_CRIT("transaction timer semaphore initialization failure: %s\n",
				strerror(errno));
		goto error;
	}
	/* point each timer group at its semaphore in the shared set */
	for (i=0; i<TG_NR; i++) {
		timer_group_lock[i].semaphore_set = timer_semaphore;
		timer_group_lock[i].semaphore_index = timer_group[ i ];
	}

	i=SEM_MIN;
	/* probing phase: 0=initial, 1=after the first failure */
	probe_run=0;
again:
	do {
		if (entry_semaphore!=0){ /* clean-up previous attempt */
			lock_set_destroy(entry_semaphore);
			lock_set_dealloc(entry_semaphore);
		}
		if (reply_semaphore!=0){
			lock_set_destroy(reply_semaphore);
			lock_set_dealloc(reply_semaphore);
		}
		if (i==0){
			/* stepped all the way down without success */
			LM_CRIT("failed allocate semaphore sets\n");
			goto error;
		}
		if (((entry_semaphore=lock_set_alloc(i))==0)||
				(lock_set_init(entry_semaphore)==0)) {
			LM_DBG("entry semaphore initialization failure: %s\n",
					strerror( errno ) );
			if (entry_semaphore){
				lock_set_dealloc(entry_semaphore);
				entry_semaphore=0;
			}
			/* first time: step back and try again */
			if (probe_run==0) {
				LM_DBG("first time semaphore allocation failure\n");
				i--;
				probe_run=1;
				continue;
				/* failure after we stepped back; give up */
			} else {
				LM_DBG("second time semaphore allocation failure\n");
				goto error;
			}
		}
		/* allocation succeeded */
		if (probe_run==1) { /* if ok after we stepped back, we're done */
			break;
		} else { /* if ok otherwise, try again with larger set */
			if (i==SEM_MAX) break;
			else {
				i++;
				continue;
			}
		}
	}
	while(1);
	sem_nr=i;

	/* reply semaphores: allocate a set of the size that just worked; on
	 * failure, step back by one and restart the whole probing ("again") */
	if (((reply_semaphore=lock_set_alloc(i))==0)||
			(lock_set_init(reply_semaphore)==0)){
		if (reply_semaphore){
			lock_set_dealloc(reply_semaphore);
			reply_semaphore=0;
		}
		LM_DBG("reply semaphore initialization failure: %s\n",
				strerror(errno));
		probe_run=1;
		i--;
		goto again;
	}

	/* return success */
	LM_INFO("semaphore arrays of size %d allocated\n", sem_nr );
#endif /* GEN_LOCK_T_PREFERED*/
	return 0;
error:
	lock_cleanup();
	return -1;
}
int init_rl_table(unsigned int size) { unsigned int i; rl_htable.maps = shm_malloc(sizeof(map_t) * size); if (!rl_htable.maps) { LM_ERR("no more shm memory\n"); return -1; } memset(rl_htable.maps, 0, sizeof(map_t) * size); for (i = 0; i < size; i++) { rl_htable.maps[i] = map_create(AVLMAP_SHARED); if (!rl_htable.maps[i]) { LM_ERR("cannot create map index %d\n", i); goto error; } rl_htable.size++; } if (!rl_default_algo_s.s) { LM_ERR("Default algorithm was not specified\n"); return -1; } /* resolve the default algorithm */ rl_default_algo = get_rl_algo(rl_default_algo_s); if (rl_default_algo < 0) { LM_ERR("unknown algoritm <%.*s>\n", rl_default_algo_s.len, rl_default_algo_s.s); return -1; } LM_DBG("default algorithm is %.*s [ %d ]\n", rl_default_algo_s.len, rl_default_algo_s.s, rl_default_algo); /* if at least 25% of the size locks can't be allocated * we return an error */ for ( i = size; i > size / 4; i--) { rl_htable.locks = lock_set_alloc(i); if (!rl_htable.locks) continue; if (!lock_set_init(rl_htable.locks)) { lock_set_dealloc(rl_htable.locks); rl_htable.locks = 0; continue; } break; } if (!rl_htable.locks) { LM_ERR("unable to allocted at least %d locks for the hash table\n", size/4); goto error; } rl_htable.locks_no = i; LM_DBG("%d locks allocated for %d hashsize\n", rl_htable.locks_no, rl_htable.size); return 0; error: mod_destroy(); return -1; }