/** Release memory subsystems before exiting; if show_status==1, a memory
 * status report is logged first. */
void destroy_memory(int show_status)
{
	/* force-unlock the shared memory lock: if a process crashed while
	 * holding it, this still allows an almost graceful shutdown */
	if (mem_lock)
		shm_unlock();

#ifdef SHM_MEM
	if (show_status) {
		LOG(memlog, "Memory status (shm):\n");
#ifndef SER_MOD_INTERFACE
		shm_sums();
#endif
	}
	/* zero all shmem alloc vars that we still use */
#ifndef WHARF
	shm_mem_destroy();
#endif
#endif

#ifdef PKG_MALLOC
	if (show_status) {
		LOG(memlog, "Memory status (pkg):\n");
#ifndef SER_MOD_INTERFACE
		pkg_sums();
#endif
	}
#endif
}
/**
 * Clean up on exit. This should be called before exiting.
 * \param show_status set to one to display the mem status
 */
void cleanup(int show_status)
{
	LM_INFO("cleanup\n");
	/*clean-up*/

	/* hack: force-unlock the shared memory lock(s) in case some process
	 * crashed and left them locked; this will allow an almost graceful
	 * shutdown */
	if (mem_lock)
#ifdef HP_MALLOC
	{
		/* HP_MALLOC splits the shm lock into a per-hash-bucket array,
		 * so each bucket lock must be force-unlocked individually */
		int i;

		for (i = 0; i < HP_HASH_SIZE; i++)
			shm_unlock(i);
	}
#else
		shm_unlock();
#endif

	handle_ql_shutdown();
	/* modules are torn down first, while timers/TCP/stats they may rely
	 * on are still alive; the memory pools go away last */
	destroy_modules();
#ifdef USE_TCP
	destroy_tcp();
#endif
#ifdef USE_TLS
	destroy_tls();
#endif
	destroy_timer();
	destroy_stats_collector();
	destroy_script_cb();
	pv_free_extra_list();
	destroy_argv_list();
	destroy_black_lists();

#ifdef PKG_MALLOC
	if (show_status){
		LM_GEN1(memdump, "Memory status (pkg):\n");
		pkg_status();
	}
#endif
#ifdef SHM_MEM
	cleanup_debug();
	/* the process table lives in shm and must be freed before the shm
	 * pool itself is destroyed */
	if (pt) shm_free(pt);
	pt=0;
	if (show_status){
		LM_GEN1(memdump, "Memory status (shm):\n");
		shm_status();
	}
	/* zero all shmem alloc vars that we still use */
	shm_mem_destroy();
#endif

	/* remove the PID files, if any were configured */
	if (pid_file) unlink(pid_file);
	if (pgid_file) unlink(pgid_file);
}
/**
 * Clean up on exit. This should be called before exiting.
 * \param show_status set to one to display the mem status
 */
void cleanup(int show_status)
{
	LM_INFO("cleanup\n");
	/*clean-up*/

	/* hack: force-unlock the shared memory lock in case some process
	 * crashed and left it locked; this will allow an almost graceful
	 * shutdown */
	if (mem_lock)
		shm_unlock();

	handle_ql_shutdown();
	/* modules are torn down first, while timers/TCP/stats they may rely
	 * on are still alive; the memory pools go away last */
	destroy_modules();
#ifdef USE_TCP
	destroy_tcp();
#endif
#ifdef USE_TLS
	destroy_tls();
#endif
	destroy_timer();
	destroy_stats_collector();
	destroy_script_cb();
	pv_free_extra_list();
	destroy_argv_list();
	destroy_black_lists();

#ifdef CHANGEABLE_DEBUG_LEVEL
	/* if the debug level was relocated to shm, copy the current value
	 * back into the static var and free the shm copy before the shm
	 * pool is destroyed */
	if (debug!=&debug_init) {
		reset_proc_debug_level();
		debug_init = *debug;
		shm_free(debug);
		debug = &debug_init;
	}
#endif

#ifdef PKG_MALLOC
	if (show_status){
		LM_GEN1(memdump, "Memory status (pkg):\n");
		pkg_status();
	}
#endif
#ifdef SHM_MEM
	/* the process table lives in shm and must be freed before the shm
	 * pool itself is destroyed */
	if (pt) shm_free(pt);
	pt=0;
	if (show_status){
		LM_GEN1(memdump, "Memory status (shm):\n");
		shm_status();
	}
	/* zero all shmem alloc vars that we still use */
	shm_mem_destroy();
#endif

	/* remove the PID files, if any were configured */
	if (pid_file) unlink(pid_file);
	if (pgid_file) unlink(pgid_file);
}
/* call it before exiting; if show_status==1, mem status is displayed */ void cleanup(show_status) { /*clean-up*/ if (mem_lock) shm_unlock(); /* hack: force-unlock the shared memory lock in case some process crashed and let it locked; this will allow an almost gracious shutdown */ destroy_modules(); #ifdef USE_TCP destroy_tcp(); #endif #ifdef USE_TLS destroy_tls(); #endif destroy_timer(); close_unixsock_server(); destroy_fifo(); destroy_script_cb(); #ifdef PKG_MALLOC if (show_status){ LOG(memlog, "Memory status (pkg):\n"); pkg_status(); } #endif #ifdef SHM_MEM if (pt) shm_free(pt); pt=0; if (show_status){ LOG(memlog, "Memory status (shm):\n"); shm_status(); } /* zero all shmem alloc vars that we still use */ shm_mem_destroy(); #endif if (pid_file) unlink(pid_file); if (pgid_file) unlink(pgid_file); }
int shm_getmem(void) { #ifdef SHM_MMAP int fd; #else struct shmid_ds shm_info; #endif #ifdef SHM_MMAP if (shm_mempool && (shm_mempool!=(void*)-1)){ #else if ((shm_shmid!=-1)||(shm_mempool!=(void*)-1)){ #endif LM_CRIT("shm already initialized\n"); return -1; } #ifdef SHM_MMAP #ifdef USE_ANON_MMAP shm_mempool=mmap(0, shm_mem_size, PROT_READ|PROT_WRITE, MAP_ANON|MAP_SHARED, -1 ,0); #else fd=open("/dev/zero", O_RDWR); if (fd==-1){ LM_CRIT("could not open /dev/zero: %s\n", strerror(errno)); return -1; } shm_mempool=mmap(0, shm_mem_size, PROT_READ|PROT_WRITE, MAP_SHARED, fd ,0); /* close /dev/zero */ close(fd); #endif /* USE_ANON_MMAP */ #else shm_shmid=shmget(IPC_PRIVATE, /* SHM_MEM_SIZE */ shm_mem_size , 0700); if (shm_shmid==-1){ LM_CRIT("could not allocate shared memory segment: %s\n", strerror(errno)); return -1; } shm_mempool=shmat(shm_shmid, 0, 0); #endif if (shm_mempool==(void*)-1){ LM_CRIT("could not attach shared memory segment: %s\n", strerror(errno)); /* destroy segment*/ shm_mem_destroy(); return -1; } return 0; } int shm_mem_init_mallocs(void* mempool, unsigned long pool_size) { /* init it for malloc*/ shm_block=shm_malloc_init(mempool, pool_size); if (shm_block==0){ LM_CRIT("could not initialize shared malloc\n"); shm_mem_destroy(); return -1; } mem_lock=shm_malloc_unsafe(sizeof(gen_lock_t)); /* skip lock_alloc, race cond*/ if (mem_lock==0){ LM_CRIT("could not allocate lock\n"); shm_mem_destroy(); return -1; } if (lock_init(mem_lock)==0){ LM_CRIT("could not initialize lock\n"); shm_mem_destroy(); return -1; } #ifdef STATISTICS if (event_shm_threshold) { event_shm_last=shm_malloc_unsafe(sizeof(long)); if (event_shm_last==0){ LM_CRIT("could not allocate shm last event indicator\n"); shm_mem_destroy(); return -1; } *event_shm_last=0; event_shm_pending=shm_malloc_unsafe(sizeof(int)); if (event_shm_pending==0){ LM_CRIT("could not allocate shm peinding flags\n"); shm_mem_destroy(); return -1; } *event_shm_pending=0; } #endif /* STATISTICS */ LM_DBG("success\n"); 
return 0; }
int shm_getmem(void) { #ifdef SHM_MMAP int fd; #else struct shmid_ds shm_info; #endif #ifdef SHM_MMAP if (shm_mempool && (shm_mempool!=(void*)-1)){ #else if ((shm_shmid!=-1)||(shm_mempool!=(void*)-1)){ #endif LM_CRIT("shm already initialized\n"); return -1; } #ifdef SHM_MMAP #ifdef USE_ANON_MMAP shm_mempool=mmap(0, shm_mem_size, PROT_READ|PROT_WRITE, MAP_ANON|MAP_SHARED, -1 ,0); #else fd=open("/dev/zero", O_RDWR); if (fd==-1){ LM_CRIT("could not open /dev/zero: %s\n", strerror(errno)); return -1; } shm_mempool=mmap(0, shm_mem_size, PROT_READ|PROT_WRITE, MAP_SHARED, fd ,0); /* close /dev/zero */ close(fd); #endif /* USE_ANON_MMAP */ #else shm_shmid=shmget(IPC_PRIVATE, /* SHM_MEM_SIZE */ shm_mem_size , 0700); if (shm_shmid==-1){ LM_CRIT("could not allocate shared memory segment: %s\n", strerror(errno)); return -1; } shm_mempool=shmat(shm_shmid, 0, 0); #endif if (shm_mempool==(void*)-1){ LM_CRIT("could not attach shared memory segment: %s\n", strerror(errno)); /* destroy segment*/ shm_mem_destroy(); return -1; } return 0; } int shm_mem_init_mallocs(void* mempool, unsigned long pool_size) { #ifdef HP_MALLOC int i; #endif /* init it for malloc*/ shm_block = shm_malloc_init(mempool, pool_size, "shm"); if (!shm_block){ LM_CRIT("could not initialize shared malloc\n"); shm_mem_destroy(); return -1; } #ifdef SHM_EXTRA_STATS int size_prealoc, j, one_full_entry, groups; char *start; if(mem_free_idx != 1){ #ifndef SHM_SHOW_DEFAULT_GROUP groups = mem_free_idx - 1; #else groups = mem_free_idx; #endif one_full_entry = 3 * (sizeof(stat_var) + sizeof(stat_val)); size_prealoc = groups * sizeof(struct module_info) + groups * one_full_entry; #ifndef DBG_MALLOC memory_mods_stats = MY_MALLOC_UNSAFE(shm_block, size_prealoc); #else memory_mods_stats = MY_MALLOC_UNSAFE(shm_block, size_prealoc, __FILE__, __FUNCTION__, __LINE__ ); #endif if(!memory_mods_stats){ LM_CRIT("could not alloc shared memory"); return -1; } memset( (void*)memory_mods_stats, 0, size_prealoc); start = 
(char*)memory_mods_stats + groups * sizeof(struct module_info); for(j = 0; j < groups; j++){ memory_mods_stats[j].fragments = (stat_var *)(start + j * one_full_entry); memory_mods_stats[j].memory_used = (stat_var *)(start + j * one_full_entry + sizeof(stat_var)); memory_mods_stats[j].real_used = (stat_var *)(start + j * one_full_entry + 2 * sizeof(stat_var)); memory_mods_stats[j].fragments->u.val = (stat_val*)(start + j * one_full_entry + 3 * sizeof(stat_var)); memory_mods_stats[j].memory_used->u.val = (stat_val*)(start + j * one_full_entry + 3 * sizeof(stat_var) + sizeof(stat_val)); memory_mods_stats[j].real_used->u.val = (stat_val*)(start + j * one_full_entry + 3 * sizeof(stat_var) + 2 * sizeof(stat_val)); } #ifndef SHM_SHOW_DEFAULT_GROUP if(core_index != 0){ update_stat(memory_mods_stats[core_index - 1].fragments, 1); update_stat(memory_mods_stats[core_index - 1].memory_used, size_prealoc); update_stat(memory_mods_stats[core_index - 1].real_used, size_prealoc + FRAG_OVERHEAD); } #else update_stat(memory_mods_stats[core_index].fragments, 1); update_stat(memory_mods_stats[core_index].memory_used, size_prealoc); update_stat(memory_mods_stats[core_index].real_used, size_prealoc + FRAG_OVERHEAD); #endif } #endif #ifdef HP_MALLOC /* lock_alloc cannot be used yet! 
*/ mem_lock = shm_malloc_unsafe(HP_TOTAL_HASH_SIZE * sizeof *mem_lock); if (!mem_lock) { LM_CRIT("could not allocate the shm lock array\n"); shm_mem_destroy(); return -1; } for (i = 0; i < HP_TOTAL_HASH_SIZE; i++) if (!lock_init(&mem_lock[i])) { LM_CRIT("could not initialize lock\n"); shm_mem_destroy(); return -1; } mem_hash_usage = shm_malloc_unsafe(HP_TOTAL_HASH_SIZE * sizeof *mem_hash_usage); if (!mem_hash_usage) { LM_ERR("failed to allocate statistics array\n"); return -1; } memset(mem_hash_usage, 0, HP_TOTAL_HASH_SIZE * sizeof *mem_hash_usage); #else mem_lock = shm_malloc_unsafe(sizeof *mem_lock); if (!mem_lock) { LM_CRIT("could not allocate the shm lock\n"); shm_mem_destroy(); return -1; } if (!lock_init(mem_lock)) { LM_CRIT("could not initialize lock\n"); shm_mem_destroy(); return -1; } #endif #ifdef STATISTICS if (event_shm_threshold) { event_shm_last=shm_malloc_unsafe(sizeof(long)); if (event_shm_last==0){ LM_CRIT("could not allocate shm last event indicator\n"); shm_mem_destroy(); return -1; } *event_shm_last=0; event_shm_pending=shm_malloc_unsafe(sizeof(int)); if (event_shm_pending==0){ LM_CRIT("could not allocate shm pending flags\n"); shm_mem_destroy(); return -1; } *event_shm_pending=0; } #endif /* STATISTICS */ LM_DBG("success\n"); return 0; }
/**
 * Create and attach the shared memory pool: either an mmap() mapping
 * (anonymous or /dev/zero-backed) or a SysV IPC segment, depending on
 * SHM_MMAP / USE_ANON_MMAP.
 * Returns 0 on success, -1 on failure (already initialized, or the
 * mapping/segment could not be created or attached).
 */
int shm_getmem()
{

#ifdef SHM_MMAP
#ifndef USE_ANON_MMAP
	int fd;
#endif
#else
	/* NOTE(review): shm_info is not referenced in this function —
	 * presumably kept for historical reasons; confirm before removing */
	struct shmid_ds shm_info;
#endif

	/* refuse double initialization */
#ifdef SHM_MMAP
	if (shm_mempool && (shm_mempool!=(void*)-1)){
#else
	if ((shm_shmid!=-1)||(shm_mempool!=(void*)-1)){
#endif
		LOG(L_CRIT, "BUG: shm_mem_init: shm already initialized\n");
		return -1;
	}

#ifdef SHM_MMAP
#ifdef USE_ANON_MMAP
	shm_mempool=mmap(0, shm_mem_size, PROT_READ|PROT_WRITE,
					MAP_ANON|MAP_SHARED, -1 ,0);
#else
	fd=open("/dev/zero", O_RDWR);
	if (fd==-1){
		LOG(L_CRIT, "ERROR: shm_mem_init: could not open /dev/zero: %s\n",
				strerror(errno));
		return -1;
	}
	shm_mempool=mmap(0, shm_mem_size, PROT_READ|PROT_WRITE,
					MAP_SHARED, fd ,0);
	/* close /dev/zero */
	close(fd);
#endif /* USE_ANON_MMAP */
#else
	shm_shmid=shmget(IPC_PRIVATE, /* SHM_MEM_SIZE */ shm_mem_size , 0700);
	if (shm_shmid==-1){
		LOG(L_CRIT, "ERROR: shm_mem_init: could not allocate shared memory"
				" segment: %s\n", strerror(errno));
		return -1;
	}
	shm_mempool=shmat(shm_shmid, 0, 0);
#endif
	/* both mmap() and shmat() report failure as (void*)-1 */
	if (shm_mempool==(void*)-1){
		LOG(L_CRIT, "ERROR: shm_mem_init: could not attach shared memory"
				" segment: %s\n", strerror(errno));
		/* destroy segment*/
		shm_mem_destroy();
		return -1;
	}
	return 0;
}



/**
 * Initialize the shared memory allocator on top of the attached pool
 * and allocate + init the global shm lock. On any failure the whole
 * shm area is destroyed.
 * Returns 0 on success, -1 on failure.
 */
int shm_mem_init_mallocs(void* mempool, unsigned long pool_size)
{
	/* init it for malloc*/
	shm_block=shm_malloc_init(mempool, pool_size);
	if (shm_block==0){
		LOG(L_CRIT, "ERROR: shm_mem_init: could not initialize shared"
				" malloc\n");
		shm_mem_destroy();
		return -1;
	}
	/* the lock must live in shm, but lock_alloc cannot be used yet
	 * (race condition), so grab it with the unsafe allocator */
	mem_lock=shm_malloc_unsafe(sizeof(gen_lock_t));
	if (mem_lock==0){
		LOG(L_CRIT, "ERROR: shm_mem_init: could not allocate lock\n");
		shm_mem_destroy();
		return -1;
	}
	if (lock_init(mem_lock)==0){
		LOG(L_CRIT, "ERROR: shm_mem_init: could not initialize lock\n");
		shm_mem_destroy();
		return -1;
	}

	DBG("shm_mem_init: success\n");

	return 0;
}
/**
 * Shutdown the CDiameterPeer nicely.
 * It stops the workers, disconnects peers, drops timers and waits for all
 * processes to exit. Safe against concurrent invocation: the first caller
 * wins, later callers return immediately.
 */
void diameter_peer_destroy()
{
	int pid,status;
	handler *h;

	/* only one process may run the teardown: the shutdownx flag is
	 * checked and set under shutdownx_lock */
	lock_get(shutdownx_lock);
	if (*shutdownx) {
		/* already other process is cleaning stuff */
		lock_release(shutdownx_lock);
		return;
	}else {
		/* indicating that we are shutting down */
		*shutdownx = 1;
		lock_release(shutdownx_lock);
	}

	/* wake up the workers so they notice the shutdown flag */
	worker_poison_queue();

	/* wait for all children to clean up nicely (acceptor, receiver,
	 * timer, workers) */
	LOG(L_INFO,"INFO:destroy_diameter_peer(): Terminating all childs...\n");
	while(pid_list->tail){
		pid = dp_last_pid();
		/* skip bogus entries and our own pid */
		if (pid<=0||pid==getpid()){
			dp_del_pid(pid);
			continue;
		}
		LOG(L_INFO,"INFO:destroy_diameter_peer(): Waiting for child [%d] to terminate...\n",pid);
		if (waitpid(pid,&status,0)<0){
			dp_del_pid(pid);
			continue;
		}
		if (!WIFEXITED(status) /*|| WIFSIGNALED(status)*/){
			/* child did not exit normally: poison the queue again and
			 * give it another chance before re-checking */
			worker_poison_queue();
			sleep(1);
		} else {
			dp_del_pid(pid);
		}
	}
	LOG(L_INFO,"INFO:destroy_diameter_peer(): All processes terminated. Cleaning up.\n");

	/* clean up the timer */
	timer_cdp_destroy();

	/* cleaning up workers */
	worker_destroy();

	/* cleaning peer_manager */
	peer_manager_destroy();

	/* cleaning up sessions */
	session_destroy();

	/* cleaning up global vars */
/*	lock_get(pid_list_lock);*/
	shm_free(dp_first_pid);
	shm_free(pid_list);
	lock_destroy(pid_list_lock);
	lock_dealloc((void*)pid_list_lock);

	shm_free(shutdownx);
	lock_destroy(shutdownx_lock);
	lock_dealloc((void*)shutdownx_lock);

	/* free the registered handler list under its lock, then drop the
	 * lock itself */
	lock_get(handlers_lock);
	while(handlers->head){
		h = handlers->head->next;
		shm_free(handlers->head);
		handlers->head = h;
	}
	lock_destroy(handlers_lock);
	lock_dealloc((void*)handlers_lock);
	shm_free(handlers);

	free_dp_config(config);
	LOG(L_CRIT,"INFO:destroy_diameter_peer(): Bye Bye from C Diameter Peer test\n");

	/* when running standalone (not inside SER), dump memory status and
	 * destroy the shm pool ourselves */
#ifndef CDP_FOR_SER
#ifdef PKG_MALLOC
	LOG(memlog, "Memory status (pkg):\n");
	//pkg_status();
#ifdef pkg_sums
	pkg_sums();
#endif
#endif
#ifdef SHM_MEM
	LOG(memlog, "Memory status (shm):\n");
	//shm_status();
#ifdef shm_sums
	shm_sums();
#endif
	/* zero all shmem alloc vars that we still use */
	shm_mem_destroy();
#endif
#endif
}