/* Tear down a virtual router: its hash table, its lock, then the router itself. */
void knet_vrouter_destroy(kvrouter_t* router)
{
	verify(router);
	verify(router->table);
	verify(router->lock);

	hash_destroy(router->table);
	lock_destroy(router->lock);
	destroy(router);
}
/*
 * Destroy and release all TCP connections, then the two hash locks.
 * Shutdown-only: must be called in a single-threaded environment, since
 * the hash buckets are walked without taking any lock.
 */
void destroy_tcp_conns(void)
{
	int slot;

	for (slot = 0; slot < TCP_HASH_SIZE; slot++) {
		struct tcp_conn *conn = hash_id_conns[slot];
		while (conn) {
			/* save the link before the node is freed */
			struct tcp_conn *next = conn->id_next;
			free_tcp_conn(conn);
			conn = next;
		}
	}

	lock_destroy(&hash_id_lock);
	lock_destroy(&hash_ip_lock);
}
/* Destroy and release the shared-memory lock, if it was ever allocated. */
static void destroy_shmlock(void)
{
	if (lock == NULL)
		return;

	lock_destroy(lock);
	lock_dealloc((void *)lock);
	lock = NULL;
}
/* Close the logger's output stream, release its lock, and free the logger. */
void logger_destroy(klogger_t* logger)
{
	verify(logger);

	if (logger->fd != NULL)
		fclose(logger->fd);

	lock_destroy(logger->lock);
	destroy(logger);
}
/*
 * Free the globally saved transaction data.
 *
 * Fix: the lock must be destroyed BEFORE its memory is deallocated; the
 * previous order (lock_dealloc, then lock_destroy) made lock_destroy
 * operate on already-freed memory (use-after-free). All other teardown
 * paths in this codebase destroy first, then dealloc.
 *
 * @param data  saved transaction block in shared memory; NULL is a no-op
 */
void free_saved_transaction_global_data(saved_transaction_t* data)
{
	if (!data)
		return;
	lock_destroy(data->lock);
	lock_dealloc(data->lock);
	shm_free(data);
}
/* Destroy and release the configuration lock, if present. */
static void destroy_shmlock(void)
{
	if (conf_lock == NULL)
		return;

	lock_destroy(conf_lock);
	lock_dealloc((void *)conf_lock);
	conf_lock = NULL;
}
/*
 * Release all TLS server state: the lock, DH parameters, certificate
 * credentials and the session cache, then scrub the structure.
 */
void tls_server_destroy(tls_server_t *server)
{
	lock_destroy(&server->lock);
	gnutls_dh_params_deinit(server->dh_params);
	gnutls_certificate_free_credentials(server->cred);
	tls_server_cache_destroy(&server->cache);
	memset(server, 0, sizeof *server);
}
/* Release the synchronization primitives used by the whale-mating problem. */
void whalemating_cleanup()
{
	sem_destroy(male_sem);
	sem_destroy(female_sem);
	cv_destroy(mate_cv);
	lock_destroy(hold);
}
/* Close the log stream (if open), tear down the lock, and free the logger. */
void logger_destroy(logger_t* logger)
{
	assert(logger);

	if (logger->fd != NULL)
		fclose(logger->fd);

	lock_destroy(logger->lock);
	destroy(logger);
}
/*
 * Destroy an abstract vnode.
 * Invoked by VOP_KILL.
 *
 * Preconditions: exactly one reference remains, no opens are outstanding,
 * and the count lock still exists.
 */
void vnode_kill(struct vnode *vn)
{
	assert(vn->vn_refcount==1);
	assert(vn->vn_opencount==0);
	assert(vn->vn_countlock!=NULL);

	lock_destroy(vn->vn_countlock);
	lock_destroy(vn->vn_dirlock);

	/* Poison every field so any stale user of the vnode faults early. */
	vn->vn_ops = NULL;
	vn->vn_refcount = 0;
	vn->vn_opencount = 0;
	vn->vn_countlock = NULL;
	vn->vn_dirlock = NULL;
	vn->vn_fs = NULL;
	vn->vn_data = NULL;
}
/* Deallocate the SEAS statistics table from shared memory; no-op when unset. */
inline void destroy_seas_stats_table(void)
{
	if (seas_stats_table == (struct statstable *)0)
		return;

	lock_destroy(seas_stats_table->mutex);
	shm_free(seas_stats_table);
	seas_stats_table = (struct statstable *)0;
}
/*
 * Destroy an address space: free every page-table entry (and its backing
 * user page, swapped or resident), delete the region list, then free the
 * addrspace itself.
 *
 * Locking: when swapping is active, either one global lock (as_lock) or a
 * per-addrspace lock (as->as_lock) guards the teardown, selected by the
 * use_big_lock / use_small_lock configuration flags.
 */
void as_destroy(struct addrspace *as)
{
	KASSERT(as != NULL);
	struct page_table_entry* cur = as->as_page_list;
	struct page_table_entry* next = NULL;

	if(use_big_lock == true && swapping_started == true)
		lock_acquire(as_lock);
	else if(use_small_lock == true && swapping_started == true)
		lock_acquire(as->as_lock);
	// spinlock_acquire(as->as_splock);
	// kprintf("as_destory called \n");

	while(cur != NULL)
	{
		next = cur->next;
		/* NOTE(review): the KASSERT forbids SWAPPING pages, yet the loop
		 * below waits for them -- the wait is unreachable unless the
		 * assert is compiled out; confirm which behavior is intended. */
		KASSERT(cur->page_state != SWAPPING);
		while(cur->page_state == SWAPPING)
		{
			// if(spinlock_do_i_hold(as->as_splock))
			// spinlock_release(as->as_splock);
			thread_yield();
		}
		// if(!spinlock_do_i_hold(as->as_splock))
		// spinlock_acquire(as->as_splock);

		/* remember whether the page lives in swap so the free routine can
		 * release the swap slot instead of a physical frame */
		bool is_swapped = false;
		if(cur->page_state == SWAPPED)
		{
			// kprintf("as destroy , swapped is true \n");
			is_swapped = true;
		}
		// kprintf("calling free user page \n");
		free_user_page(cur->vaddr,cur->paddr,as, false, is_swapped, cur->swap_pos);
		// kprintf("got out of free user page \n");
		kfree(cur);
		cur = next;
	}
	// page_list_delete(&(as->as_page_list));
	as->as_page_list = NULL;

	//this is straight forward though.
	region_list_delete(&(as->as_region_list));
	as->as_region_list = NULL;

	if(use_big_lock == true && swapping_started == true)
		lock_release(as_lock);
	else if(use_small_lock == true && swapping_started == true)
	{
		/* per-addrspace lock dies with the address space */
		lock_release(as->as_lock);
		lock_destroy(as->as_lock);
	}
	// spinlock_release(as->as_splock);
	// spinlock_cleanup(as->as_splock);
	// kfree(as->as_splock);
	kfree(as);
}
/**
 * Destroys the session related structures: the session lock and the two
 * shared-memory session id counters.
 *
 * NOTE(review): the lock is acquired and then destroyed while still held --
 * presumably to fence out any concurrent user during teardown; confirm this
 * is legal for the lock implementation in use.
 *
 * @return always 1
 */
int session_destroy()
{
	lock_get(session_lock);
	lock_destroy(session_lock);
	lock_dealloc((void*)session_lock);
	shm_free(session_id1);
	shm_free(session_id2);
	return 1;
}
/*
 * Destroy a reader/writer lock. The lock must be completely released --
 * neither held nor recursed -- before it may be torn down. The word is
 * stamped RW_DESTROYED so later misuse is detectable.
 */
void
rw_destroy(struct rwlock *rw)
{

	KASSERT(rw->rw_lock == RW_UNLOCKED, ("rw lock %p not unlocked", rw));
	KASSERT(rw->rw_recurse == 0, ("rw lock %p still recursed", rw));
	rw->rw_lock = RW_DESTROYED;
	lock_destroy(&rw->lock_object);
}
/*
 * Destroy a shared/exclusive lock. It must be fully unlocked and not
 * recursed; the lock word is stamped SX_LOCK_DESTROYED to catch misuse.
 */
void
sx_destroy(struct sx *sx)
{

	KASSERT(sx->sx_lock == SX_LOCK_UNLOCKED, ("sx lock still held"));
	KASSERT(sx->sx_recurse == 0, ("sx lock still recursed"));
	sx->sx_lock = SX_LOCK_DESTROYED;
	lock_destroy(&sx->lock_object);
}
/*!
 * \brief Destroy a dialog profile list
 * \param profile dialog profile (NULL is tolerated)
 */
static void destroy_dlg_profile(struct dlg_profile_table *profile)
{
	if (profile == NULL)
		return;

	lock_destroy(&profile->lock);
	shm_free(profile);
}
/* Tear down the worker-thread lock exactly once; safe to call when unset. */
static void destroy_lock(void)
{
	if (thread_lock == NULL)
		return;

	lock_destroy(thread_lock);
	thread_lock = NULL;
}
/* Release the kill-timer lock; nothing to do when the feature is disabled. */
void destroy_kill()
{
	/* feature disabled -> lock was never set up */
	if (time_to_kill == 0)
		return;

	lock_destroy(kill_lock);
	lock_dealloc(kill_lock);
}
/* Free a server_game and the strings it owns. */
static void delete_game_server(void* gam)
{
	server_game* g = (server_game*)gam;
	/* NOTE(review): lock_destroy() is handed the whole server_game, not an
	 * obvious lock member -- presumably the structure itself is (or begins
	 * with) its lock; confirm against the server_game definition. */
	lock_destroy(g);
	mfree(g->name);
	mfree(g->internal_ip);
	mfree(g);
}
/*
 * Free a prefix-domain translation hash: the inner hash table, the diff
 * lock, and the container itself.
 *
 * Robustness: a NULL hash is now a no-op (mirrors free_hash_list()), so
 * callers may pass the result of a failed allocation unconditionally.
 *
 * @param hash  table in shared memory; NULL is tolerated
 */
void pdt_free_hash(pdt_hash_t* hash)
{
	if (hash == NULL)
		return;
	free_hash(hash->dhash, hash->hash_size);
	lock_destroy(&hash->diff_lock);
	/* todo: destroy diff list */
	shm_free(hash);
}
/*
 * Shut down the WebSocket connection framework: free the used-connection
 * list, remove every tracked connection from the id hash, then release the
 * shared-memory hash table and both global locks.
 */
void wsconn_destroy(void)
{
	int h;

	if (wsconn_used_list) {
		shm_free(wsconn_used_list);
		wsconn_used_list = NULL;
	}

	if (wsconn_id_hash) {
		/* NOTE(review): UNLOCK before LOCK looks odd -- presumably it
		 * force-releases a lock a dying process may still hold; confirm
		 * against the WSCONN_LOCK/WSCONN_UNLOCK macro definitions. */
		WSCONN_UNLOCK;
		WSCONN_LOCK;
		for (h = 0; h < TCP_ID_HASH_SIZE; h++) {
			ws_connection_t *wsc = wsconn_id_hash[h];
			while (wsc) {
				/* save the link before the node is removed */
				ws_connection_t *next = wsc->id_next;
				_wsconn_rm(wsc);
				wsc = next;
			}
		}
		WSCONN_UNLOCK;

		shm_free(wsconn_id_hash);
		wsconn_id_hash = NULL;
	}

	if (wsconn_lock) {
		lock_destroy(wsconn_lock);
		lock_dealloc((void *) wsconn_lock);
		wsconn_lock = NULL;
	}

	if (wsstat_lock) {
		lock_destroy(wsstat_lock);
		lock_dealloc((void *) wsstat_lock);
		wsstat_lock = NULL;
	}
}
/*
 * Allocate and initialise a shared-memory subscription hash table.
 *
 * Each of the hash_size slots gets its own lock and a zeroed sentinel
 * subs_t head node. On any failure, everything allocated so far (the
 * locks and head nodes of slots [0, i)) is rolled back and NULL returned.
 *
 * @param hash_size  number of hash slots to allocate
 * @return newly allocated table in shared memory, or NULL on error
 */
shtable_t new_shtable(int hash_size)
{
	shtable_t htable= NULL;
	int i, j;
	i = 0;

	htable= (subs_entry_t*)shm_malloc(hash_size* sizeof(subs_entry_t));
	if(htable== NULL)
	{
		ERR_MEM(SHARE_MEM);
	}
	memset(htable, 0, hash_size* sizeof(subs_entry_t));

	for(i= 0; i< hash_size; i++)
	{
		if(lock_init(&htable[i].lock)== 0)
		{
			LM_ERR("initializing lock [%d]\n", i);
			goto error;
		}
		htable[i].entries= (subs_t*)shm_malloc(sizeof(subs_t));
		if(htable[i].entries== NULL)
		{
			/* slot i's lock is already initialised; destroy it here since
			 * the error path only cleans up slots [0, i) */
			lock_destroy(&htable[i].lock);
			ERR_MEM(SHARE_MEM);
		}
		memset(htable[i].entries, 0, sizeof(subs_t));
		htable[i].entries->next= NULL;
	}
	return htable;

error:
	if(htable)
	{
		for(j=0; j< i; j++)
		{
			lock_destroy(&htable[j].lock);
			shm_free(htable[j].entries);
		}
		shm_free(htable);
	}
	return NULL;
}
/* unsafe tcpconn_rm version (nolocks) -- caller must hold the hash locks */
void _tcpconn_rm(struct tcp_connection* c)
{
	/* unlink from both the address hash and the id hash */
	tcpconn_listrm(tcpconn_addr_hash[c->addr_hash], c, next, prev);
	tcpconn_listrm(tcpconn_id_hash[c->id_hash], c, id_next, id_prev);
	lock_destroy(&c->write_lock);
#ifdef USE_TLS
	/* TLS connections carry extra per-connection state to clean up */
	if (c->type==PROTO_TLS)
		tls_tcpconn_clean(c);
#endif
	shm_free(c);
}
/*!
 * \brief Destroy global ro_session timer (no-op if never initialized)
 */
void destroy_ro_timer(void)
{
	if (!roi_timer)
		return;

	lock_destroy(roi_timer->lock);
	lock_dealloc(roi_timer->lock);
	shm_free(roi_timer);
	roi_timer = 0;
}
/* Release a hash list: the contained hash (if any), its lock, the container. */
void free_hash_list(hash_list_t* hl)
{
	if (hl == NULL)
		return;

	if (hl->hash != NULL)
		free_hash(hl->hash);

	lock_destroy(&hl->hl_lock);
	shm_free(hl);
}
/*
 * Free the global IP-set list: per-entry locks, reference-counted sets,
 * pending sets, and finally the list itself.
 */
void ip_set_list_free()
{
	int idx;

	if (ip_set_list == NULL)
		return;

	for (idx = 0; idx < ip_set_list_count; idx++) {
		lock_destroy(&ip_set_list[idx].read_lock);
		lock_destroy(&ip_set_list[idx].write_lock);
		if (ip_set_list[idx].ip_set != NULL) {
			if (atomic_dec_and_test(&ip_set_list[idx].ip_set->refcnt)) {
				/* do not destroy cloned sets because if they can live only
				 * in local copy after commit, they must be deleted
				 * separately in local copy before this procedure is called */
				ip_set_destroy(&ip_set_list[idx].ip_set->ip_set);
				shm_free(ip_set_list[idx].ip_set);
			}
		}
		ip_set_destroy(&ip_set_list[idx].ip_set_pending);
	}

	shm_free(ip_set_list);
	ip_set_list = NULL;
}
/**
 * Frees the memory taken by a peer structure.
 * @param x - the peer to free (NULL tolerated)
 * @param locked - non-zero if the caller already holds the peer's lock
 */
void free_peer(peer *x, int locked)
{
	if (x == NULL)
		return;

	/* take the lock ourselves unless the caller already owns it */
	if (!locked)
		lock_get(x->lock);

	if (x->fqdn.s)
		shm_free(x->fqdn.s);
	if (x->realm.s)
		shm_free(x->realm.s);

	lock_destroy(x->lock);
	lock_dealloc((void*)x->lock);
	shm_free(x);
}
/*
 * Deallocate a remote context: close its socket and destroy its lock
 * (each only if present), then release the structure.
 */
VOID remote_deallocate( Remote * remote )
{
	if( remote->fd )
		closesocket( remote->fd );

	if( remote->lock )
		lock_destroy( remote->lock );

	free( remote );
}
/*
 * Tear down the text-database cache: free every cached connection record,
 * the cache head, the cache semaphore, and the per-slot table hash.
 *
 * @return 0 on success, -1 when the cache was never initialised
 */
int dbt_cache_destroy(void)
{
	int i;
	dbt_cache_p _dc=NULL, _dc0=NULL;
	dbt_table_p _tbc = NULL;
	dbt_table_p _tbc0 = NULL;

	if(!_dbt_cachesem)
		return -1;

	/* NOTE(review): the semaphore is taken and then destroyed while still
	 * held -- presumably to fence out concurrent users during shutdown;
	 * confirm this matches the lock API's destroy semantics. */
	lock_get(_dbt_cachesem);
	if( _dbt_cachedb!=NULL )
	{
		_dc = *_dbt_cachedb;
		while(_dc)
		{
			_dc0 = _dc;
			_dc = _dc->next;
			shm_free(_dc0->name.s);
			shm_free(_dc0);
		}
		shm_free(_dbt_cachedb);
	}
	lock_destroy(_dbt_cachesem);
	lock_dealloc(_dbt_cachesem);

	/* destroy tables' hash table */
	if(_dbt_cachetbl==0)
		return 0;
	for(i=0; i<DBT_CACHETBL_SIZE; i++)
	{
		lock_destroy(&_dbt_cachetbl[i].sem);
		_tbc = _dbt_cachetbl[i].dtp;
		while(_tbc)
		{
			_tbc0 = _tbc;
			_tbc = _tbc->next;
			dbt_table_free(_tbc0);
		}
	}
	shm_free(_dbt_cachetbl);
	return 0;
}
/*
 * Destroy a timestamped bitmap: free the bitmap and condition variable
 * under the lock, then destroy the lock and the container.
 */
void bitmap_ts_destroy(struct bitmap_ts* b)
{
	assert(b != NULL);

	lock_acquire(b->lk);
	bitmap_destroy(b->bm);
	cv_destroy(b->cv);
	lock_release(b->lk);

	lock_destroy(b->lk);
	kfree(b);
}