/* Destroy a select-thread event loop object and release its two mutexes. */
static void event_free(ACL_EVENT *eventp)
{
	const char *myname = "event_free";
	EVENT_SELECT_THR *event_thr;

	/* A NULL event object is a programming error: abort loudly. */
	if (eventp == NULL)
		acl_msg_fatal("%s, %s(%d): eventp null",
		    __FILE__, myname, __LINE__);

	event_thr = (EVENT_SELECT_THR *) eventp;
	LOCK_DESTROY(&event_thr->event.tm_mutex);
	LOCK_DESTROY(&event_thr->event.tb_mutex);
	acl_myfree(eventp);
}
/*
 * Tear down a booster fd table: snapshot all live fds while holding the
 * table lock, drop one reference on each fd outside the lock, then
 * destroy the lock and free the table itself.
 *
 * Fix: the loop index was a signed `int` compared against the unsigned
 * `fd_count`; it is now unsigned to match.
 *
 * NOTE(review): __booster_fdtable_get_all_fds is assumed to return a
 * freshly allocated snapshot owned by the caller — confirm against its
 * implementation.
 */
void
booster_fdtable_destroy (booster_fdtable_t *fdtable)
{
        fd_t  *fd = NULL;
        fd_t **fds = NULL;
        uint   fd_count = 0;
        uint   i = 0;   /* unsigned: matches fd_count */

        if (!fdtable)
                return;

        LOCK (&fdtable->lock);
        {
                fds = __booster_fdtable_get_all_fds (fdtable, &fd_count);
                FREE (fdtable->fds);
        }
        UNLOCK (&fdtable->lock);

        if (!fds)
                goto free_table;

        /* Unref outside the table lock; fd_unref may take other locks. */
        for (i = 0; i < fd_count; i++) {
                fd = fds[i];
                if (fd != NULL)
                        fd_unref (fd);
        }
        FREE (fds);

free_table:
        LOCK_DESTROY (&fdtable->lock);
        FREE (fdtable);
}
/*
 * Destroy a client table.  Detach the entry array under the lock, unref
 * every live client outside the lock, then free everything.
 *
 * Fix: the original destroyed the table lock and freed `clienttable`
 * only inside the `cliententries != NULL` branch, leaking the table
 * (and leaving its lock alive) whenever the entry array was NULL.
 * The table itself is now always released.
 */
void
gf_client_clienttable_destroy (clienttable_t *clienttable)
{
        client_t      *client = NULL;
        cliententry_t *cliententries = NULL;
        uint32_t       client_count = 0;
        uint32_t       i = 0;   /* unsigned: matches client_count */

        if (!clienttable) {
                gf_log_callingfn ("client_t", GF_LOG_WARNING, "!clienttable");
                return;
        }

        /* Steal the entries under the lock so concurrent users see an
         * empty table and we can release references without holding it. */
        LOCK (&clienttable->lock);
        {
                client_count = clienttable->max_clients;
                clienttable->max_clients = 0;
                cliententries = clienttable->cliententries;
                clienttable->cliententries = NULL;
        }
        UNLOCK (&clienttable->lock);

        if (cliententries != NULL) {
                for (i = 0; i < client_count; i++) {
                        client = cliententries[i].client;
                        if (client != NULL) {
                                gf_client_unref (client);
                        }
                }
                GF_FREE (cliententries);
        }

        /* Always release the table itself, even when it held no entries. */
        LOCK_DESTROY (&clienttable->lock);
        GF_FREE (clienttable);
}
/* This function is the destructor for the event_pool data structure * Should be called only after poller_threads_destroy() is called, * else will lead to crashes. */ static int event_pool_destroy_epoll (struct event_pool *event_pool) { int ret = 0, i = 0, j = 0; struct event_slot_epoll *table = NULL; ret = close (event_pool->fd); for (i = 0; i < EVENT_EPOLL_TABLES; i++) { if (event_pool->ereg[i]) { table = event_pool->ereg[i]; event_pool->ereg[i] = NULL; for (j = 0; j < EVENT_EPOLL_SLOTS; j++) { LOCK_DESTROY (&table[j].lock); } GF_FREE (table); } } pthread_mutex_destroy (&event_pool->mutex); pthread_cond_destroy (&event_pool->cond); GF_FREE (event_pool->evcache); GF_FREE (event_pool->reg); GF_FREE (event_pool); return ret; }
/* Free a cevents object: its event/fired buffers, the fired queue,
 * both locks, private state, and finally the object itself. */
void cevents_destroy(cevents *cevts) {
	if (cevts == NULL)
		return;

	if (cevts->events)
		jfree(cevts->events);
	if (cevts->fired)
		jfree(cevts->fired);
	if (cevts->fired_queue)
		clist_destroy(cevts->fired_queue);

	LOCK_DESTROY(&cevts->lock);
	LOCK_DESTROY(&cevts->qlock);

	/* Defensive: clear dangling pointers before releasing the object. */
	cevts->events = NULL;
	cevts->fired = NULL;
	cevts->fired_queue = NULL;

	cevents_destroy_priv_impl(cevts);
	jfree(cevts);
}
/*
 * Release a hash table: free every bucket chain, the bucket array,
 * the table's rwlock, and finally the table itself.
 *
 * Fix: tolerate a NULL argument (free()-like contract) instead of
 * dereferencing it and crashing.
 */
void hash_free(hash_type *hash) {
  uint32_t i;

  if (hash == NULL)
    return;

  for (i = 0; i < hash->size; i++)
    hash_sll_free(hash->table[i]);

  free(hash->table);
  LOCK_DESTROY( &hash->rwlock );
  free(hash);
}
void mem_pool_destroy (struct mem_pool *pool) { if (!pool) return; LOCK_DESTROY (&pool->lock); GF_FREE (pool->pool); GF_FREE (pool); return; }
/*
 * Destroy a cevents instance: per-slot fired queues, the event array,
 * the fired-fd list, the timer base, both locks, private state, and
 * the object itself.
 */
void cevents_destroy(cevents *cevts) {
	size_t slot;

	if (!cevts)
		return;

	if (cevts->events) {
		/* Drain and destroy each slot's fired queue before
		 * releasing the array that holds them. */
		for (slot = 0; slot < MAX_EVENTS; slot++) {
			cevents_clear_fired_events(cevts, slot);
			clist_destroy(cevts->events[slot].fired_queue);
		}
		jfree(cevts->events);
	}
	if (cevts->fired_fds)
		clist_destroy(cevts->fired_fds);
	if (cevts->timers)
		ctimer_base_destroy(cevts->timers);

	LOCK_DESTROY(&cevts->lock);
	LOCK_DESTROY(&cevts->qlock);

	/* Defensive: clear dangling pointers before the final free. */
	cevts->events = NULL;
	cevts->fired_fds = NULL;
	cevts->timers = NULL;
	//cevts->fired_queue = NULL;

	cevents_destroy_priv_impl(cevts);
	jfree(cevts);
}
/*
 * Destroy every bucket of an rbthash table: tear down each bucket's
 * lock and its red-black tree.  The bucket array itself is owned by
 * the caller and is not freed here.
 *
 * Fix: guard rb_destroy against a NULL bucket tree (possible when
 * bucket initialization failed partway), which would otherwise be
 * handed a NULL pointer.
 */
void
rbthash_table_destroy_buckets (rbthash_table_t *tbl)
{
        int x = 0;

        if (!tbl)
                return;

        for (x = 0; x < tbl->numbuckets; x++) {
                LOCK_DESTROY (&tbl->buckets[x].bucketlock);
                if (tbl->buckets[x].bucket)
                        rb_destroy (tbl->buckets[x].bucket,
                                    rbthash_entry_deiniter);
        }

        return;
}
/* Test helper: tear down an xlator, asserting that every per-type
 * mem-accounting lock destroys cleanly, then free its owned memory. */
static int helper_xlator_destroy(xlator_t *xl) {
	int type;

	for (type = 0; type < xl->mem_acct.num_types; type++) {
		int rc = LOCK_DESTROY(&(xl->mem_acct.rec[type].lock));
		assert_int_equal(rc, 0);
	}

	free(xl->mem_acct.rec);
	free(xl->ctx);
	free(xl);

	return 0;
}
/*
 * Close a simplebuf buffer entity: destroy its head lock, free the I/O
 * signal, release the data buffer (hugepage/shm-backed or plain
 * malloc'd — unless the RX ring owns it), tear down the wait
 * mutex/condvar, and free the simplebuf itself.
 *
 * NOTE(review): `be` is intentionally not freed here (see the
 * commented-out free below) — presumably the caller owns it; confirm.
 * Returns 0 unconditionally.
 */
int sbuf_close(struct buffer_entity *be, void *stats)
{
	(void)stats; /* unused; kept for interface compatibility */
	struct simplebuf *sbuf = (struct simplebuf *)be->opt;
	D("Closing simplebuf for id %d ", sbuf->bufnum);
	LOCK_DESTROY(be->headlock);
	LOCK_FREE(be->headlock);
	free(be->iosignal);
	//int ret = 0;
	/*
	   if(be->recer->close != NULL)
	   be->recer->close(be->recer, stats);
	 */
	/* The data buffer is only ours when the RX ring is not in use. */
	if (!(sbuf->optbits & USE_RX_RING)) {
#if(HAVE_HUGEPAGES)
		if (sbuf->optbits & USE_HUGEPAGE) {
			//munmap(sbuf->buffer, sbuf->opt->packet_size*sbuf->opt->buf_num_elems);
#ifdef MMAP_NOT_SHMGET
			munmap(be->buffer, sbuf->opt->filesize);
#else
			/* shm-backed: unlink the named segment, close its fd */
			char shmstring[FILENAME_MAX];
			sprintf(shmstring, "%s%03d", SHMIDENT, sbuf->bufnum);
			shm_unlink(shmstring);
			close(sbuf->shmid);
#endif
		} else
#endif /* HAVE_HUGEPAGES */
			free(be->buffer);
	} else
		D("Not freeing mem. Done in main");
	if (pthread_mutex_destroy(&(be->self->waitlock)) != 0)
		E("Error in waitlock destroy");
	if (pthread_cond_destroy(&(be->self->waitsig)) != 0)
		E("Error in waitsig destroy");
	D("Freeing structs");
	free(sbuf);
	//free(be);
	//free(be->recer);
	D("Simplebuf closed");
	return 0;
}
/* Detach the Tegra RTC: tear down the interrupt handler, release the
 * IRQ and register-window resources, destroy the softc lock, and let
 * the generic code detach any children. */
static int
tegra_rtc_detach(device_t dev)
{
	struct tegra_rtc_softc *sc = device_get_softc(dev);

	if (sc->irq_h != NULL)
		bus_teardown_intr(dev, sc->irq_res, sc->irq_h);
	if (sc->irq_res != NULL)
		bus_release_resource(dev, SYS_RES_IRQ, 0, sc->irq_res);
	if (sc->mem_res != NULL)
		bus_release_resource(dev, SYS_RES_MEMORY, 0, sc->mem_res);

	LOCK_DESTROY(sc);
	return (bus_generic_detach(dev));
}
void mem_pool_destroy (struct mem_pool *pool) { if (!pool) return; gf_log (THIS->name, GF_LOG_INFO, "size=%lu max=%d total=%"PRIu64, pool->padded_sizeof_type, pool->max_alloc, pool->alloc_count); list_del (&pool->global_list); LOCK_DESTROY (&pool->lock); GF_FREE (pool->name); GF_FREE (pool->pool); GF_FREE (pool); return; }
/* destroy a worker: flag it as stopping, join its thread, and free
 * everything it owns.  Safe against concurrent destruction attempts. */
void socket_worker_destroy(socket_worker_t * worker)
{
    uint32_t previous_flags =
        RELAY_ATOMIC_OR(worker->base.stopping, WORKER_STOPPING);

    /* Avoid race between worker_pool_reload_static and worker_pool_destroy_static().
     * If another caller already set WORKER_STOPPING, it owns the
     * teardown; bail out so we never join or free twice.
     *
     * TODO: Another possible solution for this race could be a destructor thread
     * that waits on a semaphore and then destroys all. Possible flaw: what is
     * a thread doesn't decrement the semaphore?
     *
     * Note that similar solution is used also by the graphite worker. */
    if (previous_flags & WORKER_STOPPING)
        return;

    pthread_join(worker->base.tid, NULL);

    LOCK_DESTROY(&worker->lock);
    free(worker->base.arg);
    free(worker);
}
/*
 * Detach the Tegra XHCI controller.  Teardown order matters: delete
 * children first, halt the controller, then tear down interrupts, bus
 * resources, the mailbox interrupt, firmware memory, and finally the
 * softc lock.  Always returns 0.
 */
static int
tegra_xhci_detach(device_t dev)
{
	struct tegra_xhci_softc *sc;
	struct xhci_softc *xsc;

	sc = device_get_softc(dev);
	xsc = &sc->xhci_softc;

	/* during module unload there are lots of children leftover */
	device_delete_children(dev);

	/* Quiesce the controller before releasing its resources. */
	if (sc->xhci_inited) {
		usb_callout_drain(&xsc->sc_callout);
		xhci_halt_controller(xsc);
	}

	if (xsc->sc_irq_res && xsc->sc_intr_hdl) {
		bus_teardown_intr(dev, xsc->sc_irq_res, xsc->sc_intr_hdl);
		xsc->sc_intr_hdl = NULL;
	}
	if (xsc->sc_irq_res) {
		bus_release_resource(dev, SYS_RES_IRQ,
		    rman_get_rid(xsc->sc_irq_res), xsc->sc_irq_res);
		xsc->sc_irq_res = NULL;
	}
	if (xsc->sc_io_res != NULL) {
		bus_release_resource(dev, SYS_RES_MEMORY,
		    rman_get_rid(xsc->sc_io_res), xsc->sc_io_res);
		xsc->sc_io_res = NULL;
	}
	if (sc->xhci_inited)
		xhci_uninit(xsc);
	/* Mailbox interrupt is separate from the XHCI core interrupt. */
	if (sc->irq_hdl_mbox != NULL)
		bus_teardown_intr(dev, sc->irq_res_mbox, sc->irq_hdl_mbox);
	/* Release firmware memory mapped at attach time. */
	if (sc->fw_vaddr != 0)
		kmem_free(kernel_arena, sc->fw_vaddr, sc->fw_size);
	LOCK_DESTROY(sc);
	return (0);
}
/*
 * Attach the Tegra RTC: map its registers, allocate the IRQ, enable the
 * module clock, quiesce the hardware, and install the interrupt
 * handler.  On any failure all acquired resources are released through
 * the common `fail` path.
 */
static int
tegra_rtc_attach(device_t dev)
{
	int rv, rid;
	struct tegra_rtc_softc *sc;

	sc = device_get_softc(dev);
	sc->dev = dev;

	LOCK_INIT(sc);

	/* Get the memory resource for the register mapping. */
	rid = 0;
	sc->mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
	    RF_ACTIVE);
	if (sc->mem_res == NULL) {
		device_printf(dev, "Cannot map registers.\n");
		rv = ENXIO;
		goto fail;
	}

	/* Allocate our IRQ resource. */
	rid = 0;
	sc->irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
	    RF_ACTIVE);
	if (sc->irq_res == NULL) {
		device_printf(dev, "Cannot allocate interrupt.\n");
		rv = ENXIO;
		goto fail;
	}

	/* OFW resources. */
	/* NOTE(review): the error message says "i2c clock" — looks like a
	 * copy-paste from the i2c driver; the clock fetched is the RTC's. */
	rv = clk_get_by_ofw_index(dev, 0, 0, &sc->clk);
	if (rv != 0) {
		device_printf(dev, "Cannot get i2c clock: %d\n", rv);
		goto fail;
	}
	rv = clk_enable(sc->clk);
	if (rv != 0) {
		device_printf(dev, "Cannot enable clock: %d\n", rv);
		goto fail;
	}

	/* Init hardware: clear both alarms, ack and mask all interrupts. */
	WR4(sc, RTC_SECONDS_ALARM0, 0);
	WR4(sc, RTC_SECONDS_ALARM1, 0);
	WR4(sc, RTC_INTR_STATUS, 0xFFFFFFFF);
	WR4(sc, RTC_INTR_MASK, 0);

	/* Setup interrupt */
	rv = bus_setup_intr(dev, sc->irq_res, INTR_TYPE_MISC | INTR_MPSAFE,
	    NULL, tegra_rtc_intr, sc, &sc->irq_h);
	if (rv) {
		device_printf(dev, "Cannot setup interrupt.\n");
		goto fail;
	}

	/*
	 * Register as a time of day clock with 1-second resolution.
	 *
	 * XXXX Not yet, we don't have support for multiple RTCs
	 */
	/* clock_register(dev, 1000000); */

	return (bus_generic_attach(dev));

fail:
	/* Unwind whatever was acquired before the failure. */
	if (sc->clk != NULL)
		clk_release(sc->clk);
	if (sc->irq_h != NULL)
		bus_teardown_intr(dev, sc->irq_res, sc->irq_h);
	if (sc->irq_res != NULL)
		bus_release_resource(dev, SYS_RES_IRQ, 0, sc->irq_res);
	if (sc->mem_res != NULL)
		bus_release_resource(dev, SYS_RES_MEMORY, 0, sc->mem_res);
	LOCK_DESTROY(sc);
	return (rv);
}
static void * gf_timer_proc (void *data) { gf_timer_registry_t *reg = data; const struct timespec sleepts = {.tv_sec = 1, .tv_nsec = 0, }; gf_timer_t *event = NULL; xlator_t *old_THIS = NULL; while (!reg->fin) { uint64_t now; struct timespec now_ts; timespec_now (&now_ts); now = TS (now_ts); while (1) { uint64_t at; char need_cbk = 0; LOCK (®->lock); { event = reg->active.next; at = TS (event->at); if (event != ®->active && now >= at) { need_cbk = 1; event->next->prev = event->prev; event->prev->next = event->next; event->fired = _gf_true; } } UNLOCK (®->lock); if (need_cbk) { old_THIS = NULL; if (event->xl) { old_THIS = THIS; THIS = event->xl; } event->callbk (event->data); GF_FREE (event); if (old_THIS) { THIS = old_THIS; } } else { break; } } nanosleep (&sleepts, NULL); } LOCK (®->lock); { /* Do not call gf_timer_call_cancel(), * it will lead to deadlock */ while (reg->active.next != ®->active) { event = reg->active.next; /* cannot call list_del as the event doesnt have * list_head*/ __delete_entry (event); } } UNLOCK (®->lock); LOCK_DESTROY (®->lock); return NULL; } static gf_timer_registry_t * gf_timer_registry_init (glusterfs_ctx_t *ctx) { gf_timer_registry_t *reg = NULL; if (ctx == NULL) { gf_msg_callingfn ("timer", GF_LOG_ERROR, EINVAL, LG_MSG_INVALID_ARG, "invalid argument"); return NULL; } if (ctx->cleanup_started) { gf_msg_callingfn ("timer", GF_LOG_INFO, 0, LG_MSG_CTX_CLEANUP_STARTED, "ctx cleanup started"); return NULL; } LOCK (&ctx->lock); { reg = ctx->timer; } UNLOCK (&ctx->lock); if (!reg) { reg = GF_CALLOC (1, sizeof (*reg), gf_common_mt_gf_timer_registry_t); if (!reg) return NULL; LOCK_INIT (®->lock); reg->active.next = ®->active; reg->active.prev = ®->active; LOCK (&ctx->lock); { ctx->timer = reg; } UNLOCK (&ctx->lock); gf_thread_create (®->th, NULL, gf_timer_proc, reg); } return reg; }
/* Cleanup worker data: reset the module's worker bookkeeping and
 * destroy the idle lock.
 * NOTE(review): the workers array itself is not freed here —
 * presumably released elsewhere; confirm before calling twice. */
void SNetWorkerCleanup(void)
{
	snet_worker_count = 0;
	snet_workers = NULL;
	LOCK_DESTROY(snet_idle_lock);
}
/*
 * Attach the Tegra I2C controller: map registers, allocate the IRQ,
 * fetch clock/reset handles from the FDT, program the core clock,
 * initialize the hardware, install the interrupt handler, and add the
 * iicbus child.  All failures unwind through the common `fail` path.
 *
 * Fixes:
 *  - hwreset_get_by_ofw_name failure previously did a bare
 *    `return (ENXIO)`, leaking the softc lock and the already-acquired
 *    memory/IRQ resources; it now goes through `fail` like every other
 *    error.
 *  - A missing "clock-frequency" property previously set the 100 kHz
 *    default and then jumped to `fail` anyway, making the default dead
 *    code; a missing property now falls back to 100 kHz and continues.
 */
static int
tegra_i2c_attach(device_t dev)
{
	int rv, rid;
	phandle_t node;
	struct tegra_i2c_softc *sc;
	uint64_t freq;

	sc = device_get_softc(dev);
	sc->dev = dev;
	node = ofw_bus_get_node(dev);

	LOCK_INIT(sc);

	/* Get the memory resource for the register mapping. */
	rid = 0;
	sc->mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
	    RF_ACTIVE);
	if (sc->mem_res == NULL) {
		device_printf(dev, "Cannot map registers.\n");
		rv = ENXIO;
		goto fail;
	}

	/* Allocate our IRQ resource. */
	rid = 0;
	sc->irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
	    RF_ACTIVE);
	if (sc->irq_res == NULL) {
		device_printf(dev, "Cannot allocate interrupt.\n");
		rv = ENXIO;
		goto fail;
	}

	/* FDT resources. */
	rv = clk_get_by_ofw_name(dev, 0, "div-clk", &sc->clk);
	if (rv != 0) {
		device_printf(dev, "Cannot get i2c clock: %d\n", rv);
		goto fail;
	}
	rv = hwreset_get_by_ofw_name(sc->dev, 0, "i2c", &sc->reset);
	if (rv != 0) {
		device_printf(sc->dev, "Cannot get i2c reset\n");
		rv = ENXIO;
		goto fail;
	}
	rv = OF_getencprop(node, "clock-frequency", &sc->bus_freq,
	    sizeof(sc->bus_freq));
	if (rv != sizeof(sc->bus_freq)) {
		/* Property absent: standard-mode 100 kHz default. */
		sc->bus_freq = 100000;
	}

	/* Request maximum frequency for I2C block 136MHz (408MHz / 3). */
	rv = clk_set_freq(sc->clk, 136000000, CLK_SET_ROUND_DOWN);
	if (rv != 0) {
		device_printf(dev, "Cannot set clock frequency\n");
		goto fail;
	}
	rv = clk_get_freq(sc->clk, &freq);
	if (rv != 0) {
		device_printf(dev, "Cannot get clock frequency\n");
		goto fail;
	}
	sc->core_freq = (uint32_t)freq;

	rv = clk_enable(sc->clk);
	if (rv != 0) {
		device_printf(dev, "Cannot enable clock: %d\n", rv);
		goto fail;
	}

	/* Init hardware. */
	rv = tegra_i2c_hw_init(sc);
	if (rv) {
		device_printf(dev, "tegra_i2c_activate failed\n");
		goto fail;
	}

	/* Setup interrupt. */
	rv = bus_setup_intr(dev, sc->irq_res, INTR_TYPE_MISC | INTR_MPSAFE,
	    NULL, tegra_i2c_intr, sc, &sc->irq_h);
	if (rv) {
		device_printf(dev, "Cannot setup interrupt.\n");
		goto fail;
	}

	/* Attach the iicbus. */
	sc->iicbus = device_add_child(dev, "iicbus", -1);
	if (sc->iicbus == NULL) {
		device_printf(dev, "Could not allocate iicbus instance.\n");
		rv = ENXIO;
		goto fail;
	}

	/* Probe and attach the iicbus. */
	return (bus_generic_attach(dev));

fail:
	/* Unwind whatever was acquired before the failure. */
	if (sc->irq_h != NULL)
		bus_teardown_intr(dev, sc->irq_res, sc->irq_h);
	if (sc->irq_res != NULL)
		bus_release_resource(dev, SYS_RES_IRQ, 0, sc->irq_res);
	if (sc->mem_res != NULL)
		bus_release_resource(dev, SYS_RES_MEMORY, 0, sc->mem_res);
	LOCK_DESTROY(sc);

	return (rv);
}