/**
 * Create a new header object.
 */
header_t *
header_make(void)
{
	header_t *o;

	WALLOC0(o);
	o->magic = HEADER_MAGIC;
	o->refcnt = 1;
	return o;
}
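/*
 * Most constructors in this section share one idiom: WALLOC0() yields a
 * zero-filled walloc()'ed structure, a magic number tags the live object,
 * and a matching *_check() routine asserts on that tag before use. Below is
 * a minimal, self-contained sketch of the idiom, using plain calloc()/free()
 * as stand-ins for the walloc layer; the foo_t type and FOO_MAGIC value are
 * hypothetical, not part of the original sources.
 */
#include <assert.h>
#include <stdlib.h>

enum { FOO_MAGIC = 0x2c52f17e };	/* hypothetical tag marking live objects */

typedef struct foo {
	unsigned magic;			/* FOO_MAGIC while the object is valid */
	int refcnt;
} foo_t;

static inline void
foo_check(const foo_t *f)
{
	assert(f != NULL);
	assert(FOO_MAGIC == f->magic);
}

static foo_t *
foo_make(void)
{
	foo_t *f = calloc(1, sizeof *f);	/* stands in for WALLOC0(f) */

	assert(f != NULL);
	f->magic = FOO_MAGIC;
	f->refcnt = 1;
	return f;
}

static void
foo_free(foo_t *f)
{
	foo_check(f);
	f->magic = 0;		/* poison the tag to catch use-after-free */
	free(f);
}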
/**
 * Allocates a new search_table_t.
 * Use st_free() to free it.
 */
search_table_t *
st_create(void)
{
	search_table_t *table;

	WALLOC0(table);
	table->magic = SEARCH_TABLE_MAGIC;
	st_initialize(table);
	st_recreate(table);
	return table;
}
/**
 * Create a new empty header field, whose name is `name'.
 * A private copy of `name' is made.
 */
static header_field_t *
hfield_make(const char *name)
{
	header_field_t *h;

	WALLOC0(h);
	h->magic = HEADER_FIELD_MAGIC;
	h->name = h_strdup(name);
	return h;
}
/**
 * Allocate new publisher entry.
 */
static struct publisher_entry *
publisher_entry_alloc(const sha1_t *sha1)
{
	struct publisher_entry *pe;

	WALLOC0(pe);
	pe->magic = PUBLISHER_MAGIC;
	pe->sha1 = atom_sha1_get(sha1);
	return pe;
}
/**
 * Allocate output stream descriptor of the specified type.
 */
static ostream_t *
ostream_alloc(enum ostream_type type)
{
	ostream_t *os;

	WALLOC0(os);
	os->magic = OSTREAM_MAGIC;
	os->type = type;
	return os;
}
/**
 * Allocate a new XML node.
 */
static xnode_t *
xnode_new(xnode_type_t type)
{
	xnode_t *xn;

	WALLOC0(xn);
	xn->magic = XNODE_MAGIC;
	xn->type = type;
	return xn;
}
static struct uhc *
uhc_new(const char *host)
{
	struct uhc *uhc;

	g_assert(host != NULL);

	WALLOC0(uhc);
	uhc->host = atom_str_get(host);
	return uhc;
}
/**
 * Create a new IP range database.
 */
struct iprange_db *
iprange_new(void)
{
	struct iprange_db *idb;

	WALLOC0(idb);
	idb->magic = IPRANGE_DB_MAGIC;
	iprange_reset_ipv4(idb);
	iprange_reset_ipv6(idb);
	return idb;
}
/**
 * Initialize the LRU page cache with default values.
 */
void
lru_init(DBM *db)
{
	struct lru_cache *cache;

	g_assert(NULL == db->cache);
	g_assert(-1 == db->pagbno);	/* We must be called before first access */

	WALLOC0(cache);
	if (-1 == setup_cache(cache, LRU_PAGES, FALSE))
		g_error("out of virtual memory");
	db->cache = cache;
}
/**
 * Allocate keyinfo.
 *
 * @param kuid		the key's KUID
 * @param common	common bits with our KUID
 */
static struct keyinfo *
allocate_keyinfo(const kuid_t *kuid, size_t common)
{
	struct keyinfo *ki;

	WALLOC0(ki);
	ki->magic = KEYINFO_MAGIC;
	ki->kuid = kuid_get_atom(kuid);
	ki->common_bits = common & 0xff;
	return ki;
}
/**
 * Initialize the browse host context.
 */
struct browse_ctx *
browse_host_dl_create(gpointer owner, gnet_host_t *host, gnet_search_t sh)
{
	struct browse_ctx *bc;

	WALLOC0(bc);
	bc->owner = owner;
	gnet_host_copy(&bc->host, host);
	bc->sh = sh;
	return bc;
}
/**
 * Flush current /QH2.
 *
 * Depending on how the QH2 builder is configured, this either sends the
 * message to the target node or invokes a processing callback.
 */
static void
g2_build_qh2_flush(struct g2_qh2_builder *ctx)
{
	pmsg_t *mb;

	g_assert(ctx != NULL);
	g_assert(ctx->t != NULL);
	g_assert((ctx->n != NULL) ^ (ctx->cb != NULL));

	/*
	 * Restore the order of children in the root packet to be the order we
	 * used when we added the nodes, since we prepend new children.
	 */

	g2_tree_reverse_children(ctx->t);

	/*
	 * If sending over UDP, ask for reliable delivery of the query hit.
	 * To be able to monitor the fate of the message, we associate a free
	 * routine to it.
	 */

	if (ctx->to_udp) {
		struct g2_qh2_pmsg_info *pmi;

		WALLOC0(pmi);
		pmi->magic = G2_QH2_PMI_MAGIC;
		pmi->hub_id = nid_ref(NODE_ID(ctx->hub));
		mb = g2_build_pmsg_extended(ctx->t, g2_qh2_pmsg_free, pmi);
		pmsg_mark_reliable(mb);
	} else {
		mb = g2_build_pmsg(ctx->t);
	}

	if (GNET_PROPERTY(g2_debug) > 3) {
		g_debug("%s(): flushing the following hit for "
			"Q2 #%s to %s%s (%d bytes):",
			G_STRFUNC, guid_hex_str(ctx->muid),
			NULL == ctx->n ?
				stacktrace_function_name(ctx->cb) : node_infostr(ctx->n),
			NULL == ctx->n ? "()" : "", pmsg_size(mb));
		g2_tfmt_tree_dump(ctx->t, stderr, G2FMT_O_PAYLOAD | G2FMT_O_PAYLEN);
	}

	if (ctx->n != NULL)
		g2_node_send(ctx->n, mb);
	else
		(*ctx->cb)(mb, ctx->arg);

	ctx->messages++;
	ctx->current_size = 0;
	g2_tree_free_null(&ctx->t);
}
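/*
 * The (ctx->n != NULL) ^ (ctx->cb != NULL) assertion above encodes an
 * "exactly one destination" invariant: either a target node or a processing
 * callback is configured, never both and never neither. A hedged sketch of
 * the same invariant on a hypothetical builder follows; all names are
 * illustrative only.
 */
#include <assert.h>
#include <stddef.h>

struct sketch_builder {
	void *node;				/* destination A: deliver to a node */
	void (*cb)(void *msg, void *arg);	/* destination B: invoke a callback */
	void *arg;
};

static void
sketch_send_to_node(void *node, void *msg)
{
	(void) node;
	(void) msg;		/* stub standing in for the real sender */
}

static void
sketch_flush(struct sketch_builder *b, void *msg)
{
	/* In C, != yields 0 or 1, so ^ is a clean boolean XOR here */
	assert((b->node != NULL) ^ (b->cb != NULL));

	if (b->node != NULL)
		sketch_send_to_node(b->node, msg);
	else
		(*b->cb)(msg, b->arg);
}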
/**
 * Allocate a new whitelist entry containing an explicit address.
 */
static struct whitelist *
whitelist_addr_create(bool use_tls, host_addr_t addr, uint16 port, uint8 bits)
{
	struct whitelist *item;

	WALLOC0(item);
	item->use_tls = use_tls;
	item->addr = addr;
	item->port = port;
	item->bits = bits ? bits : addr_default_mask(addr);
	return item;
}
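/*
 * The `bits ? bits : addr_default_mask(addr)' fallback presumably maps a
 * zero prefix length to the full width of the address, so the whitelist
 * entry matches a single host. A sketch under that assumption (the enum and
 * function are hypothetical; addr_default_mask() is assumed to return 32
 * for IPv4 and 128 for IPv6):
 */
#include <stdint.h>

enum sketch_net { SKETCH_IPV4, SKETCH_IPV6 };

static uint8_t
sketch_default_mask(enum sketch_net net)
{
	/* A /32 or /128 host mask: "0 bits" means "this exact address" */
	return SKETCH_IPV4 == net ? 32 : 128;
}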
/**
 * Create a new record for query hits bearing the specified MUID.
 * The new record is registered in the current table.
 */
static dqhit_t *
dh_create(const struct guid *muid)
{
	dqhit_t *dh;
	const struct guid *key;

	WALLOC0(dh);
	key = atom_guid_get(muid);
	htable_insert(by_muid, key, dh);
	return dh;
}
/**
 * Create a new GHC.
 */
static struct ghc *
ghc_new(const char *url)
{
	struct ghc *ghc;

	g_assert(url != NULL);

	WALLOC0(ghc);
	ghc->url = atom_str_get(url);
	ghc->stamp = 0;
	ghc->used = 0;
	return ghc;
}
/**
 * Allocate waiting event.
 *
 * @param key		waiting key
 * @param cb		callback to trigger
 * @param arg		additional callback argument
 */
static wq_event_t *
wq_event_alloc(const void *key, wq_callback_t cb, void *arg)
{
	wq_event_t *we;

	g_assert(cb != NULL);

	WALLOC0(we);
	we->magic = WQ_EVENT_MAGIC;
	we->key = key;
	we->cb = cb;
	we->arg = arg;
	return we;
}
/**
 * Create a new LRU cache.
 * @return -1 with errno set on error, 0 if OK.
 */
static int
init_cache(DBM *db, long pages, gboolean wdelay)
{
	struct lru_cache *cache;

	g_assert(NULL == db->cache);

	WALLOC0(cache);
	if (-1 == setup_cache(cache, pages, wdelay)) {
		WFREE(cache);
		return -1;
	}
	db->cache = cache;
	return 0;
}
/**
 * Create a new hash set iterator.
 */
hikset_iter_t *
hikset_iter_new(const hikset_t *hx)
{
	hikset_iter_t *hxi;

	hikset_check(hx);

	WALLOC0(hxi);
	hxi->magic = HIKSET_ITER_MAGIC;
	hxi->hx = hx;
	hxi->stamp = hx->stamp;
	hash_refcnt_inc(HASH(hx));
	return hxi;
}
/**
 * Allocate a new service description.
 *
 * The control and SCPD URLs are copied.
 *
 * @param type		service type
 * @param version	service version number
 * @param ctrl_url	control URL
 * @param scpd_url	SCPD URL
 */
static upnp_service_t *
upnp_service_alloc(enum upnp_service_type type, unsigned version,
	const char *ctrl_url, const char *scpd_url)
{
	upnp_service_t *usd;

	WALLOC0(usd);
	usd->magic = UPNP_SVC_DESC_MAGIC;
	usd->type = type;
	usd->version = version;
	usd->control_url = atom_str_get(ctrl_url);
	usd->scpd_url = atom_str_get(scpd_url);
	return usd;
}
static hash_list_iter_t *
hash_list_iterator_new(hash_list_t *hl, enum hash_list_iter_direction dir)
{
	hash_list_iter_t *iter;

	hash_list_check(hl);

	WALLOC0(iter);
	iter->magic = HASH_LIST_ITER_MAGIC;
	iter->dir = dir;
	iter->hl = hl;
	iter->stamp = hl->stamp;
	hl->refcount++;
	return iter;
}
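/*
 * Both iterator constructors above snapshot the container's `stamp'. The
 * usual contract is that every mutating operation bumps the stamp, letting
 * an iterator assert that the structure was not modified behind its back.
 * A minimal sketch of that detection scheme follows; the names are
 * illustrative, not the actual hash_list / hikset API.
 */
#include <assert.h>
#include <stddef.h>

struct sketch_seq {
	unsigned stamp;		/* incremented by every mutating operation */
};

struct sketch_seq_iter {
	const struct sketch_seq *s;
	unsigned stamp;		/* snapshot taken at iterator creation */
	size_t pos;
};

static void
sketch_iter_init(struct sketch_seq_iter *it, const struct sketch_seq *s)
{
	it->s = s;
	it->stamp = s->stamp;	/* remember the generation we iterate over */
	it->pos = 0;
}

static void
sketch_iter_step(struct sketch_seq_iter *it)
{
	/* Fails loudly if the sequence mutated since the iterator was made */
	assert(it->stamp == it->s->stamp);
	it->pos++;
}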
/**
 * Delete key from database.
 */
void
dbmw_delete(dbmw_t *dw, gconstpointer key)
{
	struct cached *entry;

	dbmw_check(dw);
	g_assert(key);

	dw->w_access++;

	entry = map_lookup(dw->values, key);
	if (entry) {
		if (entry->dirty)
			dw->w_hits++;
		if (!entry->absent) {
			dw->count_needs_sync = TRUE;	/* Deferred delete */
			fill_entry(dw, entry, NULL, 0);
			entry->absent = TRUE;
		}
		hash_list_moveto_tail(dw->keys, key);
	} else {
		dw->ioerr = FALSE;
		dbmap_remove(dw->dm, key);

		if (dbmap_has_ioerr(dw->dm)) {
			dw->ioerr = TRUE;
			dw->error = errno;
			g_warning("DBMW \"%s\" I/O error whilst deleting key: %s",
				dw->name, dbmap_strerror(dw->dm));
		}

		/*
		 * If the maximum value length of the DB is 0, then it is used as a
		 * "search table" only, meaning there will be no read to get values,
		 * only existence checks.
		 *
		 * Therefore, it makes sense to cache that the key is no longer valid.
		 * Otherwise, possibly pushing a value out of the cache to record
		 * a deletion is not worth it.
		 */

		if (0 == dw->value_size) {
			WALLOC0(entry);
			entry->absent = TRUE;
			(void) allocate_entry(dw, key, entry);
		}
	}
}
/**
 * Register new file to be monitored.
 *
 * If the file was already monitored, cancel the previous monitoring action
 * and replace it with this one.
 *
 * @param filename	the file to monitor (string duplicated)
 * @param cb		the callback to invoke when the file changes
 * @param udata		extra data to pass to the callback, along with filename
 */
void
watcher_register(const char *filename, watcher_cb_t cb, void *udata)
{
	struct monitored *m;

	WALLOC0(m);
	m->filename = atom_str_get(filename);
	m->cb = cb;
	m->udata = udata;
	m->mtime = watcher_mtime(filename);

	if (hikset_contains(monitored, filename))
		watcher_unregister(filename);

	hikset_insert_key(monitored, &m->filename);
}
/**
 * Create a new LRU cache.
 * @return -1 with errno set on error, 0 if OK.
 */
static int
init_cache(DBM *db, long pages, bool wdelay)
{
	struct lru_cache *cache;

	g_assert(NULL == db->cache);

	WALLOC0(cache);
	cache->magic = SDBM_LRU_MAGIC;
	if (-1 == setup_cache(cache, pages, wdelay)) {
		WFREE(cache);
		return -1;
	}
	db->cache = cache;
	return 0;
}
/**
 * Allocate a new entry in the cache to hold the deserialized value.
 *
 * @param dw		the DBM wrapper
 * @param key		key we want a cache entry for
 * @param filled	optionally, a new cache entry already filled with the data
 *
 * @attention
 * An older cache entry structure can be returned, and it will still
 * point to the previous data. Caller should normally invoke fill_entry()
 * immediately to make sure these stale data are not wrongly associated
 * with the new key, or supply its own filled structure directly.
 *
 * @return a cache entry object that can be filled with the value.
 */
static struct cached *
allocate_entry(dbmw_t *dw, gconstpointer key, struct cached *filled)
{
	struct cached *entry;
	gpointer saved_key;

	g_assert(!hash_list_contains(dw->keys, key));
	g_assert(!map_contains(dw->values, key));
	g_assert(!filled || (!filled->len == !filled->data));

	saved_key = wcopy(key, dbmw_keylen(dw, key));

	/*
	 * If we have fewer keys cached than our maximum, add it.
	 * Otherwise evict the least recently used key, at the head.
	 */

	if (hash_list_length(dw->keys) < dw->max_cached) {
		if (filled)
			entry = filled;
		else
			WALLOC0(entry);
	} else {
		gpointer head;

		g_assert(hash_list_length(dw->keys) == dw->max_cached);

		head = hash_list_head(dw->keys);
		entry = remove_entry(dw, head, filled != NULL, TRUE);

		g_assert(filled != NULL || entry != NULL);

		if (filled)
			entry = filled;
	}

	/*
	 * Add entry into cache.
	 */

	g_assert(entry);

	hash_list_append(dw->keys, saved_key);
	map_insert(dw->values, saved_key, entry);

	return entry;
}
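/*
 * The cache discipline in allocate_entry() is the classic list-based LRU:
 * new keys are appended at the tail (most recently used) and, once the
 * cache is full, the head (least recently used) is evicted. A compact
 * sketch of that discipline over an array-backed queue; this is a toy
 * stand-in, not the hash_list implementation.
 */
#include <string.h>

#define SKETCH_MAX_CACHED 4		/* hypothetical capacity */

static int sketch_keys[SKETCH_MAX_CACHED];	/* [0] is LRU, [n-1] is MRU */
static int sketch_n;

/* Insert key as most recently used, evicting the LRU head when full */
static int
sketch_lru_insert(int key)
{
	int evicted = -1;

	if (sketch_n == SKETCH_MAX_CACHED) {
		evicted = sketch_keys[0];	/* head = least recently used */
		memmove(&sketch_keys[0], &sketch_keys[1],
			(sketch_n - 1) * sizeof sketch_keys[0]);
		sketch_n--;
	}
	sketch_keys[sketch_n++] = key;	/* tail = most recently used */
	return evicted;			/* -1 when nothing was evicted */
}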
/**
 * Mark newly created aging table as being thread-safe.
 *
 * This will make all external operations on the table thread-safe.
 */
void
aging_thread_safe(aging_table_t *ag)
{
	aging_check(ag);

	/*
	 * Silently do nothing if the aging table was already made thread-safe.
	 * Indeed, this is implicitly done when the callout queue is not running
	 * in the thread that creates the aging table, since then we know that
	 * concurrent calls can happen.
	 */

	if (NULL == ag->lock) {
		WALLOC0(ag->lock);
		mutex_init(ag->lock);
	}
}
/**
 * Create a new callout queue subordinate to another.
 *
 * @param name		the name of the subqueue
 * @param parent	the parent callout queue
 * @param period	period between heartbeats, in ms
 *
 * @return a new callout queue
 */
cqueue_t *
cq_submake(const char *name, cqueue_t *parent, int period)
{
	struct csubqueue *csq;

	WALLOC0(csq);
	cq_initialize(&csq->sub_cq, name, parent->cq_time, period);
	csq->sub_cq.cq_magic = CSUBQUEUE_MAGIC;

	csq->heartbeat = cq_periodic_add(parent, period,
		cq_heartbeat_trampoline, &csq->sub_cq);

	csubqueue_check(csq);
	cqueue_check(&csq->sub_cq);

	return &csq->sub_cq;
}
static bool
udp_ping_register(const struct guid *muid,
	host_addr_t addr, uint16 port,
	udp_ping_cb_t cb, void *data, bool multiple)
{
	struct udp_ping *ping;
	uint length;

	g_assert(muid);
	g_return_val_if_fail(udp_pings, FALSE);

	if (hash_list_contains(udp_pings, muid)) {
		/* Probably a duplicate */
		return FALSE;
	}

	/* random early drop */
	length = hash_list_length(udp_pings);
	if (length >= UDP_PING_MAX) {
		return FALSE;
	} else if (length > (UDP_PING_MAX / 4) * 3) {
		if (random_value(UDP_PING_MAX - 1) < length)
			return FALSE;
	}

	WALLOC(ping);
	ping->muid = *muid;
	ping->added = tm_time();
	{
		gnet_host_t host;

		gnet_host_set(&host, addr, port);
		ping->host = atom_host_get(&host);
	}

	if (cb != NULL) {
		WALLOC0(ping->callback);
		ping->callback->cb = cb;
		ping->callback->data = data;
		ping->callback->multiple = booleanize(multiple);
	} else {
		ping->callback = NULL;
	}
	hash_list_append(udp_pings, ping);
	return TRUE;
}
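/*
 * The "random early drop" block above degrades gracefully instead of
 * cliff-dropping at capacity: below three quarters full, every ping is
 * accepted; above that, the drop probability grows roughly linearly with
 * the fill level; at capacity, everything is rejected. A sketch of the same
 * policy, with a hypothetical capacity constant and rand() standing in for
 * the uniform random_value():
 */
#include <stdbool.h>
#include <stdlib.h>

#define SKETCH_PING_MAX 1024	/* hypothetical, mirrors UDP_PING_MAX */

static bool
sketch_accept_ping(unsigned length)	/* length: current table size */
{
	if (length >= SKETCH_PING_MAX)
		return false;			/* full: always drop */
	if (length > (SKETCH_PING_MAX / 4) * 3) {
		/* Drop with probability ~length/SKETCH_PING_MAX */
		if ((unsigned) rand() % SKETCH_PING_MAX < length)
			return false;
	}
	return true;
}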
/**
 * Is key present in the database?
 */
gboolean
dbmw_exists(dbmw_t *dw, gconstpointer key)
{
	struct cached *entry;
	gboolean ret;

	dbmw_check(dw);
	g_assert(key);

	dw->r_access++;

	entry = map_lookup(dw->values, key);
	if (entry) {
		dw->r_hits++;
		return !entry->absent;
	}

	dw->ioerr = FALSE;
	ret = dbmap_contains(dw->dm, key);

	if (dbmap_has_ioerr(dw->dm)) {
		dw->ioerr = TRUE;
		dw->error = errno;
		g_warning("DBMW \"%s\" I/O error whilst checking key existence: %s",
			dw->name, dbmap_strerror(dw->dm));
		return FALSE;
	}

	/*
	 * If the maximum value length of the DB is 0, then it is used as a
	 * "search table" only, meaning there will be no read to get values,
	 * only existence checks.
	 *
	 * Therefore, it makes sense to cache existence checks. A data read
	 * will also correctly return a null item from the cache.
	 */

	if (0 == dw->value_size) {
		WALLOC0(entry);
		entry->absent = !ret;
		(void) allocate_entry(dw, key, entry);
	}

	return ret;
}
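/*
 * Both dbmw_exists() here and dbmw_delete() above exploit the
 * value_size == 0 case: such a database is a pure "search table", so a
 * cache entry needs to carry only a per-key existence bit, making it cheap
 * to remember positive and negative answers alike. A self-contained sketch
 * of such an existence cache follows, using a tiny direct-mapped table as a
 * stand-in for the dbmw key/value cache; all names are hypothetical.
 */
#include <stdbool.h>
#include <string.h>

#define SKETCH_SLOTS 64

struct sketch_slot {
	char key[32];
	bool valid;		/* slot holds a cached answer for key */
	bool present;	/* the answer: is key in the database? */
};

static struct sketch_slot sketch_slots[SKETCH_SLOTS];

static struct sketch_slot *
sketch_slot_for(const char *key)
{
	unsigned h = 5381;
	const char *p;

	for (p = key; *p != '\0'; p++)
		h = h * 33 + (unsigned char) *p;
	return &sketch_slots[h % SKETCH_SLOTS];
}

/* Cache an answer, positive or negative: both spare a future disk access */
static void
sketch_remember(const char *key, bool present)
{
	struct sketch_slot *s = sketch_slot_for(key);

	strncpy(s->key, key, sizeof s->key - 1);
	s->key[sizeof s->key - 1] = '\0';
	s->valid = true;
	s->present = present;
}

/* Returns true when the answer is known without touching the database */
static bool
sketch_lookup(const char *key, bool *present)
{
	struct sketch_slot *s = sketch_slot_for(key);

	if (s->valid && 0 == strcmp(s->key, key)) {
		*present = s->present;
		return true;
	}
	return false;
}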
/**
 * Allocate a new hash set capable of holding 2^bits items.
 */
static hikset_t *
hikset_allocate(size_t bits, bool raw, size_t offset)
{
	hikset_t *hx;

	if (raw)
		XPMALLOC0(hx);
	else
		WALLOC0(hx);

	hx->magic = HIKSET_MAGIC;
	hx->ops = &hikset_ops;
	hx->kset.raw_memory = booleanize(raw);
	hx->offset = offset;
	hash_arena_allocate(HASH(hx), bits);

	return hx;
}
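/*
 * What sets hikset apart from a plain hash set is the `offset' parameter:
 * keys live inside the values (hence hikset_insert_key(monitored,
 * &m->filename) in watcher_register() above), and the set can recover the
 * enclosing structure from a pointer to its key field, container_of-style.
 * A sketch of that pointer arithmetic with hypothetical names:
 */
#include <stddef.h>
#include <stdio.h>

struct sketch_monitored {
	int mtime;
	const char *filename;	/* the key lives inside the value */
};

/* Recover the enclosing object from the address of its key field */
static void *
sketch_value_from_key(void *key_location, size_t offset)
{
	return (char *) key_location - offset;
}

int
main(void)
{
	struct sketch_monitored m = { 42, "config.txt" };
	size_t off = offsetof(struct sketch_monitored, filename);
	struct sketch_monitored *back = sketch_value_from_key(&m.filename, off);

	printf("%d %s\n", back->mtime, back->filename);	/* "42 config.txt" */
	return 0;
}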
/**
 * Create a new watchdog.
 *
 * @param name		the watchdog name, for logging purposes
 * @param period	the period after which it triggers, in seconds
 * @param trigger	the callback to invoke if no kicking during period
 * @param arg		the user-supplied argument given to callback
 * @param start		whether to start immediately, or put in sleep state
 *
 * @return the created watchdog object.
 */
watchdog_t *
wd_make(const char *name, int period,
	wd_trigger_t trigger, void *arg, bool start)
{
	watchdog_t *wd;

	WALLOC0(wd);
	wd->magic = WATCHDOG_MAGIC;
	wd->name = atom_str_get(name);
	wd->period = period;
	wd->trigger = trigger;
	wd->arg = arg;

	if (start)
		wd_start(wd);

	watchdog_check(wd);
	return wd;
}
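/*
 * The contract implied by wd_make()'s parameters: the trigger runs only if
 * nobody "kicks" the watchdog for a whole period. A stripped-down sketch of
 * that mechanism follows; the types and functions are illustrative, not the
 * actual wd.c implementation, and the periodic tick is assumed to be driven
 * externally (e.g. by a callout queue heartbeat).
 */
#include <time.h>

struct sketch_watchdog {
	time_t last_kick;	/* time of the last sign of activity */
	int period;		/* seconds of silence before triggering */
	void (*trigger)(void *arg);
	void *arg;
};

static void
sketch_wd_kick(struct sketch_watchdog *wd)
{
	wd->last_kick = time(NULL);	/* activity seen: push the deadline back */
}

/* Called periodically: fires the trigger after a full quiet period */
static void
sketch_wd_tick(struct sketch_watchdog *wd)
{
	if (time(NULL) - wd->last_kick >= wd->period)
		(*wd->trigger)(wd->arg);
}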