void
fd_set_nonblocking(int fd)
{
	bool failed = FALSE;

#ifdef MINGW32
	{
		unsigned long nonblock = 1;

		if (ioctlsocket(fd, FIONBIO, &nonblock)) {
			errno = WSAGetLastError();
			failed = TRUE;
		}
	}
#else	/* !MINGW32 */
	{
		int ret, flags;

		ret = fcntl(fd, F_GETFL, 0);
		flags = ret | VAL_O_NONBLOCK;
		if (flags != ret) {
			if (-1 == fcntl(fd, F_SETFL, flags))
				failed = TRUE;
		}
	}
#endif	/* MINGW32 */

	if (failed)
		s_carp("%s(): failed for #%d: %m", G_STRFUNC, fd);
}
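/*
 * Usage sketch (not part of this file): make a freshly accepted connection
 * non-blocking before handing it to the I/O event loop.  The accept_one()
 * wrapper and its `listen_fd' argument are illustrative assumptions; only
 * fd_set_nonblocking() comes from the code above.
 */
static int
accept_one(int listen_fd)
{
	int sd = accept(listen_fd, NULL, NULL);

	if (sd >= 0)
		fd_set_nonblocking(sd);	/* I/O now returns EAGAIN instead of blocking */

	return sd;	/* caller registers it with the event loop, or gets -1 back */
}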
/**
 * Initializes `fp' with directory path `dir' and filename `name'.
 *
 * The directory must be an absolute path or the entry is initialized with NULL
 * values and a loud warning is emitted.
 */
void
file_path_set(file_path_t *fp, const char *dir, const char *name)
{
	g_assert(fp);
	g_assert(dir);
	g_assert(name);

	/*
	 * For robustness, we no longer assert that the path must be absolute.
	 *
	 * The open_read() routine will skip file_path_t entries in the vector
	 * that are NULL, the only requirement being that the first entry of the
	 * vector be non-NULL, i.e. an absolute path.
	 *
	 * Since this is an abnormal situation, loudly warn so that we may find
	 * out who the culprit is.
	 */

	if (!is_absolute_path(dir)) {
		s_carp("%s(): ignoring non-absolute path \"%s\" for \"%s\"",
			G_STRFUNC, dir, name);
		fp->dir = NULL;
		fp->name = NULL;
	} else {
		fp->dir = dir;
		fp->name = name;
	}
}
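/*
 * Usage sketch (illustrative, not part of this file): build a small lookup
 * vector whose entries are then tried in order by the open_read()-style
 * routine mentioned above.  The paths and the `config_dir' parameter are
 * assumptions made for the example only.
 */
static void
setup_config_paths(file_path_t fp[2], const char *config_dir)
{
	/* First entry must be an absolute path, as required above */
	file_path_set(&fp[0], "/etc/myapp", "config");
	file_path_set(&fp[1], config_dir, "config");
}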
/**
 * Shuffle array in-place.
 */
static void
entropy_array_shuffle(void *ary, size_t len, size_t elem_size)
{
	g_assert(ary != NULL);
	g_assert(size_is_non_negative(len));
	g_assert(size_is_positive(elem_size));

	if (len > RANDOM_SHUFFLE_MAX)
		s_carp("%s: cannot shuffle %zu items without bias", G_STRFUNC, len);

	shuffle_with((random_fn_t) entropy_rand31, ary, len, elem_size);
}
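/*
 * The bias warning above comes from the Fisher-Yates structure of the
 * shuffle: each step draws one index from the 31-bit entropy_rand31()
 * source, so once `len' grows past RANDOM_SHUFFLE_MAX some arrangements
 * can no longer be produced uniformly.  The sketch below only shows the
 * shape of such a pass; shuffle_sketch(), the modulo reduction and the
 * 64-byte temporary are assumptions, not the actual shuffle_with() code.
 */
static void
shuffle_sketch(random_fn_t rnd, void *ary, size_t len, size_t elem_size)
{
	char *base = ary;
	size_t i;

	g_assert(elem_size <= 64);

	for (i = len; i > 1; i--) {
		size_t j = (*rnd)() % i;	/* pick j in [0, i); slightly biased */
		char tmp[64];

		memcpy(tmp, &base[(i - 1) * elem_size], elem_size);
		memcpy(&base[(i - 1) * elem_size], &base[j * elem_size], elem_size);
		memcpy(&base[j * elem_size], tmp, elem_size);
	}
}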
static void
dbstore_unlink_file(const char *path, const char *ext)
{
	char *file = h_strconcat(path, ext, NULL_PTR);

	if (file_exists(file)) {
		if (-1 == unlink(file)) {
			s_carp("could not unlink \"%s\": %m", file);
		}
	}

	HFREE_NULL(file);
}
static void
dbstore_move_file(const char *old_path, const char *new_path, const char *ext)
{
	char *old_file = h_strconcat(old_path, ext, NULL_PTR);
	char *new_file = h_strconcat(new_path, ext, NULL_PTR);

	if (file_exists(old_file)) {
		if (-1 == rename(old_file, new_file)) {
			s_carp("could not rename \"%s\" as \"%s\": %m",
				old_file, new_file);
		}
	}

	HFREE_NULL(old_file);
	HFREE_NULL(new_file);
}
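/*
 * Usage sketch: relocating a persistent store made of several component
 * files sharing a base path.  The base paths and the extension list are
 * illustrative assumptions, not taken from the surrounding code.
 */
static void
relocate_store(void)
{
	static const char *exts[] = { ".pag", ".dir" };
	size_t i;

	for (i = 0; i < sizeof exts / sizeof exts[0]; i++)
		dbstore_move_file("/var/old/store", "/var/new/store", exts[i]);

	/* Drop a leftover temporary from a previously aborted run, if any */
	dbstore_unlink_file("/var/new/store", ".tmp");
}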
void
fd_set_close_on_exec(int fd)
{
#ifdef FD_CLOEXEC
	int flags;

	flags = fcntl(fd, F_GETFD);
	if (0 == (flags & FD_CLOEXEC)) {
		flags |= FD_CLOEXEC;
		if (-1 == fcntl(fd, F_SETFD, flags))
			s_carp("%s(): failed for #%d: %m", G_STRFUNC, fd);
	}
#else
	(void) fd;
#endif	/* FD_CLOEXEC */
}
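/*
 * Usage sketch (illustrative path and flags, not part of this file): a
 * long-lived descriptor that must not leak into child processes spawned
 * later via fork()/exec().
 */
static int
open_private_log(void)
{
	int fd = open("/var/log/app.log", O_WRONLY | O_APPEND);

	if (fd >= 0)
		fd_set_close_on_exec(fd);	/* children won't inherit this descriptor */

	return fd;
}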
/**
 * Generates the version string.  This function does not require any
 * initialization, thus may be called very early, e.g., for showing
 * version information if the executable was invoked with --version
 * as argument.
 *
 * @return A pointer to a static buffer holding the version string.
 */
const char * G_COLD
version_build_string(void)
{
	static bool initialized;
	static char buf[128];

	if (!initialized) {
		const char *sysname = "Unknown";
		const char *machine = NULL;

		initialized = TRUE;

#ifdef HAS_UNAME
		{
			static struct utsname un;	/* Must survive this scope */

			if (-1 != uname(&un)) {
				sysname = un.sysname;
				machine = un.machine;
			} else {
				s_carp("uname() failed: %m");
			}
		}
#endif	/* HAS_UNAME */

		str_bprintf(buf, sizeof buf,
			"%s/%s%s (%s; %s; %s%s%s)",
			product_name(), product_version(), product_build_full(),
			product_date(), product_interface(),
			sysname,
			machine && machine[0] ? " " : "",
			machine ? machine : "");
	}
	return buf;
}
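/*
 * Sketch of the early-startup use case described above.  The option check
 * and the printf() call stand in for the program's real argument parser;
 * only version_build_string() is taken from the code above.
 */
int
main(int argc, char **argv)
{
	if (argc > 1 && 0 == strcmp(argv[1], "--version")) {
		printf("%s\n", version_build_string());	/* safe before any initialization */
		return 0;
	}

	/* ... normal startup ... */
	return 0;
}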
/**
 * Free the callout queue and all contained event objects.
 */
static void
cq_free(cqueue_t *cq)
{
	cevent_t *ev;
	cevent_t *ev_next;
	int i;
	struct chash *ch;

	cqueue_check(cq);

	if (cq->cq_current != NULL) {
		s_carp("%s(): %squeue \"%s\" still within cq_clock()", G_STRFUNC,
			CSUBQUEUE_MAGIC == cq->cq_magic ? "sub" : "", cq->cq_name);
	}

	mutex_lock(&cq->cq_lock);

	for (ch = cq->cq_hash, i = 0; i < HASH_SIZE; i++, ch++) {
		for (ev = ch->ch_head; ev; ev = ev_next) {
			ev_next = ev->ce_bnext;
			ev->ce_magic = 0;
			WFREE(ev);
		}
	}

	if (cq->cq_periodic) {
		hset_foreach_remove(cq->cq_periodic, cq_free_periodic, NULL);
		hset_free_null(&cq->cq_periodic);
	}

	if (cq->cq_idle) {
		hset_foreach_remove(cq->cq_idle, cq_free_idle, cq);
		hset_free_null(&cq->cq_idle);
	}

	XFREE_NULL(cq->cq_hash);
	atom_str_free_null(&cq->cq_name);

	/*
	 * Unlocking the cq->cq_lock mutex (taken above) prevents a loud warning in
	 * mutex_destroy() in case the mutex was already locked by our thread,
	 * meaning we were already in cq_clock().  In that situation however,
	 * we already warned upon entry, and therefore there is no need for a
	 * second warning.
	 *
	 * If the mutex was not taken and someone else attempts to grab it at that
	 * stage, there will be a slight window which fortunately will be loudly
	 * detected by mutex_destroy(), as a case of a mutex being destroyed
	 * whilst owned by another thread.
	 *
	 * No valid application code should attempt to sneak in at this stage to
	 * grab that mutex anyway, so our logic is safe and we will be copiously
	 * warned if something unexpected happens.
	 *		--RAM, 2012-12-04.
	 */

	mutex_unlock(&cq->cq_lock);
	mutex_destroy(&cq->cq_lock);
	mutex_destroy(&cq->cq_idle_lock);

	/*
	 * If freeing a sub-queue, the object is a bit larger than a queue,
	 * and we have more cleanup to do...
	 */

	if (CSUBQUEUE_MAGIC == cq->cq_magic) {
		cq_subqueue_free((struct csubqueue *) cq);
	} else {
		cq->cq_magic = 0;
		WFREE(cq);
	}
}
/**
 * Common code for dbmw_foreach_trampoline() and
 * dbmw_foreach_remove_trampoline().
 */
static bool
dbmw_foreach_common(bool removing, void *key, dbmap_datum_t *d, void *arg)
{
	struct foreach_ctx *ctx = arg;
	dbmw_t *dw = ctx->dw;
	struct cached *entry;

	dbmw_check(dw);

	entry = map_lookup(dw->values, key);
	if (entry != NULL) {
		/*
		 * Key / value pair is present in the cache.
		 *
		 * This affects us in two ways:
		 *
		 *   - We may already know that the key was deleted, in which case
		 *     that entry is just skipped: no further access is possible
		 *     through DBMW until that key is recreated.  We still return
		 *     TRUE to make sure the lower layers will delete the entry
		 *     physically, since deletion has not been flushed yet (that's
		 *     the reason we're still iterating on it).
		 *
		 *   - Should the cached key need to be deleted (as determined by
		 *     the user callback), we make sure we delete the entry in the
		 *     cache upon callback return.
		 *
		 * Because we sync the cache (the dirty deleted items only), we should
		 * normally never iterate on a deleted entry, coming from the
		 * underlying database, hence we loudly complain!
		 */

		entry->traversed = TRUE;	/* Signal we iterated on cached value */

		if (entry->absent) {
			s_carp("%s(): DBMW \"%s\" iterating over a %s absent key in cache!",
				G_STRFUNC, dw->name, entry->dirty ? "dirty" : "clean");
			return TRUE;	/* Key was already deleted, info cached */
		}
		if (removing) {
			bool status;

			status = (*ctx->u.cbr)(key, entry->data, entry->len, ctx->arg);
			if (status) {
				entry->removable = TRUE;	/* Discard it after traversal */
			}
			return status;
		} else {
			(*ctx->u.cb)(key, entry->data, entry->len, ctx->arg);
			return FALSE;
		}
	} else {
		bool status = FALSE;
		void *data = d->data;
		size_t len = d->len;

		/*
		 * Deserialize data if needed, but do not cache this value.
		 * Iterating over the map must not disrupt the cache.
		 */

		if (dw->unpack) {
			len = dw->value_size;
			data = walloc(len);

			bstr_reset(dw->bs, d->data, d->len, BSTR_F_ERROR);

			if (!dbmw_deserialize(dw, dw->bs, data, len)) {
				s_critical("DBMW \"%s\" deserialization error in %s(): %s",
					dw->name, stacktrace_function_name(dw->unpack),
					bstr_error(dw->bs));
				/* Not calling value free routine on deserialization failures */
				wfree(data, len);
				return FALSE;
			}
		}

		if (removing) {
			status = (*ctx->u.cbr)(key, data, len, ctx->arg);
		} else {
			(*ctx->u.cb)(key, data, len, ctx->arg);
		}

		if (dw->unpack) {
			if (dw->valfree)
				(*dw->valfree)(data, len);
			wfree(data, len);
		}

		return status;
	}
}
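/*
 * To make the "removing" traversal mode concrete, here is a sketch of the
 * kind of callback ctx->u.cbr would point to: returning TRUE asks DBMW to
 * drop the entry, both in the cache (entry->removable above) and in the
 * underlying map.  The my_record type, its field and the expiry rule are
 * illustrative assumptions, not part of the DBMW API.
 */
struct my_record {
	time_t last_seen;
};

static bool
prune_stale_record(void *unused_key, void *value, size_t unused_len, void *arg)
{
	const struct my_record *r = value;
	time_t now = *(time_t *) arg;

	(void) unused_key;
	(void) unused_len;

	return now - r->last_seen > 3600;	/* drop entries idle for over an hour */
}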