/**
 * Move the entry associated with `key' to the tail of the list.
 *
 * The hash list must not be iterated over (refcount of 1) and must
 * contain at least one item; the key must be present.
 */
void
hash_list_moveto_tail(hash_list_t *hl, const void *key)
{
	struct hash_list_item *item;

	hash_list_check(hl);
	g_assert(1 == hl->refcount);
	g_assert(size_is_positive(elist_count(&hl->list)));

	item = hikset_lookup(hl->ht, key);
	g_assert(item != NULL);

	/*
	 * Unless the item already sits at the tail, unlink it and
	 * re-attach it at the end of the list.
	 */

	if (&item->lnk != elist_last(&hl->list)) {
		elist_link_remove(&hl->list, &item->lnk);
		elist_link_append(&hl->list, &item->lnk);
	}

	hl->stamp++;			/* Concurrent iteration will fail */
	hash_list_regression(hl);
}
/**
 * Set the maximum line length used when formatting headers.
 *
 * @param hf		the header formatting object
 * @param maxlen	new maximum line length (must be strictly positive)
 */
void
header_fmt_set_line_length(header_fmt_t *hf, size_t maxlen)
{
	header_fmt_check(hf);
	g_assert(size_is_positive(maxlen));

	hf->maxlen = maxlen;
}
/**
 * Assert that `stg' is a live, well-formed security token generator.
 */
static inline void
sectoken_gen_check(const sectoken_gen_t * const stg)
{
	g_assert(stg != NULL);
	g_assert(SECTOKEN_GEN_MAGIC == stg->magic);
	g_assert(size_is_positive(stg->keycnt));
	g_assert(stg->keys != NULL);
}
/**
 * Escape text string, returning a newly allocated string.
 *
 * Replaces '&' with "&amp;" (when requested), "'" with "&apos;" (when
 * requested) and '<' / '>' with "&lt;" / "&gt;".  The caller must have
 * pre-computed the exact escaped length in `newlen'.
 *
 * @param text		text with characters to escape (NUL-terminated)
 * @param amp		whether '&' also needs to be escaped
 * @param apos		whether single quotes also need to be escaped
 * @param newlen	computed length for the escaped string
 *
 * @return escaped string, which must be freed via hfree().
 */
static char *
xfmt_text_escape(const char *text, bool amp, bool apos, size_t newlen)
{
	char *newtext;
	const char *p;
	char *q;
	char *end;
	int c;

	g_assert(text != NULL);
	g_assert(size_is_positive(newlen));

	newtext = halloc(newlen + 1);		/* Trailing NUL */
	p = text;
	q = newtext;
	end = newtext + (newlen + 1);

	/*
	 * Text is assumed to be valid UTF-8, and since we are looking for ASCII
	 * characters, there's no need to decode the UTF-8 encoding.
	 */

	while ('\0' != (c = *p++)) {
		if (amp && '&' == c) {
			/*
			 * BUG FIX: the capacity assertions must account for the full
			 * entity being emitted (5 bytes for "&amp;", 6 for "&apos;",
			 * 4 for "&lt;"/"&gt;"), not for the single un-escaped character,
			 * otherwise they do not actually validate the pre-computed
			 * `newlen' against what is written.
			 */
			g_assert(q + CONST_STRLEN("&amp;") < end);
			*q++ = '&';
			*q++ = 'a';
			*q++ = 'm';
			*q++ = 'p';
			*q++ = ';';
		} else if (apos && '\'' == c) {
			g_assert(q + CONST_STRLEN("&apos;") < end);
			*q++ = '&';
			*q++ = 'a';
			*q++ = 'p';
			*q++ = 'o';
			*q++ = 's';
			*q++ = ';';
		} else if ('<' == c || '>' == c) {
			/* "&lt;" and "&gt;" have the same 4-byte length */
			g_assert(q + CONST_STRLEN("&lt;") < end);
			*q++ = '&';
			*q++ = ('<' == c) ? 'l' : 'g';
			*q++ = 't';
			*q++ = ';';
		} else {
			*q++ = c;
		}
	}

	g_assert(q < end);
	g_assert(q + 1 == end);		/* Overhead was properly computed */

	*q++ = '\0';

	return newtext;
}
/**
 * Create a security token from host address, port and contextual data.
 *
 * This is a convenience wrapper around sectoken_generate_n() using the
 * most recent key (index 0).
 *
 * @param stg		the security token generator
 * @param tok		where security token is written
 * @param addr		address of the host for which we're generating a token
 * @param port		port of the host for which we're generating a token
 * @param data		contextual data
 * @param len		length of contextual data
 */
void
sectoken_generate_with_context(sectoken_gen_t *stg,
	sectoken_t *tok, host_addr_t addr, uint16 port,
	const void *data, size_t len)
{
	g_assert(NULL != data);
	g_assert(size_is_positive(len));

	sectoken_generate_n(stg, 0, tok, addr, port, data, len);
}
/**
 * Shuffle array in-place, using entropy_rand31() as the random source.
 *
 * @param ary		base of the array
 * @param len		amount of items in the array (may be zero)
 * @param elem_size	size of each item, in bytes
 */
static void
entropy_array_shuffle(void *ary, size_t len, size_t elem_size)
{
	g_assert(ary != NULL);
	g_assert(size_is_non_negative(len));
	g_assert(size_is_positive(elem_size));

	/* Too many items cannot be permuted uniformly with a 31-bit source */
	if (len > RANDOM_SHUFFLE_MAX) {
		s_carp("%s: cannot shuffle %zu items without bias",
			G_STRFUNC, len);
	}

	shuffle_with((random_fn_t) entropy_rand31, ary, len, elem_size);
}
/**
 * Extract the URN from a /Q2/URN and populate the search request info
 * if it is a SHA1 (or bitprint, which contains a SHA1).
 *
 * @param t		the /Q2/URN tree node
 * @param sri	the search request info being filled in
 */
static void
g2_node_extract_urn(const g2_tree_t *t, search_request_info_t *sri)
{
	const char *p;
	size_t paylen;
	uint i;

	/*
	 * If we have more SHA1s already than we can hold, stop.
	 */

	if (sri->exv_sha1cnt == N_ITEMS(sri->exv_sha1))
		return;

	p = g2_tree_node_payload(t, &paylen);

	if (NULL == p)
		return;		/* No payload in the node, nothing to extract */

	/*
	 * We can only search by SHA1, hence we're only interested by URNs
	 * that contain a SHA1.
	 */

	if (paylen < SHA1_RAW_SIZE)
		return;		/* Cannot contain a SHA1 */

	/*
	 * Since we know there are at least SHA1_RAW_SIZE bytes in the payload,
	 * we can use clamp_memcmp() to see whether we have a known prefix.
	 */

	for (i = 0; i < N_ITEMS(g2_q2_urn); i++) {
		const char *prefix = g2_q2_urn[i];
		size_t len = vstrlen(prefix) + 1;	/* Wants trailing NUL as well */

		if (0 == clamp_memcmp(prefix, len, p, paylen)) {
			/* Skip the matched prefix; remaining bytes hold the raw SHA1 */
			p += len;
			paylen -= len;

			g_assert(size_is_positive(paylen));

			if (paylen >= SHA1_RAW_SIZE) {
				uint idx = sri->exv_sha1cnt++;

				g_assert(idx < N_ITEMS(sri->exv_sha1));

				memcpy(&sri->exv_sha1[idx].sha1, p, SHA1_RAW_SIZE);
			}
			break;		/* At most one prefix can match */
		}
	}
}
/**
 * Remove current iterator item, returned by hikset_iter_next().
 *
 * The item is tombstoned in the key set; physical reclaiming is deferred.
 */
void
hikset_iter_remove(hikset_iter_t *hxi)
{
	hikset_t *hs;
	size_t slot;

	hikset_iter_check(hxi);
	g_assert(size_is_positive(hxi->pos));	/* Called _next() once */
	g_assert(hxi->pos <= hxi->hx->kset.size);

	hs = deconstify_pointer(hxi->hx);
	slot = hxi->pos - 1;			/* Current item */

	if (hash_keyset_erect_tombstone(&hs->kset, slot))
		hs->kset.items--;

	hxi->deleted = TRUE;
}
/**
 * Create a new security token generator.
 *
 * @param keys		amount of keys to keep in the rotation window
 * @param refresh	key rotation period, in seconds
 *
 * @return a new generator whose keys are initialized randomly and which
 * rotates them every `refresh' seconds.
 */
sectoken_gen_t *
sectoken_gen_new(size_t keys, time_delta_t refresh)
{
	sectoken_gen_t *stg;
	size_t n;

	g_assert(size_is_positive(keys));

	WALLOC0(stg);
	stg->magic = SECTOKEN_GEN_MAGIC;
	stg->keys = walloc(keys * sizeof stg->keys[0]);
	stg->keycnt = keys;
	stg->refresh = refresh;

	for (n = 0; n < stg->keycnt; n++)
		random_bytes(&stg->keys[n], sizeof(stg->keys[0]));

	/* Schedule periodic key rotation (callout queue works in ms) */
	stg->rotate_ev = cq_main_insert(refresh * 1000, sectoken_rotate, stg);

	return stg;
}
/**
 * Write buffer to the TLS socket, wrap_io writing vector.
 *
 * Any previously buffered TLS output is flushed first; the actual write
 * is only attempted when the flush fully succeeded (returned 0).
 *
 * @return amount of bytes consumed, or -1 on error with errno set
 * (presumably by the lower layers — tls_flush()/tls_write_intern()).
 */
static ssize_t
tls_write(struct wrap_io *wio, const void *buf, size_t size)
{
	struct gnutella_socket *s = wio->ctx;
	ssize_t ret;

	socket_check(s);
	g_assert(socket_uses_tls(s));
	g_assert(NULL != buf);
	g_assert(size_is_positive(size));

	/*
	 * Flush pending data first; only write when nothing remains buffered.
	 * A non-zero flush result is propagated to the caller as-is.
	 */

	ret = tls_flush(wio);
	if (0 == ret) {
		ret = tls_write_intern(wio, buf, size);
		if (s->gdk_tag) {
			/* Socket is monitored: update the watched I/O condition */
			tls_socket_evt_change(s, INPUT_EVENT_WX);
		}
	}

	g_assert(ret == (ssize_t) -1 || (size_t) ret <= size);

	tls_signal_pending(s);
	return ret;
}
/**
 * Collect entropy by randomly feeding values from array.
 *
 * The array is first shuffled in place, then each item is fed to the
 * SHA1 context according to the declared data type.
 *
 * @param ctx		the SHA1 context collecting entropy
 * @param data		the type of items stored in the array
 * @param ary		base of the array
 * @param len		amount of items (may be zero)
 * @param elem_size	size of each item, in bytes
 */
static void
entropy_array_data_collect(SHA1Context *ctx,
	enum entropy_data data, void *ary, size_t len, size_t elem_size)
{
	size_t n;

	g_assert(ctx != NULL);
	g_assert(ary != NULL);
	g_assert(size_is_non_negative(len));
	g_assert(size_is_positive(elem_size));

	entropy_array_shuffle(ary, len, elem_size);

	for (n = 0; n < len; n++) {
		void *item = ptr_add_offset(ary, n * elem_size);

		switch (data) {
		case ENTROPY_ULONG:
			sha1_feed_ulong(ctx, *(unsigned long *) item);
			break;
		case ENTROPY_STRING:
			sha1_feed_string(ctx, *(char **) item);
			break;
		case ENTROPY_STAT:
			sha1_feed_stat(ctx, *(char **) item);
			break;
		case ENTROPY_FSTAT:
			sha1_feed_fstat(ctx, *(int *) item);
			break;
		case ENTROPY_DOUBLE:
			sha1_feed_double(ctx, *(double *) item);
			break;
		case ENTROPY_POINTER:
			sha1_feed_pointer(ctx, *(void **) item);
			break;
		}
	}
}
/**
 * Remove node from tree.
 *
 * Standard red-black deletion: the node is unlinked (its in-order
 * successor replacing it when it has two children), then the tree is
 * rebalanced if a black node was effectively removed.
 *
 * @attention
 * It is assumed that the node is already part of the tree.
 */
void G_HOT
erbtree_remove(erbtree_t *tree, rbnode_t *node)
{
	rbnode_t *removed = node;
	rbnode_t *parent = get_parent(node);
	rbnode_t *left = node->left;
	rbnode_t *right = node->right;
	rbnode_t *next;
	enum rbcolor color;

	erbtree_check(tree);
	g_assert(size_is_positive(tree->count));
	g_assert(node != NULL);
	g_assert(is_valid(node));	/* Does not verify it is part of THIS tree */

	tree->count--;

	/* Maintain the cached first/last (min/max) pointers */

	if (node == tree->first)
		tree->first = erbtree_next(node);
	if (node == tree->last)
		tree->last = erbtree_prev(node);

	/*
	 * Select the node that will take the removed node's place:
	 * the single child when there is at most one, otherwise the
	 * in-order successor (leftmost node of the right subtree).
	 */

	if (NULL == left)
		next = right;
	else if (NULL == right)
		next = left;
	else
		next = get_first(right);

	if (parent != NULL)
		set_child(parent, next, parent->left == node);
	else
		tree->root = next;

	if (left != NULL && right != NULL) {
		/*
		 * Two children: the successor inherits the removed node's color
		 * and position; `color' records the color effectively removed
		 * from the tree (the successor's original one).
		 */
		color = get_color(next);
		set_color(next, get_color(node));
		next->left = left;
		set_parent(left, next);
		if (next != right) {
			/* Successor was deeper in the right subtree */
			parent = get_parent(next);
			set_parent(next, get_parent(node));
			node = next->right;
			parent->left = node;
			next->right = right;
			set_parent(right, next);
		} else {
			/* Successor is the right child itself */
			set_parent(next, parent);
			parent = next;
			node = next->right;
		}
	} else {
		color = get_color(node);
		node = next;
	}

	/*
	 * 'node' is now the sole successor's child and 'parent' its
	 * new parent (since the successor can have been moved).
	 */

	if (node != NULL)
		set_parent(node, parent);

	invalidate(removed);

	/*
	 * The "easy" cases: removing a red node, or a black node with a
	 * red child, cannot violate the black-height invariant.
	 */

	if (color == RB_RED)
		return;
	if (node != NULL && is_red(node)) {
		set_color(node, RB_BLACK);
		return;
	}

	/*
	 * Rebalancing loop: `node' carries an extra "black" that must be
	 * absorbed, either by recoloring or by rotations around its sibling.
	 */

	do {
		if (node == tree->root)
			break;
		if (node == parent->left) {
			rbnode_t *sibling = parent->right;

			if (is_red(sibling)) {
				/* Turn the red sibling case into one of the black cases */
				set_color(sibling, RB_BLACK);
				set_color(parent, RB_RED);
				rotate_left(tree, parent);
				sibling = parent->right;
			}
			if (
				(NULL == sibling->left || is_black(sibling->left)) &&
				(NULL == sibling->right || is_black(sibling->right))
			) {
				/* Both nephews black: push the problem up the tree */
				set_color(sibling, RB_RED);
				node = parent;
				parent = get_parent(parent);
				continue;
			}
			if (NULL == sibling->right || is_black(sibling->right)) {
				/* Inner nephew red: rotate to expose a red outer nephew */
				set_color(sibling->left, RB_BLACK);
				set_color(sibling, RB_RED);
				rotate_right(tree, sibling);
				sibling = parent->right;
			}
			/* Outer nephew red: final rotation restores the invariant */
			set_color(sibling, get_color(parent));
			set_color(parent, RB_BLACK);
			set_color(sibling->right, RB_BLACK);
			rotate_left(tree, parent);
			node = tree->root;
			break;
		} else {
			/* Mirror image of the above, node being a right child */
			rbnode_t *sibling = parent->left;

			if (is_red(sibling)) {
				set_color(sibling, RB_BLACK);
				set_color(parent, RB_RED);
				rotate_right(tree, parent);
				sibling = parent->left;
			}
			if (
				(NULL == sibling->left || is_black(sibling->left)) &&
				(NULL == sibling->right || is_black(sibling->right))
			) {
				set_color(sibling, RB_RED);
				node = parent;
				parent = get_parent(parent);
				continue;
			}
			if (NULL == sibling->left || is_black(sibling->left)) {
				set_color(sibling->right, RB_BLACK);
				set_color(sibling, RB_RED);
				rotate_left(tree, sibling);
				sibling = parent->left;
			}
			set_color(sibling, get_color(parent));
			set_color(parent, RB_BLACK);
			set_color(sibling->left, RB_BLACK);
			rotate_right(tree, parent);
			node = tree->root;
			break;
		}
	} while (is_black(node));

	if (node != NULL)
		set_color(node, RB_BLACK);
}
/**
 * Free a block from file.
 *
 * Corruption of the .pag file is handled gracefully (with warnings)
 * rather than with assertions, since the block number comes from data
 * read back from disk.
 *
 * @param db	the sdbm database
 * @param bno	the block number to mark as free in the bitmap
 */
static void
big_ffree(DBM *db, size_t bno)
{
	DBMBIG *dbg = db->big;
	long bmap;
	size_t i;

	STATIC_ASSERT(IS_POWER_OF_2(BIG_BITCOUNT));

	/* Lazily open the .dat file descriptor if needed */

	if (-1 == dbg->fd && -1 == big_open(dbg)) {
		g_warning("sdbm: \"%s\": cannot free block #%ld",
			sdbm_name(db), (long) bno);
		return;
	}

	/*
	 * Block number must be positive, and we cannot free a bitmap block.
	 * If we end-up doing it, then it means data in the .pag was corrupted,
	 * so we do not assert but fail gracefully.
	 */

	if (!size_is_positive(bno) || 0 == (bno & (BIG_BITCOUNT - 1))) {
		g_warning("sdbm: \"%s\": attempt to free invalid block #%ld",
			sdbm_name(db), (long) bno);
		return;
	}

	g_assert(size_is_positive(bno));	/* Can never free block 0 (bitmap!) */
	g_assert(bno & (BIG_BITCOUNT - 1));	/* Cannot be a bitmap block */

	bmap = bno / BIG_BITCOUNT;			/* Bitmap handling this block */
	i = bno & (BIG_BITCOUNT - 1);		/* Index within bitmap */

	/*
	 * Likewise, if the block falls in a bitmap we do not know about yet,
	 * the .pag was corrupted.
	 */

	if (bmap >= dbg->bitmaps) {
		g_warning("sdbm: \"%s\": "
			"freed block #%ld falls within invalid bitmap #%ld (max %ld)",
			sdbm_name(db), (long) bno, bmap, dbg->bitmaps - 1);
		return;
	}

	if (!fetch_bitbuf(db, bmap))
		return;		/* Could not load the bitmap page into the buffer */

	/*
	 * Again, freeing a block that is already marked as being freed is
	 * a severe error but can happen if the bitmap cannot be flushed to disk
	 * at some point, hence it cannot be an assertion.
	 */

	if (!bit_field_get(dbg->bitbuf, i)) {
		g_warning("sdbm: \"%s\": freed block #%ld was already marked as free",
			sdbm_name(db), (long) bno);
		return;
	}

	bit_field_clear(dbg->bitbuf, i);
	dbg->bitbuf_dirty = TRUE;	/* Bitmap will be flushed to disk later */
}
/**
 * Parse a "magnet:" URI into a newly allocated magnet resource.
 *
 * @param url		the URI to parse (NUL-terminated)
 * @param error_str	where an error message is written on failure
 *					(presumably may be NULL — clear_error_str() appears
 *					to handle that case; verify against its definition)
 *
 * @return a wcopy()-allocated magnet resource on success, NULL on error
 * with *error_str pointing to a static error message.
 */
struct magnet_resource *
magnet_parse(const char *url, const char **error_str)
{
	static const struct magnet_resource zero_resource;
	struct magnet_resource res;
	const char *p, *next;

	res = zero_resource;
	clear_error_str(&error_str);

	p = is_strcaseprefix(url, "magnet:");
	if (!p) {
		*error_str = "Not a MAGNET URI";
		return NULL;
	}

	if ('?' != p[0]) {
		*error_str = "Invalid MAGNET URI";
		return NULL;
	}
	p++;		/* Skip the '?' */

	/*
	 * Iterate over the "key=value" pairs, `next' being advanced past
	 * the '&' separator(s) at the bottom of the loop.
	 */

	for (/* NOTHING */; p && '\0' != p[0]; p = next) {
		enum magnet_key key;
		const char *endptr;
		char name[16];	/* Large enough to hold longest key we know */

		name[0] = '\0';
		endptr = strchr(p, '=');
		if (endptr && p != endptr) {
			size_t name_len;

			name_len = endptr - p;
			g_assert(size_is_positive(name_len));

			if (name_len < sizeof name) {	/* Ignore overlong key */
				/* name[] starts empty, so strncat() copies + terminates */
				strncat(name, p, name_len);
			}
			p = &endptr[1];		/* Point behind the '=' */
		}

		/* Value extends to the next '&', or to the end of the string */

		endptr = strchr(p, '&');
		if (!endptr) {
			endptr = strchr(p, '\0');
		}

		key = magnet_key_get(name);
		if (MAGNET_KEY_NONE == key) {
			g_message("skipping unknown key \"%s\" in MAGNET URI", name);
		} else {
			char *value;
			size_t value_len;

			value_len = endptr - p;
			value = h_strndup(p, value_len);

			/* Values are form-encoded: '+' means space, then URL-escaped */
			plus_to_space(value);
			if (url_unescape(value, TRUE)) {
				magnet_handle_key(&res, name, value);
			} else {
				g_message("badly encoded value in MAGNET URI: \"%s\"", value);
			}
			HFREE_NULL(value);
		}

		/* Skip consecutive '&' separators before the next pair */

		while ('&' == endptr[0]) {
			endptr++;
		}
		next = endptr;
	}

	/* Lists were prepended to during parsing; restore original order */

	res.sources = g_slist_reverse(res.sources);
	res.searches = g_slist_reverse(res.searches);

	return wcopy(&res, sizeof res);
}
/**
 * Read from the TLS socket, wrap_io reading vector.
 *
 * Pending TLS output is flushed first, then a TLS record is read and
 * gnutls error codes are mapped onto regular errno values so that upper
 * layers can treat this like a plain read().
 *
 * @return amount of bytes read, 0 on EOF, or -1 on error with errno set.
 */
static ssize_t
tls_read(struct wrap_io *wio, void *buf, size_t size)
{
	struct gnutella_socket *s = wio->ctx;
	ssize_t ret;

	socket_check(s);
	g_assert(socket_uses_tls(s));
	g_assert(NULL != buf);
	g_assert(size_is_positive(size));

	/*
	 * A failed flush is only fatal when the error is not transient;
	 * temporary errors let us proceed with the read attempt.
	 */

	if (tls_flush(wio) && !is_temporary_error(errno)) {
		if (GNET_PROPERTY(tls_debug)) {
			g_warning("%s(): tls_flush(fd=%d) error: %m",
				G_STRFUNC, s->file_desc);
		}
		return -1;
	}

	ret = gnutls_record_recv(tls_socket_get_session(s), buf, size);
	if (ret < 0) {
		/* Map gnutls error codes onto errno values */
		switch (ret) {
		case GNUTLS_E_INTERRUPTED:
		case GNUTLS_E_AGAIN:
			errno = VAL_EAGAIN;
			break;
		case GNUTLS_E_PULL_ERROR:
		case GNUTLS_E_PUSH_ERROR:
			/* Logging already done by tls_transport_debug() */
			errno = (SOCK_F_CONNRESET & s->flags) ? ECONNRESET : EIO;
			break;
		case GNUTLS_E_UNEXPECTED_PACKET_LENGTH:
			if (SOCK_F_EOF & s->flags) {
				/*
				 * Remote peer has hung up.
				 *
				 * This is not exceptional, so we make it appear to upper
				 * layers (who do not necessarily know they're dealing with
				 * a TLS socket) as a regular EOF condition: the read()
				 * operation return 0.
				 */
				ret = 0;
				goto no_error;
			} else if (SOCK_F_CONNRESET & s->flags) {
				errno = ECONNRESET;
				break;
			}
			/* FALLTHROUGH */
		default:
			if (GNET_PROPERTY(tls_debug)) {
				g_carp("tls_read(): gnutls_record_recv(fd=%d) failed: "
					"host=%s error=\"%s\"",
					s->file_desc,
					host_addr_port_to_string(s->addr, s->port),
					gnutls_strerror(ret));
			}
			errno = EIO;
		}
		ret = -1;	/* All error paths (bar EOF above) report -1 */
	}

no_error:

	/* Socket is monitored and nothing left to flush: watch for input */

	if (s->gdk_tag && 0 == s->tls.snarf) {
		tls_socket_evt_change(s, INPUT_EVENT_RX);
	}

	g_assert(ret == (ssize_t) -1 || (size_t) ret <= size);

	tls_signal_pending(s);
	return ret;
}
/**
 * Retrieve known GWC URLs.
 * They are normally saved in ~/.gtk-gnutella/gwcache.
 *
 * When nothing can be loaded from the chosen file, the remaining
 * fallback locations (as returned by settings_file_path_load()) are
 * tried in turn.
 */
static void
gwc_retrieve(void)
{
	file_path_t fp[4], *fpv;
	uint len, added;
	int line, idx;
	FILE *in;
	char tmp[1024];

	len = settings_file_path_load(fp, gwc_file, SFP_ALL);

	g_assert(len <= N_ITEMS(fp));

	fpv = &fp[0];

retry:
	g_assert(ptr_cmp(fpv, &fp[N_ITEMS(fp)]) < 0);

	/* Only the primary location goes through the renaming variant */

	if (&fp[0] == fpv)
		in = file_config_open_read_chosen(gwc_what, fpv, len, &idx);
	else
		in = file_config_open_read_norename_chosen(gwc_what, fpv, len, &idx);

	if (NULL == in)
		return;

	/*
	 * Retrieve each line, counting the amount of entries added.
	 */

	line = 0;
	added = 0;

	while (fgets(tmp, sizeof(tmp), in)) {
		line++;

		if (tmp[0] == '#')		/* Skip comments */
			continue;

		if (tmp[0] == '\n')		/* Allow empty lines */
			continue;

		(void) strchomp(tmp, 0);
		if (gwc_add(tmp))
			added++;
	}

	fclose(in);

	/*
	 * Now check whether we added anything from that file, and if we have not
	 * and there are more backup files to open, retry with these fallbacks
	 * instead.
	 */

	if (0 == added && UNSIGNED(idx) < len - 1) {
		g_warning("%s(): nothing loaded from \"%s/%s\", trying fallbacks",
			G_STRFUNC, fpv[idx].dir, fpv[idx].name);
		/* Skip past the file we just tried, shrinking the candidate list */
		fpv += idx + 1;
		len -= idx + 1;
		g_assert(size_is_positive(len));
		goto retry;
	} else {
		if (GNET_PROPERTY(bootstrap_debug)) {
			g_debug("%s(): loaded %u URL%s from \"%s/%s\"",
				G_STRFUNC, added, plural(added),
				fpv[idx].dir, fpv[idx].name);
		}
	}
}