static int
erbtree_cmp(erbtree_t *tree, rbnode_t *a, rbnode_t *b)
{
	const void *p = const_ptr_add_offset(a, -tree->offset);
	const void *q = const_ptr_add_offset(b, -tree->offset);

	if (erbtree_is_extended(tree)) {
		erbtree_ext_t *etree = ERBTREE_E(tree);
		return (*etree->u.dcmp)(p, q, etree->data);
	} else {
		return (*tree->u.cmp)(p, q);
	}
}
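/*
 * Illustrative sketch (not part of the original source): the embedded-node
 * pattern that erbtree_cmp() relies on.  Items embed their rbnode_t and
 * `tree->offset' records where that field sits inside the item, so the tree
 * can translate between node pointers and item pointers with a mere offset.
 * The structure and comparison routine below are hypothetical.
 */

struct point {
	int x;				/* The key */
	rbnode_t node;		/* Embedded node; offset = offsetof(struct point, node) */
};

static int
point_cmp(const void *a, const void *b)
{
	const struct point *pa = a, *pb = b;

	return pa->x == pb->x ? 0 : pa->x < pb->x ? -1 : +1;
}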
/**
 * Mark blocks in the supplied vector as allocated in the checking bitmap.
 *
 * @param db	the sdbm database
 * @param bvec	vector where allocated block numbers are stored
 * @param bcnt	amount of blocks in vector
 */
static void
big_file_mark_used(DBM *db, const void *bvec, int bcnt)
{
	DBMBIG *dbg = db->big;
	const void *q;
	int n;

	if (!big_check_start(db))
		return;

	for (q = bvec, n = bcnt; n > 0; n--) {
		size_t bno = peek_be32(q);
		bit_field_t *map;
		long bmap;
		size_t bit;

		bmap = bno / BIG_BITCOUNT;			/* Bitmap handling this block */
		bit = bno & (BIG_BITCOUNT - 1);		/* Index within bitmap */
		q = const_ptr_add_offset(q, sizeof(guint32));

		/*
		 * It's because of this sanity check that we don't want to consider
		 * the bitcheck field as a huge continuous map.  Also doing that would
		 * violate the encapsulation: we're not supposed to know how bits are
		 * allocated in the field.
		 */

		if (bmap >= dbg->bitmaps)
			continue;

		map = ptr_add_offset(dbg->bitcheck, bmap * BIG_BLKSIZE);
		bit_field_set(map, bit);
	}
}
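/*
 * Worked example (an illustration, not from the original source): how a
 * block number splits into a bitmap index and a bit index, assuming
 * BIG_BITCOUNT is a power of two.  For instance, with BIG_BITCOUNT = 8192:
 *
 *     bno  = 20000
 *     bmap = 20000 / 8192 = 2        (third bitmap)
 *     bit  = 20000 & 8191 = 3616     (position within that bitmap)
 *
 * The division selects the bitmap page and the masking keeps the low-order
 * bits as the position inside it.
 */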
/**
 * Look whether the datagram we received is a valid Gnutella packet.
 *
 * The routine also handles traffic statistics (reception and dropping).
 *
 * @param n			the pseudo UDP reception node (NULL if invalid IP:port)
 * @param s			the socket on which we got the UDP datagram
 * @param truncated	whether datagram was truncated during reception
 * @param start		start of message (header + payload following)
 * @param len		total length of message
 *
 * @return TRUE if valid, FALSE otherwise.
 */
static bool
udp_is_valid_gnet(gnutella_node_t *n, const gnutella_socket_t *s,
	bool truncated, const void *start, size_t len)
{
	return udp_is_valid_gnet_split(n, s, truncated, start,
		const_ptr_add_offset(start, GTA_HEADER_SIZE), len);
}
/**
 * Fill supplied buffer with the formatted string describing the message.
 *
 * @param data		start of the G2 message
 * @param len		length of the message
 * @param buf		buffer where formatted string is written
 * @param buflen	length of the destination buffer
 *
 * @return the amount of bytes written.
 */
size_t
g2_msg_infostr_to_buf(const void *data, size_t len, char *buf, size_t buflen)
{
	enum g2_msg m;
	const guid_t *muid = NULL;

	g_assert(size_is_non_negative(len));
	g_assert(size_is_non_negative(buflen));

	/*
	 * Check whether we need to decompile the packet to access the GUID, which
	 * is the payload of the root element in the tree.  Given the way things
	 * are serialized, that would be the last 16 bytes of the message, so
	 * we don't have to deserialize everything just to access it.
	 */

	m = g2_msg_type(data, len);

	switch (m) {
	case G2_MSG_Q2:
	case G2_MSG_QA:
	case G2_MSG_QH2:
		if (len > GUID_RAW_SIZE)
			muid = const_ptr_add_offset(data, len - GUID_RAW_SIZE);
		/* FALL THROUGH */
	default:
		break;
	}

	return str_bprintf(buf, buflen, "/%s (%zu byte%s)%s%s",
		g2_msg_type_name(m), len, plural(len),
		NULL == muid ? "" : " #",
		NULL == muid ? "" : guid_hex_str(muid));
}
/**
 * Deserialization convenience for IP:port.
 *
 * The supplied buffer must hold either 6 or 18 more bytes of data, depending
 * on the address type we want to deserialize.
 */
void
host_ip_port_peek(const void *p, enum net_type nt,
	host_addr_t *addr, uint16 *port)
{
	const void *q = p;

	if (NET_TYPE_IPV4 == nt) {
		*addr = host_addr_peek_ipv4(q);
		q = const_ptr_add_offset(q, 4);
	} else if (NET_TYPE_IPV6 == nt) {
		*addr = host_addr_peek_ipv6(q);
		q = const_ptr_add_offset(q, 16);
	} else {
		/* Can only deserialize IPv4:port or IPv6:port */
		g_assert_not_reached();
	}

	*port = peek_le16(q);
}
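/*
 * Illustrative sketch (not part of the original source): deserializing an
 * IPv4 host from a 6-byte wire buffer.  The buffer contents below are
 * hypothetical.
 */

static void
example_peek_ipv4(void)
{
	/* 4 bytes of IPv4 address followed by a little-endian port */
	const uint8 wire[6] = { 192, 0, 2, 1, 0x1f, 0x04 };	/* 192.0.2.1:1055 */
	host_addr_t addr;
	uint16 port;

	host_ip_port_peek(wire, NET_TYPE_IPV4, &addr, &port);
}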
/** * Is item an "standalone" node? (no parent, no sibling, no children) */ bool etree_is_standalone(const etree_t *tree, const void *item) { const node_t *n; etree_check(tree); n = const_ptr_add_offset(item, tree->offset); return NULL == n->parent && NULL == n->sibling && NULL == n->child; }
/** * Is item an "orphan" node? (no parent, no sibling) */ bool etree_is_orphan(const etree_t *tree, const void *item) { const node_t *n; etree_check(tree); n = const_ptr_add_offset(item, tree->offset); return NULL == n->parent && NULL == n->sibling; }
/**
 * Is item the root node?
 */
bool
etree_is_root(const etree_t *tree, const void *item)
{
	const node_t *n;

	etree_check(tree);

	n = const_ptr_add_offset(item, tree->offset);
	return NULL == n->parent && n == tree->root;
}
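/*
 * Illustrative sketch (not from the original source): the etree predicates
 * above map an item back to its embedded node_t through `tree->offset'.
 * The structure below is hypothetical.
 */

struct file_entry {
	const char *name;
	node_t node;		/* Embedded tree linkage */
};

/* A tree over file_entry items would be set up with
 * tree->offset == offsetof(struct file_entry, node), after which
 * etree_is_root(tree, entry) can reach the node_t inside `entry'. */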
/**
 * Locate a symbol at the given address.
 *
 * @param bc	the BFD context retrieved by bfd_util_get_context()
 * @param addr	the address of the symbol
 * @param loc	where location information is returned
 *
 * @return TRUE if the symbol address was located.
 */
bool
bfd_util_locate(bfd_ctx_t *bc, const void *addr, struct symbol_loc *loc)
{
	struct symbol_ctx sc;
	const void *lookaddr;
	const char *name;

	g_assert(loc != NULL);

	if G_UNLIKELY(NULL == bc)
		return FALSE;

	bfd_ctx_check(bc);

	mutex_lock_fast(&bc->lock);

	ZERO(&sc);
	lookaddr = const_ptr_add_offset(addr, bc->offset);
	sc.addr = pointer_to_ulong(lookaddr);
	sc.symbols = bc->symbols;

	bfd_map_over_sections(bc->handle, bfd_util_lookup_section, &sc);

	if (sc.location.function != NULL) {
		*loc = sc.location;		/* Struct copy */
		mutex_unlock_fast(&bc->lock);
		return TRUE;
	}

	/*
	 * For some reason the BFD library successfully loads symbols but is not
	 * able to locate them through bfd_map_over_sections().
	 *
	 * Load the symbol table ourselves and perform the lookup then.  We will
	 * only be able to fill in the routine name, and not the source code
	 * information, but that is better than nothing.
	 */

	if (NULL == bc->text_symbols) {
		bc->text_symbols = symbols_make(bc->count, FALSE);
		bfd_util_load_text(bc, bc->text_symbols);
		symbols_sort(bc->text_symbols);
	}

	name = symbols_name_only(bc->text_symbols, lookaddr, FALSE);
	if (name != NULL) {
		ZERO(loc);
		loc->function = name;
		mutex_unlock_fast(&bc->lock);
		return TRUE;
	}

	mutex_unlock_fast(&bc->lock);
	return FALSE;
}
/**
 * Make sure vector of block numbers is ordered and points to allocated data,
 * but was not already flagged as being used by another key / value.
 *
 * @param what	string describing what is being tested (key or value)
 * @param db	the sdbm database
 * @param bvec	vector where allocated block numbers are stored
 * @param bcnt	amount of blocks in vector
 *
 * @return TRUE on success.
 */
static gboolean
big_file_check(const char *what, DBM *db, const void *bvec, int bcnt)
{
	size_t prev_bno = 0;	/* 0 is invalid: it's the first bitmap */
	const void *q;
	int n;

	if (!big_check_start(db))
		return TRUE;		/* Cannot validate, assume it's OK */

	for (q = bvec, n = bcnt; n > 0; n--) {
		size_t bno = peek_be32(q);
		bit_field_t *map;
		long bmap;
		size_t bit;

		if (!big_block_is_allocated(db, bno)) {
			g_warning("sdbm: \"%s\": "
				"%s from .pag refers to unallocated block %lu in .dat",
				sdbm_name(db), what, (unsigned long) bno);
			return FALSE;
		}
		if (prev_bno != 0 && bno <= prev_bno) {
			g_warning("sdbm: \"%s\": "
				"%s from .pag has an unordered block list (corrupted file?)",
				sdbm_name(db), what);
			return FALSE;
		}
		q = const_ptr_add_offset(q, sizeof(guint32));
		prev_bno = bno;

		/*
		 * Make sure block is not used by someone else.
		 *
		 * Because we mark blocks as used in big keys and values only after
		 * we validated both the key and the value for a given pair, we cannot
		 * detect shared blocks between the key and value of a pair.
		 */

		bmap = bno / BIG_BITCOUNT;			/* Bitmap handling this block */
		bit = bno & (BIG_BITCOUNT - 1);		/* Index within bitmap */

		g_assert(bmap < db->big->bitmaps);

		map = ptr_add_offset(db->big->bitcheck, bmap * BIG_BLKSIZE);
		if (bit_field_get(map, bit)) {
			g_warning("sdbm: \"%s\": "
				"%s from .pag refers to already seen block %lu in .dat",
				sdbm_name(db), what, (unsigned long) bno);
			return FALSE;
		}
	}

	return TRUE;
}
/**
 * @return pointer to last child of item, NULL if leaf item.
 */
void *
etree_last_child(const etree_t *tree, const void *item)
{
	etree_check(tree);

	if (etree_is_extended(tree)) {
		const nodex_t *n = const_ptr_add_offset(item, tree->offset);

		if (NULL == n->last_child)
			return NULL;

		return ptr_add_offset(n->last_child, -tree->offset);
	} else {
		const node_t *n = const_ptr_add_offset(item, tree->offset);
		node_t *sn = etree_node_last_sibling(n->child);

		if (NULL == sn)
			return NULL;

		return ptr_add_offset(sn, -tree->offset);
	}
}
/**
 * Insert item in hash set.
 *
 * Any previously existing value for the key is replaced by the new one.
 *
 * @param ht	the hash table
 * @param value	the value (which embeds the key)
 *
 * @attention
 * This routine takes a value with its embedded expanded key, not the key.
 */
void
hevset_insert(hevset_t *ht, const void *value)
{
	const void *key;

	hevset_check(ht);
	g_assert(value != NULL);

	key = const_ptr_add_offset(value, ht->offset);
	hash_insert_key(HASH(ht), key);
	ht->stamp++;
}
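/*
 * Illustrative sketch (not from the original source): a value with an
 * expanded (embedded) key, as hevset expects.  The structure below is
 * hypothetical.
 */

struct session {
	uint32 id;			/* Embedded key, expanded within the value */
	time_t started;
};

/* A set created with offset == offsetof(struct session, id) lets us do:
 *
 *     struct session *s = ...;
 *     hevset_insert(ht, s);	// hashes s->id, stores s
 */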
/**
 * Given an item, find the first matching sibling starting with this item,
 * NULL if none.
 *
 * @param tree	the tree descriptor (with possible inaccurate root)
 * @param item	item at which search begins
 * @param match	matching predicate
 * @param data	additional callback argument
 */
void *
etree_find_sibling(const etree_t *tree, const void *item,
	match_fn_t match, void *data)
{
	const node_t *s;

	etree_check(tree);
	g_assert(item != NULL);
	g_assert(match != NULL);

	for (
		s = const_ptr_add_offset(item, tree->offset);
		s != NULL;
		s = s->sibling
	) {
		const void *node = const_ptr_add_offset(s, -tree->offset);

		if ((*match)(node, data))
			return deconstify_pointer(node);
	}

	return NULL;	/* No matching item */
}
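/*
 * Illustrative sketch (not from the original source): a matching predicate
 * suitable for etree_find_sibling(), reusing the hypothetical struct
 * file_entry introduced above.  The match criterion is made up.
 */

static bool
file_entry_named(const void *item, void *data)
{
	const struct file_entry *fe = item;
	const char *name = data;

	return 0 == strcmp(fe->name, name);
}

/* Usage: etree_find_sibling(tree, first, file_entry_named, "README") */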
/**
 * Insert item in hash set.
 *
 * Any previously existing value for the key is replaced by the new one.
 *
 * @param hik	the hash table
 * @param value	the value (which embeds the key)
 *
 * @attention
 * This routine takes a value with its embedded key reference, not the key.
 */
void
hikset_insert(hikset_t *hik, const void *value)
{
	void * const *key;	/* Pointer to the key field in the value */

	hikset_check(hik);
	g_assert(value != NULL);

	/*
	 * We're really inserting the address within the value where the key
	 * is stored.  It will be hashed through hikset_key_hash() which
	 * will perform the necessary indirection.
	 */

	key = const_ptr_add_offset(value, hik->offset);
	hash_insert_key(HASH(hik), key);
	hik->stamp++;
}
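/*
 * Illustrative sketch (not from the original source): unlike hevset above,
 * a hikset value holds a *pointer* to its key, and the table stores the
 * address of that pointer field.  The structure below is hypothetical.
 */

struct record {
	const char *key;	/* Key reference: points to the actual key */
	int payload;
};

/* With offset == offsetof(struct record, key), hikset_insert(hik, r)
 * stores &r->key and hashes the string it points to, through the
 * indirection performed by hikset_key_hash(). */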
/**
 * Computes the root of the tree, starting from any item.
 *
 * This disregards the actual root in the etree_t structure passed, which may
 * be inaccurate, i.e. a sub-node of the actual tree.  The only accurate
 * information that etree_t must contain is the offset of the node_t within
 * the items.
 *
 * @param tree	the tree descriptor (with possible inaccurate root)
 * @param item	an item belonging to the tree
 *
 * @return the root of the tree to which item belongs.
 */
void *
etree_find_root(const etree_t *tree, const void *item)
{
	const node_t *n, *p;
	void *root;

	etree_check(tree);
	g_assert(item != NULL);

	n = const_ptr_add_offset(item, tree->offset);

	for (p = n; p != NULL; p = n->parent)
		n = p;

	root = ptr_add_offset(deconstify_pointer(n), -tree->offset);

	g_assert(etree_is_orphan(tree, root));	/* No parent, no sibling */

	return root;
}
/**
 * Free allocated blocks from the .dat file.
 *
 * @param db	the sdbm database
 * @param bvec	vector where allocated block numbers are stored
 * @param bcnt	amount of blocks in vector to free
 */
static void
big_file_free(DBM *db, const void *bvec, int bcnt)
{
	size_t bno;
	const void *q;
	int n;

	for (q = bvec, n = bcnt; n > 0; n--) {
		bno = peek_be32(q);
		big_ffree(db, bno);
		q = const_ptr_add_offset(q, sizeof(guint32));
	}

	/*
	 * If database is not volatile, sync the bitmap to make sure the freed
	 * blocks are reusable even if we crash later.
	 */

	if (!db->is_volatile)
		big_sync(db);
}
/**
 * Fetch the MUID in the message, if any is architected.
 *
 * @param t		the message tree
 * @param buf	the buffer to fill with a copy of the MUID
 *
 * @return a pointer to `buf' if OK and we filled the MUID, NULL if there is
 * no valid MUID in the message or the message is not carrying any MUID.
 */
guid_t *
g2_msg_get_muid(const g2_tree_t *t, guid_t *buf)
{
	enum g2_msg m;
	const void *payload;
	size_t paylen;
	size_t offset;

	g_assert(t != NULL);
	g_assert(buf != NULL);

	m = g2_msg_name_type(g2_tree_name(t));

	switch (m) {
	case G2_MSG_Q2:
	case G2_MSG_QA:
		offset = 0;
		break;
	case G2_MSG_QH2:
		offset = 1;		/* First payload byte is the hop count */
		break;
	default:
		return NULL;	/* No MUID in message */
	}

	payload = g2_tree_node_payload(t, &paylen);

	if (NULL == payload || paylen < GUID_RAW_SIZE + offset)
		return NULL;

	/*
	 * Copy the MUID in the supplied buffer for alignment purposes, since
	 * the MUID is offset by 1 byte in /QH2 messages, and return that aligned
	 * pointer.
	 */

	memcpy(buf, const_ptr_add_offset(payload, offset), GUID_RAW_SIZE);
	return buf;
}
/**
 * Lookup key in the (extended) tree.
 *
 * 'pparent' and 'is_left' are only used for insertions.  Normally GCC
 * will notice this and get rid of them for lookups.
 */
static inline rbnode_t *
do_lookup_ext(const erbtree_ext_t *tree, const void *key,
	rbnode_t **pparent, bool *is_left)
{
	rbnode_t *node = tree->root;

	*pparent = NULL;
	*is_left = FALSE;

	while (node != NULL) {
		int res;
		const void *nbase = const_ptr_add_offset(node, -tree->offset);

		res = (*tree->u.dcmp)(nbase, key, tree->data);
		if (0 == res)
			return node;
		*pparent = node;
		if ((*is_left = res > 0))
			node = node->left;
		else
			node = node->right;
	}

	return NULL;
}
/**
 * Called when a pong with an "IPP" extension was received.
 */
void
uhc_ipp_extract(gnutella_node_t *n, const char *payload, int paylen,
	enum net_type type)
{
	int i, cnt;
	int len = NET_TYPE_IPV6 == type ? 18 : 6;
	const void *p;

	g_assert(0 == paylen % len);

	cnt = paylen / len;

	if (GNET_PROPERTY(bootstrap_debug))
		g_debug("extracting %d host%s in UDP IPP pong #%s from %s",
			cnt, plural(cnt),
			guid_hex_str(gnutella_header_get_muid(&n->header)),
			node_addr(n));

	for (i = 0, p = payload; i < cnt; i++, p = const_ptr_add_offset(p, len)) {
		host_addr_t ha;
		uint16 port;

		host_ip_port_peek(p, type, &ha, &port);
		hcache_add_caught(HOST_ULTRA, ha, port, "UDP-HC");

		if (GNET_PROPERTY(bootstrap_debug) > 2)
			g_debug("BOOT collected %s from UDP IPP pong from %s",
				host_addr_port_to_string(ha, port), node_addr(n));
	}

	if (!uhc_connecting)
		return;

	/*
	 * Check whether this was a reply to our request.
	 *
	 * The reply could come well after we decided it timed out and picked
	 * another UDP host cache, which ended up replying, so we must really
	 * check whether we're still in a probing cycle.
	 */

	if (!guid_eq(&uhc_ctx.muid, gnutella_header_get_muid(&n->header)))
		return;

	if (GNET_PROPERTY(bootstrap_debug)) {
		g_debug("BOOT UDP cache \"%s\" replied: got %d host%s from %s",
			uhc_ctx.host, cnt, plural(cnt), node_addr(n));
	}

	/*
	 * Terminate the probing cycle if we got hosts.
	 */

	if (cnt > 0) {
		char msg[256];

		cq_cancel(&uhc_ctx.timeout_ev);
		uhc_connecting = FALSE;

		str_bprintf(msg, sizeof(msg),
			NG_("Got %d host from UDP host cache %s",
				"Got %d hosts from UDP host cache %s",
				cnt),
			cnt, uhc_ctx.host);

		gcu_statusbar_message(msg);
	} else {
		uhc_try_next();
	}
}
/**
 * Check message header for a valid semi-reliable UDP header.
 *
 * @param head	message header
 * @param len	message length
 *
 * @return intuited type
 */
static enum udp_traffic
udp_check_semi_reliable(const void *head, size_t len)
{
	uint8 flags, part, count;
	const unsigned char *tag;
	enum udp_traffic utp;

	if (len < UDP_RELIABLE_HEADER_SIZE)
		return UNKNOWN;

	/*
	 * We're only interested in "GTA" and "GND" traffic.
	 */

	tag = head;

	if (tag[0] != 'G')
		return UNKNOWN;

	if ('T' == tag[1] && 'A' == tag[2]) {
		utp = SEMI_RELIABLE_GTA;
		goto tag_known;
	}

	if ('N' == tag[1] && 'D' == tag[2]) {
		utp = SEMI_RELIABLE_GND;
		goto tag_known;
	}

	return UNKNOWN;		/* Not a tag we know about */

tag_known:

	/*
	 * Extract key fields from the header.
	 */

	flags = udp_reliable_header_get_flags(head);
	part = udp_reliable_header_get_part(head);
	count = udp_reliable_header_get_count(head);

	/*
	 * There are 2 bits that must be zero in the flags (critical bits that
	 * we don't know about if set and therefore would lead us to drop the
	 * fragment anyway).
	 *
	 * This will match 3 random bytes out of 4, or 75% of them.
	 */

	if (0 != (flags & UDP_RF_CRITICAL_MASK))
		return UNKNOWN;		/* Critical flags we don't know about */

	/*
	 * Normally the part is non-zero, unless we're facing an Extra
	 * Acknowledgment Request (EAR) in which case both part and count will
	 * be set to zero.
	 *
	 * Hence, 0 is an invalid part number for plain fragments only, when
	 * count is non-zero.
	 */

	if (0 == part && 0 != count)
		return UNKNOWN;		/* Invalid fragment number */

	/*
	 * Check acknowledgments for consistency.
	 */

	if (0 == count) {
		size_t nominal_size = UDP_RELIABLE_HEADER_SIZE;

		if (flags & UDP_RF_EXTENDED_ACK) {
			uint8 received;

			if (len < UDP_RELIABLE_EXT_HEADER_SIZE)
				return UNKNOWN;

			received = udp_reliable_get_received(head);
			nominal_size = UDP_RELIABLE_EXT_HEADER_SIZE;

			if (0 == received)
				return UNKNOWN;		/* At least one fragment received! */
			if (0 == part)
				return UNKNOWN;		/* EARs are not extended */
			if ((flags & UDP_RF_CUMULATIVE_ACK) && received < part)
				return UNKNOWN;		/* Receiver must have ``part'' fragments */
		}

		/*
		 * A valid acknowledgment should never claim to have a deflated
		 * payload.  First, there is no payload expected, really, but second,
		 * in order to have deflation creating a saving over plain bytes, it
		 * would require carrying over a significant amount of data, something
		 * that is totally illogical in any foreseeable future.
		 */

		if (flags & UDP_RF_DEFLATED)
			return UNKNOWN;		/* Cannot be a legitimate acknowledgment */

		/*
		 * We don't check for the UDP_RF_ACKME flag.  No acknowledgment should
		 * specify this, but implementations should ignore that flag anyway
		 * for acknowledgments, so a broken implementation could have it set
		 * and it would go totally unnoticed during testing.
		 *
		 * Actually, EARs can have the UDP_RF_ACKME flag set, but we don't
		 * care at this point.
		 */

		/*
		 * There could be a (small) payload added one day to acknowledgments,
		 * but it should remain small otherwise the protocol will become
		 * inefficient.
		 *
		 * Therefore it is fair to assume that if the length of the fragment
		 * claiming to be an acknowledgment is more than twice as large as
		 * it should be, it most definitely isn't an acknowledgment.
		 */

		if (len > 2 * nominal_size)
			return UNKNOWN;		/* Was certainly a false positive! */

		return utp;		/* OK, seems valid as far as we can tell */
	}

	/*
	 * This has roughly a 50% chance of correctly ruling out a non-header.
	 */

	if (part > count)
		return UNKNOWN;		/* Invalid fragment number */

	/*
	 * If we are receiving fragment #1 of a message, we can further check
	 * the consistency of the "deflate" flag provided we have at least 2 bytes
	 * in the payload.
	 */

	if (
		1 == part &&
		(flags & UDP_RF_DEFLATED) &&
		len >= UDP_RELIABLE_HEADER_SIZE + 2
	) {
		const void *payload =
			const_ptr_add_offset(head, UDP_RELIABLE_HEADER_SIZE);

		if (!zlib_is_valid_header(payload, len - UDP_RELIABLE_HEADER_SIZE))
			return UNKNOWN;		/* Supposedly deflated payload is not valid */
	}

	return utp;
}
/**
 * Look whether semi-reliable UDP header corresponds to valid traffic.
 *
 * This routine is only used for ambiguous traffic that looks like both
 * Gnutella and semi-reliable UDP: we want to make sure we're not mistaking
 * a legitimate semi-reliable fragment / ACK for a Gnutella message.
 *
 * @param utp	already classified semi-reliable protocol
 * @param s		socket which received the message
 * @param data	received data
 * @param len	length of data
 *
 * @return TRUE if message corresponds to valid semi-reliable UDP traffic.
 */
static bool
udp_is_valid_semi_reliable(enum udp_traffic utp, const gnutella_socket_t *s,
	const void *data, size_t len)
{
	struct ut_header uth;
	void *message = NULL;
	size_t msglen;
	bool valid = TRUE;

	/*
	 * Since we're talking about an ambiguous message, it is highly unlikely
	 * we'll ever be called with an acknowledgment: they should have been
	 * ruled out earlier as improbable since ACKs are short messages, much
	 * shorter than a Gnutella header typically.
	 *
	 * So we'll only handle fragments for now, assuming ACKs are legitimate.
	 */

	gnet_stats_inc_general(GNR_UDP_AMBIGUOUS_DEEPER_INSPECTION);

	uth.count = udp_reliable_header_get_count(data);
	if (0 == uth.count)
		return TRUE;	/* Acknowledgments */

	uth.part = udp_reliable_header_get_part(data) - 1;	/* Zero-based */
	uth.flags = udp_reliable_header_get_flags(data);
	uth.seqno = udp_reliable_header_get_seqno(data);

	/*
	 * We're going to ask the RX layer about the message: is it a known
	 * sequence ID for this host?
	 *
	 * This works only for messages with more than one fragment, of course,
	 * but chances are that, for these, we would have possibly already
	 * received another fragment, not mistaken as a Gnutella message...
	 *
	 * This is OK for acknowledged fragments: we're not going to acknowledge
	 * the unprocessed fragment, but we'll receive other fragments of the
	 * message, and later on we'll get a retransmission of the unprocessed
	 * fragment, which this time will be validated since we have already
	 * partially received the message.
	 */

	if (uth.count > 1) {
		rxdrv_t *rx;
		gnet_host_t from;

		gnet_host_set(&from, s->addr, s->port);
		rx = udp_get_rx_semi_reliable(utp, s->addr, len);

		return NULL == rx ? FALSE : ut_valid_message(rx, &uth, &from);
	}

	/*
	 * We're facing a single-fragment message.
	 *
	 * We can trivially probe it and validate it to see whether it can still
	 * be interpreted as a valid Gnutella message on its own...  If the answer
	 * is yes, then we can assert we're facing a valid semi-reliable UDP
	 * message.
	 *
	 * For deflated payloads, we already validated that the start of the
	 * payload is a well-formed zlib header, but we'll attempt inflation
	 * anyway, so we will know for sure whether it's a valid message!
	 *
	 * Of course we're doing here work that will have to be redone later when
	 * processing the message, but this is for proper classification and not
	 * happening very often: only on a very small fraction of messages for
	 * which there is a high level of ambiguity.
	 */

	g_assert(0 == uth.part);	/* First (and only) fragment */

	if (uth.flags & UDP_RF_DEFLATED) {
		int outlen = settings_max_msg_size();
		int ret;

		message = xmalloc(outlen);

		ret = zlib_inflate_into(
			const_ptr_add_offset(data, UDP_RELIABLE_HEADER_SIZE),
			len - UDP_RELIABLE_HEADER_SIZE,
			message, &outlen);

		if (ret != Z_OK) {
			valid = FALSE;		/* Does not inflate properly */
			goto done;
		}

		msglen = outlen;
	} else {
		message = ptr_add_offset(
			deconstify_pointer(data), UDP_RELIABLE_HEADER_SIZE);
		msglen = len - UDP_RELIABLE_HEADER_SIZE;
	}

	switch (utp) {
	case SEMI_RELIABLE_GTA:
		/*
		 * Assume message is valid if the Gnutella size header is consistent
		 * with the length of the whole message.
		 */
		{
			uint16 size;

			switch (gmsg_size_valid(message, &size)) {
			case GMSG_VALID:
			case GMSG_VALID_MARKED:
				break;
			case GMSG_VALID_NO_PROCESS:	/* Header flags undefined for now */
			case GMSG_INVALID:
				valid = FALSE;
				goto done;
			}

			valid = (size_t) size + GTA_HEADER_SIZE == msglen;
		}
		break;
	case SEMI_RELIABLE_GND:
		valid = TRUE;		/* For now */
		break;
	case GNUTELLA:
	case DHT:
	case RUDP:
	case UNKNOWN:
		g_assert_not_reached();
	}

done:
	if (uth.flags & UDP_RF_DEFLATED)
		xfree(message);

	return valid;
}
/**
 * Create a security token from host address and port using specified key.
 *
 * Optionally, extra contextual data may be given (i.e. the token is not
 * only based on the address and port) to make the token more unique to
 * a specific context.
 *
 * @param stg	the security token generator
 * @param n		key index to use
 * @param tok	where security token is written
 * @param addr	address of the host for which we're generating a token
 * @param port	port of the host for which we're generating a token
 * @param data	optional contextual data
 * @param len	length of contextual data
 */
static void
sectoken_generate_n(sectoken_gen_t *stg, size_t n,
	sectoken_t *tok, host_addr_t addr, uint16 port,
	const void *data, size_t len)
{
	char block[8];
	char enc[8];
	char *p = block;

	sectoken_gen_check(stg);
	g_assert(tok != NULL);
	g_assert(size_is_non_negative(n));
	g_assert(n < stg->keycnt);
	g_assert((NULL != data) == (len != 0));

	switch (host_addr_net(addr)) {
	case NET_TYPE_IPV4:
		p = poke_be32(p, host_addr_ipv4(addr));
		break;
	case NET_TYPE_IPV6:
		{
			uint val;

			val = binary_hash(host_addr_ipv6(&addr), 16);
			p = poke_be32(p, val);
		}
		break;
	case NET_TYPE_LOCAL:
	case NET_TYPE_NONE:
		g_error("unexpected address for security token generation: %s",
			host_addr_to_string(addr));
	}

	p = poke_be16(p, port);
	p = poke_be16(p, 0);	/* Filler */

	g_assert(p == &block[8]);

	STATIC_ASSERT(sizeof(tok->v) == sizeof(uint32));
	STATIC_ASSERT(sizeof(block) == sizeof(enc));

	tea_encrypt(&stg->keys[n], enc, block, sizeof block);

	/*
	 * If they gave contextual data, encrypt it in blocks of 8 bytes,
	 * filling the last partial block with zeroes if needed.
	 */

	if (data != NULL) {
		const void *q = data;
		size_t remain = len;
		char denc[8];

		STATIC_ASSERT(sizeof(denc) == sizeof(enc));

		while (remain != 0) {
			size_t fill = MIN(remain, 8U);
			unsigned i;

			if (fill != 8U)
				ZERO(&block);
			memcpy(block, q, fill);
			remain -= fill;
			q = const_ptr_add_offset(q, fill);

			/*
			 * Encrypt block of contextual data (possibly filled with trailing
			 * zeroes) and merge back the result into the main encryption
			 * output with XOR.
			 */

			tea_encrypt(&stg->keys[n], denc, block, sizeof block);

			for (i = 0; i < sizeof denc; i++)
				enc[i] ^= denc[i];
		}
	}

	poke_be32(tok->v, tea_squeeze(enc, sizeof enc));
}
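/*
 * Illustrative sketch (not from the original source): the block-chaining
 * scheme used above, with a stand-in cipher.  The real code encrypts each
 * zero-padded 8-byte block with tea_encrypt() under the generator's key;
 * xor_encrypt() below is a hypothetical placeholder that only shows the
 * data flow of the chunking and XOR-merge.
 */

static void
xor_encrypt(char *out, const char *in, size_t len)
{
	size_t i;

	for (i = 0; i < len; i++)
		out[i] = in[i] ^ 0x5a;	/* Stand-in, NOT a real cipher */
}

static void
example_fold_context(char enc[8], const void *data, size_t len)
{
	const void *q = data;
	size_t remain = len;
	char block[8], denc[8];

	while (remain != 0) {
		size_t fill = MIN(remain, 8U);
		unsigned i;

		if (fill != 8U)
			ZERO(&block);		/* Zero-pad the last partial block */
		memcpy(block, q, fill);
		remain -= fill;
		q = const_ptr_add_offset(q, fill);

		xor_encrypt(denc, block, sizeof denc);

		for (i = 0; i < sizeof denc; i++)
			enc[i] ^= denc[i];	/* Merge block into running output */
	}
}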
/**
 * Fetch data block from the .dat file, reading from the supplied block numbers.
 *
 * @param db	the sdbm database
 * @param bvec	start of block vector, containing block numbers
 * @param len	length of the data to be read
 *
 * @return -1 on error with errno set, 0 if OK.  Read data is left in the
 * scratch buffer.
 */
static int
big_fetch(DBM *db, const void *bvec, size_t len)
{
	int bcnt = bigbcnt(len);
	DBMBIG *dbg = db->big;
	int n;
	const void *p;
	char *q;
	size_t remain;
	guint32 prev_bno;

	if (-1 == dbg->fd && -1 == big_open(dbg))
		return -1;

	if (dbg->scratch_len < len)
		big_scratch_grow(dbg, len);

	/*
	 * Read consecutive blocks in one single system call.
	 */

	n = bcnt;
	p = bvec;
	q = dbg->scratch;
	remain = len;

	while (n > 0) {
		size_t toread = MIN(remain, BIG_BLKSIZE);
		guint32 bno = peek_be32(p);

		prev_bno = bno;
		if (!big_block_is_allocated(db, prev_bno))
			goto corrupted_database;
		p = const_ptr_add_offset(p, sizeof(guint32));
		n--;
		remain = size_saturate_sub(remain, toread);

		while (n > 0) {
			guint32 next_bno = peek_be32(p);
			size_t amount;

			if (next_bno <= prev_bno)	/* Block numbers are sorted */
				goto corrupted_page;
			if (next_bno - prev_bno != 1)
				break;					/* Not consecutive */
			prev_bno = next_bno;
			if (!big_block_is_allocated(db, prev_bno))
				goto corrupted_database;
			p = const_ptr_add_offset(p, sizeof(guint32));
			amount = MIN(remain, BIG_BLKSIZE);
			toread += amount;
			n--;
			remain = size_saturate_sub(remain, amount);
		}

		dbg->bigread++;
		if (-1 == compat_pread(dbg->fd, q, toread, OFF_DAT(bno))) {
			g_warning("sdbm: \"%s\": "
				"could not read %lu bytes starting at data block #%u: %s",
				sdbm_name(db), (unsigned long) toread, bno,
				g_strerror(errno));

			ioerr(db, FALSE);
			return -1;
		}

		q += toread;
		dbg->bigread_blk += bigblocks(toread);
		g_assert(UNSIGNED(q - dbg->scratch) <= dbg->scratch_len);
	}

	g_assert(UNSIGNED(q - dbg->scratch) == len);

	return 0;

corrupted_database:
	g_warning("sdbm: \"%s\": cannot read unallocated data block #%u",
		sdbm_name(db), prev_bno);
	goto fault;

corrupted_page:
	g_warning("sdbm: \"%s\": corrupted page: %d big data block%s not sorted",
		sdbm_name(db), bcnt, 1 == bcnt ? "" : "s");

	/* FALL THROUGH */

fault:
	ioerr(db, FALSE);
	errno = EFAULT;		/* Data corrupted somehow (.pag or .dat file) */
	return -1;
}
/**
 * Store big value in the .dat file, writing to the supplied block numbers.
 *
 * @param db	the sdbm database
 * @param bvec	start of block vector, containing block numbers
 * @param data	start of data to write
 * @param len	length of data to write
 *
 * @return -1 on error with errno set, 0 if OK.
 */
static int
big_store(DBM *db, const void *bvec, const void *data, size_t len)
{
	DBMBIG *dbg = db->big;
	int bcnt = bigbcnt(len);
	int n;
	const void *p;
	const char *q;
	size_t remain;

	g_return_val_if_fail(NULL == dbg->bitcheck, -1);

	if (-1 == dbg->fd && -1 == big_open(dbg))
		return -1;

	/*
	 * Look at the amount of consecutive block numbers we have to be able
	 * to write into them via a single system call.
	 */

	n = bcnt;
	p = bvec;
	q = data;
	remain = len;

	while (n > 0) {
		size_t towrite = MIN(remain, BIG_BLKSIZE);
		guint32 bno = peek_be32(p);
		guint32 prev_bno = bno;

		p = const_ptr_add_offset(p, sizeof(guint32));
		n--;
		remain = size_saturate_sub(remain, towrite);

		while (n > 0) {
			guint32 next_bno = peek_be32(p);
			size_t amount;

			if (next_bno <= prev_bno)	/* Block numbers are sorted */
				goto corrupted_page;
			if (next_bno - prev_bno != 1)
				break;					/* Not consecutive */
			prev_bno = next_bno;
			p = const_ptr_add_offset(p, sizeof(guint32));
			amount = MIN(remain, BIG_BLKSIZE);
			towrite += amount;
			n--;
			remain = size_saturate_sub(remain, amount);
		}

		dbg->bigwrite++;
		if (-1 == compat_pwrite(dbg->fd, q, towrite, OFF_DAT(bno))) {
			g_warning("sdbm: \"%s\": "
				"could not write %lu bytes starting at data block #%u: %s",
				sdbm_name(db), (unsigned long) towrite, bno,
				g_strerror(errno));

			ioerr(db, TRUE);
			return -1;
		}

		q += towrite;
		dbg->bigwrite_blk += bigblocks(towrite);
		g_assert(ptr_diff(q, data) <= len);
	}

	g_assert(ptr_diff(q, data) == len);

	return 0;

corrupted_page:
	g_warning("sdbm: \"%s\": corrupted page: %d big data block%s not sorted",
		sdbm_name(db), bcnt, 1 == bcnt ? "" : "s");

	ioerr(db, FALSE);
	errno = EFAULT;		/* Data corrupted somehow (.pag file) */
	return -1;
}
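/*
 * Illustrative sketch (not from the original source): the run-detection
 * logic shared by big_fetch() and big_store(), isolated for clarity.
 * Given a big-endian vector of sorted block numbers, it returns how many
 * leading blocks are consecutive and can therefore be covered by a single
 * pread()/pwrite() system call.
 */

static int
example_consecutive_run(const void *bvec, int bcnt)
{
	const void *p = bvec;
	guint32 prev_bno;
	int run = 1;

	g_assert(bcnt > 0);

	prev_bno = peek_be32(p);
	p = const_ptr_add_offset(p, sizeof(guint32));

	while (run < bcnt) {
		guint32 bno = peek_be32(p);

		if (bno - prev_bno != 1)
			break;		/* Not consecutive, stop the run */

		prev_bno = bno;
		p = const_ptr_add_offset(p, sizeof(guint32));
		run++;
	}

	return run;
}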
/**
 * Insert node in tree.
 *
 * @return the existing key if the key already existed, NULL if the node
 * was properly inserted.
 */
void * G_HOT
erbtree_insert(erbtree_t *tree, rbnode_t *node)
{
	rbnode_t *key, *parent;
	const void *kbase;
	bool is_left;

	erbtree_check(tree);
	g_assert(node != NULL);

	kbase = const_ptr_add_offset(node, -tree->offset);

	if (erbtree_is_extended(tree)) {
		key = do_lookup_ext(ERBTREE_E(tree), kbase, &parent, &is_left);
	} else {
		key = do_lookup(tree, kbase, &parent, &is_left);
	}

	if (key != NULL)
		return ptr_add_offset(key, -tree->offset);

	g_assert(!is_valid(node));	/* Not yet part of the tree */

	node->left = NULL;
	node->right = NULL;
	set_color(node, RB_RED);
	set_parent(node, parent);
	tree->count++;

	if (parent != NULL) {
		if (is_left) {
			if (parent == tree->first)
				tree->first = node;
		} else {
			if (parent == tree->last)
				tree->last = node;
		}
		set_child(parent, node, is_left);
	} else {
		tree->root = node;
		tree->first = node;
		tree->last = node;
	}

	/*
	 * Fixup the modified tree by recoloring nodes and performing
	 * rotations (2 at most) so that the red-black tree properties are
	 * preserved.
	 */

	while (NULL != (parent = get_parent(node)) && is_red(parent)) {
		rbnode_t *grandpa = get_parent(parent);

		if (parent == grandpa->left) {
			rbnode_t *uncle = grandpa->right;

			if (uncle != NULL && is_red(uncle)) {
				set_color(parent, RB_BLACK);
				set_color(uncle, RB_BLACK);
				set_color(grandpa, RB_RED);
				node = grandpa;
			} else {
				if (node == parent->right) {
					rotate_left(tree, parent);
					node = parent;
					parent = get_parent(node);
				}
				set_color(parent, RB_BLACK);
				set_color(grandpa, RB_RED);
				rotate_right(tree, grandpa);
			}
		} else {
			rbnode_t *uncle = grandpa->left;

			if (uncle != NULL && is_red(uncle)) {
				set_color(parent, RB_BLACK);
				set_color(uncle, RB_BLACK);
				set_color(grandpa, RB_RED);
				node = grandpa;
			} else {
				if (node == parent->left) {
					rotate_right(tree, parent);
					node = parent;
					parent = get_parent(node);
				}
				set_color(parent, RB_BLACK);
				set_color(grandpa, RB_RED);
				rotate_left(tree, grandpa);
			}
		}
	}

	set_color(tree->root, RB_BLACK);

	return NULL;
}
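/*
 * Illustrative sketch (not from the original source): inserting an item
 * with an embedded rbnode_t, reusing the hypothetical struct point and
 * point_cmp() introduced near erbtree_cmp() above.  The erbtree_init()
 * call shown in the comment is an assumed initializer, not a confirmed
 * signature.
 */

static void
example_insert(erbtree_t *tree)
{
	static struct point pt = { .x = 42 };	/* Node zero-initialized */
	void *old;

	/* Assumed setup, done once elsewhere:
	 * erbtree_init(tree, point_cmp, offsetof(struct point, node)); */

	old = erbtree_insert(tree, &pt.node);
	if (old != NULL) {
		/* A point with x == 42 was already present; `old' is that item */
	}
}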