Example #1
/**
 * Test whether the GUID is that of GTKG, and extract the major/minor version
 * along with the release status, provided `majp', `minp' and `relp' are non-NULL.
 */
static bool
guid_oob_is_gtkg(const guid_t *guid, uint8 *majp, uint8 *minp, bool *relp)
{
	uint8 hec;

	if (!guid_extract_gtkg_info(guid, 4, majp, minp, relp))
		return FALSE;		/* Marking incorrect, no need to compute HEC */

	/*
	 * The HEC for OOB queries was made of the first 15 bytes for versions
	 * up to 0.98.4u (legacy encoding).  Starting with 0.98.4, we have a
	 * different way of encoding the HEC to preserve its integrity even in
	 * the event of OOB-proxying.
	 *
	 * Also bit 0 of the HEC is not significant (used to mark requeries)
	 * therefore it is masked out for comparison purposes.
	 */

	hec = peek_u8(&guid->v[15]) & ~GUID_REQUERY;

	if (*majp > 0 || *minp >= 99)
		return booleanize((guid_hec_oob(guid) & ~GUID_REQUERY) == hec);

	/*
	 * Transition period for servents earlier than 0.99: try the legacy marking
	 * for 0.97 and earlier. For 0.98, try the legacy marking first, then the
	 * new marking.
	 */

	if (*minp <= 97)
		return booleanize((guid_hec_oob_legacy(guid) & ~GUID_REQUERY) == hec);

	return booleanize((guid_hec_oob_legacy(guid) & ~GUID_REQUERY) == hec) ||
		booleanize((guid_hec_oob(guid) & ~GUID_REQUERY) == hec);
}
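
Every example on this page funnels a flag or pointer test through booleanize(). For reference, here is a minimal sketch of what such a macro conventionally looks like; this is an assumption for illustration, not gtk-gnutella's exact definition:

/*
 * Assumed sketch: collapse any non-zero expression to exactly 1 (TRUE)
 * and zero to 0 (FALSE), so that bitwise tests such as (flags & BIT)
 * can be stored safely in uint8/bool fields.
 */
#define booleanize(x)	((x) ? 1 : 0)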
Example #2
File: dbmw.c Project: Haxe/gtk-gnutella
/**
 * Synchronize dirty values.
 *
 * The ``which'' argument is a bitfield indicating the set of things to
 * synchronize:
 *
 * DBMW_SYNC_CACHE requests that dirty values from the local DBMW cache
 * be flushed to the DB map layer immediately.
 *
 * DBMW_SYNC_MAP requests that the DB map layer be flushed, if it is backed
 * by disk data.
 *
 * If DBMW_DELETED_ONLY is specified along with DBMW_SYNC_CACHE, only the
 * dirty values that are marked as pending deletion are flushed.
 *
 * @return amount of value flushes plus amount of sdbm page flushes, -1 if
 * an error occurred.
 */
ssize_t
dbmw_sync(dbmw_t *dw, int which)
{
	ssize_t amount = 0;
	gboolean error = FALSE;

	if (which & DBMW_SYNC_CACHE) {
		struct flush_context ctx;

		ctx.dw = dw;
		ctx.error = FALSE;
		ctx.deleted_only = booleanize(which & DBMW_DELETED_ONLY);
		ctx.amount = 0;

		map_foreach(dw->values, flush_dirty, &ctx);
		if (!ctx.error)
			dw->count_needs_sync = FALSE;

		amount += ctx.amount;
		error = ctx.error;
	}
	if (which & DBMW_SYNC_MAP) {
		ssize_t ret = dbmap_sync(dw->dm);
		if (-1 == ret)
			error = TRUE;
		else
			amount += ret;
	}

	return error ? -1 : amount;
}
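
A hedged usage sketch for the API documented above; the dw handle is assumed to be an open DBMW descriptor:

/* Flush dirty cached values first, then sync the sdbm map to disk. */
ssize_t flushed = dbmw_sync(dw, DBMW_SYNC_CACHE | DBMW_SYNC_MAP);

if (-1 == flushed)
	g_warning("dbmw_sync() failed");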
Example #3
File: magnet.c Project: MrJoe/gtk-gnutella
/**
 * This is a bit of a hack (an extension anyway) and should only be used
 * for magnets with a single logical source because DHT support is only
 * valid for a certain source.
 */
void
magnet_set_dht(struct magnet_resource *res, bool dht_support)
{
	g_return_if_fail(res);

	res->dht = booleanize(dht_support);
}
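
A plausible call site honouring the single-source caveat above; magnet_source_count() is a hypothetical helper, not a gtk-gnutella API:

/* Only flag DHT support when the magnet carries exactly one source. */
if (1 == magnet_source_count(res))	/* hypothetical helper */
	magnet_set_dht(res, TRUE);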
Example #4
File: tsync.c Project: MrJoe/gtk-gnutella
/**
 * Send time synchronization request to specified node.
 *
 * When node_id is non-zero, it refers to the connected node to which
 * we're sending the time synchronization request.
 */
void
tsync_send(struct gnutella_node *n, const struct nid *node_id)
{
	struct tsync *ts;

	g_return_if_fail(n->port != 0);
	if (!NODE_IS_WRITABLE(n))
		return;

	WALLOC(ts);
	ts->magic = TSYNC_MAGIC;
	tm_now_exact(&ts->sent);
	ts->sent.tv_sec = clock_loc2gmt(ts->sent.tv_sec);
	ts->node_id = nid_ref(node_id);
	ts->udp = booleanize(NODE_IS_UDP(n));

	/*
	 * As far as time synchronization goes, we must get the reply within
	 * the next TSYNC_EXPIRE_MS millisecs.
	 */

	ts->expire_ev = cq_main_insert(TSYNC_EXPIRE_MS, tsync_expire, ts);

	hevset_insert(tsync_by_time, ts);

	vmsg_send_time_sync_req(n, GNET_PROPERTY(ntp_detected), &ts->sent);
}
Example #5
/**
 * Decode major/minor and release information from the specified two
 * contiguous GUID bytes.
 *
 * @param guid is the GUID considered
 * @param start is the offset of the markup (2 or 4) in the GUID
 * @param majp is filled with the major version if it's a GTKG markup
 * @param minp is filled with the minor version if it's a GTKG markup
 * @param relp is filled with the release status if it's a GTKG markup
 *
 * @return whether we recognized a GTKG markup.
 */
static bool
guid_extract_gtkg_info(const guid_t *guid, size_t start,
	uint8 *majp, uint8 *minp, bool *relp)
{
	uint8 major;
	uint8 minor;
	bool release;
	uint16 mark;
	uint16 xmark;
	uint8 product_major;

	g_assert(start < GUID_RAW_SIZE - 1);
	major = peek_u8(&guid->v[start]) & 0x0f;
	minor = peek_u8(&guid->v[start + 1]) & 0x7f;
	release = booleanize(0 == (peek_u8(&guid->v[start + 1]) & 0x80));

	mark = guid_gtkg_encode_version(major, minor, release);
	xmark = peek_be16(&guid->v[start]);

	if (mark != xmark)
		return FALSE;

	/*
	 * Even if by extraordinary the above check matches, make sure we're
	 * not distant from more than one major version.  Since GTKG versions
	 * expire every year, and I don't foresee more than one major version
	 * release per year, this strengthens the positive check.
	 */

	product_major = product_get_major();

	if (major != product_major) {
		int8 delta = product_major - major;
		if (delta < -1 || delta > 1)
			return FALSE;
	}

	/*
	 * We've validated the GUID: the HEC is correct and the version is
	 * consistently encoded, judging by the highest 4 bits of guid.v[4].
	 */

	if (majp) *majp = major;
	if (minp) *minp = minor;
	if (relp) *relp = release;

	return TRUE;
}
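
A self-contained sketch of the two-byte markup layout this decoder assumes; the sample bytes are made up for illustration:

#include <stdint.h>
#include <stdio.h>

/*
 * GTKG version markup, as decoded above:
 *   byte 0:  ....MMMM   major version in the low 4 bits
 *   byte 1:  Rmmmmmmm   minor version in the low 7 bits,
 *                       bit 7 set => unstable ("u") build
 */
int
main(void)
{
	const uint8_t v[2] = { 0x01, 0x63 };	/* made-up markup bytes */
	uint8_t major = v[0] & 0x0f;		/* 1 */
	uint8_t minor = v[1] & 0x7f;		/* 0x63 = 99 */
	int release = 0 == (v[1] & 0x80);	/* bit 7 clear: release */

	printf("GTKG/%u.%u%s\n",
		(unsigned) major, (unsigned) minor, release ? "" : "u");
	return 0;	/* prints "GTKG/1.99" */
}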
Example #6
/**
 * Create a new text node, inserted under the parent node as its last child.
 *
 * When created as verbatim, any '&' character is left as-is, otherwise they
 * are escaped.  All '<' and '>' are escaped regardless.
 *
 * @param parent		the parent node (NULL creates a standalone node)
 * @param text			the text
 * @param verbatim		whether text is to be emitted verbatim or escaped
 */
xnode_t *
xnode_new_text(xnode_t *parent, const char *text, bool verbatim)
{
	xnode_t *xn;

	g_assert(text != NULL);

	xn = xnode_new(XNODE_T_TEXT);
	xn->u.t.text = h_strdup(text);
	xn->u.t.asis = booleanize(verbatim);

	if (parent != NULL)
		xnode_add_child(parent, xn);

	return xn;
}
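
A hedged sketch of the escaping contract described above; the parent node is assumed to exist already:

/* Escaped text: '&', '<' and '>' are entity-encoded on output. */
xnode_new_text(parent, "a < b & c", FALSE);

/* Verbatim text: '&' is left as-is, so pre-encoded entities survive. */
xnode_new_text(parent, "a &amp; c", TRUE);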
Example #7
File: udp.c Project: MrJoe/gtk-gnutella
static bool
udp_ping_register(const struct guid *muid,
	host_addr_t addr, uint16 port,
	udp_ping_cb_t cb, void *data, bool multiple)
{
	struct udp_ping *ping;
	uint length;

	g_assert(muid);
	g_return_val_if_fail(udp_pings, FALSE);

	if (hash_list_contains(udp_pings, muid)) {
		/* Probably a duplicate */
		return FALSE;
	}

	/* random early drop */
	length = hash_list_length(udp_pings);
	if (length >= UDP_PING_MAX) {
		return FALSE;
	} else if (length > (UDP_PING_MAX / 4) * 3) {
		if (random_value(UDP_PING_MAX - 1) < length)
			return FALSE;
	}

	WALLOC(ping);
	ping->muid = *muid;
	ping->added = tm_time();
	{
		gnet_host_t host;
		gnet_host_set(&host, addr, port);
		ping->host = atom_host_get(&host);
	}

	if (cb != NULL) {
		WALLOC0(ping->callback);
		ping->callback->cb = cb;
		ping->callback->data = data;
		ping->callback->multiple = booleanize(multiple);
	} else {
		ping->callback = NULL;
	}
	hash_list_append(udp_pings, ping);
	return TRUE;
}
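
The "random early drop" block above admits pings unconditionally below 3/4 of capacity, then with a probability that shrinks as the table fills. A standalone restatement of that policy, assuming UDP_PING_MAX is 1024 (the real constant may differ):

#include <stdbool.h>

#define UDP_PING_MAX 1024	/* assumed value, for illustration only */

/*
 * Decide admission for a table already holding `length' entries;
 * `draw' models random_value(UDP_PING_MAX - 1), i.e. a uniform
 * number in [0, UDP_PING_MAX - 1].
 */
static bool
ping_admit(unsigned length, unsigned draw)
{
	if (length >= UDP_PING_MAX)
		return false;			/* full: always drop */
	if (length > (UDP_PING_MAX / 4) * 3)
		return draw >= length;	/* drop with p = length / UDP_PING_MAX */
	return true;				/* plenty of room: accept */
}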
Example #8
/**
 * Allocate a new hash set capable of holding 2^bits items.
 */
static hikset_t *
hikset_allocate(size_t bits, bool raw, size_t offset)
{
	hikset_t *hx;

	if (raw)
		XPMALLOC0(hx);
	else
		WALLOC0(hx);

	hx->magic = HIKSET_MAGIC;
	hx->ops = &hikset_ops;
	hx->kset.raw_memory = booleanize(raw);
	hx->offset = offset;
	hash_arena_allocate(HASH(hx), bits);

	return hx;
}
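
A hedged internal-usage sketch: a hikset keys items on a field embedded in the value itself, located `offset' bytes in; the struct below is hypothetical:

#include <stddef.h>	/* offsetof */

struct item {
	uint64 key;		/* embedded key, hypothetical layout */
	void *payload;
};

/* 2^10 = 1024 slots, walloc()-backed arena (raw = FALSE). */
hikset_t *hx = hikset_allocate(10, FALSE, offsetof(struct item, key));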
Example #9
File: slist.c Project: qgewfg/gtk-gnutella
/**
 * Get an iterator on the slist.
 */
static slist_iter_t *
slist_iter_new(const slist_t *slist, bool before, bool removable)
{
	slist_iter_t *iter;

	if (slist != NULL) {
		slist_t *wslist;

		slist_check(slist);

		WALLOC(iter);
		iter->magic = LIST_ITER_MAGIC;
		iter->slist = slist;

		iter->prev = NULL;
		iter->cur = NULL;
		iter->next = slist->head;
		if (!before) {
			iter->cur = iter->next;
			iter->next = g_slist_next(iter->cur);
		}

		iter->stamp = slist->stamp;
		iter->removable = booleanize(removable);

		/*
		 * The reference count is an internal state, we're not violating
		 * the "const" contract here (the abstract data type is not  changed).
		 */

		wslist = deconstify_pointer(slist);
		wslist->refcount++;
	} else {
		iter = NULL;
	}

	return iter;
}
Example #10
File: node.c Project: graaff/gtk-gnutella
/**
 * Handle reception of a /Q2
 */
static void
g2_node_handle_q2(gnutella_node_t *n, const g2_tree_t *t)
{
	const guid_t *muid;
	size_t paylen;
	const g2_tree_t *c;
	char *dn = NULL;
	char *md = NULL;
	uint32 iflags = 0;
	search_request_info_t sri;
	bool has_interest = FALSE;

	node_inc_rx_query(n);

	/*
	 * As a G2 leaf, we cannot handle queries coming from UDP because we
	 * are not supposed to get any!
	 */

	if (NODE_IS_UDP(n)) {
		g2_node_drop(G_STRFUNC, n, t, "coming from UDP");
		return;
	}

	/*
	 * The MUID of the query is the payload of the root node.
	 */

	muid = g2_tree_node_payload(t, &paylen);

	if (paylen != GUID_RAW_SIZE) {
		g2_node_drop(G_STRFUNC, n, t, "missing MUID");
		return;
	}

	/*
	 * Make sure we have never seen this query already.
	 *
	 * To be able to leverage Gnutella's routing table to detect duplicates
	 * over a certain lifespan, we are going to fake a minimal Gnutella header
	 * with a message type of GTA_MSG_G2_SEARCH, which is never actually used
	 * on the network.
	 *
	 * The TTL and hops are set to 1 and 0 initially, so that the message seems
	 * to come from a neighbouring host and cannot be forwarded.
	 *
	 * When that is done, we will be able to call route_message() and have
	 * all the necessary bookkeeping done for us.
	 */

	{
		struct route_dest dest;

		gnutella_header_set_muid(&n->header, muid);
		gnutella_header_set_function(&n->header, GTA_MSG_G2_SEARCH);
		gnutella_header_set_ttl(&n->header, 1);
		gnutella_header_set_hops(&n->header, 0);

		if (!route_message(&n, &dest))
			return;			/* Already accounted as duplicated, and logged */
	}

	/*
	 * Setup request information so that we can call search_request()
	 * to process our G2 query.
	 */

	ZERO(&sri);
	sri.magic = SEARCH_REQUEST_INFO_MAGIC;

	/*
	 * Handle the children of /Q2.
	 */

	G2_TREE_CHILD_FOREACH(t, c) {
		enum g2_q2_child ct = TOKENIZE(g2_tree_name(c), g2_q2_children);
		const char *payload;

		switch (ct) {
		case G2_Q2_DN:
			payload = g2_tree_node_payload(c, &paylen);
			if (payload != NULL && NULL == dn) {
				uint off = 0;
				/* Not NUL-terminated, need to h_strndup() it */
				dn = h_strndup(payload, paylen);
				if (!query_utf8_decode(dn, &off)) {
					gnet_stats_count_dropped(n, MSG_DROP_MALFORMED_UTF_8);
					goto done;		/* Drop the query */
				}
				sri.extended_query = dn + off;
				sri.search_len = paylen - off;		/* In bytes */
			}
			break;

		case G2_Q2_I:
			if (!has_interest)
				iflags = g2_node_extract_interest(c);
			has_interest = TRUE;
			break;

		case G2_Q2_MD:
			payload = g2_tree_node_payload(c, &paylen);
			if (payload != NULL && NULL == md) {
				/* Not NUL-terminated, need to h_strndup() it */
				md = h_strndup(payload, paylen);
			}
			break;

		case G2_Q2_NAT:
			sri.flags |= QUERY_F_FIREWALLED;
			break;

		case G2_Q2_SZR:			/* Size limits */
			if (g2_node_extract_size_request(c, &sri.minsize, &sri.maxsize))
				sri.size_restrictions = TRUE;
			break;

		case G2_Q2_UDP:
			if (!sri.oob)
				g2_node_extract_udp(c, &sri, n);
			break;

		case G2_Q2_URN:
			g2_node_extract_urn(c, &sri);
			break;
		}
	}

	/*
	 * When there is no /Q2/I, return a default set of information.
	 */

	if (!has_interest)
		iflags = G2_Q2_F_DFLT;

	/*
	 * If there is meta-data, try to intuit which media types they are
	 * looking for.
	 *
	 * The payload is XML looking like "<audio/>" or "<video/>" but there
	 * can be attributes and we don't want to do a full XML parsing there.
	 * Hence we'll base our analysis on simple lexical parsing, which is
	 * why we call a routine to "intuit", not to "extract".
	 *
	 * Also, this is poorer than Gnutella's GGEP "M" because apparently there
	 * can be only a single type, since the XML payload must obey some
	 * kind of schema and there is an audio schema, a video schema, etc...
	 * XML was just a wrong design choice there.
	 */

	if (md != NULL)
		sri.media_types = g2_node_intuit_media_type(md);

	/*
	 * Validate the return address if OOB hit delivery is configured.
	 */

	if (sri.oob && !search_oob_is_allowed(n, &sri))
		goto done;

	/*
	 * Update statistics, as done in search_request_preprocess() for Gnutella.
	 */

	if (sri.exv_sha1cnt) {
		gnet_stats_inc_general(GNR_QUERY_G2_SHA1);

		if (NULL == dn) {
			int i;
			for (i = 0; i < sri.exv_sha1cnt; i++) {
				search_request_listener_emit(QUERY_SHA1,
					sha1_base32(&sri.exv_sha1[i].sha1), n->addr, n->port);
			}
		}
	}

	if (dn != NULL && !is_ascii_string(dn))
		gnet_stats_inc_general(GNR_QUERY_G2_UTF8);

	if (dn != NULL)
		search_request_listener_emit(QUERY_STRING, dn, n->addr, n->port);

	if (!search_is_valid(n, 0, &sri))
		goto done;

	/*
	 * Perform the query.
	 */

	sri.g2_query     = TRUE;
	sri.partials     = booleanize(iflags & G2_Q2_F_PFS);
	sri.g2_wants_url = booleanize(iflags & G2_Q2_F_URL);
	sri.g2_wants_alt = booleanize(iflags & G2_Q2_F_A);
	sri.g2_wants_dn  = booleanize(iflags & G2_Q2_F_DN);

	search_request(n, &sri, NULL);

done:

	HFREE_NULL(dn);
	HFREE_NULL(md);
}
Example #11
static inline gboolean
is_big(unsigned short off)
{
	return booleanize(off & 0x8000);
}
Example #12
File: pair.c Project: qgewfg/gtk-gnutella
static inline ALWAYS_INLINE bool
is_big(unsigned short off)
{
	return booleanize(off & BIG_FLAG);
}
Example #13
/**
 * Get a new index in the cache, and update LRU data structures.
 *
 * @param db	the database
 * @param num	page number in the DB for which we want a cache index
 *
 * @return -1 on error, or the allocated cache index.
 */
static int
getidx(DBM *db, long num)
{
	struct lru_cache *cache = db->cache;
	long n;		/* Cache index */

	/*
	 * If we invalidated pages, reuse their indices.
	 * If we have not used all the pages yet, get the next one.
	 * Otherwise, use the least-recently requested page.
	 */

	if (slist_length(cache->available)) {
		void *v = slist_shift(cache->available);
		n = pointer_to_int(v);
		g_assert(n >= 0 && n < cache->pages);
		g_assert(!cache->dirty[n]);
		g_assert(-1 == cache->numpag[n]);
		hash_list_prepend(cache->used, int_to_pointer(n));
	} else if (cache->next < cache->pages) {
		n = cache->next++;
		cache->dirty[n] = FALSE;
		hash_list_prepend(cache->used, int_to_pointer(n));
	} else {
		void *last = hash_list_tail(cache->used);
		long oldnum;
		gboolean had_ioerr = booleanize(db->flags & DBM_IOERR_W);

		hash_list_moveto_head(cache->used, last);
		n = pointer_to_int(last);

		/*
		 * This page is no longer cached as its cache index is being reused.
		 * Flush it to disk if dirty before discarding it.
		 */

		g_assert(n >= 0 && n < cache->pages);

		oldnum = cache->numpag[n];

		if (cache->dirty[n] && !writebuf(db, oldnum, n)) {
			hash_list_iter_t *iter;
			void *item;
			gboolean found = FALSE;

			/*
			 * Cannot flush dirty page now, probably because we ran out of
			 * disk space.  Look through the cache whether we can reuse a
			 * non-dirty page instead, which would let us keep the dirty
			 * page a little longer in the cache, in the hope it can then
			 * be properly flushed later.
			 */

			iter = hash_list_iterator_tail(cache->used);

			while (NULL != (item = hash_list_iter_previous(iter))) {
				long i = pointer_to_int(item);

				g_assert(i >= 0 && i < cache->pages);

				if (!cache->dirty[i]) {
					found = TRUE;	/* OK, reuse cache slot #i then */
					n = i;
					oldnum = cache->numpag[i];
					break;
				}
			}

			hash_list_iter_release(&iter);

			if (found) {
				g_assert(item != NULL);
				hash_list_moveto_head(cache->used, item);

				/*
				 * Clear error condition if we had none prior to the flush
				 * attempt, since we can do without it for now.
				 */

				if (!had_ioerr)
					db->flags &= ~DBM_IOERR_W;

				g_warning("sdbm: \"%s\": "
					"reusing cache slot used by clean page #%ld instead",
					sdbm_name(db), oldnum);
			} else {
				g_warning("sdbm: \"%s\": cannot discard dirty page #%ld",
					sdbm_name(db), oldnum);
				return -1;
			}
		}

		g_hash_table_remove(cache->pagnum, ulong_to_pointer(oldnum));
		cache->dirty[n] = FALSE;
	}

	/*
	 * Record the association between the cache index and the page number.
	 */

	g_assert(n >= 0 && n < cache->pages);

	cache->numpag[n] = num;
	g_hash_table_insert(cache->pagnum,
		ulong_to_pointer(num), int_to_pointer(n));

	return n;
}
Example #14
/**
 * Synchronize dirty values.
 *
 * The ``which'' argument is a bitfield indicating the set of things to
 * synchronize:
 *
 * DBMW_SYNC_CACHE requests that dirty values from the local DBMW cache
 * be flushed to the DB map layer immediately.
 *
 * DBMW_SYNC_MAP requests that the DB map layer be flushed, if it is backed
 * by disk data.
 *
 * If DBMW_DELETED_ONLY is specified along with DBMW_SYNC_CACHE, only the
 * dirty values that are marked as pending deletion are flushed.
 *
 * @return amount of value flushes plus amount of sdbm page flushes, -1 if
 * an error occurred.
 */
ssize_t
dbmw_sync(dbmw_t *dw, int which)
{
	ssize_t amount = 0;
	size_t pages = 0, values = 0;
	bool error = FALSE;

	dbmw_check(dw);

	if (which & DBMW_SYNC_CACHE) {
		struct flush_context ctx;

		ctx.dw = dw;
		ctx.error = FALSE;
		ctx.deleted_only = booleanize(which & DBMW_DELETED_ONLY);
		ctx.amount = 0;

		if (dbg_ds_debugging(dw->dbg, 6, DBG_DSF_CACHING)) {
			dbg_ds_log(dw->dbg, dw, "%s: syncing cache%s",
				G_STRFUNC, ctx.deleted_only ? " (deleted only)" : "");
		}

		map_foreach(dw->values, flush_dirty, &ctx);

		if (!ctx.error && !ctx.deleted_only)
			dw->count_needs_sync = FALSE;

		/*
		 * We can safely reset the amount of cached entries to 0, regardless
		 * of whether we only sync'ed deleted entries: that value is rather
		 * meaningless when ``count_needs_sync'' is TRUE anyway since we will
		 * come here first, and we'll then reset it to zero.
		 */

		dw->cached = 0;		/* No more dirty values */

		amount += ctx.amount;
		values = ctx.amount;
		error = ctx.error;
	}
	if (which & DBMW_SYNC_MAP) {
		ssize_t ret;

		if (dbg_ds_debugging(dw->dbg, 6, DBG_DSF_CACHING))
			dbg_ds_log(dw->dbg, dw, "%s: syncing map", G_STRFUNC);

		ret = dbmap_sync(dw->dm);
		if (-1 == ret) {
			error = TRUE;
		} else {
			amount += ret;
			pages = ret;
		}
	}

	if (dbg_ds_debugging(dw->dbg, 5, DBG_DSF_CACHING)) {
		dbg_ds_log(dw->dbg, dw, "%s: %s (flushed %zu value%s, %zu page%s)",
			G_STRFUNC, error ? "FAILED" : "OK",
			values, plural(values), pages, plural(pages));
	}

	return error ? -1 : amount;
}
Example #15
/**
 * Initialize the driver.
 *
 * @return NULL if there is an initialization problem.
 */
static void *
tx_deflate_init(txdrv_t *tx, void *args)
{
	struct attr *attr;
	struct tx_deflate_args *targs = args;
	z_streamp outz;
	int ret;
	int i;

	g_assert(tx);
	g_assert(NULL != targs->cb);

	WALLOC(outz);
	outz->zalloc = zlib_alloc_func;
	outz->zfree = zlib_free_func;
	outz->opaque = NULL;

	/*
	 * Reduce memory requirements for deflation when running as an ultrapeer.
	 *
	 * Memory used for deflation is:
	 *
	 *	(1 << (window_bits + 2)) + (1 << (mem_level + 9))
	 *
	 * For leaves, we use window_bits = 15 and mem_level = 9, which makes
	 * for 128 KiB + 256 KiB = 384 KiB per connection (TX side).
	 *
	 * For ultra peers, we use window_bits = 14 and mem_level = 6, so this
	 * uses 64 KiB + 32 KiB = 96 KiB only.
	 *
	 * Since ultra peers have many more connections than leaves, the memory
	 * savings are drastic, yet compression levels remain around 50% (varies
	 * depending on the nature of the traffic, of course).
	 *
	 *		--RAM, 2009-04-09
	 *
	 * For Ultra <-> Ultra connections we use window_bits = 15 and mem_level = 9
	 * and request the best compression because the number of ultra connections
	 * is far smaller than the number of leaf connections and modern machines
	 * can cope with a "best" compression overhead.
	 *
	 * This is now controlled with the "reduced" argument, so this layer does
	 * not need to know whether we're an ultra node or even what an ultra
	 * node is... It just knows whether we have to setup a fully compressed
	 * connection or a reduced one (both in terms of memory usage and level
	 * of compression).
	 *
	 *		--RAM, 2011-11-29
	 */

	{
		int window_bits = MAX_WBITS;		/* Must be 8 .. MAX_WBITS */
		int mem_level = MAX_MEM_LEVEL;		/* Must be 1 .. MAX_MEM_LEVEL */
		int level = Z_BEST_COMPRESSION;

		if (targs->reduced) {
			/* Ultra -> Leaf connection */
			window_bits = 14;
			mem_level = 6;
			level = Z_DEFAULT_COMPRESSION;
		}

		g_assert(window_bits >= 8 && window_bits <= MAX_WBITS);
		g_assert(mem_level >= 1 && mem_level <= MAX_MEM_LEVEL);
		g_assert(level == Z_DEFAULT_COMPRESSION ||
			(level >= Z_BEST_SPEED && level <= Z_BEST_COMPRESSION));

		ret = deflateInit2(outz, level, Z_DEFLATED,
				targs->gzip ? (-window_bits) : window_bits, mem_level,
				Z_DEFAULT_STRATEGY);
	}

	if (Z_OK != ret) {
		g_warning("unable to initialize compressor for peer %s: %s",
			gnet_host_to_string(&tx->host), zlib_strerror(ret));
		WFREE(outz);
		return NULL;
	}

	WALLOC0(attr);
	attr->cq = targs->cq;
	attr->cb = targs->cb;
	attr->buffer_size = targs->buffer_size;
	attr->buffer_flush = targs->buffer_flush;
	attr->nagle = booleanize(targs->nagle);
	attr->gzip.enabled = targs->gzip;

	attr->outz = outz;
	attr->tm_ev = NULL;

	for (i = 0; i < BUFFER_COUNT; i++) {
		struct buffer *b = &attr->buf[i];

		b->arena = b->wptr = b->rptr = walloc(attr->buffer_size);
		b->end = &b->arena[attr->buffer_size];
	}

	attr->fill_idx = 0;
	attr->send_idx = -1;		/* Signals: none ready */

	if (attr->gzip.enabled) {
		/* See RFC 1952 - GZIP file format specification version 4.3 */
		static const unsigned char header[] = {
			0x1f, 0x8b, /* gzip magic */
			0x08,		/* compression method: deflate */
			0,			/* flags: none */
			0, 0, 0, 0, /* modification time: unavailable */
			0,			/* extra flags: none */
			0xff,		/* filesystem: unknown */
		};
		struct buffer *b;

		b = &attr->buf[attr->fill_idx];	/* Buffer we fill */
		g_assert(sizeof header <= (size_t) (b->end - b->wptr));
		b->wptr = mempcpy(b->wptr, header, sizeof header);

		attr->gzip.crc = crc32(0, NULL, 0);
		attr->gzip.size = 0;
	}

	tx->opaque = attr;

	/*
	 * Register our service routine to the lower layer.
	 */

	tx_srv_register(tx->lower, deflate_service, tx);

	return tx;		/* OK */
}
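
A hedged sketch of the argument block consumed above; the field names follow the targs-> accesses in the function, while the concrete values and the queue/callback handles are assumptions (the real code drives this through the TX stack rather than calling it directly):

struct tx_deflate_args args;

args.cq = cq;				/* callout queue handle, assumed available */
args.cb = &deflate_cb;		/* callback vector, assumed */
args.buffer_size = 4096;
args.buffer_flush = 4096;
args.nagle = TRUE;			/* coalesce small writes */
args.gzip = FALSE;			/* raw deflate, no gzip framing */
args.reduced = TRUE;		/* ~96 KiB zlib footprint instead of ~384 KiB */

if (NULL == tx_deflate_init(tx, &args))
	g_warning("deflate TX layer setup failed");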