Example #1
static ssize_t
tls_write_intern(struct wrap_io *wio, const void *buf, size_t size)
{
    struct gnutella_socket *s = wio->ctx;
    ssize_t ret;

    g_assert((0 == s->tls.snarf) ^ (NULL == buf));
    g_assert((0 == s->tls.snarf) ^ (0 == size));

    size = tls_adjust_send_size(s, size);
    ret = gnutls_record_send(tls_socket_get_session(s), buf, size);
    if (ret < 0) {
        switch (ret) {
        case GNUTLS_E_INTERRUPTED:
        case GNUTLS_E_AGAIN:
            if (0 == s->tls.snarf) {
                s->tls.snarf = size;
                ret = size;
            } else {
                errno = VAL_EAGAIN;
                ret = -1;
            }
            break;
        case GNUTLS_E_PULL_ERROR:
        case GNUTLS_E_PUSH_ERROR:
            /* Logging already done by tls_transport_debug() */
            errno = (SOCK_F_CONNRESET & s->flags) ? ECONNRESET : EIO;
            ret = -1;
            goto finish;

        default:
            if (GNET_PROPERTY(tls_debug)) {
                g_carp("tls_write(): gnutls_record_send(fd=%d) failed: "
                       "host=%s snarf=%zu error=\"%s\"",
                       s->file_desc, host_addr_port_to_string(s->addr, s->port),
                       s->tls.snarf, gnutls_strerror(ret));
            }
            errno = EIO;
            ret = -1;
            goto finish;
        }
    } else {
        if (s->tls.snarf) {
            g_assert(s->tls.snarf >= (size_t) ret);
            s->tls.snarf -= ret;
            errno = VAL_EAGAIN;
            ret = -1;
            goto finish;
        }
    }

    if (s->tls.snarf) {
        tls_socket_evt_change(s, INPUT_EVENT_WX);
    }

finish:
    g_assert(ret == (ssize_t) -1 || (size_t) ret <= size);
    return ret;
}
Example #2
File: bg.c Project: Haxe/gtk-gnutella
/**
 * Free task structure.
 */
static void
bg_task_free(struct bgtask *bt)
{
	GSList *l;
	int stepsize;
	int count;

	g_assert(bt);
	g_assert(BGTASK_DEAD_MAGIC == bt->magic);
	
	g_assert(!(bt->flags & TASK_F_RUNNING));
	g_assert(bt->flags & TASK_F_EXITED);

	stepsize = bt->stepcnt * sizeof(bgstep_cb_t *);
	wfree(bt->stepvec, stepsize);

	for (count = 0, l = bt->wq; l; l = l->next) {
		count++;
		if (bt->item_free)
			(*bt->item_free)(l->data);
	}
	gm_slist_free_null(&bt->wq);

	if (count)
		g_carp("freed %d pending item%s for daemon \"%s\" task",
			count, count == 1 ? "" : "s", bt->name);

	bt->magic = 0;
	WFREE(bt);
}
Example #3
/**
 * Declare a user-defined mapping between a namespace URI and a prefix.
 */
static void
xfmt_prefix_declare(struct xfmt_pass2 *xp2, const char *uri, const char *prefix)
{
    nv_pair_t *nv;

    nv = nv_table_lookup(xp2->uri2prefix, uri);
    if (nv != NULL) {
        /*
         * Silently ignore the mapping if we already have seen an identical one
         * in the XML tree during the first pass.
         */

        if (strcmp(prefix, nv_pair_value_str(nv)) != 0) {
            g_carp("XFMT ignoring supplied prefix '%s' for '%s': "
                   "already saw '%s' in the tree", prefix, uri,
                   nv_pair_value_str(nv));
        }
    } else {
        /*
         * New mapping.
         */

        nv = nv_pair_make_static_str(uri, prefix);
        nv_table_insert_pair(xp2->uri2prefix, nv);
    }
}
Example #4
/**
 * Dispose of the data structure, but not of the items it holds.
 *
 * @param hl_ptr	pointer to the variable containing the address of the list
 *
 * As a side effect, the variable containing the address of the list
 * is nullified, since it is no longer allowed to refer to the structure.
 */
void
hash_list_free(hash_list_t **hl_ptr)
{
	g_assert(NULL != hl_ptr);

	if (*hl_ptr) {
		hash_list_t *hl = *hl_ptr;
		link_t *lk, *next;

		hash_list_check(hl);

		if (--hl->refcount != 0) {
			g_carp("%s: hash list is still referenced! "
				"(hl=%p, hl->refcount=%d)",
				G_STRFUNC, cast_to_pointer(hl), hl->refcount);
		}

		hikset_free_null(&hl->ht);

		for (lk = elist_first(&hl->list); lk != NULL; lk = next) {
			struct hash_list_item *item = ITEM(lk);
			next = elist_next(lk);	/* Embedded, get next before freeing */
			WFREE(item);
		}

		elist_discard(&hl->list);
		hl->magic = 0;
		WFREE(hl);
		*hl_ptr = NULL;
	}
}
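A minimal caller sketch of the contract documented above: the address of the list variable is passed in, the structure (but not its items) is disposed of, and the variable is nullified on return. The constructor hash_list_new() and its NULL arguments are assumptions about the surrounding hashlist API, shown for illustration only.

static void
hash_list_free_example(void)
{
	/* hash_list_new() is assumed here; check the actual API before use */
	hash_list_t *hl = hash_list_new(NULL, NULL);

	/* ... insert and use items; they are not freed by hash_list_free() ... */

	hash_list_free(&hl);
	g_assert(NULL == hl);	/* variable was nullified as a side effect */
}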
Example #5
/**
 * Record a tree-defined mapping between a prefix and a namespace URI.
 */
static void
xfmt_prefix_record(struct xfmt_pass1 *xp1, const char *prefix, const char *uri)
{
    nv_pair_t *nv;

    /*
     * Our policy is to use one single prefix for a given namespace URI
     * throughout the document.  Although several prefixes could be used,
     * this is confusing to read and serves no value: a human will be misled
     * into thinking the two namespaces are different because they carry
     * distinct prefixes, and a machine will not care about the prefix value.
     */

    nv = nv_table_lookup(xp1->uri2prefix, uri);
    if (nv != NULL) {
        /*
         * Silently ignore the mapping if we already have seen an identical one
         * in the XML tree.
         */

        if (strcmp(prefix, nv_pair_value_str(nv)) != 0) {
            g_carp("XFMT ignoring prefix '%s' for '%s': "
                   "already saw '%s' earlier in the tree", prefix, uri,
                   nv_pair_value_str(nv));
        }
    } else {
        /*
         * New mapping.
         */

        nv = nv_pair_make_static_str(uri, prefix);
        nv_table_insert_pair(xp1->uri2prefix, nv);
    }
}
Example #6
/**
 * Creates an incremental zlib deflater for `len' bytes starting at `data',
 * with specified compression `level'.
 *
 * @param data		data to compress; if NULL, will be incrementally given
 * @param len		length of data to compress (if data not NULL) or estimation
 * @param dest		where compressed data should go, or NULL if allocated
 * @param destlen	length of supplied output buffer, if dest != NULL
 * @param level		compression level, between 0 and 9.
 *
 * @return new deflater, or NULL if error.
 */
static zlib_deflater_t *
zlib_deflater_alloc(
    const void *data, size_t len, void *dest, size_t destlen, int level)
{
    zlib_deflater_t *zd;
    z_streamp outz;
    int ret;

    g_assert(size_is_non_negative(len));
    g_assert(size_is_non_negative(destlen));
    g_assert(level == Z_DEFAULT_COMPRESSION || (level >= 0 && level <= 9));

    WALLOC(outz);
    outz->zalloc = zlib_alloc_func;
    outz->zfree = zlib_free_func;
    outz->opaque = NULL;

    ret = deflateInit(outz, level);

    if (ret != Z_OK) {
        WFREE(outz);
        g_carp("%s(): unable to initialize compressor: %s",
               G_STRFUNC, zlib_strerror(ret));
        return NULL;
    }

    WALLOC0(zd);
    zd->zs.magic = ZLIB_DEFLATER_MAGIC;
    zd->zs.z = outz;
    zd->zs.closed = FALSE;

    zlib_stream_init(&zd->zs, data, len, dest, destlen);

    return zd;
}
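A hedged sketch of how this internal constructor might be invoked by a public wrapper, based on the parameter documentation above: compress a fully-known buffer into a dynamically-allocated output area at the default level. The wrapper name is illustrative.

static zlib_deflater_t *
zlib_deflater_alloc_example(const void *data, size_t len)
{
    /* NULL dest: the output buffer is allocated (and grown) as needed */
    return zlib_deflater_alloc(data, len, NULL, 0, Z_DEFAULT_COMPRESSION);
}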
Example #7
/**
 * Mark .dat blocks used to hold the value described in the .pag space as
 * being allocated in the bitmap checking array.
 *
 * @param db		the sdbm database
 * @param bval		start of big value in the page
 * @param blen		length of big value in the page
 */
void
bigval_mark_used(DBM *db, const char *bval, size_t blen)
{
	size_t len = big_length(bval);

	if (bigval_length(len) != blen) {
		g_carp("sdbm: \"%s\": %s: inconsistent value length %lu in .pag",
			sdbm_name(db), G_STRFUNC, (unsigned long) len);
		return;
	}

	big_file_mark_used(db, bigval_blocks(bval), bigblocks(len));
}
Example #8
/**
 * Allocate a prefix as a shorthand for the URI.
 *
 * @return prefix string to use, which will be freed by symbol tables
 * when leaving scope.
 */
static const char *
xfmt_new_prefix(struct xfmt_pass2 *xp2, const char *uri)
{
    const char *prefix = NULL;
    bool free_prefix = FALSE;

    /* The URI must not already exist in the symbol table */
    g_assert(NULL == symtab_lookup(xp2->uris, uri));

    /*
     * Check whether user has a preference for the prefix to use.
     *
     * If there is a prefix, there must be no identical prefix in scope
     * currently.
     */

    if (xp2->uri2prefix != NULL)
        prefix = nv_table_lookup_str(xp2->uri2prefix, uri);

    if (prefix != NULL) {
        const char *used_uri = symtab_lookup(xp2->prefixes, prefix);

        if (used_uri != NULL) {
            g_carp("XFMT cannot use prefix '%s' for '%s': "
                   "already used by '%s'", prefix, uri, used_uri);
            prefix = NULL;
        }
    }

    /*
     * Allocate a new prefix if required.
     */

    if (NULL == prefix) {
        prefix = h_strdup_printf("ns%u", xp2->pcount++);
        free_prefix = TRUE;
    }

    /*
     * Record associations in the symbol tables.
     */

    xfmt_ns_declare(xp2, prefix, uri, free_prefix);

    return prefix;
}
Example #9
File: slist.c Project: qgewfg/gtk-gnutella
/**
 * Dispose of the data structure.
 */
void
slist_free(slist_t **slist_ptr)
{
	g_assert(slist_ptr);
	if (*slist_ptr) {
		slist_t *slist;
	
		slist = *slist_ptr;
		slist_check(slist);

		if (--slist->refcount != 0) {
			g_carp("slist_free: slist is still referenced! "
					"(slist=%p, slist->refcount=%d)",
					cast_to_constpointer(slist), slist->refcount);
		}

		gm_slist_free_null(&slist->head);
		slist->tail = NULL;
		slist->magic = 0;
		WFREE(slist);
		*slist_ptr = NULL;
	}
}
Example #10
void
tls_bye(struct gnutella_socket *s)
{
    int ret;

    socket_check(s);
    g_return_if_fail(s->tls.ctx);
    g_return_if_fail(s->tls.ctx->session);

    if ((SOCK_F_EOF | SOCK_F_SHUTDOWN) & s->flags)
        return;

    if (tls_flush(&s->wio) && GNET_PROPERTY(tls_debug)) {
        g_warning("%s(): tls_flush(fd=%d) failed", G_STRFUNC, s->file_desc);
    }

    ret = gnutls_bye(s->tls.ctx->session,
                     SOCK_CONN_INCOMING != s->direction
                     ? GNUTLS_SHUT_WR : GNUTLS_SHUT_RDWR);

    if (ret < 0) {
        switch (ret) {
        case GNUTLS_E_INTERRUPTED:
        case GNUTLS_E_AGAIN:
            break;
        case GNUTLS_E_PULL_ERROR:
        case GNUTLS_E_PUSH_ERROR:
            /* Logging already done by tls_transport_debug() */
            break;
        default:
            if (GNET_PROPERTY(tls_debug)) {
                g_carp("gnutls_bye() failed: host=%s error=%m",
                       host_addr_port_to_string(s->addr, s->port));
            }
        }
    }
}
Example #11
File: udp.c Project: MrJoe/gtk-gnutella
/**
 * Records the RX layer to use for semi-reliable UDP traffic.
 */
void
udp_set_rx_semi_reliable(enum udp_sr_tag tag, rxdrv_t *rx, enum net_type net)
{
	unsigned i = 0;

	switch (net) {
	case NET_TYPE_IPV4:		i = 0; break;
	case NET_TYPE_IPV6:		i = 1; break;
	case NET_TYPE_LOCAL:
	case NET_TYPE_NONE:
		g_carp("mis-configured network type %s for socket",
			net_type_to_string(net));
		return;		/* Ignore, indicates mis-configuration of bind address */
	}

	switch (tag) {
	case UDP_SR_GTA:
		rx_sr_gta[i] = rx;
		break;
	case UDP_SR_GND:
		rx_sr_gnd[i] = rx;
		break;
	}
}
Example #12
File: dbmw.c Project: Haxe/gtk-gnutella
/**
 * Read value from database file, returning a pointer to the allocated
 * deserialized data.  These data can be modified freely and stored back,
 * but their lifetime will not exceed that of the next call to a dbmw
 * operation on the same descriptor.
 *
 * User code does not need to bother with freeing the allocated data, this
 * is managed directly by the DBM wrapper.
 *
 * @param dw		the DBM wrapper
 * @param key		the key (constant-width, determined at open time)
 * @param lenptr	if non-NULL, writes length of (deserialized) value
 *
 * @return pointer to value, or NULL if it was either not found or the
 * deserialization failed.
 */
G_GNUC_HOT gpointer
dbmw_read(dbmw_t *dw, gconstpointer key, size_t *lenptr)
{
	struct cached *entry;
	dbmap_datum_t dval;

	dbmw_check(dw);
	g_assert(key);

	dw->r_access++;

	entry = map_lookup(dw->values, key);
	if (entry) {
		dw->r_hits++;
		if (lenptr)
			*lenptr = entry->len;
		return entry->data;
	}

	/*
	 * Not cached, must read from DB.
	 */

	dw->ioerr = FALSE;
	dval = dbmap_lookup(dw->dm, key);

	if (dbmap_has_ioerr(dw->dm)) {
		dw->ioerr = TRUE;
		dw->error = errno;
		g_warning("DBMW \"%s\" I/O error whilst reading entry: %s",
			dw->name, dbmap_strerror(dw->dm));
		return NULL;
	} else if (NULL == dval.data)
		return NULL;	/* Not found in DB */

	/*
	 * Value was found, allocate a cache entry object for it.
	 */

	WALLOC0(entry);

	/*
	 * Deserialize data if needed.
	 */

	if (dw->unpack) {
		/*
		 * Allocate cache entry arena to hold the deserialized version.
		 */

		entry->data = walloc(dw->value_size);
		entry->len = dw->value_size;

		bstr_reset(dw->bs, dval.data, dval.len, BSTR_F_ERROR);

		if (!dbmw_deserialize(dw, dw->bs, entry->data, dw->value_size)) {
			g_carp("DBMW \"%s\" deserialization error in %s(): %s",
				dw->name,
				stacktrace_routine_name(func_to_pointer(dw->unpack), FALSE),
				bstr_error(dw->bs));
			/* Not calling value free routine on deserialization failures */
			wfree(entry->data, dw->value_size);
			WFREE(entry);
			return NULL;
		}

		if (lenptr)
			*lenptr = dw->value_size;
	} else {
		g_assert(dw->value_size >= dval.len);

		if (dval.len) {
			entry->len = dval.len;
			entry->data = wcopy(dval.data, dval.len);
		} else {
			entry->data = NULL;
			entry->len = 0;
		}

		if (lenptr)
			*lenptr = dval.len;
	}

	g_assert((entry->len != 0) == (entry->data != NULL));

	/*
	 * Insert into cache.
	 */

	(void) allocate_entry(dw, key, entry);

	return entry->data;
}
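A hedged caller sketch of the contract described in the comment above: the returned pointer belongs to the DBM wrapper and is only valid until the next dbmw operation on the same descriptor, so anything that must outlive it has to be copied. The consumer routine is a placeholder.

static void
dbmw_read_example(dbmw_t *dw, const void *key)
{
	size_t len;
	void *value = dbmw_read(dw, key, &len);

	if (NULL == value)
		return;		/* Not found, I/O error or deserialization failure */

	/*
	 * The data is managed by the DBM wrapper and becomes stale after the
	 * next dbmw operation on `dw'; copy it now if it must be kept longer.
	 */
	consume_value(value, len);	/* Hypothetical consumer */
}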
Example #13
File: mq_udp.c Project: MrJoe/gtk-gnutella
/**
 * Enqueue message, which becomes owned by the queue.
 *
 * The data held in `to' is copied, so the structure can be reclaimed
 * immediately by the caller.
 */
void
mq_udp_putq(mqueue_t *q, pmsg_t *mb, const gnet_host_t *to)
{
	size_t size;
	char *mbs;
	uint8 function;
	pmsg_t *mbe = NULL;		/* Extended message with destination info */
	bool error = FALSE;

	mq_check_consistency(q);

	dump_tx_udp_packet(to, mb);

again:
	mq_check_consistency(q);
	g_assert(mb);
	g_assert(!pmsg_was_sent(mb));
	g_assert(pmsg_is_unread(mb));
	g_assert(q->ops == &mq_udp_ops);	/* Is a UDP queue */

	/*
	 * Trap messages enqueued whilst in the middle of an mq_clear() operation
	 * by marking them as sent and dropping them.  Idem if queue was
	 * put in "discard" mode.
	 */

	if (q->flags & (MQ_CLEAR | MQ_DISCARD)) {
		pmsg_mark_sent(mb);	/* Let them think it was sent */
		pmsg_free(mb);		/* Drop message */
		return;
	}

	mq_check(q, 0);

	size = pmsg_size(mb);

	if (size == 0) {
		g_carp("%s: called with empty message", G_STRFUNC);
		goto cleanup;
	}

	/*
	 * Protect against recursion: we must not invoke puthere() whilst in
	 * the middle of another putq() or we would corrupt the qlink array:
	 * Messages received during recursion are inserted into the qwait list
	 * and will be stuffed back into the queue when the initial putq() ends.
	 *		--RAM, 2006-12-29
	 */

	if (q->putq_entered > 0) {
		pmsg_t *extended;

		if (debugging(20))
			g_warning("%s: %s recursion detected (%u already pending)",
				G_STRFUNC, mq_info(q), slist_length(q->qwait));

		/*
		 * We insert extended messages into the waiting queue since we need
		 * the destination information as well.
		 */

		extended = mq_udp_attach_metadata(mb, to);
		slist_append(q->qwait, extended);
		return;
	}
	q->putq_entered++;

	mbs = pmsg_start(mb);
	function = gmsg_function(mbs);

	gnet_stats_count_queued(q->node, function, mbs, size);

	/*
	 * If queue is empty, attempt a write immediately.
	 */

	if (q->qhead == NULL) {
		ssize_t written;

		if (pmsg_check(mb, q)) {
			written = tx_sendto(q->tx_drv, mb, to);
		} else {
			gnet_stats_count_flowc(mbs, FALSE);
			node_inc_txdrop(q->node);		/* Dropped during TX */
			written = (ssize_t) -1;
		}

		if ((ssize_t) -1 == written)
			goto cleanup;

		node_add_tx_given(q->node, written);

		if ((size_t) written == size) {
			if (GNET_PROPERTY(mq_udp_debug) > 5)
				g_debug("MQ UDP sent %s",
					gmsg_infostr_full(pmsg_start(mb), pmsg_written_size(mb)));

			goto cleanup;
		}

		/*
		 * Since UDP respects write boundaries, the following can never
		 * happen in practice: either we write the whole datagram, or none
		 * of it.
		 */

		if (written > 0) {
			g_warning(
				"partial UDP write (%zu bytes) to %s for %zu-byte datagram",
				written, gnet_host_to_string(to), size);
			goto cleanup;
		}

		/* FALL THROUGH */
	}

	if (GNET_PROPERTY(mq_udp_debug) > 5)
		g_debug("MQ UDP queued %s",
			gmsg_infostr_full(pmsg_start(mb), pmsg_written_size(mb)));

	/*
	 * Attach the destination information as metadata to the message, unless
	 * it is already known (possible only during unfolding of the queued data
	 * during re-entrant calls).
	 *
	 * This is later extracted via pmsg_get_metadata() on the extended
	 * message by the message queue to get the destination information.
	 *
	 * Then enqueue the extended message.
	 */

	if (NULL == mbe)
		mbe = mq_udp_attach_metadata(mb, to);

	q->cops->puthere(q, mbe, size);
	mb = NULL;

	/* FALL THROUGH */

cleanup:

	if (mb) {
		pmsg_free(mb);
		mb = NULL;
	}

	/*
	 * When reaching that point with a zero putq_entered counter, it means
	 * we triggered an early error condition.  Bail out.
	 */

	g_assert(q->putq_entered >= 0);

	if (q->putq_entered == 0)
		error = TRUE;
	else
		q->putq_entered--;

	mq_check(q, 0);

	/*
	 * If we're exiting here with no other putq() registered, then we must
	 * pop an item off the head of the list and iterate again.
	 */

	if (0 == q->putq_entered && !error) {
		mbe = slist_shift(q->qwait);
		if (mbe) {
			struct mq_udp_info *mi = pmsg_get_metadata(mbe);

			mb = mbe;		/* An extended message "is-a" message */
			to = &mi->to;

			if (debugging(20))
				g_warning(
					"%s: %s flushing waiting to %s (%u still pending)",
					G_STRFUNC, mq_info(q), gnet_host_to_string(to),
					slist_length(q->qwait));

			goto again;
		}
	}

	return;
}
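Because the destination in `to' is copied by the queue, a caller can hand in a stack-allocated gnet_host_t and forget about it immediately, as in this sketch; gnet_host_set() is an assumption about the host API, and the surrounding names are illustrative.

static void
mq_udp_putq_example(mqueue_t *q, pmsg_t *mb, host_addr_t addr, uint16 port)
{
	gnet_host_t to;

	gnet_host_set(&to, addr, port);	/* Assumed helper filling the destination */
	mq_udp_putq(q, mb, &to);	/* Queue now owns `mb'; `to' was copied */

	/* `to' may go out of scope; `mb' must not be referenced any more */
}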
Example #14
File: dbmw.c Project: Haxe/gtk-gnutella
/**
 * Common code for dbmw_foreach_trampoline() and
 * dbmw_foreach_remove_trampoline().
 */
static gboolean
dbmw_foreach_common(gboolean removing,
	gpointer key, dbmap_datum_t *d, gpointer arg)
{
	struct foreach_ctx *ctx = arg;
	dbmw_t *dw = ctx->dw;
	struct cached *entry;

	dbmw_check(dw);

	entry = map_lookup(dw->values, key);
	if (entry != NULL) {
		/*
		 * Key / value pair is present in the cache.
		 *
		 * This affects us in two ways:
		 *
		 *   - We may already know that the key was deleted, in which case
		 *     that entry is just skipped: no further access is possible
		 *     through DBMW until that key is recreated.  We still return
		 *     TRUE to make sure the lower layers will delete the entry
		 *     physically, since deletion has not been flushed yet (that's
		 *     the reason we're still iterating on it).
		 *
		 *   - Should the cached key need to be deleted (as determined by
		 *     the user callback), we make sure we delete the entry in the
		 *     cache upon callback return.
		 */

		entry->traversed = TRUE;	/* Signal we iterated on cached value */

		if (entry->absent)
			return TRUE;		/* Key was already deleted, info cached */
		if (removing) {
			gboolean status;
			status = (*ctx->u.cbr)(key, entry->data, entry->len, ctx->arg);
			if (status) {
				entry->removable = TRUE;	/* Discard it after traversal */
			}
			return status;
		} else {
			(*ctx->u.cb)(key, entry->data, entry->len, ctx->arg);
			return FALSE;
		}
	} else {
		gboolean status = FALSE;
		gpointer data = d->data;
		size_t len = d->len;

		/*
		 * Deserialize data if needed, but do not cache this value.
		 * Iterating over the map must not disrupt the cache.
		 */

		if (dw->unpack) {
			len = dw->value_size;
			data = walloc(len);

			bstr_reset(dw->bs, d->data, d->len, BSTR_F_ERROR);

			if (!dbmw_deserialize(dw, dw->bs, data, len)) {
				g_carp("DBMW \"%s\" deserialization error in %s(): %s",
					dw->name,
					stacktrace_routine_name(func_to_pointer(dw->unpack), FALSE),
					bstr_error(dw->bs));
				/* Not calling value free routine on deserialization failures */
				wfree(data, len);
				return FALSE;
			}
		}

		if (removing) {
			status = (*ctx->u.cbr)(key, data, len, ctx->arg);
		} else {
			(*ctx->u.cb)(key, data, len, ctx->arg);
		}

		if (dw->unpack) {
			if (dw->valfree)
				(*dw->valfree)(data, len);
			wfree(data, len);
		}

		return status;
	}
}
Example #15
/**
 * Extended XML formatting of a tree.
 *
 * Namespaces, if any, are automatically assigned a prefix, whose format
 * is "ns%u", the counter being incremented from 0.
 *
 * Users can supply a vector mapping namespaces to prefixes, so that they
 * can force specific prefixes for a given well-known namespace.
 *
 * If there is a default namespace, all the tags belonging to that namespace
 * are emitted without any prefix.
 *
 * The output stream must be explicitly closed by the user upon return.
 *
 * Options can be supplied to tune the output:
 *
 * - XFMT_O_SKIP_BLANKS will skip pure white space nodes.
 * - XFMT_O_COLLAPSE_BLANKS will replace consecutive blanks with 1 space.
 * - XFMT_O_NO_INDENT requests that no indentation of the tree be made.
 * - XFMT_O_PROLOGUE emits a leading <?xml?> prologue.
 * - XFMT_O_FORCE_10 forces generation of XML 1.0.
 * - XFMT_O_SINGLE_LINE emits XML as one big line (implies XFMT_O_NO_INDENT).
 *
 * @param root			the root of the tree to dump
 * @param os			the output stream where tree is dumped
 * @param options		formatting options, as documented above
 * @param pvec			a vector of prefixes to be used for namespaces
 * @param pvcnt			amount of entries in vector
 * @param default_ns	default namespace to install at root element
 *
 * @return TRUE on success.
 */
bool
xfmt_tree_extended(const xnode_t *root, ostream_t *os, uint32 options,
                   const struct xfmt_prefix *pvec, size_t pvcnt, const char *default_ns)
{
    struct xfmt_pass1 xp1;
    struct xfmt_pass2 xp2;
    struct xfmt_invert_ctx ictx;
    const char *dflt_ns;

    g_assert(root != NULL);
    g_assert(os != NULL);

    if (options & XFMT_O_COLLAPSE_BLANKS) {
        /* FIXME */
        g_carp("XFMT_O_COLLAPSE_BLANKS not supported yet");
        stacktrace_where_print(stderr);
    }

    if (options & XFMT_O_SINGLE_LINE)
        options |= XFMT_O_NO_INDENT;

    /*
     * First pass: look at namespaces and construct a table recording the
     * earliest tree depth at which a namespace is used.
     */

    ZERO(&xp1);
    xp1.uri2node = htable_create(HASH_KEY_STRING, 0);
    xp1.uri2prefix = nv_table_make(FALSE);

    if (default_ns != NULL)
        xp1.attr_uris = hset_create(HASH_KEY_STRING, 0);

    htable_insert_const(xp1.uri2node, VXS_XML_URI, root);

    xnode_tree_enter_leave(deconstify_pointer(root),
                           xfmt_handle_pass1_enter, xfmt_handle_pass1_leave, &xp1);

    g_assert(0 == xp1.depth);		/* Sound traversal */

    /*
     * If there was a default namespace, make sure it is used in the tree.
     * Otherwise, discard it.
     */

    if (default_ns != NULL) {
        if (NULL == htable_lookup(xp1.uri2node, default_ns)) {
            g_carp("XFMT default namespace '%s' is not needed", default_ns);
            dflt_ns = NULL;
        } else {
            dflt_ns = default_ns;
        }
    } else {
        dflt_ns = NULL;
    }

    /*
     * Prepare context for second pass.
     */

    ZERO(&xp2);
    xp2.node2uri = htable_create(HASH_KEY_SELF, 0);
    xp2.os = os;
    xp2.options = options;
    xp2.default_ns = dflt_ns;
    xp2.attr_uris = xp1.attr_uris;
    xp2.uri2prefix = xp1.uri2prefix;
    xp2.uris = symtab_make();
    xp2.prefixes = symtab_make();
    xp2.depth = 0;
    xp2.pcount = 0;
    xp2.last_was_nl = TRUE;

    /*
     * Iterate over the hash table we've built to create a table indexed
     * by tree node and listing the namespaces to declare for that node.
     */

    ictx.uri2node = xp1.uri2node;
    ictx.node2uri = xp2.node2uri;

    htable_foreach(xp1.uri2node, xfmt_invert_uri_kv, &ictx);
    htable_free_null(&xp1.uri2node);

    /*
     * Emit prologue if requested.
     */

    if (options & XFMT_O_PROLOGUE) {
        if (options & XFMT_O_FORCE_10) {
            ostream_write(os, XFMT_DECL_10, CONST_STRLEN(XFMT_DECL_10));
        } else {
            ostream_write(os, XFMT_DECL, CONST_STRLEN(XFMT_DECL));
        }
        if (!(options & XFMT_O_SINGLE_LINE)) {
            ostream_putc(os, '\n');
        }
    }

    xfmt_prefix_declare(&xp2, VXS_XML_URI, VXS_XML);

    /*
     * Prepare user-defined URI -> prefix mappings.
     */

    if (pvcnt != 0) {
        size_t i;

        for (i = 0; i < pvcnt; i++) {
            const struct xfmt_prefix *p = &pvec[i];

            xfmt_prefix_declare(&xp2, p->uri, p->prefix);
        }
    }

    /*
     * Second pass: generation.
     */

    xnode_tree_enter_leave(deconstify_pointer(root),
                           xfmt_handle_pass2_enter, xfmt_handle_pass2_leave, &xp2);

    g_assert(0 == xp2.depth);		/* Sound traversal */

    /*
     * Done, cleanup.
     */

    nv_table_free_null(&xp2.uri2prefix);
    symtab_free_null(&xp2.prefixes);
    symtab_free_null(&xp2.uris);
    htable_free_null(&xp2.node2uri);
    hset_free_null(&xp2.attr_uris);

    return !ostream_has_ioerr(os);
}
Example #16
/**
 * @return	TLS_HANDSHAKE_ERROR if the TLS handshake failed.
 *			TLS_HANDSHAKE_RETRY if the handshake is incomplete; thus
 *				tls_handshake() should be called again on the next I/O event.
 *			TLS_HANDSHAKE_FINISHED if the TLS handshake succeeded. Note
 *				that this is also returned if TLS is disabled. Therefore
 *				this does not imply an encrypted connection.
 */
enum tls_handshake_result
tls_handshake(struct gnutella_socket *s)
{
    gnutls_session session;
    bool do_warn;
    int ret;

    socket_check(s);

    /*
     * For connect-back probes, the handshake will probably fail. We use
     * TLS anyway to avoid getting blocked which the remote peer would
     * not notice. Thus suppress warnings for failed handshakes in this
     * case.
     */
    do_warn = SOCK_TYPE_CONNBACK != s->type;

    session = tls_socket_get_session(s);
    g_return_val_if_fail(session, TLS_HANDSHAKE_ERROR);
    g_return_val_if_fail(SOCK_TLS_INITIALIZED == s->tls.stage,
        TLS_HANDSHAKE_ERROR);

    ret = gnutls_handshake(session);
    switch (ret) {
    case 0:
        if (GNET_PROPERTY(tls_debug) > 3) {
            g_debug("TLS handshake succeeded");
        }
        tls_socket_evt_change(s, SOCK_CONN_INCOMING == s->direction
                              ? INPUT_EVENT_R : INPUT_EVENT_W);
        if (GNET_PROPERTY(tls_debug) > 3) {
            tls_print_session_info(s->addr, s->port, session,
                                   SOCK_CONN_INCOMING == s->direction);
        }
        tls_signal_pending(s);
        return TLS_HANDSHAKE_FINISHED;
    case GNUTLS_E_AGAIN:
    case GNUTLS_E_INTERRUPTED:
        tls_socket_evt_change(s, gnutls_record_get_direction(session)
                              ? INPUT_EVENT_WX : INPUT_EVENT_RX);
        if (GNET_PROPERTY(tls_debug) > 3) {
            g_debug("TLS handshake proceeding...");
        }
        tls_signal_pending(s);
        return TLS_HANDSHAKE_RETRY;
    case GNUTLS_E_PULL_ERROR:
    case GNUTLS_E_PUSH_ERROR:
        /* Logging already done by tls_transport_debug() */
        break;
    case GNUTLS_E_UNEXPECTED_PACKET_LENGTH:
        if ((SOCK_F_EOF | SOCK_F_CONNRESET) & s->flags) {
            /* Remote peer has hung up */
            break;
        }
    /* FALLTHROUGH */
    default:
        if (do_warn && GNET_PROPERTY(tls_debug)) {
            g_carp("gnutls_handshake() failed: host=%s (%s) error=\"%s\"",
                   host_addr_port_to_string(s->addr, s->port),
                   SOCK_CONN_INCOMING == s->direction ? "incoming" : "outgoing",
                   gnutls_strerror(ret));
        }
    }
    return TLS_HANDSHAKE_ERROR;
}
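A hedged sketch of how a caller might dispatch on the three documented results; the continuation and tear-down routines are placeholders.

static void
tls_handshake_example(struct gnutella_socket *s)
{
    switch (tls_handshake(s)) {
    case TLS_HANDSHAKE_FINISHED:
        connection_ready(s);	/* Hypothetical: proceed (TLS may be off) */
        break;
    case TLS_HANDSHAKE_RETRY:
        /* Incomplete: call tls_handshake() again on the next I/O event */
        break;
    case TLS_HANDSHAKE_ERROR:
        connection_abort(s);	/* Hypothetical error handling */
        break;
    }
}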
Example #17
File: bg.c Project: Haxe/gtk-gnutella
/**
 * Terminate the task, invoking the completion callback if defined.
 */
static void
bg_task_terminate(struct bgtask *bt)
{
	bgstatus_t status;

	bg_task_check(bt);
	g_assert(!(bt->flags & TASK_F_EXITED));

	/*
	 * If the task is running, we can't proceed now.
	 * Go back to the scheduler, which will call us back.
	 */

	if (bt->flags & TASK_F_RUNNING)
		longjmp(bt->env, 1);

	/*
	 * When we come here, the task is no longer running.
	 */

	if (bg_debug > 1) {
		g_debug("BGTASK terminating \"%s\"%s, ran %d msecs",
			bt->name, (bt->flags & TASK_F_DAEMON) ? " daemon" : "", bt->wtime);
	}

	g_assert(!(bt->flags & TASK_F_RUNNING));

	if (bt->flags & TASK_F_SLEEPING)
		bg_sched_wakeup(bt);

	bt->flags |= TASK_F_EXITED;		/* Task has now exited */
	bg_sched_remove(bt);			/* Ensure it's no longer scheduled */
	bg_runcount--;					/* One task less to run */

	g_assert(bg_runcount >= 0);

	/*
	 * Compute proper status.
	 */

	status = BGS_OK;		/* Assume everything was fine */

	if (bt->flags & TASK_F_SIGNAL)
		status = BGS_KILLED;
	else if (bt->exitcode != 0)
		status = BGS_ERROR;

	/*
	 * If there is a status to read, mark task as being a zombie: it will
	 * remain around until the user probes the task to know its final
	 * execution status.
	 */

	if (status != BGS_OK && bt->done_cb == NULL)
		bt->flags |= TASK_F_ZOMBIE;

	/*
	 * Let the user know this task has now ended.
	 * Upon return from this callback, further user references to the
	 * task structure are FORBIDDEN.
	 */

	if (bt->done_cb) {
		(*bt->done_cb)(bt, bt->ucontext, status, bt->done_arg);

		if (bt->flags & TASK_F_ZOMBIE)
			g_carp("user code lost exit status of task \"%s\"", bt->name);

		bt->flags &= ~TASK_F_ZOMBIE;		/* Is now totally DEAD */
	}

	/*
	 * Free user's context.
	 */

	(*bt->uctx_free)(bt->ucontext);
	bt->magic = BGTASK_DEAD_MAGIC;	/* Prevent further uses! */

	/*
	 * Do not free the task structure immediately, in case the calling
	 * stack is not totally clean and we're about to probe the task
	 * structure again.
	 *
	 * It will be freed at the next scheduler run.
	 */

	dead_tasks = g_slist_prepend(dead_tasks, bt);
}
Example #18
static ssize_t
tls_read(struct wrap_io *wio, void *buf, size_t size)
{
    struct gnutella_socket *s = wio->ctx;
    ssize_t ret;

    socket_check(s);
    g_assert(socket_uses_tls(s));
    g_assert(NULL != buf);
    g_assert(size_is_positive(size));

    if (tls_flush(wio) && !is_temporary_error(errno)) {
        if (GNET_PROPERTY(tls_debug)) {
            g_warning("%s(): tls_flush(fd=%d) error: %m",
                      G_STRFUNC, s->file_desc);
        }
        return -1;
    }

    ret = gnutls_record_recv(tls_socket_get_session(s), buf, size);
    if (ret < 0) {
        switch (ret) {
        case GNUTLS_E_INTERRUPTED:
        case GNUTLS_E_AGAIN:
            errno = VAL_EAGAIN;
            break;
        case GNUTLS_E_PULL_ERROR:
        case GNUTLS_E_PUSH_ERROR:
            /* Logging already done by tls_transport_debug() */
            errno = (SOCK_F_CONNRESET & s->flags) ? ECONNRESET : EIO;
            break;
        case GNUTLS_E_UNEXPECTED_PACKET_LENGTH:
            if (SOCK_F_EOF & s->flags) {
                /*
                 * Remote peer has hung up.
                 *
                 * This is not exceptional, so we make it appear to upper
                 * layers (who do not necessarily know they're dealing with
                 * a TLS socket) as a regular EOF condition: the read()
                 * operation returns 0.
                 */
                ret = 0;
                goto no_error;
            } else if (SOCK_F_CONNRESET & s->flags) {
                errno = ECONNRESET;
                break;
            }
        /* FALLTHROUGH */
        default:
            if (GNET_PROPERTY(tls_debug)) {
                g_carp("tls_read(): gnutls_record_recv(fd=%d) failed: "
                       "host=%s error=\"%s\"",
                       s->file_desc, host_addr_port_to_string(s->addr, s->port),
                       gnutls_strerror(ret));
            }
            errno = EIO;
        }
        ret = -1;
    }

no_error:
    if (s->gdk_tag && 0 == s->tls.snarf) {
        tls_socket_evt_change(s, INPUT_EVENT_RX);
    }
    g_assert(ret == (ssize_t) -1 || (size_t) ret <= size);
    tls_signal_pending(s);
    return ret;
}
Example #19
/**
 * Incrementally process more data.
 *
 * @param zs		the zlib stream object
 * @param amount	amount of data to process
 * @param maxout	maximum length of dynamically-allocated buffer (0 = none)
 * @param may_close	whether to allow closing when all data was consumed
 * @param finish	whether this is the last data to process
 *
 * @return -1 on error, 1 if work remains, 0 when done.
 */
static int
zlib_stream_process_step(zlib_stream_t *zs, int amount, size_t maxout,
                         bool may_close, bool finish)
{
    z_streamp z;
    int remaining;
    int process;
    bool finishing;
    int ret = 0;

    g_assert(amount > 0);
    g_assert(!zs->closed);

    z = zs->z;
    g_assert(z != NULL);			/* Stream not closed yet */

    /*
     * Compute amount of input data to process.
     */

    remaining = zs->inlen - ptr_diff(z->next_in, zs->in);
    g_assert(remaining >= 0);

    process = MIN(remaining, amount);
    finishing = process == remaining;

    /*
     * Process data.
     */

    z->avail_in = process;

resume:
    switch (zs->magic) {
    case ZLIB_DEFLATER_MAGIC:
        ret = deflate(z, finishing && finish ? Z_FINISH : 0);
        break;
    case ZLIB_INFLATER_MAGIC:
        ret = inflate(z, Z_SYNC_FLUSH);
        break;
    }

    switch (ret) {
    case Z_OK:
        if (0 == z->avail_out) {
            if (zlib_stream_grow_output(zs, maxout))
                goto resume;	/* Process remaining input */
            goto error;			/* Cannot continue */
        }
        return 1;				/* Need to call us again */
    /* NOTREACHED */

    case Z_BUF_ERROR:			/* Output full or need more input to continue */
        if (0 == z->avail_out) {
            if (zlib_stream_grow_output(zs, maxout))
                goto resume;	/* Process remaining input */
            goto error;			/* Cannot continue */
        }
        if (0 == z->avail_in)
            return 1;			/* Need to call us again */
        goto error;				/* Cannot continue */
    /* NOTREACHED */

    case Z_STREAM_END:			/* Reached end of input stream */
        g_assert(finishing);

        /*
         * Supersede the output length to let them probe how much data
         * was processed once the stream is closed, through calls to
         * zlib_deflater_outlen() or zlib_inflater_outlen().
         */

        zs->outlen = ptr_diff(z->next_out, zs->out);
        g_assert(zs->outlen > 0);

        if (may_close) {
            switch (zs->magic) {
            case ZLIB_DEFLATER_MAGIC:
                ret = deflateEnd(z);
                break;
            case ZLIB_INFLATER_MAGIC:
                ret = inflateEnd(z);
                break;
            }

            if (ret != Z_OK) {
                g_carp("%s(): while freeing zstream: %s",
                       G_STRFUNC, zlib_strerror(ret));
            }
            WFREE(z);
            zs->z = NULL;
        }

        zs->closed = TRUE;		/* Signals processing stream done */
        return 0;				/* Done */
    /* NOTREACHED */

    default:
        break;
    }

    /* FALL THROUGH */

error:
    g_carp("%s(): error during %scompression: %s "
           "(avail_in=%u, avail_out=%u, total_in=%lu, total_out=%lu)",
           G_STRFUNC, ZLIB_DEFLATER_MAGIC == zs->magic ? "" : "de",
           zlib_strerror(ret),
           z->avail_in, z->avail_out, z->total_in, z->total_out);

    if (may_close) {
        switch (zs->magic) {
        case ZLIB_DEFLATER_MAGIC:
            ret = deflateEnd(z);
            break;
        case ZLIB_INFLATER_MAGIC:
            ret = inflateEnd(z);
            break;
        }
        if (ret != Z_OK && ret != Z_DATA_ERROR) {
            g_carp("%s(): while freeing stream: %s",
                   G_STRFUNC, zlib_strerror(ret));
        }
        WFREE(z);
        zs->z = NULL;
    }

    return -1;				/* Error! */
}