Example #1
/**
 * anjuta_token_stream_read:
 * @stream: a #AnjutaTokenStream object.
 * @buffer: a character buffer to fill with token data.
 * @max_size: the size of the buffer.
 *
 * Read a token from the input stream and write its content as a C string into
 * the buffer passed as argument.
 *
 * Return value: The number of characters written into the buffer.
 */
gint
anjuta_token_stream_read (AnjutaTokenStream *stream, gchar *buffer, gsize max_size)
{
    gint result = 0;

    if (stream->token != NULL)
    {
        gsize length = anjuta_token_get_length (stream->token);

        if ((anjuta_token_get_type (stream->token) >= ANJUTA_TOKEN_PARSED) || (stream->pos >= length))
        {
            for (;;)
            {
                /* Last token */
                if (stream->token == stream->last) return 0;

                /* Skip the current token; parsed and raw tokens are both
                 * advanced the same way here */
                stream->token = anjuta_token_next (stream->token);

                if ((stream->token == NULL) || (anjuta_token_get_type (stream->token) == ANJUTA_TOKEN_EOV))
                {
                    /* Last token */
                    return 0;
                }
                else if ((anjuta_token_get_length (stream->token) != 0) && (anjuta_token_get_type (stream->token) < ANJUTA_TOKEN_PARSED))
                {
                    /* Find some data */
                    stream->pos = 0;
                    length = anjuta_token_get_length (stream->token);
                    break;
                }
            }
        }

        if (stream->pos < length)
        {
            const gchar *start = anjuta_token_get_string (stream->token);

            length -= stream->pos;

            if (length > max_size) length = max_size;
            memcpy (buffer, start + stream->pos, length);
            stream->pos += length;
            result = length;
        }
    }

    return result;
}
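For context, a caller typically invokes anjuta_token_stream_read() repeatedly until it returns 0. The helper below is only a sketch built on the function above (read_all() itself is not part of Anjuta): it drains the stream into a fixed-size buffer and NUL-terminates the result.

/* Sketch only (not part of Anjuta): drain 'stream' into 'buffer' of 'size'
 * bytes and NUL-terminate it, using nothing but anjuta_token_stream_read(). */
static gsize
read_all (AnjutaTokenStream *stream, gchar *buffer, gsize size)
{
    gsize total = 0;

    while (total + 1 < size)
    {
        gint read = anjuta_token_stream_read (stream, buffer + total, size - total - 1);

        if (read == 0) break;   /* no more data in the stream */
        total += (gsize)read;
    }
    buffer[total] = '\0';

    return total;
}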
Example #2
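/**
 * anjuta_token_style_format:
 * @style: a #AnjutaTokenStyle object.
 * @list: an #AnjutaToken list to format.
 *
 * Walk through the tokens newly added to @list and, after each added START,
 * NEXT or LAST token, insert the corresponding separator token found in
 * @style, marking it as added and merging it with the token it follows.
 */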
void
anjuta_token_style_format (AnjutaTokenStyle *style, AnjutaToken *list)
{
	AnjutaToken *item;
	AnjutaToken *last;
	AnjutaToken *text;
	AnjutaToken *prev;

	/* Find following tokens */
	for (last = list; last != NULL; last = anjuta_token_next (last))
	{
		/* Get all tokens in group */
		last = anjuta_token_last (last);

		gint flags = anjuta_token_get_flags (last);
		if (!(flags & (ANJUTA_TOKEN_ADDED | ANJUTA_TOKEN_REMOVED))) break;
	}

	/* Find previous token */
	for (prev = list; prev != NULL; prev = anjuta_token_previous (prev))
	{
		gint flags = anjuta_token_get_flags (prev);
		if ((anjuta_token_get_length (prev) != 0) && !(flags & (ANJUTA_TOKEN_ADDED | ANJUTA_TOKEN_REMOVED))) break;
		list = prev;
	}

	for (item = list; (item != NULL) && (item != last); item = anjuta_token_next (item))
	{
		if ((anjuta_token_get_flags (item) & ANJUTA_TOKEN_ADDED) &&
			!(anjuta_token_get_flags (item) & ANJUTA_TOKEN_REMOVED))
		{
			switch (anjuta_token_get_type (item))
			{
			case ANJUTA_TOKEN_START:
			case ANJUTA_TOKEN_NEXT:
			case ANJUTA_TOKEN_LAST:
				/* Insert the separator token defined by the style for this
				 * kind of token, mark it as added and merge it so it stays
				 * attached to the token it follows */
				text = anjuta_token_style_lookup (style, anjuta_token_get_type (item), FALSE);
				anjuta_token_set_flags (text, ANJUTA_TOKEN_ADDED);
				anjuta_token_insert_after (item, text);
				anjuta_token_merge (item, text);
				item = text;
				break;
			default:
				break;
			}
		}
		}
	}
}
Example #3
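/**
 * mkp_project_get_variable_token:
 * @project: a #MkpProject object.
 * @variable: an #AnjutaToken holding a variable reference, such as "$(NAME)"
 * or the single character form "$X".
 *
 * Look up the referenced variable in the project's variable table.
 *
 * Return value: The value token of the variable, or %NULL if the variable is
 * unknown or @variable is empty.
 */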
AnjutaToken*
mkp_project_get_variable_token (MkpProject *project, AnjutaToken *variable)
{
	guint length;
	const gchar *string;
	gchar *name;
	MkpVariable *var;

	length = anjuta_token_get_length (variable);
	string = anjuta_token_get_string (variable);
	if ((length == 0) || (string == NULL)) return NULL;

	/* A Makefile variable reference is written either "$(NAME)" or with a
	 * single character name as "$X"; extract the name accordingly */
	if (string[1] == '(')
	{
		/* "$(NAME)": skip "$(" and drop the trailing ")" */
		name = g_strndup (string + 2, length - 3);
	}
	else
	{
		/* "$X": the name is the single character after "$" */
		name = g_strndup (string + 1, 1);
	}
	var = g_hash_table_lookup (project->variables, name);
	g_free (name);

	return var != NULL ? var->value : NULL;
}
Example #4
/**
 * anjuta_token_stream_tokenize:
 * @stream: a #AnjutaTokenStream object.
 * @type: a token type.
 * @length: the token length in characters.
 *
 * Create a token of @type from the last @length characters previously read and
 * append it to the output stream. The characters are not copied into the
 * output stream; the new token reuses the same characters.
 *
 * Return value: The created token.
 */
AnjutaToken*
anjuta_token_stream_tokenize (AnjutaTokenStream *stream, gint type, gsize length)
{
    AnjutaToken *frag;
    AnjutaToken *end;

    frag = anjuta_token_new_static (type, NULL);

    for (end = stream->start; end != NULL;)
    {
        if ((anjuta_token_get_type (end) < ANJUTA_TOKEN_PARSED) || (anjuta_token_get_length (end) == 0))
        {
            gint toklen = anjuta_token_get_length (end);
            AnjutaToken *copy = anjuta_token_cut (end, stream->begin, length);

            if (toklen >= (length + stream->begin))
            {
                if (end == stream->start)
                {
                    /* Get whole token */
                    anjuta_token_free (frag);
                    anjuta_token_set_type (copy, type);
                    frag = copy;
                }
                else
                {
                    /* Get several tokens */
                    anjuta_token_insert_after (frag, copy);
                    anjuta_token_merge (frag, copy);
                }

                if (toklen == (length + stream->begin))
                {
                    stream->start = anjuta_token_next (end);
                    stream->begin = 0;
                }
                else
                {
                    stream->start = end;
                    stream->begin += length;
                }
                break;
            }
            else
            {
                anjuta_token_insert_after (frag, copy);
                anjuta_token_merge (frag, copy);
                length -= toklen - stream->begin;
                end = anjuta_token_next (end);
                stream->begin = 0;
            }
        }
        else
        {
            end = anjuta_token_next (end);
            stream->begin = 0;
        }
    }

    anjuta_token_stream_append_token (stream, frag);

    return frag;
}
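To tie the two stream functions together: a scanner first consumes characters through anjuta_token_stream_read() and, once it has matched a lexeme, hands the matched length back via anjuta_token_stream_tokenize() so the characters become an output token without being copied. The wrapper below is a sketch; the ANJUTA_TOKEN_NAME type is assumed here purely for illustration.

/* Sketch only (not part of Anjuta): emit the last 'lexeme_length' characters
 * read from 'stream' as one output token.  ANJUTA_TOKEN_NAME is assumed to be
 * a valid token type; any type accepted by the lexer would work the same way. */
static AnjutaToken*
emit_name_token (AnjutaTokenStream *stream, gsize lexeme_length)
{
    return anjuta_token_stream_tokenize (stream, ANJUTA_TOKEN_NAME, lexeme_length);
}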