Code example #1
int get_token_list(token_list_t *list, char *str, int start_index)
{
	int i=start_index;

	while (1) {
		int start_of_token, len_of_token;

		if ( (str[i] == '\0') || (str[i] == '\n'))
			return i;
		while ((str[i] == ' ') || (str[i] == '\t') ) {
			i++;
			if ( (str[i] == '\0') || (str[i] == '\n') )
				return i;
		}
		start_of_token = i;
		//printf("start of token=%d is %d ", token_cnt, start_of_token);
		while ((str[i] != ' ') && (str[i] != '\t') && (str[i] != '=') ) {
			i++;
			if (str[i] == '\0' || str[i] == '\n') {
				len_of_token = i - start_of_token;
				if (len_of_token > 0) {
					//printf("-end of token=%d is %d \n", token_cnt, i-1);
					add_token(list, str, start_of_token, len_of_token);
				}
				return i;
			}
		}
		//printf("end of token=%d is %d \n", token_cnt, i-1);
		len_of_token = i - start_of_token;
		if (len_of_token > 0)
			add_token(list, str, start_of_token, len_of_token);
		if (str[i] == '=') add_token(list, str, i, 1);
		i++;
	}
}
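The token_list_t type and add_token() helper this example relies on are not shown. Below is a minimal sketch of what they might look like, paired with a driver for get_token_list() above; the structure and helper are hypothetical stand-ins, not the original project's definitions (add_token would need to be declared before get_token_list to compile).

#include <stdio.h>

#define MAX_TOKENS 64

/* Hypothetical stand-in: records (start, len) pairs into the source string. */
typedef struct {
	int count;
	struct { int start; int len; } tok[MAX_TOKENS];
} token_list_t;

void add_token(token_list_t *list, char *str, int start, int len)
{
	(void)str; /* a real implementation might copy the substring */
	if (list->count < MAX_TOKENS) {
		list->tok[list->count].start = start;
		list->tok[list->count].len = len;
		list->count++;
	}
}

int main(void)
{
	token_list_t list = {0};
	char line[] = "ifname = eth0\n";

	get_token_list(&list, line, 0);
	for (int i = 0; i < list.count; i++)
		printf("token %d: '%.*s'\n", i,
		       list.tok[i].len, line + list.tok[i].start);
	/* prints: 'ifname', '=', 'eth0' */
	return 0;
}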
Code example #2
File: Scanner.cpp Project: boxershorts7/School
void Scanner::scan_id(char token) {
	std::stringstream id;

	// Make sure the char isn't whitespace
	if (!std::isspace(static_cast<unsigned char>(token))) {
		// Check if the first char is a digit or other invalid input
		if (std::isdigit(static_cast<unsigned char>(token)) || !std::isalnum(static_cast<unsigned char>(token))) {
			std::cout << "Found an error\n";
			error = line;
		}

		id << token;
		while (std::isalnum(static_cast<unsigned char>(file.peek()))) {
			char next = file.get();
			id << next;
		}

		if (error != -1) {
			add_token("", Token::ERROR);
		}
		else {
			std::string id_str = id.str();
			if(!is_keyword(id_str)) {
				add_token(id_str, Token::ID);
			}
		}
	}
}
Code example #3
File: token_table.cpp Project: davidmueller13/lean
static int add_token(lua_State * L) {
    int nargs = lua_gettop(L);
    if (nargs == 3)
        return push_token_table(L, add_token(to_token_table(L, 1), lua_tostring(L, 2), lua_tonumber(L, 3)));
    else
        return push_token_table(L, add_token(to_token_table(L, 1), lua_tostring(L, 2), lua_tostring(L, 3), lua_tonumber(L, 4)));
}
Code example #4
File: Scanner.cpp Project: boxershorts7/School
bool Scanner::is_keyword(std::string id) {
	bool is_keyword;

	if ( id == "Schemes" ) {
		add_token(id, Token::SCHEMES);
		is_keyword = true;
	}
	else if ( id == "Facts" ) {
		add_token(id, Token::FACTS);
		is_keyword = true;
	}
	else if ( id == "Rules" ) {
		add_token(id, Token::RULES);
		is_keyword = true;
	}
	else if ( id == "Queries" ) {
		add_token(id, Token::QUERIES);
		is_keyword = true;
	}
	else {
		is_keyword = false;
	}

	return is_keyword;
}
Code example #5
File: util.c Project: Phoul/shim
/* this isn't very fast, but this thing is meant for a single user anyhow */
size_t
tokenize(const char *buf, const char *sep, int lim,
	 struct token_list *tokens)
{
	char *p;
	size_t ntok;
	size_t len;

	ntok = 0;

	while ((lim < 0 || ntok < (size_t)lim) && (p = strpbrk(buf, sep))) {
		len = p - buf;
		add_token(buf, len, tokens);
		buf += len + 1;
		ntok++;
	}

	/* add any remaining */
	if (*buf) {
		add_token(buf, strlen(buf), tokens);
		ntok++;
	}

	return ntok;
}
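A possible driver for the tokenize() above. shim's struct token_list and add_token() live in its own headers, so the stand-ins here are assumptions for illustration. Note that lim < 0 means "no limit", and any text after the last separator is added as one final token.

#include <stdio.h>
#include <string.h>

/* Hypothetical stand-in for shim's token list. */
struct token_list {
	char tok[16][64];
	size_t ntok;
};

void add_token(const char *buf, size_t len, struct token_list *tokens)
{
	if (tokens->ntok < 16 && len < 64) {
		memcpy(tokens->tok[tokens->ntok], buf, len);
		tokens->tok[tokens->ntok][len] = '\0';
		tokens->ntok++;
	}
}

int main(void)
{
	struct token_list tl = { .ntok = 0 };

	tokenize("GET /index.html HTTP/1.1", " ", -1, &tl);
	for (size_t i = 0; i < tl.ntok; i++)
		printf("%zu: %s\n", i, tl.tok[i]);
	/* prints: GET, /index.html, HTTP/1.1 */
	return 0;
}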
Code example #6
void get_interface_name_token_list(token_list_t *list, char *str)
{
	int start_of_token, i=0;

	while(1) {
		if (str[i] == '\0')
			return;
		start_of_token=i;
		while ( (str[i] != ':') && (str[i] != '[')  && str[i] != ']' &&
			(str[i] != '.') && (str[i] != ',') ) {
			i++;
			if (str[i] == '\0') {
				add_token(list, str, start_of_token,
					  i - start_of_token);
				return;
			}
		}
		if ((i-start_of_token) > 0 ) {
			add_token(list, str, start_of_token, i - start_of_token);
			add_token(list, str, i, 1);
		} else {
			add_token(list, str, i, 1);
		}
		i++;
	}
}
Code example #7
File: cdump.c Project: rustyrussell/pettycoin
/* Simplified tokenizer: comments and preproc directives removed,
   identifiers are a token, others are single char tokens. */
static struct token *tokenize(const void *ctx, const char *code)
{
	unsigned int i, len, tok_start = -1;
	bool start_of_line = true;
	struct token *toks = tal_arr(ctx, struct token, 0);

	for (i = 0; code[i]; i += len) {
		if (code[i] == '#' && start_of_line) {
			/* Preprocessor line. */
			len = strcspn(code+i, "\n");
		} else if (code[i] == '/' && code[i+1] == '/') {
			/* One line comment. */
			len = strcspn(code+i, "\n");
			if (tok_start != -1U) {
				add_token(&toks, code+tok_start, i - tok_start);
				tok_start = -1U;
			}
		} else if (code[i] == '/' && code[i+1] == '*') {
			/* Multi-line comment. */
			const char *end = strstr(code+i+2, "*/");
			if (!end)
				len = strlen(code + i);
			else
				len = (end + 2) - (code + i);
			if (tok_start != -1U) {
				add_token(&toks, code+tok_start, i - tok_start);
				tok_start = -1U;
			}
		} else if (cisalnum(code[i]) || code[i] == '_') {
			/* Identifier or part thereof */
			if (tok_start == -1U)
				tok_start = i;
			len = 1;
		} else if (!cisspace(code[i])) {
			/* Punctuation: treat as single char token. */
			if (tok_start != -1U) {
				add_token(&toks, code+tok_start, i - tok_start);
				tok_start = -1U;
			}
			add_token(&toks, code+i, 1);
			len = 1;
		} else {
			/* Whitespace. */
			if (tok_start != -1U) {
				add_token(&toks, code+tok_start, i - tok_start);
				tok_start = -1U;
			}
			len = 1;
		}
		if (code[i] == '\n')
			start_of_line = true;
		else if (!cisspace(code[i]))
			start_of_line = false;
	}

	/* Add terminating NULL. */
	tal_resizez(&toks, tal_count(toks) + 1);
	return toks;
}
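Driving the tokenizer above requires the ccan/tal allocator it is built on, plus the struct token and add_token() defined elsewhere in cdump.c. Their exact shapes are not shown here, so the versions below are assumptions sketched for illustration.

#include <ccan/tal/tal.h>
#include <stdio.h>

/* Assumed shape of cdump.c's token: a (pointer, length) view into code. */
struct token {
	const char *p;
	unsigned int len;
};

static void add_token(struct token **toks, const char *p, unsigned int len)
{
	size_t n = tal_count(*toks);

	tal_resize(toks, n + 1);
	(*toks)[n].p = p;
	(*toks)[n].len = len;
}

int main(void)
{
	const char *code = "int x = 42; /* answer */\n#define Y 1\nreturn x_1;";
	struct token *toks = tokenize(NULL, code);

	/* tokenize() appended one zeroed element as a terminator. */
	for (struct token *t = toks; t->p; t++)
		printf("'%.*s'\n", (int)t->len, t->p);
	tal_free(toks);
	return 0;
}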
Code example #8
File: Scanner.cpp Project: boxershorts7/School
void Scanner::scan_colon() {
	if ( file.peek() == '-' ) {
		// Throw out the -
		file.get();
		// Create a :- token
		add_token(":-", Token::COLON_DASH);
	}
	else {
		// Create a : token
		add_token(":", Token::COLON);
	}
}
Code example #9
void
cdk_style_schemes_init (void)
{
  if (style_name_map != NULL)
    return;

  style_name_map = g_hash_table_new_full (g_str_hash, g_str_equal, g_free, NULL);
  token_map = g_hash_table_new (g_direct_hash, g_direct_equal);
  cursor_map = g_hash_table_new (g_direct_hash, g_direct_equal);

  // Map style scheme style names to CdkStyleIDs
  add_map ("default", CDK_STYLE_DEFAULT);
  add_map ("comment", CDK_STYLE_COMMENT);
  add_map ("member_ref", CDK_STYLE_MEMBER_REF);
  add_map ("identifier", CDK_STYLE_IDENTIFIER);
  add_map ("keyword", CDK_STYLE_KEYWORD);
  add_map ("literal", CDK_STYLE_LITERAL);
  add_map ("number", CDK_STYLE_NUMBER);
  add_map ("preprocessor", CDK_STYLE_PREPROCESSOR);
  add_map ("punctuation", CDK_STYLE_PUNCTUATION);
  add_map ("string", CDK_STYLE_STRING);
  add_map ("type_name", CDK_STYLE_TYPE_NAME);
  add_map ("function_call", CDK_STYLE_FUNCTION_CALL);
  add_map ("character", CDK_STYLE_CHARACTER);
  add_map ("diagnostic_warning", CDK_STYLE_DIAGNOSTIC_WARNING);
  add_map ("diagnostic_error", CDK_STYLE_DIAGNOSTIC_ERROR);
  add_map ("annotation_warning", CDK_STYLE_ANNOTATION_WARNING);
  add_map ("annotation_error", CDK_STYLE_ANNOTATION_ERROR);

  // Map libclang CXTokenKinds to CdkStyleIDs
  add_token (CXToken_Punctuation, CDK_STYLE_PUNCTUATION);
  add_token (CXToken_Keyword, CDK_STYLE_KEYWORD);
  add_token (CXToken_Identifier, CDK_STYLE_IDENTIFIER);
  add_token (CXToken_Literal, CDK_STYLE_LITERAL);
  add_token (CXToken_Comment, CDK_STYLE_COMMENT);

  // Map libclang CXCursorKinds (more specific) to CdkStyleIDs
  add_cursor (CXCursor_TypeRef, CDK_STYLE_TYPE_NAME);
  add_cursor (CXCursor_MemberRef, CDK_STYLE_MEMBER_REF);
  add_cursor (CXCursor_MemberRefExpr, CDK_STYLE_MEMBER_REF);
  add_cursor (CXCursor_CallExpr, CDK_STYLE_FUNCTION_CALL);
  add_cursor (CXCursor_StringLiteral, CDK_STYLE_STRING);
  add_cursor (CXCursor_CharacterLiteral, CDK_STYLE_CHARACTER);
  add_cursor (CXCursor_IntegerLiteral, CDK_STYLE_NUMBER);
  add_cursor (CXCursor_FloatingLiteral, CDK_STYLE_NUMBER);
  add_cursor (CXCursor_ImaginaryLiteral, CDK_STYLE_NUMBER);
  // ...
}
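The add_map(), add_token(), and add_cursor() helpers are not shown above. Given the hash tables created at the top of the function, they are presumably thin wrappers over g_hash_table_insert(); the sketch below shows plausible implementations, an assumption rather than the project's actual code.

#include <glib.h>

static GHashTable *style_name_map, *token_map, *cursor_map;

/* style_name_map was created with g_free as its key destructor,
 * so it owns a duplicated key. */
static void add_map (const char *name, gint style_id)
{
  g_hash_table_insert (style_name_map, g_strdup (name),
                       GINT_TO_POINTER (style_id));
}

/* token_map and cursor_map use g_direct_hash, so the integer kinds
 * are stored directly as pointer-sized keys. */
static void add_token (gint token_kind, gint style_id)
{
  g_hash_table_insert (token_map, GINT_TO_POINTER (token_kind),
                       GINT_TO_POINTER (style_id));
}

static void add_cursor (gint cursor_kind, gint style_id)
{
  g_hash_table_insert (cursor_map, GINT_TO_POINTER (cursor_kind),
                       GINT_TO_POINTER (style_id));
}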
Code example #10
File: lexer.c Project: tmerlier/42sh
void		lexer(char *line, t_lex *lst)
{
	int		param[3];

	param[2] = 0;
	while (*line)
	{
		no_space(&line);
		if (!*line)
			break ;
		if ((is_token(line, param)))
		{
			add_token(&lst, param);
			line = line + param[1];
		}
		else if ((is_word(line, param)))
		{
			add_word(&lst, param, line);
			line = line + param[1];
		}
		else
		{
			add_word(&lst, NULL, line);
			line++;
		}
	}
	parser(&lst);
}
Code example #11
/**
 * This function adds all the tokens contained in the given 'token_list' into
 * 'list' that contains all the tokens matched by an optimized state.
 * '*number_of_tokens' is updated.
 */
static void add_token_list(struct list_int* token_list, Transition* transition,
                    struct opt_token** list, int *number_of_tokens, Abstract_allocator prv_alloc) {
    while (token_list != NULL) {
        add_token(token_list->n, transition, list, number_of_tokens, prv_alloc);
        token_list = token_list->next;
    }
}
Code example #12
File: tokenizer.c Project: WinnowTag/winnow
static Pvoid_t tokenize_text(char * txt, int length, Pvoid_t features) {
	char *token;

	// Remove HTML entities
	replace(txt, length, "&[^;]+;", " ");
	// Remove all non-alphanums
	replace(txt, length, "[^a-zA-Z0-9\\-]", " ");
	// Remove leading and trailing dashes
	replace(txt, length, "[[:space:]]+[\\-]+", " ");
	replace(txt, length, "\\-+[[:space:]]+", " ");
	// Normalize whitespace
	replace(txt, length, "[[:space:]]+", " ");
	foldcase(txt);

	while ((token = strsep(&txt, "\t\n ")) != NULL) {
		if (*token != '\0') {
			int toklen = strlen(token) + 1; // +1 for \0
			if (toklen > 2) {
				features = add_token(token, features);
			}
		}
	}

	return features;
}
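strsep() differs from strtok(): consecutive delimiters yield empty strings rather than being skipped, which is why the loop above must test *token != '\0'. A standalone illustration:

#include <stdio.h>
#include <string.h>

int main(void)
{
	char buf[] = "foo  bar\tbaz";
	char *rest = buf, *token;

	while ((token = strsep(&rest, "\t\n ")) != NULL) {
		if (*token == '\0')
			printf("(empty token from a doubled delimiter)\n");
		else
			printf("token: %s\n", token);
	}
	return 0;
}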
Code example #13
File: lexer.c Project: raglandba/WebC
int inline lex_generic_token(Tokens* tokens, enum token_type type) {
	Token* tok = new_token_type(type);
	if (tok != NULL) {
		return add_token(tokens, tok);
	} else {
		return 0;
	}
}
Code example #14
File: lexer.c Project: raglandba/WebC
int inline lex_content_token(Tokens* tokens, String* content, enum token_type type) {
	Token* tok = new_token_type(type);
	if (tok != NULL) {
		tok->contents = content;
		return add_token(tokens, tok);
	} else {
		return 0;
	}
}
Code example #15
File: Scanner.cpp Project: boxershorts7/School
void Scanner::scan_string() {
	std::stringstream input_str;
	input_str << '\'';
	int token = file.get();
	while (token != '\'' && token != '\n' && token != EOF) {
		input_str << static_cast<char>(token);
		token = file.get();
		// If the string isn't complete (newline or EOF), record an error.
		if (token == '\n' || token == EOF)
			error = line;
	}
	if(error == -1) {
		input_str << '\'';
		add_token(input_str.str(), Token::STRING);
	}
	else {
		add_token("", Token::ERROR);
	}
}
Code example #16
File: token_funct.c Project: Frozenhorns/project
t_bool	is_a_esp(t_token **tok, char **str)
{
  if (str && *str && **str == '&')
    {
      *str += 1;
      if (add_token(tok, ESP, "&", NONE) == EXIT_FAILURE)
	return (ERROR);
      return (TRUE);
    }
  return (FALSE);
}
Code example #17
File: token_funct.c Project: Frozenhorns/project
t_bool	is_an_excla(t_token **tok, char **str)
{
  if (str && *str && **str == '!')
    {
      if (add_token(tok, EXCLA, "!", NONE) == EXIT_FAILURE)
	return (ERROR);
      *str += 1;
      return (TRUE);
    }
  return (FALSE);
}
Code example #18
File: token_funct.c Project: Frozenhorns/project
t_bool	is_a_pv(t_token **tok, char **str)
{
  if (str && *str && **str == ';')
    {
      *str += 1;
      if (add_token(tok, PV, ";", NONE) == EXIT_FAILURE)
	return (ERROR);
      return (TRUE);
    }
  return (FALSE);
}
Code example #19
File: token_funct.c Project: Frozenhorns/project
t_bool	is_a_dollar(t_token **tok, char **str)
{
  if (str && *str && **str == '$')
    {
      *str += 1;
      if (add_token(tok, DOLLARS, "$", NONE) == EXIT_FAILURE)
	return (ERROR);
      return (TRUE);
    }
  return (FALSE);
}
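The four matchers above differ only in the character they test and the token they emit. A table-driven variant would collapse them into one function; the sketch below reuses the project's t_token, t_bool, token constants, and add_token() signature exactly as they appear in the samples, so it is an illustration of the refactor, not code from the project.

typedef struct	s_match
{
  char		c;
  int		type;
  char		*text;
}		t_match;

t_bool	is_a_single_char_token(t_token **tok, char **str)
{
  static const t_match	matches[] =
    { {'&', ESP, "&"}, {'!', EXCLA, "!"}, {';', PV, ";"}, {'$', DOLLARS, "$"} };
  size_t	i;

  if (!str || !*str)
    return (FALSE);
  i = 0;
  while (i < sizeof(matches) / sizeof(matches[0]))
    {
      if (**str == matches[i].c)
	{
	  *str += 1;
	  if (add_token(tok, matches[i].type, matches[i].text, NONE) == EXIT_FAILURE)
	    return (ERROR);
	  return (TRUE);
	}
      i++;
    }
  return (FALSE);
}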
Code example #20
File: redir.c Project: Zethir/42sh
static int	is_heredoc(t_lex *lex, t_token_ht *token_ht, int i)
{
	char	*tmp;

	i += 2;
	if (lex->line[i] == '>' || lex->line[i] == '<' || lex->line[i] == '|' ||
			lex->line[i] == '&' || lex->line[i] == ';')
		return (-1);
	tmp = ft_strsub(lex->line, lex->tl, lex->hd - lex->tl);
	if ((token_ht = add_token(lex, token_ht, tmp, 9)) == NULL)
	{
		free(tmp);
		return (-1);
	}
	free(tmp);
	return (i);
}
Code example #21
File: js-norm.c Project: oozie/clamav-devel
void cli_js_parse_done(struct parser_state* state)
{
	struct tokens * tokens = &state->tokens;
	size_t par_balance = 0, i;
	char end = '\0';
	YYSTYPE val;

	cli_dbgmsg(MODULE "in cli_js_parse_done()\n");
	/* close unfinished token */
	switch (state->scanner->state) {
		case DoubleQString:
			end = '"';
			break;
		case SingleQString:
			end = '\'';
			break;
		default: /* make gcc happy */
			break;
	}
	if (end != '\0')
		cli_js_process_buffer(state, &end, 1);
	/* close remaining parenthesis */
	for (i=0;i<tokens->cnt;i++) {
		if (tokens->data[i].type == TOK_PAR_OPEN)
			par_balance++;
		else if (tokens->data[i].type == TOK_PAR_CLOSE && par_balance > 0)
			par_balance--;
	}
	if (par_balance > 0) {
		memset(&val, 0, sizeof(val));
		val.type = TOK_PAR_CLOSE;
		TOKEN_SET(&val, cstring, ")");
		while (par_balance-- > 0) {
			add_token(state, &val);
		}
	}

	/* we had to close unfinished strings, parenthesis,
	 * so that the folders/decoders can run properly */
	run_folders(&state->tokens);
	run_decoders(state);

	yylex_destroy(state->scanner);
	state->scanner = NULL;
}
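The parenthesis-repair step above (count the imbalance in one pass, then append that many TOK_PAR_CLOSE tokens) is easy to see in isolation. A standalone sketch of the same idea applied to a plain string:

#include <stdio.h>
#include <string.h>

int main(void)
{
	char buf[64] = "f((a, g(b)";
	size_t balance = 0;

	/* Count opens not matched by a close, exactly as the loop above. */
	for (size_t i = 0; buf[i] != '\0'; i++) {
		if (buf[i] == '(')
			balance++;
		else if (buf[i] == ')' && balance > 0)
			balance--;
	}
	/* Append the missing closers. */
	while (balance-- > 0)
		strcat(buf, ")");
	printf("%s\n", buf); /* prints: f((a, g(b)) then one more ')': f((a, g(b))) */
	return 0;
}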
Code example #22
File: symbtab.c Project: BlakeJarvis/csound
void init_symbtab(CSOUND *csound)
{
    OENTRY *ep;
    CONS_CELL *top, *head, *items;
    char *shortName;

    /* VL 27 02 2015 -- if symbtab already exists, do not create it
       again, to avoid memory consumption. */
    if (csound->symbtab == NULL) {
      //printf("init symbtab\n");
      csound->symbtab = cs_hash_table_create(csound);
      /* Now we need to populate with basic words.
       * Add token types for opcodes to symbtab.  If a polymorphic opcode
       * definition is found (dsblksiz >= 0xfffb), look for implementations
       * of that opcode to correctly mark the type of opcode it is (T_OPCODE,
       * T_OPCODE0, or T_OPCODE00).
       */
      top = head = cs_hash_table_values(csound, csound->opcodes);

      while (head != NULL) {
          items = head->value;
          while (items != NULL) {
              ep = items->value;

              if (ep->dsblksiz < 0xfffb) {
                  shortName = get_opcode_short_name(csound, ep->opname);

                  add_token(csound, shortName, get_opcode_type(ep));

                  if (shortName != ep->opname) {
                      csound->Free(csound, shortName);
                  }
              }
              items = items->next;
          }
          head = head->next;
      }
      csound->Free(csound, top);
    }
}
Code example #23
File: Scanner.cpp Project: boxershorts7/School
void Scanner::scan() {
	int token = file.get();
	while (token != EOF && error == -1) {
		switch (token) {
		case ',':
			scan_punctuation(token, Token::COMMA);
			break;
		case '.':
			scan_punctuation(token, Token::PERIOD);
			break;
		case '?':
			scan_punctuation(token, Token::Q_MARK);
			break;
		case '(':
			scan_punctuation(token, Token::LEFT_PAREN);
			break;
		case ')':
			scan_punctuation(token, Token::RIGHT_PAREN);
			break;
		case ':':
			scan_colon();
			break;
		case '#':
			scan_comment();
			break;
		case '\'':
			scan_string();
			break;
		case '\n':
			line++;
			break;
		default:
			scan_id(token);
			break;
		}
		token = file.get();
	}

	if (error == -1) {
		line++;
		add_token("", Token::END);
	}
}
Code example #24
File: tokenizer.c Project: WinnowTag/winnow
Pvoid_t atom_tokenize(const char * atom) {
	Pvoid_t features = NULL;

	if (atom) {
		xmlDocPtr doc = xmlParseDoc(BAD_CAST atom);
		if (doc) {
			xmlXPathContextPtr context = xmlXPathNewContext(doc);
			xmlXPathRegisterNs(context, BAD_CAST "atom", BAD_CAST "http://www.w3.org/2005/Atom");

			char *html = get_element_value(context, "/atom:entry/atom:content/text()");
			if (html) {
				features = html_tokenize_into_features(html, features);
				xmlFree(html);
			}

			char *title = get_element_value(context, "/atom:entry/atom:title/text()");
			if (title) {
				features = tokenize_text(title, strlen(title), features);
				xmlFree(title);
			}

			char *author = get_element_value(context, "/atom:entry/atom:author/atom:name/text()");
			if (author) {
				features = add_token(author, features);
				xmlFree(author);
			}

			char *link = get_attribute_value(context, "/atom:entry/atom:link[@rel='alternate']", "href");
			if (link) {
				features = tokenize_uri(link, features);
				xmlFree(link);
			}

			xmlXPathFreeContext(context);
		}

		xmlFreeDoc(doc);
	}

	return features;
}
Code example #25
File: redir.c Project: Zethir/42sh
static int	is_in(t_lex *lex, t_token_ht *token_ht, int i)
{
	char	*tmp;
	int		j;

	get_first_fd(lex, i);
	i++;
	if (lex->line[i] == '|' || lex->line[i] == ';')
		return (-1);
	tmp = ft_strsub(lex->line, lex->tl, lex->hd - lex->tl);
	j = is_replace_bis(lex, token_ht, i, tmp);
	if (j != i)
		return (j);
	if ((token_ht = add_token(lex, token_ht, tmp, 10)) == NULL)
	{
		free(tmp);
		return (-1);
	}
	free(tmp);
	return (i);
}
Code example #26
File: out_format.c Project: noushi/bmon
static int format_probe(void)
{
	int new_one = 1;
	char *p, *e;

	for (p = c_format; *p; p++) {
		if (*p == '$') {
			char *s = p;
			s++;
			if (*s == '(') {
				s++;
				if (!*s)
					goto unexpected_end;
				e = strchr(s, ')');
				if (e == NULL)
					goto invalid;

				*p = '\0';
				*e = '\0';
				add_token(OT_TOKEN, s);
				new_one = 1;
				p = e;
				continue;
			}
		}

		if (*p == '\\') {
			char *s = p;
			s++;
			switch (*s) {
				case 'n':
					*s = '\n';
					goto finish_escape;
				case 't':
					*s = '\t';
					goto finish_escape;
				case 'r':
					*s = '\r';
					goto finish_escape;
				case 'v':
					*s = '\v';
					goto finish_escape;
				case 'b':
					*s = '\b';
					goto finish_escape;
				case 'f':
					*s = '\f';
					goto finish_escape;
				case 'a':
					*s = '\a';
					goto finish_escape;
			}

			goto out;
		
finish_escape:
			*p = '\0';
			add_token(OT_STRING, s);
			p = s;
			new_one = 0;
			continue;
		}

out:	
		if (new_one) {
			add_token(OT_STRING, p);
			new_one = 0;
		}
	}

	if (c_debug) {
		int i;
		for (i = 0; i < token_index; i++)
			printf(">>%s<\n", out_tokens[i].ot_str);
	}

	return 1;

unexpected_end:
	fprintf(stderr, "Unexpected end of format string\n");
	return 0;

invalid:
	fprintf(stderr, "Missing ')' in format string\n");
	return 0;
}
Code example #27
File: vp9_tokenize.c Project: Laknot/libvpx
static void tokenize_b(int plane, int block, BLOCK_SIZE plane_bsize,
                       TX_SIZE tx_size, void *arg) {
  struct tokenize_b_args* const args = arg;
  VP9_COMP *cpi = args->cpi;
  ThreadData *const td = args->td;
  MACROBLOCK *const x = &td->mb;
  MACROBLOCKD *const xd = &x->e_mbd;
  TOKENEXTRA **tp = args->tp;
  uint8_t token_cache[32 * 32];
  struct macroblock_plane *p = &x->plane[plane];
  struct macroblockd_plane *pd = &xd->plane[plane];
  MB_MODE_INFO *mbmi = &xd->mi[0].src_mi->mbmi;
  int pt; /* near block/prev token context index */
  int c;
  TOKENEXTRA *t = *tp;        /* store tokens starting here */
  int eob = p->eobs[block];
  const PLANE_TYPE type = pd->plane_type;
  const tran_low_t *qcoeff = BLOCK_OFFSET(p->qcoeff, block);
  const int segment_id = mbmi->segment_id;
  const int16_t *scan, *nb;
  const scan_order *so;
  const int ref = is_inter_block(mbmi);
  unsigned int (*const counts)[COEFF_CONTEXTS][ENTROPY_TOKENS] =
      td->rd_counts.coef_counts[tx_size][type][ref];
  vp9_prob (*const coef_probs)[COEFF_CONTEXTS][UNCONSTRAINED_NODES] =
      cpi->common.fc->coef_probs[tx_size][type][ref];
  unsigned int (*const eob_branch)[COEFF_CONTEXTS] =
      td->counts->eob_branch[tx_size][type][ref];
  const uint8_t *const band = get_band_translate(tx_size);
  const int seg_eob = get_tx_eob(&cpi->common.seg, segment_id, tx_size);
  int16_t token;
  EXTRABIT extra;
  int aoff, loff;
  txfrm_block_to_raster_xy(plane_bsize, tx_size, block, &aoff, &loff);

  pt = get_entropy_context(tx_size, pd->above_context + aoff,
                           pd->left_context + loff);
  so = get_scan(xd, tx_size, type, block);
  scan = so->scan;
  nb = so->neighbors;
  c = 0;

  while (c < eob) {
    int v = 0;
    int skip_eob = 0;
    v = qcoeff[scan[c]];

    while (!v) {
      add_token_no_extra(&t, coef_probs[band[c]][pt], ZERO_TOKEN, skip_eob,
                         counts[band[c]][pt]);
      eob_branch[band[c]][pt] += !skip_eob;

      skip_eob = 1;
      token_cache[scan[c]] = 0;
      ++c;
      pt = get_coef_context(nb, token_cache, c);
      v = qcoeff[scan[c]];
    }

    vp9_get_token_extra(v, &token, &extra);

    add_token(&t, coef_probs[band[c]][pt], extra, (uint8_t)token,
              (uint8_t)skip_eob, counts[band[c]][pt]);
    eob_branch[band[c]][pt] += !skip_eob;

    token_cache[scan[c]] = vp9_pt_energy_class[token];
    ++c;
    pt = get_coef_context(nb, token_cache, c);
  }
  if (c < seg_eob) {
    add_token_no_extra(&t, coef_probs[band[c]][pt], EOB_TOKEN, 0,
                       counts[band[c]][pt]);
    ++eob_branch[band[c]][pt];
  }

  *tp = t;

  vp9_set_contexts(xd, pd, plane_bsize, tx_size, c > 0, aoff, loff);
}
Code example #28
File: js-norm.c Project: oozie/clamav-devel
/* The buffer is an html-norm-like "chunk"; if the original file is bigger
 * than the buffer, we rewind to a space, so we know that tokens won't be
 * broken in half at the end of a buffer -- all tokens except string
 * literals, of course. So we can assume that after the buffer there is
 * either a space, EOF, or a chunk of text containing no whitespace at all
 * (which we only care about if it's a string literal). */
void cli_js_process_buffer(struct parser_state *state, const char *buf, size_t n)
{
	struct scope* current = state->current;
	YYSTYPE val;
	int yv;
	YY_BUFFER_STATE yyb;

	if(!state->global) {
		/* this state has either not been initialized,
		 * or cli_js_parse_done() was already called on it */
		cli_warnmsg(MODULE "invalid state\n");
		return;
	}
	yyb = yy_scan_bytes(buf, n, state->scanner);
	memset(&val, 0, sizeof(val));
	val.vtype = vtype_undefined;
	/* on EOF yylex will return 0 */
	while( (yv=yylex(&val, state->scanner)) != 0)
	{
		const char *text;
		size_t leng;

		val.type = yv;
		switch(yv) {
			case TOK_VAR:
				current->fsm_state = InsideVar;
				break;
			case TOK_IDENTIFIER_NAME:
				text = yyget_text(state->scanner);
				leng = yyget_leng(state->scanner);
				if(current->last_token == TOK_DOT) {
					/* this is a member name, don't normalize
					*/
					TOKEN_SET(&val, string, cli_strdup(text));
					val.type = TOK_UNNORM_IDENTIFIER;
				} else {
					switch(current->fsm_state) {
						case WaitParameterList:
							state->syntax_errors++;
							/* fall through */
						case Base:
						case InsideInitializer:
							TOKEN_SET(&val, cstring, scope_use(current, text, leng));
							break;
						case InsideVar:
						case InsideFunctionDecl:
							TOKEN_SET(&val, cstring, scope_declare(current, text, leng, state));
							current->fsm_state = InsideInitializer;
							current->brackets = 0;
							break;
						case WaitFunctionName:
							TOKEN_SET(&val, cstring, scope_declare(current, text, leng, state));
							current->fsm_state = WaitParameterList;
							break;
					}
				}
				break;
			case TOK_PAR_OPEN:
				switch(current->fsm_state) {
					case WaitFunctionName:
						/* fallthrough */
					case WaitParameterList:
						current->fsm_state = InsideFunctionDecl;
						break;
					default:
						/* noop */
						break;
				}
				break;
			case TOK_PAR_CLOSE:
				switch(current->fsm_state) {
					case WaitFunctionName:
						state->syntax_errors++;
						break;
					case WaitParameterList:
						current->fsm_state = Base;
						break;
					default:
						/* noop */
						break;
				}
				break;
			case TOK_CURLY_BRACE_OPEN:
				switch(current->fsm_state) {
					case WaitFunctionName:
						/* fallthrough */
					case WaitParameterList:
					case InsideFunctionDecl:
						/* in a syntactically correct
						 * file, we would already be in
						 * the Base state when we see a {
						 */
						current->fsm_state = Base;
						/* fall-through */
					case InsideVar:
					case InsideInitializer:
						state->syntax_errors++;
						/* fall-through */
					case Base:
					default:
						current->blocks++;
						break;
				}
				break;
			case TOK_CURLY_BRACE_CLOSE:
				if(current->blocks > 0)
					current->blocks--;
				else
					state->syntax_errors++;
				if(!current->blocks) {
					if(current->parent) {
						/* add dummy FUNCTION token to
						 * mark function end */
						TOKEN_SET(&val, cstring, "}");
						add_token(state, &val);
						TOKEN_SET(&val, scope, NULL);
						val.type = TOK_FUNCTION;

						state->current = current = current->parent;
					} else {
						/* extra } */
						state->syntax_errors++;
					}
				}
				break;
			case TOK_BRACKET_OPEN:
				current->brackets++;
				break;
			case TOK_BRACKET_CLOSE:
				if(current->brackets > 0)
					current->brackets--;
				else
					state->syntax_errors++;
				break;
			case TOK_COMMA:
				if (current->fsm_state == InsideInitializer && current->brackets == 0 && current->blocks == 0) {
					/* initializer ended only if we
					 * encountered a comma, and [] are
					 * balanced.
					 * This avoids switching state on:
					 * var x = [4,y,u];*/
					current->fsm_state = InsideVar;
				}
				break;
			case TOK_SEMICOLON:
				if (current->brackets == 0 && current->blocks == 0) {
					/* avoid switching state on unbalanced []:
					 * var x = [test;testi]; */
					current->fsm_state = Base;
				}
				break;
			case TOK_FUNCTION:
				current = scope_new(state);
				current->fsm_state = WaitFunctionName;
				TOKEN_SET(&val, scope, state->current);
				break;
			case TOK_StringLiteral:
				if(state->tokens.cnt > 1 && state->tokens.data[state->tokens.cnt-1].type == TOK_PLUS) {
					/* see if can fold */
					yystype *prev_string = &state->tokens.data[state->tokens.cnt-2];
					if(prev_string->type == TOK_StringLiteral) {
						char *str = TOKEN_GET(prev_string, string);
						size_t str_len = strlen(str);

						text = yyget_text(state->scanner);
						leng = yyget_leng(state->scanner);


						/* delete TOK_PLUS */
						free_token(&state->tokens.data[--state->tokens.cnt]);

						str = cli_realloc(str, str_len + leng + 1);
						if (!str)
						    break;
						strncpy(str+str_len, text, leng);
						str[str_len + leng] = '\0';
						TOKEN_SET(prev_string, string, str);
						free(val.val.string);
						memset(&val, 0, sizeof(val));
						val.vtype = vtype_undefined;
						continue;
					}
				}
				break;
		}
		if(val.vtype == vtype_undefined) {
			text = yyget_text(state->scanner);
			TOKEN_SET(&val, string, cli_strdup(text));
			abort();
		}
		add_token(state, &val);
		current->last_token = yv;
		memset(&val, 0, sizeof(val));
		val.vtype = vtype_undefined;
	}
}
Code example #29
File: vp9_tokenize.c Project: JasonOldWoo/webrtc-qt
static void tokenize_b(int plane, int block, BLOCK_SIZE plane_bsize,
                       TX_SIZE tx_size, void *arg) {
  struct tokenize_b_args* const args = arg;
  VP9_COMP *cpi = args->cpi;
  MACROBLOCKD *xd = args->xd;
  TOKENEXTRA **tp = args->tp;
  uint8_t *token_cache = args->token_cache;
  struct macroblock_plane *p = &cpi->mb.plane[plane];
  struct macroblockd_plane *pd = &xd->plane[plane];
  MB_MODE_INFO *mbmi = &xd->mi_8x8[0]->mbmi;
  int pt; /* near block/prev token context index */
  int c = 0;
  TOKENEXTRA *t = *tp;        /* store tokens starting here */
  int eob = p->eobs[block];
  const PLANE_TYPE type = pd->plane_type;
  const int16_t *qcoeff_ptr = BLOCK_OFFSET(p->qcoeff, block);
  const int segment_id = mbmi->segment_id;
  const int16_t *scan, *nb;
  const scan_order *so;
  vp9_coeff_count *const counts = cpi->coef_counts[tx_size];
  vp9_coeff_probs_model *const coef_probs = cpi->common.fc.coef_probs[tx_size];
  const int ref = is_inter_block(mbmi);
  const uint8_t *const band = get_band_translate(tx_size);
  const int seg_eob = get_tx_eob(&cpi->common.seg, segment_id, tx_size);

  int aoff, loff;
  txfrm_block_to_raster_xy(plane_bsize, tx_size, block, &aoff, &loff);

  pt = get_entropy_context(tx_size, pd->above_context + aoff,
                           pd->left_context + loff);
  so = get_scan(xd, tx_size, type, block);
  scan = so->scan;
  nb = so->neighbors;
  c = 0;
  while (c < eob) {
    int v = 0;
    int skip_eob = 0;
    v = qcoeff_ptr[scan[c]];

    while (!v) {
      add_token(&t, coef_probs[type][ref][band[c]][pt], 0, ZERO_TOKEN, skip_eob,
                counts[type][ref][band[c]][pt]);

      cpi->common.counts.eob_branch[tx_size][type][ref][band[c]][pt] +=
          !skip_eob;

      skip_eob = 1;
      token_cache[scan[c]] = 0;
      ++c;
      pt = get_coef_context(nb, token_cache, c);
      v = qcoeff_ptr[scan[c]];
    }
    add_token(&t, coef_probs[type][ref][band[c]][pt],
              vp9_dct_value_tokens_ptr[v].extra,
              vp9_dct_value_tokens_ptr[v].token, skip_eob,
              counts[type][ref][band[c]][pt]);

    cpi->common.counts.eob_branch[tx_size][type][ref][band[c]][pt] += !skip_eob;

    token_cache[scan[c]] =
        vp9_pt_energy_class[vp9_dct_value_tokens_ptr[v].token];
    ++c;
    pt = get_coef_context(nb, token_cache, c);
  }
  if (c < seg_eob) {
    add_token(&t, coef_probs[type][ref][band[c]][pt], 0, EOB_TOKEN, 0,
              counts[type][ref][band[c]][pt]);
    ++cpi->common.counts.eob_branch[tx_size][type][ref][band[c]][pt];
  }

  *tp = t;

  set_contexts(xd, pd, plane_bsize, tx_size, c > 0, aoff, loff);
}
Code example #30
File: vp9_tokenize.c Project: RobinWuDev/Qt
static void tokenize_b(int plane, int block, BLOCK_SIZE plane_bsize,
                       TX_SIZE tx_size, void *arg) {
  struct tokenize_b_args* const args = arg;
  VP9_COMP *cpi = args->cpi;
  MACROBLOCKD *xd = args->xd;
  TOKENEXTRA **tp = args->tp;
  uint8_t token_cache[32 * 32];
  struct macroblock_plane *p = &cpi->mb.plane[plane];
  struct macroblockd_plane *pd = &xd->plane[plane];
  MB_MODE_INFO *mbmi = &xd->mi[0].src_mi->mbmi;
  int pt; /* near block/prev token context index */
  int c;
  TOKENEXTRA *t = *tp;        /* store tokens starting here */
  int eob = p->eobs[block];
  const PLANE_TYPE type = pd->plane_type;
  const tran_low_t *qcoeff = BLOCK_OFFSET(p->qcoeff, block);
  const int segment_id = mbmi->segment_id;
  const int16_t *scan, *nb;
  const scan_order *so;
  const int ref = is_inter_block(mbmi);
  unsigned int (*const counts)[COEFF_CONTEXTS][ENTROPY_TOKENS] =
      cpi->coef_counts[tx_size][type][ref];
  vp9_prob (*const coef_probs)[COEFF_CONTEXTS][UNCONSTRAINED_NODES] =
      cpi->common.fc.coef_probs[tx_size][type][ref];
  unsigned int (*const eob_branch)[COEFF_CONTEXTS] =
      cpi->common.counts.eob_branch[tx_size][type][ref];
  const uint8_t *const band = get_band_translate(tx_size);
  const int seg_eob = get_tx_eob(&cpi->common.seg, segment_id, tx_size);
  const TOKENVALUE *dct_value_tokens;

  int aoff, loff;
  txfrm_block_to_raster_xy(plane_bsize, tx_size, block, &aoff, &loff);

  pt = get_entropy_context(tx_size, pd->above_context + aoff,
                           pd->left_context + loff);
  so = get_scan(xd, tx_size, type, block);
  scan = so->scan;
  nb = so->neighbors;
  c = 0;
#if CONFIG_VP9_HIGHBITDEPTH
  if (cpi->common.profile >= PROFILE_2) {
    dct_value_tokens = (cpi->common.bit_depth == VPX_BITS_10 ?
                        vp9_dct_value_tokens_high10_ptr :
                        vp9_dct_value_tokens_high12_ptr);
  } else {
    dct_value_tokens = vp9_dct_value_tokens_ptr;
  }
#else
  dct_value_tokens = vp9_dct_value_tokens_ptr;
#endif

  while (c < eob) {
    int v = 0;
    int skip_eob = 0;
    v = qcoeff[scan[c]];

    while (!v) {
      add_token_no_extra(&t, coef_probs[band[c]][pt], ZERO_TOKEN, skip_eob,
                         counts[band[c]][pt]);
      eob_branch[band[c]][pt] += !skip_eob;

      skip_eob = 1;
      token_cache[scan[c]] = 0;
      ++c;
      pt = get_coef_context(nb, token_cache, c);
      v = qcoeff[scan[c]];
    }

    add_token(&t, coef_probs[band[c]][pt],
              dct_value_tokens[v].extra,
              (uint8_t)dct_value_tokens[v].token,
              (uint8_t)skip_eob,
              counts[band[c]][pt]);
    eob_branch[band[c]][pt] += !skip_eob;

    token_cache[scan[c]] = vp9_pt_energy_class[dct_value_tokens[v].token];
    ++c;
    pt = get_coef_context(nb, token_cache, c);
  }
  if (c < seg_eob) {
    add_token_no_extra(&t, coef_probs[band[c]][pt], EOB_TOKEN, 0,
                       counts[band[c]][pt]);
    ++eob_branch[band[c]][pt];
  }

  *tp = t;

  vp9_set_contexts(xd, pd, plane_bsize, tx_size, c > 0, aoff, loff);
}