Esempio n. 1
0
// Parses one "table" construct from the token stream and returns it.
// Three shapes are accepted (deduced from the branches below):
//   - a reference to a table already registered in *gts (wrapped as a
//     single rule_table_ref inside an anonymous table),
//   - a named inline definition: <name> { <rule>* },
//   - an anonymous inline definition: { <rule>* }.
// When inside_rule_box is true the construct must be terminated by a right
// bracket instead of end-of-statement (TT_NIL).  gmp is only forwarded to
// parse_rule().  Throws a const char* message on any parse error.
std::unique_ptr<table> parser::parse_table(const std::shared_ptr<const global_table_set>& gts, group_mem_protocol gmp, bool inside_rule_box)
{
    HC_LOG_TRACE("");
    std::string table_name;
    std::list<std::unique_ptr<rule_box>> rule_box_list;

    if (get_parser_type() == PT_TABLE) {
        get_next_token();
        // Peek (without consuming) one token ahead: if the construct ends
        // right after the current token, this is a table *reference*,
        // otherwise an inline table *definition*.
        if ((!inside_rule_box && m_scanner.get_next_token(true, 0).get_type() == TT_NIL) || (inside_rule_box && m_scanner.get_next_token(true, 0).get_type() == TT_RIGHT_BRACKET )) { //table reference
            if (m_current_token.get_type() == TT_STRING) {
                table_name = m_current_token.get_string();
                if (gts->get_table(table_name) != nullptr) {
                    get_next_token();

                    // Wrap the reference in an unnamed table holding one rule.
                    auto rb = std::unique_ptr<rule_box>(new rule_table_ref(table_name, gts));
                    rule_box_list.push_back(std::move(rb));
                    return std::unique_ptr<table>(new table(std::string(), std::move(rule_box_list)));
                } else {
                    HC_LOG_ERROR("failed to parse line " << m_current_line << " table " << table_name << " not found");
                    throw "failed to parse config file";
                }

            }
        } else if (m_current_token.get_type() == TT_STRING || m_current_token.get_type() == TT_LEFT_BRACE) {
            if (m_current_token.get_type() == TT_STRING) {
                // Named definition: the name must be followed by '{'.
                table_name = m_current_token.get_string();

                get_next_token();
                if (m_current_token.get_type() != TT_LEFT_BRACE) {
                    HC_LOG_ERROR("failed to parse line " << m_current_line << " unknown token " << get_token_type_name(m_current_token.get_type()) << " with value " << m_current_token.get_string() << " in this context");
                    throw "failed to parse config file";
                }
            }

            // Collect rules until parse_rule() reports no further rule.
            get_next_token();
            auto tmp_rule = parse_rule(gts, gmp);

            while (tmp_rule != nullptr) {
                rule_box_list.push_back(std::move(tmp_rule));
                get_next_token();
                tmp_rule = parse_rule(gts, gmp);
            }

            // The definition must close with '}' followed by the expected
            // terminator for this context.
            if (m_current_token.get_type() == TT_RIGHT_BRACE) {
                get_next_token();
                if ((!inside_rule_box && m_current_token.get_type() == TT_NIL) || (inside_rule_box && m_current_token.get_type() == TT_RIGHT_BRACKET)) {
                    return std::unique_ptr<table>(new table(table_name, std::move(rule_box_list)));
                }
            }

        }
    }
    // Anything that falls through the accepted shapes above is an error.
    HC_LOG_ERROR("failed to parse line " << m_current_line << " unknown token " << get_token_type_name(m_current_token.get_type()) << " with value " << m_current_token.get_string() << " in this context");
    throw "failed to parse config file";
}
Esempio n. 2
0
struct cParseNode* _get_parse_tree(int start, struct Grammar* grammar, struct TokenStream* tokens, struct Error* error) {
    struct cParseNode* parent = parse_rule(start, grammar, tokens, error);
    if (parent == NULL) {
        return NULL;
    }
    struct cParseNode* current = parent->child;
    struct cParseNode* tmp;
    int m, ignore;
    int rule = start;
    LOG("ignore any trailing ignores\n");
    while (tokens->at < tokens->num) {
        ignore = 0;
        for (m=0;m<grammar->ignore.num;m++) {
            if (tokens->tokens[tokens->at].which == grammar->ignore.tokens[m]) {
                ignore = 1;
                break;
            }
        }
        if (ignore == 0) {
            break;
        }
        LOG("ignoring white\n");
        tmp = _new_parsenode(rule);
        tmp->token = &tokens->tokens[tokens->at];
        tmp->type = NTOKEN;
        current = append_nodes(current, tmp);
        LOG("inc token %d %d\n", tokens->at, tokens->at+1);
        tokens->at += 1;
    }
    parent->child = current;
    return parent;
}
Esempio n. 3
0
/*
 * Parse a whole stylesheet from buf and append its rules to the given
 * rule chain.  Returns the head of the combined chain: `chain` itself
 * when one was supplied, otherwise the first newly parsed rule (NULL if
 * the buffer held no rules).
 */
static fz_css_rule *parse_stylesheet(struct lexbuf *buf, fz_css_rule *chain)
{
	fz_css_rule *rule, **nextp, *tail;

	tail = chain;
	if (tail)
	{
		/* Seek to the end of the existing chain so new rules append. */
		while (tail->next)
			tail = tail->next;
		nextp = &tail->next;
	}
	else
	{
		/* Empty chain: the first parsed rule becomes the head (tail). */
		nextp = &tail;
	}

	while (buf->lookahead != EOF)
	{
		if (accept(buf, '@'))
		{
			/* At-rules are consumed but produce no fz_css_rule here. */
			parse_at_rule(buf);
		}
		else
		{
			/* NOTE(review): assumes parse_rule never returns NULL,
			 * otherwise rule->next would dereference NULL -- confirm. */
			rule = *nextp = parse_rule(buf);
			nextp = &rule->next;
		}
	}

	return chain ? chain : tail;
}
Esempio n. 4
0
/*
 * Parse every line of the double-NUL-terminated line list in `buf`
 * (lines separated by '\0') as a filter rule for section `name`, and
 * append each parsed rule to the driver via IOCTL_CMD_APPENDRULE.
 * `chain` is stamped into every rule; the section's SID mask is looked
 * up once from `config` and copied into each rule.
 */
void
add_rules(const char *config, char *buf, const char *name, int chain)
{
	char *p, *p2;
	int n_str;
	DWORD n;
	UCHAR sid_mask[MAX_SIDS_COUNT / 8];

	get_sid_mask(config, name, sid_mask);

	for (p = buf, n_str = 1; *p != '\0'; p = p2, n_str++) {
		struct flt_rule rule;
		char num[16];	/* ":%d" for any int fits (worst case 12 chars + NUL) */

		p2 = p + strlen(p) + 1;

		/* BUG FIX: the original tested *buf (first char of the whole
		 * buffer) on every iteration; test the current line *p so
		 * comments and empty lines are actually skipped per-line. */
		if (*p == ';' || *p == '\0')
			continue;		// empty line or comment

		memset(&rule, 0, sizeof(rule));
		
		// parse it!
		if (!parse_rule(p, &rule)) {
			error("Error in line #%d of section [%s]", n_str, name);
			continue;
		}

		// set chain
		rule.chain = chain;

		// set SID mask
		memcpy(rule.sid_mask, sid_mask, sizeof(rule.sid_mask));

		if (rule.rule_id[0] == '\0') {
			// set default rule name: name of section + n_str
			strncpy(rule.rule_id, name, RULE_ID_SIZE);	// string can be not zero-terminated
			sprintf(num, ":%d", n_str);
			if (strlen(name) + strlen(num) < RULE_ID_SIZE)
				memcpy(rule.rule_id + strlen(name), num, strlen(num));	// string can be not zero-terminated
			else
				memcpy(rule.rule_id + RULE_ID_SIZE - strlen(num), num, strlen(num));
		}

		// append rule
		if (!DeviceIoControl(g_device, IOCTL_CMD_APPENDRULE, &rule, sizeof(rule),
				NULL, 0, &n, NULL)) {
			winerr("start: DeviceIoControl");
			break;
		}
	}
}
Esempio n. 5
0
/* Walk the children of the given XML node and build a singly linked list
 * of element styles, one per <rule> child (case-insensitive match).
 * Other element children are reported on stdout and skipped.  Returns
 * the head of the list, or NULL if nothing was parsed. */
static elemstyle_t *parse_rules(xmlNode *a_node) {
  elemstyle_t *head = NULL;
  elemstyle_t **link = &head;
  xmlNode *node;

  for (node = a_node->children; node != NULL; node = node->next) {
    if (node->type != XML_ELEMENT_NODE)
      continue;

    if (strcasecmp((char*)node->name, "rule") != 0) {
      printf("found unhandled rules/%s\n", node->name);
      continue;
    }

    /* Only advance the link when a style was actually produced. */
    *link = parse_rule(node);
    if (*link != NULL)
      link = &(*link)->next;
  }
  return head;
}
Esempio n. 6
0
        // Parses the rule's right-hand side `rhs` over [first, last).
        // The mpl::false_ tag selects this overload (non-transformed
        // attribute path).  On success the skipper is applied and any
        // user on_success handler for this rule ID is invoked before
        // `first` is committed to the consumed position.
        static bool parse_rhs_main(
            RHS const& rhs
          , Iterator& first, Iterator const& last
          , Context const& context, RContext& rcontext, ActualAttribute& attr
          , mpl::false_)
        {
            // see if the user has a BOOST_SPIRIT_DEFINE for this rule
            typedef
                decltype(parse_rule(
                    rule<ID, Attribute>(), first, last
                  , make_unique_context<ID>(rhs, context), attr))
            parse_rule_result;

            // If there is no BOOST_SPIRIT_DEFINE for this rule,
            // we'll make a context for this rule tagged by its ID
            // so we can extract the rule later on in the default
            // (generic) parse_rule function.
            typedef
                is_same<parse_rule_result, default_parse_rule_result>
            is_default_parse_rule;

            // Parse on a copy of `first` so failure leaves it untouched.
            Iterator i = first;
            bool r = rhs.parse(
                i
              , last
              , make_rule_context<ID>(rhs, context, is_default_parse_rule())
              , rcontext
              , attr
            );

            if (r)
            {
                auto first_ = first;
                x3::skip_over(first_, last, context);
                r = call_on_success(first_, i, context, attr
                  , has_on_success<ID, Iterator, Context, ActualAttribute>());
            }

            // Commit the new position only if everything succeeded.
            if (r)
                first = i;
            return r;
        }
Esempio n. 7
0
// Parse all rules for a given input and return the list of parsed rules.
golem_rule *rules_from_string(char *input) {
  golem_tokenizer tokenizer;
  init_tokenizer(&tokenizer, input);
  (void)get_token(&tokenizer);
  golem_rule *rules = NULL;
  golem_rule *last = NULL;
  do {
    golem_rule *rule = parse_rule(&tokenizer);
    if (!rule) {
      return NULL;
    }
    if (last) {
      last->next = rule;
    } else {
      rules = rule;
    }
    last = rule;
  } while (tokenizer.token != token_error && tokenizer.token != token_eof);
  free(input);
  return rules;
}
Esempio n. 8
0
/*
 * Read rule definitions from the open file `f`, one per line.
 * Lines starting with "registers" delegate to parse_registers_block();
 * blank lines and '#' comments are skipped; every other line is parsed
 * as a rule and collected into a freshly allocated ruleset.
 * Returns the ruleset, or NULL on allocation failure.
 */
struct ruleset *parse_ruleset_file( FILE *f ) 
{
    struct ruleset *rules = malloc( sizeof(struct ruleset ) );
    char buf[512];

    /* BUG FIX: malloc can fail; the original dereferenced it unchecked. */
    if( rules == NULL )
        return NULL;

    rules->rule_count = 0;
    yyline = 0;
    while( fgets( buf, sizeof(buf), f ) != NULL ) {
        yyline++;
        if( strncasecmp(buf, "registers", 9) == 0 ) {
            parse_registers_block(buf, sizeof(buf), f);
        } else if( buf[0] != '\0' && buf[0] != '#' && buf[0] != '\n' ) {
            struct rule *rule;
            char *p = buf;
            rule = new_rule();
            if( parse_rule( &p, rule ) != 0 ) {
                free( rule );
            } else {
                /* NOTE(review): rules->rules has a fixed capacity not
                 * visible here -- confirm rule_count cannot overrun it. */
                rules->rules[rules->rule_count++] = rule;
            }
        }
    }
    return rules;
}
Esempio n. 9
0
// clean
/*
 * Try to match one alternative (`option`) of `rule` against the token
 * stream, consuming tokens as it goes.  For each item of the option:
 *   RULE    -> recurse via parse_rule (token position restored on failure),
 *   TOKEN   -> match by token id (EOF token is synthesized when input ends),
 *   LITERAL -> match by exact token text,
 *   SPECIAL -> delegate to check_special.
 * Unless the rule is marked dont_ignore, "ignore" tokens (whitespace etc.)
 * are consumed and kept as NTOKEN nodes before each item.
 * Returns the chain of parse nodes built, or NULL on mismatch with *error
 * describing the furthest failure (reason codes 1..5 as set below).
 */
struct cParseNode* parse_children(unsigned int rule, struct RuleOption* option, struct Grammar* grammar, struct TokenStream* tokens, struct Error* error) {
    LOG("parsing children of %d (token at %d)\n", rule, tokens->at);
    struct cParseNode* current = UNINITIALIZED;
    unsigned int i = 0, m = 0;
    unsigned int at = 0;
    struct cParseNode* tmp = NULL;
    struct RuleItem* item = NULL;
    int ignore;
    INDENT();
    for (i=0;i<option->num;i++) {
        item = &option->items[i];
        if (!grammar->rules.rules[rule].dont_ignore) {
            /* Consume any run of ignorable tokens, keeping them in the
             * tree so the original text can be reconstructed. */
            while (tokens->at < tokens->num) {
                ignore = 0;
                for (m=0;m<grammar->ignore.num;m++) {
                    if (tokens->tokens[tokens->at].which == grammar->ignore.tokens[m]) {
                        ignore = 1;
                        break;
                    }
                }
                if (ignore == 0) {
                    break;
                }
                LOG("ignoring white\n");
                tmp = _new_parsenode(rule);
                tmp->token = &tokens->tokens[tokens->at];
                tmp->type = NTOKEN;
                current = append_nodes(current, tmp);
                LOG("inc token %d %d\n", tokens->at, tokens->at+1);
                tokens->at += 1;
            }
        }
        if (tokens->at < tokens->num) {
            LOG("At token %d '%s'\n", tokens->at, tokens->tokens[tokens->at].value);
        }
        if (item->type == RULE) {
            LOG(">RULE\n");
            /**
            if (0 && tokens->at >= tokens->num) { // disabling
                error->at = tokens->at;
                error->reason = 1;
                error->token = NULL;
                error->text = "ran out";
                // error[1] = ['ran out', rule, i, item->value.which];
                // log('not enough tokens')
                DEDENT();
                return NULL;
            }
            **/
            /* Remember the position so a failed sub-rule can be rolled
             * back before reporting the error. */
            at = tokens->at;
            tmp = parse_rule(item->value.which, grammar, tokens, error);
            if (tmp == NULL) {
                tokens->at = at;
                /* Only record the error if it is at least as far as the
                 * furthest one so far and not an out-of-tokens report. */
                if (tokens->at >= error->at && error->reason!=1 && error->reason!=4) {
                    error->at = tokens->at;
                    error->reason = 2;
                    error->token = &tokens->tokens[tokens->at];
                    error->text = "rule failed";
                    error->wanted = item->value.which;
                }
                DEDENT();
                return NULL;
            }
            current = append_nodes(current, tmp);
            continue;
        } else if (item->type == TOKEN) {
            LOG(">TOKEN\n");
            if (tokens->at >= tokens->num) {
                if (item->value.which == tokens->eof) {
                    LOG("EOF -- passing\n");
                    /* Synthesize an EOF token node (lineno/charno -1). */
                    tmp = _new_parsenode(rule);
                    tmp->token = (struct Token*)malloc(sizeof(struct Token));
                    tmp->token->value = NULL;
                    tmp->token->which = tokens->eof;
                    tmp->token->lineno = -1;
                    tmp->token->charno = -1;
                    tmp->type = NTOKEN;
                    current = append_nodes(current, tmp);
                    continue;
                }
                LOG("no more tokens\n");
                error->at = tokens->at;
                error->reason = 1;
                error->token = NULL;
                error->text = "ran out";
                error->wanted = item->value.which;
                DEDENT();
                return NULL;
            }
            if (tokens->tokens[tokens->at].which == item->value.which) {
                LOG("got token! %d\n", item->value.which);
                tmp = _new_parsenode(rule);
                tmp->token = &tokens->tokens[tokens->at];
                tmp->type = NTOKEN;
                current = append_nodes(current, tmp);
                LOG("inc token %d %d\n", tokens->at, tokens->at+1);
                tokens->at += 1;
                continue;
            } else {
                if (tokens->at > error->at) {
                    error->at = tokens->at;
                    error->reason = 3;
                    error->token = &tokens->tokens[tokens->at];
                    error->text = "token failed";
                    error->wanted = option->items[i].value.which;
                }
                LOG("token failed (wanted %d, got %d)\n",
                        item->value.which, tokens->tokens[tokens->at].which);
                DEDENT();
                return NULL;
            }
        } else if (item->type == LITERAL) {
            LOG(">LITERAL\n");
            if (tokens->at >= tokens->num) {
                error->at = tokens->at;
                error->reason = 4;
                error->token = NULL;
                error->text = item->value.text;
                DEDENT();
                return NULL;
            }
            if (strcmp(item->value.text, tokens->tokens[tokens->at].value) == 0) {
                LOG("got literal!\n");
                tmp = _new_parsenode(rule);
                tmp->token = &tokens->tokens[tokens->at];
                tmp->type = NTOKEN;
                current = append_nodes(current, tmp);
                LOG("inc token %d %d\n", tokens->at, tokens->at+1);
                tokens->at += 1;
                continue;
            } else {
                if (tokens->at > error->at) {
                    error->at = tokens->at;
                    error->reason = 5;
                    error->token = &tokens->tokens[tokens->at];
                    error->text = item->value.text;
                }
                LOG("failed....literally: %s\n", item->value.text);
                DEDENT();
                return NULL;
            }
        } else if (item->type == SPECIAL) {
            LOG(">SPECIAL\n");
            tmp = check_special(rule, item->value.special, current, grammar, tokens, error);
            if (tmp == NULL) {
                LOG("FAIL SPECIAL\n");
                DEDENT();
                return NULL;
            }
            current = tmp;
        }
    }
    DEDENT();
    return current;
}
Esempio n. 10
0
/*
 * Load firewall-style rules from *p_fh (or, if NULL, from the first
 * openable file in default_locations[]) into the global rule_table,
 * then append a final "deny all" catch-all rule.
 * On success returns 0 with *p_fh closed and set to NULL; on failure
 * returns a negative value.
 */
int parse_file(FILE ** p_fh)
{

    int ix = 0;
    char line_buf[512];
    int cline = 0;
    int prule_result = 0;

    if (p_fh == NULL) {
        return -1;
    }

    if (*p_fh == NULL) {
        fprintf(stderr, "No configuration file was specified.\n");
    }

    while (*p_fh == NULL && ix < default_locations_cnt) {
        fprintf(stdout,
                "Searching for a file in default location: %s\n",
                default_locations[ix]);
        *p_fh = fopen(default_locations[ix++], "r");
    }

    if (*p_fh == NULL) {
        fprintf(stderr, "No configuration file could be found\n");
        return -1;
    }

    while (fgets(line_buf, sizeof(line_buf), *p_fh) &&
           rule_cnt < MAX_ENTRIES - 1) {
        cline++;

        prule_result = parse_rule(line_buf, cline);
        if (prule_result < 0 ){
            /* BUG FIX: close the file before propagating the error so the
             * handle is not leaked (the success path closes it below). */
            fclose(*p_fh);
            *p_fh = NULL;
            return prule_result;
        }
        else if (prule_result == 0) {
            rule_cnt++;
        }
        else {
            /*
             * This line was just a comment so skip it
             */
        }
    }

    /*
     * Append the "deny all" rule at the end of our table
     */
    rule_table[rule_cnt] = malloc(sizeof(ipr_rule_t));
    if (rule_table[rule_cnt] == NULL) {
        fprintf(stderr,"Could not allocate memory.");
        /* BUG FIX: also close the file on the allocation-failure path. */
        fclose(*p_fh);
        *p_fh = NULL;
        return -1;
    }
    rule_table[rule_cnt]->permission = RULE_DENY;
    rule_table[rule_cnt]->type = RULE_ALL;
    rule_cnt++;

    if (*p_fh != NULL) {
        fclose(*p_fh);
        *p_fh = NULL;
    }

    return 0;
}
Esempio n. 11
0
/*
 * Parse a POSIX-style TZ string ("STDoffset[DST[offset][,start[,end]]]")
 * into *tz.  A NULL/empty string yields GMT with zero offsets.  STD and
 * DST names must be exactly three uppercase letters.  If no DST rule is
 * given, defaults are used: start = 2am last-week Sunday of March,
 * end = 3am last-week Sunday of October.
 * Returns 1 on success, 0 on any malformed input.
 */
bool_t tz_parse (unsigned char *str, struct tz_t *tz)
{
	unsigned char *s;

	memset (tz, 0, sizeof (*tz));

	if (! str || *str == 0) {
		/* Empty input: fall back to GMT for both STD and DST. */
		tz->name_std[0] = tz->name_dst[0] = 'G';
		tz->name_std[1] = tz->name_dst[1] = 'M';
		tz->name_std[2] = tz->name_dst[2] = 'T';
		goto ok;
	}

	/* LY: scan for STD name. */
	for (s = str; *s >= 'A' && *s <= 'Z'; )
		s++;
	if (s != str + 3)
		goto ballout;
	tz->name_std[0] = str[0];
	tz->name_std[1] = str[1];
	tz->name_std[2] = str[2];

	/* LY: parse STD offset. */
	str = parse_offset (s, &tz->offset_std);
	if (! str)
		goto ballout;

	if (*str == 0) {
		/* LY: there is no DST. */
		tz->name_dst[0] = tz->name_std[0];
		tz->name_dst[1] = tz->name_std[1];
		tz->name_dst[2] = tz->name_std[2];
		tz->offset_dst = tz->offset_std;
		goto ok;
	}

	/* LY: scan for DST name. */
	for (s = str; *s >= 'A' && *s <= 'Z'; )
		s++;
	if (s == str + 3) {
		tz->name_dst[0] = str[0];
		tz->name_dst[1] = str[1];
		tz->name_dst[2] = str[2];

		/* LY: parse DST offset. */
		str = parse_offset (s, &tz->offset_dst);
		if (! str)
			goto ballout;
	} else if (s != str)
		goto ballout;

	/* LY: start = 2am of last week sunday of March. */
	tz->start.month = 3;
	tz->start.week = 5;
	tz->start.day = 0;
	tz->start.seconds = 3600 * 2;
	if (*str == ',') {
		str = parse_rule (str + 1, &tz->start);
		if (! str)
			goto ballout;
	}

	/* LY: end = 3am of last week sunday of October. */
	tz->end.month = 10;
	tz->end.week = 5;
	tz->end.day = 0;
	tz->end.seconds = 3600 * 3;
	if (*str == ',') {
		str = parse_rule (str + 1, &tz->end);
		if (! str)
			goto ballout;
	}

	/* Any trailing garbage makes the whole string invalid. */
	if (*str != 0)
		goto ballout;

ok:
	return 1;

ballout:
	return 0;
}
Esempio n. 12
0
 /// Invoked when this rule is used as a parser: forwards to the
 /// parse_rule overload found for this rule type (the user-supplied
 /// BOOST_SPIRIT_DEFINE one if present, otherwise the generic default).
 bool parse(Iterator& first, Iterator const& last
            , Context const& context, unused_type, Attribute_& attr) const
 {
     return parse_rule(*this, first, last, context, attr);
 }
Esempio n. 13
0
/*
 * Load standardizer rules from database table `tab` (must contain a
 * `rule` text column, read ordered by id) into *rules via the SPI
 * cursor interface, then finalize them with rules_ready().
 * Returns 0 on success, -1 (after an elog NOTICE) on any failure.
 */
static int load_rules(RULES *rules, char *tab)
{
    int ret;
    SPIPlanPtr SPIplan;
    Portal SPIportal;
    bool moredata = TRUE;
#ifdef DEBUG
    struct timeval t1, t2;
    double elapsed;
#endif
    char *sql;

    int rule_arr[MAX_RULE_LENGTH];

    int ntuples;
    int total_tuples = 0;

    rules_columns_t rules_columns = {rule: -1};

    char *rule;

    DBG("start load_rules\n");
    SET_TIME(t1);

    if (!tab || !strlen(tab)) {
        elog(NOTICE, "load_rules: rules table is not usable");
        return -1;
    }
    /* Guard against SQL injection: the table name is concatenated below. */
    if (!tableNameOk(tab)) {
        elog(NOTICE, "load_rules: rules table name may only be alphanum and '.\"_' characters (%s)", tab);
        return -1;
    }
    sql = SPI_palloc(strlen(tab)+35);
    strcpy(sql, "select rule from ");
    strcat(sql, tab);
    strcat(sql, " order by id ");

    /* get the sql for the lexicon records and prepare the query */
    SPIplan = SPI_prepare(sql, 0, NULL);
    if (SPIplan == NULL) {
        elog(NOTICE, "load_rules: couldn't create query plan for the rule data via SPI (%s)", sql);
        return -1;
    }

    if ((SPIportal = SPI_cursor_open(NULL, SPIplan, NULL, NULL, true)) == NULL) {
        elog(NOTICE, "load_rules: SPI_cursor_open('%s') returns NULL", sql);
        return -1;
    }

    while (moredata == TRUE) {
        //DBG("calling SPI_cursor_fetch");
        SPI_cursor_fetch(SPIportal, TRUE, TUPLIMIT);

        if (SPI_tuptable == NULL) {
            elog(NOTICE, "load_rules: SPI_tuptable is NULL");
            return -1;
        }

        /* Resolve the `rule` column number once, on the first batch. */
        if (rules_columns.rule == -1) {
            ret = fetch_rules_columns(SPI_tuptable, &rules_columns);
            if (ret)
                return ret;
        }

        ntuples = SPI_processed;
        //DBG("Reading edges: %i - %i", total_tuples, total_tuples+ntuples);

        if (ntuples > 0) {
            int t;
            SPITupleTable *tuptable = SPI_tuptable;
            TupleDesc tupdesc = SPI_tuptable->tupdesc;

            for (t = 0; t < ntuples; t++) {
                int nr;
                //if (t%100 == 0) { DBG("    t: %i", t); }
                HeapTuple tuple = tuptable->vals[t];
                GET_TEXT_FROM_TUPLE(rule,rules_columns.rule);
                nr = parse_rule(rule, rule_arr);
                if (nr == -1) {
                    /* BUG FIX: messages below said "load_roles". */
                    elog(NOTICE, "load_rules: rule exceeds 128 terms");
                    return -1;
                }
                ret = rules_add_rule(rules, nr, rule_arr);
                if (ret != 0) {
                    elog(NOTICE,"load_rules: failed to add rule %d (%d): %s",
                         total_tuples+t+1, ret, rule);
                    return -1;
                }
            }
            //DBG("calling SPI_freetuptable");
            SPI_freetuptable(tuptable);
            //DBG("back from SPI_freetuptable");
        }
        else
            moredata = FALSE;

        total_tuples += ntuples;
    }

    ret = rules_ready(rules);
    if (ret != 0) {
        elog(NOTICE, "load_rules: failed to ready the rules: err: %d", ret);
        return -1;
    }


    SET_TIME(t2);
    ELAPSED_T(t1, t2);
    DBG("Time to read %i rule records: %.1f ms.", total_tuples, elapsed);

    return 0;
}
Esempio n. 14
0
/*
 * Interactive test driver for the address standardizer: loads the
 * lexicon, gazetteer and rules from their fixed input files, wires them
 * into a standardizer, then reads addresses from stdin until "exit".
 * Accepts "-o <n>" to set a debug/print option bitmask
 * (bit 0: dump address lexicon, bit 1: dump gazetteer lexicon).
 */
int main(int argc, char *argv[])
{
    STANDARDIZER *std;
    LEXICON *lex;
    LEXICON *gaz;
    RULES *rules;

    char buf[1024];

    int seq;
    char input_str[ 4096 ] ;
    char word[512];
    char stdword[512];
    int token;
    int nr;
    int rule[RULESIZE];
    int err;
    int cnt;
    int option = 0;

    FILE *in;

    if (argc == 3 && !strcmp(argv[1], "-o")) {
        option = strtol(argv[2], NULL, 10);
        argc -= 2;
        argv += 2;
    }
    else if (argc != 1) 
        Usage();

    std = std_init();
    assert(std);

    lex = lex_init(std->err_p);
    assert(lex);

    in = fopen(LEXIN, "rb");
    assert(in);

    cnt = 0;
    while (!feof(in) && fgets(buf, 1024, in)) {
        cnt++;
        /* parse into fields */
        if (parse_csv(buf, &seq, word, stdword, &token)) {
            /* add the record to the lexicon */
            err = lex_add_entry(lex, seq, word, stdword, token);
            if (err != 1)
                printf("lex: Failed: %d: %s", cnt, buf);
        }
        else {
            printf("lex: Skipping: %d: %s", cnt, buf);
        }
    }
    fclose(in);

    if (option & 1) {
        printf("------------ address lexicon --------------\n");
        print_lexicon(lex->hash_table);
        printf("\n");
    }

    gaz = lex_init(std->err_p);
    assert(gaz);

    in = fopen(GAZIN, "rb");
    assert(in);

    cnt = 0;
    while (!feof(in) && fgets(buf, 1024, in)) {
        cnt++;
        /* parse into fields */
        if (parse_csv(buf, &seq, word, stdword, &token)) {
            /* add the record to the lexicon */
            err = lex_add_entry(gaz, seq, word, stdword, token);
            if (err != 1)
                printf("gaz: Failed: %d: %s", cnt, buf);
        }
        else {
            printf("gaz: Skipping: %d: %s", cnt, buf);
        }
    }
    fclose(in);

    if (option & 2) {
        printf("------------ gazeteer lexicon --------------\n");
        print_lexicon(gaz->hash_table);
        printf("\n");
    }

    rules = rules_init(std->err_p);
    assert(rules);
    rules -> r_p -> collect_statistics = TRUE ;

    /* ************ RULES **************** */

    in = fopen(RULESIN, "rb");
    assert(in);

    cnt = 0;
    while (!feof(in) && fgets(buf, 1024, in)) {
        cnt++;
        /* parse into fields */
        nr = parse_rule(buf, rule);

        /* BUG FIX: parse_rule signals failure with a negative count
         * (TODO confirm against its definition); don't feed a bad rule
         * to rules_add_rule. */
        if (nr < 0) {
            printf("rules: Skipping: %d: %s", cnt, buf);
            continue;
        }

        /* add the record to the rules */
        err = rules_add_rule(rules, nr, rule);
        if (err != 0)
            printf("rules: Failed: %d (%d): %s", cnt, err, buf);
    }
    err = rules_ready(rules);
    if (err != 0)
        printf("rules: Failed: err=%d\n", err);
    fclose(in);

    std_use_lex(std, lex);
    std_use_gaz(std, gaz);
    std_use_rules(std, rules);
    std_ready_standardizer(std);

    printf( "Standardization test. Type \"exit\" to quit:\n" ) ;
    fflush( stdout ) ;
    while ( TRUE ) {
        err = standardize_command_line( std, input_str, option ) ;
        if ( err == FAIL ) {
            break ;
        }
    }
    printf( "OK\n" ) ;
    fflush( stdout ) ;

    std_free(std);
/* these were freed when we bound them with std_use_*()
    rules_free(rules);
    lex_free(gaz);
    lex_free(lex);
*/

    return 0;
}
Esempio n. 15
0
/*
 * Post-command-line-option init: reads the rule configuration (from the
 * --config file when given and readable, otherwise the built-in
 * default_rule string) and builds the trace-block tree and rule list.
 *
 * Config line forms handled below:
 *   '#' / blank          -> comment or empty line, skipped
 *   '{'                  -> extra rule, handed to parse_extra_rule()
 *   '[name]' / '(name)'  -> group node ('(' also attaches via search_rule);
 *                           leading spaces encode tree depth
 *   anything else        -> plain rule line, handed to parse_rule()
 *
 * NOTE(review): the config buffer is parsed in place and block->name
 * aliases into it, so it must never be freed; the advanced read_ptr is
 * deliberately not released.
 */
static void fr_post_clo_init(void)
{
   Rule_List* last_rule_ptr = NULL;
   Char* read_ptr;
   Trace_Block* block = NULL;
   Trace_Block* parent = NULL;
   Int* indents = (int*)dir_buffer;   /* reuses dir_buffer as the indent stack */
   Int indent;
   Int depth = -1;                    /* -1 until the first group line fixes the top indent */
   Bool is_group;
   SysRes sres;
   Int fd;
   OffT file_size;

   if (clo_mmap) {
#if VG_WORDSIZE == 4
      mmap_section.next = NULL;
      mmap_section.page_addr = 0;
      mmap_section.trace_blocks = VG_(calloc)("freya.fr_post_clo_init.2", PAGE_NUMBER, sizeof(Trace_Block*));
      mmap_section.used_blocks = VG_(calloc)("freya.fr_post_clo_init.3", PAGE_NUMBER, sizeof(Char));
#else
      mmap_sections = VG_(calloc)("freya.fr_post_clo_init.1", 1, sizeof(Mmap_Section));
      mmap_sections->next = NULL;
      mmap_sections->page_addr = 0;
      mmap_sections->trace_blocks = VG_(calloc)("freya.fr_post_clo_init.2", PAGE_NUMBER, sizeof(Trace_Block*));
      mmap_sections->used_blocks = VG_(calloc)("freya.fr_post_clo_init.3", PAGE_NUMBER, sizeof(Char));
      mmap_section_cache = mmap_sections;
#endif
   }

   /* Slurp the whole config file into a NUL-terminated buffer. */
   read_ptr = NULL;
   if (clo_config) {
      sres = VG_(open)(clo_config, VKI_O_RDONLY, 0);
      if (!sr_isError(sres)) {
         fd = (Int) sr_Res(sres);

         file_size = VG_(lseek)(fd, 0, VKI_SEEK_END);
         VG_(lseek)(fd, 0, VKI_SEEK_SET);

         if (clo_fr_verb)
            VG_(printf)("File '%s' (size: %ld bytes) is successfully opened.\n", clo_config, file_size);

         read_ptr = VG_(malloc)("freya.fr_post_clo_init.3", (file_size + 1) * sizeof(Char));
         VG_(read)(fd, read_ptr, file_size);
         read_ptr[file_size] = '\0';

         VG_(close) (fd);
      }
      else if (clo_fr_verb)
         VG_(printf)("Cannot open '%s'. (Fallback to default config)\n", clo_config);
   }
   else if (clo_fr_verb)
      VG_(printf)("No config file provided. (Fallback to default config)\n");

   if (!read_ptr) {
      // Duplicate
      read_ptr = VG_(malloc)("freya.fr_post_clo_init.4", (VG_(strlen)(default_rule) + 1) * sizeof(Char));
      VG_(strcpy)(read_ptr, default_rule);
   }

   while (*read_ptr) {
      // Parsing the next line, first skip spaces
      indent = 0;
      while (*read_ptr == ' ') {
         indent++;
         read_ptr++;
      }

      // Skip comments and empty lines
      if (*read_ptr == '#' || *read_ptr == '\r' || *read_ptr == '\n') {
         while (*read_ptr != '\0' && *read_ptr != '\r' && *read_ptr != '\n')
            read_ptr++;

         if (*read_ptr) {
            read_ptr++;
            continue;
         }
      }

      if (*read_ptr == '{') {
         read_ptr = parse_extra_rule(read_ptr, block);
         continue;
      } else if (*read_ptr != '[' && *read_ptr != '(') {
         read_ptr = parse_rule(read_ptr, &last_rule_ptr);
         continue;
      }

      /* Group line: '[' = plain group, '(' = group attached to a rule. */
      is_group = *read_ptr == '[';

      block = VG_(malloc)("freya.fr_post_clo_init.4", sizeof(Trace_Block));
      read_ptr++;
      block->name = read_ptr;

      /* Scan to the matching close; the name is terminated in place. */
      while (!(!is_group && *read_ptr == ')') && !(is_group && *read_ptr == ']')) {
         tl_assert2(*read_ptr && *read_ptr != '\n' && *read_ptr != '\r', "unterminated ( or [");
         read_ptr++;
      }
      tl_assert2(block->name != read_ptr, "node has no name");

      *read_ptr = '\0';
      if (!is_group)
         search_rule(block, block->name, read_ptr - block->name);
      read_ptr++;

      /* A trailing '+' marks the (single) default parent node. */
      if (*read_ptr == '+') {
         tl_assert2(default_parent == NULL, "Only one default node is allowed");
         default_parent = block;
         read_ptr++;
      }

      while (*read_ptr == ' ')
         read_ptr++;
      tl_assert2(*read_ptr == '\n' || *read_ptr == '\r' || !*read_ptr, "Garbage at the end of the line");

      if (clo_fr_verb)
         VG_(printf)("%s '%s' %s\n", is_group ? "Group:" : "Group & Attach:", block->name, default_parent == block ? "(Default)" : "");

      /* Translate the line's indentation into a tree position: deeper
       * indent descends into the previously added node, shallower indent
       * pops back up until the matching depth is found. */
      if (depth >= 0) {
         if (indents[depth] != indent) {
            if (indent > indents[depth]) {
               tl_assert2(depth < 63, "Maximum allowed depth is 63 for the tree");
               depth++;
               indents[depth] = indent;
               if (parent)
                  parent = parent->first;
               else
                  parent = trace_head;
            } else {
               do {
                  tl_assert2(depth != 0, "Wrong tree indentation");
                  depth--;
                  tl_assert(parent);
                  parent = parent->parent;
               } while (indent != indents[depth]);
               tl_assert((depth == 0 && !parent) || (depth > 0 && parent));
            }
         }
      } else {
         // The indentation of the top element
         tl_assert(!parent);
         indents[0] = indent;
         depth = 0;
      }

      /* Link the new block in as the first child of its parent (or as
       * the new head of the top-level list) and zero its counters. */
      block->parent = parent;
      if (parent) {
         block->next = parent->first;
         parent->first = block;
      } else {
         block->next = trace_head;
         trace_head = block;
      }
      block->first = NULL;

      block->hash_next = NULL;

      block->allocs = 0;
      block->total = 0;
      block->current = 0;
      block->peak = 0;
      block->ips = 0;
   }

   remove_unused_rules();
}