Example #1
std::string join_headers( std::string header1, std::string header2 )
{
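  // Build the merged header: columns unique to header1 first, then the shared
  // columns, then the columns unique to header2, all tab-separated; a leading
  // '*' marker is stripped from each emitted column name.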
  std::vector<std::string> vs1 = tokenize_string( header1, "\t" );
  std::vector<std::string> vs2 = tokenize_string( header2, "\t" );
  std::vector<std::string> intercept = intercept_cols( vs1, vs2 );
  std::string header;


  for ( int i = 0; i < vs1.size(); i++ )
  {
    int j = 0;
    for ( j = 0; j < intercept.size(); j++ )
    {
      if ( compare_without_star( vs1[i], intercept[j] ))
        break;
    }
    if ( j == intercept.size() )
    {
      if ( vs1[i][0] == '*' )
        header.append( vs1[i].substr( 1, vs1[i].size() ) );
      else
        header.append( vs1[i] );
      header.push_back( '\t' );
    } // the current column name is not in the intersection, so keep it
  }

  for ( int i = 0; i < intercept.size(); i++)
  {
      if ( intercept[i][0] == '*' )
        header.append( intercept[i].substr( 1, intercept[i].size() ) );
      else
        header.append( intercept[i] );

      header.push_back( '\t' );
  }

  for ( int i = 0; i < vs2.size(); i++ )
  {
    int j = 0;
    for ( j = 0; j < intercept.size(); j++ )
    {
      if ( compare_without_star( vs2[i], intercept[j] ))
        break;
    }

    if ( j == intercept.size() )
    {
      if ( vs2[i][0] == '*' )
        header.append( vs2[i].substr( 1, vs2[i].size() ) );
      else
        header.append( vs2[i] );

      header.push_back( '\t' );
    } // j reached the end: the current column name is not in the intersection, so keep it
  }

  return header;

}
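The C++ snippets on this page (join_headers above, and get_col_pos, num_comparison and string_comparison below) all call a tokenize_string() overload that splits a string on a delimiter and returns the pieces as a std::vector<std::string>. That helper is not shown here; the minimal stand-in below is only an assumption about its behaviour, written to make the snippets easier to follow, not the projects' actual code:

#include <iostream>
#include <string>
#include <vector>

// Split `s` on every occurrence of `delim` and return the pieces.
static std::vector<std::string> tokenize_string( const std::string &s,
                                                 const std::string &delim )
{
  std::vector<std::string> out;
  std::string::size_type start = 0, pos;
  while ( (pos = s.find( delim, start )) != std::string::npos )
  {
    out.push_back( s.substr( start, pos - start ) );
    start = pos + delim.size();
  }
  out.push_back( s.substr( start ) );   // last piece (may be empty)
  return out;
}

int main()
{
  // "id\tname\tscore" splits into three column names.
  for ( const std::string &col : tokenize_string( "id\tname\tscore", "\t" ) )
    std::cout << col << '\n';
}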
Example #2
token_type_t get_token(tokenizer_t* t)
{
	if (t->token_value) {
		free(t->token_value);
		t->token_value = NULL;
	}

	eatwhitespace(t);

	stack_push(t->index_stack, &t->source_index);

	if ('\0' == t->source[t->source_index]) {
		t->token_type = TT_EOF;
		return TT_EOF;
	}

	if (is_digit(t->source[t->source_index])) {
		tokenize_number(t);
		return TT_NUMBER;
	}

	if (is_alpha(t->source[t->source_index])) {
		tokenize_identifier(t);
		return TT_IDENT;
	}

	if ('"' == t->source[t->source_index]) {
		tokenize_string(t);
		return TT_STRING;
	}

	return TT_UNKNOWN;
}
Example #3
int main() {
    char buffer[BUFFERSIZE];
    char **path = init_path();
    int path_length = 1;
    while (1) {
        char *filename = 0;
        printf("%s", "whoosh > ");
        if (fgets(buffer, BUFFERSIZE, stdin) != NULL) {
            int command_length = 0;
            char **commands = tokenize_string(trim_whitespace(buffer), &command_length);
            if (check_if_built_in_command(commands, command_length) != 0) {
                if (strcmp(commands[0], "exit") == 0)
                    exit(0);
                else if (strcmp(commands[0], "pwd") == 0) {
                    char buff[PATH_MAX + 1];
                    printf("%s\n", getcwd(buff, PATH_MAX + 1));
                }
                else if (strcmp(commands[0], "cd") == 0) {
                    char *dir = command_length == 1 ? getenv("HOME") : commands[1];
                    if (chdir(dir) != 0)
                        error_output();
                }
                else if (strcmp(commands[0], "path") == 0) {
                    path = commands + 1;
                    path_length = command_length - 1;
                }
            }
            else if (command_length != 0 && (check_command_syntax(commands, command_length, &filename) != 0 ||
                     run_shell_commands(commands, command_length, path, path_length, filename) != 0))
                error_output();
        }
        else
            error_output();
    }
}
Example #4
void append_string_to_args( char * sin ,
                            int argc , char * argv[] ,
                            int * new_argc , char *** new_argv )
{
   int     n_argc , ii , ntok=0    ;
   char ** n_argv ,   ** stok=NULL ;
   char * cpt , * sss ;

   if( new_argc == NULL || new_argv == NULL ) return ;              /* error */

   if( sin == NULL || sin[0] == '\0' ){ *new_argv = NULL; return; } /* do nothing */

   tokenize_string( sin , &ntok , &stok ) ;
   if( stok == NULL || ntok < 1 ){ *new_argv = NULL; return; }      /* do nothing */

   /* copy input args to output */

   if( argc > 0 ){
      duplicate_string_list( argc , argv , &n_argv ) ;
      n_argc = argc ;
   } else {                                                    /* shouldn't happen */
      n_argv = NULL ;
      n_argc = 0 ;
   }

   /* append token list to output */

   appendto_string_list( &n_argc , &n_argv , ntok , stok ) ;
   free_string_list( ntok , stok ) ;

   *new_argc = n_argc ;  /* the results! */
   *new_argv = n_argv ;
   return ;
}
Example #5
int get_col_pos( std::string col, std::string header ){

  std::vector<std::string> vh = tokenize_string( header, "\t" );
  int i = 0;
  if( col[0] == '*' || col[0] == '&' )
    col = col.substr( 1, col.size() );

  for ( std::vector<std::string>::iterator itr = vh.begin() ; itr != vh.end(); itr++ )
  {
    //std::cout << *itr << " " << col << std::endl;
    if ( compare_without_star(*itr, col ) ){
      return i;
    }
    i++;
  }

  return -1;
  
}
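compare_without_star() is likewise not shown on this page. Judging from how Examples #1 and #5 use it (column names may carry a leading '*' marker), a plausible minimal version might look like the sketch below; treat it as a guess at the intent, not the original implementation:

#include <cassert>
#include <string>

// Compare two column names, ignoring a leading '*' marker on either side.
static bool compare_without_star( std::string a, std::string b )
{
  if ( !a.empty() && a[0] == '*' ) a = a.substr( 1 );
  if ( !b.empty() && b[0] == '*' ) b = b.substr( 1 );
  return a == b;
}

int main()
{
  assert(  compare_without_star( "*name", "name" ) );   // marker ignored
  assert( !compare_without_star( "name", "score" ) );
}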
Example #6
void prepend_string_to_args( char * sin ,
                             int argc , char * argv[] ,
                             int * new_argc , char *** new_argv )
{
   int     n_argc , ii , ntok=0    ;
   char ** n_argv ,   ** stok=NULL ;
   char * cpt , * sss ;

   if( new_argc == NULL || new_argv == NULL ) return ;              /* error */

   if( sin == NULL || sin[0] == '\0' ){ *new_argv = NULL; return; } /* do nothing */

   /*-- if no inputs after argv[0], prepend and append are identical --*/

   if( argc < 2 ){
      append_string_to_args( sin , argc , argv , new_argc , new_argv ) ;
      return ;
   }

   /*-- OK, must do it my way --*/

   tokenize_string( sin , &ntok , &stok ) ;
   if( stok == NULL || ntok < 1 ){ *new_argv = NULL; return; }      /* do nothing */

   /* copy first input arg to output */

   duplicate_string_list( 1 , argv , &n_argv ) ;
   n_argc = 1 ;

   /* append token list to output */

   appendto_string_list( &n_argc , &n_argv , ntok , stok ) ;
   free_string_list( ntok , stok ) ;

   /* append rest of input args to output */

   appendto_string_list( &n_argc , &n_argv , argc-1 , argv+1 ) ;

   *new_argc = n_argc ;  /* the results! */
   *new_argv = n_argv ;
   return ;
}
Example #7
void FileSystem::CleanFilename(const std::string &filename, std::string &cleaned) {
    int i, j;

    std::string intermediate = filename;

    // Replace backslashes with forward slashes
    i = 0;
    while(i < (int)intermediate.size()) {
        j = (int)intermediate.find('\\', i);
        if(j != -1) {
            intermediate[j] = '/';
            i = j+1;
        } else {
            break;
        }
    }
    
    // Find double-dots and deal with them
    std::list<std::string> pieces;
    tokenize_string(intermediate, "/", pieces);
    
    std::list<std::string>::reverse_iterator ritr;
    cleaned = "";
    for(ritr = pieces.rbegin(); ritr != pieces.rend(); ritr++) {
        i = 0;
        while((*ritr) == "..") {
            ritr++;
            i++;
            if(ritr == pieces.rend()) { return; }
        }
        while(i > 0) {
            ritr++;
            i--;
            if(ritr == pieces.rend()) { return; }
        }
        cleaned = (*ritr) + "/" + cleaned;
    }
}
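Example #7 depends on a tokenize_string() overload that fills a std::list with the '/'-separated path pieces. The standalone sketch below condenses the same cleanup (backslashes to forward slashes, then resolving ".." by walking the pieces back to front) so the behaviour can be tried in isolation; it illustrates the algorithm only and is not the FileSystem class itself:

#include <iostream>
#include <list>
#include <string>

static std::string clean_filename(std::string path) {
    // Backslashes become forward slashes.
    for (char &c : path)
        if (c == '\\') c = '/';

    // Split on '/' (a stand-in for the list-filling tokenize_string overload).
    std::list<std::string> pieces;
    std::string piece;
    for (char c : path) {
        if (c == '/') {
            if (!piece.empty()) pieces.push_back(piece);
            piece.clear();
        } else {
            piece.push_back(c);
        }
    }
    if (!piece.empty()) pieces.push_back(piece);

    // Walk the pieces back to front; each ".." swallows the next real piece.
    std::string cleaned;
    int skip = 0;
    for (std::list<std::string>::reverse_iterator it = pieces.rbegin();
         it != pieces.rend(); ++it) {
        if (*it == "..") { ++skip; continue; }
        if (skip > 0)    { --skip; continue; }
        cleaned = *it + "/" + cleaned;
    }
    return cleaned;
}

int main() {
    // "Data\Textures\..\file.png" collapses to "Data/file.png/".
    std::cout << clean_filename("Data\\Textures\\..\\file.png") << '\n';
}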
Example #8
token_t
tokenizer_next_string(tokenizer_t *tokenizer, string_type_t string_type)
{
  token_t tok;
  int len = 1;
  int newline = FALSE;
  const char *offset;
  int string;

  if (tokenizer->to_use == &tokenizer->previous) {
    tokenizer->to_use = &tokenizer->current;
    return tokenizer_current(tokenizer);
  }

  if (END == tokenizer->to_use->token.type &&
      tokenizer->to_use->valid)
  {
    return tokenizer_current(tokenizer);
  }

  offset = tokenizer->current.offset;
  tokenizer->previous = tokenizer->current;

  if (string_type == NAME && tokenize_name_string(offset, &tok, &len)) string = TRUE;
  else if (string_type == INTEGER && tokenize_integer_string(offset, &tok, &len)) string = TRUE;
  else if (string_type == HEX && tokenize_hex_string(offset, &tok, &len)) string = TRUE;
  else if (string_type == COMMENT_TEXT && tokenize_comment_text(offset, &tok, &len)) string = TRUE;
  else string = FALSE;

  if (string) {
    /* Do nothing */
  } else if (offset[0] == '\0') {
    tok.type = END;
  } else if (offset[0] == '<') {
    if (offset[1] == '?') {
      tok.type = PI_START;
      len = 2;
    } else if (offset[1] == '/') {
      tok.type = CLOSE_TAG_START;
      len = 2;
    } else if (offset[1] == '!' &&
               offset[2] == '-' &&
               offset[3] == '-'
    )
    {
      tok.type = COMMENT_START;
      len = 4;
    } else {
      tok.type = LT;
    }
  } else if (offset[0] == '&') {
    if (offset[1] == '#') {
      if (offset[2] == 'x') {
        tok.type = CHAR_REF_HEX;
        len = 3;
      } else {
        tok.type = CHAR_REF;
        len = 2;
      }
    } else {
      tok.type = AMP;
    }
  } else if (string_type == ATTR_VALUE_QUOT && (*offset != '"')) {
    len = tokenize_string(offset, &tok, "\"");
  } else if (string_type == ATTR_VALUE_APOS && (*offset != '\'')) {
    len = tokenize_string(offset, &tok, "'");
  } else if (string_type == CHARDATA) {
    len = tokenize_string(offset, &tok, "<&");
    /* TODO: Check for ]]> */
  } else if (offset[0] == ' ' ||
             offset[0] == '\t' ||
             offset[0] == '\n' ||
             offset[0] == '\r') {
    tok.type = SPACE;
    if (offset[0] == '\n' ||
        offset[0] == '\r') {
      newline = TRUE;
    }
  } else if (offset[0] == '>') {
    tok.type = GT;
  } else if (offset[0] == '?' && offset[1] == '>') {
    tok.type = PI_END;
    len = 2;
  } else if (offset[0] == '-' &&
             offset[1] == '-' &&
             offset[2] == '>')
  {
    tok.type = COMMENT_END;
    len = 3;
  } else if (offset[0] == '/') {
    tok.type = SLASH;
  } else if (offset[0] == '=') {
    tok.type = EQ;
  } else if (offset[0] == '\'') {
    tok.type = APOS;
  } else if (offset[0] == '"') {
    tok.type = QUOT;
  } else if (offset[0] == ';') {
    tok.type = SEMICOLON;
  } else {
    abort();
  }

  tokenizer->current.valid = TRUE;
  tokenizer->current.context = tokenizer->next_context;
  tokenizer->current.token = tok;
  tokenizer->current.offset += len;

  /* This seems ugly */
  if (newline) {
    tokenizer->next_context.line++;
    tokenizer->next_context.column = 0;
  } else {
    tokenizer->next_context.column += len;
  }
  tokenizer->next_context.offset += len;

  return tok;
}
Example #9
int main(int argc, char ** argv)
{
    int result = 0;
    if (argc != 4)
    {
        print_usage();
        result = 1;
    }
    else
    {
        FILE * token_output_f = 0;
        FILE * grammar_output_f = 0;
        FILE * grammar_source_f = 0;
        
        grammar_source_f = fopen(argv[3],"r");
        if (grammar_source_f == 0) { result = 1; }
        if (result == 0)
        {
            literal literals[1024];
            int t = 0;
            char l[2048];
            while (1)
            {
                if (fgets(l,2048,grammar_source_f) != 0)
                {
                    char * c = l;
                    while (1)
                    {
                        char * c1 = strchr(c,'`');
                        if (c1 != 0)
                        {
                            char * c2 = strchr(c1+1,'`');
                            if (c2 != 0)
                            {
                                literals[t].string = copy_string(c1+1,c2-c1-1);
                                literals[t].token = tokenize_string(literals[t].string);
                                t += 1;
                                c = c2+1;
                            }
                            else
                                break;
                        }
                        else
                            break;
                    }
                }
                else
                {
                    break;
                }
            }
            literals[t].string = 0;
            literals[t].token = 0;
            qsort(literals,t,sizeof(literal),cmp_literal);
            {
                int p = 1;
                int i = 1;
                while (literals[i].string != 0)
                {
                    if (strcmp(literals[p-1].string,literals[i].string) != 0)
                    {
                        literals[p] = literals[i];
                        p += 1;
                    }
                    i += 1;
                }
                literals[p].string = 0;
                literals[p].token = 0;
                t = p;
            }
            token_output_f = fopen(argv[2],"w");
            if (token_output_f != 0)
            {
                int i = 0;
                while (literals[i].string != 0)
                {
                    fprintf(token_output_f,"    { \"%s\", %s },\n",literals[i].string,literals[i].token);
                    i += 1;
                }
                fclose(token_output_f);
            }
            else
                result = 1;
            if (result == 0)
            {
                grammar_output_f = fopen(argv[1],"w");
                if (grammar_output_f != 0)
                {
                    int i = 0;
                    while (literals[i].string != 0)
                    {
                        fprintf(grammar_output_f,"%%token %s\n",literals[i].token);
                        i += 1;
                    }
                    rewind(grammar_source_f);
                    while (1)
                    {
                        if (fgets(l,2048,grammar_source_f) != 0)
                        {
                            char * c = l;
                            while (1)
                            {
                                char * c1 = strchr(c,'`');
                                if (c1 != 0)
                                {
                                    char * c2 = strchr(c1+1,'`');
                                    if (c2 != 0)
                                    {
                                        literal key;
                                        literal * replacement = 0;
                                        key.string = copy_string(c1+1,c2-c1-1);
                                        key.token = 0;
                                        replacement = (literal*)bsearch(
                                            &key,literals,t,sizeof(literal),cmp_literal);
                                        *c1 = 0;
                                        fprintf(grammar_output_f,"%s%s",c,replacement->token);
                                        c = c2+1;
                                    }
                                    else
                                    {
                                        fprintf(grammar_output_f,"%s",c);
                                        break;
                                    }
                                }
                                else
                                {
                                    fprintf(grammar_output_f,"%s",c);
                                    break;
                                }
                            }
                        }
                        else
                        {
                            break;
                        }
                    }
                    fclose(grammar_output_f);
                }
                else
                    result = 1;
            }
        }
        if (result != 0)
        {
            perror("yyacc");
        }
    }
    return result;
}
Example #10
/**
 * Execute query.
 * @param query the query to execute.
 * @param in_playlist set to TRUE to search only songs in the playlist.
 *
 * @returns the search result in a #MpdData list.
 */
MpdData *advanced_search(const gchar * query, int in_playlist)
{
    MpdData *data_return = NULL;
    gchar **text = g_regex_split(search_regex, query, 0);
    int i = 0;
    gboolean found = FALSE;
    for (i = 0; text && text[i]; i++)
    {
        int type;
        /* Or sign, if hit, a new query is started */
        if (strcmp(text[i], "||") == 0)
        {
            MpdData *data;
            /* Commit the currently in active search and append the results */
            if (in_playlist)
                data = mpd_playlist_search_commit(connection);
            else
                data = mpd_database_search_commit(connection);
            data_return = mpd_data_concatenate(data_return, data);
            found = FALSE;
            continue;
        }
        /* empty element */
        if (text[i][0] == '\0')
            continue;

        /* Parse the tag name. */
        type = mpd_misc_get_tag_by_name(g_strstrip(text[i]));
        if (type != MPD_TAG_NOT_FOUND && text[i + 1])
        {
            gchar **split = tokenize_string(text[i + 1]);
            int j;
            for (j = 0; split && split[j]; j++)
            {
                if (!found)
                {
                    if (in_playlist)
                        mpd_playlist_search_start(connection, FALSE);
                    else
                        mpd_database_search_start(connection, FALSE);
                    found = TRUE;
                }
                if (in_playlist)
                    mpd_playlist_search_add_constraint(connection, type, g_strstrip(split[j]));
                else
                    mpd_database_search_add_constraint(connection, type, g_strstrip(split[j]));
            }
            if (split)
                g_strfreev(split);
            i++;
        } else
        {
            gchar **split = tokenize_string(text[i]);
            int j;
            for (j = 0; split && split[j]; j++)
            {
                if (!found)
                {
                    if (in_playlist)
                        mpd_playlist_search_start(connection, FALSE);
                    else
                        mpd_database_search_start(connection, FALSE);
                    found = TRUE;
                }
                if (in_playlist)
                    mpd_playlist_search_add_constraint(connection, MPD_TAG_ITEM_ANY, split[j]);
                else
                    mpd_database_search_add_constraint(connection, MPD_TAG_ITEM_ANY, split[j]);
            }
            if (split)
                g_strfreev(split);
        }
    }
    if (text)
        g_strfreev(text);
    /* Execute the active search and append the results */
    if (found)
    {
        MpdData *data;
        if (in_playlist)
            data = mpd_playlist_search_commit(connection);
        else
            data = mpd_database_search_commit(connection);
        data_return = mpd_data_concatenate(data_return, data);
    }
    /* remove possible duplicates (because of concatenating queries) */
    return misc_mpddata_remove_duplicate_songs(data_return);
}
Example #11
/** Parse and validate the ASCII-encoded v2 descriptor in <b>desc</b>,
 * write the parsed descriptor to the newly allocated *<b>parsed_out</b>, the
 * binary descriptor ID of length DIGEST_LEN to <b>desc_id_out</b>, the
 * encrypted introduction points to the newly allocated
 * *<b>intro_points_encrypted_out</b>, their encrypted size to
 * *<b>intro_points_encrypted_size_out</b>, the size of the encoded descriptor
 * to *<b>encoded_size_out</b>, and a pointer to the possibly next
 * descriptor to *<b>next_out</b>; return 0 for success (including validation)
 * and -1 for failure.
 *
 * If <b>as_hsdir</b> is 1, we're parsing this as an HSDir, and we should
 * be strict about time formats.
 */
int
rend_parse_v2_service_descriptor(rend_service_descriptor_t **parsed_out,
                                 char *desc_id_out,
                                 char **intro_points_encrypted_out,
                                 size_t *intro_points_encrypted_size_out,
                                 size_t *encoded_size_out,
                                 const char **next_out, const char *desc,
                                 int as_hsdir)
{
  rend_service_descriptor_t *result =
                            tor_malloc_zero(sizeof(rend_service_descriptor_t));
  char desc_hash[DIGEST_LEN];
  const char *eos;
  smartlist_t *tokens = smartlist_new();
  directory_token_t *tok;
  char secret_id_part[DIGEST_LEN];
  int i, version, num_ok=1;
  smartlist_t *versions;
  char public_key_hash[DIGEST_LEN];
  char test_desc_id[DIGEST_LEN];
  memarea_t *area = NULL;
  const int strict_time_fmt = as_hsdir;

  tor_assert(desc);
  /* Check if desc starts correctly. */
  if (strcmpstart(desc, "rendezvous-service-descriptor ")) {
    log_info(LD_REND, "Descriptor does not start correctly.");
    goto err;
  }
  /* Compute descriptor hash for later validation. */
  if (router_get_hash_impl(desc, strlen(desc), desc_hash,
                           "rendezvous-service-descriptor ",
                           "\nsignature", '\n', DIGEST_SHA1) < 0) {
    log_warn(LD_REND, "Couldn't compute descriptor hash.");
    goto err;
  }
  /* Determine end of string. */
  eos = strstr(desc, "\nrendezvous-service-descriptor ");
  if (!eos)
    eos = desc + strlen(desc);
  else
    eos = eos + 1;
  /* Check length. */
  if (eos-desc > REND_DESC_MAX_SIZE) {
    /* XXXX+ If we are parsing this descriptor as a server, this
     * should be a protocol warning. */
    log_warn(LD_REND, "Descriptor length is %d which exceeds "
             "maximum rendezvous descriptor size of %d bytes.",
             (int)(eos-desc), REND_DESC_MAX_SIZE);
    goto err;
  }
  /* Tokenize descriptor. */
  area = memarea_new();
  if (tokenize_string(area, desc, eos, tokens, desc_token_table, 0)) {
    log_warn(LD_REND, "Error tokenizing descriptor.");
    goto err;
  }
  /* Set next to next descriptor, if available. */
  *next_out = eos;
  /* Set length of encoded descriptor. */
  *encoded_size_out = eos - desc;
  /* Check min allowed length of token list. */
  if (smartlist_len(tokens) < 7) {
    log_warn(LD_REND, "Impossibly short descriptor.");
    goto err;
  }
  /* Parse base32-encoded descriptor ID. */
  tok = find_by_keyword(tokens, R_RENDEZVOUS_SERVICE_DESCRIPTOR);
  tor_assert(tok == smartlist_get(tokens, 0));
  tor_assert(tok->n_args == 1);
  if (!rend_valid_descriptor_id(tok->args[0])) {
    log_warn(LD_REND, "Invalid descriptor ID: '%s'", tok->args[0]);
    goto err;
  }
  if (base32_decode(desc_id_out, DIGEST_LEN,
                    tok->args[0], REND_DESC_ID_V2_LEN_BASE32) < 0) {
    log_warn(LD_REND, "Descriptor ID contains illegal characters: %s",
             tok->args[0]);
    goto err;
  }
  /* Parse descriptor version. */
  tok = find_by_keyword(tokens, R_VERSION);
  tor_assert(tok->n_args == 1);
  result->version =
    (int) tor_parse_long(tok->args[0], 10, 0, INT_MAX, &num_ok, NULL);
  if (result->version != 2 || !num_ok) {
    /* If it's <2, it shouldn't be under this format.  If the number
     * is greater than 2, we bumped it because we broke backward
     * compatibility.  See how version numbers in our other formats
     * work. */
    log_warn(LD_REND, "Unrecognized descriptor version: %s",
             escaped(tok->args[0]));
    goto err;
  }
  /* Parse public key. */
  tok = find_by_keyword(tokens, R_PERMANENT_KEY);
  result->pk = tok->key;
  tok->key = NULL; /* Prevent free */
  /* Parse secret ID part. */
  tok = find_by_keyword(tokens, R_SECRET_ID_PART);
  tor_assert(tok->n_args == 1);
  if (strlen(tok->args[0]) != REND_SECRET_ID_PART_LEN_BASE32 ||
      strspn(tok->args[0], BASE32_CHARS) != REND_SECRET_ID_PART_LEN_BASE32) {
    log_warn(LD_REND, "Invalid secret ID part: '%s'", tok->args[0]);
    goto err;
  }
  if (base32_decode(secret_id_part, DIGEST_LEN, tok->args[0], 32) < 0) {
    log_warn(LD_REND, "Secret ID part contains illegal characters: %s",
             tok->args[0]);
    goto err;
  }
  /* Parse publication time -- up-to-date check is done when storing the
   * descriptor. */
  tok = find_by_keyword(tokens, R_PUBLICATION_TIME);
  tor_assert(tok->n_args == 1);
  if (parse_iso_time_(tok->args[0], &result->timestamp,
                      strict_time_fmt, 0) < 0) {
    log_warn(LD_REND, "Invalid publication time: '%s'", tok->args[0]);
    goto err;
  }
  /* Parse protocol versions. */
  tok = find_by_keyword(tokens, R_PROTOCOL_VERSIONS);
  tor_assert(tok->n_args == 1);
  versions = smartlist_new();
  smartlist_split_string(versions, tok->args[0], ",",
                         SPLIT_SKIP_SPACE|SPLIT_IGNORE_BLANK, 0);
  for (i = 0; i < smartlist_len(versions); i++) {
    version = (int) tor_parse_long(smartlist_get(versions, i),
                                   10, 0, INT_MAX, &num_ok, NULL);
    if (!num_ok) /* It's a string; let's ignore it. */
      continue;
    if (version >= REND_PROTOCOL_VERSION_BITMASK_WIDTH)
      /* Avoid undefined left-shift behaviour. */
      continue;
    result->protocols |= 1 << version;
  }
  SMARTLIST_FOREACH(versions, char *, cp, tor_free(cp));
  smartlist_free(versions);
  /* Parse encrypted introduction points. Don't verify. */
  tok = find_opt_by_keyword(tokens, R_INTRODUCTION_POINTS);
  if (tok) {
    if (strcmp(tok->object_type, "MESSAGE")) {
      log_warn(LD_DIR, "Bad object type: introduction points should be of "
               "type MESSAGE");
      goto err;
    }
    *intro_points_encrypted_out = tor_memdup(tok->object_body,
                                             tok->object_size);
    *intro_points_encrypted_size_out = tok->object_size;
  } else {
    *intro_points_encrypted_out = NULL;
    *intro_points_encrypted_size_out = 0;
  }
  /* Parse and verify signature. */
  tok = find_by_keyword(tokens, R_SIGNATURE);
  if (check_signature_token(desc_hash, DIGEST_LEN, tok, result->pk, 0,
                            "v2 rendezvous service descriptor") < 0)
    goto err;
  /* Verify that descriptor ID belongs to public key and secret ID part. */
  if (crypto_pk_get_digest(result->pk, public_key_hash) < 0) {
    log_warn(LD_REND, "Unable to compute rend descriptor public key digest");
    goto err;
  }
  rend_get_descriptor_id_bytes(test_desc_id, public_key_hash,
                               secret_id_part);
  if (tor_memneq(desc_id_out, test_desc_id, DIGEST_LEN)) {
    log_warn(LD_REND, "Parsed descriptor ID does not match "
             "computed descriptor ID.");
    goto err;
  }
  goto done;
 err:
  rend_service_descriptor_free(result);
  result = NULL;
 done:
  if (tokens) {
    SMARTLIST_FOREACH(tokens, directory_token_t *, t, token_clear(t));
    smartlist_free(tokens);
  }
  if (area)
    memarea_drop_all(area);
  *parsed_out = result;
  if (result)
    return 0;
  return -1;
}
Example #12
/** Parse the content of a client_key file in <b>ckstr</b> and add
 * rend_authorized_client_t's for each parsed client to
 * <b>parsed_clients</b>. Return the number of parsed clients as result
 * or -1 for failure. */
int
rend_parse_client_keys(strmap_t *parsed_clients, const char *ckstr)
{
  int result = -1;
  smartlist_t *tokens;
  directory_token_t *tok;
  const char *current_entry = NULL;
  memarea_t *area = NULL;
  char *err_msg = NULL;
  if (!ckstr || strlen(ckstr) == 0)
    return -1;
  tokens = smartlist_new();
  /* Begin parsing with first entry, skipping comments or whitespace at the
   * beginning. */
  area = memarea_new();
  current_entry = eat_whitespace(ckstr);
  while (!strcmpstart(current_entry, "client-name ")) {
    rend_authorized_client_t *parsed_entry;
    /* Determine end of string. */
    const char *eos = strstr(current_entry, "\nclient-name ");
    if (!eos)
      eos = current_entry + strlen(current_entry);
    else
      eos = eos + 1;
    /* Free tokens and clear token list. */
    SMARTLIST_FOREACH(tokens, directory_token_t *, t, token_clear(t));
    smartlist_clear(tokens);
    memarea_clear(area);
    /* Tokenize string. */
    if (tokenize_string(area, current_entry, eos, tokens,
                        client_keys_token_table, 0)) {
      log_warn(LD_REND, "Error tokenizing client keys file.");
      goto err;
    }
    /* Advance to next entry, if available. */
    current_entry = eos;
    /* Check minimum allowed length of token list. */
    if (smartlist_len(tokens) < 2) {
      log_warn(LD_REND, "Impossibly short client key entry.");
      goto err;
    }
    /* Parse client name. */
    tok = find_by_keyword(tokens, C_CLIENT_NAME);
    tor_assert(tok == smartlist_get(tokens, 0));
    tor_assert(tok->n_args == 1);

    if (!rend_valid_client_name(tok->args[0])) {
      log_warn(LD_CONFIG, "Illegal client name: %s. (Length must be "
               "between 1 and %d, and valid characters are "
               "[A-Za-z0-9+-_].)", tok->args[0], REND_CLIENTNAME_MAX_LEN);
      goto err;
    }
    /* Check if client name is duplicate. */
    if (strmap_get(parsed_clients, tok->args[0])) {
      log_warn(LD_CONFIG, "HiddenServiceAuthorizeClient contains a "
               "duplicate client name: '%s'. Ignoring.", tok->args[0]);
      goto err;
    }
    parsed_entry = tor_malloc_zero(sizeof(rend_authorized_client_t));
    parsed_entry->client_name = tor_strdup(tok->args[0]);
    strmap_set(parsed_clients, parsed_entry->client_name, parsed_entry);
    /* Parse client key. */
    tok = find_opt_by_keyword(tokens, C_CLIENT_KEY);
    if (tok) {
      parsed_entry->client_key = tok->key;
      tok->key = NULL; /* Prevent free */
    }

    /* Parse descriptor cookie. */
    tok = find_by_keyword(tokens, C_DESCRIPTOR_COOKIE);
    tor_assert(tok->n_args == 1);
    if (rend_auth_decode_cookie(tok->args[0], parsed_entry->descriptor_cookie,
                                NULL, &err_msg) < 0) {
      tor_assert(err_msg);
      log_warn(LD_REND, "%s", err_msg);
      tor_free(err_msg);
      goto err;
    }
  }
  result = strmap_size(parsed_clients);
  goto done;
 err:
  result = -1;
 done:
  /* Free tokens and clear token list. */
  SMARTLIST_FOREACH(tokens, directory_token_t *, t, token_clear(t));
  smartlist_free(tokens);
  if (area)
    memarea_drop_all(area);
  return result;
}
Example #13
/** Parse the encoded introduction points in <b>intro_points_encoded</b> of
 * length <b>intro_points_encoded_size</b> and write the result to the
 * descriptor in <b>parsed</b>; return the number of successfully parsed
 * introduction points or -1 in case of a failure. */
int
rend_parse_introduction_points(rend_service_descriptor_t *parsed,
                               const char *intro_points_encoded,
                               size_t intro_points_encoded_size)
{
  const char *current_ipo, *end_of_intro_points;
  smartlist_t *tokens = NULL;
  directory_token_t *tok;
  rend_intro_point_t *intro;
  extend_info_t *info;
  int result, num_ok=1;
  memarea_t *area = NULL;
  tor_assert(parsed);
  /** Function may only be invoked once. */
  tor_assert(!parsed->intro_nodes);
  if (!intro_points_encoded || intro_points_encoded_size == 0) {
    log_warn(LD_REND, "Empty or zero size introduction point list");
    goto err;
  }
  /* Consider one intro point after the other. */
  current_ipo = intro_points_encoded;
  end_of_intro_points = intro_points_encoded + intro_points_encoded_size;
  tokens = smartlist_new();
  parsed->intro_nodes = smartlist_new();
  area = memarea_new();

  while (!fast_memcmpstart(current_ipo, end_of_intro_points-current_ipo,
                      "introduction-point ")) {
    /* Determine end of string. */
    const char *eos = tor_memstr(current_ipo, end_of_intro_points-current_ipo,
                                 "\nintroduction-point ");
    if (!eos)
      eos = end_of_intro_points;
    else
      eos = eos+1;
    tor_assert(eos <= intro_points_encoded+intro_points_encoded_size);
    /* Free tokens and clear token list. */
    SMARTLIST_FOREACH(tokens, directory_token_t *, t, token_clear(t));
    smartlist_clear(tokens);
    memarea_clear(area);
    /* Tokenize string. */
    if (tokenize_string(area, current_ipo, eos, tokens, ipo_token_table, 0)) {
      log_warn(LD_REND, "Error tokenizing introduction point");
      goto err;
    }
    /* Advance to next introduction point, if available. */
    current_ipo = eos;
    /* Check minimum allowed length of introduction point. */
    if (smartlist_len(tokens) < 5) {
      log_warn(LD_REND, "Impossibly short introduction point.");
      goto err;
    }
    /* Allocate new intro point and extend info. */
    intro = tor_malloc_zero(sizeof(rend_intro_point_t));
    info = intro->extend_info = tor_malloc_zero(sizeof(extend_info_t));
    /* Parse identifier. */
    tok = find_by_keyword(tokens, R_IPO_IDENTIFIER);
    if (base32_decode(info->identity_digest, DIGEST_LEN,
                      tok->args[0], REND_INTRO_POINT_ID_LEN_BASE32) < 0) {
      log_warn(LD_REND, "Identity digest contains illegal characters: %s",
               tok->args[0]);
      rend_intro_point_free(intro);
      goto err;
    }
    /* Write identifier to nickname. */
    info->nickname[0] = '$';
    base16_encode(info->nickname + 1, sizeof(info->nickname) - 1,
                  info->identity_digest, DIGEST_LEN);
    /* Parse IP address. */
    tok = find_by_keyword(tokens, R_IPO_IP_ADDRESS);
    if (tor_addr_parse(&info->addr, tok->args[0])<0) {
      log_warn(LD_REND, "Could not parse introduction point address.");
      rend_intro_point_free(intro);
      goto err;
    }
    if (tor_addr_family(&info->addr) != AF_INET) {
      log_warn(LD_REND, "Introduction point address was not ipv4.");
      rend_intro_point_free(intro);
      goto err;
    }

    /* Parse onion port. */
    tok = find_by_keyword(tokens, R_IPO_ONION_PORT);
    info->port = (uint16_t) tor_parse_long(tok->args[0],10,1,65535,
                                           &num_ok,NULL);
    if (!info->port || !num_ok) {
      log_warn(LD_REND, "Introduction point onion port %s is invalid",
               escaped(tok->args[0]));
      rend_intro_point_free(intro);
      goto err;
    }
    /* Parse onion key. */
    tok = find_by_keyword(tokens, R_IPO_ONION_KEY);
    if (!crypto_pk_public_exponent_ok(tok->key)) {
      log_warn(LD_REND,
               "Introduction point's onion key had invalid exponent.");
      rend_intro_point_free(intro);
      goto err;
    }
    info->onion_key = tok->key;
    tok->key = NULL; /* Prevent free */
    /* Parse service key. */
    tok = find_by_keyword(tokens, R_IPO_SERVICE_KEY);
    if (!crypto_pk_public_exponent_ok(tok->key)) {
      log_warn(LD_REND,
               "Introduction point key had invalid exponent.");
      rend_intro_point_free(intro);
      goto err;
    }
    intro->intro_key = tok->key;
    tok->key = NULL; /* Prevent free */
    /* Add extend info to list of introduction points. */
    smartlist_add(parsed->intro_nodes, intro);
  }
  result = smartlist_len(parsed->intro_nodes);
  goto done;

 err:
  result = -1;

 done:
  /* Free tokens and clear token list. */
  if (tokens) {
    SMARTLIST_FOREACH(tokens, directory_token_t *, t, token_clear(t));
    smartlist_free(tokens);
  }
  if (area)
    memarea_drop_all(area);

  return result;
}
Example #14
bool num_comparison( expression * ep, std::string header, std::string row )
{
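  // Evaluate a numeric comparison node (==, !=, <=, >=, <, >) against a single
  // tab-separated data row, resolving column names to cell values via get_col_pos().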
  if ( !ep )
    return false;

  std::vector<std::string> cols = tokenize_string( header, "\t" );

  expression* node[2];
  node[0] = ep->values[0].ep;
  node[1] = ep->values[1].ep;
  int v_lhs = 0;
  int v_rhs = 0;
  int ops[2];

  // tokenize row
  std::vector<std::string> row_v;
  char *s = toChars( row );
  char *token;

  token = strtok( s, "\t" );
  while ( token )
  {
    row_v.push_back( std::string( token ) );
    token = strtok ( NULL, "\t" );
  }
  if ( row_v.size() != cols.size() )
    return false;

  // See whether each node is a plain number or a column name;
  // for a column name we need to look up the actual value in the current row.
  for ( int i = 0; i < 2; i++ )
  {
    if ( node[i]->func != OP_COLNAME && node[i]->func != OP_NUMBER )
    {
      ops[i] = arithmetic( node[i], header, row );
    }

    else if ( node[i]->func == OP_NUMBER ) 
    {
      ops[i] = node[i]->values[0].num;
    }
    else if ( node[i]->func == OP_COLNAME )
    {
      int pos = get_col_pos( std::string( node[i]->values[0].name ), header );
      if ( pos < 0 )
      {
        std::cout << "column does not exist!" << std::endl;
        return false;
      }
      ops[i] = atoi( row_v[pos].c_str() );
    }
  }

    // start comparison
  switch( ep->func ){

    case OP_EQUAL:  return ops[0] == ops[1]; break;
    case OP_NOTEQ:  return ops[0] != ops[1]; break;
    case OP_LEQ:    return ops[0] <= ops[1]; break;
    case OP_GEQ:    return ops[0] >= ops[1]; break;
    case OP_LT:     return ops[0] < ops[1]; break;
    case OP_GT:     return ops[0] > ops[1]; break;
    default : 
              std::cout << "OPERATOR NOT SUPPORTED! " << std::endl;
              return false;
              break;

  }
}
Example #15
bool string_comparison( expression * ep, std::string header, std::string row  )
{
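  // Evaluate a string comparison node against a single tab-separated data row,
  // resolving column names to their cell values via get_col_pos().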
//cout << "here " << std::endl;
  if ( !ep )
    return false;


  std::vector<std::string> cols = tokenize_string( header, "\t" );

  //cout << "what?" << std::endl;
  expression* node[2];
  node[0] = ep->values[0].ep;
  node[1] = ep->values[1].ep;
  std::string ops[2];

  std::vector<std::string> row_v;
  char *s = toChars( row );
  char *token;

  token = strtok( s, "\t" );
  while ( token )
  {
    row_v.push_back( std::string( token ) );
    token = strtok ( NULL, "\t" );
  }
  if ( row_v.size() != cols.size() )
    return false;

  // See whether each node is a string literal or a column name;
  // for a column name we need to look up the actual value in the current row.
  for ( int i = 0; i < 2; i++ )
  {
    if ( node[i]->func == OP_STRING )
    {
      ops[i] = std::string( node[i]->values[0].data );
      //cout << "!" << ops[i] << std::endl;
    }
    else if ( node[i]->func == OP_COLNAME )
    {
      //cout <<  node[i]->values[0].name << std::endl;
      int pos = get_col_pos( node[i]->values[0].name, header );
      if ( pos < 0 )
      {
        std::cout << "column does not exist!" << std::endl;
        return false;
      }

      ops[i] = std::string( row_v[pos] );

    }
  }

  switch( ep->func ){

    case OP_EQUAL:  return  (ops[0].compare( ops[1] ) == 0 ); break;
    case OP_NOTEQ:  return  (ops[0].compare( ops[1] ) != 0 ); break;
    case OP_LEQ:    return  (ops[0].compare( ops[1] ) <= 0 ); break;
    case OP_GEQ:    return  (ops[0].compare( ops[1] ) >= 0 ); break;
    case OP_LT:     return  (ops[0].compare( ops[1] ) < 0 ); break;
    case OP_GT:     return  (ops[0].compare( ops[1] ) > 0 ); break;
    default : 
              std::cout << "OPERATOR ON std::string NOT SUPPORTED! " << std::endl;
              return false;
              break;

  }
}