Example #1
void test3(){
	struct BayesSignature * signature;
	signature = new_Signature_i(2, "6f0c36d455f86922ae69808d00bcbadd");

	struct Token * t1, * t2;
	t1 = new_Token("test", 0.7, 4);
	t2 = new_Token("tada", 0.6, 4);
	add_Token(signature, t1);
	add_Token(signature, t2);
	add_threshold(signature, 2.04);

	print_signature(signature);
}
Example #2
Operator *new_Operator( char *name, int norp )

{

  int i;

  Operator *result = ( Operator * ) calloc( 1, sizeof( Operator ) );
  CHECK_PTR(result);

  if ( name ) {
    result->name = new_Token( strlen( name ) + 1 );
    CHECK_PTR( result->name );
    strcpy( result->name, name );
  } else {
    result->name = NULL;
  }

  result->num_vars = 0;
  result->number_of_real_params = norp;

  for ( i = 0; i < MAX_VARS; i++ ) {
    result->removed[i] = FALSE;
  }

  result->preconds = NULL;

  result->effects = NULL;

  result->hard = TRUE;

  return result;

}
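
The FF-planner examples on this page (#2 through #9 and #12 through #16) all call new_Token( n ) as a plain string allocator whose definition is not shown here. A minimal sketch consistent with these call sites — a zero-initialized char buffer in the style of FF's memory.c — could look like this (an assumption inferred from usage, not the verified original):

char *new_Token( int len )

{

  /* sketch: assumed allocator matching the call sites above;
   * returns a zeroed buffer for a string of at most len - 1 chars
   */
  char *tok = ( char * ) calloc( len, sizeof( char ) );
  CHECK_PTR( tok );  /* exits on allocation failure */

  return tok;

}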
Example #3
Operator *new_Operator( char *name, int norp )

{

  int i;

  Operator *result = ( Operator * ) calloc( 1, sizeof( Operator ) );
  CHECK_PTR(result);

  if ( name ) {
    result->name = new_Token( strlen( name ) + 1 );
    CHECK_PTR( result->name );
    strcpy( result->name, name );
  } else {
    result->name = NULL;
  }

  result->num_vars = 0;
  result->number_of_real_params = norp;

  for ( i = 0; i < MAX_VARS; i++ ) {
    result->inst_table[i] = -1;
  }

  result->num_preconds = 0;
  result->num_adds = 0;
  result->num_dels = 0;

  result->out = FALSE;

  return result;

}
Example #4
char *copy_Token( char *s )

{

  char *d = new_Token( strlen( s ) + 1 );
  strcpy(d, s);

  return d;

}
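
Given that allocator, copy_Token is effectively strdup: the caller owns the returned buffer and must free it. A hypothetical call site:

  char *name = copy_Token( "at-robot" );  /* heap copy of the literal */
  printf( "%s\n", name );
  free( name );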
Example #5
type_tree new_type_tree( char *name )

{

  type_tree act_type;
  
  if (!name) {
    return NULL;
  }

  act_type = ( type_tree ) calloc( 1, sizeof( type_tree_elt ) );
  CHECK_PTR(act_type);

  act_type->name = new_Token( strlen( name ) + 1 );
  strcpy( act_type->name, name );
  act_type->sub_types = NULL;

  return act_type;

}
Example #6
TokenList *copy_TokenList( TokenList *source )

{

  TokenList *temp;

  if ( !source ) {
    temp = NULL;
  } else {
    temp = new_TokenList();
    if ( source->item ) {
      temp->item = new_Token( strlen( source->item ) + 1 );
      strcpy( temp->item, source->item );
    }
    temp->next = copy_TokenList( source->next );
  }

  return temp;

}
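
The copy is deep: each item is re-allocated via new_Token and strcpy'd, so freeing the source list leaves the copy intact. A hypothetical caller (free_TokenList assumed to exist, as in FF's memory.c):

  TokenList *src = new_TokenList();
  src->item = copy_Token( "move" );
  src->next = new_TokenList();
  src->next->item = copy_Token( "truck1" );

  TokenList *dst = copy_TokenList( src );
  free_TokenList( src );  /* dst and its items remain valid */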
Example #7
TokenList *copy_complete_TokenList( TokenList *source, 
				    TokenList **end )

{

  TokenList *temp;

  if ( !source ) {
    temp = NULL;
  } else {
    temp = new_TokenList();
    if ( source->item ) {
      temp->item = new_Token( strlen( source->item ) + 1 );
      strcpy( temp->item, source->item );
    }
    temp->next = copy_complete_TokenList( source->next, end );
    if ( !temp->next ) {
      *end = temp;
    }
  }

  return temp;

}
Example #8
PlOperator *new_axiom_op_list( void )

{

  static int count;
  char *name;
  PlOperator *ret;

  /* WARNING: count must stay below 10000; the %4d field below would
   * otherwise overflow the name buffer allocated for it
   */
  count++;
  if ( count == 10000 ) {
    printf("\ntoo many axioms! look into memory.c, line 157\n\n");
    exit( 1 );
  }
  name = new_Token(strlen(HIDDEN_STR)+strlen(AXIOM_STR)+4+1);
  sprintf(name, "%s%s%4d", HIDDEN_STR, AXIOM_STR, count);

  ret = new_PlOperator(name);
  free(name);

  return ret;

}
Example #9
PlOperator *new_PlOperator( char *name )

{

  PlOperator *result = ( PlOperator * ) calloc( 1, sizeof( PlOperator ) );
  CHECK_PTR(result);

  if ( name ) {
    result->name = new_Token(strlen(name)+1);
    CHECK_PTR(result->name);
    strcpy(result->name, name);
  } else {
    result->name = NULL;
  }

  result->params = NULL;
  result->preconds = NULL;
  result->effects = NULL;
  result->number_of_real_params = 0;
  result->next = NULL;

  return result;

}
Example #10
// ------------------------------------------------------------------------------------------------
void Tokenize(TokenList& output_tokens, const char* input)
{
    ai_assert(input);

    // line and column numbers are one-based
    unsigned int line = 1;
    unsigned int column = 1;

    bool comment = false;
    bool in_double_quotes = false;
    bool pending_data_token = false;

    const char* token_begin = NULL, *token_end = NULL;
    for (const char* cur = input;*cur;column += (*cur == '\t' ? ASSIMP_FBX_TAB_WIDTH : 1), ++cur) {
        const char c = *cur;

        if (IsLineEnd(c)) {
            comment = false;

            column = 0;
            ++line;
        }

        if(comment) {
            continue;
        }

        if(in_double_quotes) {
            if (c == '\"') {
                in_double_quotes = false;
                token_end = cur;

                ProcessDataToken(output_tokens,token_begin,token_end,line,column);
                pending_data_token = false;
            }
            continue;
        }

        switch(c)
        {
        case '\"':
            if (token_begin) {
                TokenizeError("unexpected double-quote", line, column);
            }
            token_begin = cur;
            in_double_quotes = true;
            continue;

        case ';':
            ProcessDataToken(output_tokens,token_begin,token_end,line,column);
            comment = true;
            continue;

        case '{':
            ProcessDataToken(output_tokens,token_begin,token_end, line, column);
            output_tokens.push_back(new_Token(cur,cur+1,TokenType_OPEN_BRACKET,line,column));
            continue;

        case '}':
            ProcessDataToken(output_tokens,token_begin,token_end,line,column);
            output_tokens.push_back(new_Token(cur,cur+1,TokenType_CLOSE_BRACKET,line,column));
            continue;

        case ',':
            if (pending_data_token) {
                ProcessDataToken(output_tokens,token_begin,token_end,line,column,TokenType_DATA,true);
            }
            output_tokens.push_back(new_Token(cur,cur+1,TokenType_COMMA,line,column));
            continue;

        case ':':
            if (pending_data_token) {
                ProcessDataToken(output_tokens,token_begin,token_end,line,column,TokenType_KEY,true);
            }
            else {
                TokenizeError("unexpected colon", line, column);
            }
            continue;
        }

        if (IsSpaceOrNewLine(c)) {

            if (token_begin) {
                // peek ahead and check if the next token is a colon, in
                // which case this counts as a KEY token.
                TokenType type = TokenType_DATA;
                for (const char* peek = cur;  *peek && IsSpaceOrNewLine(*peek); ++peek) {
                    if (*peek == ':') {
                        type = TokenType_KEY;
                        cur = peek;
                        break;
                    }
                }

                ProcessDataToken(output_tokens,token_begin,token_end,line,column,type);
            }

            pending_data_token = false;
        }
        else {
            token_end = cur;
            if (!token_begin) {
                token_begin = cur;
            }

            pending_data_token = true;
        }
    }
}
Example #11
// ------------------------------------------------------------------------------------------------
bool ReadScope(TokenList& output_tokens, const char* input, const char*& cursor, const char* end, bool const is64bits)
{
    // the first word contains the offset at which this block ends
    const uint64_t end_offset = is64bits ? ReadDoubleWord(input, cursor, end) : ReadWord(input, cursor, end);

    // we may get 0 if reading reached the end of the file -
    // fbx files have a mysterious extra footer which I don't know
    // how to extract any information from, but at least it always
    // starts with a 0.
    if(!end_offset) {
        return false;
    }

    if(end_offset > Offset(input, end)) {
        TokenizeError("block offset is out of range",input, cursor);
    }
    else if(end_offset < Offset(input, cursor)) {
        TokenizeError("block offset is negative out of range",input, cursor);
    }

    // the second data word contains the number of properties in the scope
    const uint64_t prop_count = is64bits ? ReadDoubleWord(input, cursor, end) : ReadWord(input, cursor, end);

    // the third data word contains the length of the property list
    const uint64_t prop_length = is64bits ? ReadDoubleWord(input, cursor, end) : ReadWord(input, cursor, end);

    // now comes the name of the scope/key
    const char* sbeg, *send;
    ReadString(sbeg, send, input, cursor, end);

    output_tokens.push_back(new_Token(sbeg, send, TokenType_KEY, Offset(input, cursor) ));

    // now come the individual properties
    const char* begin_cursor = cursor;
    for (unsigned int i = 0; i < prop_count; ++i) {
        ReadData(sbeg, send, input, cursor, begin_cursor + prop_length);

        output_tokens.push_back(new_Token(sbeg, send, TokenType_DATA, Offset(input, cursor) ));

        if(i != prop_count-1) {
            output_tokens.push_back(new_Token(cursor, cursor + 1, TokenType_COMMA, Offset(input, cursor) ));
        }
    }

    if (Offset(begin_cursor, cursor) != prop_length) {
        TokenizeError("property length not reached, something is wrong",input, cursor);
    }

    // at the end of each nested block, there is a NUL record to indicate
    // that the sub-scope exists (i.e. to distinguish between P: and P : {})
    // this NUL record is 13 bytes long on 32 bit version and 25 bytes long on 64 bit.
    const size_t sentinel_block_length = is64bits ? (sizeof(uint64_t) * 3 + 1) : (sizeof(uint32_t) * 3 + 1);

    if (Offset(input, cursor) < end_offset) {
        if (end_offset - Offset(input, cursor) < sentinel_block_length) {
            TokenizeError("insufficient padding bytes at block end",input, cursor);
        }

        output_tokens.push_back(new_Token(cursor, cursor + 1, TokenType_OPEN_BRACKET, Offset(input, cursor) ));

        // XXX this is vulnerable to stack overflowing ..
        while(Offset(input, cursor) < end_offset - sentinel_block_length) {
            ReadScope(output_tokens, input, cursor, input + end_offset - sentinel_block_length, is64bits);
        }
        output_tokens.push_back(new_Token(cursor, cursor + 1, TokenType_CLOSE_BRACKET, Offset(input, cursor) ));

        for (unsigned int i = 0; i < sentinel_block_length; ++i) {
            if(cursor[i] != '\0') {
                TokenizeError("failed to read nested block sentinel, expected all bytes to be 0",input, cursor);
            }
        }
        cursor += sentinel_block_length;
    }

    if (Offset(input, cursor) != end_offset) {
        TokenizeError("scope length not reached, something is wrong",input, cursor);
    }

    return true;
}
Example #12
void normalize_tyl_in_pl( PlNode **n )

{

  PlNode *i;
  TypedList *tyl;
  PlNode *tmp_pl = NULL, *sons, *p_pl;
  TokenList *tmp_tl, *tl;


  if ( !(*n) ) {
    return;
  }

  switch( (*n)->connective ) {
  case ALL:
  case EX:
    /* we need to make a sequence of quantifiers ( ->sons ... )
     * out of the given sequence of TypedList elements,
     * with the connected type name and the var name in the TokenList,
     * KEEPING THE SAME ORDERING !!
     */
    if ( !(*n)->parse_vars ) {
      printf("\n\nquantifier without argument !! check input files.\n\n");
      exit( 1 );
    }
    tmp_tl = new_TokenList();
    tmp_tl->next = new_TokenList();
    tmp_tl->item = copy_Token( (*n)->parse_vars->name );
    if ( (*n)->parse_vars->type->next ) {
      tmp_tl->next->item = new_Token( MAX_LENGTH );
      strcpy( tmp_tl->next->item, EITHER_STR );
      for ( tl = (*n)->parse_vars->type; tl; tl = tl->next ) {
	strcat( tmp_tl->next->item, CONNECTOR );
	strcat( tmp_tl->next->item, tl->item );
      }
    } else {
      tmp_tl->next->item = copy_Token( (*n)->parse_vars->type->item );
    }
    (*n)->atom = tmp_tl;
    /* now add list of sons
     */
    sons = (*n)->sons;
    p_pl = *n;
    for ( tyl = (*n)->parse_vars->next; tyl; tyl = tyl->next ) {
      tmp_tl = new_TokenList();
      tmp_tl->next = new_TokenList();
      tmp_tl->item = copy_Token( tyl->name );
      if ( tyl->type->next ) {
	tmp_tl->next->item = new_Token( MAX_LENGTH );
	strcpy( tmp_tl->next->item, EITHER_STR );
	for ( tl = tyl->type; tl; tl = tl->next ) {
	  strcat( tmp_tl->next->item, CONNECTOR );
	  strcat( tmp_tl->next->item, tl->item );
	}
      } else {
	tmp_tl->next->item = copy_Token( tyl->type->item );
      }
      tmp_pl = new_PlNode( (*n)->connective );
      tmp_pl->atom = tmp_tl;
      p_pl->sons = tmp_pl;
      p_pl = tmp_pl;
    }
    /* remove typed-list-of info
     */
    free_TypedList( (*n)->parse_vars );
    (*n)->parse_vars = NULL;
    /* the last son in list takes over ->sons
     */
    p_pl->sons = sons;
    /* normalize this sons and get out
     */
    normalize_tyl_in_pl( &(p_pl->sons) );
    break;
  case AND:
  case OR:
    for ( i = (*n)->sons; i; i = i->next ) {
      normalize_tyl_in_pl( &i );
    }
    break;
  case NOT:
    normalize_tyl_in_pl( &((*n)->sons) );
    break;
  case ATOM:
  case TRU:
  case FAL:
    break;
  case WHEN:
    normalize_tyl_in_pl( &((*n)->sons) );
    normalize_tyl_in_pl( &((*n)->sons->next) );
    break;
  default:
    break;
  }

}
Example #13
void collect_type_names_in_pl( PlNode *n )

{

  PlNode *i;
  TypedList *tyl;
  TokenList *tl;
  char *tmp = NULL;
  int nn;

  if ( !n ) {
    return;
  }

  switch( n->connective ) {
  case ALL:
  case EX:
    for ( tyl = n->parse_vars; tyl; tyl = tyl->next ) {
      if ( tyl->type->next ) {
	tmp = new_Token( MAX_LENGTH );
	strcpy( tmp, EITHER_STR );
	for ( tl = tyl->type; tl; tl = tl->next ) {
	  strcat( tmp, CONNECTOR );
	  strcat( tmp, tl->item );
	}
      } else {
	tmp = copy_Token( tyl->type->item );
      }
      if ( (nn = get_type( tmp )) == -1 ) {
	tyl->n = lnum_types;
	ltype_names[lnum_types++] = copy_Token( tmp );
      } else {
	tyl->n = nn;
      }
      free( tmp );
      tmp = NULL;
    }
    collect_type_names_in_pl( n->sons );
    break;
  case AND:
  case OR:
    for ( i = n->sons; i; i = i->next ) {
      collect_type_names_in_pl( i );
    }
    break;
  case NOT:
    collect_type_names_in_pl( n->sons );
    break;
  case ATOM:
  case TRU:
  case FAL:
    break;
  case WHEN:
    collect_type_names_in_pl( n->sons );
    collect_type_names_in_pl( n->sons->next );
    break;
  default:
    break;
  }

}
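
This function and Example #14 depend on a get_type helper that is not shown on this page. A sketch consistent with the call sites — a linear scan over the ltype_names table that returns the type's index, or -1 when the name is unknown (SAME being FF's alias for strcmp's 0 result) — might be:

int get_type( char *str )

{

  int i;

  /* sketch: assumed lookup over the types collected so far */
  for ( i = 0; i < lnum_types; i++ ) {
    if ( strcmp( ltype_names[i], str ) == SAME ) {
      return i;
    }
  }

  return -1;

}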
Example #14
void build_orig_constant_list( void )

{

  char *tmp = NULL;
  TypedList *tyl;
  TypedListList *tyll;
  TokenList *tl, *p_tl, *tmp_tl;
  PlOperator *po;

  int i, j, k, n, std;

  Bool m[MAX_TYPES][MAX_TYPES];

  FactList *fl, *p_fl;

  lnum_types = 0;
  for ( tyl = gparse_types; tyl; tyl = tyl->next ) {
    if ( get_type( tyl->name ) == -1 ) {
      ltype_names[lnum_types++] = copy_Token( tyl->name );
    }
    if ( tyl->type->next ) {
      tmp = new_Token( MAX_LENGTH );
      strcpy( tmp, EITHER_STR );
      for ( tl = tyl->type; tl; tl = tl->next ) {
	strcat( tmp, CONNECTOR );
	strcat( tmp, tl->item );
      }
    } else {
      tmp = copy_Token( tyl->type->item );
    }
    if ( (n = get_type( tmp )) == -1 ) {
      tyl->n = lnum_types;
      ltype_names[lnum_types++] = copy_Token( tmp );
    } else {
      tyl->n = n;
    }
    free( tmp );
    tmp = NULL;
  }
     
  for ( tyl = gparse_constants; tyl; tyl = tyl->next ) {
    if ( tyl->type->next ) {
      tmp = new_Token( MAX_LENGTH );
      strcpy( tmp, EITHER_STR );
      for ( tl = tyl->type; tl; tl = tl->next ) {
	strcat( tmp, CONNECTOR );
	strcat( tmp, tl->item );
      }
    } else {
      tmp = copy_Token( tyl->type->item );
    }
    if ( (n = get_type( tmp )) == -1 ) {
      tyl->n = lnum_types;
      ltype_names[lnum_types++] = copy_Token( tmp );
    } else {
      tyl->n = n;
    }
    free( tmp );
    tmp = NULL;
  }
  
  for ( tyl = gparse_objects; tyl; tyl = tyl->next ) {
    if ( tyl->type->next ) {
      tmp = new_Token( MAX_LENGTH );
      strcpy( tmp, EITHER_STR );
      for ( tl = tyl->type; tl; tl = tl->next ) {
	strcat( tmp, CONNECTOR );
	strcat( tmp, tl->item );
      }
    } else {
      tmp = copy_Token( tyl->type->item );
    }
    if ( (n = get_type( tmp )) == -1 ) {
      tyl->n = lnum_types;
      ltype_names[lnum_types++] = copy_Token( tmp );
    } else {
      tyl->n = n;
    }
    free( tmp );
    tmp = NULL;
  }

  for ( tyll = gparse_predicates; tyll; tyll = tyll->next ) {
    for ( tyl = tyll->args; tyl; tyl = tyl->next ) {
      if ( tyl->type->next ) {
	tmp = new_Token( MAX_LENGTH );
	strcpy( tmp, EITHER_STR );
	for ( tl = tyl->type; tl; tl = tl->next ) {
	  strcat( tmp, CONNECTOR );
	  strcat( tmp, tl->item );
	}
      } else {
	tmp = copy_Token( tyl->type->item );
      }
      if ( (n = get_type( tmp )) == -1 ) {
	tyl->n = lnum_types;
	ltype_names[lnum_types++] = copy_Token( tmp );
      } else {
	tyl->n = n;
      }
      free( tmp );
      tmp = NULL;
    }
  }
    
  collect_type_names_in_pl( gorig_goal_facts );

  for ( po = gloaded_ops; po; po = po->next ) {
    collect_type_names_in_pl( po->preconds );
    collect_type_names_in_pl( po->effects );
    for ( tyl = po->parse_params; tyl; tyl = tyl->next ) {
      if ( tyl->type->next ) {
	tmp = new_Token( MAX_LENGTH );
	strcpy( tmp, EITHER_STR );
	for ( tl = tyl->type; tl; tl = tl->next ) {
	  strcat( tmp, CONNECTOR );
	  strcat( tmp, tl->item );
	}
      } else {
	tmp = copy_Token( tyl->type->item );
      }
      if ( (n = get_type( tmp )) == -1 ) {
	tyl->n = lnum_types;
	ltype_names[lnum_types++] = copy_Token( tmp );
      } else {
	tyl->n = n;
      }
      free( tmp );
      tmp = NULL;
    }
  }


  /* now get the numbers of all composed either types
   */
  for ( i = 0; i < lnum_types; i++ ) {
    lnum_either_ty[i] = 0;
  }
  for ( tyl = gparse_types; tyl; tyl = tyl->next ) {
    make_either_ty( tyl );
  }
  for ( tyl = gparse_constants; tyl; tyl = tyl->next ) {
    make_either_ty( tyl );
  }
  for ( tyl = gparse_objects; tyl; tyl = tyl->next ) {
    make_either_ty( tyl );
  }
  for ( tyll = gparse_predicates; tyll; tyll = tyll->next ) {
    for ( tyl = tyll->args; tyl; tyl = tyl->next ) {
      make_either_ty( tyl );
    }
  }
  make_either_ty_in_pl( gorig_goal_facts );
  for ( po = gloaded_ops; po; po = po->next ) {
    make_either_ty_in_pl( po->preconds );
    make_either_ty_in_pl( po->effects );
    for ( tyl = po->parse_params; tyl; tyl = tyl->next ) {
      make_either_ty( tyl );
    }
  }


  /* now, compute the transitive closure of all type inclusions.
   * first initialize the matrix.
   */
  for ( i = 0; i < lnum_types; i++ ) {
    for ( j = 0; j < lnum_types; j++ ) {
      m[i][j] = ( i == j ? TRUE : FALSE );
    }
  }
  std = -1;
  for ( i = 0; i < lnum_types; i++ ) {
    if ( strcmp( ltype_names[i], STANDARD_TYPE ) == SAME ) {
      std = i;
      break;
    }
  }
  for ( i = 0; i < lnum_types; i++ ) {
    m[i][std] = TRUE; /* all types are subtypes of OBJECT */
  }
  for ( tyl = gparse_types; tyl; tyl = tyl->next ) {
    /* all inclusions as are defined in domain file
     */
    m[get_type( tyl->name )][tyl->n] = TRUE;
  }
  /* compute transitive closure on inclusions matrix
   */
  for ( j = 0; j < lnum_types; j++ ) {
    for ( i = 0; i < lnum_types; i++ ) {
      if ( m[i][j] ) {
	for ( k = 0; k < lnum_types; k++ ) {
	  if ( m[j][k] ) {
	    m[i][k] = TRUE;
	  }
	}
      }
    }
  }
  /* union types are subsets of all those types that contain all
   * their components, and 
   * all component types are subsets of the either type !
   */
  for ( i = 0; i < lnum_types; i++ ) {
    if ( lnum_either_ty[i] < 2 ) continue;
    for ( j = 0; j < lnum_types; j++ ) {
      if ( j == i ) continue;
      /* get supertypes of all component types
       */
      for ( k = 0; k < lnum_either_ty[i]; k++ ) {
	if ( !m[leither_ty[i][k]][j] ) break;
      }
      if ( k < lnum_either_ty[i] ) continue;
      m[i][j] = TRUE;
      /* make components subtypes of either type
       */
      for ( k = 0; k < lnum_either_ty[i]; k++ ) {
	m[leither_ty[i][k]][i] = TRUE;
      }
    }
  }
  /* and again, compute transitive closure on inclusions matrix.
   * I guess, this won't change anything (?), but it also won't need
   * any remarkable computation time, so why should one think about it ?
   */
  for ( j = 0; j < lnum_types; j++ ) {
    for ( i = 0; i < lnum_types; i++ ) {
      if ( m[i][j] ) {
	for ( k = 0; k < lnum_types; k++ ) {
	  if ( m[j][k] ) {
	    m[i][k] = TRUE;
	  }
	}
      }
    }
  }
  

  /* now build FactList of ALL  constant -> type   pairs.
   * for each constant / object, let it appear separately
   * for each type it is a member of; compute type
   * membership based on propagating constants / objects
   * through inclusions matrix.
   *
   * this might make the same pair appear doubly, if an object
   * is declared in type T as well as in some supertype T'.
   * such cases will be filtered out in string collection.
   */
  for ( tyl = gparse_constants; tyl; tyl = tyl->next ) {
    fl = new_FactList();
    fl->item = new_TokenList();
    fl->item->next = new_TokenList();
    fl->item->item = copy_Token( tyl->name );
    if ( tyl->type->next ) {
      fl->item->next->item = new_Token( MAX_LENGTH );
      strcpy( fl->item->next->item, EITHER_STR );
      for ( tl = tyl->type; tl; tl = tl->next ) {
	strcat( fl->item->next->item, CONNECTOR );
	strcat( fl->item->next->item, tl->item );
      }
    } else {
      fl->item->next->item = copy_Token( tyl->type->item );
    }
    fl->next = gorig_constant_list;
    gorig_constant_list = fl;
    /* now add constant to all supertypes
     */
    n = get_type( fl->item->next->item );
    for ( i = 0; i < lnum_types; i++ ) {
      if ( i == n ||
	   !m[n][i] ) continue;
      fl = new_FactList();
      fl->item = new_TokenList();
      fl->item->next = new_TokenList();
      fl->item->item = copy_Token( tyl->name );
      fl->item->next->item = copy_Token( ltype_names[i] );
      fl->next = gorig_constant_list;
      gorig_constant_list = fl;
    }
  }
  for ( tyl = gparse_objects; tyl; tyl = tyl->next ) {
    fl = new_FactList();
    fl->item = new_TokenList();
    fl->item->next = new_TokenList();
    fl->item->item = copy_Token( tyl->name );
    if ( tyl->type->next ) {
      fl->item->next->item = new_Token( MAX_LENGTH );
      strcpy( fl->item->next->item, EITHER_STR );
      for ( tl = tyl->type; tl; tl = tl->next ) {
	strcat( fl->item->next->item, CONNECTOR );
	strcat( fl->item->next->item, tl->item );
      }
    } else {
      fl->item->next->item = copy_Token( tyl->type->item );
    }
    fl->next = gorig_constant_list;
    gorig_constant_list = fl;
    /* now add constant to all supertypes
     */
    n = get_type( fl->item->next->item );
    for ( i = 0; i < lnum_types; i++ ) {
      if ( i == n ||
	   !m[n][i] ) continue;
      fl = new_FactList();
      fl->item = new_TokenList();
      fl->item->next = new_TokenList();
      fl->item->item = copy_Token( tyl->name );
      fl->item->next->item = copy_Token( ltype_names[i] );
      fl->next = gorig_constant_list;
      gorig_constant_list = fl;
    }
  }


  /* now, normalize all typed-list-of  s in domain and problem def,
   * i.e., in all PlNode quantifiers and in op parameters
   *
   * at the same time, remove typed-listof structures in these defs
   */
  normalize_tyl_in_pl( &gorig_goal_facts );
  for ( po = gloaded_ops; po; po = po->next ) {
    normalize_tyl_in_pl( &po->preconds );
    normalize_tyl_in_pl( &po->effects );
    /* be careful to maintain parameter ordering !
     */
    if ( !po->parse_params ) {
      continue;/* no params at all */
    }
    fl = new_FactList();
    fl->item = new_TokenList();
    fl->item->next = new_TokenList();
    fl->item->item = copy_Token( po->parse_params->name );
    if ( po->parse_params->type->next ) {
      fl->item->next->item = new_Token( MAX_LENGTH );
      strcpy( fl->item->next->item, EITHER_STR );
      for ( tl = po->parse_params->type; tl; tl = tl->next ) {
	strcat( fl->item->next->item, CONNECTOR );
	strcat( fl->item->next->item, tl->item );
      }
    } else {
      fl->item->next->item = copy_Token( po->parse_params->type->item );
    }
    po->params = fl;
    p_fl = fl;
    for ( tyl = po->parse_params->next; tyl; tyl = tyl->next ) {
      fl = new_FactList();
      fl->item = new_TokenList();
      fl->item->next = new_TokenList();
      fl->item->item = copy_Token( tyl->name );
      if ( tyl->type->next ) {
	fl->item->next->item = new_Token( MAX_LENGTH );
	strcpy( fl->item->next->item, EITHER_STR );
	for ( tl = tyl->type; tl; tl = tl->next ) {
	  strcat( fl->item->next->item, CONNECTOR );
	  strcat( fl->item->next->item, tl->item );
	}
      } else {
	fl->item->next->item = copy_Token( tyl->type->item );
      }
      p_fl->next = fl;
      p_fl = fl;
    }
    free_TypedList( po->parse_params );
    po->parse_params = NULL;
  }


  /* finally, build  gpredicates_and_types  by chaining predicate names 
   * together with the names of their args' types.
   */
  for ( tyll = gparse_predicates; tyll; tyll = tyll->next ) {
    fl = new_FactList();
    fl->item = new_TokenList();
    fl->item->item = copy_Token( tyll->predicate );
    fl->next = gpredicates_and_types;
    gpredicates_and_types = fl;
    if ( !tyll->args ) continue;
    /* add arg types; MAINTAIN ORDERING !
     */
    fl->item->next = new_TokenList();
    if ( tyll->args->type->next ) {
      fl->item->next->item = new_Token( MAX_LENGTH );
      strcpy( fl->item->next->item, EITHER_STR );
      for ( tl = tyll->args->type; tl; tl = tl->next ) {
	strcat( fl->item->next->item, CONNECTOR );
	strcat( fl->item->next->item, tl->item );
      }
    } else {
      fl->item->next->item = copy_Token( tyll->args->type->item );
    }
    p_tl = fl->item->next;
    for ( tyl = tyll->args->next; tyl; tyl = tyl->next ) {
      tmp_tl = new_TokenList();
      if ( tyl->type->next ) {
	tmp_tl->item = new_Token( MAX_LENGTH );
	strcpy( tmp_tl->item, EITHER_STR );
	for ( tl = tyl->type; tl; tl = tl->next ) {
	  strcat( tmp_tl->item, CONNECTOR );
	  strcat( tmp_tl->item, tl->item );
	}
      } else {
	tmp_tl->item = copy_Token( tyl->type->item );
      }
      p_tl->next = tmp_tl;
      p_tl = tmp_tl;
    }
  }


  /* now get rid of remaining typed-list-of parsing structures
   */
  free_TypedList( gparse_types );
  gparse_types = NULL;
  free_TypedList( gparse_constants );
  gparse_constants = NULL;
  free_TypedList( gparse_objects );
  gparse_objects = NULL;
  free_TypedListList( gparse_predicates );
  gparse_predicates = NULL;

}
Example #15
/*
Lexer_getnext
	returns the next Token in the source text
	can potentially return EOF or unknown token, and/or set_error
	uses lexical definition macros to generate appropriate tokens
*/
Token *Lexer_getnext(Lexer *self)
{
    char c;
    if (self->scanner->curr_index == -1)
        c = Scanner_getnext(self->scanner);
    else
        c = Scanner_getcurr(self->scanner);

    // ignore whitespace
    while (c && strchr(WHITESPACE_CHARS, c))
        c = Scanner_getnext(self->scanner);

    // begin creating a Token
    Token *token = new_Token(c, self->scanner->curr_index,
                             self->scanner->curr_line, self->scanner->curr_col);

    if (c == EOF_CHAR)
        token->type = EOF_TOKEN;

    else if (c == '.') // decimal numeric literal
    {
        token->type = UNKNOWN_TOKEN;
        c = Scanner_getnext(self->scanner);

        while (strchr(NUMERIC_CHARS, c))
        {
            if (token->type != NUMERIC_LITERAL_TOKEN)
                token->type = NUMERIC_LITERAL_TOKEN;

            Token_append(token, c);
            c = Scanner_getnext(self->scanner);
        }

        if (token->type == UNKNOWN_TOKEN)
            cause_error(UNEXPECTED_TOKEN_ERROR, token->buffer, token->len);
    }

    else if (strchr(SYMBOL_CHARS, c))
    {
        token->type = SYMBOL_TOKEN;
        c = Scanner_getnext(self->scanner);

        while (strchr(SYMBOL_CHARS, c))
        {
            Token_append(token, c);
            c = Scanner_getnext(self->scanner);
        }
    }

    else if (strchr(NUMERIC_CHARS, c))
    {
        token->type = NUMERIC_LITERAL_TOKEN;
        c = Scanner_getnext(self->scanner);

        int has_decimal = 0;
        while (strchr(NUMERIC_CHARS, c) || (!has_decimal && c == '.'))
        {
            if (c == '.') has_decimal = 1;
            Token_append(token, c);
            c = Scanner_getnext(self->scanner);
        }
    }

    else if (strchr(STRING_QUOTE_CHARS, c))
    {
        token->type = STRING_LITERAL_TOKEN;
        char end_quote = c;
        c = Scanner_getnext(self->scanner);

        while (c != end_quote)
        {
            if (c == EOF_CHAR) // EOF in string
            {
                token->type = EOF_TOKEN;
                cause_error(UNTERMINATED_STRING_ERROR, token->buffer, token->len);
                Scanner_getnext(self->scanner);
                return token;
            }

            Token_append(token, c);
            c = Scanner_getnext(self->scanner);
        }

        Token_append(token, c);
        Scanner_getnext(self->scanner);
    }

    else if (c == LIST_START_CHAR)
    {
        token->type = LIST_START_TOKEN;
        Scanner_getnext(self->scanner);
    }

    else if (c == LIST_END_CHAR)
    {
        token->type = LIST_END_TOKEN;
        Scanner_getnext(self->scanner);
    }

    else if (c == ESCAPE_CHAR)
    {
        token->type = ESCAPE_TOKEN;
        Scanner_getnext(self->scanner);
    }

    else if (c == EVALUATE_CHAR)
    {
        token->type = EVALUATE_TOKEN;
        Scanner_getnext(self->scanner);
    }

    else
    {
        token->type = UNKNOWN_TOKEN;
        cause_error(UNEXPECTED_TOKEN_ERROR, token->buffer, token->len);
        Scanner_getnext(self->scanner);
    }

    return token;
}
Example #16
void collect_all_strings( void )

{

  FactList *f;
  TokenList *t;
  int p_num, type_num, c_num, ar;
  int i;

  for ( f = gorig_constant_list; f; f = f->next ) {
    if ( (type_num = position_in_types_table( f->item->next->item )) == -1 ) {
      if ( gnum_types == MAX_TYPES ) {
	printf("\ntoo many types! increase MAX_TYPES (currently %d)\n\n",
	       MAX_TYPES);
	exit( 1 );
      }
      gtype_names[gnum_types] = new_Token( strlen( f->item->next->item ) + 1 );
      strcpy( gtype_names[gnum_types], f->item->next->item );
      gtype_size[gnum_types] = 0;
      for ( i = 0; i < MAX_CONSTANTS; i++ ) {
	gis_member[i][gnum_types] = FALSE;
      }
      type_num = gnum_types++;
    }

    if ( (c_num = position_in_constants_table( f->item->item )) == -1 ) {
      if ( gnum_constants == MAX_CONSTANTS ) {
	printf("\ntoo many constants! increase MAX_CONSTANTS (currently %d)\n\n",
	       MAX_CONSTANTS);
	exit( 1 );
      }
      gconstants[gnum_constants] = new_Token( strlen( f->item->item ) + 1 );
      strcpy( gconstants[gnum_constants], f->item->item );
      c_num = gnum_constants++;
    }
    
    if ( !gis_member[c_num][type_num] ) {
      if ( gtype_size[type_num] == MAX_TYPE ) {
	printf("\ntoo many consts in type %s! increase MAX_TYPE (currently %d)\n\n",
	       gtype_names[type_num], MAX_TYPE);
	exit( 1 );
      }     
      gtype_consts[type_num][gtype_size[type_num]++] = c_num;
      gis_member[c_num][type_num] = TRUE;
    }
  }

  for ( f = gpredicates_and_types; f; f = f->next ) {
    if ( (p_num = position_in_predicates_table( f->item->item )) != -1 ) {
      printf("\npredicate %s declared twice!\n\n", f->item->item);
      exit( 1 );
    }
    if ( gnum_predicates == MAX_PREDICATES ) {
      printf("\ntoo many predicates! increase MAX_PREDICATES (currently %d)\n\n",
	     MAX_PREDICATES);
      exit( 1 );
    }
    gpredicates[gnum_predicates] = new_Token( strlen( f->item->item ) + 1 );
    strcpy( gpredicates[gnum_predicates], f->item->item );
    ar = 0;
    for ( t = f->item->next; t; t = t->next ) {
      if ( (type_num = position_in_types_table( t->item )) == -1 ) {
	printf("\nwarning: predicate %s uses unknown or empty type %s\n\n", 
	       f->item->item, t->item);
      }
      if ( ar == MAX_ARITY ) {
	printf("\narity of %s to high! increase MAX_ARITY (currently %d)\n\n",
	       gpredicates[gnum_predicates], MAX_ARITY);
	exit( 1 );
      }
      gpredicates_args_type[gnum_predicates][ar++] = type_num;
    }
    garity[gnum_predicates++] = ar;
  }

  free_FactList( gorig_constant_list );
  free_FactList( gpredicates_and_types );
  free_FactList( gtypes );

}