Example #1
token create_binary_operator_token(char c)
{ switch(c)
  { case '+':
      return create_token(binary_operator, plus);
    case '-':
      return create_token(binary_operator, minus);
    case '*':
      return create_token(binary_operator, multiply);
    case '/':
      return create_token(binary_operator, divide); }
  error_message("wrong argument passed to create_binary_operator_token\n");
  exit(1); }
Example #2
//--------------------------------------------------------
//static int8_t input0 (func_cb_ptr p, void *data, uint16_t length) {
static int8_t input0 (func_cb_ptr p, token_type_t *t) {
	update_background_state_t *s = (update_background_state_t *)sys_get_state();	

	// Get token from port.

	if (s->state == UPD_STATE_INIT) {	

		s->backMat = (CYCLOPS_Matrix *)capture_token_data(t, s->pid);
		if (s->backMat == NULL) return -ENOMEM;
		
		s->state = UPD_STATE_PROCESS;								
	} else {		
		CYCLOPS_Matrix *M = (CYCLOPS_Matrix *)get_token_data(t);
		//check that input matrix's depth is 1 byte
		if( (M->depth != CYCLOPS_1BYTE) || (s->backMat->depth != CYCLOPS_1BYTE) )
				return -EINVAL;
		token_type_t *my_token = create_token(s->backMat, sizeof(CYCLOPS_Matrix), s->pid);
		if (my_token == NULL) return -EINVAL;
		set_token_type(my_token, CYCLOPS_MATRIX);
		//SOS_CALL(s->put_token, put_token_func_t, s->output0, my_token);
		dispatch(s->output0, my_token);
		destroy_token(my_token);
	}
	return SOS_OK;
}
Example #3
//--------------------------------------------------------
static int8_t process_input(over_thresh_state_t *s) {
    uint8_t overTheThresh;
    objectInfo newObject;

    if (s->state < 4) {
        return -EBUSY;
    }

    s->state = 0;
    overTheThresh = over_thresh(s->A, s->row, s->col, RANGE, s->thresh);

    newObject.objectPosition.x = s->col;
    newObject.objectPosition.y = s->row;
    newObject.objectSize.x = ROWS;
    newObject.objectSize.y = COLS;
    newObject.actionFlag = NONE;

    if (overTheThresh > DETECT_THRESH) {
        LED_DBG(LED_RED_TOGGLE);
        newObject.actionFlag = OBJ_DETECT;
    }

    token_type_t *my_token = create_token(&newObject, sizeof(objectInfo), s->pid);
    if (my_token == NULL) return -ENOMEM;
    //SOS_CALL(s->put_token, put_token_func_t, s->output0, my_token);
    dispatch(s->output0, my_token);
    destroy_token(my_token);

    //destroy_cyclops_matrix(s->A);
    destroy_token_data(s->A, CYCLOPS_MATRIX, sizeof(CYCLOPS_Matrix));
    s->A = NULL;

    return SOS_OK;
}
Example #4
static PyObject* Preprocessor_tokenize(Preprocessor* self, PyObject *args)
{
    const char *s = NULL;
    Py_ssize_t len;
    if (!PyArg_ParseTuple(args, "s#:tokenize", &s, &len))
        return NULL;

    try
    {
        std::vector<cmonster::core::Token> result =
            self->preprocessor->tokenize(s, len);

        ScopedPyObject tuple(PyTuple_New(result.size()));
        if (!tuple)
            return NULL;

        for (Py_ssize_t i = 0; i < (Py_ssize_t)result.size(); ++i)
        {
            Token *token = create_token(self, result[i]);
            if (!token)
                return NULL;
            PyTuple_SetItem(tuple, i, (PyObject*)token);
        }
        return tuple.release();
    }
    catch (...)
    {
        set_python_exception();
        return NULL;
    }
    Py_INCREF(Py_None);
    return Py_None;
}
Example #5
static PyObject* Preprocessor_next(Preprocessor* self, PyObject *args)
{
    PyObject *expand = Py_True;
    if (!PyArg_ParseTuple(args, "|O:next", &expand))
        return NULL;
    try
    {
        std::auto_ptr<cmonster::core::Token> token(
            self->preprocessor->next(PyObject_IsTrue(expand)));
        if (token.get())
        {
            return (PyObject*)create_token(self, *token);
        }
        else
        {
            PyErr_SetString(PyExc_RuntimeError,
                "Internal error: Preprocessor returned NULL");
            return NULL;
        }
    }
    catch (...)
    {
        set_python_exception();
        return NULL;
    }
}
Example #6
static int8_t element_module(void *state, Message *msg) {
	element_state_t *s = (element_state_t *)state;

	switch (msg->type) {
		case MSG_INIT: 
		{		
			// Default value = 9 seconds.
			s->computation_time = 9;
			// Default: 3 LSBits
			s->bit_mask = 0x07;
			s->pid = msg->did;
			break;
	    }
		case MSG_TIMER_TIMEOUT:
		{
			DEBUG("Truncate_long timer fired. Output the old token.\n");
			token_type_t *my_token = create_token(&s->value, sizeof(uint8_t), s->pid);
			if (my_token == NULL) {
				SOS_CALL(s->signal_error, signal_error_func_t, -ENOMEM);
				return -EINVAL;
			}
			//SOS_CALL(s->put_token, put_token_func_t, s->output0, my_token);
			dispatch(s->output0, my_token);
			destroy_token(my_token);
			break;
		}
		case MSG_FINAL:
		{
		   	break;
		}
		default: return -EINVAL;
	}
	return SOS_OK;
}
Example #7
// The salt here is owner-supplied random bits, not the address hash.
bool create_token(encrypted_token& out_token, const std::string& passphrase,
    const ek_entropy& entropy)
{
    // BIP38: If lot and sequence numbers are not being included, then
    // owner_salt is 8 random bytes instead of 4, lot_sequence is omitted and
    // owner_entropy becomes an alias for owner_salt.
    const auto prefix = parse_encrypted_token::prefix_factory(false);
    return create_token(out_token, passphrase, entropy, entropy, prefix);
}
Example #8
static void cmdline_tokenize(Cmdline *cmdline)
{
  char ch[] = {0,'\0'};
  int st, ed, pos;
  char *str = cmdline->line;
  bool esc = false;

  st = ed = pos = 0;
  for (;;) {
    ch[0] = str[pos++];
    if (ch[0] == '\0') {
      create_token(cmdline->tokens, str, st, ed, false);
      break;
    }
    else if (ch[0] == '\\' && !esc) {
      esc = true;
      continue;
    }
    else if ((ch[0] == '\"' || ch[0] == '\'') && !esc) {
      char *closech = strchrnul(&str[pos], ch[0]);
      if (closech[-1] != '\\') {
        int end = (closech - &str[pos]) + pos;
        bool quote = str[end] == '\0';
        pos = quote ? pos-1 : pos;
        create_token(cmdline->tokens, str, pos, end, !quote);
        st = pos = quote ? end : ++end;
      }
    }
    else if (strpbrk(ch, TOKENCHARS) && !esc) {
      create_token(cmdline->tokens, str, st, ed, false);
      if (*ch != ' ') {
        create_token(cmdline->tokens, str, pos-1, pos, false);
        ed = pos;
      }
      st = pos;
    }
    else
      ed = pos;

    esc = false;
  }

  cmdline->cont = esc;
}
Example #9
t_result	create_result_from_instru_token(t_file_reader *file,
						t_position pos,
						t_instruction_type inst)
{
  t_token	token;

  token = create_token(file, pos, TOKEN_TYPE_INSTRUCTION);
  token.content.instruction = inst;
  return (create_result_from_token(token));
}
Example #10
 void UUID::generate()
 {
    if( !gUUIDStateInitialized )
    {
       create_uuid_state( &gUUIDState );
       gUUIDStateInitialized = true;
    }
    
    create_token( &gUUIDState, ( xuuid_t* ) this );
 }
Example #11
int main(int argc, char **argv) {
  uuid_state state;
  uuid_t uuid;
  char output[1024];

  create_uuid_state(&state);
  create_token(&state, &uuid);
  format_token(output, &uuid);

  printf("%s\n", output);

  return 0;
}
Example #12
// tokenizes the string str
Ttoken *tokenize(char *str) {
    Ttoken *tok_list, *tok_tmp = NULL;
    char word[256];
    int a, b, c;
    int i = 0;

    tok_list = create_token("head");

    while (str[i] != '\0') {
        // skip leading whitespace
        while (str[i] == ' ' || str[i] == '\t') i++;

        if (str[i] == '"') { // find end of string part
            i++;
            a = i;
            while (str[i] != '"') i++;
            b = i;
            i++;
        }
        else {               // find end of word
            a = i;
            while (str[i] != ' ' && str[i] != '\t' && str[i] != '\0') i++;
            b = i;
        }

        // copy string to word
        for (c = 0; c < b - a; c++) word[c] = str[a + c];
        word[c] = '\0';

        // make token and put it in the list
        tok_tmp = create_token(word);
        insert_token(tok_list, tok_tmp);
    }

    tok_tmp->word[strlen(tok_tmp->word) - 1] = '\0';
    tok_tmp = (Ttoken *)tok_list->next;
    destroy_token(tok_list);

    return tok_tmp;
}
Example #13
// The salt here is owner-supplied random bits, not the address hash.
bool create_token(encrypted_token& out_token, const std::string& passphrase,
    const ek_salt& salt, uint32_t lot, uint32_t sequence)
{
    if (lot > ek_max_lot || sequence > ek_max_sequence)
        return false;

    static constexpr size_t max_sequence_bits = 12;
    const uint32_t lot_sequence = (lot << max_sequence_bits) | sequence;
    const auto entropy = splice(salt, to_big_endian(lot_sequence));
    const auto prefix = parse_encrypted_token::prefix_factory(true);
    create_token(out_token, passphrase, salt, entropy, prefix);
    return true;
}
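The shift-and-OR above packs the lot number into the upper bits and the 12-bit sequence number into the lower 12 bits of a 32-bit value, which is why a bitwise OR rather than a logical OR is required. A minimal standalone sketch of the same packing, using hypothetical lot and sequence values:

#include <cassert>
#include <cstdint>

int main()
{
    const uint32_t max_sequence_bits = 12;
    const uint32_t lot = 263183;   // hypothetical lot number (fits in 20 bits)
    const uint32_t sequence = 1;   // hypothetical sequence number (fits in 12 bits)
    const uint32_t lot_sequence = (lot << max_sequence_bits) | sequence;
    assert(lot_sequence == 263183u * 4096u + 1u);   // 1077997569
    return 0;
}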
Example #14
static int8_t input0 (func_cb_ptr p, token_type_t *t) {
    element_state_t *s = (element_state_t *)sys_get_state();

    // Get token from port.
    // If you want to break the chain of calls here, copy the token into a private
    // (global) data structure and return the appropriate value (SOS_OK).

    // Process input: Extract 3 LSB's and pass it on to the next function.
    // We need a separate place to hold the output as we are modifying the input.
    // Remember, this module does not own the input token, so should not
    // overwrite it.
    uint8_t out_value = (*((uint8_t *)get_token_data(t))) & s->bit_mask;

    token_type_t *my_token = create_token(&out_value, sizeof(uint8_t), s->pid);
    if (my_token == NULL) return -ENOMEM;
    //SOS_CALL(s->put_token, put_token_func_t, s->output0, my_token);
    dispatch(s->output0, my_token);
    destroy_token(my_token);
    return SOS_OK;
}
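As a quick worked example of the masking above (assuming the default 0x07 bit mask set in the MSG_INIT handler of Example #6), an input byte of 0xB5 (binary 10110101) gives 0xB5 & 0x07 = 0x05, so only the three least-significant bits are forwarded in the newly created token.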
Example #15
static int8_t process_input(element_state_t *s, uint8_t data) {
	uint8_t out_value; 
	
  	// Get token from port.
	// If you want to break the chain of calls here, copy the token into a private
	// (global) data structure and return the appropriate value (-EBUSY).
	switch (s->status) {
		case IDLE:
		{
			s->stored_input[0] = data;
			s->status = WAITING_FOR_1;
			return -EBUSY;
		}
		case WAITING_FOR_1:
		{
			s->stored_input[1] = data;
			s->status = WAITING_FOR_2;
			return -EBUSY;
		}
		case WAITING_FOR_2:
		{
			// Process input: Combine (OR) the three inputs and pass the result on to the next function.
			// We need a separate place to hold the output as we are modifying the input.
			// Remember, this module does not own the input token, so should not
			// overwrite it.
			out_value = data | s->stored_input[0] | s->stored_input[1];

			DEBUG("COMBINE: Output - token %d.\n", out_value);
			token_type_t *my_token = create_token(&out_value, sizeof(uint8_t), s->pid);
			if (my_token == NULL) return -ENOMEM;
			//SOS_CALL(s->put_token, put_token_func_t, s->output0, my_token);
			dispatch(s->output0, my_token);
			s->status = IDLE;
			destroy_token(my_token);
			return SOS_OK;
		}
		default: return SOS_OK;
	}

	return SOS_OK;
}
Example #16
File: source.c  Project: nesl/sos-2x
static int8_t element_module(void *state, Message *msg) {
	element_state_t *s = (element_state_t *)state;

	switch (msg->type) {
		case MSG_INIT: 
		{		
			LED_DBG(LED_RED_ON);
			s->cnt = 0;
			// Default parameter value = 5 sec.
			s->sample_rate_in_sec = 5;
			s->pid = msg->did;
			sys_timer_start(TIMER_TID, ((uint32_t)s->sample_rate_in_sec) * 1024L, TIMER_ONE_SHOT);
			break;
	    }
		case MSG_TIMER_TIMEOUT:
		{
			LED_DBG(LED_RED_TOGGLE);
			s->cnt++;
			DEBUG("Timer fired. Put token %d on output port. Function CB output = 0x%x.\n", 
														s->cnt, s->output0);
			token_type_t *my_token = create_token(&s->cnt, sizeof(uint8_t), s->pid);
			if (my_token == NULL) return -ENOMEM;
			//SOS_CALL(s->put_token, put_token_func_t, s->output0, my_token);
			dispatch(s->output0, my_token);
			destroy_token(my_token);
			sys_timer_start(TIMER_TID, ((uint32_t)s->sample_rate_in_sec) * 1024L, TIMER_ONE_SHOT);
			break;
		}
		case MSG_FINAL:
		{
		   	break;
		}
		default: return -EINVAL;
	}
	return SOS_OK;
}
Example #17
/**
 * Parse and construct lexer token
 *
 * Note:
 *      Currently, lexer token doesn't fully correspond to Token, defined in ECMA-262, v5, 7.5.
 *      For example, there is no new-line token type in the token definition of ECMA-262 v5.
 *
 * Note:
 *      For Lexer alone, it is hard to find out a token is whether a regexp or a division.
 *      Parser must set maybe_regexp to true if a regexp is expected.
 *      Otherwise, a division is expected.
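 *      For example, in "a / b" the '/' is a division operator, whereas in
 *      "var r = /ab+c/g;" it begins a regular expression literal.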
 *
 * @return constructed token
 */
static token
lexer_parse_token (bool maybe_regexp, /**< read '/' as regexp? */
                   bool *out_is_preceed_by_new_lines_p, /**< out: is constructed token preceded by newlines? */
                   bool is_strict) /**< flag, indicating whether current code is in strict mode code */

{
  JERRY_ASSERT (is_token_parse_in_progress == false);

  *out_is_preceed_by_new_lines_p = lexer_skip_whitespace_and_comments ();

  ecma_char_t c = LA (0);

  /* ECMA-262 v5, 7.6, Identifier */
  if (lexer_is_char_can_be_identifier_start (c))
  {
    return lexer_parse_identifier_or_keyword (is_strict);
  }

  /* ECMA-262 v5, 7.8.3, Numeric literal */
  if (lit_char_is_decimal_digit (c)
      || (c == LIT_CHAR_DOT
          && lit_char_is_decimal_digit (LA (1))))
  {
    return lexer_parse_number (is_strict);
  }

  if (c == LIT_CHAR_NULL)
  {
    return create_token (TOK_EOF, 0);
  }

  if (c == LIT_CHAR_SINGLE_QUOTE
      || c == LIT_CHAR_DOUBLE_QUOTE)
  {
    return lexer_parse_string ();
  }

  if (c == LIT_CHAR_SLASH && maybe_regexp)
  {
    return lexer_parse_regexp ();
  }

  /* ECMA-262 v5, 7.7, Punctuator */
  switch (c)
  {
    case LIT_CHAR_LEFT_BRACE:
    {
      RETURN_PUNC (TOK_OPEN_BRACE);
      break;
    }
    case LIT_CHAR_RIGHT_BRACE:
    {
      RETURN_PUNC (TOK_CLOSE_BRACE);
      break;
    }
    case LIT_CHAR_LEFT_PAREN:
    {
      RETURN_PUNC (TOK_OPEN_PAREN);
      break;
    }
    case LIT_CHAR_RIGHT_PAREN:
    {
      RETURN_PUNC (TOK_CLOSE_PAREN);
      break;
    }
    case LIT_CHAR_LEFT_SQUARE:
    {
      RETURN_PUNC (TOK_OPEN_SQUARE);
      break;
    }
    case LIT_CHAR_RIGHT_SQUARE:
    {
      RETURN_PUNC (TOK_CLOSE_SQUARE);
      break;
    }
    case LIT_CHAR_DOT:
    {
      RETURN_PUNC (TOK_DOT);
      break;
    }
    case LIT_CHAR_SEMICOLON:
    {
      RETURN_PUNC (TOK_SEMICOLON);
      break;
    }
    case LIT_CHAR_COMMA:
    {
      RETURN_PUNC (TOK_COMMA);
      break;
    }
    case LIT_CHAR_TILDE:
    {
      RETURN_PUNC (TOK_COMPL);
      break;
    }
    case LIT_CHAR_COLON:
    {
      RETURN_PUNC (TOK_COLON);
      break;
    }
    case LIT_CHAR_QUESTION:
    {
      RETURN_PUNC (TOK_QUERY);
      break;
    }

    case LIT_CHAR_ASTERISK:
    {
      IF_LA_IS (LIT_CHAR_EQUALS, TOK_MULT_EQ, TOK_MULT);
      break;
    }
    case LIT_CHAR_SLASH:
    {
      IF_LA_IS (LIT_CHAR_EQUALS, TOK_DIV_EQ, TOK_DIV);
      break;
    }
    case LIT_CHAR_CIRCUMFLEX:
    {
      IF_LA_IS (LIT_CHAR_EQUALS, TOK_XOR_EQ, TOK_XOR);
      break;
    }
    case LIT_CHAR_PERCENT:
    {
      IF_LA_IS (LIT_CHAR_EQUALS, TOK_MOD_EQ, TOK_MOD);
      break;
    }
    case LIT_CHAR_PLUS:
    {
      IF_LA_IS_OR (LIT_CHAR_PLUS, TOK_DOUBLE_PLUS, LIT_CHAR_EQUALS, TOK_PLUS_EQ, TOK_PLUS);
      break;
    }
    case LIT_CHAR_MINUS:
    {
      IF_LA_IS_OR (LIT_CHAR_MINUS, TOK_DOUBLE_MINUS, LIT_CHAR_EQUALS, TOK_MINUS_EQ, TOK_MINUS);
      break;
    }
    case LIT_CHAR_AMPERSAND:
    {
      IF_LA_IS_OR (LIT_CHAR_AMPERSAND, TOK_DOUBLE_AND, LIT_CHAR_EQUALS, TOK_AND_EQ, TOK_AND);
      break;
    }
    case LIT_CHAR_VLINE:
    {
      IF_LA_IS_OR (LIT_CHAR_VLINE, TOK_DOUBLE_OR, LIT_CHAR_EQUALS, TOK_OR_EQ, TOK_OR);
      break;
    }
    case LIT_CHAR_LESS_THAN:
    {
      switch (LA (1))
      {
        case LIT_CHAR_LESS_THAN: IF_LA_N_IS (LIT_CHAR_EQUALS, TOK_LSHIFT_EQ, TOK_LSHIFT, 2); break;
        case LIT_CHAR_EQUALS: RETURN_PUNC_EX (TOK_LESS_EQ, 2); break;
        default: RETURN_PUNC (TOK_LESS);
      }
      break;
    }
    case LIT_CHAR_GREATER_THAN:
    {
      switch (LA (1))
      {
        case LIT_CHAR_GREATER_THAN:
        {
          switch (LA (2))
          {
            case LIT_CHAR_GREATER_THAN: IF_LA_N_IS (LIT_CHAR_EQUALS, TOK_RSHIFT_EX_EQ, TOK_RSHIFT_EX, 3); break;
            case LIT_CHAR_EQUALS: RETURN_PUNC_EX (TOK_RSHIFT_EQ, 3); break;
            default: RETURN_PUNC_EX (TOK_RSHIFT, 2);
          }
          break;
        }
        case LIT_CHAR_EQUALS: RETURN_PUNC_EX (TOK_GREATER_EQ, 2); break;
        default: RETURN_PUNC (TOK_GREATER);
      }
      break;
    }
    case LIT_CHAR_EQUALS:
    {
      if (LA (1) == LIT_CHAR_EQUALS)
      {
        IF_LA_N_IS (LIT_CHAR_EQUALS, TOK_TRIPLE_EQ, TOK_DOUBLE_EQ, 2);
      }
      else
      {
        RETURN_PUNC (TOK_EQ);
      }
      break;
    }
    case LIT_CHAR_EXCLAMATION:
    {
      if (LA (1) == LIT_CHAR_EQUALS)
      {
        IF_LA_N_IS (LIT_CHAR_EQUALS, TOK_NOT_DOUBLE_EQ, TOK_NOT_EQ, 2);
      }
      else
      {
        RETURN_PUNC (TOK_NOT);
      }
      break;
    }
  }

  PARSE_ERROR (JSP_EARLY_ERROR_SYNTAX, "Illegal character", lit_utf8_iterator_get_pos (&src_iter));
} /* lexer_parse_token */
Example #18
void Tokenizer::handle_char(char new_ch) {
    // Checks for token ending conditions and buffer tasks for each possible state.

    switch (current_state) {
    case 0: // ----- DEFAULT STATE
        // Buffer is now ready for character
        buffer_ready = true;
        return;

    case 1: // ----- WORD
        // Close word token on any non-alphanumeric character
        if (!isalpha(new_ch) && !isnumber(new_ch)) {
            // Determine token type and create token
            std::string type = get_word_type(token_buffer);
            create_token(type);
            // Open buffer to read incoming character
            buffer_ready = true;
        } else {
            // Add incoming character to token buffer
            token_buffer+=new_ch;
        }
        break;

    case 2: // ----- NUMBER
        // Close number token on anything non-numerical
        if (!isnumber(new_ch)) {
            if (isalpha(new_ch)) {
                // ERROR
                // !!
            } else {
                // Create NUMBER token
                create_token("NUMBER");
                // Open buffer to read incoming character
                buffer_ready = true;
            }
        } else {
            // Add incoming character to token buffer
            token_buffer+=new_ch;
        }
        break;

    case 3: // ----- STRING
        // Close string when a " is reached
        if (new_ch == '\"') {
            // Add last quotation mark to token
            token_buffer+=new_ch;
            // Keep buffer closed from handling incoming character
            current_state = 0;
            // Create STRING token
            create_token("STRING");
        } else {
            // Add incoming character to token buffer
            token_buffer+=new_ch;
        }
        break;

    case 4: // ----- COMMENT
        // Close comment on a newline
        if (new_ch == '\r' || new_ch == '\n') {
            // Ready to read tokens again
            buffer_ready = true;
        }
        // Otherwise, do nothing until comment ends
        break;

    case 5: // ----- ! EXC
        // Could be a NOT or a RELOP
        if (new_ch == '=') {
            // Add incoming character to token buffer
            token_buffer+=new_ch;
            // Keep buffer closed from handling incoming character
            current_state = 0;
            // Create a RELOP token
            create_token("RELOP");
        } else {
            // Create a NOT token
            create_token("NOT");
            // Open buffer to read incoming character
            buffer_ready = true;
        }
        break;

    case 6: // ----- = EQUALS
        // Check for second '='
        if (new_ch == '=') {
            // Add incoming character to token buffer
            token_buffer+=new_ch;
            // Keep buffer closed from handling incoming character
            current_state = 0;
            // Create RELOP token
            create_token("RELOP");
        } else {
            // Without the second '=', it's an error
            create_token("ERROR");
            // Open buffer to read incoming character
            buffer_ready = true;
        }
        break;

    case 7: // ----- > GT
        // Could be a > or a >=
        if (new_ch == '=') {
            // Add incoming character to token buffer
            token_buffer+=new_ch;
            // Keep buffer closed from handling incoming character
            current_state = 0;
            // Create RELOP token
            create_token("RELOP");
        } else {
            // Create '>' token
            create_token("RELOP");
            // Open buffer to read incoming character
            buffer_ready = true;
        }
        break;

    case 8: // ----- < LT
        // Could be a < or a <= or an ASSIGNOP
        if (new_ch == '-') {
            // Add incoming character to token buffer
            token_buffer+=new_ch;
            // Keep buffer closed from handling incoming character
            current_state = 0;
            // Create ASSIGNOP token
            create_token("ASSIGNOP");
        } else if (new_ch == '=') {
            // Add incoming character to token buffer
            token_buffer+=new_ch;
            // Keep buffer closed from handling incoming character
            current_state = 0;
            // Create RELOP token
            create_token("RELOP");
        } else {
            // Create '<' token
            create_token("RELOP");
            // Open buffer to read incoming character
            buffer_ready = true;
        }
        break;

    case 10: // ----- Unambiguous single character
        // Create token immediately
        std::string type = get_op_type(token_buffer[0]);
        create_token(type);
        // Open buffer to read incoming character
        buffer_ready = true;
        break;

    }
}
Example #19
/**
 * Parse and construct lexer token
 *
 * Note:
 *      Currently, lexer token doesn't fully correspond to Token, defined in ECMA-262, v5, 7.5.
 *      For example, there is no new-line token type in the token definition of ECMA-262 v5.
 *
 * @return constructed token
 */
static token
lexer_parse_token (void)
{
  ecma_char_t c = LA (0);

  if (lit_char_is_white_space (c))
  {
    while (lit_char_is_white_space (c))
    {
      consume_char ();

      c = LA (0);
    }
  }

  if (lit_char_is_line_terminator (c))
  {
    while (lit_char_is_line_terminator (c))
    {
      consume_char ();

      c = LA (0);
    }

    return create_token (TOK_NEWLINE, 0);
  }

  JERRY_ASSERT (is_token_parse_in_progress == false);

  /* ECMA-262 v5, 7.6, Identifier */
  if (lexer_is_char_can_be_identifier_start (c))
  {
    return lexer_parse_identifier_or_keyword ();
  }

  /* ECMA-262 v5, 7.8.3, Numeric literal */
  if (lit_char_is_decimal_digit (c)
      || (c == LIT_CHAR_DOT
          && lit_char_is_decimal_digit (LA (1))))
  {
    return lexer_parse_number ();
  }

  if (c == LIT_CHAR_LF)
  {
    consume_char ();
    return create_token (TOK_NEWLINE, 0);
  }

  if (c == LIT_CHAR_NULL)
  {
    return create_token (TOK_EOF, 0);
  }

  if (c == LIT_CHAR_SINGLE_QUOTE
      || c == LIT_CHAR_DOUBLE_QUOTE)
  {
    return lexer_parse_string ();
  }

  /* ECMA-262 v5, 7.4, SingleLineComment or MultiLineComment */
  if (c == LIT_CHAR_SLASH
      && (LA (1) == LIT_CHAR_SLASH
          || LA (1) == LIT_CHAR_ASTERISK))
  {
    if (lexer_parse_comment ())
    {
      return create_token (TOK_NEWLINE, 0);
    }
    else
    {
      return lexer_parse_token ();
    }
  }

  if (c == LIT_CHAR_SLASH
      && !(prev_non_lf_token.type == TOK_NAME
           || prev_non_lf_token.type == TOK_NULL
           || prev_non_lf_token.type == TOK_BOOL
           || prev_non_lf_token.type == TOK_CLOSE_BRACE
           || prev_non_lf_token.type == TOK_CLOSE_SQUARE
           || prev_non_lf_token.type == TOK_CLOSE_PAREN
           || prev_non_lf_token.type == TOK_SMALL_INT
           || prev_non_lf_token.type == TOK_NUMBER
           || prev_non_lf_token.type == TOK_STRING
           || prev_non_lf_token.type == TOK_REGEXP))
  {
    return lexer_parse_regexp ();
  }

  /* ECMA-262 v5, 7.7, Punctuator */
  switch (c)
  {
    case LIT_CHAR_LEFT_BRACE:
    {
      RETURN_PUNC (TOK_OPEN_BRACE);
      break;
    }
    case LIT_CHAR_RIGHT_BRACE:
    {
      RETURN_PUNC (TOK_CLOSE_BRACE);
      break;
    }
    case LIT_CHAR_LEFT_PAREN:
    {
      RETURN_PUNC (TOK_OPEN_PAREN);
      break;
    }
    case LIT_CHAR_RIGHT_PAREN:
    {
      RETURN_PUNC (TOK_CLOSE_PAREN);
      break;
    }
    case LIT_CHAR_LEFT_SQUARE:
    {
      RETURN_PUNC (TOK_OPEN_SQUARE);
      break;
    }
    case LIT_CHAR_RIGHT_SQUARE:
    {
      RETURN_PUNC (TOK_CLOSE_SQUARE);
      break;
    }
    case LIT_CHAR_DOT:
    {
      RETURN_PUNC (TOK_DOT);
      break;
    }
    case LIT_CHAR_SEMICOLON:
    {
      RETURN_PUNC (TOK_SEMICOLON);
      break;
    }
    case LIT_CHAR_COMMA:
    {
      RETURN_PUNC (TOK_COMMA);
      break;
    }
    case LIT_CHAR_TILDE:
    {
      RETURN_PUNC (TOK_COMPL);
      break;
    }
    case LIT_CHAR_COLON:
    {
      RETURN_PUNC (TOK_COLON);
      break;
    }
    case LIT_CHAR_QUESTION:
    {
      RETURN_PUNC (TOK_QUERY);
      break;
    }

    case LIT_CHAR_ASTERISK:
    {
      IF_LA_IS (LIT_CHAR_EQUALS, TOK_MULT_EQ, TOK_MULT);
      break;
    }
    case LIT_CHAR_SLASH:
    {
      IF_LA_IS (LIT_CHAR_EQUALS, TOK_DIV_EQ, TOK_DIV);
      break;
    }
    case LIT_CHAR_CIRCUMFLEX:
    {
      IF_LA_IS (LIT_CHAR_EQUALS, TOK_XOR_EQ, TOK_XOR);
      break;
    }
    case LIT_CHAR_PERCENT:
    {
      IF_LA_IS (LIT_CHAR_EQUALS, TOK_MOD_EQ, TOK_MOD);
      break;
    }
    case LIT_CHAR_PLUS:
    {
      IF_LA_IS_OR (LIT_CHAR_PLUS, TOK_DOUBLE_PLUS, LIT_CHAR_EQUALS, TOK_PLUS_EQ, TOK_PLUS);
      break;
    }
    case LIT_CHAR_MINUS:
    {
      IF_LA_IS_OR (LIT_CHAR_MINUS, TOK_DOUBLE_MINUS, LIT_CHAR_EQUALS, TOK_MINUS_EQ, TOK_MINUS);
      break;
    }
    case LIT_CHAR_AMPERSAND:
    {
      IF_LA_IS_OR (LIT_CHAR_AMPERSAND, TOK_DOUBLE_AND, LIT_CHAR_EQUALS, TOK_AND_EQ, TOK_AND);
      break;
    }
    case LIT_CHAR_VLINE:
    {
      IF_LA_IS_OR (LIT_CHAR_VLINE, TOK_DOUBLE_OR, LIT_CHAR_EQUALS, TOK_OR_EQ, TOK_OR);
      break;
    }
    case LIT_CHAR_LESS_THAN:
    {
      switch (LA (1))
      {
        case LIT_CHAR_LESS_THAN: IF_LA_N_IS (LIT_CHAR_EQUALS, TOK_LSHIFT_EQ, TOK_LSHIFT, 2); break;
        case LIT_CHAR_EQUALS: RETURN_PUNC_EX (TOK_LESS_EQ, 2); break;
        default: RETURN_PUNC (TOK_LESS);
      }
      break;
    }
    case LIT_CHAR_GREATER_THAN:
    {
      switch (LA (1))
      {
        case LIT_CHAR_GREATER_THAN:
        {
          switch (LA (2))
          {
            case LIT_CHAR_GREATER_THAN: IF_LA_N_IS (LIT_CHAR_EQUALS, TOK_RSHIFT_EX_EQ, TOK_RSHIFT_EX, 3); break;
            case LIT_CHAR_EQUALS: RETURN_PUNC_EX (TOK_RSHIFT_EQ, 3); break;
            default: RETURN_PUNC_EX (TOK_RSHIFT, 2);
          }
          break;
        }
        case LIT_CHAR_EQUALS: RETURN_PUNC_EX (TOK_GREATER_EQ, 2); break;
        default: RETURN_PUNC (TOK_GREATER);
      }
      break;
    }
    case LIT_CHAR_EQUALS:
    {
      if (LA (1) == LIT_CHAR_EQUALS)
      {
        IF_LA_N_IS (LIT_CHAR_EQUALS, TOK_TRIPLE_EQ, TOK_DOUBLE_EQ, 2);
      }
      else
      {
        RETURN_PUNC (TOK_EQ);
      }
      break;
    }
    case LIT_CHAR_EXCLAMATION:
    {
      if (LA (1) == LIT_CHAR_EQUALS)
      {
        IF_LA_N_IS (LIT_CHAR_EQUALS, TOK_NOT_DOUBLE_EQ, TOK_NOT_EQ, 2);
      }
      else
      {
        RETURN_PUNC (TOK_NOT);
      }
      break;
    }
  }

  PARSE_ERROR (JSP_EARLY_ERROR_SYNTAX, "Illegal character", lit_utf8_iterator_get_pos (&src_iter));
} /* lexer_parse_token */
Example #20
/* In this function we cannot use the strtol function,
   since there are no octal literals in ECMAScript.  */
static token
parse_number (void)
{
  ecma_char_t c = LA (0);
  bool is_hex = false;
  bool is_fp = false;
  bool is_exp = false;
  bool is_overflow = false;
  ecma_number_t fp_res = .0;
  size_t tok_length = 0, i;
  uint32_t res = 0;
  token known_token;

  JERRY_ASSERT (isdigit (c) || c == '.');

  if (c == '0')
  {
    if (LA (1) == 'x' || LA (1) == 'X')
    {
      is_hex = true;
    }
  }

  if (c == '.')
  {
    JERRY_ASSERT (!isalpha (LA (1)));
    is_fp = true;
  }

  if (is_hex)
  {
    // Eat up '0x'
    consume_char ();
    consume_char ();
    new_token ();
    while (true)
    {
      c = LA (0);
      if (!isxdigit (c))
      {
        break;
      }
      consume_char ();
    }

    if (isalpha (c) || c == '_' || c == '$')
    {
      PARSE_ERROR ("Integer literal shall not contain non-digit characters", buffer - buffer_start);
    }

    tok_length = (size_t) (buffer - token_start);

    for (i = 0; i < tok_length; i++)
    {
      if (!is_overflow)
      {
        res = (res << 4) + ecma_char_hex_to_int (token_start[i]);
      }
      else
      {
        fp_res = fp_res * 16 + (ecma_number_t) ecma_char_hex_to_int (token_start[i]);
      }

      if (res > 255)
      {
        fp_res = (ecma_number_t) res;
        is_overflow = true;
        res = 0;
      }
    }

    if (is_overflow)
    {
      known_token = convert_seen_num_to_token (fp_res);
      token_start = NULL;
      return known_token;
    }
    else
    {
      known_token = create_token (TOK_SMALL_INT, (uint8_t) res);
      token_start = NULL;
      return known_token;
    }
  }

  JERRY_ASSERT (!is_hex && !is_exp);

  new_token ();

  // Eat up '.'
  if (is_fp)
  {
    consume_char ();
  }

  while (true)
  {
    c = LA (0);
    if (is_fp && c == '.')
    {
      FIXME (/* This is wrong: 1..toString ().  */)
      PARSE_ERROR ("Integer literal shall not contain more than one dot character", buffer - buffer_start);
    }
    if (is_exp && (c == 'e' || c == 'E'))
    {
      PARSE_ERROR ("Integer literal shall not contain more than exponential marker ('e' or 'E')",
                   buffer - buffer_start);
    }

    if (c == '.')
    {
      if (isalpha (LA (1)) || LA (1) == '_' || LA (1) == '$')
      {
        PARSE_ERROR ("Integer literal shall not contain non-digit character after got character",
                     buffer - buffer_start);
      }
      is_fp = true;
      consume_char ();
      continue;
    }

    if (c == 'e' || c == 'E')
    {
      if (LA (1) == '-' || LA (1) == '+')
      {
        consume_char ();
      }
      if (!isdigit (LA (1)))
      {
        PARSE_ERROR ("Integer literal shall not contain non-digit character after exponential marker ('e' or 'E')",
                     buffer - buffer_start);
      }
      is_exp = true;
      consume_char ();
      continue;
    }

    if (isalpha (c) || c == '_' || c == '$')
    {
      PARSE_ERROR ("Integer literal shall not contain non-digit characters", buffer - buffer_start);
    }

    if (!isdigit (c))
    {
      break;
    }

    consume_char ();
  }

  tok_length = (size_t) (buffer - token_start);
  if (is_fp || is_exp)
  {
    ecma_number_t res = ecma_utf8_string_to_number (token_start, (jerry_api_size_t) tok_length);
    JERRY_ASSERT (!ecma_number_is_nan (res));
    known_token = convert_seen_num_to_token (res);
    token_start = NULL;
    return known_token;
  }

  if (*token_start == '0' && tok_length != 1)
  {
    if (strict_mode)
    {
      PARSE_ERROR ("Octal tnteger literals are not allowed in strict mode", token_start - buffer_start);
    }
    for (i = 0; i < tok_length; i++)
    {
      if (!is_overflow)
      {
        res = res * 8 + ecma_char_hex_to_int (token_start[i]);
      }
      else
      {
        fp_res = fp_res * 8 + (ecma_number_t) ecma_char_hex_to_int (token_start[i]);
      }
      if (res > 255)
      {
        fp_res = (ecma_number_t) res;
        is_overflow = true;
        res = 0;
      }
    }
  }
  else
  {
    for (i = 0; i < tok_length; i++)
    {
      if (!is_overflow)
      {
        res = res * 10 + ecma_char_hex_to_int (token_start[i]);
      }
      else
      {
        fp_res = fp_res * 10 + (ecma_number_t) ecma_char_hex_to_int (token_start[i]);
      }
      if (res > 255)
      {
        fp_res = (ecma_number_t) res;
        is_overflow = true;
        res = 0;
      }
    }
  }

  if (is_overflow)
  {
    known_token = convert_seen_num_to_token (fp_res);
    token_start = NULL;
    return known_token;
  }
  else
  {
    known_token = create_token (TOK_SMALL_INT, (uint8_t) res);
    token_start = NULL;
    return known_token;
  }
}
Example #21
int tokenize(struct token_list* tk_list, char* file_buffer)
{
  enum Status status;
  size_t token_begin, token_end;
  token_begin = 0, token_end  = 0;
  status = STATUS_INVALID;
  str_toupper(file_buffer);
  /*
   * Careful: it may look wrong not to bound this loop with "i <= len",
   * but we need one more iteration to flush the last token into the token list.
   */
  size_t line_num = 1;
  for (size_t i = 0; ; ++i) {
    struct token_node* tok_node;
    switch (status) {
    case STATUS_LETTER:
      if (!IS_LETTER(file_buffer[i]) && !IS_DIGIT(file_buffer[i])) {
        token_end = i;
        tok_node = create_token(TOKEN_LABEL, file_buffer + token_begin, token_end - token_begin);
        tok_node->type = letter_type(tok_node->liter, tok_node->len);
        token_append(tk_list, tok_node);
        token_begin = i;
        status = next_status(status, file_buffer[i]);
      }
      break;
    case STATUS_PRAGMA:
      if (!IS_LETTER(file_buffer[i])) {
        int type;
        token_end = i;
        type = pragma_type(file_buffer + token_begin, token_end - token_begin);
        if (type < 0) {
          error("invalid pragma ad line %d\n", line_num);
          return -4;
        }
        tok_node = create_token(type, file_buffer + token_begin, token_end - token_begin);
        token_append(tk_list, tok_node);
        token_begin = i;
        status = next_status(status, file_buffer[i]);
      }
      break;
    case STATUS_PUNCTUATION:
      token_end = i;
      tok_node = create_token(file_buffer[token_begin], file_buffer + token_begin, token_end - token_begin);
      token_append(tk_list, tok_node);
      token_begin = i;
      status = next_status(status, file_buffer[i]);
      break;
    case STATUS_NUMBER:
      if (!IS_NUMBER(file_buffer[i])) {
        token_end = i;
        if (!check_number(file_buffer + token_begin, token_end - token_begin)) {
          error("invalid number format at line %d\n", line_num);
          return -2;
        }
        tok_node = create_token(TOKEN_NUMBER, file_buffer + token_begin, token_end - token_begin);
        tok_node->data = parse_number(tok_node->liter);
        token_append(tk_list, tok_node);
        token_begin = i;
        status = next_status(status, file_buffer[i]);
      }
      break;
    case STATUS_BLANK:
      if (!IS_BLANK(file_buffer[i])) {
        token_begin = i;
        status = next_status(status, file_buffer[i]);
      }
      break;
    case STATUS_COMMENTS:
      //once status is in comments, it will always be in comments
      if ('\n' == file_buffer[i]) {
        token_begin = i;
        status = next_status(status, file_buffer[i]);
      }
      break;
    case STATUS_INVALID:
      token_begin = i;
      status = next_status(status, file_buffer[i]);
      if (STATUS_INVALID == status && 0 != file_buffer[i]) {
        error("invalid format at line %d\n", line_num);
        return -3;
      }
      break;
    }
    if (0 == file_buffer[i])
      break;
    else if ('\n' == file_buffer[i])
      ++line_num;
  }
  return 0;
}
Example #22
token get_token()
{ char c = nextchar();
  while (c == ' ' || c == '\n')
    c = nextchar();
  switch(c)
  { case 'a': case 'b': case 'c': case 'd': case 'e':
    case 'f': case 'g': case 'h': case 'i': case 'j':
    case 'k': case 'l': case 'm': case 'n': case 'o':
    case 'p': case 'q': case 'r': case 's': case 't':
    case 'u': case 'v': case 'w': case 'x': case 'y':
    case 'z':
    case 'A': case 'B': case 'C': case 'D': case 'E':
    case 'F': case 'G': case 'H': case 'I': case 'J':
    case 'K': case 'L': case 'M': case 'N': case 'O':
    case 'P': case 'Q': case 'R': case 'S': case 'T':
    case 'U': case 'V': case 'W': case 'X': case 'Y':
    case 'Z': case '_':
    { char temp_string[32];
      int i = 0;
      while (is_string_character(c))
      { temp_string[i] = c;
        i += 1;
        c = nextchar(); }
      backchar();
      temp_string[i] = 0;
      char * s = strdup(temp_string);
      return create_string_token(s, 0); }
      
    case '\"':
    { char temp_string[32];
      c = nextchar();
      int pos = 0;
      while (pos < 31 && c != '\"')
      { temp_string[pos] = c;
        pos += 1;
        c = nextchar(); }
      temp_string[pos] = 0;
      if (pos == 31)
      { error_message("max string length is 31 characters\n");
        exit(1); }
      char * s = strdup(temp_string);
      return create_string_token(s, 1); }

    case '0': case '1': case '2': case '3': case '4':
    case '5': case '6': case '7': case '8': case '9':
    { int value = c - '0';
      c = nextchar();
      while (c >= '0' && c <= '9')
      { value = value * 10 + c - '0';
        c = nextchar(); }
      backchar();
      return create_token(integer_constant, value); }

    case '+': case '-': case '*':
     return create_binary_operator_token(c);

    case '/':
    { c = nextchar();
      if (c == ' ')
        return create_binary_operator_token('/');
      if (c == '/')
      { current_line->character_number = current_line->length;
        return get_token(); }
      else
      { error_message("unrecognized use of '/' character\n");
        exit(1); } }

    case '=':
    { c = nextchar();
      if (c == ' ')
      { error_message("for initialization and assignment use :=");
        exit(1); }
      else if (c == '=')
        return create_token(comparison_operator, equal); }

    case '!':
    { c = nextchar();
      if (c == '=')
        return create_token(comparison_operator, not_equal);
      else
      { error_message("undefined use of '!' character");
        exit(1); } }

    case '<':
    { c = nextchar();
      if (c == '=')
        return create_token(comparison_operator, less_or_equal);
      backchar();
      return create_token(comparison_operator, less_than); }

    case '>':
    { c = nextchar();
      if (c == '=')
        return create_token(comparison_operator, greater_or_equal);
      backchar();
      return create_token(comparison_operator, greater_than); }

    case ':':
    { c = nextchar();
      if (c != '=')
      { error_message("did you mean :=?");
        exit(1); }
      else
        return create_token(assignment, 0); }

    case '(':
      return create_token(open_parenthesis, 0);

    case ')':
      return create_token(close_parenthesis, 0);

    case '{':
      return create_token(open_curly_bracket, 0);

    case '}':
      return create_token(close_curly_bracket, 0);

    case ';':
      return create_token(semi_colon, 0);

    case(0):
      return create_token(end_of_file, 0);

    //todo: return error token
    default:
    { error_message("");
      printf("character %c unrecognized\n", c);
      exit(1); }
  }
}
Example #23
CK_RV parse_readers(ykpiv_state *state, const CK_BYTE_PTR readers, const CK_ULONG len,
                    ykcs11_slot_t *slots, CK_ULONG_PTR n_slots, CK_ULONG_PTR n_with_token) {

    CK_BYTE        i;
    CK_BYTE_PTR    p;
    CK_BYTE_PTR    s;
    CK_ULONG       l;
    slot_vendor_t  slot;

    *n_slots = 0;
    *n_with_token = 0;
    p = readers;

    /*
     * According to pcsc-lite, the format of a reader name is:
     * name [interface] (serial) index slot
     * http://ludovicrousseau.blogspot.se/2010/05/what-is-in-pcsc-reader-name.html
     */
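    /* A hypothetical reader name following that format (illustration only):
     * "ACME Reader [CCID Interface] (12345678) 00 00" */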

    for (i = 0; i < len; i++)
        if (readers[i] == '\0' && i != len - 1) {
            slots[*n_slots].vid = get_vendor_id(p);

            if (slots[*n_slots].vid == UNKNOWN) { // TODO: distinguish between tokenless and unsupported?
                // Unknown slot, just save what info we have
                memset(&slots[*n_slots].info, 0, sizeof(CK_SLOT_INFO));
                memset(slots[*n_slots].info.slotDescription, ' ', sizeof(slots[*n_slots].info.slotDescription));
                if (strlen((char *)p) <= sizeof(slots[*n_slots].info.slotDescription))
                    strncpy((char *)slots[*n_slots].info.slotDescription, (char *)p, strlen((char *)p));
                else
                    strncpy((char *)slots[*n_slots].info.slotDescription, (char *)p, sizeof(slots[*n_slots].info.slotDescription));
            }
            else {
                // Supported slot
                slot = get_slot_vendor(slots[*n_slots].vid);

                // Values must NOT be null terminated; they must be padded with ' '

                memset(slots[*n_slots].info.slotDescription, ' ', sizeof(slots[*n_slots].info.slotDescription));
                s = slots[*n_slots].info.slotDescription;
                l = sizeof(slots[*n_slots].info.slotDescription);
                strncpy((char *)s, (char*)p, l);

                memset(slots[*n_slots].info.manufacturerID, ' ', sizeof(slots[*n_slots].info.manufacturerID));
                s = slots[*n_slots].info.manufacturerID;
                l = sizeof(slots[*n_slots].info.manufacturerID);
                if(slot.get_slot_manufacturer(s, l) != CKR_OK)
                    goto failure;

                if (slot.get_slot_flags(&slots[*n_slots].info.flags) != CKR_OK)
                    goto failure;

                // Treating hw and fw version the same
                if (slot.get_slot_version(&slots[*n_slots].info.hardwareVersion) != CKR_OK)
                    goto failure;

                if (slot.get_slot_version(&slots[*n_slots].info.firmwareVersion) != CKR_OK)
                    goto failure;

                if (has_token(slots + *n_slots)) {
                    // Save token information
                    (*n_with_token)++;

                    if (create_token(state, p, slots + *n_slots) != CKR_OK)
                        goto failure;
                }
            }
            (*n_slots)++;
            p += i + 1;
        }

    return CKR_OK;

failure:
    // TODO: destroy all token objects
    for (i = 0; i < *n_slots; i++)
        if (has_token(slots + i))
            destroy_token(slots + i);

    return CKR_FUNCTION_FAILED;
}
Example #24
/**
 * Parse numeric literal (ECMA-262, v5, 7.8.3)
 *
 * @return token of TOK_SMALL_INT or TOK_NUMBER types
 */
static token
lexer_parse_number (void)
{
  ecma_char_t c = LA (0);
  bool is_hex = false;
  bool is_fp = false;
  ecma_number_t fp_res = .0;
  size_t tok_length = 0, i;
  token known_token;

  JERRY_ASSERT (lit_char_is_decimal_digit (c)
                || c == LIT_CHAR_DOT);

  if (c == LIT_CHAR_0)
  {
    if (LA (1) == LIT_CHAR_LOWERCASE_X
        || LA (1) == LIT_CHAR_UPPERCASE_X)
    {
      is_hex = true;
    }
  }
  else if (c == LIT_CHAR_DOT)
  {
    JERRY_ASSERT (lit_char_is_decimal_digit (LA (1)));
    is_fp = true;
  }

  if (is_hex)
  {
    // Eat up '0x'
    consume_char ();
    consume_char ();
    new_token ();

    c = LA (0);
    if (!lit_char_is_hex_digit (c))
    {
      PARSE_ERROR (JSP_EARLY_ERROR_SYNTAX, "Invalid HexIntegerLiteral", lit_utf8_iterator_get_pos (&src_iter));
    }

    do
    {
      consume_char ();
      c = LA (0);
    }
    while (lit_char_is_hex_digit (c));

    if (lexer_is_char_can_be_identifier_start (c))
    {
      PARSE_ERROR (JSP_EARLY_ERROR_SYNTAX,
                   "Identifier just after integer literal",
                   lit_utf8_iterator_get_pos (&src_iter));
    }

    tok_length = (size_t) (TOK_SIZE ());

    const lit_utf8_byte_t *fp_buf_p = TOK_START ();
    /* token is constructed at end of function */
    for (i = 0; i < tok_length; i++)
    {
      fp_res = fp_res * 16 + (ecma_number_t) lit_char_hex_to_int (fp_buf_p[i]);
    }
  }
  else
  {
    bool is_exp = false;

    new_token ();

    // Eat up '.'
    if (is_fp)
    {
      consume_char ();
    }

    while (true)
    {
      c = LA (0);

      if (c == LIT_CHAR_DOT)
      {
        if (is_fp)
        {
          /* token is constructed at end of function */
          break;
        }
        else
        {
          is_fp = true;
          consume_char ();

          continue;
        }
      }
      else if (c == LIT_CHAR_LOWERCASE_E
               || c == LIT_CHAR_UPPERCASE_E)
      {
        if (is_exp)
        {
          PARSE_ERROR (JSP_EARLY_ERROR_SYNTAX,
                       "Numeric literal shall not contain more than exponential marker ('e' or 'E')",
                       lit_utf8_iterator_get_pos (&src_iter));
        }
        else
        {
          is_exp = true;
          consume_char ();

          if (LA (0) == LIT_CHAR_MINUS
              || LA (0) == LIT_CHAR_PLUS)
          {
            consume_char ();
          }

          continue;
        }
      }
      else if (!lit_char_is_decimal_digit (c))
      {
        if (lexer_is_char_can_be_identifier_start (c))
        {
          PARSE_ERROR (JSP_EARLY_ERROR_SYNTAX,
                       "Numeric literal shall not contain non-numeric characters",
                       lit_utf8_iterator_get_pos (&src_iter));
        }

        /* token is constructed at end of function */
        break;
      }

      consume_char ();
    }

    tok_length = (size_t) (TOK_SIZE ());

    if (is_fp || is_exp)
    {
      ecma_number_t res = ecma_utf8_string_to_number (TOK_START (), (jerry_api_size_t) tok_length);
      JERRY_ASSERT (!ecma_number_is_nan (res));

      known_token = convert_seen_num_to_token (res);
      is_token_parse_in_progress = NULL;

      return known_token;
    }
    else if (*TOK_START () == LIT_CHAR_0
             && tok_length != 1)
    {
      /* Octal integer literals */
      if (strict_mode)
      {
        PARSE_ERROR (JSP_EARLY_ERROR_SYNTAX, "Octal integer literals are not allowed in strict mode", token_start_pos);
      }
      else
      {
        /* token is constructed at end of function */
        const lit_utf8_byte_t *fp_buf_p = TOK_START ();

        for (i = 0; i < tok_length; i++)
        {
          fp_res = fp_res * 8 + (ecma_number_t) lit_char_hex_to_int (fp_buf_p[i]);
        }
      }
    }
    else
    {
      const lit_utf8_byte_t *fp_buf_p = TOK_START ();
      /* token is constructed at end of function */

      ecma_number_t mult = 1.0f;
      for (i = tok_length; i > 0; i--, mult *= 10)
      {
        fp_res += (ecma_number_t) lit_char_hex_to_int (fp_buf_p[i - 1]) * mult;
      }
    }
  }

  if (fp_res >= 0 && fp_res <= 255 && (uint8_t) fp_res == fp_res)
  {
    known_token = create_token (TOK_SMALL_INT, (uint8_t) fp_res);
    is_token_parse_in_progress = NULL;
    return known_token;
  }
  else
  {
    known_token = convert_seen_num_to_token (fp_res);
    is_token_parse_in_progress = NULL;
    return known_token;
  }
} /* lexer_parse_number */
Example #25
void st_crtToken(State s, register Ptt type)  {st_setToken(s, create_token(s->flux, type));}
Example #26
Token *Lexer::scan() {

#define YYCTYPE char
#define YYCURSOR (this->cursor)
#define YYLIMIT (this->lim)
#define YYMARKER (this->marker)
#define	YYFILL(n) {fill();}

	for(;;) {
		capture_begin();
	
		
#line 22 "<stdout>"
{
	YYCTYPE yych;

	if ((YYLIMIT - YYCURSOR) < 5) YYFILL(5);
	yych = *YYCURSOR;
	switch (yych) {
	case 0x00:	goto yy26;
	case '\t':
	case '\f':
	case ' ':	goto yy30;
	case '\n':
	case '\r':	goto yy28;
	case '(':	goto yy22;
	case ')':	goto yy24;
	case '*':	goto yy18;
	case '+':	goto yy14;
	case ',':	goto yy10;
	case '-':	goto yy16;
	case '.':	goto yy8;
	case '/':	goto yy20;
	case '0':
	case '1':
	case '2':
	case '3':
	case '4':
	case '5':
	case '6':
	case '7':
	case '8':
	case '9':	goto yy6;
	case ':':	goto yy12;
	case 'A':
	case 'B':
	case 'C':
	case 'D':
	case 'E':
	case 'F':
	case 'G':
	case 'H':
	case 'I':
	case 'J':
	case 'K':
	case 'L':
	case 'M':
	case 'N':
	case 'O':
	case 'P':
	case 'Q':
	case 'R':
	case 'S':
	case 'T':
	case 'U':
	case 'V':
	case 'W':
	case 'X':
	case 'Y':
	case 'Z':
	case 'a':
	case 'b':
	case 'c':
	case 'e':
	case 'f':
	case 'g':
	case 'h':
	case 'i':
	case 'j':
	case 'k':
	case 'l':
	case 'm':
	case 'n':
	case 'o':
	case 'q':
	case 'r':
	case 's':
	case 't':
	case 'u':
	case 'v':
	case 'w':
	case 'x':
	case 'y':
	case 'z':	goto yy5;
	case 'd':	goto yy2;
	case 'p':	goto yy4;
	default:	goto yy32;
	}
yy2:
	++YYCURSOR;
	switch ((yych = *YYCURSOR)) {
	case 'e':	goto yy46;
	default:	goto yy41;
	}
yy3:
#line 31 "LexerX.re"
	{ return create_name_token(capture_string()); }
#line 117 "<stdout>"
yy4:
	yych = *++YYCURSOR;
	switch (yych) {
	case 'a':	goto yy42;
	default:	goto yy41;
	}
yy5:
	yych = *++YYCURSOR;
	goto yy41;
yy6:
	yych = *(YYMARKER = ++YYCURSOR);
	goto yy39;
yy7:
#line 33 "LexerX.re"
	{ return create_int_token(capture_int()); }
#line 133 "<stdout>"
yy8:
	++YYCURSOR;
	switch ((yych = *YYCURSOR)) {
	case '0':
	case '1':
	case '2':
	case '3':
	case '4':
	case '5':
	case '6':
	case '7':
	case '8':
	case '9':	goto yy33;
	default:	goto yy9;
	}
yy9:
#line 59 "LexerX.re"
	{ 
			printf( "Unrecognized character: %c\n", *(this->ccursor) );
			continue;
		}
#line 155 "<stdout>"
yy10:
	++YYCURSOR;
#line 37 "LexerX.re"
	{ return create_token(TOKEN_COMMA); }
#line 160 "<stdout>"
yy12:
	++YYCURSOR;
#line 39 "LexerX.re"
	{ return create_token(TOKEN_COLON); }
#line 165 "<stdout>"
yy14:
	++YYCURSOR;
#line 41 "LexerX.re"
	{ return create_token(TOKEN_ADD); }
#line 170 "<stdout>"
yy16:
	++YYCURSOR;
#line 43 "LexerX.re"
	{ return create_token(TOKEN_SUB); }
#line 175 "<stdout>"
yy18:
	++YYCURSOR;
#line 45 "LexerX.re"
	{ return create_token(TOKEN_MUL); }
#line 180 "<stdout>"
yy20:
	++YYCURSOR;
#line 47 "LexerX.re"
	{ return create_token(TOKEN_DIV); }
#line 185 "<stdout>"
yy22:
	++YYCURSOR;
#line 49 "LexerX.re"
	{ return create_token(TOKEN_LPAREN); }
#line 190 "<stdout>"
yy24:
	++YYCURSOR;
#line 51 "LexerX.re"
	{ return create_token(TOKEN_RPAREN); }
#line 195 "<stdout>"
yy26:
	++YYCURSOR;
#line 53 "LexerX.re"
	{ return create_token(TOKEN_EOF); }
#line 200 "<stdout>"
yy28:
	++YYCURSOR;
#line 55 "LexerX.re"
	{ return emit_newline(); }
#line 205 "<stdout>"
yy30:
	++YYCURSOR;
#line 57 "LexerX.re"
	{ continue; }
#line 210 "<stdout>"
yy32:
	yych = *++YYCURSOR;
	goto yy9;
yy33:
	++YYCURSOR;
	if (YYLIMIT <= YYCURSOR) YYFILL(1);
	yych = *YYCURSOR;
	switch (yych) {
	case '0':
	case '1':
	case '2':
	case '3':
	case '4':
	case '5':
	case '6':
	case '7':
	case '8':
	case '9':	goto yy33;
	default:	goto yy35;
	}
yy35:
#line 35 "LexerX.re"
	{ return create_float_token(capture_float()); }
#line 234 "<stdout>"
yy36:
	yych = *++YYCURSOR;
	switch (yych) {
	case '0':
	case '1':
	case '2':
	case '3':
	case '4':
	case '5':
	case '6':
	case '7':
	case '8':
	case '9':	goto yy33;
	default:	goto yy37;
	}
yy37:
	YYCURSOR = YYMARKER;
	goto yy7;
yy38:
	YYMARKER = ++YYCURSOR;
	if ((YYLIMIT - YYCURSOR) < 2) YYFILL(2);
	yych = *YYCURSOR;
yy39:
	switch (yych) {
	case '.':	goto yy36;
	case '0':
	case '1':
	case '2':
	case '3':
	case '4':
	case '5':
	case '6':
	case '7':
	case '8':
	case '9':	goto yy38;
	default:	goto yy7;
	}
yy40:
	++YYCURSOR;
	if (YYLIMIT <= YYCURSOR) YYFILL(1);
	yych = *YYCURSOR;
yy41:
	switch (yych) {
	case 'A':
	case 'B':
	case 'C':
	case 'D':
	case 'E':
	case 'F':
	case 'G':
	case 'H':
	case 'I':
	case 'J':
	case 'K':
	case 'L':
	case 'M':
	case 'N':
	case 'O':
	case 'P':
	case 'Q':
	case 'R':
	case 'S':
	case 'T':
	case 'U':
	case 'V':
	case 'W':
	case 'X':
	case 'Y':
	case 'Z':
	case 'a':
	case 'b':
	case 'c':
	case 'd':
	case 'e':
	case 'f':
	case 'g':
	case 'h':
	case 'i':
	case 'j':
	case 'k':
	case 'l':
	case 'm':
	case 'n':
	case 'o':
	case 'p':
	case 'q':
	case 'r':
	case 's':
	case 't':
	case 'u':
	case 'v':
	case 'w':
	case 'x':
	case 'y':
	case 'z':	goto yy40;
	default:	goto yy3;
	}
yy42:
	yych = *++YYCURSOR;
	switch (yych) {
	case 's':	goto yy43;
	default:	goto yy41;
	}
yy43:
	yych = *++YYCURSOR;
	switch (yych) {
	case 's':	goto yy44;
	default:	goto yy41;
	}
yy44:
	++YYCURSOR;
	switch ((yych = *YYCURSOR)) {
	case 'A':
	case 'B':
	case 'C':
	case 'D':
	case 'E':
	case 'F':
	case 'G':
	case 'H':
	case 'I':
	case 'J':
	case 'K':
	case 'L':
	case 'M':
	case 'N':
	case 'O':
	case 'P':
	case 'Q':
	case 'R':
	case 'S':
	case 'T':
	case 'U':
	case 'V':
	case 'W':
	case 'X':
	case 'Y':
	case 'Z':
	case 'a':
	case 'b':
	case 'c':
	case 'd':
	case 'e':
	case 'f':
	case 'g':
	case 'h':
	case 'i':
	case 'j':
	case 'k':
	case 'l':
	case 'm':
	case 'n':
	case 'o':
	case 'p':
	case 'q':
	case 'r':
	case 's':
	case 't':
	case 'u':
	case 'v':
	case 'w':
	case 'x':
	case 'y':
	case 'z':	goto yy40;
	default:	goto yy45;
	}
yy45:
#line 29 "LexerX.re"
	{ return create_token(TOKEN_PASS); }
#line 404 "<stdout>"
yy46:
	yych = *++YYCURSOR;
	switch (yych) {
	case 'f':	goto yy47;
	default:	goto yy41;
	}
yy47:
	++YYCURSOR;
	switch ((yych = *YYCURSOR)) {
	case 'A':
	case 'B':
	case 'C':
	case 'D':
	case 'E':
	case 'F':
	case 'G':
	case 'H':
	case 'I':
	case 'J':
	case 'K':
	case 'L':
	case 'M':
	case 'N':
	case 'O':
	case 'P':
	case 'Q':
	case 'R':
	case 'S':
	case 'T':
	case 'U':
	case 'V':
	case 'W':
	case 'X':
	case 'Y':
	case 'Z':
	case 'a':
	case 'b':
	case 'c':
	case 'd':
	case 'e':
	case 'f':
	case 'g':
	case 'h':
	case 'i':
	case 'j':
	case 'k':
	case 'l':
	case 'm':
	case 'n':
	case 'o':
	case 'p':
	case 'q':
	case 'r':
	case 's':
	case 't':
	case 'u':
	case 'v':
	case 'w':
	case 'x':
	case 'y':
	case 'z':	goto yy40;
	default:	goto yy48;
	}
yy48:
#line 27 "LexerX.re"
	{ return create_token(TOKEN_DEF); }
#line 471 "<stdout>"
}
#line 63 "LexerX.re"

	}
}
Example #27
t_point parse_pipe(Hash *h, int level)
{
	char chars[MAX_NAME_LENGTH];
	char *c = chars;
	int whitespaces = 1; // did we just read whitespace?
	int first = 1;
	int is_def = 0;
	int is_close_tag = 0;
	Cons all = {NIL, NIL};
	Cons *l = &all;
	t_point s = NIL;


	while ((c - chars) < MAX_NAME_LENGTH && (*c = read_char()) != EOF) {
		if (is_whitespace(*c) || (is_close_tag = (*c == CLOSE_TAG))) {
			if (whitespaces && !is_close_tag) continue;

			*c = '\0';
			whitespaces = 1;

			if (first && ((is_def = (strcmp(chars, "def") == 0))
						|| (strcmp(chars, "lambda") == 0)))
			{
				if (is_close_tag) ERROR(SYNTAX_ERROR);
				if (is_def) {
					// is def
					if (!read_word(chars, 0)) ERROR(SYNTAX_ERROR);
					init_def(h, chars, level);
				} else {
					// is lambda
					return kontext_params(init_def(h, NULL, level), level);
				}

				break;
			}

		// c is not empty
			if (c != chars) {
				l->b = create_token(h, chars, level);

				if (first) {
					l->a = l->b;
					l->b = NIL;
				} else {
					l->b = pnew_List(l->b);
					l = next(l);
				}
			}

			if (is_close_tag) break;

			c = chars;
			first = 0;
			continue;
		}

		// keep reading
		if (!whitespaces) { c++; continue; }

		switch (*c) {
			case '\'': s = parse_char(); break;
			case '"':  s = parse_string(); break;
			case OPEN_TAG: s = parse_pipe(h, level); break;

			default: c++; whitespaces = 0; continue;
		}

		if (s != NIL) {
			if (first) {
				l->a = s;
				l->b = NIL;
				first = 0;
			} else {
				l->b = pnew_List(s);
				l = next(l);
			}

			s = NIL;
		}
	}

	if ((c - chars) >= MAX_NAME_LENGTH)
		ERROR(SYNTAX_ERROR);
	
	if (all.b == NIL && !is_Func(all.a))
		return all.a;
	else
		return pnew_Thunk(all.a, get_Cons(all.b));
}
Example #28
static token
lexer_next_token_private (void)
{
  ecma_char_t c = LA (0);

  JERRY_ASSERT (token_start == NULL);

  if (isalpha (c) || c == '$' || c == '_')
  {
    return parse_name ();
  }

  if (isdigit (c) || (c == '.' && isdigit (LA (1))))
  {
    return parse_number ();
  }

  if (c == '\n')
  {
    consume_char ();
    return create_token (TOK_NEWLINE, 0);
  }

  if (c == '\0')
  {
    return create_token (TOK_EOF, 0);
  }

  if (c == '\'' || c == '"')
  {
    return parse_string ();
  }

  if (isspace (c))
  {
    grobble_whitespaces ();
    return lexer_next_token_private ();
  }

  if (c == '/' && LA (1) == '*')
  {
    if (replace_comment_by_newline ())
    {
      token ret;

      ret.type = TOK_NEWLINE;
      ret.uid = 0;

      return ret;
    }
    else
    {
      return lexer_next_token_private ();
    }
  }


  if (c == '/')
  {
    if (LA (1) == '/')
    {
      replace_comment_by_newline ();
      return lexer_next_token_private ();
    }
    else if (!(sent_token.type == TOK_NAME
             || sent_token.type == TOK_NULL
             || sent_token.type == TOK_BOOL
             || sent_token.type == TOK_CLOSE_BRACE
             || sent_token.type == TOK_CLOSE_SQUARE
             || sent_token.type == TOK_CLOSE_PAREN
             || sent_token.type == TOK_SMALL_INT
             || sent_token.type == TOK_NUMBER
             || sent_token.type == TOK_STRING
             || sent_token.type == TOK_REGEXP))
    {
      return parse_regexp ();
    }
  }

  switch (c)
  {
    case '{': RETURN_PUNC (TOK_OPEN_BRACE); break;
    case '}': RETURN_PUNC (TOK_CLOSE_BRACE); break;
    case '(': RETURN_PUNC (TOK_OPEN_PAREN); break;
    case ')': RETURN_PUNC (TOK_CLOSE_PAREN); break;
    case '[': RETURN_PUNC (TOK_OPEN_SQUARE); break;
    case ']': RETURN_PUNC (TOK_CLOSE_SQUARE); break;
    case '.': RETURN_PUNC (TOK_DOT); break;
    case ';': RETURN_PUNC (TOK_SEMICOLON); break;
    case ',': RETURN_PUNC (TOK_COMMA); break;
    case '~': RETURN_PUNC (TOK_COMPL); break;
    case ':': RETURN_PUNC (TOK_COLON); break;
    case '?': RETURN_PUNC (TOK_QUERY); break;

    case '*': IF_LA_IS ('=', TOK_MULT_EQ, TOK_MULT); break;
    case '/': IF_LA_IS ('=', TOK_DIV_EQ, TOK_DIV); break;
    case '^': IF_LA_IS ('=', TOK_XOR_EQ, TOK_XOR); break;
    case '%': IF_LA_IS ('=', TOK_MOD_EQ, TOK_MOD); break;

    case '+': IF_LA_IS_OR ('+', TOK_DOUBLE_PLUS, '=', TOK_PLUS_EQ, TOK_PLUS); break;
    case '-': IF_LA_IS_OR ('-', TOK_DOUBLE_MINUS, '=', TOK_MINUS_EQ, TOK_MINUS); break;
    case '&': IF_LA_IS_OR ('&', TOK_DOUBLE_AND, '=', TOK_AND_EQ, TOK_AND); break;
    case '|': IF_LA_IS_OR ('|', TOK_DOUBLE_OR, '=', TOK_OR_EQ, TOK_OR); break;

    case '<':
    {
      switch (LA (1))
      {
        case '<': IF_LA_N_IS ('=', TOK_LSHIFT_EQ, TOK_LSHIFT, 2); break;
        case '=': RETURN_PUNC_EX (TOK_LESS_EQ, 2); break;
        default: RETURN_PUNC (TOK_LESS);
      }
      break;
    }
    case '>':
    {
      switch (LA (1))
      {
        case '>':
        {
          switch (LA (2))
          {
            case '>': IF_LA_N_IS ('=', TOK_RSHIFT_EX_EQ, TOK_RSHIFT_EX, 3); break;
            case '=': RETURN_PUNC_EX (TOK_RSHIFT_EQ, 3); break;
            default: RETURN_PUNC_EX (TOK_RSHIFT, 2);
          }
          break;
        }
        case '=': RETURN_PUNC_EX (TOK_GREATER_EQ, 2); break;
        default: RETURN_PUNC (TOK_GREATER);
      }
      break;
    }
    case '=':
    {
      if (LA (1) == '=')
      {
        IF_LA_N_IS ('=', TOK_TRIPLE_EQ, TOK_DOUBLE_EQ, 2);
      }
      else
      {
        RETURN_PUNC (TOK_EQ);
      }
      break;
    }
    case '!':
    {
      if (LA (1) == '=')
      {
        IF_LA_N_IS ('=', TOK_NOT_DOUBLE_EQ, TOK_NOT_EQ, 2);
      }
      else
      {
        RETURN_PUNC (TOK_NOT);
      }
      break;
    }
    default: PARSE_SORRY ("Unknown character", buffer - buffer_start);
  }
  PARSE_SORRY ("Unknown character", buffer - buffer_start);
}
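The '/' handling above is the subtle part: whether '/' begins a regexp literal or the division operator is decided from the previously emitted token. A simplified standalone sketch of that heuristic (tk_kind and slash_starts_regexp are illustrative names, not JerryScript API, and the token list is abbreviated):

#include <stdbool.h>

typedef enum { TK_NONE, TK_NAME, TK_NUMBER, TK_STRING,
               TK_CLOSE_PAREN, TK_CLOSE_SQUARE, TK_CLOSE_BRACE } tk_kind;

/* '/' can only start a regexp when the previous token cannot end an
 * expression; otherwise it must be the division operator. */
static bool
slash_starts_regexp (tk_kind prev)
{
  switch (prev)
  {
    case TK_NAME:
    case TK_NUMBER:
    case TK_STRING:
    case TK_CLOSE_PAREN:
    case TK_CLOSE_SQUARE:
    case TK_CLOSE_BRACE:
    {
      return false; /* an expression just ended -> division */
    }
    default:
    {
      return true; /* start of an expression -> regexp literal */
    }
  }
}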
Ejemplo n.º 29
0
/**
 * Try to decode specified string as keyword
 *
 * @return if specified string represents a keyword, return corresponding keyword token,
 *         else if it is 'null' - return TOK_NULL token,
 *         else if it is 'true' or 'false' - return TOK_BOOL with corresponding boolean value,
 *         else - return empty_token.
 */
static token
decode_keyword (const lit_utf8_byte_t *str_p, /**< characters buffer */
                lit_utf8_size_t str_size) /**< string's length */
{
  typedef struct
  {
    const char *keyword_p;
    keyword keyword_id;
  } kw_descr_t;

  const kw_descr_t keywords[] =
  {
#define KW_DESCR(literal, keyword_id) { literal, keyword_id }
    KW_DESCR ("break", KW_BREAK),
    KW_DESCR ("case", KW_CASE),
    KW_DESCR ("catch", KW_CATCH),
    KW_DESCR ("class", KW_CLASS),
    KW_DESCR ("const", KW_CONST),
    KW_DESCR ("continue", KW_CONTINUE),
    KW_DESCR ("debugger", KW_DEBUGGER),
    KW_DESCR ("default", KW_DEFAULT),
    KW_DESCR ("delete", KW_DELETE),
    KW_DESCR ("do", KW_DO),
    KW_DESCR ("else", KW_ELSE),
    KW_DESCR ("enum", KW_ENUM),
    KW_DESCR ("export", KW_EXPORT),
    KW_DESCR ("extends", KW_EXTENDS),
    KW_DESCR ("finally", KW_FINALLY),
    KW_DESCR ("for", KW_FOR),
    KW_DESCR ("function", KW_FUNCTION),
    KW_DESCR ("if", KW_IF),
    KW_DESCR ("in", KW_IN),
    KW_DESCR ("instanceof", KW_INSTANCEOF),
    KW_DESCR ("interface", KW_INTERFACE),
    KW_DESCR ("import", KW_IMPORT),
    KW_DESCR ("implements", KW_IMPLEMENTS),
    KW_DESCR ("let", KW_LET),
    KW_DESCR ("new", KW_NEW),
    KW_DESCR ("package", KW_PACKAGE),
    KW_DESCR ("private", KW_PRIVATE),
    KW_DESCR ("protected", KW_PROTECTED),
    KW_DESCR ("public", KW_PUBLIC),
    KW_DESCR ("return", KW_RETURN),
    KW_DESCR ("static", KW_STATIC),
    KW_DESCR ("super", KW_SUPER),
    KW_DESCR ("switch", KW_SWITCH),
    KW_DESCR ("this", KW_THIS),
    KW_DESCR ("throw", KW_THROW),
    KW_DESCR ("try", KW_TRY),
    KW_DESCR ("typeof", KW_TYPEOF),
    KW_DESCR ("var", KW_VAR),
    KW_DESCR ("void", KW_VOID),
    KW_DESCR ("while", KW_WHILE),
    KW_DESCR ("with", KW_WITH),
    KW_DESCR ("yield", KW_YIELD)
#undef KW_DESCR
  };

  keyword kw = KW_NONE;

  for (uint32_t i = 0; i < sizeof (keywords) / sizeof (kw_descr_t); i++)
  {
    if (lit_compare_utf8_strings (str_p,
                                  str_size,
                                  (lit_utf8_byte_t *) keywords[i].keyword_p,
                                  (lit_utf8_size_t) strlen (keywords[i].keyword_p)))
    {
      kw = keywords[i].keyword_id;
      break;
    }
  }

  if (!strict_mode)
  {
    switch (kw)
    {
      case KW_INTERFACE:
      case KW_IMPLEMENTS:
      case KW_LET:
      case KW_PACKAGE:
      case KW_PRIVATE:
      case KW_PROTECTED:
      case KW_PUBLIC:
      case KW_STATIC:
      case KW_YIELD:
      {
        return convert_string_to_token (TOK_NAME, str_p, (ecma_length_t) str_size);
      }

      default:
      {
        break;
      }
    }
  }

  if (kw != KW_NONE)
  {
    return create_token (TOK_KEYWORD, kw);
  }
  else
  {
    if (lit_compare_utf8_string_and_magic_string (str_p, str_size, LIT_MAGIC_STRING_FALSE))
    {
      return create_token (TOK_BOOL, false);
    }
    else if (lit_compare_utf8_string_and_magic_string (str_p, str_size, LIT_MAGIC_STRING_TRUE))
    {
      return create_token (TOK_BOOL, true);
    }
    else if (lit_compare_utf8_string_and_magic_string (str_p, str_size, LIT_MAGIC_STRING_NULL))
    {
      return create_token (TOK_NULL, 0);
    }
    else
    {
      return empty_token;
    }
  }
} /* decode_keyword */
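The same table-driven lookup, reduced to plain C strings and independent of the engine's literal helpers (lookup_keyword and the kwx_* names are hypothetical, and the keyword table is abbreviated):

#include <stddef.h>
#include <string.h>

typedef enum { KWX_NONE, KWX_IF, KWX_ELSE, KWX_WHILE, KWX_RETURN } kwx_id;

static kwx_id
lookup_keyword (const char *str, size_t len)
{
  static const struct { const char *literal; kwx_id id; } table[] =
  {
    { "if", KWX_IF }, { "else", KWX_ELSE },
    { "while", KWX_WHILE }, { "return", KWX_RETURN },
  };

  for (size_t i = 0; i < sizeof (table) / sizeof (table[0]); i++)
  {
    if (strlen (table[i].literal) == len
        && memcmp (table[i].literal, str, len) == 0)
    {
      return table[i].id;
    }
  }
  return KWX_NONE; /* not a keyword: the caller emits a name token instead */
}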
Ejemplo n.º 30
0
struct token_t *get_token(FILE *file) {
    static char c = 0;
    static char next = ' ';
    static unsigned line = 0, column = -1;
    struct token_t *token = NULL;
    int i = 0;

    while (isspace(next)) {
        if (next == '\n') {
            ++line;
            column = 0;
        } else {
            ++column;
        }
        next = fgetc(file);
    }
    c = next;
    next = fgetc(file);
    
    if (isalpha(c)) { /* ident */
        token = create_token(IDENT);
        set_line_column(token, line, column);
        token->ident[0] = c;
        ++column;
        while (isalnum(next)) {
            c = next;
            next = fgetc(file);
            ++column;
            token->ident[++i] = c;
        }
    } else if (isdigit(c)) { /* number */
        char buffer[BUFFER_SIZE];
        token = create_token(NUMBER);
        set_line_column(token, line, column);
        buffer[0] = c;
        ++column;
        while (isdigit(next)) {
            c = next;
            next = fgetc(file);
            ++column;
            buffer[++i] = c;
        }
        buffer[i + 1] = '\0'; /* terminate the digits before conversion */
        token->number = atoi(buffer);
    } else if (c == '"') { /* text */
        token = create_token(TEXT);
        ++column;
        set_line_column(token, line, column);
        do {
            c = next;
            next = fgetc(file);
            ++column;
            token->ident[i++] = c;
            if (feof(file)) {
                free_token(token);
                token = create_token(ERROR);
                set_line_column(token, line, column);
                token->error.type = BAD_EOF;
                return token;
            } else if (next == '\n') {
                free_token(token);
                token = create_token(ERROR);
                set_line_column(token, line, column);
                token->error.type = BAD_EOL;
                return token;
            }
        } while (next != '"');
        next = fgetc(file);
        ++column;
    } else if (c == '#') { /* comment */
        token = create_token(COMMENT);
        set_line_column(token, line, column);
        do {
            c = next;
            next = fgetc(file);
            token->ident[i++] = c;
            ++column;
        } while (next != '\n' && !feof(file));
    } else if (c == '[') {
        token = create_token(LEFTBR);
        set_line_column(token, line, column++);
    } else if (c == ']') {
        token = create_token(RIGHTBR);
        set_line_column(token, line, column++);
    } else if (c == '=') {
        token = create_token(EQ);
        set_line_column(token, line, column++);
    } else if (feof(file)) {
        token = create_token(EOF_T);
        set_line_column(token, line, column++);
    } else {
        token = create_token(ERROR);
        set_line_column(token, line, column++);
        token->error.type = BAD_CHAR;
        token->error.c = c;
    }

    return token;
}
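A hedged usage sketch for get_token() above; it assumes struct token_t exposes its kind through a 'type' field and that the project's lexer header is included — only get_token, free_token and the enum constants come from the snippet itself:

#include <stdio.h>

static void dump_tokens(FILE *file) {
    struct token_t *token;

    for (;;) {
        token = get_token(file);
        if (token->type == ERROR) {
            fprintf(stderr, "lexical error %d\n", token->error.type);
            free_token(token);
            break;
        }
        if (token->type == EOF_T) {
            free_token(token);
            break;
        }
        /* hand the token to the parser here */
        free_token(token);
    }
}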