/* Evaluate a conditional token and return the zero-based index of the branch
 * that should be rendered (-1 for a false one-option conditional). */
int evaluate_conditional(struct gui_wps *gwps, int offset,
                         struct conditional *conditional, int num_options)
{
    if (!gwps)
        return false;

    char result[128];
    const char *value;
    int intval = num_options < 2 ? 2 : num_options; /* get_token_value needs to know
                                                       the number of options in the enum */

    value = get_token_value(gwps, conditional->token, offset,
                            result, sizeof(result), &intval);

    /* intval is now the number of the enum option we want to read,
     * starting from 1. If intval is -1, we check if value is empty. */
    if (intval == -1)
    {
        if (num_options == 1) /* so %?AA<true> */
            intval = (value && *value) ? 1 : 0; /* returned as 0 for true, -1 for false */
        else
            intval = (value && *value) ? 1 : num_options;
    }
    else if (intval > num_options || intval < 1)
        intval = num_options;

    return intval - 1;
}
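/* Worked example (illustrative, not from the original source): for a four-way
 * conditional such as %?pv<a|b|c|d>, num_options is 4 and get_token_value() is
 * expected to set intval to a value in 1..4, so evaluate_conditional() returns
 * 0..3 - the zero-based index of the branch the caller should render.  For a
 * one-option conditional %?xx<true> it returns 0 when the tag produces a value
 * and -1 when it does not. */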
char *get_on_off(char *line, bool *value)
{
    int tvalue;
    char *rest;

    rest = get_token_value(line, &tvalue, on_off, true, REQUIRES_NO_MORE,
                           "Invalid on/off");
    *value = (bool) tvalue;
    return rest;
}
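/* Illustrative sketch only: the on_off, read_write_flags, open_flags, lock_modes
 * and lock_types arguments passed to this flavour of get_token_value() are
 * assumed to be keyword-to-value tables along these lines.  The real entry type
 * and end-of-table convention are not visible in these snippets. */
struct token_mapping {
    const char *keyword;
    int value;
};

static const struct token_mapping on_off_example[] = {
    { "on",  1 },
    { "off", 0 },
    { NULL,  0 }   /* assumed terminator */
};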
tok get_token()
{
    tok ret;

    ret.pos = pos;
    ret.posx = pos - cur_line_start_pos;
    ret.posy = pos_y;
    ret.line_occured_start = cur_line_start_pos;
    ret.token = get_token_value();
    ret.text = lextext;
    return ret;
}
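/* A minimal sketch, assuming field types: the tok record filled in above is not
 * defined in these snippets.  The field names are taken from the code; the
 * types are guesses. */
typedef struct {
    int pos;                 /* absolute offset of the token in the source buffer */
    int posx;                /* column: offset from the start of the current line */
    int posy;                /* line number */
    int line_occured_start;  /* offset at which the token's line begins */
    int token;               /* token id as returned by get_token_value() */
    char *text;              /* lexeme text (lextext) for literals and identifiers */
} tok;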
char *get_open_opts(char *line, long int *fpos, int *flags, int *mode,
                    int *lock_mode)
{
    char *c;
    int flag2 = -1;

    /* Set default mode */
    *mode = S_IRUSR | S_IWUSR;

    /* Get fpos */
    c = get_fpos(line, fpos, REQUIRES_MORE);
    if (c == NULL)
        return c;

    c = get_token_value(c, flags, read_write_flags, false, REQUIRES_MORE,
                        "Invalid open flags");
    if (c == NULL)
        return c;
    *flags |= O_SYNC;

    /* Check optional open flags */
    while (flag2 != 0) {
        c = get_token_value(c, &flag2, open_flags, true, REQUIRES_MORE,
                            "Invalid optional open flag");
        if (c == NULL)
            return c;
        *flags |= flag2;
    }

    /* Check optional lock mode, default to POSIX */
    *lock_mode = LOCK_MODE_POSIX;
    c = get_token_value(c, lock_mode, lock_modes, true, REQUIRES_MORE,
                        "Invalid optional lock mode");
    if (c == NULL)
        return c;

    return SkipWhite(c, REQUIRES_MORE, "get_open_opts");
}
static bool do_non_text_tags(struct gui_wps *gwps, struct skin_draw_info *info,
                             struct skin_element *element, struct viewport* vp)
{
#ifndef HAVE_LCD_BITMAP
    (void)vp; /* silence warnings */
    (void)info;
#endif
    struct wps_token *token = (struct wps_token *)element->data;

#ifdef HAVE_LCD_BITMAP
    struct wps_data *data = gwps->data;
    bool do_refresh = (element->tag->flags & info->refresh_type) > 0;
#endif
    switch (token->type)
    {
#if (LCD_DEPTH > 1) || (defined(HAVE_REMOTE_LCD) && (LCD_REMOTE_DEPTH > 1))
        case SKIN_TOKEN_VIEWPORT_FGCOLOUR:
        {
            struct viewport_colour *col = token->value.data;
            col->vp->fg_pattern = col->colour;
        }
        break;
        case SKIN_TOKEN_VIEWPORT_BGCOLOUR:
        {
            struct viewport_colour *col = token->value.data;
            col->vp->bg_pattern = col->colour;
        }
        break;
#endif
        case SKIN_TOKEN_VIEWPORT_ENABLE:
        {
            char *label = token->value.data;
            char temp = VP_DRAW_HIDEABLE;
            struct skin_element *viewport = gwps->data->tree;
            while (viewport)
            {
                struct skin_viewport *skinvp = (struct skin_viewport*)viewport->data;
                if (skinvp->label && !skinvp->is_infovp &&
                    !strcmp(skinvp->label, label))
                {
                    if (skinvp->hidden_flags & VP_DRAW_HIDDEN)
                    {
                        temp |= VP_DRAW_WASHIDDEN;
                    }
                    skinvp->hidden_flags = temp;
                }
                viewport = viewport->next;
            }
        }
        break;
#ifdef HAVE_LCD_BITMAP
        case SKIN_TOKEN_UIVIEWPORT_ENABLE:
            sb_set_info_vp(gwps->display->screen_type, token->value.data);
            break;
        case SKIN_TOKEN_PEAKMETER:
            data->peak_meter_enabled = true;
            if (do_refresh)
                draw_peakmeters(gwps, info->line_number, vp);
            break;
#endif
#ifdef HAVE_LCD_BITMAP
        case SKIN_TOKEN_PEAKMETER_LEFTBAR:
        case SKIN_TOKEN_PEAKMETER_RIGHTBAR:
            data->peak_meter_enabled = true;
            /* fall through to the progressbar code */
#endif
        case SKIN_TOKEN_VOLUMEBAR:
        case SKIN_TOKEN_BATTERY_PERCENTBAR:
#ifdef HAVE_LCD_BITMAP
        case SKIN_TOKEN_PROGRESSBAR:
        case SKIN_TOKEN_TUNER_RSSI_BAR:
        {
            struct progressbar *bar = (struct progressbar*)token->value.data;
            if (do_refresh)
                draw_progressbar(gwps, info->line_number, bar);
        }
#endif
        break;
#ifdef HAVE_LCD_BITMAP
        case SKIN_TOKEN_IMAGE_DISPLAY_LISTICON:
        case SKIN_TOKEN_IMAGE_PRELOAD_DISPLAY:
        {
            struct image_display *id = token->value.data;
            const char* label = id->label;
            struct gui_img *img = skin_find_item(label, SKIN_FIND_IMAGE, data);
            if (img && img->loaded)
            {
                if (id->token == NULL)
                {
                    img->display = id->subimage;
                }
                else
                {
                    char buf[16];
                    const char *out;
                    int a = img->num_subimages;
                    out = get_token_value(gwps, id->token, info->offset,
                                          buf, sizeof(buf), &a);

                    /* NOTE: get_token_value() returns values starting at 1! */
                    if (a == -1)
                        a = (out && *out) ? 1 : 2;
                    if (token->type == SKIN_TOKEN_IMAGE_DISPLAY_LISTICON)
                        a -= 2; /* 2 is added in statusbar-skinned.c! */
                    else
                        a--;
                    a += id->offset;

                    /* Clear the image, as in conditionals */
                    clear_image_pos(gwps, img);

                    /* If the token returned a value which is higher than
                     * the amount of subimages, don't draw it. */
                    if (a >= 0 && a < img->num_subimages)
                    {
                        img->display = a;
                    }
                }
            }
            break;
        }
#ifdef HAVE_ALBUMART
        case SKIN_TOKEN_ALBUMART_DISPLAY:
            /* now draw the AA */
            if (do_refresh && data->albumart)
            {
                int handle = playback_current_aa_hid(data->playback_aa_slot);
#if CONFIG_TUNER
                if (in_radio_screen() || (get_radio_status() != FMRADIO_OFF))
                {
                    struct dim dim = {data->albumart->width, data->albumart->height};
                    handle = radio_get_art_hid(&dim);
                }
#endif
                data->albumart->draw_handle = handle;
            }
            break;
#endif
        case SKIN_TOKEN_DRAW_INBUILTBAR:
            gui_statusbar_draw(&(statusbars.statusbars[gwps->display->screen_type]),
                               info->refresh_type == SKIN_REFRESH_ALL,
                               token->value.data);
            break;
        case SKIN_TOKEN_VIEWPORT_CUSTOMLIST:
            if (do_refresh)
                skin_render_playlistviewer(token->value.data, gwps,
                                           info->skin_vp, info->refresh_type);
            break;
#endif /* HAVE_LCD_BITMAP */
#ifdef HAVE_SKIN_VARIABLES
        case SKIN_TOKEN_VAR_SET:
            if (do_refresh)
            {
                struct skin_var_changer *data = token->value.data;
                if (data->direct)
                    data->var->value = data->newval;
                else
                {
                    data->var->value += data->newval;
                    if (data->max)
                    {
                        if (data->var->value > data->max)
                            data->var->value = 1;
                        else if (data->var->value < 1)
                            data->var->value = data->max;
                    }
                }
                if (data->var->value < 1)
                    data->var->value = 1;
                data->var->last_changed = current_tick;
            }
            break;
#endif
        default:
            return false;
    }
    return true;
}
/* Draw a LINE element onto the display */
static bool skin_render_line(struct skin_element* line, struct skin_draw_info *info)
{
    bool needs_update = false;
    int last_value, value;

    if (line->children_count == 0)
        return false; /* empty line, do nothing */

    struct skin_element *child = line->children[0];
    struct conditional *conditional;
    skin_render_func func = skin_render_line;
    int old_refresh_mode = info->refresh_type;

    while (child)
    {
        switch (child->type)
        {
            case CONDITIONAL:
                conditional = (struct conditional*)child->data;
                last_value = conditional->last_value;
                value = evaluate_conditional(info->gwps, info->offset,
                                             conditional, child->children_count);
                conditional->last_value = value;
                if (child->children_count == 1)
                {
                    /* %?aa<true> and %?<true|false> need special handling here */
                    if (value == -1) /* tag is false */
                    {
                        /* we are in a false branch of a %?aa<true> conditional */
                        if (last_value == 0)
                            do_tags_in_hidden_conditional(child->children[0], info);
                        break;
                    }
                }
                else
                {
                    if (last_value >= 0 && value != last_value &&
                        last_value < child->children_count)
                        do_tags_in_hidden_conditional(child->children[last_value], info);
                }
                if (child->children[value]->type == LINE_ALTERNATOR)
                {
                    func = skin_render_alternator;
                }
                else if (child->children[value]->type == LINE)
                    func = skin_render_line;

                if (value != last_value)
                {
                    info->refresh_type = SKIN_REFRESH_ALL;
                    info->force_redraw = true;
                }

                if (func(child->children[value], info))
                    needs_update = true;
                else
                    needs_update = needs_update || (last_value != value);

                info->refresh_type = old_refresh_mode;
                break;
            case TAG:
                if (child->tag->flags & NOBREAK)
                    info->no_line_break = true;
                if (child->tag->type == SKIN_TOKEN_SUBLINE_SCROLL)
                    info->line_scrolls = true;

                fix_line_alignment(info, child);

                if (!child->data)
                {
                    break;
                }
                if (!do_non_text_tags(info->gwps, info, child, &info->skin_vp->vp))
                {
                    static char tempbuf[128];
                    const char *value = get_token_value(info->gwps, child->data,
                                                        info->offset, tempbuf,
                                                        sizeof(tempbuf), NULL);
                    if (value)
                    {
#if CONFIG_RTC
                        if (child->tag->flags & SKIN_RTC_REFRESH)
                            needs_update = needs_update ||
                                           info->refresh_type & SKIN_REFRESH_DYNAMIC;
#endif
                        needs_update = needs_update ||
                                       ((child->tag->flags & info->refresh_type) != 0);
                        strlcat(info->cur_align_start, value,
                                info->buf_size - (info->cur_align_start - info->buf));
                    }
                }
                break;
            case TEXT:
                strlcat(info->cur_align_start, child->data,
                        info->buf_size - (info->cur_align_start - info->buf));
                needs_update = needs_update ||
                               (info->refresh_type & SKIN_REFRESH_STATIC) != 0;
                break;
            case COMMENT:
            default:
                break;
        }

        child = child->next;
    }
    return needs_update;
}
ItemContent::ItemContent(string json_contents, size_t item_pos, string item_id)
{
    ItemId = item_id;
    ItemPos = item_pos;

    vector<string> Genres;
    vector<string> Directors;
    vector<string> Awards;
    vector<string> Actors;
    vector<string> PlotTerms;
    vector<string> Title;

    vector<string> content_values = split(json_contents, "\",\"");
    for (auto raw_token: content_values) {
        string token(raw_token);
        token = str2lower(token);
        token = remove_chars(token, "\"");
        string token_value = get_token_value(token);
        token = remove_chars(token, ",");

        if (token_value != NONE_STR) {
            if (!starts_with(token, "year") && !starts_with(token, "imdbrating"))
                token_value = remove_chars(token_value, INVALID_CHARS);

            if (starts_with(token, "title")) {
                Title = split(token_value, " ");
            }
            else if (starts_with(token, "genre"))
                Genres = split(token_value, ",");
            /*else if (starts_with(token, "director")) {
                token_value = remove_chars(token_value, " ");
                Directors = split(token_value, ",");
            }
            else if (starts_with(token, "actors")) {
                token_value = remove_chars(token_value, " ");
                Actors = split(token_value, ",");
            }*/
            else if (starts_with(token, "country"))
                MainTerms.push_back(token_value);
            else if (starts_with(token, "type")) {
                MainTerms.push_back(token_value);
            }
            else if (starts_with(token, "company"))
                MainTerms.push_back(token_value);
            else if (starts_with(token, "awards"))
                Awards = split(token_value, ",");
            else if (starts_with(token, "plot")) {
                token_value = remove_chars(token_value, ",");
                Plot = token_value;
            }
            // Extract the decade, e.g. 198X
            else if (starts_with(token, "year")) {
                Year = stoi(token_value);
                MainTerms.push_back(token_value.substr(0, token_value.size()-1) + "X");
            }
            else if (starts_with(token, "imdbrating")) {
                imdbRating = stof(token_value);
            }
        }
    }

    append_vectors<string>(MainTerms, Title);
    for (size_t i = 0; i <= 5; i++)
        append_vectors<string>(MainTerms, Genres);
    append_vectors<string>(MainTerms, Awards);
    /* Appending these would make the vector too sparse */
    //append_vectors<string>(MainTerms, Actors);
    //append_vectors<string>(MainTerms, Directors);

    // Update the main terms set used while building the feature vector
    for (string term: MainTerms)
        UniqueMainTerms.insert(term);

    // Reinforce terms that also appear in the plot - the bool value
    // means reinforcement only
    analyze_terms(PlotTerms, false);

    // Update the main terms set used while building the feature vector
    append_vectors<string>(MainTerms, PlotTerms);
    for (string term: PlotTerms)
        UniqueMainTerms.insert(term);

    Title.clear();
    Genres.clear();
    Directors.clear();
    Actors.clear();
    PlotTerms.clear();
}
/* Client has queried next frame.
 * Param:
 *  cc - Queried camera client descriptor.
 *  qc - Qemu client for the emulated camera.
 *  param - Query parameters. Parameters for this query are formatted as such:
 *          video=<size> preview=<size> whiteb=<red>,<green>,<blue> expcomp=<comp>
 *      where:
 *       - 'video', and 'preview' both must be decimal values, defining size of
 *         requested video, and preview frames respectively. Zero value for any
 *         of these parameters means that this particular frame is not requested.
 *       - 'whiteb' contains float values required to calculate white balance.
 *       - 'expcomp' contains a float value required to calculate exposure
 *         compensation.
 */
static void
_camera_client_query_frame(CameraClient* cc, QemudClient* qc, const char* param)
{
    int video_size = 0;
    int preview_size = 0;
    int repeat;
    ClientFrameBuffer fbs[2];
    int fbs_num = 0;
    size_t payload_size;
    uint64_t tick;
    float r_scale = 1.0f, g_scale = 1.0f, b_scale = 1.0f, exp_comp = 1.0f;
    char tmp[256];

    /* Sanity check. */
    if (cc->video_frame == NULL) {
        /* Not started. */
        E("%s: Camera '%s' is not started", __FUNCTION__, cc->device_name);
        _qemu_client_reply_ko(qc, "Camera is not started");
        return;
    }

    /* Pull required parameters. */
    if (get_token_value_int(param, "video", &video_size) ||
        get_token_value_int(param, "preview", &preview_size)) {
        E("%s: Invalid or missing 'video', or 'preview' parameter in '%s'",
          __FUNCTION__, param);
        _qemu_client_reply_ko(qc,
            "Invalid or missing 'video', or 'preview' parameter");
        return;
    }

    /* Pull white balance values. */
    if (!get_token_value(param, "whiteb", tmp, sizeof(tmp))) {
        if (sscanf(tmp, "%g,%g,%g", &r_scale, &g_scale, &b_scale) != 3) {
            D("Invalid value '%s' for parameter 'whiteb'", tmp);
            r_scale = g_scale = b_scale = 1.0f;
        }
    }

    /* Pull exposure compensation. */
    if (!get_token_value(param, "expcomp", tmp, sizeof(tmp))) {
        if (sscanf(tmp, "%g", &exp_comp) != 1) {
            D("Invalid value '%s' for parameter 'expcomp'", tmp);
            exp_comp = 1.0f;
        }
    }

    /* Verify that framebuffer sizes match the ones that the started camera
     * operates with. */
    if ((video_size != 0 && cc->video_frame_size != video_size) ||
        (preview_size != 0 && cc->preview_frame_size != preview_size)) {
        E("%s: Frame sizes don't match for camera '%s':\n"
          "Expected %d for video, and %d for preview. Requested %d, and %d",
          __FUNCTION__, cc->device_name, cc->video_frame_size,
          cc->preview_frame_size, video_size, preview_size);
        _qemu_client_reply_ko(qc, "Frame size mismatch");
        return;
    }

    /*
     * Initialize framebuffer array for frame read.
     */
    if (video_size) {
        fbs[fbs_num].pixel_format = cc->pixel_format;
        fbs[fbs_num].framebuffer = cc->video_frame;
        fbs_num++;
    }
    if (preview_size) {
        /* TODO: Watch out for preview format changes! */
        fbs[fbs_num].pixel_format = V4L2_PIX_FMT_RGB32;
        fbs[fbs_num].framebuffer = cc->preview_frame;
        fbs_num++;
    }

    /* Capture new frame. */
    tick = _get_timestamp();
    repeat = camera_device_read_frame(cc->camera, fbs, fbs_num,
                                      r_scale, g_scale, b_scale, exp_comp);

    /* Note that there is no (known) way to wait for the next frame to become
     * available, so that we could dequeue a frame buffer from the device only
     * when we know it's there. Instead we're shooting in the dark, and quite
     * often the device will respond with EAGAIN, indicating that it doesn't
     * have a frame ready. In turn, it means that the last frame we have
     * obtained from the device is still good, and we can reply with the cached
     * frames. The only case when we need to keep trying to obtain a new frame
     * is when the frame cache is empty. To avoid looping indefinitely in case
     * the device gets stuck on something (observed with some Microsoft devices)
     * we limit the loop to a 2 second period, which is more than enough to
     * obtain something from the device. */
    while (repeat == 1 && !cc->frames_cached &&
           (_get_timestamp() - tick) < 2000000LL) {
        /* Sleep for 10 millisec before repeating the attempt. */
        _camera_sleep(10);
        repeat = camera_device_read_frame(cc->camera, fbs, fbs_num,
                                          r_scale, g_scale, b_scale, exp_comp);
    }
    if (repeat == 1 && !cc->frames_cached) {
        /* Waited too long for the first frame. */
        E("%s: Unable to obtain first video frame from the camera '%s' in %d milliseconds: %s.",
          __FUNCTION__, cc->device_name,
          (uint32_t)(_get_timestamp() - tick) / 1000, strerror(errno));
        _qemu_client_reply_ko(qc, "Unable to obtain video frame from the camera");
        return;
    } else if (repeat < 0) {
        /* An I/O error. */
        E("%s: Unable to obtain video frame from the camera '%s': %s.",
          __FUNCTION__, cc->device_name, strerror(errno));
        _qemu_client_reply_ko(qc, strerror(errno));
        return;
    }

    /* We have cached something... */
    cc->frames_cached = 1;

    /*
     * Build the reply.
     */

    /* Payload includes "ok:" + requested video and preview frames. */
    payload_size = 3 + video_size + preview_size;

    /* Send payload size first. */
    _qemu_client_reply_payload(qc, payload_size);

    /* After that send the 'ok:'. Note that if no frames are sent, we should
     * use the prefix "ok" instead of "ok:" */
    if (video_size || preview_size) {
        qemud_client_send(qc, (const uint8_t*)"ok:", 3);
    } else {
        /* Still 3 bytes: the zero terminator is required in this case. */
        qemud_client_send(qc, (const uint8_t*)"ok", 3);
    }

    /* After that send video frame (if requested). */
    if (video_size) {
        qemud_client_send(qc, cc->video_frame, video_size);
    }

    /* After that send preview frame (if requested). */
    if (preview_size) {
        qemud_client_send(qc, (const uint8_t*)cc->preview_frame, preview_size);
    }
}
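/* Minimal usage sketch (assumed behaviour, not part of the original file):
 * get_token_value() is expected to copy the text that follows "<name>=" in the
 * query string into the caller's buffer and return 0 on success, while
 * get_token_value_int() parses that text as an integer.  The parameter values
 * below are made up. */
static void _example_parse_frame_query(void)
{
    const char* param = "video=115200 preview=0 whiteb=1.0,1.0,1.0 expcomp=1.0";
    char tmp[64];
    int video_size = 0;

    if (!get_token_value_int(param, "video", &video_size) &&
        !get_token_value(param, "whiteb", tmp, sizeof(tmp))) {
        /* Here video_size would be 115200 and tmp would hold "1.0,1.0,1.0". */
    }
}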
/* Client has queried to start capturing video.
 * Param:
 *  cc - Queried camera client descriptor.
 *  qc - Qemu client for the emulated camera.
 *  param - Query parameters. Parameters for this query must contain a 'dim', and
 *      a 'pix' parameter, where 'dim' must be "dim=<width>x<height>", and 'pix'
 *      must be "pix=<format>", where 'width' and 'height' must be numerical
 *      values for the capturing video frame width, and height, and 'format' must
 *      be a numerical value for the pixel format of the video frames expected by
 *      the client. 'format' must be one of the V4L2_PIX_FMT_XXX values.
 */
static void
_camera_client_query_start(CameraClient* cc, QemudClient* qc, const char* param)
{
    char* w;
    char dim[64];
    int width, height, pix_format;

    /* Sanity check. */
    if (cc->camera == NULL) {
        /* Not connected. */
        E("%s: Camera '%s' is not connected", __FUNCTION__, cc->device_name);
        _qemu_client_reply_ko(qc, "Camera is not connected");
        return;
    }

    /*
     * Parse parameters.
     */

    if (param == NULL) {
        E("%s: Missing parameters for the query", __FUNCTION__);
        _qemu_client_reply_ko(qc, "Missing parameters for the query");
        return;
    }

    /* Pull required 'dim' parameter. */
    if (get_token_value(param, "dim", dim, sizeof(dim))) {
        E("%s: Invalid or missing 'dim' parameter in '%s'", __FUNCTION__, param);
        _qemu_client_reply_ko(qc, "Invalid or missing 'dim' parameter");
        return;
    }

    /* Pull required 'pix' parameter. */
    if (get_token_value_int(param, "pix", &pix_format)) {
        E("%s: Invalid or missing 'pix' parameter in '%s'", __FUNCTION__, param);
        _qemu_client_reply_ko(qc, "Invalid or missing 'pix' parameter");
        return;
    }

    /* Parse 'dim' parameter, and get requested frame width and height. */
    w = strchr(dim, 'x');
    if (w == NULL || w[1] == '\0') {
        E("%s: Invalid 'dim' parameter in '%s'", __FUNCTION__, param);
        _qemu_client_reply_ko(qc, "Invalid 'dim' parameter");
        return;
    }
    *w = '\0'; w++;
    errno = 0;
    width = strtoi(dim, NULL, 10);
    height = strtoi(w, NULL, 10);
    if (errno) {
        E("%s: Invalid 'dim' parameter in '%s'", __FUNCTION__, param);
        _qemu_client_reply_ko(qc, "Invalid 'dim' parameter");
        return;
    }

    /* After collecting capture parameters, let's see if the camera has already
     * started, and if so, whether the parameters match. */
    if (cc->video_frame != NULL) {
        /* Already started. Match capture parameters. */
        if (cc->pixel_format == pix_format && cc->width == width &&
            cc->height == height) {
            /* Parameters match. Succeed the query. */
            W("%s: Camera '%s' is already started", __FUNCTION__, cc->device_name);
            _qemu_client_reply_ok(qc, "Camera is already started");
        } else {
            /* Parameters don't match. Fail the query. */
            E("%s: Camera '%s' is already started, and parameters don't match:\n"
              "Current %.4s[%dx%d] != requested %.4s[%dx%d]",
              __FUNCTION__, cc->device_name, (const char*)&cc->pixel_format,
              cc->width, cc->height, (const char*)&pix_format, width, height);
            _qemu_client_reply_ko(qc,
                "Camera is already started with different capturing parameters");
        }
        return;
    }

    /*
     * Start the camera.
     */

    /* Save capturing parameters. */
    cc->pixel_format = pix_format;
    cc->width = width;
    cc->height = height;
    cc->pixel_num = cc->width * cc->height;
    cc->frames_cached = 0;

    /* Make sure that the pixel format is known, and calculate the video
     * framebuffer size along the way. */
    switch (cc->pixel_format) {
        case V4L2_PIX_FMT_YUV420:
        case V4L2_PIX_FMT_YVU420:
        case V4L2_PIX_FMT_NV12:
        case V4L2_PIX_FMT_NV21:
            cc->video_frame_size = (cc->pixel_num * 12) / 8;
            break;

        default:
            E("%s: Unknown pixel format %.4s",
              __FUNCTION__, (char*)&cc->pixel_format);
            _qemu_client_reply_ko(qc, "Pixel format is unknown");
            return;
    }

    /* Make sure that we have a converter between the original camera pixel
     * format and the one that the client expects. Also a converter must exist
     * for the preview window pixel format (RGB32). */
    if (!has_converter(cc->camera_info->pixel_format, cc->pixel_format) ||
        !has_converter(cc->camera_info->pixel_format, V4L2_PIX_FMT_RGB32)) {
        E("%s: No conversion exist between %.4s and %.4s (or RGB32) pixel formats",
          __FUNCTION__, (char*)&cc->camera_info->pixel_format,
          (char*)&cc->pixel_format);
        _qemu_client_reply_ko(qc, "No conversion exist for the requested pixel format");
        return;
    }

    /* TODO: At the moment the camera framework in the emulator requires RGB32
     * pixel format for the preview window. So, we need to keep two framebuffers
     * here: one for the video, and another for the preview window. Watch out
     * when this changes (if it changes). */
    cc->preview_frame_size = cc->pixel_num * 4;

    /* Allocate a buffer large enough to contain both the video and the preview
     * framebuffers. */
    cc->video_frame =
        (uint8_t*)malloc(cc->video_frame_size + cc->preview_frame_size);
    if (cc->video_frame == NULL) {
        E("%s: Not enough memory for framebuffers %d + %d",
          __FUNCTION__, cc->video_frame_size, cc->preview_frame_size);
        _qemu_client_reply_ko(qc, "Out of memory");
        return;
    }

    /* Set framebuffer pointers. */
    cc->preview_frame = (uint16_t*)(cc->video_frame + cc->video_frame_size);

    /* Start the camera. */
    if (camera_device_start_capturing(cc->camera, cc->camera_info->pixel_format,
                                      cc->width, cc->height)) {
        E("%s: Cannot start camera '%s' for %.4s[%dx%d]: %s",
          __FUNCTION__, cc->device_name, (const char*)&cc->pixel_format,
          cc->width, cc->height, strerror(errno));
        free(cc->video_frame);
        cc->video_frame = NULL;
        _qemu_client_reply_ko(qc, "Cannot start the camera");
        return;
    }

    D("%s: Camera '%s' is now started for %.4s[%dx%d]",
      __FUNCTION__, cc->device_name, (char*)&cc->pixel_format,
      cc->width, cc->height);

    _qemu_client_reply_ok(qc, NULL);
}
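/* For illustration only (the values below are made up): a start query as
 * described above could look like
 *     dim=640x480 pix=842093913
 * where 'pix' carries the numeric value of one of the V4L2_PIX_FMT_XXX
 * constants (here assumed to be the fourcc of V4L2_PIX_FMT_YUV420). */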
char *get_lock_type(char *line, int *type)
{
    return get_token_value(line, type, lock_types, false, REQUIRES_MORE,
                           "Invalid lock type");
}
int get_token_value()
{
    //printf("src[%d]:%c\n" , pos , src[pos] );
    char * str = src;

    if (str[pos] == '\0')
        return LEXEOF; /* Returns end of file */

    if (str[pos] == ' ' || str[pos] == '\t')
    {
        while (str[pos] == ' ' || str[pos] == '\t')
            pos++; /* Ignoring whitespace */
        return get_token_value();
    }

    if (!strncmp(&str[pos], "//", 2))
    {
        pos += 2;
        while (str[pos] != '\n' && str[pos++] != '\0'); // Ignoring line comments
        return get_token_value();
    }

    if (!strncmp(&str[pos], "/*", 2))
    {
        pos += 2;
        while (pos++) /* Ignoring block comments */
        {
            if (str[pos] == '*')
                if (str[++pos] == '/')
                {
                    pos++;
                    return get_token_value();
                }
            if (str[pos] == '\0')
                return LEXEOF;
        }
    }

    start_pos = pos; /* Keep a reference to the start of the current token */

    if (str[pos] == ';')
        return str[pos++]; /* Optional semi-colon (;) support */
    else if (str[pos] == '=') /* Checks the equal sign (=) */
    {
        pos++;
        if (in_cond)
            return EQ;  /* Returns equality comparison if inside a condition */
        return ASSIGN;  /* Returns assignment operation */
    }
    else if (!strncmp(&str[pos], "*/", 2)) /* Used for block comments */
    {
        pos += 2;
        return CMNTEND;
    }
    else if (is_in(str[pos], "!()+-*/<>,^[]{}\n")) /* These characters are returned as their ASCII value */
    {
        switch (str[pos])
        {
            case '>': /* Greater-than-or-equal comparison */
                if (str[pos+1] == '=') { pos++; return GE; }
                break;
            case '<': /* Less-than-or-equal comparison */
                if (str[pos+1] == '=') { pos++; return LE; }
                break;
            case '!': /* Not-equal comparison */
                if (str[pos+1] == '=') { pos++; return NE; }
                break;
        }
        return str[pos++]; /* Otherwise return what was found */
    }
    else if (is_digit(str[pos])) /* Checks if a number is found */
    {
        while (is_digit(str[pos])) pos++; /* Skip until the first non-digit character */
        if (str[pos] == '.') /* If it has a dot in it, it's a floating point number, saved as a double */
        {
            pos++;
            while (is_digit(str[pos])) pos++;
            lextext = strndup(&str[start_pos], pos - start_pos);
            return DOUBLE_VAL;
        }
        lextext = strndup(&str[start_pos], pos - start_pos);
        return INT_VAL;
    }
    else if (is_valid_id_start(str[pos])) /* Either a keyword or an identifier */
    {
        while (is_valid_id_name(str[pos])) pos++; /* Skips letters, digits and underscores */
        lextext = strndup(&str[start_pos], pos - start_pos);
        if (in_table)
            return ID;

        int len = strlen(lextext); /* Checking if it's a keyword */
        if      (!strcmp(lextext, "if")       && len == 2) return IF;
        else if (!strcmp(lextext, "then")     && len == 4) return THEN;
        else if (!strcmp(lextext, "end")      && len == 3) return END;
        else if (!strcmp(lextext, "while")    && len == 5) return WHILE;
        else if (!strcmp(lextext, "do")       && len == 2) return DO;
        else if (!strcmp(lextext, "function") && len == 8) return FUNC;
        else if (!strcmp(lextext, "return")   && len == 6) return RETURN;
        else if (!strcmp(lextext, "for")      && len == 3) return FOR;
        return ID; /* Otherwise it's an identifier */
    }
    else if (str[pos] == '"')
    {
        pos++;
        while (str[pos] != '"') pos++;
        lextext = fstrndup(&str[start_pos+1], pos - start_pos - 1);
        pos++;
        return STRING_VAL;
    }
    /*
    else if ( str[pos] == '"' )
    {
        pos++;
        while(str[pos] != '"') pos++;
        lextext = strndup( &str[start_pos] , pos - start_pos);
        pos++;
        return STRING_VAL;
    }
    */

    pos++; /* Skip the unexpected character and keep lexing */
    /* Unexpected-symbol diagnostic, currently disabled:
    fprintf( stderr , "unexpected symbol in line %d:\n" , pos_y );
    // Print from cur_line_start_pos until the new line
    int s_c = pos - cur_line_start_pos - 1;
    while(str[cur_line_start_pos] != '\n')
        fprintf( stderr , "%c" , str[cur_line_start_pos++] );
    fprintf(stderr, "\n");
    int m;
    for(m=0; m < s_c ; m++)
        fprintf(stderr , " ");
    fprintf(stderr , "^\n");
    exit(0);
    */
    return get_token_value();
}
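/* A minimal sketch, not part of the original lexer: the token ids returned by
 * get_token_value() above are not defined in these snippets.  Since single
 * characters are returned as their ASCII values, the named tokens presumably
 * start above that range; the exact values here are assumptions. */
enum {
    LEXEOF = 0,        /* end of input */
    INT_VAL = 256,     /* integer literal, text in lextext */
    DOUBLE_VAL,        /* floating point literal */
    STRING_VAL,        /* string literal */
    ID,                /* identifier */
    ASSIGN,            /* '=' outside a condition */
    EQ,                /* '=' inside a condition */
    NE, GE, LE,        /* !=, >=, <= */
    CMNTEND,           /* stray block comment terminator */
    IF, THEN, END, WHILE, DO, FUNC, RETURN, FOR /* keywords */
};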