void RenderSVGForeignObject::updateLogicalWidth() { // FIXME: Investigate in size rounding issues // FIXME: Remove unnecessary rounding when layout is off ints: webkit.org/b/63656 setWidth(static_cast<int>(roundf(m_viewport.width()))); }
void GLTouchScreen::analyze(float x, float y) { //x = roundf(x * width / context->width); //y = roundf(y * height / context->height); if (clientRatio > viewRatio) { // Экран уже чем треубется x = roundf(x * clientWidth / viewWidth); y = roundf((((y / viewHeight) - 0.5f) * clientRatio / viewRatio + 0.5f) * clientHeight); } else { x = roundf((((x / viewWidth) - 0.5f) * viewRatio / clientRatio + 0.5f) * clientWidth); y = roundf(y * clientHeight / viewHeight); } recordTrack(x, y, true); if (touchTimeout != 0) { touchTimeout = 0; } if (touchCancel) { touchCancel = false; return; } int32_t listlength = touchList.count(); Vec3* list = touchList.items(); if (listlength > 0) { CString type; GLfloat x1 = list[0].x; GLfloat y1 = list[0].y; GLfloat x2 = list[listlength - 1].x; GLfloat y2 = list[listlength - 1].y; if (listlength == 1) { int32_t count = clickList.count(); if (count > 0) { GLTouchObject** items = clickList.items(); for (int i = count - 1; i >= 0; i--) { GLfloat ww = items[i]->w; GLfloat hh = items[i]->h; GLfloat xx = items[i]->x; GLfloat yy = items[i]->y; if ((x1 >= xx) && (x1 < (xx + ww)) && (y1 >= yy) && (y1 < (yy + hh)) ) { type = L"click "; type += items[i]->name; if (items[i]->onevent(type, NULL, NULL, NULL, items[i]->userData)) { break; } } } } if (type.m_length == 0) type = L"none"; } else { // Определение кругового движения по часовой стрелке или против GLfloat cx, cy; { // Вычисляем центр окружности описывающей точки GLfloat xmin, xmax, ymin, ymax; xmin = xmax = x1; ymin = ymax = y1; for (int i = 0; i < listlength; i++) { GLfloat xx = list[i].x; GLfloat yy = list[i].y; if (xx < xmin) xmin = xx; if (xx > xmax) xmax = xx; if (yy < ymin) ymin = yy; if (yy > ymax) ymax = yy; } cx = (xmin + xmax) / 2.0f; cy = (ymin + ymax) / 2.0f; // Вычисляем средний радиус и определяем число смещений по кругу GLfloat mr = 0; // Средний радиус GLfloat cw = 0; // Число смещений по часовой стрелке GLfloat bw = 0; // Число смещений против часовой стрелки GLfloat ca = 0; // Угол 
смещения по часовой стрелке GLfloat ba = 0; // Угол смещения против часовой стрелки GLfloat lx = x2 - cx; GLfloat ly = y2 - cy; GLfloat la = atan2f(ly, lx); // Угол последней точки for (int i = 0; i < listlength; i++) { // Координаты относительно центра GLfloat xx = list[i].x - cx; GLfloat yy = list[i].y - cy; // Растояние до точки GLfloat r = floorf(sqrtf(xx * xx + yy * yy)); // Направление движения по часовой стрелке или против GLfloat s = lx * yy - ly * xx; GLfloat na = atan2f(yy, xx); GLfloat a = (na - la) * 180.0f / (GLfloat)M_PI; while (a < -180.0f) a += 360.0f; while (a > 180.0f) a -= 360.0f; if (i != 0) { if (s > 0) { cw++; ca += a; } else if (s < 0) { bw++; ba -= a; } } // Кешируем вычисления list[i].z = r; mr += r; la = na; lx = xx; ly = yy; } mr = floorf(mr / (GLfloat)listlength); // Вычисляем процентное соотношение смещений и направление GLfloat md = 0; if ((cw != 0) || (bw != 0)) { if (cw > bw) { md = floorf((cw - bw) * 100.0f / cw); } else { md = -floorf((bw - cw) * 100.0f / bw); } } // Угол смещения GLfloat a = fabsf(ba - ca); // Проверяем ровность круга (допустимое искажение радиуса 50% на каждую точку) if ((mr > minimalDistance) && (fabsf(md) > 90.0f)) { bool circle = true; GLfloat drm = 0; for (int i = 0; i < listlength; i++) { GLfloat dr = floorf(fabsf((list[i].z / mr) - 1.0f) * 100.0f); if (dr > drm) drm = dr; if (dr > 50.0f) { circle = false; break; } } if (circle) { int ac = (int)roundf(a / 90.0f); if (ac > 2) { type = L"circle"; } else { type = L"arc"; } if (md > 0) type += L" right"; else type += L" left"; if (ac > 5) { type += L" "; type += (ac * 90); } } } } // Определение свайпов и их направления if (type.m_length == 0) { // Вычисляем расстояние GLfloat dx = x2 - x1; GLfloat dy = y2 - y1; GLfloat d = floorf(sqrtf(dx * dx + dy * dy)); // Отбрасываем случайные или короткие свайпы uint64_t time = currentTimeMillis(); time -= touchStart; if ((d > minimalDistance) && ((listlength < 15) || (d >= swipeDistance))) { // Проверяем ровность линии 
(допустимое искажение 25% от длины прямой) bool swipe = true; GLfloat c = x1 * y2 - x2 * y1; for (int i = listlength - 2; i > 0; i--) { // Расстояние до точки от отрезка (+ знак стороны) GLfloat p = (list[i].y * dx - list[i].x * dy + c) / d; GLfloat dp = floorf(fabsf(p) * 100.0f / d); if (dp > 25) { swipe = false; break; } } if (swipe) { type = L"swipe"; GLfloat ax = fabsf(dx); GLfloat ay = fabsf(dy); GLfloat ad = 0; if (ax > ay) { if (d > swipeDistance) type += L" long"; ad = floorf((ax - ay) * 100.0f / ax); if (ad > 50) { if (dx > 0) type += L" right"; else type += L" left"; } else if (dx > 0) { if (dy < 0) type += L" right top"; else type += L" right bottom"; } else { if (dy < 0) type += L" left top"; else type += L" left bottom"; } } else { if (d > swipeDistance) type += L" long"; ad = floorf((ay - ax) * 100.0f / ay); if (ad > 50) { if (dy < 0) type += L" top"; else type += L" bottom"; } else if (dy < 0) { if (dx > 0) type += L" right top"; else type += L" left top"; } else { if (dx > 0) type += L" right bottom"; else type += L" left bottom"; } } } } } } if (type.m_length != 0) { onTouch(type); } } }
/* Renders a text message with the raster font at normalized viewport
 * position (pos_x, pos_y), honoring the requested alignment.  The message
 * is processed in chunks of at most MAX_MSG_LEN_CHUNK glyphs: each chunk's
 * vertex / texcoord / color data is built on the stack and either appended
 * to the font's coord array block (deferred draw) or drawn immediately.
 *
 * NOTE(review): 'scale' and 'color' (and several locals: x, y, delta_x,
 * delta_y, inv_tex_size_*, inv_win_*, off_*, tex_*, width, height,
 * font_tex_coords, font_vertex, font_color) are not referenced directly in
 * this body — presumably they are consumed by the gl_raster_font_emit()
 * macro; confirm against the macro definition. */
static void gl_raster_font_render_message(
      gl_raster_t *font, const char *msg, GLfloat scale,
      const GLfloat color[4], GLfloat pos_x, GLfloat pos_y,
      unsigned text_align)
{
   int x, y, delta_x, delta_y;
   float inv_tex_size_x, inv_tex_size_y, inv_win_width, inv_win_height;
   unsigned i, msg_len_full, msg_len;
   GLfloat font_tex_coords[2 * 6 * MAX_MSG_LEN_CHUNK];
   GLfloat font_vertex[2 * 6 * MAX_MSG_LEN_CHUNK];
   GLfloat font_color[4 * 6 * MAX_MSG_LEN_CHUNK];
   struct gl_coords coords;
   gl_t *gl = font ? font->gl : NULL;

   /* Nothing to draw without a GL context (also covers font == NULL). */
   if (!gl)
      return;

   msg_len_full = strlen(msg);
   msg_len = min(msg_len_full, MAX_MSG_LEN_CHUNK);

   /* Convert the normalized position into viewport pixel coordinates. */
   x = roundf(pos_x * gl->vp.width);
   y = roundf(pos_y * gl->vp.height);
   delta_x = 0;
   delta_y = 0;

   /* Shift the pen position left by the full (or half) message width for
    * right / center alignment; left alignment needs no adjustment. */
   switch (text_align)
   {
      case TEXT_ALIGN_RIGHT:
         x -= get_message_width(font, msg);
         break;
      case TEXT_ALIGN_CENTER:
         x -= get_message_width(font, msg) / 2.0;
         break;
   }

   inv_tex_size_x = 1.0f / font->tex_width;
   inv_tex_size_y = 1.0f / font->tex_height;
   inv_win_width = 1.0f / font->gl->vp.width;
   inv_win_height = 1.0f / font->gl->vp.height;

   /* Process the message chunk by chunk until it is exhausted. */
   while (msg_len_full)
   {
      for (i = 0; i < msg_len; i++)
      {
         int off_x, off_y, tex_x, tex_y, width, height;
         const struct font_glyph *glyph =
            font->font_driver->get_glyph(font->font_data, (uint8_t)msg[i]);

         if (!glyph) /* Do something smarter here ... */
            glyph = font->font_driver->get_glyph(font->font_data, '?');
         if (!glyph)
            continue;

         off_x = glyph->draw_offset_x;
         off_y = glyph->draw_offset_y;
         tex_x = glyph->atlas_offset_x;
         tex_y = glyph->atlas_offset_y;
         width = glyph->width;
         height = glyph->height;

         /* Two triangles per glyph quad. */
         gl_raster_font_emit(0, 0, 1); /* Bottom-left */
         gl_raster_font_emit(1, 1, 1); /* Bottom-right */
         gl_raster_font_emit(2, 0, 0); /* Top-left */

         gl_raster_font_emit(3, 1, 0); /* Top-right */
         gl_raster_font_emit(4, 0, 0); /* Top-left */
         gl_raster_font_emit(5, 1, 1); /* Bottom-right */

         /* Advance the pen for the next glyph. */
         delta_x += glyph->advance_x;
         delta_y -= glyph->advance_y;
      }

      coords.tex_coord = font_tex_coords;
      coords.vertex = font_vertex;
      coords.color = font_color;
      coords.vertices = 6 * msg_len; /* 6 vertices per glyph */
      coords.lut_tex_coord = gl->coords.lut_tex_coord;

      /* Either batch the chunk into the font's block, or draw immediately. */
      if (font->block)
         gl_coord_array_add(&font->block->carr, &coords, coords.vertices);
      else
         gl_raster_font_draw_vertices(gl, &coords);

      /* Advance to the next chunk of the message. */
      msg_len_full -= msg_len;
      msg += msg_len;
      msg_len = min(msg_len_full, MAX_MSG_LEN_CHUNK);
   }
}
/*
 * Initialize the destination byte buffer with image data scaled to the
 * width and height specified from the source byte buffer.
 *
 * Scaling is a box filter driven by Bresenham-style remainder counters
 * (sxrem/dxrem across columns, syrem/dyrem across rows): each source pixel
 * contributes to every overlapping destination pixel with weight
 * amtx * amty, and a finished destination row is normalized by
 * srcWidth * srcHeight before being written out.  When upscaling
 * vertically (amty == srcHeight) the finished row is replicated into
 * consecutive destination rows.
 *
 * Fixes relative to the previous version:
 *  - removed the no-op expression statement "amty;" at the top of the row
 *    loop (dead leftover with no effect);
 *  - removed the inner "int amtx;" declaration that shadowed the
 *    function-scope amtx;
 *  - hoisted the loop-invariant normalization divisor out of the
 *    per-pixel loop.
 *
 * NOTE(review): malloc() results are used unchecked, as before — an
 * allocation failure crashes rather than reporting an error.
 */
static void fillThreeCompBuffer( FilterParam params )
{
    int srcWidth, srcHeight, srcComponents, srcPixels;
    int dstWidth, dstHeight;
    int row_cmp_size;
    row_cmp_ptr row;
    int sx, sy, dx, dy;
    int sxrem, syrem, dxrem, dyrem;
    int amtx, amty;
    int srcRowByteOffset, srcColByteOffset;
    int dstByteOffset;
    int dstRowByteIndex;
    int dstRowByteLength;
    int i;
    float r, g, b;
    float mult;
    jbyte *row_data;
    jbyte *srcBuffer;
    jbyte *dstBuffer;
    int r_int, g_int, b_int;
    param_ptr source = (param_ptr)params;

    srcWidth = source->pub.srcWidth;
    srcHeight = source->pub.srcHeight;
    srcComponents = source->pub.srcComponents;
    srcPixels = srcWidth * srcHeight;
    srcBuffer = source->pub.src_pixel_data;
    dstWidth = source->pub.dstWidth;
    dstHeight = source->pub.dstHeight;
    dstBuffer = source->pub.dst_pixel_data;

    /* Per-component float accumulators for one destination row. */
    row = (row_cmp_ptr)malloc( sizeof( row_cmp ) );
    row_cmp_size = dstWidth * sizeof( float );
    row->red = (float*)malloc( row_cmp_size );
    row->green = (float*)malloc( row_cmp_size );
    row->blue = (float*)malloc( row_cmp_size );

    /* Staging buffer for one finished destination row of bytes. */
    dstRowByteLength = dstWidth*srcComponents;
    row_data = (jbyte*)malloc( dstRowByteLength );

    ///////////////////////////////////////////////////////////////////

    sy = 0;
    syrem = dstHeight;
    dy = 0;
    dyrem = 0;
    srcRowByteOffset = 0;
    dstByteOffset = 0;
    while ( sy < srcHeight ) {
        /* Starting a new destination row: clear the accumulators. */
        if ( dyrem == 0 ) {
            for ( i = 0; i < dstWidth; i++ ) {
                row->red[i] = row->green[i] = row->blue[i] = 0;
            }
            dyrem = srcHeight;
        }

        /* Vertical overlap between this source row and the current
         * destination row. */
        if ( syrem < dyrem ) {
            amty = syrem;
        } else {
            amty = dyrem;
        }

        sx = 0;
        dx = 0;
        sxrem = 0;
        dxrem = srcWidth;
        r = 0;
        g = 0;
        b = 0;
        while ( sx < srcWidth ) {
            /* Moving onto a new source pixel: fetch its components
             * (masked to treat the signed jbyte as 0..255). */
            if ( sxrem == 0 ) {
                sxrem = dstWidth;
                srcColByteOffset = sx * srcComponents;
                r = 0xff & srcBuffer[ srcRowByteOffset + srcColByteOffset++ ];
                g = 0xff & srcBuffer[ srcRowByteOffset + srcColByteOffset++ ];
                b = 0xff & srcBuffer[ srcRowByteOffset + srcColByteOffset ];
            }

            /* Horizontal overlap between this source pixel and the current
             * destination pixel. */
            if ( sxrem < dxrem ) {
                amtx = sxrem;
            } else {
                amtx = dxrem;
            }

            /* Accumulate this source pixel's weighted contribution. */
            mult = ((float)amtx) * amty;
            row->red[dx] += mult * r;
            row->green[dx] += mult * g;
            row->blue[dx] += mult * b;

            if ( ( sxrem -= amtx ) == 0 ) {
                sx++;
            }
            if ( ( dxrem -= amtx ) == 0 ) {
                dx++;
                dxrem = srcWidth;
            }
        }

        if ( ( dyrem -= amty ) == 0 ) {
            ///////////////////////////////////////////////////////////////////
            /* The destination row is complete: normalize, clamp to 0..255,
             * and convert to bytes.  Total weight per destination pixel is
             * srcWidth * srcHeight, so divide by srcPixels. */
            mult = (float)srcPixels; /* loop-invariant divisor */
            dstRowByteIndex = 0;
            for ( i = 0; i < dstWidth; i++ ) {
                r_int = (int)roundf( row->red[i] / mult );
                g_int = (int)roundf( row->green[i] / mult );
                b_int = (int)roundf( row->blue[i] / mult );
                if ( r_int < 0 ) {
                    r_int = 0;
                } else if ( r_int > 255 ) {
                    r_int = 255;
                }
                if ( g_int < 0 ) {
                    g_int = 0;
                } else if ( g_int > 255 ) {
                    g_int = 255;
                }
                if ( b_int < 0 ) {
                    b_int = 0;
                } else if ( b_int > 255 ) {
                    b_int = 255;
                }
                row_data[dstRowByteIndex++] = (jbyte)r_int;
                row_data[dstRowByteIndex++] = (jbyte)g_int;
                row_data[dstRowByteIndex++] = (jbyte)b_int;
            }

            ///////////////////////////////////////////////////////////////////
            /* Write the finished row out.  When amty == srcHeight one whole
             * source row span maps onto one or more destination rows, so the
             * row is replicated while vertical remainder allows. */
            do {
                for ( i = 0; i < dstRowByteLength; i++ ) {
                    dstBuffer[ dstByteOffset++ ] = row_data[i];
                }
                dy++;
            } while ( ( ( syrem -= amty ) >= amty ) && ( amty == srcHeight ) );
        } else {
            syrem -= amty;
        }

        /* This source row is fully consumed: advance to the next one. */
        if ( syrem == 0 ) {
            syrem = dstHeight;
            sy++;
            srcRowByteOffset += srcWidth * srcComponents;
        }
    }

    ///////////////////////////////////////////////////////////////////
    free( row_data );
    free( row->red );
    free( row->green );
    free( row->blue );
    free( row );
}
//--------------------------------------------------------------------------- // // factors the total number of verices in alternating x, y, z, t directions // // given: constraints on the blocking entered as an array where // 0 implies no constraint in that direction and some value n > 0 is a given // number of blocks in a given direction // void Blocking::FactorDims(int *given) { int rem = tot_b; // unfactored remaining portion of tot_b int block_dim[DIY_MAX_DIM]; // current block size int max; // longest remaining direction (0, 1, 2) int i, j; // init for (i = 0; i < dim; i++) { if (given[i] == 0) { lat_size[i] = 1; block_dim[i] = data_size[i]; } else { lat_size[i] = given[i]; if (rem % given[i]) #ifdef MAC fprintf(stderr,"Unable to block the volume with given[%d] = %d " "dimension. Please provide different 'given' constraints and " "rerun.\n", i, given[i]); #else fprintf(stderr,"Unable to block the volume with given[%d] = %d " "dimension. Please provide different 'given' constraints and " "rerun.\n", i, given[i]); #endif assert(rem % given[i] == 0); rem /= given[i]; } } // compute factorization of data dimensions into lattice dimensions while (1) { // find longest division direction max = 0; for(i = 1; i < dim; i++) { if (given[i] == 0 && block_dim[i] > block_dim[max]) max = i; } // smallest factor remaining gets assigned to this direction for (j = 2; j <= rem; j++) { if (rem % j == 0) { lat_size[max] *= j; block_dim[max] /= j; rem /= j; break; } } if (rem == 1) break; if (j > rem) fprintf(stderr,"Unable to block the volume into %d blocks. 
" "Please select a different number of blocks and rerun.\n", tot_b); assert(j <= rem); } // sanity check int prod_blocks = 1; for (i = 0; i < dim; i++) prod_blocks *= lat_size[i]; assert(prod_blocks == tot_b); // block sizes for(i = 0; i < dim; i++) block_size[i] = (int)(roundf((float)data_size[i] / (float)lat_size[i])); // debug // fprintf(stderr, "block sizes = [ "); // for (i = 0; i < dim; i++) // fprintf(stderr, "%d ", block_size[i]); // fprintf(stderr, "]\n"); }
// Populates the per-frame swf element array for one layer: marks the frame on
// which the element is added to the display list, fills in the fade-in and
// fade-out opacity ramps, interpolates the x/y position over the fully
// visible span, optionally compresses mouse-cursor movement to leave room
// for a click sound, and finally marks the removal frame.
//
// array_start     - per-frame element slots for this layer (num_frames long)
// num_frames      - total number of frames being exported
// this_layer_data - the layer being exported
// layer_depth     - swf display depth assigned to this layer
//
// Always returns TRUE (the function has no failure path).
gboolean export_swf_create_layer_elements(swf_frame_element *array_start, guint num_frames, layer *this_layer_data, guint layer_depth)
{
	// Local variables
	gfloat click_duration;
	guint click_frames;
	gfloat element_x_position_finish = 0;
	gfloat element_x_position_increment = 0;
	gfloat element_x_position_start = 0;
	gfloat element_y_position_finish = 0;
	gfloat element_y_position_increment = 0;
	gfloat element_y_position_start = 0;
	gfloat fade_frame;
	gfloat finish_frame;
	gint finish_frame_rounded;
	guint frame_counter;            // Holds the number of frames
	GString *layer_name;            // The text name for the layer
	layer_mouse *mouse_data;        // Points to the mouse object data inside the layer
	guint loop_counter = 0;         // Simple counter used in loops
	gint num_displayed_frames;
	guint opacity_count;            // Used when calculating object opacity
	gfloat opacity_step;            // Used when calculating object opacity
	guint play_click = MOUSE_NONE;  // Should a click sound be played?
	gfloat scaled_height_ratio;     // Used to calculate the final size an object should be scaled to
	gfloat scaled_width_ratio;      // Used to calculate the final size an object should be scaled to
	gfloat start_frame;
	guint start_frame_rounded;
	gfloat x_position;              // Used in calculating layer object position
	gfloat y_position;              // Used in calculating layer object position

	// Initialisation
	layer_name = g_string_new(NULL);
	g_string_printf(layer_name, "%s%d", "Object", layer_depth);

	// Set some basic properties for the layer, across all of its frames
	for (frame_counter = 0; frame_counter < num_frames; frame_counter++)
	{
		array_start[frame_counter].action_this = FALSE;
		array_start[frame_counter].object_name = layer_name;
		array_start[frame_counter].depth = layer_depth;
		array_start[frame_counter].layer_info = this_layer_data;
		array_start[frame_counter].is_moving = FALSE;
	}

	// Calculate the height and width scaling values needed for this swf output
	scaled_height_ratio = (gfloat) get_output_height() / (gfloat) get_project_height();
	scaled_width_ratio = (gfloat) get_output_width() / (gfloat) get_project_width();

	// Calculate the scaled start and finish positions for each element
	element_x_position_start = scaled_width_ratio * this_layer_data->x_offset_start;
	element_x_position_finish = scaled_width_ratio * this_layer_data->x_offset_finish;
	element_y_position_start = scaled_height_ratio * this_layer_data->y_offset_start;
	element_y_position_finish = scaled_height_ratio * this_layer_data->y_offset_finish;

	// If there is a fade in transition, fill in the relevant elements
	start_frame = this_layer_data->start_time * get_frames_per_second();
	if (TRANS_LAYER_NONE != this_layer_data->transition_in_type)
	{
		// Work out the starting and ending frames for the fade
		finish_frame = start_frame + (this_layer_data->transition_in_duration * get_frames_per_second());

		// Indicate on which frame the element should be displayed, at what display depth, and its starting co-ordinates
		// (the finish frame is clamped into [0, num_frames - 1])
		start_frame_rounded = roundf(start_frame);
		finish_frame_rounded = roundf(finish_frame) == 0 ? 0 : ((roundf(finish_frame)>=num_frames) ? num_frames-1 : roundf(finish_frame));
		array_start[start_frame_rounded].add = TRUE;
		array_start[start_frame_rounded].x_position = element_x_position_start;
		array_start[start_frame_rounded].y_position = element_y_position_start;

		// Work out how much opacity to increment each frame by
		opacity_step = 100 / ((this_layer_data->transition_in_duration * get_frames_per_second()));

		// Loop through each frame of the fade in, setting the opacity values
		opacity_count = 0;
		for (frame_counter = start_frame_rounded; frame_counter <= finish_frame_rounded; frame_counter++)
		{
			array_start[frame_counter].action_this = TRUE;
			array_start[frame_counter].opacity_change = TRUE;
			array_start[frame_counter].opacity = opacity_count;
			array_start[frame_counter].x_position = element_x_position_start;
			array_start[frame_counter].y_position = element_y_position_start;
			opacity_count += floorf(opacity_step);
		}

		// Ensure the layer is completely visible after the end of the fade in
		// NOTE(review): frame_counter here equals finish_frame_rounded + 1;
		// when finish_frame_rounded == num_frames - 1 this indexes one slot
		// past the array — confirm num_frames allows for this trailing frame.
		array_start[frame_counter].action_this = TRUE;
		array_start[frame_counter].opacity_change = TRUE;
		array_start[frame_counter].opacity = 100;
	} else
	{
		// Indicate on which frame the element should be displayed, at what display depth, and its starting co-ordinates
		start_frame_rounded = roundf(start_frame);
		array_start[start_frame_rounded].add = TRUE;
		array_start[start_frame_rounded].x_position = element_x_position_start;
		array_start[start_frame_rounded].y_position = element_y_position_start;
		array_start[start_frame_rounded].action_this = TRUE;
		array_start[start_frame_rounded].opacity = 100;
	}

	// If there is a fade out transition, fill in the relevant elements
	if (TRANS_LAYER_NONE != this_layer_data->transition_out_type)
	{
		// Work out the starting and ending frames for the fade
		fade_frame = start_frame;
		if (TRANS_LAYER_NONE != this_layer_data->transition_in_type)
			fade_frame += this_layer_data->transition_in_duration * get_frames_per_second();
		fade_frame += this_layer_data->duration * get_frames_per_second();
		finish_frame = fade_frame + (this_layer_data->transition_out_duration * get_frames_per_second());
		start_frame_rounded = roundf(fade_frame);
		finish_frame_rounded = roundf(finish_frame) == 0 ? 0 : ((roundf(finish_frame)>=num_frames) ? num_frames-1 : roundf(finish_frame));

		// Work out how much opacity to decrement each frame by
		opacity_step = 100 / ((this_layer_data->transition_out_duration * get_frames_per_second()));

		// Loop through each frame of the fade out, setting the opacity values
		// NOTE(review): opacity_count is a guint; the subtraction below can
		// wrap past zero on the final iterations — confirm downstream
		// consumers clamp the opacity value.
		opacity_count = 100;
		for (frame_counter = start_frame_rounded; frame_counter <= finish_frame_rounded; frame_counter++)
		{
			array_start[frame_counter].action_this = TRUE;
			array_start[frame_counter].opacity_change = TRUE;
			array_start[frame_counter].opacity = opacity_count;
			array_start[frame_counter].x_position = element_x_position_finish;
			array_start[frame_counter].y_position = element_y_position_finish;
			opacity_count -= floorf(opacity_step);
		}

		// Ensure the layer is completely invisible after the end of the fade out
		array_start[finish_frame_rounded].action_this = TRUE;
		array_start[finish_frame_rounded].opacity_change = TRUE;
		array_start[finish_frame_rounded].opacity = 0;
		array_start[finish_frame_rounded].x_position = element_x_position_start;
		array_start[finish_frame_rounded].y_position = element_y_position_start;
	}

	// Work out the start frame of the fully visible layer display
	if (TRANS_LAYER_NONE != this_layer_data->transition_in_type)
		start_frame += this_layer_data->transition_in_duration * get_frames_per_second();

	// Work out the finish frame of the fully visible layer display
	finish_frame = start_frame + (this_layer_data->duration * get_frames_per_second());

	// If this is a mouse cursor with a click, we squeeze the movement part into a smaller number of frames
	// to give time for the click sound to play while the mouse is stationery and before it fades
	if (TYPE_MOUSE_CURSOR == this_layer_data->object_type)
	{
		// Simplify the pointer to the mouse click data
		mouse_data = (layer_mouse *) this_layer_data->object_data;

		// Determine how many frames to compress the mouse movement by, and also add the click sound
		// NOTE(review): the single/double/triple click cases are presently
		// identical (0.5 s each) — presumably placeholders for differing
		// durations.
		switch (mouse_data->click)
		{
			case MOUSE_LEFT_ONE:
			case MOUSE_RIGHT_ONE:
			case MOUSE_MIDDLE_ONE:
				click_duration = 0.5;
				click_frames = roundf(click_duration * get_frames_per_second()) + 1;
				play_click = mouse_data->click;
				break;

			case MOUSE_LEFT_DOUBLE:
			case MOUSE_RIGHT_DOUBLE:
			case MOUSE_MIDDLE_DOUBLE:
				click_duration = 0.5;
				click_frames = roundf(click_duration * get_frames_per_second()) + 1;
				play_click = mouse_data->click;
				break;

			case MOUSE_LEFT_TRIPLE:
			case MOUSE_RIGHT_TRIPLE:
			case MOUSE_MIDDLE_TRIPLE:
				click_duration = 0.5;
				click_frames = roundf(click_duration * get_frames_per_second()) + 1;
				play_click = mouse_data->click;
				break;

			case MOUSE_NONE:
			default:
				click_frames = 0;
				break;
		}

		// Compress the mouse movement by the determined amount
		finish_frame = CLAMP(finish_frame - click_frames, start_frame + 1, get_valid_fields_max_value(FRAME_NUMBER));
	}

	start_frame_rounded = roundf(start_frame);
	finish_frame_rounded = roundf(finish_frame) == 0 ? 0 : ((roundf(finish_frame)>=num_frames) ? num_frames-1 : roundf(finish_frame));
	num_displayed_frames = finish_frame_rounded - start_frame_rounded;

	// Skip layers with 0 full visibility duration
	if (0 < num_displayed_frames)
	{
		x_position = element_x_position_start;
		y_position = element_y_position_start;

		// If the layer moves, work out the movement related values
		if ((element_x_position_start != element_x_position_finish) || (element_y_position_start != element_y_position_finish))
		{
			// Work out how much to increment the frame movement by in each direction
			element_x_position_increment = (element_x_position_finish - element_x_position_start) / (num_displayed_frames);
			element_y_position_increment = (element_y_position_finish - element_y_position_start) / (num_displayed_frames);
		}

		// Loop through each frame of the fully visible layer, filling in the relevant elements
		loop_counter = 1;
		for (frame_counter = start_frame_rounded; frame_counter <= finish_frame_rounded; frame_counter++)
		{
			// Store the opacity, x and y positions for this layer for this frame
			array_start[frame_counter].opacity = 100;
			array_start[frame_counter].x_position = x_position;
			array_start[frame_counter].y_position = y_position;

			// If the layer moves, fill in the relevant elements
			if ((element_x_position_start != element_x_position_finish) || (element_y_position_start != element_y_position_finish))
			{
				// Mark this element as needing action taken
				array_start[frame_counter].action_this = TRUE;
				array_start[frame_counter].is_moving = TRUE;

				// Update the element position with each loop
				x_position = element_x_position_start + (element_x_position_increment * loop_counter);
				y_position = element_y_position_start + (element_y_position_increment * loop_counter);
				loop_counter++;  // We use a separate loop counter now in order to minimise the accumulated rounding effect
			}
		}

		// If a click sound should be played, we mark this
		// NOTE(review): frame_counter here equals finish_frame_rounded + 1;
		// when finish_frame_rounded == num_frames - 1 this indexes one slot
		// past the array — confirm num_frames allows for this trailing frame.
		if (MOUSE_NONE != play_click)
		{
			// Mark the appropriate frame as needing the mouse click sound to be played
			array_start[frame_counter].action_this = TRUE;
			array_start[frame_counter].click_sound_to_play = play_click;
		}
	}

	// Determine on which frame the element should be removed from display
	if (TRANS_LAYER_NONE != this_layer_data->transition_out_type)
		finish_frame += (this_layer_data->transition_out_duration * get_frames_per_second());
	finish_frame_rounded = roundf(finish_frame) == 0 ? 0 : ((roundf(finish_frame)>=num_frames) ? num_frames-1 : roundf(finish_frame));
	array_start[finish_frame_rounded].action_this = TRUE;
	array_start[finish_frame_rounded].remove = TRUE;

	return TRUE;
}
// Shapes (via shape()/ScriptShape) and places (ScriptPlace) one Uniscribe
// item, applies WebCore's space substitution, letter/word spacing,
// justification padding and rounding-hack width adjustments, pushes the
// resulting glyphs into the glyph buffer, and accumulates m_runWidthSoFar.
// Returns true to continue with the next item; returns false once the x
// offset being sought (m_computingOffsetPosition) has been resolved inside
// this item.
bool UniscribeController::shapeAndPlaceItem(const UChar* cp, unsigned i, const SimpleFontData* fontData, GlyphBuffer* glyphBuffer)
{
    // Determine the string for this item.
    const UChar* str = cp + m_items[i].iCharPos;
    int len = m_items[i+1].iCharPos - m_items[i].iCharPos;
    SCRIPT_ITEM item = m_items[i];

    // Set up buffers to hold the results of shaping the item.
    Vector<WORD> glyphs;
    Vector<WORD> clusters;
    Vector<SCRIPT_VISATTR> visualAttributes;
    clusters.resize(len);

    // Shape the item.
    // The recommended size for the glyph buffer is 1.5 * the character length + 16 in the uniscribe docs.
    // Apparently this is a good size to avoid having to make repeated calls to ScriptShape.
    glyphs.resize(1.5 * len + 16);
    visualAttributes.resize(glyphs.size());

    // A shaping failure skips this item but continues with the run.
    if (!shape(str, len, item, fontData, glyphs, clusters, visualAttributes))
        return true;

    // We now have a collection of glyphs.
    Vector<GOFFSET> offsets;
    Vector<int> advances;
    offsets.resize(glyphs.size());
    advances.resize(glyphs.size());
    int glyphCount = 0; // NOTE(review): unused in this function
    HRESULT placeResult = ScriptPlace(0, fontData->scriptCache(), glyphs.data(), glyphs.size(), visualAttributes.data(),
                                      &item.a, advances.data(), offsets.data(), 0);
    if (placeResult == E_PENDING) {
        // The script cache isn't primed with enough info yet. We need to select our HFONT into
        // a DC and pass the DC in to ScriptPlace.
        HDC hdc = GetDC(0);
        HFONT hfont = fontData->platformData().hfont();
        HFONT oldFont = (HFONT)SelectObject(hdc, hfont);
        placeResult = ScriptPlace(hdc, fontData->scriptCache(), glyphs.data(), glyphs.size(), visualAttributes.data(),
                                  &item.a, advances.data(), offsets.data(), 0);
        SelectObject(hdc, oldFont);
        ReleaseDC(0, hdc);
    }

    if (FAILED(placeResult) || glyphs.isEmpty())
        return true;

    // Convert all chars that should be treated as spaces to use the space glyph.
    // We also create a map that allows us to quickly go from space glyphs or rounding
    // hack glyphs back to their corresponding characters.
    Vector<int> spaceCharacters(glyphs.size());
    spaceCharacters.fill(-1);
    Vector<int> roundingHackCharacters(glyphs.size());
    roundingHackCharacters.fill(-1);
    Vector<int> roundingHackWordBoundaries(glyphs.size());
    roundingHackWordBoundaries.fill(-1);

    // GDI places in device units; non-GDI placement uses 1/32 logical units.
    const float cLogicalScale = fontData->platformData().useGDI() ? 1.0f : 32.0f;
    unsigned logicalSpaceWidth = fontData->spaceWidth() * cLogicalScale;
    float roundedSpaceWidth = roundf(fontData->spaceWidth());

    for (int k = 0; k < len; k++) {
        UChar ch = *(str + k);
        if (Font::treatAsSpace(ch)) {
            // Substitute in the space glyph at the appropriate place in the glyphs
            // array.
            glyphs[clusters[k]] = fontData->spaceGlyph();
            advances[clusters[k]] = logicalSpaceWidth;
            spaceCharacters[clusters[k]] = m_currentCharacter + k + item.iCharPos;
        }

        if (Font::isRoundingHackCharacter(ch))
            roundingHackCharacters[clusters[k]] = m_currentCharacter + k + item.iCharPos;

        // NOTE(review): the read of *(str + k + 1) below is guarded only by
        // boundary < m_run.length(); confirm that guard also keeps k + 1
        // inside this item's text when k == len - 1.
        int boundary = k + m_currentCharacter + item.iCharPos;
        if (boundary < m_run.length() &&
            Font::isRoundingHackCharacter(*(str + k + 1)))
            roundingHackWordBoundaries[clusters[k]] = boundary;
    }

    // Populate our glyph buffer with this information.
    bool hasExtraSpacing = m_font.letterSpacing() || m_font.wordSpacing() || m_padding;

    float leftEdge = m_runWidthSoFar;

    for (unsigned k = 0; k < glyphs.size(); k++) {
        Glyph glyph = glyphs[k];
        // Convert placement results back from logical units to pixels.
        float advance = advances[k] / cLogicalScale;
        float offsetX = offsets[k].du / cLogicalScale;
        float offsetY = offsets[k].dv / cLogicalScale;

        // Match AppKit's rules for the integer vs. non-integer rendering modes.
        float roundedAdvance = roundf(advance);
        if (!m_font.isPrinterFont() && !fontData->isSystemFont()) {
            advance = roundedAdvance;
            offsetX = roundf(offsetX);
            offsetY = roundf(offsetY);
        }

        advance += fontData->syntheticBoldOffset();

        // We special case spaces in two ways when applying word rounding.
        // First, we round spaces to an adjusted width in all fonts.
        // Second, in fixed-pitch fonts we ensure that all glyphs that
        // match the width of the space glyph have the same width as the space glyph.
        if (roundedAdvance == roundedSpaceWidth && (fontData->pitch() == FixedPitch || glyph == fontData->spaceGlyph()) &&
            m_run.applyWordRounding())
            advance = fontData->adjustedSpaceWidth();

        if (hasExtraSpacing) {
            // If we're a glyph with an advance, go ahead and add in letter-spacing.
            // That way we weed out zero width lurkers. This behavior matches the fast text code path.
            if (advance && m_font.letterSpacing())
                advance += m_font.letterSpacing();

            // Handle justification and word-spacing.
            if (glyph == fontData->spaceGlyph()) {
                // Account for padding. WebCore uses space padding to justify text.
                // We distribute the specified padding over the available spaces in the run.
                if (m_padding) {
                    // Use leftover padding if not evenly divisible by number of spaces.
                    if (m_padding < m_padPerSpace) {
                        advance += m_padding;
                        m_padding = 0;
                    } else {
                        advance += m_padPerSpace;
                        m_padding -= m_padPerSpace;
                    }
                }

                // Account for word-spacing.
                int characterIndex = spaceCharacters[k];
                if (characterIndex > 0 && !Font::treatAsSpace(*m_run.data(characterIndex - 1)) && m_font.wordSpacing())
                    advance += m_font.wordSpacing();
            }
        }

        // Deal with the float/integer impedance mismatch between CG and WebCore. "Words" (characters
        // followed by a character defined by isRoundingHackCharacter()) are always an integer width.
        // We adjust the width of the last character of a "word" to ensure an integer width.
        // Force characters that are used to determine word boundaries for the rounding hack
        // to be integer width, so the following words will start on an integer boundary.
        int roundingHackIndex = roundingHackCharacters[k];
        if (m_run.applyWordRounding() && roundingHackIndex != -1)
            advance = ceilf(advance);

        // Check to see if the next character is a "rounding hack character", if so, adjust the
        // width so that the total run width will be on an integer boundary.
        int position = m_currentCharacter + len;
        bool lastGlyph = (k == glyphs.size() - 1) && (m_run.rtl() ? i == 0 : i == m_items.size() - 2) && (position >= m_end);
        if ((m_run.applyWordRounding() && roundingHackWordBoundaries[k] != -1) ||
            (m_run.applyRunRounding() && lastGlyph)) {
            float totalWidth = m_runWidthSoFar + advance;
            advance += ceilf(totalWidth) - totalWidth;
        }

        m_runWidthSoFar += advance;

        // FIXME: We need to take the GOFFSETS for combining glyphs and store them in the glyph buffer
        // as well, so that when the time comes to draw those glyphs, we can apply the appropriate
        // translation.
        if (glyphBuffer) {
            FloatSize size(offsetX, -offsetY);
            glyphBuffer->add(glyph, fontData, advance, &size);
        }

        // Mutate the glyph array to contain our altered advances.
        if (m_computingOffsetPosition)
            advances[k] = advance;
    }

    // Hit-testing mode: translate the sought x offset into a character
    // position within this item via ScriptXtoCP.
    while (m_computingOffsetPosition && m_offsetX >= leftEdge && m_offsetX < m_runWidthSoFar) {
        // The position is somewhere inside this run.
        int trailing = 0;
        ScriptXtoCP(m_offsetX - leftEdge, clusters.size(), glyphs.size(), clusters.data(), visualAttributes.data(),
                    advances.data(), &item.a, &m_offsetPosition, &trailing);
        if (trailing && m_includePartialGlyphs && m_offsetPosition < len - 1) {
            m_offsetPosition += m_currentCharacter + m_items[i].iCharPos;
            m_offsetX += m_run.rtl() ? -trailing : trailing;
        } else {
            m_computingOffsetPosition = false;
            m_offsetPosition += m_currentCharacter + m_items[i].iCharPos;
            if (trailing && m_includePartialGlyphs)
                m_offsetPosition++;
            return false;
        }
    }

    return true;
}
/* Performs a GNSS signal acquisition search over a range of carrier
 * frequencies using FFT-based circular correlation.
 *
 * sid          - signal identifier for which to generate the local code.
 * cf_min/max   - carrier-frequency search range (Hz).
 * cf_bin_width - requested Doppler bin spacing (Hz).
 * acq_result   - out: sample count, code phase, Doppler and C/N0 of the peak.
 *
 * Returns false if any FFT step fails, true otherwise.
 */
bool acq_search(gnss_signal_t sid, float cf_min, float cf_max,
                float cf_bin_width, acq_result_t *acq_result)
{
  /* Configuration */
  u32 fft_len_log2 = FFT_LEN_LOG2_MAX;
  u32 fft_len = 1 << fft_len_log2;
  float fft_bin_width = NAP_ACQ_SAMPLE_RATE_Hz / fft_len;
  float chips_per_sample = CHIP_RATE / NAP_ACQ_SAMPLE_RATE_Hz;

  /* Generate, resample, and FFT code */
  /* NOTE: static buffers make this function non-reentrant — TODO confirm
   * single-threaded use by callers. */
  static fft_cplx_t code_fft[FFT_LEN_MAX];
  code_resample(sid, chips_per_sample, code_fft, fft_len);
  if (!fft(code_fft, code_fft, fft_len_log2,
           FFT_DIR_FORWARD, FFT_SCALE_SCHED_CODE)) {
    return false;
  }

  /* FFT samples */
  u32 sample_count;
  static fft_cplx_t sample_fft[FFT_LEN_MAX];
  if(!fft_samples(FFT_SAMPLES_INPUT, sample_fft, fft_len_log2,
                  FFT_DIR_FORWARD, FFT_SCALE_SCHED_SAMPLES,
                  &sample_count)) {
    return false;
  }

  /* Search for results */
  float best_mag_sq = 0.0f;
  float best_mag_sq_sum = 0.0f;
  float best_doppler = 0.0f;
  u32 best_sample_offset = 0;

  /* Loop over Doppler bins */
  s32 doppler_bin_min = (s32)floorf(cf_min / cf_bin_width);
  s32 doppler_bin_max = (s32)floorf(cf_max / cf_bin_width);
  for (s32 doppler_bin = doppler_bin_min;
       doppler_bin <= doppler_bin_max; doppler_bin++) {

    /* A Doppler shift is applied by rotating the frequency-domain samples
     * by an integer number of FFT bins. */
    s32 sample_offset = (s32)roundf(doppler_bin * cf_bin_width / fft_bin_width);

    /* Actual computed Doppler */
    float doppler = sample_offset * fft_bin_width;

    /* Multiply sample FFT by shifted conjugate code FFT */
    static fft_cplx_t result_fft[FFT_LEN_MAX];
    for (u32 i=0; i<fft_len; i++) {
      const fft_cplx_t *a = &code_fft[i];
      /* fft_len is a power of two, so the mask implements the circular
       * (wrapping) bin shift. */
      const fft_cplx_t *b = &sample_fft[(i + sample_offset) & (fft_len - 1)];
      fft_cplx_t *r = &result_fft[i];

      s32 a_re = (s32)a->re;
      s32 a_im = (s32)a->im;
      s32 b_re = (s32)b->re;
      s32 b_im = (s32)b->im;

      /* Complex multiply a * conj(b), scaled down to avoid overflow. */
      r->re = ((a_re * b_re) + (a_im * b_im)) / RESULT_DIV;
      r->im = ((a_re * -b_im) + (a_im * b_re)) / RESULT_DIV;
    }

    /* Inverse FFT */
    if (!fft(result_fft, result_fft, fft_len_log2,
             FFT_DIR_BACKWARD, FFT_SCALE_SCHED_INV)) {
      return false;
    }

    /* Peak search */
    float mag_sq_sum = 0.0f;
    bool match = false;
    for (u32 i=0; i<fft_len; i++) {
      const fft_cplx_t *r = &result_fft[i];
      float re = (float)r->re;
      float im = (float)r->im;
      float mag_sq = re*re + im*im;
      mag_sq_sum += mag_sq;
      if (mag_sq > best_mag_sq) {
        best_mag_sq = mag_sq;
        best_doppler = doppler;
        best_sample_offset = i;
        match = true;
      }
    }
    /* Remember the noise-floor estimate of the bin that produced the
     * current global best peak; used for the C/N0 computation below. */
    if (match) {
      best_mag_sq_sum = mag_sq_sum;
    }
  }

  /* Account for non-integer number of codes and circular convolution:
   * If correlation peak is in the first half of the buffer, most samples
   * have NOT wrapped, so assume a positive shift.
   * If correlation peak is in the second half of the buffer, most samples
   * HAVE wrapped, so assume a negative shift. */
  s32 corrected_sample_offset = (best_sample_offset < fft_len/2) ?
      (s32)best_sample_offset : (s32)best_sample_offset - (s32)fft_len;

  /* Compute code phase */
  float cp = chips_per_sample * corrected_sample_offset;
  /* Modulus code length */
  cp -= CODE_LENGTH * floorf(cp / CODE_LENGTH);

  /* Compute C/N0 */
  float snr = best_mag_sq / (best_mag_sq_sum / fft_len);
  float cn0 = 10.0f * log10f(snr) + 10.0f * log10f(fft_bin_width); /* Bandwidth */

  /* Set output */
  acq_result->sample_count = sample_count;
  acq_result->cp = cp;
  acq_result->cf = best_doppler;
  acq_result->cn0 = cn0;
  return true;
}
// Resolves the page's viewport meta-tag arguments against the device metrics
// into concrete viewport attributes: layout size, initial/minimum/maximum
// scale and device pixel ratio, following the CSS device-adaptation rules.
ViewportAttributes computeViewportAttributes(ViewportArguments args, int desktopWidth, int deviceWidth, int deviceHeight, int deviceDPI, IntSize visibleViewport)
{
    ViewportAttributes result;

    float availableWidth = visibleViewport.width();
    float availableHeight = visibleViewport.height();

#if !OS(ANDROID)
    // FIXME: Enable this assert http://b/5633905.
    ASSERT(availableWidth > 0 && availableHeight > 0);
#endif

    // Default target density: implicit viewports use the hardware DPI,
    // explicit meta viewports default to the 160dpi CSS reference density.
    float autoDPI = deviceDPI;
    switch (args.type) {
    case ViewportArguments::Implicit:
        autoDPI = deviceDPI;
        break;
    case ViewportArguments::ViewportMeta:
        autoDPI = 160;
        break;
    }

#if OS(ANDROID)
    bool isAutoDPI = args.targetDensityDpi == ViewportArguments::ValueAuto;
#endif
    // Map symbolic target-densitydpi keywords to concrete DPI values.
    switch (int(args.targetDensityDpi)) {
    case ViewportArguments::ValueDeviceDPI:
        args.targetDensityDpi = deviceDPI;
        break;
    case ViewportArguments::ValueLowDPI:
        args.targetDensityDpi = 120;
        break;
    case ViewportArguments::ValueAuto:
        args.targetDensityDpi = autoDPI;
        break;
    case ViewportArguments::ValueMediumDPI:
        args.targetDensityDpi = 160;
        break;
    case ViewportArguments::ValueHighDPI:
        args.targetDensityDpi = 240;
        break;
    }

    result.devicePixelRatio = float(deviceDPI / args.targetDensityDpi);

#if OS(ANDROID)
    // Snap the devicePixelRatio so that it converts the device width from
    // integer to integer. Since we're currently applying it with page scale,
    // effectively this will happen later in the pipeline anyway, so we should
    // do it in advance to make all calculations match that.
    int deviceWidthInLayoutPixels = static_cast<int>(deviceWidth / result.devicePixelRatio);
    result.devicePixelRatio = deviceWidth / static_cast<float>(deviceWidthInLayoutPixels);
#endif

    // Resolve non-'auto' width and height to pixel values.
    if (result.devicePixelRatio != 1.0) {
        availableWidth /= result.devicePixelRatio;
        availableHeight /= result.devicePixelRatio;
        // NOTE: deviceWidth/deviceHeight are ints, so these divisions
        // truncate toward zero.
        deviceWidth /= result.devicePixelRatio;
        deviceHeight /= result.devicePixelRatio;
    }

#if OS(ANDROID)
    result.devicePixelRatioForDeviceDimensions = isAutoDPI ? (deviceDPI / 160.0f) : result.devicePixelRatio;
#endif

    // Substitute keyword widths/heights with the resolved pixel values.
    switch (int(args.width)) {
    case ViewportArguments::ValueDesktopWidth:
        args.width = desktopWidth;
        break;
    case ViewportArguments::ValueDeviceWidth:
        args.width = deviceWidth;
        break;
    case ViewportArguments::ValueDeviceHeight:
        args.width = deviceHeight;
        break;
    }

    switch (int(args.height)) {
    case ViewportArguments::ValueDesktopWidth:
        args.height = desktopWidth;
        break;
    case ViewportArguments::ValueDeviceWidth:
        args.height = deviceWidth;
        break;
    case ViewportArguments::ValueDeviceHeight:
        args.height = deviceHeight;
        break;
    }

    // Clamp values to range defined by spec and resolve minimum-scale and maximum-scale values
    if (args.width != ViewportArguments::ValueAuto)
        args.width = min(float(10000), max(args.width, float(1)));
    if (args.height != ViewportArguments::ValueAuto)
        args.height = min(float(10000), max(args.height, float(1)));

    if (args.initialScale != ViewportArguments::ValueAuto)
        args.initialScale = min(float(10), max(args.initialScale, float(0.1)));
    if (args.minimumScale != ViewportArguments::ValueAuto)
        args.minimumScale = min(float(10), max(args.minimumScale, float(0.1)));
    if (args.maximumScale != ViewportArguments::ValueAuto)
        args.maximumScale = min(float(10), max(args.maximumScale, float(0.1)));

    // Resolve minimum-scale and maximum-scale values according to spec.
    if (args.minimumScale == ViewportArguments::ValueAuto)
        result.minimumScale = float(0.25);
    else
        result.minimumScale = args.minimumScale;

    if (args.maximumScale == ViewportArguments::ValueAuto) {
        result.maximumScale = float(5.0);
        result.minimumScale = min(float(5.0), result.minimumScale);
    } else
        result.maximumScale = args.maximumScale;
    // Guarantee maximumScale >= minimumScale.
    result.maximumScale = max(result.minimumScale, result.maximumScale);

    // Resolve initial-scale value.
    result.initialScale = args.initialScale;
    if (result.initialScale == ViewportArguments::ValueAuto) {
        result.initialScale = availableWidth / desktopWidth;
        if (args.width != ViewportArguments::ValueAuto)
            result.initialScale = availableWidth / args.width;
        if (args.height != ViewportArguments::ValueAuto) {
            // if 'auto', the initial-scale will be negative here and thus ignored.
            result.initialScale = max<float>(result.initialScale, availableHeight / args.height);
        }
    }

    // Constrain initial-scale value to minimum-scale/maximum-scale range.
    result.initialScale = min(result.maximumScale, max(result.minimumScale, result.initialScale));

    // Resolve width value.
    float width;
    if (args.width != ViewportArguments::ValueAuto)
        width = args.width;
    else {
        if (args.initialScale == ViewportArguments::ValueAuto)
            width = desktopWidth;
        else if (args.height != ViewportArguments::ValueAuto)
            width = args.height * (availableWidth / availableHeight);
        else
            width = availableWidth / result.initialScale;
    }

    // Resolve height value.
    float height;
    if (args.height != ViewportArguments::ValueAuto)
        height = args.height;
    else
        height = width * availableHeight / availableWidth;

    // Extend width and height to fill the visual viewport for the resolved initial-scale.
    width = max<float>(width, availableWidth / result.initialScale);
    height = max<float>(height, availableHeight / result.initialScale);
    result.layoutSize.setWidth(static_cast<int>(roundf(width)));
    result.layoutSize.setHeight(static_cast<int>(roundf(height)));

    result.userScalable = args.userScalable;

    return result;
}
/* Renders one line of UTF-8 text into the Wii U sprite vertex cache and
 * issues the GX2 draw call for it.
 *
 * pos_x/pos_y are normalized [0,1] screen positions (y measured from the
 * bottom); scale is the glyph scale factor; text_align selects left/center/
 * right alignment relative to pos_x. Silently returns if the vertex cache
 * cannot hold msg_len quads. */
static void wiiu_font_render_line(
      video_frame_info_t *video_info,
      wiiu_font_t* font, const char* msg, unsigned msg_len,
      float scale, const unsigned int color, float pos_x,
      float pos_y, unsigned text_align)
{
   unsigned i;
   wiiu_video_t* wiiu = (wiiu_video_t*)video_info->userdata;
   unsigned width     = video_info->width;
   unsigned height    = video_info->height;
   /* Convert normalized coordinates to pixels; y axis is flipped. */
   int x              = roundf(pos_x * width);
   int y              = roundf((1.0 - pos_y) * height);

   if (!wiiu ||
       wiiu->vertex_cache.current + (msg_len * 4) > wiiu->vertex_cache.size)
      return;

   switch (text_align)
   {
      case TEXT_ALIGN_RIGHT:
         x -= wiiu_font_get_message_width(font, msg, msg_len, scale);
         break;

      case TEXT_ALIGN_CENTER:
         x -= wiiu_font_get_message_width(font, msg, msg_len, scale) / 2;
         break;
   }

   sprite_vertex_t* v = wiiu->vertex_cache.v + wiiu->vertex_cache.current;

   for (i = 0; i < msg_len; i++)
   {
      const char* msg_tmp = &msg[i];
      unsigned code       = utf8_walk(&msg_tmp);
      unsigned skip       = msg_tmp - &msg[i];

      /* utf8_walk may consume multiple bytes for one code point. */
      if (skip > 1)
         i += skip - 1;

      const struct font_glyph* glyph =
            font->font_driver->get_glyph(font->font_data, code);

      if (!glyph) /* Do something smarter here ... */
         glyph = font->font_driver->get_glyph(font->font_data, '?');
      if (!glyph)
         continue;

      v->pos.x      = x + glyph->draw_offset_x * scale;
      v->pos.y      = y + glyph->draw_offset_y * scale;
      v->pos.width  = glyph->width * scale;
      v->pos.height = glyph->height * scale;

      v->coord.u      = glyph->atlas_offset_x;
      v->coord.v      = glyph->atlas_offset_y;
      v->coord.width  = glyph->width;
      v->coord.height = glyph->height;

      v->color = color;
      v++;

      x += glyph->advance_x * scale;
      y += glyph->advance_y * scale;
   }

   int count = v - wiiu->vertex_cache.v - wiiu->vertex_cache.current;
   if (!count)
      return;

   /* BUGFIX: the invalidation size previously used
    * sizeof(wiiu->vertex_cache.v) — the size of the POINTER, not of a
    * vertex — so only a fraction of the freshly written vertex data was
    * flushed from the CPU cache before the GPU read it. */
   GX2Invalidate(GX2_INVALIDATE_MODE_CPU_ATTRIBUTE_BUFFER,
                 wiiu->vertex_cache.v + wiiu->vertex_cache.current,
                 count * sizeof(*wiiu->vertex_cache.v));

   if (font->atlas->dirty)
   {
      /* Re-upload the glyph atlas row by row (surface pitch may differ
       * from the atlas width). */
      for (i = 0; (i < font->atlas->height) && (i < font->texture.surface.height); i++)
         memcpy(font->texture.surface.image + (i * font->texture.surface.pitch),
                font->atlas->buffer + (i * font->atlas->width),
                font->atlas->width);

      GX2Invalidate(GX2_INVALIDATE_MODE_CPU_TEXTURE,
                    font->texture.surface.image,
                    font->texture.surface.imageSize);
      font->atlas->dirty = false;
   }

   GX2SetPixelTexture(&font->texture, sprite_shader.ps.samplerVars[0].location);
   GX2SetVertexUniformBlock(sprite_shader.vs.uniformBlocks[1].offset,
                            sprite_shader.vs.uniformBlocks[1].size,
                            font->ubo_tex);
   GX2DrawEx(GX2_PRIMITIVE_MODE_POINTS, count, wiiu->vertex_cache.current, 1);
   GX2SetVertexUniformBlock(sprite_shader.vs.uniformBlocks[1].offset,
                            sprite_shader.vs.uniformBlocks[1].size,
                            wiiu->ubo_tex);

   wiiu->vertex_cache.current = v - wiiu->vertex_cache.v;
}
// frame detection void wlanframesync_execute_seekplcp(wlanframesync _q) { _q->timer++; // TODO : only check every 100 - 150 (decimates/reduced complexity) if (_q->timer < 64) return; // reset timer _q->timer = 0; // read contents of input buffer float complex * rc; windowcf_read(_q->input_buffer, &rc); // estimate gain // TODO : use gain from result of FFT unsigned int i; float g = 0.0f; for (i=16; i<80; i+=4) { // compute |rc[i]|^2 efficiently g += crealf(rc[i ])*crealf(rc[i ]) + cimagf(rc[i ])*cimagf(rc[i ]); g += crealf(rc[i+1])*crealf(rc[i+1]) + cimagf(rc[i+1])*cimagf(rc[i+1]); g += crealf(rc[i+2])*crealf(rc[i+2]) + cimagf(rc[i+2])*cimagf(rc[i+2]); g += crealf(rc[i+3])*crealf(rc[i+3]) + cimagf(rc[i+3])*cimagf(rc[i+3]); } g = 64.0f / (g + 1e-12f); // save gain (permits dynamic invocation of get_rssi() method) _q->g0 = g; // estimate S0 gain wlanframesync_estimate_gain_S0(_q, &rc[16], _q->G0a); // compute S0 metrics float complex s_hat; wlanframesync_S0_metrics(_q, _q->G0a, &s_hat); s_hat *= g; float tau_hat = cargf(s_hat) * (float)(16.0f) / (2*M_PI); #if DEBUG_WLANFRAMESYNC_PRINT printf(" - gain=%12.3f, rssi=%8.2f dB, s_hat=%12.4f <%12.8f>, tau_hat=%8.3f\n", sqrt(g), -10*log10(g), cabsf(s_hat), cargf(s_hat), tau_hat); #endif // if (cabsf(s_hat) > WLANFRAMESYNC_S0A_ABS_THRESH) { int dt = (int)roundf(tau_hat); // set timer appropriately... _q->timer = (16 + dt) % 16; //_q->timer += 32; // add delay to help ensure good S0 estimate (multiple of 16) _q->state = WLANFRAMESYNC_STATE_RXSHORT0; #if DEBUG_WLANFRAMESYNC_PRINT printf("********** frame detected! ************\n"); printf(" s_hat : %12.8f <%12.8f>\n", cabsf(s_hat), cargf(s_hat)); printf(" tau_hat : %12.8f\n", tau_hat); printf(" dt : %12d\n", dt); printf(" timer : %12u\n", _q->timer); #endif } }
int Entity::getRoundedY() const { return roundf( y ); }
int Entity::getRoundedX() const { return roundf( x ); }
//Returns steps from units (mm) for a particular drive long LookAhead::EndPointToMachine(int8_t drive, float coord) { return (long)roundf(coord*reprap.GetPlatform()->DriveStepsPerUnit(drive)); }
/* Recomputes the PS Vita framebuffer viewport (x, y, width, height) from the
 * current settings: integer scaling, aspect-ratio preservation (including
 * rotated orientations and custom viewports), or full-screen stretch. */
static void vita2d_gfx_update_viewport(vita_video_t* vita)
{
   int x                = 0;
   int y                = 0;
   float device_aspect  = ((float)PSP_FB_WIDTH) / PSP_FB_HEIGHT;
   float width          = PSP_FB_WIDTH;
   float height         = PSP_FB_HEIGHT;
   settings_t *settings = config_get_ptr();

   if (settings->video.scale_integer)
   {
      /* Integer scaling: delegate to the common helper. */
      video_viewport_get_scaled_integer(&vita->vp, PSP_FB_WIDTH,
            PSP_FB_HEIGHT, video_driver_get_aspect_ratio(), vita->keep_aspect);
      width  = vita->vp.width;
      height = vita->vp.height;
   }
   else if (vita->keep_aspect)
   {
      float desired_aspect = video_driver_get_aspect_ratio();
      /* For rotated orientations the framebuffer axes swap roles. */
      if (vita->rotation == ORIENTATION_VERTICAL ||
            vita->rotation == ORIENTATION_FLIPPED_ROTATED){
         device_aspect = 1.0 / device_aspect;
         width = PSP_FB_HEIGHT;
         height = PSP_FB_WIDTH;
      }
#if defined(HAVE_MENU)
      if (settings->video.aspect_ratio_idx == ASPECT_RATIO_CUSTOM)
      {
         /* User-defined viewport overrides everything else. */
         struct video_viewport *custom = video_viewport_get_custom();

         if (custom)
         {
            x      = custom->x;
            y      = custom->y;
            width  = custom->width;
            height = custom->height;
         }
      }
      else
#endif
      {
         float delta;

         if ((fabsf(device_aspect - desired_aspect) < 0.0001f))
         {
            /* If the aspect ratios of screen and desired aspect
             * ratio are sufficiently equal (floating point stuff),
             * assume they are actually equal. */
         }
         else if (device_aspect > desired_aspect)
         {
            /* Screen wider than content: pillarbox (shrink width, center x). */
            delta = (desired_aspect / device_aspect - 1.0f) / 2.0f + 0.5f;
            x     = (int)roundf(width * (0.5f - delta));
            width = (unsigned)roundf(2.0f * width * delta);
         }
         else
         {
            /* Screen taller than content: letterbox (shrink height, center y). */
            delta  = (device_aspect / desired_aspect - 1.0f) / 2.0f + 0.5f;
            y      = (int)roundf(height * (0.5f - delta));
            height = (unsigned)roundf(2.0f * height * delta);
         }

         /* Rotated orientations re-center against the unswapped framebuffer. */
         if (vita->rotation == ORIENTATION_VERTICAL ||
               vita->rotation == ORIENTATION_FLIPPED_ROTATED){
            x = (PSP_FB_WIDTH - width) * 0.5f;
            y = (PSP_FB_HEIGHT - height) * 0.5f;
         }
      }

      vita->vp.x      = x;
      vita->vp.y      = y;
      vita->vp.width  = width;
      vita->vp.height = height;
   }
   else
   {
      /* No aspect correction: stretch to the full framebuffer. */
      vita->vp.x = vita->vp.y = 0;
      vita->vp.width  = width;
      vita->vp.height = height;
   }

   /* Round dimensions up to even values (hardware alignment). */
   vita->vp.width  += vita->vp.width&0x1;
   vita->vp.height += vita->vp.height&0x1;

   vita->should_resize = false;
}
static PassRefPtrWillBeRawPtr<SVGInteger> toPositiveInteger(const InterpolableValue* number) { return SVGInteger::create(clampTo<int>(roundf(toInterpolableNumber(number)->value()), 1)); }
Fixed::Fixed(const float f) : _raw_bits(roundf(f * (1 << _n_frac_bits))) { std::cout << "Float constructor called" << std::endl; }
int main () { float S[8][16], val; int i,k, count = 0; int vali; printf("/* sbc_coeffs.h - Automatically generated by cosdata.c. */\n\n"); vali = SI_MULTI; printf("#define SIMULTI\t%d\n\n", vali); printf("static const int32_t sbc_coeffs8[] = {\n "); for (k = 0;k < AC(sbc8_coeffs);k++) { if (count % 8 == 0 && count != 0) printf("\n "); val = sbc8_coeffs[k] * COEFFS_MULTI; vali = roundf(val); printf("%d, ",vali); count++; } printf("\n};\n"); count = 0; printf("static const int32_t sbc_coeffs4[] = {\n "); for (k = 0;k < AC(sbc4_coeffs);k++) { if (count % 8 == 0 && count != 0) printf("\n "); val = sbc4_coeffs[k] * COEFFS_MULTI; vali = roundf(val); printf("%d, ",vali); count++; } printf("\n};\n"); count = 0; printf("static const int32_t cosdata8[8][16] = {\n "); for (i = 0; i < 8; i++) { for (k = 0;k < 16;k++) { S[i][k] = cosf((i+0.5)*(k-4)*(M_PI_4/2)); if (count % 8 == 0 && count != 0) printf("\n "); if (k == 0) printf("{ "); val = S[i][k] * SI_MULTI; vali = roundf(val); printf("%d, ",vali); if (k == 15) printf("},"); count++; } } printf("\n};\n"); count = 0; printf("static const int32_t cosdata4[4][8] = {\n "); for (i = 0; i < 4; i++) { for (k = 0;k < 8;k++) { S[i][k] = cosf((i+0.5)*(k-2)*(M_PI_4)); if (count % 8 == 0 && count != 0) printf("\n "); if (k == 0) printf("{ "); val = S[i][k] * SI_MULTI; vali = roundf(val); printf("%d, ",vali); if (k == 7) printf("},"); count++; } } printf("\n};\n"); return 0; }
float CMouseControl::MovePointerRel (float dx, float dy, int* dxRes, int* dyRes) { OnDisplayChanged (); // Apply factors dx*= m_fDx; dy*= m_fDy; // Low-pass filter dx= dx * (1.0f - m_actualMotionWeight) + m_dxant * m_actualMotionWeight; dy= dy * (1.0f - m_actualMotionWeight) + m_dyant * m_actualMotionWeight; m_dxant= dx; m_dyant= dy; // Acceleration float distance= (float) ::sqrt (dx * dx + dy * dy); unsigned int iAccelArray= (unsigned int) (distance + 0.5f); if (iAccelArray>= ACCEL_ARRAY_SIZE) iAccelArray= ACCEL_ARRAY_SIZE - 1; dx*= m_accelArray[iAccelArray]; dy*= m_accelArray[iAccelArray]; // Apply delta threshold if (-m_minDeltaThreshold < dx && dx < m_minDeltaThreshold) dx= 0.0f; if (-m_minDeltaThreshold < dy && dy < m_minDeltaThreshold) dy= 0.0f; int idx= (int) roundf(dx); int idy= (int) roundf(dy); int mouseX, mouseY; if (m_enabledRestrictedWorkingArea && !m_enabledWrapPointer) { GetPointerLocation (mouseX, mouseY); if (mouseX + idx< m_minScreenX) idx= m_minScreenX - mouseX; else if (mouseX + idx > m_maxScreenX) idx= m_maxScreenX - mouseX; if (mouseY + idy < m_minScreenY) idy= m_minScreenY - mouseY; else if (mouseY + idy > m_maxScreenY) idy= m_maxScreenY - mouseY; } if (m_enabledWrapPointer) { int minWrapX= 0; int maxWrapX= m_ScreenWidth; int minWrapY= 0; int maxWrapY= m_ScreenHeight; if (m_enabledRestrictedWorkingArea) { minWrapX= m_minScreenX; maxWrapX= m_maxScreenX; minWrapY= m_minScreenY; maxWrapY= m_maxScreenY; } GetPointerLocation(mouseX, mouseY); if (mouseX + idx < minWrapX) { idx -= mouseX - minWrapX; DoMovePointerAbs(maxWrapX, mouseY); } if (mouseX + idx > maxWrapX) { idx -= maxWrapX - mouseX; DoMovePointerAbs(minWrapX, mouseY); } if (mouseY + idy < minWrapY) { idy -= mouseY - minWrapY; DoMovePointerAbs(mouseX, maxWrapY); } if (mouseY + idy > maxWrapY) { idy -= maxWrapY - mouseY; DoMovePointerAbs(mouseX, minWrapY); } } DoMovePointerRel (idx, idy); if (dxRes) *dxRes= idx; if (dyRes) *dyRes= idy; return (float) sqrt((double)(idx * idx + idy * idy)); }
/* Initializes the FFmpeg audio encoder for the recorder: finds the codec
 * (configured or "flac" by default), configures channels, sample format,
 * sample rate (optionally resampling), bitrate/quality, opens the codec and
 * allocates the input and output buffers.
 *
 * Returns false if the codec cannot be found, opened, or a buffer cannot
 * be allocated. */
static bool ffemu_init_audio(ffemu_t *handle)
{
   struct ff_config_param *params = &handle->config;
   struct ff_audio_info *audio    = &handle->audio;
   struct ff_video_info *video    = &handle->video;
   struct ffemu_params *param     = &handle->params;

   AVCodec *codec = avcodec_find_encoder_by_name(*params->acodec ?
         params->acodec : "flac");
   if (!codec)
   {
      RARCH_ERR("[FFmpeg]: Cannot find acodec %s.\n",
            *params->acodec ? params->acodec : "flac");
      return false;
   }

   audio->encoder = codec;

   audio->codec = avcodec_alloc_context3(codec);

   audio->codec->codec_type     = AVMEDIA_TYPE_AUDIO;
   audio->codec->channels       = param->channels;
   audio->codec->channel_layout = param->channels > 1 ?
      AV_CH_LAYOUT_STEREO : AV_CH_LAYOUT_MONO;

   ffemu_audio_resolve_format(audio, codec);
   ffemu_audio_resolve_sample_rate(handle, codec);

   if (params->sample_rate)
   {
      /* Encoder sample rate differs from the core's: set up a resampler. */
      audio->ratio = (double)params->sample_rate / param->samplerate;
      audio->codec->sample_rate = params->sample_rate;
      audio->codec->time_base = av_d2q(1.0 / params->sample_rate, 1000000);

      rarch_resampler_realloc(&audio->resampler_data,
            &audio->resampler,
            g_settings.audio.resampler,
            audio->ratio);
   }
   else
   {
      audio->codec->sample_fmt  = AV_SAMPLE_FMT_S16;
      audio->codec->sample_rate = (int)roundf(param->samplerate);
      audio->codec->time_base = av_d2q(1.0 / param->samplerate, 1000000);
   }

   if (params->audio_qscale)
   {
      audio->codec->flags |= CODEC_FLAG_QSCALE;
      audio->codec->global_quality = params->audio_global_quality;
   }
   else if (params->audio_bit_rate)
      audio->codec->bit_rate = params->audio_bit_rate;

   /* Allow experimental codecs. */
   audio->codec->strict_std_compliance = FF_COMPLIANCE_EXPERIMENTAL;

   if (handle->muxer.ctx->oformat->flags & AVFMT_GLOBALHEADER)
      audio->codec->flags |= CODEC_FLAG_GLOBAL_HEADER;

   /* BUGFIX: the options argument was corrupted to "¶ms->audio_opts"
    * (an HTML-entity mangling of "&params->audio_opts"), which does not
    * compile. Restore the address-of expression. */
   if (avcodec_open2(audio->codec, codec,
            params->audio_opts ? &params->audio_opts : NULL) != 0)
      return false;

   /* If not set (PCM), just set something. */
   if (!audio->codec->frame_size)
      audio->codec->frame_size = 1024;

   audio->buffer = (uint8_t*)av_malloc(
         audio->codec->frame_size *
         audio->codec->channels *
         audio->sample_size);

   //RARCH_LOG("[FFmpeg]: Audio frame size: %d.\n", audio->codec->frame_size);

   if (!audio->buffer)
      return false;

   audio->outbuf_size = FF_MIN_BUFFER_SIZE;
   audio->outbuf = (uint8_t*)av_malloc(audio->outbuf_size);
   if (!audio->outbuf)
      return false;

   return true;
}
/* Threaded video driver frame callback: hands a core video frame to the
 * driver thread. Copies the frame into the shared buffer under the lock,
 * signals the driver thread, and (when vsync-style pacing is enabled) waits
 * up to one target frame period for the previous frame to be consumed.
 * Frames are dropped (miss_count++) if the driver thread is still busy. */
static bool thread_frame(void *data, const void *frame_,
      unsigned width, unsigned height, uint64_t frame_count,
      unsigned pitch, const char *msg)
{
   unsigned copy_stride;
   const uint8_t *src  = NULL;
   uint8_t *dst        = NULL;
   thread_video_t *thr = (thread_video_t*)data;

   /* If called from within read_viewport, we're actually in the
    * driver thread, so just render directly. */
   if (thr->frame.within_thread)
   {
      thread_update_driver_state(thr);

      if (thr->driver && thr->driver->frame)
         return thr->driver->frame(thr->driver_data, frame_, width,
               height, frame_count, pitch, msg);
      return false;
   }

   RARCH_PERFORMANCE_INIT(thr_frame);
   RARCH_PERFORMANCE_START(thr_frame);

   copy_stride = width * (thr->info.rgb32 ?
         sizeof(uint32_t) : sizeof(uint16_t));

   src = (const uint8_t*)frame_;
   dst = thr->frame.buffer;

   slock_lock(thr->lock);

   if (!thr->nonblock)
   {
      /* Blocking mode: pace submission to the configured refresh rate. */
      settings_t *settings = config_get_ptr();
      retro_time_t target_frame_time = (retro_time_t)
         roundf(1000000 / settings->video.refresh_rate);
      retro_time_t target = thr->last_time + target_frame_time;

      /* Ideally, use absolute time, but that is only a good idea on POSIX. */
      while (thr->frame.updated)
      {
         retro_time_t current = rarch_get_time_usec();
         retro_time_t delta   = target - current;

         if (delta <= 0)
            break;

         if (!scond_wait_timeout(thr->cond_cmd, thr->lock, delta))
            break;
      }
   }

   /* Drop frame if updated flag is still set, as thread is
    * still working on last frame. */
   if (!thr->frame.updated)
   {
      if (src)
      {
         unsigned h;
         /* Row-by-row copy: source pitch may differ from our stride. */
         for (h = 0; h < height; h++, src += pitch, dst += copy_stride)
            memcpy(dst, src, copy_stride);
      }

      thr->frame.updated = true;
      thr->frame.width   = width;
      thr->frame.height  = height;
      thr->frame.count   = frame_count;
      thr->frame.pitch   = copy_stride;

      if (msg)
         strlcpy(thr->frame.msg, msg, sizeof(thr->frame.msg));
      else
         *thr->frame.msg = '\0';

      scond_signal(thr->cond_thread);

#if defined(HAVE_MENU)
      if (thr->texture.enable)
      {
         /* Menu path must be fully synchronous: wait until the driver
          * thread has consumed this frame. */
         while (thr->frame.updated)
            scond_wait(thr->cond_cmd, thr->lock);
      }
#endif
      thr->hit_count++;
   }
   else
      thr->miss_count++;

   slock_unlock(thr->lock);

   RARCH_PERFORMANCE_STOP(thr_frame);

   thr->last_time = rarch_get_time_usec();
   return true;
}
unsigned int ClassWeightedImageSampler<type, nChannels>::sample(const type *data, const unsigned char *labels, unsigned int width, unsigned int height, unsigned int *samples) const { unsigned int **perClassBucket = new unsigned int*[m_nClasses]; for (int i=0; i<m_nClasses; i++) perClassBucket[i] = new unsigned int[m_perClassBucketSize[i]]; unsigned int *perClassNSamples = new unsigned int[m_nClasses]; unsigned int *perClassCounter = new unsigned int[m_nClasses]; std::fill(perClassNSamples, perClassNSamples+m_nClasses, 0); std::fill(perClassCounter, perClassCounter+m_nClasses, 0); boost::random::mt19937 gen; boost::random::uniform_01<> dist; boost::crc_32_type checksum_agent; checksum_agent.process_bytes((unsigned char*)data, sizeof(type)*width*height*nChannels); gen.seed(checksum_agent.checksum() ^ this->m_seed); // Scan the whole image and perform per-class reservoir sampling for (int i=0; i<width*height; i++) { unsigned char label = labels[i]; if (label) { label -= 1; unsigned int *currBucket = perClassBucket[label]; unsigned int perClassBucketSize = m_perClassBucketSize[label]; if (perClassNSamples[label]<perClassBucketSize) { currBucket[perClassNSamples[label]] = i; perClassNSamples[label]++; } else { unsigned int j = roundf(dist(gen)*perClassCounter[label]); if (j<perClassBucketSize) { currBucket[j] = i; } } perClassCounter[label]++; } } // Copy the sampled pixels to the output buffer unsigned int totSamples=perClassNSamples[0]; std::copy(perClassBucket[0], perClassBucket[0]+perClassNSamples[0], samples); for (int i=1; i<m_nClasses; i++) { std::copy(perClassBucket[i], perClassBucket[i]+perClassNSamples[i], samples+totSamples); totSamples += perClassNSamples[i]; } // Finally, sort samples std::sort(samples, samples+totSamples); for (int i=0; i<m_nClasses; i++) delete perClassBucket[i]; delete []perClassBucket; delete []perClassNSamples; delete []perClassCounter; return totSamples; }
/*
 * Initialize the destination byte buffer with image data scaled to the
 * width and height specified from the source byte buffer.
 *
 * Performs an area-averaging scale of a 4-component (RGBA) image: source
 * pixel contributions are accumulated into per-row float accumulators
 * (premultiplied by alpha), then normalized, un-premultiplied, clamped to
 * [0,255] and written out row by row.
 *
 * Fixes vs. previous revision (no behavior change):
 *  - removed the stray no-op statement `amty;`, which read an
 *    uninitialized variable on the first loop iteration;
 *  - removed the inner-scope redeclaration `int amtx;` that shadowed the
 *    outer amtx (the value is always assigned before use either way).
 */
static void fillFourCompBuffer( FilterParam params )
{
    int srcWidth, srcHeight, srcComponents, srcPixels;
    int dstWidth, dstHeight;
    int row_cmp_size;
    row_cmp_ptr row;
    int sx, sy, dx, dy;
    int sxrem, syrem, dxrem, dyrem;
    int amtx, amty;
    int srcRowByteOffset, srcColByteOffset;
    int dstByteOffset;
    int dstRowByteIndex;
    int dstRowByteLength;
    int i;
    float a, r, g, b;
    float ascale, mult;
    jbyte *row_data;
    jbyte *srcBuffer;
    jbyte *dstBuffer;
    int a_int, r_int, g_int, b_int;
    param_ptr source = (param_ptr)params;

    srcWidth      = source->pub.srcWidth;
    srcHeight     = source->pub.srcHeight;
    srcComponents = source->pub.srcComponents;
    srcPixels     = srcWidth * srcHeight;
    srcBuffer     = source->pub.src_pixel_data;

    dstWidth  = source->pub.dstWidth;
    dstHeight = source->pub.dstHeight;
    dstBuffer = source->pub.dst_pixel_data;

    /* Per-destination-row float accumulators, one per component. */
    row = (row_cmp_ptr)malloc( sizeof( row_cmp ) );
    row_cmp_size = dstWidth * sizeof( float );
    row->red   = (float*)malloc( row_cmp_size );
    row->green = (float*)malloc( row_cmp_size );
    row->blue  = (float*)malloc( row_cmp_size );
    row->alpha = (float*)malloc( row_cmp_size );

    dstRowByteLength = dstWidth*srcComponents;
    row_data = (jbyte*)malloc( dstRowByteLength );

    ///////////////////////////////////////////////////////////////////

    sy = 0;
    syrem = dstHeight;
    dy = 0;
    dyrem = 0;
    srcRowByteOffset = 0;
    dstByteOffset = 0;

    while ( sy < srcHeight ) {
        if ( dyrem == 0 ) {
            /* Starting a new destination row: clear the accumulators. */
            for ( i = 0; i < dstWidth; i++ ) {
                row->alpha[i] = row->red[i] = row->green[i] = row->blue[i] = 0;
            }
            dyrem = srcHeight;
        }

        /* Vertical coverage of this source row in the current dest row. */
        if ( syrem < dyrem ) {
            amty = syrem;
        } else {
            amty = dyrem;
        }

        sx = 0;
        dx = 0;
        sxrem = 0;
        dxrem = srcWidth;
        a = 0, r = 0, g = 0, b = 0;

        while ( sx < srcWidth ) {
            if ( sxrem == 0 ) {
                /* Fetch the next source pixel and premultiply by alpha. */
                sxrem = dstWidth;
                srcColByteOffset = sx * srcComponents;
                r = 0xff & srcBuffer[ srcRowByteOffset + srcColByteOffset++ ];
                g = 0xff & srcBuffer[ srcRowByteOffset + srcColByteOffset++ ];
                b = 0xff & srcBuffer[ srcRowByteOffset + srcColByteOffset++ ];
                a = 0xff & srcBuffer[ srcRowByteOffset + srcColByteOffset ];

                // premultiply the components if necessary
                if ( a != 255.0 ) {
                    ascale = a / 255.0;
                    r *= ascale;
                    g *= ascale;
                    b *= ascale;
                }
            }

            /* Horizontal coverage of this source pixel in the current
             * destination pixel. */
            if ( sxrem < dxrem ) {
                amtx = sxrem;
            } else {
                amtx = dxrem;
            }

            /* Accumulate this pixel weighted by its covered area. */
            mult = ((float)amtx) * amty;
            row->alpha[dx] += mult * a;
            row->red[dx]   += mult * r;
            row->green[dx] += mult * g;
            row->blue[dx]  += mult * b;

            if ( ( sxrem -= amtx ) == 0 ) {
                sx++;
            }
            if ( ( dxrem -= amtx ) == 0 ) {
                dx++;
                dxrem = srcWidth;
            }
        }

        if ( ( dyrem -= amty ) == 0 ) {
            ///////////////////////////////////////////////////////////////////
            /* Destination row complete: normalize, un-premultiply, clamp
             * and pack the accumulated components into row_data. */
            dstRowByteIndex = 0;
            for ( i = 0; i < dstWidth; i++ ) {
                mult = (float)srcPixels;
                a_int = (int)roundf( row->alpha[i] / mult );
                if ( a_int <= 0 ) {
                    a_int = 0;
                } else if ( a_int >= 255 ) {
                    a_int = 255;
                } else {
                    // un-premultiply the components (by modifying mult here, we
                    // are effectively doing the divide by mult and divide by
                    // alpha in the same step)
                    mult = row->alpha[i] / 255;
                }
                r_int = (int)roundf( row->red[i] / mult );
                g_int = (int)roundf( row->green[i] / mult );
                b_int = (int)roundf( row->blue[i] / mult );
                if ( r_int < 0 ) { r_int = 0; } else if ( r_int > 255 ) { r_int = 255; }
                if ( g_int < 0 ) { g_int = 0; } else if ( g_int > 255 ) { g_int = 255; }
                if ( b_int < 0 ) { b_int = 0; } else if ( b_int > 255 ) { b_int = 255; }
                row_data[dstRowByteIndex++] = (jbyte)r_int;
                row_data[dstRowByteIndex++] = (jbyte)g_int;
                row_data[dstRowByteIndex++] = (jbyte)b_int;
                row_data[dstRowByteIndex++] = (jbyte)a_int;
            }
            ///////////////////////////////////////////////////////////////////
            /* Emit the finished row; when upscaling (amty == srcHeight),
             * replicate it for every destination row it covers. */
            do {
                for ( i = 0; i < dstRowByteLength; i++ ) {
                    dstBuffer[ dstByteOffset++ ] = row_data[i];
                }
                dy++;
            } while ( ( ( syrem -= amty ) >= amty ) && ( amty == srcHeight ) );
        } else {
            syrem -= amty;
        }

        if ( syrem == 0 ) {
            syrem = dstHeight;
            sy++;
            srcRowByteOffset += srcWidth * srcComponents;
        }
    }

    ///////////////////////////////////////////////////////////////////

    free( row_data );
    free( row->red );
    free( row->green );
    free( row->blue );
    free( row->alpha );
    free( row );
}
/* Pd DSP perform routine for the pluck~ object: a Karplus-Strong style
 * plucked-string synth. Per sample it gates a noise burst (length = one
 * period) into a feedback delay line, filters the output with a one-pole-
 * style biquad lowpass (q = 0.5) and writes it back into the delay buffer.
 * Signal inlets: frequency (Hz), trigger, feedback decay time, cutoff.
 * Returns the next pointer in the DSP chain (w+9). */
static t_int *pluck_perform(t_int *w){
    t_pluck *x = (t_pluck *)(w[1]);
    int n = (int)(w[2]);
    t_random_state *rstate = (t_random_state *)(w[3]);
    t_float *hz_in = (t_float *)(w[4]);
    t_float *t_in = (t_float *)(w[5]);
    t_float *ain = (t_float *)(w[6]);
    t_float *cut_in = (t_float *)(w[7]);
    t_float *out = (t_float *)(w[8]);
    uint32_t *s1 = &rstate->s1;
    uint32_t *s2 = &rstate->s2;
    uint32_t *s3 = &rstate->s3;
    /* Local copies of object state; written back after the block loop. */
    t_float sr = x->x_sr;
    t_float last_trig = x->x_last_trig;
    t_float sum = x->x_sum;
    t_float amp = x->x_amp;
    double xnm1 = x->x_xnm1;
    double xnm2 = x->x_xnm2;
    double ynm1 = x->x_ynm1;
    double ynm2 = x->x_ynm2;
    for(t_int i = 0; i < n; i++){
        t_float hz = hz_in[i];
        t_float trig = t_in[i];
        if(hz < 1){
            /* Sub-audio frequency: silence and reset filter history. */
            out[i] = sum = 0;
            xnm1 = xnm2 = ynm1 = ynm2 = 0;
        }
        else{
            float period = 1./hz;
            float delms = period * 1000;
            t_int samps = (int)roundf(period * sr);
            double fb_del = pluck_read_delay(x, x->x_ybuf, samps); // get delayed vals
            /* Convert decay time (ms) into a per-period feedback gain
             * reaching -60 dB after |ain| ms; sign is preserved.
             * NOTE(review): this writes back into the input buffer ain[]. */
            if (ain[i] == 0)
                ain[i] = 0;
            else
                ain[i] = copysign(exp(log(0.001) * delms/fabs(ain[i])), ain[i]);
            /* Rising edge on the trigger inlet (re)plucks the string. */
            if(trig != 0 && last_trig == 0){
                sum = 0;
                amp = trig;
            }
            // Filter stuff
            double cuttoff = (double)cut_in[i];
            double omega, alphaQ, cos_w, a0, a1, a2, b0, b1, b2, yn;
            double nyq = (sr * 0.5);
            double hz2rad = PI/nyq;
            /* Clamp cutoff inside (0, Nyquist). */
            if (cuttoff < 0.000001)
                cuttoff = 0.000001;
            if (cuttoff > nyq - 0.000001)
                cuttoff = nyq - 0.000001;
            omega = cuttoff * hz2rad;
            alphaQ = sin(omega); // q = 0.5
            cos_w = cos(omega);
            /* Biquad lowpass coefficients (RBJ style, normalized by b0). */
            b0 = alphaQ + 1;
            a0 = (1 - cos_w) / (2 * b0);
            a1 = (1 - cos_w) / b0;
            a2 = a0;
            b1 = 2*cos_w / b0;
            b2 = (alphaQ - 1) / b0;
            // gate: noise burst lasts one period (samps samples)
            t_float gate = (sum++ <= samps) * amp;
            // noise
            t_float noise;
            if(gate != 0)
                noise = (t_float)(random_frand(s1, s2, s3)) * gate;
            else
                noise = 0;
            // output: excitation plus scaled feedback from the delay line
            double output = (double)noise + (double)ain[i] * fb_del;
            out[i] = output;
            // filter
            yn = a0 * output + a1 * xnm1 + a2 * xnm2 + b1 * ynm1 + b2 * ynm2;
            // put into delay buffer
            int wh = x->x_wh;
            x->x_ybuf[wh] = yn;
            x->x_wh = (wh + 1) % x->x_sz; // increment writehead
            last_trig = trig;
            xnm2 = xnm1;
            xnm1 = output;
            ynm2 = ynm1;
            ynm1 = yn;
        }
    };
    /* Persist state for the next DSP block. */
    x->x_sum = sum; // next
    // NOTE(review): dead store — immediately overwritten by the next line;
    // looks like a leftover from an earlier revision.
    x->x_last_trig = amp;
    x->x_last_trig = last_trig;
    x->x_xnm1 = xnm1;
    x->x_xnm2 = xnm2;
    x->x_ynm1 = ynm1;
    x->x_ynm2 = ynm2;
    return(w+9);
}
int real_to_pixel(float x) const { return (unsigned) roundf(x * _fx); }
// Called only from stroke_to(). Calculate everything needed to
// draw the dab, then let the surface do the actual drawing.
//
// This is only gets called right after update_states_and_setting_values().
// Returns TRUE if the surface was modified.
//
// Pipeline (in order): opacity -> dab position offsets -> radius jitter ->
// smudge-state update -> color mixing (smudge/eraser/HSV/HSL tweaks) ->
// hardness/anti-aliasing -> pixel snapping -> surface draw call.
gboolean prepare_and_draw_dab (MyPaintBrush *self, MyPaintSurface * surface)
{
    float x, y, opaque;
    float radius;

    // ensure we don't get a positive result with two negative opaque values
    if (self->settings_value[MYPAINT_BRUSH_SETTING_OPAQUE] < 0) self->settings_value[MYPAINT_BRUSH_SETTING_OPAQUE] = 0;
    opaque = self->settings_value[MYPAINT_BRUSH_SETTING_OPAQUE] * self->settings_value[MYPAINT_BRUSH_SETTING_OPAQUE_MULTIPLY];
    opaque = CLAMP(opaque, 0.0, 1.0);
    //if (opaque == 0.0) return FALSE; <-- cannot do that, since we need to update smudge state.
    if (self->settings_value[MYPAINT_BRUSH_SETTING_OPAQUE_LINEARIZE]) {
      // OPTIMIZE: no need to recalculate this for each dab
      float alpha, beta, alpha_dab, beta_dab;
      float dabs_per_pixel;
      // dabs_per_pixel is just estimated roughly, I didn't think hard
      // about the case when the radius changes during the stroke
      dabs_per_pixel = (
                        mypaint_mapping_get_base_value(self->settings[MYPAINT_BRUSH_SETTING_DABS_PER_ACTUAL_RADIUS]) +
                        mypaint_mapping_get_base_value(self->settings[MYPAINT_BRUSH_SETTING_DABS_PER_BASIC_RADIUS])
                        ) * 2.0;

      // the correction is probably not wanted if the dabs don't overlap
      if (dabs_per_pixel < 1.0) dabs_per_pixel = 1.0;

      // interpret the user-setting smoothly
      dabs_per_pixel = 1.0 + mypaint_mapping_get_base_value(self->settings[MYPAINT_BRUSH_SETTING_OPAQUE_LINEARIZE])*(dabs_per_pixel-1.0);

      // see doc/brushdab_saturation.png
      //      beta = beta_dab^dabs_per_pixel
      // <==> beta_dab = beta^(1/dabs_per_pixel)
      alpha = opaque;
      beta = 1.0-alpha;
      beta_dab = powf(beta, 1.0/dabs_per_pixel);
      alpha_dab = 1.0-beta_dab;
      opaque = alpha_dab;
    }

    x = self->states[MYPAINT_BRUSH_STATE_ACTUAL_X];
    y = self->states[MYPAINT_BRUSH_STATE_ACTUAL_Y];

    float base_radius = expf(mypaint_mapping_get_base_value(self->settings[MYPAINT_BRUSH_SETTING_RADIUS_LOGARITHMIC]));

    // Optional dab-position offset proportional to stroke speed.
    if (self->settings_value[MYPAINT_BRUSH_SETTING_OFFSET_BY_SPEED]) {
      x += self->states[MYPAINT_BRUSH_STATE_NORM_DX_SLOW] * self->settings_value[MYPAINT_BRUSH_SETTING_OFFSET_BY_SPEED] * 0.1 * base_radius;
      y += self->states[MYPAINT_BRUSH_STATE_NORM_DY_SLOW] * self->settings_value[MYPAINT_BRUSH_SETTING_OFFSET_BY_SPEED] * 0.1 * base_radius;
    }

    // Optional random (Gaussian) dab-position jitter.
    if (self->settings_value[MYPAINT_BRUSH_SETTING_OFFSET_BY_RANDOM]) {
      float amp = self->settings_value[MYPAINT_BRUSH_SETTING_OFFSET_BY_RANDOM];
      if (amp < 0.0) amp = 0.0;
      x += rand_gauss (self->rng) * amp * base_radius;
      y += rand_gauss (self->rng) * amp * base_radius;
    }

    radius = self->states[MYPAINT_BRUSH_STATE_ACTUAL_RADIUS];
    if (self->settings_value[MYPAINT_BRUSH_SETTING_RADIUS_BY_RANDOM]) {
      float radius_log, alpha_correction;
      // go back to logarithmic radius to add the noise
      radius_log = self->settings_value[MYPAINT_BRUSH_SETTING_RADIUS_LOGARITHMIC];
      radius_log += rand_gauss (self->rng) * self->settings_value[MYPAINT_BRUSH_SETTING_RADIUS_BY_RANDOM];
      radius = expf(radius_log);
      radius = CLAMP(radius, ACTUAL_RADIUS_MIN, ACTUAL_RADIUS_MAX);
      // Compensate opacity so a randomly larger dab doesn't deposit more ink.
      alpha_correction = self->states[MYPAINT_BRUSH_STATE_ACTUAL_RADIUS] / radius;
      alpha_correction = SQR(alpha_correction);
      if (alpha_correction <= 1.0) {
        opaque *= alpha_correction;
      }
    }

    // update smudge color
    if (self->settings_value[MYPAINT_BRUSH_SETTING_SMUDGE_LENGTH] < 1.0 &&
        // optimization, since normal brushes have smudge_length == 0.5 without actually smudging
        (self->settings_value[MYPAINT_BRUSH_SETTING_SMUDGE] != 0.0 || !mypaint_mapping_is_constant(self->settings[MYPAINT_BRUSH_SETTING_SMUDGE]))) {

      float fac = self->settings_value[MYPAINT_BRUSH_SETTING_SMUDGE_LENGTH];
      if (fac < 0.01) fac = 0.01;
      int px, py;
      px = ROUND(x);
      py = ROUND(y);

      // Calling get_color() is almost as expensive as rendering a
      // dab. Because of this we use the previous value if it is not
      // expected to hurt quality too much. We call it at most every
      // second dab.
      float r, g, b, a;
      self->states[MYPAINT_BRUSH_STATE_LAST_GETCOLOR_RECENTNESS] *= fac;
      if (self->states[MYPAINT_BRUSH_STATE_LAST_GETCOLOR_RECENTNESS] < 0.5*fac) {
        if (self->states[MYPAINT_BRUSH_STATE_LAST_GETCOLOR_RECENTNESS] == 0.0) {
          // first initialization of smudge color
          fac = 0.0;
        }
        self->states[MYPAINT_BRUSH_STATE_LAST_GETCOLOR_RECENTNESS] = 1.0;

        float smudge_radius = radius * expf(self->settings_value[MYPAINT_BRUSH_SETTING_SMUDGE_RADIUS_LOG]);
        smudge_radius = CLAMP(smudge_radius, ACTUAL_RADIUS_MIN, ACTUAL_RADIUS_MAX);
        mypaint_surface_get_color(surface, px, py, smudge_radius, &r, &g, &b, &a);

        // Cache the sampled color for reuse on the next dab.
        self->states[MYPAINT_BRUSH_STATE_LAST_GETCOLOR_R] = r;
        self->states[MYPAINT_BRUSH_STATE_LAST_GETCOLOR_G] = g;
        self->states[MYPAINT_BRUSH_STATE_LAST_GETCOLOR_B] = b;
        self->states[MYPAINT_BRUSH_STATE_LAST_GETCOLOR_A] = a;
      } else {
        r = self->states[MYPAINT_BRUSH_STATE_LAST_GETCOLOR_R];
        g = self->states[MYPAINT_BRUSH_STATE_LAST_GETCOLOR_G];
        b = self->states[MYPAINT_BRUSH_STATE_LAST_GETCOLOR_B];
        a = self->states[MYPAINT_BRUSH_STATE_LAST_GETCOLOR_A];
      }

      // updated the smudge color (stored with premultiplied alpha)
      self->states[MYPAINT_BRUSH_STATE_SMUDGE_A ] = fac*self->states[MYPAINT_BRUSH_STATE_SMUDGE_A ] + (1-fac)*a;
      // fix rounding errors
      self->states[MYPAINT_BRUSH_STATE_SMUDGE_A ] = CLAMP(self->states[MYPAINT_BRUSH_STATE_SMUDGE_A], 0.0, 1.0);

      self->states[MYPAINT_BRUSH_STATE_SMUDGE_RA] = fac*self->states[MYPAINT_BRUSH_STATE_SMUDGE_RA] + (1-fac)*r*a;
      self->states[MYPAINT_BRUSH_STATE_SMUDGE_GA] = fac*self->states[MYPAINT_BRUSH_STATE_SMUDGE_GA] + (1-fac)*g*a;
      self->states[MYPAINT_BRUSH_STATE_SMUDGE_BA] = fac*self->states[MYPAINT_BRUSH_STATE_SMUDGE_BA] + (1-fac)*b*a;
    }

    // color part
    float color_h = mypaint_mapping_get_base_value(self->settings[MYPAINT_BRUSH_SETTING_COLOR_H]);
    float color_s = mypaint_mapping_get_base_value(self->settings[MYPAINT_BRUSH_SETTING_COLOR_S]);
    float color_v = mypaint_mapping_get_base_value(self->settings[MYPAINT_BRUSH_SETTING_COLOR_V]);
    float eraser_target_alpha = 1.0;
    if (self->settings_value[MYPAINT_BRUSH_SETTING_SMUDGE] > 0.0) {
      // mix (in RGB) the smudge color with the brush color
      // NOTE: color_h/s/v are reused as r/g/b between the hsv_to_rgb_float
      // and rgb_to_hsv_float calls below.
      hsv_to_rgb_float (&color_h, &color_s, &color_v);
      float fac = self->settings_value[MYPAINT_BRUSH_SETTING_SMUDGE];
      if (fac > 1.0) fac = 1.0;
      // If the smudge color somewhat transparent, then the resulting
      // dab will do erasing towards that transparency level.
      // see also ../doc/smudge_math.png
      eraser_target_alpha = (1-fac)*1.0 + fac*self->states[MYPAINT_BRUSH_STATE_SMUDGE_A];
      // fix rounding errors (they really seem to happen in the previous line)
      eraser_target_alpha = CLAMP(eraser_target_alpha, 0.0, 1.0);
      if (eraser_target_alpha > 0) {
        color_h = (fac*self->states[MYPAINT_BRUSH_STATE_SMUDGE_RA] + (1-fac)*color_h) / eraser_target_alpha;
        color_s = (fac*self->states[MYPAINT_BRUSH_STATE_SMUDGE_GA] + (1-fac)*color_s) / eraser_target_alpha;
        color_v = (fac*self->states[MYPAINT_BRUSH_STATE_SMUDGE_BA] + (1-fac)*color_v) / eraser_target_alpha;
      } else {
        // we are only erasing; the color does not matter
        color_h = 1.0;
        color_s = 0.0;
        color_v = 0.0;
      }
      rgb_to_hsv_float (&color_h, &color_s, &color_v);
    }

    // eraser
    if (self->settings_value[MYPAINT_BRUSH_SETTING_ERASER]) {
      eraser_target_alpha *= (1.0-self->settings_value[MYPAINT_BRUSH_SETTING_ERASER]);
    }

    // HSV color change
    color_h += self->settings_value[MYPAINT_BRUSH_SETTING_CHANGE_COLOR_H];
    color_s += self->settings_value[MYPAINT_BRUSH_SETTING_CHANGE_COLOR_HSV_S];
    color_v += self->settings_value[MYPAINT_BRUSH_SETTING_CHANGE_COLOR_V];

    // HSL color change
    if (self->settings_value[MYPAINT_BRUSH_SETTING_CHANGE_COLOR_L] || self->settings_value[MYPAINT_BRUSH_SETTING_CHANGE_COLOR_HSL_S]) {
      // (calculating way too much here, can be optimized if neccessary)
      // this function will CLAMP the inputs
      hsv_to_rgb_float (&color_h, &color_s, &color_v);
      rgb_to_hsl_float (&color_h, &color_s, &color_v);
      color_v += self->settings_value[MYPAINT_BRUSH_SETTING_CHANGE_COLOR_L];
      color_s += self->settings_value[MYPAINT_BRUSH_SETTING_CHANGE_COLOR_HSL_S];
      hsl_to_rgb_float (&color_h, &color_s, &color_v);
      rgb_to_hsv_float (&color_h, &color_s, &color_v);
    }

    float hardness = CLAMP(self->settings_value[MYPAINT_BRUSH_SETTING_HARDNESS], 0.0, 1.0);

    // anti-aliasing attempt (works surprisingly well for ink brushes)
    float current_fadeout_in_pixels = radius * (1.0 - hardness);
    float min_fadeout_in_pixels = self->settings_value[MYPAINT_BRUSH_SETTING_ANTI_ALIASING];
    if (current_fadeout_in_pixels < min_fadeout_in_pixels) {
      // need to soften the brush (decrease hardness), but keep optical radius
      // so we tune both radius and hardness, to get the desired fadeout_in_pixels
      float current_optical_radius = radius - (1.0-hardness)*radius/2.0;

      // Equation 1: (new fadeout must be equal to min_fadeout)
      //   min_fadeout_in_pixels = radius_new*(1.0 - hardness_new)
      // Equation 2: (optical radius must remain unchanged)
      //   current_optical_radius = radius_new - (1.0-hardness_new)*radius_new/2.0
      //
      // Solved Equation 1 for hardness_new, using Equation 2: (thanks to mathomatic)
      float hardness_new = ((current_optical_radius - (min_fadeout_in_pixels/2.0))/(current_optical_radius + (min_fadeout_in_pixels/2.0)));
      // Using Equation 1:
      float radius_new = (min_fadeout_in_pixels/(1.0 - hardness_new));

      hardness = hardness_new;
      radius = radius_new;
    }

    // snap to pixel
    float snapToPixel = self->settings_value[MYPAINT_BRUSH_SETTING_SNAP_TO_PIXEL];
    if (snapToPixel > 0.0)
    {
      // linear interpolation between non-snapped and snapped
      float snapped_x = floor(x) + 0.5;
      float snapped_y = floor(y) + 0.5;
      x = x + (snapped_x - x) * snapToPixel;
      y = y + (snapped_y - y) * snapToPixel;

      // Radius is snapped to the nearest half pixel.
      float snapped_radius = roundf(radius * 2.0) / 2.0;
      if (snapped_radius < 0.5)
        snapped_radius = 0.5;

      if (snapToPixel > 0.9999 )
      {
        snapped_radius -= 0.0001; // this fixes precision issues where
                                  // neighboor pixels could be wrongly painted
      }

      radius = radius + (snapped_radius - radius) * snapToPixel;
    }

    // the functions below will CLAMP most inputs
    hsv_to_rgb_float (&color_h, &color_s, &color_v);
    return mypaint_surface_draw_dab (surface, x, y, radius, color_h, color_s, color_v, opaque, hardness, eraser_target_alpha,
                                     self->states[MYPAINT_BRUSH_STATE_ACTUAL_ELLIPTICAL_DAB_RATIO], self->states[MYPAINT_BRUSH_STATE_ACTUAL_ELLIPTICAL_DAB_ANGLE],
                                     self->settings_value[MYPAINT_BRUSH_SETTING_LOCK_ALPHA],
                                     self->settings_value[MYPAINT_BRUSH_SETTING_COLORIZE]);
}
// Emit a single move (travel or extrusion) to the g-code output stream.
// x, y, z are in internal integer microns; speed is in mm/s (converted to
// mm/min for the F word); extrusion_mm3_per_mm is volumetric extrusion per
// mm of travel. No-op when the target equals the current position.
// BFB flavor is handled separately (RPM-based extruder control instead of
// E values); all other flavors use normal E handling. Updates
// currentPosition/startPosition and feeds the time estimator.
void GCodeExport::writeMove(int x, int y, int z, double speed, double extrusion_mm3_per_mm)
{
    if (currentPosition.x == x && currentPosition.y == y && currentPosition.z == z)
        return;
    assert(speed < 200 && speed > 1); // normal F values occurring in UM2 gcode (this code should not be compiled for release)
    assert((Point3(x,y,z) - currentPosition).vSize() < MM2INT(300)); // no crazy positions (this code should not be compiled for release)
    if (extrusion_mm3_per_mm < 0)
        logWarning("Warning! Negative extrusion move!");
    // Convert volumetric extrusion to filament length unless the flavor is volumetric.
    double extrusion_per_mm = extrusion_mm3_per_mm;
    if (!is_volumatric)
    {
        extrusion_per_mm = extrusion_mm3_per_mm / getFilamentArea(current_extruder);
    }
    Point gcode_pos = getGcodePos(x,y, current_extruder);
    if (flavor == EGCodeFlavor::BFB)
    {
        //For Bits From Bytes machines, we need to handle this completely differently. As they do not use E values but RPM values.
        float fspeed = speed * 60;
        float rpm = extrusion_per_mm * speed * 60;
        const float mm_per_rpm = 4.0; //All BFB machines have 4mm per RPM extrusion.
        rpm /= mm_per_rpm;
        if (rpm > 0)
        {
            if (isRetracted)
            {
                if (currentSpeed != double(rpm))
                {
                    //fprintf(f, "; %f e-per-mm %d mm-width %d mm/s\n", extrusion_per_mm, lineWidth, speed);
                    //fprintf(f, "M108 S%0.1f\r\n", rpm);
                    *output_stream << "M108 S" << std::setprecision(1) << rpm << "\r\n";
                    currentSpeed = double(rpm);
                }
                //Add M101 or M201 to enable the proper extruder.
                *output_stream << "M" << int((current_extruder + 1) * 100 + 1) << "\r\n";
                isRetracted = false;
            }
            //Fix the speed by the actual RPM we are asking, because of rounding errors we cannot get all RPM values, but we have a lot more resolution in the feedrate value.
            // (Trick copied from KISSlicer, thanks Jonathan)
            // NOTE(review): if 0 < rpm < 0.005, roundf(rpm * 100) is 0 and this
            // divides by zero — TODO confirm whether such tiny rpm values can occur.
            fspeed *= (rpm / (roundf(rpm * 100) / 100));
            //Increase the extrusion amount to calculate the amount of filament used.
            Point3 diff = Point3(x,y,z) - getPosition();
            extrusion_amount += extrusion_per_mm * diff.vSizeMM();
        }else{
            //If we are not extruding, check if we still need to disable the extruder. This causes a retraction due to auto-retraction.
            if (!isRetracted)
            {
                *output_stream << "M103\r\n";
                isRetracted = true;
            }
        }
        *output_stream << std::setprecision(3) << "G1 X" << INT2MM(gcode_pos.X) << " Y" << INT2MM(gcode_pos.Y) << " Z" << INT2MM(z) << std::setprecision(1) << " F" << fspeed << "\r\n";
    }
    else
    {
        //Normal E handling.
        if (extrusion_mm3_per_mm > 0.000001)
        {
            Point3 diff = Point3(x,y,z) - getPosition();
            if (isZHopped > 0)
            {
                // TinyG G1: Straight feed
                // End a pending z-hop before extruding again.
                *output_stream << std::setprecision(3) << "G1 Z" << INT2MM(currentPosition.z) << "\n";
                isZHopped = 0;
            }
            // Add any coasted (unextruded) material back into the E total first.
            extrusion_amount += (is_volumatric) ? last_coasted_amount_mm3 : last_coasted_amount_mm3 / getFilamentArea(current_extruder);
            if (isRetracted)
            {
                if (flavor == EGCodeFlavor::ULTIGCODE || flavor == EGCodeFlavor::REPRAP_VOLUMATRIC)
                {
                    *output_stream << "G11\n";
                    //TODO try this code and see what happens
                    //Assume default UM2 retraction settings.
                    if (last_coasted_amount_mm3 > 0)
                    {
                        *output_stream << "G1 F" << (retractionPrimeSpeed * 60) << " " << extruder_attr[current_extruder].extruderCharacter << std::setprecision(5) << extrusion_amount << "\n";
                    }
                    estimateCalculator.plan(TimeEstimateCalculator::Position(INT2MM(currentPosition.x), INT2MM(currentPosition.y), INT2MM(currentPosition.z), extrusion_amount), 25.0);
                }else{
                    // TinyG checked
                    // Un-retract by priming at retractionPrimeSpeed.
                    *output_stream << "G1 F" << (retractionPrimeSpeed * 60) << " " << extruder_attr[current_extruder].extruderCharacter << std::setprecision(5) << extrusion_amount << "\n";
                    currentSpeed = retractionPrimeSpeed;
                    estimateCalculator.plan(TimeEstimateCalculator::Position(INT2MM(currentPosition.x), INT2MM(currentPosition.y), INT2MM(currentPosition.z), extrusion_amount), currentSpeed);
                }
                if (getExtrusionAmountMM3(current_extruder) > 10000.0) //According to https://github.com/Ultimaker/CuraEngine/issues/14 having more then 21m of extrusion causes inaccuracies. So reset it every 10m, just to be sure.
                    resetExtrusionValue();
                isRetracted = false;
            }
            else
            {
                if (last_coasted_amount_mm3 > 0)
                {
                    *output_stream << "G1 F" << (retractionPrimeSpeed * 60) << " " << extruder_attr[current_extruder].extruderCharacter << std::setprecision(5) << extrusion_amount << "\n";
                    estimateCalculator.plan(TimeEstimateCalculator::Position(INT2MM(currentPosition.x), INT2MM(currentPosition.y), INT2MM(currentPosition.z), extrusion_amount), currentSpeed);
                }
            }
            last_coasted_amount_mm3 = 0;
            extrusion_amount += extrusion_per_mm * diff.vSizeMM();
            // TinyG TODO: add one axis
            *output_stream << "G1";
        }else{
            *output_stream << "G0";

            if (commandSocket)
            {
                // we should send this travel as a non-retraction move
                cura::Polygons travelPoly;
                PolygonRef travel = travelPoly.newPoly();
                travel.add(Point(currentPosition.x, currentPosition.y));
                travel.add(Point(x, y));
                commandSocket->sendPolygons(isRetracted ? MoveRetractionType : MoveCombingType, layer_nr, travelPoly, isRetracted ? MM2INT(0.2) : MM2INT(0.1));
            }
        }

        // Shared tail for both G0 and G1: feedrate, coordinates, E word.
        if (currentSpeed != speed)
        {
            *output_stream << " F" << (speed * 60);
            currentSpeed = speed;
        }

        *output_stream << std::setprecision(3) << " X" << INT2MM(gcode_pos.X) << " Y" << INT2MM(gcode_pos.Y);
        if (z != currentPosition.z)
            *output_stream << " Z" << INT2MM(z + isZHopped);
        if (extrusion_mm3_per_mm > 0.000001)
            *output_stream << " " << extruder_attr[current_extruder].extruderCharacter << std::setprecision(5) << extrusion_amount;
        *output_stream << "\n";
    }

    currentPosition = Point3(x, y, z);
    startPosition = currentPosition;
    estimateCalculator.plan(TimeEstimateCalculator::Position(INT2MM(currentPosition.x), INT2MM(currentPosition.y), INT2MM(currentPosition.z), extrusion_amount), speed);
}
// Initialize the video driver stack: filter, shader dirs, viewport/aspect
// setup, pixel converter, the driver itself (optionally threaded), rotation,
// screensaver suppression, input, overlays, and a dummy cached frame.
// Calls rarch_fail() on unrecoverable errors.
void init_video(void)
{
   unsigned max_dim, scale, width, height;
   video_viewport_t *custom_vp = NULL;
   const input_driver_t *tmp = NULL;
   const struct retro_game_geometry *geom = NULL;
   video_info_t video = {0};
   static uint16_t dummy_pixels[32] = {0};
   driver_t *driver = driver_get_ptr();
   settings_t *settings = config_get_ptr();
   rarch_system_info_t *system = rarch_system_info_get_ptr();
   struct retro_system_av_info *av_info = video_viewport_get_system_av_info();

   init_video_filter(video_state.pix_fmt);
   event_command(EVENT_CMD_SHADER_DIR_INIT);

   if (av_info)
      geom = (const struct retro_game_geometry*)&av_info->geometry;

   // NOTE(review): geom stays NULL when av_info is NULL, yet it is
   // dereferenced unconditionally below — presumably av_info is guaranteed
   // non-NULL by this point; verify against callers.
   max_dim = max(geom->max_width, geom->max_height);
   scale = next_pow2(max_dim) / RARCH_SCALE_BASE;
   scale = max(scale, 1);

   // A software filter may require a larger input scale.
   if (video_state.filter.filter)
      scale = video_state.filter.scale;

   /* Update core-dependent aspect ratio values. */
   video_viewport_set_square_pixel(geom->base_width, geom->base_height);
   video_viewport_set_core();
   video_viewport_set_config();

   /* Update CUSTOM viewport. */
   custom_vp = video_viewport_get_custom();

   if (settings->video.aspect_ratio_idx == ASPECT_RATIO_CUSTOM)
   {
      float default_aspect = aspectratio_lut[ASPECT_RATIO_CORE].value;
      aspectratio_lut[ASPECT_RATIO_CUSTOM].value =
         (custom_vp->width && custom_vp->height) ?
         (float)custom_vp->width / custom_vp->height : default_aspect;
   }

   video_driver_set_aspect_ratio_value(aspectratio_lut[settings->video.aspect_ratio_idx].value);

   if (settings->video.fullscreen)
   {
      width = settings->video.fullscreen_x;
      height = settings->video.fullscreen_y;
   }
   else
   {
      if (settings->video.force_aspect)
      {
         /* Do rounding here to simplify integer scale correctness. */
         // Width derived from height and aspect ratio so forced aspect holds.
         unsigned base_width = roundf(geom->base_height * video_driver_get_aspect_ratio());
         width = roundf(base_width * settings->video.scale);
      }
      else
         width = roundf(geom->base_width * settings->video.scale);
      height = roundf(geom->base_height * settings->video.scale);
   }

   if (width && height)
      RARCH_LOG("Video @ %ux%u\n", width, height);
   else
      RARCH_LOG("Video @ fullscreen\n");

   driver->display_type = RARCH_DISPLAY_NONE;
   driver->video_display = 0;
   driver->video_window = 0;

   if (!init_video_pixel_converter(RARCH_SCALE_BASE * scale))
   {
      RARCH_ERR("Failed to initialize pixel converter.\n");
      rarch_fail(1, "init_video()");
   }

   video.width = width;
   video.height = height;
   video.fullscreen = settings->video.fullscreen;
   video.vsync = settings->video.vsync && !system->force_nonblock;
   video.force_aspect = settings->video.force_aspect;
#ifdef GEKKO
   video.viwidth = settings->video.viwidth;
   video.vfilter = settings->video.vfilter;
#endif
   video.smooth = settings->video.smooth;
   video.input_scale = scale;
   video.rgb32 = video_state.filter.filter ? video_state.filter.out_rgb32 : (video_state.pix_fmt == RETRO_PIXEL_FORMAT_XRGB8888);

   tmp = (const input_driver_t*)driver->input;
   /* Need to grab the "real" video driver interface on a reinit. */
   find_video_driver();

#ifdef HAVE_THREADS
   if (settings->video.threaded && !video_state.hw_render_callback.context_type)
   {
      /* Can't do hardware rendering with threaded driver currently. */
      RARCH_LOG("Starting threaded video driver ...\n");

      if (!rarch_threaded_video_init(&driver->video, &driver->video_data, &driver->input, &driver->input_data, driver->video, &video))
      {
         RARCH_ERR("Cannot open threaded video driver ... Exiting ...\n");
         rarch_fail(1, "init_video()");
      }
   }
   else
#endif
      driver->video_data = driver->video->init(&video, &driver->input, &driver->input_data);

   if (!driver->video_data)
   {
      RARCH_ERR("Cannot open video driver ... Exiting ...\n");
      rarch_fail(1, "init_video()");
   }

   driver->video_poke = NULL;
   if (driver->video->poke_interface)
      driver->video->poke_interface(driver->video_data, &driver->video_poke);

   if (driver->video->viewport_info && (!custom_vp->width || !custom_vp->height))
   {
      /* Force custom viewport to have sane parameters. */
      custom_vp->width = width;
      custom_vp->height = height;
      video_driver_viewport_info(custom_vp);
   }

   video_driver_set_rotation((settings->video.rotation + system->rotation) % 4);

   video_driver_suppress_screensaver(settings->ui.suspend_screensaver_enable);

   if (!driver->input)
      init_video_input(tmp);

   event_command(EVENT_CMD_OVERLAY_DEINIT);
   event_command(EVENT_CMD_OVERLAY_INIT);

   video_driver_cached_frame_set(&dummy_pixels, 4, 4, 8);

#if defined(PSP)
   video_driver_set_texture_frame(&dummy_pixels, false, 1, 1, 1.0f);
#endif
}
bool UniscribeController::shapeAndPlaceItem(const UChar* cp, unsigned i, const Font* fontData, GlyphBuffer* glyphBuffer) { // Determine the string for this item. const UChar* str = cp + m_items[i].iCharPos; int len = m_items[i+1].iCharPos - m_items[i].iCharPos; SCRIPT_ITEM item = m_items[i]; // Set up buffers to hold the results of shaping the item. Vector<WORD> glyphs; Vector<WORD> clusters; Vector<SCRIPT_VISATTR> visualAttributes; clusters.resize(len); // Shape the item. // The recommended size for the glyph buffer is 1.5 * the character length + 16 in the uniscribe docs. // Apparently this is a good size to avoid having to make repeated calls to ScriptShape. glyphs.resize(1.5 * len + 16); visualAttributes.resize(glyphs.size()); if (!shape(str, len, item, fontData, glyphs, clusters, visualAttributes)) return true; // We now have a collection of glyphs. Vector<GOFFSET> offsets; Vector<int> advances; offsets.resize(glyphs.size()); advances.resize(glyphs.size()); int glyphCount = 0; HRESULT placeResult = ScriptPlace(0, fontData->scriptCache(), glyphs.data(), glyphs.size(), visualAttributes.data(), &item.a, advances.data(), offsets.data(), 0); if (placeResult == E_PENDING) { // The script cache isn't primed with enough info yet. We need to select our HFONT into // a DC and pass the DC in to ScriptPlace. HWndDC hdc(0); HFONT hfont = fontData->platformData().hfont(); HFONT oldFont = (HFONT)SelectObject(hdc, hfont); placeResult = ScriptPlace(hdc, fontData->scriptCache(), glyphs.data(), glyphs.size(), visualAttributes.data(), &item.a, advances.data(), offsets.data(), 0); SelectObject(hdc, oldFont); } if (FAILED(placeResult) || glyphs.isEmpty()) return true; // Convert all chars that should be treated as spaces to use the space glyph. // We also create a map that allows us to quickly go from space glyphs back to their corresponding characters. 
Vector<int> spaceCharacters(glyphs.size()); spaceCharacters.fill(-1); const float cLogicalScale = fontData->platformData().useGDI() ? 1.0f : 32.0f; float spaceWidth = fontData->spaceWidth() - fontData->syntheticBoldOffset(); unsigned logicalSpaceWidth = spaceWidth * cLogicalScale; for (int k = 0; k < len; k++) { UChar ch = *(str + k); bool treatAsSpace = FontCascade::treatAsSpace(ch); bool treatAsZeroWidthSpace = FontCascade::treatAsZeroWidthSpace(ch); if (treatAsSpace || treatAsZeroWidthSpace) { // Substitute in the space glyph at the appropriate place in the glyphs // array. glyphs[clusters[k]] = fontData->spaceGlyph(); advances[clusters[k]] = treatAsSpace ? logicalSpaceWidth : 0; if (treatAsSpace) spaceCharacters[clusters[k]] = m_currentCharacter + k + item.iCharPos; } } // Populate our glyph buffer with this information. bool hasExtraSpacing = m_font.letterSpacing() || m_font.wordSpacing() || m_padding; float leftEdge = m_runWidthSoFar; for (unsigned k = 0; k < glyphs.size(); k++) { Glyph glyph = glyphs[k]; float advance = advances[k] / cLogicalScale; float offsetX = offsets[k].du / cLogicalScale; float offsetY = offsets[k].dv / cLogicalScale; // Match AppKit's rules for the integer vs. non-integer rendering modes. float roundedAdvance = roundf(advance); if (!fontData->platformData().isSystemFont()) { advance = roundedAdvance; offsetX = roundf(offsetX); offsetY = roundf(offsetY); } advance += fontData->syntheticBoldOffset(); if (hasExtraSpacing) { // If we're a glyph with an advance, go ahead and add in letter-spacing. // That way we weed out zero width lurkers. This behavior matches the fast text code path. if (advance && m_font.letterSpacing()) advance += m_font.letterSpacing(); // Handle justification and word-spacing. int characterIndex = spaceCharacters[k]; // characterIndex is left at the initial value of -1 for glyphs that do not map back to treated-as-space characters. if (characterIndex != -1) { // Account for padding. 
WebCore uses space padding to justify text. // We distribute the specified padding over the available spaces in the run. if (m_padding) { // Use leftover padding if not evenly divisible by number of spaces. if (m_padding < m_padPerSpace) { advance += m_padding; m_padding = 0; } else { m_padding -= m_padPerSpace; advance += m_padPerSpace; } } // Account for word-spacing. if (characterIndex > 0 && m_font.wordSpacing()) { UChar candidateSpace; if (m_run.is8Bit()) candidateSpace = *(m_run.data8(characterIndex - 1)); else candidateSpace = *(m_run.data16(characterIndex - 1)); if (!FontCascade::treatAsSpace(candidateSpace)) advance += m_font.wordSpacing(); } } } m_runWidthSoFar += advance; // FIXME: We need to take the GOFFSETS for combining glyphs and store them in the glyph buffer // as well, so that when the time comes to draw those glyphs, we can apply the appropriate // translation. if (glyphBuffer) { FloatSize size(offsetX, -offsetY); glyphBuffer->add(glyph, fontData, advance, GlyphBuffer::noOffset, &size); } FloatRect glyphBounds = fontData->boundsForGlyph(glyph); glyphBounds.move(m_glyphOrigin.x(), m_glyphOrigin.y()); m_minGlyphBoundingBoxX = min(m_minGlyphBoundingBoxX, glyphBounds.x()); m_maxGlyphBoundingBoxX = max(m_maxGlyphBoundingBoxX, glyphBounds.maxX()); m_minGlyphBoundingBoxY = min(m_minGlyphBoundingBoxY, glyphBounds.y()); m_maxGlyphBoundingBoxY = max(m_maxGlyphBoundingBoxY, glyphBounds.maxY()); m_glyphOrigin.move(advance + offsetX, -offsetY); // Mutate the glyph array to contain our altered advances. if (m_computingOffsetPosition) advances[k] = advance; } while (m_computingOffsetPosition && m_offsetX >= leftEdge && m_offsetX < m_runWidthSoFar) { // The position is somewhere inside this run. 
int trailing = 0; HRESULT rc = ::ScriptXtoCP(m_offsetX - leftEdge, clusters.size(), glyphs.size(), clusters.data(), visualAttributes.data(), advances.data(), &item.a, &m_offsetPosition, &trailing); if (FAILED(rc)) { WTFLogAlways("UniscribeController::shapeAndPlaceItem: ScriptXtoCP failed rc=%lx", rc); return true; } if (trailing && m_includePartialGlyphs && m_offsetPosition < len - 1) { m_offsetPosition += m_currentCharacter + m_items[i].iCharPos; m_offsetX += m_run.rtl() ? -trailing : trailing; } else { m_computingOffsetPosition = false; m_offsetPosition += m_currentCharacter + m_items[i].iCharPos; if (trailing && m_includePartialGlyphs) m_offsetPosition++; return false; } } return true; }
// create fskdem object (frequency demodulator) // _m : bits per symbol, _m > 0 // _k : samples/symbol, _k >= 2^_m // _bandwidth : total signal bandwidth, (0,0.5) fskdem fskdem_create(unsigned int _m, unsigned int _k, float _bandwidth) { // validate input if (_m == 0) { fprintf(stderr,"error: fskdem_create(), bits/symbol must be greater than 0\n"); exit(1); } else if (_k < 2 || _k > 2048) { fprintf(stderr,"error: fskdem_create(), samples/symbol must be in [2^_m, 2048]\n"); exit(1); } else if (_bandwidth <= 0.0f || _bandwidth >= 0.5f) { fprintf(stderr,"error: fskdem_create(), bandwidth must be in (0,0.5)\n"); exit(1); } // create main object memory fskdem q = (fskdem) malloc(sizeof(struct fskdem_s)); // set basic internal properties q->m = _m; // bits per symbol q->k = _k; // samples per symbol q->bandwidth = _bandwidth; // signal bandwidth // derived values q->M = 1 << q->m; // constellation size q->M2 = 0.5f*(float)(q->M-1); // (M-1)/2 // compute demodulation FFT size such that FFT output bin frequencies are // as close to modulated frequencies as possible float df = q->bandwidth / q->M2; // frequency spacing float err_min = 1e9f; // minimum error value unsigned int K_min = q->k; // minimum FFT size unsigned int K_max = q->k*4 < 16 ? 16 : q->k*4; // maximum FFT size unsigned int K_hat; for (K_hat=K_min; K_hat<=K_max; K_hat++) { // compute candidate FFT size float v = 0.5f*df*(float)K_hat; // bin spacing float err = fabsf( roundf(v) - v ); // fractional bin spacing #if DEBUG_FSKDEM // print results printf(" K_hat = %4u : v = %12.8f, err=%12.8f %s\n", K_hat, v, err, err < err_min ? 
"*" : ""); #endif // save best result if (K_hat==K_min || err < err_min) { q->K = K_hat; err_min = err; } // perfect match; no need to continue searching if (err < 1e-6f) break; } // determine demodulation mapping between tones and frequency bins // TODO: use gray coding q->demod_map = (unsigned int *) malloc(q->M * sizeof(unsigned int)); unsigned int i; for (i=0; i<q->M; i++) { // print frequency bins float freq = ((float)i - q->M2) * q->bandwidth / q->M2; float idx = freq * (float)(q->K); unsigned int index = (unsigned int) (idx < 0 ? roundf(idx + q->K) : roundf(idx)); q->demod_map[i] = index; #if DEBUG_FSKDEM printf(" s=%3u, f = %12.8f, index=%3u\n", i, freq, index); #endif } // check for uniqueness for (i=1; i<q->M; i++) { if (q->demod_map[i] == q->demod_map[i-1]) { fprintf(stderr,"warning: fskdem_create(), demod map is not unique; consider increasing bandwidth\n"); break; } } // allocate memory for transform q->buf_time = (float complex*) malloc(q->K * sizeof(float complex)); q->buf_freq = (float complex*) malloc(q->K * sizeof(float complex)); q->fft = FFT_CREATE_PLAN(q->K, q->buf_time, q->buf_freq, FFT_DIR_FORWARD, 0); // reset modem object fskdem_reset(q); // return main object return q; }