/**
 * vgModifyPathCoords - overwrite the coordinate data of an existing
 * run of segments in @dstPath, starting at segment @startIndex.
 *
 * Error semantics (per the OpenVG error model visible in this file):
 *   VG_BAD_HANDLE_ERROR       - dstPath is VG_INVALID_HANDLE
 *   VG_ILLEGAL_ARGUMENT_ERROR - bad index/count, NULL or misaligned data,
 *                               or the range exceeds the path's segments
 *   VG_PATH_CAPABILITY_ERROR  - path lacks VG_PATH_CAPABILITY_MODIFY
 */
void vgModifyPathCoords(VGPath dstPath,
                        VGint startIndex,
                        VGint numSegments,
                        const void * pathData)
{
   struct vg_context *ctx = vg_current_context();
   struct path *p = 0;

   if (dstPath == VG_INVALID_HANDLE) {
      vg_set_error(ctx, VG_BAD_HANDLE_ERROR);
      return;
   }
   if (startIndex < 0 || numSegments <= 0) {
      vg_set_error(ctx, VG_ILLEGAL_ARGUMENT_ERROR);
      return;
   }
   p = (struct path *)dstPath;

   /* pathData must be aligned to the path's datatype size */
   if (!pathData || !is_aligned_to(pathData, path_datatype_size(p))) {
      vg_set_error(ctx, VG_ILLEGAL_ARGUMENT_ERROR);
      return;
   }
   /* Range check written so that (startIndex + numSegments) is never
    * computed directly: for large inputs that sum can overflow signed
    * int, which is undefined behavior and could let the check pass.
    * Both operands here are non-negative, so the subtraction is safe. */
   if (numSegments > path_num_segments(p) - startIndex) {
      vg_set_error(ctx, VG_ILLEGAL_ARGUMENT_ERROR);
      return;
   }
   if (!(path_capabilities(p) & VG_PATH_CAPABILITY_MODIFY)) {
      vg_set_error(ctx, VG_PATH_CAPABILITY_ERROR);
      return;
   }

   path_modify_coords(p, startIndex, numSegments, pathData);
}
void vegaSeparableConvolve(VGImage dst, VGImage src, VGint kernelWidth, VGint kernelHeight, VGint shiftX, VGint shiftY, const VGshort * kernelX, const VGshort * kernelY, VGfloat scale, VGfloat bias, VGTilingMode tilingMode) { struct vg_context *ctx = vg_current_context(); VGshort *kernel; VGint i, j, idx = 0; const VGint max_kernel_size = vegaGeti(VG_MAX_SEPARABLE_KERNEL_SIZE); if (dst == VG_INVALID_HANDLE || src == VG_INVALID_HANDLE) { vg_set_error(ctx, VG_BAD_HANDLE_ERROR); return; } if (kernelWidth <= 0 || kernelHeight <= 0 || kernelWidth > max_kernel_size || kernelHeight > max_kernel_size) { vg_set_error(ctx, VG_ILLEGAL_ARGUMENT_ERROR); return; } if (!kernelX || !kernelY || !is_aligned_to(kernelX, 2) || !is_aligned_to(kernelY, 2)) { vg_set_error(ctx, VG_ILLEGAL_ARGUMENT_ERROR); return; } if (tilingMode < VG_TILE_FILL || tilingMode > VG_TILE_REFLECT) { vg_set_error(ctx, VG_ILLEGAL_ARGUMENT_ERROR); return; } kernel = malloc(sizeof(VGshort)*kernelWidth*kernelHeight); for (i = 0; i < kernelWidth; ++i) { for (j = 0; j < kernelHeight; ++j) { kernel[idx] = kernelX[i] * kernelY[j]; ++idx; } } vegaConvolve(dst, src, kernelWidth, kernelHeight, shiftX, shiftY, kernel, scale, bias, tilingMode); free(kernel); }
/**
 * vgAppendPathData - append @numSegments segments (commands in
 * @pathSegments, coordinates in @pathData) to the end of @dstPath.
 *
 * Error semantics:
 *   VG_BAD_HANDLE_ERROR       - dstPath is VG_INVALID_HANDLE
 *   VG_ILLEGAL_ARGUMENT_ERROR - NULL arrays, non-positive count,
 *                               out-of-range segment command, or
 *                               misaligned pathData
 *   VG_PATH_CAPABILITY_ERROR  - path lacks VG_PATH_CAPABILITY_APPEND_TO
 */
void vgAppendPathData(VGPath dstPath,
                      VGint numSegments,
                      const VGubyte * pathSegments,
                      const void * pathData)
{
   struct vg_context *ctx = vg_current_context();
   struct path *path = 0;
   VGint seg;

   if (dstPath == VG_INVALID_HANDLE) {
      vg_set_error(ctx, VG_BAD_HANDLE_ERROR);
      return;
   }
   if (!pathSegments) {
      vg_set_error(ctx, VG_ILLEGAL_ARGUMENT_ERROR);
      return;
   }
   if (numSegments <= 0) {
      vg_set_error(ctx, VG_ILLEGAL_ARGUMENT_ERROR);
      return;
   }

   /* Every segment command must be a known OpenVG segment type. */
   for (seg = 0; seg < numSegments; ++seg) {
      const VGubyte cmd = pathSegments[seg];
      if (cmd < VG_CLOSE_PATH || cmd > VG_LCWARC_TO_REL) {
         vg_set_error(ctx, VG_ILLEGAL_ARGUMENT_ERROR);
         return;
      }
   }

   path = (struct path *)dstPath;

   /* Coordinate data must be present and aligned to the path datatype. */
   if (!pathData || !is_aligned_to(pathData, path_datatype_size(path))) {
      vg_set_error(ctx, VG_ILLEGAL_ARGUMENT_ERROR);
      return;
   }
   if (!(path_capabilities(path) & VG_PATH_CAPABILITY_APPEND_TO)) {
      vg_set_error(ctx, VG_PATH_CAPABILITY_ERROR);
      return;
   }

   path_append_data(path, numSegments, pathSegments, pathData);
}
void vegaConvolve(VGImage dst, VGImage src, VGint kernelWidth, VGint kernelHeight, VGint shiftX, VGint shiftY, const VGshort * kernel, VGfloat scale, VGfloat bias, VGTilingMode tilingMode) { struct vg_context *ctx = vg_current_context(); VGfloat *buffer; VGint buffer_len; VGint i, j; VGint idx = 0; struct vg_image *d, *s; VGint kernel_size = kernelWidth * kernelHeight; struct filter_info info; const VGint max_kernel_size = vegaGeti(VG_MAX_KERNEL_SIZE); if (dst == VG_INVALID_HANDLE || src == VG_INVALID_HANDLE) { vg_set_error(ctx, VG_BAD_HANDLE_ERROR); return; } if (kernelWidth <= 0 || kernelHeight <= 0 || kernelWidth > max_kernel_size || kernelHeight > max_kernel_size) { vg_set_error(ctx, VG_ILLEGAL_ARGUMENT_ERROR); return; } if (!kernel || !is_aligned_to(kernel, 2)) { vg_set_error(ctx, VG_ILLEGAL_ARGUMENT_ERROR); return; } if (tilingMode < VG_TILE_FILL || tilingMode > VG_TILE_REFLECT) { vg_set_error(ctx, VG_ILLEGAL_ARGUMENT_ERROR); return; } d = handle_to_image(dst); s = handle_to_image(src); if (vg_image_overlaps(d, s)) { vg_set_error(ctx, VG_ILLEGAL_ARGUMENT_ERROR); return; } vg_validate_state(ctx); buffer_len = 8 + 2 * 4 * kernel_size; buffer = malloc(buffer_len * sizeof(VGfloat)); buffer[0] = 0.f; buffer[1] = 1.f; buffer[2] = 2.f; /*unused*/ buffer[3] = 4.f; /*unused*/ buffer[4] = kernelWidth * kernelHeight; buffer[5] = scale; buffer[6] = bias; buffer[7] = 0.f; idx = 8; for (j = 0; j < kernelHeight; ++j) { for (i = 0; i < kernelWidth; ++i) { VGint index = j * kernelWidth + i; VGfloat x, y; x = texture_offset(s->width, kernelWidth, i, shiftX); y = texture_offset(s->height, kernelHeight, j, shiftY); buffer[idx + index*4 + 0] = x; buffer[idx + index*4 + 1] = y; buffer[idx + index*4 + 2] = 0.f; buffer[idx + index*4 + 3] = 0.f; } } idx += kernel_size * 4; for (j = 0; j < kernelHeight; ++j) { for (i = 0; i < kernelWidth; ++i) { /* transpose the kernel */ VGint index = j * kernelWidth + i; VGint kindex = (kernelWidth - i - 1) * kernelHeight + (kernelHeight - j - 1); 
buffer[idx + index*4 + 0] = kernel[kindex]; buffer[idx + index*4 + 1] = kernel[kindex]; buffer[idx + index*4 + 2] = kernel[kindex]; buffer[idx + index*4 + 3] = kernel[kindex]; } } info.dst = d; info.src = s; info.setup_shader = &setup_convolution; info.user_data = (void*)(long)(buffer_len/4); info.const_buffer = buffer; info.const_buffer_len = buffer_len * sizeof(VGfloat); info.tiling_mode = tilingMode; info.extra_texture_view = NULL; execute_filter(ctx, &info); free(buffer); }
/* Returns true when @addr is aligned to the alignment of T, delegating to
 * is_aligned_to() with span_align_of<T>().
 * NOTE(review): the template header declaring T precedes this visible span;
 * assumed to be `template <typename T>` — confirm against the full file. */
static constexpr inline bool is_aligned_as(span_size_t addr) noexcept { return is_aligned_to(addr, span_align_of<T>()); }