void vegaRenderToMask(VGPath path, VGbitfield paintModes,
                      VGMaskOperation operation)
{
   struct vg_context *ctx = vg_current_context();

   if (path == VG_INVALID_HANDLE) {
      vg_set_error(ctx, VG_BAD_HANDLE_ERROR);
      return;
   }
   if (!paintModes || (paintModes & (~(VG_STROKE_PATH | VG_FILL_PATH)))) {
      vg_set_error(ctx, VG_ILLEGAL_ARGUMENT_ERROR);
      return;
   }
   if (operation < VG_CLEAR_MASK || operation > VG_SUBTRACT_MASK) {
      vg_set_error(ctx, VG_ILLEGAL_ARGUMENT_ERROR);
      return;
   }
   if (!vg_object_is_valid(path, VG_OBJECT_PATH)) {
      vg_set_error(ctx, VG_BAD_HANDLE_ERROR);
      return;
   }

   vg_validate_state(ctx);

   mask_render_to(handle_to_path(path), paintModes, operation);
}
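/*
 * Hedged usage sketch for the entry point above: in the public API this is
 * vgRenderToMask(), which rasterizes a path's fill and/or stroke coverage
 * into the drawing surface's alpha mask. It assumes an OpenVG 1.1 context is
 * already current (e.g. via EGL) and that <VG/openvg.h> is included;
 * clip_path is a hypothetical VGPath built elsewhere with
 * vgCreatePath()/vgAppendPathData().
 */
static void clip_to_path_example(VGPath clip_path)
{
   /* replace the mask with the path's fill coverage, then enable masking */
   vgRenderToMask(clip_path, VG_FILL_PATH, VG_SET_MASK);
   vgSeti(VG_MASKING, VG_TRUE);
}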
void vegaMask(VGHandle mask, VGMaskOperation operation,
              VGint x, VGint y,
              VGint width, VGint height)
{
   struct vg_context *ctx = vg_current_context();

   if (width <= 0 || height <= 0) {
      vg_set_error(ctx, VG_ILLEGAL_ARGUMENT_ERROR);
      return;
   }
   if (operation < VG_CLEAR_MASK || operation > VG_SUBTRACT_MASK) {
      vg_set_error(ctx, VG_ILLEGAL_ARGUMENT_ERROR);
      return;
   }

   vg_validate_state(ctx);

   if (operation == VG_CLEAR_MASK) {
      mask_fill(x, y, width, height, 0.f);
   } else if (operation == VG_FILL_MASK) {
      mask_fill(x, y, width, height, 1.f);
   } else if (vg_object_is_valid(mask, VG_OBJECT_IMAGE)) {
      struct vg_image *image = handle_to_image(mask);
      mask_using_image(image, operation, x, y, width, height);
   } else if (vg_object_is_valid(mask, VG_OBJECT_MASK)) {
      struct vg_mask_layer *layer = handle_to_masklayer(mask);
      mask_using_layer(layer, operation, x, y, width, height);
   } else {
      vg_set_error(ctx, VG_BAD_HANDLE_ERROR);
   }
}
void vegaCopyMask(VGMaskLayer maskLayer,
                  VGint sx, VGint sy,
                  VGint dx, VGint dy,
                  VGint width, VGint height)
{
   struct vg_context *ctx = vg_current_context();
   struct vg_mask_layer *mask = 0;

   if (maskLayer == VG_INVALID_HANDLE) {
      vg_set_error(ctx, VG_BAD_HANDLE_ERROR);
      return;
   }
   if (width <= 0 || height <= 0) {
      vg_set_error(ctx, VG_ILLEGAL_ARGUMENT_ERROR);
      return;
   }
   if (!vg_object_is_valid(maskLayer, VG_OBJECT_MASK)) {
      vg_set_error(ctx, VG_BAD_HANDLE_ERROR);
      return;
   }

   vg_validate_state(ctx);

   mask = handle_to_masklayer(maskLayer);
   mask_copy(mask, sx, sy, dx, dy, width, height);
}
void vgCopyPixels(VGint dx, VGint dy,
                  VGint sx, VGint sy,
                  VGint width, VGint height)
{
   struct vg_context *ctx = vg_current_context();
   struct pipe_framebuffer_state *fb = &ctx->state.g3d.fb;
   struct st_renderbuffer *strb = ctx->draw_buffer->strb;

   if (width <= 0 || height <= 0) {
      vg_set_error(ctx, VG_ILLEGAL_ARGUMENT_ERROR);
      return;
   }

   /* do nothing if we copy from outside the fb */
   if (dx >= (VGint)fb->width || dy >= (VGint)fb->height ||
       sx >= (VGint)fb->width || sy >= (VGint)fb->height)
      return;

   vg_validate_state(ctx);

   /* make sure rendering has completed */
   vgFinish();

   vg_copy_surface(ctx, strb->surface, dx, dy,
                   strb->surface, sx, sy, width, height);
}
void vg_prepare_blend_surface_from_mask(struct vg_context *ctx)
{
   struct pipe_surface *dest_surface = NULL;
   struct pipe_context *pipe = ctx->pipe;
   struct st_framebuffer *stfb = ctx->draw_buffer;
   struct st_renderbuffer *strb = stfb->strb;

   vg_validate_state(ctx);

   /* first finish all pending rendering */
   vgFinish();

   dest_surface = pipe->screen->get_tex_surface(pipe->screen,
                                                stfb->blend_texture_view->texture,
                                                0, 0, 0,
                                                PIPE_BIND_RENDER_TARGET);

   /* flip it, because we want to use it as a sampler */
   util_blit_pixels_tex(ctx->blit,
                        stfb->alpha_mask_view,
                        0, strb->height,
                        strb->width, 0,
                        dest_surface,
                        0, 0,
                        strb->width, strb->height,
                        0.0, PIPE_TEX_MIPFILTER_NEAREST);

   /* make sure it's complete */
   vgFinish();

   if (dest_surface)
      pipe_surface_reference(&dest_surface, NULL);
}
static void vg_context_update_surface_mask_view(struct vg_context *ctx,
                                                uint width, uint height)
{
   struct st_framebuffer *stfb = ctx->draw_buffer;
   struct pipe_sampler_view *old_sampler_view = stfb->surface_mask_view;
   struct pipe_context *pipe = ctx->pipe;

   if (old_sampler_view &&
       old_sampler_view->texture->width0 == width &&
       old_sampler_view->texture->height0 == height)
      return;

   /*
     we use PIPE_FORMAT_B8G8R8A8_UNORM because we want to render to
     this texture and use it as a sampler, so while this wastes some
     space it makes both of those a lot simpler
   */
   stfb->surface_mask_view = create_tex_and_view(pipe,
                                                 PIPE_FORMAT_B8G8R8A8_UNORM,
                                                 width, height);

   if (!stfb->surface_mask_view) {
      if (old_sampler_view)
         pipe_sampler_view_reference(&old_sampler_view, NULL);
      return;
   }

   /* XXX could this call be avoided? */
   vg_validate_state(ctx);

   /* alpha mask starts with 1.f alpha */
   mask_fill(0, 0, width, height, 1.f);

   /* if we had an old surface copy it over */
   if (old_sampler_view) {
      struct pipe_box src_box;
      u_box_origin_2d(MIN2(old_sampler_view->texture->width0,
                           stfb->surface_mask_view->texture->width0),
                      MIN2(old_sampler_view->texture->height0,
                           stfb->surface_mask_view->texture->height0),
                      &src_box);

      pipe->resource_copy_region(pipe,
                                 stfb->surface_mask_view->texture,
                                 0, 0, 0, 0,
                                 old_sampler_view->texture,
                                 0, &src_box);
   }

   /* Free the old texture */
   if (old_sampler_view)
      pipe_sampler_view_reference(&old_sampler_view, NULL);
}
struct pipe_sampler_view *vg_prepare_blend_surface_from_mask(struct vg_context *ctx)
{
   struct st_framebuffer *stfb = ctx->draw_buffer;

   vg_validate_state(ctx);

   vg_context_update_surface_mask_view(ctx, stfb->width, stfb->height);
   vg_prepare_blend_texture(ctx, stfb->surface_mask_view);

   return stfb->blend_texture_view;
}
static void setup_new_alpha_mask(struct vg_context *ctx,
                                 struct st_framebuffer *stfb)
{
   struct pipe_context *pipe = ctx->pipe;
   struct pipe_sampler_view *old_sampler_view = stfb->alpha_mask_view;

   /*
     we use PIPE_FORMAT_B8G8R8A8_UNORM because we want to render to
     this texture and use it as a sampler, so while this wastes some
     space it makes both of those a lot simpler
   */
   stfb->alpha_mask_view = create_tex_and_view(pipe,
                                               PIPE_FORMAT_B8G8R8A8_UNORM,
                                               stfb->width, stfb->height);

   if (!stfb->alpha_mask_view) {
      if (old_sampler_view)
         pipe_sampler_view_reference(&old_sampler_view, NULL);
      return;
   }

   /* XXX could this call be avoided? */
   vg_validate_state(ctx);

   /* alpha mask starts with 1.f alpha */
   mask_fill(0, 0, stfb->width, stfb->height, 1.f);

   /* if we had an old surface copy it over */
   if (old_sampler_view) {
      struct pipe_subresource subsurf, subold_surf;
      subsurf.face = 0;
      subsurf.level = 0;
      subold_surf.face = 0;
      subold_surf.level = 0;
      pipe->resource_copy_region(pipe,
                                 stfb->alpha_mask_view->texture,
                                 subsurf,
                                 0, 0, 0,
                                 old_sampler_view->texture,
                                 subold_surf,
                                 0, 0, 0,
                                 MIN2(old_sampler_view->texture->width0,
                                      stfb->alpha_mask_view->texture->width0),
                                 MIN2(old_sampler_view->texture->height0,
                                      stfb->alpha_mask_view->texture->height0));
   }

   /* Free the old texture */
   if (old_sampler_view)
      pipe_sampler_view_reference(&old_sampler_view, NULL);
}
void vgDrawImage(VGImage image)
{
   struct vg_context *ctx = vg_current_context();

   if (!ctx)
      return;

   if (image == VG_INVALID_HANDLE) {
      vg_set_error(ctx, VG_BAD_HANDLE_ERROR);
      return;
   }

   vg_validate_state(ctx);
   image_draw((struct vg_image*)image);
}
void vegaDrawImage(VGImage image)
{
   struct vg_context *ctx = vg_current_context();

   if (!ctx)
      return;

   if (image == VG_INVALID_HANDLE) {
      vg_set_error(ctx, VG_BAD_HANDLE_ERROR);
      return;
   }

   vg_validate_state(ctx);
   image_draw(handle_to_image(image),
              &ctx->state.vg.image_user_to_surface_matrix);
}
void vegaFillMaskLayer(VGMaskLayer maskLayer,
                       VGint x, VGint y,
                       VGint width, VGint height,
                       VGfloat value)
{
   struct vg_mask_layer *mask = 0;
   struct vg_context *ctx = vg_current_context();

   if (maskLayer == VG_INVALID_HANDLE) {
      vg_set_error(ctx, VG_BAD_HANDLE_ERROR);
      return;
   }
   if (value < 0 || value > 1) {
      vg_set_error(ctx, VG_ILLEGAL_ARGUMENT_ERROR);
      return;
   }
   if (width <= 0 || height <= 0) {
      vg_set_error(ctx, VG_ILLEGAL_ARGUMENT_ERROR);
      return;
   }
   if (x < 0 || y < 0 || (x + width) < 0 || (y + height) < 0) {
      vg_set_error(ctx, VG_ILLEGAL_ARGUMENT_ERROR);
      return;
   }
   if (!vg_object_is_valid(maskLayer, VG_OBJECT_MASK)) {
      vg_set_error(ctx, VG_BAD_HANDLE_ERROR);
      return;
   }

   mask = handle_to_masklayer(maskLayer);

   if (x + width > mask_layer_width(mask) ||
       y + height > mask_layer_height(mask)) {
      vg_set_error(ctx, VG_ILLEGAL_ARGUMENT_ERROR);
      return;
   }

   vg_validate_state(ctx);

   mask_layer_fill(mask, x, y, width, height, value);
}
void vgSetPixels(VGint dx, VGint dy,
                 VGImage src, VGint sx, VGint sy,
                 VGint width, VGint height)
{
   struct vg_context *ctx = vg_current_context();

   vg_validate_state(ctx);

   if (src == VG_INVALID_HANDLE) {
      vg_set_error(ctx, VG_BAD_HANDLE_ERROR);
      return;
   }
   if (width <= 0 || height <= 0) {
      vg_set_error(ctx, VG_ILLEGAL_ARGUMENT_ERROR);
      return;
   }

   image_set_pixels(dx, dy, (struct vg_image*)src, sx, sy,
                    width, height);
}
struct pipe_sampler_view *vg_prepare_blend_surface(struct vg_context *ctx)
{
   struct pipe_context *pipe = ctx->pipe;
   struct pipe_sampler_view *view;
   struct pipe_sampler_view view_templ;
   struct st_framebuffer *stfb = ctx->draw_buffer;
   struct st_renderbuffer *strb = stfb->strb;

   vg_validate_state(ctx);

   u_sampler_view_default_template(&view_templ, strb->texture,
                                   strb->texture->format);
   view = pipe->create_sampler_view(pipe, strb->texture, &view_templ);

   vg_prepare_blend_texture(ctx, view);

   pipe_sampler_view_reference(&view, NULL);

   return stfb->blend_texture_view;
}
void vgCopyImage(VGImage dst, VGint dx, VGint dy,
                 VGImage src, VGint sx, VGint sy,
                 VGint width, VGint height,
                 VGboolean dither)
{
   struct vg_context *ctx = vg_current_context();

   if (src == VG_INVALID_HANDLE || dst == VG_INVALID_HANDLE) {
      vg_set_error(ctx, VG_BAD_HANDLE_ERROR);
      return;
   }
   if (width <= 0 || height <= 0) {
      vg_set_error(ctx, VG_ILLEGAL_ARGUMENT_ERROR);
      return;
   }

   vg_validate_state(ctx);

   image_copy((struct vg_image*)dst, dx, dy,
              (struct vg_image*)src, sx, sy,
              width, height, dither);
}
void vgWritePixels(const void * data, VGint dataStride,
                   VGImageFormat dataFormat,
                   VGint dx, VGint dy,
                   VGint width, VGint height)
{
   struct vg_context *ctx = vg_current_context();
   struct pipe_context *pipe = ctx->pipe;

   if (!supported_image_format(dataFormat)) {
      vg_set_error(ctx, VG_UNSUPPORTED_IMAGE_FORMAT_ERROR);
      return;
   }
   if (!data || !is_aligned(data)) {
      vg_set_error(ctx, VG_ILLEGAL_ARGUMENT_ERROR);
      return;
   }
   if (width <= 0 || height <= 0) {
      vg_set_error(ctx, VG_ILLEGAL_ARGUMENT_ERROR);
      return;
   }

   vg_validate_state(ctx);
   {
      struct vg_image *img = image_create(dataFormat, width, height);
      image_sub_data(img, data, dataStride, dataFormat,
                     0, 0, width, height);
#if 0
      struct matrix *matrix = &ctx->state.vg.image_user_to_surface_matrix;
      matrix_translate(matrix, dx, dy);
      image_draw(img);
      matrix_translate(matrix, -dx, -dy);
#else
      /* this looks like a better approach */
      image_set_pixels(dx, dy, img, 0, 0, width, height);
#endif
      image_destroy(img);
   }
   /* make sure rendering has completed */
   pipe->flush(pipe, PIPE_FLUSH_RENDER_CACHE, NULL);
}
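/*
 * Hedged usage sketch for the entry point above: vgWritePixels() copies raw
 * pixel data straight to the drawing surface, bypassing image objects and the
 * image-user-to-surface matrix. Assumes a current OpenVG context and
 * <VG/openvg.h>; the 64x64 size, (10, 10) destination, and solid color are
 * arbitrary values chosen for illustration.
 */
static void write_solid_block_example(void)
{
   VGuint pixels[64 * 64];
   VGint i;

   /* opaque red in VG_sRGBA_8888 (R in the most significant byte) */
   for (i = 0; i < 64 * 64; ++i)
      pixels[i] = 0xff0000ff;

   /* stride is in bytes; rows are tightly packed here */
   vgWritePixels(pixels, 64 * sizeof(VGuint), VG_sRGBA_8888,
                 10, 10, 64, 64);
}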
void vegaClear(VGint x, VGint y,
               VGint width, VGint height)
{
   struct vg_context *ctx = vg_current_context();
   struct st_framebuffer *stfb = ctx->draw_buffer;

   if (width <= 0 || height <= 0) {
      vg_set_error(ctx, VG_ILLEGAL_ARGUMENT_ERROR);
      return;
   }

   vg_validate_state(ctx);
#if 0
   debug_printf("Clear [%d, %d, %d, %d] with [%f, %f, %f, %f]\n",
                x, y, width, height,
                ctx->state.vg.clear_color[0],
                ctx->state.vg.clear_color[1],
                ctx->state.vg.clear_color[2],
                ctx->state.vg.clear_color[3]);
#endif

   /* check for a whole surface clear */
   if (!ctx->state.vg.scissoring &&
       (x == 0 && y == 0 && width == stfb->width && height == stfb->height)) {
      union pipe_color_union clear_color;
      clear_color.f[0] = ctx->state.vg.clear_color[0];
      clear_color.f[1] = ctx->state.vg.clear_color[1];
      clear_color.f[2] = ctx->state.vg.clear_color[2];
      clear_color.f[3] = ctx->state.vg.clear_color[3];
      ctx->pipe->clear(ctx->pipe, PIPE_CLEAR_COLOR | PIPE_CLEAR_DEPTHSTENCIL,
                       &clear_color, 1., 0);
   } else if (renderer_clear_begin(ctx->renderer)) {
      /* XXX verify coord round-off */
      renderer_clear(ctx->renderer, x, y, width, height,
                     ctx->state.vg.clear_color);
      renderer_clear_end(ctx->renderer);
   }
}
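/*
 * Sketch of how the clear fast path above is reached from the API side,
 * assuming a current OpenVG context, <VG/openvg.h>, and hypothetical surf_w /
 * surf_h variables holding the surface size. A full-surface vgClear() with
 * scissoring disabled maps to the single pipe->clear() call; any other
 * rectangle goes through the renderer's quad-based clear.
 */
static void clear_example(VGint surf_w, VGint surf_h)
{
   const VGfloat white[4] = { 1.0f, 1.0f, 1.0f, 1.0f };

   vgSetfv(VG_CLEAR_COLOR, 4, white);
   vgSeti(VG_SCISSORING, VG_FALSE);
   vgClear(0, 0, surf_w, surf_h);   /* whole-surface clear: hits pipe->clear() */
   vgClear(8, 8, 16, 16);           /* partial clear: uses renderer_clear() */
}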
void vegaConvolve(VGImage dst, VGImage src,
                  VGint kernelWidth, VGint kernelHeight,
                  VGint shiftX, VGint shiftY,
                  const VGshort * kernel,
                  VGfloat scale,
                  VGfloat bias,
                  VGTilingMode tilingMode)
{
   struct vg_context *ctx = vg_current_context();
   VGfloat *buffer;
   VGint buffer_len;
   VGint i, j;
   VGint idx = 0;
   struct vg_image *d, *s;
   VGint kernel_size = kernelWidth * kernelHeight;
   struct filter_info info;
   const VGint max_kernel_size = vegaGeti(VG_MAX_KERNEL_SIZE);

   if (dst == VG_INVALID_HANDLE || src == VG_INVALID_HANDLE) {
      vg_set_error(ctx, VG_BAD_HANDLE_ERROR);
      return;
   }
   if (kernelWidth <= 0 || kernelHeight <= 0 ||
       kernelWidth > max_kernel_size || kernelHeight > max_kernel_size) {
      vg_set_error(ctx, VG_ILLEGAL_ARGUMENT_ERROR);
      return;
   }
   if (!kernel || !is_aligned_to(kernel, 2)) {
      vg_set_error(ctx, VG_ILLEGAL_ARGUMENT_ERROR);
      return;
   }
   if (tilingMode < VG_TILE_FILL ||
       tilingMode > VG_TILE_REFLECT) {
      vg_set_error(ctx, VG_ILLEGAL_ARGUMENT_ERROR);
      return;
   }

   d = handle_to_image(dst);
   s = handle_to_image(src);

   if (vg_image_overlaps(d, s)) {
      vg_set_error(ctx, VG_ILLEGAL_ARGUMENT_ERROR);
      return;
   }

   vg_validate_state(ctx);

   buffer_len = 8 + 2 * 4 * kernel_size;
   buffer = malloc(buffer_len * sizeof(VGfloat));

   buffer[0] = 0.f;
   buffer[1] = 1.f;
   buffer[2] = 2.f; /*unused*/
   buffer[3] = 4.f; /*unused*/

   buffer[4] = kernelWidth * kernelHeight;
   buffer[5] = scale;
   buffer[6] = bias;
   buffer[7] = 0.f;

   idx = 8;
   for (j = 0; j < kernelHeight; ++j) {
      for (i = 0; i < kernelWidth; ++i) {
         VGint index = j * kernelWidth + i;
         VGfloat x, y;

         x = texture_offset(s->width, kernelWidth, i, shiftX);
         y = texture_offset(s->height, kernelHeight, j, shiftY);

         buffer[idx + index*4 + 0] = x;
         buffer[idx + index*4 + 1] = y;
         buffer[idx + index*4 + 2] = 0.f;
         buffer[idx + index*4 + 3] = 0.f;
      }
   }
   idx += kernel_size * 4;

   for (j = 0; j < kernelHeight; ++j) {
      for (i = 0; i < kernelWidth; ++i) {
         /* transpose the kernel */
         VGint index = j * kernelWidth + i;
         VGint kindex = (kernelWidth - i - 1) * kernelHeight +
                        (kernelHeight - j - 1);

         buffer[idx + index*4 + 0] = kernel[kindex];
         buffer[idx + index*4 + 1] = kernel[kindex];
         buffer[idx + index*4 + 2] = kernel[kindex];
         buffer[idx + index*4 + 3] = kernel[kindex];
      }
   }

   info.dst = d;
   info.src = s;
   info.setup_shader = &setup_convolution;
   info.user_data = (void*)(long)(buffer_len/4);
   info.const_buffer = buffer;
   info.const_buffer_len = buffer_len * sizeof(VGfloat);
   info.tiling_mode = tilingMode;
   info.extra_texture_view = NULL;

   execute_filter(ctx, &info);

   free(buffer);
}
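/*
 * Hedged example of driving the filter above through the OpenVG API: a 3x3
 * box blur between two images. Assumes a current context, <VG/openvg.h>, and
 * two hypothetical same-sized VGImage handles created elsewhere with
 * vgCreateImage(). scale = 1/9 normalizes the kernel sum.
 */
static void box_blur_example(VGImage dst_img, VGImage src_img)
{
   static const VGshort box_kernel[9] = {
      1, 1, 1,
      1, 1, 1,
      1, 1, 1
   };

   vgConvolve(dst_img, src_img,
              3, 3,            /* kernelWidth, kernelHeight */
              0, 0,            /* shiftX, shiftY */
              box_kernel,
              1.0f / 9.0f,     /* scale */
              0.0f,            /* bias */
              VG_TILE_PAD);
}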
void vegaLookupSingle(VGImage dst, VGImage src,
                      const VGuint * lookupTable,
                      VGImageChannel sourceChannel,
                      VGboolean outputLinear,
                      VGboolean outputPremultiplied)
{
   struct vg_context *ctx = vg_current_context();
   struct vg_image *d, *s;
   struct pipe_sampler_view *lut_texture_view;
   VGfloat buffer[4];
   struct filter_info info;
   VGuint color_data[256];
   VGint i;

   if (dst == VG_INVALID_HANDLE || src == VG_INVALID_HANDLE) {
      vg_set_error(ctx, VG_BAD_HANDLE_ERROR);
      return;
   }
   if (!lookupTable || !is_aligned(lookupTable)) {
      vg_set_error(ctx, VG_ILLEGAL_ARGUMENT_ERROR);
      return;
   }
   if (sourceChannel != VG_RED && sourceChannel != VG_GREEN &&
       sourceChannel != VG_BLUE && sourceChannel != VG_ALPHA) {
      vg_set_error(ctx, VG_ILLEGAL_ARGUMENT_ERROR);
      return;
   }

   d = handle_to_image(dst);
   s = handle_to_image(src);

   if (vg_image_overlaps(d, s)) {
      vg_set_error(ctx, VG_ILLEGAL_ARGUMENT_ERROR);
      return;
   }

   vg_validate_state(ctx);

   for (i = 0; i < 256; ++i) {
      VGuint rgba = lookupTable[i];
      VGubyte blue, green, red, alpha;

      red   = (rgba & 0xff000000) >> 24;
      green = (rgba & 0x00ff0000) >> 16;
      blue  = (rgba & 0x0000ff00) >>  8;
      alpha = (rgba & 0x000000ff) >>  0;
      color_data[i] = blue << 24 | green << 16 |
                      red  <<  8 | alpha;
   }
   lut_texture_view = create_texture_1d_view(ctx, color_data, 256);

   buffer[0] = 0.f;
   buffer[1] = 0.f;
   buffer[2] = 1.f;
   buffer[3] = 1.f;

   info.dst = d;
   info.src = s;
   info.setup_shader = &setup_lookup_single;
   info.user_data = (void*)sourceChannel;
   info.const_buffer = buffer;
   info.const_buffer_len = 4 * sizeof(VGfloat);
   info.tiling_mode = VG_TILE_PAD;
   info.extra_texture_view = lut_texture_view;

   execute_filter(ctx, &info);

   pipe_sampler_view_reference(&lut_texture_view, NULL);
}
static void setup_new_alpha_mask(struct vg_context *ctx,
                                 struct st_framebuffer *stfb,
                                 uint width, uint height)
{
   struct pipe_context *pipe = ctx->pipe;
   struct pipe_texture *old_texture = stfb->alpha_mask;

   /*
     we use PIPE_FORMAT_A8R8G8B8_UNORM because we want to render to
     this texture and use it as a sampler, so while this wastes some
     space it makes both of those a lot simpler
   */
   stfb->alpha_mask = create_texture(pipe, PIPE_FORMAT_A8R8G8B8_UNORM,
                                     width, height);

   if (!stfb->alpha_mask) {
      if (old_texture)
         pipe_texture_reference(&old_texture, NULL);
      return;
   }

   vg_validate_state(ctx);

   /* alpha mask starts with 1.f alpha */
   mask_fill(0, 0, width, height, 1.f);

   /* if we had an old surface copy it over */
   if (old_texture) {
      struct pipe_surface *surface = pipe->screen->get_tex_surface(
         pipe->screen, stfb->alpha_mask, 0, 0, 0,
         PIPE_BUFFER_USAGE_GPU_WRITE);
      struct pipe_surface *old_surface = pipe->screen->get_tex_surface(
         pipe->screen, old_texture, 0, 0, 0,
         PIPE_BUFFER_USAGE_GPU_READ);

      if (pipe->surface_copy) {
         pipe->surface_copy(pipe, surface, 0, 0,
                            old_surface, 0, 0,
                            MIN2(old_surface->width, width),
                            MIN2(old_surface->height, height));
      } else {
         util_surface_copy(pipe, FALSE, surface, 0, 0,
                           old_surface, 0, 0,
                           MIN2(old_surface->width, width),
                           MIN2(old_surface->height, height));
      }

      if (surface)
         pipe_surface_reference(&surface, NULL);
      if (old_surface)
         pipe_surface_reference(&old_surface, NULL);
   }

   /* Free the old texture */
   if (old_texture)
      pipe_texture_reference(&old_texture, NULL);
}