/* Create the a4xx pipe_context.  Installs the a4xx state-object create
 * hooks and the draw/gmem/texture/prog/emit callbacks, hands off to
 * fd_context_init() for the generation-independent setup (which calls
 * pctx->destroy on failure -- destroy is installed just above for that
 * reason), then allocates the VS/FS private-memory BOs, the VSC size BO,
 * the common VBOs, query state, and the border-color uploader.
 * NOTE(review): the fd_bo_new()/u_upload_create() results are not
 * NULL-checked here -- presumably failure is tolerated downstream; confirm.
 */
struct pipe_context * fd4_context_create(struct pipe_screen *pscreen, void *priv, unsigned flags) { struct fd_screen *screen = fd_screen(pscreen); struct fd4_context *fd4_ctx = CALLOC_STRUCT(fd4_context); struct pipe_context *pctx; if (!fd4_ctx) return NULL; pctx = &fd4_ctx->base.base; pctx->screen = pscreen; fd4_ctx->base.dev = fd_device_ref(screen->dev); fd4_ctx->base.screen = fd_screen(pscreen); pctx->destroy = fd4_context_destroy; pctx->create_blend_state = fd4_blend_state_create; pctx->create_rasterizer_state = fd4_rasterizer_state_create; pctx->create_depth_stencil_alpha_state = fd4_zsa_state_create; fd4_draw_init(pctx); fd4_gmem_init(pctx); fd4_texture_init(pctx); fd4_prog_init(pctx); fd4_emit_init(pctx); pctx = fd_context_init(&fd4_ctx->base, pscreen, primtypes, priv, flags); if (!pctx) return NULL; fd_hw_query_init(pctx); fd4_ctx->vs_pvt_mem = fd_bo_new(screen->dev, 0x2000, DRM_FREEDRENO_GEM_TYPE_KMEM, "vs_pvt"); fd4_ctx->fs_pvt_mem = fd_bo_new(screen->dev, 0x2000, DRM_FREEDRENO_GEM_TYPE_KMEM, "fs_pvt"); fd4_ctx->vsc_size_mem = fd_bo_new(screen->dev, 0x1000, DRM_FREEDRENO_GEM_TYPE_KMEM, "vsc_size"); fd_context_setup_common_vbos(&fd4_ctx->base); fd4_query_context_init(pctx); fd4_ctx->border_color_uploader = u_upload_create(pctx, 4096, 0, PIPE_USAGE_STREAM, 0); return pctx; }
/* Older copy of fd4_context_create (pre-"flags"-less fd_context_init API,
 * and fd_bo_new without the name argument).  Same overall shape as the
 * newer version: install a4xx hooks, run common init, allocate private
 * memory BOs, then build the solid/blit vertex buffers and the
 * solid_vbuf_state vertex-elements state.
 * NOTE(review): this snippet appears truncated -- no return statement or
 * closing brace is visible; confirm against the full file before relying
 * on it.
 */
struct pipe_context * fd4_context_create(struct pipe_screen *pscreen, void *priv, unsigned flags) { struct fd_screen *screen = fd_screen(pscreen); struct fd4_context *fd4_ctx = CALLOC_STRUCT(fd4_context); struct pipe_context *pctx; if (!fd4_ctx) return NULL; pctx = &fd4_ctx->base.base; fd4_ctx->base.dev = fd_device_ref(screen->dev); fd4_ctx->base.screen = fd_screen(pscreen); pctx->destroy = fd4_context_destroy; pctx->create_blend_state = fd4_blend_state_create; pctx->create_rasterizer_state = fd4_rasterizer_state_create; pctx->create_depth_stencil_alpha_state = fd4_zsa_state_create; fd4_draw_init(pctx); fd4_gmem_init(pctx); fd4_texture_init(pctx); fd4_prog_init(pctx); fd4_emit_init(pctx); pctx = fd_context_init(&fd4_ctx->base, pscreen, primtypes, priv); if (!pctx) return NULL; util_dynarray_init(&fd4_ctx->rbrc_patches); fd4_ctx->vs_pvt_mem = fd_bo_new(screen->dev, 0x2000, DRM_FREEDRENO_GEM_TYPE_KMEM); fd4_ctx->fs_pvt_mem = fd_bo_new(screen->dev, 0x2000, DRM_FREEDRENO_GEM_TYPE_KMEM); fd4_ctx->vsc_size_mem = fd_bo_new(screen->dev, 0x1000, DRM_FREEDRENO_GEM_TYPE_KMEM); fd4_ctx->solid_vbuf = create_solid_vertexbuf(pctx); fd4_ctx->blit_texcoord_vbuf = create_blit_texcoord_vertexbuf(pctx); /* setup solid_vbuf_state: */ fd4_ctx->solid_vbuf_state.vtx = pctx->create_vertex_elements_state( pctx, 1, (struct pipe_vertex_element[]){{ .vertex_buffer_index = 0, .src_offset = 0, .src_format = PIPE_FORMAT_R32G32B32_FLOAT, }});
/* Create the a2xx pipe_context.  Installs the a2xx state-object create
 * hooks and draw/gmem/texture/prog callbacks, then hands off to
 * fd_context_init() -- picking the a22x or a20x primtype table based on
 * gpu_id -- and finally builds the vertex buffer used for solid ops
 * (clears and gmem<->mem blits) and emits initial setup state.
 */
struct pipe_context * fd2_context_create(struct pipe_screen *pscreen, void *priv) { struct fd_screen *screen = fd_screen(pscreen); struct fd2_context *fd2_ctx = CALLOC_STRUCT(fd2_context); struct pipe_context *pctx; if (!fd2_ctx) return NULL; pctx = &fd2_ctx->base.base; pctx->destroy = fd2_context_destroy; pctx->create_blend_state = fd2_blend_state_create; pctx->create_rasterizer_state = fd2_rasterizer_state_create; pctx->create_depth_stencil_alpha_state = fd2_zsa_state_create; fd2_draw_init(pctx); fd2_gmem_init(pctx); fd2_texture_init(pctx); fd2_prog_init(pctx); pctx = fd_context_init(&fd2_ctx->base, pscreen, (screen->gpu_id >= 220) ? a22x_primtypes : a20x_primtypes, priv); if (!pctx) return NULL; /* construct vertex state used for solid ops (clear, and gmem<->mem) */ fd2_ctx->solid_vertexbuf = create_solid_vertexbuf(pctx); fd2_emit_setup(&fd2_ctx->base); return pctx; }
/* Generation-independent part of context creation, called by the
 * per-generation fdN_context_create() after they have installed their
 * hooks.  Records the supported primtype table (and derives a bitmask),
 * sets a sane default sample mask, allocates the ring buffers, the
 * transfer slab pool, and the core draw/resource/query/texture/state
 * hooks, then creates the blitter and primconvert helpers.
 * On failure it tears down through pctx->destroy -- which this function
 * does not set, so the caller must have installed it beforehand.
 */
struct pipe_context * fd_context_init(struct fd_context *ctx, struct pipe_screen *pscreen, const uint8_t *primtypes, void *priv) { struct fd_screen *screen = fd_screen(pscreen); struct pipe_context *pctx; int i; ctx->screen = screen; ctx->primtypes = primtypes; ctx->primtype_mask = 0; for (i = 0; i < PIPE_PRIM_MAX; i++) if (primtypes[i]) ctx->primtype_mask |= (1 << i); /* need some sane default in case state tracker doesn't * set some state: */ ctx->sample_mask = 0xffff; pctx = &ctx->base; pctx->screen = pscreen; pctx->priv = priv; pctx->flush = fd_context_flush; for (i = 0; i < ARRAY_SIZE(ctx->rings); i++) { ctx->rings[i] = fd_ringbuffer_new(screen->pipe, 0x100000); if (!ctx->rings[i]) goto fail; } fd_context_next_rb(pctx); fd_reset_wfi(ctx); util_dynarray_init(&ctx->draw_patches); util_slab_create(&ctx->transfer_pool, sizeof(struct pipe_transfer), 16, UTIL_SLAB_SINGLETHREADED); fd_draw_init(pctx); fd_resource_context_init(pctx); fd_query_context_init(pctx); fd_texture_init(pctx); fd_state_init(pctx); ctx->blitter = util_blitter_create(pctx); if (!ctx->blitter) goto fail; ctx->primconvert = util_primconvert_create(pctx, ctx->primtype_mask); if (!ctx->primconvert) goto fail; return pctx; fail: pctx->destroy(pctx); return NULL; }
/** * Create a new texture object, using the given template info. */ static struct pipe_resource * fd_resource_create(struct pipe_screen *pscreen, const struct pipe_resource *tmpl) { struct fd_screen *screen = fd_screen(pscreen); struct fd_resource *rsc = CALLOC_STRUCT(fd_resource); struct pipe_resource *prsc = &rsc->base.b; uint32_t flags, size; DBG("target=%d, format=%s, %ux%u@%u, array_size=%u, last_level=%u, " "nr_samples=%u, usage=%u, bind=%x, flags=%x", tmpl->target, util_format_name(tmpl->format), tmpl->width0, tmpl->height0, tmpl->depth0, tmpl->array_size, tmpl->last_level, tmpl->nr_samples, tmpl->usage, tmpl->bind, tmpl->flags); if (!rsc) return NULL; *prsc = *tmpl; pipe_reference_init(&prsc->reference, 1); prsc->screen = pscreen; rsc->base.vtbl = &fd_resource_vtbl; rsc->pitch = ALIGN(tmpl->width0, 32); rsc->cpp = util_format_get_blocksize(tmpl->format); size = rsc->pitch * tmpl->height0 * rsc->cpp; flags = DRM_FREEDRENO_GEM_TYPE_KMEM; /* TODO */ rsc->bo = fd_bo_new(screen->dev, size, flags); return prsc; }
struct fd_bo * fd_screen_bo_from_handle(struct pipe_screen *pscreen, struct winsys_handle *whandle, unsigned *out_stride) { struct fd_screen *screen = fd_screen(pscreen); struct fd_bo *bo; if (whandle->type == DRM_API_HANDLE_TYPE_SHARED) { bo = fd_bo_from_name(screen->dev, whandle->handle); } else if (whandle->type == DRM_API_HANDLE_TYPE_KMS) { bo = fd_bo_from_handle(screen->dev, whandle->handle, 0); } else if (whandle->type == DRM_API_HANDLE_TYPE_FD) { bo = fd_bo_from_dmabuf(screen->dev, whandle->handle); } else { DBG("Attempt to import unsupported handle type %d", whandle->type); return NULL; } if (!bo) { DBG("ref name 0x%08x failed", whandle->handle); return NULL; } *out_stride = whandle->stride; return bo; }
/* Hook up the driver-query entrypoints on the screen and build the
 * per-GPU performance-counter query tables.
 */
void
fd_query_screen_init(struct pipe_screen *pscreen)
{
	struct fd_screen *screen = fd_screen(pscreen);

	pscreen->get_driver_query_info = fd_get_driver_query_info;
	pscreen->get_driver_query_group_info = fd_get_driver_query_group_info;

	setup_perfcntr_query_info(screen);
}
/**
 * Create a new texture object, using the given template info.
 *
 * Later revision: copies the template, initializes reference/range/list
 * bookkeeping, picks layer-first vs. level-first layout (a4xx lays out
 * everything except 3D textures layer-first), computes per-slice layout
 * via setup_slices(), then allocates the backing BO via realloc_bo().
 * On BO allocation failure the partially-built resource is torn down
 * through fd_resource_destroy().
 */
static struct pipe_resource * fd_resource_create(struct pipe_screen *pscreen, const struct pipe_resource *tmpl) { struct fd_resource *rsc = CALLOC_STRUCT(fd_resource); struct pipe_resource *prsc = &rsc->base.b; uint32_t size; DBG("target=%d, format=%s, %ux%ux%u, array_size=%u, last_level=%u, " "nr_samples=%u, usage=%u, bind=%x, flags=%x", tmpl->target, util_format_name(tmpl->format), tmpl->width0, tmpl->height0, tmpl->depth0, tmpl->array_size, tmpl->last_level, tmpl->nr_samples, tmpl->usage, tmpl->bind, tmpl->flags); if (!rsc) return NULL; *prsc = *tmpl; pipe_reference_init(&prsc->reference, 1); list_inithead(&rsc->list); prsc->screen = pscreen; util_range_init(&rsc->valid_buffer_range); rsc->base.vtbl = &fd_resource_vtbl; rsc->cpp = util_format_get_blocksize(tmpl->format); assert(rsc->cpp); if (is_a4xx(fd_screen(pscreen))) { switch (tmpl->target) { case PIPE_TEXTURE_3D: /* TODO 3D_ARRAY? */ rsc->layer_first = false; break; default: rsc->layer_first = true; break; } } size = setup_slices(rsc, slice_alignment(pscreen, tmpl)); if (rsc->layer_first) { rsc->layer_size = align(size, 4096); size = rsc->layer_size * prsc->array_size; } realloc_bo(rsc, size); if (!rsc->bo) goto fail; return prsc; fail: fd_resource_destroy(pscreen, prsc); return NULL; }
/* Return a human-readable GPU name, e.g. "FD220" for device_id 220.
 *
 * NOTE(review): the string lives in a function-local static buffer, so
 * this is not thread-safe and a later call clobbers earlier results --
 * confirm callers copy the string if they hold onto it.
 */
static const char *
fd_screen_get_name(struct pipe_screen *pscreen)
{
	static char buffer[128];
	util_snprintf(buffer, sizeof(buffer), "FD%03d",
			fd_screen(pscreen)->device_id);
	return buffer;
}
/* Create the a3xx pipe_context.  Installs the a3xx state-object create
 * hooks and draw/gmem/texture/prog callbacks, hands off to
 * fd_context_init() for common setup (which tears down via the destroy
 * hook installed above if it fails), then allocates the VS/FS
 * private-memory BOs, the VSC size BO, and the vertex buffers used for
 * solid ops and gmem blits.
 * NOTE(review): fd_bo_new() results are not NULL-checked here --
 * presumably failure is tolerated downstream; confirm.
 */
struct pipe_context * fd3_context_create(struct pipe_screen *pscreen, void *priv) { struct fd_screen *screen = fd_screen(pscreen); struct fd3_context *fd3_ctx = CALLOC_STRUCT(fd3_context); struct pipe_context *pctx; if (!fd3_ctx) return NULL; pctx = &fd3_ctx->base.base; fd3_ctx->base.dev = fd_device_ref(screen->dev); fd3_ctx->base.screen = fd_screen(pscreen); pctx->destroy = fd3_context_destroy; pctx->create_blend_state = fd3_blend_state_create; pctx->create_rasterizer_state = fd3_rasterizer_state_create; pctx->create_depth_stencil_alpha_state = fd3_zsa_state_create; fd3_draw_init(pctx); fd3_gmem_init(pctx); fd3_texture_init(pctx); fd3_prog_init(pctx); pctx = fd_context_init(&fd3_ctx->base, pscreen, primtypes, priv); if (!pctx) return NULL; util_dynarray_init(&fd3_ctx->rbrc_patches); fd3_ctx->vs_pvt_mem = fd_bo_new(screen->dev, 0x2000, DRM_FREEDRENO_GEM_TYPE_KMEM); fd3_ctx->fs_pvt_mem = fd_bo_new(screen->dev, 0x2000, DRM_FREEDRENO_GEM_TYPE_KMEM); fd3_ctx->vsc_size_mem = fd_bo_new(screen->dev, 0x1000, DRM_FREEDRENO_GEM_TYPE_KMEM); fd3_ctx->solid_vbuf = create_solid_vertexbuf(pctx); fd3_ctx->blit_texcoord_vbuf = create_blit_texcoord_vertexbuf(pctx); return pctx; }
/* a3xx-specific screen setup: record render-target limit, create the
 * ir3 shader compiler, and install the a3xx context-create and
 * format-query entrypoints.
 */
void
fd3_screen_init(struct pipe_screen *pscreen)
{
	struct fd_screen *screen = fd_screen(pscreen);

	screen->max_rts = A3XX_MAX_RENDER_TARGETS;
	screen->compiler = ir3_compiler_create(screen->dev, screen->gpu_id);

	pscreen->context_create = fd3_context_create;
	pscreen->is_format_supported = fd3_screen_is_format_supported;
}
/* Return the NIR compiler options for ir3-based GPUs; older (a2xx)
 * compilers have none, so NULL is returned for them.  The ir/shader
 * arguments are part of the pipe_screen callback signature and are not
 * consulted here.
 */
static const void *
fd_get_compiler_options(struct pipe_screen *pscreen,
		enum pipe_shader_ir ir, unsigned shader)
{
	struct fd_screen *screen = fd_screen(pscreen);

	if (!is_ir3(screen))
		return NULL;

	return ir3_get_compiler_options();
}
/* Tear down the screen: release the kernel submit pipe and device
 * handle (each only if it was created), then free the screen struct
 * itself.
 */
static void
fd_screen_destroy(struct pipe_screen *pscreen)
{
	struct fd_screen *screen = fd_screen(pscreen);

	if (screen->pipe)
		fd_pipe_del(screen->pipe);

	if (screen->dev)
		fd_device_del(screen->dev);

	free(screen);
}
/* (Re)allocate the backing BO for a resource, discarding any previous
 * BO and its contents, and reset the timestamp/dirty tracking.
 * NOTE(review): the fd_bo_new() result is not NULL-checked here;
 * callers appear to check rsc->bo afterwards -- confirm.
 */
static void
realloc_bo(struct fd_resource *rsc, uint32_t size)
{
	struct fd_screen *screen = fd_screen(rsc->base.b.screen);
	uint32_t flags = DRM_FREEDRENO_GEM_CACHE_WCOMBINE |
			DRM_FREEDRENO_GEM_TYPE_KMEM; /* TODO */

	if (rsc->bo)
		fd_bo_del(rsc->bo);

	rsc->bo = fd_bo_new(screen->dev, size, flags);
	rsc->timestamp = 0;
	rsc->dirty = false;
}
/* Earliest, pre-per-generation context creation: allocates the ring
 * buffer and draw markers, the transfer slab pool, installs all the
 * state/draw/clear/prog/texture hooks directly, creates the blitter,
 * builds the solid-op vertex buffer, and emits initial setup state.
 * NOTE(review): fd_ringbuffer_new()/fd_ringmarker_new() results are not
 * NULL-checked here -- confirm failure handling happens elsewhere.
 */
struct pipe_context * fd_context_create(struct pipe_screen *pscreen, void *priv) { struct fd_screen *screen = fd_screen(pscreen); struct fd_context *ctx = CALLOC_STRUCT(fd_context); struct pipe_context *pctx; if (!ctx) return NULL; DBG(""); ctx->screen = screen; ctx->ring = fd_ringbuffer_new(screen->pipe, 0x100000); ctx->draw_start = fd_ringmarker_new(ctx->ring); ctx->draw_end = fd_ringmarker_new(ctx->ring); pctx = &ctx->base; pctx->screen = pscreen; pctx->priv = priv; pctx->flush = fd_context_flush; pctx->destroy = fd_context_destroy; util_slab_create(&ctx->transfer_pool, sizeof(struct pipe_transfer), 16, UTIL_SLAB_SINGLETHREADED); fd_vbo_init(pctx); fd_blend_init(pctx); fd_rasterizer_init(pctx); fd_zsa_init(pctx); fd_state_init(pctx); fd_resource_context_init(pctx); fd_clear_init(pctx); fd_prog_init(pctx); fd_texture_init(pctx); ctx->blitter = util_blitter_create(pctx); if (!ctx->blitter) { fd_context_destroy(pctx); return NULL; } /* construct vertex state used for solid ops (clear, and gmem<->mem) */ ctx->solid_vertexbuf = create_solid_vertexbuf(pctx); fd_state_emit_setup(pctx); return pctx; }
/* Return a timestamp in nanoseconds.  When the kernel exposes the GPU
 * timestamp counter, the raw value is scaled by the GPU max frequency;
 * otherwise fall back to CPU time adjusted by the recorded cpu/gpu
 * delta.
 */
static uint64_t
fd_screen_get_timestamp(struct pipe_screen *pscreen)
{
	struct fd_screen *screen = fd_screen(pscreen);

	if (screen->has_timestamp) {
		uint64_t n;
		fd_pipe_get_param(screen->pipe, FD_TIMESTAMP, &n);
		debug_assert(screen->max_freq > 0);
		/* NOTE(review): n * 1000000000 can overflow uint64_t for
		 * large raw counter values -- confirm the counter width. */
		return n * 1000000000 / screen->max_freq;
	} else {
		int64_t cpu_time = os_time_get() * 1000;
		return cpu_time + screen->cpu_gpu_time_delta;
	}
}
/* Early generation-independent context init (single ring, no primtypes
 * argument yet): set defaults, allocate the ring buffer and draw
 * markers, the transfer slab pool, install the core hooks, and create
 * the blitter.  On failure it tears down through pctx->destroy, which
 * the caller must have installed before calling this.
 * NOTE(review): fd_ringmarker_new() results are not NULL-checked --
 * confirm failure is handled downstream.
 */
struct pipe_context * fd_context_init(struct fd_context *ctx, struct pipe_screen *pscreen, void *priv) { struct fd_screen *screen = fd_screen(pscreen); struct pipe_context *pctx; ctx->screen = screen; /* need some sane default in case state tracker doesn't * set some state: */ ctx->sample_mask = 0xffff; pctx = &ctx->base; pctx->screen = pscreen; pctx->priv = priv; pctx->flush = fd_context_flush; ctx->ring = fd_ringbuffer_new(screen->pipe, 0x100000); if (!ctx->ring) goto fail; ctx->draw_start = fd_ringmarker_new(ctx->ring); ctx->draw_end = fd_ringmarker_new(ctx->ring); util_slab_create(&ctx->transfer_pool, sizeof(struct pipe_transfer), 16, UTIL_SLAB_SINGLETHREADED); fd_draw_init(pctx); fd_resource_context_init(pctx); fd_texture_init(pctx); fd_state_init(pctx); ctx->blitter = util_blitter_create(pctx); if (!ctx->blitter) goto fail; return pctx; fail: pctx->destroy(pctx); return NULL; }
/* Import a buffer object from a winsys handle.  This early revision
 * only supports flink names; the stride recorded in the handle is
 * returned through out_stride on success.
 */
struct fd_bo *
fd_screen_bo_from_handle(struct pipe_screen *pscreen,
		struct winsys_handle *whandle,
		unsigned *out_stride)
{
	struct fd_screen *screen = fd_screen(pscreen);
	struct fd_bo *bo = fd_bo_from_name(screen->dev, whandle->handle);

	if (!bo) {
		DBG("ref name 0x%08x failed", whandle->handle);
		return NULL;
	}

	*out_stride = whandle->stride;
	return bo;
}
/* (Re)allocate the backing BO for a resource, discarding any previous
 * BO and its contents, and reset the timestamp/dirty tracking.
 * NOTE(review): the fd_bo_new() result is not NULL-checked here;
 * callers appear to check rsc->bo afterwards -- confirm.
 */
static void
realloc_bo(struct fd_resource *rsc, uint32_t size)
{
	struct fd_screen *screen = fd_screen(rsc->base.b.screen);
	uint32_t flags = DRM_FREEDRENO_GEM_CACHE_WCOMBINE |
			DRM_FREEDRENO_GEM_TYPE_KMEM; /* TODO */

	/* if we start using things other than write-combine,
	 * be sure to check for PIPE_RESOURCE_FLAG_MAP_COHERENT
	 */

	if (rsc->bo)
		fd_bo_del(rsc->bo);

	rsc->bo = fd_bo_new(screen->dev, size, flags);
	rsc->timestamp = 0;
	rsc->dirty = false;
}
/* Describe one performance-counter group.  Following the gallium
 * driver-query convention, a NULL info pointer means "how many groups
 * are there?", an out-of-range index returns 0, and otherwise the
 * group's name and counter counts are filled in.
 */
static int
fd_get_driver_query_group_info(struct pipe_screen *pscreen, unsigned index,
		struct pipe_driver_query_group_info *info)
{
	struct fd_screen *screen = fd_screen(pscreen);

	if (!info)
		return screen->num_perfcntr_groups;

	if (index >= screen->num_perfcntr_groups)
		return 0;

	const struct fd_perfcntr_group *group = &screen->perfcntr_groups[index];

	info->name = group->name;
	info->max_active_queries = group->num_counters;
	info->num_queries = group->num_countables;

	return 1;
}
/* Describe one driver query.  A NULL info pointer asks for the total
 * query count (software queries plus perf counters).  Indices below
 * ARRAY_SIZE(sw_query_list) select a software query; the remainder
 * index into the perf-counter query table; anything past that returns 0.
 */
static int
fd_get_driver_query_info(struct pipe_screen *pscreen,
		unsigned index, struct pipe_driver_query_info *info)
{
	struct fd_screen *screen = fd_screen(pscreen);

	if (!info)
		return ARRAY_SIZE(sw_query_list) + screen->num_perfcntr_queries;

	if (index < ARRAY_SIZE(sw_query_list)) {
		*info = sw_query_list[index];
		return 1;
	}

	index -= ARRAY_SIZE(sw_query_list);

	if (index >= screen->num_perfcntr_queries)
		return 0;

	*info = screen->perfcntr_queries[index];
	return 1;
}
/* Install the generation-independent state-setting entrypoints on the
 * pipe_context: fixed-function state setters, bind/delete hooks for the
 * CSO state objects, vertex-elements state, and stream-output targets.
 * The compute-related hooks are only installed when the screen reports
 * compute support.
 */
void fd_state_init(struct pipe_context *pctx) { pctx->set_blend_color = fd_set_blend_color; pctx->set_stencil_ref = fd_set_stencil_ref; pctx->set_clip_state = fd_set_clip_state; pctx->set_sample_mask = fd_set_sample_mask; pctx->set_min_samples = fd_set_min_samples; pctx->set_constant_buffer = fd_set_constant_buffer; pctx->set_shader_buffers = fd_set_shader_buffers; pctx->set_shader_images = fd_set_shader_images; pctx->set_framebuffer_state = fd_set_framebuffer_state; pctx->set_polygon_stipple = fd_set_polygon_stipple; pctx->set_scissor_states = fd_set_scissor_states; pctx->set_viewport_states = fd_set_viewport_states; pctx->set_vertex_buffers = fd_set_vertex_buffers; pctx->bind_blend_state = fd_blend_state_bind; pctx->delete_blend_state = fd_blend_state_delete; pctx->bind_rasterizer_state = fd_rasterizer_state_bind; pctx->delete_rasterizer_state = fd_rasterizer_state_delete; pctx->bind_depth_stencil_alpha_state = fd_zsa_state_bind; pctx->delete_depth_stencil_alpha_state = fd_zsa_state_delete; pctx->create_vertex_elements_state = fd_vertex_state_create; pctx->delete_vertex_elements_state = fd_vertex_state_delete; pctx->bind_vertex_elements_state = fd_vertex_state_bind; pctx->create_stream_output_target = fd_create_stream_output_target; pctx->stream_output_target_destroy = fd_stream_output_target_destroy; pctx->set_stream_output_targets = fd_set_stream_output_targets; if (has_compute(fd_screen(pctx->screen))) { pctx->bind_compute_state = fd_bind_compute_state; pctx->set_compute_resources = fd_set_compute_resources; pctx->set_global_binding = fd_set_global_binding; } }
/* Tear down the screen: release the kernel submit pipe and device
 * handle (each only if created), then the batch cache, the transfer
 * slab pool's parent, the screen lock, the ir3 compiler, and finally
 * the screen struct itself.
 */
static void
fd_screen_destroy(struct pipe_screen *pscreen)
{
	struct fd_screen *screen = fd_screen(pscreen);

	if (screen->pipe)
		fd_pipe_del(screen->pipe);

	if (screen->dev)
		fd_device_del(screen->dev);

	fd_bc_fini(&screen->batch_cache);

	slab_destroy_parent(&screen->transfer_pool);

	mtx_destroy(&screen->lock);

	ralloc_free(screen->compiler);

	free(screen);
}
/* (Re)allocate the backing BO for a resource, discarding any previous
 * BO and its contents, and reset all per-BO tracking state (timestamp,
 * pending status/context, batch list membership, and the valid buffer
 * range).
 * NOTE(review): the fd_bo_new() result is not NULL-checked here;
 * callers appear to check rsc->bo afterwards -- confirm.
 */
static void
realloc_bo(struct fd_resource *rsc, uint32_t size)
{
	struct fd_screen *screen = fd_screen(rsc->base.b.screen);
	uint32_t flags = DRM_FREEDRENO_GEM_CACHE_WCOMBINE |
			DRM_FREEDRENO_GEM_TYPE_KMEM; /* TODO */

	/* if we start using things other than write-combine,
	 * be sure to check for PIPE_RESOURCE_FLAG_MAP_COHERENT
	 */

	if (rsc->bo)
		fd_bo_del(rsc->bo);

	rsc->bo = fd_bo_new(screen->dev, size, flags);
	rsc->timestamp = 0;
	rsc->status = 0;
	rsc->pending_ctx = NULL;
	list_delinit(&rsc->list);
	util_range_set_empty(&rsc->valid_buffer_range);
}
/* Report device capabilities to the state tracker (a2xx/a3xx-era
 * revision).  Mostly boolean caps; notable non-boolean answers:
 * constant-buffer offset alignment of 256, GLSL level 130 only for
 * gpu_id >= 300 with the glsl130 flag, occlusion query only for
 * gpu_id >= 300, texel/gather offsets of [-8, 7], single render
 * target, and MAX_MIP_LEVELS texture levels.  Unknown caps log a
 * debug message and return 0.
 */
/* TODO either move caps to a2xx/a3xx specific code, or maybe have some tables for things that differ if the delta is not too much.. */ static int fd_screen_get_param(struct pipe_screen *pscreen, enum pipe_cap param) { struct fd_screen *screen = fd_screen(pscreen); /* this is probably not totally correct.. but it's a start: */ switch (param) { /* Supported features (boolean caps). */ case PIPE_CAP_NPOT_TEXTURES: case PIPE_CAP_MIXED_FRAMEBUFFER_SIZES: case PIPE_CAP_TWO_SIDED_STENCIL: case PIPE_CAP_ANISOTROPIC_FILTER: case PIPE_CAP_POINT_SPRITE: case PIPE_CAP_TEXTURE_SHADOW_MAP: case PIPE_CAP_TEXTURE_MIRROR_CLAMP: case PIPE_CAP_BLEND_EQUATION_SEPARATE: case PIPE_CAP_TEXTURE_SWIZZLE: case PIPE_CAP_VERTEX_ELEMENT_INSTANCE_DIVISOR: case PIPE_CAP_MIXED_COLORBUFFER_FORMATS: case PIPE_CAP_TGSI_FS_COORD_ORIGIN_UPPER_LEFT: case PIPE_CAP_TGSI_FS_COORD_PIXEL_CENTER_HALF_INTEGER: case PIPE_CAP_SEAMLESS_CUBE_MAP: case PIPE_CAP_VERTEX_COLOR_UNCLAMPED: case PIPE_CAP_QUADS_FOLLOW_PROVOKING_VERTEX_CONVENTION: case PIPE_CAP_TGSI_INSTANCEID: case PIPE_CAP_VERTEX_BUFFER_OFFSET_4BYTE_ALIGNED_ONLY: case PIPE_CAP_VERTEX_BUFFER_STRIDE_4BYTE_ALIGNED_ONLY: case PIPE_CAP_VERTEX_ELEMENT_SRC_OFFSET_4BYTE_ALIGNED_ONLY: case PIPE_CAP_COMPUTE: case PIPE_CAP_START_INSTANCE: case PIPE_CAP_MAX_DUAL_SOURCE_RENDER_TARGETS: case PIPE_CAP_USER_CONSTANT_BUFFERS: case PIPE_CAP_BUFFER_MAP_PERSISTENT_COHERENT: return 1; case PIPE_CAP_SHADER_STENCIL_EXPORT: case PIPE_CAP_TGSI_TEXCOORD: case PIPE_CAP_PREFER_BLIT_BASED_TEXTURE_TRANSFER: case PIPE_CAP_CONDITIONAL_RENDER: case PIPE_CAP_PRIMITIVE_RESTART: case PIPE_CAP_TEXTURE_MULTISAMPLE: case PIPE_CAP_TEXTURE_BARRIER: case PIPE_CAP_SM3: return 0; case PIPE_CAP_CONSTANT_BUFFER_OFFSET_ALIGNMENT: return 256; case PIPE_CAP_GLSL_FEATURE_LEVEL: return ((screen->gpu_id >= 300) && glsl130) ? 130 : 120; /* Unsupported features. 
*/ case PIPE_CAP_INDEP_BLEND_ENABLE: case PIPE_CAP_INDEP_BLEND_FUNC: case PIPE_CAP_DEPTH_CLIP_DISABLE: case PIPE_CAP_SEAMLESS_CUBE_MAP_PER_TEXTURE: case PIPE_CAP_TGSI_FS_COORD_ORIGIN_LOWER_LEFT: case PIPE_CAP_TGSI_FS_COORD_PIXEL_CENTER_INTEGER: case PIPE_CAP_TGSI_CAN_COMPACT_CONSTANTS: case PIPE_CAP_FRAGMENT_COLOR_CLAMPED: case PIPE_CAP_VERTEX_COLOR_CLAMPED: case PIPE_CAP_USER_VERTEX_BUFFERS: case PIPE_CAP_USER_INDEX_BUFFERS: case PIPE_CAP_QUERY_PIPELINE_STATISTICS: case PIPE_CAP_TEXTURE_BORDER_COLOR_QUIRK: case PIPE_CAP_TGSI_VS_LAYER: case PIPE_CAP_MAX_TEXTURE_GATHER_COMPONENTS: case PIPE_CAP_TEXTURE_GATHER_SM5: case PIPE_CAP_FAKE_SW_MSAA: case PIPE_CAP_TEXTURE_QUERY_LOD: case PIPE_CAP_SAMPLE_SHADING: case PIPE_CAP_TEXTURE_GATHER_OFFSETS: case PIPE_CAP_TGSI_VS_WINDOW_SPACE_POSITION: return 0; /* Stream output. */ case PIPE_CAP_MAX_STREAM_OUTPUT_BUFFERS: case PIPE_CAP_STREAM_OUTPUT_PAUSE_RESUME: case PIPE_CAP_MAX_STREAM_OUTPUT_SEPARATE_COMPONENTS: case PIPE_CAP_MAX_STREAM_OUTPUT_INTERLEAVED_COMPONENTS: return 0; /* Geometry shader output, unsupported. */ case PIPE_CAP_MAX_GEOMETRY_OUTPUT_VERTICES: case PIPE_CAP_MAX_GEOMETRY_TOTAL_OUTPUT_COMPONENTS: return 0; /* Texturing. */ case PIPE_CAP_MAX_TEXTURE_2D_LEVELS: case PIPE_CAP_MAX_TEXTURE_3D_LEVELS: case PIPE_CAP_MAX_TEXTURE_CUBE_LEVELS: return MAX_MIP_LEVELS; case PIPE_CAP_MAX_TEXTURE_ARRAY_LAYERS: return 0; /* TODO: a3xx+ should support (required in gles3) */ /* Render targets. */ case PIPE_CAP_MAX_RENDER_TARGETS: return 1; /* Queries. */ case PIPE_CAP_QUERY_TIME_ELAPSED: case PIPE_CAP_QUERY_TIMESTAMP: return 0; case PIPE_CAP_OCCLUSION_QUERY: return (screen->gpu_id >= 300) ? 1 : 0; case PIPE_CAP_MIN_TEXTURE_GATHER_OFFSET: case PIPE_CAP_MIN_TEXEL_OFFSET: return -8; case PIPE_CAP_MAX_TEXTURE_GATHER_OFFSET: case PIPE_CAP_MAX_TEXEL_OFFSET: return 7; case PIPE_CAP_ENDIANNESS: return PIPE_ENDIAN_LITTLE; case PIPE_CAP_MIN_MAP_BUFFER_ALIGNMENT: return 64; default: DBG("unknown param %d", param); return 0; } }
/* Report per-shader-stage capabilities (ir3-aware revision).  Only
 * vertex and fragment stages are supported; geometry/compute return 0.
 * Constant-buffer size and indirect-addressing answers depend on
 * whether the GPU uses the ir3 compiler (a3xx/a4xx); the a2xx compiler
 * does not handle indirect addressing.  Integer support is gated on
 * !glsl120 and ir3; NIR is preferred only when FD_DBG_NIR is set.
 */
static int fd_screen_get_shader_param(struct pipe_screen *pscreen, unsigned shader, enum pipe_shader_cap param) { struct fd_screen *screen = fd_screen(pscreen); switch(shader) { case PIPE_SHADER_FRAGMENT: case PIPE_SHADER_VERTEX: break; case PIPE_SHADER_COMPUTE: case PIPE_SHADER_GEOMETRY: /* maye we could emulate.. */ return 0; default: DBG("unknown shader type %d", shader); return 0; } /* this is probably not totally correct.. but it's a start: */ switch (param) { case PIPE_SHADER_CAP_MAX_INSTRUCTIONS: case PIPE_SHADER_CAP_MAX_ALU_INSTRUCTIONS: case PIPE_SHADER_CAP_MAX_TEX_INSTRUCTIONS: case PIPE_SHADER_CAP_MAX_TEX_INDIRECTIONS: return 16384; case PIPE_SHADER_CAP_MAX_CONTROL_FLOW_DEPTH: return 8; /* XXX */ case PIPE_SHADER_CAP_MAX_INPUTS: case PIPE_SHADER_CAP_MAX_OUTPUTS: return 16; case PIPE_SHADER_CAP_MAX_TEMPS: return 64; /* Max native temporaries. */ case PIPE_SHADER_CAP_MAX_CONST_BUFFER_SIZE: /* NOTE: seems to be limit for a3xx is actually 512 but * split between VS and FS. Use lower limit of 256 to * avoid getting into impossible situations: */ return ((is_a3xx(screen) || is_a4xx(screen)) ? 4096 : 64) * sizeof(float[4]); case PIPE_SHADER_CAP_MAX_CONST_BUFFERS: return is_ir3(screen) ? 16 : 1; case PIPE_SHADER_CAP_MAX_PREDS: return 0; /* nothing uses this */ case PIPE_SHADER_CAP_TGSI_CONT_SUPPORTED: return 1; case PIPE_SHADER_CAP_INDIRECT_INPUT_ADDR: case PIPE_SHADER_CAP_INDIRECT_OUTPUT_ADDR: /* Technically this should be the same as for TEMP/CONST, since * everything is just normal registers. This is just temporary * hack until load_input/store_output handle arrays in a similar * way as load_var/store_var.. */ return 0; case PIPE_SHADER_CAP_INDIRECT_TEMP_ADDR: case PIPE_SHADER_CAP_INDIRECT_CONST_ADDR: /* a2xx compiler doesn't handle indirect: */ return is_ir3(screen) ? 
1 : 0; case PIPE_SHADER_CAP_SUBROUTINES: case PIPE_SHADER_CAP_DOUBLES: case PIPE_SHADER_CAP_TGSI_DROUND_SUPPORTED: case PIPE_SHADER_CAP_TGSI_DFRACEXP_DLDEXP_SUPPORTED: case PIPE_SHADER_CAP_TGSI_FMA_SUPPORTED: case PIPE_SHADER_CAP_TGSI_ANY_INOUT_DECL_RANGE: return 0; case PIPE_SHADER_CAP_TGSI_SQRT_SUPPORTED: return 1; case PIPE_SHADER_CAP_INTEGERS: if (glsl120) return 0; return is_ir3(screen) ? 1 : 0; case PIPE_SHADER_CAP_MAX_TEXTURE_SAMPLERS: case PIPE_SHADER_CAP_MAX_SAMPLER_VIEWS: return 16; case PIPE_SHADER_CAP_PREFERRED_IR: if ((fd_mesa_debug & FD_DBG_NIR) && is_ir3(screen)) return PIPE_SHADER_IR_NIR; return PIPE_SHADER_IR_TGSI; case PIPE_SHADER_CAP_SUPPORTED_IRS: return 0; case PIPE_SHADER_CAP_MAX_UNROLL_ITERATIONS_HINT: return 32; case PIPE_SHADER_CAP_MAX_SHADER_BUFFERS: case PIPE_SHADER_CAP_MAX_SHADER_IMAGES: return 0; } debug_printf("unknown shader param %d\n", param); return 0; }
/* Report per-shader-stage capabilities (older revision, TGSI-only).
 * Only vertex and fragment stages are supported; geometry/compute
 * return 0.  Unlike the later revision, all four indirect-addressing
 * caps return 1 unconditionally and the preferred IR is always TGSI.
 */
static int fd_screen_get_shader_param(struct pipe_screen *pscreen, unsigned shader, enum pipe_shader_cap param) { struct fd_screen *screen = fd_screen(pscreen); switch(shader) { case PIPE_SHADER_FRAGMENT: case PIPE_SHADER_VERTEX: break; case PIPE_SHADER_COMPUTE: case PIPE_SHADER_GEOMETRY: /* maye we could emulate.. */ return 0; default: DBG("unknown shader type %d", shader); return 0; } /* this is probably not totally correct.. but it's a start: */ switch (param) { case PIPE_SHADER_CAP_MAX_INSTRUCTIONS: case PIPE_SHADER_CAP_MAX_ALU_INSTRUCTIONS: case PIPE_SHADER_CAP_MAX_TEX_INSTRUCTIONS: case PIPE_SHADER_CAP_MAX_TEX_INDIRECTIONS: return 16384; case PIPE_SHADER_CAP_MAX_CONTROL_FLOW_DEPTH: return 8; /* XXX */ case PIPE_SHADER_CAP_MAX_INPUTS: case PIPE_SHADER_CAP_MAX_OUTPUTS: return 16; case PIPE_SHADER_CAP_MAX_TEMPS: return 64; /* Max native temporaries. */ case PIPE_SHADER_CAP_MAX_CONST_BUFFER_SIZE: /* NOTE: seems to be limit for a3xx is actually 512 but * split between VS and FS. Use lower limit of 256 to * avoid getting into impossible situations: */ return ((is_a3xx(screen) || is_a4xx(screen)) ? 4096 : 64) * sizeof(float[4]); case PIPE_SHADER_CAP_MAX_CONST_BUFFERS: return is_ir3(screen) ? 16 : 1; case PIPE_SHADER_CAP_MAX_PREDS: return 0; /* nothing uses this */ case PIPE_SHADER_CAP_TGSI_CONT_SUPPORTED: return 1; case PIPE_SHADER_CAP_INDIRECT_INPUT_ADDR: case PIPE_SHADER_CAP_INDIRECT_OUTPUT_ADDR: case PIPE_SHADER_CAP_INDIRECT_TEMP_ADDR: case PIPE_SHADER_CAP_INDIRECT_CONST_ADDR: return 1; case PIPE_SHADER_CAP_SUBROUTINES: case PIPE_SHADER_CAP_DOUBLES: case PIPE_SHADER_CAP_TGSI_DROUND_SUPPORTED: case PIPE_SHADER_CAP_TGSI_DFRACEXP_DLDEXP_SUPPORTED: case PIPE_SHADER_CAP_TGSI_FMA_SUPPORTED: case PIPE_SHADER_CAP_TGSI_ANY_INOUT_DECL_RANGE: return 0; case PIPE_SHADER_CAP_TGSI_SQRT_SUPPORTED: return 1; case PIPE_SHADER_CAP_INTEGERS: if (glsl120) return 0; return is_ir3(screen) ? 
1 : 0; case PIPE_SHADER_CAP_MAX_TEXTURE_SAMPLERS: case PIPE_SHADER_CAP_MAX_SAMPLER_VIEWS: return 16; case PIPE_SHADER_CAP_PREFERRED_IR: return PIPE_SHADER_IR_TGSI; case PIPE_SHADER_CAP_MAX_UNROLL_ITERATIONS_HINT: return 32; } debug_printf("unknown shader param %d\n", param); return 0; }
/* Report device capabilities (a3xx/a4xx-era revision).  Many boolean
 * caps are gated on is_a3xx()/is_a4xx(); stream-output caps require
 * ir3; texture-buffer alignment/size, float-linear, cube-map arrays,
 * etc. differ between a3xx and a4xx.  Also answers vendor/device IDs
 * and a placeholder video-memory size.  Unknown params fall out of the
 * switch and log via debug_printf.
 */
/* TODO either move caps to a2xx/a3xx specific code, or maybe have some tables for things that differ if the delta is not too much.. */ static int fd_screen_get_param(struct pipe_screen *pscreen, enum pipe_cap param) { struct fd_screen *screen = fd_screen(pscreen); /* this is probably not totally correct.. but it's a start: */ switch (param) { /* Supported features (boolean caps). */ case PIPE_CAP_NPOT_TEXTURES: case PIPE_CAP_MIXED_FRAMEBUFFER_SIZES: case PIPE_CAP_TWO_SIDED_STENCIL: case PIPE_CAP_ANISOTROPIC_FILTER: case PIPE_CAP_POINT_SPRITE: case PIPE_CAP_TEXTURE_SHADOW_MAP: case PIPE_CAP_BLEND_EQUATION_SEPARATE: case PIPE_CAP_TEXTURE_SWIZZLE: case PIPE_CAP_MIXED_COLORBUFFER_FORMATS: case PIPE_CAP_TGSI_FS_COORD_ORIGIN_UPPER_LEFT: case PIPE_CAP_TGSI_FS_COORD_PIXEL_CENTER_INTEGER: case PIPE_CAP_SEAMLESS_CUBE_MAP: case PIPE_CAP_VERTEX_COLOR_UNCLAMPED: case PIPE_CAP_QUADS_FOLLOW_PROVOKING_VERTEX_CONVENTION: case PIPE_CAP_VERTEX_BUFFER_OFFSET_4BYTE_ALIGNED_ONLY: case PIPE_CAP_VERTEX_BUFFER_STRIDE_4BYTE_ALIGNED_ONLY: case PIPE_CAP_VERTEX_ELEMENT_SRC_OFFSET_4BYTE_ALIGNED_ONLY: case PIPE_CAP_USER_CONSTANT_BUFFERS: case PIPE_CAP_BUFFER_MAP_PERSISTENT_COHERENT: case PIPE_CAP_VERTEXID_NOBASE: return 1; case PIPE_CAP_SHADER_STENCIL_EXPORT: case PIPE_CAP_TGSI_TEXCOORD: case PIPE_CAP_PREFER_BLIT_BASED_TEXTURE_TRANSFER: case PIPE_CAP_TEXTURE_MULTISAMPLE: case PIPE_CAP_TEXTURE_BARRIER: case PIPE_CAP_TEXTURE_MIRROR_CLAMP: case PIPE_CAP_COMPUTE: return 0; case PIPE_CAP_SM3: case PIPE_CAP_PRIMITIVE_RESTART: case PIPE_CAP_TGSI_INSTANCEID: case PIPE_CAP_VERTEX_ELEMENT_INSTANCE_DIVISOR: case PIPE_CAP_INDEP_BLEND_ENABLE: case PIPE_CAP_INDEP_BLEND_FUNC: case PIPE_CAP_TEXTURE_BUFFER_OBJECTS: case PIPE_CAP_TEXTURE_HALF_FLOAT_LINEAR: case PIPE_CAP_CONDITIONAL_RENDER: case PIPE_CAP_CONDITIONAL_RENDER_INVERTED: case PIPE_CAP_FAKE_SW_MSAA: case PIPE_CAP_SEAMLESS_CUBE_MAP_PER_TEXTURE: case PIPE_CAP_DEPTH_CLIP_DISABLE: case PIPE_CAP_CLIP_HALFZ: return is_a3xx(screen) || is_a4xx(screen); case 
PIPE_CAP_TEXTURE_BUFFER_OFFSET_ALIGNMENT: if (is_a3xx(screen)) return 16; if (is_a4xx(screen)) return 32; return 0; case PIPE_CAP_MAX_TEXTURE_BUFFER_SIZE: /* We could possibly emulate more by pretending 2d/rect textures and * splitting high bits of index into 2nd dimension.. */ if (is_a3xx(screen)) return 8192; if (is_a4xx(screen)) return 16384; return 0; case PIPE_CAP_TEXTURE_FLOAT_LINEAR: case PIPE_CAP_CUBE_MAP_ARRAY: case PIPE_CAP_START_INSTANCE: case PIPE_CAP_SAMPLER_VIEW_TARGET: case PIPE_CAP_TEXTURE_QUERY_LOD: return is_a4xx(screen); case PIPE_CAP_CONSTANT_BUFFER_OFFSET_ALIGNMENT: return 256; case PIPE_CAP_GLSL_FEATURE_LEVEL: if (glsl120) return 120; return is_ir3(screen) ? 140 : 120; /* Unsupported features. */ case PIPE_CAP_TGSI_FS_COORD_ORIGIN_LOWER_LEFT: case PIPE_CAP_TGSI_FS_COORD_PIXEL_CENTER_HALF_INTEGER: case PIPE_CAP_TGSI_CAN_COMPACT_CONSTANTS: case PIPE_CAP_FRAGMENT_COLOR_CLAMPED: case PIPE_CAP_VERTEX_COLOR_CLAMPED: case PIPE_CAP_USER_VERTEX_BUFFERS: case PIPE_CAP_USER_INDEX_BUFFERS: case PIPE_CAP_QUERY_PIPELINE_STATISTICS: case PIPE_CAP_TEXTURE_BORDER_COLOR_QUIRK: case PIPE_CAP_TGSI_VS_LAYER_VIEWPORT: case PIPE_CAP_MAX_TEXTURE_GATHER_COMPONENTS: case PIPE_CAP_TEXTURE_GATHER_SM5: case PIPE_CAP_SAMPLE_SHADING: case PIPE_CAP_TEXTURE_GATHER_OFFSETS: case PIPE_CAP_TGSI_VS_WINDOW_SPACE_POSITION: case PIPE_CAP_DRAW_INDIRECT: case PIPE_CAP_MULTI_DRAW_INDIRECT: case PIPE_CAP_MULTI_DRAW_INDIRECT_PARAMS: case PIPE_CAP_TGSI_FS_FINE_DERIVATIVE: case PIPE_CAP_POLYGON_OFFSET_CLAMP: case PIPE_CAP_MULTISAMPLE_Z_RESOLVE: case PIPE_CAP_RESOURCE_FROM_USER_MEMORY: case PIPE_CAP_DEVICE_RESET_STATUS_QUERY: case PIPE_CAP_MAX_SHADER_PATCH_VARYINGS: case PIPE_CAP_DEPTH_BOUNDS_TEST: case PIPE_CAP_TGSI_TXQS: case PIPE_CAP_FORCE_PERSAMPLE_INTERP: case PIPE_CAP_SHAREABLE_SHADERS: case PIPE_CAP_COPY_BETWEEN_COMPRESSED_AND_PLAIN_FORMATS: case PIPE_CAP_CLEAR_TEXTURE: case PIPE_CAP_DRAW_PARAMETERS: case PIPE_CAP_TGSI_PACK_HALF_FLOAT: return 0; case PIPE_CAP_MAX_VIEWPORTS: return 1; 
/* Stream output. */ case PIPE_CAP_MAX_STREAM_OUTPUT_BUFFERS: if (is_ir3(screen)) return PIPE_MAX_SO_BUFFERS; return 0; case PIPE_CAP_STREAM_OUTPUT_PAUSE_RESUME: if (is_ir3(screen)) return 1; return 0; case PIPE_CAP_MAX_STREAM_OUTPUT_SEPARATE_COMPONENTS: case PIPE_CAP_MAX_STREAM_OUTPUT_INTERLEAVED_COMPONENTS: if (is_ir3(screen)) return 16 * 4; /* should only be shader out limit? */ return 0; /* Geometry shader output, unsupported. */ case PIPE_CAP_MAX_GEOMETRY_OUTPUT_VERTICES: case PIPE_CAP_MAX_GEOMETRY_TOTAL_OUTPUT_COMPONENTS: case PIPE_CAP_MAX_VERTEX_STREAMS: return 0; case PIPE_CAP_MAX_VERTEX_ATTRIB_STRIDE: return 2048; /* Texturing. */ case PIPE_CAP_MAX_TEXTURE_2D_LEVELS: case PIPE_CAP_MAX_TEXTURE_CUBE_LEVELS: return MAX_MIP_LEVELS; case PIPE_CAP_MAX_TEXTURE_3D_LEVELS: return 11; case PIPE_CAP_MAX_TEXTURE_ARRAY_LAYERS: return (is_a3xx(screen) || is_a4xx(screen)) ? 256 : 0; /* Render targets. */ case PIPE_CAP_MAX_RENDER_TARGETS: return screen->max_rts; case PIPE_CAP_MAX_DUAL_SOURCE_RENDER_TARGETS: return is_a3xx(screen) ? 1 : 0; /* Queries. */ case PIPE_CAP_QUERY_TIME_ELAPSED: case PIPE_CAP_QUERY_TIMESTAMP: return 0; case PIPE_CAP_OCCLUSION_QUERY: return is_a3xx(screen) || is_a4xx(screen); case PIPE_CAP_MIN_TEXTURE_GATHER_OFFSET: case PIPE_CAP_MIN_TEXEL_OFFSET: return -8; case PIPE_CAP_MAX_TEXTURE_GATHER_OFFSET: case PIPE_CAP_MAX_TEXEL_OFFSET: return 7; case PIPE_CAP_ENDIANNESS: return PIPE_ENDIAN_LITTLE; case PIPE_CAP_MIN_MAP_BUFFER_ALIGNMENT: return 64; case PIPE_CAP_VENDOR_ID: return 0x5143; case PIPE_CAP_DEVICE_ID: return 0xFFFFFFFF; case PIPE_CAP_ACCELERATED: return 1; case PIPE_CAP_VIDEO_MEMORY: DBG("FINISHME: The value returned is incorrect\n"); return 10; case PIPE_CAP_UMA: return 1; } debug_printf("unknown param %d\n", param); return 0; }
/* Report per-shader-stage capabilities (earliest revision, with the
 * old MAX_CONSTS/MAX_ADDRS caps).  Only vertex and fragment stages are
 * supported.  Constant count and integer support depend on gpu_id >=
 * 300 (integers additionally on the glsl130 flag); the preferred IR is
 * always TGSI.  Unknown params log a debug message and return 0.
 */
static int fd_screen_get_shader_param(struct pipe_screen *pscreen, unsigned shader, enum pipe_shader_cap param) { struct fd_screen *screen = fd_screen(pscreen); switch(shader) { case PIPE_SHADER_FRAGMENT: case PIPE_SHADER_VERTEX: break; case PIPE_SHADER_COMPUTE: case PIPE_SHADER_GEOMETRY: /* maye we could emulate.. */ return 0; default: DBG("unknown shader type %d", shader); return 0; } /* this is probably not totally correct.. but it's a start: */ switch (param) { case PIPE_SHADER_CAP_MAX_INSTRUCTIONS: case PIPE_SHADER_CAP_MAX_ALU_INSTRUCTIONS: case PIPE_SHADER_CAP_MAX_TEX_INSTRUCTIONS: case PIPE_SHADER_CAP_MAX_TEX_INDIRECTIONS: return 16384; case PIPE_SHADER_CAP_MAX_CONTROL_FLOW_DEPTH: return 8; /* XXX */ case PIPE_SHADER_CAP_MAX_INPUTS: return 16; case PIPE_SHADER_CAP_MAX_TEMPS: return 64; /* Max native temporaries. */ case PIPE_SHADER_CAP_MAX_ADDRS: return 1; /* Max native address registers */ case PIPE_SHADER_CAP_MAX_CONSTS: return (screen->gpu_id >= 300) ? 1024 : 64; case PIPE_SHADER_CAP_MAX_CONST_BUFFERS: return 1; case PIPE_SHADER_CAP_MAX_PREDS: return 0; /* nothing uses this */ case PIPE_SHADER_CAP_TGSI_CONT_SUPPORTED: return 1; case PIPE_SHADER_CAP_INDIRECT_INPUT_ADDR: case PIPE_SHADER_CAP_INDIRECT_OUTPUT_ADDR: case PIPE_SHADER_CAP_INDIRECT_TEMP_ADDR: case PIPE_SHADER_CAP_INDIRECT_CONST_ADDR: return 1; case PIPE_SHADER_CAP_SUBROUTINES: return 0; case PIPE_SHADER_CAP_TGSI_SQRT_SUPPORTED: return 1; case PIPE_SHADER_CAP_INTEGERS: /* we should be able to support this on a3xx, but not * implemented yet: */ return ((screen->gpu_id >= 300) && glsl130) ? 1 : 0; case PIPE_SHADER_CAP_MAX_TEXTURE_SAMPLERS: case PIPE_SHADER_CAP_MAX_SAMPLER_VIEWS: return 16; case PIPE_SHADER_CAP_PREFERRED_IR: return PIPE_SHADER_IR_TGSI; default: DBG("unknown shader param %d", param); return 0; } return 0; }
static uint64_t fd_screen_get_timestamp(struct pipe_screen *pscreen) { int64_t cpu_time = os_time_get() * 1000; return cpu_time + fd_screen(pscreen)->cpu_gpu_time_delta; }