/*
 * Redraw the remembered top-line message (`toplines`) at the home
 * position.  Records the post-write cursor position in (tlx, tly),
 * marks the top line live via flags.toplin, and prompts with more()
 * when the message wrapped past the first screen row.
 *
 * Fix: use strchr() instead of the legacy index(), which was marked
 * LEGACY and removed in POSIX.1-2008; the file already uses strchr
 * elsewhere.
 */
static void
redotoplin(void)
{
	home();
	/* A multi-line message clears the first row before rewriting so
	 * stale text does not show through. */
	if (strchr(toplines, '\n'))
		cl_end();
	putstr(toplines);
	cl_end();
	tlx = curx;
	tly = cury;
	flags.toplin = 1;
	if (tly > 1)
		more();
}
/*
 * Rewrite the saved top-line message at the home position and note
 * where the cursor ends up, so later code can continue or clear it.
 * Prompts with more() when the message spilled past the first row.
 */
void
redotoplin()
{
	char *nl;

	home();
	nl = strchr(toplines, '\n');
	if (nl != NULL)
		cl_end();
	putstr(toplines);
	cl_end();
	tlx = curx;
	tly = cury;
	flags.toplin = 1;
	if (tly > 1)
		more();
}
/*
 * Submits the binning command list built so far and records which
 * color/depth/stencil surfaces the render job will read and write,
 * based on what has been cleared vs. drawn this job.
 */
void
vc4_flush(struct pipe_context *pctx)
{
        struct vc4_context *vc4 = vc4_context(pctx);
        struct pipe_surface *cbuf = vc4->framebuffer.cbufs[0];
        struct pipe_surface *zsbuf = vc4->framebuffer.zsbuf;

        /* Nothing has been queued since the last submit. */
        if (!vc4->needs_flush)
                return;

        /* The RCL setup would choke if the draw bounds cause no drawing, so
         * just drop the drawing if that's the case.
         */
        if (vc4->draw_max_x <= vc4->draw_min_x ||
            vc4->draw_max_y <= vc4->draw_min_y) {
                vc4_job_reset(vc4);
                return;
        }

        /* Increment the semaphore indicating that binning is done and
         * unblocking the render thread.  Note that this doesn't act until the
         * FLUSH completes.
         */
        cl_ensure_space(&vc4->bcl, 8);
        struct vc4_cl_out *bcl = cl_start(&vc4->bcl);
        cl_u8(&bcl, VC4_PACKET_INCREMENT_SEMAPHORE);
        /* The FLUSH caps all of our bin lists with a VC4_PACKET_RETURN. */
        cl_u8(&bcl, VC4_PACKET_FLUSH);
        cl_end(&vc4->bcl, bcl);

        /* Color buffer is written if a color resolve is pending; it is also
         * read (loaded) unless the whole buffer was cleared this job. */
        if (cbuf && (vc4->resolve & PIPE_CLEAR_COLOR0)) {
                pipe_surface_reference(&vc4->color_write, cbuf);
                if (!(vc4->cleared & PIPE_CLEAR_COLOR0)) {
                        pipe_surface_reference(&vc4->color_read, cbuf);
                } else {
                        pipe_surface_reference(&vc4->color_read, NULL);
                }
        } else {
                pipe_surface_reference(&vc4->color_write, NULL);
                pipe_surface_reference(&vc4->color_read, NULL);
        }

        /* Same logic for the combined depth/stencil surface. */
        if (vc4->framebuffer.zsbuf &&
            (vc4->resolve &
             (PIPE_CLEAR_DEPTH | PIPE_CLEAR_STENCIL))) {
                pipe_surface_reference(&vc4->zs_write, zsbuf);
                if (!(vc4->cleared &
                      (PIPE_CLEAR_DEPTH | PIPE_CLEAR_STENCIL))) {
                        pipe_surface_reference(&vc4->zs_read, zsbuf);
                } else {
                        pipe_surface_reference(&vc4->zs_read, NULL);
                }
        } else {
                pipe_surface_reference(&vc4->zs_write, NULL);
                pipe_surface_reference(&vc4->zs_read, NULL);
        }

        vc4_job_submit(vc4);
}
/* Redisplay the remembered message on the screen's top line. */
void
redotoplin()
{
	home();
	if (index(toplines, '\n'))
		cl_end();
	putstr(toplines);
	cl_end();
	tlx = curx;
	tly = cury;
	flags.topl = 1;
	if (tly > 1)
		more();
}
/* free after Robert Viduya * must only be called with curx = 1 */ void cl_eos(void) { if(CD) xputs(CD); else { int cx = curx, cy = cury; while(cury <= LI-2) { cl_end(); xputc('\n'); curx = 1; cury++; } cl_end(); curs(cx, cy); } }
/*
 * Client side of the unlink command: forward the buffered request to
 * the server and print its reply.
 *
 * cmds - parsed command line; cmds[1] must name the file to unlink.
 * cl   - client environment (socket fd, I/O buffer, last recv count).
 *
 * Returns 0 on success or the value of file_error() on a usage error or
 * a server-reported error ('\2' marker in the reply).  cl_end() handles
 * disconnection / recv failure.
 *
 * Fix: only echo the reply when recv() actually returned data.  The
 * original called write(1, cl->buff, cl->rec) unconditionally, so a -1
 * count would be converted to (size_t)-1 and over-read the buffer if
 * cl_end() ever returned.
 */
int	cl_unlink(char **cmds, t_envi *cl)
{
	if (!cmds || !cmds[1])
		return (file_error("ERROR: Wrong number of arguments", cl, CLIENT, 0));
	send(cl->fd, cl->buff, ft_strlen(cl->buff), 0);
	if ((cl->rec = recv(cl->fd, cl->buff, BUFF_SIZE, 0)) > 0)
	{
		cl->buff[cl->rec] = '\0';
		if (ft_strchr(cl->buff, '\2'))
			return (file_error(cl->buff, cl, CLIENT, 0));
	}
	if (cl->rec == 0)
		cl_end("Server: disconnected.", cl);
	if (cl->rec == -1)
		cl_end("\033[31mERROR: recv() fail.", cl);
	if (cl->rec > 0)
		write(1, cl->buff, cl->rec);
	return (0);
}
/*
 * Erase the current top-line message (and any wrapped portion below
 * it), saving its text for later recall, and mark the top line free.
 */
void
clrlin(void)
{
	if (!flags.toplin)
		return;
	home();
	cl_end();
	if (tly > 1)
		docorner(1, tly - 1);
	remember_topl();
	flags.toplin = 0;
}
/*
 * Return the index of `bo` in this context's BO handle table, appending
 * it (and taking a reference) if it is not present yet.
 */
uint32_t
vc4_gem_hindex(struct vc4_context *vc4, struct vc4_bo *bo)
{
        uint32_t *handles = vc4->bo_handles.base;
        uint32_t i = 0;

        while (i < cl_offset(&vc4->bo_handles) / 4) {
                if (handles[i] == bo->handle)
                        return i;
                i++;
        }

        /* Not found: record the handle and a referenced pointer in the
         * two parallel lists.  `i` is the slot just appended. */
        struct vc4_cl_out *out = cl_start(&vc4->bo_handles);
        cl_u32(&out, bo->handle);
        cl_end(&vc4->bo_handles, out);

        out = cl_start(&vc4->bo_pointers);
        cl_ptr(&out, vc4_bo_reference(bo));
        cl_end(&vc4->bo_pointers, out);

        return i;
}
/* Clear the topmost message line, remembering its text for recall. */
void
clrlin()
{
	if (!flags.topl)
		return;
	home();
	cl_end();
	if (tly > 1)
		docorner(1, tly - 1);
	remember_topl();
	flags.topl = 0;
}
/**
 * Does the initial bining command list setup for drawing to a given FBO.
 *
 * Only runs once per job: the first draw emits the tile binning mode
 * config and the START_TILE_BINNING packet; subsequent draws return
 * immediately because job->needs_flush is already set.
 */
static void
vc4_start_draw(struct vc4_context *vc4)
{
        struct vc4_job *job = vc4->job;

        if (job->needs_flush)
                return;

        vc4_get_draw_cl_space(job, 0);

        struct vc4_cl_out *bcl = cl_start(&job->bcl);
        // Tile state data is 48 bytes per tile, I think it can be thrown away
        // as soon as binning is finished.
        cl_u8(&bcl, VC4_PACKET_TILE_BINNING_MODE_CONFIG);
        cl_u32(&bcl, 0); /* tile alloc addr, filled by kernel */
        cl_u32(&bcl, 0); /* tile alloc size, filled by kernel */
        cl_u32(&bcl, 0); /* tile state addr, filled by kernel */
        cl_u8(&bcl, job->draw_tiles_x);
        cl_u8(&bcl, job->draw_tiles_y);
        /* Other flags are filled by kernel. */
        cl_u8(&bcl, job->msaa ? VC4_BIN_CONFIG_MS_MODE_4X : 0);

        /* START_TILE_BINNING resets the statechange counters in the hardware,
         * which are what is used when a primitive is binned to a tile to
         * figure out what new state packets need to be written to that tile's
         * command list.
         */
        cl_u8(&bcl, VC4_PACKET_START_TILE_BINNING);

        /* Reset the current compressed primitives format.  This gets modified
         * by VC4_PACKET_GL_INDEXED_PRIMITIVE and
         * VC4_PACKET_GL_ARRAY_PRIMITIVE, so it needs to be reset at the start
         * of every tile.
         */
        cl_u8(&bcl, VC4_PACKET_PRIMITIVE_LIST_FORMAT);
        cl_u8(&bcl, (VC4_PRIMITIVE_LIST_FORMAT_16_INDEX |
                     VC4_PRIMITIVE_LIST_FORMAT_TYPE_TRIANGLES));

        job->needs_flush = true;
        job->draw_width = vc4->framebuffer.width;
        job->draw_height = vc4->framebuffer.height;

        cl_end(&job->bcl, bcl);
}
/*
 * Show the "--More--" prompt after the current top line and wait for
 * the player's acknowledgement, then clear the wrapped portion.
 *
 * s: additional characters (besides space/return) accepted to dismiss
 *    the prompt.
 */
static void
xmore(const char *s)
{
	if (flags.toplin) {
		curs(tlx, tly);
		/* Not enough room for the 8-char prompt on this row. */
		if (tlx + 8 > CO) {
			putsym('\n');
			tly++;
		}
	}

	if (flags.standout)
		standoutbeg();
	putstr("--More--");
	if (flags.standout)
		standoutend();

	xwaitforspace(s);

	if (flags.toplin && tly > 1) {
		home();
		cl_end();
		docorner(1, tly - 1);
	}
	flags.toplin = 0;
}
/*
 * Prompt with "--More--" at the end of the top line and block until it
 * is acknowledged, then clean up any wrapped message rows.
 *
 * spaceflag: TRUE if a space is required to continue.
 */
void
xmore(boolean spaceflag)
{
	if (flags.topl) {
		curs(tlx, tly);
		/* Wrap the prompt when the row cannot hold 8 more chars. */
		if (tlx + 8 > COLNO) {
			putsym('\n');
			tly++;
		}
	}

	putstr("--More--");
	xwaitforspace(spaceflag);

	if (flags.topl && tly > 1) {
		home();
		cl_end();
		docorner(1, tly - 1);
	}
	flags.topl = 0;
}
/*
 * Flexible pager: feed it with a number of lines and it will decide
 * whether these should be fed to the pager above, or displayed in a
 * corner.
 * Call:
 *	cornline(0, title or 0)	: initialize
 *	cornline(1, text)	: add text to the chain of texts
 *	cornline(2, morcs)	: output everything and cleanup
 *	cornline(3, 0)		: cleanup
 */
void
cornline(int mode, char *text)
{
	/* Queued lines live on a static singly-linked chain between the
	 * mode-0 init call and the mode-2/3 cleanup. */
	static struct line {
		struct line *next_line;
		char *line_text;
	} *texthead, *texttail;
	static int maxlen;	/* longest queued line, for corner placement */
	static int linect;	/* number of queued lines */
	struct line *tl;

	if (mode == 0) {
		texthead = 0;
		maxlen = 0;
		linect = 0;
		if (text) {
			cornline(1, text);	/* title */
			cornline(1, "");	/* blank line */
		}
		return;
	}

	if (mode == 1) {
		int len;

		if (!text) return;	/* superfluous, just to be sure */
		linect++;
		len = strlen(text);
		if (len > maxlen) maxlen = len;
		/* single allocation holds the node and its text copy */
		tl = (struct line *)
			alloc((unsigned)(len + sizeof(struct line) + 1));
		tl->next_line = 0;
		tl->line_text = (char *)(tl + 1);
		(void) strlcpy(tl->line_text, text, len + 1);
		if (!texthead) texthead = tl;
		else texttail->next_line = tl;
		texttail = tl;
		return;
	}

	/* --- now we really do it --- */
	if (mode == 2 && linect == 1)		/* topline only */
		pline(texthead->line_text);
	else if (mode == 2) {
		int curline, lth;

		if (flags.toplin == 1) more();	/* ab@unido */
		remember_topl();
		lth = CO - maxlen - 2;		/* Use full screen width */
		if (linect < LI && lth >= 10) {	/* in a corner */
			home();
			cl_end();
			flags.toplin = 0;
			curline = 1;
			for (tl = texthead; tl; tl = tl->next_line) {
				curs(lth, curline);
				if (curline > 1)
					cl_end();
				putsym(' ');
				putstr (tl->line_text);
				curline++;
			}
			curs(lth, curline);
			cl_end();
			/* `text` here is the --More-- continuation keys */
			cmore(text);
			home();
			cl_end();
			docorner(lth, curline - 1);
		} else {			/* feed to pager */
			set_pager(0);
			for (tl = texthead; tl; tl = tl->next_line) {
				if (page_line (tl->line_text)) {
					set_pager(2);
					goto cleanup;
				}
			}
			if (text) {
				cgetret(text);
				set_pager(2);
			} else
				set_pager(1);
		}
	}

cleanup:
	/* modes 2 and 3 both free the queued chain */
	while ((tl = texthead)) {
		texthead = tl->next_line;
		free((char *) tl);
	}
}
/*
 * Main draw entry point for the vc4 gallium driver (pre-job-struct
 * version): converts unsupported primitives on the CPU, sets up binning
 * state, re-emits the shader record when dirty, and writes the indexed
 * or array primitive packet into the binning CL.
 */
static void
vc4_draw_vbo(struct pipe_context *pctx, const struct pipe_draw_info *info)
{
        struct vc4_context *vc4 = vc4_context(pctx);

        /* QUADS and above aren't supported by the hardware; lower them on
         * the CPU via u_primconvert. */
        if (info->mode >= PIPE_PRIM_QUADS) {
                util_primconvert_save_index_buffer(vc4->primconvert, &vc4->indexbuf);
                util_primconvert_save_rasterizer_state(vc4->primconvert, &vc4->rasterizer->base);
                util_primconvert_draw_vbo(vc4->primconvert, info);
                perf_debug("Fallback conversion for %d %s vertices\n",
                           info->count, u_prim_name(info->mode));
                return;
        }

        /* Before setting up the draw, do any fixup blits necessary. */
        vc4_update_shadow_textures(pctx, &vc4->verttex);
        vc4_update_shadow_textures(pctx, &vc4->fragtex);

        vc4_hw_2116_workaround(pctx);

        vc4_get_draw_cl_space(vc4);

        if (vc4->prim_mode != info->mode) {
                vc4->prim_mode = info->mode;
                vc4->dirty |= VC4_DIRTY_PRIM_MODE;
        }

        vc4_start_draw(vc4);
        vc4_update_compiled_shaders(vc4, info->mode);

        vc4_emit_state(pctx);

        /* Re-emit the GL shader record when anything it encodes changed. */
        if ((vc4->dirty & (VC4_DIRTY_VTXBUF |
                           VC4_DIRTY_VTXSTATE |
                           VC4_DIRTY_PRIM_MODE |
                           VC4_DIRTY_RASTERIZER |
                           VC4_DIRTY_COMPILED_CS |
                           VC4_DIRTY_COMPILED_VS |
                           VC4_DIRTY_COMPILED_FS |
                           vc4->prog.cs->uniform_dirty_bits |
                           vc4->prog.vs->uniform_dirty_bits |
                           vc4->prog.fs->uniform_dirty_bits)) ||
            vc4->last_index_bias != info->index_bias) {
                vc4_emit_gl_shader_state(vc4, info);
        }

        vc4->dirty = 0;

        /* Note that the primitive type fields match with OpenGL/gallium
         * definitions, up to but not including QUADS.
         */
        struct vc4_cl_out *bcl = cl_start(&vc4->bcl);
        if (info->indexed) {
                uint32_t offset = vc4->indexbuf.offset;
                uint32_t index_size = vc4->indexbuf.index_size;
                struct pipe_resource *prsc;
                /* The HW only takes 8/16-bit indices; 32-bit index buffers
                 * are lowered to a 16-bit shadow copy. */
                if (vc4->indexbuf.index_size == 4) {
                        prsc = vc4_get_shadow_index_buffer(pctx, &vc4->indexbuf,
                                                           info->count, &offset);
                        index_size = 2;
                } else {
                        if (vc4->indexbuf.user_buffer) {
                                prsc = NULL;
                                u_upload_data(vc4->uploader, 0,
                                              info->count * index_size, 4,
                                              vc4->indexbuf.user_buffer,
                                              &offset, &prsc);
                        } else {
                                prsc = vc4->indexbuf.buffer;
                        }
                }
                struct vc4_resource *rsc = vc4_resource(prsc);

                cl_start_reloc(&vc4->bcl, &bcl, 1);
                cl_u8(&bcl, VC4_PACKET_GL_INDEXED_PRIMITIVE);
                cl_u8(&bcl,
                      info->mode |
                      (index_size == 2 ?
                       VC4_INDEX_BUFFER_U16:
                       VC4_INDEX_BUFFER_U8));
                cl_u32(&bcl, info->count);
                cl_reloc(vc4, &vc4->bcl, &bcl, rsc->bo, offset);
                cl_u32(&bcl, vc4->max_index);

                if (vc4->indexbuf.index_size == 4 || vc4->indexbuf.user_buffer)
                        pipe_resource_reference(&prsc, NULL);
        } else {
                cl_u8(&bcl, VC4_PACKET_GL_ARRAY_PRIMITIVE);
                cl_u8(&bcl, info->mode);
                cl_u32(&bcl, info->count);
                cl_u32(&bcl, info->start);
        }
        cl_end(&vc4->bcl, bcl);

        /* Drawing with depth/stencil enabled means those buffers must be
         * stored at flush time. */
        if (vc4->zsa && vc4->zsa->base.depth.enabled) {
                vc4->resolve |= PIPE_CLEAR_DEPTH;
        }
        if (vc4->zsa && vc4->zsa->base.stencil[0].enabled)
                vc4->resolve |= PIPE_CLEAR_STENCIL;
        vc4->resolve |= PIPE_CLEAR_COLOR0;

        vc4->shader_rec_count++;

        if (vc4_debug & VC4_DEBUG_ALWAYS_FLUSH)
                vc4_flush(pctx);
}
/*
 * Emits the GL shader record (FS/VS/CS code addresses plus one
 * attribute record per vertex element) into the shader_rec CL, points
 * the binning list at it with a GL_SHADER_STATE packet, and uploads the
 * three uniform streams.  Also computes the conservative max_index that
 * keeps all attribute fetches inside their vertex buffers.
 */
static void
vc4_emit_gl_shader_state(struct vc4_context *vc4,
                         const struct pipe_draw_info *info)
{
        /* VC4_DIRTY_VTXSTATE */
        struct vc4_vertex_stateobj *vtx = vc4->vtx;
        /* VC4_DIRTY_VTXBUF */
        struct vc4_vertexbuf_stateobj *vertexbuf = &vc4->vertexbuf;

        /* The simulator throws a fit if VS or CS don't read an attribute, so
         * we emit a dummy read.
         */
        uint32_t num_elements_emit = MAX2(vtx->num_elements, 1);
        /* Emit the shader record. */
        struct vc4_cl_out *shader_rec =
                cl_start_shader_reloc(&vc4->shader_rec, 3 + num_elements_emit);
        /* VC4_DIRTY_PRIM_MODE | VC4_DIRTY_RASTERIZER */
        cl_u16(&shader_rec,
               VC4_SHADER_FLAG_ENABLE_CLIPPING |
               VC4_SHADER_FLAG_FS_SINGLE_THREAD |
               ((info->mode == PIPE_PRIM_POINTS &&
                 vc4->rasterizer->base.point_size_per_vertex) ?
                VC4_SHADER_FLAG_VS_POINT_SIZE : 0));

        /* VC4_DIRTY_COMPILED_FS */
        cl_u8(&shader_rec, 0); /* fs num uniforms (unused) */
        cl_u8(&shader_rec, vc4->prog.fs->num_inputs);
        cl_reloc(vc4, &vc4->shader_rec, &shader_rec, vc4->prog.fs->bo, 0);
        cl_u32(&shader_rec, 0); /* UBO offset written by kernel */

        /* VC4_DIRTY_COMPILED_VS */
        cl_u16(&shader_rec, 0); /* vs num uniforms */
        cl_u8(&shader_rec, vc4->prog.vs->vattrs_live);
        cl_u8(&shader_rec, vc4->prog.vs->vattr_offsets[8]);
        cl_reloc(vc4, &vc4->shader_rec, &shader_rec, vc4->prog.vs->bo, 0);
        cl_u32(&shader_rec, 0); /* UBO offset written by kernel */

        /* VC4_DIRTY_COMPILED_CS */
        cl_u16(&shader_rec, 0); /* cs num uniforms */
        cl_u8(&shader_rec, vc4->prog.cs->vattrs_live);
        cl_u8(&shader_rec, vc4->prog.cs->vattr_offsets[8]);
        cl_reloc(vc4, &vc4->shader_rec, &shader_rec, vc4->prog.cs->bo, 0);
        cl_u32(&shader_rec, 0); /* UBO offset written by kernel */

        /* One attribute record per element; track the largest index that
         * still fits in every bound vertex buffer. */
        uint32_t max_index = 0xffff;
        for (int i = 0; i < vtx->num_elements; i++) {
                struct pipe_vertex_element *elem = &vtx->pipe[i];
                struct pipe_vertex_buffer *vb =
                        &vertexbuf->vb[elem->vertex_buffer_index];
                struct vc4_resource *rsc = vc4_resource(vb->buffer);
                /* not vc4->dirty tracked: vc4->last_index_bias */
                uint32_t offset = (vb->buffer_offset +
                                   elem->src_offset +
                                   vb->stride * info->index_bias);
                uint32_t vb_size = rsc->bo->size - offset;
                uint32_t elem_size =
                        util_format_get_blocksize(elem->src_format);

                cl_reloc(vc4, &vc4->shader_rec, &shader_rec, rsc->bo, offset);
                cl_u8(&shader_rec, elem_size - 1);
                cl_u8(&shader_rec, vb->stride);
                cl_u8(&shader_rec, vc4->prog.vs->vattr_offsets[i]);
                cl_u8(&shader_rec, vc4->prog.cs->vattr_offsets[i]);

                if (vb->stride > 0) {
                        max_index = MIN2(max_index,
                                         (vb_size - elem_size) / vb->stride);
                }
        }

        if (vtx->num_elements == 0) {
                assert(num_elements_emit == 1);
                /* Dummy attribute read to keep the simulator happy. */
                struct vc4_bo *bo = vc4_bo_alloc(vc4->screen, 4096, "scratch VBO");
                cl_reloc(vc4, &vc4->shader_rec, &shader_rec, bo, 0);
                cl_u8(&shader_rec, 16 - 1); /* element size */
                cl_u8(&shader_rec, 0); /* stride */
                cl_u8(&shader_rec, 0); /* VS VPM offset */
                cl_u8(&shader_rec, 0); /* CS VPM offset */
                vc4_bo_unreference(&bo);
        }
        cl_end(&vc4->shader_rec, shader_rec);

        struct vc4_cl_out *bcl = cl_start(&vc4->bcl);
        /* the actual draw call. */
        cl_u8(&bcl, VC4_PACKET_GL_SHADER_STATE);
        assert(vtx->num_elements <= 8);
        /* Note that number of attributes == 0 in the packet means 8
         * attributes.  This field also contains the offset into shader_rec.
         */
        cl_u32(&bcl, num_elements_emit & 0x7);
        cl_end(&vc4->bcl, bcl);

        vc4_write_uniforms(vc4, vc4->prog.fs,
                           &vc4->constbuf[PIPE_SHADER_FRAGMENT],
                           &vc4->fragtex);
        vc4_write_uniforms(vc4, vc4->prog.vs,
                           &vc4->constbuf[PIPE_SHADER_VERTEX],
                           &vc4->verttex);
        vc4_write_uniforms(vc4, vc4->prog.cs,
                           &vc4->constbuf[PIPE_SHADER_VERTEX],
                           &vc4->verttex);

        vc4->last_index_bias = info->index_bias;
        vc4->max_index = max_index;
}
/*
 * Packs TEXTURE_UNIFORM_PARAMETER_1 for the given texture unit and
 * emits it into the uniform stream, OR-ing in the sampler view's
 * precomputed p1 bits.
 */
static void
write_texture_p1(struct vc5_job *job,
                 struct vc5_cl_out **uniforms,
                 struct vc5_texture_stateobj *texstate,
                 uint32_t unit)
{
        struct pipe_sampler_view *psview = texstate->textures[unit];
        struct vc5_sampler_view *sview = vc5_sampler_view(psview);
        struct V3D33_TEXTURE_UNIFORM_PARAMETER_1_CFG_MODE1 unpacked = {
                .texture_state_record_base_address = texstate->texture_state[unit],
        };

        uint32_t packed;
        V3D33_TEXTURE_UNIFORM_PARAMETER_1_CFG_MODE1_pack(&job->indirect,
                                                         (uint8_t *)&packed,
                                                         &unpacked);

        cl_aligned_u32(uniforms, packed | sview->p1);
}

/*
 * Writes a shader's uniform stream into the job's indirect CL, resolving
 * each QUNIFORM_* entry from the current gallium state (viewport, clip
 * planes, textures, stencil, UBOs, ...).  Returns a reloc pointing at the
 * start of the stream; the caller owns the extra BO reference taken on it.
 */
struct vc5_cl_reloc vc5_write_uniforms(struct vc5_context *vc5,
                                       struct vc5_compiled_shader *shader,
                                       struct vc5_constbuf_stateobj *cb,
                                       struct vc5_texture_stateobj *texstate)
{
        struct v3d_uniform_list *uinfo = &shader->prog_data.base->uniforms;
        struct vc5_job *job = vc5->job;
        const uint32_t *gallium_uniforms = cb->cb[0].user_buffer;
        struct vc5_bo *ubo = vc5_upload_ubo(vc5, shader, gallium_uniforms);

        /* We always need to return some space for uniforms, because the HW
         * will be prefetching, even if we don't read any in the program.
         */
        vc5_cl_ensure_space(&job->indirect, MAX2(uinfo->count, 1) * 4, 4);

        struct vc5_cl_reloc uniform_stream = cl_get_address(&job->indirect);
        vc5_bo_reference(uniform_stream.bo);

        struct vc5_cl_out *uniforms = cl_start(&job->indirect);

        for (int i = 0; i < uinfo->count; i++) {
                switch (uinfo->contents[i]) {
                case QUNIFORM_CONSTANT:
                        cl_aligned_u32(&uniforms, uinfo->data[i]);
                        break;
                case QUNIFORM_UNIFORM:
                        cl_aligned_u32(&uniforms,
                                       gallium_uniforms[uinfo->data[i]]);
                        break;
                case QUNIFORM_VIEWPORT_X_SCALE:
                        cl_aligned_f(&uniforms, vc5->viewport.scale[0] * 256.0f);
                        break;
                case QUNIFORM_VIEWPORT_Y_SCALE:
                        cl_aligned_f(&uniforms, vc5->viewport.scale[1] * 256.0f);
                        break;

                case QUNIFORM_VIEWPORT_Z_OFFSET:
                        cl_aligned_f(&uniforms, vc5->viewport.translate[2]);
                        break;
                case QUNIFORM_VIEWPORT_Z_SCALE:
                        cl_aligned_f(&uniforms, vc5->viewport.scale[2]);
                        break;

                case QUNIFORM_USER_CLIP_PLANE:
                        cl_aligned_f(&uniforms,
                                     vc5->clip.ucp[uinfo->data[i] / 4][uinfo->data[i] % 4]);
                        break;

                case QUNIFORM_TEXTURE_CONFIG_P1:
                        write_texture_p1(job, &uniforms, texstate,
                                         uinfo->data[i]);
                        break;

#if 0
                case QUNIFORM_TEXTURE_FIRST_LEVEL:
                        write_texture_first_level(job, &uniforms, texstate,
                                                  uinfo->data[i]);
                        break;
#endif

                case QUNIFORM_TEXRECT_SCALE_X:
                case QUNIFORM_TEXRECT_SCALE_Y:
                        cl_aligned_u32(&uniforms,
                                       get_texrect_scale(texstate,
                                                         uinfo->contents[i],
                                                         uinfo->data[i]));
                        break;

                case QUNIFORM_TEXTURE_WIDTH:
                case QUNIFORM_TEXTURE_HEIGHT:
                case QUNIFORM_TEXTURE_DEPTH:
                case QUNIFORM_TEXTURE_ARRAY_SIZE:
                case QUNIFORM_TEXTURE_LEVELS:
                        cl_aligned_u32(&uniforms,
                                       get_texture_size(texstate,
                                                        uinfo->contents[i],
                                                        uinfo->data[i]));
                        break;

                case QUNIFORM_STENCIL:
                        /* data[i] selects front (0) / back (1) / combined;
                         * front and back also fold in the API stencil ref. */
                        cl_aligned_u32(&uniforms,
                                       vc5->zsa->stencil_uniforms[uinfo->data[i]] |
                                       (uinfo->data[i] <= 1 ?
                                        (vc5->stencil_ref.ref_value[uinfo->data[i]] << 8) :
                                        0));
                        break;

                case QUNIFORM_ALPHA_REF:
                        cl_aligned_f(&uniforms,
                                     vc5->zsa->base.alpha.ref_value);
                        break;

                case QUNIFORM_SAMPLE_MASK:
                        cl_aligned_u32(&uniforms, vc5->sample_mask);
                        break;

                case QUNIFORM_UBO_ADDR:
                        /* UBO 0 is the gallium constant buffer uploaded
                         * above; others come from bound buffers. */
                        if (uinfo->data[i] == 0) {
                                cl_aligned_reloc(&job->indirect, &uniforms,
                                                 ubo, 0);
                        } else {
                                int ubo_index = uinfo->data[i];
                                struct vc5_resource *rsc =
                                        vc5_resource(cb->cb[ubo_index].buffer);

                                cl_aligned_reloc(&job->indirect, &uniforms,
                                                 rsc->bo,
                                                 cb->cb[ubo_index].buffer_offset);
                        }
                        break;

                case QUNIFORM_TEXTURE_FIRST_LEVEL:
                case QUNIFORM_TEXTURE_MSAA_ADDR:
                case QUNIFORM_TEXTURE_BORDER_COLOR:
                        /* XXX */
                        break;

                default:
                        assert(quniform_contents_is_texture_p0(uinfo->contents[i]));

                        write_texture_p0(job, &uniforms, texstate,
                                         uinfo->contents[i] -
                                         QUNIFORM_TEXTURE_CONFIG_P0_0,
                                         uinfo->data[i]);
                        break;

                }
#if 0
                uint32_t written_val = *((uint32_t *)uniforms - 1);
                fprintf(stderr, "shader %p[%d]: 0x%08x / 0x%08x (%f)\n",
                        shader, i, __gen_address_offset(&uniform_stream) + i * 4,
                        written_val, uif(written_val));
#endif
        }

        cl_end(&job->indirect, uniforms);

        vc5_bo_unreference(&ubo);

        return uniform_stream;
}

/*
 * Computes which VC5_DIRTY_* state changes require this shader's uniform
 * stream to be rewritten, and caches the mask on the shader.
 */
void
vc5_set_shader_uniform_dirty_flags(struct vc5_compiled_shader *shader)
{
        uint32_t dirty = 0;

        for (int i = 0; i < shader->prog_data.base->uniforms.count; i++) {
                switch (shader->prog_data.base->uniforms.contents[i]) {
                case QUNIFORM_CONSTANT:
                        break;
                case QUNIFORM_UNIFORM:
                case QUNIFORM_UBO_ADDR:
                        dirty |= VC5_DIRTY_CONSTBUF;
                        break;

                case QUNIFORM_VIEWPORT_X_SCALE:
                case QUNIFORM_VIEWPORT_Y_SCALE:
                case QUNIFORM_VIEWPORT_Z_OFFSET:
                case QUNIFORM_VIEWPORT_Z_SCALE:
                        dirty |= VC5_DIRTY_VIEWPORT;
                        break;

                case QUNIFORM_USER_CLIP_PLANE:
                        dirty |= VC5_DIRTY_CLIP;
                        break;

                case QUNIFORM_TEXTURE_CONFIG_P1:
                case QUNIFORM_TEXTURE_BORDER_COLOR:
                case QUNIFORM_TEXTURE_FIRST_LEVEL:
                case QUNIFORM_TEXTURE_MSAA_ADDR:
                case QUNIFORM_TEXRECT_SCALE_X:
                case QUNIFORM_TEXRECT_SCALE_Y:
                case QUNIFORM_TEXTURE_WIDTH:
                case QUNIFORM_TEXTURE_HEIGHT:
                case QUNIFORM_TEXTURE_DEPTH:
                case QUNIFORM_TEXTURE_ARRAY_SIZE:
                case QUNIFORM_TEXTURE_LEVELS:
                        /* We could flag this on just the stage we're
                         * compiling for, but it's not passed in.
                         */
                        dirty |= VC5_DIRTY_FRAGTEX | VC5_DIRTY_VERTTEX;
                        break;

                case QUNIFORM_STENCIL:
                case QUNIFORM_ALPHA_REF:
                        dirty |= VC5_DIRTY_ZSA;
                        break;

                case QUNIFORM_SAMPLE_MASK:
                        dirty |= VC5_DIRTY_SAMPLE_MASK;
                        break;

                default:
                        assert(quniform_contents_is_texture_p0(shader->prog_data.base->uniforms.contents[i]));
                        dirty |= VC5_DIRTY_FRAGTEX | VC5_DIRTY_VERTTEX;
                        break;
                }
        }

        shader->uniform_dirty_bits = dirty;
}
/*
 * Emits the fixed-function state packets (clip window, config bits,
 * depth offset, point/line size, clipper scaling, viewport offset, flat
 * shade flags) into the binning CL for any dirty state groups, and
 * extends the job's draw bounds by the computed clip window.
 */
void
vc4_emit_state(struct pipe_context *pctx)
{
        struct vc4_context *vc4 = vc4_context(pctx);

        struct vc4_cl_out *bcl = cl_start(&vc4->bcl);
        if (vc4->dirty & (VC4_DIRTY_SCISSOR | VC4_DIRTY_VIEWPORT |
                          VC4_DIRTY_RASTERIZER)) {
                float *vpscale = vc4->viewport.scale;
                float *vptranslate = vc4->viewport.translate;
                float vp_minx = -fabsf(vpscale[0]) + vptranslate[0];
                float vp_maxx = fabsf(vpscale[0]) + vptranslate[0];
                float vp_miny = -fabsf(vpscale[1]) + vptranslate[1];
                float vp_maxy = fabsf(vpscale[1]) + vptranslate[1];

                /* Clip to the scissor if it's enabled, but still clip to the
                 * drawable regardless since that controls where the binner
                 * tries to put things.
                 *
                 * Additionally, always clip the rendering to the viewport,
                 * since the hardware does guardband clipping, meaning
                 * primitives would rasterize outside of the view volume.
                 */
                uint32_t minx, miny, maxx, maxy;
                if (!vc4->rasterizer->base.scissor) {
                        minx = MAX2(vp_minx, 0);
                        miny = MAX2(vp_miny, 0);
                        maxx = MIN2(vp_maxx, vc4->draw_width);
                        maxy = MIN2(vp_maxy, vc4->draw_height);
                } else {
                        minx = MAX2(vp_minx, vc4->scissor.minx);
                        miny = MAX2(vp_miny, vc4->scissor.miny);
                        maxx = MIN2(vp_maxx, vc4->scissor.maxx);
                        maxy = MIN2(vp_maxy, vc4->scissor.maxy);
                }

                cl_u8(&bcl, VC4_PACKET_CLIP_WINDOW);
                cl_u16(&bcl, minx);
                cl_u16(&bcl, miny);
                cl_u16(&bcl, maxx - minx);
                cl_u16(&bcl, maxy - miny);

                /* Widen the job's draw bounds to cover this clip window. */
                vc4->draw_min_x = MIN2(vc4->draw_min_x, minx);
                vc4->draw_min_y = MIN2(vc4->draw_min_y, miny);
                vc4->draw_max_x = MAX2(vc4->draw_max_x, maxx);
                vc4->draw_max_y = MAX2(vc4->draw_max_y, maxy);
        }

        if (vc4->dirty & (VC4_DIRTY_RASTERIZER | VC4_DIRTY_ZSA)) {
                uint8_t ez_enable_mask_out = ~0;

                /* HW-2905: If the RCL ends up doing a full-res load when
                 * multisampling, then early Z tracking may end up with values
                 * from the previous tile due to a HW bug.  Disable it to
                 * avoid that.
                 *
                 * We should be able to skip this when the Z is cleared, but I
                 * was seeing bad rendering on glxgears -samples 4 even in
                 * that case.
                 */
                if (vc4->msaa)
                        ez_enable_mask_out &= ~VC4_CONFIG_BITS_EARLY_Z;

                cl_u8(&bcl, VC4_PACKET_CONFIGURATION_BITS);
                cl_u8(&bcl,
                      vc4->rasterizer->config_bits[0] |
                      vc4->zsa->config_bits[0]);
                cl_u8(&bcl,
                      vc4->rasterizer->config_bits[1] |
                      vc4->zsa->config_bits[1]);
                cl_u8(&bcl,
                      (vc4->rasterizer->config_bits[2] |
                       vc4->zsa->config_bits[2]) & ez_enable_mask_out);
        }

        if (vc4->dirty & VC4_DIRTY_RASTERIZER) {
                cl_u8(&bcl, VC4_PACKET_DEPTH_OFFSET);
                cl_u16(&bcl, vc4->rasterizer->offset_factor);
                cl_u16(&bcl, vc4->rasterizer->offset_units);

                cl_u8(&bcl, VC4_PACKET_POINT_SIZE);
                cl_f(&bcl, vc4->rasterizer->point_size);

                cl_u8(&bcl, VC4_PACKET_LINE_WIDTH);
                cl_f(&bcl, vc4->rasterizer->base.line_width);
        }

        if (vc4->dirty & VC4_DIRTY_VIEWPORT) {
                cl_u8(&bcl, VC4_PACKET_CLIPPER_XY_SCALING);
                cl_f(&bcl, vc4->viewport.scale[0] * 16.0f);
                cl_f(&bcl, vc4->viewport.scale[1] * 16.0f);

                cl_u8(&bcl, VC4_PACKET_CLIPPER_Z_SCALING);
                cl_f(&bcl, vc4->viewport.translate[2]);
                cl_f(&bcl, vc4->viewport.scale[2]);

                cl_u8(&bcl, VC4_PACKET_VIEWPORT_OFFSET);
                cl_u16(&bcl, 16 * vc4->viewport.translate[0]);
                cl_u16(&bcl, 16 * vc4->viewport.translate[1]);
        }

        if (vc4->dirty & VC4_DIRTY_FLAT_SHADE_FLAGS) {
                cl_u8(&bcl, VC4_PACKET_FLAT_SHADE_FLAGS);
                cl_u32(&bcl, vc4->rasterizer->base.flatshade ?
                       vc4->prog.fs->color_inputs : 0);
        }

        cl_end(&vc4->bcl, bcl);
}
/*
 * main --
 *	This is the main loop for the standalone curses editor.
 *
 *	Sets up the global and screen-private state, determines the
 *	terminal type and size, runs the ex/vi editor, then restores the
 *	terminal and re-raises any fatal signal that arrived during the
 *	session so the shell sees the real cause of death.
 */
int
main(int argc, char **argv)
{
	static int reenter;
	CL_PRIVATE *clp;
	GS *gp;
	WIN *wp;
	size_t rows, cols;
	int rval;
	char **p_av, **t_av;
	const char *ttype;

	/* If loaded at 0 and jumping through a NULL pointer, stop. */
	if (reenter++)
		abort();

	/* Create and initialize the global structure. */
	__global_list = gp = gs_init(argv[0]);

	/*
	 * Strip out any arguments that vi isn't going to understand.  There's
	 * no way to portably call getopt twice, so arguments parsed here must
	 * be removed from the argument list.
	 */
	for (p_av = t_av = argv;;) {
		if (*t_av == NULL) {
			*p_av = NULL;
			break;
		}
		if (!strcmp(*t_av, "--")) {
			while ((*p_av++ = *t_av++) != NULL);
			break;
		}
		*p_av++ = *t_av++;
	}

	/* Create new window */
	wp = gs_new_win(gp);

	/* Create and initialize the CL_PRIVATE structure. */
	clp = cl_init(wp);

	/*
	 * Initialize the terminal information.
	 *
	 * We have to know what terminal it is from the start, since we may
	 * have to use termcap/terminfo to find out how big the screen is.
	 */
	if ((ttype = getenv("TERM")) == NULL) {
		if (isatty(STDIN_FILENO))
			fprintf(stderr, "%s: warning: TERM is not set\n",
			    gp->progname);
		ttype = "unknown";
	}
	term_init(gp->progname, ttype);

	/* Add the terminal type to the global structure. */
	if ((OG_D_STR(gp, GO_TERM) =
	    OG_STR(gp, GO_TERM) = strdup(ttype)) == NULL)
		perr(gp->progname, NULL);

	/* Figure out how big the screen is. */
	if (cl_ssize(NULL, 0, &rows, &cols, NULL))
		exit (1);

	/* Add the rows and columns to the global structure. */
	OG_VAL(gp, GO_LINES) = OG_D_VAL(gp, GO_LINES) = rows;
	OG_VAL(gp, GO_COLUMNS) = OG_D_VAL(gp, GO_COLUMNS) = cols;

	/* Ex wants stdout to be buffered. */
	(void)setvbuf(stdout, NULL, _IOFBF, 0);

	/* Start catching signals. */
	if (sig_init(gp, NULL))
		exit (1);

	/* Run ex/vi. */
	rval = editor(wp, argc, argv);

	/* Clean out the global structure. */
	gs_end(gp);

	/* Clean up signals. */
	sig_end(gp);

	/* Clean up the terminal. */
	(void)cl_quit(gp);

	/*
	 * XXX
	 * Reset the O_MESG option.
	 */
	if (clp->tgw != TGW_UNKNOWN)
		(void)cl_omesg(NULL, clp, clp->tgw == TGW_SET);

	/*
	 * XXX
	 * Reset the X11 xterm icon/window name.
	 */
	if (F_ISSET(clp, CL_RENAME))
		cl_setname(gp, clp->oname);

	/* If a killer signal arrived, pretend we just got it. */
	if (clp->killersig) {
		(void)signal(clp->killersig, SIG_DFL);
		(void)kill(getpid(), clp->killersig);
		/* NOTREACHED */
	}

	/* Free the global and CL private areas. */
#if defined(DEBUG) || defined(PURIFY) || defined(LIBRARY)
	cl_end(clp);
	free(gp);
#endif

	exit (rval);
}
/*
 * Main draw entry point for the vc4 gallium driver (job-struct
 * version): converts unsupported primitives on the CPU, sets up binning
 * state, re-emits shader state when dirty, and writes the indexed or
 * array primitive packets into the job's binning CL.  Array draws are
 * chunked to work around the hardware's 16-bit internal index limit.
 */
static void
vc4_draw_vbo(struct pipe_context *pctx, const struct pipe_draw_info *info)
{
        struct vc4_context *vc4 = vc4_context(pctx);

        /* QUADS and above aren't supported by the hardware; lower them on
         * the CPU via u_primconvert. */
        if (info->mode >= PIPE_PRIM_QUADS) {
                util_primconvert_save_index_buffer(vc4->primconvert, &vc4->indexbuf);
                util_primconvert_save_rasterizer_state(vc4->primconvert, &vc4->rasterizer->base);
                util_primconvert_draw_vbo(vc4->primconvert, info);
                perf_debug("Fallback conversion for %d %s vertices\n",
                           info->count, u_prim_name(info->mode));
                return;
        }

        /* Before setting up the draw, do any fixup blits necessary. */
        vc4_predraw_check_textures(pctx, &vc4->verttex);
        vc4_predraw_check_textures(pctx, &vc4->fragtex);

        vc4_hw_2116_workaround(pctx, info->count);

        struct vc4_job *job = vc4_get_job_for_fbo(vc4);

        vc4_get_draw_cl_space(job, info->count);

        if (vc4->prim_mode != info->mode) {
                vc4->prim_mode = info->mode;
                vc4->dirty |= VC4_DIRTY_PRIM_MODE;
        }

        vc4_start_draw(vc4);
        if (!vc4_update_compiled_shaders(vc4, info->mode)) {
                debug_warn_once("shader compile failed, skipping draw call.\n");
                return;
        }

        vc4_emit_state(pctx);

        /* Re-emit the GL shader record when anything it encodes changed. */
        if ((vc4->dirty & (VC4_DIRTY_VTXBUF |
                           VC4_DIRTY_VTXSTATE |
                           VC4_DIRTY_PRIM_MODE |
                           VC4_DIRTY_RASTERIZER |
                           VC4_DIRTY_COMPILED_CS |
                           VC4_DIRTY_COMPILED_VS |
                           VC4_DIRTY_COMPILED_FS |
                           vc4->prog.cs->uniform_dirty_bits |
                           vc4->prog.vs->uniform_dirty_bits |
                           vc4->prog.fs->uniform_dirty_bits)) ||
            vc4->last_index_bias != info->index_bias) {
                vc4_emit_gl_shader_state(vc4, info, 0);
        }

        vc4->dirty = 0;

        /* Note that the primitive type fields match with OpenGL/gallium
         * definitions, up to but not including QUADS.
         */
        struct vc4_cl_out *bcl = cl_start(&job->bcl);
        if (info->indexed) {
                uint32_t offset = vc4->indexbuf.offset;
                uint32_t index_size = vc4->indexbuf.index_size;
                struct pipe_resource *prsc;
                /* The HW only takes 8/16-bit indices; 32-bit index buffers
                 * are lowered to a 16-bit shadow copy. */
                if (vc4->indexbuf.index_size == 4) {
                        prsc = vc4_get_shadow_index_buffer(pctx, &vc4->indexbuf,
                                                           info->count, &offset);
                        index_size = 2;
                } else {
                        if (vc4->indexbuf.user_buffer) {
                                prsc = NULL;
                                u_upload_data(vc4->uploader, 0,
                                              info->count * index_size, 4,
                                              vc4->indexbuf.user_buffer,
                                              &offset, &prsc);
                        } else {
                                prsc = vc4->indexbuf.buffer;
                        }
                }
                struct vc4_resource *rsc = vc4_resource(prsc);

                cl_start_reloc(&job->bcl, &bcl, 1);
                cl_u8(&bcl, VC4_PACKET_GL_INDEXED_PRIMITIVE);
                cl_u8(&bcl,
                      info->mode |
                      (index_size == 2 ?
                       VC4_INDEX_BUFFER_U16:
                       VC4_INDEX_BUFFER_U8));
                cl_u32(&bcl, info->count);
                cl_reloc(job, &job->bcl, &bcl, rsc->bo, offset);
                cl_u32(&bcl, vc4->max_index);
                job->draw_calls_queued++;

                if (vc4->indexbuf.index_size == 4 || vc4->indexbuf.user_buffer)
                        pipe_resource_reference(&prsc, NULL);
        } else {
                uint32_t count = info->count;
                uint32_t start = info->start;
                uint32_t extra_index_bias = 0;

                while (count) {
                        uint32_t this_count = count;
                        uint32_t step = count;
                        static const uint32_t max_verts = 65535;

                        /* GFXH-515 / SW-5891: The binner emits 16 bit indices
                         * for drawarrays, which means that if start + count >
                         * 64k it would truncate the top bits.  Work around
                         * this by emitting a limited number of primitives at
                         * a time and reemitting the shader state pointing
                         * farther down the vertex attribute arrays.
                         *
                         * To do this properly for line loops or trifans, we'd
                         * need to make a new VB containing the first vertex
                         * plus whatever remainder.
                         */
                        if (extra_index_bias) {
                                cl_end(&job->bcl, bcl);
                                vc4_emit_gl_shader_state(vc4, info,
                                                         extra_index_bias);
                                bcl = cl_start(&job->bcl);
                        }

                        if (start + count > max_verts) {
                                /* Chunk size must respect each primitive's
                                 * restart pattern; `step` is how far to
                                 * advance, `this_count` how many to draw. */
                                switch (info->mode) {
                                case PIPE_PRIM_POINTS:
                                        this_count = step = max_verts;
                                        break;
                                case PIPE_PRIM_LINES:
                                        this_count = step = max_verts - (max_verts % 2);
                                        break;
                                case PIPE_PRIM_LINE_STRIP:
                                        this_count = max_verts;
                                        step = max_verts - 1;
                                        break;
                                case PIPE_PRIM_LINE_LOOP:
                                        this_count = max_verts;
                                        step = max_verts - 1;
                                        debug_warn_once("unhandled line loop "
                                                        "looping behavior with "
                                                        ">65535 verts\n");
                                        break;
                                case PIPE_PRIM_TRIANGLES:
                                        this_count = step = max_verts - (max_verts % 3);
                                        break;
                                case PIPE_PRIM_TRIANGLE_STRIP:
                                        this_count = max_verts;
                                        step = max_verts - 2;
                                        break;
                                default:
                                        debug_warn_once("unhandled primitive "
                                                        "max vert count, truncating\n");
                                        this_count = step = max_verts;
                                }
                        }

                        cl_u8(&bcl, VC4_PACKET_GL_ARRAY_PRIMITIVE);
                        cl_u8(&bcl, info->mode);
                        cl_u32(&bcl, this_count);
                        cl_u32(&bcl, start);
                        job->draw_calls_queued++;

                        count -= step;
                        extra_index_bias += start + step;
                        start = 0;
                }
        }
        cl_end(&job->bcl, bcl);

        /* We shouldn't have tripped the HW_2116 bug with the GFXH-515
         * workaround.
         */
        assert(job->draw_calls_queued <= VC4_HW_2116_COUNT);

        if (vc4->zsa && vc4->framebuffer.zsbuf) {
                struct vc4_resource *rsc =
                        vc4_resource(vc4->framebuffer.zsbuf->texture);

                if (vc4->zsa->base.depth.enabled) {
                        job->resolve |= PIPE_CLEAR_DEPTH;
                        rsc->initialized_buffers = PIPE_CLEAR_DEPTH;
                }

                if (vc4->zsa->base.stencil[0].enabled) {
                        job->resolve |= PIPE_CLEAR_STENCIL;
                        rsc->initialized_buffers |= PIPE_CLEAR_STENCIL;
                }
        }

        job->resolve |= PIPE_CLEAR_COLOR0;

        if (vc4_debug & VC4_DEBUG_ALWAYS_FLUSH)
                vc4_flush(pctx);
}