/* Dodge-like luma blend: a^2 / (255 - b), saturated through CLAMP_Y.
 * NOTE(review): when b == 255 the divisor is zero and the function
 * returns CLAMP_Y(0) rather than saturating to white — confirm this
 * discontinuity is intentional. */
uint8_t blend_func4(uint8_t a, uint8_t b)
{
    const uint8_t divisor = 0xff - b;
    if (divisor != 0)
        return CLAMP_Y((a * a) / divisor);
    return CLAMP_Y(divisor);    /* divisor == 0 here */
}
/* Luma blend: b / (255 - b), guarded against division by zero.
 * Parameter `a` is unused; it is kept to match the common two-operand
 * blend-function signature used by its siblings.
 * NOTE(review): integer division makes the quotient 0 for b < 128 and
 * 1 for 128 <= b < 255 — confirm that is the intended curve. */
uint8_t blend_func5(uint8_t a, uint8_t b)
{
    const uint8_t inv = 0xff - b;
    if (inv == 0)
        return CLAMP_Y(b);      /* b == 255: avoid dividing by zero */
    const uint8_t quotient = b / inv;
    return CLAMP_Y(quotient);
}
/* Color-burn-style luma blend: 255 - (255-b)^2 / a, with a == 0
 * remapped to 0xff to avoid division by zero.
 * Fix: the intermediate was stored in a uint8_t, so negative results
 * (small `a` combined with small `b`) wrapped modulo 256 *before*
 * CLAMP_Y could clamp them, producing bright pixels where the blend
 * should bottom out at black. Compute in int so CLAMP_Y sees the
 * true, possibly-negative value. */
uint8_t blend_func2(uint8_t a, uint8_t b)
{
    if (a == 0)
        a = 0xff;                            /* avoid division by zero */
    const int inv = 255 - b;
    const int val = 255 - (inv * inv) / a;   /* may be negative: clamp below */
    return CLAMP_Y(val);
}
///////////////////////////////////////////////////////// // do the YUV processing here // ///////////////////////////////////////////////////////// void pix_multiply :: processYUV_YUV(imageStruct &image, imageStruct &right) { long src,h,w; int y1,y2; src =0; //format is U Y V Y for (h=0; h<image.ysize; h++) { for(w=0; w<image.xsize/2; w++) { y1 = (image.data[src+chY0] * right.data[src+chY0]) >> 8; image.data[src+chY0] = CLAMP_Y(y1); y2 = (image.data[src+chY1] * right.data[src+chY1]) >> 8; image.data[src+chY1] = CLAMP_Y(y2); src+=4; } } }
// Frame-difference motion detector for a packed UYVY image.
// Each luma sample is compared against the previous frame kept in
// `buffer`: samples whose luma changed by more than `threshold` become
// 255, all others 0, and chroma is forced to neutral (128) so the
// output is pure black & white.
void pix_movement :: processYUVImage(imageStruct &image)
{
  // assume that the pix_size does not change !
  // (Re)size the history buffer; if the pixel count changed, the old
  // contents are meaningless, so reset the buffer to white.
  bool doclear=(image.xsize*image.ysize != buffer.xsize*buffer.ysize);
  buffer.xsize = image.xsize;
  buffer.ysize = image.ysize;
  buffer.reallocate();
  if(doclear) {
    buffer.setWhite();
  }

  int pixsize = image.ysize * image.xsize;
  int Y1, Y0;
  unsigned char thresh;
  //these get rid of the invariant loads of the global chY0 and chY1
  Y1 = chY1;
  Y0 = chY0;
  thresh = threshold;
  unsigned char *rp = image.data; // read pointer
  unsigned char *wp=buffer.data; // write pointer to the copy
  unsigned char grey,grey1;
  grey = 0;
  grey1 = 0;

  // Each 4-byte UYVY cell carries two luma samples, so halve the count
  // and step 4 bytes per iteration.
  pixsize/=2;
  while(pixsize--) {
    // First luma of the cell: threshold its difference against the
    // stored previous value, then save the current value for the next
    // frame.
    grey = rp[Y0];
    rp[Y0]=CLAMP_Y(255*(abs(grey-*wp)>thresh));
    *wp++=grey;
    // Second luma of the cell, same treatment.
    grey1 = rp[Y1];
    rp[Y1]=CLAMP_Y(255*(abs(grey1-*wp)>thresh));
    *wp++=grey1;
    // looks cool (C64), but what for ?
    if(true) { // black&white: neutralize chroma
      rp[chU]=128;
      rp[chV]=128;
    }
    rp+=4;
  }
}
/*
 * Idle-mode animation for pong: each call nudges every point of both
 * paddles 1/8 of its remaining distance toward the screen centre
 * (XRES/2, YRES/2), easing them into a resting position.
 *
 * Fix: removed the unused `static point_t orb` local — dead state that
 * was initialized but never read or updated.
 */
void pong_idle(){
    point_t *points = draw_get_back_buffer();
    for(uint8_t i = 0; i < PADDLE_WIDTH; ++i){
        for(uint8_t j = 0; j < PADDLE_HEIGHT; ++j){
            /* index of this paddle point in the back buffer */
            uint8_t l_offs = PADDLE_LEFT_OFFSET + i*PADDLE_HEIGHT + j;
            uint8_t r_offs = PADDLE_RIGHT_OFFSET + i*PADDLE_HEIGHT + j;

            /* step = (target - current) / 8 -> exponential ease-in */
            delta_t d_l = { (XRES/2 - points[l_offs].x)/8,
                            (YRES/2 - points[l_offs].y)/8 };
            delta_t d_r = { (XRES/2 - points[r_offs].x)/8,
                            (YRES/2 - points[r_offs].y)/8 };

            points[l_offs].x = CLAMP_X(points[l_offs].x + d_l.x);
            points[l_offs].y = CLAMP_Y(points[l_offs].y + d_l.y);
            points[r_offs].x = CLAMP_X(points[r_offs].x + d_r.x);
            points[r_offs].y = CLAMP_Y(points[r_offs].y + d_r.y);
        }
    }
}
/* Apply contrast to the luma plane: scale each sample's signed distance
 * from mid-grey (128) by s[1] percent, then clamp back into range.
 * Fixes: the loop index was `unsigned int` compared against the signed
 * `const int len` (implicit conversion; a negative len would have
 * looped ~4 billion times instead of zero) — use a signed index; also
 * dropped the deprecated `register` qualifier. */
static void contrast_y_apply(VJFrame *frame, int *s)
{
    const int len = frame->len;
    uint8_t *Y = frame->data[0];
    for (int r = 0; r < len; r++) {
        int m = Y[r] - 128;          /* signed distance from mid-grey */
        m = (m * s[1] + 50) / 100;   /* percent scale; +50 rounds positives up,
                                        negatives truncate toward zero */
        Y[r] = CLAMP_Y(m + 128);
    }
}
/* 3x3 convolution "edge" filter over the luma plane (parameter `h` is
 * unused; the geometry comes from `w` and frame->len).
 * Every kernel coefficient is negative (centre -8, all eight
 * neighbours -1), so the raw sum/9 is always <= 0; storing it into the
 * uint8_t `p` wraps it modulo 256 *before* CLAMP_Y runs.
 * NOTE(review): that wraparound inverts the response and appears to be
 * the intended visual effect (CLAMP_Y then sees only 0..255 — confirm
 * before "fixing" the type of `p`).
 * The loop bounds skip the first/last row and first/last column so all
 * nine taps stay inside the plane. */
static void another_try_edge(VJFrame *frame, int w, int h)
{
    uint8_t p;
    const int len=frame->len-w;
    unsigned int r,c;
    uint8_t *Y = frame->data[0];
    for(r=w; r < len; r+= w) {
        for(c=1; c < w-1; c++) {
            /* row above, current row (centre weighted -8), row below */
            p = ((Y[r+c-w] * -1) + (Y[r+c-w-1] * -1) + (Y[r+c-w+1] * -1)
               + (Y[r+c-1] * -1) + (Y[r+c] * -8) + (Y[r+c+1] * -1)
               + (Y[r+c+w] * -1) + (Y[r+c+w-1] * -1) + (Y[r+c+w+1] * -1))/9;
            Y[r+c] = CLAMP_Y(p);
        }
    }
}
/**********************************************************************************************
 *
 * aggressive_emboss_framedata: much like the above two, but more aggressive.
 *
 **********************************************************************************************/
static void aggressive_emboss_framedata(VJFrame *frame, int width, int height)
{
    int r, c;
    uint8_t val;
    uint8_t *Y = frame->data[0];
    const int len = frame->len;
    /* The kernel taps flat offsets r+c-2 .. r+c+2 (it uses r±1, i.e.
     * horizontal neighbours, not r±width — kept as written).
     * Fix: the original bounds (r from 0, c over 0..width-1) read
     * Y[-2] on the very first pixel and up to Y[len+1] on the last
     * row — out-of-bounds accesses (UB). The loops below keep every
     * tap inside the plane; the two outermost columns are now left
     * untouched instead of being fed garbage. */
    for (r = 0; r + width <= len; r += width) {
        for (c = 2; c < width - 2; c++) {
            /* NOTE(review): `val` is uint8_t, so negative sums wrap
             * modulo 256 before CLAMP_Y — this wrap appears to be part
             * of the intended effect (see the sibling filters); kept. */
            val = (Y[r - 1 + c - 1] - Y[r - 1 + c] - Y[r - 1 + c + 1]
                 + Y[r + c - 1] - Y[r + c] - Y[r + c + 1]
                 - Y[r + 1 + c - 1] + Y[r + 1 + c] + Y[r + 1 + c + 1]) / 9;
            Y[c + r] = CLAMP_Y(val);
        }
    }
}
/**********************************************************************************************
 *
 * lines_white_balanced_framedata: it looks cool, just try it.
 *
 **********************************************************************************************/
static void lines_white_balance_framedata(VJFrame *frame, int width, int height)
{
    uint8_t *Y = frame->data[0];
    const int len = frame->len - width;
    unsigned int row, col;
    uint8_t acc;

    /* Skip the first/last row and column so all taps (flat offsets
     * row+col-2 .. row+col+2) stay inside the plane. Note the kernel
     * uses row±1, i.e. horizontal neighbours, not row±width. */
    for (row = width; row < len; row += width) {
        for (col = 1; col < (width - 1); col++) {
            acc = (Y[row - 1 + col - 1] - Y[row - 1 + col] - Y[row - 1 + col + 1]
                 + Y[row + col - 1] - Y[row + col] + Y[row + col + 1]
                 + Y[row + 1 + col - 1] - Y[row + 1 + col] - Y[row + 1 + col + 1]) / 9;
            Y[col + row] = CLAMP_Y(acc);
        }
    }
}
/* Linear-light-style luma blend: a + 2*b - 255.
 * Fix: the sum was stored in a uint8_t, so values outside 0..255
 * (the expression ranges from -255 to +510) wrapped modulo 256
 * *before* CLAMP_Y ran, making the clamp a no-op. Compute in int so
 * out-of-range results are genuinely clamped. */
uint8_t blend_func6(uint8_t a, uint8_t b)
{
    const int val = a + (2 * b) - 255;
    return CLAMP_Y(val);
}
bool GLDriver::checkActiveSamplers() { // TODO: Vertex Samplers, Geometry Samplers // Pixel samplers id 0...16 for (auto i = 0; i < latte::MaxSamplers; ++i) { auto sq_tex_sampler_word0 = getRegister<latte::SQ_TEX_SAMPLER_WORD0_N>(latte::Register::SQ_TEX_SAMPLER_WORD0_0 + 4 * (i * 3)); auto sq_tex_sampler_word1 = getRegister<latte::SQ_TEX_SAMPLER_WORD1_N>(latte::Register::SQ_TEX_SAMPLER_WORD1_0 + 4 * (i * 3)); auto sq_tex_sampler_word2 = getRegister<latte::SQ_TEX_SAMPLER_WORD2_N>(latte::Register::SQ_TEX_SAMPLER_WORD2_0 + 4 * (i * 3)); // TODO: is there a sampler bit that indicates this, maybe word2.TYPE? auto sq_tex_resource_word0 = getRegister<latte::SQ_TEX_RESOURCE_WORD0_N>(latte::Register::SQ_TEX_RESOURCE_WORD0_0 + latte::SQ_PS_TEX_RESOURCE_0 + 4 * (i * 7)); auto depthCompare = sq_tex_resource_word0.TILE_TYPE(); if (sq_tex_sampler_word0.value == mPixelSamplerCache[i].word0 && sq_tex_sampler_word1.value == mPixelSamplerCache[i].word1 && sq_tex_sampler_word2.value == mPixelSamplerCache[i].word2 && depthCompare == mPixelSamplerCache[i].depthCompare) { continue; // No change in sampler state } mPixelSamplerCache[i].word0 = sq_tex_sampler_word0.value; mPixelSamplerCache[i].word1 = sq_tex_sampler_word1.value; mPixelSamplerCache[i].word2 = sq_tex_sampler_word2.value; mPixelSamplerCache[i].depthCompare = depthCompare; if (sq_tex_sampler_word0.value == 0 && sq_tex_sampler_word1.value == 0 && sq_tex_sampler_word2.value == 0) { gl::glBindSampler(i, 0); continue; } auto &sampler = mPixelSamplers[i]; if (!sampler.object) { gl::glCreateSamplers(1, &sampler.object); } // Texture clamp auto clamp_x = getTextureWrap(sq_tex_sampler_word0.CLAMP_X()); auto clamp_y = getTextureWrap(sq_tex_sampler_word0.CLAMP_Y()); auto clamp_z = getTextureWrap(sq_tex_sampler_word0.CLAMP_Z()); gl::glSamplerParameteri(sampler.object, gl::GL_TEXTURE_WRAP_S, static_cast<gl::GLint>(clamp_x)); gl::glSamplerParameteri(sampler.object, gl::GL_TEXTURE_WRAP_T, static_cast<gl::GLint>(clamp_y)); 
gl::glSamplerParameteri(sampler.object, gl::GL_TEXTURE_WRAP_R, static_cast<gl::GLint>(clamp_z)); // Texture filter auto xy_min_filter = getTextureXYFilter(sq_tex_sampler_word0.XY_MIN_FILTER()); auto xy_mag_filter = getTextureXYFilter(sq_tex_sampler_word0.XY_MAG_FILTER()); gl::glSamplerParameteri(sampler.object, gl::GL_TEXTURE_MIN_FILTER, static_cast<gl::GLint>(xy_min_filter)); gl::glSamplerParameteri(sampler.object, gl::GL_TEXTURE_MAG_FILTER, static_cast<gl::GLint>(xy_mag_filter)); // Setup border color auto border_color_type = sq_tex_sampler_word0.BORDER_COLOR_TYPE(); std::array<float, 4> colors; switch (border_color_type) { case latte::SQ_TEX_BORDER_COLOR_TRANS_BLACK: colors = { 0.0f, 0.0f, 0.0f, 0.0f }; break; case latte::SQ_TEX_BORDER_COLOR_OPAQUE_BLACK: colors = { 0.0f, 0.0f, 0.0f, 1.0f }; break; case latte::SQ_TEX_BORDER_COLOR_OPAQUE_WHITE: colors = { 1.0f, 1.0f, 1.0f, 0.0f }; break; case latte::SQ_TEX_BORDER_COLOR_REGISTER: { auto td_ps_sampler_border_red = getRegister<latte::TD_PS_SAMPLER_BORDERN_RED>(latte::Register::TD_PS_SAMPLER_BORDER0_RED + 4 * (i * 4)); auto td_ps_sampler_border_green = getRegister<latte::TD_PS_SAMPLER_BORDERN_GREEN>(latte::Register::TD_PS_SAMPLER_BORDER0_GREEN + 4 * (i * 4)); auto td_ps_sampler_border_blue = getRegister<latte::TD_PS_SAMPLER_BORDERN_BLUE>(latte::Register::TD_PS_SAMPLER_BORDER0_BLUE + 4 * (i * 4)); auto td_ps_sampler_border_alpha = getRegister<latte::TD_PS_SAMPLER_BORDERN_ALPHA>(latte::Register::TD_PS_SAMPLER_BORDER0_ALPHA + 4 * (i * 4)); colors = { td_ps_sampler_border_red.BORDER_RED, td_ps_sampler_border_green.BORDER_GREEN, td_ps_sampler_border_blue.BORDER_BLUE, td_ps_sampler_border_alpha.BORDER_ALPHA, }; break; } default: decaf_abort(fmt::format("Unsupported border_color_type = {}", border_color_type)); } gl::glSamplerParameterfv(sampler.object, gl::GL_TEXTURE_BORDER_COLOR, &colors[0]); // Depth compare auto mode = depthCompare ? 
gl::GL_COMPARE_REF_TO_TEXTURE : gl::GL_NONE; gl::glSamplerParameteri(sampler.object, gl::GL_TEXTURE_COMPARE_MODE, static_cast<gl::GLint>(mode)); auto depth_compare_function = getTextureCompareFunction(sq_tex_sampler_word0.DEPTH_COMPARE_FUNCTION()); gl::glSamplerParameteri(sampler.object, gl::GL_TEXTURE_COMPARE_FUNC, static_cast<gl::GLint>(depth_compare_function)); // Setup texture LOD auto min_lod = sq_tex_sampler_word1.MIN_LOD(); auto max_lod = sq_tex_sampler_word1.MAX_LOD(); auto lod_bias = sq_tex_sampler_word1.LOD_BIAS(); // TODO: GL_TEXTURE_MIN_LOD, GL_TEXTURE_MAX_LOD, GL_TEXTURE_LOD_BIAS // Bind sampler gl::glBindSampler(i, sampler.object); } return true; }