void ConvertYUVToRGBOperation::executePixel(float output[4], float x, float y, PixelSampler sampler)
{
  float inputColor[4];
  this->m_inputOperation->read(inputColor, x, y, sampler);
  yuv_to_rgb(inputColor[0], inputColor[1], inputColor[2], &output[0], &output[1], &output[2]);
  output[3] = inputColor[3];
}
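/*
 * The float-domain callers in this collection (executePixel above, do_comb_yuva further
 * down) assume a yuv_to_rgb(y, u, v, &r, &g, &b) helper that is not part of this excerpt.
 * The function below is a minimal sketch of such a converter using approximate BT.601
 * coefficients; the name yuv_to_rgb_f and the exact weights are assumptions, not the
 * original implementation.
 */
static void yuv_to_rgb_f(float y, float u, float v, float *r, float *g, float *b)
{
  /* BT.601 (approx.): R = Y + 1.140 V, G = Y - 0.394 U - 0.581 V, B = Y + 2.032 U */
  *r = y + 1.140f * v;
  *g = y - 0.394f * u - 0.581f * v;
  *b = y + 2.032f * u;
}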
static void imageProcess(const void *p, int width, int height)
{
    unsigned char *src = (unsigned char *)p;
#if 0
    /* disabled variant */
    unsigned char *dst = NV12ToRGB(src, width, height);
    unsigned char *rgb_buf = malloc(width * height * 3);
    NV12ToRGB(src, width, height);
    jpegWrite(dst, width, height);
#else
    /* planar source: full-resolution Y plane first, then two quarter-size chroma
       planes (note the swapped plane order: index 2 before index 1) */
    unsigned char *yuv_src[3];
    yuv_src[0] = src;
    yuv_src[2] = src + width * height;
    yuv_src[1] = src + width * height + width * height / 4;

    unsigned char *rgb_buf = malloc(width * height * 3);
    if (!rgb_buf)
        return;

    yuv_to_rgb(yuv_src, 0, width, height, rgb_buf);
    jpegWrite(rgb_buf, width, height);

    BOOL CreateBmp(const char *filename, uint8_t *pRGBBuffer, int width, int height, int bpp);
    CreateBmp("out.bmp", rgb_buf, width, height, 24);

    free(rgb_buf);
#endif
}
int yuyv_to_rgb(const void *yuyv_array, int yuyv_size, void *rgb_out, int img_w, int img_h, int rgb_flags)
{
    const unsigned char *src = (const unsigned char *)yuyv_array;
    unsigned char *dst = (unsigned char *)rgb_out;
    struct YUYV yuyv_color;
    struct RGB2 rgb_color;
    int rgb_size = (img_w * img_h * RGB_SIZE);
    int i = 0, counter = 0;

    memset(&rgb_color, 255, (RGB_SIZE * 2));
    memset(rgb_out, 255, rgb_size);

    for (i = 0; i < yuyv_size; i += YUYV_SIZE) {
        /* every YUYV macropixel expands to two RGB pixels */
        if ((counter + (RGB_SIZE * 2)) > rgb_size) {
#ifdef DEBUG
            printf("DEBUG: RGB buffer overflow [yuyv_to_rgb]\n");
#endif
            return -1;
        }
        memcpy(&yuyv_color, src + i, YUYV_SIZE);
        yuv_to_rgb(yuyv_color, &rgb_color, rgb_flags);
        memcpy(dst + counter, &rgb_color, (RGB_SIZE * 2));
        counter += (RGB_SIZE * 2);
    }
    return 0;
}
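/*
 * yuyv_to_rgb() above relies on YUYV/RGB2 structs, the YUYV_SIZE/RGB_SIZE constants and a
 * macropixel-oriented yuv_to_rgb(yuyv, rgb2, flags) helper, none of which are included in
 * this excerpt. The definitions below are a hypothetical sketch of what they could look
 * like, consistent with the memcpy sizes used by the caller: one packed YUYV macropixel
 * (Y0 U Y1 V) expands to two RGB pixels. The converter is deliberately named
 * yuv_to_rgb2_sketch to make clear it is not the original implementation.
 */
#include <stdint.h>

#define YUYV_SIZE 4              /* assumption: one packed macropixel = 4 bytes */
#define RGB_SIZE  3              /* assumption: one output pixel = 3 bytes      */

struct YUYV { uint8_t y0, u, y1, v; };                  /* assumed layout */
struct RGB2 { uint8_t r0, g0, b0, r1, g1, b1; };        /* assumed layout */

static uint8_t clamp_u8(int v) { return (uint8_t)(v < 0 ? 0 : (v > 255 ? 255 : v)); }

static void yuv_pixel(uint8_t y, uint8_t u, uint8_t v, uint8_t *r, uint8_t *g, uint8_t *b)
{
    /* standard BT.601 limited-range integer approximation */
    int c = y - 16, d = u - 128, e = v - 128;
    *r = clamp_u8((298 * c + 409 * e + 128) >> 8);
    *g = clamp_u8((298 * c - 100 * d - 208 * e + 128) >> 8);
    *b = clamp_u8((298 * c + 516 * d + 128) >> 8);
}

/* hypothetical two-pixel expansion; rgb_flags is ignored in this sketch */
static void yuv_to_rgb2_sketch(struct YUYV in, struct RGB2 *out, int rgb_flags)
{
    (void)rgb_flags;
    yuv_pixel(in.y0, in.u, in.v, &out->r0, &out->g0, &out->b0);
    yuv_pixel(in.y1, in.u, in.v, &out->r1, &out->g1, &out->b1);
}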
static void do_comb_yuva(bNode *UNUSED(node), float *out, float *in1, float *in2, float *in3, float *in4)
{
  float r, g, b;
  yuv_to_rgb(in1[0], in2[0], in3[0], &r, &g, &b);
  out[0] = r;
  out[1] = g;
  out[2] = b;
  out[3] = in4[0];
}
void CAtmoLightFilter::Transform_Packed_Y41P(IMediaSample *pSample, unsigned char *pBuffer,
                                             RECT &rect, BITMAPINFOHEADER *bmiHeader,
                                             int stride, unsigned char *rgb_buffer)
{
    // MEDIASUBTYPE_Y41P packed format: 8 pixels in 12 bytes,
    // U0 Y0 V0 Y1 U4 Y2 V4 Y3 Y4 Y5 Y6 Y7
    unsigned char *r = rgb_buffer;
    unsigned char *g = rgb_buffer + 1;
    unsigned char *b = rgb_buffer + 2;

    // 8 pixels take 12 bytes, so assume the width is always a multiple of 8 :)
    // Is the row length dword-aligned?
    int row_len = (bmiHeader->biWidth / 8) * 12;

    for (int row = 1; row <= m_atmo_capture_height; row++)
    {
        unsigned char *y41p_line = pBuffer +
            ((row * abs(bmiHeader->biHeight)) / (m_atmo_capture_height + 1)) * row_len;
        for (int col = 1; col <= m_atmo_capture_width; col++)
        {
            int pixel = (col * bmiHeader->biWidth) / (m_atmo_capture_width + 1);
            unsigned char *y41p_pixel = y41p_line + (pixel / 8) * 12;
            unsigned char y;
            switch (pixel & 7)
            {
                case 0: y = y41p_pixel[1];  break;
                case 1: y = y41p_pixel[3];  break;
                case 2: y = y41p_pixel[5];  break;
                case 3: y = y41p_pixel[7];  break;
                case 4: y = y41p_pixel[8];  break;
                case 5: y = y41p_pixel[9];  break;
                case 6: y = y41p_pixel[10]; break;
                case 7: y = y41p_pixel[11]; break;
            }
            // pixels 0..3 -> U0 at offset 0, V0 at offset 2
            // pixels 4..7 -> U4 at offset 4, V4 at offset 6
            unsigned char u = ((pixel & 7) < 4) ? y41p_pixel[0] : y41p_pixel[4];
            unsigned char v = ((pixel & 7) < 4) ? y41p_pixel[2] : y41p_pixel[6];
            yuv_to_rgb(b, g, r, y, u, v);
            r += 4; g += 4; b += 4;
        }
    }
}
void YUYVtoRGB(const picture_t *p_pic_in, planar_rgb_image_t *image_out)
{
    for (int y = 0; y < p_pic_in->p[0].i_visible_lines; y++)
    {
        const uint8_t *position = &(p_pic_in->p[0].p_pixels[y * p_pic_in->p[0].i_pitch]);
        const uint8_t *position_end = position + p_pic_in->p[0].i_visible_pitch;
        uint8_t *r_position = &(image_out->r_plane[y * image_out->pitch]);
        uint8_t *g_position = &(image_out->g_plane[y * image_out->pitch]);
        uint8_t *b_position = &(image_out->b_plane[y * image_out->pitch]);

        while (position < position_end)
        {
            int r1, g1, b1, r2, g2, b2;
            uint8_t y1 = *(position);
            uint8_t u  = *(++position);
            uint8_t y2 = *(++position);
            uint8_t v  = *(++position);
            yuv_to_rgb(&r1, &g1, &b1, y1, u, v);
            yuv_to_rgb(&r2, &g2, &b2, y2, u, v);
            *r_position = r1; *(++r_position) = r2;
            *g_position = g1; *(++g_position) = g2;
            *b_position = b1; *(++b_position) = b2;
            ++position; ++r_position; ++g_position; ++b_position;
        }
    }
}
void CAtmoLightFilter::Transform_Packed_CLJR(IMediaSample *pSample, unsigned char *pBuffer,
                                             RECT &rect, BITMAPINFOHEADER *bmiHeader,
                                             int stride, unsigned char *rgb_buffer)
{
    // MEDIASUBTYPE_CLJR packed format: 4 pixels in 4 bytes,
    // Y[5], Y[5], Y[5], Y[5], U[6], V[6] bits packed into one dword:
    /*
       Y3  31 30 29 28 27
       Y2  26 25 24 23 22
       Y1  21 20 19 18 17
       Y0  16 15 14 13 12
       U   11 10  9  8  7  6
       V    5  4  3  2  1  0
    */
#define cljr_v_mask  ((unsigned int)0x0000003F)
#define cljr_u_mask  ((unsigned int)0x00000FC0)
    // after rotation
#define cljr_y_mask  ((unsigned int)0x000000F8)
#define cljr_y0_mask ((unsigned int)0x0001F000)
#define cljr_y1_mask ((unsigned int)0x003E0000)
#define cljr_y2_mask ((unsigned int)0x07C00000)
#define cljr_y3_mask ((unsigned int)0xF8000000)

    unsigned char *r = rgb_buffer;
    unsigned char *g = rgb_buffer + 1;
    unsigned char *b = rgb_buffer + 2;
    int row_len = bmiHeader->biWidth;

    for (int row = 1; row <= m_atmo_capture_height; row++)
    {
        unsigned int *cljr_line = (unsigned int *)(pBuffer +
            ((row * abs(bmiHeader->biHeight)) / (m_atmo_capture_height + 1)) * row_len);
        for (int col = 1; col <= m_atmo_capture_width; col++)
        {
            int pixel = (col * bmiHeader->biWidth) / (m_atmo_capture_width + 1);
            // beware: is int still 32 bit on a 64-bit OS? :)
            unsigned int cljr_pixel = cljr_line[pixel / 4];
            unsigned char y = (unsigned char)((cljr_pixel >> (9 + (pixel & 3) * 5)) & cljr_y_mask);
            unsigned char u = (cljr_pixel & cljr_u_mask) >> 4;
            unsigned char v = (cljr_pixel & cljr_v_mask) << 2;
            yuv_to_rgb(b, g, r, y, u, v);
            r += 4; g += 4; b += 4;
        }
    }
}
inline value_type convert(const value_type &v, ColorSpaceType input, ColorSpaceType output)
{
    if (input == ColorSpaceRGB)
    {
        if (output == ColorSpaceRGB) return v;
        if (output == ColorSpaceYUV) return rgb_to_yuv(v);
        if (output == ColorSpaceLAB) return rgb_to_lab(v);
    }
    if (input == ColorSpaceYUV)
    {
        if (output == ColorSpaceRGB) return yuv_to_rgb(v);
        if (output == ColorSpaceYUV) return v;
        if (output == ColorSpaceLAB) return yuv_to_lab(v);
    }
    if (input == ColorSpaceLAB)
    {
        if (output == ColorSpaceRGB) return lab_to_rgb(v);
        if (output == ColorSpaceYUV) return lab_to_yuv(v);
        if (output == ColorSpaceLAB) return v;
    }
    return value_type::Zero();
}
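/*
 * A short usage sketch for the convert() dispatcher above. The helper name
 * convert_via_yuv is an illustration only; it assumes the rgb_to_yuv/yuv_to_rgb pair
 * used by convert() is (approximately) inverse, so the round trip returns a value close
 * to the input up to rounding in the underlying converters.
 */
static value_type convert_via_yuv(const value_type &rgb)
{
    // RGB -> YUV -> RGB round trip; handy as a quick sanity check of the converters.
    value_type yuv = convert(rgb, ColorSpaceRGB, ColorSpaceYUV);
    return convert(yuv, ColorSpaceYUV, ColorSpaceRGB);
}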
void CAtmoLightFilter::Transform_Planar_NV12(IMediaSample *pSample, unsigned char *pBuffer,
                                             RECT &rect, BITMAPINFOHEADER *bmiHeader,
                                             int stride, unsigned char *rgb_buffer)
{
    /* NV12 planar format: two planes
       0: Y plane at full resolution
       1: interleaved chroma plane, alternating VU along each row (or the other way
          around?), 2x2 subsampled
    */
    int y_stride = (stride * 2) / 3;
    int vu_stride = y_stride;
    int y_visible_width = (rect.right - rect.left);
    int y_visible_height = (rect.bottom - rect.top);
    int uv_visible_width = y_visible_width / 2;
    int uv_visible_height = y_visible_height / 2;

    unsigned char *r = rgb_buffer;
    unsigned char *g = rgb_buffer + 1;
    unsigned char *b = rgb_buffer + 2;

    // plane order in buffer: Y plane first, then the interleaved VU plane
    unsigned char *y_plane = pBuffer;
    unsigned char *vu_plane = y_plane + (abs(bmiHeader->biHeight) * y_stride);

    for (int row = 1; row < atmo_capture_height; row++)
    {
        unsigned char *y_line  = y_plane  + ((row * y_visible_height)  / atmo_capture_height) * y_stride;
        unsigned char *vu_line = vu_plane + ((row * uv_visible_height) / atmo_capture_height) * vu_stride;
        for (int col = 1; col < atmo_capture_width; col++)
        {
            int y_pixel  = (col * y_visible_width)  / atmo_capture_width;
            int uv_pixel = ((col * uv_visible_width) / atmo_capture_width) * 2;
            yuv_to_rgb(b, g, r, y_line[y_pixel], vu_line[uv_pixel + 1], vu_line[uv_pixel]);
            r += 4; g += 4; b += 4;
        }
    }
}
// Function that copies into "dst" (which must be large enough) the pixels in RGB format
// (actually BGR) taken from the camera source (YUV420P format).
void VideoDevice::YUVtoBGR(unsigned char *dst)
{
    int x, y;
    unsigned char *buf, *uu, *vv;
    static int r, g, b;
    int w = vmap.width;
    int h = vmap.height;

    buf = C4L::CaptureV4LGetImage(vmap, vm);
    uu = buf + h * w;          // U plane follows the full-resolution Y plane
    vv = uu + h * w / 4;       // V plane follows the quarter-size U plane

    for (y = 0; y < h; y++)
        for (x = 0; x < w; x++)
        {
            yuv_to_rgb(*buf, uu[x / 2 + (y / 2) * w / 2], vv[x / 2 + (y / 2) * w / 2], r, g, b);
            *dst++ = b;
            *dst++ = g;
            *dst++ = r;
            buf += 1;
        }
}
void CAtmoLightFilter::Transform_Packed_Y411(IMediaSample *pSample, unsigned char *pBuffer,
                                             RECT &rect, BITMAPINFOHEADER *bmiHeader,
                                             int stride, unsigned char *rgb_buffer)
{
    unsigned char *r = rgb_buffer;
    unsigned char *g = rgb_buffer + 1;
    unsigned char *b = rgb_buffer + 2;

    // 4 pixels take 6 bytes, so assume the width is always a multiple of 4 :)
    // Is the row length dword-aligned?
    int row_len = (bmiHeader->biWidth / 4) * 6;

    for (int row = 1; row <= m_atmo_capture_height; row++)
    {
        unsigned char *y411_line = pBuffer +
            ((row * abs(bmiHeader->biHeight)) / (m_atmo_capture_height + 1)) * row_len;
        for (int col = 1; col <= m_atmo_capture_width; col++)
        {
            int pixel = (col * bmiHeader->biWidth) / (m_atmo_capture_width + 1);
            unsigned char *y411_pixel = y411_line + (pixel / 4) * 6;
            unsigned char y;
            switch (pixel & 3)
            {
                case 0: y = y411_pixel[1]; break;
                case 1: y = y411_pixel[2]; break;
                case 2: y = y411_pixel[4]; break;
                case 3: y = y411_pixel[5]; break;
            }
            unsigned char u = y411_pixel[0];
            unsigned char v = y411_pixel[3];
            yuv_to_rgb(b, g, r, y, u, v);
            r += 4; g += 4; b += 4;
        }
    }
}
void get_rgb_frame(uchar **frame, uchar *rgb_frame, int g_height, int g_width)
{
    uchar Y, U, V;
    uchar R, G, B;
    int pix_c = 0;

    for (int i = 0; i < g_height; i++)
    {
        for (int j = 0; j < 3 * g_width; j += 3)
        {
            Y = frame[i][j + 0];
            U = frame[i][j + 1];
            V = frame[i][j + 2];
            yuv_to_rgb(Y, U, V, &R, &G, &B);
            /* store the converted pixel as RGBA */
            rgb_frame[4 * pix_c + 0] = R;
            rgb_frame[4 * pix_c + 1] = G;
            rgb_frame[4 * pix_c + 2] = B;
            rgb_frame[4 * pix_c + 3] = 255;
            pix_c++;
        }
    }
}
void CAtmoLightFilter::Transform_Planar_IYUV_I420(IMediaSample *pSample, unsigned char *pBuffer,
                                                  RECT &rect, BITMAPINFOHEADER *bmiHeader,
                                                  int stride, unsigned char *rgb_buffer)
{
    /* IYUV / I420 is a planar format */
    int y_stride = (stride * 2) / 3;
    int uv_stride = y_stride / 2;
    int y_visible_width = (rect.right - rect.left);
    int y_visible_height = (rect.bottom - rect.top);
    int uv_visible_width = y_visible_width / 2;
    int uv_visible_height = y_visible_height / 2;

    unsigned char *r = rgb_buffer;
    unsigned char *g = rgb_buffer + 1;
    unsigned char *b = rgb_buffer + 2;

    // plane order in buffer: Y, U, V
    unsigned char *y_plane = pBuffer;
    unsigned char *u_plane = y_plane + (abs(bmiHeader->biHeight) * y_stride);
    unsigned char *v_plane = u_plane + (abs(bmiHeader->biHeight) / 2 * uv_stride);

    for (int row = 1; row < atmo_capture_height; row++)
    {
        unsigned char *y_line = y_plane + ((row * y_visible_height)  / atmo_capture_height) * y_stride;
        unsigned char *u_line = u_plane + ((row * uv_visible_height) / atmo_capture_height) * uv_stride;
        unsigned char *v_line = v_plane + ((row * uv_visible_height) / atmo_capture_height) * uv_stride;
        for (int col = 1; col < atmo_capture_width; col++)
        {
            int y_pixel  = (col * y_visible_width)  / atmo_capture_width;
            int uv_pixel = (col * uv_visible_width) / atmo_capture_width;
            yuv_to_rgb(b, g, r, y_line[y_pixel], u_line[uv_pixel], v_line[uv_pixel]);
            r += 4; g += 4; b += 4;
        }
    }
}
void CAtmoLightFilter::Transform_Packed_YVYU(IMediaSample *pSample, unsigned char *pBuffer,
                                             RECT &rect, BITMAPINFOHEADER *bmiHeader,
                                             int stride, unsigned char *rgb_buffer)
{
    // MEDIASUBTYPE_YVYU packed format: 2 pixels in 4 bytes, Y0 V0 Y1 U0
    unsigned char *r = rgb_buffer;
    unsigned char *g = rgb_buffer + 1;
    unsigned char *b = rgb_buffer + 2;
    int row_len = (bmiHeader->biWidth / 2) * 4;

    for (int row = 1; row <= m_atmo_capture_height; row++)
    {
        unsigned char *yvyu_line = pBuffer +
            ((row * abs(bmiHeader->biHeight)) / (m_atmo_capture_height + 1)) * row_len;
        for (int col = 1; col <= m_atmo_capture_width; col++)
        {
            int pixel = (col * bmiHeader->biWidth) / (m_atmo_capture_width + 1);
            unsigned char *yvyu_pixel = yvyu_line + (pixel / 2) * 4;
            unsigned char y = ((pixel & 1) == 1) ? yvyu_pixel[2] : yvyu_pixel[0];
            unsigned char u = yvyu_pixel[3];
            unsigned char v = yvyu_pixel[1];
            yuv_to_rgb(b, g, r, y, u, v);
            r += 4; g += 4; b += 4;
        }
    }
}
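/*
 * The CAtmoLightFilter::Transform_* routines above all call a shared
 * yuv_to_rgb(b, g, r, y, u, v) helper that writes one 8-bit pixel through the three
 * destination pointers. That helper is not part of this excerpt; the sketch below is a
 * hypothetical stand-in using the common BT.601 limited-range integer approximation,
 * named yuv_to_rgb_bgr_sketch to make clear it is not the original implementation.
 */
static inline unsigned char clamp255(int v)
{
    return (unsigned char)(v < 0 ? 0 : (v > 255 ? 255 : v));
}

static void yuv_to_rgb_bgr_sketch(unsigned char *b, unsigned char *g, unsigned char *r,
                                  unsigned char y, unsigned char u, unsigned char v)
{
    int c = (int)y - 16, d = (int)u - 128, e = (int)v - 128;
    *r = clamp255((298 * c + 409 * e + 128) >> 8);
    *g = clamp255((298 * c - 100 * d - 208 * e + 128) >> 8);
    *b = clamp255((298 * c + 516 * d + 128) >> 8);
}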
static UINT video_VideoData(VideoClientContext *context, TSMM_VIDEO_DATA *data)
{
    VideoClientContextPriv *priv = context->priv;
    PresentationContext *presentation;
    int status;

    presentation = priv->currentPresentation;
    if (!presentation)
    {
        WLog_ERR(TAG, "no current presentation");
        return CHANNEL_RC_OK;
    }

    if (presentation->PresentationId != data->PresentationId)
    {
        WLog_ERR(TAG, "current presentation id=%d doesn't match data id=%d",
                 presentation->PresentationId, data->PresentationId);
        return CHANNEL_RC_OK;
    }

    if (!Stream_EnsureRemainingCapacity(presentation->currentSample, data->cbSample))
    {
        WLog_ERR(TAG, "unable to expand the current packet");
        return CHANNEL_RC_NO_MEMORY;
    }

    Stream_Write(presentation->currentSample, data->pSample, data->cbSample);

    if (data->CurrentPacketIndex == data->PacketsInSample)
    {
        H264_CONTEXT *h264 = presentation->h264;
        UINT64 startTime = GetTickCount64(), timeAfterH264;
        MAPPED_GEOMETRY *geom = presentation->geometry;

        Stream_SealLength(presentation->currentSample);
        Stream_SetPosition(presentation->currentSample, 0);

        status = h264->subsystem->Decompress(h264, Stream_Pointer(presentation->currentSample),
                                             Stream_Length(presentation->currentSample));
        /* nothing decoded yet, or a decode error: skip this sample */
        if (status <= 0)
            return CHANNEL_RC_OK;

        timeAfterH264 = GetTickCount64();
        if (data->SampleNumber == 1)
        {
            presentation->lastPublishTime = startTime;
        }

        presentation->lastPublishTime += (data->hnsDuration / 10000);
        if (presentation->lastPublishTime <= timeAfterH264 + 10)
        {
            int dropped = 0;

            /* if the frame is to be published in less than 10 ms, consider it due now */
            yuv_to_rgb(presentation, presentation->surfaceData);
            context->showSurface(context, presentation->surface);
            priv->publishedFrames++;

            /* clean up previously scheduled frames */
            EnterCriticalSection(&priv->framesLock);
            while (Queue_Count(priv->frames) > 0)
            {
                VideoFrame *frame = Queue_Dequeue(priv->frames);
                if (frame)
                {
                    priv->droppedFrames++;
                    VideoFrame_free(&frame);
                    dropped++;
                }
            }
            LeaveCriticalSection(&priv->framesLock);

            if (dropped)
                WLog_DBG(TAG, "showing frame (%d dropped)", dropped);
        }
        else
        {
            BOOL enqueueResult;
            VideoFrame *frame = calloc(1, sizeof(*frame));
            if (!frame)
            {
                WLog_ERR(TAG, "unable to create frame");
                return CHANNEL_RC_NO_MEMORY;
            }

            mappedGeometryRef(geom);
            frame->presentation = presentation;
            frame->publishTime = presentation->lastPublishTime;
            frame->geometry = geom;
            frame->w = presentation->SourceWidth;
            frame->h = presentation->SourceHeight;

            frame->surfaceData = BufferPool_Take(priv->surfacePool, frame->w * frame->h * 4);
            if (!frame->surfaceData)
            {
                WLog_ERR(TAG, "unable to allocate frame data");
                mappedGeometryUnref(geom);
                free(frame);
                return CHANNEL_RC_NO_MEMORY;
            }

            if (!yuv_to_rgb(presentation, frame->surfaceData))
            {
                WLog_ERR(TAG, "error during YUV->RGB conversion");
                BufferPool_Return(priv->surfacePool, frame->surfaceData);
                mappedGeometryUnref(geom);
                free(frame);
                return CHANNEL_RC_NO_MEMORY;
            }

            InterlockedIncrement(&presentation->refCounter);

            EnterCriticalSection(&priv->framesLock);
            enqueueResult = Queue_Enqueue(priv->frames, frame);
            LeaveCriticalSection(&priv->framesLock);

            if (!enqueueResult)
            {
                WLog_ERR(TAG, "unable to enqueue frame");
                VideoFrame_free(&frame);
                return CHANNEL_RC_NO_MEMORY;
            }

            WLog_DBG(TAG, "scheduling frame in %"PRIu64" ms", (frame->publishTime - startTime));
        }
    }

    return CHANNEL_RC_OK;
}
// Read the data from memory, convert it to RGB, and then show the picture.
void show_video(IMAGE_CONTEXT *image_ctx, uchar **frame, uchar thres[3][256],
                int color, int bf, int bc, bool *halt)
{
    XImage *xImage1 = image_ctx->xImage;
    XEvent event;
    uchar Y, U, V;
    uchar R, G, B;
    int pix_c = 0;
    int by_s, by_e, bx_s, bx_e, ys, ye, us, ue, vs, ve;
    uchar *imageLine1 = (uchar *)xImage1->data;

    for (int i = 0; i < g_height; i++)
    {
        for (int j = 0; j < 3 * g_width; j += 3)
        {
            Y = frame[i][j + 0];
            U = frame[i][j + 1];
            V = frame[i][j + 2];
            yuv_to_rgb(Y, U, V, &R, &G, &B);
            imageLine1[4 * pix_c + 0] = R;
            imageLine1[4 * pix_c + 1] = G;
            imageLine1[4 * pix_c + 2] = B;
            imageLine1[4 * pix_c + 3] = 255;
            pix_c++;
        }
    }

    image_put(image_ctx);

    if (XPending(image_ctx->display) > 0)
    {
        XNextEvent(image_ctx->display, &event);
        if (event.type == KeyPress)
        {
            if (event.xkey.keycode == 65)    // SPACE pressed: toggle pause
                *halt = !(*halt);
            else                             // any other key: restore previous thresholds
                memcpy(thres, last_thres, 768);
        }
        if (event.type == ButtonPress)
        {
            // Save the current thresholds, then widen them around the clicked pixel:
            // the loops below cover an image window of half-size bf around the click and
            // a YUV range of half-width bc around each sampled component.
            memcpy(last_thres, thres, 768);
            by_s = 0; by_e = g_height;
            bx_s = 0; bx_e = g_width;
            if (event.xbutton.y - bf + 1 >= 0)    by_s = event.xbutton.y - bf + 1;
            if (event.xbutton.y + bf <= g_height) by_e = event.xbutton.y + bf;
            if (event.xbutton.x - bf + 1 >= 0)    bx_s = event.xbutton.x - bf + 1;
            if (event.xbutton.x + bf <= g_width)  bx_e = event.xbutton.x + bf;

            for (int i = by_s; i < by_e; i++)
                for (int j = bx_s; j < bx_e; j++)
                {
                    Y = frame[i][3 * j + 0];
                    U = frame[i][3 * j + 1];
                    V = frame[i][3 * j + 2];
                    ys = 0; ye = 256;
                    us = 0; ue = 256;
                    vs = 0; ve = 256;
                    if (Y - bc + 1 >= 0)  ys = Y - bc + 1;
                    if (Y + bc <= 256)    ye = Y + bc;
                    if (U - bc + 1 >= 0)  us = U - bc + 1;
                    if (U + bc <= 256)    ue = U + bc;
                    if (V - bc + 1 >= 0)  vs = V - bc + 1;
                    if (V + bc <= 256)    ve = V + bc;
                    for (int k = ys; k < ye; k++) thres[0][k] |= 1 << color;
                    for (int k = us; k < ue; k++) thres[1][k] |= 1 << color;
                    for (int k = vs; k < ve; k++) thres[2][k] |= 1 << color;
                }
        }
    }
}
static void combine_side_by_side_yuv420(picture_t *p_inpic, picture_t *p_outpic, int left, int right)
{
    uint8_t *y1inl = p_inpic->p[Y_PLANE].p_pixels;
    uint8_t *y2inl;
    uint8_t *uinl = p_inpic->p[U_PLANE].p_pixels;
    uint8_t *vinl = p_inpic->p[V_PLANE].p_pixels;
    uint8_t *y1out = p_outpic->p[Y_PLANE].p_pixels;
    uint8_t *y2out;
    uint8_t *uout = p_outpic->p[U_PLANE].p_pixels;
    uint8_t *vout = p_outpic->p[V_PLANE].p_pixels;

    const int in_pitch = p_inpic->p[Y_PLANE].i_pitch;
    const int out_pitch = p_outpic->p[Y_PLANE].i_pitch;
    const int visible_pitch = p_inpic->p[Y_PLANE].i_visible_pitch;
    const int visible_lines = p_inpic->p[Y_PLANE].i_visible_lines;
    const int uv_visible_pitch = p_inpic->p[U_PLANE].i_visible_pitch;

    const uint8_t *yend = y1inl + visible_lines * in_pitch;

    while (y1inl < yend)
    {
        uint8_t *y1inr = y1inl + visible_pitch / 2;
        uint8_t *y2inr;
        uint8_t *uinr = uinl + uv_visible_pitch / 2;
        uint8_t *vinr = vinl + uv_visible_pitch / 2;
        const uint8_t *y1end = y1inr;

        y2inl = y1inl + in_pitch;
        y2inr = y1inr + in_pitch;
        y2out = y1out + out_pitch;

        while (y1inl < y1end)
        {
            int rl, gl, bl, rr, gr, br, r, g, b;

            /* halve the contribution of any channel that is taken from both eyes */
            int rshift = !!((0xff0000 & left) && (0xff0000 & right));
            int gshift = !!((0x00ff00 & left) && (0x00ff00 & right));
            int bshift = !!((0x0000ff & left) && (0x0000ff & right));

            yuv_to_rgb(&rl, &gl, &bl, *y1inl, *uinl, *vinl);
            yuv_to_rgb(&rr, &gr, &br, *y1inr, *uinr, *vinr);
            r = ((!!(0xff0000 & left)) * rl + (!!(0xff0000 & right)) * rr) >> rshift;
            g = ((!!(0x00ff00 & left)) * gl + (!!(0x00ff00 & right)) * gr) >> gshift;
            b = ((!!(0x0000ff & left)) * bl + (!!(0x0000ff & right)) * br) >> bshift;
            rgb_to_yuv(y1out, uout++, vout++, r, g, b);
            y1out[1] = *y1out;
            y1out += 2;
            y1inl++;
            y1inr++;

            yuv_to_rgb(&rl, &gl, &bl, *y1inl, *uinl, *vinl);
            yuv_to_rgb(&rr, &gr, &br, *y1inr, *uinr, *vinr);
            r = ((!!(0xff0000 & left)) * rl + (!!(0xff0000 & right)) * rr) >> rshift;
            g = ((!!(0x00ff00 & left)) * gl + (!!(0x00ff00 & right)) * gr) >> gshift;
            b = ((!!(0x0000ff & left)) * bl + (!!(0x0000ff & right)) * br) >> bshift;
            rgb_to_yuv(y1out, uout++, vout++, r, g, b);
            y1out[1] = *y1out;
            y1out += 2;
            y1inl++;
            y1inr++;

            yuv_to_rgb(&rl, &gl, &bl, *y2inl, *uinl, *vinl);
            yuv_to_rgb(&rr, &gr, &br, *y2inr, *uinr, *vinr);
            r = ((!!(0xff0000 & left)) * rl + (!!(0xff0000 & right)) * rr) >> rshift;
            g = ((!!(0x00ff00 & left)) * gl + (!!(0x00ff00 & right)) * gr) >> gshift;
            b = ((!!(0x0000ff & left)) * bl + (!!(0x0000ff & right)) * br) >> bshift;
            rgb_to_yuv(y2out, uout /* will be overwritten later, as will vout */, vout, r, g, b);
            y2out[1] = *y2out;
            y2out += 2;
            y2inl++;
            y2inr++;

            yuv_to_rgb(&rl, &gl, &bl, *y2inl, *uinl, *vinl);
            yuv_to_rgb(&rr, &gr, &br, *y2inr, *uinr, *vinr);
            r = ((!!(0xff0000 & left)) * rl + (!!(0xff0000 & right)) * rr) >> rshift;
            g = ((!!(0x00ff00 & left)) * gl + (!!(0x00ff00 & right)) * gr) >> gshift;
            b = ((!!(0x0000ff & left)) * bl + (!!(0x0000ff & right)) * br) >> bshift;
            rgb_to_yuv(y2out, uout /* will be overwritten later, as will vout */, vout, r, g, b);
            y2out[1] = *y2out;
            y2out += 2;
            y2inl++;
            y2inr++;

            uinl++;
            vinl++;
            uinr++;
            vinr++;
        }

        y1inl = y1inr + 2 * in_pitch - visible_pitch;
        y1out += 2 * out_pitch - visible_pitch;
        uinl = uinr + p_inpic->p[U_PLANE].i_pitch - uv_visible_pitch;
        vinl = vinr + p_inpic->p[V_PLANE].i_pitch - uv_visible_pitch;
        uout += p_outpic->p[U_PLANE].i_pitch - uv_visible_pitch;
        vout += p_outpic->p[V_PLANE].i_pitch - uv_visible_pitch;
    }
}
inline value_type yuv_to_lab(const value_type& v) { return rgb_to_lab(yuv_to_rgb(v)); }
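/*
 * convert() and yuv_to_lab() above treat value_type as a 3-component vector type that
 * provides value_type::Zero() (an Eigen-style Vector3f fits that shape). The original
 * yuv_to_rgb(value_type) overload is not included in this excerpt; the sketch below is a
 * hypothetical stand-in operating on normalized components with approximate BT.601
 * coefficients, and the names value_type_sketch / yuv_to_rgb_sketch are illustrative only.
 */
#include <Eigen/Core>

using value_type_sketch = Eigen::Vector3f;   // assumption: value_type behaves like this

inline value_type_sketch yuv_to_rgb_sketch(const value_type_sketch &yuv)
{
    const float y = yuv[0], u = yuv[1], v = yuv[2];
    return value_type_sketch(y + 1.140f * v,
                             y - 0.394f * u - 0.581f * v,
                             y + 2.032f * u);
}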