Example #1
0
/*
build histogram of viewport Y values (downsampled by HISTO_STEP_SIZE)
NOTE also used by lua get_live_histo
*/
int live_histogram_read_y(unsigned short *h)
{
    int width = vid_get_viewport_width();
    int height = vid_get_viewport_height();
    int row_skip = vid_get_viewport_row_offset();

    // number of samples that would be taken over the full viewport;
    // returned even when no buffer is available
    int sample_count = (width * height) / (HISTO_STEP_SIZE * 2);

    memset(h, 0, 256 * sizeof(unsigned short));

    unsigned char *p = vid_get_viewport_active_buffer();
    if (!p)
        return sample_count;

    // offset into viewport for when image size != viewport size;
    // +1 skips the leading U byte so *p reads a Y value
    p += vid_get_viewport_image_offset() + 1;

    int row, col;
    for (row = 0; row < height; row++)
    {
        for (col = 0; col < width; col += HISTO_STEP_SIZE * 2)
        {
            h[*p]++;
            p += HISTO_STEP_SIZE * 6;
        }
        p += row_skip;  // skip unused bytes at the end of each buffer row
    }

    return sample_count;
}
Example #2
0
// Benchmark raw read speed of the active viewport buffer.
// Reads the whole buffer 64 times and stores the resulting bytes/sec
// in bench.screen_input_bps. Compiled at -O0 so the read loop is not
// optimized away.
static void __attribute__((optimize("O0"))) bench_screen_read() {
    long t;
    register unsigned int i, n, s;
    register char c;
    register char *scr;

    scr = vid_get_viewport_active_buffer();
    if (!scr) return;   // no live buffer available, nothing to measure
    s = camera_screen.width * vid_get_viewport_height() * 3;
    t = get_tick_count();
    for (n=0; n<64; ++n)
        for (i=0; i<s; ++i)
            c = scr[i];
    (void)c;            // value intentionally discarded; loop only measures reads
    t = get_tick_count() - t;
    // guard against division by zero if the loop finished in under 10 ticks
    if (t < 10) t = 10;
    // s*64*100 / (t/10) == bytes*1000/ms without overflowing 32-bit math
    bench.screen_input_bps = s*64*100 / (t/10);
}
Example #3
0
/*
send selected data for live view
returns 0 on error, total size on success
should only be called from ptp handler

Builds one contiguous header buffer (lv_data_header + two
lv_framebuffer_desc + optional palette) so it can go out in a single
send call, then streams the requested framebuffers afterwards.
data_start offsets stored in the descriptors are relative to the start
of the whole transfer, so the client can locate each section.
flags: bitwise OR of LV_TFR_PALETTE / LV_TFR_VIEWPORT / LV_TFR_BITMAP
*/
int live_view_get_data(ptp_data *data, int flags) {
    // section sizes; zero means "not requested / not available" below
    int vp_size = 0,bm_size = 0,pal_size = 0;
    lv_data_header *lv;         // transfer header
    lv_framebuffer_desc *vp;    // viewport (live image) descriptor
    lv_framebuffer_desc *bm;    // bitmap (UI overlay) descriptor

    // determine if we will send palette so it can go in one send
    if ( flags & LV_TFR_PALETTE ) // bitmap palette
    {
        // if no palette, will be set to zero
        pal_size = vid_get_palette_size();
    }
    
    // one contiguous buffer to allow a single send call
    int buf_size = sizeof(lv_data_header) + sizeof(lv_framebuffer_desc)*2 + pal_size;
    void *buf = malloc(buf_size);
    if(!buf) {
        return 0;
    }
    lv = buf;

    // descriptors follow the header back-to-back inside buf
    lv->vp_desc_start = sizeof(lv_data_header);
    lv->bm_desc_start = lv->vp_desc_start+sizeof(lv_framebuffer_desc);

    vp = buf + lv->vp_desc_start;
    bm = buf + lv->bm_desc_start;

    lv->version_major = LIVE_VIEW_VERSION_MAJOR;
    lv->version_minor = LIVE_VIEW_VERSION_MINOR;

    lv->lcd_aspect_ratio = vid_get_aspect_ratio();

    lv->palette_type = vid_get_palette_type();
    lv->palette_data_start = 0;     // 0 = no palette in this transfer


    vp->fb_type = LV_FB_YUV8;
    vp->buffer_width = vid_get_viewport_buffer_width_proper();

    vp->visible_width = vid_get_viewport_width_proper();
    vp->visible_height = vid_get_viewport_height_proper();

    vp->margin_left = vid_get_viewport_display_xoffset_proper();
    vp->margin_top = vid_get_viewport_display_yoffset_proper();

    // TODO returning margins from lib.c might be better
    // can end up with negative if size and offset sources don't update at exactly the same time
    vp->margin_right = vid_get_viewport_fullscreen_width() - vp->visible_width - vp->margin_left;
    vp->margin_bot = vid_get_viewport_fullscreen_height() - vp->visible_height - vp->margin_top;

    bm->fb_type = LV_FB_PAL8;
    bm->buffer_width = camera_screen.buffer_width;

    bm->margin_left = 0;
    bm->margin_top = 0;
    bm->margin_right = 0;
    bm->margin_bot = 0;

    bm->visible_width = ASPECT_XCORRECTION(camera_screen.width);
    bm->visible_height = camera_screen.height;


    // 0 = section not present; filled in below if requested
    vp->data_start = 0;
    bm->data_start = 0;

    // running total; each requested section is appended after the header
    int total_size = buf_size;

    void *vp_fb = vid_get_viewport_active_buffer();
    // Add viewport details if requested, and not null
    if ( flags & LV_TFR_VIEWPORT && vp_fb) // live buffer
    {
        vp->data_start = total_size;
        // YUV viewport data: 6 bytes per 4 pixels (UYVYYY)
        vp_size = (vp->buffer_width*vp->visible_height*6)/4;
        total_size += vp_size;
        // offset to start of actual data
        vp_fb += vid_get_viewport_image_offset();
    }

    // Add bitmap details if requested
    if ( flags & LV_TFR_BITMAP ) // bitmap buffer
    {
        bm->data_start = total_size;
        bm_size = bm->buffer_width*bm->visible_height;
        total_size += bm_size;
    }

    // Add palette details if requested and available
    if ( pal_size ) // bitmap palette
    {
        // palette occupies the tail end of the header buffer
        lv->palette_data_start = buf_size - pal_size;
        memcpy(buf + lv->palette_data_start,vid_get_bitmap_active_palette(),pal_size);
    }

    // Send header structure (along with total size to be sent)
    data->send_data(data->handle,(char*)buf,buf_size,total_size,0,0,0);

    // Send viewport data if requested
    if ( vp_size )
    {
        data->send_data(data->handle,vp_fb,vp_size,0,0,0,0);
    }

    // Send bitmap data if requested
    if ( bm_size )
    {
        data->send_data(data->handle,vid_get_bitmap_active_buffer(),bm_size,0,0,0,0);
    }

    free(buf);
    return total_size;
}
Example #4
0
// Incrementally build the on-screen histograms (Y, R, G, B, RGB) from the
// live viewport. State is kept in `static` locals and the file-scope
// histogram_stage so the work can be spread across multiple calls from the
// main task loop; each call performs exactly one stage (see table below).
void histogram_process()
{
    // persist across calls: the staged processing below resumes from these
    static unsigned char *img;
    static int viewport_size, viewport_width, viewport_row_offset;

    register int x, i, hi;
    int y, v, u, c;
    float (*histogram_transform)(float);    // display transform (linear or log)
    unsigned int histo_fill[5];             // per-channel sum of displayed bar heights
    int histo_main;                         // channel used for auto-magnification

    // over/under exposure trigger level, proportional to screen size
    long exposition_thresh = camera_screen.size / 500;

    // Select transform function
    switch (conf.histo_mode)
    {
        case HISTO_MODE_LOG: 
            histogram_transform = logarithmic; 
            break;
        case HISTO_MODE_LINEAR: 
        default:
            histogram_transform = identity; 
            break;
    }

    // Select which calculated histogram channel determines magnification / scaling
    if (conf.histo_layout == OSD_HISTO_LAYOUT_Y || conf.histo_layout == OSD_HISTO_LAYOUT_Y_argb)
        histo_main = HISTO_Y;
    else
        histo_main = HISTO_RGB;

    histogram_alloc();

    // This function is called in the main spytask loop roughly every 20msec
    // To avoid hogging all the CPU it performs it's work in stages controlled by histogram-stage
    // Stage  Function
    //   0      Initialize global variables used in next stages
    //   1,2,3  Count number of values for a third of the viewport image at each stage
    //   4      Calculate max values, over and under exposure setting
    //   5      Calculate the histogram display values
    switch (histogram_stage)
    {
        case 0:
            img=vid_get_viewport_active_buffer();
            if (!img) return;   // no live buffer; stay in stage 0 and retry next call

            img += vid_get_viewport_image_offset();		// offset into viewport for when image size != viewport size (e.g. 16:9 image on 4:3 LCD)
            viewport_size = vid_get_viewport_height() * vid_get_viewport_byte_width() * vid_get_viewport_yscale();
            viewport_width = vid_get_viewport_width();
            viewport_row_offset = vid_get_viewport_row_offset();
            // reset raw counts and maxima for all 5 channels
            for (c=0; c<5; ++c) {
                memset(histogram_proc[c],0,256*sizeof(unsigned short));
                histo_max[c] = histo_max_center[c] = 0;
            }

            histogram_stage=1;
            break;

        case 1:
        case 2:
        case 3:
            // Each of the three stages samples an interleaved third of the
            // viewport: stage k starts at byte (k-1)*6 and steps by
            // HISTO_STEP_SIZE 6-byte (UYVYYY) blocks.
            x = 0;  // count how many blocks we have done on the current row (to skip unused buffer space at end of each row)
            for (i=(histogram_stage-1)*6; i<viewport_size; i+=HISTO_STEP_SIZE*6) {
                y = img[i+1];
                u = *(signed char*)(&img[i]);
                //if (u&0x00000080) u|=0xFFFFFF00;  // Compiler should handle the unsigned -> signed conversion
                v = *(signed char*)(&img[i+2]);
                //if (v&0x00000080) v|=0xFFFFFF00;  // Compiler should handle the unsigned -> signed conversion

                // fixed-point (12-bit) YUV -> RGB conversion, clipped to 0..255
                ++histogram_proc[HISTO_Y][y];                       // Y
                hi = clip(((y<<12)          + v*5743 + 2048)>>12);  // R
                ++histogram_proc[HISTO_R][hi];
                hi = clip(((y<<12) - u*1411 - v*2925 + 2048)>>12);  // G
                ++histogram_proc[HISTO_G][hi];
                hi = clip(((y<<12) + u*7258          + 2048)>>12);  // B
                ++histogram_proc[HISTO_B][hi];

                // Handle case where viewport memory buffer is wider than the actual buffer.
                x += HISTO_STEP_SIZE * 2;	// viewport width is measured in blocks of three bytes each even though the data is stored in six byte chunks !
                if (x == viewport_width)
                {
                    i += viewport_row_offset;
                    x = 0;
                }
            }

            ++histogram_stage;
            break;

        case 4:
            for (i=0, c=0; i<HISTO_WIDTH; ++i, c+=2) { // G
                // Merge each pair of values into a single value (for width = 128)
                // Warning: this is optimised for HISTO_WIDTH = 128, don't change the width unless you re-write this code as well.
                histogram_proc[HISTO_Y][i] = histogram_proc[HISTO_Y][c] + histogram_proc[HISTO_Y][c+1];
                histogram_proc[HISTO_R][i] = histogram_proc[HISTO_R][c] + histogram_proc[HISTO_R][c+1];
                histogram_proc[HISTO_G][i] = histogram_proc[HISTO_G][c] + histogram_proc[HISTO_G][c+1];
                histogram_proc[HISTO_B][i] = histogram_proc[HISTO_B][c] + histogram_proc[HISTO_B][c+1];
                // Calc combined RGB totals
                histogram_proc[HISTO_RGB][i] = histogram_proc[HISTO_RGB][i] = histogram_proc[HISTO_R][i] + histogram_proc[HISTO_G][i] + histogram_proc[HISTO_B][i];
            }

            // calculate maximums
            for (c=0; c<5; ++c) {
                for (i=0; i<HISTO_WIDTH; ++i) {
                    if (histo_max[c]<histogram_proc[c][i])
                        histo_max[c]=histogram_proc[c][i];
                    // center max ignores histo_ignore_boundary bins at each edge
                    if (histo_max_center[c]<histogram_proc[c][i] && i>=conf.histo_ignore_boundary && i<HISTO_WIDTH-conf.histo_ignore_boundary)
                        histo_max_center[c]=histogram_proc[c][i];
                }

                // precompute inverse scale so stage 5 can multiply instead of divide
                if (histo_max_center[c] > 0) {
                    histo_max_center_invw[c] = ((float)HISTO_HEIGHT)/histogram_transform((float)histo_max_center[c]);
                } else if (histo_max[c] > 0) {
                    histo_max_center_invw[c] = ((float)HISTO_HEIGHT)/histogram_transform((float)histo_max[c]);
                } else {
                    histo_max_center_invw[c] = 0.0f;
                }
            }

            if (histo_max[HISTO_RGB] > 0) { // over- / under- expos
                // weighted count of the darkest / brightest three bins
                under_exposed = (histogram_proc[HISTO_RGB][0]*8
                                +histogram_proc[HISTO_RGB][1]*4
                                +histogram_proc[HISTO_RGB][2]) > exposition_thresh;

                over_exposed  = (histogram_proc[HISTO_RGB][HISTO_WIDTH-3]
                                +histogram_proc[HISTO_RGB][HISTO_WIDTH-2]*4
                                +histogram_proc[HISTO_RGB][HISTO_WIDTH-1]*8) > exposition_thresh;
            } else {
                over_exposed = 0;
                under_exposed = 1;
            }

            histogram_stage=5;
            break;

        case 5:
            // convert raw counts into clipped display bar heights
            for (c=0; c<5; ++c) {
                histo_fill[c]=0;
                for (i=0; i<HISTO_WIDTH; ++i) {
                    histogram[c][i] = (histogram_transform((float)histogram_proc[c][i]))*histo_max_center_invw[c];
                    if (histogram[c][i] > HISTO_HEIGHT)
                        histogram[c][i] = HISTO_HEIGHT;
                    histo_fill[c]+=histogram[c][i];
                }
            }

            histo_magnification = 0;
            if (conf.histo_auto_ajust) {
                if (histo_fill[histo_main] < (HISTO_HEIGHT*HISTO_WIDTH)/5) { // try to ajust if average level is less than 20%
                    histo_magnification = (20*HISTO_HEIGHT*HISTO_WIDTH) / histo_fill[histo_main];
                    for (c=0; c<5; ++c) {
                        for (i=0;i<HISTO_WIDTH;i++) {
                            histogram[c][i] = histogram[c][i] * histo_magnification / 100;
                            if (histogram[c][i] > HISTO_HEIGHT)
                                histogram[c][i] = HISTO_HEIGHT;
                        }
                    }
                }
            }

            histogram_stage=0;  // restart the cycle on the next call
            break;
    }

}
Example #5
0
// Sobel edge detector
// Scans one horizontal slice of the viewport (selected by the file-scope
// `slice`), marks detected edge pixels in the `edgebuf` bit vector, and
// optionally pre-smooths the source with a 3x3 average filter.
// Returns nonzero if KEY_SHOOT_FULL was pressed at any point during the
// scan (polled frequently so a full-press is not missed during the work).
static int calc_edge_overlay()
{
    int shutter_fullpress = kbd_is_key_pressed(KEY_SHOOT_FULL);

    const unsigned char* img = vid_get_viewport_active_buffer();
    if (!img) return shutter_fullpress;

    const unsigned char*  ptrh1 = NULL;    // previous pixel line
    const unsigned char*  ptrh2 = NULL;    // current pixel line
    const unsigned char*  ptrh3 = NULL;    // next pixel line
    unsigned char*  smptr = NULL;    // pointer to line in smbuf
    int x, y, xdiv3;
    int conv1, conv2;       // vertical / horizontal convolution results

    // rows covered by this slice (work is split across EDGE_SLICES calls)
    const int y_min = camera_screen.edge_hmargin+ slice   *slice_height;
    const int y_max = camera_screen.edge_hmargin+(slice+1)*slice_height;
    // byte offsets within a row; margins keep the 3x3 kernel in bounds
    const int x_min = 6;
    const int x_max = (viewport_width - 2) * 3;

    img += vid_get_viewport_image_offset();		// offset into viewport for when image size != viewport size (e.g. 16:9 image on 4:3 LCD)

    xoffset = 0;
    yoffset = 0;

    // Reserve buffers
    ensure_allocate_imagebuffer();
    if( !is_buffer_ready() ) return 0;

    // In every 6 bytes the Y of four pixels are described in the
    // viewport (UYVYYY format). For edge detection we only
    // consider the second in the current and the first
    // in the next pixel.

    // Clear all edges in the current slice
    int compressed_slice = edgebuf->ptrLen / EDGE_SLICES;
    memset(edgebuf->ptr + slice*compressed_slice, 0, compressed_slice);

    if (conf.edge_overlay_filter)
    {
        // Prefill smbuf with three lines of average-filtered data.
        // This looks much more complex then it actually is.
        // We really are just summing up nine pixels in a 3x3 box
        // and averaging the current pixel based on them. And
        // we do it 4 bytes at a time because of the UYVYYY format.
        for (y = -1; y <= 1; ++y)
        {
            shutter_fullpress |= kbd_is_key_pressed(KEY_SHOOT_FULL);

            ptrh1 = img + (y_min+y-1) * viewport_byte_width*viewport_yscale;
            smptr = smbuf + (y+1) * viewport_byte_width;

            average_filter_row(ptrh1, smptr, x_min, x_max);
        }
    }

    for (y = y_min; y < y_max; ++y)
    {
        shutter_fullpress |= kbd_is_key_pressed(KEY_SHOOT_FULL);

        if (conf.edge_overlay_filter)
        {
            // We need to shift up our smbuf one line,
            // and fill in the last line (which now empty)
            // with average-filtered data from img.
            // By storing only three lines of smoothed picture
            // in memory, we save memory.

            // Shift
            memcpy(smbuf, smbuf+viewport_byte_width, viewport_byte_width*2);

            // Filter new line
            ptrh1 = img + y * viewport_byte_width*viewport_yscale;
            smptr = smbuf + 2 * viewport_byte_width;
            average_filter_row(ptrh1, smptr, x_min, x_max);

            ptrh1 = smbuf;
        }
        else
        {
            // unfiltered: read the three source lines straight from img
            ptrh1 = img + (y-1) * viewport_byte_width*viewport_yscale;
        }
        ptrh2 = ptrh1 + viewport_byte_width*viewport_yscale;
        ptrh3 = ptrh2 + viewport_byte_width*viewport_yscale;

        // Now we do sobel on the current line

        // x steps by 6 bytes (one UYVYYY block); xdiv3 tracks the
        // corresponding pixel column in edgebuf
        for (x = x_min, xdiv3 = x_min/3; x < x_max; x += 6, xdiv3 += 2)
        {
            // convolve vert (second Y)
            conv1 = *(ptrh1 + x + 1) * ( 1) +
                    *(ptrh1 + x + 4) * (-1) +

                    *(ptrh2 + x + 1) * ( 2) +
                    *(ptrh2 + x + 4) * (-2) +

                    *(ptrh3 + x + 1) * ( 1) +
                    *(ptrh3 + x + 4) * (-1);
            if  (conv1 < 0)     // abs()
                conv1 = -conv1;

            // convolve vert (first Y of next pixel)
            conv2 = *(ptrh1 + x + 1) * ( 1) +
                    *(ptrh1 + x + 3) * ( 2) +
                    *(ptrh1 + x + 4) * ( 1) +

                    *(ptrh3 + x + 1) * (-1) +
                    *(ptrh3 + x + 3) * (-2) +
                    *(ptrh3 + x + 4) * (-1);
            if  (conv2 < 0)     // abs()
                conv2 = -conv2;

            // gradient magnitude approximation: |Gx| + |Gy|
            if (conv1 + conv2 > conf.edge_overlay_thresh)
            {
                bv_set(edgebuf, (y-camera_screen.edge_hmargin)*viewport_width + xdiv3, 1);
            }

            // Do it once again for the next 'pixel'

            // convolve vert (second Y)
            conv1 = *(ptrh1 + x + 5) * ( 1) +
                    *(ptrh1 + x + 9) * (-1) +

                    *(ptrh2 + x + 5) * ( 2) +
                    *(ptrh2 + x + 9) * (-2) +

                    *(ptrh3 + x + 5) * ( 1) +
                    *(ptrh3 + x + 9) * (-1);
            if  (conv1 < 0)     // abs()
                conv1 = -conv1;

            // convolve vert (first Y of next pixel)
            conv2 = *(ptrh1 + x + 5) * ( 1) +
                    *(ptrh1 + x + 7) * ( 2) +
                    *(ptrh1 + x + 9) * ( 1) +

                    *(ptrh3 + x + 5) * (-1) +
                    *(ptrh3 + x + 7) * (-2) +
                    *(ptrh3 + x + 9) * (-1);
            if  (conv2 < 0)     // abs()
                conv2 = -conv2;

            if (conv1 + conv2 > conf.edge_overlay_thresh)
            {
                bv_set(edgebuf, (y-camera_screen.edge_hmargin)*viewport_width + xdiv3+1, 1);
            }
        }   // for x
    }   // for y


//  For an even more improved edge overlay, enabling the following lines will
//  post-filter the results of the edge detection, removing false edge 'dots'
//  from the display. However, the speed hit is large. In the developer's opinion
//  this code is not needed, but if you want that additional quality and do not
//  care so much about performance, you can enable it.
//
//    if (conf.edge_overlay_filter)
//    {
//        // Here we do basic filtering on the detected edges.
//        // If a pixel is marked as edge but just a few of its
//        // neighbors are also edges, then we assume that the
//        // current pixel is just noise and delete the mark.
//
//        bit_vector_t* bv_tmp = bv_create(edgebuf->nElem, edgebuf->nBits);
//        if (bv_tmp != NULL)
//        {
//            memset(bv_tmp->ptr, 0, bv_tmp->ptrLen);
//
//            for (y = 1; y < viewport_height-1; ++y)
//            {
//                shutter_fullpress |= kbd_is_key_pressed(KEY_SHOOT_FULL);
//
//                for (x=12; x<(viewport_width - 4); ++x)
//                {
//                    int bEdge = bv_get(edgebuf, y*viewport_width + x);
//                    if (bEdge)
//                    {
//                        // Count the number of neighbor edges
//                        int sum =
//                            bv_get(edgebuf, (y-1)*viewport_width + (x-1)) +
//                            bv_get(edgebuf, (y-1)*viewport_width + (x)) +
//                            bv_get(edgebuf, (y-1)*viewport_width + (x+1)) +
//
//                            bv_get(edgebuf, (y)*viewport_width + (x-1)) +
////              bv_get(&edgebuf, (y)*viewport_width + (x)) + //  we only inspect the neighbors
//                            bv_get(edgebuf, (y)*viewport_width + (x+1)) +
//
//                            bv_get(edgebuf, (y+1)*viewport_width + (x-1)) +
//                            bv_get(edgebuf, (y+1)*viewport_width + (x)) +
//                            bv_get(edgebuf, (y+1)*viewport_width + (x+1));
//
//                        if (!conf.edge_overlay_show)
//                        {
//                            if (sum >= 5)    // if we have at least 5 neighboring edges
//                                bv_set(bv_tmp, y*viewport_width + x, 1);   // keep the edge
//                            // else
//                            // there is no need to delete because the buffer is already zeroed
//                        }
//                    }
//                }   // for x
//            }   // for y
//
//            // Swap the filtered edge buffer for the real one
//            bit_vector_t* swap_tmp = edgebuf;
//            edgebuf = bv_tmp;
//            bv_free(swap_tmp);
//        }   // NULL-check
//    }   // if filtering

    return shutter_fullpress;
}
static int md_detect_motion(void)
{
    int idx, tick, rv;
    int val, cy, cv, cu;

    register int col, row, x, y;

    if(!md_running())
    {
        return 0;
    }

    tick = get_tick_count();
    rv = 1;

#ifdef OPT_MD_DEBUG
    if(motion_detector.comp_calls_cnt < MD_REC_CALLS_CNT)
    {
        motion_detector.comp_calls[motion_detector.comp_calls_cnt]=tick;
    }
    motion_detector.comp_calls_cnt++;
#endif

    if(motion_detector.start_time + motion_detector.timeout < tick )
    {
        md_save_calls_history();
        motion_detector.running = 0;
        return 0;
    }

    if(motion_detector.last_measure_time + motion_detector.measure_interval > tick)
    {
        // wait for the next time
        return 1;
    }

    motion_detector.last_measure_time = tick;

    unsigned char* img = vid_get_viewport_active_buffer();
    if (!img) return 0;

#ifdef OPT_MD_DEBUG
    if(motion_detector.comp_calls_cnt==50 && (motion_detector.parameters & MD_MAKE_RAM_DUMP_FILE) != 0 )
    {
        mx_dump_memory((char*)img);
    }
#endif

	img += vid_get_viewport_image_offset();		// offset into viewport for when image size != viewport size (e.g. 16:9 image on 4:3 LCD)

	int vp_h = vid_get_viewport_height();
    int vp_w = vid_get_viewport_width();
	int vp_bw = vid_get_viewport_byte_width() * vid_get_viewport_yscale();

	int x_step = motion_detector.pixels_step * 3;
	int y_step = motion_detector.pixels_step * vp_bw;

    for (idx=0, row=0; row < motion_detector.rows; row++)
    {
        // Calc img y start and end offsets (use same height for all cells so 'points' is consistent)
        int y_start = ((row * vp_h) / motion_detector.rows) * vp_bw;
        int y_end = y_start + ((vp_h / motion_detector.rows) * vp_bw);

        for (col=0; col < motion_detector.columns; col++, idx++)
        {
            int in_clipping_region=0;

            if (col+1 >= motion_detector.clipping_region_column1 &&
                col+1 <= motion_detector.clipping_region_column2 &&
                row+1 >= motion_detector.clipping_region_row1 &&
                row+1 <= motion_detector.clipping_region_row2)
            {
                in_clipping_region=1;
            }

            int curr = 0;
            int diff = 0;

            if (
                (motion_detector.clipping_region_mode==MD_REGION_NONE) ||
                (motion_detector.clipping_region_mode==MD_REGION_EXCLUDE && in_clipping_region==0) ||
                (motion_detector.clipping_region_mode==MD_REGION_INCLUDE && in_clipping_region==1)
               )
            {
                // Calc img x start and end offsets (use same width for all cells so 'points' is consistent)
                int x_start = ((col * vp_w) / motion_detector.columns) * 3;
                int x_end = x_start + ((vp_w / motion_detector.columns) * 3);

                int points = 0;

                for (y=y_start; y<y_end; y+=y_step)
                {
                    for (x=x_start; x<x_end; x+=x_step)
                    {
                        // ARRAY of UYVYYY values
                        // 6 bytes - 4 pixels

                        if (motion_detector.pixel_measure_mode == MD_MEASURE_MODE_Y)
                        {
                            val = img[y + x + 1];				                        //Y
                        }
                        else
                        {
                            // Calc offset to UYV component
                            int uvx = x;
                            if (uvx & 1) uvx -= 3;

                            switch(motion_detector.pixel_measure_mode)
                            {
                            case MD_MEASURE_MODE_U:
                                val = (signed char)img[y + uvx];		                //U
                                break;

                            case MD_MEASURE_MODE_V:
                                val = (signed char)img[y + uvx + 2];	                //V
                                break;

                            case MD_MEASURE_MODE_R:
                                cy = img[y + x + 1];
                                cv = (signed char)img[y + uvx + 2];
                                val = clip(((cy<<12)           + cv*5743 + 2048)>>12); // R
                                break;

                            case MD_MEASURE_MODE_G:
                                cy = img[y + x + 1];
                                cu = (signed char)img[y + uvx];
                                cv = (signed char)img[y + uvx + 2];
                                val = clip(((cy<<12) - cu*1411 - cv*2925 + 2048)>>12); // G
                                break;

                            case MD_MEASURE_MODE_B:
                                cy = img[y + x + 1];
                                cu = (signed char)img[y + uvx];
                                val = clip(((cy<<12) + cu*7258           + 2048)>>12); // B
                                break;

                            default:
                                val = 0;    // Stop compiler warning
                                break;
                            }
                        }

                        curr += val;
                        points++;
                    }
                }
                motion_detector.points = points ;
                diff = (curr - motion_detector.prev[idx]) / points;
                if (diff < 0) diff = -diff;
                if ((diff > motion_detector.threshold) &&
                    (motion_detector.start_time+motion_detector.msecs_before_trigger < tick))
                {
                    motion_detector.detected_cells++;
                }
            }

            motion_detector.diff[idx] = diff;
            motion_detector.prev[idx] = curr;
	}
    }