static int md_detect_motion(void)
{
    int idx, tick, rv;
    int val, cy, cv, cu;

    register int col, row, x, y;

    if(!md_running())
    {
        return 0;
    }

    tick = get_tick_count();
    rv = 1;

#ifdef OPT_MD_DEBUG
    if(motion_detector.comp_calls_cnt < MD_REC_CALLS_CNT)
    {
        motion_detector.comp_calls[motion_detector.comp_calls_cnt]=tick;
    }
    motion_detector.comp_calls_cnt++;
#endif

    if(motion_detector.start_time + motion_detector.timeout < tick)
    {
        md_save_calls_history();
        motion_detector.running = 0;
        return 0;
    }

    if(motion_detector.last_measure_time + motion_detector.measure_interval > tick)
    {
        // wait for the next time
        return 1;
    }

    motion_detector.last_measure_time = tick;

    unsigned char* img = vid_get_viewport_live_fb();
    if(img==NULL)
    {
        img = vid_get_viewport_fb();
    }

#ifdef OPT_MD_DEBUG
    if(motion_detector.comp_calls_cnt==50 && (motion_detector.parameters & MD_MAKE_RAM_DUMP_FILE) != 0)
    {
        mx_dump_memory((char*)img);
    }
#endif

    img += vid_get_viewport_image_offset();     // offset into viewport for when image size != viewport size (e.g. 16:9 image on 4:3 LCD)

    int vp_h = vid_get_viewport_height();
    int vp_w = vid_get_viewport_width();
    int vp_bw = vid_get_viewport_byte_width() * vid_get_viewport_yscale();

    int x_step = motion_detector.pixels_step * 3;
    int y_step = motion_detector.pixels_step * vp_bw;

    for (idx=0, row=0; row < motion_detector.rows; row++)
    {
        // Calc img y start and end offsets (use same height for all cells so 'points' is consistent)
        int y_start = ((row * vp_h) / motion_detector.rows) * vp_bw;
        int y_end = y_start + ((vp_h / motion_detector.rows) * vp_bw);

        for (col=0; col < motion_detector.columns; col++, idx++)
        {
            int in_clipping_region=0;

            if (col+1 >= motion_detector.clipping_region_column1 &&
                col+1 <= motion_detector.clipping_region_column2 &&
                row+1 >= motion_detector.clipping_region_row1 &&
                row+1 <= motion_detector.clipping_region_row2)
            {
                in_clipping_region=1;
            }

            int curr = 0;
            int diff = 0;

            if (
                (motion_detector.clipping_region_mode==MD_REGION_NONE) ||
                (motion_detector.clipping_region_mode==MD_REGION_EXCLUDE && in_clipping_region==0) ||
                (motion_detector.clipping_region_mode==MD_REGION_INCLUDE && in_clipping_region==1)
               )
            {
                // Calc img x start and end offsets (use same width for all cells so 'points' is consistent)
                int x_start = ((col * vp_w) / motion_detector.columns) * 3;
                int x_end = x_start + ((vp_w / motion_detector.columns) * 3);

                int points = 0;

                for (y=y_start; y<y_end; y+=y_step)
                {
                    for (x=x_start; x<x_end; x+=x_step)
                    {
                        // ARRAY of UYVYYY values
                        // 6 bytes - 4 pixels

                        if (motion_detector.pixel_measure_mode == MD_MEASURE_MODE_Y)
                        {
                            val = img[y + x + 1];                   //Y
                        }
                        else
                        {
                            // Calc offset to UYV component
                            int uvx = x;
                            if (uvx & 1) uvx -= 3;

                            switch(motion_detector.pixel_measure_mode)
                            {
                            case MD_MEASURE_MODE_U:
                                val = (signed char)img[y + uvx];            //U
                                break;

                            case MD_MEASURE_MODE_V:
                                val = (signed char)img[y + uvx + 2];        //V
                                break;

                            case MD_MEASURE_MODE_R:
                                cy = img[y + x + 1];
                                cv = (signed char)img[y + uvx + 2];
                                val = clip(((cy<<12)             + cv*5743 + 2048)>>12);    // R
                                break;

                            case MD_MEASURE_MODE_G:
                                cy = img[y + x + 1];
                                cu = (signed char)img[y + uvx];
                                cv = (signed char)img[y + uvx + 2];
                                val = clip(((cy<<12) - cu*1411 - cv*2925 + 2048)>>12);      // G
                                break;

                            case MD_MEASURE_MODE_B:
                                cy = img[y + x + 1];
                                cu = (signed char)img[y + uvx];
                                val = clip(((cy<<12) + cu*7258             + 2048)>>12);    // B
                                break;

                            default:
                                val = 0;    // Stop compiler warning
                                break;
                            }
                        }

                        curr += val;
                        points++;
                    }
                }

                motion_detector.points = points;
                diff = (curr - motion_detector.prev[idx]) / points;
                if (diff < 0) diff = -diff;

                if ((diff > motion_detector.threshold) &&
                    (motion_detector.start_time + motion_detector.msecs_before_trigger < tick))
                {
                    motion_detector.detected_cells++;
                }
            }

            motion_detector.diff[idx] = diff;
            motion_detector.prev[idx] = curr;
        }
    }
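/*
 * Illustrative sketch only (not part of the original source): shows how one
 * UYVYYY group from the viewport buffer maps to R, G and B.  The buffer packs
 * 4 pixels into 6 bytes as U Y0 V Y1 Y2 Y3; U and V are signed and shared by
 * all four Y samples.  The coefficients 5743, 1411, 2925 and 7258 are the
 * same 12-bit fixed-point YUV->RGB factors used by the R/G/B measure modes
 * above.  clip8() is a hypothetical stand-in for the detector's clip()
 * helper, assumed to clamp values to 0..255.
 */
static int clip8(int v)
{
    if (v < 0)   return 0;
    if (v > 255) return 255;
    return v;
}

static void uyvyyy_to_rgb(const unsigned char *grp, int pixel, int *r, int *g, int *b)
{
    static const int y_off[4] = { 1, 3, 4, 5 };     // byte offsets of Y0..Y3 within the group
    int cy = grp[y_off[pixel & 3]];
    int cu = (signed char)grp[0];                   // shared U component
    int cv = (signed char)grp[2];                   // shared V component

    *r = clip8(((cy << 12)             + cv * 5743 + 2048) >> 12);
    *g = clip8(((cy << 12) - cu * 1411 - cv * 2925 + 2048) >> 12);
    *b = clip8(((cy << 12) + cu * 7258             + 2048) >> 12);
}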
int md_detect_motion(void)
{
    int *tmp;
    unsigned char *img;
    int vp_w, vp_h, idx, tmp2, tick, in_clipping_region, x_step, y_step, x_end, y_end;
    int val;
    int cy, cv, cu;
    register int col, row, x, y;

    if(!md_running())
    {
        return 0;
    }

    tick = get_tick_count();

#ifdef OPT_MD_DEBUG
    if(motion_detector->comp_calls_cnt < MD_REC_CALLS_CNT)
    {
        motion_detector->comp_calls[motion_detector->comp_calls_cnt]=tick;
    }
    motion_detector->comp_calls_cnt++;
#endif

    if(motion_detector->start_time + motion_detector->timeout < tick)
    {
        md_save_calls_history();
        motion_detector->running = 0;
        return 0;
    }

    if(motion_detector->last_measure_time + motion_detector->measure_interval > tick)
    {
        // wait for the next time
        return 1;
    }

    motion_detector->last_measure_time = tick;

    // swap pointers so we don't need to copy the last data array into the previous one
    tmp = motion_detector->curr;
    motion_detector->curr = motion_detector->prev;
    motion_detector->prev = tmp;

    img = vid_get_viewport_live_fb();
    if(img==NULL)
    {
        img = vid_get_viewport_fb();
    }

#ifdef OPT_MD_DEBUG
    if(motion_detector->comp_calls_cnt==50 && (motion_detector->parameters & MD_MAKE_RAM_DUMP_FILE) != 0)
    {
        mx_dump_memory((char*)img);
    }
#endif

    vp_h = vid_get_viewport_height();
    vp_w = vid_get_viewport_buffer_width();

    img += vid_get_viewport_image_offset();     // offset into viewport for when image size != viewport size (e.g. 16:9 image on 4:3 LCD)

    x_step = vid_get_viewport_width()/motion_detector->columns;
    y_step = vp_h/motion_detector->rows;

    for (idx=0, row=0; row < motion_detector->rows; row++)
    {
        for (col=0; col < motion_detector->columns; col++, idx++)
        {
            // clear curr and points, previously done in its own loop
            // might be able to avoid clearing all, since some are overwritten below
            motion_detector->points[idx] = 0;
            motion_detector->curr[idx] = 0;

            in_clipping_region=0;

            if (col+1 >= motion_detector->clipping_region_column1 &&
                col+1 <= motion_detector->clipping_region_column2 &&
                row+1 >= motion_detector->clipping_region_row1 &&
                row+1 <= motion_detector->clipping_region_row2)
            {
                in_clipping_region=1;
            }

            if (
                (motion_detector->clipping_region_mode==MD_REGION_NONE) ||
                (motion_detector->clipping_region_mode==MD_REGION_EXCLUDE && in_clipping_region==0) ||
                (motion_detector->clipping_region_mode==MD_REGION_INCLUDE && in_clipping_region==1)
               )
            {
                x_end = (col+1)*x_step;
                y_end = (row+1)*y_step*vp_w;

                for(y=row*y_step*vp_w; y<y_end; y+=motion_detector->pixels_step*vp_w)
                {
                    for(x=col*x_step; x<x_end; x+=motion_detector->pixels_step)
                    {
                        // ARRAY of UYVYYY values
                        // 6 bytes - 4 pixels

                        switch(motion_detector->pixel_measure_mode)
                        {
                        default:
                        case MD_MEASURE_MODE_Y:
                            val = img[(y+x)*3 + 1];                     //Y
                            break;

                        case MD_MEASURE_MODE_U:
                            val = img[(y+(x&0xFFFFFFFE))*3];            //U
                            break;

                        case MD_MEASURE_MODE_V:
                            val = img[(y+(x&0xFFFFFFFE))*3 + 2];        //V
                            break;

                        case MD_MEASURE_MODE_R:
                            cy = img[(y+x)*3 + 1];
                            cv = img[(y+(x&0xFFFFFFFE))*3 + 2];
                            val = clip(((cy<<12)             + cv*5743 + 2048)>>12);    // R
                            break;

                        case MD_MEASURE_MODE_G:
                            cy = img[(y+x)*3 + 1];
                            cu = img[(y+(x&0xFFFFFFFE))*3];
                            cv = img[(y+(x&0xFFFFFFFE))*3 + 2];
                            val = clip(((cy<<12) - cu*1411 - cv*2925 + 2048)>>12);      // G
                            break;

                        case MD_MEASURE_MODE_B:
                            cy = img[(y+x)*3 + 1];
                            cu = img[(y+(x&0xFFFFFFFE))*3];
                            val = clip(((cy<<12) + cu*7258             + 2048)>>12);    // B
                            break;
                        }

                        motion_detector->curr[idx] += val;
                        motion_detector->points[idx]++;
                    }
                }
            }
        }
    }
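/*
 * Illustrative sketch only (not part of the original source): the pointer
 * swap used at the top of the older implementation above.  Rather than
 * copying the current per-cell sums into the 'previous' array on every pass,
 * the two buffers are exchanged, so the sums accumulated on the last pass
 * become the reference for the next comparison.  The struct and function
 * names here are hypothetical; only the technique mirrors the code above.
 */
struct md_cell_buffers
{
    int *curr;      // per-cell sums being accumulated this pass
    int *prev;      // per-cell sums from the previous pass
};

static void md_swap_cell_buffers(struct md_cell_buffers *b)
{
    int *tmp = b->curr;
    b->curr = b->prev;
    b->prev = tmp;
}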