/* Motion tracer: composites the current frame with frame2, and on every
 * tick after the first in an n-frame cycle additionally composites the
 * stored luma trace from the previous tick, producing an echo effect. */
void mtracer_apply( VJFrame *frame, VJFrame *frame2, int mode, int n)
{
	const int plane_len = frame->len;
	VJFrame trace;
	veejay_memcpy( &trace, frame, sizeof(VJFrame) );

	if( mtrace_counter != 0 )
	{
		/* Composite the live frame, then composite once more using the
		 * stored trace plane in place of the live luma. */
		overlaymagic_apply( frame, frame2, mode, 0 );
		trace.data[0] = mtrace_buffer[0];
		trace.data[1] = frame->data[1];
		trace.data[2] = frame->data[2];
		trace.data[3] = frame->data[3];
		overlaymagic_apply( &trace, frame2, mode, 0 );
	}
	else
	{
		/* First tick of the cycle: plain composite only. */
		overlaymagic_apply( frame, frame2, mode, 0 );
	}

	/* Both branches end by refreshing the trace buffer from the frame's
	 * luma plane (same call in the original's two branches, hoisted). */
	vj_frame_copy1( mtrace_buffer[0], frame->data[0], plane_len );

	mtrace_counter++;
	if( mtrace_counter >= n )
		mtrace_counter = 0;
}
/* Snap the supplied frame as the motion-mapping background reference and
 * reset the module's run-time state.
 * Returns 1 on success, 0 when the module has not been initialized. */
int motionmap_prepare( uint8_t *map[4], int width, int height )
{
	if( !is_initialized )
		return 0;

	const int plane_len = width * height;

	/* Store the luma plane, pre-blur it, and keep a blurred copy as the
	 * "previous frame" starting point. */
	vj_frame_copy1( map[0], bg_image, plane_len );
	motionmap_blur( bg_image, width, height );
	veejay_memcpy( prev_img, bg_image, plane_len );

	/* Reset the per-run state machine. */
	have_bg = 1;
	nframe_ = 0;
	running = 0;
	stored_frame = 0;
	do_interpolation = 0;
	scale_lock = 0;

	veejay_msg(2, "Motion Mapping: Snapped background frame");
	return 1;
}
/* Snap the supplied frame into the static background buffer used by the
 * contour extractor, then pre-blur it in place.
 * Returns 1 on success, 0 when the background buffer is not allocated. */
int contourextract_prepare(uint8_t *map[4], int width, int height)
{
	if( static_bg == NULL )
		return 0;

	vj_frame_copy1( map[0], static_bg, width * height );

	/* Blur the snapped background through a temporary frame wrapper. */
	VJFrame blurred;
	veejay_memset( &blurred, 0, sizeof(VJFrame) );
	blurred.data[0] = static_bg;
	blurred.width = width;
	blurred.height = height;
	softblur_apply( &blurred, width, height, 0 );

	veejay_msg(2, "Contour extraction: Snapped background frame");
	return 1;
}
/* Automatic histogram equalization.
 *
 * val == 0 : draw the luma histogram onto the frame. The histogram is
 *            computed from a private copy of the luma plane so the plot
 *            can overwrite the frame; chroma is flattened to grey.
 * val != 0 : analyze and equalize the luma histogram in place.
 *
 * intensity/strength are forwarded to the histogram routines.
 * Fix: the temporary plane allocation was used unchecked; on allocation
 * failure we now leave the frame untouched instead of dereferencing NULL. */
void autoeq_apply( VJFrame *frame, int width, int height, int val, int intensity, int strength)
{
	if( val == 0 )
	{
		VJFrame tmp;
		veejay_memcpy( &tmp, frame, sizeof(VJFrame) );

		tmp.data[0] = (uint8_t*) vj_malloc( sizeof(uint8_t) * frame->len );
		if( tmp.data[0] == NULL )
			return; /* out of memory: skip drawing, frame stays valid */

		vj_frame_copy1( frame->data[0], tmp.data[0], frame->len );
		veejay_histogram_draw( histogram_, &tmp, frame, intensity, strength );

		/* Neutral chroma so the histogram plot renders in grayscale. */
		vj_frame_clear1( frame->data[1], 128, frame->uv_len );
		vj_frame_clear1( frame->data[2], 128, frame->uv_len );

		free(tmp.data[0]);
	}
	else
	{
		veejay_histogram_analyze( histogram_, frame, 0 );
		veejay_histogram_equalize( histogram_, frame, intensity, strength );
	}
}
/* Contour extraction against a snapped static background.
 *
 * Pipeline: binarize |frame - static_bg| by threshold -> distance
 * transform -> downscale mask to dw_ x dh_ -> connected-component
 * labeling -> for each blob, draw the ring of pixels whose distance
 * value equals 'feather' into a cleared luma plane.
 *
 * ed            : contourextract_data* holding scratch bitmaps
 * mode == 1     : show the binarized difference image instead
 * take_bg       : toggling against take_bg_ snaps a new background
 * min_blob_weight: minimum blob mass passed to the labeler
 *
 * NOTE(review): relies on file-scope state (static_bg, take_bg_,
 * bg_frame_, dt_map, to_shrink_, shrinked_, shrink_, dw_, dh_, points). */
void contourextract_apply(void *ed, VJFrame *frame,int width, int height, int threshold, int reverse,int mode, int take_bg, int feather, int min_blob_weight)
{
	unsigned int i,j,k;
	const uint32_t len = frame->len;
	const uint32_t uv_len = frame->uv_len;
	uint8_t *Y = frame->data[0];
	uint8_t *Cb = frame->data[1];
	uint8_t *Cr = frame->data[2];
	/* Per-blob centroids and bounding-box extents, indexed by label. */
	uint32_t cx[256];
	uint32_t cy[256];
	uint32_t xsize[256];
	uint32_t ysize[256];
	/* Scale factors from the shrunken labeling resolution (dw_ x dh_)
	 * back to full frame resolution. */
	float sx = (float) width / (float) dw_;
	float sy = (float) height / (float) dh_;
	float sw = (float) sqrt( sx * sy ); /* NOTE(review): computed but never used */
	veejay_memset( cx,0,sizeof(cx));
	veejay_memset( cy,0,sizeof(cy));
	veejay_memset( xsize,0,sizeof(xsize));
	veejay_memset( ysize,0,sizeof(ysize));
	contourextract_data *ud = (contourextract_data*) ed;
	/* A change of take_bg snaps the current luma as the new background
	 * and opens a short averaging window (bg_frame_ 1..3). */
	if( take_bg != take_bg_ )
	{
		vj_frame_copy1( frame->data[0], static_bg, frame->len );
		take_bg_ = take_bg;
		bg_frame_ ++;
		return;
	}
	/* Average a few more frames into the snapped background to denoise. */
	if( bg_frame_ > 0 && bg_frame_ < 4 )
	{
		for( i = 0 ; i < len ; i ++ )
		{
			static_bg[i] = (static_bg[i] + Y[i] ) >> 1;
		}
		bg_frame_ ++;
		return;
	}
	int packets = 0; /* NOTE(review): unused */
	//@ clear distance transform map
	veejay_memset( dt_map, 0 , len * sizeof(uint32_t) );
	//@ todo: optimize with mmx
	binarify( ud->bitmap,static_bg, frame->data[0], threshold, reverse,len );
	if(mode==1)
	{
		//@ show difference image in grayscale
		vj_frame_copy1( ud->bitmap, Y, len );
		vj_frame_clear1( Cb, 128, uv_len );
		vj_frame_clear1( Cr, 128, uv_len );
		return;
	}
	//@ calculate distance map
	veejay_distance_transform8( ud->bitmap, width, height, dt_map );
	to_shrink_.data[0] = ud->bitmap;
	shrinked_.data[0] = ud->current;
	uint32_t blobs[255];
	veejay_memset( blobs, 0, sizeof(blobs) );
	/* Downscale the binary mask before connected-component labeling. */
	yuv_convert_and_scale_grey( shrink_, &to_shrink_, &shrinked_ );
	uint32_t labels = veejay_component_labeling_8(dw_,dh_, shrinked_.data[0], blobs, cx,cy,xsize,ysize, min_blob_weight);
	/* Black luma + neutral chroma canvas; contours are drawn onto it. */
	veejay_memset( Y, 0, len );
	veejay_memset( Cb , 128, uv_len);
	veejay_memset( Cr , 128, uv_len );
	int num_objects = 0;
	for( i = 1 ; i <= labels; i ++ )
		if( blobs[i] ) num_objects ++;
	//@ Iterate over blob's bounding boxes and extract contours
	for( i = 1; i <= labels; i ++ )
	{
		if( blobs[i] > 0 )
		{
			/* Blob center and half-extents scaled back to frame space. */
			int nx = cx[i] * sx;
			int ny = cy[i] * sy;
			int size_x = xsize[i] * sx;
			int size_y = ysize[i] * sy * 0.5;
			int x1 = nx - size_x;
			int y1 = ny - size_y;
			int x2 = nx + size_y; /* NOTE(review): size_y here looks like a typo for size_x — confirm before changing */
			int y2 = ny + size_y;
			int n_points = 0;
			int center = 0;
			int dx1 = 0,dy1=0;
			/* Clamp the bounding box to the frame. */
			if( x1 < 0 ) x1 = 0; else if ( x1 > width ) x1 = width;
			if( x2 < 0 ) x2 = 0; else if ( x2 > width ) x2 = width;
			if( y1 < 0 ) y1 = 0; else if ( y1 >= height ) y1 = height -1;
			if( y2 < 0 ) y2 = 0; else if ( y2 >= height ) y2 = height -1;
			for( k = y1; k < y2; k ++ )
			{
				for( j = x1; j < x2; j ++ )
				{
					//@ use distance transform map to find centroid (fuzzy)
					if( dt_map[ (k * width + j) ] > center )
					{
						center = dt_map[ (k* width +j) ];
						dx1 = j;
						dy1 = k;
					}
					/* Pixels whose distance-to-edge equals 'feather' form
					 * the contour ring: draw and record them. */
					if( dt_map[ (k * width + j) ] == feather )
					{
						Y[ (k * width +j)] = 0xff;
						points[ n_points ]->x = j;
						points[ n_points ]->y = k;
						n_points++;
						/* Hard cap — presumably 'points' holds 12000 entries; verify allocator. */
						if( n_points >= 11999 )
						{
							veejay_msg(0, "Too many points in contour");
							return;
						}
					}
				}
			}
		}
	}
}
/* Motion mapping: difference the current luma against the snapped
 * background and the previous frame, maintain a rolling activity
 * histogram, and publish key/scale values (key1_, key2_, keyv_, keyp_,
 * global_max) for consumer effects.
 *
 * threshold      : per-pixel difference threshold
 * limit1         : activity floor (x10) below which the average is zeroed
 * draw           : if set, render the binary motion mask and return
 * history        : rolling histogram length
 * last_act_level : selects how keys are derived (see switch below)
 * act_decay      : ticks before scale_lock auto-releases (case 3)
 *
 * NOTE(review): 'max' and the key*_ variables are file-scope state not
 * declared here; 'max' persists across calls (never reset locally). */
void motionmap_apply( VJFrame *frame, int threshold, int limit1, int draw, int history, int decay, int interpol, int last_act_level, int act_decay )
{
	unsigned int i;
	const unsigned int width = frame->width;
	const unsigned int height = frame->height;
	const int len = frame->len;
	uint8_t *Cb = frame->data[1];
	uint8_t *Cr = frame->data[2];
	const int limit = limit1 * 10;
	if(!have_bg)
	{
		veejay_msg(VEEJAY_MSG_ERROR,"Motion Mapping: Snap the background frame with VIMS 339 or mask button in reloaded");
		return;
	}
	/* Re-arm the activity decay counter when the parameter changes. */
	if( act_decay != last_act_decay )
	{
		last_act_decay = act_decay;
		activity_decay = act_decay;
	}
	// run difference algorithm over multiple threads
	if( vj_task_available() )
	{
		VJFrame task;
		task.stride[0] = len; // plane length
		task.stride[1] = len;
		task.stride[2] = len;
		task.stride[3] = 0;
		task.data[0] = bg_image; // plane 0 = background image
		task.data[1] = frame->data[0]; // plane 1 = luminance channel
		task.data[2] = prev_img; // plane 2 = luminance channel of previous frame
		task.data[3] = NULL;
		task.ssm = 1; // all planes are the same size
		task.format = frame->format; // not important, but cannot be 0
		task.shift_v = 0;
		task.shift_h = 0;
		task.uv_width = width;
		task.uv_height = height;
		task.width = width; // dimensions
		task.height = height;
		/* Outputs: binary motion mask + two working difference planes. */
		uint8_t *dst[4] = { binary_img, diff_img, diff_img + RUP8(len), NULL };
		vj_task_set_from_frame( &task );
		vj_task_set_param( threshold, 0 );
		vj_task_run( task.data, dst, NULL,NULL,4, (performer_job_routine) &motionmap_find_diff_job );
	}
	else
	{
		/* Single-threaded fallback producing the same three planes. */
		motionmap_calc_diff( (const uint8_t*) bg_image, prev_img, (const uint8_t*) frame->data[0], diff_img, diff_img + RUP8(len), binary_img, len, threshold );
	}
	if( draw )
	{
		/* Debug view: binary mask as luma, flat grey chroma.
		 * NOTE(review): chroma cleared with luma 'len' — assumes ssm/4:4:4
		 * sized chroma planes, consistent with task.ssm = 1 above. */
		vj_frame_clear1( Cb, 128, len );
		vj_frame_clear1( Cr, 128, len );
		vj_frame_copy1( binary_img, frame->data[0], len );
		running = 0;
		stored_frame = 0;
		scale_lock = 0;
		return;
	}
	int32_t activity_level = motionmap_activity_level( binary_img, width, height );
	int32_t avg_actlvl = 0;
	int32_t min = INT_MAX;
	int32_t local_max = 0;
	current_his_len = history;
	current_decay = decay;
	/* Rolling histogram of activity levels over the last history frames. */
	histogram_[ (nframe_%current_his_len) ] = activity_level;
	for( i = 0; i < current_his_len; i ++ )
	{
		avg_actlvl += histogram_[i];
		if(histogram_[i] > max ) max = histogram_[i];
		if(histogram_[i] < min ) min = histogram_[i];
		if(histogram_[i] > local_max) local_max = histogram_[i];
	}
	avg_actlvl = avg_actlvl / current_his_len;
	/* Suppress averages below the noise floor. */
	if( avg_actlvl < limit ) { avg_actlvl = 0; }
	nframe_ ++;
	switch( last_act_level )
	{
		case 0: /* refresh keys once per histogram window */
			if( (nframe_ % current_his_len)==0 )
			{
				key1_ = min;
				key2_ = max;
				keyp_ = keyv_;
				keyv_ = avg_actlvl;
				global_max = max;
			}
			break;
		case 1: /* track the window-local maximum every frame */
			key1_ = min;
			key2_ = max;
			keyv_ = local_max;
			global_max = local_max;
			break;
		case 2: /* refresh keys from the running average every frame */
			key1_ = min;
			key2_ = max;
			keyp_ = keyv_;
			keyv_ = avg_actlvl;
			global_max = max;
			break;
		case 3: /* like case 0, plus scale locking while inactive */
			if( (nframe_ % current_his_len)==0 )
			{
				key1_ = min;
				key2_ = max;
				keyp_ = keyv_;
				keyv_ = avg_actlvl;
				global_max = max;
			}
			if( avg_actlvl == 0 )
				scale_lock = 1;
			else
				scale_lock = 0;
			//reset to normal after "acitivity_decay" ticks
			if( scale_lock && act_decay > 0)
			{
				activity_decay --;
				if( activity_decay == 0 )
				{
					last_act_decay = 0;
					scale_lock = 0;
				}
			}
			break;
	}
	running = 1;
	do_interpolation = interpol;
}
/* Time distortion: pixels flagged as "moving" are pushed back in time by
 * selecting them from one of PLANES ring-buffered copies of past frames,
 * with a diffusion pass (warptime) that smears the per-pixel delay.
 *
 * val : per-pixel difference threshold for the built-in motion detector
 *       (only used when motion mapping is not active).
 *
 * Uses file-scope state: nonmap (diff + prev planes), have_bg,
 * planetableY/U/V ring buffers, plane index, warptime[2]/warptimeFrame,
 * and motion-mapping interpolation counters n__/N__. */
void timedistort_apply( VJFrame *frame, int width, int height, int val)
{
	unsigned int i;
	const int len = (width * height);
	uint8_t *Y = frame->data[0];
	uint8_t *Cb = frame->data[1];
	uint8_t *Cr = frame->data[2];
	uint8_t *diff = nonmap;          /* motion mask plane */
	uint8_t *prev = nonmap + len;    /* previous (blurred) luma plane */
	int interpolate = 1;
	int motion = 0;
	int tmp1,tmp2;
	if(motionmap_active()) //@ use motion mapping frame
	{
		motionmap_scale_to( 255,255,1,1,&tmp1,&tmp2, &n__,&N__ );
		motion = 1;
		diff = motionmap_bgmap();
	}
	else
	{
		n__ = 0;
		N__ = 0;
		if(!have_bg)
		{
			/* First call: store a blurred copy of the luma as reference
			 * and emit an empty motion mask. */
			vj_frame_copy1( Y, prev, len );
			VJFrame smooth;
			veejay_memcpy(&smooth,frame, sizeof(VJFrame));
			smooth.data[0] = prev;
			softblur_apply(&smooth, width, height, 0 );
			veejay_memset( diff, 0, len );
			have_bg = 1;
			return;
		}
		else
		{
			/*for( i = 0; i < len ; i ++ ) { diff[i] = (abs(prev[i] - Y[i])> val ? 0xff: 0 ); }*/
			/* Threshold |prev - Y| into the mask, then refresh prev
			 * with a blurred copy of the current luma. */
			vje_diff_plane( prev, Y, diff, val, len );
			vj_frame_copy1( Y, prev, len );
			VJFrame smooth;
			veejay_memcpy(&smooth,frame, sizeof(VJFrame));
			smooth.data[0] = prev;
			softblur_apply(&smooth, width, height, 0 );
		}
	}
	if( n__ == N__ || n__ == 0 )
		interpolate = 0;
	//@ process
	/* Store the current frame into the ring buffer slot 'plane'.
	 * NOTE(review): chroma copied with luma stride 'len' — assumes
	 * same-size (4:4:4 / ssm) chroma planes. */
	uint8_t *planeTables[4] = { planetableY[plane], planetableU[plane], planetableV[plane], NULL };
	int strides[4] = { len, len, len, 0 };
	vj_frame_copy( frame->data, planeTables, strides );
	/* Diffuse the delay map: 4-neighbour average with a small bias,
	 * double-buffered between warptime[0] and warptime[1].
	 * Border row/column is skipped (pointers start at width+1). */
	uint8_t *p = warptime[ warptimeFrame ] + width + 1;
	uint8_t *q = warptime[ warptimeFrame ^ 1] + width + 1;
	unsigned int x,y;
	for( y = height - 2; y > 0 ; y -- )
	{
		for( x = width - 2; x > 0; x -- )
		{
			i = *(p - width) + *(p-1) + *(p+1) + *(p + width);
			if( i > 3 ) i-= 3; /* bias so the delay decays toward zero */
			p++;
			*q++ = i >> 2;
		}
		p += 2;
		q += 2;
	}
	q = warptime[ warptimeFrame ^ 1 ] + width + 1;
	int n_plane = 0;
	/* Moving pixels get maximum delay; every pixel is then fetched from
	 * the ring buffer frame that is q[i] steps in the past. */
	for( i = 0; i < len; i ++ )
	{
		if( diff[i] )
		{
			q[i] = PLANES - 1;
		}
		n_plane = ( plane - q[i] + PLANES ) & (PLANES-1);
		Y[i] = planetableY[ n_plane ][i];
		Cb[i] = planetableU[ n_plane ][i];
		Cr[i] = planetableV[ n_plane ][i];
	}
	plane ++;
	plane = plane & (PLANES-1); /* PLANES is a power of two */
	warptimeFrame ^= 1;
	if(interpolate)
		motionmap_interpolate_frame( frame, N__,n__ );
	if(motion)
		motionmap_store_frame(frame);
}
/* Painterly neighbourhood filter: each output pixel is chosen by
 * evaluating a brush-sized neighbourhood against an intensity-scaled
 * copy of the luma plane.
 * mode == 0 : grayscale output (chroma flattened to 128)
 * mode != 0 : color output via evaluate_pixel_bc using saved chroma. */
void neighbours3_apply( VJFrame *frame, int width, int height, int brush_size, int intensity_level, int mode )
{
	const double intensity = intensity_level / 255.0;
	const int plane_len = frame->len;
	uint8_t *premul = tmp_buf[0];   /* intensity-scaled luma map */
	uint8_t *luma = tmp_buf[1];     /* untouched copy of input luma */
	uint8_t *dstY = frame->data[0];
	uint8_t *dstCb = frame->data[1];
	uint8_t *dstCr = frame->data[2];
	int x, y;

	/* keep a copy of the input luma plane */
	vj_frame_copy1( frame->data[0], luma, plane_len );

	if( mode )
	{
		/* color mode also needs copies of both chroma planes */
		int strides[4] = { 0, plane_len, plane_len, 0 };
		uint8_t *dest[4] = { NULL, chromacity[0], chromacity[1], NULL };
		vj_frame_copy( frame->data, dest, strides );
	}

	/* premultiply the intensity map */
	for( y = 0; y < plane_len; y++ )
		premul[y] = (uint8_t) ( (double) luma[y] * intensity );

	if( mode )
	{
		pixel_t px;
		for( y = 0; y < height; y++ )
		{
			for( x = 0; x < width; x++ )
			{
				px = evaluate_pixel_bc( x, y, brush_size, intensity, width, height, premul, luma, chromacity[0], chromacity[1] );
				*(dstY++) = px.y;
				*(dstCb++) = px.u;
				*(dstCr++) = px.v;
			}
		}
	}
	else
	{
		for( y = 0; y < height; y++ )
		{
			for( x = 0; x < width; x++ )
			{
				*(dstY++) = evaluate_pixel_b( x, y, brush_size, intensity, width, height, premul, luma );
			}
		}
		/* grayscale output: neutralize both chroma planes */
		veejay_memset( frame->data[1], 128, plane_len );
		veejay_memset( frame->data[2], 128, plane_len );
	}
}
/* Contour extraction (single-source variant: binarizes the luma plane
 * directly, no background subtraction).
 *
 * Pipeline: binarize luma by threshold -> distance transform ->
 * downscale to dw_ x dh_ -> connected-component labeling -> count blobs.
 * mode == 1 shows the binarized image instead.
 *
 * NOTE(review): take_bg and feather are currently unused by the active
 * code; feather only appears in the commented-out contour loop below.
 * Parameter extraction from the labeled blobs is left as TODO. */
void contourextract_apply(void *ed, VJFrame *frame, int threshold, int reverse, int mode, int take_bg, int feather, int min_blob_weight)
{
	unsigned int i;
	const unsigned int width = frame->width;
	const unsigned int height = frame->height;
	const int len = frame->len;
	const int uv_len = frame->uv_len;
	uint8_t *Y = frame->data[0];
	uint8_t *Cb = frame->data[1];
	uint8_t *Cr = frame->data[2];
	/* Per-blob centroids and bounding-box extents, indexed by label. */
	uint32_t cx[256];
	uint32_t cy[256];
	uint32_t xsize[256];
	uint32_t ysize[256];
	uint32_t blobs[255];
	veejay_memset( cx,0,sizeof(cx));
	veejay_memset( cy,0,sizeof(cy));
	veejay_memset( xsize,0,sizeof(xsize));
	veejay_memset( ysize,0,sizeof(ysize));
	veejay_memset( blobs, 0, sizeof(blobs) );
	contourextract_data *ud = (contourextract_data*) ed;
	//@ clear distance transform map
	veejay_memset( dt_map, 0 , len * sizeof(uint32_t) );
	binarify_1src( ud->bitmap, frame->data[0], threshold, reverse, width, height );
	if(mode==1)
	{
		//@ show difference image in grayscale
		vj_frame_copy1( ud->bitmap, Y, len );
		vj_frame_clear1( Cb, 128, uv_len );
		vj_frame_clear1( Cr, 128, uv_len );
		return;
	}
	//@ calculate distance map
	veejay_distance_transform8( ud->bitmap, width, height, dt_map );
	to_shrink_.data[0] = ud->bitmap;
	shrinked_.data[0] = ud->current;
	/* Downscale the binary mask before connected-component labeling. */
	yuv_convert_and_scale_grey( shrink_, &to_shrink_, &shrinked_ );
	uint32_t labels = veejay_component_labeling_8(dw_,dh_, shrinked_.data[0], blobs, cx,cy,xsize,ysize,min_blob_weight);
	/* Black luma + neutral chroma canvas. */
	veejay_memset( Y, 0, len );
	veejay_memset( Cb , 128, uv_len);
	veejay_memset( Cr , 128, uv_len );
	int num_objects = 0;
	for( i = 1 ; i <= labels; i ++ )
		if( blobs[i] ) num_objects ++;
	//@ Iterate over blob's bounding boxes and extract contours
	//@ use snippet below to get center of blob --> parameter extraction TODO
	/* for( i = 1; i <= labels; i ++ ) { if( blobs[i] > 0 ) { int nx = cx[i] * sx; int ny = cy[i] * sy; int size_x = xsize[i] * sx; int size_y = ysize[i] * sy * 0.5; int x1 = nx - size_x; int y1 = ny - size_y; int x2 = nx + size_y; int y2 = ny + size_y; int n_points = 0; int center = 0; if( x1 < 0 ) x1 = 0; else if ( x1 > width ) x1 = width; if( x2 < 0 ) x2 = 0; else if ( x2 > width ) x2 = width; if( y1 < 0 ) y1 = 0; else if ( y1 >= height ) y1 = height -1; if( y2 < 0 ) y2 = 0; else if ( y2 >= height ) y2 = height -1; for( k = y1; k < y2; k ++ ) { for( j = x1; j < x2; j ++ ) { //@ use distance transform map to find centroid (fuzzy)
	if( dt_map[ (k * width + j) ] > center ) { center = dt_map[ (k* width +j) ]; } if( dt_map[ (k * width + j) ] == feather ) { Y[ (k * width +j)] = 0xff; points[ n_points ]->x = j; points[ n_points ]->y = k; n_points++; if( n_points >= 11999 ) { veejay_msg(0, "Too many points in contour"); return; } } } } } } */
}