/* Compute a preview size for a w x h image, clamped to max_w x max_h.
 * Each positive 'quality' step halves both dimensions first; the source
 * aspect ratio is stored in *result and the final dimensions (rounded up
 * to a multiple of 8 via RUP8) in *dst_w / *dst_h. */
static void calculate_img_dimension(int w, int h, int *dst_w, int *dst_h, float *result, int max_w, int max_h, int quality)
{
    int tmp_w = w;
    int tmp_h = h;
    float ratio = (float) tmp_w / (float) tmp_h;

    *result = ratio;

    /* every quality step halves both dimensions */
    if( quality > 0 ) {
        int qdown = quality;
        while( qdown > 0 ) {
            tmp_h = tmp_h / 2;
            tmp_w = tmp_w / 2;
            qdown--;
        }
    }

    /* Clamp height first, then independently re-check the width: after the
     * height clamp, tmp_h * ratio can still exceed max_w. The original used
     * 'else if' here, which skipped the width check in that case; the
     * sibling multitrack_get_preview_dimensions() uses two separate ifs. */
    if( tmp_h > max_h ) {
        tmp_h = max_h;
        tmp_w = (int) ( (float) tmp_h * ratio );
    }
    if( tmp_w > max_w ) {
        tmp_w = max_w;
        tmp_h = tmp_w / ratio;
    }

    *dst_w = RUP8( tmp_w );
    *dst_h = RUP8( tmp_h );
}
/* Allocate one zero-initialised block of doubles big enough for three
 * RUP8-padded running-sum planes and wire up the per-plane pointers.
 * Returns 1 on success, 0 when the allocation fails. */
int average_malloc(int width, int height)
{
    const size_t plane = RUP8( width * height );

    running_sum[0] = (double*) vj_calloc( sizeof(double) * RUP8( width * height * 3 ) );
    if( running_sum[0] == NULL )
        return 0;

    /* pointer arithmetic is in doubles, matching the element-sized alloc */
    running_sum[1] = running_sum[0] + plane;
    running_sum[2] = running_sum[1] + plane;

    return 1;
}
int tracer_malloc(int w, int h) { trace_buffer[0] = (uint8_t *) vj_malloc(sizeof(uint8_t) * RUP8(w * h * 3) ); trace_buffer[1] = trace_buffer[0] + RUP8(w*h); trace_buffer[2] = trace_buffer[1] + RUP8(w*h); vj_frame_clear1( trace_buffer[0], pixel_Y_lo_, RUP8(w*h)); vj_frame_clear1( trace_buffer[1], 128, RUP8(w*h*2) ); return 1; }
int chameleon_malloc(int w, int h) { int i; for( i = 0; i < 3; i ++ ) { bgimage[i] = vj_malloc(sizeof(uint8_t) * RUP8(w * h) + RUP8(w*2) ); tmpimage[i] = vj_malloc(sizeof(uint8_t) * RUP8(w * h) ); } vj_frame_clear1( bgimage[0], pixel_Y_lo_, RUP8(w*h)); vj_frame_clear1( tmpimage[0], pixel_Y_lo_, RUP8(w*h)); for( i = 1; i < 3; i ++ ) { vj_frame_clear1( bgimage[i], 128, RUP8(w*h)); vj_frame_clear1( tmpimage[i], 128, RUP8(w*h)); } sum = (int32_t*) vj_calloc( RUP8(w * h) * sizeof(int32_t)); timebuffer = (uint8_t*) vj_calloc( RUP8(w * h) * PLANES ); has_bg = 0; plane = 0; N__ = 0; n__ = 0; last_mode_ = -1; return 1; }
/* Allocate the scratch buffer and correction map for the radial-correction
 * effect. Returns 1 on success, 0 on allocation failure.
 * Fix: the original called veejay_memset(Map, ...) BEFORE checking Map for
 * NULL — a guaranteed NULL dereference on allocation failure. */
int radcor_malloc( int width, int height )
{
    badbuf = (uint8_t*) vj_malloc( RUP8( width * height * 4 * sizeof(uint8_t) ) );
    if( !badbuf )
        return 0;

    Map = (uint32_t*) vj_malloc( RUP8( width * height * sizeof(uint32_t) ) );
    if( !Map )
        return 0;
    veejay_memset( Map, 0, RUP8( width * height * sizeof(uint32_t) ) );

    return 1;
}
/* Blend frame 2 over frame 1 where the selected alpha plane (a0 or a1)
 * has "matured": a pixel whose alpha value A starts fading at time_index A
 * and is fully opaque after A + dur frames. A 256-entry transfer table maps
 * each possible alpha byte to a blend weight; expand_lookup_table() then
 * unrolls it per-row so alpha_blend() can vectorize. */
static void alpha_blend_transition( uint8_t *Y, uint8_t *Cb, uint8_t *Cr, uint8_t *a0, const uint8_t *Y2, const uint8_t *Cb2, const uint8_t *Cr2, const uint8_t *a1, const size_t len, const size_t w, unsigned int time_index, const unsigned int dur, const int alpha_select )
{
    uint8_t lookup[256];
    const uint8_t *T = (const uint8_t*) lookup;
    const uint8_t *aA = (alpha_select == 0 ? a0 : a1);
    size_t i;

    /* Precalculate the transfer table over every possible alpha VALUE i.
     * Fix: the original tested time_index against aA[i] — i.e. against the
     * alpha of the first 256 pixels — while the ramp branch already used the
     * table index i. A value-indexed lookup table must depend only on i. */
    for( i = 0; i < 256; i ++ ) {
        if( time_index < i )
            lookup[i] = 0;
        else if ( time_index >= (i + dur) )
            lookup[i] = 0xff;
        else
            lookup[i] = 0xff * ( (double) (time_index - i ) / dur );
    }

    uint8_t AA[ RUP8(w) ];
    for( i = 0; i < len; i += w ) {
        /* unroll the lookup table so we can vectorize */
        expand_lookup_table( AA, T, aA + i, w );
        alpha_blend( Y + i, Y2 + i, AA, w );
        alpha_blend( Cb+ i, Cb2+ i, AA, w );
        alpha_blend( Cr+ i, Cr2+ i, AA, w );
    }
}
int magicmirror_malloc(int w, int h) { int i ; for( i = 0; i < 3 ;i ++ ) { magicmirrorbuf[i] = (uint8_t*)vj_malloc(sizeof(uint8_t) * RUP8(w*h)); if(!magicmirrorbuf[i]) return 0; } funhouse_x = (double*)vj_calloc(sizeof(double) * w ); if(!funhouse_x) return 0; cache_x = (unsigned int *)vj_calloc(sizeof(unsigned int)*w); if(!cache_x) return 0; funhouse_y = (double*)vj_calloc(sizeof(double) * h ); if(!funhouse_y) return 0; cache_y = (unsigned int*)vj_calloc(sizeof(unsigned int)*h); if(!cache_y) return 0; veejay_memset(cache_x,0,w); veejay_memset(cache_y,0,h); n__ =0; N__ =0; return 1; }
/* Allocate the single trace buffer for the m-tracer effect, padded with one
 * extra row. Returns 1 on success, 0 when allocation fails. */
int mtracer_malloc(int w, int h)
{
    const size_t buflen = RUP8( (w * h + w) ) * sizeof(uint8_t);

    mtrace_buffer[0] = (uint8_t*) vj_malloc( buflen );

    return ( mtrace_buffer[0] == NULL ) ? 0 : 1;
}
int contourextract_malloc(void **d, int width, int height) { contourextract_data *my; *d = (void*) vj_calloc(sizeof(contourextract_data)); my = (contourextract_data*) *d; dw_ = nearest_div( width / 8 ); dh_ = nearest_div( height / 8 ); my->current = (uint8_t*) vj_calloc( sizeof(uint8_t) * RUP8( dw_ * dh_ * 3 )); my->bitmap = (uint8_t*) vj_calloc( sizeof(uint8_t) * RUP8( width * height )); if(static_bg == NULL) static_bg = (uint8_t*) vj_calloc( sizeof(uint8_t) * RUP8( width * height) + RUP8(width*2)); if(dt_map == NULL ) dt_map = (uint32_t*) vj_calloc( sizeof(uint32_t) * RUP8( width * height )); veejay_memset( &template_, 0, sizeof(sws_template) ); veejay_memset( proj_, 0, sizeof(proj_) ); template_.flags = 1; vj_get_yuvgrey_template( &to_shrink_, width, height ); vj_get_yuvgrey_template( &shrinked_ , dw_, dh_ ); shrink_ = yuv_init_swscaler( &(to_shrink_), &(shrinked_), &template_ , yuv_sws_get_cpu_flags() ); points = (point_t**) vj_calloc( sizeof(point_t*) * 12000 ); int i; for( i = 0; i < 12000; i ++ ) { points[i] = (point_t*) vj_calloc( sizeof(point_t) ); } veejay_memset( x_, 0, sizeof(x_) ); veejay_memset( y_, 0, sizeof(y_) ); return 1; }
int bgsubtract_malloc(int width, int height) { if(static_bg__ == NULL){ static_bg__ = (uint8_t*) vj_malloc( RUP8(width*height)*4); bg_frame__[0] = static_bg__; bg_frame__[1] = bg_frame__[0] + RUP8(width*height); bg_frame__[2] = bg_frame__[1] + RUP8(width*height); bg_frame__[3] = bg_frame__[2] + RUP8(width*height); } instance = 1; veejay_msg( VEEJAY_MSG_INFO, "You can enable/disable the histogram equalizer by setting env var VEEJAY_BG_AUTO_HISTOGRAM_EQ" ); veejay_msg( VEEJAY_MSG_INFO, "Histogram equalization is %s", (auto_hist ? "enabled" : "disabled" )); return 1; }
/* Scale w x h down to fit within MAX_PREVIEW_WIDTH x MAX_PREVIEW_HEIGHT,
 * preserving aspect ratio, and round both results up to multiples of 8. */
void multitrack_get_preview_dimensions( int w , int h, int *dst_w, int *dst_h )
{
    int pw = w;
    int ph = h;
    const float aspect = (float) w / (float) h;

    /* clamp height first; the width check below re-validates the result */
    if( ph > MAX_PREVIEW_HEIGHT ) {
        ph = MAX_PREVIEW_HEIGHT;
        pw = (int) ( (float) ph * aspect );
    }
    if( pw > MAX_PREVIEW_WIDTH ) {
        pw = MAX_PREVIEW_WIDTH;
        ph = pw / aspect;
    }

    *dst_w = RUP8( pw );
    *dst_h = RUP8( ph );
}
/* Allocate the three private zoom planes, each RUP8-padded.
 * Returns 1 on success, 0 as soon as any allocation fails. */
int zoom_malloc(int width, int height)
{
    const size_t plane_size = sizeof(uint8_t) * RUP8( width * height );
    int plane;

    for( plane = 0; plane < 3; plane ++ ) {
        zoom_private_[plane] = (uint8_t*) vj_malloc( plane_size );
        if( zoom_private_[plane] == NULL )
            return 0;
    }
    return 1;
}
/* Livido plugin instance init: allocate the private crop state and one
 * buffer holding the (up to 4 * w * h byte) scratch planes, then attach it
 * to the instance port.
 * Fix: the original used both livido_malloc results without NULL checks. */
livido_init_f init_instance( livido_port_t *my_instance )
{
    int w = 0, h = 0;
    lvd_extract_dimensions( my_instance, "out_channels", &w, &h );

    lvd_crop_t *c = (lvd_crop_t*) livido_malloc( sizeof(lvd_crop_t) );
    if( c == NULL )
        return LIVIDO_ERROR_MEMORY_ALLOCATION;
    livido_memset( c, 0, sizeof(lvd_crop_t) );

    c->buf[0] = (uint8_t*) livido_malloc( sizeof(uint8_t) * RUP8( w * h * 4 ) );
    if( c->buf[0] == NULL ) {
        livido_free( c );
        return LIVIDO_ERROR_MEMORY_ALLOCATION;
    }
    c->buf[1] = c->buf[0] + RUP8( w * h );
    c->buf[2] = c->buf[1] + RUP8( w * h );

    c->flags = SWS_FAST_BILINEAR;
    /* force a rescale on first process call */
    c->w = -1;
    c->h = -1;

    livido_property_set( my_instance, "PLUGIN_private", LIVIDO_ATOM_TYPE_VOIDPTR, 1, &c );
    return LIVIDO_NO_ERROR;
}
/* Allocate the Gaussian-blur filter parameters and scratch plane and reset
 * the cached parameter state. Returns 1 on success, 0 on allocation failure.
 * Fix: the original checked 'temp' but not 'gaussfilter'. */
int gaussblur_malloc(int w, int h)
{
    gaussfilter = (FilterParam*) vj_calloc( sizeof(FilterParam) );
    if( gaussfilter == NULL )
        return 0;
    temp = (uint8_t*) vj_malloc( sizeof(uint8_t) * RUP8( w * h ) );
    if( temp == NULL )
        return 0;
    last_radius = 0;
    last_strength = 0;
    last_quality = 0;
    return 1;
}
int gvr_track_connect( void *preview, char *hostname, int port_num, int *new_track ) { veejay_preview_t *vp = (veejay_preview_t*) preview; int track_num = track_find( vp ); if(track_num == -1) { vj_msg(0, "All tracks used."); return 0; } if(track_exists( vp, hostname, port_num, new_track ) ) { vj_msg(VEEJAY_MSG_WARNING, "Veejay '%s':%d already in track %d", hostname, port_num, *new_track ); return 0; } vj_client *fd = vj_client_alloc(0,0,0); if(!vj_client_connect( fd, hostname, NULL, port_num ) ) { vj_msg(VEEJAY_MSG_ERROR, "Unable to connect to %s:%d", hostname, port_num ); vj_client_free( fd ); return 0; } veejay_track_t *vt = (veejay_track_t*) vj_calloc( sizeof(veejay_track_t)); vt->hostname = strdup(hostname); vt->port_num = port_num; vt->active = 1; vt->fd = fd; vt->preview = is_button_toggled( "previewtoggle" ); vt->status_buffer = (uint8_t*) vj_calloc(sizeof(uint8_t) * 256); vt->data_buffer = (uint8_t*) vj_calloc(sizeof(uint8_t) * RUP8(MAX_PREVIEW_WIDTH * MAX_PREVIEW_HEIGHT * 3) ); vt->tmp_buffer = (uint8_t*) vj_calloc(sizeof(uint8_t) * RUP8( MAX_PREVIEW_WIDTH * MAX_PREVIEW_HEIGHT * 4) ); *new_track = track_num; vp->tracks[ track_num ] = vt; vp->track_sync->active_list[ track_num ] = 1; return 1; }
int cutstop_malloc(int width, int height) { int i; for( i = 0; i < 3 ;i ++ ) { vvcutstop_buffer[i] = (uint8_t*)vj_malloc(sizeof(uint8_t) * RUP8( width * height )); if(!vvcutstop_buffer[i] ) return 0; } veejay_memset( vvcutstop_buffer[0],0, width*height); veejay_memset( vvcutstop_buffer[1],128,(width*height)); veejay_memset( vvcutstop_buffer[2],128,(width*height)); return 1; }
int motionmap_malloc(int w, int h ) { bg_image = (uint8_t*) vj_malloc( sizeof(uint8_t) * RUP8(w * h)); binary_img = (uint8_t*) vj_malloc(sizeof(uint8_t) * RUP8(w * h)); prev_img = (uint8_t*) vj_malloc(sizeof(uint8_t) * RUP8(w*h)); interpolate_buf = vj_malloc( sizeof(uint8_t) * RUP8(w*h*3)); diff_img = (uint8_t*) vj_malloc( sizeof(uint8_t) * RUP8(w*h*2)); veejay_msg(2, "This is 'Motion Mapping'"); veejay_msg(2, "This FX calculates motion energy activity levels over a period of time to scale FX parameters"); veejay_msg(2, "Add any of the following to the FX chain (if not already present)"); veejay_msg(2, "\tBathroom Window, Displacement Mapping, Multi Mirrors, Magic Mirror, Sinoids"); veejay_msg(2, "\tSlice Window , Smear, ChameleonTV and TimeDistort TV"); veejay_memset( histogram_, 0, sizeof(int32_t) * HIS_LEN ); nframe_ = 0; running = 0; is_initialized ++; return 1; }
/* Allocate one block large enough for MAX_SCRATCH_FRAMES frames of three
 * planes, carve it into the three cframe plane rings, and clear everything
 * to 128. Returns 1 on success, 0 when the allocation fails. */
int chromascratcher_malloc(int w, int h)
{
    const int plane_len = w * h * MAX_SCRATCH_FRAMES;

    cframe[0] = (uint8_t *) vj_malloc( RUP8( w * h * 3 ) * MAX_SCRATCH_FRAMES * sizeof(uint8_t) );
    if( !cframe[0] )
        return 0;
    cframe[1] = cframe[0] + plane_len;
    cframe[2] = cframe[1] + plane_len;

    int strides[4] = { plane_len, plane_len, plane_len, 0 };
    vj_frame_clear( cframe, strides, 128 );

    return 1;
}
int timedistort_malloc( int w, int h ) { unsigned int i; if(nonmap) timedistort_free(); nonmap = vj_malloc( RUP8(w + 2 * w * h) * sizeof(uint8_t)); if(!nonmap) return 0; planes[0] = vj_malloc( RUP8(PLANES * 3 * w * h) * sizeof(uint8_t)); planes[1] = planes[0] + RUP8(PLANES * w * h ); planes[2] = planes[1] + RUP8(PLANES * w * h ); veejay_memset( planes[0],0, RUP8(PLANES * w * h )); veejay_memset( planes[1],128,RUP8(PLANES * w * h )); veejay_memset( planes[2],128,RUP8(PLANES * w * h )); have_bg = 0; n__ = 0; N__ = 0; for( i = 0; i < PLANES; i ++ ) { planetableY[i] = &planes[0][ (w*h) * i ]; planetableU[i] = &planes[1][ (w*h) * i ]; planetableV[i] = &planes[2][ (w*h) * i ]; } warptime[0] = (uint8_t*) vj_calloc( sizeof(uint8_t) * RUP8((w * h)+w+1) ); warptime[1] = (uint8_t*) vj_calloc( sizeof(uint8_t) * RUP8((w * h)+w+1) ); if( warptime[0] == NULL || warptime[1] == NULL ) return 0; plane = 0; state = 1; return 1; }
/*
 * reader_thread: network stream reader, one per incoming veejay link.
 *
 * Runs a small state machine per loop iteration:
 *   retrieve == 0 : send a VIMS "get frame" request
 *   retrieve == 1 : poll until the reply is readable
 *   retrieve == 2 : read the frame header and payload into t->buf
 * On any error it closes the link and retries the connection until the
 * owner clears t->state. Returns NULL when the thread exits.
 *
 * Locking: t's lock()/unlock() guards the shared threaded_t fields; the
 * read path locks either around the whole raw read (uncompressed,
 * need_rlock) or only around decompression (compressed).
 */
void *reader_thread(void *data)
{
    vj_tag *tag = (vj_tag*) data;
    threaded_t *t = tag->priv;
    char buf[16];

    /* VIMS request string, reused for every frame */
    snprintf(buf,sizeof(buf)-1, "%03d:;", VIMS_GET_FRAME);

    int retrieve = 0;
    int success = 0;

    vj_client *v = vj_client_alloc( t->w, t->h, t->af );
    v->lzo = lzo_new();

    success = vj_client_connect_dat( v, tag->source_name,tag->video_channel );
    if( success > 0 ) {
        veejay_msg(VEEJAY_MSG_INFO, "Connecton established with %s:%d",tag->source_name, tag->video_channel + 5);
    }
    else if ( tag->source_type != VJ_TAG_TYPE_MCAST ) {
        /* multicast sources are allowed to come up later; others bail out */
        veejay_msg(0, "Unable to connect to %s: %d", tag->source_name, tag->video_channel+5);
        goto NETTHREADEXIT;
    }

    lock(t);
    t->state = STATE_RUNNING;
    unlock(t);

    for( ;; ) {
        int error = 0;
        int res = 0;
        int ret = 0;

        /* state 0: request the next frame once the previous one is consumed */
        if( retrieve == 0 && t->have_frame == 0 ) {
            ret = vj_client_send( v, V_CMD,(unsigned char*) buf );
            if( ret <= 0 ) {
                error = 1;
            }
            else {
                retrieve = 1;
            }
        }

        /* state 1: wait for the reply to become readable */
        if(!error && retrieve == 1 ) {
            res = vj_client_poll(v, V_CMD );
            if( res ) {
                if(vj_client_link_can_read( v, V_CMD ) ) {
                    retrieve = 2;
                }
            }
            else if ( res < 0 ) {
                error = 1;
            }
            else if ( res == 0 ) {
                /* nothing yet; back off briefly and poll again */
                net_delay(10,0);
                continue;
            }
        }

        /* state 2: read frame header + payload */
        if(!error && retrieve == 2) {
            int ret = 0; /* NOTE(review): shadows the outer 'ret'; the
                          * have_frame/error checks below use THIS one */
            int strides[3] = { 0,0,0};
            int compr_len = 0;

            if( vj_client_read_frame_header( v, &(t->in_w), &(t->in_h), &(t->in_fmt), &compr_len, &strides[0],&strides[1],&strides[2]) == 0 ) {
                error = 1;
            }
            if(!error) {
                /* compr_len <= 0 means the payload arrives uncompressed and
                 * is read straight into the shared buffer, so hold the lock
                 * for the whole read; compressed data is read unlocked and
                 * only decompression is locked */
                int need_rlock = 0;
                if( compr_len <= 0 )
                    need_rlock = 1;
                if( need_rlock ) {
                    lock(t);
                }
                /* grow the shared frame buffer on demand
                 * NOTE(review): realloc result is not checked; on OOM t->buf
                 * becomes NULL and the ret<=0||t->buf==NULL branch fires */
                if( t->bufsize < (t->in_w * t->in_h * 3) || t->buf == NULL ) {
                    t->bufsize = t->in_w * t->in_h * 3;
                    t->buf = (uint8_t*) realloc( t->buf, RUP8(t->bufsize));
                }
                ret = vj_client_read_frame_data( v, compr_len, strides[0], strides[1], strides[2], t->buf );
                if( ret == 2 ) {
                    if(!need_rlock) {
                        lock(t);
                        vj_client_decompress_frame_data( v, t->buf, t->in_fmt, t->in_w, t->in_h, compr_len, strides[0],strides[1],strides[2] );
                        unlock(t);
                    }
                }
                if( need_rlock ) {
                    unlock(t);
                }
            }
            // lock(t);
            //t->buf = vj_client_read_i( v, t->buf,&(t->bufsize), &ret );
            if(ret && t->buf)
            {
                /* frame complete: publish its geometry and restart the cycle */
                t->have_frame = 1;
                t->in_fmt = v->in_fmt;
                t->in_w = v->in_width;
                t->in_h = v->in_height;
                retrieve = 0;
            }
            if( ret <= 0 || t->buf == NULL ) {
                if( tag->source_type == VJ_TAG_TYPE_NET ) {
                    veejay_msg(VEEJAY_MSG_DEBUG,"Error reading video frame from %s:%d",tag->source_name,tag->video_channel );
                    error = 1;
                }
            }
            // unlock(t);
        }

NETTHREADRETRY:
        /* error recovery: close and keep reconnecting until it works or the
         * owner asks us to stop (t->state == 0) */
        if( error ) {
            int success = 0;
            vj_client_close(v);
            veejay_msg(VEEJAY_MSG_INFO, " ZZzzzzz ... waiting for Link %s:%d to become ready", tag->source_name, tag->video_channel );
            net_delay( 0, 5 );
            if(tag->source_type == VJ_TAG_TYPE_MCAST )
                success = vj_client_connect( v,NULL,tag->source_name,tag->video_channel );
            else
                success = vj_client_connect_dat( v, tag->source_name,tag->video_channel );
            if( t->state == 0 ) {
                veejay_msg(VEEJAY_MSG_INFO, "Network thread with %s: %d was told to exit",tag->source_name,tag->video_channel+5);
                goto NETTHREADEXIT;
            }
            if( success <= 0 ) {
                goto NETTHREADRETRY;
            }
            else {
                veejay_msg(VEEJAY_MSG_INFO, "Connecton re-established with %s:%d",tag->source_name,tag->video_channel + 5);
            }
            retrieve = 0;
        }
        if( t->state == 0 ) {
            veejay_msg(VEEJAY_MSG_INFO, "Network thread with %s: %d was told to exit",tag->source_name,tag->video_channel+5);
            goto NETTHREADEXIT;
        }
    }

NETTHREADEXIT:
    /* release the shared buffer and the client link */
    if(t->buf)
        free(t->buf);
    t->buf = NULL;
    if(v) {
        vj_client_close(v);
        vj_client_free(v);
        v = NULL;
    }
    veejay_msg(VEEJAY_MSG_INFO, "Network thread with %s: %d has exited",tag->source_name,tag->video_channel+5);
    //pthread_exit( &(t->thread));
    return NULL;
}
/*
 * motionmap_apply: per-frame driver of the Motion Mapping FX.
 *
 * Diffs the current luma plane against the snapped background and previous
 * frame to produce a binary motion image, then folds the measured activity
 * level into a rolling histogram whose min/max/average drive the key values
 * (key1_/key2_/keyv_/keyp_, global_max) other FX read to scale parameters.
 *
 * draw != 0 renders the binary motion image instead of updating the keys.
 * last_act_level selects how the keys are refreshed (per-window average,
 * running local max, every frame, or windowed with decay-based scale lock).
 *
 * NOTE(review): histogram_ is indexed nframe_ % history — history == 0
 * would divide by zero; presumably callers guarantee history >= 1 (confirm).
 * NOTE(review): 'max' is file-scope state and is never reset here, so it
 * grows monotonically across calls until something else clears it.
 */
void motionmap_apply( VJFrame *frame, int threshold, int limit1, int draw, int history, int decay, int interpol, int last_act_level, int act_decay )
{
    unsigned int i;
    const unsigned int width = frame->width;
    const unsigned int height = frame->height;
    const int len = frame->len;
    uint8_t *Cb = frame->data[1];
    uint8_t *Cr = frame->data[2];
    const int limit = limit1 * 10;

    /* requires a snapped background frame to diff against */
    if(!have_bg) {
        veejay_msg(VEEJAY_MSG_ERROR,"Motion Mapping: Snap the background frame with VIMS 339 or mask button in reloaded");
        return;
    }

    /* pick up a changed decay parameter */
    if( act_decay != last_act_decay ) {
        last_act_decay = act_decay;
        activity_decay = act_decay;
    }

    // run difference algorithm over multiple threads
    if( vj_task_available() ) {
        VJFrame task;
        task.stride[0] = len; // plane length
        task.stride[1] = len;
        task.stride[2] = len;
        task.stride[3] = 0;
        task.data[0] = bg_image; // plane 0 = background image
        task.data[1] = frame->data[0]; // plane 1 = luminance channel
        task.data[2] = prev_img; // plane 2 = luminance channel of previous frame
        task.data[3] = NULL;
        task.ssm = 1; // all planes are the same size
        task.format = frame->format; // not important, but cannot be 0
        task.shift_v = 0;
        task.shift_h = 0;
        task.uv_width = width;
        task.uv_height = height;
        task.width = width; // dimensions
        task.height = height;

        /* outputs: binary motion mask + the two halves of diff_img */
        uint8_t *dst[4] = { binary_img, diff_img, diff_img + RUP8(len), NULL };

        vj_task_set_from_frame( &task );
        vj_task_set_param( threshold, 0 );
        vj_task_run( task.data, dst, NULL,NULL,4, (performer_job_routine) &motionmap_find_diff_job );
    }
    else {
        /* single-threaded fallback, same inputs/outputs */
        motionmap_calc_diff( (const uint8_t*) bg_image, prev_img, (const uint8_t*) frame->data[0], diff_img, diff_img + RUP8(len), binary_img, len, threshold );
    }

    /* visualisation mode: show the binary mask on a grey-chroma frame and
     * suspend key updates */
    if( draw ) {
        vj_frame_clear1( Cb, 128, len );
        vj_frame_clear1( Cr, 128, len );
        vj_frame_copy1( binary_img, frame->data[0], len );
        running = 0;
        stored_frame = 0;
        scale_lock = 0;
        return;
    }

    int32_t activity_level = motionmap_activity_level( binary_img, width, height );
    int32_t avg_actlvl = 0;
    int32_t min = INT_MAX;
    int32_t local_max = 0;

    current_his_len = history;
    current_decay = decay;

    /* rolling histogram of the last current_his_len activity levels */
    histogram_[ (nframe_%current_his_len) ] = activity_level;

    for( i = 0; i < current_his_len; i ++ ) {
        avg_actlvl += histogram_[i];
        if(histogram_[i] > max )
            max = histogram_[i];
        if(histogram_[i] < min )
            min = histogram_[i];
        if(histogram_[i] > local_max)
            local_max = histogram_[i];
    }
    avg_actlvl = avg_actlvl / current_his_len;
    /* activity below the noise floor counts as zero */
    if( avg_actlvl < limit ) {
        avg_actlvl = 0;
    }

    nframe_ ++;

    switch( last_act_level ) {
        case 0:
            /* refresh keys once per full histogram window */
            if( (nframe_ % current_his_len)==0 ) {
                key1_ = min;
                key2_ = max;
                keyp_ = keyv_;
                keyv_ = avg_actlvl;
                global_max = max;
            }
            break;
        case 1:
            /* track the window-local maximum every frame */
            key1_ = min;
            key2_ = max;
            keyv_ = local_max;
            global_max = local_max;
            break;
        case 2:
            /* refresh keys from the running average every frame */
            key1_ = min;
            key2_ = max;
            keyp_ = keyv_;
            keyv_ = avg_actlvl;
            global_max = max;
            break;
        case 3:
            /* windowed refresh plus scale-lock when activity drops to zero */
            if( (nframe_ % current_his_len)==0 ) {
                key1_ = min;
                key2_ = max;
                keyp_ = keyv_;
                keyv_ = avg_actlvl;
                global_max = max;
            }
            if( avg_actlvl == 0 )
                scale_lock = 1;
            else
                scale_lock = 0;
            //reset to normal after "acitivity_decay" ticks
            if( scale_lock && act_decay > 0) {
                activity_decay --;
                if( activity_decay == 0 ) {
                    last_act_decay = 0;
                    scale_lock = 0;
                }
            }
            break;
    }

    running = 1;
    do_interpolation = interpol;
}