Code example #1
File: fibdownscale.c Project: c0ntrol/veejay
static void fibdownscale1_apply(VJFrame *frame, VJFrame *frame2)
{
	unsigned i, f1;
	const int len = frame->len >> 1;
	const int uv_len = (frame->ssm ? frame->len : frame->uv_len) >> 1;

	uint8_t *Y = frame->data[0];
	uint8_t *Cb = frame->data[1];
	uint8_t *Cr = frame->data[2];

	uint8_t *Y2 = frame2->data[0];
	uint8_t *Cb2 = frame2->data[1];
	uint8_t *Cr2 = frame2->data[2];

	/* do fib over half of image. (now we have 2 squares in upper half) */
	for (i = 2; i < len; i++)
	{
		f1 = (i + 1) + (i - 1);
		Y[i] = Y2[f1];
	}

	/* duplicate the first half of the plane into the second half */
	veejay_memcpy( Y + len, Y, len ); 

	/* do the same thing for UV to get correct image */
	for (i = 2; i < uv_len; i++)
	{
		f1 = (i + 1) + (i - 1);
		Cb[i] = Cb2[f1];
		Cr[i] = Cr2[f1];
	}

	veejay_memcpy( Cb + uv_len, Cb , uv_len );
	veejay_memcpy( Cr + uv_len, Cr , uv_len );
}
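Note: the index expression f1 = (i + 1) + (i - 1) simplifies to 2 * i, so the loop above samples every second byte of frame2 into the first half of each plane of frame, and the veejay_memcpy calls then duplicate that half into the second half. A minimal standalone sketch of the same mapping; the buffer names are hypothetical and not part of veejay:

#include <stdint.h>
#include <string.h>

/* Sketch only: dst[i] = src[2*i], i.e. (i + 1) + (i - 1) == 2 * i.
 * 'half' is half the plane length; src must hold at least 2*half bytes. */
static void half_scale_plane(uint8_t *dst, const uint8_t *src, size_t half)
{
	size_t i;
	for (i = 2; i < half; i++)
		dst[i] = src[2 * i];
	memcpy(dst + half, dst, half);	/* duplicate the first half into the second */
}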
Code example #2
File: packet.c Project: c0ntrol/veejay
int			packet_put_data(packet_header_t *h, void *payload, const uint8_t *plane )
{
	uint8_t *dst = (uint8_t*) payload;
	veejay_memcpy( dst, h , PACKET_HEADER_LENGTH );
	veejay_memcpy( dst + PACKET_HEADER_LENGTH, plane, CHUNK_SIZE );
	return 1;
}
Code example #3
File: bgsubtract.c Project: c0ntrol/veejay
int bgsubtract_prepare(VJFrame *frame)
{
	if(!static_bg__ )
	{
		return 0;
	}
	
	if( auto_hist )
		vje_histogram_auto_eq( frame );

	//@ copy the image
	veejay_memcpy( bg_frame__[0], frame->data[0], frame->len );
	
	if( frame->ssm ) {
		veejay_memcpy( bg_frame__[1], frame->data[1], frame->len );
		veejay_memcpy( bg_frame__[2], frame->data[2], frame->len );
		bg_ssm = 1;
	}
	else {
		// if data is not subsampled, upsample chroma planes now 
		veejay_memcpy( bg_frame__[1], frame->data[1], frame->uv_len );
		veejay_memcpy( bg_frame__[2], frame->data[2], frame->uv_len );
		chroma_supersample( SSM_422_444, frame, bg_frame__ );
		bg_ssm = 1;
	}

	bg_n = 0;

	veejay_msg(2, "Subtract background: Snapped background frame (4:4:4 = %d)", bg_ssm);
	return 1;
}
Code example #4
File: packet.c Project: c0ntrol/veejay
int			packet_put_padded_data(packet_header_t *h, void *payload, const uint8_t *plane, int bytes )
{
	uint8_t *dst = (uint8_t*) payload;
	size_t len = PACKET_HEADER_LENGTH;
	veejay_memcpy( dst, h , len );
	veejay_memcpy( dst + len, plane, bytes );
	return (len + bytes);
}
Code example #5
File: cutstop.c Project: c0ntrol/veejay
void cutstop_apply( VJFrame *frame, int threshold, int freq, int cutmode, int holdmode)
{
	int i=0;
	const int len = frame->len;

	uint8_t *Yb = vvcutstop_buffer[0];
	uint8_t *Ub = vvcutstop_buffer[1];
	uint8_t *Vb = vvcutstop_buffer[2];
	uint8_t *Yd = frame->data[0];
	uint8_t *Ud = frame->data[1];
	uint8_t *Vd = frame->data[2];
	
	frq_cnt = frq_cnt + freq;
	
	if (freq == 255 || frq_cnt > 255) {
		veejay_memcpy(Yb, Yd, len);
		veejay_memcpy(Ub, Ud, len);
		veejay_memcpy(Vb, Vd, len);
		frq_cnt = 0;
	}	
	// moved cutmode & holdmode outside loop	
	if(cutmode && !holdmode)
	{
		for( i = 0; i < len; i ++ )
			if( threshold > Yb[i] )
			{
				Yd[i] = Yb[i]; Ud[i] = Ub[i]; Vd[i] = Vb[i];	
			}	
	}
	if(cutmode && holdmode)
	{
		for( i = 0; i < len; i ++ )
			if( threshold > Yd[i] )
			{
				Yd[i] = Yb[i]; Ud[i] = Ub[i]; Vd[i] = Vb[i];
			}
	}
	if(!cutmode && holdmode)
	{
		for( i =0 ; i < len; i ++ )
			if( threshold < Yd[i])
			{ 
				Yd[i] = Yb[i]; Ud[i] = Ub[i]; Vd[i] = Vb[i];
			}
	}
	if(!cutmode && !holdmode)
	{
		for( i = 0; i < len; i ++ )
			if(threshold < Yb[i])
			{
				Yd[i] = Yb[i]; Ud[i] = Ub[i]; Vd[i] = Vb[i];
			}
	}

}
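The four branches above differ only in which luma plane is compared (the buffered Yb in cut mode, the live Yd in hold mode) and in the direction of the comparison. A hedged sketch of the same per-pixel selection, assuming the same plane layout as the example:

#include <stdint.h>

/* Sketch only: equivalent selection logic for the four cutstop branches. */
static void cutstop_select(uint8_t *Yd, uint8_t *Ud, uint8_t *Vd,
                           const uint8_t *Yb, const uint8_t *Ub, const uint8_t *Vb,
                           int len, int threshold, int cutmode, int holdmode)
{
	int i;
	for (i = 0; i < len; i++) {
		const uint8_t ref = holdmode ? Yd[i] : Yb[i];	/* hold mode compares the live frame */
		const int take = cutmode ? (threshold > ref) : (threshold < ref);
		if (take) {
			Yd[i] = Yb[i]; Ud[i] = Ub[i]; Vd[i] = Vb[i];
		}
	}
}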
Code example #6
File: motionmap.c Project: c0ntrol/veejay
void	motionmap_store_frame( VJFrame *fx )
{
	if( running == 0  || !do_interpolation)
		return;

	veejay_memcpy( interpolate_buf, fx->data[0], fx->len );
	veejay_memcpy( interpolate_buf + fx->len, fx->data[1], fx->len );
	veejay_memcpy( interpolate_buf + fx->len + fx->len, fx->data[2], fx->len );

	stored_frame = 1;
}
Code example #7
File: bgsubtract.c Project: c0ntrol/veejay
static void bgsubtract_show_bg( VJFrame *frame )
{
	veejay_memcpy( frame->data[0], bg_frame__[0], frame->len );
	if( bg_ssm && frame->ssm ) {
		veejay_memcpy( frame->data[1], bg_frame__[1], frame->len );
		veejay_memcpy( frame->data[2], bg_frame__[2], frame->len );
	} else { /* subsampling does not match */
		veejay_memset( frame->data[1], 128, frame->uv_len );
		veejay_memset( frame->data[2], 128, frame->uv_len );
	}	
}
Code example #8
File: yuv4mpeg.c Project: c0ntrol/veejay
int y4m_write_fields_cb(y4m_cb_writer_t * fd, const y4m_stream_info_t *si,
		     const y4m_frame_info_t *fi,
		     uint8_t * const *upper_field, 
		     uint8_t * const *lower_field)
{
  int p, err;
  int planes = y4m_si_get_plane_count(si);
  int numwbuf=0;
  const int maxwbuf=32*1024;
  uint8_t *wbuf;
  
  /* Write frame header */
  if ((err = y4m_write_frame_header_cb(fd, si, fi)) != Y4M_OK) return err;
  /* Write each plane */
  wbuf=_y4m_alloc(maxwbuf);
  for (p = 0; p < planes; p++) {
    uint8_t *srctop = upper_field[p];
    uint8_t *srcbot = lower_field[p];
    int height = y4m_si_get_plane_height(si, p);
    int width = y4m_si_get_plane_width(si, p);
    int y;
    /* alternately write one line from each field */
    for (y = 0; y < height; y += 2) {
      if( width*2 >= maxwbuf ) {
        if (y4m_write_cb(fd, srctop, width)) goto y4merr;
        if (y4m_write_cb(fd, srcbot, width)) goto y4merr;
      } else {
        if (numwbuf + 2 * width > maxwbuf) {
          if(y4m_write_cb(fd, wbuf, numwbuf)) goto y4merr;
          numwbuf=0;
        }

        veejay_memcpy(wbuf+numwbuf,srctop,width); numwbuf += width;
        veejay_memcpy(wbuf+numwbuf,srcbot,width); numwbuf += width;
      }
      srctop  += width;
      srcbot  += width;
    }
  }
  if( numwbuf )
    if( y4m_write_cb(fd, wbuf, numwbuf) )
      goto y4merr;
  _y4m_free(wbuf);
  return Y4M_OK;

 y4merr:
  _y4m_free(wbuf);
  return Y4M_ERR_SYSTEM;
}
Code example #9
File: gaussblur.c Project: c0ntrol/veejay
void gaussblur_apply(VJFrame *frame, int radius, int strength, int quality )
{
	uint8_t *A = frame->data[3];
	const unsigned int width = frame->width;
	const unsigned int height = frame->height;
	const int len = frame->len;

	if( last_radius != radius || last_strength != strength || last_quality != quality )
	{
		if( gaussfilter->filter_context ) {
			sws_freeContext( gaussfilter->filter_context );
		}
		if( gaussfilter_init( width, height, radius, strength, quality ) == 0 )
			return;

		last_radius = radius;
		last_strength = strength;
		last_quality = quality;
	}


	veejay_memcpy( temp, A, len );
	gaussblur( A, width, temp, width, width, height, gaussfilter->filter_context );

}
Code example #10
void scratcher_apply(VJFrame *src,
		     int width, int height, int opacity, int n,
		     int no_reverse)
{

    unsigned int len = src->len;
    unsigned int op1 = (opacity > 255) ? 255 : opacity;
    int offset = len * nframe;
    int uv_len = src->uv_len;
    int uv_offset = uv_len * nframe;
	VJFrame copy;

    if (nframe== 0) {
		int strides[4] = { len, uv_len, uv_len, 0 };
		vj_frame_copy( src->data, frame, strides );
        return;
    }
	
	VJFrame srcB;
	veejay_memcpy( &srcB, src, sizeof(VJFrame) );
	srcB.data[0] = frame[0] + offset;
	srcB.data[1] = frame[1] + uv_offset;
	srcB.data[2] = frame[2] + uv_offset;
	opacity_applyN( src, &srcB, src->width,src->height, opacity );
	copy.uv_len = src->uv_len;
	copy.data[0] = frame[0];
	copy.data[1] = frame[1];
	copy.data[2] = frame[2];
   	
	store_frame( &copy, width, height, n, no_reverse);

}
Code example #11
File: scratcher.c Project: c0ntrol/veejay
void scratcher_apply(VJFrame *src,int opacity, int n, int no_reverse)
{
    const int len = src->len;
    const int offset = len * nframe;
    const int uv_len = src->uv_len;
    const int uv_offset = uv_len * nframe;

	VJFrame tmp;
	veejay_memcpy( &tmp, src, sizeof(VJFrame) );
	
	tmp.data[0] = frame[0] + offset;
	tmp.data[1] = frame[1] + uv_offset;
	tmp.data[2] = frame[2] + uv_offset;

	if( no_reverse != last_reverse || n != last_n )
	{
		last_reverse = no_reverse;
		nframe = n;
		last_n = n;
	}		

	if( nframe == 0 ) {
		tmp.data[0] = src->data[0];
		tmp.data[1] = src->data[1];
		tmp.data[2] = src->data[2];
	}

	opacity_applyN( src, &tmp, opacity );
	
	store_frame( src, n, no_reverse);
}
Code example #12
File: yuv4mpeg.c Project: c0ntrol/veejay
int y4m_read_fields_data_cb(y4m_cb_reader_t * fd, const y4m_stream_info_t *si,
                         y4m_frame_info_t *fi,
                         uint8_t * const *upper_field, 
                         uint8_t * const *lower_field)
{
  int p;
  int planes = y4m_si_get_plane_count(si);
  const int maxrbuf=32*1024;
  uint8_t *rbuf=_y4m_alloc(maxrbuf);
  int rbufpos=0,rbuflen=0;
  
  /* Read each plane */
  for (p = 0; p < planes; p++) {
    uint8_t *dsttop = upper_field[p];
    uint8_t *dstbot = lower_field[p];
    int height = y4m_si_get_plane_height(si, p);
    int width = y4m_si_get_plane_width(si, p);
    int y;
    /* alternately read one line into each field */
    for (y = 0; y < height; y += 2) {
      if( width*2 >= maxrbuf ) {
        if (y4m_read_cb(fd, dsttop, width)) goto y4merr;
        if (y4m_read_cb(fd, dstbot, width)) goto y4merr;
      } else {
        if( rbufpos==rbuflen ) {
          rbuflen=(height-y)*width;
          if( rbuflen>maxrbuf )
            rbuflen=maxrbuf-maxrbuf%(2*width);
          if( y4m_read_cb(fd,rbuf,rbuflen) )
            goto y4merr;
          rbufpos=0;
        }
            
        veejay_memcpy(dsttop,rbuf+rbufpos,width); rbufpos+=width;
        veejay_memcpy(dstbot,rbuf+rbufpos,width); rbufpos+=width;
      }
      dsttop+=width;
      dstbot+=width;
    }
  }
  _y4m_free(rbuf);
  return Y4M_OK;

 y4merr:
  _y4m_free(rbuf);
  return Y4M_ERR_SYSTEM;
}
Code example #13
File: packet.c Project: c0ntrol/veejay
packet_header_t		packet_get_header(const void *data)
{
	packet_header_t h,tmp;
	veejay_memcpy( &tmp, data, PACKET_HEADER_LENGTH );
	h.seq_num = tmp.seq_num;
	h.length = tmp.length;
	h.usec = tmp.usec;
	h.timeout = tmp.timeout;
	return h;
}
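A usage sketch for the packet helpers shown in examples #2 and #13: packet_put_data lays the header in front of a CHUNK_SIZE slice of plane data, and packet_get_header reads it back from the start of the received buffer. packet.h, packet_header_t, PACKET_HEADER_LENGTH and CHUNK_SIZE are assumed to come from veejay; the helper below is illustrative only, not part of the project.

#include <stdint.h>
#include "packet.h"	/* assumed to declare packet_header_t, PACKET_HEADER_LENGTH, CHUNK_SIZE */

/* Sketch only: payload must hold at least PACKET_HEADER_LENGTH + CHUNK_SIZE bytes. */
static int packet_roundtrip_check(packet_header_t *h, const uint8_t *plane, uint8_t *payload)
{
	packet_put_data( h, payload, plane );
	packet_header_t back = packet_get_header( payload );
	return back.seq_num == h->seq_num;	/* header survives the copy intact */
}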
Code example #14
void _fibdownscale_apply(VJFrame *frame, VJFrame *frame2, int width,
			 int height)
{

    unsigned i, f1;
    unsigned int len = frame->len / 2;
    unsigned int uv_len = frame->uv_len / 2;

 	uint8_t *Y = frame->data[0];
	uint8_t *Cb = frame->data[1];
	uint8_t *Cr = frame->data[2];

	uint8_t *Y2 = frame2->data[0];
	uint8_t *Cb2 = frame2->data[1];
	uint8_t *Cr2 = frame2->data[2];




    /* do fib over half of image. (now we have 2 squares in upper half) */
    for (i = 2; i < len; i++) {
		f1 = (i + 1) + (i - 1);
		Y[i] = Y2[f1];
    }

    /* duplicate the first half of the plane into the second half */
    veejay_memcpy( Y + len, Y, len );

    /* do the same thing for UV to get correct image */

    for (i = 2; i < uv_len; i++) {
	f1 = (i + 1) + (i - 1);
	Cb[i] = Cb2[f1];
	Cr[i] = Cr2[f1];
    }

	veejay_memcpy( Cb + uv_len, Cb , uv_len );
	veejay_memcpy( Cr + uv_len, Cr , uv_len );
}
Code example #15
File: median.c Project: d-j-a-y/veejay-dyne-outdated
void medianfilter_apply( VJFrame *frame, int width, int height, int val)
{
    uint8_t *Y = frame->data[0];
    uint8_t *Cb = frame->data[1];
    uint8_t *Cr = frame->data[2];

    if( val == 0 )
	   return; 

     uint8_t *buffer = (uint8_t*) vj_malloc(sizeof(uint8_t)*width*height*3);
     veejay_memset( buffer,0, width*height*3);
     ctmf( Y, buffer, width,height,width,width,val,1,1024*1024*8);
     ctmf( Cb,buffer + (width*height), width,height/2,width,width,val,1,512*1024);
     ctmf( Cr,buffer + (width*height*2),width,height/2,width,width,val,1,512*1024);

     veejay_memcpy( Y, buffer, width*height);
     veejay_memcpy( Cb,buffer + (width*height), width*height);
     veejay_memcpy( Cr,buffer + (width*height*2), width*height);
     
     free(buffer);

}
Code example #16
File: motionmap.c Project: c0ntrol/veejay
void	motionmap_interpolate_frame( VJFrame *fx, int N, int n )
{
	if( running == 0 || !do_interpolation) 
		return;

	VJFrame prev;
	veejay_memcpy(&prev, fx, sizeof(VJFrame));
	prev.data[0] = interpolate_buf;
	prev.data[1] = interpolate_buf + (fx->len);
	prev.data[2] = interpolate_buf + (2*fx->len);

	motionmap_lerp_frame( fx, &prev, N, n );
}
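motionmap_store_frame (example #6) writes the three planes back-to-back at offsets 0, len and 2*len, and motionmap_interpolate_frame aliases them at the same offsets, so interpolate_buf must hold at least three full-size planes. A hypothetical allocation matching that layout; the allocation site itself is not part of the examples:

#include <stdint.h>
#include <stdlib.h>

static uint8_t *interpolate_buf = NULL;

/* Sketch only: reserve room for Y, Cb and Cr at full frame length each. */
static int motionmap_alloc_interpolate(size_t frame_len)
{
	interpolate_buf = (uint8_t*) malloc( 3 * frame_len );
	return interpolate_buf != NULL;
}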
Code example #17
File: toalpha.c Project: c0ntrol/veejay
void toalpha_apply( VJFrame *frame, int mode)
{
	const int len = frame->len;
	uint8_t *a = frame->data[3];
	uint8_t *Y = frame->data[0];
		
	if( mode == 0 ) {
		veejay_memcpy(a, Y, len );
	}
	else {
		int i;
		for( i = 0; i < len; i ++ ) 
		{
			a[i] = __lookup_table[ Y[i] ];
		}
	}
}
Code example #18
File: motionmap.c Project: c0ntrol/veejay
int	motionmap_prepare( uint8_t *map[4], int width, int height )
{
	if(!is_initialized)
		return 0;

	vj_frame_copy1( map[0], bg_image, width * height );
	motionmap_blur( bg_image, width,height );
	veejay_memcpy( prev_img, bg_image, width * height );

	have_bg = 1;
	nframe_ = 0;
	running = 0;
	stored_frame = 0;
	do_interpolation = 0;
	scale_lock = 0;
	veejay_msg(2, "Motion Mapping: Snapped background frame");
	return 1;
}
Code example #19
File: autoeq.c Project: d-j-a-y/veejay-dyne-outdated
void autoeq_apply( VJFrame *frame, int width, int height, int val, int intensity, int strength)
{
	if( val == 0 )
	{
		VJFrame tmp;
		veejay_memcpy( &tmp, frame, sizeof(VJFrame));
		tmp.data[0] = (uint8_t*) vj_malloc( sizeof(uint8_t) * frame->len );
		vj_frame_copy1( frame->data[0], tmp.data[0], frame->len );

		veejay_histogram_draw( histogram_,&tmp, frame, intensity, strength );

		vj_frame_clear1( frame->data[1], 128, frame->uv_len );
		vj_frame_clear1( frame->data[2], 128, frame->uv_len );

		free(tmp.data[0]);
	}
	else
	{
		veejay_histogram_analyze( histogram_, frame, 0 );
		veejay_histogram_equalize( histogram_, frame, intensity, strength );
	}
}
Code example #20
int bgsubtract_prepare(uint8_t *map[4], int width, int height)
{
	if(!static_bg )
	{
		return 0;
	}
	
	//@ copy the image
	veejay_memcpy( static_bg, map[0], (width*height));
	
	VJFrame tmp;
	veejay_memset( &tmp, 0, sizeof(VJFrame));
	tmp.data[0] = static_bg;
	tmp.width = width;
	tmp.height = height;

	//@ 3x3 blur
	softblur_apply( &tmp, width,height,0);

	veejay_msg(2, "Substract background: Snapped background frame");
	return 1;
}
Code example #21
File: mtracer.c Project: c0ntrol/veejay
void mtracer_apply( VJFrame *frame, VJFrame *frame2, int mode, int n)
{
	const int len = frame->len;
    VJFrame m;
    veejay_memcpy( &m, frame, sizeof(VJFrame ));

    if (mtrace_counter == 0) {
		overlaymagic_apply(frame, frame2, mode,0);
		vj_frame_copy1( mtrace_buffer[0], frame->data[0], len );
    } else {
		overlaymagic_apply(frame, frame2, mode,0);
		m.data[0] = mtrace_buffer[0];
		m.data[1] = frame->data[1];
		m.data[2] = frame->data[2];
		m.data[3] = frame->data[3];
		overlaymagic_apply( &m, frame2, mode, 0 );
		vj_frame_copy1( mtrace_buffer[0],frame->data[0], len );
    }

    mtrace_counter++;
    if (mtrace_counter >= n)
	mtrace_counter = 0;
}
Code example #22
File: morphology.c Project: c0ntrol/veejay
void morphology_apply( VJFrame *frame, int threshold, int convolution_kernel, int mode, int channel )
{
	unsigned int x,y;
	int len = frame->len;
	int width = frame->width;

	const int uv_len = (frame->ssm ? len : frame->uv_len);
	
	uint8_t *I = frame->data[0];
	
	uint8_t *Cb = frame->data[1];
	uint8_t *Cr = frame->data[2];

	switch( channel ) {
		case 1: I = frame->data[3];
		break;
		default:
			I = frame->data[0];
		break;
	}
	
	morph_func	p = _morphology_function(mode);

	if( threshold == 0 ) {
		/* assume image is binary thresholded already */
		veejay_memcpy( binary_img, I, len );
	}
	else {
		morph_threshold_image( I, len, threshold, binary_img );
	}

	if( channel == 0 ) { /* other channel is alpha */
		veejay_memset( Cb, 128, uv_len );
		veejay_memset( Cr, 128, uv_len );
	}	

	len -= width;

	if( mode == 0 ) {
		for(y = width; y < len; y += width  )
		{	
			for(x = 1; x < width-1; x ++)
			{	
				if(binary_img[x+y] == 0)
				{
					uint8_t mt[9] = {
						binary_img[x-1+y-width], binary_img[x+y-width], binary_img[x+1+y-width],
						binary_img[x-1+y],binary_img[x+y], binary_img[x+1+y],
						binary_img[x-1+y+width], binary_img[x+y+width], binary_img[x+1+y+width]
						};
					I[x+y] = p( kernels[convolution_kernel], mt );
				}
				else
				{
					I[x+y] = 0xff;
				}
			}
		}
	}
	else {
		for(y = width; y < len; y += width  )
		{	
			for(x = 1; x < width-1; x ++)
			{	
				if(binary_img[x+y] == 0xff)
				{
					uint8_t mt[9] = {
						binary_img[x-1+y-width], binary_img[x+y-width], binary_img[x+1+y-width],
						binary_img[x-1+y], binary_img[x+y],binary_img[x+1+y],
						binary_img[x-1+y+width], binary_img[x+y+width], binary_img[x+1+y+width]
						};
					I[x+y] = p( kernels[convolution_kernel], mt );
				}
				else 
				{
					I[x+y] = 0;
				}
			}
		}

	}
}
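p is a morph_func selected by _morphology_function(mode) and applied to the 3x3 neighbourhood gathered in mt. The pair below shows one plausible shape for such a function (erode/dilate over the kernel's active positions); the signature is an assumption for illustration, not the veejay implementation:

#include <stdint.h>

typedef uint8_t (*morph_func)(const uint8_t *kernel, const uint8_t *mt);	/* assumed signature */

/* Dilation sketch: maximum over the neighbours enabled by the kernel. */
static uint8_t morph_dilate_sketch(const uint8_t *kernel, const uint8_t *mt)
{
	uint8_t v = 0;
	int i;
	for (i = 0; i < 9; i++)
		if (kernel[i] && mt[i] > v)
			v = mt[i];
	return v;
}

/* Erosion sketch: minimum over the neighbours enabled by the kernel. */
static uint8_t morph_erode_sketch(const uint8_t *kernel, const uint8_t *mt)
{
	uint8_t v = 0xff;
	int i;
	for (i = 0; i < 9; i++)
		if (kernel[i] && mt[i] < v)
			v = mt[i];
	return v;
}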
Code example #23
File: packet.c Project: c0ntrol/veejay
int			packet_get_data(packet_header_t *h, const void *data, uint8_t *plane )
{
	uint8_t *addr = (uint8_t*) data;
	veejay_memcpy( plane , addr + PACKET_HEADER_LENGTH, CHUNK_SIZE );
	return 1;
}
Code example #24
File: chromascratcher.c Project: c0ntrol/veejay
void chromascratcher_apply(VJFrame *frame, int mode, int opacity, int n,
                           int no_reverse)
{
    unsigned int i;
	const unsigned int width = frame->width;
	const unsigned int height = frame->height;
    const int len = frame->len;
    const unsigned int op_a = (opacity > 255) ? 255 : opacity;
    const unsigned int op_b = 255 - op_a;
    const int offset = len * cnframe;
 	uint8_t *Y = frame->data[0];
	uint8_t *Cb = frame->data[1];
	uint8_t *Cr = frame->data[2];
    veejay_memcpy( &_tmp, frame, (sizeof(VJFrame)));
	_tmp.data[0] = cframe[0];
	_tmp.data[1] = cframe[1];
	_tmp.data[2] = cframe[2];

    if(no_reverse != chroma_restart)
    {
		chroma_restart = no_reverse;
		cnframe = n;
    }

    if( cnframe == 0 ) {
	_tmp.data[0] = frame->data[0];
	_tmp.data[1] = frame->data[1];
	_tmp.data[2] = frame->data[2];
    }

    if(mode>3) {
	   int matte_mode = mode - 3;
   	   chromamagick_apply( frame,&_tmp,matte_mode,opacity);
    }
    else {
	    switch (mode) {		/* scratching with a sequence of frames (no scene changes) */

		case 0:
			/* moving parts will disappear over time */
			for (i = 0; i < len; i++) {
				if (cframe[0][offset + i] < Y[i]) {
					Y[i] = cframe[0][offset + i];
					Cb[i] = cframe[1][offset + i];
					Cr[i] = cframe[2][offset + i];
				}
			}
			break;
		case 1:
			for (i = 0; i < len; i++) {
				/* moving parts will remain visible */
				if (cframe[0][offset + i] > Y[i]) {
					Y[i] = cframe[0][offset + i];
					Cb[i] = cframe[1][offset + i];
					Cr[i] = cframe[2][offset + i];
				}
			}
			break;
		case 2:
			for (i = 0; i < len; i++) {
				if ((cframe[0][offset + i] * op_a) < (Y[i] * op_b)) {
					Y[i] = cframe[0][offset + i];
					Cb[i] = cframe[1][offset + i];
					Cr[i] = cframe[2][offset + i];
				}
			}
			break;
		case 3:
			for (i = 0; i < len; i++) {
				/* moving parts will remain visible */
				if ((cframe[0][offset + i] * op_a) > (Y[i] * op_b)) {
					Y[i] = cframe[0][offset + i];
					Cb[i] = cframe[1][offset + i];
					Cr[i] = cframe[2][offset + i];
				}
			}
			break;
		}
	}

	chromastore_frame(frame, width, height, n, no_reverse);
}
Code example #25
static	void	vj_midi_send_vims_now( vmidi_t *v, int *data )
{
	// format vims message and send it now
	// it would be nice to filter out unique events per frame step
	
	// this can be done by keeping a temporary vevo port
	// and store (instead of send) the VIMS message
	// including the sample_id and chain_entry_id but
	// cutting off all other arguments.
	// then, last SET_SPEED will overwrite any previous ones for this frame step.
	//
	// last, send all messages in temporary port out and cleanup

	char key[32];

	if( v->learn )
	{
		veejay_memcpy( v->learn_event, data, sizeof(v->learn_event ));
		vj_msg(VEEJAY_MSG_INFO, "MIDI %x:%x,%x -> ?", v->learn_event[0],v->learn_event[1],
			v->learn_event[2]);
		return;
	}

	snprintf(key,sizeof(key), "%03d%03d", data[0],data[1] ); //@ event key is midi event type + midi control/param id

	dvims_t *d = NULL;
	int error = vevo_property_get( v->vims, key, 0, &d);
	if( error == VEVO_NO_ERROR )
	{
		if( d->extra )
		{	//@ argument is dynamic
			double min = 0.0;
			double max = 0.0;
			double val = 0.0;
			switch(d->extra)
			{
				case 1: //slider
				{
					GtkAdjustment *a = gtk_range_get_adjustment( GTK_RANGE(
							glade_xml_get_widget_( v->mw, d->widget ) ) );
					
					min = a->lower;
					max = a->upper;
				}
				break;
				case 2: //spinbox
					gtk_spin_button_get_range( GTK_SPIN_BUTTON(
							glade_xml_get_widget_( v->mw, d->widget)), &min, &max);
				
				break;
			}
			
			if( data[0] == SND_SEQ_EVENT_PITCHBEND )
			{
				val =  ( (data[2]/16384.0f) * (max-min) );
			}
			else if( data[0] == SND_SEQ_EVENT_CONTROLLER || data[0] == SND_SEQ_EVENT_KEYPRESS )
			{
				val = ((max-min)/127.0) * data[2] + min;
			}
		   	else {
				vj_msg(VEEJAY_MSG_INFO, "MIDI: what's this %x,%x,%x ?",data[0],data[1],data[2]);
				return;
			}

			char vims_msg[255];
			snprintf(vims_msg,sizeof(vims_msg), "%s %d;", d->msg, (int) val );

			/* use control/param as sample_id */
			int tmpv[3];
			if ( sscanf(vims_msg, "%03d:%d %d;",&tmpv[0],&tmpv[1],&tmpv[2]) == 3 )
			{
			    if(tmpv[1] == 0 && tmpv[0] >= 100 && tmpv[0] < 200) //@ VIMS: sample events, replace 0 (current_id) for control/param number
			    {
				snprintf(vims_msg,sizeof(vims_msg),"%03d:%d %d;", tmpv[0], data[1], (int)val);
			    	veejay_msg(VEEJAY_MSG_DEBUG, "(midi) using control/param %d as sample identifier",data[1]);
			    }	    
			}

			msg_vims( vims_msg );
			vj_msg(VEEJAY_MSG_INFO, "MIDI %x:%x, %x ->  vims %s", data[0], data[1],data[2], vims_msg);
		}
		else
		{
			msg_vims( d->msg );
			vj_msg(VEEJAY_MSG_INFO, "MIDI %x: %x,%x -> vims %s", data[0],data[1],data[2], d->msg);
		}
	}
	else
	{
		vj_msg(VEEJAY_MSG_ERROR, "No vims event for MIDI %x:%x,%x found",data[0],data[1],data[2]);
	}
}
Code example #26
void timedistort_apply( VJFrame *frame, int width, int height, int val)
{
	unsigned int i;
	const int len = (width * height);

	uint8_t *Y = frame->data[0];
	uint8_t *Cb = frame->data[1];
	uint8_t *Cr = frame->data[2];

	uint8_t *diff = nonmap;
	uint8_t *prev = nonmap + len;
	int interpolate = 1;
	int motion = 0;
	int tmp1,tmp2;
	if(motionmap_active()) //@ use motion mapping frame
	{
		motionmap_scale_to( 255,255,1,1,&tmp1,&tmp2, &n__,&N__ );
		motion = 1;
		diff = motionmap_bgmap();
	}
	else
	{
		n__ = 0;
		N__ = 0;

		if(!have_bg)
		{
			vj_frame_copy1( Y, prev, len );
			VJFrame smooth;
			veejay_memcpy(&smooth,frame, sizeof(VJFrame));
			smooth.data[0] = prev;
			softblur_apply(&smooth, width, height, 0 );
			veejay_memset( diff, 0, len );
			have_bg = 1;
			return;
		}
		else
		{
			/* vje_diff_plane: diff[i] = ( abs(prev[i] - Y[i]) > val ? 0xff : 0 ) */
			vje_diff_plane( prev, Y, diff, val, len );
			vj_frame_copy1( Y, prev, len );
			VJFrame smooth;
			veejay_memcpy(&smooth,frame, sizeof(VJFrame));
			smooth.data[0] = prev;
			softblur_apply(&smooth, width, height, 0 );
		}
	}
	
	if( n__ == N__ || n__ == 0 )
		interpolate = 0;

	//@ process
	uint8_t *planeTables[4] = { planetableY[plane], planetableU[plane], planetableV[plane], NULL };
	int strides[4] = { len, len, len, 0 };
	vj_frame_copy( frame->data, planeTables, strides );

	uint8_t *p = warptime[ warptimeFrame	] + width + 1;
	uint8_t *q = warptime[ warptimeFrame ^ 1] + width + 1;

	unsigned int x,y;
	for( y = height - 2; y > 0 ; y -- )
	{
		for( x = width - 2; x > 0; x -- )
		{
			i = *(p - width) + *(p-1) + *(p+1) + *(p + width);
			if( i > 3 ) i-= 3;
			p++;
			*q++ = i >> 2;
	
		}
		p += 2;
		q += 2;
	}
	q = warptime[ warptimeFrame ^ 1 ] + width + 1;
	int n_plane = 0;
	for( i = 0; i < len; i ++ )
	{
		if( diff[i] ) {
			q[i] = PLANES - 1;
		}

		n_plane = ( plane - q[i] + PLANES ) & (PLANES-1);

		Y[i]  = planetableY[ n_plane ][i];
		Cb[i] = planetableU[ n_plane ][i];
		Cr[i] = planetableV[ n_plane ][i];
	}

	plane ++;
	plane = plane & (PLANES-1);
	warptimeFrame ^= 1;

	if(interpolate)
		motionmap_interpolate_frame( frame, N__,n__ );
	if(motion)
		motionmap_store_frame(frame);

}
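The history lookup n_plane = (plane - q[i] + PLANES) & (PLANES - 1) only wraps correctly when PLANES is a power of two. A small standalone check of that ring-index math; the PLANES value below is a placeholder, not taken from the source:

#include <stdio.h>

#define PLANES 32	/* placeholder; must be a power of two for the bitmask to wrap */

/* Step 'back' frames behind 'current' in a ring of PLANES plane buffers. */
static int ring_index(int current, int back)
{
	return (current - back + PLANES) & (PLANES - 1);
}

int main(void)
{
	printf("%d\n", ring_index(1, 3));	/* prints 30: wraps around the ring */
	return 0;
}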