Example #1
// Filter data through filter
static af_data_t* play(struct af_instance_s* af, af_data_t* data)
{
  af_data_t*     c = data;      // Current working data
  af_data_t*     l = af->data;  // Local data
  af_channels_t* s = af->setup; // Setup for this instance
  int            i;
  
  if(AF_OK != RESIZE_LOCAL_BUFFER(af,data))
    return NULL;

  // Clear the whole local buffer so channels without a route stay silent
  memset(l->audio,0,(c->len*af->mul.n)/af->mul.d);
  
  if(AF_OK == check_routes(s,c->nch,l->nch))
    for(i=0;i<s->nr;i++)
      copy(c->audio, l->audio, c->nch, s->route[i][FR],
           l->nch, s->route[i][TO], c->len, c->bps);
  
  // Set output data
  c->audio = l->audio;
  c->len   = (c->len*af->mul.n)/af->mul.d;
  c->nch   = l->nch;

  return c;
}
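For reference, a minimal sketch of a routing helper consistent with the copy() call above, assuming interleaved samples; the argument order mirrors the call site (in, out, input channels, source index, output channels, destination index, input length in bytes, bytes per sample). The real MPlayer helper dispatches on bps with dedicated loops; this generic version is illustrative only.

#include <string.h>

static void copy(void* in, void* out, int inch, int ino,
                 int onch, int outo, int len, int bps)
{
  int i;
  int frames = len / (bps * inch); // Sample frames in the input block
  char* src = (char*)in;
  char* dst = (char*)out;
  for(i = 0; i < frames; i++)
    memcpy(dst + (i*onch + outo)*bps,
           src + (i*inch + ino)*bps, bps);
}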
Example #2
File: af_format.c Project: kax4/mpv
// Filter data through filter
static struct mp_audio* play(struct af_instance* af, struct mp_audio* data)
{
  struct mp_audio* l   = af->data;      // Local data
  struct mp_audio* c   = data;          // Current working data
  int              len = c->len/c->bps; // Length in samples of current audio block

  if(AF_OK != RESIZE_LOCAL_BUFFER(af,data))
    return NULL;

  // Change to cpu native endian format
  if((c->format&AF_FORMAT_END_MASK)!=AF_FORMAT_NE)
    endian(c->audio,c->audio,len,c->bps);

  // Conversion table
  if((c->format & AF_FORMAT_POINT_MASK) == AF_FORMAT_F) {
    float2int(c->audio, l->audio, len, l->bps);
    if((l->format & AF_FORMAT_SIGN_MASK) == AF_FORMAT_US)
      si2us(l->audio, len, l->bps);
  } else {
    // Input must be int

    // Change signed/unsigned
    if((c->format&AF_FORMAT_SIGN_MASK) != (l->format&AF_FORMAT_SIGN_MASK)){
      si2us(c->audio,len,c->bps);
    }
    // Convert to special formats
    switch(l->format & AF_FORMAT_POINT_MASK){
    case AF_FORMAT_F:
      int2float(c->audio, l->audio, len, c->bps);
      break;
    default:
      // Change the number of bits
      if(c->bps != l->bps)
        change_bps(c->audio, l->audio, len, c->bps, l->bps);
      else
        memcpy(l->audio, c->audio, len*c->bps);
      break;
    }
  }

  // Switch from cpu native endian to the correct endianness
  if((l->format&AF_FORMAT_END_MASK)!=AF_FORMAT_NE)
    endian(l->audio,l->audio,len,l->bps);

  // Set output data
  c->audio  = l->audio;
  c->len    = len*l->bps;
  c->bps    = l->bps;
  c->format = l->format;
  return c;
}
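The sign conversion above goes through si2us(). A plausible sketch, under the assumption of native-endian samples: toggling the most significant bit shifts the range by half, which converts signed to unsigned and back with the same operation. Only the 8- and 16-bit cases are shown; the real helper covers more widths.

#include <stdint.h>

// Sketch (assumption): in-place signed<->unsigned conversion by flipping
// the sign bit of each sample; len is in samples, bps in bytes.
static void si2us(void* data, int len, int bps)
{
  int i;
  if(bps == 1){
    uint8_t* p = (uint8_t*)data;
    for(i = 0; i < len; i++)
      p[i] ^= 0x80;              // Offset the range by 128
  } else if(bps == 2){
    uint16_t* p = (uint16_t*)data;
    for(i = 0; i < len; i++)
      p[i] ^= 0x8000;            // Offset the range by 32768
  }
}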
Example #3
static af_data_t* play_swapendian(struct af_instance_s* af, af_data_t* data)
{
  af_data_t* l   = af->data;      // Local data
  af_data_t* c   = data;          // Current working data
  int        len = c->len/c->bps; // Length in samples of current audio block

  if(AF_OK != RESIZE_LOCAL_BUFFER(af,data))
    return NULL;

  endian(c->audio,l->audio,len,c->bps);

  c->audio = l->audio;
  c->format = l->format;

  return c;
}
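Examples #3 and #4 both delegate the actual work to endian(). A simple sketch of what such a per-sample byte swap does, assuming len counts samples; each sample is buffered first so that in may alias out, as it does in example #2.

// Sketch (assumption): byte-swap every bps-byte sample in place or
// between buffers; len is in samples, bps in bytes per sample.
static void endian(void* in, void* out, int len, int bps)
{
  unsigned char* s = (unsigned char*)in;
  unsigned char* d = (unsigned char*)out;
  int i, j;
  for(i = 0; i < len; i++){
    unsigned char tmp[8];
    for(j = 0; j < bps; j++)   // Copy first so in == out is safe
      tmp[j] = s[i*bps + j];
    for(j = 0; j < bps; j++)
      d[i*bps + j] = tmp[bps - 1 - j];
  }
}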
Example #4
File: af_format.c Project: benf/mpv
static struct mp_audio* play_swapendian(struct af_instance* af, struct mp_audio* data)
{
  struct mp_audio* l   = af->data;      // Local data
  struct mp_audio* c   = data;          // Current working data
  int              len = c->len/c->bps; // Length in samples of current audio block

  if(AF_OK != RESIZE_LOCAL_BUFFER(af,data))
    return NULL;

  endian(c->audio,l->audio,len,c->bps);

  c->audio = l->audio;
  mp_audio_set_format(c, l->format);

  return c;
}
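The only functional change from example #3 is mp_audio_set_format(): in mpv the sample format determines derived fields such as bytes per sample, so the setter keeps those fields consistent instead of assigning c->format directly.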
Example #5
File: af_format.c Project: benf/mpv
static struct mp_audio* play_float_s16(struct af_instance* af, struct mp_audio* data)
{
  struct mp_audio* l   = af->data; // Local data
  struct mp_audio* c   = data;     // Current working data
  int              len = c->len/4; // Length in samples of current audio block

  if(AF_OK != RESIZE_LOCAL_BUFFER(af,data))
    return NULL;

  float2int(c->audio, l->audio, len, 2);

  c->audio = l->audio;
  mp_audio_set_format(c, l->format);
  c->len = len*2;

  return c;
}
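play_float_s16() hard-codes 4-byte float input and 2-byte output. A sketch of the 16-bit path of float2int(), under the assumption that float samples are normalized to [-1, 1]; the real helper covers other widths as well.

#include <stdint.h>

// Sketch (assumption): convert normalized floats to 16-bit signed
// integers with clipping; len is in samples, bps selects the width.
static void float2int(void* in, void* out, int len, int bps)
{
  int i;
  float* f = (float*)in;
  if(bps == 2){
    int16_t* o = (int16_t*)out;
    for(i = 0; i < len; i++){
      float x = f[i] * 32768.0f;
      if(x >  32767.0f) x =  32767.0f; // Clip instead of wrapping
      if(x < -32768.0f) x = -32768.0f;
      o[i] = (int16_t)x;
    }
  }
}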
Example #6
static af_data_t* play_s16_float(struct af_instance_s* af, af_data_t* data)
{
  af_data_t* l   = af->data; // Local data
  af_data_t* c   = data;     // Current working data
  int        len = c->len/2; // Length in samples of current audio block

  if(AF_OK != RESIZE_LOCAL_BUFFER(af,data))
    return NULL;

  int2float(c->audio, l->audio, len, 2);

  c->audio = l->audio;
  c->len = len*4;
  c->bps = 4;
  c->format = l->format;

  return c;
}
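The mirror image of example #5: a sketch of the 16-bit path of int2float() under the same normalization assumption.

#include <stdint.h>

// Sketch (assumption): scale 16-bit signed samples to floats in [-1, 1);
// len is in samples, bps selects the input width.
static void int2float(void* in, void* out, int len, int bps)
{
  int i;
  float* o = (float*)out;
  if(bps == 2){
    int16_t* s = (int16_t*)in;
    for(i = 0; i < len; i++)
      o[i] = s[i] * (1.0f / 32768.0f);
  }
}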
Example #7
// Filter data through filter
static af_data_t* play(struct af_instance_s* af, af_data_t* data)
{
  af_data_t* c    = data;          // Current working data
  af_data_t* l    = af->data;      // Local data
  af_pan_t*  s    = af->setup;     // Setup for this instance
  float*     in   = c->audio;      // Input audio data
  float*     out  = NULL;          // Output audio data
  float*     end  = in + c->len/4; // End of loop
  int        nchi = c->nch;        // Number of input channels
  int        ncho = l->nch;        // Number of output channels
  register int j, k;

  if(AF_OK != RESIZE_LOCAL_BUFFER(af,data))
    return NULL;

  out = l->audio;
  // Execute panning
  // FIXME: Too slow
  while(in < end){
    for(j=0;j<ncho;j++){
      register float  x   = 0.0;
      register float* tin = in;
      for(k=0;k<nchi;k++)
        x += tin[k] * s->level[j][k];
      out[j] = x;
    }
    out+= ncho;
    in+= nchi;
  }

  // Set output data
  c->audio = l->audio;
  c->len   = c->len / c->nch * l->nch;
  c->nch   = l->nch;

  return c;
}
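The inner loop above is a plain matrix-vector product per sample frame: out[j] is the sum over k of in[k] * level[j][k]. A self-contained toy run of the same arithmetic with an illustrative stereo-to-mono downmix matrix (the names here are ours, not MPlayer API):

#include <stdio.h>

int main(void)
{
  float in[]  = {1.0f, 0.0f,  0.5f, 0.5f}; // Two stereo frames (L,R)
  float level[1][2] = {{0.5f, 0.5f}};      // 1 output mixed from 2 inputs
  float out[2];
  int nchi = 2, ncho = 1, frames = 2, n, j, k;
  for(n = 0; n < frames; n++)
    for(j = 0; j < ncho; j++){
      float x = 0.0f;
      for(k = 0; k < nchi; k++)
        x += in[n*nchi + k] * level[j][k];
      out[n*ncho + j] = x;
    }
  printf("%f %f\n", out[0], out[1]); // Prints 0.500000 0.500000
  return 0;
}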
Example #8
// Filter data through filter
static af_data_t* play(struct af_instance_s* af, af_data_t* data)
{
  af_resample_t *s = af->setup;
  int ret;
  int8_t *in = (int8_t*)data->audio;
  int8_t *out;
  int chans   = data->nch;
  int in_len  = data->len;
  int out_len = in_len * af->mul + 10;

  if(AF_OK != RESIZE_LOCAL_BUFFER(af,data))
      return NULL;

  av_fast_malloc(&s->tmp[0], &s->tmp_alloc, FFALIGN(out_len,32));
  if(s->tmp[0] == NULL) return NULL;

  out= (int8_t*)af->data->audio;

  out_len= FFMIN(out_len, af->data->len);

  av_fast_malloc(&s->in[0], &s->in_alloc, FFALIGN(in_len,32));
  if(s->in[0] == NULL) return NULL;

  memcpy(s->in[0], in, in_len);

  // swr_convert() counts are per-channel samples: bytes / channels / 2 bytes per 16-bit sample
  ret = swr_convert(s->swrctx, &s->tmp[0], out_len/chans/2, &s->in[0], in_len/chans/2);
  if (ret < 0) return NULL;
  out_len= ret*chans*2;

  memcpy(out, s->tmp[0], out_len);

  data->audio = af->data->audio;
  data->len   = out_len;
  data->rate  = af->data->rate;
  return data;
}
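This variant resamples through libswresample. A sketch of how a context like s->swrctx might be created, with placeholder rates and channel layouts (the real filter configures it from the negotiated formats); swr_alloc_set_opts() and swr_init() are actual libswresample calls, though newer FFmpeg prefers swr_alloc_set_opts2().

#include <libswresample/swresample.h>
#include <libavutil/channel_layout.h>

// Sketch (assumption): build a SwrContext for interleaved 16-bit stereo;
// rates and layouts here are placeholders, not MPlayer's negotiated ones.
static struct SwrContext* setup_swr(int in_rate, int out_rate)
{
  struct SwrContext* ctx =
      swr_alloc_set_opts(NULL,
                         AV_CH_LAYOUT_STEREO, AV_SAMPLE_FMT_S16, out_rate,
                         AV_CH_LAYOUT_STEREO, AV_SAMPLE_FMT_S16, in_rate,
                         0, NULL);
  if(ctx && swr_init(ctx) < 0)
    swr_free(&ctx); // swr_free() also resets ctx to NULL
  return ctx;
}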
Example #9
/* Filter data through filter

Two "tricks" are used to compensate the "color" of the KEMAR data:

1. The KEMAR data is refiltered to ensure that the front L, R channels
on the same side of the ear are equalized (especially in the high
frequencies).

2. A bass compensation is introduced to ensure that 0-200 Hz are not
damped (without any real 3D acoustical image, however).
*/
static af_data_t* play(struct af_instance_s *af, af_data_t *data)
{
    af_hrtf_t *s = af->setup;
    short *in = data->audio; // Input audio data
    short *out = NULL; // Output audio data
    short *end = in + data->len / sizeof(short); // Loop end
    float common, left, right, diff, left_b, right_b;
    const int dblen = s->dlbuflen, hlen = s->hrflen, blen = s->basslen;

    if(AF_OK != RESIZE_LOCAL_BUFFER(af, data))
	return NULL;

    if(s->print_flag) {
	s->print_flag = 0;
	switch (s->decode_mode) {
	case HRTF_MIX_51:
	  mp_msg(MSGT_AFILTER, MSGL_INFO,
		 "[hrtf] Using HRTF to mix %s discrete surround into "
		 "L, R channels\n", s->matrix_mode ? "5+1" : "5");
	  break;
	case HRTF_MIX_STEREO:
	  mp_msg(MSGT_AFILTER, MSGL_INFO,
		 "[hrtf] Using HRTF to mix stereo into "
		 "L, R channels\n");
	  break;
	case HRTF_MIX_MATRIX2CH:
	  mp_msg(MSGT_AFILTER, MSGL_INFO,
		 "[hrtf] Using active matrix to decode 2 channel "
		 "input, HRTF to mix %s matrix surround into "
		 "L, R channels\n", "3/2");
	  break;
	default:
	  mp_msg(MSGT_AFILTER, MSGL_WARN,
		 "[hrtf] bogus decode_mode: %d\n", s->decode_mode);
	  break;
	}

       if(s->matrix_mode)
	  mp_msg(MSGT_AFILTER, MSGL_INFO,
		 "[hrtf] Using active matrix to decode rear center "
		 "channel\n");
    }

    out = af->data->audio;

    /* MPlayer's 5 channel layout (notation for the variable):
     *
     * 0: L (LF), 1: R (RF), 2: Ls (LR), 3: Rs (RR), 4: C (CF), matrix
     * encoded: Cs (CR)
     *
     * or: L = left, C = center, R = right; as a suffix, F = front, R = rear
     *
     * Filter notation:
     *
     *      CF
     * OF        AF
     *      Ear->
     * OR        AR
     *      CR
     *
     * or: C = center, A = same side, O = opposite, F = front, R = rear
     */

    while(in < end) {
	const int k = s->cyc_pos;

	update_ch(s, in, k);

	/* Simulate a 7.5 ms -20 dB echo of the center channel in the
	   front channels (like reflection from a room wall) - a kind of
	   psycho-acoustically "cheating" to focus the center front
	   channel, which is normally hard to be perceived as front */
	s->lf[k] += CFECHOAMPL * s->cf[(k + CFECHODELAY) % s->dlbuflen];
	s->rf[k] += CFECHOAMPL * s->cf[(k + CFECHODELAY) % s->dlbuflen];

	switch (s->decode_mode) {
	case HRTF_MIX_51:
	case HRTF_MIX_MATRIX2CH:
	   /* Mixer filter matrix */
	   common = conv(dblen, hlen, s->cf, s->cf_ir, k + s->cf_o);
	   if(s->matrix_mode) {
	      /* In matrix decoding mode, the rear channel gain must be
		 renormalized, as there is an additional channel. */
	      matrix_decode(in, k, 2, 3, 0, s->dlbuflen,
			    s->lr_fwr, s->rr_fwr,
			    s->lrprr_fwr, s->lrmrr_fwr,
			    &(s->adapt_lr_gain), &(s->adapt_rr_gain),
			    &(s->adapt_lrprr_gain), &(s->adapt_lrmrr_gain),
			    s->lr, s->rr, NULL, NULL, s->cr);
	      common +=
		 conv(dblen, hlen, s->cr, s->cr_ir, k + s->cr_o) *
		 M1_76DB;
	      left    =
		 ( conv(dblen, hlen, s->lf, s->af_ir, k + s->af_o) +
		   conv(dblen, hlen, s->rf, s->of_ir, k + s->of_o) +
		   (conv(dblen, hlen, s->lr, s->ar_ir, k + s->ar_o) +
		    conv(dblen, hlen, s->rr, s->or_ir, k + s->or_o)) *
		   M1_76DB + common);
	      right   =
		 ( conv(dblen, hlen, s->rf, s->af_ir, k + s->af_o) +
		   conv(dblen, hlen, s->lf, s->of_ir, k + s->of_o) +
		   (conv(dblen, hlen, s->rr, s->ar_ir, k + s->ar_o) +
		    conv(dblen, hlen, s->lr, s->or_ir, k + s->or_o)) *
		   M1_76DB + common);
	   } else {
	      left    =
		 ( conv(dblen, hlen, s->lf, s->af_ir, k + s->af_o) +
		   conv(dblen, hlen, s->rf, s->of_ir, k + s->of_o) +
		   conv(dblen, hlen, s->lr, s->ar_ir, k + s->ar_o) +
		   conv(dblen, hlen, s->rr, s->or_ir, k + s->or_o) +
		   common);
	      right   =
		 ( conv(dblen, hlen, s->rf, s->af_ir, k + s->af_o) +
		   conv(dblen, hlen, s->lf, s->of_ir, k + s->of_o) +
		   conv(dblen, hlen, s->rr, s->ar_ir, k + s->ar_o) +
		   conv(dblen, hlen, s->lr, s->or_ir, k + s->or_o) +
		   common);
	   }
	   break;
	case HRTF_MIX_STEREO:
	   left    =
	      ( conv(dblen, hlen, s->lf, s->af_ir, k + s->af_o) +
		conv(dblen, hlen, s->rf, s->of_ir, k + s->of_o));
	   right   =
	      ( conv(dblen, hlen, s->rf, s->af_ir, k + s->af_o) +
		conv(dblen, hlen, s->lf, s->of_ir, k + s->of_o));
	   break;
	default:
	    /* make gcc happy */
	    left = 0.0;
	    right = 0.0;
	    break;
	}

	/* Bass compensation for the lower frequency cut of the HRTF.  A
	   cross talk of the left and right channel is introduced to
	   match the directional characteristics of higher frequencies.
	   The bass will not have any real 3D perception, but that is
	   OK (note at 180 Hz, the wavelength is about 2 m, and any
	   spatial perception is impossible). */
	left_b  = conv(dblen, blen, s->ba_l, s->ba_ir, k);
	right_b = conv(dblen, blen, s->ba_r, s->ba_ir, k);
	left  += (1 - BASSCROSS) * left_b  + BASSCROSS * right_b;
	right += (1 - BASSCROSS) * right_b + BASSCROSS * left_b;
	/* Also mix the LFE channel (if available) */
	if(data->nch >= 6) {
	    left  += in[5] * M3_01DB;
	    right += in[5] * M3_01DB;
	}

	/* Amplitude renormalization. */
	left  *= AMPLNORM;
	right *= AMPLNORM;

	switch (s->decode_mode) {
	case HRTF_MIX_51:
	case HRTF_MIX_STEREO:
	   /* "Cheating": linear stereo expansion to amplify the 3D
	      perception.  Note: Too much will destroy the acoustic space
	      and may even result in headaches. */
	   diff = STEXPAND2 * (left - right);
	   out[0] = (int16_t)(left  + diff);
	   out[1] = (int16_t)(right - diff);
	   break;
	case HRTF_MIX_MATRIX2CH:
	   /* Don't attempt any stereo expansion with matrix encoded
	      sources.  The L, R channels are already stereo expanded
	      by the steering; any further stereo expansion would sound
	      very unnatural. */
	   out[0] = (int16_t)left;
	   out[1] = (int16_t)right;
	   break;
	}

	/* Next sample... */
	in = &in[data->nch];
	out = &out[af->data->nch];
	(s->cyc_pos)--;
	if(s->cyc_pos < 0)
	    s->cyc_pos += dblen;
    }

    /* Set output data */
    data->audio = af->data->audio;
    data->len   = data->len / data->nch * 2;
    data->nch   = 2;

    return data;
}
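All of the mixing above funnels through conv(), a dot product of an impulse response with a window of a circular delay buffer. A straightforward sketch using modulo indexing; a production version would split the loop at the wrap point rather than take % per tap, but the arithmetic is the same.

// Sketch (assumption): FIR convolution against a circular buffer.
// nx: circular buffer length, nk: filter length, sx: signal buffer,
// sk: filter taps, offset: current read position in sx.
static float conv(const int nx, const int nk, const float* sx,
                  const float* sk, const int offset)
{
  float x = 0.0f;
  int k;
  for(k = 0; k < nk; k++)
    x += sx[(offset + k) % nx] * sk[k];
  return x;
}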
Example #10
// Filter data through filter
static af_data_t* play(struct af_instance_s* af, af_data_t* data){
  af_surround_t* s   = (af_surround_t*)af->setup;
  float*	 m   = steering_matrix[0]; 
  float*     	 in  = data->audio; 	// Input audio data
  float*     	 out = NULL;		// Output audio data
  float*	 end = in + data->len / sizeof(float); // Loop end
  int 		 i   = s->i;	// Filter queue index
  int 		 ri  = s->ri;	// Read index for delay queue
  int 		 wi  = s->wi;	// Write index for delay queue

  if (AF_OK != RESIZE_LOCAL_BUFFER(af, data))
    return NULL;

  out = af->data->audio;

  while(in < end){
    /* Dominance (left vs. right and front vs. rear):
       abs(in[0]) vs. abs(in[1]);
       abs(in[0]+in[1]) vs. abs(in[0]-in[1]);
       10 * log( abs(in[0]) / (abs(in[1])|1) );
       10 * log( abs(in[0]+in[1]) / (abs(in[0]-in[1])|1) ); */

    /* About volume balancing...
       Surround encoding does the following:
           Lt=L+.707*C+.707*S, Rt=R+.707*C-.707*S
       So S should be extracted as:
           (Lt-Rt)
       But since we are splitting S across two output channels, we
       must take 3dB off as we split it:
           Ls=Rs=.707*(Lt-Rt)
       Trouble is, Lt could be +1 and Rt -1, so S could overflow. To
       avoid that, we cut L/R by 3dB (*.707) and S by 6dB (/2). This
       keeps the overall balance, but guarantees no overflow. */

    // Output front left and right
    out[0] = m[0]*in[0] + m[1]*in[1];
    out[1] = m[2]*in[0] + m[3]*in[1];

    // Low-pass output @ 7kHz
    FIR((&s->lq[i]), s->w, s->dl[wi]);

    // Delay output by d ms
    out[2] = s->dl[ri];

#ifdef SPLITREAR
    // Low-pass output @ 7kHz
    FIR((&s->rq[i]), s->w, s->dr[wi]);

    // Delay output by d ms
    out[3] = s->dr[ri];
#else
    out[3] = -out[2];
#endif

    // Update delay queues indexes
    UPDATEQI(ri);
    UPDATEQI(wi);

    // Calculate and save surround in circular queue
#ifdef SPLITREAR
    ADDQUE(i, s->rq, s->lq, m[6]*in[0]+m[7]*in[1], m[8]*in[0]+m[9]*in[1]);
#else
    ADDQUE(i, s->lq, m[4]*in[0]+m[5]*in[1]);
#endif

    // Next sample...
    in = &in[data->nch];  
    out = &out[af->data->nch];
  }
  
  // Save indexes
  s->i  = i; s->ri = ri; s->wi = wi;

  // Set output data
  data->audio = af->data->audio;
  data->len   = (data->len*af->mul.n)/af->mul.d;
  data->nch   = af->data->nch;

  return data;
}
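The filter state lives in circular queues driven by the UPDATEQI and ADDQUE macros. A plausible sketch of their shape, assuming (as the FIR() calls suggest) that each queue is stored twice back-to-back so the filter can always read a contiguous window without wrapping; LD and L are placeholder power-of-two lengths standing in for the real delay-line and filter sizes, and the SPLITREAR variant pushes into both rear queues at once.

#define LD 65536  /* Delay-line length (placeholder power of two) */
#define L  32     /* FIR queue length (placeholder power of two) */

/* Sketch (assumption): advance a delay-line index with wraparound */
#define UPDATEQI(qi) ((qi) = ((qi) + 1) & (LD - 1))

/* Sketch (assumption): push a sample into a doubled circular queue so
   q[i]..q[i+L-1] is always a contiguous window for the FIR filter */
#ifdef SPLITREAR
#define ADDQUE(i, q1, q2, in1, in2) do { \
    (q1)[i] = (q1)[(i) + L] = (in1);     \
    (q2)[i] = (q2)[(i) + L] = (in2);     \
    (i) = ((i) - 1) & (L - 1);           \
  } while(0)
#else
#define ADDQUE(i, q, in) do {            \
    (q)[i] = (q)[(i) + L] = (in);        \
    (i) = ((i) - 1) & (L - 1);           \
  } while(0)
#endif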
Example #11
// Filter data through filter
static af_data_t* play(struct af_instance_s* af, af_data_t* data)
{
  af_resample_t *s = af->setup;
  int i, j, consumed, ret;
  int16_t *in = (int16_t*)data->audio;
  int16_t *out;
  int chans   = data->nch;
  int in_len  = data->len/(2*chans);
  int out_len = in_len * af->mul + 10;
  int16_t tmp[AF_NCH][out_len];

  if(AF_OK != RESIZE_LOCAL_BUFFER(af,data))
      return NULL;

  out= (int16_t*)af->data->audio;

  out_len= min(out_len, af->data->len/(2*chans));

  if(s->in_alloc < in_len + s->index){
      s->in_alloc= in_len + s->index;
      for(i=0; i<chans; i++){
          s->in[i]= realloc(s->in[i], s->in_alloc*sizeof(int16_t));
      }
  }

  if(chans==1){
      memcpy(&s->in[0][s->index], in, in_len * sizeof(int16_t));
  }else if(chans==2){
      for(j=0; j<in_len; j++){
          s->in[0][j + s->index]= *(in++);
          s->in[1][j + s->index]= *(in++);
      }
  }else{
      for(j=0; j<in_len; j++){
          for(i=0; i<chans; i++){
              s->in[i][j + s->index]= *(in++);
          }
      }
  }
  in_len += s->index;

  for(i=0; i<chans; i++){
      // The last argument updates the resampler state only on the final
      // channel, so every channel consumes the same number of input samples
      // and ret/consumed from the last pass hold for all of them.
      ret= av_resample(s->avrctx, tmp[i], s->in[i], &consumed, in_len, out_len, i+1 == chans);
  }
  out_len= ret;

  s->index= in_len - consumed;
  for(i=0; i<chans; i++){
      memmove(s->in[i], s->in[i] + consumed, s->index*sizeof(int16_t));
  }

  if(chans==1){
      memcpy(out, tmp[0], out_len*sizeof(int16_t));
  }else if(chans==2){
      for(j=0; j<out_len; j++){
          *(out++)= tmp[0][j];
          *(out++)= tmp[1][j];
      }
  }else{
      for(j=0; j<out_len; j++){
          for(i=0; i<chans; i++){
              *(out++)= tmp[i][j];
          }
      }
  }

  data->audio = af->data->audio;
  data->len   = out_len*chans*2;
  data->rate  = af->data->rate;
  return data;
}
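This older variant uses FFmpeg's original libavcodec resampler (AVResampleContext, since removed in favor of libswresample). A sketch of how the context in s->avrctx is created; the parameter values below are illustrative defaults, not the ones MPlayer actually passes.

#include <libavcodec/avcodec.h>

// Sketch (assumption): create the AVResampleContext used by av_resample().
// av_resample_init(out_rate, in_rate, filter_length, log2_phase_count,
//                  linear, cutoff) is the historical libavcodec API.
AVResampleContext* avrctx =
    av_resample_init(48000,  // output sample rate
                     44100,  // input sample rate
                     16,     // filter length
                     10,     // log2 of phase count
                     0,      // no linear interpolation between phases
                     0.8);   // cutoff frequency (fraction of Nyquist)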