Example 1
static void _preextrapolate_helper( vorbis_dsp_state *v )
{
    int i;
    int order = 16;
    float *lpc = alloca( order * sizeof( *lpc ) );
    float *work = alloca( v->pcm_current * sizeof( *work ) );
    long j;
    v->preextrapolate = 1;

    if ( v->pcm_current - v->centerW > order*2 )
    {
        /* safety */
        for ( i = 0;i < v->vi->channels;i++ )
        {
            /* need to run the extrapolation in reverse! */
            for ( j = 0;j < v->pcm_current;j++ )
                work[j] = v->pcm[i][v->pcm_current-j-1];

            /* prime as above */
            vorbis_lpc_from_data( work, lpc, v->pcm_current - v->centerW, order );

#if 0
            if ( v->vi->channels == 2 )
            {
                if ( i == 0 )
                    _analysis_output( "predataL", 0, work, v->pcm_current - v->centerW, 0, 0, 0 );
                else
                    _analysis_output( "predataR", 0, work, v->pcm_current - v->centerW, 0, 0, 0 );
            }
            else
            {
                _analysis_output( "predata", 0, work, v->pcm_current - v->centerW, 0, 0, 0 );
            }
#endif

            /* run the predictor filter */
            vorbis_lpc_predict( lpc, work + v->pcm_current - v->centerW - order,
                                order,
                                work + v->pcm_current - v->centerW,
                                v->centerW );

            for ( j = 0;j < v->pcm_current;j++ )
                v->pcm[i][v->pcm_current-j-1] = work[j];

        }
    }
}
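
The reversal is the whole trick here: the samples that need to be synthesized sit *before* the valid data, so the buffer is mirrored, the forward predictor is run off its tail, and the result is mirrored back. Below is a minimal, self-contained sketch of that mirror/predict/mirror pattern, using a trivial hold-last-sample stand-in instead of vorbis_lpc_from_data/vorbis_lpc_predict (all names in the sketch are illustrative, not part of the codec):

#include <stdio.h>

/* mirror src[0..len-1] into dst */
static void reverse_copy(float *dst, const float *src, long len)
{
    long i;
    for (i = 0; i < len; i++)
        dst[i] = src[len - i - 1];
}

/* stand-in predictor: extend the signal by repeating its last valid sample */
static void hold_last_predict(float *buf, long valid, long extra)
{
    long i;
    for (i = 0; i < extra; i++)
        buf[valid + i] = buf[valid - 1];
}

int main(void)
{
    /* first two samples are 'missing'; the last four are valid data */
    float pcm[6] = { 0, 0, 4, 3, 2, 1 };
    float work[6];
    long n = 6, missing = 2, valid = n - missing, i;

    reverse_copy(work, pcm, n);              /* valid data now leads the buffer */
    hold_last_predict(work, valid, missing); /* extrapolate past its end */
    reverse_copy(pcm, work, n);              /* mirror back: extrapolation lands up front */

    for (i = 0; i < n; i++)
        printf("%g ", pcm[i]);
    printf("\n");                            /* prints: 4 4 4 3 2 1 */
    return 0;
}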
Example 2
static void _vp_noisemask(VorbisPsy *p,
			  float *logfreq, 
			  float *logmask){

  int i,n=p->n/2;
  float *work=alloca(n*sizeof(*work));

  bark_noise_hybridmp(n,p->bark,logfreq,logmask,
		      140.,-1);

  for(i=0;i<n;i++)work[i]=logfreq[i]-logmask[i];

  bark_noise_hybridmp(n,p->bark,work,logmask,0.,
		      p->vi->noisewindowfixed);

  for(i=0;i<n;i++)work[i]=logfreq[i]-work[i];
  
  {
    static int seq=0;
    
    float work2[n];
    for(i=0;i<n;i++){
      work2[i]=logmask[i]+work[i];
    }
    
    _analysis_output("logfreq",seq,logfreq,n,0,0);
    _analysis_output("median",seq,work,n,0,0);
    _analysis_output("envelope",seq,work2,n,0,0);
    seq++;
  }

  for(i=0;i<n;i++){
    int dB=logmask[i]+.5;
    if(dB>=NOISE_COMPAND_LEVELS)dB=NOISE_COMPAND_LEVELS-1;
    if(dB<0)dB=0;
    logmask[i]= work[i]+p->vi->noisecompand[dB]+p->noiseoffset[i];
  }

}
Example 3
void compute_curve(VorbisPsy *psy, float *audio, float *curve)
{
   int i;
   float work[psy->n];

   float scale=4.f/psy->n;
   float scale_dB;

   scale_dB=todB(scale);
  
   /* window the PCM data; use a BH4 window, not vorbis */
   for(i=0;i<psy->n;i++)
     work[i]=audio[i] * psy->window[i];

   {
     static int seq=0;
     
     _analysis_output("win",seq,work,psy->n,0,0);

     seq++;
   }

    /* FFT yields more accurate tonal estimation (not phase sensitive) */
    spx_drft_forward(&psy->lookup,work);

    /* magnitudes */
    work[0]=scale_dB+todB(work[0]);
    for(i=1;i<psy->n-1;i+=2){
      float temp = work[i]*work[i] + work[i+1]*work[i+1];
      work[(i+1)>>1] = scale_dB+.5f * todB(temp);
    }

    /* derive a noise curve */
    _vp_noisemask(psy,work,curve);
#define SIDE 12
    for (i=0;i<SIDE;i++)
    {
       curve[i]=curve[SIDE];
       curve[(psy->n>>1)-i-1]=curve[(psy->n>>1)-SIDE];
    }
    for(i=0;i<((psy->n)>>1);i++)
       //curve[i] = fromdB(.5*curve[i])*pow(1+i/12,.6);
       curve[i] = fromdB(1.2*curve[i]+.2*i);

}
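
The magnitude loop above leans on the identity 0.5*dB(re^2+im^2) = dB(sqrt(re^2+im^2)): halving the dB of a bin's power gives the dB of its magnitude without taking a square root. A self-contained sketch of the same conversion for an interleaved half-complex spectrum (DC term, then re/im pairs), using a plain 20*log10 reference in place of the library's todB approximation (the names here are illustrative only):

#include <math.h>
#include <stdio.h>

/* reference dB conversion: 20*log10(|x|), with a tiny bias to avoid log(0) */
static float to_dB(float x)
{
    return 20.f * log10f(fabsf(x) + 1e-20f);
}

/* spec: [DC, re1, im1, re2, im2, ...] -> mag_dB: [DC, bin1, bin2, ...] */
static void log_magnitudes(const float *spec, float *mag_dB, int n, float scale_dB)
{
    int i;
    mag_dB[0] = scale_dB + to_dB(spec[0]);
    for (i = 1; i < n - 1; i += 2) {
        float power = spec[i] * spec[i] + spec[i + 1] * spec[i + 1];
        /* half the dB of the power equals the dB of the magnitude */
        mag_dB[(i + 1) >> 1] = scale_dB + .5f * to_dB(power);
    }
}

int main(void)
{
    float spec[8] = { 1.f, 3.f, 4.f, 0.f, 2.f, 1.f, 1.f, 0.f };
    float mag_dB[4];
    int i;
    log_magnitudes(spec, mag_dB, 8, 0.f);
    for (i = 0; i < 4; i++)
        printf("bin %d: %.2f dB\n", i, mag_dB[i]);
    /* bin 1 is re=3, im=4, so |bin|=5 and 20*log10(5) is about 13.98 dB */
    return 0;
}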
Example 4
static int mapping0_forward(vorbis_block *vb){
  vorbis_dsp_state      *vd=vb->vd;
  vorbis_info           *vi=vd->vi;
  codec_setup_info      *ci=vi->codec_setup;
  private_state         *b=vb->vd->backend_state;
  vorbis_block_internal *vbi=(vorbis_block_internal *)vb->internal;
  int                    n=vb->pcmend;
  int i,j,k;

  int    *nonzero    = alloca(sizeof(*nonzero)*vi->channels);
  float  **gmdct     = _vorbis_block_alloc(vb,vi->channels*sizeof(*gmdct));
  int    **ilogmaskch= _vorbis_block_alloc(vb,vi->channels*sizeof(*ilogmaskch));
  int ***floor_posts = _vorbis_block_alloc(vb,vi->channels*sizeof(*floor_posts));
  
  float global_ampmax=vbi->ampmax;
  float *local_ampmax=alloca(sizeof(*local_ampmax)*vi->channels);
  int blocktype=vbi->blocktype;

  int modenumber=vb->W;
  vorbis_info_mapping0 *info=ci->map_param[modenumber];
  vorbis_look_psy *psy_look=
    b->psy+blocktype+(vb->W?2:0);

  vb->mode=modenumber;

  for(i=0;i<vi->channels;i++){
    float scale=4.f/n;
    float scale_dB;

    float *pcm     =vb->pcm[i]; 
    float *logfft  =pcm;

    gmdct[i]=_vorbis_block_alloc(vb,n/2*sizeof(**gmdct));

    scale_dB=todB(&scale) + .345; /* + .345 is a hack; the original
                                     todB estimation used on IEEE 754
                                     compliant machines had a bug that
                                     returned dB values about a third
                                     of a decibel too high.  The bug
                                     was harmless because tunings
                                     implicitly took that into
                                     account.  However, fixing the bug
                                     in the estimator requires
                                     changing all the tunings as well.
                                     For now, it's easier to sync
                                     things back up here, and
                                     recalibrate the tunings in the
                                     next major model upgrade. */

#if 0
    if(vi->channels==2)
      if(i==0)
	_analysis_output("pcmL",seq,pcm,n,0,0,total-n/2);
      else
	_analysis_output("pcmR",seq,pcm,n,0,0,total-n/2);
#endif
  
    /* window the PCM data */
    _vorbis_apply_window(pcm,b->window,ci->blocksizes,vb->lW,vb->W,vb->nW);

#if 0
    if(vi->channels==2)
      if(i==0)
	_analysis_output("windowedL",seq,pcm,n,0,0,total-n/2);
      else
	_analysis_output("windowedR",seq,pcm,n,0,0,total-n/2);
#endif

    /* transform the PCM data */
    /* only MDCT right now.... */
    mdct_forward(b->transform[vb->W][0],pcm,gmdct[i]);
    
    /* FFT yields more accurate tonal estimation (not phase sensitive) */
    drft_forward(&b->fft_look[vb->W],pcm);
    logfft[0]=scale_dB+todB(pcm)  + .345; /* + .345 is a hack; the
                                     original todB estimation used on
                                     IEEE 754 compliant machines had a
                                     bug that returned dB values about
                                     a third of a decibel too high.
                                     The bug was harmless because
                                     tunings implicitly took that into
                                     account.  However, fixing the bug
                                     in the estimator requires
                                     changing all the tunings as well.
                                     For now, it's easier to sync
                                     things back up here, and
                                     recalibrate the tunings in the
                                     next major model upgrade. */
    local_ampmax[i]=logfft[0];
    for(j=1;j<n-1;j+=2){
      float temp=pcm[j]*pcm[j]+pcm[j+1]*pcm[j+1];
      temp=logfft[(j+1)>>1]=scale_dB+.5f*todB(&temp)  + .345; /* +
                                     .345 is a hack; the original todB
                                     estimation used on IEEE 754
                                     compliant machines had a bug that
                                     returned dB values about a third
                                     of a decibel too high.  The bug
                                     was harmless because tunings
                                     implicitly took that into
                                     account.  However, fixing the bug
                                     in the estimator requires
                                     changing all the tunings as well.
                                     For now, it's easier to sync
                                     things back up here, and
                                     recalibrate the tunings in the
                                     next major model upgrade. */
      if(temp>local_ampmax[i])local_ampmax[i]=temp;
    }

    if(local_ampmax[i]>0.f)local_ampmax[i]=0.f;
    if(local_ampmax[i]>global_ampmax)global_ampmax=local_ampmax[i];

#if 0
    if(vi->channels==2){
      if(i==0){
	_analysis_output("fftL",seq,logfft,n/2,1,0,0);
      }else{
	_analysis_output("fftR",seq,logfft,n/2,1,0,0);
      }
    }
#endif

  }
  
  {
    float   *noise        = _vorbis_block_alloc(vb,n/2*sizeof(*noise));
    float   *tone         = _vorbis_block_alloc(vb,n/2*sizeof(*tone));
    
    for(i=0;i<vi->channels;i++){
      /* the encoder setup assumes that all the modes used by any
	 specific bitrate tweaking use the same floor */
      
      int submap=info->chmuxlist[i];
      
      /* the following makes things clearer to *me* anyway */
      float *mdct    =gmdct[i];
      float *logfft  =vb->pcm[i];
      
      float *logmdct =logfft+n/2;
      float *logmask =logfft;

      vb->mode=modenumber;

      floor_posts[i]=_vorbis_block_alloc(vb,PACKETBLOBS*sizeof(**floor_posts));
      memset(floor_posts[i],0,sizeof(**floor_posts)*PACKETBLOBS);
      
      for(j=0;j<n/2;j++)
	logmdct[j]=todB(mdct+j)  + .345; /* + .345 is a hack; the original
                                     todB estimation used on IEEE 754
                                     compliant machines had a bug that
                                     returned dB values about a third
                                     of a decibel too high.  The bug
                                     was harmless because tunings
                                     implicitly took that into
                                     account.  However, fixing the bug
                                     in the estimator requires
                                     changing all the tunings as well.
                                     For now, it's easier to sync
                                     things back up here, and
                                     recalibrate the tunings in the
                                     next major model upgrade. */

#if 0
      if(vi->channels==2){
	if(i==0)
	  _analysis_output("mdctL",seq,logmdct,n/2,1,0,0);
	else
	  _analysis_output("mdctR",seq,logmdct,n/2,1,0,0);
      }else{
	_analysis_output("mdct",seq,logmdct,n/2,1,0,0);
      }
#endif 
      
      /* first step; noise masking.  Not only does 'noise masking'
         give us curves from which we can decide how much resolution
         to give noise parts of the spectrum, it also implicitly hands
         us a tonality estimate (the larger the value in the
         'noise_depth' vector, the more tonal that area is) */

      _vp_noisemask(psy_look,
		    logmdct,
		    noise); /* noise does not have by-frequency offset
                               bias applied yet */
#if 0
      if(vi->channels==2){
	if(i==0)
	  _analysis_output("noiseL",seq,noise,n/2,1,0,0);
	else
	  _analysis_output("noiseR",seq,noise,n/2,1,0,0);
      }
#endif

      /* second step: 'all the other crap'; all the stuff that isn't
         computed/fit for bitrate management goes in the second psy
         vector.  This includes tone masking, peak limiting and ATH */

      _vp_tonemask(psy_look,
		   logfft,
		   tone,
		   global_ampmax,
		   local_ampmax[i]);

#if 0
      if(vi->channels==2){
	if(i==0)
	  _analysis_output("toneL",seq,tone,n/2,1,0,0);
	else
	  _analysis_output("toneR",seq,tone,n/2,1,0,0);
      }
#endif

      /* third step; we offset the noise vectors, overlay tone
	 masking.  We then do a floor1-specific line fit.  If we're
	 performing bitrate management, the line fit is performed
	 multiple times for up/down tweakage on demand. */

#if 0
      {
      float aotuv[psy_look->n];
#endif

	_vp_offset_and_mix(psy_look,
			   noise,
			   tone,
			   1,
			   logmask,
			   mdct,
			   logmdct);
	
#if 0
	if(vi->channels==2){
	  if(i==0)
	    _analysis_output("aotuvM1_L",seq,aotuv,psy_look->n,1,1,0);
	  else
	    _analysis_output("aotuvM1_R",seq,aotuv,psy_look->n,1,1,0);
	}
      }
#endif


#if 0
      if(vi->channels==2){
	if(i==0)
	  _analysis_output("mask1L",seq,logmask,n/2,1,0,0);
	else
	  _analysis_output("mask1R",seq,logmask,n/2,1,0,0);
      }
#endif

      /* this algorithm is hardwired to floor 1 for now; abort out if
         we're *not* floor1.  This won't happen unless someone has
         broken the encode setup lib.  Guard it anyway. */
      if(ci->floor_type[info->floorsubmap[submap]]!=1)return(-1);

      floor_posts[i][PACKETBLOBS/2]=
	floor1_fit(vb,b->flr[info->floorsubmap[submap]],
		   logmdct,
		   logmask);
      
      /* are we managing bitrate?  If so, perform two more fits for
         later rate tweaking (fits represent hi/lo) */
      if(vorbis_bitrate_managed(vb) && floor_posts[i][PACKETBLOBS/2]){
	/* higher rate by way of lower noise curve */

	_vp_offset_and_mix(psy_look,
			   noise,
			   tone,
			   2,
			   logmask,
			   mdct,
			   logmdct);

#if 0
	if(vi->channels==2){
	  if(i==0)
	    _analysis_output("mask2L",seq,logmask,n/2,1,0,0);
	  else
	    _analysis_output("mask2R",seq,logmask,n/2,1,0,0);
	}
#endif
	
	floor_posts[i][PACKETBLOBS-1]=
	  floor1_fit(vb,b->flr[info->floorsubmap[submap]],
		     logmdct,
		     logmask);
      
	/* lower rate by way of higher noise curve */
	_vp_offset_and_mix(psy_look,
			   noise,
			   tone,
			   0,
			   logmask,
			   mdct,
			   logmdct);

#if 0
	if(vi->channels==2)
	  if(i==0)
	    _analysis_output("mask0L",seq,logmask,n/2,1,0,0);
	  else
	    _analysis_output("mask0R",seq,logmask,n/2,1,0,0);
#endif

	floor_posts[i][0]=
	  floor1_fit(vb,b->flr[info->floorsubmap[submap]],
		     logmdct,
		     logmask);
	
	/* we also interpolate a range of intermediate curves for
           intermediate rates */
	for(k=1;k<PACKETBLOBS/2;k++)
	  floor_posts[i][k]=
	    floor1_interpolate_fit(vb,b->flr[info->floorsubmap[submap]],
				   floor_posts[i][0],
				   floor_posts[i][PACKETBLOBS/2],
				   k*65536/(PACKETBLOBS/2));
	for(k=PACKETBLOBS/2+1;k<PACKETBLOBS-1;k++)
	  floor_posts[i][k]=
	    floor1_interpolate_fit(vb,b->flr[info->floorsubmap[submap]],
				   floor_posts[i][PACKETBLOBS/2],
				   floor_posts[i][PACKETBLOBS-1],
				   (k-PACKETBLOBS/2)*65536/(PACKETBLOBS/2));
      }
    }
  }
  vbi->ampmax=global_ampmax;

  /*
    the next phases are performed once for vbr-only and PACKETBLOB
    times for bitrate managed modes.
    
    1) encode actual mode being used
    2) encode the floor for each channel, compute coded mask curve/res
    3) normalize and couple.
    4) encode residue
    5) save packet bytes to the packetblob vector
    
  */

  /* iterate over the many masking curve fits we've created */

  {
    float **res_bundle=alloca(sizeof(*res_bundle)*vi->channels);
    float **couple_bundle=alloca(sizeof(*couple_bundle)*vi->channels);
    int *zerobundle=alloca(sizeof(*zerobundle)*vi->channels);
    int **sortindex=alloca(sizeof(*sortindex)*vi->channels);
    float **mag_memo;
    int **mag_sort;

    if(info->coupling_steps){
      mag_memo=_vp_quantize_couple_memo(vb,
					&ci->psy_g_param,
					psy_look,
					info,
					gmdct);    
      
      mag_sort=_vp_quantize_couple_sort(vb,
					psy_look,
					info,
					mag_memo);    

      hf_reduction(&ci->psy_g_param,
		   psy_look,
		   info,
		   mag_memo);
    }

    memset(sortindex,0,sizeof(*sortindex)*vi->channels);
    if(psy_look->vi->normal_channel_p){
      for(i=0;i<vi->channels;i++){
	float *mdct    =gmdct[i];
	sortindex[i]=alloca(sizeof(**sortindex)*n/2);
	_vp_noise_normalize_sort(psy_look,mdct,sortindex[i]);
      }
    }

    for(k=(vorbis_bitrate_managed(vb)?0:PACKETBLOBS/2);
	k<=(vorbis_bitrate_managed(vb)?PACKETBLOBS-1:PACKETBLOBS/2);
	k++){
      oggpack_buffer *opb=vbi->packetblob[k];

      /* start out our new packet blob with packet type and mode */
      /* Encode the packet type */
      oggpack_write(opb,0,1);
      /* Encode the modenumber */
      /* Encode frame mode, pre,post windowsize, then dispatch */
      oggpack_write(opb,modenumber,b->modebits);
      if(vb->W){
	oggpack_write(opb,vb->lW,1);
	oggpack_write(opb,vb->nW,1);
      }

      /* encode floor, compute masking curve, sep out residue */
      for(i=0;i<vi->channels;i++){
	int submap=info->chmuxlist[i];
	float *mdct    =gmdct[i];
	float *res     =vb->pcm[i];
	int   *ilogmask=ilogmaskch[i]=
	  _vorbis_block_alloc(vb,n/2*sizeof(**ilogmaskch));
      
	nonzero[i]=floor1_encode(opb,vb,b->flr[info->floorsubmap[submap]],
				 floor_posts[i][k],
				 ilogmask);
#if 0
	{
	  char buf[80];
	  sprintf(buf,"maskI%c%d",i?'R':'L',k);
	  float work[n/2];
	  for(j=0;j<n/2;j++)
	    work[j]=FLOOR1_fromdB_LOOKUP[ilogmask[j]];
	  _analysis_output(buf,seq,work,n/2,1,1,0);
	}
#endif
	_vp_remove_floor(psy_look,
			 mdct,
			 ilogmask,
			 res,
			 ci->psy_g_param.sliding_lowpass[vb->W][k]);

	_vp_noise_normalize(psy_look,res,res+n/2,sortindex[i]);

	
#if 0
	{
	  char buf[80];
	  float work[n/2];
	  for(j=0;j<n/2;j++)
	    work[j]=FLOOR1_fromdB_LOOKUP[ilogmask[j]]*(res+n/2)[j];
	  sprintf(buf,"resI%c%d",i?'R':'L',k);
	  _analysis_output(buf,seq,work,n/2,1,1,0);

	}
#endif
      }
      
      /* our iteration is now based on masking curve, not prequant and
	 coupling.  Only one prequant/coupling step */
      
      /* quantize/couple */
      /* incomplete implementation that assumes the tree is all depth
         one, or no tree at all */
      if(info->coupling_steps){
	_vp_couple(k,
		   &ci->psy_g_param,
		   psy_look,
		   info,
		   vb->pcm,
		   mag_memo,
		   mag_sort,
		   ilogmaskch,
		   nonzero,
		   ci->psy_g_param.sliding_lowpass[vb->W][k]);
      }
      
      /* classify and encode by submap */
      for(i=0;i<info->submaps;i++){
	int ch_in_bundle=0;
	long **classifications;
	int resnum=info->residuesubmap[i];

	for(j=0;j<vi->channels;j++){
	  if(info->chmuxlist[j]==i){
	    zerobundle[ch_in_bundle]=0;
	    if(nonzero[j])zerobundle[ch_in_bundle]=1;
	    res_bundle[ch_in_bundle]=vb->pcm[j];
	    couple_bundle[ch_in_bundle++]=vb->pcm[j]+n/2;
	  }
	}
	
	classifications=_residue_P[ci->residue_type[resnum]]->
	  class(vb,b->residue[resnum],couple_bundle,zerobundle,ch_in_bundle);
	
	_residue_P[ci->residue_type[resnum]]->
	  forward(opb,vb,b->residue[resnum],
		  couple_bundle,NULL,zerobundle,ch_in_bundle,classifications);
      }
      
      /* ok, done encoding.  Next protopacket. */
    }
    
  }

#if 0
  seq++;
  total+=ci->blocksizes[vb->W]/4+ci->blocksizes[vb->nW]/4;
#endif
  return(0);
}
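
In the bitrate-managed branch above, the intermediate floor fits are requested with a 16-bit blend weight, k*65536/(PACKETBLOBS/2), that scans between two anchor fits. The sketch below only illustrates what a linear blend with such a fixed-point weight looks like; the real floor1_interpolate_fit interpolates floor posts, not raw floats, so treat this purely as a picture of the weighting (names and values are illustrative):

#include <stdio.h>

#define PACKETBLOBS 15   /* illustrative here; matches the value used above */

/* blend two values with a 0..65536 fixed-point weight */
static float blend16(float a, float b, int weight)
{
    return a + (b - a) * (weight / 65536.f);
}

int main(void)
{
    /* stand-ins for the 'low' and 'middle' anchor fits */
    float lo_fit = 10.f, mid_fit = 20.f;
    int k;

    for (k = 1; k < PACKETBLOBS / 2; k++) {
        int w = k * 65536 / (PACKETBLOBS / 2);
        printf("k=%d  weight=%5d  blended=%.2f\n", k, w, blend16(lo_fit, mid_fit, w));
    }
    return 0;
}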
Example 5
int vorbis_synthesis_blockin(vorbis_dsp_state *v,vorbis_block *vb){
  vorbis_info *vi=v->vi;
  codec_setup_info *ci=vi->codec_setup;

  /* Shift out any PCM that we returned previously */
  /* centerW is currently the center of the last block added */

  if(v->centerW>ci->blocksizes[1]/2 &&
  /* Quick additional hack; to avoid *a lot* of shifts, use an
     oversized buffer.  This increases memory usage, but doesn't make
     much difference wrt L1/L2 cache pressure. */
     v->pcm_returned>8192){

    /* don't shift too much; we need to have a minimum PCM buffer of
       1/2 long block */

    int shiftPCM=v->centerW-ci->blocksizes[1]/2;
    shiftPCM=(v->pcm_returned<shiftPCM?v->pcm_returned:shiftPCM);

    v->pcm_current-=shiftPCM;
    v->centerW-=shiftPCM;
    v->pcm_returned-=shiftPCM;
    
    if(shiftPCM){
      int i;
      for(i=0;i<vi->channels;i++)
	memmove(v->pcm[i],v->pcm[i]+shiftPCM,
		v->pcm_current*sizeof(float));
    }
  }

  v->lW=v->W;
  v->W=vb->W;
  v->nW=-1;

  v->glue_bits+=vb->glue_bits;
  v->time_bits+=vb->time_bits;
  v->floor_bits+=vb->floor_bits;
  v->res_bits+=vb->res_bits;

  if(v->sequence+1 != vb->sequence)v->granulepos=-1; /* out of sequence;
                                                     lose count */

  v->sequence=vb->sequence;

  {
    int sizeW=ci->blocksizes[v->W];
    int centerW=v->centerW+ci->blocksizes[v->lW]/4+sizeW/4;
    int beginW=centerW-sizeW/2;
    int endW=beginW+sizeW;
    int beginSl;
    int endSl;
    int i,j;

    /* Do we have enough PCM/mult storage for the block? */
    if(endW>v->pcm_storage){
      /* expand the storage */
      v->pcm_storage=endW+ci->blocksizes[1];
   
      for(i=0;i<vi->channels;i++)
	v->pcm[i]=_ogg_realloc(v->pcm[i],v->pcm_storage*sizeof(float)); 
    }

    /* overlap/add PCM */

    switch(v->W){
    case 0:
      beginSl=0;
      endSl=ci->blocksizes[0]/2;
      break;
    case 1:
      beginSl=ci->blocksizes[1]/4-ci->blocksizes[v->lW]/4;
      endSl=beginSl+ci->blocksizes[v->lW]/2;
      break;
    default:
      return(-1);
    }

    for(j=0;j<vi->channels;j++){
      static int seq=0;
      float *pcm=v->pcm[j]+beginW;
      float *p=vb->pcm[j];

      /* the overlap/add section */
      for(i=beginSl;i<endSl;i++)
	pcm[i]+=p[i];
      /* the remaining section */
      for(;i<sizeW;i++)
	pcm[i]=p[i];

      _analysis_output("lapped",seq,pcm,sizeW,0,0);
      _analysis_output("buffered",seq++,v->pcm[j],sizeW+beginW,0,0);
    
    }

    /* deal with initial packet state; we do this using the explicit
       pcm_returned==-1 flag otherwise we're sensitive to first block
       being short or long */

    if(v->pcm_returned==-1)
      v->pcm_returned=centerW;

    /* track the frame number... This is for convenience, but also
       making sure our last packet doesn't end with added padding.  If
       the last packet is partial, the number of samples we'll have to
       return will be past the vb->granulepos.
       
       This is not foolproof!  It will be confused if we begin
       decoding at the last page after a seek or hole.  In that case,
       we don't have a starting point to judge where the last frame
       is.  For this reason, vorbisfile will always try to make sure
       it reads the last two marked pages in proper sequence */

    if(v->granulepos==-1)
      if(vb->granulepos==-1){
	v->granulepos=0;
      }else{
	v->granulepos=vb->granulepos;
      }
    else{
      v->granulepos+=(centerW-v->centerW);
      if(vb->granulepos!=-1 && v->granulepos!=vb->granulepos){

	if(v->granulepos>vb->granulepos){
	  long extra=v->granulepos-vb->granulepos;

	  if(vb->eofflag){
	    /* partial last frame.  Strip the extra samples off */
	    centerW-=extra;
	  }else if(vb->sequence == 1){
	    /* ^^^ argh, this can be 1 from seeking! */


	    /* partial first frame.  Discard extra leading samples */
	    v->pcm_returned+=extra;
	    if(v->pcm_returned>centerW)v->pcm_returned=centerW;
	    
	  }
	  
	}/* else{ Shouldn't happen *unless* the bitstream is out of
	    spec.  Either way, believe the bitstream } */
	v->granulepos=vb->granulepos;
      }
    }

    /* Update, cleanup */

    v->centerW=centerW;
    v->pcm_current=endW;

    if(vb->eofflag)v->eofflag=1;
  }

  return(0);
}
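
The loop marked "the overlap/add section" is classic lapped-transform reassembly: the first half of each new block is summed into the tail of the previous block, and its second half simply becomes new data. A toy sketch of the same pattern with a fixed block size (no window switching; the values only mark which region each sample came from):

#include <stdio.h>
#include <string.h>

#define BLOCK 8   /* toy block size; the real blocksizes are configurable powers of two */

/* lap a new BLOCK-sample block into out at offset 'begin':
   the first half overlaps what is already there, the second half is fresh */
static void overlap_add(float *out, const float *block, int begin)
{
    int i;
    for (i = 0; i < BLOCK / 2; i++)
        out[begin + i] += block[i];   /* the overlap/add section */
    for (; i < BLOCK; i++)
        out[begin + i] = block[i];    /* the remaining section */
}

int main(void)
{
    float out[32];
    float a[BLOCK] = { 1, 1, 1, 1, 1, 1, 1, 1 };
    float b[BLOCK] = { 2, 2, 2, 2, 2, 2, 2, 2 };
    int i;

    memset(out, 0, sizeof(out));
    overlap_add(out, a, 0);          /* first block lands at the start */
    overlap_add(out, b, BLOCK / 2);  /* the next block starts half a block later */

    for (i = 0; i < BLOCK + BLOCK / 2; i++)
        printf("%g ", out[i]);
    printf("\n");                    /* prints: 1 1 1 1 3 3 3 3 2 2 2 2 */
    return 0;
}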
Example 6
void _vp_psy_init(vorbis_look_psy *p,vorbis_info_psy *vi,
		  vorbis_info_psy_global *gi,int n,long rate){
  long i,j,k,lo=0,hi=0;
  long maxoc;
  memset(p,0,sizeof(vorbis_look_psy));


  p->eighth_octave_lines=gi->eighth_octave_lines;
  p->shiftoc=rint(log(gi->eighth_octave_lines*8)/log(2))-1;

  p->firstoc=toOC(.25f*rate/n)*(1<<(p->shiftoc+1))-gi->eighth_octave_lines;
  maxoc=toOC((n*.5f-.25f)*rate/n)*(1<<(p->shiftoc+1))+.5f;
  p->total_octave_lines=maxoc-p->firstoc+1;

  if(vi->ath)
    p->ath=_ogg_malloc(n*sizeof(float));
  p->octave=_ogg_malloc(n*sizeof(long));
  p->bark=_ogg_malloc(n*sizeof(unsigned long));
  p->vi=vi;
  p->n=n;
  p->rate=rate;

  /* set up the lookups for a given blocksize and sample rate */
  if(vi->ath)
    set_curve(vi->ath, p->ath,n,rate);
  for(i=0;i<n;i++){
    float bark=toBARK(rate/(2*n)*i); 

    for(;lo+vi->noisewindowlomin<i && 
	  toBARK(rate/(2*n)*lo)<(bark-vi->noisewindowlo);lo++);
    
    for(;hi<n && (hi<i+vi->noisewindowhimin ||
	  toBARK(rate/(2*n)*hi)<(bark+vi->noisewindowhi));hi++);
    
    p->bark[i]=(hi<<16)+lo;

  }

  for(i=0;i<n;i++)
    p->octave[i]=toOC((i*.5f+.25f)*rate/n)*(1<<(p->shiftoc+1))+.5f;

  p->tonecurves=_ogg_malloc(P_BANDS*sizeof(float **));
  p->noisethresh=_ogg_malloc(n*sizeof(float));
  p->noiseoffset=_ogg_malloc(n*sizeof(float));
  for(i=0;i<P_BANDS;i++)
    p->tonecurves[i]=_ogg_malloc(P_LEVELS*sizeof(float *));
  
  for(i=0;i<P_BANDS;i++)
    for(j=0;j<P_LEVELS;j++)
      p->tonecurves[i][j]=_ogg_malloc((EHMER_MAX+2)*sizeof(float));
  

  /* OK, yeah, this was a silly way to do it */
  memcpy(p->tonecurves[0][4]+2,tone_125_40dB_SL,sizeof(float)*EHMER_MAX);
  memcpy(p->tonecurves[0][6]+2,tone_125_60dB_SL,sizeof(float)*EHMER_MAX);
  memcpy(p->tonecurves[0][8]+2,tone_125_80dB_SL,sizeof(float)*EHMER_MAX);
  memcpy(p->tonecurves[0][10]+2,tone_125_100dB_SL,sizeof(float)*EHMER_MAX);

  memcpy(p->tonecurves[2][4]+2,tone_125_40dB_SL,sizeof(float)*EHMER_MAX);
  memcpy(p->tonecurves[2][6]+2,tone_125_60dB_SL,sizeof(float)*EHMER_MAX);
  memcpy(p->tonecurves[2][8]+2,tone_125_80dB_SL,sizeof(float)*EHMER_MAX);
  memcpy(p->tonecurves[2][10]+2,tone_125_100dB_SL,sizeof(float)*EHMER_MAX);

  memcpy(p->tonecurves[4][4]+2,tone_250_40dB_SL,sizeof(float)*EHMER_MAX);
  memcpy(p->tonecurves[4][6]+2,tone_250_60dB_SL,sizeof(float)*EHMER_MAX);
  memcpy(p->tonecurves[4][8]+2,tone_250_80dB_SL,sizeof(float)*EHMER_MAX);
  memcpy(p->tonecurves[4][10]+2,tone_250_100dB_SL,sizeof(float)*EHMER_MAX);

  memcpy(p->tonecurves[6][4]+2,tone_500_40dB_SL,sizeof(float)*EHMER_MAX);
  memcpy(p->tonecurves[6][6]+2,tone_500_60dB_SL,sizeof(float)*EHMER_MAX);
  memcpy(p->tonecurves[6][8]+2,tone_500_80dB_SL,sizeof(float)*EHMER_MAX);
  memcpy(p->tonecurves[6][10]+2,tone_500_100dB_SL,sizeof(float)*EHMER_MAX);

  memcpy(p->tonecurves[8][4]+2,tone_1000_40dB_SL,sizeof(float)*EHMER_MAX);
  memcpy(p->tonecurves[8][6]+2,tone_1000_60dB_SL,sizeof(float)*EHMER_MAX);
  memcpy(p->tonecurves[8][8]+2,tone_1000_80dB_SL,sizeof(float)*EHMER_MAX);
  memcpy(p->tonecurves[8][10]+2,tone_1000_100dB_SL,sizeof(float)*EHMER_MAX);

  memcpy(p->tonecurves[10][4]+2,tone_2000_40dB_SL,sizeof(float)*EHMER_MAX);
  memcpy(p->tonecurves[10][6]+2,tone_2000_60dB_SL,sizeof(float)*EHMER_MAX);
  memcpy(p->tonecurves[10][8]+2,tone_2000_80dB_SL,sizeof(float)*EHMER_MAX);
  memcpy(p->tonecurves[10][10]+2,tone_2000_100dB_SL,sizeof(float)*EHMER_MAX);

  memcpy(p->tonecurves[12][4]+2,tone_4000_40dB_SL,sizeof(float)*EHMER_MAX);
  memcpy(p->tonecurves[12][6]+2,tone_4000_60dB_SL,sizeof(float)*EHMER_MAX);
  memcpy(p->tonecurves[12][8]+2,tone_4000_80dB_SL,sizeof(float)*EHMER_MAX);
  memcpy(p->tonecurves[12][10]+2,tone_4000_100dB_SL,sizeof(float)*EHMER_MAX);

  memcpy(p->tonecurves[14][4]+2,tone_8000_40dB_SL,sizeof(float)*EHMER_MAX);
  memcpy(p->tonecurves[14][6]+2,tone_8000_60dB_SL,sizeof(float)*EHMER_MAX);
  memcpy(p->tonecurves[14][8]+2,tone_8000_80dB_SL,sizeof(float)*EHMER_MAX);
  memcpy(p->tonecurves[14][10]+2,tone_8000_100dB_SL,sizeof(float)*EHMER_MAX);

  memcpy(p->tonecurves[16][4]+2,tone_8000_40dB_SL,sizeof(float)*EHMER_MAX);
  memcpy(p->tonecurves[16][6]+2,tone_8000_60dB_SL,sizeof(float)*EHMER_MAX);
  memcpy(p->tonecurves[16][8]+2,tone_8000_80dB_SL,sizeof(float)*EHMER_MAX);
  memcpy(p->tonecurves[16][10]+2,tone_8000_100dB_SL,sizeof(float)*EHMER_MAX);

  /* apply the master tonal attenuation to the measured base curves
     before the in-between bands are interpolated below */
  for(i=0;i<P_BANDS;i+=2)
    for(j=4;j<P_LEVELS;j+=2)
      for(k=2;k<EHMER_MAX+2;k++)
	p->tonecurves[i][j][k]+=vi->tone_masteratt;

  /* interpolate curves between */
  for(i=1;i<P_BANDS;i+=2)
    for(j=4;j<P_LEVELS;j+=2){
      memcpy(p->tonecurves[i][j]+2,p->tonecurves[i-1][j]+2,EHMER_MAX*sizeof(float));
      /*interp_curve(p->tonecurves[i][j],
		   p->tonecurves[i-1][j],
		   p->tonecurves[i+1][j],.5);*/
      min_curve(p->tonecurves[i][j]+2,p->tonecurves[i+1][j]+2);
    }

  /* set up the final curves */
  for(i=0;i<P_BANDS;i++)
    setup_curve(p->tonecurves[i],i,vi->toneatt->block[i]);

  if(vi->curvelimitp){
    /* value limit the tonal masking curves; the peakatt not only
       optionally specifies maximum dynamic depth, but also [always]
       limits the masking curves to a minimum depth  */
    for(i=0;i<P_BANDS;i++)
      for(j=0;j<P_LEVELS;j++){
	for(k=2;k<EHMER_OFFSET+2+vi->curvelimitp;k++)
	  if(p->tonecurves[i][j][k]> vi->peakatt->block[i][j])
	    p->tonecurves[i][j][k]=  vi->peakatt->block[i][j];
	  else
	    break;
      }
  }

  if(vi->peakattp) /* we limit depth only optionally */
    for(i=0;i<P_BANDS;i++)
      for(j=0;j<P_LEVELS;j++)
	if(p->tonecurves[i][j][EHMER_OFFSET+2]< vi->peakatt->block[i][j])
	  p->tonecurves[i][j][EHMER_OFFSET+2]=  vi->peakatt->block[i][j];

  /* but guarding is mandatory */
  for(i=0;i<P_BANDS;i++)
    for(j=0;j<P_LEVELS;j++)
      if(p->tonecurves[i][j][EHMER_OFFSET+2]< vi->tone_maxatt)
	  p->tonecurves[i][j][EHMER_OFFSET+2]=  vi->tone_maxatt;

  /* set up rolling noise median */
  for(i=0;i<n;i++){
    float halfoc=toOC((i+.5)*rate/(2.*n))*2.;
    int inthalfoc;
    float del;
    
    if(halfoc<0)halfoc=0;
    if(halfoc>=P_BANDS-1)halfoc=P_BANDS-1;
    inthalfoc=(int)halfoc;
    del=halfoc-inthalfoc;

    p->noisethresh[i]=((p->vi->noisethresh[inthalfoc]*(1.-del) + 
			p->vi->noisethresh[inthalfoc+1]*del))*2.f-1.f;
    p->noiseoffset[i]=
      p->vi->noiseoff[inthalfoc]*(1.-del) + 
      p->vi->noiseoff[inthalfoc+1]*del;
  }

  analysis_noisy=1;
  _analysis_output("noiseoff",0,p->noiseoffset,n,1,0);
  _analysis_output("noisethresh",0,p->noisethresh,n,1,0);

  for(i=0;i<P_LEVELS;i++)
    _analysis_output("curve_63Hz",i,p->tonecurves[0][i]+2,EHMER_MAX,0,0);
  for(i=0;i<P_LEVELS;i++)
    _analysis_output("curve_88Hz",i,p->tonecurves[1][i]+2,EHMER_MAX,0,0);
  for(i=0;i<P_LEVELS;i++)
    _analysis_output("curve_125Hz",i,p->tonecurves[2][i]+2,EHMER_MAX,0,0);
  for(i=0;i<P_LEVELS;i++)
    _analysis_output("curve_170Hz",i,p->tonecurves[3][i]+2,EHMER_MAX,0,0);
  for(i=0;i<P_LEVELS;i++)
    _analysis_output("curve_250Hz",i,p->tonecurves[4][i]+2,EHMER_MAX,0,0);
  for(i=0;i<P_LEVELS;i++)
    _analysis_output("curve_350Hz",i,p->tonecurves[5][i]+2,EHMER_MAX,0,0);
  for(i=0;i<P_LEVELS;i++)
    _analysis_output("curve_500Hz",i,p->tonecurves[6][i]+2,EHMER_MAX,0,0);
  for(i=0;i<P_LEVELS;i++)
    _analysis_output("curve_700Hz",i,p->tonecurves[7][i]+2,EHMER_MAX,0,0);
  for(i=0;i<P_LEVELS;i++)
    _analysis_output("curve_1kHz",i,p->tonecurves[8][i]+2,EHMER_MAX,0,0);
  for(i=0;i<P_LEVELS;i++)
    _analysis_output("curve_1.4Hz",i,p->tonecurves[9][i]+2,EHMER_MAX,0,0);
  for(i=0;i<P_LEVELS;i++)
    _analysis_output("curve_2kHz",i,p->tonecurves[10][i]+2,EHMER_MAX,0,0);
  for(i=0;i<P_LEVELS;i++)
    _analysis_output("curve_2.4kHz",i,p->tonecurves[11][i]+2,EHMER_MAX,0,0);
  for(i=0;i<P_LEVELS;i++)
    _analysis_output("curve_4kHz",i,p->tonecurves[12][i]+2,EHMER_MAX,0,0);
  for(i=0;i<P_LEVELS;i++)
    _analysis_output("curve_5.6kHz",i,p->tonecurves[13][i]+2,EHMER_MAX,0,0);
  for(i=0;i<P_LEVELS;i++)
    _analysis_output("curve_8kHz",i,p->tonecurves[14][i]+2,EHMER_MAX,0,0);
  for(i=0;i<P_LEVELS;i++)
    _analysis_output("curve_11.5kHz",i,p->tonecurves[15][i]+2,EHMER_MAX,0,0);
  for(i=0;i<P_LEVELS;i++)
    _analysis_output("curve_16kHz",i,p->tonecurves[16][i]+2,EHMER_MAX,0,0);
  analysis_noisy=1;

}
Example 7
static int mapping0_inverse(vorbis_block *vb,vorbis_look_mapping *l){
  vorbis_dsp_state     *vd=vb->vd;
  vorbis_info          *vi=vd->vi;
  codec_setup_info     *ci=vi->codec_setup;
  backend_lookup_state *b=vd->backend_state;
  vorbis_look_mapping0 *look=(vorbis_look_mapping0 *)l;
  vorbis_info_mapping0 *info=look->map;
  vorbis_info_mode     *mode=look->mode;
  int                   i,j;
  long                  n=vb->pcmend=ci->blocksizes[vb->W];

  float *window=b->window[vb->W][vb->lW][vb->nW][mode->windowtype];
  float **pcmbundle=alloca(sizeof(float *)*vi->channels);
  int    *zerobundle=alloca(sizeof(int)*vi->channels);

  int   *nonzero  =alloca(sizeof(int)*vi->channels);
  void **floormemo=alloca(sizeof(void *)*vi->channels);
  
  /* time domain information decode (note that applying the
     information would have to happen later; we'll probably add a
     function entry to the harness for that later) */
  /* NOT IMPLEMENTED */

  /* recover the spectral envelope; store it in the PCM vector for now */
  for(i=0;i<vi->channels;i++){
    int submap=info->chmuxlist[i];
    floormemo[i]=look->floor_func[submap]->
      inverse1(vb,look->floor_look[submap]);
    if(floormemo[i])
      nonzero[i]=1;
    else
      nonzero[i]=0;      
    memset(vb->pcm[i],0,sizeof(float)*n/2);
  }

  /* channel coupling can 'dirty' the nonzero listing */
  for(i=0;i<info->coupling_steps;i++){
    if(nonzero[info->coupling_mag[i]] ||
       nonzero[info->coupling_ang[i]]){
      nonzero[info->coupling_mag[i]]=1; 
      nonzero[info->coupling_ang[i]]=1; 
    }
  }

  /* recover the residue into our working vectors */
  for(i=0;i<info->submaps;i++){
    int ch_in_bundle=0;
    for(j=0;j<vi->channels;j++){
      if(info->chmuxlist[j]==i){
	if(nonzero[j])
	  zerobundle[ch_in_bundle]=1;
	else
	  zerobundle[ch_in_bundle]=0;
	pcmbundle[ch_in_bundle++]=vb->pcm[j];
      }
    }
    
    look->residue_func[i]->inverse(vb,look->residue_look[i],
				   pcmbundle,zerobundle,ch_in_bundle);
  }

  /* channel coupling */
  for(i=info->coupling_steps-1;i>=0;i--){
    float *pcmM=vb->pcm[info->coupling_mag[i]];
    float *pcmA=vb->pcm[info->coupling_ang[i]];

    for(j=0;j<n/2;j++){
      float mag=pcmM[j];
      float ang=pcmA[j];

      if(mag>0)
	if(ang>0){
	  pcmM[j]=mag;
	  pcmA[j]=mag-ang;
	}else{
	  pcmA[j]=mag;
	  pcmM[j]=mag+ang;
	}
      else
	if(ang>0){
	  pcmM[j]=mag;
	  pcmA[j]=mag+ang;
	}else{
	  pcmA[j]=mag;
	  pcmM[j]=mag-ang;
	}
    }
  }

  /* compute and apply spectral envelope */
  for(i=0;i<vi->channels;i++){
    float *pcm=vb->pcm[i];
    int submap=info->chmuxlist[i];
    look->floor_func[submap]->
      inverse2(vb,look->floor_look[submap],floormemo[i],pcm);
  }

  /* transform the PCM data; takes PCM vector, vb; modifies PCM vector */
  /* only MDCT right now.... */
  for(i=0;i<vi->channels;i++){
    float *pcm=vb->pcm[i];
    _analysis_output("out",seq+i,pcm,n/2,1,1);
    mdct_backward(b->transform[vb->W][0],pcm,pcm);
  }

  /* window the data */
  for(i=0;i<vi->channels;i++){
    float *pcm=vb->pcm[i];
    if(nonzero[i])
      for(j=0;j<n;j++)
	pcm[j]*=window[j];
    else
      for(j=0;j<n;j++)
	pcm[j]=0.f;
    _analysis_output("final",seq++,pcm,n,0,0);
  }
	    
  /* now apply the decoded post-window time information */
  /* NOT IMPLEMENTED */

  /* all done! */
  return(0);
}
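
The nested branches in the channel-coupling loop above turn the decoded magnitude/angle pair back into two residue channels; the sign of ang selects which output gets the mag value directly and which gets the offset value. A standalone restatement of exactly those branches, with a few values checked by hand (illustrative only; the real code works in place on the PCM vectors):

#include <assert.h>
#include <stdio.h>

/* same branch structure as the coupling-decode loop above: (mag,ang) -> (M,A) */
static void uncouple(float mag, float ang, float *M, float *A)
{
    if (mag > 0) {
        if (ang > 0) { *M = mag; *A = mag - ang; }
        else         { *A = mag; *M = mag + ang; }
    } else {
        if (ang > 0) { *M = mag; *A = mag + ang; }
        else         { *A = mag; *M = mag - ang; }
    }
}

int main(void)
{
    float M, A;
    uncouple( 5.f,  2.f, &M, &A); assert(M ==  5.f && A ==  3.f);
    uncouple( 5.f, -2.f, &M, &A); assert(M ==  3.f && A ==  5.f);
    uncouple(-5.f,  2.f, &M, &A); assert(M == -5.f && A == -3.f);
    uncouple(-5.f, -2.f, &M, &A); assert(M == -3.f && A == -5.f);
    printf("all four coupling branches check out\n");
    return 0;
}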
Example 8
static int mapping0_forward(vorbis_block *vb,vorbis_look_mapping *l){
  vorbis_dsp_state      *vd=vb->vd;
  vorbis_info           *vi=vd->vi;
  codec_setup_info      *ci=vi->codec_setup;
  backend_lookup_state  *b=vb->vd->backend_state;
  vorbis_look_mapping0  *look=(vorbis_look_mapping0 *)l;
  vorbis_info_mapping0  *info=look->map;
  vorbis_info_mode      *mode=look->mode;
  vorbis_block_internal *vbi=(vorbis_block_internal *)vb->internal;
  int                    n=vb->pcmend;
  int i,j;
  float *window=b->window[vb->W][vb->lW][vb->nW][mode->windowtype];
  int   *nonzero=alloca(sizeof(int)*vi->channels);

  float *work=_vorbis_block_alloc(vb,n*sizeof(float));

  float global_ampmax=vbi->ampmax;
  float *local_ampmax=alloca(sizeof(float)*vi->channels);
  int blocktype;

  /* we differentiate between short and long block types to help the
     masking engine; the window shapes also matter.
     impulse block (a short block in which an impulse occurs)
     padding block (a short block that pads between a transitional 
          long block and an impulse block, or vice versa)
     transition block (the weird one; a long block with the transition 
          window; affects bass/midrange response and that must be 
	  accounted for in masking) 
     long block (run of the mill long block)
  */

  if(vb->W){
    if(!vb->lW || !vb->nW)
      blocktype=BLOCKTYPE_TRANSITION;
    else
      blocktype=BLOCKTYPE_LONG;
  }else{
    /* right now we're missing the infrastructure to distinguish the
       two short types */
    blocktype=BLOCKTYPE_IMPULSE;
  }

  for(i=0;i<vi->channels;i++){
    float scale=4.f/n;

    /* the following makes things clearer to *me* anyway */
    float *pcm     =vb->pcm[i]; 
    float *fft     =work;
    float *logfft  =pcm+n/2;

    /*float *res     =pcm;
    float *mdct    =pcm;
    float *codedflr=pcm+n/2;
    float *logmax  =work;
    float *logmask =work+n/2;*/

    /* window the PCM data */
    for(j=0;j<n;j++)
      fft[j]=pcm[j]*=window[j];
    
    /* transform the PCM data */
    /* only MDCT right now.... */
    mdct_forward(b->transform[vb->W][0],pcm,pcm);
    
    /* FFT yields more accurate tonal estimation (not phase sensitive) */
    drft_forward(&look->fft_look,fft);
    fft[0]*=scale;
    logfft[0]=todB(fft);
    local_ampmax[i]=logfft[0];
    for(j=1;j<n-1;j+=2){
      float temp=scale*FAST_HYPOT(fft[j],fft[j+1]);
      temp=logfft[(j+1)>>1]=todB(&temp);
      if(temp>local_ampmax[i])local_ampmax[i]=temp;
    }
    if(local_ampmax[i]>global_ampmax)global_ampmax=local_ampmax[i];

    _analysis_output("fft",seq+i,logfft,n/2,1,0);
  }

  for(i=0;i<vi->channels;i++){
    int submap=info->chmuxlist[i];

    /* the following makes things clearer to *me* anyway */
    float *mdct    =vb->pcm[i]; 
    float *res     =mdct;
    float *codedflr=mdct+n/2;
    float *logfft  =mdct+n/2;

    float *logmdct =work;
    float *logmax  =mdct+n/2;
    float *logmask =work+n/2;

    for(j=0;j<n/2;j++)
      logmdct[j]=todB(mdct+j);
    _analysis_output("mdct",seq+i,logmdct,n/2,1,0);


    /* perform psychoacoustics; do masking */
    _vp_compute_mask(look->psy_look[blocktype],
		     b->psy_g_look,
		     i,
		     logfft, /* -> logmax */
		     logmdct,
		     logmask,
		     global_ampmax,
		     local_ampmax[i],
		     ci->blocksizes[vb->lW]/2);

    _analysis_output("mask",seq+i,logmask,n/2,1,0);
    
    /* perform floor encoding */
    nonzero[i]=look->floor_func[submap]->
      forward(vb,look->floor_look[submap],
	      mdct,
	      logmdct,
	      logmask,
	      logmax,

	      codedflr);


    _analysis_output("mdct2",seq+i,mdct,n/2,1,1);
    _vp_remove_floor(look->psy_look[blocktype],
		     b->psy_g_look,
		     logmdct,
		     mdct,
		     codedflr,
		     res,
		     local_ampmax[i]);

    /*for(j=0;j<n/2;j++)
      if(fabs(res[j])>1200){
	analysis_noisy=1;
	fprintf(stderr,"%ld ",seq+i);
	}*/

    _analysis_output("res",seq+i,res,n/2,1,0);
    _analysis_output("codedflr",seq+i,codedflr,n/2,1,1);
      
  }

  vbi->ampmax=global_ampmax;

  /* partition based prequantization and channel coupling */
  /* Steps in prequant and coupling:
     
     down-couple/down-quantize from perfect residue ->  quantized vector 
     classify by this first quantized vector
     
     do{ 
        encode quantized vector; add encoded values to 'so-far' vector
        more? [not yet at bitrate/not yet at target]
          yes{
              down-couple/down-quantize from perfect-'so-far' -> 
	        quantized vector; when subtracting coupling, 
		account for +/- out-of-phase component
          }no{  
              break
          }
     }
     done.

     quantization in each iteration is done (after circular normalization 
     in coupling) using a by-iteration quantization granule value.
  */
   
  {
    float  **pcm=vb->pcm;
    float  **quantized=alloca(sizeof(float*)*vi->channels);
    float  **sofar=alloca(sizeof(float*)*vi->channels);

    long  ***classifications=alloca(sizeof(long**)*info->submaps);
    float ***pcmbundle=alloca(sizeof(float **)*info->submaps);
    float ***sobundle=alloca(sizeof(float **)*info->submaps);
    int    **zerobundle=alloca(sizeof(int *)*info->submaps);
    int     *chbundle=alloca(sizeof(int)*info->submaps);
    int      chcounter=0;

    /* play a little loose with this abstraction */
    int   quant_passes=look->psy_look[blocktype]->vi->coupling_passes;
    int   stopflag=0;

    for(i=0;i<vi->channels;i++){
      quantized[i]=pcm[i]+n/2;
      sofar[i]=_vorbis_block_alloc(vb,n/2*sizeof(float));
      memset(sofar[i],0,sizeof(float)*n/2);
    }

    pcmbundle[0]=alloca(sizeof(float *)*vi->channels);
    sobundle[0]=alloca(sizeof(float *)*vi->channels);
    zerobundle[0]=alloca(sizeof(int)*vi->channels);

    /* initial down-quantized coupling */
    
    if(info->coupling_steps==0){
      /* this assumes all or nothing coupling right now.  it should pass
	 through any channels left uncoupled, but it doesn't do that now */
      for(i=0;i<vi->channels;i++){
	float *lpcm=pcm[i];
	float *lqua=quantized[i];
	for(j=0;j<n/2;j++)
	  lqua[j]=lpcm[j];
      }
    }else{
      _vp_quantize_couple(look->psy_look[blocktype],
			  info,
			  pcm,
			  sofar,
			  quantized,
			  nonzero,
			  0);
    }

    for(i=0;i<vi->channels;i++)
      _analysis_output("quant",seq+i,quantized[i],n/2,1,0);

  
    /* classify, by submap */

    for(i=0;i<info->submaps;i++){
      int ch_in_bundle=0;
      pcmbundle[i]=pcmbundle[0]+chcounter;
      sobundle[i]=sobundle[0]+chcounter;
      zerobundle[i]=zerobundle[0]+chcounter;

      for(j=0;j<vi->channels;j++){
	if(info->chmuxlist[j]==i){
	  if(nonzero[j])
	    zerobundle[i][ch_in_bundle]=1;
	  else
	    zerobundle[i][ch_in_bundle]=0;
	  pcmbundle[i][ch_in_bundle]=quantized[j];
	  sobundle[i][ch_in_bundle++]=sofar[j];
	}
      }
      chbundle[i]=ch_in_bundle;
      chcounter+=ch_in_bundle;

      classifications[i]=look->residue_func[i]->
	class(vb,look->residue_look[i],pcmbundle[i],zerobundle[i],chbundle[i]);
    }

    /* actual encoding loop */
    for(i=0;!stopflag;){

      /* perform residue encoding of this pass's quantized residue
         vector, according to the residue mapping */
    
      for(j=0;j<info->submaps;j++)
	look->residue_func[j]->
	  forward(vb,look->residue_look[j],
		  pcmbundle[j],sobundle[j],zerobundle[j],chbundle[j],
		  i,classifications[j]);
      i++;
      
      /* bitrate management decision hook; the following if() is where
         we tell progressive encoding to halt, right now it just
         avoids falling off the edge */
      if(i>=quant_passes /* || yadda yadda */)stopflag=1;

      if(!stopflag){
	/* down-couple/down-quantize from perfect-'so-far' -> 
	   new quantized vector */
	if(info->coupling_steps==0){
	  /* this assumes all or nothing coupling right now.  it should pass
	     through any channels left uncoupled, but it doesn't do that now */
	  /* use a separate channel index here; i is the pass counter of the
	     enclosing encoding loop and must not be clobbered */
	  int ch;
	  for(ch=0;ch<vi->channels;ch++){
	    float *lpcm=pcm[ch];
	    float *lsof=sofar[ch];
	    float *lqua=quantized[ch];
	    for(j=0;j<n/2;j++)
	      lqua[j]=lpcm[j]-lsof[j];
	  }
	}else{
	}else{
	  _vp_quantize_couple(look->psy_look[blocktype],
			      info,
			      pcm,
			      sofar,
			      quantized,
			      nonzero,
			      i);
	}
      }
      /* steady as she goes */
    }
    seq+=vi->channels;
  }
  
  look->lastframe=vb->sequence;
  return(0);
}
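
The long prequant/coupling comment in this example describes a progressive-refinement loop: quantize the residual of what has been encoded "so far", encode it, add the coded values to the running total, and repeat for a fixed number of passes with a finer granule each time. A toy scalar version of that loop, with a coarse uniform quantizer standing in for the residue backend (every name and constant here is illustrative, not part of the codec):

#include <math.h>
#include <stdio.h>

#define N      4
#define PASSES 3

/* stand-in 'residue encoder': snap each residual to the current step size */
static void encode_pass(const float *residual, float *coded, float step)
{
    int i;
    for (i = 0; i < N; i++)
        coded[i] = step * floorf(residual[i] / step + .5f);
}

int main(void)
{
    float target[N] = { 0.9f, -2.3f, 4.1f, 0.2f };  /* the 'perfect' vector */
    float sofar[N]  = { 0, 0, 0, 0 };               /* accumulated coded values */
    float quantized[N], coded[N];
    float step = 1.f;
    int i, pass;

    for (pass = 0; pass < PASSES; pass++) {
        for (i = 0; i < N; i++)
            quantized[i] = target[i] - sofar[i];    /* residual vs. the 'so-far' vector */
        encode_pass(quantized, coded, step);
        for (i = 0; i < N; i++)
            sofar[i] += coded[i];                   /* add encoded values to 'so-far' */
        step *= .25f;                               /* finer quantization granule per pass */
    }

    for (i = 0; i < N; i++)
        printf("%g ~ %g\n", target[i], sofar[i]);
    return 0;
}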