static int a52_decode_frame(unsigned char *buf, int maxlen,
			    struct frame_fmt *fmt)
{
	sample_t level = 1, bias = 384;
	int flags;
	int i, len = -1;
	if (!a52_buffer.a_in_buffer_len)
		if (a52_fillbuff(&a52_buffer) < 0)
			return len;
	a52_buffer.a_in_buffer_len = 0;

	flags = a52_flags | A52_ADJUST_LEVEL;
	level *= gain;

	if (a52_frame(a52_state, a52_buffer.a_in_buffer, &flags, &level, bias)) {
		printf("a52_decode_frame a52: error decoding frame\n");
		return len;
	}

	len = 0;
	for (i = 0; i < 6; i++) {
		if (a52_block(a52_state)) {
			printf("a52: error at resampling\n");
			break;
		}
		len += 2 * a52_resample(a52_samples(a52_state),
					(int16_t *)&buf[len]);
	}

	assert(len <= maxlen);
	return len;
}
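Example #1 relies on several externals (a52_state, a52_buffer, a52_fillbuff, gain, a52_flags) defined elsewhere in the decoder. A minimal setup sketch using only the public liba52 API, with the buffer/fill helpers treated as assumptions, might look like this:

/* Setup sketch only: a52_buffer, a52_fillbuff and gain are not part of
 * liba52; they are assumed helpers living elsewhere in this codec. */
#include <inttypes.h>
#include <a52dec/a52.h>

static a52_state_t *a52_state;
static int a52_flags = A52_STEREO;	/* requested downmix layout */
static float gain = 1.0f;		/* user volume scaling */

static int a52_init_decoder(void)
{
	a52_state = a52_init(0);	/* 0 = no optional acceleration flags */
	return a52_state ? 0 : -1;
}

/* a52_fillbuff (assumed) would read the 7-byte frame header, call
 * a52_syncinfo() to get the frame length, then read the remaining bytes
 * into a52_buffer.a_in_buffer and set a_in_buffer_len accordingly. */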
Example #2
static int decode_audio(sh_audio_t *sh_audio,unsigned char *buf,int minlen,int maxlen)
{
    sample_t level=a52_level, bias=384;
    int flags=a52_flags|A52_ADJUST_LEVEL;
    int i,len=-1;
	if (sh_audio->sample_format == AF_FORMAT_FLOAT_NE)
	    bias = 0;
	if(!sh_audio->a_in_buffer_len)
	    if(a52_fillbuff(sh_audio)<0) return len; /* EOF */
	sh_audio->a_in_buffer_len=0;
	if (a52_frame (a52_state, sh_audio->a_in_buffer, &flags, &level, bias)){
	    mp_msg(MSGT_DECAUDIO,MSGL_WARN,"a52: error decoding frame\n");
	    return len;
	}

	/* handle dynrng */
	if (a52_drc_action != DRC_NO_ACTION) {
	    if (a52_drc_action == DRC_NO_COMPRESSION)
		a52_dynrng(a52_state, NULL, NULL);
	    else
		a52_dynrng(a52_state, dynrng_call, NULL);
	}

	len=0;
	for (i = 0; i < 6; i++) {
	    if (a52_block (a52_state)){
		mp_msg(MSGT_DECAUDIO,MSGL_WARN,"a52: error at resampling\n");
		break;
	    }
	    len+=2*a52_resample(a52_samples(a52_state),(int16_t *)&buf[len]);
	}
	assert(len <= maxlen);
	return len;
}
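The dynrng_call callback handed to a52_dynrng() above is not reproduced here. liba52 expects a callback that receives the stream's compression factor plus a user pointer and returns the factor to actually apply; a plausible sketch, assuming a global a52_drc_level in the 0..1 range, is:

/* DRC callback sketch matching liba52's a52_dynrng() callback signature.
 * a52_drc_level (assumed global, 0.0 .. 1.0) controls how much of the
 * stream's dynamic range compression is applied. */
#include <math.h>

static float a52_drc_level = 1.0f;

static sample_t dynrng_call(sample_t c, void *data)
{
	(void)data;
	/* c == 1.0 means "no compression"; a fractional exponent blends
	 * between no compression (0.0) and full compression (1.0). */
	return pow((double)c, a52_drc_level);
}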
Example #3
/*****************************************************************************
 * DoWork: decode an ATSC A/52 frame.
 *****************************************************************************/
static void DoWork( filter_t * p_filter,
                    aout_buffer_t * p_in_buf, aout_buffer_t * p_out_buf )
{
    filter_sys_t    *p_sys = p_filter->p_sys;
#ifdef LIBA52_FIXED
    sample_t        i_sample_level = (1 << 24);
#else
    sample_t        i_sample_level = 1;
#endif
    int             i_flags = p_sys->i_flags;
    int             i_bytes_per_block = 256 * p_sys->i_nb_channels
                      * sizeof(sample_t);
    int             i;

    /* Do the actual decoding now. */
    a52_frame( p_sys->p_liba52, p_in_buf->p_buffer,
               &i_flags, &i_sample_level, 0 );

    if ( (i_flags & A52_CHANNEL_MASK) != (p_sys->i_flags & A52_CHANNEL_MASK)
          && !p_sys->b_dontwarn )
    {
        msg_Warn( p_filter,
                  "liba52 couldn't do the requested downmix 0x%x->0x%x",
                  p_sys->i_flags  & A52_CHANNEL_MASK,
                  i_flags & A52_CHANNEL_MASK );

        p_sys->b_dontwarn = 1;
    }

    if( !p_sys->b_dynrng )
    {
        a52_dynrng( p_sys->p_liba52, NULL, NULL );
    }

    for ( i = 0; i < 6; i++ )
    {
        sample_t * p_samples;

        if( a52_block( p_sys->p_liba52 ) )
        {
            msg_Warn( p_filter, "a52_block failed for block %d", i );
        }

        p_samples = a52_samples( p_sys->p_liba52 );

        if ( ((p_sys->i_flags & A52_CHANNEL_MASK) == A52_CHANNEL1
               || (p_sys->i_flags & A52_CHANNEL_MASK) == A52_CHANNEL2
               || (p_sys->i_flags & A52_CHANNEL_MASK) == A52_MONO)
              && (p_filter->fmt_out.audio.i_physical_channels
                   & (AOUT_CHAN_LEFT | AOUT_CHAN_RIGHT)) )
        {
            Duplicate( (sample_t *)(p_out_buf->p_buffer + i * i_bytes_per_block),
                       p_samples );
        }
        else if ( p_filter->fmt_out.audio.i_original_channels
                    & AOUT_CHAN_REVERSESTEREO )
        {
            Exchange( (sample_t *)(p_out_buf->p_buffer + i * i_bytes_per_block),
                      p_samples );
        }
        else
        {
            /* Interleave the *$£%ù samples. */
            Interleave( (sample_t *)(p_out_buf->p_buffer + i * i_bytes_per_block),
                        p_samples, p_sys->i_nb_channels, p_sys->pi_chan_table);
        }
    }

    p_out_buf->i_nb_samples = p_in_buf->i_nb_samples;
    p_out_buf->i_buffer = i_bytes_per_block * 6;
}
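Duplicate(), Exchange() and Interleave() are VLC helpers whose bodies are not shown. liba52 hands back each block as i_nb_channels planes of 256 samples, so the interleaving step is essentially a strided copy; a generic sketch (not VLC's actual Interleave(), and assuming pi_chan_table maps each source plane to an output slot) is:

/* Planar-to-interleaved copy sketch for one liba52 block. */
#include <inttypes.h>
#include <a52dec/a52.h>

static void interleave_sketch( sample_t *p_out, const sample_t *p_samples,
                               int i_nb_channels, const int *pi_chan_table )
{
    int i, c;
    for ( i = 0; i < 256; i++ )
        for ( c = 0; c < i_nb_channels; c++ )
            p_out[i * i_nb_channels + pi_chan_table[c]] =
                p_samples[c * 256 + i];
}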
Example #4
File: a52.c  Project: claymodel/rockbox
static void a52_decode_data(uint8_t *start, uint8_t *end)
{
    static uint8_t *bufptr = buf;
    static uint8_t *bufpos = buf + 7;
    /*
     * sample_rate and flags are static because this routine could
     * exit between the a52_syncinfo() and the ao_setup(), and we want
     * to have the same values when we get back !
     */
    static int sample_rate;
    static int flags;
    int bit_rate;
    int len;

    while (1) {
        len = end - start;
        if (!len)
            break;
        if (len > bufpos - bufptr)
            len = bufpos - bufptr;
        memcpy(bufptr, start, len);
        bufptr += len;
        start += len;
        if (bufptr == bufpos) {
            if (bufpos == buf + 7) {
                int length;

                length = a52_syncinfo(buf, &flags, &sample_rate, &bit_rate);
                if (!length) {
                    //DEBUGF("skip\n");
                    for (bufptr = buf; bufptr < buf + 6; bufptr++)
                        bufptr[0] = bufptr[1];
                    continue;
                }
                bufpos = buf + length;
            } else {
                /* Unity gain is 1 << 26, and we want to end up on 28 bits
                   of precision instead of the default 30.
                 */
                level_t level = 1 << 24;
                sample_t bias = 0;
                int i;

                /* This is the configuration for the downmixing: */
                flags = A52_STEREO | A52_ADJUST_LEVEL;

                if (a52_frame(state, buf, &flags, &level, bias))
                    goto error;
                a52_dynrng(state, NULL, NULL);
                frequency = sample_rate;

                /* An A52 frame consists of 6 blocks of 256 samples
                   So we decode and output them one block at a time */
                for (i = 0; i < 6; i++) {
                    if (a52_block(state))
                        goto error;
                    output_audio(a52_samples(state));
                    samplesdone += 256;
                }
                ci->set_elapsed(samplesdone/(frequency/1000));
                bufptr = buf;
                bufpos = buf + 7;
                continue;
            error:
                //logf("Error decoding A52 stream\n");
                bufptr = buf;
                bufpos = buf + 7;
            }
        }   
    }
}
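output_audio() is not shown here. With the fixed-point level of 1 << 24, the comment above implies the decoded samples carry 28 bits of precision (full scale around 1 << 28); the real output_audio() likely hands those samples straight to the host's PCM/DSP layer, but a hedged sketch of reducing one 256-sample plane to 16-bit PCM under that assumption would be:

/* Sketch only: assumes fixed-point liba52 output with full scale at
 * 1 << 28, as the precision comment above suggests; the real
 * output_audio() may pass the raw 28-bit samples on unchanged. */
#include <stdint.h>

static void block_to_s16(const int32_t *in, int16_t *out)
{
    int i;
    for (i = 0; i < 256; i++) {
        int32_t s = in[i] >> 13;          /* 28-bit full scale -> 15-bit */
        if (s > 32767) s = 32767;
        else if (s < -32768) s = -32768;
        out[i] = (int16_t)s;
    }
}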
Example #5
File: liba52_dec.c  Project: bigbensk/gpac
static GF_Err AC3_ProcessData(GF_MediaDecoder *ifcg,
		char *inBuffer, u32 inBufferLength,
		u16 ES_ID,
		char *outBuffer, u32 *outBufferLength,
		u8 PaddingBits, u32 mmlevel)
{
    short *out_samples;
	int i, len, bit_rate;
	sample_t level;
	A52CTX();

	/*check not using scalabilty*/
	if (ctx->ES_ID != ES_ID) return GF_BAD_PARAM;

	/*if late or seeking don't decode*/
	switch (mmlevel) {
	case GF_CODEC_LEVEL_SEEK:
	case GF_CODEC_LEVEL_DROP:
		*outBufferLength = 0;
		return GF_OK;
	default:
		break;
	}

	if (ctx->out_size > *outBufferLength) {
		*outBufferLength = ctx->out_size;
		return GF_BUFFER_TOO_SMALL;
	}

	GF_LOG(GF_LOG_DEBUG, GF_LOG_CODEC, ("[A52] Decoding AU\n"));

	len = a52_syncinfo(inBuffer, &ctx->flags, &ctx->sample_rate, &bit_rate);
	if (!len) return GF_NON_COMPLIANT_BITSTREAM;

	/*init decoder*/
	if (!ctx->out_size) {
		ctx->num_channels = ac3_channels[ctx->flags & 7];
		if (ctx->flags & A52_LFE) ctx->num_channels++;
		ctx->flags |= A52_ADJUST_LEVEL;

		ctx->out_size = ctx->num_channels * sizeof(short) * 1536;
		*outBufferLength = ctx->out_size;
		return GF_BUFFER_TOO_SMALL;
	}

	level = 1;
	if ( a52_frame(ctx->codec, inBuffer, &ctx->flags, &level, 384)) {
		GF_LOG(GF_LOG_DEBUG, GF_LOG_CODEC, ("[A52] Error decoding AU\n" ));
		*outBufferLength = 0;
		return GF_NON_COMPLIANT_BITSTREAM;
	}

	out_samples = (short*)outBuffer;
	for (i=0; i<6; i++) {
		if (a52_block(ctx->codec))
			return GF_NON_COMPLIANT_BITSTREAM;

		float_to_int(ctx->samples, out_samples + i * 256 * ctx->num_channels, ctx->num_channels);
	}

	*outBufferLength = 6 * ctx->num_channels * 256 * sizeof(short);

	return GF_OK;
}
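float_to_int() is a GPAC helper whose body is not included above. Since a52_frame() was called with level = 1 and bias = 384, each float sample ends up roughly in [383, 385]; a straightforward (though not necessarily bit-identical) conversion removes the bias and scales to 16-bit while interleaving the planar channels:

/* Conversion sketch, assumed equivalent in spirit to GPAC's
 * float_to_int(); the real helper may use a faster IEEE-754 bit trick.
 * in: nchannels planes of 256 floats; out: 256 interleaved frames. */
static void float_to_int_sketch(const float *in, short *out, int nchannels)
{
	int i, c;
	for (i = 0; i < 256; i++) {
		for (c = 0; c < nchannels; c++) {
			float v = (in[c * 256 + i] - 384.0f) * 32767.0f;
			if (v > 32767.0f) v = 32767.0f;
			else if (v < -32768.0f) v = -32768.0f;
			out[i * nchannels + c] = (short)v;
		}
	}
}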
Example #6
uint8_t ADM_AudiocodecAC3::run(uint8_t *inptr, uint32_t nbIn, float *outptr,   uint32_t *nbOut)
{
    uint32_t avail;
    uint32_t length;
    int flags = 0, samprate = 0, bitrate = 0;
    uint8_t chan = _wavHeader->channels;
    *nbOut=0;

    //  Ready to decode
    while(nbIn)
    {
        if(nbIn<7)
        {
            if(nbIn)
                printf("[a52]: no data to decode avail %u\n",nbIn);
            break;
        }
        length = a52_syncinfo(inptr, &flags, &samprate, &bitrate);
        if(length==0)
        {
            printf("[a52] No startcode found\n");
            break;
        }
        if(length>nbIn)
        {
            // not enough data
            break;
        }


		CHANNEL_TYPE *p_ch_type = channelMapping;
		if (flags & A52_LFE) {
			*(p_ch_type++) = CHTYP_LFE;
		}
		switch (flags & A52_CHANNEL_MASK) {
			case A52_CHANNEL:
			case A52_MONO:
				*(p_ch_type++) = CHTYP_MONO;
			break;
			case A52_STEREO:
			case A52_DOLBY:
				*(p_ch_type++) = CHTYP_FRONT_LEFT;
				*(p_ch_type++) = CHTYP_FRONT_RIGHT;
			break;
			case A52_3F:
				*(p_ch_type++) = CHTYP_FRONT_LEFT;
				*(p_ch_type++) = CHTYP_FRONT_CENTER;
				*(p_ch_type++) = CHTYP_FRONT_RIGHT;
			break;
			case A52_2F1R:
				*(p_ch_type++) = CHTYP_FRONT_LEFT;
				*(p_ch_type++) = CHTYP_FRONT_RIGHT;
				*(p_ch_type++) = CHTYP_REAR_CENTER;
			break;
			case A52_3F1R:
				*(p_ch_type++) = CHTYP_FRONT_LEFT;
				*(p_ch_type++) = CHTYP_FRONT_CENTER;
				*(p_ch_type++) = CHTYP_FRONT_RIGHT;
				*(p_ch_type++) = CHTYP_REAR_CENTER;
			break;
			case A52_2F2R:
				*(p_ch_type++) = CHTYP_FRONT_LEFT;
				*(p_ch_type++) = CHTYP_FRONT_RIGHT;
				*(p_ch_type++) = CHTYP_REAR_LEFT;
				*(p_ch_type++) = CHTYP_REAR_RIGHT;
			break;
			case A52_3F2R:
				*(p_ch_type++) = CHTYP_FRONT_LEFT;
				*(p_ch_type++) = CHTYP_FRONT_CENTER;
				*(p_ch_type++) = CHTYP_FRONT_RIGHT;
				*(p_ch_type++) = CHTYP_REAR_LEFT;
				*(p_ch_type++) = CHTYP_REAR_RIGHT;
			break;
			default:
				ADM_assert(0);
		}
	

        sample_t level = 1, bias = 0;

        if (a52_frame(AC3_HANDLE, inptr, &flags, &level, bias))
        {
            printf("\n A52_frame failed!");
            inptr+=length;
            nbIn-=length;
            *nbOut += 256 * chan * 6;
            break;
        }
        inptr+=length;
        nbIn-=length;
        *nbOut += 256 * chan * 6;

        float *cur;
        for (int i = 0; i < 6; i++) {
                if (a52_block(AC3_HANDLE)) {
                        printf("\n A52_block failed! on block %d", i);
                        // in that case we silent out the chunk
                        memset(outptr, 0, 256 * chan * sizeof(float));
                } else {
                        for (int k = 0; k < chan; k++) {
                                sample_t *sample=(sample_t *)ac3_sample;
                                sample += 256 * k;
                                cur = outptr + k;
                                for (int j = 0; j < 256; j++) {
                                        *cur = *sample++;
                                        cur+=chan;
                                }
                        }
                }
                outptr += chan * 256;
        }
    }
    return 1;
}
Example #7
File: ac3.c  Project: ratopi/CinelerraCV
int mpeg3audio_doac3(mpeg3_ac3_t *audio, 
	char *frame, 
	int frame_size, 
	float **output,
	int render)
{
	int output_position = 0;
	sample_t level = 1;
	int i, j, k, l;

//printf("mpeg3audio_doac3 1\n");
	a52_frame(audio->state, 
		frame, 
		&audio->flags,
	   	&level, 
		0);
//printf("mpeg3audio_doac3 2\n");
	a52_dynrng(audio->state, NULL, NULL);
//printf("mpeg3audio_doac3 3\n");
	for(i = 0; i < 6; i++)
	{
		if(!a52_block(audio->state))
		{
			l = 0;
			if(render)
			{
// Remap the channels to conform to encoders.
				for(j = 0; j < audio->channels; j++)
				{
					int dst_channel = j;

// Make LFE last channel.
// Shift all other channels down 1.
					if((audio->flags & A52_LFE))
					{
						if(j == 0)
							dst_channel = audio->channels - 1;
						else
							dst_channel--;
					}

// Swap front left and center for certain configurations
					switch(audio->flags & A52_CHANNEL_MASK)
					{
						case A52_3F:
						case A52_3F1R:
						case A52_3F2R:
							if(dst_channel == 0) dst_channel = 1;
							else
							if(dst_channel == 1) dst_channel = 0;
							break;
					}

					for(k = 0; k < 256; k++)
					{
						output[dst_channel][output_position + k] = ((sample_t*)audio->output)[l];
						l++;
					}
				}
			}
			output_position += 256;
		}
	}


	return output_position;
}
Example #8
HRESULT TaudioCodecLiba52::decode(TbyteBuffer &src)
{
    unsigned char *p=src.size() ? &src[0] : NULL;
    unsigned char *base=p;
    unsigned char *end=p+src.size();

    while (end-p>7) {
        int size=0,flags,sample_rate,bit_rate;
        if ((size=a52_syncinfo(p,&flags,&sample_rate,&bit_rate))>0) {
            bool enoughData=p+size<=end;
            if (enoughData) {
                if (codecId==CODEC_ID_SPDIF_AC3) {
                    bpssum+=(lastbps=bit_rate/1000);
                    numframes++;
                    HRESULT hr=deciA->deliverSampleSPDIF(p,size,bit_rate,sample_rate,true);
                    if (hr!=S_OK) {
                        return hr;
                    }
                } else {
                    flags|=A52_ADJUST_LEVEL;
                    liba52::sample_t level=1,bias=0;
                    if (a52_frame(state,p,&flags,&level,bias)==0) {
                        bpssum+=(lastbps=bit_rate/1000);
                        numframes++;
                        // Dynamic range compression
                        if (deci->getParam2(IDFF_audio_decoder_DRC)) {
                            liba52::sample_t drcLevel = ((liba52::sample_t)deci->getParam2(IDFF_audio_decoder_DRC_Level) / 100);
                            a52_dynrngsetlevel(state, drcLevel);
                        } else {
                            a52_dynrngsetlevel(state, 0.0);
                        }
                        int scmapidx=std::min(flags&A52_CHANNEL_MASK,int(countof(scmaps)/2));
                        const Tscmap &scmap=scmaps[scmapidx+((flags&A52_LFE)?(countof(scmaps)/2):0)];
                        float *dst0,*dst;
                        dst0=dst=(float*)getDst(6*256*scmap.nchannels*sizeof(float));
                        int i=0;
                        for(; i<6 && a52_block(state)==0; i++) {
                            liba52::sample_t* samples=a52_samples(state);
                            for (int j=0; j<256; j++,samples++)
                                for (int ch=0; ch<scmap.nchannels; ch++) {
                                    *dst++=float(*(samples+256*scmap.ch[ch])/level);
                                }
                        }
                        if (i==6) {
                            fmt.sf=TsampleFormat::SF_FLOAT32;
                            fmt.freq=sample_rate;
                            fmt.setChannels(scmap.nchannels,scmap.channelMask);
                            HRESULT hr=sinkA->deliverDecodedSample(dst0,6*256,fmt);
                            if (hr!=S_OK) {
                                return hr;
                            }
                        }
                    }
                }
                p+=size;
            }
            memmove(base,p,end-p);
            end=base+(end-p);
            p=base;
            if (!enoughData) {
                break;
            }
        } else {
            p++;
        }
    }
    src.resize(end-p);
    return S_OK;
}
Example #9
static GstFlowReturn
gst_a52dec_handle_frame (GstAudioDecoder * bdec, GstBuffer * buffer)
{
  GstA52Dec *a52dec;
  gint channels, i;
  gboolean need_reneg = FALSE;
  gint chans;
  gint length = 0, flags, sample_rate, bit_rate;
  GstMapInfo map;
  GstFlowReturn result = GST_FLOW_OK;
  GstBuffer *outbuf;
  const gint num_blocks = 6;

  a52dec = GST_A52DEC (bdec);

  /* no fancy draining */
  if (G_UNLIKELY (!buffer))
    return GST_FLOW_OK;

  /* parsed stuff already, so this should work out fine */
  gst_buffer_map (buffer, &map, GST_MAP_READ);
  g_assert (map.size >= 7);

  /* re-obtain some sync header info,
   * should be same as during _parse and could also be cached there,
   * but anyway ... */
  bit_rate = a52dec->bit_rate;
  sample_rate = a52dec->sample_rate;
  flags = 0;
  length = a52_syncinfo (map.data, &flags, &sample_rate, &bit_rate);
  g_assert (length == map.size);

  /* update stream information, renegotiate or re-streaminfo if needed */
  need_reneg = FALSE;
  if (a52dec->sample_rate != sample_rate) {
    GST_DEBUG_OBJECT (a52dec, "sample rate changed");
    need_reneg = TRUE;
    a52dec->sample_rate = sample_rate;
  }

  if (flags) {
    if (a52dec->stream_channels != (flags & (A52_CHANNEL_MASK | A52_LFE))) {
      GST_DEBUG_OBJECT (a52dec, "stream channel flags changed, marking update");
      a52dec->flag_update = TRUE;
    }
    a52dec->stream_channels = flags & (A52_CHANNEL_MASK | A52_LFE);
  }

  if (bit_rate != a52dec->bit_rate) {
    a52dec->bit_rate = bit_rate;
    gst_a52dec_update_streaminfo (a52dec);
  }

  /* If we haven't had an explicit number of channels chosen through properties
   * at this point, choose what to downmix to now, based on what the peer will
   * accept - this allows a52dec to do downmixing in preference to a
   * downstream element such as audioconvert.
   */
  if (a52dec->request_channels != A52_CHANNEL) {
    flags = a52dec->request_channels;
  } else if (a52dec->flag_update) {
    GstCaps *caps;

    a52dec->flag_update = FALSE;

    caps = gst_pad_get_allowed_caps (GST_AUDIO_DECODER_SRC_PAD (a52dec));
    if (caps && gst_caps_get_size (caps) > 0) {
      GstCaps *copy = gst_caps_copy_nth (caps, 0);
      GstStructure *structure = gst_caps_get_structure (copy, 0);
      gint orig_channels = flags ? gst_a52dec_channels (flags, NULL) : 6;
      gint fixed_channels = 0;
      const int a52_channels[6] = {
        A52_MONO,
        A52_STEREO,
        A52_STEREO | A52_LFE,
        A52_2F2R,
        A52_2F2R | A52_LFE,
        A52_3F2R | A52_LFE,
      };

      /* Prefer the original number of channels, but fixate to something
       * preferred (first in the caps) downstream if possible.
       */
      gst_structure_fixate_field_nearest_int (structure, "channels",
          orig_channels);

      if (gst_structure_get_int (structure, "channels", &fixed_channels)
          && fixed_channels <= 6) {
        if (fixed_channels < orig_channels)
          flags = a52_channels[fixed_channels - 1];
      } else {
        flags = a52_channels[5];
      }

      gst_caps_unref (copy);
    } else if (flags)
      flags = a52dec->stream_channels;
    else
      flags = A52_3F2R | A52_LFE;

    if (caps)
      gst_caps_unref (caps);
  } else {
    flags = a52dec->using_channels;
  }

  /* process */
  flags |= A52_ADJUST_LEVEL;
  a52dec->level = 1;
  if (a52_frame (a52dec->state, map.data, &flags, &a52dec->level, a52dec->bias)) {
    gst_buffer_unmap (buffer, &map);
    GST_AUDIO_DECODER_ERROR (a52dec, 1, STREAM, DECODE, (NULL),
        ("a52_frame error"), result);
    goto exit;
  }
  gst_buffer_unmap (buffer, &map);

  channels = flags & (A52_CHANNEL_MASK | A52_LFE);
  if (a52dec->using_channels != channels) {
    need_reneg = TRUE;
    a52dec->using_channels = channels;
  }

  /* negotiate if required */
  if (need_reneg) {
    GST_DEBUG_OBJECT (a52dec,
        "a52dec reneg: sample_rate:%d stream_chans:%d using_chans:%d",
        a52dec->sample_rate, a52dec->stream_channels, a52dec->using_channels);
    if (!gst_a52dec_reneg (a52dec))
      goto failed_negotiation;
  }

  if (a52dec->dynamic_range_compression == FALSE) {
    a52_dynrng (a52dec->state, NULL, NULL);
  }

  flags &= (A52_CHANNEL_MASK | A52_LFE);
  chans = gst_a52dec_channels (flags, NULL);
  if (!chans)
    goto invalid_flags;

  /* handle decoded data;
   * each frame has 6 blocks, one block is 256 samples, ea */
  outbuf =
      gst_buffer_new_and_alloc (256 * chans * (SAMPLE_WIDTH / 8) * num_blocks);

  gst_buffer_map (outbuf, &map, GST_MAP_WRITE);
  {
    guint8 *ptr = map.data;
    for (i = 0; i < num_blocks; i++) {
      if (a52_block (a52dec->state)) {
        /* also marks discont */
        GST_AUDIO_DECODER_ERROR (a52dec, 1, STREAM, DECODE, (NULL),
            ("error decoding block %d", i), result);
        if (result != GST_FLOW_OK) {
          gst_buffer_unmap (outbuf, &map);
          goto exit;
        }
      } else {
        gint n, c;
        gint *reorder_map = a52dec->channel_reorder_map;

        for (n = 0; n < 256; n++) {
          for (c = 0; c < chans; c++) {
            ((sample_t *) ptr)[n * chans + reorder_map[c]] =
                a52dec->samples[c * 256 + n];
          }
        }
      }
      ptr += 256 * chans * (SAMPLE_WIDTH / 8);
    }
  }
  gst_buffer_unmap (outbuf, &map);

  result = gst_audio_decoder_finish_frame (bdec, outbuf, 1);

exit:
  return result;

  /* ERRORS */
failed_negotiation:
  {
    GST_ELEMENT_ERROR (a52dec, CORE, NEGOTIATION, (NULL), (NULL));
    return GST_FLOW_ERROR;
  }
invalid_flags:
  {
    GST_ELEMENT_ERROR (GST_ELEMENT (a52dec), STREAM, DECODE, (NULL),
        ("Invalid channel flags: %d", flags));
    return GST_FLOW_ERROR;
  }
}
Example #10
File: gsta52dec.c  Project: zsx/ossbuild
static GstFlowReturn
gst_a52dec_handle_frame (GstA52Dec * a52dec, guint8 * data,
    guint length, gint flags, gint sample_rate, gint bit_rate)
{
  gint channels, i;
  gboolean need_reneg = FALSE;

  /* update stream information, renegotiate or re-streaminfo if needed */
  need_reneg = FALSE;
  if (a52dec->sample_rate != sample_rate) {
    need_reneg = TRUE;
    a52dec->sample_rate = sample_rate;
  }

  if (flags) {
    a52dec->stream_channels = flags & (A52_CHANNEL_MASK | A52_LFE);
  }

  if (bit_rate != a52dec->bit_rate) {
    a52dec->bit_rate = bit_rate;
    gst_a52dec_update_streaminfo (a52dec);
  }

  /* If we haven't had an explicit number of channels chosen through properties
   * at this point, choose what to downmix to now, based on what the peer will 
   * accept - this allows a52dec to do downmixing in preference to a 
   * downstream element such as audioconvert.
   */
  if (a52dec->request_channels != A52_CHANNEL) {
    flags = a52dec->request_channels;
  } else if (a52dec->flag_update) {
    GstCaps *caps;

    a52dec->flag_update = FALSE;

    caps = gst_pad_get_allowed_caps (a52dec->srcpad);
    if (caps && gst_caps_get_size (caps) > 0) {
      GstCaps *copy = gst_caps_copy_nth (caps, 0);
      GstStructure *structure = gst_caps_get_structure (copy, 0);
      gint channels;
      const int a52_channels[6] = {
        A52_MONO,
        A52_STEREO,
        A52_STEREO | A52_LFE,
        A52_2F2R,
        A52_2F2R | A52_LFE,
        A52_3F2R | A52_LFE,
      };

      /* Prefer the original number of channels, but fixate to something 
       * preferred (first in the caps) downstream if possible.
       */
      gst_structure_fixate_field_nearest_int (structure, "channels",
          flags ? gst_a52dec_channels (flags, NULL) : 6);
      gst_structure_get_int (structure, "channels", &channels);
      if (channels <= 6)
        flags = a52_channels[channels - 1];
      else
        flags = a52_channels[5];

      gst_caps_unref (copy);
    } else if (flags)
      flags = a52dec->stream_channels;
    else
      flags = A52_3F2R | A52_LFE;

    if (caps)
      gst_caps_unref (caps);
  } else {
    flags = a52dec->using_channels;
  }
  /* process */
  flags |= A52_ADJUST_LEVEL;
  a52dec->level = 1;
  if (a52_frame (a52dec->state, data, &flags, &a52dec->level, a52dec->bias)) {
    GST_WARNING ("a52_frame error");
    a52dec->discont = TRUE;
    return GST_FLOW_OK;
  }
  channels = flags & (A52_CHANNEL_MASK | A52_LFE);
  if (a52dec->using_channels != channels) {
    need_reneg = TRUE;
    a52dec->using_channels = channels;
  }

  /* negotiate if required */
  if (need_reneg) {
    GST_DEBUG ("a52dec reneg: sample_rate:%d stream_chans:%d using_chans:%d",
        a52dec->sample_rate, a52dec->stream_channels, a52dec->using_channels);
    if (!gst_a52dec_reneg (a52dec, a52dec->srcpad)) {
      GST_ELEMENT_ERROR (a52dec, CORE, NEGOTIATION, (NULL), (NULL));
      return GST_FLOW_ERROR;
    }
  }

  if (a52dec->dynamic_range_compression == FALSE) {
    a52_dynrng (a52dec->state, NULL, NULL);
  }

  /* each frame consists of 6 blocks */
  for (i = 0; i < 6; i++) {
    if (a52_block (a52dec->state)) {
      /* ignore errors but mark a discont */
      GST_WARNING ("a52_block error %d", i);
      a52dec->discont = TRUE;
    } else {
      GstFlowReturn ret;

      /* push on */
      ret = gst_a52dec_push (a52dec, a52dec->srcpad, a52dec->using_channels,
          a52dec->samples, a52dec->time);
      if (ret != GST_FLOW_OK)
        return ret;
    }
    a52dec->time += 256 * GST_SECOND / a52dec->sample_rate;
  }

  return GST_FLOW_OK;
}