/* Decode one A/52 frame from the demuxer buffer into interleaved 16-bit PCM.
 *
 * sh_audio: decoder context (input buffer, requested sample format)
 * buf:      output PCM buffer, filled with up to 6 blocks of resampled audio
 * minlen:   unused by this decoder
 * maxlen:   capacity of buf in bytes (checked via assert after decoding)
 *
 * Returns the number of bytes written to buf, or -1 on EOF / decode error.
 * Relies on file-scope state: a52_state, a52_level, a52_flags, a52_drc_action.
 */
static int decode_audio(sh_audio_t *sh_audio, unsigned char *buf, int minlen,
                        int maxlen)
{
    sample_t level = a52_level;
    /* Float output wants no DC bias; 16-bit paths use the liba52 default. */
    sample_t bias = (sh_audio->sample_format == AF_FORMAT_FLOAT_NE) ? 0 : 384;
    int flags = a52_flags | A52_ADJUST_LEVEL;
    int blk;
    int bytes_out = -1;

    /* Refill the frame buffer if the previous frame was consumed. */
    if (!sh_audio->a_in_buffer_len) {
        if (a52_fillbuff(sh_audio) < 0)
            return bytes_out; /* EOF */
    }
    sh_audio->a_in_buffer_len = 0;

    if (a52_frame(a52_state, sh_audio->a_in_buffer, &flags, &level, bias)) {
        mp_msg(MSGT_DECAUDIO, MSGL_WARN, "a52: error decoding frame\n");
        return bytes_out;
    }

    /* handle dynrng: disable compression entirely, or route through the
     * user-configurable scaling callback. */
    if (a52_drc_action != DRC_NO_ACTION)
        a52_dynrng(a52_state,
                   a52_drc_action == DRC_NO_COMPRESSION ? NULL : dynrng_call,
                   NULL);

    /* A frame carries 6 blocks of 256 samples each. */
    bytes_out = 0;
    for (blk = 0; blk < 6; blk++) {
        if (a52_block(a52_state)) {
            mp_msg(MSGT_DECAUDIO, MSGL_WARN, "a52: error at resampling\n");
            break;
        }
        bytes_out += 2 * a52_resample(a52_samples(a52_state),
                                      (int16_t *)&buf[bytes_out]);
    }

    assert(bytes_out <= maxlen);
    return bytes_out;
}
/*****************************************************************************
 * DoWork: decode an ATSC A/52 frame.
 *
 * Decodes p_in_buf into p_out_buf as 6 blocks of 256 interleaved samples.
 * On a frame-parse failure the output buffer is filled with silence instead
 * of stale/garbage data, and the buffer sizes are still set so downstream
 * keeps a consistent timeline.
 *****************************************************************************/
static void DoWork( filter_t * p_filter,
                    aout_buffer_t * p_in_buf, aout_buffer_t * p_out_buf )
{
    filter_sys_t *p_sys = p_filter->p_sys;
#ifdef LIBA52_FIXED
    sample_t i_sample_level = (1 << 24);
#else
    sample_t i_sample_level = 1;
#endif
    int i_flags = p_sys->i_flags;
    int i_bytes_per_block = 256 * p_sys->i_nb_channels * sizeof(sample_t);
    int i;

    /* Do the actual decoding now. */
    /* FIX: the return value of a52_frame() was ignored; on a corrupt frame
     * the decoder state is not set up and the six a52_block() calls below
     * would emit garbage.  Output silence for this frame instead. */
    if( a52_frame( p_sys->p_liba52, p_in_buf->p_buffer,
                   &i_flags, &i_sample_level, 0 ) )
    {
        msg_Warn( p_filter, "liba52 couldn't decode the frame" );
        for ( i = 0; i < i_bytes_per_block * 6; i++ )
            p_out_buf->p_buffer[i] = 0;
        p_out_buf->i_nb_samples = p_in_buf->i_nb_samples;
        p_out_buf->i_buffer = i_bytes_per_block * 6;
        return;
    }

    /* Warn (once) if liba52 could not honour the requested downmix. */
    if ( (i_flags & A52_CHANNEL_MASK) != (p_sys->i_flags & A52_CHANNEL_MASK)
          && !p_sys->b_dontwarn )
    {
        msg_Warn( p_filter,
                  "liba52 couldn't do the requested downmix 0x%x->0x%x",
                  p_sys->i_flags & A52_CHANNEL_MASK,
                  i_flags & A52_CHANNEL_MASK );
        p_sys->b_dontwarn = 1;
    }

    /* Passing NULL disables liba52's dynamic range compression. */
    if( !p_sys->b_dynrng )
    {
        a52_dynrng( p_sys->p_liba52, NULL, NULL );
    }

    for ( i = 0; i < 6; i++ )
    {
        sample_t * p_samples;

        if( a52_block( p_sys->p_liba52 ) )
        {
            msg_Warn( p_filter, "a52_block failed for block %d", i );
        }

        p_samples = a52_samples( p_sys->p_liba52 );

        if ( ((p_sys->i_flags & A52_CHANNEL_MASK) == A52_CHANNEL1
               || (p_sys->i_flags & A52_CHANNEL_MASK) == A52_CHANNEL2
               || (p_sys->i_flags & A52_CHANNEL_MASK) == A52_MONO)
              && (p_filter->fmt_out.audio.i_physical_channels
                   & (AOUT_CHAN_LEFT | AOUT_CHAN_RIGHT)) )
        {
            /* Mono source to a stereo output: duplicate the channel. */
            Duplicate( (sample_t *)(p_out_buf->p_buffer
                                     + i * i_bytes_per_block),
                       p_samples );
        }
        else if ( p_filter->fmt_out.audio.i_original_channels
                   & AOUT_CHAN_REVERSESTEREO )
        {
            /* Swap left and right. */
            Exchange( (sample_t *)(p_out_buf->p_buffer
                                    + i * i_bytes_per_block),
                      p_samples );
        }
        else
        {
            /* Interleave the planar liba52 samples. */
            Interleave( (sample_t *)(p_out_buf->p_buffer
                                      + i * i_bytes_per_block),
                        p_samples, p_sys->i_nb_channels,
                        p_sys->pi_chan_table );
        }
    }

    p_out_buf->i_nb_samples = p_in_buf->i_nb_samples;
    p_out_buf->i_buffer = i_bytes_per_block * 6;
}
static void a52_decode_data(uint8_t *start, uint8_t *end) { static uint8_t *bufptr = buf; static uint8_t *bufpos = buf + 7; /* * sample_rate and flags are static because this routine could * exit between the a52_syncinfo() and the ao_setup(), and we want * to have the same values when we get back ! */ static int sample_rate; static int flags; int bit_rate; int len; while (1) { len = end - start; if (!len) break; if (len > bufpos - bufptr) len = bufpos - bufptr; memcpy(bufptr, start, len); bufptr += len; start += len; if (bufptr == bufpos) { if (bufpos == buf + 7) { int length; length = a52_syncinfo(buf, &flags, &sample_rate, &bit_rate); if (!length) { //DEBUGF("skip\n"); for (bufptr = buf; bufptr < buf + 6; bufptr++) bufptr[0] = bufptr[1]; continue; } bufpos = buf + length; } else { /* Unity gain is 1 << 26, and we want to end up on 28 bits of precision instead of the default 30. */ level_t level = 1 << 24; sample_t bias = 0; int i; /* This is the configuration for the downmixing: */ flags = A52_STEREO | A52_ADJUST_LEVEL; if (a52_frame(state, buf, &flags, &level, bias)) goto error; a52_dynrng(state, NULL, NULL); frequency = sample_rate; /* An A52 frame consists of 6 blocks of 256 samples So we decode and output them one block at a time */ for (i = 0; i < 6; i++) { if (a52_block(state)) goto error; output_audio(a52_samples(state)); samplesdone += 256; } ci->set_elapsed(samplesdone/(frequency/1000)); bufptr = buf; bufpos = buf + 7; continue; error: //logf("Error decoding A52 stream\n"); bufptr = buf; bufpos = buf + 7; } } } }
/* Decode one AC-3 frame into planar float output.
 *
 * audio:      decoder context holding the liba52 state and channel flags
 * frame:      raw AC-3 frame bytes
 * frame_size: size of frame in bytes (unused; liba52 derives it from sync)
 * output:     per-channel output arrays (written only when render != 0)
 * render:     nonzero to actually copy samples into output
 *
 * Returns the number of samples produced per channel (up to 6 * 256),
 * or 0 if the frame header could not be parsed.
 */
int mpeg3audio_doac3(mpeg3_ac3_t *audio, char *frame, int frame_size,
                     float **output, int render)
{
    int output_position = 0;
    sample_t level = 1;
    int i, j, k, l;

    /* FIX: the a52_frame() return value was previously ignored, so a corrupt
     * frame would still be "decoded" from an un-set-up state.  Bail out with
     * no samples instead. */
    if (a52_frame(audio->state, frame, &audio->flags, &level, 0))
        return 0;

    /* NULL callback disables dynamic range compression. */
    a52_dynrng(audio->state, NULL, NULL);

    for (i = 0; i < 6; i++) {
        if (!a52_block(audio->state)) {
            l = 0;
            if (render) {
                /* Remap the channels to conform to encoders. */
                for (j = 0; j < audio->channels; j++) {
                    int dst_channel = j;

                    /* Make LFE last channel.
                       Shift all other channels down 1. */
                    if ((audio->flags & A52_LFE)) {
                        if (j == 0)
                            dst_channel = audio->channels - 1;
                        else
                            dst_channel--;
                    }

                    /* Swap front left and center for certain configurations */
                    switch (audio->flags & A52_CHANNEL_MASK) {
                        case A52_3F:
                        case A52_3F1R:
                        case A52_3F2R:
                            if (dst_channel == 0)
                                dst_channel = 1;
                            else if (dst_channel == 1)
                                dst_channel = 0;
                            break;
                    }

                    /* liba52 output is planar: 256 samples per channel. */
                    for (k = 0; k < 256; k++) {
                        output[dst_channel][output_position + k] =
                            ((sample_t *)audio->output)[l];
                        l++;
                    }
                }
            }
            output_position += 256;
        }
    }

    return output_position;
}
/* GstAudioDecoder::handle_frame for the 1.x a52dec element.
 *
 * Re-parses the (already framed) buffer's sync header, updates stream info,
 * picks a downmix configuration (explicit property, downstream caps, or the
 * stream's own layout), decodes the 6 blocks of 256 samples and pushes one
 * interleaved, channel-reordered output buffer downstream.
 *
 * Returns a GstFlowReturn; decode errors go through GST_AUDIO_DECODER_ERROR
 * so they only become fatal after repeated failures.
 */
static GstFlowReturn
gst_a52dec_handle_frame (GstAudioDecoder * bdec, GstBuffer * buffer)
{
  GstA52Dec *a52dec;
  gint channels, i;
  gboolean need_reneg = FALSE;
  gint chans;
  gint length = 0, flags, sample_rate, bit_rate;
  GstMapInfo map;
  GstFlowReturn result = GST_FLOW_OK;
  GstBuffer *outbuf;
  const gint num_blocks = 6;

  a52dec = GST_A52DEC (bdec);

  /* no fancy draining */
  if (G_UNLIKELY (!buffer))
    return GST_FLOW_OK;

  /* parsed stuff already, so this should work out fine */
  gst_buffer_map (buffer, &map, GST_MAP_READ);
  g_assert (map.size >= 7);

  /* re-obtain some sync header info,
   * should be same as during _parse and could also be cached there,
   * but anyway ... */
  bit_rate = a52dec->bit_rate;
  sample_rate = a52dec->sample_rate;
  flags = 0;
  length = a52_syncinfo (map.data, &flags, &sample_rate, &bit_rate);
  g_assert (length == map.size);

  /* update stream information, renegotiate or re-streaminfo if needed */
  need_reneg = FALSE;
  if (a52dec->sample_rate != sample_rate) {
    GST_DEBUG_OBJECT (a52dec, "sample rate changed");
    need_reneg = TRUE;
    a52dec->sample_rate = sample_rate;
  }

  if (flags) {
    if (a52dec->stream_channels != (flags & (A52_CHANNEL_MASK | A52_LFE))) {
      GST_DEBUG_OBJECT (a52dec, "stream channel flags changed, marking update");
      a52dec->flag_update = TRUE;
    }
    a52dec->stream_channels = flags & (A52_CHANNEL_MASK | A52_LFE);
  }

  if (bit_rate != a52dec->bit_rate) {
    a52dec->bit_rate = bit_rate;
    gst_a52dec_update_streaminfo (a52dec);
  }

  /* If we haven't had an explicit number of channels chosen through properties
   * at this point, choose what to downmix to now, based on what the peer will
   * accept - this allows a52dec to do downmixing in preference to a
   * downstream element such as audioconvert. */
  if (a52dec->request_channels != A52_CHANNEL) {
    flags = a52dec->request_channels;
  } else if (a52dec->flag_update) {
    GstCaps *caps;

    a52dec->flag_update = FALSE;

    caps = gst_pad_get_allowed_caps (GST_AUDIO_DECODER_SRC_PAD (a52dec));
    if (caps && gst_caps_get_size (caps) > 0) {
      GstCaps *copy = gst_caps_copy_nth (caps, 0);
      GstStructure *structure = gst_caps_get_structure (copy, 0);
      gint orig_channels = flags ? gst_a52dec_channels (flags, NULL) : 6;
      gint fixed_channels = 0;
      const int a52_channels[6] = {
        A52_MONO,
        A52_STEREO,
        A52_STEREO | A52_LFE,
        A52_2F2R,
        A52_2F2R | A52_LFE,
        A52_3F2R | A52_LFE,
      };

      /* Prefer the original number of channels, but fixate to something
       * preferred (first in the caps) downstream if possible. */
      gst_structure_fixate_field_nearest_int (structure, "channels",
          orig_channels);
      /* FIX: also require fixed_channels >= 1; caps that fixate "channels"
       * to 0 would previously index a52_channels[-1]. */
      if (gst_structure_get_int (structure, "channels", &fixed_channels)
          && fixed_channels >= 1 && fixed_channels <= 6) {
        if (fixed_channels < orig_channels)
          flags = a52_channels[fixed_channels - 1];
      } else {
        flags = a52_channels[5];
      }

      gst_caps_unref (copy);
    } else if (flags)
      flags = a52dec->stream_channels;
    else
      flags = A52_3F2R | A52_LFE;

    if (caps)
      gst_caps_unref (caps);
  } else {
    flags = a52dec->using_channels;
  }

  /* process */
  flags |= A52_ADJUST_LEVEL;
  a52dec->level = 1;
  if (a52_frame (a52dec->state, map.data, &flags, &a52dec->level,
          a52dec->bias)) {
    gst_buffer_unmap (buffer, &map);
    GST_AUDIO_DECODER_ERROR (a52dec, 1, STREAM, DECODE, (NULL),
        ("a52_frame error"), result);
    goto exit;
  }
  gst_buffer_unmap (buffer, &map);

  channels = flags & (A52_CHANNEL_MASK | A52_LFE);
  if (a52dec->using_channels != channels) {
    need_reneg = TRUE;
    a52dec->using_channels = channels;
  }

  /* negotiate if required */
  if (need_reneg) {
    GST_DEBUG_OBJECT (a52dec,
        "a52dec reneg: sample_rate:%d stream_chans:%d using_chans:%d",
        a52dec->sample_rate, a52dec->stream_channels, a52dec->using_channels);
    if (!gst_a52dec_reneg (a52dec))
      goto failed_negotiation;
  }

  if (a52dec->dynamic_range_compression == FALSE) {
    a52_dynrng (a52dec->state, NULL, NULL);
  }

  flags &= (A52_CHANNEL_MASK | A52_LFE);
  chans = gst_a52dec_channels (flags, NULL);
  if (!chans)
    goto invalid_flags;

  /* handle decoded data;
   * each frame has 6 blocks, one block is 256 samples, ea */
  outbuf =
      gst_buffer_new_and_alloc (256 * chans * (SAMPLE_WIDTH / 8) * num_blocks);

  gst_buffer_map (outbuf, &map, GST_MAP_WRITE);
  {
    guint8 *ptr = map.data;

    for (i = 0; i < num_blocks; i++) {
      if (a52_block (a52dec->state)) {
        /* also marks discont */
        GST_AUDIO_DECODER_ERROR (a52dec, 1, STREAM, DECODE, (NULL),
            ("error decoding block %d", i), result);
        if (result != GST_FLOW_OK) {
          gst_buffer_unmap (outbuf, &map);
          /* FIX: outbuf was leaked on this error path (unmapped but never
           * unreffed). */
          gst_buffer_unref (outbuf);
          goto exit;
        }
      } else {
        gint n, c;
        gint *reorder_map = a52dec->channel_reorder_map;

        /* Interleave liba52's planar samples, applying the channel
         * reorder map negotiated with downstream. */
        for (n = 0; n < 256; n++) {
          for (c = 0; c < chans; c++) {
            ((sample_t *) ptr)[n * chans + reorder_map[c]] =
                a52dec->samples[c * 256 + n];
          }
        }
      }
      ptr += 256 * chans * (SAMPLE_WIDTH / 8);
    }
  }
  gst_buffer_unmap (outbuf, &map);

  result = gst_audio_decoder_finish_frame (bdec, outbuf, 1);

exit:
  return result;

  /* ERRORS */
failed_negotiation:
  {
    GST_ELEMENT_ERROR (a52dec, CORE, NEGOTIATION, (NULL), (NULL));
    return GST_FLOW_ERROR;
  }
invalid_flags:
  {
    GST_ELEMENT_ERROR (GST_ELEMENT (a52dec), STREAM, DECODE, (NULL),
        ("Invalid channel flags: %d", flags));
    return GST_FLOW_ERROR;
  }
}
/* Decode and push one framed A/52 buffer (0.10-style element).
 *
 * Updates cached stream info from the caller-supplied header fields, picks a
 * downmix configuration (explicit property, downstream caps, or the stream's
 * own layout), renegotiates src caps if needed, then decodes the 6 blocks of
 * 256 samples and pushes each one via gst_a52dec_push().
 *
 * Frame/block errors are soft: they mark a discont and return GST_FLOW_OK;
 * only negotiation or push failures propagate an error flow return.
 */
static GstFlowReturn
gst_a52dec_handle_frame (GstA52Dec * a52dec, guint8 * data,
    guint length, gint flags, gint sample_rate, gint bit_rate)
{
  gint channels, i;
  gboolean need_reneg = FALSE;

  /* update stream information, renegotiate or re-streaminfo if needed */
  need_reneg = FALSE;
  if (a52dec->sample_rate != sample_rate) {
    need_reneg = TRUE;
    a52dec->sample_rate = sample_rate;
  }

  if (flags) {
    a52dec->stream_channels = flags & (A52_CHANNEL_MASK | A52_LFE);
  }

  if (bit_rate != a52dec->bit_rate) {
    a52dec->bit_rate = bit_rate;
    gst_a52dec_update_streaminfo (a52dec);
  }

  /* If we haven't had an explicit number of channels chosen through properties
   * at this point, choose what to downmix to now, based on what the peer will
   * accept - this allows a52dec to do downmixing in preference to a
   * downstream element such as audioconvert. */
  if (a52dec->request_channels != A52_CHANNEL) {
    flags = a52dec->request_channels;
  } else if (a52dec->flag_update) {
    GstCaps *caps;

    a52dec->flag_update = FALSE;

    caps = gst_pad_get_allowed_caps (a52dec->srcpad);
    if (caps && gst_caps_get_size (caps) > 0) {
      GstCaps *copy = gst_caps_copy_nth (caps, 0);
      GstStructure *structure = gst_caps_get_structure (copy, 0);
      gint fix_chans = 0;
      const int a52_channels[6] = {
        A52_MONO,
        A52_STEREO,
        A52_STEREO | A52_LFE,
        A52_2F2R,
        A52_2F2R | A52_LFE,
        A52_3F2R | A52_LFE,
      };

      /* Prefer the original number of channels, but fixate to something
       * preferred (first in the caps) downstream if possible. */
      gst_structure_fixate_field_nearest_int (structure, "channels",
          flags ? gst_a52dec_channels (flags, NULL) : 6);
      /* FIX: the original read the channel count without checking that
       * gst_structure_get_int() succeeded (uninitialized-variable UB) and
       * would index a52_channels[-1] for a channel count of 0.  It also
       * shadowed the outer 'channels' variable; renamed to avoid that. */
      if (gst_structure_get_int (structure, "channels", &fix_chans)
          && fix_chans >= 1 && fix_chans <= 6)
        flags = a52_channels[fix_chans - 1];
      else
        flags = a52_channels[5];
      gst_caps_unref (copy);
    } else if (flags)
      flags = a52dec->stream_channels;
    else
      flags = A52_3F2R | A52_LFE;

    if (caps)
      gst_caps_unref (caps);
  } else {
    flags = a52dec->using_channels;
  }

  /* process */
  flags |= A52_ADJUST_LEVEL;
  a52dec->level = 1;
  if (a52_frame (a52dec->state, data, &flags, &a52dec->level, a52dec->bias)) {
    GST_WARNING ("a52_frame error");
    a52dec->discont = TRUE;
    return GST_FLOW_OK;
  }
  channels = flags & (A52_CHANNEL_MASK | A52_LFE);
  if (a52dec->using_channels != channels) {
    need_reneg = TRUE;
    a52dec->using_channels = channels;
  }

  /* negotiate if required */
  if (need_reneg) {
    GST_DEBUG ("a52dec reneg: sample_rate:%d stream_chans:%d using_chans:%d",
        a52dec->sample_rate, a52dec->stream_channels, a52dec->using_channels);
    if (!gst_a52dec_reneg (a52dec, a52dec->srcpad)) {
      GST_ELEMENT_ERROR (a52dec, CORE, NEGOTIATION, (NULL), (NULL));
      return GST_FLOW_ERROR;
    }
  }

  if (a52dec->dynamic_range_compression == FALSE) {
    a52_dynrng (a52dec->state, NULL, NULL);
  }

  /* each frame consists of 6 blocks */
  for (i = 0; i < 6; i++) {
    if (a52_block (a52dec->state)) {
      /* ignore errors but mark a discont */
      GST_WARNING ("a52_block error %d", i);
      a52dec->discont = TRUE;
    } else {
      GstFlowReturn ret;

      /* push on */
      ret = gst_a52dec_push (a52dec, a52dec->srcpad, a52dec->using_channels,
          a52dec->samples, a52dec->time);
      if (ret != GST_FLOW_OK)
        return ret;
    }
    a52dec->time += 256 * GST_SECOND / a52dec->sample_rate;
  }

  return GST_FLOW_OK;
}