Code example #1
File: pcm.c  Project: opieproject/opie
int mpeg3audio_do_pcm(mpeg3audio_t *audio)
{
	int i, j, k;
	MPEG3_INT16 sample;
	int frame_samples = (audio->framesize - 3) / audio->channels / 2;

	if(mpeg3bits_read_buffer(audio->astream, audio->ac3_buffer, frame_samples * audio->channels * 2))
		return 1;

/* Need more room */
	if(audio->pcm_point / audio->channels >= audio->pcm_allocated - MPEG3AUDIO_PADDING * audio->channels)
	{
		mpeg3audio_replace_buffer(audio, audio->pcm_allocated + MPEG3AUDIO_PADDING * audio->channels);
	}

	k = 0;
	for(i = 0; i < frame_samples; i++)
	{
		for(j = 0; j < audio->channels; j++)
		{
/* Assemble a big-endian 16-bit sample and normalize it to floating point */
			sample = ((MPEG3_INT16)(audio->ac3_buffer[k++])) << 8;
			sample |= audio->ac3_buffer[k++];
			audio->pcm_sample[audio->pcm_point + i * audio->channels + j] =
				(mpeg3_real_t)sample / 32767;
		}
	}
	audio->pcm_point += frame_samples * audio->channels;
	return 0;
}
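The per-sample conversion above, isolated as a self-contained sketch. The helper name convert_be16_to_float is hypothetical and not part of libmpeg3; it only reproduces the big-endian byte assembly and 1/32767 scaling that mpeg3audio_do_pcm applies while walking the interleaved buffer.

#include <stdint.h>
#include <stddef.h>

/* Hypothetical helper (not in libmpeg3): convert interleaved big-endian
 * 16-bit PCM bytes to floats, the same per-sample operation used in
 * mpeg3audio_do_pcm above.  total_samples = frames * channels. */
static void convert_be16_to_float(const unsigned char *src, float *dst,
	size_t total_samples)
{
	size_t i;
	for(i = 0; i < total_samples; i++)
	{
		int16_t sample = (int16_t)((src[2 * i] << 8) | src[2 * i + 1]);
		dst[i] = (float)sample / 32767.0f;
	}
}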
Code example #2
File: ac3.c  Project: ruthmagnus/audacity
int mpeg3audio_do_ac3(mpeg3audio_t *audio)
{
	int result = 0, i;

/* Reset the coefficients and exponents */
	mpeg3audio_ac3_reset_frame(audio);

	for(i = 0; i < 6 && !result; i++)
	{
		memset(audio->ac3_samples, 0, sizeof(float) * 256 * (audio->ac3_bsi.nfchans + audio->ac3_bsi.lfeon));
/* Extract most of the audblk info from the bitstream
 * (minus the mantissas) */
		result |= mpeg3audio_read_ac3_audblk(audio);

/* Take the differential exponent data and turn it into
 * absolute exponents */
		if(!result) result |= mpeg3audio_ac3_exponent_unpack(audio, 
					&(audio->ac3_bsi), 
					&(audio->ac3_audblk));

/* Figure out how many bits per mantissa */
		if(!result) result |= mpeg3audio_ac3_bit_allocate(audio, 
					audio->sampling_frequency_code, 
					&(audio->ac3_bsi), 
					&(audio->ac3_audblk));

/* Extract the mantissas from the data stream */
		if(!result) result |= mpeg3audio_ac3_coeff_unpack(audio,
					&(audio->ac3_bsi), 
					&(audio->ac3_audblk),
					audio->ac3_samples);

/* Rematrix the channels when the frame is coded in 2/0 (stereo) mode */
		if(!result && audio->ac3_bsi.acmod == 0x2)
			result |= mpeg3audio_ac3_rematrix(&(audio->ac3_audblk), 
					audio->ac3_samples);

/* Convert the frequency data into time samples */
		if(!result) result |= mpeg3audio_ac3_imdct(audio, 
			&(audio->ac3_bsi), 
			&(audio->ac3_audblk), 
			audio->ac3_samples);

		if(audio->pcm_point / audio->channels >= audio->pcm_allocated - MPEG3AUDIO_PADDING * audio->channels)
		{
/* Need more room */
			mpeg3audio_replace_buffer(audio, audio->pcm_allocated + MPEG3AUDIO_PADDING * audio->channels);
		}
	}

	mpeg3bits_use_demuxer(audio->astream);

	return result;
}
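mpeg3audio_do_ac3 strings its decode stages together with the same short-circuit idiom throughout: each stage ORs its return code into result, later stages run only while result is still zero, and the six-block loop itself stops on the first failure. A minimal sketch of that control flow, with hypothetical stage_* placeholders standing in for the real parse/unpack/transform calls:

/* Hypothetical placeholders for the real decode stages; each returns
 * nonzero on failure. */
static int stage_parse(void *ctx)     { (void)ctx; return 0; }
static int stage_unpack(void *ctx)    { (void)ctx; return 0; }
static int stage_transform(void *ctx) { (void)ctx; return 0; }

static int decode_frame(void *ctx, int blocks)
{
	int result = 0, i;

	for(i = 0; i < blocks && !result; i++)
	{
		result |= stage_parse(ctx);
		if(!result) result |= stage_unpack(ctx);
		if(!result) result |= stage_transform(ctx);
	}
/* A nonzero result skips the remaining stages of the current block
 * and ends the loop before the next block. */
	return result;
}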
Code example #3
File: layer2.c  Project: ruthmagnus/audacity
int mpeg3audio_dolayer2(mpeg3audio_t *audio)
{
	int i, j, result = 0;
	int channels = audio->channels;
	float fraction[2][4][SBLIMIT]; /* pick_table clears unused subbands */
	unsigned int bit_alloc[64];
	int scale[192];
	int single = audio->single;

	if(audio->error_protection)
		mpeg3bits_getbits(audio->astream, 16);

	mpeg3audio_II_select_table(audio);

	audio->jsbound = (audio->mode == MPG_MD_JOINT_STEREO) ?
		(audio->mode_ext << 2) + 4 : audio->II_sblimit;

	if(channels == 1 || single == 3)
		single = 0;

	result |= mpeg3audio_II_step_one(audio, bit_alloc, scale);

	for(i = 0; i < SCALE_BLOCK && !result; i++)
	{
		result |= mpeg3audio_II_step_two(audio, bit_alloc, fraction, scale, i >> 2);

		for(j = 0; j < 3; j++)
		{
			if(single >= 0)
			{
/* Monaural */
				mpeg3audio_synth_mono(audio, fraction[single][j], audio->pcm_sample, &(audio->pcm_point));
			}
			else
			{
/* Stereo */
				int p1 = audio->pcm_point;
				mpeg3audio_synth_stereo(audio, fraction[0][j], 0, audio->pcm_sample, &p1);
				mpeg3audio_synth_stereo(audio, fraction[1][j], 1, audio->pcm_sample, &(audio->pcm_point));
			}

			if(audio->pcm_point / audio->channels >= audio->pcm_allocated - MPEG3AUDIO_PADDING * audio->channels)
			{
/* Need more room */
				mpeg3audio_replace_buffer(audio, audio->pcm_allocated + MPEG3AUDIO_PADDING * audio->channels);
			}
		}
	}

	return result;
}
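All three decoders guard their output buffer with the same test: once pcm_point / channels crosses pcm_allocated - MPEG3AUDIO_PADDING * channels, mpeg3audio_replace_buffer is called with pcm_allocated + MPEG3AUDIO_PADDING * channels as the new size. The sketch below isolates that growth step. grow_pcm_buffer is a hypothetical stand-in for mpeg3audio_replace_buffer; it assumes pcm_sample is heap-allocated and treats the size argument as a plain float count, which may differ from how libmpeg3 actually tracks pcm_allocated.

#include <stdlib.h>

/* Hypothetical stand-in for mpeg3audio_replace_buffer.  Reallocates the
 * PCM array to hold new_samples floats, keeping what has already been
 * written; returns nonzero on failure. */
static int grow_pcm_buffer(float **pcm_sample, int *pcm_allocated,
	int new_samples)
{
	float *grown = realloc(*pcm_sample, sizeof(float) * (size_t)new_samples);

	if(!grown) return 1;
	*pcm_sample = grown;
	*pcm_allocated = new_samples;
	return 0;
}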