Code example #1
File: audio_encoder.c  Project: Brilon314/gpac
int dc_audio_encoder_encode(AudioOutputFile *audio_output_file, AudioInputData *audio_input_data)
{
	int got_pkt;
	AVCodecContext *audio_codec_ctx = audio_output_file->codec_ctx;

	while (av_fifo_size(audio_output_file->fifo) >= audio_output_file->frame_bytes) {
#ifdef DC_AUDIO_RESAMPLER
		uint8_t **data; //mirror AVFrame::data
		int num_planes_out;
#endif
		Bool resample;

		/* pull one encoder frame's worth of interleaved samples out of the FIFO */
		av_fifo_generic_read(audio_output_file->fifo, audio_output_file->adata_buf, audio_output_file->frame_bytes, NULL);

		audio_output_file->aframe->data[0] = audio_output_file->adata_buf;
		audio_output_file->aframe->linesize[0] = audio_output_file->frame_bytes;
		audio_output_file->aframe->linesize[1] = 0;

		av_init_packet(&audio_output_file->packet);
		audio_output_file->packet.data = NULL;
		audio_output_file->packet.size = 0;

		/*
		 * Set PTS (method 1)
		 */
		//audio_output_file->aframe->pts = audio_input_data->next_pts;

		/*
		 * Set PTS (method 2)
		 */
		//{
		//	int64_t now = av_gettime();
		//	AVRational avr;
		//	avr.num = 1;
		//	avr.den = AV_TIME_BASE;
		//	audio_output_file->aframe->pts = av_rescale_q(now, avr, audio_codec_ctx->time_base);
		//}

		resample = (DC_AUDIO_SAMPLE_FORMAT != audio_codec_ctx->sample_fmt
		            || DC_AUDIO_SAMPLE_RATE != audio_codec_ctx->sample_rate
		            || DC_AUDIO_NUM_CHANNELS != audio_codec_ctx->channels
		            || DC_AUDIO_CHANNEL_LAYOUT != audio_codec_ctx->channel_layout);
		/* Resample if needed */
		if (resample) {
#ifndef DC_AUDIO_RESAMPLER
			GF_LOG(GF_LOG_ERROR, GF_LOG_DASH, ("Audio resampling is needed at the encoding stage, but not supported by your version of DashCast. Aborting.\n"));
			exit(1);
#else
			if (ensure_resampler(audio_output_file, audio_codec_ctx)) {
				return -1;
			}

			/* keep the frame's original plane pointers so they can be restored after encoding */
			data = audio_output_file->aframe->extended_data;
			if (resample_audio(audio_output_file, audio_codec_ctx, &num_planes_out)) {
				return -1;
			}
#endif
		}

		/* Encode audio */
		if (avcodec_encode_audio2(audio_codec_ctx, &audio_output_file->packet, audio_output_file->aframe, &got_pkt) != 0) {
			GF_LOG(GF_LOG_ERROR, GF_LOG_DASH, ("Error while encoding audio.\n"));
#ifdef DC_AUDIO_RESAMPLER
			if (resample) {
				int i;
				for (i=0; i<num_planes_out; ++i) {
					av_free(audio_output_file->aframe->extended_data[i]);
				}
				av_free(audio_output_file->aframe->extended_data);
				audio_output_file->aframe->extended_data = data;
			}
#endif
			return -1;
		}

#ifdef DC_AUDIO_RESAMPLER
		if (resample) {
			int i;
			for (i=0; i<num_planes_out; ++i) {
				av_free(audio_output_file->aframe->extended_data[i]);
			}
			av_free(audio_output_file->aframe->extended_data);
			audio_output_file->aframe->extended_data = data;
		}
#endif

		if (got_pkt) {
			//audio_output_file->acc_samples += audio_output_file->aframe->nb_samples;
			return 0;
		}

		av_free_packet(&audio_output_file->packet);
	}

	return 1;
}
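
The function leaves an encoded packet in audio_output_file->packet and returns 0 when one is ready, returns 1 once the FIFO holds less than one full frame, and returns -1 on error. Below is a minimal caller sketch built only on that return convention; encode_pending_audio() and write_encoded_packet() are hypothetical names used for illustration and are not part of GPAC/DashCast.

/* Hypothetical driver (sketch): only dc_audio_encoder_encode() and the
 * AudioOutputFile/AudioInputData types come from the listing above. */
static int encode_pending_audio(AudioOutputFile *out, AudioInputData *in)
{
	int ret;
	/* 0: out->packet is ready; 1: not enough buffered samples; -1: error */
	while ((ret = dc_audio_encoder_encode(out, in)) == 0) {
		write_encoded_packet(out);        /* placeholder: hand out->packet to the muxer */
		av_free_packet(&out->packet);     /* release the packet once it has been consumed */
	}
	return (ret < 0) ? -1 : 0;
}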
Code example #2
File: audio_decoder.c  Project: Bevara/GPAC
int dc_audio_decoder_read(AudioInputFile *audio_input_file, AudioInputData *audio_input_data)
{
	int ret;
	AVPacket packet;
	int got_frame = 0;
	AVCodecContext *codec_ctx;
	AudioDataNode *audio_data_node;

	/* Get a pointer to the codec context for the audio stream */
	codec_ctx = audio_input_file->av_fmt_ctx->streams[audio_input_file->astream_idx]->codec;

	/* Read frames */
	while (1) {
		if (audio_input_file->av_pkt_list) {
			if (gf_list_count(audio_input_file->av_pkt_list)) {
				AVPacket *packet_copy;
				assert(audio_input_file->av_pkt_list);
				gf_mx_p(audio_input_file->av_pkt_list_mutex);
				packet_copy = gf_list_pop_front(audio_input_file->av_pkt_list);
				gf_mx_v(audio_input_file->av_pkt_list_mutex);

				if (packet_copy == NULL) {
					ret = AVERROR_EOF;
				} else {
					memcpy(&packet, packet_copy, sizeof(AVPacket));
					gf_free(packet_copy);
					ret = 0;
				}
			} else {
				gf_sleep(1);
				continue;
			}
		} else {
			ret = av_read_frame(audio_input_file->av_fmt_ctx, &packet);
		}
		if (ret == AVERROR_EOF) {
			if (audio_input_file->mode == LIVE_MEDIA && audio_input_file->no_loop == 0) {
				/* live source with looping enabled: rewind to the start and keep feeding frames */
				av_seek_frame(audio_input_file->av_fmt_ctx, audio_input_file->astream_idx, 0, 0);
				continue;
			}

			/* Flush decoder */
			packet.data = NULL;
			packet.size = 0;

#ifndef FF_API_AVFRAME_LAVC
			avcodec_get_frame_defaults(audio_input_data->aframe);
#else
			av_frame_unref(audio_input_data->aframe);
#endif

			avcodec_decode_audio4(codec_ctx, audio_input_data->aframe, &got_frame, &packet);

			if (got_frame) {
				dc_producer_lock(&audio_input_data->producer, &audio_input_data->circular_buf);
				dc_producer_unlock_previous(&audio_input_data->producer, &audio_input_data->circular_buf);
				audio_data_node = (AudioDataNode*)dc_producer_produce(&audio_input_data->producer, &audio_input_data->circular_buf);

				audio_data_node->abuf_size = audio_input_data->aframe->linesize[0];
				memcpy(audio_data_node->abuf, audio_input_data->aframe->data[0], audio_data_node->abuf_size);

				dc_producer_advance(&audio_input_data->producer, &audio_input_data->circular_buf);
				return 0;
			}

			dc_producer_end_signal(&audio_input_data->producer, &audio_input_data->circular_buf);
			dc_producer_unlock_previous(&audio_input_data->producer, &audio_input_data->circular_buf);

			return -2;
		} else if (ret < 0) {
			GF_LOG(GF_LOG_ERROR, GF_LOG_DASH, ("Cannot read audio frame.\n"));
			continue;
		}

		/* Is this a packet from the audio stream? */
		if (packet.stream_index == audio_input_file->astream_idx) {
			/* Set audio frame to default */

#ifndef FF_API_AVFRAME_LAVC
			avcodec_get_frame_defaults(audio_input_data->aframe);
#else
			av_frame_unref(audio_input_data->aframe);
#endif

			/* Decode audio frame */
			if (avcodec_decode_audio4(codec_ctx, audio_input_data->aframe, &got_frame, &packet) < 0) {
				av_free_packet(&packet);
				GF_LOG(GF_LOG_ERROR, GF_LOG_DASH, ("Error while decoding audio.\n"));
				dc_producer_end_signal(&audio_input_data->producer, &audio_input_data->circular_buf);
				dc_producer_unlock_previous(&audio_input_data->producer, &audio_input_data->circular_buf);
				return -1;
			}

			/* remember the decoded frame's pts when available, then advance next_pts by the frame duration in AV_TIME_BASE ticks */
			if (audio_input_data->aframe->pts != AV_NOPTS_VALUE)
				audio_input_data->next_pts = audio_input_data->aframe->pts;

			audio_input_data->next_pts += ((int64_t)AV_TIME_BASE * audio_input_data->aframe->nb_samples) / codec_ctx->sample_rate;

			/* Did we get an audio frame? */
			if (got_frame) {
				uint8_t **data;
				int data_size;
#ifdef DC_AUDIO_RESAMPLER
				int num_planes_out;
#endif
#ifdef GPAC_USE_LIBAV
				int sample_rate = codec_ctx->sample_rate;
				int num_channels = codec_ctx->channels;
				u64 channel_layout = codec_ctx->channel_layout;
#else
				int sample_rate = audio_input_data->aframe->sample_rate;
				int num_channels = audio_input_data->aframe->channels;
				u64 channel_layout = audio_input_data->aframe->channel_layout;
#endif
				enum AVSampleFormat sample_format = (enum AVSampleFormat)audio_input_data->aframe->format;
				Bool resample = (sample_rate    != DC_AUDIO_SAMPLE_RATE
				                 || num_channels   != DC_AUDIO_NUM_CHANNELS
				                 || channel_layout != DC_AUDIO_CHANNEL_LAYOUT
				                 || sample_format  != DC_AUDIO_SAMPLE_FORMAT);

				/* Resample if needed */
				if (resample) {
#ifndef DC_AUDIO_RESAMPLER
					GF_LOG(GF_LOG_ERROR, GF_LOG_DASH, ("Audio resampling is needed at the decoding stage, but not supported by your version of DashCast. Aborting.\n"));
					exit(1);
#else
					uint8_t **output;
					if (ensure_resampler(audio_input_file, sample_rate, num_channels, channel_layout, sample_format)) {
						return -1;
					}

					if (resample_audio(audio_input_file, audio_input_data, codec_ctx, &output, &num_planes_out, num_channels, sample_format)) {
						return -1;
					} else {
						data = output;
						av_samples_get_buffer_size(&data_size, num_channels, audio_input_data->aframe->nb_samples, sample_format, 0);
					}
#endif
				} else {
					/*no resampling needed: read data from the AVFrame*/
					data = audio_input_data->aframe->extended_data;
					data_size = audio_input_data->aframe->linesize[0];
				}

				assert(!av_sample_fmt_is_planar(DC_AUDIO_SAMPLE_FORMAT));
				av_fifo_generic_write(audio_input_file->fifo, data[0], data_size, NULL);

				if (/*audio_input_file->circular_buf.mode == OFFLINE*/audio_input_file->mode == ON_DEMAND || audio_input_file->mode == LIVE_MEDIA) {
					dc_producer_lock(&audio_input_data->producer, &audio_input_data->circular_buf);

					/* Unlock the previous node in the circular buffer. */
					dc_producer_unlock_previous(&audio_input_data->producer, &audio_input_data->circular_buf);

					/* Get the pointer of the current node in circular buffer. */
					audio_data_node = (AudioDataNode *) dc_producer_produce(&audio_input_data->producer, &audio_input_data->circular_buf);
					audio_data_node->channels = DC_AUDIO_NUM_CHANNELS;
					audio_data_node->channel_layout = DC_AUDIO_CHANNEL_LAYOUT;
					audio_data_node->sample_rate = DC_AUDIO_SAMPLE_RATE;
					audio_data_node->format = DC_AUDIO_SAMPLE_FORMAT;
					audio_data_node->abuf_size = data_size;
					av_fifo_generic_read(audio_input_file->fifo, audio_data_node->abuf, audio_data_node->abuf_size, NULL);

					dc_producer_advance(&audio_input_data->producer, &audio_input_data->circular_buf);
				} else {
					while (av_fifo_size(audio_input_file->fifo) >= LIVE_FRAME_SIZE) {
						/* Lock the current node in the circular buffer. */
						if (dc_producer_lock(&audio_input_data->producer, &audio_input_data->circular_buf) < 0) {
							GF_LOG(GF_LOG_ERROR, GF_LOG_DASH, ("[dashcast] Live system dropped an audio frame\n"));
							continue;
						}

						/* Unlock the previous node in the circular buffer. */
						dc_producer_unlock_previous(&audio_input_data->producer, &audio_input_data->circular_buf);

						/* Get the pointer of the current node in circular buffer. */
						audio_data_node = (AudioDataNode *) dc_producer_produce(&audio_input_data->producer, &audio_input_data->circular_buf);

						audio_data_node->abuf_size = LIVE_FRAME_SIZE;
						av_fifo_generic_read(audio_input_file->fifo, audio_data_node->abuf, audio_data_node->abuf_size, NULL);

						dc_producer_advance(&audio_input_data->producer, &audio_input_data->circular_buf);
					}
				}

#ifdef DC_AUDIO_RESAMPLER
				if (resample) {
					int i;
					for (i=0; i<num_planes_out; ++i) {
						av_free(data[i]);
					}
					av_free(data);
				}
#endif

				return 0;
			}
		}

		/*
		 * Free the packet that was allocated by av_read_frame
		 */
		av_free_packet(&packet);
	}

	GF_LOG(GF_LOG_ERROR, GF_LOG_DASH, ("Unknown error while reading audio frame.\n"));
	return -1;
}
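
dc_audio_decoder_read() returns 0 after pushing one decoded frame into the circular buffer, -1 on a decoding error, and -2 once the input has been fully flushed at end of stream. A minimal reader-loop sketch under that convention follows; run_audio_decoder() is a hypothetical name and the real DashCast threading code is not shown here.

/* Hypothetical decoder loop (sketch): only dc_audio_decoder_read() and the
 * AudioInputFile/AudioInputData types come from the listing above. */
static int run_audio_decoder(AudioInputFile *in_file, AudioInputData *in_data)
{
	while (1) {
		int ret = dc_audio_decoder_read(in_file, in_data);
		if (ret == -2)
			return 0;    /* end of stream: decoder flushed, consumer already signalled */
		if (ret == -1)
			return -1;   /* decoding error, already reported via GF_LOG */
		/* ret == 0: one frame landed in the circular buffer; keep reading */
	}
}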