/*
 * Read, demux and decode video from the input, publishing one decoded frame
 * into the shared producer/consumer circular buffer per successful call.
 *
 * video_input_file  - demuxer context, stream indices and inter-thread packet list
 * video_input_data  - circular buffer + producer state the decoded frames go into
 * source_number     - tag stored on each produced node identifying this source
 * use_source_timing - when non-zero, keep/derive PTS from the source packets
 * is_live_capture   - when non-zero (with use_source_timing), run the PTS-init
 *                     state machine below to estimate frame duration
 * exit_signal_addr  - polled while waiting for consumers of raw-video packets
 *
 * Returns 0 after one frame was produced, -1 on a decode error,
 * -2 once end-of-stream has been signalled to the consumers.
 */
int dc_video_decoder_read(VideoInputFile *video_input_file, VideoInputData *video_input_data, int source_number, int use_source_timing, int is_live_capture, const int *exit_signal_addr)
{
#ifdef DASHCAST_DEBUG_TIME_
	struct timeval start, end;
	long elapsed_time;
#endif
	AVPacket packet;
	int ret, got_frame, already_locked = 0;
	AVCodecContext *codec_ctx;
	VideoDataNode *video_data_node;

	/* Get a pointer to the codec context for the video stream */
	codec_ctx = video_input_file->av_fmt_ctx->streams[video_input_file->vstream_idx]->codec;

	/* Read frames */
	while (1) {
#ifdef DASHCAST_DEBUG_TIME_
		gf_gettimeofday(&start, NULL);
#endif
		memset(&packet, 0, sizeof(AVPacket));
		ret = av_read_frame(video_input_file->av_fmt_ctx, &packet);
#ifdef DASHCAST_DEBUG_TIME_
		gf_gettimeofday(&end, NULL);
		elapsed_time = (end.tv_sec * 1000000 + end.tv_usec) - (start.tv_sec * 1000000 + start.tv_usec);
		fprintf(stdout, "fps: %f\n", 1000000.0/elapsed_time);
#endif

		/* If we demux for the audio thread, send the packet to the audio.
		 * A NULL entry appended on EOF serves as the end-of-stream marker
		 * that dc_audio_decoder_read() pops and maps back to AVERROR_EOF. */
		if (video_input_file->av_fmt_ctx_ref_cnt && ((packet.stream_index != video_input_file->vstream_idx) || (ret == AVERROR_EOF))) {
			AVPacket *packet_copy = NULL;
			if (ret != AVERROR_EOF) {
				/* NOTE(review): GF_SAFEALLOC result is not checked before the
				 * memcpy — on allocation failure this would dereference NULL. */
				GF_SAFEALLOC(packet_copy, AVPacket);
				/* shallow copy: the packet's data buffers are now owned by the
				 * audio thread, which frees the copy after use */
				memcpy(packet_copy, &packet, sizeof(AVPacket));
			}

			assert(video_input_file->av_pkt_list);
			gf_mx_p(video_input_file->av_pkt_list_mutex);
			gf_list_add(video_input_file->av_pkt_list, packet_copy);
			gf_mx_v(video_input_file->av_pkt_list_mutex);

			if (ret != AVERROR_EOF) {
				continue;
			}
			/* on EOF, fall through so the video side also handles it */
		}

		if (ret == AVERROR_EOF) {
			/* Looping input: rewind instead of terminating */
			if (video_input_file->mode == LIVE_MEDIA && video_input_file->no_loop == 0) {
				av_seek_frame(video_input_file->av_fmt_ctx, video_input_file->vstream_idx, 0, 0);
				av_free_packet(&packet);
				continue;
			}

			dc_producer_lock(&video_input_data->producer, &video_input_data->circular_buf);
			dc_producer_unlock_previous(&video_input_data->producer, &video_input_data->circular_buf);
			video_data_node = (VideoDataNode *) dc_producer_produce(&video_input_data->producer, &video_input_data->circular_buf);
			video_data_node->source_number = source_number;

			/* Flush decoder: feed an empty packet to drain buffered frames */
			memset(&packet, 0, sizeof(AVPacket));
#ifndef FF_API_AVFRAME_LAVC
			avcodec_get_frame_defaults(video_data_node->vframe);
#else
			av_frame_unref(video_data_node->vframe);
#endif

			avcodec_decode_video2(codec_ctx, video_data_node->vframe, &got_frame, &packet);
			if (got_frame) {
				/* one more delayed frame recovered — publish it */
				dc_producer_advance(&video_input_data->producer, &video_input_data->circular_buf);
				return 0;
			}

			/* nothing left: tell the consumers the stream has ended */
			dc_producer_end_signal(&video_input_data->producer, &video_input_data->circular_buf);
			dc_producer_unlock(&video_input_data->producer, &video_input_data->circular_buf);
			return -2;
		}
		else if (ret < 0) {
			GF_LOG(GF_LOG_ERROR, GF_LOG_DASH, ("Cannot read video frame.\n"));
			continue;
		}

		/* Is this a packet from the video stream? */
		if (packet.stream_index == video_input_file->vstream_idx) {
			/* Acquire the producer slot, retrying up to 10 times (~100ms)
			 * before dropping this frame (live pacing). */
			u32 nb_retry = 10;
			while (!already_locked) {
				if (dc_producer_lock(&video_input_data->producer, &video_input_data->circular_buf) < 0) {
					if (!nb_retry) break;
					gf_sleep(10);
					nb_retry--;
					continue;
				}
				dc_producer_unlock_previous(&video_input_data->producer, &video_input_data->circular_buf);
				already_locked = 1;
			}
			if (!already_locked) {
				GF_LOG(GF_LOG_ERROR, GF_LOG_DASH, ("[dashcast] Live system dropped a video frame\n"));
				continue;
			}

			video_data_node = (VideoDataNode *) dc_producer_produce(&video_input_data->producer, &video_input_data->circular_buf);
			video_data_node->source_number = source_number;

			/* Set video frame to default */
#ifndef FF_API_AVFRAME_LAVC
			avcodec_get_frame_defaults(video_data_node->vframe);
#else
			av_frame_unref(video_data_node->vframe);
#endif

			/* Decode video frame */
			if (avcodec_decode_video2(codec_ctx, video_data_node->vframe, &got_frame, &packet) < 0) {
				av_free_packet(&packet);
				GF_LOG(GF_LOG_ERROR, GF_LOG_DASH, ("Error while decoding video.\n"));
				dc_producer_end_signal(&video_input_data->producer, &video_input_data->circular_buf);
				dc_producer_unlock(&video_input_data->producer, &video_input_data->circular_buf);
				return -1;
			}

			/* Did we get a video frame? */
			if (got_frame) {
				if (use_source_timing && is_live_capture) {
					u64 pts;
					/* pts_init state machine:
					 *   0 = not started, 1 = base PTS recorded,
					 *   2 = one timed packet seen (duration estimate pending),
					 *   3 = frame_duration and sync_tolerance established */
					if (video_input_file->pts_init == 0) {
						video_input_file->pts_init = 1;
						video_input_file->utc_at_init = gf_net_get_utc();
						video_input_file->first_pts = packet.pts;
						video_input_file->prev_pts = 0;
						video_input_data->frame_duration = 0;
					}
					if (video_input_file->pts_init && (video_input_file->pts_init!=3) ) {
						if (packet.pts==AV_NOPTS_VALUE) {
							/* untimed packet: restart the estimation */
							video_input_file->pts_init=1;
						} else if (video_input_file->pts_init==1) {
							video_input_file->pts_init=2;
							video_input_file->pts_dur_estimate = packet.pts;
						} else if (video_input_file->pts_init==2) {
							video_input_file->pts_init=3;
							video_input_data->frame_duration = packet.pts - video_input_file->pts_dur_estimate;
							/* tolerance = 1.8 frame durations */
							video_input_file->sync_tolerance = 9*video_input_data->frame_duration/5;
							//TODO - check with audio if sync is OK
						}
					}

					//move to 0-based PTS
					if (packet.pts!=AV_NOPTS_VALUE) {
						pts = packet.pts - video_input_file->first_pts;
					} else {
						/* no timestamp: extrapolate from the previous frame */
						pts = video_input_file->prev_pts + video_input_data->frame_duration;
					}

					//check for drop frames — deliberately disabled via "0 &&"
#ifndef GPAC_DISABLE_LOG
					if (0 && gf_log_tool_level_on(GF_LOG_DASH, GF_LOG_WARNING)) {
						if (pts - video_input_file->prev_pts > video_input_file->sync_tolerance) {
							u32 nb_lost=0;
							while (video_input_file->prev_pts + video_input_data->frame_duration + video_input_file->sync_tolerance < pts) {
								video_input_file->prev_pts += video_input_data->frame_duration;
								nb_lost++;
							}
							if (nb_lost) {
								GF_LOG(GF_LOG_WARNING, GF_LOG_DASH, ("[DashCast] Capture lost %d video frames \n", nb_lost));
							}
						}
					}
#endif
					video_input_file->prev_pts = pts;
					video_data_node->vframe->pts = pts;
				}

				/* Frame still has no PTS: fall back to a frame counter, or to
				 * the packet PTS carried by the frame when using source timing */
				if (video_data_node->vframe->pts==AV_NOPTS_VALUE) {
					if (!use_source_timing) {
						video_data_node->vframe->pts = video_input_file->frame_decoded;
					} else {
						video_data_node->vframe->pts = video_data_node->vframe->pkt_pts;
					}
				}
				video_input_file->frame_decoded++;

				GF_LOG(GF_LOG_DEBUG, GF_LOG_DASH, ("[DashCast] Video Frame TS "LLU" decoded at UTC "LLU" ms\n", video_data_node->vframe->pts, gf_net_get_utc() ));

				// For a decode/encode process we must free this memory.
				//But if the input is raw and there is no need to decode then
				// the packet is directly passed for decoded frame. We must wait until rescale is done before freeing it
				if (codec_ctx->codec->id == CODEC_ID_RAWVIDEO) {
					/* hand the packet itself to the consumers; busy-wait until
					 * every consumer released it (or an exit was requested) */
					video_data_node->nb_raw_frames_ref = video_input_file->nb_consumers;
					video_data_node->raw_packet = packet;
					dc_producer_advance(&video_input_data->producer, &video_input_data->circular_buf);
					while (video_data_node->nb_raw_frames_ref && ! *exit_signal_addr) {
						gf_sleep(0);
					}
				} else {
					dc_producer_advance(&video_input_data->producer, &video_input_data->circular_buf);
					av_free_packet(&packet);
				}
				return 0;
			}
		}

		/* Free the packet that was allocated by av_read_frame */
		av_free_packet(&packet);
	}

	/* NOTE(review): unreachable — the while(1) above only exits via return */
	GF_LOG(GF_LOG_ERROR, GF_LOG_DASH, ("Unknown error while reading video frame.\n"));
	return -1;
}
/*
 * Propagate end-of-stream through the scaler's circular buffer: raise the
 * producer end signal, then unlock the previously held node.
 */
void dc_video_scaler_end_signal(VideoScaledData *video_scaled_data)
{
	VideoScaledData *vsd = video_scaled_data;

	dc_producer_end_signal(&vsd->producer, &vsd->circular_buf);
	dc_producer_unlock_previous(&vsd->producer, &vsd->circular_buf);
}
/*
 * Propagate end-of-stream through the video input circular buffer: raise the
 * producer end signal on the current node, then on the previous one.
 */
void dc_video_input_data_end_signal(VideoInputData *video_input_data)
{
	VideoInputData *vid = video_input_data;

	dc_producer_end_signal(&vid->producer, &vid->circular_buf);
	dc_producer_end_signal_previous(&vid->producer, &vid->circular_buf);
}
/*
 * Propagate end-of-stream through the audio input circular buffer: raise the
 * producer end signal on the current node, then on the previous one.
 */
void dc_audio_inout_data_end_signal(AudioInputData *audio_input_data)
{
	AudioInputData *aid = audio_input_data;

	dc_producer_end_signal(&aid->producer, &aid->circular_buf);
	dc_producer_end_signal_previous(&aid->producer, &aid->circular_buf);
}
/*
 * Read, decode and (optionally) resample audio, publishing decoded samples
 * into the shared producer/consumer circular buffer.
 *
 * Packets come either from this file's own demuxer, or — when the video
 * thread owns the demuxer — from the inter-thread av_pkt_list it fills
 * (a NULL list entry is the end-of-stream marker).
 *
 * Returns 0 after samples were produced, -1 on a decode/resample error,
 * -2 once end-of-stream has been signalled to the consumers.
 */
int dc_audio_decoder_read(AudioInputFile *audio_input_file, AudioInputData *audio_input_data)
{
	int ret;
	AVPacket packet;
	int got_frame = 0;
	AVCodecContext *codec_ctx;
	AudioDataNode *audio_data_node;

	/* Get a pointer to the codec context for the audio stream */
	codec_ctx = audio_input_file->av_fmt_ctx->streams[audio_input_file->astream_idx]->codec;

	/* Read frames */
	while (1) {
		if (audio_input_file->av_pkt_list) {
			/* Demuxing is done by the video thread: pop its packets */
			if (gf_list_count(audio_input_file->av_pkt_list)) {
				AVPacket *packet_copy;
				assert(audio_input_file->av_pkt_list);
				gf_mx_p(audio_input_file->av_pkt_list_mutex);
				packet_copy = gf_list_pop_front(audio_input_file->av_pkt_list);
				gf_mx_v(audio_input_file->av_pkt_list_mutex);

				if (packet_copy == NULL) {
					/* NULL entry = EOF marker pushed by the video thread */
					ret = AVERROR_EOF;
				} else {
					memcpy(&packet, packet_copy, sizeof(AVPacket));
					gf_free(packet_copy);
					ret = 0;
				}
			} else {
				/* nothing queued yet: yield and poll again */
				gf_sleep(1);
				continue;
			}
		} else {
			ret = av_read_frame(audio_input_file->av_fmt_ctx, &packet);
		}
		if (ret == AVERROR_EOF) {
			/* Looping input: rewind instead of terminating.
			 * NOTE(review): unlike the video path, the packet is not freed
			 * here before continuing — confirm no leak on this branch. */
			if (audio_input_file->mode == LIVE_MEDIA && audio_input_file->no_loop == 0) {
				av_seek_frame(audio_input_file->av_fmt_ctx, audio_input_file->astream_idx, 0, 0);
				continue;
			}

			/* Flush decoder: feed an empty packet to drain buffered frames */
			packet.data = NULL;
			packet.size = 0;
#ifndef FF_API_AVFRAME_LAVC
			avcodec_get_frame_defaults(audio_input_data->aframe);
#else
			av_frame_unref(audio_input_data->aframe);
#endif
			avcodec_decode_audio4(codec_ctx, audio_input_data->aframe, &got_frame, &packet);
			if (got_frame) {
				/* one more delayed frame recovered — publish it directly
				 * (no resampling/FIFO on the flush path) */
				dc_producer_lock(&audio_input_data->producer, &audio_input_data->circular_buf);
				dc_producer_unlock_previous(&audio_input_data->producer, &audio_input_data->circular_buf);
				audio_data_node = (AudioDataNode*)dc_producer_produce(&audio_input_data->producer, &audio_input_data->circular_buf);

				audio_data_node->abuf_size = audio_input_data->aframe->linesize[0];
				memcpy(audio_data_node->abuf, audio_input_data->aframe->data[0], audio_data_node->abuf_size);

				dc_producer_advance(&audio_input_data->producer, &audio_input_data->circular_buf);
				return 0;
			}
			/* nothing left: tell the consumers the stream has ended */
			dc_producer_end_signal(&audio_input_data->producer, &audio_input_data->circular_buf);
			dc_producer_unlock_previous(&audio_input_data->producer, &audio_input_data->circular_buf);
			return -2;
		}
		else if (ret < 0) {
			GF_LOG(GF_LOG_ERROR, GF_LOG_DASH, ("Cannot read audio frame.\n"));
			continue;
		}

		/* Is this a packet from the audio stream? */
		if (packet.stream_index == audio_input_file->astream_idx) {
			/* Set audio frame to default */
#ifndef FF_API_AVFRAME_LAVC
			avcodec_get_frame_defaults(audio_input_data->aframe);
#else
			av_frame_unref(audio_input_data->aframe);
#endif

			/* Decode audio frame */
			if (avcodec_decode_audio4(codec_ctx, audio_input_data->aframe, &got_frame, &packet) < 0) {
				av_free_packet(&packet);
				GF_LOG(GF_LOG_ERROR, GF_LOG_DASH, ("Error while decoding audio.\n"));
				dc_producer_end_signal(&audio_input_data->producer, &audio_input_data->circular_buf);
				dc_producer_unlock_previous(&audio_input_data->producer, &audio_input_data->circular_buf);
				return -1;
			}

			/* Track the expected PTS of the next frame: resync on a real
			 * timestamp, then advance by this frame's duration in AV_TIME_BASE */
			if (audio_input_data->aframe->pts != AV_NOPTS_VALUE)
				audio_input_data->next_pts = audio_input_data->aframe->pts;
			audio_input_data->next_pts += ((int64_t)AV_TIME_BASE * audio_input_data->aframe->nb_samples) / codec_ctx->sample_rate;

			/* Did we get an audio frame? */
			if (got_frame) {
				uint8_t **data;
				int data_size;
#ifdef DC_AUDIO_RESAMPLER
				int num_planes_out;
#endif
				/* libav keeps format info on the codec context; ffmpeg on the frame */
#ifdef GPAC_USE_LIBAV
				int sample_rate = codec_ctx->sample_rate;
				int num_channels = codec_ctx->channels;
				u64 channel_layout = codec_ctx->channel_layout;
#else
				int sample_rate = audio_input_data->aframe->sample_rate;
				int num_channels = audio_input_data->aframe->channels;
				u64 channel_layout = audio_input_data->aframe->channel_layout;
#endif
				enum AVSampleFormat sample_format = (enum AVSampleFormat)audio_input_data->aframe->format;
				/* resample whenever the decoded format differs from DashCast's
				 * fixed internal audio format */
				Bool resample = (sample_rate != DC_AUDIO_SAMPLE_RATE || num_channels != DC_AUDIO_NUM_CHANNELS || channel_layout != DC_AUDIO_CHANNEL_LAYOUT || sample_format != DC_AUDIO_SAMPLE_FORMAT);

				/* Resample if needed */
				if (resample) {
#ifndef DC_AUDIO_RESAMPLER
					GF_LOG(GF_LOG_ERROR, GF_LOG_DASH, ("Audio resampling is needed at the decoding stage, but not supported by your version of DashCast. Aborting.\n"));
					exit(1);
#else
					uint8_t **output;
					if (ensure_resampler(audio_input_file, sample_rate, num_channels, channel_layout, sample_format)) {
						return -1;
					}

					if (resample_audio(audio_input_file, audio_input_data, codec_ctx, &output, &num_planes_out, num_channels, sample_format)) {
						return -1;
					} else {
						/* resampled planes are freed below once consumed */
						data = output;
						av_samples_get_buffer_size(&data_size, num_channels, audio_input_data->aframe->nb_samples, sample_format, 0);
					}
#endif
				} else {
					/*no resampling needed: read data from the AVFrame*/
					data = audio_input_data->aframe->extended_data;
					data_size = audio_input_data->aframe->linesize[0];
				}

				/* only plane 0 is written: internal format must be interleaved */
				assert(!av_sample_fmt_is_planar(DC_AUDIO_SAMPLE_FORMAT));
				av_fifo_generic_write(audio_input_file->fifo, data[0], data_size, NULL);

				if (/*audio_input_file->circular_buf.mode == OFFLINE*/audio_input_file->mode == ON_DEMAND || audio_input_file->mode == LIVE_MEDIA) {
					/* file-based input: publish the whole decoded chunk at once */
					dc_producer_lock(&audio_input_data->producer, &audio_input_data->circular_buf);

					/* Unlock the previous node in the circular buffer. */
					dc_producer_unlock_previous(&audio_input_data->producer, &audio_input_data->circular_buf);

					/* Get the pointer of the current node in circular buffer. */
					audio_data_node = (AudioDataNode *) dc_producer_produce(&audio_input_data->producer, &audio_input_data->circular_buf);

					audio_data_node->channels = DC_AUDIO_NUM_CHANNELS;
					audio_data_node->channel_layout = DC_AUDIO_CHANNEL_LAYOUT;
					audio_data_node->sample_rate = DC_AUDIO_SAMPLE_RATE;
					audio_data_node->format = DC_AUDIO_SAMPLE_FORMAT;
					audio_data_node->abuf_size = data_size;
					av_fifo_generic_read(audio_input_file->fifo, audio_data_node->abuf, audio_data_node->abuf_size, NULL);

					dc_producer_advance(&audio_input_data->producer, &audio_input_data->circular_buf);
				} else {
					/* live input: drain the FIFO in fixed-size frames */
					while (av_fifo_size(audio_input_file->fifo) >= LIVE_FRAME_SIZE) {
						/* Lock the current node in the circular buffer. */
						if (dc_producer_lock(&audio_input_data->producer, &audio_input_data->circular_buf) < 0) {
							GF_LOG(GF_LOG_ERROR, GF_LOG_DASH, ("[dashcast] Live system dropped an audio frame\n"));
							continue;
						}

						/* Unlock the previous node in the circular buffer. */
						dc_producer_unlock_previous(&audio_input_data->producer, &audio_input_data->circular_buf);

						/* Get the pointer of the current node in circular buffer. */
						audio_data_node = (AudioDataNode *) dc_producer_produce(&audio_input_data->producer, &audio_input_data->circular_buf);

						audio_data_node->abuf_size = LIVE_FRAME_SIZE;
						av_fifo_generic_read(audio_input_file->fifo, audio_data_node->abuf, audio_data_node->abuf_size, NULL);

						dc_producer_advance(&audio_input_data->producer, &audio_input_data->circular_buf);
					}
				}

#ifdef DC_AUDIO_RESAMPLER
				/* free the planes allocated by the resampler */
				if (resample) {
					int i;
					for (i=0; i<num_planes_out; ++i) {
						av_free(data[i]);
					}
					av_free(data);
				}
#endif
				return 0;
			}
		}

		/*
		 * Free the packet that was allocated by av_read_frame
		 */
		av_free_packet(&packet);
	}

	/* NOTE(review): unreachable — the while(1) above only exits via return */
	GF_LOG(GF_LOG_ERROR, GF_LOG_DASH, ("Unknown error while reading audio frame.\n"));
	return -1;
}