Example 1
int main(void)
{

	int frame = 0, ret = 0, got_picture = 0, frameFinished = 0, videoStream = 0, check_yuv = 0;
	int frame_size = 0, bitrate = 0;
	int streamIdx = 0;
	unsigned i = 0;
	enum AVMediaType mediaType;
	struct SwsContext *sws_ctx = NULL;
	AVStream *video_st = NULL;
	AVCodecContext    *pCodecCtx = NULL, *ctxEncode = NULL;
	AVFrame           *pFrame = NULL;
	AVPacket          input_pkt, output_pkt;

	check_yuv = check_file();

	// Register all formats and codecs
	av_register_all();

	if (open_input_file(check_yuv) < 0) exit(1);
	if (open_output_file() < 0) exit(1);

	init_parameter(&input_pkt, &output_pkt); //init parameter function
	pictureEncoded_init();

	// initialize SWS context for software scaling
	sws_ctx = sws_getContext(inFmtCtx->streams[streamIdx]->codec->width,
				 inFmtCtx->streams[streamIdx]->codec->height,
				 inFmtCtx->streams[streamIdx]->codec->pix_fmt,
				 clip_width, clip_height, PIX_FMT_YUV420P,
				 SWS_BILINEAR, NULL, NULL, NULL);

	while (av_read_frame(inFmtCtx, &input_pkt) >= 0) {

		streamIdx = input_pkt.stream_index;
		mediaType = inFmtCtx->streams[streamIdx]->codec->codec_type;

		av_log(NULL, AV_LOG_DEBUG, "Demuxer gave frame of stream_index %u\n", streamIdx);
		av_log(NULL, AV_LOG_DEBUG, "Going to reencode \n");

		pFrame = av_frame_alloc();

		if (!pFrame)
		{
			ret = AVERROR(ENOMEM);
			break;
		}

		av_packet_rescale_ts(&input_pkt, inFmtCtx->streams[streamIdx]->time_base, inFmtCtx->streams[streamIdx]->codec->time_base);
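		// av_packet_rescale_ts() converts the packet's pts/dts/duration from the
		// demuxer's stream time_base into the decoder's codec time_base.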


		if (mediaType == AVMEDIA_TYPE_VIDEO){


			ret = avcodec_decode_video2(inFmtCtx->streams[streamIdx]->codec, pFrame, &frameFinished, &input_pkt);       // Decode video frame (input_pkt-> pFrame)

			if (ret < 0)
			{
				av_frame_free(&pFrame);
				av_log(NULL, AV_LOG_ERROR, "Decoding failed\n");
				break;
			}
			 

			if (frameFinished){

				frame_num++;

				sws_scale(sws_ctx, (const uint8_t * const *)pFrame->data, pFrame->linesize, 0, clip_height, pictureEncoded->data, pictureEncoded->linesize);

				pictureEncoded->pts = av_frame_get_best_effort_timestamp(pFrame);
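				// av_frame_get_best_effort_timestamp() returns the frame pts when it
				// looks reliable and falls back to the packet dts otherwise.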

				//pictureEncoded-> output_pkt
				//avcodec_encode_video2(ctxEncode, &output_pkt, pictureEncoded, &got_picture);
				avcodec_encode_video2(ofmt_ctx->streams[streamIdx]->codec, &output_pkt, pictureEncoded, &got_picture);

				av_frame_free(&pFrame);

				//if the function is working
				if (got_picture){

					//printf("Encoding %d \n", frame_use);

					frame_use++;


					av_packet_rescale_ts(&output_pkt, ofmt_ctx->streams[streamIdx]->codec->time_base, ofmt_ctx->streams[streamIdx]->time_base);

					//av_packet_rescale_ts(&output_pkt, ctxEncode->time_base, video_st->time_base);

					ret = av_interleaved_write_frame(ofmt_ctx, &output_pkt);

					if (ret < 0) {
						fprintf(stderr, "Error muxing packet\n");
						break;
					}
				}
			}

		}

		// Also free pFrame here: av_frame_free() NULLs the pointer on the video
		// path, so this is a safe no-op there and plugs the leak for packets
		// of other streams; the packets are freed for every stream as well.
		av_frame_free(&pFrame);
		av_free_packet(&input_pkt);
		av_free_packet(&output_pkt);
	}

	//flush encoders
	for (i = 0; i < inFmtCtx->nb_streams; i++)
	{
		if (inFmtCtx->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO) {

			ret = flush_encoder(i);
			if (ret < 0)
			{
				av_log(NULL, AV_LOG_ERROR, "Flushing encoder failed\n");
				exit(1);
			}
		}
	}

	printf("\n\n total frame_num : %d , frame_encode:  %d \n", frame_num - 1, frame_use - 1);


	/* Write the trailer, if any. The trailer must be written before you
	* close the CodecContexts open when you wrote the header; otherwise
	* av_write_trailer() may try to use memory that was freed on
	* av_codec_close(). */
	av_write_trailer(ofmt_ctx);

	// Free the YUV frame
	av_frame_free(&pFrame);
	av_frame_free(&pictureEncoded);

	for (i = 0; i < inFmtCtx->nb_streams; i++)
	{
		avcodec_close(inFmtCtx->streams[i]->codec);
		if (ofmt_ctx && ofmt_ctx->nb_streams > i && ofmt_ctx->streams[i] && ofmt_ctx->streams[i]->codec)
			avcodec_close(ofmt_ctx->streams[i]->codec);
	}

	avformat_close_input(&inFmtCtx);

	if (ofmt_ctx && !(ofmt_ctx->oformat->flags & AVFMT_NOFILE))
		avio_closep(&ofmt_ctx->pb);
	avformat_free_context(ofmt_ctx);

	if (ret < 0)
		av_log(NULL, AV_LOG_ERROR, "Error occurred \n");

	system("pause");
	return 0;
}
Example 2
QList<QbPacket> VideoStream::readPackets(AVPacket *packet)
{
    QList<QbPacket> packets;

    if (!this->isValid())
        return packets;

    AVFrame iFrame;
    avcodec_get_frame_defaults(&iFrame);

    int gotFrame;

    avcodec_decode_video2(this->codecContext(),
                          &iFrame,
                          &gotFrame,
                          packet);

    if (!gotFrame)
        return packets;

    int frameSize = avpicture_get_size(this->codecContext()->pix_fmt,
                                       this->codecContext()->width,
                                       this->codecContext()->height);

    QSharedPointer<uchar> oBuffer(new uchar[frameSize]);

    if (!oBuffer)
        return packets;

    static bool sync;
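    // Static: decided once, on the first decoded frame. True when that frame
    // carried no best-effort timestamp; published via the "sync" caps property.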

    if (this->m_fst)
    {
        sync = !av_frame_get_best_effort_timestamp(&iFrame);
        this->m_pts = 0;
        this->m_duration = this->fps().invert().value() * this->timeBase().invert().value();
        this->m_fst = false;
    }
    else
        this->m_pts += this->m_duration;

    avpicture_layout((AVPicture *) &iFrame,
                     this->codecContext()->pix_fmt,
                     this->codecContext()->width,
                     this->codecContext()->height,
                     (uint8_t *) oBuffer.data(),
                     frameSize);

    QbCaps caps = this->caps();
    caps.setProperty("sync", sync);

    QbPacket oPacket(caps,
                     oBuffer,
                     frameSize);

    oPacket.setPts(this->m_pts);
    oPacket.setDuration(this->m_duration);
    oPacket.setTimeBase(this->timeBase());
    oPacket.setIndex(this->index());

    packets << oPacket;

    return packets;
}
Example 3
bool MediaEngine::stepVideo(int videoPixelMode, bool skipFrame) {
#ifdef USE_FFMPEG
	auto codecIter = m_pCodecCtxs.find(m_videoStream);
	AVCodecContext *m_pCodecCtx = codecIter == m_pCodecCtxs.end() ? 0 : codecIter->second;

	if (!m_pFormatCtx)
		return false;
	if (!m_pCodecCtx)
		return false;
	if (!m_pFrame || !m_pFrameRGB)
		return false;

	updateSwsFormat(videoPixelMode);
	// TODO: Technically we could set this to frameWidth instead of m_desWidth for better perf.
	// Update the linesize for the new format too.  We started with the largest size, so it should fit.
	m_pFrameRGB->linesize[0] = getPixelFormatBytes(videoPixelMode) * m_desWidth;

	AVPacket packet;
	av_init_packet(&packet);
	int frameFinished;
	bool bGetFrame = false;
	while (!bGetFrame) {
		bool dataEnd = av_read_frame(m_pFormatCtx, &packet) < 0;
		// Even if we've read all frames, some may have been re-ordered frames at the end.
		// Still need to decode those, so keep calling avcodec_decode_video2().
		if (dataEnd || packet.stream_index == m_videoStream) {
			// avcodec_decode_video2() gives us the re-ordered frames with a NULL packet.
			if (dataEnd)
				av_free_packet(&packet);

			int result = avcodec_decode_video2(m_pCodecCtx, m_pFrame, &frameFinished, &packet);
			if (frameFinished) {
				if (!skipFrame) {
					sws_scale(m_sws_ctx, m_pFrame->data, m_pFrame->linesize, 0,
						m_pCodecCtx->height, m_pFrameRGB->data, m_pFrameRGB->linesize);
				}

				if (av_frame_get_best_effort_timestamp(m_pFrame) != AV_NOPTS_VALUE)
					m_videopts = av_frame_get_best_effort_timestamp(m_pFrame) + av_frame_get_pkt_duration(m_pFrame) - m_firstTimeStamp;
				else
					m_videopts += av_frame_get_pkt_duration(m_pFrame);
				bGetFrame = true;
			}
			if (result <= 0 && dataEnd) {
				// Sometimes, m_readSize is less than m_streamSize at the end, but not by much.
				// This is kinda a hack, but the ringbuffer would have to be prematurely empty too.
				m_isVideoEnd = !bGetFrame && (m_pdata->getQueueSize() == 0);
				if (m_isVideoEnd)
					m_decodingsize = 0;
				break;
			}
		}
		av_free_packet(&packet);
	}
	return bGetFrame;
#else
	// If video engine is not available, just add to the timestamp at least.
	m_videopts += 3003;
	return true;
#endif // USE_FFMPEG
}
Example 4
int main(int argc, char **argv){	
	struct 				tm start_time_tm;
	int outputPorts;

	pthread_t *audioThreads;
	pthread_attr_t custom_sched_attr;	
	int fifo_max_prio = 0;
	int fifo_min_prio = 0;
	int fifo_mid_prio = 0;	
	struct sched_param fifo_param;

	syncbuffer = 0;
	normalbuffer = 0;

	if(argc < 3){
		printf("./<audio_decoder> udp://[IP]:[PORT] [ptsDelay] [Amount of channel] [Channel 0] [Channel n]\n");
		return 0;
	}

	ff_ctx = malloc(sizeof(ff_ctx_t));

	av_register_all();
	avformat_network_init();

	InitFF(ff_ctx, argv[1], argv[2]);
	

	if (avformat_open_input (&ff_ctx->avInputCtx, ff_ctx->udp_address, NULL , &ff_ctx->avDic) != 0) {
		printf ("Cloud not open UDP input stream at %s\n", ff_ctx->udp_address);
		return -1;
	}

	if (avformat_find_stream_info(ff_ctx->avInputCtx, NULL) < 0) {
		printf ("Cloud not get stream info\n");
		return -1;
	}

	if ((ff_ctx->audioIndexStream = av_find_best_stream(ff_ctx->avInputCtx, AVMEDIA_TYPE_AUDIO, -1, -1, &ff_ctx->avCodec, 0)) < 0) {
		printf ("No audio streams found\n");
		return -1;
	}

	printf ("Audio stream found at %d\n", ff_ctx->audioIndexStream);

	ff_ctx->avDicentry = av_dict_get(ff_ctx->avInputCtx->metadata, "service_name", NULL, 0);

	if(ff_ctx->avDicentry != NULL){
		strptime( ff_ctx->avDicentry->value, "%Y-%m-%d %H:%M:%S", &start_time_tm);
		start_time = mktime(&start_time_tm);
	}
	else {
		start_time = getSystemTime(NULL);
	}
	
	ff_ctx->avCodecCtx = ff_ctx->avInputCtx->streams[ff_ctx->audioIndexStream]->codec;
	ff_ctx->avCodec = avcodec_find_decoder(ff_ctx->avCodecCtx->codec_id);

	av_dump_format(ff_ctx->avInputCtx, 0, ff_ctx->udp_address, 0);

	if (avcodec_open2 (ff_ctx->avCodecCtx, ff_ctx->avCodec, NULL) < 0) {
		return -1;
	}

	outputPorts = ff_ctx->avCodecCtx->channels;
	InitBF(ff_ctx->avCodecCtx->channels, &to_audio_buffer, TO_AUDIO_BUFFER_SIZE);
	InitBF(ff_ctx->avCodecCtx->channels, &to_jack_buffer, TO_JACK_BUFFER_SIZE);

	//One thread for each channel
	audioThreads = malloc (sizeof(pthread_t)*outputPorts);

	pthread_attr_init(&custom_sched_attr);	
 	pthread_attr_setinheritsched(&custom_sched_attr, PTHREAD_INHERIT_SCHED /* PTHREAD_EXPLICIT_SCHED */);

 	//Options below only are applied when PTHREAD_EXPLICIT_SCHED is used!
 	pthread_attr_setscope(&custom_sched_attr, PTHREAD_SCOPE_SYSTEM );	
 	pthread_attr_setschedpolicy(&custom_sched_attr, SCHED_FIFO);	

 	fifo_max_prio = sched_get_priority_max(SCHED_FIFO);	
 	fifo_min_prio = sched_get_priority_min(SCHED_FIFO);	
 	fifo_mid_prio = (fifo_min_prio + fifo_max_prio) / 2;	
 	fifo_param.sched_priority = fifo_mid_prio;	
 	pthread_attr_setschedparam(&custom_sched_attr, &fifo_param);

 	int i;
 	threadArgs_t args[outputPorts];
 	for (i = 0; i < outputPorts; i++) {
 		args[i].channel = i;
 		args[i].process_block_size = AUDIO_PROCESS_BLOCK_SIZE;
 		if (pthread_create(&audioThreads[i], &custom_sched_attr, audioThreadFunction, &args[i])) {
 			printf ("Unable to create audio_thread %d\n", i);
 			return 0;
 		}
 	}
    
    av_init_packet(&ff_ctx->avPacket);

	static AVFrame frame;
	int frameFinished;
	int nb, ch;

	char samplebuf[30];
	av_get_sample_fmt_string (samplebuf, 30, ff_ctx->avCodecCtx->sample_fmt);
	printf ("Audio sample format is %s\n", samplebuf);

	audio_sync_sample_t **sync_samples;
	sync_samples = malloc (outputPorts*sizeof(audio_sync_sample_t*));

	long double initPTS, PTS, frame_pts_offset;
	unsigned long int frame_count, framePTS, sample_count;

	int sample_rate = ff_ctx->avCodecCtx->sample_rate;

	if (init_jack(&jackCtx, outputPorts)) {
		return 1;
	}

	while(av_read_frame (ff_ctx->avInputCtx, &ff_ctx->avPacket)>=0) {

		if(ff_ctx->avPacket.stream_index == ff_ctx->audioIndexStream ) {
			int contador = 0;
			long double time_1 = getSystemTime(NULL);

			int len = avcodec_decode_audio4 (ff_ctx->avCodecCtx, &frame, &frameFinished, &ff_ctx->avPacket);

			if (frameFinished) {
				int data_size = frame.nb_samples * av_get_bytes_per_sample(frame.format);
				int sync_size = frame.nb_samples * sizeof (audio_sync_sample_t);

				framePTS = av_frame_get_best_effort_timestamp (&frame);

				frame_count = framePTS - ff_ctx->avInputCtx->streams[ff_ctx->audioIndexStream]->start_time;
				frame_pts_offset = frame_count * av_q2d(ff_ctx->avInputCtx->streams[ff_ctx->audioIndexStream]->time_base) ;
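				// frame_count is in stream time_base ticks; av_q2d(time_base)
				// converts it to seconds since the stream's start_time.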

				initPTS = start_time + frame_pts_offset + ff_ctx->ptsDelay;

#ifdef _DBG_PTS
				printf ("frame decoded PTS %lu, frame count %lu, TB %d/%d, PTS %Lf\n", framePTS, frame_count, ff_ctx->avInputCtx->streams[ff_ctx->audioIndexStream]->time_base.num, ff_ctx->avInputCtx->streams[ff_ctx->audioIndexStream]->time_base.den, initPTS);
#endif

				//Build sync info data, sample timing
				for (ch = 0; ch < ff_ctx->avCodecCtx->channels; ch++) {
					sync_samples[ch] =  malloc(sync_size);

					PTS = initPTS;

					for (sample_count = 0; sample_count < frame.nb_samples; sample_count++) {
						PTS += (1/(float) sample_rate);
						sync_samples[ch][sample_count].samplePTS = PTS;
					}
				}

#ifdef _DBG_PTS
				printf ("ended samples PTS %Lf\n", PTS);
#endif
				for (ch = 0; ch < ff_ctx->avCodecCtx->channels; ch++) {
					ProduceSyncToBuffer (&to_audio_buffer, ch, (uint8_t*) sync_samples[ch], sync_size);
					ProduceAudioToBuffer(&to_audio_buffer, ch, (uint8_t*) frame.extended_data[ch], data_size);

					free(sync_samples[ch]);
				}
			}

	       	long double time_2 = getSystemTime(NULL);
	       	adaptativeSleep( (1/READ_INPUT_FRAME_RATE) - (time_2 - time_1));
		}
	}

	return 0;
}
Example 5
int main(int argc, char **argv)
{
    int ret;
    AVPacket packet;
    AVFrame *frame = av_frame_alloc();
    AVFrame *filt_frame = av_frame_alloc();
    int got_frame;

    if (!frame || !filt_frame) {
        perror("Could not allocate frame");
        exit(1);
    }
    if (argc != 2) {
        fprintf(stderr, "Usage: %s file\n", argv[0]);
        exit(1);
    }

    av_register_all();
    avfilter_register_all();

    if ((ret = open_input_file(argv[1])) < 0)
        goto end;
    if ((ret = init_filters(filter_descr)) < 0)
        goto end;

    /* read all packets */
    while (1) {
        if ((ret = av_read_frame(fmt_ctx, &packet)) < 0)
            break;

        if (packet.stream_index == video_stream_index) {
            got_frame = 0;
            ret = avcodec_decode_video2(dec_ctx, frame, &got_frame, &packet);
            if (ret < 0) {
                av_log(NULL, AV_LOG_ERROR, "Error decoding video\n");
                break;
            }

            if (got_frame) {
                frame->pts = av_frame_get_best_effort_timestamp(frame);

                /* push the decoded frame into the filtergraph */
                if (av_buffersrc_add_frame_flags(buffersrc_ctx, frame, AV_BUFFERSRC_FLAG_KEEP_REF) < 0) {
                    av_log(NULL, AV_LOG_ERROR, "Error while feeding the filtergraph\n");
                    break;
                }

                /* pull filtered frames from the filtergraph */
                while (1) {
                    ret = av_buffersink_get_frame(buffersink_ctx, filt_frame);
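                    /* AVERROR(EAGAIN) means the sink needs more input;
                     * AVERROR_EOF means the filtergraph is fully flushed. */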
                    if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
                        break;
                    if (ret < 0)
                        goto end;
                    display_frame(filt_frame, buffersink_ctx->inputs[0]->time_base);
                    av_frame_unref(filt_frame);
                }
                av_frame_unref(frame);
            }
        }
        av_packet_unref(&packet);
    }
end:
    avfilter_graph_free(&filter_graph);
    avcodec_close(dec_ctx);
    avformat_close_input(&fmt_ctx);
    av_frame_free(&frame);
    av_frame_free(&filt_frame);

    if (ret < 0 && ret != AVERROR_EOF) {
        fprintf(stderr, "Error occurred: %s\n", av_err2str(ret));
        exit(1);
    }

    exit(0);
}
Example 6
void loop() {
  int64_t dts_shift = AV_NOPTS_VALUE;

  uint32_t buf_size = 10240;
  char *buf = (char *)malloc(buf_size);
  while(1) {
    uint32_t len;
    int idx = 0;
    int read_bytes = 0;
    if((read_bytes = read1(in_fd, &len, 4)) != 4) {
      if(read_bytes == 0) {
        _exit(0);
      }
      error("Can't read input length: %d", read_bytes);
    }
    len = ntohl(len);
    if(len > buf_size) {
      buf_size = len;
      free(buf);
      buf = (char *)malloc(buf_size);
    }

    if((read_bytes = read1(in_fd, buf, len)) != len) error("Can't read %d bytes from input: %d", len, read_bytes);
    int version = 0;
    ei_decode_version(buf, &idx, &version);
    int command_idx = idx;

    int arity = 0;
    if(ei_decode_tuple_header(buf, &idx, &arity) == -1) error("must pass tuple");


    int t = 0;
    int size = 0;
    ei_get_type(buf, &idx, &t, &size);
    if(t != ERL_ATOM_EXT) error("first element must be atom");
    char command[MAXATOMLEN+1];
    ei_decode_atom(buf, &idx, command); arity--;


    if(!strcmp(command, "ping")) {
      pong();
      continue;
    }
    if(!strcmp(command, "exit")) {
      return;
    }
    if(!strcmp(command, "init_input")) {
      if(arity != 3) error("Must provide 3 arguments to init_input command");
      char content[1024];
      char codec[1024];
      if(ei_decode_atom(buf, &idx, content) == -1) error("Must provide content as an atom");
      if(ei_decode_atom(buf, &idx, codec) == -1) error("Must provide codec as an atom");

      int decoder_config_len = 0;
      ei_get_type(buf, &idx, &t, &decoder_config_len);
      if(t != ERL_BINARY_EXT) error("decoder config must be a binary");
      uint8_t *decoder_config = av_mallocz(decoder_config_len + FF_INPUT_BUFFER_PADDING_SIZE);
      long bin_len = 0;
      ei_decode_binary(buf, &idx, decoder_config, &bin_len);

      Track *t = NULL;
      if(!strcmp(content, "video")) {
        t = &input_video;
      } else if(!strcmp(content, "audio")) {
        t = &input_audio;
      } else {
        error("Unknown media content: '%s'", content);
      }
      if(t->codec) error("Double initialization of media '%s'", content);

      t->codec = avcodec_find_decoder_by_name(codec);
      t->ctx = avcodec_alloc_context3(t->codec);
      if(!t->codec || !t->ctx) 
        error("Unknown %s decoder '%s'", content, codec);
      t->ctx->time_base = (AVRational){1, 90};
      t->ctx->extradata_size = decoder_config_len;
      t->ctx->extradata = decoder_config;
      if(avcodec_open2(t->ctx, t->codec, NULL) < 0) 
        error("failed to allocate %s decoder", content);

      reply_atom("ready");
      continue;
    }

    if(!strcmp(command, "init_output")) {
      if(arity != 4) error("Must provide 4 arguments to init_output command");
      char content[1024];
      char codec[1024];
      if(ei_decode_atom(buf, &idx, content) == -1) error("Must provide content as an atom");
      if(ei_decode_atom(buf, &idx, codec) == -1) error("Must provide codec as an atom");

      long track_id = -1;
      if(ei_decode_long(buf, &idx, &track_id) == -1) error("track_id must be integer");
      if(track_id < 1 || track_id > MAX_OUTPUT_TRACKS+1) error("track_id must be from 1 to %d", MAX_OUTPUT_TRACKS+1);
      track_id--;

      Track *t = NULL;
      if(!strcmp(content, "audio")) {
        t = &output_audio[out_audio_count++];
      } else if(!strcmp(content, "video")) {
        t = &output_video[out_video_count++];
      } else {
        error("invalid_content '%s'", content);
      }
      t->track_id = track_id;

      t->codec = avcodec_find_encoder_by_name(codec);
      t->ctx = avcodec_alloc_context3(t->codec);
      if(!t->codec || !t->ctx) error("Unknown encoder '%s'", codec);

      AVCodecContext* ctx = t->ctx;
      AVDictionary *opts = NULL;


      int options_count = 0;
      if(ei_decode_list_header(buf, &idx, &options_count) < 0) error("options must be a proplist");
      while(options_count > 0) {
        int arity1 = 0;

        int t,s;
        ei_get_type(buf, &idx, &t, &s);
        if(t == ERL_NIL_EXT) {
          ei_skip_term(buf, &idx);
          break;
        }

        if(ei_decode_tuple_header(buf, &idx, &arity1) < 0) error("options must be a proper proplist");
        if(arity1 != 2) error("tuples in options proplist must be arity 2");

        char key[MAXATOMLEN];
        if(ei_decode_atom(buf, &idx, key) == 0) {

          if(!strcmp(key, "width")) {
            long w = 0;
            if(ei_decode_long(buf, &idx, &w) < 0) error("width must be integer");
            ctx->width = w;
            continue;
          }

          if(!strcmp(key, "height")) {
            long h = 0;
            if(ei_decode_long(buf, &idx, &h) < 0) error("height must be integer");
            ctx->height = h;
            continue;
          }

          if(!strcmp(key, "bitrate")) {
            long b = 0;
            if(ei_decode_long(buf, &idx, &b) < 0) error("bitrate must be integer");
            ctx->bit_rate = b;
            continue;
          }

          if(!strcmp(key, "sample_rate")) {
            long sr = 0;
            if(ei_decode_long(buf, &idx, &sr) < 0) error("sample_rate must be integer");
            ctx->sample_rate = sr;
            continue;
          }

          if(!strcmp(key, "channels")) {
            long ch = 0;
            if(ei_decode_long(buf, &idx, &ch) < 0) error("channels must be integer");
            ctx->channels = ch;
            continue;
          }

          fprintf(stderr, "Unknown key: '%s'\r\n", key);
          ei_skip_term(buf, &idx);
          continue;
        } else if(ei_decode_string(buf, &idx, key) == 0) {
          char value[MAXATOMLEN];
          if(ei_decode_string(buf, &idx, value) < 0) error("key-value must be strings");
          av_dict_set(&opts, key, value, 0);
        } else {
          error("Invalid options proplist");
        }
      }

      if(!strcmp(content, "video")) {
        ctx->pix_fmt = AV_PIX_FMT_YUV420P;
      }
      if(!strcmp(content, "audio")) {
        ctx->sample_fmt = AV_SAMPLE_FMT_S16;
        ctx->profile = FF_PROFILE_AAC_MAIN;
      }
      ctx->flags |= CODEC_FLAG_GLOBAL_HEADER;
      ctx->time_base = (AVRational){1,90};

      if(avcodec_open2(ctx, t->codec, &opts) < 0) error("failed to allocate video encoder");

      AVPacket config;
      av_init_packet(&config); // make sure the remaining packet fields are sane
      config.dts = config.pts = 0;
      config.flags = CODEC_FLAG_GLOBAL_HEADER;
      config.data = ctx->extradata;
      config.size = ctx->extradata_size;
      reply_avframe(&config, t->codec);      
      continue;
    }

    if(!strcmp(command, "video_frame")) {
      idx = command_idx;
      struct video_frame *fr = read_video_frame(buf, &idx);

      AVPacket packet;
      av_new_packet(&packet, fr->body.size);
      memcpy(packet.data, fr->body.data, fr->body.size);
      packet.size = fr->body.size;
      packet.dts = fr->dts*90;
      packet.pts = fr->pts*90;
      packet.stream_index = fr->track_id;

      // if(packet_size != pkt_size) error("internal error in reading frame body");

      if(fr->content == frame_content_audio) {
        if(!input_audio.ctx) error("input audio uninitialized");

        AVFrame *decoded_frame = avcodec_alloc_frame();
        int got_output = 0;
        int ret = avcodec_decode_audio4(input_audio.ctx, decoded_frame, &got_output, &packet);
        if(got_output) {
          reply_atom("ok");
        } else {
          error("Got: %d, %d\r\n", ret, got_output);
        }
        avcodec_free_frame(&decoded_frame); // release the decoded frame
        free(fr);
        continue;
      }

      if(fr->content == frame_content_video) {
        if(!input_video.ctx) error("input video uninitialized");
        AVFrame *decoded_frame = avcodec_alloc_frame();
        int could_decode = 0;
        int ret = avcodec_decode_video2(input_video.ctx, decoded_frame, &could_decode, &packet);
        if(ret < 0) {
          error("failed to decode video");
        }
        if(could_decode) {
          decoded_frame->pts = av_frame_get_best_effort_timestamp(decoded_frame);
          int sent_config = 0;

          AVPacket pkt;
          av_init_packet(&pkt);
          pkt.data = NULL;
          pkt.size = 0;

          int could_encode = 0;

          if(out_video_count <= 0) error("trying to transcode uninitialized video");
          if(avcodec_encode_video2(output_video[0].ctx, &pkt, decoded_frame, &could_encode) != 0) 
            error("Failed to encode h264");

          if(could_encode) {
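            // x264-style encoders delay output (B-frame lookahead), so the first
            // packets can carry negative dts; capture the first dts and shift all
            // later packets so output timestamps start at zero.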
            if(dts_shift == AV_NOPTS_VALUE) {
              dts_shift = -pkt.dts;
            }
            pkt.dts += dts_shift;
            reply_avframe(&pkt, output_video[0].codec);
          } else if(!sent_config) {
            reply_atom("ok");
          }
          avcodec_free_frame(&decoded_frame); // release the decoded frame
          free(fr);
          continue;
        } else {
          reply_atom("ok");
          avcodec_free_frame(&decoded_frame);
          free(fr);
          continue;
        }
      }

      error("Unknown content");
    }

    // AVCodecContext
    // AVPacket
    // AVFrame



    char *s = (char *)malloc(1024);
    ei_s_print_term(&s, buf, &command_idx);
    error("Unknown command: %s", s);
  }
}
Example 7
	const CGEAudioFrameBufferData* CGEVideoDecodeHandler::getCurrentAudioFrame()
	{
		if(m_context->pSwrCtx == nullptr)
		{
			if(m_context->pAudioStream->codec->sample_fmt != AV_SAMPLE_FMT_S16)
			{
				m_context->pSwrCtx = swr_alloc();
				if(m_context->pSwrCtx == nullptr)
				{
					CGE_LOG_ERROR("Allocate resampler context failed!\n");
					return nullptr;
				}

				auto ctx = m_context->pSwrCtx;
				auto c = m_context->pAudioStream->codec;

				av_opt_set_int       (ctx, "in_channel_count",   c->channels,       0);
				av_opt_set_int       (ctx, "in_sample_rate",     c->sample_rate,    0);
				av_opt_set_sample_fmt(ctx, "in_sample_fmt",      c->sample_fmt, 0);
				av_opt_set_int       (ctx, "out_channel_count",  1,       0);
				av_opt_set_int       (ctx, "out_sample_rate",    c->sample_rate,    0);
				av_opt_set_sample_fmt(ctx, "out_sample_fmt",     AV_SAMPLE_FMT_S16,     0);
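				// The output is fixed to mono, 16-bit interleaved samples at the
				// input rate; m_cachedAudioFrame below assumes exactly that layout.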

				int ret;

				if ((ret = swr_init(ctx)) < 0)
				{
					CGE_LOG_ERROR("Failed to initialize the resampling context: %d\n", ret);
					return nullptr;
				}

				m_context->maxDstNbSamples = (c->codec->capabilities & CODEC_CAP_VARIABLE_FRAME_SIZE) ?
				                             10000 : c->frame_size;

				ret = av_samples_alloc_array_and_samples(&m_context->dstSampleData, &m_context->dstSamplesLinesize, c->channels, m_context->maxDstNbSamples, c->sample_fmt, 0);

				if (ret < 0)
				{
					CGE_LOG_ERROR("Could not allocate destination samples\n");
					return nullptr;
				}

				m_context->dstSamplesSize = av_samples_get_buffer_size(NULL, c->channels, m_context->maxDstNbSamples, c->sample_fmt, 0);

			}
			else
			{
				CGE_LOG_ERROR("getCurrentAudioFrame: sample format is already S16, no resampler was created!\n");
				return nullptr; // swr_convert below must not run with a null context
			}
		}

		int ret = swr_convert(m_context->pSwrCtx, m_context->dstSampleData, m_context->maxDstNbSamples,
							  (const uint8_t**)m_context->pAudioFrame->data, m_context->pAudioFrame->nb_samples);

		if(ret <= 0)
			return nullptr;

		m_cachedAudioFrame.timestamp = av_frame_get_best_effort_timestamp(m_context->pAudioFrame);
		m_cachedAudioFrame.data = m_context->dstSampleData[0];
		m_cachedAudioFrame.nbSamples = ret; // samples actually written by swr_convert()
		m_cachedAudioFrame.channels = 1;//av_frame_get_channels(m_context->pAudioFrame);
		m_cachedAudioFrame.bytesPerSample = 2;
		m_cachedAudioFrame.linesize = m_context->dstSamplesSize;
		m_cachedAudioFrame.format = CGE_SAMPLE_FMT_S16;
		return &m_cachedAudioFrame;
	}
Example 8
u32 adecOpen(AudioDecoder* data)
{
	AudioDecoder& adec = *data;

	adec.adecCb = &Emu.GetCPU().AddThread(CPU_THREAD_PPU);

	u32 adec_id = cellAdec->GetNewId(data);

	adec.id = adec_id;

	adec.adecCb->SetName("Audio Decoder[" + std::to_string(adec_id) + "] Callback");

	thread t("Audio Decoder[" + std::to_string(adec_id) + "] Thread", [&]()
	{
		cellAdec->Notice("Audio Decoder thread started");

		AdecTask& task = adec.task;

		while (true)
		{
			if (Emu.IsStopped())
			{
				break;
			}

			if (!adec.job.GetCountUnsafe() && adec.is_running)
			{
				std::this_thread::sleep_for(std::chrono::milliseconds(1));
				continue;
			}

			/*if (adec.frames.GetCount() >= 50)
			{
				std::this_thread::sleep_for(std::chrono::milliseconds(1));
				continue;
			}*/

			if (!adec.job.Pop(task))
			{
				break;
			}

			switch (task.type)
			{
			case adecStartSeq:
			{
				// TODO: reset data
				cellAdec->Warning("adecStartSeq:");

				adec.reader.addr = 0;
				adec.reader.size = 0;
				adec.reader.init = false;
				if (adec.reader.rem) free(adec.reader.rem);
				adec.reader.rem = nullptr;
				adec.reader.rem_size = 0;
				adec.is_running = true;
				adec.just_started = true;
			}
			break;

			case adecEndSeq:
			{
				// TODO: finalize
				cellAdec->Warning("adecEndSeq:");

				/*Callback cb;
				cb.SetAddr(adec.cbFunc);
				cb.Handle(adec.id, CELL_ADEC_MSG_TYPE_SEQDONE, CELL_OK, adec.cbArg);
				cb.Branch(true); // ???*/
				adec.adecCb->ExecAsCallback(adec.cbFunc, true, adec.id, CELL_ADEC_MSG_TYPE_SEQDONE, CELL_OK, adec.cbArg);

				adec.is_running = false;
				adec.just_finished = true;
			}
			break;

			case adecDecodeAu:
			{
				int err = 0;

				adec.reader.addr = task.au.addr;
				adec.reader.size = task.au.size;
				//LOG_NOTICE(HLE, "Audio AU: size = 0x%x, pts = 0x%llx", task.au.size, task.au.pts);

				if (adec.just_started)
				{
					adec.first_pts = task.au.pts;
					adec.last_pts = task.au.pts - 0x10000; // hack
				}

				struct AVPacketHolder : AVPacket
				{
					AVPacketHolder(u32 size)
					{
						av_init_packet(this);

						if (size)
						{
							data = (u8*)av_calloc(1, size + FF_INPUT_BUFFER_PADDING_SIZE);
							this->size = size + FF_INPUT_BUFFER_PADDING_SIZE;
						}
						else
						{
							data = NULL;
							size = 0;
						}
					}

					~AVPacketHolder()
					{
						av_free(data);
						//av_free_packet(this);
					}

				} au(0);

				/*{
					wxFile dump;
					dump.Open(wxString::Format("audio pts-0x%llx.dump", task.au.pts), wxFile::write);
					u8* buf = (u8*)malloc(task.au.size);
					if (Memory.CopyToReal(buf, task.au.addr, task.au.size)) dump.Write(buf, task.au.size);
					free(buf);
					dump.Close();
				}*/

				if (adec.just_started && adec.just_finished)
				{
					avcodec_flush_buffers(adec.ctx);
					adec.reader.init = true;
					adec.just_finished = false;
					adec.just_started = false;
				}
				else if (adec.just_started) // deferred initialization
				{
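					// The OMA demuxer pulls its bytes through the custom reader state
					// (adec.reader), so the format/codec contexts can only be set up
					// once the first access unit has arrived.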
					err = avformat_open_input(&adec.fmt, NULL, av_find_input_format("oma"), NULL);
					if (err)
					{
						cellAdec->Error("adecDecodeAu: avformat_open_input() failed");
						Emu.Pause();
						break;
					}
					AVCodec* codec = avcodec_find_decoder(AV_CODEC_ID_ATRAC3P); // ???
					if (!codec)
					{
						cellAdec->Error("adecDecodeAu: avcodec_find_decoder() failed");
						Emu.Pause();
						break;
					}
					//err = avformat_find_stream_info(adec.fmt, NULL);
					//if (err)
					//{
					//	cellAdec->Error("adecDecodeAu: avformat_find_stream_info() failed");
					//	Emu.Pause();
					//	break;
					//}
					//if (!adec.fmt->nb_streams)
					//{
					//	cellAdec->Error("adecDecodeAu: no stream found");
					//	Emu.Pause();
					//	break;
					//}
					if (!avformat_new_stream(adec.fmt, codec))
					{
						cellAdec->Error("adecDecodeAu: avformat_new_stream() failed");
						Emu.Pause();
						break;
					}
					adec.ctx = adec.fmt->streams[0]->codec; // TODO: check data

					AVDictionary* opts = nullptr;
					av_dict_set(&opts, "refcounted_frames", "1", 0);
					{
						std::lock_guard<std::mutex> lock(g_mutex_avcodec_open2);
						// not multithread-safe (???)
						err = avcodec_open2(adec.ctx, codec, &opts);
					}
					if (err)
					{
						cellAdec->Error("adecDecodeAu: avcodec_open2() failed");
						Emu.Pause();
						break;
					}
					adec.just_started = false;
				}

				bool last_frame = false;

				while (true)
				{
					if (Emu.IsStopped())
					{
						cellAdec->Warning("adecDecodeAu: aborted");
						return;
					}

					/*if (!adec.ctx) // fake
					{
						AdecFrame frame;
						frame.pts = task.au.pts;
						frame.auAddr = task.au.addr;
						frame.auSize = task.au.size;
						frame.userdata = task.au.userdata;
						frame.size = 4096;
						frame.data = nullptr;
						adec.frames.Push(frame);

						adec.adecCb->ExecAsCallback(adec.cbFunc, false, adec.id, CELL_ADEC_MSG_TYPE_PCMOUT, CELL_OK, adec.cbArg);

						break;
					}*/

					last_frame = av_read_frame(adec.fmt, &au) < 0;
					if (last_frame)
					{
						//break;
						av_free(au.data);
						au.data = NULL;
						au.size = 0;
					}

					struct AdecFrameHolder : AdecFrame
					{
						AdecFrameHolder()
						{
							data = av_frame_alloc();
						}

						~AdecFrameHolder()
						{
							if (data)
							{
								av_frame_unref(data);
								av_frame_free(&data);
							}
						}

					} frame;

					if (!frame.data)
					{
						cellAdec->Error("adecDecodeAu: av_frame_alloc() failed");
						Emu.Pause();
						break;
					}

					int got_frame = 0;

					int decode = avcodec_decode_audio4(adec.ctx, frame.data, &got_frame, &au);

					if (decode <= 0)
					{
						if (!last_frame && decode < 0)
						{
							cellAdec->Error("adecDecodeAu: AU decoding error(0x%x)", decode);
						}
						if (!got_frame && adec.reader.size == 0) break;
					}

					if (got_frame)
					{
						u64 ts = av_frame_get_best_effort_timestamp(frame.data);
						if (ts != AV_NOPTS_VALUE)
						{
							frame.pts = ts/* - adec.first_pts*/;
							adec.last_pts = frame.pts;
						}
						else
						{
							adec.last_pts += ((u64)frame.data->nb_samples) * 90000 / 48000;
							frame.pts = adec.last_pts;
						}
						//frame.pts = adec.last_pts;
						//adec.last_pts += ((u64)frame.data->nb_samples) * 90000 / 48000; // ???
						frame.auAddr = task.au.addr;
						frame.auSize = task.au.size;
						frame.userdata = task.au.userdata;
						frame.size = frame.data->nb_samples * frame.data->channels * sizeof(float);

						if (frame.data->format != AV_SAMPLE_FMT_FLTP)
						{
							cellAdec->Error("adecDecodeaAu: unsupported frame format(%d)", frame.data->format);
							Emu.Pause();
							break;
						}
						if (frame.data->channels != 2)
						{
							cellAdec->Error("adecDecodeAu: unsupported channel count (%d)", frame.data->channels);
							Emu.Pause();
							break;
						}

						//LOG_NOTICE(HLE, "got audio frame (pts=0x%llx, nb_samples=%d, ch=%d, sample_rate=%d, nbps=%d)",
							//frame.pts, frame.data->nb_samples, frame.data->channels, frame.data->sample_rate,
							//av_get_bytes_per_sample((AVSampleFormat)frame.data->format));

						adec.frames.Push(frame);
						frame.data = nullptr; // to prevent destruction

						/*Callback cb;
						cb.SetAddr(adec.cbFunc);
						cb.Handle(adec.id, CELL_ADEC_MSG_TYPE_PCMOUT, CELL_OK, adec.cbArg);
						cb.Branch(false);*/
						adec.adecCb->ExecAsCallback(adec.cbFunc, false, adec.id, CELL_ADEC_MSG_TYPE_PCMOUT, CELL_OK, adec.cbArg);
					}
				}
						
				/*Callback cb;
				cb.SetAddr(adec.cbFunc);
				cb.Handle(adec.id, CELL_ADEC_MSG_TYPE_AUDONE, task.au.auInfo_addr, adec.cbArg);
				cb.Branch(false);*/
				adec.adecCb->ExecAsCallback(adec.cbFunc, false, adec.id, CELL_ADEC_MSG_TYPE_AUDONE, task.au.auInfo_addr, adec.cbArg);
			}
			break;

			case adecClose:
			{
				adec.is_finished = true;
				cellAdec->Notice("Audio Decoder thread ended");
				return;
			}

			default:
				cellAdec->Error("Audio Decoder thread error: unknown task(%d)", task.type);
			}
		}
		adec.is_finished = true;
		cellAdec->Warning("Audio Decoder thread aborted");
	});

	t.detach();

	return adec_id;
}
Example 9
int main()
{
	int ret=0, check_yuv = 0;
	AVPacket pkt;
	AVFrame *frame = NULL;
	enum AVMediaType mediaType;
	unsigned int streamIdx;
	unsigned int i;
	int gotFrame;

	check_yuv = check_file();

	//register all formats and codecs
	av_register_all();

	if (open_input_file(check_yuv) < 0) exit(1);
	if (open_output_file() < 0) exit(1);

	//initialize packet, set data to NULL
	av_init_packet(&pkt);
	pkt.data = NULL;
	pkt.size = 0;

	//read all packets
	while (av_read_frame(inFmtCtx, &pkt) >= 0)
	{
		streamIdx = pkt.stream_index;
		mediaType = inFmtCtx->streams[streamIdx]->codec->codec_type;
		av_log(NULL, AV_LOG_DEBUG, "Demuxer gave frame of stream_index %u\n", streamIdx);
		av_log(NULL, AV_LOG_DEBUG, "Going to reencode \n");

		frame = av_frame_alloc();

		if (!frame)
		{
			ret = AVERROR(ENOMEM);
			break;
		}

		av_packet_rescale_ts(&pkt, inFmtCtx->streams[streamIdx]->time_base, inFmtCtx->streams[streamIdx]->codec->time_base);

		gotFrame = 0; // reset so a stale value from a previous packet can't leak through
		if (mediaType == AVMEDIA_TYPE_VIDEO) {
			ret = avcodec_decode_video2(inFmtCtx->streams[streamIdx]->codec, frame, &gotFrame, &pkt);

			if (ret < 0)
			{
				av_frame_free(&frame);
				av_log(NULL, AV_LOG_ERROR, "Decoding failed\n");
				break;
			}
		}

		if (gotFrame)
		{
			frame->pts = av_frame_get_best_effort_timestamp(frame);
			ret = encode_write_frame(frame, streamIdx, &gotFrame);
			//av_frame_free(&frame);
			if (ret < 0)
			{
				av_log(NULL, AV_LOG_ERROR, "Error Encoding Frame");
				exit(1);
			}
		}
		else av_frame_free(&frame);

		av_free_packet(&pkt);
	}

	//flush encoders
	for (i = 0; i < inFmtCtx->nb_streams; i++)
	{
		if (inFmtCtx->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO) {
			ret = flush_encoder(i);
			if (ret < 0)
			{
				av_log(NULL, AV_LOG_ERROR, "Flushing encoder failed\n");
				exit(1);
			}
		}
	}


	//free all
	av_write_trailer(outFmtCtx);
	av_free_packet(&pkt);
	//av_frame_free(&frame);
	for (i = 0; i < inFmtCtx->nb_streams; i++)
	{
		avcodec_close(inFmtCtx->streams[i]->codec);
		if (outFmtCtx && outFmtCtx->nb_streams > i && outFmtCtx->streams[i] && outFmtCtx->streams[i]->codec)
			avcodec_close(outFmtCtx->streams[i]->codec);
	}
	avformat_close_input(&inFmtCtx);
	if (outFmtCtx && !(outFmtCtx->oformat->flags & AVFMT_NOFILE))
		avio_closep(&outFmtCtx->pb);
	avformat_free_context(outFmtCtx);

	if (ret < 0)
		av_log(NULL, AV_LOG_ERROR, "Error occurred \n");

	return 0;


}
Example 10
size_t decode_video(PlayerCtx *ctx, AVPacket *packet, int new_packet, int32_t seekid)
{
	int decoded;
	int got_frame;

	if (!new_packet)
		fprintf(stderr, "warn: multi-frame video packets, pts might be inaccurate\n");

	ctx->v_pkt_pts = packet->pts;

	ctx->v_frame = avcodec_alloc_frame();
	decoded = avcodec_decode_video2(ctx->v_codec_ctx, ctx->v_frame, &got_frame, packet);
	if (decoded < 0) {
		fprintf(stderr, "Error while decoding video frame\n");
		decoded = packet->size;
		goto fail;
	}
	if (!got_frame)
		goto fail;


	// The pts magic guesswork
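	// (ffplay-style heuristic: count how often dts and the reordered frame pts
	// go non-monotonic, then trust whichever field has misbehaved less often.)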
	int64_t pts = AV_NOPTS_VALUE;
	int64_t frame_pts = AV_NOPTS_VALUE;
	frame_pts = av_frame_get_best_effort_timestamp(ctx->v_frame);

	if (packet->dts != AV_NOPTS_VALUE) {
		ctx->v_faulty_dts += packet->dts <= ctx->v_last_dts;
		ctx->v_last_dts = packet->dts;
	}
	if (frame_pts != AV_NOPTS_VALUE) {
		ctx->v_faulty_pts += frame_pts <= ctx->v_last_pts;
		ctx->v_last_pts = frame_pts;
	}
	if ((ctx->v_faulty_pts <= ctx->v_faulty_dts || packet->dts == AV_NOPTS_VALUE)
		&& frame_pts != AV_NOPTS_VALUE)
		pts = frame_pts;
	else
		pts = packet->dts;

	if (pts == AV_NOPTS_VALUE) {
		if (ctx->v_last_pts != AV_NOPTS_VALUE) {
			pts = ctx->v_last_pts++;
		} else if (ctx->v_last_dts != AV_NOPTS_VALUE) {
			pts = ctx->v_last_dts++;
		} else {
			pts = 0;
		}
	}

	pthread_mutex_lock(&ctx->v_buf_mutex);
	while (((ctx->v_buf_put + 1) % ctx->v_buf_len) == ctx->v_buf_get) {
		printf("Wait for space in video buffer\n");
		pthread_cond_wait(&ctx->v_buf_not_full, &ctx->v_buf_mutex);
	}
	pthread_mutex_unlock(&ctx->v_buf_mutex);

	VideoFrame *frame = ctx->v_bufs[ctx->v_buf_put];
	if (!frame) {
		frame = malloc(sizeof(VideoFrame));
		frame->stride = ctx->v_frame->linesize[0];
		frame->data_size = frame->stride * ctx->height;
		frame->data = malloc(frame->data_size);
		ctx->v_bufs[ctx->v_buf_put] = frame;
	}

	if (frame->stride != ctx->v_frame->linesize[0]) {
		fprintf(stderr, "stride mismatch: %d != %d\n", (int)frame->stride, ctx->v_frame->linesize[0]);
		goto fail;
	}
	fprintf(stderr, "pix fmt: %d\n", ctx->v_codec_ctx->pix_fmt);

	if (!ctx->v_sws_ctx) {
		ctx->v_sws_ctx = sws_getContext(ctx->width, ctx->height, ctx->v_codec_ctx->pix_fmt,
										ctx->width, ctx->height, PIX_FMT_GRAY8, SWS_BICUBIC,
										NULL, NULL, NULL);
	}

	AVPicture pict;
	pict.data[0] = frame->data;
	pict.linesize[0] = frame->stride;

	sws_scale(ctx->v_sws_ctx, (const uint8_t* const*)ctx->v_frame->data, ctx->v_frame->linesize, 0, ctx->height, pict.data, pict.linesize);

	frame->pts = av_q2d(ctx->v_stream->time_base) * pts;
	frame->seekid = seekid;

	printf("Put frame %d (pts:%f seekid:%d)\n", ctx->v_buf_put, frame->pts, seekid);
	pthread_mutex_lock(&ctx->v_buf_mutex);
	if (++ctx->v_buf_put == ctx->v_buf_len)
		ctx->v_buf_put = 0;
	pthread_cond_signal(&ctx->v_buf_not_empty);
	pthread_mutex_unlock(&ctx->v_buf_mutex);

fail:
	avcodec_free_frame(&ctx->v_frame);
	ctx->v_frame = NULL;
	return decoded;
}
Example 11
bool CDVDVideoCodecFFmpeg::GetPictureCommon(DVDVideoPicture* pDvdVideoPicture)
{
  if (!m_pFrame)
    return false;

  pDvdVideoPicture->iWidth = m_pFrame->width;
  pDvdVideoPicture->iHeight = m_pFrame->height;

  /* crop up to 10 pixels if the demuxer asked for it */
  if(m_pCodecContext->coded_width  && m_pCodecContext->coded_width  < (int)pDvdVideoPicture->iWidth
                                   && m_pCodecContext->coded_width  > (int)pDvdVideoPicture->iWidth  - 10)
    pDvdVideoPicture->iWidth = m_pCodecContext->coded_width;

  if(m_pCodecContext->coded_height && m_pCodecContext->coded_height < (int)pDvdVideoPicture->iHeight
                                   && m_pCodecContext->coded_height > (int)pDvdVideoPicture->iHeight - 10)
    pDvdVideoPicture->iHeight = m_pCodecContext->coded_height;

  double aspect_ratio;

  /* use variable in the frame */
  AVRational pixel_aspect = m_pFrame->sample_aspect_ratio;

  if (pixel_aspect.num == 0)
    aspect_ratio = 0;
  else
    aspect_ratio = av_q2d(pixel_aspect) * pDvdVideoPicture->iWidth / pDvdVideoPicture->iHeight;

  if (aspect_ratio <= 0.0)
    aspect_ratio = (float)pDvdVideoPicture->iWidth / (float)pDvdVideoPicture->iHeight;

  /* XXX: we suppose the screen has a 1.0 pixel ratio */ // CDVDVideo will compensate it.
  pDvdVideoPicture->iDisplayHeight = pDvdVideoPicture->iHeight;
  pDvdVideoPicture->iDisplayWidth  = ((int)RINT(pDvdVideoPicture->iHeight * aspect_ratio)) & -3;
  if (pDvdVideoPicture->iDisplayWidth > pDvdVideoPicture->iWidth)
  {
    pDvdVideoPicture->iDisplayWidth  = pDvdVideoPicture->iWidth;
    pDvdVideoPicture->iDisplayHeight = ((int)RINT(pDvdVideoPicture->iWidth / aspect_ratio)) & -3;
  }


  pDvdVideoPicture->pts = DVD_NOPTS_VALUE;

  AVDictionaryEntry * entry = av_dict_get(av_frame_get_metadata(m_pFrame), "stereo_mode", NULL, 0);
  if(entry && entry->value)
  {
    strncpy(pDvdVideoPicture->stereo_mode, (const char*)entry->value, sizeof(pDvdVideoPicture->stereo_mode));
    pDvdVideoPicture->stereo_mode[sizeof(pDvdVideoPicture->stereo_mode)-1] = '\0';
  }

  pDvdVideoPicture->iRepeatPicture = 0.5 * m_pFrame->repeat_pict;
  pDvdVideoPicture->iFlags = DVP_FLAG_ALLOCATED;
  pDvdVideoPicture->iFlags |= m_pFrame->interlaced_frame ? DVP_FLAG_INTERLACED : 0;
  pDvdVideoPicture->iFlags |= m_pFrame->top_field_first ? DVP_FLAG_TOP_FIELD_FIRST: 0;

  if (m_codecControlFlags & DVD_CODEC_CTRL_DROP)
    pDvdVideoPicture->iFlags |= DVP_FLAG_DROPPED;

  pDvdVideoPicture->chroma_position = m_pCodecContext->chroma_sample_location;
  pDvdVideoPicture->color_primaries = m_pCodecContext->color_primaries;
  pDvdVideoPicture->color_transfer = m_pCodecContext->color_trc;
  pDvdVideoPicture->color_matrix = m_pCodecContext->colorspace;
  if(m_pCodecContext->color_range == AVCOL_RANGE_JPEG
  || m_pCodecContext->pix_fmt     == AV_PIX_FMT_YUVJ420P)
    pDvdVideoPicture->color_range = 1;
  else
    pDvdVideoPicture->color_range = 0;

  int qscale_type;
  pDvdVideoPicture->qp_table = av_frame_get_qp_table(m_pFrame, &pDvdVideoPicture->qstride, &qscale_type);

  switch (qscale_type)
  {
  case FF_QSCALE_TYPE_MPEG1:
    pDvdVideoPicture->qscale_type = DVP_QSCALE_MPEG1;
    break;
  case FF_QSCALE_TYPE_MPEG2:
    pDvdVideoPicture->qscale_type = DVP_QSCALE_MPEG2;
    break;
  case FF_QSCALE_TYPE_H264:
    pDvdVideoPicture->qscale_type = DVP_QSCALE_H264;
    break;
  default:
    pDvdVideoPicture->qscale_type = DVP_QSCALE_UNKNOWN;
  }

  if (pDvdVideoPicture->iRepeatPicture)
    pDvdVideoPicture->dts = DVD_NOPTS_VALUE;
  else
    pDvdVideoPicture->dts = m_dts;

  m_dts = DVD_NOPTS_VALUE;

  int64_t bpts = av_frame_get_best_effort_timestamp(m_pFrame);
  if (bpts != AV_NOPTS_VALUE)
  {
    pDvdVideoPicture->pts = (double)bpts * DVD_TIME_BASE / AV_TIME_BASE;
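    // A best-effort pts equal to the previous picture's means a duplicate
    // frame: flag it as a repeat and invalidate pts/dts.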
    if (pDvdVideoPicture->pts == m_decoderPts)
    {
      pDvdVideoPicture->iRepeatPicture = -0.5;
      pDvdVideoPicture->pts = DVD_NOPTS_VALUE;
      pDvdVideoPicture->dts = DVD_NOPTS_VALUE;
    }
  }
  else
    pDvdVideoPicture->pts = DVD_NOPTS_VALUE;

  if (pDvdVideoPicture->pts != DVD_NOPTS_VALUE)
    m_decoderPts = pDvdVideoPicture->pts;

  if (m_requestSkipDeint)
  {
    pDvdVideoPicture->iFlags |= DVD_CODEC_CTRL_SKIPDEINT;
    m_skippedDeint++;
  }

  m_requestSkipDeint = false;
  pDvdVideoPicture->iFlags |= m_codecControlFlags;

  if (!m_started)
    pDvdVideoPicture->iFlags |= DVP_FLAG_DROPPED;

  return true;
}
Example 12
JNIEXPORT jlong JNICALL Java_bits_jav_codec_JavFrame_nBestEffortTimestamp
(JNIEnv* env, jclass clazz, jlong pointer)
{
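  // Reinterpret the Java long as a native AVFrame* and return the frame's
  // best-effort timestamp to Java as a jlong.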
  AVFrame* frame = *(AVFrame**)&pointer;
  return av_frame_get_best_effort_timestamp( frame );
}
Example 13
int main(int argc, char** argv){
	int quadrant_line, quadrant_column;
	
	char *videoFileName = argv[1];
	char quadFileName[64];

	int i = 0, k, j;

	long unsigned int inc = 0;
	long unsigned int incaudio = 0;

	int videoStreamIndex;
	int audioStreamIndex= -1;
	int frameFinished, gotPacket;

	AVDictionary	*codecOptions = NULL;
	
	UDP_PTSframe_t PTS_frame;

	struct tm *start_time_tm;
	char start_time_str[64];
	long unsigned int start_time;
	time_t start_timer_t;
	
	//Crop env
	int tam_quad;
	int frist = 1, marginLeft = 0, marginTop = 0;
	int width , height;

    if(argc < 4){
        usage();    
        return -1;
    }

    signal (SIGTERM, handlerToFinish);
	signal (SIGINT, handlerToFinish);

    tam_quad = sqrt(amount_of_quadrants);
    quadrant_line = atoi(argv[2]);
    quadrant_column = atoi(argv[3]);
    amount_of_quadrants = (quadrant_line * quadrant_column) + 1;

    strcpy (quadFileName, argv[4]);

    //Allocate output stream contexts
    ff_output = malloc (sizeof(ff_output_t) * amount_of_quadrants);

	av_register_all();
	avformat_network_init();

	//Initialize Input
	if (avformat_open_input (&ff_input.formatCtx, videoFileName, NULL, NULL) != 0) {
		printf ("Cold not open input video file at %s\n", videoFileName);
		return -1;
	}

	if (avformat_find_stream_info(ff_input.formatCtx, NULL) < 0) {
		printf ("Cold not get stream info\n");
		return -1;
	}

	av_dump_format(ff_input.formatCtx, 0, videoFileName, 0);

	videoStreamIndex = av_find_best_stream(ff_input.formatCtx, AVMEDIA_TYPE_VIDEO, -1, -1, &ff_input.encoder, 0);
	if (videoStreamIndex < 0) {
		printf ("no video streams found\n");
		return -1;
	}

	audioStreamIndex = av_find_best_stream(ff_input.formatCtx, AVMEDIA_TYPE_AUDIO, -1, -1, &ff_input.audioencoder, 0);
    if (audioStreamIndex < 0) {
        printf ("no audio streams found\n");
        return -1;
    }
    printf ("VIDEO ST %d, AUDIO ST %d\n", videoStreamIndex, audioStreamIndex);

    ff_input.audiocodecCtx = ff_input.formatCtx->streams[audioStreamIndex]->codec;
	ff_input.codecCtx = ff_input.formatCtx->streams[videoStreamIndex]->codec;

	if (avcodec_open2 (ff_input.audiocodecCtx, ff_input.audioencoder, NULL) < 0) {
        printf ("Could not open input codec\n");
        return -1;
    }

	if (avcodec_open2 (ff_input.codecCtx, ff_input.encoder, NULL) < 0) {
		printf ("Could not open input codec\n");
		return -1;
	}

	//Get system time and append as metadata
	getSystemTime (&PTS_frame.frameTimeVal); //Must be the same for all output contexts
	start_time = PTS_frame.frameTimeVal.tv_sec;
	start_timer_t = (time_t) start_time;
	start_time_tm = localtime (&start_timer_t);
	strftime(start_time_str, sizeof start_time_str, "%Y-%m-%d %H:%M:%S", start_time_tm);

	if (avformat_alloc_output_context2(&formatCtx, NULL, AV_OUTPUT_FORMAT, quadFileName) < 0) {
			printf ("could not create output context\n");
			return -1;
	}

	//Initialize Video Output Streams
	for (i = 0; i < amount_of_quadrants - 1; i++) {

		ff_output[i].outStream = avformat_new_stream (formatCtx, NULL);
		if (ff_output[i].outStream == NULL) {
			printf ("Could not create output stream\n");
			return -1;
		}

		ff_output[i].outStream->id = formatCtx->nb_streams - 1;

		ff_output[i].codecCtx = ff_output[i].outStream->codec;
		ff_output[i].encoder = avcodec_find_encoder_by_name (AV_OUTPUT_CODEC);
		if (ff_output[i].encoder == NULL) {
			printf ("Codec %s not found..\n", AV_OUTPUT_CODEC);
			return -1;
		}

		//Sliced sizes
		width = ff_input.codecCtx->width/quadrant_column;
		height = ff_input.codecCtx->height/quadrant_line;

		ff_output[i].codecCtx->codec_type 	= AVMEDIA_TYPE_VIDEO;
		ff_output[i].codecCtx->height 		= height;
		ff_output[i].codecCtx->width 		= width;
		ff_output[i].codecCtx->pix_fmt		= ff_input.codecCtx->pix_fmt;

		if (strcmp (AV_OUTPUT_CODEC, "libvpx") == 0) {
			//Maintain input aspect ratio for codec and stream info, and b_frames for codec info
			ff_output[i].codecCtx->sample_aspect_ratio = ff_input.codecCtx->sample_aspect_ratio;
			ff_output[i].codecCtx->max_b_frames = ff_input.codecCtx->max_b_frames;
			ff_output[i].outStream->sample_aspect_ratio = ff_output[i].codecCtx->sample_aspect_ratio;

			//Set custom BIT RATE and THREADs 
			ff_output[i].codecCtx->bit_rate 	= AV_OUTPUT_BITRATE;
			ff_output[i].codecCtx->thread_count = AV_OUTPUT_THREADS;
			ff_output[i].codecCtx->thread_type  = AV_OUTPUT_THREAD_TYPE;

			//Set custom timebase for codec and streams
			ff_output[i].codecCtx->time_base.num = 1;
			ff_output[i].codecCtx->time_base.den = AV_FRAMERATE;
			ff_output[i].outStream->time_base.num = 1;
			ff_output[i].outStream->time_base.den = 10000;			
		}

		if (strcmp (AV_OUTPUT_CODEC, "libx264") == 0) {
			// ff_output[i].codecCtx->profile = FF_PROFILE_H264_MAIN;
			// av_dict_set(&codecOptions, "profile","main",0);

			//Set custom BIT RATE and THREADs 
			ff_output[i].codecCtx->bit_rate 	= AV_OUTPUT_BITRATE;
			ff_output[i].codecCtx->thread_count = AV_OUTPUT_THREADS;
			ff_output[i].codecCtx->thread_type  = AV_OUTPUT_THREAD_TYPE;

			ff_output[i].codecCtx->bit_rate_tolerance = 0;
			ff_output[i].codecCtx->rc_max_rate = 0;
			ff_output[i].codecCtx->rc_buffer_size = 0;
			ff_output[i].codecCtx->gop_size = 40;
			ff_output[i].codecCtx->max_b_frames = 3;
			ff_output[i].codecCtx->b_frame_strategy = 1;
			ff_output[i].codecCtx->coder_type = 1;
			ff_output[i].codecCtx->me_cmp = 1;
			ff_output[i].codecCtx->me_range = 16;
			ff_output[i].codecCtx->qmin = 10;
			ff_output[i].codecCtx->qmax = 51;
			ff_output[i].codecCtx->scenechange_threshold = 40;
			ff_output[i].codecCtx->flags |= CODEC_FLAG_LOOP_FILTER;
			ff_output[i].codecCtx->me_method = ME_HEX;
			ff_output[i].codecCtx->me_subpel_quality = 5;
			ff_output[i].codecCtx->i_quant_factor = 0.71;
			ff_output[i].codecCtx->qcompress = 0.6;
			ff_output[i].codecCtx->max_qdiff = 4;

			//Set custom timebase for codec and streams
			ff_output[i].codecCtx->time_base.num = 1;
			ff_output[i].codecCtx->time_base.den = 24;
			ff_output[i].outStream->time_base.num = 1;
			ff_output[i].outStream->time_base.den = 90000;		
		}

		formatCtx->start_time_realtime = start_time;
		av_dict_set (&formatCtx->metadata, "service_name", start_time_str, 0);
		av_dict_set (&formatCtx->metadata, "creation_time", start_time_str, 0);

		//Open codec
		if (avcodec_open2(ff_output[i].codecCtx, ff_output[i].encoder, &codecOptions)) {
			printf ("Could not open output codec...\n");
			return -1;
		}
	}

	//Initializing Audio Output
	i = amount_of_quadrants-1; //Last stream
	ff_output[i].outStream = avformat_new_stream (formatCtx, NULL);
	if (ff_output[i].outStream == NULL) {
		printf ("Could not create output stream\n");
		return -1;
	}

	ff_output[i].outStream->id = formatCtx->nb_streams - 1;

	ff_output[i].codecCtx = ff_output[i].outStream->codec;
	ff_output[i].encoder = avcodec_find_encoder (ff_input.audiocodecCtx->codec_id);
	if (ff_output[i].encoder == NULL) {
		printf ("Codec %s not found..\n", AUDIO_OUTPUT_CODEC);
		return -1;
	}
  
    ff_output[i].codecCtx = ff_output[amount_of_quadrants-1].outStream->codec;
    ff_output[i].codecCtx->codec_id = ff_input.audiocodecCtx->codec_id;
    ff_output[i].codecCtx->codec_type = AVMEDIA_TYPE_AUDIO;
    ff_output[i].codecCtx->sample_fmt = ff_input.audiocodecCtx->sample_fmt;
    ff_output[i].codecCtx->sample_rate = ff_input.audiocodecCtx->sample_rate;
    ff_output[i].codecCtx->channel_layout = ff_input.audiocodecCtx->channel_layout;
    ff_output[i].codecCtx->channels = av_get_channel_layout_nb_channels(ff_output[amount_of_quadrants-1].codecCtx->channel_layout);
    ff_output[i].codecCtx->bit_rate = ff_input.audiocodecCtx->bit_rate;  
    ff_output[i].codecCtx->sample_aspect_ratio = ff_input.audiocodecCtx->sample_aspect_ratio;
    ff_output[i].codecCtx->max_b_frames = ff_input.audiocodecCtx->max_b_frames;
    ff_output[i].outStream->sample_aspect_ratio = ff_output[i].codecCtx->sample_aspect_ratio;

    ff_output[i].outStream->time_base.num = ff_input.formatCtx->streams[audioStreamIndex]->time_base.num;
	ff_output[i].outStream->time_base.den = ff_input.formatCtx->streams[audioStreamIndex]->time_base.den;

	ff_output[i].codecCtx->time_base.num = ff_input.audiocodecCtx->time_base.num;
	ff_output[i].codecCtx->time_base.den = ff_input.audiocodecCtx->time_base.den;

	printf("sample_rate %d\n", ff_input.audiocodecCtx->sample_rate);

	//Open codec
	if (avcodec_open2(ff_output[i].codecCtx, ff_output[i].encoder, &codecOptions)) {
		printf ("Could not open output codec...\n");
		return -1;
	}

	av_dump_format (formatCtx, 0, quadFileName, 1);

	//Open output context
	if (avio_open (&formatCtx->pb, quadFileName, AVIO_FLAG_WRITE)) {
		printf ("avio_open failed %s\n", quadFileName);
		return -1;
	}
	
	//Write format context header
	if (avformat_write_header (formatCtx, &formatCtx->metadata)) {
		printf ("fail to write outstream header\n");
		return -1;
	}

	printf ("OUTPUT TO %s, at %lu\n", quadFileName, start_time);


	incaudio = 0;
	printf("Generating video streams...\n");
	while(av_read_frame (ff_input.formatCtx, &ff_input.packet) >= 0 && _keepEncoder) {
		if (ff_input.packet.stream_index == audioStreamIndex)
		{
			av_packet_ref  (&ff_output[amount_of_quadrants-1].packet, &ff_input.packet); 
            ff_output[amount_of_quadrants-1].packet.stream_index = amount_of_quadrants-1;
            ff_output[amount_of_quadrants-1].packet.pts = incaudio;

            // printf("%lu\n", ff_output[amount_of_quadrants-1].packet.pts);
            if (av_write_frame(formatCtx, &ff_output[amount_of_quadrants-1].packet) < 0) {
                printf ("Unable to write to output stream..\n");
                pthread_exit(NULL);
            }
            incaudio += 2880;
		}

		if (ff_input.packet.stream_index == videoStreamIndex) {

			ff_input.frame = av_frame_alloc();
			avcodec_decode_video2 (ff_input.codecCtx, ff_input.frame, &frameFinished, &ff_input.packet);

			if (frameFinished) {
				//TODO: Slice inputFrame and fill avQuadFrames[quadrant]
				//By now, inputFrame are replicated to all quadrants

				ff_input.frame->pts = av_frame_get_best_effort_timestamp (ff_input.frame);
				
				i = 0;
				for ( k = 0; k < quadrant_line; ++k) {
                    for (j = 0; j < quadrant_column; ++j) {
            			ff_output[i].frame = av_frame_alloc();

            			//make the cut quadrant ff_output[i]!
            			av_picture_crop((AVPicture *)ff_output[i].frame, (AVPicture *)ff_input.frame,       
            							ff_input.formatCtx->streams[videoStreamIndex]->codec->pix_fmt, marginTop, marginLeft);
            			
            			ff_output[i].frame->width = width; // updates the new width
						ff_output[i].frame->height = height; // updates the new height
						ff_output[i].frame->format = ff_input.frame->format;

						ff_output[i].frame->pts = inc;

						ff_output[i].packet.data = NULL;
						ff_output[i].packet.size = 0;
						av_init_packet (&ff_output[i].packet);
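						// The encoder may buffer frames internally (B-frames/lookahead),
						// so gotPacket is not set for every input frame.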

						avcodec_encode_video2 (ff_output[i].codecCtx, &ff_output[i].packet, ff_output[i].frame, &gotPacket);

						if (gotPacket) {
							ff_output[i].packet.stream_index = i;
							av_packet_rescale_ts (&ff_output[i].packet,
													ff_output[i].codecCtx->time_base,
													ff_output[i].outStream->time_base);

							if (av_write_frame (formatCtx, &ff_output[i].packet) < 0) {
								printf ("Unable to write to output stream..\n");
								pthread_exit(NULL);
							}

						}

						av_frame_free (&ff_output[i].frame);	

						i++;
						marginLeft += width;	

            		}
            		marginLeft = 0;
            		marginTop += height;
            	}
            	marginTop = 0; 
            	i = 0;
            	inc++;
			}
			av_frame_free (&ff_input.frame);
		}
	}

	return 0;
}
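
The quadrant cut above hinges on av_picture_crop(), which only re-points data pointers and linesizes into the source picture rather than copying pixels. A minimal sketch of that idiom, assuming a decoded source frame; crop_quadrant and its parameters are illustrative, not part of the example:

#include <libavcodec/avcodec.h>

/* Illustrative helper: crop a sub-rectangle out of a decoded frame.
 * av_picture_crop() adjusts data pointers/linesizes only, so no pixel
 * data is copied and dst must not outlive src. */
static AVFrame *crop_quadrant(const AVFrame *src, enum AVPixelFormat pix_fmt,
                              int top, int left, int w, int h)
{
    AVFrame *dst = av_frame_alloc();
    if (!dst)
        return NULL;

    av_picture_crop((AVPicture *)dst, (const AVPicture *)src, pix_fmt, top, left);

    dst->width  = w;   /* caller decides the visible quadrant size */
    dst->height = h;
    dst->format = src->format;
    return dst;
}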
Example n. 14
CBaseDec::RetCode CFfmpegDec::Decoder(FILE *_in, int /*OutputFd*/, State* state, CAudioMetaData* _meta_data, time_t* time_played, unsigned int* secondsToSkip)
{
	in = _in;
	RetCode Status=OK;
	is_stream = fseek((FILE *)in, 0, SEEK_SET);

	if (!SetMetaData((FILE *)in, _meta_data, true)) {
		DeInit();
		Status=DATA_ERR;
		return Status;
	}

	AVCodecContext *c = avc->streams[best_stream]->codec;

	mutex.lock();
	int r = avcodec_open2(c, codec, NULL);
	mutex.unlock();
	if (r)
	{
		DeInit();
		Status=DATA_ERR;
		return Status;
	}

	SwrContext *swr = swr_alloc();
	if (!swr) {
		mutex.lock();
		avcodec_close(c);
		mutex.unlock();
		DeInit();
		Status=DATA_ERR;
		return Status;
	}

	mSampleRate = samplerate;
	mChannels = av_get_channel_layout_nb_channels(AV_CH_LAYOUT_STEREO);
	audioDecoder->PrepareClipPlay(mChannels, mSampleRate, 16, 1);

	AVFrame *frame = NULL;
	AVPacket rpacket;
	av_init_packet(&rpacket);
	c->channel_layout = c->channel_layout ? c->channel_layout : AV_CH_LAYOUT_STEREO;

	av_opt_set_int(swr, "in_channel_layout",	c->channel_layout,	0);
	//av_opt_set_int(swr, "out_channel_layout",	c->channel_layout,	0);
	av_opt_set_int(swr, "out_channel_layout",	AV_CH_LAYOUT_STEREO,	0);
	av_opt_set_int(swr, "in_sample_rate",		c->sample_rate,		0);
	av_opt_set_int(swr, "out_sample_rate",		c->sample_rate,		0);
	av_opt_set_sample_fmt(swr, "in_sample_fmt",	c->sample_fmt,          0);
	av_opt_set_sample_fmt(swr, "out_sample_fmt",   	AV_SAMPLE_FMT_S16,      0);

	if (swr_init(swr) < 0) {
		swr_free(&swr);
		mutex.lock();
		avcodec_close(c);
		mutex.unlock();
		DeInit();
		Status=DATA_ERR;
		return Status;
	}

	uint8_t *outbuf = NULL;
	int outsamples = 0;
	int outsamples_max = 0;

	int64_t pts = 0, start_pts = 0, next_skip_pts = 0;
	uint64_t skip = 0;
	int seek_flags = 0;

	do
	{
		int actSecsToSkip = *secondsToSkip;
		if (!is_stream && (actSecsToSkip || *state==FF || *state==REV) && avc->streams[best_stream]->time_base.num) {
			if (!next_skip_pts || pts >= next_skip_pts) {
				skip = avc->streams[best_stream]->time_base.den / avc->streams[best_stream]->time_base.num;
				if (actSecsToSkip)
					skip *= actSecsToSkip;
				if (*state == REV) {
					next_skip_pts = pts - skip;
					pts = next_skip_pts - skip/4;
					seek_flags = AVSEEK_FLAG_BACKWARD;
					if (pts < start_pts) {
						pts = start_pts;
						*state = PAUSE; 
					}
				} else {
					pts += skip;
					next_skip_pts = pts + skip/4;
					seek_flags = 0;
				}
				av_seek_frame(avc, best_stream, pts, seek_flags);
				// if a custom value was set we only jump once
				if (actSecsToSkip != 0) {
					*state=PLAY;
					*secondsToSkip = 0;
				}
			}
		}

		while(*state==PAUSE && !is_stream)
			usleep(10000);

		if (av_read_frame(avc, &rpacket)) {
			Status=DATA_ERR;
			break;
		}

		if (rpacket.stream_index != best_stream) {
			av_packet_unref(&rpacket);
			continue;
		}

		AVPacket packet = rpacket;
		while (packet.size > 0) {
			int got_frame = 0;
			if (!frame) {
				if (!(frame = av_frame_alloc())) {
					Status=DATA_ERR;
					break;
				}
			} else
				av_frame_unref(frame);

			int len = avcodec_decode_audio4(c, frame, &got_frame, &packet);
			if (len < 0) {
				// skip frame
				packet.size = 0;
				avcodec_flush_buffers(c);
				mutex.lock();
				avcodec_close(c);
				avcodec_open2(c, codec, NULL);
				mutex.unlock();
				continue;
			}
			if (got_frame && *state!=PAUSE) {
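				/* out_samples is an out-parameter only: it receives the
				   linesize from av_samples_alloc()/av_samples_get_buffer_size() */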
				int out_samples;
				outsamples = av_rescale_rnd(swr_get_delay(swr, c->sample_rate) + frame->nb_samples,
					c->sample_rate, c->sample_rate, AV_ROUND_UP);
				if (outsamples > outsamples_max) {
					av_free(outbuf);
					if (av_samples_alloc(&outbuf, &out_samples, mChannels, //c->channels,
								frame->nb_samples, AV_SAMPLE_FMT_S16, 1) < 0) {
						Status=WRITE_ERR;
						packet.size = 0;
						break;
					}
					outsamples_max = outsamples;
				}
				outsamples = swr_convert(swr, &outbuf, outsamples,
							(const uint8_t **) &frame->data[0], frame->nb_samples);
				int outbuf_size = av_samples_get_buffer_size(&out_samples, mChannels, //c->channels,
									  outsamples, AV_SAMPLE_FMT_S16, 1);

				if(audioDecoder->WriteClip((unsigned char*) outbuf, outbuf_size) != outbuf_size)
				{
					fprintf(stderr,"%s: PCM write error (%s).\n", ProgName, strerror(errno));
					Status=WRITE_ERR;
				}
				pts = av_frame_get_best_effort_timestamp(frame);
				if (!start_pts)
					start_pts = pts;
			}
			packet.size -= len;
			packet.data += len;
		}
		if (time_played && avc->streams[best_stream]->time_base.den)
			*time_played = (pts - start_pts) * avc->streams[best_stream]->time_base.num / avc->streams[best_stream]->time_base.den;
		av_packet_unref(&rpacket);
	} while (*state!=STOP_REQ && Status==OK);

	audioDecoder->StopClip();
	meta_data_valid = false;

	swr_free(&swr);
	av_free(outbuf);
	av_packet_unref(&rpacket);
	av_frame_free(&frame);
	avcodec_close(c);
	//av_free(avcc);

	DeInit();
	if (_meta_data->cover_temporary && !_meta_data->cover.empty()) {
		_meta_data->cover_temporary = false;
		unlink(_meta_data->cover.c_str());
	}
	return Status;
}
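
Most of Example n. 14 is plumbing around one idea: configure libswresample so that whatever layout and sample format the decoder produces comes out as interleaved 16-bit stereo. A minimal sketch of that setup, assuming an already opened decoder context c; make_s16_stereo_resampler is an illustrative name:

#include <libavcodec/avcodec.h>
#include <libavutil/channel_layout.h>
#include <libavutil/opt.h>
#include <libswresample/swresample.h>

/* Illustrative helper: build an SwrContext that converts anything the
 * decoder hands us into interleaved S16 stereo at the source rate. */
static SwrContext *make_s16_stereo_resampler(AVCodecContext *c)
{
    SwrContext *swr = swr_alloc();
    if (!swr)
        return NULL;

    av_opt_set_int(swr, "in_channel_layout",
                   c->channel_layout ? c->channel_layout : AV_CH_LAYOUT_STEREO, 0);
    av_opt_set_int(swr, "out_channel_layout", AV_CH_LAYOUT_STEREO, 0);
    av_opt_set_int(swr, "in_sample_rate",  c->sample_rate, 0);
    av_opt_set_int(swr, "out_sample_rate", c->sample_rate, 0);
    av_opt_set_sample_fmt(swr, "in_sample_fmt",  c->sample_fmt, 0);
    av_opt_set_sample_fmt(swr, "out_sample_fmt", AV_SAMPLE_FMT_S16, 0);

    if (swr_init(swr) < 0) {
        swr_free(&swr);  /* do not leak a half-configured context */
        return NULL;
    }
    return swr;
}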
Example n. 15
int main(int argc, char **argv)
{
    int ret;
    AVPacket packet = { .data = NULL, .size = 0 };
    AVFrame *frame = NULL;
    enum AVMediaType type;
    unsigned int stream_index;
    unsigned int i;
    int got_frame;
    int (*dec_func)(AVCodecContext *, AVFrame *, int *, const AVPacket *);

    if (argc != 3) {
        av_log(NULL, AV_LOG_ERROR, "Usage: %s <input file> <output file>\n", argv[0]);
        return 1;
    }

    av_register_all();
    avfilter_register_all();

    if ((ret = open_input_file(argv[1])) < 0)
        goto end;
    if ((ret = open_output_file(argv[2])) < 0)
        goto end;
    if ((ret = init_filters()) < 0)
        goto end;

    /* read all packets */
    while (1) {
        if ((ret = av_read_frame(ifmt_ctx, &packet)) < 0)
            break;
        stream_index = packet.stream_index;
        type = ifmt_ctx->streams[packet.stream_index]->codec->codec_type;
        av_log(NULL, AV_LOG_DEBUG, "Demuxer gave frame of stream_index %u\n",
                stream_index);

        if (filter_ctx[stream_index].filter_graph) {
            av_log(NULL, AV_LOG_DEBUG, "Going to reencode&filter the frame\n");
            frame = av_frame_alloc();
            if (!frame) {
                ret = AVERROR(ENOMEM);
                break;
            }
            av_packet_rescale_ts(&packet,
                                 ifmt_ctx->streams[stream_index]->time_base,
                                 ifmt_ctx->streams[stream_index]->codec->time_base);
            dec_func = (type == AVMEDIA_TYPE_VIDEO) ? avcodec_decode_video2 :
                avcodec_decode_audio4;
            ret = dec_func(ifmt_ctx->streams[stream_index]->codec, frame,
                    &got_frame, &packet);
            if (ret < 0) {
                av_frame_free(&frame);
                av_log(NULL, AV_LOG_ERROR, "Decoding failed\n");
                break;
            }

            if (got_frame) {
                frame->pts = av_frame_get_best_effort_timestamp(frame);
                ret = filter_encode_write_frame(frame, stream_index);
                av_frame_free(&frame);
                if (ret < 0)
                    goto end;
            } else {
                av_frame_free(&frame);
            }
        } else {
            /* remux this frame without reencoding */
            av_packet_rescale_ts(&packet,
                                 ifmt_ctx->streams[stream_index]->time_base,
                                 ofmt_ctx->streams[stream_index]->time_base);

            ret = av_interleaved_write_frame(ofmt_ctx, &packet);
            if (ret < 0)
                goto end;
        }
        av_packet_unref(&packet);
    }

    /* flush filters and encoders */
    for (i = 0; i < ifmt_ctx->nb_streams; i++) {
        /* flush filter */
        if (!filter_ctx[i].filter_graph)
            continue;
        ret = filter_encode_write_frame(NULL, i);
        if (ret < 0) {
            av_log(NULL, AV_LOG_ERROR, "Flushing filter failed\n");
            goto end;
        }

        /* flush encoder */
        ret = flush_encoder(i);
        if (ret < 0) {
            av_log(NULL, AV_LOG_ERROR, "Flushing encoder failed\n");
            goto end;
        }
    }

    av_write_trailer(ofmt_ctx);
end:
    av_packet_unref(&packet);
    av_frame_free(&frame);
    for (i = 0; i < ifmt_ctx->nb_streams; i++) {
        avcodec_close(ifmt_ctx->streams[i]->codec);
        if (ofmt_ctx && ofmt_ctx->nb_streams > i && ofmt_ctx->streams[i] && ofmt_ctx->streams[i]->codec)
            avcodec_close(ofmt_ctx->streams[i]->codec);
        if (filter_ctx && filter_ctx[i].filter_graph)
            avfilter_graph_free(&filter_ctx[i].filter_graph);
    }
    av_free(filter_ctx);
    avformat_close_input(&ifmt_ctx);
    if (ofmt_ctx && !(ofmt_ctx->oformat->flags & AVFMT_NOFILE))
        avio_closep(&ofmt_ctx->pb);
    avformat_free_context(ofmt_ctx);

    if (ret < 0)
        av_log(NULL, AV_LOG_ERROR, "Error occurred: %s\n", av_err2str(ret));

    return ret ? 1 : 0;
}
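
The else branch above (remuxing without re-encoding) reduces to one timestamp rescale plus an interleaved write. A condensed sketch, assuming input and output stream indices match as they do in the example; remux_packet is an illustrative name:

#include <libavformat/avformat.h>

/* Illustrative helper: move a packet from input to output container,
 * converting its pts/dts/duration between the two stream time bases. */
static int remux_packet(AVFormatContext *ifmt_ctx, AVFormatContext *ofmt_ctx,
                        AVPacket *pkt)
{
    unsigned idx = pkt->stream_index;

    av_packet_rescale_ts(pkt,
                         ifmt_ctx->streams[idx]->time_base,
                         ofmt_ctx->streams[idx]->time_base);
    return av_interleaved_write_frame(ofmt_ctx, pkt);  /* muxer interleaves */
}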
Example n. 16
u32 vdecOpen(VideoDecoder* data)
{
	VideoDecoder& vdec = *data;

	vdec.vdecCb = &Emu.GetCPU().AddThread(CPU_THREAD_PPU);

	u32 vdec_id = cellVdec->GetNewId(data);

	vdec.id = vdec_id;

	vdec.vdecCb->SetName("Video Decoder[" + std::to_string(vdec_id) + "] Callback");

	thread t("Video Decoder[" + std::to_string(vdec_id) + "] Thread", [&]()
	{
		cellVdec->Notice("Video Decoder thread started");

		VdecTask& task = vdec.task;

		while (true)
		{
			if (Emu.IsStopped())
			{
				break;
			}

			if (!vdec.job.GetCountUnsafe() && vdec.is_running)
			{
				std::this_thread::sleep_for(std::chrono::milliseconds(1));
				continue;
			}

			if (vdec.frames.GetCount() >= 50)
			{
				std::this_thread::sleep_for(std::chrono::milliseconds(1));
				continue;
			}

			if (!vdec.job.Pop(task))
			{
				break;
			}

			switch (task.type)
			{
			case vdecStartSeq:
			{
				// TODO: reset data
				cellVdec->Warning("vdecStartSeq:");

				vdec.reader.addr = 0;
				vdec.reader.size = 0;
				vdec.is_running = true;
				vdec.just_started = true;
			}
			break;

			case vdecEndSeq:
			{
				// TODO: finalize
				cellVdec->Warning("vdecEndSeq:");

				vdec.vdecCb->ExecAsCallback(vdec.cbFunc, false, vdec.id, CELL_VDEC_MSG_TYPE_SEQDONE, CELL_OK, vdec.cbArg);
				/*Callback cb;
				cb.SetAddr(vdec.cbFunc);
				cb.Handle(vdec.id, CELL_VDEC_MSG_TYPE_SEQDONE, CELL_OK, vdec.cbArg);
				cb.Branch(true); // ???*/

				vdec.is_running = false;
				vdec.just_finished = true;
			}
			break;

			case vdecDecodeAu:
			{
				int err;

				if (task.mode != CELL_VDEC_DEC_MODE_NORMAL)
				{
					cellVdec->Error("vdecDecodeAu: unsupported decoding mode(%d)", task.mode);
					break;
				}

				vdec.reader.addr = task.addr;
				vdec.reader.size = task.size;
				//LOG_NOTICE(HLE, "Video AU: size = 0x%x, pts = 0x%llx, dts = 0x%llx", task.size, task.pts, task.dts);

				if (vdec.just_started)
				{
					vdec.first_pts = task.pts;
					vdec.last_pts = task.pts;
					vdec.first_dts = task.dts;
				}

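				// RAII holder: allocates the access-unit buffer with the zeroed
				// FF_INPUT_BUFFER_PADDING_SIZE tail that FFmpeg decoders expect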
				struct AVPacketHolder : AVPacket
				{
					AVPacketHolder(u32 size)
					{
						av_init_packet(this);

						if (size)
						{
							data = (u8*)av_malloc(size + FF_INPUT_BUFFER_PADDING_SIZE);
							memset(data + size, 0, FF_INPUT_BUFFER_PADDING_SIZE);
							this->size = size + FF_INPUT_BUFFER_PADDING_SIZE;
						}
						else
						{
							data = NULL;
							size = 0;
						}
					}

					~AVPacketHolder()
					{
						av_free(data);
						//av_free_packet(this);
					}

				} au(0);

				if (vdec.just_started && vdec.just_finished)
				{
					avcodec_flush_buffers(vdec.ctx);
					vdec.just_started = false;
					vdec.just_finished = false;
				}
				else if (vdec.just_started) // deferred initialization
				{
					err = avformat_open_input(&vdec.fmt, NULL, av_find_input_format("mpeg"), NULL);
					if (err)
					{
						cellVdec->Error("vdecDecodeAu: avformat_open_input() failed");
						Emu.Pause();
						break;
					}
					AVCodec* codec = avcodec_find_decoder(AV_CODEC_ID_H264); // ???
					if (!codec)
					{
						cellVdec->Error("vdecDecodeAu: avcodec_find_decoder() failed");
						Emu.Pause();
						break;
					}
					/*err = avformat_find_stream_info(vdec.fmt, NULL);
					if (err)
					{
						LOG_ERROR(HLE, "vdecDecodeAu: avformat_find_stream_info() failed");
						Emu.Pause();
						break;
					}
					if (!vdec.fmt->nb_streams)
					{
						LOG_ERROR(HLE, "vdecDecodeAu: no stream found");
						Emu.Pause();
						break;
					}*/
					if (!avformat_new_stream(vdec.fmt, codec))
					{
						cellVdec->Error("vdecDecodeAu: avformat_new_stream() failed");
						Emu.Pause();
						break;
					}
					vdec.ctx = vdec.fmt->streams[0]->codec; // TODO: check data
						
					AVDictionary* opts = nullptr;
					av_dict_set(&opts, "refcounted_frames", "1", 0);
					{
						std::lock_guard<std::mutex> lock(g_mutex_avcodec_open2);
						// not multithread-safe (???)
						err = avcodec_open2(vdec.ctx, codec, &opts);
					}
					if (err)
					{
						cellVdec->Error("vdecDecodeAu: avcodec_open2() failed");
						Emu.Pause();
						break;
					}
					vdec.just_started = false;
				}

				bool last_frame = false;

				while (true)
				{
					if (Emu.IsStopped() || vdec.job.PeekIfExist().type == vdecClose)
					{
						vdec.is_finished = true;
						cellVdec->Warning("vdecDecodeAu: aborted");
						return;
					}

					last_frame = av_read_frame(vdec.fmt, &au) < 0;
					if (last_frame)
					{
						//break;
						av_free(au.data);
						au.data = NULL;
						au.size = 0;
					}

					struct VdecFrameHolder : VdecFrame
					{
						VdecFrameHolder()
						{
							data = av_frame_alloc();
						}

						~VdecFrameHolder()
						{
							if (data)
							{
								av_frame_unref(data);
								av_frame_free(&data);
							}
						}

					} frame;

					if (!frame.data)
					{
						cellVdec->Error("vdecDecodeAu: av_frame_alloc() failed");
						Emu.Pause();
						break;
					}

					int got_picture = 0;

					int decode = avcodec_decode_video2(vdec.ctx, frame.data, &got_picture, &au);

					if (decode <= 0)
					{
						if (!last_frame && decode < 0)
						{
							cellVdec->Error("vdecDecodeAu: AU decoding error(0x%x)", decode);
						}
						if (!got_picture && vdec.reader.size == 0) break; // video end?
					}

					if (got_picture)
					{
						u64 ts = av_frame_get_best_effort_timestamp(frame.data);
						if (ts != AV_NOPTS_VALUE)
						{
							frame.pts = ts/* - vdec.first_pts*/; // ???
							vdec.last_pts = frame.pts;
						}
						else
						{
							vdec.last_pts += vdec.ctx->time_base.num * 90000 / (vdec.ctx->time_base.den / vdec.ctx->ticks_per_frame);
							frame.pts = vdec.last_pts;
						}
						//frame.pts = vdec.last_pts;
						//vdec.last_pts += 3754;
						frame.dts = (frame.pts - vdec.first_pts) + vdec.first_dts;
						frame.userdata = task.userData;

						//LOG_NOTICE(HLE, "got picture (pts=0x%llx, dts=0x%llx)", frame.pts, frame.dts);

						vdec.frames.Push(frame); // !!!!!!!!
						frame.data = nullptr; // to prevent destruction

						vdec.vdecCb->ExecAsCallback(vdec.cbFunc, false, vdec.id, CELL_VDEC_MSG_TYPE_PICOUT, CELL_OK, vdec.cbArg);
						/*Callback cb;
						cb.SetAddr(vdec.cbFunc);
						cb.Handle(vdec.id, CELL_VDEC_MSG_TYPE_PICOUT, CELL_OK, vdec.cbArg);
						cb.Branch(false);*/
					}
				}

				vdec.vdecCb->ExecAsCallback(vdec.cbFunc, false, vdec.id, CELL_VDEC_MSG_TYPE_AUDONE, CELL_OK, vdec.cbArg);
				/*Callback cb;
				cb.SetAddr(vdec.cbFunc);
				cb.Handle(vdec.id, CELL_VDEC_MSG_TYPE_AUDONE, CELL_OK, vdec.cbArg);
				cb.Branch(false);*/
			}
			break;

			case vdecClose:
			{
				vdec.is_finished = true;
				cellVdec->Notice("Video Decoder thread ended");
				return;
			}

			case vdecSetFrameRate:
			{
				cellVdec->Error("TODO: vdecSetFrameRate(%d)", task.frc);
			}
			break;

			default:
				cellVdec->Error("Video Decoder thread error: unknown task(%d)", task.type);
			}
		}

		vdec.is_finished = true;
		cellVdec->Warning("Video Decoder thread aborted");
	});

	t.detach();

	return vdec_id;
}
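
The AVPacketHolder above exists because FFmpeg decoders require input buffers to carry FF_INPUT_BUFFER_PADDING_SIZE zeroed bytes after the payload, so the bitstream reader can overread safely. A plain-C sketch of the same idiom; note that here the packet size excludes the padding, which is the documented convention (the holder above counts it in):

#include <string.h>
#include <libavcodec/avcodec.h>

/* Illustrative helper: allocate an AVPacket whose buffer ends in the
 * mandatory zeroed padding. */
static int alloc_padded_packet(AVPacket *pkt, int payload_size)
{
    av_init_packet(pkt);
    pkt->data = (uint8_t *)av_malloc(payload_size + FF_INPUT_BUFFER_PADDING_SIZE);
    if (!pkt->data)
        return AVERROR(ENOMEM);
    memset(pkt->data + payload_size, 0, FF_INPUT_BUFFER_PADDING_SIZE);
    pkt->size = payload_size;  /* padding is not part of the payload */
    return 0;
}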
Example n. 17
void cVideo::run(void)
{
	lt_info("====================== start decoder thread ================================\n");
	AVCodec *codec;
	AVCodecContext *c= NULL;
	AVFormatContext *avfc = NULL;
	AVInputFormat *inp;
	AVFrame *frame, *rgbframe;
	uint8_t *inbuf = (uint8_t *)av_malloc(INBUF_SIZE);
	AVPacket avpkt;
	struct SwsContext *convert = NULL;

	time_t warn_r = 0; /* last read error */
	time_t warn_d = 0; /* last decode error */

	bufpos = 0;
	buf_num = 0;
	buf_in = 0;
	buf_out = 0;
	dec_r = 0;

	av_init_packet(&avpkt);
	inp = av_find_input_format("mpegts");
	AVIOContext *pIOCtx = avio_alloc_context(inbuf, INBUF_SIZE, // internal Buffer and its size
			0,		// bWriteable (1=true,0=false)
			NULL,		// user data; will be passed to our callback functions
			my_read,	// read callback
			NULL,		// write callback
			NULL);		// seek callback
	avfc = avformat_alloc_context();
	avfc->pb = pIOCtx;
	avfc->iformat = inp;
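	/* deliberately tiny probesize: start decoding almost immediately
	   instead of buffering many TS packets for stream analysis */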
	avfc->probesize = 188*5;

	thread_running = true;
	if (avformat_open_input(&avfc, NULL, inp, NULL) < 0) {
		lt_info("%s: Could not open input\n", __func__);
		goto out;
	}
	while (avfc->nb_streams < 1)
	{
		lt_info("%s: nb_streams %d, should be 1 => retry\n", __func__, avfc->nb_streams);
		if (av_read_frame(avfc, &avpkt) < 0)
			lt_info("%s: av_read_frame < 0\n", __func__);
		av_free_packet(&avpkt);
		if (! thread_running)
			goto out;
	}

	if (avfc->streams[0]->codec->codec_type != AVMEDIA_TYPE_VIDEO)
		lt_info("%s: no video codec? 0x%x\n", __func__, avfc->streams[0]->codec->codec_type);

	c = avfc->streams[0]->codec;
	codec = avcodec_find_decoder(c->codec_id);
	if (!codec) {
		lt_info("%s: Codec for %s not found\n", __func__, avcodec_get_name(c->codec_id));
		goto out;
	}
	if (avcodec_open2(c, codec, NULL) < 0) {
		lt_info("%s: Could not open codec\n", __func__);
		goto out;
	}
	frame = av_frame_alloc();
	rgbframe = av_frame_alloc();
	if (!frame || !rgbframe) {
		lt_info("%s: Could not allocate video frame\n", __func__);
		goto out2;
	}
	lt_info("decoding %s\n", avcodec_get_name(c->codec_id));
	while (thread_running) {
		if (av_read_frame(avfc, &avpkt) < 0) {
			if (time(NULL) - warn_r > 4) {
				lt_info("%s: av_read_frame < 0\n", __func__);
				warn_r = time(NULL);
			}
			usleep(10000);
			continue;
		}
		int got_frame = 0;
		int len = avcodec_decode_video2(c, frame, &got_frame, &avpkt);
		if (len < 0) {
			if (time(NULL) - warn_d > 4) {
				lt_info("%s: avcodec_decode_video2 %d\n", __func__, len);
				warn_d = time(NULL);
			}
			av_free_packet(&avpkt);
			continue;
		}
		if (avpkt.size > len)
			lt_info("%s: WARN: pkt->size %d != len %d\n", __func__, avpkt.size, len);
		if (got_frame) {
			unsigned int need = avpicture_get_size(PIX_FMT_RGB32, c->width, c->height);
			convert = sws_getCachedContext(convert,
						       c->width, c->height, c->pix_fmt,
						       c->width, c->height, PIX_FMT_RGB32,
						       SWS_BICUBIC, 0, 0, 0);
			if (!convert)
				lt_info("%s: ERROR setting up SWS context\n", __func__);
			else {
				buf_m.lock();
				SWFramebuffer *f = &buffers[buf_in];
				if (f->size() < need)
					f->resize(need);
				avpicture_fill((AVPicture *)rgbframe, &(*f)[0], PIX_FMT_RGB32,
						c->width, c->height);
				sws_scale(convert, frame->data, frame->linesize, 0, c->height,
						rgbframe->data, rgbframe->linesize);
				if (dec_w != c->width || dec_h != c->height) {
					lt_info("%s: pic changed %dx%d -> %dx%d\n", __func__,
							dec_w, dec_h, c->width, c->height);
					dec_w = c->width;
					dec_h = c->height;
					w_h_changed = true;
				}
				f->width(c->width);
				f->height(c->height);
				int64_t vpts = av_frame_get_best_effort_timestamp(frame);
				if (v_format == VIDEO_FORMAT_MPEG2)
					vpts += 90000*3/10; /* 300ms */
				f->pts(vpts);
				AVRational a = av_guess_sample_aspect_ratio(avfc, avfc->streams[0], frame);
				f->AR(a);
				buf_in++;
				buf_in %= VDEC_MAXBUFS;
				buf_num++;
				if (buf_num > (VDEC_MAXBUFS - 1)) {
					lt_info("%s: buf_num overflow\n", __func__);
					buf_out++;
					buf_out %= VDEC_MAXBUFS;
					buf_num--;
				}
				dec_r = c->time_base.den/(c->time_base.num * c->ticks_per_frame);
				buf_m.unlock();
			}
			lt_debug("%s: time_base: %d/%d, ticks: %d rate: %d pts 0x%" PRIx64 "\n", __func__,
					c->time_base.num, c->time_base.den, c->ticks_per_frame, dec_r,
					av_frame_get_best_effort_timestamp(frame));
		}
		av_free_packet(&avpkt);
	}
	sws_freeContext(convert);
 out2:
	avcodec_close(c);
	av_frame_free(&frame);
	av_frame_free(&rgbframe);
 out:
	avformat_close_input(&avfc);
	av_free(pIOCtx->buffer);
	av_free(pIOCtx);
	/* reset output buffers */
	bufpos = 0;
	buf_num = 0;
	buf_in = 0;
	buf_out = 0;
	lt_info("======================== end decoder thread ================================\n");
}
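
cVideo::run() feeds the demuxer from its own buffer via the my_read callback and a custom AVIOContext instead of a file. A sketch of that wiring under the same assumptions (a read callback with the avio signature); open_custom_io is an illustrative name and error checks are trimmed:

#include <libavformat/avformat.h>

/* Illustrative helper: open a demuxer that pulls data from a user
 * callback rather than from a path. */
static AVFormatContext *open_custom_io(int (*read_cb)(void *, uint8_t *, int),
                                       void *opaque, int bufsize)
{
    uint8_t *buf = (uint8_t *)av_malloc(bufsize);
    AVIOContext *io = avio_alloc_context(buf, bufsize,
                                         0,       /* not writable */
                                         opaque,  /* handed to the callbacks */
                                         read_cb, /* read callback */
                                         NULL,    /* no write callback */
                                         NULL);   /* no seek callback */
    AVFormatContext *fmt = avformat_alloc_context();

    fmt->pb = io;
    if (avformat_open_input(&fmt, NULL, av_find_input_format("mpegts"), NULL) < 0) {
        /* on failure avformat_open_input() has already freed fmt */
        av_free(io->buffer);
        av_free(io);
        return NULL;
    }
    return fmt;
}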
Example n. 18
int main(int argc, char *argv[])
{
    int ret;
    AVPacket packet;
    AVFrame frame;
    int got_frame;

    avcodec_register_all();
    av_register_all();
    avfilter_register_all();

    if ((ret = open_input_file("cuc_ieschool.flv")) < 0)
        goto end;
    if ((ret = init_filters(filter_descr)) < 0)
        goto end;
#if ENABLE_YUVFILE
    FILE *fp_yuv = fopen("test.yuv", "wb+");
#endif
#if ENABLE_SDL
    SDL_Surface *screen;
    SDL_Overlay *bmp;
    SDL_Rect rect;
    if (SDL_Init(SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER)) {
        printf( "Could not initialize SDL - %s\n", SDL_GetError());
        return -1;
    }
    screen = SDL_SetVideoMode(pCodecCtx->width, pCodecCtx->height, 0, 0);
    if (!screen) {
        printf("SDL: could not set video mode - exiting\n");
        return -1;
    }
    bmp = SDL_CreateYUVOverlay(pCodecCtx->width, pCodecCtx->height, SDL_YV12_OVERLAY, screen);

    SDL_WM_SetCaption("Simplest FFmpeg Video Filter", NULL);
#endif

    /* read all packets */
    while (1) {
        AVFilterBufferRef *picref;
        if ((ret = av_read_frame(pFormatCtx, &packet)) < 0)
            break;

        if (packet.stream_index == video_stream_index) {
            avcodec_get_frame_defaults(&frame);
            got_frame = 0;
            ret = avcodec_decode_video2(pCodecCtx, &frame, &got_frame, &packet);
            if (ret < 0) {
                printf( "Error decoding video\n");
                break;
            }

            if (got_frame) {
                frame.pts = av_frame_get_best_effort_timestamp(&frame);

                /* push the decoded frame into the filtergraph */
                if (av_buffersrc_add_frame(buffersrc_ctx, &frame) < 0) {
                    printf( "Error while feeding the filtergraph\n");
                    break;
                }

                /* pull filtered pictures from the filtergraph */
                while (1) {
                    ret = av_buffersink_get_buffer_ref(buffersink_ctx, &picref, 0);
                    if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
                        break;
                    if (ret < 0)
                        goto end;

                    if (picref) {
#if ENABLE_YUVFILE || ENABLE_SDL
                        int y_size = picref->video->w * picref->video->h;
#endif
#if ENABLE_YUVFILE
                        fwrite(picref->data[0], 1, y_size, fp_yuv);     //Y
                        fwrite(picref->data[1], 1, y_size / 4, fp_yuv); //U
                        fwrite(picref->data[2], 1, y_size / 4, fp_yuv); //V
#endif

#if ENABLE_SDL
                        SDL_LockYUVOverlay(bmp);
                        memcpy(bmp->pixels[0], picref->data[0], y_size); //Y
                        memcpy(bmp->pixels[2], picref->data[1], y_size / 4); //U
                        memcpy(bmp->pixels[1], picref->data[2], y_size / 4); //V
                        bmp->pitches[0] = picref->linesize[0];
                        bmp->pitches[2] = picref->linesize[1];
                        bmp->pitches[1] = picref->linesize[2];
                        SDL_UnlockYUVOverlay(bmp);
                        rect.x = 0;
                        rect.y = 0;
                        rect.w = picref->video->w;
                        rect.h = picref->video->h;
                        SDL_DisplayYUVOverlay(bmp, &rect);
                        //Delay 40ms
                        SDL_Delay(40);
#endif
                        avfilter_unref_bufferp(&picref);
                    }
                }
            }
        }
        av_free_packet(&packet);
    }
#if ENABLE_YUVFILE
    fclose(fp_yuv);
#endif
end:
    avfilter_graph_free(&filter_graph);
    if (pCodecCtx)
        avcodec_close(pCodecCtx);
    avformat_close_input(&pFormatCtx);

    if (ret < 0 && ret != AVERROR_EOF) {
        char buf[1024];
        av_strerror(ret, buf, sizeof(buf));
        printf("Error occurred: %s\n", buf);
        return -1;
    }

    return 0;
}
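
The pull loop above still uses the old AVFilterBufferRef API (av_buffersink_get_buffer_ref / avfilter_unref_bufferp). Later libavfilter releases replaced it with AVFrame-based calls; a hedged sketch of the equivalent drain loop against that newer API, assuming a configured buffersink context:

#include <libavfilter/buffersink.h>
#include <libavutil/frame.h>

/* Illustrative helper: pull every frame currently available from a
 * buffersink; EAGAIN/EOF are the normal "nothing more" conditions. */
static int drain_buffersink(AVFilterContext *buffersink_ctx)
{
    AVFrame *filt = av_frame_alloc();
    int ret;

    if (!filt)
        return AVERROR(ENOMEM);
    while ((ret = av_buffersink_get_frame(buffersink_ctx, filt)) >= 0) {
        /* ... consume filt->data / filt->linesize here ... */
        av_frame_unref(filt);
    }
    av_frame_free(&filt);
    return (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF) ? 0 : ret;
}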