Example #1
void VideoBackgroundLoader::startLoadingThread()
{
    {
        SDL_LockMutex(backgroundMutex);
        if (mState != STARTING) {
            mState = STOPPING;
            SDL_UnlockMutex(backgroundMutex);
            return;
        }
        mState = RUNNING;
        SDL_UnlockMutex(backgroundMutex);
    }
    AVPacket packet;

    while (!eof) {
        {
            SDL_LockMutex(backgroundMutex);

            if (quitRequested)
            {
                mState = STOPPING;
                SDL_UnlockMutex(backgroundMutex);
                return;
            }

            // bail out if the state is no longer RUNNING (e.g. a stop was requested)
            if (mState != RUNNING) {
                SDL_UnlockMutex(backgroundMutex);
                return;
            }
            SDL_UnlockMutex(backgroundMutex);
        }

        eof = av_read_frame(mContext->formatctx, &packet) < 0;
        if (!eof) {
            if (packet.stream_index == mContext->videoindex) {
                int framedone = 0;
                avcodec_decode_video2(mContext->videocodecctx, mContext->readframe, &framedone, &packet);
                if (framedone) {
                    size_t bsize = (mContext->videowidth * mContext->videoheight * 3 + 4);
                    boost::shared_array<unsigned char> buffer(new unsigned char[bsize]);
                    if (buffer) {
                        toRGB_convert_ctx = sws_getCachedContext(toRGB_convert_ctx,
                                                 mContext->videowidth, mContext->videoheight,
                                                 mContext->videocodecctx->pix_fmt,
                                                 mContext->videowidth, mContext->videoheight,
                                                 PIX_FMT_BGR24, //NOTE: DX9 only!!!
                                                 sws_flags, NULL, NULL, NULL);

                        sws_scale(toRGB_convert_ctx,
                                  mContext->readframe->data,
                                  mContext->readframe->linesize,
                                  0, mContext->videoheight,
                                  mContext->drawframe->data,
                                  mContext->drawframe->linesize);
                        unsigned int sh = mContext->videoheight, sw = mContext->videowidth;
                        memcpy(buffer.get(), mContext->drawframe->data[0], sw * sh * 3);

                        {
                            SDL_LockMutex(backgroundMutex);
                            avctx::Frame f = {buffer, packet.dts};
                            mContext->frames.push_back(f);

                            if (quitRequested)
                            {
                                mState = STOPPING;
                                SDL_UnlockMutex(backgroundMutex);
                                return;
                            }

                            // if too many frames are buffered, block until the
                            // consumer clears backgroundReaderWaiting and
                            // signals framesFullCVar
                            if (mContext->frames.size() >= MAX_BUFFERED_FRAMES) {
                                backgroundReaderWaiting = true;
                                while (backgroundReaderWaiting)
                                {
                                    SDL_CondWait(framesFullCVar, backgroundMutex);
                                }
                            }
                            SDL_UnlockMutex(backgroundMutex);
                        }
                    }
                }
            } else if (packet.stream_index == mContext->audioindex && mContext->audiobuffer) {
                int frameDone;
                avcodec_decode_audio4(mContext->audiocodecctx, mContext->audioframe, &frameDone, &packet);

                if(frameDone)
                {
                    int srcNSamples = mContext->audioframe->nb_samples;
                    int dstNSamples = mContext->audiobuffersize / 2;

                    if (mContext->audiocodecctx->channels>1)
                    {
                        dstNSamples /= 2;
                    }

                    uint8_t**  src = mContext->audioframe->extended_data;
                    uint8_t*   dst = (uint8_t*)mContext->audiobuffer;
                    dstNSamples = swr_convert(mContext->resamplerctx, &dst, dstNSamples, (const uint8_t**)src, srcNSamples);

                    int size = dstNSamples*2/*sizeof(int16_t)*/; //NOTE: hardcode!!!
                    if (mContext->audiocodecctx->channels>1)
                    {
                        size *= 2;
                    }
                    unsigned long long duration =
                        (unsigned long long)( (double(mContext->audiocodecctx->time_base.num)
                                               / mContext->audiocodecctx->time_base.den) * 1000 );
                    mContext->audiostream->addSample( (char *)mContext->audiobuffer, size, mContext->audiotime, duration );
                    mContext->audiotime += duration;
                }
            }
            av_free_packet(&packet);
        } else { break; }
    }
    {
        SDL_LockMutex(backgroundMutex);
        mState = STOPPING;
        SDL_UnlockMutex(backgroundMutex);
    }
}
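Note: avcodec_decode_audio4() and avcodec_decode_video2(), used throughout these examples, were deprecated in FFmpeg 3.1 in favor of the send/receive API. A minimal sketch of the equivalent audio decode loop, assuming an already-opened AVCodecContext *ctx, an allocated AVFrame *frame, and a packet read as above:

/* sketch only: post-3.1 replacement for avcodec_decode_audio4() */
int ret = avcodec_send_packet(ctx, &packet);
while (ret >= 0) {
    ret = avcodec_receive_frame(ctx, frame);
    if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
        break;              /* needs more input, or fully drained */
    if (ret < 0)
        break;              /* real decoding error: handle/report it */
    /* ... consume frame here ... */
    av_frame_unref(frame);
}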
Example #2
/*
 * Audio decoding.
 */
static void audio_decode_example(const char *outfilename, const char *filename)
{
    AVCodec *codec;
    AVCodecContext *c= NULL;
    int len;
    FILE *f, *outfile;
    uint8_t inbuf[AUDIO_INBUF_SIZE + FF_INPUT_BUFFER_PADDING_SIZE];
    AVPacket avpkt;
    AVFrame *decoded_frame = NULL;

    av_init_packet(&avpkt);

    printf("Audio decoding\n");

    /* find the mpeg audio decoder */
    codec = avcodec_find_decoder(CODEC_ID_MP2);
    if (!codec) {
        fprintf(stderr, "codec not found\n");
        exit(1);
    }

    c = avcodec_alloc_context3(codec);

    /* open it */
    if (avcodec_open2(c, codec, NULL) < 0) {
        fprintf(stderr, "could not open codec\n");
        exit(1);
    }

    f = fopen(filename, "rb");
    if (!f) {
        fprintf(stderr, "could not open %s\n", filename);
        exit(1);
    }
    outfile = fopen(outfilename, "wb");
    if (!outfile) {
        av_free(c);
        exit(1);
    }

    /* decode until eof */
    avpkt.data = inbuf;
    avpkt.size = fread(inbuf, 1, AUDIO_INBUF_SIZE, f);

    while (avpkt.size > 0) {
        int got_frame = 0;

        if (!decoded_frame) {
            if (!(decoded_frame = avcodec_alloc_frame())) {
                fprintf(stderr, "out of memory\n");
                exit(1);
            }
        } else
            avcodec_get_frame_defaults(decoded_frame);

        len = avcodec_decode_audio4(c, decoded_frame, &got_frame, &avpkt);
        if (len < 0) {
            fprintf(stderr, "Error while decoding\n");
            exit(1);
        }
        if (got_frame) {
            /* if a frame has been decoded, output it */
            int data_size = av_samples_get_buffer_size(NULL, c->channels,
                                                       decoded_frame->nb_samples,
                                                       c->sample_fmt, 1);
            fwrite(decoded_frame->data[0], 1, data_size, outfile);
        }
        avpkt.size -= len;
        avpkt.data += len;
        avpkt.dts =
        avpkt.pts = AV_NOPTS_VALUE;
        if (avpkt.size < AUDIO_REFILL_THRESH) {
            /* Refill the input buffer, to avoid trying to decode
             * incomplete frames. Instead of this, one could also use
             * a parser, or use a proper container format through
             * libavformat. */
            memmove(inbuf, avpkt.data, avpkt.size);
            avpkt.data = inbuf;
            len = fread(avpkt.data + avpkt.size, 1,
                        AUDIO_INBUF_SIZE - avpkt.size, f);
            if (len > 0)
                avpkt.size += len;
        }
    }

    fclose(outfile);
    fclose(f);

    avcodec_close(c);
    av_free(c);
    av_free(decoded_frame);
}
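Note: the fwrite() above writes only decoded_frame->data[0], which holds all channels for packed formats but only the first channel when the decoder outputs a planar format. A minimal interleaving sketch reusing this example's names (hedged: the MP2 decoder here normally emits packed samples, so the branch is illustrative):

if (av_sample_fmt_is_planar(c->sample_fmt)) {
    int bps = av_get_bytes_per_sample(c->sample_fmt);
    for (int i = 0; i < decoded_frame->nb_samples; i++)   /* interleave sample-by-sample */
        for (int ch = 0; ch < c->channels; ch++)
            fwrite(decoded_frame->extended_data[ch] + i * bps, 1, bps, outfile);
}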
Example #3
int import_ffmpeg_decode_frame(streamContext *sc, bool flushing)
{
   int      nBytesDecoded;
   wxUint8 *pDecode = sc->m_pktDataPtr;
   int      nDecodeSiz = sc->m_pktRemainingSiz;

   sc->m_frameValid = 0;

   if (flushing)
   {
      // If we're flushing the decoders we don't actually have any NEW data to decode.
      pDecode = NULL;
      nDecodeSiz = 0;
   }
   else
   {
      if (!sc->m_pkt || (sc->m_pktRemainingSiz <= 0))
      {
         //No more data
         return -1;
      }
   }

   AVPacketEx avpkt;
   avpkt.data = pDecode;
   avpkt.size = nDecodeSiz;

   AVFrameHolder frame{ av_frame_alloc() };
   int got_output = 0;

   nBytesDecoded =
      avcodec_decode_audio4(sc->m_codecCtx,
                            frame.get(),                                   // out
                            &got_output,                             // out
                            &avpkt);                                 // in

   if (nBytesDecoded < 0)
   {
      // Decoding failed. Don't stop.
      return -1;
   }

   sc->m_samplefmt = sc->m_codecCtx->sample_fmt;
   sc->m_samplesize = av_get_bytes_per_sample(sc->m_samplefmt);

   int channels = sc->m_codecCtx->channels;
   unsigned int newsize = sc->m_samplesize * frame->nb_samples * channels;
   sc->m_decodedAudioSamplesValidSiz = newsize;
   // Reallocate the audio sample buffer if it's smaller than the frame size.
   if (newsize > sc->m_decodedAudioSamplesSiz )
   {
      // Reallocate a bigger buffer.  But av_realloc is NOT compatible with the returns of av_malloc!
      // So do this:
      sc->m_decodedAudioSamples.reset(static_cast<uint8_t *>(av_malloc(newsize)));
      sc->m_decodedAudioSamplesSiz = newsize;
      if (!sc->m_decodedAudioSamples)
      {
         //Can't allocate bytes
         return -1;
      }
   }
   if (frame->data[1]) {
      for (int i = 0; i<frame->nb_samples; i++) {
         for (int ch = 0; ch<channels; ch++) {
            memcpy(sc->m_decodedAudioSamples.get() + sc->m_samplesize * (ch + channels*i),
                  frame->extended_data[ch] + sc->m_samplesize*i,
                  sc->m_samplesize);
         }
      }
   } else {
      memcpy(sc->m_decodedAudioSamples.get(), frame->data[0], newsize);
   }

   // We may not have read all of the data from this packet. If so, the user can call again.
   // Whether or not they do depends on if m_pktRemainingSiz == 0 (they can check).
   sc->m_pktDataPtr += nBytesDecoded;
   sc->m_pktRemainingSiz -= nBytesDecoded;

   // At this point it's normally safe to assume that we've read some samples. However, the MPEG
   // audio decoder is broken. If this is the case then we just return with m_frameValid == 0
   // but m_pktRemainingSiz perhaps != 0, so the user can call again.
   if (sc->m_decodedAudioSamplesValidSiz > 0)
   {
      sc->m_frameValid = 1;
   }
   return 0;
}
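The manual size computation above (m_samplesize * nb_samples * channels) matches what av_samples_get_buffer_size() reports with alignment 1, for packed and planar layouts alike; a cross-check sketch using this example's names:

/* sketch: verify the hand-computed buffer size */
int check = av_samples_get_buffer_size(NULL, channels, frame->nb_samples,
                                       sc->m_samplefmt, 1 /* no alignment */);
/* check == (int)newsize is expected here */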
Example #4
static int
unpack_audio(const char *url, pcm_sound_t *out)
{
  AVFormatContext *fctx;
  AVCodecContext *ctx = NULL;
 
  char errbuf[256];
  fa_handle_t *fh = fa_open_ex(url, errbuf, sizeof(errbuf), 0, NULL);
  if(fh == NULL) {
  fail:
    TRACE(TRACE_ERROR, "audiotest", "Unable to open %s -- %s", url, errbuf);
    return -1;
  }
  AVIOContext *avio = fa_libav_reopen(fh, 0);

  if((fctx = fa_libav_open_format(avio, url, errbuf, sizeof(errbuf), NULL,
                                  0, -1, -1)) == NULL) {
    fa_libav_close(avio);
    goto fail;
  }

  int s;
  for(s = 0; s < fctx->nb_streams; s++) {
    ctx = fctx->streams[s]->codec;

    if(ctx->codec_type != AVMEDIA_TYPE_AUDIO)
      continue;

    const AVCodec *codec = avcodec_find_decoder(ctx->codec_id);
    if(codec == NULL)
      continue;

    if(avcodec_open2(ctx, codec, NULL) < 0) {
      TRACE(TRACE_ERROR, "audiotest", "Unable to codec");
      continue;
    }
    break;
  }

  AVFrame *frame = av_frame_alloc();

  out->samples = 0;
  out->data = NULL;

  while(1) {
    AVPacket pkt;
    int r;

    r = av_read_frame(fctx, &pkt);
    if(r == AVERROR(EAGAIN))
      continue;
    if(r)
      break;
    if(pkt.stream_index == s) {
      int got_frame;
      while(pkt.size) {
	r = avcodec_decode_audio4(ctx, frame, &got_frame, &pkt);
	if(r < 0)
	  break;
	if(got_frame) {
	  int ns = frame->nb_samples * 2;  /* stereo output: the (assumed mono) input is duplicated below */

	  out->data = realloc(out->data, sizeof(int16_t) * (out->samples + ns));

	  const int16_t *src = (const int16_t *)frame->data[0];

	  for(int i = 0; i < frame->nb_samples; i++) {
	    int16_t v = src[i];
	    out->data[out->samples + i * 2 + 0] = v;
	    out->data[out->samples + i * 2 + 1] = v;
	  }
	  out->samples += ns;
	}
	pkt.data += r;
	pkt.size -= r;
      }
    }
    av_free_packet(&pkt);
  }
  av_frame_free(&frame);
  avcodec_close(ctx);
  fa_libav_close_format(fctx);
  return 0;
}
Example #5
jbyteArray stream(JNIEnv *env, jint minbufsize) {
    data_size = 0;
    if (!arr) {
        arr = (char *) malloc(sizeof(char) * minbufsize * 3);
        __android_log_print(ANDROID_LOG_DEBUG, TAG, "sizeof(char) * minbufsize * 3 : %d", sizeof(char) * minbufsize * 3);
    }
    __android_log_print(ANDROID_LOG_DEBUG, TAG, "stream1");
    while (data_size < minbufsize) {
        //__android_log_print(ANDROID_LOG_DEBUG, null, "reading");
        if (pktLeft <= 0) {
            for (; ;) {
                __android_log_print(ANDROID_LOG_DEBUG, TAG, "avpkt->size1:%d", avpkt.size);
                int len = av_read_frame(pFormatCtx, &avpkt);
                //397565 18432
                __android_log_print(ANDROID_LOG_DEBUG, TAG, "avfrm->size2:%d", avpkt.size);
                char errbuf[100];
                __android_log_print(ANDROID_LOG_DEBUG, TAG, "loop->%d", len);
                av_strerror(len, errbuf, sizeof(errbuf));
                __android_log_print(ANDROID_LOG_INFO, TAG, "loop->error:%s", errbuf);
                if (len < 0) {
                    __android_log_print(ANDROID_LOG_DEBUG, TAG, "cannot read packet");
                    data_size = 0;
                    free(arr);
                    arr = NULL;
                    return jarr;
                }
                if (avpkt.stream_index != audioStream) {
                    av_free_packet(&avpkt);
                    continue;
                }

                pktLeft = avpkt.size;
                break;
            }
        }
        int got_frame = 0, len;
        len = avcodec_decode_audio4(pCodecCtx, avfrm, &got_frame, &avpkt);
        __android_log_print(ANDROID_LOG_DEBUG, TAG, "avpkt->time->dts:%lld", avpkt.dts);
        __android_log_print(ANDROID_LOG_DEBUG, TAG, "avpkt->time->pts:%lld", avpkt.pts);
        __android_log_print(ANDROID_LOG_DEBUG, TAG, "stream len-->%d", len);
        //25804800 dts
        //1074563437 pts
        //325955917 duration 255920113
        //10997760
        __android_log_print(ANDROID_LOG_DEBUG, TAG, "pFormatCtx->duration--->%lld", pFormatCtx->duration);
        if (len < 0) {
            av_free_packet(&avpkt);
            break;
        }
        if (got_frame) {
            __android_log_print(ANDROID_LOG_DEBUG, TAG, "stream5");
//            int dsize = av_samples_get_buffer_size(NULL, pCodecCtx->channels,
//                                                   avfrm->nb_samples,
//                                                   pCodecCtx->sample_fmt, 1);
            int resampLen = audioResampling(pCodecCtx, avfrm, AV_SAMPLE_FMT_S16, 2, 44100,
                                            arr + data_size);
            data_size += resampLen;
            pktLeft -= len; // assumes the decoder consumes the packet from its start;
                            // avpkt.data is never advanced for partially consumed packets
        } else {
            av_free_packet(&avpkt);
            break;
        }
    }
    jarr = env->NewByteArray(data_size);
    env->SetByteArrayRegion(jarr, 0, data_size, (jbyte *) arr);

    return jarr;
}
Example #6
bool FFmpegDecoder::GetAudio()
{
    AVPacket pkt;
    av_init_packet(&pkt);
    
    while (true)
    {
        int decode = av_read_frame(av->ctx, &pkt);
        if (decode < 0)
            return false;
        if (pkt.stream_index == av->stream_idx)
            break;
        av_free_packet(&pkt);
    }
    
    av->started = true;
    AVPacket pkt_temp;
    av_init_packet(&pkt_temp);
    pkt_temp.data = pkt.data;
    pkt_temp.size = pkt.size;
    
    AVCodecContext *dec_ctx = av->stream->codec;
    
    while (pkt_temp.size > 0)
    {
        AVFrame frame;
        avcodec_get_frame_defaults(&frame);
        int got_frame = 0;
        int bytes_read = avcodec_decode_audio4(dec_ctx, &frame, &got_frame, &pkt_temp);
        if (bytes_read < 0)
        {
            av_free_packet(&pkt);
            return false;
        }
        // Consume the decoded bytes unconditionally; advancing only when a
        // frame was produced would loop forever on packets that yield no frame.
        pkt_temp.data += bytes_read;
        pkt_temp.size -= bytes_read;

        if (got_frame)
        {
            int channels = dec_ctx->channels;
            enum AVSampleFormat in_fmt = dec_ctx->sample_fmt;
            enum AVSampleFormat out_fmt = AV_SAMPLE_FMT_S16;

            int stride = -1;
            if (channels > 1 && av_sample_fmt_is_planar(in_fmt))
                stride = frame.extended_data[1] - frame.extended_data[0];

            int written = convert_audio(frame.nb_samples, channels,
                                        stride,
                                        in_fmt, frame.extended_data[0],
                                        frame.nb_samples, channels,
                                        -1,
                                        out_fmt, av->temp_data);

            av_fifo_generic_write(av->fifo, av->temp_data, written, NULL);
        }
    }
    
    av_free_packet(&pkt);
    return true;
}
Example #7
	virtual void cpu_task() override
	{
		while (true)
		{
			if (Emu.IsStopped() || is_closed)
			{
				break;
			}

			if (!job.pop(task, &is_closed))
			{
				break;
			}

			switch (task.type)
			{
			case adecStartSeq:
			{
				// TODO: reset data
				cellAdec.warning("adecStartSeq:");

				reader.addr = 0;
				reader.size = 0;
				reader.init = false;
				reader.has_ats = false;
				just_started = true;

				if (adecIsAtracX(type))
				{
					ch_cfg = task.at3p.channel_config;
					ch_out = task.at3p.channels;
					frame_size = task.at3p.frame_size;
					sample_rate = task.at3p.sample_rate;
					use_ats_headers = task.at3p.ats_header == 1;
				}
				break;
			}

			case adecEndSeq:
			{
				// TODO: finalize
				cellAdec.warning("adecEndSeq:");
				cbFunc(*this, id, CELL_ADEC_MSG_TYPE_SEQDONE, CELL_OK, cbArg);
				lv2_obj::sleep(*this);

				just_finished = true;
				break;
			}

			case adecDecodeAu:
			{
				int err = 0;

				reader.addr = task.au.addr;
				reader.size = task.au.size;
				reader.has_ats = use_ats_headers;
				//LOG_NOTICE(HLE, "Audio AU: size = 0x%x, pts = 0x%llx", task.au.size, task.au.pts);

				if (just_started)
				{
					first_pts = task.au.pts;
					last_pts = task.au.pts;
					if (adecIsAtracX(type)) last_pts -= 0x10000; // hack
				}

				struct AVPacketHolder : AVPacket
				{
					AVPacketHolder(u32 size)
					{
						av_init_packet(this);

						if (size)
						{
							data = (u8*)av_calloc(1, size + FF_INPUT_BUFFER_PADDING_SIZE);
							this->size = size + FF_INPUT_BUFFER_PADDING_SIZE;
						}
						else
						{
							data = NULL;
							size = 0;
						}
					}

					~AVPacketHolder()
					{
						av_free(data);
					}

				} au(0);

				if (just_started && just_finished)
				{
					avcodec_flush_buffers(ctx);
					
					reader.init = true; // wrong
					just_finished = false;
					just_started = false;
				}
				else if (just_started) // deferred initialization
				{
					AVDictionary* opts = nullptr;
					av_dict_set(&opts, "probesize", "96", 0);
					err = avformat_open_input(&fmt, NULL, input_format, &opts);
					if (err || opts)
					{
						fmt::throw_exception("avformat_open_input() failed (err=0x%x, opts=%d)" HERE, err, opts ? 1 : 0);
					}
					//err = avformat_find_stream_info(fmt, NULL);
					//if (err || !fmt->nb_streams)
					//{
					//	ADEC_ERROR("adecDecodeAu: avformat_find_stream_info() failed (err=0x%x, nb_streams=%d)", err, fmt->nb_streams);
					//}
					if (!avformat_new_stream(fmt, codec))
					{
						fmt::throw_exception("avformat_new_stream() failed" HERE);
					}
					ctx = fmt->streams[0]->codec; // TODO: check data

					opts = nullptr;
					av_dict_set(&opts, "refcounted_frames", "1", 0);
					{
						std::lock_guard<std::mutex> lock(g_mutex_avcodec_open2);
						// not multithread-safe (???)
						err = avcodec_open2(ctx, codec, &opts);
					}
					if (err || opts)
					{
						fmt::throw_exception("avcodec_open2() failed (err=0x%x, opts=%d)" HERE, err, opts ? 1 : 0);
					}
					just_started = false;
				}

				bool last_frame = false;

				while (true)
				{
					if (Emu.IsStopped() || is_closed)
					{
						if (Emu.IsStopped()) cellAdec.warning("adecDecodeAu: aborted");
						break;
					}

					last_frame = av_read_frame(fmt, &au) < 0;
					if (last_frame)
					{
						//break;
						av_free(au.data);
						au.data = NULL;
						au.size = 0;
					}

					struct AdecFrameHolder : AdecFrame
					{
						AdecFrameHolder()
						{
							data = av_frame_alloc();
						}

						~AdecFrameHolder()
						{
							if (data)
							{
								av_frame_unref(data);
								av_frame_free(&data);
							}
						}

					} frame;

					if (!frame.data)
					{
						fmt::throw_exception("av_frame_alloc() failed" HERE);
					}

					int got_frame = 0;

					int decode = avcodec_decode_audio4(ctx, frame.data, &got_frame, &au);

					if (decode <= 0)
					{
						if (decode < 0)
						{
							cellAdec.error("adecDecodeAu: AU decoding error(0x%x)", decode);
						}
						if (!got_frame && reader.size == 0) break;
					}

					if (got_frame)
					{
						//u64 ts = av_frame_get_best_effort_timestamp(frame.data);
						//if (ts != AV_NOPTS_VALUE)
						//{
						//	frame.pts = ts/* - first_pts*/;
						//	last_pts = frame.pts;
						//}
						last_pts += ((u64)frame.data->nb_samples) * 90000 / frame.data->sample_rate;
						frame.pts = last_pts;

						s32 nbps = av_get_bytes_per_sample((AVSampleFormat)frame.data->format);
						switch (frame.data->format)
						{
						case AV_SAMPLE_FMT_FLTP: break;
						case AV_SAMPLE_FMT_S16P: break;
						default:
						{
							fmt::throw_exception("Unsupported frame format(%d)" HERE, frame.data->format);
						}
						}
						frame.auAddr = task.au.addr;
						frame.auSize = task.au.size;
						frame.userdata = task.au.userdata;
						frame.size = frame.data->nb_samples * frame.data->channels * nbps;

						//LOG_NOTICE(HLE, "got audio frame (pts=0x%llx, nb_samples=%d, ch=%d, sample_rate=%d, nbps=%d)",
							//frame.pts, frame.data->nb_samples, frame.data->channels, frame.data->sample_rate, nbps);

						if (frames.push(frame, &is_closed))
						{
							frame.data = nullptr; // to prevent destruction
							cbFunc(*this, id, CELL_ADEC_MSG_TYPE_PCMOUT, CELL_OK, cbArg);
							lv2_obj::sleep(*this);
						}
					}
				}

				cbFunc(*this, id, CELL_ADEC_MSG_TYPE_AUDONE, task.au.auInfo_addr, cbArg);
				lv2_obj::sleep(*this);
				break;
			}

			case adecClose:
			{
				break;
			}

			default:
			{
				fmt::throw_exception("Unknown task(%d)" HERE, (u32)task.type);
			}
			}
		}

		is_finished = true;
	}
Example #8
/*	decode and play stream. returns 0 or av error code.
 */
static int play (player_t * const player) {
	assert (player != NULL);

	AVPacket pkt;
	av_init_packet (&pkt);
	pkt.data = NULL;
	pkt.size = 0;

	AVFrame *frame = NULL, *filteredFrame = NULL;
	frame = av_frame_alloc ();
	assert (frame != NULL);
	filteredFrame = av_frame_alloc ();
	assert (filteredFrame != NULL);

	while (!player->doQuit) {
		int ret = av_read_frame (player->fctx, &pkt);
		if (ret < 0) {
			av_free_packet (&pkt);
			return ret;
		} else if (pkt.stream_index != player->streamIdx) {
			av_free_packet (&pkt);
			continue;
		}

		AVPacket pkt_orig = pkt;

		/* pausing */
		pthread_mutex_lock (&player->pauseMutex);
		if (player->doPause) {
			av_read_pause (player->fctx);
			do {
				pthread_cond_wait (&player->pauseCond, &player->pauseMutex);
			} while (player->doPause);
			av_read_play (player->fctx);
		}
		pthread_mutex_unlock (&player->pauseMutex);

		while (pkt.size > 0 && !player->doQuit) {
			int got_frame = 0;

			const int decoded = avcodec_decode_audio4 (player->st->codec,
					frame, &got_frame, &pkt);
			if (decoded < 0) {
				/* skip this one */
				break;
			}

			if (got_frame != 0) {
				/* XXX: suppresses warning from resample filter */
				if (frame->pts == (int64_t) AV_NOPTS_VALUE) {
					frame->pts = 0;
				}
				ret = av_buffersrc_write_frame (player->fabuf, frame);
				assert (ret >= 0);

				while (true) {
					if (av_buffersink_get_frame (player->fbufsink, filteredFrame) < 0) {
						/* try again next frame */
						break;
					}

					const int numChannels = av_get_channel_layout_nb_channels (
							filteredFrame->channel_layout);
					const int bps = av_get_bytes_per_sample(filteredFrame->format);
					ao_play (player->aoDev, (char *) filteredFrame->data[0],
							filteredFrame->nb_samples * numChannels * bps);

					av_frame_unref (filteredFrame);
				}
			}

			pkt.data += decoded;
			pkt.size -= decoded;
		}

		av_free_packet (&pkt_orig);

		player->songPlayed = av_q2d (player->st->time_base) * (double) pkt.pts;
		player->lastTimestamp = pkt.pts;
	}

	av_frame_free (&filteredFrame);
	av_frame_free (&frame);

	return 0;
}
Example #9
  int InputProcessor::decodeAudio(unsigned char* inBuff, int inBuffLen,
      unsigned char* outBuff) {

    if (audioDecoder == 0) {
      ELOG_DEBUG("No se han inicializado los parámetros del audioDecoder");
      return -1;
    }

    AVPacket avpkt;
    int outSize;
    int decSize = 0;
    int len = -1;
    uint8_t *decBuff = (uint8_t*) malloc(16000);

    av_init_packet(&avpkt);
    avpkt.data = (unsigned char*) inBuff;
    avpkt.size = inBuffLen;

    while (avpkt.size > 0) {

      outSize = 16000;

      //May fail. Taken from libavcodec/utils.c, from the avcodec_decode_audio3 -> avcodec_decode_audio4 transition.
      //avcodec_decode_audio3(aDecoderContext, (short*)decBuff, &outSize, &avpkt);

      AVFrame frame;
      avcodec_get_frame_defaults(&frame); // the stack frame must be reset before decoding into it
      int got_frame = 0;

      //      aDecoderContext->get_buffer = avcodec_default_get_buffer;
      //      aDecoderContext->release_buffer = avcodec_default_release_buffer;

      len = avcodec_decode_audio4(aDecoderContext, &frame, &got_frame,
          &avpkt);
      if (len >= 0 && got_frame) {
        int plane_size;
        //int planar = av_sample_fmt_is_planar(aDecoderContext->sample_fmt);
        int data_size = av_samples_get_buffer_size(&plane_size,
            aDecoderContext->channels, frame.nb_samples,
            aDecoderContext->sample_fmt, 1);
        if (outSize < data_size) {
          ELOG_DEBUG("output buffer size is too small for the current frame");
          free(decBuff);
          return AVERROR(EINVAL);
        }

        memcpy(decBuff, frame.extended_data[0], plane_size);

        /* If there is more than one channel:
           if (planar && aDecoderContext->channels > 1) {
           uint8_t *out = ((uint8_t *)decBuff) + plane_size;
           for (int ch = 1; ch < aDecoderContext->channels; ch++) {
           memcpy(out, frame.extended_data[ch], plane_size);
           out += plane_size;
           }
           }
           */
        outSize = data_size;
      } else {
        outSize = 0;
      }

      if (len < 0) {
        ELOG_DEBUG("Error al decodificar audio");
        free(decBuff);
        return -1;
      }

      avpkt.size -= len;
      avpkt.data += len;

      if (outSize <= 0) {
        continue;
      }

      memcpy(outBuff, decBuff, outSize);
      outBuff += outSize;
      decSize += outSize;
    }

    free(decBuff);

    if (outSize <= 0) {
      ELOG_DEBUG("Error de decodificación de audio debido a tamaño incorrecto");
      return -1;
    }

    return decSize;

  }
Example #10
int audio_decode_frame(AVCodecContext *aCodecCtx, uint8_t *audio_buf, int buf_size)
{

  static AVPacket pkt;
  static uint8_t *audio_pkt_data = NULL;
  static int audio_pkt_size = 0;
  static AVFrame frame;

  int len1, data_size = 0;
    uint8_t *out[] = {audio_buf};
    int64_t wanted_channel_layout = 0;
    wanted_channel_layout = aCodecCtx->channel_layout;
    
    SwrContext* t_audio_conv = swr_alloc_set_opts(NULL,
                                                  wanted_channel_layout,AV_SAMPLE_FMT_S16,aCodecCtx->sample_rate,
                                                  wanted_channel_layout,aCodecCtx->sample_fmt, aCodecCtx->sample_rate,
                                                  0,NULL);
    swr_init(t_audio_conv);
    
    for (; ;)
    {
        while(audio_pkt_size > 0)
        {
            int got_frame = 0;
            len1 = avcodec_decode_audio4(aCodecCtx, &frame, &got_frame, &pkt);
            if(len1 < 0)
            {
                /* if error, skip frame */
                audio_pkt_size = 0;
                break;
            }
            audio_pkt_data += len1;
            audio_pkt_size -= len1;
            if (got_frame)
            {
                /*
                data_size =
                av_samples_get_buffer_size
                (
                 NULL,
                 aCodecCtx->channels,
                 frame.nb_samples,
                 aCodecCtx->sample_fmt,
                 1
                 );
                memcpy(audio_buf, frame.data[0], data_size);
                */
                int size1 = av_get_bytes_per_sample(AV_SAMPLE_FMT_S16);
                int len = swr_convert(t_audio_conv,
                                      out, buf_size/aCodecCtx->channels/size1,
                                      (const uint8_t **)frame.extended_data, frame.nb_samples);
                
                len = len * aCodecCtx->channels * av_get_bytes_per_sample(AV_SAMPLE_FMT_S16);
                
                //av_free_packet(&pkt);
                av_packet_unref(&pkt); // use this instead of av_free_packet
                //av_frame_free(&pAudioFrame);
                swr_free(&t_audio_conv);
                return len;
                
            }
            // data_size is never set on this path (the got_frame branch above
            // returns directly), so this always loops back for more packets.
            if(data_size <= 0)
            {
                /* No data yet, get more frames */
                continue;
            }
            /* We have data, return it and come back for more later */
            return data_size;
        }
        if(pkt.data)
        {
            //av_free_packet(&pkt);
            av_packet_unref(&pkt);
        }
        if(quit)
        {
            return -1;
        }
        
        if(packet_queue_get(&audioq, &pkt, 1) < 0)
        {
            return -1;
        }
        audio_pkt_data = pkt.data;
        audio_pkt_size = pkt.size;
        
    }
}
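Note: audio_decode_frame() above allocates, initializes, and frees a SwrContext on every call. A sketch of reusing a single context instead (static here only to mirror the example's other statics; the names are taken from the example):

static SwrContext *t_audio_conv = NULL;
if (!t_audio_conv) {
    t_audio_conv = swr_alloc_set_opts(NULL,
                                      wanted_channel_layout, AV_SAMPLE_FMT_S16, aCodecCtx->sample_rate,
                                      wanted_channel_layout, aCodecCtx->sample_fmt, aCodecCtx->sample_rate,
                                      0, NULL);
    swr_init(t_audio_conv);
}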
Example #11
static gint
gst_ffmpegauddec_audio_frame (GstFFMpegAudDec * ffmpegdec,
    AVCodec * in_plugin, guint8 * data, guint size,
    GstBuffer ** outbuf, GstFlowReturn * ret)
{
  gint len = -1;
  gint have_data = AVCODEC_MAX_AUDIO_FRAME_SIZE;
  AVPacket packet;
  AVFrame frame;

  GST_DEBUG_OBJECT (ffmpegdec, "size: %d", size);

  gst_avpacket_init (&packet, data, size);
  memset (&frame, 0, sizeof (frame));
  avcodec_get_frame_defaults (&frame);
  len = avcodec_decode_audio4 (ffmpegdec->context, &frame, &have_data, &packet);

  GST_DEBUG_OBJECT (ffmpegdec,
      "Decode audio: len=%d, have_data=%d", len, have_data);

  if (len >= 0 && have_data > 0) {
    BufferInfo *buffer_info = frame.opaque;
    gint nsamples, channels, byte_per_sample;
    gsize output_size;

    if (!gst_ffmpegauddec_negotiate (ffmpegdec, FALSE)) {
      *outbuf = NULL;
      *ret = GST_FLOW_NOT_NEGOTIATED;
      len = -1;
      goto beach;
    }

    channels = ffmpegdec->info.channels;
    nsamples = frame.nb_samples;
    byte_per_sample = ffmpegdec->info.finfo->width / 8;

    /* frame.linesize[0] might contain padding, allocate only what's needed */
    output_size = nsamples * byte_per_sample * channels;

    GST_DEBUG_OBJECT (ffmpegdec, "Creating output buffer");
    if (buffer_info) {
      *outbuf = buffer_info->buffer;
      gst_buffer_unmap (buffer_info->buffer, &buffer_info->map);
      g_slice_free (BufferInfo, buffer_info);
      frame.opaque = NULL;
    } else if (av_sample_fmt_is_planar (ffmpegdec->context->sample_fmt)
        && channels > 1) {
      gint i, j;
      GstMapInfo minfo;

      /* note: linesize[0] might contain padding, allocate only what's needed */
      *outbuf =
          gst_audio_decoder_allocate_output_buffer (GST_AUDIO_DECODER
          (ffmpegdec), output_size);

      gst_buffer_map (*outbuf, &minfo, GST_MAP_WRITE);

      switch (ffmpegdec->info.finfo->width) {
        case 8:{
          guint8 *odata = minfo.data;

          for (i = 0; i < nsamples; i++) {
            for (j = 0; j < channels; j++) {
              odata[j] = ((const guint8 *) frame.extended_data[j])[i];
            }
            odata += channels;
          }
          break;
        }
        case 16:{
          guint16 *odata = (guint16 *) minfo.data;

          for (i = 0; i < nsamples; i++) {
            for (j = 0; j < channels; j++) {
              odata[j] = ((const guint16 *) frame.extended_data[j])[i];
            }
            odata += channels;
          }
          break;
        }
        case 32:{
          guint32 *odata = (guint32 *) minfo.data;

          for (i = 0; i < nsamples; i++) {
            for (j = 0; j < channels; j++) {
              odata[j] = ((const guint32 *) frame.extended_data[j])[i];
            }
            odata += channels;
          }
          break;
        }
        case 64:{
          guint64 *odata = (guint64 *) minfo.data;

          for (i = 0; i < nsamples; i++) {
            for (j = 0; j < channels; j++) {
              odata[j] = ((const guint64 *) frame.extended_data[j])[i];
            }
            odata += channels;
          }
          break;
        }
        default:
          g_assert_not_reached ();
          break;
      }
      gst_buffer_unmap (*outbuf, &minfo);
    } else {
      *outbuf =
          gst_audio_decoder_allocate_output_buffer (GST_AUDIO_DECODER
          (ffmpegdec), output_size);
      gst_buffer_fill (*outbuf, 0, frame.data[0], output_size);
    }

    GST_DEBUG_OBJECT (ffmpegdec, "Buffer created. Size: %d", have_data);

    /* Reorder channels to the GStreamer channel order */
    if (ffmpegdec->needs_reorder) {
      *outbuf = gst_buffer_make_writable (*outbuf);
      gst_audio_buffer_reorder_channels (*outbuf, ffmpegdec->info.finfo->format,
          ffmpegdec->info.channels, ffmpegdec->ffmpeg_layout,
          ffmpegdec->info.position);
    }
  } else {
    *outbuf = NULL;
  }

beach:
  GST_DEBUG_OBJECT (ffmpegdec, "return flow %d, out %p, len %d",
      *ret, *outbuf, len);
  return len;
}
Example #12
static gboolean ffaudio_play (const gchar * filename, VFSFile * file)
{
    AUDDBG ("Playing %s.\n", filename);
    if (! file)
        return FALSE;

    AVPacket pkt = {.data = NULL};
    gint errcount;
    gboolean codec_opened = FALSE;
    gint out_fmt;
    gboolean planar;
    gboolean seekable;
    gboolean error = FALSE;

    void *buf = NULL;
    gint bufsize = 0;

    AVFormatContext * ic = open_input_file (filename, file);
    if (! ic)
        return FALSE;

    CodecInfo cinfo;

    if (! find_codec (ic, & cinfo))
    {
        fprintf (stderr, "ffaudio: No codec found for %s.\n", filename);
        goto error_exit;
    }

    AUDDBG("got codec %s for stream index %d, opening\n", cinfo.codec->name, cinfo.stream_idx);

    if (avcodec_open2 (cinfo.context, cinfo.codec, NULL) < 0)
        goto error_exit;

    codec_opened = TRUE;

    switch (cinfo.context->sample_fmt)
    {
        case AV_SAMPLE_FMT_U8: out_fmt = FMT_U8; planar = FALSE; break;
        case AV_SAMPLE_FMT_S16: out_fmt = FMT_S16_NE; planar = FALSE; break;
        case AV_SAMPLE_FMT_S32: out_fmt = FMT_S32_NE; planar = FALSE; break;
        case AV_SAMPLE_FMT_FLT: out_fmt = FMT_FLOAT; planar = FALSE; break;

        case AV_SAMPLE_FMT_U8P: out_fmt = FMT_U8; planar = TRUE; break;
        case AV_SAMPLE_FMT_S16P: out_fmt = FMT_S16_NE; planar = TRUE; break;
        case AV_SAMPLE_FMT_S32P: out_fmt = FMT_S32_NE; planar = TRUE; break;
        case AV_SAMPLE_FMT_FLTP: out_fmt = FMT_FLOAT; planar = TRUE; break;

    default:
        fprintf (stderr, "ffaudio: Unsupported audio format %d\n", (int) cinfo.context->sample_fmt);
        goto error_exit;
    }

    /* Open audio output */
    AUDDBG("opening audio output\n");

    if (aud_input_open_audio(out_fmt, cinfo.context->sample_rate, cinfo.context->channels) <= 0)
    {
        error = TRUE;
        goto error_exit;
    }

    AUDDBG("setting parameters\n");

    aud_input_set_bitrate(ic->bit_rate);

    errcount = 0;
    seekable = ffaudio_codec_is_seekable(cinfo.codec);

    while (! aud_input_check_stop ())
    {
        int seek_value = aud_input_check_seek ();

        if (seek_value >= 0 && seekable)
        {
            if (av_seek_frame (ic, -1, (gint64) seek_value * AV_TIME_BASE /
             1000, AVSEEK_FLAG_ANY) < 0)
            {
                _ERROR("error while seeking\n");
            } else
                errcount = 0;

            seek_value = -1;
        }

        AVPacket tmp;
        gint ret;

        /* Read next frame (or more) of data */
        if ((ret = av_read_frame(ic, &pkt)) < 0)
        {
            if (ret == AVERROR_EOF)
            {
                AUDDBG("eof reached\n");
                break;
            }
            else
            {
                if (++errcount > 4)
                {
                    _ERROR("av_read_frame error %d, giving up.\n", ret);
                    break;
                } else
                    continue;
            }
        } else
            errcount = 0;

        /* Ignore any other substreams */
        if (pkt.stream_index != cinfo.stream_idx)
        {
            av_free_packet(&pkt);
            continue;
        }

        /* Decode and play packet/frame */
        memcpy(&tmp, &pkt, sizeof(tmp));
        while (tmp.size > 0 && ! aud_input_check_stop ())
        {
            /* Check for seek request and bail out if we have one */
            if (seek_value < 0)
                seek_value = aud_input_check_seek ();

            if (seek_value >= 0)
                break;

#if CHECK_LIBAVCODEC_VERSION (55, 28, 1)
            AVFrame * frame = av_frame_alloc ();
#else
            AVFrame * frame = avcodec_alloc_frame ();
#endif
            int decoded = 0;
            int len = avcodec_decode_audio4 (cinfo.context, frame, & decoded, & tmp);

            if (len < 0)
            {
                fprintf (stderr, "ffaudio: decode_audio() failed, code %d\n", len);
                /* free the frame before bailing out so it is not leaked */
#if CHECK_LIBAVCODEC_VERSION (55, 28, 1)
                av_frame_free (& frame);
#else
                avcodec_free_frame (& frame);
#endif
                break;
            }

            tmp.size -= len;
            tmp.data += len;

            /* free the frame on this path too, or it leaks every iteration */
            if (! decoded)
            {
#if CHECK_LIBAVCODEC_VERSION (55, 28, 1)
                av_frame_free (& frame);
#else
                avcodec_free_frame (& frame);
#endif
                continue;
            }

            gint size = FMT_SIZEOF (out_fmt) * cinfo.context->channels * frame->nb_samples;

            if (planar)
            {
                if (bufsize < size)
                {
                    buf = g_realloc (buf, size);
                    bufsize = size;
                }

                audio_interlace ((const void * *) frame->data, out_fmt,
                 cinfo.context->channels, buf, frame->nb_samples);
                aud_input_write_audio (buf, size);
            }
            else
                aud_input_write_audio (frame->data[0], size);

#if CHECK_LIBAVCODEC_VERSION (55, 28, 1)
            av_frame_free (& frame);
#else
            avcodec_free_frame (& frame);
#endif
        }

        if (pkt.data)
            av_free_packet(&pkt);
    }

error_exit:
    if (pkt.data)
        av_free_packet(&pkt);
    if (codec_opened)
        avcodec_close(cinfo.context);
    if (ic != NULL)
        close_input_file(ic);

    g_free (buf);

    return ! error;
}
Example #13
static int decode_packet(AVCodecContext *dec_ctx, FILE *dst_file, AVFrame *frame, int *got_frame, int *frame_count, AVPacket *pkt)
{
    int ret = -1;
    *got_frame = 0;
    AVSubtitle sub;
    unsigned i, j, k, l;

    if (dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO) {
        ret = 0;
        /* decode video frame */
        ret = avcodec_decode_video2(dec_ctx, frame, got_frame, pkt);
        if (ret < 0) {
            fprintf(stderr, "Error decoding video frame (%s)\n", av_err2str(ret));
            return ret;
        }

        if (*got_frame) {

            if (frame->width != width || frame->height != height ||
                frame->format != pix_fmt) {
                fprintf(stderr, "Error: input video width/height/format changed:\n"
                        "old: width = %d, height = %d, format = %s\n"
                        "new: width = %d, height = %d, format = %s\n",
                        width, height, av_get_pix_fmt_name(pix_fmt),
                        dec_ctx->width, dec_ctx->height,
                        av_get_pix_fmt_name(dec_ctx->pix_fmt));
                return -1;
            }

            printf("video_frame n:%d coded_n:%d pts:%s\n",
                   *frame_count, frame->coded_picture_number,
                   av_ts2timestr(frame->pts, &dec_ctx->time_base));

            /* copy decoded frame to destination buffer:
             * this is required since rawvideo expects non aligned data */
            av_image_copy(video_dst_data, video_dst_linesize,
                          (const uint8_t **)(frame->data), frame->linesize,
                          pix_fmt, width, height);
            *frame_count += 1;

            /* write to rawvideo file */
            fwrite(video_dst_data[0], 1, video_dst_bufsize, dst_file);
        }
    } else if (dec_ctx->codec_type == AVMEDIA_TYPE_AUDIO) {
        ret = 0;
        /* decode audio frame */
        ret = avcodec_decode_audio4(dec_ctx, frame, got_frame, pkt);
        if (ret < 0) {
            fprintf(stderr, "Error decoding audio frame (%s)\n", av_err2str(ret));
            return ret;
        }
        /* Some audio decoders decode only part of the packet, and have to be
         * called again with the remainder of the packet data.
         * Sample: fate-suite/lossless-audio/luckynight-partial.shn
         * Also, some decoders might over-read the packet. */
        ret = FFMIN(ret, pkt->size);

        if (*got_frame) {
            size_t unpadded_linesize = frame->nb_samples * av_get_bytes_per_sample(frame->format);
            printf("audio_frame n:%d nb_samples:%d pts:%s\n",
                   *frame_count, frame->nb_samples,
                   av_ts2timestr(frame->pts, &dec_ctx->time_base));
            *frame_count += 1;

            /* Write the raw audio data samples of the first plane. This works
             * fine for packed formats (e.g. AV_SAMPLE_FMT_S16). However,
             * most audio decoders output planar audio, which uses a separate
             * plane of audio samples for each channel (e.g. AV_SAMPLE_FMT_S16P).
             * In other words, this code will write only the first audio channel
             * in these cases.
             * You should use libswresample or libavfilter to convert the frame
             * to packed data. */
            fwrite(frame->extended_data[0], 1, unpadded_linesize, dst_file);
        }
    } else if (dec_ctx->codec_type == AVMEDIA_TYPE_SUBTITLE) {
        ret = 0;
        /* decode video frame */
        ret = avcodec_decode_subtitle2(dec_ctx, &sub, got_frame, pkt);
        if (ret < 0) {
            fprintf(stderr, "Error decoding subtitle (%s)\n", av_err2str(ret));
            return ret;
        }

        if (*got_frame) {

            printf("subtitle n:%d format:%u pts:%s start_time:%u end_time:%u num_recs:%u\n",
                   *frame_count, sub.format,
                   av_ts2timestr(sub.pts, &dec_ctx->time_base),
                   sub.start_display_time, sub.end_display_time, sub.num_rects);

            *frame_count += 1;

            /* write to text file */
            for (i = 0; i < sub.num_rects; i += 1) {
                fprintf(dst_file, "x:%d y:%d w:%d h:%d nb_colors:%d flags:%x linesizes:%d,%d,%d,%d,%d,%d,%d,%d\n"
                        "text:%s\nass:%s\n",
                        sub.rects[i]->x, sub.rects[i]->y, sub.rects[i]->w, sub.rects[i]->h,
                        sub.rects[i]->nb_colors, sub.rects[i]->flags,
                        sub.rects[i]->pict.linesize[0], sub.rects[i]->pict.linesize[1],
                        sub.rects[i]->pict.linesize[2], sub.rects[i]->pict.linesize[3],
                        sub.rects[i]->pict.linesize[4], sub.rects[i]->pict.linesize[5],
                        sub.rects[i]->pict.linesize[6], sub.rects[i]->pict.linesize[7],
                        sub.rects[i]->text, sub.rects[i]->ass);
                for (j = 0; j < AV_NUM_DATA_POINTERS; j += 1) {
                    if (sub.rects[i]->pict.linesize[j]) {
                        fprintf(dst_file, "data:%d\n", j);
                        for (k = 0; k < sub.rects[i]->h; k += 1) {
                            for (l = 0; l < sub.rects[i]->w; l += 1) {
                                fprintf(dst_file, "%x", sub.rects[i]->pict.data[j][l + k * sub.rects[i]->pict.linesize[j]]);
                            }
                            fprintf(dst_file, "\n");
                        }
                    }
                }
            }
            avsubtitle_free(&sub);
        }
    }

    /* de-reference the frame, which is not used anymore */
    if (*got_frame)
        av_frame_unref(frame);

    return ret;
}
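For reference, a decode_packet() like this one (it follows FFmpeg's demuxing example) is normally driven by a read loop that re-feeds the unconsumed remainder of each packet. A sketch, assuming fmt_ctx, dec_ctx, dst_file, frame, got_frame, and frame_count were set up during demuxer/decoder initialization:

AVPacket pkt;
av_init_packet(&pkt);
while (av_read_frame(fmt_ctx, &pkt) >= 0) {
    AVPacket orig_pkt = pkt;
    do {
        int ret = decode_packet(dec_ctx, dst_file, frame, &got_frame, &frame_count, &pkt);
        if (ret < 0)
            break;
        pkt.data += ret;        /* skip past the bytes the decoder consumed */
        pkt.size -= ret;
    } while (pkt.size > 0);
    av_free_packet(&orig_pkt);
}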
Example #14
uint32_t MythRAOPConnection::decodeAudioPacket(uint8_t type,
        const QByteArray *buf,
        QList<AudioData> *dest)
{
    const char *data_in = buf->constData();
    int len             = buf->size();
    if (type == AUDIO_RESEND)
    {
        data_in += 4;
        len     -= 4;
    }
    data_in += 12;
    len     -= 12;
    if (len < 16)
        return -1;

    int aeslen = len & ~0xf;
    unsigned char iv[16];
    unsigned char decrypted_data[MAX_PACKET_SIZE];
    memcpy(iv, m_AESIV.constData(), sizeof(iv));
    AES_cbc_encrypt((const unsigned char*)data_in,
                    decrypted_data, aeslen,
                    &m_aesKey, iv, AES_DECRYPT);
    memcpy(decrypted_data + aeslen, data_in + aeslen, len - aeslen);

    AVPacket tmp_pkt;
    AVCodecContext *ctx = m_codeccontext;

    av_init_packet(&tmp_pkt);
    tmp_pkt.data = decrypted_data;
    tmp_pkt.size = len;

    uint32_t frames_added = 0;
    while (tmp_pkt.size > 0)
    {
        AVFrame *frame = avcodec_alloc_frame();
        int got_frame = 0;
        int ret = avcodec_decode_audio4(ctx, frame, &got_frame, &tmp_pkt);

        if (ret < 0)
        {
            LOG(VB_GENERAL, LOG_ERR, LOC + QString("Error decoding audio"));
            av_free(frame); // don't leak the frame allocated above
            return -1;
        }

        if (ret > 0 && got_frame)
        {
            int decoded_size =
                av_samples_get_buffer_size(NULL, ctx->channels,
                                           frame->nb_samples,
                                           ctx->sample_fmt, 1);
            frame->linesize[0] = decoded_size;
            int frames = frame->nb_samples;

            frames_added += frames;
            dest->append(frame);
        }
        else
        {
            av_free(frame); // no frame was produced; release it
        }
        tmp_pkt.data += ret;
        tmp_pkt.size -= ret;
    }
    return frames_added;
}
Example #15
void PhVideoDecoder::decodeFrame()
{
	if(!ready()) {
		PHDBG(24) << "not ready";
		return;
	}

	if (_requestedFrames.empty()) {
		// all pending requests have been cancelled
		return;
	}

	// now proceed with the first requested frame
	PhVideoBuffer *buffer = _requestedFrames.takeFirst();
	PhFrame frame = buffer->requestFrame();

	// resize the buffer if needed
	int bufferSize = avpicture_get_size(AV_PIX_FMT_BGRA, width(), height());
	if(bufferSize <= 0) {
		PHERR << "avpicture_get_size() returned" << bufferSize;
		return;
	}
	buffer->reuse(bufferSize);

	// clip to stream boundaries
	if(frame < 0)
		frame = 0;
	if (frame >= this->frameLength())
		frame = this->frameLength();

	// Stay with the same frame if the time has changed less than the time between two frames
	// Note that av_seek_frame will seek to the _closest_ frame, sometimes a little bit in the "future",
	// so it is necessary to use a little margin for the second comparison, otherwise a seek may
	// be performed on each call to decodeFrame
	if (frame == _currentFrame) {
		frameToRgb(_videoFrame, buffer);
		return;
	}

	// we need to perform a frame seek if the requested frame is:
	// 1) in the past
	// 2) after the next keyframe
	//      how to know when the next keyframe is ??
	//      -> for now we take an arbitrary threshold of 20 frames
	if((frame < _currentFrame) || (frame >= _currentFrame + 20)) {
		// seek to the closest keyframe in the past
		int flags = AVSEEK_FLAG_BACKWARD;
		int64_t timestamp = PhFrame_to_AVTimestamp(frame);
		PHDBG(24) << "seek:" << buffer << " " << _currentFrame << " " << frame - _currentFrame << " " << timestamp;
		av_seek_frame(_formatContext, _videoStream->index, timestamp, flags);

		avcodec_flush_buffers(_videoStream->codec);
	}

	AVPacket packet;

	bool lookingForVideoFrame = true;
	while(lookingForVideoFrame) {
		int error = av_read_frame(_formatContext, &packet);
		switch(error) {
		case 0:
			if(packet.stream_index == _videoStream->index) {
				int frameFinished = 0;
				avcodec_decode_video2(_videoStream->codec, _videoFrame, &frameFinished, &packet);
				if(frameFinished) {
					// update the current position of the engine
					// (Note that it is best not to use '_currentTime = time' here, because the seeking operation may
					// not be 100% accurate: the actual time may be different from the requested time. So a time drift
					// could appear.)
					_currentFrame = AVTimestamp_to_PhFrame(av_frame_get_best_effort_timestamp(_videoFrame));

					PHDBG(24) << frame << _currentFrame;

					if (frame < _currentFrame) {
						// something went wrong with the seeking
						// this is not going to work! we cannot go backward!
						// the loop will go until the end of the file, which is bad...
						// So stop here and just return what we have.
						PHERR << "current video time is larger than requested time... returning current frame!";
						frameToRgb(_videoFrame, buffer);
						lookingForVideoFrame = false;
					}

					// convert and emit the frame if this is the one that was requested
					if (frame == _currentFrame) {
						PHDBG(24) << "decoded!";
						frameToRgb(_videoFrame, buffer);
						lookingForVideoFrame = false;
					}
				} // if frame decode is not finished, let's read another packet.
			}
			else if(_audioStream && (packet.stream_index == _audioStream->index)) {
				int ok = 0;
				avcodec_decode_audio4(_audioStream->codec, _audioFrame, &ok, &packet);
				if(ok) {
					PHDBG(24) << "audio:" << _audioFrame->nb_samples;
				}
			}
			break;
		case AVERROR_INVALIDDATA:
		case AVERROR_EOF:
		default:
			{
				char errorStr[256];
				av_strerror(error, errorStr, 256);
				PHDBG(24) << frame << "error:" << errorStr;
				lookingForVideoFrame = false;
				break;
			}
		}

		//Avoid memory leak
		av_free_packet(&packet);
	}
}
Example #16
/**
 * A callback function that decodes an audio packet. This should be supplied
 * to the <code>decodePacketTemplate</code> template.
 *
 * @param codecContext - the codec context that will be used to decode the callback.
 * @param packet - the packet that is to be decoded.
 * @return the decoded packet as a vector of AVFrames.
 */
static vector<AVFrame*> decodeAudioPacketCallback(AVCodecContext *codecContext,
        AVPacket *packet) {

    if (AVMEDIA_TYPE_AUDIO != findCodecType(codecContext)) {

        throw IllegalArgumentException(
                "The supplied codec context for decoding audio must have media type AVMEDIA_TYPE_AUDIO.");
    }

    // This vector will contain all the audio frames decoded from the supplied packet.
    vector<AVFrame*> frames;

    // The frame pointer that will hold each new frame before it is placed in the vector.
    AVFrame *decodedFrame = NULL;

    // This will be set to 1 if a frame has successfully been decoded with the
    // avcodec_decode_audio4() function.
    int frameDecoded = 0;

    int bytesDecoded = 0; // The number of bytes that were decoded in each iteration.

    while (0 < packet->size) {
        // Create a new frame to contain the decoded data if one is required.
        if (NULL == decodedFrame) decodedFrame = avcodec_alloc_frame();

        // Decode the packet and store it in the new frame.
        // Also record how many bytes were decoded because it might not have been all
        // of them.
        bytesDecoded = avcodec_decode_audio4(codecContext, decodedFrame,
                &frameDecoded,
                packet);

        // If there is an invalid data error throw a more specific exception.
        if (AVERROR_INVALIDDATA == bytesDecoded) {

            throw InvalidPacketDataException(errorMessage(bytesDecoded));
        }

        if (0 > bytesDecoded) {

            throw PacketDecodeException(errorMessage(bytesDecoded));
        }

        // If a frame was successfully decoded add it the vector to be returned and
        // set the pointer to null to indicate we need a new frame allocated.
        if (0 != frameDecoded) {

            frames.push_back(decodedFrame);

            decodedFrame = NULL;

        } else {
            // If we haven't successfully decoded a frame reset the decode frame
            // values to make sure it's ready for another decode attempt.
            avcodec_get_frame_defaults(decodedFrame);
        }

        // Advance the data pointer past the last byte that we decoded.
        packet->data += bytesDecoded;
        // Reduce the relative size of the data to the amount that is yet to be
        // decoded.
        packet->size -= bytesDecoded;
    }

    return frames;
}
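A usage sketch for the callback above (codecContext and packet are assumed to be prepared by the surrounding template): the returned vector owns the decoded frames, so the caller must release them.

vector<AVFrame*> frames = decodeAudioPacketCallback(codecContext, packet);

for (size_t i = 0; i < frames.size(); ++i) {
    // ... consume frames[i] ...
    av_free(frames[i]); // frames come from avcodec_alloc_frame(), so av_free() releases them
}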
Example #17
void ffplayer::AudioDecoderThreadRun()
{
	while (1){
		SDL_Delay(10);
		if (m_bStop){
			break;
		}
		PInfo pInfo = m_AudioPackBuf.getInfoFromList();
		if (!pInfo) // no packet available yet; wait and retry
		{
			continue;
		}
		
		AVPacket packet;
		av_init_packet(&packet);
		packet.data = pInfo->Data;
		packet.size = pInfo->DataLen;

		int got;
		int len=avcodec_decode_audio4(m_pAudioCodecCtx, m_pAudioFrame, &got, &packet);

		if (len > 0 && got)
		{
			Uint8 resampleBuf[PCMBUFLEN];

			//	int decodeLen = AudioResampling(m_pAudioCodecCtx, m_pAudioFrame, (Uint8*) resampleBuf);

			if (!m_au_convert_ctx){
				m_au_convert_ctx = swr_alloc();
				int channel_layout = av_get_default_channel_layout(m_pAudioCodecCtx->channels);

				m_au_convert_ctx = swr_alloc_set_opts(m_au_convert_ctx, channel_layout, AV_SAMPLE_FMT_S16, m_pAudioCodecCtx->sample_rate,
					channel_layout, m_pAudioCodecCtx->sample_fmt, m_pAudioCodecCtx->sample_rate, 0, NULL);
				swr_init(m_au_convert_ctx);

				//	av_samples_alloc_array_and_samples(&dst_data, &dst_linesize, m_pAudioCodecCtx->channels,
				//	2048, m_pAudioCodecCtx->sample_fmt, 0);
			}
			Uint8 *dst_data[] = { resampleBuf };
			// swr_convert takes the output capacity in samples per channel, not bytes
			int maxOutSamples = PCMBUFLEN / (m_out_channels * av_get_bytes_per_sample((AVSampleFormat)m_out_sample_fmt));
			int ret = swr_convert(m_au_convert_ctx, (Uint8**)dst_data, maxOutSamples, (const uint8_t **)m_pAudioFrame->data, m_pAudioFrame->nb_samples);
			int decodeLen = ret* m_out_channels * av_get_bytes_per_sample((AVSampleFormat)m_out_sample_fmt);
			if (m_fVolume == 0.0)
			{
				memset(resampleBuf, 0, decodeLen);
			}
			else if (m_fVolume>0.0&&m_fVolume<1.0)
			{
				short *pcm = (short*)resampleBuf;
				for (int i = 0; i < decodeLen / 2; i++)
				{
					*pcm = (*pcm)*m_fVolume;
					pcm++;
				}
			}

			PInfo pPcmInfo = m_FreePcmPackBuf.getFreeInfoFromList(decodeLen);

			memcpy(pPcmInfo->Data, resampleBuf, decodeLen);
			pPcmInfo->DataLen = decodeLen;
			pPcmInfo->frameInfo.iTimestampObsolute = pInfo->frameInfo.iTimestampObsolute;
			pPcmInfo->frameInfo.serial = pInfo->frameInfo.serial;
			/*
			AVRational tb;
				tb.num = 1;
			tb.den = m_pAudioFrame->sample_rate;
			//(AVRational){ 1, m_pAudioFrame->sample_rate };
			if (m_pAudioFrame->pts != AV_NOPTS_VALUE)
				m_pAudioFrame->pts = av_rescale_q(m_pAudioFrame->pts, m_pAudioCodecCtx->time_base, tb);
			else if (m_pAudioFrame->pkt_pts != AV_NOPTS_VALUE)
				m_pAudioFrame->pts = av_rescale_q(m_pAudioFrame->pkt_pts, av_codec_get_pkt_timebase(m_pAudioCodecCtx), tb);
			//pPcmInfo->frameInfo.iTimestampObsolute = m_pAudioFrame->pkt_pts;*/
			
			while (m_PcmPackBuf.getCurCount() >= MAX_PCM_SIZE)
			{
				SDL_Delay(10);
				if (m_bStop){
					CBuffer::freeBuf(&pPcmInfo);
					return;
				}
			}
			if (m_bSeekState || pPcmInfo->frameInfo.serial!=m_serial)
			{
				m_FreePcmPackBuf.insertList(pPcmInfo); 
			}
			else
			{
				m_PcmPackBuf.insertList(pPcmInfo);
			}

		}
		m_FreeAudioPackBuf.insertList(pInfo);	
	}
}
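The swr_convert() call above appears to pass PCMBUFLEN, a byte count, as the output capacity, but swr_convert() expects the capacity in samples per channel. A minimal sketch of the safer computation, reusing the member names from the example:

Uint8 resampleBuf[PCMBUFLEN];
Uint8 *dst_data[] = { resampleBuf };
// Capacity in samples per channel: total bytes / (channels * bytes per sample).
int out_capacity = PCMBUFLEN /
        (m_out_channels * av_get_bytes_per_sample((AVSampleFormat)m_out_sample_fmt));
int out_samples = swr_convert(m_au_convert_ctx, dst_data, out_capacity,
        (const uint8_t **)m_pAudioFrame->data, m_pAudioFrame->nb_samples);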
Beispiel #18
0
int Libav::decodeNextAudio(void) throw (AVException*)
{
	
	int bytes;
	int numSamples;
	
	// int bytesPerSample = (this->getSampleSize() * this->getSampleChannels());
	
	
	//NSLog(@"> [libav decodeNextAudio] sample counter: %d",sampleCounter);
	// there must be another way to initialise the audio decoder than this sampleCounter test.
	
	if (!this->isOpen)
		this->open();
	
	if (sampleCounter == 0)
	{
		//pFrame=avcodec_alloc_frame();
		pFrame=av_frame_alloc();
		if (!pFrame)
			throw new AVException ("Unable to allocate AUDIO buffer",MEMORY_ALLOCATION_ERROR);
		//avcodec_get_frame_defaults(pFrame);		
		av_frame_unref(pFrame);
	}
	
	// We don't know the sample rate (SPS) until we decode the first frame,
	// and we cannot calculate SamplesOut without it. Assume that once
	// sampleCounter is above 0 the SPS has been set.
	
	//	NSLog (@"sampleCounter: %d  samplesOut: %d", sampleCounter, [self getSamplesOut]);
	
	if (sampleCounter > 0) 
		if (this->compareSamplesRange(sampleCounter) > 0)
			return -1;
	
	// loop until we are past the frame-in marker
	do {
		//	NSLog(@"[libav decodeNextAudio] before IN loop");
		
#if LIBAVFORMAT_VERSION_MAJOR  < 54
		;
#else
		AVFrame *iFrame;
#endif
		int gotFrame = 1;
		
		// loop until "gotFrame"
		do {
			//  NSLog(@"[libav decodeNextAudio] gotFrame");
			
			// Find our specified stream
			do {
				// NSLog(@"[libav decodeNextAudio] != avStream: %d",avStream);
				
				bytes = av_read_frame(pFormatCtx, &packet);
				if (bytes < 0) 
					return bytes;
			} while (packet.stream_index != avStream) ;
			
			// int len;
			
#if LIBAVFORMAT_VERSION_MAJOR  < 50
			avcodec_decode_audio2(pCodecCtx, aBuffer, &numBytes, packet.data, packet.size);
#elif LIBAVFORMAT_VERSION_MAJOR  < 54
			avcodec_decode_audio3(pCodecCtx, aBuffer, &numBytes, &packet);
#else				
			gotFrame = 0;
			//iFrame=avcodec_alloc_frame();
			iFrame=av_frame_alloc();
			//avcodec_get_frame_defaults(iFrame);
			av_frame_unref(iFrame);
			
			avcodec_decode_audio4(pCodecCtx, iFrame, &gotFrame, &packet);
			//			NSLog (@"avcodec_decode_audio4 len: %d gotFrame: %d",len,gotFrame);
			
			if (gotFrame) {
				/* if a frame has been decoded, output it */
				bytes = av_samples_get_buffer_size(NULL, pCodecCtx->channels,
												   iFrame->nb_samples,
												   pCodecCtx->sample_fmt, 1);
				
				//		NSLog (@"decode Audio sps: %d",iFrame->sample_rate);
				//		NSLog (@"audio samples decoded: %d",iFrame->nb_samples);
				if (sampleCounter == 0)
					this->setSamplesPerSecond(iFrame->sample_rate);
			}
			
#endif
			//NSLog(@"decoded: %d finished: %d",len,gotFrame);
			
		} while (!gotFrame);
		
		
		int bytesPerSample = (this->getSampleSize() * this->getSampleChannels());			
		numSamples = bytes / bytesPerSample;
		int64_t endCounter = sampleCounter + numSamples;
		pFrame->nb_samples =numSamples;
		//	NSLog(@"bytesPerSample: %d, numSamples: %d",bytesPerSample,numSamples);
		if (this->compareSamplesRange(endCounter) >=0) {
			if ((this->compareSamplesRange(sampleCounter)<0) && (this->compareSamplesRange(endCounter) >0))
			{
				// send partial frame
				//NSLog(@"partial case 1");
				
                if ((this->getSamplesOut() - this->getSamplesIn()) > INT_MAX) {
                    throw new AVException(std::string("Sample Range Too Large"),SAMPLE_RANGE_ERROR);
                }
                
				pFrame->nb_samples =(this->getSamplesOut() - this->getSamplesIn()) ;
				avcodec_fill_audio_frame(pFrame, this->getSampleChannels(), this->getSampleFormat(),
										 iFrame->data[0] + (this->getSamplesIn() - sampleCounter) * bytesPerSample,
										 pFrame->nb_samples* bytesPerSample , 1);
			}
			else if ((this->compareSamplesRange(sampleCounter)<0) && (this->compareSamplesRange(endCounter) ==0))
			{
				// send partial frame
				//NSLog(@"partial case 2");
				
                if ((sampleCounter + numSamples - this->getSamplesIn() + 1) > INT_MAX) {
                    throw new AVException(std::string("Sample Range Too Large"),SAMPLE_RANGE_ERROR);
                }

                
				pFrame->nb_samples =( sampleCounter + numSamples - this->getSamplesIn() + 1) ;
				avcodec_fill_audio_frame(pFrame, this->getSampleChannels(), this->getSampleFormat(),
										 iFrame->data[0] + (this->getSamplesIn() - sampleCounter) * bytesPerSample,
										 pFrame->nb_samples* bytesPerSample,1);
				
			} 
			else if ((this->compareSamplesRange(sampleCounter)==0) && (this->compareSamplesRange(endCounter) >0))
			{
				// send partial frame
				//NSLog(@"partial case 3");
				
                if ((this->getSamplesOut() - sampleCounter) > INT_MAX) {
                    throw new AVException(std::string("Sample Range Too Large"),SAMPLE_RANGE_ERROR);
                }

                
				pFrame->nb_samples =( this->getSamplesOut() - sampleCounter);
				avcodec_fill_audio_frame(pFrame, this->getSampleChannels(), this->getSampleFormat(),
										 iFrame->data[0],
										 pFrame->nb_samples * bytesPerSample,1);
				
			}
			else 
			{
				// send entire frame.
				//NSLog(@"full case: %d",numSamples);
				
				//	NSLog(@"pic buffer: %x pframe->data[0]: %x",pictureBuffer,pFrame->data[0]);
				
				pFrame->nb_samples =numSamples;
				
				avcodec_fill_audio_frame(pFrame, this->getSampleChannels(), this->getSampleFormat(),
										 iFrame->data[0],bytes,1);
				//			memset(pFrame->data[0],127,bytes);
			}
		}
		
		// av_frame_free() also sets iFrame to NULL; a further av_free() is unnecessary.
		av_frame_free(&iFrame);
#if LIBAVFORMAT_VERSION_MAJOR  < 57	
		av_free_packet(&packet);
#else
		av_packet_unref(&packet);
#endif
		
		
		sampleCounter += pFrame->nb_samples;
		
		//	NSLog(@"sample counter: %d",sampleCounter);
		
	} while (this->compareSamplesRange(sampleCounter) < 0);			
	//	NSLog(@"< [libav decodeNextAudio] %d %d ",pFrame->nb_samples , bytes) ;
	
	return pFrame->nb_samples;
}
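The sample count above is derived from the byte count returned by av_samples_get_buffer_size(). A sketch of the equivalent computation straight from the codec context; note it only holds for interleaved formats, since for planar data iFrame->data[0] contains a single channel:

int bytesPerSample = av_get_bytes_per_sample(pCodecCtx->sample_fmt) * pCodecCtx->channels;
int nbytes = av_samples_get_buffer_size(NULL, pCodecCtx->channels,
                                        iFrame->nb_samples, pCodecCtx->sample_fmt, 1);
int numSamples = nbytes / bytesPerSample; // == iFrame->nb_samples for interleaved data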
Beispiel #19
0
/**
 * Decode one audio frame from the input file.
 * @param      frame                Audio frame to be decoded
 * @param      input_format_context Format context of the input file
 * @param      input_codec_context  Codec context of the input file
 * @param[out] data_present         Indicates whether data has been decoded
 * @param[out] finished             Indicates whether the end of file has
 *                                  been reached and all data has been
 *                                  decoded. If this flag is false, there
 *                                  is more data to be decoded, i.e., this
 *                                  function has to be called again.
 * @return Error code (0 if successful)
 */
int Transcode::decode_audio_frame(AVFrame *frame,
                              AVFormatContext *input_format_context,
                              AVCodecContext *input_codec_context,
                              int *data_present, int *finished)
{
    /* Packet used for temporary storage. */
    AVPacket input_packet;
    int error;
    init_packet(&input_packet);
    
    /* Read one audio frame from the input file into a temporary packet. */
    if ((error = av_read_frame(input_format_context, &input_packet)) < 0) {
        /* If we are at the end of the file, flush the decoder below. */
        if (error == AVERROR_EOF)
            *finished = 1;
        else {
            fprintf(stderr, "Could not read frame (error '%s')\n",
                    av_cplus_err2str(error));
            return error;
        }
    }

#if LIBAVCODEC_VERSION_INT < AV_VERSION_INT(57, 48, 0)    
    /* Decode the audio frame stored in the temporary packet.
     * The input audio stream decoder is used to do this.
     * If we are at the end of the file, pass an empty packet to the decoder
     * to flush it. */
    if ((error = avcodec_decode_audio4(input_codec_context, frame,
                                       data_present, &input_packet)) < 0) {
        fprintf(stderr, "Could not decode frame (error '%s')\n",
                av_cplus_err2str(error));
        av_packet_unref(&input_packet);
        return error;
    }
#else    
    int ret;
    // AVERROR(EAGAIN) from avcodec_send_packet() means the decoder cannot
    // accept more input until pending frames are drained with
    // avcodec_receive_frame().
    do {
        do {
            ret = avcodec_send_packet(input_codec_context, &input_packet);
        } while(ret == AVERROR(EAGAIN));

        if(ret == AVERROR_EOF || ret == AVERROR(EINVAL)) {
            printf("AVERROR(EAGAIN): %d, AVERROR_EOF: %d, AVERROR(EINVAL): %d\n", AVERROR(EAGAIN), AVERROR_EOF, AVERROR(EINVAL));
            printf("fe_read_frame: Frame getting error (%d)!\n", ret);
            return ret;
        } else {
            *data_present = 1;
        }
        ret = avcodec_receive_frame(input_codec_context, frame);
    } while(ret == AVERROR(EAGAIN));
    
    if(ret == AVERROR_EOF){
        *finished = 1;
        *data_present = 0;
    }

    if(ret == AVERROR(EINVAL)) {
        // A decoding error occurred; break out and return what we have so far.
        fprintf(stderr, "Could not decode frame (error '%s')\n",
        av_cplus_err2str(ret));
        av_packet_unref(&input_packet);
        return ret;
    }
#endif
    
    

    /* If the decoder has not been flushed completely, we are not finished,
     * so that this function has to be called again. */
    if (*finished && *data_present)
        *finished = 0;
    av_packet_unref(&input_packet);
    return 0;
}
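The #else branch above retries avcodec_send_packet() in a tight loop on AVERROR(EAGAIN), which only resolves once pending frames are drained. A minimal sketch of the usual send/receive pairing for the new API (the pattern, not this project's code):

static int decode_packet(AVCodecContext *ctx, const AVPacket *pkt, AVFrame *frame)
{
    int ret = avcodec_send_packet(ctx, pkt); // pkt == NULL flushes the decoder
    if (ret < 0)
        return ret;
    while ((ret = avcodec_receive_frame(ctx, frame)) == 0) {
        /* ... consume frame ... */
        av_frame_unref(frame);
    }
    // EAGAIN: decoder wants more input; EOF: fully drained. Both are normal.
    return (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF) ? 0 : ret;
}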
Beispiel #20
0
int movie_player::decode_audio_frame(bool fFirst)
{
#ifdef CORSIX_TH_MOVIE_USE_SEND_PACKET_API
    if (!audio_frame)
    {
        audio_frame = av_frame_alloc();
    }
    else
    {
        av_frame_unref(audio_frame);
    }

    int iError = get_frame(audio_stream_index, audio_frame);

    if (iError == AVERROR_EOF)
    {
        return 0;
    }
    else if (iError < 0)
    {
        std::cerr << "Unexpected error " << iError << " while decoding audio packet" << std::endl;
        return 0;
    }

    double dClockPts = get_presentation_time_for_frame(audio_frame, audio_stream_index);
    current_sync_pts = dClockPts;
    current_sync_pts_system_time = SDL_GetTicks();
#else
    int iGotFrame = 0;
    bool fNewPacket = false;
    bool fFlushComplete = false;

    while(!iGotFrame && !aborting)
    {
        if(!audio_packet || audio_packet->size == 0)
        {
            if(audio_packet)
            {
                audio_packet->data = audio_packet_data;
                audio_packet->size = audio_packet_size;
                av_packet_unref(audio_packet);
                av_free(audio_packet);
                audio_packet = nullptr;
            }
            audio_packet = audio_queue->pull(true);
            if(aborting)
            {
                break;
            }

            // Check for a null packet before touching its fields.
            if(audio_packet == nullptr)
            {
                fNewPacket = false;
                return -1;
            }

            audio_packet_data = audio_packet->data;
            audio_packet_size = audio_packet->size;
            fNewPacket = true;

            if(audio_packet->data == flush_packet->data)
            {
                avcodec_flush_buffers(audio_codec_context);
                fFlushComplete = false;
            }
        }

        if(fFirst)
        {
            int64_t iStreamPts = audio_packet->pts;
            if(iStreamPts != AV_NOPTS_VALUE)
            {
                //There is a time_base in audio_codec_context too, but that one is wrong.
                double dClockPts = iStreamPts * av_q2d(format_context->streams[audio_stream_index]->time_base);
                current_sync_pts = dClockPts;
                current_sync_pts_system_time = SDL_GetTicks();
            }
            fFirst = false;
        }

        while(audio_packet->size > 0 || (!audio_packet->data && fNewPacket))
        {
            if(!audio_frame)
            {
                audio_frame = av_frame_alloc();
            }
            else
            {
                av_frame_unref(audio_frame);
            }

            if(fFlushComplete)
            {
                break;
            }

            fNewPacket = false;

            int iBytesConsumed = avcodec_decode_audio4(audio_codec_context, audio_frame, &iGotFrame, audio_packet);

            if(iBytesConsumed < 0)
            {
                audio_packet->size = 0;
                break;
            }
            audio_packet->data += iBytesConsumed;
            audio_packet->size -= iBytesConsumed;

            if(!iGotFrame)
            {
                if(audio_packet->data && (audio_codec_context->codec->capabilities & CODEC_CAP_DELAY))
                {
                    fFlushComplete = true;
                }
            }
        }
    }
#endif
    //over-estimate output samples
    int iOutSamples = (int)av_rescale_rnd(audio_frame->nb_samples, mixer_frequency, audio_codec_context->sample_rate, AV_ROUND_UP);
    int iSampleSize = av_get_bytes_per_sample(AV_SAMPLE_FMT_S16) * iOutSamples * mixer_channels;

    if(iSampleSize > audio_buffer_max_size)
    {
        if(audio_buffer_max_size > 0)
        {
            av_free(audio_buffer);
        }
        audio_buffer = (uint8_t*)av_malloc(iSampleSize);
        audio_buffer_max_size = iSampleSize;
    }

#ifdef CORSIX_TH_USE_FFMPEG
    swr_convert(audio_resample_context, &audio_buffer, iOutSamples, (const uint8_t**)&audio_frame->data[0], audio_frame->nb_samples);
#elif defined(CORSIX_TH_USE_LIBAV)
    avresample_convert(audio_resample_context, &audio_buffer, 0, iOutSamples, (uint8_t**)&audio_frame->data[0], 0, audio_frame->nb_samples);
#endif
    return iSampleSize;
}
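The AV_ROUND_UP rescale above deliberately over-estimates the output size. When a tighter bound is wanted, the resampler's buffered delay can be folded in; a sketch assuming the FFmpeg (SwrContext) path of this example:

int64_t delay = swr_get_delay(audio_resample_context, audio_codec_context->sample_rate);
int out_samples = (int)av_rescale_rnd(delay + audio_frame->nb_samples,
                                      mixer_frequency,
                                      audio_codec_context->sample_rate,
                                      AV_ROUND_UP);

Beispiel #21
0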
int udpsocket::ts_demux(void)
{
    AVCodec *pVideoCodec[VIDEO_NUM];
    AVCodec *pAudioCodec[AUDIO_NUM];
    AVCodecContext *pVideoCodecCtx[VIDEO_NUM];
    AVCodecContext *pAudioCodecCtx[AUDIO_NUM];
    AVIOContext * pb;
    AVInputFormat *piFmt;
    AVFormatContext *pFmt;
    uint8_t *buffer;
    int videoindex[VIDEO_NUM];
    int audioindex[AUDIO_NUM];
    AVStream *pVst[VIDEO_NUM];
    AVStream *pAst[AUDIO_NUM];
    AVFrame *pVideoframe[VIDEO_NUM];
    AVFrame *pAudioframe[AUDIO_NUM];
    AVFrame *pOutAudioframe[AUDIO_NUM];
    AVFrame *pOutAudioframelast[AUDIO_NUM];
    AVPacket pkt;
    int got_picture;
    int video_num[VIDEO_NUM];
    int audio_num[AUDIO_NUM];
    int frame_size;

    //transcodepool
    transcodepool*  pVideoTransPool[VIDEO_NUM];
    transcodepool*  pAudioTransPool[AUDIO_NUM];

    for( int i=0; i<VIDEO_NUM; i++ ){
        pVideoCodec[i] = NULL;
        pVideoCodecCtx[i] =NULL;
        videoindex[i] = -1;
        pVst[i] = NULL;
        video_num[i] = 0;
        pVideoframe[i] = NULL;
        pVideoframe[i] = av_frame_alloc();
        pVideoTransPool[i] = NULL;
    }
    for( int i=0; i<AUDIO_NUM; i++ ){
        pAudioCodec[i] = NULL;
        pAudioCodecCtx[i] = NULL;
        audioindex[i] = -1;
        pAst[i] = NULL;
        audio_num[i] = 0;
        pOutAudioframe[i] = NULL;
        pOutAudioframe[i] = av_frame_alloc();
        pOutAudioframelast[i] = NULL;
        pOutAudioframelast[i] = av_frame_alloc();
        pAudioframe[i] = NULL;
        pAudioframe[i] = av_frame_alloc();
        pAudioTransPool[i] = NULL;
    }
    pb = NULL;
    piFmt = NULL;
    pFmt = NULL;
    buffer = (uint8_t*)av_mallocz(sizeof(uint8_t)*BUFFER_SIZE);
    got_picture = 0;
    frame_size = AVCODEC_MAX_AUDIO_FRAME_SIZE*3/2;

    //encoder
    AVFormatContext *ofmt_ctx = NULL;
    AVPacket enc_pkt;
    AVStream *out_stream;
    AVCodecContext *enc_ctx;
    AVCodec *encoder;

    AVFormatContext *outAudioFormatCtx[AUDIO_NUM];
    AVPacket audio_pkt;
    AVStream *audio_stream[AUDIO_NUM];
    AVCodecContext *AudioEncodeCtx[AUDIO_NUM];
    AVCodec *AudioEncoder[AUDIO_NUM];

    fp_v = fopen("OUT.h264","wb+"); // output file
    fp_a = fopen("audio_out.aac","wb+");

    //FFMPEG
    av_register_all();
    pb = avio_alloc_context(buffer, 4096, 0, NULL, read_data, NULL, NULL);
//    printf("thread %d pid %lu tid %lu\n",index,(unsigned long)getpid(),(unsigned long)pthread_self());
    if (!pb) {
        fprintf(stderr, "avio alloc failed!\n");
        return -1;
    }

    int x = av_probe_input_buffer(pb, &piFmt, "", NULL, 0, 0);
    if (x < 0) {
        printf("probe error: %d",x);
       // fprintf(stderr, "probe failed!\n");
    } else {
        fprintf(stdout, "probe success!\n");
        fprintf(stdout, "format: %s[%s]\n", piFmt->name, piFmt->long_name);
    }
    pFmt = avformat_alloc_context();
    pFmt->pb = pb;

    if (avformat_open_input(&pFmt, "", piFmt, NULL) < 0) {
        fprintf(stderr, "avformat open failed.\n");
        return -1;
    } else {
        fprintf(stdout, "open stream success!\n");
    }
    //pFmt->probesize = 4096 * 2000;
    //pFmt->max_analyze_duration = 5 * AV_TIME_BASE;
    //pFmt->probesize = 2048;
   // pFmt->max_analyze_duration = 1000;
    pFmt->probesize = 2048 * 1000 ;
    pFmt->max_analyze_duration = 2048 * 1000;
    if (avformat_find_stream_info(pFmt,0) < 0) {
        fprintf(stderr, "could not find stream info.\n");
        return -1;
    }
    printf("dump format\n");
    av_dump_format(pFmt, 0, "", 0);

    int videox = 0, audiox = 0;
    for (unsigned int i = 0; i < pFmt->nb_streams; i++) {
        if(videox == VIDEO_NUM && audiox == AUDIO_NUM)
            break;
        if ( pFmt->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO && videox < VIDEO_NUM ) {
            videoindex[ videox++ ] = i;
        }
        if ( pFmt->streams[i]->codec->codec_type == AVMEDIA_TYPE_AUDIO && audiox < AUDIO_NUM ) {
            audioindex[ audiox++ ] = i;
        }
    }
    }

    for(int i=0; i<VIDEO_NUM; i++)
        printf("videoindex %d = %d, audioindex %d = %d\n",i , videoindex[i], i ,audioindex[i]);

    if (videoindex[VIDEO_NUM-1] < 0 || audioindex[AUDIO_NUM-1] < 0) {
        fprintf(stderr, "videoindex=%d, audioindex=%d\n", videoindex[VIDEO_NUM-1], audioindex[AUDIO_NUM-1]);
        return -1;
    }

    for( int i=0; i<VIDEO_NUM; i++ ){
        pVst[i] = pFmt->streams[videoindex[i]];
        pVideoCodecCtx[i] = pVst[i]->codec;
        pVideoCodec[i] = avcodec_find_decoder(pVideoCodecCtx[i]->codec_id);
        if (!pVideoCodec[i]) {
            fprintf(stderr, "could not find video decoder!\n");
            return -1;
        }
        if (avcodec_open2(pVideoCodecCtx[i], pVideoCodec[i], NULL) < 0) {
            fprintf(stderr, "could not open video codec!\n");
            return -1;
        }
    }

    for( int i=0; i<AUDIO_NUM; i++ ){
        pAst[i] = pFmt->streams[audioindex[i]];
        pAudioCodecCtx[i] = pAst[i]->codec;
        pAudioCodec[i] = avcodec_find_decoder(pAudioCodecCtx[i]->codec_id);
        if (!pAudioCodec[i]) {
            fprintf(stderr, "could not find audio decoder!\n");
            return -1;
        }
        if (avcodec_open2(pAudioCodecCtx[i], pAudioCodec[i], NULL) < 0) {
            fprintf(stderr, "could not open audio codec!\n");
            return -1;
        }
    }

    //video encoder init
    avformat_alloc_output_context2(&ofmt_ctx, NULL, "h264", NULL);
    unsigned char* outbuffer = NULL;
    outbuffer = (unsigned char*)av_malloc(1024*1000);
    AVIOContext *avio_out = NULL;
    avio_out = avio_alloc_context(outbuffer, 1024*1000, 0, NULL, NULL, write_buffer,NULL);
    if(avio_out == NULL){
        printf("avio_out error\n");
        return -1;
    }
    ofmt_ctx->pb = avio_out;
    ofmt_ctx->flags = AVFMT_FLAG_CUSTOM_IO;
    out_stream = avformat_new_stream(ofmt_ctx, NULL);
    if(!out_stream){
        av_log(NULL, AV_LOG_ERROR, "Failed allocating output stream\n");
        return -1;
    }
    enc_ctx = out_stream->codec;
    encoder = avcodec_find_encoder(AV_CODEC_ID_H264);
    enc_ctx->height = pVideoCodecCtx[0]->height;
    enc_ctx->width = pVideoCodecCtx[0]->width;
    enc_ctx->sample_aspect_ratio = pVideoCodecCtx[0]->sample_aspect_ratio;
    enc_ctx->pix_fmt = encoder->pix_fmts[0];
    out_stream->time_base = pVst[0]->time_base;
//    out_stream->time_base.num = 1;
//    out_stream->time_base.den = 25;
    enc_ctx->me_range = 16;
    enc_ctx->max_qdiff = 4;
    enc_ctx->qmin = 25;
    enc_ctx->qmax = 40;
    enc_ctx->qcompress = 0.6;
    enc_ctx->refs = 3;
    enc_ctx->bit_rate = 1000000;
    int re = avcodec_open2(enc_ctx, encoder, NULL);
    if (re < 0) {
        av_log(NULL, AV_LOG_ERROR, "Cannot open video encoder for stream \n");
        return re;
    }

    if(ofmt_ctx->oformat->flags & AVFMT_GLOBALHEADER)
        enc_ctx->flags |= CODEC_FLAG_GLOBAL_HEADER;
    re = avformat_write_header(ofmt_ctx, NULL);
    if(re < 0){
        av_log(NULL, AV_LOG_ERROR, "Error occurred when opening output file\n");
        return re;
    }

    //audio encoder
    for( int i=0; i<AUDIO_NUM; i++){
        outAudioFormatCtx[i] = NULL;
//        audio_pkt = NULL;
        audio_stream[i] = NULL;
        AudioEncodeCtx[i] = NULL;
        AudioEncoder[i] = NULL;
    }
    const char* out_audio_file = "transcodeaudio.aac";          //Output URL

    //Method 1.
    outAudioFormatCtx[0] = avformat_alloc_context();
    outAudioFormatCtx[0]->oformat = av_guess_format(NULL, out_audio_file, NULL);
    AVIOContext *avio_audio_out = NULL;
    avio_audio_out = avio_alloc_context(outbuffer, 1024*1000, 0, NULL, NULL, write_buffer,NULL);
    if(avio_audio_out == NULL){
        printf("avio_out error\n");
        return -1;
    }
    outAudioFormatCtx[0]->pb = avio_audio_out;
    //Method 2.
    //avformat_alloc_output_context2(&pFormatCtx, NULL, NULL, out_file);
    //fmt = pFormatCtx->oformat;

    //Open output URL
    if (avio_open(&outAudioFormatCtx[0]->pb,out_audio_file, AVIO_FLAG_READ_WRITE) < 0){
        printf("Failed to open output file!\n");
        return -1;
    }

    //Show some information
    av_dump_format(outAudioFormatCtx[0], 0, out_audio_file, 1);

    AudioEncoder[0] = avcodec_find_encoder(AV_CODEC_ID_AAC);
    if (!AudioEncoder[0]){
        printf("Can not find encoder!\n");
        return -1;
    }
    audio_stream[0] = avformat_new_stream(outAudioFormatCtx[0], AudioEncoder[0]);
    if (audio_stream[0]==NULL){
        return -1;
    }
    AudioEncodeCtx[0] = audio_stream[0]->codec;
    AudioEncodeCtx[0]->codec_id =  outAudioFormatCtx[0]->oformat->audio_codec;
    AudioEncodeCtx[0]->codec_type = AVMEDIA_TYPE_AUDIO;
    AudioEncodeCtx[0]->sample_fmt = AV_SAMPLE_FMT_S16;
    AudioEncodeCtx[0]->sample_rate= 48000;//44100
    AudioEncodeCtx[0]->channel_layout=AV_CH_LAYOUT_STEREO;
    AudioEncodeCtx[0]->channels = av_get_channel_layout_nb_channels(AudioEncodeCtx[0]->channel_layout);
    AudioEncodeCtx[0]->bit_rate = 64000;//64000
    /** Allow the use of the experimental AAC encoder */
    AudioEncodeCtx[0]->strict_std_compliance = FF_COMPLIANCE_EXPERIMENTAL;

    /** Set the sample rate for the container. */
    audio_stream[0]->time_base.den = pAudioCodecCtx[0]->sample_rate;
    audio_stream[0]->time_base.num = 1;

    if (avcodec_open2(AudioEncodeCtx[0], AudioEncoder[0],NULL) < 0){
        printf("Failed to open encoder!\n");
        return -1;
    }

    av_samples_get_buffer_size(NULL, AudioEncodeCtx[0]->channels,AudioEncodeCtx[0]->frame_size,AudioEncodeCtx[0]->sample_fmt, 1);

    //uint8_t samples[AVCODEC_MAX_AUDIO_FRAME_SIZE*3/2];
    av_init_packet(&pkt);
    av_init_packet(&audio_pkt);
    av_init_packet(&enc_pkt);
    AVAudioFifo *af = NULL;
    SwrContext *resample_context = NULL;
    long long pts = 0;
    /** Initialize the resampler to be able to convert audio sample formats. */
//    if (init_resampler(input_codec_context, output_codec_context,
//                       &resample_context))
    for(int i=0; i<1; i++){
        printf("work \n");
        printf(" samplerate input = %d , samplerate output = %d\n",pAudioCodecCtx[i]->sample_rate, AudioEncodeCtx[i]->sample_rate);
        resample_context = swr_alloc_set_opts(NULL, av_get_default_channel_layout(AudioEncodeCtx[i]->channels),
                                                          AudioEncodeCtx[i]->sample_fmt,
                                                          AudioEncodeCtx[i]->sample_rate,
                                                          av_get_default_channel_layout(pAudioCodecCtx[i]->channels),
                                                          pAudioCodecCtx[i]->sample_fmt,
                                                          pAudioCodecCtx[i]->sample_rate,
                                                          0, NULL);
        swr_init(resample_context);
    }
    af = av_audio_fifo_alloc(AudioEncodeCtx[0]->sample_fmt, AudioEncodeCtx[0]->channels, 1);
    if(af == NULL)
    {
        printf("error af \n");
        return -1;
    }

    while(1) {
        if (av_read_frame(pFmt, &pkt) >= 0) {
            for( int i=0; i<1; i++ ){
                if (pkt.stream_index == videoindex[i]) {
//                    av_frame_free(&pframe);
                    avcodec_decode_video2(pVideoCodecCtx[i], pVideoframe[i], &got_picture, &pkt);
                    if (got_picture) {
                        if(videoindex[i] == 0){
//                            m_tsRecvPool->write_buffer(pkt.data, pkt.size);
                            pVideoframe[i]->pts = av_frame_get_best_effort_timestamp(pVideoframe[i]);
                            pVideoframe[i]->pict_type = AV_PICTURE_TYPE_NONE;
//                            printf("videoframesize0 = %d, size1 = %d, size2 = %d, size3 = %d, size4 = %d,format = %d\n",pVideoframe[i]->linesize[0],
//                                    pVideoframe[i]->linesize[1],pVideoframe[i]->linesize[2],pVideoframe[i]->linesize[3],pVideoframe[i]->linesize[4],pVideoframe[i]->format);
//                            pVideoTransPool[i]->PutFrame( pVideoframe[i] ,i);
                            int enc_got_frame = 0;
                            /*  ffmpeg encoder */
                            enc_pkt.data = NULL;
                            enc_pkt.size = 0;
                            av_init_packet(&enc_pkt);
                            re = avcodec_encode_video2(ofmt_ctx->streams[videoindex[i]]->codec, &enc_pkt,
                                    pVideoframe[i], &enc_got_frame);
//                            printf("enc_got_frame =%d, re = %d \n",enc_got_frame, re);
                            printf("video Encode 1 Packet\tsize:%d\tpts:%lld\n",enc_pkt.size,enc_pkt.pts);
                            /* prepare packet for muxing */
//                            fwrite(enc_pkt.data,enc_pkt.size, 1, fp_v);
                        }
//                        printf(" video %d decode %d num\n", i, video_num[i]++);
                        break;
                    }

                 }else if (pkt.stream_index == audioindex[i]) {
                    // NOTE: frame_size is reused here as the got_frame flag.
                    if (avcodec_decode_audio4(pAudioCodecCtx[i], pAudioframe[i], &frame_size, &pkt) >= 0) {
                        if (i == 0){

//                                fwrite(pAudioframe[i]->data[0],pAudioframe[i]->linesize[0], 1, fp_a);
//                                printf("index = %d audio %d decode %d num\n", index, i, audio_num[i]++);
                            uint8_t *converted_input_samples = NULL;
                            // av_samples_alloc() does the allocation itself; a
                            // separate calloc() here would only leak.
                            av_samples_alloc(&converted_input_samples, NULL, AudioEncodeCtx[i]->channels, pAudioframe[i]->nb_samples, AudioEncodeCtx[i]->sample_fmt, 0);
                            int error = 0;
                            if((error = swr_convert(resample_context, &converted_input_samples, pAudioframe[i]->nb_samples,
                                                   (const uint8_t**)pAudioframe[i]->extended_data, pAudioframe[i]->nb_samples))<0){
                                printf("error  : %d\n",error);
                            }
//                            av_audio_fifo_realloc(af, av_audio_fifo_size(af) + pAudioframe[i]->nb_samples);
                            av_audio_fifo_write(af, (void **)&converted_input_samples, pAudioframe[i]->nb_samples);
                            av_freep(&converted_input_samples);
//                            fwrite(pkt.data,pkt.size, 1, fp_a);
//                            pAudioframe[i]->data[0] = frame_buf;
//                            init_converted_samples(&converted_input_samples, output_codec_context, pAudioframe[i]->nb_samples);

                            /** Initialize temporary storage for one output frame. */
//                            printf("pkt.size = %d , pkt.pts = %d ,pkt.dts = %d\n",pkt.size, pkt.pts, pkt.dts);
//                            printf("framesize = %d, audioframesize = %d\n", pAudioframe[i]->nb_samples, frame_size);

//                            pOutAudioframe[i]->pict_type = AV_PICTURE_TYPE_NONE;
                            int got_frame=0;
                            //Encode
//                            av_init_packet(&audio_pkt);
//                            audio_pkt.data = NULL;
//                            audio_pkt.size = 0;
//                            avcodec_encode_audio2(AudioEncodeCtx[0], &audio_pkt, pOutAudioframe[i], &got_frame);
//                            printf("Encode 1 Packet\tsize:%d\tpts:%lld\n", audio_pkt.size, audio_pkt.pts);
                            while(av_audio_fifo_size(af) >= AudioEncodeCtx[i]->frame_size){
                                int frame_size = FFMIN(av_audio_fifo_size(af),AudioEncodeCtx[0]->frame_size);
                                pOutAudioframe[i]->nb_samples =  frame_size;
                                pOutAudioframe[i]->channel_layout = AudioEncodeCtx[0]->channel_layout;
                                pOutAudioframe[i]->sample_rate = AudioEncodeCtx[0]->sample_rate;
                                pOutAudioframe[i]->format = AudioEncodeCtx[0]->sample_fmt;

                                av_frame_get_buffer(pOutAudioframe[i], 0);
                                av_audio_fifo_read(af, (void **)&pOutAudioframe[i]->data, frame_size);

                                pOutAudioframe[i]->pts=pts;
                                pts += pOutAudioframe[i]->nb_samples;

                                audio_pkt.data = NULL;
                                audio_pkt.size = 0;
                                av_init_packet(&audio_pkt);
                                avcodec_encode_audio2(AudioEncodeCtx[0], &audio_pkt, pOutAudioframe[i], &got_frame);
                                printf("audio Encode 1 Packet\tsize:%d\tpts:%lld\n", audio_pkt.size, audio_pkt.pts);
                                fwrite(audio_pkt.data,audio_pkt.size, 1, fp_a);
                            }
                        }
//                        if(i == 0){
//                            fwrite(pkt.data,pkt.size, 1, fp_a);
//                        }
//                        printf("index = %d audio %d decode %d num\n", index, i, audio_num[i]++);
                        break;
                    }
                }
            }
            av_free_packet(&pkt);
            av_free_packet(&enc_pkt);
        }
    }

    av_free(buffer);
    for(int i=0; i<VIDEO_NUM; i++)
        av_frame_free(&pVideoframe[i]);

    for(int i=0; i<AUDIO_NUM; i++)
        av_frame_free(&pAudioframe[i]);

    return 0;

}
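The audio path above re-frames decoded audio through an AVAudioFifo because the AAC encoder wants fixed-size frames. A condensed sketch of that pattern, with names matching the example, in_frame standing for the decoded source frame, and error checks omitted:

av_audio_fifo_write(af, (void **)&converted_input_samples, in_frame->nb_samples);
while (av_audio_fifo_size(af) >= AudioEncodeCtx[0]->frame_size) {
    AVFrame *out = av_frame_alloc();
    out->nb_samples     = AudioEncodeCtx[0]->frame_size;
    out->channel_layout = AudioEncodeCtx[0]->channel_layout;
    out->format         = AudioEncodeCtx[0]->sample_fmt;
    out->sample_rate    = AudioEncodeCtx[0]->sample_rate;
    av_frame_get_buffer(out, 0);
    av_audio_fifo_read(af, (void **)out->data, out->nb_samples);
    /* encode `out` with avcodec_encode_audio2(), then av_frame_free(&out) */
}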
Beispiel #22
0
/*****************************************************************************
 * DecodeAudio: Called to decode one frame
 *****************************************************************************/
block_t * DecodeAudio ( decoder_t *p_dec, block_t **pp_block )
{
    decoder_sys_t *p_sys = p_dec->p_sys;
    AVCodecContext *ctx = p_sys->p_context;

    if( !pp_block || !*pp_block )
        return NULL;

    block_t *p_block = *pp_block;

    if( !ctx->extradata_size && p_dec->fmt_in.i_extra && p_sys->b_delayed_open)
    {
        InitDecoderConfig( p_dec, ctx );
        if( ffmpeg_OpenCodec( p_dec ) )
            msg_Err( p_dec, "Cannot open decoder %s", p_sys->psz_namecodec );
    }

    if( p_sys->b_delayed_open )
        goto end;

    if( p_block->i_flags & (BLOCK_FLAG_DISCONTINUITY|BLOCK_FLAG_CORRUPTED) )
    {
        avcodec_flush_buffers( ctx );
        date_Set( &p_sys->end_date, 0 );

        if( p_sys->i_codec_id == AV_CODEC_ID_MP2 || p_sys->i_codec_id == AV_CODEC_ID_MP3 )
            p_sys->i_reject_count = 3;

        goto end;
    }

    /* We've just started the stream, wait for the first PTS. */
    if( !date_Get( &p_sys->end_date ) && p_block->i_pts <= VLC_TS_INVALID )
        goto end;

    if( p_block->i_buffer <= 0 )
        goto end;

    if( (p_block->i_flags & BLOCK_FLAG_PRIVATE_REALLOCATED) == 0 )
    {
        p_block = block_Realloc( p_block, 0, p_block->i_buffer + FF_INPUT_BUFFER_PADDING_SIZE );
        if( !p_block )
            return NULL;
        p_block->i_buffer -= FF_INPUT_BUFFER_PADDING_SIZE;
        memset( &p_block->p_buffer[p_block->i_buffer], 0, FF_INPUT_BUFFER_PADDING_SIZE );

        p_block->i_flags |= BLOCK_FLAG_PRIVATE_REALLOCATED;
    }

    AVFrame frame;
    memset( &frame, 0, sizeof( frame ) );

    for( int got_frame = 0; !got_frame; )
    {
        if( p_block->i_buffer == 0 )
            goto end;

        AVPacket pkt;
        av_init_packet( &pkt );
        pkt.data = p_block->p_buffer;
        pkt.size = p_block->i_buffer;

        int used = avcodec_decode_audio4( ctx, &frame, &got_frame, &pkt );
        if( used < 0 )
        {
            msg_Warn( p_dec, "cannot decode one frame (%zu bytes)",
                      p_block->i_buffer );
            goto end;
        }

        assert( p_block->i_buffer >= (unsigned)used );
        p_block->p_buffer += used;
        p_block->i_buffer -= used;
    }

    if( ctx->channels <= 0 || ctx->channels > 8 || ctx->sample_rate <= 0 )
    {
        msg_Warn( p_dec, "invalid audio properties channels count %d, sample rate %d",
                  ctx->channels, ctx->sample_rate );
        goto end;
    }

    if( p_dec->fmt_out.audio.i_rate != (unsigned int)ctx->sample_rate )
        date_Init( &p_sys->end_date, ctx->sample_rate, 1 );

    if( p_block->i_pts > VLC_TS_INVALID &&
        p_block->i_pts > date_Get( &p_sys->end_date ) )
    {
        date_Set( &p_sys->end_date, p_block->i_pts );
    }

    if( p_block->i_buffer == 0 )
    {   /* Done with this buffer */
        block_Release( p_block );
        *pp_block = NULL;
    }

    /* NOTE WELL: Beyond this point, p_block now refers to the DECODED block */
    p_block = frame.opaque;
    SetupOutputFormat( p_dec, true );

    /* Silence unwanted samples */
    if( p_sys->i_reject_count > 0 )
    {
        memset( p_block->p_buffer, 0, p_block->i_buffer );
        p_sys->i_reject_count--;
    }

    block_t *p_buffer = decoder_NewAudioBuffer( p_dec, p_block->i_nb_samples );
    if (!p_buffer)
        return NULL;
    assert( p_block->i_nb_samples >= (unsigned)frame.nb_samples );
    assert( p_block->i_nb_samples == p_buffer->i_nb_samples );
    p_block->i_buffer = p_buffer->i_buffer; /* drop buffer padding */

    /* Interleave audio if required */
    if( av_sample_fmt_is_planar( ctx->sample_fmt ) )
    {
        aout_Interleave( p_buffer->p_buffer, p_block->p_buffer,
                         p_block->i_nb_samples, ctx->channels,
                         p_dec->fmt_out.audio.i_format );
        if( ctx->channels > AV_NUM_DATA_POINTERS )
            free( frame.extended_data );
        block_Release( p_block );
        p_block = p_buffer;
    }
    else /* FIXME: improve decoder_NewAudioBuffer(), avoid useless buffer... */
        block_Release( p_buffer );

    if (p_sys->b_extract)
    {   /* TODO: do not drop channels... at least not here */
        p_buffer = block_Alloc( p_dec->fmt_out.audio.i_bytes_per_frame
                                * frame.nb_samples );
        if( unlikely(p_buffer == NULL) )
        {
            block_Release( p_block );
            return NULL;
        }
        aout_ChannelExtract( p_buffer->p_buffer,
                             p_dec->fmt_out.audio.i_channels,
                             p_block->p_buffer, ctx->channels,
                             frame.nb_samples, p_sys->pi_extraction,
                             p_dec->fmt_out.audio.i_bitspersample );
        block_Release( p_block );
        p_block = p_buffer;
    }

    p_block->i_nb_samples = frame.nb_samples;
    p_block->i_buffer = frame.nb_samples
                        * p_dec->fmt_out.audio.i_bytes_per_frame;
    p_block->i_pts = date_Get( &p_sys->end_date );
    p_block->i_length = date_Increment( &p_sys->end_date, frame.nb_samples )
                        - p_block->i_pts;
    return p_block;

end:
    block_Release(p_block);
    *pp_block = NULL;
    return NULL;
}
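The aout_Interleave() branch above exists because FFmpeg decoders may return planar audio. A small layout illustration (a sketch, not VLC code) of the distinction:

int bps = av_get_bytes_per_sample(ctx->sample_fmt);
if (av_sample_fmt_is_planar(ctx->sample_fmt)) {
    // One plane per channel: frame.extended_data[ch] holds
    // frame.nb_samples * bps bytes of channel ch only.
} else {
    // Single plane: frame.data[0] holds all channels interleaved,
    // frame.nb_samples * bps * ctx->channels bytes in total.
}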
Beispiel #23
0
int bl_audio_decode(
		char const * const filename,
		struct bl_song * const song) {
	int ret;
	// Contexts and libav variables
	AVPacket avpkt;
	AVFormatContext* context;
	int audio_stream;
	AVCodecContext* codec_context = NULL;
	AVCodec *codec = NULL;
	AVFrame *decoded_frame = NULL;
	struct SwrContext *swr_ctx;

	// Size of the samples
	uint64_t size = 0;

	// Dictionary to fetch tags
	AVDictionaryEntry *tags_dictionary;

	// Planar means channels are NOT interleaved: each channel sits in its own
	// data plane. See MP3 vs FLAC for instance.
	int is_planar;

	// Pointer to beginning of music data
	int8_t *beginning;
	// Received frame holder
	int got_frame;
	// Position in the data buffer
	int index;
	// Initialize AV lib
	av_register_all();
	context = avformat_alloc_context();

	av_log_set_level(AV_LOG_QUIET);

	// Open input file
	if (avformat_open_input(&context, filename, NULL, NULL) < 0) {
		fprintf(stderr, "Couldn't open file: %s. Error %d encountered.\n", filename, errno);
		return BL_UNEXPECTED;
	}

	// Search for a valid stream
	if (avformat_find_stream_info(context, NULL) < 0) {
		fprintf(stderr, "Couldn't find stream information\n");
		return BL_UNEXPECTED;
	}

	// Get audio stream
	audio_stream = av_find_best_stream(context, AVMEDIA_TYPE_AUDIO, -1, -1, &codec, 0);
	if (audio_stream < 0) {
		fprintf(stderr, "Couldn't find a suitable audio stream\n");
		return BL_UNEXPECTED;
	}
	// Find associated codec
	codec_context = context->streams[audio_stream]->codec;
	if (!codec_context) {
		fprintf(stderr, "Codec not found!\n");
		return BL_UNEXPECTED;
	}
	if (avcodec_open2(codec_context, codec, NULL) < 0) {
		fprintf(stderr, "Could not open codec\n");
		return BL_UNEXPECTED;
	}

	// Fill song properties
	song->filename = malloc(strlen(filename) + 1);
	strcpy(song->filename, filename);

	song->sample_rate = codec_context->sample_rate;
	song->duration = (uint64_t)(context->duration) / ((uint64_t)AV_TIME_BASE);
	song->bitrate = context->bit_rate;
	song->not_s16 = 0;
	song->nb_bytes_per_sample = av_get_bytes_per_sample(codec_context->sample_fmt);
	song->channels = codec_context->channels;

	// Get number of samples
	size = (
		((uint64_t)(context->duration) * (uint64_t)song->sample_rate) /
		((uint64_t)AV_TIME_BASE)
		) *
		song->channels *
		song->nb_bytes_per_sample;

	// Estimated number of samples
	song->nSamples = (
		(
		((uint64_t)(context->duration) * (uint64_t)song->sample_rate) /
		((uint64_t)AV_TIME_BASE)
		) *
		song->channels
	);

	// Allocate sample_array
	if((song->sample_array = calloc(size, 1)) == NULL) {
		fprintf(stderr, "Could not allocate enough memory\n");
		return BL_UNEXPECTED;
	}

	beginning = song->sample_array;
	index = 0;

	// If the song is in a floating-point format or int32, prepare the conversion to int16
	if(codec_context->sample_fmt != AV_SAMPLE_FMT_S16 &&
		codec_context->sample_fmt != AV_SAMPLE_FMT_S16P) {
		song->not_s16 = 1;
		song->nb_bytes_per_sample = 2;
	
		swr_ctx = swr_alloc();
		av_opt_set_int(swr_ctx, "in_channel_layout", codec_context->channel_layout, 0);
		av_opt_set_int(swr_ctx, "in_sample_rate", codec_context->sample_rate, 0);
		av_opt_set_sample_fmt(swr_ctx, "in_sample_fmt", codec_context->sample_fmt, 0);

		av_opt_set_int(swr_ctx, "out_channel_layout", codec_context->channel_layout, 0);
		av_opt_set_int(swr_ctx, "out_sample_rate", codec_context->sample_rate, 0);
		av_opt_set_sample_fmt(swr_ctx, "out_sample_fmt", AV_SAMPLE_FMT_S16, 0);
		if((ret = swr_init(swr_ctx)) < 0) {
			fprintf(stderr, "Could not allocate resampler context\n");
			return BL_UNEXPECTED;
		}
	}

	// Zero-initialize tags
	song->artist = NULL;
	song->title = NULL;
	song->album = NULL;
	song->tracknumber = NULL;

	// Initialize tracknumber tag
	tags_dictionary = av_dict_get(context->metadata, "track", NULL, 0);
	if (tags_dictionary!= NULL) {
		song->tracknumber = malloc(strlen(tags_dictionary->value) + 1);
		strcpy(song->tracknumber, tags_dictionary->value);
		song->tracknumber[strcspn(song->tracknumber, "/")] = '\0';
	} 
	else {
		song->tracknumber = malloc(1 * sizeof(char));
		strcpy(song->tracknumber, "");
	}

	// Initialize title tag
	tags_dictionary = av_dict_get(context->metadata, "title", NULL, 0);
	if (tags_dictionary!= NULL) {
		song->title = malloc(strlen(tags_dictionary->value) + 1);
		strcpy(song->title, tags_dictionary->value);
	}
	else {
		song->title = malloc(12 * sizeof(char));
		strcpy(song->title, "<no title>");
	}

	// Initialize artist tag
	tags_dictionary = av_dict_get(context->metadata, "ARTIST", NULL, 0);
	if (tags_dictionary!= NULL) {
		song->artist= malloc(strlen(tags_dictionary->value) + 1);
		strcpy(song->artist, tags_dictionary->value);
	}
	else {
		song->artist= malloc(12 * sizeof(char));
		strcpy(song->artist, "<no artist>");
	}

	// Initialize album tag
	tags_dictionary = av_dict_get(context->metadata, "ALBUM", NULL, 0);
	if (tags_dictionary!= NULL) {
		song->album= malloc(strlen(tags_dictionary->value) + 1);
		strcpy(song->album, tags_dictionary->value);
	}
	else {
		song->album= malloc(11 * sizeof(char));
		strcpy(song->album, "<no album>");
	}

	// Initialize genre tag
	tags_dictionary = av_dict_get(context->metadata, "genre", NULL, 0);
	if (tags_dictionary!= NULL) {
		song->genre= malloc(strlen(tags_dictionary->value) + 1);
		strcpy(song->genre, tags_dictionary->value);
	}
	else {
		song->genre = malloc(11 * sizeof(char));
		strcpy(song->genre, "<no genre>");
	}

	// Planar means channels are not interleaved
	is_planar = av_sample_fmt_is_planar(codec_context->sample_fmt);

	// Read the whole data and copy them into a huge buffer
	av_init_packet(&avpkt);
	while(av_read_frame(context, &avpkt) >= 0) {
		if(avpkt.stream_index == audio_stream) {
			got_frame = 0;

			// If decoded frame has not been allocated yet
			if (!decoded_frame) {
				// Try to allocate it
				decoded_frame = av_frame_alloc();
				if(!decoded_frame) {
					fprintf(stderr, "Could not allocate audio frame\n");
					return BL_UNEXPECTED;
				}
			}
			else {
				// Else, unreference it and reset fields
				av_frame_unref(decoded_frame);
			}

			int length = avcodec_decode_audio4(codec_context,
				decoded_frame,
				&got_frame,
				&avpkt);
			if(length < 0) {
				avpkt.size = 0;
			}

			av_packet_unref(&avpkt);

			// Copy decoded data into a huge array
			if(got_frame) {
				size_t data_size = av_samples_get_buffer_size(
					NULL,
					codec_context->channels,
					decoded_frame->nb_samples,
					codec_context->sample_fmt,
				1);

				if((index * song->nb_bytes_per_sample + data_size) > size) {
					int8_t *ptr;
					ptr = realloc(beginning, size + data_size);
					if(ptr != NULL) {
						beginning = ptr;
						size += data_size;
						song->nSamples += data_size / song->nb_bytes_per_sample;
					}
					else
						break;
				}
				int8_t *p = beginning + (index * song->nb_bytes_per_sample);

				// If the song isn't in a 16-bit format, convert it to int16
				if(song->not_s16 == 1) {
					uint8_t **out_buffer;
					int buff_size;
					buff_size = av_samples_alloc_array_and_samples(&out_buffer, decoded_frame->linesize,
						song->channels, decoded_frame->nb_samples, AV_SAMPLE_FMT_S16, 0);
					// swr_convert() expects the output capacity in samples per
					// channel, not in bytes.
					ret = swr_convert(swr_ctx, out_buffer, decoded_frame->nb_samples,
						(const uint8_t**)decoded_frame->extended_data, decoded_frame->nb_samples);
					if(ret < 0) {
						fprintf(stderr, "Error while converting from floating-point to int\n");
						return BL_UNEXPECTED;
					}
					memcpy((index * song->nb_bytes_per_sample) + beginning,
						out_buffer[0], buff_size);
					av_freep(&out_buffer[0]);
					free(out_buffer);
					index += buff_size / song->nb_bytes_per_sample;
				}
				else if(1 == is_planar) {
					for (int i = 0;
						i < (decoded_frame->nb_samples * song->nb_bytes_per_sample);
						i += song->nb_bytes_per_sample) {
						for (int j = 0; j < codec_context->channels; ++j) {
							for (int k = 0; k < song->nb_bytes_per_sample; ++k) {
								*p = ((int8_t*)(decoded_frame->extended_data[j]))[i + k];
								++p;
							}
						}
					}
					index += data_size / song->nb_bytes_per_sample;
				}
				else if (0 == is_planar) {
					memcpy((index * song->nb_bytes_per_sample) + beginning,
						decoded_frame->extended_data[0],
						data_size);
					index += data_size / song->nb_bytes_per_sample;
				}
			}
		}
		else {
			// Dropping packets that do not belong to the audio stream
			// (such as album cover)
			av_packet_unref(&avpkt);
		}
	}
	song->sample_array = beginning;

	// Prepare an empty packet to flush the decoder
	avpkt.data = NULL;
	avpkt.size = 0;

	// Use correct number of samples after decoding
	song->nSamples = index; 
	
	// Read the end of audio (drain the decoder), as described in http://ffmpeg.org/pipermail/libav-user/2015-August/008433.html
	do {
		avcodec_decode_audio4(codec_context, decoded_frame, &got_frame, &avpkt);
	} while(got_frame);
	// Free memory
	if(song->not_s16)
		swr_free(&swr_ctx);
	avcodec_close(codec_context);
	av_frame_unref(decoded_frame);
#if LIBAVUTIL_VERSION_MAJOR > 51
	av_frame_free(&decoded_frame);
#endif
	av_packet_unref(&avpkt);
	avformat_close_input(&context);

	return BL_OK;
}
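The drain loop near the end feeds an empty packet until got_frame stays 0. A self-contained sketch of that flush step for the old decode API:

AVPacket flush_pkt;
av_init_packet(&flush_pkt);
flush_pkt.data = NULL; // empty packet flushes codecs that buffer frames
flush_pkt.size = 0;
int got_frame;
do {
    avcodec_decode_audio4(codec_context, decoded_frame, &got_frame, &flush_pkt);
    if (got_frame) {
        /* ... copy the remaining samples ... */
    }
} while (got_frame);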
Beispiel #24
0
int main(int argc, char **argv)
{
    AVFormatContext* pCtx = 0;
    AVCodecContext *pCodecCtx = 0;
    AVCodec *pCodec = 0;
    AVPacket packet;
    AVFrame *pFrame = 0;
    FILE *fpo1 = NULL;
    FILE *fpo2 = NULL;
    int nframe;
    int err;
    int got_picture = -1;
    int picwidth, picheight, linesize;
    unsigned char *pBuf;
    int i;
    int64_t timestamp;
    struct options opt;
    int usefo = 0;
    struct audio_dsp dsp;
    int dusecs;
    float usecs1 = 0;
    float usecs2 = 0;
    struct timeval elapsed1, elapsed2;
    int decoded = 0;

	//taoanran add +++++++++
	int ret = -1;
	int videoStream = -1; //video streamID
	// ----------------------

	int flags = SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER;
 #if 0
    if (SDL_Init (flags)) {
        fprintf(stderr, "Could not initialize SDL - %s\n", SDL_GetError());
        fprintf(stderr, "(Did you set the DISPLAY variable?)\n");
        exit(1);
    }
#endif
    av_register_all();

    av_log_set_callback(log_callback);
    av_log_set_level(50);

    if (Myparse_option(&opt, argc, argv) < 0 || (strlen(opt.finput) == 0))
    {
        Myshow_help(argv[0]);
        return 0;
    }

    err = avformat_open_input(&pCtx, opt.finput, 0, 0);
    if (err < 0)
    {
        printf("\n->(avformat_open_input)\tERROR:\t%d\n", err);
        goto fail;
    }
	printf("=========================\n");
    err = avformat_find_stream_info(pCtx, 0);

    if (err < 0)
    {
        printf("\n->(avformat_find_stream_info)\tERROR:\t%d\n", err);
        goto fail;
    }
	av_dump_format(pCtx, 0, opt.finput, 0);

	// check the video stream
	videoStream = find_video_stream(pCtx);
	if (videoStream < 0)
	{
		printf("there is no video stream!\n");
		return -1;
	}

	pCodecCtx = pCtx->streams[videoStream]->codec;
	pCodec = avcodec_find_decoder(pCodecCtx->codec_id);//find the video decoder
 	if (!pCodec)
    {
        printf("\ncan't find the video decoder!\n");
        goto fail;
    }

	pFrame = avcodec_alloc_frame();

	// open the video decoder
	ret = avcodec_open2(pCodecCtx, pCodec, 0);

	if (ret < 0)
	{
		printf("avcodec_open2 error \n");
		return -1;
	}

#if 0
	//only for audio
	pFrame->nb_samples = pCodecCtx->frame_size;
	pFrame->format = pCodecCtx->sample_fmt;
	pFrame->channel_layout = pCodecCtx->channel_layout;
#endif
#if 0
	//set the param of SDL
	SDL_AudioSpec wanted_spec, spec; 
	wanted_spec.freq = pCodecCtx->sample_rate;  
	wanted_spec.format = AUDIO_S16SYS;  
	wanted_spec.channels = pCodecCtx->channels;  
	wanted_spec.silence = 0;  
	wanted_spec.samples = SDL_AUDIO_BUFFER_SIZE;  
	wanted_spec.callback = audio_callback;
	wanted_spec.userdata = pCodecCtx;
	if(SDL_OpenAudio(&wanted_spec, &spec) < 0)  
    {  
        fprintf(stderr, "SDL_OpenAudio: %s/n", SDL_GetError());  
        return -1;  
    } 
#endif			

	 printf(" bit_rate = %d \r\n", pCodecCtx->bit_rate);
     printf(" sample_rate = %d \r\n", pCodecCtx->sample_rate);
     printf(" channels = %d \r\n", pCodecCtx->channels);
     printf(" code_name = %s \r\n", pCodecCtx->codec->name);

	while(av_read_frame(pCtx, &packet) >= 0)
	{
		// found a video frame
		if (packet.stream_index == videoStream)
		{
			avcodec_decode_video2(pCodecCtx, pFrame, &got_picture, &packet);
			if (got_picture)
			{
				printf("pFrame->width = %d\n", pFrame->width);
				printf("pFrame->height = %d\n", pFrame->height);
				printf("pFrame->linesize[0] = %d\n", pFrame->linesize[0]);
				printf("pFrame->linesize[1] = %d\n", pFrame->linesize[1]);
				printf("pFrame->linesize[2] = %d\n", pFrame->linesize[2]);

				// catch the YUV420P data
				saveYUV420P(pFrame->data[0], pFrame->linesize[0], pCodecCtx->width, pCodecCtx->height);     // Y: 4
				saveYUV420P(pFrame->data[1], pFrame->linesize[1], pCodecCtx->width/2, pCodecCtx->height/2); // U: 1
				saveYUV420P(pFrame->data[2], pFrame->linesize[2], pCodecCtx->width/2, pCodecCtx->height/2); // V: 1
			}
		}
		// release the packet each iteration to avoid leaking its buffer
		av_free_packet(&packet);
	}
	
	return 0;
#if 0	
	if (!opt.nodec)
    {
        
        pCodecCtx = pCtx->streams[opt.streamId]->codec;
 
        if (opt.thread_count <= 16 && opt.thread_count > 0 )
        {
            pCodecCtx->thread_count = opt.thread_count;
            pCodecCtx->thread_type = FF_THREAD_FRAME;
        }
        pCodec = avcodec_find_decoder(pCodecCtx->codec_id);
        if (!pCodec)
        {
            printf("\n-> cannot find the decoder!\n");
            goto fail;
        }
        err = avcodec_open2(pCodecCtx, pCodec, 0);
        if (err < 0)
        {
            printf("\n->(avcodec_open)\tERROR:\t%d\n", err);
            goto fail;
        }
        pFrame = avcodec_alloc_frame();
 
        if (opt.bplay)
        {
            dsp.audio_fd = open(OSS_DEVICE, O_WRONLY);
            if (dsp.audio_fd == -1)
            {
                printf("\n-> cannot open the audio device\n");
                goto fail;
            }
            dsp.channels = pCodecCtx->channels;
            dsp.speed = pCodecCtx->sample_rate;
            dsp.format = map_formats(pCodecCtx->sample_fmt);
            if (set_audio(&dsp) < 0)
            {
                printf("\n-> cannot configure the audio device\n");
                goto fail;
            }
        }
    }
    nframe = 0;
	printf("=========================444444\n");
    while(nframe < opt.frames || opt.frames == -1)
    {
        gettimeofday(&elapsed1, NULL);
        err = av_read_frame(pCtx, &packet);
        if (err < 0)
        {
            printf("\n->(av_read_frame)\tERROR:\t%d\n", err);
            break;
        }
        gettimeofday(&elapsed2, NULL);
        dusecs = (elapsed2.tv_sec - elapsed1.tv_sec)*1000000 + (elapsed2.tv_usec - elapsed1.tv_usec);
        usecs2 += dusecs;
        timestamp = av_rescale_q(packet.dts, pCtx->streams[packet.stream_index]->time_base, (AVRational){1, AV_TIME_BASE});
        printf("\nFrame No:%5d] stream#%d\tsize %dB, timestamp:%6lld, dts:%6lld, pts:%6lld, ", nframe++, packet.stream_index, packet.size,
               timestamp, packet.dts, packet.pts);
        if (packet.stream_index == opt.streamId)
        {
#if 0
            for (i = 0; i < 16; i++)
            {
                if (i == 0) printf("\n pktdata: ");
                printf("%2x ", packet.data[i]);
            }
            printf("\n");
#endif
            if (usefo)
            {
                fwrite(packet.data, packet.size, 1, fpo1);
                fflush(fpo1);
            }
            if (pCtx->streams[opt.streamId]->codec->codec_type == AVMEDIA_TYPE_VIDEO && !opt.nodec)
            {
                picheight = pCtx->streams[opt.streamId]->codec->height;
                picwidth = pCtx->streams[opt.streamId]->codec->width;
 
                gettimeofday(&elapsed1, NULL);
                avcodec_decode_video2(pCodecCtx, pFrame, &got_picture, &packet);
                decoded++;
                gettimeofday(&elapsed2, NULL);
                dusecs = (elapsed2.tv_sec - elapsed1.tv_sec)*1000000 + (elapsed2.tv_usec - elapsed1.tv_usec);
                usecs1 += dusecs;
                if (got_picture)
                {
                    printf("[Video: type %d, ref %d, pts %lld, pkt_pts %lld, pkt_dts %lld]",
                            pFrame->pict_type, pFrame->reference, pFrame->pts, pFrame->pkt_pts, pFrame->pkt_dts);
 
                    if (pCtx->streams[opt.streamId]->codec->pix_fmt == PIX_FMT_YUV420P)
                    {
                        if (usefo)
                        {
                            linesize = pFrame->linesize[0];
                            pBuf = pFrame->data[0];
                            for (i = 0; i < picheight; i++)
                            {
                                fwrite(pBuf, picwidth, 1, fpo2);
                                pBuf += linesize;
                            }
                            linesize = pFrame->linesize[1];
                            pBuf = pFrame->data[1];
                            for (i = 0; i < picheight/2; i++)
                            {
                                fwrite(pBuf, picwidth/2, 1, fpo2);
                                pBuf += linesize;
                            }
                            linesize = pFrame->linesize[2];
                            pBuf = pFrame->data[2];
                            for (i = 0; i < picheight/2; i++)
                            {
                                fwrite(pBuf, picwidth/2, 1, fpo2);
                                pBuf += linesize;
                            }
                            fflush(fpo2);
                        }
 
                        if (opt.bplay)
                        {
                            
                        }
                    }
                }
                av_free_packet(&packet);
            }
            else if (pCtx->streams[opt.streamId]->codec->codec_type == AVMEDIA_TYPE_AUDIO && !opt.nodec)
            {
                int got;
                gettimeofday(&elapsed1, NULL);
                avcodec_decode_audio4(pCodecCtx, pFrame, &got, &packet);
                decoded++;
                gettimeofday(&elapsed2, NULL);
                dusecs = (elapsed2.tv_sec - elapsed1.tv_sec)*1000000 + (elapsed2.tv_usec - elapsed1.tv_usec);
                usecs1 += dusecs;
                if (got)
                {
                    printf("[Audio: %dB raw data, decoding time: %dus]", pFrame->linesize[0], dusecs);
                    if (usefo)
                    {
                        fwrite(pFrame->data[0], pFrame->linesize[0], 1, fpo2);
                        fflush(fpo2);
                    }
                    if (opt.bplay)
                    {
                        play_pcm(&dsp, pFrame->data[0], pFrame->linesize[0]);
                    }
                }
            }
        }
    }
    if (!opt.nodec && pCodecCtx)
    {
        avcodec_close(pCodecCtx);
    }
    printf("\n%d frames parsed, average %.2f us per frame\n", nframe, usecs2/nframe);
    printf("%d frames decoded, average %.2f us per frame\n", decoded, usecs1/decoded);

#endif

fail:
    if (pCtx)
    {
        avformat_close_input(&pCtx);
    }


    return 0;
}
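
These examples call avcodec_decode_video2()/avcodec_decode_audio4(), which FFmpeg deprecated in 3.1 in favor of the send/receive API. A minimal sketch of the replacement loop, assuming FFmpeg >= 3.1 (the helper name decode_packet is ours, not part of any example):

static int decode_packet(AVCodecContext *ctx, const AVPacket *pkt, AVFrame *frame)
{
    /* feed one packet to the decoder; a NULL packet flushes it */
    int ret = avcodec_send_packet(ctx, pkt);
    if (ret < 0)
        return ret;

    /* one packet may yield zero, one, or several frames */
    while (ret >= 0) {
        ret = avcodec_receive_frame(ctx, frame);
        if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
            return 0;           /* need more input, or decoder fully drained */
        if (ret < 0)
            return ret;         /* genuine decoding error */
        /* ... consume the frame here ... */
        av_frame_unref(frame);
    }
    return 0;
}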
Example #25
int main(int argc, char **argv)
{
    int ret;
    AVPacket packet0, packet;
    AVFrame *frame = av_frame_alloc();
    AVFrame *filt_frame = av_frame_alloc();
    int got_frame;

    if (!frame || !filt_frame) {
        perror("Could not allocate frame");
        exit(1);
    }
    if (argc != 2) {
        fprintf(stderr, "Usage: %s file | %s\n", argv[0], player);
        exit(1);
    }

    avcodec_register_all();
    av_register_all();
    avfilter_register_all();

    if ((ret = open_input_file(argv[1])) < 0)
        goto end;
    if ((ret = init_filters(filter_descr)) < 0)
        goto end;

    /* read all packets */
    packet0.data = NULL;
    packet.data = NULL;
    while (1) {
        if (!packet0.data) {
            if ((ret = av_read_frame(fmt_ctx, &packet)) < 0)
                break;
            packet0 = packet;
        }

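        /* packet0 keeps ownership of the buffer; packet is advanced after a
           partial decode so the next iteration resumes inside the same packet */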
        if (packet.stream_index == audio_stream_index) {
            got_frame = 0;
            ret = avcodec_decode_audio4(dec_ctx, frame, &got_frame, &packet);
            if (ret < 0) {
                av_log(NULL, AV_LOG_ERROR, "Error decoding audio\n");
                continue;
            }
            packet.size -= ret;
            packet.data += ret;

            if (got_frame) {
                /* push the audio data from decoded frame into the filtergraph */
                if (av_buffersrc_add_frame_flags(buffersrc_ctx, frame, 0) < 0) {
                    av_log(NULL, AV_LOG_ERROR, "Error while feeding the audio filtergraph\n");
                    break;
                }

                /* pull filtered audio from the filtergraph */
                while (1) {
                    ret = av_buffersink_get_frame(buffersink_ctx, filt_frame);
                    if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
                        break;
                    if (ret < 0)
                        goto end;
                    print_frame(filt_frame);
                    av_frame_unref(filt_frame);
                }
            }

            if (packet.size <= 0)
                av_free_packet(&packet0);
        } else {
            /* discard non-wanted packets */
            av_free_packet(&packet0);
        }
    }
end:
    avfilter_graph_free(&filter_graph);
    avcodec_close(dec_ctx);
    avformat_close_input(&fmt_ctx);
    av_frame_free(&frame);
    av_frame_free(&filt_frame);

    if (ret < 0 && ret != AVERROR_EOF) {
        fprintf(stderr, "Error occurred: %s\n", av_err2str(ret));
        exit(1);
    }

    exit(0);
}
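Example #26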
void cAudio::run()
{
	lt_info("====================== start decoder thread ================================\n");
	/* libavcodec & friends */
	av_register_all();

	AVCodec *codec;
	AVFormatContext *avfc = NULL;
	AVInputFormat *inp;
	AVFrame *frame;
	uint8_t *inbuf = (uint8_t *)av_malloc(INBUF_SIZE);
	AVPacket avpkt;
	int ret, driver;
	/* libao */
	ao_info *ai;
	// ao_device *adevice;
	// ao_sample_format sformat;
	/* resample */
	SwrContext *swr = NULL;
	uint8_t *obuf = NULL;
	int obuf_sz = 0; /* in samples */
	int obuf_sz_max = 0;
	int o_ch, o_sr; /* output channels and sample rate */
	uint64_t o_layout; /* output channels layout */
	char tmp[64] = "unknown";

	curr_pts = 0;
	av_init_packet(&avpkt);
	inp = av_find_input_format("mpegts");
	AVIOContext *pIOCtx = avio_alloc_context(inbuf, INBUF_SIZE, // internal Buffer and its size
			0,		// bWriteable (1=true,0=false)
			NULL,		// user data; will be passed to our callback functions
			_my_read,	// read callback
			NULL,		// write callback
			NULL);		// seek callback
	avfc = avformat_alloc_context();
	avfc->pb = pIOCtx;
	avfc->iformat = inp;
	avfc->probesize = 188*5;
	thread_started = true;

	if (avformat_open_input(&avfc, NULL, inp, NULL) < 0) {
		lt_info("%s: avformat_open_input() failed.\n", __func__);
		goto out;
	}
	ret = avformat_find_stream_info(avfc, NULL);
	lt_debug("%s: avformat_find_stream_info: %d\n", __func__, ret);
	if (avfc->nb_streams != 1)
	{
		lt_info("%s: nb_streams: %d, should be 1!\n", __func__, avfc->nb_streams);
		goto out;
	}
	if (avfc->streams[0]->codec->codec_type != AVMEDIA_TYPE_AUDIO)
		lt_info("%s: stream 0 no audio codec? 0x%x\n", __func__, avfc->streams[0]->codec->codec_type);

	c = avfc->streams[0]->codec;
	codec = avcodec_find_decoder(c->codec_id);
	if (!codec) {
		lt_info("%s: Codec for %s not found\n", __func__, avcodec_get_name(c->codec_id));
		goto out;
	}
	if (avcodec_open2(c, codec, NULL) < 0) {
		lt_info("%s: avcodec_open2() failed\n", __func__);
		goto out;
	}
	frame = av_frame_alloc();
	if (!frame) {
		lt_info("%s: avcodec_alloc_frame failed\n", __func__);
		goto out2;
	}
	/* output sample rate, channels, layout could be set here if necessary */
	o_ch = c->channels;		/* 2 */
	o_sr = c->sample_rate;		/* 48000 */
	o_layout = c->channel_layout;	/* AV_CH_LAYOUT_STEREO */
	if (sformat.channels != o_ch || sformat.rate != o_sr ||
	    sformat.byte_format != AO_FMT_NATIVE || sformat.bits != 16 || adevice == NULL)
	{
		driver = ao_default_driver_id();
		sformat.bits = 16;
		sformat.channels = o_ch;
		sformat.rate = o_sr;
		sformat.byte_format = AO_FMT_NATIVE;
		sformat.matrix = 0;
		if (adevice)
			ao_close(adevice);
		adevice = ao_open_live(driver, &sformat, NULL);
		ai = ao_driver_info(driver);
		lt_info("%s: changed params ch %d srate %d bits %d adevice %p\n",
			__func__, o_ch, o_sr, 16, adevice);;
		lt_info("libao driver: %d name '%s' short '%s' author '%s'\n",
				driver, ai->name, ai->short_name, ai->author);
	}
#if 0
	lt_info(" driver options:");
	for (int i = 0; i < ai->option_count; ++i)
		fprintf(stderr, " %s", ai->options[i]);
	fprintf(stderr, "\n");
#endif
	av_get_sample_fmt_string(tmp, sizeof(tmp), c->sample_fmt);
	lt_info("decoding %s, sample_fmt %d (%s) sample_rate %d channels %d\n",
		 avcodec_get_name(c->codec_id), c->sample_fmt, tmp, c->sample_rate, c->channels);
	swr = swr_alloc_set_opts(swr,
				 o_layout, AV_SAMPLE_FMT_S16, o_sr,			/* output */
				 c->channel_layout, c->sample_fmt, c->sample_rate,	/* input */
				 0, NULL);
	if (! swr) {
		lt_info("could not alloc resample context\n");
		goto out3;
	}
	swr_init(swr);
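	/* decode loop: read TS packets, decode audio, resample to S16, play via libao */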
	while (thread_started) {
		int gotframe = 0;
		if (av_read_frame(avfc, &avpkt) < 0)
			break;
		avcodec_decode_audio4(c, frame, &gotframe, &avpkt);
		if (gotframe && thread_started) {
			int out_linesize;
			obuf_sz = av_rescale_rnd(swr_get_delay(swr, c->sample_rate) +
						 frame->nb_samples, o_sr, c->sample_rate, AV_ROUND_UP);
			if (obuf_sz > obuf_sz_max) {
				lt_info("obuf_sz: %d old: %d\n", obuf_sz, obuf_sz_max);
				av_free(obuf);
				/* allocate for obuf_sz samples (includes resampler delay), not just nb_samples */
				if (av_samples_alloc(&obuf, &out_linesize, o_ch,
							obuf_sz, AV_SAMPLE_FMT_S16, 1) < 0) {
					lt_info("av_samples_alloc failed\n");
					av_free_packet(&avpkt);
					break; /* while (thread_started) */
				}
				obuf_sz_max = obuf_sz;
			}
			obuf_sz = swr_convert(swr, &obuf, obuf_sz,
					      (const uint8_t **)frame->extended_data, frame->nb_samples);
			curr_pts = av_frame_get_best_effort_timestamp(frame);
			lt_debug("%s: pts 0x%" PRIx64 " %3f\n", __func__, curr_pts, curr_pts/90000.0);
			int o_buf_sz = av_samples_get_buffer_size(&out_linesize, o_ch,
								  obuf_sz, AV_SAMPLE_FMT_S16, 1);
			ao_play(adevice, (char *)obuf, o_buf_sz);
		}
		av_free_packet(&avpkt);
	}
	// ao_close(adevice); /* can take long :-( */
	av_free(obuf);
	swr_free(&swr);
 out3:
	av_frame_free(&frame);
 out2:
	avcodec_close(c);
	c = NULL;
 out:
	avformat_close_input(&avfc);
	av_free(pIOCtx->buffer);
	av_free(pIOCtx);
	lt_info("======================== end decoder thread ================================\n");
}
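
avio_alloc_context() above wires the demuxer to a custom read callback (_my_read, defined elsewhere in this driver). Such a callback must match the signature below; this is only an illustrative sketch, with a FILE* standing in for the real data source:

static int my_read_sketch(void *opaque, uint8_t *buf, int buf_size)
{
    FILE *f = (FILE *)opaque;   /* hypothetical source; the original reads from the demux device */
    size_t n = fread(buf, 1, (size_t)buf_size, f);
    /* newer FFmpeg expects AVERROR_EOF rather than 0 at end of stream */
    return n > 0 ? (int)n : AVERROR_EOF;
}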
Example #27
	Chroma::Result Chroma::operator() (const QString& filename)
	{
		std::shared_ptr<AVFormatContext> formatCtx;
		{
			AVFormatContext *formatCtxRaw = nullptr;
			if (avformat_open_input (&formatCtxRaw, filename.toLatin1 ().constData (), nullptr, nullptr))
				throw std::runtime_error ("error opening file");

			formatCtx.reset (formatCtxRaw,
					[] (AVFormatContext *ctx) { avformat_close_input (&ctx); });
		}

		{
			QMutexLocker locker (&CodecMutex_);
			if (av_find_stream_info (formatCtx.get ()) < 0)
				throw std::runtime_error ("could not find stream");
		}

		AVCodec *codec = nullptr;
		const auto streamIndex = av_find_best_stream (formatCtx.get (), AVMEDIA_TYPE_AUDIO, -1, -1, &codec, 0);
		if (streamIndex < 0)
			throw std::runtime_error ("could not find audio stream");

		auto stream = formatCtx->streams [streamIndex];

		bool codecOpened = false;

		std::shared_ptr<AVCodecContext> codecCtx (stream->codec,
				[&codecOpened] (AVCodecContext *ctx) { if (codecOpened) avcodec_close (ctx); });
		{
			QMutexLocker locker (&CodecMutex_);
			if (avcodec_open2 (codecCtx.get (), codec, nullptr) < 0)
				throw std::runtime_error ("couldn't open the codec");
		}
		codecOpened = true;

		if (codecCtx->channels <= 0)
			throw std::runtime_error ("no channels found");

		std::shared_ptr<SwrContext> swr;
		if (codecCtx->sample_fmt != AV_SAMPLE_FMT_S16)
		{
			swr.reset (swr_alloc (), [] (SwrContext *ctx) { if (ctx) swr_free (&ctx); });
			av_opt_set_int (swr.get (), "in_channel_layout", codecCtx->channel_layout, 0);
			av_opt_set_int (swr.get (), "out_channel_layout", codecCtx->channel_layout,  0);
			av_opt_set_int (swr.get (), "in_sample_rate", codecCtx->sample_rate, 0);
			av_opt_set_int (swr.get (), "out_sample_rate", codecCtx->sample_rate, 0);
			av_opt_set_sample_fmt (swr.get (), "in_sample_fmt", codecCtx->sample_fmt, 0);
			av_opt_set_sample_fmt (swr.get (), "out_sample_fmt", AV_SAMPLE_FMT_S16, 0);
			swr_init (swr.get ());
		}

		AVPacket packet;
		av_init_packet (&packet);

		const int maxLength = 120;
		auto remaining = maxLength * codecCtx->channels * codecCtx->sample_rate;
		chromaprint_start (Ctx_, codecCtx->sample_rate, codecCtx->channels);

		std::shared_ptr<AVFrame> frame (avcodec_alloc_frame (),
				[] (AVFrame *frame) { avcodec_free_frame (&frame); });
		auto maxDstNbSamples = 0;

		uint8_t *dstData [1] = { nullptr };
		std::shared_ptr<void> dstDataGuard (nullptr,
				[&dstData] (void*) { if (dstData [0]) av_freep (&dstData [0]); });
		while (true)
		{
			if (av_read_frame (formatCtx.get (), &packet) < 0)
				break;

			std::shared_ptr<void> guard (nullptr,
					[&packet] (void*) { if (packet.data) av_free_packet (&packet); });

			if (packet.stream_index != streamIndex)
				continue;

			avcodec_get_frame_defaults (frame.get ());
			int gotFrame = false;
			auto consumed = avcodec_decode_audio4 (codecCtx.get (), frame.get (), &gotFrame, &packet);

			if (consumed < 0 || !gotFrame)
				continue;

			uint8_t **data = nullptr;
			if (swr)
			{
				if (frame->nb_samples > maxDstNbSamples)
				{
					if (dstData [0])
						av_freep (&dstData [0]);
					int linesize = 0;
					if (av_samples_alloc (dstData, &linesize, codecCtx->channels, frame->nb_samples, AV_SAMPLE_FMT_S16, 1) < 0)
						throw std::runtime_error ("cannot allocate memory for resampling");
					maxDstNbSamples = frame->nb_samples;
				}

				if (swr_convert (swr.get (), dstData, frame->nb_samples, const_cast<const uint8_t**> (frame->data), frame->nb_samples) < 0)
					throw std::runtime_error ("cannot resample audio");

				data = dstData;
			}
			else
				data = frame->data;

			auto length = std::min (remaining, frame->nb_samples * codecCtx->channels);
			if (!chromaprint_feed (Ctx_, data [0], length))
				throw std::runtime_error ("cannot feed data");

			bool finished = false;
			if (maxLength)
			{
				remaining -= length;
				if (remaining <= 0)
					finished = true;
			}
			if (finished)
				break;
		}

		if (!chromaprint_finish (Ctx_))
			throw std::runtime_error ("fingerprint calculation failed");

		char *fingerprint = 0;
		if (!chromaprint_get_fingerprint (Ctx_, &fingerprint))
			throw std::runtime_error ("unable to get fingerprint");

		QByteArray result (fingerprint);
		chromaprint_dealloc (fingerprint);

		const double divideFactor = 1. / av_q2d (stream->time_base);
		const double duration = stream->duration / divideFactor;

		return { result, static_cast<int> (duration) };
	}
Example #28
int decode_audio_file(ChromaprintContext *chromaprint_ctx, const char *file_name, int max_length, int *duration)
{
	int ok = 0, remaining, length, consumed, codec_ctx_opened = 0, got_frame, stream_index;
	AVFormatContext *format_ctx = NULL;
	AVCodecContext *codec_ctx = NULL;
	AVCodec *codec = NULL;
	AVStream *stream = NULL;
	AVFrame *frame = NULL;
#if defined(HAVE_SWRESAMPLE)
	SwrContext *convert_ctx = NULL;
#elif defined(HAVE_AVRESAMPLE)
	AVAudioResampleContext *convert_ctx = NULL;
#else
	void *convert_ctx = NULL;
#endif
	int max_dst_nb_samples = 0, dst_linsize = 0;
	uint8_t *dst_data[1] = { NULL };
	uint8_t **data;
	AVPacket packet;

	if (!strcmp(file_name, "-")) {
		file_name = "pipe:0";
	}

	if (avformat_open_input(&format_ctx, file_name, NULL, NULL) != 0) {
		fprintf(stderr, "ERROR: couldn't open the file\n");
		goto done;
	}

	if (avformat_find_stream_info(format_ctx, NULL) < 0) {
		fprintf(stderr, "ERROR: couldn't find stream information in the file\n");
		goto done;
	}

	stream_index = av_find_best_stream(format_ctx, AVMEDIA_TYPE_AUDIO, -1, -1, &codec, 0);
	if (stream_index < 0) {
		fprintf(stderr, "ERROR: couldn't find any audio stream in the file\n");
		goto done;
	}

	stream = format_ctx->streams[stream_index];

	codec_ctx = stream->codec;
	codec_ctx->request_sample_fmt = AV_SAMPLE_FMT_S16;

	if (avcodec_open2(codec_ctx, codec, NULL) < 0) {
		fprintf(stderr, "ERROR: couldn't open the codec\n");
		goto done;
	}
	codec_ctx_opened = 1;

	if (codec_ctx->channels <= 0) {
		fprintf(stderr, "ERROR: no channels found in the audio stream\n");
		goto done;
	}

	if (codec_ctx->sample_fmt != AV_SAMPLE_FMT_S16) {
		int64_t channel_layout = codec_ctx->channel_layout;
		if (!channel_layout) {
			channel_layout = get_default_channel_layout(codec_ctx->channels);
		}
#if defined(HAVE_SWRESAMPLE)
		convert_ctx = swr_alloc_set_opts(NULL,
			channel_layout, AV_SAMPLE_FMT_S16, codec_ctx->sample_rate,
			channel_layout, codec_ctx->sample_fmt, codec_ctx->sample_rate,
			0, NULL);
		if (!convert_ctx) {
			fprintf(stderr, "ERROR: couldn't allocate audio converter\n");
			goto done;
		}
		if (swr_init(convert_ctx) < 0) {
			fprintf(stderr, "ERROR: couldn't initialize the audio converter\n");
			goto done;
		}
#elif defined(HAVE_AVRESAMPLE)
		convert_ctx = avresample_alloc_context();
		av_opt_set_int(convert_ctx, "out_channel_layout", channel_layout, 0);
		av_opt_set_int(convert_ctx, "out_sample_fmt", AV_SAMPLE_FMT_S16, 0);
		av_opt_set_int(convert_ctx, "out_sample_rate", codec_ctx->sample_rate, 0);
		av_opt_set_int(convert_ctx, "in_channel_layout", channel_layout, 0);
		av_opt_set_int(convert_ctx, "in_sample_fmt", codec_ctx->sample_fmt, 0);
		av_opt_set_int(convert_ctx, "in_sample_rate", codec_ctx->sample_rate, 0);
		if (!convert_ctx) {
			fprintf(stderr, "ERROR: couldn't allocate audio converter\n");
			goto done;
		}
		if (avresample_open(convert_ctx) < 0) {
			fprintf(stderr, "ERROR: couldn't initialize the audio converter\n");
			goto done;
		}
#else
		fprintf(stderr, "ERROR: unsupported audio format (please build fpcalc with libswresample)\n");
		goto done;
#endif
	}

	if (stream->duration != AV_NOPTS_VALUE) {
		*duration = stream->time_base.num * stream->duration / stream->time_base.den;
	}
	else if (format_ctx->duration != AV_NOPTS_VALUE) {
		*duration = format_ctx->duration / AV_TIME_BASE;
	}
	else {
		fprintf(stderr, "ERROR: couldn't detect the audio duration\n");
		goto done;
	}

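	/* remaining counts interleaved 16-bit samples: max_length seconds * channels * sample rate */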
	remaining = max_length * codec_ctx->channels * codec_ctx->sample_rate;
	chromaprint_start(chromaprint_ctx, codec_ctx->sample_rate, codec_ctx->channels);

	frame = av_frame_alloc();

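	/* main decode loop: decode each audio packet, convert to S16 if needed,
	   and feed the PCM to chromaprint until max_length is reached */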
	while (1) {
		if (av_read_frame(format_ctx, &packet) < 0) {
			break;
		}

		if (packet.stream_index == stream_index) {
			av_frame_unref(frame);

			got_frame = 0;
			consumed = avcodec_decode_audio4(codec_ctx, frame, &got_frame, &packet);
			if (consumed < 0) {
				fprintf(stderr, "WARNING: error decoding audio\n");
				continue;
			}

			if (got_frame) {
				data = frame->data;
				if (convert_ctx) {
					if (frame->nb_samples > max_dst_nb_samples) {
						av_freep(&dst_data[0]);
						if (av_samples_alloc(dst_data, &dst_linsize, codec_ctx->channels, frame->nb_samples, AV_SAMPLE_FMT_S16, 1) < 0) {
							fprintf(stderr, "ERROR: couldn't allocate audio converter buffer\n");
							goto done;
						}
						max_dst_nb_samples = frame->nb_samples;
					}
#if defined(HAVE_SWRESAMPLE)
					if (swr_convert(convert_ctx, dst_data, frame->nb_samples, (const uint8_t **)frame->data, frame->nb_samples) < 0)
#elif defined(HAVE_AVRESAMPLE)
					if (avresample_convert(convert_ctx, dst_data, 0, frame->nb_samples, (uint8_t **)frame->data, 0, frame->nb_samples) < 0)
#endif
					{
						fprintf(stderr, "ERROR: couldn't convert the audio\n");
						goto done;
					}
					data = dst_data;
				}
				length = MIN(remaining, frame->nb_samples * codec_ctx->channels);
				if (!chromaprint_feed(chromaprint_ctx, data[0], length)) {
					goto done;
				}

				if (max_length) {
					remaining -= length;
					if (remaining <= 0) {
						goto finish;
					}
				}
			}
		}
		av_free_packet(&packet);
	}

finish:
	if (!chromaprint_finish(chromaprint_ctx)) {
		fprintf(stderr, "ERROR: fingerprint calculation failed\n");
		goto done;
	}

	ok = 1;

done:
	if (frame) {
		av_frame_free(&frame);
	}
	if (dst_data[0]) {
		av_freep(&dst_data[0]);
	}
	if (convert_ctx) {
#if defined(HAVE_SWRESAMPLE)
		swr_free(&convert_ctx);
#elif defined(HAVE_AVRESAMPLE)
		avresample_free(&convert_ctx);
#endif
	}
	if (codec_ctx_opened) {
		avcodec_close(codec_ctx);
	}
	if (format_ctx) {
		avformat_close_input(&format_ctx);
	}
	return ok;
}
Example #29
int audio_decode_frame(VideoState *is, double *pts_ptr) {
  int len1, data_size = 0, n;
  AVPacket *pkt = &is->audio_pkt;
  double pts;

  for(;;) {
    while(is->audio_pkt_size > 0) {
      int got_frame = 0;
      len1 = avcodec_decode_audio4(is->audio_st->codec, &is->audio_frame, &got_frame, pkt);
      if(len1 < 0) {
        /* if error, skip frame */
        is->audio_pkt_size = 0;
        break;
      }
      if (got_frame)
      {
        data_size =
          av_samples_get_buffer_size
          (
            NULL,
            is->audio_st->codec->channels,
            is->audio_frame.nb_samples,
            is->audio_st->codec->sample_fmt,
            1
          );
        memcpy(is->audio_buf, is->audio_frame.data[0], data_size);
      }
      is->audio_pkt_data += len1;
      is->audio_pkt_size -= len1;
      if(data_size <= 0) {
        /* No data yet, get more frames */
        continue;
      }
      pts = is->audio_clock;
      *pts_ptr = pts;
      n = 2 * is->audio_st->codec->channels;
      is->audio_clock += (double)data_size /
                         (double)(n * is->audio_st->codec->sample_rate);
      /* We have data, return it and come back for more later */
      return data_size;
    }
    if(pkt->data)
      av_free_packet(pkt);
    if(is->quit) {
      return -1;
    }
    /* next packet */
    if(packet_queue_get(&is->audioq, pkt, 1) < 0) {
      return -1;
    }
    if(pkt->data == flush_pkt.data) {
      avcodec_flush_buffers(is->audio_st->codec);
      continue;
    }
    is->audio_pkt_data = pkt->data;
    is->audio_pkt_size = pkt->size;
    /* if the packet carries a pts, update the audio clock from it */
    if(pkt->pts != AV_NOPTS_VALUE) {
      is->audio_clock = av_q2d(is->audio_st->time_base)*pkt->pts;
    }
  }
}
Example #30
int audio_decode_frame(VideoState *is) {
  int len1, data_size = 0;
  AVPacket *pkt = &is->audio_pkt;

  for(;;) {
    while(is->audio_pkt_size > 0) {
      int got_frame = 0;
      len1 = avcodec_decode_audio4(is->audio_st->codec, &is->audio_frame, &got_frame, pkt);
      if(len1 < 0) {
        /* if error, skip frame */
        is->audio_pkt_size = 0;
        break;
      }
      if (got_frame)
      {
          /*
          data_size = 
            av_samples_get_buffer_size
            (
                NULL, 
                is->audio_st->codec->channels,
                is->audio_frame.nb_samples,
                is->audio_st->codec->sample_fmt,
                1
            );
          memcpy(is->audio_buf, is->audio_frame.data[0], data_size);
          */
          data_size = av_samples_get_buffer_size
                  (
                      NULL,
                      is->audio_st->codec->channels,
                      is->audio_frame.nb_samples,
                      AV_SAMPLE_FMT_S16,
                      0
                  );
          if (!swr_ctx)
          {
              swr_ctx = swr_alloc();
              if (!swr_ctx)
              {
                  printf("Could not allocate swr context\n");
                  exit(1);
              }
              /* set options: same layout and rate on both sides, only the sample format changes */
              av_opt_set_int(swr_ctx, "in_channel_layout", is->audio_st->codec->channel_layout, 0);
              av_opt_set_int(swr_ctx, "in_sample_rate", is->audio_st->codec->sample_rate, 0);
              av_opt_set_sample_fmt(swr_ctx, "in_sample_fmt", is->audio_st->codec->sample_fmt, 0);
              av_opt_set_int(swr_ctx, "out_channel_layout", is->audio_st->codec->channel_layout, 0);
              av_opt_set_int(swr_ctx, "out_sample_rate", is->audio_st->codec->sample_rate, 0);
              av_opt_set_sample_fmt(swr_ctx, "out_sample_fmt", AV_SAMPLE_FMT_S16, 0);
              if (swr_init(swr_ctx) < 0)
              {
                  printf("Failed to initialize the resampling context\n");
                  exit(1);
              }
              /* NOTE: sized from the first frame; assumes nb_samples stays constant */
              decode_buffer = (uint8_t*) malloc(data_size);
          }
          int ret = swr_convert(swr_ctx, &decode_buffer, is->audio_frame.nb_samples,
                                (const uint8_t **) &is->audio_frame.data[0],
                                is->audio_frame.nb_samples);
          if (ret < 0)
          {
              printf("Error while converting\n");
              break;
          }
          memcpy(is->audio_buf, decode_buffer, data_size);

      }
      is->audio_pkt_data += len1;
      is->audio_pkt_size -= len1;
      if(data_size <= 0) {
        /* No data yet, get more frames */
        continue;
      }
      /* We have data, return it and come back for more later */
      return data_size;
    }
    if(pkt->data)
      av_free_packet(pkt);

    if(is->quit) {
      return -1;
    }
    /* next packet */
    if(packet_queue_get(&is->audioq, pkt, 1) < 0) {
      return -1;
    }
    is->audio_pkt_data = pkt->data;
    is->audio_pkt_size = pkt->size;
  }
}