コード例 #1
0
void
MediaPictureResamplerTest::testRescale() {

  // Decodes H264 video from a fixture file, rescales every decoded picture
  // to a different size and pixel format, and writes the results out so a
  // human can verify the resampler output.
  TestData::Fixture* fixture=mFixtures.getFixture("testfile_h264_mp4a_tmcd.mov");
  TS_ASSERT(fixture);
  char filepath[2048];
  mFixtures.fillPath(fixture, filepath, sizeof(filepath));

  RefPointer<Demuxer> source = Demuxer::make();

  source->open(filepath, 0, false, true, 0, 0);

  int32_t numStreams = source->getNumStreams();
  TS_ASSERT_EQUALS(fixture->num_streams, numStreams);

  // Stream 0 in this fixture is the H264 video stream.
  int32_t streamToDecode = 0;
  RefPointer<DemuxerStream> stream = source->getStream(streamToDecode);
  TS_ASSERT(stream);
  RefPointer<Decoder> decoder = stream->getDecoder();
  TS_ASSERT(decoder);
  RefPointer<Codec> codec = decoder->getCodec();
  TS_ASSERT(codec);
  TS_ASSERT_EQUALS(Codec::CODEC_ID_H264, codec->getID());

  decoder->open(0, 0);

  TS_ASSERT_EQUALS(PixelFormat::PIX_FMT_YUV420P, decoder->getPixelFormat());

  // now, let's start a decoding loop.
  RefPointer<MediaPacket> packet = MediaPacket::make();

  RefPointer<MediaPicture> picture = MediaPicture::make(
      decoder->getWidth(),
      decoder->getHeight(),
      decoder->getPixelFormat());

  // Shrink horizontally and stretch vertically by a non-integer factor to
  // exercise the resampler's scaling paths, and convert YUV420P -> RGBA.
  int32_t rescaleW = decoder->getWidth()/4;
  int32_t rescaleH = (int32_t)(decoder->getHeight()*5.22);
  PixelFormat::Type rescaleFmt = PixelFormat::PIX_FMT_RGBA;
  RefPointer<MediaPicture> rescaled = MediaPicture::make(
      rescaleW, rescaleH, rescaleFmt);
  RefPointer<MediaPictureResampler> resampler =
      MediaPictureResampler::make(rescaled->getWidth(),
          rescaled->getHeight(),
          rescaled->getFormat(),
          picture->getWidth(),
          picture->getHeight(),
          picture->getFormat(),
          0);
  resampler->open();

  int32_t frameNo = 0;
  while(source->read(packet.value()) >= 0) {
    // got a packet; now we try to decode it.
    if (packet->getStreamIndex() == streamToDecode &&
        packet->isComplete()) {
      int32_t bytesRead = 0;
      int32_t byteOffset=0;
      do {
        bytesRead = decoder->decodeVideo(picture.value(), packet.value(), byteOffset);
        if (bytesRead <= 0)
          // Error or no forward progress: bail out instead of spinning
          // forever on the same offset.
          break;
        if (picture->isComplete()) {
          writePicture("MediaPictureResamplerTest_testRescaleVideo",
              &frameNo, picture.value(), resampler.value(), rescaled.value());
        }
        byteOffset += bytesRead;
      } while(byteOffset < packet->getSize());
    }
    if ((int32_t)(frameNo/30) > 10)
      // roughly 10 seconds of video (300+ frames at ~30fps) is enough to
      // see if the resampler is working.
      break;
  }
  // Flush any pictures the decoder has cached by feeding it a null packet
  // until it stops producing complete pictures. This must happen exactly
  // once, after we have finished feeding real packets (flushing between
  // packets signals end-of-stream to the decoder prematurely).
  do {
    decoder->decodeVideo(picture.value(), 0, 0);
    if (picture->isComplete()) {
      writePicture("MediaPictureResamplerTest_testRescaleVideo", &frameNo, picture.value(),
          resampler.value(), rescaled.value());
    }
  } while (picture->isComplete());
  source->close();
}
コード例 #2
0
void
AudioResamplerTest :: testResamplingAudio()
{
  // Reads audio from the sample file, resamples it from mono/22050Hz to
  // stereo/44100Hz, re-encodes it with libmp3lame, and writes an FLV file,
  // asserting each step succeeds.
  RefPointer<IAudioResampler> resampler = IAudioResampler::make(2, 1,
      44100, 22050);
  RefPointer<IAudioSamples> samples = 0;
  RefPointer<IAudioSamples> resamples = 0;
  int numSamples = 0;
  int numPackets = 0;
  int retval = -1;
  samples = IAudioSamples::make(1024, 1);
  VS_TUT_ENSURE("got no samples", samples);
  resamples = IAudioSamples::make(1024, 2);
  // Fixed a copy-paste bug: this previously re-checked `samples` instead of
  // the `resamples` buffer that was just allocated.
  VS_TUT_ENSURE("got no resamples", resamples);

  h->setupReading(h->SAMPLE_FILE);

  RefPointer<IPacket> packet = IPacket::make();

  hw->setupWriting("AudioResamplerTest_3_output.flv", 0, "libmp3lame", "flv");
  int outStream = hw->first_output_audio_stream;
  VS_TUT_ENSURE("Could not find an audio stream in the output", outStream >= 0);
  int inStream = h->first_input_audio_stream;
  VS_TUT_ENSURE("Could not find an audio stream in the input", inStream >= 0);

  RefPointer<IStreamCoder> ic = h->coders[inStream];
  RefPointer<IStreamCoder> oc = hw->coders[outStream];
  RefPointer<IPacket> opacket = IPacket::make();
  VS_TUT_ENSURE("! opacket", opacket);

  // Set the output coder correctly; the resampler converts from whatever
  // the input coder produces to the fixed stereo/44100 output.
  int outChannels = 2;
  int outRate = 44100;
  resampler = IAudioResampler::make(outChannels, ic->getChannels(),
      outRate, ic->getSampleRate());
  oc->setSampleRate(outRate);
  oc->setChannels(outChannels);
  oc->setBitRate(ic->getBitRate());

  int maxSamples = 10 * ic->getSampleRate(); // 10 seconds

  retval = ic->open();
  VS_TUT_ENSURE("Could not open input coder", retval >= 0);
  retval = oc->open();
  VS_TUT_ENSURE("Could not open output coder", retval >= 0);

  // write header
  retval = hw->container->writeHeader();
  VS_TUT_ENSURE("could not write header", retval >= 0);

  while (h->container->readNextPacket(packet.value()) == 0
      && numSamples < maxSamples)
  {
    if (packet->getStreamIndex() == inStream)
    {
      int offset = 0;

      numPackets++;

      // decodeAudio may consume a packet in several chunks; keep going
      // until the whole packet is consumed.
      while (offset < packet->getSize())
      {
        retval = ic->decodeAudio(
            samples.value(),
            packet.value(),
            offset);
        VS_TUT_ENSURE("could not decode any audio",
            retval > 0);
        offset += retval;
        VS_TUT_ENSURE("could not write any samples",
            samples->getNumSamples() > 0);
        numSamples += samples->getNumSamples();
        // 22050 -> 44100 doubles the sample count; size accordingly.
        resamples = IAudioSamples::make((samples->getNumSamples()*2),2);
        // now, resample the audio
        retval = resampler->resample(resamples.value(), samples.value(), 0);
        VS_TUT_ENSURE("could not resample", retval > 0);
        VS_TUT_ENSURE("no resamples", resamples->getNumSamples() > 0);
        VS_TUT_ENSURE_EQUALS("wrong sample rate", resamples->getSampleRate(),
            outRate);
        VS_TUT_ENSURE_EQUALS("wrong channels", resamples->getChannels(),
            outChannels);
        // now, write out the packets.
        unsigned int samplesConsumed = 0;
        do {
          retval = oc->encodeAudio(opacket.value(), resamples.value(),
              samplesConsumed);
          VS_TUT_ENSURE("Could not encode any audio", retval >= 0);
          samplesConsumed += (unsigned int)retval;
          VS_LOG_TRACE("packet: %d; is: %d; os: %d",
              numPackets, numSamples, samplesConsumed);

          if (opacket->isComplete())
          {
            VS_TUT_ENSURE("could not encode audio", opacket->getSize() > 0);
            RefPointer<IBuffer> encodedBuffer = opacket->getData();
            VS_TUT_ENSURE("no encoded data", encodedBuffer);
            VS_TUT_ENSURE("less data than there should be",
                encodedBuffer->getBufferSize() >=
                opacket->getSize());
            retval = hw->container->writePacket(opacket.value());
            VS_TUT_ENSURE("could not write packet", retval >= 0);
          }
          // keep going until we've encoded all samples in this buffer.
        } while (samplesConsumed < resamples->getNumSamples());
      }
    }
  }
  // sigh; it turns out that to flush the encoding buffers you need to
  // ask the encoder to encode a NULL set of samples.  So, let's do that.
  retval = oc->encodeAudio(opacket.value(), 0, 0);
  VS_TUT_ENSURE("Could not encode any audio", retval >= 0);
  if (retval > 0)
  {
    retval = hw->container->writePacket(opacket.value());
    VS_TUT_ENSURE("could not write packet", retval >= 0);
  }

  retval = hw->container->writeTrailer();
  VS_TUT_ENSURE("! writeTrailer", retval >= 0);

  retval = ic->close();
  VS_TUT_ENSURE("! close", retval >= 0);
  retval = oc->close();

  VS_TUT_ENSURE("! close", retval >= 0);
  VS_TUT_ENSURE("could not get any audio packets", numPackets > 0);
  VS_TUT_ENSURE("could not decode any audio", numSamples > 0);
}
コード例 #3
0
ファイル: EncoderTest.cpp プロジェクト: ConeyLiu/humble-video
void
EncoderTest::testRegression36Internal (const Codec::ID codecId,
                                       const int32_t numSamples,
                                       const int32_t sampleRate,
                                       const int32_t channels,
                                       const AudioChannel::Layout channelLayout,
                                       const AudioFormat::Type audioFormat,
                                       const int64_t bitRate,
                                       const char* testOutputName)
{
  // Regression test for issue #36: encodes one over-sized audio frame
  // (a full second of samples) and verifies the encoder splits it into
  // multiple packets on flush.
  VS_LOG_DEBUG("Output filename: %s", testOutputName);
  RefPointer<Codec> codec = Codec::findEncodingCodec (codecId);
  RefPointer<Encoder> encoder = Encoder::make (codec.value ());
  RefPointer<FilterGraph> graph = FilterGraph::make ();
  RefPointer<MediaAudio> audio = MediaAudio::make (numSamples, sampleRate,
                                                   channels, channelLayout,
                                                   audioFormat);
  // set the encoder properties we need
  encoder->setSampleRate (audio->getSampleRate ());
  encoder->setSampleFormat (audio->getFormat ());
  encoder->setChannelLayout (audio->getChannelLayout ());
  encoder->setChannels (audio->getChannels ());
  encoder->setProperty ("b", (int64_t) (bitRate));
  RefPointer<Rational> tb = Rational::make (1, sampleRate);
  encoder->setTimeBase (tb.value ());
  // create an output muxer
  RefPointer<Muxer> muxer = Muxer::make (testOutputName, 0, 0);
  RefPointer<MuxerFormat> format = muxer->getFormat ();
  if (format->getFlag (MuxerFormat::GLOBAL_HEADER))
    encoder->setFlag (Encoder::FLAG_GLOBAL_HEADER, true);

  // open the encoder
  encoder->open (0, 0);
  RefPointer<FilterAudioSink> fsink = graph->addAudioSink (
      "out", audio->getSampleRate (), audio->getChannelLayout (),
      audio->getFormat ());
  // Generate a 220 Hz sine wave with a 880 Hz beep each second, for 10 seconds.
  graph->open ("sine=frequency=220:beep_factor=4:duration=11[out]");
  fsink->setFrameSize (numSamples);
  // add a stream for the encoded packets
  {
    RefPointer<MuxerStream> stream = muxer->addNewStream (encoder.value ());
  }      // and open the muxer
  muxer->open (0, 0);
  // now we're (in theory) ready to start writing data.
  int32_t numCompletePackets = 0;
  RefPointer<MediaPacket> packet;
  // Get one audio packet that is larger than the frame-size.
  fsink->getAudio (audio.value ());
  TS_ASSERT(audio->isComplete ());
  TS_ASSERT_EQUALS(audio->getNumSamples (), sampleRate);
  audio->setTimeStamp (0);
  // let's encode
  packet = MediaPacket::make ();
  encoder->encodeAudio (packet.value (), audio.value ());
  if (packet->isComplete ())
  {
    muxer->write (packet.value (), false);
  }
  // now flush the encoder
  do
  {
    packet = MediaPacket::make ();
    encoder->encodeAudio (packet.value (), 0);
    if (packet->isComplete ())
    {
      muxer->write (packet.value (), false);
      ++numCompletePackets;
    }
  }
  while (packet->isComplete ());
  muxer->close ();
  if (!(codec->getCapabilities() & Codec::CAP_VARIABLE_FRAME_SIZE)) {
    const int32_t numExpectedPackets = audio->getNumSamples()
        / encoder->getFrameSize();

    // Fixed format specifiers: the values are int32_t, so %ld was wrong
    // on LP64 platforms; use PRId32 as the rest of the file does.
    VS_LOG_DEBUG("%" PRId32 " vs %" PRId32 "; framesize: %" PRId32,
        numCompletePackets, numExpectedPackets,
        (int32_t) encoder->getFrameSize());
    TS_ASSERT(numCompletePackets > 10);
  }
}
コード例 #4
0
ファイル: EncoderTest.cpp プロジェクト: ConeyLiu/humble-video
void
EncoderTest::testEncodeVideo() {
  Logger::setGlobalIsLogging(Logger::LEVEL_TRACE, false);
  LoggerStack stack;
  stack.setGlobalLevel(Logger::LEVEL_INFO, false);

  const bool isMemCheck = getenv("VS_TEST_MEMCHECK") ? true : false;
  const int32_t maxPics = isMemCheck ? 10 : 500;
  int32_t width=176;
  int32_t height=144;

  RefPointer<Codec> codec = Codec::findEncodingCodec(Codec::CODEC_ID_H264);
  RefPointer<Encoder> encoder = Encoder::make(codec.value());
  RefPointer<MediaPicture> picture = MediaPicture::make(width*2,height*2,
      PixelFormat::PIX_FMT_YUV420P);

  // set the encoder properties we need
  encoder->setWidth(picture->getWidth());
  encoder->setHeight(picture->getHeight());
  encoder->setPixelFormat(picture->getFormat());
  encoder->setProperty("b", (int64_t)400000); // bitrate
  encoder->setProperty("g", (int64_t) 10); // gop
  encoder->setProperty("bf", (int64_t)1); // max b frames

  RefPointer<Rational> tb = Rational::make(1,25);
  encoder->setTimeBase(tb.value());

  // mandlebrot, that is then negated, horizontally flipped, and edge detected, before
  // final outputting to a new picture with each version in one of 4 quadrants.
  char graphCommand[1024];
  snprintf(graphCommand,sizeof(graphCommand),"mandelbrot=s=%dx%d[mb];"
      "[mb]split=4[0][1][2][3];"
      "[0]pad=iw*2:ih*2[a];"
      "[1]negate[b];"
      "[2]hflip[c];"
      "[3]edgedetect[d];"
      "[a][b]overlay=w[x];"
      "[x][c]overlay=0:h[y];"
      "[y][d]overlay=w:h[out]", width, height);

  RefPointer<FilterGraph> graph = FilterGraph::make();
  RefPointer<FilterPictureSink> fsink = graph->addPictureSink("out", picture->getFormat());
  graph->open(graphCommand);

  // let's set a frame time base of 1/30
  RefPointer<Rational> pictureTb = Rational::make(1,30);

  // create an output muxer
  RefPointer<Muxer> muxer = Muxer::make("EncoderTest_encodeVideo.mov", 0, 0);
  RefPointer<MuxerFormat> format = muxer->getFormat();

  // if the container will require a global header, then the encoder needs to set this.
  if (format->getFlag(MuxerFormat::GLOBAL_HEADER))
    encoder->setFlag(Encoder::FLAG_GLOBAL_HEADER, true);

  // open the encoder
  encoder->open(0, 0);

  // add a stream for the encoded packets
  {
    RefPointer<MuxerStream> stream = muxer->addNewStream(encoder.value());
  }

  // and open the muxer
  muxer->open(0, 0);

  // now we're (in theory) ready to start writing data.
  int32_t numPics = 0;
  RefPointer<MediaPacket> packet;

  while(fsink->getPicture(picture.value()) >= 0 && numPics < maxPics) {
    picture->setTimeBase(pictureTb.value());
    picture->setTimeStamp(numPics);

    // let's encode
    packet = MediaPacket::make();
    encoder->encodeVideo(packet.value(), picture.value());
    if (packet->isComplete()) {
      muxer->write(packet.value(), false);
    }
    ++numPics;
  }
  // now flush the encoder
  do {
    packet = MediaPacket::make();
    encoder->encodeVideo(packet.value(), 0);
    if (packet->isComplete()) {
      muxer->write(packet.value(), false);
    }
  } while (packet->isComplete());

  muxer->close();

}
コード例 #5
0
ファイル: EncoderTest.cpp プロジェクト: ConeyLiu/humble-video
/**
 * This test will read ironman (FLV, H263 video and mp3 audio) and transcode
 * to MP4 (H264 Video and aac audio).
 */
void
EncoderTest::testTranscode()
{
  // enable trace logging
  Logger::setGlobalIsLogging(Logger::LEVEL_TRACE, false);
  const bool isMemCheck = getenv("VS_TEST_MEMCHECK") ? true : false;
  LoggerStack stack;
  stack.setGlobalLevel(Logger::LEVEL_INFO, false);

  TestData::Fixture* fixture;
  fixture=mFixtures.getFixture("testfile.flv");
//  fixture=mFixtures.getFixture("testfile_h264_mp4a_tmcd.mov");
//  fixture=mFixtures.getFixture("bigbuckbunny_h264_aac_5.1.mp4");
  TS_ASSERT(fixture);
  char filepath[2048];
  mFixtures.fillPath(fixture, filepath, sizeof(filepath));

  RefPointer<Demuxer> source = Demuxer::make();
  source->open(filepath, 0, false, true, 0, 0);
  int32_t numStreams = source->getNumStreams();
  TS_ASSERT_EQUALS(fixture->num_streams, numStreams);

  // Let's create a helper object to help us with decoding
  typedef struct {
    MediaDescriptor::Type type;
    RefPointer<DemuxerStream> stream;
    RefPointer<Decoder> decoder;
    RefPointer<MediaSampled> media;
  } DemuxerStreamHelper;

  // I know there are only 2 in the input file.
  DemuxerStreamHelper inputHelpers[10];
  for(int32_t i = 0; i < numStreams; i++) {
    DemuxerStreamHelper* input = &inputHelpers[i];
    input->stream = source->getStream(i);
    input->decoder = input->stream->getDecoder();
    if (!input->decoder)
      // skip
      break;
    input->decoder->open(0, 0);
    input->type = input->decoder->getCodecType();
    if (input->type == MediaDescriptor::MEDIA_AUDIO)
      input->media = MediaAudio::make(
          input->decoder->getFrameSize(),
          input->decoder->getSampleRate(),
          input->decoder->getChannels(),
          input->decoder->getChannelLayout(),
          input->decoder->getSampleFormat());
    else if (input->type  == MediaDescriptor::MEDIA_VIDEO)
      input->media = MediaPicture::make(
          input->decoder->getWidth(),
          input->decoder->getHeight(),
          input->decoder->getPixelFormat()
      );
  }

  // now, let's set up our output file.
  RefPointer<Muxer> muxer = Muxer::make("EncoderTest_testTranscode.mp4", 0, 0);
  RefPointer<MuxerFormat> format = muxer->getFormat();

  // Let's create a helper object to help us with decoding
  typedef struct {
    MediaDescriptor::Type type;
    RefPointer<MuxerStream> stream;
    RefPointer<MediaResampler> resampler;
    RefPointer<MediaSampled> media;
    RefPointer<Encoder> encoder;
  } MuxerStreamHelper;

  MuxerStreamHelper outputHelpers[10];
  for(int32_t i = 0; i < numStreams; i++) {
    DemuxerStreamHelper *input = &inputHelpers[i];
    MuxerStreamHelper *output = &outputHelpers[i];
    if (!input->decoder)
      // skip
      break;
    output->type = input->type;
    RefPointer<Encoder> encoder;
    if (output->type == MediaDescriptor::MEDIA_VIDEO) {
      RefPointer<Codec> codec = Codec::findEncodingCodec(Codec::CODEC_ID_H264);
      encoder = Encoder::make(codec.value());

      // set the encoder properties we need
      encoder->setWidth(input->decoder->getWidth());
      encoder->setHeight(input->decoder->getHeight());
      encoder->setPixelFormat(input->decoder->getPixelFormat());
      encoder->setProperty("b", (int64_t)400000); // bitrate
      encoder->setProperty("g", (int64_t) 10); // gop
      encoder->setProperty("bf", (int64_t)0); // max b frames
      RefPointer<Rational> tb = Rational::make(1,2997);
      encoder->setTimeBase(tb.value());


    } else if (output->type == MediaDescriptor::MEDIA_AUDIO) {
      RefPointer<Codec> codec = Codec::findEncodingCodec(Codec::CODEC_ID_AAC);
      encoder = Encoder::make(codec.value());

      // set the encoder properties we need
      encoder->setSampleRate(input->decoder->getSampleRate());
      encoder->setSampleFormat(input->decoder->getSampleFormat());
      encoder->setSampleFormat(AudioFormat::SAMPLE_FMT_S16);
      encoder->setChannelLayout(input->decoder->getChannelLayout());
      encoder->setChannels(input->decoder->getChannels());
      encoder->setProperty("b", (int64_t)64000); // bitrate
      RefPointer<Rational> tb = Rational::make(1,encoder->getSampleRate());
      encoder->setTimeBase(tb.value());
      //      //input->decoder->getTimeBase();
      //      output->encoder->setTimeBase(tb.value());

    }
    output->encoder.reset(encoder.value(), true);
    if (output->encoder) {
      if (format->getFlag(MuxerFormat::GLOBAL_HEADER))
        output->encoder->setFlag(Encoder::FLAG_GLOBAL_HEADER, true);

      output->encoder->open(0,0);

      // sometimes encoders need to change parameters to fit; let's see
      // if that happened.
      output->stream = muxer->addNewStream(output->encoder.value());
    }
    output->media = input->media;
    output->resampler = 0;
    if (output->type == MediaDescriptor::MEDIA_AUDIO) {
      // sometimes encoders only accept certain media types and discard
      // our suggestions. Let's check.
      if (
          output->encoder->getSampleRate() != input->decoder->getSampleRate() ||
          output->encoder->getSampleFormat() != input->decoder->getSampleFormat() ||
          output->encoder->getChannelLayout() != input->decoder->getChannelLayout() ||
          output->encoder->getChannels() != input->decoder->getChannels()
          )
      {
        // we need a resampler.
        VS_LOG_DEBUG("Resampling: [%"PRId32", %"PRId32", %"PRId32"] [%"PRId32", %"PRId32", %"PRId32"]",
                     (int32_t)output->encoder->getChannelLayout(),
                     (int32_t)output->encoder->getSampleRate(),
                     (int32_t)output->encoder->getSampleFormat(),
                     (int32_t)input->decoder->getChannelLayout(),
                     (int32_t)input->decoder->getSampleRate(),
                     (int32_t)input->decoder->getSampleFormat()
        );
        RefPointer<MediaAudioResampler> resampler = MediaAudioResampler::make(
            output->encoder->getChannelLayout(),
            output->encoder->getSampleRate(),
            output->encoder->getSampleFormat(),
            input->decoder->getChannelLayout(),
            input->decoder->getSampleRate(),
            input->decoder->getSampleFormat()
            );
        resampler->open();
        output->resampler.reset(resampler.value(), true);
        output->media = MediaAudio::make(
                  output->encoder->getFrameSize(),
                  output->encoder->getSampleRate(),
                  output->encoder->getChannels(),
                  output->encoder->getChannelLayout(),
                  output->encoder->getSampleFormat());
      }
    }
  }
  // now we should be ready to open the muxer
  muxer->open(0, 0);

  // now, let's start a decoding loop.
  RefPointer<MediaPacket> packet = MediaPacket::make();

  int numPackets = 0;
  while(source->read(packet.value()) >= 0) {
    // got a packet; now we try to decode it.
    if (packet->isComplete()) {
      int32_t streamNo = packet->getStreamIndex();
      DemuxerStreamHelper *input = &inputHelpers[streamNo];
      MuxerStreamHelper* output = &outputHelpers[streamNo];
      if (input->decoder) decodeAndEncode(
          packet.value(),
          input->decoder.value(),
          input->media.value(),
          output->resampler.value(),
          output->media.value(),
          muxer.value(),
          output->encoder.value());
      ++numPackets;
      if (isMemCheck && numPackets > 100) {
        VS_LOG_WARN("Exiting early under valgrind");
        break;
      }
    }
  }

  // now, flush any cached packets
  for(int i = 0; i < numStreams; i++) {
    DemuxerStreamHelper *input = &inputHelpers[i];
    MuxerStreamHelper* output = &outputHelpers[i];
    if (input->decoder) decodeAndEncode(
        0,
        input->decoder.value(),
        input->media.value(),
        output->resampler.value(),
        output->media.value(),
        muxer.value(),
        output->encoder.value());

  }
  source->close();
  muxer->close();
}
コード例 #6
0
ファイル: EncoderTest.cpp プロジェクト: ConeyLiu/humble-video
void
EncoderTest::testEncodeAudio() {
  Logger::setGlobalIsLogging(Logger::LEVEL_TRACE, false);
  LoggerStack stack;
  stack.setGlobalLevel(Logger::LEVEL_INFO, false);

  const bool isMemCheck = getenv("VS_TEST_MEMCHECK") ? true : false;
  const int32_t sampleRate = 44100;
  const int32_t maxSamples = isMemCheck ? sampleRate*0.5 : sampleRate*10;
  const int32_t numSamples = 1024;
  const AudioChannel::Layout channelLayout = AudioChannel::CH_LAYOUT_STEREO;
  const int32_t channels = AudioChannel::getNumChannelsInLayout(channelLayout);
  const AudioFormat::Type audioFormat = AudioFormat::SAMPLE_FMT_S16;
  RefPointer<Codec> codec = Codec::findEncodingCodec(Codec::CODEC_ID_AAC);
  RefPointer<Encoder> encoder = Encoder::make(codec.value());

  RefPointer<FilterGraph> graph = FilterGraph::make();

  RefPointer<MediaAudio> audio = MediaAudio::make(numSamples, sampleRate, channels, channelLayout,
      audioFormat);

  // set the encoder properties we need
  encoder->setSampleRate(audio->getSampleRate());
  encoder->setSampleFormat(audio->getFormat());
  encoder->setChannelLayout(audio->getChannelLayout());
  encoder->setChannels(audio->getChannels());
  encoder->setProperty("b", (int64_t)64000); // bitrate
  RefPointer<Rational> tb = Rational::make(1,25);
  encoder->setTimeBase(tb.value());

  // create an output muxer
  RefPointer<Muxer> muxer = Muxer::make("EncoderTest_encodeAudio.mp4", 0, 0);
  RefPointer<MuxerFormat> format = muxer->getFormat();
  if (format->getFlag(MuxerFormat::GLOBAL_HEADER))
    encoder->setFlag(Encoder::FLAG_GLOBAL_HEADER, true);

  // open the encoder
  encoder->open(0, 0);

  RefPointer<FilterAudioSink> fsink = graph->addAudioSink("out", audio->getSampleRate(), audio->getChannelLayout(), audio->getFormat());

  // Generate a 220 Hz sine wave with a 880 Hz beep each second, for 10 seconds.
  graph->open("sine=frequency=660:beep_factor=4:duration=11[out]");
  // Generate an amplitude modulated signal
  //graph->open("aevalsrc=sin(10*2*PI*t)*sin(880*2*PI*t)[out]");

  // add a stream for the encoded packets
  {
    RefPointer<MuxerStream> stream = muxer->addNewStream(encoder.value());
  }

  // and open the muxer
  muxer->open(0, 0);

  // now we're (in theory) ready to start writing data.
  int32_t numFrames = 0;
  RefPointer<MediaPacket> packet;

  while(fsink->getAudio(audio.value()) >= 0 && audio->isComplete() && numFrames*audio->getNumSamples() < maxSamples) {
    audio->setTimeStamp(numFrames*audio->getNumSamples());

    // let's encode
    packet = MediaPacket::make();
    encoder->encodeAudio(packet.value(), audio.value());
    if (packet->isComplete()) {
      muxer->write(packet.value(), false);
    }
    ++numFrames;
  }
  // now flush the encoder
  do {
    packet = MediaPacket::make();
    encoder->encodeAudio(packet.value(), 0);
    if (packet->isComplete()) {
      muxer->write(packet.value(), false);
    }
  } while (packet->isComplete());

  muxer->close();
}