Example #1
Coder*
Container::Stream::getCoder() {
  if (!mCoder) {
    // we need to know the stream direction...
    AVFormatContext* ctx = mContainer->getFormatCtx();
    AVStream* stream = getCtx();

    if (!ctx || !stream) {
      VS_THROW(HumbleRuntimeError("could not get container context to find coder"));
    }
    if (!stream->codec) {
      VS_THROW(HumbleRuntimeError("No codec set for stream"));
    }
    RefPointer<Codec> codec;

    if (ctx->iformat) {
      // make a copy of the decoder so we decouple it from the container
      // completely
      codec = Codec::findDecodingCodec((Codec::ID)stream->codec->codec_id);
      if (!codec) {
        VS_THROW(HumbleRuntimeError("could not find decoding codec"));
      }
      mCoder = Decoder::make(codec.value(), stream->codec, true);
    } else {
      VS_THROW(HumbleRuntimeError("Got null encoder on MuxerStream which should not be possible"));
    }
  }
  return mCoder.get();
}
Example #2
int32_t
FilterPictureSink::getPicture(MediaPicture* picture) {
  if (!picture) {
    VS_THROW(HumbleInvalidArgument("no picture passed in"));
  }
  AVFilterContext* ctx = getFilterCtx();

  // sadly, FFmpeg will not auto scale width and height at this
  // time, and so we need to check before we get a frame so we
  // don't destroy something we shouldn't.
  if (!ctx->inputs) {
    VS_THROW(HumbleRuntimeError("unexpect ffmpeg internal error"));
  }

  AVFilterLink* link = ctx->inputs[0];
  if (link) {
    if (link->w != picture->getWidth() || link->h != picture->getHeight()) {
      VS_THROW(HumbleInvalidArgument::make("picture dimensions do not match expected.  Got (%d x %d); Expected (%d x %d)",
          picture->getWidth(),
          picture->getHeight(),
          link->w,
          link->h
          ));
    }
  }
  return FilterSink::get(picture);
}
Example #3
MediaSubtitleRectangle*
MediaSubtitle::getRectangle(int32_t n)
{
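  // bounds-check the requested index against the number of rectangles
  // FFmpeg reported before wrapping the underlying rect.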
  if (n < 0 || n >= (int32_t)mCtx->num_rects)
    throw HumbleInvalidArgument("attempt to get out-of-range rectangle");

  if (!mCtx->rects)
    throw HumbleRuntimeError("no rectangles");

  return MediaSubtitleRectangle::make(mCtx->rects[n]);
}
Example #4
Decoder*
DemuxerStream::getDecoder() {
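  // lazily build the decoder the first time it is requested and cache it
  // for later calls.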
  AVStream* stream = getCtx();

  if (!mDecoder) {
    if (stream->codec) {
      // make a copy of the decoder so we decouple it from the container
      // completely
      RefPointer<Codec> codec = Codec::findDecodingCodec((Codec::ID)stream->codec->codec_id);
      if (!codec) {
        VS_THROW(HumbleRuntimeError("could not find decoding codec"));
      }
      mDecoder = Decoder::make(codec.value(), stream->codec, true);
    }
  }
  return mDecoder.get();
}
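For context, a minimal usage sketch of the accessor above, assuming the public Humble Video demuxer API (Demuxer::make/open/getStream and Decoder::open) behaves as in the library's demos; "input.mp4" is a placeholder:

// Sketch only: API usage assumed from Humble Video demos; the file name is a placeholder.
RefPointer<Demuxer> demuxer = Demuxer::make();
demuxer->open("input.mp4", 0, false, true, 0, 0);
for (int32_t i = 0; i < demuxer->getNumStreams(); i++) {
  RefPointer<DemuxerStream> stream = demuxer->getStream(i);
  RefPointer<Decoder> decoder = stream->getDecoder(); // lazily built and cached as shown above
  if (decoder)
    decoder->open(0, 0); // open with default options
}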
Example #5
void
Container::doSetupStreams() {
  // do nothing if we're already all set up.
  AVFormatContext* ctx = getFormatCtx();
  if (!ctx) {
    VS_THROW(HumbleRuntimeError("Attempt to setup streams on closed or error container"));
  }
  if (mStreams.size() == ctx->nb_streams) return;

  // loop through and find the first non-zero time base
  AVRational *goodTimebase = 0;
  for (uint32_t i = 0; i < ctx->nb_streams; i++) {
    AVStream *avStream = ctx->streams[i];
    if (avStream && avStream->time_base.num && avStream->time_base.den) {
      goodTimebase = &avStream->time_base;
      break;
    }
  }

  // Only look for new streams
  for (uint32_t i = mStreams.size(); i < ctx->nb_streams; i++) {
    AVStream *avStream = ctx->streams[i];
    if (!avStream)
      VS_THROW(HumbleRuntimeError::make("no FFMPEG allocated stream: %d", i));
    if (!avStream->time_base.num || !avStream->time_base.den) {
      if (avStream->codec && avStream->codec->sample_rate && avStream->codec->codec_type == AVMEDIA_TYPE_AUDIO)
      {
        // for audio, 1/sample-rate is a good timebase.
        avStream->time_base.num = 1;
        avStream->time_base.den = avStream->codec->sample_rate;
      } else if(goodTimebase) {
        avStream->time_base = *goodTimebase;
      }
    }
    // now let's initialize our stream object.
    Stream* stream = new Stream(this, i);
    mStreams.push_back(stream);
  }
}
Example #6
int64_t
MediaSubtitle::logMetadata(char * buffer, size_t len) {
  VS_THROW(HumbleRuntimeError("NOT IMPLEMENTED"));
  return snprintf(buffer, len, "NOT IMPLEMENTED");
}
Example #7
MediaPictureImpl*
MediaPictureImpl::make(Buffer* buffer, int32_t width, int32_t height,
    PixelFormat::Type format) {
  if (width <= 0) {
    VS_THROW(HumbleInvalidArgument("width must be > 0"));
  }

  if (height <= 0) {
    VS_THROW(HumbleInvalidArgument("height must be > 0"));
  }

  if (format == PixelFormat::PIX_FMT_NONE) {
    VS_THROW(HumbleInvalidArgument("pixel format must be specifie"));
  }

  if (!buffer) {
    VS_THROW(HumbleInvalidArgument("must pass non null buffer"));
  }

  // let's figure out how big of a buffer we need
  int32_t bufSize = PixelFormat::getBufferSizeNeeded(width, height, format);
  if (buffer->getBufferSize() < bufSize) {
    VS_THROW(
        HumbleInvalidArgument(
            "passed in buffer too small to fit requested image parameters"));
  }

  RefPointer<MediaPictureImpl> retval = make();
  AVFrame* frame = retval->mFrame;
  frame->width = width;
  frame->height = height;
  frame->format = format;

  // buffer is large enough; let's fill the data pointers
  uint8_t* data = (uint8_t*) buffer->getBytes(0, bufSize);

  int32_t imgSize = av_image_fill_arrays(frame->data, frame->linesize, data,
      (enum AVPixelFormat) frame->format, frame->width, frame->height, 1);
  if (imgSize != bufSize) {
    VS_ASSERT(imgSize == bufSize, "these should always be equal");
    VS_THROW(HumbleRuntimeError("could not fill image with data"));
  }

  // now, set up the reference buffers
  frame->extended_data = frame->data;
  for (int32_t i = 0; i < AV_NUM_DATA_POINTERS; i++) {
    if (frame->data[i])
      frame->buf[i] = AVBufferSupport::wrapBuffer(buffer, frame->data[i], frame->linesize[0]*frame->height+16);
  }
  // now fill in the AVBufferRefs where we pass our buffer off to FFmpeg's
  // care. Be kind, FFmpeg. Be kind.
  RefPointer<PixelFormatDescriptor> desc = PixelFormat::getDescriptor((PixelFormat::Type)frame->format);

  if (!desc) {
    VS_THROW(HumbleRuntimeError("could not get format descriptor"));
  }
  if (desc->getFlag(PixelFormatDescriptor::PIX_FMT_FLAG_PAL) ||
      desc->getFlag(PixelFormatDescriptor::PIX_FMT_FLAG_PSEUDOPAL)) {
    av_buffer_unref(&frame->buf[1]);
    frame->buf[1] = AVBufferSupport::wrapBuffer(Buffer::make(retval.value(), 1024));
    if (!frame->buf[1]) {
      VS_THROW(HumbleRuntimeError("memory failure"));
    }

    frame->data[1] = frame->buf[1]->data;
  }

  int32_t n = retval->getNumDataPlanes();
  (void) n;
  VS_LOG_TRACE("Created MediaPicture: %d x %d (%d). [%d, %d, %d, %d]",
      retval->getWidth(),
      retval->getHeight(),
      retval->getFormat(),
      n < 1 ? 0 : retval->getDataPlaneSize(0),
      n < 2 ? 0 : retval->getDataPlaneSize(1),
      n < 3 ? 0 : retval->getDataPlaneSize(2),
      n < 4 ? 0 : retval->getDataPlaneSize(3)
      );

  // and we're done.
  return retval.get();
}
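For context, a minimal, hedged sketch of calling the factory above; the 640x480 YUV420P values are arbitrary placeholders, and Buffer::make(requestor, size) is assumed to behave as elsewhere in Humble Video:

// Sketch only: dimensions and pixel format are arbitrary placeholders.
int32_t width = 640;
int32_t height = 480;
PixelFormat::Type format = PixelFormat::PIX_FMT_YUV420P;
int32_t needed = PixelFormat::getBufferSizeNeeded(width, height, format);
RefPointer<Buffer> buffer = Buffer::make(0, needed);
RefPointer<MediaPictureImpl> picture = MediaPictureImpl::make(buffer.value(), width, height, format);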