Code Example #1
File: Container.cpp  Project: dstieglitz/humble-video
Coder*
Container::Stream::getCoder() {
  if (!mCoder) {
    // we need to know the stream direction...
    AVFormatContext* ctx = mContainer->getFormatCtx();
    AVStream* stream = getCtx();

    if (!ctx || !stream) {
      VS_THROW(HumbleRuntimeError("could not get container context to find coder"));
    }
    if (!stream->codec) {
      VS_THROW(HumbleRuntimeError("No codec set for stream"));
    }
    RefPointer<Codec> codec;

    if (ctx->iformat) {
      // make a copy of the decoder so we decouple it from the container
      // completely
      codec = Codec::findDecodingCodec((Codec::ID)stream->codec->codec_id);
      if (!codec) {
        VS_THROW(HumbleRuntimeError("could not find decoding codec"));
      }
      mCoder = Decoder::make(codec.value(), stream->codec, true);
    } else {
      VS_THROW(HumbleRuntimeError("Got null encoder on MuxerStream which should not be possible"));
    }
  }
  return mCoder.get();
}
Code Example #2
int32_t
FilterPictureSink::getPicture(MediaPicture* picture) {
  if (!picture) {
    VS_THROW(HumbleInvalidArgument("no picture passed in"));
  }
  AVFilterContext* ctx = getFilterCtx();

  // sadly, FFmpeg will not auto scale width and height at this
  // time, and so we need to check before we get a frame so we
  // don't destroy something we shouldn't.
  if (!ctx->inputs) {
    VS_THROW(HumbleRuntimeError("unexpect ffmpeg internal error"));
  }

  AVFilterLink* link = ctx->inputs[0];
  if (link) {
    if (link->w != picture->getWidth() || link->h != picture->getHeight()) {
      VS_THROW(HumbleInvalidArgument::make("picture dimensions do not match expected.  Got (%d x %d); Expected (%d x %d)",
          picture->getWidth(),
          picture->getHeight(),
          link->w,
          link->h
          ));
    }
  }
  return FilterSink::get(picture);
}
Code Example #3
File: BufferImpl.cpp  Project: ConeyLiu/humble-video
  BufferImpl*
  BufferImpl :: make(io::humble::ferry::RefCounted* requestor, int32_t bufferSize)
  {
    RefPointer<BufferImpl> retval;
    if (bufferSize <= 0)
      VS_THROW(HumbleInvalidArgument("bufferSize must be > 0"));
    
    void * allocator = requestor ? requestor->getJavaAllocator() : 0;
    void *buffer = JNIMemoryManager::malloc(allocator, bufferSize);
    if (!buffer) {
      VS_THROW(HumbleBadAlloc());
    }
      
    try {
      retval = BufferImpl::make();

      retval->mBuffer = buffer;
      retval->mBufferSize = bufferSize;
      retval->mInternallyAllocated = true;
    } catch (std::exception & e) {
      JNIMemoryManager::free(buffer);
      throw;
    }
    return retval.get();
  }
Code Example #4
File: BufferImpl.cpp  Project: ConeyLiu/humble-video
  BufferImpl*
  BufferImpl :: make(io::humble::ferry::RefCounted* /*unused*/, void *bufToWrap, int32_t bufferSize,
      FreeFunc freeFunc, void *closure)
  {
    RefPointer<BufferImpl> retval;

    if (!bufToWrap) {
      VS_THROW(HumbleInvalidArgument("bufToWrap must be non null"));
    }

    if (bufferSize <= 0) {
      VS_THROW(HumbleInvalidArgument("bufferSize must be > 0"));
    }

    if (bufToWrap && bufferSize>0)
    {
      retval = BufferImpl::make();
      retval->mFreeFunc = freeFunc;
      retval->mClosure = closure;
      retval->mBufferSize = bufferSize;
      retval->mBuffer = bufToWrap;
      retval->mInternallyAllocated = false;
    }
    return retval.get();
  }
Code Example #5
File: Container.cpp  Project: dstieglitz/humble-video
Container::Stream*
Container::getStream(int32_t index) {
  if (index < 0) {
    VS_THROW(HumbleInvalidArgument("index must be >= 0"));
  }
  doSetupStreams();
  if ((size_t)index >= mStreams.size()) {
    VS_THROW(HumbleInvalidArgument("index must be < #getNumStreams()"));
  }
  return mStreams[index];
}
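
A minimal usage sketch (not from the project source) combining getStream() with Stream::getCoder() from Code Example #1. It assumes the Container* has already been opened elsewhere and that getNumStreams(), referenced in the error message above, is the public stream-count accessor; reference-count handling is omitted.

static void listCoders(Container* container) {
  int32_t n = container->getNumStreams();                // assumed accessor; see the error message above
  for (int32_t i = 0; i < n; i++) {
    Container::Stream* stream = container->getStream(i); // throws HumbleInvalidArgument on a bad index
    Coder* coder = stream->getCoder();                   // throws HumbleRuntimeError if no codec is set
    (void) coder;                                        // ref-count release omitted in this sketch
  }
}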
Code Example #6
void
MediaPictureImpl::validatePlane(int32_t plane)
{
  if (plane < 0) {
    VS_THROW(HumbleInvalidArgument("plane must be >= 0"));
  }

  if (plane >= 4) {
    VS_THROW(HumbleInvalidArgument("plane must be < getNumDataPlanes()"));
  }

}
Code Example #7
MediaPictureImpl*
MediaPictureImpl::make(MediaPictureImpl* src, bool copy) {
  RefPointer<MediaPictureImpl> retval;

  if (!src) VS_THROW(HumbleInvalidArgument("no src object to copy from"));

  if (copy) {
    // first create a new media picture object to copy into
    retval = make(src->getWidth(), src->getHeight(), src->getFormat());
    retval->mComplete = src->mComplete;

    // then copy the data into retval
    int32_t n = src->getNumDataPlanes();
    for(int32_t i = 0; i < n; i++ )
    {
      AVBufferRef* dstBuf = av_frame_get_plane_buffer(retval->mFrame, i);
      AVBufferRef* srcBuf = av_frame_get_plane_buffer(src->mFrame, i);
      VS_ASSERT(dstBuf, "should always have buffer");
      VS_ASSERT(srcBuf, "should always have buffer");
      memcpy(dstBuf->data, srcBuf->data, srcBuf->size);
    }
  } else {
    // first create a new media picture object to reference into
    retval = make();

    // then do the reference
    retval->mComplete = src->mComplete;
    av_frame_ref(retval->mFrame, src->mFrame);
  }
  // finally, propagate the complete flag and timebase
  retval->setComplete(src->isComplete());
  RefPointer<Rational> timeBase = src->getTimeBase();
  retval->setTimeBase(timeBase.value());
  return retval.get();
}
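
A brief sketch (not from the project source) contrasting the two modes of this factory, assuming an existing MediaPictureImpl* named src; ownership of the returned pointers follows the library's RefCounted conventions, which these snippets do not show.

static void duplicateModes(MediaPictureImpl* src) {
  MediaPictureImpl* deepCopy  = MediaPictureImpl::make(src, true);  // new buffers, pixels memcpy'd
  MediaPictureImpl* reference = MediaPictureImpl::make(src, false); // shares src's buffers via av_frame_ref
  (void) deepCopy;
  (void) reference;
}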
Code Example #8
File: BufferImpl.cpp  Project: ConeyLiu/humble-video
 BufferImpl*
 BufferImpl :: make(io::humble::ferry::RefCounted* requestor,
     Type type, int32_t numElements, bool zero)
 {
   if (numElements <= 0)
     VS_THROW(HumbleInvalidArgument("numElements must be > 0"));
   if (type < 0 || type >= BUFFER_NB)
     VS_THROW(HumbleInvalidArgument("invalid type"));
   
   int32_t bytesRequested = numElements*mTypeSize[(int32_t)type];
   BufferImpl *retval = BufferImpl::make(requestor, bytesRequested);
   retval->mType = type;
   if (zero)
     memset(retval->getBytes(0, bytesRequested), 0, bytesRequested);
   return retval;
 }
Code Example #9
MediaPictureImpl*
MediaPictureImpl::make(int32_t width, int32_t height,
    PixelFormat::Type format) {
  if (width <= 0)
  VS_THROW(HumbleInvalidArgument("width must be > 0"));

  if (height <= 0)
  VS_THROW(HumbleInvalidArgument("height must be > 0"));

  if (format == PixelFormat::PIX_FMT_NONE)
  VS_THROW(HumbleInvalidArgument("pixel format must be specifie"));

  // let's figure out how big of a buffer we need
  int32_t bufSize = PixelFormat::getBufferSizeNeeded(width, height, format);

  RefPointer<Buffer> buffer = Buffer::make(0, bufSize);
  MediaPictureImpl* retval = make(buffer.value(), width, height, format);
  if (retval) buffer->setJavaAllocator(retval->getJavaAllocator());

  return retval;
}
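
A minimal allocation sketch (not from the project source). PIX_FMT_YUV420P is assumed to be one of the PixelFormat::Type constants mirroring FFmpeg's pixel formats, and ownership of the returned pointer follows the library's RefCounted conventions, which these snippets do not show.

static MediaPictureImpl* allocateExample() {
  // allocates a 640x480 picture; throws HumbleInvalidArgument on bad arguments
  return MediaPictureImpl::make(640, 480, PixelFormat::PIX_FMT_YUV420P); // PIX_FMT_YUV420P is assumed
}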
Code Example #10
void
MediaPictureImpl::copy(AVFrame* src, bool complete) {
  if (!src)
    VS_THROW(HumbleInvalidArgument("no src"));
  // release any memory we have
  av_frame_unref(mFrame);
  // and copy any data in.
  av_frame_ref(mFrame, src);
  RefPointer<Rational> timeBase = getTimeBase();
  setTimeBase(timeBase.value());
  mComplete=complete;
}
Code Example #11
File: Container.cpp  Project: dstieglitz/humble-video
void
Container::doSetupStreams() {
  // do nothing if we're already all set up.
  AVFormatContext* ctx = getFormatCtx();
  if (!ctx) {
    VS_THROW(HumbleRuntimeError("Attempt to setup streams on closed or error container"));
  }
  if (mStreams.size() == ctx->nb_streams) return;

  // loop through and find the first non-zero time base
  AVRational *goodTimebase = 0;
  for (uint32_t i = 0; i < ctx->nb_streams; i++) {
    AVStream *avStream = ctx->streams[i];
    if (avStream && avStream->time_base.num && avStream->time_base.den) {
      goodTimebase = &avStream->time_base;
      break;
    }
  }

  // Only look for new streams
  for (uint32_t i = mStreams.size(); i < ctx->nb_streams; i++) {
    AVStream *avStream = ctx->streams[i];
    if (!avStream)
      VS_THROW(HumbleRuntimeError::make("no FFMPEG allocated stream: %d", i));
    if (!avStream->time_base.num || !avStream->time_base.den) {
      if (avStream->codec && avStream->codec->sample_rate && avStream->codec->codec_type == AVMEDIA_TYPE_AUDIO)
      {
        // for audio, 1/sample-rate is a good timebase.
        avStream->time_base.num = 1;
        avStream->time_base.den = avStream->codec->sample_rate;
      } else if(goodTimebase) {
        avStream->time_base = *goodTimebase;
      }
    }
    // now let's initialize our stream object.
    Stream* stream = new Stream(this, i);
    mStreams.push_back(stream);
  }
}
Code Example #12
Decoder*
DemuxerStream::getDecoder() {
  AVStream* stream = getCtx();

  if (!mDecoder) {
    if (stream->codec) {
      // make a copy of the decoder so we decouple it from the container
      // completely
      RefPointer<Codec> codec = Codec::findDecodingCodec((Codec::ID)stream->codec->codec_id);
      if (!codec) {
        VS_THROW(HumbleRuntimeError("could not find decoding codec"));
      }
      mDecoder = Decoder::make(codec.value(), stream->codec, true);
    }
  }
  return mDecoder.get();
}
Code Example #13
int64_t
MediaSubtitle::logMetadata(char * buffer, size_t len) {
  VS_THROW(HumbleRuntimeError("NOT IMPLEMENTED"));
  return snprintf(buffer, len, "NOT IMPLEMENTED");
}
Code Example #14
MediaPictureImpl*
MediaPictureImpl::make(Buffer* buffer, int32_t width, int32_t height,
    PixelFormat::Type format) {
  if (width <= 0) {
    VS_THROW(HumbleInvalidArgument("width must be > 0"));
  }

  if (height <= 0) {
    VS_THROW(HumbleInvalidArgument("height must be > 0"));
  }

  if (format == PixelFormat::PIX_FMT_NONE) {
    VS_THROW(HumbleInvalidArgument("pixel format must be specifie"));
  }

  if (!buffer) {
    VS_THROW(HumbleInvalidArgument("must pass non null buffer"));
  }

  // let's figure out how big of a buffer we need
  int32_t bufSize = PixelFormat::getBufferSizeNeeded(width, height, format);
  if (bufSize < buffer->getBufferSize()) {
    VS_THROW(
        HumbleInvalidArgument(
            "passed in buffer too small to fit requested image parameters"));
  }

  RefPointer<MediaPictureImpl> retval = make();
  AVFrame* frame = retval->mFrame;
  frame->width = width;
  frame->height = height;
  frame->format = format;

  // buffer is large enough; let's fill the data pointers
  uint8_t* data = (uint8_t*) buffer->getBytes(0, bufSize);

  int32_t imgSize = av_image_fill_arrays(frame->data, frame->linesize, data,
      (enum AVPixelFormat) frame->format, frame->width, frame->height, 1);
  if (imgSize != bufSize) {
    VS_ASSERT(imgSize == bufSize, "these should always be equal");
    VS_THROW(HumbleRuntimeError("could not fill image with data"));
  }

  // now, set up the reference buffers
  frame->extended_data = frame->data;
  for (int32_t i = 0; i < AV_NUM_DATA_POINTERS; i++) {
    if (frame->data[i])
      frame->buf[i] = AVBufferSupport::wrapBuffer(buffer, frame->data[i], frame->linesize[0]*frame->height+16);
  }
  // now fill in the AVBufferRefs where we pass off to FFmpeg care
  // of our buffer. Be kind FFmpeg.  Be kind.
  RefPointer<PixelFormatDescriptor> desc = PixelFormat::getDescriptor((PixelFormat::Type)frame->format);

  if (!desc) {
    VS_THROW(HumbleRuntimeError("could not get format descriptor"));
  }
  if (desc->getFlag(PixelFormatDescriptor::PIX_FMT_FLAG_PAL) ||
      desc->getFlag(PixelFormatDescriptor::PIX_FMT_FLAG_PSEUDOPAL)) {
    av_buffer_unref(&frame->buf[1]);
    frame->buf[1] = AVBufferSupport::wrapBuffer(Buffer::make(retval.value(), 1024));
    if (!frame->buf[1]) {
      VS_THROW(HumbleRuntimeError("memory failure"));
    }

    frame->data[1] = frame->buf[1]->data;
  }

  int32_t n = retval->getNumDataPlanes();
  (void) n;
  VS_LOG_TRACE("Created MediaPicture: %d x %d (%d). [%d, %d, %d, %d]",
      retval->getWidth(),
      retval->getHeight(),
      retval->getFormat(),
      n < 1 ? 0 : retval->getDataPlaneSize(0),
      n < 2 ? 0 : retval->getDataPlaneSize(1),
      n < 3 ? 0 : retval->getDataPlaneSize(2),
      n < 4 ? 0 : retval->getDataPlaneSize(3)
      );

  // and we're done.
  return retval.get();
}