FilterPictureSink*
FilterPictureSink::make(FilterGraph* graph, AVFilterContext* ctx) {
  Global::init();
  RefPointer<FilterPictureSink> r;
  r.reset(new FilterPictureSink(graph, ctx), true);
  return r.get();
}
MediaPictureImpl*
MediaPictureImpl::make(MediaPictureImpl* src, bool copy) {
  RefPointer<MediaPictureImpl> retval;

  if (!src) VS_THROW(HumbleInvalidArgument("no src object to copy from"));

  if (copy) {
    // first create a new media picture object to copy into
    retval = make(src->getWidth(), src->getHeight(), src->getFormat());
    retval->mComplete = src->mComplete;
    // then copy the data into retval
    int32_t n = src->getNumDataPlanes();
    for (int32_t i = 0; i < n; i++) {
      AVBufferRef* dstBuf = av_frame_get_plane_buffer(retval->mFrame, i);
      AVBufferRef* srcBuf = av_frame_get_plane_buffer(src->mFrame, i);
      VS_ASSERT(dstBuf, "should always have buffer");
      VS_ASSERT(srcBuf, "should always have buffer");
      memcpy(dstBuf->data, srcBuf->data, srcBuf->size);
    }
  } else {
    // first create a new media picture object to reference into
    retval = make();
    // then do the reference
    retval->mComplete = src->mComplete;
    av_frame_ref(retval->mFrame, src->mFrame);
  }
  // set the completeness and timebase
  retval->setComplete(src->isComplete());
  RefPointer<Rational> timeBase = src->getTimeBase();
  retval->setTimeBase(timeBase.value());
  return retval.get();
}
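// Usage sketch (hypothetical variable name `decoded`, not part of the library
// source): copy=true produces an independent deep copy of the pixel planes,
// while copy=false produces a new object that only references the same
// underlying AVFrame data.
//
//   RefPointer<MediaPictureImpl> deepCopy = MediaPictureImpl::make(decoded, true);
//   RefPointer<MediaPictureImpl> shallowCopy = MediaPictureImpl::make(decoded, false);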
BufferImpl*
BufferImpl::make(io::humble::ferry::RefCounted* requestor, int32_t bufferSize) {
  RefPointer<BufferImpl> retval;

  if (bufferSize <= 0)
    VS_THROW(HumbleInvalidArgument("bufferSize must be > 0"));

  void* allocator = requestor ? requestor->getJavaAllocator() : 0;
  void* buffer = JNIMemoryManager::malloc(allocator, bufferSize);
  if (!buffer) {
    VS_THROW(HumbleBadAlloc());
  }
  try {
    retval = BufferImpl::make();
    retval->mBuffer = buffer;
    retval->mBufferSize = bufferSize;
    retval->mInternallyAllocated = true;
  } catch (std::exception&) {
    // do not leak the raw allocation if construction fails
    JNIMemoryManager::free(buffer);
    throw;
  }
  return retval.get();
}
BufferImpl*
BufferImpl::make(io::humble::ferry::RefCounted* /*unused*/, void* bufToWrap,
    int32_t bufferSize, FreeFunc freeFunc, void* closure) {
  RefPointer<BufferImpl> retval;

  if (!bufToWrap) {
    VS_THROW(HumbleInvalidArgument("bufToWrap must be non null"));
  }
  if (bufferSize <= 0) {
    VS_THROW(HumbleInvalidArgument("bufferSize must be > 0"));
  }

  retval = BufferImpl::make();
  retval->mFreeFunc = freeFunc;
  retval->mClosure = closure;
  retval->mBufferSize = bufferSize;
  retval->mBuffer = bufToWrap;
  retval->mInternallyAllocated = false;
  return retval.get();
}
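// Usage sketch (hypothetical names `myFree` and `raw`; assumes FreeFunc takes
// the buffer pointer and closure, as suggested by the refCountedFreeFunc usage
// below): the first overload allocates and owns its memory, the second wraps
// caller-owned memory and is expected to invoke the supplied FreeFunc once no
// references remain.
//
//   static void myFree(void* buf, void* closure) { free(buf); (void) closure; }
//
//   // library-allocated 4KB buffer, managed by the JNIMemoryManager
//   BufferImpl* owned = BufferImpl::make(0, 4096);
//   owned->release();                // drop our reference when done
//
//   // wrap memory we allocated ourselves; myFree reclaims it later
//   void* raw = malloc(4096);
//   BufferImpl* wrapped = BufferImpl::make(0, raw, 4096, myFree, 0);
//   wrapped->release();              // myFree(raw, 0) should run when the last reference goes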
MediaSubtitleRectangle*
MediaSubtitleRectangle::make(AVSubtitleRect* ctx) {
  if (!ctx) throw HumbleInvalidArgument("no context");
  RefPointer<MediaSubtitleRectangle> retval = make();
  retval->mCtx = ctx;
  return retval.get();
}
FilterAudioSource*
FilterAudioSource::make(FilterGraph* graph, AVFilterContext* ctx) {
  Global::init();
  RefPointer<FilterAudioSource> r;
  r.reset(new FilterAudioSource(graph, ctx), true);
  return r.get();
}
RefPointer<Listener::Notification> Listener::JitterBuffer::popNotification()
{
    JBuffer::iterator it = mBuffer.find(mNotifyLast + 1);  // have next message?
    if (it == mBuffer.end())
        return NULL;                                        // nothing here
    else {
        RefPointer<Notification> result = it->second;       // save value
        mBuffer.erase(it);                                   // remove from buffer
        secinfo("notify-jit", "%p retrieved from jitter buffer", result.get());
        return result;                                       // return it
    }
}
Buffer*
MediaPictureImpl::getData(int32_t plane) {
  validatePlane(plane);
  // we get the buffer for the given plane if it exists, and wrap
  // it in a Buffer.
  // now we're guaranteed that we should have a plane.
  RefPointer<Buffer> buffer;
  if (mFrame->buf[plane])
    buffer = AVBufferSupport::wrapAVBuffer(this, mFrame->buf[plane],
        mFrame->data[plane], mFrame->buf[plane]->size);
  return buffer.get();
}
Buffer*
MediaSubtitleRectangle::getPictureData(int line) {
  if (line < 0 || line >= 4)
    throw HumbleInvalidArgument("line must be between 0 and 3");
  // add a ref for the Buffer we are about to hand out; refCountedFreeFunc
  // releases it when the Buffer is destroyed.
  this->acquire();
  // create a buffer that wraps this line of the rectangle's picture data
  RefPointer<Buffer> retval = Buffer::make(this, mCtx->pict.data[line],
      mCtx->pict.linesize[line], Buffer::refCountedFreeFunc, this);
  // if creation failed, undo the acquire so we do not leak ourselves
  if (!retval) this->release();
  return retval.get();
}
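// Usage sketch (hypothetical variable name `rect`): the returned Buffer keeps
// the rectangle alive via the extra acquire() above, so the pixel data stays
// valid even if the caller drops its own reference to the rectangle first.
//
//   RefPointer<Buffer> bitmap = rect->getPictureData(0);
//   if (bitmap) {
//     uint8_t* pixels = (uint8_t*) bitmap->getBytes(0, bitmap->getBufferSize());
//     // ... examine one line of the subtitle bitmap ...
//   }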
//
// Act on a death notification for a session's underlying audit session object.
// We may not destroy the Session outright here (due to processes that use it),
// but we do clear out its accumulated wealth.
// Note that we may get spurious death notifications for audit sessions that we
// never learned about. Ignore those.
//
void Session::destroy(SessionId id)
{
    // remove session from session map
    RefPointer<Session> session = NULL;
    {
        StLock<Mutex> _(mSessionLock);
        SessionMap::iterator it = mSessions.find(id);
        if (it != mSessions.end()) {
            session = it->second;
            assert(session->sessionId() == id);
            mSessions.erase(it);
        }
    }
    if (session.get()) {
        session->kill();
    }
}
MediaPictureImpl*
MediaPictureImpl::make(Buffer* buffer, int32_t width, int32_t height,
    PixelFormat::Type format) {
  if (width <= 0) {
    VS_THROW(HumbleInvalidArgument("width must be > 0"));
  }
  if (height <= 0) {
    VS_THROW(HumbleInvalidArgument("height must be > 0"));
  }
  if (format == PixelFormat::PIX_FMT_NONE) {
    VS_THROW(HumbleInvalidArgument("pixel format must be specified"));
  }
  if (!buffer) {
    VS_THROW(HumbleInvalidArgument("must pass non null buffer"));
  }

  // let's figure out how big of a buffer we need
  int32_t bufSize = PixelFormat::getBufferSizeNeeded(width, height, format);
  if (bufSize > buffer->getBufferSize()) {
    VS_THROW(
        HumbleInvalidArgument(
            "passed in buffer too small to fit requested image parameters"));
  }

  RefPointer<MediaPictureImpl> retval = make();
  AVFrame* frame = retval->mFrame;
  frame->width = width;
  frame->height = height;
  frame->format = format;

  // buffer is large enough; let's fill the data pointers
  uint8_t* data = (uint8_t*) buffer->getBytes(0, bufSize);
  int32_t imgSize = av_image_fill_arrays(frame->data, frame->linesize,
      data, (enum AVPixelFormat) frame->format, frame->width, frame->height, 1);
  if (imgSize != bufSize) {
    VS_ASSERT(imgSize == bufSize, "these should always be equal");
    VS_THROW(HumbleRuntimeError("could not fill image with data"));
  }

  // now, set up the reference buffers
  frame->extended_data = frame->data;
  for (int32_t i = 0; i < AV_NUM_DATA_POINTERS; i++) {
    if (frame->data[i])
      frame->buf[i] = AVBufferSupport::wrapBuffer(buffer, frame->data[i],
          frame->linesize[0] * frame->height + 16);
  }

  // now fill in the AVBufferRefs where we pass off to FFmpeg care
  // of our buffer. Be kind FFmpeg. Be kind.
  RefPointer<PixelFormatDescriptor> desc = PixelFormat::getDescriptor(
      (PixelFormat::Type) frame->format);
  if (!desc) {
    VS_THROW(HumbleRuntimeError("could not get format descriptor"));
  }
  if (desc->getFlag(PixelFormatDescriptor::PIX_FMT_FLAG_PAL)
      || desc->getFlag(PixelFormatDescriptor::PIX_FMT_FLAG_PSEUDOPAL)) {
    // palettized formats carry their palette in a second, 1024-byte plane
    av_buffer_unref(&frame->buf[1]);
    frame->buf[1] = AVBufferSupport::wrapBuffer(Buffer::make(retval.value(), 1024));
    if (!frame->buf[1]) {
      VS_THROW(HumbleRuntimeError("memory failure"));
    }
    frame->data[1] = frame->buf[1]->data;
  }

  int32_t n = retval->getNumDataPlanes();
  (void) n;
  VS_LOG_TRACE("Created MediaPicture: %d x %d (%d). [%d, %d, %d, %d]",
      retval->getWidth(), retval->getHeight(), retval->getFormat(),
      n < 1 ? 0 : retval->getDataPlaneSize(0),
      n < 2 ? 0 : retval->getDataPlaneSize(1),
      n < 3 ? 0 : retval->getDataPlaneSize(2),
      n < 4 ? 0 : retval->getDataPlaneSize(3));

  // and we're done.
  return retval.get();
}
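// Usage sketch (hypothetical values; assumes PIX_FMT_YUV420P is an available
// PixelFormat::Type in this build): wrap caller-managed memory as the backing
// store of a new picture, so pixel data is written directly into that Buffer.
//
//   int32_t w = 640, h = 480;
//   PixelFormat::Type fmt = PixelFormat::PIX_FMT_YUV420P;
//   int32_t needed = PixelFormat::getBufferSizeNeeded(w, h, fmt);
//   RefPointer<Buffer> backing = Buffer::make(0, needed);
//   RefPointer<MediaPictureImpl> picture =
//       MediaPictureImpl::make(backing.value(), w, h, fmt);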
DemuxerStream*
DemuxerStream::make(Container* container, int32_t index) {
  RefPointer<DemuxerStream> r;
  r.reset(new DemuxerStream(container, index), true);
  return r.get();
}