Example #1
bool GLWindow::_createFBO( util::FrameBufferObject*& fbo, const int samplesSize)
{
    const PixelViewport& pvp = getPixelViewport();
    const GLuint colorFormat = getColorFormat();

    int depthSize = getIAttribute( WindowSettings::IATTR_PLANES_DEPTH );
    if( depthSize == AUTO )
        depthSize = 24;

    int stencilSize = getIAttribute( WindowSettings::IATTR_PLANES_STENCIL );
    if( stencilSize == AUTO )
        stencilSize = 1;

    fbo = new util::FrameBufferObject( _impl->glewContext,
                                       samplesSize ? GL_TEXTURE_2D_MULTISAMPLE
                                                  : GL_TEXTURE_RECTANGLE_ARB );
    Error error = fbo->init( pvp.w, pvp.h, colorFormat, depthSize,
                             stencilSize, samplesSize );
    if( !error )
        return true;

    // Retry without a stencil buffer, but only if stencil planes were
    // requested implicitly (AUTO) rather than explicitly.
    if( getIAttribute( WindowSettings::IATTR_PLANES_STENCIL ) == AUTO )
        error = fbo->init( pvp.w, pvp.h, colorFormat, depthSize, 0,
                           samplesSize );

    if( !error )
        return true;

    sendError( error.getCode( ));
    delete fbo;
    fbo = 0;
    return false;
}
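A minimal, self-contained sketch of the fallback strategy this example uses: try the full attachment set first, then drop the stencil buffer only if it was requested implicitly. FboConfig and the tryInit callback are illustrative stand-ins, not part of the API shown above.

#include <functional>
#include <optional>

struct FboConfig { int depth = 24; int stencil = 1; int samples = 0; };

std::optional<FboConfig> createWithFallback(
    FboConfig cfg, bool stencilWasAuto,
    const std::function<bool(const FboConfig&)>& tryInit)
{
    if (tryInit(cfg))
        return cfg;               // full configuration succeeded
    if (stencilWasAuto) {
        cfg.stencil = 0;          // retry without a stencil buffer
        if (tryInit(cfg))
            return cfg;
    }
    return std::nullopt;          // let the caller report the error
}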
Example #2
bool Texture::init(int width, int height, ColorFormat format) noexcept
{
  // release any previously allocated pixel data
  m_data = nullptr;

  m_width = width;
  m_height = height;
  m_format = format;
  size_t size = CalculateDataBlockSize(getWidth(), getHeight());
  if (size > 0) {
    m_data.reset(new uint8_t[size], std::default_delete<uint8_t[]>());

    // fill the texture with the "empty" (black) pixel; assumes 32 bits per pixel
    size_t count = m_width * m_height;
    uint32_t pixel = GetEmptyPixel(getColorFormat());
    pixel = get32u_le(&pixel);  // store the fill value in little-endian byte order
    uint32_t *ofs = reinterpret_cast<uint32_t*>(m_data.get());
    for (size_t i = 0; i < count; i++, ofs++) {
      *ofs = pixel;
    }

    return true;
  }
  return false;
}
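Several of these examples pass values through get32u_le before memcpy-ing them into byte buffers. The helper itself is not shown; a minimal sketch, assuming its contract is to return the value whose stored bytes form the little-endian representation of the input (an identity on little-endian hosts):

#include <cstdint>

// Assumed behavior of get32u_le: the returned value, when stored to memory,
// yields the little-endian byte sequence of *v. On a little-endian host this
// is the identity; on a big-endian host it byte-swaps.
static uint32_t get32u_le(const uint32_t *v)
{
  const uint8_t *b = reinterpret_cast<const uint8_t*>(v);
  return static_cast<uint32_t>(b[0])         |
         (static_cast<uint32_t>(b[1]) << 8)  |
         (static_cast<uint32_t>(b[2]) << 16) |
         (static_cast<uint32_t>(b[3]) << 24);
}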
Example #3
/*
 * Check whether the camera's reported color format is supported
 * @param params CameraParameters to retrieve the information
 * @return OK if no error.
 */
status_t CameraSource::isCameraColorFormatSupported(
        const CameraParameters& params) {
    // Note: params.get() may return NULL here; the variant below guards
    // against that before calling getColorFormat().
    mColorFormat = getColorFormat(params.get(
            CameraParameters::KEY_VIDEO_FRAME_FORMAT));
    if (mColorFormat == -1) {
        return BAD_VALUE;
    }
    return OK;
}
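A hardened variant of the same function follows; it rejects a missing KEY_VIDEO_FRAME_FORMAT parameter before handing it to getColorFormat: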
/*
 * Check whether the camera's reported color format is supported
 * @param params CameraParameters to retrieve the information
 * @return OK if no error.
 */
status_t CameraSource::isCameraColorFormatSupported(
        const CameraParameters& params) {
    const char* fmt = params.get(CameraParameters::KEY_VIDEO_FRAME_FORMAT);
    if (!fmt) {
        LOGE("Missing parameter %s!", CameraParameters::KEY_VIDEO_FRAME_FORMAT);
        return BAD_VALUE;
    }
    mColorFormat = getColorFormat(fmt);
    if (mColorFormat == -1) {
        return BAD_VALUE;
    }
    return OK;
}
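For reference, the getColorFormat helper called in both variants maps a CameraParameters pixel-format string to an OMX color constant and returns -1 for anything unrecognized. A hedged sketch of such a table, assuming the AOSP CameraParameters and OpenMAX headers are available (the real implementation covers more formats):

#include <string.h>

static int32_t getColorFormat(const char *colorFormat) {
    if (!strcmp(colorFormat, CameraParameters::PIXEL_FORMAT_YUV420SP)) {
        return OMX_COLOR_FormatYUV420SemiPlanar;
    }
    if (!strcmp(colorFormat, CameraParameters::PIXEL_FORMAT_YUV422I)) {
        return OMX_COLOR_FormatYCbYCr;
    }
    if (!strcmp(colorFormat, CameraParameters::PIXEL_FORMAT_RGB565)) {
        return OMX_COLOR_Format16bitRGB565;
    }
    return -1;  // matches the error sentinel checked above
}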
Example #5
bool Texture::resizeTexture(int newWidth, int newHeight) noexcept
{
  if (newWidth > 0 && newHeight > 0) {
    if (newWidth == getWidth() && newHeight == getHeight()) {
      return true;
    }

    size_t newSize = CalculateDataBlockSize(newWidth, newHeight);
    if (newSize > 0) {
      BytePtr ptrNewData(new uint8_t[newSize], std::default_delete<uint8_t[]>());

      // fill the new texture with the Dxt-encoded "black" pixel value
      size_t numPixels = newWidth * newHeight;
      uint32_t pixel = GetEmptyPixel(getColorFormat());
      pixel = get32u_le(&pixel);  // store the fill value in little-endian byte order
      uint32_t *ofs = reinterpret_cast<uint32_t*>(ptrNewData.get());
      for (size_t i = 0; i < numPixels; i++, ofs++) {
        *ofs = pixel;
      }

      // copy the overlapping region of the old data into the new texture
      // (strides assume a fixed 4 bytes per pixel)
      int srcStride = getWidth() * 4;
      int oldHeight = getHeight();
      int dstStride = newWidth * 4;
      int minStride = std::min(srcStride, dstStride);
      int minHeight = std::min(oldHeight, newHeight);
      uint8_t *srcOfs = m_data.get();
      uint8_t *dstOfs = ptrNewData.get();
      for (int y = 0; y < minHeight; y++) {
        std::memcpy(dstOfs, srcOfs, minStride);
        srcOfs += srcStride;
        dstOfs += dstStride;
      }

      m_width = newWidth;
      m_height = newHeight;
      m_data = ptrNewData;

      return true;
    }
  }
  return false;
}
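The row-wise copy above walks min(srcStride, dstStride) bytes over min(oldHeight, newHeight) rows, so the overlapping top-left region of the old image survives the resize while newly exposed rows and columns keep the "empty" fill value written beforehand.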
Example #6
	void GLFramebuffer::attach(const string &attachment, GLenum txrFormat)
	{
		GLenum colorFormat, datatype;
		if((colorFormat = getColorFormat(txrFormat)) == GL_NONE || (datatype = getDatatype(txrFormat)) == GL_NONE)
		{
			cerr << "Invalid texture format specified." << endl;
			cerr << "Should be one of: GL_RGB32(F/UI), GL_RGBA32(F/UI), GL_DEPTH_COMPONENT32F." << endl;
			return;
		}

		// Get FBO attachment slot for the new texture
		GLenum slot = findAttachmentSlot(txrFormat);

		GLTexture *pTexture = new GLTexture();
		GLTexture::bind(0, *pTexture);
		glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
		glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
		glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
		glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
		glTexImage2D(GL_TEXTURE_2D, 0, txrFormat, m_width, m_height, 0, colorFormat, datatype, nullptr);
		glFramebufferTexture2D(GL_FRAMEBUFFER, slot, GL_TEXTURE_2D, pTexture->getID(), 0);
		GLTexture::unbind(0);

		// Construct new FBO attachment
		GLFramebufferAttachment fboAttachment;
		fboAttachment.name = attachment;
		fboAttachment.pTexture = pTexture;
		fboAttachment.textureFormat = txrFormat;
		fboAttachment.colorFormat = colorFormat;
		fboAttachment.datatype = datatype;
		fboAttachment.attachmentSlot = slot;

		m_attachments.insert({attachment, fboAttachment});

		// Increment the color attachment count if necessary
		if(txrFormat != GL_DEPTH_COMPONENT32F) m_colorAttachmentCount++;
	}
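The getColorFormat and getDatatype helpers used above are not shown. Judging from the error message, they map a sized internal format to the matching base format and component type for glTexImage2D; a plausible sketch (assumed, not the original implementation):

// Hedged sketch: map a sized internal format to the (format, type) pair
// glTexImage2D expects; GL_NONE signals an unsupported format.
GLenum getColorFormat(GLenum internalFormat)
{
	switch(internalFormat)
	{
	case GL_RGB32F:             return GL_RGB;
	case GL_RGB32UI:            return GL_RGB_INTEGER;
	case GL_RGBA32F:            return GL_RGBA;
	case GL_RGBA32UI:           return GL_RGBA_INTEGER;
	case GL_DEPTH_COMPONENT32F: return GL_DEPTH_COMPONENT;
	default:                    return GL_NONE;
	}
}

GLenum getDatatype(GLenum internalFormat)
{
	switch(internalFormat)
	{
	case GL_RGB32F:
	case GL_RGBA32F:
	case GL_DEPTH_COMPONENT32F: return GL_FLOAT;
	case GL_RGB32UI:
	case GL_RGBA32UI:           return GL_UNSIGNED_INT;
	default:                    return GL_NONE;
	}
}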
Example #7
bool Texture::savePvrz(File &fout) const noexcept
{
  if (getEncoding() == Encoding::UNKNOWN) {
    return false;
  }

  uint32_t v32;

  size_t pvrSize = 0x34 + CalculateDataBlockSize(getWidth(), getHeight(), getEncoding());
  BytePtr ptrPvr(new uint8_t[pvrSize], std::default_delete<uint8_t[]>());

  // initializing header
  v32 = 0x03525650; v32 = get32u_le(&v32);
  memcpy(ptrPvr.get() + 0, &v32, 4);        // signature
  v32 = 0;
  memcpy(ptrPvr.get() + 4, &v32, 4);        // flags
  switch (getEncoding()) {
    case Encoding::BC1: v32 = 7;  break;  // PVR3 pixel format: DXT1
    case Encoding::BC2: v32 = 9;  break;  // PVR3 pixel format: DXT3
    case Encoding::BC3: v32 = 11; break;  // PVR3 pixel format: DXT5
    default:            return false;
  }
  v32 = get32u_le(&v32);
  memcpy(ptrPvr.get() + 8, &v32, 4);        // pixel format
  v32 = 0;
  memcpy(ptrPvr.get() + 12, &v32, 4);       // extended pixel format
  memcpy(ptrPvr.get() + 16, &v32, 4);       // color space
  memcpy(ptrPvr.get() + 20, &v32, 4);       // channel type
  v32 = getHeight(); v32 = get32u_le(&v32);
  memcpy(ptrPvr.get() + 24, &v32, 4);       // height
  v32 = getWidth(); v32 = get32u_le(&v32);
  memcpy(ptrPvr.get() + 28, &v32, 4);       // width
  v32 = 1; v32 = get32u_le(&v32);
  memcpy(ptrPvr.get() + 32, &v32, 4);       // texture depth
  memcpy(ptrPvr.get() + 36, &v32, 4);       // surfaces
  memcpy(ptrPvr.get() + 40, &v32, 4);       // faces
  memcpy(ptrPvr.get() + 44, &v32, 4);       // mipmap levels
  v32 = 0;
  memcpy(ptrPvr.get() + 48, &v32, 4);       // meta data size

  // encoding pixel data
  ConverterPtr converter(new ConverterDxt(m_options, getEncoding()));
  converter->setEncoding(true);
  converter->setColorFormat(getColorFormat());
  bool retVal = (0 != converter->convert(ptrPvr.get() + 0x34, getData().get(), getWidth(), getHeight()));

  // compressing PVR texture
  if (retVal) {
    size_t pvrzSize = pvrSize * 2;  // generous upper bound for the deflated size
    BytePtr ptrPvrz(new uint8_t[pvrzSize], std::default_delete<uint8_t[]>());
    Compression compress;
    pvrzSize = compress.deflate(ptrPvr.get(), pvrSize, ptrPvrz.get() + 4, pvrzSize);
    if (pvrzSize > 0) {
      v32 = pvrSize; v32 = get32u_le(&v32);  // PVRZ prefix: uncompressed PVR size, little-endian
      memcpy(ptrPvrz.get() + 0, &v32, 4);
      retVal = (fout.write(ptrPvrz.get(), 1, pvrzSize+4) == pvrzSize+4);
    } else {
      retVal = false;
    }
  }

  return retVal;
}
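The resulting container is a 4-byte little-endian prefix holding the uncompressed PVR size, followed by the zlib deflate stream of the 0x34-byte PVR v3 header and the encoded pixel data.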
CameraSource::CameraSource(const sp<Camera> &camera)
    : mCamera(camera),
      mFirstFrameTimeUs(0),
      mLastFrameTimestampUs(0),
      mNumFramesReceived(0),
      mNumFramesEncoded(0),
      mNumFramesDropped(0),
      mNumGlitches(0),
      mGlitchDurationThresholdUs(200000),
      mCollectStats(false),
      mStarted(false) {

    int64_t token = IPCThreadState::self()->clearCallingIdentity();
    String8 s = mCamera->getParameters();
    IPCThreadState::self()->restoreCallingIdentity(token);

    printf("params: \"%s\"\n", s.string());

    int32_t width, height, stride, sliceHeight;
    CameraParameters params(s);
    params.getPreviewSize(&width, &height);

    // Calculate glitch duration threshold based on frame rate
    int32_t frameRate = params.getPreviewFrameRate();
    int64_t glitchDurationUs = (1000000LL / frameRate);
    if (glitchDurationUs > mGlitchDurationThresholdUs) {
        mGlitchDurationThresholdUs = glitchDurationUs;
    }
    int32_t colorFormat = OMX_COLOR_FormatYUV420SemiPlanar;
    const char *colorFormatStr = params.get(CameraParameters::KEY_VIDEO_FRAME_FORMAT);
    if (colorFormatStr == NULL) {
#ifdef USE_YUV422I_DEFAULT_COLORFORMAT
        // on some devices (such as sholes), the camera doesn't properly report what
        // color format it needs, so we need to force it as a default
        colorFormatStr = CameraParameters::PIXEL_FORMAT_YUV422I;
#else
        colorFormatStr = CameraParameters::PIXEL_FORMAT_YUV420SP;
#endif
    }
    colorFormat = getColorFormat(colorFormatStr);

    // XXX: query camera for the stride and slice height
    // when the capability becomes available.
    stride = width;
    sliceHeight = height;

    mMeta = new MetaData;
    mMeta->setCString(kKeyMIMEType, MEDIA_MIMETYPE_VIDEO_RAW);
    mMeta->setInt32(kKeyColorFormat, colorFormat);
    mMeta->setInt32(kKeyWidth, width);
    mMeta->setInt32(kKeyHeight, height);
    mMeta->setInt32(kKeyStride, stride);
    mMeta->setInt32(kKeySliceHeight, sliceHeight);

#if defined (OMAP_ENHANCEMENT) && defined (TARGET_OMAP4)
    int32_t paddedFrameWidth, paddedFrameHeight;
    if (mCamera != 0) {
        // We may not honor the preview size the app requested, so query the
        // actual (padded) preview size and use it for video recording.
        const char *paddedWidthStr = params.get("padded-width");
        const char *paddedHeightStr = params.get("padded-height");
        paddedFrameWidth = paddedWidthStr ? atoi(paddedWidthStr) : -1;
        paddedFrameHeight = paddedHeightStr ? atoi(paddedHeightStr) : -1;
        if (paddedFrameWidth < 0 || paddedFrameHeight < 0) {
            LOGE("Failed to get camera(%p) preview size", mCamera.get());
        }
        LOGV("CameraSource() : padded WxH=%dx%d", paddedFrameWidth, paddedFrameHeight);
    }
    else
    {
        LOGE("mCamera is NULL");
        paddedFrameWidth = width;
        paddedFrameHeight = height;
    }

    mMeta->setInt32(kKeyPaddedWidth, paddedFrameWidth);
    mMeta->setInt32(kKeyPaddedHeight, paddedFrameHeight);

    int32_t  mS3DCamera = false;
    if (mCamera != 0) {

        if(params.get("s3d-supported") != NULL)
            mS3DCamera = strcmp(params.get("s3d-supported"), CameraParameters::TRUE) == 0;

        if(mS3DCamera)
        {
            const char *seiEncodingTypeStr = params.get(TICameraParameters::KEY_SEI_ENCODING_TYPE);
            CHECK(seiEncodingTypeStr != NULL);
            int32_t seiEncodingType = getSEIEncodingType(seiEncodingTypeStr);
            mMeta->setInt32(kKeySEIEncodingType, seiEncodingType);

            const char *frameLayoutStr = params.get(TICameraParameters::KEY_S3D_FRAME_LAYOUT);
            CHECK(frameLayoutStr != NULL);
            mMeta->setCString(kKeyFrameLayout, frameLayoutStr);
        }

    }
    else
    {
        LOGE("mCamera is NULL");
        mS3DCamera = false;
    }
    mMeta->setInt32(kKeyS3dSupported, mS3DCamera);
#endif
}
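For context, a hypothetical caller could read back the metadata this constructor assembles. CreateFromCamera, getFormat, and the kKey* constants are the stagefright names used by this era of the framework; the sketch is illustrative, not part of the example above.

CameraSource *source = CameraSource::CreateFromCamera(camera);
sp<MetaData> meta = source->getFormat();

int32_t colorFormat = 0, width = 0, height = 0;
CHECK(meta->findInt32(kKeyColorFormat, &colorFormat));
CHECK(meta->findInt32(kKeyWidth, &width));
CHECK(meta->findInt32(kKeyHeight, &height));
LOGV("negotiated %dx%d, colorFormat %d", width, height, colorFormat);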