Code Example #1
static void SDL_VoutAndroid_SetNativeWindow_l(SDL_Vout *vout, ANativeWindow *native_window)
{
    AMCTRACE("%s(%p, %p)\n", __func__, vout, native_window);
    SDL_Vout_Opaque *opaque = vout->opaque;

    if (opaque->native_window == native_window) {
        if (native_window == NULL) {
            // always invalidate buffers, if native_window is changed
            SDL_VoutAndroid_invalidateAllBuffers_l(vout);
        }
        return;
    }

    IJK_EGL_terminate(opaque->egl);
    SDL_VoutAndroid_invalidateAllBuffers_l(vout);

    if (opaque->native_window)
        ANativeWindow_release(opaque->native_window);

    if (native_window)
        ANativeWindow_acquire(native_window);

    opaque->native_window = native_window;
    opaque->null_native_window_warned = 0;
}
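For reference: every snippet on this page relies on the same NDK reference-counting contract. ANativeWindow_acquire() adds a reference, ANativeWindow_release() drops one, and a window returned by ANativeWindow_fromSurface() already holds one. A minimal sketch of the swap pattern used in Code Example #1 (the helper name is illustrative, not ijkplayer code):

#include <android/native_window.h>

/* Illustrative sketch: swap the window held in *slot for next, keeping
 * the reference counts balanced. Hypothetical helper, not project code. */
static void swap_native_window(ANativeWindow **slot, ANativeWindow *next)
{
    if (*slot == next)
        return;                       /* same window: nothing to rebalance */
    if (*slot)
        ANativeWindow_release(*slot); /* drop our reference on the old window */
    if (next)
        ANativeWindow_acquire(next);  /* hold a reference on the new window */
    *slot = next;
}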
Code Example #2
bool CEGLNativeTypeAndroid::GetNativeResolution(RESOLUTION_INFO *res) const
{
  EGLNativeWindowType *nativeWindow = (EGLNativeWindowType*)CXBMCApp::GetNativeWindow(30000);
  if (!nativeWindow)
    return false;

  if (!m_width || !m_height)
  {
    ANativeWindow_acquire(*nativeWindow);
    res->iWidth = ANativeWindow_getWidth(*nativeWindow);
    res->iHeight = ANativeWindow_getHeight(*nativeWindow);
    ANativeWindow_release(*nativeWindow);
  }
  else
  {
    res->iWidth = m_width;
    res->iHeight = m_height;
  }

  res->fRefreshRate = currentRefreshRate();
  res->dwFlags       = D3DPRESENTFLAG_PROGRESSIVE;
  res->iScreen       = 0;
  res->bFullScreen   = true;
  res->iSubtitles    = (int)(0.965 * res->iHeight);
  res->fPixelRatio   = 1.0f;
  res->iScreenWidth  = res->iWidth;
  res->iScreenHeight = res->iHeight;
  res->strMode       = StringUtils::Format("%dx%d @ %.2f%s - Full Screen",
                                           res->iScreenWidth, res->iScreenHeight, res->fRefreshRate,
                                           res->dwFlags & D3DPRESENTFLAG_INTERLACED ? "i" : "");
  CLog::Log(LOGNOTICE, "Current resolution: %s\n", res->strMode.c_str());
  return true;
}
Code Example #3
File: camera_engine.cpp  Project: cRAN-cg/android-ndk
/**
 * The main function for rendering a frame. In our case, it is a YUV to
 * RGBA8888 converter.
 */
void CameraEngine::DrawFrame(void) {
  if (!cameraReady_ || !yuvReader_) return;
  AImage* image = yuvReader_->GetNextImage();
  if (!image) {
    return;
  }

  ANativeWindow_acquire(app_->window);
  ANativeWindow_Buffer buf;
  if (ANativeWindow_lock(app_->window, &buf, nullptr) < 0) {
    yuvReader_->DeleteImage(image);
    ANativeWindow_release(app_->window);  // balance the acquire above
    return;
  }

  yuvReader_->DisplayImage(&buf, image);
  ANativeWindow_unlockAndPost(app_->window);
  ANativeWindow_release(app_->window);
}
Code Example #4
void NDKCamera::CreateSession(ANativeWindow* previewWindow,
                              ANativeWindow* jpgWindow, int32_t imageRotation) {
  // Create output from this app's ANativeWindow, and add into output container
  requests_[PREVIEW_REQUEST_IDX].outputNativeWindow_ = previewWindow;
  requests_[PREVIEW_REQUEST_IDX].template_ = TEMPLATE_PREVIEW;
  requests_[JPG_CAPTURE_REQUEST_IDX].outputNativeWindow_ = jpgWindow;
  requests_[JPG_CAPTURE_REQUEST_IDX].template_ = TEMPLATE_STILL_CAPTURE;

  CALL_CONTAINER(create(&outputContainer_));
  for (auto& req : requests_) {
    ANativeWindow_acquire(req.outputNativeWindow_);
    CALL_OUTPUT(create(req.outputNativeWindow_, &req.sessionOutput_));
    CALL_CONTAINER(add(outputContainer_, req.sessionOutput_));
    CALL_TARGET(create(req.outputNativeWindow_, &req.target_));
    CALL_DEV(createCaptureRequest(cameras_[activeCameraId_].device_,
                                  req.template_, &req.request_));
    CALL_REQUEST(addTarget(req.request_, req.target_));
  }

  // Create a capture session for the given preview request
  captureSessionState_ = CaptureSessionState::READY;
  CALL_DEV(createCaptureSession(cameras_[activeCameraId_].device_,
                                outputContainer_, GetSessionListener(),
                                &captureSession_));

  ACaptureRequest_setEntry_i32(requests_[JPG_CAPTURE_REQUEST_IDX].request_,
                               ACAMERA_JPEG_ORIENTATION, 1, &imageRotation);

  /*
   * Only the preview request is in manual mode; JPG capture is always in
   * auto mode. The JPG request could also be switched to manual mode to
   * control the capture parameters, but this sample leaves JPG capture in
   * auto mode (auto control gives better results than the author's manual
   * control).
   */
  uint8_t aeModeOff = ACAMERA_CONTROL_AE_MODE_OFF;
  CALL_REQUEST(setEntry_u8(requests_[PREVIEW_REQUEST_IDX].request_,
                           ACAMERA_CONTROL_AE_MODE, 1, &aeModeOff));
  CALL_REQUEST(setEntry_i32(requests_[PREVIEW_REQUEST_IDX].request_,
                            ACAMERA_SENSOR_SENSITIVITY, 1, &sensitivity_));
  CALL_REQUEST(setEntry_i64(requests_[PREVIEW_REQUEST_IDX].request_,
                            ACAMERA_SENSOR_EXPOSURE_TIME, 1, &exposureTime_));
}
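As the comment notes, the JPG request could also be driven manually. A hypothetical sketch of that variant, mirroring the settings this sample applies to the preview request (not part of the sample):

// Hypothetical variant: drive the JPG request manually as well, reusing
// the same AE-off and sensor values applied to the preview request above.
uint8_t jpgAeModeOff = ACAMERA_CONTROL_AE_MODE_OFF;
CALL_REQUEST(setEntry_u8(requests_[JPG_CAPTURE_REQUEST_IDX].request_,
                         ACAMERA_CONTROL_AE_MODE, 1, &jpgAeModeOff));
CALL_REQUEST(setEntry_i32(requests_[JPG_CAPTURE_REQUEST_IDX].request_,
                          ACAMERA_SENSOR_SENSITIVITY, 1, &sensitivity_));
CALL_REQUEST(setEntry_i64(requests_[JPG_CAPTURE_REQUEST_IDX].request_,
                          ACAMERA_SENSOR_EXPOSURE_TIME, 1, &exposureTime_));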
Code Example #5
/*
 * Class:     com_ytx_ican_media_player_YtxMediaPlayer
 * Method:    _setGlSurface
 * Signature: (Ljava/lang/Object;)V
 */
JNIEXPORT void JNICALL android_media_player_setSurface
        (JNIEnv *env, jobject obj, jobject surface) {

//    YtxMediaPlayer *mPlayer = getMediaPlayer(env, obj);
//    mPlayer->mVideoStateInfo->VideoGlSurfaceViewObj = env->NewGlobalRef(mVideoGlSurfaceView);
//
//    jclass jclazz = env->GetObjectClass(mVideoGlSurfaceView);
//    jmethodID jmtdId = env->GetMethodID(jclazz, "getRenderer",
//                                        "()Lcom/ytx/ican/media/player/render/GraphicRenderer;");
//    jobject GraphicRendererObj = env->CallObjectMethod(mVideoGlSurfaceView, jmtdId);
//
//    mPlayer->mVideoStateInfo->GraphicRendererObj = env->NewGlobalRef(GraphicRendererObj);


    if (surface == NULL) {
        ALOGI("android_media_player_setSurface surface == NULL");
        return;
    }
    YtxMediaPlayer *mPlayer = getMediaPlayer(env, obj);
    if (mPlayer->mVideoStateInfo->window) {
        ANativeWindow_release(mPlayer->mVideoStateInfo->window);
        mPlayer->mVideoStateInfo->window = NULL;
    }
    //ANativeWindow_fromSurface(env, mVideoGlSurfaceView);
    mPlayer->mVideoStateInfo->window = ANativeWindow_fromSurface(env, surface);
    ANativeWindow_acquire(mPlayer->mVideoStateInfo->window);
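    // Note: ANativeWindow_fromSurface() above already acquired one reference,
    // so after this explicit acquire the window holds two; each must be
    // balanced by an ANativeWindow_release().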
    ALOGI("ytxhao android_media_player_setSurface surface window =%#x",mPlayer->mVideoStateInfo->window);

    /**
     * for test
     */

    jclass jclazz_player = env->GetObjectClass(obj);
    jmethodID jmtdId_player = env->GetMethodID(jclazz_player, "getStorageDirectory",
                                               "()Ljava/lang/String;");
    jstring url = (jstring) env->CallObjectMethod(obj, jmtdId_player);
    const char *storageDirectory = env->GetStringUTFChars(url, NULL);
    mPlayer->mVideoStateInfo->mStorageDir = (char *) storageDirectory;
    ALOGI("android_media_player_setGlSurface OUT storageDirectory=%s\n",
          mPlayer->mVideoStateInfo->mStorageDir);
}
Code Example #6
File: VEditMltRun.cpp  Project: amongll/AVFX
void MltRuntime::run(ANativeWindow* nw) throw(Exception)
{
	init();

	Lock lk(&run_lock);
	if (consumer) mlt_consumer_close(consumer);

	mlt_profile profile = mlt_profile_clone(MltLoader::global_profile);
	consumer = mlt_factory_consumer(profile, "android_surface_preview", NULL);

	if (consumer == NULL) {
		mlt_profile_close(profile);
		throw_error_v(ErrorRuntimeLoadFailed, "consumer init failed");
	}

	mlt_properties props = mlt_consumer_properties(consumer);
	ANativeWindow_acquire(nw);
	mlt_properties_set_data(props, "native_window", nw, sizeof(void*), release_native_window, NULL);

	mlt_consumer_connect(consumer, mlt_producer_service(producer));
	mlt_consumer_start(consumer);
	status = StatusRunning;
}
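The release_native_window destructor passed above is not shown in this snippet; presumably it balances the ANativeWindow_acquire() when MLT destroys the property. A sketch, assuming the standard mlt_destructor signature:

/* Sketch of the (not shown) property destructor: MLT calls it when the
 * "native_window" property is destroyed, balancing the acquire above. */
static void release_native_window(void *window)
{
	if (window)
		ANativeWindow_release((ANativeWindow*)window);
}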
Code Example #7
File: ffmpeg-jni.cpp  Project: peterxu/demo
static void * decode_and_render(FFmpegInfo *pInfo, int nframe)
{
    if (!pInfo) return NULL;

    LOGI("lock native window ...");
    ANativeWindow_acquire(pInfo->window);

    /* acquire window's information: width/height/format */
    AVPixelFormat pix_fmt;
    int win_format = ANativeWindow_getFormat(pInfo->window);
    int win_width = ANativeWindow_getWidth(pInfo->window);
    int win_height = ANativeWindow_getHeight(pInfo->window);
    if (win_format == WINDOW_FORMAT_RGBA_8888)
        pix_fmt = AV_PIX_FMT_RGBA;
    else if (win_format == WINDOW_FORMAT_RGB_565)
        pix_fmt = AV_PIX_FMT_RGB565;
    else {
        LOGE("unsupport window format");
        ANativeWindow_release(pInfo->window);
        return NULL;
    }

    LOGI("alloc decoded buffer w-%d, h-%d, f-%d", win_width, win_height, win_format);
    if (!pInfo->win_data || 
        pInfo->win_width != win_width || 
        pInfo->win_height != win_height || 
        pInfo->win_format != win_format ) {

        /* release old data */
        if (pInfo->win_data) {
            av_free(pInfo->win_data[0]);
            pInfo->win_data[0] = NULL;
        }

        /* alloc decoded buffer */
        int ret = av_image_alloc(pInfo->win_data, pInfo->win_linesize,
                             win_width, win_height, pix_fmt, 1);
        if (ret < 0) {
            LOGE("Could not allocate raw video buffer");            
            ANativeWindow_release(pInfo->window);
            return NULL;
        }
        pInfo->win_bufsize = ret;
        pInfo->win_format = win_format;
        pInfo->win_width = win_width;
        pInfo->win_height = win_height;
    }

    /* read video frame and decode */
    int num_frames = 0;
    LOGI("read video frame and decode");
    pInfo->video_timestamp = -1;
    av_seek_frame(pInfo->fmt_ctx, pInfo->video_stream_idx, pInfo->video_timestamp, AVSEEK_FLAG_BACKWARD);

    pInfo->video_timestamp = 0;
    while (av_read_frame(pInfo->fmt_ctx, &pInfo->pkt) >= 0) {
        if (num_frames >= nframe) {
            break;
        }

        AVPacket orig_pkt = pInfo->pkt;
        /* process video stream */
        if (pInfo->pkt.stream_index == pInfo->video_stream_idx) {
            num_frames ++;
            AVCodecContext* dec_ctx = pInfo->video_dec_ctx;
            AVFrame* frame = pInfo->frame;
            pInfo->video_timestamp = frame->pts;  // pts of the previously decoded frame
            LOGI("pkt.flags = %d, frame.pts = %lld", pInfo->pkt.flags, (long long)frame->pts);

            /* decode video */
            int got_frame = 0;
            int ret = avcodec_decode_video2(dec_ctx, frame, &got_frame, &pInfo->pkt);
            if (ret < 0) {
                LOGE("Error decoding video frame");
                break;
            }

            /* decode success */
            if (got_frame) {
                /* copy video data from aligned buffer into unaligned */
                av_image_copy(pInfo->video_dst_data, pInfo->video_dst_linesize, 
                              (const uint8_t **)(frame->data), frame->linesize,
                              dec_ctx->pix_fmt, dec_ctx->width, dec_ctx->height);

                /* convert to required format */
                int ret = yuv2rgb(dec_ctx->width, dec_ctx->height, dec_ctx->pix_fmt, 
                                  pInfo->video_dst_linesize, pInfo->video_dst_data,
                                  pInfo->win_width, pInfo->win_height, pix_fmt, 
                                  pInfo->win_linesize, pInfo->win_data);

                /* do paint */
                LOGI("do paint on surface, ret=%d, decoded w=%d, h=%d, fmt=from %d to %d", ret, 
                      dec_ctx->width, dec_ctx->height, dec_ctx->pix_fmt, pix_fmt);
                struct ANativeWindow_Buffer data;
                ANativeWindow_lock(pInfo->window, &data, NULL);
                memcpy(data.bits, pInfo->win_data[0], pInfo->win_bufsize);
                ANativeWindow_unlockAndPost(pInfo->window);
            }

            /* seek to next key frame */
            av_seek_frame(pInfo->fmt_ctx, pInfo->video_stream_idx, pInfo->video_timestamp, AVSEEK_FLAG_BACKWARD);
        }
        av_free_packet(&orig_pkt);
    }
    //av_free_packet(&pkt);

    ANativeWindow_release(pInfo->window);
    LOGI("decode OK, number=%d", num_frames);
    return NULL;
}
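The yuv2rgb() helper called above is project code not shown here. A rough sketch of such a helper built on FFmpeg's libswscale, assuming the same argument order as the call site:

#include <libswscale/swscale.h>

/* Hypothetical stand-in for the project's yuv2rgb(): convert and scale the
 * decoded frame into the window-sized RGB buffer via libswscale. */
static int yuv2rgb(int src_w, int src_h, enum AVPixelFormat src_fmt,
                   int *src_linesize, uint8_t **src_data,
                   int dst_w, int dst_h, enum AVPixelFormat dst_fmt,
                   int *dst_linesize, uint8_t **dst_data)
{
    struct SwsContext *sws = sws_getContext(src_w, src_h, src_fmt,
                                            dst_w, dst_h, dst_fmt,
                                            SWS_BILINEAR, NULL, NULL, NULL);
    if (!sws)
        return -1;
    int ret = sws_scale(sws, (const uint8_t * const *)src_data, src_linesize,
                        0, src_h, dst_data, dst_linesize);
    sws_freeContext(sws);
    return ret;
}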
Code Example #8
File: native.cpp  Project: AheadIO/Halide
JNIEXPORT void JNICALL Java_com_example_hellohalide_CameraPreview_processFrame(
    JNIEnv *env, jobject obj, jbyteArray jSrc, jint j_w, jint j_h, jobject surf) {

    const int w = j_w, h = j_h;

    halide_set_error_handler(handler);

    unsigned char *src = (unsigned char *)env->GetByteArrayElements(jSrc, NULL);
    if (!src) {
        LOGD("src is null\n");
        return;
    }

    ANativeWindow *win = ANativeWindow_fromSurface(env, surf);
    ANativeWindow_acquire(win);

    static bool first_call = true;
    static unsigned counter = 0;
    static unsigned times[16];
    if (first_call) {
        LOGD("According to Halide, host system has %d cpus\n", halide_host_cpu_count());
        LOGD("Resetting buffer format");
        ANativeWindow_setBuffersGeometry(win, w, h, 0);
        first_call = false;
        for (int t = 0; t < 16; t++) times[t] = 0;
    }

    ANativeWindow_Buffer buf;
    ARect rect = {0, 0, w, h};

    if (int err = ANativeWindow_lock(win, &buf, NULL)) {
        LOGD("ANativeWindow_lock failed with error code %d\n", err);
        ANativeWindow_release(win);  // balance the acquire above
        env->ReleaseByteArrayElements(jSrc, (jbyte *)src, 0);
        return;
    }

    uint8_t *dst = (uint8_t *)buf.bits;

    // If we're using opencl, use the gpu backend for it.
    halide_set_ocl_device_type("gpu");

    // Make these static so that we can reuse device allocations across frames.
    static buffer_t srcBuf = {0};
    static buffer_t dstBuf = {0};

    if (dst) {
        srcBuf.host = (uint8_t *)src;
        srcBuf.host_dirty = true;
        srcBuf.extent[0] = w;
        srcBuf.extent[1] = h;
        srcBuf.extent[2] = 0;
        srcBuf.extent[3] = 0;
        srcBuf.stride[0] = 1;
        srcBuf.stride[1] = w;
        srcBuf.min[0] = 0;
        srcBuf.min[1] = 0;
        srcBuf.elem_size = 1;

        dstBuf.host = dst;
        dstBuf.extent[0] = w;
        dstBuf.extent[1] = h;
        dstBuf.extent[2] = 0;
        dstBuf.extent[3] = 0;
        dstBuf.stride[0] = 1;
        dstBuf.stride[1] = w;
        dstBuf.min[0] = 0;
        dstBuf.min[1] = 0;
        dstBuf.elem_size = 1;

        // Just copy over chrominance untouched
        memcpy(dst + w*h, src + w*h, (w*h)/2);

        int64_t t1 = halide_current_time_ns();
        halide_generated(&srcBuf, &dstBuf);

        if (dstBuf.dev) {
            halide_copy_to_host(NULL, &dstBuf);
        }

        int64_t t2 = halide_current_time_ns();
        unsigned elapsed_us = (t2 - t1)/1000;


        times[counter & 15] = elapsed_us;
        counter++;
        unsigned min = times[0];
        for (int i = 1; i < 16; i++) {
            if (times[i] < min) min = times[i];
        }
        LOGD("Time taken: %d (%d)", elapsed_us, min);
    }

    ANativeWindow_unlockAndPost(win);
    ANativeWindow_release(win);

    env->ReleaseByteArrayElements(jSrc, (jbyte *)src, 0);
}
Code Example #9
void FAndroidWindow::AcquireWindowRef(ANativeWindow* InWindow)
{
	ANativeWindow_acquire(InWindow);
}
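A matching release helper presumably exists alongside this; a sketch under that assumption:

void FAndroidWindow::ReleaseWindowRef(ANativeWindow* InWindow)
{
	ANativeWindow_release(InWindow);
}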
Code Example #10
JNIEXPORT bool JNICALL Java_com_example_helloandroidcamera2_JNIUtils_blit(
    JNIEnv *env, jobject obj, jint srcWidth, jint srcHeight,
    jobject srcLumaByteBuffer, jint srcLumaRowStrideBytes,
    jobject srcChromaUByteBuffer, jobject srcChromaVByteBuffer,
    jint srcChromaElementStrideBytes, jint srcChromaRowStrideBytes,
    jobject dstSurface) {
    if (srcChromaElementStrideBytes != 1 && srcChromaElementStrideBytes != 2) {
        return false;
    }

    uint8_t *srcLumaPtr = reinterpret_cast<uint8_t *>(
        env->GetDirectBufferAddress(srcLumaByteBuffer));
    uint8_t *srcChromaUPtr = reinterpret_cast<uint8_t *>(
        env->GetDirectBufferAddress(srcChromaUByteBuffer));
    uint8_t *srcChromaVPtr = reinterpret_cast<uint8_t *>(
        env->GetDirectBufferAddress(srcChromaVByteBuffer));
    if (srcLumaPtr == nullptr || srcChromaUPtr == nullptr ||
        srcChromaVPtr == nullptr) {
        return false;
    }

    // Check whether the src chroma channels are interleaved (element stride 2).
    // Our Halide kernel deinterleaves UVUVUVUV --> UUUU, VVVV; to handle
    // VUVUVUVU, we just swap the destination pointers.
    uint8_t *srcChromaUVInterleavedPtr = nullptr;
    bool swapDstUV;
    if (srcChromaElementStrideBytes == 2) {
        if (srcChromaVPtr - srcChromaUPtr == 1) {
            srcChromaUVInterleavedPtr = srcChromaUPtr;  // UVUVUV...
            swapDstUV = false;
        } else if (srcChromaUPtr - srcChromaVPtr == 1) {
            srcChromaUVInterleavedPtr = srcChromaVPtr;  // VUVUVU...
            swapDstUV = true;
        } else {
            // stride is 2 but the pointers are not off by 1.
            return false;
        }
    }

    ANativeWindow *win = ANativeWindow_fromSurface(env, dstSurface);
    ANativeWindow_acquire(win);

    // Set the buffer geometry before locking; it must not change while locked.
    ANativeWindow_setBuffersGeometry(win, srcWidth, srcHeight, 0 /*format unchanged*/);

    ANativeWindow_Buffer buf;
    if (int err = ANativeWindow_lock(win, &buf, NULL)) {
        LOGE("ANativeWindow_lock failed with error code %d\n", err);
        ANativeWindow_release(win);
        return false;
    }

    if (buf.format != IMAGE_FORMAT_YV12) {
        LOGE("ANativeWindow buffer locked but its format was not YV12.");
        ANativeWindow_unlockAndPost(win);
        ANativeWindow_release(win);
        return false;
    }

    if (!checkBufferSizesMatch(srcWidth, srcHeight, &buf)) {
        LOGE("ANativeWindow buffer locked but its size was %d x %d, expected "
                "%d x %d", buf.width, buf.height, srcWidth, srcHeight);
        ANativeWindow_unlockAndPost(win);
        ANativeWindow_release(win);
        return false;
    }

    int32_t srcChromaWidth = srcWidth / 2;
    int32_t srcChromaHeight = srcHeight / 2;

    // This is guaranteed by the YV12 format, see android.graphics.ImageFormat.
    uint8_t *dstLumaPtr = reinterpret_cast<uint8_t *>(buf.bits);
    uint32_t dstLumaRowStrideBytes = buf.stride;
    uint32_t dstLumaSizeBytes = dstLumaRowStrideBytes * buf.height;
    uint32_t dstChromaRowStrideBytes = ALIGN(buf.stride / 2, 16);
    // Size of one chroma plane.
    uint32_t dstChromaSizeBytes = dstChromaRowStrideBytes * buf.height / 2;
    // Yes, V is actually first.
    uint8_t *dstChromaVPtr = dstLumaPtr + dstLumaSizeBytes;
    uint8_t *dstChromaUPtr = dstLumaPtr + dstLumaSizeBytes + dstChromaSizeBytes;

    // Copy over the luma channel.
    // If strides match, then it's a single copy.
    if (srcLumaRowStrideBytes == dstLumaRowStrideBytes) {
        memcpy(dstLumaPtr, srcLumaPtr, dstLumaSizeBytes);
    } else {
        // Else, copy row by row.
        for (int y = 0; y < srcHeight; y++) {
            uint8_t *srcLumaRow = srcLumaPtr + y * srcLumaRowStrideBytes;
            uint8_t *dstLumaRow = dstLumaPtr + y * dstLumaRowStrideBytes;
            memcpy(dstLumaRow, srcLumaRow, srcLumaRowStrideBytes);
        }
    }

    bool succeeded;

    // Handle the chroma channels.
    // If they are not interleaved, then use memcpy.
    // Otherwise, use Halide to deinterleave.
    if (srcChromaElementStrideBytes == 1) {
        // If strides match, then it's a single copy per channel.
        if (srcChromaRowStrideBytes == dstChromaRowStrideBytes) {
            memcpy(dstChromaUPtr, srcChromaUPtr, dstChromaSizeBytes);
            memcpy(dstChromaVPtr, srcChromaVPtr, dstChromaSizeBytes);
        } else {
            // Else, copy row by row.
            for (int y = 0; y < srcHeight; y++) {
                uint8_t *srcChromaURow =
                        srcChromaUPtr + y * srcChromaRowStrideBytes;
                uint8_t *dstChromaURow =
                        dstChromaUPtr + y * dstChromaRowStrideBytes;
                memcpy(dstChromaURow, srcChromaURow, srcChromaRowStrideBytes);
            }
            for (int y = 0; y < srcHeight; y++) {
                uint8_t *srcChromaVRow =
                        srcChromaVPtr + y * srcChromaRowStrideBytes;
                uint8_t *dstChromaVRow =
                        dstChromaVPtr + y * dstChromaRowStrideBytes;
                memcpy(dstChromaVRow, srcChromaVRow, srcChromaRowStrideBytes);
            }
        }
        succeeded = true;
    } else {
        // Make these static so that we can reuse device allocations across frames.
        // It doesn't matter now, but useful for GPU backends.
        static buffer_t srcBuf = { 0 };
        static buffer_t dstBuf0 = { 0 };
        static buffer_t dstBuf1 = { 0 };

        srcBuf.host = srcChromaUVInterleavedPtr;
        srcBuf.host_dirty = true;
        srcBuf.extent[0] = 2 * srcChromaWidth;  // src is interleaved.
        srcBuf.extent[1] = srcChromaHeight;
        srcBuf.extent[2] = 0;
        srcBuf.extent[3] = 0;
        srcBuf.stride[0] = 1;
        srcBuf.stride[1] = 2 * srcChromaWidth;
        srcBuf.min[0] = 0;
        srcBuf.min[1] = 0;
        srcBuf.elem_size = 1;

        dstBuf0.host = swapDstUV ? dstChromaVPtr : dstChromaUPtr;
        dstBuf0.extent[0] = srcChromaWidth;  // src and dst width and height match.
        dstBuf0.extent[1] = srcChromaHeight;
        dstBuf0.extent[2] = 0;
        dstBuf0.extent[3] = 0;
        dstBuf0.stride[0] = 1;
        dstBuf0.stride[1] = dstChromaRowStrideBytes;  // Halide stride in pixels but pixel size is 1 byte.
        dstBuf0.min[0] = 0;
        dstBuf0.min[1] = 0;
        dstBuf0.elem_size = 1;

        dstBuf1.host = swapDstUV ? dstChromaUPtr : dstChromaVPtr;
        dstBuf1.extent[0] = srcChromaWidth;  // src and dst width and height match.
        dstBuf1.extent[1] = srcChromaHeight;
        dstBuf1.extent[2] = 0;
        dstBuf1.extent[3] = 0;
        dstBuf1.stride[0] = 1;
        dstBuf1.stride[1] = dstChromaRowStrideBytes;  // Halide stride in pixels but pixel size is 1 byte.
        dstBuf1.min[0] = 0;
        dstBuf1.min[1] = 0;
        dstBuf1.elem_size = 1;

        // Use Halide to deinterleave the chroma channels.
        int err = deinterleave(&srcBuf, &dstBuf0, &dstBuf1);
        if (err != halide_error_code_success) {
            LOGE("deinterleave failed with error code: %d", err);
        }
        succeeded = (err == halide_error_code_success);
    }
    ANativeWindow_unlockAndPost(win);
    ANativeWindow_release(win);
    return succeeded;
}
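The plane arithmetic above follows the YV12 layout documented for android.graphics.ImageFormat: a luma plane of stride x height first, then the Cr (V) plane, then Cb (U), each chroma row padded to ALIGN(stride / 2, 16). A small self-contained sketch of that computation, with a worked example:

#define ALIGN(x, a) (((x) + (a) - 1) & ~((a) - 1))

/* Sketch: YV12 plane offsets for a locked ANativeWindow_Buffer,
 * following the documented android.graphics.ImageFormat.YV12 layout. */
static void yv12_plane_offsets(int32_t stride, int32_t height,
                               size_t *crOffset, size_t *cbOffset)
{
    size_t lumaSize     = (size_t)stride * height;
    size_t chromaStride = ALIGN(stride / 2, 16);
    size_t chromaSize   = chromaStride * (height / 2);
    *crOffset = lumaSize;              /* V (Cr) plane comes first */
    *cbOffset = lumaSize + chromaSize; /* then U (Cb) */
}
/* e.g. stride = 720, height = 480: luma = 345600, chromaStride = 368,
 * chromaSize = 88320 -> V at offset 345600, U at offset 433920. */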
Code Example #11
JNIEXPORT bool JNICALL Java_com_example_helloandroidcamera2_JNIUtils_edgeDetect(
    JNIEnv *env, jobject obj, jint srcWidth, jint srcHeight,
    jobject srcLumaByteBuffer, jint srcLumaRowStrideBytes, jobject dstSurface) {
    uint8_t *srcLumaPtr = reinterpret_cast<uint8_t *>(
        env->GetDirectBufferAddress(srcLumaByteBuffer));
    if (srcLumaPtr == NULL) {
        return false;
    }

    ANativeWindow *win = ANativeWindow_fromSurface(env, dstSurface);
    ANativeWindow_acquire(win);

    // Set the buffer geometry before locking; it must not change while locked.
    ANativeWindow_setBuffersGeometry(win, srcWidth, srcHeight, 0 /*format unchanged*/);

    ANativeWindow_Buffer buf;
    if (int err = ANativeWindow_lock(win, &buf, NULL)) {
        LOGE("ANativeWindow_lock failed with error code %d\n", err);
        ANativeWindow_release(win);
        return false;
    }

    uint8_t *dstLumaPtr = reinterpret_cast<uint8_t *>(buf.bits);
    if (dstLumaPtr == NULL) {
        ANativeWindow_unlockAndPost(win);
        ANativeWindow_release(win);
        return false;
    }

    if (buf.format != IMAGE_FORMAT_YV12) {
        LOGE("ANativeWindow buffer locked but its format was not YV12.");
        ANativeWindow_unlockAndPost(win);
        ANativeWindow_release(win);
        return false;
    }

    if (!checkBufferSizesMatch(srcWidth, srcHeight, &buf)) {
        LOGE("ANativeWindow buffer locked but its size was %d x %d, expected "
                "%d x %d", buf.width, buf.height, srcWidth, srcHeight);
        ANativeWindow_unlockAndPost(win);
        ANativeWindow_release(win);
        return false;
    }

    uint32_t dstLumaSizeBytes = buf.stride * buf.height;
    uint32_t dstChromaRowStrideBytes = ALIGN(buf.stride / 2, 16);
    // Size of one chroma plane.
    uint32_t dstChromaSizeBytes = dstChromaRowStrideBytes * buf.height / 2;
    uint8_t *dstChromaVPtr = dstLumaPtr + dstLumaSizeBytes;
    uint8_t *dstChromaUPtr = dstLumaPtr + dstLumaSizeBytes + dstChromaSizeBytes;

    // Make these static so that we can reuse device allocations across frames.
    // It doesn't matter now, but useful for GPU backends.
    static buffer_t srcBuf = { 0 };
    static buffer_t dstBuf = { 0 };
    static buffer_t dstChromaBuf = { 0 };

    srcBuf.host = srcLumaPtr;
    srcBuf.host_dirty = true;
    srcBuf.extent[0] = srcWidth;
    srcBuf.extent[1] = srcHeight;
    srcBuf.extent[2] = 0;
    srcBuf.extent[3] = 0;
    srcBuf.stride[0] = 1;
    srcBuf.stride[1] = srcLumaRowStrideBytes;
    srcBuf.min[0] = 0;
    srcBuf.min[1] = 0;
    srcBuf.elem_size = 1;

    dstBuf.host = dstLumaPtr;
    dstBuf.extent[0] = buf.width;  // src and dst width/height actually match.
    dstBuf.extent[1] = buf.height;
    dstBuf.extent[2] = 0;
    dstBuf.extent[3] = 0;
    dstBuf.stride[0] = 1;
    dstBuf.stride[1] = buf.stride;  // src and dst strides actually match.
    dstBuf.min[0] = 0;
    dstBuf.min[1] = 0;
    dstBuf.elem_size = 1;

    static bool first_call = true;
    static unsigned counter = 0;
    static unsigned times[16];
    if (first_call) {
        LOGD("According to Halide, host system has %d cpus\n",
             halide_host_cpu_count());
        first_call = false;
        for (int t = 0; t < 16; t++) {
            times[t] = 0;
        }
    }

    // Set chrominance to 128 to appear grayscale.
    // The dst chroma is guaranteed to be tightly packed since it's YV12.
    memset(dstChromaVPtr, 128, dstChromaSizeBytes * 2);

    int64_t t1 = halide_current_time_ns();
    int err = edge_detect(&srcBuf, &dstBuf);
    if (err != halide_error_code_success) {
        LOGE("edge_detect failed with error code: %d", err);
    }

    int64_t t2 = halide_current_time_ns();
    unsigned elapsed_us = (t2 - t1) / 1000;

    times[counter & 15] = elapsed_us;
    counter++;
    unsigned min = times[0];
    for (int i = 1; i < 16; i++) {
        if (times[i] < min) {
            min = times[i];
        }
    }
    LOGD("Time taken: %d us (minimum: %d us)", elapsed_us, min);

    ANativeWindow_unlockAndPost(win);
    ANativeWindow_release(win);

    return (err == halide_error_code_success);
}
Code Example #12
File: native.cpp  Project: 202198/Halide
JNIEXPORT void JNICALL Java_com_example_hellohalide_CameraPreview_processFrame(JNIEnv * env, jobject obj, jbyteArray jSrc, jobject surf) {

    halide_set_error_handler(handler);

    unsigned char *src = (unsigned char *)env->GetByteArrayElements(jSrc, NULL);

    ANativeWindow *win = ANativeWindow_fromSurface(env, surf);
    ANativeWindow_acquire(win);

    static bool first_call = true;
    static unsigned counter = 0;
    static unsigned times[16];
    if (first_call) {
      LOGD("Resetting buffer format");
      ANativeWindow_setBuffersGeometry(win, 640, 360, 0);
      first_call = false;
      for (int t = 0; t < 16; t++) times[t] = 0;
    }

    ANativeWindow_Buffer buf;
    ARect rect = {0, 0, 640, 360};
    if (ANativeWindow_lock(win, &buf, &rect) < 0) {
        LOGD("ANativeWindow_lock failed\n");
        ANativeWindow_release(win);
        env->ReleaseByteArrayElements(jSrc, (jbyte *)src, 0);
        return;
    }

    uint8_t *dst = (uint8_t *)buf.bits;
    buffer_t srcBuf = {0}, dstBuf = {0};
    srcBuf.host = (uint8_t *)src;
    srcBuf.extent[0] = 642;
    srcBuf.extent[1] = 362;
    srcBuf.extent[2] = 1;
    srcBuf.extent[3] = 1;
    srcBuf.stride[0] = 1;
    srcBuf.stride[1] = 640;
    srcBuf.min[0] = -1;
    srcBuf.min[1] = -1;
    srcBuf.elem_size = 1;

    dstBuf.host = dst;
    dstBuf.extent[0] = 640;
    dstBuf.extent[1] = 360;
    dstBuf.extent[2] = 1;
    dstBuf.extent[3] = 1;
    dstBuf.stride[0] = 1;
    dstBuf.stride[1] = 640;
    dstBuf.min[0] = 0;
    dstBuf.min[1] = 0;
    dstBuf.elem_size = 1;

    timeval t1, t2;
    gettimeofday(&t1, NULL);
    halide(&srcBuf, &dstBuf);
    gettimeofday(&t2, NULL);
    unsigned elapsed = (t2.tv_sec - t1.tv_sec)*1000000 + (t2.tv_usec - t1.tv_usec);

    times[counter & 15] = elapsed;
    counter++;
    unsigned min = times[0];
    for (int i = 1; i < 16; i++) {
        if (times[i] < min) min = times[i];
    }
    LOGD("Time taken: %d (%d)", elapsed, min);

    // Just copy over chrominance untouched
    memcpy(dst + 640*360, src + 640*480, 320*180);
    memcpy(dst + 640*360 + 320*180, src + 640*480 + 320*240, 320*180);

    ANativeWindow_unlockAndPost(win);
    ANativeWindow_release(win);

    env->ReleaseByteArrayElements(jSrc, (jbyte *)src, 0);
}