Example #1
static void engine_draw_frame(Engine* engine, const cv::Mat& frame)
{
    if (engine->app->window == NULL)
        return; // No window.

    ANativeWindow_Buffer buffer;
    if (ANativeWindow_lock(engine->app->window, &buffer, NULL) < 0)
    {
        LOGW("Unable to lock window buffer");
        return;
    }

    void* pixels = buffer.bits;

    int left_indent = (buffer.width-frame.cols)/2;
    int top_indent = (buffer.height-frame.rows)/2;

    for (int yy = top_indent; yy < std::min(frame.rows+top_indent, buffer.height); yy++)
    {
        unsigned char* line = (unsigned char*)pixels;
        memcpy(line+left_indent*4*sizeof(unsigned char), frame.ptr<unsigned char>(yy),
               std::min(frame.cols, buffer.width)*4*sizeof(unsigned char));
        // go to next line
        pixels = (int32_t*)pixels + buffer.stride;
    }
    ANativeWindow_unlockAndPost(engine->app->window);
}
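Note: ANativeWindow_Buffer.stride is measured in pixels, not bytes, which is why the loop above advances the pointer by buffer.stride int32_t elements per line. Example #19 below is a revised version of this same function that also clears the borders around the frame.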
Example #2
status_t Surface_getPixels(void* window, uint32_t* width, uint32_t* height, uint32_t* stride, void** pixels)
{
	ANativeWindow *native_window = (ANativeWindow *)window;

	ANativeWindow_Buffer buffer = {0};
	int32_t ret = ANativeWindow_lock(native_window, &buffer, NULL);
	if (ret != 0) {
		LOGE("failed to lock native window: %d", ret);
		return ERROR;
	}

	if ((WINDOW_FORMAT_RGBX_8888 != buffer.format) && (WINDOW_FORMAT_RGBA_8888 != buffer.format)) {
		// 2015.3.31 guoliangma added to fix "already locked" problem when window_format is invalid
		ANativeWindow_unlockAndPost(native_window);

		LOGE("native window format is not valid: %d", buffer.format);
		return ERROR;
	}

	if (NULL == buffer.bits) {
		// 2015.3.31 guoliangma added
		ANativeWindow_unlockAndPost(native_window);

		LOGE("native window bits is null");
		return ERROR;
	}

	*width	= buffer.width;
	*height	= buffer.height;
	*stride	= buffer.stride;
	*pixels = buffer.bits;
	return OK;
}
Example #3
//rendering
JNIEXPORT void JNICALL Java_me_lake_librestreaming_core_ColorHelper_renderingSurface
(JNIEnv * env, jobject thiz,jobject javaSurface,jbyteArray pixelsArray,jint w,jint h,jint size) {
	ANativeWindow* window = ANativeWindow_fromSurface(env, javaSurface);
	if(window!=NULL)
	{
		ANativeWindow_setBuffersGeometry(window,w,h,COLOR_FORMAT_NV21);
		ANativeWindow_Buffer buffer;
		if (ANativeWindow_lock(window, &buffer, NULL) == 0) {
			unsigned char *pixels = (unsigned char*)(*env)->GetByteArrayElements(env,pixelsArray, 0);
			if(buffer.width==buffer.stride){
				memcpy(buffer.bits, pixels,  size);
			}else{
				int height = h*3/2; // NV21: h luma rows plus h/2 interleaved VU rows, each w bytes wide
				int width = w;
				int i=0;
				for(;i<height;++i)
					memcpy(buffer.bits +  buffer.stride * i
						, pixels + width * i
						, width);
			}
			(*env)->ReleaseByteArrayElements(env,pixelsArray,pixels,JNI_ABORT);
			ANativeWindow_unlockAndPost(window);
		}
		ANativeWindow_release(window);
	}
	return;
}
Example #4
// transfer specific frame data to the Surface(ANativeWindow)
int copyToSurface(uvc_frame_t *frame, ANativeWindow **window) {
	// ENTER();
	int result = 0;
	if (LIKELY(*window)) {
		ANativeWindow_Buffer buffer;
		if (LIKELY(ANativeWindow_lock(*window, &buffer, NULL) == 0)) {
			// source = frame data
			const uint8_t *src = (uint8_t *)frame->data;
			const int src_w = frame->width * PREVIEW_PIXEL_BYTES;
			const int src_step = frame->width * PREVIEW_PIXEL_BYTES;
			// destination = Surface(ANativeWindow)
			uint8_t *dest = (uint8_t *)buffer.bits;
			const int dest_w = buffer.width * PREVIEW_PIXEL_BYTES;
			const int dest_step = buffer.stride * PREVIEW_PIXEL_BYTES;
			// use lower transfer bytes
			const int w = src_w < dest_w ? src_w : dest_w;
			// use lower height
			const int h = frame->height < buffer.height ? frame->height : buffer.height;
			// transfer from frame data to the Surface
			copyFrame(src, dest, w, h, src_step, dest_step);
			ANativeWindow_unlockAndPost(*window);
		} else {
			result = -1;
		}
	} else {
		result = -1;
	}
	return result; //RETURN(result, int);
}
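copyFrame() is not shown in this example. A minimal sketch of a compatible implementation (an assumption, not the project's actual code), treating w, src_step, and dest_step as byte counts and relying on memcpy from <string.h>:

// Hypothetical helper: copy h rows of w bytes each, advancing the source
// and destination pointers by their own byte strides after every row.
static void copyFrame(const uint8_t *src, uint8_t *dest,
	const int w, const int h, const int src_step, const int dest_step) {
	for (int i = 0; i < h; i++) {
		memcpy(dest, src, w);
		src += src_step;
		dest += dest_step;
	}
}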
Example #5
bool TransferQueue::tryUpdateQueueWithBitmap(const TileRenderInfo* renderInfo,
                                          int x, int y, const SkBitmap& bitmap)
{
    m_transferQueueItemLocks.lock();
    bool ready = readyForUpdate();
    TextureUploadType currentUploadType = m_currentUploadType;
    m_transferQueueItemLocks.unlock();
    if (!ready) {
        XLOG("Quit bitmap update: not ready! for tile x y %d %d",
             renderInfo->x, renderInfo->y);
        return false;
    }
    if (currentUploadType == GpuUpload) {
        // a) Dequeue the Surface Texture and write into the buffer
        if (!m_ANW.get()) {
            XLOG("ERROR: ANW is null");
            return false;
        }

        ANativeWindow_Buffer buffer;
        if (ANativeWindow_lock(m_ANW.get(), &buffer, 0))
            return false;

        uint8_t* img = (uint8_t*)buffer.bits;
        int row, col;
        int bpp = 4; // Now we only deal with RGBA8888 format.
        int width = TilesManager::instance()->tileWidth();
        int height = TilesManager::instance()->tileHeight();
        if (!x && !y && bitmap.width() == width && bitmap.height() == height) {
            bitmap.lockPixels();
            uint8_t* bitmapOrigin = static_cast<uint8_t*>(bitmap.getPixels());
            if (buffer.stride != bitmap.width())
                // Copied line by line since we need to handle the offsets and stride.
                for (row = 0 ; row < bitmap.height(); row ++) {
                    uint8_t* dst = &(img[buffer.stride * row * bpp]);
                    uint8_t* src = &(bitmapOrigin[bitmap.width() * row * bpp]);
                    memcpy(dst, src, bpp * bitmap.width());
                }
            else
                memcpy(img, bitmapOrigin, bpp * bitmap.width() * bitmap.height());

            bitmap.unlockPixels();
        } else {
            // TODO: implement the partial invalidate here!
            XLOG("ERROR: don't expect to get here yet before we support partial inval");
        }

        ANativeWindow_unlockAndPost(m_ANW.get());
    }

    m_transferQueueItemLocks.lock();
    // b) After updating the SurfaceTexture, update the transfer queue info.
    addItemInTransferQueue(renderInfo, currentUploadType, &bitmap);

    m_transferQueueItemLocks.unlock();
    XLOG("Bitmap updated x, y %d %d, baseTile %p",
         renderInfo->x, renderInfo->y, renderInfo->baseTile);
    return true;
}
Example #6
void VideoPlay::PlayAudioVideo()
{
	clock_t start, end;
	int us = 0;
	int n = 0;
	int x = 0;
	while (1)
	{
		if (m_eState == State_Pause)
		{
			sem_wait(&semPlay);
		}
		else if (m_eState == State_Stop)
			break;
		start = clock();
		ANativeWindow_Buffer windowBuffer;
		if (ANativeWindow_lock(m_nativeWindow, &windowBuffer, NULL) < 0)
		{
			LOGE("cannot lock window");
		}
		else
		{
			sem_wait(&semVideoFull);
			pthread_mutex_lock(&mutexVideo);
			unsigned char* tmp = (unsigned char*) m_videoBuff.front();
			m_videoBuff.pop();
			pthread_mutex_unlock(&mutexVideo);
			sem_post(&semVideoEmpty);
			unsigned int* data = (unsigned int*) windowBuffer.bits;
			for (int i = 0; i < m_height; i++)
			{
				for (int j = 0; j < m_nWidth; j++)
				{
					int nFlag = 0;
					memcpy(&nFlag, tmp + (m_nWidth * i + j) * 3, 3);
					data[i * windowBuffer.stride + j] = nFlag;
				}
			}
			delete[] tmp;
			tmp = NULL;
			ANativeWindow_unlockAndPost(m_nativeWindow);
			end = clock();
			us += m_ptm - (end - start);
			n++;
			if (n % 25 == 0)
			{
				x++;
				LOGI("m_ptm=%d,第%d秒", m_ptm, x);
			}
			if (us > 0)
			{
				usleep(us);
				us = 0;
			}
		}
	}

}
Example #7
/*
 * Class:     com_mcxiaoke_ndk_Native
 * Method:    renderNW
 * Signature: (JLandroid/view/Surface;)Z
 */
JNIEXPORT jboolean JNICALL Java_com_mcxiaoke_ndk_Native_renderNW
(JNIEnv *env, jclass clazz, jlong avi, jobject surface)
{
    jboolean isFrameRead = JNI_FALSE;

    long frameSize = 0;
    int keyFrame = 0;

    // Get the native window from the surface
    ANativeWindow* nativeWindow = ANativeWindow_fromSurface(
                                      env, surface);
    if (0 == nativeWindow)
    {
        ThrowException(env, "java/io/RuntimeException",
                       "Unable to get native window from surface.");
        goto exit;
    }

    // Lock the native window and get access to raw buffer
    ANativeWindow_Buffer windowBuffer;
    if (0 > ANativeWindow_lock(nativeWindow, &windowBuffer, 0))
    {
        ThrowException(env, "java/io/RuntimeException",
                       "Unable to lock native window.");
        goto release;
    }

    // Read AVI frame bytes to raw buffer
    frameSize = AVI_read_frame((avi_t*) avi,
                               (char*) windowBuffer.bits,
                               &keyFrame);

    // Check if frame is successfully read
    if (0 < frameSize)
    {
        isFrameRead = JNI_TRUE;
    }

    // Unlock and post the buffer for displaying
    if (0 > ANativeWindow_unlockAndPost(nativeWindow))
    {
        ThrowException(env, "java/io/RuntimeException",
                       "Unable to unlock and post to native window.");
        goto release;
    }

release:
    // Release the native window
    ANativeWindow_release(nativeWindow);
    nativeWindow = 0;

exit:
    return isFrameRead;
}
Example #8
void UVCPreview::clearDisplay() {
	ENTER();

	ANativeWindow_Buffer buffer;
	pthread_mutex_lock(&capture_mutex);
	{
		if (LIKELY(mCaptureWindow)) {
			if (LIKELY(ANativeWindow_lock(mCaptureWindow, &buffer, NULL) == 0)) {
				uint8_t *dest = (uint8_t *)buffer.bits;
				const size_t bytes = buffer.width * PREVIEW_PIXEL_BYTES;
				const int stride = buffer.stride * PREVIEW_PIXEL_BYTES;
				for (int i = 0; i < buffer.height; i++) {
					memset(dest, 0, bytes);
					dest += stride;
				}
				ANativeWindow_unlockAndPost(mCaptureWindow);
			}
		}
	}
	pthread_mutex_unlock(&capture_mutex);
	pthread_mutex_lock(&preview_mutex);
	{
		if (LIKELY(mPreviewWindow)) {
			if (LIKELY(ANativeWindow_lock(mPreviewWindow, &buffer, NULL) == 0)) {
				uint8_t *dest = (uint8_t *)buffer.bits;
				const size_t bytes = buffer.width * PREVIEW_PIXEL_BYTES;
				const int stride = buffer.stride * PREVIEW_PIXEL_BYTES;
				for (int i = 0; i < buffer.height; i++) {
					memset(dest, 0, bytes);
					dest += stride;
				}
				ANativeWindow_unlockAndPost(mPreviewWindow);
			}
		}
	}
	pthread_mutex_unlock(&preview_mutex);

	EXIT();
}
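The capture and preview branches above are identical except for the window they clear. A possible refactor (not from the original source), assuming the same LIKELY and PREVIEW_PIXEL_BYTES macros are in scope:

// Hypothetical helper: zero-fill one window's buffer, honoring its stride.
static void clearWindow(ANativeWindow *window) {
	ANativeWindow_Buffer buffer;
	if (LIKELY(ANativeWindow_lock(window, &buffer, NULL) == 0)) {
		uint8_t *dest = (uint8_t *)buffer.bits;
		const size_t bytes = buffer.width * PREVIEW_PIXEL_BYTES;
		const int stride = buffer.stride * PREVIEW_PIXEL_BYTES;
		for (int i = 0; i < buffer.height; i++) {
			memset(dest, 0, bytes);
			dest += stride;
		}
		ANativeWindow_unlockAndPost(window);
	}
}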
Example #9
	void ANativeWindowRenderer::render(MediaBuffer* buffer){
		if(!mInited)
			return;
		
		ANativeWindow_Buffer buf;
		int err;

		ARect bounds(mRect);
		err = ANativeWindow_lock(mNativeWindow, &buf, &bounds);
		if(err != 0){
			ALOGE("fail to ANativeWindow_lock error:%d!", err);
			return;
		}
		
        mConverter->convert(buffer->data(),
							mWidth, mHeight,
							mRect.left, mRect.top, mRect.right, mRect.bottom,
							buf.bits,
							buf.stride, buf.height,
							0, 0, mCropWidth - 1, mCropHeight - 1);

		//#define DUMP_YUV_TO_FILE
#ifdef DUMP_YUV_TO_FILE
		FILE* f1 = fopen("/data/yuv", "ab+");
		if(f1 != NULL){
			size_t res = fwrite(buffer->data(), 1, buffer->size(), f1);
			fclose(f1);
			ALOGV("fwrite %zu of %zu to /data/yuv!", res, buffer->size());
		}else
			ALOGE("can not fopen /data/yuv!!");
#endif

		//#define DUMP_RGB_TO_FILE
#ifdef DUMP_RGB_TO_FILE
		FILE* f2 = fopen("/data/rgb", "ab+");
		if(f2 != NULL){
			size_t res = fwrite(buf.bits, 1, buf.stride*buf.height*2, f2);
			fclose(f2);
			ALOGV("fwrite %zu of %d to /data/rgb!", res, buf.stride*buf.height*2);
		}else
			ALOGE("can not fopen /data/rgb!!");
#endif

		err = ANativeWindow_unlockAndPost(mNativeWindow);
		if(err != 0) {
			ALOGE("failed to ANativeWindow_unlockAndPost error %d", err);
			return;
		}
	}
Example #10
    jint Java_ws_websca_benchscaw_MainActivity_directRender( JNIEnv* env, jobject thiz, jobject surface ) {
        //__android_log_write(ANDROID_LOG_DEBUG, "Benchscaw JNI native nextFrame cpu flags:", "flags");



        //__android_log_write(ANDROID_LOG_DEBUG, "Benchscaw JNI native nextFrame", "av_read_frame");
        int done = av_read_frame(pFormatCtx, &packet);
        if(done>=0) {
            // Is this a packet from the video stream?
            if(packet.stream_index==videoStream) {
                // Decode video frame
                //__android_log_write(ANDROID_LOG_DEBUG, "Benchscaw JNI native nextFrame", "avcodec_decode_video2");
                avcodec_decode_video2(pCodecCtx, pFrame, &frameFinished, &packet);

                // Did we get a video frame?
                if(frameFinished) {
                    ANativeWindow* window = ANativeWindow_fromSurface(env, surface);
                    ANativeWindow_Buffer buffer;
                    if (ANativeWindow_lock(window, &buffer, NULL) == 0) {
                        // Convert the image from its native format to RGB
                        AVPicture pict;
                        pict.data[0] = (uint8_t*)buffer.bits;
                        pict.linesize[0] = buffer.stride*4;
                        sws_scale
                        (
                            sws_ctx,
                            (uint8_t const * const *)pFrame->data,
                            pFrame->linesize,
                            0,
                            pCodecCtx->height,
                            pict.data,
                            pict.linesize
                        );

                        //char str[200];
                        //sprintf(str, "%i", buffer.width);
                        ///__android_log_write(ANDROID_LOG_DEBUG, "width", str);
                        ANativeWindow_unlockAndPost(window);
                    }
                    ANativeWindow_release(window);
                }
            }

            // Free the packet that was allocated by av_read_frame
            //__android_log_write(ANDROID_LOG_DEBUG, "Benchscaw JNI native nextFrame", "av_free_packet");
            av_free_packet(&packet);
        }
        return done;
    }
Example #11
void android_main(struct android_app* app) {
    // Make sure glue isn't stripped.
    app_dummy();

    app->userData = NULL;
    app->onAppCmd = handle_app_command;
    app->onInputEvent = handle_input;

    while (1) {
        // Read all pending events. If app_has_focus is true, then we are going 
        // to read any events that are ready then render the screen. If we don't
        // have focus, we are going to block and spin around the poll loop until
        // we get focus again, preventing us from doing a bunch of rendering 
        // when the app isn't even visible.
        int ident;
        int events;
        struct android_poll_source* source;

        while ((ident=ALooper_pollAll(app_has_focus ? 0 : -1, NULL, &events, (void**)&source)) >= 0) {
            // Process this event.
            if (source != NULL) {
                source->process(app, source);
            }

            // Check if we are exiting.
            if (app->destroyRequested != 0) {
                LOGI("Engine thread destroy requested!");
                return;
            }
        }


        /* Now that we've dealt with input, draw stuff */
        if (app->window != NULL) {
            ++tick;
            LOGI("Rendering frame %d", tick);
            ANativeWindow_Buffer buffer;
            if (ANativeWindow_lock(app->window, &buffer, NULL) < 0) {
                LOGW("Unable to lock window buffer");
                continue;
            }

            draw_frame(&buffer);

            ANativeWindow_unlockAndPost(app->window);
        }
    }
}
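draw_frame() is not shown here. A minimal sketch of a compatible renderer (purely illustrative, assuming a 32-bit RGBA/RGBX window format):

// Hypothetical renderer: fill the locked buffer with opaque gray.
static void draw_frame(ANativeWindow_Buffer* buffer) {
    uint32_t* pixels = (uint32_t*)buffer->bits;
    for (int y = 0; y < buffer->height; y++) {
        for (int x = 0; x < buffer->width; x++) {
            pixels[x] = 0xFF808080; // little-endian RGBA: opaque gray
        }
        pixels += buffer->stride; // stride is in pixels, not bytes
    }
}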
Example #12
void decodeAndRender(JNIEnv *pEnv) {
	ANativeWindow_Buffer 	windowBuffer;
	AVPacket        		packet;
	int 					i=0;
	int            			frameFinished;
	int 					lineCnt;
	while(av_read_frame(formatCtx, &packet)>=0 && !stop) {
		// Is this a packet from the video stream?
		if(packet.stream_index==videoStream) {
			// Decode video frame
			avcodec_decode_video2(codecCtx, decodedFrame, &frameFinished,
			   &packet);
			// Did we get a video frame?
			if(frameFinished) {
				// Convert the image from its native format to RGBA
				sws_scale
				(
					sws_ctx,
					(uint8_t const * const *)decodedFrame->data,
					decodedFrame->linesize,
					0,
					codecCtx->height,
					frameRGBA->data,
					frameRGBA->linesize
				);
				// lock the window buffer
				if (ANativeWindow_lock(window, &windowBuffer, NULL) < 0) {
					LOGE("cannot lock window");
				} else {
					// draw the frame on buffer
					LOGI("copy buffer %d:%d:%d", width, height, width*height*4);
					LOGI("window buffer: %d:%d:%d", windowBuffer.width,
							windowBuffer.height, windowBuffer.stride);
					memcpy(windowBuffer.bits, buffer,  width * height * 4); // assumes windowBuffer.stride == width
					// unlock the window buffer and post it to display
					ANativeWindow_unlockAndPost(window);
					// count number of frames
					++i;
				}
			}
		}
		// Free the packet that was allocated by av_read_frame
		av_free_packet(&packet);
	}
	LOGI("total No. of frames decoded and rendered %d", i);
	finish(pEnv);
}
Example #13
void drawSomething(struct android_app* app) {
	ANativeWindow_Buffer lWindowBuffer;
	//step 1. set window buffer format and size
	ANativeWindow* lWindow = app->window;
	ANativeWindow_setBuffersGeometry(lWindow, 0, 0, WINDOW_FORMAT_RGBA_8888);
	//step 2.lock the window's next drawing surface
	if (ANativeWindow_lock(lWindow, &lWindowBuffer, NULL) < 0) {
		LOGE(1, "cannot lock window");
		return;
	}
//	LOGI(2, "height %d, width %d, stride %d", lWindowBuffer.height, lWindowBuffer.width, lWindowBuffer.stride);
	//step 3. clear the buffer
	memset(lWindowBuffer.bits, 0, lWindowBuffer.stride*lWindowBuffer.height*sizeof(uint32_t));
	//step 4. draw something
	int sqh = 150, sqw = 100;
	USERDATA *userData = (USERDATA*)app->userData;
	if (-1 == userData->prevWst) {
		//initialize
		userData->prevWst = lWindowBuffer.stride/2 - sqw/2;
	}
	int wst = userData->prevWst + userData->xMove;
	//update
	userData->prevWst = wst;
	if (wst < 0) {
		wst = 0;
	} else if (wst > lWindowBuffer.stride - sqw) {
		wst = lWindowBuffer.stride - sqw;
	}
	int wed = wst + sqw;
	int hst = lWindowBuffer.height/2 - sqh/2;
	int hed = hst + sqh;
//	LOGI(2, "drawing square width: %d:%d, height: %d:%d", wst, wed, hst, hed);
	for (int i = hst; i < hed; ++i) {
		for (int j = wst; j < wed; ++j) {
			((char*)(lWindowBuffer.bits))[(i*lWindowBuffer.stride + j)*sizeof(uint32_t)] = (char)255;			//R
			((char*)(lWindowBuffer.bits))[(i*lWindowBuffer.stride + j)*sizeof(uint32_t) + 1] = (char)0;		//G
			((char*)(lWindowBuffer.bits))[(i*lWindowBuffer.stride + j)*sizeof(uint32_t) + 2] = (char)0;		//B
			((char*)(lWindowBuffer.bits))[(i*lWindowBuffer.stride + j)*sizeof(uint32_t) + 3] = (char)255;		//A
		}
	}
	//step 5. unlock the window's drawing surface and post the new buffer to display
	ANativeWindow_unlockAndPost(lWindow);
	if (app->redrawNeeded) {
		app->redrawNeeded = 0;
	}
}
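Note (not from the original source): the horizontal centering and clamping above use lWindowBuffer.stride as if it were the visible width; on devices where the stride is larger than lWindowBuffer.width, the square can end up drawn partly into padding pixels that are never displayed.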
Example #14
void Graphics::redraw() {
  if (_app->window != NULL) {
    ANativeWindow_Buffer buffer;
    if (ANativeWindow_lock(_app->window, &buffer, NULL) < 0) {
      trace("Unable to lock window buffer");
    } else {
      void *pixels = buffer.bits;
      int width = MIN(_w, MIN(buffer.width, _screen->_w));
      int height = MIN(_h, MIN(buffer.height, _screen->_h));
      for (int y = 0; y < height; y++) {
        pixel_t *line = _screen->getLine(y);
        memcpy((pixel_t *)pixels, line, width * sizeof(pixel_t));
        pixels = (pixel_t*)pixels + buffer.stride;
      }
      ANativeWindow_unlockAndPost(_app->window);
    }
  }
}
Example #15
/**
 * The main function rendering a frame. In our case, it is yuv to RGBA8888
 * converter
 */
void CameraEngine::DrawFrame(void) {
  if (!cameraReady_ || !yuvReader_) return;
  AImage* image = yuvReader_->GetNextImage();
  if (!image) {
    return;
  }

  ANativeWindow_acquire(app_->window);
  ANativeWindow_Buffer buf;
  if (ANativeWindow_lock(app_->window, &buf, nullptr) < 0) {
    yuvReader_->DeleteImage(image);
    ANativeWindow_release(app_->window);  // balance the acquire above
    return;
  }

  yuvReader_->DisplayImage(&buf, image);
  ANativeWindow_unlockAndPost(app_->window);
  ANativeWindow_release(app_->window);
}
Example #16
/**
 * Draw just the current frame to the display.
 */
static void engine_draw_frame(struct engine* engine)
{
    if (engine->app->window != NULL) {
        suseconds_t t1, t2;

        ANativeWindow_Buffer buffer;
        ANativeWindow_lock(engine->app->window, &buffer, 0);
        engine->canvas = ps_canvas_replace_data(engine->canvas, 
                (uint8_t*)buffer.bits, engine->fmt, engine->width, engine->height, buffer.stride * engine->bpp); 

        memset((uint8_t*)buffer.bits, 0xFF, engine->height * buffer.stride * engine->bpp);
        t1 = get_time();
        draw_test(0, engine->context);
        t2 = get_time();

        LOGI("draw a frame use %.4f ms", ((double)t2 - (double)t1)/1000);
        ANativeWindow_unlockAndPost(engine->app->window);
    }
}
Example #17
/**
 * Initialize an context for the current display.
 */
static int engine_init_display(struct engine* engine)
{
    int w, h, f, s, b;
    ps_color_format fmt;    

    ps_initialize();

    w = ANativeWindow_getWidth(engine->app->window);
    h = ANativeWindow_getHeight(engine->app->window);
    f = ANativeWindow_getFormat(engine->app->window);

    
    if (f == WINDOW_FORMAT_RGBA_8888 || f == WINDOW_FORMAT_RGBX_8888) {
        b = 4;
        fmt = COLOR_FORMAT_ARGB;
    } else {
        b = 2;
        fmt = COLOR_FORMAT_RGB565;
    }

    ANativeWindow_Buffer buffer;
    ANativeWindow_lock(engine->app->window, &buffer, 0);
    s = buffer.stride * b;
    // create canvas with fake buffer bits !
    engine->canvas = ps_canvas_create_with_data((uint8_t*)buffer.bits, fmt, w, h, s);
    engine->context = ps_context_create(engine->canvas, 0);
    init_context(engine->context, engine->canvas, (unsigned char*)buffer.bits);    
    set_image_data(selt2565.bits, COLOR_FORMAT_RGB565, selt2565.width, selt2565.height, selt2565.pitch);
    set_pattern_data(pat565.bits, COLOR_FORMAT_RGB565, pat565.width, pat565.height, pat565.pitch);
   // ps_scale(engine->context, 4, 4); // zoom 

    engine->width = w;
    engine->height = h;
    engine->bpp = b;
    engine->fmt = fmt;
    ANativeWindow_unlockAndPost(engine->app->window);

    return 0;

}
Example #18
	void Java_com_test_surfaceview_TestNative_ShowYuv()
	{
		int width,height;
		width = 352;
		height = 240;
		int size = width * height*3/2;
		unsigned char *data = new unsigned char[size];
		getYV12Data("/sdcard/yuv/fg001.yuv",data,size);
		unsigned char * out =new unsigned char[width * height*3];
		YV12_to_RGB24(data,out,width,height);
		ANativeWindow_Buffer buf;
		if(ANativeWindow_lock(g_nativeWindow,&buf,NULL) <0 )
		{
			LOGE("cannot lock window");
		}
		else
		{
			memcpy(buf.bits,out,width * height *3);
			ANativeWindow_unlockAndPost(g_nativeWindow);
		}
		delete[] data;
		delete[] out;
	}
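The single memcpy above is correct only when the window's row layout exactly matches the packed RGB24 frame, i.e. the stride equals the width. A stride-aware variant would copy row by row; a sketch under the same 3-bytes-per-pixel assumption:

// Hypothetical stride-aware variant of the copy above.
static void copyRGB24Rows(const unsigned char *src, ANativeWindow_Buffer *buf,
	int width, int height) {
	uint8_t *dst = (uint8_t *)buf->bits;
	for (int i = 0; i < height; i++) {
		memcpy(dst, src, width * 3);
		dst += buf->stride * 3; // stride is reported in pixels
		src += width * 3;
	}
}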
Example #19
static void engine_draw_frame(Engine* engine, const cv::Mat& frame)
{
    if (engine->app->window == NULL)
        return; // No window.

    ANativeWindow_Buffer buffer;
    if (ANativeWindow_lock(engine->app->window, &buffer, NULL) < 0)
    {
        LOGW("Unable to lock window buffer");
        return;
    }

    int32_t* pixels = (int32_t*)buffer.bits;

    int left_indent = (buffer.width-frame.cols)/2;
    int top_indent = (buffer.height-frame.rows)/2;

    if (top_indent > 0)
    {
        memset(pixels, 0, top_indent*buffer.stride*sizeof(int32_t));
        pixels += top_indent*buffer.stride;
    }

    for (int yy = 0; yy < frame.rows; yy++)
    {
        if (left_indent > 0)
        {
            memset(pixels, 0, left_indent*sizeof(int32_t));
            memset(pixels+left_indent+frame.cols, 0, (buffer.stride-frame.cols-left_indent)*sizeof(int32_t));
        }
        int32_t* line = pixels + left_indent;
        size_t line_size = frame.cols*4*sizeof(unsigned char);
        memcpy(line, frame.ptr<unsigned char>(yy), line_size);
        // go to next line
        pixels += buffer.stride;
    }
    ANativeWindow_unlockAndPost(engine->app->window);
}
Example #20
    jint Java_ws_websca_benchscaw_MainActivity_surfaceTest( JNIEnv* env, jobject thiz, jobject surface )
    {
        //__android_log_write(ANDROID_LOG_DEBUG, "Java_ws_websca_benchscaw_MainActivity_surfaceTest", "surfaceTest()");
        if(ffmpegNextFrame()<0)
            return -1;
        //__android_log_write(ANDROID_LOG_DEBUG, "Java_ws_websca_benchscaw_MainActivity_surfaceTest", "ANativeWindow_fromSurface()");
        ANativeWindow* window = ANativeWindow_fromSurface(env, surface);
        ANativeWindow_Buffer buffer;
        //__android_log_write(ANDROID_LOG_DEBUG, "Java_ws_websca_benchscaw_MainActivity_surfaceTest", "ANativeWindow_lock()");
        if (ANativeWindow_lock(window, &buffer, NULL) == 0) {
            int y=0;

            for(y=0; y<pFrameRGB->height; y++)
            {
                memcpy((uint8_t *)buffer.bits+(y*buffer.stride*4), pFrameRGB->data[0]+y*pFrameRGB->linesize[0], pFrameRGB->width*4);
            }
            //__android_log_write(ANDROID_LOG_DEBUG, "Java_ws_websca_benchscaw_MainActivity_surfaceTest", "ANativeWindow_unlock_and_post()");
            ANativeWindow_unlockAndPost(window);
        }
        //__android_log_write(ANDROID_LOG_DEBUG, "Java_ws_websca_benchscaw_MainActivity_surfaceTest", "ANativeWindow_release()");
        ANativeWindow_release(window);
        return 1;
    }
Example #21
void SwViewport::PrepareToDraw() {

    NativeWindow = ANativeWindow_fromSurface(Jni, (jobject)Window);

    VERBOSE("Got native window %p", NativeWindow);
    int width = ANativeWindow_getWidth(NativeWindow);
    int height = ANativeWindow_getHeight(NativeWindow);
    int format = ANativeWindow_getFormat(NativeWindow);
    VERBOSE("Size: %ix%i, Format: %i", width, height, format);
    ANativeWindow_Buffer buffer;
    ARect rc;
    rc.left = 0;
    rc.top =0;
    rc.right = width;
    rc.bottom = height;
    buffer.bits = nullptr;
    ANativeWindow_lock(NativeWindow, &buffer, &rc);
    VERBOSE("Locked data: %p", buffer.bits);
    SkColorType color = SKIA_COLOR_FORMAT;
    int stride  = buffer.stride;

    if(format == WINDOW_FORMAT_RGB_565)
    {
        color = kRGB_565_SkColorType;
        stride*=2;
    }
    else
        stride *= 4;
    VERBOSE("Using color format: %i, stride: %i", color, stride);



    Surface.reset(SkSurface::NewRasterDirect(SkImageInfo::Make(width, height, color, kOpaque_SkAlphaType),
                  buffer.bits, stride));
    VERBOSE("Surface: %p", Surface.get());
}
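Note that PrepareToDraw() only locks the window: the matching ANativeWindow_unlockAndPost(NativeWindow) must be called after Skia has finished drawing into the surface, presumably in a companion method not shown here. The row-bytes value passed to SkSurface::NewRasterDirect is correctly derived from buffer.stride, which ANativeWindow_lock reports in pixels.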
Example #22
int nv_avc_redraw(void) {
	ANativeWindow_Buffer buffer;
	int ret = 0;

	// Check if there's a new frame
	if (update_rgb_frame()) {
		// Lock down a render buffer
		if (ANativeWindow_lock(window, &buffer, NULL) >= 0) {
			// Draw the frame to the buffer
			if (render_rgb_to_buffer(buffer.bits, 
				decoder_ctx->width *
				decoder_ctx->height *
				BYTES_PER_PIXEL)) {
				// A new frame will be drawn
				ret = 1;
			}

			// Draw the frame to the surface
			ANativeWindow_unlockAndPost(window);
		}
	}
	
	return ret;
}
Example #23
JNIEXPORT void JNICALL Java_com_example_hellohalide_CameraPreview_processFrame(
    JNIEnv *env, jobject obj, jbyteArray jSrc, jint j_w, jint j_h, jobject surf) {

    const int w = j_w, h = j_h;

    halide_set_error_handler(handler);

    unsigned char *src = (unsigned char *)env->GetByteArrayElements(jSrc, NULL);
    if (!src) {
        LOGD("src is null\n");
        return;
    }

    ANativeWindow *win = ANativeWindow_fromSurface(env, surf);
    ANativeWindow_acquire(win);

    static bool first_call = true;
    static unsigned counter = 0;
    static unsigned times[16];
    if (first_call) {
        LOGD("According to Halide, host system has %d cpus\n", halide_host_cpu_count());
        LOGD("Resetting buffer format");
        ANativeWindow_setBuffersGeometry(win, w, h, 0);
        first_call = false;
        for (int t = 0; t < 16; t++) times[t] = 0;
    }

    ANativeWindow_Buffer buf;
    ARect rect = {0, 0, w, h};

    if (int err = ANativeWindow_lock(win, &buf, NULL)) {
        LOGD("ANativeWindow_lock failed with error code %d\n", err);
        ANativeWindow_release(win);  // don't leak the window on the error path
        env->ReleaseByteArrayElements(jSrc, (jbyte *)src, JNI_ABORT);
        return;
    }

    uint8_t *dst = (uint8_t *)buf.bits;

    // If we're using opencl, use the gpu backend for it.
    halide_set_ocl_device_type("gpu");

    // Make these static so that we can reuse device allocations across frames.
    static buffer_t srcBuf = {0};
    static buffer_t dstBuf = {0};

    if (dst) {
        srcBuf.host = (uint8_t *)src;
        srcBuf.host_dirty = true;
        srcBuf.extent[0] = w;
        srcBuf.extent[1] = h;
        srcBuf.extent[2] = 0;
        srcBuf.extent[3] = 0;
        srcBuf.stride[0] = 1;
        srcBuf.stride[1] = w;
        srcBuf.min[0] = 0;
        srcBuf.min[1] = 0;
        srcBuf.elem_size = 1;

        dstBuf.host = dst;
        dstBuf.extent[0] = w;
        dstBuf.extent[1] = h;
        dstBuf.extent[2] = 0;
        dstBuf.extent[3] = 0;
        dstBuf.stride[0] = 1;
        dstBuf.stride[1] = w;
        dstBuf.min[0] = 0;
        dstBuf.min[1] = 0;
        dstBuf.elem_size = 1;

        // Just copy over chrominance untouched
        memcpy(dst + w*h, src + w*h, (w*h)/2);

        int64_t t1 = halide_current_time_ns();
        halide_generated(&srcBuf, &dstBuf);

        if (dstBuf.dev) {
            halide_copy_to_host(NULL, &dstBuf);
        }

        int64_t t2 = halide_current_time_ns();
        unsigned elapsed_us = (t2 - t1)/1000;


        times[counter & 15] = elapsed_us;
        counter++;
        unsigned min = times[0];
        for (int i = 1; i < 16; i++) {
            if (times[i] < min) min = times[i];
        }
        LOGD("Time taken: %d (%d)", elapsed_us, min);
    }

    ANativeWindow_unlockAndPost(win);
    ANativeWindow_release(win);

    env->ReleaseByteArrayElements(jSrc, (jbyte *)src, 0);
}
Example #24
void convert_image(VideoState *state, AVCodecContext *pCodecCtx, AVFrame *pFrame, AVPacket *avpkt, int *got_packet_ptr, int width, int height) {
	AVCodecContext *codecCtx = NULL;
	AVCodec *codec;
	AVFrame *frame = NULL;
	// declared up front (instead of mid-function) so the fail: path can
	// safely test them even when an early goto skips the initializers below
	uint8_t *buffer = NULL;
	int ret = -1;

	*got_packet_ptr = 0;

	if (width == -1) {
		width = pCodecCtx->width;
	}

	if (height == -1) {
		height = pCodecCtx->height;
	}

	codec = avcodec_find_encoder(TARGET_IMAGE_CODEC);
	if (!codec) {
	    printf("avcodec_find_decoder() failed to find decoder\n");
		goto fail;
	}

    codecCtx = avcodec_alloc_context3(codec);
	if (!codecCtx) {
		printf("avcodec_alloc_context3 failed\n");
		goto fail;
	}

	codecCtx->bit_rate = pCodecCtx->bit_rate;
	//codecCtx->width = pCodecCtx->width;
	//codecCtx->height = pCodecCtx->height;
	codecCtx->width = width;
	codecCtx->height = height;
	codecCtx->pix_fmt = TARGET_IMAGE_FORMAT;
	codecCtx->codec_type = AVMEDIA_TYPE_VIDEO;
	codecCtx->time_base.num = pCodecCtx->time_base.num;
	codecCtx->time_base.den = pCodecCtx->time_base.den;

	if (!codec || avcodec_open2(codecCtx, codec, NULL) < 0) {
	  	printf("avcodec_open2() failed\n");
		goto fail;
	}

	frame = av_frame_alloc();

	if (!frame) {
		goto fail;
	}

	// Determine required buffer size and allocate buffer
	int numBytes = avpicture_get_size(TARGET_IMAGE_FORMAT, codecCtx->width, codecCtx->height);
	buffer = (uint8_t *) av_malloc(numBytes * sizeof(uint8_t));

	// set the frame parameters
	frame->format = TARGET_IMAGE_FORMAT;
    frame->width = codecCtx->width;
    frame->height = codecCtx->height;

    avpicture_fill(((AVPicture *)frame),
    		buffer,
    		TARGET_IMAGE_FORMAT,
    		codecCtx->width,
    		codecCtx->height);

	/*struct SwsContext *scalerCtx = sws_getContext(pCodecCtx->width,
			pCodecCtx->height,
			pCodecCtx->pix_fmt,
			//pCodecCtx->width,
			//pCodecCtx->height,
			width,
			height,
			TARGET_IMAGE_FORMAT,
	        SWS_BILINEAR,
	        NULL,
	        NULL,
	        NULL);

	if (!scalerCtx) {
		printf("sws_getContext() failed\n");
		goto fail;
	}*/

    sws_scale(state->sws_ctx,
    		(const uint8_t * const *) pFrame->data,
    		pFrame->linesize,
    		0,
    		pFrame->height,
    		frame->data,
    		frame->linesize);

	ret = avcodec_encode_video2(codecCtx, avpkt, frame, got_packet_ptr);

	if (ret >= 0 && state->native_window) {
		ANativeWindow_setBuffersGeometry(state->native_window, width, height, WINDOW_FORMAT_RGBA_8888);

		ANativeWindow_Buffer windowBuffer;

		if (ANativeWindow_lock(state->native_window, &windowBuffer, NULL) == 0) {
			int h = 0;

			for (h = 0; h < height; h++)  {
			  memcpy((uint8_t *)windowBuffer.bits + h * windowBuffer.stride * 4,
			         buffer + h * frame->linesize[0],
			         width*4);
			}

			ANativeWindow_unlockAndPost(state->native_window);
		}
	}

	if (ret < 0) {
		*got_packet_ptr = 0;
	}

	fail:
	av_frame_free(&frame);

	if (buffer) {
		av_free(buffer);
	}

	if (codecCtx) {
		avcodec_close(codecCtx);
	    av_free(codecCtx);
	}

	//if (scalerCtx) {
	//	sws_freeContext(scalerCtx);
	//}

	if (ret < 0 || !*got_packet_ptr) {
		av_free_packet(avpkt);
	}
}
Example #25
int SDL_Android_NativeWindow_display_l(ANativeWindow *native_window, SDL_VoutOverlay *overlay)
{
    int retval;

    if (!native_window)
        return -1;

    if (!overlay) {
        ALOGE("SDL_Android_NativeWindow_display_l: NULL overlay");
        return -1;
    }

    if (overlay->w <= 0 || overlay->h <= 0) {
        ALOGE("SDL_Android_NativeWindow_display_l: invalid overlay dimensions(%d, %d)", overlay->w, overlay->h);
        return -1;
    }

    int curr_w = ANativeWindow_getWidth(native_window);
    int curr_h = ANativeWindow_getHeight(native_window);
    int curr_format = ANativeWindow_getFormat(native_window);
    int buff_w = CAPALIGN(overlay->w, 2);
    int buff_h = CAPALIGN(overlay->h, 2);

    AndroidHalFourccDescriptor *overlayDesc = native_window_get_desc(overlay->format);
    if (!overlayDesc) {
        ALOGE("SDL_Android_NativeWindow_display_l: unknown overlay format: %d", overlay->format);
        return -1;
    }

    AndroidHalFourccDescriptor *voutDesc = native_window_get_desc(curr_format);
    if (!voutDesc || voutDesc->hal_format != overlayDesc->hal_format) {
        ALOGD("ANativeWindow_setBuffersGeometry: w=%d, h=%d, f=%.4s(0x%x) => w=%d, h=%d, f=%.4s(0x%x)",
            curr_w, curr_h, (char*) &curr_format, curr_format,
            buff_w, buff_h, (char*) &overlay->format, overlay->format);
        retval = ANativeWindow_setBuffersGeometry(native_window, buff_w, buff_h, overlayDesc->hal_format);
        if (retval < 0) {
            ALOGE("SDL_Android_NativeWindow_display_l: ANativeWindow_setBuffersGeometry: failed %d", retval);
            return retval;
        }

        if (!voutDesc) {
            ALOGE("SDL_Android_NativeWindow_display_l: unknown hal format %d", curr_format);
            return -1;
        }
    }

    ANativeWindow_Buffer out_buffer;
    retval = ANativeWindow_lock(native_window, &out_buffer, NULL);
    if (retval < 0) {
        ALOGE("SDL_Android_NativeWindow_display_l: ANativeWindow_lock: failed %d", retval);
        return retval;
    }

    if (out_buffer.width != buff_w || out_buffer.height != buff_h) {
        ALOGE("unexpected native window buffer (%p)(w:%d, h:%d, fmt:'%.4s'0x%x), expecting (w:%d, h:%d, fmt:'%.4s'0x%x)",
            native_window,
            out_buffer.width, out_buffer.height, (char*)&out_buffer.format, out_buffer.format,
            buff_w, buff_h, (char*)&overlay->format, overlay->format);
        // TODO: 8 set all black
        ANativeWindow_unlockAndPost(native_window);
        ANativeWindow_setBuffersGeometry(native_window, buff_w, buff_h, overlayDesc->hal_format);
        return -1;
    }

    int render_ret = voutDesc->render(&out_buffer, overlay);
    if (render_ret < 0) {
        // TODO: 8 set all black
        // return after unlock image;
    }

    retval = ANativeWindow_unlockAndPost(native_window);
    if (retval < 0) {
        ALOGE("SDL_Android_NativeWindow_display_l: ANativeWindow_unlockAndPost: failed %d", retval);
        return retval;
    }

    return render_ret;
}
Example #26
	void Java_com_test_surfaceview_TestNative_Draw(JNIEnv* env,jobject obj,jstring jstr)
	{
		LOGI("Java_com_test_surfaceview_TestNative_Draw");
		char* path = jstring2string(env,jstr);
		if(g_assetManager == NULL)
		{
			LOGE("Get AAssetManager ERROR!");
		}
		 AAsset* asset = AAssetManager_open(g_assetManager, path,AASSET_MODE_UNKNOWN);
		 free(path);
		 path = NULL;
		 if(asset == NULL)
			 return ;
		 int  bufferLen = AAsset_getLength(asset);
		 unsigned char* buffer = (unsigned char*)malloc(bufferLen+1);
		 buffer[bufferLen] = 0;
		 int numBytesRead = AAsset_read(asset, buffer, bufferLen);
		 g_buffer = buffer;
		 g_bufferLen = bufferLen;
		 AAsset_close(asset);
		 LOGI("g_bufferLen=%d",g_bufferLen);
		BITMAPFILEHEADER  filehead;
		BITMAPINFOHEADER  head;
		memcpy(&filehead,g_buffer,sizeof(BITMAPFILEHEADER));
		memcpy(&head,g_buffer+sizeof(BITMAPFILEHEADER),sizeof(BITMAPINFOHEADER));
		if(filehead.bfType != 0x4D42)
		{
			free(buffer);
			buffer=NULL;
			return ;
		}
		ANativeWindow_setBuffersGeometry(g_nativeWindow,head.biWidth,head.biHeight,WINDOW_FORMAT_RGB_565);
		g_nWidth = ANativeWindow_getWidth(g_nativeWindow);
		g_nHeight = ANativeWindow_getHeight(g_nativeWindow);
		int nfomat  = ANativeWindow_getFormat(g_nativeWindow);
		LOGI("bfOffBits=%d,g_nWidth=%d,g_nHeight=%d,nfomat=%d",filehead.bfOffBits,g_nWidth,g_nHeight,nfomat);
		ANativeWindow_Buffer windowBuffer;
		if(ANativeWindow_lock(g_nativeWindow,&windowBuffer,NULL) <0 )
		{
			LOGE("cannot lock window");
		}
		else
		{
//			unsigned char* tmp =(unsigned char*)( g_buffer+filehead.bfOffBits);
//			unsigned int* data =(unsigned int*) windowBuffer.bits;
//			for(int i=0;i<head.biHeight;i++)
//			{
//				for(int j=0;j<head.biWidth;j++)
//				{
//					int nFlag = 0;
//					memcpy(&nFlag,tmp+(head.biWidth*i+j)*3,3);
//					data[i*windowBuffer.stride+j] = nFlag;
//				}
//				//memcpy(data+i*buffer.stride*2,tmp+(600-i-1)*800*2,800*2);
//			}
			unsigned short* data =(unsigned short*) windowBuffer.bits;
			unsigned char* tmp = (unsigned char*)(g_buffer+filehead.bfOffBits);
			LOGI("%d,%d,%d",head.biWidth,head.biHeight,head.biBitCount);
			int offsize = head.biBitCount/8;
			int lineByte=(head.biWidth* head.biBitCount/8+3)/4*4;
			LOGI("lineByte=%d",lineByte);
			for(int i=0;i<head.biHeight;i++)
			{
				for(int j=0;j<head.biWidth;j++,tmp+=offsize)
				{
					if(offsize>2)
						data[(head.biHeight-i-1)*windowBuffer.stride+j] = RGB565(*(tmp+2),*(tmp+1),*(tmp));
					else
						memcpy(&data[(head.biHeight-i-1)*windowBuffer.stride+j],tmp,2);
				}
				tmp+=(lineByte-offsize*head.biWidth);
			}
			LOGI("buffer.stride=%d,buffer.format=%d",windowBuffer.stride,windowBuffer.format);
			ANativeWindow_unlockAndPost(g_nativeWindow);
		}
		free(buffer);
		buffer=NULL;
		LOGI("Java_com_test_surfaceview_TestNative_Draw END!");
	}
Example #27
void android_main(struct android_app* app) {
    // Make sure glue isn't stripped.
    app_dummy();

    pressed = false;
    cd = NULL;

    app->userData = NULL;
    app->onAppCmd = handle_app_command;
    app->onInputEvent = handle_input;

    //LOGI("asset: %p", app->activity->assetManager);
    SpriteCollection::am = app->activity->assetManager;


    //CassisDisplay cd2(0);
    CairoMenuSelector cms;
    cd = &cms;

    Log::log<<COPYRIGHTTEXT;
    Log::log.commit();

    if (app->savedState != NULL) {
      Log::log<<"We are starting with a previous saved state; restore from it.";
      Log::log.commit();
      try
	{
	  cms.deserialize((char*)app->savedState);
	}
      catch (DeserializeException de)
	{
	  LOGE("Can not deserialize state! Quitting");
	  return;
	}
      Log::log<<"Done!";
      Log::log.commit();
    }

    while (1) {
        // Read all pending events. If app_has_focus is true, then we are going 
        // to read any events that are ready then render the screen. If we don't
        // have focus, we are going to block and spin around the poll loop until
        // we get focus again, preventing us from doing a bunch of rendering 
        // when the app isn't even visible.
        int ident;
        int events;
        struct android_poll_source* source;

        while ((ident=ALooper_pollAll(app_has_focus ? 0 : -1, NULL, &events, (void**)&source)) >= 0) {
            // Process this event.
            if (source != NULL) {
                source->process(app, source);
            }

            // Check if we are exiting.
            if (app->destroyRequested != 0) {
                LOGI("Engine thread destroy requested!");
                return;
            }
        }


        /* Now that we've dealt with input, draw stuff */
        if (app->window != NULL) {
            ++tick;
	    //            LOGI("Rendering frame %d", tick);
            ANativeWindow_Buffer buffer;
            if (ANativeWindow_lock(app->window, &buffer, NULL) < 0) {
                LOGW("Unable to lock window buffer");
                continue;
            }

            draw_frame(&buffer, cd);

            ANativeWindow_unlockAndPost(app->window);
        }
    }
}
Example #28
__unused JNIEXPORT void JNICALL
Java_com_picsart_studio_gifencoder_GifInfoHandle_bindSurface(JNIEnv *env, jclass __unused handleClass, jlong gifInfo,
                                                    jobject jsurface, jlongArray savedState, jboolean isOpaque) {

    GifInfo *info = (GifInfo *) (intptr_t) gifInfo;
    if (info->surfaceDescriptor == NULL) {
        info->surfaceDescriptor = malloc(sizeof(SurfaceDescriptor));
        if (!initSurfaceDescriptor(info->surfaceDescriptor, env)) {
            free(info->surfaceDescriptor);
            info->surfaceDescriptor = NULL;
            return;
        }
    }

    POLL_TYPE eftd_ctr;
    int pollResult;

    while (1) {
        pollResult = poll(&info->surfaceDescriptor->eventPollFd, 1, 0);
        if (pollResult == 0)
            break;
        else if (pollResult > 0) {
            if (read(info->surfaceDescriptor->eventPollFd.fd, &eftd_ctr, POLL_TYPE_SIZE) != POLL_TYPE_SIZE) {
                throwException(env, ILLEGAL_STATE_EXCEPTION_ERRNO, "Read on flushing failed");
                return;
            }
        }
        else {
            throwException(env, ILLEGAL_STATE_EXCEPTION_ERRNO, "Poll on flushing failed");
            return;
        }
    }

    const int32_t windowFormat = isOpaque == JNI_TRUE ? WINDOW_FORMAT_RGBX_8888 : WINDOW_FORMAT_RGBA_8888;
    info->isOpaque = isOpaque;

    struct ANativeWindow *window = ANativeWindow_fromSurface(env, jsurface);
    if (ANativeWindow_setBuffersGeometry(window, (int32_t) info->gifFilePtr->SWidth,
                                         (int32_t) info->gifFilePtr->SHeight,
                                         windowFormat) != 0) {
        ANativeWindow_release(window);
        throwException(env, ILLEGAL_STATE_EXCEPTION_ERRNO, "Buffers geometry setting failed");
        return;
    }

    struct ANativeWindow_Buffer buffer = {.bits =NULL};
    void *oldBufferBits;

    if (ANativeWindow_lock(window, &buffer, NULL) != 0) {
        ANativeWindow_release(window);
        throwException(env, ILLEGAL_STATE_EXCEPTION_ERRNO, "Window lock failed");
        return;
    }
    const size_t bufferSize = buffer.stride * buffer.height * sizeof(argb);

    info->stride = buffer.stride;
    long invalidationDelayMillis;
    if (info->surfaceDescriptor->surfaceBackupPtr) {
        memcpy(buffer.bits, info->surfaceDescriptor->surfaceBackupPtr, bufferSize);
        invalidationDelayMillis = 0;
        info->surfaceDescriptor->renderHelper = 1;
        info->surfaceDescriptor->slurpHelper = 0;
    }
    else {
        if (savedState != NULL){
            invalidationDelayMillis = restoreSavedState(info, env, savedState, buffer.bits);
            if (invalidationDelayMillis <0)
                invalidationDelayMillis =0;
        }
        else
            invalidationDelayMillis = 0;
        info->surfaceDescriptor->renderHelper = 0;
        info->surfaceDescriptor->slurpHelper = 1;
    }

    info->lastFrameRemainder = -1;
    ANativeWindow_unlockAndPost(window);

    if (info->loopCount != 0 && info->currentLoop == info->loopCount) {
        ANativeWindow_release(window);
        pollResult = poll(&info->surfaceDescriptor->eventPollFd, 1, -1);
        if (pollResult < 0) {
            throwException(env, ILLEGAL_STATE_EXCEPTION_ERRNO, "animation end poll failed");
        }
        return;
    }

    pthread_t thread;
    if (pthread_create(&thread, NULL, slurp, info) != 0) {
        ANativeWindow_release(window);
        throwException(env, ILLEGAL_STATE_EXCEPTION_ERRNO, "pthread_create failed");
        return;
    }

    while (1) {
        pollResult = poll(&info->surfaceDescriptor->eventPollFd, 1, (int) invalidationDelayMillis);
        long renderingStartTime = getRealTime();

        if (pollResult < 0) {
            throwException(env, ILLEGAL_STATE_EXCEPTION_ERRNO, "Poll failed");
            break;
        }
        else if (pollResult > 0) {
            if (info->surfaceDescriptor->surfaceBackupPtr == NULL) {
                info->surfaceDescriptor->surfaceBackupPtr = malloc(bufferSize);
                if (info->surfaceDescriptor->surfaceBackupPtr == NULL) {
                    throwException(env, OUT_OF_MEMORY_ERROR, OOME_MESSAGE);
                    break;
                }
            }
            memcpy(info->surfaceDescriptor->surfaceBackupPtr, buffer.bits, bufferSize);
            break;
        }
        oldBufferBits = buffer.bits;
        THROW_AND_BREAK_ON_NONZERO_RESULT(ANativeWindow_lock(window, &buffer, NULL), "Window lock failed");

        if (info->currentIndex == 0)
            prepareCanvas(buffer.bits, info);
        else
            memcpy(buffer.bits, oldBufferBits, bufferSize);

        pthread_mutex_lock(&info->surfaceDescriptor->renderMutex);
        while (info->surfaceDescriptor->renderHelper == 0) {
            pthread_cond_wait(&info->surfaceDescriptor->renderCond, &info->surfaceDescriptor->renderMutex);
        }
        info->surfaceDescriptor->renderHelper = 0;
        pthread_mutex_unlock(&info->surfaceDescriptor->renderMutex);

        const uint_fast32_t frameDuration = getBitmap(buffer.bits, info);

        pthread_mutex_lock(&info->surfaceDescriptor->slurpMutex);
        info->surfaceDescriptor->slurpHelper = 1;
        pthread_cond_signal(&info->surfaceDescriptor->slurpCond);
        pthread_mutex_unlock(&info->surfaceDescriptor->slurpMutex);

        ANativeWindow_unlockAndPost(window);

        invalidationDelayMillis = calculateInvalidationDelay(info, renderingStartTime, frameDuration);

        if (info->lastFrameRemainder >= 0) {
            invalidationDelayMillis = info->lastFrameRemainder;
            info->lastFrameRemainder = -1;
        }
    }

    ANativeWindow_release(window);
    pthread_mutex_lock(&info->surfaceDescriptor->slurpMutex);
    info->surfaceDescriptor->slurpHelper = 2;
    pthread_cond_signal(&info->surfaceDescriptor->slurpCond);
    pthread_mutex_unlock(&info->surfaceDescriptor->slurpMutex);
    THROW_ON_NONZERO_RESULT(pthread_join(thread, NULL), "join failed");
}
Example #29
// video thread
void *video_thread(void *minstance){
	playInstance *instance = (playInstance *)minstance;
	LOGE("视频线程开始\n");
	struct timespec time;
	time.tv_sec=10; // wait at most 10 seconds on a slow network
	time.tv_nsec=0;
	struct threadmsg msg;
	int packet_count = 0;

	while(1){
		if(instance->stop){
			break;
		}
		msg.data=NULL;

		thread_queue_get(instance->video_queue,&time,&msg);

		if(msg.msgtype==-1){ // normal exit
			LOGE("video thread exiting normally\n");
			break;
		}

		if(msg.data ==NULL){
			LOGE("视频线程超时退出");

			break;
		}
		AVPacket *packet_p = msg.data;
		AVPacket pavpacket = *packet_p;

		packet_count ++;

		if(packet_count == 1){ // got the first video packet
			instance->vs->video_start_time = av_gettime();
			LOGE("video start time %lld\n",instance->vs->video_start_time);
		}

		if(instance->disable_video){
			av_free_packet(packet_p);
			av_free(msg.data);
			continue;
		}
		ANativeWindow_Buffer windowBuffer;

		// delay to sync with the packet's PTS
		int64_t pkt_pts = pavpacket.pts;
		double show_time = pkt_pts * (instance->vs->video_time_base);
		int64_t show_time_micro = show_time * 1000000;
		int64_t played_time = av_gettime() - instance->vs->video_start_time;
		int64_t delta_time = show_time_micro - played_time;
//		LOGE("played %lld, PTS time: %lld, delta: %lld\n",played_time,show_time_micro,delta_time);
		if(delta_time< -(0.2 * 1000000)){
			LOGE("视频跳帧\n");
		}else if(delta_time>0){
			av_usleep(delta_time);
		}

		int frame_finished=0;
		avcodec_decode_video2(instance->vs->pCodecCtx, instance->vs->pFrame, &frame_finished,&pavpacket); // decode pavpacket into pFrame
		if(frame_finished){
			sws_scale // convert the decoded frame from YUV420P to RGBA8888
				(
					instance->vs->sws_ctx,
					(uint8_t const * const *)(instance->vs->pFrame)->data,
					(instance->vs->pFrame)->linesize,
					0,
					instance->vs->pCodecCtx->height,
					instance->vs->RGBAFrame->data,
					instance->vs->RGBAFrame->linesize
				);
			if (!(instance->disable_video) && ANativeWindow_lock(instance->window, &windowBuffer, NULL) < 0) {
				LOGE("cannot lock window");
				continue;
			}else if(!instance->disable_video){
				memcpy(windowBuffer.bits, instance->vs->buffer,  instance->display_width * instance->display_height * 4); // copy the decoded pixels into the surface buffer
				ANativeWindow_unlockAndPost(instance->window); // unlock the surface and post the buffer for display
			}
		}
		av_free_packet(packet_p);
		av_free(msg.data);
	}
	return NULL;
}
Example #30
static void * decode_and_render(FFmpegInfo *pInfo, int nframe)
{
    if (!pInfo) return NULL;

    LOGI("lock native window ...");
    ANativeWindow_acquire(pInfo->window);

    /* acquire window's information: width/height/format */
    AVPixelFormat pix_fmt;
    int win_format = ANativeWindow_getFormat(pInfo->window);
    int win_width = ANativeWindow_getWidth(pInfo->window);
    int win_height = ANativeWindow_getHeight(pInfo->window);
    if (win_format == WINDOW_FORMAT_RGBA_8888)
        pix_fmt = AV_PIX_FMT_RGBA;
    else if (win_format == WINDOW_FORMAT_RGB_565)
        pix_fmt = AV_PIX_FMT_RGB565;
    else {
        LOGE("unsupport window format");
        ANativeWindow_release(pInfo->window);
        return NULL;
    }

    LOGI("alloc decoded buffer w-%d, h-%d, f-%d", win_width, win_height, win_format);
    if (!pInfo->win_data || 
        pInfo->win_width != win_width || 
        pInfo->win_height != win_height || 
        pInfo->win_format != win_format ) {

        /* release old data */
        if (pInfo->win_data) {
            av_free(pInfo->win_data[0]);
            pInfo->win_data[0] = NULL;
        }

        /* alloc decoded buffer */
        int ret = av_image_alloc(pInfo->win_data, pInfo->win_linesize,
                             win_width, win_height, pix_fmt, 1);
        if (ret < 0) {
            LOGE("Could not allocate raw video buffer");            
            ANativeWindow_release(pInfo->window);
            return NULL;
        }
        pInfo->win_bufsize = ret;
        pInfo->win_format = win_format;
        pInfo->win_width = win_width;
        pInfo->win_height = win_height;
    }

    /* read video frame and decode */
    int num_frames = 0;
    LOGI("read video frame and decode");
    pInfo->video_timestamp = -1;
    av_seek_frame(pInfo->fmt_ctx, pInfo->video_stream_idx, pInfo->video_timestamp, AVSEEK_FLAG_BACKWARD);

    pInfo->video_timestamp = 0;
    while (av_read_frame(pInfo->fmt_ctx, &pInfo->pkt) >= 0) {
        if (num_frames >= nframe) {
            break;
        }

        AVPacket orig_pkt = pInfo->pkt;
        /* process video stream */
        if (pInfo->pkt.stream_index == pInfo->video_stream_idx) {
            num_frames ++;
            AVCodecContext* dec_ctx = pInfo->video_dec_ctx;
            AVFrame* frame = pInfo->frame;
            pInfo->video_timestamp = frame->pts;
            LOGI("pkt.flags = %d, frame.pts=%d", pInfo->pkt.flags, frame->pts);

            /* decode video */
            int got_frame = 0;
            int ret = avcodec_decode_video2(dec_ctx, frame, &got_frame, &pInfo->pkt);
            if (ret < 0) {
                LOGE("Error decoding video frame");
                break;
            }

            /* decode success */
            if (got_frame) {
                /* copy video data from aligned buffer into unaligned */
                av_image_copy(pInfo->video_dst_data, pInfo->video_dst_linesize, 
                              (const uint8_t **)(frame->data), frame->linesize,
                              dec_ctx->pix_fmt, dec_ctx->width, dec_ctx->height);

                /* convert to required format */
                int ret = yuv2rgb(dec_ctx->width, dec_ctx->height, dec_ctx->pix_fmt, 
                                  pInfo->video_dst_linesize, pInfo->video_dst_data,
                                  pInfo->win_width, pInfo->win_height, pix_fmt, 
                                  pInfo->win_linesize, pInfo->win_data);

                /* do paint */
                LOGI("do paint on surface, ret=%d, decoded w=%d, h=%d, fmt=from %d to %d", ret, 
                      dec_ctx->width, dec_ctx->height, dec_ctx->pix_fmt, pix_fmt);
                struct ANativeWindow_Buffer data;
                ANativeWindow_lock(pInfo->window, &data, NULL);
                memcpy(data.bits, pInfo->win_data[0], pInfo->win_bufsize); // assumes the locked buffer's stride matches win_linesize
                ANativeWindow_unlockAndPost(pInfo->window);
            }

            /* seek to next key frame */
            av_seek_frame(pInfo->fmt_ctx, pInfo->video_stream_idx, pInfo->video_timestamp, AVSEEK_FLAG_BACKWARD);
        }
        av_free_packet(&orig_pkt);
    }
    //av_free_packet(&pkt);

    ANativeWindow_release(pInfo->window);
    LOGI("decode OK, number=%d", num_frames);
    return NULL;
}