/* Dequeue the next buffer from the ANativeWindow backing this EGL surface
 * and take our own reference on it.  Returns EGL_FALSE if the window could
 * not supply a buffer. */
static EGLBoolean
droid_window_dequeue_buffer(struct dri2_egl_surface *dri2_surf)
{
#if ANDROID_VERSION >= 0x0402
   /* Android 4.2+: dequeueBuffer also hands back a sync-fence fd. */
   int fence_fd;

   if (dri2_surf->window->dequeueBuffer(dri2_surf->window, &dri2_surf->buffer,
                                        &fence_fd))
      return EGL_FALSE;

   /* If access to the buffer is controlled by a sync fence, then block on the
    * fence.
    *
    * It may be more performant to postpone blocking until there is an
    * immediate need to write to the buffer. But doing so would require adding
    * hooks to the DRI2 loader.
    *
    * From the ANativeWindow::dequeueBuffer documentation:
    *
    *    The libsync fence file descriptor returned in the int pointed to by
    *    the fenceFd argument will refer to the fence that must signal
    *    before the dequeued buffer may be written to.  A value of -1
    *    indicates that the caller may access the buffer immediately without
    *    waiting on a fence.  If a valid file descriptor is returned (i.e.
    *    any value except -1) then the caller is responsible for closing the
    *    file descriptor.
    */
   if (fence_fd >= 0) {
      /* From the SYNC_IOC_WAIT documentation in <linux/sync.h>:
       *
       *    Waits indefinitely if timeout < 0.
       */
      int timeout = -1;
      sync_wait(fence_fd, timeout);
      close(fence_fd);
   }

   dri2_surf->buffer->common.incRef(&dri2_surf->buffer->common);
#else
   /* Pre-4.2: no fence plumbing; lockBuffer blocks until the buffer is
    * safe to write. */
   if (dri2_surf->window->dequeueBuffer(dri2_surf->window, &dri2_surf->buffer))
      return EGL_FALSE;

   dri2_surf->buffer->common.incRef(&dri2_surf->buffer->common);
   dri2_surf->window->lockBuffer(dri2_surf->window, dri2_surf->buffer);
#endif

   return EGL_TRUE;
}
// Present one frame through the hwcomposer 1.0 prepare()/set() path,
// gated on the vsync signal and throttled on the retire fence.
void HwComposerBackend_v10::swap(EGLNativeDisplayType display, EGLSurface surface)
{
    // Asserts that the previous retire fence was already consumed: the
    // double negation means this fires whenever retireFenceFd != -1.
    HWC_PLUGIN_ASSERT_ZERO(!(hwc_list->retireFenceFd == -1));

    // Wait for vsync before posting new frame
    // NOTE(review): a bare pthread_cond_wait without a predicate loop can
    // return on spurious wakeups — confirm the vsync signaller makes that
    // acceptable here.
    pthread_mutex_lock(&vsync_mutex);
    pthread_cond_wait(&vsync_cond, &vsync_mutex);
    pthread_mutex_unlock(&vsync_mutex);

    // prepare() must not see the GLES target surface (see comment below).
    hwc_list->dpy = EGL_NO_DISPLAY;
    hwc_list->sur = EGL_NO_SURFACE;
    HWC_PLUGIN_ASSERT_ZERO(hwc_device->prepare(hwc_device, hwc_numDisplays, hwc_mList));

    // (dpy, sur) is the target of SurfaceFlinger's OpenGL ES composition for
    // HWC_DEVICE_VERSION_1_0. They aren't relevant to prepare. The set call
    // should commit this surface atomically to the display along with any
    // overlay layers.
    hwc_list->dpy = eglGetCurrentDisplay();
    hwc_list->sur = eglGetCurrentSurface(EGL_DRAW);

    dump_display_contents(hwc_list);

    HWC_PLUGIN_ASSERT_ZERO(hwc_device->set(hwc_device, hwc_numDisplays, hwc_mList));

    // Block until this frame has retired, then recycle the fence fd so the
    // entry assertion holds on the next call.
    if (hwc_list->retireFenceFd != -1) {
        sync_wait(hwc_list->retireFenceFd, -1);
        close(hwc_list->retireFenceFd);
        hwc_list->retireFenceFd = -1;
    }
}
/* * Hook called by EGL when modifications to the render buffer are done. * This unlocks and post the buffer. * * The window holds a reference to the buffer between dequeueBuffer and * either queueBuffer or cancelBuffer, so clients only need their own * reference if they might use the buffer after queueing or canceling it. * Holding a reference to a buffer after queueing or canceling it is only * allowed if a specific buffer count has been set. * * The fenceFd argument specifies a libsync fence file descriptor for a * fence that must signal before the buffer can be accessed. If the buffer * can be accessed immediately then a value of -1 should be used. The * caller must not use the file descriptor after it is passed to * queueBuffer, and the ANativeWindow implementation is responsible for * closing it. * * Returns 0 on success or -errno on error. */ int HWComposerNativeWindow::queueBuffer(BaseNativeWindowBuffer* buffer, int fenceFd) { TRACE("%lu %d", pthread_self(), fenceFd); HWComposerNativeWindowBuffer* fbnb = (HWComposerNativeWindowBuffer*) buffer; assert(static_cast<HWComposerNativeWindowBuffer *>(buffer) == fbnb); fbnb->fenceFd = fenceFd; pthread_mutex_lock(&_mutex); /* Front buffer hasn't yet been picked up for posting */ while (m_frontBuf && m_frontBuf->busy >= 2) { pthread_cond_wait(&_cond, &_mutex); } assert(fbnb->busy==1); fbnb->busy = 2; m_frontBuf = fbnb; m_freeBufs++; sync_wait(fenceFd, -1); ::close(fenceFd); pthread_cond_signal(&_cond); TRACE("%lu %p %p",pthread_self(), m_frontBuf, fbnb); pthread_mutex_unlock(&_mutex); return 0; }
// Block until the fence signals or `timeout` milliseconds elapse.
// An fd of -1 means "no fence" and is treated as already signalled.
status_t Fence::wait(unsigned int timeout) {
    ATRACE_CALL();
    if (mFenceFd == -1)
        return NO_ERROR;
    const int ret = sync_wait(mFenceFd, timeout);
    if (ret < 0)
        return -errno;
    return status_t(NO_ERROR);
}
// Finish an in-flight frame capture: wait (up to 1s) for the output
// buffer's release fence, then reset all capture state.
void HWCDisplayPrimary::HandleFrameCapture() {
  const int fence = output_buffer_.release_fence_fd;
  if (fence >= 0) {
    frame_capture_status_ = sync_wait(fence, 1000);
    ::close(fence);
    output_buffer_.release_fence_fd = -1;
  }

  frame_capture_buffer_queued_ = false;
  post_processed_output_ = false;
  output_buffer_ = {};
}
/* End-of-frame housekeeping for the emulated machine. */
void zx_frame_end()
{
    unsigned dev;
    // frame done
    /* device_scan_asc() presumably iterates `dev` over attached devices in
       ascending order — TODO confirm macro semantics. */
    device_scan_asc(frame)
        zx_device[dev]->frame();
    video_update();
    //tape_frame();
    /* Note: this is the emulator's own frame-pacing sync_wait() (no
       arguments), not the libsync fence wait used elsewhere. */
    sync_wait();
}
/* Client-side wait for a brw fence; the fence's lock must already be held.
 *
 * Returns true once the fence has signalled, false on timeout or error.
 * The timeout is clamped per backend: the i915 GEM_WAIT ioctl takes a
 * signed 64-bit value, sync_wait() a 32-bit one (larger values degrade to
 * "wait forever").  NOTE(review): the units expected by brw_bo_wait() vs.
 * sync_wait() (ns vs. ms) are not visible here — confirm at the caller.
 */
static bool
brw_fence_client_wait_locked(struct brw_context *brw, struct brw_fence *fence,
                             uint64_t timeout)
{
   int32_t timeout_i32;

   if (fence->signalled)
      return true;

   switch (fence->type) {
   case BRW_FENCE_TYPE_BO_WAIT:
      if (!fence->batch_bo) {
         /* There may be no batch if intel_batchbuffer_flush() failed. */
         return false;
      }

      /* DRM_IOCTL_I915_GEM_WAIT uses a signed 64 bit timeout and returns
       * immediately for timeouts <= 0.  The best we can do is to clamp the
       * timeout to INT64_MAX. This limits the maximum timeout from 584 years to
       * 292 years - likely not a big deal.
       */
      if (timeout > INT64_MAX)
         timeout = INT64_MAX;

      if (brw_bo_wait(fence->batch_bo, timeout) != 0)
         return false;

      /* Signalled: drop the batch reference so the BO can be recycled. */
      fence->signalled = true;
      brw_bo_unreference(fence->batch_bo);
      fence->batch_bo = NULL;

      return true;

   case BRW_FENCE_TYPE_SYNC_FD:
      if (fence->sync_fd == -1)
         return false;

      /* sync_wait() takes an int32 timeout; anything larger becomes an
       * indefinite wait (-1). */
      if (timeout > INT32_MAX)
         timeout_i32 = -1;
      else
         timeout_i32 = timeout;

      if (sync_wait(fence->sync_fd, timeout_i32) == -1)
         return false;

      fence->signalled = true;
      return true;
   }

   assert(!"bad enum brw_fence_type");
   return false;
}
// Wait for the surface's release fence before reusing it for drawing,
// then close and clear the fd.  A nullptr surface or missing fence is a
// no-op.
void MinuiBackendAdf::Sync(GRSurfaceAdf* surf) {
  // Upper bound (ms) on how long we are willing to block on the fence.
  static constexpr unsigned int warningTimeout = 3000;

  if (surf == nullptr) return;

  if (surf->fence_fd >= 0) {
    int err = sync_wait(surf->fence_fd, warningTimeout);
    if (err < 0) {
      // perror() already appends ": <strerror>\n"; the old trailing '\n'
      // in the message split the diagnostic across two lines.
      perror("adf sync fence wait error");
    }

    close(surf->fence_fd);
    surf->fence_fd = -1;
  }
}
void RotMem::Mem::setReleaseFd(const int& fence) { int ret = 0; if(mRelFence[mCurrOffset] >= 0) { //Wait for previous usage of this buffer to be over. //Can happen if rotation takes > vsync and a fast producer. i.e queue //happens in subsequent vsyncs either because content is 60fps or //because the producer is hasty sometimes. ret = sync_wait(mRelFence[mCurrOffset], 1000); if(ret < 0) { ALOGE("%s: sync_wait error!! error no = %d err str = %s", __FUNCTION__, errno, strerror(errno)); } ::close(mRelFence[mCurrOffset]); } mRelFence[mCurrOffset] = fence; }
// Wait for the fence to signal, logging an error if it takes longer than
// warningTimeout ms.  mFenceFd == -1 means "no fence" (immediate success).
// Vendor note: instead of then waiting forever (as the name implies), the
// MTK change below gives up after the warning timeout and reports success.
status_t Fence::waitForever(unsigned int warningTimeout, const char* logname) {
    ATRACE_CALL();
    if (mFenceFd == -1) {
        return NO_ERROR;
    }
    int err = sync_wait(mFenceFd, warningTimeout);
    if (err < 0 && errno == ETIME) {
        ALOGE("%s: fence %d didn't signal in %u ms", logname, mFenceFd,
              warningTimeout);
        // [MTK] {{{
        // temporarily remove to avoid infinite waiting
        //err = sync_wait(mFenceFd, TIMEOUT_NEVER);
        return status_t(NO_ERROR);
        // [MTK] }}}
    }
    return err < 0 ? -errno : status_t(NO_ERROR);
}
/*
 * Consumer side of the MPSC stress test: each iteration merges one fence
 * from every producer timeline, waits until all producers have incremented
 * the shared counter, verifies it, then releases the producers.
 */
static int mpcs_consumer_thread(void)
{
	int fence, merged, tmp, valid, it, i;
	int *producer_timelines = test_data_mpsc.producer_timelines;
	int consumer_timeline = test_data_mpsc.consumer_timeline;
	int iterations = test_data_mpsc.iterations;
	int n = test_data_mpsc.threads;

	for (it = 1; it <= iterations; it++) {
		/* Merge one fence per producer timeline into a single fence. */
		fence = sw_sync_fence_create(producer_timelines[0], "name", it);
		for (i = 1; i < n; i++) {
			tmp = sw_sync_fence_create(producer_timelines[i],
						   "name", it);
			merged = sync_merge("name", tmp, fence);
			sw_sync_fence_destroy(tmp);
			sw_sync_fence_destroy(fence);
			fence = merged;
		}

		valid = sw_sync_fence_is_valid(fence);
		ASSERT(valid, "Failure merging fences\n");

		/*
		 * Make sure we see an increment from every producer thread.
		 * Vary the means by which we wait.
		 *
		 * Fix: this previously tested `iterations % 8`, which is
		 * loop-invariant, so only one waiting strategy was ever
		 * exercised per run; key off the current iteration instead.
		 */
		if (it % 8 != 0) {
			ASSERT(sync_wait(fence, -1) > 0,
			       "Producers did not increment as expected\n");
		} else {
			ASSERT(busy_wait_on_fence(fence) == 0,
			       "Producers did not increment as expected\n");
		}

		ASSERT(test_data_mpsc.counter == n * it,
		       "Counter value mismatch!\n");

		/* Release the producer threads */
		ASSERT(sw_sync_timeline_inc(consumer_timeline, 1) == 0,
		       "Failure releasing producer threads\n");

		sw_sync_fence_destroy(fence);
	}

	return 0;
}
// Legacy (pre-fence) dequeueBuffer entry point: forwards to the fenced
// dequeueBuffer and, where fences exist, waits on and closes the fence so
// the buffer is immediately writable, matching the old contract.
int BaseNativeWindow::_dequeueBuffer_DEPRECATED(ANativeWindow* window,
                                                ANativeWindowBuffer** buffer)
{
    BaseNativeWindowBuffer* temp = static_cast<BaseNativeWindowBuffer*>(*buffer);
    int fenceFd = -1;
    int ret = static_cast<BaseNativeWindow*>(window)->dequeueBuffer(&temp, &fenceFd);
    *buffer = static_cast<ANativeWindowBuffer*>(temp);

    /* Fence fds exist from Android 4.2 (JB MR1) onward.  The old check
     * `MAJOR>=4 && MINOR>=2` wrongly compiled this out for Android >= 5.0,
     * where the minor version resets to 0. */
#if (ANDROID_VERSION_MAJOR >= 4 && ANDROID_VERSION_MINOR >= 2) || ANDROID_VERSION_MAJOR >= 5
    if (fenceFd >= 0)
    {
        sync_wait(fenceFd, -1);
        close(fenceFd);
    }
#endif
    return ret;
}
/*
 * Producer side of the MPSC stress test: each iteration waits for the
 * consumer's fence, increments the shared counter under the lock, then
 * advances its own timeline to signal the consumer.
 */
static int mpsc_producer_thread(void *d)
{
	int id = (long)d;
	int fence, valid, i;
	int *producer_timelines = test_data_mpsc.producer_timelines;
	int consumer_timeline = test_data_mpsc.consumer_timeline;
	int iterations = test_data_mpsc.iterations;

	for (i = 0; i < iterations; i++) {
		fence = sw_sync_fence_create(consumer_timeline, "fence", i);
		valid = sw_sync_fence_is_valid(fence);
		ASSERT(valid, "Failure creating fence\n");

		/*
		 * Wait for the consumer to finish. Use alternate
		 * means of waiting on the fence.
		 *
		 * Fix: this previously tested `(iterations + id) % 8`,
		 * which is loop-invariant, so each producer only ever used
		 * one waiting strategy; key off the current iteration.
		 */
		if ((i + id) % 8 != 0) {
			ASSERT(sync_wait(fence, -1) > 0,
			       "Failure waiting on fence\n");
		} else {
			ASSERT(busy_wait_on_fence(fence) == 0,
			       "Failure waiting on fence\n");
		}

		/*
		 * Every producer increments the counter, the consumer
		 * checks and erases it.
		 */
		pthread_mutex_lock(&test_data_mpsc.lock);
		test_data_mpsc.counter++;
		pthread_mutex_unlock(&test_data_mpsc.lock);

		ASSERT(sw_sync_timeline_inc(producer_timelines[id], 1) == 0,
		       "Error advancing producer timeline\n");

		sw_sync_fence_destroy(fence);
	}

	return 0;
}
// Post `buffer` as the framebuffer layer via hwcomposer, then throttle by
// waiting on the *previous* frame's retire fence.
void present(void *user_data, struct ANativeWindow *window,
             struct ANativeWindowBuffer *buffer)
{
    // Stash last frame's retire fence before set() installs a new one.
    int prevRetire = mList[0]->retireFenceFd;
    mList[0]->retireFenceFd = -1;

    fblayer->handle = buffer->handle;
    fblayer->acquireFenceFd = HWCNativeBufferGetFence(buffer);
    fblayer->releaseFenceFd = -1;

    int rc = hwcDevicePtr->prepare(hwcDevicePtr, HWC_NUM_DISPLAY_TYPES, mList);
    assert(rc == 0);

    rc = hwcDevicePtr->set(hwcDevicePtr, HWC_NUM_DISPLAY_TYPES, mList);
    assert(rc == 0);

    // Attach the release fence hwc produced to the buffer just posted.
    HWCNativeBufferSetFence(buffer, fblayer->releaseFenceFd);

    if (prevRetire != -1)
    {
        sync_wait(prevRetire, -1);
        close(prevRetire);
    }
}
// Post one native window buffer through hwcomposer, throttling on the
// previous frame's retire fence.
void HWComposer::present(HWComposerNativeWindowBuffer *buffer)
{
    // Remember the retire fence from the last set() call.
    int lastRetire = mlist[0]->retireFenceFd;
    mlist[0]->retireFenceFd = -1;

    fblayer->handle = buffer->handle;
    fblayer->acquireFenceFd = getFenceBufferFd(buffer);
    fblayer->releaseFenceFd = -1;

    int status = hwcdevice->prepare(hwcdevice, num_displays, mlist);
    HWC_PLUGIN_EXPECT_ZERO(status);

    status = hwcdevice->set(hwcdevice, num_displays, mlist);
    HWC_PLUGIN_EXPECT_ZERO(status);

    // Hand hwc's release fence back to the buffer we just posted.
    setFenceBufferFd(buffer, fblayer->releaseFenceFd);

    if (lastRetire != -1)
    {
        sync_wait(lastRetire, -1);
        close(lastRetire);
    }
}
int WaylandNativeWindow::queueBuffer(BaseNativeWindowBuffer* buffer, int fenceFd) { WaylandNativeWindowBuffer *wnb = (WaylandNativeWindowBuffer*) buffer; int ret = 0; HYBRIS_TRACE_BEGIN("wayland-platform", "queueBuffer", "-%p", wnb); lock(); if (debugenvchecked == 0) { if (getenv("HYBRIS_WAYLAND_DUMP_BUFFERS") != NULL) debugenvchecked = 2; else debugenvchecked = 1; } if (debugenvchecked == 2) { HYBRIS_TRACE_BEGIN("wayland-platform", "queueBuffer_dumping_buffer", "-%p", wnb); hybris_dump_buffer_to_file(wnb->getNativeBuffer()); HYBRIS_TRACE_END("wayland-platform", "queueBuffer_dumping_buffer", "-%p", wnb); } #if ANDROID_VERSION_MAJOR>=4 && ANDROID_VERSION_MINOR>=2 HYBRIS_TRACE_BEGIN("wayland-platform", "queueBuffer_waiting_for_fence", "-%p", wnb); if (fenceFd >= 0) { sync_wait(fenceFd, -1); close(fenceFd); } HYBRIS_TRACE_END("wayland-platform", "queueBuffer_waiting_for_fence", "-%p", wnb); #endif HYBRIS_TRACE_COUNTER("wayland-platform", "fronted.size", "%i", fronted.size()); HYBRIS_TRACE_END("wayland-platform", "queueBuffer", "-%p", wnb); unlock(); return NO_ERROR; }
// Blit srcHandle into destHandle at destRect via the IMG gralloc helper.
// Synchronous callers block until the blit's fence signals; in both modes
// we own the returned fence fd and must close it.
bool PlatfBufferManager::blit(buffer_handle_t srcHandle, buffer_handle_t destHandle,
                              const crop_t& destRect, bool filter, bool async)
{
    int releaseFence;

    int err = gralloc_blit_handle_to_handle_img(mGralloc, srcHandle, destHandle,
                                                destRect.w, destRect.h,
                                                destRect.x, destRect.y,
                                                0, -1, &releaseFence);
    if (err) {
        ETRACE("Blit failed");
        return false;
    }

    if (!async)
        sync_wait(releaseFence, -1);
    close(releaseFence);

    return true;
}
// Unlock a previously locked buffer through the gralloc mapper HAL.
// Returns the release fence fd (caller takes ownership), or -1 when the
// buffer may be reused immediately.
int Mapper::unlock(buffer_handle_t bufferHandle) const
{
    // The HAL interface takes a non-const handle even though we don't mutate it.
    auto buffer = const_cast<native_handle_t*>(bufferHandle);

    int releaseFence = -1;
    Error error;
    auto ret = mMapper->unlock(buffer,
                               [&](const auto& tmpError,
                                   const auto& tmpReleaseFence) {
                                   error = tmpError;
                                   if (error != Error::NONE) {
                                       return;
                                   }

                                   // The callback's fence handle is only valid
                                   // for the duration of the call, so dup the
                                   // fd to keep it alive for our caller.
                                   auto fenceHandle = tmpReleaseFence.getNativeHandle();
                                   if (fenceHandle && fenceHandle->numFds == 1) {
                                       int fd = dup(fenceHandle->data[0]);
                                       if (fd >= 0) {
                                           releaseFence = fd;
                                       } else {
                                           // dup failed (fd exhaustion?): fall
                                           // back to waiting synchronously so
                                           // the buffer is still safe to reuse.
                                           ALOGD("failed to dup unlock release fence");
                                           sync_wait(fenceHandle->data[0], -1);
                                       }
                                   }
                               });

    if (!ret.isOk()) {
        error = kTransactionError;
    }

    if (error != Error::NONE) {
        ALOGE("unlock(%p) failed with %d", buffer, error);
    }

    return releaseFence;
}
// Standalone hwcomposer smoke test: bring up EGL on a HWComposerNativeWindow
// and render an animated GLES2 shader, posting each frame to the display
// through the hwc 1.x prepare()/set() path and recycling its fences.
int main(int argc, char **argv)
{
    EGLDisplay display;
    EGLConfig ecfg;
    EGLint num_config;
    // some attributes to set up our egl-interface
    EGLint attr[] = {
        EGL_BUFFER_SIZE, 32,
        EGL_RENDERABLE_TYPE, EGL_OPENGL_ES2_BIT,
        EGL_NONE
    };
    EGLSurface surface;
    EGLint ctxattr[] = {
        EGL_CONTEXT_CLIENT_VERSION, 2,
        EGL_NONE
    };
    EGLContext context;
    EGLBoolean rv;
    int err;

    // Open the hwcomposer HAL and unblank the primary display.
    hw_module_t *hwcModule = 0;
    hwc_composer_device_1_t *hwcDevicePtr = 0;

    err = hw_get_module(HWC_HARDWARE_MODULE_ID, (const hw_module_t **) &hwcModule);
    assert(err == 0);

    err = hwc_open_1(hwcModule, &hwcDevicePtr);
    assert(err == 0);

    hwcDevicePtr->blank(hwcDevicePtr, 0, 0);

    // Query the first display config for width/height.
    uint32_t configs[5];
    size_t numConfigs = 5;

    err = hwcDevicePtr->getDisplayConfigs(hwcDevicePtr, 0, configs, &numConfigs);
    assert (err == 0);

    int32_t attr_values[2];
    uint32_t attributes[] = { HWC_DISPLAY_WIDTH, HWC_DISPLAY_HEIGHT, HWC_DISPLAY_NO_ATTRIBUTE };

    hwcDevicePtr->getDisplayAttributes(hwcDevicePtr, 0, configs[0], attributes, attr_values);

    printf("width: %i height: %i\n", attr_values[0], attr_values[1]);

    HWComposerNativeWindow *win = new HWComposerNativeWindow(attr_values[0], attr_values[1], HAL_PIXEL_FORMAT_RGBA_8888);

    display = eglGetDisplay(NULL);
    assert(eglGetError() == EGL_SUCCESS);
    assert(display != EGL_NO_DISPLAY);

    rv = eglInitialize(display, 0, 0);
    assert(eglGetError() == EGL_SUCCESS);
    assert(rv == EGL_TRUE);

    eglChooseConfig((EGLDisplay) display, attr, &ecfg, 1, &num_config);
    assert(eglGetError() == EGL_SUCCESS);
    // NOTE(review): rv still holds eglInitialize's result here — the return
    // value of eglChooseConfig is not captured before this assert.
    assert(rv == EGL_TRUE);

    surface = eglCreateWindowSurface((EGLDisplay) display, ecfg, (EGLNativeWindowType) static_cast<ANativeWindow *> (win), NULL);
    assert(eglGetError() == EGL_SUCCESS);
    assert(surface != EGL_NO_SURFACE);

    context = eglCreateContext((EGLDisplay) display, ecfg, EGL_NO_CONTEXT, ctxattr);
    assert(eglGetError() == EGL_SUCCESS);
    assert(context != EGL_NO_CONTEXT);

    assert(eglMakeCurrent((EGLDisplay) display, surface, surface, context) == EGL_TRUE);

    const char *version = (const char *)glGetString(GL_VERSION);
    assert(version);
    printf("%s\n",version);

    // Build a two-layer hwc contents list: layer 0 is a (skipped) FRAMEBUFFER
    // layer, layer 1 is the FRAMEBUFFER_TARGET we post rendered buffers into.
    size_t size = sizeof(hwc_display_contents_1_t) + 2 * sizeof(hwc_layer_1_t);
    hwc_display_contents_1_t *list = (hwc_display_contents_1_t *) malloc(size);
    hwc_display_contents_1_t **mList = (hwc_display_contents_1_t **) malloc(HWC_NUM_DISPLAY_TYPES * sizeof(hwc_display_contents_1_t *));
    const hwc_rect_t r = { 0, 0, attr_values[0], attr_values[1] };

    // Point every display slot at the same contents list.
    int counter = 0;
    for (; counter < HWC_NUM_DISPLAY_TYPES; counter++)
        mList[counter] = list;

    hwc_layer_1_t *layer = &list->hwLayers[0];
    memset(layer, 0, sizeof(hwc_layer_1_t));
    layer->compositionType = HWC_FRAMEBUFFER;
    layer->hints = 0;
    layer->flags = 0;
    layer->handle = 0;
    layer->transform = 0;
    layer->blending = HWC_BLENDING_NONE;
    layer->sourceCrop = r;
    layer->displayFrame = r;
    layer->visibleRegionScreen.numRects = 1;
    layer->visibleRegionScreen.rects = &layer->displayFrame;
    layer->acquireFenceFd = -1;
    layer->releaseFenceFd = -1;

    layer = &list->hwLayers[1];
    memset(layer, 0, sizeof(hwc_layer_1_t));
    layer->compositionType = HWC_FRAMEBUFFER_TARGET;
    layer->hints = 0;
    layer->flags = 0;
    layer->handle = 0;
    layer->transform = 0;
    layer->blending = HWC_BLENDING_NONE;
    layer->sourceCrop = r;
    layer->displayFrame = r;
    layer->visibleRegionScreen.numRects = 1;
    layer->visibleRegionScreen.rects = &layer->displayFrame;
    layer->acquireFenceFd = -1;
    layer->releaseFenceFd = -1;

    list->retireFenceFd = -1;
    list->flags = HWC_GEOMETRY_CHANGED;
    list->numHwLayers = 2;

    GLuint vertexShader = load_shader ( vertex_src , GL_VERTEX_SHADER );       // load vertex shader
    GLuint fragmentShader = load_shader ( fragment_src , GL_FRAGMENT_SHADER ); // load fragment shader
    GLuint shaderProgram = glCreateProgram ();                                 // create program object
    glAttachShader ( shaderProgram, vertexShader );                            // and attach both...
    glAttachShader ( shaderProgram, fragmentShader );                          // ... shaders to it

    glLinkProgram ( shaderProgram );    // link the program
    glUseProgram ( shaderProgram );     // and select it for usage

    //// now get the locations (kind of handle) of the shaders variables
    position_loc = glGetAttribLocation ( shaderProgram , "position" );
    phase_loc = glGetUniformLocation ( shaderProgram , "phase" );
    offset_loc = glGetUniformLocation ( shaderProgram , "offset" );
    if ( position_loc < 0 || phase_loc < 0 || offset_loc < 0 ) {
        return 1;
    }

    //glViewport ( 0 , 0 , 800, 600); // commented out so it uses the initial window dimensions
    glClearColor ( 1. , 1. , 1. , 1.);    // background color

    float phase = 0;
    int i, oldretire = -1, oldrelease = -1, oldrelease2 = -1;
    for (i=0; i<1020*60; ++i) {
        glClear(GL_COLOR_BUFFER_BIT);
        glUniform1f ( phase_loc , phase );  // write the value of phase to the shaders phase
        phase = fmodf ( phase + 0.5f , 2.f * 3.141f );    // and update the local variable

        glUniform4f ( offset_loc , offset_x , offset_y , 0.0 , 0.0 );

        glVertexAttribPointer ( position_loc, 3, GL_FLOAT, GL_FALSE, 0, vertexArray );
        glEnableVertexAttribArray ( position_loc );
        glDrawArrays ( GL_TRIANGLE_STRIP, 0, 5 );

        eglSwapBuffers ( (EGLDisplay) display, surface );  // get the rendered buffer to the screen

        // Hand the freshly swapped front buffer to hwc as the FB target.
        HWComposerNativeWindowBuffer *front;
        win->lockFrontBuffer(&front);

        mList[0]->hwLayers[1].handle = front->handle;
        mList[0]->hwLayers[0].handle = NULL;
        mList[0]->hwLayers[0].flags = HWC_SKIP_LAYER;

        // Stash last frame's fences before set() installs new ones.
        oldretire = mList[0]->retireFenceFd;
        oldrelease = mList[0]->hwLayers[1].releaseFenceFd;
        oldrelease2 = mList[0]->hwLayers[0].releaseFenceFd;

        int err = hwcDevicePtr->prepare(hwcDevicePtr, HWC_NUM_DISPLAY_TYPES, mList);
        assert(err == 0);

        err = hwcDevicePtr->set(hwcDevicePtr, HWC_NUM_DISPLAY_TYPES, mList);
        assert(err == 0);
        assert(mList[0]->hwLayers[0].releaseFenceFd == -1);

        win->unlockFrontBuffer(front);

        // Wait on and recycle last frame's release/retire fences.
        if (oldrelease != -1) {
            sync_wait(oldrelease, -1);
            close(oldrelease);
        }
        if (oldrelease2 != -1) {
            sync_wait(oldrelease2, -1);
            close(oldrelease2);
        }
        if (oldretire != -1) {
            sync_wait(oldretire, -1);
            close(oldretire);
        }
    }

    printf("stop\n");

#if 0
    (*egldestroycontext)((EGLDisplay) display, context);
    printf("destroyed context\n");

    (*egldestroysurface)((EGLDisplay) display, surface);
    printf("destroyed surface\n");
    (*eglterminate)((EGLDisplay) display);
    printf("terminated\n");
    android_dlclose(baz);
#endif
}
int WaylandNativeWindow::queueBuffer(BaseNativeWindowBuffer* buffer, int fenceFd) { WaylandNativeWindowBuffer *wnb = (WaylandNativeWindowBuffer*) buffer; int ret = 0; HYBRIS_TRACE_BEGIN("wayland-platform", "queueBuffer", "-%p", wnb); lock(); wnb->busy = 1; unlock(); /* XXX locking/something is a bit fishy here */ HYBRIS_TRACE_BEGIN("wayland-platform", "queueBuffer_wait_for_frame_callback", "-%p", wnb); while (this->frame_callback && ret != -1) { ret = wl_display_dispatch_queue(m_display, this->wl_queue); } if (ret < 0) { TRACE("wl_display_dispatch_queue returned an error"); HYBRIS_TRACE_END("wayland-platform", "queueBuffer_wait_for_frame_callback", "-%p", wnb); check_fatal_error(m_display); return ret; } HYBRIS_TRACE_END("wayland-platform", "queueBuffer_wait_for_frame_callback", "-%p", wnb); lock(); if (debugenvchecked == 0) { if (getenv("HYBRIS_WAYLAND_DUMP_BUFFERS") != NULL) debugenvchecked = 2; else debugenvchecked = 1; } if (debugenvchecked == 2) { HYBRIS_TRACE_BEGIN("wayland-platform", "queueBuffer_dumping_buffer", "-%p", wnb); hybris_dump_buffer_to_file(wnb->getNativeBuffer()); HYBRIS_TRACE_END("wayland-platform", "queueBuffer_dumping_buffer", "-%p", wnb); } #if ANDROID_VERSION_MAJOR>=4 && ANDROID_VERSION_MINOR>=2 HYBRIS_TRACE_BEGIN("wayland-platform", "queueBuffer_waiting_for_fence", "-%p", wnb); sync_wait(fenceFd, -1); close(fenceFd); HYBRIS_TRACE_END("wayland-platform", "queueBuffer_waiting_for_fence", "-%p", wnb); #endif this->frame_callback = wl_surface_frame(m_window->surface); wl_callback_add_listener(this->frame_callback, &frame_listener, this); wl_proxy_set_queue((struct wl_proxy *) this->frame_callback, this->wl_queue); if (wnb->wlbuffer == NULL) { wnb->wlbuffer_from_native_handle(m_android_wlegl); TRACE("%p add listener with %p inside", wnb, wnb->wlbuffer); wl_buffer_add_listener(wnb->wlbuffer, &wl_buffer_listener, this); wl_proxy_set_queue((struct wl_proxy *) wnb->wlbuffer, this->wl_queue); } TRACE("%p DAMAGE AREA: %dx%d", wnb, wnb->width, 
wnb->height); HYBRIS_TRACE_BEGIN("wayland-platform", "queueBuffer_attachdamagecommit", "-resource@%i", wl_proxy_get_id((struct wl_proxy *) wnb->wlbuffer)); wl_surface_attach(m_window->surface, wnb->wlbuffer, 0, 0); wl_surface_damage(m_window->surface, 0, 0, wnb->width, wnb->height); wl_surface_commit(m_window->surface); wl_display_flush(m_display); HYBRIS_TRACE_END("wayland-platform", "queueBuffer_attachdamagecommit", "-resource@%i", wl_proxy_get_id((struct wl_proxy *) wnb->wlbuffer)); //--m_freeBufs; //pthread_cond_signal(&cond); fronted.push_back(wnb); HYBRIS_TRACE_COUNTER("wayland-platform", "fronted.size", "%i", fronted.size()); if (fronted.size() == m_bufList.size()) { HYBRIS_TRACE_BEGIN("wayland-platform", "queueBuffer_wait_for_nonfronted_buffer", "-%p", wnb); /* We have fronted all our buffers, let's wait for one of them to be free */ do { unlock(); ret = wl_display_dispatch_queue(m_display, this->wl_queue); lock(); if (ret == -1) { check_fatal_error(m_display); break; } HYBRIS_TRACE_COUNTER("wayland-platform", "fronted.size", "%i", fronted.size()); if (fronted.size() != m_bufList.size()) break; } while (1); HYBRIS_TRACE_END("wayland-platform", "queueBuffer_wait_for_nonfronted_buffer", "-%p", wnb); } HYBRIS_TRACE_END("wayland-platform", "queueBuffer", "-%p", wnb); unlock(); return NO_ERROR; }
/* Two-phase barrier: release the peer at `seq`, then block until the peer
 * reaches seq + 1.  Returns -1 if the wake fails, otherwise the wait result. */
static int sync_barrier(int fd, int seq)
{
	int err = sync_wake(fd, seq);

	if (err)
		return -1;

	return sync_wait(fd, seq + 1);
}
/* Parent side of the sync pair: wait for `seq` on the parent's fd (fd[0]). */
int sync_wait_parent(int fd[2], int seq)
{
	const int parent_fd = fd[0];

	return sync_wait(parent_fd, seq);
}
/* Child side of the sync pair: wait for `seq` on the child's fd (fd[1]). */
int sync_wait_child(int fd[2], int seq)
{
	const int child_fd = fd[1];

	return sync_wait(child_fd, seq);
}
/*
 * Render every layer marked HWC_COPYBIT into the current render buffer.
 * On success, *fd receives copybit's completion fence (or -1).
 * Returns false when nothing is marked for copybit or setup fails.
 */
bool CopyBit::draw(hwc_context_t *ctx, hwc_display_contents_1_t *list,
                   int dpy, int32_t *fd) {
    // draw layers marked for COPYBIT
    int retVal = true;   // NOTE(review): bool stored in int; overwritten per layer below
    int copybitLayerCount = 0;
    LayerProp *layerProp = ctx->layerProp[dpy];

    if(mCopyBitDraw == false) // there is no layer marked for copybit
        return false ;

    //render buffer
    private_handle_t *renderBuffer = getCurrentRenderBuffer();
    if (!renderBuffer) {
        ALOGE("%s: Render buffer layer handle is NULL", __FUNCTION__);
        return false;
    }

    //Wait for the previous frame to complete before rendering onto it
    if(mRelFd[mCurRenderBufferIndex] >= 0) {
        sync_wait(mRelFd[mCurRenderBufferIndex], 1000);
        close(mRelFd[mCurRenderBufferIndex]);
        mRelFd[mCurRenderBufferIndex] = -1;
    }

    //Clear the visible region on the render buffer
    //XXX: Do this only when needed.
    hwc_rect_t clearRegion;
    getNonWormholeRegion(list, clearRegion);
    clear(renderBuffer, clearRegion);

    // The last layer (framebuffer target) carries the display transform.
    int renderTransform = list->hwLayers[list->numHwLayers - 1].transform;

    for (int i = 0; i < ctx->listStats[dpy].numAppLayers; i++) {
        hwc_layer_1_t *layer = &list->hwLayers[i];
        if(!(layerProp[i].mFlags & HWC_COPYBIT)) {
            ALOGD_IF(DEBUG_COPYBIT, "%s: Not Marked for copybit", __FUNCTION__);
            continue;
        }
        int ret = -1;
        if (list->hwLayers[i].acquireFenceFd != -1 ) {
            // Wait for acquire Fence on the App buffers.
            ret = sync_wait(list->hwLayers[i].acquireFenceFd, 1000);
            if(ret < 0) {
                ALOGE("%s: sync_wait error!! error no = %d err str = %s",
                      __FUNCTION__, errno, strerror(errno));
            }
            close(list->hwLayers[i].acquireFenceFd);
            list->hwLayers[i].acquireFenceFd = -1;
        }
        retVal = drawLayerUsingCopybit(ctx, &(list->hwLayers[i]),
                                       renderBuffer, renderTransform, dpy);
        copybitLayerCount++;
        if(retVal < 0) {
            ALOGE("%s : drawLayerUsingCopybit failed", __FUNCTION__);
        }
    }

    if (copybitLayerCount) {
        copybit_device_t *copybit = getCopyBitDevice();
        // Async mode: flush queued blits and hand back a completion fence.
        if (copybit->flush_get_fence(copybit, fd) < 0)
            *fd = -1;
    }

    return true;
}
// Compose one frame for the virtual display into contents->outbuf,
// choosing between GLES, mixed (G2D + gscaler) and HWC-only paths based
// on the previous frame's composition type.  Always returns 0.
int ExynosVirtualDisplay::set(hwc_display_contents_1_t* contents)
{
    hwc_layer_1_t *overlay_layer = NULL;
    hwc_layer_1_t *target_layer = NULL;
    hwc_layer_1_t *fb_layer = NULL;
    int number_of_fb = 0;

    // Classify the incoming layers; remember the video overlay, the
    // framebuffer layer(s) and the FRAMEBUFFER_TARGET.
    for (size_t i = 0; i < contents->numHwLayers; i++) {
        hwc_layer_1_t &layer = contents->hwLayers[i];

        if (layer.flags & HWC_SKIP_LAYER) {
            ALOGV("skipping layer %d", i);  // NOTE(review): %d with size_t i
            continue;
        }

        if (layer.compositionType == HWC_FRAMEBUFFER) {
            if (!layer.handle)
                continue;
            ALOGV("framebuffer layer %d", i);
            fb_layer = &layer;
            number_of_fb++;
        }

        if (layer.compositionType == HWC_OVERLAY) {
            if (!layer.handle)
                continue;
            if (layer.flags & HWC_SKIP_RENDERING) {
                // Not rendering: pass the acquire fence through as release.
                layer.releaseFenceFd = layer.acquireFenceFd;
                continue;
            }
            ALOGV("overlay layer %d", i);
            overlay_layer = &layer;
            continue;
        }

        if (layer.compositionType == HWC_FRAMEBUFFER_TARGET) {
            if (!layer.handle)
                continue;
            ALOGV("FB target layer %d", i);
            target_layer = &layer;
            continue;
        }
    }

    if (target_layer) {
        int ret = 0;
        ExynosMPPModule &gsc = *mMPPs[0];
        gsc.mDstBuffers[gsc.mCurrentBuf] = contents->outbuf;
        gsc.mDstBufFence[gsc.mCurrentBuf] = contents->outbufAcquireFenceFd;
        private_handle_t *dstHandle = private_handle_t::dynamicCast(contents->outbuf);

        if (mPrevCompositionType == COMPOSITION_GLES ||
            (mPrevCompositionType != mCompositionType)) {
            // GLES did the composition: the target's acquire fence becomes
            // the retire fence, and the outbuf acquire fence is unused.
            ALOGV("COMPOSITION_GLES");
            if (target_layer->acquireFenceFd >= 0)
                contents->retireFenceFd = target_layer->acquireFenceFd;
            if (contents->outbufAcquireFenceFd >= 0) {
                close(contents->outbufAcquireFenceFd);
                contents->outbufAcquireFenceFd = -1;
            }
        } else if (overlay_layer && mPrevCompositionType == COMPOSITION_MIXED) {
            void *newFbHandle = NULL;
            if (fb_layer)
                newFbHandle = (void *)fb_layer->handle;

            // Invalidate cached destination handles when the overlay was
            // resized or the (non-fullsize) fb layer's buffer changed.
            if (isLayerResized(overlay_layer) ||
                (!isLayerFullSize(overlay_layer) && fb_layer && (mPrevFbHandle != newFbHandle))) {
                memset(mDstHandles, 0x0, sizeof(int) * MAX_BUFFER_COUNT);
            }

            // First time we see this output handle: clear it via G2D.
            if (isNewHandle(dstHandle)) {
                if (mIsSecureDRM) {
                    private_handle_t *secureHandle =
                        private_handle_t::dynamicCast(mPhysicallyLinearBuffer);
                    ret = mG2D->runSecureCompositor(*target_layer, dstHandle, secureHandle,
                                                    0xff, 0xff000000, BLIT_OP_SOLID_FILL, true);
                } else {
                    ret = mG2D->runCompositor(*target_layer, dstHandle, 0, 0xff,
                                              0xff000000, BLIT_OP_SOLID_FILL, true, 0, 0, 0);
                }
            }

            if (number_of_fb > 0) {
                // Mixed path: gscaler writes the video, G2D blends the UI.
                ALOGV("COMPOSITION_MIXED");
                ret = gsc.processM2M(*overlay_layer, dstHandle->format, NULL, false);
                if (ret < 0)
                    ALOGE("failed to configure gscaler for video layer");

                if (gsc.mDstConfig.releaseFenceFd >= 0) {
                    if (sync_wait(gsc.mDstConfig.releaseFenceFd, 1000) < 0)
                        ALOGE("sync_wait error");
                    close(gsc.mDstConfig.releaseFenceFd);
                    gsc.mDstConfig.releaseFenceFd = -1;
                }

                if (target_layer->acquireFenceFd > 0) {
                    close(target_layer->acquireFenceFd);
                    target_layer->acquireFenceFd = -1;
                }

                if (mIsSecureDRM) {
                    // Secure DRM: copy the FB target into a physically
                    // linear buffer G2D can read, then blend over the video.
                    ALOGV("Secure DRM playback");
                    private_handle_t *targetBufferHandle =
                        private_handle_t::dynamicCast(target_layer->handle);
                    unsigned long srcAddr = getMappedAddrFBTarget(targetBufferHandle->fd);
                    private_handle_t *secureHandle =
                        private_handle_t::dynamicCast(mPhysicallyLinearBuffer);

                    if (mPrevFbHandle != newFbHandle) {
                        ALOGV("Buffer of fb layer is changed, number_of_fb %d, newFbHandle 0x%x, target_layer->handle 0x%x",
                              number_of_fb, newFbHandle, target_layer->handle);
                        mPrevFbHandle = newFbHandle;
                        if (srcAddr && mPhysicallyLinearBufferAddr) {
                            memcpy((void *)mPhysicallyLinearBufferAddr, (void *)srcAddr,
                                   mWidth * mHeight * 4);
                            mPrevFbHandle = newFbHandle;
                        } else {
                            ALOGE("can't memcpy for secure G2D input buffer");
                        }
                    }

                    ret = mG2D->runSecureCompositor(*target_layer, dstHandle, secureHandle,
                                                    0xff, 0, BLIT_OP_SRC_OVER, false);
                    if (ret < 0) {
                        mG2D->TerminateSecureG2D();
                        unmapAddrFBTarget();
                        ALOGE("runSecureCompositor is failed");
                    }
                } else { /* Normal video layer + Blending */
                    ALOGV("Normal DRM playback");
                    ret = mG2D->runCompositor(*target_layer, dstHandle, 0, 0xff, 0,
                                              BLIT_OP_SRC_OVER, false, 0, 0, 0);
                    if (ret < 0) {
                        ALOGE("runCompositor is failed");
                    }
                    if (target_layer->releaseFenceFd > 0) {
                        close(target_layer->releaseFenceFd);
                        target_layer->releaseFenceFd = -1;
                    }
                }
            } else {
                // Video only: gscaler output fence retires the frame.
                ALOGV("COMPOSITION_HWC");
                ret = gsc.processM2M(*overlay_layer, dstHandle->format, NULL, false);
                if (ret < 0)
                    ALOGE("failed to configure gscaler for video layer");
                contents->retireFenceFd = gsc.mDstConfig.releaseFenceFd;
            }
        }
    }

    mPrevCompositionType = mCompositionType;
    return 0;
}
/* Exercise PIPE_CAP_NATIVE_FENCE_FD: export driver fences as sync-file
 * fds, merge and re-import them, and verify every fence signals. */
static void
test_sync_file_fences(struct pipe_context *ctx)
{
   struct pipe_screen *screen = ctx->screen;
   bool pass = true;
   enum pipe_fd_type fd_type = PIPE_FD_TYPE_NATIVE_SYNC;

   /* Skip quietly when the driver can't export native fence fds. */
   if (!screen->get_param(screen, PIPE_CAP_NATIVE_FENCE_FD))
      return;

   struct cso_context *cso = cso_create_context(ctx, 0);
   struct pipe_resource *buf =
      pipe_buffer_create(screen, 0, PIPE_USAGE_DEFAULT, 1024 * 1024);
   struct pipe_resource *tex =
      util_create_texture2d(screen, 4096, 1024, PIPE_FORMAT_R8_UNORM, 0);
   struct pipe_fence_handle *buf_fence = NULL, *tex_fence = NULL;

   /* Run 2 clears, get fences. */
   uint32_t value = 0;
   ctx->clear_buffer(ctx, buf, 0, buf->width0, &value, sizeof(value));
   ctx->flush(ctx, &buf_fence, PIPE_FLUSH_FENCE_FD);

   struct pipe_box box;
   u_box_2d(0, 0, tex->width0, tex->height0, &box);
   ctx->clear_texture(ctx, tex, 0, &box, &value);
   ctx->flush(ctx, &tex_fence, PIPE_FLUSH_FENCE_FD);
   pass = pass && buf_fence && tex_fence;

   /* Export fences. */
   int buf_fd = screen->fence_get_fd(screen, buf_fence);
   int tex_fd = screen->fence_get_fd(screen, tex_fence);
   pass = pass && buf_fd >= 0 && tex_fd >= 0;

   /* Merge fences. */
   int merged_fd = sync_merge("test", buf_fd, tex_fd);
   pass = pass && merged_fd >= 0;

   /* (Re)import all fences. */
   struct pipe_fence_handle *re_buf_fence = NULL, *re_tex_fence = NULL;
   struct pipe_fence_handle *merged_fence = NULL;
   ctx->create_fence_fd(ctx, &re_buf_fence, buf_fd, fd_type);
   ctx->create_fence_fd(ctx, &re_tex_fence, tex_fd, fd_type);
   ctx->create_fence_fd(ctx, &merged_fence, merged_fd, fd_type);
   pass = pass && re_buf_fence && re_tex_fence && merged_fence;

   /* Run another clear after waiting for everything. */
   struct pipe_fence_handle *final_fence = NULL;
   ctx->fence_server_sync(ctx, merged_fence);
   value = 0xff;
   ctx->clear_buffer(ctx, buf, 0, buf->width0, &value, sizeof(value));
   ctx->flush(ctx, &final_fence, PIPE_FLUSH_FENCE_FD);
   pass = pass && final_fence;

   /* Wait for the last fence. */
   int final_fd = screen->fence_get_fd(screen, final_fence);
   pass = pass && final_fd >= 0;
   pass = pass && sync_wait(final_fd, -1) == 0;

   /* Check that all fences are signalled (timeout 0 polls instead of
    * blocking). */
   pass = pass && sync_wait(buf_fd, 0) == 0;
   pass = pass && sync_wait(tex_fd, 0) == 0;
   pass = pass && sync_wait(merged_fd, 0) == 0;

   pass = pass && screen->fence_finish(screen, NULL, buf_fence, 0);
   pass = pass && screen->fence_finish(screen, NULL, tex_fence, 0);
   pass = pass && screen->fence_finish(screen, NULL, re_buf_fence, 0);
   pass = pass && screen->fence_finish(screen, NULL, re_tex_fence, 0);
   pass = pass && screen->fence_finish(screen, NULL, merged_fence, 0);
   pass = pass && screen->fence_finish(screen, NULL, final_fence, 0);

   /* Cleanup. */
#ifndef PIPE_OS_WINDOWS
   if (buf_fd >= 0)
      close(buf_fd);
   if (tex_fd >= 0)
      close(tex_fd);
   if (merged_fd >= 0)
      close(merged_fd);
   if (final_fd >= 0)
      close(final_fd);
#endif

   screen->fence_reference(screen, &buf_fence, NULL);
   screen->fence_reference(screen, &tex_fence, NULL);
   screen->fence_reference(screen, &re_buf_fence, NULL);
   screen->fence_reference(screen, &re_tex_fence, NULL);
   screen->fence_reference(screen, &merged_fence, NULL);
   screen->fence_reference(screen, &final_fence, NULL);

   cso_destroy_context(cso);
   pipe_resource_reference(&buf, NULL);
   pipe_resource_reference(&tex, NULL);
   util_report_result(pass);
}