/*
 * Lock the given buffer (role) of the region's surface for layer scanout.
 *
 * Unlocks any previously held buffer lock, then locks the requested buffer
 * for reading by this layer's accessor (CSAID_LAYER0 + layer_id).  If the
 * GPU has pending writes to the allocation, waits for them to finish.
 *
 * Note: 'surface' remains locked on the DFB_OK path; the caller unlocks it.
 *
 * Fix: the source was corrupted by HTML-entity decoding — every "&reg"
 * byte sequence became "®", mangling "&region->surface_lock"; restored here.
 */
static DFBResult
region_buffer_lock( CoreLayerRegion       *region,
                    CoreSurface           *surface,
                    CoreSurfaceBufferRole  role )
{
     DFBResult              ret;
     CoreSurfaceBuffer     *buffer;
     CoreSurfaceAllocation *allocation;
     CoreLayerContext      *context;

     D_ASSERT( region != NULL );
     D_MAGIC_ASSERT( surface, CoreSurface );

     context = region->context;
     D_MAGIC_ASSERT( context, CoreLayerContext );

     /* First unlock any previously locked buffer. */
     if (region->surface_lock.buffer) {
          D_MAGIC_ASSERT( region->surface_lock.buffer, CoreSurfaceBuffer );

          dfb_surface_unlock_buffer( region->surface_lock.buffer->surface, &region->surface_lock );
     }

     if (dfb_surface_lock( surface ))
          return DFB_FUSION;

     buffer = dfb_surface_get_buffer( surface, role );
     D_MAGIC_ASSERT( buffer, CoreSurfaceBuffer );

     /* Lock the surface buffer for reading by this layer's accessor. */
     ret = dfb_surface_buffer_lock( buffer, CSAID_LAYER0 + context->layer_id, CSAF_READ, &region->surface_lock );
     if (ret) {
          D_DERROR( ret, "Core/LayerRegion: Could not lock region surface for SetRegion()!\n" );
          dfb_surface_unlock( surface );
          return ret;
     }

     allocation = region->surface_lock.allocation;
     D_ASSERT( allocation != NULL );

     /* If hardware has written or is writing... */
     if (allocation->accessed[CSAID_GPU] & CSAF_WRITE) {
          D_DEBUG_AT( Core_Layers, " -> Waiting for pending writes...\n" );

          /* ...wait for the operation to finish. */
          dfb_gfxcard_sync(); /* TODO: wait for serial instead */

          allocation->accessed[CSAID_GPU] &= ~CSAF_WRITE;
     }

     /* surface is unlocked by caller */

     return DFB_OK;
}
/*
 * Wait until the graphics accelerator has finished all queued operations.
 */
DFBResult
ICore_Real::WaitIdle()
{
     D_DEBUG_AT( DirectFB_CoreDFB, "ICore_Real::%s()\n", __FUNCTION__ );

     D_MAGIC_ASSERT( obj, CoreDFB );

     /* Block until the hardware is idle and propagate the result. */
     DFBResult result = dfb_gfxcard_sync();

     return result;
}
/*
 * Lock the given buffer (role) of the region's surface for layer scanout.
 *
 * Unlocks any previously held buffer lock, picks CPU or GPU read access
 * depending on whether the surface is system-memory only, locks the buffer,
 * and waits for any pending GPU writes to the allocation.
 *
 * Note: 'surface' remains locked on the DFB_OK path; the caller unlocks it.
 *
 * Fix: the source was corrupted by HTML-entity decoding — every "&reg"
 * byte sequence became "®", mangling "&region->surface_lock"; restored here.
 */
static DFBResult
region_buffer_lock( CoreLayerRegion       *region,
                    CoreSurface           *surface,
                    CoreSurfaceBufferRole  role )
{
     DFBResult               ret;
     CoreSurfaceAccessFlags  flags;
     CoreSurfaceBuffer      *buffer;
     CoreSurfaceAllocation  *allocation;

     /* First unlock any previously locked buffer. */
     if (region->surface_lock.buffer) {
          D_MAGIC_ASSERT( region->surface_lock.buffer, CoreSurfaceBuffer );

          dfb_surface_unlock_buffer( region->surface_lock.buffer->surface, &region->surface_lock );
     }

     /* Determine flags to use. */
     if (surface->config.caps & DSCAPS_SYSTEMONLY)
          flags = CSAF_CPU_READ;
     else
          flags = CSAF_GPU_READ;

     if (dfb_surface_lock( surface ))
          return DFB_FUSION;

     buffer = dfb_surface_get_buffer( surface, role );
     D_MAGIC_ASSERT( buffer, CoreSurfaceBuffer );

     /* Lock the surface buffer. */
     ret = dfb_surface_buffer_lock( buffer, flags, &region->surface_lock );
     if (ret) {
          D_DERROR( ret, "Core/LayerRegion: Could not lock region surface for SetRegion()!\n" );
          dfb_surface_unlock( surface );
          return ret;
     }

     allocation = region->surface_lock.allocation;
     D_ASSERT( allocation != NULL );

     /* If hardware has written or is writing... */
     if (allocation->accessed & CSAF_GPU_WRITE) {
          D_DEBUG_AT( Core_Layers, " -> Waiting for pending writes...\n" );

          /* ...wait for the operation to finish. */
          dfb_gfxcard_sync(); /* TODO: wait for serial instead */

          allocation->accessed &= ~CSAF_GPU_WRITE;
     }

     return DFB_OK;
}
/*
 * Make sure the buffer's content is present in system memory.
 *
 * If the system instance is already stored, returns immediately. If only the
 * video instance is stored, copies it down line by line (handling the chroma
 * planes of planar YUV formats separately) and marks the system instance as
 * stored.  Must not be called on CSP_VIDEOONLY buffers.
 */
DFBResult dfb_surfacemanager_assure_system( SurfaceManager *manager,
                                            SurfaceBuffer  *buffer )
{
     CoreSurface *surface = buffer->surface;

     D_MAGIC_ASSERT( manager, SurfaceManager );

     /* Video-only buffers have no system instance to restore into. */
     if (buffer->policy == CSP_VIDEOONLY) {
          D_BUG( "surface_manager_assure_system() called on video only surface" );
          return DFB_BUG;
     }

     if (buffer->system.health == CSH_STORED)
          return DFB_OK;
     else if (buffer->video.health == CSH_STORED) {
          int   i;
          char *src = dfb_system_video_memory_virtual( buffer->video.offset );
          char *dst = buffer->system.addr;

          /* from video_access_by_software() in surface.c:
             finish pending hardware writes before reading video memory. */
          if (buffer->video.access & VAF_HARDWARE_WRITE) {
               dfb_gfxcard_sync();
               buffer->video.access &= ~VAF_HARDWARE_WRITE;
          }
          buffer->video.access |= VAF_SOFTWARE_READ;

          /* Copy the luma/primary plane row by row (pitches may differ). */
          for (i=0; i<surface->height; i++) {
               direct_memcpy( dst, src, DFB_BYTES_PER_LINE(buffer->format, surface->width) );
               src += buffer->video.pitch;
               dst += buffer->system.pitch;
          }

          if (buffer->format == DSPF_YV12 || buffer->format == DSPF_I420) {
               /* Two half-size chroma planes follow; 'height' iterations at
                  half pitch cover both (each plane is height/2 rows). */
               for (i=0; i<surface->height; i++) {
                    direct_memcpy( dst, src, DFB_BYTES_PER_LINE(buffer->format, surface->width / 2) );
                    src += buffer->video.pitch / 2;
                    dst += buffer->system.pitch / 2;
               }
          }
          else if (buffer->format == DSPF_NV12 || buffer->format == DSPF_NV21) {
               /* One interleaved CbCr plane of height/2 full-pitch rows. */
               for (i=0; i<surface->height/2; i++) {
                    direct_memcpy( dst, src, DFB_BYTES_PER_LINE(buffer->format, surface->width) );
                    src += buffer->video.pitch;
                    dst += buffer->system.pitch;
               }
          }
          else if (buffer->format == DSPF_NV16) {
               /* Interleaved CbCr plane with full vertical resolution. */
               for (i=0; i<surface->height; i++) {
                    direct_memcpy( dst, src, DFB_BYTES_PER_LINE(buffer->format, surface->width) );
                    src += buffer->video.pitch;
                    dst += buffer->system.pitch;
               }
          }

          buffer->system.health = CSH_STORED;

          dfb_surface_notify_listeners( surface, CSNF_SYSTEM );

          return DFB_OK;
     }

     /* Neither instance is stored — the buffer has no valid content. */
     D_BUG( "no valid surface instance" );
     return DFB_BUG;
}
/*
 * Allocate video memory for the buffer from the manager's heap.
 *
 * Computes the required pitch/length honoring hardware limitations
 * (power-of-two pitches, pitch/offset alignment), then searches the chunk
 * list for a free chunk, or failing that, an occupied chunk whose buffer may
 * be kicked out (lower or equal policy, not locked, tolerations exceeded).
 * A kicked-out buffer is first saved to system memory.
 */
DFBResult dfb_surfacemanager_allocate( SurfaceManager *manager,
                                       SurfaceBuffer  *buffer )
{
     int pitch;
     int length;
     Chunk *c;

     Chunk *best_free = NULL;
     Chunk *best_occupied = NULL;

     CoreSurface *surface = buffer->surface;

     D_MAGIC_ASSERT( manager, SurfaceManager );

     if (!manager->length || manager->suspended)
          return DFB_NOVIDEOMEMORY;

     /* calculate the required length depending on limitations */
     pitch = MAX( surface->width, surface->min_width );

     /* Round the pixel pitch up to a power of two if the hardware needs it. */
     if (pitch < manager->max_power_of_two_pixelpitch &&
         surface->height < manager->max_power_of_two_height)
          pitch = 1 << direct_log2( pitch );

     /* Align the pixel pitch. */
     if (manager->pixelpitch_align > 1) {
          pitch += manager->pixelpitch_align - 1;
          pitch -= pitch % manager->pixelpitch_align;
     }

     pitch = DFB_BYTES_PER_LINE( buffer->format, pitch );

     /* Same rounding/alignment again, now in bytes. */
     if (pitch < manager->max_power_of_two_bytepitch &&
         surface->height < manager->max_power_of_two_height)
          pitch = 1 << direct_log2( pitch );

     if (manager->bytepitch_align > 1) {
          pitch += manager->bytepitch_align - 1;
          pitch -= pitch % manager->bytepitch_align;
     }

     /* Total length including extra planes of planar formats. */
     length = DFB_PLANE_MULTIPLY( buffer->format, MAX( surface->height, surface->min_height ) * pitch );

     if (manager->byteoffset_align > 1) {
          length += manager->byteoffset_align - 1;
          length -= length % manager->byteoffset_align;
     }

     /* Do a pre check before iterating through all chunks. */
     if (length > manager->available - manager->heap_offset)
          return DFB_NOVIDEOMEMORY;

     buffer->video.pitch = pitch;

     /* examine chunks */
     c = manager->chunks;
     while (c) {
          if (c->length >= length) {
               if (c->buffer) {
                    /* Occupied chunk: count how often it blocked an
                       allocation (saturating at 0xff). */
                    c->tolerations++;
                    if (c->tolerations > 0xff)
                         c->tolerations = 0xff;

                    /* Candidate for eviction only if not locked, of lower or
                       equal policy (never video-only), and either clearly
                       lower priority or tolerated long enough. */
                    if (!c->buffer->video.locked &&
                        c->buffer->policy <= buffer->policy &&
                        c->buffer->policy != CSP_VIDEOONLY &&
                        ((buffer->policy > c->buffer->policy) ||
                         (c->tolerations > manager->min_toleration/8 + 2))) {
                         /* found a nice place to chill */
                         if (!best_occupied ||
                             best_occupied->length > c->length ||
                             best_occupied->tolerations < c->tolerations)
                              /* first found or better one? (smaller chunk or
                                 longer tolerated is preferred) */
                              best_occupied = c;
                    }
               }
               else {
                    /* found a nice place to chill */
                    if (!best_free || best_free->length > c->length)
                         /* first found or better one? (best fit) */
                         best_free = c;
               }
          }

          c = c->next;
     }

     /* if we found a place */
     if (best_free) {
          occupy_chunk( manager, best_free, buffer, length );
          return DFB_OK;
     }

     if (best_occupied) {
          CoreSurface *kicked = best_occupied->buffer->surface;

          D_DEBUG_AT( Core_SM, "Kicking out buffer at %d (%d) with tolerations %d...\n",
                      best_occupied->offset, best_occupied->length, best_occupied->tolerations );

          /* Preserve the victim's content in system memory before eviction. */
          dfb_surfacemanager_assure_system( manager, best_occupied->buffer );

          best_occupied->buffer->video.health = CSH_INVALID;
          dfb_surface_notify_listeners( kicked, CSNF_VIDEO );

          best_occupied = free_chunk( manager, best_occupied );

          /* Make sure the hardware is done with the old content. */
          dfb_gfxcard_sync();

          occupy_chunk( manager, best_occupied, buffer, length );

          return DFB_OK;
     }

     D_DEBUG_AT( Core_SM, "Couldn't allocate enough heap space for video memory surface!\n" );

     /* no luck */
     return DFB_NOVIDEOMEMORY;
}
/*
 * Present a surface through the VDPAU presentation queue.
 *
 * If the locked buffer covers the whole screen at the destination origin it
 * is displayed directly; otherwise the content is composited (clear plus
 * stretch-blit) into the shared VDPAU output surface first.  The actual
 * display call is forwarded one-way to the X11 fusion call handler.
 */
static DFBResult
DisplaySurface( DFBX11                *x11,
                X11LayerData          *lds,
                VdpPresentationQueue   queue,
                CoreSurfaceBufferLock *lock )
{
     DirectResult  ret;
     DFBX11Shared *shared = x11->shared;

     DFBX11CallPresentationQueueDisplay display;

     display.presentation_queue         = queue;
     display.clip_width                 = 0;
     display.clip_height                = 0;
     display.earliest_presentation_time = 0;   /* 0 = present as soon as possible (per VDPAU) */

     /* Fast path: buffer fills the screen exactly, display it directly. */
     if (lock && lds->config.dest.x == 0 && lds->config.dest.y == 0 &&
         lds->config.dest.w == shared->screen_size.w && lds->config.dest.h == shared->screen_size.h) {
          display.surface = (VdpOutputSurface) (unsigned long) lock->handle;
     }
     else {
          /* Composite into the shared output surface instead. */
          CardState    state;
          DFBRectangle rect;

          dfb_state_init( &state, x11->core );

          state.destination = shared->vdp_core_surface;
          state.source      = lock ? lock->buffer->surface : NULL;

          /* Clip to the full screen. */
          state.clip.x1 = 0;
          state.clip.y1 = 0;
          state.clip.x2 = shared->screen_size.w - 1;
          state.clip.y2 = shared->screen_size.h - 1;

          rect.x = 0;
          rect.y = 0;
          rect.w = shared->screen_size.w;
          rect.h = shared->screen_size.h;

          /* Clear the whole output surface, then scale the source into it. */
          dfb_gfxcard_fillrectangles( &rect, 1, &state );

          if (lock)
               dfb_gfxcard_stretchblit( &lds->config.source, &lds->config.dest, &state );

          /* Finish rendering before handing the surface to VDPAU. */
          dfb_gfxcard_sync();

          /* Release surface references before destroying the state. */
          state.destination = NULL;
          state.source      = NULL;

          dfb_state_destroy( &state );

          display.surface = shared->vdp_surface;
     }

     /* One-way call: do not wait for the display to complete. */
     ret = fusion_call_execute2( &x11->shared->call, FCEF_ONEWAY, X11_VDPAU_PRESENTATION_QUEUE_DISPLAY, &display, sizeof(display), NULL );
     if (ret) {
          D_DERROR( ret, "DirectFB/X11/VDPAU: fusion_call_execute2() failed!\n" );
          return ret;
     }

     return DFB_OK;
}
/*
 * Flip or update the layer region's surface on screen.
 *
 * Depending on the region's buffer mode this either swaps buffers via the
 * driver's FlipRegion, copies the back buffer to the front buffer, or just
 * notifies the driver of the updated area via UpdateRegion.  Also unfreezes
 * a frozen region (re-applying or realizing its configuration) first.
 *
 * Fix: the source was corrupted by HTML-entity decoding — every "&reg"
 * byte sequence became "®", mangling "&region->..." expressions; restored.
 */
DFBResult
dfb_layer_region_flip_update( CoreLayerRegion     *region,
                              const DFBRegion     *update,
                              DFBSurfaceFlipFlags  flags )
{
     DFBResult                ret = DFB_OK;
     DFBRegion                rotated;
     CoreLayer               *layer;
     CoreLayerContext        *context;
     CoreSurface             *surface;
     const DisplayLayerFuncs *funcs;

     if (update)
          D_DEBUG_AT( Core_Layers, "dfb_layer_region_flip_update( %p, %p, 0x%08x ) <- [%d, %d - %dx%d]\n", region, update, flags, DFB_RECTANGLE_VALS_FROM_REGION( update ) );
     else
          D_DEBUG_AT( Core_Layers, "dfb_layer_region_flip_update( %p, %p, 0x%08x )\n", region, update, flags );

     D_ASSERT( region != NULL );
     D_ASSERT( region->context != NULL );

     /* Lock the region. */
     if (dfb_layer_region_lock( region ))
          return DFB_FUSION;

     D_ASSUME( region->surface != NULL );

     /* Check for NULL surface. */
     if (!region->surface) {
          D_DEBUG_AT( Core_Layers, " -> No surface => no update!\n" );
          dfb_layer_region_unlock( region );
          return DFB_UNSUPPORTED;
     }

     context = region->context;
     surface = region->surface;
     layer   = dfb_layer_at( context->layer_id );

     D_ASSERT( layer->funcs != NULL );

     funcs = layer->funcs;

     /* Unfreeze region? */
     if (D_FLAGS_IS_SET( region->state, CLRSF_FROZEN )) {
          D_FLAGS_CLEAR( region->state, CLRSF_FROZEN );

          if (D_FLAGS_IS_SET( region->state, CLRSF_REALIZED )) {
               ret = set_region( region, &region->config, CLRCF_ALL, surface );
               if (ret)
                    D_DERROR( ret, "Core/LayerRegion: set_region() in dfb_layer_region_flip_update() failed!\n" );
          }
          else if (D_FLAGS_ARE_SET( region->state, CLRSF_ENABLED | CLRSF_ACTIVE )) {
               ret = realize_region( region );
               if (ret)
                    D_DERROR( ret, "Core/LayerRegion: realize_region() in dfb_layer_region_flip_update() failed!\n" );
          }

          if (ret) {
               dfb_layer_region_unlock( region );
               return ret;
          }
     }

     /* Depending on the buffer mode... */
     switch (region->config.buffermode) {
          case DLBM_TRIPLE:
          case DLBM_BACKVIDEO:
               /* Check if simply swapping the buffers is possible: no blit
                  requested, no rotation, and the update covers everything. */
               if (!(flags & DSFLIP_BLIT) && !surface->rotation &&
                   (!update || (update->x1 == 0 &&
                                update->y1 == 0 &&
                                update->x2 == surface->config.size.w - 1 &&
                                update->y2 == surface->config.size.h - 1)))
               {
                    D_DEBUG_AT( Core_Layers, " -> Going to swap buffers...\n" );

                    /* Use the driver's routine if the region is realized. */
                    if (D_FLAGS_IS_SET( region->state, CLRSF_REALIZED )) {
                         D_ASSUME( funcs->FlipRegion != NULL );

                         ret = region_buffer_lock( region, surface, CSBR_BACK );
                         if (ret) {
                              dfb_layer_region_unlock( region );
                              return ret;
                         }

                         D_DEBUG_AT( Core_Layers, " -> Flipping region using driver...\n" );

                         if (funcs->FlipRegion)
                              ret = funcs->FlipRegion( layer, layer->driver_data, layer->layer_data, region->region_data, surface, flags, &region->surface_lock );

                         dfb_surface_unlock( surface );
                    }
                    else {
                         D_DEBUG_AT( Core_Layers, " -> Flipping region not using driver...\n" );

                         /* Just do the hardware independent work. */
                         dfb_surface_lock( surface );
                         dfb_surface_flip( surface, false );
                         dfb_surface_unlock( surface );
                    }
                    break;
               }

               /* fall through */

          case DLBM_BACKSYSTEM:
               D_DEBUG_AT( Core_Layers, " -> Going to copy portion...\n" );

               /* Wait before the copy if "ONSYNC" semantics were requested. */
               if ((flags & DSFLIP_WAITFORSYNC) == DSFLIP_WAITFORSYNC) {
                    D_DEBUG_AT( Core_Layers, " -> Waiting for VSync...\n" );
                    dfb_layer_wait_vsync( layer );
               }

               D_DEBUG_AT( Core_Layers, " -> Copying content from back to front buffer...\n" );

               /* ...or copy updated contents from back to front buffer. */
               dfb_back_to_front_copy_rotation( surface, update, surface->rotation );

               /* Wait after the copy for plain DSFLIP_WAIT. */
               if ((flags & DSFLIP_WAITFORSYNC) == DSFLIP_WAIT) {
                    D_DEBUG_AT( Core_Layers, " -> Waiting for VSync...\n" );
                    dfb_layer_wait_vsync( layer );
               }

               /* fall through */

          case DLBM_FRONTONLY:
               /* Tell the driver about the update if the region is realized. */
               if (funcs->UpdateRegion && D_FLAGS_IS_SET( region->state, CLRSF_REALIZED )) {
                    if (surface) {
                         CoreSurfaceAllocation *allocation;

                         allocation = region->surface_lock.allocation;
                         D_ASSERT( allocation != NULL );

                         /* If hardware has written or is writing... */
                         if (allocation->accessed[CSAID_GPU] & CSAF_WRITE) {
                              D_DEBUG_AT( Core_Layers, " -> Waiting for pending writes...\n" );

                              /* ...wait for the operation to finish. */
                              if (!(flags & DSFLIP_PIPELINE))
                                   dfb_gfxcard_sync(); /* TODO: wait for serial instead */

                              allocation->accessed[CSAID_GPU] &= ~CSAF_WRITE;
                         }

                         dfb_surface_lock( surface );
                         dfb_surface_allocation_update( allocation, CSAF_READ );
                         dfb_surface_unlock( surface );
                    }

                    D_DEBUG_AT( Core_Layers, " -> Notifying driver about updated content...\n" );

                    /* Map the update region into rotated surface coordinates. */
                    dfb_region_from_rotated( &rotated, update, &surface->config.size, surface->rotation );

                    ret = funcs->UpdateRegion( layer, layer->driver_data, layer->layer_data, region->region_data, surface, &rotated, &region->surface_lock );
               }
               break;

          default:
               D_BUG("unknown buffer mode");
               ret = DFB_BUG;
     }

     D_DEBUG_AT( Core_Layers, " -> done.\n" );

     /* Unlock the region. */
     dfb_layer_region_unlock( region );

     return ret;
}
/*
 * Manage CPU/GPU access interlocks for an allocation before a new lock.
 *
 * Inserts the necessary syncs and cache flushes when one accessor (CPU or
 * GPU) touches an allocation the other has read or written, then records the
 * new access in allocation->accessed[accessor].  Accessed flags are only
 * cleared when no locks are currently held (see FIXME below).
 */
static void
manage_interlocks( CoreSurfaceAllocation  *allocation,
                   CoreSurfaceAccessorID   accessor,
                   CoreSurfaceAccessFlags  access )
{
     int locks;

     /* Number of locks currently held on this allocation. */
     locks = dfb_surface_allocation_locks( allocation );

#if 1
     /*
      * Manage access interlocks.
      *
      * SOON FIXME: Clearing flags only when not locked yet. Otherwise nested GPU/CPU locks are a problem.
      */
     /* Software read/write access... */
     if (accessor == CSAID_CPU) {
          /* If hardware has written or is writing... */
          if (allocation->accessed[CSAID_GPU] & CSAF_WRITE) {
               /* ...wait for the operation to finish. */
               dfb_gfxcard_sync(); /* TODO: wait for serial instead */

               /* Software read access after hardware write requires flush of the (bus) read cache. */
               dfb_gfxcard_flush_read_cache();

               if (!locks) {
                    /* ...clear hardware write access. */
                    allocation->accessed[CSAID_GPU] = (CoreSurfaceAccessFlags)(allocation->accessed[CSAID_GPU] & ~CSAF_WRITE);

                    /* ...clear hardware read access (to avoid syncing twice). */
                    allocation->accessed[CSAID_GPU] = (CoreSurfaceAccessFlags)(allocation->accessed[CSAID_GPU] & ~CSAF_READ);
               }
          }

          /* Software write access... */
          if (access & CSAF_WRITE) {
               /* ...if hardware has (to) read... */
               if (allocation->accessed[CSAID_GPU] & CSAF_READ) {
                    /* ...wait for the operation to finish. */
                    dfb_gfxcard_sync(); /* TODO: wait for serial instead */

                    /* ...clear hardware read access. */
                    if (!locks)
                         allocation->accessed[CSAID_GPU] = (CoreSurfaceAccessFlags)(allocation->accessed[CSAID_GPU] & ~CSAF_READ);
               }
          }
     }

     /* Hardware read access... */
     if (accessor == CSAID_GPU && access & CSAF_READ) {
          /* ...if software has written before... */
          if (allocation->accessed[CSAID_CPU] & CSAF_WRITE) {
               /* ...flush texture cache. */
               dfb_gfxcard_flush_texture_cache();

               /* ...clear software write access. */
               if (!locks)
                    allocation->accessed[CSAID_CPU] = (CoreSurfaceAccessFlags)(allocation->accessed[CSAID_CPU] & ~CSAF_WRITE);
          }
     }

     if (! D_FLAGS_ARE_SET( allocation->accessed[accessor], access )) {
          /* FIXME: surface_enter */
     }
#endif

     /* Collect: record the new access flags for this accessor. */
     allocation->accessed[accessor] = (CoreSurfaceAccessFlags)(allocation->accessed[accessor] | access);
}