/*
 * Prepare a pixmap for read-only CPU access.
 *
 * Only a pending GPU *write* has to be flushed before the CPU may read
 * the pixmap; outstanding GPU reads do not require a sync.
 *
 * Returns TRUE on success (trivially so when the pixmap has no driver
 * private and therefore nothing to synchronize).
 */
static Bool
radeon_glamor_prepare_access_cpu_ro(ScrnInfoPtr scrn, PixmapPtr pixmap,
				    struct radeon_pixmap *priv)
{
	RADEONInfoPtr info;
	Bool need_sync;

	if (!priv)
		return TRUE;

	info = RADEONPTR(scrn);
	need_sync = radeon_glamor_gpu_pending(info->gpu_synced, priv->gpu_write);
	/* Pass the already-computed info instead of calling RADEONPTR()
	 * a second time (matches the _rw variant). */
	return radeon_glamor_prepare_access_cpu(scrn, info, pixmap, priv,
						need_sync);
}
/*
 * Reset the VIP (Video Input Port) host interface.
 *
 * The register programming sequence is identical for every chip family;
 * only the VIPH_CONTROL setup value and the bus-master chunk size differ.
 * The switch therefore just selects those two values, and one common
 * sequence performs the actual reset — this replaces three near-identical
 * copies of the same five register writes.
 *
 * pPriv is unused but kept for interface compatibility with callers.
 */
void RADEONVIP_reset(ScrnInfoPtr pScrn, RADEONPortPrivPtr pPriv)
{
    RADEONInfoPtr info = RADEONPTR(pScrn);
    unsigned char *RADEONMMIO = info->MMIO;
    uint32_t viph_control;
    uint32_t bm_chunk;

    RADEONWaitForIdleMMIO(pScrn);
    switch (info->ChipFamily) {
    case CHIP_FAMILY_RV250:
    case CHIP_FAMILY_RV350:
    case CHIP_FAMILY_R350:
    case CHIP_FAMILY_R300:
	viph_control = 0x003F0009;
	bm_chunk = 0x0;
	break;
    case CHIP_FAMILY_RV380:
	viph_control = 0x003F000D;
	bm_chunk = 0x0;
	break;
    default:
	viph_control = 0x003F0004;
	bm_chunk = 0x151;
	break;
    }

    OUTREG(RADEON_VIPH_CONTROL, viph_control); /* slowest, timeout in 16 phases */
    /* keep the per-channel timeout bits, disable register reads */
    OUTREG(RADEON_VIPH_TIMEOUT_STAT,
	   (INREG(RADEON_VIPH_TIMEOUT_STAT) & 0xFFFFFF00) |
	   RADEON_VIPH_TIMEOUT_STAT__VIPH_REGR_DIS);
    OUTREG(RADEON_VIPH_DV_LAT, 0x444400FF); /* set timeslice */
    OUTREG(RADEON_VIPH_BM_CHUNK, bm_chunk);
    OUTREG(RADEON_TEST_DEBUG_CNTL,
	   INREG(RADEON_TEST_DEBUG_CNTL) &
	   (~RADEON_TEST_DEBUG_CNTL__TEST_DEBUG_OUT_EN));
}
/*
 * Write a single 32-bit value to a VIP bus register.
 *
 * Only 4-byte transactions are supported by this path; any other length
 * is rejected with an error message.  After programming the address and
 * again after writing the data, the VIP bus is polled until it leaves
 * the BUSY state.
 *
 * Returns TRUE on success, FALSE on bad length or VIP bus timeout.
 */
static Bool RADEONVIP_write(GENERIC_BUS_Ptr b, uint32_t address,
			    uint32_t count, uint8_t *buffer)
{
    ScrnInfoPtr pScrn = b->pScrn;
    RADEONInfoPtr info = RADEONPTR(pScrn);
    unsigned char *RADEONMMIO = info->MMIO;
    uint32_t status;

    if (count != 4) {
	/* fixed typo: "non-stadard" -> "non-standard" */
	xf86DrvMsg(pScrn->scrnIndex, X_ERROR,
		   "Attempt to access VIP bus with non-standard transaction length\n");
	return FALSE;
    }

    RADEONWaitForFifo(pScrn, 2);
    OUTREG(RADEON_VIPH_REG_ADDR, address & (~0x2000)); /* clear read flag */
    while (VIP_BUSY == (status = RADEONVIP_idle(b)))
	;
    if (VIP_IDLE != status)
	return FALSE;

    RADEONWaitForFifo(pScrn, 2);
    /* count is known to be 4 here; the old one-case switch was dead code */
    OUTREG(RADEON_VIPH_REG_DATA, *(uint32_t *)buffer);
    write_mem_barrier();
    while (VIP_BUSY == (status = RADEONVIP_idle(b)))
	;
    if (VIP_IDLE != status)
	return FALSE;
    return TRUE;
}
/*
 * Burst-write `count` bytes (consumed in 32-bit words) to the VIP bus
 * through the VIPH FIFO at `address`.
 *
 * The address is programmed with bit 0x1000 set (FIFO access) and bit
 * 0x2000 cleared, then each word is written and the FIFO polled until it
 * drains before the next word goes out.
 *
 * Returns TRUE on success, FALSE on any VIP bus timeout.
 * NOTE(review): `count` that is not a multiple of 4 causes the final
 * partial word to be read past the end of `buffer` — callers presumably
 * always pass word-aligned lengths; confirm at call sites.
 */
static Bool RADEONVIP_fifo_write(GENERIC_BUS_Ptr b, uint32_t address, uint32_t count, uint8_t *buffer)
{
    ScrnInfoPtr pScrn = b->pScrn;
    RADEONInfoPtr info = RADEONPTR(pScrn);
    unsigned char *RADEONMMIO = info->MMIO;
    uint32_t status;
    uint32_t i;

    RADEONWaitForFifo(pScrn, 2);
    /* 0x1000 selects FIFO access, ~0x2000 clears the read flag */
    OUTREG(VIPH_REG_ADDR, (address & (~0x2000)) | 0x1000);
    /* spin until the VIP bus accepts the address */
    while(VIP_BUSY == (status = RADEONVIP_fifo_idle(b, 0x0f)));
    if(VIP_IDLE != status){
        xf86DrvMsg(pScrn->scrnIndex, X_INFO, "cannot write %x to VIPH_REG_ADDR\n", (unsigned int)address);
        return FALSE;
    }
    RADEONWaitForFifo(pScrn, 2);
    for (i = 0; i < count; i+=4)
    {
        OUTREG(VIPH_REG_DATA, *(uint32_t*)(buffer + i));
        write_mem_barrier();
        /* wait for the FIFO to drain before pushing the next word */
        while(VIP_BUSY == (status = RADEONVIP_fifo_idle(b, 0x0f)));
        if(VIP_IDLE != status)
        {
            xf86DrvMsg(pScrn->scrnIndex, X_INFO, "cannot write to VIPH_REG_DATA\n");
            return FALSE;
        }
    }
    return TRUE;
}
/*
 * Record the 2D engine state for an EXA copy and emit it.
 *
 * Fills in info->state_2d (GUI master control, blit direction, brush and
 * source colors, write mask, pitches/offsets and scissor) and then pushes
 * the whole state to the ring via Emit2DState().
 */
static void
RADEONDoPrepareCopy(ScrnInfoPtr pScrn, uint32_t src_pitch_offset,
		    uint32_t dst_pitch_offset, uint32_t datatype, int rop,
		    Pixel planemask)
{
    RADEONInfoPtr info = RADEONPTR(pScrn);
    struct radeon_2d_state *s2d = &info->state_2d;

    /* blit direction from the accel state's x/y direction flags */
    s2d->dp_cntl =
	((info->accel_state->xdir >= 0 ? RADEON_DST_X_LEFT_TO_RIGHT : 0) |
	 (info->accel_state->ydir >= 0 ? RADEON_DST_Y_TOP_TO_BOTTOM : 0));

    /* memory-to-memory copy with the requested raster op and datatype */
    s2d->dp_gui_master_cntl = (RADEON_GMC_DST_PITCH_OFFSET_CNTL |
			       RADEON_GMC_SRC_PITCH_OFFSET_CNTL |
			       RADEON_GMC_BRUSH_NONE |
			       (datatype << 8) |
			       RADEON_GMC_SRC_DATATYPE_COLOR |
			       RADEON_ROP[rop].rop |
			       RADEON_DP_SRC_SOURCE_MEMORY |
			       RADEON_GMC_CLR_CMP_CNTL_DIS);

    /* neutral brush/source colors; the copy takes data from memory */
    s2d->dp_brush_frgd_clr = 0xffffffff;
    s2d->dp_brush_bkgd_clr = 0x00000000;
    s2d->dp_src_frgd_clr = 0xffffffff;
    s2d->dp_src_bkgd_clr = 0x00000000;

    s2d->dp_write_mask = planemask;
    s2d->src_pitch_offset = src_pitch_offset;
    s2d->dst_pitch_offset = dst_pitch_offset;

    /* open up the scissor to its maximum extent */
    s2d->default_sc_bottom_right = (RADEON_DEFAULT_SC_RIGHT_MAX |
				    RADEON_DEFAULT_SC_BOTTOM_MAX);

    Emit2DState(pScrn, RADEON_2D_EXA_COPY);
}
/** * radeon_glamor_close_screen() unwraps its wrapped screen functions and tears * down our screen private, before calling down to the next CloseScreen. */ static Bool radeon_glamor_close_screen(CLOSE_SCREEN_ARGS_DECL) { RADEONInfoPtr info = RADEONPTR(xf86ScreenToScrn(pScreen)); #ifdef RENDER PictureScreenPtr ps = GetPictureScreenIfSet(pScreen); #endif pScreen->CreateGC = info->glamor.SavedCreateGC; pScreen->CloseScreen = info->glamor.SavedCloseScreen; pScreen->GetImage = info->glamor.SavedGetImage; pScreen->GetSpans = info->glamor.SavedGetSpans; pScreen->CopyWindow = info->glamor.SavedCopyWindow; pScreen->ChangeWindowAttributes = info->glamor.SavedChangeWindowAttributes; pScreen->BitmapToRegion = info->glamor.SavedBitmapToRegion; #ifdef RENDER if (ps) { ps->Composite = info->glamor.SavedComposite; ps->Glyphs = info->glamor.SavedGlyphs; ps->UnrealizeGlyph = info->glamor.SavedUnrealizeGlyph; ps->Trapezoids = info->glamor.SavedTrapezoids; ps->AddTraps = info->glamor.SavedAddTraps; ps->Triangles = info->glamor.SavedTriangles; ps->UnrealizeGlyph = info->glamor.SavedUnrealizeGlyph; } #endif return (*pScreen->CloseScreen) (CLOSE_SCREEN_ARGS); }
/*
 * Free offscreen memory handed out by the legacy allocator.
 *
 * mem_struct is interpreted according to the active acceleration path:
 * a struct radeon_bo under KMS (info->cs), an ExaOffscreenArea under EXA,
 * or an FBLinearPtr under XAA.  NULL-tolerant in the EXA/XAA paths.
 */
void
radeon_legacy_free_memory(ScrnInfoPtr pScrn, void *mem_struct)
{
    RADEONInfoPtr info = RADEONPTR(pScrn);

#ifdef XF86DRM_MODE
    if (info->cs) {
	struct radeon_bo *bo = mem_struct;

	radeon_bo_unref(bo);
	return;
    }
#endif
#ifdef USE_EXA
    if (info->useEXA) {
	/* only look up the screen when we actually need it */
	ScreenPtr pScreen = screenInfo.screens[pScrn->scrnIndex];
	ExaOffscreenArea *area = mem_struct;

	if (area != NULL)
	    exaOffscreenFree(pScreen, area);
	/* dropped the dead "area = NULL" store on a local going out of scope */
    }
#endif /* USE_EXA */
#ifdef USE_XAA
    if (!info->useEXA) {
	FBLinearPtr linear = mem_struct;

	if (linear != NULL)
	    xf86FreeOffscreenLinear(linear);
	/* dropped the dead "linear = NULL" store as well */
    }
#endif /* USE_XAA */
}
/*
 * Flush pending glamor rendering for this screen.
 * No-op when glamor is not in use.
 */
void radeon_glamor_flush(ScrnInfoPtr pScrn)
{
	RADEONInfoPtr info = RADEONPTR(pScrn);

	if (!info->use_glamor)
		return;

	glamor_block_handler(pScrn->pScreen);
}
/*
 * Initialize Xv support: gather the generic adaptors, add our textured
 * video adaptor (glamor or classic, as appropriate for the chip), and
 * register the combined list plus the XvMC adaptor when available.
 *
 * Bails out early on RN50 (RV100 without CRTC2), which has no overlay/3D.
 */
void RADEONInitVideo(ScreenPtr pScreen)
{
    ScrnInfoPtr pScrn = xf86ScreenToScrn(pScreen);
    RADEONInfoPtr info = RADEONPTR(pScrn);
    RADEONEntPtr pRADEONEnt = RADEONEntPriv(pScrn);
    XF86VideoAdaptorPtr *adaptors, *newAdaptors = NULL;
    XF86VideoAdaptorPtr texturedAdaptor = NULL;
    int num_adaptors;

    /* no overlay or 3D on RN50 */
    if (info->ChipFamily == CHIP_FAMILY_RV100 && !pRADEONEnt->HasCRTC2)
	return;

    num_adaptors = xf86XVListGenericAdaptors(pScrn, &adaptors);
    /* room for the generic adaptors plus up to two of ours;
     * element type is XF86VideoAdaptorPtr, matching the memcpy below
     * (the old code sized with sizeof(XF86VideoAdaptorPtr *)) */
    newAdaptors = malloc((num_adaptors + 2) * sizeof(XF86VideoAdaptorPtr));
    if (newAdaptors == NULL)
	return;

    /* adaptors may be NULL when there are none; don't memcpy from NULL */
    if (num_adaptors)
	memcpy(newAdaptors, adaptors,
	       num_adaptors * sizeof(XF86VideoAdaptorPtr));
    adaptors = newAdaptors;

    if (info->use_glamor) {
	texturedAdaptor = radeon_glamor_xv_init(pScreen, 16);
	if (texturedAdaptor != NULL) {
	    adaptors[num_adaptors++] = texturedAdaptor;
	    xf86DrvMsg(pScrn->scrnIndex, X_INFO,
		       "Set up textured video (glamor)\n");
	} else
	    xf86DrvMsg(pScrn->scrnIndex, X_ERROR,
		       "Failed to set up textured video (glamor)\n");
    } else if ((info->ChipFamily < CHIP_FAMILY_RS400) ||
	       (info->directRenderingEnabled)) {
	texturedAdaptor = RADEONSetupImageTexturedVideo(pScreen);
	if (texturedAdaptor != NULL) {
	    adaptors[num_adaptors++] = texturedAdaptor;
	    xf86DrvMsg(pScrn->scrnIndex, X_INFO, "Set up textured video\n");
	} else
	    xf86DrvMsg(pScrn->scrnIndex, X_ERROR,
		       "Failed to set up textured video\n");
    } else
	xf86DrvMsg(pScrn->scrnIndex, X_INFO,
		   "Textured video requires CP on R5xx/R6xx/R7xx/IGP\n");

    if (num_adaptors)
	xf86XVScreenInit(pScreen, adaptors, num_adaptors);

    if (texturedAdaptor) {
	XF86MCAdaptorPtr xvmcAdaptor =
	    RADEONCreateAdaptorXvMC(pScreen, (char *)texturedAdaptor->name);

	if (xvmcAdaptor) {
	    if (!xf86XvMCScreenInit(pScreen, 1, &xvmcAdaptor))
		xf86DrvMsg(pScrn->scrnIndex, X_ERROR,
			   "[XvMC] Failed to initialize extension.\n");
	    else
		xf86DrvMsg(pScrn->scrnIndex, X_INFO,
			   "[XvMC] Extension initialized.\n");
	}
    }

    /* free(NULL) is a no-op, so no guard needed */
    free(newAdaptors);
}
/*
 * Swap the EGL-backed buffers of two pixmaps (page-flip style exchange).
 * Does nothing when glamor is not in use.
 */
void
radeon_glamor_exchange_buffers(PixmapPtr src, PixmapPtr dst)
{
	ScrnInfoPtr scrn = xf86ScreenToScrn(dst->drawable.pScreen);

	if (RADEONPTR(scrn)->use_glamor)
		glamor_egl_exchange_buffers(src, dst);
}
/*
 * Attach a glamor EGL texture to the pixmap's buffer object.
 * Trivially succeeds when glamor is not in use.
 */
Bool
radeon_glamor_create_textured_pixmap(PixmapPtr pixmap,
				     struct radeon_pixmap *priv)
{
	ScrnInfoPtr scrn = xf86ScreenToScrn(pixmap->drawable.pScreen);
	RADEONInfoPtr info = RADEONPTR(scrn);

	if (!info->use_glamor)
		return TRUE;

	return glamor_egl_create_textured_pixmap(pixmap, priv->bo->handle,
						 pixmap->devKind);
}
/*
 * Back a pixmap with a buffer object imported from a PRIME fd.
 *
 * Opens the fd as a radeon BO, fills in *surface (R600+ with a surface
 * manager only) and attaches the BO to the pixmap.  The fd is consumed:
 * it is closed on success and — unlike before — also on failure, and the
 * BO reference obtained from the open is always dropped, fixing fd/BO
 * leaks on the surface setup error paths.
 *
 * Returns TRUE on success, FALSE on failure.
 */
Bool radeon_set_shared_pixmap_backing(PixmapPtr ppix, void *fd_handle,
				      struct radeon_surface *surface)
{
    ScrnInfoPtr pScrn = xf86ScreenToScrn(ppix->drawable.pScreen);
    RADEONInfoPtr info = RADEONPTR(pScrn);
    struct radeon_bo *bo;
    int ihandle = (int)(long)fd_handle;
    uint32_t size = ppix->devKind * ppix->drawable.height;

    bo = radeon_gem_bo_open_prime(info->bufmgr, ihandle, size);
    if (!bo)
	return FALSE;

    memset(surface, 0, sizeof(struct radeon_surface));

    if (info->ChipFamily >= CHIP_FAMILY_R600 && info->surf_man) {
	surface->npix_x = ppix->drawable.width;
	surface->npix_y = ppix->drawable.height;
	surface->npix_z = 1;
	surface->blk_w = 1;
	surface->blk_h = 1;
	surface->blk_d = 1;
	surface->array_size = 1;
	surface->bpe = ppix->drawable.bitsPerPixel / 8;
	surface->nsamples = 1;
	/* we are requiring a recent enough libdrm version */
	surface->flags |= RADEON_SURF_HAS_TILE_MODE_INDEX;
	surface->flags |= RADEON_SURF_SET(RADEON_SURF_TYPE_2D, TYPE);
	surface->flags |= RADEON_SURF_SET(RADEON_SURF_MODE_LINEAR, MODE);
	if (radeon_surface_best(info->surf_man, surface))
	    goto error;
	if (radeon_surface_init(info->surf_man, surface))
	    goto error;
	/* we have to post hack the surface to reflect the actual size
	   of the shared pixmap */
	surface->level[0].pitch_bytes = ppix->devKind;
	surface->level[0].nblk_x = ppix->devKind / surface->bpe;
    }

    radeon_set_pixmap_bo(ppix, bo);

    close(ihandle);
    /* we have a reference from the alloc and one from set pixmap bo,
       drop one */
    radeon_bo_unref(bo);
    return TRUE;

error:
    /* don't leak the BO reference or the prime fd on failure */
    radeon_bo_unref(bo);
    close(ihandle);
    return FALSE;
}
/**
 * @param screen screen being initialized
 *
 * Wraps the fb/render screen functions with our glamor-aware versions,
 * saving each original pointer so the wrappers can call down and so
 * radeon_glamor_close_screen() can restore them.
 */
void
radeon_glamor_screen_init(ScreenPtr screen)
{
	RADEONInfoPtr info = RADEONPTR(xf86ScreenToScrn(screen));

	/*
	 * Replace various fb screen functions
	 */
	info->glamor.SavedCloseScreen = screen->CloseScreen;
	screen->CloseScreen = radeon_glamor_close_screen;
	info->glamor.SavedCreateGC = screen->CreateGC;
	screen->CreateGC = radeon_glamor_create_gc;
	info->glamor.SavedGetImage = screen->GetImage;
	screen->GetImage = radeon_glamor_get_image;
	info->glamor.SavedGetSpans = screen->GetSpans;
	screen->GetSpans = radeon_glamor_get_spans;
	info->glamor.SavedCreatePixmap = screen->CreatePixmap;
	info->glamor.SavedDestroyPixmap = screen->DestroyPixmap;
	info->glamor.SavedCopyWindow = screen->CopyWindow;
	screen->CopyWindow = radeon_glamor_copy_window;
	/* not wrapped, but saved so CloseScreen's restore is a no-op
	 * instead of writing back an uninitialized pointer */
	info->glamor.SavedChangeWindowAttributes = screen->ChangeWindowAttributes;
	info->glamor.SavedBitmapToRegion = screen->BitmapToRegion;
	screen->BitmapToRegion = radeon_glamor_bitmap_to_region;
#ifdef RENDER
	{
		PictureScreenPtr ps = GetPictureScreenIfSet(screen);
		if (ps) {
			info->glamor.SavedComposite = ps->Composite;
			ps->Composite = radeon_glamor_composite;
			info->glamor.SavedUnrealizeGlyph = ps->UnrealizeGlyph;
			/* save the originals before replacing them: the
			 * wrappers and CloseScreen read SavedGlyphs,
			 * SavedTriangles and SavedTrapezoids, which were
			 * previously never assigned */
			info->glamor.SavedGlyphs = ps->Glyphs;
			ps->Glyphs = radeon_glamor_glyphs;
			info->glamor.SavedTriangles = ps->Triangles;
			ps->Triangles = radeon_glamor_triangles;
			info->glamor.SavedTrapezoids = ps->Trapezoids;
			ps->Trapezoids = radeon_glamor_trapezoids;
			info->glamor.SavedAddTraps = ps->AddTraps;
			ps->AddTraps = radeon_glamor_add_traps;
		}
	}
#endif
}
/*
 * CopyArea wrapper.
 *
 * Takes the GPU path (the saved glamor CopyArea) when acceleration is
 * forced or when either pixmap's private exists without a BO; otherwise
 * — or if GPU access preparation fails — falls back to fbCopyArea with
 * CPU access mapped around it.
 *
 * Returns the region fbCopyArea / the saved CopyArea returns, or NULL
 * when CPU access to the destination could not be prepared.
 */
static RegionPtr
radeon_glamor_copy_area(DrawablePtr pSrcDrawable, DrawablePtr pDstDrawable,
			GCPtr pGC, int srcx, int srcy, int width, int height,
			int dstx, int dsty)
{
	ScreenPtr screen = pDstDrawable->pScreen;
	ScrnInfoPtr scrn = xf86ScreenToScrn(screen);
	RADEONInfoPtr info = RADEONPTR(scrn);
	PixmapPtr src_pixmap = get_drawable_pixmap(pSrcDrawable);
	PixmapPtr dst_pixmap = get_drawable_pixmap(pDstDrawable);
	struct radeon_pixmap *src_priv = radeon_get_pixmap_private(src_pixmap);
	struct radeon_pixmap *dst_priv = radeon_get_pixmap_private(dst_pixmap);
	RegionPtr ret = NULL;

	/* GPU path: forced acceleration, or a private without a BO */
	if (info->accel_state->force ||
	    (src_priv && !src_priv->bo) || (dst_priv && !dst_priv->bo)) {
		if (!radeon_glamor_prepare_access_gpu(dst_priv))
			goto fallback;
		/* src may be the same pixmap as dst; prepare it only once */
		if (src_priv != dst_priv &&
		    !radeon_glamor_prepare_access_gpu(src_priv))
			goto fallback;

		ret = info->glamor.SavedCopyArea(pSrcDrawable, pDstDrawable,
						 pGC, srcx, srcy, width, height,
						 dstx, dsty);
		radeon_glamor_finish_access_gpu_rw(info, dst_priv);
		if (src_priv != dst_priv)
			radeon_glamor_finish_access_gpu_ro(info, src_priv);

		return ret;
	}

fallback:
	/* CPU path: map dst read-write, src read-only, then fbCopyArea */
	if (radeon_glamor_prepare_access_cpu_rw(scrn, dst_pixmap, dst_priv)) {
		if (pSrcDrawable == pDstDrawable ||
		    radeon_glamor_prepare_access_cpu_ro(scrn, src_pixmap,
							src_priv)) {
			ret = fbCopyArea(pSrcDrawable, pDstDrawable, pGC,
					 srcx, srcy, width, height, dstx, dsty);
			if (pSrcDrawable != pDstDrawable)
				radeon_glamor_finish_access_cpu(src_pixmap);
		}
		radeon_glamor_finish_access_cpu(dst_pixmap);
	}

	return ret;
}
/**
 * radeon_glamor_validate_gc() sets the ops to our implementations, which may be
 * accelerated or may sync the card and fall back to fb.
 */
static void
radeon_glamor_validate_gc(GCPtr pGC, unsigned long changes,
			  DrawablePtr pDrawable)
{
	ScrnInfoPtr scrn = xf86ScreenToScrn(pGC->pScreen);
	RADEONInfoPtr info = RADEONPTR(scrn);
	Bool has_priv;

	glamor_validate_gc(pGC, changes, pDrawable);
	/* remember glamor's CopyArea so our wrapper can call down */
	info->glamor.SavedCopyArea = pGC->ops->CopyArea;

	/* Does the destination, stipple or tile pixmap carry our private? */
	has_priv = radeon_get_pixmap_private(get_drawable_pixmap(pDrawable)) != NULL;
	if (!has_priv && pGC->stipple)
		has_priv = radeon_get_pixmap_private(pGC->stipple) != NULL;
	if (!has_priv && pGC->fillStyle == FillTiled)
		has_priv = radeon_get_pixmap_private(pGC->tile.pixmap) != NULL;

	if (has_priv)
		pGC->ops = (GCOps *)&radeon_glamor_ops;
	else
		pGC->ops = &radeon_glamor_nodstbo_ops;
}
/*
 * Triangles wrapper: map dst read-write and src read-only for CPU
 * access, call the saved (software) Triangles, then unmap in reverse
 * order.  Silently does nothing if dst cannot be prepared.
 */
static void
radeon_glamor_triangles(CARD8 op, PicturePtr src, PicturePtr dst,
			PictFormatPtr maskFormat, INT16 xSrc, INT16 ySrc,
			int ntri, xTriangle *tri)
{
	ScrnInfoPtr scrn = xf86ScreenToScrn(dst->pDrawable->pScreen);
	RADEONInfoPtr info = RADEONPTR(scrn);

	if (!radeon_glamor_picture_prepare_access_cpu_rw(scrn, dst))
		return;

	if (radeon_glamor_picture_prepare_access_cpu_ro(scrn, src)) {
		info->glamor.SavedTriangles(op, src, dst, maskFormat,
					    xSrc, ySrc, ntri, tri);
		radeon_glamor_picture_finish_access_cpu(src);
	}

	radeon_glamor_picture_finish_access_cpu(dst);
}
/*
 * Prepare a pixmap for read-write CPU access.
 *
 * A CPU write must wait for both pending GPU writes and pending GPU
 * reads, so both pending flags feed into the sync decision.
 *
 * Returns TRUE on success (trivially so without a driver private).
 */
static Bool
radeon_glamor_prepare_access_cpu_rw(ScrnInfoPtr scrn, PixmapPtr pixmap,
				    struct radeon_pixmap *priv)
{
	RADEONInfoPtr info;
	uint_fast32_t synced;
	Bool must_sync;

	if (!priv)
		return TRUE;

	info = RADEONPTR(scrn);
	synced = info->gpu_synced;
	/* non-short-circuit OR, both pending checks are evaluated */
	must_sync = radeon_glamor_gpu_pending(synced, priv->gpu_write) |
		    radeon_glamor_gpu_pending(synced, priv->gpu_read);

	return radeon_glamor_prepare_access_cpu(scrn, info, pixmap, priv,
						must_sync);
}
/* Emit blit with arbitrary source and destination offsets and pitches.
 *
 * Builds a 6-register 2D blit packet (GUI master control, src/dst
 * pitch-offset, src/dst coordinates, extents), inserting BO relocations
 * for whichever of src_bo/dst_bo is non-NULL, then emits a flush +
 * wait-idle pair.  The BEGIN_*_RELOC/BEGIN_RING dword counts must match
 * the number of OUT_RING_* calls exactly. */
static void
RADEONBlitChunk(ScrnInfoPtr pScrn, struct radeon_bo *src_bo,
		struct radeon_bo *dst_bo, uint32_t datatype,
		uint32_t src_pitch_offset, uint32_t dst_pitch_offset,
		int srcX, int srcY, int dstX, int dstY, int w, int h,
		uint32_t src_domain, uint32_t dst_domain)
{
    RADEONInfoPtr info = RADEONPTR(pScrn);

    /* 6 register writes; 2 relocs with both BOs, 1 with only src */
    if (src_bo && dst_bo) {
	BEGIN_ACCEL_RELOC(6, 2);
    } else if (src_bo && dst_bo == NULL) {
	BEGIN_ACCEL_RELOC(6, 1);
    } else {
	BEGIN_RING(2*6);
    }

    /* memory-to-memory copy, ROP3_S (straight source copy) */
    OUT_RING_REG(RADEON_DP_GUI_MASTER_CNTL,
		 RADEON_GMC_DST_PITCH_OFFSET_CNTL |
		 RADEON_GMC_SRC_PITCH_OFFSET_CNTL |
		 RADEON_GMC_BRUSH_NONE |
		 (datatype << 8) |
		 RADEON_GMC_SRC_DATATYPE_COLOR |
		 RADEON_ROP3_S |
		 RADEON_DP_SRC_SOURCE_MEMORY |
		 RADEON_GMC_CLR_CMP_CNTL_DIS |
		 RADEON_GMC_WR_MSK_DIS);
    OUT_RING_REG(RADEON_SRC_PITCH_OFFSET, src_pitch_offset);
    if (src_bo) {
	OUT_RING_RELOC(src_bo, src_domain, 0);
    }
    OUT_RING_REG(RADEON_DST_PITCH_OFFSET, dst_pitch_offset);
    if (dst_bo) {
	OUT_RING_RELOC(dst_bo, 0, dst_domain);
    }
    OUT_RING_REG(RADEON_SRC_Y_X, (srcY << 16) | srcX);
    OUT_RING_REG(RADEON_DST_Y_X, (dstY << 16) | dstX);
    OUT_RING_REG(RADEON_DST_HEIGHT_WIDTH, (h << 16) | w);
    ADVANCE_RING();

    /* flush the 2D destination cache and wait for the engine to idle */
    BEGIN_RING(2*2);
    OUT_RING_REG(RADEON_DSTCACHE_CTLSTAT, RADEON_RB2D_DC_FLUSH_ALL);
    OUT_RING_REG(RADEON_WAIT_UNTIL,
                 RADEON_WAIT_2D_IDLECLEAN | RADEON_WAIT_DMA_GUI_IDLE);
    ADVANCE_RING();
}
static void RADEONSetColorKey(ScrnInfoPtr pScrn, CARD32 colorKey) { RADEONInfoPtr info = RADEONPTR(pScrn); unsigned char *RADEONMMIO = info->MMIO; CARD32 min, max; CARD8 r, g, b; if (info->CurrentLayout.depth > 8) { CARD32 rbits, gbits, bbits; rbits = (colorKey & pScrn->mask.red) >> pScrn->offset.red; gbits = (colorKey & pScrn->mask.green) >> pScrn->offset.green; bbits = (colorKey & pScrn->mask.blue) >> pScrn->offset.blue; r = rbits << (8 - pScrn->weight.red); g = gbits << (8 - pScrn->weight.green); b = bbits << (8 - pScrn->weight.blue); }
/*
 * Attach a glamor EGL texture to the pixmap's buffer object, defaulting
 * the private's stride to the pixmap's devKind when it is still unset.
 * Trivially succeeds when glamor is not in use.
 */
Bool
radeon_glamor_create_textured_pixmap(PixmapPtr pixmap)
{
	ScrnInfoPtr scrn = xf86ScreenToScrn(pixmap->drawable.pScreen);
	RADEONInfoPtr info = RADEONPTR(scrn);
	struct radeon_pixmap *priv;

	if (!info->use_glamor)
		return TRUE;

	priv = radeon_get_pixmap_private(pixmap);
	if (!priv->stride)
		priv->stride = pixmap->devKind;

	return glamor_egl_create_textured_pixmap(pixmap, priv->bo->handle,
						 priv->stride) ? TRUE : FALSE;
}
/*
 * Poll the VIP bus register-access state.
 *
 * Reads VIPH_TIMEOUT_STAT; if the register-access timeout bit is set the
 * bus appears locked up, so the timeout is acknowledged (keeping the
 * per-channel bits) and VIP_RESET is reported unless the bus is still
 * busy.  Otherwise VIPH_CONTROL bit 0x2000 distinguishes VIP_BUSY from
 * VIP_IDLE.
 */
static uint32_t RADEONVIP_idle(GENERIC_BUS_Ptr b)
{
    ScrnInfoPtr pScrn = b->pScrn;
    RADEONInfoPtr info = RADEONPTR(pScrn);
    unsigned char *RADEONMMIO = info->MMIO;
    uint32_t timeout;

    RADEONWaitForIdleMMIO(pScrn);
    timeout = INREG(RADEON_VIPH_TIMEOUT_STAT);
    if(timeout & RADEON_VIPH_TIMEOUT_STAT__VIPH_REG_STAT) /* lockup ?? */
    {
	/* acknowledge the register-access timeout */
	RADEONWaitForFifo(pScrn, 2);
	OUTREG(RADEON_VIPH_TIMEOUT_STAT, (timeout & 0xffffff00) | RADEON_VIPH_TIMEOUT_STAT__VIPH_REGR_AK);
	RADEONWaitForIdleMMIO(pScrn);
	return (INREG(RADEON_VIPH_CONTROL) & 0x2000) ? VIP_BUSY : VIP_RESET;
    }
    RADEONWaitForIdleMMIO(pScrn);
    return (INREG(RADEON_VIPH_CONTROL) & 0x2000) ? VIP_BUSY : VIP_IDLE ;
}
/*
 * Create glamor's screen-level resources: the glyph caches and the
 * EGL-textured screen bound to the front buffer object.
 * Trivially succeeds when glamor is not in use.
 */
Bool
radeon_glamor_create_screen_resources(ScreenPtr screen)
{
	ScrnInfoPtr scrn = xf86ScreenToScrn(screen);
	RADEONInfoPtr info = RADEONPTR(scrn);

	if (!info->use_glamor)
		return TRUE;

	if (!glamor_glyphs_init(screen))
		return FALSE;

	return glamor_egl_create_textured_screen_ext(screen,
						     info->front_bo->handle,
						     scrn->displayWidth *
						     info->pixel_bytes,
						     NULL) ? TRUE : FALSE;
}
/*
 * Glyphs wrapper: map dst read-write and src read-only for CPU access,
 * call the saved (software) Glyphs, then unmap in reverse order.
 * Silently does nothing if dst cannot be prepared.
 */
static void
radeon_glamor_glyphs(CARD8 op, PicturePtr src, PicturePtr dst,
		     PictFormatPtr maskFormat, INT16 xSrc, INT16 ySrc,
		     int nlist, GlyphListPtr list, GlyphPtr *glyphs)
{
	ScrnInfoPtr scrn = xf86ScreenToScrn(dst->pDrawable->pScreen);
	RADEONInfoPtr info = RADEONPTR(scrn);

	if (!radeon_glamor_picture_prepare_access_cpu_rw(scrn, dst))
		return;

	if (radeon_glamor_picture_prepare_access_cpu_ro(scrn, src)) {
		info->glamor.SavedGlyphs(op, src, dst, maskFormat, xSrc, ySrc,
					 nlist, list, glyphs);
		radeon_glamor_picture_finish_access_cpu(src);
	}

	radeon_glamor_picture_finish_access_cpu(dst);
}
/*
 * Poll the VIP FIFO state for the channels in `channel` (low 4 bits).
 *
 * If any requested channel shows a FIFO timeout, the condition is logged
 * and acknowledged (preserving the other timeout bits) and VIP_RESET is
 * reported unless the bus is still busy.  Otherwise VIPH_CONTROL bit
 * 0x2000 distinguishes VIP_BUSY from VIP_IDLE.
 */
static uint32_t RADEONVIP_fifo_idle(GENERIC_BUS_Ptr b, uint8_t channel)
{
    ScrnInfoPtr pScrn = b->pScrn;
    RADEONInfoPtr info = RADEONPTR(pScrn);
    unsigned char *RADEONMMIO = info->MMIO;
    uint32_t timeout;

    RADEONWaitForIdleMMIO(pScrn);
    timeout = INREG(VIPH_TIMEOUT_STAT);
    if((timeout & 0x0000000f) & channel) /* lockup ?? */
    {
	xf86DrvMsg(b->pScrn->scrnIndex, X_INFO, "RADEON_fifo_idle\n");
	/* acknowledge the FIFO timeout for the affected channel(s) */
	RADEONWaitForFifo(pScrn, 2);
	OUTREG(VIPH_TIMEOUT_STAT, (timeout & 0xfffffff0) | channel);
	RADEONWaitForIdleMMIO(pScrn);
	return (INREG(VIPH_CONTROL) & 0x2000) ? VIP_BUSY : VIP_RESET;
    }
    RADEONWaitForIdleMMIO(pScrn);
    return (INREG(VIPH_CONTROL) & 0x2000) ? VIP_BUSY : VIP_IDLE ;
}
/*
 * Emit the cached 2D engine state (info->state_2d) to the command ring.
 *
 * Skipped entirely when no 2D operation is in progress and none is being
 * started (op == 0).  The reloc/register counts passed to
 * BEGIN_ACCEL_RELOC depend on whether a source is part of the state:
 * 10 registers / 2 relocs with a source, 9 / 1 without.  Records `op` as
 * the current operation and registers itself as the re-emit callback.
 */
static void Emit2DState(ScrnInfoPtr pScrn, int op)
{
    RADEONInfoPtr info = RADEONPTR(pScrn);
    int has_src;

    /* don't emit if no operation in progress */
    if (info->state_2d.op == 0 && op == 0)
	return;

    has_src = info->state_2d.src_pitch_offset || info->state_2d.src_bo;

    if (has_src) {
	BEGIN_ACCEL_RELOC(10, 2);
    } else {
	BEGIN_ACCEL_RELOC(9, 1);
    }
    OUT_RING_REG(RADEON_DEFAULT_SC_BOTTOM_RIGHT, info->state_2d.default_sc_bottom_right);
    OUT_RING_REG(RADEON_DP_GUI_MASTER_CNTL, info->state_2d.dp_gui_master_cntl);
    OUT_RING_REG(RADEON_DP_BRUSH_FRGD_CLR, info->state_2d.dp_brush_frgd_clr);
    OUT_RING_REG(RADEON_DP_BRUSH_BKGD_CLR, info->state_2d.dp_brush_bkgd_clr);
    OUT_RING_REG(RADEON_DP_SRC_FRGD_CLR, info->state_2d.dp_src_frgd_clr);
    OUT_RING_REG(RADEON_DP_SRC_BKGD_CLR, info->state_2d.dp_src_bkgd_clr);
    OUT_RING_REG(RADEON_DP_WRITE_MASK, info->state_2d.dp_write_mask);
    OUT_RING_REG(RADEON_DP_CNTL, info->state_2d.dp_cntl);

    /* destination pitch/offset plus its BO relocation */
    OUT_RING_REG(RADEON_DST_PITCH_OFFSET, info->state_2d.dst_pitch_offset);
    OUT_RING_RELOC(info->state_2d.dst_bo, 0, info->state_2d.dst_domain);

    if (has_src) {
	/* source pitch/offset plus its BO relocation */
	OUT_RING_REG(RADEON_SRC_PITCH_OFFSET, info->state_2d.src_pitch_offset);
	OUT_RING_RELOC(info->state_2d.src_bo, RADEON_GEM_DOMAIN_GTT|RADEON_GEM_DOMAIN_VRAM, 0);
    }

    ADVANCE_RING();

    if (op)
	info->state_2d.op = op;
    /* so the driver can re-emit this state after a flush */
    info->reemit_current2d = Emit2DState;
}
/*
 * PolyFillRect wrapper.
 *
 * Uses the saved (glamor) PolyFillRect when acceleration is forced or
 * the pixmap's private exists without a BO and GPU access can be
 * prepared; otherwise falls back to fbPolyFillRect with CPU access
 * mapped around it.
 */
static void
radeon_glamor_poly_fill_rect(DrawablePtr pDrawable, GCPtr pGC, int nrect,
			     xRectangle *prect)
{
	ScrnInfoPtr scrn = xf86ScreenToScrn(pDrawable->pScreen);
	RADEONInfoPtr info = RADEONPTR(scrn);
	PixmapPtr pixmap = get_drawable_pixmap(pDrawable);
	struct radeon_pixmap *priv = radeon_get_pixmap_private(pixmap);
	Bool try_gpu = info->accel_state->force || (priv && !priv->bo);

	if (try_gpu && radeon_glamor_prepare_access_gpu(priv)) {
		info->glamor.SavedPolyFillRect(pDrawable, pGC, nrect, prect);
		radeon_glamor_finish_access_gpu_rw(info, priv);
		return;
	}

	if (!radeon_glamor_prepare_access_cpu_rw(scrn, pixmap, priv))
		return;

	if (radeon_glamor_prepare_access_gc(scrn, pGC)) {
		fbPolyFillRect(pDrawable, pGC, nrect, prect);
		radeon_glamor_finish_access_gc(pGC);
	}
	radeon_glamor_finish_access_cpu(pixmap);
}
void EVERGREENDisplayTexturedVideo(ScrnInfoPtr pScrn, RADEONPortPrivPtr pPriv) { RADEONInfoPtr info = RADEONPTR(pScrn); struct radeon_accel_state *accel_state = info->accel_state; PixmapPtr pPixmap = pPriv->pPixmap; BoxPtr pBox = REGION_RECTS(&pPriv->clip); int nBox = REGION_NUM_RECTS(&pPriv->clip); int dstxoff, dstyoff; struct r600_accel_object src_obj, dst_obj; cb_config_t cb_conf; tex_resource_t tex_res; tex_sampler_t tex_samp; shader_config_t vs_conf, ps_conf; /* * y' = y - .0625 * u' = u - .5 * v' = v - .5; * * r = 1.1643 * y' + 0.0 * u' + 1.5958 * v' * g = 1.1643 * y' - 0.39173 * u' - 0.81290 * v' * b = 1.1643 * y' + 2.017 * u' + 0.0 * v' * * DP3 might look like the straightforward solution * but we'd need to move the texture yuv values in * the same reg for this to work. Therefore use MADs. * Brightness just adds to the off constant. * Contrast is multiplication of luminance. * Saturation and hue change the u and v coeffs. * Default values (before adjustments - depend on colorspace): * yco = 1.1643 * uco = 0, -0.39173, 2.017 * vco = 1.5958, -0.8129, 0 * off = -0.0625 * yco + -0.5 * uco[r] + -0.5 * vco[r], * -0.0625 * yco + -0.5 * uco[g] + -0.5 * vco[g], * -0.0625 * yco + -0.5 * uco[b] + -0.5 * vco[b], * * temp = MAD(yco, yuv.yyyy, off) * temp = MAD(uco, yuv.uuuu, temp) * result = MAD(vco, yuv.vvvv, temp) */ /* TODO: calc consts in the shader */ const float Loff = -0.0627; const float Coff = -0.502; float uvcosf, uvsinf; float yco; float uco[3], vco[3], off[3]; float bright, cont, gamma; int ref = pPriv->transform_index; Bool needgamma = FALSE; float *ps_alu_consts; const_config_t ps_const_conf; float *vs_alu_consts; const_config_t vs_const_conf; cont = RTFContrast(pPriv->contrast); bright = RTFBrightness(pPriv->brightness); gamma = (float)pPriv->gamma / 1000.0; uvcosf = RTFSaturation(pPriv->saturation) * cos(RTFHue(pPriv->hue)); uvsinf = RTFSaturation(pPriv->saturation) * sin(RTFHue(pPriv->hue)); /* overlay video also does pre-gamma contrast/sat adjust, 
should we? */ yco = trans[ref].RefLuma * cont; uco[0] = -trans[ref].RefRCr * uvsinf; uco[1] = trans[ref].RefGCb * uvcosf - trans[ref].RefGCr * uvsinf; uco[2] = trans[ref].RefBCb * uvcosf; vco[0] = trans[ref].RefRCr * uvcosf; vco[1] = trans[ref].RefGCb * uvsinf + trans[ref].RefGCr * uvcosf; vco[2] = trans[ref].RefBCb * uvsinf; off[0] = Loff * yco + Coff * (uco[0] + vco[0]) + bright; off[1] = Loff * yco + Coff * (uco[1] + vco[1]) + bright; off[2] = Loff * yco + Coff * (uco[2] + vco[2]) + bright; // XXX gamma = 1.0; if (gamma != 1.0) { needgamma = TRUE; /* note: gamma correction is out = in ^ gamma; gpu can only do LG2/EX2 therefore we transform into in ^ gamma = 2 ^ (log2(in) * gamma). Lots of scalar ops, unfortunately (better solution?) - without gamma that's 3 inst, with gamma it's 10... could use different gamma factors per channel, if that's of any use. */ } CLEAR (cb_conf); CLEAR (tex_res); CLEAR (tex_samp); CLEAR (vs_conf); CLEAR (ps_conf); CLEAR (vs_const_conf); CLEAR (ps_const_conf); dst_obj.offset = 0; src_obj.offset = 0; dst_obj.bo = radeon_get_pixmap_bo(pPixmap); dst_obj.tiling_flags = radeon_get_pixmap_tiling(pPixmap); dst_obj.surface = radeon_get_pixmap_surface(pPixmap); dst_obj.pitch = exaGetPixmapPitch(pPixmap) / (pPixmap->drawable.bitsPerPixel / 8); src_obj.pitch = pPriv->src_pitch; src_obj.width = pPriv->w; src_obj.height = pPriv->h; src_obj.bpp = 16; src_obj.domain = RADEON_GEM_DOMAIN_VRAM | RADEON_GEM_DOMAIN_GTT; src_obj.bo = pPriv->src_bo[pPriv->currentBuffer]; src_obj.tiling_flags = 0; src_obj.surface = NULL; dst_obj.width = pPixmap->drawable.width; dst_obj.height = pPixmap->drawable.height; dst_obj.bpp = pPixmap->drawable.bitsPerPixel; dst_obj.domain = RADEON_GEM_DOMAIN_VRAM; if (!R600SetAccelState(pScrn, &src_obj, NULL, &dst_obj, accel_state->xv_vs_offset, accel_state->xv_ps_offset, 3, 0xffffffff)) return; #ifdef COMPOSITE dstxoff = -pPixmap->screen_x + pPixmap->drawable.x; dstyoff = -pPixmap->screen_y + pPixmap->drawable.y; #else dstxoff = 0; 
dstyoff = 0; #endif radeon_vbo_check(pScrn, &accel_state->vbo, 16); radeon_vbo_check(pScrn, &accel_state->cbuf, 512); radeon_cp_start(pScrn); evergreen_set_default_state(pScrn); evergreen_set_generic_scissor(pScrn, 0, 0, accel_state->dst_obj.width, accel_state->dst_obj.height); evergreen_set_screen_scissor(pScrn, 0, 0, accel_state->dst_obj.width, accel_state->dst_obj.height); evergreen_set_window_scissor(pScrn, 0, 0, accel_state->dst_obj.width, accel_state->dst_obj.height); /* PS bool constant */ switch(pPriv->id) { case FOURCC_YV12: case FOURCC_I420: evergreen_set_bool_consts(pScrn, SQ_BOOL_CONST_ps, (1 << 0)); break; case FOURCC_UYVY: case FOURCC_YUY2: default: evergreen_set_bool_consts(pScrn, SQ_BOOL_CONST_ps, (0 << 0)); break; } /* Shader */ vs_conf.shader_addr = accel_state->vs_mc_addr; vs_conf.shader_size = accel_state->vs_size; vs_conf.num_gprs = 2; vs_conf.stack_size = 0; vs_conf.bo = accel_state->shaders_bo; evergreen_vs_setup(pScrn, &vs_conf, RADEON_GEM_DOMAIN_VRAM); ps_conf.shader_addr = accel_state->ps_mc_addr; ps_conf.shader_size = accel_state->ps_size; ps_conf.num_gprs = 3; ps_conf.stack_size = 1; ps_conf.clamp_consts = 0; ps_conf.export_mode = 2; ps_conf.bo = accel_state->shaders_bo; evergreen_ps_setup(pScrn, &ps_conf, RADEON_GEM_DOMAIN_VRAM); /* Texture */ switch(pPriv->id) { case FOURCC_YV12: case FOURCC_I420: accel_state->src_size[0] = accel_state->src_obj[0].pitch * pPriv->h; /* Y texture */ tex_res.id = 0; tex_res.w = accel_state->src_obj[0].width; tex_res.h = accel_state->src_obj[0].height; tex_res.pitch = accel_state->src_obj[0].pitch; tex_res.depth = 0; tex_res.dim = SQ_TEX_DIM_2D; tex_res.base = accel_state->src_obj[0].offset; tex_res.mip_base = accel_state->src_obj[0].offset; tex_res.size = accel_state->src_size[0]; tex_res.bo = accel_state->src_obj[0].bo; tex_res.mip_bo = accel_state->src_obj[0].bo; tex_res.surface = NULL; tex_res.format = FMT_8; tex_res.dst_sel_x = SQ_SEL_X; /* Y */ tex_res.dst_sel_y = SQ_SEL_1; tex_res.dst_sel_z = 
SQ_SEL_1; tex_res.dst_sel_w = SQ_SEL_1; tex_res.base_level = 0; tex_res.last_level = 0; tex_res.perf_modulation = 0; tex_res.interlaced = 0; if (accel_state->src_obj[0].tiling_flags == 0) tex_res.array_mode = 1; evergreen_set_tex_resource(pScrn, &tex_res, accel_state->src_obj[0].domain); /* Y sampler */ tex_samp.id = 0; tex_samp.clamp_x = SQ_TEX_CLAMP_LAST_TEXEL; tex_samp.clamp_y = SQ_TEX_CLAMP_LAST_TEXEL; tex_samp.clamp_z = SQ_TEX_WRAP; /* xxx: switch to bicubic */ tex_samp.xy_mag_filter = SQ_TEX_XY_FILTER_BILINEAR; tex_samp.xy_min_filter = SQ_TEX_XY_FILTER_BILINEAR; tex_samp.z_filter = SQ_TEX_Z_FILTER_NONE; tex_samp.mip_filter = 0; /* no mipmap */ evergreen_set_tex_sampler(pScrn, &tex_samp); /* U or V texture */ tex_res.id = 1; tex_res.format = FMT_8; tex_res.w = accel_state->src_obj[0].width >> 1; tex_res.h = accel_state->src_obj[0].height >> 1; tex_res.pitch = RADEON_ALIGN(accel_state->src_obj[0].pitch >> 1, pPriv->hw_align); tex_res.dst_sel_x = SQ_SEL_X; /* V or U */ tex_res.dst_sel_y = SQ_SEL_1; tex_res.dst_sel_z = SQ_SEL_1; tex_res.dst_sel_w = SQ_SEL_1; tex_res.interlaced = 0; tex_res.base = accel_state->src_obj[0].offset + pPriv->planev_offset; tex_res.mip_base = accel_state->src_obj[0].offset + pPriv->planev_offset; tex_res.size = tex_res.pitch * (pPriv->h >> 1); if (accel_state->src_obj[0].tiling_flags == 0) tex_res.array_mode = 1; evergreen_set_tex_resource(pScrn, &tex_res, accel_state->src_obj[0].domain); /* U or V sampler */ tex_samp.id = 1; evergreen_set_tex_sampler(pScrn, &tex_samp); /* U or V texture */ tex_res.id = 2; tex_res.format = FMT_8; tex_res.w = accel_state->src_obj[0].width >> 1; tex_res.h = accel_state->src_obj[0].height >> 1; tex_res.pitch = RADEON_ALIGN(accel_state->src_obj[0].pitch >> 1, pPriv->hw_align); tex_res.dst_sel_x = SQ_SEL_X; /* V or U */ tex_res.dst_sel_y = SQ_SEL_1; tex_res.dst_sel_z = SQ_SEL_1; tex_res.dst_sel_w = SQ_SEL_1; tex_res.interlaced = 0; tex_res.base = accel_state->src_obj[0].offset + pPriv->planeu_offset; 
tex_res.mip_base = accel_state->src_obj[0].offset + pPriv->planeu_offset; tex_res.size = tex_res.pitch * (pPriv->h >> 1); if (accel_state->src_obj[0].tiling_flags == 0) tex_res.array_mode = 1; evergreen_set_tex_resource(pScrn, &tex_res, accel_state->src_obj[0].domain); /* UV sampler */ tex_samp.id = 2; evergreen_set_tex_sampler(pScrn, &tex_samp); break; case FOURCC_UYVY: case FOURCC_YUY2: default: accel_state->src_size[0] = accel_state->src_obj[0].pitch * pPriv->h; /* Y texture */ tex_res.id = 0; tex_res.w = accel_state->src_obj[0].width; tex_res.h = accel_state->src_obj[0].height; tex_res.pitch = accel_state->src_obj[0].pitch >> 1; tex_res.depth = 0; tex_res.dim = SQ_TEX_DIM_2D; tex_res.base = accel_state->src_obj[0].offset; tex_res.mip_base = accel_state->src_obj[0].offset; tex_res.size = accel_state->src_size[0]; tex_res.bo = accel_state->src_obj[0].bo; tex_res.mip_bo = accel_state->src_obj[0].bo; tex_res.surface = NULL; tex_res.format = FMT_8_8; if (pPriv->id == FOURCC_UYVY) tex_res.dst_sel_x = SQ_SEL_Y; /* Y */ else tex_res.dst_sel_x = SQ_SEL_X; /* Y */ tex_res.dst_sel_y = SQ_SEL_1; tex_res.dst_sel_z = SQ_SEL_1; tex_res.dst_sel_w = SQ_SEL_1; tex_res.base_level = 0; tex_res.last_level = 0; tex_res.perf_modulation = 0; tex_res.interlaced = 0; if (accel_state->src_obj[0].tiling_flags == 0) tex_res.array_mode = 1; evergreen_set_tex_resource(pScrn, &tex_res, accel_state->src_obj[0].domain); /* Y sampler */ tex_samp.id = 0; tex_samp.clamp_x = SQ_TEX_CLAMP_LAST_TEXEL; tex_samp.clamp_y = SQ_TEX_CLAMP_LAST_TEXEL; tex_samp.clamp_z = SQ_TEX_WRAP; tex_samp.xy_mag_filter = SQ_TEX_XY_FILTER_BILINEAR; tex_samp.xy_min_filter = SQ_TEX_XY_FILTER_BILINEAR; tex_samp.z_filter = SQ_TEX_Z_FILTER_NONE; tex_samp.mip_filter = 0; /* no mipmap */ evergreen_set_tex_sampler(pScrn, &tex_samp); /* UV texture */ tex_res.id = 1; tex_res.format = FMT_8_8_8_8; tex_res.w = accel_state->src_obj[0].width >> 1; tex_res.h = accel_state->src_obj[0].height; tex_res.pitch = accel_state->src_obj[0].pitch 
>> 2; if (pPriv->id == FOURCC_UYVY) { tex_res.dst_sel_x = SQ_SEL_X; /* V */ tex_res.dst_sel_y = SQ_SEL_Z; /* U */ } else { tex_res.dst_sel_x = SQ_SEL_Y; /* V */ tex_res.dst_sel_y = SQ_SEL_W; /* U */ } tex_res.dst_sel_z = SQ_SEL_1; tex_res.dst_sel_w = SQ_SEL_1; tex_res.interlaced = 0; tex_res.base = accel_state->src_obj[0].offset; tex_res.mip_base = accel_state->src_obj[0].offset; tex_res.size = accel_state->src_size[0]; if (accel_state->src_obj[0].tiling_flags == 0) tex_res.array_mode = 1; evergreen_set_tex_resource(pScrn, &tex_res, accel_state->src_obj[0].domain); /* UV sampler */ tex_samp.id = 1; evergreen_set_tex_sampler(pScrn, &tex_samp); break; } cb_conf.id = 0; cb_conf.w = accel_state->dst_obj.pitch; cb_conf.h = accel_state->dst_obj.height; cb_conf.base = accel_state->dst_obj.offset; cb_conf.bo = accel_state->dst_obj.bo; cb_conf.surface = accel_state->dst_obj.surface; switch (accel_state->dst_obj.bpp) { case 16: if (pPixmap->drawable.depth == 15) { cb_conf.format = COLOR_1_5_5_5; cb_conf.comp_swap = 1; /* ARGB */ } else { cb_conf.format = COLOR_5_6_5; cb_conf.comp_swap = 2; /* RGB */ } #if X_BYTE_ORDER == X_BIG_ENDIAN cb_conf.endian = ENDIAN_8IN16; #endif break; case 32: cb_conf.format = COLOR_8_8_8_8; cb_conf.comp_swap = 1; /* ARGB */ #if X_BYTE_ORDER == X_BIG_ENDIAN cb_conf.endian = ENDIAN_8IN32; #endif break; default: return; } cb_conf.source_format = EXPORT_4C_16BPC; cb_conf.blend_clamp = 1; cb_conf.pmask = 0xf; cb_conf.rop = 3; if (accel_state->dst_obj.tiling_flags == 0) { cb_conf.array_mode = 1; cb_conf.non_disp_tiling = 1; } evergreen_set_render_target(pScrn, &cb_conf, accel_state->dst_obj.domain); evergreen_set_spi(pScrn, (1 - 1), 1); /* PS alu constants */ ps_const_conf.size_bytes = 256; ps_const_conf.type = SHADER_TYPE_PS; ps_alu_consts = radeon_vbo_space(pScrn, &accel_state->cbuf, 256); ps_const_conf.bo = accel_state->cbuf.vb_bo; ps_const_conf.const_addr = accel_state->cbuf.vb_mc_addr + accel_state->cbuf.vb_offset; ps_const_conf.cpu_ptr = (uint32_t 
*)(char *)ps_alu_consts; ps_alu_consts[0] = off[0]; ps_alu_consts[1] = off[1]; ps_alu_consts[2] = off[2]; ps_alu_consts[3] = yco; ps_alu_consts[4] = uco[0]; ps_alu_consts[5] = uco[1]; ps_alu_consts[6] = uco[2]; ps_alu_consts[7] = gamma; ps_alu_consts[8] = vco[0]; ps_alu_consts[9] = vco[1]; ps_alu_consts[10] = vco[2]; ps_alu_consts[11] = 0.0; radeon_vbo_commit(pScrn, &accel_state->cbuf); evergreen_set_alu_consts(pScrn, &ps_const_conf, RADEON_GEM_DOMAIN_GTT); /* VS alu constants */ vs_const_conf.size_bytes = 256; vs_const_conf.type = SHADER_TYPE_VS; vs_alu_consts = radeon_vbo_space(pScrn, &accel_state->cbuf, 256); vs_const_conf.bo = accel_state->cbuf.vb_bo; vs_const_conf.const_addr = accel_state->cbuf.vb_mc_addr + accel_state->cbuf.vb_offset; vs_const_conf.cpu_ptr = (uint32_t *)(char *)vs_alu_consts; vs_alu_consts[0] = 1.0 / pPriv->w; vs_alu_consts[1] = 1.0 / pPriv->h; vs_alu_consts[2] = 0.0; vs_alu_consts[3] = 0.0; radeon_vbo_commit(pScrn, &accel_state->cbuf); evergreen_set_alu_consts(pScrn, &vs_const_conf, RADEON_GEM_DOMAIN_GTT); if (pPriv->vsync) { xf86CrtcPtr crtc; if (pPriv->desired_crtc) crtc = pPriv->desired_crtc; else crtc = radeon_pick_best_crtc(pScrn, pPriv->drw_x, pPriv->drw_x + pPriv->dst_w, pPriv->drw_y, pPriv->drw_y + pPriv->dst_h); if (crtc) evergreen_cp_wait_vline_sync(pScrn, pPixmap, crtc, pPriv->drw_y - crtc->y, (pPriv->drw_y - crtc->y) + pPriv->dst_h); } while (nBox--) { int srcX, srcY, srcw, srch; int dstX, dstY, dstw, dsth; float *vb; dstX = pBox->x1 + dstxoff; dstY = pBox->y1 + dstyoff; dstw = pBox->x2 - pBox->x1; dsth = pBox->y2 - pBox->y1; srcX = pPriv->src_x; srcX += ((pBox->x1 - pPriv->drw_x) * pPriv->src_w) / pPriv->dst_w; srcY = pPriv->src_y; srcY += ((pBox->y1 - pPriv->drw_y) * pPriv->src_h) / pPriv->dst_h; srcw = (pPriv->src_w * dstw) / pPriv->dst_w; srch = (pPriv->src_h * dsth) / pPriv->dst_h; vb = radeon_vbo_space(pScrn, &accel_state->vbo, 16); vb[0] = (float)dstX; vb[1] = (float)dstY; vb[2] = (float)srcX; vb[3] = (float)srcY; vb[4] = 
(float)dstX; vb[5] = (float)(dstY + dsth); vb[6] = (float)srcX; vb[7] = (float)(srcY + srch); vb[8] = (float)(dstX + dstw); vb[9] = (float)(dstY + dsth); vb[10] = (float)(srcX + srcw); vb[11] = (float)(srcY + srch); radeon_vbo_commit(pScrn, &accel_state->vbo); pBox++; } evergreen_finish_op(pScrn, 16); DamageDamageRegion(pPriv->pDraw, &pPriv->clip); }
/* Composite hook wrapper around glamor's Composite implementation.
 *
 * Tries to run the composite on the GPU via glamor when the destination is a
 * scanout pixmap and all involved pixmaps can be made GPU-accessible; in any
 * other case (alpha maps present, non-scanout destination, or a failed GPU
 * prepare) it falls back to a software fbComposite with CPU-mapped pixmaps.
 *
 * op/pSrc/pMask/pDst and the coordinate/extent arguments follow the standard
 * X Render Composite signature. pMask may be NULL; pSrc may lack a drawable
 * (e.g. a solid/gradient picture), in which case no src prepare is needed.
 */
static void
radeon_glamor_composite(CARD8 op, PicturePtr pSrc, PicturePtr pMask,
			PicturePtr pDst, INT16 xSrc, INT16 ySrc, INT16 xMask,
			INT16 yMask, INT16 xDst, INT16 yDst, CARD16 width,
			CARD16 height)
{
    ScrnInfoPtr scrn = xf86ScreenToScrn(pDst->pDrawable->pScreen);
    RADEONInfoPtr info;
    PixmapPtr pixmap;
    struct radeon_pixmap *dst_priv, *src_priv = NULL, *mask_priv = NULL;
    Bool gpu_done = FALSE;

    /* Alpha maps are not handled by the GPU path; go straight to software. */
    if (pDst->alphaMap || pSrc->alphaMap || (pMask && pMask->alphaMap))
	goto fallback;

    /* Only take the GPU path for a destination that is the whole scanout
     * pixmap (not a sub-window drawable of it).
     */
    pixmap = get_drawable_pixmap(pDst->pDrawable);
    if (&pixmap->drawable != pDst->pDrawable ||
	pixmap->usage_hint != RADEON_CREATE_PIXMAP_SCANOUT)
	goto fallback;

    dst_priv = radeon_get_pixmap_private(pixmap);
    if (!radeon_glamor_prepare_access_gpu(dst_priv))
	goto fallback;

    info = RADEONPTR(scrn);
    /* Prepare src (if it has a drawable) and mask (if present with a
     * drawable) for GPU access.  The assignments inside the conditions are
     * deliberate: src_priv/mask_priv are only set when the corresponding
     * pixmap exists, so the finish calls below match exactly the prepares
     * that succeeded.  Note `pixmap` is reused as a scratch variable here.
     */
    if (!pSrc->pDrawable ||
	((pixmap = get_drawable_pixmap(pSrc->pDrawable)) &&
	 (src_priv = radeon_get_pixmap_private(pixmap)) &&
	 radeon_glamor_prepare_access_gpu(src_priv))) {
	if (!pMask || !pMask->pDrawable ||
	    ((pixmap = get_drawable_pixmap(pMask->pDrawable)) &&
	     (mask_priv = radeon_get_pixmap_private(pixmap)) &&
	     radeon_glamor_prepare_access_gpu(mask_priv))) {
	    /* All participants are GPU-ready: run glamor's saved hook. */
	    info->glamor.SavedComposite(op, pSrc, pMask, pDst,
					xSrc, ySrc, xMask, yMask,
					xDst, yDst, width, height);
	    gpu_done = TRUE;

	    if (mask_priv)
		radeon_glamor_finish_access_gpu_ro(info, mask_priv);
	}

	if (src_priv)
	    radeon_glamor_finish_access_gpu_ro(info, src_priv);
    }
    radeon_glamor_finish_access_gpu_rw(info, dst_priv);

    if (gpu_done)
	return;

    /* Software path: map everything for CPU access and use fbComposite.
     * Each finish call is nested under its successful prepare, so access
     * is always balanced even on partial failure.
     */
 fallback:
    if (radeon_glamor_picture_prepare_access_cpu_rw(scrn, pDst)) {
	if (radeon_glamor_picture_prepare_access_cpu_ro(scrn, pSrc)) {
	    if (!pMask ||
		radeon_glamor_picture_prepare_access_cpu_ro(scrn, pMask)) {
		fbComposite(op, pSrc, pMask, pDst, xSrc, ySrc, xMask, yMask,
			    xDst, yDst, width, height);

		if (pMask)
		    radeon_glamor_picture_finish_access_cpu(pMask);
	    }
	    radeon_glamor_picture_finish_access_cpu(pSrc);
	}
	radeon_glamor_picture_finish_access_cpu(pDst);
    }
}
/* Allocates memory, either by resizing the allocation pointed to by mem_struct,
 * or by freeing mem_struct (if non-NULL) and allocating a new space.  The size
 * is measured in bytes, and the offset from the beginning of card space is
 * returned.
 *
 * Three mutually exclusive backends, selected at compile/run time:
 *  - KMS (info->cs): allocates a GEM buffer object; *mem_struct receives the
 *    struct radeon_bo*.  Returns (uint32_t)-1 on success (no card offset in
 *    this mode) or 0 on failure.
 *  - EXA (info->useEXA): *mem_struct is an ExaOffscreenArea*; returns its
 *    byte offset, or 0 on failure.
 *  - XAA: *mem_struct is an FBLinearPtr; returns offset in bytes, or 0 on
 *    failure.
 *
 * NOTE(review): a return value of 0 is used both as the failure sentinel and
 * as a (theoretically) valid offset — callers presumably never expect an
 * allocation at offset 0; verify against call sites.
 */
uint32_t
radeon_legacy_allocate_memory(ScrnInfoPtr pScrn,
			      void **mem_struct,
			      int size,
			      int align,
			      int domain)
{
    ScreenPtr pScreen = screenInfo.screens[pScrn->scrnIndex];
    RADEONInfoPtr info = RADEONPTR(pScrn);
    uint32_t offset = 0;

#ifdef XF86DRM_MODE
    if (info->cs) {
	struct radeon_bo *video_bo;

	/* No resize support for BOs: free the old one and open a new one. */
	if (*mem_struct)
	    radeon_legacy_free_memory(pScrn, *mem_struct);

	video_bo = radeon_bo_open(info->bufmgr, 0, size, align, domain, 0);

	*mem_struct = video_bo;
	if (!video_bo)
	    return 0;

	/* BOs have no fixed card offset; callers use the bo handle instead. */
	return (uint32_t)-1;
    }
#endif
#ifdef USE_EXA
    if (info->useEXA) {
	ExaOffscreenArea *area = *mem_struct;

	if (area != NULL) {
	    /* Existing area is large enough: reuse it as-is. */
	    if (area->size >= size)
		return area->offset;

	    exaOffscreenFree(pScreen, area);
	}

	area = exaOffscreenAlloc(pScreen, size, align, TRUE, NULL, NULL);
	*mem_struct = area;
	if (area == NULL)
	    return 0;
	offset = area->offset;
    }
#endif /* USE_EXA */
#ifdef USE_XAA
    if (!info->useEXA) {
	FBLinearPtr linear = *mem_struct;
	int cpp = info->CurrentLayout.bitsPerPixel / 8;

	/* XAA allocates in units of pixels at the screen bpp, so adjust size
	 * appropriately. 
 */
	size = (size + cpp - 1) / cpp;
	align = (align + cpp - 1) / cpp;

	if (linear) {
	    /* Reuse or grow the existing linear area before giving up on it. */
	    if(linear->size >= size)
		return linear->offset * cpp;

	    if(xf86ResizeOffscreenLinear(linear, size))
		return linear->offset * cpp;

	    xf86FreeOffscreenLinear(linear);
	}

	linear = xf86AllocateOffscreenLinear(pScreen, size, align,
					     NULL, NULL, NULL);
	*mem_struct = linear;

	if (!linear) {
	    int max_size;

	    /* First attempt failed: see whether purging unlocked areas could
	     * free enough contiguous space, and if so retry once.
	     */
	    xf86QueryLargestOffscreenLinear(pScreen, &max_size, align,
					    PRIORITY_EXTREME);

	    if (max_size < size)
		return 0;

	    xf86PurgeUnlockedOffscreenAreas(pScreen);
	    linear = xf86AllocateOffscreenLinear(pScreen, size, align,
						 NULL, NULL, NULL);
	    *mem_struct = linear;
	    if (!linear)
		return 0;
	}
	/* Convert the pixel offset back to bytes for the caller. */
	offset = linear->offset * cpp;
    }
#endif /* USE_XAA */

    return offset;
}
/* Calculate appropriate tiling and pitch for a pixmap and allocate a BO that
 * can hold it.
 *
 * On success returns the new buffer object and fills in:
 *   *new_pitch   - pitch in bytes,
 *   *new_surface - the libdrm radeon_surface used (zeroed for pre-R600 or
 *                  when no surface manager is available),
 *   *new_tiling  - the tiling flags, but ONLY when non-zero tiling was
 *                  requested and radeon_bo_set_tiling succeeded (otherwise
 *                  left untouched — caller must pre-initialize it).
 * Returns NULL if surface computation or BO allocation fails.
 */
struct radeon_bo*
radeon_alloc_pixmap_bo(ScrnInfoPtr pScrn, int width, int height, int depth,
		       int usage_hint, int bitsPerPixel, int *new_pitch,
		       struct radeon_surface *new_surface, uint32_t *new_tiling)
{
    RADEONInfoPtr info = RADEONPTR(pScrn);
    int pitch, base_align;
    uint32_t size, heighta;
    int cpp = bitsPerPixel / 8;
    uint32_t tiling = 0;
    struct radeon_surface surface;
    struct radeon_bo *bo;
    int domain = RADEON_GEM_DOMAIN_VRAM;

    if (usage_hint) {
	/* Honour explicit tiling requests only when color tiling is allowed. */
	if (info->allowColorTiling) {
	    if (usage_hint & RADEON_CREATE_PIXMAP_TILING_MACRO)
		tiling |= RADEON_TILING_MACRO;
	    if (usage_hint & RADEON_CREATE_PIXMAP_TILING_MICRO)
		tiling |= RADEON_TILING_MICRO;
	}
	/* Depth buffers are always fully tiled. */
	if (usage_hint & RADEON_CREATE_PIXMAP_DEPTH)
	    tiling |= RADEON_TILING_MACRO | RADEON_TILING_MICRO;

#ifdef CREATE_PIXMAP_USAGE_SHARED
	/* Shared (prime) pixmaps must be linear and CPU-reachable in GTT. */
	if ((usage_hint & 0xffff) == CREATE_PIXMAP_USAGE_SHARED) {
	    tiling = 0;
	    domain = RADEON_GEM_DOMAIN_GTT;
	}
#endif
    }

    /* Small pixmaps must not be macrotiled on R300, hw cannot sample them
     * correctly because samplers automatically switch to macrolinear. 
 */
    if (info->ChipFamily >= CHIP_FAMILY_R300 &&
	info->ChipFamily <= CHIP_FAMILY_RS740 &&
	(tiling & RADEON_TILING_MACRO) &&
	!RADEONMacroSwitch(width, height, bitsPerPixel, tiling,
			   info->ChipFamily >= CHIP_FAMILY_RV350)) {
	tiling &= ~RADEON_TILING_MACRO;
    }

    /* Default (pre-R600 / no surface manager) layout: aligned pitch and
     * height, size rounded up to a GPU page.
     */
    heighta = RADEON_ALIGN(height, drmmode_get_height_align(pScrn, tiling));
    pitch = RADEON_ALIGN(width,
			 drmmode_get_pitch_align(pScrn, cpp, tiling)) * cpp;
    base_align = drmmode_get_base_align(pScrn, cpp, tiling);
    size = RADEON_ALIGN(heighta * pitch, RADEON_GPU_PAGE_SIZE);
    memset(&surface, 0, sizeof(struct radeon_surface));

    /* R600+: let libdrm's surface manager compute the real layout, which
     * then overrides size/base_align/pitch/tiling computed above.
     */
    if (info->ChipFamily >= CHIP_FAMILY_R600 && info->surf_man) {
	if (width) {
	    surface.npix_x = width;
	    /* need to align height to 8 for old kernel */
	    surface.npix_y = RADEON_ALIGN(height, 8);
	    surface.npix_z = 1;
	    surface.blk_w = 1;
	    surface.blk_h = 1;
	    surface.blk_d = 1;
	    surface.array_size = 1;
	    surface.last_level = 0;
	    surface.bpe = cpp;
	    surface.nsamples = 1;
	    if (height < 128) {
		/* disable 2d tiling for small surface to work around
		 * the fact that ddx align height to 8 pixel for old
		 * obscure reason i can't remember
		 */
		tiling &= ~RADEON_TILING_MACRO;
	    }
	    surface.flags = RADEON_SURF_SCANOUT;
	    /* we are requiring a recent enough libdrm version */
	    surface.flags |= RADEON_SURF_HAS_TILE_MODE_INDEX;
	    surface.flags |= RADEON_SURF_SET(RADEON_SURF_TYPE_2D, TYPE);
	    /* Start from LINEAR, then upgrade the MODE field; the 2D check
	     * after the 1D one means macro tiling wins when both are set.
	     */
	    surface.flags |= RADEON_SURF_SET(RADEON_SURF_MODE_LINEAR, MODE);
	    if ((tiling & RADEON_TILING_MICRO)) {
		surface.flags = RADEON_SURF_CLR(surface.flags, MODE);
		surface.flags |= RADEON_SURF_SET(RADEON_SURF_MODE_1D, MODE);
	    }
	    if ((tiling & RADEON_TILING_MACRO)) {
		surface.flags = RADEON_SURF_CLR(surface.flags, MODE);
		surface.flags |= RADEON_SURF_SET(RADEON_SURF_MODE_2D, MODE);
	    }
	    if (usage_hint & RADEON_CREATE_PIXMAP_SZBUFFER) {
		surface.flags |= RADEON_SURF_ZBUFFER;
		surface.flags |= RADEON_SURF_SBUFFER;
	    }
	    if (radeon_surface_best(info->surf_man, &surface)) {
		return NULL;
	    }
	    if (radeon_surface_init(info->surf_man, &surface)) {
		return NULL;
	    }
	    size = surface.bo_size;
	    base_align = surface.bo_alignment;
	    pitch = surface.level[0].pitch_bytes;
	    /* Rebuild the kernel tiling flags from the mode the surface
	     * manager actually chose (it may have demoted our request).
	     */
	    tiling = 0;
	    switch (surface.level[0].mode) {
	    case RADEON_SURF_MODE_2D:
		tiling |= RADEON_TILING_MACRO;
		tiling |= surface.bankw << RADEON_TILING_EG_BANKW_SHIFT;
		tiling |= surface.bankh << RADEON_TILING_EG_BANKH_SHIFT;
		tiling |= surface.mtilea << RADEON_TILING_EG_MACRO_TILE_ASPECT_SHIFT;
		tiling |= eg_tile_split(surface.tile_split) << RADEON_TILING_EG_TILE_SPLIT_SHIFT;
		tiling |= eg_tile_split(surface.stencil_tile_split) << RADEON_TILING_EG_STENCIL_TILE_SPLIT_SHIFT;
		break;
	    case RADEON_SURF_MODE_1D:
		tiling |= RADEON_TILING_MICRO;
		break;
	    default:
		break;
	    }
	}
    }

    bo = radeon_bo_open(info->bufmgr, 0, size, base_align, domain, 0);

    /* Only report tiling to the caller if the kernel accepted it. */
    if (bo && tiling && radeon_bo_set_tiling(bo, tiling, pitch) == 0)
	*new_tiling = tiling;

    *new_surface = surface;
    *new_pitch = pitch;
    return bo;
}