static void
nouveau_fifo_irq_handler(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_engine *engine = &dev_priv->Engine;
	uint32_t status, reassign;

	reassign = NV_READ(NV03_PFIFO_CACHES) & 1;
	while ((status = NV_READ(NV03_PFIFO_INTR_0))) {
		uint32_t chid, get;

		NV_WRITE(NV03_PFIFO_CACHES, 0);

		chid = engine->fifo.channel_id(dev);
		get  = NV_READ(NV03_PFIFO_CACHE1_GET);

		if (status & NV_PFIFO_INTR_CACHE_ERROR) {
			uint32_t mthd, data;
			int ptr;

			ptr = get >> 2;
			if (dev_priv->card_type < NV_40) {
				mthd = NV_READ(NV04_PFIFO_CACHE1_METHOD(ptr));
				data = NV_READ(NV04_PFIFO_CACHE1_DATA(ptr));
			} else {
				mthd = NV_READ(NV40_PFIFO_CACHE1_METHOD(ptr));
				data = NV_READ(NV40_PFIFO_CACHE1_DATA(ptr));
			}

			DRM_INFO("PFIFO_CACHE_ERROR - "
				 "Ch %d/%d Mthd 0x%04x Data 0x%08x\n",
				 chid, (mthd >> 13) & 7, mthd & 0x1ffc, data);

			/* Skip past the offending method and restart the puller */
			NV_WRITE(NV03_PFIFO_CACHE1_GET, get + 4);
			NV_WRITE(NV04_PFIFO_CACHE1_PULL0, 1);

			status &= ~NV_PFIFO_INTR_CACHE_ERROR;
			NV_WRITE(NV03_PFIFO_INTR_0, NV_PFIFO_INTR_CACHE_ERROR);
		}

		if (status & NV_PFIFO_INTR_DMA_PUSHER) {
			DRM_INFO("PFIFO_DMA_PUSHER - Ch %d\n", chid);

			status &= ~NV_PFIFO_INTR_DMA_PUSHER;
			NV_WRITE(NV03_PFIFO_INTR_0, NV_PFIFO_INTR_DMA_PUSHER);

			NV_WRITE(NV04_PFIFO_CACHE1_DMA_STATE, 0x00000000);
			if (NV_READ(NV04_PFIFO_CACHE1_DMA_PUT) != get)
				NV_WRITE(NV04_PFIFO_CACHE1_DMA_GET, get + 4);
		}

		if (status) {
			DRM_INFO("Unhandled PFIFO_INTR - 0x%08x\n", status);
			NV_WRITE(NV03_PFIFO_INTR_0, status);
			NV_WRITE(NV03_PMC_INTR_EN_0, 0);
		}

		NV_WRITE(NV03_PFIFO_CACHES, reassign);
	}
}
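/* Note: the nv10/nv40 FIFO context routines below rely on RAMFC_WR() and
 * RAMFC_RD() helpers whose definitions are not part of this excerpt.  As a
 * rough, illustrative sketch only (the exact definitions live in the
 * respective nvXX_fifo.c files and may differ), they presumably wrap the
 * instance-memory accessors along these lines:
 *
 *	#define RAMFC_WR(offset, val) INSTANCE_WR(chan->ramfc->gpuobj, \
 *						  NV10_RAMFC_##offset/4, (val))
 *	#define RAMFC_RD(offset)      INSTANCE_RD(chan->ramfc->gpuobj, \
 *						  NV10_RAMFC_##offset/4)
 *
 * i.e. each named field is a 32-bit word at a fixed offset within the
 * channel's RAMFC instance object.
 */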
int
nv10_fifo_create_context(struct nouveau_channel *chan)
{
	struct drm_device *dev = chan->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	int ret;

	if ((ret = nouveau_gpuobj_new_fake(dev, NV10_RAMFC(chan->id), ~0,
					   NV10_RAMFC__SIZE,
					   NVOBJ_FLAG_ZERO_ALLOC |
					   NVOBJ_FLAG_ZERO_FREE,
					   NULL, &chan->ramfc)))
		return ret;

	/* Fill entries that are seen filled in dumps of the nvidia driver
	 * just after the channel is put into DMA mode
	 */
	RAMFC_WR(DMA_PUT, chan->pushbuf_base);
	RAMFC_WR(DMA_GET, chan->pushbuf_base);
	RAMFC_WR(DMA_INSTANCE, chan->pushbuf->instance >> 4);
	RAMFC_WR(DMA_FETCH, NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES |
			    NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES |
			    NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_8 |
#ifdef __BIG_ENDIAN
			    NV_PFIFO_CACHE1_BIG_ENDIAN |
#endif
			    0);

	/* enable the fifo dma operation */
	NV_WRITE(NV04_PFIFO_MODE, NV_READ(NV04_PFIFO_MODE) | (1 << chan->id));
	return 0;
}
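/* Note on the RDI accesses used below and in the nv20/nv30 PGRAPH init
 * paths: NV10_PGRAPH_RDI_INDEX / NV10_PGRAPH_RDI_DATA appear to act as an
 * index/data pair for indirect access to internal PGRAPH state.  The index
 * is selected once and successive RDI_DATA writes then fill consecutive
 * entries, which is why nv20_graph_rdi() programs the index a single time
 * and follows it with `writecount` data writes.
 */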
static void
nv20_graph_rdi(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	int i, writecount = 32;
	uint32_t rdi_index = 0x2c80000;

	if (dev_priv->chipset == 0x20) {
		rdi_index = 0x3d0000;
		writecount = 15;
	}

	NV_WRITE(NV10_PGRAPH_RDI_INDEX, rdi_index);
	for (i = 0; i < writecount; i++)
		NV_WRITE(NV10_PGRAPH_RDI_DATA, 0);

	nouveau_wait_for_idle(dev);
}
void
nouveau_irq_uninstall(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;

	/* Master disable */
	NV_WRITE(NV03_PMC_INTR_EN_0, 0);
}
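/* nv20 PGRAPH context save: point CHANNEL_CTX_POINTER at the channel's
 * grctx object in instance memory, kick off a SAVE transfer through
 * CHANNEL_CTX_XFER, then wait for the engine to idle before returning.
 */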
int
nv20_graph_save_context(struct nouveau_channel *chan)
{
	struct drm_device *dev = chan->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	uint32_t inst;

	if (!chan->ramin_grctx)
		return -EINVAL;
	inst = chan->ramin_grctx->instance >> 4;

	NV_WRITE(NV20_PGRAPH_CHANNEL_CTX_POINTER, inst);
	NV_WRITE(NV20_PGRAPH_CHANNEL_CTX_XFER,
		 NV20_PGRAPH_CHANNEL_CTX_XFER_SAVE);

	nouveau_wait_for_idle(dev);
	return 0;
}
int
nv50_mc_init(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;

	NV_WRITE(NV03_PMC_ENABLE, 0xFFFFFFFF);

	return 0;
}
int
nouveau_irq_postinstall(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;

	/* Master enable */
	NV_WRITE(NV03_PMC_INTR_EN_0, NV_PMC_INTR_EN_0_MASTER_ENABLE);

	return 0;
}
void
nv10_fifo_destroy_context(struct nouveau_channel *chan)
{
	struct drm_device *dev = chan->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;

	NV_WRITE(NV04_PFIFO_MODE,
		 NV_READ(NV04_PFIFO_MODE) & ~(1 << chan->id));

	nouveau_gpuobj_ref_del(dev, &chan->ramfc);
}
int
nv40_fifo_init(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	int ret;

	if ((ret = nouveau_fifo_init(dev)))
		return ret;

	NV_WRITE(NV04_PFIFO_DMA_TIMESLICE, 0x2101ffff);
	return 0;
}
int
nv04_fb_init(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;

	/* This is what the DDX did for NV_ARCH_04, but an mmio trace shows
	 * the nvidia driver reading PFB_CFG_0, then writing back its
	 * original value (which was 0x701114 in that case).
	 */
	NV_WRITE(NV04_PFB_CFG0, 0x1114);

	return 0;
}
/* first module load, setup the mmio/fb mapping */
int
nouveau_firstopen(struct drm_device *dev)
{
#if defined(__powerpc__)
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct device_node *dn;
#endif
	int ret;

	/* Map any PCI resources we need on the card */
	ret = nouveau_init_card_mappings(dev);
	if (ret)
		return ret;

#if defined(__powerpc__)
	/* Put the card in BE mode if it's not */
	if (NV_READ(NV03_PMC_BOOT_1))
		NV_WRITE(NV03_PMC_BOOT_1, 0x00000001);

	DRM_MEMORYBARRIER();
#endif

#if defined(__linux__) && defined(__powerpc__)
	/* if we have an OF card, copy vbios to RAMIN */
	dn = pci_device_to_OF_node(dev->pdev);
	if (dn) {
		int size;
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22))
		const uint32_t *bios = of_get_property(dn, "NVDA,BMP", &size);
#else
		const uint32_t *bios = get_property(dn, "NVDA,BMP", &size);
#endif
		if (bios) {
			int i;
			for (i = 0; i < size; i += 4)
				NV_WI32(i, bios[i/4]);
			DRM_INFO("OF bios successfully copied (%d bytes)\n",
				 size);
		} else
			DRM_INFO("Unable to get the OF bios\n");
	} else
		DRM_INFO("Unable to get the OF node\n");
#endif
	return 0;
}
int
nv10_fifo_load_context(struct nouveau_channel *chan)
{
	struct drm_device *dev = chan->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	uint32_t tmp;

	NV_WRITE(NV03_PFIFO_CACHE1_PUSH1,
		 NV03_PFIFO_CACHE1_PUSH1_DMA | chan->id);

	NV_WRITE(NV04_PFIFO_CACHE1_DMA_GET, RAMFC_RD(DMA_GET));
	NV_WRITE(NV04_PFIFO_CACHE1_DMA_PUT, RAMFC_RD(DMA_PUT));
	NV_WRITE(NV10_PFIFO_CACHE1_REF_CNT, RAMFC_RD(REF_CNT));

	tmp = RAMFC_RD(DMA_INSTANCE);
	NV_WRITE(NV04_PFIFO_CACHE1_DMA_INSTANCE, tmp & 0xFFFF);
	NV_WRITE(NV04_PFIFO_CACHE1_DMA_DCOUNT, tmp >> 16);

	NV_WRITE(NV04_PFIFO_CACHE1_DMA_STATE, RAMFC_RD(DMA_STATE));
	NV_WRITE(NV04_PFIFO_CACHE1_DMA_FETCH, RAMFC_RD(DMA_FETCH));
	NV_WRITE(NV04_PFIFO_CACHE1_ENGINE, RAMFC_RD(ENGINE));
	NV_WRITE(NV04_PFIFO_CACHE1_PULL1, RAMFC_RD(PULL1_ENGINE));

	if (dev_priv->chipset >= 0x17) {
		NV_WRITE(NV10_PFIFO_CACHE1_ACQUIRE_VALUE,
			 RAMFC_RD(ACQUIRE_VALUE));
		NV_WRITE(NV10_PFIFO_CACHE1_ACQUIRE_TIMESTAMP,
			 RAMFC_RD(ACQUIRE_TIMESTAMP));
		NV_WRITE(NV10_PFIFO_CACHE1_ACQUIRE_TIMEOUT,
			 RAMFC_RD(ACQUIRE_TIMEOUT));
		NV_WRITE(NV10_PFIFO_CACHE1_SEMAPHORE,
			 RAMFC_RD(SEMAPHORE));
		NV_WRITE(NV10_PFIFO_CACHE1_DMA_SUBROUTINE,
			 RAMFC_RD(DMA_SUBROUTINE));
	}

	/* Reset NV04_PFIFO_CACHE1_DMA_CTL_AT_INFO to INVALID */
	tmp = NV_READ(NV04_PFIFO_CACHE1_DMA_CTL) & ~(1 << 31);
	NV_WRITE(NV04_PFIFO_CACHE1_DMA_CTL, tmp);

	return 0;
}
int
nv40_fifo_load_context(struct nouveau_channel *chan)
{
	struct drm_device *dev = chan->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	uint32_t tmp, tmp2;

	NV_WRITE(NV04_PFIFO_CACHE1_DMA_GET, RAMFC_RD(DMA_GET));
	NV_WRITE(NV04_PFIFO_CACHE1_DMA_PUT, RAMFC_RD(DMA_PUT));
	NV_WRITE(NV10_PFIFO_CACHE1_REF_CNT, RAMFC_RD(REF_CNT));
	NV_WRITE(NV04_PFIFO_CACHE1_DMA_INSTANCE, RAMFC_RD(DMA_INSTANCE));
	NV_WRITE(NV04_PFIFO_CACHE1_DMA_DCOUNT, RAMFC_RD(DMA_DCOUNT));
	NV_WRITE(NV04_PFIFO_CACHE1_DMA_STATE, RAMFC_RD(DMA_STATE));

	/* No idea what 0x2058 is.. */
	tmp = RAMFC_RD(DMA_FETCH);
	tmp2 = NV_READ(0x2058) & 0xFFF;
	tmp2 |= (tmp & 0x30000000);
	NV_WRITE(0x2058, tmp2);
	tmp &= ~0x30000000;
	NV_WRITE(NV04_PFIFO_CACHE1_DMA_FETCH, tmp);

	NV_WRITE(NV04_PFIFO_CACHE1_ENGINE, RAMFC_RD(ENGINE));
	NV_WRITE(NV04_PFIFO_CACHE1_PULL1, RAMFC_RD(PULL1_ENGINE));
	NV_WRITE(NV10_PFIFO_CACHE1_ACQUIRE_VALUE, RAMFC_RD(ACQUIRE_VALUE));
	NV_WRITE(NV10_PFIFO_CACHE1_ACQUIRE_TIMESTAMP,
		 RAMFC_RD(ACQUIRE_TIMESTAMP));
	NV_WRITE(NV10_PFIFO_CACHE1_ACQUIRE_TIMEOUT, RAMFC_RD(ACQUIRE_TIMEOUT));
	NV_WRITE(NV10_PFIFO_CACHE1_SEMAPHORE, RAMFC_RD(SEMAPHORE));
	NV_WRITE(NV10_PFIFO_CACHE1_DMA_SUBROUTINE, RAMFC_RD(DMA_SUBROUTINE));
	NV_WRITE(NV40_PFIFO_GRCTX_INSTANCE, RAMFC_RD(GRCTX_INSTANCE));
	NV_WRITE(0x32e4, RAMFC_RD(UNK_40));
	/* NVIDIA does this next line twice... */
	NV_WRITE(0x32e8, RAMFC_RD(UNK_44));
	NV_WRITE(0x2088, RAMFC_RD(UNK_4C));
	NV_WRITE(0x3300, RAMFC_RD(UNK_50));

	/* not sure which part is PUT, and which is GET.. never seen a
	 * non-zero value appear in an mmio trace yet..
	 */
#if 0
	tmp = NV_READ(UNK_84);
	NV_WRITE(NV_PFIFO_CACHE1_GET, tmp ???);
	NV_WRITE(NV_PFIFO_CACHE1_PUT, tmp ???);
#endif

	/* Don't clobber the TIMEOUT_ENABLED flag when restoring from RAMFC */
	tmp = NV_READ(NV04_PFIFO_DMA_TIMESLICE) & ~0x1FFFF;
	tmp |= RAMFC_RD(DMA_TIMESLICE) & 0x1FFFF;
	NV_WRITE(NV04_PFIFO_DMA_TIMESLICE, tmp);

	/* Set channel active, and in DMA mode */
	NV_WRITE(NV03_PFIFO_CACHE1_PUSH1,
		 NV03_PFIFO_CACHE1_PUSH1_DMA | chan->id);

	/* Reset DMA_CTL_AT_INFO to INVALID */
	tmp = NV_READ(NV04_PFIFO_CACHE1_DMA_CTL) & ~(1 << 31);
	NV_WRITE(NV04_PFIFO_CACHE1_DMA_CTL, tmp);

	return 0;
}
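/* Both nv20_graph_init() and nv30_graph_init() below allocate a 32-entry
 * table of 4-byte context pointers and point NV20_PGRAPH_CHANNEL_CTX_TABLE
 * at its instance address (>> 4); per-channel PGRAPH contexts are then
 * presumably looked up through this table by channel id.
 */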
int
nv30_graph_init(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
//	uint32_t vramsz, tmp;
	int ret, i;

	NV_WRITE(NV03_PMC_ENABLE,
		 NV_READ(NV03_PMC_ENABLE) & ~NV_PMC_ENABLE_PGRAPH);
	NV_WRITE(NV03_PMC_ENABLE,
		 NV_READ(NV03_PMC_ENABLE) |  NV_PMC_ENABLE_PGRAPH);

	/* Create Context Pointer Table */
	dev_priv->ctx_table_size = 32 * 4;
	if ((ret = nouveau_gpuobj_new_ref(dev, NULL, NULL, 0,
					  dev_priv->ctx_table_size, 16,
					  NVOBJ_FLAG_ZERO_ALLOC,
					  &dev_priv->ctx_table)))
		return ret;

	NV_WRITE(NV20_PGRAPH_CHANNEL_CTX_TABLE,
		 dev_priv->ctx_table->instance >> 4);

	NV_WRITE(NV03_PGRAPH_INTR, 0xFFFFFFFF);
	NV_WRITE(NV03_PGRAPH_INTR_EN, 0xFFFFFFFF);

	NV_WRITE(NV04_PGRAPH_DEBUG_0, 0xFFFFFFFF);
	NV_WRITE(NV04_PGRAPH_DEBUG_0, 0x00000000);
	NV_WRITE(NV04_PGRAPH_DEBUG_1, 0x401287c0);
	NV_WRITE(0x400890, 0x01b463ff);
	NV_WRITE(NV04_PGRAPH_DEBUG_3, 0xf2de0475);
	NV_WRITE(NV10_PGRAPH_DEBUG_4, 0x00008000);
	NV_WRITE(NV04_PGRAPH_LIMIT_VIOL_PIX, 0xf04bdff6);
	NV_WRITE(0x400B80, 0x1003d888);
	NV_WRITE(0x400B84, 0x0c000000);
	NV_WRITE(0x400098, 0x00000000);
	NV_WRITE(0x40009C, 0x0005ad00);
	NV_WRITE(0x400B88, 0x62ff00ff); // suspiciously like PGRAPH_DEBUG_2
	NV_WRITE(0x4000a0, 0x00000000);
	NV_WRITE(0x4000a4, 0x00000008);
	NV_WRITE(0x4008a8, 0xb784a400);
	NV_WRITE(0x400ba0, 0x002f8685);
	NV_WRITE(0x400ba4, 0x00231f3f);
	NV_WRITE(0x4008a4, 0x40000020);

	if (dev_priv->chipset == 0x34) {
		NV_WRITE(NV10_PGRAPH_RDI_INDEX, 0x00EA0004);
		NV_WRITE(NV10_PGRAPH_RDI_DATA, 0x00200201);
		NV_WRITE(NV10_PGRAPH_RDI_INDEX, 0x00EA0008);
		NV_WRITE(NV10_PGRAPH_RDI_DATA, 0x00000008);
		NV_WRITE(NV10_PGRAPH_RDI_INDEX, 0x00EA0000);
		NV_WRITE(NV10_PGRAPH_RDI_DATA, 0x00000032);
		NV_WRITE(NV10_PGRAPH_RDI_INDEX, 0x00E00004);
		NV_WRITE(NV10_PGRAPH_RDI_DATA, 0x00000002);
	}

	NV_WRITE(0x4000c0, 0x00000016);

	/* copy tile info from PFB */
	for (i = 0; i < NV10_PFB_TILE__SIZE; i++) {
		/* which is NV40_PGRAPH_TLIMIT0(i) ?? */
		NV_WRITE(0x00400904 + i*0x10, NV_READ(NV10_PFB_TLIMIT(i)));
		/* which is NV40_PGRAPH_TSIZE0(i) ?? */
		NV_WRITE(0x00400908 + i*0x10, NV_READ(NV10_PFB_TSIZE(i)));
		/* which is NV40_PGRAPH_TILE0(i) ?? */
		NV_WRITE(0x00400900 + i*0x10, NV_READ(NV10_PFB_TILE(i)));
	}

	NV_WRITE(NV10_PGRAPH_CTX_CONTROL, 0x10000100);
	NV_WRITE(NV10_PGRAPH_STATE, 0xFFFFFFFF);
	NV_WRITE(0x0040075c, 0x00000001);
	NV_WRITE(NV04_PGRAPH_FIFO, 0x00000001);

	/* begin RAM config */
//	vramsz = drm_get_resource_len(dev, 0) - 1;
	NV_WRITE(0x4009A4, NV_READ(NV04_PFB_CFG0));
	NV_WRITE(0x4009A8, NV_READ(NV04_PFB_CFG1));
	if (dev_priv->chipset != 0x34) {
		NV_WRITE(0x400750, 0x00EA0000);
		NV_WRITE(0x400754, NV_READ(NV04_PFB_CFG0));
		NV_WRITE(0x400750, 0x00EA0004);
		NV_WRITE(0x400754, NV_READ(NV04_PFB_CFG1));
	}

#if 0
	NV_WRITE(0x400820, 0);
	NV_WRITE(0x400824, 0);
	NV_WRITE(0x400864, vramsz - 1);
	NV_WRITE(0x400868, vramsz - 1);

	NV_WRITE(0x400B20, 0x00000000);
	NV_WRITE(0x400B04, 0xFFFFFFFF);

	/* per-context state, doesn't belong here */
	tmp = NV_READ(NV10_PGRAPH_SURFACE) & 0x0007ff00;
	NV_WRITE(NV10_PGRAPH_SURFACE, tmp);
	tmp = NV_READ(NV10_PGRAPH_SURFACE) | 0x00020100;
	NV_WRITE(NV10_PGRAPH_SURFACE, tmp);

	NV_WRITE(NV03_PGRAPH_ABS_UCLIP_XMIN, 0);
	NV_WRITE(NV03_PGRAPH_ABS_UCLIP_YMIN, 0);
	NV_WRITE(NV03_PGRAPH_ABS_UCLIP_XMAX, 0x7fff);
	NV_WRITE(NV03_PGRAPH_ABS_UCLIP_YMAX, 0x7fff);
#endif

	return 0;
}
int
nv20_graph_init(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv =
		(struct drm_nouveau_private *)dev->dev_private;
	uint32_t tmp, vramsz;
	int ret, i;

	NV_WRITE(NV03_PMC_ENABLE,
		 NV_READ(NV03_PMC_ENABLE) & ~NV_PMC_ENABLE_PGRAPH);
	NV_WRITE(NV03_PMC_ENABLE,
		 NV_READ(NV03_PMC_ENABLE) |  NV_PMC_ENABLE_PGRAPH);

	/* Create Context Pointer Table */
	dev_priv->ctx_table_size = 32 * 4;
	if ((ret = nouveau_gpuobj_new_ref(dev, NULL, NULL, 0,
					  dev_priv->ctx_table_size, 16,
					  NVOBJ_FLAG_ZERO_ALLOC,
					  &dev_priv->ctx_table)))
		return ret;

	NV_WRITE(NV20_PGRAPH_CHANNEL_CTX_TABLE,
		 dev_priv->ctx_table->instance >> 4);

	nv20_graph_rdi(dev);

	NV_WRITE(NV03_PGRAPH_INTR, 0xFFFFFFFF);
	NV_WRITE(NV03_PGRAPH_INTR_EN, 0xFFFFFFFF);

	NV_WRITE(NV04_PGRAPH_DEBUG_0, 0xFFFFFFFF);
	NV_WRITE(NV04_PGRAPH_DEBUG_0, 0x00000000);
	NV_WRITE(NV04_PGRAPH_DEBUG_1, 0x00118700);
	NV_WRITE(NV04_PGRAPH_DEBUG_3, 0xF3CE0475); /* 0x4 = auto ctx switch */
	NV_WRITE(NV10_PGRAPH_DEBUG_4, 0x00000000);
	NV_WRITE(0x40009C, 0x00000040);

	if (dev_priv->chipset >= 0x25) {
		NV_WRITE(0x400890, 0x00080000);
		NV_WRITE(0x400610, 0x304B1FB6);
		NV_WRITE(0x400B80, 0x18B82880);
		NV_WRITE(0x400B84, 0x44000000);
		NV_WRITE(0x400098, 0x40000080);
		NV_WRITE(0x400B88, 0x000000ff);
	} else {
		NV_WRITE(0x400880, 0x00080000); /* 0x0008c7df */
		NV_WRITE(0x400094, 0x00000005);
		NV_WRITE(0x400B80, 0x45CAA208); /* 0x45eae20e */
		NV_WRITE(0x400B84, 0x24000000);
		NV_WRITE(0x400098, 0x00000040);
		NV_WRITE(NV10_PGRAPH_RDI_INDEX, 0x00E00038);
		NV_WRITE(NV10_PGRAPH_RDI_DATA, 0x00000030);
		NV_WRITE(NV10_PGRAPH_RDI_INDEX, 0x00E10038);
		NV_WRITE(NV10_PGRAPH_RDI_DATA, 0x00000030);
	}

	/* copy tile info from PFB */
	for (i = 0; i < NV10_PFB_TILE__SIZE; i++) {
		/* which is NV40_PGRAPH_TLIMIT0(i) ?? */
		NV_WRITE(0x00400904 + i*0x10, NV_READ(NV10_PFB_TLIMIT(i)));
		NV_WRITE(NV10_PGRAPH_RDI_INDEX, 0x00EA0030 + i*4);
		NV_WRITE(NV10_PGRAPH_RDI_DATA, NV_READ(NV10_PFB_TLIMIT(i)));
		/* which is NV40_PGRAPH_TSIZE0(i) ?? */
		NV_WRITE(0x00400908 + i*0x10, NV_READ(NV10_PFB_TSIZE(i)));
		NV_WRITE(NV10_PGRAPH_RDI_INDEX, 0x00EA0050 + i*4);
		NV_WRITE(NV10_PGRAPH_RDI_DATA, NV_READ(NV10_PFB_TSIZE(i)));
		/* which is NV40_PGRAPH_TILE0(i) ?? */
		NV_WRITE(0x00400900 + i*0x10, NV_READ(NV10_PFB_TILE(i)));
		NV_WRITE(NV10_PGRAPH_RDI_INDEX, 0x00EA0010 + i*4);
		NV_WRITE(NV10_PGRAPH_RDI_DATA, NV_READ(NV10_PFB_TILE(i)));
	}
	for (i = 0; i < 8; i++) {
		NV_WRITE(0x400980 + i*4, NV_READ(0x100300 + i*4));
		NV_WRITE(NV10_PGRAPH_RDI_INDEX, 0x00EA0090 + i*4);
		NV_WRITE(NV10_PGRAPH_RDI_DATA, NV_READ(0x100300 + i*4));
	}
	NV_WRITE(0x4009a0, NV_READ(0x100324));
	NV_WRITE(NV10_PGRAPH_RDI_INDEX, 0x00EA000C);
	NV_WRITE(NV10_PGRAPH_RDI_DATA, NV_READ(0x100324));

	NV_WRITE(NV10_PGRAPH_CTX_CONTROL, 0x10000100);
	NV_WRITE(NV10_PGRAPH_STATE, 0xFFFFFFFF);
	NV_WRITE(NV04_PGRAPH_FIFO, 0x00000001);

	tmp = NV_READ(NV10_PGRAPH_SURFACE) & 0x0007ff00;
	NV_WRITE(NV10_PGRAPH_SURFACE, tmp);
	tmp = NV_READ(NV10_PGRAPH_SURFACE) | 0x00020100;
	NV_WRITE(NV10_PGRAPH_SURFACE, tmp);

	/* begin RAM config */
	vramsz = drm_get_resource_len(dev, 0) - 1;
	NV_WRITE(0x4009A4, NV_READ(NV04_PFB_CFG0));
	NV_WRITE(0x4009A8, NV_READ(NV04_PFB_CFG1));
	NV_WRITE(NV10_PGRAPH_RDI_INDEX, 0x00EA0000);
	NV_WRITE(NV10_PGRAPH_RDI_DATA, NV_READ(NV04_PFB_CFG0));
	NV_WRITE(NV10_PGRAPH_RDI_INDEX, 0x00EA0004);
	NV_WRITE(NV10_PGRAPH_RDI_DATA, NV_READ(NV04_PFB_CFG1));
	NV_WRITE(0x400820, 0);
	NV_WRITE(0x400824, 0);
	NV_WRITE(0x400864, vramsz - 1);
	NV_WRITE(0x400868, vramsz - 1);

	/* interesting.. the below overwrites some of the tile setup above.. */
	NV_WRITE(0x400B20, 0x00000000);
	NV_WRITE(0x400B04, 0xFFFFFFFF);

	NV_WRITE(NV03_PGRAPH_ABS_UCLIP_XMIN, 0);
	NV_WRITE(NV03_PGRAPH_ABS_UCLIP_YMIN, 0);
	NV_WRITE(NV03_PGRAPH_ABS_UCLIP_XMAX, 0x7fff);
	NV_WRITE(NV03_PGRAPH_ABS_UCLIP_YMAX, 0x7fff);

	return 0;
}