void adreno_recover(struct msm_gpu *gpu)
{
	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
	struct drm_device *dev = gpu->dev;
	int ret;

	gpu->funcs->pm_suspend(gpu);

	/* reset ringbuffer: */
	gpu->rb->cur = gpu->rb->start;

	/* reset completed fence seqno: */
	adreno_gpu->memptrs->fence = gpu->fctx->completed_fence;
	adreno_gpu->memptrs->rptr = 0;
	adreno_gpu->memptrs->wptr = 0;

	gpu->funcs->pm_resume(gpu);

	disable_irq(gpu->irq);
	ret = gpu->funcs->hw_init(gpu);
	if (ret) {
		dev_err(dev->dev, "gpu hw init failed: %d\n", ret);
		/* hmm, oh well? */
	}
	enable_irq(gpu->irq);
}
int adreno_hw_init(struct msm_gpu *gpu)
{
	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
	int ret;

	DBG("%s", gpu->name);

	ret = msm_gem_get_iova(gpu->rb->bo, gpu->id, &gpu->rb_iova);
	if (ret) {
		gpu->rb_iova = 0;
		dev_err(gpu->dev->dev, "could not map ringbuffer: %d\n", ret);
		return ret;
	}

	/* Setup REG_CP_RB_CNTL: */
	adreno_gpu_write(adreno_gpu, REG_ADRENO_CP_RB_CNTL,
			/* size is log2(quad-words): */
			AXXX_CP_RB_CNTL_BUFSZ(ilog2(gpu->rb->size / 8)) |
			AXXX_CP_RB_CNTL_BLKSZ(ilog2(RB_BLKSIZE / 8)) |
			(adreno_is_a430(adreno_gpu) ? AXXX_CP_RB_CNTL_NO_UPDATE : 0));

	/* Setup ringbuffer address: */
	adreno_gpu_write64(adreno_gpu, REG_ADRENO_CP_RB_BASE,
			REG_ADRENO_CP_RB_BASE_HI, gpu->rb_iova);

	if (!adreno_is_a430(adreno_gpu)) {
		adreno_gpu_write64(adreno_gpu, REG_ADRENO_CP_RB_RPTR_ADDR,
				REG_ADRENO_CP_RB_RPTR_ADDR_HI,
				rbmemptr(adreno_gpu, rptr));
	}

	return 0;
}
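/*
 * Worked example (editorial sketch, not part of the driver): the CP
 * expresses the ring size as log2 of the quad-word (8-byte) count.
 * For a hypothetical 32KB ring, 32768 / 8 = 4096 quad-words, so
 * ilog2(4096) = 12 is the value that lands in the BUFSZ field.
 * Assumes ilog2() from <linux/log2.h> and SZ_32K from <linux/sizes.h>.
 */
static inline uint32_t example_rb_cntl_bufsz(uint32_t size_bytes)
{
	/* e.g. example_rb_cntl_bufsz(SZ_32K) == 12 */
	return ilog2(size_bytes / 8);
}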
int adreno_get_param(struct msm_gpu *gpu, uint32_t param, uint64_t *value)
{
	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);

	switch (param) {
	case MSM_PARAM_GPU_ID:
		*value = adreno_gpu->info->revn;
		return 0;
	case MSM_PARAM_GMEM_SIZE:
		*value = adreno_gpu->gmem;
		return 0;
	case MSM_PARAM_GMEM_BASE:
		*value = 0x100000;
		return 0;
	case MSM_PARAM_CHIP_ID:
		*value = adreno_gpu->rev.patchid |
				(adreno_gpu->rev.minor << 8) |
				(adreno_gpu->rev.major << 16) |
				(adreno_gpu->rev.core << 24);
		return 0;
	case MSM_PARAM_MAX_FREQ:
		*value = adreno_gpu->base.fast_rate;
		return 0;
	case MSM_PARAM_TIMESTAMP:
		if (adreno_gpu->funcs->get_timestamp)
			return adreno_gpu->funcs->get_timestamp(gpu, value);
		return -EINVAL;
	default:
		DBG("%s: invalid param: %u", gpu->name, param);
		return -EINVAL;
	}
}
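/*
 * Illustrative sketch (not driver code): MSM_PARAM_CHIP_ID packs the
 * revision as one byte per component, with the core in the most
 * significant byte. E.g. an A330 rev 3.3.0.2 reads back as 0x03030002.
 */
static inline uint32_t example_pack_chip_id(uint8_t core, uint8_t major,
		uint8_t minor, uint8_t patchid)
{
	return patchid | (minor << 8) | (major << 16) |
			((uint32_t)core << 24);
}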
void adreno_show(struct msm_gpu *gpu, struct seq_file *m)
{
	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
	int i;

	seq_printf(m, "revision: %d (%d.%d.%d.%d)\n",
			adreno_gpu->info->revn, adreno_gpu->rev.core,
			adreno_gpu->rev.major, adreno_gpu->rev.minor,
			adreno_gpu->rev.patchid);
	seq_printf(m, "fence: %d/%d\n", adreno_gpu->memptrs->fence,
			gpu->fctx->last_fence);
	seq_printf(m, "rptr: %d\n", get_rptr(adreno_gpu));
	seq_printf(m, "wptr: %d\n", adreno_gpu->memptrs->wptr);
	seq_printf(m, "rb wptr: %d\n", get_wptr(gpu->rb));

	/* dump these out in a form that can be parsed by demsm: */
	seq_printf(m, "IO:region %s 00000000 00020000\n", gpu->name);
	for (i = 0; adreno_gpu->registers[i] != ~0; i += 2) {
		uint32_t start = adreno_gpu->registers[i];
		uint32_t end = adreno_gpu->registers[i + 1];
		uint32_t addr;

		for (addr = start; addr <= end; addr++) {
			uint32_t val = gpu_read(gpu, addr);
			seq_printf(m, "IO:R %08x %08x\n", addr << 2, val);
		}
	}
}
static uint32_t ring_freewords(struct msm_gpu *gpu)
{
	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
	uint32_t size = gpu->rb->size / 4;
	uint32_t wptr = get_wptr(gpu->rb);
	uint32_t rptr = get_rptr(adreno_gpu);
	return (rptr + (size - 1) - wptr) % size;
}
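/*
 * Worked example (editorial): with size = 1024 dwords, rptr = 10 and
 * wptr = 1000, freewords = (10 + 1023 - 1000) % 1024 = 33.  The
 * "size - 1" term deliberately keeps one dword unused, so that
 * rptr == wptr unambiguously means "empty" rather than "full".
 */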
unsigned long a6xx_gmu_get_freq(struct msm_gpu *gpu)
{
	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
	struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
	struct a6xx_gmu *gmu = &a6xx_gpu->gmu;

	return gmu->freq;
}
void adreno_flush(struct msm_gpu *gpu)
{
	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
	uint32_t wptr = get_wptr(gpu->rb);

	/* ensure writes to ringbuffer have hit system memory: */
	mb();

	adreno_gpu_write(adreno_gpu, REG_ADRENO_CP_RB_WPTR, wptr);
}
bool adreno_idle(struct msm_gpu *gpu)
{
	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
	uint32_t wptr = get_wptr(gpu->rb);

	/* wait for CP to drain ringbuffer: */
	if (!spin_until(get_rptr(adreno_gpu) == wptr))
		return true;

	/* TODO maybe we need to reset GPU here to recover from hang? */
	DRM_ERROR("%s: timeout waiting to drain ringbuffer!\n", gpu->name);
	return false;
}
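/*
 * Editorial note: from the usage above, spin_until() evidently
 * evaluates falsy when the condition became true before its timeout
 * and nonzero on timeout, so adreno_idle() returns true only when the
 * CP has actually consumed everything up to wptr.
 */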
/* Dump common gpu status and scratch registers on any hang, to make
 * the hangcheck logs more useful.  The scratch registers seem to
 * always be safe to read when the GPU has hung (unlike some other
 * registers, depending on how the GPU hung), and they are useful to
 * match up against cmdstream dumps when debugging hangs:
 */
void adreno_dump_info(struct msm_gpu *gpu)
{
	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);

	printk("revision: %d (%d.%d.%d.%d)\n",
			adreno_gpu->info->revn, adreno_gpu->rev.core,
			adreno_gpu->rev.major, adreno_gpu->rev.minor,
			adreno_gpu->rev.patchid);
	printk("fence: %d/%d\n", adreno_gpu->memptrs->fence,
			gpu->fctx->last_fence);
	printk("rptr: %d\n", get_rptr(adreno_gpu));
	printk("rb wptr: %d\n", get_wptr(gpu->rb));
}
static void a4xx_destroy(struct msm_gpu *gpu)
{
	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
	struct a4xx_gpu *a4xx_gpu = to_a4xx_gpu(adreno_gpu);

	DBG("%s", gpu->name);

	adreno_gpu_cleanup(adreno_gpu);

#ifdef CONFIG_MSM_OCMEM
	if (a4xx_gpu->ocmem_base)
		ocmem_free(OCMEM_GRAPHICS, a4xx_gpu->ocmem_hdl);
#endif

	kfree(a4xx_gpu);
}
void a6xx_gmu_set_freq(struct msm_gpu *gpu, unsigned long freq)
{
	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
	struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
	struct a6xx_gmu *gmu = &a6xx_gpu->gmu;
	u32 perf_index = 0;

	if (freq == gmu->freq)
		return;

	for (perf_index = 0; perf_index < gmu->nr_gpu_freqs - 1; perf_index++)
		if (freq == gmu->gpu_freqs[perf_index])
			break;

	__a6xx_gmu_set_freq(gmu, perf_index);
}
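/*
 * Editorial note: the lookup above deliberately stops at
 * nr_gpu_freqs - 1, so a request with no exact match in the GMU
 * frequency table silently clamps to the final table entry rather
 * than failing.
 */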
void adreno_flush(struct msm_gpu *gpu)
{
	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
	uint32_t wptr;

	/*
	 * Mask the wptr value we calculate to fit in the HW range.  This
	 * accounts for the possibility that the last command fit exactly
	 * into the ringbuffer and rb->next hasn't wrapped to zero yet.
	 */
	wptr = get_wptr(gpu->rb) & ((gpu->rb->size / 4) - 1);

	/* ensure writes to ringbuffer have hit system memory: */
	mb();

	adreno_gpu_write(adreno_gpu, REG_ADRENO_CP_RB_WPTR, wptr);
}
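/*
 * Worked example (editorial): for a hypothetical 32KB ring (8192
 * dwords), a submit that ends exactly at the end of the buffer leaves
 * wptr == 8192 before rb->next wraps; the mask gives 8192 & 8191 == 0,
 * which is the index the hardware expects.
 */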
/* would be nice to not have to duplicate the _show() stuff with printk(): */
void adreno_dump(struct msm_gpu *gpu)
{
	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
	int i;

	/* dump these out in a form that can be parsed by demsm: */
	printk("IO:region %s 00000000 00020000\n", gpu->name);
	for (i = 0; adreno_gpu->registers[i] != ~0; i += 2) {
		uint32_t start = adreno_gpu->registers[i];
		uint32_t end = adreno_gpu->registers[i + 1];
		uint32_t addr;

		for (addr = start; addr <= end; addr++) {
			uint32_t val = gpu_read(gpu, addr);
			printk("IO:R %08x %08x\n", addr << 2, val);
		}
	}
}
/*
 * a4xx_enable_hwcg() - Program the clock control registers
 * @gpu: the GPU whose clock control registers are programmed
 */
static void a4xx_enable_hwcg(struct msm_gpu *gpu)
{
	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
	unsigned int i;

	for (i = 0; i < 4; i++)
		gpu_write(gpu, REG_A4XX_RBBM_CLOCK_CTL_TP(i), 0x02222202);
	for (i = 0; i < 4; i++)
		gpu_write(gpu, REG_A4XX_RBBM_CLOCK_CTL2_TP(i), 0x00002222);
	for (i = 0; i < 4; i++)
		gpu_write(gpu, REG_A4XX_RBBM_CLOCK_HYST_TP(i), 0x0E739CE7);
	for (i = 0; i < 4; i++)
		gpu_write(gpu, REG_A4XX_RBBM_CLOCK_DELAY_TP(i), 0x00111111);
	for (i = 0; i < 4; i++)
		gpu_write(gpu, REG_A4XX_RBBM_CLOCK_CTL_SP(i), 0x22222222);
	for (i = 0; i < 4; i++)
		gpu_write(gpu, REG_A4XX_RBBM_CLOCK_CTL2_SP(i), 0x00222222);
	for (i = 0; i < 4; i++)
		gpu_write(gpu, REG_A4XX_RBBM_CLOCK_HYST_SP(i), 0x00000104);
	for (i = 0; i < 4; i++)
		gpu_write(gpu, REG_A4XX_RBBM_CLOCK_DELAY_SP(i), 0x00000081);

	gpu_write(gpu, REG_A4XX_RBBM_CLOCK_CTL_UCHE, 0x22222222);
	gpu_write(gpu, REG_A4XX_RBBM_CLOCK_CTL2_UCHE, 0x02222222);
	gpu_write(gpu, REG_A4XX_RBBM_CLOCK_CTL3_UCHE, 0x00000000);
	gpu_write(gpu, REG_A4XX_RBBM_CLOCK_CTL4_UCHE, 0x00000000);
	gpu_write(gpu, REG_A4XX_RBBM_CLOCK_HYST_UCHE, 0x00004444);
	gpu_write(gpu, REG_A4XX_RBBM_CLOCK_DELAY_UCHE, 0x00001112);

	for (i = 0; i < 4; i++)
		gpu_write(gpu, REG_A4XX_RBBM_CLOCK_CTL_RB(i), 0x22222222);

	/* Disable L1 clocking in A420 due to CCU issues with it: */
	for (i = 0; i < 4; i++) {
		if (adreno_is_a420(adreno_gpu))
			gpu_write(gpu, REG_A4XX_RBBM_CLOCK_CTL2_RB(i), 0x00002020);
		else
			gpu_write(gpu, REG_A4XX_RBBM_CLOCK_CTL2_RB(i), 0x00022020);
	}

	for (i = 0; i < 4; i++)
		gpu_write(gpu, REG_A4XX_RBBM_CLOCK_CTL_MARB_CCU(i), 0x00000922);
	for (i = 0; i < 4; i++)
		gpu_write(gpu, REG_A4XX_RBBM_CLOCK_HYST_RB_MARB_CCU(i), 0x00000000);
	for (i = 0; i < 4; i++)
		gpu_write(gpu, REG_A4XX_RBBM_CLOCK_DELAY_RB_MARB_CCU_L1(i), 0x00000001);

	gpu_write(gpu, REG_A4XX_RBBM_CLOCK_MODE_GPC, 0x02222222);
	gpu_write(gpu, REG_A4XX_RBBM_CLOCK_HYST_GPC, 0x04100104);
	gpu_write(gpu, REG_A4XX_RBBM_CLOCK_DELAY_GPC, 0x00022222);
	gpu_write(gpu, REG_A4XX_RBBM_CLOCK_CTL_COM_DCOM, 0x00000022);
	gpu_write(gpu, REG_A4XX_RBBM_CLOCK_HYST_COM_DCOM, 0x0000010F);
	gpu_write(gpu, REG_A4XX_RBBM_CLOCK_DELAY_COM_DCOM, 0x00000022);
	gpu_write(gpu, REG_A4XX_RBBM_CLOCK_CTL_TSE_RAS_RBBM, 0x00222222);
	gpu_write(gpu, REG_A4XX_RBBM_CLOCK_HYST_TSE_RAS_RBBM, 0x00004104);
	gpu_write(gpu, REG_A4XX_RBBM_CLOCK_DELAY_TSE_RAS_RBBM, 0x00000222);
	gpu_write(gpu, REG_A4XX_RBBM_CLOCK_CTL_HLSQ, 0x00000000);
	gpu_write(gpu, REG_A4XX_RBBM_CLOCK_HYST_HLSQ, 0x00000000);
	gpu_write(gpu, REG_A4XX_RBBM_CLOCK_DELAY_HLSQ, 0x00220000);

	/* Early A430s have a timing issue with SP/TP power collapse;
	 * disabling HW clock gating prevents it.
	 */
	if (adreno_is_a430(adreno_gpu) && adreno_gpu->rev.patchid < 2)
		gpu_write(gpu, REG_A4XX_RBBM_CLOCK_CTL, 0);
	else
		gpu_write(gpu, REG_A4XX_RBBM_CLOCK_CTL, 0xAAAAAAAA);
	gpu_write(gpu, REG_A4XX_RBBM_CLOCK_CTL2, 0);
}
static int a3xx_hw_init(struct msm_gpu *gpu)
{
	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
	struct a3xx_gpu *a3xx_gpu = to_a3xx_gpu(adreno_gpu);
	uint32_t *ptr, len;
	int i, ret;

	DBG("%s", gpu->name);

	if (adreno_is_a305(adreno_gpu)) {
		/* Set up 16 deep read/write request queues: */
		gpu_write(gpu, REG_A3XX_VBIF_IN_RD_LIM_CONF0, 0x10101010);
		gpu_write(gpu, REG_A3XX_VBIF_IN_RD_LIM_CONF1, 0x10101010);
		gpu_write(gpu, REG_A3XX_VBIF_OUT_RD_LIM_CONF0, 0x10101010);
		gpu_write(gpu, REG_A3XX_VBIF_OUT_WR_LIM_CONF0, 0x10101010);
		gpu_write(gpu, REG_A3XX_VBIF_DDR_OUT_MAX_BURST, 0x0000303);
		gpu_write(gpu, REG_A3XX_VBIF_IN_WR_LIM_CONF0, 0x10101010);
		gpu_write(gpu, REG_A3XX_VBIF_IN_WR_LIM_CONF1, 0x10101010);
		/* Enable WR-REQ: */
		gpu_write(gpu, REG_A3XX_VBIF_GATE_OFF_WRREQ_EN, 0x0000ff);
		/* Set up round robin arbitration between both AXI ports: */
		gpu_write(gpu, REG_A3XX_VBIF_ARB_CTL, 0x00000030);
		/* Set up AOOO: */
		gpu_write(gpu, REG_A3XX_VBIF_OUT_AXI_AOOO_EN, 0x0000003c);
		gpu_write(gpu, REG_A3XX_VBIF_OUT_AXI_AOOO, 0x003c003c);
	} else if (adreno_is_a320(adreno_gpu)) {
		/* Set up 16 deep read/write request queues: */
		gpu_write(gpu, REG_A3XX_VBIF_IN_RD_LIM_CONF0, 0x10101010);
		gpu_write(gpu, REG_A3XX_VBIF_IN_RD_LIM_CONF1, 0x10101010);
		gpu_write(gpu, REG_A3XX_VBIF_OUT_RD_LIM_CONF0, 0x10101010);
		gpu_write(gpu, REG_A3XX_VBIF_OUT_WR_LIM_CONF0, 0x10101010);
		gpu_write(gpu, REG_A3XX_VBIF_DDR_OUT_MAX_BURST, 0x0000303);
		gpu_write(gpu, REG_A3XX_VBIF_IN_WR_LIM_CONF0, 0x10101010);
		gpu_write(gpu, REG_A3XX_VBIF_IN_WR_LIM_CONF1, 0x10101010);
		/* Enable WR-REQ: */
		gpu_write(gpu, REG_A3XX_VBIF_GATE_OFF_WRREQ_EN, 0x0000ff);
		/* Set up round robin arbitration between both AXI ports: */
		gpu_write(gpu, REG_A3XX_VBIF_ARB_CTL, 0x00000030);
		/* Set up AOOO: */
		gpu_write(gpu, REG_A3XX_VBIF_OUT_AXI_AOOO_EN, 0x0000003c);
		gpu_write(gpu, REG_A3XX_VBIF_OUT_AXI_AOOO, 0x003c003c);
		/* Enable 1K sort: */
		gpu_write(gpu, REG_A3XX_VBIF_ABIT_SORT, 0x000000ff);
		gpu_write(gpu, REG_A3XX_VBIF_ABIT_SORT_CONF, 0x000000a4);
	} else if (adreno_is_a330v2(adreno_gpu)) {
		/*
		 * Most of the VBIF registers on 8974v2 have the correct
		 * values at power on, so we won't modify those if we don't
		 * need to.
		 */
		/* Enable 1k sort: */
		gpu_write(gpu, REG_A3XX_VBIF_ABIT_SORT, 0x0001003f);
		gpu_write(gpu, REG_A3XX_VBIF_ABIT_SORT_CONF, 0x000000a4);
		/* Enable WR-REQ: */
		gpu_write(gpu, REG_A3XX_VBIF_GATE_OFF_WRREQ_EN, 0x00003f);
		gpu_write(gpu, REG_A3XX_VBIF_DDR_OUT_MAX_BURST, 0x0000303);
		/* Set up VBIF_ROUND_ROBIN_QOS_ARB: */
		gpu_write(gpu, REG_A3XX_VBIF_ROUND_ROBIN_QOS_ARB, 0x0003);
	} else if (adreno_is_a330(adreno_gpu)) {
		/* Set up 16 deep read/write request queues: */
		gpu_write(gpu, REG_A3XX_VBIF_IN_RD_LIM_CONF0, 0x18181818);
		gpu_write(gpu, REG_A3XX_VBIF_IN_RD_LIM_CONF1, 0x18181818);
		gpu_write(gpu, REG_A3XX_VBIF_OUT_RD_LIM_CONF0, 0x18181818);
		gpu_write(gpu, REG_A3XX_VBIF_OUT_WR_LIM_CONF0, 0x18181818);
		gpu_write(gpu, REG_A3XX_VBIF_DDR_OUT_MAX_BURST, 0x0000303);
		gpu_write(gpu, REG_A3XX_VBIF_IN_WR_LIM_CONF0, 0x18181818);
		gpu_write(gpu, REG_A3XX_VBIF_IN_WR_LIM_CONF1, 0x18181818);
		/* Enable WR-REQ: */
		gpu_write(gpu, REG_A3XX_VBIF_GATE_OFF_WRREQ_EN, 0x00003f);
		/* Set up round robin arbitration between both AXI ports: */
		gpu_write(gpu, REG_A3XX_VBIF_ARB_CTL, 0x00000030);
		/* Set up VBIF_ROUND_ROBIN_QOS_ARB: */
		gpu_write(gpu, REG_A3XX_VBIF_ROUND_ROBIN_QOS_ARB, 0x0001);
		/* Set up AOOO: */
		gpu_write(gpu, REG_A3XX_VBIF_OUT_AXI_AOOO_EN, 0x0000003f);
		gpu_write(gpu, REG_A3XX_VBIF_OUT_AXI_AOOO, 0x003f003f);
		/* Enable 1K sort: */
		gpu_write(gpu, REG_A3XX_VBIF_ABIT_SORT, 0x0001003f);
		gpu_write(gpu, REG_A3XX_VBIF_ABIT_SORT_CONF, 0x000000a4);
		/* Disable VBIF clock gating.  This allows AXI to run at a
		 * higher frequency than the GPU:
		 */
		gpu_write(gpu, REG_A3XX_VBIF_CLKON, 0x00000001);
	} else {
		BUG();
	}

	/* Make all blocks contribute to the GPU BUSY perf counter: */
	gpu_write(gpu, REG_A3XX_RBBM_GPU_BUSY_MASKED, 0xffffffff);

	/* Tune the hysteresis counters for SP and CP idle detection: */
	gpu_write(gpu, REG_A3XX_RBBM_SP_HYST_CNT, 0x10);
	gpu_write(gpu, REG_A3XX_RBBM_WAIT_IDLE_CLOCKS_CTL, 0x10);

	/* Enable the RBBM error reporting bits.  This lets us get
	 * useful information on failure:
	 */
	gpu_write(gpu, REG_A3XX_RBBM_AHB_CTL0, 0x00000001);

	/* Enable AHB error reporting: */
	gpu_write(gpu, REG_A3XX_RBBM_AHB_CTL1, 0xa6ffffff);

	/* Turn on the power counters: */
	gpu_write(gpu, REG_A3XX_RBBM_RBBM_CTL, 0x00030000);

	/* Turn on hang detection - this spews a lot of useful information
	 * into the RBBM registers on a hang:
	 */
	gpu_write(gpu, REG_A3XX_RBBM_INTERFACE_HANG_INT_CTL, 0x00010fff);

	/* Enable 64-byte cacheline size.  HW Default is 32-byte (0x000000E0): */
	gpu_write(gpu, REG_A3XX_UCHE_CACHE_MODE_CONTROL_REG, 0x00000001);

	/* Enable Clock gating: */
	if (adreno_is_a320(adreno_gpu))
		gpu_write(gpu, REG_A3XX_RBBM_CLOCK_CTL, 0xbfffffff);
	else if (adreno_is_a330v2(adreno_gpu))
		gpu_write(gpu, REG_A3XX_RBBM_CLOCK_CTL, 0xaaaaaaaa);
	else if (adreno_is_a330(adreno_gpu))
		gpu_write(gpu, REG_A3XX_RBBM_CLOCK_CTL, 0xbffcffff);

	if (adreno_is_a330v2(adreno_gpu))
		gpu_write(gpu, REG_A3XX_RBBM_GPR0_CTL, 0x05515455);
	else if (adreno_is_a330(adreno_gpu))
		gpu_write(gpu, REG_A3XX_RBBM_GPR0_CTL, 0x00000000);

	/* Set the OCMEM base address for A330, etc: */
	if (a3xx_gpu->ocmem_hdl) {
		gpu_write(gpu, REG_A3XX_RB_GMEM_BASE_ADDR,
			(unsigned int)(a3xx_gpu->ocmem_base >> 14));
	}
static int a4xx_hw_init(struct msm_gpu *gpu)
{
	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
	struct a4xx_gpu *a4xx_gpu = to_a4xx_gpu(adreno_gpu);
	uint32_t *ptr, len;
	int i, ret;

	if (adreno_is_a420(adreno_gpu)) {
		gpu_write(gpu, REG_A4XX_VBIF_ABIT_SORT, 0x0001001F);
		gpu_write(gpu, REG_A4XX_VBIF_ABIT_SORT_CONF, 0x000000A4);
		gpu_write(gpu, REG_A4XX_VBIF_GATE_OFF_WRREQ_EN, 0x00000001);
		gpu_write(gpu, REG_A4XX_VBIF_IN_RD_LIM_CONF0, 0x18181818);
		gpu_write(gpu, REG_A4XX_VBIF_IN_RD_LIM_CONF1, 0x00000018);
		gpu_write(gpu, REG_A4XX_VBIF_IN_WR_LIM_CONF0, 0x18181818);
		gpu_write(gpu, REG_A4XX_VBIF_IN_WR_LIM_CONF1, 0x00000018);
		gpu_write(gpu, REG_A4XX_VBIF_ROUND_ROBIN_QOS_ARB, 0x00000003);
	} else if (adreno_is_a430(adreno_gpu)) {
		gpu_write(gpu, REG_A4XX_VBIF_GATE_OFF_WRREQ_EN, 0x00000001);
		gpu_write(gpu, REG_A4XX_VBIF_IN_RD_LIM_CONF0, 0x18181818);
		gpu_write(gpu, REG_A4XX_VBIF_IN_RD_LIM_CONF1, 0x00000018);
		gpu_write(gpu, REG_A4XX_VBIF_IN_WR_LIM_CONF0, 0x18181818);
		gpu_write(gpu, REG_A4XX_VBIF_IN_WR_LIM_CONF1, 0x00000018);
		gpu_write(gpu, REG_A4XX_VBIF_ROUND_ROBIN_QOS_ARB, 0x00000003);
	} else {
		BUG();
	}

	/* Make all blocks contribute to the GPU BUSY perf counter: */
	gpu_write(gpu, REG_A4XX_RBBM_GPU_BUSY_MASKED, 0xffffffff);

	/* Tune the hysteresis counters for SP and CP idle detection: */
	gpu_write(gpu, REG_A4XX_RBBM_SP_HYST_CNT, 0x10);
	gpu_write(gpu, REG_A4XX_RBBM_WAIT_IDLE_CLOCKS_CTL, 0x10);

	if (adreno_is_a430(adreno_gpu))
		gpu_write(gpu, REG_A4XX_RBBM_WAIT_IDLE_CLOCKS_CTL2, 0x30);

	/* Enable the RBBM error reporting bits: */
	gpu_write(gpu, REG_A4XX_RBBM_AHB_CTL0, 0x00000001);

	/* Enable AHB error reporting: */
	gpu_write(gpu, REG_A4XX_RBBM_AHB_CTL1, 0xa6ffffff);

	/* Enable power counters: */
	gpu_write(gpu, REG_A4XX_RBBM_RBBM_CTL, 0x00000030);

	/*
	 * Turn on hang detection - this spews a lot of useful information
	 * into the RBBM registers on a hang:
	 */
	gpu_write(gpu, REG_A4XX_RBBM_INTERFACE_HANG_INT_CTL,
			(1 << 30) | 0xFFFF);

	gpu_write(gpu, REG_A4XX_RB_GMEM_BASE_ADDR,
			(unsigned int)(a4xx_gpu->ocmem_base >> 14));

	/* Turn on performance counters: */
	gpu_write(gpu, REG_A4XX_RBBM_PERFCTR_CTL, 0x01);

	/* use the first CP counter for timestamp queries.. userspace may set
	 * this as well but it selects the same counter/countable:
	 */
	gpu_write(gpu, REG_A4XX_CP_PERFCTR_CP_SEL_0, CP_ALWAYS_COUNT);

	if (adreno_is_a430(adreno_gpu))
		gpu_write(gpu, REG_A4XX_UCHE_CACHE_WAYS_VFD, 0x07);

	/* Disable L2 bypass to avoid UCHE out of bounds errors: */
	gpu_write(gpu, REG_A4XX_UCHE_TRAP_BASE_LO, 0xffff0000);
	gpu_write(gpu, REG_A4XX_UCHE_TRAP_BASE_HI, 0xffff0000);

	gpu_write(gpu, REG_A4XX_CP_DEBUG, (1 << 25) |
			(adreno_is_a420(adreno_gpu) ? (1 << 29) : 0));

	/* On A430 enable SP regfile sleep for power savings */
	/* TODO downstream does this for !420, so maybe applies for 405 too? */
	if (!adreno_is_a420(adreno_gpu)) {
		gpu_write(gpu, REG_A4XX_RBBM_SP_REGFILE_SLEEP_CNTL_0,
			0x00000441);
		gpu_write(gpu, REG_A4XX_RBBM_SP_REGFILE_SLEEP_CNTL_1,
			0x00000441);
	}

	a4xx_enable_hwcg(gpu);

	/*
	 * For A420 set RBBM_CLOCK_DELAY_HLSQ.CGC_HLSQ_TP_EARLY_CYC >= 2
	 * due to timing issue with HLSQ_TP_CLK_EN
	 */
	if (adreno_is_a420(adreno_gpu)) {
		unsigned int val;

		val = gpu_read(gpu, REG_A4XX_RBBM_CLOCK_DELAY_HLSQ);
		val &= ~A4XX_CGC_HLSQ_EARLY_CYC__MASK;
		val |= 2 << A4XX_CGC_HLSQ_EARLY_CYC__SHIFT;
		gpu_write(gpu, REG_A4XX_RBBM_CLOCK_DELAY_HLSQ, val);
	}

	/* setup access protection: */
	gpu_write(gpu, REG_A4XX_CP_PROTECT_CTRL, 0x00000007);

	/* RBBM registers */
	gpu_write(gpu, REG_A4XX_CP_PROTECT(0), 0x62000010);
	gpu_write(gpu, REG_A4XX_CP_PROTECT(1), 0x63000020);
	gpu_write(gpu, REG_A4XX_CP_PROTECT(2), 0x64000040);
	gpu_write(gpu, REG_A4XX_CP_PROTECT(3), 0x65000080);
	gpu_write(gpu, REG_A4XX_CP_PROTECT(4), 0x66000100);
	gpu_write(gpu, REG_A4XX_CP_PROTECT(5), 0x64000200);

	/* CP registers */
	gpu_write(gpu, REG_A4XX_CP_PROTECT(6), 0x67000800);
	gpu_write(gpu, REG_A4XX_CP_PROTECT(7), 0x64001600);

	/* RB registers */
	gpu_write(gpu, REG_A4XX_CP_PROTECT(8), 0x60003300);

	/* HLSQ registers */
	gpu_write(gpu, REG_A4XX_CP_PROTECT(9), 0x60003800);

	/* VPC registers */
	gpu_write(gpu, REG_A4XX_CP_PROTECT(10), 0x61003980);

	/* SMMU registers */
	gpu_write(gpu, REG_A4XX_CP_PROTECT(11), 0x6e010000);

	gpu_write(gpu, REG_A4XX_RBBM_INT_0_MASK, A4XX_INT0_MASK);

	ret = adreno_hw_init(gpu);
	if (ret)
		return ret;

	/* Load PM4: */
	ptr = (uint32_t *)(adreno_gpu->pm4->data);
	len = adreno_gpu->pm4->size / 4;
	DBG("loading PM4 ucode version: %u", ptr[0]);
	gpu_write(gpu, REG_A4XX_CP_ME_RAM_WADDR, 0);
	for (i = 1; i < len; i++)
		gpu_write(gpu, REG_A4XX_CP_ME_RAM_DATA, ptr[i]);

	/* Load PFP: */
	ptr = (uint32_t *)(adreno_gpu->pfp->data);
	len = adreno_gpu->pfp->size / 4;
	DBG("loading PFP ucode version: %u", ptr[0]);
	gpu_write(gpu, REG_A4XX_CP_PFP_UCODE_ADDR, 0);
	for (i = 1; i < len; i++)
		gpu_write(gpu, REG_A4XX_CP_PFP_UCODE_DATA, ptr[i]);

	/* clear ME_HALT to start micro engine */
	gpu_write(gpu, REG_A4XX_CP_ME_CNTL, 0);

	return a4xx_me_init(gpu) ? 0 : -EINVAL;
}
uint32_t adreno_last_fence(struct msm_gpu *gpu)
{
	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
	return adreno_gpu->memptrs->fence;
}
void adreno_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
		struct msm_file_private *ctx)
{
	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
	struct msm_drm_private *priv = gpu->dev->dev_private;
	struct msm_ringbuffer *ring = gpu->rb;
	unsigned i;

	for (i = 0; i < submit->nr_cmds; i++) {
		switch (submit->cmd[i].type) {
		case MSM_SUBMIT_CMD_IB_TARGET_BUF:
			/* ignore IB-targets */
			break;
		case MSM_SUBMIT_CMD_CTX_RESTORE_BUF:
			/* ignore if there has not been a ctx switch: */
			if (priv->lastctx == ctx)
				break;
			/* fall-thru */
		case MSM_SUBMIT_CMD_BUF:
			OUT_PKT3(ring, adreno_is_a430(adreno_gpu) ?
				CP_INDIRECT_BUFFER_PFE : CP_INDIRECT_BUFFER_PFD, 2);
			OUT_RING(ring, submit->cmd[i].iova);
			OUT_RING(ring, submit->cmd[i].size);
			OUT_PKT2(ring);
			break;
		}
	}

	OUT_PKT0(ring, REG_AXXX_CP_SCRATCH_REG2, 1);
	OUT_RING(ring, submit->fence->seqno);

	if (adreno_is_a3xx(adreno_gpu) || adreno_is_a4xx(adreno_gpu)) {
		/* Flush HLSQ lazy updates to make sure there is nothing
		 * pending for indirect loads after the timestamp has
		 * passed:
		 */
		OUT_PKT3(ring, CP_EVENT_WRITE, 1);
		OUT_RING(ring, HLSQ_FLUSH);

		OUT_PKT3(ring, CP_WAIT_FOR_IDLE, 1);
		OUT_RING(ring, 0x00000000);
	}

	OUT_PKT3(ring, CP_EVENT_WRITE, 3);
	OUT_RING(ring, CACHE_FLUSH_TS);
	OUT_RING(ring, rbmemptr(adreno_gpu, fence));
	OUT_RING(ring, submit->fence->seqno);

	/* we could maybe be clever and only CP_COND_EXEC the interrupt: */
	OUT_PKT3(ring, CP_INTERRUPT, 1);
	OUT_RING(ring, 0x80000000);

	/* Workaround for missing irq issue on 8x16/a306.  Unsure if the
	 * root cause is a platform issue or some a306 quirk, but this
	 * keeps things humming along:
	 */
	if (adreno_is_a306(adreno_gpu)) {
		OUT_PKT3(ring, CP_WAIT_FOR_IDLE, 1);
		OUT_RING(ring, 0x00000000);
		OUT_PKT3(ring, CP_INTERRUPT, 1);
		OUT_RING(ring, 0x80000000);
	}

#if 0
	if (adreno_is_a3xx(adreno_gpu)) {
		/* Dummy set-constant to trigger context rollover */
		OUT_PKT3(ring, CP_SET_CONSTANT, 2);
		OUT_RING(ring, CP_REG(REG_A3XX_HLSQ_CL_KERNEL_GROUP_X_REG));
		OUT_RING(ring, 0x00000000);
	}
#endif

	gpu->funcs->flush(gpu);
}
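/*
 * Editorial sketch (assumed usage, not driver code): the CACHE_FLUSH_TS
 * event above makes the CP write the submit's seqno to the fence memptr
 * once preceding work has landed, so completion can be checked by
 * comparing that value (as adreno_last_fence() reads it back) against
 * the submitted seqno.  The signed difference keeps the comparison
 * correct across 32-bit seqno wraparound.
 */
static inline bool example_submit_retired(struct msm_gpu *gpu, uint32_t seqno)
{
	return (int32_t)(adreno_last_fence(gpu) - seqno) >= 0;
}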