void kgsl_ringbuffer_stop(struct kgsl_ringbuffer *rb)
{
	KGSL_CMD_VDBG("enter (rb=%p)\n", rb);

	if (rb->flags & KGSL_FLAGS_STARTED) {
		KGSL_CMD_DBG("disabling CP interrupts: mask %08x\n", 0);
		kgsl_yamato_regwrite(rb->device, REG_CP_INT_CNTL, 0);

		/* ME_HALT */
		kgsl_yamato_regwrite(rb->device, REG_CP_ME_CNTL, 0x10000000);

		rb->flags &= ~KGSL_FLAGS_STARTED;
	}

	KGSL_CMD_VDBG("return %d\n", 0);
}
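/*
 * The magic 0x10000000 written to REG_CP_ME_CNTL above is the micro engine
 * halt bit; kgsl_ringbuffer_start() later writes 0 to the same register to
 * release the ME. If the register headers in use do not already define it,
 * a named mask keeps the intent visible. The name below is an assumption
 * styled after the other CP_INT_CNTL__* masks in this file, not a constant
 * confirmed by the source.
 */
#define CP_ME_CNTL__ME_HALT_MASK	0x10000000	/* assumed: ME halt bit */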
/* functions */

void kgsl_cp_intrcallback(struct kgsl_device *device)
{
	unsigned int status = 0, num_reads = 0, master_status = 0;
	struct kgsl_yamato_device *yamato_device = KGSL_YAMATO_DEVICE(device);
	struct kgsl_ringbuffer *rb = &yamato_device->ringbuffer;

	KGSL_CMD_VDBG("enter (device=%p)\n", device);

	kgsl_yamato_regread(device, REG_MASTER_INT_SIGNAL, &master_status);
	while (!status && (num_reads < VALID_STATUS_COUNT_MAX) &&
		(master_status & MASTER_INT_SIGNAL__CP_INT_STAT)) {
		kgsl_yamato_regread(device, REG_CP_INT_STATUS, &status);
		kgsl_yamato_regread(device, REG_MASTER_INT_SIGNAL,
					&master_status);
		num_reads++;
	}
	if (num_reads > 1)
		KGSL_DRV_WARN("Looped %d times to read REG_CP_INT_STATUS\n",
				num_reads);
	if (!status) {
		if (master_status & MASTER_INT_SIGNAL__CP_INT_STAT) {
			/* This indicates that we could not read CP_INT_STAT.
			 * As a precaution just wake up processes so they can
			 * check their timestamps. Since we did not ack any
			 * interrupts, this interrupt will be generated again.
			 */
			KGSL_DRV_WARN("Unable to read CP_INT_STATUS\n");
			wake_up_interruptible_all(&yamato_device->ib1_wq);
		} else
			KGSL_DRV_WARN("Spurious interrupt detected\n");
		return;
	}

	if (status & CP_INT_CNTL__RB_INT_MASK) {
		/* signal intr completion event */
		unsigned int enableflag = 0;
		kgsl_sharedmem_writel(&rb->device->memstore,
			KGSL_DEVICE_MEMSTORE_OFFSET(ts_cmp_enable),
			enableflag);
		wmb();
		KGSL_CMD_WARN("ringbuffer rb interrupt\n");
	}

	if (status & CP_INT_CNTL__T0_PACKET_IN_IB_MASK) {
		KGSL_CMD_FATAL("ringbuffer T0 packet in IB interrupt\n");
		kgsl_yamato_regwrite(rb->device, REG_CP_INT_CNTL, 0);
	}
	if (status & CP_INT_CNTL__OPCODE_ERROR_MASK) {
		KGSL_CMD_FATAL("ringbuffer opcode error interrupt\n");
		kgsl_yamato_regwrite(rb->device, REG_CP_INT_CNTL, 0);
	}
	if (status & CP_INT_CNTL__PROTECTED_MODE_ERROR_MASK) {
		KGSL_CMD_FATAL("ringbuffer protected mode error interrupt\n");
		kgsl_yamato_regwrite(rb->device, REG_CP_INT_CNTL, 0);
	}
	if (status & CP_INT_CNTL__RESERVED_BIT_ERROR_MASK) {
		KGSL_CMD_FATAL("ringbuffer reserved bit error interrupt\n");
		kgsl_yamato_regwrite(rb->device, REG_CP_INT_CNTL, 0);
	}
	if (status & CP_INT_CNTL__IB_ERROR_MASK) {
		KGSL_CMD_FATAL("ringbuffer IB error interrupt\n");
		kgsl_yamato_regwrite(rb->device, REG_CP_INT_CNTL, 0);
	}
	if (status & CP_INT_CNTL__SW_INT_MASK)
		KGSL_CMD_DBG("ringbuffer software interrupt\n");
	if (status & CP_INT_CNTL__IB2_INT_MASK)
		KGSL_CMD_DBG("ringbuffer ib2 interrupt\n");
	if (status & (~GSL_CP_INT_MASK))
		KGSL_CMD_DBG("bad bits in REG_CP_INT_STATUS %08x\n", status);

	/* only ack bits we understand */
	status &= GSL_CP_INT_MASK;
	kgsl_yamato_regwrite(device, REG_CP_INT_ACK, status);

	if (status & (CP_INT_CNTL__IB1_INT_MASK | CP_INT_CNTL__RB_INT_MASK)) {
		KGSL_CMD_WARN("ringbuffer ib1/rb interrupt\n");
		wake_up_interruptible_all(&yamato_device->ib1_wq);
		atomic_notifier_call_chain(&(device->ts_notifier_list),
					   KGSL_DEVICE_YAMATO, NULL);
	}

	KGSL_CMD_VDBG("return\n");
}
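/*
 * The ib1/rb branch above fires device->ts_notifier_list so clients can
 * react to retired timestamps. A minimal sketch of a consumer, assuming
 * ts_notifier_list is a standard atomic notifier head (the example_* names
 * are illustrative, not part of the driver):
 */
static int example_ts_event(struct notifier_block *nb,
			    unsigned long device_id, void *data)
{
	/* device_id is KGSL_DEVICE_YAMATO when fired from the CP callback */
	return NOTIFY_OK;
}

static struct notifier_block example_ts_nb = {
	.notifier_call = example_ts_event,
};

/* during client setup:
 *	atomic_notifier_chain_register(&device->ts_notifier_list,
 *				       &example_ts_nb);
 */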
int kgsl_ringbuffer_start(struct kgsl_ringbuffer *rb, unsigned int init_ram)
{
	int status;
	/*cp_rb_cntl_u cp_rb_cntl; */
	union reg_cp_rb_cntl cp_rb_cntl;
	unsigned int *cmds, rb_cntl;
	struct kgsl_device *device = rb->device;
	uint cmds_gpu;

	KGSL_CMD_VDBG("enter (rb=%p)\n", rb);

	if (rb->flags & KGSL_FLAGS_STARTED) {
		KGSL_CMD_VDBG("already started return %d\n", 0);
		return 0;
	}

	if (init_ram) {
		rb->timestamp = 0;
		GSL_RB_INIT_TIMESTAMP(rb);
	}

	kgsl_sharedmem_set(&rb->memptrs_desc, 0, 0,
			   sizeof(struct kgsl_rbmemptrs));

	kgsl_sharedmem_set(&rb->buffer_desc, 0, 0xAA,
			   (rb->sizedwords << 2));

	kgsl_yamato_regwrite(device, REG_CP_RB_WPTR_BASE,
			     (rb->memptrs_desc.gpuaddr
			      + GSL_RB_MEMPTRS_WPTRPOLL_OFFSET));

	/* setup WPTR delay */
	kgsl_yamato_regwrite(device, REG_CP_RB_WPTR_DELAY, 0 /*0x70000010 */);

	/* setup REG_CP_RB_CNTL */
	kgsl_yamato_regread(device, REG_CP_RB_CNTL, &rb_cntl);
	cp_rb_cntl.val = rb_cntl;

	/* size of ringbuffer */
	cp_rb_cntl.f.rb_bufsz =
		kgsl_ringbuffer_sizelog2quadwords(rb->sizedwords);
	/* quadwords to read before updating mem RPTR */
	cp_rb_cntl.f.rb_blksz = rb->blksizequadwords;
	/* WPTR polling */
	cp_rb_cntl.f.rb_poll_en = GSL_RB_CNTL_POLL_EN;
	/* mem RPTR writebacks */
	cp_rb_cntl.f.rb_no_update = GSL_RB_CNTL_NO_UPDATE;

	kgsl_yamato_regwrite(device, REG_CP_RB_CNTL, cp_rb_cntl.val);

	kgsl_yamato_regwrite(device, REG_CP_RB_BASE, rb->buffer_desc.gpuaddr);

	kgsl_yamato_regwrite(device, REG_CP_RB_RPTR_ADDR,
			     rb->memptrs_desc.gpuaddr +
			     GSL_RB_MEMPTRS_RPTR_OFFSET);

	/* explicitly clear all cp interrupts */
	kgsl_yamato_regwrite(device, REG_CP_INT_ACK, 0xFFFFFFFF);

	/* setup scratch/timestamp */
	kgsl_yamato_regwrite(device, REG_SCRATCH_ADDR,
			     device->memstore.gpuaddr +
			     KGSL_DEVICE_MEMSTORE_OFFSET(soptimestamp));

	kgsl_yamato_regwrite(device, REG_SCRATCH_UMSK,
			     GSL_RB_MEMPTRS_SCRATCH_MASK);

	/* load the CP ucode */
	status = kgsl_ringbuffer_load_pm4_ucode(device);
	if (status != 0) {
		KGSL_DRV_ERR("kgsl_ringbuffer_load_pm4_ucode failed %d\n",
			     status);
		return status;
	}

	/* load the prefetch parser ucode */
	status = kgsl_ringbuffer_load_pfp_ucode(device);
	if (status != 0) {
		KGSL_DRV_ERR("kgsl_ringbuffer_load_pfp_ucode failed %d\n",
			     status);
		return status;
	}

	kgsl_yamato_regwrite(device, REG_CP_QUEUE_THRESHOLDS, 0x000C0804);

	rb->rptr = 0;
	rb->wptr = 0;

	/* clear ME_HALT to start micro engine */
	kgsl_yamato_regwrite(device, REG_CP_ME_CNTL, 0);

	/* ME_INIT */
	cmds = kgsl_ringbuffer_allocspace(rb, 19);
	cmds_gpu = rb->buffer_desc.gpuaddr + sizeof(uint)*(rb->wptr-19);

	GSL_RB_WRITE(cmds, cmds_gpu, PM4_HDR_ME_INIT);
	/* All fields present (bits 9:0) */
	GSL_RB_WRITE(cmds, cmds_gpu, 0x000003ff);
	/* Disable/Enable Real-Time Stream processing (present but ignored) */
	GSL_RB_WRITE(cmds, cmds_gpu, 0x00000000);
	/* Enable (2D <-> 3D) implicit synchronization (present but ignored) */
	GSL_RB_WRITE(cmds, cmds_gpu, 0x00000000);

	GSL_RB_WRITE(cmds, cmds_gpu,
		     GSL_HAL_SUBBLOCK_OFFSET(REG_RB_SURFACE_INFO));
	GSL_RB_WRITE(cmds, cmds_gpu,
		     GSL_HAL_SUBBLOCK_OFFSET(REG_PA_SC_WINDOW_OFFSET));
	GSL_RB_WRITE(cmds, cmds_gpu,
		     GSL_HAL_SUBBLOCK_OFFSET(REG_VGT_MAX_VTX_INDX));
	GSL_RB_WRITE(cmds, cmds_gpu,
		     GSL_HAL_SUBBLOCK_OFFSET(REG_SQ_PROGRAM_CNTL));
	GSL_RB_WRITE(cmds, cmds_gpu,
		     GSL_HAL_SUBBLOCK_OFFSET(REG_RB_DEPTHCONTROL));
	GSL_RB_WRITE(cmds, cmds_gpu,
		     GSL_HAL_SUBBLOCK_OFFSET(REG_PA_SU_POINT_SIZE));
	GSL_RB_WRITE(cmds, cmds_gpu,
		     GSL_HAL_SUBBLOCK_OFFSET(REG_PA_SC_LINE_CNTL));
	GSL_RB_WRITE(cmds, cmds_gpu,
		     GSL_HAL_SUBBLOCK_OFFSET(REG_PA_SU_POLY_OFFSET_FRONT_SCALE));

	/* Vertex and Pixel Shader Start Addresses in instructions
	 * (3 DWORDS per instruction) */
	GSL_RB_WRITE(cmds, cmds_gpu, 0x80000180);
	/* Maximum Contexts */
	GSL_RB_WRITE(cmds, cmds_gpu, 0x00000001);
	/* Write Confirm Interval; the CP waits wait_interval * 16 clocks
	 * between polls */
	GSL_RB_WRITE(cmds, cmds_gpu, 0x00000000);
	/* NQ and External Memory Swap */
	GSL_RB_WRITE(cmds, cmds_gpu, 0x00000000);
	/* Protected mode error checking */
	GSL_RB_WRITE(cmds, cmds_gpu, GSL_RB_PROTECTED_MODE_CONTROL);
	/* Disable header dumping and Header dump address */
	GSL_RB_WRITE(cmds, cmds_gpu, 0x00000000);
	/* Header dump size */
	GSL_RB_WRITE(cmds, cmds_gpu, 0x00000000);

	kgsl_ringbuffer_submit(rb);

	/* idle device to validate ME INIT */
	status = kgsl_yamato_idle(device, KGSL_TIMEOUT_DEFAULT);

	KGSL_CMD_DBG("enabling CP interrupts: mask %08lx\n", GSL_CP_INT_MASK);
	kgsl_yamato_regwrite(rb->device, REG_CP_INT_CNTL, GSL_CP_INT_MASK);
	if (status == 0)
		rb->flags |= KGSL_FLAGS_STARTED;

	KGSL_CMD_VDBG("return %d\n", status);

	return status;
}
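/*
 * Usage sketch: kgsl_ringbuffer_start() and kgsl_ringbuffer_stop() are
 * paired around device restarts. The caller below is illustrative only
 * (its name and error policy are not from the driver); init_ram selects
 * whether the timestamp state is reset.
 */
static int example_restart_ringbuffer(struct kgsl_yamato_device *yamato_device)
{
	struct kgsl_ringbuffer *rb = &yamato_device->ringbuffer;
	int status;

	kgsl_ringbuffer_stop(rb);		/* mask CP irqs, halt ME */
	status = kgsl_ringbuffer_start(rb, 0);	/* 0: keep timestamps */
	if (status != 0)
		KGSL_DRV_ERR("ringbuffer restart failed %d\n", status);
	return status;
}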
void kgsl_cp_intrcallback(struct kgsl_device *device)
{
	unsigned int status = 0, num_reads = 0, master_status = 0;
	struct kgsl_yamato_device *yamato_device =
				(struct kgsl_yamato_device *) device;
	struct kgsl_ringbuffer *rb = &device->ringbuffer;

	KGSL_CMD_VDBG("enter (device=%p)\n", device);

	kgsl_yamato_regread(device, REG_MASTER_INT_SIGNAL, &master_status);
	while (!status && (num_reads < VALID_STATUS_COUNT_MAX) &&
		(master_status & MASTER_INT_SIGNAL__CP_INT_STAT)) {
		kgsl_yamato_regread(device, REG_CP_INT_STATUS, &status);
		kgsl_yamato_regread(device, REG_MASTER_INT_SIGNAL,
					&master_status);
		num_reads++;
	}
	if (num_reads > 1)
		KGSL_DRV_WARN("Looped %d times to read REG_CP_INT_STATUS\n",
				num_reads);
	if (!status) {
		if (master_status & MASTER_INT_SIGNAL__CP_INT_STAT) {
			KGSL_DRV_WARN("Unable to read CP_INT_STATUS\n");
			wake_up_interruptible_all(&yamato_device->ib1_wq);
		} else
			KGSL_DRV_WARN("Spurious interrupt detected\n");
		return;
	}

	if (status & CP_INT_CNTL__RB_INT_MASK) {
		unsigned int enableflag = 0;
		kgsl_sharedmem_writel(&rb->device->memstore,
			KGSL_DEVICE_MEMSTORE_OFFSET(ts_cmp_enable),
			enableflag);
		wmb();
		KGSL_CMD_WARN("ringbuffer rb interrupt\n");
	}

	if (status & CP_INT_CNTL__T0_PACKET_IN_IB_MASK) {
		KGSL_CMD_FATAL("ringbuffer T0 packet in IB interrupt\n");
		kgsl_yamato_regwrite(rb->device, REG_CP_INT_CNTL, 0);
		kgsl_ringbuffer_dump(rb);
	}
	if (status & CP_INT_CNTL__OPCODE_ERROR_MASK) {
		KGSL_CMD_FATAL("ringbuffer opcode error interrupt\n");
		kgsl_yamato_regwrite(rb->device, REG_CP_INT_CNTL, 0);
		kgsl_ringbuffer_dump(rb);
	}
	if (status & CP_INT_CNTL__PROTECTED_MODE_ERROR_MASK) {
		KGSL_CMD_FATAL("ringbuffer protected mode error interrupt\n");
		kgsl_yamato_regwrite(rb->device, REG_CP_INT_CNTL, 0);
		kgsl_ringbuffer_dump(rb);
	}
	if (status & CP_INT_CNTL__RESERVED_BIT_ERROR_MASK) {
		KGSL_CMD_FATAL("ringbuffer reserved bit error interrupt\n");
		kgsl_yamato_regwrite(rb->device, REG_CP_INT_CNTL, 0);
		kgsl_ringbuffer_dump(rb);
	}
	if (status & CP_INT_CNTL__IB_ERROR_MASK) {
		KGSL_CMD_FATAL("ringbuffer IB error interrupt\n");
		kgsl_yamato_regwrite(rb->device, REG_CP_INT_CNTL, 0);
		kgsl_ringbuffer_dump(rb);
	}
	if (status & CP_INT_CNTL__SW_INT_MASK)
		KGSL_CMD_DBG("ringbuffer software interrupt\n");
	if (status & CP_INT_CNTL__IB2_INT_MASK)
		KGSL_CMD_DBG("ringbuffer ib2 interrupt\n");
	if (status & (~GSL_CP_INT_MASK))
		KGSL_CMD_DBG("bad bits in REG_CP_INT_STATUS %08x\n", status);

	/* only ack bits we understand */
	status &= GSL_CP_INT_MASK;
	kgsl_yamato_regwrite(device, REG_CP_INT_ACK, status);

	if (status & (CP_INT_CNTL__IB1_INT_MASK | CP_INT_CNTL__RB_INT_MASK)) {
		KGSL_CMD_WARN("ringbuffer ib1/rb interrupt\n");
		wake_up_interruptible_all(&yamato_device->ib1_wq);
		atomic_notifier_call_chain(&(device->ts_notifier_list),
					   KGSL_DEVICE_YAMATO, NULL);
	}

	KGSL_CMD_VDBG("return\n");
}
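/*
 * This variant adds kgsl_ringbuffer_dump() on every fatal CP error. A
 * minimal sketch of what such a dump can log, assuming buffer_desc.hostptr
 * is the CPU mapping of the ring and KGSL_CMD_ERR exists alongside the
 * other KGSL_CMD_* macros (both assumptions; this is not the driver's
 * actual dump routine):
 */
static void example_ringbuffer_dump(struct kgsl_ringbuffer *rb)
{
	unsigned int i, start, *hostptr = rb->buffer_desc.hostptr;

	KGSL_CMD_ERR("rb: gpuaddr %08x rptr %d wptr %d\n",
		     rb->buffer_desc.gpuaddr, rb->rptr, rb->wptr);

	/* log a small window of dwords around the read pointer */
	start = (rb->rptr >= 8) ? (rb->rptr - 8) : 0;
	for (i = start; i < rb->rptr + 8 && i < rb->sizedwords; i++)
		KGSL_CMD_ERR("rb[%04d]: %08x\n", i, hostptr[i]);
}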
static int kgsl_ringbuffer_start(struct kgsl_ringbuffer *rb)
{
	int status;
	union reg_cp_rb_cntl cp_rb_cntl;
	unsigned int *cmds, rb_cntl;
	struct kgsl_device *device = rb->device;

	KGSL_CMD_VDBG("enter (rb=%p)\n", rb);

	if (rb->flags & KGSL_FLAGS_STARTED) {
		KGSL_CMD_VDBG("return %d\n", 0);
		return 0;
	}

	kgsl_sharedmem_set(&rb->memptrs_desc, 0, 0,
			   sizeof(struct kgsl_rbmemptrs));

	kgsl_sharedmem_set(&rb->buffer_desc, 0, 0xAA,
			   (rb->sizedwords << 2));

	kgsl_yamato_regwrite(device, REG_CP_RB_WPTR_BASE,
			     (rb->memptrs_desc.gpuaddr
			      + GSL_RB_MEMPTRS_WPTRPOLL_OFFSET));

	/* setup WPTR delay */
	kgsl_yamato_regwrite(device, REG_CP_RB_WPTR_DELAY, 0);

	/* setup REG_CP_RB_CNTL */
	kgsl_yamato_regread(device, REG_CP_RB_CNTL, &rb_cntl);
	cp_rb_cntl.val = rb_cntl;

	/* size of ringbuffer */
	cp_rb_cntl.f.rb_bufsz =
		kgsl_ringbuffer_sizelog2quadwords(rb->sizedwords);
	/* quadwords to read before updating mem RPTR */
	cp_rb_cntl.f.rb_blksz = rb->blksizequadwords;
	/* WPTR polling */
	cp_rb_cntl.f.rb_poll_en = GSL_RB_CNTL_POLL_EN;
	/* mem RPTR writebacks */
	cp_rb_cntl.f.rb_no_update = GSL_RB_CNTL_NO_UPDATE;

	kgsl_yamato_regwrite(device, REG_CP_RB_CNTL, cp_rb_cntl.val);

	kgsl_yamato_regwrite(device, REG_CP_RB_BASE, rb->buffer_desc.gpuaddr);

	kgsl_yamato_regwrite(device, REG_CP_RB_RPTR_ADDR,
			     rb->memptrs_desc.gpuaddr +
			     GSL_RB_MEMPTRS_RPTR_OFFSET);

	/* explicitly clear all cp interrupts */
	kgsl_yamato_regwrite(device, REG_CP_INT_ACK, 0xFFFFFFFF);

	/* setup scratch/timestamp */
	kgsl_yamato_regwrite(device, REG_SCRATCH_ADDR,
			     device->memstore.gpuaddr +
			     KGSL_DEVICE_MEMSTORE_OFFSET(soptimestamp));

	kgsl_yamato_regwrite(device, REG_SCRATCH_UMSK,
			     GSL_RB_MEMPTRS_SCRATCH_MASK);

	/* load the CP ucode */
	status = kgsl_ringbuffer_load_pm4_ucode(device);
	if (status != 0) {
		KGSL_DRV_ERR("kgsl_ringbuffer_load_pm4_ucode failed %d\n",
			     status);
		return status;
	}

	/* load the prefetch parser ucode */
	status = kgsl_ringbuffer_load_pfp_ucode(device);
	if (status != 0) {
		KGSL_DRV_ERR("kgsl_ringbuffer_load_pfp_ucode failed %d\n",
			     status);
		return status;
	}

	kgsl_yamato_regwrite(device, REG_CP_QUEUE_THRESHOLDS, 0x000C0804);

	rb->rptr = 0;
	rb->wptr = 0;

	rb->timestamp = 0;
	GSL_RB_INIT_TIMESTAMP(rb);

	INIT_LIST_HEAD(&rb->memqueue);

	/* clear ME_HALT to start micro engine */
	kgsl_yamato_regwrite(device, REG_CP_ME_CNTL, 0);

	/* ME_INIT */
	cmds = kgsl_ringbuffer_allocspace(rb, 19);

	GSL_RB_WRITE(cmds, PM4_HDR_ME_INIT);
	/* All fields present (bits 9:0) */
	GSL_RB_WRITE(cmds, 0x000003ff);
	/* Disable/Enable Real-Time Stream processing (present but ignored) */
	GSL_RB_WRITE(cmds, 0x00000000);
	/* Enable (2D <-> 3D) implicit synchronization (present but ignored) */
	GSL_RB_WRITE(cmds, 0x00000000);

	GSL_RB_WRITE(cmds, GSL_HAL_SUBBLOCK_OFFSET(REG_RB_SURFACE_INFO));
	GSL_RB_WRITE(cmds, GSL_HAL_SUBBLOCK_OFFSET(REG_PA_SC_WINDOW_OFFSET));
	GSL_RB_WRITE(cmds, GSL_HAL_SUBBLOCK_OFFSET(REG_VGT_MAX_VTX_INDX));
	GSL_RB_WRITE(cmds, GSL_HAL_SUBBLOCK_OFFSET(REG_SQ_PROGRAM_CNTL));
	GSL_RB_WRITE(cmds, GSL_HAL_SUBBLOCK_OFFSET(REG_RB_DEPTHCONTROL));
	GSL_RB_WRITE(cmds, GSL_HAL_SUBBLOCK_OFFSET(REG_PA_SU_POINT_SIZE));
	GSL_RB_WRITE(cmds, GSL_HAL_SUBBLOCK_OFFSET(REG_PA_SC_LINE_CNTL));
	GSL_RB_WRITE(cmds,
		GSL_HAL_SUBBLOCK_OFFSET(REG_PA_SU_POLY_OFFSET_FRONT_SCALE));

	/* Vertex and Pixel Shader Start Addresses in instructions
	 * (3 DWORDS per instruction) */
	GSL_RB_WRITE(cmds, 0x80000180);
	/* Maximum Contexts */
	GSL_RB_WRITE(cmds, 0x00000001);
	/* Write Confirm Interval; the CP waits wait_interval * 16 clocks
	 * between polls */
	GSL_RB_WRITE(cmds, 0x00000000);
	/* NQ and External Memory Swap */
	GSL_RB_WRITE(cmds, 0x00000000);
	/* Protected mode error checking */
	GSL_RB_WRITE(cmds, GSL_RB_PROTECTED_MODE_CONTROL);
	/* Disable header dumping and Header dump address */
	GSL_RB_WRITE(cmds, 0x00000000);
	/* Header dump size */
	GSL_RB_WRITE(cmds, 0x00000000);

	kgsl_ringbuffer_submit(rb);

	/* idle device to validate ME INIT */
	status = kgsl_yamato_idle(device, KGSL_TIMEOUT_DEFAULT);

	KGSL_CMD_DBG("enabling CP interrupts: mask %08lx\n", GSL_CP_INT_MASK);
	kgsl_yamato_regwrite(rb->device, REG_CP_INT_CNTL, GSL_CP_INT_MASK);
	if (status == 0)
		rb->flags |= KGSL_FLAGS_STARTED;

	KGSL_CMD_VDBG("return %d\n", status);

	return status;
}
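/*
 * Note the two-argument GSL_RB_WRITE(cmds, data) in this version versus
 * the three-argument GSL_RB_WRITE(cmds, cmds_gpu, data) above, which also
 * threads the command's GPU address through for tracing. A plausible
 * expansion of the two-argument form, offered purely as an assumption
 * (the driver's actual macro may differ):
 */
#define GSL_RB_WRITE(ring, data)				\
	do {							\
		*(ring) = (data);	/* store one dword */	\
		wmb();		/* order store before WPTR update */ \
		(ring)++;	/* advance the CPU-side pointer */ \
	} while (0)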
/* functions */

void kgsl_cp_intrcallback(struct kgsl_device *device)
{
	unsigned int status = 0;
	struct kgsl_ringbuffer *rb = &device->ringbuffer;

	KGSL_CMD_VDBG("enter (device=%p)\n", device);

	kgsl_yamato_regread(device, REG_CP_INT_STATUS, &status);

	if (status & CP_INT_CNTL__RB_INT_MASK) {
		/* signal intr completion event */
		int init_reftimestamp = 0x7fffffff;
		int enableflag = 0;
		kgsl_sharedmem_write(&rb->device->memstore,
			KGSL_DEVICE_MEMSTORE_OFFSET(ts_cmp_enable),
			&enableflag, 4);
		kgsl_sharedmem_write(&rb->device->memstore,
			KGSL_DEVICE_MEMSTORE_OFFSET(ref_wait_ts),
			&init_reftimestamp, 4);
		KGSL_CMD_WARN("ringbuffer rb interrupt\n");
	}

	if (status & (CP_INT_CNTL__IB1_INT_MASK | CP_INT_CNTL__RB_INT_MASK)) {
		KGSL_CMD_WARN("ringbuffer ib1/rb interrupt\n");
		wake_up_interruptible_all(&device->ib1_wq);
	}
	if (status & CP_INT_CNTL__T0_PACKET_IN_IB_MASK) {
		KGSL_CMD_FATAL("ringbuffer T0 packet in IB interrupt\n");
		kgsl_yamato_regwrite(rb->device, REG_CP_INT_CNTL, 0);
		kgsl_ringbuffer_dump(rb);
	}
	if (status & CP_INT_CNTL__OPCODE_ERROR_MASK) {
		KGSL_CMD_FATAL("ringbuffer opcode error interrupt\n");
		kgsl_yamato_regwrite(rb->device, REG_CP_INT_CNTL, 0);
		kgsl_ringbuffer_dump(rb);
	}
	if (status & CP_INT_CNTL__PROTECTED_MODE_ERROR_MASK) {
		KGSL_CMD_FATAL("ringbuffer protected mode error interrupt\n");
		kgsl_yamato_regwrite(rb->device, REG_CP_INT_CNTL, 0);
		kgsl_ringbuffer_dump(rb);
	}
	if (status & CP_INT_CNTL__RESERVED_BIT_ERROR_MASK) {
		KGSL_CMD_FATAL("ringbuffer reserved bit error interrupt\n");
		kgsl_yamato_regwrite(rb->device, REG_CP_INT_CNTL, 0);
		kgsl_ringbuffer_dump(rb);
	}
	if (status & CP_INT_CNTL__IB_ERROR_MASK) {
		KGSL_CMD_FATAL("ringbuffer IB error interrupt\n");
		kgsl_yamato_regwrite(rb->device, REG_CP_INT_CNTL, 0);
		kgsl_ringbuffer_dump(rb);
	}
	if (status & CP_INT_CNTL__SW_INT_MASK)
		KGSL_CMD_DBG("ringbuffer software interrupt\n");
	if (status & CP_INT_CNTL__IB2_INT_MASK)
		KGSL_CMD_DBG("ringbuffer ib2 interrupt\n");
	if (status & (~GSL_CP_INT_MASK))
		KGSL_CMD_DBG("bad bits in REG_CP_INT_STATUS %08x\n", status);

	/* only ack bits we understand */
	status &= GSL_CP_INT_MASK;
	kgsl_yamato_regwrite(device, REG_CP_INT_ACK, status);

	KGSL_CMD_VDBG("return\n");
}
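/*
 * All of the callback variants above are meant to run from the device
 * interrupt handler. A minimal dispatch sketch, assuming the handler
 * reads REG_MASTER_INT_SIGNAL and forwards CP interrupts the way the
 * retry loop in the newer variants does (the example_* name and the
 * single-block dispatch are illustrative; a real handler would also
 * service the other interrupt blocks):
 */
static irqreturn_t example_yamato_isr(int irq, void *data)
{
	struct kgsl_device *device = data;
	unsigned int status = 0;

	kgsl_yamato_regread(device, REG_MASTER_INT_SIGNAL, &status);

	if (status & MASTER_INT_SIGNAL__CP_INT_STAT)
		kgsl_cp_intrcallback(device);

	return IRQ_HANDLED;
}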