static void a2xx_cp_intrcallback(struct kgsl_device *device)
{
	unsigned int status = 0, num_reads = 0, master_status = 0;
	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
	struct adreno_ringbuffer *rb = &adreno_dev->ringbuffer;
	int i;

	adreno_regread(device, REG_MASTER_INT_SIGNAL, &master_status);
	while (!status && (num_reads < VALID_STATUS_COUNT_MAX) &&
		(master_status & MASTER_INT_SIGNAL__CP_INT_STAT)) {
		adreno_regread(device, REG_CP_INT_STATUS, &status);
		adreno_regread(device, REG_MASTER_INT_SIGNAL,
			&master_status);
		num_reads++;
	}
	if (num_reads > 1)
		KGSL_DRV_WARN(device,
			"Looped %d times to read REG_CP_INT_STATUS\n",
			num_reads);

	trace_kgsl_a2xx_irq_status(device, master_status, status);

	if (!status) {
		if (master_status & MASTER_INT_SIGNAL__CP_INT_STAT) {
			/* This indicates that we could not read CP_INT_STAT.
			 * As a precaution just wake up processes so
			 * they can check their timestamps. Since, we
			 * did not ack any interrupts this interrupt will
			 * be generated again */
			KGSL_DRV_WARN(device, "Unable to read CP_INT_STATUS\n");
			wake_up_interruptible_all(&device->wait_queue);
		} else
			KGSL_DRV_WARN(device, "Spurious interrupt detected\n");
		return;
	}

	for (i = 0; i < ARRAY_SIZE(kgsl_cp_error_irqs); i++) {
		if (status & kgsl_cp_error_irqs[i].mask) {
			KGSL_CMD_CRIT(rb->device, "%s\n",
				kgsl_cp_error_irqs[i].message);
			/*
			 * on fatal errors, turn off the interrupts to
			 * avoid storming. This has the side effect of
			 * forcing a PM dump when the timestamp times out
			 */
			kgsl_pwrctrl_irq(rb->device, KGSL_PWRFLAGS_OFF);
		}
	}

	/* only ack bits we understand */
	status &= CP_INT_MASK;
	adreno_regwrite(device, REG_CP_INT_ACK, status);

	if (status & (CP_INT_CNTL__IB1_INT_MASK | CP_INT_CNTL__RB_INT_MASK)) {
		KGSL_CMD_WARN(rb->device, "ringbuffer ib1/rb interrupt\n");
		queue_work(device->work_queue, &device->ts_expired_ws);
		wake_up_interruptible_all(&device->wait_queue);
	}
}
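/*
 * The error loop above indexes a kgsl_cp_error_irqs table that is not part
 * of this section. Below is a minimal sketch of what such a table could look
 * like, pairing the fatal CP interrupt bits with the messages used by the
 * per-bit handlers later in this section; the anonymous struct layout and the
 * exact set of entries are assumptions, not the driver's actual definition.
 */
static const struct {
	unsigned int mask;	/* CP_INT_STATUS bit for the error */
	const char *message;	/* text logged when the bit is set */
} kgsl_cp_error_irqs[] = {
	{ CP_INT_CNTL__T0_PACKET_IN_IB_MASK,
		"ringbuffer T0 packet in IB interrupt" },
	{ CP_INT_CNTL__OPCODE_ERROR_MASK,
		"ringbuffer opcode error interrupt" },
	{ CP_INT_CNTL__PROTECTED_MODE_ERROR_MASK,
		"ringbuffer protected mode error interrupt" },
	{ CP_INT_CNTL__RESERVED_BIT_ERROR_MASK,
		"ringbuffer reserved bit error interrupt" },
	{ CP_INT_CNTL__IB_ERROR_MASK,
		"ringbuffer IB error interrupt" },
};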
/* Caller must hold the device mutex. */
int kgsl_pwrctrl_wake(struct kgsl_device *device)
{
	int status = KGSL_SUCCESS;
	struct kgsl_pwrctrl *pwr = &device->pwrctrl;

	BUG_ON(!mutex_is_locked(&device->mutex));

	if (device->state == KGSL_STATE_SUSPEND)
		return status;

	KGSL_DRV_INFO("GRP_CLK= %lu BUS CLK= %lu\n",
		kgsl_get_clkrate(pwr->grp_clk),
		kgsl_get_clkrate(pwr->ebi1_clk));

	/* Turn on the core clocks */
	status = kgsl_pwrctrl_clk(device, KGSL_PWRFLAGS_CLK_ON);

	/* AXI stays on during NAP, so only re-enable it when
	 * coming out of a deeper sleep state. */
	if (device->state != KGSL_STATE_NAP)
		kgsl_pwrctrl_axi(device, KGSL_PWRFLAGS_AXI_ON);

	/* Enable state before turning on irq */
	device->state = KGSL_STATE_ACTIVE;
	KGSL_DRV_WARN("state -> ACTIVE, device %d\n", device->id);
	kgsl_pwrctrl_irq(device, KGSL_PWRFLAGS_IRQ_ON);

	/* Re-enable HW access */
	mod_timer(&device->idle_timer, jiffies + FIRST_TIMEOUT);

	KGSL_DRV_VDBG("<-- kgsl_pwrctrl_wake(). Return value %d\n", status);

	wake_lock(&device->idle_wakelock);
	return status;
}
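/*
 * Hedged usage sketch: kgsl_pwrctrl_wake() requires the device mutex (it
 * BUG()s otherwise) and leaves the device in KGSL_STATE_ACTIVE with the idle
 * timer re-armed. A hypothetical caller that needs to touch hardware might
 * look like the following; the function name and body are illustrative only,
 * not part of the driver.
 */
static int example_touch_hw(struct kgsl_device *device)
{
	int status;

	mutex_lock(&device->mutex);
	status = kgsl_pwrctrl_wake(device);	/* clocks, AXI and irq back on */
	/* ... perform register accesses while the device is ACTIVE ... */
	mutex_unlock(&device->mutex);

	return status;
}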
static void a2xx_rbbm_intrcallback(struct kgsl_device *device)
{
	unsigned int status = 0;
	unsigned int rderr = 0;
	unsigned int addr = 0;
	const char *source;

	adreno_regread(device, REG_RBBM_INT_STATUS, &status);

	if (status & RBBM_INT_CNTL__RDERR_INT_MASK) {
		adreno_regread(device, REG_RBBM_READ_ERROR, &rderr);
		source = (rderr & RBBM_READ_ERROR_REQUESTER) ? "host" : "cp";
		/* convert to dword address */
		addr = (rderr & RBBM_READ_ERROR_ADDRESS_MASK) >> 2;

		/*
		 * Failed host reads of REG_CP_INT_STATUS are retried by the
		 * CP interrupt handler, so log them at a lower severity than
		 * other read errors.
		 */
		if (addr == REG_CP_INT_STATUS &&
			rderr & RBBM_READ_ERROR_ERROR &&
			rderr & RBBM_READ_ERROR_REQUESTER)
			KGSL_DRV_WARN(device,
				"rbbm read error interrupt: %s reg: %04X\n",
				source, addr);
		else
			KGSL_DRV_CRIT(device,
				"rbbm read error interrupt: %s reg: %04X\n",
				source, addr);
	}

	/* only ack bits we understand */
	status &= RBBM_INT_MASK;
	adreno_regwrite(device, REG_RBBM_INT_ACK, status);
}
static inline void kgsl_fence_event_cb(struct kgsl_device *device,
	void *priv, u32 timestamp)
{
	struct kgsl_fence_event_priv *ev = priv;

	if (ev != NULL) {
		kgsl_sync_timeline_signal(ev->timeline, ev->timestamp);
		kfree(ev);
	} else {
		KGSL_DRV_WARN(device,
			"fence event callback called with NULL priv, timeline not signaled\n");
	}
}
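/*
 * The callback above only dereferences two fields of its heap-allocated
 * private data before freeing it. A minimal sketch of that payload, inferred
 * from the usage rather than taken from the driver's actual declaration
 * (the timeline pointer type in particular is an assumption), could be:
 */
struct kgsl_fence_event_priv {
	struct sync_timeline *timeline;	/* timeline to signal on expiry */
	u32 timestamp;			/* timestamp to signal it with */
};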
/* Caller must hold the device mutex. */
int kgsl_pwrctrl_sleep(struct kgsl_device *device)
{
	struct kgsl_pwrctrl *pwr = &device->pwrctrl;

	KGSL_DRV_INFO("device %d, current state= %d, requested change= %d\n",
		device->id, device->state, device->requested_state);
	KGSL_DRV_INFO("GRP_CLK= %lu BUS CLK= %lu\n",
		kgsl_get_clkrate(pwr->grp_clk),
		kgsl_get_clkrate(pwr->ebi1_clk));

	/* Work through the legal state transitions */
	if (device->requested_state == KGSL_STATE_NAP) {
		if (device->ftbl.device_isidle(device))
			goto nap;
	} else if (device->requested_state == KGSL_STATE_SLEEP) {
		if (device->state == KGSL_STATE_NAP ||
			device->ftbl.device_isidle(device))
			goto sleep;
	}

	device->requested_state = KGSL_STATE_NONE;
	return KGSL_FAILURE;

sleep:
	kgsl_pwrctrl_irq(device, KGSL_PWRFLAGS_IRQ_OFF);
	kgsl_pwrctrl_axi(device, KGSL_PWRFLAGS_AXI_OFF);
	goto clk_off;

nap:
	kgsl_pwrctrl_irq(device, KGSL_PWRFLAGS_IRQ_OFF);

clk_off:
	kgsl_pwrctrl_clk(device, KGSL_PWRFLAGS_CLK_OFF);

	device->state = device->requested_state;
	device->requested_state = KGSL_STATE_NONE;
	wake_unlock(&device->idle_wakelock);
	KGSL_DRV_WARN("state -> NAP/SLEEP(%d), device %d\n",
		device->state, device->id);

	return KGSL_SUCCESS;
}
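/*
 * Hedged usage sketch: kgsl_pwrctrl_sleep() only acts on the transition
 * recorded in device->requested_state and fails (resetting that field to
 * KGSL_STATE_NONE) if the device is not idle. A hypothetical caller such as
 * an idle-timer path might therefore set the requested state first and retry
 * later; the function name below is illustrative only, not part of the driver.
 */
static void example_request_sleep(struct kgsl_device *device)
{
	mutex_lock(&device->mutex);
	device->requested_state = KGSL_STATE_SLEEP;
	if (kgsl_pwrctrl_sleep(device) != KGSL_SUCCESS) {
		/* not idle yet; requested_state was reset to NONE, try later */
		mod_timer(&device->idle_timer, jiffies + FIRST_TIMEOUT);
	}
	mutex_unlock(&device->mutex);
}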
static void a2xx_rbbm_intrcallback(struct kgsl_device *device)
{
	unsigned int status = 0;
	unsigned int rderr = 0;

	adreno_regread(device, REG_RBBM_INT_STATUS, &status);

	if (status & RBBM_INT_CNTL__RDERR_INT_MASK) {
		union rbbm_read_error_u rerr;

		adreno_regread(device, REG_RBBM_READ_ERROR, &rderr);
		rerr.val = rderr;
		if (rerr.f.read_address == REG_CP_INT_STATUS &&
			rerr.f.read_error &&
			rerr.f.read_requester)
			KGSL_DRV_WARN(device,
				"rbbm read error interrupt: %08x\n", rderr);
		else
			KGSL_DRV_CRIT(device,
				"rbbm read error interrupt: %08x\n", rderr);
	}

	status &= RBBM_INT_MASK;
	adreno_regwrite(device, REG_RBBM_INT_ACK, status);
}
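/*
 * Sketch of the rbbm_read_error_u decode used above, offered only as an
 * assumption: the field positions are inferred from the scalar variant of
 * this handler earlier in this section (address masked and shifted right by
 * two, with separate requester and error flag bits near the top of the
 * register), and bitfield layout is compiler/ABI dependent. This is not the
 * hardware header's definition.
 */
union rbbm_read_error_u {
	unsigned int val;			/* raw REG_RBBM_READ_ERROR value */
	struct {
		unsigned int reserved0		: 2;
		unsigned int read_address	: 15;	/* dword register address */
		unsigned int reserved1		: 13;
		unsigned int read_requester	: 1;	/* 1 = host, 0 = CP */
		unsigned int read_error		: 1;	/* read did not complete */
	} f;
};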
/* functions */

void kgsl_cp_intrcallback(struct kgsl_device *device)
{
	unsigned int status = 0, num_reads = 0, master_status = 0;
	struct kgsl_yamato_device *yamato_device = KGSL_YAMATO_DEVICE(device);
	struct kgsl_ringbuffer *rb = &yamato_device->ringbuffer;

	kgsl_yamato_regread_isr(device, REG_MASTER_INT_SIGNAL, &master_status);
	while (!status && (num_reads < VALID_STATUS_COUNT_MAX) &&
		(master_status & MASTER_INT_SIGNAL__CP_INT_STAT)) {
		kgsl_yamato_regread_isr(device, REG_CP_INT_STATUS, &status);
		kgsl_yamato_regread_isr(device, REG_MASTER_INT_SIGNAL,
			&master_status);
		num_reads++;
	}
	if (num_reads > 1)
		KGSL_DRV_WARN(device,
			"Looped %d times to read REG_CP_INT_STATUS\n",
			num_reads);
	if (!status) {
		if (master_status & MASTER_INT_SIGNAL__CP_INT_STAT) {
			/* This indicates that we could not read CP_INT_STAT.
			 * As a precaution just wake up processes so
			 * they can check their timestamps. Since, we
			 * did not ack any interrupts this interrupt will
			 * be generated again */
			KGSL_DRV_WARN(device, "Unable to read CP_INT_STATUS\n");
			wake_up_interruptible_all(&device->wait_queue);
		} else
			KGSL_DRV_WARN(device, "Spurious interrupt detected\n");
		return;
	}

	if (status & CP_INT_CNTL__RB_INT_MASK) {
		/* signal intr completion event */
		unsigned int enableflag = 0;
		kgsl_sharedmem_writel(&rb->device->memstore,
			KGSL_DEVICE_MEMSTORE_OFFSET(ts_cmp_enable),
			enableflag);
		wmb();
		KGSL_CMD_WARN(rb->device, "ringbuffer rb interrupt\n");
	}

	if (status & CP_INT_CNTL__T0_PACKET_IN_IB_MASK) {
		KGSL_CMD_CRIT(rb->device,
			"ringbuffer T0 packet in IB interrupt\n");
		kgsl_yamato_regwrite_isr(rb->device, REG_CP_INT_CNTL, 0);
	}
	if (status & CP_INT_CNTL__OPCODE_ERROR_MASK) {
		KGSL_CMD_CRIT(rb->device,
			"ringbuffer opcode error interrupt\n");
		kgsl_yamato_regwrite_isr(rb->device, REG_CP_INT_CNTL, 0);
	}
	if (status & CP_INT_CNTL__PROTECTED_MODE_ERROR_MASK) {
		KGSL_CMD_CRIT(rb->device,
			"ringbuffer protected mode error interrupt\n");
		kgsl_yamato_regwrite_isr(rb->device, REG_CP_INT_CNTL, 0);
	}
	if (status & CP_INT_CNTL__RESERVED_BIT_ERROR_MASK) {
		KGSL_CMD_CRIT(rb->device,
			"ringbuffer reserved bit error interrupt\n");
		kgsl_yamato_regwrite_isr(rb->device, REG_CP_INT_CNTL, 0);
	}
	if (status & CP_INT_CNTL__IB_ERROR_MASK) {
		KGSL_CMD_CRIT(rb->device, "ringbuffer IB error interrupt\n");
		kgsl_yamato_regwrite_isr(rb->device, REG_CP_INT_CNTL, 0);
	}
	if (status & CP_INT_CNTL__SW_INT_MASK)
		KGSL_CMD_INFO(rb->device, "ringbuffer software interrupt\n");
	if (status & CP_INT_CNTL__IB2_INT_MASK)
		KGSL_CMD_INFO(rb->device, "ringbuffer ib2 interrupt\n");
	if (status & (~GSL_CP_INT_MASK))
		KGSL_CMD_WARN(rb->device,
			"bad bits in REG_CP_INT_STATUS %08x\n", status);

	/* only ack bits we understand */
	status &= GSL_CP_INT_MASK;
	kgsl_yamato_regwrite_isr(device, REG_CP_INT_ACK, status);

	if (status & (CP_INT_CNTL__IB1_INT_MASK | CP_INT_CNTL__RB_INT_MASK)) {
		KGSL_CMD_WARN(rb->device, "ringbuffer ib1/rb interrupt\n");
		wake_up_interruptible_all(&device->wait_queue);
		atomic_notifier_call_chain(&(device->ts_notifier_list),
			KGSL_DEVICE_YAMATO, NULL);
	}
}
static void a2xx_cp_intrcallback(struct kgsl_device *device)
{
	unsigned int status = 0, num_reads = 0, master_status = 0;
	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
	struct adreno_ringbuffer *rb = &adreno_dev->ringbuffer;
	int i;

	adreno_regread(device, REG_MASTER_INT_SIGNAL, &master_status);
	while (!status && (num_reads < VALID_STATUS_COUNT_MAX) &&
		(master_status & MASTER_INT_SIGNAL__CP_INT_STAT)) {
		adreno_regread(device, REG_CP_INT_STATUS, &status);
		adreno_regread(device, REG_MASTER_INT_SIGNAL,
			&master_status);
		num_reads++;
	}
	if (num_reads > 1)
		KGSL_DRV_WARN(device,
			"Looped %d times to read REG_CP_INT_STATUS\n",
			num_reads);
	if (!status) {
		if (master_status & MASTER_INT_SIGNAL__CP_INT_STAT) {
			/* This indicates that we could not read CP_INT_STAT.
			 * As a precaution just wake up processes so
			 * they can check their timestamps. Since, we
			 * did not ack any interrupts this interrupt will
			 * be generated again */
			KGSL_DRV_WARN(device, "Unable to read CP_INT_STATUS\n");
			wake_up_interruptible_all(&device->wait_queue);
		} else
			KGSL_DRV_WARN(device, "Spurious interrupt detected\n");
		return;
	}

	if (status & CP_INT_CNTL__RB_INT_MASK) {
		/* signal intr completion event */
		unsigned int enableflag = 0;
		kgsl_sharedmem_writel(&rb->device->memstore,
			KGSL_DEVICE_MEMSTORE_OFFSET(ts_cmp_enable),
			enableflag);
		wmb();
		KGSL_CMD_WARN(rb->device, "ringbuffer rb interrupt\n");
	}

	for (i = 0; i < ARRAY_SIZE(kgsl_cp_error_irqs); i++) {
		if (status & kgsl_cp_error_irqs[i].mask) {
			KGSL_CMD_CRIT(rb->device, "%s\n",
				kgsl_cp_error_irqs[i].message);
			/*
			 * on fatal errors, turn off the interrupts to
			 * avoid storming. This has the side effect of
			 * forcing a PM dump when the timestamp times out
			 */
			kgsl_pwrctrl_irq(rb->device, KGSL_PWRFLAGS_OFF);
		}
	}

	/* only ack bits we understand */
	status &= CP_INT_MASK;
	adreno_regwrite(device, REG_CP_INT_ACK, status);

	if (status & (CP_INT_CNTL__IB1_INT_MASK | CP_INT_CNTL__RB_INT_MASK)) {
		KGSL_CMD_WARN(rb->device, "ringbuffer ib1/rb interrupt\n");
		queue_work(device->work_queue, &device->ts_expired_ws);
		wake_up_interruptible_all(&device->wait_queue);
		atomic_notifier_call_chain(&(device->ts_notifier_list),
			device->id, NULL);
	}
}
void kgsl_cp_intrcallback(struct kgsl_device *device)
{
	unsigned int status = 0, num_reads = 0, master_status = 0;
	struct kgsl_yamato_device *yamato_device =
		(struct kgsl_yamato_device *) device;
	struct kgsl_ringbuffer *rb = &device->ringbuffer;

	KGSL_CMD_VDBG("enter (device=%p)\n", device);

	kgsl_yamato_regread(device, REG_MASTER_INT_SIGNAL, &master_status);
	while (!status && (num_reads < VALID_STATUS_COUNT_MAX) &&
		(master_status & MASTER_INT_SIGNAL__CP_INT_STAT)) {
		kgsl_yamato_regread(device, REG_CP_INT_STATUS, &status);
		kgsl_yamato_regread(device, REG_MASTER_INT_SIGNAL,
			&master_status);
		num_reads++;
	}
	if (num_reads > 1)
		KGSL_DRV_WARN("Looped %d times to read REG_CP_INT_STATUS\n",
			num_reads);
	if (!status) {
		if (master_status & MASTER_INT_SIGNAL__CP_INT_STAT) {
			KGSL_DRV_WARN("Unable to read CP_INT_STATUS\n");
			wake_up_interruptible_all(&yamato_device->ib1_wq);
		} else
			KGSL_DRV_WARN("Spurious interrupt detected\n");
		return;
	}

	if (status & CP_INT_CNTL__RB_INT_MASK) {
		/* signal intr completion event */
		unsigned int enableflag = 0;
		kgsl_sharedmem_writel(&rb->device->memstore,
			KGSL_DEVICE_MEMSTORE_OFFSET(ts_cmp_enable),
			enableflag);
		wmb();
		KGSL_CMD_WARN("ringbuffer rb interrupt\n");
	}

	if (status & CP_INT_CNTL__T0_PACKET_IN_IB_MASK) {
		KGSL_CMD_FATAL("ringbuffer T0 packet in IB interrupt\n");
		kgsl_yamato_regwrite(rb->device, REG_CP_INT_CNTL, 0);
		kgsl_ringbuffer_dump(rb);
	}
	if (status & CP_INT_CNTL__OPCODE_ERROR_MASK) {
		KGSL_CMD_FATAL("ringbuffer opcode error interrupt\n");
		kgsl_yamato_regwrite(rb->device, REG_CP_INT_CNTL, 0);
		kgsl_ringbuffer_dump(rb);
	}
	if (status & CP_INT_CNTL__PROTECTED_MODE_ERROR_MASK) {
		KGSL_CMD_FATAL("ringbuffer protected mode error interrupt\n");
		kgsl_yamato_regwrite(rb->device, REG_CP_INT_CNTL, 0);
		kgsl_ringbuffer_dump(rb);
	}
	if (status & CP_INT_CNTL__RESERVED_BIT_ERROR_MASK) {
		KGSL_CMD_FATAL("ringbuffer reserved bit error interrupt\n");
		kgsl_yamato_regwrite(rb->device, REG_CP_INT_CNTL, 0);
		kgsl_ringbuffer_dump(rb);
	}
	if (status & CP_INT_CNTL__IB_ERROR_MASK) {
		KGSL_CMD_FATAL("ringbuffer IB error interrupt\n");
		kgsl_yamato_regwrite(rb->device, REG_CP_INT_CNTL, 0);
		kgsl_ringbuffer_dump(rb);
	}
	if (status & CP_INT_CNTL__SW_INT_MASK)
		KGSL_CMD_DBG("ringbuffer software interrupt\n");
	if (status & CP_INT_CNTL__IB2_INT_MASK)
		KGSL_CMD_DBG("ringbuffer ib2 interrupt\n");
	if (status & (~GSL_CP_INT_MASK))
		KGSL_CMD_DBG("bad bits in REG_CP_INT_STATUS %08x\n", status);

	/* only ack bits we understand */
	status &= GSL_CP_INT_MASK;
	kgsl_yamato_regwrite(device, REG_CP_INT_ACK, status);

	if (status & (CP_INT_CNTL__IB1_INT_MASK | CP_INT_CNTL__RB_INT_MASK)) {
		KGSL_CMD_WARN("ringbuffer ib1/rb interrupt\n");
		wake_up_interruptible_all(&yamato_device->ib1_wq);
		atomic_notifier_call_chain(&(device->ts_notifier_list),
			KGSL_DEVICE_YAMATO, NULL);
	}

	KGSL_CMD_VDBG("return\n");
}