/*
 * adreno_coresight_disable() - Disable coresight debugging for the GPU
 * @csdev: Pointer to coresight's device struct
 *
 * Looks up the KGSL device from the coresight device's parent and invokes
 * the GPU specific coresight disable hook, if one is registered.
 */
void adreno_coresight_disable(struct coresight_device *csdev)
{
	struct kgsl_device *device = dev_get_drvdata(csdev->dev.parent);
	struct adreno_device *adreno_dev;

	if (device == NULL)
		return;

	adreno_dev = ADRENO_DEVICE(device);

	/*
	 * Fix: "return expr;" in a void function is a constraint violation
	 * in standard C (C99 6.8.6.4); call the hook and fall through.
	 */
	if (adreno_dev->gpudev->coresight_disable)
		adreno_dev->gpudev->coresight_disable(device);
}
/*
 * kgsl_iommu_sync_lock - Acquire Sync Lock between GPU and CPU
 * @mmu - Pointer to mmu device
 * @cmds - Pointer to array of commands
 *
 * Emits a CP command stream implementing the GPU side of a Peterson's
 * style spinlock shared with the CPU (remote_iommu_petersons_spinlock)
 * so that IOMMU programming is serialized between the two processors.
 *
 * Return - int - number of commands.
 */
inline unsigned int kgsl_iommu_sync_lock(struct kgsl_mmu *mmu,
						unsigned int *cmds)
{
	struct kgsl_device *device = mmu->device;
	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
	struct kgsl_iommu *iommu = mmu->device->mmu.priv;
	struct remote_iommu_petersons_spinlock *lock_vars =
					iommu->sync_lock_vars;
	unsigned int *start = cmds;

	/* Nothing to emit if the shared lock was never initialized */
	if (!iommu->sync_lock_initialized)
		return 0;

	/* flag[PROC_GPU] = 1: announce that the GPU wants the lock */
	*cmds++ = cp_type3_packet(CP_MEM_WRITE, 2);
	*cmds++ = lock_vars->flag[PROC_GPU];
	*cmds++ = 1;

	cmds += adreno_add_idle_cmds(adreno_dev, cmds);

	/* Wait until the flag write is observed as 1 */
	*cmds++ = cp_type3_packet(CP_WAIT_REG_MEM, 5);
	/* MEM SPACE = memory, FUNCTION = equals */
	*cmds++ = 0x13;
	*cmds++ = lock_vars->flag[PROC_GPU];
	*cmds++ = 0x1;
	*cmds++ = 0x1;
	*cmds++ = 0x1;

	/* turn = 0: yield the turn to the CPU side */
	*cmds++ = cp_type3_packet(CP_MEM_WRITE, 2);
	*cmds++ = lock_vars->turn;
	*cmds++ = 0;

	cmds += adreno_add_idle_cmds(adreno_dev, cmds);

	/* Re-check that our own flag is still observed as set */
	*cmds++ = cp_type3_packet(CP_WAIT_REG_MEM, 5);
	/* MEM SPACE = memory, FUNCTION = equals */
	*cmds++ = 0x13;
	*cmds++ = lock_vars->flag[PROC_GPU];
	*cmds++ = 0x1;
	*cmds++ = 0x1;
	*cmds++ = 0x1;

	/*
	 * Spin while the CPU both wants the lock (flag[PROC_APPS]) and
	 * owns the turn (turn == 0)
	 */
	*cmds++ = cp_type3_packet(CP_TEST_TWO_MEMS, 3);
	*cmds++ = lock_vars->flag[PROC_APPS];
	*cmds++ = lock_vars->turn;
	*cmds++ = 0;

	cmds += adreno_add_idle_cmds(adreno_dev, cmds);

	return cmds - start;
}
static void a2xx_cp_intrcallback(struct kgsl_device *device) { unsigned int status = 0, num_reads = 0, master_status = 0; struct adreno_device *adreno_dev = ADRENO_DEVICE(device); struct adreno_ringbuffer *rb = &adreno_dev->ringbuffer; int i; adreno_regread(device, REG_MASTER_INT_SIGNAL, &master_status); while (!status && (num_reads < VALID_STATUS_COUNT_MAX) && (master_status & MASTER_INT_SIGNAL__CP_INT_STAT)) { adreno_regread(device, REG_CP_INT_STATUS, &status); adreno_regread(device, REG_MASTER_INT_SIGNAL, &master_status); num_reads++; } if (num_reads > 1) KGSL_DRV_WARN(device, "Looped %d times to read REG_CP_INT_STATUS\n", num_reads); trace_kgsl_a2xx_irq_status(device, master_status, status); if (!status) { if (master_status & MASTER_INT_SIGNAL__CP_INT_STAT) { KGSL_DRV_WARN(device, "Unable to read CP_INT_STATUS\n"); wake_up_interruptible_all(&device->wait_queue); } else KGSL_DRV_WARN(device, "Spurious interrput detected\n"); return; } for (i = 0; i < ARRAY_SIZE(kgsl_cp_error_irqs); i++) { if (status & kgsl_cp_error_irqs[i].mask) { KGSL_CMD_CRIT(rb->device, "%s\n", kgsl_cp_error_irqs[i].message); kgsl_pwrctrl_irq(rb->device, KGSL_PWRFLAGS_OFF); } } status &= CP_INT_MASK; adreno_regwrite(device, REG_CP_INT_ACK, status); if (status & (CP_INT_CNTL__IB1_INT_MASK | CP_INT_CNTL__RB_INT_MASK)) { KGSL_CMD_WARN(rb->device, "ringbuffer ib1/rb interrupt\n"); queue_work(device->work_queue, &device->ts_expired_ws); wake_up_interruptible_all(&device->wait_queue); } }
/*
 * kgsl_istore_read() - debugfs read handler that hex dumps the GPU
 * instruction store
 * @file: File being read; private_data holds the kgsl device
 * @buff: User buffer to fill with formatted hex lines
 * @buff_count: Size of the user buffer in bytes
 * @ppos: File position in bytes of formatted output already consumed
 *
 * Returns the number of bytes written to @buff, or a negative error from
 * the hex dump helper.
 */
static ssize_t kgsl_istore_read(
	struct file *file,
	char __user *buff,
	size_t buff_count,
	loff_t *ppos)
{
	int i, count, remaining, pos = 0, tot = 0;
	struct kgsl_device *device = file->private_data;
	const int rowc = 8;
	struct adreno_device *adreno_dev;

	if (!ppos || !device)
		return 0;

	adreno_dev = ADRENO_DEVICE(device);
	/* Total number of istore words to dump */
	count = adreno_dev->istore_size * adreno_dev->instruction_size;
	remaining = count;
	for (i = 0; i < count; i += rowc) {
		unsigned int vals[rowc];
		int j, ss;
		int linec = min(remaining, rowc);
		remaining -= rowc;

		/*
		 * Only touch the hardware for rows at or past the current
		 * file offset; earlier rows are formatted from zeros just
		 * to advance the position bookkeeping
		 */
		if (pos >= *ppos) {
			for (j = 0; j < linec; ++j)
				kgsl_regread_nolock(device,
					ADRENO_ISTORE_START + i + j, vals + j);
		} else
			memset(vals, 0, sizeof(vals));

		ss = kgsl_hex_dump("IS: %04x: ", i, (uint8_t *)vals, rowc*4,
			linec*4, buff);
		if (ss < 0)
			return ss;

		if (pos >= *ppos) {
			/* Stop before overflowing the user buffer */
			if (tot+ss >= buff_count)
				return tot;
			tot += ss;
			buff += ss;
			*ppos += ss;
		}
		pos += ss;
	}

	return tot;
}
void adreno_ringbuffer_close(struct adreno_ringbuffer *rb) { struct adreno_device *adreno_dev = ADRENO_DEVICE(rb->device); kgsl_sharedmem_free(&rb->buffer_desc); kgsl_sharedmem_free(&rb->memptrs_desc); kfree(adreno_dev->pfp_fw); kfree(adreno_dev->pm4_fw); adreno_dev->pfp_fw = NULL; adreno_dev->pm4_fw = NULL; memset(rb, 0, sizeof(struct adreno_ringbuffer)); }
/*
 * kgsl_pwrctrl_sleep() - Transition the device into NAP or SLEEP
 * @device: KGSL device to put to sleep
 *
 * Honors device->requested_state: NAP only requires an idle GPU; SLEEP is
 * reachable from NAP or from an idle GPU and additionally stops the core,
 * drops AXI and falls to the lowest clock level. Returns 0 on success or
 * -EBUSY if no legal transition was possible.
 *
 * Caller must hold the device mutex.
 */
int kgsl_pwrctrl_sleep(struct kgsl_device *device)
{
	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
	struct kgsl_pwrctrl *pwr = &device->pwrctrl;

	KGSL_PWR_INFO(device, "sleep device %d\n", device->id);

	/* Work through the legal state transitions */
	if (device->requested_state == KGSL_STATE_NAP) {
		if (device->ftbl->isidle(device))
			goto nap;
	} else if (device->requested_state == KGSL_STATE_SLEEP) {
		if (device->state == KGSL_STATE_NAP ||
			device->ftbl->isidle(device))
			goto sleep;
	}

	/* GPU busy or illegal transition: stay where we are */
	device->requested_state = KGSL_STATE_NONE;
	return -EBUSY;

sleep:
	/* Full sleep: stop the core, mask IRQs, drop AXI, lowest clock */
	device->ftbl->suspend_context(device);
	device->ftbl->stop(device);
	kgsl_pwrctrl_irq(device, KGSL_PWRFLAGS_OFF);
	kgsl_pwrctrl_axi(device, KGSL_PWRFLAGS_OFF);
	if (pwr->pwrlevels[0].gpu_freq > 0)
		clk_set_rate(pwr->grp_clks[0],
			pwr->pwrlevels[pwr->num_pwrlevels - 1].
			gpu_freq);
	device->pwrctrl.time = 0;

	kgsl_pwrscale_sleep(device);
	goto clk_off;

nap:
	/* Light sleep: just mask interrupts before gating clocks */
	kgsl_pwrctrl_irq(device, KGSL_PWRFLAGS_OFF);

clk_off:
	kgsl_pwrctrl_clk(device, KGSL_PWRFLAGS_OFF);

	device->state = device->requested_state;
	device->requested_state = KGSL_STATE_NONE;
	wake_unlock(&device->idle_wakelock);
	pm_qos_update_request(&device->pm_qos_req_dma,
		PM_QOS_DEFAULT_VALUE);
	KGSL_PWR_WARN(device, "state -> NAP/SLEEP(%d), device %d ts 0x%x\n",
		device->state, device->id,
		adreno_dev->ringbuffer.timestamp);

	return 0;
}
/**
 * adreno_drawctxt_create - create a new adreno draw context
 * @device - KGSL device to create the context on
 * @pagetable - Pagetable for the context
 * @context- Generic KGSL context structure
 * @flags - flags for the context (passed from user space)
 *
 * Create a new draw context for the 3D core. Return 0 on success,
 * or error code on failure.
 */
int adreno_drawctxt_create(struct kgsl_device *device,
			struct kgsl_pagetable *pagetable,
			struct kgsl_context *context, uint32_t flags)
{
	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
	struct adreno_ringbuffer *rb = &adreno_dev->ringbuffer;
	struct adreno_context *ctx;
	int ret;

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (ctx == NULL)
		return -ENOMEM;

	ctx->pagetable = pagetable;
	ctx->bin_base_offset = 0;
	ctx->id = context->id;
	rb->timestamp[context->id] = 0;

	/* Translate the user space context flags into driver flags */
	if (flags & KGSL_CONTEXT_PREAMBLE)
		ctx->flags |= CTXT_FLAGS_PREAMBLE;
	if (flags & KGSL_CONTEXT_NO_GMEM_ALLOC)
		ctx->flags |= CTXT_FLAGS_NOGMEMALLOC;
	if (flags & KGSL_CONTEXT_PER_CONTEXT_TS)
		ctx->flags |= CTXT_FLAGS_PER_CONTEXT_TS;

	ret = adreno_dev->gpudev->ctxt_create(adreno_dev, ctx);
	if (ret)
		goto err;

	/* Seed the per-context memstore entries */
	kgsl_sharedmem_writel(&device->memstore,
			KGSL_MEMSTORE_OFFSET(ctx->id, ref_wait_ts),
			KGSL_INIT_REFTIMESTAMP);
	kgsl_sharedmem_writel(&device->memstore,
			KGSL_MEMSTORE_OFFSET(ctx->id, ts_cmp_enable), 0);
	kgsl_sharedmem_writel(&device->memstore,
			KGSL_MEMSTORE_OFFSET(ctx->id, soptimestamp), 0);
	kgsl_sharedmem_writel(&device->memstore,
			KGSL_MEMSTORE_OFFSET(ctx->id, eoptimestamp), 0);

	context->devctxt = ctx;
	return 0;

err:
	kfree(ctx);
	return ret;
}
/**
 * _ringbuffer_setup_common() - Ringbuffer start
 * @rb: Pointer to adreno ringbuffer
 *
 * Setup ringbuffer for GPU. Fills every ringbuffer with the 0xAA poison
 * pattern, resets its read/write pointers and regenerates the pagetable
 * switch commands against the default pagetable.
 *
 * NOTE(review): the function body continues beyond this chunk of the
 * file; only the per-ringbuffer reset loop is visible here.
 */
static void _ringbuffer_setup_common(struct adreno_ringbuffer *rb)
{
	struct kgsl_device *device = rb->device;
	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
	struct adreno_ringbuffer *rb_temp;
	int i;

	FOR_EACH_RINGBUFFER(adreno_dev, rb_temp, i) {
		/* Poison the buffer so stale commands are recognizable */
		kgsl_sharedmem_set(rb_temp->device,
			&(rb_temp->buffer_desc), 0, 0xAA, KGSL_RB_SIZE);
		rb_temp->wptr = 0;
		rb_temp->rptr = 0;
		adreno_iommu_set_pt_generate_rb_cmds(rb_temp,
			device->mmu.defaultpagetable);
	}
void adreno_debugfs_init(struct kgsl_device *device) { struct adreno_device *adreno_dev = ADRENO_DEVICE(device); if (!device->d_debugfs || IS_ERR(device->d_debugfs)) return; debugfs_create_file("cff_dump", 0644, device->d_debugfs, device, &kgsl_cff_dump_enable_fops); debugfs_create_u32("wait_timeout", 0644, device->d_debugfs, &adreno_dev->wait_timeout); debugfs_create_u32("ib_check", 0644, device->d_debugfs, &adreno_dev->ib_check_level); debugfs_create_file("active_cnt", 0444, device->d_debugfs, device, &_active_count_fops); }
static int adreno_ringbuffer_load_pm4_ucode(struct kgsl_device *device) { struct adreno_device *adreno_dev = ADRENO_DEVICE(device); const char *fwfile; int i, ret = 0; if (adreno_is_a220(adreno_dev)) { fwfile = A220_PM4_470_FW; } else if (adreno_is_a225(adreno_dev)) { fwfile = A225_PM4_FW; } else if (adreno_is_a20x(adreno_dev)) { fwfile = A200_PM4_FW; } else { KGSL_DRV_ERR(device, "Could not load PM4 file\n"); return -EINVAL; } if (adreno_dev->pm4_fw == NULL) { int len; unsigned int *ptr; ret = _load_firmware(device, fwfile, (void *) &ptr, &len); if (ret) goto err; /* PM4 size is 3 dword aligned plus 1 dword of version */ if (len % ((sizeof(uint32_t) * 3)) != sizeof(uint32_t)) { KGSL_DRV_ERR(device, "Bad firmware size: %d\n", len); ret = -EINVAL; goto err; } adreno_dev->pm4_fw_size = len / sizeof(uint32_t); adreno_dev->pm4_fw = ptr; } KGSL_DRV_INFO(device, "loading pm4 ucode version: %d\n", adreno_dev->pm4_fw[0]); adreno_regwrite(device, REG_CP_DEBUG, 0x02000000); adreno_regwrite(device, REG_CP_ME_RAM_WADDR, 0); for (i = 1; i < adreno_dev->pm4_fw_size; i++) adreno_regwrite(device, REG_CP_ME_RAM_DATA, adreno_dev->pm4_fw[i]); err: return ret; }
/** * adreno_context_restore() - generic context restore handler * @rb: The RB in which context is to be restored * * Basic context restore handler that writes the context identifier * to the ringbuffer and issues pagetable switch commands if necessary. */ static void adreno_context_restore(struct adreno_ringbuffer *rb) { struct kgsl_device *device = rb->device; struct adreno_device *adreno_dev = ADRENO_DEVICE(device); struct adreno_context *drawctxt = rb->drawctxt_active; unsigned int cmds[11]; int ret; if (!drawctxt) return; /* * write the context identifier to the ringbuffer, write to both * the global index and the index of the RB in which the context * operates. The global values will always be reliable since we * could be in middle of RB switch in which case the RB value may * not be accurate */ cmds[0] = cp_nop_packet(1); cmds[1] = KGSL_CONTEXT_TO_MEM_IDENTIFIER; cmds[2] = cp_type3_packet(CP_MEM_WRITE, 2); cmds[3] = device->memstore.gpuaddr + KGSL_MEMSTORE_RB_OFFSET(rb, current_context); cmds[4] = drawctxt->base.id; cmds[5] = cp_type3_packet(CP_MEM_WRITE, 2); cmds[6] = device->memstore.gpuaddr + KGSL_MEMSTORE_OFFSET(KGSL_MEMSTORE_GLOBAL, current_context); cmds[7] = drawctxt->base.id; /* Flush the UCHE for new context */ cmds[8] = cp_type0_packet( adreno_getreg(adreno_dev, ADRENO_REG_UCHE_INVALIDATE0), 2); cmds[9] = 0; if (adreno_is_a4xx(adreno_dev)) cmds[10] = 0x12; else if (adreno_is_a3xx(adreno_dev)) cmds[10] = 0x90000000; ret = adreno_ringbuffer_issuecmds(rb, KGSL_CMD_FLAGS_NONE, cmds, 11); if (ret) { /* * A failure to submit commands to ringbuffer means RB may * be full, in this case wait for idle and use CPU */ ret = adreno_idle(device); BUG_ON(ret); _adreno_context_restore_cpu(rb, drawctxt); } }
/*
 * gfx_store_reg() - sysfs store handler for a coresight debug register
 * @dev: sysfs device; drvdata of its parent is the kgsl device
 * @attr: Attribute being written, embedded in a struct coresight_attr
 * @buf: User supplied value string
 * @size: Length of @buf
 *
 * Converts the user value and forwards it to the GPU specific coresight
 * debug-register hook, if one is registered. Returns @size, or a negative
 * error if the device is not available.
 */
static ssize_t gfx_store_reg(struct device *dev,
	struct device_attribute *attr, const char *buf, size_t size)
{
	struct kgsl_device *device = dev_get_drvdata(dev->parent);
	struct adreno_device *adreno_dev;
	struct coresight_attr *csight_attr = container_of(attr,
			struct coresight_attr, attr);
	unsigned int regval = 0;

	/*
	 * Fix: guard against a NULL device before ADRENO_DEVICE() — the
	 * other coresight handlers in this driver all perform this check.
	 */
	if (device == NULL)
		return -EINVAL;

	adreno_dev = ADRENO_DEVICE(device);

	regval = coresight_convert_reg(buf);

	if (adreno_dev->gpudev->coresight_config_debug_reg)
		adreno_dev->gpudev->coresight_config_debug_reg(device,
				csight_attr->regname, regval);
	return size;
}
/*
 * adreno_ringbuffer_submit() - Make new ringbuffer contents visible to
 * the GPU
 * @rb: Ringbuffer to submit
 * @time: Optional out parameter for submission timestamps (may be NULL)
 *
 * Mirrors the newly written commands to the CFF dump, optionally samples
 * the always-on counter and CPU clocks for profiling, and updates the
 * hardware write pointer when this RB is current and preemption is clear.
 */
void adreno_ringbuffer_submit(struct adreno_ringbuffer *rb,
		struct adreno_submit_time *time)
{
	struct adreno_device *adreno_dev = ADRENO_DEVICE(rb->device);

	BUG_ON(rb->wptr == 0);

	_cff_write_ringbuffer(rb);

	if (time != NULL) {
		unsigned long flags;

		/*
		 * Disable local interrupts so the three time samples below
		 * are taken as close together as possible
		 */
		local_irq_save(flags);

		if (!adreno_is_a3xx(adreno_dev)) {
			adreno_readreg64(adreno_dev,
				ADRENO_REG_RBBM_ALWAYSON_COUNTER_LO,
				ADRENO_REG_RBBM_ALWAYSON_COUNTER_HI,
				&time->ticks);

			/*
			 * Mask the upper 32 bits on a4xx through a530;
			 * presumably the high half of the counter is not
			 * usable on those parts — TODO confirm against
			 * hardware documentation
			 */
			if (ADRENO_GPUREV(adreno_dev) >= 400 &&
				ADRENO_GPUREV(adreno_dev) <= ADRENO_REV_A530)
				time->ticks &= 0xFFFFFFFF;
		} else
			time->ticks = 0;

		time->ktime = local_clock();
		getnstimeofday(&time->utime);

		local_irq_restore(flags);
	}

	/* Memory barrier before informing the hardware of new commands */
	mb();

	if (adreno_preempt_state(adreno_dev, ADRENO_DISPATCHER_PREEMPT_CLEAR)
		&& (adreno_dev->cur_rb == rb)) {
		kgsl_pwrscale_busy(rb->device);
		adreno_writereg(adreno_dev, ADRENO_REG_CP_RB_WPTR, rb->wptr);
	}
}
static int adreno_ringbuffer_load_pfp_ucode(struct kgsl_device *device) { struct adreno_device *adreno_dev = ADRENO_DEVICE(device); const char *fwfile; int i, ret = 0; if (adreno_is_a220(adreno_dev)) { fwfile = A220_PFP_470_FW; } else if (adreno_is_a225(adreno_dev)) { fwfile = A225_PFP_FW; } else if (adreno_is_a20x(adreno_dev)) { fwfile = A200_PFP_FW; } else { KGSL_DRV_ERR(device, "Could not load PFP firmware\n"); return -EINVAL; } if (adreno_dev->pfp_fw == NULL) { int len; unsigned int *ptr; ret = _load_firmware(device, fwfile, (void *) &ptr, &len); if (ret) goto err; /* PFP size shold be dword aligned */ if (len % sizeof(uint32_t) != 0) { KGSL_DRV_ERR(device, "Bad firmware size: %d\n", len); ret = -EINVAL; goto err; } adreno_dev->pfp_fw_size = len / sizeof(uint32_t); adreno_dev->pfp_fw = ptr; } KGSL_DRV_INFO(device, "loading pfp ucode version: %d\n", adreno_dev->pfp_fw[0]); adreno_regwrite(device, REG_CP_PFP_UCODE_ADDR, 0); for (i = 1; i < adreno_dev->pfp_fw_size; i++) adreno_regwrite(device, REG_CP_PFP_UCODE_DATA, adreno_dev->pfp_fw[i]); err: return ret; }
/*
 * snapshot_freeze_obj_list() - Take a list of ib objects and freeze their
 * memory for snapshot
 * @device: Device being snapshotted
 * @ptbase: The pagetable base of the process to which IB belongs
 * @ib_obj_list: List of the IB objects
 *
 * Returns 0 on success else error code
 *
 * NOTE(review): the function body continues beyond this chunk of the
 * file; the loop and final return are not visible here.
 */
static int snapshot_freeze_obj_list(struct kgsl_device *device,
		phys_addr_t ptbase,
		struct adreno_ib_object_list *ib_obj_list)
{
	int ret = 0;
	struct adreno_ib_object *ib_objs;
	unsigned int ib2base;
	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
	int i;

	adreno_readreg(adreno_dev, ADRENO_REG_CP_IB2_BASE, &ib2base);

	for (i = 0; i < ib_obj_list->num_objs; i++) {
		int temp_ret;
		int index;
		int freeze = 1;

		ib_objs = &(ib_obj_list->obj_list[i]);
		/* Make sure this object is not going to be saved statically */
		for (index = 0; index < objbufptr; index++) {
			if ((objbuf[index].gpuaddr <= ib_objs->gpuaddr) &&
				((objbuf[index].gpuaddr +
				(objbuf[index].dwords << 2)) >=
				(ib_objs->gpuaddr + ib_objs->size)) &&
				(objbuf[index].ptbase == ptbase)) {
				freeze = 0;
				break;
			}
		}

		if (freeze) {
			/* Save current IB2 statically */
			if (ib2base == ib_objs->gpuaddr) {
				push_object(device, SNAPSHOT_OBJ_TYPE_IB,
					ptbase, ib_objs->gpuaddr,
					ib_objs->size >> 2);
			} else {
				/*
				 * Freeze the object; remember the first
				 * error but keep processing the rest
				 */
				temp_ret = kgsl_snapshot_get_object(device,
					ptbase, ib_objs->gpuaddr,
					ib_objs->size,
					ib_objs->snapshot_obj_type);
				if (temp_ret < 0) {
					if (ret >= 0)
						ret = temp_ret;
				} else {
					snapshot_frozen_objsize += temp_ret;
				}
			}
		}
long adreno_compat_ioctl(struct kgsl_device_private *dev_priv, unsigned int cmd, void *data) { struct kgsl_device *device = dev_priv->device; struct adreno_device *adreno_dev = ADRENO_DEVICE(device); int result = 0; switch (cmd) { case IOCTL_KGSL_PERFCOUNTER_QUERY_COMPAT: { struct kgsl_perfcounter_query_compat *query32 = data; struct kgsl_perfcounter_query query; query.groupid = query32->groupid; query.countables = (unsigned int __user *)(uintptr_t) query32->countables; query.count = query32->count; query.max_counters = query32->max_counters; result = adreno_perfcounter_query_group(adreno_dev, query.groupid, query.countables, query.count, &query.max_counters); query32->max_counters = query.max_counters; break; } case IOCTL_KGSL_PERFCOUNTER_READ_COMPAT: { struct kgsl_perfcounter_read_compat *read32 = data; struct kgsl_perfcounter_read read; read.reads = (struct kgsl_perfcounter_read_group __user *) (uintptr_t)read32->reads; read.count = read32->count; result = kgsl_active_count_get(device); if (result) break; result = adreno_perfcounter_read_group(adreno_dev, read.reads, read.count); kgsl_active_count_put(device); break; } default: KGSL_DRV_INFO(dev_priv->device, "invalid ioctl code %08x\n", cmd); result = -ENOIOCTLCMD; break; } return result; }
/*
 * adreno_coresight_show_register() - sysfs show handler for a coresight
 * debug register
 * @dev: sysfs device; drvdata of its parent is the kgsl device
 * @attr: Attribute being read, embedded in an adreno_coresight_attr
 * @buf: Output buffer (PAGE_SIZE)
 *
 * Returns the formatted register value, or -EINVAL if the device or the
 * register descriptor is unavailable.
 */
ssize_t adreno_coresight_show_register(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	unsigned int val = 0;
	struct kgsl_device *device = dev_get_drvdata(dev->parent);
	struct adreno_device *adreno_dev;
	struct adreno_coresight_attr *cattr = TO_ADRENO_CORESIGHT_ATTR(attr);

	if (device == NULL)
		return -EINVAL;

	adreno_dev = ADRENO_DEVICE(device);

	if (cattr->reg == NULL)
		return -EINVAL;

	/*
	 * Return the current value of the register if coresight is enabled,
	 * otherwise report 0
	 */
	mutex_lock(&device->mutex);
	if (test_bit(ADRENO_DEVICE_CORESIGHT, &adreno_dev->priv)) {

		/*
		 * If the device isn't power collapsed read the actual value
		 * from the hardware - otherwise return the cached value
		 */
		if (device->state == KGSL_STATE_ACTIVE ||
			device->state == KGSL_STATE_NAP) {
			if (!kgsl_active_count_get(device)) {
				kgsl_regread(device, cattr->reg->offset,
					&cattr->reg->value);
				kgsl_active_count_put(device);
			}
		}

		val = cattr->reg->value;
	}
	mutex_unlock(&device->mutex);

	return snprintf(buf, PAGE_SIZE, "0x%X", val);
}
void adreno_debugfs_init(struct kgsl_device *device) { struct adreno_device *adreno_dev = ADRENO_DEVICE(device); if (!device->d_debugfs || IS_ERR(device->d_debugfs)) return; debugfs_create_file("cff_dump", 0644, device->d_debugfs, device, &kgsl_cff_dump_enable_fops); debugfs_create_u32("wait_timeout", 0644, device->d_debugfs, &adreno_dev->wait_timeout); debugfs_create_u32("ib_check", 0644, device->d_debugfs, &adreno_dev->ib_check_level); /* By Default enable fast hang detection */ adreno_dev->fast_hang_detect = 1; debugfs_create_u32("fast_hang_detect", 0644, device->d_debugfs, &adreno_dev->fast_hang_detect); }
/*
 * adreno_coresight_store_register() - sysfs store handler for a coresight
 * debug register
 * @dev: sysfs device; drvdata of its parent is the kgsl device
 * @attr: Attribute being written, embedded in an adreno_coresight_attr
 * @buf: User supplied value string
 * @size: Length of @buf
 *
 * Caches the new register value and, when the hardware is powered,
 * programs it immediately. Writes are silently dropped while coresight
 * is disabled. Returns @size on success or a negative error.
 */
ssize_t adreno_coresight_store_register(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t size)
{
	struct kgsl_device *device = dev_get_drvdata(dev->parent);
	struct adreno_coresight_attr *cattr = TO_ADRENO_CORESIGHT_ATTR(attr);
	struct adreno_device *adreno_dev;
	unsigned long val;
	int rc;

	if (device == NULL)
		return -EINVAL;

	adreno_dev = ADRENO_DEVICE(device);

	if (cattr->reg == NULL)
		return -EINVAL;

	rc = kstrtoul(buf, 0, &val);
	if (rc)
		return rc;

	mutex_lock(&device->mutex);

	/* Ignore writes while coresight is off */
	if (test_bit(ADRENO_DEVICE_CORESIGHT, &adreno_dev->priv)) {
		cattr->reg->value = val;

		/* Program the hardware only if it is not power collapsed */
		if (device->state == KGSL_STATE_ACTIVE ||
			device->state == KGSL_STATE_NAP) {
			if (!kgsl_active_count_get(device)) {
				kgsl_regwrite(device, cattr->reg->offset,
					cattr->reg->value);
				kgsl_active_count_put(device);
			}
		}
	}

	mutex_unlock(&device->mutex);
	return size;
}
void adreno_debugfs_init(struct kgsl_device *device) { struct adreno_device *adreno_dev = ADRENO_DEVICE(device); if (!device->d_debugfs || IS_ERR(device->d_debugfs)) return; debugfs_create_file("istore", 0400, device->d_debugfs, device, &kgsl_istore_fops); debugfs_create_file("sx_debug", 0400, device->d_debugfs, device, &kgsl_sx_debug_fops); debugfs_create_file("cp_debug", 0400, device->d_debugfs, device, &kgsl_cp_debug_fops); debugfs_create_file("mh_debug", 0400, device->d_debugfs, device, &kgsl_mh_debug_fops); debugfs_create_file("cff_dump", 0644, device->d_debugfs, device, &kgsl_cff_dump_enable_fops); debugfs_create_u32("wait_timeout", 0644, device->d_debugfs, &adreno_dev->wait_timeout); debugfs_create_u32("ib_check", 0644, device->d_debugfs, &adreno_dev->ib_check_level); /* By Default enable fast hang detection */ adreno_dev->fast_hang_detect = 1; debugfs_create_u32("fast_hang_detect", 0644, device->d_debugfs, &adreno_dev->fast_hang_detect); /* Create post mortem control files */ pm_d_debugfs = debugfs_create_dir("postmortem", device->d_debugfs); if (IS_ERR(pm_d_debugfs)) return; debugfs_create_file("dump", 0600, pm_d_debugfs, device, &pm_dump_fops); debugfs_create_file("regs_enabled", 0644, pm_d_debugfs, device, &pm_regs_enabled_fops); debugfs_create_file("ib_dump_on_pagefault", 0644, device->d_debugfs, device, &ib_dump_on_pagef_enabled_fops); }
/*
 * adreno_drawctxt_destroy() - Destroy an adreno draw context
 * @device: KGSL device the context lives on
 * @context: Generic KGSL context whose devctxt is to be destroyed
 *
 * Switches away from the context if it is active, waits for the GPU to
 * idle, frees the context's shadow memory and releases the structure.
 */
void adreno_drawctxt_destroy(struct kgsl_device *device,
			  struct kgsl_context *context)
{
	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
	struct adreno_context *drawctxt;

	if (context == NULL || context->devctxt == NULL)
		return;

	drawctxt = context->devctxt;

	/* deactivate context */
	if (adreno_dev->drawctxt_active == drawctxt) {
		/* no need to save GMEM or shader, the context is
		 * being destroyed.
		 */
		drawctxt->flags &= ~(CTXT_FLAGS_GMEM_SAVE |
				     CTXT_FLAGS_SHADER_SAVE |
				     CTXT_FLAGS_GMEM_SHADOW |
				     CTXT_FLAGS_STATE_SHADOW);

		drawctxt->flags |= CTXT_FLAGS_BEING_DESTROYED;

#ifdef CONFIG_MSM_KGSL_GPU_USAGE
		device->current_process_priv = NULL;
#endif
		/* Switch to the NULL context before freeing anything */
		adreno_drawctxt_switch(adreno_dev, NULL, 0);
	}

	/* Let in-flight commands drain unless the GPU is already hung */
	if (device->state != KGSL_STATE_HUNG)
		adreno_idle(device);

	/*
	 * NOTE(review): a20x appears to need an explicit pagetable update
	 * after a context switch here — confirm against a2xx MMU docs
	 */
	if (adreno_is_a20x(adreno_dev) && adreno_dev->drawctxt_active)
		kgsl_setstate(&device->mmu,
			adreno_dev->drawctxt_active->id,
			KGSL_MMUFLAGS_PTUPDATE);

	kgsl_sharedmem_free(&drawctxt->gpustate);
	kgsl_sharedmem_free(&drawctxt->context_gmem_shadow.gmemshadow);

	kfree(drawctxt);
	context->devctxt = NULL;
}
/*
 * _cff_write_ringbuffer() - Mirror new ringbuffer contents into the CFF
 * dump
 * @rb: Ringbuffer whose commands should be mirrored
 *
 * Copies everything written between last_wptr and wptr into the CFF dump
 * stream. No-op when CFF dumping is disabled.
 */
static void _cff_write_ringbuffer(struct adreno_ringbuffer *rb)
{
	struct adreno_device *adreno_dev = ADRENO_DEVICE(rb->device);
	struct kgsl_device *device = &adreno_dev->dev;
	unsigned int *host;
	uint64_t gpu;
	size_t bytes;

	if (device->cff_dump_enable == 0)
		return;

	BUG_ON(rb->wptr < rb->last_wptr);

	/* Everything written since the last submission */
	bytes = (rb->wptr - rb->last_wptr) * sizeof(unsigned int);
	host = RB_HOSTPTR(rb, rb->last_wptr);
	gpu = RB_GPUADDR(rb, rb->last_wptr);

	kgsl_cffdump_memcpy(device, gpu, host, bytes);
}
int adreno_ringbuffer_close(struct adreno_ringbuffer *rb) { struct adreno_device *adreno_dev = ADRENO_DEVICE(rb->device); if (rb->buffer_desc.hostptr) kgsl_sharedmem_free(&rb->buffer_desc); if (rb->memptrs_desc.hostptr) kgsl_sharedmem_free(&rb->memptrs_desc); if (adreno_dev->pfp_fw != NULL) kfree(adreno_dev->pfp_fw); if (adreno_dev->pm4_fw != NULL) kfree(adreno_dev->pm4_fw); adreno_dev->pfp_fw = NULL; adreno_dev->pm4_fw = NULL; memset(rb, 0, sizeof(struct adreno_ringbuffer)); return 0; }
/*
 * adreno_ringbuffer_load_pfp_ucode() - Load the PFP microcode into the CP
 * @device: KGSL device to load microcode for
 *
 * Reads the firmware image on first use, then streams it into the PFP
 * microcode RAM through the GPU specific address/data registers. Word 0
 * of the image is the version and is skipped. Returns 0 on success or
 * the error from reading the firmware.
 */
int adreno_ringbuffer_load_pfp_ucode(struct kgsl_device *device)
{
	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
	int idx;

	if (adreno_dev->pfp_fw == NULL) {
		int ret = adreno_ringbuffer_read_pfp_ucode(device);

		if (ret)
			return ret;
	}

	KGSL_DRV_INFO(device, "loading pfp ucode version: %d\n",
			adreno_dev->pfp_fw_version);

	adreno_regwrite(device, adreno_dev->gpudev->reg_cp_pfp_ucode_addr, 0);
	/* Word 0 is the version; the microcode proper starts at word 1 */
	for (idx = 1; idx < adreno_dev->pfp_fw_size; idx++)
		adreno_regwrite(device,
			adreno_dev->gpudev->reg_cp_pfp_ucode_data,
			adreno_dev->pfp_fw[idx]);

	return 0;
}
static int adreno_ringbuffer_load_pm4_ucode(struct kgsl_device *device) { struct adreno_device *adreno_dev = ADRENO_DEVICE(device); int i, ret = 0; if (adreno_dev->pm4_fw == NULL) { int len; void *ptr; ret = _load_firmware(device, adreno_dev->pm4_fwfile, &ptr, &len); if (ret) goto err; /* PM4 size is 3 dword aligned plus 1 dword of version */ if (len % ((sizeof(uint32_t) * 3)) != sizeof(uint32_t)) { KGSL_DRV_ERR(device, "Bad firmware size: %d\n", len); ret = -EINVAL; kfree(ptr); goto err; } adreno_dev->pm4_fw_size = len / sizeof(uint32_t); adreno_dev->pm4_fw = ptr; } KGSL_DRV_INFO(device, "loading pm4 ucode version: %d\n", adreno_dev->pm4_fw[0]); //2012-11-29 liuyan kgsl patch begin //adreno_regwrite(device, REG_CP_DEBUG, 0x02000000); adreno_regwrite(device, REG_CP_DEBUG, CP_DEBUG_DEFAULT); //2012-11-29 liuyan kgsl patch end adreno_regwrite(device, REG_CP_ME_RAM_WADDR, 0); for (i = 1; i < adreno_dev->pm4_fw_size; i++) adreno_regwrite(device, REG_CP_ME_RAM_DATA, adreno_dev->pm4_fw[i]); err: return ret; }
/** * adreno_coresight_enable() - Generic function to enable coresight debugging * @csdev: Pointer to coresight's device struct * * This is a generic function to enable coresight debug bus on adreno * devices. This should be used in all cases of enabling * coresight debug bus for adreno devices. This function is registered as the * coresight enable function with coresight driver. It should only be called * through coresight driver as that would ensure that the necessary setup * required to be done on coresight driver's part is also done. */ static int adreno_coresight_enable(struct coresight_device *csdev) { struct kgsl_device *device = dev_get_drvdata(csdev->dev.parent); struct adreno_device *adreno_dev; struct adreno_gpudev *gpudev; struct adreno_coresight *coresight; int ret = 0; if (device == NULL) return -ENODEV; adreno_dev = ADRENO_DEVICE(device); gpudev = ADRENO_GPU_DEVICE(adreno_dev); coresight = gpudev->coresight; if (coresight == NULL) return -ENODEV; mutex_lock(&device->mutex); if (!test_and_set_bit(ADRENO_DEVICE_CORESIGHT, &adreno_dev->priv)) { int i; /* Reset all the debug registers to their default values */ for (i = 0; i < coresight->count; i++) coresight->registers[i].value = coresight->registers[i].initial; ret = kgsl_active_count_get(device); if (!ret) { ret = _adreno_coresight_set(adreno_dev); kgsl_active_count_put(device); } } mutex_unlock(&device->mutex); return ret; }
/**
 * adreno_drawctxt_create - create a new adreno draw context
 * @device - KGSL device to create the context on
 * @pagetable - Pagetable for the context
 * @context- Generic KGSL context structure
 * @flags - flags for the context (passed from user space)
 *
 * Create a new draw context for the 3D core. Return 0 on success,
 * or error code on failure.
 */
int adreno_drawctxt_create(struct kgsl_device *device,
			struct kgsl_pagetable *pagetable,
			struct kgsl_context *context, uint32_t flags)
{
	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
	struct adreno_context *ctx;
	int ret;

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (ctx == NULL)
		return -ENOMEM;

	ctx->pagetable = pagetable;
	ctx->bin_base_offset = 0;

	/* FIXME: Deal with preambles */

	ret = adreno_dev->gpudev->ctxt_gpustate_shadow(adreno_dev, ctx);
	if (ret)
		goto err;

	/* Save the shader instruction memory on context switching */
	ctx->flags |= CTXT_FLAGS_SHADER_SAVE;

	if (!(flags & KGSL_CONTEXT_NO_GMEM_ALLOC)) {
		/* create gmem shadow */
		ret = adreno_dev->gpudev->ctxt_gmem_shadow(adreno_dev, ctx);
		if (ret != 0)
			goto err;
	}

	context->devctxt = ctx;
	return 0;

err:
	kgsl_sharedmem_free(&ctx->gpustate);
	kfree(ctx);
	return ret;
}
int adreno_ringbuffer_init(struct kgsl_device *device) { int status; struct adreno_device *adreno_dev = ADRENO_DEVICE(device); struct adreno_ringbuffer *rb = &adreno_dev->ringbuffer; rb->device = device; /* * It is silly to convert this to words and then back to bytes * immediately below, but most of the rest of the code deals * in words, so we might as well only do the math once */ rb->sizedwords = KGSL_RB_SIZE >> 2; /* allocate memory for ringbuffer */ status = kgsl_allocate_contiguous(&rb->buffer_desc, (rb->sizedwords << 2)); if (status != 0) { adreno_ringbuffer_close(rb); return status; } /* allocate memory for polling and timestamps */ /* This really can be at 4 byte alignment boundry but for using MMU * we need to make it at page boundary */ status = kgsl_allocate_contiguous(&rb->memptrs_desc, sizeof(struct kgsl_rbmemptrs)); if (status != 0) { adreno_ringbuffer_close(rb); return status; } /* overlay structure on memptrs memory */ rb->memptrs = (struct kgsl_rbmemptrs *) rb->memptrs_desc.hostptr; return 0; }
int adreno_ringbuffer_load_pm4_ucode(struct kgsl_device *device) { struct adreno_device *adreno_dev = ADRENO_DEVICE(device); int i; if (adreno_dev->pm4_fw == NULL) { int ret = adreno_ringbuffer_read_pm4_ucode(device); if (ret) return ret; } KGSL_DRV_INFO(device, "loading pm4 ucode version: %d\n", adreno_dev->pm4_fw_version); adreno_regwrite(device, REG_CP_DEBUG, CP_DEBUG_DEFAULT); adreno_regwrite(device, REG_CP_ME_RAM_WADDR, 0); for (i = 1; i < adreno_dev->pm4_fw_size; i++) adreno_regwrite(device, REG_CP_ME_RAM_DATA, adreno_dev->pm4_fw[i]); return 0; }
/*
 * a3xx_snapshot_cp_pfp_ram() - Dump the CP PFP microcode RAM into a
 * snapshot section
 * @device: Device being snapshotted
 * @snapshot: Destination buffer (header followed by data words)
 * @remain: Bytes remaining in the snapshot buffer
 * @priv: Private data pointer (unused)
 *
 * Returns the number of bytes written, or 0 if the buffer is too small.
 */
static int a3xx_snapshot_cp_pfp_ram(struct kgsl_device *device, void *snapshot,
	int remain, void *priv)
{
	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
	struct kgsl_snapshot_debug *header = snapshot;
	unsigned int *data = snapshot + sizeof(*header);
	/* Word 0 of the loaded firmware image is the version; skip it */
	int i, size = adreno_dev->pfp_fw_size - 1;

	if (remain < DEBUG_SECTION_SZ(size)) {
		SNAPSHOT_ERR_NOMEM(device, "CP PFP RAM DEBUG");
		return 0;
	}

	header->type = SNAPSHOT_DEBUG_CP_PFP_RAM;
	header->size = size;

	/*
	 * Read the microcode RAM back through the ucode data register;
	 * NOTE(review): this relies on the address register auto-
	 * incrementing after the single write below — confirm against
	 * a3xx register documentation
	 */
	kgsl_regwrite(device, A3XX_CP_PFP_UCODE_ADDR, 0x0);
	for (i = 0; i < size; i++)
		adreno_regread(device, A3XX_CP_PFP_UCODE_DATA, &data[i]);

	return DEBUG_SECTION_SZ(size);
}