/*
 * kgsl_g12_isr - top-half interrupt handler for the g12 (2D) core.
 * @irq: IRQ number (unused here)
 * @data: cookie registered with the IRQ (unused; the device is global)
 *
 * Reads VGC_IRQSTATUS, acks exactly the bits in GSL_VGC_INT_MASK, and
 * dispatches: FIFO/MH interrupts are only logged, a G2D interrupt queues
 * device->irq_work for bottom-half processing.
 *
 * NOTE(review): this chunk of the function appears truncated - the final
 * "return result;" and closing brace are not visible here.
 */
irqreturn_t kgsl_g12_isr(int irq, void *data)
{
	irqreturn_t result = IRQ_NONE;
	struct kgsl_device *device = &kgsl_driver.g12_device;
	unsigned int status;

	kgsl_g12_regread(device, ADDR_VGC_IRQSTATUS >> 2, &status);

	if (status & GSL_VGC_INT_MASK) {
		/* Write-back acks only the bits we recognize */
		kgsl_g12_regwrite(device,
			ADDR_VGC_IRQSTATUS >> 2, status & GSL_VGC_INT_MASK);
		result = IRQ_HANDLED;
		if (status & REG_VGC_IRQSTATUS__FIFO_MASK)
			KGSL_DRV_ERR("g12 fifo interrupt\n");
		else if (status & REG_VGC_IRQSTATUS__MH_MASK)
			KGSL_DRV_ERR("g12 mh interrupt\n");
		else if (status & REG_VGC_IRQSTATUS__G2D_MASK) {
			KGSL_DRV_VDBG("g12 g2d interrupt\n");
			/* Defer timestamp/wakeup work to process context */
			queue_work(device->irq_wq, &(device->irq_work));
		} else
			KGSL_DRV_ERR(
				"bad bits in ADDR_VGC_IRQ_STATUS %08x\n",
				status);
	}
static int kgsl_ringbuffer_load_pfp_ucode(struct kgsl_device *device) { int status = 0; int i; const struct firmware *fw = NULL; unsigned int *fw_ptr = NULL; size_t fw_word_size = 0; status = request_firmware(&fw, YAMATO_PFP_FW, kgsl_driver.base_dev[KGSL_DEVICE_YAMATO]); if (status != 0) { KGSL_DRV_ERR("request_firmware for %s failed with error %d\n", YAMATO_PFP_FW, status); return status; } if ((fw->size % sizeof(uint32_t)) != 0) { KGSL_DRV_ERR("bad firmware size %d.\n", fw->size); release_firmware(fw); return -EINVAL; } fw_ptr = (unsigned int *)fw->data; fw_word_size = fw->size/sizeof(uint32_t); KGSL_DRV_INFO("loading pfp ucode version: %d\n", fw_ptr[0]); kgsl_yamato_regwrite(device, REG_CP_PFP_UCODE_ADDR, 0); for (i = 1; i < fw_word_size; i++) kgsl_yamato_regwrite(device, REG_CP_PFP_UCODE_DATA, fw_ptr[i]); release_firmware(fw); return status; }
static int kgsl_ringbuffer_load_pm4_ucode(struct kgsl_device *device) { int status = 0; int i; const struct firmware *fw = NULL; unsigned int *fw_ptr = NULL; size_t fw_word_size = 0; status = request_firmware(&fw, YAMATO_PM4_FW, kgsl_driver.base_dev[KGSL_DEVICE_YAMATO]); if (status != 0) { KGSL_DRV_ERR("request_firmware failed for %s with error %d\n", YAMATO_PM4_FW, status); goto done; } if ((fw->size % (sizeof(uint32_t)*3)) != 4) { KGSL_DRV_ERR("bad firmware size %d.\n", fw->size); status = -EINVAL; goto done; } fw_ptr = (unsigned int *)fw->data; fw_word_size = fw->size/sizeof(uint32_t); KGSL_DRV_INFO("loading pm4 ucode version: %d\n", fw_ptr[0]); kgsl_yamato_regwrite(device, REG_CP_DEBUG, 0x02000000); kgsl_yamato_regwrite(device, REG_CP_ME_RAM_WADDR, 0); for (i = 1; i < fw_word_size; i++) kgsl_yamato_regwrite(device, REG_CP_ME_RAM_DATA, fw_ptr[i]); done: release_firmware(fw); return status; }
/*
 * kgsl_iommu_clk_disable_event - An event function that is executed when
 * the required timestamp is reached. It disables the IOMMU clocks if
 * the timestamp on which the clocks can be disabled has expired.
 * @device - The kgsl device pointer
 * @data - The data passed during event creation, it is the MMU pointer
 * @id - Context ID, should always be KGSL_MEMSTORE_GLOBAL
 * @ts - The current timestamp that has expired for the device
 *
 * Disables IOMMU clocks if timestamp has expired
 * Return - void
 */
static void kgsl_iommu_clk_disable_event(struct kgsl_device *device, void *data,
					unsigned int id, unsigned int ts)
{
	struct kgsl_mmu *mmu = data;
	struct kgsl_iommu *iommu = mmu->priv;

	if (!iommu->clk_event_queued) {
		/* The event was cancelled (clk_event_queued already cleared)
		 * before the tracked timestamp retired; log if the retired ts
		 * is still behind it. */
		if (0 > timestamp_cmp(ts, iommu->iommu_last_cmd_ts))
			KGSL_DRV_ERR(device,
			"IOMMU disable clock event being cancelled, "
			"iommu_last_cmd_ts: %x, retired ts: %x\n",
			iommu->iommu_last_cmd_ts, ts);
		return;
	}

	if (0 <= timestamp_cmp(ts, iommu->iommu_last_cmd_ts)) {
		/* The last tracked command retired - safe to gate the clocks */
		kgsl_iommu_disable_clk(mmu);
		iommu->clk_event_queued = false;
	} else {
		/* add new event to fire when ts is reached, this can happen
		 * if we queued an event and someone requested the clocks to
		 * be disabled on a later timestamp */
		if (kgsl_add_event(device, id, iommu->iommu_last_cmd_ts,
			kgsl_iommu_clk_disable_event, mmu, mmu)) {
			KGSL_DRV_ERR(device,
				"Failed to add IOMMU disable clk event\n");
			iommu->clk_event_queued = false;
		}
	}
}
/*
 * kgsl_pwrctrl_pwrrail - switch the GPU power rail and regulator on or off.
 * @device: KGSL device whose rail is being toggled
 * @state: KGSL_PWRFLAGS_ON or KGSL_PWRFLAGS_OFF
 *
 * Uses test_and_{clear,set}_bit on pwr->power_flags so the rail is only
 * toggled on an actual state transition. Rail control happens before the
 * regulator is disabled, and after it is enabled, in the respective paths.
 */
void kgsl_pwrctrl_pwrrail(struct kgsl_device *device, int state)
{
	struct kgsl_pwrctrl *pwr = &device->pwrctrl;

	if (state == KGSL_PWRFLAGS_OFF) {
		if (test_and_clear_bit(KGSL_PWRFLAGS_POWER_ON,
			&pwr->power_flags)) {
			trace_kgsl_rail(device, state);
			if (internal_pwr_rail_ctl(pwr->pwr_rail, false)) {
				/* NOTE(review): this path logs with
				 * KGSL_DRV_ERR while the ON path uses
				 * KGSL_PWR_ERR - confirm which log class
				 * is intended. */
				KGSL_DRV_ERR(device,
					"call internal_pwr_rail_ctl failed\n");
				return;
			}
			if (pwr->gpu_reg)
				regulator_disable(pwr->gpu_reg);
		}
	} else if (state == KGSL_PWRFLAGS_ON) {
		if (!test_and_set_bit(KGSL_PWRFLAGS_POWER_ON,
			&pwr->power_flags)) {
			trace_kgsl_rail(device, state);
			if (internal_pwr_rail_ctl(pwr->pwr_rail, true)) {
				KGSL_PWR_ERR(device,
					"call internal_pwr_rail_ctl failed\n");
				return;
			}
			if (pwr->gpu_reg) {
				/* Enable can fail; log but do not unwind */
				int status = regulator_enable(pwr->gpu_reg);
				if (status)
					KGSL_DRV_ERR(device,
						"regulator_enable "
						"failed: %d\n",
						status);
			}
		}
	}
}
/*
 * kgsl_iommu_clk_disable_event - timestamp-retired callback that gates the
 * IOMMU clocks once the last tracked command has retired.
 * @device: the kgsl device pointer
 * @data: the MMU pointer passed at event creation
 * @id: context ID (KGSL_MEMSTORE_GLOBAL per the queueing site)
 * @ts: the timestamp that just retired
 *
 * If the tracked timestamp (iommu_last_cmd_ts) has not been reached yet,
 * a replacement event is queued for it instead of disabling the clocks.
 */
static void kgsl_iommu_clk_disable_event(struct kgsl_device *device, void *data,
					unsigned int id, unsigned int ts)
{
	struct kgsl_mmu *mmu = data;
	struct kgsl_iommu *iommu = mmu->priv;

	if (!iommu->clk_event_queued) {
		/* Event was cancelled before the tracked ts retired */
		if (0 > timestamp_cmp(ts, iommu->iommu_last_cmd_ts))
			KGSL_DRV_ERR(device,
			"IOMMU disable clock event being cancelled, "
			"iommu_last_cmd_ts: %x, retired ts: %x\n",
			iommu->iommu_last_cmd_ts, ts);
		return;
	}

	if (0 <= timestamp_cmp(ts, iommu->iommu_last_cmd_ts)) {
		/* Tracked command retired - safe to gate the clocks */
		kgsl_iommu_disable_clk(mmu);
		iommu->clk_event_queued = false;
	} else {
		/* The disable point moved forward since this event was
		 * queued; re-arm for the newer timestamp. */
		if (kgsl_add_event(device, id, iommu->iommu_last_cmd_ts,
			kgsl_iommu_clk_disable_event, mmu, mmu)) {
			KGSL_DRV_ERR(device,
				"Failed to add IOMMU disable clk event\n");
			iommu->clk_event_queued = false;
		}
	}
}
static int kgsl_ringbuffer_load_pm4_ucode(struct kgsl_device *device) { int status = 0; int i; const struct firmware *fw = NULL; unsigned int *fw_ptr = NULL; size_t fw_word_size = 0; if (device->chip_id == KGSL_CHIPID_LEIA_REV470) { status = request_firmware(&fw, LEIA_PM4_470_FW, kgsl_driver.base_dev[KGSL_DEVICE_YAMATO]); if (status != 0) { KGSL_DRV_ERR( "request_firmware failed for %s \ with error %d\n", LEIA_PM4_470_FW, status); goto error; } } else { status = request_firmware(&fw, YAMATO_PM4_FW, kgsl_driver.base_dev[KGSL_DEVICE_YAMATO]); if (status != 0) { KGSL_DRV_ERR( "request_firmware failed for %s \ with error %d\n", YAMATO_PM4_FW, status); goto error; } } /*this firmware must come in 3 word chunks. plus 1 word of version*/ if ((fw->size % (sizeof(uint32_t)*3)) != 4) { KGSL_DRV_ERR("bad firmware size %d.\n", fw->size); status = -EINVAL; goto error_release_fw; } fw_ptr = (unsigned int *)fw->data; fw_word_size = fw->size/sizeof(uint32_t); KGSL_DRV_INFO("loading pm4 ucode version: %d\n", fw_ptr[0]); kgsl_yamato_regwrite(device, REG_CP_DEBUG, 0x02000000); kgsl_yamato_regwrite(device, REG_CP_ME_RAM_WADDR, 0); for (i = 1; i < fw_word_size; i++) kgsl_yamato_regwrite(device, REG_CP_ME_RAM_DATA, fw_ptr[i]); error_release_fw: release_firmware(fw); error: return status; }
/* Push a new buffer object onto the list */
static void push_object(struct kgsl_device *device, int type,
	phys_addr_t ptbase, uint32_t gpuaddr, int dwords)
{
	int i;
	void *hostptr;
	struct kgsl_mem_entry *entry = NULL;

	/*
	 * IBs may show up more than once in a dump.  Since parsing runs from
	 * oldest to newest, a repeated address means the IB was reused - just
	 * record the latest size instead of adding a duplicate entry.
	 */
	for (i = 0; i < objbufptr; i++) {
		if (objbuf[i].gpuaddr != gpuaddr ||
			objbuf[i].ptbase != ptbase)
			continue;
		objbuf[i].dwords = dwords;
		return;
	}

	if (objbufptr == SNAPSHOT_OBJ_BUFSIZE) {
		KGSL_DRV_ERR(device, "snapshot: too many snapshot objects\n");
		return;
	}

	/*
	 * adreno_convertaddr verifies that the IB size is valid - at least in
	 * the context of it being smaller then the allocated memory space
	 */
	hostptr = adreno_convertaddr(device, ptbase, gpuaddr, dwords << 2,
		&entry);
	if (hostptr == NULL) {
		KGSL_DRV_ERR(device,
			"snapshot: Can't find GPU address for %x\n", gpuaddr);
		return;
	}

	/* Record the object for later parsing */
	objbuf[objbufptr].type = type;
	objbuf[objbufptr].gpuaddr = gpuaddr;
	objbuf[objbufptr].ptbase = ptbase;
	objbuf[objbufptr].dwords = dwords;
	objbuf[objbufptr].entry = entry;
	objbuf[objbufptr++].ptr = hostptr;
}
/* Snapshot the memory for an indirect buffer */
static int snapshot_ib(struct kgsl_device *device, void *snapshot,
	int remain, void *priv)
{
	struct kgsl_snapshot_ib *header = snapshot;
	struct kgsl_snapshot_obj *obj = priv;
	/* src is the CPU mapping of the IB, dst is just past the sub-header */
	unsigned int *src = obj->ptr;
	unsigned int *dst = snapshot + sizeof(*header);
	int i;

	/* Need room for the sub-header plus all the IB dwords.
	 * NOTE(review): remain is int but sizeof() is size_t, so a negative
	 * remain would be promoted to unsigned here - confirm callers never
	 * pass one. */
	if (remain < (obj->dwords << 2) + sizeof(*header)) {
		KGSL_DRV_ERR(device,
			"snapshot: Not enough memory for the ib section");
		return 0;
	}

	/* Write the sub-header for the section */
	header->gpuaddr = obj->gpuaddr;
	header->ptbase = obj->ptbase;
	header->size = obj->dwords;

	/* Write the contents of the ib */
	for (i = 0; i < obj->dwords; i++) {
		*dst = *src;
		/* If another IB is discovered, then push it on the list too */
		if (adreno_cmd_is_ib(*src))
			push_object(device, SNAPSHOT_OBJ_TYPE_IB, obj->ptbase,
				*(src + 1), *(src + 2));
		src++;
		dst++;
	}

	/* Bytes consumed in the snapshot buffer */
	return (obj->dwords << 2) + sizeof(*header);
}
int adreno_ringbuffer_read_pfp_ucode(struct kgsl_device *device) { struct adreno_device *adreno_dev = ADRENO_DEVICE(device); int ret = 0; if (adreno_dev->pfp_fw == NULL) { int len; void *ptr; ret = _load_firmware(device, adreno_dev->pfp_fwfile, &ptr, &len); if (ret) goto err; /* PFP size shold be dword aligned */ if (len % sizeof(uint32_t) != 0) { KGSL_DRV_ERR(device, "Bad firmware size: %d\n", len); ret = -EINVAL; kfree(ptr); goto err; } adreno_dev->pfp_fw_size = len / sizeof(uint32_t); adreno_dev->pfp_fw = ptr; adreno_dev->pfp_fw_version = adreno_dev->pfp_fw[5]; } err: return ret; }
int adreno_ringbuffer_read_pm4_ucode(struct kgsl_device *device) { struct adreno_device *adreno_dev = ADRENO_DEVICE(device); int ret = 0; if (adreno_dev->pm4_fw == NULL) { int len; void *ptr; ret = _load_firmware(device, adreno_dev->pm4_fwfile, &ptr, &len); if (ret) goto err; /* PM4 size is 3 dword aligned plus 1 dword of version */ if (len % ((sizeof(uint32_t) * 3)) != sizeof(uint32_t)) { KGSL_DRV_ERR(device, "Bad firmware size: %d\n", len); ret = -EINVAL; kfree(ptr); goto err; } adreno_dev->pm4_fw_size = len / sizeof(uint32_t); adreno_dev->pm4_fw = ptr; adreno_dev->pm4_fw_version = adreno_dev->pm4_fw[1]; } err: return ret; }
/*
 * kgsl_g12_isr - top-half interrupt handler for the g12 (2D) core.
 * @irq: IRQ number (unused here)
 * @data: cookie registered with the IRQ (unused; the device is global)
 *
 * Acks the recognized VGC interrupt bits, forwards MH interrupts to
 * kgsl_mh_intrcallback(), and on a G2D interrupt reads the completed
 * command count from VGC_IRQ_ACTIVE_CNT (bits 8..15), advances the
 * device timestamp, and wakes timestamp waiters.
 *
 * NOTE(review): this chunk of the function appears truncated - the
 * closing of the outer if, "return result;" and the final brace are not
 * visible here.
 */
irqreturn_t kgsl_g12_isr(int irq, void *data)
{
	irqreturn_t result = IRQ_NONE;
	struct kgsl_device *device = &kgsl_driver.g12_device;
	unsigned int status;

	kgsl_g12_regread(device, ADDR_VGC_IRQSTATUS >> 2, &status);

	if (status & GSL_VGC_INT_MASK) {
		/* Ack only the bits we recognize */
		kgsl_g12_regwrite(device,
			ADDR_VGC_IRQSTATUS >> 2, status & GSL_VGC_INT_MASK);
		result = IRQ_HANDLED;
		if (status & REG_VGC_IRQSTATUS__FIFO_MASK)
			KGSL_DRV_ERR("g12 fifo interrupt\n");
		if (status & REG_VGC_IRQSTATUS__MH_MASK)
			kgsl_mh_intrcallback(device);
		if (status & REG_VGC_IRQSTATUS__G2D_MASK) {
			/* NOTE(review): count is int but the regread
			 * prototype presumably takes unsigned int * -
			 * confirm no warning/aliasing issue. */
			int count;
			KGSL_DRV_VDBG("g12 g2d interrupt\n");
			/* Completed-command count lives in bits 8..15 */
			kgsl_g12_regread(device,
				ADDR_VGC_IRQ_ACTIVE_CNT >> 2, &count);
			count >>= 8;
			count &= 255;
			device->timestamp += count;
			wake_up_interruptible(&(device->wait_timestamp_wq));
		}
/*
 * kgsl_iommu_disable_clk_on_ts - Sets up event to disable IOMMU clocks
 * @mmu - The kgsl MMU pointer
 * @ts - Timestamp on which the clocks should be disabled
 * @ts_valid - Indicates whether ts parameter is valid, if this parameter
 * is false then it means that the calling function wants to disable the
 * IOMMU clocks immediately without waiting for any timestamp
 *
 * Creates an event to disable the IOMMU clocks on timestamp and if event
 * already exists then updates the timestamp of disabling the IOMMU clocks
 * with the passed in ts if it is greater than the current value at which
 * the clocks will be disabled
 * Return - void
 */
static void kgsl_iommu_disable_clk_on_ts(struct kgsl_mmu *mmu,
					unsigned int ts, bool ts_valid)
{
	struct kgsl_iommu *iommu = mmu->priv;

	if (iommu->clk_event_queued) {
		/* An event is already pending - just push the disable point
		 * forward if the caller's timestamp is newer. */
		if (ts_valid &&
			timestamp_cmp(ts, iommu->iommu_last_cmd_ts) > 0)
			iommu->iommu_last_cmd_ts = ts;
		return;
	}

	if (!ts_valid) {
		/* No timestamp to wait for - gate the clocks right away */
		kgsl_iommu_disable_clk(mmu);
		return;
	}

	/* Arm a new event that fires when ts retires */
	iommu->iommu_last_cmd_ts = ts;
	iommu->clk_event_queued = true;
	if (kgsl_add_event(mmu->device, KGSL_MEMSTORE_GLOBAL, ts,
		kgsl_iommu_clk_disable_event, mmu, mmu)) {
		KGSL_DRV_ERR(mmu->device,
			"Failed to add IOMMU disable clk event\n");
		iommu->clk_event_queued = false;
	}
}
static int _load_firmware(struct kgsl_device *device, const char *fwfile, void **data, int *len) { const struct firmware *fw = NULL; int ret; ret = request_firmware(&fw, fwfile, device->dev); if (ret) { KGSL_DRV_ERR(device, "request_firmware(%s) failed: %d\n", fwfile, ret); return ret; } *data = kmalloc(fw->size, GFP_KERNEL); if (*data) { memcpy(*data, fw->data, fw->size); *len = fw->size; } else KGSL_MEM_ERR(device, "kmalloc(%d) failed\n", fw->size); release_firmware(fw); return (*data != NULL) ? 0 : -ENOMEM; }
int kgsl_g12_cmdwindow_write(struct kgsl_device *device, enum kgsl_cmdwindow_type target, unsigned int addr, unsigned int data) { unsigned int cmdwinaddr; unsigned int cmdstream; KGSL_DRV_INFO("enter (device=%p,addr=%08x,data=0x%x)\n", device, addr, data); if (target < KGSL_CMDWINDOW_MIN || target > KGSL_CMDWINDOW_MAX) { KGSL_DRV_ERR("dev %p invalid target\n", device); return -EINVAL; } if (target == KGSL_CMDWINDOW_MMU) cmdstream = ADDR_VGC_MMUCOMMANDSTREAM; else cmdstream = ADDR_VGC_COMMANDSTREAM; cmdwinaddr = ((target << KGSL_G12_CMDWINDOW_TARGET_SHIFT) & KGSL_G12_CMDWINDOW_TARGET_MASK); cmdwinaddr |= ((addr << KGSL_G12_CMDWINDOW_ADDR_SHIFT) & KGSL_G12_CMDWINDOW_ADDR_MASK); kgsl_g12_regwrite(device, cmdstream >> 2, cmdwinaddr); kgsl_g12_regwrite(device, cmdstream >> 2, data); return 0; }
int adreno_ringbuffer_read_pfp_ucode(struct kgsl_device *device) { struct adreno_device *adreno_dev = ADRENO_DEVICE(device); int ret; if (adreno_dev->pfp_fw == NULL) { int len; void *ptr; ret = _load_firmware(device, adreno_dev->gpucore->pfpfw_name, &ptr, &len); if (ret) goto err; /* PFP size shold be dword aligned */ if (len % sizeof(uint32_t) != 0) { KGSL_DRV_ERR(device, "Bad PFP microcode size: %d\n", len); kfree(ptr); ret = -ENOMEM; goto err; } adreno_dev->pfp_fw_size = len / sizeof(uint32_t); adreno_dev->pfp_fw = ptr; adreno_dev->pfp_fw_version = adreno_dev->pfp_fw[5]; } return 0; err: KGSL_DRV_CRIT(device, "Failed to read pfp microcode %s\n", adreno_dev->gpucore->pfpfw_name); return ret; }
static int tz_init(struct kgsl_device *device, struct kgsl_pwrscale *pwrscale) { int i = 0, j = 1, ret = 0; struct tz_priv *priv; struct kgsl_pwrctrl *pwr = &device->pwrctrl; unsigned int tz_pwrlevels[KGSL_MAX_PWRLEVELS + 1]; priv = pwrscale->priv = kzalloc(sizeof(struct tz_priv), GFP_KERNEL); if (pwrscale->priv == NULL) return -ENOMEM; priv->idle_dcvs = 0; priv->governor = TZ_GOVERNOR_ONDEMAND; spin_lock_init(&tz_lock); kgsl_pwrscale_policy_add_files(device, pwrscale, &tz_attr_group); for (i = 0; i < pwr->num_pwrlevels - 1; i++) { if (i == 0) tz_pwrlevels[j] = pwr->pwrlevels[i].gpu_freq; else if (pwr->pwrlevels[i].gpu_freq != pwr->pwrlevels[i - 1].gpu_freq) { j++; tz_pwrlevels[j] = pwr->pwrlevels[i].gpu_freq; } } tz_pwrlevels[0] = j; ret = scm_call(SCM_SVC_DCVS, TZ_INIT_ID, tz_pwrlevels, sizeof(tz_pwrlevels), NULL, 0); if (ret) { KGSL_DRV_ERR(device, "Fall back to idle based GPU DCVS algo"); priv->idle_dcvs = 1; } return 0; }
/*
 * z180_irq_handler - top-half interrupt handler for the z180 (2D) core.
 * @device: KGSL device that raised the interrupt
 *
 * Traces and acks the recognized VGC interrupt bits, forwards MH
 * interrupts to kgsl_mh_intrcallback(), and on a G2D interrupt reads the
 * completed-command count from VGC_IRQ_ACTIVE_CNT (bits 8..15), advances
 * the z180 timestamp, queues timestamp-expired work and wakes waiters.
 *
 * NOTE(review): this chunk of the function appears truncated - the
 * closing of the outer if, "return result;" and the final brace are not
 * visible here.
 */
static irqreturn_t z180_irq_handler(struct kgsl_device *device)
{
	irqreturn_t result = IRQ_NONE;
	unsigned int status;
	struct z180_device *z180_dev = Z180_DEVICE(device);

	z180_regread(device, ADDR_VGC_IRQSTATUS >> 2, &status);

	trace_kgsl_z180_irq_status(device, status);

	if (status & GSL_VGC_INT_MASK) {
		/* Ack only the bits we recognize */
		z180_regwrite(device,
			ADDR_VGC_IRQSTATUS >> 2, status & GSL_VGC_INT_MASK);
		result = IRQ_HANDLED;
		if (status & REG_VGC_IRQSTATUS__FIFO_MASK)
			KGSL_DRV_ERR(device, "z180 fifo interrupt\n");
		if (status & REG_VGC_IRQSTATUS__MH_MASK)
			kgsl_mh_intrcallback(device);
		if (status & REG_VGC_IRQSTATUS__G2D_MASK) {
			int count;
			/* Completed-command count lives in bits 8..15 */
			z180_regread(device,
				ADDR_VGC_IRQ_ACTIVE_CNT >> 2, &count);
			count >>= 8;
			count &= 255;
			z180_dev->timestamp += count;
			queue_work(device->work_queue, &device->ts_expired_ws);
			wake_up_interruptible(&device->wait_queue);
		}
static int adreno_ringbuffer_load_pm4_ucode(struct kgsl_device *device) { struct adreno_device *adreno_dev = ADRENO_DEVICE(device); const char *fwfile; int i, ret = 0; if (adreno_is_a220(adreno_dev)) { fwfile = A220_PM4_470_FW; } else if (adreno_is_a225(adreno_dev)) { fwfile = A225_PM4_FW; } else if (adreno_is_a20x(adreno_dev)) { fwfile = A200_PM4_FW; } else { KGSL_DRV_ERR(device, "Could not load PM4 file\n"); return -EINVAL; } if (adreno_dev->pm4_fw == NULL) { int len; unsigned int *ptr; ret = _load_firmware(device, fwfile, (void *) &ptr, &len); if (ret) goto err; /* PM4 size is 3 dword aligned plus 1 dword of version */ if (len % ((sizeof(uint32_t) * 3)) != sizeof(uint32_t)) { KGSL_DRV_ERR(device, "Bad firmware size: %d\n", len); ret = -EINVAL; goto err; } adreno_dev->pm4_fw_size = len / sizeof(uint32_t); adreno_dev->pm4_fw = ptr; } KGSL_DRV_INFO(device, "loading pm4 ucode version: %d\n", adreno_dev->pm4_fw[0]); adreno_regwrite(device, REG_CP_DEBUG, 0x02000000); adreno_regwrite(device, REG_CP_ME_RAM_WADDR, 0); for (i = 1; i < adreno_dev->pm4_fw_size; i++) adreno_regwrite(device, REG_CP_ME_RAM_DATA, adreno_dev->pm4_fw[i]); err: return ret; }
/*
 * kgsl_iommu_default_setstate - Change the IOMMU pagetable or flush IOMMU tlb
 * of the primary context bank
 * @mmu - Pointer to mmu structure
 * @flags - Flags indicating whether pagetable has to change or tlb is to be
 * flushed or both
 *
 * Based on flags set the new pagetable for the IOMMU unit or flush its tlb or
 * do both by doing direct register writes to the IOMMU registers through the
 * cpu
 * Return - void
 */
static void kgsl_iommu_default_setstate(struct kgsl_mmu *mmu,
					uint32_t flags)
{
	struct kgsl_iommu *iommu = mmu->priv;
	/* temp holds a readback used to post the preceding write */
	int temp;
	int i;
	unsigned int pt_base = kgsl_iommu_get_pt_base_addr(mmu,
						mmu->hwpagetable);
	unsigned int pt_val;

	if (kgsl_iommu_enable_clk(mmu, KGSL_IOMMU_CONTEXT_USER)) {
		KGSL_DRV_ERR(mmu->device, "Failed to enable iommu clocks\n");
		return;
	}
	/* Mask off the lsb of the pt base address since lsb will not change */
	pt_base &= (iommu->iommu_reg_list[KGSL_IOMMU_CTX_TTBR0].reg_mask <<
			iommu->iommu_reg_list[KGSL_IOMMU_CTX_TTBR0].reg_shift);

	/* For v1 SMMU GPU needs to be idle for tlb invalidate as well */
	if (msm_soc_version_supports_iommu_v1())
		kgsl_idle(mmu->device);

	/* Acquire GPU-CPU sync Lock here */
	msm_iommu_lock();

	if (flags & KGSL_MMUFLAGS_PTUPDATE) {
		if (!msm_soc_version_supports_iommu_v1())
			kgsl_idle(mmu->device);
		for (i = 0; i < iommu->unit_count; i++) {
			/* get the lsb value which should not change when
			 * changing ttbr0 */
			pt_val = kgsl_iommu_get_pt_lsb(mmu, i,
						KGSL_IOMMU_CONTEXT_USER);
			pt_val += pt_base;

			KGSL_IOMMU_SET_CTX_REG(iommu, (&iommu->iommu_units[i]),
				KGSL_IOMMU_CONTEXT_USER, TTBR0, pt_val);

			/* Read back after the barrier to make sure the write
			 * reached the hardware before proceeding */
			mb();
			temp = KGSL_IOMMU_GET_CTX_REG(iommu,
				(&iommu->iommu_units[i]),
				KGSL_IOMMU_CONTEXT_USER, TTBR0);
		}
	}
	/* Flush tlb */
	if (flags & KGSL_MMUFLAGS_TLBFLUSH) {
		for (i = 0; i < iommu->unit_count; i++) {
			KGSL_IOMMU_SET_CTX_REG(iommu, (&iommu->iommu_units[i]),
				KGSL_IOMMU_CONTEXT_USER, TLBIALL, 1);
			mb();
		}
	}

	/* Release GPU-CPU sync Lock here */
	msm_iommu_unlock();

	/* Disable smmu clock */
	kgsl_iommu_disable_clk_on_ts(mmu, 0, false);
}
/*
 * kgsl_iommu_default_setstate - change the IOMMU pagetable and/or flush the
 * TLB of the primary (user) context bank via direct CPU register writes.
 * @mmu: Pointer to mmu structure
 * @flags: KGSL_MMUFLAGS_PTUPDATE to switch TTBR0, KGSL_MMUFLAGS_TLBFLUSH to
 * invalidate the TLB; either or both may be set
 *
 * The GPU is idled before touching TTBR0 (ordering differs by SMMU
 * version), and all register writes happen under the GPU-CPU sync lock.
 * Clocks are re-gated afterward via kgsl_iommu_disable_clk_on_ts().
 * Return - void
 */
static void kgsl_iommu_default_setstate(struct kgsl_mmu *mmu,
					uint32_t flags)
{
	struct kgsl_iommu *iommu = mmu->priv;
	/* temp holds a readback used to post the preceding write */
	int temp;
	int i;
	unsigned int pt_base = kgsl_iommu_pt_get_base_addr(
					mmu->hwpagetable);
	unsigned int pt_val;

	if (kgsl_iommu_enable_clk(mmu, KGSL_IOMMU_CONTEXT_USER)) {
		KGSL_DRV_ERR(mmu->device, "Failed to enable iommu clocks\n");
		return;
	}
	/* Mask off the lsb of the pt base address since lsb will not change */
	pt_base &= (KGSL_IOMMU_TTBR0_PA_MASK << KGSL_IOMMU_TTBR0_PA_SHIFT);

	/* For v1 SMMU the GPU must be idle for tlb invalidate as well */
	if (msm_soc_version_supports_iommu_v1())
		kgsl_idle(mmu->device, KGSL_TIMEOUT_DEFAULT);

	/* Acquire GPU-CPU sync Lock here */
	msm_iommu_lock();

	if (flags & KGSL_MMUFLAGS_PTUPDATE) {
		if (!msm_soc_version_supports_iommu_v1())
			kgsl_idle(mmu->device, KGSL_TIMEOUT_DEFAULT);
		for (i = 0; i < iommu->unit_count; i++) {
			/* get the lsb value which should not change when
			 * changing ttbr0 */
			pt_val = kgsl_iommu_get_pt_lsb(mmu, i,
						KGSL_IOMMU_CONTEXT_USER);
			pt_val += pt_base;

			KGSL_IOMMU_SET_IOMMU_REG(
				iommu->iommu_units[i].reg_map.hostptr,
				KGSL_IOMMU_CONTEXT_USER, TTBR0, pt_val);

			/* Read back after the barrier to make sure the write
			 * reached the hardware before proceeding */
			mb();
			temp = KGSL_IOMMU_GET_IOMMU_REG(
				iommu->iommu_units[i].reg_map.hostptr,
				KGSL_IOMMU_CONTEXT_USER, TTBR0);
		}
	}
	/* Flush tlb */
	if (flags & KGSL_MMUFLAGS_TLBFLUSH) {
		for (i = 0; i < iommu->unit_count; i++) {
			KGSL_IOMMU_SET_IOMMU_REG(
				iommu->iommu_units[i].reg_map.hostptr,
				KGSL_IOMMU_CONTEXT_USER, CTX_TLBIALL, 1);
			mb();
		}
	}

	/* Release GPU-CPU sync Lock here */
	msm_iommu_unlock();

	/* Disable smmu clock */
	kgsl_iommu_disable_clk_on_ts(mmu, 0, false);
}
/*
 * adreno_ringbuffer_waitspace - block until the ringbuffer has room for
 * @numcmds dwords.
 * @rb: the ringbuffer to wait on
 * @numcmds: number of dwords the caller wants to write
 * @wptr_ahead: nonzero when wptr must first wrap past the end of the buffer
 *
 * When wrapping, the tail of the buffer is padded with a single NOP packet
 * and wptr resets to 0.  On timeout a dump-and-recover is attempted; an
 * unrecoverable GPU hangs the kernel via BUG().
 */
static void adreno_ringbuffer_waitspace(struct adreno_ringbuffer *rb,
					unsigned int numcmds, int wptr_ahead)
{
	int nopcount;
	unsigned int freecmds;
	unsigned int *cmds;
	uint cmds_gpu;
	struct adreno_device *adreno_dev = ADRENO_DEVICE(rb->device);
	unsigned long wait_timeout = msecs_to_jiffies(adreno_dev->wait_timeout);
	unsigned long wait_time;

	/* if wptr ahead, fill the remaining with NOPs */
	if (wptr_ahead) {
		/* -1 for header */
		nopcount = rb->sizedwords - rb->wptr - 1;

		cmds = (unsigned int *)rb->buffer_desc.hostptr + rb->wptr;
		cmds_gpu = rb->buffer_desc.gpuaddr + sizeof(uint)*rb->wptr;

		GSL_RB_WRITE(cmds, cmds_gpu, cp_nop_packet(nopcount));

		/* Make sure that rptr is not 0 before submitting
		 * commands at the end of ringbuffer. We do not
		 * want the rptr and wptr to become equal when
		 * the ringbuffer is not empty */
		do {
			GSL_RB_GET_READPTR(rb, &rb->rptr);
		} while (!rb->rptr);

		rb->wptr++;

		adreno_ringbuffer_submit(rb);

		rb->wptr = 0;
	}

	wait_time = jiffies + wait_timeout;
	/* wait for space in ringbuffer */
	while (1) {
		GSL_RB_GET_READPTR(rb, &rb->rptr);

		/* Unsigned subtraction handles the wrapped case; freecmds==0
		 * means the ring is completely empty */
		freecmds = rb->rptr - rb->wptr;

		if (freecmds == 0 || freecmds > numcmds)
			break;

		if (time_after(jiffies, wait_time)) {
			KGSL_DRV_ERR(rb->device,
			"Timed out while waiting for freespace in ringbuffer "
			"rptr: 0x%x, wptr: 0x%x\n", rb->rptr, rb->wptr);
			/* On successful recovery restart the timeout window */
			if (!adreno_dump_and_recover(rb->device))
				wait_time = jiffies + wait_timeout;
			else
				/* GPU is hung and we cannot recover */
				BUG();
		}
	}
}
static int adreno_ringbuffer_load_pfp_ucode(struct kgsl_device *device) { struct adreno_device *adreno_dev = ADRENO_DEVICE(device); const char *fwfile; int i, ret = 0; if (adreno_is_a220(adreno_dev)) { fwfile = A220_PFP_470_FW; } else if (adreno_is_a225(adreno_dev)) { fwfile = A225_PFP_FW; } else if (adreno_is_a20x(adreno_dev)) { fwfile = A200_PFP_FW; } else { KGSL_DRV_ERR(device, "Could not load PFP firmware\n"); return -EINVAL; } if (adreno_dev->pfp_fw == NULL) { int len; unsigned int *ptr; ret = _load_firmware(device, fwfile, (void *) &ptr, &len); if (ret) goto err; /* PFP size shold be dword aligned */ if (len % sizeof(uint32_t) != 0) { KGSL_DRV_ERR(device, "Bad firmware size: %d\n", len); ret = -EINVAL; goto err; } adreno_dev->pfp_fw_size = len / sizeof(uint32_t); adreno_dev->pfp_fw = ptr; } KGSL_DRV_INFO(device, "loading pfp ucode version: %d\n", adreno_dev->pfp_fw[0]); adreno_regwrite(device, REG_CP_PFP_UCODE_ADDR, 0); for (i = 1; i < adreno_dev->pfp_fw_size; i++) adreno_regwrite(device, REG_CP_PFP_UCODE_DATA, adreno_dev->pfp_fw[i]); err: return ret; }
/*
 * kgsl_iommu_default_setstate - Change the IOMMU pagetable or flush IOMMU tlb
 * of the primary context bank
 * @mmu - Pointer to mmu structure
 * @flags - Flags indicating whether pagetable has to change or tlb is to be
 * flushed or both
 *
 * Based on flags set the new pagetable for the IOMMU unit or flush its tlb or
 * do both by doing direct register writes to the IOMMU registers through the
 * cpu.  This variant also programs CONTEXTIDR with the pagetable's ASID and
 * flushes the TLB by ASID (TLBIASID) rather than flushing everything.
 * Return - void
 */
static void kgsl_iommu_default_setstate(struct kgsl_mmu *mmu,
					uint32_t flags)
{
	struct kgsl_iommu *iommu = mmu->priv;
	/* temp holds a readback used to post the preceding write */
	int temp;
	int i;
	unsigned int pt_base = kgsl_iommu_pt_get_base_addr(
					mmu->hwpagetable);
	unsigned int pt_val;

	if (kgsl_iommu_enable_clk(mmu, KGSL_IOMMU_CONTEXT_USER)) {
		KGSL_DRV_ERR(mmu->device, "Failed to enable iommu clocks\n");
		return;
	}
	/* Mask off the lsb of the pt base address since lsb will not change */
	pt_base &= (KGSL_IOMMU_TTBR0_PA_MASK << KGSL_IOMMU_TTBR0_PA_SHIFT);
	if (flags & KGSL_MMUFLAGS_PTUPDATE) {
		/* GPU must be idle before TTBR0/CONTEXTIDR are rewritten */
		kgsl_idle(mmu->device, KGSL_TIMEOUT_DEFAULT);
		for (i = 0; i < iommu->unit_count; i++) {
			/* get the lsb value which should not change when
			 * changing ttbr0 */
			pt_val = kgsl_iommu_get_pt_lsb(mmu, i,
						KGSL_IOMMU_CONTEXT_USER);
			pt_val += pt_base;
			KGSL_IOMMU_SET_IOMMU_REG(
				iommu->iommu_units[i].reg_map.hostptr,
				KGSL_IOMMU_CONTEXT_USER, TTBR0, pt_val);
			/* Read back after the barrier to post the write */
			mb();
			temp = KGSL_IOMMU_GET_IOMMU_REG(
				iommu->iommu_units[i].reg_map.hostptr,
				KGSL_IOMMU_CONTEXT_USER, TTBR0);
			/* Set asid */
			KGSL_IOMMU_SET_IOMMU_REG(
				iommu->iommu_units[i].reg_map.hostptr,
				KGSL_IOMMU_CONTEXT_USER, CONTEXTIDR,
				kgsl_iommu_get_hwpagetable_asid(mmu));
			mb();
			temp = KGSL_IOMMU_GET_IOMMU_REG(
				iommu->iommu_units[i].reg_map.hostptr,
				KGSL_IOMMU_CONTEXT_USER, CONTEXTIDR);
		}
	}
	/* Flush tlb */
	if (flags & KGSL_MMUFLAGS_TLBFLUSH) {
		for (i = 0; i < iommu->unit_count; i++) {
			/* Invalidate only this pagetable's ASID entries */
			KGSL_IOMMU_SET_IOMMU_REG(
				iommu->iommu_units[i].reg_map.hostptr,
				KGSL_IOMMU_CONTEXT_USER, CTX_TLBIASID,
				kgsl_iommu_get_hwpagetable_asid(mmu));
			mb();
		}
	}
	/* Disable smmu clock */
	kgsl_iommu_disable_clk_on_ts(mmu, 0, false);
}
static int adreno_ringbuffer_waitspace(struct adreno_ringbuffer *rb, unsigned int numcmds, int wptr_ahead) { int nopcount; unsigned int freecmds; unsigned int *cmds; unsigned int gpuaddr; unsigned long wait_time; unsigned long wait_timeout = msecs_to_jiffies(ADRENO_IDLE_TIMEOUT); unsigned long wait_time_part; unsigned int rptr; /* if wptr ahead, fill the remaining with NOPs */ if (wptr_ahead) { /* -1 for header */ nopcount = KGSL_RB_DWORDS - rb->wptr - 1; cmds = RB_HOSTPTR(rb, rb->wptr); gpuaddr = RB_GPUADDR(rb, rb->wptr); *cmds = cp_nop_packet(nopcount); kgsl_cffdump_write(rb->device, gpuaddr, *cmds); /* Make sure that rptr is not 0 before submitting * commands at the end of ringbuffer. We do not * want the rptr and wptr to become equal when * the ringbuffer is not empty */ do { rptr = adreno_get_rptr(rb); } while (!rptr); rb->wptr = 0; } wait_time = jiffies + wait_timeout; wait_time_part = jiffies + msecs_to_jiffies(KGSL_TIMEOUT_PART); /* wait for space in ringbuffer */ while (1) { rptr = adreno_get_rptr(rb); freecmds = rptr - rb->wptr; if (freecmds == 0 || freecmds > numcmds) break; if (time_after(jiffies, wait_time)) { KGSL_DRV_ERR(rb->device, "Timed out while waiting for freespace in ringbuffer " "rptr: 0x%x, wptr: 0x%x\n", rptr, rb->wptr); return -ETIMEDOUT; } } return 0; }
int kgsl_pwrctrl_pwrrail(struct kgsl_device *device, unsigned int pwrflag) { struct kgsl_pwrctrl *pwr = &device->pwrctrl; switch (pwrflag) { case KGSL_PWRFLAGS_POWER_OFF: if (pwr->power_flags & KGSL_PWRFLAGS_POWER_ON) { KGSL_DRV_INFO("power off, device %d\n", device->id); if (internal_pwr_rail_ctl(pwr->pwr_rail, KGSL_FALSE)) { KGSL_DRV_ERR( "call internal_pwr_rail_ctl failed\n"); return KGSL_FAILURE; } if (pwr->gpu_reg) regulator_disable(pwr->gpu_reg); pwr->power_flags &= ~(KGSL_PWRFLAGS_POWER_ON); pwr->power_flags |= KGSL_PWRFLAGS_POWER_OFF; } return KGSL_SUCCESS; case KGSL_PWRFLAGS_POWER_ON: if (pwr->power_flags & KGSL_PWRFLAGS_POWER_OFF) { KGSL_DRV_INFO("power on, device %d\n", device->id); if (internal_pwr_rail_ctl(pwr->pwr_rail, KGSL_TRUE)) { KGSL_DRV_ERR( "call internal_pwr_rail_ctl failed\n"); return KGSL_FAILURE; } if (pwr->gpu_reg) regulator_enable(pwr->gpu_reg); pwr->power_flags &= ~(KGSL_PWRFLAGS_POWER_OFF); pwr->power_flags |= KGSL_PWRFLAGS_POWER_ON; } return KGSL_SUCCESS; default: return KGSL_FAILURE; } }
int adreno_drawctxt_switch(struct adreno_device *adreno_dev, struct adreno_context *drawctxt, unsigned int flags) { struct kgsl_device *device = &adreno_dev->dev; int ret = 0; /* already current? */ if (adreno_dev->drawctxt_active == drawctxt) return ret; trace_adreno_drawctxt_switch(adreno_dev->drawctxt_active, drawctxt, flags); /* Get a refcount to the new instance */ if (drawctxt) { if (!_kgsl_context_get(&drawctxt->base)) return -EINVAL; ret = kgsl_mmu_setstate(&device->mmu, drawctxt->base.proc_priv->pagetable, adreno_dev->drawctxt_active ? adreno_dev->drawctxt_active->base.id : KGSL_CONTEXT_INVALID); /* Set the new context */ ret = adreno_context_restore(adreno_dev, drawctxt); if (ret) { KGSL_DRV_ERR(device, "Error in GPU context %d restore: %d\n", drawctxt->base.id, ret); return ret; } } else { /* * No context - set the default pagetable and thats it. * If there isn't a current context, the kgsl_mmu_setstate * will use the CPU path so we don't need to give * it a valid context id. */ ret = kgsl_mmu_setstate(&device->mmu, device->mmu.defaultpagetable, adreno_dev->drawctxt_active->base.id); } /* Put the old instance of the active drawctxt */ if (adreno_dev->drawctxt_active) kgsl_context_put(&adreno_dev->drawctxt_active->base); adreno_dev->drawctxt_active = drawctxt; return 0; }
/*
 * kgsl_regread_nolock - read one device register without taking any lock.
 * @device: KGSL device whose register space is mapped
 * @offsetwords: register offset in 32-bit words
 * @value: out - the register contents
 *
 * Returns 0 on success or -ERANGE if the offset is outside the mapped
 * register space.
 */
static int kgsl_regread_nolock(struct kgsl_device *device,
	unsigned int offsetwords, unsigned int *value)
{
	unsigned int *addr;

	/* Bounds-check against the mapped register window */
	if (offsetwords*sizeof(uint32_t) >= device->regspace.sizebytes) {
		KGSL_DRV_ERR(device, "invalid offset %d\n", offsetwords);
		return -ERANGE;
	}

	addr = (unsigned int *)(device->regspace.mmio_virt_base +
		(offsetwords << 2));
	/* Raw read: no barrier, caller handles ordering */
	*value = __raw_readl(addr);
	return 0;
}
int adreno_drawctxt_switch(struct adreno_device *adreno_dev, struct adreno_ringbuffer *rb, struct adreno_context *drawctxt, unsigned int flags) { struct kgsl_device *device = &adreno_dev->dev; struct kgsl_pagetable *new_pt; int ret = 0; /* We always expect a valid rb */ BUG_ON(!rb); /* already current? */ if (rb->drawctxt_active == drawctxt) return ret; trace_adreno_drawctxt_switch(rb, drawctxt, flags); /* Get a refcount to the new instance */ if (drawctxt) { if (!_kgsl_context_get(&drawctxt->base)) return -EINVAL; new_pt = drawctxt->base.proc_priv->pagetable; } else { /* No context - set the default pagetable and thats it. */ new_pt = device->mmu.defaultpagetable; } ret = adreno_iommu_set_pt(rb, new_pt); if (ret) { KGSL_DRV_ERR(device, "Failed to set pagetable on rb %d\n", rb->id); return ret; } /* Put the old instance of the active drawctxt */ if (rb->drawctxt_active) kgsl_context_put(&rb->drawctxt_active->base); rb->drawctxt_active = drawctxt; /* Set the new context */ adreno_context_restore(rb); return 0; }
/* Dump another item on the current pending list */
static void *dump_object(struct kgsl_device *device, int obj, void *snapshot,
	int *remain)
{
	/* Only IB objects are understood; anything else is a logic error */
	if (objbuf[obj].type == SNAPSHOT_OBJ_TYPE_IB)
		return kgsl_snapshot_add_section(device,
			KGSL_SNAPSHOT_SECTION_IB, snapshot, remain,
			snapshot_ib, &objbuf[obj]);

	KGSL_DRV_ERR(device,
		"snapshot: Invalid snapshot object type: %d\n",
		objbuf[obj].type);
	/* Unknown type: leave the snapshot pointer unchanged */
	return snapshot;
}