/*
 * gk20a_ram_put - release a DMA-backed "VRAM" allocation made by
 * gk20a_ram_get() (GK20A has no dedicated VRAM; it borrows system memory).
 *
 * Consumes the caller's reference: *pmem is cleared unconditionally, even
 * when it was already NULL.  Safe to call with *pmem == NULL.
 */
static void
gk20a_ram_put(struct nouveau_fb *pfb, struct nouveau_mem **pmem)
{
	struct gk20a_mem *mem = to_gk20a_mem(*pmem);

	/* Clear the caller's pointer first so it is never left dangling. */
	*pmem = NULL;
	if (unlikely(mem == NULL))
		return;

#if defined(__NetBSD__)
	if (likely(mem->base.pages)) {
		const bus_dma_tag_t dmat = nv_device(pfb)->platformdev->dmat;

		/*
		 * Tear down in exact reverse order of the allocation path:
		 * unload the DMA map, unmap the KVA, destroy the map, then
		 * free the backing segment.
		 */
		bus_dmamap_unload(dmat, mem->base.pages);
		bus_dmamem_unmap(dmat, mem->cpuaddr, mem->dmasize);
		bus_dmamap_destroy(dmat, mem->base.pages);
		bus_dmamem_free(dmat, &mem->dmaseg, 1);
	}
#else
	struct device *dev = nv_device_base(nv_device(pfb));

	/*
	 * cpuaddr may be NULL when gk20a_ram_put() is invoked from the
	 * gk20a_ram_get() error path after dma_alloc_coherent() failed.
	 * mem->base.size is in pages, hence the << PAGE_SHIFT.
	 */
	if (likely(mem->cpuaddr))
		dma_free_coherent(dev, mem->base.size << PAGE_SHIFT,
				  mem->cpuaddr, mem->handle);
	kfree(mem->base.pages);
#endif
	kfree(mem);
}
/*
 * nouveau_i2c_port_create_ - common constructor for a nouveau i2c port
 * object.
 *
 * Creates the base object, initialises the Linux i2c_adapter embedded in
 * the port, registers it with the i2c core and, on success, links the port
 * onto the owning nouveau_i2c engine's port list.
 *
 * When @algo is the bit-banging algorithm and the "NvI2C" config option
 * does not force the custom transfer path, a kzalloc'd i2c_algo_bit_data
 * is wired up and i2c_bit_add_bus() is used instead of i2c_add_adapter().
 *
 * Returns 0 on success or a negative errno; on failure the caller is
 * expected to destroy the partially-constructed object via *pobject.
 */
int
nouveau_i2c_port_create_(struct nouveau_object *parent,
			 struct nouveau_object *engine,
			 struct nouveau_oclass *oclass, u8 index,
			 const struct i2c_algorithm *algo,
			 const struct nouveau_i2c_func *func,
			 int size, void **pobject)
{
	struct nouveau_device *device = nv_device(engine);
	struct nouveau_i2c *i2c = (void *)engine;
	struct nouveau_i2c_port *port;
	int ret;

	ret = nouveau_object_create_(parent, engine, oclass, 0, size, pobject);
	port = *pobject;
	if (ret)
		return ret;

	snprintf(port->adapter.name, sizeof(port->adapter.name),
		 "nouveau-%s-%d", device->name, index);
	port->adapter.owner = THIS_MODULE;
	port->adapter.dev.parent = nv_device_base(device);
	port->index = index;
	port->aux = -1;	/* no aux channel until a subclass assigns one */
	port->func = func;
	mutex_init(&port->mutex);

	if ( algo == &nouveau_i2c_bit_algo &&
	    !nouveau_boolopt(device->cfgopt, "NvI2C", CSTMSEL)) {
		struct i2c_algo_bit_data *bit;

		bit = kzalloc(sizeof(*bit), GFP_KERNEL);
		if (!bit)
			return -ENOMEM;

		bit->udelay = 10;	/* ~100kHz bus */
		bit->timeout = usecs_to_jiffies(2200);
		bit->data = port;
		bit->pre_xfer = nouveau_i2c_pre_xfer;
		bit->post_xfer = nouveau_i2c_post_xfer;
		bit->setsda = nouveau_i2c_setsda;
		bit->setscl = nouveau_i2c_setscl;
		bit->getsda = nouveau_i2c_getsda;
		bit->getscl = nouveau_i2c_getscl;

		port->adapter.algo_data = bit;
		ret = i2c_bit_add_bus(&port->adapter);
		if (ret) {
			/*
			 * Fix: don't leak the bit-bang state on registration
			 * failure, and don't leave algo_data dangling for the
			 * object destructor to trip over.
			 */
			port->adapter.algo_data = NULL;
			kfree(bit);
		}
	} else {
		port->adapter.algo_data = port;
		port->adapter.algo = algo;
		ret = i2c_add_adapter(&port->adapter);
	}

	if (ret == 0)
		list_add_tail(&port->head, &i2c->ports);
	return ret;
}
static void gk20a_ram_put(struct nouveau_fb *pfb, struct nouveau_mem **pmem) { struct device *dev = nv_device_base(nv_device(pfb)); struct gk20a_mem *mem = to_gk20a_mem(*pmem); *pmem = NULL; if (unlikely(mem == NULL)) return; if (likely(mem->cpuaddr)) dma_free_coherent(dev, mem->base.size << PAGE_SHIFT, mem->cpuaddr, mem->handle); kfree(mem->base.pages); kfree(mem); }
/*
 * _nouveau_xtensa_init - bring up an xtensa-based falcon engine, loading
 * its firmware image into a GPU object on first use.
 *
 * NOTE(review): this function is truncated in the visible chunk — the body
 * continues (register programming and final return) beyond what is shown
 * here; `tmp` is declared but not used in the visible portion, presumably
 * by the missing tail.
 */
int
_nouveau_xtensa_init(struct nouveau_object *object)
{
	struct nouveau_device *device = nv_device(object);
	struct nouveau_xtensa *xtensa = (void *)object;
	const struct firmware *fw;
	char name[32];
	int i, ret;
	u32 tmp;

	ret = nouveau_engine_init(&xtensa->base);
	if (ret)
		return ret;

	/* Firmware is fetched and cached in gpu_fw only on the first init. */
	if (!xtensa->gpu_fw) {
		/* Image name is derived from the engine's MMIO base. */
		snprintf(name, sizeof(name), "nouveau/nv84_xuc%03x",
			 xtensa->addr >> 12);

		ret = request_firmware(&fw, name, nv_device_base(device));
		if (ret) {
			nv_warn(xtensa, "unable to load firmware %s\n", name);
			return ret;
		}

		/* 0x40000 is the size of the gpuobj allocated below. */
		if (fw->size > 0x40000) {
			nv_warn(xtensa, "firmware %s too large\n", name);
			release_firmware(fw);
			return -EINVAL;
		}

		ret = nouveau_gpuobj_new(object, NULL, 0x40000, 0x1000, 0,
					 &xtensa->gpu_fw);
		if (ret) {
			release_firmware(fw);
			return ret;
		}

		nv_debug(xtensa, "Loading firmware to address: 0x%"PRIxMAX"\n",
			 (uintmax_t)xtensa->gpu_fw->addr);

		/*
		 * Copy the image a word at a time into the gpuobj.
		 * NOTE(review): reads fw->data through a u32 *, which assumes
		 * the firmware buffer is suitably aligned — confirm against
		 * request_firmware()'s allocation guarantees.
		 */
		for (i = 0; i < fw->size / 4; i++)
			nv_wo32(xtensa->gpu_fw, i * 4, *((u32 *)fw->data + i));
		release_firmware(fw);
	}
	/* (chunk ends here — remainder of the function is not visible) */
/*
 * nouveau_sysfs_pstate_get - "pstate" sysfs show callback.
 *
 * Queries pstate info via the nvif control object and formats one line per
 * performance state into @b (a PAGE_SIZE sysfs buffer), followed by one
 * extra line (i == info.count) describing the *current* state prefixed
 * with the power source ("AC"/"DC"/"--").  Markers appended per line:
 * " AC"/" DC" for the user-selected state per power source, " *" for the
 * active pstate.  Returns the number of bytes written, or a negative errno.
 */
static ssize_t
nouveau_sysfs_pstate_get(struct device *d, struct device_attribute *a,
			 char *b)
{
	struct nouveau_sysfs *sysfs = nouveau_sysfs(drm_device(d));
	struct nvif_control_pstate_info_v0 info = {};
	size_t cnt = PAGE_SIZE;
	char *buf = b;
	int ret, i;

	ret = nvif_mthd(&sysfs->ctrl, NVIF_CONTROL_PSTATE_INFO,
			&info, sizeof(info));
	if (ret)
		return ret;

	/* One pass per pstate, plus a trailing pass for the current state. */
	for (i = 0; i < info.count + 1; i++) {
		const s32 state = i < info.count ? i :
			NVIF_CONTROL_PSTATE_ATTR_V0_STATE_CURRENT;
		struct nvif_control_pstate_attr_v0 attr = {
			.state = state,
			.index = 0,
		};

		ret = nvif_mthd(&sysfs->ctrl, NVIF_CONTROL_PSTATE_ATTR,
				&attr, sizeof(attr));
		if (ret)
			return ret;

		if (i < info.count)
			snappendf(buf, cnt, "%02x:", attr.state);
		else
			snappendf(buf, cnt, "%s:", info.pwrsrc == 0 ? "DC" :
						   info.pwrsrc == 1 ? "AC" :
						   "--");

		/*
		 * Walk the attribute list: each query returns the next index
		 * in attr.index, 0 terminating the chain.  attr.state must
		 * be re-seeded each iteration since the query overwrites it.
		 */
		attr.index = 0;
		do {
			attr.state = state;
			ret = nvif_mthd(&sysfs->ctrl, NVIF_CONTROL_PSTATE_ATTR,
					&attr, sizeof(attr));
			if (ret)
				return ret;

			snappendf(buf, cnt, " %s %d", attr.name, attr.min);
			if (attr.min != attr.max)
				snappendf(buf, cnt, "-%d", attr.max);
			snappendf(buf, cnt, " %s", attr.unit);
		} while (attr.index);

		if (state >= 0) {
			if (info.ustate_ac == state)
				snappendf(buf, cnt, " AC");
			if (info.ustate_dc == state)
				snappendf(buf, cnt, " DC");
			if (info.pstate == state)
				snappendf(buf, cnt, " *");
		} else {
			/* current-state line: flag "auto" (< -1) selections */
			if (info.ustate_ac < -1)
				snappendf(buf, cnt, " AC");
			if (info.ustate_dc < -1)
				snappendf(buf, cnt, " DC");
		}

		snappendf(buf, cnt, "\n");
	}

	return strlen(b);
}

/*
 * nouveau_sysfs_pstate_set - "pstate" sysfs store callback.
 *
 * Accepts an optional "dc:"/"ac:" power-source prefix, then "none", "auto"
 * or a hex pstate id.  Forwards the request via NVIF_CONTROL_PSTATE_USER.
 * Returns @count on success or a negative errno.
 *
 * NOTE(review): writes the '\0' through a pointer obtained by strchr() on
 * the const @buf — relies on the sysfs buffer actually being writable;
 * confirm this matches the sysfs contract for store callbacks.
 */
static ssize_t
nouveau_sysfs_pstate_set(struct device *d, struct device_attribute *a,
			 const char *buf, size_t count)
{
	struct nouveau_sysfs *sysfs = nouveau_sysfs(drm_device(d));
	struct nvif_control_pstate_user_v0 args = { .pwrsrc = -EINVAL };
	long value, ret;
	char *tmp;

	/* Strip the trailing newline echo'd input carries. */
	if ((tmp = strchr(buf, '\n')))
		*tmp = '\0';

	if (!strncasecmp(buf, "dc:", 3)) {
		args.pwrsrc = 0;
		buf += 3;
	} else
	if (!strncasecmp(buf, "ac:", 3)) {
		args.pwrsrc = 1;
		buf += 3;
	}

	if (!strcasecmp(buf, "none"))
		args.ustate = NVIF_CONTROL_PSTATE_USER_V0_STATE_UNKNOWN;
	else
	if (!strcasecmp(buf, "auto"))
		args.ustate = NVIF_CONTROL_PSTATE_USER_V0_STATE_PERFMON;
	else {
		/* pstate ids are printed in hex by the show path above */
		ret = kstrtol(buf, 16, &value);
		if (ret)
			return ret;
		args.ustate = value;
	}

	ret = nvif_mthd(&sysfs->ctrl, NVIF_CONTROL_PSTATE_USER,
			&args, sizeof(args));
	if (ret < 0)
		return ret;

	return count;
}

static DEVICE_ATTR(pstate, S_IRUGO | S_IWUSR,
		   nouveau_sysfs_pstate_get, nouveau_sysfs_pstate_set);

/*
 * nouveau_sysfs_fini - remove the pstate attribute and tear down the nvif
 * control object.  Safe to call when init failed or never ran.
 */
void
nouveau_sysfs_fini(struct drm_device *dev)
{
	struct nouveau_sysfs *sysfs = nouveau_sysfs(dev);
	struct nouveau_drm *drm = nouveau_drm(dev);
	struct nvif_device *device = &drm->device;

	/* ctrl.priv non-NULL means the control object was initialised */
	if (sysfs && sysfs->ctrl.priv) {
		device_remove_file(nv_device_base(nvkm_device(device)),
				   &dev_attr_pstate);
		nvif_object_fini(&sysfs->ctrl);
	}

	drm->sysfs = NULL;
	kfree(sysfs);
}

/*
 * nouveau_sysfs_init - create the nvif control object and expose the
 * "pstate" sysfs attribute.  No-op when the nouveau_pstate module option
 * is disabled.
 *
 * NOTE(review): returns 0 even when nvif_object_init() fails, and ignores
 * device_create_file()'s return value — the feature is treated as
 * best-effort.
 */
int
nouveau_sysfs_init(struct drm_device *dev)
{
	struct nouveau_drm *drm = nouveau_drm(dev);
	struct nvif_device *device = &drm->device;
	struct nouveau_sysfs *sysfs;
	int ret;

	if (!nouveau_pstate)
		return 0;

	sysfs = drm->sysfs = kzalloc(sizeof(*sysfs), GFP_KERNEL);
	if (!sysfs)
		return -ENOMEM;

	ret = nvif_object_init(nvif_object(device), NULL, NVDRM_CONTROL,
			       NVIF_IOCTL_NEW_V0_CONTROL, NULL, 0,
			       &sysfs->ctrl);
	if (ret == 0)
		device_create_file(nv_device_base(nvkm_device(device)),
				   &dev_attr_pstate);

	return 0;
}
/*
 * gk20a_ram_get - allocate a contiguous DMA buffer to serve as "VRAM" on
 * GK20A (which has no dedicated video memory).
 *
 * @size/@align are in bytes on entry; both are converted to page units.
 * The requested alignment is rounded up to a power of two, and the page
 * count is raised to at least the alignment so the returned base address
 * satisfies it.  On success *pmem points at the embedded nouveau_mem and
 * mem->base.offset holds the DMA/bus address.  Returns 0 or -errno.
 */
static int
gk20a_ram_get(struct nouveau_fb *pfb, u64 size, u32 align, u32 ncmin,
	      u32 memtype, struct nouveau_mem **pmem)
{
#if !defined(__NetBSD__)
	struct device *dev = nv_device_base(nv_device(pfb));
	int i;
#endif
	struct gk20a_mem *mem;
	u32 type = memtype & 0xff;
	u32 npages, order;

	nv_debug(pfb, "%s: size: %llx align: %x, ncmin: %x\n", __func__,
		 (unsigned long long)size, align, ncmin);

	npages = size >> PAGE_SHIFT;
	if (npages == 0)
		npages = 1;

	if (align == 0)
		align = PAGE_SIZE;
	align >>= PAGE_SHIFT;	/* bytes -> pages */

	/* round alignment to the next power of 2, if needed */
#if defined(__NetBSD__)
	order = fls32(align);
#else
	order = fls(align);
#endif
	if ((align & (align - 1)) == 0)
		order--;	/* already a power of two: keep it as-is */
	align = BIT(order);

	/* ensure returned address is correctly aligned */
	npages = max(align, npages);

	mem = kzalloc(sizeof(*mem), GFP_KERNEL);
	if (!mem)
		return -ENOMEM;

	mem->base.size = npages;	/* stored in pages, not bytes */
	mem->base.memtype = type;

#if defined(__NetBSD__)
	int ret, nsegs;
	/*
	 * NOTE(review): align is BIT(order) >= 1 at this point, so this
	 * guard looks unreachable; also `align` here is in pages while
	 * bus_dmamem_alloc() takes a byte alignment — confirm intent.
	 */
	if (align == 0)
		align = PAGE_SIZE;
	const bus_dma_tag_t dmat = nv_device(pfb)->platformdev->dmat;
	const bus_size_t dmasize = npages << PAGE_SHIFT;
	/* Single contiguous segment; errors are negated to -errno style. */
	ret = -bus_dmamem_alloc(dmat, dmasize, align, 0, &mem->dmaseg, 1,
	    &nsegs, BUS_DMA_WAITOK);
	if (ret) {
		/* Backward-goto unwind chain: failN frees what step N had. */
fail0:		kfree(mem);
		return ret;
	}
	KASSERT(nsegs == 1);
	ret = -bus_dmamap_create(dmat, dmasize, nsegs, dmasize, 0,
	    BUS_DMA_WAITOK, &mem->base.pages);
	if (ret) {
fail1:		bus_dmamem_free(dmat, &mem->dmaseg, nsegs);
		goto fail0;
	}
	ret = -bus_dmamem_map(dmat, &mem->dmaseg, nsegs, dmasize,
	    &mem->cpuaddr, BUS_DMA_WAITOK | BUS_DMA_COHERENT);
	if (ret) {
fail2:		bus_dmamap_destroy(dmat, mem->base.pages);
		goto fail1;
	}
	memset(mem->cpuaddr, 0, dmasize);
	ret = -bus_dmamap_load(dmat, mem->base.pages, mem->cpuaddr, dmasize,
	    NULL, BUS_DMA_WAITOK);
	if (ret) {
fail3: __unused	bus_dmamem_unmap(dmat, mem->cpuaddr, dmasize);
		goto fail2;
	}
	nv_debug(pfb, "alloc size: 0x%x, align: 0x%x, paddr: %"PRIxPADDR
	    ", vaddr: %p\n", npages << PAGE_SHIFT, align,
	    mem->base.pages->dm_segs[0].ds_addr, mem->cpuaddr);
	mem->dmasize = dmasize;
	mem->base.offset = (u64)mem->base.pages->dm_segs[0].ds_addr;
	*pmem = &mem->base;
#else
	mem->base.pages = kzalloc(sizeof(dma_addr_t) * npages, GFP_KERNEL);
	if (!mem->base.pages) {
		kfree(mem);
		return -ENOMEM;
	}

	/* Published before cpuaddr exists so gk20a_ram_put() can unwind. */
	*pmem = &mem->base;

	mem->cpuaddr = dma_alloc_coherent(dev, npages << PAGE_SHIFT,
					  &mem->handle, GFP_KERNEL);
	if (!mem->cpuaddr) {
		nv_error(pfb, "%s: cannot allocate memory!\n", __func__);
		gk20a_ram_put(pfb, pmem);
		return -ENOMEM;
	}

	align <<= PAGE_SHIFT;	/* back to bytes for the checks below */

	/* alignment check */
	if (unlikely(mem->handle & (align - 1)))
		nv_warn(pfb, "memory not aligned as requested: %pad (0x%x)\n",
			&mem->handle, align);

	nv_debug(pfb, "alloc size: 0x%x, align: 0x%x, paddr: %pad, vaddr: %p\n",
		 npages << PAGE_SHIFT, align, &mem->handle, mem->cpuaddr);

	/* Allocation is physically contiguous; pages[] is derived from it. */
	for (i = 0; i < npages; i++)
		mem->base.pages[i] = mem->handle + (PAGE_SIZE * i);

	mem->base.offset = (u64)mem->base.pages[0];
#endif
	return 0;
}
/*
 * gk20a_ram_get - allocate a contiguous DMA buffer to serve as "VRAM" on
 * GK20A (which has no dedicated video memory of its own).
 *
 * @size/@align are in bytes on entry and are converted to page units; the
 * alignment is rounded up to a power of two, and the page count is raised
 * to at least the alignment so the returned base address satisfies it.
 * On success *pmem points at the embedded nouveau_mem, with the DMA
 * address mirrored into both pages[] and base.offset.  Returns 0 or
 * a negative errno.
 */
static int
gk20a_ram_get(struct nouveau_fb *pfb, u64 size, u32 align, u32 ncmin,
	      u32 memtype, struct nouveau_mem **pmem)
{
	struct device *dev = nv_device_base(nv_device(pfb));
	struct gk20a_mem *mem;
	u32 type = memtype & 0xff;
	u32 npages, order;
	int i;

	/*
	 * Fix: cast u64 for %llx — u64 may be `unsigned long` on 64-bit,
	 * which mismatches the format (the sibling variant of this function
	 * already casts).
	 */
	nv_debug(pfb, "%s: size: %llx align: %x, ncmin: %x\n", __func__,
		 (unsigned long long)size, align, ncmin);

	npages = size >> PAGE_SHIFT;
	if (npages == 0)
		npages = 1;

	if (align == 0)
		align = PAGE_SIZE;
	align >>= PAGE_SHIFT;	/* bytes -> pages */

	/* round alignment to the next power of 2, if needed */
	order = fls(align);
	if ((align & (align - 1)) == 0)
		order--;	/* already a power of two: keep it */
	align = BIT(order);

	/* ensure returned address is correctly aligned */
	npages = max(align, npages);

	mem = kzalloc(sizeof(*mem), GFP_KERNEL);
	if (!mem)
		return -ENOMEM;

	mem->base.size = npages;	/* stored in pages, not bytes */
	mem->base.memtype = type;

	/* Fix: kcalloc checks the npages * element-size multiply for overflow. */
	mem->base.pages = kcalloc(npages, sizeof(dma_addr_t), GFP_KERNEL);
	if (!mem->base.pages) {
		kfree(mem);
		return -ENOMEM;
	}

	/* Published early so gk20a_ram_put() can unwind the error path. */
	*pmem = &mem->base;

	mem->cpuaddr = dma_alloc_coherent(dev, npages << PAGE_SHIFT,
					  &mem->handle, GFP_KERNEL);
	if (!mem->cpuaddr) {
		nv_error(pfb, "%s: cannot allocate memory!\n", __func__);
		gk20a_ram_put(pfb, pmem);
		return -ENOMEM;
	}

	align <<= PAGE_SHIFT;	/* back to bytes for the checks below */

	/* alignment check */
	if (unlikely(mem->handle & (align - 1)))
		nv_warn(pfb, "memory not aligned as requested: %pad (0x%x)\n",
			&mem->handle, align);

	nv_debug(pfb, "alloc size: 0x%x, align: 0x%x, paddr: %pad, vaddr: %p\n",
		 npages << PAGE_SHIFT, align, &mem->handle, mem->cpuaddr);

	/* The buffer is physically contiguous; derive per-page addresses. */
	for (i = 0; i < npages; i++)
		mem->base.pages[i] = mem->handle + (PAGE_SIZE * i);

	mem->base.offset = (u64)mem->base.pages[0];

	return 0;
}