/*
 * nvbios_M0203Em: walk the M0203E entries and return the one whose
 * strap matches 'ramcfg'.  Only M0203T_TYPE_RAMCFG tables are
 * understood; anything else is warned about and rejected.
 * Returns the entry pointer/data on match, 0x00000000 otherwise.
 */
u32
nvbios_M0203Em(struct nouveau_bios *bios, u8 ramcfg, u8 *ver, u8 *hdr,
	       struct nvbios_M0203E *info)
{
	struct nvbios_M0203T table;
	u8 count, reclen;
	u8 entry;
	u32 entry_data;

	if (!nvbios_M0203Tp(bios, ver, hdr, &count, &reclen, &table)) {
		nv_warn(bios, "M0203T not found\n");
		return 0x00000000;
	}

	for (entry = 0;; entry++) {
		entry_data = nvbios_M0203Ep(bios, entry, ver, hdr, info);
		if (!entry_data)
			break;
		if (table.type != M0203T_TYPE_RAMCFG) {
			nv_warn(bios, "M0203T type %02x\n", table.type);
			return 0x00000000;
		}
		if (info->strap == ramcfg)
			return entry_data;
	}

	/* ran off the end of the table without a match (entry_data == 0) */
	return entry_data;
}
/*
 * nvc0_clock_pll_set: program a PLL of the given type to 'freq' (KHz).
 * Coefficients come from nva3_pll_calc using the VBIOS PLL limits; on
 * success the (non-negative) nva3_pll_calc result is returned, a
 * negative errno otherwise.  Only VPLL0/VPLL1 are implemented.
 */
static int
nvc0_clock_pll_set(struct nouveau_clock *clk, u32 type, u32 freq)
{
	struct nvc0_clock_priv *priv = (void *)clk;
	struct nouveau_bios *bios = nouveau_bios(priv);
	struct nvbios_pll limits;
	int N, fN, M, P;
	int ret;

	ret = nvbios_pll_parse(bios, type, &limits);
	if (ret)
		return ret;

	ret = nva3_pll_calc(clk, &limits, freq, &N, &fN, &M, &P);
	if (ret < 0)
		return ret;

	switch (limits.type) {
	case PLL_VPLL0:
	case PLL_VPLL1:
		nv_mask(priv, limits.reg + 0x0c, 0x00000000, 0x00000100);
		nv_wr32(priv, limits.reg + 0x04, (P << 16) | (N << 8) | M);
		nv_wr32(priv, limits.reg + 0x10, fN << 16);
		/* propagate nva3_pll_calc's success value unchanged */
		return ret;
	default:
		nv_warn(priv, "0x%08x/%dKhz unimplemented\n", type, freq);
		return -EINVAL;
	}
}
/*
 * dcb_i2c_table: locate the DCB i2c table and fill in its layout
 * (*ver/*hdr/*cnt/*len).  Returns the table offset, or 0x0000 when the
 * table is absent or of an unsupported (>= 0x42, "ccb") version.
 */
u16
dcb_i2c_table(struct nouveau_bios *bios, u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
{
	u16 dcb = dcb_table(bios, ver, hdr, cnt, len);
	u16 i2c = 0x0000;

	if (dcb) {
		/* i2c pointer moved within the DCB header across versions */
		if (*ver >= 0x15)
			i2c = nv_ro16(bios, dcb + 2);
		if (*ver >= 0x30)
			i2c = nv_ro16(bios, dcb + 4);
	}

	if (i2c && *ver >= 0x42) {
		nv_warn(bios, "ccb %02x not supported\n", *ver);
		return 0x0000;
	}

	if (i2c && *ver >= 0x30) {
		/* v3.0+ i2c tables carry their own header */
		*ver = nv_ro08(bios, i2c + 0);
		*hdr = nv_ro08(bios, i2c + 1);
		*cnt = nv_ro08(bios, i2c + 2);
		*len = nv_ro08(bios, i2c + 3);
	} else {
		/* pre-3.0: headerless, fixed layout; *ver keeps DCB version */
		*hdr = 0;
		*cnt = 16;
		*len = 4;
	}

	return i2c;
}
/*
 * nva3_devinit_pll_set: program a PLL of the given type to 'freq' (KHz)
 * during devinit.  Coefficients come from nva3_pll_calc using the VBIOS
 * PLL limits; on success the (non-negative) nva3_pll_calc result is
 * returned, a negative errno otherwise.  Only VPLL0/VPLL1 are handled.
 */
int
nva3_devinit_pll_set(struct nouveau_devinit *devinit, u32 type, u32 freq)
{
	struct nv50_devinit_priv *priv = (void *)devinit;
	struct nouveau_bios *bios = nouveau_bios(priv);
	struct nvbios_pll limits;
	int N, fN, M, P;
	int ret;

	ret = nvbios_pll_parse(bios, type, &limits);
	if (ret)
		return ret;

	ret = nva3_pll_calc(nv_subdev(devinit), &limits, freq,
			    &N, &fN, &M, &P);
	if (ret < 0)
		return ret;

	switch (limits.type) {
	case PLL_VPLL0:
	case PLL_VPLL1:
		nv_wr32(priv, limits.reg + 0, 0x50000610);
		nv_mask(priv, limits.reg + 4, 0x003fffff,
			(P << 16) | (M << 8) | N);
		nv_wr32(priv, limits.reg + 8, fN);
		/* propagate nva3_pll_calc's success value unchanged */
		return ret;
	default:
		nv_warn(priv, "0x%08x/%dKhz unimplemented\n", type, freq);
		return -EINVAL;
	}
}
/*
 * _nouveau_xtensa_init: bring up an Xtensa-based engine; on the first
 * init, fetch its firmware image and copy it into a GPU object.
 *
 * NOTE(review): this definition appears TRUNCATED in this chunk -- the
 * brace closing the function (and any post-load register programming /
 * final return) is not visible here.
 */
int
_nouveau_xtensa_init(struct nouveau_object *object)
{
	struct nouveau_device *device = nv_device(object);
	struct nouveau_xtensa *xtensa = (void *)object;
	const struct firmware *fw;
	char name[32];
	int i, ret;
	u32 tmp;	/* not used in the visible portion of the function */

	ret = nouveau_engine_init(&xtensa->base);
	if (ret)
		return ret;

	if (!xtensa->gpu_fw) {
		/* firmware name is derived from the engine's register base */
		snprintf(name, sizeof(name), "nouveau/nv84_xuc%03x",
			 xtensa->addr >> 12);
		ret = request_firmware(&fw, name, nv_device_base(device));
		if (ret) {
			nv_warn(xtensa, "unable to load firmware %s\n", name);
			return ret;
		}
		/* image must fit in the 0x40000-byte gpuobj created below */
		if (fw->size > 0x40000) {
			nv_warn(xtensa, "firmware %s too large\n", name);
			release_firmware(fw);
			return -EINVAL;
		}
		ret = nouveau_gpuobj_new(object, NULL, 0x40000, 0x1000, 0,
					 &xtensa->gpu_fw);
		if (ret) {
			release_firmware(fw);
			return ret;
		}
		nv_debug(xtensa, "Loading firmware to address: 0x%"PRIxMAX"\n",
			 (uintmax_t)xtensa->gpu_fw->addr);
		/* copy the image into the gpuobj one 32-bit word at a time */
		for (i = 0; i < fw->size / 4; i++)
			nv_wo32(xtensa->gpu_fw, i * 4, *((u32 *)fw->data + i));
		release_firmware(fw);
	}
void nv84_bar_flush(struct nouveau_bar *bar) { struct nv50_bar_priv *priv = (void *)bar; unsigned long flags; spin_lock_irqsave(&priv->lock, flags); nv_wr32(bar, 0x070000, 0x00000001); if (!nv_wait(priv, 0x070000, 0x00000002, 0x00000000)) nv_warn(priv, "flush timeout\n"); spin_unlock_irqrestore(&priv->lock, flags); }
/*
 * nouveau_engine_create_: common constructor for engine objects.
 * Creates the underlying subdev, then applies the hw/fw disable mask
 * and the user's config options ("iname=0/1") before initialising the
 * context list and lock.  Returns 0 on success, -ENODEV when the
 * engine is disabled, or the subdev constructor's error.
 */
int
nouveau_engine_create_(struct nouveau_object *parent,
		       struct nouveau_object *engobj,
		       struct nouveau_oclass *oclass, bool enable,
		       const char *iname, const char *fname,
		       int length, void **pobject)
{
	struct nouveau_engine *eng;
	int ret;

	ret = nouveau_subdev_create_(parent, engobj, oclass, NV_ENGINE_CLASS,
				     iname, fname, length, pobject);
	eng = *pobject;
	if (ret)
		return ret;

	if (parent) {
		struct nouveau_device *dev = nv_device(parent);
		int idx = nv_engidx(nv_object(eng));

		/* hw/fw says the engine is unusable; honour that unless the
		 * user explicitly forces it on via config options */
		if (dev->disable_mask & (1ULL << idx)) {
			if (!nouveau_boolopt(dev->cfgopt, iname, false)) {
				nv_debug(eng, "engine disabled by hw/fw\n");
				return -ENODEV;
			}
			nv_warn(eng, "ignoring hw/fw engine disable\n");
		}

		/* user-level enable/disable, defaulting to 'enable' */
		if (!nouveau_boolopt(dev->cfgopt, iname, enable)) {
			if (!enable)
				nv_warn(eng, "disabled, %s=1 to enable\n",
					iname);
			return -ENODEV;
		}
	}

	INIT_LIST_HEAD(&eng->contexts);
	spin_lock_init(&eng->lock);
	return 0;
}
int nouveau_fb_bios_memtype(struct nouveau_bios *bios) { const u8 ramcfg = (nv_rd32(bios, 0x101000) & 0x0000003c) >> 2; struct nvbios_M0203E M0203E; u8 ver, hdr; if (nvbios_M0203Em(bios, ramcfg, &ver, &hdr, &M0203E)) { switch (M0203E.type) { case M0203E_TYPE_DDR2 : return NV_MEM_TYPE_DDR2; case M0203E_TYPE_DDR3 : return NV_MEM_TYPE_DDR3; case M0203E_TYPE_GDDR3: return NV_MEM_TYPE_GDDR3; case M0203E_TYPE_GDDR5: return NV_MEM_TYPE_GDDR5; default: nv_warn(bios, "M0203E type %02x\n", M0203E.type); return NV_MEM_TYPE_UNKNOWN; } } nv_warn(bios, "M0203E not matched!\n"); return NV_MEM_TYPE_UNKNOWN; }
/*
 * nv41_vm_flush: trigger a TLB flush via register 0x100810 and poll for
 * the completion bit; warns with the raw register value on timeout.
 * The subdev mutex serialises concurrent flushes.
 */
static void
nv41_vm_flush(struct nouveau_vm *vm)
{
	struct nv04_vm_priv *mmu = (void *)vm->vmm;

	mutex_lock(&nv_subdev(mmu)->mutex);

	nv_wr32(mmu, 0x100810, 0x00000022);
	if (!nv_wait(mmu, 0x100810, 0x00000020, 0x00000020))
		nv_warn(mmu, "flush timeout, 0x%08x\n",
			nv_rd32(mmu, 0x100810));
	nv_wr32(mmu, 0x100810, 0x00000000);

	mutex_unlock(&nv_subdev(mmu)->mutex);
}
/*
 * gk20a_ibus_intr: ibus interrupt handler.  Any error bit in the low
 * three bits of 0x120058 triggers a priv-ring reset; the interrupt is
 * then acknowledged and we wait for the ringmaster to confirm.
 */
static void
gk20a_ibus_intr(struct nvkm_subdev *subdev)
{
	struct gk20a_ibus_priv *ibus = (void *)subdev;
	u32 status = nv_rd32(ibus, 0x120058);

	if (status & 0x7) {
		nv_debug(ibus, "resetting priv ring\n");
		gk20a_ibus_init_priv_ring(ibus);
	}

	/* Acknowledge interrupt */
	nv_mask(ibus, 0x12004c, 0x2, 0x2);

	if (!nv_wait(subdev, 0x12004c, 0x3f, 0x00))
		nv_warn(ibus, "timeout waiting for ringmaster ack\n");
}
/*
 * nv50_devinit_ctor: construct the NV50 devinit subdev; a thin wrapper
 * around nouveau_devinit_create with no extra state to set up.
 */
static int
nv50_devinit_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
		  struct nouveau_oclass *oclass, void *data, u32 size,
		  struct nouveau_object **pobject)
{
	struct nv50_devinit_priv *devinit;
	int ret;

	/* entry trace (warn level looks like leftover debugging) */
	nv_warn(parent, "[%s]\n", __PRETTY_FUNCTION__);

	ret = nouveau_devinit_create(parent, engine, oclass, &devinit);
	*pobject = nv_object(devinit);
	return ret;
}
/*
 * nv84_crypt_init: initialise the PCRYPT engine.  After the generic
 * engine init, a fixed bring-up register sequence is written (magic
 * values carried over from the original driver).
 */
static int
nv84_crypt_init(struct nouveau_object *object)
{
	struct nv84_crypt_priv *crypt = (void *)object;
	int ret;

	/* entry trace (warn level looks like leftover debugging) */
	nv_warn(object, "[%s]\n", __PRETTY_FUNCTION__);

	ret = nouveau_engine_init(&crypt->base);
	if (ret)
		return ret;

	nv_wr32(crypt, 0x102130, 0xffffffff);
	nv_wr32(crypt, 0x102140, 0xffffffbf);
	nv_wr32(crypt, 0x10200c, 0x00000010);
	return 0;
}
void _nouveau_xtensa_intr(struct nouveau_subdev *subdev) { struct nouveau_xtensa *xtensa = (void *)subdev; u32 unk104 = nv_ro32(xtensa, 0xd04); u32 intr = nv_ro32(xtensa, 0xc20); u32 chan = nv_ro32(xtensa, 0xc28); u32 unk10c = nv_ro32(xtensa, 0xd0c); if (intr & 0x10) nv_warn(xtensa, "Watchdog interrupt, engine hung.\n"); nv_wo32(xtensa, 0xc20, intr); intr = nv_ro32(xtensa, 0xc20); if (unk104 == 0x10001 && unk10c == 0x200 && chan && !intr) { nv_debug(xtensa, "Enabling FIFO_CTRL\n"); nv_mask(xtensa, xtensa->addr + 0xd94, 0, xtensa->fifo_val); } }
/*
 * nvc0_dmaeng_ctor: construct the NVC0 DMA engine and wire up its
 * object class list and bind callback.
 */
static int
nvc0_dmaeng_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
		 struct nouveau_oclass *oclass, void *data, u32 size,
		 struct nouveau_object **pobject)
{
	struct nvc0_dmaeng_priv *dmaeng;
	int ret;

	/* entry trace (warn level looks like leftover debugging) */
	nv_warn(parent, "[%s]\n", __PRETTY_FUNCTION__);

	ret = nouveau_dmaeng_create(parent, engine, oclass, &dmaeng);
	*pobject = nv_object(dmaeng);
	if (ret)
		return ret;

	nv_engine(dmaeng)->sclass = nouveau_dmaobj_sclass;
	dmaeng->base.bind = nvc0_dmaobj_bind;
	return 0;
}
/*
 * nvc0_software_context_ctor: construct a per-channel software engine
 * context and hook up the vblank semaphore release handler.
 */
static int
nvc0_software_context_ctor(struct nouveau_object *parent,
			   struct nouveau_object *engine,
			   struct nouveau_oclass *oclass, void *data, u32 size,
			   struct nouveau_object **pobject)
{
	struct nvc0_software_chan *ctx;
	int ret;

	/* entry trace (warn level looks like leftover debugging) */
	nv_warn(parent, "[%s]\n", __PRETTY_FUNCTION__);

	ret = nouveau_software_context_create(parent, engine, oclass, &ctx);
	*pobject = nv_object(ctx);
	if (ret)
		return ret;

	/* NOTE(review): assumes parent->parent is the channel's gpuobj;
	 * its address >> 12 is used as the vblank channel id -- confirm
	 * against the fifo channel layout */
	ctx->base.vblank.channel = nv_gpuobj(parent->parent)->addr >> 12;
	ctx->base.vblank.event.func = nvc0_software_vblsem_release;
	return 0;
}
/*
 * nvc0_software_ctor: construct the NVC0 software engine, reusing the
 * NV04 interrupt handler and installing the NVC0 class lists.
 */
static int
nvc0_software_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
		   struct nouveau_oclass *oclass, void *data, u32 size,
		   struct nouveau_object **pobject)
{
	struct nvc0_software_priv *sw;
	int ret;

	/* entry trace (warn level looks like leftover debugging) */
	nv_warn(parent, "[%s]\n", __PRETTY_FUNCTION__);

	ret = nouveau_software_create(parent, engine, oclass, &sw);
	*pobject = nv_object(sw);
	if (ret)
		return ret;

	nv_engine(sw)->cclass = &nvc0_software_cclass;
	nv_engine(sw)->sclass = nvc0_software_sclass;
	nv_subdev(sw)->intr = nv04_software_intr;
	return 0;
}
/*
 * nv84_crypt_object_ctor: construct a 16-byte PCRYPT engine object.
 * Word 0 holds the object class; the remaining three words are zeroed.
 */
static int
nv84_crypt_object_ctor(struct nouveau_object *parent,
		       struct nouveau_object *engine,
		       struct nouveau_oclass *oclass, void *data, u32 size,
		       struct nouveau_object **pobject)
{
	struct nouveau_gpuobj *gpuobj;
	u32 offset;
	int ret;

	/* entry trace (warn level looks like leftover debugging) */
	nv_warn(parent, "[%s]\n", __PRETTY_FUNCTION__);

	ret = nouveau_gpuobj_create(parent, engine, oclass, 0, parent,
				    16, 16, 0, &gpuobj);
	*pobject = nv_object(gpuobj);
	if (ret)
		return ret;

	nv_wo32(gpuobj, 0x00, nv_mclass(gpuobj));
	for (offset = 0x04; offset <= 0x0c; offset += 4)
		nv_wo32(gpuobj, offset, 0x00000000);
	return 0;
}
/*
 * nv84_crypt_ctor: construct the NV84 PCRYPT engine and register its
 * interrupt unit, handlers and class lists.
 */
static int
nv84_crypt_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
		struct nouveau_oclass *oclass, void *data, u32 size,
		struct nouveau_object **pobject)
{
	struct nv84_crypt_priv *crypt;
	int ret;

	/* entry trace (warn level looks like leftover debugging) */
	nv_warn(parent, "[%s]\n", __PRETTY_FUNCTION__);

	ret = nouveau_engine_create(parent, engine, oclass, true,
				    "PCRYPT", "crypt", &crypt);
	*pobject = nv_object(crypt);
	if (ret)
		return ret;

	nv_subdev(crypt)->unit = 0x00004000;
	nv_subdev(crypt)->intr = nv84_crypt_intr;
	nv_engine(crypt)->cclass = &nv84_crypt_cclass;
	nv_engine(crypt)->sclass = nv84_crypt_sclass;
	nv_engine(crypt)->tlb_flush = nv84_crypt_tlb_flush;
	return 0;
}
int nouveau_bar_create_(struct nouveau_object *parent, struct nouveau_object *engine, struct nouveau_oclass *oclass, int length, void **pobject) { struct nouveau_device *device = nv_device(parent); struct nouveau_bar *bar; int ret; ret = nouveau_subdev_create_(parent, engine, oclass, 0, "BARCTL", "bar", length, pobject); bar = *pobject; if (ret) return ret; if (nv_device_resource_len(device, 3) != 0) { bar->iomem = ioremap(nv_device_resource_start(device, 3), nv_device_resource_len(device, 3)); if (!bar->iomem) nv_warn(bar, "PRAMIN ioremap failed\n"); } return 0; }
/*
 * nva3_ram_ctor: construct the NVA3 RAM object.  Reclocking callbacks
 * are installed only for DDR3; for every other memory type the object
 * is still created but reclocking stays unavailable.  The bulk of the
 * function records each register the reclock script may touch as a
 * ramfuc handle.
 */
static int
nva3_ram_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
	      struct nouveau_oclass *oclass, void *data, u32 datasize,
	      struct nouveau_object **pobject)
{
	struct nva3_ram *ram;
	int ret, i;

	ret = nv50_ram_create(parent, engine, oclass, &ram);
	*pobject = nv_object(ram);
	if (ret)
		return ret;

	switch (ram->base.type) {
	case NV_MEM_TYPE_DDR3:
		ram->base.calc = nva3_ram_calc;
		ram->base.prog = nva3_ram_prog;
		ram->base.tidy = nva3_ram_tidy;
		break;
	default:
		/* not an error: object is usable, just not reclockable */
		nv_warn(ram, "reclocking of this ram type unsupported\n");
		return 0;
	}

	/* single-instance registers tracked by the reclock script */
	ram->fuc.r_0x004000 = ramfuc_reg(0x004000);
	ram->fuc.r_0x004004 = ramfuc_reg(0x004004);
	ram->fuc.r_0x004018 = ramfuc_reg(0x004018);
	ram->fuc.r_0x004128 = ramfuc_reg(0x004128);
	ram->fuc.r_0x004168 = ramfuc_reg(0x004168);
	ram->fuc.r_0x100200 = ramfuc_reg(0x100200);
	ram->fuc.r_0x100210 = ramfuc_reg(0x100210);
	/* 9 consecutive timing registers at 0x100220 */
	for (i = 0; i < 9; i++)
		ram->fuc.r_0x100220[i] = ramfuc_reg(0x100220 + (i * 4));
	ram->fuc.r_0x1002d0 = ramfuc_reg(0x1002d0);
	ram->fuc.r_0x1002d4 = ramfuc_reg(0x1002d4);
	ram->fuc.r_0x1002dc = ramfuc_reg(0x1002dc);
	ram->fuc.r_0x10053c = ramfuc_reg(0x10053c);
	ram->fuc.r_0x1005a0 = ramfuc_reg(0x1005a0);
	ram->fuc.r_0x1005a4 = ramfuc_reg(0x1005a4);
	ram->fuc.r_0x100714 = ramfuc_reg(0x100714);
	ram->fuc.r_0x100718 = ramfuc_reg(0x100718);
	ram->fuc.r_0x10071c = ramfuc_reg(0x10071c);
	/* per-partition registers: one copy per bit in part_mask */
	ram->fuc.r_0x100760 = ramfuc_stride(0x100760, 4, ram->base.part_mask);
	ram->fuc.r_0x1007a0 = ramfuc_stride(0x1007a0, 4, ram->base.part_mask);
	ram->fuc.r_0x1007e0 = ramfuc_stride(0x1007e0, 4, ram->base.part_mask);
	ram->fuc.r_0x10f804 = ramfuc_reg(0x10f804);
	ram->fuc.r_0x1110e0 = ramfuc_stride(0x1110e0, 4, ram->base.part_mask);
	ram->fuc.r_0x111100 = ramfuc_reg(0x111100);
	ram->fuc.r_0x111104 = ramfuc_reg(0x111104);
	ram->fuc.r_0x611200 = ramfuc_reg(0x611200);

	/* mode registers: dual-rank boards mirror each MR to a second set */
	if (ram->base.ranks > 1) {
		ram->fuc.r_mr[0] = ramfuc_reg2(0x1002c0, 0x1002c8);
		ram->fuc.r_mr[1] = ramfuc_reg2(0x1002c4, 0x1002cc);
		ram->fuc.r_mr[2] = ramfuc_reg2(0x1002e0, 0x1002e8);
		ram->fuc.r_mr[3] = ramfuc_reg2(0x1002e4, 0x1002ec);
	} else {
		ram->fuc.r_mr[0] = ramfuc_reg(0x1002c0);
		ram->fuc.r_mr[1] = ramfuc_reg(0x1002c4);
		ram->fuc.r_mr[2] = ramfuc_reg(0x1002e0);
		ram->fuc.r_mr[3] = ramfuc_reg(0x1002e4);
	}
	return 0;
}
/*
 * nv50_devinit_init: run VBIOS devinit.  Forces a POST when the VGA CRTC
 * registers read back as uninitialised, then, if a POST was performed,
 * executes the first init-script pointer of every DCB output entry so
 * each display encoder is properly initialised.
 */
static int
nv50_devinit_init(struct nouveau_object *object)
{
	struct nouveau_bios *bios = nouveau_bios(object);
	struct nv50_devinit_priv *priv = (void *)object;
	struct nvbios_outp info;
	struct dcb_output outp;
	u8 ver = 0xff, hdr, cnt, len;
	int ret, i = 0;

	/* entry trace (warn level looks like leftover debugging) */
	nv_warn(object, "[%s]\n", __PRETTY_FUNCTION__);

	if (!priv->base.post) {
		/* both CRTC scratch registers reading 0 => vbios never ran */
		if (!nv_rdvgac(priv, 0, 0x00) &&
		    !nv_rdvgac(priv, 0, 0x1a)) {
			nv_info(priv, "adaptor not initialised\n");
			priv->base.post = true;
		}
	}

	ret = nouveau_devinit_init(&priv->base);
	if (ret)
		return ret;

	/* if we ran the init tables, we have to execute the first script
	 * pointer of each dcb entry's display encoder table in order
	 * to properly initialise each encoder.
	 */
	while (priv->base.post && dcb_outp_parse(bios, i, &ver, &hdr, &outp)) {
		if (nvbios_outp_match(bios, outp.hasht, outp.hashm,
				      &ver, &hdr, &cnt, &len, &info)) {
			struct nvbios_init init = {
				.subdev = nv_subdev(priv),
				.bios = bios,
				.offset = info.script[0],
				.outp = &outp,
				.crtc = -1,
				.execute = 1,
			};

			nvbios_exec(&init);
		}
		i++;
	}

	return 0;
}

/* nv50_devinit_fini: teardown; delegates entirely to the base class. */
static int
nv50_devinit_fini(struct nouveau_object *object, bool suspend)
{
	struct nv50_devinit_priv *priv = (void *)object;
	return nouveau_devinit_fini(&priv->base, suspend);
}

/* Object-class descriptor binding the NV50 devinit lifecycle hooks. */
struct nouveau_oclass
nv50_devinit_oclass = {
	.handle = NV_SUBDEV(DEVINIT, 0x50),
	.ofuncs = &(struct nouveau_ofuncs) {
		.ctor = nv50_devinit_ctor,
		.dtor = nv50_devinit_dtor,
		.init = nv50_devinit_init,
		.fini = nv50_devinit_fini,
	},
};
/*
 * gk20a_ram_get: allocate DMA-addressable system memory for the GPU
 * ("vidmem" on this SoC is carved out of system RAM).  Size/alignment
 * are normalised to whole pages and a power-of-two page alignment; the
 * platform-specific allocation is done via bus_dma(9) on NetBSD and
 * dma_alloc_coherent() on Linux.
 */
static int
gk20a_ram_get(struct nouveau_fb *pfb, u64 size, u32 align, u32 ncmin,
	      u32 memtype, struct nouveau_mem **pmem)
{
#if !defined(__NetBSD__)
	struct device *dev = nv_device_base(nv_device(pfb));
	int i;
#endif
	struct gk20a_mem *mem;
	u32 type = memtype & 0xff;
	u32 npages, order;

	nv_debug(pfb, "%s: size: %llx align: %x, ncmin: %x\n", __func__,
	    (unsigned long long)size, align, ncmin);

	npages = size >> PAGE_SHIFT;
	if (npages == 0)
		npages = 1;

	if (align == 0)
		align = PAGE_SIZE;
	align >>= PAGE_SHIFT;
	/* round alignment to the next power of 2, if needed */
	/* NOTE(review): if 0 < align < PAGE_SIZE on entry, the shift above
	 * yields 0 and order becomes -1 -- TODO confirm callers always pass
	 * page-multiple alignments */
#if defined(__NetBSD__)
	order = fls32(align);
#else
	order = fls(align);
#endif
	if ((align & (align - 1)) == 0)
		order--;
	align = BIT(order);

	/* ensure returned address is correctly aligned */
	npages = max(align, npages);

	mem = kzalloc(sizeof(*mem), GFP_KERNEL);
	if (!mem)
		return -ENOMEM;

	mem->base.size = npages;
	mem->base.memtype = type;

#if defined(__NetBSD__)
	int ret, nsegs;

	/* NOTE(review): dead code -- align is BIT(order) >= 1 here */
	if (align == 0)
		align = PAGE_SIZE;

	const bus_dma_tag_t dmat = nv_device(pfb)->platformdev->dmat;
	const bus_size_t dmasize = npages << PAGE_SHIFT;

	/* NOTE(review): 'align' is in pages at this point, but
	 * bus_dmamem_alloc takes a byte alignment -- verify units */
	ret = -bus_dmamem_alloc(dmat, dmasize, align, 0, &mem->dmaseg, 1,
	    &nsegs, BUS_DMA_WAITOK);
	if (ret) {
		/* unwind chain: later failures jump backwards through
		 * these labels to release earlier resources */
fail0:		kfree(mem);
		return ret;
	}
	KASSERT(nsegs == 1);
	ret = -bus_dmamap_create(dmat, dmasize, nsegs, dmasize, 0,
	    BUS_DMA_WAITOK, &mem->base.pages);
	if (ret) {
fail1:		bus_dmamem_free(dmat, &mem->dmaseg, nsegs);
		goto fail0;
	}
	ret = -bus_dmamem_map(dmat, &mem->dmaseg, nsegs, dmasize,
	    &mem->cpuaddr, BUS_DMA_WAITOK | BUS_DMA_COHERENT);
	if (ret) {
fail2:		bus_dmamap_destroy(dmat, mem->base.pages);
		goto fail1;
	}
	memset(mem->cpuaddr, 0, dmasize);
	ret = -bus_dmamap_load(dmat, mem->base.pages, mem->cpuaddr, dmasize,
	    NULL, BUS_DMA_WAITOK);
	if (ret) {
		/* fail3 label unreferenced; __unused silences the warning */
fail3:		__unused
		bus_dmamem_unmap(dmat, mem->cpuaddr, dmasize);
		goto fail2;
	}
	nv_debug(pfb, "alloc size: 0x%x, align: 0x%x, paddr: %"PRIxPADDR
	    ", vaddr: %p\n", npages << PAGE_SHIFT, align,
	    mem->base.pages->dm_segs[0].ds_addr, mem->cpuaddr);
	mem->dmasize = dmasize;
	mem->base.offset = (u64)mem->base.pages->dm_segs[0].ds_addr;
	*pmem = &mem->base;
#else
	mem->base.pages = kzalloc(sizeof(dma_addr_t) * npages, GFP_KERNEL);
	if (!mem->base.pages) {
		kfree(mem);
		return -ENOMEM;
	}

	*pmem = &mem->base;

	mem->cpuaddr = dma_alloc_coherent(dev, npages << PAGE_SHIFT,
	    &mem->handle, GFP_KERNEL);
	if (!mem->cpuaddr) {
		nv_error(pfb, "%s: cannot allocate memory!\n", __func__);
		/* frees mem and the pages array through *pmem */
		gk20a_ram_put(pfb, pmem);
		return -ENOMEM;
	}

	align <<= PAGE_SHIFT;

	/* alignment check */
	if (unlikely(mem->handle & (align - 1)))
		nv_warn(pfb,
		    "memory not aligned as requested: %pad (0x%x)\n",
		    &mem->handle, align);

	nv_debug(pfb,
	    "alloc size: 0x%x, align: 0x%x, paddr: %pad, vaddr: %p\n",
	    npages << PAGE_SHIFT, align, &mem->handle, mem->cpuaddr);

	/* expose the contiguous allocation as a per-page address list */
	for (i = 0; i < npages; i++)
		mem->base.pages[i] = mem->handle + (PAGE_SIZE * i);

	mem->base.offset = (u64)mem->base.pages[0];
#endif

	return 0;
}
/*
 * dcb_i2c_parse: decode DCB i2c table entry 'idx' into 'info'.  Handles
 * every table revision (v4.1 PMGR routing, v3.0 typed entries, pre-3.0
 * packed types) and falls back to fixed BMP offsets on pre-DCB VBIOSes.
 * Returns 0 on success (info->type may still be DCB_I2C_UNUSED) or
 * -ENOENT when no entry exists for 'idx'.
 */
int
dcb_i2c_parse(struct nouveau_bios *bios, u8 idx, struct dcb_i2c_entry *info)
{
	u8 ver, len;
	u16 ent = dcb_i2c_entry(bios, idx, &ver, &len);
	if (ent) {
		if (ver >= 0x41) {
			/* v4.1+: top bit is a "valid" flag; routing of valid
			 * entries is owned by PMGR */
			if (!(nv_ro32(bios, ent) & 0x80000000))
				info->type = DCB_I2C_UNUSED;
			else
				info->type = DCB_I2C_PMGR;
		} else
		if (ver >= 0x30) {
			info->type = nv_ro08(bios, ent + 0x03);
		} else {
			/* pre-3.0: type packed in the low 3 bits; 7 = unused */
			info->type = nv_ro08(bios, ent + 0x03) & 0x07;
			if (info->type == 0x07)
				info->type = DCB_I2C_UNUSED;
		}

		/* default every field; type-specific decode fills in below */
		info->drive = DCB_I2C_UNUSED;
		info->sense = DCB_I2C_UNUSED;
		info->share = DCB_I2C_UNUSED;
		info->auxch = DCB_I2C_UNUSED;

		switch (info->type) {
		case DCB_I2C_NV04_BIT:
			info->drive = nv_ro08(bios, ent + 0);
			info->sense = nv_ro08(bios, ent + 1);
			return 0;
		case DCB_I2C_NV4E_BIT:
			info->drive = nv_ro08(bios, ent + 1);
			return 0;
		case DCB_I2C_NVIO_BIT:
			info->drive = nv_ro08(bios, ent + 0) & 0x0f;
			/* bit 0 of byte 1 flags a shared/paired port */
			if (nv_ro08(bios, ent + 1) & 0x01)
				info->share = nv_ro08(bios, ent + 1) >> 1;
			return 0;
		case DCB_I2C_NVIO_AUX:
			info->auxch = nv_ro08(bios, ent + 0) & 0x0f;
			if (nv_ro08(bios, ent + 1) & 0x01)
				info->share = info->auxch;
			return 0;
		case DCB_I2C_PMGR:
			/* 5-bit drive / 5-bit auxch fields; 0x1f = absent */
			info->drive = (nv_ro16(bios, ent + 0) & 0x01f) >> 0;
			if (info->drive == 0x1f)
				info->drive = DCB_I2C_UNUSED;
			info->auxch = (nv_ro16(bios, ent + 0) & 0x3e0) >> 5;
			if (info->auxch == 0x1f)
				info->auxch = DCB_I2C_UNUSED;
			info->share = info->auxch;
			return 0;
		case DCB_I2C_UNUSED:
			return 0;
		default:
			nv_warn(bios, "unknown i2c type %d\n", info->type);
			info->type = DCB_I2C_UNUSED;
			return 0;
		}
	}

	if (bios->bmp_offset && idx < 2) {
		/* BMP (from v4.0 has i2c info in the structure, it's in a
		 * fixed location on earlier VBIOS
		 */
		if (nv_ro08(bios, bios->bmp_offset + 5) < 4)
			ent = 0x0048;
		else
			ent = 0x0036 + bios->bmp_offset;

		if (idx == 0) {
			/* zero register values fall back to known defaults */
			info->drive = nv_ro08(bios, ent + 4);
			if (!info->drive) info->drive = 0x3f;
			info->sense = nv_ro08(bios, ent + 5);
			if (!info->sense) info->sense = 0x3e;
		} else
		if (idx == 1) {
			info->drive = nv_ro08(bios, ent + 6);
			if (!info->drive) info->drive = 0x37;
			info->sense = nv_ro08(bios, ent + 7);
			if (!info->sense) info->sense = 0x36;
		}

		info->type = DCB_I2C_NV04_BIT;
		info->share = DCB_I2C_UNUSED;
		return 0;
	}

	return -ENOENT;
}
/*
 * gk20a_ram_get (Linux-only variant): allocate DMA-coherent system
 * memory for the GPU.  Size/alignment are normalised to whole pages and
 * a power-of-two page alignment; the per-page address list exposed in
 * mem->base.pages is filled from the single contiguous allocation.
 */
static int
gk20a_ram_get(struct nouveau_fb *pfb, u64 size, u32 align, u32 ncmin,
	      u32 memtype, struct nouveau_mem **pmem)
{
	struct device *dev = nv_device_base(nv_device(pfb));
	struct gk20a_mem *mem;
	u32 type = memtype & 0xff;
	u32 npages, order;
	int i;

	nv_debug(pfb, "%s: size: %llx align: %x, ncmin: %x\n", __func__,
	    size, align, ncmin);

	npages = size >> PAGE_SHIFT;
	if (npages == 0)
		npages = 1;

	if (align == 0)
		align = PAGE_SIZE;
	align >>= PAGE_SHIFT;
	/* round alignment to the next power of 2, if needed */
	/* NOTE(review): if 0 < align < PAGE_SIZE on entry, the shift above
	 * yields 0 and order becomes -1 -- TODO confirm callers always pass
	 * page-multiple alignments */
	order = fls(align);
	if ((align & (align - 1)) == 0)
		order--;
	align = BIT(order);

	/* ensure returned address is correctly aligned */
	npages = max(align, npages);

	mem = kzalloc(sizeof(*mem), GFP_KERNEL);
	if (!mem)
		return -ENOMEM;

	mem->base.size = npages;
	mem->base.memtype = type;

	mem->base.pages = kzalloc(sizeof(dma_addr_t) * npages, GFP_KERNEL);
	if (!mem->base.pages) {
		kfree(mem);
		return -ENOMEM;
	}

	*pmem = &mem->base;

	mem->cpuaddr = dma_alloc_coherent(dev, npages << PAGE_SHIFT,
	    &mem->handle, GFP_KERNEL);
	if (!mem->cpuaddr) {
		nv_error(pfb, "%s: cannot allocate memory!\n", __func__);
		/* frees mem and the pages array through *pmem */
		gk20a_ram_put(pfb, pmem);
		return -ENOMEM;
	}

	align <<= PAGE_SHIFT;

	/* alignment check */
	if (unlikely(mem->handle & (align - 1)))
		nv_warn(pfb,
		    "memory not aligned as requested: %pad (0x%x)\n",
		    &mem->handle, align);

	nv_debug(pfb,
	    "alloc size: 0x%x, align: 0x%x, paddr: %pad, vaddr: %p\n",
	    npages << PAGE_SHIFT, align, &mem->handle, mem->cpuaddr);

	for (i = 0; i < npages; i++)
		mem->base.pages[i] = mem->handle + (PAGE_SIZE * i);

	mem->base.offset = (u64)mem->base.pages[0];

	return 0;
}
/*
 * nouveau_dcb_table: locate and sanity-check the VBIOS Display
 * Configuration Block.  On success *ver/*hdr/*cnt/*len describe the
 * table layout and the table offset is returned; 0x0000 on any failure.
 */
u16
nouveau_dcb_table(struct nouveau_device *device, u8 *ver, u8 *hdr, u8 *cnt,
		  u8 *len)
{
	u16 dcb = 0x0000;

	/* the DCB pointer lives at a fixed offset in the VBIOS image */
	if (device->card_type > NV_04)
		dcb = nv_ro16(device, 0x36);
	if (!dcb) {
		nv_warn(device, "DCB table not found\n");
		return dcb;
	}

	*ver = nv_ro08(device, dcb);

	if (*ver >= 0x42) {
		nv_warn(device, "DCB *ver 0x%02x unknown\n", *ver);
		return 0x0000;
	} else
	if (*ver >= 0x30) {
		/* v3.0+: self-describing header, validated by signature */
		if (nv_ro32(device, dcb + 6) == 0x4edcbdcb) {
			*hdr = nv_ro08(device, dcb + 1);
			*cnt = nv_ro08(device, dcb + 2);
			*len = nv_ro08(device, dcb + 3);
			return dcb;
		}
	} else
	if (*ver >= 0x20) {
		/* v2.x: fixed 8-byte entries; i2c pointer bounds the table */
		if (nv_ro32(device, dcb + 4) == 0x4edcbdcb) {
			u16 i2c = nv_ro16(device, dcb + 2);
			*hdr = 8;
			*cnt = (i2c - dcb) / 8;
			*len = 8;
			return dcb;
		}
	} else
	if (*ver >= 0x15) {
		/* v1.5: recognised by a "DEV_REC" marker before the table */
		if (!nv_memcmp(device, dcb - 7, "DEV_REC", 7)) {
			u16 i2c = nv_ro16(device, dcb + 2);
			*hdr = 4;
			*cnt = (i2c - dcb) / 10;
			*len = 10;
			return dcb;
		}
	} else {
		/*
		 * v1.4 (some NV15/16, NV11+) seems the same as v1.5, but
		 * always has the same single (crt) entry, even when tv-out
		 * present, so the conclusion is this version cannot really
		 * be used.
		 *
		 * v1.2 tables (some NV6/10, and NV15+) normally have the
		 * same 5 entries, which are not specific to the card and so
		 * no use.
		 *
		 * v1.2 does have an I2C table that read_dcb_i2c_table can
		 * handle, but cards exist (nv11 in #14821) with a bad i2c
		 * table pointer, so use the indices parsed in
		 * parse_bmp_structure.
		 *
		 * v1.1 (NV5+, maybe some NV4) is entirely unhelpful
		 */
		nv_warn(device, "DCB contains no useful data\n");
		return 0x0000;
	}

	nv_warn(device, "DCB header validation failed\n");
	return 0x0000;
}
/*
 * dcb_i2c_parse (legacy variant): decode DCB i2c table entry 'idx' into
 * 'info'.  Knows v3.0 typed entries and pre-3.0 packed types, and falls
 * back to fixed BMP offsets on pre-DCB VBIOSes.  Returns 0 on success
 * or -ENOENT when no entry exists for 'idx'.
 */
int
dcb_i2c_parse(struct nouveau_bios *bios, u8 idx, struct dcb_i2c_entry *info)
{
	u8 ver, len;
	u16 ent = dcb_i2c_entry(bios, idx, &ver, &len);
	if (ent) {
		info->data = nv_ro32(bios, ent + 0);
		info->type = nv_ro08(bios, ent + 3);
		if (ver < 0x30) {
			/* pre-3.0: type packed in low 3 bits; 7 = unused,
			 * mapped to the 0xff sentinel */
			info->type &= 0x07;
			if (info->type == 0x07)
				info->type = 0xff;
		}

		switch (info->type) {
		case DCB_I2C_NV04_BIT:
			info->drive = nv_ro08(bios, ent + 0);
			info->sense = nv_ro08(bios, ent + 1);
			return 0;
		case DCB_I2C_NV4E_BIT:
			info->drive = nv_ro08(bios, ent + 1);
			return 0;
		case DCB_I2C_NVIO_BIT:
		case DCB_I2C_NVIO_AUX:
			info->drive = nv_ro08(bios, ent + 0);
			return 0;
		case DCB_I2C_UNUSED:
			return 0;
		default:
			nv_warn(bios, "unknown i2c type %d\n", info->type);
			info->type = DCB_I2C_UNUSED;
			return 0;
		}
	}

	if (bios->bmp_offset && idx < 2) {
		/* BMP (from v4.0 has i2c info in the structure, it's in a
		 * fixed location on earlier VBIOS
		 */
		if (nv_ro08(bios, bios->bmp_offset + 5) < 4)
			ent = 0x0048;
		else
			ent = 0x0036 + bios->bmp_offset;

		if (idx == 0) {
			/* zero register values fall back to known defaults */
			info->drive = nv_ro08(bios, ent + 4);
			if (!info->drive) info->drive = 0x3f;
			info->sense = nv_ro08(bios, ent + 5);
			if (!info->sense) info->sense = 0x3e;
		} else
		if (idx == 1) {
			info->drive = nv_ro08(bios, ent + 6);
			if (!info->drive) info->drive = 0x37;
			info->sense = nv_ro08(bios, ent + 7);
			if (!info->sense) info->sense = 0x36;
		}

		info->type = DCB_I2C_NV04_BIT;
		return 0;
	}

	return -ENOENT;
}