/**
 * INIT_IO_RESTRICT_PROG - opcode 0x32
 *
 */
static void
init_io_restrict_prog(struct nvbios_init *init)
{
	struct nouveau_bios *bios = init->bios;
	u16 port = nv_ro16(bios, init->offset + 1);
	u8 index = nv_ro08(bios, init->offset + 3);
	u8  mask = nv_ro08(bios, init->offset + 4);
	u8 shift = nv_ro08(bios, init->offset + 5);
	u8 count = nv_ro08(bios, init->offset + 6);
	u32  reg = nv_ro32(bios, init->offset + 7);
	u8 conf, i;

	trace("IO_RESTRICT_PROG\tR[0x%06x] = "
	      "((0x%04x[0x%02x] & 0x%02x) >> %d) [{\n",
	      reg, port, index, mask, shift);
	init->offset += 11;

	conf = (init_rdvgai(init, port, index) & mask) >> shift;
	for (i = 0; i < count; i++) {
		u32 data = nv_ro32(bios, init->offset);

		if (i == conf) {
			trace("\t0x%08x *\n", data);
			init_wr32(init, reg, data);
		} else {
			trace("\t0x%08x\n", data);
		}

		init->offset += 4;
	}
	trace("}]\n");
}
u16
nvbios_volt_parse(struct nouveau_bios *bios, u8 *ver, u8 *hdr, u8 *cnt, u8 *len,
		  struct nvbios_volt *info)
{
	u16 volt = nvbios_volt_table(bios, ver, hdr, cnt, len);
	memset(info, 0x00, sizeof(*info));
	switch (!!volt * *ver) {
	case 0x12:
		info->vidmask = nv_ro08(bios, volt + 0x04);
		break;
	case 0x20:
		info->vidmask = nv_ro08(bios, volt + 0x05);
		break;
	case 0x30:
		info->vidmask = nv_ro08(bios, volt + 0x04);
		break;
	case 0x40:
		info->base    = nv_ro32(bios, volt + 0x04);
		info->step    = nv_ro16(bios, volt + 0x08);
		info->vidmask = nv_ro08(bios, volt + 0x0b);
		/*XXX*/
		info->min     = 0;
		info->max     = info->base;
		break;
	case 0x50:
		info->vidmask = nv_ro08(bios, volt + 0x06);
		info->min     = nv_ro32(bios, volt + 0x0a);
		info->max     = nv_ro32(bios, volt + 0x0e);
		info->base    = nv_ro32(bios, volt + 0x12) & 0x00ffffff;
		info->step    = nv_ro16(bios, volt + 0x16);
		break;
	}
	return volt;
}
int
dcb_outp_foreach(struct nouveau_bios *bios, void *data,
		 int (*exec)(struct nouveau_bios *, void *, int, u16))
{
	int ret, idx = -1;
	u8  ver, len;
	u16 outp;

	while ((outp = dcb_outp(bios, ++idx, &ver, &len))) {
		if (nv_ro32(bios, outp) == 0x00000000)
			break; /* seen on an NV11 with DCB v1.5 */
		if (nv_ro32(bios, outp) == 0xffffffff)
			break; /* seen on an NV17 with DCB v2.0 */

		if (nv_ro08(bios, outp) == DCB_OUTPUT_UNUSED)
			continue;
		if (nv_ro08(bios, outp) == DCB_OUTPUT_EOL)
			break;

		ret = exec(bios, data, idx, outp);
		if (ret)
			return ret;
	}

	return 0;
}
/* Fetch and adjust GPU GET pointer
 *
 * Returns:
 *  value >= 0, the adjusted GET pointer
 *  -EINVAL if GET pointer currently outside main push buffer
 *  -EBUSY if timeout exceeded
 */
static inline int
READ_GET(struct nouveau_channel *chan, uint64_t *prev_get, int *timeout)
{
	uint64_t val;

	val = nv_ro32(chan->object, chan->user_get);
	if (chan->user_get_hi)
		val |= (uint64_t)nv_ro32(chan->object, chan->user_get_hi) << 32;

	/* reset counter as long as GET is still advancing, this is
	 * to avoid misdetecting a GPU lockup if the GPU happens to
	 * just be processing an operation that takes a long time
	 */
	if (val != *prev_get) {
		*prev_get = val;
		*timeout = 0;
	}

	if ((++*timeout & 0xff) == 0) {
		udelay(1);
		if (*timeout > 100000)
			return -EBUSY;
	}

	if (val < chan->push.vma.offset ||
	    val > chan->push.vma.offset + (chan->dma.max << 2))
		return -EINVAL;

	return (val - chan->push.vma.offset) >> 2;
}
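/* A minimal caller sketch (hypothetical; the helper name and the free-space
 * arithmetic are simplified assumptions, not the driver's actual DMA-wait
 * logic): poll READ_GET() until enough push-buffer space is available,
 * propagating -EBUSY when GET stops advancing for too long and retrying
 * while GET sits outside the main push buffer.
 */
static int
example_wait_for_space(struct nouveau_channel *chan, int size)
{
	uint64_t prev_get = 0;
	int timeout = 0;

	while (chan->dma.free < size) {
		int get = READ_GET(chan, &prev_get, &timeout);
		if (get == -EBUSY)
			return -EBUSY;	/* GET stopped advancing, GPU appears hung */
		if (get == -EINVAL)
			continue;	/* GET outside main push buffer, keep polling */

		/* simplified free-space recalculation, ignoring wrap/jump handling */
		chan->dma.free = get - chan->dma.cur - 1;
		if (chan->dma.free < 0)
			chan->dma.free += chan->dma.max + 1;
	}
	return 0;
}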
static bool
init_condition_met(struct nvbios_init *init, u8 cond)
{
	struct nouveau_bios *bios = init->bios;
	u16 table = init_condition_table(init);
	if (table) {
		u32 reg = nv_ro32(bios, table + (cond * 12) + 0);
		u32 msk = nv_ro32(bios, table + (cond * 12) + 4);
		u32 val = nv_ro32(bios, table + (cond * 12) + 8);
		trace("\t[0x%02x] (R[0x%06x] & 0x%08x) == 0x%08x\n",
		      cond, reg, msk, val);
		return (init_rd32(init, reg) & msk) == val;
	}
	return false;
}
static int
nv50_dma_push_wait(struct nouveau_channel *chan, int count)
{
	uint32_t cnt = 0, prev_get = 0;

	while (chan->dma.ib_free < count) {
		uint32_t get = nv_ro32(chan->object, 0x88);
		if (get != prev_get) {
			prev_get = get;
			cnt = 0;
		}

		if ((++cnt & 0xff) == 0) {
			DRM_UDELAY(1);
			if (cnt > 100000)
				return -EBUSY;
		}

		chan->dma.ib_free = get - chan->dma.ib_put;
		if (chan->dma.ib_free <= 0)
			chan->dma.ib_free += chan->dma.ib_max;
	}

	return 0;
}
static u32
nv84_fence_read(struct nouveau_channel *chan)
{
	struct nouveau_fifo_chan *fifo = (void *)chan->object;
	struct nv84_fence_priv *priv = chan->drm->fence;
	return nv_ro32(priv->mem, fifo->chid * 16);
}
u32
nvbios_P0260Te(struct nouveau_bios *bios,
	       u8 *ver, u8 *hdr, u8 *cnt, u8 *len, u8 *xnr, u8 *xsz)
{
	struct bit_entry bit_P;
	u32 data = 0x00000000;

	if (!bit_entry(bios, 'P', &bit_P)) {
		if (bit_P.version == 2 && bit_P.length > 0x63)
			data = nv_ro32(bios, bit_P.offset + 0x60);
		if (data) {
			*ver = nv_ro08(bios, data + 0);
			switch (*ver) {
			case 0x10:
				*hdr = nv_ro08(bios, data + 1);
				*cnt = nv_ro08(bios, data + 2);
				*len = 4;
				*xnr = nv_ro08(bios, data + 3);
				*xsz = 4;
				return data;
			default:
				break;
			}
		}
	}

	return 0x00000000;
}
u32
nvbios_M0205Te(struct nouveau_bios *bios,
	       u8 *ver, u8 *hdr, u8 *cnt, u8 *len, u8 *snr, u8 *ssz)
{
	struct bit_entry bit_M;
	u32 data = 0x00000000;

	if (!bit_entry(bios, 'M', &bit_M)) {
		if (bit_M.version == 2 && bit_M.length > 0x08)
			data = nv_ro32(bios, bit_M.offset + 0x05);
		if (data) {
			*ver = nv_ro08(bios, data + 0x00);
			switch (*ver) {
			case 0x10:
				*hdr = nv_ro08(bios, data + 0x01);
				*len = nv_ro08(bios, data + 0x02);
				*ssz = nv_ro08(bios, data + 0x03);
				*snr = nv_ro08(bios, data + 0x04);
				*cnt = nv_ro08(bios, data + 0x05);
				return data;
			default:
				break;
			}
		}
	}

	return 0x00000000;
}
void
gf110_gpio_reset(struct nvkm_gpio *gpio, u8 match)
{
	struct nvkm_bios *bios = nvkm_bios(gpio);
	u8 ver, len;
	u16 entry;
	int ent = -1;

	while ((entry = dcb_gpio_entry(bios, 0, ++ent, &ver, &len))) {
		u32 data = nv_ro32(bios, entry);
		u8  line =   (data & 0x0000003f);
		u8  defs = !!(data & 0x00000080);
		u8  func =   (data & 0x0000ff00) >> 8;
		u8  unk0 =   (data & 0x00ff0000) >> 16;
		u8  unk1 =   (data & 0x1f000000) >> 24;

		if ( func  == DCB_GPIO_UNUSED ||
		    (match != DCB_GPIO_UNUSED && match != func))
			continue;

		gpio->set(gpio, 0, func, line, defs);

		nv_mask(gpio, 0x00d610 + (line * 4), 0xff, unk0);
		if (unk1--)
			nv_mask(gpio, 0x00d740 + (unk1 * 4), 0xff, line);
	}
}
void
nv50_gpio_reset(struct nvkm_gpio *gpio, u8 match)
{
	struct nvkm_bios *bios = nvkm_bios(gpio);
	u8 ver, len;
	u16 entry;
	int ent = -1;

	while ((entry = dcb_gpio_entry(bios, 0, ++ent, &ver, &len))) {
		static const u32 regs[] = { 0xe100, 0xe28c };
		u32 data = nv_ro32(bios, entry);
		u8  line =   (data & 0x0000001f);
		u8  func =   (data & 0x0000ff00) >> 8;
		u8  defs = !!(data & 0x01000000);
		u8  unk0 = !!(data & 0x02000000);
		u8  unk1 = !!(data & 0x04000000);
		u32 val = (unk1 << 16) | unk0;
		u32 reg = regs[line >> 4];
		u32 lsh = line & 0x0f;

		if ( func  == DCB_GPIO_UNUSED ||
		    (match != DCB_GPIO_UNUSED && match != func))
			continue;

		gpio->set(gpio, 0, func, line, defs);

		nv_mask(gpio, reg, 0x00010001 << lsh, val << lsh);
	}
}
void
_nouveau_xtensa_intr(struct nouveau_subdev *subdev)
{
	struct nouveau_xtensa *xtensa = (void *)subdev;
	u32 unk104 = nv_ro32(xtensa, 0xd04);
	u32 intr = nv_ro32(xtensa, 0xc20);
	u32 chan = nv_ro32(xtensa, 0xc28);
	u32 unk10c = nv_ro32(xtensa, 0xd0c);

	if (intr & 0x10)
		nv_warn(xtensa, "Watchdog interrupt, engine hung.\n");
	nv_wo32(xtensa, 0xc20, intr);
	intr = nv_ro32(xtensa, 0xc20);
	if (unk104 == 0x10001 && unk10c == 0x200 && chan && !intr) {
		nv_debug(xtensa, "Enabling FIFO_CTRL\n");
		nv_mask(xtensa, xtensa->addr + 0xd94, 0, xtensa->fifo_val);
	}
}
int
g84_temp_get(struct nvkm_therm *therm)
{
	struct nvkm_fuse *fuse = nvkm_fuse(therm);

	if (nv_ro32(fuse, 0x1a8) == 1)
		return nv_rd32(therm, 0x20400);
	else
		return -ENODEV;
}
int
nve0_ram_init(struct nouveau_object *object)
{
	struct nouveau_fb *pfb = (void *)object->parent;
	struct nve0_ram *ram   = (void *)object;
	struct nouveau_bios *bios = nouveau_bios(pfb);
	static const u8  train0[] = {
		0x00, 0xff, 0xff, 0x00, 0xff, 0x00,
		0x00, 0xff, 0xff, 0x00, 0xff, 0x00,
	};
	static const u32 train1[] = {
		0x00000000, 0xffffffff,
		0x55555555, 0xaaaaaaaa,
		0x33333333, 0xcccccccc,
		0xf0f0f0f0, 0x0f0f0f0f,
		0x00ff00ff, 0xff00ff00,
		0x0000ffff, 0xffff0000,
	};
	u8  ver, hdr, cnt, len, snr, ssz;
	u32 data, save;
	int ret, i;

	ret = nouveau_ram_init(&ram->base);
	if (ret)
		return ret;

	/* run a bunch of tables from rammap table.  there's actually
	 * individual pointers for each rammap entry too, but, nvidia
	 * seem to just run the last two entries' scripts early on in
	 * their init, and never again..  we'll just run 'em all once
	 * for now.
	 *
	 * i strongly suspect that each script is for a separate mode
	 * (likely selected by 0x10f65c's lower bits?), and the
	 * binary driver skips the one that's already been setup by
	 * the init tables.
	 */
	data = nvbios_rammapTe(bios, &ver, &hdr, &cnt, &len, &snr, &ssz);
	if (!data || hdr < 0x15)
		return -EINVAL;

	cnt  = nv_ro08(bios, data + 0x14); /* guess at count */
	data = nv_ro32(bios, data + 0x10); /* guess u32... */
	save = nv_rd32(pfb, 0x10f65c);
	for (i = 0; i < cnt; i++) {
		nv_mask(pfb, 0x10f65c, 0x000000f0, i << 4);
		nvbios_exec(&(struct nvbios_init) {
			.subdev = nv_subdev(pfb),
			.bios = bios,
			.offset = nv_ro32(bios, data), /* guess u32 */
			.execute = 1,
		});
		data += 4;
	}
static int
nouveau_ramht_entry_valid(struct drm_device *dev, struct nouveau_gpuobj *ramht,
			  uint32_t offset)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	uint32_t ctx = nv_ro32(dev, ramht, (offset + 4)/4);

	if (dev_priv->card_type < NV_40)
		return ((ctx & NV_RAMHT_CONTEXT_VALID) != 0);
	return (ctx != 0);
}
void
g84_sensor_setup(struct nvkm_therm *therm)
{
	struct nvkm_fuse *fuse = nvkm_fuse(therm);

	/* enable temperature reading for cards with insane defaults */
	if (nv_ro32(fuse, 0x1a8) == 1) {
		nv_mask(therm, 0x20008, 0x80008000, 0x80000000);
		nv_mask(therm, 0x2000c, 0x80000003, 0x00000000);
		mdelay(20); /* wait for the temperature to stabilize */
	}
}
u32
nvbios_pcirTe(struct nvkm_bios *bios, u32 base, u8 *ver, u16 *hdr)
{
	u32 data = nv_ro16(bios, base + 0x18);
	if (data) {
		data += base;
		switch (nv_ro32(bios, data + 0x00)) {
		case 0x52494350: /* PCIR */
		case 0x53494752: /* RGIS */
		case 0x5344504e: /* NPDS */
			*hdr = nv_ro16(bios, data + 0x0a);
			*ver = nv_ro08(bios, data + 0x0c);
			break;
		default:
			nv_debug(bios, "%08x: PCIR signature (%08x) unknown\n",
				 data, nv_ro32(bios, data + 0x00));
			data = 0;
			break;
		}
	}
	return data;
}
u32
nvbios_P0260Xp(struct nouveau_bios *bios, int idx, u8 *ver, u8 *hdr,
	       struct nvbios_P0260X *info)
{
	u32 data = nvbios_P0260Xe(bios, idx, ver, hdr);
	memset(info, 0x00, sizeof(*info));
	switch (!!data * *ver) {
	case 0x10:
		info->data = nv_ro32(bios, data);
		return data;
	default:
		break;
	}
	return 0x00000000;
}
u16
dcb_outp_parse(struct nouveau_bios *bios, u8 idx, u8 *ver, u8 *len,
	       struct dcb_output *outp)
{
	u16 dcb = dcb_outp(bios, idx, ver, len);
	if (dcb) {
		if (*ver >= 0x20) {
			u32 conn = nv_ro32(bios, dcb + 0x00);
			outp->or        = (conn & 0x0f000000) >> 24;
			outp->location  = (conn & 0x00300000) >> 20;
			outp->bus       = (conn & 0x000f0000) >> 16;
			outp->connector = (conn & 0x0000f000) >> 12;
			outp->heads     = (conn & 0x00000f00) >> 8;
			outp->i2c_index = (conn & 0x000000f0) >> 4;
			outp->type      = (conn & 0x0000000f);
			outp->link      = 0;
		} else {
bool
nouveau_timer_wait_ne(void *obj, u64 nsec, u32 addr, u32 mask, u32 data)
{
	struct nouveau_timer *ptimer = nouveau_timer(obj);
	u64 time0;

	time0 = ptimer->read(ptimer);
	do {
		if (nv_iclass(obj, NV_SUBDEV_CLASS)) {
			if ((nv_rd32(obj, addr) & mask) != data)
				return true;
		} else {
			if ((nv_ro32(obj, addr) & mask) != data)
				return true;
		}
	} while (ptimer->read(ptimer) - time0 < nsec);

	return false;
}
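/* Hypothetical usage sketch for the helper above; the register offset,
 * the 'priv' object and the 2ms budget are illustrative assumptions.
 * nouveau_timer_wait_ne() returns true as soon as the masked value
 * differs from 'data', false if it never changes within 'nsec'.
 */
if (!nouveau_timer_wait_ne(priv, 2000000, 0x00b000, 0x00000001, 0x00000001))
	nv_warn(priv, "timeout waiting for busy bit to clear\n");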
u32
nvbios_rammapEp(struct nouveau_bios *bios, int idx,
		u8 *ver, u8 *hdr, u8 *cnt, u8 *len, struct nvbios_ramcfg *p)
{
	u32 data = nvbios_rammapEe(bios, idx, ver, hdr, cnt, len), temp;
	memset(p, 0x00, sizeof(*p));
	p->rammap_ver = *ver;
	p->rammap_hdr = *hdr;
	switch (!!data * *ver) {
	case 0x10:
		p->rammap_min      = nv_ro16(bios, data + 0x00);
		p->rammap_max      = nv_ro16(bios, data + 0x02);
		p->rammap_10_04_02 = (nv_ro08(bios, data + 0x04) & 0x02) >> 1;
		p->rammap_10_04_08 = (nv_ro08(bios, data + 0x04) & 0x08) >> 3;
		break;
	case 0x11:
		p->rammap_min      = nv_ro16(bios, data + 0x00);
		p->rammap_max      = nv_ro16(bios, data + 0x02);
		p->rammap_11_08_01 = (nv_ro08(bios, data + 0x08) & 0x01) >> 0;
		p->rammap_11_08_0c = (nv_ro08(bios, data + 0x08) & 0x0c) >> 2;
		p->rammap_11_08_10 = (nv_ro08(bios, data + 0x08) & 0x10) >> 4;
		temp = nv_ro32(bios, data + 0x09);
		p->rammap_11_09_01ff = (temp & 0x000001ff) >> 0;
		p->rammap_11_0a_03fe = (temp & 0x0003fe00) >> 9;
		p->rammap_11_0a_0400 = (temp & 0x00040000) >> 18;
		p->rammap_11_0a_0800 = (temp & 0x00080000) >> 19;
		p->rammap_11_0b_01f0 = (temp & 0x01f00000) >> 20;
		p->rammap_11_0b_0200 = (temp & 0x02000000) >> 25;
		p->rammap_11_0b_0400 = (temp & 0x04000000) >> 26;
		p->rammap_11_0b_0800 = (temp & 0x08000000) >> 27;
		p->rammap_11_0d    = nv_ro08(bios, data + 0x0d);
		p->rammap_11_0e    = nv_ro08(bios, data + 0x0e);
		p->rammap_11_0f    = nv_ro08(bios, data + 0x0f);
		p->rammap_11_11_0c = (nv_ro08(bios, data + 0x11) & 0x0c) >> 2;
		break;
	default:
		data = 0;
		break;
	}
	return data;
}
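/* Note on the bitfield naming used by this parser (and by nvbios_timingEp
 * further below): a name such as rammap_11_0a_03fe appears to encode the
 * table version (1.1), the starting byte offset of the field within the
 * entry (0x0a), and the mask applied to a value read beginning at that
 * offset (0x03fe), with the shift in the code normalising the field to
 * bit 0.
 */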
u16
nvbios_outp_parse(struct nouveau_bios *bios, u8 idx,
		  u8 *ver, u8 *hdr, u8 *cnt, u8 *len, struct nvbios_outp *info)
{
	u16 data = nvbios_outp_entry(bios, idx, ver, hdr, cnt, len);
	if (data && *hdr >= 0x0a) {
		info->type      = nv_ro16(bios, data + 0x00);
		info->mask      = nv_ro32(bios, data + 0x02);
		if (*ver <= 0x20) /* match any link */
			info->mask |= 0x00c0;
		info->script[0] = nv_ro16(bios, data + 0x06);
		info->script[1] = nv_ro16(bios, data + 0x08);
		info->script[2] = 0x0000;
		if (*hdr >= 0x0c)
			info->script[2] = nv_ro16(bios, data + 0x0a);
		return data;
	}
	return 0x0000;
}
static int
nvkm_ioctl_rd(struct nouveau_handle *handle, void *data, u32 size)
{
	struct nouveau_object *object = handle->object;
	struct nouveau_ofuncs *ofuncs = object->oclass->ofuncs;
	union {
		struct nvif_ioctl_rd_v0 v0;
	} *args = data;
	int ret;

	nv_ioctl(object, "rd size %d\n", size);
	if (nvif_unpack(args->v0, 0, 0, false)) {
		nv_ioctl(object, "rd vers %d size %d addr %016llx\n",
			 args->v0.version, args->v0.size, args->v0.addr);
		switch (args->v0.size) {
		case 1:
			if (ret = -ENODEV, ofuncs->rd08) {
				args->v0.data = nv_ro08(object, args->v0.addr);
				ret = 0;
			}
			break;
		case 2:
			if (ret = -ENODEV, ofuncs->rd16) {
				args->v0.data = nv_ro16(object, args->v0.addr);
				ret = 0;
			}
			break;
		case 4:
			if (ret = -ENODEV, ofuncs->rd32) {
				args->v0.data = nv_ro32(object, args->v0.addr);
				ret = 0;
			}
			break;
		default:
			ret = -EINVAL;
			break;
		}
	}

	return ret;
}
int
nouveau_ramht_insert(struct nouveau_ramht *ramht, int chid,
		     u32 handle, u32 context)
{
	struct nouveau_bar *bar = nouveau_bar(ramht);
	u32 co, ho;

	co = ho = nouveau_ramht_hash(ramht, chid, handle);
	do {
		if (!nv_ro32(ramht, co + 4)) {
			nv_wo32(ramht, co + 0, handle);
			nv_wo32(ramht, co + 4, context);
			if (bar)
				bar->flush(bar);
			return co;
		}

		co += 8;
		if (co >= nv_gpuobj(ramht)->size)
			co = 0;
	} while (co != ho);

	return -ENOMEM;
}
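/* Hypothetical lookup sketch mirroring the insert loop above (the helper
 * name is an assumption, and a real lookup would also validate the stored
 * context against the channel): probe from the handle's hash bucket,
 * advancing one 8-byte slot at a time and wrapping at the end of the
 * RAMHT, until the handle is found or the scan returns to its start.
 */
static int
example_ramht_find(struct nouveau_ramht *ramht, int chid, u32 handle)
{
	u32 co, ho;

	co = ho = nouveau_ramht_hash(ramht, chid, handle);
	do {
		if (nv_ro32(ramht, co + 0) == handle)
			return co;

		co += 8;
		if (co >= nv_gpuobj(ramht)->size)
			co = 0;
	} while (co != ho);

	return -ENOENT;
}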
int
_nouveau_falcon_init(struct nouveau_object *object)
{
	struct nouveau_device *device = nv_device(object);
	struct nouveau_falcon *falcon = (void *)object;
	const struct firmware *fw;
	char name[32] = "internal";
	int ret, i;
	u32 caps;

	/* enable engine, and determine its capabilities */
	ret = nouveau_engine_init(&falcon->base);
	if (ret)
		return ret;

	if (device->chipset <  0xa3 ||
	    device->chipset == 0xaa || device->chipset == 0xac) {
		falcon->version = 0;
		falcon->secret  = (falcon->addr == 0x087000) ? 1 : 0;
	} else {
		caps = nv_ro32(falcon, 0x12c);
		falcon->version = (caps & 0x0000000f);
		falcon->secret  = (caps & 0x00000030) >> 4;
	}

	caps = nv_ro32(falcon, 0x108);
	falcon->code.limit = (caps & 0x000001ff) << 8;
	falcon->data.limit = (caps & 0x0003fe00) >> 1;

	nv_debug(falcon, "falcon version: %d\n", falcon->version);
	nv_debug(falcon, "secret level: %d\n", falcon->secret);
	nv_debug(falcon, "code limit: %d\n", falcon->code.limit);
	nv_debug(falcon, "data limit: %d\n", falcon->data.limit);

	/* wait for 'uc halted' to be signalled before continuing */
	if (falcon->secret && falcon->version < 4) {
		if (!falcon->version)
			nv_wait(falcon, 0x008, 0x00000010, 0x00000010);
		else
			nv_wait(falcon, 0x180, 0x80000000, 0);
		nv_wo32(falcon, 0x004, 0x00000010);
	}

	/* disable all interrupts */
	nv_wo32(falcon, 0x014, 0xffffffff);

	/* no default ucode provided by the engine implementation, try and
	 * locate a "self-bootstrapping" firmware image for the engine
	 */
	if (!falcon->code.data) {
		snprintf(name, sizeof(name), "nouveau/nv%02x_fuc%03x",
			 device->chipset, falcon->addr >> 12);

		ret = request_firmware(&fw, name, &device->pdev->dev);
		if (ret == 0) {
			falcon->code.data = kmemdup(fw->data, fw->size, GFP_KERNEL);
			falcon->code.size = fw->size;
			falcon->data.data = NULL;
			falcon->data.size = 0;
			release_firmware(fw);
		}

		falcon->external = true;
	}
u16
nouveau_dcb_table(struct nouveau_device *device, u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
{
	u16 dcb = 0x0000;

	if (device->card_type > NV_04)
		dcb = nv_ro16(device, 0x36);
	if (!dcb) {
		nv_warn(device, "DCB table not found\n");
		return dcb;
	}

	*ver = nv_ro08(device, dcb);

	if (*ver >= 0x42) {
		nv_warn(device, "DCB *ver 0x%02x unknown\n", *ver);
		return 0x0000;
	} else
	if (*ver >= 0x30) {
		if (nv_ro32(device, dcb + 6) == 0x4edcbdcb) {
			*hdr = nv_ro08(device, dcb + 1);
			*cnt = nv_ro08(device, dcb + 2);
			*len = nv_ro08(device, dcb + 3);
			return dcb;
		}
	} else
	if (*ver >= 0x20) {
		if (nv_ro32(device, dcb + 4) == 0x4edcbdcb) {
			u16 i2c = nv_ro16(device, dcb + 2);
			*hdr = 8;
			*cnt = (i2c - dcb) / 8;
			*len = 8;
			return dcb;
		}
	} else
	if (*ver >= 0x15) {
		if (!nv_memcmp(device, dcb - 7, "DEV_REC", 7)) {
			u16 i2c = nv_ro16(device, dcb + 2);
			*hdr = 4;
			*cnt = (i2c - dcb) / 10;
			*len = 10;
			return dcb;
		}
	} else {
		/*
		 * v1.4 (some NV15/16, NV11+) seems the same as v1.5, but
		 * always has the same single (crt) entry, even when tv-out
		 * present, so the conclusion is this version cannot really
		 * be used.
		 *
		 * v1.2 tables (some NV6/10, and NV15+) normally have the
		 * same 5 entries, which are not specific to the card and so
		 * no use.
		 *
		 * v1.2 does have an I2C table that read_dcb_i2c_table can
		 * handle, but cards exist (nv11 in #14821) with a bad i2c
		 * table pointer, so use the indices parsed in
		 * parse_bmp_structure.
		 *
		 * v1.1 (NV5+, maybe some NV4) is entirely unhelpful
		 */
		nv_warn(device, "DCB contains no useful data\n");
		return 0x0000;
	}

	nv_warn(device, "DCB header validation failed\n");
	return 0x0000;
}
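/* Hypothetical usage sketch: locate the DCB and iterate its entries using
 * only the header values returned above.  For the versions handled here the
 * entries appear to start at dcb + hdr and are len bytes apart; parsing of
 * the entry contents themselves is omitted.
 */
u8 ver, hdr, cnt, len;
u16 dcb = nouveau_dcb_table(device, &ver, &hdr, &cnt, &len);
if (dcb) {
	int i;
	for (i = 0; i < cnt; i++) {
		u16 entry = dcb + hdr + (i * len);
		/* decode the 'len'-byte output entry at 'entry' */
	}
}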
static u32
nv84_fence_read(struct nouveau_channel *chan)
{
	struct nv84_fence_priv *priv = nv_engine(chan->dev, NVOBJ_ENGINE_FENCE);
	return nv_ro32(priv->mem, chan->id * 16);
}
u32
nv10_fence_read(struct nouveau_channel *chan)
{
	return nv_ro32(chan->object, 0x0048);
}
u16
nvbios_timingEp(struct nouveau_bios *bios, int idx,
		u8 *ver, u8 *hdr, u8 *cnt, u8 *len, struct nvbios_ramcfg *p)
{
	u16 data = nvbios_timingEe(bios, idx, ver, hdr, cnt, len), temp;
	p->timing_ver = *ver;
	p->timing_hdr = *hdr;
	switch (!!data * *ver) {
	case 0x10:
		p->timing_10_WR  = nv_ro08(bios, data + 0x00);
		p->timing_10_CL  = nv_ro08(bios, data + 0x02);
		p->timing_10_ODT = nv_ro08(bios, data + 0x0e) & 0x07;
		p->timing_10_CWL = nv_ro08(bios, data + 0x13);
		break;
	case 0x20:
		p->timing[0]  = nv_ro32(bios, data + 0x00);
		p->timing[1]  = nv_ro32(bios, data + 0x04);
		p->timing[2]  = nv_ro32(bios, data + 0x08);
		p->timing[3]  = nv_ro32(bios, data + 0x0c);
		p->timing[4]  = nv_ro32(bios, data + 0x10);
		p->timing[5]  = nv_ro32(bios, data + 0x14);
		p->timing[6]  = nv_ro32(bios, data + 0x18);
		p->timing[7]  = nv_ro32(bios, data + 0x1c);
		p->timing[8]  = nv_ro32(bios, data + 0x20);
		p->timing[9]  = nv_ro32(bios, data + 0x24);
		p->timing[10] = nv_ro32(bios, data + 0x28);
		p->timing_20_2e_03 = (nv_ro08(bios, data + 0x2e) & 0x03) >> 0;
		p->timing_20_2e_30 = (nv_ro08(bios, data + 0x2e) & 0x30) >> 4;
		p->timing_20_2e_c0 = (nv_ro08(bios, data + 0x2e) & 0xc0) >> 6;
		p->timing_20_2f_03 = (nv_ro08(bios, data + 0x2f) & 0x03) >> 0;
		temp = nv_ro16(bios, data + 0x2c);
		p->timing_20_2c_003f = (temp & 0x003f) >> 0;
		p->timing_20_2c_1fc0 = (temp & 0x1fc0) >> 6;
		p->timing_20_30_07 = (nv_ro08(bios, data + 0x30) & 0x07) >> 0;
		p->timing_20_30_f8 = (nv_ro08(bios, data + 0x30) & 0xf8) >> 3;
		temp = nv_ro16(bios, data + 0x31);
		p->timing_20_31_0007 = (temp & 0x0007) >> 0;
		p->timing_20_31_0078 = (temp & 0x0078) >> 3;
		p->timing_20_31_0780 = (temp & 0x0780) >> 7;
		p->timing_20_31_0800 = (temp & 0x0800) >> 11;
		p->timing_20_31_7000 = (temp & 0x7000) >> 12;
		p->timing_20_31_8000 = (temp & 0x8000) >> 15;
		break;
	default:
		data = 0;
		break;
	}
	return data;
}
static u32
nv04_instobj_rd32(struct nouveau_object *object, u64 addr)
{
	struct nv04_instobj_priv *node = (void *)object;
	return nv_ro32(object->engine, node->mem->offset + addr);
}