/* Top-half ISR for the NV50 "user" interrupt: validates that the interrupt
 * is actually pending, acknowledges it, and defers the registered handler
 * to a workqueue. */
void nv50_irq_user_isr(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_user_irq_engine *user_irq = &dev_priv->engine.user_irq;
	struct nouveau_user_irq_handler *handler = &user_irq->handler[0];
	u32 pending;

	/* mask the user interrupt source while we service it */
	nv_mask(dev, 0x1140, 0x04000000, 0);

	pending = nv_rd32(dev, 0x1100);
	if (!(pending & 0x04000000)) {
		NV_WARN(dev, "User ISR called without user interrupt");
		return;
	}

	/* ack: write the pending bit back to 0x1100 */
	nv_mask(dev, 0x1100, 0x04000000, 0x04000000);

	if (!handler->service_routine) {
		NV_WARN(dev, "User IRQ raised without registered handler");
		return;
	}

	schedule_work(&handler->work);
}
int nv04_timer_init(struct drm_device *dev) { struct drm_nouveau_private *dev_priv = dev->dev_private; u32 m, n, d; nv_wr32(dev, NV04_PTIMER_INTR_EN_0, 0x00000000); nv_wr32(dev, NV04_PTIMER_INTR_0, 0xFFFFFFFF); /* aim for 31.25MHz, which gives us nanosecond timestamps */ d = 1000000 / 32; /* determine base clock for timer source */ if (dev_priv->chipset < 0x40) { n = nouveau_hw_get_clock(dev, PLL_CORE); } else if (dev_priv->chipset == 0x40) { /*XXX: figure this out */ n = 0; } else { n = dev_priv->crystal; m = 1; while (n < (d * 2)) { n += (n / m); m++; } nv_wr32(dev, 0x009220, m - 1); } if (!n) { NV_WARN(dev, "PTIMER: unknown input clock freq\n"); if (!nv_rd32(dev, NV04_PTIMER_NUMERATOR) || !nv_rd32(dev, NV04_PTIMER_DENOMINATOR)) { nv_wr32(dev, NV04_PTIMER_NUMERATOR, 1); nv_wr32(dev, NV04_PTIMER_DENOMINATOR, 1); } return 0; } /* reduce ratio to acceptable values */ while (((n % 5) == 0) && ((d % 5) == 0)) { n /= 5; d /= 5; } while (((n % 2) == 0) && ((d % 2) == 0)) { n /= 2; d /= 2; } while (n > 0xffff || d > 0xffff) { n >>= 1; d >>= 1; } nv_wr32(dev, NV04_PTIMER_NUMERATOR, n); nv_wr32(dev, NV04_PTIMER_DENOMINATOR, d); return 0; }
static void pscnv_gem_pager_dtor(void *handle) { struct drm_gem_object *gem_obj = handle; struct pscnv_bo *bo = gem_obj->driver_private; struct drm_device *dev = gem_obj->dev; vm_object_t devobj; DRM_LOCK(dev); devobj = cdev_pager_lookup(handle); if (devobj != NULL) { vm_size_t page_count = OFF_TO_IDX(bo->size); vm_page_t m; int i; VM_OBJECT_LOCK(devobj); for (i = 0; i < page_count; i++) { m = vm_page_lookup(devobj, i); if (!m) continue; if (pscnv_mem_debug > 0) NV_WARN(dev, "Freeing %010llx + %08llx (%p\n", bo->start, i * PAGE_SIZE, m); cdev_pager_free_page(devobj, m); } VM_OBJECT_UNLOCK(devobj); vm_object_deallocate(devobj); } else { DRM_UNLOCK(dev); NV_ERROR(dev, "Could not find handle %p bo %p\n", handle, bo); return; } if (pscnv_mem_debug > 0) NV_WARN(dev, "Freed %010llx (%p)\n", bo->start, bo); //kfree(bo->fake_pages); if (bo->chan) pscnv_chan_unref(bo->chan); else drm_gem_object_unreference_unlocked(gem_obj); DRM_UNLOCK(dev); }
/* nouveau_bo_new - allocate and initialise a nouveau buffer object.
 *
 * NOTE(review): this definition is truncated in the visible chunk (it stops
 * inside the Fermi/pre-Fermi branch), so only the visible portion is
 * documented here.
 */
int nouveau_bo_new(struct nouveau_cli *cli, u64 size, int align,
	uint32_t flags, uint32_t tile_mode, uint32_t tile_flags,
	struct sg_table *sg, struct reservation_object *robj,
	struct nouveau_bo **pnvbo)
{
	struct nouveau_drm *drm = cli->drm;
	struct nouveau_bo *nvbo;
	struct nvif_mmu *mmu = &cli->mmu;
	struct nvif_vmm *vmm = &cli->vmm.vmm;
	size_t acc_size;
	int type = ttm_bo_type_device;
	int ret, i, pi = -1;

	/* zero-sized allocations are rejected outright */
	if (!size) {
		NV_WARN(drm, "skipped size %016llx\n", size);
		return -EINVAL;
	}

	/* buffers backed by a scatter-gather table get the sg TTM type */
	if (sg)
		type = ttm_bo_type_sg;

	nvbo = kzalloc(sizeof(struct nouveau_bo), GFP_KERNEL);
	if (!nvbo)
		return -ENOMEM;
	INIT_LIST_HEAD(&nvbo->head);
	INIT_LIST_HEAD(&nvbo->entry);
	INIT_LIST_HEAD(&nvbo->vma_list);
	nvbo->bo.bdev = &drm->ttm.bdev;
	nvbo->cli = cli;

	/* This is confusing, and doesn't actually mean we want an uncached
	 * mapping, but is what NOUVEAU_GEM_DOMAIN_COHERENT gets translated
	 * into in nouveau_gem_new().
	 */
	if (flags & TTM_PL_FLAG_UNCACHED) {
		/* Determine if we can get a cache-coherent map, forcing
		 * uncached mapping if we can't.
		 */
		if (!nouveau_drm_use_coherent_gpu_mapping(drm))
			nvbo->force_coherent = true;
	}

	if (cli->device.info.family >= NV_DEVICE_INFO_V0_FERMI) {
		/* memory "kind" (tiling/compression layout) is encoded in
		 * bits 8..15 of tile_flags on Fermi+ */
		nvbo->kind = (tile_flags & 0x0000ff00) >> 8;
		if (!nvif_mmu_kind_valid(mmu, nvbo->kind)) {
			kfree(nvbo);
			return -EINVAL;
		}

		/* compression is in use when the MMU maps this kind to a
		 * different (uncompressed) kind */
		nvbo->comp = mmu->kind[nvbo->kind] != nvbo->kind;
	} else
static void legacy_perf_init(struct drm_device *dev) { struct nouveau_device *device = nouveau_dev(dev); struct nouveau_drm *drm = nouveau_drm(dev); struct nvbios *bios = &drm->vbios; struct nouveau_pm *pm = nouveau_pm(dev); char *perf, *entry, *bmp = &bios->data[bios->offset]; int headerlen, use_straps; if (bmp[5] < 0x5 || bmp[6] < 0x14) { NV_DEBUG(drm, "BMP version too old for perf\n"); return; } perf = ROMPTR(dev, bmp[0x73]); if (!perf) { NV_DEBUG(drm, "No memclock table pointer found.\n"); return; } switch (perf[0]) { case 0x12: case 0x14: case 0x18: use_straps = 0; headerlen = 1; break; case 0x01: use_straps = perf[1] & 1; headerlen = (use_straps ? 8 : 2); break; default: NV_WARN(drm, "Unknown memclock table version %x.\n", perf[0]); return; } entry = perf + headerlen; if (use_straps) entry += (nv_rd32(device, NV_PEXTDEV_BOOT_0) & 0x3c) >> 1; sprintf(pm->perflvl[0].name, "performance_level_0"); pm->perflvl[0].memory = ROM16(entry[0]) * 20; pm->nr_perflvl = 1; }
/* Kill a channel on the NVC0 PFIFO: disable it, kick it off the engine and
 * refresh the playlist, then verify the kickoff actually took.
 *
 * NOTE(review): this definition is truncated in the visible chunk — the
 * matching spin_unlock_irqrestore() for context_switch_lock and the rest of
 * the teardown are not visible here.
 */
static void nvc0_fifo_chan_kill(struct pscnv_engine *eng, struct pscnv_chan *ch)
{
	struct drm_device *dev = ch->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nvc0_fifo_ctx *fifo_ctx = ch->engdata[PSCNV_ENGINE_FIFO];
	uint32_t status;
	unsigned long flags;

	BUG_ON(!fifo_ctx);

	/* bit 28: active,
	 * bit 12: loaded,
	 * bit 0: enabled */
	spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
	status = nv_rd32(dev, 0x3004 + ch->cid * 8);
	/* clear the enable bit for this channel's CHAN_TABLE slot */
	nv_wr32(dev, 0x3004 + ch->cid * 8, status & ~1);
	/* request a kickoff of this channel id and wait for it to land */
	nv_wr32(dev, 0x2634, ch->cid);
	if (!nv_wait(dev, 0x2634, ~0, ch->cid))
		NV_WARN(dev, "WARNING: PFIFO.KICK_CHID 2634 = 0x%08x (instead of kicked channel id: %08x)\n",
			nv_rd32(dev, 0x2634), ch->cid);
	nvc0_fifo_playlist_update(dev);
	nvc0_fifo_intr_engine(dev);
	if (nv_rd32(dev, 0x3004 + ch->cid * 8) & 0x1110) {
		// 0x1110 mask contains ACQUIRE_PENDING, UNK8, LOADED bits
		status = nv_rd32(dev, 0x3004 + ch->cid * 8);
		NV_WARN(dev, "WARNING: PFIFO kickoff fail: PFIFO.CHAN_TABLE[%d].STATE = %08x"
			" (%s %s %s %s %s)\n", ch->cid, status,
			(status & 0x0010) ? "ACQUIRE_PENDING" : "",
			(status & 0x0100) ? "UNK8" : "",
			(status & 0x1000) ? "LOADED" : "",
			pgf_unit_str((status >> 16) & 0x1f),
			(status & 0x10000000) ? "PENDING" : "");
	}
/* cdev pager constructor: called when a GEM object is mmap()ed.
 * Ensures the BO has a user mapping and takes a reference for the pager's
 * lifetime (channel ref for fifo-reg BOs, and a GEM object ref).
 * Returns 0 on success or a negative errno from map_user(). */
static int
pscnv_gem_pager_ctor(void *handle, vm_ooffset_t size, vm_prot_t prot,
		     vm_ooffset_t foff, struct ucred *cred, u_short *color)
{
	struct drm_gem_object *gem_obj = handle;
	struct drm_device *dev = gem_obj->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct pscnv_bo *bo = gem_obj->driver_private;
	int ret;

	*color = 0; /* ...? */

	/* fifo-reg BOs and already-mapped sysram BOs need no user mapping */
	if (!bo->chan && !bo->dmapages && (ret = -dev_priv->vm->map_user(bo)))
		return (ret);

	if (bo->chan)
		pscnv_chan_ref(bo->chan);
	/* else */
	drm_gem_object_reference(gem_obj);
	/* BUGFIX: debug chatter gated on pscnv_mem_debug like the other pager
	 * callbacks, instead of warning on every single mmap. */
	if (pscnv_mem_debug > 0)
		NV_WARN(dev, "Mapping bo %p, handle %p, chan %p\n",
			bo, handle, bo->chan);
	return (0);
}
void pscnv_dma_exit(struct drm_device *dev) { struct drm_nouveau_private *dev_priv = dev->dev_private; struct pscnv_dma *dma = dev_priv->dma; if (!dma) { NV_WARN(dev, "DMA: pscnv_dma_exit() called but DMA was never initialized!\n"); return; } NV_INFO(dev, "DMA: Exiting...\n"); //No need to undo pscnv_ib_init_subch, since it only performs //subchannel configuration on a channel we are about to close anyways... //No need to undo pscnv_ib_add_fence(), as pscnv_ib_chan_kill() inside pscnv_ib_chan_free() takes care of this pscnv_ib_chan_free(dma->ib_chan); pscnv_vspace_unref(dma->vs); //Undo pscnv_vspace_new should not be necessary, as pscnv_vspace_unref() does freeing, unless we have one reference too many mutex_destroy(&dma->lock); kfree(dma); dev_priv->dma = dma = 0; }
/* Initialise temperature monitoring: parse the VBIOS sensor table located
 * via BIT 'P', then probe for an external I2C sensor.  A NULL table pointer
 * (unknown P version or bad ROM pointer) is passed straight through to the
 * parser — presumably it falls back to defaults in that case; confirm. */
void nouveau_temp_init(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nvbios *bios = &dev_priv->vbios;
	struct bit_entry P;
	u8 *temp = NULL;

	if (bios->type == NVBIOS_BIT) {
		if (bit_table(dev, 'P', &P))
			return;

		switch (P.version) {
		case 1:
			temp = ROMPTR(bios, P.data[12]);
			break;
		case 2:
			temp = ROMPTR(bios, P.data[16]);
			break;
		default:
			NV_WARN(dev, "unknown temp for BIT P %d\n", P.version);
			break;
		}

		nouveau_temp_vbios_parse(dev, temp);
	}

	nouveau_temp_probe_i2c(dev);
}
/* Look up the memory-timing set to use for a given memory clock.
 *
 * @P:         BIT 'P' table entry (version 1 or 2)
 * @memclk:    target memory clock
 * @entry:     first timing record (perf v1 layout; recomputed for v2)
 * @recordlen: size of one timing record
 * @entries:   number of timing records
 *
 * Returns a pointer into pm->memtimings.timing[], or NULL when no usable
 * timing exists for this clock/strap combination.
 */
static struct nouveau_pm_memtiming *
nouveau_perf_timing(struct drm_device *dev, struct bit_entry *P,
		    u16 memclk, u8 *entry, u8 recordlen, u8 entries)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_pm_engine *pm = &dev_priv->engine.pm;
	struct nvbios *bios = &dev_priv->vbios;
	u8 ramcfg;
	int i;

	/* perf v2 has a separate "timing map" table, we have to match
	 * the target memory clock to a specific entry, *then* use
	 * ramcfg to select the correct subentry
	 */
	if (P->version == 2) {
		u8 *tmap = ROMPTR(bios, P->data[4]);
		if (!tmap) {
			NV_DEBUG(dev, "no timing map pointer\n");
			return NULL;
		}

		if (tmap[0] != 0x10) {
			NV_WARN(dev, "timing map 0x%02x unknown\n", tmap[0]);
			return NULL;
		}

		/* find the map entry whose [min,max] clock range covers
		 * memclk; each record is header + subentries */
		entry = tmap + tmap[1];
		recordlen = tmap[2] + (tmap[4] * tmap[3]);
		for (i = 0; i < tmap[5]; i++, entry += recordlen) {
			if (memclk >= ROM16(entry[0]) &&
			    memclk <= ROM16(entry[2]))
				break;
		}

		if (i == tmap[5]) {
			NV_WARN(dev, "no match in timing map table\n");
			return NULL;
		}

		/* skip the record header to reach its subentries */
		entry += tmap[2];
		recordlen = tmap[3];
		entries = tmap[4];
	}

	/* the ramcfg strap (possibly remapped through the ram restrict
	 * table) picks the subentry for this board's memory config */
	ramcfg = (nv_rd32(dev, NV_PEXTDEV_BOOT_0) & 0x0000003c) >> 2;
	if (bios->ram_restrict_tbl_ptr)
		ramcfg = bios->data[bios->ram_restrict_tbl_ptr + ramcfg];

	if (ramcfg >= entries) {
		NV_WARN(dev, "ramcfg strap out of bounds!\n");
		return NULL;
	}

	entry += ramcfg * recordlen;
	/* entry[1] indexes the parsed timing sets; 0xff means "no timing" */
	if (entry[1] >= pm->memtimings.nr_timing) {
		if (entry[1] != 0xff)
			NV_WARN(dev, "timingset %d does not exist\n", entry[1]);
		return NULL;
	}

	return &pm->memtimings.timing[entry[1]];
}
void nouveau_perf_init(struct drm_device *dev) { struct drm_nouveau_private *dev_priv = dev->dev_private; struct nouveau_pm_engine *pm = &dev_priv->engine.pm; struct nvbios *bios = &dev_priv->vbios; struct bit_entry P; struct nouveau_pm_memtimings *memtimings = &pm->memtimings; struct nouveau_pm_tbl_header mt_hdr; u8 version, headerlen, recordlen, entries; u8 *perf, *entry; int vid, i; if (bios->type == NVBIOS_BIT) { if (bit_table(dev, 'P', &P)) return; if (P.version != 1 && P.version != 2) { NV_WARN(dev, "unknown perf for BIT P %d\n", P.version); return; } perf = ROMPTR(bios, P.data[0]); version = perf[0]; headerlen = perf[1]; if (version < 0x40) { recordlen = perf[3] + (perf[4] * perf[5]); entries = perf[2]; } else { recordlen = perf[2] + (perf[3] * perf[4]); entries = perf[5]; } } else { if (bios->data[bios->offset + 6] < 0x25) { legacy_perf_init(dev); return; } perf = ROMPTR(bios, bios->data[bios->offset + 0x94]); if (!perf) { NV_DEBUG(dev, "perf table pointer invalid\n"); return; } version = perf[1]; headerlen = perf[0]; recordlen = perf[3]; entries = perf[2]; } if (entries > NOUVEAU_PM_MAX_LEVEL) { NV_DEBUG(dev, "perf table has too many entries - buggy vbios?\n"); entries = NOUVEAU_PM_MAX_LEVEL; } entry = perf + headerlen; /* For version 0x15, initialize memtiming table */ if(version == 0x15) { memtimings->timing = kcalloc(entries, sizeof(*memtimings->timing), GFP_KERNEL); if(!memtimings) { NV_WARN(dev,"Could not allocate memtiming table\n"); return; } mt_hdr.entry_cnt = entries; mt_hdr.entry_len = 14; mt_hdr.version = version; mt_hdr.header_len = 4; } for (i = 0; i < entries; i++) { struct nouveau_pm_level *perflvl = &pm->perflvl[pm->nr_perflvl]; perflvl->timing = NULL; if (entry[0] == 0xff) { entry += recordlen; continue; } switch (version) { case 0x12: case 0x13: case 0x15: perflvl->fanspeed = entry[55]; if (recordlen > 56) perflvl->volt_min = entry[56]; perflvl->core = ROM32(entry[1]) * 10; perflvl->memory = ROM32(entry[5]) * 20; break; case 0x21: case 
0x23: case 0x24: perflvl->fanspeed = entry[4]; perflvl->volt_min = entry[5]; perflvl->shader = ROM16(entry[6]) * 1000; perflvl->core = perflvl->shader; perflvl->core += (signed char)entry[8] * 1000; if (dev_priv->chipset == 0x49 || dev_priv->chipset == 0x4b) perflvl->memory = ROM16(entry[11]) * 1000; else perflvl->memory = ROM16(entry[11]) * 2000; break; case 0x25: perflvl->fanspeed = entry[4]; perflvl->volt_min = entry[5]; perflvl->core = ROM16(entry[6]) * 1000; perflvl->shader = ROM16(entry[10]) * 1000; perflvl->memory = ROM16(entry[12]) * 1000; break; case 0x30: perflvl->memscript = ROM16(entry[2]); case 0x35: perflvl->fanspeed = entry[6]; perflvl->volt_min = entry[7]; perflvl->core = ROM16(entry[8]) * 1000; perflvl->shader = ROM16(entry[10]) * 1000; perflvl->memory = ROM16(entry[12]) * 1000; /*XXX: confirm on 0x35 */ perflvl->unk05 = ROM16(entry[16]) * 1000; break; case 0x40: #define subent(n) (ROM16(entry[perf[2] + ((n) * perf[3])]) & 0xfff) * 1000 perflvl->fanspeed = 0; /*XXX*/ perflvl->volt_min = entry[2]; if (dev_priv->card_type == NV_50) { perflvl->core = subent(0); perflvl->shader = subent(1); perflvl->memory = subent(2); perflvl->vdec = subent(3); perflvl->unka0 = subent(4); } else { perflvl->hub06 = subent(0); perflvl->hub01 = subent(1); perflvl->copy = subent(2); perflvl->shader = subent(3); perflvl->rop = subent(4); perflvl->memory = subent(5); perflvl->vdec = subent(6); perflvl->daemon = subent(10); perflvl->hub07 = subent(11); perflvl->core = perflvl->shader / 2; } break; } /* make sure vid is valid */ nouveau_perf_voltage(dev, &P, perflvl); if (pm->voltage.supported && perflvl->volt_min) { vid = nouveau_volt_vid_lookup(dev, perflvl->volt_min); if (vid < 0) { NV_DEBUG(dev, "drop perflvl %d, bad vid\n", i); entry += recordlen; continue; } } /* get the corresponding memory timings */ if (version == 0x15) { memtimings->timing[i].id = i; nv30_mem_timing_entry(dev,&mt_hdr,(struct nouveau_pm_tbl_entry*) &entry[41],0,&memtimings->timing[i]); 
perflvl->timing = &memtimings->timing[i]; } else if (version > 0x15) { /* last 3 args are for < 0x40, ignored for >= 0x40 */ perflvl->timing = nouveau_perf_timing(dev, &P, perflvl->memory / 1000, entry + perf[3], perf[5], perf[4]); } snprintf(perflvl->name, sizeof(perflvl->name), "performance_level_%d", i); perflvl->id = i; pm->nr_perflvl++; entry += recordlen; } }
/* Parse the VBIOS performance table (BIT 'P' or legacy BMP) into
 * pm->perflvl[].
 *
 * NOTE(review): this file also contains another, extended revision of
 * nouveau_perf_init() above — two definitions of the same external name
 * cannot be linked together; these appear to be alternative revisions of
 * the same function. Documented as-is.
 */
void nouveau_perf_init(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_pm_engine *pm = &dev_priv->engine.pm;
	struct nvbios *bios = &dev_priv->vbios;
	struct bit_entry P;
	u8 version, headerlen, recordlen, entries;
	u8 *perf, *entry;
	int vid, i;

	if (bios->type == NVBIOS_BIT) {
		if (bit_table(dev, 'P', &P))
			return;

		if (P.version != 1 && P.version != 2) {
			NV_WARN(dev, "unknown perf for BIT P %d\n", P.version);
			return;
		}

		/* NOTE(review): perf is dereferenced without a NULL check;
		 * the BMP branch below does check ROMPTR's result. */
		perf = ROMPTR(bios, P.data[0]);
		version = perf[0];
		headerlen = perf[1];
		if (version < 0x40) {
			recordlen = perf[3] + (perf[4] * perf[5]);
			entries = perf[2];
		} else {
			recordlen = perf[2] + (perf[3] * perf[4]);
			entries = perf[5];
		}
	} else {
		if (bios->data[bios->offset + 6] < 0x25) {
			legacy_perf_init(dev);
			return;
		}

		perf = ROMPTR(bios, bios->data[bios->offset + 0x94]);
		if (!perf) {
			NV_DEBUG(dev, "perf table pointer invalid\n");
			return;
		}

		version = perf[1];
		headerlen = perf[0];
		recordlen = perf[3];
		entries = perf[2];
	}

	entry = perf + headerlen;
	for (i = 0; i < entries; i++) {
		struct nouveau_pm_level *perflvl = &pm->perflvl[pm->nr_perflvl];

		/* 0xff in the first byte marks an unused record */
		if (entry[0] == 0xff) {
			entry += recordlen;
			continue;
		}

		switch (version) {
		case 0x12:
		case 0x13:
		case 0x15:
			perflvl->fanspeed = entry[55];
			perflvl->voltage = entry[56];
			perflvl->core = ROM32(entry[1]) * 10;
			perflvl->memory = ROM32(entry[5]) * 20;
			break;
		case 0x21:
		case 0x23:
		case 0x24:
			perflvl->fanspeed = entry[4];
			perflvl->voltage = entry[5];
			perflvl->core = ROM16(entry[6]) * 1000;
			/* 0x49/0x4b use a 1000x multiplier here; all other
			 * chipsets use 2000x */
			if (dev_priv->chipset == 0x49 ||
			    dev_priv->chipset == 0x4b)
				perflvl->memory = ROM16(entry[11]) * 1000;
			else
				perflvl->memory = ROM16(entry[11]) * 2000;
			break;
		case 0x25:
			perflvl->fanspeed = entry[4];
			perflvl->voltage = entry[5];
			perflvl->core = ROM16(entry[6]) * 1000;
			perflvl->shader = ROM16(entry[10]) * 1000;
			perflvl->memory = ROM16(entry[12]) * 1000;
			break;
		case 0x30:
			perflvl->memscript = ROM16(entry[2]);
			/* fall through */
		case 0x35:
			perflvl->fanspeed = entry[6];
			perflvl->voltage = entry[7];
			perflvl->core = ROM16(entry[8]) * 1000;
			perflvl->shader = ROM16(entry[10]) * 1000;
			perflvl->memory = ROM16(entry[12]) * 1000;
			/*XXX: confirm on 0x35 */
			perflvl->unk05 = ROM16(entry[16]) * 1000;
			break;
		case 0x40:
			/* v0x40 clocks live in fixed-stride subentries
			 * after the record header */
#define subent(n) entry[perf[2] + ((n) * perf[3])]
			perflvl->fanspeed = 0; /*XXX*/
			perflvl->voltage = entry[2];
			perflvl->core = (ROM16(subent(0)) & 0xfff) * 1000;
			perflvl->shader = (ROM16(subent(1)) & 0xfff) * 1000;
			perflvl->memory = (ROM16(subent(2)) & 0xfff) * 1000;
			break;
		}

		/* make sure vid is valid */
		if (pm->voltage.supported && perflvl->voltage) {
			vid = nouveau_volt_vid_lookup(dev, perflvl->voltage);
			if (vid < 0) {
				NV_DEBUG(dev, "drop perflvl %d, bad vid\n", i);
				entry += recordlen;
				continue;
			}
		}

		snprintf(perflvl->name, sizeof(perflvl->name),
			 "performance_level_%d", i);
		perflvl->id = i;
		pm->nr_perflvl++;

		entry += recordlen;
	}
}
/* cdev pager fault handler for a GEM object: resolves the faulting offset
 * to a physical address (fifo control regs, VRAM BAR, or sysram DMA page),
 * installs the matching fictitious page into the pager object and returns
 * a VM_PAGER_* status. */
static int
pscnv_gem_pager_fault(vm_object_t vm_obj, vm_ooffset_t offset, int prot,
		      vm_page_t *mres)
{
	struct drm_gem_object *gem_obj = vm_obj->handle;
	struct pscnv_bo *bo = gem_obj->driver_private;
	struct drm_device *dev = gem_obj->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	vm_page_t m = NULL;
	vm_page_t oldm;
	vm_memattr_t mattr;
	vm_paddr_t paddr;
	const char *what;

	/* resolve the physical address and memory attribute for offset */
	if (bo->chan) {
		paddr = dev_priv->fb_phys + offset +
			nvc0_fifo_ctrl_offs(dev, bo->chan->cid);
		mattr = VM_MEMATTR_UNCACHEABLE;
		what = "fifo";
	} else switch (bo->flags & PSCNV_GEM_MEMTYPE_MASK) {
	case PSCNV_GEM_VRAM_SMALL:
	case PSCNV_GEM_VRAM_LARGE:
		paddr = dev_priv->fb_phys + bo->map1->start + offset;
		mattr = VM_MEMATTR_WRITE_COMBINING;
		what = "vram";
		break;
	case PSCNV_GEM_SYSRAM_SNOOP:
	case PSCNV_GEM_SYSRAM_NOSNOOP:
		paddr = bo->dmapages[OFF_TO_IDX(offset)];
		mattr = VM_MEMATTR_WRITE_BACK;
		what = "sysram";
		break;
	default:
		/* NOTE(review): pre-existing odd return — positive EINVAL
		 * rather than a VM_PAGER_* code; kept as-is. */
		return (EINVAL);
	}

	if (offset >= bo->size) {
		if (pscnv_mem_debug > 0)
			NV_WARN(dev, "Reading %p + %08llx (%s) is past max size %08llx\n",
				bo, offset, what, bo->size);
		return (VM_PAGER_ERROR);
	}
	DRM_LOCK(dev);
	if (pscnv_mem_debug > 0)
		NV_WARN(dev, "Connecting %p+%08llx (%s) at phys %010llx\n",
			bo, offset, what, paddr);
	vm_object_pip_add(vm_obj, 1);

	if (*mres != NULL) {
		oldm = *mres;
		vm_page_lock(oldm);
		vm_page_remove(oldm);
		vm_page_unlock(oldm);
		*mres = NULL;
	} else
		oldm = NULL;
	m = vm_phys_fictitious_to_vm_page(paddr);
	if (m == NULL) {
		/* BUGFIX: drop the paging-in-progress reference taken above
		 * and report a pager status instead of a negative errno. */
		vm_object_pip_wakeup(vm_obj);
		DRM_UNLOCK(dev);
		return (VM_PAGER_ERROR);
	}
	KASSERT((m->flags & PG_FICTITIOUS) != 0, ("not fictitious %p", m));
	KASSERT(m->wire_count == 1, ("wire_count not 1 %p", m));
	if ((m->flags & VPO_BUSY) != 0) {
		/* BUGFIX: same as above — no pip leak, pager-style status. */
		vm_object_pip_wakeup(vm_obj);
		DRM_UNLOCK(dev);
		return (VM_PAGER_ERROR);
	}
	pmap_page_set_memattr(m, mattr);
	m->valid = VM_PAGE_BITS_ALL;
	*mres = m;
	vm_page_lock(m);
	vm_page_insert(m, vm_obj, OFF_TO_IDX(offset));
	vm_page_unlock(m);
	vm_page_busy(m);
	if (pscnv_mem_debug > 0)
		/* BUGFIX: leftover unconditional printf is now debug-gated,
		 * newline-terminated, and uses format specifiers that match
		 * the 64-bit argument types (the old %jx/%x mismatches were
		 * undefined behavior). */
		NV_WARN(dev, "fault %p %08llx %x phys %010llx\n", gem_obj,
			(uint64_t)offset, prot, (uint64_t)m->phys_addr);
	DRM_UNLOCK(dev);
	if (oldm != NULL) {
		vm_page_lock(oldm);
		vm_page_free(oldm);
		vm_page_unlock(oldm);
	}
	vm_object_pip_wakeup(vm_obj);
	return (VM_PAGER_OK);
}
void nouveau_volt_init(struct drm_device *dev) { struct drm_nouveau_private *dev_priv = dev->dev_private; struct nouveau_pm_engine *pm = &dev_priv->engine.pm; struct nouveau_pm_voltage *voltage = &pm->voltage; struct nvbios *bios = &dev_priv->vbios; struct bit_entry P; u8 *volt = NULL, *entry; int i, headerlen, recordlen, entries, vidmask, vidshift; if (bios->type == NVBIOS_BIT) { if (bit_table(dev, 'P', &P)) return; if (P.version == 1) volt = ROMPTR(dev, P.data[16]); else if (P.version == 2) volt = ROMPTR(dev, P.data[12]); else { NV_WARN(dev, "unknown volt for BIT P %d\n", P.version); } } else { if (bios->data[bios->offset + 6] < 0x27) { NV_DEBUG(dev, "BMP version too old for voltage\n"); return; } volt = ROMPTR(dev, bios->data[bios->offset + 0x98]); } if (!volt) { NV_DEBUG(dev, "voltage table pointer invalid\n"); return; } switch (volt[0]) { case 0x10: case 0x11: case 0x12: headerlen = 5; recordlen = volt[1]; entries = volt[2]; vidshift = 0; vidmask = volt[4]; break; case 0x20: headerlen = volt[1]; recordlen = volt[3]; entries = volt[2]; vidshift = 0; /* could be vidshift like 0x30? */ vidmask = volt[5]; break; case 0x30: headerlen = volt[1]; recordlen = volt[2]; entries = volt[3]; vidmask = volt[4]; /* no longer certain what volt[5] is, if it's related to * the vid shift then it's definitely not a function of * how many bits are set. * * after looking at a number of nva3+ vbios images, they * all seem likely to have a static shift of 2.. lets * go with that for now until proven otherwise. */ vidshift = 2; break; case 0x40: headerlen = volt[1]; recordlen = volt[2]; entries = volt[3]; /* not a clue what the entries are for.. */ vidmask = volt[11]; /* guess.. 
*/ vidshift = 0; break; default: NV_WARN(dev, "voltage table 0x%02x unknown\n", volt[0]); return; } /* validate vid mask */ voltage->vid_mask = vidmask; if (!voltage->vid_mask) return; i = 0; while (vidmask) { if (i > nr_vidtag) { NV_DEBUG(dev, "vid bit %d unknown\n", i); return; } if (!nouveau_gpio_func_valid(dev, vidtag[i])) { NV_DEBUG(dev, "vid bit %d has no gpio tag\n", i); return; } vidmask >>= 1; i++; } /* parse vbios entries into common format */ voltage->version = volt[0]; if (voltage->version < 0x40) { voltage->nr_level = entries; voltage->level = kcalloc(entries, sizeof(*voltage->level), GFP_KERNEL); if (!voltage->level) return; entry = volt + headerlen; for (i = 0; i < entries; i++, entry += recordlen) { voltage->level[i].voltage = entry[0] * 10000; voltage->level[i].vid = entry[1] >> vidshift; } } else {