static struct nouveau_bo *
nouveau_channel_user_pushbuf_alloc(struct drm_device *dev)
{
	struct nouveau_bo *pushbuf = NULL;
	int location, ret;

	if (nouveau_vram_pushbuf)
		location = TTM_PL_FLAG_VRAM;
	else
		location = TTM_PL_FLAG_TT;

	ret = nouveau_bo_new(dev, NULL, 65536, 0, location, 0, 0x0000,
			     false, true, &pushbuf);
	if (ret) {
		NV_ERROR(dev, "error allocating DMA push buffer: %d\n", ret);
		return NULL;
	}

	ret = nouveau_bo_pin(pushbuf, location);
	if (ret) {
		NV_ERROR(dev, "error pinning DMA push buffer: %d\n", ret);
		nouveau_bo_ref(NULL, &pushbuf);
		return NULL;
	}

	return pushbuf;
}
static int
nouveau_hwmon_init(struct drm_device *dev)
{
#if defined(CONFIG_HWMON) || (defined(MODULE) && defined(CONFIG_HWMON_MODULE))
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_pm_engine *pm = &dev_priv->engine.pm;
	struct device *hwmon_dev;
	int ret;

	if (!pm->temp_get)
		return -ENODEV;

	hwmon_dev = hwmon_device_register(&dev->pdev->dev);
	if (IS_ERR(hwmon_dev)) {
		ret = PTR_ERR(hwmon_dev);
		NV_ERROR(dev, "Unable to register hwmon device: %d\n", ret);
		return ret;
	}
	dev_set_drvdata(hwmon_dev, dev);

	ret = sysfs_create_group(&dev->pdev->dev.kobj, &hwmon_attrgroup);
	if (ret) {
		NV_ERROR(dev, "Unable to create hwmon sysfs file: %d\n", ret);
		hwmon_device_unregister(hwmon_dev);
		return ret;
	}

	pm->hwmon = hwmon_dev;
#endif
	return 0;
}
static void
nv50_sor_dp_link_train(struct drm_encoder *encoder)
{
	struct drm_device *dev = encoder->dev;
	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
	struct bit_displayport_encoder_table *dpe;
	int dpe_headerlen;

	dpe = nouveau_bios_dp_table(dev, nv_encoder->dcb, &dpe_headerlen);
	if (!dpe) {
		NV_ERROR(dev, "SOR-%d: no DP encoder table!\n", nv_encoder->or);
		return;
	}

	if (dpe->script0) {
		NV_DEBUG_KMS(dev, "SOR-%d: running DP script 0\n",
			     nv_encoder->or);
		nouveau_bios_run_init_table(dev, le16_to_cpu(dpe->script0),
					    nv_encoder->dcb);
	}

	if (!nouveau_dp_link_train(encoder))
		NV_ERROR(dev, "SOR-%d: link training failed\n", nv_encoder->or);

	if (dpe->script1) {
		NV_DEBUG_KMS(dev, "SOR-%d: running DP script 1\n",
			     nv_encoder->or);
		nouveau_bios_run_init_table(dev, le16_to_cpu(dpe->script1),
					    nv_encoder->dcb);
	}
}
int
nouveau_notifier_alloc(struct nouveau_channel *chan, uint32_t handle,
		       int size, uint32_t start, uint32_t end,
		       uint32_t *b_offset)
{
	struct drm_device *dev = chan->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_gpuobj *nobj = NULL;
	struct drm_mm_node *mem;
	uint32_t offset;
	int target, ret;

	mem = drm_mm_search_free_in_range(&chan->notifier_heap, size, 0,
					  start, end, 0);
	if (mem)
		mem = drm_mm_get_block_range(mem, size, 0, start, end);
	if (!mem) {
		NV_ERROR(dev, "Channel %d notifier block full\n", chan->id);
		return -ENOMEM;
	}

	if (dev_priv->card_type < NV_50) {
		if (chan->notifier_bo->bo.mem.mem_type == TTM_PL_VRAM)
			target = NV_MEM_TARGET_VRAM;
		else
			target = NV_MEM_TARGET_GART;
		offset = chan->notifier_bo->bo.mem.start << PAGE_SHIFT;
	} else {
		target = NV_MEM_TARGET_VM;
		offset = chan->notifier_bo->vma.offset;
	}
	offset += mem->start;

	ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY, offset,
				     mem->size, NV_MEM_ACCESS_RW, target,
				     &nobj);
	if (ret) {
		drm_mm_put_block(mem);
		NV_ERROR(dev, "Error creating notifier ctxdma: %d\n", ret);
		return ret;
	}
	nobj->dtor = nouveau_notifier_gpuobj_dtor;
	nobj->priv = mem;

	ret = nouveau_ramht_insert(chan, handle, nobj);
	nouveau_gpuobj_ref(NULL, &nobj);
	if (ret) {
		drm_mm_put_block(mem);
		NV_ERROR(dev, "Error adding notifier to ramht: %d\n", ret);
		return ret;
	}

	*b_offset = mem->start;
	return 0;
}
static int
nouveau_bo_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
			 struct ttm_mem_type_manager *man)
{
	struct drm_nouveau_private *dev_priv = nouveau_bdev(bdev);
	struct drm_device *dev = dev_priv->dev;

	switch (type) {
	case TTM_PL_SYSTEM:
		man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
		man->available_caching = TTM_PL_MASK_CACHING;
		man->default_caching = TTM_PL_FLAG_CACHED;
		break;
	case TTM_PL_VRAM:
		if (dev_priv->card_type >= NV_50) {
			man->func = &nouveau_vram_manager;
			man->io_reserve_fastpath = false;
			man->use_io_reserve_lru = true;
		} else {
			man->func = &ttm_bo_manager_func;
		}
		man->flags = TTM_MEMTYPE_FLAG_FIXED |
			     TTM_MEMTYPE_FLAG_MAPPABLE;
		man->available_caching = TTM_PL_FLAG_UNCACHED |
					 TTM_PL_FLAG_WC;
		man->default_caching = TTM_PL_FLAG_WC;
		break;
	case TTM_PL_TT:
		man->func = &ttm_bo_manager_func;
		switch (dev_priv->gart_info.type) {
		case NOUVEAU_GART_AGP:
			man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
			man->available_caching = TTM_PL_FLAG_UNCACHED |
						 TTM_PL_FLAG_WC;
			man->default_caching = TTM_PL_FLAG_WC;
			break;
		case NOUVEAU_GART_SGDMA:
			man->flags = TTM_MEMTYPE_FLAG_MAPPABLE |
				     TTM_MEMTYPE_FLAG_CMA;
			man->available_caching = TTM_PL_MASK_CACHING;
			man->default_caching = TTM_PL_FLAG_CACHED;
			man->gpu_offset = dev_priv->gart_info.aper_base;
			break;
		default:
			NV_ERROR(dev, "Unknown GART type: %d\n",
				 dev_priv->gart_info.type);
			return -EINVAL;
		}
		break;
	default:
		NV_ERROR(dev, "Unsupported memory type %u\n", (unsigned)type);
		return -EINVAL;
	}
	return 0;
}
static enum drm_connector_status
nv50_dac_detect(struct drm_encoder *encoder, struct drm_connector *connector)
{
	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
	struct drm_device *dev = encoder->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	enum drm_connector_status status = connector_status_disconnected;
	uint32_t dpms_state, load_pattern, load_state;
	int or = nv_encoder->or;

	nv_wr32(dev, NV50_PDISPLAY_DAC_CLK_CTRL1(or), 0x00000001);
	dpms_state = nv_rd32(dev, NV50_PDISPLAY_DAC_DPMS_CTRL(or));

	nv_wr32(dev, NV50_PDISPLAY_DAC_DPMS_CTRL(or),
		0x00150000 | NV50_PDISPLAY_DAC_DPMS_CTRL_PENDING);
	if (!nv_wait(NV50_PDISPLAY_DAC_DPMS_CTRL(or),
		     NV50_PDISPLAY_DAC_DPMS_CTRL_PENDING, 0)) {
		NV_ERROR(dev, "timeout: DAC_DPMS_CTRL_PENDING(%d) == 0\n", or);
		NV_ERROR(dev, "DAC_DPMS_CTRL(%d) = 0x%08x\n", or,
			 nv_rd32(dev, NV50_PDISPLAY_DAC_DPMS_CTRL(or)));
		return status;
	}

	/* Use bios provided value if possible. */
	if (dev_priv->vbios->dactestval) {
		load_pattern = dev_priv->vbios->dactestval;
		NV_DEBUG(dev, "Using bios provided load_pattern of %d\n",
			 load_pattern);
	} else {
		load_pattern = 340;
		NV_DEBUG(dev, "Using default load_pattern of %d\n",
			 load_pattern);
	}

	nv_wr32(dev, NV50_PDISPLAY_DAC_LOAD_CTRL(or),
		NV50_PDISPLAY_DAC_LOAD_CTRL_ACTIVE | load_pattern);
	mdelay(45); /* give it some time to process */
	load_state = nv_rd32(dev, NV50_PDISPLAY_DAC_LOAD_CTRL(or));

	nv_wr32(dev, NV50_PDISPLAY_DAC_LOAD_CTRL(or), 0);
	nv_wr32(dev, NV50_PDISPLAY_DAC_DPMS_CTRL(or),
		dpms_state | NV50_PDISPLAY_DAC_DPMS_CTRL_PENDING);

	if ((load_state & NV50_PDISPLAY_DAC_LOAD_CTRL_PRESENT) ==
	    NV50_PDISPLAY_DAC_LOAD_CTRL_PRESENT)
		status = connector_status_connected;

	if (status == connector_status_connected)
		NV_DEBUG(dev, "Load was detected on output with or %d\n", or);
	else
		NV_DEBUG(dev, "Load was not detected on output with or %d\n",
			 or);

	return status;
}
/* free_node operations that do not actually free reserved memory, but just
 * set up the nodes in the tree, should be silent */
static void
pscnv_mm_free_node(struct pscnv_mm_node *node, bool silent)
{
	struct pscnv_mm_node *prev = PSCNV_RB_PREV(pscnv_mm_head, entry, node);
	struct pscnv_mm_node *next = PSCNV_RB_NEXT(pscnv_mm_head, entry, node);
	int i;

	if (pscnv_mm_debug >= 2 || (!silent && pscnv_mm_debug >= 1))
		NV_INFO(node->mm->dev,
			"MM: [%s] Freeing node %llx..%llx of type %d\n",
			node->mm->name, node->start, node->start + node->size,
			node->type);

	if (node->next) {
		node->next->prev = NULL;
		node->next = NULL;
	}
	if (pscnv_mm_debug >= 1 && node->prev)
		NV_ERROR(node->mm->dev, "A node that's about to be freed "
			 "should not have a valid prev pointer!\n");
	node->prev = NULL;

	node->type = PSCNV_MM_TYPE_FREE;

	/* Coalesce with the preceding node if it is also free. */
	if (prev->type == PSCNV_MM_TYPE_FREE) {
		if (pscnv_mm_debug >= 2)
			NV_INFO(node->mm->dev,
				"MM: Merging left with node %llx..%llx\n",
				prev->start, prev->start + prev->size);
		if (prev->start + prev->size != node->start) {
			NV_ERROR(node->mm->dev,
				 "MM: node %llx..%llx not contiguous with prev %llx..%llx\n",
				 node->start, node->start + node->size,
				 prev->start, prev->start + prev->size);
			pscnv_mm_dump(PSCNV_RB_ROOT(&node->mm->head));
		} else {
			node->start = prev->start;
			node->size += prev->size;
			PSCNV_RB_REMOVE(pscnv_mm_head, &node->mm->head, prev);
			kfree(prev);
		}
	}

	/* Coalesce with the following node if it is also free. */
	if (next->type == PSCNV_MM_TYPE_FREE) {
		if (pscnv_mm_debug >= 2)
			NV_INFO(node->mm->dev,
				"MM: Merging right with node %llx..%llx\n",
				next->start, next->start + next->size);
		if (node->start + node->size != next->start) {
			NV_ERROR(node->mm->dev,
				 "MM: node %llx..%llx not contiguous with next %llx..%llx\n",
				 node->start, node->start + node->size,
				 next->start, next->start + next->size);
			pscnv_mm_dump(PSCNV_RB_ROOT(&node->mm->head));
		} else {
			node->size += next->size;
			PSCNV_RB_REMOVE(pscnv_mm_head, &node->mm->head, next);
			kfree(next);
		}
	}

	for (i = 0; i < GTYPES; i++) {
		uint64_t s, e;
		pscnv_mm_getfree(node, i, &s, &e);
		node->gap[i] = e - s;
	}
	pscnv_mm_augup(node);
}
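/*
 * The merge logic above coalesces a freed range with a free neighbour on
 * either side, after checking that the two ranges are actually contiguous.
 * Below is a minimal user-space sketch of just that coalescing step over a
 * sorted, doubly-linked list of ranges; the types and names here are
 * illustrative, not the driver's.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdlib.h>

struct range_node {
	uint64_t start, size;
	bool free;
	struct range_node *prev, *next;
};

/* Mark 'node' free and absorb any free, contiguous neighbours into it. */
static void range_free_node(struct range_node *node)
{
	struct range_node *prev = node->prev, *next = node->next;

	node->free = true;

	if (prev && prev->free && prev->start + prev->size == node->start) {
		node->start = prev->start;	/* grow left */
		node->size += prev->size;
		node->prev = prev->prev;
		if (prev->prev)
			prev->prev->next = node;
		free(prev);
	}
	if (next && next->free && node->start + node->size == next->start) {
		node->size += next->size;	/* grow right */
		node->next = next->next;
		if (next->next)
			next->next->prev = node;
		free(next);
	}
}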
struct nouveau_channel *
nv50_graph_channel(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	uint32_t inst;
	int i;

	/* Be sure we're not in the middle of a context switch or bad things
	 * will happen, such as unloading the wrong pgraph context. */
	if (!nv_wait(dev, 0x400300, 0x00000001, 0x00000000))
		NV_ERROR(dev, "Ctxprog is still running\n");

	inst = nv_rd32(dev, NV50_PGRAPH_CTXCTL_CUR);
	if (!(inst & NV50_PGRAPH_CTXCTL_CUR_LOADED))
		return NULL;
	inst = (inst & NV50_PGRAPH_CTXCTL_CUR_INSTANCE) << 12;

	for (i = 0; i < dev_priv->engine.fifo.channels; i++) {
		struct nouveau_channel *chan = dev_priv->channels.ptr[i];

		if (chan && chan->ramin && chan->ramin->vinst == inst)
			return chan;
	}

	return NULL;
}
static void
nv50_dac_disconnect(struct drm_encoder *encoder)
{
	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
	struct drm_device *dev = encoder->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_channel *evo = dev_priv->evo;
	int ret;

	if (!nv_encoder->crtc)
		return;
	nv50_crtc_blank(nouveau_crtc(nv_encoder->crtc), true);

	NV_DEBUG_KMS(dev, "Disconnecting DAC %d\n", nv_encoder->or);

	ret = RING_SPACE(evo, 4);
	if (ret) {
		NV_ERROR(dev, "no space while disconnecting DAC\n");
		return;
	}
	BEGIN_RING(evo, 0, NV50_EVO_DAC(nv_encoder->or, MODE_CTRL), 1);
	OUT_RING  (evo, 0);
	BEGIN_RING(evo, 0, NV50_EVO_UPDATE, 1);
	OUT_RING  (evo, 0);

	nv_encoder->crtc = NULL;
}
uint32_t
pscnv_ramht_find(struct pscnv_ramht *ramht, uint32_t handle)
{
	/* XXX: do this properly. */
	uint32_t hash = pscnv_ramht_hash(ramht, handle);
	uint32_t start = hash * 8;
	uint32_t pos = start;
	uint32_t res;

	if (pscnv_ramht_debug >= 2)
		NV_INFO(ramht->vo->dev, "Handle %x hash %x\n", handle, hash);

	spin_lock(&ramht->lock);
	do {
		if (!nv_rv32(ramht->vo, ramht->offset + pos + 4))
			break;
		if (nv_rv32(ramht->vo, ramht->offset + pos) == handle) {
			res = nv_rv32(ramht->vo, ramht->offset + pos + 4);
			spin_unlock(&ramht->lock);
			return res;
		}
		pos += 8;
		if (pos == 8 << ramht->bits)
			pos = 0;
	} while (pos != start);
	spin_unlock(&ramht->lock);

	NV_ERROR(ramht->vo->dev, "RAMHT object %x not found\n", handle);
	return 0;
}
int
nouveau_load(struct drm_device *dev, unsigned long flags)
{
	struct drm_nouveau_private *dev_priv;
	uint32_t reg0;
	resource_size_t mmio_start_offs;
	int ret;

	dev_priv = kzalloc(sizeof(*dev_priv), GFP_KERNEL);
	if (!dev_priv) {
		ret = -ENOMEM;
		goto err_out;
	}
	dev->dev_private = dev_priv;
	dev_priv->dev = dev;

	dev_priv->flags = flags & NOUVEAU_FLAGS;

	NV_DEBUG(dev, "vendor: 0x%X device: 0x%X class: 0x%X\n",
		 dev->pci_vendor, dev->pci_device, dev->pdev->class);

	dev_priv->wq = create_workqueue("nouveau");
	if (!dev_priv->wq) {
		ret = -EINVAL;
		goto err_priv;
	}

	/* resource 0 is mmio regs */
	/* resource 1 is linear FB */
	/* resource 2 is RAMIN (mmio regs + 0x1000000) */
	/* resource 6 is bios */

	/* map the mmio regs */
	mmio_start_offs = pci_resource_start(dev->pdev, 0);
	dev_priv->mmio = ioremap(mmio_start_offs, 0x00800000);
	if (!dev_priv->mmio) {
		NV_ERROR(dev, "Unable to initialize the mmio mapping. "
			 "Please report your setup to " DRIVER_EMAIL "\n");
		ret = -EINVAL;
		goto err_wq;
	}
	NV_DEBUG(dev, "regs mapped ok at 0x%llx\n",
		 (unsigned long long)mmio_start_offs);

#ifdef __BIG_ENDIAN
	/* Put the card in BE mode if it's not */
	if (nv_rd32(dev, NV03_PMC_BOOT_1))
		nv_wr32(dev, NV03_PMC_BOOT_1, 0x00000001);

	DRM_MEMORYBARRIER();
#endif

	/* Time to determine the card architecture */
	reg0 = nv_rd32(dev, NV03_PMC_BOOT_0);

	/* We're dealing with >=NV10 */
	if ((reg0 & 0x0f000000) > 0) {
		/* Bits 27:20 contain the architecture in hex */
		dev_priv->chipset = (reg0 & 0xff00000) >> 20;
	/* NV04 or NV05 */
	} else if ((reg0 & 0xff00fff0) == 0x20004000) {
static int
nouveau_cli_init(struct nouveau_drm *drm, const char *sname,
		 struct nouveau_cli *cli)
{
	u64 device = nouveau_name(drm->dev);
	int ret;

	snprintf(cli->name, sizeof(cli->name), "%s", sname);
	cli->dev = drm->dev;
	mutex_init(&cli->mutex);
	usif_client_init(cli);

	if (cli == &drm->client) {
		ret = nvif_driver_init(NULL, nouveau_config, nouveau_debug,
				       cli->name, device, &cli->base);
	} else {
		ret = nvif_client_init(&drm->client.base, cli->name, device,
				       &cli->base);
	}
	if (ret) {
		NV_ERROR(drm, "Client allocation failed: %d\n", ret);
		goto done;
	}

	ret = nvif_device_init(&cli->base.object, 0, NV_DEVICE,
			       &(struct nv_device_v0) {
					.device = ~0,
			       }, sizeof(struct nv_device_v0),
static int
nouveau_pm_perflvl_set(struct drm_device *dev, struct nouveau_pm_level *perflvl)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_pm_engine *pm = &dev_priv->engine.pm;
	int ret;

	if (perflvl == pm->cur)
		return 0;

	if (pm->voltage.supported && pm->voltage_set && perflvl->volt_min) {
		ret = pm->voltage_set(dev, perflvl->volt_min);
		if (ret) {
			NV_ERROR(dev, "voltage_set %d failed: %d\n",
				 perflvl->volt_min, ret);
		}
	}

	if (pm->clocks_pre) {
		void *state = pm->clocks_pre(dev, perflvl);
		if (IS_ERR(state))
			return PTR_ERR(state);
		pm->clocks_set(dev, state);
	} else if (pm->clock_set) {
		nouveau_pm_clock_set(dev, perflvl, PLL_CORE, perflvl->core);
		nouveau_pm_clock_set(dev, perflvl, PLL_SHADER, perflvl->shader);
		nouveau_pm_clock_set(dev, perflvl, PLL_MEMORY, perflvl->memory);
		nouveau_pm_clock_set(dev, perflvl, PLL_UNK05, perflvl->unk05);
	}

	pm->cur = perflvl;
	return 0;
}
static int
nouveau_sysfs_init(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_pm_engine *pm = &dev_priv->engine.pm;
	struct device *d = &dev->pdev->dev;
	int ret, i;

	ret = device_create_file(d, &dev_attr_performance_level);
	if (ret)
		return ret;

	for (i = 0; i < pm->nr_perflvl; i++) {
		struct nouveau_pm_level *perflvl = &pm->perflvl[i];

		perflvl->dev_attr.attr.name = perflvl->name;
		perflvl->dev_attr.attr.mode = S_IRUGO;
		perflvl->dev_attr.show = nouveau_pm_get_perflvl_info;
		perflvl->dev_attr.store = NULL;
		sysfs_attr_init(&perflvl->dev_attr.attr);

		ret = device_create_file(d, &perflvl->dev_attr);
		if (ret) {
			NV_ERROR(dev, "failed perflvl %d sysfs: %d\n",
				 perflvl->id, ret);
			perflvl->dev_attr.attr.name = NULL;
			nouveau_pm_fini(dev);
			return ret;
		}
	}

	return 0;
}
static void
nv50_cursor_show(struct nouveau_crtc *nv_crtc, bool update)
{
	struct drm_device *dev = nv_crtc->base.dev;
	struct nouveau_drm *drm = nouveau_drm(dev);
	struct nouveau_channel *evo = nv50_display(dev)->master;
	int ret;

	NV_DEBUG(drm, "\n");

	if (update && nv_crtc->cursor.visible)
		return;

	ret = RING_SPACE(evo,
			 (nv_device(drm->device)->chipset != 0x50 ? 5 : 3) +
			 update * 2);
	if (ret) {
		NV_ERROR(drm, "no space while unhiding cursor\n");
		return;
	}

	if (nv_device(drm->device)->chipset != 0x50) {
		BEGIN_NV04(evo, 0, NV84_EVO_CRTC(nv_crtc->index, CURSOR_DMA), 1);
		OUT_RING(evo, NvEvoVRAM);
	}
	BEGIN_NV04(evo, 0, NV50_EVO_CRTC(nv_crtc->index, CURSOR_CTRL), 2);
	OUT_RING(evo, NV50_EVO_CRTC_CURSOR_CTRL_SHOW);
	OUT_RING(evo, nv_crtc->cursor.offset >> 8);

	if (update) {
		BEGIN_NV04(evo, 0, NV50_EVO_UPDATE, 1);
		OUT_RING(evo, 0);
		FIRE_RING(evo);
		nv_crtc->cursor.visible = true;
	}
}
static void
nv50_cursor_hide(struct nouveau_crtc *nv_crtc, bool update)
{
	struct drm_nouveau_private *dev_priv = nv_crtc->base.dev->dev_private;
	struct nouveau_channel *evo = dev_priv->evo;
	struct drm_device *dev = nv_crtc->base.dev;
	int ret;

	NV_DEBUG_KMS(dev, "\n");

	if (update && !nv_crtc->cursor.visible)
		return;

	ret = RING_SPACE(evo, (dev_priv->chipset != 0x50 ? 5 : 3) +
			 update * 2);
	if (ret) {
		NV_ERROR(dev, "no space while hiding cursor\n");
		return;
	}
	BEGIN_RING(evo, 0, NV50_EVO_CRTC(nv_crtc->index, CURSOR_CTRL), 2);
	OUT_RING(evo, NV50_EVO_CRTC_CURSOR_CTRL_HIDE);
	OUT_RING(evo, 0);
	if (dev_priv->chipset != 0x50) {
		BEGIN_RING(evo, 0, NV84_EVO_CRTC(nv_crtc->index, CURSOR_DMA), 1);
		OUT_RING(evo, NV84_EVO_CRTC_CURSOR_DMA_HANDLE_NONE);
	}

	if (update) {
		BEGIN_RING(evo, 0, NV50_EVO_UPDATE, 1);
		OUT_RING(evo, 0);
		FIRE_RING(evo);
		nv_crtc->cursor.visible = false;
	}
}
static int
pscnv_vspace_bind(struct pscnv_vspace *vs, int fake)
{
	struct drm_nouveau_private *dev_priv = vs->dev->dev_private;
	unsigned long flags;
	int i;

	BUG_ON(vs->vid);

	spin_lock_irqsave(&dev_priv->vm->vs_lock, flags);
	if (fake) {
		vs->vid = -fake;
		BUG_ON(dev_priv->vm->fake_vspaces[fake]);
		dev_priv->vm->fake_vspaces[fake] = vs;
		spin_unlock_irqrestore(&dev_priv->vm->vs_lock, flags);
		return 0;
	} else {
		for (i = 1; i < 128; i++) {
			if (!dev_priv->vm->vspaces[i]) {
				vs->vid = i;
				dev_priv->vm->vspaces[i] = vs;
				spin_unlock_irqrestore(&dev_priv->vm->vs_lock,
						       flags);
				return 0;
			}
		}
		spin_unlock_irqrestore(&dev_priv->vm->vs_lock, flags);
		NV_ERROR(vs->dev, "VM: Out of vspaces\n");
		return -ENOSPC;
	}
}
struct pscnv_swaptask *
pscnv_swaptask_new(struct pscnv_client *tgt)
{
	struct drm_device *dev = tgt->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct pscnv_swaptask *st;
	int serial;

	st = kzalloc(sizeof(struct pscnv_swaptask), GFP_KERNEL);
	if (!st) {
		NV_ERROR(dev, "pscnv_swaptask_new: out of memory\n");
		return NULL;
	}

	serial = atomic_inc_return(&dev_priv->swapping->swaptask_serial);

	if (pscnv_swapping_debug >= 3) {
		NV_INFO(dev, "pscnv_swaptask_new: new swaptask %d for client %d\n",
			serial, tgt->pid);
	}

	INIT_LIST_HEAD(&st->list);
	pscnv_chunk_list_init(&st->selected);
	st->tgt = tgt;
	st->dev = dev;
	st->serial = serial;
	init_completion(&st->completion);

	return st;
}
struct pscnv_vspace *
pscnv_vspace_new(struct drm_device *dev, uint64_t size, uint32_t flags,
		 int fake)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct pscnv_vspace *res = kzalloc(sizeof *res, GFP_KERNEL);

	if (!res) {
		NV_ERROR(dev, "VM: Couldn't alloc vspace\n");
		return NULL;
	}
	res->dev = dev;
	res->size = size;
	res->flags = flags;
	kref_init(&res->ref);
	mutex_init(&res->lock);
	if (pscnv_vspace_bind(res, fake)) {
		kfree(res);
		return NULL;
	}
	NV_INFO(dev, "VM: Allocating vspace %d\n", res->vid);
	if (dev_priv->vm->do_vspace_new(res)) {
		pscnv_vspace_unbind(res);
		kfree(res);
		return NULL;
	}
	return res;
}
/* called once on driver load */
int
pscnv_swapping_init(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct pscnv_swapping *swapping;

	if (pscnv_swapping_debug >= 1)
		NV_INFO(dev, "pscnv_swapping: initializing....\n");

	dev_priv->swapping = kzalloc(sizeof(struct pscnv_swapping), GFP_KERNEL);
	if (!dev_priv->swapping) {
		NV_ERROR(dev, "Out of memory\n");
		return -ENOMEM;
	}
	swapping = dev_priv->swapping;

	swapping->dev = dev;
	atomic_set(&swapping->swaptask_serial, 0);
	init_completion(&swapping->next_swap);
	INIT_DELAYED_WORK(&swapping->increase_vram_work,
			  increase_vram_work_func);

	/* schedule_delayed_work() returns a bool (true if newly queued), not
	 * an error code, so it must not be passed through as one. */
	schedule_delayed_work(&swapping->increase_vram_work,
			      PSCNV_INCREASE_RATE);
	return 0;
}
int
pscnv_ramht_insert(struct pscnv_ramht *ramht, uint32_t handle,
		   uint32_t context)
{
	/* XXX: check if the object exists already... */
	struct drm_nouveau_private *dev_priv = ramht->vo->dev->dev_private;
	uint32_t hash = pscnv_ramht_hash(ramht, handle);
	uint32_t start = hash * 8;
	uint32_t pos = start;

	if (pscnv_ramht_debug >= 2)
		NV_INFO(ramht->vo->dev, "Handle %x hash %x\n", handle, hash);

	spin_lock(&ramht->lock);
	do {
		if (!nv_rv32(ramht->vo, ramht->offset + pos + 4)) {
			nv_wv32(ramht->vo, ramht->offset + pos, handle);
			nv_wv32(ramht->vo, ramht->offset + pos + 4, context);
			dev_priv->vm->bar_flush(ramht->vo->dev);
			spin_unlock(&ramht->lock);
			if (pscnv_ramht_debug >= 1)
				NV_INFO(ramht->vo->dev,
					"Adding RAMHT entry for object %x at %x, context %x\n",
					handle, pos, context);
			return 0;
		}
		pos += 8;
		if (pos == 8 << ramht->bits)
			pos = 0;
	} while (pos != start);
	spin_unlock(&ramht->lock);
	NV_ERROR(ramht->vo->dev, "No RAMHT space for object %x\n", handle);
	return -ENOMEM;
}
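/*
 * pscnv_ramht_find() and pscnv_ramht_insert() above implement open
 * addressing with linear probing over 8-byte {handle, context} entries:
 * a zero context word marks an empty slot, probing wraps at 8 << bits,
 * and the scan stops once it returns to the starting slot. A minimal
 * user-space sketch of the same scheme follows; the struct, function
 * names, and the fold-down hash here are illustrative, not the driver's
 * (its hash is pscnv_ramht_hash(), which is not shown in this excerpt).
 */
#include <stdint.h>

struct ramht_entry { uint32_t handle, context; };

struct ramht_model {
	int bits;			/* table holds 1 << bits entries */
	struct ramht_entry *slots;
};

static uint32_t ramht_model_hash(struct ramht_model *t, uint32_t handle)
{
	uint32_t hash = 0;

	/* fold the handle down to 'bits' bits */
	while (handle) {
		hash ^= handle & ((1u << t->bits) - 1);
		handle >>= t->bits;
	}
	return hash;
}

static int ramht_model_insert(struct ramht_model *t, uint32_t handle,
			      uint32_t context)
{
	uint32_t start = ramht_model_hash(t, handle), pos = start;

	do {
		if (!t->slots[pos].context) {	/* empty slot found */
			t->slots[pos].handle = handle;
			t->slots[pos].context = context;
			return 0;
		}
		pos = (pos + 1) & ((1u << t->bits) - 1); /* wrap around */
	} while (pos != start);
	return -1;				/* table full */
}

static uint32_t ramht_model_find(struct ramht_model *t, uint32_t handle)
{
	uint32_t start = ramht_model_hash(t, handle), pos = start;

	do {
		if (!t->slots[pos].context)	/* empty slot: not present */
			break;
		if (t->slots[pos].handle == handle)
			return t->slots[pos].context;
		pos = (pos + 1) & ((1u << t->bits) - 1);
	} while (pos != start);
	return 0;				/* not found */
}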
void
nv50_cursor_fini(struct nouveau_crtc *nv_crtc)
{
	struct drm_device *dev = nv_crtc->base.dev;
	int idx = nv_crtc->index;

	NV_DEBUG_KMS(dev, "\n");

	nv_wr32(dev, NV50_PDISPLAY_CURSOR_CURSOR_CTRL2(idx), 0);
	if (!nv_wait(NV50_PDISPLAY_CURSOR_CURSOR_CTRL2(idx),
		     NV50_PDISPLAY_CURSOR_CURSOR_CTRL2_STATUS, 0)) {
		NV_ERROR(dev, "timeout: CURSOR_CTRL2_STATUS == 0\n");
		NV_ERROR(dev, "CURSOR_CTRL2 = 0x%08x\n",
			 nv_rd32(dev, NV50_PDISPLAY_CURSOR_CURSOR_CTRL2(idx)));
	}
}
irqreturn_t
nouveau_irq_handler(DRM_IRQ_ARGS)
{
	struct drm_device *dev = (struct drm_device *)arg;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	unsigned long flags;
	u32 stat;
	int i;

	stat = nv_rd32(dev, NV03_PMC_INTR_0);
	if (!stat)
		return IRQ_NONE;

	spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
	for (i = 0; i < 32 && stat; i++) {
		if (!(stat & (1 << i)) || !dev_priv->irq_handler[i])
			continue;

		dev_priv->irq_handler[i](dev);
		stat &= ~(1 << i);
	}
	if (dev_priv->msi_enabled)
		nv_wr08(dev, 0x00088068, 0xff);
	spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);

	if (stat && nouveau_ratelimit())
		NV_ERROR(dev, "PMC - unhandled INTR 0x%08x\n", stat);

	return IRQ_HANDLED;
}
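/*
 * The handler above walks the pending-interrupt word bit by bit,
 * dispatches each set bit to its per-bit handler, and clears bits as
 * they are consumed, so whatever remains can be reported as unhandled.
 * The same dispatch pattern in isolation, as a small self-contained
 * sketch (the handler table and names are illustrative):
 */
#include <stdint.h>

typedef void (*intr_handler_t)(void *ctx);

/* Returns the still-set bits, i.e. the interrupt sources nobody handled. */
static uint32_t dispatch_intr(uint32_t stat, intr_handler_t handlers[32],
			      void *ctx)
{
	int i;

	for (i = 0; i < 32 && stat; i++) {
		if (!(stat & (1u << i)) || !handlers[i])
			continue;
		handlers[i](ctx);
		stat &= ~(1u << i);	/* mark this source handled */
	}
	return stat;
}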
static int
nvc0_graph_construct_context(struct nouveau_channel *chan)
{
	struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
	struct nvc0_graph_priv *priv = nv_engine(chan->dev, NVOBJ_ENGINE_GR);
	struct nvc0_graph_chan *grch = chan->engctx[NVOBJ_ENGINE_GR];
	struct drm_device *dev = chan->dev;
	int ret, i;
	u32 *ctx;

	ctx = kmalloc(priv->grctx_size, GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	if (!nouveau_ctxfw) {
		nv_wr32(dev, 0x409840, 0x80000000);
		nv_wr32(dev, 0x409500, 0x80000000 | chan->ramin->vinst >> 12);
		nv_wr32(dev, 0x409504, 0x00000001);
		if (!nv_wait(dev, 0x409800, 0x80000000, 0x80000000)) {
			NV_ERROR(dev, "PGRAPH: HUB_SET_CHAN timeout\n");
			nvc0_graph_ctxctl_debug(dev);
			ret = -EBUSY;
			goto err;
		}
	} else {
int
nouveau_gem_ioctl_new(struct drm_device *dev, void *data,
		      struct drm_file *file_priv)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct drm_nouveau_gem_new *req = data;
	struct nouveau_bo *nvbo = NULL;
	int ret = 0;

	if (unlikely(dev_priv->ttm.bdev.dev_mapping == NULL))
		dev_priv->ttm.bdev.dev_mapping = dev_priv->dev->dev_mapping;

	if (!dev_priv->engine.vram.flags_valid(dev, req->info.tile_flags)) {
		NV_ERROR(dev, "bad page flags: 0x%08x\n", req->info.tile_flags);
		return -EINVAL;
	}

	ret = nouveau_gem_new(dev, req->info.size, req->align,
			      req->info.domain, req->info.tile_mode,
			      req->info.tile_flags, &nvbo);
	if (ret)
		return ret;

	ret = drm_gem_handle_create(file_priv, nvbo->gem, &req->info.handle);
	if (ret == 0) {
		ret = nouveau_gem_info(file_priv, nvbo->gem, &req->info);
		if (ret)
			drm_gem_handle_delete(file_priv, req->info.handle);
	}

	/* drop reference from allocate - handle holds it now */
	drm_gem_object_unreference_unlocked(nvbo->gem);
	return ret;
}
int
nouveau_gem_ioctl_new(struct drm_device *dev, void *data,
		      struct drm_file *file_priv)
{
	struct nouveau_drm *drm = nouveau_drm(dev);
	struct nouveau_fb *pfb = nouveau_fb(drm->device);
	struct drm_nouveau_gem_new *req = data;
	struct nouveau_bo *nvbo = NULL;
	int ret = 0;

	drm->ttm.bdev.dev_mapping = drm->dev->dev_mapping;

	if (!pfb->memtype_valid(pfb, req->info.tile_flags)) {
		NV_ERROR(drm, "bad page flags: 0x%08x\n", req->info.tile_flags);
		return -EINVAL;
	}

	ret = nouveau_gem_new(dev, req->info.size, req->align,
			      req->info.domain, req->info.tile_mode,
			      req->info.tile_flags, &nvbo);
	if (ret)
		return ret;

	ret = drm_gem_handle_create(file_priv, nvbo->gem, &req->info.handle);
	if (ret == 0) {
		ret = nouveau_gem_info(file_priv, nvbo->gem, &req->info);
		if (ret)
			drm_gem_handle_delete(file_priv, req->info.handle);
	}

	/* drop reference from allocate - handle holds it now */
	drm_gem_object_unreference_unlocked(nvbo->gem);
	return ret;
}
int
pscnv_mem_init(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	int ret;
	int dma_bits = 32;

#ifdef __linux__
	if (dev_priv->card_type >= NV_50 &&
	    pci_dma_supported(dev->pdev, DMA_BIT_MASK(40)))
		dma_bits = 40;

	ret = pci_set_dma_mask(dev->pdev, DMA_BIT_MASK(dma_bits));
	if (ret) {
		NV_ERROR(dev, "Error setting DMA mask: %d\n", ret);
		return ret;
	}
#else
	if (dev_priv->card_type >= NV_50)
		dma_bits = 40;
#endif
	dev_priv->dma_mask = DMA_BIT_MASK(dma_bits);

	spin_lock_init(&dev_priv->pramin_lock);
	mutex_init(&dev_priv->vram_mutex);

	switch (dev_priv->card_type) {
	case NV_50:
		ret = nv50_vram_init(dev);
		break;
	case NV_D0:
	case NV_C0:
		ret = nvc0_vram_init(dev);
		break;
	default:
		NV_ERROR(dev, "No VRAM allocator for NV%02x!\n",
			 dev_priv->chipset);
		ret = -ENOSYS;
	}
	if (ret)
		return ret;

	dev_priv->fb_mtrr = drm_mtrr_add(drm_get_resource_start(dev, 1),
					 drm_get_resource_len(dev, 1),
					 DRM_MTRR_WC);
	return 0;
}
void
nouveau_fbcon_gpu_lockup(struct fb_info *info)
{
	struct nouveau_fbdev *nfbdev = info->par;
	struct drm_device *dev = nfbdev->dev;

	NV_ERROR(dev, "GPU lockup - switching to software fbcon\n");
	info->flags |= FBINFO_HWACCEL_DISABLED;
}
static void
nv50_sor_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
		  struct drm_display_mode *adjusted_mode)
{
	struct drm_nouveau_private *dev_priv = encoder->dev->dev_private;
	struct nouveau_channel *evo = dev_priv->evo;
	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
	struct drm_device *dev = encoder->dev;
	struct nouveau_crtc *crtc = nouveau_crtc(encoder->crtc);
	uint32_t mode_ctl = 0;
	int ret;

	NV_DEBUG_KMS(dev, "or %d\n", nv_encoder->or);

	nv50_sor_dpms(encoder, DRM_MODE_DPMS_ON);

	switch (nv_encoder->dcb->type) {
	case OUTPUT_TMDS:
		if (nv_encoder->dcb->sorconf.link & 1) {
			if (adjusted_mode->clock < 165000)
				mode_ctl = 0x0100;
			else
				mode_ctl = 0x0500;
		} else
			mode_ctl = 0x0200;
		break;
	case OUTPUT_DP:
		mode_ctl |= 0x00050000;
		if (nv_encoder->dcb->sorconf.link & 1)
			mode_ctl |= 0x00000800;
		else
			mode_ctl |= 0x00000900;
		break;
	default:
		break;
	}

	if (crtc->index == 1)
		mode_ctl |= NV50_EVO_SOR_MODE_CTRL_CRTC1;
	else
		mode_ctl |= NV50_EVO_SOR_MODE_CTRL_CRTC0;

	if (adjusted_mode->flags & DRM_MODE_FLAG_NHSYNC)
		mode_ctl |= NV50_EVO_SOR_MODE_CTRL_NHSYNC;

	if (adjusted_mode->flags & DRM_MODE_FLAG_NVSYNC)
		mode_ctl |= NV50_EVO_SOR_MODE_CTRL_NVSYNC;

	ret = RING_SPACE(evo, 2);
	if (ret) {
		NV_ERROR(dev, "no space while connecting SOR\n");
		return;
	}
	BEGIN_RING(evo, 0, NV50_EVO_SOR(nv_encoder->or, MODE_CTRL), 1);
	OUT_RING(evo, mode_ctl);
}
static void
pscnv_swapping_swap_out(void *data, struct pscnv_client *cl)
{
	struct drm_device *dev = cl->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct pscnv_swaptask *st = data;
	struct pscnv_chunk *cnk;
	int ret;
	size_t i;

	BUG_ON(st->tgt != cl);

	if (pscnv_swapping_debug >= 2) {
		NV_INFO(dev, "pscnv_swapping_swap_out: [client %d] begin swaptask "
			"%d with %lu chunks\n", cl->pid, st->serial,
			st->selected.size);
	}

	dev_priv->last_mem_alloc_change_time = jiffies;

	for (i = 0; i < st->selected.size; i++) {
		cnk = st->selected.chunks[i];

		if (pscnv_chunk_expect_alloc_type(cnk, PSCNV_CHUNK_VRAM,
						  "pscnv_swapping_swap_out")) {
			continue;
		}

		/* until now: one chunk after the other
		 * increases swapped out counter and vram_demand (on fail) */
		ret = pscnv_vram_to_host(cnk);
		if (ret) {
			NV_ERROR(dev, "pscnv_swapping_swap_out: [client %d] "
				"vram_to_host failed for chunk %08x/%d-%u\n",
				cl->pid, cnk->bo->cookie, cnk->bo->serial,
				cnk->idx);
			/* continue and try with next */
		}

		mutex_lock(&dev_priv->clients->lock);
		pscnv_chunk_list_remove_unlocked(&cl->swap_pending, cnk);
		if (ret) {
			/* failure, return to swapping_options */
			pscnv_chunk_list_add_unlocked(&cl->swapping_options, cnk);
		} else {
			pscnv_chunk_list_add_unlocked(&cl->already_swapped, cnk);
		}
		mutex_unlock(&dev_priv->clients->lock);
	}

	if (pscnv_swapping_debug >= 2) {
		NV_INFO(dev, "pscnv_swapping_swap_out: [client %d] end swaptask %d\n",
			cl->pid, st->serial);
	}

	complete(&st->completion);
}