Example #1
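nouveau_bo_vm_bind() handles a TTM buffer object moving into VRAM: on hardware with a channel VM it maps the new memory through nouveau_vm_map(); otherwise, on NV10 and later cards, it falls back to configuring a tiling region for the buffer.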
static int
nouveau_bo_vm_bind(struct ttm_buffer_object *bo, struct ttm_mem_reg *new_mem,
                   struct nouveau_tile_reg **new_tile)
{
    struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
    struct drm_device *dev = dev_priv->dev;
    struct nouveau_bo *nvbo = nouveau_bo(bo);
    uint64_t offset;

    if (nvbo->no_vm || new_mem->mem_type != TTM_PL_VRAM) {
        /* Nothing to do. */
        *new_tile = NULL;
        return 0;
    }

    offset = new_mem->start << PAGE_SHIFT;

    if (dev_priv->chan_vm) {
        nouveau_vm_map(&nvbo->vma, new_mem->mm_node);
    } else if (dev_priv->card_type >= NV_10) {
        *new_tile = nv10_mem_set_tiling(dev, offset, new_mem->size,
                                        nvbo->tile_mode,
                                        nvbo->tile_flags);
    }

    return 0;
}
Example #2
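nv50_bar_umap() reserves a virtual range in the BAR1 VM with nouveau_vm_get() (mem->size is in 4 KiB units, hence the << 12 to get bytes) and then maps the backing memory into it.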
static int
nv50_bar_umap(struct nouveau_bar *bar, struct nouveau_mem *mem,
	      u32 flags, struct nouveau_vma *vma)
{
	struct nv50_bar_priv *priv = (void *)bar;
	int ret;

	ret = nouveau_vm_get(priv->bar1_vm, mem->size << 12, 12, flags, vma);
	if (ret)
		return ret;

	nouveau_vm_map(vma, mem);
	return 0;
}
Example #3
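nvc0_bar_kmap() does the same against the NVC0 BAR VM, then flushes the BAR's VM engine so the hardware sees the updated page tables.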
static int
nvc0_bar_kmap(struct nouveau_bar *bar, struct nouveau_mem *mem,
	      u32 flags, struct nouveau_vma *vma)
{
	struct nvc0_bar_priv *priv = (void *)bar;
	int ret;

	ret = nouveau_vm_get(priv->bar[0].vm, mem->size << 12, 12, flags, vma);
	if (ret)
		return ret;

	nouveau_vm_map(vma, mem);
	nvc0_vm_flush_engine(nv_subdev(bar), priv->bar[0].pgd->addr, 5);
	return 0;
}
Example #4
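nouveau_gpuobj_map_vm() maps a gpuobj's backing memory into an arbitrary VM with the requested access flags, recovering the nouveau_mem pointer stored immediately after the instobj structure.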
int
nouveau_gpuobj_map_vm(struct nouveau_gpuobj *gpuobj, struct nouveau_vm *vm,
		      u32 access, struct nouveau_vma *vma)
{
	struct nouveau_instobj *iobj = (void *)
		nv_pclass(nv_object(gpuobj), NV_MEMOBJ_CLASS);
	struct nouveau_mem **mem = (void *)(iobj + 1);
	int ret;

	ret = nouveau_vm_get(vm, gpuobj->size, 12, access, vma);
	if (ret)
		return ret;

	nouveau_vm_map(vma, *mem);
	return 0;
}
Example #5
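nv50_instmem_map() maps a gpuobj into the BAR3 VM and records the resulting virtual offset as the object's pinst address.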
int
nv50_instmem_map(struct nouveau_gpuobj *gpuobj)
{
	struct drm_nouveau_private *dev_priv = gpuobj->dev->dev_private;
	struct nv50_gpuobj_node *node = gpuobj->node;
	int ret;

	ret = nouveau_vm_get(dev_priv->bar3_vm, gpuobj->size, 12,
			     NV_MEM_ACCESS_RW, &node->vram->bar_vma);
	if (ret)
		return ret;

	nouveau_vm_map(&node->vram->bar_vma, node->vram);
	gpuobj->pinst = node->vram->bar_vma.offset;
	return 0;
}
Example #6
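nv50_instmem_get() allocates VRAM for a new gpuobj, rounding size and alignment up to 4 KiB, and, for objects flagged NVOBJ_FLAG_VM, also maps the allocation into the channel's VM.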
int
nv50_instmem_get(struct nouveau_gpuobj *gpuobj, struct nouveau_channel *chan,
		 u32 size, u32 align)
{
	struct drm_device *dev = gpuobj->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_vram_engine *vram = &dev_priv->engine.vram;
	struct nv50_gpuobj_node *node = NULL;
	int ret;

	node = kzalloc(sizeof(*node), GFP_KERNEL);
	if (!node)
		return -ENOMEM;
	node->align = align;

	/* Round the size up to a whole 4 KiB page and enforce at least
	 * page alignment. */
	size  = (size + 4095) & ~4095;
	align = max(align, (u32)4096);

	ret = vram->get(dev, size, align, 0, 0, &node->vram);
	if (ret) {
		kfree(node);
		return ret;
	}

	gpuobj->vinst = node->vram->offset;

	if (gpuobj->flags & NVOBJ_FLAG_VM) {
		u32 flags = NV_MEM_ACCESS_RW;
		if (!(gpuobj->flags & NVOBJ_FLAG_VM_USER))
			flags |= NV_MEM_ACCESS_SYS;

		ret = nouveau_vm_get(chan->vm, size, 12, flags,
				     &node->chan_vma);
		if (ret) {
			vram->put(dev, &node->vram);
			kfree(node);
			return ret;
		}

		nouveau_vm_map(&node->chan_vma, node->vram);
		gpuobj->linst = node->chan_vma.offset;
	}

	gpuobj->size = size;
	gpuobj->node = node;
	return 0;
}
Example #7
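nv04_sgdma_bind() fills the nouveau_mem node with either the scatter-gather table or the per-page DMA address array, then maps the node through its preallocated VMA.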
static int
nv04_sgdma_bind(struct ttm_tt *ttm, struct ttm_mem_reg *mem)
{
	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;
	struct nouveau_mem *node = mem->mm_node;

	if (ttm->sg) {
		node->sg    = ttm->sg;
		node->pages = NULL;
	} else {
		node->sg    = NULL;
		node->pages = nvbe->ttm.dma_address;
	}
	/* Convert the region size from TTM pages to the 4 KiB units the
	 * VM code works in. */
	node->size = (mem->num_pages << PAGE_SHIFT) >> 12;

	nouveau_vm_map(&node->vma[0], node);
	nvbe->node = node;
	return 0;
}
Example #8
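nouveau_ttm_io_mem_reserve() computes the bus addressing for a TTM memory region; for VRAM behind a BAR1 VM it reserves a BAR1 range and maps the VRAM into it so the CPU can reach it through the BAR aperture.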
static int
nouveau_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
{
    struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
    struct drm_nouveau_private *dev_priv = nouveau_bdev(bdev);
    struct drm_device *dev = dev_priv->dev;
    int ret;

    mem->bus.addr = NULL;
    mem->bus.offset = 0;
    mem->bus.size = mem->num_pages << PAGE_SHIFT;
    mem->bus.base = 0;
    mem->bus.is_iomem = false;
    if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE))
        return -EINVAL;
    switch (mem->mem_type) {
    case TTM_PL_SYSTEM:
        /* System memory */
        return 0;
    case TTM_PL_TT:
#if __OS_HAS_AGP
        if (dev_priv->gart_info.type == NOUVEAU_GART_AGP) {
            mem->bus.offset = mem->start << PAGE_SHIFT;
            mem->bus.base = dev_priv->gart_info.aper_base;
            mem->bus.is_iomem = true;
        }
#endif
        break;
    case TTM_PL_VRAM:
    {
        struct nouveau_vram *vram = mem->mm_node;
        u8 page_shift;

        if (!dev_priv->bar1_vm) {
            mem->bus.offset = mem->start << PAGE_SHIFT;
            mem->bus.base = pci_resource_start(dev->pdev, 1);
            mem->bus.is_iomem = true;
            break;
        }

        if (dev_priv->card_type == NV_C0)
            page_shift = vram->page_shift;
        else
            page_shift = 12;

        ret = nouveau_vm_get(dev_priv->bar1_vm, mem->bus.size,
                             page_shift, NV_MEM_ACCESS_RW,
                             &vram->bar_vma);
        if (ret)
            return ret;

        /* nouveau_vm_map() returns void and cannot fail, so no error
         * check is needed here. */
        nouveau_vm_map(&vram->bar_vma, vram);

        mem->bus.offset = vram->bar_vma.offset;
        if (dev_priv->card_type == NV_50) /*XXX*/
            mem->bus.offset -= 0x0020000000ULL;
        mem->bus.base = pci_resource_start(dev->pdev, 1);
        mem->bus.is_iomem = true;
    }
    break;
    default:
        return -EINVAL;
    }
    return 0;
}
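All eight call sites follow the same lifecycle: nouveau_vm_get() reserves a range of virtual addresses in a VM, nouveau_vm_map() fills in the page tables for the backing memory, and the teardown paths (not shown above) undo the two steps with nouveau_vm_unmap() and nouveau_vm_put(). The sketch below condenses that pattern; the function itself is hypothetical, and it assumes, as in Examples #2 and #3, that mem->size is expressed in 4 KiB units.

static int
example_vm_map(struct nouveau_vm *vm, struct nouveau_mem *mem,
	       struct nouveau_vma *vma)
{
	int ret;

	/* Reserve virtual address space: size in bytes, 4 KiB page shift. */
	ret = nouveau_vm_get(vm, mem->size << 12, 12, NV_MEM_ACCESS_RW, vma);
	if (ret)
		return ret;

	/* Point the new range at the backing memory; this cannot fail. */
	nouveau_vm_map(vma, mem);

	/* ... use the mapping via vma->offset ... */

	/* Teardown mirrors the setup: clear the PTEs, then release the
	 * address range. */
	nouveau_vm_unmap(vma);
	nouveau_vm_put(vma);
	return 0;
}

Only nouveau_vm_get() can fail in this sequence; nouveau_vm_map(), nouveau_vm_unmap() and nouveau_vm_put() all return void, which is why none of the examples check a return value from the map call.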