static int
nv50_channel_new(struct drm_device *dev, u32 size, struct nouveau_vm *vm,
		 struct nouveau_channel **pchan)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	u32 pgd = (dev_priv->chipset == 0x50) ? 0x1400 : 0x0200;
	u32  fc = (dev_priv->chipset == 0x50) ? 0x0000 : 0x4200;
	struct nouveau_channel *chan;
	int ret, i;

	chan = kzalloc(sizeof(*chan), GFP_KERNEL);
	if (!chan)
		return -ENOMEM;
	chan->dev = dev;

	ret = nouveau_gpuobj_new(dev, NULL, size, 0x1000, 0, &chan->ramin);
	if (ret) {
		nv50_channel_del(&chan);
		return ret;
	}

	ret = drm_mm_init(&chan->ramin_heap, 0x6000, chan->ramin->size);
	if (ret) {
		nv50_channel_del(&chan);
		return ret;
	}

	ret = nouveau_gpuobj_new_fake(dev, chan->ramin->pinst == ~0 ? ~0 :
				      chan->ramin->pinst + pgd,
				      chan->ramin->vinst + pgd,
				      0x4000, NVOBJ_FLAG_ZERO_ALLOC,
				      &chan->vm_pd);
	if (ret) {
		nv50_channel_del(&chan);
		return ret;
	}

	for (i = 0; i < 0x4000; i += 8) {
		nv_wo32(chan->vm_pd, i + 0, 0x00000000);
		nv_wo32(chan->vm_pd, i + 4, 0xdeadcafe);
	}

	ret = nouveau_vm_ref(vm, &chan->vm, chan->vm_pd);
	if (ret) {
		nv50_channel_del(&chan);
		return ret;
	}

	ret = nouveau_gpuobj_new_fake(dev, chan->ramin->pinst == ~0 ? ~0 :
				      chan->ramin->pinst + fc,
				      chan->ramin->vinst + fc, 0x100,
				      NVOBJ_FLAG_ZERO_ALLOC, &chan->ramfc);
	if (ret) {
		nv50_channel_del(&chan);
		return ret;
	}

	*pchan = chan;
	return 0;
}
Example #2
int i915_gem_init_stolen(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int bios_reserved = 0;

#ifdef CONFIG_INTEL_IOMMU
	if (intel_iommu_gfx_mapped) {
		DRM_INFO("DMAR active, disabling use of stolen memory\n");
		return 0;
	}
#endif

	if (dev_priv->gtt.stolen_size == 0)
		return 0;

	dev_priv->mm.stolen_base = i915_stolen_to_physical(dev);
	if (dev_priv->mm.stolen_base == 0)
		return 0;

	DRM_DEBUG_KMS("found %zd bytes of stolen memory at %08lx\n",
		      dev_priv->gtt.stolen_size, dev_priv->mm.stolen_base);

	if (IS_VALLEYVIEW(dev))
		bios_reserved = 1024*1024; /* top 1M on VLV/BYT */

	if (WARN_ON(bios_reserved > dev_priv->gtt.stolen_size))
		return 0;

	/* Basic memrange allocator for stolen space */
	drm_mm_init(&dev_priv->mm.stolen, 0, dev_priv->gtt.stolen_size -
		    bios_reserved);

	return 0;
}
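The drm_mm instance set up above only tracks ranges; allocations are then carved out of it by the caller. A minimal sketch of such an allocation, assuming the drm_mm API of roughly this kernel generation (drm_mm_insert_node() gained extra search parameters in later kernels); the helper name and the 4096-byte alignment below are illustrative:

/* Hypothetical allocation from the stolen heap initialized above.
 * drm_mm only manages ranges; the caller owns the node's storage. */
static struct drm_mm_node *
example_alloc_stolen(struct drm_i915_private *dev_priv, unsigned long size)
{
	struct drm_mm_node *node;
	int err;

	node = kzalloc(sizeof(*node), GFP_KERNEL);
	if (!node)
		return NULL;

	err = drm_mm_insert_node(&dev_priv->mm.stolen, node, size, 4096);
	if (err) {
		kfree(node);
		return NULL;
	}

	return node;	/* later: drm_mm_remove_node(node); kfree(node); */
}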
Example #3
/**
 * drm_vma_offset_manager_init - Initialize new offset-manager
 * @mgr: Manager object
 * @page_offset: Offset of available memory area (page-based)
 * @size: Size of available address space range (page-based)
 *
 * Initialize a new offset-manager. The offset and area size available for the
 * manager are given as @page_offset and @size. Both are interpreted as
 * page-numbers, not bytes.
 *
 * Adding/removing nodes from the manager is locked internally and protected
 * against concurrent access. However, node allocation and destruction is left
 * for the caller. While calling into the vma-manager, a given node must
 * always be guaranteed to be referenced.
 */
void drm_vma_offset_manager_init(struct drm_vma_offset_manager *mgr,
				 unsigned long page_offset, unsigned long size)
{
	lockinit(&mgr->vm_lock, "drmvml", 0, LK_CANRECURSE);
	mgr->vm_addr_space_rb = RB_ROOT;
	drm_mm_init(&mgr->vm_addr_space_mm, page_offset, size);
}
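Since @page_offset and @size are page numbers rather than bytes, a caller managing a byte-addressed window shifts by PAGE_SHIFT first. A short sketch with a hypothetical manager and window size (only DRM_FILE_PAGE_OFFSET_START below comes from the examples on this page):

/* Hypothetical: manage mmap offsets for a 16 GiB window. Both
 * arguments to drm_vma_offset_manager_init() are in pages. */
static struct drm_vma_offset_manager example_vma_mgr;

static void example_vma_mgr_setup(void)
{
	drm_vma_offset_manager_init(&example_vma_mgr,
				    DRM_FILE_PAGE_OFFSET_START,
				    (16ULL << 30) >> PAGE_SHIFT);
}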
Example #4
int
drm_gem_init(struct drm_device *dev)
{
    struct drm_gem_mm *mm;

    spin_lock_init(&dev->object_name_lock);
    idr_init(&dev->object_name_idr);

    mm = kzalloc(sizeof(struct drm_gem_mm), GFP_KERNEL);
    if (!mm) {
        DRM_ERROR("out of memory\n");
        return -ENOMEM;
    }

    dev->mm_private = mm;

    if (drm_ht_create(&mm->offset_hash, 12)) {
        kfree(mm);
        return -ENOMEM;
    }

    if (drm_mm_init(&mm->offset_manager, DRM_FILE_PAGE_OFFSET_START,
                    DRM_FILE_PAGE_OFFSET_SIZE)) {
        drm_ht_remove(&mm->offset_hash);
        kfree(mm);
        return -ENOMEM;
    }

    return 0;
}
Example #5
int i915_gem_init_stolen(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int bios_reserved = 0;

	if (dev_priv->gtt.stolen_size == 0)
		return 0;

	dev_priv->mm.stolen_base = i915_stolen_to_physical(dev);
	if (dev_priv->mm.stolen_base == 0)
		return 0;

	DRM_DEBUG_KMS("found %zd bytes of stolen memory at %08lx\n",
		      dev_priv->gtt.stolen_size, dev_priv->mm.stolen_base);

	if (IS_VALLEYVIEW(dev))
		bios_reserved = 1024*1024; /* top 1M on VLV/BYT */

	if (WARN_ON(bios_reserved > dev_priv->gtt.stolen_size))
		return 0;

	/* Basic memrange allocator for stolen space */
	drm_mm_init(&dev_priv->mm.stolen, 0, dev_priv->gtt.stolen_size -
		    bios_reserved);

	return 0;
}
Example #6
int
nv20_fb_init(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_fb_engine *pfb = &dev_priv->engine.fb;
	int i;

	if (dev_priv->chipset >= 0x25)
		drm_mm_init(&pfb->tag_heap, 0, 64 * 1024);
	else
		drm_mm_init(&pfb->tag_heap, 0, 32 * 1024);

	/* Turn all the tiling regions off. */
	pfb->num_tiles = NV10_PFB_TILE__SIZE;
	for (i = 0; i < pfb->num_tiles; i++)
		pfb->set_tile_region(dev, i);

	return 0;
}
Example #7
static int ttm_bo_man_init(struct ttm_mem_type_manager *man,
			   unsigned long p_size)
{
	struct ttm_range_manager *rman;

	rman = kzalloc(sizeof(*rman), GFP_KERNEL);
	if (!rman)
		return -ENOMEM;

	drm_mm_init(&rman->mm, 0, p_size);
	spin_lock_init(&rman->lock);
	man->priv = rman;
	return 0;
}
Example #8
/**
 * amdgpu_vram_mgr_init - init VRAM manager and DRM MM
 *
 * @man: TTM memory type manager
 * @p_size: maximum size of VRAM
 *
 * Allocate and initialize the VRAM manager.
 */
static int amdgpu_vram_mgr_init(struct ttm_mem_type_manager *man,
				unsigned long p_size)
{
	struct amdgpu_vram_mgr *mgr;

	mgr = kzalloc(sizeof(*mgr), GFP_KERNEL);
	if (!mgr)
		return -ENOMEM;

	drm_mm_init(&mgr->mm, 0, p_size);
	spin_lock_init(&mgr->lock);
	man->priv = mgr;
	return 0;
}
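The matching teardown mirrors the init: the drm_mm must be emptied, taken down, and only then may the manager be freed. A sketch of that counterpart, modeled loosely on the in-tree amdgpu_vram_mgr_fini() (treat the details as an assumption rather than a quote):

/* Sketch: tear down the VRAM manager created above. drm_mm_takedown()
 * expects every node to have been removed already. */
static int example_vram_mgr_fini(struct ttm_mem_type_manager *man)
{
	struct amdgpu_vram_mgr *mgr = man->priv;

	spin_lock(&mgr->lock);
	drm_mm_takedown(&mgr->mm);
	spin_unlock(&mgr->lock);

	kfree(mgr);
	man->priv = NULL;
	return 0;
}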
Example #9
static int ttm_bo_man_init(struct ttm_mem_type_manager *man,
			   unsigned long p_size)
{
	struct ttm_range_manager *rman;

	rman = kmalloc(sizeof(*rman), M_DRM, M_ZERO | M_WAITOK);
	if (!rman)
		return -ENOMEM;

	drm_mm_init(&rman->mm, 0, p_size);

	lockinit(&rman->lock, "ttmrman", 0, LK_CANRECURSE);
	man->priv = rman;
	return 0;
}
Example #10
static int tegra_drm_load(struct drm_device *drm, unsigned long flags)
{
	struct host1x_device *device = to_host1x_device(drm->dev);
	struct tegra_drm *tegra;
	int err;

	tegra = kzalloc(sizeof(*tegra), GFP_KERNEL);
	if (!tegra)
		return -ENOMEM;

	if (iommu_present(&platform_bus_type)) {
		u64 carveout_start, carveout_end, gem_start, gem_end;
		struct iommu_domain_geometry *geometry;
		unsigned long order;

		tegra->domain = iommu_domain_alloc(&platform_bus_type);
		if (!tegra->domain) {
			err = -ENOMEM;
			goto free;
		}

		err = iova_cache_get();
		if (err < 0)
			goto domain;

		geometry = &tegra->domain->geometry;
		gem_start = geometry->aperture_start;
		gem_end = geometry->aperture_end - CARVEOUT_SZ;
		carveout_start = gem_end + 1;
		carveout_end = geometry->aperture_end;

		order = __ffs(tegra->domain->pgsize_bitmap);
		init_iova_domain(&tegra->carveout.domain, 1UL << order,
				 carveout_start >> order);

		tegra->carveout.shift = iova_shift(&tegra->carveout.domain);
		tegra->carveout.limit = carveout_end >> tegra->carveout.shift;

		drm_mm_init(&tegra->mm, gem_start, gem_end - gem_start + 1);
		mutex_init(&tegra->mm_lock);

		DRM_DEBUG("IOMMU apertures:\n");
		DRM_DEBUG("  GEM: %#llx-%#llx\n", gem_start, gem_end);
		DRM_DEBUG("  Carveout: %#llx-%#llx\n", carveout_start,
			  carveout_end);
	}

	/* ... remainder of the load path omitted in this excerpt ... */

domain:
	iommu_domain_free(tegra->domain);
free:
	kfree(tegra);
	return err;
}
Example #11
static int ttm_bo_man_init(struct ttm_mem_type_manager *man,
			   unsigned long p_size)
{
	struct ttm_range_manager *rman;
	int ret;

	rman = malloc(sizeof(*rman), M_TTM_RMAN, M_ZERO | M_WAITOK);
	ret = drm_mm_init(&rman->mm, 0, p_size);
	if (ret) {
		free(rman, M_TTM_RMAN);
		return ret;
	}

	mtx_init(&rman->lock, "ttmrman", NULL, MTX_DEF);
	man->priv = rman;
	return 0;
}
Example #12
int ttm_bo_device_init(struct ttm_bo_device *bdev,
		       struct ttm_bo_global *glob,
		       struct ttm_bo_driver *driver,
		       uint64_t file_page_offset,
		       bool need_dma32)
{
	int ret = -EINVAL;

	rwlock_init(&bdev->vm_lock);
	bdev->driver = driver;

	memset(bdev->man, 0, sizeof(bdev->man));

	/*
	 * Initialize the system memory buffer type.
	 * Other types need to be driver / IOCTL initialized.
	 */
	ret = ttm_bo_init_mm(bdev, TTM_PL_SYSTEM, 0);
	if (unlikely(ret != 0))
		goto out_no_sys;

	bdev->addr_space_rb = RB_ROOT;
	ret = drm_mm_init(&bdev->addr_space_mm, file_page_offset, 0x10000000);
	if (unlikely(ret != 0))
		goto out_no_addr_mm;

	INIT_DELAYED_WORK(&bdev->wq, ttm_bo_delayed_workqueue);
	bdev->nice_mode = true;
	INIT_LIST_HEAD(&bdev->ddestroy);
	bdev->dev_mapping = NULL;
	bdev->glob = glob;
	bdev->need_dma32 = need_dma32;
	bdev->val_seq = 0;
	spin_lock_init(&bdev->fence_lock);
	mutex_lock(&glob->device_list_mutex);
	list_add_tail(&bdev->device_list, &glob->device_list);
	mutex_unlock(&glob->device_list_mutex);

	return 0;
out_no_addr_mm:
	ttm_bo_clean_mm(bdev, 0);
out_no_sys:
	return ret;
}
Example #13
int ttm_bo_init_mm(struct ttm_bo_device *bdev, unsigned type,
			unsigned long p_size)
{
	int ret = -EINVAL;
	struct ttm_mem_type_manager *man;

	if (type >= TTM_NUM_MEM_TYPES) {
		printk(KERN_ERR TTM_PFX "Illegal memory type %d\n", type);
		return ret;
	}

	man = &bdev->man[type];
	if (man->has_type) {
		printk(KERN_ERR TTM_PFX
		       "Memory manager already initialized for type %d\n",
		       type);
		return ret;
	}

	ret = bdev->driver->init_mem_type(bdev, type, man);
	if (ret)
		return ret;

	ret = 0;
	if (type != TTM_PL_SYSTEM) {
		if (!p_size) {
			printk(KERN_ERR TTM_PFX
			       "Zero size memory manager type %d\n",
			       type);
			return -EINVAL;
		}
		ret = drm_mm_init(&man->manager, 0, p_size);
		if (ret)
			return ret;
	}
	man->has_type = true;
	man->use_type = true;
	man->size = p_size;

	INIT_LIST_HEAD(&man->lru);

	return 0;
}
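For any type other than TTM_PL_SYSTEM the caller supplies the zone size in pages. A usage sketch, modeled on radeon's TTM setup of the same era (the field names are that driver's and should be treated as an assumption):

/* Register VRAM as a sized TTM zone: p_size is a page count, so the
 * byte size is shifted right by PAGE_SHIFT. */
static int example_register_vram(struct radeon_device *rdev)
{
	return ttm_bo_init_mm(&rdev->mman.bdev, TTM_PL_VRAM,
			      rdev->mc.real_vram_size >> PAGE_SHIFT);
}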
Example #14
static int ttm_bo_man_init(struct ttm_mem_type_manager *man,
			   unsigned long p_size)
{
	struct ttm_range_manager *rman;
	int ret;

	rman = kzalloc(sizeof(*rman), GFP_KERNEL);
	if (!rman)
		return -ENOMEM;

	ret = drm_mm_init(&rman->mm, 0, p_size);
	if (ret) {
		kfree(rman);
		return ret;
	}

	mtx_init(&rman->lock, IPL_NONE);
	man->priv = rman;
	return 0;
}
Example #15
int
nouveau_notifier_init_channel(struct nouveau_channel *chan)
{
	struct drm_device *dev = chan->dev;
	struct nouveau_bo *ntfy = NULL;
	uint32_t flags, ttmpl;
	int ret;

	if (nouveau_vram_notify) {
		flags = NOUVEAU_GEM_DOMAIN_VRAM;
		ttmpl = TTM_PL_FLAG_VRAM;
	} else {
		flags = NOUVEAU_GEM_DOMAIN_GART;
		ttmpl = TTM_PL_FLAG_TT;
	}

	ret = nouveau_gem_new(dev, NULL, PAGE_SIZE, 0, flags, 0, 0, &ntfy);
	if (ret)
		return ret;

	ret = nouveau_bo_pin(ntfy, ttmpl);
	if (ret)
		goto out_err;

	ret = nouveau_bo_map(ntfy);
	if (ret)
		goto out_err;

	ret = drm_mm_init(&chan->notifier_heap, 0, ntfy->bo.mem.size);
	if (ret)
		goto out_err;

	chan->notifier_bo = ntfy;
out_err:
	if (ret)
		drm_gem_object_unreference_unlocked(ntfy->gem);

	return ret;
}
Example #16
static int
nvc0_channel_new(struct drm_device *dev, u32 size, struct nouveau_vm *vm,
		 struct nouveau_channel **pchan,
		 struct nouveau_gpuobj *pgd, u64 vm_size)
{
	struct nouveau_channel *chan;
	int ret;

	chan = kzalloc(sizeof(*chan), GFP_KERNEL);
	if (!chan)
		return -ENOMEM;
	chan->dev = dev;

	ret = nouveau_gpuobj_new(dev, NULL, size, 0x1000, 0, &chan->ramin);
	if (ret) {
		nvc0_channel_del(&chan);
		return ret;
	}

	ret = drm_mm_init(&chan->ramin_heap, 0x1000, size - 0x1000);
	if (ret) {
		nvc0_channel_del(&chan);
		return ret;
	}

	ret = nouveau_vm_ref(vm, &chan->vm, NULL);
	if (ret) {
		nvc0_channel_del(&chan);
		return ret;
	}

	nv_wo32(chan->ramin, 0x0200, lower_32_bits(pgd->vinst));
	nv_wo32(chan->ramin, 0x0204, upper_32_bits(pgd->vinst));
	nv_wo32(chan->ramin, 0x0208, lower_32_bits(vm_size - 1));
	nv_wo32(chan->ramin, 0x020c, upper_32_bits(vm_size - 1));

	*pchan = chan;
	return 0;
}
Example #17
int
nouveau_notifier_init_channel(struct nouveau_channel *chan)
{
	struct drm_device *dev = chan->dev;
	struct nouveau_bo *ntfy = NULL;
	uint32_t flags;
	int ret;

	if (nouveau_vram_notify)
		flags = TTM_PL_FLAG_VRAM;
	else
		flags = TTM_PL_FLAG_TT;

	ret = nouveau_gem_new(dev, NULL, PAGE_SIZE, 0, flags,
			      0, 0x0000, false, true, &ntfy);
	if (ret)
		return ret;

	ret = nouveau_bo_pin(ntfy, flags);
	if (ret)
		goto out_err;

	ret = nouveau_bo_map(ntfy);
	if (ret)
		goto out_err;

	ret = drm_mm_init(&chan->notifier_heap, 0, ntfy->bo.mem.size);
	if (ret)
		goto out_err;

	chan->notifier_bo = ntfy;
out_err:
	if (ret)
		drm_gem_object_unreference_unlocked(ntfy->gem);

	return ret;
}
Example #18
int
nv50_instmem_init(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nv50_instmem_priv *priv;
	struct nouveau_channel *chan;
	struct nouveau_vm *vm;
	int ret, i;
	u32 tmp;

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;
	dev_priv->engine.instmem.priv = priv;

	/* Save state; restored at takedown. */
	for (i = 0x1700; i <= 0x1710; i += 4)
		priv->save1700[(i-0x1700)/4] = nv_rd32(dev, i);

	/* Global PRAMIN heap */
	ret = drm_mm_init(&dev_priv->ramin_heap, 0, dev_priv->ramin_size);
	if (ret) {
		NV_ERROR(dev, "Failed to init RAMIN heap\n");
		goto error;
	}

	/* BAR3 */
	ret = nouveau_vm_new(dev, BAR3_VM_BASE, BAR3_VM_SIZE, BAR3_VM_BASE,
			     &dev_priv->bar3_vm);
	if (ret)
		goto error;

	ret = nouveau_gpuobj_new(dev, NULL, (BAR3_VM_SIZE >> 12) * 8,
				 0x1000, NVOBJ_FLAG_DONT_MAP |
				 NVOBJ_FLAG_ZERO_ALLOC,
				 &dev_priv->bar3_vm->pgt[0].obj[0]);
	if (ret)
		goto error;
	dev_priv->bar3_vm->pgt[0].refcount[0] = 1;

	nv50_instmem_map(dev_priv->bar3_vm->pgt[0].obj[0]);

	ret = nv50_channel_new(dev, 128 * 1024, dev_priv->bar3_vm, &chan);
	if (ret)
		goto error;
	dev_priv->channels.ptr[0] = dev_priv->channels.ptr[127] = chan;

	ret = nv50_gpuobj_dma_new(chan, 0x0000, BAR3_VM_BASE, BAR3_VM_SIZE,
				  NV_MEM_TARGET_VM, NV_MEM_ACCESS_VM,
				  NV_MEM_TYPE_VM, NV_MEM_COMP_VM,
				  &priv->bar3_dmaobj);
	if (ret)
		goto error;

	nv_wr32(dev, 0x001704, 0x00000000 | (chan->ramin->vinst >> 12));
	nv_wr32(dev, 0x001704, 0x40000000 | (chan->ramin->vinst >> 12));
	nv_wr32(dev, 0x00170c, 0x80000000 | (priv->bar3_dmaobj->cinst >> 4));

	dev_priv->engine.instmem.flush(dev);
	dev_priv->ramin_available = true;

	tmp = nv_ro32(chan->ramin, 0);
	nv_wo32(chan->ramin, 0, ~tmp);
	if (nv_ro32(chan->ramin, 0) != ~tmp) {
		NV_ERROR(dev, "PRAMIN readback failed\n");
		ret = -EIO;
		goto error;
	}
	nv_wo32(chan->ramin, 0, tmp);

	/* BAR1 */
	ret = nouveau_vm_new(dev, BAR1_VM_BASE, BAR1_VM_SIZE, BAR1_VM_BASE, &vm);
	if (ret)
		goto error;

	ret = nouveau_vm_ref(vm, &dev_priv->bar1_vm, chan->vm_pd);
	if (ret)
		goto error;
	nouveau_vm_ref(NULL, &vm, NULL);

	ret = nv50_gpuobj_dma_new(chan, 0x0000, BAR1_VM_BASE, BAR1_VM_SIZE,
				  NV_MEM_TARGET_VM, NV_MEM_ACCESS_VM,
				  NV_MEM_TYPE_VM, NV_MEM_COMP_VM,
				  &priv->bar1_dmaobj);
	if (ret)
		goto error;

	nv_wr32(dev, 0x001708, 0x80000000 | (priv->bar1_dmaobj->cinst >> 4));
	for (i = 0; i < 8; i++)
		nv_wr32(dev, 0x1900 + (i*4), 0);

	/* Create shared channel VM; space is reserved at the start to catch
	 * NULL pointer references.
	 */
	ret = nouveau_vm_new(dev, 0, (1ULL << 40), 0x0020000000ULL,
			     &dev_priv->chan_vm);
	if (ret)
		return ret;

	return 0;

error:
	nv50_instmem_takedown(dev);
	return ret;
}
Example #19
int i915_gem_init_stolen(struct drm_i915_private *dev_priv)
{
	resource_size_t reserved_base, stolen_top;
	resource_size_t reserved_total, reserved_size;

	mutex_init(&dev_priv->mm.stolen_lock);

	if (intel_vgpu_active(dev_priv)) {
		DRM_INFO("iGVT-g active, disabling use of stolen memory\n");
		return 0;
	}

	if (intel_vtd_active() && INTEL_GEN(dev_priv) < 8) {
		DRM_INFO("DMAR active, disabling use of stolen memory\n");
		return 0;
	}

	if (resource_size(&intel_graphics_stolen_res) == 0)
		return 0;

	dev_priv->dsm = intel_graphics_stolen_res;

	if (i915_adjust_stolen(dev_priv, &dev_priv->dsm))
		return 0;

	GEM_BUG_ON(dev_priv->dsm.start == 0);
	GEM_BUG_ON(dev_priv->dsm.end <= dev_priv->dsm.start);

	stolen_top = dev_priv->dsm.end + 1;
	reserved_base = stolen_top;
	reserved_size = 0;

	switch (INTEL_GEN(dev_priv)) {
	case 2:
	case 3:
		break;
	case 4:
		if (!IS_G4X(dev_priv))
			break;
		/* fall through */
	case 5:
		g4x_get_stolen_reserved(dev_priv,
					&reserved_base, &reserved_size);
		break;
	case 6:
		gen6_get_stolen_reserved(dev_priv,
					 &reserved_base, &reserved_size);
		break;
	case 7:
		if (IS_VALLEYVIEW(dev_priv))
			vlv_get_stolen_reserved(dev_priv,
						&reserved_base, &reserved_size);
		else
			gen7_get_stolen_reserved(dev_priv,
						 &reserved_base, &reserved_size);
		break;
	case 8:
	case 9:
	case 10:
		if (IS_LP(dev_priv))
			chv_get_stolen_reserved(dev_priv,
						&reserved_base, &reserved_size);
		else
			bdw_get_stolen_reserved(dev_priv,
						&reserved_base, &reserved_size);
		break;
	case 11:
	default:
		icl_get_stolen_reserved(dev_priv, &reserved_base,
					&reserved_size);
		break;
	}

	/*
	 * Our expectation is that the reserved space is at the top of the
	 * stolen region and *never* at the bottom. If we see !reserved_base,
	 * it likely means we failed to read the registers correctly.
	 */
	if (!reserved_base) {
		DRM_ERROR("inconsistent reservation %pa + %pa; ignoring\n",
			  &reserved_base, &reserved_size);
		reserved_base = stolen_top;
		reserved_size = 0;
	}

	dev_priv->dsm_reserved =
		(struct resource) DEFINE_RES_MEM(reserved_base, reserved_size);

	if (!resource_contains(&dev_priv->dsm, &dev_priv->dsm_reserved)) {
		DRM_ERROR("Stolen reserved area %pR outside stolen memory %pR\n",
			  &dev_priv->dsm_reserved, &dev_priv->dsm);
		return 0;
	}

	/* It is possible for the reserved area to end before the end of stolen
	 * memory, so just consider the start. */
	reserved_total = stolen_top - reserved_base;

	DRM_DEBUG_DRIVER("Memory reserved for graphics device: %lluK, usable: %lluK\n",
			 (u64)resource_size(&dev_priv->dsm) >> 10,
			 ((u64)resource_size(&dev_priv->dsm) - reserved_total) >> 10);

	dev_priv->stolen_usable_size =
		resource_size(&dev_priv->dsm) - reserved_total;

	/* Basic memrange allocator for stolen space. */
	drm_mm_init(&dev_priv->mm.stolen, 0, dev_priv->stolen_usable_size);

	return 0;
}
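To make the reservation arithmetic concrete, a worked example with purely illustrative numbers: a 64 MiB stolen region whose top 8 MiB is reserved.

/* Illustrative values only (64 MiB stolen region at 0x80000000). */
static struct drm_mm example_mm;

static void example_stolen_math(void)
{
	resource_size_t dsm_size       = 64 << 20;                   /* 64 MiB      */
	resource_size_t stolen_top     = 0x80000000 + dsm_size;      /* dsm.end + 1 */
	resource_size_t reserved_base  = stolen_top - (8 << 20);     /* top 8 MiB   */
	resource_size_t reserved_total = stolen_top - reserved_base; /*  8 MiB      */
	resource_size_t usable         = dsm_size - reserved_total;  /* 56 MiB      */

	/* The allocator works in offsets within stolen memory, not in
	 * physical addresses, hence the zero start. */
	drm_mm_init(&example_mm, 0, usable);
}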
Example #20
static int armada_drm_load(struct drm_device *dev, unsigned long flags)
{
    const struct platform_device_id *id;
    const struct armada_variant *variant;
    struct armada_private *priv;
    struct resource *res[ARRAY_SIZE(priv->dcrtc)];
    struct resource *mem = NULL;
    int ret, n, i;

    memset(res, 0, sizeof(res));

    for (n = i = 0; ; n++) {
        struct resource *r = platform_get_resource(dev->platformdev,
                             IORESOURCE_MEM, n);
        if (!r)
            break;

        /* Resources above 64K are graphics memory */
        if (resource_size(r) > SZ_64K)
            mem = r;
        else if (i < ARRAY_SIZE(priv->dcrtc))
            res[i++] = r;
        else
            return -EINVAL;
    }

    if (!mem)
        return -ENXIO;

    if (!devm_request_mem_region(dev->dev, mem->start,
                                 resource_size(mem), "armada-drm"))
        return -EBUSY;

    priv = devm_kzalloc(dev->dev, sizeof(*priv), GFP_KERNEL);
    if (!priv) {
        DRM_ERROR("failed to allocate private\n");
        return -ENOMEM;
    }

    platform_set_drvdata(dev->platformdev, dev);
    dev->dev_private = priv;

    /* Get the implementation specific driver data. */
    id = platform_get_device_id(dev->platformdev);
    if (!id)
        return -ENXIO;

    variant = (const struct armada_variant *)id->driver_data;

    INIT_WORK(&priv->fb_unref_work, armada_drm_unref_work);
    INIT_KFIFO(priv->fb_unref);

    /* Mode setting support */
    drm_mode_config_init(dev);
    dev->mode_config.min_width = 320;
    dev->mode_config.min_height = 200;

    /*
     * With vscale enabled, the maximum width is 1920 due to the
     * 1920 by 3 lines RAM
     */
    dev->mode_config.max_width = 1920;
    dev->mode_config.max_height = 2048;

    dev->mode_config.preferred_depth = 24;
    dev->mode_config.funcs = &armada_drm_mode_config_funcs;
    drm_mm_init(&priv->linear, mem->start, resource_size(mem));

    /* Create all LCD controllers */
    for (n = 0; n < ARRAY_SIZE(priv->dcrtc); n++) {
        int irq;

        if (!res[n])
            break;

        irq = platform_get_irq(dev->platformdev, n);
        if (irq < 0)
            goto err_kms;

        ret = armada_drm_crtc_create(dev, dev->dev, res[n], irq,
                                     variant, NULL);
        if (ret)
            goto err_kms;
    }

    if (is_componentized(dev->dev)) {
        ret = component_bind_all(dev->dev, dev);
        if (ret)
            goto err_kms;
    } else {
#ifdef CONFIG_DRM_ARMADA_TDA1998X
        ret = armada_drm_connector_slave_create(dev, &tda19988_config);
        if (ret)
            goto err_kms;
#endif
    }

    ret = drm_vblank_init(dev, dev->mode_config.num_crtc);
    if (ret)
        goto err_comp;

    dev->irq_enabled = true;
    dev->vblank_disable_allowed = 1;

    ret = armada_fbdev_init(dev);
    if (ret)
        goto err_comp;

    drm_kms_helper_poll_init(dev);

    return 0;

err_comp:
    if (is_componentized(dev->dev))
        component_unbind_all(dev->dev, dev);
err_kms:
    drm_mode_config_cleanup(dev);
    drm_mm_takedown(&priv->linear);
    flush_work(&priv->fb_unref_work);

    return ret;
}
Example #21
static int armada_drm_load(struct drm_device *dev, unsigned long flags)
{
	struct armada_private *priv;
	struct resource *mem = NULL;
	int ret, n;

	for (n = 0; ; n++) {
		struct resource *r = platform_get_resource(dev->platformdev,
							   IORESOURCE_MEM, n);
		if (!r)
			break;

		/* Resources above 64K are graphics memory */
		if (resource_size(r) > SZ_64K)
			mem = r;
		else
			return -EINVAL;
	}

	if (!mem)
		return -ENXIO;

	if (!devm_request_mem_region(dev->dev, mem->start,
			resource_size(mem), "armada-drm"))
		return -EBUSY;

	priv = devm_kzalloc(dev->dev, sizeof(*priv), GFP_KERNEL);
	if (!priv) {
		DRM_ERROR("failed to allocate private\n");
		return -ENOMEM;
	}

	platform_set_drvdata(dev->platformdev, dev);
	dev->dev_private = priv;

	INIT_WORK(&priv->fb_unref_work, armada_drm_unref_work);
	INIT_KFIFO(priv->fb_unref);

	/* Mode setting support */
	drm_mode_config_init(dev);
	dev->mode_config.min_width = 320;
	dev->mode_config.min_height = 200;

	/*
	 * With vscale enabled, the maximum width is 1920 due to the
	 * 1920 by 3 lines RAM
	 */
	dev->mode_config.max_width = 1920;
	dev->mode_config.max_height = 2048;

	dev->mode_config.preferred_depth = 24;
	dev->mode_config.funcs = &armada_drm_mode_config_funcs;
	drm_mm_init(&priv->linear, mem->start, resource_size(mem));
	mutex_init(&priv->linear_lock);

	ret = component_bind_all(dev->dev, dev);
	if (ret)
		goto err_kms;

	ret = drm_vblank_init(dev, dev->mode_config.num_crtc);
	if (ret)
		goto err_comp;

	dev->irq_enabled = true;
	dev->vblank_disable_allowed = 1;

	ret = armada_fbdev_init(dev);
	if (ret)
		goto err_comp;

	drm_kms_helper_poll_init(dev);

	return 0;

 err_comp:
	component_unbind_all(dev->dev, dev);
 err_kms:
	drm_mode_config_cleanup(dev);
	drm_mm_takedown(&priv->linear);
	flush_work(&priv->fb_unref_work);

	return ret;
}