Example #1
static void
rnic_init(struct iwch_dev *rnicp)
{

	idr_init(&rnicp->cqidr);
	idr_init(&rnicp->qpidr);
	idr_init(&rnicp->mmidr);
	mtx_init(&rnicp->lock, "iwch rnic lock", NULL, MTX_DEF|MTX_DUPOK);

	rnicp->attr.vendor_id = 0x168;
	rnicp->attr.vendor_part_id = 7;
	rnicp->attr.max_qps = T3_MAX_NUM_QP - 32;
	rnicp->attr.max_wrs = T3_MAX_QP_DEPTH;
	rnicp->attr.max_sge_per_wr = T3_MAX_SGE;
	rnicp->attr.max_sge_per_rdma_write_wr = T3_MAX_SGE;
	rnicp->attr.max_cqs = T3_MAX_NUM_CQ - 1;
	rnicp->attr.max_cqes_per_cq = T3_MAX_CQ_DEPTH;
	rnicp->attr.max_mem_regs = cxio_num_stags(&rnicp->rdev);
	rnicp->attr.max_phys_buf_entries = T3_MAX_PBL_SIZE;
	rnicp->attr.max_pds = T3_MAX_NUM_PD - 1;
	rnicp->attr.mem_pgsizes_bitmask = T3_PAGESIZE_MASK;
	rnicp->attr.max_mr_size = T3_MAX_MR_SIZE;
	rnicp->attr.can_resize_wq = 0;
	rnicp->attr.max_rdma_reads_per_qp = 8;
	rnicp->attr.max_rdma_read_resources =
	    rnicp->attr.max_rdma_reads_per_qp * rnicp->attr.max_qps;
	rnicp->attr.max_rdma_read_qp_depth = 8;	/* IRD */
	rnicp->attr.max_rdma_read_depth =
	    rnicp->attr.max_rdma_read_qp_depth * rnicp->attr.max_qps;
	rnicp->attr.rq_overflow_handled = 0;
	rnicp->attr.can_modify_ird = 0;
	rnicp->attr.can_modify_ord = 0;
	rnicp->attr.max_mem_windows = rnicp->attr.max_mem_regs - 1;
	rnicp->attr.stag0_value = 1;
	rnicp->attr.zbva_support = 1;
	rnicp->attr.local_invalidate_fence = 1;
	rnicp->attr.cq_overflow_detection = 1;

	return;
}
Example #2
File: drm_gem.c Project: E-LLP/n900
int
drm_gem_init(struct drm_device *dev)
{
	spin_lock_init(&dev->object_name_lock);
	idr_init(&dev->object_name_idr);
	atomic_set(&dev->object_count, 0);
	atomic_set(&dev->object_memory, 0);
	atomic_set(&dev->pin_count, 0);
	atomic_set(&dev->pin_memory, 0);
	atomic_set(&dev->gtt_count, 0);
	atomic_set(&dev->gtt_memory, 0);
	return 0;
}
Example #3
struct p9_idpool *p9_idpool_create(void)
{
	struct p9_idpool *p;

	p = kmalloc(sizeof(struct p9_idpool), GFP_KERNEL);
	if (!p)
		return ERR_PTR(-ENOMEM);

	spin_lock_init(&p->lock);
	idr_init(&p->pool);

	return p;
}
Example #4
/**
 * ipc_init_ids	- initialise ipc identifiers
 * @ids: ipc identifier set
 *
 * Set up the sequence range to use for the ipc identifier range (limited
 * below IPCMNI) then initialise the keys hashtable and ids idr.
 */
int ipc_init_ids(struct ipc_ids *ids)
{
	int err;
	ids->in_use = 0;
	ids->seq = 0;
	ids->next_id = -1;
	init_rwsem(&ids->rwsem);
	err = rhashtable_init(&ids->key_ht, &ipc_kht_params);
	if (err)
		return err;
	idr_init(&ids->ipcs_idr);
	ids->tables_initialized = true;
	return 0;
}
Example #5
int ath10k_htt_tx_alloc(struct ath10k_htt *htt)
{
	struct ath10k *ar = htt->ar;
	int ret, size;

	ath10k_dbg(ar, ATH10K_DBG_BOOT, "htt tx max num pending tx %d\n",
		   htt->max_num_pending_tx);

	spin_lock_init(&htt->tx_lock);
	idr_init(&htt->pending_tx);

	size = htt->max_num_pending_tx * sizeof(struct ath10k_htt_txbuf);
	htt->txbuf.vaddr = dma_alloc_coherent(ar->dev, size,
						  &htt->txbuf.paddr,
						  GFP_KERNEL);
	if (!htt->txbuf.vaddr) {
		ath10k_err(ar, "failed to alloc tx buffer\n");
		ret = -ENOMEM;
		goto free_idr_pending_tx;
	}

	ret = ath10k_htt_tx_alloc_cont_frag_desc(htt);
	if (ret) {
		ath10k_err(ar, "failed to alloc cont frag desc: %d\n", ret);
		goto free_txbuf;
	}

	ret = ath10k_htt_tx_alloc_txq(htt);
	if (ret) {
		ath10k_err(ar, "failed to alloc txq: %d\n", ret);
		goto free_frag_desc;
	}

	return 0;

free_frag_desc:
	ath10k_htt_tx_free_cont_frag_desc(htt);

free_txbuf:
	size = htt->max_num_pending_tx *
			  sizeof(struct ath10k_htt_txbuf);
	dma_free_coherent(htt->ar->dev, size, htt->txbuf.vaddr,
			  htt->txbuf.paddr);

free_idr_pending_tx:
	idr_destroy(&htt->pending_tx);

	return ret;
}
Example #6
/*
 * Initialize everything, well, just everything in Posix clocks/timers ;)
 */
static __init int init_posix_timers(void)
{
	struct k_clock clock_realtime = {
		.clock_getres	= hrtimer_get_res,
		.clock_get	= posix_clock_realtime_get,
		.clock_set	= posix_clock_realtime_set,
		.clock_adj	= posix_clock_realtime_adj,
		.nsleep		= common_nsleep,
		.nsleep_restart	= hrtimer_nanosleep_restart,
		.timer_create	= common_timer_create,
		.timer_set	= common_timer_set,
		.timer_get	= common_timer_get,
		.timer_del	= common_timer_del,
	};
	struct k_clock clock_monotonic = {
		.clock_getres	= hrtimer_get_res,
		.clock_get	= posix_ktime_get_ts,
		.nsleep		= common_nsleep,
		.nsleep_restart	= hrtimer_nanosleep_restart,
		.timer_create	= common_timer_create,
		.timer_set	= common_timer_set,
		.timer_get	= common_timer_get,
		.timer_del	= common_timer_del,
	};
	struct k_clock clock_monotonic_raw = {
		.clock_getres	= hrtimer_get_res,
		.clock_get	= posix_get_monotonic_raw,
	};
	struct k_clock clock_realtime_coarse = {
		.clock_getres	= posix_get_coarse_res,
		.clock_get	= posix_get_realtime_coarse,
	};
	struct k_clock clock_monotonic_coarse = {
		.clock_getres	= posix_get_coarse_res,
		.clock_get	= posix_get_monotonic_coarse,
	};

	register_posix_clock(CLOCK_REALTIME, &clock_realtime);
	register_posix_clock(CLOCK_MONOTONIC, &clock_monotonic);
	register_posix_clock(CLOCK_MONOTONIC_RAW, &clock_monotonic_raw);
	register_posix_clock(CLOCK_REALTIME_COARSE, &clock_realtime_coarse);
	register_posix_clock(CLOCK_MONOTONIC_COARSE, &clock_monotonic_coarse);

	posix_timers_cache = kmem_cache_create("posix_timers_cache",
					sizeof (struct k_itimer), 0, SLAB_PANIC,
					NULL);
	idr_init(&posix_timers_id);
	return 0;
}
Example #7
/*
 * init the sessions structures
 */
NTSTATUS smbsrv_init_sessions(struct smbsrv_connection *smb_conn, uint64_t limit)
{
    /*
     * the idr_* functions take 'int' as limit,
     * and only work with a max limit 0x00FFFFFF
     */
    limit &= 0x00FFFFFF;

    smb_conn->sessions.idtree_vuid	= idr_init(smb_conn);
    NT_STATUS_HAVE_NO_MEMORY(smb_conn->sessions.idtree_vuid);
    smb_conn->sessions.idtree_limit	= limit;
    smb_conn->sessions.list		= NULL;

    return NT_STATUS_OK;
}
Example #8
static int sis_driver_load(struct drm_device *dev, unsigned long chipset)
{
	drm_sis_private_t *dev_priv;

	pci_set_master(dev->pdev);

	dev_priv = kzalloc(sizeof(drm_sis_private_t), GFP_KERNEL);
	if (dev_priv == NULL)
		return -ENOMEM;

	idr_init(&dev_priv->object_idr);
	dev->dev_private = (void *)dev_priv;
	dev_priv->chipset = chipset;

	return 0;
}
Example #9
void ipc_init_ids(struct ipc_ids *ids)
{
	init_rwsem(&ids->rw_mutex);

	ids->in_use = 0;
	ids->seq = 0;
	{
		int seq_limit = INT_MAX/SEQ_MULTIPLIER;
		if (seq_limit > USHRT_MAX)
			ids->seq_max = USHRT_MAX;
		else
			ids->seq_max = seq_limit;
	}

	idr_init(&ids->ipcs_idr);
}
Example #10
int ath10k_htt_tx_alloc(struct ath10k_htt *htt)
{
	struct ath10k *ar = htt->ar;
	int ret, size;

	ath10k_dbg(ar, ATH10K_DBG_BOOT, "htt tx max num pending tx %d\n",
		   htt->max_num_pending_tx);

	mtx_init(&htt->tx_lock, device_get_nameunit(ar->sc_dev),
	    "athp htt tx", MTX_DEF);
	mtx_init(&htt->tx_comp_lock, device_get_nameunit(ar->sc_dev),
	    "athp htt comp tx", MTX_DEF);
	idr_init(&htt->pending_tx);

	htt->tx_pool = dma_pool_create("ath10k htt tx pool", htt->ar->sc_dev,
				       sizeof(struct ath10k_htt_txbuf), 4, 0);
	if (!htt->tx_pool) {
		ret = -ENOMEM;
		goto free_idr_pending_tx;
	}

	if (!ar->hw_params.continuous_frag_desc)
		goto skip_frag_desc_alloc;

	size = htt->max_num_pending_tx * sizeof(struct htt_msdu_ext_desc);
	if (athp_descdma_alloc(ar, &htt->frag_desc.dd, "htt frag_desc",
	    8, size) != 0) {
		ath10k_warn(ar, "failed to alloc fragment desc memory\n");
		ret = -ENOMEM;
		goto free_tx_pool;
	}
	htt->frag_desc.vaddr = (void *) htt->frag_desc.dd.dd_desc;
	htt->frag_desc.paddr = htt->frag_desc.dd.dd_desc_paddr;

skip_frag_desc_alloc:
	return 0;

free_tx_pool:
	dma_pool_destroy(htt->tx_pool);
free_idr_pending_tx:
	mtx_destroy(&htt->tx_lock);
	idr_destroy(&htt->pending_tx);
	return ret;
}
Example #11
/*
 * Initialize everything, well, just everything in Posix clocks/timers ;)
 */
static __init int init_posix_timers(void)
{
	struct k_clock clock_realtime = {
		.clock_getres = hrtimer_get_res,
	};
	struct k_clock clock_monotonic = {
		.clock_getres = hrtimer_get_res,
		.clock_get = posix_ktime_get_ts,
		.clock_set = do_posix_clock_nosettime,
	};

	register_posix_clock(CLOCK_REALTIME, &clock_realtime);
	register_posix_clock(CLOCK_MONOTONIC, &clock_monotonic);

	posix_timers_cache = kmem_cache_create("posix_timers_cache",
					sizeof (struct k_itimer), 0, 0, NULL, NULL);
	idr_init(&posix_timers_id);
	return 0;
}
Example #12
int ath10k_htt_tx_alloc(struct ath10k_htt *htt)
{
	struct ath10k *ar = htt->ar;

	ath10k_dbg(ar, ATH10K_DBG_BOOT, "htt tx max num pending tx %d\n",
		   htt->max_num_pending_tx);

	spin_lock_init(&htt->tx_lock);
	idr_init(&htt->pending_tx);

	htt->tx_pool = dma_pool_create("ath10k htt tx pool", htt->ar->dev,
				       sizeof(struct ath10k_htt_txbuf), 4, 0);
	if (!htt->tx_pool) {
		idr_destroy(&htt->pending_tx);
		return -ENOMEM;
	}

	return 0;
}
Example #13
static NTSTATUS smbd_smb2_session_setup(struct smbd_smb2_request *smb2req,
					uint64_t in_session_id,
					uint8_t in_security_mode,
					DATA_BLOB in_security_buffer,
					uint16_t *out_session_flags,
					DATA_BLOB *out_security_buffer,
					uint64_t *out_session_id)
{
	struct smbd_smb2_session *session;

	*out_session_flags = 0;
	*out_session_id = 0;

	if (in_session_id == 0) {
		int id;

		/* create a new session */
		session = talloc_zero(smb2req->sconn, struct smbd_smb2_session);
		if (session == NULL) {
			return NT_STATUS_NO_MEMORY;
		}
		session->status = NT_STATUS_MORE_PROCESSING_REQUIRED;
		id = idr_get_new_random(smb2req->sconn->smb2.sessions.idtree,
					session,
					smb2req->sconn->smb2.sessions.limit);
		if (id == -1) {
			return NT_STATUS_INSUFFICIENT_RESOURCES;
		}
		session->vuid = id;

		session->tcons.idtree = idr_init(session);
		if (session->tcons.idtree == NULL) {
			return NT_STATUS_NO_MEMORY;
		}
		session->tcons.limit = 0x0000FFFE;
		session->tcons.list = NULL;

		DLIST_ADD_END(smb2req->sconn->smb2.sessions.list, session,
			      struct smbd_smb2_session *);
		session->sconn = smb2req->sconn;
		talloc_set_destructor(session, smbd_smb2_session_destructor);
	} else {
Example #14
static int __init rpmsg_init(void)
{
	int ret;

	idr_init(&vprocs);

	ret = bus_register(&rpmsg_bus);
	if (ret) {
		pr_err("failed to register rpmsg bus: %d\n", ret);
		return ret;
	}

	ret = register_virtio_driver(&virtio_ipc_driver);
	if (ret) {
		pr_err("failed to register virtio driver: %d\n", ret);
		bus_unregister(&rpmsg_bus);
	}

	return ret;
}
Example #15
int intel_svm_alloc_pasid_tables(struct intel_iommu *iommu)
{
	struct page *pages;
	int order;

	/* Start at 2 because it's defined as 2^(1+PSS) */
	iommu->pasid_max = 2 << ecap_pss(iommu->ecap);

	/* Eventually I'm promised we will get a multi-level PASID table
	 * and it won't have to be physically contiguous. Until then,
	 * limit the size because 8MiB contiguous allocations can be hard
	 * to come by. The limit of 0x20000, which is 1MiB for each of
	 * the PASID and PASID-state tables, is somewhat arbitrary. */
	if (iommu->pasid_max > 0x20000)
		iommu->pasid_max = 0x20000;

	order = get_order(sizeof(struct pasid_entry) * iommu->pasid_max);
	pages = alloc_pages(GFP_KERNEL | __GFP_ZERO, order);
	if (!pages) {
		pr_warn("IOMMU: %s: Failed to allocate PASID table\n",
			iommu->name);
		return -ENOMEM;
	}
	iommu->pasid_table = page_address(pages);
	pr_info("%s: Allocated order %d PASID table.\n", iommu->name, order);

	if (ecap_dis(iommu->ecap)) {
		/* Just making it explicit... */
		BUILD_BUG_ON(sizeof(struct pasid_entry) != sizeof(struct pasid_state_entry));
		pages = alloc_pages(GFP_KERNEL | __GFP_ZERO, order);
		if (pages)
			iommu->pasid_state_table = page_address(pages);
		else
			pr_warn("IOMMU: %s: Failed to allocate PASID state table\n",
				iommu->name);
	}

	idr_init(&iommu->pasid_idr);

	return 0;
}
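
To put numbers on the size comment above: assuming struct pasid_entry is a single 64-bit descriptor (8 bytes), the 0x20000 cap gives 8 B × 0x20000 = 1 MiB per table, the "1MiB for each" figure in the comment, i.e. get_order() returns 8 (256 pages of 4 KiB); larger PSS values would push the same calculation toward the 8 MiB contiguous allocations the comment warns about.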
Example #16
static struct cxl_afu *cxl_alloc_afu(struct cxl *adapter, int slice)
{
	struct cxl_afu *afu;

	if (!(afu = kzalloc(sizeof(struct cxl_afu), GFP_KERNEL)))
		return NULL;

	afu->adapter = adapter;
	afu->dev.parent = &adapter->dev;
	afu->dev.release = cxl_release_afu;
	afu->slice = slice;
	idr_init(&afu->contexts_idr);
	mutex_init(&afu->contexts_lock);
	spin_lock_init(&afu->afu_cntl_lock);
	mutex_init(&afu->spa_mutex);

	afu->prefault_mode = CXL_PREFAULT_NONE;
	afu->irqs_max = afu->adapter->user_irqs;

	return afu;
}
Example #17
/**
 * drm_gem_init - Initialize the GEM device fields
 * @dev: drm_device structure to initialize
 */
int
drm_gem_init(struct drm_device *dev)
{
    struct drm_vma_offset_manager *vma_offset_manager;

    mutex_init(&dev->object_name_lock);
    idr_init(&dev->object_name_idr);

    vma_offset_manager = kzalloc(sizeof(*vma_offset_manager), GFP_KERNEL);
    if (!vma_offset_manager) {
        DRM_ERROR("out of memory\n");
        return -ENOMEM;
    }

    dev->vma_offset_manager = vma_offset_manager;
    drm_vma_offset_manager_init(vma_offset_manager,
                                DRM_FILE_PAGE_OFFSET_START,
                                DRM_FILE_PAGE_OFFSET_SIZE);

    return 0;
}
Example #18
int
drm_gem_init(struct drm_device *dev)
{
	struct drm_gem_mm *mm;

	mutex_init(&dev->object_name_lock);
	idr_init(&dev->object_name_idr);

	mm = kzalloc(sizeof(struct drm_gem_mm), GFP_KERNEL);
	if (!mm) {
		DRM_ERROR("out of memory\n");
		return -ENOMEM;
	}

	dev->mm_private = mm;
	drm_vma_offset_manager_init(&mm->vma_manager,
				    DRM_FILE_PAGE_OFFSET_START,
				    DRM_FILE_PAGE_OFFSET_SIZE);

	return 0;
}
Example #19
int ath10k_htt_tx_alloc(struct ath10k_htt *htt)
{
	struct ath10k *ar = htt->ar;
	int ret, size;

	ath10k_dbg(ar, ATH10K_DBG_BOOT, "htt tx max num pending tx %d\n",
		   htt->max_num_pending_tx);

	spin_lock_init(&htt->tx_lock);
	idr_init(&htt->pending_tx);

	htt->tx_pool = dma_pool_create("ath10k htt tx pool", htt->ar->dev,
				       sizeof(struct ath10k_htt_txbuf), 4, 0);
	if (!htt->tx_pool) {
		ret = -ENOMEM;
		goto free_idr_pending_tx;
	}

	if (!ar->hw_params.continuous_frag_desc)
		goto skip_frag_desc_alloc;

	size = htt->max_num_pending_tx * sizeof(struct htt_msdu_ext_desc);
	htt->frag_desc.vaddr = dma_alloc_coherent(ar->dev, size,
						  &htt->frag_desc.paddr,
						  GFP_DMA);
	if (!htt->frag_desc.vaddr) {
		ath10k_warn(ar, "failed to alloc fragment desc memory\n");
		ret = -ENOMEM;
		goto free_tx_pool;
	}

skip_frag_desc_alloc:
	return 0;

free_tx_pool:
	dma_pool_destroy(htt->tx_pool);
free_idr_pending_tx:
	idr_destroy(&htt->pending_tx);
	return ret;
}
Example #20
static int __init ib_ucm_init(void)
{
	int result;

	result = register_chrdev_region(IB_UCM_DEV, 1, "infiniband_cm");
	if (result) {
		ucm_dbg("Error <%d> registering dev\n", result);
		goto err_chr;
	}

	cdev_init(&ib_ucm_cdev, &ib_ucm_fops);

	result = cdev_add(&ib_ucm_cdev, IB_UCM_DEV, 1);
	if (result) {
 		ucm_dbg("Error <%d> adding cdev\n", result);
		goto err_cdev;
	}

	ib_ucm_class = class_create(THIS_MODULE, "infiniband_cm");
	if (IS_ERR(ib_ucm_class)) {
		result = PTR_ERR(ib_ucm_class);
 		ucm_dbg("Error <%d> creating class\n", result);
		goto err_class;
	}

	class_device_create(ib_ucm_class, IB_UCM_DEV, NULL, "ucm");

	idr_init(&ctx_id_table);
	init_MUTEX(&ctx_id_mutex);

	return 0;
err_class:
	cdev_del(&ib_ucm_cdev);
err_cdev:
	unregister_chrdev_region(IB_UCM_DEV, 1);
err_chr:
	return result;
}
Example #21
static int
drm_init(void)
{
	extern int linux_guarantee_initialized(void);
	int error;

	error = linux_guarantee_initialized();
	if (error)
		return error;

	if (ISSET(boothowto, AB_DEBUG))
		drm_debug = ~(unsigned int)0;

	spin_lock_init(&drm_minor_lock);
	idr_init(&drm_minors_idr);
	linux_mutex_init(&drm_global_mutex);
	drm_connector_ida_init();
	drm_global_init();
	drm_sysctl_init(&drm_def);
	drm_i2c_encoders_init();

	return 0;
}
Example #22
static int __init drm_core_init(void)
{
	int ret = -ENOMEM;

	drm_global_init();
	drm_connector_ida_init();
	idr_init(&drm_minors_idr);

	if (register_chrdev(DRM_MAJOR, "drm", &drm_stub_fops))
		goto err_p1;

	drm_class = drm_sysfs_create(THIS_MODULE, "drm");
	if (IS_ERR(drm_class)) {
		printk(KERN_ERR "DRM: Error creating drm class.\n");
		ret = PTR_ERR(drm_class);
		goto err_p2;
	}

	drm_debugfs_root = debugfs_create_dir("dri", NULL);
	if (!drm_debugfs_root) {
		DRM_ERROR("Cannot create /sys/kernel/debug/dri\n");
		ret = -1;
		goto err_p3;
	}

	DRM_INFO("Initialized %s %d.%d.%d %s\n",
		 CORE_NAME, CORE_MAJOR, CORE_MINOR, CORE_PATCHLEVEL, CORE_DATE);
	return 0;
err_p3:
	drm_sysfs_destroy();
err_p2:
	unregister_chrdev(DRM_MAJOR, "drm");

	idr_destroy(&drm_minors_idr);
err_p1:
	return ret;
}
Example #23
/**
 * amdgpu_driver_open_kms - drm callback for open
 *
 * @dev: drm dev pointer
 * @file_priv: drm file
 *
 * On device open, init vm on cayman+ (all asics).
 * Returns 0 on success, error on failure.
 */
int amdgpu_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv)
{
	struct amdgpu_device *adev = dev->dev_private;
	struct amdgpu_fpriv *fpriv;
	int r;

	file_priv->driver_priv = NULL;

	r = pm_runtime_get_sync(dev->dev);
	if (r < 0)
		return r;

	fpriv = kzalloc(sizeof(*fpriv), GFP_KERNEL);
	if (unlikely(!fpriv))
		return -ENOMEM;

	r = amdgpu_vm_init(adev, &fpriv->vm);
	if (r)
		goto error_free;

	mutex_init(&fpriv->bo_list_lock);
	idr_init(&fpriv->bo_list_handles);

	amdgpu_ctx_mgr_init(&fpriv->ctx_mgr);

	file_priv->driver_priv = fpriv;

	pm_runtime_mark_last_busy(dev->dev);
	pm_runtime_put_autosuspend(dev->dev);
	return 0;

error_free:
	kfree(fpriv);

	return r;
}
Example #24
int
drm_gem_init(struct drm_device *dev)
{
	struct drm_gem_mm *mm;

	spin_lock_init(&dev->object_name_lock);
	idr_init(&dev->object_name_idr);
	atomic_set(&dev->object_count, 0);
	atomic_set(&dev->object_memory, 0);
	atomic_set(&dev->pin_count, 0);
	atomic_set(&dev->pin_memory, 0);
	atomic_set(&dev->gtt_count, 0);
	atomic_set(&dev->gtt_memory, 0);

	mm = kzalloc(sizeof(struct drm_gem_mm), GFP_KERNEL);
	if (!mm) {
		DRM_ERROR("out of memory\n");
		return -ENOMEM;
	}

	dev->mm_private = mm;

	if (drm_ht_create(&mm->offset_hash, 19)) {
		kfree(mm);
		return -ENOMEM;
	}

	if (drm_mm_init(&mm->offset_manager, DRM_FILE_PAGE_OFFSET_START,
			DRM_FILE_PAGE_OFFSET_SIZE)) {
		drm_ht_remove(&mm->offset_hash);
		kfree(mm);
		return -ENOMEM;
	}

	return 0;
}
Example #25
int via_driver_load(struct drm_device *dev, unsigned long chipset)
{
	drm_via_private_t *dev_priv;
	int ret = 0;

	dev_priv = kzalloc(sizeof(drm_via_private_t), GFP_KERNEL);
	if (dev_priv == NULL)
		return -ENOMEM;

	idr_init(&dev_priv->object_idr);
	dev->dev_private = (void *)dev_priv;

	dev_priv->chipset = chipset;

	pci_set_master(dev->pdev);

	ret = drm_vblank_init(dev, 1);
	if (ret) {
		kfree(dev_priv);
		return ret;
	}

	return 0;
}
Example #26
/* Return a server_id with a unique task_id element.  Free the
 * returned pointer to de-allocate the task_id via a talloc destructor
 * (ie, use talloc_free()) */
static struct server_id *new_server_id_task(TALLOC_CTX *mem_ctx)
{
    struct messaging_context *msg_ctx;
    struct server_id *server_id;
    int task_id;
    if (!task_id_tree) {
        task_id_tree = idr_init(NULL);
        if (!task_id_tree) {
            return NULL;
        }
    }

    msg_ctx = server_messaging_context();
    if (msg_ctx == NULL) {
        return NULL;
    }

    server_id = talloc(mem_ctx, struct server_id);

    if (!server_id) {
        return NULL;
    }
    *server_id = messaging_server_id(msg_ctx);

    /* 0 is the default server_id, so we need to start with 1 */
    task_id = idr_get_new_above(task_id_tree, server_id, 1, INT32_MAX);

    if (task_id == -1) {
        talloc_free(server_id);
        return NULL;
    }

    talloc_set_destructor(server_id, free_task_id);
    server_id->task_id = task_id;
    return server_id;
}
Example #27
static int rpmsg_probe(struct virtio_device *vdev)
{
	vq_callback_t *vq_cbs[] = { rpmsg_recv_done, rpmsg_xmit_done };
	const char *names[] = { "input", "output" };
	struct virtqueue *vqs[2];
	struct virtproc_info *vrp;
	void *addr;
	int err, i, num_bufs, buf_size, total_buf_size;
	struct rpmsg_channel_info *ch;

	vrp = kzalloc(sizeof(*vrp), GFP_KERNEL);
	if (!vrp)
		return -ENOMEM;

	vrp->vdev = vdev;

	idr_init(&vrp->endpoints);
	spin_lock_init(&vrp->endpoints_lock);
	mutex_init(&vrp->svq_lock);
	mutex_init(&vrp->rm_lock);
	init_waitqueue_head(&vrp->sendq);

	/* We expect two virtqueues, rx and tx (in this order) */
	err = vdev->config->find_vqs(vdev, 2, vqs, vq_cbs, names);
	if (err)
		goto free_vi;

	vrp->rvq = vqs[0];
	vrp->svq = vqs[1];

	/* Platform must supply pre-allocated uncached buffers for now */
	vdev->config->get(vdev, VPROC_BUF_ADDR, &addr, sizeof(addr));
	vdev->config->get(vdev, VPROC_BUF_NUM, &num_bufs,
							sizeof(num_bufs));
	vdev->config->get(vdev, VPROC_BUF_SZ, &buf_size, sizeof(buf_size));

	total_buf_size = num_bufs * buf_size;

	dev_dbg(&vdev->dev, "%d buffers, size %d, addr 0x%x, total 0x%x\n",
		num_bufs, buf_size, (unsigned int) addr, total_buf_size);

	vrp->num_bufs = num_bufs;
	vrp->buf_size = buf_size;
	vrp->rbufs = addr;
	vrp->sbufs = addr + total_buf_size / 2;

	/* simulated addr base to make virt_to_page happy */
	vdev->config->get(vdev, VPROC_SIM_BASE, &vrp->sim_base,
							sizeof(vrp->sim_base));

	/* set up the receive buffers */
	for (i = 0; i < num_bufs / 2; i++) {
		struct scatterlist sg;
		void *tmpaddr = vrp->rbufs + i * buf_size;
		void *simaddr = vrp->sim_base + i * buf_size;

		sg_init_one(&sg, simaddr, buf_size);
		err = virtqueue_add_buf_gfp(vrp->rvq, &sg, 0, 1, tmpaddr,
								GFP_KERNEL);
		WARN_ON(err < 0); /* sanity check; this can't really happen */
	}

	/* tell the remote processor it can start sending data */
	virtqueue_kick(vrp->rvq);

	/* suppress "tx-complete" interrupts */
	virtqueue_disable_cb(vrp->svq);

	vdev->priv = vrp;

	dev_info(&vdev->dev, "rpmsg backend virtproc probed successfully\n");

	/* if supported by the remote processor, enable the name service */
	if (virtio_has_feature(vdev, VIRTIO_RPMSG_F_NS)) {
		vrp->ns_ept = __rpmsg_create_ept(vrp, NULL, rpmsg_ns_cb,
						vrp, RPMSG_NS_ADDR);
		if (!vrp->ns_ept) {
			dev_err(&vdev->dev, "failed to create the ns ept\n");
			err = -ENOMEM;
			goto vqs_del;
		}
	}

	/* look for platform-specific static channels */
	vdev->config->get(vdev, VPROC_STATIC_CHANNELS, &ch, sizeof(ch));

	for (i = 0; ch && ch[i].name[0]; i++)
		rpmsg_create_channel(vrp, &ch[i]);

	return 0;

vqs_del:
	vdev->config->del_vqs(vrp->vdev);
free_vi:
	kfree(vrp);
	return err;
}
Example #28
/**
 * Called at device open time, sets up the structure for handling refcounting
 * of mm objects.
 */
void
drm_gem_open(struct drm_device *dev, struct drm_file *file_private)
{
    idr_init(&file_private->object_idr);
    spin_lock_init(&file_private->table_lock);
}
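
The object_idr initialised here is the table that per-file GEM handles are later allocated from. As a hypothetical sketch (not the actual drm_gem_handle_create() body, and assuming the usual DRM and idr headers are in scope), allocation typically wraps idr_alloc() in idr_preload() so that no GFP_KERNEL allocation happens while table_lock is held:

/* Hypothetical fragment: allocate a handle for obj in this file's idr. */
static int example_gem_handle_alloc(struct drm_file *file_priv,
                                    struct drm_gem_object *obj)
{
    int handle;

    idr_preload(GFP_KERNEL);
    spin_lock(&file_priv->table_lock);
    /* GEM treats handle 0 as invalid, so start allocating at 1. */
    handle = idr_alloc(&file_priv->object_idr, obj, 1, 0, GFP_NOWAIT);
    spin_unlock(&file_priv->table_lock);
    idr_preload_end();

    return handle;	/* negative errno on failure, otherwise the new handle */
}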
Example #29
static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
{
	struct vmw_private *dev_priv;
	int ret;
	uint32_t svga_id;
	enum vmw_res_type i;
	bool refuse_dma = false;
	char host_log[100] = {0};

	dev_priv = kzalloc(sizeof(*dev_priv), GFP_KERNEL);
	if (unlikely(dev_priv == NULL)) {
		DRM_ERROR("Failed allocating a device private struct.\n");
		return -ENOMEM;
	}

	pci_set_master(dev->pdev);

	dev_priv->dev = dev;
	dev_priv->vmw_chipset = chipset;
	dev_priv->last_read_seqno = (uint32_t) -100;
	mutex_init(&dev_priv->cmdbuf_mutex);
	mutex_init(&dev_priv->release_mutex);
	mutex_init(&dev_priv->binding_mutex);
	mutex_init(&dev_priv->global_kms_state_mutex);
	rwlock_init(&dev_priv->resource_lock);
	ttm_lock_init(&dev_priv->reservation_sem);
	spin_lock_init(&dev_priv->hw_lock);
	spin_lock_init(&dev_priv->waiter_lock);
	spin_lock_init(&dev_priv->cap_lock);
	spin_lock_init(&dev_priv->svga_lock);

	for (i = vmw_res_context; i < vmw_res_max; ++i) {
		idr_init(&dev_priv->res_idr[i]);
		INIT_LIST_HEAD(&dev_priv->res_lru[i]);
	}

	mutex_init(&dev_priv->init_mutex);
	init_waitqueue_head(&dev_priv->fence_queue);
	init_waitqueue_head(&dev_priv->fifo_queue);
	dev_priv->fence_queue_waiters = 0;
	dev_priv->fifo_queue_waiters = 0;

	dev_priv->used_memory_size = 0;

	dev_priv->io_start = pci_resource_start(dev->pdev, 0);
	dev_priv->vram_start = pci_resource_start(dev->pdev, 1);
	dev_priv->mmio_start = pci_resource_start(dev->pdev, 2);

	dev_priv->assume_16bpp = !!vmw_assume_16bpp;

	dev_priv->enable_fb = enable_fbdev;

	vmw_write(dev_priv, SVGA_REG_ID, SVGA_ID_2);
	svga_id = vmw_read(dev_priv, SVGA_REG_ID);
	if (svga_id != SVGA_ID_2) {
		ret = -ENOSYS;
		DRM_ERROR("Unsupported SVGA ID 0x%x\n", svga_id);
		goto out_err0;
	}

	dev_priv->capabilities = vmw_read(dev_priv, SVGA_REG_CAPABILITIES);
	ret = vmw_dma_select_mode(dev_priv);
	if (unlikely(ret != 0)) {
		DRM_INFO("Restricting capabilities due to IOMMU setup.\n");
		refuse_dma = true;
	}

	dev_priv->vram_size = vmw_read(dev_priv, SVGA_REG_VRAM_SIZE);
	dev_priv->mmio_size = vmw_read(dev_priv, SVGA_REG_MEM_SIZE);
	dev_priv->fb_max_width = vmw_read(dev_priv, SVGA_REG_MAX_WIDTH);
	dev_priv->fb_max_height = vmw_read(dev_priv, SVGA_REG_MAX_HEIGHT);

	vmw_get_initial_size(dev_priv);

	if (dev_priv->capabilities & SVGA_CAP_GMR2) {
		dev_priv->max_gmr_ids =
			vmw_read(dev_priv, SVGA_REG_GMR_MAX_IDS);
		dev_priv->max_gmr_pages =
			vmw_read(dev_priv, SVGA_REG_GMRS_MAX_PAGES);
		dev_priv->memory_size =
			vmw_read(dev_priv, SVGA_REG_MEMORY_SIZE);
		dev_priv->memory_size -= dev_priv->vram_size;
	} else {
		/*
		 * An arbitrary limit of 512MiB on surface
		 * memory. But all HWV8 hardware supports GMR2.
		 */
		dev_priv->memory_size = 512*1024*1024;
	}
	dev_priv->max_mob_pages = 0;
	dev_priv->max_mob_size = 0;
	if (dev_priv->capabilities & SVGA_CAP_GBOBJECTS) {
		uint64_t mem_size =
			vmw_read(dev_priv,
				 SVGA_REG_SUGGESTED_GBOBJECT_MEM_SIZE_KB);

		/*
		 * Workaround for low memory 2D VMs to compensate for the
		 * allocation taken by fbdev
		 */
		if (!(dev_priv->capabilities & SVGA_CAP_3D))
			mem_size *= 2;

		dev_priv->max_mob_pages = mem_size * 1024 / PAGE_SIZE;
		dev_priv->prim_bb_mem =
			vmw_read(dev_priv,
				 SVGA_REG_MAX_PRIMARY_BOUNDING_BOX_MEM);
		dev_priv->max_mob_size =
			vmw_read(dev_priv, SVGA_REG_MOB_MAX_SIZE);
		dev_priv->stdu_max_width =
			vmw_read(dev_priv, SVGA_REG_SCREENTARGET_MAX_WIDTH);
		dev_priv->stdu_max_height =
			vmw_read(dev_priv, SVGA_REG_SCREENTARGET_MAX_HEIGHT);

		vmw_write(dev_priv, SVGA_REG_DEV_CAP,
			  SVGA3D_DEVCAP_MAX_TEXTURE_WIDTH);
		dev_priv->texture_max_width = vmw_read(dev_priv,
						       SVGA_REG_DEV_CAP);
		vmw_write(dev_priv, SVGA_REG_DEV_CAP,
			  SVGA3D_DEVCAP_MAX_TEXTURE_HEIGHT);
		dev_priv->texture_max_height = vmw_read(dev_priv,
							SVGA_REG_DEV_CAP);
	} else {
		dev_priv->texture_max_width = 8192;
		dev_priv->texture_max_height = 8192;
		dev_priv->prim_bb_mem = dev_priv->vram_size;
	}

	vmw_print_capabilities(dev_priv->capabilities);

	ret = vmw_dma_masks(dev_priv);
	if (unlikely(ret != 0))
		goto out_err0;

	if (dev_priv->capabilities & SVGA_CAP_GMR2) {
		DRM_INFO("Max GMR ids is %u\n",
			 (unsigned)dev_priv->max_gmr_ids);
		DRM_INFO("Max number of GMR pages is %u\n",
			 (unsigned)dev_priv->max_gmr_pages);
		DRM_INFO("Max dedicated hypervisor surface memory is %u kiB\n",
			 (unsigned)dev_priv->memory_size / 1024);
	}
	DRM_INFO("Maximum display memory size is %u kiB\n",
		 dev_priv->prim_bb_mem / 1024);
	DRM_INFO("VRAM at 0x%08x size is %u kiB\n",
		 dev_priv->vram_start, dev_priv->vram_size / 1024);
	DRM_INFO("MMIO at 0x%08x size is %u kiB\n",
		 dev_priv->mmio_start, dev_priv->mmio_size / 1024);

	ret = vmw_ttm_global_init(dev_priv);
	if (unlikely(ret != 0))
		goto out_err0;


	vmw_master_init(&dev_priv->fbdev_master);
	ttm_lock_set_kill(&dev_priv->fbdev_master.lock, false, SIGTERM);
	dev_priv->active_master = &dev_priv->fbdev_master;

	dev_priv->mmio_virt = memremap(dev_priv->mmio_start,
				       dev_priv->mmio_size, MEMREMAP_WB);

	if (unlikely(dev_priv->mmio_virt == NULL)) {
		ret = -ENOMEM;
		DRM_ERROR("Failed mapping MMIO.\n");
		goto out_err3;
	}

	/* Need mmio memory to check for fifo pitchlock cap. */
	if (!(dev_priv->capabilities & SVGA_CAP_DISPLAY_TOPOLOGY) &&
	    !(dev_priv->capabilities & SVGA_CAP_PITCHLOCK) &&
	    !vmw_fifo_have_pitchlock(dev_priv)) {
		ret = -ENOSYS;
		DRM_ERROR("Hardware has no pitchlock\n");
		goto out_err4;
	}

	dev_priv->tdev = ttm_object_device_init
		(dev_priv->mem_global_ref.object, 12, &vmw_prime_dmabuf_ops);

	if (unlikely(dev_priv->tdev == NULL)) {
		DRM_ERROR("Unable to initialize TTM object management.\n");
		ret = -ENOMEM;
		goto out_err4;
	}

	dev->dev_private = dev_priv;

	ret = pci_request_regions(dev->pdev, "vmwgfx probe");
	dev_priv->stealth = (ret != 0);
	if (dev_priv->stealth) {
		/**
		 * Request at least the mmio PCI resource.
		 */

		DRM_INFO("It appears like vesafb is loaded. "
			 "Ignore above error if any.\n");
		ret = pci_request_region(dev->pdev, 2, "vmwgfx stealth probe");
		if (unlikely(ret != 0)) {
			DRM_ERROR("Failed reserving the SVGA MMIO resource.\n");
			goto out_no_device;
		}
	}

	if (dev_priv->capabilities & SVGA_CAP_IRQMASK) {
		ret = drm_irq_install(dev, dev->pdev->irq);
		if (ret != 0) {
			DRM_ERROR("Failed installing irq: %d\n", ret);
			goto out_no_irq;
		}
	}

	dev_priv->fman = vmw_fence_manager_init(dev_priv);
	if (unlikely(dev_priv->fman == NULL)) {
		ret = -ENOMEM;
		goto out_no_fman;
	}

	ret = ttm_bo_device_init(&dev_priv->bdev,
				 dev_priv->bo_global_ref.ref.object,
				 &vmw_bo_driver,
				 dev->anon_inode->i_mapping,
				 VMWGFX_FILE_PAGE_OFFSET,
				 false);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Failed initializing TTM buffer object driver.\n");
		goto out_no_bdev;
	}

	/*
	 * Enable VRAM, but initially don't use it until SVGA is enabled and
	 * unhidden.
	 */
	ret = ttm_bo_init_mm(&dev_priv->bdev, TTM_PL_VRAM,
			     (dev_priv->vram_size >> PAGE_SHIFT));
	if (unlikely(ret != 0)) {
		DRM_ERROR("Failed initializing memory manager for VRAM.\n");
		goto out_no_vram;
	}
	dev_priv->bdev.man[TTM_PL_VRAM].use_type = false;

	dev_priv->has_gmr = true;
	if (((dev_priv->capabilities & (SVGA_CAP_GMR | SVGA_CAP_GMR2)) == 0) ||
	    refuse_dma || ttm_bo_init_mm(&dev_priv->bdev, VMW_PL_GMR,
					 VMW_PL_GMR) != 0) {
		DRM_INFO("No GMR memory available. "
			 "Graphics memory resources are very limited.\n");
		dev_priv->has_gmr = false;
	}

	if (dev_priv->capabilities & SVGA_CAP_GBOBJECTS) {
		dev_priv->has_mob = true;
		if (ttm_bo_init_mm(&dev_priv->bdev, VMW_PL_MOB,
				   VMW_PL_MOB) != 0) {
			DRM_INFO("No MOB memory available. "
				 "3D will be disabled.\n");
			dev_priv->has_mob = false;
		}
	}

	if (dev_priv->has_mob) {
		spin_lock(&dev_priv->cap_lock);
		vmw_write(dev_priv, SVGA_REG_DEV_CAP, SVGA3D_DEVCAP_DX);
		dev_priv->has_dx = !!vmw_read(dev_priv, SVGA_REG_DEV_CAP);
		spin_unlock(&dev_priv->cap_lock);
	}


	ret = vmw_kms_init(dev_priv);
	if (unlikely(ret != 0))
		goto out_no_kms;
	vmw_overlay_init(dev_priv);

	ret = vmw_request_device(dev_priv);
	if (ret)
		goto out_no_fifo;

	DRM_INFO("DX: %s\n", dev_priv->has_dx ? "yes." : "no.");

	snprintf(host_log, sizeof(host_log), "vmwgfx: %s-%s",
		VMWGFX_REPO, VMWGFX_GIT_VERSION);
	vmw_host_log(host_log);

	memset(host_log, 0, sizeof(host_log));
	snprintf(host_log, sizeof(host_log), "vmwgfx: Module Version: %d.%d.%d",
		VMWGFX_DRIVER_MAJOR, VMWGFX_DRIVER_MINOR,
		VMWGFX_DRIVER_PATCHLEVEL);
	vmw_host_log(host_log);

	if (dev_priv->enable_fb) {
		vmw_fifo_resource_inc(dev_priv);
		vmw_svga_enable(dev_priv);
		vmw_fb_init(dev_priv);
	}

	dev_priv->pm_nb.notifier_call = vmwgfx_pm_notifier;
	register_pm_notifier(&dev_priv->pm_nb);

	return 0;

out_no_fifo:
	vmw_overlay_close(dev_priv);
	vmw_kms_close(dev_priv);
out_no_kms:
	if (dev_priv->has_mob)
		(void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_MOB);
	if (dev_priv->has_gmr)
		(void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_GMR);
	(void)ttm_bo_clean_mm(&dev_priv->bdev, TTM_PL_VRAM);
out_no_vram:
	(void)ttm_bo_device_release(&dev_priv->bdev);
out_no_bdev:
	vmw_fence_manager_takedown(dev_priv->fman);
out_no_fman:
	if (dev_priv->capabilities & SVGA_CAP_IRQMASK)
		drm_irq_uninstall(dev_priv->dev);
out_no_irq:
	if (dev_priv->stealth)
		pci_release_region(dev->pdev, 2);
	else
		pci_release_regions(dev->pdev);
out_no_device:
	ttm_object_device_release(&dev_priv->tdev);
out_err4:
	memunmap(dev_priv->mmio_virt);
out_err3:
	vmw_ttm_global_release(dev_priv);
out_err0:
	for (i = vmw_res_context; i < vmw_res_max; ++i)
		idr_destroy(&dev_priv->res_idr[i]);

	if (dev_priv->ctx.staged_bindings)
		vmw_binding_state_free(dev_priv->ctx.staged_bindings);
	kfree(dev_priv);
	return ret;
}
Example #30
int qxl_device_init(struct qxl_device *qdev,
		    struct drm_device *ddev,
		    struct pci_dev *pdev,
		    unsigned long flags)
{
	int r;

	qdev->dev = &pdev->dev;
	qdev->ddev = ddev;
	qdev->pdev = pdev;
	qdev->flags = flags;

	mutex_init(&qdev->gem.mutex);
	mutex_init(&qdev->update_area_mutex);
	mutex_init(&qdev->release_mutex);
	mutex_init(&qdev->surf_evict_mutex);
	INIT_LIST_HEAD(&qdev->gem.objects);

	qdev->rom_base = pci_resource_start(pdev, 2);
	qdev->rom_size = pci_resource_len(pdev, 2);
	qdev->vram_base = pci_resource_start(pdev, 0);
	qdev->surfaceram_base = pci_resource_start(pdev, 1);
	qdev->surfaceram_size = pci_resource_len(pdev, 1);
	qdev->io_base = pci_resource_start(pdev, 3);

	qdev->vram_mapping = io_mapping_create_wc(qdev->vram_base, pci_resource_len(pdev, 0));
	qdev->surface_mapping = io_mapping_create_wc(qdev->surfaceram_base, qdev->surfaceram_size);
	DRM_DEBUG_KMS("qxl: vram %p-%p(%dM %dk), surface %p-%p(%dM %dk)\n",
		 (void *)qdev->vram_base, (void *)pci_resource_end(pdev, 0),
		 (int)pci_resource_len(pdev, 0) / 1024 / 1024,
		 (int)pci_resource_len(pdev, 0) / 1024,
		 (void *)qdev->surfaceram_base,
		 (void *)pci_resource_end(pdev, 1),
		 (int)qdev->surfaceram_size / 1024 / 1024,
		 (int)qdev->surfaceram_size / 1024);

	qdev->rom = ioremap(qdev->rom_base, qdev->rom_size);
	if (!qdev->rom) {
		pr_err("Unable to ioremap ROM\n");
		return -ENOMEM;
	}

	qxl_check_device(qdev);

	r = qxl_bo_init(qdev);
	if (r) {
		DRM_ERROR("bo init failed %d\n", r);
		return r;
	}

	qdev->ram_header = ioremap(qdev->vram_base +
				   qdev->rom->ram_header_offset,
				   sizeof(*qdev->ram_header));

	qdev->command_ring = qxl_ring_create(&(qdev->ram_header->cmd_ring_hdr),
					     sizeof(struct qxl_command),
					     QXL_COMMAND_RING_SIZE,
					     qdev->io_base + QXL_IO_NOTIFY_CMD,
					     false,
					     &qdev->display_event);

	qdev->cursor_ring = qxl_ring_create(
				&(qdev->ram_header->cursor_ring_hdr),
				sizeof(struct qxl_command),
				QXL_CURSOR_RING_SIZE,
				qdev->io_base + QXL_IO_NOTIFY_CMD,
				false,
				&qdev->cursor_event);

	qdev->release_ring = qxl_ring_create(
				&(qdev->ram_header->release_ring_hdr),
				sizeof(uint64_t),
				QXL_RELEASE_RING_SIZE, 0, true,
				NULL);

	/* TODO - slot initialization should happen on reset. where is our
	 * reset handler? */
	qdev->n_mem_slots = qdev->rom->slots_end;
	qdev->slot_gen_bits = qdev->rom->slot_gen_bits;
	qdev->slot_id_bits = qdev->rom->slot_id_bits;
	qdev->va_slot_mask =
		(~(uint64_t)0) >> (qdev->slot_id_bits + qdev->slot_gen_bits);

	qdev->mem_slots =
		kmalloc(qdev->n_mem_slots * sizeof(struct qxl_memslot),
			GFP_KERNEL);

	idr_init(&qdev->release_idr);
	spin_lock_init(&qdev->release_idr_lock);

	idr_init(&qdev->surf_id_idr);
	spin_lock_init(&qdev->surf_id_idr_lock);

	mutex_init(&qdev->async_io_mutex);

	/* reset the device into a known state - no memslots, no primary
	 * created, no surfaces. */
	qxl_io_reset(qdev);

	/* must initialize irq before first async io - slot creation */
	r = qxl_irq_init(qdev);
	if (r)
		return r;

	/*
	 * Note that virtual is surface0. We rely on the single ioremap done
	 * before.
	 */
	qdev->main_mem_slot = setup_slot(qdev, 0,
		(unsigned long)qdev->vram_base,
		(unsigned long)qdev->vram_base + qdev->rom->ram_header_offset);
	qdev->surfaces_mem_slot = setup_slot(qdev, 1,
		(unsigned long)qdev->surfaceram_base,
		(unsigned long)qdev->surfaceram_base + qdev->surfaceram_size);
	DRM_INFO("main mem slot %d [%lx,%x)\n",
		qdev->main_mem_slot,
		(unsigned long)qdev->vram_base, qdev->rom->ram_header_offset);


	qdev->gc_queue = create_singlethread_workqueue("qxl_gc");
	INIT_WORK(&qdev->gc_work, qxl_gc_work);

	r = qxl_fb_init(qdev);
	if (r)
		return r;

	return 0;
}