Example #1
void p9_idpool_destroy(struct p9_idpool *p)
{
	idr_destroy(&p->pool);
	kfree(p);
}
Example #2
void sem_exit_ns(struct ipc_namespace *ns)
{
	free_ipcs(ns, &sem_ids(ns), freeary);
	idr_destroy(&ns->ids[IPC_SEM_IDS].ipcs_idr);
}
Example #3
static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
{
    struct vmw_private *dev_priv;
    int ret;
    uint32_t svga_id;
    enum vmw_res_type i;
    bool refuse_dma = false;

    dev_priv = kzalloc(sizeof(*dev_priv), GFP_KERNEL);
    if (unlikely(dev_priv == NULL)) {
        DRM_ERROR("Failed allocating a device private struct.\n");
        return -ENOMEM;
    }

    pci_set_master(dev->pdev);

    dev_priv->dev = dev;
    dev_priv->vmw_chipset = chipset;
    dev_priv->last_read_seqno = (uint32_t) -100;
    mutex_init(&dev_priv->cmdbuf_mutex);
    mutex_init(&dev_priv->release_mutex);
    mutex_init(&dev_priv->binding_mutex);
    rwlock_init(&dev_priv->resource_lock);
    ttm_lock_init(&dev_priv->reservation_sem);
    spin_lock_init(&dev_priv->hw_lock);
    spin_lock_init(&dev_priv->waiter_lock);
    spin_lock_init(&dev_priv->cap_lock);
    spin_lock_init(&dev_priv->svga_lock);

    for (i = vmw_res_context; i < vmw_res_max; ++i) {
        idr_init(&dev_priv->res_idr[i]);
        INIT_LIST_HEAD(&dev_priv->res_lru[i]);
    }

    mutex_init(&dev_priv->init_mutex);
    init_waitqueue_head(&dev_priv->fence_queue);
    init_waitqueue_head(&dev_priv->fifo_queue);
    dev_priv->fence_queue_waiters = 0;
    atomic_set(&dev_priv->fifo_queue_waiters, 0);

    dev_priv->used_memory_size = 0;

    dev_priv->io_start = pci_resource_start(dev->pdev, 0);
    dev_priv->vram_start = pci_resource_start(dev->pdev, 1);
    dev_priv->mmio_start = pci_resource_start(dev->pdev, 2);

    dev_priv->enable_fb = enable_fbdev;

    vmw_write(dev_priv, SVGA_REG_ID, SVGA_ID_2);
    svga_id = vmw_read(dev_priv, SVGA_REG_ID);
    if (svga_id != SVGA_ID_2) {
        ret = -ENOSYS;
        DRM_ERROR("Unsupported SVGA ID 0x%x\n", svga_id);
        goto out_err0;
    }

    dev_priv->capabilities = vmw_read(dev_priv, SVGA_REG_CAPABILITIES);
    ret = vmw_dma_select_mode(dev_priv);
    if (unlikely(ret != 0)) {
        DRM_INFO("Restricting capabilities due to IOMMU setup.\n");
        refuse_dma = true;
    }

    dev_priv->vram_size = vmw_read(dev_priv, SVGA_REG_VRAM_SIZE);
    dev_priv->mmio_size = vmw_read(dev_priv, SVGA_REG_MEM_SIZE);
    dev_priv->fb_max_width = vmw_read(dev_priv, SVGA_REG_MAX_WIDTH);
    dev_priv->fb_max_height = vmw_read(dev_priv, SVGA_REG_MAX_HEIGHT);

    vmw_get_initial_size(dev_priv);

    if (dev_priv->capabilities & SVGA_CAP_GMR2) {
        dev_priv->max_gmr_ids =
            vmw_read(dev_priv, SVGA_REG_GMR_MAX_IDS);
        dev_priv->max_gmr_pages =
            vmw_read(dev_priv, SVGA_REG_GMRS_MAX_PAGES);
        dev_priv->memory_size =
            vmw_read(dev_priv, SVGA_REG_MEMORY_SIZE);
        dev_priv->memory_size -= dev_priv->vram_size;
    } else {
        /*
         * An arbitrary limit of 512MiB on surface
         * memory. But all HWV8 hardware supports GMR2.
         */
        dev_priv->memory_size = 512*1024*1024;
    }
    dev_priv->max_mob_pages = 0;
    dev_priv->max_mob_size = 0;
    if (dev_priv->capabilities & SVGA_CAP_GBOBJECTS) {
        uint64_t mem_size =
            vmw_read(dev_priv,
                     SVGA_REG_SUGGESTED_GBOBJECT_MEM_SIZE_KB);

        dev_priv->max_mob_pages = mem_size * 1024 / PAGE_SIZE;
        dev_priv->prim_bb_mem =
            vmw_read(dev_priv,
                     SVGA_REG_MAX_PRIMARY_BOUNDING_BOX_MEM);
        dev_priv->max_mob_size =
            vmw_read(dev_priv, SVGA_REG_MOB_MAX_SIZE);
        dev_priv->stdu_max_width =
            vmw_read(dev_priv, SVGA_REG_SCREENTARGET_MAX_WIDTH);
        dev_priv->stdu_max_height =
            vmw_read(dev_priv, SVGA_REG_SCREENTARGET_MAX_HEIGHT);

        vmw_write(dev_priv, SVGA_REG_DEV_CAP,
                  SVGA3D_DEVCAP_MAX_TEXTURE_WIDTH);
        dev_priv->texture_max_width = vmw_read(dev_priv,
                                               SVGA_REG_DEV_CAP);
        vmw_write(dev_priv, SVGA_REG_DEV_CAP,
                  SVGA3D_DEVCAP_MAX_TEXTURE_HEIGHT);
        dev_priv->texture_max_height = vmw_read(dev_priv,
                                                SVGA_REG_DEV_CAP);
    } else {
        dev_priv->texture_max_width = 8192;
        dev_priv->texture_max_height = 8192;
        dev_priv->prim_bb_mem = dev_priv->vram_size;
    }

    vmw_print_capabilities(dev_priv->capabilities);

    ret = vmw_dma_masks(dev_priv);
    if (unlikely(ret != 0))
        goto out_err0;

    if (dev_priv->capabilities & SVGA_CAP_GMR2) {
        DRM_INFO("Max GMR ids is %u\n",
                 (unsigned)dev_priv->max_gmr_ids);
        DRM_INFO("Max number of GMR pages is %u\n",
                 (unsigned)dev_priv->max_gmr_pages);
        DRM_INFO("Max dedicated hypervisor surface memory is %u kiB\n",
                 (unsigned)dev_priv->memory_size / 1024);
    }
    DRM_INFO("Maximum display memory size is %u kiB\n",
             dev_priv->prim_bb_mem / 1024);
    DRM_INFO("VRAM at 0x%08x size is %u kiB\n",
             dev_priv->vram_start, dev_priv->vram_size / 1024);
    DRM_INFO("MMIO at 0x%08x size is %u kiB\n",
             dev_priv->mmio_start, dev_priv->mmio_size / 1024);

    ret = vmw_ttm_global_init(dev_priv);
    if (unlikely(ret != 0))
        goto out_err0;


    vmw_master_init(&dev_priv->fbdev_master);
    ttm_lock_set_kill(&dev_priv->fbdev_master.lock, false, SIGTERM);
    dev_priv->active_master = &dev_priv->fbdev_master;


    dev_priv->mmio_mtrr = arch_phys_wc_add(dev_priv->mmio_start,
                                           dev_priv->mmio_size);

    dev_priv->mmio_virt = ioremap_wc(dev_priv->mmio_start,
                                     dev_priv->mmio_size);

    if (unlikely(dev_priv->mmio_virt == NULL)) {
        ret = -ENOMEM;
        DRM_ERROR("Failed mapping MMIO.\n");
        goto out_err3;
    }

    /* Need mmio memory to check for fifo pitchlock cap. */
    if (!(dev_priv->capabilities & SVGA_CAP_DISPLAY_TOPOLOGY) &&
            !(dev_priv->capabilities & SVGA_CAP_PITCHLOCK) &&
            !vmw_fifo_have_pitchlock(dev_priv)) {
        ret = -ENOSYS;
        DRM_ERROR("Hardware has no pitchlock\n");
        goto out_err4;
    }

    dev_priv->tdev = ttm_object_device_init
                     (dev_priv->mem_global_ref.object, 12, &vmw_prime_dmabuf_ops);

    if (unlikely(dev_priv->tdev == NULL)) {
        DRM_ERROR("Unable to initialize TTM object management.\n");
        ret = -ENOMEM;
        goto out_err4;
    }

    dev->dev_private = dev_priv;

    ret = pci_request_regions(dev->pdev, "vmwgfx probe");
    dev_priv->stealth = (ret != 0);
    if (dev_priv->stealth) {
        /**
         * Request at least the mmio PCI resource.
         */

        DRM_INFO("It appears like vesafb is loaded. "
                 "Ignore above error if any.\n");
        ret = pci_request_region(dev->pdev, 2, "vmwgfx stealth probe");
        if (unlikely(ret != 0)) {
            DRM_ERROR("Failed reserving the SVGA MMIO resource.\n");
            goto out_no_device;
        }
    }

    if (dev_priv->capabilities & SVGA_CAP_IRQMASK) {
        ret = drm_irq_install(dev, dev->pdev->irq);
        if (ret != 0) {
            DRM_ERROR("Failed installing irq: %d\n", ret);
            goto out_no_irq;
        }
    }

    dev_priv->fman = vmw_fence_manager_init(dev_priv);
    if (unlikely(dev_priv->fman == NULL)) {
        ret = -ENOMEM;
        goto out_no_fman;
    }

    ret = ttm_bo_device_init(&dev_priv->bdev,
                             dev_priv->bo_global_ref.ref.object,
                             &vmw_bo_driver,
                             dev->anon_inode->i_mapping,
                             VMWGFX_FILE_PAGE_OFFSET,
                             false);
    if (unlikely(ret != 0)) {
        DRM_ERROR("Failed initializing TTM buffer object driver.\n");
        goto out_no_bdev;
    }

    /*
     * Enable VRAM, but initially don't use it until SVGA is enabled and
     * unhidden.
     */
    ret = ttm_bo_init_mm(&dev_priv->bdev, TTM_PL_VRAM,
                         (dev_priv->vram_size >> PAGE_SHIFT));
    if (unlikely(ret != 0)) {
        DRM_ERROR("Failed initializing memory manager for VRAM.\n");
        goto out_no_vram;
    }
    dev_priv->bdev.man[TTM_PL_VRAM].use_type = false;

    dev_priv->has_gmr = true;
    if (((dev_priv->capabilities & (SVGA_CAP_GMR | SVGA_CAP_GMR2)) == 0) ||
            refuse_dma || ttm_bo_init_mm(&dev_priv->bdev, VMW_PL_GMR,
                                         VMW_PL_GMR) != 0) {
        DRM_INFO("No GMR memory available. "
                 "Graphics memory resources are very limited.\n");
        dev_priv->has_gmr = false;
    }

    if (dev_priv->capabilities & SVGA_CAP_GBOBJECTS) {
        dev_priv->has_mob = true;
        if (ttm_bo_init_mm(&dev_priv->bdev, VMW_PL_MOB,
                           VMW_PL_MOB) != 0) {
            DRM_INFO("No MOB memory available. "
                     "3D will be disabled.\n");
            dev_priv->has_mob = false;
        }
    }

    if (dev_priv->has_mob) {
        spin_lock(&dev_priv->cap_lock);
        vmw_write(dev_priv, SVGA_REG_DEV_CAP, SVGA3D_DEVCAP_DX);
        dev_priv->has_dx = !!vmw_read(dev_priv, SVGA_REG_DEV_CAP);
        spin_unlock(&dev_priv->cap_lock);
    }


    ret = vmw_kms_init(dev_priv);
    if (unlikely(ret != 0))
        goto out_no_kms;
    vmw_overlay_init(dev_priv);

    ret = vmw_request_device(dev_priv);
    if (ret)
        goto out_no_fifo;

    DRM_INFO("DX: %s\n", dev_priv->has_dx ? "yes." : "no.");

    if (dev_priv->enable_fb) {
        vmw_fifo_resource_inc(dev_priv);
        vmw_svga_enable(dev_priv);
        vmw_fb_init(dev_priv);
    }

    dev_priv->pm_nb.notifier_call = vmwgfx_pm_notifier;
    register_pm_notifier(&dev_priv->pm_nb);

    return 0;

out_no_fifo:
    vmw_overlay_close(dev_priv);
    vmw_kms_close(dev_priv);
out_no_kms:
    if (dev_priv->has_mob)
        (void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_MOB);
    if (dev_priv->has_gmr)
        (void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_GMR);
    (void)ttm_bo_clean_mm(&dev_priv->bdev, TTM_PL_VRAM);
out_no_vram:
    (void)ttm_bo_device_release(&dev_priv->bdev);
out_no_bdev:
    vmw_fence_manager_takedown(dev_priv->fman);
out_no_fman:
    if (dev_priv->capabilities & SVGA_CAP_IRQMASK)
        drm_irq_uninstall(dev_priv->dev);
out_no_irq:
    if (dev_priv->stealth)
        pci_release_region(dev->pdev, 2);
    else
        pci_release_regions(dev->pdev);
out_no_device:
    ttm_object_device_release(&dev_priv->tdev);
out_err4:
    iounmap(dev_priv->mmio_virt);
out_err3:
    arch_phys_wc_del(dev_priv->mmio_mtrr);
    vmw_ttm_global_release(dev_priv);
out_err0:
    for (i = vmw_res_context; i < vmw_res_max; ++i)
        idr_destroy(&dev_priv->res_idr[i]);

    if (dev_priv->ctx.staged_bindings)
        vmw_binding_state_free(dev_priv->ctx.staged_bindings);
    kfree(dev_priv);
    return ret;
}
Example #4
void shm_exit_ns(struct ipc_namespace *ns)
{
	free_ipcs(ns, &shm_ids(ns), do_shm_rmid);
	idr_destroy(&ns->ids[IPC_SHM_IDS].ipcs_idr);
}
Example #5
static void __exit uio_exit(void)
{
	release_uio_class();
	idr_destroy(&uio_idr);
}
Example #6
void msg_exit_ns(struct ipc_namespace *ns)
{
	free_ipcs(ns, &msg_ids(ns), freeque);
	idr_destroy(&ns->ids[IPC_MSG_IDS].ipcs_idr);
}
Example #7
static void __exit w1_ds2760_exit(void)
{
	w1_unregister_family(&w1_ds2760_family);
	idr_destroy(&bat_idr);
}
Example #8
void __devexit c2_cleanup_qp_table(struct c2_dev *c2dev)
{
	idr_destroy(&c2dev->qp_table.idr);
}
Example #9
/*
 * We are about to suspend. Save the TPM state
 * so that it can be restored.
 */
int tpm_pm_suspend(struct device *dev)
{
	struct tpm_chip *chip = dev_get_drvdata(dev);
	struct tpm_cmd_t cmd;
	int rc, try;

	u8 dummy_hash[TPM_DIGEST_SIZE] = { 0 };

	if (chip == NULL)
		return -ENODEV;

	if (chip->flags & TPM_CHIP_FLAG_TPM2) {
		tpm2_shutdown(chip, TPM2_SU_STATE);
		return 0;
	}

	/* for buggy tpm, flush pcrs with extend to selected dummy */
	if (tpm_suspend_pcr) {
		cmd.header.in = pcrextend_header;
		cmd.params.pcrextend_in.pcr_idx = cpu_to_be32(tpm_suspend_pcr);
		memcpy(cmd.params.pcrextend_in.hash, dummy_hash,
		       TPM_DIGEST_SIZE);
		rc = tpm_transmit_cmd(chip, &cmd, EXTEND_PCR_RESULT_SIZE,
				      "extending dummy pcr before suspend");
	}

	/* now do the actual savestate */
	for (try = 0; try < TPM_RETRY; try++) {
		cmd.header.in = savestate_header;
		rc = tpm_transmit_cmd(chip, &cmd, SAVESTATE_RESULT_SIZE, NULL);

		/*
		 * If the TPM indicates that it is too busy to respond to
		 * this command then retry before giving up.  It can take
		 * several seconds for this TPM to be ready.
		 *
		 * This can happen if the TPM has already been sent the
		 * SaveState command before the driver has loaded.  TCG 1.2
		 * specification states that any communication after SaveState
		 * may cause the TPM to invalidate previously saved state.
		 */
		if (rc != TPM_WARN_RETRY)
			break;
		msleep(TPM_TIMEOUT_RETRY);
	}

	if (rc)
		dev_err(&chip->dev,
			"Error (%d) sending savestate before suspend\n", rc);
	else if (try > 0)
		dev_warn(&chip->dev, "TPM savestate took %dms\n",
			 try * TPM_TIMEOUT_RETRY);

	return rc;
}
EXPORT_SYMBOL_GPL(tpm_pm_suspend);

/*
 * Resume from a power save. The BIOS already restored
 * the TPM state.
 */
int tpm_pm_resume(struct device *dev)
{
	struct tpm_chip *chip = dev_get_drvdata(dev);

	if (chip == NULL)
		return -ENODEV;

	return 0;
}
EXPORT_SYMBOL_GPL(tpm_pm_resume);

#define TPM_GETRANDOM_RESULT_SIZE	18
static struct tpm_input_header tpm_getrandom_header = {
	.tag = TPM_TAG_RQU_COMMAND,
	.length = cpu_to_be32(14),
	.ordinal = TPM_ORD_GET_RANDOM
};

/**
 * tpm_get_random() - Get random bytes from the tpm's RNG
 * @chip_num: A specific chip number for the request or TPM_ANY_NUM
 * @out: destination buffer for the random bytes
 * @max: the max number of bytes to write to @out
 *
 * Returns < 0 on error and the number of bytes read on success
 */
int tpm_get_random(u32 chip_num, u8 *out, size_t max)
{
	struct tpm_chip *chip;
	struct tpm_cmd_t tpm_cmd;
	u32 recd, num_bytes = min_t(u32, max, TPM_MAX_RNG_DATA);
	int err, total = 0, retries = 5;
	u8 *dest = out;

	if (!out || !num_bytes || max > TPM_MAX_RNG_DATA)
		return -EINVAL;

	chip = tpm_chip_find_get(chip_num);
	if (chip == NULL)
		return -ENODEV;

	if (chip->flags & TPM_CHIP_FLAG_TPM2) {
		err = tpm2_get_random(chip, out, max);
		tpm_put_ops(chip);
		return err;
	}

	do {
		tpm_cmd.header.in = tpm_getrandom_header;
		tpm_cmd.params.getrandom_in.num_bytes = cpu_to_be32(num_bytes);

		err = tpm_transmit_cmd(chip, &tpm_cmd,
				   TPM_GETRANDOM_RESULT_SIZE + num_bytes,
				   "attempting get random");
		if (err)
			break;

		recd = be32_to_cpu(tpm_cmd.params.getrandom_out.rng_data_len);
		memcpy(dest, tpm_cmd.params.getrandom_out.rng_data, recd);

		dest += recd;
		total += recd;
		num_bytes -= recd;
	} while (retries-- && total < max);

	tpm_put_ops(chip);
	return total ? total : -EIO;
}
EXPORT_SYMBOL_GPL(tpm_get_random);

/**
 * tpm_seal_trusted() - seal a trusted key
 * @chip_num: A specific chip number for the request or TPM_ANY_NUM
 * @options: authentication values and other options
 * @payload: the key data in clear and encrypted form
 *
 * Returns < 0 on error and 0 on success. At the moment, only TPM 2.0 chips
 * are supported.
 */
int tpm_seal_trusted(u32 chip_num, struct trusted_key_payload *payload,
		     struct trusted_key_options *options)
{
	struct tpm_chip *chip;
	int rc;

	chip = tpm_chip_find_get(chip_num);
	if (chip == NULL || !(chip->flags & TPM_CHIP_FLAG_TPM2))
		return -ENODEV;

	rc = tpm2_seal_trusted(chip, payload, options);

	tpm_put_ops(chip);
	return rc;
}
EXPORT_SYMBOL_GPL(tpm_seal_trusted);

/**
 * tpm_unseal_trusted() - unseal a trusted key
 * @chip_num: A specific chip number for the request or TPM_ANY_NUM
 * @options: authentication values and other options
 * @payload: the key data in clear and encrypted form
 *
 * Returns < 0 on error and 0 on success. At the moment, only TPM 2.0 chips
 * are supported.
 */
int tpm_unseal_trusted(u32 chip_num, struct trusted_key_payload *payload,
		       struct trusted_key_options *options)
{
	struct tpm_chip *chip;
	int rc;

	chip = tpm_chip_find_get(chip_num);
	if (chip == NULL || !(chip->flags & TPM_CHIP_FLAG_TPM2))
		return -ENODEV;

	rc = tpm2_unseal_trusted(chip, payload, options);

	tpm_put_ops(chip);

	return rc;
}
EXPORT_SYMBOL_GPL(tpm_unseal_trusted);

static int __init tpm_init(void)
{
	int rc;

	tpm_class = class_create(THIS_MODULE, "tpm");
	if (IS_ERR(tpm_class)) {
		pr_err("couldn't create tpm class\n");
		return PTR_ERR(tpm_class);
	}

	rc = alloc_chrdev_region(&tpm_devt, 0, TPM_NUM_DEVICES, "tpm");
	if (rc < 0) {
		pr_err("tpm: failed to allocate char dev region\n");
		class_destroy(tpm_class);
		return rc;
	}

	return 0;
}

static void __exit tpm_exit(void)
{
	idr_destroy(&dev_nums_idr);
	class_destroy(tpm_class);
	unregister_chrdev_region(tpm_devt, TPM_NUM_DEVICES);
}
Example #10
void __exit dca_sysfs_exit(void)
{
	class_destroy(dca_class);
	idr_destroy(&dca_idr);
}
Example #11
void vgem_fence_close(struct vgem_file *vfile)
{
	idr_for_each(&vfile->fence_idr, __vgem_fence_idr_fini, vfile);
	idr_destroy(&vfile->fence_idr);
}
Example #12
void
ida_destroy(struct ida *ida)
{
    idr_destroy(&ida->idr);
    free(ida->free_bitmap, M_IDR);
}
Example #13
void ath10k_htt_tx_free(struct ath10k_htt *htt)
{
	idr_for_each(&htt->pending_tx, ath10k_htt_tx_clean_up_pending, htt->ar);
	idr_destroy(&htt->pending_tx);
	dma_pool_destroy(htt->tx_pool);
}
Example #14
void siw_idr_release(struct siw_dev *sdev)
{
	idr_destroy(&sdev->qp_idr);
	idr_destroy(&sdev->cq_idr);
	idr_destroy(&sdev->pd_idr);
}
Example #15
void hfi1_vnic_cleanup(struct hfi1_devdata *dd)
{
	idr_destroy(&dd->vnic.vesw_idr);
}