Example #1
0
/*
 * visorchannel_create() - creates the struct visorchannel abstraction for a
 *                         data area in memory, but does NOT modify this data
 *                         area
 * @physaddr:      physical address of start of channel
 * @gfp:           gfp_t to use when allocating memory for the data struct
 * @guid:           GUID that identifies channel type
 * @needs_lock:    must specify true if you have multiple threads of execution
 *                 that will be calling visorchannel methods of this
 *                 visorchannel at the same time
 *
 * Return: pointer to visorchannel that was created if successful,
 *         otherwise NULL
 */
struct visorchannel *visorchannel_create(u64 physaddr, gfp_t gfp,
					 const guid_t *guid, bool needs_lock)
{
	struct visorchannel *channel;
	int err;
	size_t size = sizeof(struct channel_header);

	if (physaddr == 0)
		return NULL;

	channel = kzalloc(sizeof(*channel), gfp);
	if (!channel)
		return NULL;
	channel->needs_lock = needs_lock;
	spin_lock_init(&channel->insert_lock);
	spin_lock_init(&channel->remove_lock);
	/*
	 * The video driver contains the efi framebuffer, so it will get a
	 * resource conflict when requesting its full mem region. Since we are
	 * only using the efi framebuffer for video we can ignore this. Remember
	 * that we haven't requested it, so we don't try to release it later on.
	 */
	channel->requested = request_mem_region(physaddr, size, VISOR_DRV_NAME);
	if (!channel->requested && !guid_equal(guid, &visor_video_guid))
		/* we only care about errors if this is not the video channel */
		goto err_destroy_channel;
	channel->mapped = memremap(physaddr, size, MEMREMAP_WB);
	if (!channel->mapped) {
		release_mem_region(physaddr, size);
		goto err_destroy_channel;
	}
	channel->physaddr = physaddr;
	channel->nbytes = size;
	err = visorchannel_read(channel, 0, &channel->chan_hdr, size);
	if (err)
		goto err_destroy_channel;
	size = (ulong)channel->chan_hdr.size;
	memunmap(channel->mapped);
	if (channel->requested)
		release_mem_region(channel->physaddr, channel->nbytes);
	channel->mapped = NULL;
	channel->requested = request_mem_region(channel->physaddr, size,
						VISOR_DRV_NAME);
	if (!channel->requested && !guid_equal(guid, &visor_video_guid))
		/* we only care about errors if this is not the video channel */
		goto err_destroy_channel;
	channel->mapped = memremap(channel->physaddr, size, MEMREMAP_WB);
	if (!channel->mapped) {
		release_mem_region(channel->physaddr, size);
		goto err_destroy_channel;
	}
	channel->nbytes = size;
	guid_copy(&channel->guid, guid);
	return channel;

err_destroy_channel:
	visorchannel_destroy(channel);
	return NULL;
}
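For context, a minimal caller sketch for the function above. visorchannel_create() and visorchannel_destroy() come from the example itself; the probe wrapper and my_guid are illustrative assumptions, not part of the source:

/* Hypothetical caller: map a channel, use it, then tear it down. */
static int visor_probe_example(u64 chan_physaddr, const guid_t *my_guid)
{
	struct visorchannel *ch;

	/* needs_lock = true: several threads will call channel methods */
	ch = visorchannel_create(chan_physaddr, GFP_KERNEL, my_guid, true);
	if (!ch)
		return -ENOMEM;

	/* ... issue visorchannel reads/writes here ... */

	visorchannel_destroy(ch);
	return 0;
}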
Example #2
0
struct nhlt_acpi_table *skl_nhlt_init(struct device *dev)
{
	acpi_handle handle;
	union acpi_object *obj;
	struct nhlt_resource_desc *nhlt_ptr = NULL;
	struct nhlt_acpi_table *nhlt_table = NULL;

	if (ACPI_FAILURE(acpi_get_handle(NULL, DSDT_NHLT_PATH, &handle))) {
		dev_err(dev, "Requested NHLT device not found\n");
		return NULL;
	}

	obj = acpi_evaluate_dsm(handle, OSC_UUID, 1, 1, NULL);
	if (obj && obj->type == ACPI_TYPE_BUFFER) {
		nhlt_ptr = (struct nhlt_resource_desc *)obj->buffer.pointer;
		nhlt_table = (struct nhlt_acpi_table *)
				memremap(nhlt_ptr->min_addr, nhlt_ptr->length,
				MEMREMAP_WB);
		ACPI_FREE(obj);
		return nhlt_table;
	}

	dev_err(dev, "device specific method to extract NHLT blob failed\n");
	return NULL;
}
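The table returned above stays mapped via memremap(), so the caller owns a matching memunmap() on teardown (Example #3 shows the same call on its error path). A minimal sketch; the helper name is an assumption, not part of the example:

/* Hypothetical counterpart: release the mapping from skl_nhlt_init(). */
static void skl_nhlt_free(struct nhlt_acpi_table *nhlt_table)
{
	if (nhlt_table)
		memunmap(nhlt_table);
}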
Example #3
0
struct nhlt_acpi_table *skl_nhlt_init(struct device *dev)
{
	acpi_handle handle;
	union acpi_object *obj;
	struct nhlt_resource_desc *nhlt_ptr = NULL;
	struct nhlt_acpi_table *nhlt_table = NULL;

	handle = ACPI_HANDLE(dev);
	if (!handle) {
		dev_err(dev, "Didn't find ACPI_HANDLE\n");
		return NULL;
	}

	obj = acpi_evaluate_dsm(handle, &osc_guid, 1, 1, NULL);
	if (obj && obj->type == ACPI_TYPE_BUFFER) {
		nhlt_ptr = (struct nhlt_resource_desc *)obj->buffer.pointer;
		nhlt_table = (struct nhlt_acpi_table *)
				memremap(nhlt_ptr->min_addr, nhlt_ptr->length,
				MEMREMAP_WB);
		ACPI_FREE(obj);
		if (nhlt_table && (strncmp(nhlt_table->header.signature,
					NHLT_ACPI_HEADER_SIG,
					strlen(NHLT_ACPI_HEADER_SIG)) != 0)) {
			memunmap(nhlt_table);
			dev_err(dev, "NHLT ACPI header signature incorrect\n");
			return NULL;
		}
		return nhlt_table;
	}

	dev_err(dev, "device specific method to extract NHLT blob failed\n");
	return NULL;
}
Example #4
0
/* read binary bios log from EFI configuration table */
int tpm_read_log_efi(struct tpm_chip *chip)
{
	struct linux_efi_tpm_eventlog *log_tbl;
	struct tpm_bios_log *log;
	u32 log_size;
	u8 tpm_log_version;

	if (!(chip->flags & TPM_CHIP_FLAG_TPM2))
		return -ENODEV;

	if (efi.tpm_log == EFI_INVALID_TABLE_ADDR)
		return -ENODEV;

	log = &chip->log;

	log_tbl = memremap(efi.tpm_log, sizeof(*log_tbl), MEMREMAP_WB);
	if (!log_tbl) {
		pr_err("Could not map UEFI TPM log table !\n");
		return -ENOMEM;
	}

	log_size = log_tbl->size;
	memunmap(log_tbl);

	log_tbl = memremap(efi.tpm_log, sizeof(*log_tbl) + log_size,
			   MEMREMAP_WB);
	if (!log_tbl) {
		pr_err("Could not map UEFI TPM log table payload!\n");
		return -ENOMEM;
	}

	/* duplicate the event log into kernel memory */
	log->bios_event_log = kmemdup(log_tbl->log, log_size, GFP_KERNEL);
	if (!log->bios_event_log)
		goto err_memunmap;
	log->bios_event_log_end = log->bios_event_log + log_size;

	tpm_log_version = log_tbl->version;
	memunmap(log_tbl);
	return tpm_log_version;

err_memunmap:
	memunmap(log_tbl);
	return -ENOMEM;
}
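Note the mixed return convention above: a non-negative value is the log format version, a negative value is an errno. A brief, illustrative caller sketch (the wrapper name is an assumption):

/* Hypothetical caller: separate the version from an error code. */
static int tpm_log_version_example(struct tpm_chip *chip)
{
	int rc = tpm_read_log_efi(chip);

	if (rc < 0)	/* -ENODEV, -ENOMEM, ... */
		return rc;
	pr_info("EFI TPM event log format version: %d\n", rc);
	return 0;
}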
Example #5
0
static ssize_t setup_data_data_read(struct file *fp,
				    struct kobject *kobj,
				    struct bin_attribute *bin_attr,
				    char *buf,
				    loff_t off, size_t count)
{
	int nr, ret = 0;
	u64 paddr;
	struct setup_data *data;
	void *p;

	ret = kobj_to_setup_data_nr(kobj, &nr);
	if (ret)
		return ret;

	ret = get_setup_data_paddr(nr, &paddr);
	if (ret)
		return ret;
	data = memremap(paddr, sizeof(*data), MEMREMAP_WB);
	if (!data)
		return -ENOMEM;

	if (off > data->len) {
		ret = -EINVAL;
		goto out;
	}

	if (count > data->len - off)
		count = data->len - off;

	if (!count)
		goto out;

	ret = count;
	p = memremap(paddr + sizeof(*data), data->len, MEMREMAP_WB);
	if (!p) {
		ret = -ENOMEM;
		goto out;
	}
	memcpy(buf, p + off, count);
	memunmap(p);
out:
	memunmap(data);
	return ret;
}
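setup_data_data_read() has the bin_attribute read-callback signature; a sketch of how it might be registered in sysfs (the attribute name and mode are illustrative assumptions):

/* Hypothetical sysfs binding for the raw setup_data payload. */
static struct bin_attribute data_attr = {
	.attr = { .name = "data", .mode = 0400 },
	.read = setup_data_data_read,
};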
Example #6
0
/*
 * sfi_map_memory()/sfi_unmap_memory() call early_memremap()/early_memunmap(),
 * which are __init functions, so calling them from here would normally
 * trigger a section mismatch warning. Use __ref to suppress it.
 */
static void __iomem * __ref sfi_map_memory(u64 phys, u32 size)
{
	if (!phys || !size)
		return NULL;

	if (sfi_use_memremap)
		return memremap(phys, size, MEMREMAP_WB);
	else
		return early_memremap(phys, size);
}
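The comment above refers to the paired sfi_unmap_memory(); a sketch of that symmetric helper, assuming the same sfi_use_memremap switch (the helper itself is not shown in the example):

/* Undo whichever mapping sfi_map_memory() created. */
static void __ref sfi_unmap_memory(void __iomem *virt, u32 size)
{
	if (!virt || !size)
		return;

	if (sfi_use_memremap)
		memunmap(virt);
	else
		early_memunmap(virt, size);
}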
Example #7
0
/*
 * To be called after the EFI page tables have been populated. If a memory
 * attributes table is available, its contents will be used to update the
 * mappings with tightened permissions as described by the table.
 * This requires the UEFI memory map to have already been populated with
 * virtual addresses.
 */
int __init efi_memattr_apply_permissions(struct mm_struct *mm,
					 efi_memattr_perm_setter fn)
{
	efi_memory_attributes_table_t *tbl;
	int i, ret;

	if (tbl_size <= sizeof(*tbl))
		return 0;

	/*
	 * We need the EFI memory map to be set up so we can use it to
	 * look up the virtual addresses of all entries in the EFI
	 * Memory Attributes table. If it isn't available, this
	 * function should not be called.
	 */
	if (WARN_ON(!efi_enabled(EFI_MEMMAP)))
		return 0;

	tbl = memremap(efi.mem_attr_table, tbl_size, MEMREMAP_WB);
	if (!tbl) {
		pr_err("Failed to map EFI Memory Attributes table @ 0x%lx\n",
		       efi.mem_attr_table);
		return -ENOMEM;
	}

	if (efi_enabled(EFI_DBG))
		pr_info("Processing EFI Memory Attributes table:\n");

	for (i = ret = 0; ret == 0 && i < tbl->num_entries; i++) {
		efi_memory_desc_t md;
		unsigned long size;
		bool valid;
		char buf[64];

		valid = entry_is_valid((void *)tbl->entry + i * tbl->desc_size,
				       &md);
		size = md.num_pages << EFI_PAGE_SHIFT;
		if (efi_enabled(EFI_DBG) || !valid)
			pr_info("%s 0x%012llx-0x%012llx %s\n",
				valid ? "" : "!", md.phys_addr,
				md.phys_addr + size - 1,
				efi_md_typeattr_format(buf, sizeof(buf), &md));

		if (valid)
			ret = fn(mm, &md);
	}
	memunmap(tbl);
	return ret;
}
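The fn argument is an efi_memattr_perm_setter callback invoked once per valid descriptor; a skeletal, purely illustrative implementation to show the expected signature:

/* Hypothetical setter: tighten page permissions for one descriptor. */
static int example_set_permissions(struct mm_struct *mm, efi_memory_desc_t *md)
{
	/*
	 * Architecture code would walk md->num_pages starting at
	 * md->virt_addr and apply read-only / no-exec protections in @mm
	 * according to md->attribute.
	 */
	return 0;
}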
Example #8
0
/**
 * intel_gvt_init_opregion - initialize host opregion-related state
 * @gvt: a GVT device
 *
 * Returns:
 * Zero on success, negative error code if failed.
 */
int intel_gvt_init_opregion(struct intel_gvt *gvt)
{
	gvt_dbg_core("init host opregion\n");

	pci_read_config_dword(gvt->dev_priv->drm.pdev, INTEL_GVT_PCI_OPREGION,
			&gvt->opregion.opregion_pa);

	gvt->opregion.opregion_va = memremap(gvt->opregion.opregion_pa,
					     INTEL_GVT_OPREGION_SIZE, MEMREMAP_WB);
	if (!gvt->opregion.opregion_va) {
		gvt_err("fail to map host opregion\n");
		return -EFAULT;
	}
	return 0;
}
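Since the opregion is mapped with memremap(), the matching teardown is memunmap(); a minimal sketch (the function name is assumed, not shown in the example):

/* Hypothetical counterpart to intel_gvt_init_opregion(). */
static void intel_gvt_clean_opregion(struct intel_gvt *gvt)
{
	memunmap(gvt->opregion.opregion_va);
	gvt->opregion.opregion_va = NULL;
}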
Example #9
0
static int iommu_load_old_irte(struct intel_iommu *iommu)
{
	struct irte *old_ir_table;
	phys_addr_t irt_phys;
	unsigned int i;
	size_t size;
	u64 irta;

	if (!is_kdump_kernel()) {
		pr_warn("IRQ remapping was enabled on %s but we are not in kdump mode\n",
			iommu->name);
		clear_ir_pre_enabled(iommu);
		iommu_disable_irq_remapping(iommu);
		return -EINVAL;
	}

	/* Check whether the old ir-table has the same size as ours */
	irta = dmar_readq(iommu->reg + DMAR_IRTA_REG);
	if ((irta & INTR_REMAP_TABLE_REG_SIZE_MASK)
	     != INTR_REMAP_TABLE_REG_SIZE)
		return -EINVAL;

	irt_phys = irta & VTD_PAGE_MASK;
	size     = INTR_REMAP_TABLE_ENTRIES * sizeof(struct irte);

	/* Map the old IR table */
	old_ir_table = memremap(irt_phys, size, MEMREMAP_WB);
	if (!old_ir_table)
		return -ENOMEM;

	/* Copy data over */
	memcpy(iommu->ir_table->base, old_ir_table, size);

	__iommu_flush_cache(iommu, iommu->ir_table->base, size);

	/*
	 * Now check the table for used entries and mark those as
	 * allocated in the bitmap
	 */
	for (i = 0; i < INTR_REMAP_TABLE_ENTRIES; i++) {
		if (iommu->ir_table->base[i].present)
			bitmap_set(iommu->ir_table->bitmap, i, 1);
	}

	memunmap(old_ir_table);

	return 0;
}
Example #10
0
void *devm_memremap(struct device *dev, resource_size_t offset,
		size_t size, unsigned long flags)
{
	void **ptr, *addr;

	ptr = devres_alloc(devm_memremap_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return NULL;

	addr = memremap(offset, size, flags);
	if (addr) {
		*ptr = addr;
		devres_add(dev, ptr);
	} else
		devres_free(ptr);

	return addr;
}
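The devres_alloc() call above registers devm_memremap_release as the release action; a sketch of that hook, under the assumption that the devres slot holds exactly the memremap() return value, as stored via *ptr = addr above:

/* Release hook: @res points at the void * slot filled by devm_memremap(). */
static void devm_memremap_release(struct device *dev, void *res)
{
	memunmap(*(void **)res);
}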
Example #11
0
static int __init get_setup_data_total_num(u64 pa_data, int *nr)
{
	int ret = 0;
	struct setup_data *data;

	*nr = 0;
	while (pa_data) {
		*nr += 1;
		data = memremap(pa_data, sizeof(*data), MEMREMAP_WB);
		if (!data) {
			ret = -ENOMEM;
			goto out;
		}
		pa_data = data->next;
		memunmap(data);
	}

out:
	return ret;
}
Example #12
0
static int get_setup_data_paddr(int nr, u64 *paddr)
{
	int i = 0;
	struct setup_data *data;
	u64 pa_data = boot_params.hdr.setup_data;

	while (pa_data) {
		if (nr == i) {
			*paddr = pa_data;
			return 0;
		}
		data = memremap(pa_data, sizeof(*data), MEMREMAP_WB);
		if (!data)
			return -ENOMEM;

		pa_data = data->next;
		memunmap(data);
		i++;
	}
	return -EINVAL;
}
Example #13
0
void *devm_memremap(struct device *dev, resource_size_t offset,
		size_t size, unsigned long flags)
{
	void **ptr, *addr;

	ptr = devres_alloc_node(devm_memremap_release, sizeof(*ptr), GFP_KERNEL,
			dev_to_node(dev));
	if (!ptr)
		return ERR_PTR(-ENOMEM);

	addr = memremap(offset, size, flags);
	if (addr) {
		*ptr = addr;
		devres_add(dev, ptr);
	} else {
		devres_free(ptr);
		return ERR_PTR(-ENXIO);
	}

	return addr;
}
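Unlike the NULL-returning variant in Example #10, this version reports failure via ERR_PTR(), so callers must test with IS_ERR(). An illustrative probe-side sketch (dev and res are assumed to come from the caller):

/* Hypothetical usage of the ERR_PTR-returning devm_memremap(). */
static int devm_memremap_probe_example(struct device *dev,
				       struct resource *res)
{
	void *base;

	base = devm_memremap(dev, res->start, resource_size(res), MEMREMAP_WB);
	if (IS_ERR(base))
		return PTR_ERR(base);

	/* ... use base; the mapping is released automatically on detach ... */
	return 0;
}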
Example #14
0
static int __init get_setup_data_size(int nr, size_t *size)
{
	int i = 0;
	struct setup_data *data;
	u64 pa_data = boot_params.hdr.setup_data;

	while (pa_data) {
		data = memremap(pa_data, sizeof(*data), MEMREMAP_WB);
		if (!data)
			return -ENOMEM;
		if (nr == i) {
			*size = data->len;
			memunmap(data);
			return 0;
		}

		pa_data = data->next;
		memunmap(data);
		i++;
	}
	return -EINVAL;
}
Example #15
0
static ssize_t type_show(struct kobject *kobj,
			 struct kobj_attribute *attr, char *buf)
{
	int nr, ret;
	u64 paddr;
	struct setup_data *data;

	ret = kobj_to_setup_data_nr(kobj, &nr);
	if (ret)
		return ret;

	ret = get_setup_data_paddr(nr, &paddr);
	if (ret)
		return ret;
	data = memremap(paddr, sizeof(*data), MEMREMAP_WB);
	if (!data)
		return -ENOMEM;

	ret = sprintf(buf, "0x%x\n", data->type);
	memunmap(data);
	return ret;
}
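type_show() follows the kobj_attribute read-callback signature, so it would typically be wired up through the __ATTR_RO() helper; a one-line sketch (the declaration itself is assumed, not shown in the source):

/* Hypothetical binding: expose type_show() as a read-only sysfs attribute. */
static struct kobj_attribute type_attr = __ATTR_RO(type);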
Example #16
0
static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
{
	struct vmw_private *dev_priv;
	int ret;
	uint32_t svga_id;
	enum vmw_res_type i;
	bool refuse_dma = false;
	char host_log[100] = {0};

	dev_priv = kzalloc(sizeof(*dev_priv), GFP_KERNEL);
	if (unlikely(dev_priv == NULL)) {
		DRM_ERROR("Failed allocating a device private struct.\n");
		return -ENOMEM;
	}

	pci_set_master(dev->pdev);

	dev_priv->dev = dev;
	dev_priv->vmw_chipset = chipset;
	dev_priv->last_read_seqno = (uint32_t) -100;
	mutex_init(&dev_priv->cmdbuf_mutex);
	mutex_init(&dev_priv->release_mutex);
	mutex_init(&dev_priv->binding_mutex);
	mutex_init(&dev_priv->global_kms_state_mutex);
	rwlock_init(&dev_priv->resource_lock);
	ttm_lock_init(&dev_priv->reservation_sem);
	spin_lock_init(&dev_priv->hw_lock);
	spin_lock_init(&dev_priv->waiter_lock);
	spin_lock_init(&dev_priv->cap_lock);
	spin_lock_init(&dev_priv->svga_lock);

	for (i = vmw_res_context; i < vmw_res_max; ++i) {
		idr_init(&dev_priv->res_idr[i]);
		INIT_LIST_HEAD(&dev_priv->res_lru[i]);
	}

	mutex_init(&dev_priv->init_mutex);
	init_waitqueue_head(&dev_priv->fence_queue);
	init_waitqueue_head(&dev_priv->fifo_queue);
	dev_priv->fence_queue_waiters = 0;
	dev_priv->fifo_queue_waiters = 0;

	dev_priv->used_memory_size = 0;

	dev_priv->io_start = pci_resource_start(dev->pdev, 0);
	dev_priv->vram_start = pci_resource_start(dev->pdev, 1);
	dev_priv->mmio_start = pci_resource_start(dev->pdev, 2);

	dev_priv->assume_16bpp = !!vmw_assume_16bpp;

	dev_priv->enable_fb = enable_fbdev;

	vmw_write(dev_priv, SVGA_REG_ID, SVGA_ID_2);
	svga_id = vmw_read(dev_priv, SVGA_REG_ID);
	if (svga_id != SVGA_ID_2) {
		ret = -ENOSYS;
		DRM_ERROR("Unsupported SVGA ID 0x%x\n", svga_id);
		goto out_err0;
	}

	dev_priv->capabilities = vmw_read(dev_priv, SVGA_REG_CAPABILITIES);
	ret = vmw_dma_select_mode(dev_priv);
	if (unlikely(ret != 0)) {
		DRM_INFO("Restricting capabilities due to IOMMU setup.\n");
		refuse_dma = true;
	}

	dev_priv->vram_size = vmw_read(dev_priv, SVGA_REG_VRAM_SIZE);
	dev_priv->mmio_size = vmw_read(dev_priv, SVGA_REG_MEM_SIZE);
	dev_priv->fb_max_width = vmw_read(dev_priv, SVGA_REG_MAX_WIDTH);
	dev_priv->fb_max_height = vmw_read(dev_priv, SVGA_REG_MAX_HEIGHT);

	vmw_get_initial_size(dev_priv);

	if (dev_priv->capabilities & SVGA_CAP_GMR2) {
		dev_priv->max_gmr_ids =
			vmw_read(dev_priv, SVGA_REG_GMR_MAX_IDS);
		dev_priv->max_gmr_pages =
			vmw_read(dev_priv, SVGA_REG_GMRS_MAX_PAGES);
		dev_priv->memory_size =
			vmw_read(dev_priv, SVGA_REG_MEMORY_SIZE);
		dev_priv->memory_size -= dev_priv->vram_size;
	} else {
		/*
		 * An arbitrary limit of 512MiB on surface
		 * memory. But all HWV8 hardware supports GMR2.
		 */
		dev_priv->memory_size = 512*1024*1024;
	}
	dev_priv->max_mob_pages = 0;
	dev_priv->max_mob_size = 0;
	if (dev_priv->capabilities & SVGA_CAP_GBOBJECTS) {
		uint64_t mem_size =
			vmw_read(dev_priv,
				 SVGA_REG_SUGGESTED_GBOBJECT_MEM_SIZE_KB);

		/*
		 * Workaround for low memory 2D VMs to compensate for the
		 * allocation taken by fbdev
		 */
		if (!(dev_priv->capabilities & SVGA_CAP_3D))
			mem_size *= 2;

		dev_priv->max_mob_pages = mem_size * 1024 / PAGE_SIZE;
		dev_priv->prim_bb_mem =
			vmw_read(dev_priv,
				 SVGA_REG_MAX_PRIMARY_BOUNDING_BOX_MEM);
		dev_priv->max_mob_size =
			vmw_read(dev_priv, SVGA_REG_MOB_MAX_SIZE);
		dev_priv->stdu_max_width =
			vmw_read(dev_priv, SVGA_REG_SCREENTARGET_MAX_WIDTH);
		dev_priv->stdu_max_height =
			vmw_read(dev_priv, SVGA_REG_SCREENTARGET_MAX_HEIGHT);

		vmw_write(dev_priv, SVGA_REG_DEV_CAP,
			  SVGA3D_DEVCAP_MAX_TEXTURE_WIDTH);
		dev_priv->texture_max_width = vmw_read(dev_priv,
						       SVGA_REG_DEV_CAP);
		vmw_write(dev_priv, SVGA_REG_DEV_CAP,
			  SVGA3D_DEVCAP_MAX_TEXTURE_HEIGHT);
		dev_priv->texture_max_height = vmw_read(dev_priv,
							SVGA_REG_DEV_CAP);
	} else {
		dev_priv->texture_max_width = 8192;
		dev_priv->texture_max_height = 8192;
		dev_priv->prim_bb_mem = dev_priv->vram_size;
	}

	vmw_print_capabilities(dev_priv->capabilities);

	ret = vmw_dma_masks(dev_priv);
	if (unlikely(ret != 0))
		goto out_err0;

	if (dev_priv->capabilities & SVGA_CAP_GMR2) {
		DRM_INFO("Max GMR ids is %u\n",
			 (unsigned)dev_priv->max_gmr_ids);
		DRM_INFO("Max number of GMR pages is %u\n",
			 (unsigned)dev_priv->max_gmr_pages);
		DRM_INFO("Max dedicated hypervisor surface memory is %u kiB\n",
			 (unsigned)dev_priv->memory_size / 1024);
	}
	DRM_INFO("Maximum display memory size is %u kiB\n",
		 dev_priv->prim_bb_mem / 1024);
	DRM_INFO("VRAM at 0x%08x size is %u kiB\n",
		 dev_priv->vram_start, dev_priv->vram_size / 1024);
	DRM_INFO("MMIO at 0x%08x size is %u kiB\n",
		 dev_priv->mmio_start, dev_priv->mmio_size / 1024);

	ret = vmw_ttm_global_init(dev_priv);
	if (unlikely(ret != 0))
		goto out_err0;


	vmw_master_init(&dev_priv->fbdev_master);
	ttm_lock_set_kill(&dev_priv->fbdev_master.lock, false, SIGTERM);
	dev_priv->active_master = &dev_priv->fbdev_master;

	dev_priv->mmio_virt = memremap(dev_priv->mmio_start,
				       dev_priv->mmio_size, MEMREMAP_WB);

	if (unlikely(dev_priv->mmio_virt == NULL)) {
		ret = -ENOMEM;
		DRM_ERROR("Failed mapping MMIO.\n");
		goto out_err3;
	}

	/* Need mmio memory to check for fifo pitchlock cap. */
	if (!(dev_priv->capabilities & SVGA_CAP_DISPLAY_TOPOLOGY) &&
	    !(dev_priv->capabilities & SVGA_CAP_PITCHLOCK) &&
	    !vmw_fifo_have_pitchlock(dev_priv)) {
		ret = -ENOSYS;
		DRM_ERROR("Hardware has no pitchlock\n");
		goto out_err4;
	}

	dev_priv->tdev = ttm_object_device_init
		(dev_priv->mem_global_ref.object, 12, &vmw_prime_dmabuf_ops);

	if (unlikely(dev_priv->tdev == NULL)) {
		DRM_ERROR("Unable to initialize TTM object management.\n");
		ret = -ENOMEM;
		goto out_err4;
	}

	dev->dev_private = dev_priv;

	ret = pci_request_regions(dev->pdev, "vmwgfx probe");
	dev_priv->stealth = (ret != 0);
	if (dev_priv->stealth) {
		/**
		 * Request at least the mmio PCI resource.
		 */

		DRM_INFO("It appears like vesafb is loaded. "
			 "Ignore above error if any.\n");
		ret = pci_request_region(dev->pdev, 2, "vmwgfx stealth probe");
		if (unlikely(ret != 0)) {
			DRM_ERROR("Failed reserving the SVGA MMIO resource.\n");
			goto out_no_device;
		}
	}

	if (dev_priv->capabilities & SVGA_CAP_IRQMASK) {
		ret = drm_irq_install(dev, dev->pdev->irq);
		if (ret != 0) {
			DRM_ERROR("Failed installing irq: %d\n", ret);
			goto out_no_irq;
		}
	}

	dev_priv->fman = vmw_fence_manager_init(dev_priv);
	if (unlikely(dev_priv->fman == NULL)) {
		ret = -ENOMEM;
		goto out_no_fman;
	}

	ret = ttm_bo_device_init(&dev_priv->bdev,
				 dev_priv->bo_global_ref.ref.object,
				 &vmw_bo_driver,
				 dev->anon_inode->i_mapping,
				 VMWGFX_FILE_PAGE_OFFSET,
				 false);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Failed initializing TTM buffer object driver.\n");
		goto out_no_bdev;
	}

	/*
	 * Enable VRAM, but initially don't use it until SVGA is enabled and
	 * unhidden.
	 */
	ret = ttm_bo_init_mm(&dev_priv->bdev, TTM_PL_VRAM,
			     (dev_priv->vram_size >> PAGE_SHIFT));
	if (unlikely(ret != 0)) {
		DRM_ERROR("Failed initializing memory manager for VRAM.\n");
		goto out_no_vram;
	}
	dev_priv->bdev.man[TTM_PL_VRAM].use_type = false;

	dev_priv->has_gmr = true;
	if (((dev_priv->capabilities & (SVGA_CAP_GMR | SVGA_CAP_GMR2)) == 0) ||
	    refuse_dma || ttm_bo_init_mm(&dev_priv->bdev, VMW_PL_GMR,
					 VMW_PL_GMR) != 0) {
		DRM_INFO("No GMR memory available. "
			 "Graphics memory resources are very limited.\n");
		dev_priv->has_gmr = false;
	}

	if (dev_priv->capabilities & SVGA_CAP_GBOBJECTS) {
		dev_priv->has_mob = true;
		if (ttm_bo_init_mm(&dev_priv->bdev, VMW_PL_MOB,
				   VMW_PL_MOB) != 0) {
			DRM_INFO("No MOB memory available. "
				 "3D will be disabled.\n");
			dev_priv->has_mob = false;
		}
	}

	if (dev_priv->has_mob) {
		spin_lock(&dev_priv->cap_lock);
		vmw_write(dev_priv, SVGA_REG_DEV_CAP, SVGA3D_DEVCAP_DX);
		dev_priv->has_dx = !!vmw_read(dev_priv, SVGA_REG_DEV_CAP);
		spin_unlock(&dev_priv->cap_lock);
	}


	ret = vmw_kms_init(dev_priv);
	if (unlikely(ret != 0))
		goto out_no_kms;
	vmw_overlay_init(dev_priv);

	ret = vmw_request_device(dev_priv);
	if (ret)
		goto out_no_fifo;

	DRM_INFO("DX: %s\n", dev_priv->has_dx ? "yes." : "no.");

	snprintf(host_log, sizeof(host_log), "vmwgfx: %s-%s",
		VMWGFX_REPO, VMWGFX_GIT_VERSION);
	vmw_host_log(host_log);

	memset(host_log, 0, sizeof(host_log));
	snprintf(host_log, sizeof(host_log), "vmwgfx: Module Version: %d.%d.%d",
		VMWGFX_DRIVER_MAJOR, VMWGFX_DRIVER_MINOR,
		VMWGFX_DRIVER_PATCHLEVEL);
	vmw_host_log(host_log);

	if (dev_priv->enable_fb) {
		vmw_fifo_resource_inc(dev_priv);
		vmw_svga_enable(dev_priv);
		vmw_fb_init(dev_priv);
	}

	dev_priv->pm_nb.notifier_call = vmwgfx_pm_notifier;
	register_pm_notifier(&dev_priv->pm_nb);

	return 0;

out_no_fifo:
	vmw_overlay_close(dev_priv);
	vmw_kms_close(dev_priv);
out_no_kms:
	if (dev_priv->has_mob)
		(void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_MOB);
	if (dev_priv->has_gmr)
		(void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_GMR);
	(void)ttm_bo_clean_mm(&dev_priv->bdev, TTM_PL_VRAM);
out_no_vram:
	(void)ttm_bo_device_release(&dev_priv->bdev);
out_no_bdev:
	vmw_fence_manager_takedown(dev_priv->fman);
out_no_fman:
	if (dev_priv->capabilities & SVGA_CAP_IRQMASK)
		drm_irq_uninstall(dev_priv->dev);
out_no_irq:
	if (dev_priv->stealth)
		pci_release_region(dev->pdev, 2);
	else
		pci_release_regions(dev->pdev);
out_no_device:
	ttm_object_device_release(&dev_priv->tdev);
out_err4:
	memunmap(dev_priv->mmio_virt);
out_err3:
	vmw_ttm_global_release(dev_priv);
out_err0:
	for (i = vmw_res_context; i < vmw_res_max; ++i)
		idr_destroy(&dev_priv->res_idr[i]);

	if (dev_priv->ctx.staged_bindings)
		vmw_binding_state_free(dev_priv->ctx.staged_bindings);
	kfree(dev_priv);
	return ret;
}
Example #17
0
void __init efi_bgrt_init(void)
{
	acpi_status status;
	void *image;
	struct bmp_header bmp_header;

	if (acpi_disabled)
		return;

	status = acpi_get_table("BGRT", 0,
	                        (struct acpi_table_header **)&bgrt_tab);
	if (ACPI_FAILURE(status))
		return;

	if (bgrt_tab->header.length < sizeof(*bgrt_tab)) {
		pr_err("Ignoring BGRT: invalid length %u (expected %zu)\n",
		       bgrt_tab->header.length, sizeof(*bgrt_tab));
		return;
	}
	if (bgrt_tab->version != 1) {
		pr_err("Ignoring BGRT: invalid version %u (expected 1)\n",
		       bgrt_tab->version);
		return;
	}
	if (bgrt_tab->status & 0xfe) {
		pr_err("Ignoring BGRT: reserved status bits are non-zero %u\n",
		       bgrt_tab->status);
		return;
	}
	if (bgrt_tab->status != 1) {
		pr_debug("Ignoring BGRT: invalid status %u (expected 1)\n",
			 bgrt_tab->status);
		return;
	}
	if (bgrt_tab->image_type != 0) {
		pr_err("Ignoring BGRT: invalid image type %u (expected 0)\n",
		       bgrt_tab->image_type);
		return;
	}
	if (!bgrt_tab->image_address) {
		pr_err("Ignoring BGRT: null image address\n");
		return;
	}

	image = memremap(bgrt_tab->image_address, sizeof(bmp_header), MEMREMAP_WB);
	if (!image) {
		pr_err("Ignoring BGRT: failed to map image header memory\n");
		return;
	}

	memcpy(&bmp_header, image, sizeof(bmp_header));
	memunmap(image);
	bgrt_image_size = bmp_header.size;

	bgrt_image = kmalloc(bgrt_image_size, GFP_KERNEL | __GFP_NOWARN);
	if (!bgrt_image) {
		pr_err("Ignoring BGRT: failed to allocate memory for image (wanted %zu bytes)\n",
		       bgrt_image_size);
		return;
	}

	image = memremap(bgrt_tab->image_address, bmp_header.size, MEMREMAP_WB);
	if (!image) {
		pr_err("Ignoring BGRT: failed to map image memory\n");
		kfree(bgrt_image);
		bgrt_image = NULL;
		return;
	}

	memcpy(bgrt_image, image, bgrt_image_size);
	memunmap(image);
}
Example #18
0
/*
 * visorchannel_create_guts() - creates the struct visorchannel abstraction
 *                              for a data area in memory, but does NOT modify
 *                              this data area
 * @physaddr:      physical address of start of channel
 * @channel_bytes: size of the channel in bytes; this may be 0 if the channel has
 *                 already been initialized in memory (which is true for all
 *                 channels provided to guest environments by the s-Par
 *                 back-end), in which case the actual channel size will be
 *                 read from the channel header in memory
 * @gfp:           gfp_t to use when allocating memory for the data struct
 * @guid:          uuid that identifies channel type; this may be 0 if the channel
 *                 has already been initialized in memory (which is true for all
 *                 channels provided to guest environments by the s-Par
 *                 back-end), in which case the actual channel guid will be
 *                 read from the channel header in memory
 * @needs_lock:    must specify true if you have multiple threads of execution
 *                 that will be calling visorchannel methods of this
 *                 visorchannel at the same time
 *
 * Return: pointer to visorchannel that was created if successful,
 *         otherwise NULL
 */
static struct visorchannel *
visorchannel_create_guts(u64 physaddr, unsigned long channel_bytes,
			 gfp_t gfp, uuid_le guid, bool needs_lock)
{
	struct visorchannel *channel;
	int err;
	size_t size = sizeof(struct channel_header);

	if (physaddr == 0)
		return NULL;

	channel = kzalloc(sizeof(*channel), gfp);
	if (!channel)
		return NULL;

	channel->needs_lock = needs_lock;
	spin_lock_init(&channel->insert_lock);
	spin_lock_init(&channel->remove_lock);

	/*
	 * The video driver contains the efi framebuffer, so it will get a
	 * resource conflict when requesting its full mem region. Since
	 * we are only using the efi framebuffer for video we can ignore
	 * this. Remember that we haven't requested it, so we don't try to
	 * release it later on.
	 */
	channel->requested = request_mem_region(physaddr, size, MYDRVNAME);
	if (!channel->requested && uuid_le_cmp(guid, spar_video_guid))
		/* we only care about errors if this is not the video channel */
		goto err_destroy_channel;

	channel->mapped = memremap(physaddr, size, MEMREMAP_WB);
	if (!channel->mapped) {
		release_mem_region(physaddr, size);
		goto err_destroy_channel;
	}

	channel->physaddr = physaddr;
	channel->nbytes = size;

	err = visorchannel_read(channel, 0, &channel->chan_hdr,
				sizeof(struct channel_header));
	if (err)
		goto err_destroy_channel;

	/* we had better be a CLIENT of this channel */
	if (channel_bytes == 0)
		channel_bytes = (ulong)channel->chan_hdr.size;
	if (uuid_le_cmp(guid, NULL_UUID_LE) == 0)
		guid = channel->chan_hdr.chtype;

	memunmap(channel->mapped);
	if (channel->requested)
		release_mem_region(channel->physaddr, channel->nbytes);
	channel->mapped = NULL;
	channel->requested = request_mem_region(channel->physaddr,
						channel_bytes, MYDRVNAME);
	if (!channel->requested && uuid_le_cmp(guid, spar_video_guid))
		/* we only care about errors if this is not the video channel */
		goto err_destroy_channel;

	channel->mapped = memremap(channel->physaddr, channel_bytes,
			MEMREMAP_WB);
	if (!channel->mapped) {
		release_mem_region(channel->physaddr, channel_bytes);
		goto err_destroy_channel;
	}

	channel->nbytes = channel_bytes;
	channel->guid = guid;
	return channel;

err_destroy_channel:
	visorchannel_destroy(channel);
	return NULL;
}