Example #1
File: skl-nhlt.c Project: val2k/linux
struct nhlt_acpi_table *skl_nhlt_init(struct device *dev)
{
	acpi_handle handle;
	union acpi_object *obj;
	struct nhlt_resource_desc  *nhlt_ptr = NULL;
	struct nhlt_acpi_table *nhlt_table = NULL;

	handle = ACPI_HANDLE(dev);
	if (!handle) {
		dev_err(dev, "Didn't find ACPI_HANDLE\n");
		return NULL;
	}

	obj = acpi_evaluate_dsm(handle, &osc_guid, 1, 1, NULL);
	if (obj && obj->type == ACPI_TYPE_BUFFER) {
		nhlt_ptr = (struct nhlt_resource_desc  *)obj->buffer.pointer;
		nhlt_table = (struct nhlt_acpi_table *)
				memremap(nhlt_ptr->min_addr, nhlt_ptr->length,
				MEMREMAP_WB);
		ACPI_FREE(obj);
		if (nhlt_table && (strncmp(nhlt_table->header.signature,
					NHLT_ACPI_HEADER_SIG,
					strlen(NHLT_ACPI_HEADER_SIG)) != 0)) {
			memunmap(nhlt_table);
			dev_err(dev, "NHLT ACPI header signature incorrect\n");
			return NULL;
		}
		return nhlt_table;
	}

	dev_err(dev, "device specific method to extract NHLT blob failed\n");
	return NULL;
}
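The table returned here is a live memremap() mapping rather than a heap allocation, so callers must release it with skl_nhlt_free() (Examples #20 and #24 below), never kfree(). A minimal caller sketch; every name except skl_nhlt_init() and skl_nhlt_free() is hypothetical:

static int skl_probe_nhlt(struct device *dev)
{
	struct nhlt_acpi_table *nhlt;

	nhlt = skl_nhlt_init(dev);
	if (!nhlt)
		return -ENODEV;
	/* ... walk the NHLT endpoint descriptors here ... */
	skl_nhlt_free(nhlt);
	return 0;
}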
Example #2
/*
 * visorchannel_create() - creates the struct visorchannel abstraction for a
 *                         data area in memory, but does NOT modify this data
 *                         area
 * @physaddr:      physical address of start of channel
 * @gfp:           gfp_t to use when allocating memory for the data struct
 * @guid:          GUID that identifies channel type;
 * @needs_lock:    must specify true if you have multiple threads of execution
 *                 that will be calling visorchannel methods of this
 *                 visorchannel at the same time
 *
 * Return: pointer to visorchannel that was created if successful,
 *         otherwise NULL
 */
struct visorchannel *visorchannel_create(u64 physaddr, gfp_t gfp,
					 const guid_t *guid, bool needs_lock)
{
	struct visorchannel *channel;
	int err;
	size_t size = sizeof(struct channel_header);

	if (physaddr == 0)
		return NULL;

	channel = kzalloc(sizeof(*channel), gfp);
	if (!channel)
		return NULL;
	channel->needs_lock = needs_lock;
	spin_lock_init(&channel->insert_lock);
	spin_lock_init(&channel->remove_lock);
	/*
	 * Video driver contains the efi framebuffer so it will get a conflict
	 * resource when requesting its full mem region. Since we are only
	 * using the efi framebuffer for video we can ignore this. Remember that
	 * we haven't requested it so we don't try to release later on.
	 */
	channel->requested = request_mem_region(physaddr, size, VISOR_DRV_NAME);
	if (!channel->requested && !guid_equal(guid, &visor_video_guid))
		/* we only care about errors if this is not the video channel */
		goto err_destroy_channel;
	channel->mapped = memremap(physaddr, size, MEMREMAP_WB);
	if (!channel->mapped) {
		release_mem_region(physaddr, size);
		goto err_destroy_channel;
	}
	channel->physaddr = physaddr;
	channel->nbytes = size;
	err = visorchannel_read(channel, 0, &channel->chan_hdr, size);
	if (err)
		goto err_destroy_channel;
	size = (ulong)channel->chan_hdr.size;
	memunmap(channel->mapped);
	if (channel->requested)
		release_mem_region(channel->physaddr, channel->nbytes);
	channel->mapped = NULL;
	channel->requested = request_mem_region(channel->physaddr, size,
						VISOR_DRV_NAME);
	if (!channel->requested && !guid_equal(guid, &visor_video_guid))
		/* we only care about errors if this is not the video channel */
		goto err_destroy_channel;
	channel->mapped = memremap(channel->physaddr, size, MEMREMAP_WB);
	if (!channel->mapped) {
		release_mem_region(channel->physaddr, size);
		goto err_destroy_channel;
	}
	channel->nbytes = size;
	guid_copy(&channel->guid, guid);
	return channel;

err_destroy_channel:
	visorchannel_destroy(channel);
	return NULL;
}
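A minimal usage sketch for this create/destroy pair (destroy is shown in Example #7); the physical address and GUID parameters here are placeholders, not fields of a real driver structure:

static int example_attach(u64 chan_physaddr, const guid_t *chan_guid)
{
	struct visorchannel *chan;

	chan = visorchannel_create(chan_physaddr, GFP_KERNEL, chan_guid,
				   false);
	if (!chan)
		return -ENOMEM;
	/* ... issue visorchannel_read()/visorchannel_write() calls ... */
	visorchannel_destroy(chan);
	return 0;
}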
Example #3
File: efi.c Project: AlexShiLucky/linux
/* read binary bios log from EFI configuration table */
int tpm_read_log_efi(struct tpm_chip *chip)
{
	struct linux_efi_tpm_eventlog *log_tbl;
	struct tpm_bios_log *log;
	u32 log_size;
	u8 tpm_log_version;

	if (!(chip->flags & TPM_CHIP_FLAG_TPM2))
		return -ENODEV;

	if (efi.tpm_log == EFI_INVALID_TABLE_ADDR)
		return -ENODEV;

	log = &chip->log;

	log_tbl = memremap(efi.tpm_log, sizeof(*log_tbl), MEMREMAP_WB);
	if (!log_tbl) {
		pr_err("Could not map UEFI TPM log table !\n");
		return -ENOMEM;
	}

	log_size = log_tbl->size;
	memunmap(log_tbl);

	log_tbl = memremap(efi.tpm_log, sizeof(*log_tbl) + log_size,
			   MEMREMAP_WB);
	if (!log_tbl) {
		pr_err("Could not map UEFI TPM log table payload!\n");
		return -ENOMEM;
	}

	/* malloc EventLog space */
	log->bios_event_log = kmemdup(log_tbl->log, log_size, GFP_KERNEL);
	if (!log->bios_event_log)
		goto err_memunmap;
	log->bios_event_log_end = log->bios_event_log + log_size;

	tpm_log_version = log_tbl->version;
	memunmap(log_tbl);
	return tpm_log_version;

err_memunmap:
	memunmap(log_tbl);
	return -ENOMEM;
}
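The function above relies on a two-pass idiom that several other examples repeat: map only the fixed-size header, read the payload length from it, unmap, then remap header plus payload at full size. A stripped-down sketch of that idiom; struct blob and map_blob() are illustrative, not kernel APIs:

struct blob {
	u32 size;	/* number of payload bytes that follow */
	u8 data[];
};

static void *map_blob(phys_addr_t pa)
{
	struct blob *hdr;
	u32 size;

	hdr = memremap(pa, sizeof(*hdr), MEMREMAP_WB);
	if (!hdr)
		return NULL;
	size = hdr->size;	/* learn the real length */
	memunmap(hdr);		/* drop the short mapping */

	return memremap(pa, sizeof(*hdr) + size, MEMREMAP_WB);
}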
Example #4
static ssize_t setup_data_data_read(struct file *fp,
				    struct kobject *kobj,
				    struct bin_attribute *bin_attr,
				    char *buf,
				    loff_t off, size_t count)
{
	int nr, ret = 0;
	u64 paddr;
	struct setup_data *data;
	void *p;

	ret = kobj_to_setup_data_nr(kobj, &nr);
	if (ret)
		return ret;

	ret = get_setup_data_paddr(nr, &paddr);
	if (ret)
		return ret;
	data = memremap(paddr, sizeof(*data), MEMREMAP_WB);
	if (!data)
		return -ENOMEM;

	if (off > data->len) {
		ret = -EINVAL;
		goto out;
	}

	if (count > data->len - off)
		count = data->len - off;

	if (!count)
		goto out;

	ret = count;
	p = memremap(paddr + sizeof(*data), data->len, MEMREMAP_WB);
	if (!p) {
		ret = -ENOMEM;
		goto out;
	}
	memcpy(buf, p + off, count);
	memunmap(p);
out:
	memunmap(data);
	return ret;
}
Example #5
int main(int argc, char **argv)
{
	int i, cur;

	tgetopt(argc, argv);

	if ( toptset('h') )
	{
		printf(usage, argv[0]);
		exit(0);
	}

	if ( toptset('c') ) 
		config_file = toptargs('c');

	syslog_open("updategroups", LOG_PID, LOG_NEWS);

	if ( (master = memmap(sizeof(MASTER))) == NULL )
		die("Can't allocate master memory");

	if ( (groups = memmap(sizeof(ACTIVE) * MAX_GROUPS)) == NULL )
		die("Can't allocate groups memory");

	loadconfig();
	load_servers();

	cur = -1;
	for (i=master->numservers-2; i>=0; i-- )
		if ( (master->lservers[i])->Level != cur )
		{
			cur = (master->lservers[i])->Level;
			update_server( master->lservers[i] );
		}

	write_active();
	write_newsgroups();

	memunmap(master, sizeof(MASTER));
	memunmap(groups, sizeof(ACTIVE) * MAX_GROUPS);

	syslog_close();

	exit(0);
}
Example #6
static void __ref sfi_unmap_memory(void __iomem *virt, u32 size)
{
	if (!virt || !size)
		return;

	if (sfi_use_memremap)
		memunmap(virt);
	else
		early_memunmap(virt, size);
}
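For context, the map-side helper in the same file presumably mirrors this split between memremap() and early_memremap(); a sketch under that assumption:

static void __iomem * __ref sfi_map_memory(u64 phys, u32 size)
{
	if (!phys || !size)
		return NULL;

	if (sfi_use_memremap)
		return memremap(phys, size, MEMREMAP_WB);
	else
		return early_memremap(phys, size);
}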
Example #7
void
visorchannel_destroy(struct visorchannel *channel)
{
	if (!channel)
		return;
	if (channel->mapped) {
		memunmap(channel->mapped);
		if (channel->requested)
			release_mem_region(channel->physaddr, channel->nbytes);
	}
	kfree(channel);
}
Example #8
static int __init get_setup_data_size(int nr, size_t *size)
{
	int i = 0;
	struct setup_data *data;
	u64 pa_data = boot_params.hdr.setup_data;

	while (pa_data) {
		data = memremap(pa_data, sizeof(*data), MEMREMAP_WB);
		if (!data)
			return -ENOMEM;
		if (nr == i) {
			*size = data->len;
			memunmap(data);
			return 0;
		}

		pa_data = data->next;
		memunmap(data);
		i++;
	}
	return -EINVAL;
}
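Examples #8, #13 and #14 (below) all walk the boot_params.hdr.setup_data singly linked list the same way: map one node, read what is needed from its header, save ->next, unmap, repeat. A hypothetical visitor-style reduction of that shared pattern:

static int for_each_setup_data(u64 pa_data,
			       int (*fn)(struct setup_data *data, void *ctx),
			       void *ctx)
{
	struct setup_data *data;
	int ret = 0;

	while (pa_data && !ret) {
		data = memremap(pa_data, sizeof(*data), MEMREMAP_WB);
		if (!data)
			return -ENOMEM;
		ret = fn(data, ctx);	/* only header fields are mapped */
		pa_data = data->next;	/* read before unmapping */
		memunmap(data);
	}
	return ret;
}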
Example #9
static int vmw_driver_unload(struct drm_device *dev)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	enum vmw_res_type i;

	unregister_pm_notifier(&dev_priv->pm_nb);

	if (dev_priv->ctx.res_ht_initialized)
		drm_ht_remove(&dev_priv->ctx.res_ht);
	vfree(dev_priv->ctx.cmd_bounce);
	if (dev_priv->enable_fb) {
		vmw_fb_off(dev_priv);
		vmw_fb_close(dev_priv);
		vmw_fifo_resource_dec(dev_priv);
		vmw_svga_disable(dev_priv);
	}

	vmw_kms_close(dev_priv);
	vmw_overlay_close(dev_priv);

	if (dev_priv->has_gmr)
		(void)ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_GMR);
	(void)ttm_bo_clean_mm(&dev_priv->bdev, TTM_PL_VRAM);

	vmw_release_device_early(dev_priv);
	if (dev_priv->has_mob)
		(void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_MOB);
	(void) ttm_bo_device_release(&dev_priv->bdev);
	vmw_release_device_late(dev_priv);
	vmw_fence_manager_takedown(dev_priv->fman);
	if (dev_priv->capabilities & SVGA_CAP_IRQMASK)
		drm_irq_uninstall(dev_priv->dev);
	if (dev_priv->stealth)
		pci_release_region(dev->pdev, 2);
	else
		pci_release_regions(dev->pdev);

	ttm_object_device_release(&dev_priv->tdev);
	memunmap(dev_priv->mmio_virt);
	if (dev_priv->ctx.staged_bindings)
		vmw_binding_state_free(dev_priv->ctx.staged_bindings);
	vmw_ttm_global_release(dev_priv);

	for (i = vmw_res_context; i < vmw_res_max; ++i)
		idr_destroy(&dev_priv->res_idr[i]);

	kfree(dev_priv);

	return 0;
}
Example #10
File: create.c Project: richj25/pthreads
int create(pthread_t *ptid, const pthread_attr_t *attr, void * (*start_routine) (void *), void *arg)
{
    int flags;
    volatile pid_t* ctidloc;
    int tid;
    thread_func_t *user_thread;

    user_thread = (thread_func_t *)malloc(sizeof(thread_func_t));
    if (user_thread == NULL)
        return ENOMEM;

    user_thread->thread_func = start_routine;
    user_thread->thread_data = arg;
    user_thread->detachstate = attr->detachstate;
    user_thread->ptid = -1;

    void *stack = memmap(NULL, attr->stacksize,
                  PROT_READ | PROT_WRITE,
                  MAP_PRIVATE | MAP_ANONYMOUS | MAP_GROWSDOWN,
                  -1, 0);

    if (stack == MAP_FAILED) {
        free(user_thread);
        return ENOMEM;
    }

    *ptid = initialize_thread_db_entry(attr->detachstate,attr->stacksize,stack,user_thread);

    if (*ptid == -1) {
        memunmap(stack,attr->stacksize);
        return EAGAIN;
    }

    user_thread->ptid = *ptid;

    flags = CLONE_VM | CLONE_THREAD | CLONE_FS | CLONE_FILES | CLONE_SIGHAND |
            CLONE_PARENT_SETTID | CLONE_CHILD_CLEARTID | CLONE_IO | SIGCHLD;

    ctidloc = get_tid_location_from_db_entry(*ptid);

    tid = clone(thread_wrapper,stack+attr->stacksize, flags, user_thread, ctidloc, NULL, ctidloc);

    if (tid == -1) {
        recycle_thread_db_entry(*ptid);
        return errno;
    }

    set_nonvolatile_tid(*ptid,tid);

    return 0;
}
Example #11
File: memattr.c Project: 513855417/linux
/*
 * To be called after the EFI page tables have been populated. If a memory
 * attributes table is available, its contents will be used to update the
 * mappings with tightened permissions as described by the table.
 * This requires the UEFI memory map to have already been populated with
 * virtual addresses.
 */
int __init efi_memattr_apply_permissions(struct mm_struct *mm,
					 efi_memattr_perm_setter fn)
{
	efi_memory_attributes_table_t *tbl;
	int i, ret;

	if (tbl_size <= sizeof(*tbl))
		return 0;

	/*
	 * We need the EFI memory map to be set up so we can use it to
	 * look up the virtual addresses of all entries in the EFI
	 * Memory Attributes table. If it isn't available, this
	 * function should not be called.
	 */
	if (WARN_ON(!efi_enabled(EFI_MEMMAP)))
		return 0;

	tbl = memremap(efi.mem_attr_table, tbl_size, MEMREMAP_WB);
	if (!tbl) {
		pr_err("Failed to map EFI Memory Attributes table @ 0x%lx\n",
		       efi.mem_attr_table);
		return -ENOMEM;
	}

	if (efi_enabled(EFI_DBG))
		pr_info("Processing EFI Memory Attributes table:\n");

	for (i = ret = 0; ret == 0 && i < tbl->num_entries; i++) {
		efi_memory_desc_t md;
		unsigned long size;
		bool valid;
		char buf[64];

		valid = entry_is_valid((void *)tbl->entry + i * tbl->desc_size,
				       &md);
		size = md.num_pages << EFI_PAGE_SHIFT;
		if (efi_enabled(EFI_DBG) || !valid)
			pr_info("%s 0x%012llx-0x%012llx %s\n",
				valid ? "" : "!", md.phys_addr,
				md.phys_addr + size - 1,
				efi_md_typeattr_format(buf, sizeof(buf), &md));

		if (valid)
			ret = fn(mm, &md);
	}
	memunmap(tbl);
	return ret;
}
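The shape of the fn callback can be read off the call site above: it takes the mm and a decoded efi_memory_desc_t and returns 0 to continue. A do-nothing example callback, purely for illustration:

static int __init noop_perm_setter(struct mm_struct *mm,
				   efi_memory_desc_t *md)
{
	/* a real setter would tighten the mapping for md's range in mm */
	pr_info("entry: 0x%012llx, %llu pages\n",
		(unsigned long long)md->phys_addr,
		(unsigned long long)md->num_pages);
	return 0;
}

/* called as: efi_memattr_apply_permissions(mm, noop_perm_setter); */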
Example #12
static int iommu_load_old_irte(struct intel_iommu *iommu)
{
	struct irte *old_ir_table;
	phys_addr_t irt_phys;
	unsigned int i;
	size_t size;
	u64 irta;

	if (!is_kdump_kernel()) {
		pr_warn("IRQ remapping was enabled on %s but we are not in kdump mode\n",
			iommu->name);
		clear_ir_pre_enabled(iommu);
		iommu_disable_irq_remapping(iommu);
		return -EINVAL;
	}

	/* Check whether the old ir-table has the same size as ours */
	irta = dmar_readq(iommu->reg + DMAR_IRTA_REG);
	if ((irta & INTR_REMAP_TABLE_REG_SIZE_MASK)
	     != INTR_REMAP_TABLE_REG_SIZE)
		return -EINVAL;

	irt_phys = irta & VTD_PAGE_MASK;
	size     = INTR_REMAP_TABLE_ENTRIES*sizeof(struct irte);

	/* Map the old IR table */
	old_ir_table = memremap(irt_phys, size, MEMREMAP_WB);
	if (!old_ir_table)
		return -ENOMEM;

	/* Copy data over */
	memcpy(iommu->ir_table->base, old_ir_table, size);

	__iommu_flush_cache(iommu, iommu->ir_table->base, size);

	/*
	 * Now check the table for used entries and mark those as
	 * allocated in the bitmap
	 */
	for (i = 0; i < INTR_REMAP_TABLE_ENTRIES; i++) {
		if (iommu->ir_table->base[i].present)
			bitmap_set(iommu->ir_table->bitmap, i, 1);
	}

	memunmap(old_ir_table);

	return 0;
}
Example #13
static int __init get_setup_data_total_num(u64 pa_data, int *nr)
{
	int ret = 0;
	struct setup_data *data;

	*nr = 0;
	while (pa_data) {
		*nr += 1;
		data = memremap(pa_data, sizeof(*data), MEMREMAP_WB);
		if (!data) {
			ret = -ENOMEM;
			goto out;
		}
		pa_data = data->next;
		memunmap(data);
	}

out:
	return ret;
}
Example #14
static int get_setup_data_paddr(int nr, u64 *paddr)
{
	int i = 0;
	struct setup_data *data;
	u64 pa_data = boot_params.hdr.setup_data;

	while (pa_data) {
		if (nr == i) {
			*paddr = pa_data;
			return 0;
		}
		data = memremap(pa_data, sizeof(*data), MEMREMAP_WB);
		if (!data)
			return -ENOMEM;

		pa_data = data->next;
		memunmap(data);
		i++;
	}
	return -EINVAL;
}
Example #15
static ssize_t type_show(struct kobject *kobj,
			 struct kobj_attribute *attr, char *buf)
{
	int nr, ret;
	u64 paddr;
	struct setup_data *data;

	ret = kobj_to_setup_data_nr(kobj, &nr);
	if (ret)
		return ret;

	ret = get_setup_data_paddr(nr, &paddr);
	if (ret)
		return ret;
	data = memremap(paddr, sizeof(*data), MEMREMAP_WB);
	if (!data)
		return -ENOMEM;

	ret = sprintf(buf, "0x%x\n", data->type);
	memunmap(data);
	return ret;
}
Example #16
static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
{
	struct vmw_private *dev_priv;
	int ret;
	uint32_t svga_id;
	enum vmw_res_type i;
	bool refuse_dma = false;
	char host_log[100] = {0};

	dev_priv = kzalloc(sizeof(*dev_priv), GFP_KERNEL);
	if (unlikely(dev_priv == NULL)) {
		DRM_ERROR("Failed allocating a device private struct.\n");
		return -ENOMEM;
	}

	pci_set_master(dev->pdev);

	dev_priv->dev = dev;
	dev_priv->vmw_chipset = chipset;
	dev_priv->last_read_seqno = (uint32_t) -100;
	mutex_init(&dev_priv->cmdbuf_mutex);
	mutex_init(&dev_priv->release_mutex);
	mutex_init(&dev_priv->binding_mutex);
	mutex_init(&dev_priv->global_kms_state_mutex);
	rwlock_init(&dev_priv->resource_lock);
	ttm_lock_init(&dev_priv->reservation_sem);
	spin_lock_init(&dev_priv->hw_lock);
	spin_lock_init(&dev_priv->waiter_lock);
	spin_lock_init(&dev_priv->cap_lock);
	spin_lock_init(&dev_priv->svga_lock);

	for (i = vmw_res_context; i < vmw_res_max; ++i) {
		idr_init(&dev_priv->res_idr[i]);
		INIT_LIST_HEAD(&dev_priv->res_lru[i]);
	}

	mutex_init(&dev_priv->init_mutex);
	init_waitqueue_head(&dev_priv->fence_queue);
	init_waitqueue_head(&dev_priv->fifo_queue);
	dev_priv->fence_queue_waiters = 0;
	dev_priv->fifo_queue_waiters = 0;

	dev_priv->used_memory_size = 0;

	dev_priv->io_start = pci_resource_start(dev->pdev, 0);
	dev_priv->vram_start = pci_resource_start(dev->pdev, 1);
	dev_priv->mmio_start = pci_resource_start(dev->pdev, 2);

	dev_priv->assume_16bpp = !!vmw_assume_16bpp;

	dev_priv->enable_fb = enable_fbdev;

	vmw_write(dev_priv, SVGA_REG_ID, SVGA_ID_2);
	svga_id = vmw_read(dev_priv, SVGA_REG_ID);
	if (svga_id != SVGA_ID_2) {
		ret = -ENOSYS;
		DRM_ERROR("Unsupported SVGA ID 0x%x\n", svga_id);
		goto out_err0;
	}

	dev_priv->capabilities = vmw_read(dev_priv, SVGA_REG_CAPABILITIES);
	ret = vmw_dma_select_mode(dev_priv);
	if (unlikely(ret != 0)) {
		DRM_INFO("Restricting capabilities due to IOMMU setup.\n");
		refuse_dma = true;
	}

	dev_priv->vram_size = vmw_read(dev_priv, SVGA_REG_VRAM_SIZE);
	dev_priv->mmio_size = vmw_read(dev_priv, SVGA_REG_MEM_SIZE);
	dev_priv->fb_max_width = vmw_read(dev_priv, SVGA_REG_MAX_WIDTH);
	dev_priv->fb_max_height = vmw_read(dev_priv, SVGA_REG_MAX_HEIGHT);

	vmw_get_initial_size(dev_priv);

	if (dev_priv->capabilities & SVGA_CAP_GMR2) {
		dev_priv->max_gmr_ids =
			vmw_read(dev_priv, SVGA_REG_GMR_MAX_IDS);
		dev_priv->max_gmr_pages =
			vmw_read(dev_priv, SVGA_REG_GMRS_MAX_PAGES);
		dev_priv->memory_size =
			vmw_read(dev_priv, SVGA_REG_MEMORY_SIZE);
		dev_priv->memory_size -= dev_priv->vram_size;
	} else {
		/*
		 * An arbitrary limit of 512MiB on surface
		 * memory. But all HWV8 hardware supports GMR2.
		 */
		dev_priv->memory_size = 512*1024*1024;
	}
	dev_priv->max_mob_pages = 0;
	dev_priv->max_mob_size = 0;
	if (dev_priv->capabilities & SVGA_CAP_GBOBJECTS) {
		uint64_t mem_size =
			vmw_read(dev_priv,
				 SVGA_REG_SUGGESTED_GBOBJECT_MEM_SIZE_KB);

		/*
		 * Workaround for low memory 2D VMs to compensate for the
		 * allocation taken by fbdev
		 */
		if (!(dev_priv->capabilities & SVGA_CAP_3D))
			mem_size *= 2;

		dev_priv->max_mob_pages = mem_size * 1024 / PAGE_SIZE;
		dev_priv->prim_bb_mem =
			vmw_read(dev_priv,
				 SVGA_REG_MAX_PRIMARY_BOUNDING_BOX_MEM);
		dev_priv->max_mob_size =
			vmw_read(dev_priv, SVGA_REG_MOB_MAX_SIZE);
		dev_priv->stdu_max_width =
			vmw_read(dev_priv, SVGA_REG_SCREENTARGET_MAX_WIDTH);
		dev_priv->stdu_max_height =
			vmw_read(dev_priv, SVGA_REG_SCREENTARGET_MAX_HEIGHT);

		vmw_write(dev_priv, SVGA_REG_DEV_CAP,
			  SVGA3D_DEVCAP_MAX_TEXTURE_WIDTH);
		dev_priv->texture_max_width = vmw_read(dev_priv,
						       SVGA_REG_DEV_CAP);
		vmw_write(dev_priv, SVGA_REG_DEV_CAP,
			  SVGA3D_DEVCAP_MAX_TEXTURE_HEIGHT);
		dev_priv->texture_max_height = vmw_read(dev_priv,
							SVGA_REG_DEV_CAP);
	} else {
		dev_priv->texture_max_width = 8192;
		dev_priv->texture_max_height = 8192;
		dev_priv->prim_bb_mem = dev_priv->vram_size;
	}

	vmw_print_capabilities(dev_priv->capabilities);

	ret = vmw_dma_masks(dev_priv);
	if (unlikely(ret != 0))
		goto out_err0;

	if (dev_priv->capabilities & SVGA_CAP_GMR2) {
		DRM_INFO("Max GMR ids is %u\n",
			 (unsigned)dev_priv->max_gmr_ids);
		DRM_INFO("Max number of GMR pages is %u\n",
			 (unsigned)dev_priv->max_gmr_pages);
		DRM_INFO("Max dedicated hypervisor surface memory is %u kiB\n",
			 (unsigned)dev_priv->memory_size / 1024);
	}
	DRM_INFO("Maximum display memory size is %u kiB\n",
		 dev_priv->prim_bb_mem / 1024);
	DRM_INFO("VRAM at 0x%08x size is %u kiB\n",
		 dev_priv->vram_start, dev_priv->vram_size / 1024);
	DRM_INFO("MMIO at 0x%08x size is %u kiB\n",
		 dev_priv->mmio_start, dev_priv->mmio_size / 1024);

	ret = vmw_ttm_global_init(dev_priv);
	if (unlikely(ret != 0))
		goto out_err0;

	vmw_master_init(&dev_priv->fbdev_master);
	ttm_lock_set_kill(&dev_priv->fbdev_master.lock, false, SIGTERM);
	dev_priv->active_master = &dev_priv->fbdev_master;

	dev_priv->mmio_virt = memremap(dev_priv->mmio_start,
				       dev_priv->mmio_size, MEMREMAP_WB);

	if (unlikely(dev_priv->mmio_virt == NULL)) {
		ret = -ENOMEM;
		DRM_ERROR("Failed mapping MMIO.\n");
		goto out_err3;
	}

	/* Need mmio memory to check for fifo pitchlock cap. */
	if (!(dev_priv->capabilities & SVGA_CAP_DISPLAY_TOPOLOGY) &&
	    !(dev_priv->capabilities & SVGA_CAP_PITCHLOCK) &&
	    !vmw_fifo_have_pitchlock(dev_priv)) {
		ret = -ENOSYS;
		DRM_ERROR("Hardware has no pitchlock\n");
		goto out_err4;
	}

	dev_priv->tdev = ttm_object_device_init
		(dev_priv->mem_global_ref.object, 12, &vmw_prime_dmabuf_ops);

	if (unlikely(dev_priv->tdev == NULL)) {
		DRM_ERROR("Unable to initialize TTM object management.\n");
		ret = -ENOMEM;
		goto out_err4;
	}

	dev->dev_private = dev_priv;

	ret = pci_request_regions(dev->pdev, "vmwgfx probe");
	dev_priv->stealth = (ret != 0);
	if (dev_priv->stealth) {
		/**
		 * Request at least the mmio PCI resource.
		 */

		DRM_INFO("It appears like vesafb is loaded. "
			 "Ignore above error if any.\n");
		ret = pci_request_region(dev->pdev, 2, "vmwgfx stealth probe");
		if (unlikely(ret != 0)) {
			DRM_ERROR("Failed reserving the SVGA MMIO resource.\n");
			goto out_no_device;
		}
	}

	if (dev_priv->capabilities & SVGA_CAP_IRQMASK) {
		ret = drm_irq_install(dev, dev->pdev->irq);
		if (ret != 0) {
			DRM_ERROR("Failed installing irq: %d\n", ret);
			goto out_no_irq;
		}
	}

	dev_priv->fman = vmw_fence_manager_init(dev_priv);
	if (unlikely(dev_priv->fman == NULL)) {
		ret = -ENOMEM;
		goto out_no_fman;
	}

	ret = ttm_bo_device_init(&dev_priv->bdev,
				 dev_priv->bo_global_ref.ref.object,
				 &vmw_bo_driver,
				 dev->anon_inode->i_mapping,
				 VMWGFX_FILE_PAGE_OFFSET,
				 false);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Failed initializing TTM buffer object driver.\n");
		goto out_no_bdev;
	}

	/*
	 * Enable VRAM, but initially don't use it until SVGA is enabled and
	 * unhidden.
	 */
	ret = ttm_bo_init_mm(&dev_priv->bdev, TTM_PL_VRAM,
			     (dev_priv->vram_size >> PAGE_SHIFT));
	if (unlikely(ret != 0)) {
		DRM_ERROR("Failed initializing memory manager for VRAM.\n");
		goto out_no_vram;
	}
	dev_priv->bdev.man[TTM_PL_VRAM].use_type = false;

	dev_priv->has_gmr = true;
	if (((dev_priv->capabilities & (SVGA_CAP_GMR | SVGA_CAP_GMR2)) == 0) ||
	    refuse_dma || ttm_bo_init_mm(&dev_priv->bdev, VMW_PL_GMR,
					 VMW_PL_GMR) != 0) {
		DRM_INFO("No GMR memory available. "
			 "Graphics memory resources are very limited.\n");
		dev_priv->has_gmr = false;
	}

	if (dev_priv->capabilities & SVGA_CAP_GBOBJECTS) {
		dev_priv->has_mob = true;
		if (ttm_bo_init_mm(&dev_priv->bdev, VMW_PL_MOB,
				   VMW_PL_MOB) != 0) {
			DRM_INFO("No MOB memory available. "
				 "3D will be disabled.\n");
			dev_priv->has_mob = false;
		}
	}

	if (dev_priv->has_mob) {
		spin_lock(&dev_priv->cap_lock);
		vmw_write(dev_priv, SVGA_REG_DEV_CAP, SVGA3D_DEVCAP_DX);
		dev_priv->has_dx = !!vmw_read(dev_priv, SVGA_REG_DEV_CAP);
		spin_unlock(&dev_priv->cap_lock);
	}

	ret = vmw_kms_init(dev_priv);
	if (unlikely(ret != 0))
		goto out_no_kms;
	vmw_overlay_init(dev_priv);

	ret = vmw_request_device(dev_priv);
	if (ret)
		goto out_no_fifo;

	DRM_INFO("DX: %s\n", dev_priv->has_dx ? "yes." : "no.");

	snprintf(host_log, sizeof(host_log), "vmwgfx: %s-%s",
		VMWGFX_REPO, VMWGFX_GIT_VERSION);
	vmw_host_log(host_log);

	memset(host_log, 0, sizeof(host_log));
	snprintf(host_log, sizeof(host_log), "vmwgfx: Module Version: %d.%d.%d",
		VMWGFX_DRIVER_MAJOR, VMWGFX_DRIVER_MINOR,
		VMWGFX_DRIVER_PATCHLEVEL);
	vmw_host_log(host_log);

	if (dev_priv->enable_fb) {
		vmw_fifo_resource_inc(dev_priv);
		vmw_svga_enable(dev_priv);
		vmw_fb_init(dev_priv);
	}

	dev_priv->pm_nb.notifier_call = vmwgfx_pm_notifier;
	register_pm_notifier(&dev_priv->pm_nb);

	return 0;

out_no_fifo:
	vmw_overlay_close(dev_priv);
	vmw_kms_close(dev_priv);
out_no_kms:
	if (dev_priv->has_mob)
		(void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_MOB);
	if (dev_priv->has_gmr)
		(void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_GMR);
	(void)ttm_bo_clean_mm(&dev_priv->bdev, TTM_PL_VRAM);
out_no_vram:
	(void)ttm_bo_device_release(&dev_priv->bdev);
out_no_bdev:
	vmw_fence_manager_takedown(dev_priv->fman);
out_no_fman:
	if (dev_priv->capabilities & SVGA_CAP_IRQMASK)
		drm_irq_uninstall(dev_priv->dev);
out_no_irq:
	if (dev_priv->stealth)
		pci_release_region(dev->pdev, 2);
	else
		pci_release_regions(dev->pdev);
out_no_device:
	ttm_object_device_release(&dev_priv->tdev);
out_err4:
	memunmap(dev_priv->mmio_virt);
out_err3:
	vmw_ttm_global_release(dev_priv);
out_err0:
	for (i = vmw_res_context; i < vmw_res_max; ++i)
		idr_destroy(&dev_priv->res_idr[i]);

	if (dev_priv->ctx.staged_bindings)
		vmw_binding_state_free(dev_priv->ctx.staged_bindings);
	kfree(dev_priv);
	return ret;
}
Example #17
void __init efi_bgrt_init(void)
{
	acpi_status status;
	void *image;
	struct bmp_header bmp_header;

	if (acpi_disabled)
		return;

	status = acpi_get_table("BGRT", 0,
	                        (struct acpi_table_header **)&bgrt_tab);
	if (ACPI_FAILURE(status))
		return;

	if (bgrt_tab->header.length < sizeof(*bgrt_tab)) {
		pr_err("Ignoring BGRT: invalid length %u (expected %zu)\n",
		       bgrt_tab->header.length, sizeof(*bgrt_tab));
		return;
	}
	if (bgrt_tab->version != 1) {
		pr_err("Ignoring BGRT: invalid version %u (expected 1)\n",
		       bgrt_tab->version);
		return;
	}
	if (bgrt_tab->status & 0xfe) {
		pr_err("Ignoring BGRT: reserved status bits are non-zero %u\n",
		       bgrt_tab->status);
		return;
	}
	if (bgrt_tab->status != 1) {
		pr_debug("Ignoring BGRT: invalid status %u (expected 1)\n",
			 bgrt_tab->status);
		return;
	}
	if (bgrt_tab->image_type != 0) {
		pr_err("Ignoring BGRT: invalid image type %u (expected 0)\n",
		       bgrt_tab->image_type);
		return;
	}
	if (!bgrt_tab->image_address) {
		pr_err("Ignoring BGRT: null image address\n");
		return;
	}

	image = memremap(bgrt_tab->image_address, sizeof(bmp_header), MEMREMAP_WB);
	if (!image) {
		pr_err("Ignoring BGRT: failed to map image header memory\n");
		return;
	}

	memcpy(&bmp_header, image, sizeof(bmp_header));
	memunmap(image);
	bgrt_image_size = bmp_header.size;

	bgrt_image = kmalloc(bgrt_image_size, GFP_KERNEL | __GFP_NOWARN);
	if (!bgrt_image) {
		pr_err("Ignoring BGRT: failed to allocate memory for image (wanted %zu bytes)\n",
		       bgrt_image_size);
		return;
	}

	image = memremap(bgrt_tab->image_address, bmp_header.size, MEMREMAP_WB);
	if (!image) {
		pr_err("Ignoring BGRT: failed to map image memory\n");
		kfree(bgrt_image);
		bgrt_image = NULL;
		return;
	}

	memcpy(bgrt_image, image, bgrt_image_size);
	memunmap(image);
}
Example #18
/**
 * intel_gvt_clean_opregion - clean up host opregion related resources
 * @gvt: a GVT device
 *
 */
void intel_gvt_clean_opregion(struct intel_gvt *gvt)
{
	memunmap(gvt->opregion.opregion_va);
	gvt->opregion.opregion_va = NULL;
}
Example #19
/*
 * visorchannel_create_guts() - creates the struct visorchannel abstraction
 *                              for a data area in memory, but does NOT modify
 *                              this data area
 * @physaddr:      physical address of start of channel
 * @channel_bytes: size of the channel in bytes; this may be 0 if the channel has
 *                 already been initialized in memory (which is true for all
 *                 channels provided to guest environments by the s-Par
 *                 back-end), in which case the actual channel size will be
 *                 read from the channel header in memory
 * @gfp:           gfp_t to use when allocating memory for the data struct
 * @guid:          uuid that identifies channel type; this may be 0 if the channel
 *                 has already been initialized in memory (which is true for all
 *                 channels provided to guest environments by the s-Par
 *                 back-end), in which case the actual channel guid will be
 *                 read from the channel header in memory
 * @needs_lock:    must specify true if you have multiple threads of execution
 *                 that will be calling visorchannel methods of this
 *                 visorchannel at the same time
 *
 * Return: pointer to visorchannel that was created if successful,
 *         otherwise NULL
 */
static struct visorchannel *
visorchannel_create_guts(u64 physaddr, unsigned long channel_bytes,
			 gfp_t gfp, uuid_le guid, bool needs_lock)
{
	struct visorchannel *channel;
	int err;
	size_t size = sizeof(struct channel_header);

	if (physaddr == 0)
		return NULL;

	channel = kzalloc(sizeof(*channel), gfp);
	if (!channel)
		return NULL;

	channel->needs_lock = needs_lock;
	spin_lock_init(&channel->insert_lock);
	spin_lock_init(&channel->remove_lock);

	/*
	 * Video driver contains the efi framebuffer so it will get a
	 * conflict resource when requesting its full mem region. Since
	 * we are only using the efi framebuffer for video we can ignore
	 * this. Remember that we haven't requested it so we don't try to
	 * release later on.
	 */
	channel->requested = request_mem_region(physaddr, size, MYDRVNAME);
	if (!channel->requested && uuid_le_cmp(guid, spar_video_guid))
		/* we only care about errors if this is not the video channel */
		goto err_destroy_channel;

	channel->mapped = memremap(physaddr, size, MEMREMAP_WB);
	if (!channel->mapped) {
		release_mem_region(physaddr, size);
		goto err_destroy_channel;
	}

	channel->physaddr = physaddr;
	channel->nbytes = size;

	err = visorchannel_read(channel, 0, &channel->chan_hdr,
				sizeof(struct channel_header));
	if (err)
		goto err_destroy_channel;

	/* we had better be a CLIENT of this channel */
	if (channel_bytes == 0)
		channel_bytes = (ulong)channel->chan_hdr.size;
	if (uuid_le_cmp(guid, NULL_UUID_LE) == 0)
		guid = channel->chan_hdr.chtype;

	memunmap(channel->mapped);
	if (channel->requested)
		release_mem_region(channel->physaddr, channel->nbytes);
	channel->mapped = NULL;
	channel->requested = request_mem_region(channel->physaddr,
						channel_bytes, MYDRVNAME);
	if (!channel->requested && uuid_le_cmp(guid, spar_video_guid))
		/* we only care about errors if this is not the video channel */
		goto err_destroy_channel;

	channel->mapped = memremap(channel->physaddr, channel_bytes,
			MEMREMAP_WB);
	if (!channel->mapped) {
		release_mem_region(channel->physaddr, channel_bytes);
		goto err_destroy_channel;
	}

	channel->nbytes = channel_bytes;
	channel->guid = guid;
	return channel;

err_destroy_channel:
	visorchannel_destroy(channel);
	return NULL;
}
Example #20
void skl_nhlt_free(void *addr)
{
	memunmap(addr);
}
Example #21
HI_S32 vo_video_csc_config(HI_U32 u32LayerId, HI_U32 enCscMatrix, HI_U32 u32Luma, HI_U32 u32Contrast, HI_U32 u32Hue, HI_U32 u32Satuature, HI_U32 u32Gain)
{
    HI_S32      s32Ret;
    CscCoef_S   stCscCoef;
    HI_U8       *pAddr = NULL;
    HI_U32      *pRegAddr = NULL;
    HI_U32      u32CscIdc, u32CscOdc, u32CscP0, u32CscP1, u32CscP2, u32CscP3, u32CscP4;
    HI_U32      u32DacCtrl0_2; 
    HI_S32      s32Matrix;

    if (u32LayerId < 0 || u32LayerId > 3)
    {
        printf ("layer err \n");
        usage();
        return -1;
    }
    
    if (u32Contrast < 0 || u32Contrast > 100)
    {
        printf ("u32Contrast err \n");
        usage();
        return -1;
    }
    if (u32Hue < 0 || u32Hue > 100)
    {
        printf ("u32Hue err \n");
        usage();
        return -1;
    }
    if (u32Luma < 0 || u32Luma > 100)
    {
        printf ("u32Luma err \n");
        usage();
        return -1;
    }
    if (u32Satuature < 0 || u32Satuature > 100)
    {
        printf ("u32Satuature err \n");
        usage();
        return -1;
    }
    if (u32Gain < 0x0 || u32Gain > 0x3F)
    {
        printf ("u32Gain err \n");
        usage();
        return -1;
    }
    if (enCscMatrix < 0 || enCscMatrix > 2)
    {
        printf ("enCscMatrix err \n");
        usage();
        return -1;
    }
    switch(enCscMatrix)
    {
       case 0:s32Matrix = HAL_CSC_MODE_RGB_TO_RGB;break;
       case 1:s32Matrix = HAL_CSC_MODE_BT601_TO_BT709;break;
       case 2:s32Matrix = HAL_CSC_MODE_BT709_TO_BT601;break;
       default: return -1;
    }
    vou_cal_csc_matrix(u32Luma, u32Contrast, u32Hue, u32Satuature,s32Matrix, &stCscCoef);

    u32CscIdc = ((get_xdc_buma(stCscCoef.csc_in_dc2) & 0x1ff)
            | ((get_xdc_buma(stCscCoef.csc_in_dc1) & 0x1ff) << 9)
            | ((get_xdc_buma(stCscCoef.csc_in_dc0) & 0x1ff) << 18));
    u32CscOdc = ((get_xdc_buma(stCscCoef.csc_out_dc2) & 0x1ff)
            | ((get_xdc_buma(stCscCoef.csc_out_dc1) & 0x1ff) << 9)
            | ((get_xdc_buma(stCscCoef.csc_out_dc0) & 0x1ff) << 18));
    u32CscP0 = ((conver_csc_coef(stCscCoef.csc_coef00) & 0x1fff)
            | ((conver_csc_coef(stCscCoef.csc_coef01) & 0x1fff) << 16));
    u32CscP1 = ((conver_csc_coef(stCscCoef.csc_coef02) & 0x1fff)
            | ((conver_csc_coef(stCscCoef.csc_coef10) & 0x1fff) << 16));
    u32CscP2 = ((conver_csc_coef(stCscCoef.csc_coef11) & 0x1fff)
            | ((conver_csc_coef(stCscCoef.csc_coef12) & 0x1fff) << 16));
    u32CscP3 = ((conver_csc_coef(stCscCoef.csc_coef20) & 0x1fff)
            | ((conver_csc_coef(stCscCoef.csc_coef21) & 0x1fff) << 16));
    u32CscP4 = (conver_csc_coef(stCscCoef.csc_coef22) & 0x1fff);
    
    memopen();
    pAddr = (HI_U8 *)memmap(VO_BASE_REG, VO_VIDEO_CSC_SIZE);
    
    if (NULL == pAddr)
    {
        printf("err: memmap failure\n");
        memclose();
        return HI_FAILURE;
    }
    //printf("sys_map 0x%x\n", pAddr);

    pRegAddr = (HI_U32 *)(vo_get_layer_addr(u32LayerId, pAddr + VHDCSCIDC));
    u32CscIdc |= (*pRegAddr & 0x08000000);   //csc_en
    //printf("addr 0x%x, old value 0x%x, new value 0x%x\n", pRegAddr, *pRegAddr, u32VgaCscIdc);
    *pRegAddr = u32CscIdc;
    pRegAddr = (HI_U32 *)(vo_get_layer_addr(u32LayerId, pAddr + VHDCSCODC));
    //printf("addr 0x%x, old value 0x%x, new value 0x%x\n", pRegAddr, *pRegAddr, u32VgaCscOdc);
    *pRegAddr = u32CscOdc;
    pRegAddr = (HI_U32 *)(vo_get_layer_addr(u32LayerId, pAddr + VHDCSCP0));
    //printf("addr 0x%x, old value 0x%x, new value 0x%x\n", pRegAddr, *pRegAddr, u32VgaCscP0);
    *pRegAddr = u32CscP0;
    pRegAddr = (HI_U32 *)(vo_get_layer_addr(u32LayerId, pAddr + VHDCSCP1));
    //printf("addr 0x%x, old value 0x%x, new value 0x%x\n", pRegAddr, *pRegAddr, u32VgaCscP1);
    *pRegAddr = u32CscP1;
    pRegAddr = (HI_U32 *)(vo_get_layer_addr(u32LayerId, pAddr + VHDCSCP2));
    //printf("addr 0x%x, old value 0x%x, new value 0x%x\n", pRegAddr, *pRegAddr, u32VgaCscP2);
    *pRegAddr = u32CscP2;
    pRegAddr = (HI_U32 *)(vo_get_layer_addr(u32LayerId, pAddr + VHDCSCP3));
    //printf("addr 0x%x, old value 0x%x, new value 0x%x\n", pRegAddr, *pRegAddr, u32VgaCscP3);
    *pRegAddr = u32CscP3;
    pRegAddr = (HI_U32 *)(vo_get_layer_addr(u32LayerId, pAddr + VHDCSCP4));
    //printf("addr 0x%x, old value 0x%x, new value 0x%x\n", pRegAddr, *pRegAddr, u32VgaCscP4);
    *pRegAddr = u32CscP4;

    pRegAddr = (HI_U32 *)(pAddr + (DACCTRL0_2 - VO_BASE_REG));
    set_cvbs_gain(u32LayerId, pRegAddr, u32Gain);

    memunmap(pAddr, VO_VIDEO_CSC_SIZE);
    memclose();

    return HI_SUCCESS;
}
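The memopen()/memmap()/memunmap()/memclose() helpers here come from the HiSilicon sample SDK; their signatures are inferred from the calls in this file, not from a public header. The implied lifecycle, as a sketch:

HI_U8 *pAddr = NULL;

memopen();                              /* open the /dev/mem-style backend */
pAddr = (HI_U8 *)memmap(VO_BASE_REG, VO_VIDEO_CSC_SIZE);
if (NULL != pAddr)
{
    /* ... read or write registers through pAddr ... */
    memunmap(pAddr, VO_VIDEO_CSC_SIZE); /* unmap with the mapped pointer */
}
memclose();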
Example #22
File: memremap.c Project: DenisLug/mptcp
static void devm_memremap_release(struct device *dev, void *res)
{
	/* res is the devres data area that stores the remapped address */
	memunmap(*(void **)res);
}
Example #23
File: memremap.c Project: DenisLug/mptcp
void devm_memunmap(struct device *dev, void *addr)
{
	WARN_ON(devres_destroy(dev, devm_memremap_release, devm_memremap_match,
			       addr));
	memunmap(addr);
}
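A usage sketch for the devres pair above. Whether devm_memremap() reports failure as NULL or as an ERR_PTR has varied across kernel versions; this sketch assumes the ERR_PTR convention, and the probe function and resource argument are illustrative:

static int my_probe(struct device *dev, struct resource *res)
{
	void *base;

	base = devm_memremap(dev, res->start, resource_size(res),
			     MEMREMAP_WB);
	if (IS_ERR(base))
		return PTR_ERR(base);

	/* base is unmapped automatically on driver detach; call
	 * devm_memunmap(dev, base) only for early teardown */
	return 0;
}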
Example #24
File: skl-nhlt.c Project: val2k/linux
void skl_nhlt_free(struct nhlt_acpi_table *nhlt)
{
	memunmap((void *) nhlt);
}