otm_hdmi_ret_t ps_hdmi_pci_dev_init(void *context, struct pci_dev *pdev)
{
	otm_hdmi_ret_t rc = OTM_HDMI_SUCCESS;
	int result = 0;
	unsigned int vdc_start;
	uint32_t pci_address = 0;
	uint8_t pci_dev_revision = 0;
	hdmi_context_t *ctx = NULL;

	if (pdev == NULL || context == NULL) {
		rc = OTM_HDMI_ERR_INTERNAL;
		goto exit;
	}
	ctx = (hdmi_context_t *)context;

	pr_debug("get resource start\n");
	result = pci_read_config_dword(pdev, 16, &vdc_start);
	if (result != 0) {
		rc = OTM_HDMI_ERR_FAILED;
		goto exit;
	}
	pci_address = vdc_start + PS_VDC_OFFSET;

	pr_debug("map IO region\n");
	/* Map IO region and save its length */
	ctx->io_length = PS_VDC_SIZE;
	ctx->io_address = ioremap_cache(pci_address, ctx->io_length);
	if (!ctx->io_address) {
		rc = OTM_HDMI_ERR_FAILED;
		goto exit;
	}

	pr_debug("get PCI dev revision\n");
	result = pci_read_config_byte(pdev, 8, &pci_dev_revision);
	if (result != 0) {
		rc = OTM_HDMI_ERR_FAILED;
		goto exit;
	}
	ctx->dev.id = pci_dev_revision;
	/* Store this context for use by MSIC PCI driver */
	g_context = ctx;

	/* Handle CTP specific GPIO configuration */
	ctx->gpio_hpd_pin = get_gpio_by_name(PS_MSIC_HPD_GPIO_PIN_NAME);
	if (-1 == ctx->gpio_hpd_pin) {
		ctx->gpio_hpd_pin = PS_MSIC_HPD_GPIO_PIN;
		pr_debug("get_gpio_by_name failed! Use default pin %d\n",
				PS_MSIC_HPD_GPIO_PIN);
	}

exit:
	return rc;
}
Example #2
/*
 * sfi_un/map_memory calls early_ioremap/iounmap, which is an __init function
 * and introduces a section mismatch. Use __ref to silence the warning.
 */
static void __iomem * __ref sfi_map_memory(u64 phys, u32 size)
{
	pr_emerg("Entering sfi_map_memory, phys = %llx, size = %d\n", phys, size);

	if (!phys || !size)
		return NULL;

	if (sfi_use_ioremap)
		return ioremap_cache(phys, size);
	else
		return early_ioremap(phys, size);
}
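/*
 * Hedged counterpart sketch (an assumption based on the comment above, not
 * part of the excerpt): the unmap side mirrors sfi_map_memory(), choosing
 * iounmap() or early_iounmap() based on the same sfi_use_ioremap flag.
 */
static void __ref sfi_unmap_memory(void __iomem *virt, u32 size)
{
	if (!virt || !size)
		return;

	if (sfi_use_ioremap)
		iounmap(virt);
	else
		early_iounmap(virt, size);
}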
/* configure_shm - Negotiate Shared Memory configuration with teetz. */
static int configure_shm(struct tee_tz *ptee)
{
	struct smc_param param = { 0 };
	size_t shm_size = -1;
	int ret = 0;

	dev_dbg(DEV, ">\n");
	BUG_ON(!CAPABLE(ptee->tee));

	mutex_lock(&ptee->mutex);
	param.a0 = TEESMC32_ST_FASTCALL_GET_SHM_CONFIG;
	tee_smc_call(&param);
	mutex_unlock(&ptee->mutex);

	if (param.a0 != TEESMC_RETURN_OK) {
		dev_err(DEV, "shm service not available: %X", (uint)param.a0);
		ret = -EINVAL;
		goto out;
	}

	ptee->shm_paddr = param.a1;
	shm_size = param.a2;
	ptee->shm_cached = (bool)param.a3;

	if (ptee->shm_cached)
		ptee->shm_vaddr = ioremap_cache(ptee->shm_paddr, shm_size);
	else
		ptee->shm_vaddr = ioremap_nocache(ptee->shm_paddr, shm_size);

	if (ptee->shm_vaddr == NULL) {
		dev_err(DEV, "shm ioremap failed\n");
		ret = -ENOMEM;
		goto out;
	}

	ptee->shm_pool = tee_shm_pool_create(DEV, shm_size,
					     ptee->shm_vaddr, ptee->shm_paddr);

	if (!ptee->shm_pool) {
		dev_err(DEV, "shm pool creation failed (%zu)", shm_size);
		ret = -EINVAL;
		goto out;
	}

	if (ptee->shm_cached)
		tee_shm_pool_set_cached(ptee->shm_pool);
out:
	dev_dbg(DEV, "< ret=%d pa=0x%lX va=0x%p size=%zu, %scached",
		ret, ptee->shm_paddr, ptee->shm_vaddr, shm_size,
		(ptee->shm_cached == 1) ? "" : "un");
	return ret;
}
static void *__alloc(struct mem_pool *mpool, unsigned long size,
	unsigned long align, int cached, void *caller)
{
	unsigned long paddr;
	void __iomem *vaddr;

	unsigned long aligned_size;
	int log_align = ilog2(align);

	struct alloc *node;

	aligned_size = PFN_ALIGN(size);
	paddr = gen_pool_alloc_aligned(mpool->gpool, aligned_size, log_align);
	if (!paddr)
		return NULL;

	node = kmalloc(sizeof(struct alloc), GFP_KERNEL);
	if (!node)
		goto out;

	if (cached)
		vaddr = ioremap_cache(paddr, aligned_size);
	else
		vaddr = ioremap(paddr, aligned_size);

	if (!vaddr)
		goto out_kfree;

	/*
	 * Just cast to an unsigned long to avoid warnings about casting from a
	 * pointer to an integer of different size. The pointer is only 32-bits
	 * so we lose no data.
	 */
	node->vaddr = (unsigned long)vaddr;
	node->paddr = paddr;
	node->len = aligned_size;
	node->mpool = mpool;
	node->caller = caller;
	if (add_alloc(node))
		goto out_kfree;

	mpool->free -= aligned_size;

	return vaddr;
out_kfree:
	if (vaddr)
		iounmap(vaddr);
	kfree(node);
out:
	gen_pool_free(mpool->gpool, paddr, aligned_size);
	return NULL;
}
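/*
 * Hedged counterpart sketch (not taken from the excerpt above): how a free
 * path could unwind the fields stashed in struct alloc by __alloc(). The
 * lookup helper find_and_remove_alloc() is hypothetical; the real driver
 * presumably walks whatever structure add_alloc() maintains.
 */
static void __free(void *vaddr)
{
	struct alloc *node = find_and_remove_alloc((unsigned long)vaddr);

	if (!node)
		return;

	iounmap((void __iomem *)node->vaddr);
	node->mpool->free += node->len;
	gen_pool_free(node->mpool->gpool, node->paddr, node->len);
	kfree(node);
}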
Example #5
/**
 * memremap() - remap an iomem_resource as cacheable memory
 * @offset: iomem resource start address
 * @size: size of remap
 * @flags: either MEMREMAP_WB or MEMREMAP_WT
 *
 * memremap() is "ioremap" for cases where it is known that the resource
 * being mapped does not have i/o side effects and the __iomem
 * annotation is not applicable.
 *
 * MEMREMAP_WB - matches the default mapping for System RAM on
 * the architecture.  This is usually a read-allocate write-back cache.
 * Moreover, if MEMREMAP_WB is specified and the requested remap region is RAM,
 * memremap() will bypass establishing a new mapping and instead return
 * a pointer into the direct map.
 *
 * MEMREMAP_WT - establish a mapping whereby writes either bypass the
 * cache or are written through to memory and never exist in a
 * cache-dirty state with respect to program visibility.  Attempts to
 * map System RAM with this mapping type will fail.
 */
void *memremap(resource_size_t offset, size_t size, unsigned long flags)
{
	int is_ram = region_intersects(offset, size,
				       IORESOURCE_SYSTEM_RAM, IORES_DESC_NONE);
	void *addr = NULL;

	if (is_ram == REGION_MIXED) {
		WARN_ONCE(1, "memremap attempted on mixed range %pa size: %#lx\n",
				&offset, (unsigned long) size);
		return NULL;
	}

	/* Try all mapping types requested until one returns non-NULL */
	if (flags & MEMREMAP_WB) {
		flags &= ~MEMREMAP_WB;
		/*
		 * MEMREMAP_WB is special in that it can be satisfied
		 * from the direct map.  Some archs depend on the
		 * capability of memremap() to autodetect cases where
		 * the requested range is potentially in System RAM.
		 */
		if (is_ram == REGION_INTERSECTS)
			addr = try_ram_remap(offset, size);
		if (!addr)
			addr = ioremap_cache(offset, size);
	}

	/*
	 * If we don't have a mapping yet and more request flags are
	 * pending then we will be attempting to establish a new virtual
	 * address mapping.  Enforce that this mapping is not aliasing
	 * System RAM.
	 */
	if (!addr && is_ram == REGION_INTERSECTS && flags) {
		WARN_ONCE(1, "memremap attempted on ram %pa size: %#lx\n",
				&offset, (unsigned long) size);
		return NULL;
	}

	if (!addr && (flags & MEMREMAP_WT)) {
		flags &= ~MEMREMAP_WT;
		addr = ioremap_wt(offset, size);
	}

	return addr;
}
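/*
 * Hedged usage sketch (not part of the excerpt above): how a caller might
 * map a firmware-described, side-effect-free region with memremap(). The
 * names and the base/size values are hypothetical placeholders; memunmap()
 * is the documented release counterpart.
 */
static void *fw_table;

static int map_fw_table(resource_size_t base, size_t size)
{
	/* Ask for a cacheable mapping; System RAM is served from the direct map. */
	fw_table = memremap(base, size, MEMREMAP_WB);
	if (!fw_table)
		return -ENOMEM;
	return 0;
}

static void unmap_fw_table(void)
{
	memunmap(fw_table);	/* pairs with memremap() */
	fw_table = NULL;
}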
Example #6
static ssize_t memconsole_read(struct file *filp, struct kobject *kobp,
                               struct bin_attribute *bin_attr, char *buf,
                               loff_t pos, size_t count)
{
    char *memconsole;
    ssize_t ret;

    memconsole = ioremap_cache(memconsole_baseaddr, memconsole_length);
    if (!memconsole) {
        pr_err("memconsole: ioremap_cache failed\n");
        return -ENOMEM;
    }
    ret = memory_read_from_buffer(buf, count, &pos, memconsole,
                                  memconsole_length);
    iounmap(memconsole);
    return ret;
}
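/*
 * Hedged registration sketch (an assumption, not shown in the excerpt): the
 * read callback above matches the sysfs binary-attribute prototype, so it is
 * typically exposed as a read-only file under /sys/firmware. The attribute
 * name and the use of firmware_kobj are assumptions.
 */
static struct bin_attribute memconsole_bin_attr = {
    .attr = { .name = "log", .mode = 0444 },
    .read = memconsole_read,
};

static int __init memconsole_sysfs_init(void)
{
    memconsole_bin_attr.size = memconsole_length;
    return sysfs_create_bin_file(firmware_kobj, &memconsole_bin_attr);
}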
otm_hdmi_ret_t ps_hdmi_pci_dev_init(void *context, struct pci_dev *pdev)
{
	otm_hdmi_ret_t rc = OTM_HDMI_SUCCESS;
	int result = 0;
	unsigned int vdc_start;
	uint32_t pci_address = 0;
	uint8_t pci_dev_revision = 0;
	hdmi_context_t *ctx = NULL;

	if (pdev == NULL || context == NULL) {
		rc = OTM_HDMI_ERR_INTERNAL;
		goto exit;
	}
	ctx = (hdmi_context_t *)context;

	pr_debug("get resource start\n");
	result = pci_read_config_dword(pdev, 16, &vdc_start);
	if (result != 0) {
		rc = OTM_HDMI_ERR_FAILED;
		goto exit;
	}
	pci_address = vdc_start + PS_VDC_OFFSET;

	pr_debug("map IO region\n");
	/* Map IO region and save its length */
	ctx->io_length = PS_VDC_SIZE;
	ctx->io_address = ioremap_cache(pci_address, ctx->io_length);
	if (!ctx->io_address) {
		rc = OTM_HDMI_ERR_FAILED;
		goto exit;
	}

	pr_debug("get PCI dev revision\n");
	result = pci_read_config_byte(pdev, 8, &pci_dev_revision);
	if (result != 0) {
		rc = OTM_HDMI_ERR_FAILED;
		goto exit;
	}
	ctx->dev.id = pci_dev_revision;
	/* Store this context for use by MSIC PCI driver */
	g_context = ctx;
exit:
	return rc;
}
Example #8
static BOOL
mapit(MEMREGION *memregion)
{
	ulong physaddr = (ulong) (memregion->physaddr);
	ulong nbytes = memregion->nbytes;

	memregion->requested = FALSE;
	if (!request_mem_region(physaddr, nbytes, MYDRVNAME))
		ERRDRV("cannot reserve channel memory @0x%lx for 0x%lx-- no big deal", physaddr, nbytes);
	else
		memregion->requested = TRUE;
	memregion->mapped = ioremap_cache(physaddr, nbytes);
	if (memregion->mapped == NULL) {
		ERRDRV("cannot ioremap_cache channel memory @0x%lx for 0x%lx",
		       physaddr, nbytes);
		return FALSE;
	}
	return TRUE;
}
Example #9
static int __init obtain_memory(void)
{
  region = request_mem_region(start, size, DRIVER_NAME);
  if(!region)
  {
    printk(KERN_WARNING PRINTK_PREFIX "ERROR: request_mem_region failed\n");
    return -ENOMEM;
  }

  mappedMemory = (u64)ioremap_cache(start, size);
  if(!mappedMemory)
  {
    printk(KERN_WARNING PRINTK_PREFIX "ERROR: ioremap failed\n");
    release_mem_region(start, size);
    return -ENOMEM;
  }

  printk(KERN_NOTICE PRINTK_PREFIX "Mapped physical memory to address 0x%010llX\n", mappedMemory);

  return 0;
}
static int smp_spin_table_cpu_prepare(unsigned int cpu)
{
	__le64 __iomem *release_addr;

	if (!cpu_release_addr[cpu])
		return -ENODEV;

	/*
	 * The cpu-release-addr may or may not be inside the linear mapping.
	 * As ioremap_cache will either give us a new mapping or reuse the
	 * existing linear mapping, we can use it to cover both cases. In
	 * either case the memory will be MT_NORMAL.
	 */
	release_addr = ioremap_cache(cpu_release_addr[cpu],
				     sizeof(*release_addr));
	if (!release_addr)
		return -ENOMEM;

	/*
	 * We write the release address as LE regardless of the native
	 * endianness of the kernel. Therefore, any boot-loaders that
	 * read this address need to convert it to the
	 * boot-loader's endianness before jumping. This is mandated by
	 * the boot protocol.
	 */
	writeq_relaxed(__pa_symbol(secondary_holding_pen), release_addr);
	__flush_dcache_area((__force void *)release_addr,
			    sizeof(*release_addr));

	/*
	 * Send an event to wake up the secondary CPU.
	 */
	sev();

	iounmap(release_addr);

	return 0;
}
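/*
 * Hedged illustration (not part of the excerpt above) of the consumer side
 * of the spin-table protocol the comments describe: firmware parks each
 * secondary CPU polling its release address until a non-zero little-endian
 * entry point appears, then branches to it. Real firmware does this in
 * assembly with the MMU off; this C sketch only shows the logic.
 */
static void __noreturn spin_table_secondary_wait(volatile __le64 *release_addr)
{
	void (*entry)(void);

	while (!*release_addr)
		wfe();		/* woken by the kernel's sev() above */

	entry = (void (*)(void))(unsigned long)le64_to_cpu(*release_addr);
	entry();		/* jumps to secondary_holding_pen */

	for (;;)
		cpu_relax();	/* not reached */
}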
ssize_t copy_oldmem_page(unsigned long pfn, char *buf,
		size_t csize, unsigned long offset, int userbuf)
{
	void  *vaddr;

	if (!csize)
		return 0;

	vaddr = ioremap_cache(pfn << PAGE_SHIFT, PAGE_SIZE);
	if (!vaddr)
		return -ENOMEM;

	if (userbuf) {
		if (copy_to_user(buf, vaddr + offset, csize)) {
			iounmap(vaddr);
			return -EFAULT;
		}
	} else
		memcpy(buf, vaddr + offset, csize);

	set_iounmap_nonlazy();
	iounmap(vaddr);
	return csize;
}
Example #12
static OSDEV_Status_t AllocatorIoctlAllocateData( AllocatorContext_t    *AllocatorContext,
						  unsigned int           ParameterAddress )
{
allocator_ioctl_allocate_t      params;
#if defined(MULTICOM406)
ICS_ERROR IcsErr;
#endif

//

    OSDEV_CopyToDeviceSpace( &params, ParameterAddress, sizeof(allocator_ioctl_allocate_t) );

//

    AllocatorContext->Size              = params.RequiredSize;
    memcpy( AllocatorContext->PartitionName, params.PartitionName, ALLOCATOR_MAX_PARTITION_NAME_SIZE );

//

    AllocatorContext->Memory        = OSDEV_MallocPartitioned( 	AllocatorContext->PartitionName, 
								AllocatorContext->Size );

    if( AllocatorContext->Memory == NULL )
    {
	OSDEV_Print( "AllocatorIoctlAllocateData : Unable to allocate memory\n" );
	return OSDEV_Error;
    }

    //
    // The memory supplied by BPA2 is physical, get the necessary mappings
    //

    AllocatorContext->CachedAddress	= NULL;
    AllocatorContext->UnCachedAddress	= NULL;
    AllocatorContext->PhysicalAddress	= NULL;

        
    AllocatorContext->CachedAddress	= ioremap_cache((unsigned int)AllocatorContext->Memory,AllocatorContext->Size);
    AllocatorContext->PhysicalAddress	= AllocatorContext->Memory ;
    AllocatorContext->UnCachedAddress	= (unsigned char *)OSDEV_IOReMap( (unsigned int)AllocatorContext->PhysicalAddress, AllocatorContext->Size );

#if defined(MULTICOM406)
    IcsErr = ICS_region_add(AllocatorContext->CachedAddress, AllocatorContext->PhysicalAddress, AllocatorContext->Size,
                         ICS_CACHED, ics_cpu_mask(), &AllocatorContext->CachedRegion);
    if( IcsErr != ICS_SUCCESS )
    {
        OSDEV_Print( "AllocatorIoctlAllocateData : - Unable to allocate Cached ICS region.\n" );
        return OSDEV_Error;
    }

    IcsErr = ICS_region_add(AllocatorContext->UnCachedAddress, AllocatorContext->PhysicalAddress, AllocatorContext->Size,
                            ICS_UNCACHED, ics_cpu_mask(), &AllocatorContext->UnCachedRegion);
    if( IcsErr != ICS_SUCCESS )
    {
        OSDEV_Print( "AllocatorIoctlAllocateData : - Unable to allocate Uncached ICS region.\n" );
        return OSDEV_Error;
    }
#endif

/*    
    OSDEV_Print("Alloc - Phys %p - C %p - UC %p  -- Size 0x%x\n",AllocatorContext->PhysicalAddress,
                AllocatorContext->CachedAddress, AllocatorContext->UnCachedAddress,AllocatorContext->Size);
*/    
    //
    // Copy the data into the parameters and pass back 
    //

    params.CachedAddress	= AllocatorContext->CachedAddress;
    params.UnCachedAddress	= AllocatorContext->UnCachedAddress;
    params.PhysicalAddress	= AllocatorContext->PhysicalAddress;

    OSDEV_CopyToUserSpace( ParameterAddress, &params, sizeof(allocator_ioctl_allocate_t) );

//

    return OSDEV_NoError;
}
Example #13
static int __init erst_init(void)
{
    int rc = 0;
    acpi_status status;
    struct apei_exec_context ctx;
    struct apei_resources erst_resources;
    struct resource *r;

    if (acpi_disabled)
        goto err;

    if (erst_disable) {
        pr_info(ERST_PFX
                "Error Record Serialization Table (ERST) support is disabled.\n");
        goto err;
    }

    status = acpi_get_table(ACPI_SIG_ERST, 0,
                            (struct acpi_table_header **)&erst_tab);
    if (status == AE_NOT_FOUND) {
        pr_info(ERST_PFX "Table is not found!\n");
        goto err;
    } else if (ACPI_FAILURE(status)) {
        const char *msg = acpi_format_exception(status);
        pr_err(ERST_PFX "Failed to get table, %s\n", msg);
        rc = -EINVAL;
        goto err;
    }

    rc = erst_check_table(erst_tab);
    if (rc) {
        pr_err(FW_BUG ERST_PFX "ERST table is invalid\n");
        goto err;
    }

    apei_resources_init(&erst_resources);
    erst_exec_ctx_init(&ctx);
    rc = apei_exec_collect_resources(&ctx, &erst_resources);
    if (rc)
        goto err_fini;
    rc = apei_resources_request(&erst_resources, "APEI ERST");
    if (rc)
        goto err_fini;
    rc = apei_exec_pre_map_gars(&ctx);
    if (rc)
        goto err_release;
    rc = erst_get_erange(&erst_erange);
    if (rc) {
        if (rc == -ENODEV)
            pr_info(ERST_PFX
                    "The corresponding hardware device or firmware implementation "
                    "is not available.\n");
        else
            pr_err(ERST_PFX
                   "Failed to get Error Log Address Range.\n");
        goto err_unmap_reg;
    }

    r = request_mem_region(erst_erange.base, erst_erange.size, "APEI ERST");
    if (!r) {
        pr_err(ERST_PFX
               "Can not request iomem region <0x%16llx-0x%16llx> for ERST.\n",
               (unsigned long long)erst_erange.base,
               (unsigned long long)erst_erange.base + erst_erange.size);
        rc = -EIO;
        goto err_unmap_reg;
    }
    rc = -ENOMEM;
    erst_erange.vaddr = ioremap_cache(erst_erange.base,
                                      erst_erange.size);
    if (!erst_erange.vaddr)
        goto err_release_erange;

    pr_info(ERST_PFX
            "Error Record Serialization Table (ERST) support is initialized.\n");

    return 0;

err_release_erange:
    release_mem_region(erst_erange.base, erst_erange.size);
err_unmap_reg:
    apei_exec_post_unmap_gars(&ctx);
err_release:
    apei_resources_release(&erst_resources);
err_fini:
    apei_resources_fini(&erst_resources);
err:
    erst_disable = 1;
    return rc;
}
static phys_addr_t check_cbmem(void)
{
	struct sysinfo sysi;
	phys_addr_t top_of_ram, scan_addr;

	/* Get CBMEM TOC address from ACPI if available. */
	scan_addr = get_address_from_acpi(CBMEM_TOC_ACPI_NAME);

	/*
	 * Otherwise determine where to start looking for CBMEM signature:
	 * take the top of usable memory and align it up to 128K boundary.
	 */
	if (!scan_addr) {
		si_meminfo(&sysi);
		top_of_ram = (phys_addr_t) sysi.totalram << PAGE_SHIFT;
		scan_addr = ALIGN(top_of_ram, CBMEM_ALIGNMENT) +
			CBMEM_ALIGNMENT;
	}

	while (scan_addr % MEMORY_BOUNDARY) {
		struct cbmem_entry __iomem *pcbm;
		int i, remap_size = sizeof(struct cbmem_entry) * 16;

		/*
		 * See if we reached reserved memory. Bail out if so, as it is
		 * not mappable and is above the region where the CBMEM could
		 * be.
		 */
		if (e820_any_mapped(scan_addr,
				    scan_addr + remap_size,
				    E820_RESERVED))
			break;

		pcbm = ioremap_cache(scan_addr, remap_size);
		if (!pcbm) {
			scan_addr += CBMEM_ALIGNMENT;
			continue;
		}

		if (pcbm->magic != CBMEM_ENTRY_MAGIC) {
			iounmap(pcbm);
			scan_addr += CBMEM_ALIGNMENT;
			continue;
		}

		/* CBMEM found. Is the console log there? */
		for (i = 1; i < MAX_CBMEM_ENTRIES; i++) {
			if ((pcbm[i].magic == CBMEM_ENTRY_MAGIC) &&
			    (pcbm[i].id == CBMEM_CONSOLE_ID)) {
				/* Yes, return its address. */
				phys_addr_t ret = pcbm[i].base;
				iounmap(pcbm);
				return ret;
			}
		}
		iounmap(pcbm);
		break;
	}

	pr_warn("memconsole: CBMEM console structure not found!\n");
	return 0;
}
otm_hdmi_ret_t ps_hdmi_pci_dev_init(void *context, struct pci_dev *pdev)
{
	otm_hdmi_ret_t rc = OTM_HDMI_SUCCESS;
	int result = 0;
	unsigned int vdc_start;
	uint32_t pci_address = 0;
	uint8_t pci_dev_revision = 0;
	hdmi_context_t *ctx = NULL;

	if (pdev == NULL || context == NULL) {
		rc = OTM_HDMI_ERR_INTERNAL;
		goto exit;
	}
	ctx = (hdmi_context_t *)context;

	pr_debug("get resource start\n");
	result = pci_read_config_dword(pdev, 16, &vdc_start);
	if (result != 0) {
		rc = OTM_HDMI_ERR_FAILED;
		goto exit;
	}
	pci_address = vdc_start + PS_VDC_OFFSET;

	pr_debug("map IO region\n");
	/* Map IO region and save its length */
	ctx->io_length = PS_VDC_SIZE;
	ctx->io_address = ioremap_cache(pci_address, ctx->io_length);
	if (!ctx->io_address) {
		rc = OTM_HDMI_ERR_FAILED;
		goto exit;
	}

	pr_debug("get PCI dev revision\n");
	result = pci_read_config_byte(pdev, 8, &pci_dev_revision);
	if (result != 0) {
		rc = OTM_HDMI_ERR_FAILED;
		goto exit;
	}
	ctx->dev.id = pci_dev_revision;
	/* Store this context for use by MSIC PCI driver */
	g_context = ctx;

	ctx->is_connected_overridden = true;

	/* Handle CTP specific GPIO configuration */
	ctx->gpio_hpd_pin = get_gpio_by_name(PS_MSIC_HPD_GPIO_PIN_NAME);
	if (-1 == ctx->gpio_hpd_pin) {
		ctx->gpio_hpd_pin = PS_MSIC_HPD_GPIO_PIN;
		pr_debug("get_gpio_by_name failed! Use default pin %d\n",
				PS_MSIC_HPD_GPIO_PIN);
	}

	ctx->gpio_ls_en_pin = get_gpio_by_name(PS_MSIC_LS_EN_GPIO_PIN_NAME);
	if (-1 == ctx->gpio_ls_en_pin) {
		ctx->gpio_ls_en_pin = PS_MSIC_LS_OE_GPIO_PIN;
		pr_debug("get_gpio_by_name failed! Use default pin %d\n",
				PS_MSIC_LS_OE_GPIO_PIN);
	}

	if (gpio_request(ctx->gpio_ls_en_pin, "CTP_HDMI_LS_OE")) {
		pr_err("%s: Unable to request gpio %d\n", __func__,
				ctx->gpio_ls_en_pin);
		rc = OTM_HDMI_ERR_FAILED;
		goto exit;
	}

	if (!gpio_is_valid(ctx->gpio_ls_en_pin)) {
		pr_err("%s: Unable to validate gpio %d\n", __func__,
				ctx->gpio_ls_en_pin);
		rc = OTM_HDMI_ERR_FAILED;
		goto exit;
	}

	/* Set the GPIO based on cable status */
	__ps_gpio_configure_edid_read();

exit:
	return rc;
}
Example #16
/* pasemi_dma_init - Initialize the PA Semi DMA library
 *
 * This function initializes the DMA library. It must be called before
 * any other function in the library.
 *
 * Returns 0 on success, errno on failure.
 */
int pasemi_dma_init(void)
{
	static DEFINE_SPINLOCK(init_lock);
	struct pci_dev *iob_pdev;
	struct pci_dev *pdev;
	struct resource res;
	struct device_node *dn;
	int i, intf, err = 0;
	unsigned long timeout;
	u32 tmp;

	if (!machine_is(pasemi))
		return -ENODEV;

	spin_lock(&init_lock);

	/* Make sure we haven't already initialized */
	if (dma_pdev)
		goto out;

	iob_pdev = pci_get_device(PCI_VENDOR_ID_PASEMI, 0xa001, NULL);
	if (!iob_pdev) {
		BUG();
		pr_warn("Can't find I/O Bridge\n");
		err = -ENODEV;
		goto out;
	}
	iob_regs = map_onedev(iob_pdev, 0);

	dma_pdev = pci_get_device(PCI_VENDOR_ID_PASEMI, 0xa007, NULL);
	if (!dma_pdev) {
		BUG();
		pr_warn("Can't find DMA controller\n");
		err = -ENODEV;
		goto out;
	}
	dma_regs = map_onedev(dma_pdev, 0);
	base_hw_irq = virq_to_hw(dma_pdev->irq);

	pci_read_config_dword(dma_pdev, PAS_DMA_CAP_TXCH, &tmp);
	num_txch = (tmp & PAS_DMA_CAP_TXCH_TCHN_M) >> PAS_DMA_CAP_TXCH_TCHN_S;

	pci_read_config_dword(dma_pdev, PAS_DMA_CAP_RXCH, &tmp);
	num_rxch = (tmp & PAS_DMA_CAP_RXCH_RCHN_M) >> PAS_DMA_CAP_RXCH_RCHN_S;

	intf = 0;
	for (pdev = pci_get_device(PCI_VENDOR_ID_PASEMI, 0xa006, NULL);
	     pdev;
	     pdev = pci_get_device(PCI_VENDOR_ID_PASEMI, 0xa006, pdev))
		mac_regs[intf++] = map_onedev(pdev, 0);

	pci_dev_put(pdev);

	for (pdev = pci_get_device(PCI_VENDOR_ID_PASEMI, 0xa005, NULL);
	     pdev;
	     pdev = pci_get_device(PCI_VENDOR_ID_PASEMI, 0xa005, pdev))
		mac_regs[intf++] = map_onedev(pdev, 0);

	pci_dev_put(pdev);

	dn = pci_device_to_OF_node(iob_pdev);
	if (dn)
		err = of_address_to_resource(dn, 1, &res);
	if (!dn || err) {
		/* Fallback for old firmware */
		res.start = 0xfd800000;
		res.end = res.start + 0x1000;
	}
	dma_status = ioremap_cache(res.start, resource_size(&res));
	pci_dev_put(iob_pdev);

	for (i = 0; i < MAX_TXCH; i++)
		__set_bit(i, txch_free);

	for (i = 0; i < MAX_RXCH; i++)
		__set_bit(i, rxch_free);

	timeout = jiffies + HZ;
	pasemi_write_dma_reg(PAS_DMA_COM_RXCMD, 0);
	while (pasemi_read_dma_reg(PAS_DMA_COM_RXSTA) & 1) {
		if (time_after(jiffies, timeout)) {
			pr_warn("Warning: Could not disable RX section\n");
			break;
		}
	}

	timeout = jiffies + HZ;
	pasemi_write_dma_reg(PAS_DMA_COM_TXCMD, 0);
	while (pasemi_read_dma_reg(PAS_DMA_COM_TXSTA) & 1) {
		if (time_after(jiffies, timeout)) {
			pr_warn("Warning: Could not disable TX section\n");
			break;
		}
	}

	/* setup resource allocations for the different DMA sections */
	tmp = pasemi_read_dma_reg(PAS_DMA_COM_CFG);
	pasemi_write_dma_reg(PAS_DMA_COM_CFG, tmp | 0x18000000);

	/* enable tx section */
	pasemi_write_dma_reg(PAS_DMA_COM_TXCMD, PAS_DMA_COM_TXCMD_EN);

	/* enable rx section */
	pasemi_write_dma_reg(PAS_DMA_COM_RXCMD, PAS_DMA_COM_RXCMD_EN);

	for (i = 0; i < MAX_FLAGS; i++)
		__set_bit(i, flags_free);

	for (i = 0; i < MAX_FUN; i++)
		__set_bit(i, fun_free);

	/* clear all status flags */
	pasemi_write_dma_reg(PAS_DMA_TXF_CFLG0, 0xffffffff);
	pasemi_write_dma_reg(PAS_DMA_TXF_CFLG1, 0xffffffff);

	pr_info("PA Semi PWRficient DMA library initialized "
		"(%d tx, %d rx channels)\n", num_txch, num_rxch);

out:
	spin_unlock(&init_lock);
	return err;
}
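/*
 * Hedged usage sketch (not part of the excerpt above): per the comment at
 * the top, clients call pasemi_dma_init() before any other library function.
 * The probe function below is a hypothetical placeholder.
 */
static int example_client_probe(void)
{
	int err;

	/* Safe to call even if another client initialized the library first. */
	err = pasemi_dma_init();
	if (err)
		return err;	/* e.g. -ENODEV when not on a PA Semi machine */

	/* ... continue with channel allocation and device setup ... */
	return 0;
}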
Example #17
static void *arch_memremap_wb(resource_size_t offset, unsigned long size)
{
	return (__force void *)ioremap_cache(offset, size);
}
Example #18
otm_hdmi_ret_t ps_hdmi_pci_dev_init(void *context, struct pci_dev *pdev)
{
	otm_hdmi_ret_t rc = OTM_HDMI_SUCCESS;
	int result = 0;
	struct pci_dev *msic_pdev = NULL;
	unsigned int vdc_start;
	uint32_t pci_address = 0;
	uint8_t pci_dev_revision = 0;
	hdmi_context_t *ctx = NULL;

	if (pdev == NULL || context == NULL) {
		rc = OTM_HDMI_ERR_INTERNAL;
		goto exit;
	}
	ctx = (hdmi_context_t *)context;

	pr_debug("\nget resource start\n");
	result = pci_read_config_dword(pdev, 16, &vdc_start);
	if (result != 0) {
		rc = OTM_HDMI_ERR_FAILED;
		goto exit;
	}
	pci_address = vdc_start + PS_VDC_OFFSET;

	pr_debug("\nmap IO region\n");
	/* Map IO region and save its length */
	ctx->io_length = PS_VDC_SIZE;
	ctx->io_address = ioremap_cache(pci_address, ctx->io_length);
	if (!ctx->io_address) {
		rc = OTM_HDMI_ERR_FAILED;
		goto exit;
	}

	/* Map IO region for IRQ registers */
	ctx->dev.irq_io_address = ioremap_nocache(PS_MSIC_VRINT_ADDR,
						PS_MSIC_VRINT_IOADDR_LEN);
	if (!ctx->dev.irq_io_address) {
		rc = OTM_HDMI_ERR_FAILED;
		goto exit;
	}

	pr_debug("\nget PCI dev revision\n");
	result = pci_read_config_byte(pdev, 8, &pci_dev_revision);
	if (result != 0) {
		rc = OTM_HDMI_ERR_FAILED;
		goto exit;
	}
	ctx->dev.id = pci_dev_revision;

	pr_debug("pci_get_device for 0x%x\n", PS_MSIC_PCI_DEVICE_ID);
	msic_pdev = pci_get_device(PCI_VENDOR_INTEL,
					PS_MSIC_PCI_DEVICE_ID, msic_pdev);
	if (msic_pdev == NULL) {
		rc = OTM_HDMI_ERR_FAILED;
		goto exit;
	}
	pr_debug("pci_enable_device for 0x%x\n",
					PS_MSIC_PCI_DEVICE_ID);
	result = pci_enable_device(msic_pdev);
	if (result) {
		rc = OTM_HDMI_ERR_FAILED;
		goto exit;
	}
	pr_debug("IRQ number assigned = %d\n", msic_pdev->irq);
	ctx->irq_number = msic_pdev->irq;

exit:
	return rc;
}
/* register_outercache_mutex - Negotiate/Disable outer cache shared mutex */
static int register_outercache_mutex(struct tee_tz *ptee, bool reg)
{
	unsigned long *vaddr = NULL;
	int ret = 0;
	struct smc_param param;
	uintptr_t paddr = 0;

	dev_dbg(ptee->tee->dev, ">\n");
	BUG_ON(!CAPABLE(ptee->tee));

	if ((reg == true) && (ptee->tz_outer_cache_mutex != NULL)) {
		dev_err(DEV, "outer cache shared mutex already registered\n");
		return -EINVAL;
	}
	if ((reg == false) && (ptee->tz_outer_cache_mutex == NULL))
		return 0;

	mutex_lock(&ptee->mutex);

	if (reg == false) {
		vaddr = ptee->tz_outer_cache_mutex;
		ptee->tz_outer_cache_mutex = NULL;
		goto out;
	}

	memset(&param, 0, sizeof(param));
	param.a0 = TEESMC32_ST_FASTCALL_L2CC_MUTEX;
	param.a1 = TEESMC_ST_L2CC_MUTEX_GET_ADDR;
	tee_smc_call(&param);

	if (param.a0 != TEESMC_RETURN_OK) {
		dev_warn(DEV, "no TZ l2cc mutex service supported\n");
		goto out;
	}
	paddr = param.a2;
	dev_dbg(DEV, "outer cache shared mutex paddr 0x%lx\n", paddr);

	vaddr = ioremap_cache(paddr, sizeof(u32));
	if (vaddr == NULL) {
		dev_warn(DEV, "TZ l2cc mutex disabled: ioremap failed\n");
		ret = -ENOMEM;
		goto out;
	}

	dev_dbg(DEV, "outer cache shared mutex vaddr %p\n", vaddr);
	if (outer_tz_mutex(vaddr) == false) {
		dev_warn(DEV, "TZ l2cc mutex disabled: outer cache refused\n");
		goto out;
	}

	memset(&param, 0, sizeof(param));
	param.a0 = TEESMC32_ST_FASTCALL_L2CC_MUTEX;
	param.a1 = TEESMC_ST_L2CC_MUTEX_ENABLE;
	tee_smc_call(&param);

	if (param.a0 != TEESMC_RETURN_OK) {

		dev_warn(DEV, "TZ l2cc mutex disabled: TZ enable failed\n");
		goto out;
	}
	ptee->tz_outer_cache_mutex = vaddr;

out:
	if (ptee->tz_outer_cache_mutex == NULL) {
		memset(&param, 0, sizeof(param));
		param.a0 = TEESMC32_ST_FASTCALL_L2CC_MUTEX;
		param.a1 = TEESMC_ST_L2CC_MUTEX_DISABLE;
		tee_smc_call(&param);
		outer_tz_mutex(NULL);
		if (vaddr)
			iounmap(vaddr);
		dev_dbg(DEV, "outer cache shared mutex disabled\n");
	}

	mutex_unlock(&ptee->mutex);
	dev_dbg(DEV, "< teetz outer mutex: ret=%d pa=0x%lX va=0x%p %sabled\n",
		ret, paddr, vaddr, ptee->tz_outer_cache_mutex ? "en" : "dis");
	return ret;
}
/*
 * Probe for the NAND device.
 */
static int __init stm_nand_emi_probe(struct platform_device *pdev)
{
	struct platform_nand_data *pdata = pdev->dev.platform_data;
	struct plat_stmnand_data *stmdata = pdata->ctrl.priv;

	struct stm_nand_emi *data;
	struct nand_timing_data *tm;

	int res = 0;

	/* Allocate memory for the driver structure (and zero it) */
	data = kzalloc(sizeof(struct stm_nand_emi), GFP_KERNEL);
	if (!data) {
		printk(KERN_ERR NAME
		       ": Failed to allocate device structure.\n");
		return -ENOMEM;
	}

	/* Get EMI Bank base address */
	data->emi_bank = pdev->id;
	data->emi_base = emi_bank_base(data->emi_bank) +
		stmdata->emi_withinbankoffset;
	data->emi_size = (1 << 18) + 1;

	/* Configure EMI Bank */
	if (nand_config_emi(data->emi_bank, stmdata->timing_data) != 0) {
		printk(KERN_ERR NAME ": Failed to configure EMI bank "
		       "for NAND device\n");
		goto out1;
	}

	/* Request IO Memory */
	if (!request_mem_region(data->emi_base, data->emi_size, pdev->name)) {
		printk(KERN_ERR NAME ": Request mem 0x%x region failed\n",
		       data->emi_base);
		res = -ENODEV;
		goto out1;
	}

	/* Map base address */
	data->io_base = ioremap_nocache(data->emi_base, 4096);
	if (!data->io_base) {
		printk(KERN_ERR NAME ": ioremap failed for io_base 0x%08x\n",
		       data->emi_base);
		res = -ENODEV;
		goto out2;
	}

#ifdef CONFIG_STM_NAND_EMI_CACHED
	/* Map data address through cache line */
	data->io_data = ioremap_cache(data->emi_base + 4096, 4096);
	if (!data->io_data) {
		printk(KERN_ERR NAME ": ioremap failed for io_data 0x%08x\n",
		       data->emi_base + 4096);
		res = -ENOMEM;
		goto out3;
	}
#else
	data->io_data = data->io_base;
#endif
	/* Map cmd and addr addresses (emi_addr_17 and emi_addr_18) */
	data->io_cmd = ioremap_nocache(data->emi_base | (1 << 17), 1);
	if (!data->io_cmd) {
		printk(KERN_ERR NAME ": ioremap failed for io_cmd 0x%08x\n",
		       data->emi_base | (1 << 17));
		res = -ENOMEM;
		goto out4;
	}

	data->io_addr = ioremap_nocache(data->emi_base | (1 << 18), 1);
	if (!data->io_addr) {
		printk(KERN_ERR NAME ": ioremap failed for io_addr 0x%08x\n",
		       data->emi_base | (1 << 18));
		res = -ENOMEM;
		goto out5;
	}

	data->chip.priv = data;
	data->mtd.priv = &data->chip;
	data->mtd.owner = THIS_MODULE;

	/* Assign more sensible name (default is string from nand_ids.c!) */
	data->mtd.name = pdev->dev.bus_id;

	tm = stmdata->timing_data;

	data->chip.IO_ADDR_R = data->io_base;
	data->chip.IO_ADDR_W = data->io_base;
	data->chip.chip_delay = tm->chip_delay;
	data->chip.cmd_ctrl = nand_cmd_ctrl_emi;

	/* Do we have access to NAND_RBn? */
	if (stmdata->rbn_port >= 0) {
		data->rbn = stpio_request_pin(stmdata->rbn_port,
					      stmdata->rbn_pin,
					      "nand_RBn", STPIO_IN);
		if (data->rbn) {
			data->chip.dev_ready = nand_device_ready;
		} else {
			printk(KERN_INFO NAME ": nand_rbn unavailable. "
			       "Falling back to chip_delay\n");
			/* Set a default delay if not previously specified */
			if (data->chip.chip_delay == 0)
				data->chip.chip_delay = 30;
		}
	}

	/* Set IO routines for accessing NAND pages */
#if defined(CONFIG_STM_NAND_EMI_FDMA)
	data->chip.read_buf = nand_read_buf_dma;
	data->chip.write_buf = nand_write_buf_dma;
	data->dma_chan = -1;
	data->init_fdma_jiffies = 0;
	init_fdma_nand_ratelimit(data);
	data->nand_phys_addr = data->emi_base;

#elif defined(CONFIG_STM_NAND_EMI_LONGSL)
	data->chip.read_buf = nand_readsl_buf;
	data->chip.write_buf = nand_writesl_buf;

#elif defined(CONFIG_STM_NAND_EMI_CACHED)
	data->chip.read_buf = nand_read_buf_cached_block;
	data->chip.write_buf = nand_write_buf_cached_block;

#elif defined(CONFIG_STM_NAND_EMI_BYTE)
	/* Default byte orientated routines */
#else
#error "Must specify CONFIG_STM_NAND_EMI_xxxx mode"
#endif

	data->chip.ecc.mode = NAND_ECC_SOFT;

	/* Copy chip options from platform data */
	data->chip.options = pdata->chip.options;

	platform_set_drvdata(pdev, data);

	/* Scan to find existence of the device */
	if (nand_scan(&data->mtd, 1)) {
		printk(KERN_ERR NAME ": nand_scan failed\n");
		res = -ENXIO;
		goto out6;
	}

#ifdef CONFIG_MTD_PARTITIONS
	res = parse_mtd_partitions(&data->mtd, part_probes, &data->parts, 0);
	if (res > 0) {
		add_mtd_partitions(&data->mtd, data->parts, res);
		return 0;
	}
	if (pdata->chip.partitions) {
		data->parts = pdata->chip.partitions;
		res = add_mtd_partitions(&data->mtd, data->parts,
					 pdata->chip.nr_partitions);
	} else
#endif
		res = add_mtd_device(&data->mtd);
	if (!res)
		return res;

	/* Release resources on error */
 out6:

	nand_release(&data->mtd);
	if (data->rbn)
		stpio_free_pin(data->rbn);
	platform_set_drvdata(pdev, NULL);
	iounmap(data->io_addr);
 out5:
	iounmap(data->io_cmd);
 out4:
#ifdef CONFIG_STM_NAND_EMI_CACHED
	iounmap(data->io_data);
 out3:
#endif
	iounmap(data->io_base);
 out2:
	release_mem_region(data->emi_base, data->emi_size);
 out1:
	kfree(data);
	return res;
}
/*
 * For Guests, device memory can be used as normal memory, so we cast away the
 * __iomem to quieten sparse.
 */
static inline void *lguest_map(unsigned long phys_addr, unsigned long pages)
{
	return (__force void *)ioremap_cache(phys_addr, PAGE_SIZE*pages);
}
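/*
 * Hedged counterpart sketch (an assumption, not shown in the excerpt): the
 * matching unmap casts the __iomem qualifier back on and hands the pointer
 * to iounmap().
 */
static inline void lguest_unmap(void *addr)
{
	iounmap((__force void __iomem *)addr);
}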
Example #22
static int 
#if(LINUX_VERSION_CODE < KERNEL_VERSION(3,8,0))
__devinit
#endif
ivshmem_pci_probe(struct pci_dev *dev,
					const struct pci_device_id *id)
{
	struct uio_info *info;
	struct ivshmem_info * ivshmem_info;
#ifdef IRQ_SUPPORT
	int nvectors = 4;
#endif

	info = kzalloc(sizeof(struct uio_info), GFP_KERNEL);
	if (!info)
		return -ENOMEM;

	ivshmem_info = kzalloc(sizeof(struct ivshmem_info), GFP_KERNEL);
	if (!ivshmem_info) {
		kfree(info);
		return -ENOMEM;
	}

	if (pci_enable_device(dev))
		goto out_free;

	if (pci_request_regions(dev, "ivshmem"))
		goto out_disable;

	info->mem[0].addr = pci_resource_start(dev, 0);
	if (!info->mem[0].addr)
		goto out_release;

	info->mem[0].size = pci_resource_len(dev, 0);
	info->mem[0].internal_addr = pci_ioremap_bar(dev, 0);
	if (!info->mem[0].internal_addr) {
		goto out_release;
	}

	info->mem[0].memtype = UIO_MEM_PHYS;

	info->mem[1].addr = pci_resource_start(dev, 2);
	if (!info->mem[1].addr)
		goto out_unmap;

	info->mem[1].internal_addr = ioremap_cache(pci_resource_start(dev, 2),
						   pci_resource_len(dev, 2));
	if (!info->mem[1].internal_addr)
		goto out_unmap;

#if 0
    info->mem[1].internal_addr = pci_ioremap_bar(dev, 2);
	if (!info->mem[1].internal_addr)
		goto out_unmap;
#endif

	info->mem[1].size = pci_resource_len(dev, 2);
	info->mem[1].memtype = UIO_MEM_PHYS;

	ivshmem_info->uio = info;
	ivshmem_info->dev = dev;

#ifdef IRQ_SUPPORT
	if (request_msix_vectors(ivshmem_info, nvectors) != 0) {
		printk(KERN_INFO "regular IRQs\n");
		info->irq = dev->irq;
		info->irq_flags = IRQF_SHARED;
		info->handler = ivshmem_handler;
		writel(0xffffffff, info->mem[0].internal_addr + IntrMask);
	} else {
		printk(KERN_INFO "MSI-X enabled\n");
		info->irq = -1;
	}
#else
	info->irq = -1;
#endif

	info->name = "ivshmem";
	info->version = "0.0.1";

	if (uio_register_device(&dev->dev, info))
		goto out_unmap2;

	pci_set_drvdata(dev, info);


	return 0;
out_unmap2:
	iounmap(info->mem[1].internal_addr);
out_unmap:
	iounmap(info->mem[0].internal_addr);
out_release:
	pci_release_regions(dev);
out_disable:
	pci_disable_device(dev);
out_free:
	kfree(ivshmem_info);
	kfree(info);
	return -ENODEV;
}
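/*
 * Hedged counterpart sketch (not part of the excerpt above): a remove
 * callback that unwinds the probe in reverse order, assuming the uio_info
 * pointer was stored with pci_set_drvdata() as done at the end of the probe.
 */
static void ivshmem_pci_remove(struct pci_dev *dev)
{
	struct uio_info *info = pci_get_drvdata(dev);

	uio_unregister_device(info);
	iounmap(info->mem[1].internal_addr);
	iounmap(info->mem[0].internal_addr);
	pci_release_regions(dev);
	pci_disable_device(dev);
	pci_set_drvdata(dev, NULL);
	kfree(info);
}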