void *ion_cp_heap_map_kernel(struct ion_heap *heap,
				   struct ion_buffer *buffer,
				   unsigned long flags)
{
	struct ion_cp_heap *cp_heap =
		container_of(heap, struct ion_cp_heap, heap);
	void *ret_value = NULL;

	mutex_lock(&cp_heap->lock);
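	/*
	 * A protected heap can only be mapped uncached from the kernel;
	 * cached kernel mappings are allowed only while it is unprotected.
	 */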
	if ((cp_heap->heap_protected == HEAP_NOT_PROTECTED) ||
	    ((cp_heap->heap_protected == HEAP_PROTECTED) &&
	      !ION_IS_CACHED(flags))) {

		if (ion_cp_request_region(cp_heap)) {
			mutex_unlock(&cp_heap->lock);
			return NULL;
		}

		if (cp_heap->reusable) {
			ret_value = ion_map_fmem_buffer(buffer, cp_heap->base,
					cp_heap->reserved_vrange, flags);

		} else {
			if (ION_IS_CACHED(flags))
				ret_value = ioremap_cached(buffer->priv_phys,
							   buffer->size);
			else
				ret_value = ioremap(buffer->priv_phys,
						    buffer->size);
		}

		if (!ret_value) {
			ion_cp_release_region(cp_heap);
		} else {
			if (ION_IS_CACHED(buffer->flags))
				++cp_heap->kmap_cached_count;
			else
				++cp_heap->kmap_uncached_count;
		}
	}
	mutex_unlock(&cp_heap->lock);
	return ret_value;
}
Example #2
void __iomem *_IORemapWrapper(struct IMG_CPU_PHYADDR BasePAddr,
			  u32 ui32Bytes, u32 ui32MappingFlags,
			  char *pszFileName, u32 ui32Line)
{
	void __iomem *pvIORemapCookie = NULL;

	switch (ui32MappingFlags & PVRSRV_HAP_CACHETYPE_MASK) {
	case PVRSRV_HAP_CACHED:
#if defined(__arm__)
		pvIORemapCookie = ioremap_cached(BasePAddr.uiAddr, ui32Bytes);
#else
		pvIORemapCookie = ioremap(BasePAddr.uiAddr, ui32Bytes);
#endif
		break;
	case PVRSRV_HAP_WRITECOMBINE:
		pvIORemapCookie = ioremap_nocache(BasePAddr.uiAddr, ui32Bytes);
		break;
	case PVRSRV_HAP_UNCACHED:
		pvIORemapCookie = ioremap_nocache(BasePAddr.uiAddr, ui32Bytes);
		break;
	default:
		PVR_DPF(PVR_DBG_ERROR,
			 "IORemapWrapper: unknown mapping flags");
		return NULL;
	}

#if defined(DEBUG_LINUX_MEMORY_ALLOCATIONS)
	if (pvIORemapCookie)
		DebugMemAllocRecordAdd(DEBUG_MEM_ALLOC_TYPE_IOREMAP,
				       (void __force *)pvIORemapCookie,
				       (void __force *)pvIORemapCookie,
				       BasePAddr.uiAddr, NULL, ui32Bytes,
				       pszFileName, ui32Line);
#endif

	return pvIORemapCookie;
}
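Example #3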
static int fmem_probe(struct platform_device *pdev)
{
	struct fmem_platform_data *pdata = pdev->dev.platform_data;

	if (!pdata->size)
		return -ENODEV;

	fmem_data.virt = ioremap_cached(pdata->phys, pdata->size);
	if (!fmem_data.virt)
		return -ENOMEM;

	fmem_data.phys = pdata->phys;
	fmem_data.size = pdata->size;

	pr_info("fmem phys %lx virt %p size %lx\n",
		fmem_data.phys, fmem_data.virt, fmem_data.size);

	spin_lock_init(&fmem_state_lock);

	return 0;
}
Example #4
static int secure_monitor_probe(struct platform_device *pdev)
{
	int ret = 0;

	printk("%s:%d\n", __FUNCTION__, __LINE__);

	secure_monitor_buf.pfbuf = kmalloc(FLASH_BUF_SIZE, GFP_KERNEL);
	if (!secure_monitor_buf.pfbuf) {
		printk("nandbuf create fail!\n");
		ret = -ENOMEM;
		goto flash_monitor_probe_exit;
	}

	secure_monitor_buf.psbuf = ioremap_cached(SHARE_MEM_PHY_START, SHARE_MEM_PHY_SIZE);
	if (!secure_monitor_buf.psbuf) {
		printk("ioremap share memory fail\n");
		ret = -ENOMEM;
		goto flash_monitor_probe_exit1;
	}

	/* kthread_run() returns an ERR_PTR() on failure, never NULL */
	secure_task = kthread_run(secure_writer_monitor, &secure_monitor_buf, "secure_flash");
	if (IS_ERR(secure_task)) {
		printk("create secure task failed\n");
		ret = PTR_ERR(secure_task);
		secure_task = NULL;
		goto flash_monitor_probe_exit2;
	}
	goto flash_monitor_probe_exit;

flash_monitor_probe_exit2:
	iounmap(secure_monitor_buf.psbuf);
	secure_monitor_buf.psbuf = NULL;

flash_monitor_probe_exit1:
	kfree(secure_monitor_buf.pfbuf);
	secure_monitor_buf.pfbuf = NULL;

flash_monitor_probe_exit:
	return ret;
}
Example #5
BOOL MfcDataBufMemMapping(void)
{
	__D("\n");

	/* Physical register address mapping */
	phyDATA_BUF = S3C6400_BASEADDR_MFC_DATA_BUF;

	// STREAM BUFFER, FRAME BUFFER  <-- virtual data buffer address mapping
	vir_pDATA_BUF = (typeof(vir_pDATA_BUF))ioremap_cached(phyDATA_BUF, MFC_DATA_BUF_SIZE);
	if (vir_pDATA_BUF == NULL) {
		__E("DATA_BUF: failed to map data buffer\n");
		goto err1;
	}

	__D("VIRTUAL ADDR DATA BUF : vir_pDATA_BUF = 0x%X\n",
		(unsigned int)vir_pDATA_BUF);
	
	return TRUE;

  err1:
	phyDATA_BUF = 0;
	return FALSE;
}
Example #6
static int __init pxa2xx_flash_probe(struct platform_device *pdev)
{
	struct flash_platform_data *flash = pdev->dev.platform_data;
	struct pxa2xx_flash_info *info;
	struct mtd_partition *parts;
	struct resource *res;
	int ret = 0;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res)
		return -ENODEV;

	info = kmalloc(sizeof(struct pxa2xx_flash_info), GFP_KERNEL);
	if (!info)
		return -ENOMEM;

	memset(info, 0, sizeof(struct pxa2xx_flash_info));
	info->map.name = (char *) flash->name;
	info->map.bankwidth = flash->width;
	info->map.phys = res->start;
	info->map.size = res->end - res->start + 1;
	info->parts = flash->parts;
	info->nr_parts = flash->nr_parts;

	info->map.virt = ioremap(info->map.phys, info->map.size);
	if (!info->map.virt) {
		printk(KERN_WARNING "Failed to ioremap %s\n",
		       info->map.name);
		kfree(info);
		return -ENOMEM;
	}
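	/*
	 * The cached alias is optional: the map layer can copy reads through
	 * the data cache, and pxa2xx_map_inval_cache invalidates it whenever
	 * the flash contents change underneath it.
	 */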
	info->map.cached =
		ioremap_cached(info->map.phys, info->map.size);
	if (!info->map.cached)
		printk(KERN_WARNING "Failed to ioremap cached %s\n",
		       info->map.name);
	info->map.inval_cache = pxa2xx_map_inval_cache;
	simple_map_init(&info->map);

	printk(KERN_NOTICE
	       "Probing %s at physical address 0x%08lx"
	       " (%d-bit bankwidth)\n",
	       info->map.name, (unsigned long)info->map.phys,
	       info->map.bankwidth * 8);

	info->mtd = do_map_probe(flash->map_name, &info->map);

	if (!info->mtd) {
		iounmap((void *)info->map.virt);
		if (info->map.cached)
			iounmap(info->map.cached);
		kfree(info);
		return -EIO;
	}
	info->mtd->owner = THIS_MODULE;

#ifdef CONFIG_MTD_PARTITIONS
	ret = parse_mtd_partitions(info->mtd, probes, &parts, 0);

	if (ret > 0) {
		info->nr_parts = ret;
		info->parts = parts;
	}
#endif

	if (info->nr_parts) {
		add_mtd_partitions(info->mtd, info->parts,
				   info->nr_parts);
	} else {
		printk("Registering %s as whole device\n",
		       info->map.name);
		add_mtd_device(info->mtd);
	}

	platform_set_drvdata(pdev, info);
	return 0;
}
Example #7
static int __init init_lubbock(void)
{
	int flashboot = (LUB_CONF_SWITCHES & 1);
	int ret = 0, i;

	lubbock_maps[0].bankwidth = lubbock_maps[1].bankwidth =
		(BOOT_DEF & 1) ? 2 : 4;

	/* Compensate for the nROMBT switch which swaps the flash banks */
	printk(KERN_NOTICE "Lubbock configured to boot from %s (bank %d)\n",
	       flashboot?"Flash":"ROM", flashboot);

	lubbock_maps[flashboot^1].name = "Lubbock Application Flash";
	lubbock_maps[flashboot].name = "Lubbock Boot ROM";

	for (i = 0; i < 2; i++) {
		lubbock_maps[i].virt = ioremap(lubbock_maps[i].phys, WINDOW_SIZE);
		if (!lubbock_maps[i].virt) {
			printk(KERN_WARNING "Failed to ioremap %s\n", lubbock_maps[i].name);
			if (!ret)
				ret = -ENOMEM;
			continue;
		}
		lubbock_maps[i].cached = ioremap_cached(lubbock_maps[i].phys, WINDOW_SIZE);
		if (!lubbock_maps[i].cached)
			printk(KERN_WARNING "Failed to ioremap cached %s\n", lubbock_maps[i].name);
		simple_map_init(&lubbock_maps[i]);

		printk(KERN_NOTICE "Probing %s at physical address 0x%08lx (%d-bit bankwidth)\n",
		       lubbock_maps[i].name, lubbock_maps[i].phys,
		       lubbock_maps[i].bankwidth * 8);

		mymtds[i] = do_map_probe("cfi_probe", &lubbock_maps[i]);

		if (!mymtds[i]) {
			iounmap((void *)lubbock_maps[i].virt);
			if (lubbock_maps[i].cached)
				iounmap(lubbock_maps[i].cached);
			if (!ret)
				ret = -EIO;
			continue;
		}
		mymtds[i]->owner = THIS_MODULE;

		ret = parse_mtd_partitions(mymtds[i], probes,
					   &parsed_parts[i], 0);

		if (ret > 0)
			nr_parsed_parts[i] = ret;
	}

	if (!mymtds[0] && !mymtds[1])
		return ret;

	for (i = 0; i < 2; i++) {
		if (!mymtds[i]) {
			printk(KERN_WARNING "%s is absent. Skipping\n", lubbock_maps[i].name);
		} else if (nr_parsed_parts[i]) {
			add_mtd_partitions(mymtds[i], parsed_parts[i], nr_parsed_parts[i]);
		} else if (!i) {
			printk("Using static partitions on %s\n", lubbock_maps[i].name);
			add_mtd_partitions(mymtds[i], lubbock_partitions, ARRAY_SIZE(lubbock_partitions));
		} else {
			printk("Registering %s as whole device\n", lubbock_maps[i].name);
			add_mtd_device(mymtds[i]);
		}
	}
	return 0;
}
Example #8
int his_modem_load_vxworks(char *part_name)
{
    int ret = 0;
    int offset = 0;
    int skip_len = 0;
    u32 image_total_length = 0;
    void *image_load_addr = 0;
    decompress_fn inflate_fn = NULL;

    struct image_head head;

    hi_trace(HI_INFO, ">>loading:%s.....\r\n", part_name);

    ret = bsp_nand_read(part_name, (FSZ)0, &head, sizeof(struct image_head) , &skip_len);
    if (NAND_OK != ret)
    {
        hi_trace(HI_ERR, "fail to read vxworks image head, error code 0x%x\r\n", ret);
        return NAND_ERROR;
    }

    /*coverity[uninit_use_in_call] */
    if (memcmp(head.image_name, CCORE_IMAGE_NAME, sizeof(CCORE_IMAGE_NAME)))
    {
        hi_trace(HI_ERR, "vxworks image error!!.\r\n");
        return NAND_ERROR;
    }

    /*coverity[uninit_use] */
    if (head.image_length + 2*IDIO_LEN + OEM_CA_LEN > PRODUCT_CFG_FLASH_CCORE_LEN)
    {
        hi_trace(HI_ERR, "loadsize is incorrect, 0x%x!\r\n",
            head.image_length + 2*IDIO_LEN + OEM_CA_LEN);
        return NAND_ERROR;
    }

    /*coverity[uninit_use_in_call] */
    g_ccore_entry = (u32)ioremap_cached(head.load_addr, DDR_MCORE_SIZE - (MCORE_TEXT_START_ADDR - DDR_MCORE_ADDR));
    if(!g_ccore_entry)
    {
        hi_trace(HI_ERR, "ioremap failed.\r\n");
        return NAND_ERROR;
    }

    offset += sizeof(struct image_head) + skip_len;
    image_total_length = (u32)head.image_length + 2*IDIO_LEN + OEM_CA_LEN;

    /*coverity[uninit_use] */
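    /* A compressed image is staged at the tail of the mapped window so that
       decompressing it down to g_ccore_entry cannot overwrite its own input. */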
    if (head.is_compressed)
    {
        image_load_addr = (void*)g_ccore_entry - (MCORE_TEXT_START_ADDR - DDR_MCORE_ADDR)
            + DDR_MCORE_SIZE - image_total_length;
    }
    else
    {
        image_load_addr = (void*)g_ccore_entry;
    }

    ret = bsp_nand_read(part_name, offset, image_load_addr, image_total_length, &skip_len);
    if(NAND_OK != ret)
    {
        hi_trace(HI_ERR, "fail to read vxworks image, error code 0x%x\r\n", ret);
        goto exit;
    }

    ret = bsp_sec_check((u32)image_load_addr, head.image_length);
    if (ret)
    {
        hi_trace(HI_ERR, "fail to check vxworks image, error code 0x%x\r\n", ret);
        goto exit;
    }

    if (head.is_compressed)
    {
        hi_trace(HI_INFO, ">>start to decompress vxworks image ...\r\n");
        inflate_fn = decompress_method((const unsigned char *)image_load_addr, 2, NULL);
        if (inflate_fn)
        {
            ret = inflate_fn((unsigned char*)image_load_addr,
                head.image_length, NULL, NULL, (unsigned char*)g_ccore_entry,
                NULL, (void(*)(char*))printk);
            if (ret)
            {
                hi_trace(HI_ERR, "fail to decompress vxworks image, error code 0x%x\r\n", ret);
                goto exit;
            }
        }
        else
        {
            hi_trace(HI_ERR, "fail to get decompress method\r\n");
            goto exit;
        }
    }

    /* Flush the cached mapping so the freshly loaded image reaches DDR
       before the remote core starts executing it. */
    __dma_single_cpu_to_dev_noverify((const void *)g_ccore_entry,
                     DDR_MCORE_SIZE - (MCORE_TEXT_START_ADDR - DDR_MCORE_ADDR),
                     CACHE_DMA_TO_DEVICE);

    hi_trace(HI_INFO, ">>load vxworks ok, entry %#x, length %#x\r\n", head.load_addr, head.image_length);

exit:
    iounmap((void volatile *)g_ccore_entry);

    return ret;
}
Example #9
/*******************************************************************************
**
**  gckGALDEVICE_Construct
**
**  Constructor.
**
**  INPUT:
**
**  OUTPUT:
**
**      gckGALDEVICE * Device
**          Pointer to a variable receiving the gckGALDEVICE object pointer on
**          success.
*/
gceSTATUS
gckGALDEVICE_Construct(
    IN gctINT IrqLine,
    IN gctUINT32 RegisterMemBase,
    IN gctSIZE_T RegisterMemSize,
    IN gctINT IrqLine2D,
    IN gctUINT32 RegisterMemBase2D,
    IN gctSIZE_T RegisterMemSize2D,
    IN gctINT IrqLineVG,
    IN gctUINT32 RegisterMemBaseVG,
    IN gctSIZE_T RegisterMemSizeVG,
    IN gctUINT32 ContiguousBase,
    IN gctSIZE_T ContiguousSize,
    IN gctSIZE_T BankSize,
    IN gctINT FastClear,
    IN gctINT Compression,
    IN gctUINT32 PhysBaseAddr,
    IN gctUINT32 PhysSize,
    IN gctINT Signal,
    IN gctUINT LogFileSize,
    IN struct device *pdev,
    IN gctINT PowerManagement,
    OUT gckGALDEVICE *Device
    )
{
    gctUINT32 internalBaseAddress = 0, internalAlignment = 0;
    gctUINT32 externalBaseAddress = 0, externalAlignment = 0;
    gctUINT32 horizontalTileSize, verticalTileSize;
    struct resource* mem_region;
    gctUINT32 physAddr;
    gctUINT32 physical;
    gckGALDEVICE device;
    gceSTATUS status;
    gctINT32 i;
    gceHARDWARE_TYPE type;
    gckDB sharedDB = gcvNULL;
    gckKERNEL kernel = gcvNULL;

    gcmkHEADER_ARG("IrqLine=%d RegisterMemBase=0x%08x RegisterMemSize=%u "
                   "IrqLine2D=%d RegisterMemBase2D=0x%08x RegisterMemSize2D=%u "
                   "IrqLineVG=%d RegisterMemBaseVG=0x%08x RegisterMemSizeVG=%u "
                   "ContiguousBase=0x%08x ContiguousSize=%lu BankSize=%lu "
                   "FastClear=%d Compression=%d PhysBaseAddr=0x%x PhysSize=%d Signal=%d",
                   IrqLine, RegisterMemBase, RegisterMemSize,
                   IrqLine2D, RegisterMemBase2D, RegisterMemSize2D,
                   IrqLineVG, RegisterMemBaseVG, RegisterMemSizeVG,
                   ContiguousBase, ContiguousSize, BankSize, FastClear, Compression,
                   PhysBaseAddr, PhysSize, Signal);

    /* Allocate device structure. */
    device = kmalloc(sizeof(struct _gckGALDEVICE), GFP_KERNEL | __GFP_NOWARN);

    if (!device)
    {
        gcmkONERROR(gcvSTATUS_OUT_OF_MEMORY);
    }

    memset(device, 0, sizeof(struct _gckGALDEVICE));

    device->dbgnode = gcvNULL;
    if (LogFileSize != 0)
    {
        if (gckDebugFileSystemCreateNode(LogFileSize, PARENT_FILE, DEBUG_FILE, &(device->dbgnode)) != 0)
        {
            gcmkTRACE_ZONE(
                gcvLEVEL_ERROR, gcvZONE_DRIVER,
                "%s(%d): Failed to create the debug file system %s/%s\n",
                __FUNCTION__, __LINE__,
                PARENT_FILE, DEBUG_FILE
                );
        }
        else
        {
            /* Everything is OK. */
            gckDebugFileSystemSetCurrentNode(device->dbgnode);
        }
    }
#ifdef CONFIG_PM
    /*Init runtime pm for gpu*/
    pm_runtime_enable(pdev);
    device->pmdev = pdev;
#endif

#if LINUX_VERSION_CODE < KERNEL_VERSION(3,5,0)
    /*get gpu regulator*/
    device->gpu_regulator = regulator_get(pdev, "cpu_vddgpu");
    if (IS_ERR(device->gpu_regulator)) {
	gcmkTRACE_ZONE(gcvLEVEL_ERROR, gcvZONE_DRIVER,
		"%s(%d): Failed to get gpu regulator  %s/%s \n",
		__FUNCTION__, __LINE__,
		PARENT_FILE, DEBUG_FILE);
	gcmkONERROR(gcvSTATUS_NOT_FOUND);
    }
#endif
    /*Initialize the clock structure*/
    if (IrqLine != -1) {
        device->clk_3d_core = clk_get(pdev, "gpu3d_clk");
        if (!IS_ERR(device->clk_3d_core)) {
#if LINUX_VERSION_CODE < KERNEL_VERSION(3,5,0)
            if (cpu_is_mx6q()) {
	            device->clk_3d_shader = clk_get(pdev, "gpu3d_shader_clk");
	            if (IS_ERR(device->clk_3d_shader)) {
	                IrqLine = -1;
	                clk_put(device->clk_3d_core);
	                device->clk_3d_core = NULL;
	                device->clk_3d_shader = NULL;
	                gckOS_Print("galcore: clk_get gpu3d_shader_clk failed, disable 3d!\n");
	            }
	          }
#else
	            device->clk_3d_axi = clk_get(pdev, "gpu3d_axi_clk");
	            device->clk_3d_shader = clk_get(pdev, "gpu3d_shader_clk");
	            if (IS_ERR(device->clk_3d_shader)) {
	                IrqLine = -1;
	                clk_put(device->clk_3d_core);
	                device->clk_3d_core = NULL;
	                device->clk_3d_shader = NULL;
	                gckOS_Print("galcore: clk_get gpu3d_shader_clk failed, disable 3d!\n");
	            }
#endif
        } else {
            IrqLine = -1;
            device->clk_3d_core = NULL;
            gckOS_Print("galcore: clk_get gpu3d_clk failed, disable 3d!\n");
        }
    }
    if ((IrqLine2D != -1) || (IrqLineVG != -1)) {
        device->clk_2d_core = clk_get(pdev, "gpu2d_clk");
        if (IS_ERR(device->clk_2d_core)) {
            IrqLine2D = -1;
            IrqLineVG = -1;
            device->clk_2d_core = NULL;
            gckOS_Print("galcore: clk_get 2d core clock failed, disable 2d/vg!\n");
        } else {
	    if (IrqLine2D != -1) {
                device->clk_2d_axi = clk_get(pdev, "gpu2d_axi_clk");
                if (IS_ERR(device->clk_2d_axi)) {
                    device->clk_2d_axi = NULL;
                    IrqLine2D = -1;
                    gckOS_Print("galcore: clk_get 2d axi clock failed, disable 2d\n");
                }
            }
            if (IrqLineVG != -1) {
                device->clk_vg_axi = clk_get(pdev, "openvg_axi_clk");
                if (IS_ERR(device->clk_vg_axi)) {
                    IrqLineVG = -1;
	                device->clk_vg_axi = NULL;
	                gckOS_Print("galcore: clk_get vg clock failed, disable vg!\n");
                }
            }
        }
    }

    if (IrqLine != -1)
    {
        device->requestedRegisterMemBases[gcvCORE_MAJOR]    = RegisterMemBase;
        device->requestedRegisterMemSizes[gcvCORE_MAJOR]    = RegisterMemSize;
    }

    if (IrqLine2D != -1)
    {
        device->requestedRegisterMemBases[gcvCORE_2D]       = RegisterMemBase2D;
        device->requestedRegisterMemSizes[gcvCORE_2D]       = RegisterMemSize2D;
    }

    if (IrqLineVG != -1)
    {
        device->requestedRegisterMemBases[gcvCORE_VG]       = RegisterMemBaseVG;
        device->requestedRegisterMemSizes[gcvCORE_VG]       = RegisterMemSizeVG;
    }

    device->requestedContiguousBase  = 0;
    device->requestedContiguousSize  = 0;


    for (i = 0; i < gcdMAX_GPU_COUNT; i++)
    {
        physical = device->requestedRegisterMemBases[i];

        /* Set up register memory region. */
        if (physical != 0)
        {
            mem_region = request_mem_region(
                physical, device->requestedRegisterMemSizes[i], "galcore register region"
                );

            if (mem_region == gcvNULL)
            {
                gcmkTRACE_ZONE(
                    gcvLEVEL_ERROR, gcvZONE_DRIVER,
                    "%s(%d): Failed to claim %lu bytes @ 0x%08X\n",
                    __FUNCTION__, __LINE__,
                    physical, device->requestedRegisterMemSizes[i]
                    );

                gcmkONERROR(gcvSTATUS_OUT_OF_RESOURCES);
            }

            device->registerBases[i] = (gctPOINTER) ioremap_nocache(
                physical, device->requestedRegisterMemSizes[i]);

            if (device->registerBases[i] == gcvNULL)
            {
                gcmkTRACE_ZONE(
                    gcvLEVEL_ERROR, gcvZONE_DRIVER,
                    "%s(%d): Unable to map %ld bytes @ 0x%08X\n",
                    __FUNCTION__, __LINE__,
                    physical, device->requestedRegisterMemSizes[i]
                    );

                gcmkONERROR(gcvSTATUS_OUT_OF_RESOURCES);
            }

            physical += device->requestedRegisterMemSizes[i];
        }
        else
        {
            device->registerBases[i] = gcvNULL;
        }
    }

    /* Set the base address */
    device->baseAddress = PhysBaseAddr;

    /* Construct the gckOS object. */
    gcmkONERROR(gckOS_Construct(device, &device->os));

    if (IrqLine != -1)
    {
        /* Construct the gckKERNEL object. */
        gcmkONERROR(gckKERNEL_Construct(
            device->os, gcvCORE_MAJOR, device,
            gcvNULL, &device->kernels[gcvCORE_MAJOR]));

        sharedDB = device->kernels[gcvCORE_MAJOR]->db;

        /* Initialize core mapping */
        for (i = 0; i < 8; i++)
        {
            device->coreMapping[i] = gcvCORE_MAJOR;
        }

        /* Setup the ISR manager. */
        gcmkONERROR(gckHARDWARE_SetIsrManager(
            device->kernels[gcvCORE_MAJOR]->hardware,
            (gctISRMANAGERFUNC) gckGALDEVICE_Setup_ISR,
            (gctISRMANAGERFUNC) gckGALDEVICE_Release_ISR,
            device
            ));

        gcmkONERROR(gckHARDWARE_SetFastClear(
            device->kernels[gcvCORE_MAJOR]->hardware, FastClear, Compression
            ));

        gcmkONERROR(gckHARDWARE_SetPowerManagement(
            device->kernels[gcvCORE_MAJOR]->hardware, PowerManagement
            ));

#if COMMAND_PROCESSOR_VERSION == 1
        /* Start the command queue. */
        gcmkONERROR(gckCOMMAND_Start(device->kernels[gcvCORE_MAJOR]->command));
#endif
    }
    else
    {
        device->kernels[gcvCORE_MAJOR] = gcvNULL;
    }

    if (IrqLine2D != -1)
    {
        gcmkONERROR(gckKERNEL_Construct(
            device->os, gcvCORE_2D, device,
            sharedDB, &device->kernels[gcvCORE_2D]));

        if (sharedDB == gcvNULL) sharedDB = device->kernels[gcvCORE_2D]->db;

        /* Verify the hardware type */
        gcmkONERROR(gckHARDWARE_GetType(device->kernels[gcvCORE_2D]->hardware, &type));

        if (type != gcvHARDWARE_2D)
        {
            gcmkTRACE_ZONE(
                gcvLEVEL_ERROR, gcvZONE_DRIVER,
                "%s(%d): Unexpected hardware type: %d\n",
                __FUNCTION__, __LINE__,
                type
                );

            gcmkONERROR(gcvSTATUS_INVALID_ARGUMENT);
        }

        /* Initialize core mapping */
        if (device->kernels[gcvCORE_MAJOR] == gcvNULL)
        {
            for (i = 0; i < 8; i++)
            {
                device->coreMapping[i] = gcvCORE_2D;
            }
        }
        else
        {
            device->coreMapping[gcvHARDWARE_2D] = gcvCORE_2D;
        }

        /* Setup the ISR manager. */
        gcmkONERROR(gckHARDWARE_SetIsrManager(
            device->kernels[gcvCORE_2D]->hardware,
            (gctISRMANAGERFUNC) gckGALDEVICE_Setup_ISR_2D,
            (gctISRMANAGERFUNC) gckGALDEVICE_Release_ISR_2D,
            device
            ));

        gcmkONERROR(gckHARDWARE_SetPowerManagement(
            device->kernels[gcvCORE_2D]->hardware, PowerManagement
            ));

#if COMMAND_PROCESSOR_VERSION == 1
        /* Start the command queue. */
        gcmkONERROR(gckCOMMAND_Start(device->kernels[gcvCORE_2D]->command));
#endif
    }
    else
    {
        device->kernels[gcvCORE_2D] = gcvNULL;
    }

    if (IrqLineVG != -1)
    {
#if gcdENABLE_VG
        gcmkONERROR(gckKERNEL_Construct(
            device->os, gcvCORE_VG, device,
            sharedDB, &device->kernels[gcvCORE_VG]));
        /* Initialize core mapping */
        if (device->kernels[gcvCORE_MAJOR] == gcvNULL
            && device->kernels[gcvCORE_2D] == gcvNULL
            )
        {
            for (i = 0; i < 8; i++)
            {
                device->coreMapping[i] = gcvCORE_VG;
            }
        }
        else
        {
            device->coreMapping[gcvHARDWARE_VG] = gcvCORE_VG;
        }


        gcmkONERROR(gckVGHARDWARE_SetPowerManagement(
            device->kernels[gcvCORE_VG]->vg->hardware,
            PowerManagement
            ));
#endif
    }
    else
    {
        device->kernels[gcvCORE_VG] = gcvNULL;
    }

    /* Initialize the ISR. */
    device->irqLines[gcvCORE_MAJOR] = IrqLine;
    device->irqLines[gcvCORE_2D]    = IrqLine2D;
    device->irqLines[gcvCORE_VG]    = IrqLineVG;

    /* Initialize the kernel thread semaphores. */
    for (i = 0; i < gcdMAX_GPU_COUNT; i++)
    {
        if (device->irqLines[i] != -1) sema_init(&device->semas[i], 0);
    }

    device->signal = Signal;

    for (i = 0; i < gcdMAX_GPU_COUNT; i++)
    {
        if (device->kernels[i] != gcvNULL) break;
    }

    if (i == gcdMAX_GPU_COUNT)
    {
        gcmkONERROR(gcvSTATUS_INVALID_ARGUMENT);
    }

#if gcdENABLE_VG
    if (i == gcvCORE_VG)
    {
        /* Query the ceiling of the system memory. */
        gcmkONERROR(gckVGHARDWARE_QuerySystemMemory(
                device->kernels[i]->vg->hardware,
                &device->systemMemorySize,
                &device->systemMemoryBaseAddress
                ));
            /* query the amount of video memory */
        gcmkONERROR(gckVGHARDWARE_QueryMemory(
            device->kernels[i]->vg->hardware,
            &device->internalSize, &internalBaseAddress, &internalAlignment,
            &device->externalSize, &externalBaseAddress, &externalAlignment,
            &horizontalTileSize, &verticalTileSize
            ));
    }
    else
#endif
    {
        /* Query the ceiling of the system memory. */
        gcmkONERROR(gckHARDWARE_QuerySystemMemory(
                device->kernels[i]->hardware,
                &device->systemMemorySize,
                &device->systemMemoryBaseAddress
                ));

            /* query the amount of video memory */
        gcmkONERROR(gckHARDWARE_QueryMemory(
            device->kernels[i]->hardware,
            &device->internalSize, &internalBaseAddress, &internalAlignment,
            &device->externalSize, &externalBaseAddress, &externalAlignment,
            &horizontalTileSize, &verticalTileSize
            ));
    }


    /* Grab the first available kernel */
    for (i = 0; i < gcdMAX_GPU_COUNT; i++)
    {
        if (device->irqLines[i] != -1)
        {
            kernel = device->kernels[i];
            break;
        }
    }

    /* Set up the internal memory region. */
    if (device->internalSize > 0)
    {
        status = gckVIDMEM_Construct(
            device->os,
            internalBaseAddress, device->internalSize, internalAlignment,
            0, &device->internalVidMem
            );

        if (gcmIS_ERROR(status))
        {
            /* Error, disable internal heap. */
            device->internalSize = 0;
        }
        else
        {
            /* Map internal memory. */
            device->internalLogical
                = (gctPOINTER) ioremap_nocache(physical, device->internalSize);

            if (device->internalLogical == gcvNULL)
            {
                gcmkONERROR(gcvSTATUS_OUT_OF_RESOURCES);
            }

            device->internalPhysical = (gctPHYS_ADDR)(gctUINTPTR_T) physical;
            device->internalPhysicalName = gcmPTR_TO_NAME(device->internalPhysical);
            physical += device->internalSize;
        }
    }

    if (device->externalSize > 0)
    {
        /* create the external memory heap */
        status = gckVIDMEM_Construct(
            device->os,
            externalBaseAddress, device->externalSize, externalAlignment,
            0, &device->externalVidMem
            );

        if (gcmIS_ERROR(status))
        {
            /* Error, disable internal heap. */
            device->externalSize = 0;
        }
        else
        {
            /* Map external memory. */
            device->externalLogical
                = (gctPOINTER) ioremap_nocache(physical, device->externalSize);

            if (device->externalLogical == gcvNULL)
            {
                gcmkONERROR(gcvSTATUS_OUT_OF_RESOURCES);
            }

            device->externalPhysical = (gctPHYS_ADDR)(gctUINTPTR_T) physical;
            device->externalPhysicalName = gcmPTR_TO_NAME(device->externalPhysical);
            physical += device->externalSize;
        }
    }

    /* set up the contiguous memory */
    device->contiguousSize = ContiguousSize;

    if (ContiguousSize > 0)
    {
        if (ContiguousBase == 0)
        {
            while (device->contiguousSize > 0)
            {
                /* Allocate contiguous memory. */
                status = _AllocateMemory(
                    device,
                    device->contiguousSize,
                    &device->contiguousBase,
                    &device->contiguousPhysical,
                    &physAddr
                    );

                if (gcmIS_SUCCESS(status))
                {
                    device->contiguousPhysicalName = gcmPTR_TO_NAME(device->contiguousPhysical);
                    status = gckVIDMEM_Construct(
                        device->os,
                        physAddr | device->systemMemoryBaseAddress,
                        device->contiguousSize,
                        64,
                        BankSize,
                        &device->contiguousVidMem
                        );

                    if (gcmIS_SUCCESS(status))
                    {
                        break;
                    }

                    gcmkONERROR(_FreeMemory(
                        device,
                        device->contiguousBase,
                        device->contiguousPhysical
                        ));

                    gcmRELEASE_NAME(device->contiguousPhysicalName);
                    device->contiguousBase     = gcvNULL;
                    device->contiguousPhysical = gcvNULL;
                }

                if (device->contiguousSize <= (4 << 20))
                {
                    device->contiguousSize = 0;
                }
                else
                {
                    device->contiguousSize -= (4 << 20);
                }
            }
        }
        else
        {
            /* Create the contiguous memory heap. */
            status = gckVIDMEM_Construct(
                device->os,
                ContiguousBase | device->systemMemoryBaseAddress,
                ContiguousSize,
                64, BankSize,
                &device->contiguousVidMem
                );

            if (gcmIS_ERROR(status))
            {
                /* Error, disable contiguous memory pool. */
                device->contiguousVidMem = gcvNULL;
                device->contiguousSize   = 0;
            }
            else
            {
                mem_region = request_mem_region(
                    ContiguousBase, ContiguousSize, "galcore managed memory"
                    );

                if (mem_region == gcvNULL)
                {
                    gcmkTRACE_ZONE(
                        gcvLEVEL_ERROR, gcvZONE_DRIVER,
                        "%s(%d): Failed to claim %ld bytes @ 0x%08X\n",
                        __FUNCTION__, __LINE__,
                        ContiguousSize, ContiguousBase
                        );

                    gcmkONERROR(gcvSTATUS_OUT_OF_RESOURCES);
                }

                device->requestedContiguousBase  = ContiguousBase;
                device->requestedContiguousSize  = ContiguousSize;

#if !gcdDYNAMIC_MAP_RESERVED_MEMORY && gcdENABLE_VG
                if (gcmIS_CORE_PRESENT(device, gcvCORE_VG))
                {
                    device->contiguousBase
#if gcdPAGED_MEMORY_CACHEABLE
                        = (gctPOINTER) ioremap_cached(ContiguousBase, ContiguousSize);
#else
                        = (gctPOINTER) ioremap_nocache(ContiguousBase, ContiguousSize);
#endif
                    if (device->contiguousBase == gcvNULL)
                    {
                        device->contiguousVidMem = gcvNULL;
                        device->contiguousSize = 0;

                        gcmkONERROR(gcvSTATUS_OUT_OF_RESOURCES);
                    }
                }
#endif

                device->contiguousPhysical = gcvNULL;
                device->contiguousPhysicalName = 0;
                device->contiguousSize     = ContiguousSize;
                device->contiguousMapped   = gcvTRUE;
            }
        }
    }
Example #10
static int __init init_mainstone(void)
{
	int SW7 = 0;  /* FIXME: get from SCR (Mst doc section 3.2.1.1) */
	int ret = 0, i;

	mainstone_maps[0].bankwidth = (BOOT_DEF & 1) ? 2 : 4;
	mainstone_maps[1].bankwidth = 4;

	/* Compensate for SW7 which swaps the flash banks */
	mainstone_maps[SW7].name = "processor flash";
	mainstone_maps[SW7 ^ 1].name = "main board flash";

	printk(KERN_NOTICE "Mainstone configured to boot from %s\n",
	       mainstone_maps[0].name);

	for (i = 0; i < 2; i++) {
		mainstone_maps[i].virt = ioremap(mainstone_maps[i].phys,
						 WINDOW_SIZE);
		if (!mainstone_maps[i].virt) {
			printk(KERN_WARNING "Failed to ioremap %s\n",
			       mainstone_maps[i].name);
			if (!ret)
				ret = -ENOMEM;
			continue;
		}
		mainstone_maps[i].cached =
			ioremap_cached(mainstone_maps[i].phys, WINDOW_SIZE);
		if (!mainstone_maps[i].cached)
			printk(KERN_WARNING "Failed to ioremap cached %s\n",
			       mainstone_maps[i].name);
		simple_map_init(&mainstone_maps[i]);

		printk(KERN_NOTICE
		       "Probing %s at physical address 0x%08lx"
		       " (%d-bit bankwidth)\n",
		       mainstone_maps[i].name, mainstone_maps[i].phys,
		       mainstone_maps[i].bankwidth * 8);

		mymtds[i] = do_map_probe("cfi_probe", &mainstone_maps[i]);

		if (!mymtds[i]) {
			iounmap((void *)mainstone_maps[i].virt);
			if (mainstone_maps[i].cached)
				iounmap(mainstone_maps[i].cached);
			if (!ret)
				ret = -EIO;
			continue;
		}
		mymtds[i]->owner = THIS_MODULE;

		ret = parse_mtd_partitions(mymtds[i], probes,
					   &parsed_parts[i], 0);

		if (ret > 0)
			nr_parsed_parts[i] = ret;
	}

	if (!mymtds[0] && !mymtds[1])
		return ret;

	for (i = 0; i < 2; i++) {
		if (!mymtds[i]) {
			printk(KERN_WARNING "%s is absent. Skipping\n",
			       mainstone_maps[i].name);
		} else if (nr_parsed_parts[i]) {
			add_mtd_partitions(mymtds[i], parsed_parts[i],
					   nr_parsed_parts[i]);
		} else if (!i) {
			printk("Using static partitions on %s\n",
			       mainstone_maps[i].name);
			add_mtd_partitions(mymtds[i], mainstone_partitions,
					   ARRAY_SIZE(mainstone_partitions));
		} else {
			printk("Registering %s as whole device\n",
			       mainstone_maps[i].name);
			add_mtd_device(mymtds[i]);
		}
	}
	return 0;
}
Example #11
/*******************************************************************************
**
**  gckGALDEVICE_Construct
**
**  Constructor.
**
**  INPUT:
**
**  OUTPUT:
**
**      gckGALDEVICE * Device
**          Pointer to a variable receiving the gckGALDEVICE object pointer on
**          success.
*/
gceSTATUS
gckGALDEVICE_Construct(
    IN gctINT IrqLine,
    IN gctUINT32 RegisterMemBase,
    IN gctSIZE_T RegisterMemSize,
    IN gctINT IrqLine2D,
    IN gctUINT32 RegisterMemBase2D,
    IN gctSIZE_T RegisterMemSize2D,
    IN gctINT IrqLineVG,
    IN gctUINT32 RegisterMemBaseVG,
    IN gctSIZE_T RegisterMemSizeVG,
    IN gctUINT32 ContiguousBase,
    IN gctSIZE_T ContiguousSize,
    IN gctSIZE_T BankSize,
    IN gctINT FastClear,
    IN gctINT Compression,
    IN gctUINT32 PhysBaseAddr,
    IN gctUINT32 PhysSize,
    IN gctINT Signal,
    OUT gckGALDEVICE *Device
    )
{
    gctUINT32 internalBaseAddress = 0, internalAlignment = 0;
    gctUINT32 externalBaseAddress = 0, externalAlignment = 0;
    gctUINT32 horizontalTileSize, verticalTileSize;
    struct resource* mem_region;
    gctUINT32 physAddr;
    gctUINT32 physical;
    gckGALDEVICE device;
    gceSTATUS status;
    gctINT32 i;
    gceHARDWARE_TYPE type;
    gckDB sharedDB = gcvNULL;

    gcmkHEADER_ARG("IrqLine=%d RegisterMemBase=0x%08x RegisterMemSize=%u "
                   "IrqLine2D=%d RegisterMemBase2D=0x%08x RegisterMemSize2D=%u "
                   "IrqLineVG=%d RegisterMemBaseVG=0x%08x RegisterMemSizeVG=%u "
                   "ContiguousBase=0x%08x ContiguousSize=%lu BankSize=%lu "
                   "FastClear=%d Compression=%d PhysBaseAddr=0x%x PhysSize=%d Signal=%d",
                   IrqLine, RegisterMemBase, RegisterMemSize,
                   IrqLine2D, RegisterMemBase2D, RegisterMemSize2D,
                   IrqLineVG, RegisterMemBaseVG, RegisterMemSizeVG,
                   ContiguousBase, ContiguousSize, BankSize, FastClear, Compression,
                   PhysBaseAddr, PhysSize, Signal);

    /* Allocate device structure. */
    device = kmalloc(sizeof(struct _gckGALDEVICE), GFP_KERNEL | __GFP_NOWARN);

    if (!device)
    {
        gcmkONERROR(gcvSTATUS_OUT_OF_MEMORY);
    }

    memset(device, 0, sizeof(struct _gckGALDEVICE));

    if (IrqLine != -1)
    {
        device->requestedRegisterMemBases[gcvCORE_MAJOR]    = RegisterMemBase;
        device->requestedRegisterMemSizes[gcvCORE_MAJOR]    = RegisterMemSize;
    }

    if (IrqLine2D != -1)
    {
        device->requestedRegisterMemBases[gcvCORE_2D]       = RegisterMemBase2D;
        device->requestedRegisterMemSizes[gcvCORE_2D]       = RegisterMemSize2D;
    }

    if (IrqLineVG != -1)
    {
        device->requestedRegisterMemBases[gcvCORE_VG]       = RegisterMemBaseVG;
        device->requestedRegisterMemSizes[gcvCORE_VG]       = RegisterMemSizeVG;
    }

    device->requestedContiguousBase  = 0;
    device->requestedContiguousSize  = 0;


    for (i = 0; i < gcdCORE_COUNT; i++)
    {
        physical = device->requestedRegisterMemBases[i];

        /* Set up register memory region. */
        if (physical != 0)
        {
            mem_region = request_mem_region(
                physical, device->requestedRegisterMemSizes[i], "galcore register region"
                );

#if 0
            if (mem_region == gcvNULL)
            {
                gcmkTRACE_ZONE(
                    gcvLEVEL_ERROR, gcvZONE_DRIVER,
                    "%s(%d): Failed to claim %lu bytes @ 0x%08X\n",
                    __FUNCTION__, __LINE__,
                    physical, device->requestedRegisterMemSizes[i]
                    );

                gcmkONERROR(gcvSTATUS_OUT_OF_RESOURCES);
            }
#endif

            device->registerBases[i] = (gctPOINTER) ioremap_nocache(
                physical, device->requestedRegisterMemSizes[i]);

            if (device->registerBases[i] == gcvNULL)
            {
                gcmkTRACE_ZONE(
                    gcvLEVEL_ERROR, gcvZONE_DRIVER,
                    "%s(%d): Unable to map %ld bytes @ 0x%08X\n",
                    __FUNCTION__, __LINE__,
                    physical, device->requestedRegisterMemSizes[i]
                    );

                gcmkONERROR(gcvSTATUS_OUT_OF_RESOURCES);
            }

            physical += device->requestedRegisterMemSizes[i];
        }
        else
        {
            device->registerBases[i] = gcvNULL;
        }
    }

    /* Set the base address */
    device->baseAddress = PhysBaseAddr;

    /* Construct the gckOS object. */
    gcmkONERROR(gckOS_Construct(device, &device->os));

    if (IrqLine != -1)
    {
        /* Construct the gckKERNEL object. */
        gcmkONERROR(gckKERNEL_Construct(
            device->os, gcvCORE_MAJOR, device,
            gcvNULL, &device->kernels[gcvCORE_MAJOR]));

        sharedDB = device->kernels[gcvCORE_MAJOR]->db;

        /* Initialize core mapping */
        for (i = 0; i < 8; i++)
        {
            device->coreMapping[i] = gcvCORE_MAJOR;
        }

        /* Setup the ISR manager. */
        gcmkONERROR(gckHARDWARE_SetIsrManager(
            device->kernels[gcvCORE_MAJOR]->hardware,
            (gctISRMANAGERFUNC) gckGALDEVICE_Setup_ISR,
            (gctISRMANAGERFUNC) gckGALDEVICE_Release_ISR,
            device
            ));

        gcmkONERROR(gckHARDWARE_SetFastClear(
            device->kernels[gcvCORE_MAJOR]->hardware, FastClear, Compression
            ));


#if COMMAND_PROCESSOR_VERSION == 1
        /* Start the command queue. */
        gcmkONERROR(gckCOMMAND_Start(device->kernels[gcvCORE_MAJOR]->command));
#endif
    }
    else
    {
        device->kernels[gcvCORE_MAJOR] = gcvNULL;
    }

    if (IrqLine2D != -1)
    {
        gcmkONERROR(gckKERNEL_Construct(
            device->os, gcvCORE_2D, device,
            sharedDB, &device->kernels[gcvCORE_2D]));

        if (sharedDB == gcvNULL) sharedDB = device->kernels[gcvCORE_2D]->db;

        /* Verify the hardware type */
        gcmkONERROR(gckHARDWARE_GetType(device->kernels[gcvCORE_2D]->hardware, &type));

        if (type != gcvHARDWARE_2D)
        {
            gcmkTRACE_ZONE(
                gcvLEVEL_ERROR, gcvZONE_DRIVER,
                "%s(%d): Unexpected hardware type: %d\n",
                __FUNCTION__, __LINE__,
                type
                );

            gcmkONERROR(gcvSTATUS_INVALID_ARGUMENT);
        }

        /* Initialize core mapping */
        if (device->kernels[gcvCORE_MAJOR] == gcvNULL)
        {
            for (i = 0; i < 8; i++)
            {
                device->coreMapping[i] = gcvCORE_2D;
            }
        }
        else
        {
            device->coreMapping[gcvHARDWARE_2D] = gcvCORE_2D;
        }

        /* Setup the ISR manager. */
        gcmkONERROR(gckHARDWARE_SetIsrManager(
            device->kernels[gcvCORE_2D]->hardware,
            (gctISRMANAGERFUNC) gckGALDEVICE_Setup_ISR_2D,
            (gctISRMANAGERFUNC) gckGALDEVICE_Release_ISR_2D,
            device
            ));

#if COMMAND_PROCESSOR_VERSION == 1
        /* Start the command queue. */
        gcmkONERROR(gckCOMMAND_Start(device->kernels[gcvCORE_2D]->command));
#endif
    }
    else
    {
        device->kernels[gcvCORE_2D] = gcvNULL;
    }

    if (IrqLineVG != -1)
    {
#if gcdENABLE_VG
        gcmkONERROR(gckKERNEL_Construct(
            device->os, gcvCORE_VG, device,
            sharedDB, &device->kernels[gcvCORE_VG]));
        /* Initialize core mapping */
        if (device->kernels[gcvCORE_MAJOR] == gcvNULL
            && device->kernels[gcvCORE_2D] == gcvNULL
            )
        {
            for (i = 0; i < 8; i++)
            {
                device->coreMapping[i] = gcvCORE_VG;
            }
        }
        else
        {
            device->coreMapping[gcvHARDWARE_VG] = gcvCORE_VG;
        }

#endif
    }
    else
    {
        device->kernels[gcvCORE_VG] = gcvNULL;
    }

    /* Initialize the ISR. */
    device->irqLines[gcvCORE_MAJOR] = IrqLine;
    device->irqLines[gcvCORE_2D]    = IrqLine2D;
    device->irqLines[gcvCORE_VG]    = IrqLineVG;

    /* Initialize the kernel thread semaphores. */
    for (i = 0; i < gcdCORE_COUNT; i++)
    {
        if (device->irqLines[i] != -1) sema_init(&device->semas[i], 0);
    }

    device->signal = Signal;

    for (i = 0; i < gcdCORE_COUNT; i++)
    {
        if (device->kernels[i] != gcvNULL) break;
    }

    if (i == gcdCORE_COUNT) gcmkONERROR(gcvSTATUS_INVALID_ARGUMENT);

#if gcdENABLE_VG
    if (i == gcvCORE_VG)
    {
        /* Query the ceiling of the system memory. */
        gcmkONERROR(gckVGHARDWARE_QuerySystemMemory(
                device->kernels[i]->vg->hardware,
                &device->systemMemorySize,
                &device->systemMemoryBaseAddress
                ));
            /* query the amount of video memory */
        gcmkONERROR(gckVGHARDWARE_QueryMemory(
            device->kernels[i]->vg->hardware,
            &device->internalSize, &internalBaseAddress, &internalAlignment,
            &device->externalSize, &externalBaseAddress, &externalAlignment,
            &horizontalTileSize, &verticalTileSize
            ));
    }
    else
#endif
    {
        /* Query the ceiling of the system memory. */
        gcmkONERROR(gckHARDWARE_QuerySystemMemory(
                device->kernels[i]->hardware,
                &device->systemMemorySize,
                &device->systemMemoryBaseAddress
                ));

            /* query the amount of video memory */
        gcmkONERROR(gckHARDWARE_QueryMemory(
            device->kernels[i]->hardware,
            &device->internalSize, &internalBaseAddress, &internalAlignment,
            &device->externalSize, &externalBaseAddress, &externalAlignment,
            &horizontalTileSize, &verticalTileSize
            ));
    }


    /* Set up the internal memory region. */
    if (device->internalSize > 0)
    {
        status = gckVIDMEM_Construct(
            device->os,
            internalBaseAddress, device->internalSize, internalAlignment,
            0, &device->internalVidMem
            );

        if (gcmIS_ERROR(status))
        {
            /* Error, disable internal heap. */
            device->internalSize = 0;
        }
        else
        {
            /* Map internal memory. */
            device->internalLogical
                = (gctPOINTER) ioremap_nocache(physical, device->internalSize);

            if (device->internalLogical == gcvNULL)
            {
                gcmkONERROR(gcvSTATUS_OUT_OF_RESOURCES);
            }

            device->internalPhysical = (gctPHYS_ADDR) physical;
            physical += device->internalSize;
        }
    }

    if (device->externalSize > 0)
    {
        /* create the external memory heap */
        status = gckVIDMEM_Construct(
            device->os,
            externalBaseAddress, device->externalSize, externalAlignment,
            0, &device->externalVidMem
            );

        if (gcmIS_ERROR(status))
        {
            /* Error, disable internal heap. */
            device->externalSize = 0;
        }
        else
        {
            /* Map external memory. */
            device->externalLogical
                = (gctPOINTER) ioremap_nocache(physical, device->externalSize);

            if (device->externalLogical == gcvNULL)
            {
                gcmkONERROR(gcvSTATUS_OUT_OF_RESOURCES);
            }

            device->externalPhysical = (gctPHYS_ADDR) physical;
            physical += device->externalSize;
        }
    }

    /* set up the contiguous memory */
    device->contiguousSize = ContiguousSize;

    if (ContiguousSize > 0)
    {
        if (ContiguousBase == 0)
        {
            while (device->contiguousSize > 0)
            {
                /* Allocate contiguous memory. */
                status = _AllocateMemory(
                    device,
                    device->contiguousSize,
                    &device->contiguousBase,
                    &device->contiguousPhysical,
                    &physAddr
                    );

                if (gcmIS_SUCCESS(status))
                {
                    status = gckVIDMEM_Construct(
                        device->os,
                        physAddr | device->systemMemoryBaseAddress,
                        device->contiguousSize,
                        64,
                        BankSize,
                        &device->contiguousVidMem
                        );

                    if (gcmIS_SUCCESS(status))
                    {
                        break;
                    }

                    gcmkONERROR(_FreeMemory(
                        device,
                        device->contiguousBase,
                        device->contiguousPhysical
                        ));

                    device->contiguousBase     = gcvNULL;
                    device->contiguousPhysical = gcvNULL;
                }

                if (device->contiguousSize <= (4 << 20))
                {
                    device->contiguousSize = 0;
                }
                else
                {
                    device->contiguousSize -= (4 << 20);
                }
            }
        }
        else
        {
            /* Create the contiguous memory heap. */
            status = gckVIDMEM_Construct(
                device->os,
                (ContiguousBase - device->baseAddress) | device->systemMemoryBaseAddress,
                 ContiguousSize,
                64, BankSize,
                &device->contiguousVidMem
                );

            if (gcmIS_ERROR(status))
            {
                /* Error, disable contiguous memory pool. */
                device->contiguousVidMem = gcvNULL;
                device->contiguousSize   = 0;
            }
            else
            {
                mem_region = request_mem_region(
                    ContiguousBase, ContiguousSize, "galcore managed memory"
                    );

#if 0
                if (mem_region == gcvNULL)
                {
                    gcmkTRACE_ZONE(
                        gcvLEVEL_ERROR, gcvZONE_DRIVER,
                        "%s(%d): Failed to claim %ld bytes @ 0x%08X\n",
                        __FUNCTION__, __LINE__,
                        ContiguousSize, ContiguousBase
                        );

                    gcmkONERROR(gcvSTATUS_OUT_OF_RESOURCES);
                }
#endif

                device->requestedContiguousBase  = ContiguousBase;
                device->requestedContiguousSize  = ContiguousSize;

#if !gcdDYNAMIC_MAP_RESERVED_MEMORY && gcdENABLE_VG
                if (gcmIS_CORE_PRESENT(device, gcvCORE_VG))
                {
                    device->contiguousBase
#if gcdPAGED_MEMORY_CACHEABLE
                        = (gctPOINTER) ioremap_cached(ContiguousBase, ContiguousSize);
#else
                        = (gctPOINTER) ioremap_nocache(ContiguousBase, ContiguousSize);
#endif
                    if (device->contiguousBase == gcvNULL)
                    {
                        device->contiguousVidMem = gcvNULL;
                        device->contiguousSize = 0;

                        gcmkONERROR(gcvSTATUS_OUT_OF_RESOURCES);
                    }
                }
#endif

                device->contiguousPhysical = (gctPHYS_ADDR) ContiguousBase;
                device->contiguousSize     = ContiguousSize;
                device->contiguousMapped   = gcvTRUE;
            }
        }
    }
Example #12
DPI_STATUS DPI_Capture_Framebuffer(unsigned int pvbuf, unsigned int bpp)
{
#if 0
    unsigned int i = 0;
    unsigned char *fbv;
    unsigned int fbsize = 0;
    unsigned int dpi_fb_bpp = 0;
    unsigned int w,h;
	BOOL dpi_needPowerOff = FALSE;
	if(!s_isDpiPowerOn){
		DPI_PowerOn();
		dpi_needPowerOff = TRUE;
		LCD_WaitForNotBusy();
	    LCD_WaitDPIIndication(FALSE);
		LCD_FBReset();
    	LCD_StartTransfer(TRUE);
		LCD_WaitDPIIndication(TRUE);
	}

    if(pvbuf == 0 || bpp == 0)
    {
        DISP_LOG_PRINT(ANDROID_LOG_ERROR, "DPI", "DPI_Capture_Framebuffer, ERROR, parameters wrong: pvbuf=0x%08x, bpp=%d\n", pvbuf, bpp);
        return DPI_STATUS_OK;
    }

    if(DPI_FBGetFormat() == DPI_FB_FORMAT_RGB565)
    {
        dpi_fb_bpp = 16;
    }
    else if(DPI_FBGetFormat() == DPI_FB_FORMAT_RGB888)
    {
        dpi_fb_bpp = 24;
    }
    else
    {
        DISP_LOG_PRINT(ANDROID_LOG_ERROR, "DPI", "DPI_Capture_Framebuffer, ERROR, dpi_fb_bpp is wrong: %d\n", dpi_fb_bpp);
        return DPI_STATUS_OK;
    }

    w = DISP_GetScreenWidth();
    h = DISP_GetScreenHeight();
    fbsize = w*h*dpi_fb_bpp/8;
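	/* If DPI was powered on just for this capture, read FB[0]; otherwise
	   read the framebuffer that is currently being scanned out. */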
	if(dpi_needPowerOff)
    	fbv = (unsigned char*)ioremap_cached((unsigned int)DPI_REG->FB[0].ADDR, fbsize);
	else
    	fbv = (unsigned char*)ioremap_cached((unsigned int)DPI_REG->FB[DPI_GetCurrentFB()].ADDR, fbsize);
 
    DISP_LOG_PRINT(ANDROID_LOG_INFO, "DPI", "current fb count is %d\n", DPI_GetCurrentFB());

    if(bpp == 32 && dpi_fb_bpp == 24)
    {			
    	if(0 == strncmp(MTK_LCM_PHYSICAL_ROTATION, "180", 3)){
			unsigned int pix_count = w * h - 1;
    		for(i = 0;i < w*h; i++)
    		{
            	*(unsigned int*)(pvbuf+ (pix_count - i) * 4) = 0xff000000|fbv[i*3]|(fbv[i*3+1]<<8)|(fbv[i*3+2]<<16);
    		}
		}
		else{
    		for(i = 0;i < w*h; i++)
    		{
            	*(unsigned int*)(pvbuf+i*4) = 0xff000000|fbv[i*3]|(fbv[i*3+1]<<8)|(fbv[i*3+2]<<16);
    		}
		}
    }
    else if(bpp == 32 && dpi_fb_bpp == 16)
    {
        unsigned int t;
		unsigned short* fbvt = (unsigned short*)fbv;
    	
		if(0 == strncmp(MTK_LCM_PHYSICAL_ROTATION, "180", 3)){
			unsigned int pix_count = w * h - 1;
			
    		for(i = 0;i < w*h; i++)
    		{
				t = fbvt[i];
            	*(unsigned int*)(pvbuf+ (pix_count - i) * 4) = 0xff000000|((t&0x001F)<<3)|((t&0x07E0)<<5)|((t&0xF800)<<8);
    		}
		}
		else{
        	for(i = 0;i < w*h; i++)
    		{
	    		t = fbvt[i];
            	*(unsigned int*)(pvbuf+i*4) = 0xff000000|((t&0x001F)<<3)|((t&0x07E0)<<5)|((t&0xF800)<<8);
    		}
		}
    }
    else if(bpp == 16 && dpi_fb_bpp == 16)
    {
		if(0 == strncmp(MTK_LCM_PHYSICAL_ROTATION, "180", 3)){
			unsigned int pix_count = w * h - 1;
			unsigned short* fbvt = (unsigned short*)fbv;
    		for(i = 0;i < w*h; i++)
    		{
            	*(unsigned short*)(pvbuf+ (pix_count - i) * 2) = fbvt[i];
    		}
		}
		else
    		memcpy((void*)pvbuf, (void*)fbv, fbsize);
    }
    else if(bpp == 16 && dpi_fb_bpp == 24)
    {
		if(0 == strncmp(MTK_LCM_PHYSICAL_ROTATION, "180", 3)){
			unsigned int pix_count = w * h - 1;
    		for(i = 0;i < w*h; i++)
    		{
            	*(unsigned short*)(pvbuf+ (pix_count - i) * 2) = ((fbv[i*3+0]&0xF8)>>3)|
	            	                        				((fbv[i*3+1]&0xFC)<<3)|
														    ((fbv[i*3+2]&0xF8)<<8);
    		}
		}
		else{
    		for(i = 0;i < w*h; i++)
Example #13
static int __init init_csbxxx(void)
{
  int ret = 0;
  const char *part_type = 0;

  csbxxx_map.virt = ioremap(csbxxx_map.phys, WINDOW_SIZE);
  if (!csbxxx_map.virt) {
    printk(KERN_WARNING "Failed to ioremap %s, MTD disabled\n", csbxxx_map.name);
    ret = -ENOMEM;
    goto err;
  }
  csbxxx_map.cached = ioremap_cached(csbxxx_map.phys, WINDOW_SIZE);
  if (!csbxxx_map.cached)
    printk(KERN_WARNING "Failed to ioremap cached %s\n", csbxxx_map.name);

  simple_map_init(&csbxxx_map);
  
  printk(KERN_NOTICE "Probing %s at physical address 0x%08lx (%d-bit bankwidth)\n",
	 csbxxx_map.name, csbxxx_map.phys, csbxxx_map.bankwidth * 8);
  
  mymtd = do_map_probe("cfi_probe", &csbxxx_map);
  
  if (!mymtd)
    goto err;

  mymtd->owner = THIS_MODULE;
  
  mtd_parts_nb = parse_mtd_partitions(mymtd, probes, &mtd_parts, 0);

  if (mtd_parts_nb > 0)
    part_type = "command line";
  else if (mtd_parts_nb == 0)
    {
      mtd_parts = csbxxx_partitions;
      mtd_parts_nb = NB_OF(csbxxx_partitions);
      part_type = "static";
    }
  else goto err;

#if 1
#warning "TODO: is add_mtd_device needed?"
#else
  add_mtd_device(mymtd);
#endif
  if (mtd_parts_nb == 0)
    printk(KERN_NOTICE MSG_PREFIX "no partition info available\n");
  else
    {
      printk(KERN_NOTICE MSG_PREFIX
	     "using %s partition definition\n", part_type);
      add_mtd_partitions(mymtd, mtd_parts, mtd_parts_nb);
    }
  
  return 0;

err:
  if (csbxxx_map.virt)
    iounmap(csbxxx_map.virt);
  if (csbxxx_map.cached)
    iounmap(csbxxx_map.cached);
  if (!ret)
    ret = -EIO;

  return ret;
}
Example #14
static int __init s3c_cmm_init(void)
{
	HANDLE 		h_Mutex;
	int			ret;
	FREE_MEM_T	*node;
	ALLOC_MEM_T	*alloc_node;
	
	
	printk(banner);

	// Mutex initialization
	h_Mutex = CreateCMMmutex();
	if (h_Mutex == NULL) 
	{
		LOG_MSG(LOG_ERROR, "s3c_cmm_init", "DD::CMM Mutex Initialize error\r\n");
		return FALSE;
	}

	ret = LockCMMMutex();

	ret = misc_register(&s3c_cmm_miscdev);


	// First 4MB will use cacheable memory
	CachedVirAddr = (unsigned char *)ioremap_cached( (unsigned long)CODEC_MEM_START, 		\
					(int)CODEC_CACHED_MEM_SIZE );

	// Second 4MB will use non-cacheable memory
	NonCachedVirAddr = (unsigned char *)ioremap_nocache( (unsigned long)(CODEC_MEM_START + 	\
					CODEC_CACHED_MEM_SIZE), (int)CODEC_NON_CACHED_MEM_SIZE );

	// init alloc list, if(AllocMemHead == AllocMemTail) then, the list is NULL
	alloc_node = (ALLOC_MEM_T *)kmalloc(sizeof(ALLOC_MEM_T), GFP_KERNEL);
	memset(alloc_node, 0x00, sizeof(ALLOC_MEM_T));
	alloc_node->next = alloc_node;
	alloc_node->prev = alloc_node;
	AllocMemHead = alloc_node;
	AllocMemTail = AllocMemHead;

	// init free list, if(FreeMemHead == FreeMemTail) then, the list is NULL
	node = (FREE_MEM_T *)kmalloc(sizeof(FREE_MEM_T), GFP_KERNEL);
	memset(node, 0x00, sizeof(FREE_MEM_T));
	node->next = node;
	node->prev = node;
	FreeMemHead = node;
	FreeMemTail = FreeMemHead;

	node = (FREE_MEM_T *)kmalloc(sizeof(FREE_MEM_T), GFP_KERNEL);
	memset(node, 0x00, sizeof(FREE_MEM_T));
	node->startAddr = CODEC_MEM_START;
	node->cacheFlag = 1;
	node->size = CODEC_CACHED_MEM_SIZE;
	InsertNodeToFreeList(node, -1);

	node = (FREE_MEM_T *)kmalloc(sizeof(FREE_MEM_T), GFP_KERNEL);
	memset(node, 0x00, sizeof(FREE_MEM_T));
	node->startAddr = CODEC_MEM_START + CODEC_CACHED_MEM_SIZE;
	node->cacheFlag = 0;
	node->size = CODEC_NON_CACHED_MEM_SIZE;
	InsertNodeToFreeList(node, -1);

	UnlockCMMMutex();

	return 0;
}
Example #15
DPI_STATUS DPI_Capture_Framebuffer(unsigned int pvbuf, unsigned int bpp)
{
    unsigned int i = 0;
    unsigned char *fbv;
    unsigned int fbsize = 0;
    unsigned int dpi_fb_bpp = 0;
    unsigned int w,h;

	if(s_isDpiPowerOn == false)
	{
		DPI_PowerOn();
	}

    if(pvbuf == 0 || bpp == 0)
    {
        printk("DPI_Capture_Framebuffer, ERROR, parameters wrong: pvbuf=0x%08x, bpp=%d\n", pvbuf, bpp);
        return DPI_STATUS_OK;
    }

    if(DPI_FBGetFormat() == DPI_FB_FORMAT_RGB565)
    {
        dpi_fb_bpp = 16;
    }
    else if(DPI_FBGetFormat() == DPI_FB_FORMAT_RGB888)
    {
        dpi_fb_bpp = 24;
    }
    else
    {
        printk("DPI_Capture_Framebuffer, ERROR, dpi_fb_bpp is wrong: %d\n", dpi_fb_bpp);
        return DPI_STATUS_OK;
    }

    w = DISP_GetScreenWidth();
    h = DISP_GetScreenHeight();
    fbsize = w*h*dpi_fb_bpp/8;

    fbv = (unsigned char*)ioremap_cached((unsigned int)DPI_REG->FB[DPI_GetCurrentFB()].ADDR, fbsize);
 
    printk("current fb count is %d\n", DPI_GetCurrentFB());

    if(bpp == 32 && dpi_fb_bpp == 24)
    {
    	for(i = 0;i < w*h; i++)
    	{
            *(unsigned int*)(pvbuf+i*4) = 0xff000000|fbv[i*3]|(fbv[i*3+1]<<8)|(fbv[i*3+2]<<16);
    	}
    }
    else if(bpp == 32 && dpi_fb_bpp == 16)
    {
        unsigned int t;
	unsigned short* fbvt = (unsigned short*)fbv;
        for(i = 0;i < w*h; i++)
    	{
	    t = fbvt[i];
            *(unsigned int*)(pvbuf+i*4) = 0xff000000|((t&0x001F)<<3)|((t&0x07E0)<<5)|((t&0xF800)<<8);
    	}
    }
    else if(bpp == 16 && dpi_fb_bpp == 16)
    {
    	memcpy((void*)pvbuf, (void*)fbv, fbsize);
    }
    else if(bpp == 16 && dpi_fb_bpp == 24)
    {
    	for(i = 0;i < w*h; i++)
    	{
            *(unsigned short*)(pvbuf+i*2) = ((fbv[i*3+0]&0xF8)>>3)|
                                            ((fbv[i*3+1]&0xFC)<<3)|
                                            ((fbv[i*3+2]&0xF8)<<8);
    	}
    }
    else
    {
Example #16
/* register_l2cc_mutex - Negotiate/Disable outer cache shared mutex */
static int register_l2cc_mutex(bool reg)
{
	unsigned long *vaddr = NULL;
	int ret = 0;
	struct smc_param param;
	uintptr_t paddr = 0;

	if ((reg == true) && (tz_outer_cache_mutex != NULL)) {
		dev_err(DEV, "outer cache shared mutex already registered\n");
		return -EINVAL;
	}
	if ((reg == false) && (tz_outer_cache_mutex == NULL))
		return 0;

	mutex_lock(&e_mutex_teez);

	if (reg == false) {
		vaddr = tz_outer_cache_mutex;
		tz_outer_cache_mutex = NULL;
		goto out;
	}

	memset(&param, 0, sizeof(param));
	param.a0 = TEESMC32_ST_FASTCALL_L2CC_MUTEX;
	param.a1 = TEESMC_ST_L2CC_MUTEX_GET_ADDR;
	tee_smc_call(&param);

	if (param.a0 != TEESMC_RETURN_OK) {
		dev_warn(DEV, "no TZ l2cc mutex service supported\n");
		goto out;
	}
	paddr = param.a2;
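	/* The secure world returned the physical address of its mutex word:
	 * map it cached and hand it to the outer cache driver before asking
	 * TZ to start using it. */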

	vaddr = ioremap_cached(paddr, sizeof(u32));
	if (vaddr == NULL) {
		dev_warn(DEV, "TZ l2cc mutex disabled: ioremap failed\n");
		ret = -ENOMEM;
		goto out;
	}

	if (outer_tz_mutex(vaddr) == false) {
		dev_warn(DEV, "TZ l2cc mutex disabled: outer cache refused\n");
		goto out;
	}

	memset(&param, 0, sizeof(param));
	param.a0 = TEESMC32_ST_FASTCALL_L2CC_MUTEX;
	param.a1 = TEESMC_ST_L2CC_MUTEX_ENABLE;
	tee_smc_call(&param);

	if (param.a0 != TEESMC_RETURN_OK) {
		dev_warn(DEV, "TZ l2cc mutex disabled: TZ enable failed\n");
		goto out;
	}
	tz_outer_cache_mutex = vaddr;

out:
	if (tz_outer_cache_mutex == NULL) {
		memset(&param, 0, sizeof(param));
		param.a0 = TEESMC32_ST_FASTCALL_L2CC_MUTEX;
		param.a1 = TEESMC_ST_L2CC_MUTEX_DISABLE;
		tee_smc_call(&param);
		outer_tz_mutex(NULL);
		if (vaddr)
			iounmap(vaddr);
		dev_info(DEV, "outer cache shared mutex disabled\n");
	}

	mutex_unlock(&e_mutex_teez);
	dev_dbg(DEV, "teetz outer mutex: ret=%d pa=0x%lX va=0x%p %sabled\n",
		ret, paddr, vaddr, tz_outer_cache_mutex ? "en" : "dis");
	return ret;
}