static int speedy_drv_load (void *buf, size_t size)
{
	gpGenStorageApiAddr_t *pGpGenStorageApi;
	pnandprofileheader_t *bootHeader;
	int ret = 0, i;
	int nand_block_size = 0;
	u32 hibdrv_area_size = 0, hibdrv_area_sect_id = 0;
	//void *tmpbuf = ioremap(0x00000100, 0x80000);
	void *tmpbuf = (void *)SND_LOADER_BASE;

#if 0 // milton 

#if 0
	/* Load to file. for debug */
	ret = speedy_dev_load("/media/sdcardb1/hibdrv.bin", buf, size);
#else
	for (i = 0; i < SPEEDY_SNAPSHOT_NUM; i++)
		savearea_num_check[i] = 0;

	printk("[%s][%d] jiffies=0x%lx\n", __func__, __LINE__, jiffies);
	ret = gp_fastbot_binary_read(buf, size / 512);
	if (ret != 0) {
		printk("[%s][%d] gp_fastbot_binary_read error (return: %d)\n",
			__func__, __LINE__, ret);
		return ret;
	}

	printk("[%s][%d] jiffies=0x%lx\n", __func__, __LINE__, jiffies);
	for (i = 0; i < SPEEDY_SNAPSHOT_NUM; i++) {
		int bootoffs, snapoffs, bootsize, snapsize;

		/* Get parameters for snapshot image i (values are in 512-byte sectors) */
		ret = gp_fastbot_parameter_get(i, &bootoffs, &snapoffs, &bootsize, &snapsize);
		if (ret != 0)
			continue;
		savearea_num_check[i] = 1;
		savearea[i].bootflag_area = bootoffs * 512;
		savearea[i].bootflag_size = bootsize * 512;
		savearea[i].snapshot_area = snapoffs * 512;
		savearea[i].snapshot_size = snapsize * 512;
		speedy_printf("T-DEBUG: [%s][%d] %d: bootoffs=%d, bootsize=%d, snapoffs=%d, snapsize=%d\n",
			__func__, __LINE__, i, bootoffs, bootsize, snapoffs, snapsize);
	}
	printk("[%s][%d] jiffies=0x%lx\n", __func__, __LINE__, jiffies);
#endif

	/* Reserve memory for the bootloader: read the NAND block size from the boot header in SRAM */
	bootHeader = (pnandprofileheader_t *) ioremap(SRAM_BOOT_HEADER_START, sizeof(pnandprofileheader_t));
	nand_block_size = bootHeader->PagePerBlock * bootHeader->PageSize;
	iounmap(bootHeader);

	/* Buffer size = NAND block size * 2 * 6 + BOOTLOADER_RESERVED_SIZE
	 * + one extra NAND block (used as a NAND working buffer). */
repeat_alloc:
	reserved_buffer_size = nand_block_size * 2 * 6 + BOOTLOADER_RESERVED_SIZE + nand_block_size;
	ka = gp_chunk_malloc(0, reserved_buffer_size);
	//printk("gp_chunk_malloc return ka=%08x\n", (unsigned int)ka);
	if (ka == NULL) {
		printk("CHUNK_MEM_ALLOC: out of memory!(blocksize =0x%x, size:0x%08x), retry for half size\n", nand_block_size, reserved_buffer_size);
		nand_block_size >>= 1;
		if (nand_block_size < 65536) {
			printk("CHUNK_MEM_ALLOC: out of memory! hibernation fail(blocksize =0x%x, size:0x%08x)\n", nand_block_size, reserved_buffer_size);
			ret = -ENOMEM;
		} else {
			/* Retry the reservation with the halved block size */
			goto repeat_alloc;
		}
		//@todo : force normal power when out of memory
	}
Example #2
static int32_t __init
gp_fb_init(
	void
)
{
	int32_t ret = 0;
	struct gp_fb_info *info;

	fbinfo = framebuffer_alloc(sizeof(struct gp_fb_info), NULL);
	if (!fbinfo)
		return -ENOMEM;

	info = fbinfo->par;

	/*Get panel resolution & allocate frame buffer (double buffer) */
	disp1_get_panel_res(&info->panelRes);
	info->fbsize = info->panelRes.width * info->panelRes.height * 4 * 2;
	info->fbmem = gp_chunk_malloc(current->tgid, info->fbsize);
	if (info->fbmem == NULL) {
		ret = -ENXIO;
		goto dealloc_fb;
	}

	memset(info->fbmem, 0x00, info->fbsize);

	fbinfo->fbops = &gp_fb_ops;
	fbinfo->flags = FBINFO_FLAG_DEFAULT;
	fbinfo->pseudo_palette = NULL;
	fbinfo->screen_base = info->fbmem;
	fbinfo->screen_size = 0;

	/* Resolution */
	fbinfo->var.xres = info->panelRes.width;
	fbinfo->var.yres = info->panelRes.height;
	fbinfo->var.xres_virtual = fbinfo->var.xres;
	fbinfo->var.yres_virtual = fbinfo->var.yres * 2;
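	/* two full screens stacked vertically: the second one is the back buffer,
	 * flipped later by panning in y (fix.ypanstep = 1 below) */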

	/* Timing */
	fbinfo->var.left_margin = 0;
	fbinfo->var.right_margin = 0;
	fbinfo->var.upper_margin = 0;
	fbinfo->var.lower_margin = 0;
	fbinfo->var.hsync_len = 0;
	fbinfo->var.vsync_len = 0;

	/* Color RGBA8888 */
	fbinfo->var.bits_per_pixel = 32;
	fbinfo->var.grayscale = 0;
	fbinfo->var.transp.offset	 = 0;
	fbinfo->var.red.offset		= 24;
	fbinfo->var.green.offset	= 16;
	fbinfo->var.blue.offset	= 8;
	fbinfo->var.transp.length	= 8;
	fbinfo->var.red.length		= 8;
	fbinfo->var.green.length	= 8;
	fbinfo->var.blue.length	= 8;
	fbinfo->var.nonstd		= 0;

	fbinfo->var.activate = FB_ACTIVATE_FORCE;
	fbinfo->var.accel_flags = 0;
	fbinfo->var.vmode = FB_VMODE_NONINTERLACED;

	/* fixed info */
	strcpy(fbinfo->fix.id, driver_name);
	fbinfo->fix.mmio_start  = 0x93000000;
	fbinfo->fix.mmio_len    = 0x1000;
	fbinfo->fix.type        = FB_TYPE_PACKED_PIXELS;
	fbinfo->fix.type_aux	= 0;
	fbinfo->fix.visual      = FB_VISUAL_TRUECOLOR;
	fbinfo->fix.xpanstep	= 0;
	fbinfo->fix.ypanstep	= 1;
	fbinfo->fix.ywrapstep	= 0;
	fbinfo->fix.accel	    = FB_ACCEL_NONE;
	fbinfo->fix.smem_start  = gp_chunk_pa(info->fbmem);
	fbinfo->fix.smem_len    = info->fbsize;
	fbinfo->fix.line_length = (fbinfo->var.xres_virtual * fbinfo->var.bits_per_pixel) / 8;

	gp_fb_activate(fbinfo);

	ret = register_framebuffer(fbinfo);
	if (ret < 0) {
		printk(KERN_ERR "Failed to register framebuffer device: %d\n", ret);
		goto release_fbmem;
	}
	printk(KERN_INFO "fb%d: %s frame buffer device\n",
		fbinfo->node, fbinfo->fix.id);

	return 0;

release_fbmem:
	gp_chunk_free(info->fbmem);

dealloc_fb:
	framebuffer_release(fbinfo);

	return ret;
}
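gp_fb_init() above sets yres_virtual to twice yres and ypanstep to 1, which is the standard fbdev way of exposing a pannable double buffer. Below is a minimal user-space sketch of flipping between the two buffers; it assumes the driver registers as /dev/fb0 and that gp_fb_ops implements fb_pan_display (not shown in this listing), and it uses only the stock linux/fb.h ioctls.

/*
 * Minimal user-space sketch: draw into the back buffer, then pan to it.
 * Assumptions: this driver is /dev/fb0 and supports FBIOPAN_DISPLAY.
 */
#include <fcntl.h>
#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <unistd.h>
#include <linux/fb.h>

int main(void)
{
	struct fb_var_screeninfo var;
	int fd = open("/dev/fb0", O_RDWR);
	if (fd < 0)
		return 1;
	if (ioctl(fd, FBIOGET_VSCREENINFO, &var) < 0)
		return 1;

	/* Map both buffers: xres_virtual * yres_virtual * bytes-per-pixel */
	size_t fbsize = var.xres_virtual * var.yres_virtual * (var.bits_per_pixel / 8);
	uint8_t *fb = mmap(NULL, fbsize, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	if (fb == MAP_FAILED)
		return 1;

	/* Fill the back buffer (second half of the mapping), then show it */
	memset(fb + fbsize / 2, 0xFF, fbsize / 2);
	var.yoffset = var.yres;
	ioctl(fd, FBIOPAN_DISPLAY, &var);

	munmap(fb, fbsize);
	close(fd);
	return 0;
}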
/**
 * @brief   Chunkmem device ioctl function
 */
static long chunkmem_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	chunk_block_t block;
	void *ka;           /* kernel_addr */
	unsigned int va;    /* user_addr */
	unsigned int pa;    /* phy_addr*/
	long ret = 0;
	unsigned int offset = 0;

	switch (cmd) {
	case CHUNK_MEM_ALLOC:
	case CHUNK_MEM_SHARE:
	case CHUNK_MEM_MMAP:
		{
			if (copy_from_user(&block, (void __user*)arg, sizeof(block))) {
				ret = -EFAULT;
				break;
			}

			/* alloc|share|mmap memory */
			if (cmd == CHUNK_MEM_MMAP) {
				DIAG_VERB("CHUNK_MEM_MMAP:\n");
				ka = gp_chunk_va(block.phy_addr);
				if (ka == NULL) {
					DIAG_ERROR("CHUNK_MEM_MMAP: bad address! (%s:%08X)\n", current->comm, block.phy_addr);
					ret = -EFAULT; /* mmap fail */
					break;
				}
				/* page alignment */
				offset = block.phy_addr & ~PAGE_MASK;
				ka = (void *)((unsigned long)ka & PAGE_MASK);
				DIAG_VERB("CHUNK_MEM_MMAP: phy_addr                  = %08X\n", block.phy_addr);
				DIAG_VERB("CHUNK_MEM_MMAP: size                      = %08X\n", block.size);
				DIAG_VERB("CHUNK_MEM_MMAP: ka                        = %08X\n", (unsigned int)ka);
				DIAG_VERB("CHUNK_MEM_MMAP: offset                    = %08X\n", offset);
				DIAG_VERB("CHUNK_MEM_MMAP: PAGE_ALIGN(size + offset) = %08X\n", PAGE_ALIGN(block.size + offset));
			}
			else {
				if (cmd == CHUNK_MEM_ALLOC) {
					DIAG_VERB("CHUNK_MEM_ALLOC:\n");
					DIAG_VERB("size = %08X (%d)\n", block.size, block.size);
					ka = gp_chunk_malloc(current->tgid, block.size);
					DIAG_VERB("gp_chunk_malloc return ka=%08X\n", ka);
					if (ka == NULL) {
						DIAG_ERROR("CHUNK_MEM_ALLOC: out of memory! (%s:%08X)\n", current->comm, block.size);
						dlMalloc_Status(NULL);
						ret = -ENOMEM;
						break;
					}
					block.phy_addr = gp_chunk_pa(ka);
				}
				else { /* CHUNK_MEM_SHARE */
					DIAG_VERB("CHUNK_MEM_SHARE:\n");
					ka = gp_chunk_va(block.phy_addr);
					if ((ka == NULL) || (dlShare(ka) == 0)) {
						DIAG_ERROR("CHUNK_MEM_SHARE: bad address! (%s:%08X)\n", current->comm, block.phy_addr);
						ret = -EFAULT; /* share fail */
						break;
					}
				}
				block.size = dlMalloc_Usable_Size(ka) & PAGE_MASK; /* actual allocated size */
				DIAG_VERB("actual size = %08X (%d)\n", block.size, block.size);
				DIAG_VERB("ka = %08X\n", (unsigned int)ka);
			}

			/* mmap to userspace */
			down(&chunkmem->sem);
			down_write(&current->mm->mmap_sem);
			chunkmem->mmap_enable = 1; /* enable mmap in CHUNK_MEM_ALLOC */
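			/* pgoff passed to do_mmap_pgoff is the chunk's page offset within the chunkmem pool */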
			va = do_mmap_pgoff(
				file, 0, PAGE_ALIGN(block.size + offset),
				PROT_READ|PROT_WRITE,
				MAP_SHARED,
				(ka - chunkmem->vbase) >> PAGE_SHIFT);
			chunkmem->mmap_enable = 0; /* disable it */
			up_write(&current->mm->mmap_sem);
			up(&chunkmem->sem);
			if (IS_ERR_VALUE(va)) {
				ret = va; /* errcode */
				DIAG_ERROR("%s: chunkmem mmap fail(%d)! (%s)\n",
						   (cmd == CHUNK_MEM_MMAP) ? "CHUNK_MEM_MMAP" : ((cmd == CHUNK_MEM_ALLOC) ? "CHUNK_MEM_ALLOC" : "CHUNK_MEM_SHARE"),
						   ret, current->comm);
				break;
			}
			va += offset;
			block.addr = (void *)va;
			DIAG_VERB("va = %08X\n\n", va);

			if (copy_to_user((void __user*)arg, &block, sizeof(block))) {
				ret = -EFAULT;
				break;
			}
		}
		break;

	case CHUNK_MEM_FREE:
		{
			if (copy_from_user(&block, (void __user*)arg, sizeof(block))) {
				ret = -EFAULT;
				break;
			}

			/* translate user_va to ka */
			DIAG_VERB("CHUNK_MEM_FREE:\n");
			DIAG_VERB("va = %08X\n", (unsigned int)block.addr);
			pa = gp_user_va_to_pa(block.addr);    /* user_addr to phy_addr */
			if (pa == 0) {
				DIAG_ERROR("CHUNK_MEM_FREE: chunkmem user_va_to_pa fail! (%s:%08X)\n", current->comm, block.addr);
				ret = -EFAULT;
				break;
			}
			DIAG_VERB("pa = %08X\n", pa);
			ka = gp_chunk_va(pa);                  /* phy_addr to kernel_addr */
			if (ka == NULL) {
				DIAG_ERROR("CHUNK_MEM_FREE: not a chunkmem address! (%s:%08X)\n", current->comm, pa);
				ret = -EFAULT;
				break;
			}
			block.size = dlMalloc_Usable_Size(ka) & PAGE_MASK;
			DIAG_VERB("ka = %08X\n", (unsigned int)ka);
			DIAG_VERB("actual size = %08X (%d)\n\n", block.size, block.size);

			/* munmap memory */
			down_write(&current->mm->mmap_sem);
			do_munmap(current->mm, (unsigned int)block.addr, block.size);
			up_write(&current->mm->mmap_sem);

			/* free memory */
			gp_chunk_free(ka);
#if (DIAG_LEVEL >= DIAG_LVL_VERB) && !defined(DIAG_VERB_OFF)
			dlMalloc_Status(NULL);
#endif
		}
		break;

	case CHUNK_MEM_INFO:
		{
			chunk_info_t info;

			if (copy_from_user(&info, (void __user*)arg, sizeof(info))) {
				ret = -EFAULT;
				break;
			}

			if (info.pid == (unsigned int)(-1)) {
				info.pid = current->tgid;
			}

#if CHUNK_SUSPEND_TEST
			if (info.pid) {
				dlMalloc_Status(NULL);
			}
			else {
				gp_chunk_suspend(my_save_data);
				memset(chunkmem->vbase, 0, chunkmem->size);
				/* restore */
				while (blocks != NULL) {
					data_block_t *block = blocks;
					blocks = block->next;
					DIAG_DEBUG("restore data: %p %08X\n", block->addr, block->size);
					memcpy(block->addr, &block->data, block->size);
					kfree(block);
				}
			}
#else
			down(&chunkmem->sem);
			dlMalloc_Status((mem_info_t *)&info);
			up(&chunkmem->sem);
#endif
			if (copy_to_user((void __user*)arg, &info, sizeof(info))) {
				ret = -EFAULT;
				break;
			}
		}
		break;

	case CHUNK_MEM_VA2PA:
		{
			ret = -EFAULT;
			if (copy_from_user(&block, (void __user*)arg, sizeof(block))) {
				break;
			}

			pa = gp_user_va_to_pa(block.addr);    /* user_addr to phy_addr */
			if (pa != 0) {
				ka = gp_chunk_va(pa);             /* phy_addr to kernel_addr */
				if (ka != NULL) {
					block.phy_addr = pa;
					if (copy_to_user((void __user*)arg, &block, sizeof(block)) == 0) {
						ret = 0;
					}
				}
			}
		}
		break;

	case CHUNK_MEM_MUNMAP:
		{
			if (copy_from_user(&block, (void __user*)arg, sizeof(block))) {
				ret = -EFAULT;
				break;
			}

			va = (unsigned int)block.addr;
			/* page alignment */
			offset = va & ~PAGE_MASK;
			va &= PAGE_MASK;

			/* munmap memory */
			down_write(&current->mm->mmap_sem);
			do_munmap(current->mm, va, PAGE_ALIGN(block.size + offset));
			up_write(&current->mm->mmap_sem);
		}
		break;
	
	case CHUNK_MEM_FREEALL:
		gp_chunk_free_all((unsigned int)arg);
		printk(KERN_WARNING "CHUNK_MEM_FREEALL(%lu)\n", arg);
		break;
	
	case CHUNK_MEM_DUMP:
		dlMalloc_Status(NULL);
		break;
	
	default:
		ret = -ENOTTY; /* Inappropriate ioctl for device */
		break;
	}

	return ret;
}
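For reference, here is a minimal user-space sketch of the alloc/free round trip that chunkmem_ioctl() services. The device node name "/dev/chunkmem" and the header exporting chunk_block_t and the CHUNK_MEM_* ioctl numbers are assumptions (neither appears in this listing); only the fields that the handler above actually reads and writes are used.

/*
 * Minimal user-space sketch of CHUNK_MEM_ALLOC / CHUNK_MEM_FREE.
 * Assumptions: device node "/dev/chunkmem"; "gp_chunkmem.h" is a
 * hypothetical header providing chunk_block_t and the ioctl numbers.
 */
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include "gp_chunkmem.h"

int main(void)
{
	chunk_block_t block = { .size = 1024 * 1024 };   /* request 1 MiB */
	int fd = open("/dev/chunkmem", O_RDWR);
	if (fd < 0)
		return 1;

	/* CHUNK_MEM_ALLOC fills in addr (user mapping) and phy_addr, and
	 * rounds size up to the actual usable size of the chunk. */
	if (ioctl(fd, CHUNK_MEM_ALLOC, &block) < 0)
		return 1;
	printf("va=%p pa=%08x size=%u\n", block.addr, block.phy_addr, block.size);

	/* CHUNK_MEM_FREE looks the chunk up by the user virtual address in
	 * block.addr, munmaps it, and returns it to the allocator. */
	ioctl(fd, CHUNK_MEM_FREE, &block);
	close(fd);
	return 0;
}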