Esempio n. 1
0
/*
 * Verify that the metadata device is large enough to hold the LUKS1
 * header described by 'hdr'.  When 'falloc' is nonzero and the backing
 * store is a too-small header file, try to grow it with fallocate
 * instead of failing.
 *
 * Returns 0 on success, -EINVAL for an invalid header or a too-small
 * device, -EIO when the device size cannot be read.
 */
static int LUKS_check_device_size(struct crypt_device *ctx, const struct luks_phdr *hdr, int falloc)
{
	struct device *device = crypt_metadata_device(ctx);
	uint64_t dev_sectors, hdr_sectors;

	/* A zero key size marks an invalid header. */
	if (!hdr->keyBytes)
		return -EINVAL;

	if (device_size(device, &dev_sectors)) {
		log_dbg("Cannot get device size for device %s.", device_path(device));
		return -EIO;
	}

	/* device_size() reports bytes; compare in sectors. */
	dev_sectors >>= SECTOR_SHIFT;
	hdr_sectors = LUKS_device_sectors(hdr);
	log_dbg("Key length %u, device size %" PRIu64 " sectors, header size %"
		PRIu64 " sectors.", hdr->keyBytes, dev_sectors, hdr_sectors);

	if (hdr_sectors <= dev_sectors)
		return 0;

	/* If it is header file, increase its size */
	if (falloc && !device_fallocate(device, hdr_sectors << SECTOR_SHIFT))
		return 0;

	log_err(ctx, _("Device %s is too small. (LUKS1 requires at least %" PRIu64 " bytes.)"),
		device_path(device), hdr_sectors * SECTOR_SIZE);
	return -EINVAL;
}
Esempio n. 2
0
/*
 * Check that the metadata device can hold a LUKS header whose key
 * material occupies 'keyLength' bytes.
 *
 * Returns 0 if it fits, -EINVAL for a zero key length or a too-small
 * device, -EIO when the device size cannot be read.
 */
static int LUKS_check_device_size(struct crypt_device *ctx, size_t keyLength)
{
	struct device *device = crypt_metadata_device(ctx);
	uint64_t dev_sectors, hdr_sectors;

	if (!keyLength)
		return -EINVAL;

	if (device_size(device, &dev_sectors)) {
		log_dbg("Cannot get device size for device %s.", device_path(device));
		return -EIO;
	}

	/* Convert the byte count to sectors before comparing. */
	dev_sectors >>= SECTOR_SHIFT;
	hdr_sectors = LUKS_device_sectors(keyLength);
	log_dbg("Key length %zu, device size %" PRIu64 " sectors, header size %"
		PRIu64 " sectors.", keyLength, dev_sectors, hdr_sectors);

	if (hdr_sectors <= dev_sectors)
		return 0;

	log_err(ctx, _("Device %s is too small. (LUKS requires at least %" PRIu64 " bytes.)\n"),
		device_path(device), hdr_sectors * SECTOR_SIZE);
	return -EINVAL;
}
/*
 * Probe a memory map for CFI-compatible flash chips and, if any are
 * found, attach the matching MTD command-set driver (primary command
 * set first, then secondary).
 *
 * NOTE(review): this definition appears truncated in this chunk — the
 * function's closing brace / failure path after "return mtd;" is not
 * visible here.
 */
struct mtd_info *mtd_do_chip_probe(struct map_info *map, struct chip_probe *cp)
{
	struct mtd_info *mtd = NULL;
	struct cfi_private *cfi;

	/* First probe the map to see if we have CFI stuff there. */
	cfi = genprobe_ident_chips(map, cp);

	if (!cfi)
		return NULL;

	map->fldrv_priv = cfi;
	/* OK we liked it. Now find a driver for the command set it talks */

	mtd = check_cmd_set(map, 1); /* First the primary cmdset */
	if (!mtd)
		mtd = check_cmd_set(map, 0); /* Then the secondary */

	if (mtd) {
		/* Clip the visible size if the chips report more than the map covers. */
		if (device_size(mtd) > map->size) {
			printk(KERN_WARNING "Reducing visibility of %ldKiB chip to %ldKiB\n",
			       (unsigned long)device_size(mtd) >> 10,
			       (unsigned long)map->size >> 10);
			mtd->num_eraseblocks = map->size / mtd->erasesize;
			/* Since map->size is of type unsigned long, the size has to be
			   < 4GB, therefore assign mtd->size */
			mtd->size = map->size;
		}
		return mtd;
	}
Esempio n. 4
0
unsigned int smt_read_bbt(char *page)
{
	int i=0;
	int iSizeOfBlock,iSizeOfFlash;
	int iNumOfBlock;
	int isBad;
	int iCS = 0;
	int iNumOfBad = 0;
	unsigned int len = 0;
	uint32_t bbtSize;

	iSizeOfFlash = device_size(&(gNandInfo[iCS]->mtd));
	iSizeOfBlock =gNandInfo[iCS]->mtd.erasesize;
	bbtSize = brcmnand_get_bbt_size(&(gNandInfo[iCS]->mtd)); 

	iNumOfBlock = (iSizeOfFlash-bbtSize)/iSizeOfBlock;

	for(i=0;i<iNumOfBlock;i++)
	{

		isBad = brcmnand_smt_isbadBlcok(&(gNandInfo[iCS]->mtd), (loff_t)( i*iSizeOfBlock));

		if(isBad==1)
		{
			len+=sprintf(page+len,"[%4d] 0x%08x Is BAD \n"
							,i
							,i*iSizeOfBlock);
			iNumOfBad++;
		}
	}

	len+=sprintf(page+len,"Total Num of Bad Block is %d over %d\n",iNumOfBad,iNumOfBlock);

	return len;
}
Esempio n. 5
0
// SAMSUNG_MODE - kclee (2011.05.25) for gethering total bad block.
unsigned int smt_get_bbt()
{
	int i=0;
	int iSizeOfBlock,iSizeOfFlash;
	int iNumOfBlock;
	int isBad;
	int iCS = 0;
	int iNumOfBad = 0;
	uint32_t bbtSize;

	iSizeOfFlash = device_size(&(gNandInfo[iCS]->mtd));
	iSizeOfBlock =gNandInfo[iCS]->mtd.erasesize;
	bbtSize = brcmnand_get_bbt_size(&(gNandInfo[iCS]->mtd)); 

	iNumOfBlock = (iSizeOfFlash-bbtSize)/iSizeOfBlock;

	for(i=0;i<iNumOfBlock;i++)
	{

		isBad = brcmnand_smt_isbadBlcok(&(gNandInfo[iCS]->mtd), (loff_t)( i*iSizeOfBlock));

		if(isBad==1)
		{
			iNumOfBad++;
		}
	}

	return iNumOfBad;
}
/*
 * Verify that 'device' is large enough to hold a LUKS2 header of
 * 'hdr_size' bytes.  When 'falloc' is set and the backing store is a
 * too-small header file, attempt to grow it with fallocate instead of
 * failing.
 *
 * Returns 0 on success, -EIO if the size cannot be read, -EINVAL if
 * the device is too small.
 */
static int LUKS2_check_device_size(struct crypt_device *cd, struct device *device,
				   uint64_t hdr_size, int falloc)
{
	uint64_t dev_size;

	if (device_size(device, &dev_size)) {
		log_dbg("Cannot get device size for device %s.", device_path(device));
		return -EIO;
	}

	log_dbg("Device size %" PRIu64 ", header size %"
		PRIu64 ".", dev_size, hdr_size);

	if (hdr_size <= dev_size)
		return 0;

	/* If it is header file, increase its size */
	if (falloc && !device_fallocate(device, hdr_size))
		return 0;

	log_err(cd, _("Device %s is too small. (LUKS2 requires at least %" PRIu64 " bytes.)"),
		device_path(device), hdr_size);
	return -EINVAL;
}
Esempio n. 7
0
/*
 * Since v3.3 the controller allocates the BBT for each flash device; we
 * cannot use the entire flash, but have to reserve the end of it for the
 * BBT, or ubiformat will corrupt the BBT.
 *
 * Register 'parts' on 'mtd', shrinking any partition that would
 * otherwise reach into the BBT reserved area at the end of the flash.
 */
static void brcmnand_add_mtd_partitions(
			struct mtd_info *mtd,
		       struct mtd_partition *parts,
		       int nr_parts)
{
	uint64_t devSize = device_size(mtd);
	uint32_t bbtSize = brcmnand_get_bbt_size(mtd);
	uint64_t usable = devSize - bbtSize;
	int idx;

	for (idx = 0; idx < nr_parts; idx++) {
		if (MTDPART_SIZ_FULL == parts[idx].size) {
			/* "Whole device" partitions stop short of the BBT. */
			parts[idx].size = usable;
			printk(KERN_WARNING "Adjust partition %s size from entire device to %llx to avoid overlap with BBT reserved space\n",
				parts[idx].name, parts[idx].size);
		} else if ((parts[idx].offset + parts[idx].size) > usable) {
			/* Clip partitions that run into the reserved area. */
			uint64_t adjSize = usable - parts[idx].offset;

			printk(KERN_WARNING "Adjust partition %s size from %llx to %llx to avoid overlap with BBT reserved space\n",
				parts[idx].name, parts[idx].size, adjSize);
			parts[idx].size = adjSize;
		}
	}
	add_mtd_partitions(mtd, parts, nr_parts);
}
Esempio n. 8
0
	/// Element count.  Dispatches to the device- or host-side
	/// implementation depending on which pass of the CUDA compiler
	/// is processing this translation unit.
	CUGIP_DECL_HYBRID int
	size()
	{
		#if __CUDA_ARCH__
			return device_size();   // compiled for the GPU
		#else
			return host_size();     // compiled for the host
		#endif
	}
Esempio n. 9
0
/*
 * Prepare a block device (or file) for mkfs: detect its size, discard
 * its blocks, zero the start of the device and every potential
 * superblock mirror location, and zero the end of the device.  The
 * detected size in bytes is returned through *block_count_ret.  Exits
 * the process on any failure.  Devices smaller than 1 GiB force mixed
 * metadata/data block groups via *mixed.
 */
int btrfs_prepare_device(int fd, char *file, int zero_end, u64 *block_count_ret,
			 int *mixed)
{
	u64 block_count;
	u64 bytenr;
	struct stat st;
	int i, ret;

	ret = fstat(fd, &st);
	if (ret < 0) {
		fprintf(stderr, "unable to stat %s\n", file);
		exit(1);
	}

	block_count = device_size(fd, &st);
	if (block_count == 0) {
		fprintf(stderr, "unable to find %s size\n", file);
		exit(1);
	}
	/* NOTE(review): unconditionally overrides the caller's zero_end
	 * argument, making the later 'if (zero_end)' always true —
	 * confirm this is intentional. */
	zero_end = 1;

	if (block_count < 1024 * 1024 * 1024 && !(*mixed)) {
		printf("SMALL VOLUME: forcing mixed metadata/data groups\n");
		*mixed = 1;
	}

	/*
	 * We intentionally ignore errors from the discard ioctl.  It is
	 * not necessary for the mkfs functionality but just an optimization.
	 */
	discard_blocks(fd, 0, block_count);

	ret = zero_dev_start(fd);
	if (ret) {
		fprintf(stderr, "failed to zero device start %d\n", ret);
		exit(1);
	}

	/* Wipe every superblock mirror offset that lies within the device. */
	for (i = 0 ; i < BTRFS_SUPER_MIRROR_MAX; i++) {
		bytenr = btrfs_sb_offset(i);
		if (bytenr >= block_count)
			break;
		zero_blocks(fd, bytenr, BTRFS_SUPER_INFO_SIZE);
	}

	if (zero_end) {
		ret = zero_dev_end(fd, block_count);
		if (ret) {
			fprintf(stderr, "failed to zero device end %d\n", ret);
			exit(1);
		}
	}
	*block_count_ret = block_count;
	return 0;
}
Esempio n. 10
0
/*
 * llseek handler for the MTD character device.  Supports SEEK_SET,
 * SEEK_CUR and SEEK_END, and accepts any resulting position in
 * [0, device size] inclusive.  Returns the new position or -EINVAL.
 */
static loff_t mtd_lseek (struct file *file, loff_t offset, int orig)
{
	struct mtd_file_info *mfi = file->private_data;
	struct mtd_info *mtd = mfi->mtd;
	loff_t pos = offset;

	switch (orig) {
	case SEEK_SET:
		break;
	case SEEK_CUR:
		pos += file->f_pos;
		break;
	case SEEK_END:
		pos += device_size(mtd);
		break;
	default:
		return -EINVAL;
	}

	/* Seeking exactly to the end of the device is allowed. */
	if (pos < 0 || pos > device_size(mtd))
		return -EINVAL;

	file->f_pos = pos;
	return pos;
}
Esempio n. 11
0
/*
 * Register the NAND device for chip select 'csi'.  On TiVo Yundo builds
 * a fixed partition map plus skip table is installed; otherwise the
 * whole device (minus the BBT reserved area at the end of the flash) is
 * exposed as a single partition named "entire_deviceNN".
 */
static void brcmnand_add_mtd_device(struct mtd_info *mtd, int csi)
{
#ifdef CONFIG_TIVO_YUNDO
	add_mtd_partitions(mtd,nand_mtd,NUMBER_OF_PART);
	smt_make_SkipTable(mtd,nand_mtd,NUMBER_OF_PART);  // Skip Alg. 2011-03-16 Peter Kang
#else
	uint64_t devSize = device_size(mtd);
	uint32_t bbtSize = brcmnand_get_bbt_size(mtd);

	/* Reserve the tail of the flash for the bad-block table. */
	//add_mtd_device(mtd);
	single_partition_map[0].size = devSize - bbtSize;
	sprintf(single_partition_map[0].name, "entire_device%02d", csi);
	add_mtd_partitions(mtd, single_partition_map, 1);
#endif
}
Esempio n. 12
0
/*
 * Prepare a block device (or file) for mkfs: detect its size (rejecting
 * devices under 256 MB), zero the start of the device and every
 * potential superblock mirror location, then zero the end of the
 * device.  The detected size in bytes is returned through
 * *block_count_ret.  Exits the process on any failure.
 */
int btrfs_prepare_device(int fd, char *file, int zero_end, u64 *block_count_ret)
{
	u64 block_count;
	u64 bytenr;
	struct stat st;
	int i, ret;

	ret = fstat(fd, &st);
	if (ret < 0) {
		fprintf(stderr, "unable to stat %s\n", file);
		exit(1);
	}

	block_count = device_size(fd, &st);
	if (block_count == 0) {
		fprintf(stderr, "unable to find %s size\n", file);
		exit(1);
	}
	/* NOTE(review): unconditionally overrides the caller's zero_end
	 * argument, making the later 'if (zero_end)' always true —
	 * confirm this is intentional. */
	zero_end = 1;

	if (block_count < 256 * 1024 * 1024) {
		fprintf(stderr, "device %s is too small "
		        "(must be at least 256 MB)\n", file);
		exit(1);
	}
	ret = zero_dev_start(fd);
	if (ret) {
		fprintf(stderr, "failed to zero device start %d\n", ret);
		exit(1);
	}

	/* Wipe every superblock mirror offset that lies within the device. */
	for (i = 0 ; i < BTRFS_SUPER_MIRROR_MAX; i++) {
		bytenr = btrfs_sb_offset(i);
		if (bytenr >= block_count)
			break;
		zero_blocks(fd, bytenr, BTRFS_SUPER_INFO_SIZE);
	}

	if (zero_end) {
		ret = zero_dev_end(fd, block_count);
		if (ret) {
			fprintf(stderr, "failed to zero device end %d\n", ret);
			exit(1);
		}
	}
	*block_count_ret = block_count;
	return 0;
}
Esempio n. 13
0
/*
 * Dump the MTD skip table for chip-select 0 into 'page'.  An entry is
 * printed when it differs from its predecessor, when it marks a block
 * consumed as a skip replacement (USED_SKIP_BLOCK), or — for entry 0 —
 * when its offset is nonzero.  Blocks in the BBT reserved area at the
 * end of the flash are excluded.
 *
 * Returns the number of characters written into 'page'.
 */
unsigned int smt_read_mtdskiptbl(char *page)
{
	int blk;
	int blockSize, flashSize;
	int blockCount;
	int cs = 0;
	unsigned int written = 0;
	uint32_t bbtBytes;
	int show;

	flashSize = device_size(&(gNandInfo[cs]->mtd));
	blockSize = gNandInfo[cs]->mtd.erasesize;
	bbtBytes = brcmnand_get_bbt_size(&(gNandInfo[cs]->mtd));

	blockCount = (flashSize - bbtBytes) / blockSize;

	for (blk = 0; blk < blockCount; blk++) {
		if (blk == 0)
			show = (gSkipTBL[0].iOffset != 0x00);
		else
			show = (gSkipTBL[blk].iOffset != gSkipTBL[blk - 1].iOffset) ||
			       (gSkipTBL[blk].iOffset == USED_SKIP_BLOCK);

		if (!show)
			continue;

		written += sprintf(page + written, "[%4d] (0x%08x) 0x%08x -> 0x%08x \n",
				   blk,
				   gSkipTBL[blk].iOffset,
				   blk * blockSize,
				   blk * blockSize + gSkipTBL[blk].iOffset);
	}
	return written;
}
Esempio n. 14
0
/*
 * Open 'file' as an ext4-style extent database for testing: allocate a
 * block-device wrapper (12-bit block shift) and an in-memory inode with
 * hard-coded identity fields, compute the inode checksum, then load the
 * extent tree root from the DB header (initializing a fresh tree when
 * the stored root is not valid).  Exits the process if the device
 * reports zero size.  Returns the initialized inode; the caller owns it.
 */
static struct inode *open_ext4_db(char *file)
{
	int fd;
	struct ext4_extent_header *ihdr;
	struct inode *inode;
	struct db_handle *db;
	struct block_device *bdev;

	fd = device_open(file);

	bdev = bdev_alloc(fd, 12);
	inode = inode_alloc(bdev->bd_super);

	/* For testing purpose, those data is hard-coded. */
	inode->i_writeback = inode_sb_writeback;
	memset(inode->i_uuid, 0xCC, sizeof(inode->i_uuid));
	inode->i_inum = 45;
	/* Checksum chains uuid -> inum -> generation. */
	inode->i_csum = ext4_crc32c(~0, inode->i_uuid, sizeof(inode->i_uuid));
	inode->i_csum =
	    ext4_crc32c(inode->i_csum, &inode->i_inum, sizeof(inode->i_inum));
	inode->i_csum = ext4_crc32c(inode->i_csum, &inode->i_generation,
				    sizeof(inode->i_generation));

	if (!device_size(bdev))
		exit(EXIT_FAILURE);

	db = db_open(inode->i_sb);
	ihdr = ext4_ext_inode_hdr(inode);
	memcpy(ihdr, db->db_header->db_tree_base, sizeof(inode->i_data));

	/* Initialize a fresh extent tree if the stored root is not valid. */
	if (ihdr->eh_magic != EXT4_EXT_MAGIC) {
		ext4_ext_init_i_blocks(ihdr);
		inode->i_data_dirty = 1;
	}

	inode->i_db = db;

	return inode;
}
Esempio n. 15
0
/*
 * write() handler for the MTD character device.  Copies user data into
 * a bounded kernel bounce buffer and writes it out chunk by chunk; the
 * write path depends on the per-open mode (OTP user/factory, raw
 * data+OOB, password OTP on HUMAX platforms, or plain flash write).
 * Returns the number of bytes written, 0, or a negative errno.
 */
static ssize_t mtd_write(struct file *file, const char __user *buf, size_t count,loff_t *ppos)
{
	struct mtd_file_info *mfi = file->private_data;
	struct mtd_info *mtd = mfi->mtd;
	char *kbuf;
	size_t retlen;
	size_t total_retlen=0;
	int ret=0;
	int len;

	DEBUG(MTD_DEBUG_LEVEL0,"MTD_write\n");

	/* Position exactly at the device end: no space left. */
	if (*ppos == device_size(mtd))
		return -ENOSPC;

	/* Clamp the request so it does not run past the device end. */
	if (*ppos + count > device_size(mtd))
		count = device_size(mtd) - *ppos;

	if (!count)
		return 0;

	/* Bounce buffer is capped at MAX_KMALLOC_SIZE; larger writes loop. */
	if (count > MAX_KMALLOC_SIZE)
		kbuf=kmalloc(MAX_KMALLOC_SIZE, GFP_KERNEL);
	else
		kbuf=kmalloc(count, GFP_KERNEL);

	if (!kbuf)
		return -ENOMEM;

	while (count) {

		if (count > MAX_KMALLOC_SIZE)
			len = MAX_KMALLOC_SIZE;
		else
			len = count;

		if (copy_from_user(kbuf, buf, len)) {
			kfree(kbuf);
			return -EFAULT;
		}

		/* Dispatch on the per-open mode (set elsewhere via ioctl). */
		switch (mfi->mode) {
		case MTD_MODE_OTP_FACTORY:
			/* Factory OTP area is read-only. */
			ret = -EROFS;
			break;
		case MTD_MODE_OTP_USER:
			if (!mtd->write_user_prot_reg) {
				ret = -EOPNOTSUPP;
				break;
			}
			ret = mtd->write_user_prot_reg(mtd, *ppos, len, &retlen, kbuf);
			break;

		case MTD_MODE_RAW:
		{
			struct mtd_oob_ops ops;

			ops.mode = MTD_OOB_RAW;
			ops.datbuf = kbuf;
			ops.oobbuf = NULL;
			ops.len = len;

			ret = mtd->write_oob(mtd, *ppos, &ops);
			retlen = ops.retlen;
			break;
		}
#if defined(HUMAX_PLATFORM_BASE)
		case MTD_MODE_OTP_SET_PASSWORD:
		case MTD_MODE_OTP_PROGRAM_PASSWORD:			
			if (!mtd->write_password_prot_reg) {
				ret = -EOPNOTSUPP;
				break;
			}
			ret = mtd->write_password_prot_reg(mfi->mode, mtd, *ppos, len, &retlen, kbuf);
			break;
#endif			

		default:
			ret = (*(mtd->write))(mtd, *ppos, len, &retlen, kbuf);
		}
		if (!ret) {
			*ppos += retlen;
			total_retlen += retlen;
			count -= retlen;
			buf += retlen;
		}
		else {
			/* First failing chunk aborts the whole write. */
			kfree(kbuf);
			return ret;
		}
	}

	kfree(kbuf);
	return total_retlen;
} /* mtd_write */
Esempio n. 16
0
/*
 * Platform-device probe for the Broadcom NAND controller.  Allocates the
 * driver state and page buffers (DMA-aligned when EDU is enabled), scans
 * the chip, then registers MTD partitions — from the command line when
 * present, otherwise from platform data.  Returns 0 on success or a
 * negative errno; all allocations are released on failure.
 */
static int __devinit brcmnanddrv_probe(struct platform_device *pdev)
{
	struct brcmnand_platform_data *cfg = pdev->dev.platform_data;
	//struct flash_platform_data *pdata = pdev->dev.platform_data;
	//struct resource *res = pdev->resource;
	//unsigned long size = res->end - res->start + 1;
	int err = 0;

	gPageBuffer = NULL;
	info = kmalloc(sizeof(struct brcmnand_info), GFP_KERNEL);
	if (!info)
		return -ENOMEM;

	memset(info, 0, sizeof(struct brcmnand_info));

#ifndef CONFIG_MTD_BRCMNAND_EDU
	gPageBuffer = kmalloc(sizeof(struct nand_buffers), GFP_KERNEL);
	info->brcmnand.buffers = (struct nand_buffers*) gPageBuffer;
#else
	/* Align on 32B boundary for efficient DMA transfer */
	gPageBuffer = kmalloc(sizeof(struct nand_buffers) + 31, GFP_DMA);
	info->brcmnand.buffers = (struct nand_buffers*) (((unsigned int) gPageBuffer+31) & (~31));
#endif
	if (!info->brcmnand.buffers) {
		kfree(info);
		return -ENOMEM;
	}

	memset(info->brcmnand.buffers, 0, sizeof(struct nand_buffers));

	memset(gNandCS, 0, MAX_NAND_CS);
	gNumNand = 1;
	gNandCS[0] = cfg->chip_select;

	info->brcmnand.numchips = 1; // For now, we only support 1 chip
	info->brcmnand.chip_shift = 0; // Only 1 chip
	//info->brcmnand.regs = pdev->resource[0].start;
	info->brcmnand.priv = &info->mtd;

	//info->brcmnand.mmcontrol = NULL;  // THT: Sync Burst Read TBD.  pdata->mmcontrol;

	info->mtd.name = dev_name(&pdev->dev);
	info->mtd.priv = &info->brcmnand;
	info->mtd.owner = THIS_MODULE;

	/* Enable the following for a flash based bad block table */
	info->brcmnand.options |= NAND_USE_FLASH_BBT;
	

//printk("brcmnand_scan\n");
	if (brcmnand_scan(&info->mtd, gNumNand)) {
		err = -ENXIO;
		goto out_free_info;
	}

	printk("	numchips=%d, size=%llx\n", info->brcmnand.numchips, device_size(&(info->mtd)));

#ifdef CONFIG_MTD_PARTITIONS
	/* allow cmdlineparts to override the default map */
	err = parse_mtd_partitions(&info->mtd, part_probe_types,
		&info->parts, 0);
	if (err > 0) {
		info->nr_parts = err;
	} else {
		/* No command-line partitions: fall back to platform data. */
		info->parts = cfg->nr_parts ? cfg->parts : NULL;
		info->nr_parts = cfg->nr_parts;
	}

	if (info->nr_parts)
		add_mtd_partitions(&info->mtd, info->parts, info->nr_parts);
	else
		add_mtd_device(&info->mtd);
#else
	add_mtd_device(&info->mtd);
#endif

//printk("	dev_set_drvdata\n");	
	dev_set_drvdata(&pdev->dev, info);
//printk("<-- brcmnanddrv_probe\n");

	return 0;


out_free_info:

	if (gPageBuffer)
		kfree(gPageBuffer);
	kfree(info);
	return err;
}
Esempio n. 17
0
/*
 * Create or verify a dm-verity hash tree.
 *
 * In verify mode the tree and the supplied root hash are checked
 * against the data device; in create mode the tree is written to the
 * hash device and the resulting root digest is copied into 'root_hash'.
 * Returns 0 on success or a negative errno.
 *
 * NOTE(review): 'calculated_digest' is a VLA sized by the caller's
 * digest_size — assumes a small, trusted digest size; confirm.
 * NOTE(review): several log formats print size_t/off_t with %u/%lu —
 * assumes an LP64 target; verify before porting.
 */
static int VERITY_create_or_verify_hash(struct crypt_device *cd,
	int verify,
	int version,
	const char *hash_name,
	struct device *hash_device,
	struct device *data_device,
	size_t hash_block_size,
	size_t data_block_size,
	off_t data_blocks,
	off_t hash_position,
	char *root_hash,
	size_t digest_size,
	const char *salt,
	size_t salt_size)
{
	char calculated_digest[digest_size];
	FILE *data_file = NULL;
	FILE *hash_file = NULL, *hash_file_2;
	off_t hash_level_block[VERITY_MAX_LEVELS];
	off_t hash_level_size[VERITY_MAX_LEVELS];
	off_t data_file_blocks, s;
	size_t hash_per_block_bits;
	off_t data_device_size = 0, hash_device_size = 0;
	uint64_t dev_size;
	int levels, i, r;

    log_dbg("VERITY_create_or_verify_hash");
    log_dbg("verify: %d", verify);
    log_dbg("hash_name: %s", hash_name);
    log_dbg("data_device: %s", device_path(data_device));
    log_dbg("hash_block_size: %u", hash_block_size);
    log_dbg("data_block_size: %u", data_block_size);
    log_dbg("data_blocks: %lu", data_blocks);
    log_dbg("hash_device: %s", device_path(hash_device));
    log_dbg("offset: %lu", hash_position);

	log_dbg("Hash %s %s, data device %s, data blocks %lu" 
		", hash_device %s, offset %lu.",
		verify ? "verification" : "creation", hash_name,
		device_path(data_device), data_blocks,
		device_path(hash_device), hash_position);

	if (data_blocks < 0 || hash_position < 0) {
		log_err(cd, "Invalid size parameters for verity device.\n");
		return -EINVAL;
	}

	/* data_blocks == 0 means "use the whole data device". */
	if (!data_blocks) {
		r = device_size(data_device, &dev_size);
		if (r < 0)
			return r;

		data_file_blocks = dev_size / data_block_size;
	} else
		data_file_blocks = data_blocks;

	/* NOTE(review): overflow check uses data_blocks, not the derived
	 * data_file_blocks — confirm this is intended when data_blocks
	 * is 0 (the product is then trivially 0). */
	if (mult_overflow(&data_device_size, data_blocks, data_block_size)) {
		log_err(cd, "Device offset overflow.\n");
		return -EINVAL;
	}

	hash_per_block_bits = get_bits_down(hash_block_size / digest_size);
	if (!hash_per_block_bits)
		return -EINVAL;

	/* Number of tree levels needed to cover all data blocks. */
	levels = 0;
	if (data_file_blocks) {
		while (hash_per_block_bits * levels < 64 &&
		       (data_file_blocks - 1) >> (hash_per_block_bits * levels))
			levels++;
	}
	log_dbg("Using %d hash levels.", levels);

	if (levels > VERITY_MAX_LEVELS) {
		log_err(cd, "Too many tree levels for verity volume.\n");
		return -EINVAL;
	}

	/* Lay out the per-level block ranges, top level first. */
	for (i = levels - 1; i >= 0; i--) {
		hash_level_block[i] = hash_position;
		// verity position of block data_file_blocks at level i
		s = (data_file_blocks + ((off_t)1 << ((i + 1) * hash_per_block_bits)) - 1) >> ((i + 1) * hash_per_block_bits);
		hash_level_size[i] = s;
		if ((hash_position + s) < hash_position ||
		    (hash_position + s) < 0) {
			log_err(cd, "Device offset overflow.\n");
			return -EINVAL;
		}
		hash_position += s;
	}

	if (mult_overflow(&hash_device_size, hash_position, hash_block_size)) {
		log_err(cd, "Device offset overflow.\n");
		return -EINVAL;
	}

	//log_dbg("Data device size required: %" PRIu64 " bytes.",
	//	data_device_size);
	log_dbg("Data device size required: %lu bytes.",
		data_device_size);
	data_file = fopen(device_path(data_device), "r");
	if (!data_file) {
		log_err(cd, "Cannot open device %s.\n",
			device_path(data_device)
		);
		r = -EIO;
		goto out;
	}

	//log_dbg("Hash device size required: %" PRIu64 " bytes.",
    //		hash_device_size);
	log_dbg("Hash device size required: %lu bytes.",
		hash_device_size);
	hash_file = fopen(device_path(hash_device), verify ? "r" : "r+");
	if (!hash_file) {
		log_err(cd, "Cannot open device %s.\n",
			device_path(hash_device));
		r = -EIO;
		goto out;
	}

	memset(calculated_digest, 0, digest_size);

	/* Level 0 hashes the data device; each higher level hashes the
	 * level below it (read back through a second stream). */
	for (i = 0; i < levels; i++) {
		if (!i) {
			r = create_or_verify(cd, data_file, hash_file,
						    0, data_block_size,
						    hash_level_block[i], hash_block_size,
						    data_file_blocks, version, hash_name, verify,
						    calculated_digest, digest_size, salt, salt_size);
			if (r)
				goto out;
		} else {
			hash_file_2 = fopen(device_path(hash_device), "r");
			if (!hash_file_2) {
				log_err(cd, "Cannot open device %s.\n",
					device_path(hash_device));
				r = -EIO;
				goto out;
			}
			r = create_or_verify(cd, hash_file_2, hash_file,
						    hash_level_block[i - 1], hash_block_size,
						    hash_level_block[i], hash_block_size,
						    hash_level_size[i - 1], version, hash_name, verify,
						    calculated_digest, digest_size, salt, salt_size);
			fclose(hash_file_2);
			if (r)
				goto out;
		}
	}

	/* Final pass produces the root digest (from the top level, or
	 * directly from the data when the tree has no levels). */
	if (levels)
		r = create_or_verify(cd, hash_file, NULL,
					    hash_level_block[levels - 1], hash_block_size,
					    0, hash_block_size,
					    1, version, hash_name, verify,
					    calculated_digest, digest_size, salt, salt_size);
	else
		r = create_or_verify(cd, data_file, NULL,
					    0, data_block_size,
					    0, hash_block_size,
					    data_file_blocks, version, hash_name, verify,
					    calculated_digest, digest_size, salt, salt_size);
out:
	if (verify) {
		if (r)
			log_err(cd, "Verification of data area failed.\n");
		else {
			log_dbg("Verification of data area succeeded.");
			r = memcmp(root_hash, calculated_digest, digest_size) ? -EPERM : 0;
			if (r)
				log_err(cd, "Verification of root hash failed.\n");
			else
				log_dbg("Verification of root hash succeeded.");
		}
	} else {
		if (r == -EIO)
			log_err(cd, "Input/output error while creating hash area.\n");
		else if (r)
			log_err(cd, "Creation of hash area failed.\n");
		else {
			/* Flush the written tree before publishing the root hash. */
			fsync(fileno(hash_file));
			memcpy(root_hash, calculated_digest, digest_size);
		}
	}

	if (data_file)
		fclose(data_file);
	if (hash_file)
		fclose(hash_file);
	return r;
}
Esempio n. 18
0
/*
 * Build the global bad-block skip table (gSkipTBL) used by the skip
 * algorithm.  For each selected partition (rootfs0/rootfs1 by default,
 * or those named via "mtd.skip=<name>" on the kernel command line),
 * good blocks are assigned an offset that skips over preceding bad
 * blocks; blocks consumed by the remapping are marked USED_SKIP_BLOCK.
 * Runs only once — guarded by gSkipFlag.
 */
static void smt_make_SkipTable(struct mtd_info *master,const struct mtd_partition *parts,int nbparts)
{
	int i,j,k;
	int iSizeOfBlock,iSizeOfFlash;
	int iNumOfBlock;
	int isBad;	
	int iStartblock,iEndblock;
	uint32_t bbtSize;
	char sSkipinfo[32];
	int isDefaultOption =0;

	/* The table is built only once per boot. */
	if(gSkipFlag!=0)
	{
		printk("[SKT] Already Make Skip Table !!\n");
		return;
	}


	iSizeOfFlash = device_size(master);
	iSizeOfBlock =master->erasesize;

	iNumOfBlock = iSizeOfFlash/iSizeOfBlock;

	bbtSize = brcmnand_get_bbt_size(master); 
	
	gSkipTBL = kmalloc(iNumOfBlock* sizeof(struct smt_bbt_info), GFP_KERNEL);

	if(gSkipTBL==NULL)
	{
		printk("[SKT] Fail on Kmalloc for gSkipTBL !\n");
		return;
	}
	else
	{
		memset(gSkipTBL,0x00,iNumOfBlock* sizeof(struct smt_bbt_info));
	}
	
	

	/* No "mtd.skip" on the command line => use the default partition set. */
	sprintf(sSkipinfo,"mtd.skip");
	if(!strstr(arcs_cmdline,sSkipinfo))
	{
		isDefaultOption = 1;
	}
	
	printk("[SKT] Make Skip Table(%d) for BBM  w/ %s option\n",iNumOfBlock,(isDefaultOption==1)?"Default":"Input");		
	for(i=0;i<nbparts;i++)
	{
		if(isDefaultOption)
		{
		/* rootfs0 and rootfs1 */
		if( (i!=5) && (i!=9) )
			continue;
		}
		else
		{
			/* Only partitions listed as "mtd.skip=<name>" participate. */
			sprintf(sSkipinfo,"mtd.skip=%s",parts[i].name);
			if(!strstr(arcs_cmdline,sSkipinfo))
			{
				continue;
			}
		}
		
		iStartblock = parts[i].offset/iSizeOfBlock;
		iEndblock = parts[i].size/iSizeOfBlock+iStartblock;
		printk("[SKT] %2d  %20s %4d(0x%08X) ~ %4d(0x%08X)\n",i,parts[i].name,iStartblock,iStartblock*iSizeOfBlock,iEndblock,iEndblock*iSizeOfBlock);

		/* Whole-device partitions are excluded from skipping. */
		if((uint32_t)parts[i].size>=(iSizeOfFlash-bbtSize) )
		{
			printk("[SKT] Skip %s mtd In Case of Overall mtdpartition (0x%x Vs. 0x%x)\n",parts[i].name,(uint32_t)parts[i].size,(uint32_t)(iSizeOfFlash-bbtSize));
			continue;
		}

		/* j walks physical blocks, k walks logical blocks; each good
		 * physical block j backs logical block k with offset (j-k). */
		for(j=iStartblock,k=iStartblock;j<iEndblock;j++)
		{
			isBad = brcmnand_smt_isbadBlcok(master, (loff_t)( j*iSizeOfBlock));
			if(isBad==1) 
				continue;
			else
			{
				gSkipTBL[k].iOffset = (j-k)*iSizeOfBlock;
				k++;
			}
		}
		/* Logical blocks with no physical backing left are marked used. */
		for(;k<iEndblock;k++)
		{
			gSkipTBL[k].iOffset = USED_SKIP_BLOCK;
		}
	}


//	for(i=0;i<1024;i++)	printk("[%4d] 0x%8x\n",i,gSkipTBL[i].iOffset);

	gSkipFlag =1;
}
Esempio n. 19
0
/*
 * Erase an address range on the flash chip.  The address range may extend
 * one or more erase sectors.  Return an error is there is a problem erasing.
 */
static int m25p80_erase(struct mtd_info *mtd, struct erase_info *instr)
{
	struct m25p *flash = mtd_to_m25p(mtd);
	u32 addr,len;
	uint64_t tmpdiv;
	int rem, rem1;

	DEBUG(MTD_DEBUG_LEVEL2, "%s: %s %s 0x%08x, len %d\n",
			flash->spi->dev.bus_id, __FUNCTION__, "at",
			(u32)instr->addr, instr->len);

	/* sanity checks */
	if (instr->addr + instr->len > device_size(&(flash->mtd)))
		return -EINVAL;
	/* Both start and length must be erase-sector aligned. */
	tmpdiv = (uint64_t) instr->addr;
 	rem = do_div(tmpdiv, mtd->erasesize);
	tmpdiv = (uint64_t) instr->len;
	rem1 = do_div(tmpdiv, mtd->erasesize);
	if (rem != 0 || rem1 != 0) {
		return -EINVAL;
	}

	addr = instr->addr;
	len = instr->len;

	mutex_lock(&flash->lock);

	/* REVISIT in some cases we could speed up erasing large regions
	 * by using OPCODE_SE instead of OPCODE_BE_4K
	 */

	/* now erase those sectors */
	while (len) {
#ifdef CONFIG_MIPS_BRCM97XXX
		/* BSPI remaps each 4MB segment */
		if (erase_sector(flash, (addr + 0x400000) & 0xffffff)) {
#else
		if (erase_sector(flash, addr)) {
#endif
			instr->state = MTD_ERASE_FAILED;
			mutex_unlock(&flash->lock);
			return -EIO;
		}

		addr += mtd->erasesize;
		len -= mtd->erasesize;
	}

	mutex_unlock(&flash->lock);

	/* Report completion to the MTD layer via the erase callback. */
	instr->state = MTD_ERASE_DONE;
	mtd_erase_callback(instr);

	return 0;
}

/*
 * Read an address range from the flash chip.  The address range
 * may be any size provided it is within the physical boundaries.
 */
static int m25p80_read(struct mtd_info *mtd, loff_t from, size_t len,
	size_t *retlen, u_char *buf)
{
	struct m25p *flash = mtd_to_m25p(mtd);
	struct spi_transfer t[2];
	struct spi_message m;
	size_t total_len = len;

	DEBUG(MTD_DEBUG_LEVEL2, "%s: %s %s 0x%08x, len %zd\n",
			flash->spi->dev.bus_id, __FUNCTION__, "from",
			(u32)from, len);

	/* sanity checks */
	if (!len)
		return 0;

	if (from + len > device_size(&(flash->mtd)))
		return -EINVAL;

	if (retlen)
		*retlen = 0;

	/* Issue one READ command per chunk until the request is satisfied. */
	total_len = len;
	while(total_len) {
		len = total_len;

#if 0 //defined(BRCM_SPI_SS_WAR)
	/*
	 * For testing purposes only - read 12 bytes at a time:
	 *
	 * 3548a0 MSPI has a 12-byte limit (PR42350).
	 * MSPI emulated via BSPI has no such limit.
	 * In production BSPI is always used because it is much faster.
	 */
		if(len > 12)
			len = 12;
#endif
#ifdef CONFIG_MIPS_BRCM97XXX
		/* don't cross a 4MB boundary due to remapping */
		len = min(len, (0x400000 - ((u32)from & 0x3fffff)));
#endif
		/* Two transfers: command+address out, then data in. */
		spi_message_init(&m);
		memset(t, 0, (sizeof t));

		t[0].tx_buf = flash->command;
		t[0].len = sizeof(flash->command);
		spi_message_add_tail(&t[0], &m);

		t[1].rx_buf = buf;
		t[1].len = len;
		spi_message_add_tail(&t[1], &m);

		/* Byte count starts at zero. */

		mutex_lock(&flash->lock);

		/* Wait till previous write/erase is done. */
		if (wait_till_ready(flash)) {
			/* REVISIT status return?? */
			mutex_unlock(&flash->lock);
			return 1;
		}

		/* FIXME switch to OPCODE_FAST_READ.  It's required for higher
		 * clocks; and at this writing, every chip this driver handles
		 * supports that opcode.
		 */

		/* Set up the write data buffer. */
		flash->command[0] = OPCODE_READ;
#ifdef CONFIG_MIPS_BRCM97XXX
		/* BSPI remaps each 4MB segment */
		flash->command[1] = ((from >> 16) + 0x40) & 0xff;
#else
		flash->command[1] = from >> 16;
#endif
		flash->command[2] = from >> 8;
		flash->command[3] = from;

		spi_sync(flash->spi, &m);

		*retlen += m.actual_length - sizeof(flash->command);

		mutex_unlock(&flash->lock);

		from += len;
		buf += len;
		total_len -= len;
	}

	return 0;
}

/*
 * Write an address range to the flash chip.  Data must be written in
 * FLASH_PAGESIZE chunks.  The address range may be any size provided
 * it is within the physical boundaries.
 *
 * Returns 0 on success, a negative errno on bad parameters, or 1 when
 * the chip does not become ready.
 */
static int m25p80_write(struct mtd_info *mtd, loff_t to, size_t len,
	size_t *retlen, const u_char *buf)
{
	struct m25p *flash = mtd_to_m25p(mtd);
	u32 page_offset, page_size;
	struct spi_transfer t[2];
	struct spi_message m;

	DEBUG(MTD_DEBUG_LEVEL2, "%s: %s %s 0x%08x, len %zd\n",
			flash->spi->dev.bus_id, __FUNCTION__, "to",
			(u32)to, len);

	if (retlen)
		*retlen = 0;

	/* sanity checks */
	if (!len)
		return(0);

	if (to + len > device_size(&(flash->mtd)))
		return -EINVAL;
#ifdef BRCM_SPI_SS_WAR
	if(len > 12)
		return -EIO;
#endif

	/* Two transfers: command+address out, then the data payload. */
	spi_message_init(&m);
	memset(t, 0, (sizeof t));

	t[0].tx_buf = flash->command;
	t[0].len = sizeof(flash->command);
	spi_message_add_tail(&t[0], &m);

	t[1].tx_buf = buf;
	spi_message_add_tail(&t[1], &m);

	mutex_lock(&flash->lock);

	/* Wait until finished previous write command. */
	if (wait_till_ready(flash)) {
		/* FIX: release the lock before bailing out.  The original
		 * returned here with flash->lock still held, deadlocking
		 * every subsequent read/write/erase on this chip (compare
		 * the unlock-on-failure paths in m25p80_erase/_read). */
		mutex_unlock(&flash->lock);
		return 1;
	}

	write_enable(flash);

	/* Set up the opcode in the write buffer. */
	flash->command[0] = OPCODE_PP;
#ifdef CONFIG_MIPS_BRCM97XXX
	/* BSPI remaps each 4MB segment */
	flash->command[1] = ((to >> 16) + 0x40) & 0xff;
#else
	flash->command[1] = to >> 16;
#endif
	flash->command[2] = to >> 8;
	flash->command[3] = to;

	/* what page do we start with? */
	page_offset = to % FLASH_PAGESIZE;

	/* do all the bytes fit onto one page? */
	if (page_offset + len <= FLASH_PAGESIZE) {
		t[1].len = len;

		spi_sync(flash->spi, &m);

		*retlen = m.actual_length - sizeof(flash->command);
	} else {
		u32 i;

		/* the size of data remaining on the first page */
		page_size = FLASH_PAGESIZE - page_offset;

		t[1].len = page_size;
		spi_sync(flash->spi, &m);

		*retlen = m.actual_length - sizeof(flash->command);

		/* write everything in PAGESIZE chunks */
		for (i = page_size; i < len; i += page_size) {
			page_size = len - i;
			if (page_size > FLASH_PAGESIZE)
				page_size = FLASH_PAGESIZE;

			/* write the next page to flash */
#ifdef CONFIG_MIPS_BRCM97XXX
			/* BSPI remaps each 4MB segment */
			flash->command[1] = (((to + i) >> 16) + 0x40) & 0xff;
#else
			flash->command[1] = (to + i) >> 16;
#endif
			flash->command[2] = (to + i) >> 8;
			flash->command[3] = (to + i);

			t[1].tx_buf = buf + i;
			t[1].len = page_size;

			wait_till_ready(flash);

			write_enable(flash);

			spi_sync(flash->spi, &m);

			if (retlen)
				*retlen += m.actual_length
					- sizeof(flash->command);
		}
	}

	mutex_unlock(&flash->lock);

	return 0;
}


/****************************************************************************/

/*
 * SPI device driver setup and teardown
 */

/* Static description of one supported SPI flash chip. */
struct flash_info {
	char		*name;		/* human-readable chip name */

	/* JEDEC id zero means "no ID" (most older chips); otherwise it has
	 * a high byte of zero plus three data bytes: the manufacturer id,
	 * then a two byte device id.
	 */
	u32		jedec_id;

	/* The size listed here is what works with OPCODE_SE, which isn't
	 * necessarily called a "sector" by the vendor.
	 */
	unsigned	sector_size;
	u16		n_sectors;	/* total capacity = sector_size * n_sectors */

	u16		flags;
#define	SECT_4K		0x01		/* OPCODE_BE_4K works uniformly */
};


/* NOTE: double check command sets and memory organization when you add
 * more flash chips.  This current list focusses on newer chips, which
 * have been converging on command sets which including JEDEC ID.
 */
/* Entry order: { name, jedec_id, sector_size, n_sectors, flags }. */
static struct flash_info __devinitdata m25p_data [] = {

	/* Atmel -- some are (confusingly) marketed as "DataFlash" */
	{ "at25fs010",  0x1f6601, 32 * 1024, 4, SECT_4K, },
	{ "at25fs040",  0x1f6604, 64 * 1024, 8, SECT_4K, },

	{ "at25df041a", 0x1f4401, 64 * 1024, 8, SECT_4K, },

	{ "at26f004",   0x1f0400, 64 * 1024, 8, SECT_4K, },
	{ "at26df081a", 0x1f4501, 64 * 1024, 16, SECT_4K, },
	{ "at26df161a", 0x1f4601, 64 * 1024, 32, SECT_4K, },
	{ "at26df321",  0x1f4701, 64 * 1024, 64, SECT_4K, },

	/* Spansion -- single (large) sector size only, at least
	 * for the chips listed here (without boot sectors).
	 */
	{ "s25sl004a", 0x010212, 64 * 1024, 8, },
	{ "s25sl008a", 0x010213, 64 * 1024, 16, },
	{ "s25sl016a", 0x010214, 64 * 1024, 32, },
	{ "s25sl032a", 0x010215, 64 * 1024, 64, },
	{ "s25sl064a", 0x010216, 64 * 1024, 128, },
#ifdef CONFIG_MIPS_BRCM97XXX
	{ "s25fl128p", 0x012018, 64 * 1024, 256, },
#endif

	/* SST -- large erase sizes are "overlays", "sectors" are 4K */
	{ "sst25vf040b", 0xbf258d, 64 * 1024, 8, SECT_4K, },
	{ "sst25vf080b", 0xbf258e, 64 * 1024, 16, SECT_4K, },
	{ "sst25vf016b", 0xbf2541, 64 * 1024, 32, SECT_4K, },
	{ "sst25vf032b", 0xbf254a, 64 * 1024, 64, SECT_4K, },

	/* ST Microelectronics -- newer production may have feature updates */
	{ "m25p05",  0x202010,  32 * 1024, 2, },
	{ "m25p10",  0x202011,  32 * 1024, 4, },
	{ "m25p20",  0x202012,  64 * 1024, 4, },
	{ "m25p40",  0x202013,  64 * 1024, 8, },
#ifndef CONFIG_MIPS_BRCM97XXX
	/* ID 0 is detected when there's nothing on the bus */
	{ "m25p80",         0,  64 * 1024, 16, },
#endif
	{ "m25p16",  0x202015,  64 * 1024, 32, },
	{ "m25p32",  0x202016,  64 * 1024, 64, },
	{ "m25p64",  0x202017,  64 * 1024, 128, },
	{ "m25p128", 0x202018, 256 * 1024, 64, },

	{ "m45pe80", 0x204014,  64 * 1024, 16, },
	{ "m45pe16", 0x204015,  64 * 1024, 32, },

	{ "m25pe80", 0x208014,  64 * 1024, 16, },
	{ "m25pe16", 0x208015,  64 * 1024, 32, SECT_4K, },

	/* Winbond -- w25x "blocks" are 64K, "sectors" are 4KiB */
	{ "w25x10", 0xef3011, 64 * 1024, 2, SECT_4K, },
	{ "w25x20", 0xef3012, 64 * 1024, 4, SECT_4K, },
	{ "w25x40", 0xef3013, 64 * 1024, 8, SECT_4K, },
	{ "w25x80", 0xef3014, 64 * 1024, 16, SECT_4K, },
	{ "w25x16", 0xef3015, 64 * 1024, 32, SECT_4K, },
	{ "w25x32", 0xef3016, 64 * 1024, 64, SECT_4K, },
	{ "w25x64", 0xef3017, 64 * 1024, 128, SECT_4K, },
};

/*
 * Identify the attached SPI flash by its JEDEC ID.
 *
 * Issues OPCODE_RDID, reads the three ID bytes, and looks the assembled
 * 24-bit value up in m25p_data[].  Returns the matching table entry, or
 * NULL if the SPI transfer failed or the ID is unknown.
 */
static struct flash_info *__devinit jedec_probe(struct spi_device *spi)
{
	u8	opcode = OPCODE_RDID;
	u8	id[3];
	u32	jedec;
	int	status;
	int	i;

	/* JEDEC also defines an optional "extended device information"
	 * string for after vendor-specific data, after the three bytes
	 * we use here.  Supporting some chips might require using it.
	 */
	status = spi_write_then_read(spi, &opcode, 1, id, 3);
	if (status < 0) {
		DEBUG(MTD_DEBUG_LEVEL0, "%s: error %d reading JEDEC ID\n",
			spi->dev.bus_id, status);
		return NULL;
	}

	/* Assemble manufacturer byte + two device bytes, MSB first. */
	jedec = ((u32)id[0] << 16) | ((u32)id[1] << 8) | id[2];

	for (i = 0; i < ARRAY_SIZE(m25p_data); i++) {
		if (m25p_data[i].jedec_id == jedec)
			return &m25p_data[i];
	}

	dev_err(&spi->dev, "unrecognized JEDEC id %06x\n", jedec);
	return NULL;
}
/* Esempio n. 20 -- example-listing separator, kept as a comment */
/*
 * read() handler for the MTD character device.
 *
 * Honors the per-open file mode (mfi->mode): OTP factory/user regions,
 * raw (ECC-bypassing) reads, or normal reads.  Data is staged through a
 * bounce buffer of at most MAX_KMALLOC_SIZE bytes per iteration.
 *
 * Returns the number of bytes copied to userspace, 0 at end of device,
 * or a negative errno.
 */
static ssize_t mtd_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
{
	struct mtd_file_info *mfi = file->private_data;
	struct mtd_info *mtd = mfi->mtd;
	size_t retlen = 0;
	size_t total_retlen = 0;
	int ret = 0;
	int len;
	char *kbuf = NULL;

	DEBUG(MTD_DEBUG_LEVEL0, "MTD_read\n");

#if defined(HUMAX_PLATFORM_BASE)
	if (mfi->mode == MTD_MODE_OTP_GET_EXT_INFO) {
		/* Report manufacturer/device IDs through the read() path.
		 * Assumes mtd_ext_info_user is the 8-byte {devId, mfrId}
		 * pair implied by the "return 8" below -- TODO confirm.
		 */
		struct mtd_ext_info *pExInfo;
		struct mtd_ext_info_user user_ext;

		kbuf = kmalloc(8, GFP_KERNEL);
		if (!kbuf)	/* BUGFIX: allocation result was unchecked */
			return -ENOMEM;
		mtd->read_ext_info(mtd, kbuf);
		pExInfo = (struct mtd_ext_info *)kbuf;

		/* BUGFIX: the original wrote through the raw __user pointer
		 * (pUserExt->devId = ...); user memory must be written via
		 * copy_to_user().  Stage in a zeroed kernel struct first. */
		memset(&user_ext, 0, sizeof(user_ext));
		user_ext.devId = pExInfo->devId;
		user_ext.mfrId = pExInfo->mfr;
		printk("get_ext_info(%08x,%08x)\n", pExInfo->devId, pExInfo->mfr);
		kfree(kbuf);

		if (copy_to_user(buf, &user_ext, sizeof(user_ext)))
			return -EFAULT;
		return 8;
	}
#endif

	/* Clamp the request to the device size.  BUGFIX: reading at or past
	 * the end used to make the size_t subtraction wrap to a huge count. */
	if (*ppos >= device_size(mtd))
		return 0;
	if (*ppos + count > device_size(mtd))
		count = device_size(mtd) - *ppos;

	if (!count)
		return 0;

	/* FIXME: Use kiovec in 2.5 to lock down the user's buffers
	   and pass them directly to the MTD functions */

	if (count > MAX_KMALLOC_SIZE)
		kbuf = kmalloc(MAX_KMALLOC_SIZE, GFP_KERNEL);
	else
		kbuf = kmalloc(count, GFP_KERNEL);

	if (!kbuf)
		return -ENOMEM;

	while (count) {

		if (count > MAX_KMALLOC_SIZE)
			len = MAX_KMALLOC_SIZE;
		else
			len = count;

		switch (mfi->mode) {
		case MTD_MODE_OTP_FACTORY:
			ret = mtd->read_fact_prot_reg(mtd, *ppos, len, &retlen, kbuf);
			break;
		case MTD_MODE_OTP_USER:
			ret = mtd->read_user_prot_reg(mtd, *ppos, len, &retlen, kbuf);
			break;
		case MTD_MODE_RAW:
		{
			struct mtd_oob_ops ops;

			ops.mode = MTD_OOB_RAW;
			ops.datbuf = kbuf;
			ops.oobbuf = NULL;
			ops.len = len;

			ret = mtd->read_oob(mtd, *ppos, &ops);
			retlen = ops.retlen;
			break;
		}
		default:
			ret = mtd->read(mtd, *ppos, len, &retlen, kbuf);
		}
		/* Nand returns -EBADMSG on ecc errors, but it returns
		 * the data. For our userspace tools it is important
		 * to dump areas with ecc errors !
		 * For kernel internal usage it also might return -EUCLEAN
		 * to signal the caller that a bitflip has occured and has
		 * been corrected by the ECC algorithm.
		 * Userspace software which accesses NAND this way
		 * must be aware of the fact that it deals with NAND
		 */
		if (!ret || (ret == -EUCLEAN) || (ret == -EBADMSG)) {
			*ppos += retlen;
			if (copy_to_user(buf, kbuf, retlen)) {
				kfree(kbuf);
				return -EFAULT;
			}
			else
				total_retlen += retlen;

			count -= retlen;
			buf += retlen;
			/* A zero-length read means no forward progress is
			 * possible; stop instead of spinning forever. */
			if (retlen == 0)
				count = 0;
		}
		else {
			kfree(kbuf);
			return ret;
		}

	}

	kfree(kbuf);
	return total_retlen;
} /* mtd_read */
/* Esempio n. 21 -- example-listing separator, kept as a comment */
/*
 * ioctl() handler for the MTD character device.
 *
 * Dispatches the MEMxxx / OTPxxx / ECCxxx commands: device geometry
 * queries, erase, OOB read/write, lock/unlock, bad-block handling, OTP
 * region access and per-open file-mode selection.  Returns 0 on success
 * or a negative errno.
 */
static int mtd_ioctl(struct inode *inode, struct file *file,
		     u_int cmd, u_long arg)
{
	struct mtd_file_info *mfi = file->private_data;
	struct mtd_info *mtd = mfi->mtd;
	void __user *argp = (void __user *)arg;
	int ret = 0;
	u_long size;
	struct mtd_info_user info;
	struct mtd_info_user64 info64;

	DEBUG(MTD_DEBUG_LEVEL0, "MTD_ioctl\n");

	/* Pre-validate the user buffer for the direction encoded in cmd. */
	size = (cmd & IOCSIZE_MASK) >> IOCSIZE_SHIFT;
	if (cmd & IOC_IN) {
		if (!access_ok(VERIFY_READ, argp, size))
			return -EFAULT;
	}
	if (cmd & IOC_OUT) {
		if (!access_ok(VERIFY_WRITE, argp, size))
			return -EFAULT;
	}

	switch (cmd) {
	case MEMGETREGIONCOUNT:
		if (copy_to_user(argp, &(mtd->numeraseregions), sizeof(int)))
			return -EFAULT;
		break;

	case MEMGETREGIONINFO:
		{
		struct region_info_user ur;
		struct mtd_erase_region_info32 mer;

		if (copy_from_user(&ur, argp, sizeof(struct region_info_user)))
			return -EFAULT;

		if (ur.regionindex >= mtd->numeraseregions)
			return -EINVAL;
		/* Legacy 32-bit region info: offset is truncated to 32 bits. */
		mer.offset = (u_int32_t) mtd->eraseregions[ur.regionindex].offset;
		mer.erasesize = mtd->eraseregions[ur.regionindex].erasesize;
		mer.numblocks = mtd->eraseregions[ur.regionindex].numblocks;

		if (copy_to_user(argp, &mer, sizeof(struct mtd_erase_region_info32)))
			return -EFAULT;
		break;
		}

	case MEMGETREGIONINFO64:
		{
		struct region_info_user64 ur;

		if (copy_from_user(&ur, argp, sizeof(struct region_info_user64)))
			return -EFAULT;
		if (ur.regionindex >= mtd->numeraseregions)
			return -EINVAL;
		/* BUGFIX: a failed copy_to_user() is -EFAULT, not -EINVAL. */
		if (copy_to_user(argp, &(mtd->eraseregions[ur.regionindex]),
					sizeof(struct mtd_erase_region_info)))
			return -EFAULT;
		break;
		}

	case MEMGETINFO:
		info.type	= mtd->type;
		info.flags	= mtd->flags;
		info.size	= device_size(mtd);
		info.erasesize	= mtd->erasesize;
		info.writesize	= mtd->writesize;
		info.oobsize	= mtd->oobsize;
		info.ecctype	= mtd->ecctype;
		info.eccsize	= mtd->eccsize;
		if (copy_to_user(argp, &info, sizeof(struct mtd_info_user)))
			return -EFAULT;
		break;

	case MEMGETINFO64:
		info64.type		= mtd->type;
		info64.flags		= mtd->flags;
		info64.size		= device_size(mtd);
		info64.erasesize	= mtd->erasesize;
		info64.writesize	= mtd->writesize;
		info64.oobsize		= mtd->oobsize;
		info64.ecctype		= mtd->ecctype;
		info64.eccsize		= mtd->eccsize;
		if (copy_to_user(argp, &info64, sizeof(struct mtd_info_user64)))
			return -EFAULT;
		break;

	case MEMERASE:
	{
		struct erase_info32 *erase; /* Backward compatible older struct */
		struct erase_info *erase64; /* Actual struct sent to kernel */

		if (!(file->f_mode & 2))
			return -EPERM;

		erase = kmalloc(sizeof(struct erase_info32), GFP_KERNEL);
		erase64 = kmalloc(sizeof(struct erase_info), GFP_KERNEL);
		if (!erase || !erase64) {
			/* BUGFIX: free whichever allocation succeeded;
			 * kfree(NULL) is a no-op. */
			kfree(erase);
			kfree(erase64);
			ret = -ENOMEM;
		} else {
			wait_queue_head_t waitq;
			DECLARE_WAITQUEUE(wait, current);

			init_waitqueue_head(&waitq);

			memset (erase,0,sizeof(struct erase_info32));
			if (copy_from_user(&erase->addr, argp,
				    sizeof(struct erase_info_user))) {
				kfree(erase);
				kfree(erase64);
				return -EFAULT;
			}
			/* Send an erase64 to the kernel mtd layer */
			erase64->addr = (uint64_t) erase->addr;
			erase64->len = erase->len;

			erase64->mtd = mtd;
			erase64->callback = mtdchar_erase_callback;
			erase64->priv = (unsigned long)&waitq;

			/*
			  FIXME: Allow INTERRUPTIBLE. Which means
			  not having the wait_queue head on the stack.

			  If the wq_head is on the stack, and we
			  leave because we got interrupted, then the
			  wq_head is no longer there when the
			  callback routine tries to wake us up.
			*/
			ret = mtd->erase(mtd, erase64);
			if (!ret) {
				set_current_state(TASK_UNINTERRUPTIBLE);
				add_wait_queue(&waitq, &wait);
				if (erase64->state != MTD_ERASE_DONE &&
				    erase64->state != MTD_ERASE_FAILED)
					schedule();
				remove_wait_queue(&waitq, &wait);
				set_current_state(TASK_RUNNING);

				ret = (erase64->state == MTD_ERASE_FAILED)?-EIO:0;
			}
			kfree(erase);
			kfree(erase64);
		}
		break;
	}

	case MEMERASE64:
	{
		struct erase_info *erase;

		if (!(file->f_mode & 2))
			return -EPERM;

		erase = kmalloc(sizeof(struct erase_info), GFP_KERNEL);
		if (!erase)
			ret = -ENOMEM;
		else {
			wait_queue_head_t waitq;
			DECLARE_WAITQUEUE(wait, current);

			init_waitqueue_head(&waitq);

			memset (erase,0,sizeof(struct erase_info));
			if (copy_from_user(&erase->addr, argp,
				    sizeof(struct erase_info_user64))) {
				kfree(erase);
				return -EFAULT;
			}
			erase->mtd = mtd;
			erase->callback = mtdchar_erase_callback;
			erase->priv = (unsigned long)&waitq;

			/*
			  FIXME: Allow INTERRUPTIBLE. Which means
			  not having the wait_queue head on the stack.

			  If the wq_head is on the stack, and we
			  leave because we got interrupted, then the
			  wq_head is no longer there when the
			  callback routine tries to wake us up.
			*/
			ret = mtd->erase(mtd, erase);
			if (!ret) {
				set_current_state(TASK_UNINTERRUPTIBLE);
				add_wait_queue(&waitq, &wait);
				if (erase->state != MTD_ERASE_DONE &&
				    erase->state != MTD_ERASE_FAILED)
					schedule();
				remove_wait_queue(&waitq, &wait);
				set_current_state(TASK_RUNNING);

				ret = (erase->state == MTD_ERASE_FAILED)?-EIO:0;
			}
			kfree(erase);
		}
		break;
	}

	case MEMWRITEOOB:
	{
		struct mtd_oob_buf buf;
		struct mtd_oob_ops ops;

		if (!(file->f_mode & 2))
			return -EPERM;

		if (copy_from_user(&buf, argp, sizeof(struct mtd_oob_buf)))
			return -EFAULT;

		if (buf.length > 4096)
			return -EINVAL;

		if (!mtd->write_oob)
			ret = -EOPNOTSUPP;
		else
			/* BUGFIX: was positive EFAULT, which callers would
			 * see as a bogus positive return value. */
			ret = access_ok(VERIFY_READ, buf.ptr,
					buf.length) ? 0 : -EFAULT;

		if (ret)
			return ret;

		ops.len = buf.length;
		ops.ooblen = buf.length;
		ops.ooboffs = buf.start & (mtd->oobsize - 1);
		ops.datbuf = NULL;
		ops.mode = MTD_OOB_PLACE;

		if (ops.ooboffs && ops.len > (mtd->oobsize - ops.ooboffs))
			return -EINVAL;

		ops.oobbuf = kmalloc(buf.length, GFP_KERNEL);
		if (!ops.oobbuf)
			return -ENOMEM;

		if (copy_from_user(ops.oobbuf, buf.ptr, buf.length)) {
			kfree(ops.oobbuf);
			return -EFAULT;
		}

		buf.start &= ~(mtd->oobsize - 1);
		ret = mtd->write_oob(mtd, buf.start, &ops);

		if (copy_to_user(argp + sizeof(uint32_t), &ops.retlen,
				 sizeof(uint32_t)))
		{
			ret = -EFAULT;
		}

		kfree(ops.oobbuf);
		break;

	}

	case MEMWRITEOOB64:
	{
		struct mtd_oob_buf64 buf;
		struct mtd_oob_ops ops;

		if (!(file->f_mode & 2))
			return -EPERM;

		if (copy_from_user(&buf, argp, sizeof(struct mtd_oob_buf64)))
			return -EFAULT;

		if (buf.length > 4096)
			return -EINVAL;

		if (!mtd->write_oob)
			ret = -EOPNOTSUPP;
		else
			/* BUGFIX: was positive EFAULT (see MEMWRITEOOB). */
			ret = access_ok(VERIFY_READ, buf.ptr,
					buf.length) ? 0 : -EFAULT;

		if (ret)
			return ret;

		ops.len = buf.length;
		ops.ooblen = buf.length;
		ops.ooboffs = buf.start & (mtd->oobsize - 1);
		ops.datbuf = NULL;
		ops.mode = MTD_OOB_PLACE;

		if (ops.ooboffs && ops.len > (mtd->oobsize - ops.ooboffs))
			return -EINVAL;

		ops.oobbuf = kmalloc(buf.length, GFP_KERNEL);
		if (!ops.oobbuf)
			return -ENOMEM;

		if (copy_from_user(ops.oobbuf, buf.ptr, buf.length)) {
			kfree(ops.oobbuf);
			return -EFAULT;
		}

		buf.start &= ~(mtd->oobsize - 1);
		ret = mtd->write_oob(mtd, buf.start, &ops);

		if (copy_to_user(argp + sizeof(uint32_t), &ops.retlen,
				 sizeof(uint32_t)))
		{
			ret = -EFAULT;
		}

		kfree(ops.oobbuf);
		break;

	}

	case MEMREADOOB:
	{
		struct mtd_oob_buf buf;
		struct mtd_oob_ops ops;

		if (copy_from_user(&buf, argp, sizeof(struct mtd_oob_buf)))
			return -EFAULT;

		if (buf.length > 4096)
			return -EINVAL;

		if (!mtd->read_oob)
			ret = -EOPNOTSUPP;
		else
			ret = access_ok(VERIFY_WRITE, buf.ptr,
					buf.length) ? 0 : -EFAULT;
		if (ret)
			return ret;

		ops.len = buf.length;
		ops.ooblen = buf.length;
		ops.ooboffs = buf.start & (mtd->oobsize - 1);
		ops.datbuf = NULL;
		ops.mode = MTD_OOB_PLACE;

		if (ops.ooboffs && ops.len > (mtd->oobsize - ops.ooboffs))
			return -EINVAL;

		ops.oobbuf = kmalloc(buf.length, GFP_KERNEL);
		if (!ops.oobbuf)
			return -ENOMEM;

		buf.start &= ~(mtd->oobsize - 1);
		ret = mtd->read_oob(mtd, buf.start, &ops);

		if (put_user(ops.retlen, (uint32_t __user *)argp)) {
			ret = -EFAULT;
		}
		else if (ops.retlen && copy_to_user(buf.ptr, ops.oobbuf,
						    ops.retlen)) {
			ret = -EFAULT;
		}

		kfree(ops.oobbuf);
		break;
	}

	case MEMREADOOB64:
	{
		struct mtd_oob_buf64 buf;
		struct mtd_oob_ops ops;

		if (copy_from_user(&buf, argp, sizeof(struct mtd_oob_buf64)))
			return -EFAULT;

		if (buf.length > 4096)
			return -EINVAL;

		if (!mtd->read_oob)
			ret = -EOPNOTSUPP;
		else
			ret = access_ok(VERIFY_WRITE, buf.ptr,
					buf.length) ? 0 : -EFAULT;
		if (ret)
			return ret;

		ops.len = buf.length;
		ops.ooblen = buf.length;
		ops.ooboffs = buf.start & (mtd->oobsize - 1);
		ops.datbuf = NULL;
		ops.mode = MTD_OOB_PLACE;

		if (ops.ooboffs && ops.len > (mtd->oobsize - ops.ooboffs))
			return -EINVAL;

		ops.oobbuf = kmalloc(buf.length, GFP_KERNEL);
		if (!ops.oobbuf)
			return -ENOMEM;

		buf.start &= ~(mtd->oobsize - 1);
		ret = mtd->read_oob(mtd, buf.start, &ops);

		if (put_user(ops.retlen, (uint32_t __user *)argp)) {
			ret = -EFAULT;
		}
		else if (ops.retlen && copy_to_user(buf.ptr, ops.oobbuf,
						    ops.retlen)) {
			ret = -EFAULT;
		}

		kfree(ops.oobbuf);
		break;
	}

	case MEMLOCK:
	{
		struct erase_info_user info;

		if (copy_from_user(&info, argp, sizeof(info)))
			return -EFAULT;

		if (!mtd->lock)
			ret = -EOPNOTSUPP;
		else
			ret = mtd->lock(mtd, info.start, info.length);
		break;
	}

	case MEMUNLOCK:
	{
		struct erase_info_user info;

		if (copy_from_user(&info, argp, sizeof(info)))
			return -EFAULT;

		if (!mtd->unlock)
			ret = -EOPNOTSUPP;
		else
			ret = mtd->unlock(mtd, info.start, info.length);
		break;
	}

	case MEMUNLOCK64:
	{
		struct erase_info_user64 info;

		if (copy_from_user(&info, argp, sizeof(info)))
			return -EFAULT;

		if (!mtd->unlock)
			ret = -EOPNOTSUPP;
		else
			ret = mtd->unlock(mtd, info.start, info.length);
		break;
	}

	/* Legacy interface */
	case MEMGETOOBSEL:
	{
		struct nand_oobinfo32 oi;

		if (!mtd->ecclayout)
			return -EOPNOTSUPP;
		if (mtd->ecclayout->eccbytes > ARRAY_SIZE(oi.eccpos))
			return -EINVAL;

		oi.useecc = MTD_NANDECC_AUTOPLACE;
		memcpy(&oi.eccpos, mtd->ecclayout->eccpos, sizeof(oi.eccpos));
		memcpy(&oi.oobfree, mtd->ecclayout->oobfree,
		       sizeof(oi.oobfree));

		if (copy_to_user(argp, &oi, sizeof(struct nand_oobinfo32)))
			return -EFAULT;
		break;
	}

	/* Legacy interface */
	case MEMGETOOBSEL64:
	{
		struct nand_oobinfo oi;

		if (!mtd->ecclayout)
			return -EOPNOTSUPP;
		if (mtd->ecclayout->eccbytes > ARRAY_SIZE(oi.eccpos))
			return -EINVAL;

		oi.useecc = MTD_NANDECC_AUTOPLACE;
		memcpy(&oi.eccpos, mtd->ecclayout->eccpos, sizeof(oi.eccpos));
		memcpy(&oi.oobfree, mtd->ecclayout->oobfree,
		       sizeof(oi.oobfree));

		if (copy_to_user(argp, &oi, sizeof(struct nand_oobinfo)))
			return -EFAULT;
		break;
	}


	case MEMGETBADBLOCK:
	{
		loff_t offs;

		if (copy_from_user(&offs, argp, sizeof(loff_t)))
			return -EFAULT;
		if (!mtd->block_isbad)
			ret = -EOPNOTSUPP;
		else
			return mtd->block_isbad(mtd, offs);
		break;
	}

	case MEMSETBADBLOCK:
	{
		loff_t offs;

		if (copy_from_user(&offs, argp, sizeof(loff_t)))
			return -EFAULT;
		if (!mtd->block_markbad)
			ret = -EOPNOTSUPP;
		else
			return mtd->block_markbad(mtd, offs);
		break;
	}

#if defined(CONFIG_MTD_OTP) || defined(CONFIG_MTD_ONENAND_OTP)
	case OTPSELECT:
	{
		int mode;
		if (copy_from_user(&mode, argp, sizeof(int)))
			return -EFAULT;

		mfi->mode = MTD_MODE_NORMAL;

		ret = otp_select_filemode(mfi, mode);

		file->f_pos = 0;
		break;
	}

	case OTPGETREGIONCOUNT:
	case OTPGETREGIONINFO:
	{
		struct otp_info *buf = kmalloc(4096, GFP_KERNEL);
		if (!buf)
			return -ENOMEM;
		ret = -EOPNOTSUPP;
		switch (mfi->mode) {
		case MTD_MODE_OTP_FACTORY:
			if (mtd->get_fact_prot_info)
				ret = mtd->get_fact_prot_info(mtd, buf, 4096);
			break;
		case MTD_MODE_OTP_USER:
			if (mtd->get_user_prot_info)
				ret = mtd->get_user_prot_info(mtd, buf, 4096);
			break;
		default:
			break;
		}
		if (ret >= 0) {
			if (cmd == OTPGETREGIONCOUNT) {
				int nbr = ret / sizeof(struct otp_info);
				ret = copy_to_user(argp, &nbr, sizeof(int));
			} else
				ret = copy_to_user(argp, buf, ret);
			if (ret)
				ret = -EFAULT;
		}
		kfree(buf);
		break;
	}

	case OTPLOCK:
	{
		struct otp_info info;

		if (mfi->mode != MTD_MODE_OTP_USER)
			return -EINVAL;
		if (copy_from_user(&info, argp, sizeof(info)))
			return -EFAULT;
		if (!mtd->lock_user_prot_reg)
			return -EOPNOTSUPP;
		ret = mtd->lock_user_prot_reg(mtd, info.start, info.length);
		break;
	}
#endif

	case ECCGETLAYOUT:
	{
		if (!mtd->ecclayout)
			return -EOPNOTSUPP;

		/* BUGFIX: copy the ecclayout structure itself, not the
		 * address of the kernel pointer to it. */
		if (copy_to_user(argp, mtd->ecclayout,
				 sizeof(struct nand_ecclayout)))
			return -EFAULT;
		break;
	}

	case ECCGETSTATS:
	{
		if (copy_to_user(argp, &mtd->ecc_stats,
				 sizeof(struct mtd_ecc_stats)))
			return -EFAULT;
		break;
	}

	case MTDFILEMODE:
	{
		mfi->mode = 0;

		switch(arg) {
		case MTD_MODE_OTP_FACTORY:
		case MTD_MODE_OTP_USER:
			ret = otp_select_filemode(mfi, arg);
			break;

		case MTD_MODE_RAW:
			if (!mtd->read_oob || !mtd->write_oob)
				return -EOPNOTSUPP;
			mfi->mode = arg;
			/* fallthrough -- RAW also resets f_pos below */

		case MTD_MODE_NORMAL:
			break;
		default:
			ret = -EINVAL;
		}
		file->f_pos = 0;
		break;
	}

	default:
		ret = -ENOTTY;
	}

	return ret;
} /* memory_ioctl */