Example #1
/**
 * gluebi_erase - erase operation of emulated MTD devices.
 * @mtd: the MTD device description object
 * @instr: the erase operation description
 *
 * This function calls the erase callback when it finishes. Returns zero in
 * case of success and a negative error code in case of failure.
 */
static int gluebi_erase(struct mtd_info *mtd, struct erase_info *instr)
{
	int err, i, lnum, count;
	struct ubi_volume *vol;
	struct ubi_device *ubi;

	dbg_gen("erase %llu bytes at offset %llu", (unsigned long long)instr->len,
		 (unsigned long long)instr->addr);

	if (instr->addr < 0 || instr->addr > mtd->size - mtd->erasesize)
		return -EINVAL;

	if (instr->len < 0 || instr->addr + instr->len > mtd->size)
		return -EINVAL;

	if (mtd_mod_by_ws(instr->addr, mtd) || mtd_mod_by_ws(instr->len, mtd))
		return -EINVAL;

	lnum = mtd_div_by_eb(instr->addr, mtd);
	count = mtd_div_by_eb(instr->len, mtd);

	vol = container_of(mtd, struct ubi_volume, gluebi_mtd);
	ubi = vol->ubi;

	if (ubi->ro_mode)
		return -EROFS;

	for (i = 0; i < count; i++) {
		err = ubi_eba_unmap_leb(ubi, vol, lnum + i);
		if (err)
			goto out_err;
	}

	/*
	 * MTD erase operations are synchronous, so we have to make sure the
	 * physical eraseblock is wiped out.
	 */
	err = ubi_wl_flush(ubi);
	if (err)
		goto out_err;

	instr->state = MTD_ERASE_DONE;
	mtd_erase_callback(instr);
	return 0;

out_err:
	instr->state = MTD_ERASE_FAILED;
	instr->fail_addr = (long long)lnum * mtd->erasesize;
	return err;
}
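For reference, the mtd_div_by_eb() and mtd_mod_by_ws() helpers used above come from include/linux/mtd/mtd.h; they divide 64-bit sizes/offsets by the eraseblock or write-page size, with a shift fast path when the size is a power of two. Roughly (details vary between kernel versions):

static inline uint32_t mtd_div_by_eb(uint64_t sz, struct mtd_info *mtd)
{
	if (mtd->erasesize_shift)
		return sz >> mtd->erasesize_shift;	/* power-of-two fast path */
	do_div(sz, mtd->erasesize);
	return sz;
}

static inline uint32_t mtd_mod_by_ws(uint64_t sz, struct mtd_info *mtd)
{
	if (mtd->writesize_shift)
		return sz & mtd->writesize_mask;
	return do_div(sz, mtd->writesize);	/* do_div() returns the remainder */
}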
Example #2
static int get_bad_peb_limit(const struct ubi_device *ubi, int max_beb_per1024)
{
	int limit, device_pebs;
	uint64_t device_size;

	if (!max_beb_per1024)
		return 0;

	/*
	 * Here we are using size of the entire flash chip and
	 * not just the MTD partition size because the maximum
	 * number of bad eraseblocks is a percentage of the
	 * whole device and bad eraseblocks are not fairly
	 * distributed over the flash chip. So the worst case
	 * is that all the bad eraseblocks of the chip are in
	 * the MTD partition we are attaching (ubi->mtd).
	 */
	if (ubi->mtd->master)
		device_size = ubi->mtd->master->size;
	else
		device_size = ubi->mtd->size;

	device_pebs = mtd_div_by_eb(device_size, ubi->mtd);
	limit = mult_frac(device_pebs, max_beb_per1024, 1024);

	/* Round it up */
	if (mult_frac(limit, 1024, max_beb_per1024) < device_pebs)
		limit += 1;

	return limit;
}
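A quick worked example of the rounding above, with assumed numbers: a 256 MiB chip with 128 KiB eraseblocks has device_pebs = 2048; with max_beb_per1024 = 20, mult_frac(2048, 20, 1024) = 40, and the back-check mult_frac(40, 1024, 20) = 2048 is not below device_pebs, so the limit stays 40. With device_pebs = 2000 instead, the first division truncates to 39, the back-check gives 1996 < 2000, and the limit is rounded up to 40.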
Example #3
static int mtdblock_open(struct mtd_blktrans_dev *dev)
{
	struct mtdblock_priv *priv = dev2priv(dev);
	struct mtd_info *mtd = dev->mtd;
	int i, total_blocks, good_blocks = 0;

	if (!priv->bb_map)
		return 0;

	/* Make sure that no new bad blocks were added behind our back */
	total_blocks = mtd_div_by_eb(mtd->size, mtd);

	for (i = 0; i < total_blocks; ++i) {
		if (mtd->block_isbad(mtd, i * mtd->erasesize))
			continue;
		if (unlikely(priv->bb_map[good_blocks++] != (u16)i))
			goto mismatch;
	}

	if (good_blocks != total_blocks) {
		for (i = good_blocks; i < total_blocks; ++i)
			if (unlikely(priv->bb_map[i] != 0xffff))
				goto mismatch;
	}

	return 0;

mismatch:
	printk(KERN_ERR "mtdblock%d: bad block layout has changed\n",
	       dev->devnum);
	return -ENODEV;
}
Example #4
static void *dbbt_data_create(struct mtd_info *mtd)
{
	int n;
	int n_bad_blocks = 0;
	void *dbbt = xzalloc(mtd->writesize);
	uint32_t *bb = dbbt + 0x8;
	uint32_t *n_bad_blocksp = dbbt + 0x4;
	int num_blocks = mtd_div_by_eb(mtd->size, mtd);

	for (n = 0; n < num_blocks; n++) {
		loff_t offset = n * mtd->erasesize;
		if (mtd_block_isbad(mtd, offset)) {
			n_bad_blocks++;
			*bb = n;
			bb++;
		}
	}

	if (!n_bad_blocks) {
		free(dbbt);
		return NULL;
	}

	*n_bad_blocksp = n_bad_blocks;

	return dbbt;
}
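The matching reader shows up in Example #13 as dbbt_block_is_bad(). A minimal sketch of such a lookup, assuming the same layout as above (entry count at offset 0x4, bad block numbers from offset 0x8 on):

static bool dbbt_block_is_bad(void *dbbt_entries, int block)
{
	uint32_t *n_bad_blocksp, *bb;
	uint32_t i;

	if (!dbbt_entries)
		return false;	/* no DBBT means no recorded bad blocks */

	n_bad_blocksp = dbbt_entries + 0x4;
	bb = dbbt_entries + 0x8;

	for (i = 0; i < *n_bad_blocksp; i++)
		if (bb[i] == (uint32_t)block)
			return true;

	return false;
}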
Example #5
static int sharpsl_nand_init_ftl(struct mtd_info *mtd, struct sharpsl_ftl *ftl)
{
	unsigned int block_num, log_num, phymax;
	loff_t block_adr;
	u8 *oob;
	int i, ret;

	oob = kzalloc(mtd->oobsize, GFP_KERNEL);
	if (!oob)
		return -ENOMEM;

	phymax = mtd_div_by_eb(SHARPSL_FTL_PART_SIZE, mtd);

	/* FTL reserves 5% of the blocks + 1 spare */
	ftl->logmax = ((phymax * 95) / 100) - 1;

	ftl->log2phy = kmalloc_array(ftl->logmax, sizeof(*ftl->log2phy),
				     GFP_KERNEL);
	if (!ftl->log2phy) {
		ret = -ENOMEM;
		goto exit;
	}

	/* initialize ftl->log2phy */
	for (i = 0; i < ftl->logmax; i++)
		ftl->log2phy[i] = UINT_MAX;

	/* create physical-logical table */
	for (block_num = 0; block_num < phymax; block_num++) {
		block_adr = block_num * mtd->erasesize;

		if (mtd_block_isbad(mtd, block_adr))
			continue;

		if (sharpsl_nand_read_oob(mtd, block_adr, oob))
			continue;

		/* get logical block */
		log_num = sharpsl_nand_get_logical_num(oob);

		/* cut-off errors and skip the out-of-range values */
		if (log_num > 0 && log_num < ftl->logmax) {
			if (ftl->log2phy[log_num] == UINT_MAX)
				ftl->log2phy[log_num] = block_num;
		}
	}

	pr_info("Sharp SL FTL: %d blocks used (%d logical, %d reserved)\n",
		phymax, ftl->logmax, phymax - ftl->logmax);

	ret = 0;
exit:
	kfree(oob);
	return ret;
}
Example #6
static int sharpsl_nand_read_laddr(struct mtd_info *mtd,
				   loff_t from,
				   size_t len,
				   void *buf,
				   struct sharpsl_ftl *ftl)
{
	unsigned int log_num, final_log_num;
	unsigned int block_num;
	loff_t block_adr;
	loff_t block_ofs;
	size_t retlen;
	int err;

	log_num = mtd_div_by_eb((u32)from, mtd);
	final_log_num = mtd_div_by_eb(((u32)from + len - 1), mtd);

	if (len <= 0 || log_num >= ftl->logmax || final_log_num > log_num)
		return -EINVAL;

	block_num = ftl->log2phy[log_num];
	block_adr = block_num * mtd->erasesize;
	block_ofs = mtd_mod_by_eb((u32)from, mtd);

	err = mtd_read(mtd, block_adr + block_ofs, len, &retlen, buf);
	/* Ignore corrected ECC errors */
	if (mtd_is_bitflip(err))
		err = 0;

	if (!err && retlen != len)
		err = -EIO;

	if (err)
		pr_err("sharpslpart: error, read failed at %#llx\n",
		       block_adr + block_ofs);

	return err;
}
Example #7
static int spectra_open(struct inode *inode, struct file *file)
{
	struct spectra_device *dev;
	struct mtd_info *mtd;
	int err;

	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
	if (unlikely(!dev))
		return -ENOMEM;

	dev->mtd = mtd = get_mtd_device_nm(CEFDK_CFG_PARTITION);
	if (unlikely(IS_ERR(mtd))) {
		printk(KERN_ERR DRV_NAME
		       ": mtd partition '" CEFDK_CFG_PARTITION "' not found\n");
		err = -ENXIO;
		goto out_free;
	}

	if (unlikely(mtd->type != MTD_NANDFLASH)) {
		printk(KERN_ERR DRV_NAME ": not a NAND partition\n");
		err = -ENXIO;
		goto out_put;
	}

	BUG_ON(mtd->writesize > sizeof(dev->buf));

	dev->device_info.writesize = mtd->writesize;
	dev->device_info.oobsize = mtd->oobsize;
	dev->device_info.pagesize = mtd->writesize + mtd->oobsize;

	dev->device_info.erasesize = mtd->erasesize;
	dev->device_info.blockpages = mtd_div_by_ws(mtd->erasesize, mtd);
	dev->device_info.blocksize =
	    dev->device_info.pagesize * dev->device_info.blockpages;

	dev->device_info.blocks = CEFDK_CFG_PARTITION_OFFSET +
	    mtd_div_by_eb(mtd->size, mtd);

	DBG("sizeof(device_info) 0x%x\n", sizeof(dev->device_info));

	file->private_data = dev;
	return 0;

out_put:
	put_mtd_device(dev->mtd);
out_free:
	kfree(dev);
	return err;
}
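With an assumed geometry of 2048-byte pages, 64 bytes OOB and 128 KiB blocks, blockpages = mtd_div_by_ws(131072, mtd) = 64 and blocksize = (2048 + 64) * 64 = 135168, i.e. the raw block size including OOB.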
Example #8
static int mtdblock_bb_init(struct mtdblock_priv *priv)
{
	struct mtd_info *mtd = priv->dev.mtd;
	int i, total_blocks, good_blocks = 0;

	/* basic sanity checks */
	if (mtd->type != MTD_NANDFLASH || mtd->numeraseregions || !mtd->size
	    || !mtd->erasesize)
		return 0;

	total_blocks = mtd_div_by_eb(mtd->size, mtd);

	/* The MTD layer currently cannot support huge devices anyway,
	 * so we use u16 for bb_map to save some space.
	 */
	if (unlikely(total_blocks > 0xffff))
		return -EINVAL;

	priv->bb_map = kmalloc(total_blocks * sizeof(u16), GFP_KERNEL);
	if (unlikely(!priv->bb_map)) {
		printk(KERN_ERR
		       "mtdblock: failed to allocate %d-entry bad-block map\n",
		       total_blocks);
		return -ENOMEM;
	}

	for (i = 0; i < total_blocks; ++i) {
		if (mtd->block_isbad(mtd, i * mtd->erasesize))
			continue;
		priv->bb_map[good_blocks++] = (u16)i;
	}

	priv->sectors_per_block = mtd->erasesize / 512;
	if (good_blocks != total_blocks) {
		/* fill the rest of the map with something deterministic */
		for (i = good_blocks; i < total_blocks; ++i)
			priv->bb_map[i] = 0xffff;

		priv->dev.size = good_blocks * priv->sectors_per_block;
		printk(KERN_INFO "mtdblock%d: %d bad block(s) found\n",
		       priv->dev.devnum, total_blocks - good_blocks);
	}

	return 0;
}
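Example #3 above re-validates this map on every open. A hypothetical helper (not part of the source) illustrating how the map would be consumed on the I/O path:

/* Hypothetical: translate a logical block index into the physical
 * eraseblock index recorded by mtdblock_bb_init(). Entries holding
 * 0xffff are never reached because dev.size was shrunk to the number
 * of good blocks. */
static inline int mtdblock_log2phys(struct mtdblock_priv *priv, int block)
{
	return priv->bb_map ? priv->bb_map[block] : block;
}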
Example #9
static int mtdoops_erase_block(struct mtdoops_context *cxt, int offset)
{
	struct mtd_info *mtd = cxt->mtd;
	u32 start_page_offset = mtd_div_by_eb(offset, mtd) * mtd->erasesize;
	u32 start_page = start_page_offset / record_size;
	u32 erase_pages = mtd->erasesize / record_size;
	struct erase_info erase;
	DECLARE_WAITQUEUE(wait, current);
	wait_queue_head_t wait_q;
	int ret;
	int page;

	init_waitqueue_head(&wait_q);
	erase.mtd = mtd;
	erase.callback = mtdoops_erase_callback;
	erase.addr = offset;
	erase.len = mtd->erasesize;
	erase.priv = (u_long)&wait_q;

	set_current_state(TASK_INTERRUPTIBLE);
	add_wait_queue(&wait_q, &wait);

	ret = mtd->erase(mtd, &erase);
	if (ret) {
		set_current_state(TASK_RUNNING);
		remove_wait_queue(&wait_q, &wait);
		printk(KERN_WARNING "mtdoops: erase of region [0x%llx, 0x%llx] on \"%s\" failed\n",
		       (unsigned long long)erase.addr,
		       (unsigned long long)erase.len, mtddev);
		return ret;
	}

	schedule();  /* Wait for erase to finish. */
	remove_wait_queue(&wait_q, &wait);

	/* Mark pages as unused */
	for (page = start_page; page < start_page + erase_pages; page++)
		mark_page_unused(cxt, page);

	return 0;
}
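The callback installed above is what ends the wait: in mtdoops it is essentially just a wake-up on the wait queue passed via erase.priv, roughly:

static void mtdoops_erase_callback(struct erase_info *done)
{
	wait_queue_head_t *wait_q = (wait_queue_head_t *)done->priv;

	wake_up(wait_q);
}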
Example #10
static struct mtd_part *add_one_partition(struct mtd_info *master,
		const struct mtd_partition *part, int partno,
		uint64_t cur_offset)
{
	struct mtd_part *slave;

	/* allocate the partition structure */
	slave = kzalloc(sizeof(*slave), GFP_KERNEL);
	if (!slave) {
		printk(KERN_ERR"memory allocation error while creating partitions for \"%s\"\n",
			master->name);
		del_mtd_partitions(master);
		return NULL;
	}
	list_add(&slave->list, &mtd_partitions);

	/* set up the MTD object for this partition */
	slave->mtd.type = master->type;
	slave->mtd.flags = master->flags & ~part->mask_flags;
	slave->mtd.size = part->size;
	slave->mtd.writesize = master->writesize;
	slave->mtd.oobsize = master->oobsize;
	slave->mtd.oobavail = master->oobavail;
	slave->mtd.subpage_sft = master->subpage_sft;

	slave->mtd.name = part->name;
	slave->mtd.owner = master->owner;

	slave->mtd.read = part_read;
	slave->mtd.write = part_write;

	if (master->panic_write)
		slave->mtd.panic_write = part_panic_write;

	if (master->point && master->unpoint) {
		slave->mtd.point = part_point;
		slave->mtd.unpoint = part_unpoint;
	}

	if (master->read_oob)
		slave->mtd.read_oob = part_read_oob;
	if (master->write_oob)
		slave->mtd.write_oob = part_write_oob;
	if (master->read_user_prot_reg)
		slave->mtd.read_user_prot_reg = part_read_user_prot_reg;
	if (master->read_fact_prot_reg)
		slave->mtd.read_fact_prot_reg = part_read_fact_prot_reg;
	if (master->write_user_prot_reg)
		slave->mtd.write_user_prot_reg = part_write_user_prot_reg;
	if (master->lock_user_prot_reg)
		slave->mtd.lock_user_prot_reg = part_lock_user_prot_reg;
	if (master->get_user_prot_info)
		slave->mtd.get_user_prot_info = part_get_user_prot_info;
	if (master->get_fact_prot_info)
		slave->mtd.get_fact_prot_info = part_get_fact_prot_info;
	if (master->sync)
		slave->mtd.sync = part_sync;
	if (!partno && master->suspend && master->resume) {
			slave->mtd.suspend = part_suspend;
			slave->mtd.resume = part_resume;
	}
	if (master->writev)
		slave->mtd.writev = part_writev;
	if (master->lock)
		slave->mtd.lock = part_lock;
	if (master->unlock)
		slave->mtd.unlock = part_unlock;
	if (master->block_isbad)
		slave->mtd.block_isbad = part_block_isbad;
	if (master->block_markbad)
		slave->mtd.block_markbad = part_block_markbad;
	slave->mtd.erase = part_erase;
	slave->master = master;
	slave->offset = part->offset;
	slave->index = partno;

	if (slave->offset == MTDPART_OFS_APPEND)
		slave->offset = cur_offset;
	if (slave->offset == MTDPART_OFS_NXTBLK) {
		slave->offset = cur_offset;
		if (mtd_mod_by_eb(cur_offset, master) != 0) {
			/* Round up to next erasesize */
			slave->offset = (mtd_div_by_eb(cur_offset, master) + 1) * master->erasesize;
			printk(KERN_NOTICE "Moving partition %d: "
			       "0x%012llx -> 0x%012llx\n", partno,
			       (unsigned long long)cur_offset, (unsigned long long)slave->offset);
		}
	}
	if (slave->mtd.size == MTDPART_SIZ_FULL)
		slave->mtd.size = master->size - slave->offset;

	printk(KERN_NOTICE "0x%012llx-0x%012llx : \"%s\"\n", (unsigned long long)slave->offset,
		(unsigned long long)(slave->offset + slave->mtd.size), slave->mtd.name);

	/* let's do some sanity checks */
	if (slave->offset >= master->size) {
		/* let's register it anyway to preserve ordering */
		slave->offset = 0;
		slave->mtd.size = 0;
		printk(KERN_ERR"mtd: partition \"%s\" is out of reach -- disabled\n",
			part->name);
		goto out_register;
	}
	if (slave->offset + slave->mtd.size > master->size) {
		slave->mtd.size = master->size - slave->offset;
		printk(KERN_WARNING"mtd: partition \"%s\" extends beyond the end of device \"%s\" -- size truncated to %#llx\n",
			part->name, master->name, (unsigned long long)slave->mtd.size);
	}
	if (master->numeraseregions > 1) {
		/* Deal with variable erase size stuff */
		int i, max = master->numeraseregions;
		u64 end = slave->offset + slave->mtd.size;
		struct mtd_erase_region_info *regions = master->eraseregions;

		/* Find the first erase region which is part of this
		 * partition. */
		for (i = 0; i < max && regions[i].offset <= slave->offset; i++)
			;
		/* The loop searched for the region _behind_ the first one */
		i--;

		/* Pick biggest erasesize */
		for (; i < max && regions[i].offset < end; i++) {
			if (slave->mtd.erasesize < regions[i].erasesize) {
				slave->mtd.erasesize = regions[i].erasesize;
			}
		}
		BUG_ON(slave->mtd.erasesize == 0);
	} else {
		/* Single erase size */
		slave->mtd.erasesize = master->erasesize;
	}

	if ((slave->mtd.flags & MTD_WRITEABLE) &&
	    mtd_mod_by_eb(slave->offset, &slave->mtd)) {
		/* Doesn't start on a boundary of major erase size */
		/* FIXME: Let it be writable if it is on a boundary of
		 * _minor_ erase size though */
		slave->mtd.flags &= ~MTD_WRITEABLE;
		printk(KERN_WARNING"mtd: partition \"%s\" doesn't start on an erase block boundary -- force read-only\n",
			part->name);
	}
	if ((slave->mtd.flags & MTD_WRITEABLE) &&
	    mtd_mod_by_eb(slave->mtd.size, &slave->mtd)) {
		slave->mtd.flags &= ~MTD_WRITEABLE;
		printk(KERN_WARNING"mtd: partition \"%s\" doesn't end on an erase block -- force read-only\n",
			part->name);
	}

	slave->mtd.ecclayout = master->ecclayout;

#ifndef CONFIG_MTD_LAZYECCSTATS
	part_fill_badblockstats(&(slave->mtd));
#endif

out_register:
	if (part->mtdp) {
		/* store the object pointer (caller may or may not register it) */
		*part->mtdp = &slave->mtd;
		slave->registered = 0;
	} else {
		/* register our partition */
		add_mtd_device(&slave->mtd);
		slave->registered = 1;
	}
	return slave;
}
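For context, the caller of add_one_partition() accumulates cur_offset across all partitions; in mtdpart.c of the same era it looks roughly like this:

int add_mtd_partitions(struct mtd_info *master,
		       const struct mtd_partition *parts,
		       int nbparts)
{
	struct mtd_part *slave;
	uint64_t cur_offset = 0;
	int i;

	for (i = 0; i < nbparts; i++) {
		slave = add_one_partition(master, parts + i, i, cur_offset);
		if (!slave)
			return -ENOMEM;
		cur_offset = slave->offset + slave->mtd.size;
	}

	return 0;
}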
Example #11
/**
 * ubi_attach_mtd_dev - attach an MTD device.
 * @mtd: MTD device description object
 * @ubi_num: number to assign to the new UBI device
 * @vid_hdr_offset: VID header offset
 * @max_beb_per1024: maximum expected number of bad PEB per 1024 PEBs
 *
 * This function attaches MTD device @mtd to UBI and assigns the number @ubi_num
 * to the newly created UBI device, unless @ubi_num is %UBI_DEV_NUM_AUTO, in
 * which case this function finds a vacant device number and assigns it
 * automatically. Returns the new UBI device number in case of success and a
 * negative error code in case of failure.
 *
 * Note, invocations of this function have to be serialized by the
 * @ubi_devices_mutex.
 */
int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num,
		       int vid_hdr_offset, int max_beb_per1024)
{
	struct ubi_device *ubi;
	int i, err, ref = 0;

	if (max_beb_per1024 < 0 || max_beb_per1024 > MAX_MTD_UBI_BEB_LIMIT)
		return -EINVAL;

	if (!max_beb_per1024)
		max_beb_per1024 = CONFIG_MTD_UBI_BEB_LIMIT;

	/*
	 * Check if we already have the same MTD device attached.
	 *
	 * Note, this function assumes that UBI device creation and deletion
	 * are serialized, so it does not take the &ubi_devices_lock.
	 */
	for (i = 0; i < UBI_MAX_DEVICES; i++) {
		ubi = ubi_devices[i];
		if (ubi && mtd == ubi->mtd) {
			ubi_err("mtd%d is already attached to ubi%d",
				mtd->index, i);
			return -EEXIST;
		}
	}

	/*
	 * Make sure this MTD device is not already emulated on top of a UBI
	 * volume. Generally such recursion works, but it causes problems,
	 * e.g. the UBI module takes a reference to itself by attaching (and
	 * thus opening) the emulated MTD device, which makes it impossible
	 * to unload the module. And in general it makes no sense to attach
	 * emulated MTD devices, so we prohibit this.
	 */
	if (mtd->type == MTD_UBIVOLUME) {
		ubi_err("refuse attaching mtd%d - it is already emulated on top of UBI",
			mtd->index);
		return -EINVAL;
	}

	if (ubi_num == UBI_DEV_NUM_AUTO) {
		/* Search for an empty slot in the @ubi_devices array */
		for (ubi_num = 0; ubi_num < UBI_MAX_DEVICES; ubi_num++)
			if (!ubi_devices[ubi_num])
				break;
		if (ubi_num == UBI_MAX_DEVICES) {
			ubi_err("only %d UBI devices may be created",
				UBI_MAX_DEVICES);
			return -ENFILE;
		}
	} else {
		if (ubi_num >= UBI_MAX_DEVICES)
			return -EINVAL;

		/* Make sure ubi_num is not busy */
		if (ubi_devices[ubi_num]) {
			ubi_err("ubi%d already exists", ubi_num);
			return -EEXIST;
		}
	}

	ubi = kzalloc(sizeof(struct ubi_device), GFP_KERNEL);
	if (!ubi)
		return -ENOMEM;

	ubi->mtd = mtd;
	ubi->ubi_num = ubi_num;
	ubi->vid_hdr_offset = vid_hdr_offset;
	ubi->autoresize_vol_id = -1;

#ifdef CONFIG_MTD_UBI_FASTMAP
	ubi->fm_pool.used = ubi->fm_pool.size = 0;
	ubi->fm_wl_pool.used = ubi->fm_wl_pool.size = 0;

	/*
	 * fm_pool.max_size is 5% of the total number of PEBs, but it is also
	 * clamped between UBI_FM_MIN_POOL_SIZE and UBI_FM_MAX_POOL_SIZE.
	 */
	ubi->fm_pool.max_size = min(((int)mtd_div_by_eb(ubi->mtd->size,
		ubi->mtd) / 100) * 5, UBI_FM_MAX_POOL_SIZE);
	if (ubi->fm_pool.max_size < UBI_FM_MIN_POOL_SIZE)
		ubi->fm_pool.max_size = UBI_FM_MIN_POOL_SIZE;

	ubi->fm_wl_pool.max_size = UBI_FM_WL_POOL_SIZE;
	ubi->fm_disabled = !fm_autoconvert;

	if (!ubi->fm_disabled && (int)mtd_div_by_eb(ubi->mtd->size, ubi->mtd)
	    <= UBI_FM_MAX_START) {
		ubi_err("More than %i PEBs are needed for fastmap, sorry.",
			UBI_FM_MAX_START);
		ubi->fm_disabled = 1;
	}

	ubi_debug("default fastmap pool size: %d", ubi->fm_pool.max_size);
	ubi_debug("default fastmap WL pool size: %d", ubi->fm_wl_pool.max_size);
#else
	ubi->fm_disabled = 1;
#endif

	ubi_msg("attaching mtd%d to ubi%d", mtd->index, ubi_num);

	err = io_init(ubi, max_beb_per1024);
	if (err)
		goto out_free;

	err = -ENOMEM;
	ubi->peb_buf = vmalloc(ubi->peb_size);
	if (!ubi->peb_buf)
		goto out_free;

#ifdef CONFIG_MTD_UBI_FASTMAP
	ubi->fm_size = ubi_calc_fm_size(ubi);
	ubi->fm_buf = kzalloc(ubi->fm_size, GFP_KERNEL);
	if (!ubi->fm_buf)
		goto out_free;
#endif
	err = ubi_attach(ubi, 0);
	if (err) {
		ubi_err("failed to attach mtd%d, error %d", mtd->index, err);
		goto out_free;
	}

	if (ubi->autoresize_vol_id != -1) {
		err = autoresize(ubi, ubi->autoresize_vol_id);
		if (err)
			goto out_detach;
	}

	err = uif_init(ubi, &ref);
	if (err)
		goto out_detach;

	ubi_msg("attached mtd%d (name \"%s\", size %llu MiB) to ubi%d",
		mtd->index, mtd->name, ubi->flash_size >> 20, ubi_num);
	ubi_debug("PEB size: %d bytes (%d KiB), LEB size: %d bytes",
		ubi->peb_size, ubi->peb_size >> 10, ubi->leb_size);
	ubi_debug("min./max. I/O unit sizes: %d/%d, sub-page size %d",
		ubi->min_io_size, ubi->max_write_size, ubi->hdrs_min_io_size);
	ubi_debug("VID header offset: %d (aligned %d), data offset: %d",
		ubi->vid_hdr_offset, ubi->vid_hdr_aloffset, ubi->leb_start);
	ubi_debug("good PEBs: %d, bad PEBs: %d, corrupted PEBs: %d",
		ubi->good_peb_count, ubi->bad_peb_count, ubi->corr_peb_count);
	ubi_debug("user volume: %d, internal volumes: %d, max. volumes count: %d",
		ubi->vol_count - UBI_INT_VOL_COUNT, UBI_INT_VOL_COUNT,
		ubi->vtbl_slots);
	ubi_debug("max/mean erase counter: %d/%d, WL threshold: %d, image sequence number: %u",
		ubi->max_ec, ubi->mean_ec, CONFIG_MTD_UBI_WL_THRESHOLD,
		ubi->image_seq);
	ubi_debug("available PEBs: %d, total reserved PEBs: %d, PEBs reserved for bad PEB handling: %d",
		ubi->avail_pebs, ubi->rsvd_pebs, ubi->beb_rsvd_pebs);

	dev_add_param_int_ro(&ubi->dev, "peb_size", ubi->peb_size, "%d");
	dev_add_param_int_ro(&ubi->dev, "leb_size", ubi->leb_size, "%d");
	dev_add_param_int_ro(&ubi->dev, "vid_header_offset", ubi->vid_hdr_offset, "%d");
	dev_add_param_int_ro(&ubi->dev, "min_io_size", ubi->min_io_size, "%d");
	dev_add_param_int_ro(&ubi->dev, "sub_page_size", ubi->hdrs_min_io_size, "%d");
	dev_add_param_int_ro(&ubi->dev, "good_peb_count", ubi->good_peb_count, "%d");
	dev_add_param_int_ro(&ubi->dev, "bad_peb_count", ubi->bad_peb_count, "%d");
	dev_add_param_int_ro(&ubi->dev, "max_erase_counter", ubi->max_ec, "%d");
	dev_add_param_int_ro(&ubi->dev, "mean_erase_counter", ubi->mean_ec, "%d");
	dev_add_param_int_ro(&ubi->dev, "available_pebs", ubi->avail_pebs, "%d");
	dev_add_param_int_ro(&ubi->dev, "reserved_pebs", ubi->rsvd_pebs, "%d");

	/*
	 * The below lock makes sure we do not race with 'ubi_thread()' which
	 * checks @ubi->thread_enabled. Otherwise we may fail to wake it up.
	 */
	ubi->thread_enabled = 1;
	wake_up_process(ubi->bgt_thread);

	ubi_devices[ubi_num] = ubi;

	return ubi_num;

out_detach:
	ubi_wl_close(ubi);
	ubi_free_internal_volumes(ubi);
	vfree(ubi->vtbl);
out_free:
	vfree(ubi->peb_buf);
	vfree(ubi->fm_buf);
	kfree(ubi);
	return err;
}
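A sketch of a typical call site, honoring the @ubi_devices_mutex serialization requirement from the kernel-doc above (the MTD device name is an assumption):

	struct mtd_info *mtd;
	int ubi_num;

	mtd = get_mtd_device_nm("nand0");	/* assumed device name */
	if (IS_ERR(mtd))
		return PTR_ERR(mtd);

	mutex_lock(&ubi_devices_mutex);
	ubi_num = ubi_attach_mtd_dev(mtd, UBI_DEV_NUM_AUTO, 0, 0);
	mutex_unlock(&ubi_devices_mutex);
	if (ubi_num < 0)
		put_mtd_device(mtd);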
Example #12
/**
 * io_init - initialize I/O sub-system for a given UBI device.
 * @ubi: UBI device description object
 * @max_beb_per1024: maximum expected number of bad PEB per 1024 PEBs
 *
 * If @ubi->vid_hdr_offset or @ubi->leb_start is zero, default offsets are
 * assumed:
 *   o EC header is always at offset zero - this cannot be changed;
 *   o VID header starts just after the EC header at the closest address
 *     aligned to @io->hdrs_min_io_size;
 *   o data starts just after the VID header at the closest address aligned to
 *     @io->min_io_size
 *
 * This function returns zero in case of success and a negative error code in
 * case of failure.
 */
static int io_init(struct ubi_device *ubi, int max_beb_per1024)
{
	dbg_gen("sizeof(struct ubi_ainf_peb) %zu", sizeof(struct ubi_ainf_peb));
	dbg_gen("sizeof(struct ubi_wl_entry) %zu", sizeof(struct ubi_wl_entry));

	if (ubi->mtd->numeraseregions != 0) {
		/*
		 * Some flashes have several erase regions. Different regions
		 * may have different eraseblock sizes and other
		 * characteristics. It looks like mostly multi-region flashes
		 * have one "main" region and one or more small regions to
		 * store boot loader code or boot parameters or whatever. I
		 * guess we should just pick the largest region. But this is
		 * not implemented.
		 */
		ubi_err("multiple regions, not implemented");
		return -EINVAL;
	}

	if (ubi->vid_hdr_offset < 0)
		return -EINVAL;

	/*
	 * Note, in this implementation we support MTD devices with 0x7FFFFFFF
	 * physical eraseblocks maximum.
	 */

	ubi->peb_size   = ubi->mtd->erasesize;
	ubi->peb_count  = mtd_div_by_eb(ubi->mtd->size, ubi->mtd);
	ubi->flash_size = ubi->mtd->size;

	if (mtd_can_have_bb(ubi->mtd)) {
		ubi->bad_allowed = 1;
		ubi->bad_peb_limit = get_bad_peb_limit(ubi, max_beb_per1024);
	}

	if (ubi->mtd->type == MTD_NORFLASH) {
		ubi_assert(ubi->mtd->writesize == 1);
		ubi->nor_flash = 1;
	}

	ubi->min_io_size = ubi->mtd->writesize;
	ubi->hdrs_min_io_size = ubi->mtd->writesize >> ubi->mtd->subpage_sft;

	/*
	 * Make sure minimal I/O unit is power of 2. Note, there is no
	 * fundamental reason for this assumption. It is just an optimization
	 * which allows us to avoid costly division operations.
	 */
	if (!is_power_of_2(ubi->min_io_size)) {
		ubi_err("min. I/O unit (%d) is not power of 2",
			ubi->min_io_size);
		return -EINVAL;
	}

	ubi_assert(ubi->hdrs_min_io_size > 0);
	ubi_assert(ubi->hdrs_min_io_size <= ubi->min_io_size);
	ubi_assert(ubi->min_io_size % ubi->hdrs_min_io_size == 0);

	ubi->max_write_size = ubi->mtd->writesize; /* FIXME: writebufsize */
	/*
	 * The maximum write size has to be greater than or equal to the
	 * min. I/O size and a multiple of it.
	 */
	if (ubi->max_write_size < ubi->min_io_size ||
	    ubi->max_write_size % ubi->min_io_size ||
	    !is_power_of_2(ubi->max_write_size)) {
		ubi_err("bad write buffer size %d for %d min. I/O unit",
			ubi->max_write_size, ubi->min_io_size);
		return -EINVAL;
	}

	/* Calculate default aligned sizes of EC and VID headers */
	ubi->ec_hdr_alsize = ALIGN(UBI_EC_HDR_SIZE, ubi->hdrs_min_io_size);
	ubi->vid_hdr_alsize = ALIGN(UBI_VID_HDR_SIZE, ubi->hdrs_min_io_size);

	dbg_gen("min_io_size      %d", ubi->min_io_size);
	dbg_gen("max_write_size   %d", ubi->max_write_size);
	dbg_gen("hdrs_min_io_size %d", ubi->hdrs_min_io_size);
	dbg_gen("ec_hdr_alsize    %d", ubi->ec_hdr_alsize);
	dbg_gen("vid_hdr_alsize   %d", ubi->vid_hdr_alsize);

	if (ubi->vid_hdr_offset == 0)
		/* Default offset */
		ubi->vid_hdr_offset = ubi->vid_hdr_aloffset =
				      ubi->ec_hdr_alsize;
	else {
		ubi->vid_hdr_aloffset = ubi->vid_hdr_offset &
						~(ubi->hdrs_min_io_size - 1);
		ubi->vid_hdr_shift = ubi->vid_hdr_offset -
						ubi->vid_hdr_aloffset;
	}

	/* Similar for the data offset */
	ubi->leb_start = ubi->vid_hdr_offset + UBI_VID_HDR_SIZE;
	ubi->leb_start = ALIGN(ubi->leb_start, ubi->min_io_size);

	dbg_gen("vid_hdr_offset   %d", ubi->vid_hdr_offset);
	dbg_gen("vid_hdr_aloffset %d", ubi->vid_hdr_aloffset);
	dbg_gen("vid_hdr_shift    %d", ubi->vid_hdr_shift);
	dbg_gen("leb_start        %d", ubi->leb_start);

	/* The shift must be aligned to a 32-bit boundary */
	if (ubi->vid_hdr_shift % 4) {
		ubi_err("unaligned VID header shift %d",
			ubi->vid_hdr_shift);
		return -EINVAL;
	}

	/* Check sanity */
	if (ubi->vid_hdr_offset < UBI_EC_HDR_SIZE ||
	    ubi->leb_start < ubi->vid_hdr_offset + UBI_VID_HDR_SIZE ||
	    ubi->leb_start > ubi->peb_size - UBI_VID_HDR_SIZE ||
	    ubi->leb_start & (ubi->min_io_size - 1)) {
		ubi_err("bad VID header (%d) or data offsets (%d)",
			ubi->vid_hdr_offset, ubi->leb_start);
		return -EINVAL;
	}

	/*
	 * Set the maximum number of erroneous physical eraseblocks to 10%.
	 * Erroneous PEBs are those which have read errors.
	 */
	ubi->max_erroneous = ubi->peb_count / 10;
	if (ubi->max_erroneous < 16)
		ubi->max_erroneous = 16;
	dbg_gen("max_erroneous    %d", ubi->max_erroneous);

	/*
	 * It may happen that EC and VID headers are situated in one minimal
	 * I/O unit. In this case we can only accept this UBI image in
	 * read-only mode.
	 */
	if (ubi->vid_hdr_offset + UBI_VID_HDR_SIZE <= ubi->hdrs_min_io_size) {
		ubi_warn("EC and VID headers are in the same minimal I/O unit, switch to read-only mode");
		ubi->ro_mode = 1;
	}

	ubi->leb_size = ubi->peb_size - ubi->leb_start;

	if (!(ubi->mtd->flags & MTD_WRITEABLE)) {
		ubi_msg("MTD device %d is write-protected, attach in read-only mode",
			ubi->mtd->index);
		ubi->ro_mode = 1;
	}

	/*
	 * Note, ideally, we have to initialize @ubi->bad_peb_count here. But
	 * unfortunately, MTD does not provide this information. We should loop
	 * over all physical eraseblocks and invoke mtd->block_is_bad() for
	 * each physical eraseblock. So, we leave @ubi->bad_peb_count
	 * uninitialized so far.
	 */

	return 0;
}
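A worked layout example with assumed geometry: 128 KiB PEBs, 2048-byte pages and 512-byte sub-pages (subpage_sft = 2) give hdrs_min_io_size = 512, so ec_hdr_alsize = ALIGN(64, 512) = 512 (both UBI_EC_HDR_SIZE and UBI_VID_HDR_SIZE are 64 bytes) and the default vid_hdr_offset is 512 as well. leb_start = ALIGN(512 + 64, 2048) = 2048, leaving leb_size = 131072 - 2048 = 129024 bytes per LEB.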
Example #13
/**
 * dbbt_check - check if the DBBT is readable and consistent with the MTD BBT
 * @mtd: The MTD NAND device
 * @page: The page where the DBBT is found
 *
 * This function checks if the DBBT is readable and consistent with the MTD
 * layer's idea of bad blocks.
 *
 * Return: 0 if the DBBT is readable and consistent with the MTD BBT, a
 * negative error code otherwise.
 */
static int dbbt_check(struct mtd_info *mtd, int page)
{
	int ret, needs_cleanup = 0;
	size_t r;
	void *dbbt_header;
	void *dbbt_entries = NULL;
	struct dbbt_block *dbbt;
	int num_blocks = mtd_div_by_eb(mtd->size, mtd);
	int n;

	dbbt_header = xmalloc(mtd->writesize);

	ret = mtd_read(mtd, page * mtd->writesize, mtd->writesize, &r, dbbt_header);
	if (ret == -EUCLEAN) {
		pr_warn("page %d needs cleaning\n", page);
		needs_cleanup = 1;
	} else if (ret < 0) {
		pr_err("Cannot read page %d: %s\n", page, strerror(-ret));
		goto out;
	}

	dbbt = dbbt_header;

	if (dbbt->FingerPrint != 0x54424244) { /* "DBBT" in little-endian ASCII */
		pr_err("dbbt at page %d is readable but does not contain a valid DBBT\n",
		       page);
		ret = -EINVAL;
		goto out;
	}

	if (dbbt->DBBTNumOfPages) {
		dbbt_entries = xmalloc(mtd->writesize);

		ret = mtd_read(mtd, (page + 4) * mtd->writesize, mtd->writesize, &r, dbbt_entries);
		if (ret == -EUCLEAN) {
			pr_warn("page %d needs cleaning\n", page);
			needs_cleanup = 1;
		} else if (ret < 0) {
			pr_err("Cannot read page %d: %s\n", page, strerror(-ret));
			goto out;
		}
	} else {
		dbbt_entries = NULL;
	}

	for (n = 0; n < num_blocks; n++) {
		if (mtd_peb_is_bad(mtd, n) != dbbt_block_is_bad(dbbt_entries, n)) {
			ret = -EINVAL;
			goto out;
		}
	}

	ret = 0;
out:
	free(dbbt_header);
	free(dbbt_entries);

	if (ret < 0)
		return ret;
	if (needs_cleanup)
		return -EUCLEAN;
	return 0;
}
Example #14
/**
 * imx_bbu_firmware_max_blocks - get max number of blocks for firmware
 * @mtd: The mtd device
 *
 * We use 4 blocks for FCB/DBBT, the rest of the partition is
 * divided into two equally sized firmware slots. This function
 * returns the number of blocks available for one firmware slot.
 * The actually usable size may be smaller due to bad blocks.
 */
static int imx_bbu_firmware_max_blocks(struct mtd_info *mtd)
{
	return (mtd_div_by_eb(mtd->size, mtd) - 4) / 2;
}
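For instance, an assumed 16 MiB boot partition with 128 KiB eraseblocks spans 128 blocks, so each firmware slot gets (128 - 4) / 2 = 62 blocks.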