Example #1
int ubispl_load_volumes(struct ubispl_info *info, struct ubispl_load *lvols,
			int nrvols)
{
	struct ubi_scan_info *ubi = info->ubi;
	int res, i, fastmap = info->fastmap;
	u32 fsize;

retry:
	/*
	 * We do a partial initialization of @ubi. Cleaning fm_buf is
	 * not necessary.
	 */
	memset(ubi, 0, offsetof(struct ubi_scan_info, fm_buf));

	ubi->read = info->read;

	/* Precalculate the offsets */
	ubi->vid_offset = info->vid_offset;
	ubi->leb_start = info->leb_start;
	ubi->leb_size = info->peb_size - ubi->leb_start;
	ubi->peb_count = info->peb_count;
	ubi->peb_offset = info->peb_offset;

	fsize = info->peb_size * info->peb_count;
	ubi->fsize_mb = fsize >> 20;

	/* Fastmap init */
	ubi->fm_size = ubi_calc_fm_size(ubi);
	ubi->fm_enabled = fastmap;

	/* Mark the requested volume IDs in the @toload bitmap */
	for (i = 0; i < nrvols; i++) {
		struct ubispl_load *lv = lvols + i;

		generic_set_bit(lv->vol_id, ubi->toload);
	}

	/* Scan the flash (or the fastmap, if enabled) for the requested volumes */
	ipl_scan(ubi);

	for (i = 0; i < nrvols; i++) {
		struct ubispl_load *lv = lvols + i;

		ubi_msg("Loading VolId #%d", lv->vol_id);
		res = ipl_load(ubi, lv->vol_id, lv->load_addr);
		if (res < 0) {
			/* Fastmap-based load failed; retry with a full scan */
			if (fastmap) {
				fastmap = 0;
				goto retry;
			}
			ubi_warn("Failed");
			return res;
		}
	}
	return 0;
}
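
For orientation, here is a minimal caller sketch. It is not taken from the project: it relies only on the fields that ubispl_load_volumes() itself touches above (ubi, read, peb_size, peb_count, peb_offset, vid_offset, leb_start, fastmap, vol_id, load_addr). The header name, the read-callback prototype, the flash geometry, the volume IDs and the load addresses are assumptions made for illustration.

#include "ubispl.h"	/* assumed to declare struct ubispl_info / struct ubispl_load */

/*
 * Hypothetical low-level flash read callback; the exact prototype must
 * match the read-callback type declared in ubispl.h.
 */
extern int board_flash_read(int pnum, int offset, int len, void *dst);

/*
 * Scratch area for the scan state. Real boards typically point .ubi at a
 * fixed SRAM/DRAM address instead of placing it in BSS.
 */
static struct ubi_scan_info scan_area;

int board_load_volumes(void)
{
	struct ubispl_info info = {
		.ubi		= &scan_area,
		.read		= board_flash_read,
		.peb_size	= 128 * 1024,	/* example NAND geometry */
		.peb_count	= 1024,
		.peb_offset	= 0,
		.vid_offset	= 512,
		.leb_start	= 2048,
		.fastmap	= 1,	/* try fastmap first, fall back to a scan */
	};
	struct ubispl_load vols[] = {
		{ .vol_id = 0, .load_addr = (void *)0x82000000 },	/* hypothetical */
		{ .vol_id = 1, .load_addr = (void *)0x88000000 },	/* hypothetical */
	};

	return ubispl_load_volumes(&info, vols, ARRAY_SIZE(vols));
}

Note that when fastmap is enabled and a volume load fails, the function above silently retries with a full scan before reporting an error, so the caller only sees the final result.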
Example #2
File: build.c  Project: AubrCool/barebox
/**
 * ubi_attach_mtd_dev - attach an MTD device.
 * @mtd: MTD device description object
 * @ubi_num: number to assign to the new UBI device
 * @vid_hdr_offset: VID header offset
 * @max_beb_per1024: maximum expected number of bad PEB per 1024 PEBs
 *
 * This function attaches MTD device @mtd to UBI and assigns @ubi_num to the
 * newly created UBI device, unless @ubi_num is %UBI_DEV_NUM_AUTO, in
 * which case this function finds a vacant device number and assigns it
 * automatically. Returns the new UBI device number in case of success and a
 * negative error code in case of failure.
 *
 * Note, invocations of this function have to be serialized by the
 * @ubi_devices_mutex.
 */
int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num,
		       int vid_hdr_offset, int max_beb_per1024)
{
	struct ubi_device *ubi;
	int i, err, ref = 0;

	if (max_beb_per1024 < 0 || max_beb_per1024 > MAX_MTD_UBI_BEB_LIMIT)
		return -EINVAL;

	if (!max_beb_per1024)
		max_beb_per1024 = CONFIG_MTD_UBI_BEB_LIMIT;

	/*
	 * Check if we already have the same MTD device attached.
	 *
	 * Note, this function assumes that UBI device creation and deletion
	 * are serialized, so it does not take the &ubi_devices_lock.
	 */
	for (i = 0; i < UBI_MAX_DEVICES; i++) {
		ubi = ubi_devices[i];
		if (ubi && mtd == ubi->mtd) {
			ubi_err("mtd%d is already attached to ubi%d",
				mtd->index, i);
			return -EEXIST;
		}
	}

	/*
	 * Make sure this MTD device is not already emulated on top of a UBI
	 * volume. In principle such recursion works, but it causes problems,
	 * e.g. the UBI module would take a reference to itself by attaching
	 * (and thus opening) the emulated MTD device, which makes the module
	 * impossible to unload. Attaching emulated MTD devices also makes no
	 * sense in general, so we prohibit it.
	 */
	if (mtd->type == MTD_UBIVOLUME) {
		ubi_err("refuse attaching mtd%d - it is already emulated on top of UBI",
			mtd->index);
		return -EINVAL;
	}

	if (ubi_num == UBI_DEV_NUM_AUTO) {
		/* Search for an empty slot in the @ubi_devices array */
		for (ubi_num = 0; ubi_num < UBI_MAX_DEVICES; ubi_num++)
			if (!ubi_devices[ubi_num])
				break;
		if (ubi_num == UBI_MAX_DEVICES) {
			ubi_err("only %d UBI devices may be created",
				UBI_MAX_DEVICES);
			return -ENFILE;
		}
	} else {
		if (ubi_num >= UBI_MAX_DEVICES)
			return -EINVAL;

		/* Make sure ubi_num is not busy */
		if (ubi_devices[ubi_num]) {
			ubi_err("ubi%d already exists", ubi_num);
			return -EEXIST;
		}
	}

	ubi = kzalloc(sizeof(struct ubi_device), GFP_KERNEL);
	if (!ubi)
		return -ENOMEM;

	ubi->mtd = mtd;
	ubi->ubi_num = ubi_num;
	ubi->vid_hdr_offset = vid_hdr_offset;
	ubi->autoresize_vol_id = -1;

#ifdef CONFIG_MTD_UBI_FASTMAP
	ubi->fm_pool.used = ubi->fm_pool.size = 0;
	ubi->fm_wl_pool.used = ubi->fm_wl_pool.size = 0;

	/*
	 * fm_pool.max_size is 5% of the total number of PEBs, clamped between
	 * UBI_FM_MIN_POOL_SIZE and UBI_FM_MAX_POOL_SIZE.
	 */
	ubi->fm_pool.max_size = min(((int)mtd_div_by_eb(ubi->mtd->size,
		ubi->mtd) / 100) * 5, UBI_FM_MAX_POOL_SIZE);
	if (ubi->fm_pool.max_size < UBI_FM_MIN_POOL_SIZE)
		ubi->fm_pool.max_size = UBI_FM_MIN_POOL_SIZE;

	ubi->fm_wl_pool.max_size = UBI_FM_WL_POOL_SIZE;
	ubi->fm_disabled = !fm_autoconvert;

	if (!ubi->fm_disabled && (int)mtd_div_by_eb(ubi->mtd->size, ubi->mtd)
	    <= UBI_FM_MAX_START) {
		ubi_err("More than %i PEBs are needed for fastmap, sorry.",
			UBI_FM_MAX_START);
		ubi->fm_disabled = 1;
	}

	ubi_debug("default fastmap pool size: %d", ubi->fm_pool.max_size);
	ubi_debug("default fastmap WL pool size: %d", ubi->fm_wl_pool.max_size);
#else
	ubi->fm_disabled = 1;
#endif

	ubi_msg("attaching mtd%d to ubi%d", mtd->index, ubi_num);

	err = io_init(ubi, max_beb_per1024);
	if (err)
		goto out_free;

	err = -ENOMEM;
	ubi->peb_buf = vmalloc(ubi->peb_size);
	if (!ubi->peb_buf)
		goto out_free;

#ifdef CONFIG_MTD_UBI_FASTMAP
	ubi->fm_size = ubi_calc_fm_size(ubi);
	ubi->fm_buf = kzalloc(ubi->fm_size, GFP_KERNEL);
	if (!ubi->fm_buf)
		goto out_free;
#endif
	err = ubi_attach(ubi, 0);
	if (err) {
		ubi_err("failed to attach mtd%d, error %d", mtd->index, err);
		goto out_free;
	}

	if (ubi->autoresize_vol_id != -1) {
		err = autoresize(ubi, ubi->autoresize_vol_id);
		if (err)
			goto out_detach;
	}

	err = uif_init(ubi, &ref);
	if (err)
		goto out_detach;

	ubi_msg("attached mtd%d (name \"%s\", size %llu MiB) to ubi%d",
		mtd->index, mtd->name, ubi->flash_size >> 20, ubi_num);
	ubi_debug("PEB size: %d bytes (%d KiB), LEB size: %d bytes",
		ubi->peb_size, ubi->peb_size >> 10, ubi->leb_size);
	ubi_debug("min./max. I/O unit sizes: %d/%d, sub-page size %d",
		ubi->min_io_size, ubi->max_write_size, ubi->hdrs_min_io_size);
	ubi_debug("VID header offset: %d (aligned %d), data offset: %d",
		ubi->vid_hdr_offset, ubi->vid_hdr_aloffset, ubi->leb_start);
	ubi_debug("good PEBs: %d, bad PEBs: %d, corrupted PEBs: %d",
		ubi->good_peb_count, ubi->bad_peb_count, ubi->corr_peb_count);
	ubi_debug("user volume: %d, internal volumes: %d, max. volumes count: %d",
		ubi->vol_count - UBI_INT_VOL_COUNT, UBI_INT_VOL_COUNT,
		ubi->vtbl_slots);
	ubi_debug("max/mean erase counter: %d/%d, WL threshold: %d, image sequence number: %u",
		ubi->max_ec, ubi->mean_ec, CONFIG_MTD_UBI_WL_THRESHOLD,
		ubi->image_seq);
	ubi_debug("available PEBs: %d, total reserved PEBs: %d, PEBs reserved for bad PEB handling: %d",
		ubi->avail_pebs, ubi->rsvd_pebs, ubi->beb_rsvd_pebs);

	dev_add_param_int_ro(&ubi->dev, "peb_size", ubi->peb_size, "%d");
	dev_add_param_int_ro(&ubi->dev, "leb_size", ubi->leb_size, "%d");
	dev_add_param_int_ro(&ubi->dev, "vid_header_offset", ubi->vid_hdr_offset, "%d");
	dev_add_param_int_ro(&ubi->dev, "min_io_size", ubi->min_io_size, "%d");
	dev_add_param_int_ro(&ubi->dev, "sub_page_size", ubi->hdrs_min_io_size, "%d");
	dev_add_param_int_ro(&ubi->dev, "good_peb_count", ubi->good_peb_count, "%d");
	dev_add_param_int_ro(&ubi->dev, "bad_peb_count", ubi->bad_peb_count, "%d");
	dev_add_param_int_ro(&ubi->dev, "max_erase_counter", ubi->max_ec, "%d");
	dev_add_param_int_ro(&ubi->dev, "mean_erase_counter", ubi->mean_ec, "%d");
	dev_add_param_int_ro(&ubi->dev, "available_pebs", ubi->avail_pebs, "%d");
	dev_add_param_int_ro(&ubi->dev, "reserved_pebs", ubi->rsvd_pebs, "%d");

	/*
	 * Make sure we do not race with 'ubi_thread()', which checks
	 * @ubi->thread_enabled; otherwise we may fail to wake it up.
	 */
	ubi->thread_enabled = 1;
	wake_up_process(ubi->bgt_thread);

	ubi_devices[ubi_num] = ubi;

	return ubi_num;

out_detach:
	ubi_wl_close(ubi);
	ubi_free_internal_volumes(ubi);
	vfree(ubi->vtbl);
out_free:
	vfree(ubi->peb_buf);
	vfree(ubi->fm_buf);
	kfree(ubi);
	return err;
}
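
A minimal attach sketch, again not from the project: it passes %UBI_DEV_NUM_AUTO so the function above picks a free device number, 0 for @vid_hdr_offset (assumed to select the default offset, as in the Linux UBI io_init() path, which is not shown here), and 0 for @max_beb_per1024, which the function replaces with CONFIG_MTD_UBI_BEB_LIMIT. The prototype is copied from the listing; how the struct mtd_info pointer is obtained and which header exports the function are assumptions.

#include <linux/mtd/mtd.h>

/* Prototype as in the listing above; the header that exports it is not shown. */
int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num,
		       int vid_hdr_offset, int max_beb_per1024);

/*
 * Hypothetical helper: attach an already-probed MTD device to UBI.
 * Per the kernel-doc above, callers must serialize invocations of
 * ubi_attach_mtd_dev().
 */
static int example_ubi_attach(struct mtd_info *mtd)
{
	int ubi_num;

	ubi_num = ubi_attach_mtd_dev(mtd, UBI_DEV_NUM_AUTO,
				     0 /* default VID header offset */,
				     0 /* default bad-PEB budget */);
	if (ubi_num < 0) {
		pr_err("cannot attach %s: %d\n", mtd->name, ubi_num);
		return ubi_num;
	}

	pr_info("%s attached as ubi%d\n", mtd->name, ubi_num);

	return 0;
}

On success the return value is the assigned UBI device number, so a non-negative result doubles as the handle for subsequent operations on ubi<N>.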