Example #1 (score: 0)
File: dm.c — Project: wxlong/Test
/*
 * Completion handler for a bio cloned by device-mapper.
 *
 * Returns 1 if the I/O is not yet finished (partial completion, or the
 * target asked for another shot at it); otherwise tears down the clone
 * and propagates the result via dec_pending().
 */
static int clone_endio(struct bio *bio, unsigned int done, int error)
{
	int r = 0;
	struct target_io *tio = bio->bi_private;
	struct dm_io *io = tio->io;
	/*
	 * Save md now: tio (and possibly io) are freed below, so it must
	 * not be read through them after the teardown starts.
	 */
	struct mapped_device *md = io->md;
	dm_endio_fn endio = tio->ti->type->end_io;

	/* Partial completion: more of the bio is still in flight. */
	if (bio->bi_size)
		return 1;

	/* Normalize "not up to date" into an explicit error code. */
	if (!bio_flagged(bio, BIO_UPTODATE) && !error)
		error = -EIO;

	if (endio) {
		r = endio(tio->ti, bio, error, &tio->info);
		if (r < 0)
			error = r;

		else if (r > 0)
			/* the target wants another shot at the io */
			return 1;
	}

	dec_pending(io, error);

	/*
	 * Release the clone bio BEFORE freeing the tio: bio_put() may run
	 * the bio's destructor, which can still dereference bi_private
	 * (the tio).  Freeing the tio first would be a use-after-free;
	 * md was saved above for the same reason.
	 */
	bio_put(bio);
	free_tio(md, tio);
	return r;
}
Example #2 (score: 0)
/*
 * Completion handler for a bio cloned by device-mapper.
 *
 * Returns 1 while the I/O is incomplete (partial completion, or the
 * target requested another attempt via DM_ENDIO_INCOMPLETE); otherwise
 * reports the result through dec_pending() and frees the clone.
 *
 * NOTE: the teardown order at the bottom is deliberate and must not be
 * rearranged — see the comment before the bi_private assignment.
 */
static int clone_endio(struct bio *bio, unsigned int done, int error)
{
	int r = 0;
	struct target_io *tio = bio->bi_private;
	/* Saved up front: tio is freed before the last use of md below. */
	struct mapped_device *md = tio->io->md;
	dm_endio_fn endio = tio->ti->type->end_io;

	/* Partial completion: more of the bio is still in flight. */
	if (bio->bi_size)
		return 1;

	/* Normalize "not up to date" into an explicit error code. */
	if (!bio_flagged(bio, BIO_UPTODATE) && !error)
		error = -EIO;

	if (endio) {
		r = endio(tio->ti, bio, error, &tio->info);
		if (r < 0 || r == DM_ENDIO_REQUEUE)
			/*
			 * error and requeue request are handled
			 * in dec_pending().
			 */
			error = r;
		else if (r == DM_ENDIO_INCOMPLETE)
			/* The target will handle the io */
			return 1;
		else if (r) {
			DMWARN("unimplemented target endio return value: %d", r);
			BUG();
		}
	}

	dec_pending(tio->io, error);

	/*
	 * Store md for cleanup instead of tio which is about to get freed.
	 */
	bio->bi_private = md->bs;

	bio_put(bio);
	free_tio(md, tio);
	return r;
}
Example #3 (score: 0)
/*
 * Decompress one LZ4 cluster into a per-CPU bounce buffer, then scatter
 * the result into the caller-supplied output pages.
 *
 * @compressed_pages: input pages holding the compressed cluster
 * @clusterpages:     number of input pages
 * @pages:            output page array (entries may be NULL to skip)
 * @outlen:           expected decompressed byte count
 * @pageofs:          byte offset of the data within the first output page
 * @endio:            called on each output page once its data is copied
 *
 * Returns 0 on success, -ENOTSUPP if the output would not fit in the
 * per-CPU buffer, or a negative error from the LZ4 decompressor.
 *
 * NOTE: kmap_atomic()/preempt_disable() pairing and ordering here is
 * load-bearing — the per-CPU buffer is only valid while preemption is
 * off, and the atomic mappings must be released in reverse order.
 */
int z_erofs_vle_unzip_fast_percpu(struct page **compressed_pages,
				  unsigned int clusterpages,
				  struct page **pages,
				  unsigned int outlen,
				  unsigned short pageofs,
				  void (*endio)(struct page *))
{
	void *vin, *vout;
	unsigned int nr_pages, i, j;
	int ret;

	/* Output (plus leading offset) must fit in the per-CPU buffer. */
	if (outlen + pageofs > EROFS_PERCPU_NR_PAGES * PAGE_SIZE)
		return -ENOTSUPP;

	nr_pages = DIV_ROUND_UP(outlen + pageofs, PAGE_SIZE);

	/* Single-page input can use the cheaper atomic mapping. */
	if (clusterpages == 1)
		vin = kmap_atomic(compressed_pages[0]);
	else
		vin = erofs_vmap(compressed_pages, clusterpages);

	/* Preemption must stay off while we own this CPU's buffer. */
	preempt_disable();
	vout = erofs_pcpubuf[smp_processor_id()].data;

	ret = z_erofs_unzip_lz4(vin, vout + pageofs,
				clusterpages * PAGE_SIZE, outlen);

	/* On success the decompressor returns the actual output length. */
	if (ret >= 0) {
		outlen = ret;
		ret = 0;
	}

	/* Copy the buffer out page by page; j = bytes for this page. */
	for (i = 0; i < nr_pages; ++i) {
		j = min((unsigned int)PAGE_SIZE - pageofs, outlen);

		if (pages[i]) {
			if (ret < 0) {
				SetPageError(pages[i]);
			} else if (clusterpages == 1 &&
				   pages[i] == compressed_pages[0]) {
				/*
				 * In-place reuse: the output page IS the
				 * (already atomically mapped) input page,
				 * so copy via vin rather than remapping.
				 */
				memcpy(vin + pageofs, vout + pageofs, j);
			} else {
				void *dst = kmap_atomic(pages[i]);

				memcpy(dst + pageofs, vout + pageofs, j);
				kunmap_atomic(dst);
			}
			endio(pages[i]);
		}
		vout += PAGE_SIZE;
		outlen -= j;
		/* Only the first page carries the leading offset. */
		pageofs = 0;
	}
	preempt_enable();

	/* Unmap with the same method used for mapping above. */
	if (clusterpages == 1)
		kunmap_atomic(vin);
	else
		erofs_vunmap(vin, clusterpages);

	return ret;
}