Example #1
static int aio_ref_get(struct aio_output *output, struct mref_object *mref)
{
	loff_t total_size;

	if (unlikely(!output->brick->power.led_on))
		return -EBADFD;

	if (unlikely(!output->mf)) {
		MARS_ERR("brick is not switched on\n");
		return -EILSEQ;
	}

	if (unlikely(mref->ref_len <= 0)) {
		MARS_ERR("bad ref_len=%d\n", mref->ref_len);
		return -EILSEQ;
	}

	total_size = get_total_size(output);
	if (unlikely(total_size < 0)) {
		return total_size;
	}
	mref->ref_total_size = total_size;

	if (mref->ref_initialized) {
		_mref_get(mref);
		return mref->ref_len;
	}

	/* Buffered IO.
	 */
	if (!mref->ref_data) {
		struct aio_mref_aspect *mref_a = aio_mref_get_aspect(output->brick, mref);
		if (unlikely(!mref_a)) {
			MARS_ERR("bad mref_a\n");
			return -EILSEQ;
		}
		if (unlikely(mref->ref_len <= 0)) {
			MARS_ERR("bad ref_len = %d\n", mref->ref_len);
			return -ENOMEM;
		}
		mref->ref_data = brick_block_alloc(mref->ref_pos, (mref_a->alloc_len = mref->ref_len));
		if (unlikely(!mref->ref_data)) {
			MARS_ERR("ENOMEM %d bytes\n", mref->ref_len);
			return -ENOMEM;
		}
#if 0 // ???
		mref->ref_flags = 0;
#endif
		mref_a->do_dealloc = true;
		atomic_inc(&output->total_alloc_count);
		atomic_inc(&output->alloc_count);
	}

	_mref_get_first(mref);
	return mref->ref_len;
}
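Example #1 implements the ref_get half of the MARS brick contract: validate the brick state, record the current total size, allocate a buffer on first use for buffered IO, and take a reference. A minimal caller sketch follows; it is hypothetical, not taken from the MARS sources, and the helper name example_submit is an assumption for illustration only:

/* Hypothetical caller sketch: a successful aio_ref_get() must be
 * balanced by an eventual release of the reference it takes.
 */
static int example_submit(struct aio_output *output, struct mref_object *mref)
{
	int status = aio_ref_get(output, mref);

	if (status < 0)
		return status;	/* brick off, bad length, or out of memory */

	/* On success, status equals mref->ref_len; other ref_get
	 * variants below may shorten the length first.
	 */
	aio_ref_io(output, mref);	/* completion arrives via callback */
	return status;
}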
Example #2
static void aio_ref_io(struct aio_output *output, struct mref_object *mref)
{
	struct aio_threadinfo *tinfo = &output->tinfo[0];
	struct aio_mref_aspect *mref_a;
	int err = -EINVAL;

	_mref_check(mref);

	if (unlikely(!output->brick->power.led_on)) {
		SIMPLE_CALLBACK(mref, -EBADFD);
		return;
	}

	_mref_get(mref);
	atomic_inc(&mars_global_io_flying);
	atomic_inc(&output->work_count);

	// statistics
	if (mref->ref_rw) {
		atomic_inc(&output->total_write_count);
		atomic_inc(&output->write_count);
	} else {
		atomic_inc(&output->total_read_count);
		atomic_inc(&output->read_count);
	}

	if (unlikely(!output->mf || !output->mf->mf_filp)) {
		goto done;
	}

	mapfree_set(output->mf, mref->ref_pos, -1);

	MARS_IO("AIO rw=%d pos=%lld len=%d data=%p\n", mref->ref_rw, mref->ref_pos, mref->ref_len, mref->ref_data);

	mref_a = aio_mref_get_aspect(output->brick, mref);
	if (unlikely(!mref_a)) {
		goto done;
	}

	_enqueue(tinfo, mref_a, mref->ref_prio, true);
	return;

done:
	_complete_mref(output, mref, err);
}
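A design point in Example #2: _mref_get(), mars_global_io_flying and the work/statistics counters are all incremented before the output->mf check, so every later failure must be routed through the done: label instead of returning directly; presumably _complete_mref() (not shown) both fires the callback with err and rebalances the reference and counters taken above.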
Example #3
static void client_ref_io(struct client_output *output, struct mref_object *mref)
{
	struct client_mref_aspect *mref_a;
	int error = -EINVAL;

	mref_a = client_mref_get_aspect(output->brick, mref);
	if (unlikely(!mref_a)) {
		goto error;
	}

	while (output->brick->max_flying > 0 && atomic_read(&output->fly_count) > output->brick->max_flying) {
		MARS_IO("sleeping request pos = %lld len = %d rw = %d (flying = %d)\n", mref->ref_pos, mref->ref_len, mref->ref_rw, atomic_read(&output->fly_count));
#ifdef IO_DEBUGGING
		brick_msleep(3000);
#else
		brick_msleep(1000 * 2 / HZ);
#endif
	}

	atomic_inc(&mars_global_io_flying);
	atomic_inc(&output->fly_count);
	_mref_get(mref);

	mref_a->submit_jiffies = jiffies;
	_hash_insert(output, mref_a);

	MARS_IO("added request id = %d pos = %lld len = %d rw = %d (flying = %d)\n", mref->ref_id, mref->ref_pos, mref->ref_len, mref->ref_rw, atomic_read(&output->fly_count));

	wake_up_interruptible(&output->event);

	return;

error:
	MARS_ERR("IO error = %d\n", error);
	SIMPLE_CALLBACK(mref, error);
	client_ref_put(output, mref);
}
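The while loop at the top of Example #3 throttles submission: as long as more than max_flying requests are in flight, the submitter sleeps briefly and re-checks. The non-debug sleep, brick_msleep(1000 * 2 / HZ), is two scheduler ticks expressed in milliseconds: 2 ms with HZ=1000, 8 ms with HZ=250, 20 ms with HZ=100. Because the loop polls rather than waiting on a queue, max_flying acts as a soft limit; several sleepers may pass the check at once.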
Example #4
static int client_ref_get(struct client_output *output, struct mref_object *mref)
{
	int maxlen;

	if (mref->ref_initialized) {
		_mref_get(mref);
		return mref->ref_len;
	}

#if 1
	/* Limit transfers to page boundaries.
	 * Currently, this is more restrictive than necessary.
	 * TODO: improve performance by doing better when possible.
	 * This needs help from the server in some efficient way.
	 */
	maxlen = PAGE_SIZE - (mref->ref_pos & (PAGE_SIZE-1));
	if (mref->ref_len > maxlen)
		mref->ref_len = maxlen;
#endif

	if (!mref->ref_data) { // buffered IO
		struct client_mref_aspect *mref_a = client_mref_get_aspect(output->brick, mref);
		if (!mref_a)
			return -EILSEQ;

		mref->ref_data = brick_block_alloc(mref->ref_pos, (mref_a->alloc_len = mref->ref_len));
		if (!mref->ref_data)
			return -ENOMEM;

		mref_a->do_dealloc = true;
		mref->ref_flags = 0;
	}

	_mref_get_first(mref);
	return 0;
}
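The #if 1 block in Example #4 clamps every transfer to its containing page: maxlen = PAGE_SIZE - (ref_pos & (PAGE_SIZE-1)) is the distance from ref_pos to the next page boundary. With PAGE_SIZE = 4096 and ref_pos = 8190, for instance, maxlen = 4096 - (8190 & 4095) = 4096 - 4094 = 2, so even a 512-byte request is shortened to 2 bytes and the caller must follow up for the rest. Note also that, unlike aio_ref_get() in Example #1, this function returns 0 on success rather than ref_len, so callers must re-read mref->ref_len to learn the granted length. The clamp in isolation, as a hypothetical standalone helper:

/* Hypothetical helper showing the page clamp on its own. */
static inline int clamp_len_to_page(loff_t pos, int len)
{
	int maxlen = PAGE_SIZE - (pos & (PAGE_SIZE - 1));

	return len > maxlen ? maxlen : len;	/* never cross a page boundary */
}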
Example #5
File: mars_aio.c  Project: ZyanKLee/mars
static int aio_ref_get(struct aio_output *output, struct mref_object *mref)
{
	struct file *file;
	struct inode *inode;
	loff_t total_size;

	if (unlikely(!output->mf)) {
		MARS_ERR("brick is not switched on\n");
		return -EILSEQ;
	}

	if (unlikely(mref->ref_len <= 0)) {
		MARS_ERR("bad ref_len=%d\n", mref->ref_len);
		return -EILSEQ;
	}

	if (mref->ref_initialized) {
		_mref_get(mref);
		return mref->ref_len;
	}

	file = output->mf->mf_filp;
	if (unlikely(!file)) {
		MARS_ERR("file is not open\n");
		return -EILSEQ;
	}
	if (unlikely(!file->f_mapping)) {
		MARS_ERR("file %p has no mapping\n", file);
		return -EILSEQ;
	}
	inode = file->f_mapping->host;
	if (unlikely(!inode)) {
		MARS_ERR("file %p has no inode\n", file);
		return -EILSEQ;
	}
	
	total_size = i_size_read(inode);
	mref->ref_total_size = total_size;
	/* Only check reads.
	 * Writes behind EOF are always allowed (sparse files)
	 */
	if (!mref->ref_may_write) {
		loff_t len = total_size - mref->ref_pos;
		if (unlikely(len <= 0)) {
			/* Special case: allow reads starting _exactly_ at EOF when a timeout is specified.
			 */
			if (len < 0 || mref->ref_timeout <= 0) {
				MARS_DBG("ENODATA %lld\n", len);
				return -ENODATA;
			}
		}
		// Shorten below EOF, but allow special case
		if (mref->ref_len > len && len > 0) {
			mref->ref_len = len;
		}
	}

	/* Buffered IO.
	 */
	if (!mref->ref_data) {
		struct aio_mref_aspect *mref_a = aio_mref_get_aspect(output->brick, mref);
		if (unlikely(!mref_a)) {
			MARS_ERR("bad mref_a\n");
			return -EILSEQ;
		}
		if (unlikely(mref->ref_len <= 0)) {
			MARS_ERR("bad ref_len = %d\n", mref->ref_len);
			return -ENOMEM;
		}
		mref->ref_data = brick_block_alloc(mref->ref_pos, (mref_a->alloc_len = mref->ref_len));
		if (unlikely(!mref->ref_data)) {
			MARS_ERR("ENOMEM %d bytes\n", mref->ref_len);
			return -ENOMEM;
		}
#if 0 // ???
		mref->ref_flags = 0;
#endif
		mref_a->do_dealloc = true;
		atomic_inc(&output->total_alloc_count);
		atomic_inc(&output->alloc_count);
	}

	_mref_get_first(mref);
	return mref->ref_len;
}
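This aio_ref_get() variant (Example #5) inlines the size lookup via i_size_read() and adds EOF handling for reads. Worked through: with total_size = 1048576, a read at ref_pos = 1048064 with ref_len = 1024 sees len = 512 > 0 and is shortened to 512 bytes. A read starting exactly at EOF (len == 0) fails with -ENODATA unless ref_timeout > 0, in which case it is allowed through, presumably so the caller can wait for the file to grow; a read starting beyond EOF (len < 0) always fails. Writes are deliberately not checked, since sparse files make writes behind EOF legal, as the comment in the code notes.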
Example #6
File: mars_usebuf.c  Project: 1and1/mars
static void usebuf_ref_io(struct usebuf_output *output, struct mref_object *mref)
{
	struct usebuf_input *input = output->brick->inputs[0];
	struct usebuf_mref_aspect *mref_a;
	struct usebuf_mref_aspect *sub_mref_a;
	struct mref_object *sub_mref;
	int error = -EILSEQ;

	might_sleep();

	_mref_check(mref);

	mref_a = usebuf_mref_get_aspect(output->brick, mref);
	if (unlikely(!mref_a)) {
		MARS_FAT("cannot get aspect\n");
		goto err;
	}

	sub_mref_a = mref_a->sub_mref_a;
	if (!sub_mref_a) {
		MARS_FAT("sub_mref is missing\n");
		goto err;
	}

	sub_mref = sub_mref_a->object;
	if (!sub_mref) {
		MARS_FAT("sub_mref is missing\n");
		goto err;
	}

	if (mref->ref_rw != 0 && sub_mref->ref_may_write == 0) {
		MARS_ERR("mref_may_write was not set before\n");
		goto err;
	}

	_mref_get(mref);

	sub_mref->ref_rw = mref->ref_rw;
	sub_mref->ref_len = mref->ref_len;
	mref_a->input = input;
	/* Optimization: when buffered IO is used and buffer is already
	 * uptodate, skip real IO operation.
	 */
	if (mref->ref_rw != 0) {
#ifdef DIRECT_WRITE
		sub_mref->ref_rw = 1;
#else // normal case
		sub_mref->ref_rw = 0;
		if (sub_mref->ref_flags & MREF_UPTODATE) {
			sub_mref->ref_rw = 1;
		}
#endif
	} else if (sub_mref->ref_flags & MREF_UPTODATE) {
		MARS_IO("direct _usebuf_endio\n");
		_usebuf_endio(sub_mref->object_cb);
		return;
	}
	if (mref->ref_data != sub_mref->ref_data) {
		if (sub_mref->ref_rw != 0) {
			_usebuf_copy(mref, sub_mref, 1);
			mref->ref_flags |= MREF_UPTODATE;
		}
	}

#ifdef FAKE_ALL
	_usebuf_endio(sub_mref->ref_cb);
	return;
#endif
	GENERIC_INPUT_CALL(input, mref_io, sub_mref);

	return;

err:
	SIMPLE_CALLBACK(mref, error);
	return;
}
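The interesting part of Example #6 is the buffered-write path in the normal (non-DIRECT_WRITE) case: when the buffer is not yet uptodate, sub_mref->ref_rw is set to 0, i.e. a read is issued first, apparently to complete a read-modify-write cycle; only when MREF_UPTODATE is already set does the sub-request go out as a write, after _usebuf_copy() has copied the caller's data into the sub-buffer. Reads on an uptodate buffer skip real IO entirely and call _usebuf_endio() directly. (The FAKE_ALL block refers to sub_mref->ref_cb while the uptodate shortcut uses sub_mref->object_cb; this looks like a stale field name in rarely compiled debug code.)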
Example #7
File: mars_usebuf.c  Project: 1and1/mars
static int usebuf_ref_get(struct usebuf_output *output, struct mref_object *mref)
{
	struct usebuf_input *input = output->brick->inputs[0];
	struct usebuf_mref_aspect *mref_a;
	struct usebuf_mref_aspect *sub_mref_a;
	struct mref_object *sub_mref;
	int status = 0;

	might_sleep();

	mref_a = usebuf_mref_get_aspect(output->brick, mref);
	if (unlikely(!mref_a)) {
		MARS_FAT("cannot get aspect\n");
		return -EILSEQ;
	}

	sub_mref_a = mref_a->sub_mref_a;
	if (!sub_mref_a) {
		sub_mref = usebuf_alloc_mref(output->brick);
		if (unlikely(!sub_mref)) {
			MARS_FAT("cannot get sub_mref\n");
			return -ENOMEM;
		}

		sub_mref_a = usebuf_mref_get_aspect(output->brick, sub_mref);
		if (unlikely(!sub_mref_a)) {
			MARS_FAT("cannot get aspect\n");
			return -EILSEQ;
		}

		mref_a->sub_mref_a = sub_mref_a;
		sub_mref->ref_pos = mref->ref_pos;
		sub_mref->ref_len = mref->ref_len;
		sub_mref->ref_may_write = mref->ref_may_write;
#ifdef DIRECT_IO // shortcut solely for testing: do direct IO
		if (!mref->ref_data)
			MARS_ERR("NULL.......\n");
		sub_mref->ref_data = mref->ref_data;
#else // normal case: buffered IO
		sub_mref->ref_data = NULL;
#endif
		SETUP_CALLBACK(sub_mref, _usebuf_endio, mref_a);
		mref->ref_flags = 0;
	} else {
		sub_mref = sub_mref_a->object;
#if 1
		MARS_ERR("please do not use this broken feature\n");
#endif		
	}

	status = GENERIC_INPUT_CALL(input, mref_get, sub_mref);
	if (status < 0) {
		return status;
	}

	mref->ref_len = sub_mref->ref_len;
	//MARS_INF("GOT %p %p flags = %d\n", mref, sub_mref, sub_mref->ref_flags);
	if (!mref->ref_data) {
		MARS_INF("uiiiiiiiiiii\n");
		mref->ref_data = sub_mref->ref_data;
	}
	_mref_get(mref);

	return status;
}
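Example #7 is the delegation pattern that pairs with Example #6: usebuf_ref_get() lazily creates a sub-mref on the underlying input, forwards ref_pos, ref_len and ref_may_write, and lets GENERIC_INPUT_CALL(input, mref_get, sub_mref) do the real work, including buffer allocation in the buffered-IO case. Afterwards mref->ref_len is copied back from the sub-mref, because the lower brick may have shortened the request (exactly what the page clamp in Example #4 does). The else branch, re-entering with an existing sub_mref_a, is explicitly flagged as broken by the MARS_ERR in the code.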