Exemplo n.º 1
0
static status_t
cd_write(void* cookie, off_t pos, const void* buffer, size_t* _length)
{
	// Write *_length bytes from `buffer` to the CD at byte offset `pos`
	// by issuing a synchronous request through the device's I/O scheduler.
	cd_handle* handle = (cd_handle*)cookie;
	const size_t bytesRequested = *_length;

	// No medium present -- nothing to write to.
	if (handle->info->capacity == 0)
		return B_DEV_NO_MEDIA;

	IORequest request;
	status_t result = request.Init(pos, (addr_t)buffer, bytesRequested, true,
		0);
	if (result != B_OK)
		return result;

	result = handle->info->io_scheduler->ScheduleRequest(&request);
	if (result != B_OK)
		return result;

	// Block until the scheduler has completed the request.
	result = request.Wait(0, 0);
	if (result != B_OK) {
		dprintf("cd_write(): request.Wait() returned: %s\n", strerror(result));
		return result;
	}

	*_length = bytesRequested;
	return B_OK;
}
Exemplo n.º 2
0
    // Completion callback for an IO request in the IOManager unit test:
    // logs the result, increments the test fixture's completion counter
    // under its mutex, and signals the fixture's condition variable so a
    // waiting test thread can re-check the count.
    static void Notify(const IORequest &req) {
        FTRACE;
        hlog(HLOG_INFO, "IO request %lu completed, result is %ld",
             req.GetRequestNumber(), req.GetResult());
        // The request's user-data slot carries a pointer back to the owning
        // test fixture (set where the request was issued -- not visible here).
        IOManagerUnitTest &ut(*(reinterpret_cast<IOManagerUnitTest*>(req.GetUserData())));
        // NOTE(review): despite the name, AutoUnlockMutex is used here as a
        // scoped guard around the counter update -- confirm its
        // acquire/release semantics against its declaration.
        AutoUnlockMutex lock(ut.mLock);
        ++ut.mNumCompletions;
//        hlog(HLOG_DEBUG, "mNumCompletions now %d", ut.mNumCompletions);
        ut.mCond.Signal();
    }
Exemplo n.º 3
0
/*!	Kernel debugger command: dumps the state of the IORequest whose address
	is given as the single (kdebug expression) argument.
*/
static int
dump_io_request(int argc, char** argv)
{
	if (argc != 2 || !strcmp(argv[1], "--help")) {
		kprintf("usage: %s <ptr-to-io-request>\n", argv[0]);
		return 0;
	}

	IORequest* request = (IORequest*)parse_expression(argv[1]);
	if (request == NULL) {
		// Guard against a zero/unparsable expression instead of crashing
		// the debugger with a NULL dereference.
		kprintf("invalid address\n");
		return 0;
	}

	request->Dump();
	return 0;
}
Exemplo n.º 4
0
/*!	Lower-bound binary search over this thread's I/O requests (assumed
	sorted by scheduled time): returns the index of the first request whose
	ScheduledTime() is >= \a minRequestStartTime, or fIORequestCount if no
	such request exists.
*/
size_t
Model::Thread::ClosestRequestStartIndex(nanotime_t minRequestStartTime) const
{
	size_t low = 0;
	size_t high = fIORequestCount;

	while (low < high) {
		const size_t middle = low + (high - low) / 2;

		if (fIORequests[middle]->ScheduledTime() < minRequestStartTime)
			low = middle + 1;
		else
			high = middle;
	}

	return low;
}
Exemplo n.º 5
0
static status_t
das_write(void* cookie, off_t pos, const void* buffer, size_t* _length)
{
    // Write *_length bytes from `buffer` at byte offset `pos` by issuing
    // a synchronous request through the device's I/O scheduler.
    das_handle* handle = (das_handle*)cookie;
    const size_t bytesRequested = *_length;

    IORequest request;
    status_t result = request.Init(pos, (void*)buffer, bytesRequested, true,
        0);
    if (result != B_OK)
        return result;

    result = handle->info->io_scheduler->ScheduleRequest(&request);
    if (result != B_OK)
        return result;

    // Block until the scheduler has carried out the request.
    result = request.Wait(0, 0);
    if (result != B_OK) {
        dprintf("das_write(): request.Wait() returned: %s\n", strerror(result));
        return result;
    }

    *_length = bytesRequested;
    return B_OK;
}
Exemplo n.º 6
0
/*!	Asynchronously writes \a numBytes from the given I/O vectors to
	\a vnode at offset \a pos. The request is created with
	B_DELETE_IO_REQUEST so it deletes itself when done, and completion is
	reported through \a callback->IOFinished(). On early failure the
	callback is still invoked (with the error) before returning.
*/
status_t
vfs_asynchronous_write_pages(struct vnode* vnode, void* cookie, off_t pos,
	const generic_io_vec* vecs, size_t count, generic_size_t numBytes,
	uint32 flags, AsyncIOCallback* callback)
{
	const bool vip = (flags & B_VIP_IO_REQUEST) != 0;
	IORequest* request = IORequest::Create(vip);
	if (request == NULL) {
		callback->IOFinished(B_NO_MEMORY, true, 0);
		return B_NO_MEMORY;
	}

	const status_t error = request->Init(pos, vecs, count, numBytes, true,
		flags | B_DELETE_IO_REQUEST);
	if (error != B_OK) {
		// The request will never run, so free it ourselves and report the
		// failure to the callback directly.
		delete request;
		callback->IOFinished(error, true, 0);
		return error;
	}

	request->SetFinishedCallback(&AsyncIOCallback::IORequestCallback,
		callback);

	return vfs_vnode_io(vnode, cookie, request);
}
Exemplo n.º 7
0
/** Sends an IO task to a target worker.
 * @param wi the worker that will execute this task
 * @param array_name the name of the splits this IO task operates on
 * @param store_name the name of the external storage related to this IO task
 * @param type the type of this task (SAVE or LOAD)
 * @param id ID of this task (generally 0)
 * @param uid the real ID of this task
 */
static void dispatch_io(WorkerInfo *wi, const string &array_name,
                        const string &store_name, IORequest::Type type,
                        ::uint64_t id, ::uint64_t uid) {
  // Build the request message field by field, then hand it to the worker.
  IORequest req;
  req.set_array_name(array_name);
  req.set_store_name(store_name);
  req.set_type(type);
  req.set_id(id);
  req.set_uid(uid);
  wi->IO(req);
  LOG_INFO("IO TaskID %19d - Sent to Worker %s", static_cast<int>(uid), wi->hostname().c_str());
}
Exemplo n.º 8
0
/*!	Iteration callback for iterative FD I/O: translates the next portion of
	\a request into up to kMaxSubRequests subrequests, based on the file
	vecs supplied by the cookie's get_vecs() hook, and schedules them via
	vfs_vnode_io(). Sparse-file vecs (offset -1) are zero-filled in place
	for read requests. Sets *_partialTransfer when get_vecs() yields no
	further vecs.
*/
static status_t
do_iterative_fd_io_iterate(void* _cookie, io_request* request,
	bool* _partialTransfer)
{
	TRACE_RIO("[%ld] do_iterative_fd_io_iterate(request: %p)\n",
		find_thread(NULL), request);

	static const size_t kMaxSubRequests = 8;

	iterative_io_cookie* cookie = (iterative_io_cookie*)_cookie;

	// Subrequests of the previous iteration are done; drop them.
	request->DeleteSubRequests();

	// How much of the request is still to be transferred, starting at the
	// offset this cookie has advanced to so far.
	off_t requestOffset = cookie->request_offset;
	size_t requestLength = request->Length()
		- (requestOffset - request->Offset());

	// get the next file vecs
	file_io_vec vecs[kMaxSubRequests];
	size_t vecCount = kMaxSubRequests;
	status_t error = cookie->get_vecs(cookie->cookie, request, requestOffset,
		requestLength, vecs, &vecCount);
	if (error != B_OK && error != B_BUFFER_OVERFLOW)
		return error;
	if (vecCount == 0) {
		// Nothing more to map -- report a partial transfer.
		*_partialTransfer = true;
		return B_OK;
	}
	TRACE_RIO("[%ld]  got %zu file vecs\n", find_thread(NULL), vecCount);

	// create subrequests for the file vecs we've got
	size_t subRequestCount = 0;
	for (size_t i = 0;
		i < vecCount && subRequestCount < kMaxSubRequests && error == B_OK;
		i++) {
		off_t vecOffset = vecs[i].offset;
		off_t vecLength = min_c(vecs[i].length, (off_t)requestLength);
		TRACE_RIO("[%ld]    vec %lu offset: %lld, length: %lld\n",
			find_thread(NULL), i, vecOffset, vecLength);

		// Special offset -1 means that this is part of sparse file that is
		// zero. We fill it in right here.
		if (vecOffset == -1) {
			if (request->IsWrite()) {
				panic("do_iterative_fd_io_iterate(): write to sparse file "
					"vector");
				error = B_BAD_VALUE;
				break;
			}

			error = request->ClearData(requestOffset, vecLength);
			if (error != B_OK)
				break;

			requestOffset += vecLength;
			requestLength -= vecLength;
			continue;
		}

		// A vec may be larger than what a single subrequest can cover, so
		// keep creating subrequests until the vec is exhausted.
		while (vecLength > 0 && subRequestCount < kMaxSubRequests) {
			TRACE_RIO("[%ld]    creating subrequest: offset: %lld, length: "
				"%lld\n", find_thread(NULL), vecOffset, vecLength);
			IORequest* subRequest;
			error = request->CreateSubRequest(requestOffset, vecOffset,
				vecLength, subRequest);
			if (error != B_OK)
				break;

			subRequestCount++;

			// The subrequest may cover less than asked for; advance by what
			// it actually took.
			size_t lengthProcessed = subRequest->Length();
			vecOffset += lengthProcessed;
			vecLength -= lengthProcessed;
			requestOffset += lengthProcessed;
			requestLength -= lengthProcessed;
		}
	}

	// Only if we couldn't create any subrequests, we fail.
	if (error != B_OK && subRequestCount == 0)
		return error;

	// Reset the error code for the loop below
	error = B_OK;

	// Record our progress on the parent request and in the cookie.
	request->Advance(requestOffset - cookie->request_offset);
	cookie->request_offset = requestOffset;

	// If we don't have any sub requests at this point, that means all that
	// remained were zeroed sparse file vectors. So the request is done now.
	if (subRequestCount == 0) {
		ASSERT(request->RemainingBytes() == 0);
		request->SetStatusAndNotify(B_OK);
		return B_OK;
	}

	// Schedule the subrequests.
	IORequest* nextSubRequest = request->FirstSubRequest();
	while (nextSubRequest != NULL) {
		IORequest* subRequest = nextSubRequest;
		// Fetch the next one first: scheduling may complete (and thus
		// invalidate) the current subrequest.
		nextSubRequest = request->NextSubRequest(subRequest);

		if (error == B_OK) {
			TRACE_RIO("[%ld]  scheduling subrequest: %p\n", find_thread(NULL),
				subRequest);
			error = vfs_vnode_io(cookie->vnode, cookie->descriptor->cookie,
				subRequest);
		} else {
			// Once scheduling a subrequest failed, we cancel all subsequent
			// subrequests.
			subRequest->SetStatusAndNotify(B_CANCELED);
		}
	}

	// TODO: Cancel the subrequests that were scheduled successfully.

	return B_OK;
}
Exemplo n.º 9
0
/*!	Finalizes the request once no children/operations are pending: gives an
	installed iteration callback a chance to continue the transfer first;
	otherwise unlocks the buffer memory, wakes waiters, and invokes the
	finished callback and the parent notification. May delete this request
	when B_DELETE_IO_REQUEST is set -- the object must not be touched after
	the waiters have been unblocked.
*/
void
IORequest::NotifyFinished()
{
	TRACE("IORequest::NotifyFinished(): request: %p\n", this);

	MutexLocker locker(fLock);

	if (fStatus == B_OK && !fPartialTransfer && RemainingBytes() > 0) {
		// The request is not really done yet. If it has an iteration callback,
		// call it.
		if (fIterationCallback != NULL) {
			ResetStatus();
			locker.Unlock();
			bool partialTransfer = false;
			status_t error = fIterationCallback(fIterationCookie, this,
				&partialTransfer);
			if (error == B_OK && !partialTransfer)
				return;

			// Iteration failed, which means we're responsible for notifying the
			// requests finished.
			locker.Lock();
			fStatus = error;
			fPartialTransfer = true;
		}
	}

	ASSERT(fPendingChildren == 0);
	ASSERT(fChildren.IsEmpty()
		|| dynamic_cast<IOOperation*>(fChildren.Head()) == NULL);

	// unlock the memory
	if (fBuffer->IsMemoryLocked())
		fBuffer->UnlockMemory(fTeam, fIsWrite);

	// Cache the callbacks before we unblock waiters and unlock. Any of the
	// following could delete this request, so we don't want to touch it
	// once we have started telling others that it is done.
	IORequest* parent = fParent;
	io_request_finished_callback finishedCallback = fFinishedCallback;
	void* finishedCookie = fFinishedCookie;
	status_t status = fStatus;
	size_t lastTransferredOffset = fRelativeParentOffset + fTransferSize;
	bool partialTransfer = status != B_OK || fPartialTransfer;
	bool deleteRequest = (fFlags & B_DELETE_IO_REQUEST) != 0;

	// unblock waiters
	fIsNotified = true;
	fFinishedCondition.NotifyAll();

	locker.Unlock();

	// notify callback
	if (finishedCallback != NULL) {
		finishedCallback(finishedCookie, this, status, partialTransfer,
			lastTransferredOffset);
	}

	// notify parent
	if (parent != NULL) {
		parent->SubRequestFinished(this, status, partialTransfer,
			lastTransferredOffset);
	}

	if (deleteRequest)
		delete this;
}
Exemplo n.º 10
0
/*!	Creates a child IORequest covering \a length bytes of this request's
	buffer, starting \a parentOffset - fOffset bytes into it, and targeting
	I/O offset \a offset. The child references this request's iovecs
	directly (no copy), inherits team/thread and flags (minus
	B_DELETE_IO_REQUEST), and is registered as a pending child. On success
	\a _subRequest is set and B_OK returned.
*/
status_t
IORequest::CreateSubRequest(off_t parentOffset, off_t offset, size_t length,
	IORequest*& _subRequest)
{
	// The requested range must lie entirely within this request.
	ASSERT(parentOffset >= fOffset && length <= fLength
		&& parentOffset - fOffset <= fLength - length);

	// find start vec
	size_t vecOffset = parentOffset - fOffset;
	iovec* vecs = fBuffer->Vecs();
	int32 vecCount = fBuffer->VecCount();
	int32 startVec = 0;
	for (; startVec < vecCount; startVec++) {
		const iovec& vec = vecs[startVec];
		if (vecOffset < vec.iov_len)
			break;

		vecOffset -= vec.iov_len;
	}

	// count vecs
	size_t currentVecOffset = vecOffset;
	int32 endVec = startVec;
	size_t remainingLength = length;
	for (; endVec < vecCount; endVec++) {
		const iovec& vec = vecs[endVec];
		if (vec.iov_len - currentVecOffset >= remainingLength)
			break;

		remainingLength -= vec.iov_len - currentVecOffset;
		currentVecOffset = 0;
	}

	// create subrequest
	IORequest* subRequest = Create((fFlags & B_VIP_IO_REQUEST) != 0);
	if (subRequest == NULL)
		return B_NO_MEMORY;

	// The child must not delete itself -- we own and track it here.
	status_t error = subRequest->Init(offset, vecOffset, vecs + startVec,
		endVec - startVec + 1, length, fIsWrite, fFlags & ~B_DELETE_IO_REQUEST);
	if (error != B_OK) {
		delete subRequest;
		return error;
	}

	subRequest->fRelativeParentOffset = parentOffset - fOffset;
	subRequest->fTeam = fTeam;
	subRequest->fThread = fThread;

	_subRequest = subRequest;
	subRequest->SetParent(this);

	MutexLocker _(fLock);

	fChildren.Add(subRequest);
	fPendingChildren++;
	TRACE("IORequest::CreateSubRequest(): request: %p, subrequest: %p\n", this,
		subRequest);

	return B_OK;
}