Example #1
bool
DPCQueue::Cancel(DPCCallback* callback)
{
	InterruptsSpinLocker locker(fLock);

	// If the callback is queued, remove it.
	if (callback->fInQueue == this) {
		fCallbacks.Remove(callback);
		return true;
	}

	// The callback is not queued. If it isn't in progress, we're done, too.
	if (callback != fCallbackInProgress)
		return false;

	// The callback is currently being executed. We need to wait for it to be
	// done.

	// Set the respective condition, if not set yet. For the unlikely case that
	// there are multiple threads trying to cancel the callback at the same
	// time, the condition variable of the first thread will be used.
	ConditionVariable condition;
	if (fCallbackDoneCondition == NULL)
		fCallbackDoneCondition = &condition;

	// add our wait entry
	ConditionVariableEntry waitEntry;
	fCallbackDoneCondition->Add(&waitEntry);

	// wait
	locker.Unlock();
	waitEntry.Wait();

	return false;
}
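Cancel() therefore has three outcomes: the callback was dequeued in time (true), it was never queued (false), or it was mid-execution and Cancel() blocked until it finished (also false). A minimal caller sketch, assuming a DPCCallback subclass as in the Haiku DPC API (MyCallback and its scheduling are hypothetical; only DPCQueue::Cancel() above is from the source):

	// Hypothetical caller of the Cancel() shown above.
	struct MyCallback : public DPCCallback {
		virtual void DoDPC(DPCQueue* queue)
		{
			// deferred work runs here
		}
	};

	void
	cancel_example(DPCQueue* queue, MyCallback* callback)
	{
		if (queue->Cancel(callback)) {
			// dequeued before it ran; DoDPC() will not be called
		} else {
			// never queued, or it already ran (Cancel() waited for it)
		}
	}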
Example #2
status_t
HIDReport::WaitForReport(bigtime_t timeout)
{
	while (atomic_get(&fBusyCount) != 0)
		snooze(1000);

	ConditionVariableEntry conditionVariableEntry;
	fConditionVariable.Add(&conditionVariableEntry);
	status_t result = fParser->Device()->MaybeScheduleTransfer();
	if (result != B_OK) {
		TRACE_ALWAYS("scheduling transfer failed\n");
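		// a zero-timeout wait just detaches our entry from the
		// condition variable again before we bail out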
		conditionVariableEntry.Wait(B_RELATIVE_TIMEOUT, 0);
		return result;
	}

	result = conditionVariableEntry.Wait(B_RELATIVE_TIMEOUT, timeout);
	TRACE("waiting for report returned with result: %s\n", strerror(result));
	if (result != B_OK)
		return result;

	if (fReportStatus != B_OK)
		return fReportStatus;

	atomic_add(&fBusyCount, 1);
	return B_OK;
}
Example #3
int
publishedConditionTimedWait(const void* waitChannel, const int timeout)
{
	ConditionVariableEntry variableEntry;

	status_t status = variableEntry.Wait(waitChannel, B_RELATIVE_TIMEOUT,
		ticks_to_usecs(timeout));

	if (status != B_OK)
		status = EWOULDBLOCK;
	return status;
}
Example #4
void
core_dump_trap_thread()
{
	Thread* thread = thread_get_current_thread();
	ConditionVariableEntry conditionVariableEntry;
	TeamLocker teamLocker(thread->team);

	while ((atomic_get(&thread->flags) & THREAD_FLAGS_TRAP_FOR_CORE_DUMP)
			!= 0) {
		thread->team->CoreDumpCondition()->Add(&conditionVariableEntry);
		teamLocker.Unlock();
		conditionVariableEntry.Wait();
		teamLocker.Lock();
	}
}
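The other half of this handshake is the core-dump writer: once the dump no longer needs the thread trapped, it presumably clears the flag and notifies the team's condition variable. A fragment sketch under that assumption (locking elided; only names used above are assumed to exist):

	// Hedged sketch of the wake-up side, with the team lock held:
	atomic_and(&thread->flags, ~(int32)THREAD_FLAGS_TRAP_FOR_CORE_DUMP);
	thread->team->CoreDumpCondition()->NotifyAll();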
Example #5
void
Task::Wait()
{
	while (true) {
		MutexLocker locker(fLock);

		if (fFinished && !fPending)
			return;

		ConditionVariableEntry entry;
		fFinishCondition.Add(&entry);
		locker.Unlock();

		entry.Wait();
	}
}
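The completing side is not part of this excerpt. A minimal sketch of what it plausibly looks like -- the method name Finish() and the flag updates are assumptions; only fLock, fFinished, fPending and fFinishCondition appear above:

	void
	Task::Finish()
	{
		MutexLocker locker(fLock);

		fFinished = true;
		fPending = false;
		// wake every thread parked in Wait(); each re-checks the
		// predicate under fLock before returning
		fFinishCondition.NotifyAll();
	}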
Example #6
static status_t
user_mutex_lock_locked(int32* mutex, addr_t physicalAddress, const char* name,
	uint32 flags, bigtime_t timeout, MutexLocker& locker)
{
	// mark the mutex locked + waiting
	int32 oldValue = atomic_or(mutex,
		B_USER_MUTEX_LOCKED | B_USER_MUTEX_WAITING);

	// The mutex might have been unlocked (or disabled) in the meantime.
	if ((oldValue & (B_USER_MUTEX_LOCKED | B_USER_MUTEX_WAITING)) == 0
			|| (oldValue & B_USER_MUTEX_DISABLED) != 0) {
		// clear the waiting flag and be done
		atomic_and(mutex, ~(int32)B_USER_MUTEX_WAITING);
		return B_OK;
	}

	// we have to wait

	// add the entry to the table
	UserMutexEntry entry;
	entry.address = physicalAddress;
	entry.locked = false;
	add_user_mutex_entry(&entry);

	// wait
	ConditionVariableEntry waitEntry;
	entry.condition.Init((void*)physicalAddress, "user mutex");
	entry.condition.Add(&waitEntry);

	locker.Unlock();
	status_t error = waitEntry.Wait(flags, timeout);
	locker.Lock();

	// dequeue
	if (!remove_user_mutex_entry(&entry)) {
		// no one is waiting anymore -- clear the waiting flag
		atomic_and(mutex, ~(int32)B_USER_MUTEX_WAITING);
	}

	if (error != B_OK
			&& (entry.locked || (*mutex & B_USER_MUTEX_DISABLED) != 0)) {
		// timeout or interrupt, but the mutex was unlocked or disabled in time
		error = B_OK;
	}

	return error;
}
Example #7
void
PhysicalPageSlotQueue::GetSlots(PhysicalPageSlot*& slot1,
	PhysicalPageSlot*& slot2)
{
	InterruptsLocker locker;

	// wait for two free slots to turn up
	while (fSlots == NULL || fSlots->next == NULL) {
		ConditionVariableEntry entry;
		fFreeSlotsCondition.Add(&entry);
		locker.Unlock();
		entry.Wait();
		locker.Lock();
	}

	slot1 = fSlots;
	slot2 = slot1->next;
	fSlots = slot2->next;
}
Example #8
status_t
IORequest::Wait(uint32 flags, bigtime_t timeout)
{
	MutexLocker locker(fLock);

	if (IsFinished() && fIsNotified)
		return Status();

	ConditionVariableEntry entry;
	fFinishedCondition.Add(&entry);

	locker.Unlock();

	status_t error = entry.Wait(flags, timeout);
	if (error != B_OK)
		return error;

	return Status();
}
Example #9
PhysicalPageSlot*
PhysicalPageSlotQueue::GetSlot()
{
	InterruptsLocker locker;

	// wait for a free slot to turn up
	while (fSlots == NULL) {
		ConditionVariableEntry entry;
		fFreeSlotCondition.Add(&entry);
		locker.Unlock();
		entry.Wait();
		locker.Lock();
	}

	PhysicalPageSlot* slot = fSlots;
	fSlots = slot->next;

	return slot;
}
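The producer side of both queue methods is missing from this excerpt; a sketch under the same assumptions (the method name PutSlot() is hypothetical):

	void
	PhysicalPageSlotQueue::PutSlot(PhysicalPageSlot* slot)
	{
		InterruptsLocker locker;

		slot->next = fSlots;
		fSlots = slot;

		// a single slot became available, so one waiter suffices;
		// GetSlots() above would additionally need fFreeSlotsCondition
		// notified once two slots are free
		fFreeSlotCondition.NotifyOne();
	}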
Example #10
status_t
DPCQueue::_Thread()
{
	while (true) {
		InterruptsSpinLocker locker(fLock);

		// get the next pending callback
		DPCCallback* callback = fCallbacks.RemoveHead();
		if (callback == NULL) {
			// nothing is pending -- wait unless the queue is already closed
			if (_IsClosed())
				break;

			ConditionVariableEntry waitEntry;
			fPendingCallbacksCondition.Add(&waitEntry);

			locker.Unlock();
			waitEntry.Wait();

			continue;
		}

		callback->fInQueue = NULL;
		fCallbackInProgress = callback;

		// call the callback
		locker.Unlock();
		callback->DoDPC(this);
		locker.Lock();

		fCallbackInProgress = NULL;

		// wake up threads waiting for the callback to be done
		ConditionVariable* doneCondition = fCallbackDoneCondition;
		fCallbackDoneCondition = NULL;
		locker.Unlock();
		if (doneCondition != NULL)
			doneCondition->NotifyAll();
	}

	return B_OK;
}
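The enqueue path feeding this loop is not shown. A hedged sketch built from the names used in Cancel() and _Thread() above (DPCQueue::Add(), fCallbacks.Add() and the B_NOT_ALLOWED error choice are assumptions):

	status_t
	DPCQueue::Add(DPCCallback* callback)
	{
		InterruptsSpinLocker locker(fLock);

		if (_IsClosed())
			return B_NOT_ALLOWED;

		callback->fInQueue = this;
		fCallbacks.Add(callback);

		// wake the queue thread blocked in _Thread()
		fPendingCallbacksCondition.NotifyOne();
		return B_OK;
	}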
Example #11
void
Worker::_Worker()
{
	while (true) {
		MutexLocker locker(fLock);

		if (fTasks.IsEmpty()) {
			ConditionVariableEntry entry;
			fCondition.Add(&entry);
			locker.Unlock();

			status_t status = entry.Wait();
			if (status != B_OK)
				break;
		} else
			locker.Unlock();

		_Work();
	}
}
Example #12
status_t
Volume::_PackageLoader()
{
	while (!fTerminating) {
		MutexLocker jobQueueLocker(fJobQueueLock);

		Job* job = fJobQueue.RemoveHead();
		if (job == NULL) {
			// no job yet -- wait for someone to notify us
			ConditionVariableEntry waitEntry;
			fJobQueueCondition.Add(&waitEntry);
			jobQueueLocker.Unlock();
			waitEntry.Wait();
			continue;
		}

		// do the job
		jobQueueLocker.Unlock();
		job->Do();
		delete job;
	}

	return B_OK;
}
Example #13
status_t
unregister_generic_syscall(const char* subsystem, uint32 version)
{
	// TODO: we should only remove the syscall with the matching version

	while (true) {
		MutexLocker locker(sGenericSyscallLock);

		generic_syscall* syscall = find_generic_syscall(subsystem);
		if (syscall == NULL)
			return B_NAME_NOT_FOUND;

		syscall->valid = false;

		if (syscall->use_count != 0) {
			// Wait until the syscall isn't in use anymore
			ConditionVariableEntry entry;
			syscall->unused_condition.Add(&entry);

			locker.Unlock();

			entry.Wait();
			continue;
		}

		if (syscall->previous != NULL) {
			// reestablish the old syscall
			sGenericSyscalls.Add(syscall->previous);
		}

		sGenericSyscalls.Remove(syscall);
		delete syscall;

		return B_OK;
	}
}
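For this loop to terminate, the syscall dispatcher must drop use_count under sGenericSyscallLock and signal unused_condition on the last release. A sketch of that release path (the function name is hypothetical):

	static void
	release_generic_syscall(generic_syscall* syscall)
	{
		MutexLocker locker(sGenericSyscallLock);

		if (--syscall->use_count == 0) {
			// unregister_generic_syscall() re-checks use_count after
			// waking, so a stray notify is harmless
			syscall->unused_condition.NotifyAll();
		}
	}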
Example #14
status_t
PhysicalMemoryAllocator::Allocate(size_t size, void **logicalAddress,
	phys_addr_t *physicalAddress)
{
#ifdef HAIKU_TARGET_PLATFORM_HAIKU
	if (debug_debugger_running()) {
		if (size > fDebugChunkSize) {
			kprintf("usb allocation of %" B_PRIuSIZE
				" does not fit debug chunk size %" B_PRIuSIZE "!\n",
				size, fDebugChunkSize);
			return B_NO_MEMORY;
		}

		for (size_t i = 0; i < sizeof(fDebugUseMap) * 8; i++) {
			uint64 mask = 1LL << i;
			if ((fDebugUseMap & mask) == 0) {
				fDebugUseMap |= mask;
				*logicalAddress = (void *)((uint8 *)fLogicalBase + fDebugBase
					+ i * fDebugChunkSize);
				*physicalAddress = (phys_addr_t)(fPhysicalBase + fDebugBase
					+ i * fDebugChunkSize);
				return B_OK;
			}
		}

		return B_NO_MEMORY;
	}
#endif

	if (size == 0 || size > fBlockSize[fArrayCount - 1]) {
		TRACE_ERROR(("PMA: bad value for allocate (%ld bytes)\n", size));
		return B_BAD_VALUE;
	}

	size_t arrayLength = 0;
	int32 arrayToUse = 0;
	for (int32 i = 0; i < fArrayCount; i++) {
		if (fBlockSize[i] >= size) {
			arrayToUse = i;
			arrayLength = fArrayLength[i];
			break;
		}
	}

	if (!_Lock())
		return B_ERROR;

	while (true) {
		TRACE(("PMA: will use array %ld (blocksize: %ld) to allocate %ld bytes\n", arrayToUse, fBlockSize[arrayToUse], size));
		uint8 *targetArray = fArray[arrayToUse];
		uint32 arrayOffset = fArrayOffset[arrayToUse] % arrayLength;
		for (size_t i = arrayOffset + 1; i != arrayOffset; i++) {
			if (i >= arrayLength)
				i -= arrayLength;

 			if (targetArray[i] == 0) {
				// found a free slot
				fArrayOffset[arrayToUse] = i;

				// fill upwards to the smallest block
				uint32 fillSize = 1;
				uint32 arrayIndex = i;
				for (int32 j = arrayToUse; j >= 0; j--) {
					memset(&fArray[j][arrayIndex], 1, fillSize);
					fillSize <<= 1;
					arrayIndex <<= 1;
				}

				// fill downwards to the biggest block
				arrayIndex = i >> 1;
				for (int32 j = arrayToUse + 1; j < fArrayCount; j++) {
					fArray[j][arrayIndex]++;
					if (fArray[j][arrayIndex] > 1)
						break;

					arrayIndex >>= 1;
				}

				_Unlock();
				size_t offset = fBlockSize[arrayToUse] * i;
				*logicalAddress = (void *)((uint8 *)fLogicalBase + offset);
				*physicalAddress = (phys_addr_t)(fPhysicalBase + offset);
				return B_OK;
			}
		}

		// no slot found, we need to wait

		ConditionVariableEntry entry;
		fNoMemoryCondition.Add(&entry);
		fMemoryWaitersCount++;

		_Unlock();

		TRACE_ERROR(("PMA: found no free slot to store %ld bytes, waiting\n",
			size));

		entry.Wait();

		if (!_Lock())
			return B_ERROR;

		fMemoryWaitersCount--;
	}
}
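The excerpt ends before the matching Free() path; for the wait above to ever end, Free() must clear the slot in the buddy arrays and then wake the waiters. A fragment sketch of just that wake-up step (the surrounding bookkeeping is elided):

	// Hedged sketch: tail of a matching Free(), still under the lock.
	if (fMemoryWaitersCount > 0)
		fNoMemoryCondition.NotifyAll();
	_Unlock();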
Example #15
/*!	Writes the specified data bytes to the inode's ring buffer. The
	request lock must be held when calling this method.
	Notifies readers if necessary, so that blocking readers will get started.
	Returns B_OK for success, B_BAD_ADDRESS if copying from the buffer failed,
	and various semaphore errors (like B_WOULD_BLOCK in non-blocking mode). If
	the returned length is > 0, the returned error code can be ignored.
*/
status_t
Inode::WriteDataToBuffer(const void* _data, size_t* _length, bool nonBlocking,
	bool isUser)
{
	const uint8* data = (const uint8*)_data;
	size_t dataSize = *_length;
	size_t& written = *_length;
	written = 0;

	TRACE("Inode %p::WriteDataToBuffer(data = %p, bytes = %zu)\n", this, data,
		dataSize);

	// A request of up to VFS_FIFO_ATOMIC_WRITE_SIZE bytes shall not be
	// interleaved with other writers' data.
	size_t minToWrite = 1;
	if (dataSize <= VFS_FIFO_ATOMIC_WRITE_SIZE)
		minToWrite = dataSize;

	while (dataSize > 0) {
		// Wait until enough space in the buffer is available.
		while (!fActive
				|| (fBuffer.Writable() < minToWrite && fReaderCount > 0)) {
			if (nonBlocking)
				return B_WOULD_BLOCK;

			ConditionVariableEntry entry;
			entry.Add(this);
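			// note: Add(this) attaches the entry to the condition
			// variable published on this inode rather than to a
			// named member variable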

			WriteRequest request(thread_get_current_thread(), minToWrite);
			fWriteRequests.Add(&request);

			mutex_unlock(&fRequestLock);
			status_t status = entry.Wait(B_CAN_INTERRUPT);
			mutex_lock(&fRequestLock);

			fWriteRequests.Remove(&request);

			if (status != B_OK)
				return status;
		}

		// write only as long as there are readers left
		if (fActive && fReaderCount == 0) {
			if (written == 0)
				send_signal(find_thread(NULL), SIGPIPE);
			return EPIPE;
		}

		// write as much as we can

		size_t toWrite = (fActive ? fBuffer.Writable() : 0);
		if (toWrite > dataSize)
			toWrite = dataSize;

		if (toWrite > 0) {
			ssize_t bytesWritten = fBuffer.Write(data, toWrite, isUser);
			if (bytesWritten < 0)
				return bytesWritten;
		}

		data += toWrite;
		dataSize -= toWrite;
		written += toWrite;

		NotifyBytesWritten(toWrite);
	}

	return B_OK;
}
Example #16
static status_t
object_cache_maintainer(void*)
{
	while (true) {
		MutexLocker locker(sMaintenanceLock);

		// wait for the next request
		while (sMaintenanceQueue.IsEmpty()) {
			// perform memory manager maintenance, if needed
			if (MemoryManager::MaintenanceNeeded()) {
				locker.Unlock();
				MemoryManager::PerformMaintenance();
				locker.Lock();
				continue;
			}

			ConditionVariableEntry entry;
			sMaintenanceCondition.Add(&entry);
			locker.Unlock();
			entry.Wait();
			locker.Lock();
		}

		ObjectCache* cache = sMaintenanceQueue.RemoveHead();

		while (true) {
			bool resizeRequested = cache->maintenance_resize;
			bool deleteRequested = cache->maintenance_delete;

			if (!resizeRequested && !deleteRequested) {
				cache->maintenance_pending = false;
				cache->maintenance_in_progress = false;
				break;
			}

			cache->maintenance_resize = false;
			cache->maintenance_in_progress = true;

			locker.Unlock();

			if (deleteRequested) {
				delete_object_cache_internal(cache);
				break;
			}

			// resize the cache, if necessary

			MutexLocker cacheLocker(cache->lock);

			if (resizeRequested) {
				status_t error = object_cache_reserve_internal(cache,
					cache->min_object_reserve, 0);
				if (error != B_OK) {
					dprintf("object cache resizer: Failed to resize object "
						"cache %p!\n", cache);
					break;
				}
			}

			locker.Lock();
		}
	}

	// can never get here
	return B_OK;
}
Example #17
/*!	Makes sure that \a objectCount objects can be allocated.
*/
static status_t
object_cache_reserve_internal(ObjectCache* cache, size_t objectCount,
	uint32 flags)
{
	// If someone else is already adding slabs, we wait for that to be finished
	// first.
	thread_id thread = find_thread(NULL);
	while (true) {
		if (objectCount <= cache->total_objects - cache->used_count)
			return B_OK;

		ObjectCacheResizeEntry* resizeEntry = NULL;
		if (cache->resize_entry_dont_wait != NULL) {
			resizeEntry = cache->resize_entry_dont_wait;
			if (thread == resizeEntry->thread)
				return B_WOULD_BLOCK;
			// Note: We could still have reentered the function, i.e.
			// resize_entry_can_wait would be ours. That doesn't matter much,
			// though, since after the don't-wait thread has done its job
			// everyone will be happy.
		} else if (cache->resize_entry_can_wait != NULL) {
			resizeEntry = cache->resize_entry_can_wait;
			if (thread == resizeEntry->thread)
				return B_WOULD_BLOCK;

			if ((flags & CACHE_DONT_WAIT_FOR_MEMORY) != 0)
				break;
		} else
			break;

		ConditionVariableEntry entry;
		resizeEntry->condition.Add(&entry);

		cache->Unlock();
		entry.Wait();
		cache->Lock();
	}

	// prepare the resize entry others can wait on
	ObjectCacheResizeEntry*& resizeEntry
		= (flags & CACHE_DONT_WAIT_FOR_MEMORY) != 0
			? cache->resize_entry_dont_wait : cache->resize_entry_can_wait;

	ObjectCacheResizeEntry myResizeEntry;
	resizeEntry = &myResizeEntry;
	resizeEntry->condition.Init(cache, "wait for slabs");
	resizeEntry->thread = thread;

	// add new slabs until there are as many free ones as requested
	while (objectCount > cache->total_objects - cache->used_count) {
		slab* newSlab = cache->CreateSlab(flags);
		if (newSlab == NULL) {
			resizeEntry->condition.NotifyAll();
			resizeEntry = NULL;
			return B_NO_MEMORY;
		}

		cache->usage += cache->slab_size;
		cache->total_objects += newSlab->size;

		cache->empty.Add(newSlab);
		cache->empty_count++;
	}

	resizeEntry->condition.NotifyAll();
	resizeEntry = NULL;

	return B_OK;
}
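The entry type itself is not shown; judging from its use above it is essentially just (a reconstruction, not the verbatim declaration):

	struct ObjectCacheResizeEntry {
		ConditionVariable	condition;
		thread_id			thread;
	};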
Example #18
status_t
_get_port_message_info_etc(port_id id, port_message_info* info,
	size_t infoSize, uint32 flags, bigtime_t timeout)
{
	if (info == NULL || infoSize != sizeof(port_message_info))
		return B_BAD_VALUE;
	if (!sPortsActive || id < 0)
		return B_BAD_PORT_ID;

	flags &= B_CAN_INTERRUPT | B_KILL_CAN_INTERRUPT | B_RELATIVE_TIMEOUT
		| B_ABSOLUTE_TIMEOUT;
	int32 slot = id % sMaxPorts;

	MutexLocker locker(sPorts[slot].lock);

	if (sPorts[slot].id != id
		|| (is_port_closed(slot) && sPorts[slot].messages.IsEmpty())) {
		T(Info(sPorts[slot], 0, B_BAD_PORT_ID));
		TRACE(("_get_port_message_info_etc(): %s port %ld\n",
			sPorts[slot].id == id ? "closed" : "invalid", id));
		return B_BAD_PORT_ID;
	}

	while (sPorts[slot].read_count == 0) {
		// We need to wait for a message to appear
		if ((flags & B_RELATIVE_TIMEOUT) != 0 && timeout <= 0)
			return B_WOULD_BLOCK;

		ConditionVariableEntry entry;
		sPorts[slot].read_condition.Add(&entry);

		locker.Unlock();

		// block if no message, or, if B_TIMEOUT flag set, block with timeout
		status_t status = entry.Wait(flags, timeout);

		if (status != B_OK) {
			T(Info(sPorts[slot], 0, status));
			return status;
		}

		locker.Lock();

		if (sPorts[slot].id != id
			|| (is_port_closed(slot) && sPorts[slot].messages.IsEmpty())) {
			// the port is no longer there
			T(Info(sPorts[slot], 0, B_BAD_PORT_ID));
			return B_BAD_PORT_ID;
		}
	}

	// determine tail & get the length of the message
	port_message* message = sPorts[slot].messages.Head();
	if (message == NULL) {
		panic("port %ld: no messages found\n", sPorts[slot].id);
		return B_ERROR;
	}

	info->size = message->size;
	info->sender = message->sender;
	info->sender_group = message->sender_group;
	info->sender_team = message->sender_team;

	T(Info(sPorts[slot], message->code, B_OK));

	// notify next one, as we haven't read from the port
	sPorts[slot].read_condition.NotifyOne();

	return B_OK;
}
Example #19
ssize_t
read_port_etc(port_id id, int32* _code, void* buffer, size_t bufferSize,
	uint32 flags, bigtime_t timeout)
{
	if (!sPortsActive || id < 0)
		return B_BAD_PORT_ID;
	if ((buffer == NULL && bufferSize > 0) || timeout < 0)
		return B_BAD_VALUE;

	bool userCopy = (flags & PORT_FLAG_USE_USER_MEMCPY) != 0;
	bool peekOnly = !userCopy && (flags & B_PEEK_PORT_MESSAGE) != 0;
		// TODO: we could allow peeking for user apps now

	flags &= B_CAN_INTERRUPT | B_KILL_CAN_INTERRUPT | B_RELATIVE_TIMEOUT
		| B_ABSOLUTE_TIMEOUT;

	int32 slot = id % sMaxPorts;

	MutexLocker locker(sPorts[slot].lock);

	if (sPorts[slot].id != id
		|| (is_port_closed(slot) && sPorts[slot].messages.IsEmpty())) {
		T(Read(sPorts[slot], 0, B_BAD_PORT_ID));
		TRACE(("read_port_etc(): %s port %ld\n",
			sPorts[slot].id == id ? "closed" : "invalid", id));
		return B_BAD_PORT_ID;
	}

	while (sPorts[slot].read_count == 0) {
		if ((flags & B_RELATIVE_TIMEOUT) != 0 && timeout <= 0)
			return B_WOULD_BLOCK;

		// We need to wait for a message to appear
		ConditionVariableEntry entry;
		sPorts[slot].read_condition.Add(&entry);

		locker.Unlock();

		// block if no message, or, if B_TIMEOUT flag set, block with timeout
		status_t status = entry.Wait(flags, timeout);

		locker.Lock();

		if (sPorts[slot].id != id
			|| (is_port_closed(slot) && sPorts[slot].messages.IsEmpty())) {
			// the port is no longer there
			T(Read(sPorts[slot], 0, B_BAD_PORT_ID));
			return B_BAD_PORT_ID;
		}

		if (status != B_OK) {
			T(Read(sPorts[slot], 0, status));
			return status;
		}
	}

	// determine tail & get the length of the message
	port_message* message = sPorts[slot].messages.Head();
	if (message == NULL) {
		panic("port %ld: no messages found\n", sPorts[slot].id);
		return B_ERROR;
	}

	if (peekOnly) {
		size_t size = copy_port_message(message, _code, buffer, bufferSize,
			userCopy);

		T(Read(sPorts[slot], message->code, size));

		sPorts[slot].read_condition.NotifyOne();
			// we only peeked, but didn't grab the message
		return size;
	}

	sPorts[slot].messages.RemoveHead();
	sPorts[slot].total_count++;
	sPorts[slot].write_count++;
	sPorts[slot].read_count--;

	notify_port_select_events(slot, B_EVENT_WRITE);
	sPorts[slot].write_condition.NotifyOne();
		// make one spot in queue available again for write

	locker.Unlock();

	size_t size = copy_port_message(message, _code, buffer, bufferSize,
		userCopy);
	T(Read(sPorts[slot], message->code, size));

	put_port_message(message);
	return size;
}
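read_port_etc() is public API, so a userland-style usage sketch may help; port creation is elided and the buffer size is arbitrary:

	// Hedged usage sketch: read one message, waiting at most one second.
	// `port` is assumed to come from create_port().
	status_t
	poll_port(port_id port)
	{
		char buffer[256];
		int32 code;
		ssize_t size = read_port_etc(port, &code, buffer, sizeof(buffer),
			B_RELATIVE_TIMEOUT, 1000000);
		if (size < 0)
			return (status_t)size;	// e.g. B_TIMED_OUT or B_BAD_PORT_ID
		// `code` and the first `size` bytes of `buffer` hold the message
		return B_OK;
	}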
Example #20
static status_t
get_port_message(int32 code, size_t bufferSize, uint32 flags, bigtime_t timeout,
	port_message** _message)
{
	size_t size = sizeof(port_message) + bufferSize;
	bool limitReached = false;

	while (true) {
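		// atomic_add() returns the previous value; if the reservation
		// overshoots the limit, it is rolled back under the lock below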
		if (atomic_add(&sTotalSpaceInUse, size)
				> int32(kTotalSpaceLimit - size)) {
			// TODO: add per team limit
			// We are not allowed to create another heap area, as our
			// space limit has been reached - just wait until we get
			// some free space again.
			limitReached = true;

		wait:
			MutexLocker locker(sPortsLock);

			atomic_add(&sTotalSpaceInUse, -size);

			// TODO: we don't want to wait - but does that also mean we
			// shouldn't wait for the area creation?
			if (limitReached && (flags & B_RELATIVE_TIMEOUT) != 0
				&& timeout <= 0)
				return B_WOULD_BLOCK;

			ConditionVariableEntry entry;
			sNoSpaceCondition.Add(&entry);

			locker.Unlock();

			status_t status = entry.Wait(flags, timeout);
			if (status == B_TIMED_OUT)
				return B_TIMED_OUT;

			// just try again
			limitReached = false;
			continue;
		}

		int32 areaChangeCounter = atomic_get(&sAreaChangeCounter);

		// Quota is fulfilled, try to allocate the buffer

		port_message* message
			= (port_message*)heap_memalign(sPortAllocator, 0, size);
		if (message != NULL) {
			message->code = code;
			message->size = bufferSize;

			*_message = message;
			return B_OK;
		}

		if (atomic_or(&sAllocatingArea, 1) != 0) {
			// Just wait for someone else to create an area for us
			goto wait;
		}

		if (areaChangeCounter != atomic_get(&sAreaChangeCounter)) {
			atomic_add(&sTotalSpaceInUse, -size);
			continue;
		}

		// Create a new area for the heap to use

		addr_t base;
		area_id area = create_area("port grown buffer", (void**)&base,
			B_ANY_KERNEL_ADDRESS, kBufferGrowRate, B_NO_LOCK,
			B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA);
		if (area < 0) {
			// it's time to let the userland feel our pain
			sNoSpaceCondition.NotifyAll();
			return B_NO_MEMORY;
		}

		heap_add_area(sPortAllocator, area, base, kBufferGrowRate);

		atomic_add(&sAreaChangeCounter, 1);
		sNoSpaceCondition.NotifyAll();
		atomic_and(&sAllocatingArea, 0);
	}
}
Example #21
status_t
writev_port_etc(port_id id, int32 msgCode, const iovec* msgVecs,
	size_t vecCount, size_t bufferSize, uint32 flags, bigtime_t timeout)
{
	if (!sPortsActive || id < 0)
		return B_BAD_PORT_ID;
	if (bufferSize > PORT_MAX_MESSAGE_SIZE)
		return B_BAD_VALUE;

	// mask irrelevant flags (for acquire_sem() usage)
	flags &= B_CAN_INTERRUPT | B_KILL_CAN_INTERRUPT | B_RELATIVE_TIMEOUT
		| B_ABSOLUTE_TIMEOUT;
	if ((flags & B_RELATIVE_TIMEOUT) != 0
		&& timeout != B_INFINITE_TIMEOUT && timeout > 0) {
		// Make the timeout absolute, since we have more than one step where
		// we might have to wait
		flags = (flags & ~B_RELATIVE_TIMEOUT) | B_ABSOLUTE_TIMEOUT;
		timeout += system_time();
	}

	bool userCopy = (flags & PORT_FLAG_USE_USER_MEMCPY) > 0;

	int32 slot = id % sMaxPorts;
	status_t status;
	port_message* message = NULL;

	MutexLocker locker(sPorts[slot].lock);

	if (sPorts[slot].id != id) {
		TRACE(("write_port_etc: invalid port_id %ld\n", id));
		return B_BAD_PORT_ID;
	}
	if (is_port_closed(slot)) {
		TRACE(("write_port_etc: port %ld closed\n", id));
		return B_BAD_PORT_ID;
	}

	if (sPorts[slot].write_count <= 0) {
		if ((flags & B_RELATIVE_TIMEOUT) != 0 && timeout <= 0)
			return B_WOULD_BLOCK;

		sPorts[slot].write_count--;

		// We need to block in order to wait for a free message slot
		ConditionVariableEntry entry;
		sPorts[slot].write_condition.Add(&entry);

		locker.Unlock();

		status = entry.Wait(flags, timeout);

		locker.Lock();

		if (sPorts[slot].id != id || is_port_closed(slot)) {
			// the port is no longer there
			T(Write(sPorts[slot], 0, 0, B_BAD_PORT_ID));
			return B_BAD_PORT_ID;
		}

		if (status != B_OK)
			goto error;
	} else
		sPorts[slot].write_count--;

	status = get_port_message(msgCode, bufferSize, flags, timeout,
		&message);
	if (status != B_OK)
		goto error;

	// sender credentials
	message->sender = geteuid();
	message->sender_group = getegid();
	message->sender_team = team_get_current_team_id();

	if (bufferSize > 0) {
		uint32 i;
		if (userCopy) {
			// copy from user memory
			for (i = 0; i < vecCount; i++) {
				size_t bytes = msgVecs[i].iov_len;
				if (bytes > bufferSize)
					bytes = bufferSize;

				status_t status = user_memcpy(message->buffer,
					msgVecs[i].iov_base, bytes);
				if (status != B_OK) {
					put_port_message(message);
					goto error;
				}

				bufferSize -= bytes;
				if (bufferSize == 0)
					break;
			}
		} else {
			// copy from kernel memory
			for (i = 0; i < vecCount; i++) {
				size_t bytes = msgVecs[i].iov_len;
				if (bytes > bufferSize)
					bytes = bufferSize;

				memcpy(message->buffer, msgVecs[i].iov_base, bytes);

				bufferSize -= bytes;
				if (bufferSize == 0)
					break;
			}
		}
	}

	sPorts[slot].messages.Add(message);
	sPorts[slot].read_count++;

	T(Write(sPorts[slot], message->code, message->size, B_OK));

	notify_port_select_events(slot, B_EVENT_READ);
	sPorts[slot].read_condition.NotifyOne();
	return B_OK;

error:
	// Give up our slot in the queue again, and let someone else
	// try and fail
	T(Write(sPorts[slot], 0, 0, status));
	sPorts[slot].write_count++;
	notify_port_select_events(slot, B_EVENT_WRITE);
	sPorts[slot].write_condition.NotifyOne();

	return status;
}
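And the sender's side, as a usage sketch of the scatter/gather variant above (two fragments gathered into a single message; error handling trimmed, `port` from create_port() assumed):

	// Hedged usage sketch for writev_port_etc().
	iovec vecs[2];
	vecs[0].iov_base = (void*)"hello ";
	vecs[0].iov_len = 6;
	vecs[1].iov_base = (void*)"world";
	vecs[1].iov_len = 5;

	status_t status = writev_port_etc(port, 'msg_', vecs, 2, 6 + 5,
		B_RELATIVE_TIMEOUT, 1000000);
		// blocks at most one second for a queue slot and buffer space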