void
PhysicalPageSlotQueue::PutSlot(PhysicalPageSlot* slot)
{
	InterruptsLocker locker;

	slot->next = fSlots;
	fSlots = slot;

	if (slot->next == NULL) {
		// the queue was empty before -- wake threads waiting for one slot
		fFreeSlotCondition.NotifyAll();
	} else if (slot->next->next == NULL) {
		// the queue now holds two slots -- wake threads waiting for a pair
		fFreeSlotsCondition.NotifyAll();
	}
}
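
The matching consumer is not shown in this listing. Here is a minimal sketch of the wait side, assuming it follows the usual Haiku ConditionVariableEntry pattern: register an entry, drop the lock, block, and re-check the predicate in a loop, since NotifyAll() wakes every waiter but only one of them will win the slot.

PhysicalPageSlot*
PhysicalPageSlotQueue::GetSlot()
{
	InterruptsLocker locker;

	// wait for a slot to become free, re-checking after every wake-up
	while (fSlots == NULL) {
		ConditionVariableEntry entry;
		fFreeSlotCondition.Add(&entry);

		locker.Unlock();
		entry.Wait();
		locker.Lock();
	}

	PhysicalPageSlot* slot = fSlots;
	fSlots = slot->next;
	return slot;
}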
Example #2
void
Inode::NotifyEndClosed(bool writer)
{
	TRACE("Inode %p::%s(%s)\n", this, __FUNCTION__,
		writer ? "writer" : "reader");

	if (writer) {
		// Our last writer has been closed; if the pipe
		// contains no data, unlock all waiting readers
		TRACE("  buffer readable: %zu\n", fBuffer.Readable());
		if (fBuffer.Readable() == 0) {
			ReadRequestList::Iterator iterator = fReadRequests.GetIterator();
			while (ReadRequest* request = iterator.Next())
				request->Notify();

			if (fReadSelectSyncPool)
				notify_select_event_pool(fReadSelectSyncPool, B_SELECT_READ);
		}
	} else {
		// Last reader is gone. Wake up all writers.
		fWriteCondition.NotifyAll();

		if (fWriteSelectSyncPool) {
			notify_select_event_pool(fWriteSelectSyncPool, B_SELECT_WRITE);
			notify_select_event_pool(fWriteSelectSyncPool, B_SELECT_ERROR);
		}
	}
}
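
The ReadRequest objects notified above are defined elsewhere in the FIFO implementation. A plausible minimal shape for such a request, built on the same ConditionVariable primitive (this class layout is an assumption, not taken from the source):

class ReadRequest : public DoublyLinkedListLinkImpl<ReadRequest> {
public:
	ReadRequest(file_cookie* cookie)
		:
		fCookie(cookie)
	{
		fCondition.Init(this, "pipe read request");
	}

	// Block until a writer (or Close()) calls Notify(); the request
	// lock is released while sleeping and re-acquired afterwards.
	status_t Wait(MutexLocker& locker)
	{
		ConditionVariableEntry entry;
		fCondition.Add(&entry);

		locker.Unlock();
		status_t status = entry.Wait();
		locker.Lock();

		return status;
	}

	void Notify(status_t status = B_OK)
	{
		fCondition.NotifyAll(status);
	}

	file_cookie* Cookie() const { return fCookie; }

private:
	file_cookie*		fCookie;
	ConditionVariable	fCondition;
};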
Example #3
void
Inode::NotifyBytesRead(size_t bytes)
{
	// notify writers if something can be written now
	size_t writable = fBuffer.Writable();
	if (bytes > 0) {
		// only notify select()ors if nothing was writable before
		if (writable == bytes) {
			if (fWriteSelectSyncPool)
				notify_select_event_pool(fWriteSelectSyncPool, B_SELECT_WRITE);
		}

		// If any of the waiting writers has a minimal write count that has
		// now become satisfied, we notify all of them (condition variables
		// don't support doing that selectively).
		WriteRequest* request;
		WriteRequestList::Iterator iterator = fWriteRequests.GetIterator();
		while ((request = iterator.Next()) != NULL) {
			size_t minWriteCount = request->MinimalWriteCount();
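			// (writable - bytes) is what was writable before this read;
			// the request just became satisfiable if its minimum fits
			// now but did not fit before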
			if (minWriteCount > 0 && minWriteCount <= writable
					&& minWriteCount > writable - bytes) {
				fWriteCondition.NotifyAll();
				break;
			}
		}
	}
}
Example #4
void
Inode::Close(int openMode, file_cookie* cookie)
{
	TRACE("Inode %p::Close(openMode = %d)\n", this, openMode);

	MutexLocker locker(RequestLock());

	// Notify all currently reading file descriptors
	ReadRequestList::Iterator iterator = fReadRequests.GetIterator();
	while (ReadRequest* request = iterator.Next()) {
		if (request->Cookie() == cookie)
			request->Notify(B_FILE_ERROR);
	}

	if ((openMode & O_ACCMODE) == O_WRONLY && --fWriterCount == 0)
		NotifyEndClosed(true);

	if ((openMode & O_ACCMODE) == O_RDONLY
		|| (openMode & O_ACCMODE) == O_RDWR) {
		if (--fReaderCount == 0)
			NotifyEndClosed(false);
	}

	if (fWriterCount == 0) {
		// Notify any writers that are still waiting, so they stop.
		// TODO: This only works reliably if there is only one writer; we
		// could do the same thing we do for the read requests.
		fWriteCondition.NotifyAll(B_FILE_ERROR);
	}

	if (fReaderCount == 0 && fWriterCount == 0) {
		fActive = false;
		fBuffer.DeleteBuffer();
	}
}
Example #5
static status_t
low_resource_manager(void*)
{
	bigtime_t timeout = kLowResourceInterval;
	while (true) {
		int32 state = low_resource_state_no_update(B_ALL_KERNEL_RESOURCES);
		if (state != B_LOW_RESOURCE_CRITICAL) {
			acquire_sem_etc(sLowResourceWaitSem, 1, B_RELATIVE_TIMEOUT,
				timeout);
		}

		RecursiveLocker _(&sLowResourceLock);

		compute_state();
		state = low_resource_state_no_update(B_ALL_KERNEL_RESOURCES);

		TRACE(("low_resource_manager: state = %ld, %ld free pages, %lld free "
			"memory, %lu free semaphores\n", state, vm_page_num_free_pages(),
			vm_available_not_needed_memory(),
			sem_max_sems() - sem_used_sems()));

		if (state < B_LOW_RESOURCE_NOTE)
			continue;

		call_handlers(sLowResources);

		if (state == B_LOW_RESOURCE_WARNING)
			timeout = kWarnResourceInterval;
		else
			timeout = kLowResourceInterval;

		sLowResourceWaiterCondition.NotifyAll();
	}
	return 0;
}
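
The NotifyAll() at the end of each pass releases threads that asked for a low-resource scan and are waiting for it to complete. A hedged sketch of such a waiter (the signature and the use of the semaphore are assumptions): the entry is registered before the manager is woken, so the notification cannot be missed.

void
low_resource(uint32 resource, uint64 requirements, uint32 flags,
	bigtime_t timeout)
{
	// register first, so a pass that finishes between the wake-up
	// and Wait() still notifies us
	ConditionVariableEntry entry;
	sLowResourceWaiterCondition.Add(&entry);

	// kick the manager thread instead of waiting for its periodic run
	release_sem(sLowResourceWaitSem);

	// block until the manager has completed a scan pass
	entry.Wait(flags, timeout);
}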
Example #6
static void
put_port_message(port_message* message)
{
	size_t size = sizeof(port_message) + message->size;
	heap_free(sPortAllocator, message);

	atomic_add(&sTotalSpaceInUse, -size);
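	// wake any get_port_message() waiters (see Example #13) now that
	// space has been freed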
	sNoSpaceCondition.NotifyAll();
}
Example #7
static status_t
acpi_battery_control(void* _cookie, uint32 op, void* arg, size_t len)
{
	battery_device_cookie* device = (battery_device_cookie*)_cookie;
	status_t err;

	switch (op) {
		case IDENTIFY_DEVICE: {
			if (len < sizeof(uint32))
				return B_BAD_VALUE;

			uint32 magicId = kMagicACPIBatteryID;
			return user_memcpy(arg, &magicId, sizeof(magicId));
		}

		case GET_BATTERY_INFO: {
			if (len < sizeof(acpi_battery_info))
				return B_BAD_VALUE;

			acpi_battery_info batteryInfo;
			err = ReadBatteryStatus(device->driver_cookie, &batteryInfo);
			if (err != B_OK)
				return err;
			return user_memcpy(arg, &batteryInfo, sizeof(batteryInfo));
		}

		case GET_EXTENDED_BATTERY_INFO: {
			if (len < sizeof(acpi_extended_battery_info))
				return B_BAD_VALUE;

			acpi_extended_battery_info extBatteryInfo;
			err = ReadBatteryInfo(device->driver_cookie, &extBatteryInfo);
			if (err != B_OK)
				return err;
			return user_memcpy(arg, &extBatteryInfo, sizeof(extBatteryInfo));
		}

		case WATCH_BATTERY:
			sBatteryCondition.Wait();
			if (atomic_get(&(device->stop_watching))) {
				atomic_set(&(device->stop_watching), 0);
				return B_ERROR;
			}
			return B_OK;

		case STOP_WATCHING_BATTERY:
			atomic_set(&(device->stop_watching), 1);
			sBatteryCondition.NotifyAll();
			return B_OK;
	}

	return B_DEV_INVALID_IOCTL;
}
Example #8
static void
increase_object_reserve(ObjectCache* cache)
{
	MutexLocker locker(sMaintenanceLock);

	cache->maintenance_resize = true;

	if (!cache->maintenance_pending) {
		cache->maintenance_pending = true;
		sMaintenanceQueue.Add(cache);
		sMaintenanceCondition.NotifyAll();
	}
}
Example #9
status_t
DPCQueue::_Thread()
{
	while (true) {
		InterruptsSpinLocker locker(fLock);

		// get the next pending callback
		DPCCallback* callback = fCallbacks.RemoveHead();
		if (callback == NULL) {
			// nothing is pending -- wait unless the queue is already closed
			if (_IsClosed())
				break;

			ConditionVariableEntry waitEntry;
			fPendingCallbacksCondition.Add(&waitEntry);

			locker.Unlock();
			waitEntry.Wait();

			continue;
		}

		callback->fInQueue = NULL;
		fCallbackInProgress = callback;

		// call the callback
		locker.Unlock();
		callback->DoDPC(this);
		locker.Lock();

		fCallbackInProgress = NULL;

		// wake up threads waiting for the callback to be done
		ConditionVariable* doneCondition = fCallbackDoneCondition;
		fCallbackDoneCondition = NULL;
		locker.Unlock();
		if (doneCondition != NULL)
			doneCondition->NotifyAll();
	}

	return B_OK;
}
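
A thread that wants to cancel a callback which is already executing must wait for it to finish; that is what fCallbackInProgress and fCallbackDoneCondition are for. A hedged sketch of such a Cancel() (the structure is assumed, not taken from the source):

bool
DPCQueue::Cancel(DPCCallback* callback)
{
	InterruptsSpinLocker locker(fLock);

	// still queued -- just remove it
	if (callback->fInQueue == this) {
		fCallbacks.Remove(callback);
		callback->fInQueue = NULL;
		return true;
	}

	// neither queued nor running -- nothing to cancel
	if (fCallbackInProgress != callback)
		return false;

	// the callback is running right now: publish a done condition
	// (reusing one another canceller may already have set up) and
	// wait until _Thread() notifies it
	ConditionVariable doneCondition;
	doneCondition.Init(this, "dpc callback done");
	if (fCallbackDoneCondition == NULL)
		fCallbackDoneCondition = &doneCondition;

	ConditionVariableEntry waitEntry;
	fCallbackDoneCondition->Add(&waitEntry);

	locker.Unlock();
	waitEntry.Wait();

	return false;
}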
Example #10
void
Inode::Open(int openMode)
{
	MutexLocker locker(RequestLock());

	if ((openMode & O_ACCMODE) == O_WRONLY)
		fWriterCount++;

	if ((openMode & O_ACCMODE) == O_RDONLY || (openMode & O_ACCMODE) == O_RDWR)
		fReaderCount++;

	if (fReaderCount > 0 && fWriterCount > 0) {
		TRACE("Inode %p::Open(): fifo becomes active\n", this);
		fBuffer.CreateBuffer();
		fActive = true;

		// notify all waiting writers that they can start
		if (fWriteSelectSyncPool)
			notify_select_event_pool(fWriteSelectSyncPool, B_SELECT_WRITE);
		fWriteCondition.NotifyAll();
	}
}
Example #11
void
battery_notify_handler(acpi_handle device, uint32 value, void *context)
{
	TRACE("battery_notify_handler event 0x%x\n", int(value));
	sBatteryCondition.NotifyAll();
}
Example #12
void
request_memory_manager_maintenance()
{
	MutexLocker locker(sMaintenanceLock);
	sMaintenanceCondition.NotifyAll();
}
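
Examples #8 and #12 both wake the same consumer: a maintenance thread sleeping on sMaintenanceCondition. A minimal sketch of its wait loop, assuming it drains sMaintenanceQueue under sMaintenanceLock (the real thread also has to handle requests, like the one above, that queue no cache):

static status_t
object_cache_maintainer(void*)
{
	while (true) {
		MutexLocker locker(sMaintenanceLock);

		// sleep until work is queued, re-checking after each wake-up
		while (sMaintenanceQueue.IsEmpty()) {
			ConditionVariableEntry entry;
			sMaintenanceCondition.Add(&entry);

			locker.Unlock();
			entry.Wait();
			locker.Lock();
		}

		ObjectCache* cache = sMaintenanceQueue.RemoveHead();
		cache->maintenance_pending = false;
		locker.Unlock();

		// ... grow or shrink the cache's slab reserve here ...
	}
	return 0;
}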
Example #13
static status_t
get_port_message(int32 code, size_t bufferSize, uint32 flags, bigtime_t timeout,
	port_message** _message)
{
	size_t size = sizeof(port_message) + bufferSize;
	bool limitReached = false;

	while (true) {
		if (atomic_add(&sTotalSpaceInUse, size)
				> int32(kTotalSpaceLimit - size)) {
			// TODO: add per team limit
			// We are not allowed to create another heap area, as our
			// space limit has been reached - just wait until we get
			// some free space again.
			limitReached = true;

		wait:
			MutexLocker locker(sPortsLock);

			atomic_add(&sTotalSpaceInUse, -size);

			// TODO: we don't want to wait - but does that also mean we
			// shouldn't wait for the area creation?
			if (limitReached && (flags & B_RELATIVE_TIMEOUT) != 0
				&& timeout <= 0)
				return B_WOULD_BLOCK;

			ConditionVariableEntry entry;
			sNoSpaceCondition.Add(&entry);

			locker.Unlock();

			status_t status = entry.Wait(flags, timeout);
			if (status == B_TIMED_OUT)
				return B_TIMED_OUT;

			// just try again
			limitReached = false;
			continue;
		}

		int32 areaChangeCounter = atomic_get(&sAreaChangeCounter);

		// Quota is fulfilled, try to allocate the buffer

		port_message* message
			= (port_message*)heap_memalign(sPortAllocator, 0, size);
		if (message != NULL) {
			message->code = code;
			message->size = bufferSize;

			*_message = message;
			return B_OK;
		}

		if (atomic_or(&sAllocatingArea, 1) != 0) {
			// Just wait for someone else to create an area for us
			goto wait;
		}

		if (areaChangeCounter != atomic_get(&sAreaChangeCounter)) {
			atomic_add(&sTotalSpaceInUse, -size);
			continue;
		}

		// Create a new area for the heap to use

		addr_t base;
		area_id area = create_area("port grown buffer", (void**)&base,
			B_ANY_KERNEL_ADDRESS, kBufferGrowRate, B_NO_LOCK,
			B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA);
		if (area < 0) {
			// it's time to let the userland feel our pain
			sNoSpaceCondition.NotifyAll();
			return B_NO_MEMORY;
		}

		heap_add_area(sPortAllocator, area, base, kBufferGrowRate);

		atomic_add(&sAreaChangeCounter, 1);
		sNoSpaceCondition.NotifyAll();
		atomic_and(&sAllocatingArea, 0);
	}
}