Example #1
nuThreadPool::JobTicket nuThreadPool::JobArena::entryJob(const nuTaskSet& task_set)
{
  Job* p_job = new Job(task_set);
  queueEntry(p_job);
  setCondition(HAS_DATA);
  return JobTicket(p_job);
}
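
Example #1's entryJob() wraps the task set in a heap-allocated Job, pushes it onto the arena's queue, raises the HAS_DATA condition so a sleeping worker can wake up, and hands the caller a JobTicket referring to the queued job. The snippet below is a minimal stand-alone sketch of that same enqueue-and-signal pattern in standard C++; WorkQueue and Ticket are placeholder names, not part of the nuThreadPool API.

#include <condition_variable>
#include <deque>
#include <memory>
#include <mutex>

struct Job { /* task payload would live here */ };
using Ticket = std::shared_ptr<Job>;	// lets the caller track the queued job

class WorkQueue {
public:
	Ticket enqueue(std::shared_ptr<Job> job)
	{
		{
			std::lock_guard<std::mutex> lock(fMutex);
			fJobs.push_back(job);	// rough equivalent of queueEntry()
		}
		fHasData.notify_one();		// rough equivalent of setCondition(HAS_DATA)
		return job;			// the "ticket" simply refers to the queued job
	}

private:
	std::mutex fMutex;
	std::condition_variable fHasData;
	std::deque<std::shared_ptr<Job>> fJobs;
};
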
Example #2
status_t
_user_xsi_semop(int semaphoreID, struct sembuf *ops, size_t numOps)
{
	TRACE(("xsi_semop: semaphoreID = %d, ops = %p, numOps = %ld\n",
		semaphoreID, ops, numOps));
	MutexLocker setHashLocker(sXsiSemaphoreSetLock);
	XsiSemaphoreSet *semaphoreSet = sSemaphoreHashTable.Lookup(semaphoreID);
	if (semaphoreSet == NULL) {
		TRACE_ERROR(("xsi_semop: semaphore set id %d not valid\n",
			semaphoreID));
		return EINVAL;
	}
	MutexLocker setLocker(semaphoreSet->Lock());
	setHashLocker.Unlock();

	if (!IS_USER_ADDRESS(ops)) {
		TRACE_ERROR(("xsi_semop: sembuf address is not valid\n"));
		return B_BAD_ADDRESS;
	}

	if (numOps >= MAX_XSI_SEMS_PER_TEAM) {
		TRACE_ERROR(("xsi_semop: numOps out of range\n"));
		return EINVAL;
	}

	struct sembuf *operations
		= (struct sembuf *)malloc(sizeof(struct sembuf) * numOps);
	if (operations == NULL) {
		TRACE_ERROR(("xsi_semop: failed to allocate sembuf struct\n"));
		return B_NO_MEMORY;
	}
	MemoryDeleter operationsDeleter(operations);

	if (user_memcpy(operations, ops,
		(sizeof(struct sembuf) * numOps)) < B_OK) {
		TRACE_ERROR(("xsi_semop: user_memcpy failed\n"));
		return B_BAD_ADDRESS;
	}

	// We won't do partial requests, that is, operations
	// only on some semaphores belonging to the set and then
	// going to sleep. If we must wait on a semaphore, we undo
	// all the operations already done and go to sleep; otherwise
	// we may cause some unwanted deadlocks among threads
	// fighting for the same set.
	bool notDone = true;
	status_t result = 0;
	while (notDone) {
		XsiSemaphore *semaphore = NULL;
		short numberOfSemaphores = semaphoreSet->NumberOfSemaphores();
		bool goToSleep = false;

		uint32 i = 0;
		for (; i < numOps; i++) {
			short semaphoreNumber = operations[i].sem_num;
			if (semaphoreNumber >= numberOfSemaphores) {
				TRACE_ERROR(("xsi_semop: %" B_PRIu32 " invalid semaphore number"
					"\n", i));
				result = EINVAL;
				break;
			}
			semaphore = semaphoreSet->Semaphore(semaphoreNumber);
			unsigned short value = semaphore->Value();
			short operation = operations[i].sem_op;
			TRACE(("xsi_semop: semaphoreNumber = %d, value = %d\n",
				semaphoreNumber, value));
			if (operation < 0) {
				if (semaphore->Add(operation)) {
					if (operations[i].sem_flg & IPC_NOWAIT)
						result = EAGAIN;
					else
						goToSleep = true;
					break;
				}
			} else if (operation == 0) {
				if (value == 0)
					continue;
				else if (operations[i].sem_flg & IPC_NOWAIT) {
					result = EAGAIN;
					break;
				} else {
					goToSleep = true;
					break;
				}
			} else {
				// Operation must be greater than zero,
				// just add the value and continue
				semaphore->Add(operation);
			}
		}

		// Either we have to wait or an error occurred
		if (goToSleep || result != 0) {
			// Undo all previously done operations
			for (uint32 j = 0; j < i; j++) {
				short semaphoreNumber = operations[j].sem_num;
				semaphore = semaphoreSet->Semaphore(semaphoreNumber);
				short operation = operations[j].sem_op;
				if (operation != 0)
					semaphore->Revert(operation);
			}
			if (result != 0)
				return result;

			// We have to wait: first enqueue the thread
			// in the appropriate set waiting list, then
			// unlock the set itself and block the thread.
			bool waitOnZero = true;
			if (operations[i].sem_op != 0)
				waitOnZero = false;

			Thread *thread = thread_get_current_thread();
			queued_thread queueEntry(thread, (int32)operations[i].sem_op);
			semaphore->Enqueue(&queueEntry, waitOnZero);

			uint32 sequenceNumber = semaphoreSet->SequenceNumber();

			TRACE(("xsi_semop: thread %d going to sleep\n", (int)thread->id));
			result = semaphore->BlockAndUnlock(thread, &setLocker);
			TRACE(("xsi_semop: thread %d back to life\n", (int)thread->id));

			// We are back to life. Find out why!
			// Make sure the set hasn't been deleted or, worse yet,
			// replaced.
			setHashLocker.Lock();
			semaphoreSet = sSemaphoreHashTable.Lookup(semaphoreID);
			if (result == EIDRM || semaphoreSet == NULL || (semaphoreSet != NULL
				&& sequenceNumber != semaphoreSet->SequenceNumber())) {
				TRACE_ERROR(("xsi_semop: semaphore set id %d (sequence = "
					"%" B_PRIu32 ") got destroyed\n", semaphoreID,
					sequenceNumber));
				notDone = false;
				result = EIDRM;
			} else if (result == B_INTERRUPTED) {
				TRACE_ERROR(("xsi_semop: thread %d got interrupted while "
					"waiting on semaphore set id %d\n",(int)thread->id,
					semaphoreID));
				semaphore->Deque(&queueEntry, waitOnZero);
				result = EINTR;
				notDone = false;
			} else {
				setLocker.Lock();
				setHashLocker.Unlock();
			}
		} else {
			// everything worked like a charm (so far)
			notDone = false;
			TRACE(("xsi_semop: semaphore acquired succesfully\n"));
			// We acquired the semaphore, now records the sem_undo
			// requests
			XsiSemaphore *semaphore = NULL;
			uint32 i = 0;
			for (; i < numOps; i++) {
				short semaphoreNumber = operations[i].sem_num;
				semaphore = semaphoreSet->Semaphore(semaphoreNumber);
				short operation = operations[i].sem_op;
				if (operations[i].sem_flg & SEM_UNDO)
					if (semaphoreSet->RecordUndo(semaphoreNumber, operation)
						!= B_OK) {
						// Unlikely scenario, but we might get here.
						// Undo everything!
						// Start with semaphore operations
						for (uint32 j = 0; j < numOps; j++) {
							short semaphoreNumber = operations[j].sem_num;
							semaphore = semaphoreSet->Semaphore(semaphoreNumber);
							short operation = operations[j].sem_op;
							if (operation != 0)
								semaphore->Revert(operation);
						}
						// Remove all previously registered sem_undo requests
						for (uint32 j = 0; j < i; j++) {
							if (operations[j].sem_flg & SEM_UNDO)
								semaphoreSet->RevertUndo(operations[j].sem_num,
									operations[j].sem_op);
						}
						result = ENOSPC;
					}
			}
		}
	}

	// We did it. Set the pid of all semaphores used
	if (result == 0) {
		for (uint32 i = 0; i < numOps; i++) {
			short semaphoreNumber = operations[i].sem_num;
			XsiSemaphore *semaphore = semaphoreSet->Semaphore(semaphoreNumber);
			semaphore->SetPid(getpid());
		}
	}
	return result;
}
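
_user_xsi_semop() is the kernel half of the XSI semop() call: it copies the caller's sembuf array into kernel space, applies all operations atomically or blocks and retries, and records SEM_UNDO requests. A minimal user-space counterpart is sketched below; it creates a private two-semaphore set and issues one atomic request, with error handling reduced to perror() to keep it short.

#include <cstdio>
#include <sys/ipc.h>
#include <sys/sem.h>
#include <sys/types.h>

int
main()
{
	// private set with two semaphores, both initially 0
	int setId = semget(IPC_PRIVATE, 2, IPC_CREAT | 0600);
	if (setId < 0) {
		perror("semget");
		return 1;
	}

	// raise semaphore 0 to 1 so the combined request below won't block
	struct sembuf init;
	init.sem_num = 0;
	init.sem_op = 1;
	init.sem_flg = 0;
	if (semop(setId, &init, 1) < 0)
		perror("semop (init)");

	// P(sem 0) and V(sem 1) as one atomic request: either both operations
	// are applied or the caller waits -- the "no partial request" policy
	// implemented by _user_xsi_semop() above
	struct sembuf ops[2];
	ops[0].sem_num = 0; ops[0].sem_op = -1; ops[0].sem_flg = SEM_UNDO;
	ops[1].sem_num = 1; ops[1].sem_op = 1; ops[1].sem_flg = SEM_UNDO;
	if (semop(setId, ops, 2) < 0)
		perror("semop");

	semctl(setId, 0, IPC_RMID);	// remove the set
	return 0;
}
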
Example #3
status_t
switch_sem_etc(sem_id semToBeReleased, sem_id id, int32 count,
	uint32 flags, bigtime_t timeout)
{
	int slot = id % sMaxSems;
	int state;
	status_t status = B_OK;

	if (gKernelStartup)
		return B_OK;
	if (sSemsActive == false)
		return B_NO_MORE_SEMS;

	if (!are_interrupts_enabled()) {
		panic("switch_sem_etc: called with interrupts disabled for sem %ld\n",
			id);
	}

	if (id < 0)
		return B_BAD_SEM_ID;
	if (count <= 0
		|| (flags & (B_RELATIVE_TIMEOUT | B_ABSOLUTE_TIMEOUT)) == (B_RELATIVE_TIMEOUT | B_ABSOLUTE_TIMEOUT)) {
		return B_BAD_VALUE;
	}

	state = disable_interrupts();
	GRAB_SEM_LOCK(sSems[slot]);

	if (sSems[slot].id != id) {
		TRACE(("switch_sem_etc: bad sem %ld\n", id));
		status = B_BAD_SEM_ID;
		goto err;
	}

	// TODO: the B_CHECK_PERMISSION flag should be made private, as it
	//	doesn't have any use outside the kernel
	if ((flags & B_CHECK_PERMISSION) != 0
		&& sSems[slot].u.used.owner == team_get_kernel_team_id()) {
		dprintf("thread %ld tried to acquire kernel semaphore %ld.\n",
			thread_get_current_thread_id(), id);
		status = B_NOT_ALLOWED;
		goto err;
	}

	if (sSems[slot].u.used.count - count < 0) {
		if ((flags & B_RELATIVE_TIMEOUT) != 0 && timeout <= 0) {
			// immediate timeout
			status = B_WOULD_BLOCK;
			goto err;
		} else if ((flags & B_ABSOLUTE_TIMEOUT) != 0 && timeout < 0) {
			// absolute negative timeout
			status = B_TIMED_OUT;
			goto err;
		}
	}

	KTRACE("switch_sem_etc(semToBeReleased: %ld, sem: %ld, count: %ld, "
		"flags: 0x%lx, timeout: %lld)", semToBeReleased, id, count, flags,
		timeout);

	if ((sSems[slot].u.used.count -= count) < 0) {
		// we need to block
		Thread *thread = thread_get_current_thread();

		TRACE(("switch_sem_etc(id = %ld): block name = %s, thread = %p,"
			" name = %s\n", id, sSems[slot].u.used.name, thread, thread->name));

		// do a quick check to see if the thread has any pending signals
		// this should catch most of the cases where the thread had a signal
		SpinLocker schedulerLocker(gSchedulerLock);
		if (thread_is_interrupted(thread, flags)) {
			schedulerLocker.Unlock();
			sSems[slot].u.used.count += count;
			status = B_INTERRUPTED;
				// the other semaphore will be released later
			goto err;
		}

		if ((flags & (B_RELATIVE_TIMEOUT | B_ABSOLUTE_TIMEOUT)) == 0)
			timeout = B_INFINITE_TIMEOUT;

		// enqueue in the semaphore queue and get ready to wait
		queued_thread queueEntry(thread, count);
		sSems[slot].queue.Add(&queueEntry);
		queueEntry.queued = true;

		thread_prepare_to_block(thread, flags, THREAD_BLOCK_TYPE_SEMAPHORE,
			(void*)(addr_t)id);

		RELEASE_SEM_LOCK(sSems[slot]);

		// release the other semaphore, if any
		if (semToBeReleased >= 0) {
			release_sem_etc(semToBeReleased, 1, B_DO_NOT_RESCHEDULE);
			semToBeReleased = -1;
		}

		schedulerLocker.Lock();

		status_t acquireStatus = timeout == B_INFINITE_TIMEOUT
			? thread_block_locked(thread)
			: thread_block_with_timeout_locked(flags, timeout);

		schedulerLocker.Unlock();
		GRAB_SEM_LOCK(sSems[slot]);

		// If we're still queued, this means the acquisition failed, and we
		// need to remove our entry and (potentially) wake up other threads.
		if (queueEntry.queued)
			remove_thread_from_sem(&queueEntry, &sSems[slot]);

		if (acquireStatus >= B_OK) {
			sSems[slot].u.used.last_acquirer = thread_get_current_thread_id();
#if DEBUG_SEM_LAST_ACQUIRER
			sSems[slot].u.used.last_acquire_count = count;
#endif
		}

		RELEASE_SEM_LOCK(sSems[slot]);
		restore_interrupts(state);

		TRACE(("switch_sem_etc(sem %ld): exit block name %s, "
			"thread %ld (%s)\n", id, sSems[slot].u.used.name, thread->id,
			thread->name));
		KTRACE("switch_sem_etc() done: 0x%lx", acquireStatus);
		return acquireStatus;
	} else {
		sSems[slot].u.used.net_count -= count;
		sSems[slot].u.used.last_acquirer = thread_get_current_thread_id();
#if DEBUG_SEM_LAST_ACQUIRER
		sSems[slot].u.used.last_acquire_count = count;
#endif
	}

err:
	RELEASE_SEM_LOCK(sSems[slot]);
	restore_interrupts(state);

	if (status == B_INTERRUPTED && semToBeReleased >= B_OK) {
		// depending on when we were interrupted, we still need to
		// release the semaphore to always leave things in a consistent
		// state
		release_sem_etc(semToBeReleased, 1, B_DO_NOT_RESCHEDULE);
	}

#if 0
	if (status == B_NOT_ALLOWED)
		_user_debugger("Thread tried to acquire kernel semaphore.");
#endif

	KTRACE("switch_sem_etc() done: 0x%lx", status);

	return status;
}
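
switch_sem_etc() atomically releases semToBeReleased and starts waiting on id, so no wakeup can slip in between the release and the block. It is a kernel-internal routine rather than public userland API; a hedged sketch of the usual release-and-wait pattern, assuming two hypothetical semaphores fLock and fEvent created elsewhere with create_sem(), might look like this (wait_for_event() is an illustrative name, and the kernel-private declaration of switch_sem_etc() is assumed to be in scope):

// Release the lock protecting some shared state and block on an event
// semaphore in one atomic step, then take the lock back before returning.
static status_t
wait_for_event(sem_id fLock, sem_id fEvent, bigtime_t timeout)
{
	// the caller holds fLock on entry; release it and block on fEvent
	// without a window in which a notification could be lost
	status_t status = switch_sem_etc(fLock, fEvent, 1, B_RELATIVE_TIMEOUT,
		timeout);

	// reacquire the lock regardless of how the wait ended (success,
	// timeout, or interruption), so the caller sees a consistent state
	acquire_sem(fLock);
	return status;
}
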