Exemple #1
0
/*
 * semaphore_V(semaphore_t sem)
 *	Signal (increment) the semaphore, waking one waiter if any thread
 *	is blocked in semaphore_P().
 */
void semaphore_V(semaphore_t sem) {
	minithread_t waiter;

	/* Spin until we own the semaphore's internal test-and-set lock. */
	while (atomic_test_and_set(&(sem->mutex)))
		;

	sem->limit++;
	if (sem->limit <= 0) {
		/* Someone is blocked in P(): hand the unit straight to the
		 * oldest waiter by making it runnable again. */
		queue_dequeue(sem->waiting, (void **) &waiter);
		minithread_start(waiter);
	}
	sem->mutex = 0;
}
Exemple #2
0
/*
 * semaphore_P(semaphore_t sem)
 *	Wait (decrement) on the semaphore; blocks the calling thread when
 *	no units are available.
 */
void semaphore_P(semaphore_t sem) {
	/* Spin until we own the semaphore's internal test-and-set lock. */
	while (atomic_test_and_set(&(sem->mutex)))
		;

	sem->limit--;
	if (sem->limit >= 0) {
		/* A unit was available: release the lock and proceed. */
		sem->mutex = 0;
		return;
	}

	/* No units left: queue ourselves, drop the lock, then block.  The
	 * lock must be released before minithread_stop(), otherwise a
	 * V()-er could never wake us. */
	queue_append(sem->waiting, minithread_self());
	sem->mutex = 0;
	minithread_stop();
}
Exemple #3
0
static status_t
mutex_lock(pthread_mutex_t *mutex, bigtime_t timeout)
{
	// Acquire 'mutex', waiting at most until the absolute real-time
	// 'timeout' (B_INFINITE_TIMEOUT = wait forever).  Returns B_OK on
	// success, B_BAD_VALUE for a NULL mutex, EAGAIN if the backing
	// semaphore cannot be created, EDEADLK on relock of an error-checking
	// mutex, or the acquire_sem_etc() error (e.g. a timeout status).
	thread_id thisThread = find_thread(NULL);
	status_t status = B_OK;

	if (mutex == NULL)
		return B_BAD_VALUE;

	// If statically initialized, we need to create the semaphore, now.
	// -42 is presumably the static-init sentinel for 'sem' -- confirm
	// against the initializer macro.  The three-argument
	// atomic_test_and_set() is a compare-and-swap (store 'sem' iff the
	// field is still -42); the loser of a creation race deletes its
	// now-superfluous semaphore.
	if (mutex->sem == -42) {
		sem_id sem = create_sem(0, "pthread_mutex");
		if (sem < 0)
			return EAGAIN;

		if (atomic_test_and_set((vint32*)&mutex->sem, sem, -42) != -42)
			delete_sem(sem);
	}

	if (MUTEX_TYPE(mutex) == PTHREAD_MUTEX_ERRORCHECK
		&& mutex->owner == thisThread) {
		// we detect this kind of deadlock and return an error
		return EDEADLK;
	}

	if (MUTEX_TYPE(mutex) == PTHREAD_MUTEX_RECURSIVE
		&& mutex->owner == thisThread) {
		// if we already hold the mutex, we don't need to grab it again
		mutex->owner_count++;
		return B_OK;
	}

	// 'count' tracks lockers: a previous value > 0 means someone already
	// holds the mutex, so we queue on the semaphore.
	if (atomic_add((vint32*)&mutex->count, 1) > 0) {
		// this mutex is already locked by someone else, so we need
		// to wait
		status = acquire_sem_etc(mutex->sem, 1,
			timeout == B_INFINITE_TIMEOUT ? 0 : B_ABSOLUTE_REAL_TIME_TIMEOUT,
			timeout);
		// NOTE(review): on failure (e.g. timeout) 'count' stays
		// incremented here -- presumably the unlock path compensates;
		// verify against mutex_unlock.
	}

	if (status == B_OK) {
		// we have locked the mutex for the first time
		mutex->owner = thisThread;
		mutex->owner_count = 1;
	}

	return status;
}
Exemple #4
0
int _mtx_trylock(mtx_t * mtx, char * whr)
#endif
{
    /*
     * Try to take 'mtx' without blocking.
     * Returns 0 on success, 1 if the lock is busy, -ENOTSUP for an
     * unsupported lock type.  'whr' records the call site when
     * configLOCK_DEBUG is enabled.
     * NOTE(review): 'cpu_istate' is not declared in this function --
     * presumably a file-scope/per-CPU variable; confirm.
     */
    int ticket;
    int retval;

    /* Optionally keep interrupts disabled while the lock is held. */
    if (MTX_OPT(mtx, MTX_OPT_DINT)) {
        cpu_istate = get_interrupt_state();
        disable_interrupt();
    }

    switch (mtx->mtx_type) {
    case MTX_TYPE_SPIN:
        /* test-and-set: 0 = acquired, nonzero = already locked. */
        retval = atomic_test_and_set(&mtx->mtx_lock);
        /* NOTE(review): when retval != 0 (lock busy) we still fall
         * through to priceil_set() below and leave interrupts disabled
         * if MTX_OPT_DINT is set -- confirm this is intended; the
         * ticket-lock failure path restores the interrupt state. */
        break;

    case MTX_TYPE_TICKET:
        ticket = atomic_inc(&mtx->ticket.queue);

        if (atomic_read(&mtx->ticket.dequeue) == ticket) {
            atomic_set(&mtx->mtx_lock, 1);
            /* NOTE(review): returns without priceil_set()/mtx_ldebug,
             * unlike the spin-lock success path -- confirm. */
            return 0; /* Got it */
        } else {
            /* Not our turn: give the ticket back and report failure. */
            atomic_dec(&mtx->ticket.queue);
             if (MTX_OPT(mtx, MTX_OPT_DINT))
                set_interrupt_state(cpu_istate);
            return 1; /* No luck */
        }
        break; /* unreachable: both branches above return */

    default:
        MTX_TYPE_NOTSUP();
        if (MTX_OPT(mtx, MTX_OPT_DINT))
            set_interrupt_state(cpu_istate);

        return -ENOTSUP;
    }

    /* Handle priority ceiling. */
    priceil_set(mtx);

#ifdef configLOCK_DEBUG
    mtx->mtx_ldebug = whr;
#endif

    return retval;
}
Exemple #5
0
void
hoardLock(hoardLockType &lock)
{
	// A yielding lock: spin a bounded number of times trying the
	// test-and-set, then surrender the quantum and start over, until
	// the lock is acquired.
	for (;;) {
		for (int32 spin = 0; spin < SPIN_LIMIT; spin++) {
			if (atomic_test_and_set(&lock, LOCKED, UNLOCKED) == UNLOCKED)
				return;		// lock acquired
		}

		// Still held by someone else -- give up our quantum.
		hoardYield();
	}
}
/* Busy-wait on a test-and-set lock, yielding the processor between
 * probes, until the lock becomes available. */
void semaphore_spinlock(tas_lock_t *lock) {
	for (;;) {
		if (!atomic_test_and_set(lock))
			return;		/* got it */
		minithread_yield();
	}
}
/*
 * network_initialize(interrupt_handler_t network_handler)
 *	Bring up the UDP socket used for emulated network interrupts and
 *	start the polling thread that delivers incoming packets through
 *	'network_handler'.  Returns 0 on success, -1 on failure (already
 *	initialized, socket creation failed, or bind failed).  Safe to
 *	call only once: later calls bail out via the 'initialized' flag.
 */
int
network_initialize(interrupt_handler_t network_handler) {
  int arg = 1;

  /* initialise the NT socket library, inexplicably required by NT */
  assert(WSAStartup(MAKEWORD(2, 0), &winsock_version_data) == 0);

  /* one-shot guard: only the first caller may proceed */
  if (atomic_test_and_set(&initialized)) {
    return -1;
  }

  memset(&if_info, 0, sizeof(if_info));
  
  if_info.sock = socket(PF_INET, SOCK_DGRAM, 0);
  if (if_info.sock < 0)  {
    perror("socket");
    return -1;
  }

  /* BUG FIX: the address family must be AF_INET; the original code
   * assigned SOCK_DGRAM (a socket *type*), which only worked by accident
   * because both constants happen to have the value 2. */
  if_info.sin.sin_family = AF_INET;
  if_info.sin.sin_addr.s_addr = htonl(0);	/* INADDR_ANY */
  if_info.sin.sin_port = htons(my_udp_port);
  if (bind(if_info.sock, (struct sockaddr *) &if_info.sin, 
	   sizeof(if_info.sin)) < 0)  {
    /* kprintf("Error: code %ld.\n", GetLastError());*/
    AbortOnError(0);
    perror("bind");
    return -1;
  }

  /* set for fast reuse */
  assert(setsockopt(if_info.sock, SOL_SOCKET, SO_REUSEADDR, 
		    (char *) &arg, sizeof(int)) == 0);

  if (BCAST_ENABLED){
    if (BCAST_USE_TOPOLOGY_FILE){
      bcast_initialize(BCAST_TOPOLOGY_FILE, &topology);
    } else {
      assert(setsockopt(if_info.sock, SOL_SOCKET, SO_BROADCAST, 
		    (char *) &arg, sizeof(int)) == 0);

      network_translate_hostname(BCAST_ADDRESS,broadcast_addr);
    }
  }

  /*
   * Print network information on the screen (mostly for Joranda).
   */

  {
    network_address_t my_address;
    char my_hostname[256];
    
    network_get_my_address(my_address);
    network_format_address(my_address, my_hostname, 256);

    kprintf("Hostname of local machine: %s.\n",my_hostname);
  }

  /*
   * Interrupts are handled through the caller's handler.
   */
  
  start_network_poll(network_handler, &if_info.sock);
    
  return 0;
}
Exemple #8
0
int _mtx_lock(mtx_t * mtx, char * whr)
#endif
{
    /*
     * Acquire 'mtx', spinning (and, for ticket locks, yielding) until it
     * becomes available.  Returns 0 on success, -EWOULDBLOCK when a
     * sleep-mode lock's wait timer has expired, -ENOTSUP for an
     * unsupported lock type.  'whr' records the call site when
     * configLOCK_DEBUG is enabled.
     * NOTE(review): 'cpu_istate' is not declared in this function --
     * presumably a file-scope/per-CPU variable; on success with
     * MTX_OPT_DINT interrupts stay disabled (presumably restored by the
     * unlock path -- confirm).
     */
    int ticket;
    const int sleep_mode = MTX_OPT(mtx, MTX_OPT_SLEEP);
#ifdef configLOCK_DEBUG
    unsigned deadlock_cnt = 0;
#endif

    /* Ticket locks: draw our ticket up front, then wait for our turn
     * in the loop below. */
    if (mtx->mtx_type == MTX_TYPE_TICKET) {
        ticket = atomic_inc(&mtx->ticket.queue);
    }

    /* Optionally keep interrupts disabled while the lock is held. */
    if (MTX_OPT(mtx, MTX_OPT_DINT)) {
        cpu_istate = get_interrupt_state();
        disable_interrupt();
    }

    while (1) {
#if defined(configLOCK_DEBUG) && (configKLOCK_DLTHRES > 0)
        /*
         * TODO Deadlock detection threshold should depend on lock type and
         *      current priorities.
         */
        if (++deadlock_cnt >= configSCHED_HZ * (configKLOCK_DLTHRES + 1)) {
            char * lwhr = (mtx->mtx_ldebug) ? mtx->mtx_ldebug : "?";

            KERROR(KERROR_DEBUG,
                   "Deadlock detected:\n%s WAITING\n%s LOCKED\n",
                   whr, lwhr);

            deadlock_cnt = 0;
        }
#endif

        /* NOTE(review): wait_tim == -2 is presumably the "wait timer
         * expired" sentinel -- confirm against the timer code. */
        if (sleep_mode && (current_thread->wait_tim == -2))
            return -EWOULDBLOCK;

        switch (mtx->mtx_type) {
        case MTX_TYPE_SPIN:
            /* test-and-set returns 0 when we acquired the lock. */
            if (!atomic_test_and_set(&mtx->mtx_lock))
                goto out;
            break;

        case MTX_TYPE_TICKET:
            if (atomic_read(&mtx->ticket.dequeue) == ticket) {
                atomic_set(&mtx->mtx_lock, 1);
                goto out;
            }

            /* Not our turn yet; let someone else run. */
            thread_yield(THREAD_YIELD_LAZY);
            break;

        default:
            MTX_TYPE_NOTSUP();
            if (MTX_OPT(mtx, MTX_OPT_DINT))
                set_interrupt_state(cpu_istate);

            return -ENOTSUP;
        }

#ifdef configMP
        cpu_wfe(); /* Sleep until event. */
#endif
    }
out:

    /* Handle priority ceiling. */
    priceil_set(mtx);

#ifdef configLOCK_DEBUG
    mtx->mtx_ldebug = whr;
#endif

    return 0;
}
Exemple #9
0
int
pthread_once(pthread_once_t* onceControl, void (*initRoutine)(void))
{
	// Algorithm:
	// The state goes through at most four states:
	// STATE_UNINITIALIZED: The initial uninitialized state.
	// STATE_INITIALIZING: Set by the first thread entering the function. It
	// will call initRoutine.
	// semaphore/STATE_SPINNING: Set by the second thread entering the function,
	// when the first thread is still executing initRoutine. The normal case is
	// that the thread manages to create a semaphore. This thread (and all
	// following threads) will block on the semaphore until the first thread is
	// done.
	// STATE_INITIALIZED: Set by the first thread when it returns from
	// initRoutine. All following threads will return right away.

	// The three-argument atomic_test_and_set() is a compare-and-swap:
	// store STATE_INITIALIZING iff 'state' is still STATE_UNINITIALIZED;
	// the previous value is returned either way.
	// NOTE(review): the code below treats any value >= 0 in 'state' as a
	// sem_id, so the STATE_* constants are presumably all negative --
	// verify against their definitions.
	int32 value = atomic_test_and_set((vint32*)&onceControl->state,
		STATE_INITIALIZING, STATE_UNINITIALIZED);

	if (value == STATE_INITIALIZED)
		return 0;

	if (value == STATE_UNINITIALIZED) {
		// we're the first -- perform the initialization
		initRoutine();

		// Publish completion; atomic_set() returns the previous state.
		value = atomic_set((vint32*)&onceControl->state, STATE_INITIALIZED);

		// If someone else is waiting, we need to delete the semaphore.
		// (A non-negative previous value is the waiters' sem_id; deleting
		// it releases every thread blocked in acquire_sem() below.)
		if (value >= 0)
			delete_sem(value);

		return 0;
	}

	if (value == STATE_INITIALIZING) {
		// someone is initializing -- we need to create a semaphore we can wait
		// on
		sem_id semaphore = create_sem(0, "pthread once");
		if (semaphore >= 0) {
			// successfully created -- set it
			// (CAS again: only install our semaphore if the initializer
			// hasn't finished and no other waiter beat us to it.)
			value = atomic_test_and_set((vint32*)&onceControl->state,
				semaphore, STATE_INITIALIZING);
			if (value == STATE_INITIALIZING)
				value = semaphore;
			else
				delete_sem(semaphore);
		} else {
			// Failed to create the semaphore. Can only happen when the system
			// runs out of semaphores, but we can still handle the situation
			// gracefully by spinning.
			value = atomic_test_and_set((vint32*)&onceControl->state,
				STATE_SPINNING, STATE_INITIALIZING);
			if (value == STATE_INITIALIZING)
				value = STATE_SPINNING;
		}
	}

	if (value >= 0) {
		// wait on the semaphore
		// (blocks until the initializer deletes it; retry on signal)
		while (acquire_sem(value) == B_INTERRUPTED);

		return 0;
	} else if (value == STATE_SPINNING) {
		// out of semaphores -- spin
		while (atomic_get((vint32*)&onceControl->state) == STATE_SPINNING);
	}

	return 0;
}