Example no. 1
/*
 * Lock the given lock
 * uses waiting queues
 */
void lock_acquire(lock_t *lock) {

	interrupt_status_t intr_status;
	intr_status = _interrupt_disable();

	spinlock_acquire(&lock->spinlock);		// Acquire spinlock
	
	/* Check if lock is locked already */
	while (lock->state == LOCK_LOCKED) {
	
		/* Add thread to sleep queue and switch thread */
		sleepq_add(lock);
		
		spinlock_release(&lock->spinlock);	// Release spinlock
		
		thread_switch();
		spinlock_acquire(&lock->spinlock);	// Acquire spinlock
	}
	
	/* Lock open. Acquire it! */
	lock->state = LOCK_LOCKED;

	spinlock_release(&lock->spinlock);	// Release spinlock
	
	_interrupt_set_state(intr_status);
}
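For reference, the release side in this style marks the lock open and wakes one sleeper before dropping the spinlock. The sketch below assumes the same BUENOS/KUDOS-style API (sleepq_wake, and a LOCK_FREE state as the counterpart of LOCK_LOCKED); it is not taken from the example's project.

/* Sketch of the matching lock_release (assumed names, not from the
 * example's source): mark the lock open and wake one sleeper. */
void lock_release(lock_t *lock) {
	interrupt_status_t intr_status;
	intr_status = _interrupt_disable();
	spinlock_acquire(&lock->spinlock);

	lock->state = LOCK_FREE;	/* assumed counterpart of LOCK_LOCKED */
	sleepq_wake(lock);		/* wake one thread sleeping on this lock */

	spinlock_release(&lock->spinlock);
	_interrupt_set_state(intr_status);
}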
Example no. 2
/* Wait for the given process to terminate, return its return value,
   and mark the process-table entry as free */
int process_join(process_id_t pid){
  kprintf("Starten af process join pid = %d\n",pid);
  //Check if PID is valid
  if ((pid >= 0) && (pid < PROCESS_MAX_PROCESSES)) {
    interrupt_status_t intr_status;
    uint32_t retval;

    intr_status = _interrupt_disable();
    spinlock_acquire(&process_table_slock);

    /* Wait for the child process to exit and become a zombie. */
    while (process_table[pid].state != STATE_ZOMBIE) {
      /* Move to sleep queue and switch to another thread. */
      sleepq_add(&process_table[pid]);
      spinlock_release(&process_table_slock);
      thread_switch();
      spinlock_acquire(&process_table_slock);
    }

    /* Get the return value and prepare its slot for a future process. */
    retval = process_table[pid].retval;
    process_table[pid].state = STATE_FREE;

    // Release the resource spinlock
    spinlock_release(&process_table_slock);
    // Restore the interrupt mask
    _interrupt_set_state(intr_status);

    return retval;
  }
  kprintf("Fejl i process join pid < 0 %d\n",pid);
  return -1;
}
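This join blocks until the child marks itself a zombie and wakes the sleepers on its own process-table entry. A minimal sketch of that exit side, assuming the same table, spinlock, and a sleepq_wake_all call (process_finish and thread_finish are assumed names; the excerpt above does not show them):

/* Sketch of the exit path that pairs with process_join above. */
void process_finish(uint32_t retval) {
  interrupt_status_t intr_status;
  process_id_t pid = process_get_current_process();

  intr_status = _interrupt_disable();
  spinlock_acquire(&process_table_slock);

  process_table[pid].retval = retval;
  process_table[pid].state = STATE_ZOMBIE;

  /* Wake every thread sleeping on this process-table entry
     (the joiner slept with sleepq_add(&process_table[pid])). */
  sleepq_wake_all(&process_table[pid]);

  spinlock_release(&process_table_slock);
  _interrupt_set_state(intr_status);

  thread_finish();  /* assumed: the exiting thread never returns */
}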
Example no. 3
File: tty.c Project: DIKU-EDU/kudos
/**
 * Reads at most len bytes from the tty device pointed to by
 * gcd into buffer buf.
 *
 * @param gcd Pointer to the tty-device.
 * @param buf Character buffer to be read into.
 * @param len Maximum number of bytes to be read.
 *
 * @return Number of successfully read characters.
 */
static int tty_read(gcd_t *gcd, void *buf, int len) {
  interrupt_status_t intr_status;
  volatile tty_real_device_t *tty_rd
    = (tty_real_device_t *)gcd->device->real_device;
  int i;

  intr_status = _interrupt_disable();
  spinlock_acquire(tty_rd->slock);

  while (tty_rd->read_count == 0) {
    /* buffer is empty, so wait for it to be filled */
    sleepq_add((void *)tty_rd->read_buf);
    spinlock_release(tty_rd->slock);
    thread_switch();
    spinlock_acquire(tty_rd->slock);
  }


  /* Data is read into the internal buffer by the interrupt driver.
     The number of chars read is stored in i. */
  i = 0;
  while (tty_rd->read_count > 0 && i < len) {
    ((char *)buf)[i++] = tty_rd->read_buf[tty_rd->read_head];
    tty_rd->read_head = (tty_rd->read_head + 1) % TTY_BUF_SIZE;
    tty_rd->read_count--;
  }

  spinlock_release(tty_rd->slock);
  _interrupt_set_state(intr_status);

  return i;
}
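tty_read can sleep above because the receive-interrupt side appends incoming characters to read_buf and then wakes sleepers on that buffer address. A sketch of that step using the field names from the example (tty_deliver_char is a made-up helper; the real handler also talks to the device registers):

/* Sketch: deliver one received character and wake sleeping readers. */
static void tty_deliver_char(volatile tty_real_device_t *tty_rd, char c) {
  /* Caller is assumed to hold tty_rd->slock with interrupts disabled. */
  if (tty_rd->read_count < TTY_BUF_SIZE) {
    int tail = (tty_rd->read_head + tty_rd->read_count) % TTY_BUF_SIZE;
    tty_rd->read_buf[tail] = c;
    tty_rd->read_count++;
  }
  /* Readers slept with sleepq_add((void *)tty_rd->read_buf). */
  sleepq_wake_all((void *)tty_rd->read_buf);
}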
Example no. 4
int process_join(process_id_t pid) {
    int retval;
    interrupt_status_t intr_status;

    /* Only join with legal pids */
    if (pid < 0 || pid >= PROCESS_MAX_PROCESSES ||
            process_table[pid].parent != process_get_current_process())
        return PROCESS_ILLEGAL_JOIN;

    intr_status = _interrupt_disable();
    spinlock_acquire(&process_table_slock);

    /* We may be woken up before the process is actually a zombie, so re-check in a loop. */
    while (process_table[pid].state != PROCESS_ZOMBIE) {
        sleepq_add(&process_table[pid]);
        spinlock_release(&process_table_slock);
        thread_switch();
        spinlock_acquire(&process_table_slock);
    }

    retval = process_table[pid].retval;
    process_reset(pid);

    spinlock_release(&process_table_slock);
    _interrupt_set_state(intr_status);
    return retval;
}
Example no. 5
void condition_wait(cond_t *cond, lock_t *condition_lock)
{
    interrupt_status_t intr_status = _interrupt_disable();
    sleepq_add(cond); // add to the sleep queue
    _interrupt_set_state(intr_status);
    lock_release(condition_lock); // let go of the lock
    thread_switch(); // switch thread
    lock_acquire(condition_lock); // reacquire the lock before returning
}
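The matching signal operation only has to wake one thread sleeping on the condition object. A minimal sketch assuming the same sleep-queue API (the caller is expected to hold condition_lock):

/* Sketch of condition_signal (assumed API): wake one thread that
 * slept with sleepq_add(cond) in condition_wait. */
void condition_signal(cond_t *cond, lock_t *condition_lock)
{
    interrupt_status_t intr_status = _interrupt_disable();
    sleepq_wake(cond);
    _interrupt_set_state(intr_status);
    (void)condition_lock;  /* held by the caller; not needed for the wake */
}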
Example no. 6
int process_join(process_id_t pid)
{
  process_id_t my_pid;
  uint32_t retval;
  interrupt_status_t intr_status;

  my_pid = process_get_current_process();
  if (pid < 0
      || pid >= PROCESS_MAX_PROCESSES
      || process_table[pid].parent != my_pid) {
    return -1;
  }

  intr_status = _interrupt_disable();
  spinlock_acquire(&process_table_slock);

  /* Loop until the process we are joining is a zombie. */
  while (process_table[pid].state != PROCESS_ZOMBIE) {
    sleepq_add(&process_table[pid]);
    spinlock_release(&process_table_slock);
    thread_switch();
    spinlock_acquire(&process_table_slock);
  }
  retval = process_table[pid].retval;

  /* Let children see it is gone. */
  process_table[pid].retval = -1;
  /* Make sure we can't join it again. */
  process_table[pid].parent = -1;

  if (process_table[pid].children == 0) {

    process_table[my_pid].children--;

    /* Remove the zombie child from our list of zombie children. */
    if (process_table[my_pid].first_zombie == pid) {
      process_id_t next = process_table[pid].next_zombie;
      process_table[my_pid].first_zombie = next;
      if (next >= 0) {
        process_table[next].prev_zombie = -1;
      }
    } else {
      process_id_t prev = process_table[pid].prev_zombie;
      process_id_t next = process_table[pid].next_zombie;
      process_table[prev].next_zombie = next;
      if (next >= 0) {
        process_table[next].prev_zombie = prev;
      }
    }

    process_reset(pid);

  }

  spinlock_release(&process_table_slock);
  _interrupt_set_state(intr_status);
  return retval;
}
Example no. 7
File: lock.c Project: Jkirstejn/OSM
/*
 * Lock the given lock
 * uses waiting queues
 */
void lock_acquire(lock_t *lock) {

	/* Check if lock is locked already (note: unlike example no. 1, the
	   check here is not guarded by a spinlock or disabled interrupts) */
	while (lock->state == LOCK_LOCKED) {
		/* Add thread to sleep queue and switch thread */
		sleepq_add(lock);
		thread_switch();
	}
	/* Lock opened */
	lock->state = LOCK_LOCKED;

}
Example no. 8
/*
 * Wait for a condition
 * Sleeps threads and waits for a signal from the given condition
 */
void condition_wait(cond_t *cond, lock_t *condition_lock) {

	interrupt_status_t intr_status;
	intr_status = _interrupt_disable();
	
	sleepq_add(cond);				// Wait for a signal from cond
	lock_release(condition_lock);	// Release the condition lock
	
	_interrupt_set_state(intr_status);
	thread_switch();				// Sleep thread
	
	lock_acquire(condition_lock);
}
Example no. 9
File: tty.c Project: DIKU-EDU/kudos
/**
 * Writes len bytes from buffer buf to the tty device pointed
 * to by gcd. Implements write from the gcd interface.
 *
 * @param gcd Pointer to the tty-device.
 * @param buf Buffer to be written from.
 * @param len Number of bytes to be written.
 *
 * @return Number of successfully written characters.
 */
static int tty_write(gcd_t *gcd, const void *buf, int len) {
  interrupt_status_t intr_status;
  volatile tty_io_area_t *iobase = (tty_io_area_t *)gcd->device->io_address;
  volatile tty_real_device_t *tty_rd
    = (tty_real_device_t *)gcd->device->real_device;
  int i;

  intr_status = _interrupt_disable();
  spinlock_acquire(tty_rd->slock);

  i = 0;
  while (i < len) {
    while (tty_rd->write_count > 0) {
      /* buffer contains data, so wait until empty. */
      sleepq_add((void *)tty_rd->write_buf);
      spinlock_release(tty_rd->slock);
      thread_switch();
      spinlock_acquire(tty_rd->slock);
    }

    /* Fill internal buffer. */
    while (tty_rd->write_count < TTY_BUF_SIZE  && i < len) {
      int index;
      index = (tty_rd->write_head + tty_rd->write_count) % TTY_BUF_SIZE;
      tty_rd->write_buf[index] = ((char *)buf)[i++];
      tty_rd->write_count++;
    }

    /* If device is not currently busy, write one character to
       cause interrupt. Head and count are adjusted not to write
       first character twice. Rest of the buffer is written by
       interrupt handler.

       If the device is busy, interrupt will appear by itself and
       the whole buffer will be written by interrupt handler.
    */
    if (!TTY_STATUS_WBUSY(iobase->status)) {
      iobase->data = tty_rd->write_buf[tty_rd->write_head];
      tty_rd->write_head = (tty_rd->write_head + 1) % TTY_BUF_SIZE;
      tty_rd->write_count--;
    }

  }

  spinlock_release(tty_rd->slock);
  _interrupt_set_state(intr_status);

  return i;
}
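Symmetrically, tty_write relies on the transmit-interrupt side to drain write_buf one character per interrupt and to wake writers sleeping on the buffer once it has emptied. A sketch under the same assumptions (tty_drain_one is a made-up helper):

/* Sketch: push the next buffered character to the device and wake
 * any writer sleeping on the buffer once it is empty. */
static void tty_drain_one(volatile tty_io_area_t *iobase,
                          volatile tty_real_device_t *tty_rd) {
  /* Caller is assumed to hold tty_rd->slock with interrupts disabled. */
  if (tty_rd->write_count > 0) {
    iobase->data = tty_rd->write_buf[tty_rd->write_head];
    tty_rd->write_head = (tty_rd->write_head + 1) % TTY_BUF_SIZE;
    tty_rd->write_count--;
  }
  if (tty_rd->write_count == 0) {
    /* Writers slept with sleepq_add((void *)tty_rd->write_buf). */
    sleepq_wake_all((void *)tty_rd->write_buf);
  }
}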
Example no. 10
/* We add the thread to the sleep queue and sleep; when we wake again, we
 * acquire the lock */
void condition_wait(cond_t *cond, lock_t *condition_lock){

    interrupt_status_t intr_status;
    intr_status = _interrupt_disable();
    spinlock_acquire(&(cond->spinlock));

    sleepq_add(cond);

    spinlock_release(&(cond->spinlock));

    thread_switch();

    lock_acquire(condition_lock);

    _interrupt_set_state(intr_status);
}
Example no. 11
void semaphore_P(semaphore_t *sem)
{
  interrupt_status_t intr_status;
  
  intr_status = _interrupt_disable();
  spinlock_acquire(&sem->slock);
  
  sem->value--;
  if (sem->value < 0) {
    sleepq_add(sem);
    spinlock_release(&sem->slock);
    thread_switch();
  } else {
    spinlock_release(&sem->slock);
  }
  _interrupt_set_state(intr_status);
}
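The matching V operation increments the value and, if the result shows a waiter was queued, wakes one sleeper on the semaphore. A sketch using the same fields as semaphore_P above, mirroring the usual BUENOS-style semaphore_V:

/* Sketch of semaphore_V pairing with semaphore_P above. */
void semaphore_V(semaphore_t *sem)
{
  interrupt_status_t intr_status;

  intr_status = _interrupt_disable();
  spinlock_acquire(&sem->slock);

  sem->value++;
  if (sem->value <= 0) {
    /* At least one thread decremented below zero and went to sleep. */
    sleepq_wake(sem);
  }

  spinlock_release(&sem->slock);
  _interrupt_set_state(intr_status);
}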
Example no. 12
void lock_acquire( lock_t *lock ) {
    interrupt_status_t intr_status;

    intr_status = _interrupt_disable();
    spinlock_acquire(&lock->slock);

    while (lock->locked == LOCK_LOCKED) {
        sleepq_add(lock);
        spinlock_release(&lock->slock);
        thread_switch();
        spinlock_acquire(&lock->slock);  /* reacquire before re-checking */
    }
    lock->locked = LOCK_LOCKED;
    
    spinlock_release(&lock->slock);
    _interrupt_set_state(intr_status);
    
}
Example no. 13
File: usr_sem.c Project: kazyka/OSM
int usr_sem_procure(usr_sem_t* sem) {
  interrupt_status_t intr_status;
  intr_status = _interrupt_disable();

  spinlock_acquire(&(sem->sem_slock));

  sem->value--;
  while (sem->value < 0) {
    sleepq_add(&(sem->value));
    spinlock_release(&(sem->sem_slock));
    thread_switch();
    spinlock_acquire(&(sem->sem_slock));
  }

  spinlock_release(&(sem->sem_slock));
  _interrupt_set_state(intr_status);
  return 0;
}
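usr_sem_procure sleeps on &sem->value, so the matching vacate must wake on that same address. A sketch of the standard mirror operation, assuming the same struct fields (not taken from the kazyka/OSM source):

/* Sketch of usr_sem_vacate pairing with usr_sem_procure above
 * (assumed fields sem->value and sem->sem_slock). */
int usr_sem_vacate(usr_sem_t* sem) {
  interrupt_status_t intr_status;
  intr_status = _interrupt_disable();

  spinlock_acquire(&(sem->sem_slock));

  sem->value++;
  if (sem->value <= 0) {
    /* Someone is sleeping on &sem->value in usr_sem_procure. */
    sleepq_wake(&(sem->value));
  }

  spinlock_release(&(sem->sem_slock));
  _interrupt_set_state(intr_status);
  return 0;
}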
Example no. 14
int process_join(process_id_t pid) {
// kprintf("PROCESS JOIN STARTED\n");

  spinlock_t lock;
  interrupt_status_t intr_status;
  int retval;

  /* Only the parent may join this pid (the original used '=' here,
     which assigned instead of comparing). */
  if (process_table[pid].parent_id != process_get_current_process())
    return PROCESS_ILLEGAL_JOIN;

//  kprintf("PROCESS JOIN IS LEGAL\n");
  // Disable interrupts.
  intr_status = _interrupt_disable();
//  kprintf("interrupts disabled\n");
  // Acquire the resource spinlock. Note: a spinlock declared locally on
  // the stack is private to this call, so it cannot provide mutual
  // exclusion with other threads; the shared process_table_slock would
  // normally be used here.
  spinlock_reset(&lock);
  spinlock_acquire(&lock);
//  kprintf("LOCK ACQUIRED\n");
  // Add to the sleep queue.
  process_table[process_get_current_process()].state = WAITING;
  while (process_table[pid].state != ZOMBIE) {
    sleepq_add(&process_table[pid]);

    // Release the resource spinlock.
    spinlock_release(&lock);
//  kprintf("THREAD IS PUT TO SLEEP\n");

    thread_switch();

    // Reacquire the resource spinlock.
    spinlock_acquire(&lock);
  }

  // Read the child's return value, then free the slot now that it is done.
  retval = process_table[pid].retval;
  process_table[pid].state = FREE;

  // Release the resource spinlock.
  spinlock_release(&lock);
  process_table[process_get_current_process()].state = RUNNING;
  // Restore the interrupt mask.
  _interrupt_set_state(intr_status);

//  kprintf("PROCESS_JOIN MADE IT THROUGH\n");
  return retval;
}
Example no. 15
void lock_acquire(lock_t *lock){

    interrupt_status_t intr_status;
    intr_status = _interrupt_disable();
    spinlock_acquire(&(lock->spinlock));

    /* If the lock is locked we put the thread to sleep, and check again when
     * the thread is awoken */
    while(lock->locked){
        sleepq_add(lock);
        spinlock_release(&(lock->spinlock));

        /* let the thread sleep */
        thread_switch();
        spinlock_acquire(&(lock->spinlock));
    }

    lock->locked = 1;

    spinlock_release(&(lock->spinlock));
    _interrupt_set_state(intr_status);
}
Example no. 16
uint32_t process_join(process_id_t pid)
{
    process_id_t my_pid;
    uint32_t retval;
    interrupt_status_t intr_status;
  
    my_pid = process_get_current_process();
    if (pid < 0
        || pid >= MAX_PROCESSES
        || process_table[pid].parent != my_pid) {
        return -1;
    }

    intr_status = _interrupt_disable();
    spinlock_acquire(&process_table_slock);

    while (process_table[pid].state != PROCESS_ZOMBIE) {
        sleepq_add(&process_table[pid]);
        spinlock_release(&process_table_slock);
        thread_switch();
        spinlock_acquire(&process_table_slock);
    }
    retval = process_table[pid].retval;
    process_table[my_pid].children--;

    /* Let children see it is gone. */
    process_table[pid].retval = -1;
    /* Make sure we can't join it again. */
    process_table[pid].parent = -1;

    if (process_table[pid].children == 0) {
        process_table[pid].state = PROCESS_FREE;
    }

    spinlock_release(&process_table_slock);
    _interrupt_set_state(intr_status);
    return retval;
}
Example no. 17
uint32_t process_join(process_id_t pid) {
	interrupt_status_t intr_status;
	uint32_t retval;

	// Disable interrupts and acquire resource lock
	intr_status = _interrupt_disable();
	spinlock_acquire(&process_table_slock);

	// Sleep while the process isn't in its "dying" state.
	while(process_table[pid].state != PROCESS_DYING) {
		sleepq_add(&process_table[pid]);
		spinlock_release(&process_table_slock);
		thread_switch();
		spinlock_acquire(&process_table_slock);
	}

	retval = process_table[pid].retval;
	process_table[pid].state = PROCESS_SLOT_AVAILABLE;

	// Restore interrupts and free our lock
	spinlock_release(&process_table_slock);
	_interrupt_set_state(intr_status);
	return retval;
}
Example no. 18
/*
 * This function represents the so-called 'hard case' for sx_slock
 * operation.  All 'easy case' failures are redirected to this.  Note
 * that ideally this would be a static function, but it needs to be
 * accessible from at least sx.h.
 */
int
_sx_slock_hard(struct sx *sx, int opts, const char *file, int line)
{
	GIANT_DECLARE;
#ifdef ADAPTIVE_SX
	volatile struct thread *owner;
#endif
#ifdef LOCK_PROFILING
	uint64_t waittime = 0;
	int contested = 0;
#endif
	uintptr_t x;
	int error = 0;
#ifdef KDTRACE_HOOKS
	uintptr_t state;
	uint64_t spin_cnt = 0;
	uint64_t sleep_cnt = 0;
	int64_t sleep_time = 0;
	int64_t all_time = 0;
#endif

	if (SCHEDULER_STOPPED())
		return (0);

#ifdef KDTRACE_HOOKS
	state = sx->sx_lock;
	all_time -= lockstat_nsecs(&sx->lock_object);
#endif

	/*
	 * As with rwlocks, we don't make any attempt to try to block
	 * shared locks once there is an exclusive waiter.
	 */
	for (;;) {
#ifdef KDTRACE_HOOKS
		spin_cnt++;
#endif
		x = sx->sx_lock;

		/*
		 * If no other thread has an exclusive lock then try to bump up
		 * the count of sharers.  Since we have to preserve the state
		 * of SX_LOCK_EXCLUSIVE_WAITERS, if we fail to acquire the
		 * shared lock loop back and retry.
		 */
		if (x & SX_LOCK_SHARED) {
			MPASS(!(x & SX_LOCK_SHARED_WAITERS));
			if (atomic_cmpset_acq_ptr(&sx->sx_lock, x,
			    x + SX_ONE_SHARER)) {
				if (LOCK_LOG_TEST(&sx->lock_object, 0))
					CTR4(KTR_LOCK,
					    "%s: %p succeed %p -> %p", __func__,
					    sx, (void *)x,
					    (void *)(x + SX_ONE_SHARER));
				break;
			}
			continue;
		}
#ifdef HWPMC_HOOKS
		PMC_SOFT_CALL( , , lock, failed);
#endif
		lock_profile_obtain_lock_failed(&sx->lock_object, &contested,
		    &waittime);

#ifdef ADAPTIVE_SX
		/*
		 * If the owner is running on another CPU, spin until
		 * the owner stops running or the state of the lock
		 * changes.
		 */
		if ((sx->lock_object.lo_flags & SX_NOADAPTIVE) == 0) {
			x = SX_OWNER(x);
			owner = (struct thread *)x;
			if (TD_IS_RUNNING(owner)) {
				if (LOCK_LOG_TEST(&sx->lock_object, 0))
					CTR3(KTR_LOCK,
					    "%s: spinning on %p held by %p",
					    __func__, sx, owner);
				KTR_STATE1(KTR_SCHED, "thread",
				    sched_tdname(curthread), "spinning",
				    "lockname:\"%s\"", sx->lock_object.lo_name);
				GIANT_SAVE();
				while (SX_OWNER(sx->sx_lock) == x &&
				    TD_IS_RUNNING(owner)) {
#ifdef KDTRACE_HOOKS
					spin_cnt++;
#endif
					cpu_spinwait();
				}
				KTR_STATE0(KTR_SCHED, "thread",
				    sched_tdname(curthread), "running");
				continue;
			}
		}
#endif

		/*
		 * Some other thread already has an exclusive lock, so
		 * start the process of blocking.
		 */
		sleepq_lock(&sx->lock_object);
		x = sx->sx_lock;

		/*
		 * The lock could have been released while we spun.
		 * In this case loop back and retry.
		 */
		if (x & SX_LOCK_SHARED) {
			sleepq_release(&sx->lock_object);
			continue;
		}

#ifdef ADAPTIVE_SX
		/*
		 * If the owner is running on another CPU, spin until
		 * the owner stops running or the state of the lock
		 * changes.
		 */
		if (!(x & SX_LOCK_SHARED) &&
		    (sx->lock_object.lo_flags & SX_NOADAPTIVE) == 0) {
			owner = (struct thread *)SX_OWNER(x);
			if (TD_IS_RUNNING(owner)) {
				sleepq_release(&sx->lock_object);
				continue;
			}
		}
#endif

		/*
		 * Try to set the SX_LOCK_SHARED_WAITERS flag.  If we
		 * fail to set it drop the sleep queue lock and loop
		 * back.
		 */
		if (!(x & SX_LOCK_SHARED_WAITERS)) {
			if (!atomic_cmpset_ptr(&sx->sx_lock, x,
			    x | SX_LOCK_SHARED_WAITERS)) {
				sleepq_release(&sx->lock_object);
				continue;
			}
			if (LOCK_LOG_TEST(&sx->lock_object, 0))
				CTR2(KTR_LOCK, "%s: %p set shared waiters flag",
				    __func__, sx);
		}

		/*
		 * Since we have been unable to acquire the shared lock,
		 * we have to sleep.
		 */
		if (LOCK_LOG_TEST(&sx->lock_object, 0))
			CTR2(KTR_LOCK, "%s: %p blocking on sleep queue",
			    __func__, sx);

#ifdef KDTRACE_HOOKS
		sleep_time -= lockstat_nsecs(&sx->lock_object);
#endif
		GIANT_SAVE();
		sleepq_add(&sx->lock_object, NULL, sx->lock_object.lo_name,
		    SLEEPQ_SX | ((opts & SX_INTERRUPTIBLE) ?
		    SLEEPQ_INTERRUPTIBLE : 0), SQ_SHARED_QUEUE);
		if (!(opts & SX_INTERRUPTIBLE))
			sleepq_wait(&sx->lock_object, 0);
		else
			error = sleepq_wait_sig(&sx->lock_object, 0);
#ifdef KDTRACE_HOOKS
		sleep_time += lockstat_nsecs(&sx->lock_object);
		sleep_cnt++;
#endif
		if (error) {
			if (LOCK_LOG_TEST(&sx->lock_object, 0))
				CTR2(KTR_LOCK,
			"%s: interruptible sleep by %p suspended by signal",
				    __func__, sx);
			break;
		}
		if (LOCK_LOG_TEST(&sx->lock_object, 0))
			CTR2(KTR_LOCK, "%s: %p resuming from sleep queue",
			    __func__, sx);
	}
#ifdef KDTRACE_HOOKS
	all_time += lockstat_nsecs(&sx->lock_object);
	if (sleep_time)
		LOCKSTAT_RECORD4(sx__block, sx, sleep_time,
		    LOCKSTAT_READER, (state & SX_LOCK_SHARED) == 0,
		    (state & SX_LOCK_SHARED) == 0 ? 0 : SX_SHARERS(state));
	if (spin_cnt > sleep_cnt)
		LOCKSTAT_RECORD4(sx__spin, sx, all_time - sleep_time,
		    LOCKSTAT_READER, (state & SX_LOCK_SHARED) == 0,
		    (state & SX_LOCK_SHARED) == 0 ? 0 : SX_SHARERS(state));
#endif
	if (error == 0)
		LOCKSTAT_PROFILE_OBTAIN_RWLOCK_SUCCESS(sx__acquire, sx,
		    contested, waittime, file, line, LOCKSTAT_READER);
	GIANT_RESTORE();
	return (error);
}
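From a caller's point of view this machinery is hidden behind the sx(9) API. A usage sketch of the shared path with made-up names (foo_lock, foo_count); only the sx_* calls are real FreeBSD API:

#include <sys/param.h>
#include <sys/lock.h>
#include <sys/sx.h>

/* Usage sketch: reader taking an sx lock in shared mode. */
static struct sx foo_lock;
static int foo_count;

static void
foo_init(void)
{
	sx_init(&foo_lock, "foo lock");
}

static int
foo_read_count(void)
{
	int v;

	sx_slock(&foo_lock);	/* may end up in _sx_slock_hard() above */
	v = foo_count;
	sx_sunlock(&foo_lock);
	return (v);
}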
Example no. 19
/*
 * This function represents the so-called 'hard case' for sx_xlock
 * operation.  All 'easy case' failures are redirected to this.  Note
 * that ideally this would be a static function, but it needs to be
 * accessible from at least sx.h.
 */
int
_sx_xlock_hard(struct sx *sx, uintptr_t tid, int opts, const char *file,
    int line)
{
	GIANT_DECLARE;
#ifdef ADAPTIVE_SX
	volatile struct thread *owner;
	u_int i, spintries = 0;
#endif
	uintptr_t x;
#ifdef LOCK_PROFILING
	uint64_t waittime = 0;
	int contested = 0;
#endif
	int error = 0;
#ifdef	KDTRACE_HOOKS
	uintptr_t state;
	uint64_t spin_cnt = 0;
	uint64_t sleep_cnt = 0;
	int64_t sleep_time = 0;
	int64_t all_time = 0;
#endif

	if (SCHEDULER_STOPPED())
		return (0);

	/* If we already hold an exclusive lock, then recurse. */
	if (sx_xlocked(sx)) {
		KASSERT((sx->lock_object.lo_flags & LO_RECURSABLE) != 0,
	    ("_sx_xlock_hard: recursed on non-recursive sx %s @ %s:%d\n",
		    sx->lock_object.lo_name, file, line));
		sx->sx_recurse++;
		atomic_set_ptr(&sx->sx_lock, SX_LOCK_RECURSED);
		if (LOCK_LOG_TEST(&sx->lock_object, 0))
			CTR2(KTR_LOCK, "%s: %p recursing", __func__, sx);
		return (0);
	}

	if (LOCK_LOG_TEST(&sx->lock_object, 0))
		CTR5(KTR_LOCK, "%s: %s contested (lock=%p) at %s:%d", __func__,
		    sx->lock_object.lo_name, (void *)sx->sx_lock, file, line);

#ifdef KDTRACE_HOOKS
	all_time -= lockstat_nsecs(&sx->lock_object);
	state = sx->sx_lock;
#endif
	while (!atomic_cmpset_acq_ptr(&sx->sx_lock, SX_LOCK_UNLOCKED, tid)) {
#ifdef KDTRACE_HOOKS
		spin_cnt++;
#endif
#ifdef HWPMC_HOOKS
		PMC_SOFT_CALL( , , lock, failed);
#endif
		lock_profile_obtain_lock_failed(&sx->lock_object, &contested,
		    &waittime);
#ifdef ADAPTIVE_SX
		/*
		 * If the lock is write locked and the owner is
		 * running on another CPU, spin until the owner stops
		 * running or the state of the lock changes.
		 */
		x = sx->sx_lock;
		if ((sx->lock_object.lo_flags & SX_NOADAPTIVE) == 0) {
			if ((x & SX_LOCK_SHARED) == 0) {
				x = SX_OWNER(x);
				owner = (struct thread *)x;
				if (TD_IS_RUNNING(owner)) {
					if (LOCK_LOG_TEST(&sx->lock_object, 0))
						CTR3(KTR_LOCK,
					    "%s: spinning on %p held by %p",
						    __func__, sx, owner);
					KTR_STATE1(KTR_SCHED, "thread",
					    sched_tdname(curthread), "spinning",
					    "lockname:\"%s\"",
					    sx->lock_object.lo_name);
					GIANT_SAVE();
					while (SX_OWNER(sx->sx_lock) == x &&
					    TD_IS_RUNNING(owner)) {
						cpu_spinwait();
#ifdef KDTRACE_HOOKS
						spin_cnt++;
#endif
					}
					KTR_STATE0(KTR_SCHED, "thread",
					    sched_tdname(curthread), "running");
					continue;
				}
			} else if (SX_SHARERS(x) && spintries < asx_retries) {
				KTR_STATE1(KTR_SCHED, "thread",
				    sched_tdname(curthread), "spinning",
				    "lockname:\"%s\"", sx->lock_object.lo_name);
				GIANT_SAVE();
				spintries++;
				for (i = 0; i < asx_loops; i++) {
					if (LOCK_LOG_TEST(&sx->lock_object, 0))
						CTR4(KTR_LOCK,
				    "%s: shared spinning on %p with %u and %u",
						    __func__, sx, spintries, i);
					x = sx->sx_lock;
					if ((x & SX_LOCK_SHARED) == 0 ||
					    SX_SHARERS(x) == 0)
						break;
					cpu_spinwait();
#ifdef KDTRACE_HOOKS
					spin_cnt++;
#endif
				}
				KTR_STATE0(KTR_SCHED, "thread",
				    sched_tdname(curthread), "running");
				if (i != asx_loops)
					continue;
			}
		}
#endif

		sleepq_lock(&sx->lock_object);
		x = sx->sx_lock;

		/*
		 * If the lock was released while spinning on the
		 * sleep queue chain lock, try again.
		 */
		if (x == SX_LOCK_UNLOCKED) {
			sleepq_release(&sx->lock_object);
			continue;
		}

#ifdef ADAPTIVE_SX
		/*
		 * The current lock owner might have started executing
		 * on another CPU (or the lock could have changed
		 * owners) while we were waiting on the sleep queue
		 * chain lock.  If so, drop the sleep queue lock and try
		 * again.
		 */
		if (!(x & SX_LOCK_SHARED) &&
		    (sx->lock_object.lo_flags & SX_NOADAPTIVE) == 0) {
			owner = (struct thread *)SX_OWNER(x);
			if (TD_IS_RUNNING(owner)) {
				sleepq_release(&sx->lock_object);
				continue;
			}
		}
#endif

		/*
		 * If an exclusive lock was released with both shared
		 * and exclusive waiters and a shared waiter hasn't
		 * woken up and acquired the lock yet, sx_lock will be
		 * set to SX_LOCK_UNLOCKED | SX_LOCK_EXCLUSIVE_WAITERS.
		 * If we see that value, try to acquire it once.  Note
		 * that we have to preserve SX_LOCK_EXCLUSIVE_WAITERS
		 * as there are other exclusive waiters still.  If we
		 * fail, restart the loop.
		 */
		if (x == (SX_LOCK_UNLOCKED | SX_LOCK_EXCLUSIVE_WAITERS)) {
			if (atomic_cmpset_acq_ptr(&sx->sx_lock,
			    SX_LOCK_UNLOCKED | SX_LOCK_EXCLUSIVE_WAITERS,
			    tid | SX_LOCK_EXCLUSIVE_WAITERS)) {
				sleepq_release(&sx->lock_object);
				CTR2(KTR_LOCK, "%s: %p claimed by new writer",
				    __func__, sx);
				break;
			}
			sleepq_release(&sx->lock_object);
			continue;
		}

		/*
		 * Try to set the SX_LOCK_EXCLUSIVE_WAITERS flag.  If we fail,
		 * then loop back and retry.
		 */
		if (!(x & SX_LOCK_EXCLUSIVE_WAITERS)) {
			if (!atomic_cmpset_ptr(&sx->sx_lock, x,
			    x | SX_LOCK_EXCLUSIVE_WAITERS)) {
				sleepq_release(&sx->lock_object);
				continue;
			}
			if (LOCK_LOG_TEST(&sx->lock_object, 0))
				CTR2(KTR_LOCK, "%s: %p set excl waiters flag",
				    __func__, sx);
		}

		/*
		 * Since we have been unable to acquire the exclusive
		 * lock and the exclusive waiters flag is set, we have
		 * to sleep.
		 */
		if (LOCK_LOG_TEST(&sx->lock_object, 0))
			CTR2(KTR_LOCK, "%s: %p blocking on sleep queue",
			    __func__, sx);

#ifdef KDTRACE_HOOKS
		sleep_time -= lockstat_nsecs(&sx->lock_object);
#endif
		GIANT_SAVE();
		sleepq_add(&sx->lock_object, NULL, sx->lock_object.lo_name,
		    SLEEPQ_SX | ((opts & SX_INTERRUPTIBLE) ?
		    SLEEPQ_INTERRUPTIBLE : 0), SQ_EXCLUSIVE_QUEUE);
		if (!(opts & SX_INTERRUPTIBLE))
			sleepq_wait(&sx->lock_object, 0);
		else
			error = sleepq_wait_sig(&sx->lock_object, 0);
#ifdef KDTRACE_HOOKS
		sleep_time += lockstat_nsecs(&sx->lock_object);
		sleep_cnt++;
#endif
		if (error) {
			if (LOCK_LOG_TEST(&sx->lock_object, 0))
				CTR2(KTR_LOCK,
			"%s: interruptible sleep by %p suspended by signal",
				    __func__, sx);
			break;
		}
		if (LOCK_LOG_TEST(&sx->lock_object, 0))
			CTR2(KTR_LOCK, "%s: %p resuming from sleep queue",
			    __func__, sx);
	}
#ifdef KDTRACE_HOOKS
	all_time += lockstat_nsecs(&sx->lock_object);
	if (sleep_time)
		LOCKSTAT_RECORD4(sx__block, sx, sleep_time,
		    LOCKSTAT_WRITER, (state & SX_LOCK_SHARED) == 0,
		    (state & SX_LOCK_SHARED) == 0 ? 0 : SX_SHARERS(state));
	if (spin_cnt > sleep_cnt)
		LOCKSTAT_RECORD4(sx__spin, sx, all_time - sleep_time,
		    LOCKSTAT_WRITER, (state & SX_LOCK_SHARED) == 0,
		    (state & SX_LOCK_SHARED) == 0 ? 0 : SX_SHARERS(state));
#endif
	if (!error)
		LOCKSTAT_PROFILE_OBTAIN_RWLOCK_SUCCESS(sx__acquire, sx,
		    contested, waittime, file, line, LOCKSTAT_WRITER);
	GIANT_RESTORE();
	return (error);
}
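The exclusive path is used the same way; a writer, reusing the made-up foo_lock/foo_count names from the sketch after example no. 18:

/* Usage sketch: writer taking the same sx lock exclusively. */
static void
foo_increment(void)
{
	sx_xlock(&foo_lock);	/* may end up in _sx_xlock_hard() above */
	foo_count++;
	sx_xunlock(&foo_lock);
}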
Example no. 20
void condition_wait (cond_t *cond, lock_t *lock ) {
    interrupt_status_t intr_status;
    /* Queue up before releasing the lock to avoid a lost wakeup. */
    intr_status = _interrupt_disable();
    sleepq_add(cond);
    lock_release(lock);
    thread_switch();
    _interrupt_set_state(intr_status);
    lock_acquire(lock);
}
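For completeness, broadcasting wakes every thread that slept on the condition; a sketch with the same assumed API as the condition_wait examples above:

/* Sketch of condition_broadcast (assumed API): wake every thread
 * that slept with sleepq_add(cond) in condition_wait. */
void condition_broadcast(cond_t *cond, lock_t *lock)
{
    interrupt_status_t intr_status = _interrupt_disable();
    sleepq_wake_all(cond);
    _interrupt_set_state(intr_status);
    (void)lock;  /* held by the caller */
}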