/*
 * handle the lock being released whilst there are processes blocked on it
 * that can now run
 * - if we come here, then:
 *   - the 'active count' _reached_ zero
 *   - the 'waiting count' is non-zero
 * - the spinlock must be held by the caller
 * - woken process blocks are discarded from the list after having task zeroed
 * - writers are only woken if wakewrite is non-zero
 */
static inline struct rw_semaphore *
__rwsem_do_wake(struct rw_semaphore *sem, int wakewrite)
{
	struct rwsem_waiter *waiter;
	struct task_struct *tsk;
	int woken;

	rwsemtrace(sem, "Entering __rwsem_do_wake");

	waiter = list_entry(sem->wait_list.next, struct rwsem_waiter, list);

	if (!wakewrite) {
		if (waiter->flags & RWSEM_WAITING_FOR_WRITE)
			goto out;
		goto dont_wake_writers;
	}

	/* if we are allowed to wake writers try to grant a single write lock
	 * if there's a writer at the front of the queue
	 * - we leave the 'waiting count' incremented to signify potential
	 *   contention
	 */
	if (waiter->flags & RWSEM_WAITING_FOR_WRITE) {
		sem->activity = -1;
		list_del(&waiter->list);
		tsk = waiter->task;
		/* Don't touch waiter after ->task has been NULLed */
		smp_mb();
		waiter->task = NULL;
		wake_up_process(tsk);
		put_task_struct(tsk);
		goto out;
	}

	/* grant an infinite number of read locks to the readers at the front
	 * of the queue */
 dont_wake_writers:
	woken = 0;
	while (waiter->flags & RWSEM_WAITING_FOR_READ) {
		struct list_head *next = waiter->list.next;

		list_del(&waiter->list);
		tsk = waiter->task;
		smp_mb();
		waiter->task = NULL;
		wake_up_process(tsk);
		put_task_struct(tsk);
		woken++;
		if (list_empty(&sem->wait_list))
			break;
		waiter = list_entry(next, struct rwsem_waiter, list);
	}

	sem->activity += woken;

 out:
	rwsemtrace(sem, "Leaving __rwsem_do_wake");
	return sem;
}
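/*
 * For reference: a minimal sketch of the structures the spinlock-based
 * paths above and below operate on, in the style of
 * include/linux/rwsem-spinlock.h (the optional debug field is omitted)
 * -- illustrative, not a verbatim copy of any one kernel version.
 */
struct rw_semaphore {
	__s32			activity;	/* 0: free, >0: N readers, -1: one writer */
	spinlock_t		wait_lock;	/* protects activity and wait_list */
	struct list_head	wait_list;	/* queued rwsem_waiter blocks */
};

struct rwsem_waiter {
	struct list_head	list;		/* link in sem->wait_list */
	struct task_struct	*task;		/* zeroed by the waker as the handoff */
	unsigned int		flags;		/* what the waiter is waiting for */
#define RWSEM_WAITING_FOR_READ	0x00000001
#define RWSEM_WAITING_FOR_WRITE	0x00000002
};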
/*
 * wait for the write lock to be granted
 */
struct rw_semaphore fastcall *rwsem_down_write_failed(struct rw_semaphore *sem)
{
	struct rwsem_waiter waiter;

	rwsemtrace(sem, "Entering rwsem_down_write_failed");

	waiter.flags = RWSEM_WAITING_FOR_WRITE;
	rwsem_down_failed_common(sem, &waiter, -RWSEM_ACTIVE_BIAS);

	rwsemtrace(sem, "Leaving rwsem_down_write_failed");
	return sem;
}
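/*
 * The adjustment passed above comes from the per-architecture count
 * layout. A representative sketch of the 32-bit constants (in the style
 * of include/asm-i386/rwsem.h of this era) -- assumption: wider
 * architectures follow the same scheme scaled up.
 */
#define RWSEM_UNLOCKED_VALUE	0x00000000
#define RWSEM_ACTIVE_BIAS	0x00000001
#define RWSEM_ACTIVE_MASK	0x0000ffff	/* the 'active part' */
#define RWSEM_WAITING_BIAS	(-0x00010000)	/* the 'waiting part' */
#define RWSEM_ACTIVE_READ_BIAS	RWSEM_ACTIVE_BIAS
#define RWSEM_ACTIVE_WRITE_BIAS	(RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)
/*
 * The failed down_write() fast path has already added
 * RWSEM_ACTIVE_WRITE_BIAS, so adjusting by -RWSEM_ACTIVE_BIAS here leaves
 * just the waiting bias contributed while the task sleeps.
 */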
/*
 * release a read lock on the semaphore
 */
void fastcall __up_read(struct rw_semaphore *sem)
{
	rwsemtrace(sem, "Entering __up_read");

	spin_lock(&sem->wait_lock);

	if (--sem->activity == 0 && !list_empty(&sem->wait_list))
		sem = __rwsem_wake_one_writer(sem);

	spin_unlock(&sem->wait_lock);

	rwsemtrace(sem, "Leaving __up_read");
}
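/*
 * __rwsem_wake_one_writer() is not part of this selection; a sketch of
 * its likely shape, using the same task-zeroing handoff as the
 * two-argument __rwsem_do_wake() above (illustrative, not a verbatim
 * copy).
 */
static inline struct rw_semaphore *
__rwsem_wake_one_writer(struct rw_semaphore *sem)
{
	struct rwsem_waiter *waiter;
	struct task_struct *tsk;

	sem->activity = -1;

	waiter = list_entry(sem->wait_list.next, struct rwsem_waiter, list);
	list_del(&waiter->list);

	tsk = waiter->task;
	smp_mb();
	waiter->task = NULL;	/* the handoff the sleeper polls for */
	wake_up_process(tsk);
	put_task_struct(tsk);
	return sem;
}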
/*
 * release a write lock on the semaphore
 */
void fastcall __up_write(struct rw_semaphore *sem)
{
	rwsemtrace(sem, "Entering __up_write");

	spin_lock(&sem->wait_lock);

	sem->activity = 0;
	if (!list_empty(&sem->wait_list))
		sem = __rwsem_do_wake(sem, 1);

	spin_unlock(&sem->wait_lock);

	rwsemtrace(sem, "Leaving __up_write");
}
/*
 * wait for the read lock to be granted
 */
struct rw_semaphore fastcall __sched *
rwsem_down_read_failed(struct rw_semaphore *sem)
{
	struct rwsem_waiter waiter;

	rwsemtrace(sem, "Entering rwsem_down_read_failed");

	waiter.flags = RWSEM_WAITING_FOR_READ;
	rwsem_down_failed_common(sem, &waiter,
				RWSEM_WAITING_BIAS - RWSEM_ACTIVE_BIAS);

	rwsemtrace(sem, "Leaving rwsem_down_read_failed");
	return sem;
}
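/*
 * Both failed-lock paths funnel through rwsem_down_failed_common(), which
 * is not included in this selection. A sketch of its shape, reconstructed
 * from the surrounding code (take the details as indicative rather than
 * authoritative): queue the waiter, apply the count adjustment, wake the
 * queue head if nothing is active any more, then sleep until granted.
 */
static inline struct rw_semaphore *
rwsem_down_failed_common(struct rw_semaphore *sem,
			 struct rwsem_waiter *waiter, signed long adjustment)
{
	struct task_struct *tsk = current;
	signed long count;

	set_task_state(tsk, TASK_UNINTERRUPTIBLE);

	/* set up my own style of waitqueue */
	spin_lock(&sem->wait_lock);
	waiter->task = tsk;
	get_task_struct(tsk);

	list_add_tail(&waiter->list, &sem->wait_list);

	/* we're now waiting on the lock, but no longer actively locking */
	count = rwsem_atomic_update(adjustment, sem);

	/* if there are no active locks, wake the front queued process(es) up */
	if (!(count & RWSEM_ACTIVE_MASK))
		sem = __rwsem_do_wake(sem, 0);

	spin_unlock(&sem->wait_lock);

	/* wait to be given the lock */
	for (;;) {
		if (!waiter->task)
			break;
		schedule();
		set_task_state(tsk, TASK_UNINTERRUPTIBLE);
	}

	tsk->state = TASK_RUNNING;
	return sem;
}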
/*
 * release a read lock on the semaphore
 */
void fastcall __up_read(struct rw_semaphore *sem)
{
	unsigned long flags;

	rwsemtrace(sem, "Entering __up_read");

	spin_lock_irqsave(&sem->wait_lock, flags);

	if (--sem->activity == 0 && !list_empty(&sem->wait_list))
		sem = __rwsem_wake_one_writer(sem);

	spin_unlock_irqrestore(&sem->wait_lock, flags);

	rwsemtrace(sem, "Leaving __up_read");
}
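/*
 * For completeness, the write-release counterpart in this irq-safe
 * variant would look roughly like the following sketch; the
 * non-irq-safe version appears earlier in this file.
 */
void fastcall __up_write(struct rw_semaphore *sem)
{
	unsigned long flags;

	rwsemtrace(sem, "Entering __up_write");

	spin_lock_irqsave(&sem->wait_lock, flags);

	sem->activity = 0;
	if (!list_empty(&sem->wait_list))
		sem = __rwsem_do_wake(sem, 1);

	spin_unlock_irqrestore(&sem->wait_lock, flags);

	rwsemtrace(sem, "Leaving __up_write");
}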
/*
 * downgrade a write lock into a read lock
 * - caller incremented waiting part of count and discovered it still negative
 * - just wake up any readers at the front of the queue
 */
struct rw_semaphore fastcall *rwsem_downgrade_wake(struct rw_semaphore *sem)
{
	rwsemtrace(sem, "Entering rwsem_downgrade_wake");

	spin_lock(&sem->wait_lock);

	/* do nothing if list empty */
	if (!list_empty(&sem->wait_list))
		sem = __rwsem_do_wake(sem, 1);

	spin_unlock(&sem->wait_lock);

	rwsemtrace(sem, "Leaving rwsem_downgrade_wake");
	return sem;
}
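/*
 * For context, a hypothetical generic fast path that would end up calling
 * rwsem_downgrade_wake(); real kernels of this era do this in
 * per-architecture code, so treat this as an illustrative sketch only.
 * Cancelling the waiting bias turns the held RWSEM_ACTIVE_WRITE_BIAS into
 * RWSEM_ACTIVE_READ_BIAS; a still-negative result means waiters remain
 * queued and the readers among them need waking.
 */
static inline void __downgrade_write(struct rw_semaphore *sem)
{
	if (rwsem_atomic_update(-RWSEM_WAITING_BIAS, sem) < 0)
		rwsem_downgrade_wake(sem);
}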
/*
 * downgrade a write lock into a read lock
 * - just wake up any readers at the front of the queue
 */
void fastcall __downgrade_write(struct rw_semaphore *sem)
{
	unsigned long flags;

	rwsemtrace(sem, "Entering __downgrade_write");

	spin_lock_irqsave(&sem->wait_lock, flags);

	sem->activity = 1;
	if (!list_empty(&sem->wait_list))
		sem = __rwsem_do_wake(sem, 0);

	spin_unlock_irqrestore(&sem->wait_lock, flags);

	rwsemtrace(sem, "Leaving __downgrade_write");
}
/*
 * trylock for writing -- returns 1 if successful, 0 if contention
 */
int __down_write_trylock(struct rw_semaphore *sem)
{
	int ret = 0;

	rwsemtrace(sem, "Entering __down_write_trylock");

	spin_lock(&sem->wait_lock);

	if (sem->activity == 0 && list_empty(&sem->wait_list)) {
		/* granted */
		sem->activity = -1;
		ret = 1;
	}

	spin_unlock(&sem->wait_lock);

	rwsemtrace(sem, "Leaving __down_write_trylock");
	return ret;
}
/*
 * get a write lock on the semaphore
 * - we increment the waiting count anyway to indicate an exclusive lock
 */
void fastcall __sched __down_write(struct rw_semaphore *sem)
{
	struct rwsem_waiter waiter;
	struct task_struct *tsk;
	unsigned long flags;

	rwsemtrace(sem, "Entering __down_write");

	spin_lock_irqsave(&sem->wait_lock, flags);

	if (sem->activity == 0 && list_empty(&sem->wait_list)) {
		/* granted */
		sem->activity = -1;
		spin_unlock_irqrestore(&sem->wait_lock, flags);
		goto out;
	}

	tsk = current;
	set_task_state(tsk, TASK_UNINTERRUPTIBLE);

	/* set up my own style of waitqueue */
	waiter.task = tsk;
	waiter.flags = RWSEM_WAITING_FOR_WRITE;
	get_task_struct(tsk);

	list_add_tail(&waiter.list, &sem->wait_list);

	/* we don't need to touch the semaphore struct anymore */
	spin_unlock_irqrestore(&sem->wait_lock, flags);

	/* wait to be given the lock */
	for (;;) {
		if (!waiter.task)
			break;
		schedule();
		set_task_state(tsk, TASK_UNINTERRUPTIBLE);
	}

	tsk->state = TASK_RUNNING;
 out:
	rwsemtrace(sem, "Leaving __down_write");
}
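/*
 * At the API level these slow paths sit behind down_write()/up_write().
 * A minimal, hypothetical caller (my_sem and shared_value are
 * illustrative names, assuming the usual DECLARE_RWSEM() initializer):
 */
static DECLARE_RWSEM(my_sem);
static int shared_value;

static void update_shared(int v)
{
	down_write(&my_sem);	/* contended case falls back to __down_write() */
	shared_value = v;
	up_write(&my_sem);
}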
/*
 * trylock for writing -- returns 1 if successful, 0 if contention
 */
int fastcall __down_write_trylock(struct rw_semaphore *sem)
{
	unsigned long flags;
	int ret = 0;

	rwsemtrace(sem, "Entering __down_write_trylock");

	spin_lock_irqsave(&sem->wait_lock, flags);

	if (sem->activity == 0 && list_empty(&sem->wait_list)) {
		/* granted */
		sem->activity = -1;
		ret = 1;
	}

	spin_unlock_irqrestore(&sem->wait_lock, flags);

	rwsemtrace(sem, "Leaving __down_write_trylock");
	return ret;
}
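/*
 * The read-side trylock is not in this selection; by symmetry with the
 * grant condition in __down_read() below, it would look roughly like this
 * sketch (illustrative, not a verbatim copy).
 */
int fastcall __down_read_trylock(struct rw_semaphore *sem)
{
	unsigned long flags;
	int ret = 0;

	spin_lock_irqsave(&sem->wait_lock, flags);

	if (sem->activity >= 0 && list_empty(&sem->wait_list)) {
		/* granted */
		sem->activity++;
		ret = 1;
	}

	spin_unlock_irqrestore(&sem->wait_lock, flags);

	return ret;
}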
/*
 * get a read lock on the semaphore
 */
void __down_read(struct rw_semaphore *sem)
{
	struct rwsem_waiter waiter;
	struct task_struct *tsk;

	rwsemtrace(sem, "Entering __down_read");

	spin_lock(&sem->wait_lock);

	if (sem->activity >= 0 && list_empty(&sem->wait_list)) {
		/* granted */
		sem->activity++;
		spin_unlock(&sem->wait_lock);
		goto out;
	}

	tsk = current;
	set_task_state(tsk, TASK_UNINTERRUPTIBLE);

	/* set up my own style of waitqueue */
	waiter.task = tsk;
	waiter.flags = RWSEM_WAITING_FOR_READ;

	list_add_tail(&waiter.list, &sem->wait_list);

	/* we don't need to touch the semaphore struct anymore */
	spin_unlock(&sem->wait_lock);

	/* wait to be given the lock */
	for (;;) {
		if (!waiter.flags)
			break;
		schedule();
		set_task_state(tsk, TASK_UNINTERRUPTIBLE);
	}

	tsk->state = TASK_RUNNING;
 out:
	rwsemtrace(sem, "Leaving __down_read");
}
/*
 * handle the lock being released whilst there are processes blocked on it
 * that can now run
 * - if we come here, then:
 *   - the 'active count' _reached_ zero
 *   - the 'waiting count' is non-zero
 * - the spinlock must be held by the caller
 * - woken process blocks are discarded from the list after having flags
 *   zeroised
 */
static inline struct rw_semaphore *__rwsem_do_wake(struct rw_semaphore *sem)
{
	struct rwsem_waiter *waiter;
	int woken;

	rwsemtrace(sem, "Entering __rwsem_do_wake");

	waiter = list_entry(sem->wait_list.next, struct rwsem_waiter, list);

	/* try to grant a single write lock if there's a writer at the front
	 * of the queue
	 * - we leave the 'waiting count' incremented to signify potential
	 *   contention
	 */
	if (waiter->flags & RWSEM_WAITING_FOR_WRITE) {
		sem->activity = -1;
		list_del(&waiter->list);
		waiter->flags = 0;
		wake_up_process(waiter->task);
		goto out;
	}

	/* grant an infinite number of read locks to the readers at the front
	 * of the queue */
	woken = 0;
	do {
		list_del(&waiter->list);
		waiter->flags = 0;
		wake_up_process(waiter->task);
		woken++;
		if (list_empty(&sem->wait_list))
			break;
		waiter = list_entry(sem->wait_list.next,
					struct rwsem_waiter, list);
	} while (waiter->flags & RWSEM_WAITING_FOR_READ);

	sem->activity += woken;

 out:
	rwsemtrace(sem, "Leaving __rwsem_do_wake");
	return sem;
}
/*
 * handle the lock being released whilst there are processes blocked on it
 * that can now run
 * - if we come here from up_xxxx(), then:
 *   - the 'active part' of count (&0x0000ffff) reached 0 (but may have changed)
 *   - the 'waiting part' of count (&0xffff0000) is -ve (and will still be so)
 *   - there must be someone on the queue
 * - the spinlock must be held by the caller
 * - woken process blocks are discarded from the list after having task zeroed
 * - writers are only woken if downgrading is false
 */
static inline struct rw_semaphore *
__rwsem_do_wake(struct rw_semaphore *sem, int downgrading)
{
	struct rwsem_waiter *waiter;
	struct task_struct *tsk;
	struct list_head *next;
	signed long oldcount, woken, loop;

	rwsemtrace(sem, "Entering __rwsem_do_wake");

	if (downgrading)
		goto dont_wake_writers;

	/* if we came through an up_xxxx() call, we only wake someone up
	 * if we can transition the active part of the count from 0 -> 1
	 */
 try_again:
	oldcount = rwsem_atomic_update(RWSEM_ACTIVE_BIAS, sem)
						- RWSEM_ACTIVE_BIAS;
	if (oldcount & RWSEM_ACTIVE_MASK)
		goto undo;

	waiter = list_entry(sem->wait_list.next, struct rwsem_waiter, list);

	/* try to grant a single write lock if there's a writer at the front
	 * of the queue - note we leave the 'active part' of the count
	 * incremented by 1 and the waiting part incremented by 0x00010000
	 */
	if (!(waiter->flags & RWSEM_WAITING_FOR_WRITE))
		goto readers_only;

	/* We must be careful not to touch 'waiter' after we set ->task =
	 * NULL. It is allocated on the waiter's stack and may become invalid
	 * at any time after that point (due to a wakeup from another source).
	 */
	list_del(&waiter->list);
	tsk = waiter->task;
	mb();
	waiter->task = NULL;
	wake_up_process(tsk);
	put_task_struct(tsk);
	goto out;

	/* don't want to wake any writers */
 dont_wake_writers:
	waiter = list_entry(sem->wait_list.next, struct rwsem_waiter, list);
	if (waiter->flags & RWSEM_WAITING_FOR_WRITE)
		goto out;

	/* grant an infinite number of read locks to the readers at the front
	 * of the queue
	 * - note we increment the 'active part' of the count by the number of
	 *   readers before waking any processes up
	 */
 readers_only:
	woken = 0;
	do {
		woken++;

		if (waiter->list.next == &sem->wait_list)
			break;

		waiter = list_entry(waiter->list.next,
					struct rwsem_waiter, list);

	} while (waiter->flags & RWSEM_WAITING_FOR_READ);

	loop = woken;
	woken *= RWSEM_ACTIVE_BIAS - RWSEM_WAITING_BIAS;
	if (!downgrading)
		/* we'd already done one increment earlier */
		woken -= RWSEM_ACTIVE_BIAS;

	rwsem_atomic_add(woken, sem);

	next = sem->wait_list.next;
	for (; loop > 0; loop--) {
		waiter = list_entry(next, struct rwsem_waiter, list);
		next = waiter->list.next;
		tsk = waiter->task;
		mb();
		waiter->task = NULL;
		wake_up_process(tsk);
		put_task_struct(tsk);
	}

	sem->wait_list.next = next;
	next->prev = &sem->wait_list;

 out:
	rwsemtrace(sem, "Leaving __rwsem_do_wake");
	return sem;

	/* undo the change to count, but check for a transition 1->0 */
 undo:
	if (rwsem_atomic_update(-RWSEM_ACTIVE_BIAS, sem) != 0)
		goto out;
	goto try_again;
}
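/*
 * rwsem_atomic_update() and rwsem_atomic_add() are per-architecture
 * primitives (x86 implements them with a locked xadd). A portable sketch
 * of their semantics, assuming the count lives in a field named 'count'
 * that can be treated as an atomic long -- illustrative only.
 */
static inline signed long
rwsem_atomic_update(signed long delta, struct rw_semaphore *sem)
{
	/* atomically add delta and return the new count value */
	return atomic_long_add_return(delta, (atomic_long_t *)&sem->count);
}

static inline void
rwsem_atomic_add(signed long delta, struct rw_semaphore *sem)
{
	/* atomically add delta, discarding the result */
	atomic_long_add(delta, (atomic_long_t *)&sem->count);
}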
/*
 * handle the lock being released whilst there are processes blocked on it
 * that can now run
 * - if we come here, then:
 *   - the 'active part' of the count (&0x0000ffff) reached zero but has
 *     been re-incremented
 *   - the 'waiting part' of the count (&0xffff0000) is negative (and will
 *     still be so)
 *   - there must be someone on the queue
 * - the spinlock must be held by the caller
 * - woken process blocks are discarded from the list after having flags
 *   zeroised
 * - writers are only woken if wakewrite is non-zero
 */
static inline struct rw_semaphore *
__rwsem_do_wake(struct rw_semaphore *sem, int wakewrite)
{
	struct rwsem_waiter *waiter;
	struct list_head *next;
	signed long oldcount;
	int woken, loop;

	rwsemtrace(sem, "Entering __rwsem_do_wake");

	if (!wakewrite)
		goto dont_wake_writers;

	/* only wake someone up if we can transition the active part of the
	 * count from 0 -> 1 */
 try_again:
	oldcount = rwsem_atomic_update(RWSEM_ACTIVE_BIAS, sem)
						- RWSEM_ACTIVE_BIAS;
	if (oldcount & RWSEM_ACTIVE_MASK)
		goto undo;

	waiter = list_entry(sem->wait_list.next, struct rwsem_waiter, list);

	/* try to grant a single write lock if there's a writer at the front
	 * of the queue
	 * - note we leave the 'active part' of the count incremented by 1
	 *   and the waiting part incremented by 0x00010000
	 */
	if (!(waiter->flags & RWSEM_WAITING_FOR_WRITE))
		goto readers_only;

	list_del(&waiter->list);
	waiter->flags = 0;
	wake_up_process(waiter->task);
	goto out;

	/* don't want to wake any writers */
 dont_wake_writers:
	waiter = list_entry(sem->wait_list.next, struct rwsem_waiter, list);
	if (waiter->flags & RWSEM_WAITING_FOR_WRITE)
		goto out;

	/* grant an infinite number of read locks to the readers at the front
	 * of the queue
	 * - note we increment the 'active part' of the count by the number of
	 *   readers (less one for the activity decrement we've already done)
	 *   before waking any processes up
	 */
 readers_only:
	woken = 0;
	do {
		woken++;

		if (waiter->list.next == &sem->wait_list)
			break;

		waiter = list_entry(waiter->list.next,
					struct rwsem_waiter, list);

	} while (waiter->flags & RWSEM_WAITING_FOR_READ);

	loop = woken;
	woken *= RWSEM_ACTIVE_BIAS - RWSEM_WAITING_BIAS;
	woken -= RWSEM_ACTIVE_BIAS;
	rwsem_atomic_add(woken, sem);

	next = sem->wait_list.next;
	for (; loop > 0; loop--) {
		waiter = list_entry(next, struct rwsem_waiter, list);
		next = waiter->list.next;
		waiter->flags = 0;
		wake_up_process(waiter->task);
	}

	sem->wait_list.next = next;
	next->prev = &sem->wait_list;

 out:
	rwsemtrace(sem, "Leaving __rwsem_do_wake");
	return sem;

	/* undo the change to count, but check for a transition 1->0 */
 undo:
	if (rwsem_atomic_update(-RWSEM_ACTIVE_BIAS, sem) != 0)
		goto out;
	goto try_again;
}
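/*
 * The release-side entry point that reaches this function, rwsem_wake(),
 * is not part of this selection; for this wakewrite variant it would look
 * roughly like the following sketch (illustrative, not a verbatim copy).
 */
struct rw_semaphore *rwsem_wake(struct rw_semaphore *sem)
{
	rwsemtrace(sem, "Entering rwsem_wake");

	spin_lock(&sem->wait_lock);

	/* do nothing if list empty */
	if (!list_empty(&sem->wait_list))
		sem = __rwsem_do_wake(sem, 1);

	spin_unlock(&sem->wait_lock);

	rwsemtrace(sem, "Leaving rwsem_wake");
	return sem;
}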