/*
 * 'park' an LWP waiting on a user-level synchronisation object.  The LWP
 * will remain parked until another LWP in the same process calls in and
 * requests that it be unparked.
 */
int
sys____lwp_park60(struct lwp *l, const struct sys____lwp_park60_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(clockid_t) clock_id;
		syscallarg(int) flags;
		syscallarg(const struct timespec *) ts;
		syscallarg(lwpid_t) unpark;
		syscallarg(const void *) hint;
		syscallarg(const void *) unparkhint;
	} */
	struct timespec timeout;
	struct timespec *timeoutp = NULL;
	int rv;

	/* Fetch the optional timeout from userland. */
	if (SCARG(uap, ts) != NULL) {
		rv = copyin(SCARG(uap, ts), &timeout, sizeof(timeout));
		if (rv != 0)
			return rv;
		timeoutp = &timeout;
	}

	/* Optionally wake a sibling LWP before parking ourselves. */
	if (SCARG(uap, unpark) != 0) {
		rv = lwp_unpark(SCARG(uap, unpark), SCARG(uap, unparkhint));
		if (rv != 0)
			return rv;
	}

	return lwp_park(SCARG(uap, clock_id), SCARG(uap, flags), timeoutp,
	    SCARG(uap, hint));
}
/*
 * Compat (pre-6.0 netbsd32 ABI) variant of _lwp_park(): park the calling
 * LWP until another LWP in the process unparks it, with an optional
 * absolute CLOCK_REALTIME timeout supplied as a 32-bit timespec.
 */
int
compat_60_netbsd32__lwp_park(struct lwp *l,
    const struct compat_60_netbsd32__lwp_park_args *uap, register_t *retval)
{
	/* {
		syscallarg(const netbsd32_timespecp) ts;
		syscallarg(lwpid_t) unpark;
		syscallarg(netbsd32_voidp) hint;
		syscallarg(netbsd32_voidp) unparkhint;
	} */
	struct timespec ts, *tsp;
	struct netbsd32_timespec ts32;
	int error;

	if (SCARG_P32(uap, ts) == NULL)
		tsp = NULL;
	else {
		error = copyin(SCARG_P32(uap, ts), &ts32, sizeof ts32);
		if (error != 0)
			return error;
		/*
		 * FIX: the 32-bit timespec must be converted to the native
		 * layout before use; previously `ts' was passed to
		 * lwp_park() uninitialized.
		 */
		netbsd32_to_timespec(&ts32, &ts);
		tsp = &ts;
	}

	/* Optionally wake a sibling LWP before parking ourselves. */
	if (SCARG(uap, unpark) != 0) {
		error = lwp_unpark(SCARG(uap, unpark),
		    SCARG_P32(uap, unparkhint));
		if (error != 0)
			return error;
	}

	/* The old ABI always used an absolute realtime timeout. */
	return lwp_park(CLOCK_REALTIME, TIMER_ABSTIME, tsp,
	    SCARG_P32(uap, hint));
}
/*
 * netbsd32 variant of ___lwp_park60(): park the calling LWP until it is
 * unparked, honouring the caller-supplied clock, flags and optional
 * 32-bit timeout.
 */
int
netbsd32____lwp_park60(struct lwp *l,
    const struct netbsd32____lwp_park60_args *uap, register_t *retval)
{
	/* {
		syscallarg(const netbsd32_clockid_t) clock_id;
		syscallarg(int) flags;
		syscallarg(const netbsd32_timespec50p) ts;
		syscallarg(netbsd32_lwpid_t) unpark;
		syscallarg(netbsd32_voidp) hint;
		syscallarg(netbsd32_voidp) unparkhint;
	} */
	struct timespec ts, *tsp;
	struct netbsd32_timespec ts32;
	int error;

	/* Fetch and widen the optional 32-bit timeout. */
	if (SCARG_P32(uap, ts) != NULL) {
		error = copyin(SCARG_P32(uap, ts), &ts32, sizeof ts32);
		if (error != 0)
			return error;
		netbsd32_to_timespec(&ts32, &ts);
		tsp = &ts;
	} else {
		tsp = NULL;
	}

	/* Optionally wake a sibling LWP before parking ourselves. */
	if (SCARG(uap, unpark) != 0) {
		error = lwp_unpark(SCARG(uap, unpark),
		    SCARG_P32(uap, unparkhint));
		if (error != 0)
			return error;
	}

	return lwp_park(SCARG(uap, clock_id), SCARG(uap, flags), tsp,
	    SCARG_P32(uap, hint));
}
/*
 * Sleep on this M's semaphore for up to ns nanoseconds (ns < 0 means
 * wait forever).  Returns 0 once the semaphore is acquired, -1 on
 * timeout.  The semaphore count is protected by the waitsemalock
 * spin-mutex; _lwp_park()/_lwp_unpark() provide the actual blocking.
 */
int32
runtime·semasleep(int64 ns)
{
	Timespec ts;

	// spin-mutex lock
	while(runtime·xchg(&m->waitsemalock, 1))
		runtime·osyield();

	for(;;) {
		// lock held
		if(m->waitsemacount == 0) {
			// sleep until semaphore != 0 or timeout.
			// thrsleep unlocks m->waitsemalock.
			if(ns < 0) {
				// TODO(jsing) - potential deadlock!
				//
				// There is a potential deadlock here since we
				// have to release the waitsemalock mutex
				// before we call lwp_park() to suspend the
				// thread. This allows another thread to
				// release the lock and call lwp_unpark()
				// before the thread is actually suspended.
				// If this occurs the current thread will end
				// up sleeping indefinitely. Unfortunately
				// the NetBSD kernel does not appear to provide
				// a mechanism for unlocking the userspace
				// mutex once the thread is actually parked.
				runtime·atomicstore(&m->waitsemalock, 0);
				runtime·lwp_park(nil, 0, &m->waitsemacount, nil);
			} else {
				// Absolute deadline: lwp_park takes an
				// absolute realtime timespec here.
				ns += runtime·nanotime();
				ts.tv_sec = ns/1000000000LL;
				ts.tv_nsec = ns%1000000000LL;
				// TODO(jsing) - potential deadlock!
				// See above for details.
				runtime·atomicstore(&m->waitsemalock, 0);
				runtime·lwp_park(&ts, 0, &m->waitsemacount, nil);
			}
			// reacquire lock
			while(runtime·xchg(&m->waitsemalock, 1))
				runtime·osyield();
		}

		// lock held (again)
		if(m->waitsemacount != 0) {
			// semaphore is available.
			m->waitsemacount--;
			// spin-mutex unlock
			runtime·atomicstore(&m->waitsemalock, 0);
			return 0;  // semaphore acquired
		}

		// semaphore not available.
		// if there is a timeout, stop now.
		// otherwise keep trying.
		// NOTE(review): with a timeout we give up after a single
		// park, even if it woke early — presumably acceptable for
		// this caller; confirm against semasleep's contract.
		if(ns >= 0)
			break;
	}

	// lock held but giving up
	// spin-mutex unlock
	runtime·atomicstore(&m->waitsemalock, 0);
	return -1;
}