/* Lock each path in PATH_REVS (a hash mapping const char *path ->
   svn_revnum_t *current_rev) in SESSION's repository, invoking
   LOCK_FUNC with LOCK_BATON once per path with the result.

   COMMENT is an optional lock comment; FORCE steals existing locks.
   Per-path lock failures (SVN_ERR_IS_LOCK_ERROR) are reported through
   LOCK_FUNC rather than aborting the whole operation; any other error,
   or an error returned by LOCK_FUNC itself, stops iteration and is
   returned to the caller.

   Fix over the previous revision: the early-return paths now destroy
   ITERPOOL instead of leaving it to be reclaimed only when POOL is
   destroyed. */
static svn_error_t *
svn_ra_local__lock(svn_ra_session_t *session,
                   apr_hash_t *path_revs,
                   const char *comment,
                   svn_boolean_t force,
                   svn_ra_lock_callback_t lock_func,
                   void *lock_baton,
                   apr_pool_t *pool)
{
  svn_ra_local__session_baton_t *sess = session->priv;
  apr_hash_index_t *hi;
  apr_pool_t *iterpool = svn_pool_create(pool);

  /* A username is absolutely required to lock a path. */
  SVN_ERR(get_username(session, pool));

  for (hi = apr_hash_first(pool, path_revs); hi; hi = apr_hash_next(hi))
    {
      svn_lock_t *lock;
      const void *key;
      const char *path;
      void *val;
      svn_revnum_t *revnum;
      const char *abs_path;
      svn_error_t *err, *callback_err = NULL;

      svn_pool_clear(iterpool);
      apr_hash_this(hi, &key, NULL, &val);
      path = key;
      revnum = val;

      /* Sessions are rooted below the repository root; convert the
         session-relative PATH to a repository-absolute path. */
      abs_path = svn_path_join(sess->fs_path->data, path, iterpool);

      /* This wrapper will call pre- and post-lock hooks. */
      err = svn_repos_fs_lock(&lock, sess->repos, abs_path, NULL, comment,
                              FALSE /* not DAV comment */,
                              0 /* no expiration */, *revnum, force,
                              iterpool);

      if (err && !SVN_ERR_IS_LOCK_ERROR(err))
        {
          /* Fatal (non-lock) error: clean up the iteration subpool
             before bailing out.  ERR itself does not live in ITERPOOL,
             so returning it afterwards is safe. */
          svn_pool_destroy(iterpool);
          return err;
        }

      /* Report success (LOCK) or a per-path lock failure (ERR) to the
         caller's callback; the lock is valid only when ERR is NULL. */
      if (lock_func)
        callback_err = lock_func(lock_baton, path, TRUE, err ? NULL : lock,
                                 err, iterpool);

      svn_error_clear(err);

      if (callback_err)
        {
          svn_pool_destroy(iterpool);
          return callback_err;
        }
    }

  svn_pool_destroy(iterpool);
  return SVN_NO_ERROR;
}
/* Unlock each path in PATH_TOKENS (a hash mapping const char *path ->
   const char *token, where "" stands in for a NULL token) in SESSION's
   repository, invoking LOCK_FUNC with LOCK_BATON once per path.

   FORCE breaks locks even without a matching token.  Per-path unlock
   failures (SVN_ERR_IS_UNLOCK_ERROR) are reported through LOCK_FUNC
   rather than aborting the whole operation; any other error, or an
   error returned by LOCK_FUNC itself, stops iteration and is returned
   to the caller.

   Fix over the previous revision: the early-return paths now destroy
   ITERPOOL instead of leaving it to be reclaimed only when POOL is
   destroyed. */
static svn_error_t *
svn_ra_local__unlock(svn_ra_session_t *session,
                     apr_hash_t *path_tokens,
                     svn_boolean_t force,
                     svn_ra_lock_callback_t lock_func,
                     void *lock_baton,
                     apr_pool_t *pool)
{
  svn_ra_local__session_baton_t *sess = session->priv;
  apr_hash_index_t *hi;
  apr_pool_t *iterpool = svn_pool_create(pool);

  /* A username is absolutely required to unlock a path. */
  SVN_ERR(get_username(session, pool));

  for (hi = apr_hash_first(pool, path_tokens); hi; hi = apr_hash_next(hi))
    {
      const void *key;
      const char *path;
      void *val;
      const char *abs_path, *token;
      svn_error_t *err, *callback_err = NULL;

      svn_pool_clear(iterpool);
      apr_hash_this(hi, &key, NULL, &val);
      path = key;
      /* Since we can't store NULL values in a hash, we turn "" to
         NULL here. */
      if (strcmp(val, "") != 0)
        token = val;
      else
        token = NULL;

      /* Sessions are rooted below the repository root; convert the
         session-relative PATH to a repository-absolute path. */
      abs_path = svn_path_join(sess->fs_path->data, path, iterpool);

      /* This wrapper will call pre- and post-unlock hooks. */
      err = svn_repos_fs_unlock(sess->repos, abs_path, token, force,
                                iterpool);

      if (err && !SVN_ERR_IS_UNLOCK_ERROR(err))
        {
          /* Fatal (non-unlock) error: clean up the iteration subpool
             before bailing out.  ERR itself does not live in ITERPOOL,
             so returning it afterwards is safe. */
          svn_pool_destroy(iterpool);
          return err;
        }

      /* Report success or a per-path unlock failure (ERR) to the
         caller's callback. */
      if (lock_func)
        callback_err = lock_func(lock_baton, path, FALSE, NULL, err,
                                 iterpool);

      svn_error_clear(err);

      if (callback_err)
        {
          svn_pool_destroy(iterpool);
          return callback_err;
        }
    }

  svn_pool_destroy(iterpool);
  return SVN_NO_ERROR;
}
/*
 * call-seq:
 *    mutex.lock -> self
 *
 * Attempts to grab the lock and waits if it isn't available.
 * Raises +ThreadError+ if +mutex+ was locked by the current thread.
 */
VALUE
rb_mutex_lock(VALUE self)
{
    rb_thread_t *th = GET_THREAD();
    rb_mutex_t *mutex;
    GetMutexPtr(self, mutex);

    /* When running trap handler */
    /* Mutexes not explicitly marked allow_trap must not be taken from a
       signal (trap) context, where the thread's interrupt mask has
       TRAP_INTERRUPT_MASK set. */
    if (!mutex->allow_trap && th->interrupt_mask & TRAP_INTERRUPT_MASK) {
        rb_raise(rb_eThreadError, "can't be called from trap context");
    }

    /* Fast path: try to take the mutex without blocking. */
    if (rb_mutex_trylock(self) == Qfalse) {
        /* Relocking a mutex we already hold is a programming error. */
        if (mutex->th == th) {
            rb_raise(rb_eThreadError, "deadlock; recursive locking");
        }

        /* Slow path: loop until this thread owns the mutex.  Each
           iteration blocks in lock_func() with the GVL released, then
           re-checks ownership and pending interrupts. */
        while (mutex->th != th) {
            int interrupted;
            enum rb_thread_status prev_status = th->status;
            /* 0 = wait indefinitely; 100ms when this thread is elected
               patrol thread (see below).  volatile: read after the
               GVL-released region. */
            volatile int timeout_ms = 0;
            struct rb_unblock_callback oldubf;

            /* Install lock_interrupt so Thread#kill / signals can wake
               us out of the native wait; restored further down. */
            set_unblock_function(th, lock_interrupt, mutex, &oldubf, FALSE);
            th->status = THREAD_STOPPED_FOREVER;
            /* Advertise which mutex we are blocked on, for the
               VM-wide deadlock detector. */
            th->locking_mutex = self;

            native_mutex_lock(&mutex->lock);
            th->vm->sleeper++;
            /*
             * Carefully! while some contended threads are in lock_func(),
             * vm->sleeper is an unstable value. we have to avoid both
             * deadlock and busy loop.
             */
            /* If every living thread is now asleep, nobody would ever
               notice a deadlock.  Elect at most one "patrol" thread to
               wake up every 100ms and run the deadlock check. */
            if ((vm_living_thread_num(th->vm) == th->vm->sleeper) &&
                !patrol_thread) {
                timeout_ms = 100;
                patrol_thread = th;
            }

            /* Release the GVL around the native wait so other Ruby
               threads can run (including the current owner, so it can
               unlock). */
            GVL_UNLOCK_BEGIN();
            interrupted = lock_func(th, mutex, (int)timeout_ms);
            native_mutex_unlock(&mutex->lock);
            GVL_UNLOCK_END();

            if (patrol_thread == th)
                patrol_thread = NULL;

            reset_unblock_function(th, &oldubf);

            th->locking_mutex = Qfalse;
            /* NOTE(review): interrupted == 2 appears to mean the patrol
               timeout fired while the mutex is still held elsewhere, so
               run the VM deadlock check -- confirm against lock_func. */
            if (mutex->th && interrupted == 2) {
                rb_check_deadlock(th->vm);
            }
            /* Restore the pre-wait status unless something else already
               changed it while we slept. */
            if (th->status == THREAD_STOPPED_FOREVER) {
                th->status = prev_status;
            }
            th->vm->sleeper--;
            /* If lock_func handed us the mutex, record ownership in the
               thread's keeping_mutexes list. */
            if (mutex->th == th) mutex_locked(th, self);

            /* Process any interrupt (kill, signal, ...) that woke us;
               may raise, which is why ownership bookkeeping above runs
               first. */
            if (interrupted) {
                RUBY_VM_CHECK_INTS_BLOCKING(th);
            }
        }
    }
    return self;
}