// Acquire this object's lock for the calling thread.
//
// Lock algorithm, in order of attempts:
//   1. CAS an "unlocked" header into a thin-locked header owning-thread id.
//   2. If already thin-locked by this thread, bump the recursion count
//      (inflating the lock if the count field overflows).
//   3. If thin-locked by another thread, contend via
//      ObjectMemory::contend_for_lock.
//   4. If the aux word is used for an object_id/handle, inflate the lock.
//
// Params:
//   state/gct  - VM state and GC token; `this` is kept on-stack via OnStack<1>
//                so the GC can update the pointer while we block.
//   us         - wait budget, forwarded to the inflated/contended paths.
//                NOTE(review): presumably microseconds given the name — confirm.
//   interrupt  - whether a pending local interrupt may abort the wait.
//
// Returns one of the LockStatus values (eLocked on success; the contended
// and inflated paths may return other statuses such as timeout/interrupt).
LockStatus ObjectHeader::lock(STATE, GCToken gct, size_t us, bool interrupt) {
  // #1 Attempt to lock an unlocked object using CAS.
  ObjectHeader* self = this;
  // Keep `self` visible to the GC: paths below can block and trigger a GC,
  // which may move the object.
  OnStack<1> os(state, self);

step1:
  // Construct 2 new headers: one is the version we hope that
  // is in use and the other is what we want it to be. Then CAS
  // the new one into place.
  HeaderWord orig = self->header;
  // The "expected" header: uninflated, aux word empty — i.e. fully unlocked.
  orig.f.inflated = 0;
  orig.f.meaning = eAuxWordEmpty;
  orig.f.aux_word = 0;

  // The "desired" header: thin lock owned by this thread, recursion count 0
  // (the low cAuxLockTIDShift bits hold the count and start at zero).
  HeaderWord new_val = orig;
  new_val.f.meaning = eAuxWordLock;
  new_val.f.aux_word = state->vm()->thread_id() << cAuxLockTIDShift;

  if(self->header.atomic_set(orig, new_val)) {
    if(cDebugThreading) {
      std::cerr << "[LOCK " << state->vm()->thread_id() << " locked with CAS]\n";
    }
    // wonderful! Locked! weeeee!
    // Record the lock so it can be released when the thread unwinds/dies.
    state->vm()->add_locked_object(self);
    return eLocked;
  }

  // Ok, something went wrong.
  //
  // #2 See if we're locking the object recursively.
step2:
  // Re-read the header: it changed under us (CAS failed) or we jumped here
  // after a failed recursive-lock CAS.
  orig = self->header;

  // The header is inflated, use the full lock.
  if(orig.f.inflated) {
    InflatedHeader* ih = ObjectHeader::header_to_inflated_header(orig);
    return ih->lock_mutex(state, gct, us, interrupt);
  }

  switch(orig.f.meaning) {
  case eAuxWordEmpty:
    // O_o why is it empty? must be some weird concurrency stuff going
    // on. Ok, well, start over then.
    goto step1;

  case eAuxWordLock:
    // Thin-locked. Owner's thread id lives in the high bits of aux_word.
    if(orig.f.aux_word >> cAuxLockTIDShift == state->vm()->thread_id()) {
      // We're going to do this over and over until we get the new
      // header CASd into place.

      // Yep, we've already got this object locked, so increment the count.
      int count = orig.f.aux_word & cAuxLockRecCountMask;

      // We've recursively locked this object more than we can handle.
      // Inflate the lock then.
      if(++count > cAuxLockRecCountMax) {
        // If we can't inflate the lock, try the whole thing over again.
        if(!state->memory()->inflate_lock_count_overflow(state, self, count)) {
          goto step1;
        }
        // The header is now set to inflated, and the current thread
        // is holding the inflated lock.
        if(cDebugThreading) {
          std::cerr << "[LOCK " << state->vm()->thread_id() << " inflated due to recursion overflow: " << count << " ]\n";
        }
      } else {
        // Same owner tid, incremented recursion count.
        new_val = orig;
        new_val.f.aux_word = (state->vm()->thread_id() << cAuxLockTIDShift) | count;

        // Because we've got the object already locked to use, no other
        // thread is going to be trying to lock this thread, but another
        // thread might ask for an object_id and the header will
        // be inflated. So if we can't swap in the new header, we'll start
        // this step over.
        if(!self->header.atomic_set(orig, new_val)) goto step2;

        if(cDebugThreading) {
          std::cerr << "[LOCK " << state->vm()->thread_id() << " recursively locked with CAS: " << count << " ]\n";
        }
        // wonderful! Locked! weeeee!
        state->vm()->add_locked_object(self);
      }
      return eLocked;

      // Our thread id isn't in the field, so we need to inflate the lock
      // because another thread has it locked.
    } else {
      LockStatus ret = state->memory()->contend_for_lock(state, gct, self, us, interrupt);
      // We weren't able to contend for it, probably because the header
      // changed. Do it all over again.
      if(ret == eLockError) goto step1;
      return ret;
    }

    // The header is being used for something other than locking, so we need
    // to inflate it.
  case eAuxWordObjID:
  case eAuxWordHandle:
    // If we couldn't inflate the lock, that means the header was in some
    // weird state that we didn't detect and handle properly. So redo
    // the whole locking procedure again.
    if(!state->memory()->inflate_and_lock(state, self)) goto step1;
    return eLocked;
  }
// Wait for a thin lock held by another thread to be released/inflated.
//
// Called when ObjectHeader::lock finds the object thin-locked by someone
// else. Marks the header as contended (LockContended bit), then sleeps on
// contention_var_ until the holding thread inflates the header, at which
// point we take the inflated lock ourselves.
//
// Params:
//   state/gct/call_frame - VM state, GC token and frame for the GC-aware
//                          guards used while blocking.
//   obj        - object to lock; kept on-stack (OnStack<1>) so the GC can
//                update the pointer while we wait.
//   us         - wait budget; 0 means wait forever.
//                NOTE(review): presumably microseconds (us/1000000 -> secs).
//   interrupt  - if true, a pending local interrupt aborts the wait.
//
// Returns eLocked (via the inflated lock), or eLockError (header not in a
// contendable state — caller retries), eLockTimeout, or eLockInterrupted.
LockStatus ObjectMemory::contend_for_lock(STATE, GCToken gct, CallFrame* call_frame, ObjectHeader* obj, size_t us, bool interrupt) {
  bool timed = false;
  bool timeout = false;
  struct timespec ts = {0,0};

  // Be sure obj is updated by the GC while we're waiting for it.
  OnStack<1> os(state, obj);

  { // Scope for the contention lock; it must be released before we take the
    // inflated header's own mutex at the bottom.
    GCLockGuard lg(state, gct, call_frame, contention_lock_);

    // We want to lock obj, but someone else has it locked.
    //
    // If the lock is already inflated, no problem, just lock it!

step1:
    // Only contend if the header is thin locked.
    // Ok, the header is not inflated, but we can't inflate it and take
    // the lock because the locking thread needs to do that, so indicate
    // that the object is being contended for and then wait on the
    // contention condvar until the object is unlocked.

    HeaderWord orig = obj->header;
    HeaderWord new_val = orig;

    // Expected value: the snapshot, but asserting it's thin-locked. The CAS
    // therefore only succeeds while the object is actually thin-locked.
    orig.f.meaning = eAuxWordLock;

    // Desired value: same snapshot with the contended bit raised so the
    // holder knows to inflate and signal on unlock.
    new_val.f.LockContended = 1;

    if(!obj->header.atomic_set(orig, new_val)) {
      // CAS failed. If the holder inflated it in the meantime, bail out and
      // let the caller retry against the inflated header.
      if(obj->inflated_header_p()) {
        if(cDebugThreading) {
          std::cerr << "[LOCK " << state->vm()->thread_id() << " contend_for_lock error: object has been inflated.]" << std::endl;
        }
        return eLockError;
      }
      // new_val still carries the snapshot's meaning field; if that wasn't
      // eAuxWordLock the object wasn't thin-locked when we looked.
      // NOTE(review): assumes atomic_set does not modify its arguments —
      // confirm against HeaderWord::atomic_set.
      if(new_val.f.meaning != eAuxWordLock) {
        if(cDebugThreading) {
          std::cerr << "[LOCK " << state->vm()->thread_id() << " contend_for_lock error: not thin locked.]" << std::endl;
        }
        return eLockError;
      }

      // Something changed since we started to go down this path,
      // start over.
      goto step1;
    }

    // Ok, we've registered the lock contention, now spin and wait
    // for us to be told to retry.
    if(cDebugThreading) {
      std::cerr << "[LOCK " << state->vm()->thread_id() << " waiting on contention]" << std::endl;
    }

    // Compute the absolute wall-clock deadline for the timed wait.
    if(us > 0) {
      timed = true;
      struct timeval tv;
      gettimeofday(&tv, NULL);
      ts.tv_sec = tv.tv_sec + (us / 1000000);
      ts.tv_nsec = (us % 1000000) * 1000;
    }

    // Sleep until the header becomes inflated (the holder does that when it
    // sees the contended bit), a timeout fires, or we're interrupted.
    while(!obj->inflated_header_p()) {
      // Let the GC run independently of this thread while we block.
      GCIndependent gc_guard(state, call_frame);
      state->vm()->set_sleeping();

      if(timed) {
        timeout = (contention_var_.wait_until(contention_lock_, &ts) == utilities::thread::cTimedOut);
        if(timeout) break;
      } else {
        contention_var_.wait(contention_lock_);
      }

      if(cDebugThreading) {
        std::cerr << "[LOCK " << state->vm()->thread_id() << " notified of contention breakage]" << std::endl;
      }

      // Someone is interrupting us trying to lock.
      if(interrupt && state->check_local_interrupts()) {
        state->vm()->clear_check_local_interrupts();

        if(!state->vm()->interrupted_exception()->nil_p()) {
          if(cDebugThreading) {
            std::cerr << "[LOCK " << state->vm()->thread_id() << " detected interrupt]" << std::endl;
          }
          state->vm()->clear_sleeping();
          return eLockInterrupted;
        }
      }
    }

    state->vm()->clear_sleeping();

    if(cDebugThreading) {
      std::cerr << "[LOCK " << state->vm()->thread_id() << " contention broken]" << std::endl;
    }

    if(timeout) {
      if(cDebugThreading) {
        std::cerr << "[LOCK " << state->vm()->thread_id() << " contention timed out]" << std::endl;
      }
      return eLockTimeout;
    }
  } // contention_lock_ guard

  // We lock the InflatedHeader here rather than returning
  // and letting ObjectHeader::lock because the GC might have run
  // and we've used OnStack<> specifically to deal with that.
  //
  // ObjectHeader::lock doesn't use OnStack<>, it just is sure to
  // not access this if there is chance that a call blocked and GC'd
  // (which is true in the case of this function).
  InflatedHeader* ih = obj->inflated_header(state);

  if(timed) {
    return ih->lock_mutex_timed(state, gct, call_frame, obj, &ts, interrupt);
  } else {
    return ih->lock_mutex(state, gct, call_frame, obj, 0, interrupt);
  }
}