Example #1
  VALUE NativeMethodFrame::get_handle(STATE, Object* obj) {
    InflatedHeader* ih = state->om->inflate_header(obj);

    capi::Handle* handle = ih->handle();

    if(handle) {
      // ref() ONLY if it's not already in there!
      // otherwise the refcount is wrong and we leak handles.
      capi::HandleSet::iterator pos = handles_.find(handle);
      if(pos == handles_.end()) {
        handle->ref();
        handles_.insert(handle);
      }
    } else {
      handle = new capi::Handle(state, obj);
      ih->set_handle(handle);

      state->shared.global_handles()->add(handle);

      handle->ref();
      handles_.insert(handle);
    }

    return handle->as_value();
  }
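The guarded ref() above matters: the handle's refcount is bumped only the first time this frame sees the handle, otherwise the count drifts and the handle leaks. A minimal standalone sketch of that pattern, using std::set and a hypothetical RefCounted type rather than the actual capi::Handle/HandleSet API:

  #include <set>

  // Hypothetical stand-in for a ref-counted CAPI handle; illustration only.
  struct RefCounted {
    int refs = 0;
    void ref() { ++refs; }
  };

  struct Frame {
    std::set<RefCounted*> handles_;

    // ref() only on first insertion; a second ref() for the same handle
    // would leave the refcount too high and the handle would never be freed.
    void track(RefCounted* h) {
      if(handles_.insert(h).second) {
        h->ref();
      }
    }
  };

Example #24 later collapses the find/insert pair into a single add_if_absent() call, which expresses the same check-then-insert step as one operation.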
Example #2
  Data* Data::create(STATE, void* data_ptr, Data::MarkFunctor mark, Data::FreeFunctor free) {
    Data* data;

    data = state->new_object<Data>(G(data));

    // Data is just a heap alias for the handle, so go ahead and create
    // the handle and populate it as an RData now.
    InflatedHeader* ih = state->om->inflate_header(data);
    capi::Handle* handle = ih->handle();

    assert(!handle && "can't already have a handle, it's brand new!");

    handle = new capi::Handle(state, data);
    ih->set_handle(handle);

    // Don't call ->ref() on handle! We don't want the handle to keep the object
    // alive by default. The handle needs to have the lifetime of the object.

    state->shared.global_handles()->add(handle);

    RDataShadow* rdata = reinterpret_cast<RDataShadow*>(handle->as_rdata(0));

    rdata->data = data_ptr;
    rdata->dmark = mark;
    rdata->dfree = free;

    // If this Data requires a free function, register this object
    // as needing finalization.
    if(free) {
      state->om->needs_finalization(data, (FinalizerFunction)&Data::finalize);
    }

    return data;
  }
Example #3
  void ObjectMemory::inflate_for_handle(STATE, ObjectHeader* obj, capi::Handle* handle) {
    utilities::thread::SpinLock::LockGuard guard(inflation_lock_);

    HeaderWord orig = obj->header;

    if(orig.f.inflated) {
      obj->inflated_header()->set_handle(state, handle);
      return;
    }

    InflatedHeader* header = inflated_headers_->allocate(obj);
    header->update(state, orig);
    header->set_handle(state, handle);

    while(!obj->set_inflated_header(state, header, orig)) {
      orig = obj->header;

      if(orig.f.inflated) {
        obj->inflated_header()->set_handle(state, handle);
        return;
      }
      header->update(state, orig);
      header->set_handle(state, handle);
    }

  }
Example #4
    InflatedHeader* deflate_header() {
      InflatedHeader* ih = inflated_header();
      header.f = ih->flags();
      header.f.inflated = 0;

      return ih;
    }
Example #5
  bool ObjectMemory::inflate_for_contention(STATE, ObjectHeader* obj) {
    utilities::thread::SpinLock::LockGuard guard(inflation_lock_);

    for(;;) {
      HeaderWord orig = obj->header;

      InflatedHeader* ih = 0;
      uint32_t ih_header = 0;

      switch(orig.f.meaning) {
      case eAuxWordEmpty:
        ih = inflated_headers_->allocate(obj, &ih_header);
        break;
      case eAuxWordObjID:
        // We could have made a header before trying again, so
        // keep using the original one.
        ih = inflated_headers_->allocate(obj, &ih_header);
        ih->set_object_id(orig.f.aux_word);
        break;
      case eAuxWordHandle:
        ih = inflated_headers_->allocate(obj, &ih_header);
        ih->set_handle(state, obj->handle(state));
        break;
      case eAuxWordLock:
        // We have to be locking the object to inflate it, that's the law.
        if(orig.f.aux_word >> cAuxLockTIDShift != state->vm()->thread_id()) {
          if(cDebugThreading) {
            std::cerr << "[LOCK " << state->vm()->thread_id() << " object locked by another thread while inflating for contention]" << std::endl;
          }
          return false;
        }
        if(cDebugThreading) {
          std::cerr << "[LOCK " << state->vm()->thread_id() << " being unlocked and inflated atomicly]" << std::endl;
        }

        ih = inflated_headers_->allocate(obj, &ih_header);
        break;
      case eAuxWordInflated:
        if(cDebugThreading) {
          std::cerr << "[LOCK " << state->vm()->thread_id() << " asked to inflated already inflated lock]" << std::endl;
        }
        return false;
      }

      // Try it all over again if it fails.
      if(!obj->set_inflated_header(state, ih_header, orig)) {
        ih->clear();
        continue;
      }

      obj->clear_lock_contended();

      if(cDebugThreading) {
        std::cerr << "[LOCK " << state->vm()->thread_id() << " inflated lock for contention.]" << std::endl;
      }

      // Now inflated but not locked, which is what we want.
      return true;
    }
  }
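Examples #3, #5, #6, #8, and #20 share the same compare-and-swap retry shape: snapshot the header word, build the inflated replacement, try to install it with set_inflated_header(), and on failure re-read the header, bail out if another thread already inflated it, and try again. A stripped-down sketch of that loop using std::atomic<uint64_t> as a stand-in for HeaderWord (the bit layout and the build_inflated helper are hypothetical, not the Rubinius API):

  #include <atomic>
  #include <cstdint>

  // Simplified stand-in for the object header word; illustration only.
  std::atomic<uint64_t> header_word{0};

  const uint64_t cInflatedBit = uint64_t(1) << 63;  // hypothetical "inflated" flag bit

  // Hypothetical transform: build the desired header from a snapshot.
  uint64_t build_inflated(uint64_t orig) {
    return orig | cInflatedBit;
  }

  bool install_inflated() {
    uint64_t orig = header_word.load();
    for(;;) {
      if(orig & cInflatedBit) return false;   // someone else inflated it; bail out
      uint64_t desired = build_inflated(orig);
      // On failure, compare_exchange_strong reloads `orig` with the current
      // value, so the next pass rebuilds `desired` from a fresh snapshot,
      // mirroring the while(!obj->set_inflated_header(...)) loops above.
      if(header_word.compare_exchange_strong(orig, desired)) return true;
    }
  }

The real code has one extra wrinkle: a losing attempt has to release the InflatedHeader it claimed from the shared pool, which is what the ih->clear() in Example #5 does before retrying.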
Example #6
  bool ObjectMemory::inflate_lock_count_overflow(STATE, ObjectHeader* obj,
                                                 int count)
  {
    utilities::thread::SpinLock::LockGuard guard(inflation_lock_);

    HeaderWord orig = obj->header;

    if(orig.f.meaning == eAuxWordInflated) {
      return false;
    }

    uint32_t ih_header = 0;
    InflatedHeader* ih = inflated_headers_->allocate(obj, &ih_header);
    ih->update(state, orig);
    ih->initialize_mutex(state->vm()->thread_id(), count);

    while(!obj->set_inflated_header(state, ih_header, orig)) {
      orig = obj->header;

      if(orig.f.meaning == eAuxWordInflated) {
        return false;
      }
      ih->update(state, orig);
      ih->initialize_mutex(state->vm()->thread_id(), count);
    }
    return true;
  }
Example #7
  void InflatedHeaders::deallocate_headers(unsigned int mark) {
    std::vector<bool> chunk_marks(allocator_->chunks_.size(), false);

    diagnostics_.objects_ = 0;

    for(std::vector<int>::size_type i = 0; i < allocator_->chunks_.size(); ++i) {
      InflatedHeader* chunk = allocator_->chunks_[i];

      for(size_t j = 0; j < allocator_->cChunkSize; j++) {
        InflatedHeader* header = &chunk[j];

        if(header->marked_p(mark)) {
          chunk_marks[i] = true;
          diagnostics_.objects_++;
        } else {
          header->clear();
        }
      }
    }

    allocator_->rebuild_freelist(&chunk_marks);

    diagnostics_.bytes_ = allocator_->in_use_ * sizeof(InflatedHeader);
    diagnostics_.modify();
  }
Example #8
  bool ObjectMemory::inflate_lock_count_overflow(STATE, ObjectHeader* obj,
                                                 int count)
  {
    utilities::thread::SpinLock::LockGuard guard(inflation_lock_);

    HeaderWord orig = obj->header;

    if(orig.f.inflated) {
      return false;
    }

    InflatedHeader* header = inflated_headers_->allocate(obj);
    header->update(state, orig);
    header->initialize_mutex(state->vm()->thread_id(), count);

    while(!obj->set_inflated_header(state, header, orig)) {
      orig = obj->header;

      if(orig.f.inflated) {
        return false;
      }
      header->update(state, orig);
      header->initialize_mutex(state->vm()->thread_id(), count);
    }
    return true;
  }
Example #9
  VALUE NativeMethodFrame::get_handle(STATE, Object* obj) {
    InflatedHeader* ih = state->om->inflate_header(obj);

    capi::Handle* handle = ih->handle();

    if(handle) {
      // ref() ONLY if it's not already in there!
      // otherwise the refcount is wrong and we leak handles.
      capi::HandleSet::iterator pos = handles_.find(handle);
      if(pos == handles_.end()) {
        // We're seeing this object for the first time in this function.
        // Be sure that it's updated.
        handle->ref();
        handles_.insert(handle);
        handle->update(NativeMethodEnvironment::get());
      }
    } else {
      handle = new capi::Handle(state, obj);
      ih->set_handle(handle);

      state->shared.global_handles()->add(handle);

      handle->ref();
      handles_.insert(handle);
    }

    return handle->as_value();
  }
Example #10
  RDataShadow* Data::rdata(STATE) {
    InflatedHeader* ih = state->om->inflate_header(this);
    capi::Handle* handle = ih->handle();

    assert(handle && handle->is_rdata() && "invalid initialized Data object");

    return reinterpret_cast<RDataShadow*>(handle->as_rdata(0));
  }
Example #11
    Handle::~Handle() {
      InflatedHeader* ih = object_->inflated_header();
      assert(ih);
      ih->set_handle(0);

      free_data();
      invalidate();
    }
Example #12
 /**
  * Allocates a new InflatedHeader object for the specified obj ObjectHeader.
  *
  * @param obj The ObjectHeader that is to be inflated.
  * @returns the InflatedHeader representing the new inflated object header.
  */
 InflatedHeader* InflatedHeaders::allocate(ObjectHeader* obj) {
   bool needs_gc = false;
   InflatedHeader* header = allocator_->allocate(&needs_gc);
   header->set_object(obj);
   if(needs_gc) {
     state_->om->collect_mature_now = true;
   }
   return header;
 }
Example #13
  InflatedHeader* ObjectHeader::deflate_header() {
    // Probably needs to CAS, but this is only used by immix and in a place
    // we don't hit currently, so don't worry about it for now.
    InflatedHeader* ih = inflated_header();
    header.f = ih->flags();
    header.f.meaning = eAuxWordEmpty;
    header.f.aux_word = 0;

    return ih;
  }
Example #14
  /**
   * Allocates a new InflatedHeader object for the specified obj ObjectHeader.
   *
   * @param obj The ObjectHeader that is to be inflated.
   * @returns the InflatedHeader representing the new inflated object header.
   */
  InflatedHeader* InflatedHeaders::allocate(ObjectHeader* obj) {
    if(!free_list_) allocate_chunk();
    InflatedHeader* header = free_list_;
    free_list_ = header->next();

    in_use_++;
    header->set_object(obj);

    return header;
  }
Example #15
  void ObjectMemory::run_finalizers(STATE, CallFrame* call_frame) {
    if(running_finalizers_) return;
    running_finalizers_ = true;

    for(std::list<FinalizeObject*>::iterator i = to_finalize_.begin();
        i != to_finalize_.end(); ) {
      FinalizeObject* fi = *i;

      if(fi->finalizer) {
        (*fi->finalizer)(state, fi->object);
        // Unhook any handle used by fi->object so that we don't accidentally
        // try and mark it later (after we've finalized it)
        if(fi->object->inflated_header_p()) {
          InflatedHeader* ih = fi->object->inflated_header();

          if(capi::Handle* handle = ih->handle()) {
            handle->forget_object();
            ih->set_handle(0);
          }
        }

        // If the object was remembered, unremember it.
        if(fi->object->remembered_p()) {
          unremember_object(fi->object);
        }
      } else if(fi->ruby_finalizer) {
        // Rubinius specific code. If the finalizer is Qtrue, then
        // send the object the finalize message
        if(fi->ruby_finalizer == Qtrue) {
          fi->object->send(state, call_frame, state->symbol("__finalize__"), true);
        } else {
          Array* ary = Array::create(state, 1);
          ary->set(state, 0, fi->object->id(state));

          OnStack<1> os(state, ary);

          fi->ruby_finalizer->send(state, call_frame, state->symbol("call"), ary, Qnil, true);
        }
      } else {
        std::cerr << "Unsupported object to be finalized: "
                  << fi->object->to_s(state)->c_str() << "\n";
      }

      fi->status = FinalizeObject::eFinalized;

      i = to_finalize_.erase(i);
    }

    running_finalizers_ = false;
  }
Example #16
    void set_forward(ObjectHeader* fwd) {
      // If the header is inflated, repoint it.
      if(inflated_header_p()) {
        InflatedHeader* ih = deflate_header();

        ih->set_object(fwd);
        fwd->set_inflated_header(ih);
      }

      flags().Forwarded = 1;

      // DO NOT USE klass() because we need to get around the
      // write barrier!
      ivars_ = reinterpret_cast<Object*>(fwd);
    }
Example #17
 InflatedHeader* InflatedHeaders::allocate(ObjectHeader* obj, uint32_t* index) {
   bool needs_gc = false;
   uintptr_t header_index = allocator_->allocate_index(&needs_gc);
   if(header_index > UINT32_MAX) {
     rubinius::bug("Rubinius can't handle more than 4G inflated headers active at the same time");
   }
   *index = (uint32_t)header_index;
   InflatedHeader* header = allocator_->from_index(header_index);
   header->clear_mark();
   if(needs_gc) {
     state_->om->collect_mature_now = true;
   }
   atomic::memory_barrier();
   return header;
 }
Example #18
  Object* ImmixGC::saw_object(Object* obj) {
    if(watched_p(obj)) {
      std::cout << "detected " << obj << " during immix scanning.\n";
    }

    immix::Address fwd = gc_.mark_address(immix::Address(obj), allocator_);
    Object* copy = fwd.as<Object>();

    // Check and update an inflated header
    if(copy && copy != obj && obj->inflated_header_p()) {
      InflatedHeader* ih = obj->deflate_header();
      ih->reset_object(copy);
      copy->set_inflated_header(ih);
    }

    return copy;
  }
Example #19
  void ObjectMemory::validate_handles(capi::Handles* handles) {
    capi::Handle* handle = handles->front();
    capi::Handle* current;

    while(handle) {
      current = handle;
      handle = static_cast<capi::Handle*>(handle->next());

      Object* obj = current->object();

      assert(obj->inflated_header_p());
      InflatedHeader* ih = obj->inflated_header();

      assert(ih->handle() == current);
      assert(ih->object() == obj);
    }
  }
Example #20
  void ObjectMemory::inflate_for_handle(STATE, ObjectHeader* obj, capi::Handle* handle) {
    utilities::thread::SpinLock::LockGuard guard(inflation_lock_);

    HeaderWord orig = obj->header;

    if(orig.f.meaning == eAuxWordInflated) {
      obj->inflated_header(state)->set_handle(state, handle);
      return;
    }

    uint32_t ih_index = 0;
    InflatedHeader* ih = inflated_headers_->allocate(state, obj, &ih_index);
    ih->update(state, orig);
    ih->set_handle(state, handle);
    ih->mark(this, mark_);

    while(!obj->set_inflated_header(state, ih_index, orig)) {
      orig = obj->header;

      if(orig.f.meaning == eAuxWordInflated) {
        obj->inflated_header(state)->set_handle(state, handle);
        ih->clear();
        return;
      }
      ih->update(state, orig);
      ih->set_handle(state, handle);
    }

  }
Example #21
  void InflatedHeaders::deallocate_headers(unsigned int mark) {
    std::vector<bool> chunk_marks(allocator_->chunks_.size(), false);
    for(std::vector<int>::size_type i = 0; i < allocator_->chunks_.size(); ++i) {
      InflatedHeader* chunk = allocator_->chunks_[i];

      for(size_t j = 0; j < allocator_->cChunkSize; j++) {
        InflatedHeader* header = &chunk[j];

        if(header->marked_p(mark)) {
          chunk_marks[i] = true;
        } else {
          header->clear();
        }
      }
    }

    allocator_->rebuild_freelist(&chunk_marks);
  }
Example #22
  bool VM::wakeup(STATE) {
    utilities::thread::SpinLock::LockGuard guard(interrupt_lock_);

    set_check_local_interrupts();
    Object* wait = waiting_object_.get();

    if(park_->parked_p()) {
      park_->unpark();
      return true;
    } else if(interrupt_with_signal_) {
#ifdef RBX_WINDOWS
      // TODO: wake up the thread
#else
      pthread_kill(os_thread_, SIGVTALRM);
#endif
      interrupt_lock_.unlock();
      // Wakeup any locks hanging around with contention
      memory()->release_contention(state);
      return true;
    } else if(!wait->nil_p()) {
      // We shouldn't hold the VM lock and the IH lock at the same time;
      // other threads can grab them and deadlock.
      InflatedHeader* ih = wait->inflated_header(state);
      interrupt_lock_.unlock();
      ih->wakeup(state, wait);
      return true;
    } else {
      Channel* chan = waiting_channel_.get();

      if(!chan->nil_p()) {
        interrupt_lock_.unlock();
        memory()->release_contention(state);
        chan->send(state, cNil);
        return true;
      } else if(custom_wakeup_) {
        interrupt_lock_.unlock();
        memory()->release_contention(state);
        (*custom_wakeup_)(custom_wakeup_data_);
        return true;
      }

      return false;
    }
  }
Example #23
  bool VM::wakeup(STATE, GCToken gct, CallFrame* call_frame) {
    SYNC(state);

    set_check_local_interrupts();
    Object* wait = waiting_object_.get();

    if(park_->parked_p()) {
      park_->unpark();
      return true;
    } else if(vm_jit_.interrupt_with_signal_) {
#ifdef RBX_WINDOWS
      // TODO: wake up the thread
#else
      pthread_kill(os_thread_, SIGVTALRM);
#endif
      UNSYNC;
      // Wakeup any locks hanging around with contention
      om->release_contention(state, gct, call_frame);
      return true;
    } else if(!wait->nil_p()) {
      // We shouldn't hold the VM lock and the IH lock at the same time;
      // other threads can grab them and deadlock.
      InflatedHeader* ih = wait->inflated_header(state);
      UNSYNC;
      ih->wakeup(state, gct, call_frame, wait);
      return true;
    } else {
      Channel* chan = waiting_channel_.get();

      if(!chan->nil_p()) {
        UNSYNC;
        om->release_contention(state, gct, call_frame);
        chan->send(state, gct, cNil, call_frame);
        return true;
      } else if(custom_wakeup_) {
        UNSYNC;
        om->release_contention(state, gct, call_frame);
        (*custom_wakeup_)(custom_wakeup_data_);
        return true;
      }

      return false;
    }
  }
Example #24
  VALUE NativeMethodFrame::get_handle(STATE, Object* obj) {
    InflatedHeader* ih = state->memory()->inflate_header(state, obj);

    capi::Handle* handle = ih->handle();

    if(handle) {
      if(handles_.add_if_absent(handle)) {
        // We're seeing this object for the first time in this function.
        // Be sure that it's updated.
        handle->update(NativeMethodEnvironment::get());
      }
    } else {
      handle = new capi::Handle(state, obj);
      ih->set_handle(handle);

      state->shared().add_global_handle(state, handle);

      handles_.add_if_absent(handle);
    }

    return handle->as_value();
  }
Example #25
  Object* ImmixGC::saw_object(Object* obj) {
#ifdef ENABLE_OBJECT_WATCH
    if(watched_p(obj)) {
      std::cout << "detected " << obj << " during immix scanning.\n";
    }
#endif

    if(!obj->reference_p()) return obj;

    memory::Address fwd = gc_.mark_address(memory::Address(obj), allocator_);
    Object* copy = fwd.as<Object>();

    // Check and update an inflated header
    if(copy && copy != obj && obj->inflated_header_p()) {
      InflatedHeader* ih = obj->deflate_header();
      ih->reset_object(copy);
      if(!copy->set_inflated_header(ih)) {
        rubinius::bug("Massive IMMIX inflated header screwup.");
      }
    }

    return copy;
  }
Example #26
  bool ObjectMemory::inflate_lock_count_overflow(STATE, ObjectHeader* obj,
                                                 int count)
  {
    utilities::thread::SpinLock::LockGuard guard(inflation_lock_);

    // Inflation always happens with the ObjectMemory lock held, so we don't
    // need to worry about another thread concurrently inflating it.
    //
    // But we do need to check that it's not already inflated.
    if(obj->inflated_header_p()) return false;

    InflatedHeader* ih = inflated_headers_->allocate(obj);

    if(!obj->set_inflated_header(state, ih)) {
      if(obj->inflated_header_p()) return false;

      // Now things are really in a weird state, just abort.
      rubinius::bug("Massive header state confusion detected. Call a doctor.");
    }

    ih->initialize_mutex(state->vm()->thread_id(), count);
    return true;
  }
Example #27
  void ObjectMemory::inflate_for_handle(STATE, ObjectHeader* obj, capi::Handle* handle) {
    utilities::thread::SpinLock::LockGuard guard(inflation_lock_);

    HeaderWord orig = obj->header;

    if(orig.f.inflated) {
      rubinius::bug("Massive header state confusion detected. Call a doctor.");
    }

    InflatedHeader* header = inflated_headers_->allocate(obj);
    header->set_handle(state, handle);
    header->set_object_id(obj->object_id());

    if(!obj->set_inflated_header(state, header)) {
      if(obj->inflated_header_p()) {
        obj->inflated_header()->set_handle(state, handle);
        return;
      }

      // Now things are really in a weird state, just abort.
      rubinius::bug("Massive header state confusion detected. Call a doctor.");
    }

  }
Example #28
  /**
   * Scans the list of InflatedHeader objects checking to see which are in use.
   * Those that do not have the appropriate mark value set are cleared and
   * added back to the free list. Chunks that are completely unused are removed
   * from the linked list.
   *
   * @param mark The current value of the mark; only InflatedHeaders that bear
   *             this mark will be retained.
   */
  void InflatedHeaders::deallocate_headers(int mark) {
    // Detect and free any completely unused chunks first!
    for(Chunks::iterator i = chunks_.begin();
        i != chunks_.end();) {
      InflatedHeader* chunk = *i;

      bool used = false;

      for(size_t j = 0; j < cChunkSize; j++) {
        InflatedHeader* header = &chunk[j];

        if(header->marked_p(mark)) {
          used = true;
          break;
        }
      }

      // No header was marked, so it's completely empty. Free it.
      if(!used) {
        delete[] chunk;
        i = chunks_.erase(i);
      } else {
        ++i;
      }
    }

    // Ok, now, rebuild the free_list
    free_list_ = 0;
    in_use_ = 0;

    for(Chunks::iterator i = chunks_.begin();
        i != chunks_.end();
        ++i) {
      InflatedHeader* chunk = *i;

      for(size_t j = 0; j < cChunkSize; j++) {
        InflatedHeader* header = &chunk[j];

        if(!header->marked_p(mark)) {
          header->clear();
          header->set_next(free_list_);
          free_list_ = header;
        } else {
          in_use_++;
        }
      }
    }

  }
Example #29
  LockStatus ObjectHeader::lock(STATE, GCToken gct, size_t us, bool interrupt) {
    // #1 Attempt to lock an unlocked object using CAS.

    ObjectHeader* self = this;
    OnStack<1> os(state, self);

step1:
    // Construct 2 new headers: one is the version we hope is
    // currently in use and the other is what we want it to be. Then CAS
    // the new one into place.
    HeaderWord orig = self->header;

    orig.f.inflated = 0;
    orig.f.meaning  = eAuxWordEmpty;
    orig.f.aux_word = 0;

    HeaderWord new_val = orig;

    new_val.f.meaning  = eAuxWordLock;
    new_val.f.aux_word = state->vm()->thread_id() << cAuxLockTIDShift;

    if(self->header.atomic_set(orig, new_val)) {
      if(cDebugThreading) {
        std::cerr << "[LOCK " << state->vm()->thread_id() << " locked with CAS]\n";
      }

      // wonderful! Locked! weeeee!
      state->vm()->add_locked_object(self);
      return eLocked;
    }

    // Ok, something went wrong.
    //
    // #2 See if we're locking the object recursively.
step2:
    orig = self->header;

    // The header is inflated, use the full lock.
    if(orig.f.inflated) {
      InflatedHeader* ih = ObjectHeader::header_to_inflated_header(orig);
      return ih->lock_mutex(state, gct, us, interrupt);
    }

    switch(orig.f.meaning) {
    case eAuxWordEmpty:
      // O_o why is it empty? must be some weird concurrency stuff going
      // on. Ok, well, start over then.
      goto step1;

    case eAuxWordLock:
      if(orig.f.aux_word >> cAuxLockTIDShift == state->vm()->thread_id()) {
        // We're going to do this over and over until we get the new
        // header CASd into place.

        // Yep, we've already got this object locked, so increment the count.
        int count = orig.f.aux_word & cAuxLockRecCountMask;

        // We've recursively locked this object more than we can handle.
        // Inflate the lock then.
        if(++count > cAuxLockRecCountMax) {
          // If we can't inflate the lock, try the whole thing over again.
          if(!state->memory()->inflate_lock_count_overflow(state, self, count)) {
            goto step1;
          }
          // The header is now set to inflated, and the current thread
          // is holding the inflated lock.
          if(cDebugThreading) {
            std::cerr << "[LOCK " << state->vm()->thread_id() << " inflated due to recursion overflow: " << count << " ]\n";
          }
        } else {
          new_val = orig;
          new_val.f.aux_word = (state->vm()->thread_id() << cAuxLockTIDShift) | count;

          // Because we've already got the object locked, no other
          // thread is going to be trying to lock it, but another
          // thread might ask for an object_id and the header will
          // be inflated. So if we can't swap in the new header, we'll start
          // this step over.
          if(!self->header.atomic_set(orig, new_val)) goto step2;

          if(cDebugThreading) {
            std::cerr << "[LOCK " << state->vm()->thread_id() << " recursively locked with CAS: " << count << " ]\n";
          }

          // wonderful! Locked! weeeee!
          state->vm()->add_locked_object(self);
        }
        return eLocked;

      // Our thread id isn't in the field, so we need to inflate the lock
      // because another thread has it locked.
      } else {
        // We weren't able to contend for it, probably because the header changed.
        // Do it all over again.
        LockStatus ret = state->memory()->contend_for_lock(state, gct, self, us, interrupt);
        if(ret == eLockError) goto step1;
        return ret;
      }

    // The header is being used for something other than locking, so we need to
    // inflate it.
    case eAuxWordObjID:
    case eAuxWordHandle:
      // If we couldn't inflate the lock, that means the header was in some
      // weird state that we didn't detect and handle properly. So redo
      // the whole locking procedure again.
      if(!state->memory()->inflate_and_lock(state, self)) goto step1;
      return eLocked;
    }
Example #30
  bool ObjectMemory::inflate_and_lock(STATE, ObjectHeader* obj) {
    utilities::thread::SpinLock::LockGuard guard(inflation_lock_);

    InflatedHeader* ih = 0;
    uint32_t ih_index = 0;
    int initial_count = 0;

    HeaderWord orig = obj->header;

    switch(orig.f.meaning) {
    case eAuxWordEmpty:
      // ERROR, we can not be here because it's empty. This is only to
      // be called when the header is already in use.
      return false;
    case eAuxWordObjID:
      // We could have made a header before trying again, so
      // keep using the original one.
      ih = inflated_headers_->allocate(state, obj, &ih_index);
      ih->set_object_id(orig.f.aux_word);
      break;
    case eAuxWordLock:
      // We have to be locking the object to inflate it, that's the law.
      if(orig.f.aux_word >> cAuxLockTIDShift != state->vm()->thread_id()) {
        return false;
      }

      ih = inflated_headers_->allocate(state, obj, &ih_index);
      initial_count = orig.f.aux_word & cAuxLockRecCountMask;
      break;
    case eAuxWordHandle:
      // Handle in use so inflate and update handle
      ih = inflated_headers_->allocate(state, obj, &ih_index);
      ih->set_handle(state, obj->handle(state));
      break;
    case eAuxWordInflated:
      // Already inflated. ERROR, let the caller sort it out.
      if(cDebugThreading) {
        std::cerr << "[LOCK " << state->vm()->thread_id() << " asked to inflate an already inflated lock]" << std::endl;
      }
      return false;
    }

    ih->initialize_mutex(state->vm()->thread_id(), initial_count);
    ih->mark(this, mark_);

    while(!obj->set_inflated_header(state, ih_index, orig)) {
      // The header can't have been inflated by another thread, because
      // the inflation process holds the OM lock.
      //
      // So some other bits must have changed; let's just spin and
      // keep trying to update it.

      // Sanity check that the meaning is still the same; if not,
      // something is really wrong.
      if(orig.f.meaning != obj->header.f.meaning) {
        if(cDebugThreading) {
          std::cerr << "[LOCK object header consistency error detected.]" << std::endl;
        }
        return false;
      }
      orig = obj->header;
      if(orig.f.meaning == eAuxWordInflated) {
        return false;
      }
    }

    return true;
  }