void ThreadList::updateThreads(const std::list<ThreadInfo>& threads)
{
    // reset flag in all items
    for (QTreeWidgetItemIterator i(this); *i; ++i)
        static_cast<ThreadEntry*>(*i)->m_delete = true;

    for (std::list<ThreadInfo>::const_iterator i = threads.begin(); i != threads.end(); ++i)
    {
        // look up this thread by id
        ThreadEntry* te = threadById(i->id);
        if (te == 0) {
            te = new ThreadEntry(this, *i);
        } else {
            te->m_delete = false;
            te->setFunction(i->function);
        }
        // set focus icon
        te->hasFocus = i->hasFocus;
        te->setIcon(0, i->hasFocus ? QIcon(m_focusIcon) : QIcon(m_noFocusIcon));
    }

    // delete all entries that have not been seen
    for (QTreeWidgetItemIterator i(this); *i;)
    {
        ThreadEntry* te = static_cast<ThreadEntry*>(*i);
        ++i;                            // step ahead before deleting it ;-)
        if (te->m_delete) {
            delete te;
        }
    }
}
void ThreadList::updateThreads(QList<ThreadInfo>& threads)
{
    // reset flag in all items
    for (QListViewItem* e = firstChild(); e != 0; e = e->nextSibling()) {
        static_cast<ThreadEntry*>(e)->m_delete = true;
    }

    for (ThreadInfo* i = threads.first(); i != 0; i = threads.next())
    {
        // look up this thread by id
        ThreadEntry* te = threadById(i->id);
        if (te == 0) {
            te = new ThreadEntry(this, i);
        } else {
            te->m_delete = false;
            te->setFunction(i->function);
        }
        // set focus icon
        te->hasFocus = i->hasFocus;
        te->setPixmap(0, i->hasFocus ? m_focusIcon : m_noFocusIcon);
    }

    // delete all entries that have not been seen
    for (QListViewItem* e = firstChild(); e != 0;)
    {
        ThreadEntry* te = static_cast<ThreadEntry*>(e);
        e = e->nextSibling();           /* step ahead before deleting it ;-) */
        if (te->m_delete) {
            delete te;
        }
    }
}
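Both updateThreads() variants above (the QTreeWidget one and the older Qt3/QListView one) implement the same mark-and-sweep refresh: flag every existing entry for deletion, clear the flag or create an entry for each thread the debugger reports, then delete whatever is still flagged. A minimal, toolkit-free sketch of that pattern; Row, Info, and update() are hypothetical stand-ins for ThreadEntry, ThreadInfo, and the widget logic:

// Sketch of the mark-and-sweep refresh, independent of Qt (invented types).
#include <list>
#include <map>

struct Info { int id; };
struct Row  { bool m_delete = false; };

void update(std::map<int, Row>& rows, const std::list<Info>& threads)
{
    for (auto& kv : rows)               // mark: assume every row disappeared
        kv.second.m_delete = true;
    for (const Info& t : threads)       // refresh or create rows that still exist
        rows[t.id].m_delete = false;
    for (auto it = rows.begin(); it != rows.end();) {   // sweep: drop stale rows
        if (it->second.m_delete)
            it = rows.erase(it);
        else
            ++it;
    }
}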
void StaticMetaBase::reserve(EntryID* id) {
  auto& meta = *this;
  ThreadEntry* threadEntry = (*threadEntry_)();
  size_t prevCapacity = threadEntry->getElementsCapacity();

  uint32_t idval = id->getOrAllocate(meta);
  if (prevCapacity > idval) {
    return;
  }

  size_t newCapacity;
  ElementWrapper* reallocated = reallocate(threadEntry, idval, newCapacity);

  // Success, update the entry
  {
    std::lock_guard<std::mutex> g(meta.lock_);

    if (prevCapacity == 0) {
      meta.push_back(threadEntry);
    }

    if (reallocated) {
      /*
       * Note: we need to hold the meta lock when copying data out of
       * the old vector, because some other thread might be
       * destructing a ThreadLocal and writing to the elements vector
       * of this thread.
       */
      if (prevCapacity != 0) {
        memcpy(
            reallocated,
            threadEntry->elements,
            sizeof(*reallocated) * prevCapacity);
      }
      std::swap(reallocated, threadEntry->elements);
    }

    for (size_t i = prevCapacity; i < newCapacity; i++) {
      threadEntry->elements[i].node.initZero(threadEntry, i);
    }

    threadEntry->setElementsCapacity(newCapacity);
  }

  free(reallocated);
}
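reserve() is the slow path behind folly's thread-local storage: it runs the first time a thread touches a slot whose id is at or beyond that thread's current elements capacity, grows the per-thread elements array under meta.lock_, and registers the ThreadEntry with the meta object. A rough sketch of the public API that sits on top of it, assuming folly is available (requestCount and handleRequest are invented names, not folly code):

#include <folly/ThreadLocal.h>
#include <thread>

// Each thread that dereferences requestCount gets its own int; the first
// access on a given thread is what grows that thread's elements array.
static folly::ThreadLocal<int> requestCount;

void handleRequest() {
  ++(*requestCount);  // touches only this thread's slot on the hot path
}

int main() {
  std::thread t1(handleRequest);
  std::thread t2(handleRequest);
  t1.join();
  t2.join();
}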
static void SignalHandler(int, siginfo_t*, void* sigcontext) {
  ThreadEntry* entry = ThreadEntry::Get(getpid(), gettid(), false);
  if (!entry) {
    BACK_LOGW("Unable to find pid %d tid %d information", getpid(), gettid());
    return;
  }

  entry->CopyUcontext(reinterpret_cast<ucontext_t*>(sigcontext));

  // Indicate the ucontext is now valid.
  entry->Wake();

  // Pause the thread until the unwind is complete. This avoids having
  // the thread run ahead causing problems.
  entry->Wait(1);

  ThreadEntry::Remove(entry);
}
ThreadEntry* ThreadEntry::Get(pid_t pid, pid_t tid, bool create) {
  pthread_mutex_lock(&ThreadEntry::list_mutex_);
  ThreadEntry* entry = list_;
  while (entry != NULL) {
    if (entry->Match(pid, tid)) {
      break;
    }
    entry = entry->next_;
  }

  if (!entry) {
    if (create) {
      entry = new ThreadEntry(pid, tid);
    }
  } else {
    entry->ref_count_++;
  }
  pthread_mutex_unlock(&ThreadEntry::list_mutex_);

  return entry;
}
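Get() pairs with Remove(): under list_mutex_ it either bumps the reference count of an existing entry or, when create is true, allocates a fresh one. The same lookup-or-create-with-refcount shape can be sketched with standard C++ containers; Registry, Node, and the (pid, tid) key below are invented for illustration:

#include <map>
#include <mutex>
#include <utility>

struct Node { int ref_count = 1; };

class Registry {
 public:
  // Return the node for key, creating it if asked; bump the refcount otherwise.
  Node* Get(std::pair<int, int> key, bool create = true) {
    std::lock_guard<std::mutex> lock(mutex_);
    auto it = nodes_.find(key);
    if (it == nodes_.end()) {
      if (!create) return nullptr;
      it = nodes_.emplace(key, Node{}).first;
      return &it->second;
    }
    ++it->second.ref_count;
    return &it->second;
  }

  // Drop one reference; erase the node when the last reference goes away.
  void Remove(std::pair<int, int> key) {
    std::lock_guard<std::mutex> lock(mutex_);
    auto it = nodes_.find(key);
    if (it != nodes_.end() && --it->second.ref_count == 0) {
      nodes_.erase(it);
    }
  }

 private:
  std::mutex mutex_;
  std::map<std::pair<int, int>, Node> nodes_;
};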
bool BacktraceThread::Unwind(size_t num_ignore_frames, ucontext_t* ucontext) {
  if (ucontext) {
    // Unwind using an already existing ucontext.
    return impl_->Unwind(num_ignore_frames, ucontext);
  }

  // Prevent multiple threads trying to set the trigger action on different
  // threads at the same time.
  int error = pthread_mutex_lock(&g_sigaction_mutex);
  if (error != 0) {
    // pthread_mutex_lock() returns an error number rather than setting errno.
    BACK_LOGW("pthread_mutex_lock failed: %s", strerror(error));
    return false;
  }

  ThreadEntry* entry = ThreadEntry::Get(Pid(), Tid());
  entry->Lock();

  struct sigaction act, oldact;
  memset(&act, 0, sizeof(act));
  act.sa_sigaction = SignalHandler;
  act.sa_flags = SA_RESTART | SA_SIGINFO | SA_ONSTACK;
  sigemptyset(&act.sa_mask);
  if (sigaction(THREAD_SIGNAL, &act, &oldact) != 0) {
    BACK_LOGW("sigaction failed %s", strerror(errno));
    entry->Unlock();
    ThreadEntry::Remove(entry);
    pthread_mutex_unlock(&g_sigaction_mutex);
    return false;
  }

  if (tgkill(Pid(), Tid(), THREAD_SIGNAL) != 0) {
    BACK_LOGW("tgkill %d failed: %s", Tid(), strerror(errno));
    sigaction(THREAD_SIGNAL, &oldact, NULL);
    entry->Unlock();
    ThreadEntry::Remove(entry);
    pthread_mutex_unlock(&g_sigaction_mutex);
    return false;
  }

  // Wait for the thread to get the ucontext.
  entry->Wait(0);

  // After the thread has received the signal, allow other unwinders to
  // continue.
  sigaction(THREAD_SIGNAL, &oldact, NULL);
  pthread_mutex_unlock(&g_sigaction_mutex);

  bool unwind_done = impl_->Unwind(num_ignore_frames, entry->GetUcontext());

  // Tell the signal handler to exit and release the entry.
  entry->Wake();

  return unwind_done;
}
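SignalHandler() and Unwind() together form a two-step rendezvous: the requester signals the target thread and blocks in Wait(0); the handler copies its ucontext, calls Wake() to release the requester, and parks in Wait(1); once the unwind finishes, the requester calls Wake() again so the target thread can resume. A condition-variable sketch of that handshake, outside any signal context; the Handshake class is a stand-in, not the real ThreadEntry:

#include <condition_variable>
#include <mutex>

// Two-phase rendezvous: each side waits until the counter has advanced past
// the value it expects, and Wake() advances the counter by one.
class Handshake {
 public:
  void Wait(int expected) {
    std::unique_lock<std::mutex> lock(mutex_);
    cond_.wait(lock, [&] { return value_ > expected; });
  }
  void Wake() {
    std::lock_guard<std::mutex> lock(mutex_);
    ++value_;
    cond_.notify_all();
  }

 private:
  std::mutex mutex_;
  std::condition_variable cond_;
  int value_ = 0;
};

With this counter scheme, Wait(0) returns after the first Wake() and Wait(1) after the second, which mirrors the ordering the two functions above rely on.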
void StaticMetaBase::destroy(EntryID* ent) {
  try {
    auto& meta = *this;

    // Elements in other threads that use this id.
    std::vector<ElementWrapper> elements;

    {
      SharedMutex::WriteHolder wlock(nullptr);
      if (meta.strict_) {
        /*
         * In strict mode, the logic guarantees per-thread instances are
         * destroyed by the moment ThreadLocal<> dtor returns.
         * In order to achieve that, we should wait until concurrent
         * onThreadExit() calls (that might acquire ownership over per-thread
         * instances in order to destroy them) are finished.
         */
        wlock = SharedMutex::WriteHolder(meta.accessAllThreadsLock_);
      }

      {
        std::lock_guard<std::mutex> g(meta.lock_);
        uint32_t id = ent->value.exchange(kEntryIDInvalid);
        if (id == kEntryIDInvalid) {
          return;
        }

        auto& node = meta.head_.elements[id].node;
        while (!node.empty()) {
          auto* next = node.getNext();
          next->eraseZero();

          ThreadEntry* e = next->parent;
          auto elementsCapacity = e->getElementsCapacity();
          if (id < elementsCapacity && e->elements[id].ptr) {
            elements.push_back(e->elements[id]);

            /*
             * Writing another thread's ThreadEntry from here is fine;
             * the only other potential reader is the owning thread --
             * from onThreadExit (which grabs the lock, so is properly
             * synchronized with us) or from get(), which also grabs
             * the lock if it needs to resize the elements vector.
             *
             * We can't conflict with reads for a get(id), because
             * it's illegal to call get on a thread local that's
             * destructing.
             */
            e->elements[id].ptr = nullptr;
            e->elements[id].deleter1 = nullptr;
            e->elements[id].ownsDeleter = false;
          }
        }
        meta.freeIds_.push_back(id);
      }
    }

    // Delete elements outside the locks.
    for (ElementWrapper& elem : elements) {
      if (elem.dispose(TLPDestructionMode::ALL_THREADS)) {
        elem.cleanup();
      }
    }
  } catch (...) {
    // Just in case we get a lock error or something anyway...
    LOG(WARNING) << "Destructor discarding an exception that was thrown.";
  }
}
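Note how destroy() only collects the per-thread ElementWrapper copies while the locks are held and runs the user-supplied deleters afterwards, so arbitrary destructor code never executes under meta.lock_. The same collect-then-dispose shape in miniature, with invented names:

#include <functional>
#include <mutex>
#include <vector>

std::mutex g_lock;
std::vector<std::function<void()>> g_cleanups;  // guarded by g_lock

void destroyAll() {
  std::vector<std::function<void()>> todo;
  {
    std::lock_guard<std::mutex> g(g_lock);
    todo.swap(g_cleanups);   // detach the work while holding the lock
  }
  for (auto& fn : todo) {    // run user callbacks with no lock held,
    fn();                    // so they may safely re-enter this module
  }
}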
uint32_t StaticMetaBase::elementsCapacity() const {
  ThreadEntry* threadEntry = (*threadEntry_)();

  return FOLLY_LIKELY(!!threadEntry) ? threadEntry->getElementsCapacity() : 0;
}