DwarfChunk* DwarfInfo::addTracelet(TCRange range, const char* name,
                                   const Unit *unit, const Opcode *instr,
                                   bool exit, bool inPrologue) {
  DwarfChunk* chunk = NULL;
  FunctionInfo* f = new FunctionInfo(range, exit);
  if (name) {
    f->name = std::string(name);
  } else {
    f->name = lookupFunction(unit, instr, exit, inPrologue, true);
  }
  f->file = lookupFile(unit);

  TCA start = range.begin();
  const TCA end = range.end();

  {
    Lock lock(s_lock);
    FuncDB::iterator it = m_functions.lower_bound(range.begin());
    FunctionInfo* fi = it->second;
    if (it != m_functions.end() && fi->name == f->name &&
        fi->file == f->file &&
        start > fi->range.begin() &&
        end > fi->range.end()) {
      // XXX: verify that overlapping address come from jmp fixups
      start = fi->range.end();
      fi->range.extend(end);
      m_functions[end] = fi;
      m_functions.erase(it);
      delete(f);
      f = m_functions[end];
      ASSERT(f->m_chunk != NULL);
      f->m_chunk->clearSynced();
      f->clearPerfSynced();
    } else {
      m_functions[end] = f;
    }
  }

  addLineEntries(TCRange(start, end, range.isAstubs()), unit, instr, f);

  if (f->m_chunk == NULL) {
    Lock lock(s_lock);
    if (m_dwarfChunks.size() == 0 || m_dwarfChunks[0] == NULL) {
      // new chunk of base size
      chunk = new DwarfChunk();
      m_dwarfChunks.push_back(chunk);
    } else if (m_dwarfChunks[0]->m_functions.size() <
               RuntimeOption::EvalGdbSyncChunks) {
      // reuse first chunk
      chunk = m_dwarfChunks[0];
      chunk->clearSynced();
    } else {
      // compact chunks
      compactChunks();
      m_dwarfChunks[0] = chunk = new DwarfChunk();
    }
    chunk->m_functions.push_back(f);
    f->m_chunk = chunk;
  }

  if (f->m_chunk->m_functions.size() >= RuntimeOption::EvalGdbSyncChunks) {
    Lock lock(s_lock);
    ElfWriter e = ElfWriter(f->m_chunk);
  }
  return f->m_chunk;
}
DwarfChunk* DwarfInfo::addTracelet(TCRange range,
                                   folly::Optional<std::string> name,
                                   const Func *func, const Op* instr,
                                   bool exit, bool inPrologue) {
  DwarfChunk* chunk = nullptr;
  FunctionInfo* f = new FunctionInfo(range, exit);
  const Unit* unit = func ? func->unit() : nullptr;
  if (name) {
    f->name = *name;
  } else {
    assert(func != nullptr);
    f->name = lookupFunction(func, exit, inPrologue, true);
    auto names = func->localNames();
    for (int i = 0; i < func->numNamedLocals(); i++) {
      f->m_namedLocals.push_back(names[i]->toCppString());
    }
  }
  f->file = lookupFile(unit);

  TCA start = range.begin();
  const TCA end = range.end();

  Lock lock(s_lock);
  auto const it = m_functions.lower_bound(range.begin());
  auto const fi = it->second;
  if (it != m_functions.end() && fi->name == f->name &&
      fi->file == f->file &&
      start > fi->range.begin() &&
      end > fi->range.end()) {
    // XXX: verify that overlapping address come from jmp fixups
    start = fi->range.end();
    fi->range.extend(end);
    m_functions[end] = fi;
    m_functions.erase(it);
    delete f;
    f = m_functions[end];
    assert(f->m_chunk != nullptr);
    f->m_chunk->clearSynced();
    f->clearPerfSynced();
  } else {
    m_functions[end] = f;
  }

  addLineEntries(TCRange(start, end, range.isAcold()), unit, instr, f);

  if (f->m_chunk == nullptr) {
    if (m_dwarfChunks.size() == 0 || m_dwarfChunks[0] == nullptr) {
      // new chunk of base size
      chunk = new DwarfChunk();
      m_dwarfChunks.push_back(chunk);
    } else if (m_dwarfChunks[0]->m_functions.size() <
               RuntimeOption::EvalGdbSyncChunks) {
      // reuse first chunk
      chunk = m_dwarfChunks[0];
      chunk->clearSynced();
    } else {
      // compact chunks
      compactChunks();
      m_dwarfChunks[0] = chunk = new DwarfChunk();
    }
    chunk->m_functions.push_back(f);
    f->m_chunk = chunk;
  }

#if !defined(__APPLE__) && !defined(__FreeBSD__) && !defined(__CYGWIN__)
  if (f->m_chunk->m_functions.size() >= RuntimeOption::EvalGdbSyncChunks) {
    ElfWriter e = ElfWriter(f->m_chunk);
  }
#endif

  return f->m_chunk;
}
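// ---------------------------------------------------------------------------
// Illustrative sketch (not part of HHVM): the merge step above keys
// m_functions by each range's *end* address, probes for a possible overlap
// with lower_bound(range.begin()), and, when the new tracelet belongs to the
// same function and extends past an existing entry, grows that entry and
// re-keys it at the new end instead of inserting a duplicate. The stand-in
// types below (Range, Info, InfoMap, addRange) are hypothetical
// simplifications, not HHVM's TCRange/FunctionInfo, and the sketch omits the
// file comparison and the chunk/perf-sync bookkeeping.
// ---------------------------------------------------------------------------
#include <cstdint>
#include <map>
#include <memory>
#include <string>

struct Range {
  uintptr_t begin;
  uintptr_t end;
  void extend(uintptr_t newEnd) { end = newEnd; }
};

struct Info {
  std::string name;
  Range range;
};

// Map keyed by the end address of each range, mirroring m_functions[end] = f.
using InfoMap = std::map<uintptr_t, std::unique_ptr<Info>>;

Info* addRange(InfoMap& infos, std::string name, Range range) {
  // First entry whose end address is >= the new begin: the only candidate
  // that can overlap the new range from the left.
  auto it = infos.lower_bound(range.begin);
  if (it != infos.end() &&
      it->second->name == name &&
      range.begin > it->second->range.begin &&
      range.end > it->second->range.end) {
    // Same function, overlapping tail: extend the existing entry to the new
    // end and move it to the key for that end.
    auto existing = std::move(it->second);
    existing->range.extend(range.end);
    infos.erase(it);
    auto* raw = existing.get();
    infos[range.end] = std::move(existing);
    return raw;
  }
  // No mergeable entry: insert a fresh record keyed by its end address.
  auto fresh = std::make_unique<Info>(Info{std::move(name), range});
  auto* raw = fresh.get();
  infos[range.end] = std::move(fresh);
  return raw;
}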