// this takes ownership of a module after code emission is complete // and will add it to the execution engine when required (by jl_finalize_function) void jl_finalize_module(Module *m, bool shadow) { #if !defined(USE_ORCJIT) jl_globalPM->run(*m); #endif // record the function names that are part of this Module // so it can be added to the JIT when needed for (Module::iterator I = m->begin(), E = m->end(); I != E; ++I) { Function *F = &*I; if (!F->isDeclaration()) { bool known = incomplete_fname.erase(F->getName()); (void)known; // TODO: assert(known); // llvmcall gets this wrong module_for_fname[F->getName()] = m; } } #if defined(USE_ORCJIT) || defined(USE_MCJIT) // in the newer JITs, the shadow module is separate from the execution module if (shadow) jl_add_to_shadow(m); #else bool changes = jl_try_finalize(m); while (changes) { // this definitely isn't the most efficient, but it's only for the old LLVM 3.3 JIT changes = false; for (StringMap<Module*>::iterator MI = module_for_fname.begin(), ME = module_for_fname.end(); MI != ME; ++MI) { changes |= jl_try_finalize(MI->second); } } #endif }
// Record an insertion of (key, value) in this batch.
// Any previously-recorded deletion of the same key is superseded.
void Put(const Slice& key, const Slice& value) {
  const std::string k(key.data(), key.size());
  dels.erase(k);
  inserts[k] = std::string(value.data(), value.size());
}
// Note: static method bool LDModelParser::unsetToken(StringSet &tokens, const char *token) { StringSet::iterator it = tokens.find(token); if (it != tokens.end()) { tokens.erase(it); return true; } return false; }
// this takes ownership of a module after code emission is complete // and will add it to the execution engine when required (by jl_finalize_function) void jl_finalize_module(Module *m, bool shadow) { // record the function names that are part of this Module // so it can be added to the JIT when needed for (Module::iterator I = m->begin(), E = m->end(); I != E; ++I) { Function *F = &*I; if (!F->isDeclaration()) { bool known = incomplete_fname.erase(F->getName()); (void)known; // TODO: assert(known); // llvmcall gets this wrong module_for_fname[F->getName()] = m; } } // in the newer JITs, the shadow module is separate from the execution module if (shadow) jl_add_to_shadow(m); }
// Remove and emit up to `count` members from the set stored at `key` in db.
// Popped members are written to `members` via StringArrayResult::Get().
// Returns 0 on success (including "key does not exist" and "set empty"),
// ERR_PERMISSION_DENIED when the store is read-only, ERR_OFFSET_OUTRANGE for
// a negative count, or the error reported by GetObject.
int MMKVImpl::SPop(DBID db, const Data& key, const StringArrayResult& members, int count)
{
    if (m_readonly)
    {
        return ERR_PERMISSION_DENIED;
    }
    if (count < 0)
    {
        return ERR_OFFSET_OUTRANGE;
    }
    int err = 0;
    // Exclusive lock: we mutate the shared-memory set below.
    RWLockGuard<MemorySegmentManager, WRITE_LOCK> keylock_guard(m_segment);
    EnsureWritableValueSpace();
    StringSet* set = GetObject<StringSet>(db, key, V_TYPE_SET, false, err)();
    if (IS_NOT_EXISTS(err))
    {
        return 0;
    }
    // Guard against a NULL set as well as an explicit error — matches the
    // check SRem performs before dereferencing the set.
    if (NULL == set || 0 != err)
    {
        return err;
    }
    if (set->empty())
    {
        return 0;
    }
    for (int i = 0; i < count && !set->empty(); i++)
    {
        // Pop from the front of the ordered set.
        StringSet::iterator it = set->begin();
        it->ToString(members.Get());
        // Copy the element before erasing so its payload can still be freed.
        Object cc = *it;
        set->erase(it);
        DestroyObjectContent(cc);
    }
    if (set->empty())
    {
        // The set is now empty: remove the key itself.
        GenericDel(GetMMKVTable(db, false), db, Object(key, false));
    }
    return 0;
}
// Remove the given members from the set stored at `key` in database `db`.
// Returns the number of elements actually removed, 0 when the key does not
// exist, ERR_PERMISSION_DENIED when the store is read-only, or the error
// reported by GetObject.
int MMKVImpl::SRem(DBID db, const Data& key, const DataArray& members)
{
    if (m_readonly)
    {
        return ERR_PERMISSION_DENIED;
    }
    int err = 0;
    // Exclusive lock: we mutate the shared-memory set below.
    RWLockGuard<MemorySegmentManager, WRITE_LOCK> keylock_guard(m_segment);
    EnsureWritableValueSpace();
    StringSet* set = GetObject<StringSet>(db, key, V_TYPE_SET, false, err)();
    if (IS_NOT_EXISTS(err))
    {
        return 0;
    }
    if (NULL == set || 0 != err)
    {
        return err;
    }
    int removed = 0;
    for (size_t i = 0; i < members.size(); i++)
    {
        // Object(member, true) builds a transient lookup key — presumably a
        // non-owning wrapper; TODO confirm the bool flag against Object's ctor.
        StringSet::iterator found = set->find(Object(members[i], true));
        if (found != set->end())
        {
            // Copy the element before erasing so its payload can still be freed.
            Object cc = *found;
            set->erase(found);
            DestroyObjectContent(cc);
            removed++;
        }
    }
    if (set->empty())
    {
        // The set is now empty: remove the key itself.
        GenericDel(GetMMKVTable(db, false),db, Object(key, false));
    }
    return removed;
}
// Load the IR module embedded in the claimed input file `F`/`Info` and apply
// gold's per-symbol resolutions to it:
//  - symbols gold preempted are recorded in `Drop` and stripped afterwards;
//  - IR-only prevailing symbols are queued in `Internalize` (internalized
//    after the IR link, since internalizing now would prevent them from being
//    copied into the merged module);
//  - linkonce_odr symbols whose address significance is not yet known are
//    queued in `Maybe`.
// Emits one "<name> <resolution>" line per symbol to ApiFile when
// -generate-api-file is in effect. Returns ownership of the module.
static std::unique_ptr<Module>
getModuleForFile(LLVMContext &Context, claimed_file &F,
                 ld_plugin_input_file &Info, raw_fd_ostream *ApiFile,
                 StringSet<> &Internalize, StringSet<> &Maybe) {
  // Ask gold for its resolution decisions on every symbol of this file.
  if (get_symbols(F.handle, F.syms.size(), F.syms.data()) != LDPS_OK)
    message(LDPL_FATAL, "Failed to get symbol information");

  // Map the raw file contents and parse them as a bitcode IR object.
  const void *View;
  if (get_view(F.handle, &View) != LDPS_OK)
    message(LDPL_FATAL, "Failed to get a view of file");

  MemoryBufferRef BufferRef(StringRef((const char *)View, Info.filesize),
                            Info.name);
  ErrorOr<std::unique_ptr<object::IRObjectFile>> ObjOrErr =
      object::IRObjectFile::create(BufferRef, Context);

  if (std::error_code EC = ObjOrErr.getError())
    message(LDPL_FATAL, "Could not read bitcode from file : %s",
            EC.message().c_str());

  object::IRObjectFile &Obj = **ObjOrErr;

  Module &M = Obj.getModule();

  // Metadata must be materialized before the debug-info upgrade can run.
  M.materializeMetadata();
  UpgradeDebugInfo(M);

  // Values listed in @llvm.used must stay externally visible, so they are
  // never added to the Internalize set below.
  SmallPtrSet<GlobalValue *, 8> Used;
  collectUsedGlobalVariables(M, Used, /*CompilerUsed*/ false);

  DenseSet<GlobalValue *> Drop;
  std::vector<GlobalAlias *> KeptAliases;

  // Walk the object's symbols in lockstep with gold's F.syms array; SymNum
  // indexes the latter and is only advanced for non-skipped symbols.
  unsigned SymNum = 0;
  for (auto &ObjSym : Obj.symbols()) {
    if (shouldSkip(ObjSym.getFlags()))
      continue;
    ld_plugin_symbol &Sym = F.syms[SymNum];
    ++SymNum;

    ld_plugin_symbol_resolution Resolution =
        (ld_plugin_symbol_resolution)Sym.resolution;

    if (options::generate_api_file)
      *ApiFile << Sym.name << ' ' << getResolutionName(Resolution) << '\n';

    GlobalValue *GV = Obj.getSymbolGV(ObjSym.getRawDataRefImpl());
    if (!GV) {
      freeSymName(Sym);
      continue; // Asm symbol.
    }

    if (Resolution != LDPR_PREVAILING_DEF_IRONLY && GV->hasCommonLinkage()) {
      // Common linkage is special. There is no single symbol that wins the
      // resolution. Instead we have to collect the maximum alignment and size.
      // The IR linker does that for us if we just pass it every common GV.
      // We still have to keep track of LDPR_PREVAILING_DEF_IRONLY so we
      // internalize once the IR linker has done its job.
      freeSymName(Sym);
      continue;
    }

    switch (Resolution) {
    case LDPR_UNKNOWN:
      llvm_unreachable("Unexpected resolution");

    case LDPR_RESOLVED_IR:
    case LDPR_RESOLVED_EXEC:
    case LDPR_RESOLVED_DYN:
      // Resolved elsewhere: this module should only hold a declaration.
      assert(GV->isDeclarationForLinker());
      break;

    case LDPR_UNDEF:
      if (!GV->isDeclarationForLinker()) {
        // A definition gold nevertheless marked undefined can only happen
        // for comdats whose group lost; drop it.
        assert(GV->hasComdat());
        Drop.insert(GV);
      }
      break;

    case LDPR_PREVAILING_DEF_IRONLY: {
      keepGlobalValue(*GV, KeptAliases);
      if (!Used.count(GV)) {
        // Since we use the regular lib/Linker, we cannot just internalize GV
        // now or it will not be copied to the merged module. Instead we force
        // it to be copied and then internalize it.
        Internalize.insert(GV->getName());
      }
      break;
    }

    case LDPR_PREVAILING_DEF:
      keepGlobalValue(*GV, KeptAliases);
      break;

    case LDPR_PREEMPTED_IR:
      // Gold might have selected a linkonce_odr and preempted a weak_odr.
      // In that case we have to make sure we don't end up internalizing it.
      if (!GV->isDiscardableIfUnused())
        Maybe.erase(GV->getName());
      // fall-through
    case LDPR_PREEMPTED_REG:
      Drop.insert(GV);
      break;

    case LDPR_PREVAILING_DEF_IRONLY_EXP: {
      // We can only check for address uses after we merge the modules. The
      // reason is that this GV might have a copy in another module
      // and in that module the address might be significant, but that
      // copy will be LDPR_PREEMPTED_IR.
      if (GV->hasLinkOnceODRLinkage())
        Maybe.insert(GV->getName());
      keepGlobalValue(*GV, KeptAliases);
      break;
    }
    }

    freeSymName(Sym);
  }

  ValueToValueMapTy VM;
  LocalValueMaterializer Materializer(Drop);
  for (GlobalAlias *GA : KeptAliases) {
    // Gold told us to keep GA. It is possible that a GV used in the aliasee
    // expression is being dropped. If that is the case, that GV must be copied.
    Constant *Aliasee = GA->getAliasee();
    Constant *Replacement = mapConstantToLocalCopy(Aliasee, VM, &Materializer);
    GA->setAliasee(Replacement);
  }

  // Strip every preempted/dropped value from the module before handing it on.
  for (auto *GV : Drop)
    drop(*GV);

  return Obj.takeModule();
}
// Generic implementation behind the set-algebra commands. `op` selects the
// operation (OP_DIFF / OP_INTER / OP_UNION) folded left-to-right across the
// sets named by `keys`. When `res` is non-NULL the resulting members are
// streamed to it; when `dest` is non-NULL the result replaces the set stored
// at *dest and the function returns that set's final size. Otherwise returns
// 0 on success or a negative error code.
int MMKVImpl::GenericSInterDiffUnion(DBID db, int op, const DataArray& keys, const Data* dest, const StringArrayResult* res)
{
    // Two scratch result sets, ping-ponged so each round can read the
    // previous round's output while writing its own.
    StdObjectSet results[2];
    int result_index = 0;
    StringSetArray sets;
    sets.resize(keys.size());
    int err = 0;
    size_t start_index = 0;
    StringSet* destset = NULL;
    StdObjectSet* result = NULL;
    StdObjectSet* cmp = NULL; // accumulated result from the previous round
    int current_result_index = 0;
    ObjectAllocator allocator = m_segment.MSpaceAllocator<Object>();
    // Stand-in for missing keys so the merge loop treats every slot uniformly.
    StringSet empty_set(std::less<Object>(), allocator);
    // Resolve every source key up front; a missing key maps to the empty set.
    for (size_t i = 0; i < keys.size(); i++)
    {
        StringSet* set = GetObject<StringSet>(db, keys[i], V_TYPE_SET, false, err)();
        if (IS_NOT_EXISTS(err))
        {
            sets[i] = &empty_set;
            continue;
        }
        if (0 != err)
        {
            return err;
        }
        sets[i] = set;
    }
    if (NULL != dest)
    {
        // Create (or fetch) the destination set now so errors surface early.
        ObjectAllocator allocator = m_segment.MSpaceAllocator<Object>();
        destset = GetObject<StringSet>(db, *dest, V_TYPE_SET, true, err)(std::less<Object>(), allocator);
        if (0 != err)
        {
            return err;
        }
    }
    // NOTE(review): sets[i] is assigned &empty_set for missing keys above, so
    // these NULL checks look unreachable — confirm GetObject cannot yield a
    // NULL set with err == 0.
    if (NULL == sets[0])
    {
        if (op == OP_DIFF || op == OP_INTER)
        {
            result_index = 0;
            goto _end;
        }
    }
    for (size_t i = 0; i < keys.size(); i++)
    {
        if (sets[i] != NULL)
        {
            start_index = i;
            break;
        }
    }
    // Fold the remaining sets into the running result.
    // NOTE(review): if keys.size() == 1 this loop never runs, `result` stays
    // NULL and result_index below evaluates to 1 (an empty scratch set) —
    // confirm that is the intended output for a single-key call.
    for (size_t i = start_index + 1; i < keys.size(); i++)
    {
        result = results + current_result_index;
        if (sets[i]->empty())
        {
            // Intersecting with an empty set is empty; finish immediately.
            if (op == OP_INTER)
            {
                results->clear();
                result_index = 0;
                goto _end;
            }
        }
        result->clear();
        switch (op)
        {
            case OP_DIFF:
            {
                // First round diffs the seed set; later rounds diff the
                // accumulated result in `cmp`.
                if (cmp == NULL)
                {
                    std::set_difference(sets[start_index]->begin(), sets[start_index]->end(), sets[i]->begin(), sets[i]->end(),
                            std::inserter(*result, result->end()), std::less<Object>());
                }
                else
                {
                    std::set_difference(cmp->begin(), cmp->end(), sets[i]->begin(), sets[i]->end(),
                            std::inserter(*result, result->end()), std::less<Object>());
                }
                // An empty difference can only stay empty; stop early.
                if (result->empty())
                {
                    result_index = current_result_index;
                    goto _end;
                }
                break;
            }
            case OP_INTER:
            {
                if (cmp == NULL)
                {
                    std::set_intersection(sets[start_index]->begin(), sets[start_index]->end(), sets[i]->begin(), sets[i]->end(),
                            std::inserter(*result, result->end()), std::less<Object>());
                }
                else
                {
                    std::set_intersection(cmp->begin(), cmp->end(), sets[i]->begin(), sets[i]->end(),
                            std::inserter(*result, result->end()), std::less<Object>());
                }
                // An empty intersection can only stay empty; stop early.
                if (result->empty())
                {
                    result_index = current_result_index;
                    goto _end;
                }
                break;
            }
            case OP_UNION:
            {
                if (cmp == NULL)
                {
                    std::set_union(sets[start_index]->begin(), sets[start_index]->end(), sets[i]->begin(), sets[i]->end(),
                            std::inserter(*result, result->end()), std::less<Object>());
                }
                else
                {
                    std::set_union(cmp->begin(), cmp->end(), sets[i]->begin(), sets[i]->end(),
                            std::inserter(*result, result->end()), std::less<Object>());
                }
                break;
            }
        }
        // Swap scratch buffers for the next round.
        current_result_index = 1 - current_result_index;
        cmp = result;
    }
    result_index = result == results ? 0 : 1;
_end:
    if (NULL != res)
    {
        // Stream the winning result set out to the caller.
        StdObjectSet::iterator it = results[result_index].begin();
        while (it != results[result_index].end())
        {
            it->ToString(res->Get());
            it++;
        }
    }
    if (NULL != destset)
    {
        //remove elements not in dest set
        StringSet::iterator it = destset->begin();
        while (it != destset->end())
        {
            Object element = *it;
            StdObjectSet::iterator cit = results[result_index].find(element);
            if (cit != results[result_index].end()) //remove elements from results which already in dest set
            {
                results[result_index].erase(cit);
                it++;
            }
            else
            {
                // Not part of the result: unlink it and release its payload.
                it = destset->erase(it);
                DestroyObjectContent(element);
            }
        }
        //insert rest elements
        StdObjectSet::iterator cit = results[result_index].begin();
        while (cit != results[result_index].end())
        {
            // Deep-copy into the shared-memory segment owned by the dest set.
            Object clone = CloneStrObject(*cit);
            destset->insert(clone);
            cit++;
        }
        return destset->size();
    }
    return 0;
}
/**
 * Scan a file for includes, defines and the lot.
 * @param filename the name of the file to scan.
 * @param ext the extension of the filename.
 * @param header whether the file is a header or not.
 * @param verbose whether to give verbose debugging information.
 *
 * Recursively scans included headers, tracking the active set of #define'd
 * identifiers and an #if/#ifdef nesting stack so that includes inside
 * disabled preprocessor branches are skipped. Discovered dependencies are
 * accumulated in the file-level maps _headers (per header) and _files (per
 * object file). All stored strings are strdup'd copies; `defines` entries
 * are freed when the outermost (non-header) call finishes.
 */
void ScanFile(const char *filename, const char *ext, bool header, bool verbose)
{
	/* Shared across the recursive calls of one top-level scan: the active
	 * defines and the conditional-compilation state stack. */
	static StringSet defines;
	static std::stack<Ignore> ignore;
	/* Copy in the default defines (parameters of depend) */
	if (!header) {
		for (StringSet::iterator it = _defines.begin(); it != _defines.end(); it++) {
			defines.insert(strdup(*it));
		}
	}
	File file(filename);
	Lexer lexer(&file);
	/* Start the lexing! */
	lexer.Lex();
	while (lexer.GetToken() != TOKEN_END) {
		switch (lexer.GetToken()) {
			/* We reached the end of the file... yay, we're done! */
			case TOKEN_END: break;

			/* The line started with a # (minus whitespace) */
			case TOKEN_SHARP:
				lexer.Lex();
				switch (lexer.GetToken()) {
					case TOKEN_INCLUDE:
						if (verbose) fprintf(stderr, "%s #include ", filename);
						lexer.Lex();
						switch (lexer.GetToken()) {
							case TOKEN_LOCAL:
							case TOKEN_GLOBAL: {
								if (verbose) fprintf(stderr, "%s", lexer.GetString());
								/* Includes inside a disabled #if branch are not dependencies. */
								if (!ignore.empty() && ignore.top() != NOT_IGNORE) {
									if (verbose) fprintf(stderr, " (ignored)");
									break;
								}
								const char *h = GeneratePath(file.GetDirname(), lexer.GetString(), lexer.GetToken() == TOKEN_LOCAL);
								if (h != NULL) {
									StringMap::iterator it = _headers.find(h);
									if (it == _headers.end()) {
										/* First time we see this header: register it and scan it recursively. */
										it = (_headers.insert(StringMapItem(strdup(h), new StringSet()))).first;
										if (verbose) fprintf(stderr, "\n");
										ScanFile(h, ext, true, verbose);
									}
									StringMap::iterator curfile;
									if (header) {
										curfile = _headers.find(filename);
									} else {
										/* Replace the extension with the provided extension of '.o'. */
										char path[PATH_MAX];
										strcpy(path, filename);
										*(strrchr(path, '.')) = '\0';
										strcat(path, ext != NULL ? ext : ".o");
										curfile = _files.find(path);
										if (curfile == _files.end()) {
											curfile = (_files.insert(StringMapItem(strdup(path), new StringSet()))).first;
										}
									}
									if (it != _headers.end()) {
										/* Propagate the header's own (transitive) dependencies to the current file. */
										for (StringSet::iterator header = it->second->begin(); header != it->second->end(); header++) {
											if (curfile->second->find(*header) == curfile->second->end()) curfile->second->insert(strdup(*header));
										}
									}
									if (curfile->second->find(h) == curfile->second->end()) curfile->second->insert(strdup(h));
									/* GeneratePath's result is heap-allocated; the maps hold their own strdup'd copies. */
									free(h);
								}
							}
							/* FALL THROUGH */
							default: break;
						}
						break;

					case TOKEN_DEFINE:
						if (verbose) fprintf(stderr, "%s #define ", filename);
						lexer.Lex();
						if (lexer.GetToken() == TOKEN_IDENTIFIER) {
							if (verbose) fprintf(stderr, "%s", lexer.GetString());
							if (!ignore.empty() && ignore.top() != NOT_IGNORE) {
								if (verbose) fprintf(stderr, " (ignored)");
								break;
							}
							if (defines.find(lexer.GetString()) == defines.end()) defines.insert(strdup(lexer.GetString()));
							lexer.Lex();
						}
						break;

					case TOKEN_UNDEF:
						if (verbose) fprintf(stderr, "%s #undef ", filename);
						lexer.Lex();
						if (lexer.GetToken() == TOKEN_IDENTIFIER) {
							if (verbose) fprintf(stderr, "%s", lexer.GetString());
							if (!ignore.empty() && ignore.top() != NOT_IGNORE) {
								if (verbose) fprintf(stderr, " (ignored)");
								break;
							}
							StringSet::iterator it = defines.find(lexer.GetString());
							if (it != defines.end()) {
								/* The set owns its strdup'd key; free it before erasing. */
								free(*it);
								defines.erase(it);
							}
							lexer.Lex();
						}
						break;

					case TOKEN_ENDIF:
						if (verbose) fprintf(stderr, "%s #endif", filename);
						lexer.Lex();
						if (!ignore.empty()) ignore.pop();
						if (verbose) fprintf(stderr, " -> %signore", (!ignore.empty() && ignore.top() != NOT_IGNORE) ? "" : "not ");
						break;

					case TOKEN_ELSE: {
						if (verbose) fprintf(stderr, "%s #else", filename);
						lexer.Lex();
						/* Flip the branch: the #else arm is active only if the #if arm
						 * was skipped (IGNORE_UNTIL_ELSE) and no outer branch disables us. */
						Ignore last = ignore.empty() ? NOT_IGNORE : ignore.top();
						if (!ignore.empty()) ignore.pop();
						if (ignore.empty() || ignore.top() == NOT_IGNORE) {
							ignore.push(last == IGNORE_UNTIL_ELSE ? NOT_IGNORE : IGNORE_UNTIL_ENDIF);
						} else {
							ignore.push(IGNORE_UNTIL_ENDIF);
						}
						if (verbose) fprintf(stderr, " -> %signore", (!ignore.empty() && ignore.top() != NOT_IGNORE) ? "" : "not ");
						break;
					}

					case TOKEN_ELIF: {
						if (verbose) fprintf(stderr, "%s #elif ", filename);
						lexer.Lex();
						/* Like #else, but the new arm is active only when its condition holds. */
						Ignore last = ignore.empty() ? NOT_IGNORE : ignore.top();
						if (!ignore.empty()) ignore.pop();
						if (ignore.empty() || ignore.top() == NOT_IGNORE) {
							bool value = ExpressionOr(&lexer, &defines, verbose);
							ignore.push(last == IGNORE_UNTIL_ELSE ? (value ? NOT_IGNORE : IGNORE_UNTIL_ELSE) : IGNORE_UNTIL_ENDIF);
						} else {
							ignore.push(IGNORE_UNTIL_ENDIF);
						}
						if (verbose) fprintf(stderr, " -> %signore", (!ignore.empty() && ignore.top() != NOT_IGNORE) ? "" : "not ");
						break;
					}

					case TOKEN_IF: {
						if (verbose) fprintf(stderr, "%s #if ", filename);
						lexer.Lex();
						if (ignore.empty() || ignore.top() == NOT_IGNORE) {
							bool value = ExpressionOr(&lexer, &defines, verbose);
							ignore.push(value ? NOT_IGNORE : IGNORE_UNTIL_ELSE);
						} else {
							/* Already inside a disabled branch: everything up to the matching #endif is dead. */
							ignore.push(IGNORE_UNTIL_ENDIF);
						}
						if (verbose) fprintf(stderr, " -> %signore", (!ignore.empty() && ignore.top() != NOT_IGNORE) ? "" : "not ");
						break;
					}

					case TOKEN_IFDEF:
						if (verbose) fprintf(stderr, "%s #ifdef ", filename);
						lexer.Lex();
						if (lexer.GetToken() == TOKEN_IDENTIFIER) {
							bool value = defines.find(lexer.GetString()) != defines.end();
							if (verbose) fprintf(stderr, "%s[%d]", lexer.GetString(), value);
							if (ignore.empty() || ignore.top() == NOT_IGNORE) {
								ignore.push(value ? NOT_IGNORE : IGNORE_UNTIL_ELSE);
							} else {
								ignore.push(IGNORE_UNTIL_ENDIF);
							}
						}
						if (verbose) fprintf(stderr, " -> %signore", (!ignore.empty() && ignore.top() != NOT_IGNORE) ? "" : "not ");
						break;

					case TOKEN_IFNDEF:
						if (verbose) fprintf(stderr, "%s #ifndef ", filename);
						lexer.Lex();
						if (lexer.GetToken() == TOKEN_IDENTIFIER) {
							bool value = defines.find(lexer.GetString()) != defines.end();
							if (verbose) fprintf(stderr, "%s[%d]", lexer.GetString(), value);
							if (ignore.empty() || ignore.top() == NOT_IGNORE) {
								/* Inverted sense of #ifdef. */
								ignore.push(!value ? NOT_IGNORE : IGNORE_UNTIL_ELSE);
							} else {
								ignore.push(IGNORE_UNTIL_ENDIF);
							}
						}
						if (verbose) fprintf(stderr, " -> %signore", (!ignore.empty() && ignore.top() != NOT_IGNORE) ? "" : "not ");
						break;

					default:
						if (verbose) fprintf(stderr, "%s #<unknown>", filename);
						lexer.Lex();
						break;
				}
				if (verbose) fprintf(stderr, "\n");
				/* FALL THROUGH */

			default:
				/* Ignore the rest of the garbage on this line */
				while (lexer.GetToken() != TOKEN_EOL && lexer.GetToken() != TOKEN_END) lexer.Lex();
				lexer.Lex();
				break;
		}
	}
	/* Outermost call only: release the strdup'd defines and reset the state
	 * stacks for the next top-level file. */
	if (!header) {
		for (StringSet::iterator it = defines.begin(); it != defines.end(); it++) {
			free(*it);
		}
		defines.clear();
		while (!ignore.empty()) ignore.pop();
	}
}
// Compare the /names string tables of the two PDB files, printing header
// fields first and then a per-string diff: strings present in only one file
// are reported as different, strings in both are reported with their IDs.
// Returns any error raised while reading individual strings.
Error DiffStyle::diffStringTable() {
  DiffPrinter D(2, "String Table", 30, 20, opts::diff::PrintResultColumn,
                opts::diff::PrintValueColumns, outs());
  D.printExplicit("File", DiffResult::UNSPECIFIED,
                  truncateStringFront(File1.getFilePath(), 18),
                  truncateStringFront(File2.getFilePath(), 18));

  auto ExpectedST1 = File1.getStringTable();
  auto ExpectedST2 = File2.getStringTable();
  bool Has1 = !!ExpectedST1;
  bool Has2 = !!ExpectedST2;
  std::string Count1 = Has1 ? llvm::utostr(ExpectedST1->getNameCount())
                            : "(string table not present)";
  std::string Count2 = Has2 ? llvm::utostr(ExpectedST2->getNameCount())
                            : "(string table not present)";
  D.print("Number of Strings", Count1, Count2);

  if (!Has1 || !Has2) {
    // At least one table is missing: nothing further to compare. Consume the
    // pending Error(s) so the Expected destructors don't assert.
    consumeError(ExpectedST1.takeError());
    consumeError(ExpectedST2.takeError());
    return Error::success();
  }

  auto &ST1 = *ExpectedST1;
  auto &ST2 = *ExpectedST2;

  D.print("Hash Version", ST1.getHashVersion(), ST2.getHashVersion());
  D.print("Byte Size", ST1.getByteSize(), ST2.getByteSize());
  D.print("Signature", ST1.getSignature(), ST2.getSignature());

  // Both have a valid string table, dive in and compare individual strings.

  auto IdList1 = ST1.name_ids();
  auto IdList2 = ST2.name_ids();
  // LS/RS collect the non-empty strings of each side; empty strings are only
  // counted.
  StringSet<> LS;
  StringSet<> RS;
  uint32_t Empty1 = 0;
  uint32_t Empty2 = 0;
  for (auto ID : IdList1) {
    auto S = ST1.getStringForID(ID);
    if (!S)
      return S.takeError();
    if (S->empty())
      ++Empty1;
    else
      LS.insert(*S);
  }
  for (auto ID : IdList2) {
    auto S = ST2.getStringForID(ID);
    if (!S)
      return S.takeError();
    if (S->empty())
      ++Empty2;
    else
      RS.insert(*S);
  }
  D.print("Empty Strings", Empty1, Empty2);

  // First pass: report every left-side string, removing matched strings from
  // RS so the second pass only sees the right-side leftovers.
  for (const auto &S : LS) {
    auto R = RS.find(S.getKey());
    std::string Truncated = truncateStringMiddle(S.getKey(), 28);
    uint32_t I = cantFail(ST1.getIDForString(S.getKey()));
    if (R == RS.end()) {
      D.printExplicit(Truncated, DiffResult::DIFFERENT, I, "(not present)");
      continue;
    }

    uint32_t J = cantFail(ST2.getIDForString(R->getKey()));
    D.print<EquivalentDiffProvider>(Truncated, I, J);
    RS.erase(R);
  }

  // Second pass: whatever remains in RS was not seen on the left.
  // NOTE(review): since the first pass erased every common string from RS,
  // the `L != LS.end()` branch below looks unreachable — confirm.
  for (const auto &S : RS) {
    auto L = LS.find(S.getKey());
    std::string Truncated = truncateStringMiddle(S.getKey(), 28);
    uint32_t J = cantFail(ST2.getIDForString(S.getKey()));
    if (L == LS.end()) {
      D.printExplicit(Truncated, DiffResult::DIFFERENT, "(not present)", J);
      continue;
    }

    uint32_t I = cantFail(ST1.getIDForString(L->getKey()));
    D.print<EquivalentDiffProvider>(Truncated, I, J);
  }
  return Error::success();
}