QueryData genMemoryMap(QueryContext& context) {
  QueryData results;

  // Linux memory map is exposed in /sys.
  std::vector<std::string> regions;
  auto status = listDirectoriesInDirectory(kMemoryMapLocation, regions);
  if (!status.ok()) {
    return {};
  }

  for (const auto& index : regions) {
    fs::path index_path(index);
    Row r;
    r["region"] = index_path.filename().string();

    // The type is a textual description.
    std::string content;
    readFile(index_path / "type", content);
    boost::trim(content);
    r["type"] = content;

    // Keep these in 0xFFFF (hex) form.
    readFile(index_path / "start", content);
    boost::trim(content);
    r["start"] = content;

    readFile(index_path / "end", content);
    boost::trim(content);
    r["end"] = content;

    results.push_back(r);
  }

  return results;
}
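The snippet depends on a kMemoryMapLocation constant that is not shown. A plausible definition, assuming the table reads the firmware memory map that Linux exposes under /sys/firmware/memmap (one numbered directory per region, each containing 'type', 'start', and 'end' files) — this reconstruction is an assumption, not the project's actual source:

// Hypothetical definition of the constant used above; the exact value in
// the original source may differ. On Linux, /sys/firmware/memmap contains
// one directory per firmware memory-map region.
const std::string kMemoryMapLocation = "/sys/firmware/memmap";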
static int add_one_path(const struct cache_entry *old, const char *path, int len, struct stat *st)
{
	int option, size;
	struct cache_entry *ce;

	/* Was the old index entry already up-to-date? */
	if (old && !ce_stage(old) && !ce_match_stat(old, st, 0))
		return 0;

	size = cache_entry_size(len);
	ce = xcalloc(1, size);
	memcpy(ce->name, path, len);
	ce->ce_flags = create_ce_flags(0);
	ce->ce_namelen = len;
	fill_stat_cache_info(ce, st);
	ce->ce_mode = ce_mode_from_stat(old, st->st_mode);

	if (index_path(ce->sha1, path, st,
		       info_only ? 0 : HASH_WRITE_OBJECT)) {
		free(ce);
		return -1;
	}
	option = allow_add ? ADD_CACHE_OK_TO_ADD : 0;
	option |= allow_replace ? ADD_CACHE_OK_TO_REPLACE : 0;
	if (add_cache_entry(ce, option))
		return error("%s: cannot add to the index - missing --add option?", path);
	return 0;
}
// Read the cached index for 'package'; on any failure, rethrow as a
// cache_read_index_error carrying the package, with the original
// exception attached as a nested one.
index repository::cache::read_index(const entry &package)
{
    try
    {
        return index(index_path(package)).absolute(package);
    }
    catch (std::exception &)
    {
        BOOST_THROW_EXCEPTION(cache_read_index_error()
            << cache_read_index_error::package(package)
            << enable_nested_current());
    }
}
int notes_merge_commit(struct notes_merge_options *o,
		       struct notes_tree *partial_tree,
		       struct commit *partial_commit,
		       unsigned char *result_sha1)
{
	/*
	 * Iterate through files in .git/NOTES_MERGE_WORKTREE and add all
	 * found notes to 'partial_tree'. Write the updated notes tree to
	 * the DB, and commit the resulting tree object while reusing the
	 * commit message and parents from 'partial_commit'.
	 * Finally store the new commit object SHA1 into 'result_sha1'.
	 */
	struct dir_struct dir;
	char *path = xstrdup(git_path(NOTES_MERGE_WORKTREE "/"));
	int path_len = strlen(path), i;
	const char *msg = strstr(partial_commit->buffer, "\n\n");

	OUTPUT(o, 3, "Committing notes in notes merge worktree at %.*s",
	       path_len - 1, path);

	if (!msg || msg[2] == '\0')
		die("partial notes commit has empty message");
	msg += 2;

	memset(&dir, 0, sizeof(dir));
	read_directory(&dir, path, path_len, NULL);
	for (i = 0; i < dir.nr; i++) {
		struct dir_entry *ent = dir.entries[i];
		struct stat st;
		const char *relpath = ent->name + path_len;
		unsigned char obj_sha1[20], blob_sha1[20];

		if (ent->len - path_len != 40 || get_sha1_hex(relpath, obj_sha1)) {
			OUTPUT(o, 3, "Skipping non-SHA1 entry '%s'", ent->name);
			continue;
		}

		/* write file as blob, and add to partial_tree */
		if (stat(ent->name, &st))
			die_errno("Failed to stat '%s'", ent->name);
		if (index_path(blob_sha1, ent->name, &st, HASH_WRITE_OBJECT))
			die("Failed to write blob object from '%s'", ent->name);
		if (add_note(partial_tree, obj_sha1, blob_sha1, NULL))
			die("Failed to add resolved note '%s' to notes tree",
			    ent->name);
		OUTPUT(o, 4, "Added resolved note for object %s: %s",
		       sha1_to_hex(obj_sha1), sha1_to_hex(blob_sha1));
	}

	create_notes_commit(partial_tree, partial_commit->parents, msg,
			    result_sha1);
	OUTPUT(o, 4, "Finalized notes merge commit: %s",
	       sha1_to_hex(result_sha1));
	free(path);
	return 0;
}
std::vector<soa_record_info> list_soa_file_tables(char const* filename)
{
    std::vector<soa_record_info> v;

    fs::path index_path(filename);
    index_path = fs::change_extension(index_path, ".ndx");
    fs::ifstream index_ifs(index_path, ios_in_binary());
    if(!index_ifs)
        {
        fatal_error()
            << "File '"
            << index_path
            << "' is required but could not be found. Try reinstalling."
            << LMI_FLUSH
            ;
        }

    // Index records have fixed length:
    //   4-byte integer:     table number
    //   50-byte char array: table name
    //   4-byte integer:     byte offset into '.dat' file
    // Table numbers are not necessarily consecutive or sorted.

    // SOA !! Assert endianness too? SOA tables are not portable;
    // probably they can easily be read only on x86 hardware.
    BOOST_STATIC_ASSERT(8 == CHAR_BIT);
    BOOST_STATIC_ASSERT(4 == sizeof(int));
    BOOST_STATIC_ASSERT(2 == sizeof(short int));

    int const index_record_length(58);
    char index_record[index_record_length] = {0};
    BOOST_STATIC_ASSERT(sizeof(boost::int32_t) <= sizeof(int));
    while(index_ifs)
        {
        index_ifs.read(index_record, index_record_length);
        if(index_record_length != index_ifs.gcount())
            {
            if(!index_ifs)
                {
                break;
                }
            fatal_error()
                << "Index file '"
                << index_path.string()
                << "': attempted to read "
                << index_record_length
                << " bytes, but got "
                << index_ifs.gcount()
                << " bytes instead."
                << LMI_FLUSH
                ;
            }
        soa_record_info rec;
        rec.index = deserialize_cast<boost::int32_t>(index_record);
        rec.name.assign(index_record + 4);
        v.push_back(rec);
        }
    return v;
}
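This snippet (and find_table() below) relies on a deserialize_cast<>() helper that is not shown. A minimal sketch of what such a helper presumably looks like — a memcpy-based reinterpretation; this reconstruction is an assumption, not the project's actual definition:

#include <cstring> // std::memcpy

// Hypothetical reconstruction of the helper used above: reinterpret
// sizeof(T) raw bytes at 'z' as a T without violating alignment or
// strict-aliasing rules. Assumes the bytes use the host's endianness
// and sizes, which is why the callers assert CHAR_BIT and sizeof(int)
// before reading.
template<typename T>
T deserialize_cast(char const* z)
{
    T t;
    std::memcpy(&t, z, sizeof(T));
    return t;
}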
int notes_merge_commit(struct notes_merge_options *o,
		       struct notes_tree *partial_tree,
		       struct commit *partial_commit,
		       struct object_id *result_oid)
{
	/*
	 * Iterate through files in .git/NOTES_MERGE_WORKTREE and add all
	 * found notes to 'partial_tree'. Write the updated notes tree to
	 * the DB, and commit the resulting tree object while reusing the
	 * commit message and parents from 'partial_commit'.
	 * Finally store the new commit object OID into 'result_oid'.
	 */
	DIR *dir;
	struct dirent *e;
	struct strbuf path = STRBUF_INIT;
	const char *buffer = get_commit_buffer(partial_commit, NULL);
	const char *msg = strstr(buffer, "\n\n");
	int baselen;

	git_path_buf(&path, NOTES_MERGE_WORKTREE);
	if (o->verbosity >= 3)
		printf("Committing notes in notes merge worktree at %s\n",
		       path.buf);

	if (!msg || msg[2] == '\0')
		die("partial notes commit has empty message");
	msg += 2;

	dir = opendir(path.buf);
	if (!dir)
		die_errno("could not open %s", path.buf);

	strbuf_addch(&path, '/');
	baselen = path.len;
	while ((e = readdir(dir)) != NULL) {
		struct stat st;
		struct object_id obj_oid, blob_oid;

		if (is_dot_or_dotdot(e->d_name))
			continue;

		if (get_oid_hex(e->d_name, &obj_oid)) {
			if (o->verbosity >= 3)
				printf("Skipping non-SHA1 entry '%s%s'\n",
				       path.buf, e->d_name);
			continue;
		}

		strbuf_addstr(&path, e->d_name);
		/* write file as blob, and add to partial_tree */
		if (stat(path.buf, &st))
			die_errno("Failed to stat '%s'", path.buf);
		if (index_path(&blob_oid, path.buf, &st, HASH_WRITE_OBJECT))
			die("Failed to write blob object from '%s'", path.buf);
		if (add_note(partial_tree, &obj_oid, &blob_oid, NULL))
			die("Failed to add resolved note '%s' to notes tree",
			    path.buf);
		if (o->verbosity >= 4)
			printf("Added resolved note for object %s: %s\n",
			       oid_to_hex(&obj_oid), oid_to_hex(&blob_oid));
		strbuf_setlen(&path, baselen);
	}

	create_notes_commit(partial_tree, partial_commit->parents, msg,
			    strlen(msg), result_oid->hash);
	unuse_commit_buffer(partial_commit, buffer);
	if (o->verbosity >= 4)
		printf("Finalized notes merge commit: %s\n",
		       oid_to_hex(result_oid));
	strbuf_release(&path);
	closedir(dir);
	return 0;
}
void actuarial_table::find_table()
{
    LMI_ASSERT(0 != table_number_);

    fs::path index_path(filename_);
    index_path = fs::change_extension(index_path, ".ndx");
    fs::ifstream index_ifs(index_path, ios_in_binary());
    if(!index_ifs)
        {
        fatal_error()
            << "File '"
            << index_path
            << "' is required but could not be found. Try reinstalling."
            << LMI_FLUSH
            ;
        }

    // TODO ?? Assert endianness too? SOA tables are not portable;
    // probably they can easily be read only on x86 hardware.
    BOOST_STATIC_ASSERT(8 == CHAR_BIT);
    BOOST_STATIC_ASSERT(4 == sizeof(int));
    BOOST_STATIC_ASSERT(2 == sizeof(short int));

    // 27.4.3.2/2 requires that this be interpreted as invalid.
    // Reinitialize it here for robustness, even though the ctor
    // already initializes it in the same way.
    table_offset_ = std::streampos(-1);

    int const index_record_length(58);
    char index_record[index_record_length] = {0};
    BOOST_STATIC_ASSERT(sizeof(boost::int32_t) <= sizeof(int));
    while(index_ifs)
        {
        int index_table_number = deserialize_cast<boost::int32_t>(index_record);
        if(table_number_ == index_table_number)
            {
            // The byte offset is the last field in the 58-byte record:
            // it follows the 4-byte table number and the 50-byte name,
            // hence the displacement of 54.
            char* p = 54 + index_record;
            int z = deserialize_cast<boost::int32_t>(p);
            table_offset_ = std::streampos(z);
            break;
            }
        index_ifs.read(index_record, index_record_length);
        if(index_record_length != index_ifs.gcount())
            {
            fatal_error()
                << "Table "
                << table_number_
                << " in file '"
                << filename_
                << "': attempted to read "
                << index_record_length
                << " bytes, but got "
                << index_ifs.gcount()
                << " bytes instead."
                << LMI_FLUSH
                ;
            }
        }
    if(std::streampos(-1) == table_offset_)
        {
        fatal_error()
            << "Table "
            << table_number_
            << " in file '"
            << filename_
            << "': offset "
            << table_offset_
            << " is invalid."
            << LMI_FLUSH
            ;
        }
}
int git_indexer_commit(git_indexer *idx, git_transfer_progress *stats)
{
	git_mwindow *w = NULL;
	unsigned int i, long_offsets = 0, left;
	int error;
	struct git_pack_idx_header hdr;
	git_buf filename = GIT_BUF_INIT;
	struct entry *entry;
	git_oid trailer_hash, file_hash;
	git_hash_ctx ctx;
	git_filebuf index_file = {0};
	void *packfile_trailer;

	if (git_hash_ctx_init(&ctx) < 0)
		return -1;

	/* Test for this before resolve_deltas(), as it plays with idx->off */
	if (idx->off < idx->pack->mwf.size - 20) {
		giterr_set(GITERR_INDEXER, "Unexpected data at the end of the pack");
		return -1;
	}

	packfile_trailer = git_mwindow_open(&idx->pack->mwf, &w,
		idx->pack->mwf.size - GIT_OID_RAWSZ, GIT_OID_RAWSZ, &left);
	if (packfile_trailer == NULL) {
		git_mwindow_close(&w);
		goto on_error;
	}

	/* Compare the packfile trailer as it was sent to us and what we calculated */
	git_oid_fromraw(&file_hash, (unsigned char*) packfile_trailer);
	git_mwindow_close(&w);

	git_hash_final(&trailer_hash, &idx->trailer);
	if (git_oid_cmp(&file_hash, &trailer_hash)) {
		giterr_set(GITERR_INDEXER, "packfile trailer mismatch");
		return -1;
	}

	/* Freeze the number of deltas */
	stats->total_deltas = stats->total_objects - stats->indexed_objects;

	if ((error = resolve_deltas(idx, stats)) < 0)
		return error;

	if (stats->indexed_objects != stats->total_objects) {
		giterr_set(GITERR_INDEXER, "early EOF");
		return -1;
	}

	if (stats->local_objects > 0) {
		if (update_header_and_rehash(idx, stats) < 0)
			return -1;

		git_hash_final(&trailer_hash, &idx->trailer);
		write_at(idx, &trailer_hash, idx->pack->mwf.size - GIT_OID_RAWSZ, GIT_OID_RAWSZ);
	}

	git_vector_sort(&idx->objects);

	git_buf_sets(&filename, idx->pack->pack_name);
	git_buf_shorten(&filename, strlen("pack"));
	git_buf_puts(&filename, "idx");
	if (git_buf_oom(&filename))
		return -1;

	if (git_filebuf_open(&index_file, filename.ptr,
		GIT_FILEBUF_HASH_CONTENTS, idx->mode) < 0)
		goto on_error;

	/* Write out the header */
	hdr.idx_signature = htonl(PACK_IDX_SIGNATURE);
	hdr.idx_version = htonl(2);
	git_filebuf_write(&index_file, &hdr, sizeof(hdr));

	/* Write out the fanout table */
	for (i = 0; i < 256; ++i) {
		uint32_t n = htonl(idx->fanout[i]);
		git_filebuf_write(&index_file, &n, sizeof(n));
	}

	/* Write out the object names (SHA-1 hashes) */
	git_vector_foreach(&idx->objects, i, entry, struct entry*) {
		git_filebuf_write(&index_file, &entry->oid, sizeof(git_oid));
		git_hash_update(&ctx, &entry->oid, GIT_OID_RAWSZ);
	}
	git_hash_final(&idx->hash, &ctx);

	/* Write out the CRC32 values */
	git_vector_foreach(&idx->objects, i, entry, struct entry*) {
		git_filebuf_write(&index_file, &entry->crc, sizeof(uint32_t));
	}

	/* Write out the offsets */
	git_vector_foreach(&idx->objects, i, entry, struct entry*) {
		uint32_t n;

		if (entry->offset == UINT32_MAX)
			n = htonl(0x80000000 | long_offsets++);
		else
			n = htonl(entry->offset);

		git_filebuf_write(&index_file, &n, sizeof(uint32_t));
	}

	/* Write out the long offsets */
	git_vector_foreach(&idx->objects, i, entry, struct entry*) {
		uint32_t split[2];

		if (entry->offset != UINT32_MAX)
			continue;

		split[0] = htonl(entry->offset_long >> 32);
		split[1] = htonl(entry->offset_long & 0xffffffff);

		git_filebuf_write(&index_file, &split, sizeof(uint32_t) * 2);
	}

	/* Write out the packfile trailer to the index */
	if (git_filebuf_write(&index_file, &trailer_hash, GIT_OID_RAWSZ) < 0)
		goto on_error;

	/* Write out the hash of the idx */
	if (git_filebuf_hash(&trailer_hash, &index_file) < 0)
		goto on_error;

	git_filebuf_write(&index_file, &trailer_hash, sizeof(git_oid));

	/* Figure out what the final name should be */
	if (index_path(&filename, idx, ".idx") < 0)
		goto on_error;

	/* Commit file */
	if (git_filebuf_commit_at(&index_file, filename.ptr) < 0)
		goto on_error;

	git_mwindow_free_all(&idx->pack->mwf);

	/* We need to close the descriptor here so Windows doesn't choke on commit_at */
	if (p_close(idx->pack->mwf.fd) < 0) {
		giterr_set(GITERR_OS, "failed to close packfile");
		goto on_error;
	}

	idx->pack->mwf.fd = -1;

	if (index_path(&filename, idx, ".pack") < 0)
		goto on_error;

	/* And don't forget to rename the packfile to its new place. */
	p_rename(idx->pack->pack_name, git_buf_cstr(&filename));

	git_buf_free(&filename);
	git_hash_ctx_cleanup(&ctx);
	return 0;

on_error:
	git_mwindow_free_all(&idx->pack->mwf);
	git_filebuf_cleanup(&index_file);
	git_buf_free(&filename);
	git_hash_ctx_cleanup(&ctx);
	return -1;
}