// Generate the filename for the next snapshot: scan the index folder for
// existing "snapshot_XXX.json" files and return "snapshot_<gen+1>.json",
// where <gen+1> is base-36 encoded.  Caller takes ownership of the String.
String*
IxManager_Make_Snapshot_Filename_IMP(IndexManager *self) {
    IndexManagerIVARS *const ivars = IxManager_IVARS(self);
    Folder *folder = (Folder*)CERTIFY(ivars->folder, FOLDER);
    DirHandle *dir_handle = Folder_Open_Dir(folder, NULL);
    if (!dir_handle) {
        RETHROW(INCREF(Err_get_error()));
    }

    // Track the highest snapshot generation currently on disk.
    uint64_t highest = 0;
    while (DH_Next(dir_handle)) {
        String *name = DH_Get_Entry(dir_handle);
        bool is_snapshot = Str_Starts_With_Utf8(name, "snapshot_", 9)
                           && Str_Ends_With_Utf8(name, ".json", 5);
        if (is_snapshot) {
            uint64_t gen = IxFileNames_extract_gen(name);
            if (gen > highest) {
                highest = gen;
            }
        }
        DECREF(name);
    }
    DECREF(dir_handle);

    // Encode the next generation number in base 36, matching the naming
    // scheme used by IxFileNames_extract_gen().
    uint64_t next_gen = highest + 1;
    char base36[StrHelp_MAX_BASE36_BYTES];
    StrHelp_to_base36(next_gen, &base36);
    return Str_newf("snapshot_%s.json", &base36);
}
// Return the name of the snapshot file with the highest generation number
// in `folder`, or NULL if no snapshot file exists.  Caller takes ownership
// of the returned String.
String*
IxFileNames_latest_snapshot(Folder *folder) {
    DirHandle *dir_handle = Folder_Open_Dir(folder, NULL);
    if (!dir_handle) {
        RETHROW(INCREF(Err_get_error()));
    }

    // Remember the best (newest-generation) snapshot file seen so far.
    String   *best     = NULL;
    uint64_t  best_gen = 0;
    while (DH_Next(dir_handle)) {
        String *name = DH_Get_Entry(dir_handle);
        bool is_snapshot = Str_Starts_With_Utf8(name, "snapshot_", 9)
                           && Str_Ends_With_Utf8(name, ".json", 5);
        if (is_snapshot) {
            uint64_t gen = IxFileNames_extract_gen(name);
            if (gen > best_gen) {
                best_gen = gen;
                DECREF(best);
                best = Str_Clone(name);
            }
        }
        DECREF(name);
    }
    DECREF(dir_handle);

    return best;
}
// Verify protection against symlink traversal: List_R() must not follow
// symlinks, and Delete_Tree() must not descend through a symlinked directory
// into content outside the tree being deleted.
static void
test_protect_symlinks(TestBatchRunner *runner) {
#ifdef ENABLE_SYMLINK_TESTS
    FSFolder *folder = (FSFolder*)S_set_up();
    String *foo       = (String*)SSTR_WRAP_UTF8("foo", 3);
    String *bar       = (String*)SSTR_WRAP_UTF8("bar", 3);
    String *foo_boffo = (String*)SSTR_WRAP_UTF8("foo/boffo", 9);
    FSFolder_MkDir(folder, foo);
    FSFolder_MkDir(folder, bar);
    OutStream *outstream = FSFolder_Open_Out(folder, foo_boffo);
    DECREF(outstream);
    if (!S_create_test_symlinks()) {
        // Burn through all five planned tests so the test count stays
        // consistent even when symlink creation fails.
        FAIL(runner, "symlink creation failed");
        FAIL(runner, "symlink creation failed");
        FAIL(runner, "symlink creation failed");
        FAIL(runner, "symlink creation failed");
        FAIL(runner, "symlink creation failed");
        // Try to clean up anyway.
        FSFolder_Delete_Tree(folder, foo);
        FSFolder_Delete_Tree(folder, bar);
    }
    else {
        VArray *list = FSFolder_List_R(folder, NULL);
        bool saw_bazooka_boffo = false;
        for (uint32_t i = 0, max = VA_Get_Size(list); i < max; i++) {
            String *entry = (String*)VA_Fetch(list, i);
            if (Str_Ends_With_Utf8(entry, "bazooka/boffo", 13)) {
                saw_bazooka_boffo = true;
                break;  // One sighting is enough.
            }
        }
        TEST_FALSE(runner, saw_bazooka_boffo,
                   "List_R() shouldn't follow symlinks");
        DECREF(list);
        TEST_TRUE(runner, FSFolder_Delete_Tree(folder, bar),
                  "Delete_Tree() returns true");
        TEST_FALSE(runner, FSFolder_Exists(folder, bar),
                   "Tree is really gone");
        // Fixed typo in test description: "sill" -> "still".
        TEST_TRUE(runner, FSFolder_Exists(folder, foo),
                  "Original folder still there");
        TEST_TRUE(runner, FSFolder_Exists(folder, foo_boffo),
                  "Delete_Tree() did not follow directory symlink");
        FSFolder_Delete_Tree(folder, foo);
    }
    DECREF(folder);
    S_tear_down();
#else
    // Five SKIPs to match the five tests planned above.
    SKIP(runner, "Tests requiring symlink() disabled");
    SKIP(runner, "Tests requiring symlink() disabled");
    SKIP(runner, "Tests requiring symlink() disabled");
    SKIP(runner, "Tests requiring symlink() disabled");
    SKIP(runner, "Tests requiring symlink() disabled");
#endif // ENABLE_SYMLINK_TESTS
}
// Search the snapshot's file list for a schema file ("schema_XXX.json").
// Returns the first match encountered, or NULL if none is present.
//
// NOTE(review): the returned String is *borrowed* -- it is fetched from
// `files` without an INCREF, and `files` itself is DECREF'd before
// returning.  This is only safe if the Snapshot retains its own references
// to the listed entries for as long as the caller uses the result --
// confirm against Snapshot_List()'s ownership contract.
static String*
S_find_schema_file(Snapshot *snapshot) {
    // Snapshot_List() hands us a Vector we are responsible for releasing.
    Vector *files = Snapshot_List(snapshot);
    String *retval = NULL;
    for (size_t i = 0, max = Vec_Get_Size(files); i < max; i++) {
        String *file = (String*)Vec_Fetch(files, i);
        if (Str_Starts_With_Utf8(file, "schema_", 7)
            && Str_Ends_With_Utf8(file, ".json", 5)
           ) {
            retval = file;
            break;
        }
    }
    DECREF(files);
    return retval;
}
// Append one stack frame ("\t<func> at <file> line <line>\n") to the
// error's message, ensuring the existing message ends with a newline first.
void
Err_Add_Frame_IMP(Err *self, const char *file, int line, const char *func) {
    CharBuf *frame_buf = CB_new(0);

    // Copy the current message and guarantee a trailing newline before the
    // frame text is appended.
    CB_Cat(frame_buf, self->mess);
    if (!Str_Ends_With_Utf8(self->mess, "\n", 1)) {
        CB_Cat_Char(frame_buf, '\n');
    }

    // The function name is optional; omit it when unavailable.
    if (func == NULL) {
        CB_catf(frame_buf, "\tat %s line %i32\n", file, (int32_t)line);
    }
    else {
        CB_catf(frame_buf, "\t%s at %s line %i32\n", func, file,
                (int32_t)line);
    }

    // Swap the augmented message into place.
    DECREF(self->mess);
    self->mess = CB_Yield_String(frame_buf);
    DECREF(frame_buf);
}
// Create a shared (read) lock named after the supplied snapshot filename,
// minus its ".json" extension.  Throws if `filename` is not a snapshot
// filename.  Caller takes ownership of the returned Lock.
Lock*
IxManager_Make_Snapshot_Read_Lock_IMP(IndexManager *self, String *filename) {
    LockFactory *lock_factory = S_obtain_lock_factory(self);

    // Only "snapshot_XXX.json" names are acceptable.
    bool valid_name = Str_Starts_With_Utf8(filename, "snapshot_", 9)
                      && Str_Ends_With_Utf8(filename, ".json", 5);
    if (!valid_name) {
        THROW(ERR, "Not a snapshot filename: %o", filename);
    }

    // Truncate ".json" from end of snapshot file name.
    size_t name_len = Str_Length(filename) - (sizeof(".json") - 1);
    String *lock_name = Str_SubString(filename, 0, name_len);

    Lock *lock = LockFact_Make_Shared_Lock(lock_factory, lock_name, 1000, 100);
    DECREF(lock_name);
    return lock;
}
// Create an exclusive lockfile-based Lock named after the supplied snapshot
// filename, minus its ".json" extension.  Throws if `filename` is not a
// snapshot filename.  Caller takes ownership of the returned Lock.
Lock*
IxManager_Make_Snapshot_Lock_IMP(IndexManager *self, String *filename) {
    IndexManagerIVARS *const ivars = IxManager_IVARS(self);

    // Only "snapshot_XXX.json" names are acceptable.
    bool valid_name = Str_Starts_With_Utf8(filename, "snapshot_", 9)
                      && Str_Ends_With_Utf8(filename, ".json", 5);
    if (!valid_name) {
        THROW(ERR, "Not a snapshot filename: %o", filename);
    }

    // Truncate ".json" from end of snapshot file name.
    size_t name_len = Str_Length(filename) - (sizeof(".json") - 1);
    String *lock_name = Str_SubString(filename, 0, name_len);

    Lock *lock = (Lock*)LFLock_new(ivars->folder, lock_name, ivars->host,
                                   1000, 100, false);
    DECREF(lock_name);
    return lock;
}
// Consolidate every non-JSON file in the folder into a single "cf.dat"
// compound file, recording each sub-file's offset and length in
// "cfmeta.json" (written via a temp file and renamed into place), then
// delete the absorbed originals.
static void
S_do_consolidate(CompoundFileWriter *self, CompoundFileWriterIVARS *ivars) {
    UNUSED_VAR(self);
    Folder *folder    = ivars->folder;
    Hash   *metadata  = Hash_new(0);
    Hash   *sub_files = Hash_new(0);
    Vector *files     = Folder_List(folder, NULL);
    Vector *merged    = Vec_new(Vec_Get_Size(files));
    String *cf_file   = (String*)SSTR_WRAP_UTF8("cf.dat", 6);
    OutStream *outstream = Folder_Open_Out(folder, (String*)cf_file);
    bool rename_success;

    if (!outstream) { RETHROW(INCREF(Err_get_error())); }

    // Start metadata.
    Hash_Store_Utf8(metadata, "files", 5, INCREF(sub_files));
    Hash_Store_Utf8(metadata, "format", 6,
                    (Obj*)Str_newf("%i32", CFWriter_current_file_format));

    // Sort for a deterministic ordering of sub-files within the compound.
    Vec_Sort(files);

    for (uint32_t i = 0, max = Vec_Get_Size(files); i < max; i++) {
        String *infilename = (String*)Vec_Fetch(files, i);

        if (!Str_Ends_With_Utf8(infilename, ".json", 5)) {
            InStream *instream = Folder_Open_In(folder, infilename);
            if (!instream) { RETHROW(INCREF(Err_get_error())); }

            // Allocate the metadata entry only after the file opened
            // successfully, so it isn't leaked on the error path above.
            Hash *file_data = Hash_new(2);

            // Absorb the file.
            int64_t offset = OutStream_Tell(outstream);
            OutStream_Absorb(outstream, instream);
            int64_t len = OutStream_Tell(outstream) - offset;

            // Record offset and length.
            Hash_Store_Utf8(file_data, "offset", 6,
                            (Obj*)Str_newf("%i64", offset));
            Hash_Store_Utf8(file_data, "length", 6,
                            (Obj*)Str_newf("%i64", len));
            Hash_Store(sub_files, infilename, (Obj*)file_data);
            Vec_Push(merged, INCREF(infilename));

            // Add filler NULL bytes so that every sub-file begins on a file
            // position multiple of 8.
            OutStream_Align(outstream, 8);

            InStream_Close(instream);
            DECREF(instream);
        }
    }

    // Write metadata to cfmeta file, then rename so it appears atomically.
    String *cfmeta_temp = (String*)SSTR_WRAP_UTF8("cfmeta.json.temp", 16);
    String *cfmeta_file = (String*)SSTR_WRAP_UTF8("cfmeta.json", 11);
    Json_spew_json((Obj*)metadata, (Folder*)ivars->folder, cfmeta_temp);
    rename_success = Folder_Rename(ivars->folder, cfmeta_temp, cfmeta_file);
    if (!rename_success) { RETHROW(INCREF(Err_get_error())); }

    // Clean up.
    OutStream_Close(outstream);
    DECREF(outstream);
    DECREF(files);
    DECREF(metadata);
    DECREF(sub_files);

    // Delete the originals now that they live inside the compound file.
    // (Removed a stale commented-out HashIterator version of this loop.)
    for (uint32_t i = 0, max = Vec_Get_Size(merged); i < max; i++) {
        String *merged_file = (String*)Vec_Fetch(merged, i);
        if (!Folder_Delete(folder, merged_file)) {
            String *mess = MAKE_MESS("Can't delete '%o'", merged_file);
            DECREF(merged);
            Err_throw_mess(ERR, mess);
        }
    }
    DECREF(merged);
}
// Scan the index folder and sort its contents into two camps: files which
// may be purged (passed back via `purgables_ptr`) and Snapshot objects
// whose files are all deletion candidates (passed back via
// `snapshots_ptr`).  Files referenced by the current snapshot, or by any
// snapshot file on which another entity holds a read lock, are spared.
// Ownership of both out-param Vectors transfers to the caller.
static void
S_discover_unused(FilePurger *self, Vector **purgables_ptr,
                  Vector **snapshots_ptr) {
    FilePurgerIVARS *const ivars = FilePurger_IVARS(self);
    Folder *folder = ivars->folder;
    DirHandle *dh = Folder_Open_Dir(folder, NULL);
    if (!dh) { RETHROW(INCREF(Err_get_error())); }
    Vector *spared = Vec_new(1);
    Vector *snapshots = Vec_new(1);
    String *snapfile = NULL;

    // Start off with the list of files in the current snapshot.
    if (ivars->snapshot) {
        Vector *entries = Snapshot_List(ivars->snapshot);
        Vector *referenced = S_find_all_referenced(folder, entries);
        Vec_Push_All(spared, referenced);
        DECREF(entries);
        DECREF(referenced);
        // The snapshot file itself must also be spared.
        snapfile = Snapshot_Get_Path(ivars->snapshot);
        if (snapfile) { Vec_Push(spared, INCREF(snapfile)); }
    }

    Hash *candidates = Hash_new(64);
    while (DH_Next(dh)) {
        String *entry = DH_Get_Entry(dh);
        // Examine every snapshot file other than the current one.
        if (Str_Starts_With_Utf8(entry, "snapshot_", 9)
            && Str_Ends_With_Utf8(entry, ".json", 5)
            && (!snapfile || !Str_Equals(entry, (Obj*)snapfile))
           ) {
            Snapshot *snapshot
                = Snapshot_Read_File(Snapshot_new(), folder, entry);
            Lock *lock
                = IxManager_Make_Snapshot_Read_Lock(ivars->manager, entry);
            Vector *snap_list  = Snapshot_List(snapshot);
            Vector *referenced = S_find_all_referenced(folder, snap_list);

            // DON'T obtain the lock -- only see whether another
            // entity holds a lock on the snapshot file.
            if (lock) {
                Lock_Clear_Stale(lock);
            }
            if (lock && Lock_Is_Locked(lock)) {
                // The snapshot file is locked, which means someone's using
                // that version of the index -- protect all of its entries.
                uint32_t new_size = Vec_Get_Size(spared)
                                    + Vec_Get_Size(referenced)
                                    + 1;
                Vec_Grow(spared, new_size);
                Vec_Push(spared, (Obj*)Str_Clone(entry));
                Vec_Push_All(spared, referenced);
            }
            else {
                // No one's using this snapshot, so all of its entries are
                // candidates for deletion.
                for (uint32_t i = 0, max = Vec_Get_Size(referenced); i < max; i++) {
                    String *file = (String*)Vec_Fetch(referenced, i);
                    Hash_Store(candidates, file, (Obj*)CFISH_TRUE);
                }
                // Keep the Snapshot object alive for the caller.
                Vec_Push(snapshots, INCREF(snapshot));
            }
            DECREF(referenced);
            DECREF(snap_list);
            DECREF(snapshot);
            DECREF(lock);
        }
        DECREF(entry);
    }
    DECREF(dh);

    // Clean up after a dead segment consolidation.
    S_zap_dead_merge(self, candidates);

    // Eliminate any current files from the list of files to be purged.
    for (uint32_t i = 0, max = Vec_Get_Size(spared); i < max; i++) {
        String *filename = (String*)Vec_Fetch(spared, i);
        DECREF(Hash_Delete(candidates, filename));
    }

    // Pass back purgables and Snapshots.
    *purgables_ptr = Hash_Keys(candidates);
    *snapshots_ptr = snapshots;

    DECREF(candidates);
    DECREF(spared);
}
// Attempt to open all index elements recorded in the snapshot: locate the
// newest schema file, load the Schema, build one Segment per valid segment
// name, then open a SegReader for each segment.  Takes a void* context
// (struct try_open_elements_context) so it can run under Err_trap();
// failures are reported by throwing.  On success, args->seg_readers holds
// the opened SegReaders; on failure it is left NULL.
void
S_try_open_elements(void *context) {
    struct try_open_elements_context *args
        = (struct try_open_elements_context*)context;
    PolyReader *self = args->self;
    PolyReaderIVARS *const ivars = PolyReader_IVARS(self);
    VArray *files = Snapshot_List(ivars->snapshot);
    Folder *folder = PolyReader_Get_Folder(self);
    uint32_t num_segs = 0;
    uint64_t latest_schema_gen = 0;
    String *schema_file = NULL;

    // Find schema file, count segments.
    for (uint32_t i = 0, max = VA_Get_Size(files); i < max; i++) {
        String *entry = (String*)VA_Fetch(files, i);
        if (Seg_valid_seg_name(entry)) {
            num_segs++;
        }
        else if (Str_Starts_With_Utf8(entry, "schema_", 7)
                 && Str_Ends_With_Utf8(entry, ".json", 5)
                ) {
            // Keep only the schema file with the highest generation.
            uint64_t gen = IxFileNames_extract_gen(entry);
            if (gen > latest_schema_gen) {
                latest_schema_gen = gen;
                schema_file = entry;  // Borrowed reference from `files`.
            }
        }
    }

    // Read Schema.
    if (!schema_file) {
        DECREF(files);
        THROW(ERR, "Can't find a schema file.");
    }
    else {
        Obj *dump = Json_slurp_json(folder, schema_file);
        if (dump) { // read file successfully
            DECREF(ivars->schema);
            ivars->schema = (Schema*)CERTIFY(Freezer_load(dump), SCHEMA);
            DECREF(dump);
            schema_file = NULL;
        }
        else {
            String *mess = MAKE_MESS("Failed to parse %o", schema_file);
            DECREF(files);
            Err_throw_mess(ERR, mess);
        }
    }

    VArray *segments = VA_new(num_segs);
    for (uint32_t i = 0, max = VA_Get_Size(files); i < max; i++) {
        String *entry = (String*)VA_Fetch(files, i);

        // Create a Segment for each segmeta.
        if (Seg_valid_seg_name(entry)) {
            int64_t seg_num = IxFileNames_extract_gen(entry);
            Segment *segment = Seg_new(seg_num);

            // Bail if reading the file fails (probably because it's been
            // deleted and a new snapshot file has been written so we need to
            // retry).
            if (Seg_Read_File(segment, folder)) {
                VA_Push(segments, (Obj*)segment);
            }
            else {
                String *mess = MAKE_MESS("Failed to read %o", entry);
                DECREF(segment);
                DECREF(segments);
                DECREF(files);
                Err_throw_mess(ERR, mess);
            }
        }
    }

    // Sort the segments by age.
    VA_Sort(segments, NULL, NULL);

    // Open individual SegReaders.
    struct try_open_segreader_context seg_context;
    seg_context.schema = PolyReader_Get_Schema(self);
    seg_context.folder = folder;
    seg_context.snapshot = PolyReader_Get_Snapshot(self);
    seg_context.segments = segments;
    seg_context.result = NULL;
    args->seg_readers = VA_new(num_segs);
    Err *error = NULL;
    for (uint32_t seg_tick = 0; seg_tick < num_segs; seg_tick++) {
        seg_context.seg_tick = seg_tick;
        // Trap per-segment failures so we can clean up before rethrowing.
        error = Err_trap(S_try_open_segreader, &seg_context);
        if (error) {
            break;
        }
        VA_Push(args->seg_readers, (Obj*)seg_context.result);
        seg_context.result = NULL;
    }
    DECREF(segments);
    DECREF(files);

    if (error) {
        // Discard partial results and propagate the segment-open failure.
        DECREF(args->seg_readers);
        args->seg_readers = NULL;
        RETHROW(error);
    }
}