// Create a test data structure including at least one each of Hash, VArray, // and CharBuf. static Obj* S_make_dump() { Hash *dump = Hash_new(0); Hash_Store_Str(dump, "foo", 3, (Obj*)CB_newf("foo")); Hash_Store_Str(dump, "stuff", 5, (Obj*)VA_new(0)); return (Obj*)dump; }
Hash*
Normalizer_dump(Normalizer *self) {
    Normalizer_Dump_t super_dump
        = SUPER_METHOD_PTR(NORMALIZER, Lucy_Normalizer_Dump);
    Hash *dump = super_dump(self);
    int   opts = self->options;

    // Translate the utf8proc option bits back into the name of the Unicode
    // normalization form they select.
    CharBuf *form;
    if (opts & UTF8PROC_COMPOSE) {
        if (opts & UTF8PROC_COMPAT) {
            form = CB_new_from_trusted_utf8("NFKC", 4);
        }
        else {
            form = CB_new_from_trusted_utf8("NFC", 3);
        }
    }
    else {
        if (opts & UTF8PROC_COMPAT) {
            form = CB_new_from_trusted_utf8("NFKD", 4);
        }
        else {
            form = CB_new_from_trusted_utf8("NFD", 3);
        }
    }
    Hash_Store_Str(dump, "normalization_form", 18, (Obj*)form);

    // Record the boolean options as shared Bool singletons.
    BoolNum *case_fold = Bool_singleton(opts & UTF8PROC_CASEFOLD);
    Hash_Store_Str(dump, "case_fold", 9, (Obj*)case_fold);
    BoolNum *strip_accents = Bool_singleton(opts & UTF8PROC_STRIPMARK);
    Hash_Store_Str(dump, "strip_accents", 13, (Obj*)strip_accents);

    return dump;
}
Hash*
SortWriter_metadata(SortWriter *self) {
    SortWriterIVARS *const ivars = SortWriter_IVARS(self);
    // Extend the inherited metadata with this writer's per-field stats.
    Hash *const meta = DataWriter_metadata((DataWriter*)self);
    Hash_Store_Str(meta, "counts", 6, INCREF(ivars->counts));
    Hash_Store_Str(meta, "null_ords", 9, INCREF(ivars->null_ords));
    Hash_Store_Str(meta, "ord_widths", 10, INCREF(ivars->ord_widths));
    return meta;
}
Hash*
FullTextType_dump(FullTextType *self) {
    // Start from the schema-oriented dump, then add the information needed
    // for a full round-trip: the concrete class name and the analyzer.
    Hash *dump = FullTextType_Dump_For_Schema(self);
    Hash_Store_Str(dump, "_class", 6,
                   (Obj*)CB_Clone(FullTextType_Get_Class_Name(self)));
    Hash_Store_Str(dump, "analyzer", 8,
                   (Obj*)Analyzer_Dump(self->analyzer));
    // "_class" supersedes the "type" shorthand, so discard the latter.
    DECREF(Hash_Delete_Str(dump, "type", 4));
    return dump;
}
// Verify that a Hash survives a Dump => Load round trip unchanged.
static void
test_Dump_and_Load(TestBatch *batch) {
    Hash *hash = Hash_new(0);
    Obj  *dump;
    Hash *loaded;

    Hash_Store_Str(hash, "foo", 3,
                   (Obj*)CB_new_from_trusted_utf8("foo", 3));
    dump = (Obj*)Hash_Dump(hash);
    loaded = (Hash*)Obj_Load(dump, dump);
    TEST_TRUE(batch, Hash_Equals(hash, (Obj*)loaded),
              "Dump => Load round trip");
    DECREF(dump);
    DECREF(loaded);

    /* TODO: Fix Hash_Load().

    Hash_Store_Str(hash, "_class", 6,
                   (Obj*)CB_new_from_trusted_utf8("not_a_class", 11));
    dump = (Obj*)Hash_Dump(hash);
    loaded = (Hash*)Obj_Load(dump, dump);
    TEST_TRUE(batch, Hash_Equals(hash, (Obj*)loaded),
              "Load still works with _class if it's not a real class");
    DECREF(dump);
    DECREF(loaded);

    */

    DECREF(hash);
}
void
Seg_write_file(Segment *self, Folder *folder) {
    Hash *my_metadata = Hash_new(16);

    // Store metadata specific to this Segment object.
    Hash_Store_Str(my_metadata, "count", 5,
                   (Obj*)CB_newf("%i64", self->count));
    Hash_Store_Str(my_metadata, "name", 4, (Obj*)CB_Clone(self->name));
    Hash_Store_Str(my_metadata, "field_names", 11, INCREF(self->by_num));
    Hash_Store_Str(my_metadata, "format", 6, (Obj*)CB_newf("%i32", 1));
    Hash_Store_Str(self->metadata, "segmeta", 7, (Obj*)my_metadata);

    // Serialize all metadata as JSON inside the segment directory.
    CharBuf *json_path = CB_newf("%o/segmeta.json", self->name);
    bool_t ok = Json_spew_json((Obj*)self->metadata, folder, json_path);
    DECREF(json_path);
    if (!ok) {
        RETHROW(INCREF(Err_get_error()));
    }
}
Hash*
NumType_dump(NumericType *self) {
    // Full dump = schema dump plus the concrete class name.
    Hash *dump = NumType_Dump_For_Schema(self);
    Hash_Store_Str(dump, "_class", 6,
                   (Obj*)CB_Clone(NumType_Get_Class_Name(self)));
    // "_class" supersedes the "type" shorthand, so discard the latter.
    DECREF(Hash_Delete_Str(dump, "type", 4));
    return dump;
}
Hash*
EasyAnalyzer_dump(EasyAnalyzer *self) {
    EasyAnalyzerIVARS *const ivars = EasyAnalyzer_IVARS(self);
    EasyAnalyzer_Dump_t super_dump
        = SUPER_METHOD_PTR(EASYANALYZER, Lucy_EasyAnalyzer_Dump);
    // Extend the parent class's dump with the language setting.
    Hash *dump = super_dump(self);
    Hash_Store_Str(dump, "language", 8, (Obj*)CB_Clone(ivars->language));
    return dump;
}
Obj*
NoMatchQuery_dump(NoMatchQuery *self) {
    NoMatchQuery_Dump_t super_dump
        = SUPER_METHOD_PTR(NOMATCHQUERY, Lucy_NoMatchQuery_Dump);
    // The parent Dump must produce a Hash; CERTIFY enforces that before we
    // add our own key.
    Hash *dump = (Hash*)CERTIFY(super_dump(self), HASH);
    Hash_Store_Str(dump, "fails_to_match", 14,
                   (Obj*)Bool_singleton(self->fails_to_match));
    return (Obj*)dump;
}
Obj*
Tokenizer_dump(Tokenizer *self) {
    Tokenizer_dump_t super_dump
        = (Tokenizer_dump_t)SUPER_METHOD(&TOKENIZER, Tokenizer, Dump);
    // The parent Dump must produce a Hash; assert that before extending it.
    Hash *dump = (Hash*)ASSERT_IS_A(super_dump(self), HASH);
    // Record the tokenizing regex so the Tokenizer can be reconstructed.
    Hash_Store_Str(dump, "pattern", 7, Obj_Dump(self->pattern));
    return (Obj*)dump;
}
Hash*
LexWriter_metadata(LexiconWriter *self) {
    Hash *const metadata  = DataWriter_metadata((DataWriter*)self);
    Hash *const counts    = (Hash*)INCREF(self->counts);
    Hash *const ix_counts = (Hash*)INCREF(self->ix_counts);

    /* Insert placeholder entries when no terms were written, so the
     * serialized metadata never contains empty hashes. */
    if (!Hash_Get_Size(counts)) {
        Hash_Store_Str(counts, "none", 4, (Obj*)CB_newf("%i32", (i32_t)0));
        Hash_Store_Str(ix_counts, "none", 4, (Obj*)CB_newf("%i32", (i32_t)0));
    }

    Hash_Store_Str(metadata, "counts", 6, (Obj*)counts);
    Hash_Store_Str(metadata, "index_counts", 12, (Obj*)ix_counts);

    return metadata;
}
Hash*
NumType_dump_for_schema(NumericType *self) {
    Hash *dump = Hash_new(0);
    Hash_Store_Str(dump, "type", 4, (Obj*)NumType_Specifier(self));

    // Store attributes that override the defaults.
    // NOTE(review): boost is compared with exact float equality; presumably
    // 1.0 is the untouched default so this is safe — confirm against setter.
    if (self->boost != 1.0) {
        Hash_Store_Str(dump, "boost", 5, (Obj*)CB_newf("%f64", self->boost));
    }
    if (!self->indexed) {
        Hash_Store_Str(dump, "indexed", 7, (Obj*)CFISH_FALSE);
    }
    if (!self->stored) {
        Hash_Store_Str(dump, "stored", 6, (Obj*)CFISH_FALSE);
    }
    if (self->sortable) {
        Hash_Store_Str(dump, "sortable", 8, (Obj*)CFISH_TRUE);
    }

    return dump;
}
void
Snapshot_write_file(Snapshot *self, Folder *folder, const CharBuf *path) {
    Hash   *all_data = Hash_new(0);
    VArray *list     = Snapshot_List(self);

    // Update path, generating the next snapshot filename when none is given.
    DECREF(self->path);
    if (path) {
        self->path = CB_Clone(path);
    }
    else {
        CharBuf *latest = IxFileNames_latest_snapshot(folder);
        uint64_t gen = latest ? IxFileNames_extract_gen(latest) + 1 : 1;
        char base36[StrHelp_MAX_BASE36_BYTES];
        StrHelp_to_base36(gen, &base36);
        // FIX: pass the array (which decays to char*) rather than &base36.
        // "&base36" has type char(*)[N], which is the wrong argument type
        // for a %s conversion even though the address coincides.
        self->path = CB_newf("snapshot_%s.json", base36);
        DECREF(latest);
    }

    // Don't overwrite.
    if (Folder_Exists(folder, self->path)) {
        THROW(ERR, "Snapshot file '%o' already exists", self->path);
    }

    // Sort, then store file names.
    VA_Sort(list, NULL, NULL);
    Hash_Store_Str(all_data, "entries", 7, (Obj*)list);

    // Create a JSON-izable data structure.
    Hash_Store_Str(all_data, "format", 6,
                   (Obj*)CB_newf("%i32", (int32_t)Snapshot_current_file_format));
    Hash_Store_Str(all_data, "subformat", 9,
                   (Obj*)CB_newf("%i32", (int32_t)Snapshot_current_file_subformat));

    // Write out JSON-ized data to the new file.
    Json_spew_json((Obj*)all_data, folder, self->path);
    DECREF(all_data);
}
void
IxManager_write_merge_data(IndexManager *self, int64_t cutoff) {
    ZombieCharBuf *merge_json = ZCB_WRAP_STR("merge.json", 10);

    // Record the merge cutoff as JSON in the index folder.
    Hash *data = Hash_new(1);
    Hash_Store_Str(data, "cutoff", 6, (Obj*)CB_newf("%i64", cutoff));
    bool_t wrote = Json_spew_json((Obj*)data, self->folder,
                                  (CharBuf*)merge_json);
    DECREF(data);

    if (!wrote) {
        THROW(ERR, "Failed to write to %o", merge_json);
    }
}
// Verify that serializing a self-referential structure fails cleanly
// instead of recursing forever.
static void
test_max_depth(TestBatch *batch) {
    Hash *circular = Hash_new(0);
    // Make the hash contain itself.
    Hash_Store_Str(circular, "circular", 8, INCREF(circular));
    Err_set_error(NULL);
    CharBuf *not_json = Json_to_json((Obj*)circular);
    TEST_TRUE(batch, not_json == NULL,
              "to_json returns NULL when fed recursing data");
    TEST_TRUE(batch, Err_get_error() != NULL,
              "to_json sets Err_error when fed recursing data");
    // Break the cycle before destroying, or the hash would leak itself.
    DECREF(Hash_Delete_Str(circular, "circular", 8));
    DECREF(circular);
}
Hash*
StringType_dump_for_schema(StringType *self) {
    Hash *dump = Hash_new(0);
    Hash_Store_Str(dump, "type", 4, (Obj*)CB_newf("string"));

    // Only record attributes that differ from the defaults.
    if (self->boost != 1.0) {
        Hash_Store_Str(dump, "boost", 5, (Obj*)CB_newf("%f64", self->boost));
    }
    if (!self->indexed) {
        Hash_Store_Str(dump, "indexed", 7, (Obj*)CB_newf("0"));
    }
    if (!self->stored) {
        Hash_Store_Str(dump, "stored", 6, (Obj*)CB_newf("0"));
    }
    if (self->sortable) {
        Hash_Store_Str(dump, "sortable", 8, (Obj*)CB_newf("1"));
    }

    return dump;
}
void
Snapshot_write_file(Snapshot *self, Folder *folder, const CharBuf *filename) {
    Hash *all_data = Hash_new(0);
    VArray *list = Snapshot_List(self);

    /* Update filename, generating the next snapshot name when none given. */
    DECREF(self->filename);
    if (filename) {
        self->filename = CB_Clone(filename);
    }
    else {
        CharBuf *latest = IxFileNames_latest_snapshot(folder);
        i32_t gen = latest ? IxFileNames_extract_gen(latest) + 1 : 1;
        /* NOTE(review): StrHelp_to_base36 appears to return a new CharBuf
         * here (it is DECREF'd below) — older API than the variant that
         * fills a caller-supplied buffer. */
        CharBuf *base_36 = StrHelp_to_base36(gen);
        self->filename = CB_newf("snapshot_%o.json", base_36);
        DECREF(latest);
        DECREF(base_36);
    }

    /* Don't overwrite. */
    if (Folder_Exists(folder, self->filename)) {
        THROW("Snapshot file '%o' already exists", self->filename);
    }

    /* Sort, then store file names. */
    VA_Sort(list, NULL);
    Hash_Store_Str(all_data, "entries", 7, (Obj*)list);

    /* Create a JSON-izable data structure. */
    Hash_Store_Str(all_data, "format", 6,
                   (Obj*)CB_newf("%i32", (i32_t)Snapshot_current_file_format)
                  );

    /* Write out JSON-ized data to the new file. */
    Json_spew_json((Obj*)all_data, folder, self->filename);
    DECREF(all_data);
}
Hash*
DefDelWriter_metadata(DefaultDeletionsWriter *self) {
    DefaultDeletionsWriterIVARS *const ivars = DefDelWriter_IVARS(self);
    Hash *const metadata = DataWriter_metadata((DataWriter*)self);
    Hash *const files = Hash_new(0);

    // For each segment whose deletions changed, record the deletion count
    // and the name of the file holding the deletions bit vector.
    for (uint32_t i = 0, max = VA_Get_Size(ivars->seg_readers); i < max; i++) {
        SegReader *seg_reader = (SegReader*)VA_Fetch(ivars->seg_readers, i);
        if (ivars->updated[i]) {
            BitVector *deldocs = (BitVector*)VA_Fetch(ivars->bit_vecs, i);
            Segment *segment = SegReader_Get_Segment(seg_reader);
            Hash *mini_meta = Hash_new(2);
            Hash_Store_Str(mini_meta, "count", 5,
                           (Obj*)CB_newf("%u32",
                                         (uint32_t)BitVec_Count(deldocs)));
            Hash_Store_Str(mini_meta, "filename", 8,
                           (Obj*)S_del_filename(self, seg_reader));
            // Keyed by segment name, e.g. "seg_1".
            Hash_Store(files, (Obj*)Seg_Get_Name(segment), (Obj*)mini_meta);
        }
    }

    Hash_Store_Str(metadata, "files", 5, (Obj*)files);
    return metadata;
}
// Exercise Hash_Equals: empty hashes, one-sided key, matching pair, and
// mismatched values.
static void
test_Equals(TestBatch *batch) {
    Hash *hash  = Hash_new(0);
    Hash *other = Hash_new(0);
    ZombieCharBuf *stuff = ZCB_WRAP_STR("stuff", 5);

    TEST_TRUE(batch, Hash_Equals(hash, (Obj*)other),
              "Empty hashes are equal");

    Hash_Store_Str(hash, "foo", 3, (Obj*)CFISH_TRUE);
    TEST_FALSE(batch, Hash_Equals(hash, (Obj*)other),
               "Add one pair and Equals returns false");

    Hash_Store_Str(other, "foo", 3, (Obj*)CFISH_TRUE);
    TEST_TRUE(batch, Hash_Equals(hash, (Obj*)other),
              "Add a matching pair and Equals returns true");

    // Overwrite with a different value; equality must now fail.
    Hash_Store_Str(other, "foo", 3, INCREF(stuff));
    TEST_FALSE(batch, Hash_Equals(hash, (Obj*)other),
               "Non-matching value spoils Equals");

    DECREF(hash);
    DECREF(other);
}
FilePurger*
FilePurger_init(FilePurger *self, Folder *folder, Snapshot *snapshot,
                IndexManager *manager) {
    self->folder   = (Folder*)INCREF(folder);
    self->snapshot = (Snapshot*)INCREF(snapshot);

    // Fall back to a default IndexManager when the caller supplies none.
    if (manager) {
        self->manager = (IndexManager*)INCREF(manager);
    }
    else {
        self->manager = IxManager_new(NULL, NULL);
    }
    IxManager_Set_Folder(self->manager, folder);

    // Don't allow the locks directory to be zapped.
    self->disallowed = Hash_new(0);
    Hash_Store_Str(self->disallowed, "locks", 5, INCREF(&EMPTY));

    return self;
}
Hash*
FullTextType_dump_for_schema(FullTextType *self) {
    Hash *dump = Hash_new(0);
    Hash_Store_Str(dump, "type", 4, (Obj*)CB_newf("fulltext"));

    // Store attributes that override the defaults.
    // NOTE(review): boost uses exact float comparison against the presumed
    // default of 1.0 — confirm the setter never perturbs that value.
    if (self->boost != 1.0) {
        Hash_Store_Str(dump, "boost", 5, (Obj*)CB_newf("%f64", self->boost));
    }
    if (!self->indexed) {
        Hash_Store_Str(dump, "indexed", 7, (Obj*)CFISH_FALSE);
    }
    if (!self->stored) {
        Hash_Store_Str(dump, "stored", 6, (Obj*)CFISH_FALSE);
    }
    if (self->sortable) {
        Hash_Store_Str(dump, "sortable", 8, (Obj*)CFISH_TRUE);
    }
    if (self->highlightable) {
        Hash_Store_Str(dump, "highlightable", 13, (Obj*)CFISH_TRUE);
    }

    return dump;
}
// Attempt to acquire the lock.  Writes pid/host/name JSON to a temporary
// file, then hard-links it into place so the lockfile appears atomically
// with complete contents.  Returns true on success; on failure returns
// false with Err_error set to a LockErr.
bool_t
LFLock_request(LockFileLock *self) {
    Hash *file_data;
    bool_t wrote_json;
    bool_t success = false;
    bool_t deletion_failed = false;

    // Bail out if somebody already holds the lock.
    if (Folder_Exists(self->folder, self->lock_path)) {
        Err_set_error((Err*)LockErr_new(CB_newf(
            "Can't obtain lock: '%o' exists", self->lock_path)));
        return false;
    }

    // Create the "locks" subdirectory if necessary.
    CharBuf *lock_dir_name = (CharBuf*)ZCB_WRAP_STR("locks", 5);
    if (!Folder_Exists(self->folder, lock_dir_name)) {
        if (!Folder_MkDir(self->folder, lock_dir_name)) {
            Err *mkdir_err = (Err*)CERTIFY(Err_get_error(), ERR);
            LockErr *err = LockErr_new(CB_newf(
                "Can't create 'locks' directory: %o",
                Err_Get_Mess(mkdir_err)));
            // Maybe our attempt failed because another process succeeded.
            if (Folder_Find_Folder(self->folder, lock_dir_name)) {
                DECREF(err);
            }
            else {
                // Nope, everything failed, so bail out.
                Err_set_error((Err*)err);
                return false;
            }
        }
    }

    // Prepare to write pid, lock name, and host to the lock file as JSON.
    file_data = Hash_new(3);
    Hash_Store_Str(file_data, "pid", 3,
                   (Obj*)CB_newf("%i32", (int32_t)PID_getpid()));
    Hash_Store_Str(file_data, "host", 4, INCREF(self->host));
    Hash_Store_Str(file_data, "name", 4, INCREF(self->name));

    // Write to a temporary file, then use the creation of a hard link to
    // ensure atomic but non-destructive creation of the lockfile with its
    // complete contents.
    wrote_json = Json_spew_json((Obj*)file_data, self->folder,
                                self->link_path);
    if (wrote_json) {
        success = Folder_Hard_Link(self->folder, self->link_path,
                                   self->lock_path);
        if (!success) {
            Err *hard_link_err = (Err*)CERTIFY(Err_get_error(), ERR);
            Err_set_error((Err*)LockErr_new(CB_newf(
                "Failed to obtain lock at '%o': %o",
                self->lock_path, Err_Get_Mess(hard_link_err))));
        }
        // The temp file is always removed, whether the link succeeded or not.
        deletion_failed = !Folder_Delete(self->folder, self->link_path);
    }
    else {
        Err *spew_json_err = (Err*)CERTIFY(Err_get_error(), ERR);
        Err_set_error((Err*)LockErr_new(CB_newf(
            "Failed to obtain lock at '%o': %o",
            self->lock_path, Err_Get_Mess(spew_json_err))));
    }
    DECREF(file_data);

    // Verify that our temporary file got zapped.
    if (wrote_json && deletion_failed) {
        CharBuf *mess = MAKE_MESS("Failed to delete '%o'", self->link_path);
        Err_throw_mess(ERR, mess);
    }

    return success;
}