PolyQuery*
PolyQuery_Deserialize_IMP(PolyQuery *self, InStream *instream) {
    // Read the common header: boost, then the number of child queries.
    float    boost        = InStream_Read_F32(instream);
    uint32_t num_children = InStream_Read_U32(instream);
    PolyQuery_init(self, NULL);
    PolyQueryIVARS *const ivars = PolyQuery_IVARS(self);
    PolyQuery_Set_Boost(self, boost);
    // Thaw each child query and append it to the children array.
    Vec_Grow(ivars->children, num_children);
    while (num_children--) {
        Vec_Push(ivars->children, THAW(instream));
    }
    return self;
}
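// For context, a minimal sketch of the serializer this routine mirrors.
// This is an assumption inferred from the read order above (boost as an
// F32, then the child count as a U32, then each frozen child); the real
// PolyQuery_Serialize_IMP may differ in detail, so the name below is
// marked as a sketch.
void
PolyQuery_Serialize_SKETCH(PolyQuery *self, OutStream *outstream) {
    PolyQueryIVARS *const ivars = PolyQuery_IVARS(self);
    uint32_t num_children = (uint32_t)Vec_Get_Size(ivars->children);
    OutStream_Write_F32(outstream, PolyQuery_Get_Boost(self));
    OutStream_Write_U32(outstream, num_children);
    for (uint32_t i = 0; i < num_children; i++) {
        // Freeze each child in order, matching the THAW loop above.
        FREEZE(Vec_Fetch(ivars->children, i), outstream);
    }
}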
Schema*
Schema_Load_IMP(Schema *self, Obj *dump) {
    Hash *source = (Hash*)CERTIFY(dump, HASH);
    String *class_name
        = (String*)CERTIFY(Hash_Fetch_Utf8(source, "_class", 6), STRING);
    Class *klass = Class_singleton(class_name, NULL);
    Schema *loaded = (Schema*)Class_Make_Obj(klass);
    Hash *type_dumps
        = (Hash*)CERTIFY(Hash_Fetch_Utf8(source, "fields", 6), HASH);
    Vector *analyzer_dumps
        = (Vector*)CERTIFY(Hash_Fetch_Utf8(source, "analyzers", 9), VECTOR);
    Vector *analyzers = (Vector*)Freezer_load((Obj*)analyzer_dumps);
    UNUSED_VAR(self);

    // Start with a blank Schema.
    Schema_init(loaded);
    SchemaIVARS *const loaded_ivars = Schema_IVARS(loaded);
    Vec_Grow(loaded_ivars->uniq_analyzers, Vec_Get_Size(analyzers));

    HashIterator *iter = HashIter_new(type_dumps);
    while (HashIter_Next(iter)) {
        String *field = HashIter_Get_Key(iter);
        Hash *type_dump = (Hash*)CERTIFY(HashIter_Get_Value(iter), HASH);
        String *type_str = (String*)Hash_Fetch_Utf8(type_dump, "type", 4);
        if (type_str) {
            if (Str_Equals_Utf8(type_str, "fulltext", 8)) {
                // Replace the "analyzer" tick with the real thing.
                Obj *tick
                    = CERTIFY(Hash_Fetch_Utf8(type_dump, "analyzer", 8), OBJ);
                Analyzer *analyzer
                    = (Analyzer*)Vec_Fetch(analyzers,
                                           (uint32_t)Json_obj_to_i64(tick));
                if (!analyzer) {
                    THROW(ERR, "Can't find analyzer for '%o'", field);
                }
                Hash_Store_Utf8(type_dump, "analyzer", 8, INCREF(analyzer));
                FullTextType *type
                    = (FullTextType*)S_load_type(FULLTEXTTYPE,
                                                 (Obj*)type_dump);
                Schema_Spec_Field(loaded, field, (FieldType*)type);
                DECREF(type);
            }
            else if (Str_Equals_Utf8(type_str, "string", 6)) {
                StringType *type
                    = (StringType*)S_load_type(STRINGTYPE, (Obj*)type_dump);
                Schema_Spec_Field(loaded, field, (FieldType*)type);
                DECREF(type);
            }
            else if (Str_Equals_Utf8(type_str, "blob", 4)) {
                BlobType *type
                    = (BlobType*)S_load_type(BLOBTYPE, (Obj*)type_dump);
                Schema_Spec_Field(loaded, field, (FieldType*)type);
                DECREF(type);
            }
            else if (Str_Equals_Utf8(type_str, "i32_t", 5)) {
                Int32Type *type
                    = (Int32Type*)S_load_type(INT32TYPE, (Obj*)type_dump);
                Schema_Spec_Field(loaded, field, (FieldType*)type);
                DECREF(type);
            }
            else if (Str_Equals_Utf8(type_str, "i64_t", 5)) {
                Int64Type *type
                    = (Int64Type*)S_load_type(INT64TYPE, (Obj*)type_dump);
                Schema_Spec_Field(loaded, field, (FieldType*)type);
                DECREF(type);
            }
            else if (Str_Equals_Utf8(type_str, "f32_t", 5)) {
                Float32Type *type
                    = (Float32Type*)S_load_type(FLOAT32TYPE, (Obj*)type_dump);
                Schema_Spec_Field(loaded, field, (FieldType*)type);
                DECREF(type);
            }
            else if (Str_Equals_Utf8(type_str, "f64_t", 5)) {
                Float64Type *type
                    = (Float64Type*)S_load_type(FLOAT64TYPE, (Obj*)type_dump);
                Schema_Spec_Field(loaded, field, (FieldType*)type);
                DECREF(type);
            }
            else {
                THROW(ERR, "Unknown type '%o' for field '%o'",
                      type_str, field);
            }
        }
        else {
            FieldType *type
                = (FieldType*)CERTIFY(Freezer_load((Obj*)type_dump),
                                      FIELDTYPE);
            Schema_Spec_Field(loaded, field, type);
            DECREF(type);
        }
    }
    DECREF(iter);

    DECREF(analyzers);

    return loaded;
}
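// S_load_type is a static helper not shown above. A plausible sketch,
// assuming each FieldType subclass implements Load() to rebuild itself
// from its per-field dump; the actual helper in the source file may
// differ.
static FieldType*
S_load_type(Class *klass, Obj *type_dump) {
    // Make a throwaway instance of the requested class, then ask it to
    // load the real object from the dump.
    FieldType *dummy  = (FieldType*)Class_Make_Obj(klass);
    FieldType *loaded = (FieldType*)FType_Load(dummy, type_dump);
    DECREF(dummy);
    return loaded;
}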
static void
S_discover_unused(FilePurger *self, Vector **purgables_ptr,
                  Vector **snapshots_ptr) {
    FilePurgerIVARS *const ivars = FilePurger_IVARS(self);
    Folder *folder = ivars->folder;
    DirHandle *dh = Folder_Open_Dir(folder, NULL);
    if (!dh) { RETHROW(INCREF(Err_get_error())); }
    Vector *spared = Vec_new(1);
    Vector *snapshots = Vec_new(1);
    String *snapfile = NULL;

    // Start off with the list of files in the current snapshot.
    if (ivars->snapshot) {
        Vector *entries    = Snapshot_List(ivars->snapshot);
        Vector *referenced = S_find_all_referenced(folder, entries);
        Vec_Push_All(spared, referenced);
        DECREF(entries);
        DECREF(referenced);
        snapfile = Snapshot_Get_Path(ivars->snapshot);
        if (snapfile) { Vec_Push(spared, INCREF(snapfile)); }
    }

    Hash *candidates = Hash_new(64);
    while (DH_Next(dh)) {
        String *entry = DH_Get_Entry(dh);
        if (Str_Starts_With_Utf8(entry, "snapshot_", 9)
            && Str_Ends_With_Utf8(entry, ".json", 5)
            && (!snapfile || !Str_Equals(entry, (Obj*)snapfile))
           ) {
            Snapshot *snapshot
                = Snapshot_Read_File(Snapshot_new(), folder, entry);
            Lock *lock
                = IxManager_Make_Snapshot_Read_Lock(ivars->manager, entry);
            Vector *snap_list  = Snapshot_List(snapshot);
            Vector *referenced = S_find_all_referenced(folder, snap_list);

            // DON'T obtain the lock -- only see whether another
            // entity holds a lock on the snapshot file.
            if (lock) {
                Lock_Clear_Stale(lock);
            }
            if (lock && Lock_Is_Locked(lock)) {
                // The snapshot file is locked, which means someone's using
                // that version of the index -- protect all of its entries.
                uint32_t new_size = Vec_Get_Size(spared)
                                    + Vec_Get_Size(referenced)
                                    + 1;
                Vec_Grow(spared, new_size);
                Vec_Push(spared, (Obj*)Str_Clone(entry));
                Vec_Push_All(spared, referenced);
            }
            else {
                // No one's using this snapshot, so all of its entries are
                // candidates for deletion.
                for (uint32_t i = 0, max = Vec_Get_Size(referenced);
                     i < max; i++
                ) {
                    String *file = (String*)Vec_Fetch(referenced, i);
                    Hash_Store(candidates, file, (Obj*)CFISH_TRUE);
                }
                Vec_Push(snapshots, INCREF(snapshot));
            }
            DECREF(referenced);
            DECREF(snap_list);
            DECREF(snapshot);
            DECREF(lock);
        }
        DECREF(entry);
    }
    DECREF(dh);

    // Clean up after a dead segment consolidation.
    S_zap_dead_merge(self, candidates);

    // Eliminate any current files from the list of files to be purged.
    for (uint32_t i = 0, max = Vec_Get_Size(spared); i < max; i++) {
        String *filename = (String*)Vec_Fetch(spared, i);
        DECREF(Hash_Delete(candidates, filename));
    }

    // Pass back purgables and Snapshots.
    *purgables_ptr = Hash_Keys(candidates);
    *snapshots_ptr = snapshots;

    DECREF(candidates);
    DECREF(spared);
}
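// A hedged sketch (the caller name below is hypothetical) of how the two
// out-parameters might be consumed: delete every unreferenced file, then
// remove the obsolete snapshot files themselves. The real purge path also
// tracks deletion failures and retries, which this sketch omits.
static void
S_purge_unused_SKETCH(FilePurger *self) {
    FilePurgerIVARS *const ivars = FilePurger_IVARS(self);
    Vector *purgables;
    Vector *snapshots;
    S_discover_unused(self, &purgables, &snapshots);

    // Delete the files no live snapshot references.
    for (uint32_t i = 0, max = Vec_Get_Size(purgables); i < max; i++) {
        String *filename = (String*)Vec_Fetch(purgables, i);
        Folder_Delete(ivars->folder, filename);
    }
    // Delete the snapshot files whose contents were just purged.
    for (uint32_t i = 0, max = Vec_Get_Size(snapshots); i < max; i++) {
        Snapshot *snapshot = (Snapshot*)Vec_Fetch(snapshots, i);
        String *snapfile = Snapshot_Get_Path(snapshot);
        if (snapfile) { Folder_Delete(ivars->folder, snapfile); }
    }
    DECREF(purgables);
    DECREF(snapshots);
}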