Example #1
bool_t
RAMFolder_local_delete(RAMFolder *self, const CharBuf *name)
{
    Obj *entry = Hash_Fetch(self->entries, (Obj*)name);
    if (entry) {
        if (Obj_Is_A(entry, RAMFILE)) {
            // Flat files can always be deleted.
            ;
        }
        else if (Obj_Is_A(entry, FOLDER)) {
            RAMFolder *inner_folder;
            if (Obj_Is_A(entry, COMPOUNDFILEREADER)) {
                inner_folder = (RAMFolder*)CERTIFY(
                    CFReader_Get_Real_Folder((CompoundFileReader*)entry), 
                    RAMFOLDER);
            }
            else {
                inner_folder = (RAMFolder*)CERTIFY(entry, RAMFOLDER);
            }
            if (Hash_Get_Size(inner_folder->entries)) {
                // Can't delete non-empty dir. 
                return false;
            }
        }
        else {
            return false;
        }
        DECREF(Hash_Delete(self->entries, (Obj*)name));
        return true;
    }
    else {
        return false;
    }
}
Example #2
bool
CFReader_Local_Delete_IMP(CompoundFileReader *self, String *name) {
    CompoundFileReaderIVARS *const ivars = CFReader_IVARS(self);
    Hash *record = (Hash*)Hash_Delete(ivars->records, name);
    // Only the pointer value of `record` is inspected below; the object is
    // not accessed again after this DECREF.
    DECREF(record);

    if (record == NULL) {
        return Folder_Local_Delete(ivars->real_folder, name);
    }
    else {
        // Once the number of virtual files falls to 0, remove the compound
        // files.
        if (Hash_Get_Size(ivars->records) == 0) {
            String *cf_file = (String*)SSTR_WRAP_UTF8("cf.dat", 6);
            if (!Folder_Delete(ivars->real_folder, cf_file)) {
                return false;
            }
            String *cfmeta_file = (String*)SSTR_WRAP_UTF8("cfmeta.json", 11);
            if (!Folder_Delete(ivars->real_folder, cfmeta_file)) {
                return false;
            }
        }
        return true;
    }
}
Example #3
bool_t
CFReader_local_delete(CompoundFileReader *self, const CharBuf *name) {
    Hash *record = (Hash*)Hash_Delete(self->records, (Obj*)name);
    // Only the pointer value of `record` is inspected below; the object is
    // not accessed again after this DECREF.
    DECREF(record);

    if (record == NULL) {
        return Folder_Local_Delete(self->real_folder, name);
    }
    else {
        // Once the number of virtual files falls to 0, remove the compound
        // files.
        if (Hash_Get_Size(self->records) == 0) {
            CharBuf *cf_file = (CharBuf*)ZCB_WRAP_STR("cf.dat", 6);
            if (!Folder_Delete(self->real_folder, cf_file)) {
                return false;
            }
            CharBuf *cfmeta_file = (CharBuf*)ZCB_WRAP_STR("cfmeta.json", 11);
            if (!Folder_Delete(self->real_folder, cfmeta_file)) {
                return false;
            }
        }
        return true;
    }
}
Example #4
Obj*
S_dump_hash(Hash *hash) {
    Hash *dump = Hash_new(Hash_Get_Size(hash));

    HashIterator *iter = HashIter_new(hash);
    while (HashIter_Next(iter)) {
        String *key   = HashIter_Get_Key(iter);
        Obj    *value = HashIter_Get_Value(iter);
        Hash_Store(dump, key, Freezer_dump(value));
    }
    DECREF(iter);

    return (Obj*)dump;
}
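Examples #4 and #5 (and several below) share the same HashIterator idiom: create the iterator, advance it with HashIter_Next, read the current pair with HashIter_Get_Key and HashIter_Get_Value without taking ownership, and DECREF only the iterator itself. The following is a minimal stand-alone sketch of that idiom; S_count_entries is a hypothetical helper, not part of the Lucy sources, and its result should always equal Hash_Get_Size(hash).
static uint32_t
S_count_entries(Hash *hash) {
    uint32_t count = 0;
    HashIterator *iter = HashIter_new(hash);
    while (HashIter_Next(iter)) {
        // Key and value are used without taking ownership, as in the
        // examples on this page -- no DECREF on either.
        String *key   = HashIter_Get_Key(iter);
        Obj    *value = HashIter_Get_Value(iter);
        if (key != NULL && value != NULL) { count++; }
    }
    DECREF(iter);
    return count;
}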
Example #5
void
Freezer_serialize_hash(Hash *hash, OutStream *outstream) {
    uint32_t hash_size = Hash_Get_Size(hash);
    OutStream_Write_C32(outstream, hash_size);

    HashIterator *iter = HashIter_new(hash);
    while (HashIter_Next(iter)) {
        String *key = HashIter_Get_Key(iter);
        Obj    *val = HashIter_Get_Value(iter);
        Freezer_serialize_string(key, outstream);
        FREEZE(val, outstream);
    }
    DECREF(iter);
}
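The routine above writes a C32-encoded entry count followed by each serialized key and FREEZE'd value. As an illustrative usage sketch only: S_write_small_hash is hypothetical, and outstream is assumed to be an OutStream opened elsewhere, which these examples do not show.
static void
S_write_small_hash(OutStream *outstream) {
    Hash *hash = Hash_new(2);
    Hash_Store_Utf8(hash, "alpha", 5, (Obj*)Str_newf("%i32", (int32_t)1));
    Hash_Store_Utf8(hash, "beta", 4, (Obj*)Str_newf("%i32", (int32_t)2));
    // Writes the entry count, then each key/value pair.
    Freezer_serialize_hash(hash, outstream);
    DECREF(hash);
}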
Example #6
Hash*
Schema_Dump_IMP(Schema *self) {
    SchemaIVARS *const ivars = Schema_IVARS(self);
    Hash *dump = Hash_new(0);
    Hash *type_dumps = Hash_new(Hash_Get_Size(ivars->types));

    // Record class name, store dumps of unique Analyzers.
    Hash_Store_Utf8(dump, "_class", 6,
                    (Obj*)Str_Clone(Schema_get_class_name(self)));
    Hash_Store_Utf8(dump, "analyzers", 9,
                    Freezer_dump((Obj*)ivars->uniq_analyzers));

    // Dump FieldTypes.
    Hash_Store_Utf8(dump, "fields", 6, (Obj*)type_dumps);
    HashIterator *iter = HashIter_new(ivars->types);
    while (HashIter_Next(iter)) {
        String    *field      = HashIter_Get_Key(iter);
        FieldType *type       = (FieldType*)HashIter_Get_Value(iter);
        Class     *type_class = FType_get_class(type);

        // Dump known types to simplified format.
        if (type_class == FULLTEXTTYPE) {
            FullTextType *fttype = (FullTextType*)type;
            Hash *type_dump = FullTextType_Dump_For_Schema(fttype);
            Analyzer *analyzer = FullTextType_Get_Analyzer(fttype);
            uint32_t tick
                = S_find_in_array(ivars->uniq_analyzers, (Obj*)analyzer);

            // Store the tick which references a unique analyzer.
            Hash_Store_Utf8(type_dump, "analyzer", 8,
                            (Obj*)Str_newf("%u32", tick));

            Hash_Store(type_dumps, field, (Obj*)type_dump);
        }
        else if (type_class == STRINGTYPE || type_class == BLOBTYPE) {
            Hash *type_dump = FType_Dump_For_Schema(type);
            Hash_Store(type_dumps, field, (Obj*)type_dump);
        }
        // Unknown FieldType type, so punt.
        else {
            Hash_Store(type_dumps, field, FType_Dump(type));
        }
    }
    DECREF(iter);

    return dump;
}
Example #7
static Obj*
S_load_from_hash(Hash *dump) {
    String *class_name = (String*)Hash_Fetch_Utf8(dump, "_class", 6);

    // Assume that the presence of the "_class" key paired with a valid class
    // name indicates the output of a dump() rather than an ordinary Hash.
    if (class_name && Str_is_a(class_name, STRING)) {
        Class *klass = Class_fetch_class(class_name);

        if (!klass) {
            String *parent_class_name = Class_find_parent_class(class_name);
            if (parent_class_name) {
                Class *parent = Class_singleton(parent_class_name, NULL);
                klass = Class_singleton(class_name, parent);
                DECREF(parent_class_name);
            }
            else {
                // TODO: Fix load() so that it works with ordinary hash keys
                // named "_class".
                THROW(ERR, "Can't find class '%o'", class_name);
            }
        }

        // Dispatch to an alternate Load() method.
        if (klass) {
            return S_load_via_load_method(klass, (Obj*)dump);
        }

    }

    // It's an ordinary Hash.
    Hash *loaded = Hash_new(Hash_Get_Size(dump));

    HashIterator *iter = HashIter_new(dump);
    while (HashIter_Next(iter)) {
        String *key   = HashIter_Get_Key(iter);
        Obj    *value = HashIter_Get_Value(iter);
        Hash_Store(loaded, key, Freezer_load(value));
    }
    DECREF(iter);

    return (Obj*)loaded;

}
Example #8
Hash*
LexWriter_metadata(LexiconWriter *self)
{
    Hash *const metadata  = DataWriter_metadata((DataWriter*)self);
    Hash *const counts    = (Hash*)INCREF(self->counts);
    Hash *const ix_counts = (Hash*)INCREF(self->ix_counts);

    /* Placeholders. */
    if (Hash_Get_Size(counts) == 0) {
        Hash_Store_Str(counts, "none", 4, (Obj*)CB_newf("%i32", (i32_t)0) );
        Hash_Store_Str(ix_counts, "none", 4, 
            (Obj*)CB_newf("%i32", (i32_t)0) );
    }

    Hash_Store_Str(metadata, "counts", 6, (Obj*)counts);
    Hash_Store_Str(metadata, "index_counts", 12, (Obj*)ix_counts);

    return metadata;
}
Example #9
static void
test_offsets(TestBatchRunner *runner) {
    Folder *folder = S_folder_with_contents();
    CompoundFileWriter *cf_writer = CFWriter_new(folder);
    Hash    *cf_metadata;
    Hash    *files;

    CFWriter_Consolidate(cf_writer);

    // cfmeta_file is a file-scope String holding the "cfmeta.json" path,
    // defined elsewhere in this test file.
    cf_metadata = (Hash*)CERTIFY(
                      Json_slurp_json(folder, cfmeta_file), HASH);
    files = (Hash*)CERTIFY(
                Hash_Fetch_Utf8(cf_metadata, "files", 5), HASH);

    bool     offsets_ok = true;

    TEST_TRUE(runner, Hash_Get_Size(files) > 0, "Multiple files");

    HashIterator *iter = HashIter_new(files);
    while (HashIter_Next(iter)) {
        String *file   = HashIter_Get_Key(iter);
        Hash   *stats  = (Hash*)CERTIFY(HashIter_Get_Value(iter), HASH);
        Obj    *offset = CERTIFY(Hash_Fetch_Utf8(stats, "offset", 6), OBJ);
        int64_t offs   = Obj_To_I64(offset);
        if (offs % 8 != 0) {
            offsets_ok = false;
            FAIL(runner, "Offset %" PRId64 " for %s not a multiple of 8",
                 offset, Str_Get_Ptr8(file));
            break;
        }
    }
    DECREF(iter);
    if (offsets_ok) {
        PASS(runner, "All offsets are multiples of 8");
    }

    DECREF(cf_metadata);
    DECREF(cf_writer);
    DECREF(folder);
}
Example #10
static void
test_Store_and_Fetch(TestBatch *batch) {
    Hash          *hash         = Hash_new(100);
    Hash          *dupe         = Hash_new(100);
    const uint32_t starting_cap = Hash_Get_Capacity(hash);
    VArray        *expected     = VA_new(100);
    VArray        *got          = VA_new(100);
    ZombieCharBuf *twenty       = ZCB_WRAP_STR("20", 2);
    ZombieCharBuf *forty        = ZCB_WRAP_STR("40", 2);
    ZombieCharBuf *foo          = ZCB_WRAP_STR("foo", 3);

    for (int32_t i = 0; i < 100; i++) {
        CharBuf *cb = CB_newf("%i32", i);
        Hash_Store(hash, (Obj*)cb, (Obj*)cb);
        Hash_Store(dupe, (Obj*)cb, INCREF(cb));
        VA_Push(expected, INCREF(cb));
    }
    TEST_TRUE(batch, Hash_Equals(hash, (Obj*)dupe), "Equals");

    TEST_INT_EQ(batch, Hash_Get_Capacity(hash), starting_cap,
                "Initial capacity sufficient (no rebuilds)");

    for (int32_t i = 0; i < 100; i++) {
        Obj *key  = VA_Fetch(expected, i);
        Obj *elem = Hash_Fetch(hash, key);
        VA_Push(got, (Obj*)INCREF(elem));
    }

    TEST_TRUE(batch, VA_Equals(got, (Obj*)expected),
              "basic Store and Fetch");
    TEST_INT_EQ(batch, Hash_Get_Size(hash), 100,
                "size incremented properly by Hash_Store");

    TEST_TRUE(batch, Hash_Fetch(hash, (Obj*)foo) == NULL,
              "Fetch against non-existent key returns NULL");

    Hash_Store(hash, (Obj*)forty, INCREF(foo));
    TEST_TRUE(batch, ZCB_Equals(foo, Hash_Fetch(hash, (Obj*)forty)),
              "Hash_Store replaces existing value");
    TEST_FALSE(batch, Hash_Equals(hash, (Obj*)dupe),
               "replacement value spoils equals");
    TEST_INT_EQ(batch, Hash_Get_Size(hash), 100,
                "size unaffected after value replaced");

    TEST_TRUE(batch, Hash_Delete(hash, (Obj*)forty) == (Obj*)foo,
              "Delete returns value");
    DECREF(foo);
    TEST_INT_EQ(batch, Hash_Get_Size(hash), 99,
                "size decremented by successful Delete");
    TEST_TRUE(batch, Hash_Delete(hash, (Obj*)forty) == NULL,
              "Delete returns NULL when key not found");
    TEST_INT_EQ(batch, Hash_Get_Size(hash), 99,
                "size not decremented by unsuccessful Delete");
    DECREF(Hash_Delete(dupe, (Obj*)forty));
    TEST_TRUE(batch, Hash_Equals(hash, (Obj*)dupe), "Equals after Delete");

    Hash_Clear(hash);
    TEST_TRUE(batch, Hash_Fetch(hash, (Obj*)twenty) == NULL, "Clear");
    TEST_TRUE(batch, Hash_Get_Size(hash) == 0, "size is 0 after Clear");

    DECREF(hash);
    DECREF(dupe);
    DECREF(got);
    DECREF(expected);
}
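The test above leans on the ownership rules visible throughout this page: Hash_Store consumes a reference to the value (hence the INCREF calls wherever another reference is kept), and Hash_Delete hands the stored value back to the caller, who must DECREF it. Below is a condensed, hypothetical sketch of just that discipline, using the same old-style CharBuf API; S_store_then_delete is not part of the Lucy test suite.
static void
S_store_then_delete(void) {
    Hash    *hash = Hash_new(0);
    CharBuf *val  = CB_newf("payload");

    // Hash_Store consumes one reference to the value, so INCREF first when
    // the caller wants to keep using `val` afterwards.
    Hash_Store(hash, (Obj*)val, INCREF(val));

    // Hash_Delete returns the stored value along with its reference; the
    // caller is responsible for releasing it.
    Obj *deleted = Hash_Delete(hash, (Obj*)val);
    DECREF(deleted);

    DECREF(val);   // reference from CB_newf
    DECREF(hash);
}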
Example #11
uint32_t
Schema_Num_Fields_IMP(Schema *self) {
    SchemaIVARS *const ivars = Schema_IVARS(self);
    return Hash_Get_Size(ivars->types);
}
Example #12
uint32_t
Snapshot_Num_Entries_IMP(Snapshot *self) {
    SnapshotIVARS *const ivars = Snapshot_IVARS(self);
    return Hash_Get_Size(ivars->entries);
}
Example #13
u32_t
Snapshot_num_entries(Snapshot *self) { return Hash_Get_Size(self->entries); }
Example #14
static bool_t
S_to_json(Obj *dump, CharBuf *json, int32_t depth) {
    // Guard against infinite recursion in self-referencing data structures.
    if (depth > MAX_DEPTH) {
        CharBuf *mess = MAKE_MESS("Exceeded max depth of %i32", MAX_DEPTH);
        Err_set_error(Err_new(mess));
        return false;
    }

    if (!dump) {
        CB_Cat_Trusted_Str(json, "null", 4);
    }
    else if (dump == (Obj*)CFISH_TRUE) {
        CB_Cat_Trusted_Str(json, "true", 4);
    }
    else if (dump == (Obj*)CFISH_FALSE) {
        CB_Cat_Trusted_Str(json, "false", 5);
    }
    else if (Obj_Is_A(dump, CHARBUF)) {
        S_append_json_string(dump, json);
    }
    else if (Obj_Is_A(dump, INTNUM)) {
        CB_catf(json, "%i64", Obj_To_I64(dump));
    }
    else if (Obj_Is_A(dump, FLOATNUM)) {
        CB_catf(json, "%f64", Obj_To_F64(dump));
    }
    else if (Obj_Is_A(dump, VARRAY)) {
        VArray *array = (VArray*)dump;
        size_t size = VA_Get_Size(array);
        if (size == 0) {
            // Put empty array on single line.
            CB_Cat_Trusted_Str(json, "[]", 2);
            return true;
        }
        else if (size == 1) {
            Obj *elem = VA_Fetch(array, 0);
            if (!(Obj_Is_A(elem, HASH) || Obj_Is_A(elem, VARRAY))) {
                // Put array containing single scalar element on one line.
                CB_Cat_Trusted_Str(json, "[", 1);
                if (!S_to_json(elem, json, depth + 1)) {
                    return false;
                }
                CB_Cat_Trusted_Str(json, "]", 1);
                return true;
            }
        }
        // Fall back to spreading elements across multiple lines.
        CB_Cat_Trusted_Str(json, "[", 1);
        for (size_t i = 0; i < size; i++) {
            CB_Cat_Trusted_Str(json, "\n", 1);
            S_cat_whitespace(json, depth + 1);
            if (!S_to_json(VA_Fetch(array, i), json, depth + 1)) {
                return false;
            }
            if (i + 1 < size) {
                CB_Cat_Trusted_Str(json, ",", 1);
            }
        }
        CB_Cat_Trusted_Str(json, "\n", 1);
        S_cat_whitespace(json, depth);
        CB_Cat_Trusted_Str(json, "]", 1);
    }
    else if (Obj_Is_A(dump, HASH)) {
        Hash *hash = (Hash*)dump;
        size_t size = Hash_Get_Size(hash);

        // Put empty hash on single line.
        if (size == 0) {
            CB_Cat_Trusted_Str(json, "{}", 2);
            return true;
        }

        // Validate that all keys are strings, then sort.
        VArray *keys = Hash_Keys(hash);
        for (size_t i = 0; i < size; i++) {
            Obj *key = VA_Fetch(keys, i);
            if (!key || !Obj_Is_A(key, CHARBUF)) {
                DECREF(keys);
                CharBuf *key_class = key ? Obj_Get_Class_Name(key) : NULL;
                CharBuf *mess = MAKE_MESS("Illegal key type: %o", key_class);
                Err_set_error(Err_new(mess));
                return false;
            }
        }
        VA_Sort(keys, NULL, NULL);

        // Spread pairs across multiple lines.
        CB_Cat_Trusted_Str(json, "{", 1);
        for (size_t i = 0; i < size; i++) {
            Obj *key = VA_Fetch(keys, i);
            CB_Cat_Trusted_Str(json, "\n", 1);
            S_cat_whitespace(json, depth + 1);
            S_append_json_string(key, json);
            CB_Cat_Trusted_Str(json, ": ", 2);
            if (!S_to_json(Hash_Fetch(hash, key), json, depth + 1)) {
                DECREF(keys);
                return false;
            }
            if (i + 1 < size) {
                CB_Cat_Trusted_Str(json, ",", 1);
            }
        }
        CB_Cat_Trusted_Str(json, "\n", 1);
        S_cat_whitespace(json, depth);
        CB_Cat_Trusted_Str(json, "}", 1);

        DECREF(keys);
    }

    return true;
}
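The key check in the HASH branch above is a reusable pattern: Hash_Keys returns a new VArray that the caller must DECREF, while the keys fetched from it are borrowed. A hypothetical helper extracting just that check follows; S_keys_are_strings is not part of the Lucy sources and uses the same old-style API as the function above.
static bool_t
S_keys_are_strings(Hash *hash) {
    bool_t  ok   = true;
    VArray *keys = Hash_Keys(hash);   // new VArray; caller must DECREF
    for (uint32_t i = 0, max = VA_Get_Size(keys); i < max; i++) {
        Obj *key = VA_Fetch(keys, i); // borrowed reference
        if (!key || !Obj_Is_A(key, CHARBUF)) {
            ok = false;
            break;
        }
    }
    DECREF(keys);
    return ok;
}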
Example #15
void
FilePurger_purge(FilePurger *self)
{
    Lock *deletion_lock = IxManager_Make_Deletion_Lock(self->manager);

    // Obtain deletion lock, purge files, release deletion lock.
    Lock_Clear_Stale(deletion_lock);
    if (Lock_Obtain(deletion_lock)) {
        Folder  *folder    = self->folder;
        Hash    *failures  = Hash_new(0);
        VArray  *purgables;
        VArray  *snapshots;

        S_discover_unused(self, &purgables, &snapshots);

        // Attempt to delete entries -- if failure, no big deal, just try
        // again later.  Proceed in reverse lexical order so that directories
        // get deleted after they've been emptied. 
        VA_Sort(purgables, NULL, NULL);
        for (uint32_t i = VA_Get_Size(purgables); i--; ) {
            CharBuf *entry = (CharBuf*)VA_Fetch(purgables, i);
            if (Hash_Fetch(self->disallowed, (Obj*)entry)) { continue; }
            if (!Folder_Delete(folder, entry)) { 
                if (Folder_Exists(folder, entry)) {
                    // EMPTY is a file-scope placeholder object defined
                    // elsewhere in this file.
                    Hash_Store(failures, (Obj*)entry, INCREF(&EMPTY));
                }
            }
        }

        for (uint32_t i = 0, max = VA_Get_Size(snapshots); i < max; i++) {
            Snapshot *snapshot = (Snapshot*)VA_Fetch(snapshots, i);
            bool_t snapshot_has_failures = false;
            if (Hash_Get_Size(failures)) {
                // Only delete snapshot files if all of their entries were
                // successfully deleted.  
                VArray *entries = Snapshot_List(snapshot);
                for (uint32_t j = VA_Get_Size(entries); j--; ) {
                    CharBuf *entry = (CharBuf*)VA_Fetch(entries, j);
                    if (Hash_Fetch(failures, (Obj*)entry)) {
                        snapshot_has_failures = true;
                        break;
                    }
                }
                DECREF(entries);
            }
            if (!snapshot_has_failures) {
                CharBuf *snapfile = Snapshot_Get_Path(snapshot);
                Folder_Delete(folder, snapfile);
            }
        }

        DECREF(failures);
        DECREF(purgables);
        DECREF(snapshots);
        Lock_Release(deletion_lock);
    }
    else {
        WARN("Can't obtain deletion lock, skipping deletion of "
            "obsolete files");
    }

    DECREF(deletion_lock);
}
Example #16
uint32_t
Doc_Get_Size_IMP(Doc *self) {
    Hash *hash = (Hash*)Doc_IVARS(self)->fields;
    return Hash_Get_Size(hash);
}