struct _list * elf64_entries (const struct _buffer * buffer) { if (elf64_check(buffer)) return NULL; Elf64_Ehdr * ehdr = elf64_ehdr(buffer); if (ehdr == NULL) return NULL; struct _list * entries = list_create(); struct _index * index = index_create(ehdr->e_entry); list_append(entries, index); object_delete(index); size_t shdr_i; for (shdr_i = 0; shdr_i < ehdr->e_shnum; shdr_i++) { size_t sym_i = 0; Elf64_Sym * sym; while ((sym = elf64_sym(buffer, shdr_i, sym_i++)) != NULL) { if ( (ELF64_ST_TYPE(sym->st_info) == STT_FUNC) && (sym->st_value != 0)) { struct _index * index = index_create(sym->st_value); list_append(entries, index); object_delete(index); } } } return entries; }
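A minimal consumer sketch for elf64_entries, assuming only the container API already used in this section (list_iterator, object_delete) and that struct _index exposes its value as the index field:

/* Hypothetical usage: print every candidate entry point found in an
 * ELF64 buffer. `buffer` is assumed to already hold the loaded file. */
void elf64_print_entries (const struct _buffer * buffer) {
    struct _list * entries = elf64_entries(buffer);
    if (entries == NULL)
        return;
    struct _list_it * it;
    for (it = list_iterator(entries); it != NULL; it = it->next) {
        struct _index * index = it->data;
        printf("entry: %llx\n", (unsigned long long) index->index);
    }
    object_delete(entries); /* the list owns its copies of the indexes */
}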
static IndexWriter *create_iw(Store *store) { FieldInfos *fis = create_fis(); index_create(store, fis); fis_deref(fis); return iw_open(store, standard_analyzer_new(true), &default_config); }
Index *index_new(Store *store, Analyzer *analyzer, HashSet *def_fields, bool create) { Index *self = ALLOC_AND_ZERO(Index); HashSetEntry *hse; /* FIXME: need to add these to the query parser */ self->config = default_config; mutex_init(&self->mutex, NULL); self->has_writes = false; if (store) { REF(store); self->store = store; } else { self->store = open_ram_store(); create = true; } if (analyzer) { self->analyzer = analyzer; REF(analyzer); } else { self->analyzer = mb_standard_analyzer_new(true); } if (create) { FieldInfos *fis = fis_new(STORE_YES, INDEX_YES, TERM_VECTOR_WITH_POSITIONS_OFFSETS); index_create(self->store, fis); fis_deref(fis); } /* options */ self->key = NULL; self->id_field = intern("id"); self->def_field = intern("id"); self->auto_flush = false; self->check_latest = true; REF(self->analyzer); self->qp = qp_new(self->analyzer); for (hse = def_fields->first; hse; hse = hse->next) { qp_add_field(self->qp, (Symbol)hse->elem, true, true); } /* Index is a convenience class so set qp convenience options */ self->qp->allow_any_fields = true; self->qp->clean_str = true; self->qp->handle_parse_errors = true; return self; }
static void sort_multi_test_setup(Store *store1, Store *store2) { int i; FieldInfos *fis = fis_new(STORE_YES, INDEX_YES, TERM_VECTOR_YES); IndexWriter *iw; SortTestData data[] = { /* len mod */ {"findall","a","6","0.01"}, /* 4 0 */ {"findall","c","5","0.1"}, /* 3 3 */ {"findall","e","2","0.001"}, /* 5 1 */ {"findall","g","1","1.0"}, /* 3 3 */ {"findall","i","3","0.0001"}, /* 6 2 */ {"findall","", "4","10.0"}, /* 4 0 */ {"findall","h","5","0.00001"}, /* 7 3 */ {"findall","f","2","100.0"}, /* 5 1 */ {"findall","d","3","1000.0"}, /* 6 2 */ {"findall","b","4","0.000001"} /* 8 0 */ }; index_create(store1, fis); index_create(store2, fis); fis_deref(fis); iw = iw_open(store1, whitespace_analyzer_new(false), NULL); for (i = 0; i < NELEMS(data)/2; i++) { add_sort_test_data(&data[i], iw); } iw_close(iw); iw = iw_open(store2, whitespace_analyzer_new(false), NULL); for (i = NELEMS(data)/2; i < NELEMS(data); i++) { add_sort_test_data(&data[i], iw); } iw_close(iw); }
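A hedged driver sketch for the two-store setup; open_ram_store appears elsewhere in this section, while store_deref is assumed to be the matching release call:

/* Sketch: run the multi-store sort fixtures against two transient RAM stores. */
static void run_sort_multi_tests(void) {
    Store *store1 = open_ram_store();
    Store *store2 = open_ram_store();
    sort_multi_test_setup(store1, store2);
    /* ... open searchers over both stores and check sort order here ... */
    store_deref(store1); /* assumed cleanup call */
    store_deref(store2);
}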
static void sort_test_setup(Store *store) { int i; IndexWriter *iw; FieldInfos *fis = fis_new(STORE_YES, INDEX_YES, TERM_VECTOR_YES); index_create(store, fis); fis_deref(fis); iw = iw_open(store, whitespace_analyzer_new(false), NULL); for (i = 0; i < NELEMS(data); i++) { add_sort_test_data(&data[i], iw); } iw_close(iw); }
static void test_index_for_algorithm(struct index_algorithm *alg, const char *name) { struct index *index; char *text; int ok; int nd; test_index_naive_callback_called = 0; index = index_create(alg, name); CU_ASSERT(index != NULL); ok = index_put_document(index,4,"This is a very long text."); CU_ASSERT(ok != 0); ok = index_put_document(index,12,"This is a short text."); CU_ASSERT(ok != 0); nd = index_find_documents(index,test_index_naive_callback,NULL,1,"very"); CU_ASSERT(test_index_naive_callback_called == 1); CU_ASSERT(nd == 1); ok = index_put_document(index,20,zauberlehrling); CU_ASSERT(ok != 0); test_index_contains_all_suffixes(index, "This is a very long text.", 4); test_index_contains_all_suffixes(index, zauberlehrling, 20); ok = index_remove_document(index,4); CU_ASSERT(ok != 0); nd = index_find_documents(index,test_index_naive_callback2,NULL,1,"very"); CU_ASSERT(nd == 0); text = read_file_contents("of-human-bondage.txt"); CU_ASSERT(text != NULL); ok = index_put_document(index,32,text); CU_ASSERT(ok != 0); test_index_contains_all_suffixes(index, zauberlehrling, 20); test_index_contains_all_suffixes(index, text, 32); index_dispose(index); free(text); }
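A possible CUnit registration for this test, assuming a hypothetical index_algorithm instance named index_algorithm_naive (the algorithm and suite names here are illustrative, not from the source):

/* Wrapper with the CU_TestFunc signature. */
static void test_index_naive(void) {
    extern struct index_algorithm index_algorithm_naive; /* hypothetical */
    test_index_for_algorithm(&index_algorithm_naive, "naive");
}

/* Registration sketch. */
static void register_index_tests(void) {
    CU_pSuite suite = CU_add_suite("index", NULL, NULL);
    CU_add_test(suite, "index (naive algorithm)", test_index_naive);
}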
void prepare_filter_index(Store *store) { int i; IndexWriter *iw; FieldInfos *fis = fis_new(STORE_YES, INDEX_YES, TERM_VECTOR_NO); num = intern("num"); date = intern("date"); flipflop = intern("flipflop"); struct FilterData data[FILTER_DOCS_SIZE] = { {"0", "20040601", "on"}, {"1", "20041001", "off"}, {"2", "20051101", "on"}, {"3", "20041201", "off"}, {"4", "20051101", "on"}, {"5", "20041201", "off"}, {"6", "20050101", "on"}, {"7", "20040701", "off"}, {"8", "20050301", "on"}, {"9", "20050401", "off"} }; index_create(store, fis); fis_deref(fis); iw = iw_open(store, whitespace_analyzer_new(false), NULL); for (i = 0; i < FILTER_DOCS_SIZE; i++) { Document *doc = doc_new(); doc->boost = (float)(i+1); doc_add_field(doc, df_add_data(df_new(num), data[i].num)); doc_add_field(doc, df_add_data(df_new(date), data[i].date)); doc_add_field(doc, df_add_data(df_new(flipflop), data[i].flipflop)); iw_add_doc(iw, doc); doc_destroy(doc); } iw_close(iw); return; }
static void write_index(const char *filename) { struct index_node *index; char *line; FILE *cfile; cfile = fopen(filename, "w"); if (!cfile) fatal("Could not open %s for writing: %s\n", filename, strerror(errno)); index = index_create(); while((line = getline_wrapped(stdin, NULL))) { index_insert(index, line); free(line); } index_write(index, cfile); index_destroy(index); fclose(cfile); }
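write_index consumes records from stdin one line at a time; a hedged driver sketch (fatal is assumed to come from the surrounding tool, as above):

/* Sketch: build an index file from stdin, depmod-style. */
int main(int argc, char *argv[]) {
    if (argc != 2)
        fatal("usage: %s <output-index>\n", argv[0]);
    write_index(argv[1]); /* reads one record per line until EOF */
    return 0;
}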
void copy_index(Oid OIDOldIndex, Oid OIDNewHeap) { Relation OldIndex, NewHeap; HeapTuple Old_pg_index_Tuple, Old_pg_index_relation_Tuple, pg_proc_Tuple; IndexTupleForm Old_pg_index_Form; Form_pg_class Old_pg_index_relation_Form; Form_pg_proc pg_proc_Form; char *NewIndexName; AttrNumber *attnumP; int natts; FuncIndexInfo * finfo; NewHeap = heap_open(OIDNewHeap); OldIndex = index_open(OIDOldIndex); /* * OK. Create a new (temporary) index for the one that's already * here. To do this I get the info from pg_index, re-build the * FuncIndexInfo if I have to, and add a new index with a temporary * name. */ Old_pg_index_Tuple = SearchSysCacheTuple(INDEXRELID, ObjectIdGetDatum(OldIndex->rd_id), 0,0,0); Assert(Old_pg_index_Tuple); Old_pg_index_Form = (IndexTupleForm)GETSTRUCT(Old_pg_index_Tuple); Old_pg_index_relation_Tuple = SearchSysCacheTuple(RELOID, ObjectIdGetDatum(OldIndex->rd_id), 0,0,0); Assert(Old_pg_index_relation_Tuple); Old_pg_index_relation_Form = (Form_pg_class)GETSTRUCT(Old_pg_index_relation_Tuple); NewIndexName = palloc(NAMEDATALEN+1); /* XXX */ sprintf(NewIndexName, "temp_%x", OIDOldIndex); /* Set the name. */ /* * Ugly as it is, the only way I have of working out the number of * attributes is to count them. Mostly there'll be just one but * I've got to be sure. */ for (attnumP = &(Old_pg_index_Form->indkey[0]), natts = 0; *attnumP != InvalidAttrNumber; attnumP++, natts++); /* * If this is a functional index, I need to rebuild the functional * component to pass it to the defining procedure. */ if (Old_pg_index_Form->indproc != InvalidOid) { finfo = (FuncIndexInfo *) palloc(sizeof(FuncIndexInfo)); /* allocate storage before filling in the function-index info */ FIgetnArgs(finfo) = natts; FIgetProcOid(finfo) = Old_pg_index_Form->indproc; pg_proc_Tuple = SearchSysCacheTuple(PROOID, ObjectIdGetDatum(Old_pg_index_Form->indproc), 0,0,0); Assert(pg_proc_Tuple); pg_proc_Form = (Form_pg_proc)GETSTRUCT(pg_proc_Tuple); namecpy(&(finfo->funcName), &(pg_proc_Form->proname)); } else { finfo = (FuncIndexInfo *) NULL; natts = 1; } index_create((NewHeap->rd_rel->relname).data, NewIndexName, finfo, Old_pg_index_relation_Form->relam, natts, Old_pg_index_Form->indkey, Old_pg_index_Form->indclass, (uint16)0, (Datum) NULL, NULL); heap_close(OldIndex); heap_close(NewHeap); }
static db_result_t aql_execute(db_handle_t *handle, aql_adt_t *adt) { uint8_t optype; int first_rel_arg; db_result_t result; relation_t *rel; aql_attribute_t *attr; attribute_t *relattr; optype = AQL_GET_TYPE(adt); if(optype == AQL_TYPE_NONE) { /* No-ops always succeed. These can be generated by empty lines or comments in the query language. */ return DB_OK; } /* If the ASSIGN flag is set, the first relation in the array is the desired result relation. */ first_rel_arg = !!(adt->flags & AQL_FLAG_ASSIGN); if(optype != AQL_TYPE_CREATE_RELATION && optype != AQL_TYPE_REMOVE_RELATION && optype != AQL_TYPE_JOIN) { rel = relation_load(adt->relations[first_rel_arg]); if(rel == NULL) { return DB_NAME_ERROR; } } else { rel = NULL; } result = DB_RELATIONAL_ERROR; switch(optype) { case AQL_TYPE_CREATE_ATTRIBUTE: attr = &adt->attributes[0]; if(relation_attribute_add(rel, DB_STORAGE, attr->name, attr->domain, attr->element_size) != NULL) { result = DB_OK; } break; case AQL_TYPE_CREATE_INDEX: relattr = relation_attribute_get(rel, adt->attributes[0].name); if(relattr == NULL) { result = DB_NAME_ERROR; break; } result = index_create(AQL_GET_INDEX_TYPE(adt), rel, relattr); break; case AQL_TYPE_CREATE_RELATION: if(relation_create(adt->relations[0], DB_STORAGE) != NULL) { result = DB_OK; } break; case AQL_TYPE_REMOVE_ATTRIBUTE: result = relation_attribute_remove(rel, adt->attributes[0].name); break; case AQL_TYPE_REMOVE_INDEX: relattr = relation_attribute_get(rel, adt->attributes[0].name); if(relattr != NULL) { if(relattr->index != NULL) { result = index_destroy(relattr->index); } else { result = DB_OK; } } else { result = DB_NAME_ERROR; } break; case AQL_TYPE_REMOVE_RELATION: result = relation_remove(adt->relations[0], 1); break; #if DB_FEATURE_REMOVE case AQL_TYPE_REMOVE_TUPLES: /* Overwrite the attribute array with a full copy of the original relation's attributes. */ adt->attribute_count = 0; for(relattr = list_head(rel->attributes); relattr != NULL; relattr = relattr->next) { AQL_ADD_ATTRIBUTE(adt, relattr->name, DOMAIN_UNSPECIFIED, 0); } AQL_SET_FLAG(adt, AQL_FLAG_INVERSE_LOGIC); #endif /* DB_FEATURE_REMOVE */ case AQL_TYPE_SELECT: if(handle == NULL) { result = DB_ARGUMENT_ERROR; break; } result = relation_select(handle, rel, adt); break; case AQL_TYPE_INSERT: result = relation_insert(rel, adt->values); break; #if DB_FEATURE_JOIN case AQL_TYPE_JOIN: if(handle == NULL) { result = DB_ARGUMENT_ERROR; break; } handle->left_rel = relation_load(adt->relations[first_rel_arg]); if(handle->left_rel == NULL) { break; } handle->right_rel = relation_load(adt->relations[first_rel_arg + 1]); if(handle->right_rel == NULL) { relation_release(handle->left_rel); break; } result = relation_join(handle, adt); break; #endif /* DB_FEATURE_JOIN */ default: break; } if(rel != NULL) { if(handle == NULL || !(handle->flags & DB_HANDLE_FLAG_PROCESSING)) { relation_release(rel); } } return result; }
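The AQL_TYPE_CREATE_INDEX arm is normally reached through the textual query front end; a hedged sketch, assuming Antelope's db_query/db_free interface, its DB_ERROR result macro, and an inline index type:

/* Sketch: issue a CREATE INDEX statement; the relation and attribute
 * names are illustrative. */
db_handle_t handle;
db_result_t result = db_query(&handle, "CREATE INDEX samples.time TYPE inline;");
if (DB_ERROR(result)) {
    /* index_create failed, or the relation/attribute was not found */
}
db_free(&handle);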
/* * Create append-only auxiliary relations for target relation rel. * Returns true if they are newly created. If pg_appendonly already * knows about those tables, they are not created and false is returned. */ bool CreateAOAuxiliaryTable( Relation rel, const char *auxiliaryNamePrefix, char relkind, TupleDesc tupledesc, IndexInfo *indexInfo, Oid *classObjectId, int16 *coloptions) { char aoauxiliary_relname[NAMEDATALEN]; char aoauxiliary_idxname[NAMEDATALEN]; bool shared_relation; Oid relOid, aoauxiliary_relid = InvalidOid; Oid aoauxiliary_idxid = InvalidOid; ObjectAddress baseobject; ObjectAddress aoauxiliaryobject; Assert(RelationIsValid(rel)); Assert(RelationIsAoRows(rel) || RelationIsAoCols(rel)); Assert(auxiliaryNamePrefix); Assert(tupledesc); Assert(classObjectId); if (relkind != RELKIND_AOSEGMENTS) Assert(indexInfo); shared_relation = rel->rd_rel->relisshared; /* * We cannot allow creating an auxiliary table for a shared relation * after initdb (because there's no way to let other databases know * about this auxiliary relation). */ if (shared_relation && !IsBootstrapProcessingMode()) ereport(ERROR, (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE), errmsg("shared tables cannot have append-only auxiliary relations after initdb"))); relOid = RelationGetRelid(rel); switch(relkind) { case RELKIND_AOVISIMAP: GetAppendOnlyEntryAuxOids(relOid, SnapshotNow, NULL, NULL, NULL, &aoauxiliary_relid, &aoauxiliary_idxid); break; case RELKIND_AOBLOCKDIR: GetAppendOnlyEntryAuxOids(relOid, SnapshotNow, NULL, &aoauxiliary_relid, &aoauxiliary_idxid, NULL, NULL); break; case RELKIND_AOSEGMENTS: GetAppendOnlyEntryAuxOids(relOid, SnapshotNow, &aoauxiliary_relid, NULL, NULL, NULL, NULL); break; default: elog(ERROR, "unsupported auxiliary relkind '%c'", relkind); } /* * Does it already have the auxiliary relation? */ if (OidIsValid(aoauxiliary_relid)) { return false; } snprintf(aoauxiliary_relname, sizeof(aoauxiliary_relname), "%s_%u", auxiliaryNamePrefix, relOid); snprintf(aoauxiliary_idxname, sizeof(aoauxiliary_idxname), "%s_%u_index", auxiliaryNamePrefix, relOid); /* * We place the auxiliary relation in the pg_aoseg namespace * even if its master relation is a temp table. There cannot be * any naming collision, and the auxiliary relation will be * destroyed when its master is, so there is no need to handle * the aovisimap relation as temp. */ aoauxiliary_relid = heap_create_with_catalog(aoauxiliary_relname, PG_AOSEGMENT_NAMESPACE, rel->rd_rel->reltablespace, InvalidOid, rel->rd_rel->relowner, tupledesc, /* relam */ InvalidOid, relkind, RELSTORAGE_HEAP, shared_relation, true, /* bufferPoolBulkLoad */ false, 0, ONCOMMIT_NOOP, NULL, /* GP Policy */ (Datum) 0, true, /* valid_opts */ false, /* persistentTid */ NULL, /* persistentSerialNum */ NULL); /* Make this table visible, else index creation will fail */ CommandCounterIncrement(); /* Create an index on AO auxiliary tables (like visimap), except for the pg_aoseg table */ if (relkind != RELKIND_AOSEGMENTS) { aoauxiliary_idxid = index_create(aoauxiliary_relid, aoauxiliary_idxname, InvalidOid, indexInfo, BTREE_AM_OID, rel->rd_rel->reltablespace, classObjectId, coloptions, (Datum) 0, true, false, true, false, false, NULL); /* Unlock target table -- no one can see it */ UnlockRelationOid(aoauxiliary_relid, ShareLock); /* Unlock the index -- no one can see it anyway */ UnlockRelationOid(aoauxiliary_idxid, AccessExclusiveLock); } /* * Store the auxiliary table's OID in the parent relation's pg_appendonly row. * TODO (How to generalize this?) 
*/ switch (relkind) { case RELKIND_AOVISIMAP: UpdateAppendOnlyEntryAuxOids(relOid, InvalidOid, InvalidOid, InvalidOid, aoauxiliary_relid, aoauxiliary_idxid); break; case RELKIND_AOBLOCKDIR: UpdateAppendOnlyEntryAuxOids(relOid, InvalidOid, aoauxiliary_relid, aoauxiliary_idxid, InvalidOid, InvalidOid); break; case RELKIND_AOSEGMENTS: UpdateAppendOnlyEntryAuxOids(relOid, aoauxiliary_relid, InvalidOid, InvalidOid, InvalidOid, InvalidOid); break; default: elog(ERROR, "unsupported auxiliary relkind '%c'", relkind); } /* * Register dependency from the auxiliary table to the master, so that the * auxiliary table will be deleted if the master is. */ baseobject.classId = RelationRelationId; baseobject.objectId = relOid; baseobject.objectSubId = 0; aoauxiliaryobject.classId = RelationRelationId; aoauxiliaryobject.objectId = aoauxiliary_relid; aoauxiliaryobject.objectSubId = 0; recordDependencyOn(&aoauxiliaryobject, &baseobject, DEPENDENCY_INTERNAL); /* * Make changes visible */ CommandCounterIncrement(); return true; }
void _bitmap_create_lov_heapandindex(Relation rel, Oid lovComptypeOid, Oid *lovHeapOid, Oid *lovIndexOid, Oid lovHeapRelfilenode, Oid lovIndexRelfilenode) { char lovHeapName[NAMEDATALEN]; char lovIndexName[NAMEDATALEN]; TupleDesc tupDesc; IndexInfo *indexInfo; ObjectAddress objAddr, referenced; Oid *classObjectId; int16 *coloptions; Oid heapid; Oid idxid; int indattrs; int i; Oid unusedArrayOid = InvalidOid; Assert(rel != NULL); /* create the new names for the new lov heap and index */ snprintf(lovHeapName, sizeof(lovHeapName), "pg_bm_%u", RelationGetRelid(rel)); snprintf(lovIndexName, sizeof(lovIndexName), "pg_bm_%u_index", RelationGetRelid(rel)); heapid = get_relname_relid(lovHeapName, PG_BITMAPINDEX_NAMESPACE); /* * If heapid exists, then this is happening during re-indexing. * We allocate new relfilenodes for the lov heap and lov index. * * XXX Each segment db may have different relfilenodes for the lov heap and * lov index, which should not be an issue now. Ideally, we would like each * segment db to use the same OIDs. */ if (OidIsValid(heapid)) { Relation lovHeap; Relation lovIndex; Buffer btree_metabuf; Page btree_metapage; *lovHeapOid = heapid; idxid = get_relname_relid(lovIndexName, PG_BITMAPINDEX_NAMESPACE); Assert(OidIsValid(idxid)); *lovIndexOid = idxid; lovComptypeOid = get_rel_type_id(heapid); Assert(OidIsValid(lovComptypeOid)); lovHeap = heap_open(heapid, AccessExclusiveLock); lovIndex = index_open(idxid, AccessExclusiveLock); if (OidIsValid(lovHeapRelfilenode)) setNewRelfilenodeToOid(lovHeap, lovHeapRelfilenode); else setNewRelfilenode(lovHeap); if (OidIsValid(lovIndexRelfilenode)) setNewRelfilenodeToOid(lovIndex, lovIndexRelfilenode); else setNewRelfilenode(lovIndex); /* * After creating the new relfilenode for a btree index, the relation is * not a valid btree anymore. We create the new metapage for this btree. */ btree_metabuf = _bt_getbuf(lovIndex, P_NEW, BT_WRITE); Assert (BTREE_METAPAGE == BufferGetBlockNumber(btree_metabuf)); btree_metapage = BufferGetPage(btree_metabuf); _bt_initmetapage(btree_metapage, P_NONE, 0); /* XLOG the metapage */ if (!XLog_UnconvertedCanBypassWal() && !lovIndex->rd_istemp) { // Fetch gp_persistent_relation_node information that will be added to the XLOG record. RelationFetchGpRelationNodeForXLog(lovIndex); _bt_lognewpage(lovIndex, btree_metapage, BufferGetBlockNumber(btree_metabuf)); } /* This cache value is not valid anymore. */ if (lovIndex->rd_amcache) { pfree(lovIndex->rd_amcache); lovIndex->rd_amcache = NULL; } MarkBufferDirty(btree_metabuf); _bt_relbuf(lovIndex, btree_metabuf); index_close(lovIndex, NoLock); heap_close(lovHeap, NoLock); return; } /* * create a new empty heap to store all attribute values with their * corresponding block number and offset in LOV. */ tupDesc = _bitmap_create_lov_heapTupleDesc(rel); Assert(rel->rd_rel != NULL); heapid = heap_create_with_catalog(lovHeapName, PG_BITMAPINDEX_NAMESPACE, rel->rd_rel->reltablespace, *lovHeapOid, rel->rd_rel->relowner, tupDesc, /* relam */ InvalidOid, RELKIND_RELATION, RELSTORAGE_HEAP, rel->rd_rel->relisshared, false, /* bufferPoolBulkLoad */ false, 0, ONCOMMIT_NOOP, NULL /* GP Policy */, (Datum)0, true, /* valid_opts */ true, &lovComptypeOid, &unusedArrayOid, /* persistentTid */ NULL, /* persistentSerialNum */ NULL); Assert(heapid == *lovHeapOid); /* * We must bump the command counter to make the newly-created relation * tuple visible for opening. 
*/ CommandCounterIncrement(); objAddr.classId = RelationRelationId; objAddr.objectId = *lovHeapOid; objAddr.objectSubId = 0 ; referenced.classId = RelationRelationId; referenced.objectId = RelationGetRelid(rel); referenced.objectSubId = 0; recordDependencyOn(&objAddr, &referenced, DEPENDENCY_INTERNAL); /* * create a btree index on the newly-created heap. * The key includes all attributes to be indexed in this bitmap index. */ indattrs = tupDesc->natts - 2; indexInfo = makeNode(IndexInfo); indexInfo->ii_NumIndexAttrs = indattrs; indexInfo->ii_Expressions = NIL; indexInfo->ii_ExpressionsState = NIL; indexInfo->ii_Predicate = make_ands_implicit(NULL); indexInfo->ii_PredicateState = NIL; indexInfo->ii_Unique = true; indexInfo->opaque = NULL; classObjectId = (Oid *) palloc(indattrs * sizeof(Oid)); coloptions = (int16 *) palloc(indattrs * sizeof(int16)); for (i = 0; i < indattrs; i++) { Oid typid = tupDesc->attrs[i]->atttypid; indexInfo->ii_KeyAttrNumbers[i] = i + 1; classObjectId[i] = GetDefaultOpClass(typid, BTREE_AM_OID); coloptions[i] = 0; } idxid = index_create(*lovHeapOid, lovIndexName, *lovIndexOid, indexInfo, BTREE_AM_OID, rel->rd_rel->reltablespace, classObjectId, coloptions, 0, false, false, (Oid *) NULL, true, false, false, NULL); Assert(idxid == *lovIndexOid); }
/* * create_aoblkdir_table * * rel is already opened and exclusive-locked. * comptypeOid is InvalidOid. */ static bool create_aoblkdir_table(Relation rel, Oid aoblkdirOid, Oid aoblkdirIndexOid, Oid *comptypeOid) { Oid relOid = RelationGetRelid(rel); Oid aoblkdir_relid; Oid aoblkdir_idxid; bool shared_relation = rel->rd_rel->relisshared; char aoblkdir_relname[NAMEDATALEN]; char aoblkdir_idxname[NAMEDATALEN]; TupleDesc tupdesc; IndexInfo *indexInfo; Oid classObjectId[3]; ObjectAddress baseobject; ObjectAddress aoblkdirobject; Oid tablespaceOid = ChooseTablespaceForLimitedObject(rel->rd_rel->reltablespace); if (!RelationIsAoRows(rel)) return false; /* * We cannot allow creating a block directory for a shared relation * after initdb (because there's no way to let other databases know * about this block directory). */ if (shared_relation && !IsBootstrapProcessingMode()) ereport(ERROR, (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE), errmsg("shared tables cannot have block directory after initdb"))); GetAppendOnlyEntryAuxOids(relOid, SnapshotNow, NULL,NULL, &aoblkdir_relid, &aoblkdir_idxid); /* * Does it already have a block directory? */ if (aoblkdir_relid != InvalidOid) { return false; } snprintf(aoblkdir_relname, sizeof(aoblkdir_relname), "pg_aoblkdir_%u", relOid); snprintf(aoblkdir_idxname, sizeof(aoblkdir_idxname), "pg_aoblkdir_%u_index", relOid); /* Create a tuple descriptor */ tupdesc = CreateTemplateTupleDesc(4, false); TupleDescInitEntry(tupdesc, (AttrNumber) 1, "segno", INT4OID, -1, 0); TupleDescInitEntry(tupdesc, (AttrNumber) 2, "columngroup_no", INT4OID, -1, 0); TupleDescInitEntry(tupdesc, (AttrNumber) 3, "first_row_no", INT8OID, -1, 0); TupleDescInitEntry(tupdesc, (AttrNumber) 4, "minipage", VARBITOID, -1, 0); /* * We don't want any toast columns here. */ tupdesc->attrs[0]->attstorage = 'p'; tupdesc->attrs[1]->attstorage = 'p'; tupdesc->attrs[2]->attstorage = 'p'; tupdesc->attrs[3]->attstorage = 'p'; /* * We place the aoblkdir relation in the pg_aoseg namespace * even if its master relation is a temp table. There cannot be * any naming collision, and the aoblkdir relation will be * destroyed when its master is, so there is no need to handle * the aoblkdir relation as temp. */ aoblkdir_relid = heap_create_with_catalog(aoblkdir_relname, PG_AOSEGMENT_NAMESPACE, tablespaceOid, aoblkdirOid, rel->rd_rel->relowner, tupdesc, /* relam */ InvalidOid, RELKIND_AOBLOCKDIR, RELSTORAGE_HEAP, shared_relation, true, /* bufferPoolBulkLoad */ false, 0, ONCOMMIT_NOOP, NULL, /* GP Policy */ (Datum) 0, true, comptypeOid, /* persistentTid */ NULL, /* persistentSerialNum */ NULL); /* Make this table visible, else index creation will fail */ CommandCounterIncrement(); /* * Create index on (segno, columngroup_no, first_row_no). 
*/ indexInfo = makeNode(IndexInfo); indexInfo->ii_NumIndexAttrs = 3; indexInfo->ii_KeyAttrNumbers[0] = 1; indexInfo->ii_KeyAttrNumbers[1] = 2; indexInfo->ii_KeyAttrNumbers[2] = 3; indexInfo->ii_Expressions = NIL; indexInfo->ii_ExpressionsState = NIL; indexInfo->ii_Predicate = NIL; indexInfo->ii_PredicateState = NIL; indexInfo->ii_Unique = false; indexInfo->ii_Concurrent = false; classObjectId[0] = INT4_BTREE_OPS_OID; classObjectId[1] = INT4_BTREE_OPS_OID; classObjectId[2] = INT8_BTREE_OPS_OID; aoblkdir_idxid = index_create(aoblkdirOid, aoblkdir_idxname, aoblkdirIndexOid, indexInfo, BTREE_AM_OID, tablespaceOid, classObjectId, (Datum) 0, true, false, (Oid *) NULL, true, false, false, NULL); /* Unlock target table -- no one can see it */ UnlockRelationOid(aoblkdirOid, ShareLock); /* Unlock the index -- no one can see it anyway */ UnlockRelationOid(aoblkdirIndexOid, AccessExclusiveLock); /* * Store the aoblkdir table's OID in the parent relation's pg_appendonly row. */ UpdateAppendOnlyEntryAuxOids(relOid, InvalidOid, InvalidOid, aoblkdir_relid, aoblkdir_idxid); /* * Register dependency from the aoblkdir table to the master, so that the * aoblkdir table will be deleted if the master is. */ baseobject.classId = RelationRelationId; baseobject.objectId = relOid; baseobject.objectSubId = 0; aoblkdirobject.classId = RelationRelationId; aoblkdirobject.objectId = aoblkdirOid; aoblkdirobject.objectSubId = 0; recordDependencyOn(&aoblkdirobject, &baseobject, DEPENDENCY_INTERNAL); /* * Make changes visible */ CommandCounterIncrement(); return true; }
void x8664_functions_r (struct _map * functions, struct _tree * disassembled, uint64_t address, struct _map * memory) { ud_t ud_obj; int continue_disassembling = 1; struct _buffer * buffer = map_fetch_max(memory, address); if (buffer == NULL) return; uint64_t base_address = map_fetch_max_key(memory, address); if (base_address + buffer->size < address) return; uint64_t offset = address - base_address; ud_init (&ud_obj); ud_set_mode (&ud_obj, 64); ud_set_syntax(&ud_obj, UD_SYN_INTEL); ud_set_input_buffer(&ud_obj, &(buffer->bytes[offset]), buffer->size - offset); while (continue_disassembling == 1) { size_t bytes_disassembled = ud_disassemble(&ud_obj); if (bytes_disassembled == 0) { break; } if ( (ud_obj.mnemonic == UD_Icall) && (ud_obj.operand[0].type == UD_OP_JIMM)) { uint64_t target_addr = address + ud_insn_len(&ud_obj) + udis86_sign_extend_lval(&(ud_obj.operand[0])); if (map_fetch(functions, target_addr) == NULL) { struct _function * function = function_create(target_addr); map_insert(functions, target_addr, function); object_delete(function); } } struct _index * index = index_create(address); if (tree_fetch(disassembled, index) != NULL) { object_delete(index); return; } tree_insert(disassembled, index); object_delete(index); // these mnemonics cause us to continue disassembly somewhere else struct ud_operand * operand; switch (ud_obj.mnemonic) { case UD_Ijo : case UD_Ijno : case UD_Ijb : case UD_Ijae : case UD_Ijz : case UD_Ijnz : case UD_Ijbe : case UD_Ija : case UD_Ijs : case UD_Ijns : case UD_Ijp : case UD_Ijnp : case UD_Ijl : case UD_Ijge : case UD_Ijle : case UD_Ijg : case UD_Ijmp : case UD_Iloop : case UD_Icall : operand = &(ud_obj.operand[0]); if (operand->type == UD_OP_JIMM) { x8664_functions_r(functions, disassembled, address + ud_insn_len(&ud_obj) + udis86_sign_extend_lval(operand), memory); } break; default : break; } // these mnemonics cause disassembly to stop switch (ud_obj.mnemonic) { case UD_Iret : case UD_Ihlt : case UD_Ijmp : continue_disassembling = 0; break; default : break; } address += bytes_disassembled; } }
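A hedged top-level driver for the recursive pass, assuming constructors matching the container calls used above (map_create appears in this section; tree_create is an assumed counterpart to tree_fetch/tree_insert):

/* Sketch: discover functions reachable from entry_address. */
struct _map  * functions    = map_create();
struct _tree * disassembled = tree_create(); /* assumed constructor */
x8664_functions_r(functions, disassembled, entry_address, memory);
/* functions now maps call-target addresses to struct _function objects */
objects_delete(functions, disassembled, NULL);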
/* * create_toast_table --- internal workhorse * * rel is already opened and locked * toastOid and toastIndexOid are normally InvalidOid, but during * bootstrap they can be nonzero to specify hand-assigned OIDs */ static bool create_toast_table(Relation rel, Oid toastOid, Oid toastIndexOid, Datum reloptions, LOCKMODE lockmode, bool check) { Oid relOid = RelationGetRelid(rel); HeapTuple reltup; TupleDesc tupdesc; bool shared_relation; bool mapped_relation; Relation toast_rel; Relation class_rel; Oid toast_relid; Oid toast_typid = InvalidOid; Oid namespaceid; char toast_relname[NAMEDATALEN]; char toast_idxname[NAMEDATALEN]; IndexInfo *indexInfo; Oid collationObjectId[2]; Oid classObjectId[2]; int16 coloptions[2]; ObjectAddress baseobject, toastobject; /* * Toast table is shared if and only if its parent is. * * We cannot allow toasting a shared relation after initdb (because * there's no way to mark it toasted in other databases' pg_class). */ shared_relation = rel->rd_rel->relisshared; if (shared_relation && !IsBootstrapProcessingMode()) ereport(ERROR, (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE), errmsg("shared tables cannot be toasted after initdb"))); /* It's mapped if and only if its parent is, too */ mapped_relation = RelationIsMapped(rel); /* * Is it already toasted? */ if (rel->rd_rel->reltoastrelid != InvalidOid) return false; if (!IsBinaryUpgrade) { if (!needs_toast_table(rel)) return false; } else { /* * Check to see whether the table needs a TOAST table. * * If an update-in-place TOAST relfilenode is specified, force TOAST file * creation even if it seems not to need one. This handles the case * where the old cluster needed a TOAST table but the new cluster * would not normally create one. */ /* * If a TOAST oid is not specified, skip TOAST creation as we will do * it later so we don't create a TOAST table whose OID later conflicts * with a user-supplied OID. This handles cases where the old cluster * didn't need a TOAST table, but the new cluster does. */ if (!OidIsValid(binary_upgrade_next_toast_pg_class_oid)) return false; /* * If a special TOAST value has been passed in, it means we are in * cleanup mode --- we are creating needed TOAST tables after all user * tables with specified OIDs have been created. We let the system * assign a TOAST oid for us. The tables are empty so the missing * TOAST tables were not a problem. */ if (binary_upgrade_next_toast_pg_class_oid == OPTIONALLY_CREATE_TOAST_OID) { /* clear as it is not to be used; it is just a flag */ binary_upgrade_next_toast_pg_class_oid = InvalidOid; if (!needs_toast_table(rel)) return false; } /* both should be set, or not set */ Assert(OidIsValid(binary_upgrade_next_toast_pg_class_oid) == OidIsValid(binary_upgrade_next_toast_pg_type_oid)); } /* * If requested check lockmode is sufficient. This is a cross check in * case of errors or conflicting decisions in earlier code. */ if (check && lockmode != AccessExclusiveLock) elog(ERROR, "AccessExclusiveLock required to add toast table."); /* * Create the toast table and its index */ snprintf(toast_relname, sizeof(toast_relname), "pg_toast_%u", relOid); snprintf(toast_idxname, sizeof(toast_idxname), "pg_toast_%u_index", relOid); /* this is pretty painful... 
need a tuple descriptor */ tupdesc = CreateTemplateTupleDesc(3, false); TupleDescInitEntry(tupdesc, (AttrNumber) 1, "chunk_id", OIDOID, -1, 0); TupleDescInitEntry(tupdesc, (AttrNumber) 2, "chunk_seq", INT4OID, -1, 0); TupleDescInitEntry(tupdesc, (AttrNumber) 3, "chunk_data", BYTEAOID, -1, 0); /* * Ensure that the toast table doesn't itself get toasted, or we'll be * toast :-(. This is essential for chunk_data because type bytea is * toastable; hit the other two just to be sure. */ tupdesc->attrs[0]->attstorage = 'p'; tupdesc->attrs[1]->attstorage = 'p'; tupdesc->attrs[2]->attstorage = 'p'; /* * Toast tables for regular relations go in pg_toast; those for temp * relations go into the per-backend temp-toast-table namespace. */ if (isTempOrToastNamespace(rel->rd_rel->relnamespace)) namespaceid = GetTempToastNamespace(); else namespaceid = PG_TOAST_NAMESPACE; /* Use binary-upgrade override for pg_type.oid, if supplied. */ if (IsBinaryUpgrade && OidIsValid(binary_upgrade_next_toast_pg_type_oid)) { toast_typid = binary_upgrade_next_toast_pg_type_oid; binary_upgrade_next_toast_pg_type_oid = InvalidOid; } toast_relid = heap_create_with_catalog(toast_relname, namespaceid, rel->rd_rel->reltablespace, toastOid, toast_typid, InvalidOid, rel->rd_rel->relowner, tupdesc, NIL, RELKIND_TOASTVALUE, rel->rd_rel->relpersistence, shared_relation, mapped_relation, true, 0, ONCOMMIT_NOOP, reloptions, false, true, true); Assert(toast_relid != InvalidOid); /* make the toast relation visible, else heap_open will fail */ CommandCounterIncrement(); /* ShareLock is not really needed here, but take it anyway */ toast_rel = heap_open(toast_relid, ShareLock); /* * Create unique index on chunk_id, chunk_seq. * * NOTE: the normal TOAST access routines could actually function with a * single-column index on chunk_id only. However, the slice access * routines use both columns for faster access to an individual chunk. In * addition, we want it to be unique as a check against the possibility of * duplicate TOAST chunk OIDs. The index might also be a little more * efficient this way, since btree isn't all that happy with large numbers * of equal keys. 
*/ indexInfo = makeNode(IndexInfo); indexInfo->ii_NumIndexAttrs = 2; indexInfo->ii_KeyAttrNumbers[0] = 1; indexInfo->ii_KeyAttrNumbers[1] = 2; indexInfo->ii_Expressions = NIL; indexInfo->ii_ExpressionsState = NIL; indexInfo->ii_Predicate = NIL; indexInfo->ii_PredicateState = NIL; indexInfo->ii_ExclusionOps = NULL; indexInfo->ii_ExclusionProcs = NULL; indexInfo->ii_ExclusionStrats = NULL; indexInfo->ii_Unique = true; indexInfo->ii_ReadyForInserts = true; indexInfo->ii_Concurrent = false; indexInfo->ii_BrokenHotChain = false; collationObjectId[0] = InvalidOid; collationObjectId[1] = InvalidOid; classObjectId[0] = OID_BTREE_OPS_OID; classObjectId[1] = INT4_BTREE_OPS_OID; coloptions[0] = 0; coloptions[1] = 0; index_create(toast_rel, toast_idxname, toastIndexOid, InvalidOid, indexInfo, list_make2("chunk_id", "chunk_seq"), BTREE_AM_OID, rel->rd_rel->reltablespace, collationObjectId, classObjectId, coloptions, (Datum) 0, true, false, false, false, true, false, false, true); heap_close(toast_rel, NoLock); /* * Store the toast table's OID in the parent relation's pg_class row */ class_rel = heap_open(RelationRelationId, RowExclusiveLock); reltup = SearchSysCacheCopy1(RELOID, ObjectIdGetDatum(relOid)); if (!HeapTupleIsValid(reltup)) elog(ERROR, "cache lookup failed for relation %u", relOid); ((Form_pg_class) GETSTRUCT(reltup))->reltoastrelid = toast_relid; if (!IsBootstrapProcessingMode()) { /* normal case, use a transactional update */ simple_heap_update(class_rel, &reltup->t_self, reltup); /* Keep catalog indexes current */ CatalogUpdateIndexes(class_rel, reltup); } else { /* While bootstrapping, we cannot UPDATE, so overwrite in-place */ heap_inplace_update(class_rel, reltup); } heap_freetuple(reltup); heap_close(class_rel, RowExclusiveLock); /* * Register dependency from the toast table to the master, so that the * toast table will be deleted if the master is. Skip this in bootstrap * mode. */ if (!IsBootstrapProcessingMode()) { baseobject.classId = RelationRelationId; baseobject.objectId = relOid; baseobject.objectSubId = 0; toastobject.classId = RelationRelationId; toastobject.objectId = toast_relid; toastobject.objectSubId = 0; recordDependencyOn(&toastobject, &baseobject, DEPENDENCY_INTERNAL); } /* * Make changes visible */ CommandCounterIncrement(); return true; }
/* * create_toast_table --- internal workhorse * * rel is already opened and exclusive-locked * toastOid and toastIndexOid are normally InvalidOid, but during * bootstrap they can be nonzero to specify hand-assigned OIDs */ static bool create_toast_table(Relation rel, Oid toastOid, Oid toastIndexOid, bool is_part_child) { Oid relOid = RelationGetRelid(rel); HeapTuple reltup; TupleDesc tupdesc; bool shared_relation; Relation class_rel; Oid toast_relid; Oid toast_idxid; Oid namespaceid; char toast_relname[NAMEDATALEN]; char toast_idxname[NAMEDATALEN]; IndexInfo *indexInfo; Oid classObjectId[2]; int16 coloptions[2]; ObjectAddress baseobject, toastobject; /* * Is it already toasted? */ if (rel->rd_rel->reltoastrelid != InvalidOid) return false; /* * Check to see whether the table actually needs a TOAST table. */ if (!RelationNeedsToastTable(rel)) return false; /* * Toast table is shared if and only if its parent is. * * We cannot allow toasting a shared relation after initdb (because * there's no way to mark it toasted in other databases' pg_class). */ shared_relation = rel->rd_rel->relisshared; if (shared_relation && !IsBootstrapProcessingMode()) ereport(ERROR, (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE), errmsg("shared tables cannot be toasted after initdb"))); /* * Create the toast table and its index */ snprintf(toast_relname, sizeof(toast_relname), "pg_toast_%u", relOid); snprintf(toast_idxname, sizeof(toast_idxname), "pg_toast_%u_index", relOid); /* this is pretty painful... need a tuple descriptor */ tupdesc = CreateTemplateTupleDesc(3, false); TupleDescInitEntry(tupdesc, (AttrNumber) 1, "chunk_id", OIDOID, -1, 0); TupleDescInitEntry(tupdesc, (AttrNumber) 2, "chunk_seq", INT4OID, -1, 0); TupleDescInitEntry(tupdesc, (AttrNumber) 3, "chunk_data", BYTEAOID, -1, 0); /* * Ensure that the toast table doesn't itself get toasted, or we'll be * toast :-(. This is essential for chunk_data because type bytea is * toastable; hit the other two just to be sure. */ tupdesc->attrs[0]->attstorage = 'p'; tupdesc->attrs[1]->attstorage = 'p'; tupdesc->attrs[2]->attstorage = 'p'; /* * Toast tables for regular relations go in pg_toast; those for temp * relations go into the per-backend temp-toast-table namespace. */ if (rel->rd_istemp) namespaceid = GetTempToastNamespace(); else namespaceid = PG_TOAST_NAMESPACE; /* * XXX would it make sense to apply the master's reloptions to the toast * table? Or maybe some toast-specific reloptions? */ toast_relid = heap_create_with_catalog(toast_relname, namespaceid, rel->rd_rel->reltablespace, toastOid, rel->rd_rel->relowner, tupdesc, /* relam */ InvalidOid, RELKIND_TOASTVALUE, RELSTORAGE_HEAP, shared_relation, true, /* bufferPoolBulkLoad */ false, 0, ONCOMMIT_NOOP, NULL, /* CDB POLICY */ (Datum) 0, true, /* valid_opts */ false, /* persistentTid */ NULL, /* persistentSerialNum */ NULL); /* make the toast relation visible, else index creation will fail */ CommandCounterIncrement(); /* * Create unique index on chunk_id, chunk_seq. * * NOTE: the normal TOAST access routines could actually function with a * single-column index on chunk_id only. However, the slice access * routines use both columns for faster access to an individual chunk. In * addition, we want it to be unique as a check against the possibility of * duplicate TOAST chunk OIDs. The index might also be a little more * efficient this way, since btree isn't all that happy with large numbers * of equal keys. 
*/ indexInfo = makeNode(IndexInfo); indexInfo->ii_NumIndexAttrs = 2; indexInfo->ii_KeyAttrNumbers[0] = 1; indexInfo->ii_KeyAttrNumbers[1] = 2; indexInfo->ii_Expressions = NIL; indexInfo->ii_ExpressionsState = NIL; indexInfo->ii_Predicate = NIL; indexInfo->ii_PredicateState = NIL; indexInfo->ii_Unique = true; indexInfo->ii_ReadyForInserts = true; indexInfo->ii_Concurrent = false; indexInfo->ii_BrokenHotChain = false; classObjectId[0] = OID_BTREE_OPS_OID; classObjectId[1] = INT4_BTREE_OPS_OID; coloptions[0] = 0; coloptions[1] = 0; toast_idxid = index_create(toast_relid, toast_idxname, toastIndexOid, indexInfo, BTREE_AM_OID, rel->rd_rel->reltablespace, classObjectId, coloptions, (Datum) 0, true, false, true, false, false, NULL); /* * If this is a partitioned child, we can unlock since the master is * already locked. */ if (is_part_child) { UnlockRelationOid(toast_relid, ShareLock); UnlockRelationOid(toast_idxid, AccessExclusiveLock); } /* * Store the toast table's OID in the parent relation's pg_class row */ class_rel = heap_open(RelationRelationId, RowExclusiveLock); reltup = SearchSysCacheCopy(RELOID, ObjectIdGetDatum(relOid), 0, 0, 0); if (!HeapTupleIsValid(reltup)) elog(ERROR, "cache lookup failed for relation %u", relOid); ((Form_pg_class) GETSTRUCT(reltup))->reltoastrelid = toast_relid; if (!IsBootstrapProcessingMode()) { /* normal case, use a transactional update */ simple_heap_update(class_rel, &reltup->t_self, reltup); /* Keep catalog indexes current */ CatalogUpdateIndexes(class_rel, reltup); } else { /* While bootstrapping, we cannot UPDATE, so overwrite in-place */ heap_inplace_update(class_rel, reltup); } heap_freetuple(reltup); heap_close(class_rel, RowExclusiveLock); /* * Register dependency from the toast table to the master, so that the * toast table will be deleted if the master is. Skip this in bootstrap * mode. */ if (!IsBootstrapProcessingMode()) { baseobject.classId = RelationRelationId; baseobject.objectId = relOid; baseobject.objectSubId = 0; toastobject.classId = RelationRelationId; toastobject.objectId = toast_relid; toastobject.objectSubId = 0; recordDependencyOn(&toastobject, &baseobject, DEPENDENCY_INTERNAL); } /* * Make changes visible */ CommandCounterIncrement(); return true; }
struct _graph * recursive_disassemble (const struct _map * mem_map, uint64_t entry, struct _ins * (* ins_callback) (const struct _map *, uint64_t)) { struct _queue * queue = queue_create(); struct _map * map = map_create(); struct _index * index = index_create(entry); queue_push(queue, index); object_delete(index); while (queue->size > 0) { struct _index * index = queue_peek(queue); if (map_fetch(map, index->index)) { queue_pop(queue); continue; } struct _ins * ins = ins_callback(mem_map, index->index); if (ins == NULL) { queue_pop(queue); continue; } map_insert(map, index->index, ins); struct _list_it * lit; for (lit = list_iterator(ins->successors); lit != NULL; lit = lit->next) { struct _ins_value * successor = lit->data; if (successor->type == INS_SUC_CALL) continue; struct _index * index = index_create(successor->address); queue_push(queue, index); object_delete(index); } queue_pop(queue); } object_delete(queue); // create graph nodes struct _graph * graph = graph_create(); struct _map_it * mit; for (mit = map_iterator(map); mit != NULL; mit = map_it_next(mit)) { graph_add_node(graph, map_it_key(mit), map_it_data(mit)); } // create graph edges for (mit = map_iterator(map); mit != NULL; mit = map_it_next(mit)) { struct _ins * ins = map_it_data(mit); struct _list_it * lit; for (lit = list_iterator(ins->successors); lit != NULL; lit = lit->next) { struct _ins_value * successor = lit->data; // don't add call edges if (successor->type == INS_SUC_CALL) continue; graph_add_edge(graph, ins->address, successor->address, successor); } } object_delete(map); return graph; }
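recursive_disassemble is parameterized over the decoder; a deliberately trivial callback sketch showing the contract (returning NULL stops disassembly along that path — a real decoder would build a struct _ins from the bytes at the given address):

/* Stub decoder: always fails, so the resulting graph is empty. */
static struct _ins * null_decoder (const struct _map * mem_map, uint64_t address) {
    (void) mem_map;
    (void) address;
    return NULL; /* a real callback returns a decoded struct _ins */
}

struct _graph * graph = recursive_disassemble(mem_map, entry, null_decoder);
object_delete(graph);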
/* * create_toast_table --- internal workhorse * * rel is already opened and locked * toastOid and toastIndexOid are normally InvalidOid, but during * bootstrap they can be nonzero to specify hand-assigned OIDs */ static bool create_toast_table(Relation rel, Oid toastOid, Oid toastIndexOid, Datum reloptions, LOCKMODE lockmode, bool check) { Oid relOid = RelationGetRelid(rel); HeapTuple reltup; TupleDesc tupdesc; bool shared_relation; bool mapped_relation; Relation toast_rel; Relation class_rel; Oid toast_relid; Oid toast_typid = InvalidOid; Oid namespaceid; char toast_relname[NAMEDATALEN]; char toast_idxname[NAMEDATALEN]; IndexInfo *indexInfo; Oid collationObjectId[2]; Oid classObjectId[2]; int16 coloptions[2]; ObjectAddress baseobject, toastobject; /* * Toast table is shared if and only if its parent is. * * We cannot allow toasting a shared relation after initdb (because * there's no way to mark it toasted in other databases' pg_class). */ shared_relation = rel->rd_rel->relisshared; if (shared_relation && !IsBootstrapProcessingMode()) ereport(ERROR, (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE), errmsg("shared tables cannot be toasted after initdb"))); /* It's mapped if and only if its parent is, too */ mapped_relation = RelationIsMapped(rel); /* * Is it already toasted? */ if (rel->rd_rel->reltoastrelid != InvalidOid) return false; /* * Check to see whether the table actually needs a TOAST table. */ if (!IsBinaryUpgrade) { /* Normal mode, normal check */ if (!needs_toast_table(rel)) return false; } else { /* * In binary-upgrade mode, create a TOAST table if and only if * pg_upgrade told us to (ie, a TOAST table OID has been provided). * * This indicates that the old cluster had a TOAST table for the * current table. We must create a TOAST table to receive the old * TOAST file, even if the table seems not to need one. * * Contrariwise, if the old cluster did not have a TOAST table, we * should be able to get along without one even if the new version's * needs_toast_table rules suggest we should have one. There is a lot * of daylight between where we will create a TOAST table and where * one is really necessary to avoid failures, so small cross-version * differences in the when-to-create heuristic shouldn't be a problem. * If we tried to create a TOAST table anyway, we would have the * problem that it might take up an OID that will conflict with some * old-cluster table we haven't seen yet. */ if (!OidIsValid(binary_upgrade_next_toast_pg_class_oid) || !OidIsValid(binary_upgrade_next_toast_pg_type_oid)) return false; } /* * If requested check lockmode is sufficient. This is a cross check in * case of errors or conflicting decisions in earlier code. */ if (check && lockmode != AccessExclusiveLock) elog(ERROR, "AccessExclusiveLock required to add toast table."); /* * Create the toast table and its index */ snprintf(toast_relname, sizeof(toast_relname), "pg_toast_%u", relOid); snprintf(toast_idxname, sizeof(toast_idxname), "pg_toast_%u_index", relOid); /* this is pretty painful... need a tuple descriptor */ tupdesc = CreateTemplateTupleDesc(3); TupleDescInitEntry(tupdesc, (AttrNumber) 1, "chunk_id", OIDOID, -1, 0); TupleDescInitEntry(tupdesc, (AttrNumber) 2, "chunk_seq", INT4OID, -1, 0); TupleDescInitEntry(tupdesc, (AttrNumber) 3, "chunk_data", BYTEAOID, -1, 0); /* * Ensure that the toast table doesn't itself get toasted, or we'll be * toast :-(. This is essential for chunk_data because type bytea is * toastable; hit the other two just to be sure. 
*/ TupleDescAttr(tupdesc, 0)->attstorage = 'p'; TupleDescAttr(tupdesc, 1)->attstorage = 'p'; TupleDescAttr(tupdesc, 2)->attstorage = 'p'; /* * Toast tables for regular relations go in pg_toast; those for temp * relations go into the per-backend temp-toast-table namespace. */ if (isTempOrTempToastNamespace(rel->rd_rel->relnamespace)) namespaceid = GetTempToastNamespace(); else namespaceid = PG_TOAST_NAMESPACE; /* * Use binary-upgrade override for pg_type.oid, if supplied. We might be * in the post-schema-restore phase where we are doing ALTER TABLE to * create TOAST tables that didn't exist in the old cluster. */ if (IsBinaryUpgrade && OidIsValid(binary_upgrade_next_toast_pg_type_oid)) { toast_typid = binary_upgrade_next_toast_pg_type_oid; binary_upgrade_next_toast_pg_type_oid = InvalidOid; } toast_relid = heap_create_with_catalog(toast_relname, namespaceid, rel->rd_rel->reltablespace, toastOid, toast_typid, InvalidOid, rel->rd_rel->relowner, tupdesc, NIL, RELKIND_TOASTVALUE, rel->rd_rel->relpersistence, shared_relation, mapped_relation, ONCOMMIT_NOOP, reloptions, false, true, true, InvalidOid, NULL); Assert(toast_relid != InvalidOid); /* make the toast relation visible, else heap_open will fail */ CommandCounterIncrement(); /* ShareLock is not really needed here, but take it anyway */ toast_rel = heap_open(toast_relid, ShareLock); /* * Create unique index on chunk_id, chunk_seq. * * NOTE: the normal TOAST access routines could actually function with a * single-column index on chunk_id only. However, the slice access * routines use both columns for faster access to an individual chunk. In * addition, we want it to be unique as a check against the possibility of * duplicate TOAST chunk OIDs. The index might also be a little more * efficient this way, since btree isn't all that happy with large numbers * of equal keys. 
*/ indexInfo = makeNode(IndexInfo); indexInfo->ii_NumIndexAttrs = 2; indexInfo->ii_NumIndexKeyAttrs = 2; indexInfo->ii_IndexAttrNumbers[0] = 1; indexInfo->ii_IndexAttrNumbers[1] = 2; indexInfo->ii_Expressions = NIL; indexInfo->ii_ExpressionsState = NIL; indexInfo->ii_Predicate = NIL; indexInfo->ii_PredicateState = NULL; indexInfo->ii_ExclusionOps = NULL; indexInfo->ii_ExclusionProcs = NULL; indexInfo->ii_ExclusionStrats = NULL; indexInfo->ii_Unique = true; indexInfo->ii_ReadyForInserts = true; indexInfo->ii_Concurrent = false; indexInfo->ii_BrokenHotChain = false; indexInfo->ii_ParallelWorkers = 0; indexInfo->ii_Am = BTREE_AM_OID; indexInfo->ii_AmCache = NULL; indexInfo->ii_Context = CurrentMemoryContext; collationObjectId[0] = InvalidOid; collationObjectId[1] = InvalidOid; classObjectId[0] = OID_BTREE_OPS_OID; classObjectId[1] = INT4_BTREE_OPS_OID; coloptions[0] = 0; coloptions[1] = 0; index_create(toast_rel, toast_idxname, toastIndexOid, InvalidOid, InvalidOid, InvalidOid, indexInfo, list_make2("chunk_id", "chunk_seq"), BTREE_AM_OID, rel->rd_rel->reltablespace, collationObjectId, classObjectId, coloptions, (Datum) 0, INDEX_CREATE_IS_PRIMARY, 0, true, true, NULL); heap_close(toast_rel, NoLock); /* * Store the toast table's OID in the parent relation's pg_class row */ class_rel = heap_open(RelationRelationId, RowExclusiveLock); reltup = SearchSysCacheCopy1(RELOID, ObjectIdGetDatum(relOid)); if (!HeapTupleIsValid(reltup)) elog(ERROR, "cache lookup failed for relation %u", relOid); ((Form_pg_class) GETSTRUCT(reltup))->reltoastrelid = toast_relid; if (!IsBootstrapProcessingMode()) { /* normal case, use a transactional update */ CatalogTupleUpdate(class_rel, &reltup->t_self, reltup); } else { /* While bootstrapping, we cannot UPDATE, so overwrite in-place */ heap_inplace_update(class_rel, reltup); } heap_freetuple(reltup); heap_close(class_rel, RowExclusiveLock); /* * Register dependency from the toast table to the master, so that the * toast table will be deleted if the master is. Skip this in bootstrap * mode. */ if (!IsBootstrapProcessingMode()) { baseobject.classId = RelationRelationId; baseobject.objectId = relOid; baseobject.objectSubId = 0; toastobject.classId = RelationRelationId; toastobject.objectId = toast_relid; toastobject.objectSubId = 0; recordDependencyOn(&toastobject, &baseobject, DEPENDENCY_INTERNAL); } /* * Make changes visible */ CommandCounterIncrement(); return true; }
struct _graph * redis_x86_graph (uint64_t address, struct _map * memory) { uint64_t next_index = 1; uint64_t this_index = 0; uint64_t last_index = 0; struct _redis_x86 * redis_x86 = redis_x86_create(); redis_x86_mem_from_mem_map(redis_x86, memory); redis_x86->regs[RED_EIP] = address; redis_x86_false_stack(redis_x86); struct _map * ins_map = map_create(); struct _graph * graph = graph_create(); while (redis_x86_step(redis_x86) == REDIS_SUCCESS) { uint64_t address = redis_x86->ins_addr; // do we have an instruction at this address already? struct _index * index = map_fetch(ins_map, address); // we have an instruction, fetch it, make sure it matches if (index) { struct _graph_node * node = graph_fetch_node(graph, index->index); struct _ins * ins = list_first(node->data); // instructions diverge, create new instruction if ( (ins->size != redis_x86->ins_size) || (memcmp(ins->bytes, redis_x86->ins_bytes, redis_x86->ins_size))) { ins = redis_x86_create_ins(redis_x86); if (ins == NULL) { fprintf(stderr, "could not create ins, eip=%llx\n", (unsigned long long) redis_x86->ins_addr); break; } struct _list * list = list_create(); list_append(list, ins); object_delete(ins); graph_add_node(graph, next_index, list); object_delete(list); map_remove(ins_map, redis_x86->ins_addr); index = index_create(next_index++); map_insert(ins_map, redis_x86->ins_addr, index); this_index = index->index; object_delete(index); } else this_index = index->index; } // no instruction at this address, create it else { struct _ins * ins = redis_x86_create_ins(redis_x86); if (ins == NULL) { fprintf(stderr, "could not create ins, eip=%llx\n", (unsigned long long) redis_x86->ins_addr); break; } struct _list * list = list_create(); list_append(list, ins); object_delete(ins); graph_add_node(graph, next_index, list); object_delete(list); index = index_create(next_index++); map_insert(ins_map, redis_x86->ins_addr, index); this_index = index->index; object_delete(index); } /* * create an edge from last index to this index * because our graph library enforces both the condition that the head * and tail nodes are valid, and that there exists only one edge per * head->tail combination, we can blindly add edges here and let the * graph library work out the details */ printf("[edge] %llx -> %llx\n", (unsigned long long) last_index, (unsigned long long) this_index); struct _ins_edge * ins_edge = ins_edge_create(INS_EDGE_NORMAL); graph_add_edge(graph, last_index, this_index, ins_edge); object_delete(ins_edge); last_index = this_index; } object_delete(ins_map); object_delete(redis_x86); return graph; }
int rdis_update_memory (struct _rdis * rdis, uint64_t address, struct _buffer * buffer) { if (buffer == NULL) return -1; mem_map_set (rdis->memory, address, buffer); // we will regraph functions whose bounds fall within this updated memory, // and all functions whose bounds fall within the bounds of functions to be // regraphed (step 2 simplifies things later on) struct _queue * queue = queue_create(); struct _map_it * it; for (it = map_iterator(rdis->functions); it != NULL; it = map_it_next(it)) { struct _function * function = map_it_data(it); if ( ( (function->bounds.lower >= address) && (function->bounds.lower < address + buffer->size)) || ( (function->bounds.upper >= address) && (function->bounds.upper < address + buffer->size)) || ( (function->bounds.lower <= address) && (function->bounds.upper >= address + buffer->size))) { queue_push(queue, function); } } struct _map * regraph_functions = map_create(); while (queue->size > 0) { struct _function * function = queue_peek(queue); if (map_fetch(regraph_functions, function->address) != NULL) { queue_pop(queue); continue; } printf("adding regraph function %llx\n", (unsigned long long) function->address); map_insert(regraph_functions, function->address, function); for (it = map_iterator(rdis->functions); it != NULL; it = map_it_next(it)) { struct _function * cmp_function = map_it_data(it); if ( ( (cmp_function->bounds.lower >= function->bounds.lower) && (cmp_function->bounds.lower < function->bounds.upper)) || ( (cmp_function->bounds.upper >= function->bounds.lower) && (cmp_function->bounds.upper < function->bounds.upper)) || ( (cmp_function->bounds.lower <= function->bounds.lower) && (cmp_function->bounds.upper >= function->bounds.upper))) queue_push(queue, cmp_function); } } // regraph dem functions struct _graph * new_graph; new_graph = loader_graph_functions(rdis->loader, rdis->memory, regraph_functions); // We are now going to go through all nodes in our regraph functions. 
/* We will copy over comments to the new instructions and then remove the regraph-function nodes from the original graph. */ for (it = map_iterator(regraph_functions); it != NULL; it = map_it_next(it)) { struct _function * function = map_it_data(it); struct _graph * family = graph_family(rdis->graph, function->address); if (family == NULL) continue; struct _graph_it * git; for (git = graph_iterator(family); git != NULL; git = graph_it_next(git)) { struct _list * ins_list = graph_it_data(git); struct _list_it * iit; for (iit = list_iterator(ins_list); iit != NULL; iit = iit->next) { struct _ins * ins = iit->data; if (ins->comment == NULL) continue; struct _ins * new_ins = graph_fetch_ins(new_graph, ins->address); if (new_ins == NULL) continue; /* the regraph may not contain this address anymore */ if (ins->size != new_ins->size) continue; if (memcmp(ins->bytes, new_ins->bytes, ins->size) == 0) { printf("copy over comment from instruction at %llx\n", (unsigned long long) ins->address); ins_s_comment(new_ins, ins->comment); } } // add node for deletion struct _index * index = index_create(graph_it_index(git)); queue_push(queue, index); object_delete(index); } object_delete(family); while (queue->size > 0) { struct _index * index = queue_peek(queue); graph_remove_node(rdis->graph, index->index); queue_pop(queue); } } // merge the new graph with the old graph graph_merge(rdis->graph, new_graph); // reset bounds of these functions for (it = map_iterator(regraph_functions); it != NULL; it = map_it_next(it)) { struct _function * function = map_it_data(it); rdis_function_bounds(rdis, function->address); } objects_delete(queue, new_graph, regraph_functions, NULL); rdis_callback(rdis, RDIS_CALLBACK_ALL); return 0; }
IndexWriter * frjs_iw_init(bool create, bool create_if_missing, Store *store, Analyzer *analyzer, FieldInfos *fis) { IndexWriter *iw = NULL; Config config = default_config; // rb_scan_args(argc, argv, "01", &roptions); /*if (argc > 0) { Check_Type(roptions, T_HASH); if ((rval = rb_hash_aref(roptions, sym_dir)) != Qnil) { Check_Type(rval, T_DATA); store = DATA_PTR(rval); } else if ((rval = rb_hash_aref(roptions, sym_path)) != Qnil) { StringValue(rval); frb_create_dir(rval); store = open_fs_store(rs2s(rval)); DEREF(store); } // Let ruby's garbage collector handle the closing of the store // if (!close_dir) { // close_dir = RTEST(rb_hash_aref(roptions, sym_close_dir)); // } // use_compound_file defaults to true config.use_compound_file = (rb_hash_aref(roptions, sym_use_compound_file) == Qfalse) ? false : true; if ((rval = rb_hash_aref(roptions, sym_analyzer)) != Qnil) { analyzer = frb_get_cwrapped_analyzer(rval); } create = RTEST(rb_hash_aref(roptions, sym_create)); if ((rval = rb_hash_aref(roptions, sym_create_if_missing)) != Qnil) { create_if_missing = RTEST(rval); } SET_INT_ATTR(chunk_size); SET_INT_ATTR(max_buffer_memory); SET_INT_ATTR(index_interval); SET_INT_ATTR(skip_interval); SET_INT_ATTR(merge_factor); SET_INT_ATTR(max_buffered_docs); SET_INT_ATTR(max_merge_docs); SET_INT_ATTR(max_field_length); }*/ if (NULL == store) { store = open_ram_store(); DEREF(store); } if (!create && create_if_missing && !store->exists(store, "segments")) { create = true; } if (create) { if (fis != NULL) { index_create(store, fis); } else { fis = fis_new(STORE_YES, INDEX_YES, TERM_VECTOR_WITH_POSITIONS_OFFSETS); index_create(store, fis); fis_deref(fis); } } iw = iw_open(store, analyzer, &config); return iw; }
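A hedged call sketch: passing NULL for both store and fis exercises the two fallback paths above (transient RAM store, default FieldInfos); standard_analyzer_new, iw_add_doc, and iw_close all appear elsewhere in this section:

Analyzer *analyzer = standard_analyzer_new(true);
IndexWriter *iw = frjs_iw_init(true,  /* create */
                               true,  /* create_if_missing */
                               NULL,  /* store: falls back to a RAM store */
                               analyzer,
                               NULL); /* fis: falls back to default field infos */
/* ... add documents via iw_add_doc(iw, doc) as in the setups above ... */
iw_close(iw);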