/*
 * _bt_mergebuild
 *		Build the target btree index from the sorted spool, optionally
 *		merging with the index's pre-existing on-disk contents.
 *
 * The spool is first sorted, then either written straight into a brand-new
 * relfilenode (fast path for an index with no prior data and no duplicate
 * checking) or merged tuple-by-tuple with the existing index stream that
 * BTReaderInit provides.
 *
 * NOTE(review): the order below is deliberate — lock, flush buffers, open
 * the reader on the OLD file, then assign a new relfilenode before loading.
 */
static void
_bt_mergebuild(Spooler *self, BTSpool *btspool)
{
	Relation	heapRel = self->relinfo->ri_RelationDesc;
	BTWriteState wstate;
	BTReader	reader;
	bool		merge;

	/* Only valid indexes may be rebuilt here. */
	Assert(btspool->index->rd_index->indisvalid);

	/* Finish the external sort so the spool can be read back in order. */
	tuplesort_performsort(btspool->sortstate);

	wstate.index = btspool->index;

	/*
	 * We need to log index creation in WAL iff WAL archiving is enabled AND
	 * it's not a temp index.
	 */
	wstate.btws_use_wal = self->use_wal &&
		XLogArchivingActive() && !RELATION_IS_LOCAL(wstate.index);

	/* reserve the metapage */
	wstate.btws_pages_alloced = BTREE_METAPAGE + 1;
	wstate.btws_pages_written = 0;
	wstate.btws_zeropage = NULL;	/* until needed */

	/*
	 * Flush dirty buffers so that we will read the index files directly
	 * in order to get pre-existing data. We must acquire AccessExclusiveLock
	 * for the target table for calling FlushRelationBuffer().
	 */
	LockRelation(wstate.index, AccessExclusiveLock);
	FlushRelationBuffers(wstate.index);
	BULKLOAD_PROFILE(&prof_flush);

	/* Returns true iff the existing index has data worth merging. */
	merge = BTReaderInit(&reader, wstate.index);

	elog(DEBUG1, "pg_bulkload: build \"%s\" %s merge (%s wal)",
		 RelationGetRelationName(wstate.index),
		 merge ? "with" : "without",
		 wstate.btws_use_wal ? "with" : "without");

	/* Assign a new file node. */
	RelationSetNewRelfilenode(wstate.index, InvalidTransactionId);

	/*
	 * Merge path is also taken when duplicate errors are tolerated on a
	 * unique index, since _bt_mergeload performs the duplicate handling.
	 */
	if (merge || (btspool->isunique && self->max_dup_errors > 0))
	{
		/* Merge two streams into the new file node that we assigned. */
		BULKLOAD_PROFILE_PUSH();
		_bt_mergeload(self, &wstate, btspool, &reader, heapRel);
		BULKLOAD_PROFILE_POP();
		BULKLOAD_PROFILE(&prof_merge);
	}
	else
	{
		/* Fast path for newly created index. */
		_bt_load(&wstate, btspool, NULL);
		BULKLOAD_PROFILE(&prof_index);
	}

	/* Release reader resources (the old file is no longer needed). */
	BTReaderTerm(&reader);
}
static void PersistentStore_InsertTuple( PersistentStoreData *storeData, PersistentStoreSharedData *storeSharedData, Datum *values, bool flushToXLog, /* When true, the XLOG record for this change will be flushed to disk. */ ItemPointer persistentTid) /* TID of the stored tuple. */ { Relation persistentRel; #ifdef USE_ASSERT_CHECKING if (storeSharedData == NULL || !PersistentStoreSharedData_EyecatcherIsValid(storeSharedData)) elog(ERROR, "Persistent store shared-memory not valid"); #endif if (Debug_persistent_store_print) elog(PersistentStore_DebugPrintLevel(), "PersistentStore_InsertTuple: Going to insert new tuple ('%s', shared data %p)", storeData->tableName, storeSharedData); persistentRel = (*storeData->openRel)(); PersistentStore_DoInsertTuple( storeData, storeSharedData, persistentRel, values, flushToXLog, persistentTid); #ifdef FAULT_INJECTOR if (FaultInjector_InjectFaultIfSet(SyncPersistentTable, DDLNotSpecified, "" /* databaseName */, "" /* tableName */)== FaultInjectorTypeSkip) { FlushRelationBuffers(persistentRel); smgrimmedsync(persistentRel->rd_smgr); } #endif (*storeData->closeRel)(persistentRel); if (Debug_persistent_store_print) { elog(PersistentStore_DebugPrintLevel(), "PersistentStore_InsertTuple: Inserted new tuple at TID %s ('%s')", ItemPointerToString(persistentTid), storeData->tableName); (*storeData->printTupleCallback)( PersistentStore_DebugPrintLevel(), "STORE INSERT TUPLE", persistentTid, values); } }
/*
 * PersistentStore_UpdateTuple
 *		Overwrite an existing persistent store tuple, identified by TID,
 *		with a completely new set of column values.
 *
 * The new tuple is formed with no NULL columns so that it is the exact
 * same size as the old one, allowing an in-place (frozen) update at the
 * same TID.  The resulting XLOG end location is either flushed here
 * (flushToXLog) or remembered in nowaitXLogEndLoc for a later flush.
 *
 * Fix: the "Going to update" debug message previously announced itself as
 * "PersistentStore_ReplaceTuple", which does not match this function's
 * name (its completion message already says UpdateTuple); the prefix is
 * corrected so log lines from the two messages correlate.
 */
void PersistentStore_UpdateTuple(
	PersistentStoreData *storeData,
	PersistentStoreSharedData *storeSharedData,
	ItemPointer persistentTid,	/* TID of the stored tuple. */
	Datum *values,
	bool flushToXLog)			/* When true, the XLOG record for this
								 * change will be flushed to disk. */
{
	Relation	persistentRel;
	bool	   *nulls;
	HeapTuple	persistentTuple = NULL;
	XLogRecPtr	xlogUpdateEndLoc;

#ifdef USE_ASSERT_CHECKING
	/* Verify the shared-memory eyecatcher before doing anything else. */
	if (storeSharedData == NULL ||
		!PersistentStoreSharedData_EyecatcherIsValid(storeSharedData))
		elog(ERROR, "Persistent store shared-memory not valid");
#endif

	if (Debug_persistent_store_print)
		elog(PersistentStore_DebugPrintLevel(),
			 "PersistentStore_UpdateTuple: Going to update whole tuple at TID %s ('%s', shared data %p)",
			 ItemPointerToString(persistentTid),
			 storeData->tableName,
			 storeSharedData);

	persistentRel = (*storeData->openRel)();

	/*
	 * In order to keep the tuples the exact same size to enable direct reuse of
	 * free tuples, we do not use NULLs.
	 *
	 * (palloc0'd array is reclaimed with the current memory context —
	 * presumably per-query — so no explicit pfree is needed here.)
	 */
	nulls = (bool*)palloc0(storeData->numAttributes * sizeof(bool));

	/*
	 * Form the tuple.
	 */
	persistentTuple = heap_form_tuple(persistentRel->rd_att, values, nulls);
	if (!HeapTupleIsValid(persistentTuple))
		elog(ERROR, "Failed to build persistent tuple ('%s')",
			 storeData->tableName);

	/* Overwrite the old tuple in place at the caller-supplied TID. */
	persistentTuple->t_self = *persistentTid;
	frozen_heap_inplace_update(persistentRel, persistentTuple);

	/*
	 * Return the XLOG location of the UPDATE tuple's XLOG record.
	 */
	xlogUpdateEndLoc = XLogLastInsertEndLoc();

	heap_freetuple(persistentTuple);

#ifdef FAULT_INJECTOR
	/*
	 * Fault-injection hook: when armed, force the table's pages to disk so
	 * crash-recovery scenarios can be exercised by tests.
	 */
	if (FaultInjector_InjectFaultIfSet(SyncPersistentTable,
									   DDLNotSpecified,
									   "" /* databaseName */,
									   "" /* tableName */) == FaultInjectorTypeSkip)
	{
		FlushRelationBuffers(persistentRel);
		smgrimmedsync(persistentRel->rd_smgr);
	}
#endif

	(*storeData->closeRel)(persistentRel);

	if (Debug_persistent_store_print)
	{
		elog(PersistentStore_DebugPrintLevel(),
			 "PersistentStore_UpdateTuple: Updated whole tuple at TID %s ('%s')",
			 ItemPointerToString(persistentTid),
			 storeData->tableName);

		(*storeData->printTupleCallback)(PersistentStore_DebugPrintLevel(),
										 "STORE UPDATED TUPLE",
										 persistentTid,
										 values);
	}

	/*
	 * Either flush the WAL record now, or stash its end location in the
	 * module-level nowaitXLogEndLoc for a deferred flush.
	 * NOTE(review): nowaitXLogEndLoc is declared elsewhere in this file.
	 */
	if (flushToXLog)
	{
		XLogFlush(xlogUpdateEndLoc);
		XLogRecPtr_Zero(&nowaitXLogEndLoc);
	}
	else
		nowaitXLogEndLoc = xlogUpdateEndLoc;
}
/*
 * index_drop
 *		Remove an index: delete its catalog entries, flush and unlink its
 *		physical file, and invalidate cached state.
 *
 * NOTE: this routine should now only be called through performDeletion(),
 * else associated dependencies won't be cleaned up.
 */
void
index_drop(Oid indexId)
{
	Oid			heapId;
	Relation	userHeapRelation;
	Relation	userIndexRelation;
	Relation	indexRelation;
	HeapTuple	tuple;
	int			i;

	Assert(OidIsValid(indexId));

	/*
	 * To drop an index safely, we must grab exclusive lock on its parent
	 * table; otherwise there could be other backends using the index!
	 * Exclusive lock on the index alone is insufficient because another
	 * backend might be in the midst of devising a query plan that will
	 * use the index.  The parser and planner take care to hold an
	 * appropriate lock on the parent table while working, but having them
	 * hold locks on all the indexes too seems overly complex.  We do grab
	 * exclusive lock on the index too, just to be safe.  Both locks must
	 * be held till end of transaction, else other backends will still see
	 * this index in pg_index.
	 */
	heapId = IndexGetRelation(indexId);
	userHeapRelation = heap_open(heapId, AccessExclusiveLock);

	userIndexRelation = index_open(indexId);
	LockRelation(userIndexRelation, AccessExclusiveLock);

	/*
	 * fix RELATION relation: delete the index's pg_class row
	 */
	DeleteRelationTuple(indexId);

	/*
	 * fix ATTRIBUTE relation: delete the index's pg_attribute rows
	 */
	DeleteAttributeTuples(indexId);

	/*
	 * fix INDEX relation: delete the index's pg_index row
	 */
	indexRelation = heap_openr(IndexRelationName, RowExclusiveLock);

	tuple = SearchSysCache(INDEXRELID,
						   ObjectIdGetDatum(indexId),
						   0, 0, 0);
	if (!HeapTupleIsValid(tuple))
		elog(ERROR, "cache lookup failed for index %u", indexId);

	simple_heap_delete(indexRelation, &tuple->t_self);
	ReleaseSysCache(tuple);
	heap_close(indexRelation, RowExclusiveLock);

	/*
	 * flush buffer cache and physically remove the file
	 * (FlushRelationBuffers reports failure with a negative return code)
	 */
	i = FlushRelationBuffers(userIndexRelation, (BlockNumber) 0);
	if (i < 0)
		elog(ERROR, "FlushRelationBuffers returned %d", i);

	smgrunlink(DEFAULT_SMGR, userIndexRelation);

	/*
	 * We are presently too lazy to attempt to compute the new correct
	 * value of relhasindex (the next VACUUM will fix it if necessary). So
	 * there is no need to update the pg_class tuple for the owning
	 * relation. But we must send out a shared-cache-inval notice on the
	 * owning relation to ensure other backends update their relcache
	 * lists of indexes.
	 */
	CacheInvalidateRelcache(heapId);

	/*
	 * Close rels, but keep locks (both are held until end of transaction;
	 * see the locking discussion above).
	 */
	index_close(userIndexRelation);
	heap_close(userHeapRelation, NoLock);

	/* Drop any relcache entry for the now-deleted index. */
	RelationForgetRelation(indexId);
}