/*
 * Initialize metadata revert block info hash tables
 */
bool
MetadataRevertBlockInfoTablesInit(void)
{
	HASHCTL		info;
	int			hash_flags;

	MemSet(&info, 0, sizeof(info));
	info.keysize = sizeof(RevertBlockInfoKey);
	info.entrysize = sizeof(RevertBlockInfoEntry);
	hash_flags = HASH_ELEM;

	RevertBlockHostsMap = ShmemInitHash("Metadata Revert Block Hosts Map",
										MAX_HDFS_HOST_NUM,
										MAX_HDFS_HOST_NUM,
										&info,
										hash_flags);
	if (NULL == RevertBlockHostsMap)
	{
		return false;
	}

	RevertBlockNamesMap = ShmemInitHash("Metadata Revert Block Names Map",
										MAX_HDFS_HOST_NUM,
										MAX_HDFS_HOST_NUM,
										&info,
										hash_flags);
	if (NULL == RevertBlockNamesMap)
	{
		return false;
	}

	RevertBlockTopologyPathsMap = ShmemInitHash("Metadata Revert Block TopologyPaths Map",
												MAX_HDFS_HOST_NUM,
												MAX_HDFS_HOST_NUM,
												&info,
												hash_flags);
	if (NULL == RevertBlockTopologyPathsMap)
	{
		return false;
	}

	return true;
}
static void
DtmInitialize()
{
	bool		found;
	static HASHCTL info;

	LWLockAcquire(AddinShmemInitLock, LW_EXCLUSIVE);
	dtm = ShmemInitStruct("dtm", sizeof(DtmState), &found);
	if (!found)
	{
		dtm->hashLock = LWLockAssign();
		dtm->xidLock = LWLockAssign();
		dtm->nReservedXids = 0;
		dtm->minXid = InvalidTransactionId;
		dtm->nNodes = MMNodes;
		dtm->disabledNodeMask = 0;
		pg_atomic_write_u32(&dtm->nReceivers, 0);
		dtm->initialized = false;
		BgwPoolInit(&dtm->pool, MMExecutor, MMDatabaseName, MMQueueSize);
		RegisterXactCallback(DtmXactCallback, NULL);
		RegisterSubXactCallback(DtmSubXactCallback, NULL);
	}
	LWLockRelease(AddinShmemInitLock);

	info.keysize = sizeof(TransactionId);
	info.entrysize = sizeof(TransactionId);
	info.hash = dtm_xid_hash_fn;
	info.match = dtm_xid_match_fn;
	xid_in_doubt = ShmemInitHash("xid_in_doubt",
								 DTM_HASH_SIZE, DTM_HASH_SIZE,
								 &info,
								 HASH_ELEM | HASH_FUNCTION | HASH_COMPARE);

	info.keysize = sizeof(TransactionId);
	info.entrysize = sizeof(LocalTransaction);
	info.hash = dtm_xid_hash_fn;
	info.match = dtm_xid_match_fn;
	local_trans = ShmemInitHash("local_trans",
								DTM_HASH_SIZE, DTM_HASH_SIZE,
								&info,
								HASH_ELEM | HASH_FUNCTION | HASH_COMPARE);

	MMDoReplication = true;
	TM = &DtmTM;
}
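/*
 * The dtm_xid_hash_fn and dtm_xid_match_fn callbacks referenced above are not
 * part of this snippet.  Below is a minimal sketch of what dynahash hash and
 * match callbacks for TransactionId keys typically look like; the real
 * multimaster implementations may differ.  The signatures follow the standard
 * HashValueFunc and HashCompareFunc typedefs from utils/hsearch.h.
 */
static uint32
dtm_xid_hash_fn(const void *key, Size keysize)
{
	/* TransactionId is a 32-bit value, so it can serve directly as the hash */
	return (uint32) *(const TransactionId *) key;
}

static int
dtm_xid_match_fn(const void *key1, const void *key2, Size keysize)
{
	TransactionId xid1 = *(const TransactionId *) key1;
	TransactionId xid2 = *(const TransactionId *) key2;

	/* dynahash expects a memcmp-style result: 0 means the keys are equal */
	return (xid1 == xid2) ? 0 : 1;
}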
/*
 * PersistentTablespace_HashTableInit
 *
 * Create or find shared-memory hash table.
 */
static bool
PersistentTablespace_HashTableInit(void)
{
	HASHCTL		info;
	int			hash_flags;

	/* Set key and entry sizes. */
	MemSet(&info, 0, sizeof(info));
	info.keysize = sizeof(TablespaceDirEntryKey);
	info.entrysize = sizeof(TablespaceDirEntryData);
	info.hash = tag_hash;
	hash_flags = (HASH_ELEM | HASH_FUNCTION);

	persistentTablespaceSharedHashTable =
		ShmemInitHash("Persistent Tablespace Hash",
					  gp_max_tablespaces,
					  gp_max_tablespaces,
					  &info,
					  hash_flags);

	if (persistentTablespaceSharedHashTable == NULL)
		return false;

	return true;
}
void
ContQuerySchedulerShmemInit(void)
{
	bool		found;
	Size		size = ContQuerySchedulerShmemSize();

	ContQuerySchedulerShmem = ShmemInitStruct("ContQuerySchedulerShmem", size, &found);

	if (!found)
	{
		HASHCTL		info;

		MemSet(ContQuerySchedulerShmem, 0, ContQuerySchedulerShmemSize());

		info.keysize = sizeof(Oid);
		info.entrysize = ContQueryDatabaseMetadataSize();
		info.hash = oid_hash;

		ContQuerySchedulerShmem->proc_table = ShmemInitHash("ContQueryDatabaseMetadata",
															INIT_PROC_TABLE_SZ,
															MAX_PROC_TABLE_SZ,
															&info,
															HASH_ELEM | HASH_FUNCTION);

		update_run_params();

		ContQuerySchedulerShmem->tranche_id = LWLockNewTrancheId();
	}

	LWLockRegisterTranche(ContQuerySchedulerShmem->tranche_id, &DummyLWLockTranche);
}
/*
 * Initialization performed at module-load time
 */
void
_PG_init(void)
{
	HASHCTL		ctl;

	MemSet(&ctl, 0, sizeof(HASHCTL));

	ctl.keysize = sizeof(Oid);
	ctl.entrysize = sizeof(KafkaConsumerProc);
	ctl.hash = oid_hash;

	consumer_procs = ShmemInitHash("KafkaConsumerProcs",
								   NUM_CONSUMERS_INIT, NUM_CONSUMERS_MAX,
								   &ctl, HASH_ELEM | HASH_FUNCTION);

	MemSet(&ctl, 0, sizeof(HASHCTL));

	ctl.keysize = sizeof(Oid);
	ctl.entrysize = sizeof(KafkaConsumerGroup);
	ctl.hash = oid_hash;

	consumer_groups = ShmemInitHash("KafkaConsumerGroups",
									2 * NUM_CONSUMERS_INIT, 2 * NUM_CONSUMERS_MAX,
									&ctl, HASH_ELEM | HASH_FUNCTION);
}
/*
 * IPCMessageBrokerShmemInit
 */
void
IPCMessageBrokerShmemInit(void)
{
	bool		found;
	Size		size = IPCMessageBrokerShmemSize();

	LWLockAcquire(IPCMessageBrokerIndexLock, LW_EXCLUSIVE);

	broker_meta = ShmemInitStruct("BrokerMeta", size, &found);

	if (!found)
	{
		HASHCTL		ctl;
		int			i;

		MemSet(broker_meta, 0, size);

		pg_atomic_init_flag(&broker_meta->waiting);

		MemSet(&ctl, 0, sizeof(HASHCTL));

		ctl.keysize = sizeof(Oid);
		ctl.entrysize = broker_db_meta_size;
		ctl.hash = oid_hash;

		broker_meta->db_meta_hash = ShmemInitHash("BrokerDBMetaHash", 4, 16,
												  &ctl, HASH_ELEM | HASH_FUNCTION);

		/* Initialize LWLocks that we'll use for all IPC queues */
		broker_meta->tranche_id = LWLockNewTrancheId();

		broker_meta->tranche.name = "BrokerMetaLWLocks";
		broker_meta->tranche.array_base = (void *) broker_meta->locks;
		broker_meta->tranche.array_stride = sizeof(lw_lock_slot);

		for (i = 0; i < max_worker_processes; i++)
		{
			lw_lock_slot *slot = &broker_meta->locks[i];

			LWLockInitialize(&slot->lock, broker_meta->tranche_id);
			slot->dbid = InvalidOid;
		}
	}

	LWLockRelease(IPCMessageBrokerIndexLock);

	LWLockRegisterTranche(broker_meta->tranche_id, &broker_meta->tranche);
}
/*
 * Initialize shmem hash table for mapping buffers
 *		size is the desired hash table size (possibly more than NBuffers)
 */
void
InitBufTable(int size)
{
	HASHCTL		info;

	/* assume no locking is needed yet */

	/* BufferTag maps to Buffer */
	info.keysize = sizeof(BufferTag);
	info.entrysize = sizeof(BufferLookupEnt);
	info.num_partitions = NUM_BUFFER_PARTITIONS;

	SharedBufHash = ShmemInitHash("Shared Buffer Lookup Table",
								  size, size,
								  &info,
								  HASH_ELEM | HASH_BLOBS | HASH_PARTITION);
}
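/*
 * A minimal sketch (modeled on bufmgr.c; the function name LookupBufferSketch
 * is illustrative) of how the partitioned buffer lookup table created above
 * is consulted: the caller hashes the BufferTag once, acquires the matching
 * partition lock, and performs the lookup under that lock.
 */
#include "storage/buf_internals.h"

static int
LookupBufferSketch(BufferTag *tag)
{
	uint32		hashcode = BufTableHashCode(tag);
	LWLock	   *partitionLock = BufMappingPartitionLock(hashcode);
	int			buf_id;

	LWLockAcquire(partitionLock, LW_SHARED);
	buf_id = BufTableLookup(tag, hashcode);		/* -1 if not found */
	LWLockRelease(partitionLock);

	return buf_id;
}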
void
Persistent_PostDTMRecv_ShmemInit(void)
{
	HASHCTL		info;
	int			hash_flags;
	bool		foundPtr;

	PT_PostDTMRecv_Info = (PT_PostDTMRecv_Data *)
		ShmemInitStruct("Post DTM Recovery Checks Info",
						sizeof(PT_PostDTMRecv_Data),
						&foundPtr);

	if (PT_PostDTMRecv_Info == NULL)
	{
		ereport(ERROR,
				(errcode(ERRCODE_OUT_OF_MEMORY),
				 (errmsg("not enough shared memory for post DTM recv. checks"))));
	}

	if (!foundPtr)
	{
		MemSet(PT_PostDTMRecv_Info, 0, sizeof(PT_PostDTMRecv_Data));
	}

	MemSet(&info, 0, sizeof(info));
	info.keysize = sizeof(Oid);
	info.entrysize = sizeof(postDTMRecv_dbTblSpc_Hash_Entry);
	info.hash = tag_hash;
	hash_flags = (HASH_ELEM | HASH_FUNCTION);

	PT_PostDTMRecv_Info->postDTMRecv_dbTblSpc_Hash =
		ShmemInitHash("Post DTM Recv dbtblspc hash",
					  PT_MAX_NUM_POSTDTMRECV_DB,
					  PT_MAX_NUM_POSTDTMRECV_DB,
					  &info,
					  hash_flags);

	if (PT_PostDTMRecv_Info->postDTMRecv_dbTblSpc_Hash == NULL)
	{
		ereport(ERROR,
				(errcode(ERRCODE_OUT_OF_MEMORY),
				 (errmsg("not enough shared memory for post DTM recv. checks"))));
	}
}
/*
 * The hash table contains fault injections that have been set on the system
 * and are waiting to be injected.  The FaultInjector identifier is the key
 * in the hash table.  The hash table in shared memory is initialized only on
 * primary and mirror segments; it is not initialized on the master host.
 */
void
FaultInjector_ShmemInit(void)
{
	HASHCTL		hash_ctl;
	bool		foundPtr;

	faultInjectorShmem = (FaultInjectorShmem_s *)
		ShmemInitStruct("fault injector",
						sizeof(FaultInjectorShmem_s),
						&foundPtr);

	if (faultInjectorShmem == NULL)
	{
		ereport(ERROR,
				(errcode(ERRCODE_OUT_OF_MEMORY),
				 (errmsg("not enough shared memory for fault injector"))));
	}

	if (!foundPtr)
	{
		MemSet(faultInjectorShmem, 0, sizeof(FaultInjectorShmem_s));
	}

	SpinLockInit(&faultInjectorShmem->lock);

	faultInjectorShmem->faultInjectorSlots = 0;

	MemSet(&hash_ctl, 0, sizeof(hash_ctl));
	hash_ctl.keysize = sizeof(int32);
	hash_ctl.entrysize = sizeof(FaultInjectorEntry_s);
	hash_ctl.hash = int32_hash;

	faultInjectorShmem->hash = ShmemInitHash("fault injector hash",
											 FAULTINJECTOR_MAX_SLOTS,
											 FAULTINJECTOR_MAX_SLOTS,
											 &hash_ctl,
											 HASH_ELEM | HASH_FUNCTION);

	if (faultInjectorShmem->hash == NULL)
	{
		ereport(ERROR,
				(errcode(ERRCODE_OUT_OF_MEMORY),
				 (errmsg("not enough shared memory for fault injector"))));
	}

	return;
}
/*
 * Creates a new synchronized refcounted hashtable object. The new SyncHT
 * instance is palloc-ed in the current memory context.
 *
 * All fields in syncHTCtl are required.
 *
 * Returns NULL if it could not create or attach to a hashtable.
 */
SyncHT *
SyncHTCreate(SyncHTCtl *syncHTCtl)
{
	Assert(NULL != syncHTCtl);
	Assert(NULL != syncHTCtl->isEmptyEntry);
	Assert(NULL != syncHTCtl->initEntry);
	Assert(NullLock != syncHTCtl->baseLWLockId);
	Assert(0 != syncHTCtl->numPartitions);

	SyncHT	   *syncHT = (SyncHT *) palloc(sizeof(SyncHT));

	/* Initialize underlying hashtable control structure with provided values */
	HASHCTL		hctl;

	hctl.keysize = syncHTCtl->keySize;
	hctl.entrysize = syncHTCtl->entrySize;
	hctl.hash = syncHTCtl->hash;
	hctl.keycopy = syncHTCtl->keyCopy;
	hctl.match = syncHTCtl->match;
	hctl.num_partitions = syncHTCtl->numPartitions;

	int			hashFlags = HASH_ELEM | HASH_FUNCTION | HASH_PARTITION |
							HASH_COMPARE | HASH_KEYCOPY;

	/* Create underlying hashtable in shared memory (or attach to an existing one) */
	syncHT->ht = ShmemInitHash(syncHTCtl->tabName,
							   syncHTCtl->numElements,	/* init_size */
							   syncHTCtl->numElements,	/* max_size */
							   &hctl,
							   hashFlags);

	if (syncHT->ht == NULL)
	{
		/* Could not initialize the underlying hashtable */
		pfree(syncHT);
		return NULL;
	}

	syncHT->numPartitions = syncHTCtl->numPartitions;
	syncHT->baseLWLockId = syncHTCtl->baseLWLockId;
	syncHT->pinCountOffset = syncHTCtl->pinCountOffset;
	syncHT->keyOffset = syncHTCtl->keyOffset;
	syncHT->isEmptyEntry = syncHTCtl->isEmptyEntry;
	syncHT->initEntry = syncHTCtl->initEntry;

	return syncHT;
}
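/*
 * A hypothetical caller sketch for SyncHTCreate().  The entry type, callback
 * names, callback signatures, and the sizes below are illustrative assumptions,
 * not taken from the original source; the point is only that every SyncHTCtl
 * field consumed above must be filled in before the call.
 */
typedef struct MySyncEntry
{
	Oid			key;			/* hash key, located via keyOffset */
	int32		pinCount;		/* reference count, located via pinCountOffset */
	bool		inUse;
} MySyncEntry;

/* assumed callback signatures */
static bool
MySyncEntryIsEmpty(void *entry)
{
	return !((MySyncEntry *) entry)->inUse;
}

static void
MySyncEntryInit(void *entry)
{
	((MySyncEntry *) entry)->pinCount = 0;
	((MySyncEntry *) entry)->inUse = true;
}

static SyncHT *
MySyncHTCreate(LWLockId baseLock)
{
	SyncHTCtl	ctl;

	MemSet(&ctl, 0, sizeof(ctl));
	ctl.tabName = "My Sync Hash";
	ctl.keySize = sizeof(Oid);
	ctl.entrySize = sizeof(MySyncEntry);
	ctl.hash = tag_hash;			/* hash for fixed-size binary keys */
	ctl.match = memcmp;				/* dynahash-style comparison */
	ctl.keyCopy = memcpy;
	ctl.numElements = 128;
	ctl.numPartitions = 4;
	ctl.baseLWLockId = baseLock;
	ctl.keyOffset = offsetof(MySyncEntry, key);
	ctl.pinCountOffset = offsetof(MySyncEntry, pinCount);
	ctl.isEmptyEntry = MySyncEntryIsEmpty;
	ctl.initEntry = MySyncEntryInit;

	return SyncHTCreate(&ctl);
}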
/*
 * Initialize shmem hash table for mapping buffers
 *		size is the desired hash table size (possibly more than NBuffers)
 */
void
InitBufTable(int size)
{
	HASHCTL		info;

	/* assume no locking is needed yet */

	/* BufferTag maps to Buffer */
	info.keysize = sizeof(BufferTag);
	info.entrysize = sizeof(BufferLookupEnt);
	info.hash = tag_hash;
	info.num_partitions = NUM_BUFFER_PARTITIONS;

	SharedBufHash = ShmemInitHash("Shared Buffer Lookup Table",
								  size, size,
								  &info,
								  HASH_ELEM | HASH_FUNCTION | HASH_PARTITION);

	if (!SharedBufHash)
		elog(FATAL, "could not initialize shared buffer hash table");
}
/*
 * Initialize metadata cache hash table
 */
bool
MetadataCacheHashTableInit(void)
{
	HASHCTL		info;
	int			hash_flags;

	MemSet(&info, 0, sizeof(info));
	info.keysize = sizeof(MetadataCacheKey);
	info.entrysize = sizeof(MetadataCacheEntry);
	info.hash = tag_hash;
	hash_flags = (HASH_ELEM | HASH_FUNCTION);

	MetadataCache = ShmemInitHash("Metadata Cache",
								  metadata_cache_max_hdfs_file_num,
								  metadata_cache_max_hdfs_file_num,
								  &info,
								  hash_flags);

	if (NULL == MetadataCache)
	{
		return false;
	}

	return true;
}
/*
 * Initialize the hash table of open files used by the FileRep process; the
 * table is kept in shared memory.  The FileName of the file is the key of
 * the hash table.  FileName is the file path relative to the $PGDATA
 * directory.
 */
void
FileRepAckPrimary_ShmemInit(void)
{
	HASHCTL		hash_ctl;
	bool		foundPtr;

	fileRepAckHashShmem = (FileRepAckHashShmem_s *)
		ShmemInitStruct("filerep ack base hash",
						sizeof(FileRepAckHashShmem_s),
						&foundPtr);

	if (fileRepAckHashShmem == NULL)
	{
		ereport(ERROR,
				(errcode(ERRCODE_OUT_OF_MEMORY),
				 (errmsg("not enough shared memory for mirroring"))));
	}

	if (!foundPtr)
	{
		MemSet(fileRepAckHashShmem, 0, sizeof(FileRepAckHashShmem_s));
	}

	fileRepAckHashShmem->ipcArrayIndex = IndexIpcArrayAckHashShmem;

	MemSet(&hash_ctl, 0, sizeof(hash_ctl));
	hash_ctl.keysize = MAXPGPATH + 1;
	hash_ctl.entrysize = sizeof(FileRepAckHashEntry_s);
	hash_ctl.hash = string_hash;

	fileRepAckHashShmem->hash = ShmemInitHash("filerep ack hash",
											  FILEREP_MAX_OPEN_FILES,
											  FILEREP_MAX_OPEN_FILES,
											  &hash_ctl,
											  HASH_ELEM | HASH_FUNCTION);

	if (fileRepAckHashShmem->hash == NULL)
	{
		ereport(ERROR,
				(errcode(ERRCODE_OUT_OF_MEMORY),
				 (errmsg("not enough shared memory for mirroring"))));
	}

	return;
}
/*
 * InitShmemIndex() --- set up or attach to shmem index table.
 */
void
InitShmemIndex(void)
{
	HASHCTL		info;
	int			hash_flags;

	/*
	 * Create the shared memory shmem index.
	 *
	 * Since ShmemInitHash calls ShmemInitStruct, which expects the ShmemIndex
	 * hashtable to exist already, we have a bit of a circularity problem in
	 * initializing the ShmemIndex itself.  The special "ShmemIndex" hash
	 * table name will tell ShmemInitStruct to fake it.
	 */
	info.keysize = SHMEM_INDEX_KEYSIZE;
	info.entrysize = sizeof(ShmemIndexEnt);
	hash_flags = HASH_ELEM;

	ShmemIndex = ShmemInitHash("ShmemIndex",
							   SHMEM_INDEX_SIZE, SHMEM_INDEX_SIZE,
							   &info, hash_flags);
}
void
ContQuerySchedulerShmemInit(void)
{
	bool		found;
	Size		size = ContQuerySchedulerShmemSize();

	ContQuerySchedulerShmem = ShmemInitStruct("ContQueryScheduler Data", size, &found);

	if (!found)
	{
		HASHCTL		info;

		MemSet(ContQuerySchedulerShmem, 0, ContQuerySchedulerShmemSize());

		info.keysize = sizeof(Oid);
		info.entrysize = MAXALIGN(add_size(sizeof(ContQueryProcGroup),
										   mul_size(sizeof(ContQueryProc), TOTAL_SLOTS)));
		info.hash = oid_hash;

		ContQuerySchedulerShmem->proc_table = ShmemInitHash("ContQueryScheduler Proc Table",
															INIT_PROC_TABLE_SZ,
															MAX_PROC_TABLE_SZ,
															&info,
															HASH_ELEM | HASH_FUNCTION);

		update_tuning_params();
	}
}
/*
 * shmem_startup hook: allocate or attach to shared memory,
 * then load any pre-existing statistics from file.
 */
static void
pgss_shmem_startup(void)
{
	bool		found;
	HASHCTL		info;
	FILE	   *file;
	uint32		header;
	int32		num;
	int32		i;
	int			query_size;
	int			buffer_size;
	char	   *buffer = NULL;

	if (prev_shmem_startup_hook)
		prev_shmem_startup_hook();

	/* reset in case this is a restart within the postmaster */
	pgss = NULL;
	pgss_hash = NULL;

	/*
	 * Create or attach to the shared memory state, including hash table
	 */
	LWLockAcquire(AddinShmemInitLock, LW_EXCLUSIVE);

	pgss = ShmemInitStruct("pg_stat_statements",
						   sizeof(pgssSharedState),
						   &found);

	if (!found)
	{
		/* First time through ... */
		pgss->lock = LWLockAssign();
		pgss->query_size = pgstat_track_activity_query_size;
	}

	/* Be sure everyone agrees on the hash table entry size */
	query_size = pgss->query_size;

	memset(&info, 0, sizeof(info));
	info.keysize = sizeof(pgssHashKey);
	info.entrysize = offsetof(pgssEntry, query) + query_size;
	info.hash = pgss_hash_fn;
	info.match = pgss_match_fn;
	pgss_hash = ShmemInitHash("pg_stat_statements hash",
							  pgss_max, pgss_max,
							  &info,
							  HASH_ELEM | HASH_FUNCTION | HASH_COMPARE);

	LWLockRelease(AddinShmemInitLock);

	/*
	 * If we're in the postmaster (or a standalone backend...), set up a shmem
	 * exit hook to dump the statistics to disk.
	 */
	if (!IsUnderPostmaster)
		on_shmem_exit(pgss_shmem_shutdown, (Datum) 0);

	/*
	 * Attempt to load old statistics from the dump file, if this is the first
	 * time through and we weren't told not to.
	 */
	if (found || !pgss_save)
		return;

	/*
	 * Note: we don't bother with locks here, because there should be no other
	 * processes running when this code is reached.
	 */
	file = AllocateFile(PGSS_DUMP_FILE, PG_BINARY_R);
	if (file == NULL)
	{
		if (errno == ENOENT)
			return;				/* ignore not-found error */
		goto error;
	}

	buffer_size = query_size;
	buffer = (char *) palloc(buffer_size);

	if (fread(&header, sizeof(uint32), 1, file) != 1 ||
		header != PGSS_FILE_HEADER ||
		fread(&num, sizeof(int32), 1, file) != 1)
		goto error;

	for (i = 0; i < num; i++)
	{
		pgssEntry	temp;
		pgssEntry  *entry;

		if (fread(&temp, offsetof(pgssEntry, mutex), 1, file) != 1)
			goto error;

		/* Encoding is the only field we can easily sanity-check */
		if (!PG_VALID_BE_ENCODING(temp.key.encoding))
			goto error;

		/* Previous incarnation might have had a larger query_size */
		if (temp.key.query_len >= buffer_size)
		{
			buffer = (char *) repalloc(buffer, temp.key.query_len + 1);
			buffer_size = temp.key.query_len + 1;
		}

		if (fread(buffer, 1, temp.key.query_len, file) != temp.key.query_len)
			goto error;
		buffer[temp.key.query_len] = '\0';

		/* Clip to available length if needed */
		if (temp.key.query_len >= query_size)
			temp.key.query_len = pg_encoding_mbcliplen(temp.key.encoding,
													   buffer,
													   temp.key.query_len,
													   query_size - 1);
		temp.key.query_ptr = buffer;

		/* make the hashtable entry (discards old entries if too many) */
		entry = entry_alloc(&temp.key);

		/* copy in the actual stats */
		entry->counters = temp.counters;
	}

	pfree(buffer);
	FreeFile(file);
	return;

error:
	ereport(LOG,
			(errcode_for_file_access(),
			 errmsg("could not read pg_stat_statement file \"%s\": %m",
					PGSS_DUMP_FILE)));
	if (buffer)
		pfree(buffer);
	if (file)
		FreeFile(file);
	/* If possible, throw away the bogus file; ignore any error */
	unlink(PGSS_DUMP_FILE);
}
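/*
 * A sketch of the pgss_hash_fn / pgss_match_fn callbacks referenced above,
 * modeled on early pg_stat_statements sources; details of the real functions
 * may differ.  The key combines user, database, encoding, and the query text
 * itself, so both the hash and the comparison must look at the text.
 */
static uint32
pgss_hash_fn(const void *key, Size keysize)
{
	const pgssHashKey *k = (const pgssHashKey *) key;

	/* the encoding is not folded into the hash; it is checked in the match */
	return hash_uint32((uint32) k->userid) ^
		hash_uint32((uint32) k->dbid) ^
		DatumGetUInt32(hash_any((const unsigned char *) k->query_ptr,
								k->query_len));
}

static int
pgss_match_fn(const void *key1, const void *key2, Size keysize)
{
	const pgssHashKey *k1 = (const pgssHashKey *) key1;
	const pgssHashKey *k2 = (const pgssHashKey *) key2;

	if (k1->userid == k2->userid &&
		k1->dbid == k2->dbid &&
		k1->encoding == k2->encoding &&
		k1->query_len == k2->query_len &&
		memcmp(k1->query_ptr, k2->query_ptr, k1->query_len) == 0)
		return 0;				/* keys are equal */
	else
		return 1;
}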