static void
collectLocalMemoryStats(QueryDesc *queryDesc, int eflags)
{
	BackendMemoryStat		   *myStat;
	MemoryContextIteratorState	state;

	if (prev_ExecutorStart)
		prev_ExecutorStart(queryDesc, eflags);
	else
		standard_ExecutorStart(queryDesc, eflags);

	if (MyBackendProcNo < 0)
	{
		on_proc_exit(cleanupMyStat, 0);
		MyBackendProcNo = MyProc->pgprocno;
		Assert(MyBackendProcNo >= 0);
	}

	if (checkTick() == false)
		return;

	Assert(MyBackendProcNo < PROCARRAY_MAXPROCS);
	myStat = NthBMS(MyBackendProcNo);

	/*
	 * Do not wait if a reader currently locks our slot.
	 */
	if (LWLockConditionalAcquire(myStat->lock, LW_EXCLUSIVE) == false)
		return;

	myStat->pid = MyProc->pid;
	myStat->nContext = 0;

	state.context = TopMemoryContext;
	state.level = 0;

	/*
	 * Walk through all memory contexts and fill the stat table in shared memory.
	 */
	do
	{
		MemoryContextStat  *mcs = myStat->stats + myStat->nContext;
		int					namelen = strlen(state.context->name);

		if (namelen > NAMEDATALEN - 1)
			namelen = NAMEDATALEN - 1;

		memcpy(mcs->name.data, state.context->name, namelen);
		mcs->name.data[namelen] = '\0';
		mcs->level = state.level;

		getMemoryContextStat(state.context, &mcs->stat);
		myStat->nContext++;

		iterateMemoryContext(&state);
	} while (state.context && myStat->nContext < N_MC_STAT);

	LWLockRelease(myStat->lock);
}
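The walk above depends on MemoryContextIteratorState and iterateMemoryContext(), which this snippet does not include. A minimal depth-first sketch, assuming the standard parent/firstchild/nextchild links of PostgreSQL's MemoryContextData (the struct layout and field names below are assumptions, not the extension's actual code):

typedef struct MemoryContextIteratorState
{
	MemoryContext	context;	/* context to report next, NULL when done */
	int				level;		/* depth below TopMemoryContext */
} MemoryContextIteratorState;

/* Advance to the next memory context in a preorder (depth-first) walk. */
static void
iterateMemoryContext(MemoryContextIteratorState *state)
{
	MemoryContext	context = state->context;

	if (context->firstchild)
	{
		/* descend to the first child */
		state->context = context->firstchild;
		state->level++;
	}
	else if (context->nextchild)
	{
		/* no children: move to the next sibling at the same level */
		state->context = context->nextchild;
	}
	else
	{
		/* climb back up until an ancestor with an unvisited sibling is found */
		while (context)
		{
			context = context->parent;
			state->level--;

			if (context == NULL)
			{
				state->context = NULL;	/* whole tree visited */
				break;
			}
			if (context->nextchild)
			{
				state->context = context->nextchild;
				break;
			}
		}
	}
}

Each call advances to the next context in preorder and keeps level in step, which is what lets the loop above record a depth alongside each context's name and statistics.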
/*
 * CS186: Called when the specified buffer is unpinned and becomes
 * available for replacement.
 */
void
BufferUnpinned(int bufIndex)
{
	volatile BufferDesc *buf = &BufferDescriptors[bufIndex];

	if (!LWLockConditionalAcquire(BufFreelistLock, LW_EXCLUSIVE))
		return;

	/*
	 * CS186 TODO: When this function is called, the specified buffer has
	 * just been unpinned. That means you can start to manage this buffer
	 * using your buffer replacement policy. You can access the
	 * StrategyControl global variable from inside this function.
	 * This function was added by the GSIs.
	 */
	if (BufferReplacementPolicy == POLICY_LRU ||
		BufferReplacementPolicy == POLICY_MRU)
	{
		UnpinnedHelper(&buf, &(StrategyControl->head), &(StrategyControl->tail));
	}
	else if (BufferReplacementPolicy == POLICY_2Q)
	{
		if (buf->queueTag == 2)
		{
			UnpinnedHelper(&buf, &(StrategyControl->head), &(StrategyControl->tail));
		}
		else if (buf->queueTag == 1)
		{
			// Remove buf from the A1 queue
			if (StrategyControl->head2Q == StrategyControl->tail2Q)
			{
				StrategyControl->head2Q = NULL;
				StrategyControl->tail2Q = NULL;
			}
			else
			{
				buf->prev->next = buf->next;
				buf->next->prev = buf->prev;
				if (buf == StrategyControl->head2Q)
				{
					StrategyControl->head2Q = buf->next;
				}
				else if (buf == StrategyControl->tail2Q)
				{
					StrategyControl->tail2Q = buf->prev;
				}
			}

			// Put buf on the Am queue
			buf->queueTag = 2;
			buf->next = NULL;
			buf->prev = NULL;
			UnpinnedHelper(&buf, &(StrategyControl->head), &(StrategyControl->tail));
		}
		// first time unpinning
		else
		{
			buf->queueTag = 1;
			UnpinnedHelper(&buf, &(StrategyControl->head2Q), &(StrategyControl->tail2Q));
		}
	}

	LWLockRelease(BufFreelistLock);
}
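The LRU/MRU and 2Q branches above delegate the actual list maintenance to UnpinnedHelper(), which is not shown. A plausible sketch, inferred only from the call sites (the double-pointer signature and the next/prev fields are assumptions): unlink the buffer from the given list if it is already there, then append it at the tail.

/*
 * Hypothetical helper assumed by the snippet above: move *bufp to the tail
 * of the doubly-linked list rooted at (*head, *tail).  Not the original
 * homework code; field and parameter names are inferred from the callers.
 */
static void
UnpinnedHelper(volatile BufferDesc **bufp,
			   volatile BufferDesc **head,
			   volatile BufferDesc **tail)
{
	volatile BufferDesc *buf = *bufp;

	/* Unlink buf if it is currently linked into this list. */
	if (buf->prev != NULL || buf->next != NULL || *head == buf)
	{
		if (buf->prev != NULL)
			buf->prev->next = buf->next;
		else if (*head == buf)
			*head = buf->next;

		if (buf->next != NULL)
			buf->next->prev = buf->prev;
		else if (*tail == buf)
			*tail = buf->prev;
	}

	/* Append buf at the tail, i.e. the most-recently-unpinned end. */
	buf->prev = *tail;
	buf->next = NULL;
	if (*tail != NULL)
		(*tail)->next = buf;
	else
		*head = buf;
	*tail = buf;
}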
/*
 * CS186: Called when the specified buffer is unpinned and becomes
 * available for replacement.
 */
void
BufferUnpinned(int bufIndex)
{
	volatile BufferDesc *buf = &BufferDescriptors[bufIndex];

	// lock the buffer?
	if (!LWLockConditionalAcquire(BufFreelistLock, LW_EXCLUSIVE))
		return;

	/*
	 * CS186 TODO: When this function is called, the specified buffer has
	 * just been unpinned. That means you can start to manage this buffer
	 * using your buffer replacement policy. You can access the
	 * StrategyControl global variable from inside this function.
	 * This function was added by the GSIs.
	 */
	if (BufferReplacementPolicy == POLICY_2Q)
	{
		if (buf->inList == true)
		{
			removeFromList(buf);
			addToList(buf);
		}
		else if (buf->inA1 == true)
		{
			removeFromA1(buf);
			addToList(buf);
		}
		else
		{
			addToA1(buf);
		}
	}
	else
	{
		if (buf->inList == true)
			removeFromList(buf);
		addToList(buf);
	}

	LWLockRelease(BufFreelistLock);
}
/*
 * Wait for any active I/O on a page slot to finish.  (This does not
 * guarantee that new I/O hasn't been started before we return, though.)
 *
 * Control lock must be held at entry, and will be held at exit.
 */
static void
SimpleLruWaitIO(SlruCtl ctl, int slotno)
{
	SlruShared	shared = ctl->shared;

	/* See notes at top of file */
	LWLockRelease(shared->ControlLock);
	LWLockAcquire(shared->buffer_locks[slotno], LW_SHARED);
	LWLockRelease(shared->buffer_locks[slotno]);
	LWLockAcquire(shared->ControlLock, LW_EXCLUSIVE);

	/*
	 * If the slot is still in an io-in-progress state, then either someone
	 * already started a new I/O on the slot, or a previous I/O failed and
	 * neglected to reset the page state.  That shouldn't happen, really, but
	 * it seems worth a few extra cycles to check and recover from it.  We
	 * can cheaply test for failure by seeing if the buffer lock is still
	 * held (we assume that transaction abort would release the lock).
	 */
	if (shared->page_status[slotno] == SLRU_PAGE_READ_IN_PROGRESS ||
		shared->page_status[slotno] == SLRU_PAGE_WRITE_IN_PROGRESS)
	{
		if (LWLockConditionalAcquire(shared->buffer_locks[slotno], LW_SHARED))
		{
			/* indeed, the I/O must have failed */
			if (shared->page_status[slotno] == SLRU_PAGE_READ_IN_PROGRESS)
				shared->page_status[slotno] = SLRU_PAGE_EMPTY;
			else	/* write_in_progress */
			{
				shared->page_status[slotno] = SLRU_PAGE_VALID;
				shared->page_dirty[slotno] = true;
			}
			LWLockRelease(shared->buffer_locks[slotno]);
		}
	}
}
/*
 * ss_report_location --- update the current scan location
 *
 * Writes an entry into the shared Sync Scan state of the form
 * (relfilenode, blocknumber), overwriting any existing entry for the
 * same relfilenode.
 */
void
ss_report_location(Relation rel, BlockNumber location)
{
#ifdef TRACE_SYNCSCAN
	if (trace_syncscan)
	{
		if ((location % 1024) == 0)
			elog(LOG,
				 "SYNC_SCAN: scanning \"%s\" at %u",
				 RelationGetRelationName(rel), location);
	}
#endif

	/*
	 * To reduce lock contention, only report scan progress every N pages.
	 * For the same reason, don't block if the lock isn't immediately
	 * available.  Missing a few updates isn't critical, it just means that
	 * a new scan that wants to join the pack will start a little bit behind
	 * the head of the scan.  Hopefully the pages are still in OS cache and
	 * the scan catches up quickly.
	 */
	if ((location % SYNC_SCAN_REPORT_INTERVAL) == 0)
	{
		if (LWLockConditionalAcquire(SyncScanLock, LW_EXCLUSIVE))
		{
			(void) ss_search(rel->rd_node, location, true);
			LWLockRelease(SyncScanLock);
		}
#ifdef TRACE_SYNCSCAN
		else if (trace_syncscan)
			elog(LOG,
				 "SYNC_SCAN: missed update for \"%s\" at %u",
				 RelationGetRelationName(rel), location);
#endif
	}
}
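For reference, SYNC_SCAN_REPORT_INTERVAL is sized in the PostgreSQL sources so that progress is reported roughly every 128 kB of heap rather than a fixed page count; the shape of that definition (from src/backend/access/heap/syncscan.c, possibly differing across versions) is:

/* Report a scan location roughly every 128 kB, independent of block size. */
#define SYNC_SCAN_REPORT_INTERVAL	(128 * 1024 / BLCKSZ)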
/*
 * Write a page from a shared buffer, if necessary.
 * Does nothing if the specified slot is not dirty.
 *
 * NOTE: only one write attempt is made here.  Hence, it is possible that
 * the page is still dirty at exit (if someone else re-dirtied it during
 * the write).  However, we *do* attempt a fresh write even if the page
 * is already being written; this is for checkpoints.
 *
 * Control lock must be held at entry, and will be held at exit.
 */
void
SimpleLruWritePage(SlruCtl ctl, int slotno, SlruFlush fdata)
{
	SlruShared	shared = ctl->shared;
	int			pageno;
	bool		ok;

	/* Do nothing if page does not need writing */
	if (shared->page_status[slotno] != SLRU_PAGE_DIRTY &&
		shared->page_status[slotno] != SLRU_PAGE_WRITE_IN_PROGRESS)
		return;

	pageno = shared->page_number[slotno];

	/*
	 * We must grab the per-buffer lock to do I/O.  To avoid deadlock, must
	 * release ControlLock while waiting for per-buffer lock.  Fortunately,
	 * most of the time the per-buffer lock shouldn't be already held, so we
	 * can do this:
	 */
	if (!LWLockConditionalAcquire(shared->buffer_locks[slotno], LW_EXCLUSIVE))
	{
		LWLockRelease(shared->ControlLock);
		LWLockAcquire(shared->buffer_locks[slotno], LW_EXCLUSIVE);
		LWLockAcquire(shared->ControlLock, LW_EXCLUSIVE);
	}

	/*
	 * Check to see if someone else already did the write, or took the buffer
	 * away from us.  If so, do nothing.  NOTE: we really should never see
	 * WRITE_IN_PROGRESS here, since that state should only occur while the
	 * writer is holding the buffer lock.  But accept it so that we have a
	 * recovery path if a writer aborts.
	 */
	if (shared->page_number[slotno] != pageno ||
		(shared->page_status[slotno] != SLRU_PAGE_DIRTY &&
		 shared->page_status[slotno] != SLRU_PAGE_WRITE_IN_PROGRESS))
	{
		LWLockRelease(shared->buffer_locks[slotno]);
		return;
	}

	/*
	 * Mark the slot write-busy.  After this point, a transaction status
	 * update on this page will mark it dirty again.
	 */
	shared->page_status[slotno] = SLRU_PAGE_WRITE_IN_PROGRESS;

	/* Okay, release the control lock and do the write */
	LWLockRelease(shared->ControlLock);

	ok = SlruPhysicalWritePage(ctl, pageno, slotno, fdata);

	/* If we failed, and we're in a flush, better close the files */
	if (!ok && fdata)
	{
		int			i;

		for (i = 0; i < fdata->num_files; i++)
			close(fdata->fd[i]);
	}

	/* Re-acquire shared control lock and update page state */
	LWLockAcquire(shared->ControlLock, LW_EXCLUSIVE);

	Assert(shared->page_number[slotno] == pageno &&
		   (shared->page_status[slotno] == SLRU_PAGE_WRITE_IN_PROGRESS ||
			shared->page_status[slotno] == SLRU_PAGE_DIRTY));

	/* Cannot set CLEAN if someone re-dirtied page since write started */
	if (shared->page_status[slotno] == SLRU_PAGE_WRITE_IN_PROGRESS)
		shared->page_status[slotno] = ok ? SLRU_PAGE_CLEAN : SLRU_PAGE_DIRTY;

	LWLockRelease(shared->buffer_locks[slotno]);

	/* Now it's okay to ereport if we failed */
	if (!ok)
		SlruReportIOError(ctl, pageno, InvalidTransactionId);
}
/*
 * Find a page in a shared buffer, reading it in if necessary.
 * The page number must correspond to an already-initialized page.
 *
 * The passed-in xid is used only for error reporting, and may be
 * InvalidTransactionId if no specific xid is associated with the action.
 *
 * Return value is the shared-buffer slot number now holding the page.
 * The buffer's LRU access info is updated.
 *
 * Control lock must be held at entry, and will be held at exit.
 */
int
SimpleLruReadPage(SlruCtl ctl, int pageno, TransactionId xid)
{
	SlruShared	shared = ctl->shared;

	/* Outer loop handles restart if we lose the buffer to someone else */
	for (;;)
	{
		int			slotno;
		bool		ok;

		/* See if page already is in memory; if not, pick victim slot */
		slotno = SlruSelectLRUPage(ctl, pageno);

		/* Did we find the page in memory? */
		if (shared->page_number[slotno] == pageno &&
			shared->page_status[slotno] != SLRU_PAGE_EMPTY)
		{
			/* If page is still being read in, we cannot use it yet */
			if (shared->page_status[slotno] != SLRU_PAGE_READ_IN_PROGRESS)
			{
				/* otherwise, it's ready to use */
				SlruRecentlyUsed(shared, slotno);
				return slotno;
			}
		}
		else
		{
			/* We found no match; assert we selected a freeable slot */
			Assert(shared->page_status[slotno] == SLRU_PAGE_EMPTY ||
				   shared->page_status[slotno] == SLRU_PAGE_CLEAN);
		}

		/* Mark the slot read-busy (no-op if it already was) */
		shared->page_number[slotno] = pageno;
		shared->page_status[slotno] = SLRU_PAGE_READ_IN_PROGRESS;

		/*
		 * Temporarily mark page as recently-used to discourage
		 * SlruSelectLRUPage from selecting it again for someone else.
		 */
		SlruRecentlyUsed(shared, slotno);

		/*
		 * We must grab the per-buffer lock to do I/O.  To avoid deadlock,
		 * must release ControlLock while waiting for per-buffer lock.
		 * Fortunately, most of the time the per-buffer lock shouldn't be
		 * already held, so we can do this:
		 */
		if (!LWLockConditionalAcquire(shared->buffer_locks[slotno],
									  LW_EXCLUSIVE))
		{
			LWLockRelease(shared->ControlLock);
			LWLockAcquire(shared->buffer_locks[slotno], LW_EXCLUSIVE);
			LWLockAcquire(shared->ControlLock, LW_EXCLUSIVE);
		}

		/*
		 * Check to see if someone else already did the read, or took the
		 * buffer away from us.  If so, restart from the top.
		 */
		if (shared->page_number[slotno] != pageno ||
			shared->page_status[slotno] != SLRU_PAGE_READ_IN_PROGRESS)
		{
			LWLockRelease(shared->buffer_locks[slotno]);
			continue;
		}

		/* Okay, release control lock and do the read */
		LWLockRelease(shared->ControlLock);

		ok = SlruPhysicalReadPage(ctl, pageno, slotno);

		/* Re-acquire shared control lock and update page state */
		LWLockAcquire(shared->ControlLock, LW_EXCLUSIVE);

		Assert(shared->page_number[slotno] == pageno &&
			   shared->page_status[slotno] == SLRU_PAGE_READ_IN_PROGRESS);

		shared->page_status[slotno] = ok ? SLRU_PAGE_CLEAN : SLRU_PAGE_EMPTY;

		LWLockRelease(shared->buffer_locks[slotno]);

		/* Now it's okay to ereport if we failed */
		if (!ok)
			SlruReportIOError(ctl, pageno, xid);

		SlruRecentlyUsed(shared, slotno);
		return slotno;
	}
}
void
CheckForQDMirroringWork(void)
{
	QDMIRRORUpdateMask		updateMask;
	bool					validFlag;
	QDMIRRORState			state;
	QDMIRRORDisabledReason	disabledReason;
	struct timeval			lastLogTimeVal;
	char					errorMessage[QDMIRRORErrorMessageSize];

	if (ftsQDMirrorInfo == NULL)
		return;

	/*
	 * Test without taking lock.
	 */
	if (ftsQDMirrorInfo->updateMask == QDMIRROR_UPDATEMASK_NONE)
		return;

	if (IsTransactionOrTransactionBlock())
		return;

	/*
	 * NOTE: We are trying to use the longer-term update config lock here.
	 */
	if (!LWLockConditionalAcquire(ftsQDMirrorUpdateConfigLock, LW_EXCLUSIVE))
		return;

	LWLockAcquire(ftsQDMirrorLock, LW_EXCLUSIVE);
	if (ftsQDMirrorInfo->updateMask == QDMIRROR_UPDATEMASK_NONE)
	{
		LWLockRelease(ftsQDMirrorLock);
		LWLockRelease(ftsQDMirrorUpdateConfigLock);
		return;
	}
	updateMask = ftsQDMirrorInfo->updateMask;
	ftsQDMirrorInfo->updateMask = QDMIRROR_UPDATEMASK_NONE;

	validFlag = false;
	if ((updateMask & QDMIRROR_UPDATEMASK_VALIDFLAG) != 0)
	{
		validFlag = !ftsQDMirrorInfo->valid;

		/*
		 * Assume we are successful...
		 */
		ftsQDMirrorInfo->valid = validFlag;
	}
	state = ftsQDMirrorInfo->state;
	disabledReason = ftsQDMirrorInfo->disabledReason;
	lastLogTimeVal = ftsQDMirrorInfo->lastLogTimeVal;
	strcpy(errorMessage, ftsQDMirrorInfo->errorMessage);

	LWLockRelease(ftsQDMirrorLock);

	QDMirroringUpdate(updateMask, validFlag,
					  state, disabledReason,
					  &lastLogTimeVal, errorMessage);

	LWLockRelease(ftsQDMirrorUpdateConfigLock);
}
void
BufferUnpinned(int bufIndex)
{
	volatile BufferDesc *buf = &BufferDescriptors[bufIndex];
	volatile BufferDesc *first = StrategyControl->firstUnpinned;
	volatile BufferDesc *last = StrategyControl->lastUnpinned;
	volatile BufferDesc *previous = buf->previous;
	volatile BufferDesc *next = buf->next;

	if (!LWLockConditionalAcquire(BufFreelistLock, LW_EXCLUSIVE))
		return;

	if (BufferReplacementPolicy == POLICY_2Q)
	{
		// 2Q stuff
		// If buf is on the Am queue, move it to the tail of the Am queue.
		volatile BufferDesc *head = first;

		while (head != NULL)
		{
			if (head == buf)
			{
				// buf is in the Am queue; put it at the tail.  If it's in the
				// queue, assume firstUnpinned and lastUnpinned have been set.
				if (next != NULL)
				{
					if (previous != NULL)
					{
						// buf in middle of Am queue
						previous->next = next;
						next->previous = previous;
						last->next = buf;
						buf->previous = last;
						buf->next = NULL;
						StrategyControl->lastUnpinned = buf;
					}
					else
					{
						// buf at beginning
						next->previous = NULL;
						StrategyControl->firstUnpinned = next;
						last->next = buf;
						buf->previous = last;
						buf->next = NULL;
						StrategyControl->lastUnpinned = buf;
					}
				}
				LWLockRelease(BufFreelistLock);
				return;
			}
			else
			{
				head = head->next;
			}
		}

		// Else, if buf is on the A1 queue...
		head = StrategyControl->a1Head;
		while (head != NULL)
		{
			if (head == buf)
			{
				// Remove buf from the A1 queue and
				// put buf at the tail of the Am queue.
				if (next != NULL)
				{
					if (previous != NULL)
					{
						// buf in middle of A1 queue
						previous->next = next;
						next->previous = previous;
						// if Am is empty
						if (first == NULL || last == NULL)
							StrategyControl->firstUnpinned = buf;
						else
							last->next = buf;
						buf->previous = last;
						buf->next = NULL;
						StrategyControl->lastUnpinned = buf;
					}
					else
					{
						// buf at beginning
						next->previous = NULL;
						StrategyControl->a1Head = next;
						// if Am is empty
						if (first == NULL || last == NULL)
							StrategyControl->firstUnpinned = buf;
						else
							last->next = buf;
						buf->previous = last;
						buf->next = NULL;
						StrategyControl->lastUnpinned = buf;
					}
				}
				else if (previous == NULL)
				{
					// buf is the only thing in A1
					StrategyControl->a1Head = NULL;
					StrategyControl->a1Tail = NULL;
					if (first == NULL || last == NULL)
						StrategyControl->firstUnpinned = buf;
					else
						last->next = buf;
					buf->previous = last;
					buf->next = NULL;
					StrategyControl->lastUnpinned = buf;
				}
				else
				{
					// buf is the last thing in A1
					StrategyControl->a1Tail = previous;
					previous->next = NULL;
					if (first == NULL || last == NULL)
						StrategyControl->firstUnpinned = buf;
					else
						last->next = buf;
					buf->previous = last;
					buf->next = NULL;
					StrategyControl->lastUnpinned = buf;
				}
				LWLockRelease(BufFreelistLock);
				return;
			}
			else
			{
				head = head->next;
			}
		}

		// Else, put buf at the tail of the A1 queue.
		if (StrategyControl->a1Head == NULL || StrategyControl->a1Tail == NULL)
		{
			StrategyControl->a1Head = buf;
			StrategyControl->a1Tail = buf;
			buf->previous = NULL;
		}
		else
		{
			StrategyControl->a1Tail->next = buf;
			buf->previous = StrategyControl->a1Tail;
		}
		buf->next = NULL;
		StrategyControl->a1Tail = buf;
	}
	else
	{
		// LRU or MRU or Clock
		if (next != NULL)
		{
			if (previous != NULL)
			{
				// next and previous != NULL: buf is already in the middle of the list
				previous->next = next;
				next->previous = previous;
				last->next = buf;
				buf->previous = last;
				buf->next = NULL;
				StrategyControl->lastUnpinned = buf;
			}
			else
			{
				// next != NULL, previous == NULL: buf is at the beginning of the list
				next->previous = NULL;
				StrategyControl->firstUnpinned = next;
				last->next = buf;
				buf->previous = last;
				buf->next = NULL;
				StrategyControl->lastUnpinned = buf;
			}
		}
		else if (previous == NULL)
		{
			// next == NULL, previous == NULL: buf is new to the list
			if (first == NULL)
			{
				// first time: set firstUnpinned to this buffer
				StrategyControl->firstUnpinned = buf;
			}
			buf->previous = last;
			buf->next = NULL;
			if (last != NULL)
			{
				last->next = buf;
			}
			StrategyControl->lastUnpinned = buf;
		}
	}
	LWLockRelease(BufFreelistLock);
}