/* * RemoveFirst -- remove the first entry in the free list of ProcGlobal. * * Use compare_and_swap to avoid using lock and guarantee atomic operation. */ static PGPROC * RemoveFirst() { volatile PROC_HDR *procglobal = ProcGlobal; SHMEM_OFFSET myOffset; PGPROC *freeProc = NULL; /* * Decrement numFreeProcs before removing the first entry from the * free list. */ gp_atomic_add_32(&procglobal->numFreeProcs, -1); int32 casResult = false; while(!casResult) { myOffset = procglobal->freeProcs; if (myOffset == INVALID_OFFSET) { break; } freeProc = (PGPROC *) MAKE_PTR(myOffset); casResult = compare_and_swap_ulong(&((PROC_HDR *)procglobal)->freeProcs, myOffset, freeProc->links.next); if (gp_debug_pgproc && !casResult) { elog(LOG, "need to retry allocating a PGPROC entry: pid=%d (oldHeadOffset=%ld, newHeadOffset=%ld)", MyProcPid, myOffset, procglobal->freeProcs); } } if (freeProc == NULL) { /* * Increment numFreeProcs since we didn't remove any entry from * the free list. */ gp_atomic_add_32(&procglobal->numFreeProcs, 1); } return freeProc; }
/*
 * Prepend -- prepend the entry to the free list of ProcGlobal.
 *
 * Use compare_and_swap to avoid using lock and guarantee atomic operation.
 */
static void
Prepend(PGPROC *myProc)
{
	/* Remember the pid for logging; an entry on the free list carries pid 0. */
	int			savedPid = myProc->pid;
	SHMEM_OFFSET myOffset = MAKE_OFFSET(myProc);

	myProc->pid = 0;

	/* Push this entry onto the head of the free list atomically. */
	for (;;)
	{
		/* Link ourselves in front of the current head, then try to publish. */
		myProc->links.next = ProcGlobal->freeProcs;

		if (compare_and_swap_ulong(&ProcGlobal->freeProcs,
								   myProc->links.next,
								   myOffset))
			break;

		if (gp_debug_pgproc)
		{
			elog(LOG, "need to retry moving PGPROC entry to freelist: pid=%d "
				 "(myOffset=%ld, oldHeadOffset=%ld, newHeadOffset=%ld)",
				 savedPid, myOffset, myProc->links.next, ProcGlobal->freeProcs);
		}
	}

	/* Atomically increment numFreeProcs */
	gp_atomic_add_32(&ProcGlobal->numFreeProcs, 1);
}
void ProcNewMppSessionId(int *newSessionId) { Assert(newSessionId != NULL); *newSessionId = MyProc->mppSessionId = gp_atomic_add_32(&ProcGlobal->mppLocalProcessCounter, 1); /* * Make sure that our SessionState entry correctly records our * new session id. */ if (NULL != MySessionState) { /* This should not happen outside of dispatcher on the master */ Assert(GpIdentity.segindex == MASTER_CONTENT_ID && Gp_role == GP_ROLE_DISPATCH); ereport(gp_sessionstate_loglevel, (errmsg("ProcNewMppSessionId: changing session id (old: %d, new: %d), pinCount: %d, activeProcessCount: %d", MySessionState->sessionId, *newSessionId, MySessionState->pinCount, MySessionState->activeProcessCount), errprintstack(true))); #ifdef USE_ASSERT_CHECKING MySessionState->isModifiedSessionId = true; #endif MySessionState->sessionId = *newSessionId; } }
/*
 * Updates the given performance counter by delta
 *
 * delta can be positive or negative
 */
void
Cache_UpdatePerfCounter(uint32 *counter, int delta)
{
	/*
	 * Sanity check: the counter value must not go negative.  The original
	 * assertion compared the *pointer* plus delta against zero (pointer
	 * arithmetic), which never checked the intended invariant; test the
	 * pointed-to value instead.
	 */
	Assert((int32) (*counter) + delta >= 0);

	gp_atomic_add_32((int32 *) counter, delta);
}
/*
 * Marks the current process as clean. If all the processes are marked
 * as clean for this session (i.e., cleanupCountdown == 0 in the
 * MySessionState) then we reset session's runaway status as well as
 * the runaway detector flag (i.e., a new runaway detector can run).
 *
 * Parameters:
 * 	ignoredCleanup: whether the cleanup was ignored, i.e., no elog(ERROR, ...)
 * 	was thrown. In such case a deactivated process is not reactivated as the
 * 	deactivation didn't get interrupted.
 */
void
RunawayCleaner_RunawayCleanupDoneForProcess(bool ignoredCleanup)
{
	/*
	 * We don't do anything if we don't have an ongoing cleanup, or we already finished
	 * cleanup once for the current runaway event
	 */
	if (beginCleanupRunawayVersion != *latestRunawayVersion ||
		endCleanupRunawayVersion == beginCleanupRunawayVersion)
	{
		/* Either we never started cleanup, or we already finished */
		return;
	}

	/* Disable repeating call */
	endCleanupRunawayVersion = beginCleanupRunawayVersion;

	Assert(NULL != MySessionState);

	/*
	 * As the current cleanup holds leverage on the cleanupCountdown,
	 * the session must stay as runaway at least until the current
	 * process marks itself clean
	 */
	Assert(MySessionState->runawayStatus != RunawayStatus_NotRunaway);

	/* We only cleanup if we were active when the runaway event happened */
	Assert((!isProcessActive && *latestRunawayVersion < deactivationVersion &&
			*latestRunawayVersion > activationVersion) ||
		   (*latestRunawayVersion > activationVersion &&
			(activationVersion >= deactivationVersion && isProcessActive)));

	/*
	 * We don't reactivate if the process is already active or a deactivated
	 * process never errored out during deactivation (i.e., failed to complete
	 * deactivation)
	 */
	if (!isProcessActive && !ignoredCleanup)
	{
		Assert(1 == *isRunawayDetector);
		Assert(0 < MySessionState->cleanupCountdown);
		/*
		 * As the process threw ERROR instead of going into ReadCommand() blocking
		 * state, we have to reactivate the process from its current Deactivated
		 * state
		 */
		IdleTracker_ActivateProcess();
	}

	Assert(0 < MySessionState->cleanupCountdown);

	/*
	 * Atomically take this process out of the countdown.  The returned value
	 * is only needed for the assertion, hence the #if splice; when assertions
	 * are disabled the Assert below compiles away along with cleanProgress.
	 */
#if USE_ASSERT_CHECKING
	int cleanProgress =
#endif
	gp_atomic_add_32(&MySessionState->cleanupCountdown, -1);
	Assert(0 <= cleanProgress);

	/*
	 * If the countdown just reached 0 we were the last process to finish:
	 * atomically reset it to its pre-runaway sentinel.  Winning this CAS
	 * makes us the single "final cleaner" for the session.
	 */
	bool finalCleaner = compare_and_swap_32((uint32*) &MySessionState->cleanupCountdown,
											0, CLEANUP_COUNTDOWN_BEFORE_RUNAWAY);

	if (finalCleaner)
	{
		/*
		 * The final cleaner is responsible to reset the runaway flag,
		 * and enable the runaway detection process.
		 */
		RunawayCleaner_RunawayCleanupDoneForSession();
	}

	/*
	 * Finally we are done with all critical cleanup, which includes releasing all our memory and
	 * releasing our cleanup counter so that another session can be marked as runaway, if needed.
	 * Now, we have some head room to actually record our usage.
	 */
	write_stderr("Logging memory usage because of runaway cleanup. Note, this is a post-cleanup logging and may be incomplete.");
	MemoryAccounting_SaveToLog();
	MemoryContextStats(TopMemoryContext);
}
/*
 * InitProcess -- initialize a per-process data structure for this backend
 */
void
InitProcess(void)
{
	/* use volatile pointer to prevent code rearrangement */
	volatile PROC_HDR *procglobal = ProcGlobal;
	int			i;

	/*
	 * ProcGlobal should be set up already (if we are a backend, we inherit
	 * this by fork() or EXEC_BACKEND mechanism from the postmaster).
	 */
	if (procglobal == NULL)
		elog(PANIC, "proc header uninitialized");

	if (MyProc != NULL)
		elog(ERROR, "you already exist");

	/* Atomically pop a PGPROC off ProcGlobal's free list; NULL means none left. */
	MyProc = RemoveFirst();

	if (MyProc == NULL)
	{
		ereport(FATAL,
				(errcode(ERRCODE_TOO_MANY_CONNECTIONS),
				 errmsg("sorry, too many clients already")));
	}

	if (gp_debug_pgproc)
	{
		elog(LOG, "allocating PGPROC entry for pid %d, freeProcs (prev offset, new offset): (%ld, %ld)",
			 MyProcPid, MAKE_OFFSET(MyProc), MyProc->links.next);
	}

	set_spins_per_delay(procglobal->spins_per_delay);

	int mppLocalProcessSerial = gp_atomic_add_32(&procglobal->mppLocalProcessCounter, 1);

	lockHolderProcPtr = MyProc;

	/* Set the next pointer to INVALID_OFFSET */
	MyProc->links.next = INVALID_OFFSET;

	/*
	 * Initialize all fields of MyProc, except for the semaphore which was
	 * prepared for us by InitProcGlobal.
	 */
	SHMQueueElemInit(&(MyProc->links));
	MyProc->waitStatus = STATUS_OK;
	MyProc->xid = InvalidTransactionId;
	MyProc->xmin = InvalidTransactionId;
	MyProc->pid = MyProcPid;
	/* databaseId and roleId will be filled in later */
	MyProc->databaseId = InvalidOid;
	MyProc->roleId = InvalidOid;
	MyProc->inVacuum = false;
	MyProc->postmasterResetRequired = true;
	MyProc->lwWaiting = false;
	MyProc->lwExclusive = false;
	MyProc->lwWaitLink = NULL;
	MyProc->waitLock = NULL;
	MyProc->waitProcLock = NULL;
	for (i = 0; i < NUM_LOCK_PARTITIONS; i++)
		SHMQueueInit(&(MyProc->myProcLocks[i]));

	/*
	 * mppLocalProcessSerial uniquely identifies this backend process among
	 * all those that our parent postmaster process creates over its lifetime.
	 *
	 * Since we use the process serial number to decide if we should
	 * deliver a response from a server under this spin, we need to
	 * assign it under the spin lock.
	 */
	MyProc->mppLocalProcessSerial = mppLocalProcessSerial;

	/*
	 * A nonzero gp_session_id uniquely identifies an MPP client session
	 * over the lifetime of the entry postmaster process. A qDisp passes
	 * its gp_session_id down to all of its qExecs. If this is a qExec,
	 * we have already received the gp_session_id from the qDisp.
	 */
	elog(DEBUG1,"InitProcess(): gp_session_id %d", gp_session_id);
	if (Gp_role == GP_ROLE_DISPATCH && gp_session_id == -1)
		gp_session_id = mppLocalProcessSerial;
	MyProc->mppSessionId = gp_session_id;

	MyProc->mppIsWriter = Gp_is_writer;

	/*
	 * We might be reusing a semaphore that belonged to a failed process. So
	 * be careful and reinitialize its value here. (This is not strictly
	 * necessary anymore, but seems like a good idea for cleanliness.)
	 */
	PGSemaphoreReset(&MyProc->sem);

	/* Set wait portal (do not check if resource scheduling is enabled) */
	MyProc->waitPortalId = INVALID_PORTALID;

	MyProc->queryCommandId = -1;

	/*
	 * Arrange to clean up at backend exit.
	 */
	on_shmem_exit(ProcKill, 0);

	/*
	 * Now that we have a PGPROC, we could try to acquire locks, so initialize
	 * the deadlock checker.
	 */
	InitDeadLockChecking();
}