static void
yaml_ofstart(void *state, char *fname, bool isnull)
{
    word_table *p;
    pgspParserContext *ctx = (pgspParserContext *)state;
    char       *s;

    p = search_word_table(propfields, fname, ctx->mode);
    if (!p)
    {
        ereport(DEBUG1,
                (errmsg("Short JSON parser encountered unknown field name: \"%s\".", fname),
                 errdetail_log("INPUT: \"%s\"", ctx->org_string)));
    }
    s = (p ? p->longname : fname);

    if (!bms_is_member(ctx->level, ctx->first))
    {
        appendStringInfoString(ctx->dest, "\n");
        appendStringInfoSpaces(ctx->dest, ctx->level * INDENT_STEP);
    }
    else
        ctx->first = bms_del_member(ctx->first, ctx->level);

    ctx->valconverter = NULL;

    /*
     * Best-effort repair: emit the field name as a YAML key.  The flattened
     * original appears to have dropped this step, which would leave the key
     * missing from the output.
     */
    appendStringInfoString(ctx->dest, s);
    appendStringInfoString(ctx->dest, ": ");

    ctx->fname = s;
    ctx->valconverter = (p ? p->converter : NULL);
}
static void
json_ofstart(void *state, char *fname, bool isnull)
{
    word_table *p;
    pgspParserContext *ctx = (pgspParserContext *)state;
    char       *fn;

    ctx->remove = false;
    p = search_word_table(propfields, fname, ctx->mode);
    if (!p)
    {
        ereport(DEBUG1,
                (errmsg("JSON parser encountered unknown field name: \"%s\".", fname),
                 errdetail_log("INPUT: \"%s\"", ctx->org_string)));
    }

    ctx->remove = (ctx->mode == PGSP_JSON_NORMALIZE &&
                   (!p || !p->normalize_use));
    if (ctx->remove)
        return;

    if (!bms_is_member(ctx->level, ctx->first))
    {
        appendStringInfoChar(ctx->dest, ',');
        if (ctx->mode == PGSP_JSON_INFLATE)
            appendStringInfoChar(ctx->dest, '\n');
    }
    else
        ctx->first = bms_del_member(ctx->first, ctx->level);

    if (ctx->mode == PGSP_JSON_INFLATE)
        appendStringInfoSpaces(ctx->dest, ctx->level * INDENT_STEP);

    if (!p || !p->longname)
        fn = fname;
    else if (ctx->mode == PGSP_JSON_INFLATE)
        fn = p->longname;
    else
        fn = p->shortname;

    escape_json(ctx->dest, fn);
    ctx->fname = fn;
    ctx->valconverter = (p ? p->converter : NULL);

    appendStringInfoChar(ctx->dest, ':');

    if (ctx->mode == PGSP_JSON_INFLATE)
        appendStringInfoChar(ctx->dest, ' ');
}
void
reportException(PyObject *pErrType, PyObject *pErrValue, PyObject *pErrTraceback)
{
    char       *errName,
               *errValue,
               *errTraceback = "";
    PyObject   *traceback_list;
    PyObject   *pTemp;
    PyObject   *tracebackModule = PyImport_ImportModule("traceback");
    PyObject   *format_exception = PyObject_GetAttrString(tracebackModule,
                                                          "format_exception");
    PyObject   *newline = PyString_FromString("\n");
    int         severity;

    PyErr_NormalizeException(&pErrType, &pErrValue, &pErrTraceback);

    pTemp = PyObject_GetAttrString(pErrType, "__name__");
    errName = PyString_AsString(pTemp);
    errValue = PyString_AsString(PyObject_Str(pErrValue));

    if (pErrTraceback != NULL)
    {
        traceback_list = PyObject_CallFunction(format_exception, "(O,O,O)",
                                               pErrType, pErrValue, pErrTraceback);
        errTraceback = PyString_AsString(PyObject_CallMethod(newline, "join",
                                                             "(O)", traceback_list));
        Py_DECREF(pErrTraceback);
        Py_DECREF(traceback_list);
    }

    /* Report at WARNING rather than ERROR if the transaction block is already aborted. */
    if (IsAbortedTransactionBlockState())
        severity = WARNING;
    else
        severity = ERROR;

    if (errstart(severity, __FILE__, __LINE__, PG_FUNCNAME_MACRO, TEXTDOMAIN))
    {
        errmsg("Error in python: %s", errName);
        errdetail("%s", errValue);
        errdetail_log("%s", errTraceback);
    }

    Py_DECREF(pErrType);
    Py_DECREF(pErrValue);
    Py_DECREF(format_exception);
    Py_DECREF(tracebackModule);
    Py_DECREF(newline);
    Py_DECREF(pTemp);

    errfinish(0);
}
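/*
 * Illustrative caller for reportException(), not taken from the source above:
 * a minimal sketch assuming the surrounding extension runs a Python snippet
 * through the CPython C API and forwards any pending exception.  The name
 * run_python_snippet and the way the snippet is executed are assumptions made
 * for the example; only PyRun_String/PyErr_Fetch and reportException() itself
 * are existing interfaces.  PyErr_Fetch() hands us owned references, which
 * matches the Py_DECREF calls reportException() performs.
 */
static void
run_python_snippet(const char *src)
{
    PyObject   *globals = PyDict_New();
    PyObject   *result = PyRun_String(src, Py_file_input, globals, globals);

    if (result == NULL)
    {
        /* a Python exception is pending; fetch it and report via ereport */
        PyObject   *ptype,
                   *pvalue,
                   *ptraceback;

        PyErr_Fetch(&ptype, &pvalue, &ptraceback);
        reportException(ptype, pvalue, ptraceback);
    }
    else
        Py_DECREF(result);

    /* not reached when reportException() raises ERROR (errfinish longjmps) */
    Py_DECREF(globals);
}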
static void
xml_ofstart(void *state, char *fname, bool isnull)
{
    word_table *p;
    pgspParserContext *ctx = (pgspParserContext *)state;
    char       *s;

    p = search_word_table(propfields, fname, ctx->mode);
    if (!p)
    {
        ereport(DEBUG1,
                (errmsg("Short JSON parser encountered unknown field name: \"%s\".", fname),
                 errdetail_log("INPUT: \"%s\"", ctx->org_string)));
    }
    s = (p ? p->longname : fname);

    /*
     * Save the current processing context.  There's no problem if P_Plan
     * appears recursively.
     */
    if (p && (p->tag == P_Plan || p->tag == P_Triggers))
        ctx->processing = p->tag;

    appendStringInfoChar(ctx->dest, '\n');
    appendStringInfoSpaces(ctx->dest, (ctx->level + 1) * INDENT_STEP);

    ctx->valconverter = NULL;

    appendStringInfoChar(ctx->dest, '<');
    appendStringInfoString(ctx->dest, escape_xml(hyphenate_words(ctx, s)));
    appendStringInfoChar(ctx->dest, '>');
    ctx->valconverter = (p ? p->converter : NULL);

    /*
     * If the object field name is Plans or Triggers, the value should be an
     * array whose items are tagged by something other than "Item".  "Item"s
     * appear only in the Output field.
     */
    if (p && (p->tag == P_Plans || p->tag == P_Triggers))
        ctx->not_item = bms_add_member(ctx->not_item, ctx->level + 1);
    else
        ctx->not_item = bms_del_member(ctx->not_item, ctx->level + 1);
}
/*
 * ProcSleep -- put a process to sleep on the specified lock
 *
 * Caller must have set MyProc->heldLocks to reflect locks already held
 * on the lockable object by this process (under all XIDs).
 *
 * The lock table's partition lock must be held at entry, and will be held
 * at exit.
 *
 * Result: STATUS_OK if we acquired the lock, STATUS_ERROR if not (deadlock).
 *
 * ASSUME: that no one will fiddle with the queue until after
 *      we release the partition lock.
 *
 * NOTES: The process queue is now a priority queue for locking.
 */
int
ProcSleep(LOCALLOCK *locallock, LockMethod lockMethodTable)
{
    LOCKMODE    lockmode = locallock->tag.mode;
    LOCK       *lock = locallock->lock;
    PROCLOCK   *proclock = locallock->proclock;
    uint32      hashcode = locallock->hashcode;
    LWLock     *partitionLock = LockHashPartitionLock(hashcode);
    PROC_QUEUE *waitQueue = &(lock->waitProcs);
    LOCKMASK    myHeldLocks = MyProc->heldLocks;
    bool        early_deadlock = false;
    bool        allow_autovacuum_cancel = true;
    int         myWaitStatus;
    PGPROC     *proc;
    int         i;

    /*
     * Determine where to add myself in the wait queue.
     *
     * Normally I should go at the end of the queue.  However, if I already
     * hold locks that conflict with the request of any previous waiter, put
     * myself in the queue just in front of the first such waiter.  This is
     * not a necessary step, since deadlock detection would move me to before
     * that waiter anyway; but it's relatively cheap to detect such a
     * conflict immediately, and avoid delaying till deadlock timeout.
     *
     * Special case: if I find I should go in front of some waiter, check to
     * see if I conflict with already-held locks or the requests before that
     * waiter.  If not, then just grant myself the requested lock immediately.
     * This is the same as the test for immediate grant in LockAcquire, except
     * we are only considering the part of the wait queue before my insertion
     * point.
     */
    if (myHeldLocks != 0)
    {
        LOCKMASK    aheadRequests = 0;

        proc = (PGPROC *) waitQueue->links.next;
        for (i = 0; i < waitQueue->size; i++)
        {
            /* Must he wait for me? */
            if (lockMethodTable->conflictTab[proc->waitLockMode] & myHeldLocks)
            {
                /* Must I wait for him ? */
                if (lockMethodTable->conflictTab[lockmode] & proc->heldLocks)
                {
                    /*
                     * Yes, so we have a deadlock.  Easiest way to clean up
                     * correctly is to call RemoveFromWaitQueue(), but we
                     * can't do that until we are *on* the wait queue.  So, set
                     * a flag to check below, and break out of loop.  Also,
                     * record deadlock info for later message.
                     */
                    RememberSimpleDeadLock(MyProc, lockmode, lock, proc);
                    early_deadlock = true;
                    break;
                }
                /* I must go before this waiter.  Check special case. */
                if ((lockMethodTable->conflictTab[lockmode] & aheadRequests) == 0 &&
                    LockCheckConflicts(lockMethodTable, lockmode, lock,
                                       proclock) == STATUS_OK)
                {
                    /* Skip the wait and just grant myself the lock. */
                    GrantLock(lock, proclock, lockmode);
                    GrantAwaitedLock();
                    return STATUS_OK;
                }
                /* Break out of loop to put myself before him */
                break;
            }
            /* Nope, so advance to next waiter */
            aheadRequests |= LOCKBIT_ON(proc->waitLockMode);
            proc = (PGPROC *) proc->links.next;
        }

        /*
         * If we fall out of loop normally, proc points to waitQueue head, so
         * we will insert at tail of queue as desired.
         */
    }
    else
    {
        /* I hold no locks, so I can't push in front of anyone. */
        proc = (PGPROC *) &(waitQueue->links);
    }

    /*
     * Insert self into queue, ahead of the given proc (or at tail of queue).
     */
    SHMQueueInsertBefore(&(proc->links), &(MyProc->links));
    waitQueue->size++;

    lock->waitMask |= LOCKBIT_ON(lockmode);

    /* Set up wait information in PGPROC object, too */
    MyProc->waitLock = lock;
    MyProc->waitProcLock = proclock;
    MyProc->waitLockMode = lockmode;

    MyProc->waitStatus = STATUS_WAITING;

    /*
     * If we detected deadlock, give up without waiting.  This must agree with
     * CheckDeadLock's recovery code, except that we shouldn't release the
     * semaphore since we haven't tried to lock it yet.
     */
    if (early_deadlock)
    {
        RemoveFromWaitQueue(MyProc, hashcode);
        return STATUS_ERROR;
    }

    /* mark that we are waiting for a lock */
    lockAwaited = locallock;

    /*
     * Release the lock table's partition lock.
     *
     * NOTE: this may also cause us to exit critical-section state, possibly
     * allowing a cancel/die interrupt to be accepted.  This is OK because we
     * have recorded the fact that we are waiting for a lock, and so
     * LockErrorCleanup will clean up if cancel/die happens.
     */
    LWLockRelease(partitionLock);

    /*
     * Also, now that we will successfully clean up after an ereport, it's
     * safe to check to see if there's a buffer pin deadlock against the
     * Startup process.  Of course, that's only necessary if we're doing Hot
     * Standby and are not the Startup process ourselves.
     */
    if (RecoveryInProgress() && !InRecovery)
        CheckRecoveryConflictDeadlock();

    /* Reset deadlock_state before enabling the timeout handler */
    deadlock_state = DS_NOT_YET_CHECKED;
    got_deadlock_timeout = false;

    /*
     * Set timer so we can wake up after awhile and check for a deadlock.  If
     * a deadlock is detected, the handler releases the process's semaphore
     * and sets MyProc->waitStatus = STATUS_ERROR, allowing us to know that we
     * must report failure rather than success.
     *
     * By delaying the check until we've waited for a bit, we can avoid
     * running the rather expensive deadlock-check code in most cases.
     *
     * If LockTimeout is set, also enable the timeout for that.  We can save a
     * few cycles by enabling both timeout sources in one call.
     */
    if (LockTimeout > 0)
    {
        EnableTimeoutParams timeouts[2];

        timeouts[0].id = DEADLOCK_TIMEOUT;
        timeouts[0].type = TMPARAM_AFTER;
        timeouts[0].delay_ms = DeadlockTimeout;
        timeouts[1].id = LOCK_TIMEOUT;
        timeouts[1].type = TMPARAM_AFTER;
        timeouts[1].delay_ms = LockTimeout;
        enable_timeouts(timeouts, 2);
    }
    else
        enable_timeout_after(DEADLOCK_TIMEOUT, DeadlockTimeout);

    /*
     * If somebody wakes us between LWLockRelease and WaitLatch, the latch
     * will not wait.  But a set latch does not necessarily mean that the lock
     * is free now, as there are many other sources for latch sets than
     * somebody releasing the lock.
     *
     * We process interrupts whenever the latch has been set, so cancel/die
     * interrupts are processed quickly.  This means we must not mind losing
     * control to a cancel/die interrupt here.  We don't, because we have no
     * shared-state-change work to do after being granted the lock (the
     * grantor did it all).  We do have to worry about canceling the deadlock
     * timeout and updating the locallock table, but if we lose control to an
     * error, LockErrorCleanup will fix that up.
     */
    do
    {
        WaitLatch(MyLatch, WL_LATCH_SET, 0);
        ResetLatch(MyLatch);

        /* check for deadlocks first, as that's probably log-worthy */
        if (got_deadlock_timeout)
        {
            CheckDeadLock();
            got_deadlock_timeout = false;
        }
        CHECK_FOR_INTERRUPTS();

        /*
         * waitStatus could change from STATUS_WAITING to something else
         * asynchronously.  Read it just once per loop to prevent surprising
         * behavior (such as missing log messages).
         */
        myWaitStatus = *((volatile int *) &MyProc->waitStatus);

        /*
         * If we are not deadlocked, but are waiting on an autovacuum-induced
         * task, send a signal to interrupt it.
         */
        if (deadlock_state == DS_BLOCKED_BY_AUTOVACUUM && allow_autovacuum_cancel)
        {
            PGPROC     *autovac = GetBlockingAutoVacuumPgproc();
            PGXACT     *autovac_pgxact = &ProcGlobal->allPgXact[autovac->pgprocno];

            LWLockAcquire(ProcArrayLock, LW_EXCLUSIVE);

            /*
             * Only do it if the worker is not working to protect against Xid
             * wraparound.
             */
            if ((autovac_pgxact->vacuumFlags & PROC_IS_AUTOVACUUM) &&
                !(autovac_pgxact->vacuumFlags & PROC_VACUUM_FOR_WRAPAROUND))
            {
                int         pid = autovac->pid;
                StringInfoData locktagbuf;
                StringInfoData logbuf;  /* errdetail for server log */

                initStringInfo(&locktagbuf);
                initStringInfo(&logbuf);
                DescribeLockTag(&locktagbuf, &lock->tag);
                appendStringInfo(&logbuf,
                                 _("Process %d waits for %s on %s."),
                                 MyProcPid,
                                 GetLockmodeName(lock->tag.locktag_lockmethodid,
                                                 lockmode),
                                 locktagbuf.data);

                /* release lock as quickly as possible */
                LWLockRelease(ProcArrayLock);

                ereport(LOG,
                        (errmsg("sending cancel to blocking autovacuum PID %d",
                                pid),
                         errdetail_log("%s", logbuf.data)));

                pfree(logbuf.data);
                pfree(locktagbuf.data);

                /* send the autovacuum worker Back to Old Kent Road */
                if (kill(pid, SIGINT) < 0)
                {
                    /* Just a warning to allow multiple callers */
                    ereport(WARNING,
                            (errmsg("could not send signal to process %d: %m",
                                    pid)));
                }
            }
            else
                LWLockRelease(ProcArrayLock);

            /* prevent signal from being resent more than once */
            allow_autovacuum_cancel = false;
        }

        /*
         * If awoken after the deadlock check interrupt has run, and
         * log_lock_waits is on, then report about the wait.
         */
        if (log_lock_waits && deadlock_state != DS_NOT_YET_CHECKED)
        {
            StringInfoData buf,
                        lock_waiters_sbuf,
                        lock_holders_sbuf;
            const char *modename;
            long        secs;
            int         usecs;
            long        msecs;
            SHM_QUEUE  *procLocks;
            PROCLOCK   *proclock;
            bool        first_holder = true,
                        first_waiter = true;
            int         lockHoldersNum = 0;

            initStringInfo(&buf);
            initStringInfo(&lock_waiters_sbuf);
            initStringInfo(&lock_holders_sbuf);

            DescribeLockTag(&buf, &locallock->tag.lock);
            modename = GetLockmodeName(locallock->tag.lock.locktag_lockmethodid,
                                       lockmode);
            TimestampDifference(get_timeout_start_time(DEADLOCK_TIMEOUT),
                                GetCurrentTimestamp(),
                                &secs, &usecs);
            msecs = secs * 1000 + usecs / 1000;
            usecs = usecs % 1000;

            /*
             * we loop over the lock's procLocks to gather a list of all
             * holders and waiters.  Thus we will be able to provide more
             * detailed information for lock debugging purposes.
             *
             * lock->procLocks contains all processes which hold or wait for
             * this lock.
             */
            LWLockAcquire(partitionLock, LW_SHARED);

            procLocks = &(lock->procLocks);
            proclock = (PROCLOCK *) SHMQueueNext(procLocks, procLocks,
                                                 offsetof(PROCLOCK, lockLink));

            while (proclock)
            {
                /*
                 * we are a waiter if myProc->waitProcLock == proclock; we are
                 * a holder if it is NULL or something different
                 */
                if (proclock->tag.myProc->waitProcLock == proclock)
                {
                    if (first_waiter)
                    {
                        appendStringInfo(&lock_waiters_sbuf, "%d",
                                         proclock->tag.myProc->pid);
                        first_waiter = false;
                    }
                    else
                        appendStringInfo(&lock_waiters_sbuf, ", %d",
                                         proclock->tag.myProc->pid);
                }
                else
                {
                    if (first_holder)
                    {
                        appendStringInfo(&lock_holders_sbuf, "%d",
                                         proclock->tag.myProc->pid);
                        first_holder = false;
                    }
                    else
                        appendStringInfo(&lock_holders_sbuf, ", %d",
                                         proclock->tag.myProc->pid);

                    lockHoldersNum++;
                }

                proclock = (PROCLOCK *) SHMQueueNext(procLocks, &proclock->lockLink,
                                                     offsetof(PROCLOCK, lockLink));
            }

            LWLockRelease(partitionLock);

            if (deadlock_state == DS_SOFT_DEADLOCK)
                ereport(LOG,
                        (errmsg("process %d avoided deadlock for %s on %s by rearranging queue order after %ld.%03d ms",
                                MyProcPid, modename, buf.data, msecs, usecs),
                         (errdetail_log_plural("Process holding the lock: %s. Wait queue: %s.",
                                               "Processes holding the lock: %s. Wait queue: %s.",
                                               lockHoldersNum, lock_holders_sbuf.data, lock_waiters_sbuf.data))));
            else if (deadlock_state == DS_HARD_DEADLOCK)
            {
                /*
                 * This message is a bit redundant with the error that will be
                 * reported subsequently, but in some cases the error report
                 * might not make it to the log (eg, if it's caught by an
                 * exception handler), and we want to ensure all long-wait
                 * events get logged.
                 */
                ereport(LOG,
                        (errmsg("process %d detected deadlock while waiting for %s on %s after %ld.%03d ms",
                                MyProcPid, modename, buf.data, msecs, usecs),
                         (errdetail_log_plural("Process holding the lock: %s. Wait queue: %s.",
                                               "Processes holding the lock: %s. Wait queue: %s.",
                                               lockHoldersNum, lock_holders_sbuf.data, lock_waiters_sbuf.data))));
            }

            if (myWaitStatus == STATUS_WAITING)
                ereport(LOG,
                        (errmsg("process %d still waiting for %s on %s after %ld.%03d ms",
                                MyProcPid, modename, buf.data, msecs, usecs),
                         (errdetail_log_plural("Process holding the lock: %s. Wait queue: %s.",
                                               "Processes holding the lock: %s. Wait queue: %s.",
                                               lockHoldersNum, lock_holders_sbuf.data, lock_waiters_sbuf.data))));
            else if (myWaitStatus == STATUS_OK)
                ereport(LOG,
                        (errmsg("process %d acquired %s on %s after %ld.%03d ms",
                                MyProcPid, modename, buf.data, msecs, usecs)));
            else
            {
                Assert(myWaitStatus == STATUS_ERROR);

                /*
                 * Currently, the deadlock checker always kicks its own
                 * process, which means that we'll only see STATUS_ERROR when
                 * deadlock_state == DS_HARD_DEADLOCK, and there's no need to
                 * print redundant messages.  But for completeness and
                 * future-proofing, print a message if it looks like someone
                 * else kicked us off the lock.
                 */
                if (deadlock_state != DS_HARD_DEADLOCK)
                    ereport(LOG,
                            (errmsg("process %d failed to acquire %s on %s after %ld.%03d ms",
                                    MyProcPid, modename, buf.data, msecs, usecs),
                             (errdetail_log_plural("Process holding the lock: %s. Wait queue: %s.",
                                                   "Processes holding the lock: %s. Wait queue: %s.",
                                                   lockHoldersNum, lock_holders_sbuf.data, lock_waiters_sbuf.data))));
            }

            /*
             * At this point we might still need to wait for the lock.  Reset
             * state so we don't print the above messages again.
             */
            deadlock_state = DS_NO_DEADLOCK;

            pfree(buf.data);
            pfree(lock_holders_sbuf.data);
            pfree(lock_waiters_sbuf.data);
        }
    } while (myWaitStatus == STATUS_WAITING);

    /*
     * Disable the timers, if they are still running.
     * As in LockErrorCleanup, we must preserve the LOCK_TIMEOUT indicator
     * flag: if a lock timeout has already caused QueryCancelPending to become
     * set, we want the cancel to be reported as a lock timeout, not a user
     * cancel.
     */
    if (LockTimeout > 0)
    {
        DisableTimeoutParams timeouts[2];

        timeouts[0].id = DEADLOCK_TIMEOUT;
        timeouts[0].keep_indicator = false;
        timeouts[1].id = LOCK_TIMEOUT;
        timeouts[1].keep_indicator = true;
        disable_timeouts(timeouts, 2);
    }
    else
        disable_timeout(DEADLOCK_TIMEOUT, false);

    /*
     * Re-acquire the lock table's partition lock.  We have to do this to hold
     * off cancel/die interrupts before we can mess with lockAwaited (else we
     * might have a missed or duplicated locallock update).
     */
    LWLockAcquire(partitionLock, LW_EXCLUSIVE);

    /*
     * We no longer want LockErrorCleanup to do anything.
     */
    lockAwaited = NULL;

    /*
     * If we got the lock, be sure to remember it in the locallock table.
     */
    if (MyProc->waitStatus == STATUS_OK)
        GrantAwaitedLock();

    /*
     * We don't have to do anything else, because the awaker did all the
     * necessary update of the lock table and MyProc.
     */
    return MyProc->waitStatus;
}
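/*
 * Sketch of a ProcSleep() caller, loosely modeled on WaitOnLock() in lock.c
 * but heavily simplified: the real routine also maintains the in-progress
 * locallock list, resource-owner bookkeeping, and wait statistics.  Treat the
 * structure below as an assumption-laden outline of how the STATUS_OK /
 * STATUS_ERROR result is consumed, not as the actual function.
 */
static void
wait_on_lock_sketch(LOCALLOCK *locallock, LockMethod lockMethodTable)
{
    if (ProcSleep(locallock, lockMethodTable) != STATUS_OK)
    {
        /*
         * We failed because of a deadlock; see CheckDeadLock().  Release the
         * partition lock (ProcSleep returns with it held) before reporting,
         * since DeadLockReport() raises an ERROR and must not do so while an
         * LWLock is held.
         */
        LWLockRelease(LockHashPartitionLock(locallock->hashcode));
        DeadLockReport();       /* does not return */
    }

    /* On STATUS_OK the lock was granted and recorded by ProcSleep(). */
}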
/*
 * A function having everything to do with logging, which ought to be factored
 * out one day to make a start on the Thoughts-on-logging wiki ideas.
 */
static void
reLogWithChangedLevel(int level)
{
    ErrorData  *edata = CopyErrorData();
    int         sqlstate = edata->sqlerrcode;
    int         category = ERRCODE_TO_CATEGORY(sqlstate);

    FlushErrorState();

    if (WARNING > level)
    {
        if (ERRCODE_SUCCESSFUL_COMPLETION != category)
            sqlstate = ERRCODE_SUCCESSFUL_COMPLETION;
    }
    else if (WARNING == level)
    {
        if (ERRCODE_WARNING != category && ERRCODE_NO_DATA != category)
            sqlstate = ERRCODE_WARNING;
    }
    else if (ERRCODE_WARNING == category || ERRCODE_NO_DATA == category ||
             ERRCODE_SUCCESSFUL_COMPLETION == category)
        sqlstate = ERRCODE_INTERNAL_ERROR;

#if PG_VERSION_NUM >= 90500
    edata->elevel = level;
    edata->sqlerrcode = sqlstate;
    PG_TRY();
    {
        ThrowErrorData(edata);
    }
    PG_CATCH();
    {
        FreeErrorData(edata);   /* otherwise this wouldn't happen in ERROR case */
        PG_RE_THROW();
    }
    PG_END_TRY();
    FreeErrorData(edata);
#else
    if (!errstart(level, edata->filename, edata->lineno,
                  edata->funcname, NULL))
    {
        FreeErrorData(edata);
        return;
    }

    errcode(sqlstate);
    if (edata->message)
        errmsg("%s", edata->message);
    if (edata->detail)
        errdetail("%s", edata->detail);
    if (edata->detail_log)
        errdetail_log("%s", edata->detail_log);
    if (edata->hint)
        errhint("%s", edata->hint);
    if (edata->context)
        errcontext("%s", edata->context);   /* this may need to be trimmed */
#if PG_VERSION_NUM >= 90300
    if (edata->schema_name)
        err_generic_string(PG_DIAG_SCHEMA_NAME, edata->schema_name);
    if (edata->table_name)
        err_generic_string(PG_DIAG_TABLE_NAME, edata->table_name);
    if (edata->column_name)
        err_generic_string(PG_DIAG_COLUMN_NAME, edata->column_name);
    if (edata->datatype_name)
        err_generic_string(PG_DIAG_DATATYPE_NAME, edata->datatype_name);
    if (edata->constraint_name)
        err_generic_string(PG_DIAG_CONSTRAINT_NAME, edata->constraint_name);
#endif
    if (edata->internalquery)
        internalerrquery(edata->internalquery);

    FreeErrorData(edata);
    errfinish(0);
#endif
}
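/*
 * Hypothetical usage sketch for reLogWithChangedLevel(), not part of the
 * original file: catch an error raised by some operation and re-emit it at
 * WARNING instead of ERROR.  perform_operation() is an assumed helper that
 * may ereport(ERROR).  Switching back to the caller's memory context first is
 * the usual PG_CATCH() convention, since CopyErrorData() inside
 * reLogWithChangedLevel() must not run in ErrorContext.
 */
static void
try_operation_downgrading_errors(void)
{
    MemoryContext oldcxt = CurrentMemoryContext;

    PG_TRY();
    {
        perform_operation();    /* assumed helper; may throw ERROR */
    }
    PG_CATCH();
    {
        MemoryContextSwitchTo(oldcxt);
        reLogWithChangedLevel(WARNING);     /* downgrade and continue */
    }
    PG_END_TRY();
}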
/*
 * Report a detected deadlock, with available details.
 */
void
DeadLockReport(void)
{
    StringInfoData clientbuf;   /* errdetail for client */
    StringInfoData logbuf;      /* errdetail for server log */
    StringInfoData locktagbuf;
    int         i;

    initStringInfo(&clientbuf);
    initStringInfo(&logbuf);
    initStringInfo(&locktagbuf);

    /* Generate the "waits for" lines sent to the client */
    for (i = 0; i < nDeadlockDetails; i++)
    {
        DEADLOCK_INFO *info = &deadlockDetails[i];
        int         nextpid;

        /* The last proc waits for the first one... */
        if (i < nDeadlockDetails - 1)
            nextpid = info[1].pid;
        else
            nextpid = deadlockDetails[0].pid;

        /* reset locktagbuf to hold next object description */
        resetStringInfo(&locktagbuf);

        DescribeLockTag(&locktagbuf, &info->locktag);

        if (i > 0)
            appendStringInfoChar(&clientbuf, '\n');

        appendStringInfo(&clientbuf,
                         _("Process %d waits for %s on %s; blocked by process %d."),
                         info->pid,
                         GetLockmodeName(info->locktag.locktag_lockmethodid,
                                         info->lockmode),
                         locktagbuf.data,
                         nextpid);
    }

    /* Duplicate all the above for the server ... */
    appendStringInfoString(&logbuf, clientbuf.data);

    /* ... and add info about query strings */
    for (i = 0; i < nDeadlockDetails; i++)
    {
        DEADLOCK_INFO *info = &deadlockDetails[i];

        appendStringInfoChar(&logbuf, '\n');

        appendStringInfo(&logbuf,
                         _("Process %d: %s"),
                         info->pid,
                         pgstat_get_backend_current_activity(info->pid, false));
    }

    ereport(ERROR,
            (errcode(ERRCODE_T_R_DEADLOCK_DETECTED),
             errmsg("deadlock detected"),
             errdetail("%s", clientbuf.data),
             errdetail_log("%s", logbuf.data),
             errhint("See server log for query details.")));
}
static void
json_ofstart(void *state, char *fname, bool isnull)
{
    word_table *p;
    pgspParserContext *ctx = (pgspParserContext *)state;
    char       *fn;

    ctx->remove = false;
    p = search_word_table(propfields, fname, ctx->mode);
    if (!p)
    {
        ereport(DEBUG1,
                (errmsg("JSON parser encountered unknown field name: \"%s\".", fname),
                 errdetail_log("INPUT: \"%s\"", ctx->org_string)));
    }

    ctx->remove = (ctx->mode == PGSP_JSON_NORMALIZE &&
                   (!p || !p->normalize_use));
    if (ctx->remove)
        return;

    if (!bms_is_member(ctx->level, ctx->first))
    {
        appendStringInfoChar(ctx->dest, ',');
        if (ctx->mode == PGSP_JSON_INFLATE)
            appendStringInfoChar(ctx->dest, '\n');
    }
    else
        ctx->first = bms_del_member(ctx->first, ctx->level);

    if (ctx->mode == PGSP_JSON_INFLATE)
        appendStringInfoSpaces(ctx->dest, ctx->level * INDENT_STEP);

    /*
     * We intentionally let some property names have no short name.  Use the
     * long name in those cases.
     */
    if (!p || !p->longname)
        fn = fname;
    else if (ctx->mode == PGSP_JSON_INFLATE ||
             !(p->shortname && p->shortname[0]))
        fn = p->longname;
    else
        fn = p->shortname;

    escape_json(ctx->dest, fn);
    ctx->fname = fn;
    ctx->valconverter = (p ? p->converter : NULL);

    appendStringInfoChar(ctx->dest, ':');

    if (ctx->mode == PGSP_JSON_INFLATE)
        appendStringInfoChar(ctx->dest, ' ');

    if (p && IS_INDENTED_ARRAY(p->tag))
    {
        ctx->current_list = p->tag;
        ctx->list_fname = fname;
        ctx->wlist_level = 0;
    }
}
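/*
 * Sketch of how json_ofstart() and the other *_ofstart callbacks above are
 * driven: they are object-field-start handlers for PostgreSQL's JSON parser
 * (utils/jsonapi.h), registered in a JsonSemAction whose semstate points at
 * the pgspParserContext.  The context initialization below is a guess at the
 * minimum those callbacks need; the real module has its own setup routine and
 * also registers object/array/scalar handlers that maintain ctx.level and
 * ctx.first (and it wires up yaml_ofstart/xml_ofstart for the other output
 * modes the same way).  Only PGSP_JSON_INFLATE, the jsonapi calls, and
 * json_ofstart() itself come from known code; the function name and flow here
 * are illustrative.
 */
static char *
inflate_plan_json_sketch(char *json)
{
    pgspParserContext ctx;
    JsonSemAction sem;
    JsonLexContext *lex;
    StringInfoData out;

    initStringInfo(&out);

    memset(&ctx, 0, sizeof(ctx));
    ctx.dest = &out;
    ctx.mode = PGSP_JSON_INFLATE;
    ctx.org_string = json;

    memset(&sem, 0, sizeof(sem));
    sem.semstate = (void *) &ctx;
    sem.object_field_start = json_ofstart;
    /* object_start/object_end/scalar callbacks would also be set here */

    lex = makeJsonLexContextCstringLen(json, strlen(json), true);
    pg_parse_json(lex, &sem);

    return out.data;
}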