Datum regress_putenv(PG_FUNCTION_ARGS) { MemoryContext oldcontext; char *envbuf; if (!superuser()) elog(ERROR, "must be superuser to change environment variables"); oldcontext = MemoryContextSwitchTo(TopMemoryContext); envbuf = text_to_cstring((text *) PG_GETARG_POINTER(0)); MemoryContextSwitchTo(oldcontext); if (putenv(envbuf) != 0) elog(ERROR, "could not set environment variable: %m"); PG_RETURN_VOID(); }
Datum ginendscan(PG_FUNCTION_ARGS) { IndexScanDesc scan = (IndexScanDesc) PG_GETARG_POINTER(0); GinScanOpaque so = (GinScanOpaque) scan->opaque; if (so != NULL) { freeScanKeys(so->keys, so->nkeys, TRUE); freeScanKeys(so->markPos, so->nkeys, FALSE); MemoryContextDelete(so->tempCtx); pfree(so); } PG_RETURN_VOID(); }
/* --------------------------------------------------------------------- * SQL invokable compression and decompression routines for built in * compression algorithms. All routines have the same SQL signature: * * void fun(internal, int, internal, int, internal, internal) * * If we were to think of this as a C function it would be more like: * * void fun(void *src, size_t src_sz, void *dst, size_t dst_sz, * size_t *dst_used, void *opaque) * * The meaning of each argument is as follows: * src - A pointer to data to be compressed/decompressed * src_sz - The number of bytes to compress/decompress * dst - A pointer to pre-allocated memory. The data compressed or * decompressed by the function are written here. * dst_sz - The amount of memory in bytes allocated at dst * dst_used - The number of bytes written. If dst_sz was too small to * store the data, this is set to zero. * opaque - Internal to the compression function. */ Datum quicklz_compress(PG_FUNCTION_ARGS) { const void *src = PG_GETARG_POINTER(0); int32 src_sz = PG_GETARG_INT32(1); void *dst = PG_GETARG_POINTER(2); int32 dst_sz = PG_GETARG_INT32(3); int32 *dst_used = (int32 *) PG_GETARG_POINTER(4); CompressionState *cs = (CompressionState *)PG_GETARG_POINTER(5); quicklz_state *state = (quicklz_state *)cs->opaque; Insist(dst_sz >= quicklz_desired_sz(src_sz)); *dst_used = state->compress_fn(state->level, src, dst, (size_t)src_sz, state->scratch); PG_RETURN_VOID(); }
Datum
win866_to_iso(PG_FUNCTION_ARGS)
{
	unsigned char *src = (unsigned char *) PG_GETARG_CSTRING(2);
	unsigned char *dest = (unsigned char *) PG_GETARG_CSTRING(3);
	int			len = PG_GETARG_INT32(4);
	unsigned char *intermediate;

	CHECK_ENCODING_CONVERSION_ARGS(PG_WIN866, PG_ISO_8859_5);

	/* Use mic/KOI8R as intermediary, see comment in win866_to_win1251() */
	/* worst-case expansion plus one byte for the NUL terminator */
	intermediate = palloc(len * ENCODING_GROWTH_RATE + 1);
	win8662mic(src, intermediate, len);
	mic2iso(intermediate, dest, strlen((char *) intermediate));
	pfree(intermediate);

	PG_RETURN_VOID();
}
/* * pg_xlog_replay_resume - resume recovery now */ Datum pg_xlog_replay_resume(PG_FUNCTION_ARGS) { if (!superuser()) ereport(ERROR, (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE), (errmsg("must be superuser to control recovery")))); if (!RecoveryInProgress()) ereport(ERROR, (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE), errmsg("recovery is not in progress"), errhint("Recovery control functions can only be executed during recovery."))); SetRecoveryPause(false); PG_RETURN_VOID(); }
/* * worker_apply_shard_ddl_command extends table, index, or constraint names in * the given DDL command. The function then applies this extended DDL command * against the database. */ Datum worker_apply_shard_ddl_command(PG_FUNCTION_ARGS) { uint64 shardId = PG_GETARG_INT64(0); text *schemaNameText = PG_GETARG_TEXT_P(1); text *ddlCommandText = PG_GETARG_TEXT_P(2); char *schemaName = text_to_cstring(schemaNameText); const char *ddlCommand = text_to_cstring(ddlCommandText); Node *ddlCommandNode = ParseTreeNode(ddlCommand); /* extend names in ddl command and apply extended command */ RelayEventExtendNames(ddlCommandNode, schemaName, shardId); ProcessUtility(ddlCommandNode, ddlCommand, PROCESS_UTILITY_TOPLEVEL, NULL, None_Receiver, NULL); PG_RETURN_VOID(); }
/* * Reset state (called by ReScan). */ Datum tsm_system_time_reset(PG_FUNCTION_ARGS) { TableSampleDesc *tsdesc = (TableSampleDesc *) PG_GETARG_POINTER(0); SystemSamplerData *sampler = (SystemSamplerData *) tsdesc->tsmdata; sampler->lt = InvalidOffsetNumber; sampler->start_time = GetCurrentTimestamp(); sampler->end_time = TimestampTzPlusMilliseconds(sampler->start_time, sampler->time); sampler->estblocks = 2; sampler->doneblocks = 0; sampler_random_init_state(sampler->seed, sampler->randstate); sampler->step = random_relative_prime(sampler->nblocks, sampler->randstate); sampler->lb = sampler_random_fract(sampler->randstate) * (sampler->nblocks / sampler->step); PG_RETURN_VOID(); }
/*
 * big5_to_euc_tw - convert BIG5 to EUC-TW, using MIC as intermediary.
 */
Datum
big5_to_euc_tw(PG_FUNCTION_ARGS)
{
	unsigned char *src = (unsigned char *) PG_GETARG_CSTRING(2);
	unsigned char *dest = (unsigned char *) PG_GETARG_CSTRING(3);
	int			len = PG_GETARG_INT32(4);
	unsigned char *buf;

	Assert(PG_GETARG_INT32(0) == PG_BIG5);
	Assert(PG_GETARG_INT32(1) == PG_EUC_TW);
	Assert(len >= 0);

	/*
	 * Allocate one extra byte for the NUL terminator: strlen() below relies
	 * on the intermediate string being terminated, and the worst-case
	 * expansion of len source bytes is len * ENCODING_GROWTH_RATE, which
	 * left no room for the terminator without the + 1.
	 */
	buf = palloc(len * ENCODING_GROWTH_RATE + 1);
	big52mic(src, buf, len);
	mic2euc_tw(buf, dest, strlen((char *) buf));
	pfree(buf);

	PG_RETURN_VOID();
}
/*
 * win1251_to_koi8r - convert WIN1251 to KOI8-R, using MIC as intermediary.
 */
Datum
win1251_to_koi8r(PG_FUNCTION_ARGS)
{
	unsigned char *src = (unsigned char *) PG_GETARG_CSTRING(2);
	unsigned char *dest = (unsigned char *) PG_GETARG_CSTRING(3);
	int			len = PG_GETARG_INT32(4);
	unsigned char *buf;

	Assert(PG_GETARG_INT32(0) == PG_WIN1251);
	Assert(PG_GETARG_INT32(1) == PG_KOI8R);
	Assert(len >= 0);

	/*
	 * Allocate one extra byte for the NUL terminator: strlen() below relies
	 * on the intermediate string being terminated, and the worst-case
	 * expansion of len source bytes is len * ENCODING_GROWTH_RATE, which
	 * left no room for the terminator without the + 1.
	 */
	buf = palloc(len * ENCODING_GROWTH_RATE + 1);
	win12512mic(src, buf, len);
	mic2koi8r(buf, dest, strlen((char *) buf));
	pfree(buf);

	PG_RETURN_VOID();
}
Datum plvdate_unset_nonbizday_day (PG_FUNCTION_ARGS) { DateADT arg1 = PG_GETARG_DATEADT(0); bool arg2 = PG_GETARG_BOOL(1); int y, m, d; bool found = false; int i; if (arg2) { j2date(arg1 + POSTGRES_EPOCH_JDATE, &y, &m, &d); for (i = 0; i < holidays_c; i++) { if (!found && holidays[i].month == m && holidays[i].day == d) found = true; else if (found) { holidays[i-1].month = holidays[i].month; holidays[i-1].day = holidays[i].day; } } if (found) holidays_c -= 1; } else { for (i = 0; i < exceptions_c; i++) if (!found && exceptions[i] == arg1) found = true; else if (found) exceptions[i-1] = exceptions[i]; if (found) exceptions_c -= 1; } if (!found) ereport(ERROR, (errcode(ERRCODE_UNDEFINED_OBJECT), errmsg("nonbizday unregisteration error"), errdetail("Nonbizday not found."))); PG_RETURN_VOID(); }
/* * Update range within LO */ Datum lo_put(PG_FUNCTION_ARGS) { Oid loOid = PG_GETARG_OID(0); int64 offset = PG_GETARG_INT64(1); bytea *str = PG_GETARG_BYTEA_PP(2); LargeObjectDesc *loDesc; int written PG_USED_FOR_ASSERTS_ONLY; CreateFSContext(); loDesc = inv_open(loOid, INV_WRITE, fscxt); inv_seek(loDesc, offset, SEEK_SET); written = inv_write(loDesc, VARDATA_ANY(str), VARSIZE_ANY_EXHDR(str)); Assert(written == VARSIZE_ANY_EXHDR(str)); inv_close(loDesc); PG_RETURN_VOID(); }
Datum wait_pid(PG_FUNCTION_ARGS) { int pid = PG_GETARG_INT32(0); if (!superuser()) elog(ERROR, "must be superuser to check PID liveness"); while (kill(pid, 0) == 0) { CHECK_FOR_INTERRUPTS(); pg_usleep(50000); } if (errno != ESRCH) elog(ERROR, "could not check PID %d liveness: %m", pid); PG_RETURN_VOID(); }
/*
 * win866_to_iso - convert WIN866 to ISO 8859-5, using MIC as intermediary.
 */
Datum
win866_to_iso(PG_FUNCTION_ARGS)
{
	unsigned char *src = (unsigned char *) PG_GETARG_CSTRING(2);
	unsigned char *dest = (unsigned char *) PG_GETARG_CSTRING(3);
	int			len = PG_GETARG_INT32(4);
	unsigned char *buf;

	Assert(PG_GETARG_INT32(0) == PG_WIN866);
	Assert(PG_GETARG_INT32(1) == PG_ISO_8859_5);
	Assert(len >= 0);

	/*
	 * Allocate one extra byte for the NUL terminator: strlen() below relies
	 * on the intermediate string being terminated, and the worst-case
	 * expansion of len source bytes is len * ENCODING_GROWTH_RATE, which
	 * left no room for the terminator without the + 1.
	 */
	buf = palloc(len * ENCODING_GROWTH_RATE + 1);
	win8662mic(src, buf, len);
	mic2iso(buf, dest, strlen((char *) buf));
	pfree(buf);

	PG_RETURN_VOID();
}
/*
 * pg_sleep - delay for N seconds
 */
Datum
pg_sleep(PG_FUNCTION_ARGS)
{
	float8		secs = PG_GETARG_FLOAT8(0);
	float8		endtime;

	/*
	 * We break the requested sleep into segments of no more than 1 second, to
	 * put an upper bound on how long it will take us to respond to a cancel
	 * or die interrupt. (Note that pg_usleep is interruptible by signals on
	 * some platforms but not others.) Also, this method avoids exposing
	 * pg_usleep's upper bound on allowed delays.
	 *
	 * By computing the intended stop time initially, we avoid accumulation of
	 * extra delay across multiple sleeps. This also ensures we won't delay
	 * less than the specified time if pg_usleep is interrupted by other
	 * signals such as SIGHUP.
	 */

	/* GetNowFloat() yields the current time as fractional seconds */
#ifdef HAVE_INT64_TIMESTAMP
#define GetNowFloat() ((float8) GetCurrentTimestamp() / 1000000.0)
#else
#define GetNowFloat() GetCurrentTimestamp()
#endif

	endtime = GetNowFloat() + secs;

	for (;;)
	{
		float8		delay;

		CHECK_FOR_INTERRUPTS();

		/* remaining time until the intended stop point */
		delay = endtime - GetNowFloat();
		if (delay >= 1.0)
			pg_usleep(1000000L);	/* sleep in at-most-1s slices */
		else if (delay > 0.0)
			pg_usleep((long) ceil(delay * 1000000.0));
		else
			break;				/* target time reached */
	}

	PG_RETURN_VOID();
}
Datum cassandra_fdw_validator(PG_FUNCTION_ARGS) { List *options_list = untransformRelOptions(PG_GETARG_DATUM(0)); elog(DEBUG1,"entering function %s",__func__); /* make sure the options are valid */ /* no options are supported */ if (list_length(options_list) > 0) ereport(ERROR, (errcode(ERRCODE_FDW_INVALID_OPTION_NAME), errmsg("invalid options"), errhint("Cassandra FDW doies not support any options"))); PG_RETURN_VOID(); }
Datum ginrescan(PG_FUNCTION_ARGS) { IndexScanDesc scan = (IndexScanDesc) PG_GETARG_POINTER(0); ScanKey scankey = (ScanKey) PG_GETARG_POINTER(1); /* remaining arguments are ignored */ GinScanOpaque so = (GinScanOpaque) scan->opaque; ginFreeScanKeys(so); if (scankey && scan->numberOfKeys > 0) { memmove(scan->keyData, scankey, scan->numberOfKeys * sizeof(ScanKeyData)); } PG_RETURN_VOID(); }
/*
 * Validator for C language functions
 *
 * Make sure that the library file exists, is loadable, and contains
 * the specified link symbol. Also check for a valid function
 * information record.
 */
Datum
fmgr_c_validator(PG_FUNCTION_ARGS)
{
	Oid			funcoid = PG_GETARG_OID(0);
	void	   *libraryhandle;
	HeapTuple	tuple;
	Form_pg_proc proc;
	bool		isnull;
	Datum		tmp;
	char	   *prosrc;			/* used below as the link symbol name */
	char	   *probin;			/* used below as the library file path */

	/*
	 * It'd be most consistent to skip the check if !check_function_bodies,
	 * but the purpose of that switch is to be helpful for pg_dump loading,
	 * and for pg_dump loading it's much better if we *do* check.
	 */

	tuple = SearchSysCache(PROCOID, ObjectIdGetDatum(funcoid), 0, 0, 0);
	if (!HeapTupleIsValid(tuple))
		elog(ERROR, "cache lookup failed for function %u", funcoid);
	proc = (Form_pg_proc) GETSTRUCT(tuple);

	/* prosrc must be present: it names the symbol to resolve */
	tmp = SysCacheGetAttr(PROCOID, tuple, Anum_pg_proc_prosrc, &isnull);
	if (isnull)
		elog(ERROR, "null prosrc");
	prosrc = DatumGetCString(DirectFunctionCall1(textout, tmp));

	/* probin must be present: it names the shared library to load */
	tmp = SysCacheGetAttr(PROCOID, tuple, Anum_pg_proc_probin, &isnull);
	if (isnull)
		elog(ERROR, "null probin");
	probin = DatumGetCString(DirectFunctionCall1(textout, tmp));

	/* loading the library and resolving the symbol is the actual check */
	(void) load_external_function(probin, prosrc, true, &libraryhandle);
	/* also verify the function has a valid function info record */
	(void) fetch_finfo_record(libraryhandle, prosrc);

	ReleaseSysCache(tuple);

	PG_RETURN_VOID();
}
/*
 * btrestrpos() -- restore scan to last saved position
 */
Datum
btrestrpos(PG_FUNCTION_ARGS)
{
	IndexScanDesc scan = (IndexScanDesc) PG_GETARG_POINTER(0);
	BTScanOpaque so = (BTScanOpaque) scan->opaque;

	if (so->markItemIndex >= 0)
	{
		/*
		 * The mark position is on the same page we are currently on. Just
		 * restore the itemIndex.
		 */
		so->currPos.itemIndex = so->markItemIndex;
	}
	else
	{
		/* we aren't holding any read locks, but gotta drop the pin */
		if (BTScanPosIsValid(so->currPos))
		{
			/* Before leaving current page, deal with any killed items */
			/* (skip if mark is the same buffer -- we'd revisit it anyway) */
			if (so->numKilled > 0 && so->currPos.buf != so->markPos.buf)
				_bt_killitems(scan, false);
			ReleaseBuffer(so->currPos.buf);
			so->currPos.buf = InvalidBuffer;
		}

		if (BTScanPosIsValid(so->markPos))
		{
			/* bump pin on mark buffer for assignment to current buffer */
			IncrBufferRefCount(so->markPos.buf);
			/* copy the mark position, but only as many items as are valid */
			memcpy(&so->currPos, &so->markPos,
				   offsetof(BTScanPosData, items[1]) +
				   so->markPos.lastItem * sizeof(BTScanPosItem));
			/* restore the saved tuple data, if index-only data is kept */
			if (so->currTuples)
				memcpy(so->currTuples, so->markTuples,
					   so->markPos.nextTupleOffset);
		}
	}

	PG_RETURN_VOID();
}
/* * Initializes the state. */ Datum tsm_system_time_init(PG_FUNCTION_ARGS) { TableSampleDesc *tsdesc = (TableSampleDesc *) PG_GETARG_POINTER(0); uint32 seed = PG_GETARG_UINT32(1); int32 time = PG_ARGISNULL(2) ? -1 : PG_GETARG_INT32(2); HeapScanDesc scan = tsdesc->heapScan; SystemSamplerData *sampler; if (time < 1) ereport(ERROR, (errcode(ERRCODE_NUMERIC_VALUE_OUT_OF_RANGE), errmsg("invalid time limit"), errhint("Time limit must be positive integer value."))); sampler = palloc0(sizeof(SystemSamplerData)); /* Remember initial values for reinit */ sampler->seed = seed; sampler->nblocks = scan->rs_nblocks; sampler->lt = InvalidOffsetNumber; sampler->estblocks = 2; sampler->doneblocks = 0; sampler->time = time; sampler->start_time = GetCurrentTimestamp(); sampler->end_time = TimestampTzPlusMilliseconds(sampler->start_time, sampler->time); sampler_random_init_state(sampler->seed, sampler->randstate); /* Find relative prime as step size for linear probing. */ sampler->step = random_relative_prime(sampler->nblocks, sampler->randstate); /* * Randomize start position so that blocks close to step size don't have * higher probability of being chosen on very short scan. */ sampler->lb = sampler_random_fract(sampler->randstate) * (sampler->nblocks / sampler->step); tsdesc->tsmdata = (void *) sampler; PG_RETURN_VOID(); }
/*
 * hashrescan() -- rescan an index relation
 */
Datum
hashrescan(PG_FUNCTION_ARGS)
{
	IndexScanDesc scan = (IndexScanDesc) PG_GETARG_POINTER(0);
	ScanKey		scankey = (ScanKey) PG_GETARG_POINTER(1);
	HashScanOpaque so = (HashScanOpaque) scan->opaque;
	Relation	rel = scan->indexRelation;

	/* if we are called from beginscan, so is still NULL */
	if (so)
	{
		/* release any pins we still hold */
		if (BufferIsValid(so->hashso_curbuf))
			_hash_dropbuf(rel, so->hashso_curbuf);
		so->hashso_curbuf = InvalidBuffer;

		if (BufferIsValid(so->hashso_mrkbuf))
			_hash_dropbuf(rel, so->hashso_mrkbuf);
		so->hashso_mrkbuf = InvalidBuffer;

		/* release lock on bucket, too */
		if (so->hashso_bucket_blkno)
			_hash_droplock(rel, so->hashso_bucket_blkno, HASH_SHARE);
		so->hashso_bucket_blkno = 0;
	}

	/* set positions invalid (this will cause _hash_first call) */
	ItemPointerSetInvalid(&(scan->currentItemData));
	ItemPointerSetInvalid(&(scan->currentMarkData));

	/* Update scan key, if a new one is given */
	if (scankey && scan->numberOfKeys > 0)
	{
		memmove(scan->keyData, scankey,
				scan->numberOfKeys * sizeof(ScanKeyData));
		/* new keys mean any previously located bucket is no longer valid */
		if (so)
			so->hashso_bucket_valid = false;
	}

	PG_RETURN_VOID();
}
/*
 * quicklz_destructor - free the quicklz compression state, if any.
 */
Datum
quicklz_destructor(PG_FUNCTION_ARGS)
{
	CompressionState *cs = (CompressionState *) PG_GETARG_POINTER(0);
	quicklz_state *state;

	/* nothing to do without a state object */
	if (cs == NULL)
		PG_RETURN_VOID();

	state = (quicklz_state *) cs->opaque;
	if (state != NULL)
	{
		/* free the scratch buffer first, then the state itself */
		if (state->scratch != NULL)
			pfree(state->scratch);
		pfree(state);
	}

	PG_RETURN_VOID();
}
Datum plunit_assert_not_equals_range_message(PG_FUNCTION_ARGS) { char *message = assert_get_message(fcinfo, 3, "plunit.assert_not_equal exception"); /* skip all tests for NULL value */ if (PG_ARGISNULL(0) || PG_ARGISNULL(1) || PG_ARGISNULL(2)) ereport(ERROR, (errcode(ERRCODE_CHECK_VIOLATION), errmsg("%s", message), errdetail("Plunit.assertation fails (assert_not_equals)."))); if (assert_equals_range_base(fcinfo)) ereport(ERROR, (errcode(ERRCODE_CHECK_VIOLATION), errmsg("%s", message), errdetail("Plunit.assertation fails (assert_not_equals)."))); PG_RETURN_VOID(); }
/* * pgmpc_load * Load given playlist. */ Datum pgmpc_load(PG_FUNCTION_ARGS) { char *playlist; /* User needs to specify a playlist */ if (PG_ARGISNULL(0)) ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), errmsg("Playlist needs to be specified"))); playlist = text_to_cstring(PG_GETARG_TEXT_PP(0)); /* Now run the command */ pgmpc_init(); if (!mpd_run_load(mpd_conn, playlist)) pgmpc_print_error(); pgmpc_reset(); PG_RETURN_VOID(); }
Datum rtendscan(PG_FUNCTION_ARGS) { IndexScanDesc s = (IndexScanDesc) PG_GETARG_POINTER(0); RTreeScanOpaque p; p = (RTreeScanOpaque) s->opaque; if (p != (RTreeScanOpaque) NULL) { freestack(p->s_stack); freestack(p->s_markstk); pfree(s->opaque); } rtdropscan(s); /* XXX don't unset read lock -- two-phase locking */ PG_RETURN_VOID(); }
Datum dbms_pipe_create_pipe_1 (PG_FUNCTION_ARGS) { Datum arg1; if (PG_ARGISNULL(0)) ereport(ERROR, (errcode(ERRCODE_NULL_VALUE_NOT_ALLOWED), errmsg("pipe name is NULL"), errdetail("Pipename may not be NULL."))); arg1 = PG_GETARG_DATUM(0); DirectFunctionCall3(dbms_pipe_create_pipe, arg1, (Datum) -1, BoolGetDatum(false)); PG_RETURN_VOID(); }
/* * master_create_worker_shards is a user facing function to create worker shards * for the given relation in round robin order. */ Datum master_create_worker_shards(PG_FUNCTION_ARGS) { text *tableNameText = PG_GETARG_TEXT_P(0); int32 shardCount = PG_GETARG_INT32(1); int32 replicationFactor = PG_GETARG_INT32(2); Oid distributedTableId = ResolveRelationId(tableNameText); /* do not add any data */ bool useExclusiveConnections = false; EnsureCoordinator(); CheckCitusVersion(ERROR); CreateShardsWithRoundRobinPolicy(distributedTableId, shardCount, replicationFactor, useExclusiveConnections); PG_RETURN_VOID(); }
/*
 * routine to build an index. Basically calls insert over and over
 */
Datum
rtbuild(PG_FUNCTION_ARGS)
{
	Relation	heap = (Relation) PG_GETARG_POINTER(0);
	Relation	index = (Relation) PG_GETARG_POINTER(1);
	IndexInfo  *indexInfo = (IndexInfo *) PG_GETARG_POINTER(2);
	double		reltuples;		/* heap tuple count from the build scan */
	RTBuildState buildstate;
	Buffer		buffer;

	/* no locking is needed */

	initRtstate(&buildstate.rtState, index);

	/*
	 * We expect to be called exactly once for any index relation. If that's
	 * not the case, big trouble's what we have.
	 */
	if (RelationGetNumberOfBlocks(index) != 0)
		elog(ERROR, "index \"%s\" already contains data",
			 RelationGetRelationName(index));

	/* initialize the root page */
	buffer = ReadBuffer(index, P_NEW);
	RTInitBuffer(buffer, F_LEAF);
	WriteBuffer(buffer);

	/* build the index */
	buildstate.indtuples = 0;

	/* do the heap scan, invoking rtbuildCallback once per heap tuple */
	reltuples = IndexBuildHeapScan(heap, index, indexInfo,
								   rtbuildCallback, (void *) &buildstate);

	/* okay, all heap tuples are indexed */

	/* since we just counted the # of tuples, may as well update stats */
	IndexCloseAndUpdateStats(heap, reltuples, index, buildstate.indtuples);

	PG_RETURN_VOID();
}
/* * create_reference_table accepts a table and then it creates a distributed * table which has one shard and replication factor is set to * shard_replication_factor configuration value. */ Datum create_reference_table(PG_FUNCTION_ARGS) { Oid relationId = PG_GETARG_OID(0); int shardCount = 1; AttrNumber firstColumnAttrNumber = 1; char *firstColumnName = get_attname(relationId, firstColumnAttrNumber); if (firstColumnName == NULL) { char *relationName = get_rel_name(relationId); ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), errmsg("reference table candidate %s needs to have at" "least one column", relationName))); } CreateHashDistributedTable(relationId, firstColumnName, shardCount, ShardReplicationFactor); PG_RETURN_VOID(); }
Datum spg_quad_inner_consistent(PG_FUNCTION_ARGS) { spgInnerConsistentIn *in = (spgInnerConsistentIn*)PG_GETARG_POINTER(0); spgInnerConsistentOut *out = (spgInnerConsistentOut*)PG_GETARG_POINTER(1); Point *query, *centroid; query = DatumGetPointP(in->query); Assert(in->hasPrefix); centroid = DatumGetPointP(in->prefixDatum); out->levelAdd = 0; out->nodeNumbers = palloc(sizeof(int)); out->nNodes = 1; out->nodeNumbers[0] = getQuadrant(centroid, query) - 1; PG_RETURN_VOID(); }
/*
 * gistbuildempty() -- build an empty gist index in the initialization fork
 */
Datum
gistbuildempty(PG_FUNCTION_ARGS)
{
	Relation	index = (Relation) PG_GETARG_POINTER(0);
	Buffer		buffer;

	/* Initialize the root page */
	buffer = ReadBufferExtended(index, INIT_FORKNUM, P_NEW, RBM_NORMAL, NULL);
	LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);

	/* Initialize and xlog buffer */
	/* the init/dirty/log sequence must be atomic, hence the crit section */
	START_CRIT_SECTION();
	GISTInitBuffer(buffer, F_LEAF);
	MarkBufferDirty(buffer);
	/* WAL-log the whole new page so it can be recreated at replay */
	log_newpage_buffer(buffer, true);
	END_CRIT_SECTION();

	/* Unlock and release the buffer */
	UnlockReleaseBuffer(buffer);

	PG_RETURN_VOID();
}