void GpPersistentTablespaceNode_GetValues( Datum *values, Oid *filespaceOid, Oid *tablespaceOid, PersistentFileSysState *persistentState, int64 *createMirrorDataLossTrackingSessionNum, MirroredObjectExistenceState *mirrorExistenceState, int32 *reserved, TransactionId *parentXid, int64 *persistentSerialNum) { *filespaceOid = DatumGetObjectId(values[Anum_gp_persistent_tablespace_node_filespace_oid - 1]); *tablespaceOid = DatumGetObjectId(values[Anum_gp_persistent_tablespace_node_tablespace_oid - 1]); *persistentState = DatumGetInt16(values[Anum_gp_persistent_tablespace_node_persistent_state - 1]); *createMirrorDataLossTrackingSessionNum = DatumGetInt64(values[Anum_gp_persistent_tablespace_node_create_mirror_data_loss_tracking_session_num - 1]); *mirrorExistenceState = DatumGetInt16(values[Anum_gp_persistent_tablespace_node_mirror_existence_state - 1]); *reserved = DatumGetInt32(values[Anum_gp_persistent_tablespace_node_reserved - 1]); *parentXid = (TransactionId)DatumGetInt32(values[Anum_gp_persistent_tablespace_node_parent_xid - 1]); *persistentSerialNum = DatumGetInt64(values[Anum_gp_persistent_tablespace_node_persistent_serial_num - 1]); }
/*
 * Evaluate the limit/offset expressions --- done at start of each scan.
 *
 * Also resets the current-position state so the node starts a fresh scan.
 */
static void
recompute_limits(LimitState *node)
{
	ExprContext *econtext = node->ps.ps_ExprContext;
	Datum		result;
	bool		isNull;

	if (node->limitOffset)
	{
		result = ExecEvalExprSwitchContext(node->limitOffset,
										   econtext,
										   &isNull,
										   NULL);
		/* a NULL offset means "no offset"; negative values clamp to zero */
		if (isNull)
			node->offset = 0;
		else
		{
			node->offset = DatumGetInt64(result);
			if (node->offset < 0)
				node->offset = 0;
		}
	}
	else
	{
		/* no OFFSET clause supplied */
		node->offset = 0;
	}

	if (node->limitCount)
	{
		result = ExecEvalExprSwitchContext(node->limitCount,
										   econtext,
										   &isNull,
										   NULL);
		if (isNull)
		{
			/* a NULL count behaves as LIMIT ALL */
			node->count = 0;
			node->noCount = true;
		}
		else
		{
			node->count = DatumGetInt64(result);
			if (node->count < 0)
				node->count = 0;
			node->noCount = false;
		}
	}
	else
	{
		/* no LIMIT clause supplied */
		node->count = 0;
		node->noCount = true;
	}

	/* rewind position bookkeeping to start-of-scan */
	node->position = 0;
	node->subSlot = NULL;
}
void GpPersistentRelationNode_GetValues( Datum *values, Oid *tablespaceOid, Oid *databaseOid, Oid *relfilenodeOid, int32 *segmentFileNum, PersistentFileSysRelStorageMgr *relationStorageManager, PersistentFileSysState *persistentState, int64 *createMirrorDataLossTrackingSessionNum, MirroredObjectExistenceState *mirrorExistenceState, MirroredRelDataSynchronizationState *mirrorDataSynchronizationState, bool *mirrorBufpoolMarkedForScanIncrementalResync, int64 *mirrorBufpoolResyncChangedPageCount, XLogRecPtr *mirrorBufpoolResyncCkptLoc, BlockNumber *mirrorBufpoolResyncCkptBlockNum, int64 *mirrorAppendOnlyLossEof, int64 *mirrorAppendOnlyNewEof, PersistentFileSysRelBufpoolKind *relBufpoolKind, TransactionId *parentXid, int64 *persistentSerialNum) { *tablespaceOid = DatumGetObjectId(values[Anum_gp_persistent_relation_node_tablespace_oid - 1]); *databaseOid = DatumGetObjectId(values[Anum_gp_persistent_relation_node_database_oid - 1]); *relfilenodeOid = DatumGetObjectId(values[Anum_gp_persistent_relation_node_relfilenode_oid - 1]); *segmentFileNum = DatumGetInt32(values[Anum_gp_persistent_relation_node_segment_file_num - 1]); *relationStorageManager = (PersistentFileSysRelStorageMgr)DatumGetInt16(values[Anum_gp_persistent_relation_node_relation_storage_manager - 1]); *persistentState = (PersistentFileSysState)DatumGetInt16(values[Anum_gp_persistent_relation_node_persistent_state - 1]); *createMirrorDataLossTrackingSessionNum = DatumGetInt64(values[Anum_gp_persistent_relation_node_create_mirror_data_loss_tracking_session_num - 1]); *mirrorExistenceState = (MirroredObjectExistenceState)DatumGetInt16(values[Anum_gp_persistent_relation_node_mirror_existence_state - 1]); *mirrorDataSynchronizationState = (MirroredRelDataSynchronizationState)DatumGetInt16(values[Anum_gp_persistent_relation_node_mirror_data_synchronization_state - 1]); *mirrorBufpoolMarkedForScanIncrementalResync = 
DatumGetBool(values[Anum_gp_persistent_relation_node_mirror_bufpool_marked_for_scan_incremental_resync - 1]); *mirrorBufpoolResyncChangedPageCount = DatumGetInt64(values[Anum_gp_persistent_relation_node_mirror_bufpool_resync_changed_page_count - 1]); *mirrorBufpoolResyncCkptLoc = *((XLogRecPtr*) DatumGetPointer(values[Anum_gp_persistent_relation_node_mirror_bufpool_resync_ckpt_loc - 1])); *mirrorBufpoolResyncCkptBlockNum = (BlockNumber)DatumGetInt32(values[Anum_gp_persistent_relation_node_mirror_bufpool_resync_ckpt_block_num - 1]); *mirrorAppendOnlyLossEof = DatumGetInt64(values[Anum_gp_persistent_relation_node_mirror_append_only_loss_eof - 1]); *mirrorAppendOnlyNewEof = DatumGetInt64(values[Anum_gp_persistent_relation_node_mirror_append_only_new_eof - 1]); *relBufpoolKind = (PersistentFileSysRelBufpoolKind)DatumGetInt32(values[Anum_gp_persistent_relation_node_relation_bufpool_kind - 1]); *parentXid = (TransactionId)DatumGetInt32(values[Anum_gp_persistent_relation_node_parent_xid - 1]); *persistentSerialNum = DatumGetInt64(values[Anum_gp_persistent_relation_node_persistent_serial_num - 1]); }
/*
 * Convert an int64 Datum to a Python integer.
 */
static PyObject *
PLyLong_FromInt64(PLyDatumToOb *arg, Datum d)
{
	int64		value = DatumGetInt64(d);

	/* "long" may be only 32 bits wide on some platforms */
	if (sizeof(long) >= sizeof(int64))
		return PyLong_FromLong(value);
	return PyLong_FromLongLong(value);
}
/*
 * GetFastSequences
 *
 * Reserve a range of 'numSequences' consecutive sequence numbers for
 * (objid, objmod).  The first number returned is the larger of
 * 'minSequence' and the stored lastsequence + 1.
 *
 * If no gp_fastsequence row exists yet for this key, one is created by
 * update_fastsequence; otherwise the existing row's lastsequence is
 * advanced to cover the reserved range.
 *
 * Returns the first sequence number of the reserved range.
 */
int64
GetFastSequences(Oid objid, int64 objmod,
				 int64 minSequence, int64 numSequences)
{
	Relation	fastSeqRel;
	TupleDesc	fastSeqDesc;
	HeapTuple	fastSeqTuple;
	cqContext	cqc;
	int64		firstSequence = minSequence;
	int64		newLastSequence;

	fastSeqRel = heap_open(FastSequenceRelationId, RowExclusiveLock);
	fastSeqDesc = RelationGetDescr(fastSeqRel);

	/* Lock the row for this key so concurrent reservations serialize. */
	fastSeqTuple = caql_getfirst(
			caql_addrel(cqclr(&cqc), fastSeqRel),
			cql("SELECT * FROM gp_fastsequence "
				" WHERE objid = :1 "
				" AND objmod = :2 "
				" FOR UPDATE ",
				ObjectIdGetDatum(objid),
				Int64GetDatum(objmod)));

	if (!HeapTupleIsValid(fastSeqTuple))
	{
		/* No entry yet: reserve starting at minSequence. */
		newLastSequence = firstSequence + numSequences - 1;
	}
	else
	{
		Datum		lastSeqDatum;
		bool		isNull;

		lastSeqDatum = heap_getattr(fastSeqTuple,
									Anum_gp_fastsequence_last_sequence,
									fastSeqDesc,
									&isNull);
		if (isNull)
			ereport(ERROR,
					(errcode(ERRCODE_UNDEFINED_OBJECT),
					 errmsg("got an invalid lastsequence number: NULL")));

		/* Start after the stored high-water mark if it is larger. */
		if (DatumGetInt64(lastSeqDatum) + 1 > firstSequence)
			firstSequence = DatumGetInt64(lastSeqDatum) + 1;

		newLastSequence = firstSequence + numSequences - 1;
	}

	update_fastsequence(fastSeqRel, fastSeqTuple, fastSeqDesc,
						objid, objmod, newLastSequence);

	if (HeapTupleIsValid(fastSeqTuple))
		heap_freetuple(fastSeqTuple);

	/* Refer to the comment at the end of InsertFastSequenceEntry. */
	heap_close(fastSeqRel, RowExclusiveLock);

	return firstSequence;
}
void GpPersistentFilespaceNode_GetValues( Datum *values, Oid *filespaceOid, int16 *dbId1, char locationBlankPadded1[FilespaceLocationBlankPaddedWithNullTermLen], int16 *dbId2, char locationBlankPadded2[FilespaceLocationBlankPaddedWithNullTermLen], PersistentFileSysState *persistentState, int64 *createMirrorDataLossTrackingSessionNum, MirroredObjectExistenceState *mirrorExistenceState, int32 *reserved, TransactionId *parentXid, int64 *persistentSerialNum) { char *locationPtr; int locationLen; *filespaceOid = DatumGetObjectId(values[Anum_gp_persistent_filespace_node_filespace_oid - 1]); *dbId1 = DatumGetInt16(values[Anum_gp_persistent_filespace_node_db_id_1 - 1]); locationPtr = TextDatumGetCString(values[Anum_gp_persistent_filespace_node_location_1 - 1]);; locationLen = strlen(locationPtr); if (locationLen != FilespaceLocationBlankPaddedWithNullTermLen - 1) elog(ERROR, "Expected filespace location 1 to be %d characters and found %d", FilespaceLocationBlankPaddedWithNullTermLen - 1, locationLen); memcpy(locationBlankPadded1, locationPtr, FilespaceLocationBlankPaddedWithNullTermLen); *dbId2 = DatumGetInt16(values[Anum_gp_persistent_filespace_node_db_id_2 - 1]); locationPtr = TextDatumGetCString(values[Anum_gp_persistent_filespace_node_location_2 - 1]); locationLen = strlen(locationPtr); if (locationLen != FilespaceLocationBlankPaddedWithNullTermLen - 1) elog(ERROR, "Expected filespace location 2 to be %d characters and found %d", FilespaceLocationBlankPaddedWithNullTermLen - 1, locationLen); memcpy(locationBlankPadded2, locationPtr, FilespaceLocationBlankPaddedWithNullTermLen); *persistentState = DatumGetInt16(values[Anum_gp_persistent_filespace_node_persistent_state - 1]); *createMirrorDataLossTrackingSessionNum = DatumGetInt64(values[Anum_gp_persistent_filespace_node_create_mirror_data_loss_tracking_session_num - 1]); *mirrorExistenceState = DatumGetInt16(values[Anum_gp_persistent_filespace_node_mirror_existence_state - 1]); *reserved = 
DatumGetInt32(values[Anum_gp_persistent_filespace_node_reserved - 1]); *parentXid = (TransactionId)DatumGetInt32(values[Anum_gp_persistent_filespace_node_parent_xid - 1]); *persistentSerialNum = DatumGetInt64(values[Anum_gp_persistent_filespace_node_persistent_serial_num - 1]); }
/*
 * load_consumer_offsets
 *
 * Load all offsets for all of this consumer's partitions.
 *
 * Every partition starts at the caller-supplied 'offset'; rows found in
 * the pipeline_kafka offsets relation then override individual partitions.
 * When 'offset' is RD_KAFKA_OFFSET_NULL, each row's stored offset is used
 * (RD_KAFKA_OFFSET_END when the stored value is NULL).
 *
 * Fixes:
 *  - bounds check was "partition > num_partitions", allowing an
 *    out-of-bounds write at partition == num_partitions (and no check
 *    for negative ids);
 *  - DatumGetInt64 was applied to a plain int64_t (not a Datum);
 *  - the 'offset' parameter was overwritten inside the loop, so only the
 *    first row's stored offset was consulted and later rows inherited it.
 */
static void
load_consumer_offsets(KafkaConsumer *consumer, struct rd_kafka_metadata_topic *meta, int64_t offset)
{
	MemoryContext old;
	ScanKeyData skey[1];
	HeapTuple tup = NULL;
	HeapScanDesc scan;
	Relation offsets = open_pipeline_kafka_offsets();
	TupleTableSlot *slot = MakeSingleTupleTableSlot(RelationGetDescr(offsets));
	int i;

	ScanKeyInit(&skey[0], OFFSETS_ATTR_CONSUMER, BTEqualStrategyNumber,
				F_OIDEQ, ObjectIdGetDatum(consumer->id));
	scan = heap_beginscan(offsets, GetTransactionSnapshot(), 1, skey);

	/* offsets array must outlive the transaction: allocate it in cache memory */
	old = MemoryContextSwitchTo(CacheMemoryContext);
	consumer->offsets = palloc0(meta->partition_cnt * sizeof(int64_t));
	MemoryContextSwitchTo(old);

	/* by default, begin consuming every partition from 'offset' */
	for (i = 0; i < meta->partition_cnt; i++)
		consumer->offsets[i] = offset;

	consumer->num_partitions = meta->partition_cnt;

	while ((tup = heap_getnext(scan, ForwardScanDirection)) != NULL)
	{
		Datum d;
		bool isnull;
		int partition;
		int64_t row_offset = offset;

		ExecStoreTuple(tup, slot, InvalidBuffer, false);
		d = slot_getattr(slot, OFFSETS_ATTR_PARTITION, &isnull);
		partition = DatumGetInt32(d);

		/* partition indexes a num_partitions-sized array: reject out-of-range ids */
		if (partition < 0 || partition >= consumer->num_partitions)
			elog(ERROR, "invalid partition id: %d", partition);

		if (offset == RD_KAFKA_OFFSET_NULL)
		{
			/* use this row's stored offset; NULL means start at the end */
			d = slot_getattr(slot, OFFSETS_ATTR_OFFSET, &isnull);
			if (isnull)
				row_offset = RD_KAFKA_OFFSET_END;
			else
				row_offset = DatumGetInt64(d);
		}

		consumer->offsets[partition] = row_offset;
	}

	ExecDropSingleTupleTableSlot(slot);
	heap_endscan(scan);
	heap_close(offsets, RowExclusiveLock);
}
/*
 * Fast int8 comparator for SortSupport: three-way compare of the two
 * int64 values packed in the Datums (returns 1, 0, or -1).
 */
static int
btint8fastcmp(Datum x, Datum y, SortSupport ssup)
{
	int64		lhs = DatumGetInt64(x);
	int64		rhs = DatumGetInt64(y);

	/* branch-light three-way comparison */
	return (lhs > rhs) - (lhs < rhs);
}
/*
 * Fast int8 comparator for SortSupport: three-way compare of the two
 * int64 values packed in the Datums, using the named result constants.
 */
static int
btint8fastcmp(Datum x, Datum y, SortSupport ssup)
{
	int64		lhs = DatumGetInt64(x);
	int64		rhs = DatumGetInt64(y);

	if (lhs < rhs)
		return A_LESS_THAN_B;
	if (lhs > rhs)
		return A_GREATER_THAN_B;
	return 0;
}
/* int8_cash() * Convert int8 (bigint) to cash */ Datum int8_cash(PG_FUNCTION_ARGS) { int64 amount = PG_GETARG_INT64(0); Cash result; int fpoint; int64 scale; int i; struct lconv *lconvert = PGLC_localeconv(); /* see comments about frac_digits in cash_in() */ fpoint = lconvert->frac_digits; if (fpoint < 0 || fpoint > 10) fpoint = 2; /* compute required scale factor */ scale = 1; for (i = 0; i < fpoint; i++) scale *= 10; /* compute amount * scale, checking for overflow */ result = DatumGetInt64(DirectFunctionCall2(int8mul, Int64GetDatum(amount), Int64GetDatum(scale))); PG_RETURN_CASH(result); }
/* numeric_cash() * Convert numeric to cash. */ Datum numeric_cash(PG_FUNCTION_ARGS) { Datum amount = PG_GETARG_DATUM(0); Cash result; int fpoint; int64 scale; int i; Datum numeric_scale; struct lconv *lconvert = PGLC_localeconv(); /* see comments about frac_digits in cash_in() */ fpoint = lconvert->frac_digits; if (fpoint < 0 || fpoint > 10) fpoint = 2; /* compute required scale factor */ scale = 1; for (i = 0; i < fpoint; i++) scale *= 10; /* multiply the input amount by scale factor */ numeric_scale = DirectFunctionCall1(int8_numeric, Int64GetDatum(scale)); amount = DirectFunctionCall2(numeric_mul, amount, numeric_scale); /* note that numeric_int8 will round to nearest integer for us */ result = DatumGetInt64(DirectFunctionCall1(numeric_int8, amount)); PG_RETURN_CASH(result); }
/*
 * Extract the persistent serial number from a deformed persistent-store
 * tuple, using the store's attribute-number mapping (1-based).
 */
static void PersistentStore_ExtractOurTupleData(
	PersistentStoreData		*storeData,
	Datum					*values,
	int64					*persistentSerialNum)
{
	*persistentSerialNum =
		DatumGetInt64(values[storeData->attNumPersistentSerialNum - 1]);
}
void GpPersistentTablespaceNode_GetValues( Datum *values, Oid *filespaceOid, Oid *tablespaceOid, PersistentFileSysState *persistentState, int32 *reserved, TransactionId *parentXid, int64 *persistentSerialNum, ItemPointerData *previousFreeTid, bool *sharedStorage) { *filespaceOid = DatumGetObjectId(values[Anum_gp_persistent_tablespace_node_filespace_oid - 1]); *tablespaceOid = DatumGetObjectId(values[Anum_gp_persistent_tablespace_node_tablespace_oid - 1]); *persistentState = DatumGetInt16(values[Anum_gp_persistent_tablespace_node_persistent_state - 1]); *reserved = DatumGetInt32(values[Anum_gp_persistent_tablespace_node_reserved - 1]); *parentXid = (TransactionId)DatumGetInt32(values[Anum_gp_persistent_tablespace_node_parent_xid - 1]); *persistentSerialNum = DatumGetInt64(values[Anum_gp_persistent_tablespace_node_persistent_serial_num - 1]); *previousFreeTid = *((ItemPointer) DatumGetPointer(values[Anum_gp_persistent_tablespace_node_previous_free_tid - 1])); *sharedStorage = true; }
/*
 * Examine parameters and prepare for a sample scan.
 *
 * Validates the requested row count and resets the sampler's per-scan
 * state; block-related fields are initialized lazily by NextSampleBlock.
 */
static void
system_rows_beginsamplescan(SampleScanState *node,
							Datum *params,
							int nparams,
							uint32 seed)
{
	SystemRowsSamplerData *sampler = (SystemRowsSamplerData *) node->tsm_state;
	int64		ntuples = DatumGetInt64(params[0]);

	if (ntuples < 0)
		ereport(ERROR,
				(errcode(ERRCODE_INVALID_TABLESAMPLE_ARGUMENT),
				 errmsg("sample size must not be negative")));

	sampler->seed = seed;
	sampler->ntuples = ntuples;
	sampler->donetuples = 0;
	sampler->lt = InvalidOffsetNumber;
	sampler->doneblocks = 0;
	/* lb will be initialized during first NextSampleBlock call */
	/* we intentionally do not change nblocks/firstblock/step here */

	/*
	 * We *must* use pagemode visibility checking in this module, so force
	 * that even though it's currently default.
	 */
	node->use_pagemode = true;
}
long long * all_referenced_files(int * countOut) { char query[128]; snprintf(query, 128, "SELECT file_id FROM "WDB_SCHEMA".file_blob"); SPI_connect(); int result = SPI_execute(query, true, 0); if ( SPI_OK_SELECT != result ) ereport(ERROR, (errcode( ERRCODE_RAISE_EXCEPTION ), errmsg("Error when reading from file_blob"))); * countOut = SPI_processed; long long * ret = (long long *) SPI_palloc(sizeof(long long) * (* countOut)); int i; for ( i = 0; i < * countOut; ++ i ) { bool isNull; // unused Datum d = SPI_getbinval(SPI_tuptable->vals[i], SPI_tuptable->tupdesc, 1, & isNull); ret[i] = DatumGetInt64(d); } SPI_finish(); return ret; }
/* * lock_shard_resources allows shard resources to be locked * remotely to serialise non-commutative writes on shards. * * This function does not sort the array to avoid deadlock, callers * must ensure a consistent order. */ Datum lock_shard_resources(PG_FUNCTION_ARGS) { LOCKMODE lockMode = IntToLockMode(PG_GETARG_INT32(0)); ArrayType *shardIdArrayObject = PG_GETARG_ARRAYTYPE_P(1); Datum *shardIdArrayDatum = NULL; int shardIdCount = 0; int shardIdIndex = 0; if (ARR_NDIM(shardIdArrayObject) == 0) { ereport(ERROR, (errmsg("no locks specified"))); } /* we don't want random users to block writes */ EnsureSuperUser(); shardIdCount = ArrayObjectCount(shardIdArrayObject); shardIdArrayDatum = DeconstructArrayObject(shardIdArrayObject); for (shardIdIndex = 0; shardIdIndex < shardIdCount; shardIdIndex++) { int64 shardId = DatumGetInt64(shardIdArrayDatum[shardIdIndex]); LockShardResource(shardId, lockMode); } PG_RETURN_VOID(); }
/*
 * Extract an int64 value from a DefElem.
 *
 * Errors out unless the argument is an Integer node or a Float node whose
 * string form is a valid int8 literal.
 */
int64
defGetInt64(DefElem *def)
{
	if (def->arg == NULL)
		ereport(ERROR,
				(errcode(ERRCODE_SYNTAX_ERROR),
				 errmsg("%s requires a numeric value",
						def->defname)));

	switch (nodeTag(def->arg))
	{
		case T_Integer:
			return (int64) intVal(def->arg);

		case T_Float:

			/*
			 * Values too large for int4 will be represented as Float
			 * constants by the lexer.  Accept these if they are valid int8
			 * strings.
			 */
			return DatumGetInt64(DirectFunctionCall1(int8in,
									 CStringGetDatum(strVal(def->arg))));

		default:
			ereport(ERROR,
					(errcode(ERRCODE_SYNTAX_ERROR),
					 errmsg("%s requires a numeric value",
							def->defname)));
	}

	return 0;					/* keep compiler quiet */
}
/* ----------------
 *		index_getbitmap - get all tuples at once from an index scan
 *
 *		Adds the TIDs of all heap tuples satisfying the scan keys to a bitmap.
 *		Since there's no interlock between the index scan and the eventual heap
 *		access, this is only safe to use with MVCC-based snapshots: the heap
 *		item slot could have been replaced by a newer tuple by the time we get
 *		to it.
 *
 *		Returns the number of matching tuples found.  (Note: this might be only
 *		approximate, so it should only be used for statistical purposes.)
 * ----------------
 */
int64
index_getbitmap(IndexScanDesc scan, TIDBitmap *bitmap)
{
	FmgrInfo   *procedure;
	int64		ntids;
	Datum		result;

	SCAN_CHECKS;
	GET_SCAN_PROCEDURE(amgetbitmap);

	/* just make sure this is false... */
	scan->kill_prior_tuple = false;

	/* have the am's getbitmap proc do all the work */
	result = FunctionCall2(procedure,
						   PointerGetDatum(scan),
						   PointerGetDatum(bitmap));

	ntids = DatumGetInt64(result);

	/* If int8 is pass-by-ref, must free the result to avoid memory leak */
#ifndef USE_FLOAT8_BYVAL
	pfree(DatumGetPointer(result));
#endif

	pgstat_count_index_tuples(scan->indexRelation, ntids);

	return ntids;
}
/*
 * HdfsTell: report the current position of an HDFS file handle by invoking
 * the protocol's "tell" UDF through the fmgr interface.  Returns -1 under
 * fault-injection test mode.
 */
int64_t
HdfsTell(FsysName protocol, hdfsFS fileSystem, hdfsFile file)
{
	FunctionCallInfoData fcinfo;
	FileSystemUdfData fsysUdf;
	FmgrInfo   *fsysFunc = FsysInterfaceGetFunc(protocol, FSYS_FUNC_TELL);
	Datum		result;

#ifdef USE_ASSERT_CHECKING
	/* simulated failure path for fault-injection testing */
	if (testmode_fault(gp_fsys_fault_inject_percent))
		return -1;
#endif

	/* package the hdfs handles as the UDF's call context */
	fsysUdf.type = T_FileSystemFunctionData;
	fsysUdf.fsys_hdfs = fileSystem;
	fsysUdf.fsys_hfile = file;

	InitFunctionCallInfoData(fcinfo,				/* FunctionCallInfoData */
							 fsysFunc,				/* FmgrInfo */
							 0,						/* nArgs */
							 (Node *) (&fsysUdf),	/* Call Context */
							 NULL);					/* ResultSetInfo */

	result = FunctionCallInvoke(&fcinfo);
	return DatumGetInt64(result);
}
/*
 * Fetch column info.colNumber from the given SPI tuple as a double.
 * Accepts any numeric column type (int2/int4/int8/float4/float8) and
 * errors out on NULL values or unexpected column types.
 */
double
pgr_SPI_getFloat8(HeapTuple *tuple, TupleDesc *tupdesc, Column_info_t info)
{
	Datum		binval;
	bool		isnull;
	double		value = 0.0;

	binval = SPI_getbinval(*tuple, *tupdesc, info.colNumber, &isnull);
	if (isnull)
		elog(ERROR, "Unexpected Null value in column %s", info.name);

	switch (info.type)
	{
		case INT2OID:
			value = (double) DatumGetInt16(binval);
			break;
		case INT4OID:
			value = (double) DatumGetInt32(binval);
			break;
		case INT8OID:
			value = (double) DatumGetInt64(binval);
			break;
		case FLOAT4OID:
			value = (double) DatumGetFloat4(binval);
			break;
		case FLOAT8OID:
			value = DatumGetFloat8(binval);
			break;
		default:
			elog(ERROR,
				 "Unexpected Column type of %s. Expected ANY-NUMERICAL",
				 info.name);
	}

	PGR_DBG("Variable: %s Value: %lf", info.name, value);
	return value;
}
/*
 * Fetch column info.colNumber from the given SPI tuple as an int64_t.
 * Accepts any integer column type (int2/int4/int8) and errors out on
 * NULL values or unexpected column types.
 */
int64_t
pgr_SPI_getBigInt(HeapTuple *tuple, TupleDesc *tupdesc, Column_info_t info)
{
	Datum		binval;
	bool		isnull;
	int64_t		value = 0;

	binval = SPI_getbinval(*tuple, *tupdesc, info.colNumber, &isnull);
	if (isnull)
		elog(ERROR, "Unexpected Null value in column %s", info.name);

	switch (info.type)
	{
		case INT2OID:
			value = (int64_t) DatumGetInt16(binval);
			break;
		case INT4OID:
			value = (int64_t) DatumGetInt32(binval);
			break;
		case INT8OID:
			value = DatumGetInt64(binval);
			break;
		default:
			elog(ERROR,
				 "Unexpected Column type of %s. Expected ANY-INTEGER",
				 info.name);
	}

	PGR_DBG("Variable: %s Value: %lld", info.name, value);
	return value;
}
/*
 * Initialize workspace for a worker process: create the schema if it doesn't
 * already exist.
 *
 * Fixes: the count(*) Datum was converted with DatumGetInt64 *before* the
 * isnull flag was checked, and the int64 result was stored in a plain int.
 * The NULL check now precedes the conversion and the count is kept as int64.
 */
static void
initialize_worker_spi(worktable *table)
{
	int			ret;
	int64		ntup;
	bool		isnull;
	Datum		countDatum;
	StringInfoData buf;

	SetCurrentStatementStartTimestamp();
	StartTransactionCommand();
	SPI_connect();
	PushActiveSnapshot(GetTransactionSnapshot());
	pgstat_report_activity(STATE_RUNNING, "initializing spi_worker schema");

	/* XXX could we use CREATE SCHEMA IF NOT EXISTS? */
	initStringInfo(&buf);
	appendStringInfo(&buf, "select count(*) from pg_namespace where nspname = '%s'",
					 table->schema);

	ret = SPI_execute(buf.data, true, 0);
	if (ret != SPI_OK_SELECT)
		elog(FATAL, "SPI_execute failed: error code %d", ret);

	if (SPI_processed != 1)
		elog(FATAL, "not a singleton result");

	/* check for NULL before interpreting the Datum */
	countDatum = SPI_getbinval(SPI_tuptable->vals[0],
							   SPI_tuptable->tupdesc,
							   1, &isnull);
	if (isnull)
		elog(FATAL, "null result");
	ntup = DatumGetInt64(countDatum);

	if (ntup == 0)
	{
		/* schema does not exist yet: create schema, table, and index */
		resetStringInfo(&buf);
		appendStringInfo(&buf,
						 "CREATE SCHEMA \"%s\" "
						 "CREATE TABLE \"%s\" ("
						 "		type text CHECK (type IN ('total', 'delta')), "
						 "		value	integer)"
						 "CREATE UNIQUE INDEX \"%s_unique_total\" ON \"%s\" (type) "
						 "WHERE type = 'total'",
						 table->schema, table->name, table->name, table->name);

		/* set statement start time */
		SetCurrentStatementStartTimestamp();

		ret = SPI_execute(buf.data, false, 0);

		if (ret != SPI_OK_UTILITY)
			elog(FATAL, "failed to create my schema");
	}

	SPI_finish();
	PopActiveSnapshot();
	CommitTransactionCommand();
	pgstat_report_activity(STATE_IDLE, NULL);
}
/*
 * Extract the persistent serial number and previous-free TID from a
 * deformed persistent-store tuple, using the store's attribute-number
 * mapping (1-based).
 */
static void PersistentStore_ExtractOurTupleData(
	PersistentStoreData		*storeData,
	Datum					*values,
	int64					*persistentSerialNum,
	ItemPointer				previousFreeTid)
{
	*persistentSerialNum =
		DatumGetInt64(values[storeData->attNumPersistentSerialNum - 1]);

	/* the TID is stored by reference; copy the struct out */
	*previousFreeTid =
		*((ItemPointer) DatumGetPointer(values[storeData->attNumPreviousFreeTid - 1]));
}
Datum orafce_dump(PG_FUNCTION_ARGS) { Oid valtype = get_fn_expr_argtype(fcinfo->flinfo, 0); List *args; int16 typlen; bool typbyval; Size length; Datum value; int format; StringInfoData str; if (!fcinfo->flinfo || !fcinfo->flinfo->fn_expr) elog(ERROR, "function is called from invalid context"); if (PG_ARGISNULL(0)) elog(ERROR, "argument is NULL"); value = PG_GETARG_DATUM(0); format = PG_GETARG_IF_EXISTS(1, INT32, 10); args = ((FuncExpr *) fcinfo->flinfo->fn_expr)->args; valtype = exprType((Node *) list_nth(args, 0)); get_typlenbyval(valtype, &typlen, &typbyval); length = datumGetSize(value, typbyval, typlen); initStringInfo(&str); appendStringInfo(&str, "Typ=%d Len=%d: ", valtype, (int) length); if (!typbyval) { appendDatum(&str, DatumGetPointer(value), length, format); } else if (length <= 1) { char v = DatumGetChar(value); appendDatum(&str, &v, sizeof(char), format); } else if (length <= 2) { int16 v = DatumGetInt16(value); appendDatum(&str, &v, sizeof(int16), format); } else if (length <= 4) { int32 v = DatumGetInt32(value); appendDatum(&str, &v, sizeof(int32), format); } else { int64 v = DatumGetInt64(value); appendDatum(&str, &v, sizeof(int64), format); } PG_RETURN_TEXT_P(cstring_to_text(str.data)); }
/**
 * Convert postgres Datum into a ConcreteValue object.
 *
 * Row types are wrapped as PGValue<HeapTupleHeader>.  One-dimensional
 * float8 arrays without NULLs become (possibly writable) Array wrappers.
 * The scalar types bool/int2/int4/int8/float4/float8 become ConcreteValue
 * instances; anything else yields a null pointer.
 */
AbstractValueSPtr AbstractPGValue::DatumToValue(bool inMemoryIsWritable,
    Oid inTypeID, Datum inDatum) const {

    // Row types are handled before anything else
    if (type_is_rowtype(inTypeID)) {
        HeapTupleHeader tupleHeader = DatumGetHeapTupleHeader(inDatum);
        return AbstractValueSPtr(new PGValue<HeapTupleHeader>(tupleHeader));
    }

    if (type_is_array(inTypeID)) {
        ArrayType *arrayPtr = DatumGetArrayTypeP(inDatum);

        if (ARR_NDIM(arrayPtr) != 1)
            throw std::invalid_argument(
                "Multidimensional arrays not yet supported");
        if (ARR_HASNULL(arrayPtr))
            throw std::invalid_argument(
                "Arrays with NULLs not yet supported");

        switch (ARR_ELEMTYPE(arrayPtr)) {
            case FLOAT8OID: {
                MemHandleSPtr arrayHandle(new PGArrayHandle(arrayPtr));

                // Writable access gets a mutable Array; otherwise a
                // read-only Array_const view over the same handle.
                if (inMemoryIsWritable) {
                    return AbstractValueSPtr(
                        new ConcreteValue<Array<double> >(
                            Array<double>(arrayHandle,
                                boost::extents[ ARR_DIMS(arrayPtr)[0] ])));
                } else {
                    return AbstractValueSPtr(
                        new ConcreteValue<Array_const<double> >(
                            Array_const<double>(arrayHandle,
                                boost::extents[ ARR_DIMS(arrayPtr)[0] ])));
                }
            }
        }
    }

    // Plain scalar types
    switch (inTypeID) {
        case BOOLOID:
            return AbstractValueSPtr(
                new ConcreteValue<bool>(DatumGetBool(inDatum)));
        case INT2OID:
            return AbstractValueSPtr(
                new ConcreteValue<int16_t>(DatumGetInt16(inDatum)));
        case INT4OID:
            return AbstractValueSPtr(
                new ConcreteValue<int32_t>(DatumGetInt32(inDatum)));
        case INT8OID:
            return AbstractValueSPtr(
                new ConcreteValue<int64_t>(DatumGetInt64(inDatum)));
        case FLOAT4OID:
            return AbstractValueSPtr(
                new ConcreteValue<float>(DatumGetFloat4(inDatum)));
        case FLOAT8OID:
            return AbstractValueSPtr(
                new ConcreteValue<double>(DatumGetFloat8(inDatum)));
    }

    // Unsupported type: signal with an empty pointer
    return AbstractValueSPtr();
}
/*
 * Sample size estimation.
 *
 * Estimates the number of tuples and pages a SYSTEM_ROWS sample scan will
 * visit, given the (possibly constant-foldable) limit expression.
 */
static void
system_rows_samplescangetsamplesize(PlannerInfo *root,
									RelOptInfo *baserel,
									List *paramexprs,
									BlockNumber *pages,
									double *tuples)
{
	Node	   *limitnode;
	int64		ntuples;
	double		npages;

	/* Try to extract an estimate for the limit rowcount */
	limitnode = (Node *) linitial(paramexprs);
	limitnode = estimate_expression_value(root, limitnode);

	if (IsA(limitnode, Const) &&
		!((Const *) limitnode)->constisnull)
	{
		ntuples = DatumGetInt64(((Const *) limitnode)->constvalue);
		if (ntuples < 0)
		{
			/* Default ntuples if the value is bogus */
			ntuples = 1000;
		}
	}
	else
	{
		/* Default ntuples if we didn't obtain a non-null Const */
		ntuples = 1000;
	}

	/* Clamp to the estimated relation size */
	if (ntuples > baserel->tuples)
		ntuples = (int64) baserel->tuples;
	ntuples = clamp_row_est(ntuples);

	if (baserel->tuples > 0 && baserel->pages > 0)
	{
		/* Estimate number of pages visited based on tuple density */
		double		density = baserel->tuples / (double) baserel->pages;

		npages = ntuples / density;
	}
	else
	{
		/* For lack of data, assume one tuple per page */
		npages = ntuples;
	}

	/* Clamp to sane value */
	npages = clamp_row_est(Min((double) baserel->pages, npages));

	*pages = npages;
	*tuples = ntuples;
}
/*
 * GpRelationNode_GetValues
 *
 * Unpack a gp_relation_node values[] array into its individual output
 * fields (Anum_* attribute numbers are 1-based).
 */
void GpRelationNode_GetValues(
	Datum			*values,
	Oid				*relfilenodeOid,
	int32			*segmentFileNum,
	int64			*createMirrorDataLossTrackingSessionNum,
	ItemPointer		persistentTid,
	int64			*persistentSerialNum)
{
	*relfilenodeOid = DatumGetObjectId(values[Anum_gp_relation_node_relfilenode_oid - 1]);

	*segmentFileNum = DatumGetInt32(values[Anum_gp_relation_node_segment_file_num - 1]);

	*createMirrorDataLossTrackingSessionNum = DatumGetInt64(values[Anum_gp_relation_node_create_mirror_data_loss_tracking_session_num - 1]);

	/* the TID is stored by reference; copy the struct out */
	*persistentTid = *((ItemPointer) DatumGetPointer(values[Anum_gp_relation_node_persistent_tid - 1]));

	*persistentSerialNum = DatumGetInt64(values[Anum_gp_relation_node_persistent_serial_num - 1]);
}
Datum nextval_mirror(PG_FUNCTION_ARGS) { Oid relid = PG_GETARG_OID(0); int64 result; result = DatumGetInt64(DirectFunctionCall1(nextval_oid, ObjectIdGetDatum(relid))); saveSequenceUpdate(relid, result, true); PG_RETURN_INT64(result); }
/**
 * @brief Parse int64 expression
 *
 * "INFINITE" (case-insensitive) maps to INT64_MAX; anything else is parsed
 * with int8in.  Values below minValue raise an out-of-range error.
 */
int64
ParseInt64(char *value, int64 minValue)
{
	int64		parsed;

	if (pg_strcasecmp(value, "INFINITE") == 0)
		return INT64CONST(0x7FFFFFFFFFFFFFFF);

	parsed = DatumGetInt64(DirectFunctionCall1(int8in,
											   CStringGetDatum(value)));
	if (parsed < minValue)
		ereport(ERROR,
				(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
				 errmsg("value \"%s\" is out of range", value)));

	return parsed;
}
/*
 * GpRelfileNode_GetValues
 *
 * Unpack a gp_relfile_node values[] array into its individual output
 * fields (Anum_* attribute numbers are 1-based).
 */
void GpRelfileNode_GetValues(
	Datum			*values,
	Oid				*relfilenodeOid,
	int32			*segmentFileNum,
	ItemPointer		persistentTid,
	int64			*persistentSerialNum)
{
	*relfilenodeOid = DatumGetObjectId(values[Anum_gp_relfile_node_relfilenode_oid - 1]);

	*segmentFileNum = DatumGetInt32(values[Anum_gp_relfile_node_segment_file_num - 1]);

	/* the TID is stored by reference; copy the struct out */
	*persistentTid = *((ItemPointer) DatumGetPointer(values[Anum_gp_relfile_node_persistent_tid - 1]));

	*persistentSerialNum = DatumGetInt64(values[Anum_gp_relfile_node_persistent_serial_num - 1]);
}