Example #1
void GpPersistentTablespaceNode_GetValues(
	Datum							*values,

	Oid 							*filespaceOid,
	Oid 							*tablespaceOid,
	PersistentFileSysState			*persistentState,
	int64							*createMirrorDataLossTrackingSessionNum,
	MirroredObjectExistenceState	*mirrorExistenceState,
	int32							*reserved,
	TransactionId					*parentXid,
	int64							*persistentSerialNum)
{
    *filespaceOid = DatumGetObjectId(values[Anum_gp_persistent_tablespace_node_filespace_oid - 1]);

    *tablespaceOid = DatumGetObjectId(values[Anum_gp_persistent_tablespace_node_tablespace_oid - 1]);

    *persistentState = DatumGetInt16(values[Anum_gp_persistent_tablespace_node_persistent_state - 1]);

    *createMirrorDataLossTrackingSessionNum = DatumGetInt64(values[Anum_gp_persistent_tablespace_node_create_mirror_data_loss_tracking_session_num - 1]);

    *mirrorExistenceState = DatumGetInt16(values[Anum_gp_persistent_tablespace_node_mirror_existence_state - 1]);

	*reserved = DatumGetInt32(values[Anum_gp_persistent_tablespace_node_reserved - 1]);

	*parentXid = (TransactionId)DatumGetInt32(values[Anum_gp_persistent_tablespace_node_parent_xid - 1]);

    *persistentSerialNum = DatumGetInt64(values[Anum_gp_persistent_tablespace_node_persistent_serial_num - 1]);
}
Example #2
static int
btint2fastcmp(Datum x, Datum y, SortSupport ssup)
{
	int16		a = DatumGetInt16(x);
	int16		b = DatumGetInt16(y);

	return (int) a - (int) b;
}
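A comparator like this is only useful once it is installed into a SortSupport object. A minimal sketch of the usual hook-up, modeled on the sort-support pattern in PostgreSQL's nbtcompare.c (the function name and fmgr boilerplate follow that convention and are not part of the example above):

Datum
btint2sortsupport(PG_FUNCTION_ARGS)
{
	SortSupport	ssup = (SortSupport) PG_GETARG_POINTER(0);

	/* Point the sort machinery at the raw-Datum comparator above. */
	ssup->comparator = btint2fastcmp;
	PG_RETURN_VOID();
}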
Example #3
void GpPersistentRelationNode_GetValues(
	Datum									*values,

	Oid 									*tablespaceOid,
	Oid 									*databaseOid,
	Oid 									*relfilenodeOid,
	int32									*segmentFileNum,
	PersistentFileSysRelStorageMgr			*relationStorageManager,
	PersistentFileSysState					*persistentState,
	int64									*createMirrorDataLossTrackingSessionNum,
	MirroredObjectExistenceState			*mirrorExistenceState,
	MirroredRelDataSynchronizationState 	*mirrorDataSynchronizationState,
	bool									*mirrorBufpoolMarkedForScanIncrementalResync,
	int64									*mirrorBufpoolResyncChangedPageCount,
	XLogRecPtr								*mirrorBufpoolResyncCkptLoc,
	BlockNumber								*mirrorBufpoolResyncCkptBlockNum,
	int64									*mirrorAppendOnlyLossEof,
	int64									*mirrorAppendOnlyNewEof,
	PersistentFileSysRelBufpoolKind 		*relBufpoolKind,
	TransactionId							*parentXid,
	int64									*persistentSerialNum)
{
	*tablespaceOid = DatumGetObjectId(values[Anum_gp_persistent_relation_node_tablespace_oid - 1]);

	*databaseOid = DatumGetObjectId(values[Anum_gp_persistent_relation_node_database_oid - 1]);

	*relfilenodeOid = DatumGetObjectId(values[Anum_gp_persistent_relation_node_relfilenode_oid - 1]);

	*segmentFileNum = DatumGetInt32(values[Anum_gp_persistent_relation_node_segment_file_num - 1]);

	*relationStorageManager = (PersistentFileSysRelStorageMgr)DatumGetInt16(values[Anum_gp_persistent_relation_node_relation_storage_manager - 1]);

	*persistentState = (PersistentFileSysState)DatumGetInt16(values[Anum_gp_persistent_relation_node_persistent_state - 1]);

	*createMirrorDataLossTrackingSessionNum = DatumGetInt64(values[Anum_gp_persistent_relation_node_create_mirror_data_loss_tracking_session_num - 1]);

	*mirrorExistenceState = (MirroredObjectExistenceState)DatumGetInt16(values[Anum_gp_persistent_relation_node_mirror_existence_state - 1]);

	*mirrorDataSynchronizationState = (MirroredRelDataSynchronizationState)DatumGetInt16(values[Anum_gp_persistent_relation_node_mirror_data_synchronization_state - 1]);

	*mirrorBufpoolMarkedForScanIncrementalResync = DatumGetBool(values[Anum_gp_persistent_relation_node_mirror_bufpool_marked_for_scan_incremental_resync - 1]);

	*mirrorBufpoolResyncChangedPageCount = DatumGetInt64(values[Anum_gp_persistent_relation_node_mirror_bufpool_resync_changed_page_count - 1]);

	*mirrorBufpoolResyncCkptLoc = *((XLogRecPtr*) DatumGetPointer(values[Anum_gp_persistent_relation_node_mirror_bufpool_resync_ckpt_loc - 1]));

	*mirrorBufpoolResyncCkptBlockNum = (BlockNumber)DatumGetInt32(values[Anum_gp_persistent_relation_node_mirror_bufpool_resync_ckpt_block_num - 1]);

	*mirrorAppendOnlyLossEof = DatumGetInt64(values[Anum_gp_persistent_relation_node_mirror_append_only_loss_eof - 1]);

	*mirrorAppendOnlyNewEof = DatumGetInt64(values[Anum_gp_persistent_relation_node_mirror_append_only_new_eof - 1]);

	*relBufpoolKind = (PersistentFileSysRelBufpoolKind)DatumGetInt32(values[Anum_gp_persistent_relation_node_relation_bufpool_kind - 1]);

	*parentXid = (TransactionId)DatumGetInt32(values[Anum_gp_persistent_relation_node_parent_xid - 1]);

	*persistentSerialNum = DatumGetInt64(values[Anum_gp_persistent_relation_node_persistent_serial_num - 1]);
}
Example #4
void GpPersistentFilespaceNode_GetValues(
	Datum							*values,

	Oid 							*filespaceOid,
	int16							*dbId1,
	char							locationBlankPadded1[FilespaceLocationBlankPaddedWithNullTermLen],
	int16							*dbId2,
	char							locationBlankPadded2[FilespaceLocationBlankPaddedWithNullTermLen],
	PersistentFileSysState			*persistentState,
	int64							*createMirrorDataLossTrackingSessionNum,
	MirroredObjectExistenceState	*mirrorExistenceState,
	int32							*reserved,
	TransactionId					*parentXid,
	int64							*persistentSerialNum)
{
	char *locationPtr;
	int locationLen;

    *filespaceOid = DatumGetObjectId(values[Anum_gp_persistent_filespace_node_filespace_oid - 1]);

    *dbId1 = DatumGetInt16(values[Anum_gp_persistent_filespace_node_db_id_1 - 1]);

	locationPtr = TextDatumGetCString(values[Anum_gp_persistent_filespace_node_location_1 - 1]);
	locationLen = strlen(locationPtr);
	if (locationLen != FilespaceLocationBlankPaddedWithNullTermLen - 1)
		elog(ERROR, "Expected filespace location 1 to be %d characters and found %d",
			 FilespaceLocationBlankPaddedWithNullTermLen - 1,
			 locationLen);
			 
	memcpy(locationBlankPadded1, locationPtr, FilespaceLocationBlankPaddedWithNullTermLen);

    *dbId2 = DatumGetInt16(values[Anum_gp_persistent_filespace_node_db_id_2 - 1]);

	locationPtr = TextDatumGetCString(values[Anum_gp_persistent_filespace_node_location_2 - 1]);
	locationLen = strlen(locationPtr);
	if (locationLen != FilespaceLocationBlankPaddedWithNullTermLen - 1)
		elog(ERROR, "Expected filespace location 2 to be %d characters and found %d",
			 FilespaceLocationBlankPaddedWithNullTermLen - 1,
			 locationLen);
			 
	memcpy(locationBlankPadded2, locationPtr, FilespaceLocationBlankPaddedWithNullTermLen);

    *persistentState = DatumGetInt16(values[Anum_gp_persistent_filespace_node_persistent_state - 1]);

    *createMirrorDataLossTrackingSessionNum = DatumGetInt64(values[Anum_gp_persistent_filespace_node_create_mirror_data_loss_tracking_session_num - 1]);

    *mirrorExistenceState = DatumGetInt16(values[Anum_gp_persistent_filespace_node_mirror_existence_state - 1]);

	*reserved = DatumGetInt32(values[Anum_gp_persistent_filespace_node_reserved - 1]);

	*parentXid = (TransactionId)DatumGetInt32(values[Anum_gp_persistent_filespace_node_parent_xid - 1]);

    *persistentSerialNum = DatumGetInt64(values[Anum_gp_persistent_filespace_node_persistent_serial_num - 1]);
}
Example #5
void GpPersistentTablespaceNode_GetValues(
	Datum							*values,

	Oid 							*filespaceOid,
	Oid 							*tablespaceOid,
	PersistentFileSysState			*persistentState,
	int32							*reserved,
	TransactionId					*parentXid,
	int64							*persistentSerialNum,
	ItemPointerData 				*previousFreeTid,
	bool							*sharedStorage)
{
    *filespaceOid = DatumGetObjectId(values[Anum_gp_persistent_tablespace_node_filespace_oid - 1]);

    *tablespaceOid = DatumGetObjectId(values[Anum_gp_persistent_tablespace_node_tablespace_oid - 1]);

    *persistentState = DatumGetInt16(values[Anum_gp_persistent_tablespace_node_persistent_state - 1]);

	*reserved = DatumGetInt32(values[Anum_gp_persistent_tablespace_node_reserved - 1]);

	*parentXid = (TransactionId)DatumGetInt32(values[Anum_gp_persistent_tablespace_node_parent_xid - 1]);

    *persistentSerialNum = DatumGetInt64(values[Anum_gp_persistent_tablespace_node_persistent_serial_num - 1]);

    *previousFreeTid = *((ItemPointer) DatumGetPointer(values[Anum_gp_persistent_tablespace_node_previous_free_tid - 1]));

	*sharedStorage = true;

}
Example #6
/*
 * Binary search an array of int16 datums for a match to c
 *
 * On success, *i gets the match location; on failure, it gets where to insert
 */
static bool
searchChar(Datum *nodeLabels, int nNodes, int16 c, int *i)
{
	int			StopLow = 0,
				StopHigh = nNodes;

	while (StopLow < StopHigh)
	{
		int			StopMiddle = (StopLow + StopHigh) >> 1;
		int16		middle = DatumGetInt16(nodeLabels[StopMiddle]);

		if (c < middle)
			StopHigh = StopMiddle;
		else if (c > middle)
			StopLow = StopMiddle + 1;
		else
		{
			*i = StopMiddle;
			return true;
		}
	}

	*i = StopHigh;
	return false;
}
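A hypothetical call site, showing how the out-parameter is used on both the hit and the miss path; nodeLabels, nNodes, and nextByte are assumed to come from the surrounding SP-GiST inner-node code and are not defined in the example above:

	{
		int		position;

		if (searchChar(nodeLabels, nNodes, (int16) nextByte, &position))
		{
			/* exact match: descend into the child node at `position` */
		}
		else
		{
			/* no match: `position` is where a node for nextByte would be inserted */
		}
	}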
Example #7
int64_t
pgr_SPI_getBigInt(HeapTuple *tuple, TupleDesc *tupdesc, Column_info_t info) {
    Datum binval;
    bool isnull;
    int64_t value = 0;
    binval = SPI_getbinval(*tuple, *tupdesc, info.colNumber, &isnull);
    if (isnull)
        elog(ERROR, "Unexpected Null value in column %s", info.name);
    switch (info.type) {
        case INT2OID:
            value = (int64_t) DatumGetInt16(binval);
            break;
        case INT4OID:
            value = (int64_t) DatumGetInt32(binval);
            break;
        case INT8OID:
            value = DatumGetInt64(binval);
            break;
        default:
            elog(ERROR,
                    "Unexpected Column type of %s. Expected ANY-INTEGER",
                    info.name);
    }
    PGR_DBG("Variable: %s Value: %lld", info.name, value);
    return value;
}
Example #8
/*
 * HeapTupleOfForeignConstraintIncludesColumn fetches the columns from the foreign
 * constraint and checks if the given column name matches one of them.
 */
static bool
HeapTupleOfForeignConstraintIncludesColumn(HeapTuple heapTuple, Oid relationId,
										   int pgConstraintKey, char *columnName)
{
	Datum columnsDatum = 0;
	Datum *columnArray = NULL;
	int columnCount = 0;
	int attrIdx = 0;
	bool isNull = false;

	columnsDatum = SysCacheGetAttr(CONSTROID, heapTuple, pgConstraintKey, &isNull);
	deconstruct_array(DatumGetArrayTypeP(columnsDatum), INT2OID, 2, true,
					  's', &columnArray, NULL, &columnCount);

	for (attrIdx = 0; attrIdx < columnCount; ++attrIdx)
	{
		AttrNumber attrNo = DatumGetInt16(columnArray[attrIdx]);

		char *colName = get_relid_attribute_name(relationId, attrNo);
		if (strncmp(colName, columnName, NAMEDATALEN) == 0)
		{
			return true;
		}
	}

	return false;
}
Example #9
/*
 * _bitmap_findvalue() -- find a row in a given heap using
 *  a given index that satisfies the given scan key.
 *
 * If this value exists, this function returns true. Otherwise,
 * returns false.
 *
 * If this value exists in the heap, this function also returns
 * the block number and the offset number that are stored in the same
 * row with this value. This block number and the offset number
 * are for the LOV item that points to the bitmap vector for this value.
 */
bool
_bitmap_findvalue(Relation lovHeap, Relation lovIndex,
				  ScanKey scanKey __attribute__((unused)), IndexScanDesc scanDesc,
				  BlockNumber *lovBlock, bool *blockNull,
				  OffsetNumber *lovOffset, bool *offsetNull)
{
	TupleDesc		tupDesc;
	HeapTuple		tuple;
	bool			found = false;

	tupDesc = RelationGetDescr(lovIndex);

	tuple = index_getnext(scanDesc, ForwardScanDirection);

	if (tuple != NULL)
	{
		TupleDesc 	heapTupDesc;
		Datum 		d;

		found = true;
		heapTupDesc = RelationGetDescr(lovHeap);

		d = heap_getattr(tuple, tupDesc->natts + 1, heapTupDesc, blockNull);
		*lovBlock =	DatumGetInt32(d);
		d = heap_getattr(tuple, tupDesc->natts + 2, heapTupDesc, offsetNull);
		*lovOffset = DatumGetInt16(d);
	}
	return found;
}
Example #10
static int16
getint16(HeapTuple tuple, TupleDesc desc, int column)
{
    bool	isnull;
    Datum	datum = SPI_getbinval(tuple, desc, column, &isnull);
    return isnull ? 0 : DatumGetInt16(datum);
}
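A hypothetical usage sketch, assuming the caller is already inside an SPI_connect()/SPI_finish() block; the query and the column index are only for illustration:

	if (SPI_execute("SELECT attnum FROM pg_attribute LIMIT 1", true, 0) == SPI_OK_SELECT &&
		SPI_processed > 0)
	{
		int16	attnum = getint16(SPI_tuptable->vals[0], SPI_tuptable->tupdesc, 1);

		elog(NOTICE, "first attnum: %d", (int) attnum);
	}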
Example #11
double
pgr_SPI_getFloat8(HeapTuple *tuple, TupleDesc *tupdesc, Column_info_t info) {
    Datum binval;
    bool isnull;
    double value = 0.0;
    binval = SPI_getbinval(*tuple, *tupdesc, info.colNumber, &isnull);
    if (isnull)
        elog(ERROR, "Unexpected Null value in column %s", info.name);

    switch (info.type) {
        case INT2OID:
            value = (double) DatumGetInt16(binval);
            break;
        case INT4OID:
            value = (double) DatumGetInt32(binval);
            break;
        case INT8OID:
            value = (double) DatumGetInt64(binval);
            break;
        case FLOAT4OID:
            value = (double) DatumGetFloat4(binval);
            break;
        case FLOAT8OID:
            value = DatumGetFloat8(binval);
            break;
        default:
            elog(ERROR,
                    "Unexpected Column type of %s. Expected ANY-NUMERICAL",
                    info.name);
    }
    PGR_DBG("Variable: %s Value: %lf", info.name, value);
    return value;
}
Example #12
Datum
orafce_dump(PG_FUNCTION_ARGS)
{
	Oid		valtype = get_fn_expr_argtype(fcinfo->flinfo, 0);
	List	*args;
	int16	typlen;
	bool	typbyval;
	Size	length;
	Datum	value;
	int		format;
	StringInfoData	str;

	if (!fcinfo->flinfo || !fcinfo->flinfo->fn_expr)
		elog(ERROR, "function is called from invalid context");

	if (PG_ARGISNULL(0))
		elog(ERROR, "argument is NULL");

	value = PG_GETARG_DATUM(0);
	format = PG_GETARG_IF_EXISTS(1, INT32, 10);

	args = ((FuncExpr *) fcinfo->flinfo->fn_expr)->args;
	valtype = exprType((Node *) list_nth(args, 0));

	get_typlenbyval(valtype, &typlen, &typbyval);
	length = datumGetSize(value, typbyval, typlen);

	initStringInfo(&str);
	appendStringInfo(&str, "Typ=%d Len=%d: ", valtype, (int) length);

	if (!typbyval)
	{
		appendDatum(&str, DatumGetPointer(value), length, format);
	}
	else if (length <= 1)
	{
		char	v = DatumGetChar(value);
		appendDatum(&str, &v, sizeof(char), format);
	}
	else if (length <= 2)
	{
		int16	v = DatumGetInt16(value);
		appendDatum(&str, &v, sizeof(int16), format);
	}
	else if (length <= 4)
	{
		int32	v = DatumGetInt32(value);
		appendDatum(&str, &v, sizeof(int32), format);
	}
	else
	{
		int64	v = DatumGetInt64(value);
		appendDatum(&str, &v, sizeof(int64), format);
	}

	PG_RETURN_TEXT_P(cstring_to_text(str.data));
}
Example #13
/**
 * Convert postgres Datum into a ConcreteValue object.
 */
AbstractValueSPtr AbstractPGValue::DatumToValue(bool inMemoryIsWritable,
    Oid inTypeID, Datum inDatum) const {
        
    // First check if datum is rowtype
    if (type_is_rowtype(inTypeID)) {
        HeapTupleHeader pgTuple = DatumGetHeapTupleHeader(inDatum);
        return AbstractValueSPtr(new PGValue<HeapTupleHeader>(pgTuple));
    } else if (type_is_array(inTypeID)) {
        ArrayType *pgArray = DatumGetArrayTypeP(inDatum);
        
        if (ARR_NDIM(pgArray) != 1)
            throw std::invalid_argument("Multidimensional arrays not yet supported");
        
        if (ARR_HASNULL(pgArray))
            throw std::invalid_argument("Arrays with NULLs not yet supported");
        
        switch (ARR_ELEMTYPE(pgArray)) {
            case FLOAT8OID: {
                MemHandleSPtr memoryHandle(new PGArrayHandle(pgArray));
                
                if (inMemoryIsWritable) {
                    return AbstractValueSPtr(
                        new ConcreteValue<Array<double> >(
                            Array<double>(memoryHandle,
                                boost::extents[ ARR_DIMS(pgArray)[0] ])
                            )
                        );
                } else {
                    return AbstractValueSPtr(
                        new ConcreteValue<Array_const<double> >(
                            Array_const<double>(memoryHandle,
                                boost::extents[ ARR_DIMS(pgArray)[0] ])
                            )
                        );
                }
            }
        }
    }

    switch (inTypeID) {
        case BOOLOID: return AbstractValueSPtr(
            new ConcreteValue<bool>( DatumGetBool(inDatum) ));
        case INT2OID: return AbstractValueSPtr(
            new ConcreteValue<int16_t>( DatumGetInt16(inDatum) ));
        case INT4OID: return AbstractValueSPtr(
            new ConcreteValue<int32_t>( DatumGetInt32(inDatum) ));
        case INT8OID: return AbstractValueSPtr(
            new ConcreteValue<int64_t>( DatumGetInt64(inDatum) ));
        case FLOAT4OID: return AbstractValueSPtr(
            new ConcreteValue<float>( DatumGetFloat4(inDatum) ));
        case FLOAT8OID: return AbstractValueSPtr(
            new ConcreteValue<double>( DatumGetFloat8(inDatum) ));
    }
    
    return AbstractValueSPtr();
}
Example #14
void GpPersistentFilespaceNode_GetValues(
	Datum							*values,
	Oid 							*filespaceOid,
	int16							*dbId1,
	char							locationBlankPadded1[FilespaceLocationBlankPaddedWithNullTermLen],
	PersistentFileSysState			*persistentState,
	int32							*reserved,
	TransactionId					*parentXid,
	int64							*persistentSerialNum,
	ItemPointerData 				*previousFreeTid,
	bool							*sharedStorage)
{
	char *locationPtr;
	int locationLen;

    *filespaceOid = DatumGetObjectId(values[Anum_gp_persistent_filespace_node_filespace_oid - 1]);

    *dbId1 = DatumGetInt16(values[Anum_gp_persistent_filespace_node_db_id - 1]);

	locationPtr = TextDatumGetCString(values[Anum_gp_persistent_filespace_node_location - 1]);
	locationLen = strlen(locationPtr);
	if (locationLen != FilespaceLocationBlankPaddedWithNullTermLen - 1)
		elog(ERROR, "Expected filespace location to be %d characters and found %d",
			 FilespaceLocationBlankPaddedWithNullTermLen - 1,
			 locationLen);
			 
	memcpy(locationBlankPadded1, locationPtr, FilespaceLocationBlankPaddedWithNullTermLen);

    *persistentState = DatumGetInt16(values[Anum_gp_persistent_filespace_node_persistent_state - 1]);

	*reserved = DatumGetInt32(values[Anum_gp_persistent_filespace_node_reserved - 1]);

	*parentXid = (TransactionId)DatumGetInt32(values[Anum_gp_persistent_filespace_node_parent_xid - 1]);

    *persistentSerialNum = DatumGetInt64(values[Anum_gp_persistent_filespace_node_persistent_serial_num - 1]);

    *previousFreeTid = *((ItemPointer) DatumGetPointer(values[Anum_gp_persistent_filespace_node_previous_free_tid - 1]));

	*sharedStorage = true;

}
Example #15
void GpPersistentRelfileNode_GetValues(
	Datum									*values,

	Oid 									*tablespaceOid,
	Oid 									*databaseOid,
	Oid 									*relfilenodeOid,
	int32									*segmentFileNum,
	PersistentFileSysRelStorageMgr			*relationStorageManager,
	PersistentFileSysState					*persistentState,
	PersistentFileSysRelBufpoolKind 		*relBufpoolKind,
	TransactionId							*parentXid,
	int64									*persistentSerialNum,
	ItemPointerData 						*previousFreeTid,
	bool									*sharedStorage)
{
	*tablespaceOid = DatumGetObjectId(values[Anum_gp_persistent_relfile_node_tablespace_oid - 1]);

	*databaseOid = DatumGetObjectId(values[Anum_gp_persistent_relfile_node_database_oid - 1]);

	*relfilenodeOid = DatumGetObjectId(values[Anum_gp_persistent_relfile_node_relfilenode_oid - 1]);

	*segmentFileNum = DatumGetInt32(values[Anum_gp_persistent_relfile_node_segment_file_num - 1]);

	*relationStorageManager = (PersistentFileSysRelStorageMgr)DatumGetInt16(values[Anum_gp_persistent_relfile_node_relation_storage_manager - 1]);

	*persistentState = (PersistentFileSysState)DatumGetInt16(values[Anum_gp_persistent_relfile_node_persistent_state - 1]);

	*relBufpoolKind = (PersistentFileSysRelBufpoolKind)DatumGetInt32(values[Anum_gp_persistent_relfile_node_relation_bufpool_kind - 1]);

	*parentXid = (TransactionId)DatumGetInt32(values[Anum_gp_persistent_relfile_node_parent_xid - 1]);

	*persistentSerialNum = DatumGetInt64(values[Anum_gp_persistent_relfile_node_persistent_serial_num - 1]);

	*previousFreeTid = *((ItemPointer) DatumGetPointer(values[Anum_gp_persistent_relfile_node_previous_free_tid - 1]));

	*sharedStorage = true;
}
Example #16
/*
 * CheckInMemConstraintsPgAttribute
 * 		Check uniqueness constraints for pg_attribute in-memory tuples upon insert
 */
static void
CheckInMemConstraintsPgAttribute(InMemHeapRelation relation, HeapTuple newTuple)
{
	Assert(NULL != newTuple);
	Assert(NULL != relation); 
	Assert(NULL != relation->rel);

	TupleDesc tupleDesc = relation->rel->rd_att;
	Oid attrelidNew     = DatumGetObjectId(tuple_getattr(newTuple, tupleDesc, Anum_pg_attribute_attrelid));
	char *attnameNew    = DatumGetCString(tuple_getattr(newTuple, tupleDesc, Anum_pg_attribute_attname));
	AttrNumber attnoNew = DatumGetInt16((tuple_getattr(newTuple, tupleDesc, Anum_pg_attribute_attnum)));

	for (int i = 0; i < relation->tupsize; i++)
	{
		HeapTuple tuple = relation->tuples[i].tuple;
		Assert(NULL != tuple);

		Oid attrelid     = DatumGetObjectId(tuple_getattr(tuple, tupleDesc, Anum_pg_attribute_attrelid));
		char *attname    = DatumGetCString(tuple_getattr(tuple, tupleDesc, Anum_pg_attribute_attname));
		AttrNumber attno = DatumGetInt16((tuple_getattr(tuple, tupleDesc, Anum_pg_attribute_attnum)));
		size_t attnameLen = strlen(attname);

		if (attrelid != attrelidNew)
		{
			/* attributes belong to different relations */
			continue;
		}

		insist_log(attno != attnoNew,
			"in-memory tuple with attrelid = %d and attno = %d already exists in pg_attribute.", attrelid, attno);

		insist_log((attnameLen != strlen(attnameNew)) ||
				   (0 != strncmp(attname, attnameNew, attnameLen)),
			"in-memory tuple with attrelid = %d and attname = %s already exists in pg_attribute.", attrelid, attname);
	}
}
Example #17
File: common.c  Project: ssinger/skytools
/*
 * Fill table information in hash table.
 */
static void fill_tbl_info(Relation rel, struct PgqTableInfo *info)
{
	StringInfo pkeys;
	Datum values[1];
	const char *name = find_table_name(rel);
	TupleDesc desc;
	HeapTuple row;
	bool isnull;
	int res, i, attno;

	/* allow reset ASAP, but ignore it in this call */
	info->invalid = false;

	/* load pkeys */
	values[0] = ObjectIdGetDatum(rel->rd_id);
	res = SPI_execute_plan(pkey_plan, values, NULL, false, 0);
	if (res != SPI_OK_SELECT)
		elog(ERROR, "pkey_plan exec failed");

	/*
	 * Fill info
	 */

	desc = SPI_tuptable->tupdesc;
	pkeys = makeStringInfo();
	info->n_pkeys = SPI_processed;
	info->table_name = MemoryContextStrdup(tbl_cache_ctx, name);
	info->pkey_attno = MemoryContextAlloc(tbl_cache_ctx, info->n_pkeys * sizeof(int));

	for (i = 0; i < SPI_processed; i++) {
		row = SPI_tuptable->vals[i];

		attno = DatumGetInt16(SPI_getbinval(row, desc, 1, &isnull));
		name = SPI_getvalue(row, desc, 2);
		info->pkey_attno[i] = attno;
		if (i > 0)
			appendStringInfoChar(pkeys, ',');
		appendStringInfoString(pkeys, name);
	}
	info->pkey_list = MemoryContextStrdup(tbl_cache_ctx, pkeys->data);
	info->tg_cache = NULL;
}
Example #18
static DTYPE *get_pgarray(int *num, ArrayType *input)
{
    int         ndims, *dims, *lbs;
    bool       *nulls;
    Oid         i_eltype;
    int16       i_typlen;
    bool        i_typbyval;
    char        i_typalign;
    Datum      *i_data;
    int         i, n;
    DTYPE      *data;

    /* get input array element type */
    i_eltype = ARR_ELEMTYPE(input);
    get_typlenbyvalalign(i_eltype, &i_typlen, &i_typbyval, &i_typalign);


    /* validate input data type */
    switch(i_eltype) {
    case INT2OID:
    case INT4OID:
    case FLOAT4OID:
    case FLOAT8OID:
        break;
    default:
        elog(ERROR, "Invalid input data type");
        break;
    }

    /* get various pieces of data from the input array */
    ndims = ARR_NDIM(input);
    dims = ARR_DIMS(input);
    lbs = ARR_LBOUND(input);

    if (ndims != 2 || dims[0] != dims[1]) {
        elog(ERROR, "Error: matrix[num][num] in its definition.");
    }

    /* get src data */
    deconstruct_array(input, i_eltype, i_typlen, i_typbyval, i_typalign,
                      &i_data, &nulls, &n);

    DBG("get_pgarray: ndims=%d, n=%d", ndims, n);

#ifdef DEBUG
    for (i=0; i<ndims; i++) {
        DBG("   dims[%d]=%d, lbs[%d]=%d", i, dims[i], i, lbs[i]);
    }
#endif

    /* construct a C array */
    data = (DTYPE *) palloc(n * sizeof(DTYPE));
    if (!data) {
        elog(ERROR, "Error: Out of memory!");
    }

    for (i=0; i<n; i++) {
        if (nulls[i]) {
            data[i] = INFINITY;
        }
        else {
            switch(i_eltype) {
            case INT2OID:
                data[i] = (DTYPE) DatumGetInt16(i_data[i]);
                break;
            case INT4OID:
                data[i] = (DTYPE) DatumGetInt32(i_data[i]);
                break;
            case FLOAT4OID:
                data[i] = (DTYPE) DatumGetFloat4(i_data[i]);
                break;
            case FLOAT8OID:
                data[i] = (DTYPE) DatumGetFloat8(i_data[i]);
                break;
            }
            /* we assume negative values are INFINITY */
            /********************************************************
               TODO: based on trying to add an endpt it is likely
               that this will not work and you will get an error in
               findEulerianPath
            **********************************************************/
            if (data[i] < 0)
                data[i] = INFINITY;
        }
        DBG("    data[%d]=%.4f", i, data[i]);
    }

    pfree(nulls);
    pfree(i_data);

    *num = dims[0];

    return data;
}
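A hypothetical caller, assuming a pgRouting-style SQL-callable function that receives the matrix as its first argument; DTYPE and the fcinfo context are taken from the surrounding pgRouting code and only sketched here:

	ArrayType  *matrix_arr = PG_GETARG_ARRAYTYPE_P(0);
	int			num_nodes;

	/* Flatten the num x num SQL array into a C array of edge costs. */
	DTYPE	   *matrix = get_pgarray(&num_nodes, matrix_arr);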
Example #19
File: pguri.c  Project: dylex/huzblem
static void *uri_char(HeapTupleHeader ud, bool hdr, bool term)
{
	TupleDesc td;
	HeapTupleData tuple;
	Datum d[URI_LEN];
	bool n[URI_LEN];
	text *scheme = NULL, *host = NULL, *path = NULL;
	int16 port;
	char portbuf[8];
	unsigned schemelen = 0, hostlen = 0, portlen = 0, pathlen = 0;
	unsigned len;
	void *out;
	char *p;

	td = lookup_rowtype_tupdesc(HeapTupleHeaderGetTypeId(ud), HeapTupleHeaderGetTypMod(ud));
	tuple.t_len = HeapTupleHeaderGetDatumLength(ud);
	ItemPointerSetInvalid(&(tuple.t_self));
	tuple.t_tableOid = InvalidOid;
	tuple.t_data = ud;
	heap_deform_tuple(&tuple, td, d, n);
	ReleaseTupleDesc(td);

	if (!n[URI_SCHEME])
	{
		scheme = DatumGetTextP(d[URI_SCHEME]);
		schemelen = VARSIZE_ANY_EXHDR(scheme);
	}
	if (!n[URI_HOST])
	{
		host = DatumGetTextP(d[URI_HOST]);
		hostlen = VARSIZE_ANY_EXHDR(host);
	}
	if (!n[URI_PORT])
	{
		port = DatumGetInt16(d[URI_PORT]);
		portlen = snprintf(portbuf, sizeof(portbuf)-1, ":%hu", port);
	}
	if (!n[URI_PATH])
	{
		path = DatumGetTextP(d[URI_PATH]);
		pathlen = VARSIZE_ANY_EXHDR(path);
	}

	len = (hdr ? VARHDRSZ : 0) + schemelen + (scheme ? 3 : 0) + hostlen + portlen + pathlen + term;
	out = palloc(len);
	if (hdr)
		SET_VARSIZE(out, len);
	p = hdr ? VARDATA(out) : out;

	if (scheme)
	{
		memcpy(p, VARDATA(scheme), schemelen);
		p += schemelen;
		*p++ = ':';
		*p++ = '/';
		*p++ = '/';
	}
	if (host)
	{
		domainname_flip(p, VARDATA(host), hostlen);
		p += hostlen;
	}
	memcpy(p, portbuf, portlen);
	p += portlen;
	if (path)
	{
		memcpy(p, VARDATA(path), pathlen);
		p += pathlen;
	}
	if (term)
		*p = '\0';

	return out;
}
Example #20
/*
 * Add an attribute to the hash calculation.
 * **IMPORTANT: any new hard coded support for a data type in here
 * must be added to isGreenplumDbHashable() below!
 *
 * Note that the caller should provide the base type if the datum is
 * of a domain type. It is quite expensive to call get_typtype() and
 * getBaseType() here since this function gets called a lot for the
 * same set of Datums.
 *
 * @param hashFn called to update the hash value.
 * @param clientData passed to hashFn.
 */
void
hashDatum(Datum datum, Oid type, datumHashFunction hashFn, void *clientData)
{

	void	   *buf = NULL;		/* pointer to the data */
	size_t		len = 0;		/* length for the data buffer */
	
	int64		intbuf;			/* an 8 byte buffer for all integer sizes */
		
	float4		buf_f4;
	float8		buf_f8;
	Timestamp	tsbuf;			/* timestamp data type is either a double or
								 * an int8 (determined at compile time) */
	TimestampTz tstzbuf;
	DateADT		datebuf;
	TimeADT		timebuf;
	TimeTzADT  *timetzptr;
	Interval   *intervalptr;
	AbsoluteTime abstime_buf;
	RelativeTime reltime_buf;
	TimeInterval tinterval;
	AbsoluteTime tinterval_len;
	
	Numeric		num;
	bool		bool_buf;
	char        char_buf;
	Name		namebuf;
	
	ArrayType  *arrbuf;
	inet		 *inetptr; /* inet/cidr */
	unsigned char inet_hkey[sizeof(inet_struct)];
	macaddr		*macptr; /* MAC address */
	
	VarBit		*vbitptr;
	
	int2vector *i2vec_buf;
	oidvector  *oidvec_buf;
	
	Cash		cash_buf;
	AclItem	   *aclitem_ptr;
	uint32		aclitem_buf;
	
	/*
	 * special case buffers
	 */
	uint32		nanbuf;
	uint32		invalidbuf;

	void *tofree = NULL;

	/*
	 * Select the hash to be performed according to the field type we are adding to the
	 * hash.
	 */
	switch (type)
	{
		/*
		 * ======= NUMERIC TYPES ========
		 */
		case INT2OID:			/* -32 thousand to 32 thousand, 2-byte storage */
			intbuf = (int64) DatumGetInt16(datum);		/* cast to 8 byte before
														 * hashing */
			buf = &intbuf;
			len = sizeof(intbuf);
			break;

		case INT4OID:			/* -2 billion to 2 billion integer, 4-byte
								 * storage */
			intbuf = (int64) DatumGetInt32(datum);		/* cast to 8 byte before
														 * hashing */
			buf = &intbuf;
			len = sizeof(intbuf);
			break;
			
		case INT8OID:			/* ~18 digit integer, 8-byte storage */
			intbuf = DatumGetInt64(datum);		/* cast to 8 byte before
												 * hashing */
			buf = &intbuf;
			len = sizeof(intbuf);
			break;

		case FLOAT4OID: /* single-precision floating point number,
								 * 4-byte storage */
			buf_f4 = DatumGetFloat4(datum);

			/*
			 * On IEEE-float machines, minus zero and zero have different bit
			 * patterns but should compare as equal.  We must ensure that they
			 * have the same hash value, which is most easily done this way:
			 */
			if (buf_f4 == (float4) 0)
				buf_f4 = 0.0;

			buf = &buf_f4;
			len = sizeof(buf_f4);
			break;

		case FLOAT8OID: /* double-precision floating point number,
								 * 8-byte storage */
			buf_f8 = DatumGetFloat8(datum);

			/*
			 * On IEEE-float machines, minus zero and zero have different bit
			 * patterns but should compare as equal.  We must ensure that they
			 * have the same hash value, which is most easily done this way:
			 */
			if (buf_f8 == (float8) 0)
				buf_f8 = 0.0;

			buf = &buf_f8;
			len = sizeof(buf_f8);
			break;

		case NUMERICOID:

			num = DatumGetNumeric(datum);

			if (NUMERIC_IS_NAN(num))
			{
				nanbuf = NAN_VAL;
				buf = &nanbuf;
				len = sizeof(nanbuf);
			}
			else
				/* not a nan */
			{
				buf = num->n_data;
				len = (VARSIZE(num) - NUMERIC_HDRSZ);
			}

            /* 
             * If we did a pg_detoast_datum, we need to remember to pfree, 
             * or we will leak memory.  Because of the 1-byte varlena header stuff.
             */
            if (num != DatumGetPointer(datum)) 
                tofree = num;

			break;
		
		/*
		 * ====== CHARACTER TYPES =======
		 */
		case CHAROID:			/* char(1), single character */
			char_buf = DatumGetChar(datum);
			buf = &char_buf;
			len = 1;
			break;

		case BPCHAROID: /* char(n), blank-padded string, fixed storage */
		case TEXTOID:   /* text */
		case VARCHAROID: /* varchar */ 
		case BYTEAOID:   /* bytea */
			{
				int tmplen;
				varattrib_untoast_ptr_len(datum, (char **) &buf, &tmplen, &tofree);
				/* adjust length to not include trailing blanks */
				if (type != BYTEAOID && tmplen > 1)
					tmplen = ignoreblanks((char *) buf, tmplen);

				len = tmplen;
				break;
			}

		case NAMEOID:
			namebuf = DatumGetName(datum);
			len = NAMEDATALEN;
			buf = NameStr(*namebuf);

			/* adjust length to not include trailing blanks */
			if (len > 1)
				len = ignoreblanks((char *) buf, len);
			break;
		
		/*
		 * ====== OBJECT IDENTIFIER TYPES ======
		 */
		case OIDOID:				/* object identifier(oid), maximum 4 billion */
		case REGPROCOID:			/* function name */
		case REGPROCEDUREOID:		/* function name with argument types */
		case REGOPEROID:			/* operator name */
		case REGOPERATOROID:		/* operator with argument types */
		case REGCLASSOID:			/* relation name */
		case REGTYPEOID:			/* data type name */
			intbuf = (int64) DatumGetUInt32(datum);	/* cast to 8 byte before hashing */
			buf = &intbuf;
			len = sizeof(intbuf);
			break;

        case TIDOID:                /* tuple id (6 bytes) */
            buf = DatumGetPointer(datum);
            len = SizeOfIptrData;
            break;
			
		/*
		 * ====== DATE/TIME TYPES ======
		 */
		case TIMESTAMPOID:		/* date and time */
			tsbuf = DatumGetTimestamp(datum);
			buf = &tsbuf;
			len = sizeof(tsbuf);
			break;

		case TIMESTAMPTZOID:	/* date and time with time zone */
			tstzbuf = DatumGetTimestampTz(datum);
			buf = &tstzbuf;
			len = sizeof(tstzbuf);
			break;

		case DATEOID:			/* ANSI SQL date */
			datebuf = DatumGetDateADT(datum);
			buf = &datebuf;
			len = sizeof(datebuf);
			break;

		case TIMEOID:			/* hh:mm:ss, ANSI SQL time */
			timebuf = DatumGetTimeADT(datum);
			buf = &timebuf;
			len = sizeof(timebuf);
			break;

		case TIMETZOID: /* time with time zone */
			
			/*
			 * will not compare equal to TIMEOID even for equal values.
			 * Postgres never attempts to compare the two either.
			 */
			timetzptr = DatumGetTimeTzADTP(datum);
			buf = (unsigned char *) timetzptr;
			
			/*
			 * Specify hash length as sizeof(double) + sizeof(int4), not as
			 * sizeof(TimeTzADT), so that any garbage pad bytes in the structure
			 * won't be included in the hash!
			 */
			len = sizeof(timetzptr->time) + sizeof(timetzptr->zone);
			break;

		case INTERVALOID:		/* @ <number> <units>, time interval */
			intervalptr = DatumGetIntervalP(datum);
			buf = (unsigned char *) intervalptr;
			/*
			 * Specify hash length as sizeof(double) + sizeof(int4), not as
			 * sizeof(Interval), so that any garbage pad bytes in the structure
			 * won't be included in the hash!
			 */
			len = sizeof(intervalptr->time) + sizeof(intervalptr->month);
			break;
			
		case ABSTIMEOID:
			abstime_buf = DatumGetAbsoluteTime(datum);
			
			if (abstime_buf == INVALID_ABSTIME)
			{
				/* hash to a constant value */
				invalidbuf = INVALID_VAL;
				len = sizeof(invalidbuf);
				buf = &invalidbuf;
			}
			else
			{
				len = sizeof(abstime_buf);
				buf = &abstime_buf;
			}
					
			break;

		case RELTIMEOID:
			reltime_buf = DatumGetRelativeTime(datum);
			
			if (reltime_buf == INVALID_RELTIME)
			{
				/* hash to a constant value */
				invalidbuf = INVALID_VAL;
				len = sizeof(invalidbuf);
				buf = &invalidbuf;
			}
			else
			{
				len = sizeof(reltime_buf);
				buf = &reltime_buf;
			}
				
			break;
			
		case TINTERVALOID:
			tinterval = DatumGetTimeInterval(datum);
			
			/*
			 * check if a valid interval. the '0' status code
			 * stands for T_INTERVAL_INVAL which is defined in
			 * nabstime.c. We use the actual value instead
			 * of defining it again here.
			 */
			if(tinterval->status == 0 ||
			   tinterval->data[0] == INVALID_ABSTIME ||
			   tinterval->data[1] == INVALID_ABSTIME)
			{
				/* hash to a constant value */
				invalidbuf = INVALID_VAL;
				len = sizeof(invalidbuf);
				buf = &invalidbuf;				
			}
			else
			{
				/* normalize on length of the time interval */
				tinterval_len = tinterval->data[1] -  tinterval->data[0];
				len = sizeof(tinterval_len);
				buf = &tinterval_len;	
			}

			break;
			
		/*
		 * ======= NETWORK TYPES ========
		 */
		case INETOID:
		case CIDROID:
			
			inetptr = DatumGetInetP(datum);
			len = inet_getkey(inetptr, inet_hkey, sizeof(inet_hkey)); /* fill-in inet_key & get len */
			buf = inet_hkey;
			break;
		
		case MACADDROID:
			
			macptr = DatumGetMacaddrP(datum);
			len = sizeof(macaddr);
			buf = (unsigned char *) macptr;
			break;
			
		/*
		 * ======== BIT STRINGS ========
		 */
		case BITOID:
		case VARBITOID:
			
			/*
			 * Note that these are essentially strings.
			 * We don't need to worry about '10' and '010'
			 * comparing as equal, because by design they do not
			 * (see the SQL standard, and varbit.c).
			 */
			vbitptr = DatumGetVarBitP(datum);
			len = VARBITBYTES(vbitptr);
			buf = (char *) VARBITS(vbitptr);
			break;

		/*
		 * ======= other types =======
		 */
		case BOOLOID:			/* boolean, 'true'/'false' */
			bool_buf = DatumGetBool(datum);
			buf = &bool_buf;
			len = sizeof(bool_buf);
			break;
			
		/*
		 * We prepare the hash key for aclitems just like postgresql does.
		 * (see code and comment in acl.c: hash_aclitem() ).
		 */
		case ACLITEMOID:
			aclitem_ptr = DatumGetAclItemP(datum);
			aclitem_buf = (uint32) (aclitem_ptr->ai_privs + aclitem_ptr->ai_grantee + aclitem_ptr->ai_grantor);
			buf = &aclitem_buf;
			len = sizeof(aclitem_buf);
			break;
			
		/*
		 * ANYARRAY is a pseudo-type. We use it to include
		 * any of the array types (OIDs 1007-1033 in pg_type.h).
		 * caller needs to be sure the type is ANYARRAYOID
		 * before calling cdbhash on an array (INSERT and COPY do so).
		 */
		case ANYARRAYOID:	
					
			arrbuf = DatumGetArrayTypeP(datum);
			len = VARSIZE(arrbuf) - VARHDRSZ;
			buf = VARDATA(arrbuf);
			break;
			
		case INT2VECTOROID:
			i2vec_buf = (int2vector *) DatumGetPointer(datum);
			len = i2vec_buf->dim1 * sizeof(int2);
			buf = (void *)i2vec_buf->values;
			break;
			
		case OIDVECTOROID:	
			oidvec_buf = (oidvector *) DatumGetPointer(datum);
			len = oidvec_buf->dim1 * sizeof(Oid);
			buf = oidvec_buf->values;
			break;
			
		case CASHOID: /* cash is stored in int32 internally */
			cash_buf = (* (Cash *)DatumGetPointer(datum));
			len = sizeof(Cash);
			buf = &cash_buf;
			break;
				
		default:
			ereport(ERROR,
					(errcode(ERRCODE_CDB_FEATURE_NOT_YET),
					 errmsg("Type %u is not hashable.", type)));

	}							/* switch(type) */

	/* do the hash using the selected algorithm */
	hashFn(clientData, buf, len);
	if(tofree)
		pfree(tofree);
}
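The hashFn callback receives only a byte buffer and its length (see the final hashFn(clientData, buf, len) call above). A hypothetical callback, assumed to carry a running uint32 hash in clientData, that folds the bytes in with FNV-1a; it matches the call site above but is not the actual Greenplum hash function:

static void
fnv1a_hash_bytes(void *clientData, void *buf, size_t len)
{
	uint32		   *hash = (uint32 *) clientData;
	unsigned char  *bytes = (unsigned char *) buf;
	size_t			i;

	for (i = 0; i < len; i++)
	{
		*hash ^= bytes[i];
		*hash *= 16777619;		/* 32-bit FNV-1a prime */
	}
}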
Example #21
File: jsontriga.c  Project: pgq/pgq
static void pgq_jsonenc_row(PgqTriggerEvent *ev, HeapTuple row, StringInfo buf)
{
	Oid col_type;
	Datum col_datum;
	bool isnull;
	TriggerData *tg = ev->tgdata;
	TupleDesc tupdesc = tg->tg_relation->rd_att;
	bool first = true;
	int i;
	const char *col_ident, *col_value;
	int attkind_idx = -1;

	if (ev->op_type == 'R') {
		appendStringInfoString(buf, "{}");
		return;
	}

	appendStringInfoChar(buf, '{');
	for (i = 0; i < tg->tg_relation->rd_att->natts; i++) {
		/* Skip dropped columns */
		if (TupleDescAttr(tupdesc, i)->attisdropped)
			continue;

		attkind_idx++;

		if (pgqtriga_skip_col(ev, i, attkind_idx))
			continue;

		if (first)
			first = false;
		else
			appendStringInfoChar(buf, ',');

		/* quote column name */
		col_ident = SPI_fname(tupdesc, i + 1);
		pgq_encode_cstring(buf, col_ident, TBUF_QUOTE_JSON);
		appendStringInfoChar(buf, ':');

		/* quote column value */
		col_type = TupleDescAttr(tupdesc, i)->atttypid;
		col_datum = SPI_getbinval(row, tupdesc, i + 1, &isnull);
		col_value = NULL;
		if (isnull) {
			appendStringInfoString(buf, "null");
			continue;
		}

		switch (col_type) {
		case BOOLOID:
			if (DatumGetBool(col_datum)) {
				appendStringInfoString(buf, "true");
			} else {
				appendStringInfoString(buf, "false");
			}
			break;

		case TIMESTAMPOID:
			timestamp_to_json(col_datum, buf);
			break;

		case TIMESTAMPTZOID:
			timestamptz_to_json(col_datum, buf);
			break;

		case DATEOID:
			date_to_json(col_datum, buf);
			break;

		case INT2OID:
			appendStringInfo(buf, "%d", (int)DatumGetInt16(col_datum));
			break;

		case INT4OID:
			appendStringInfo(buf, "%d", (int)DatumGetInt32(col_datum));
			break;

		case INT8OID:
			col_value = SPI_getvalue(row, tupdesc, i + 1);
			appendStringInfoString(buf, col_value);
			break;

		default:
			col_value = SPI_getvalue(row, tupdesc, i + 1);
			pgq_encode_cstring(buf, col_value, TBUF_QUOTE_JSON);
			break;
		}

		if (col_value)
			pfree((void*)col_value);
	}
	appendStringInfoChar(buf, '}');
}
Example #22
static void
getAddressesForDBid(CdbComponentDatabaseInfo *c, int elevel)
{
	Relation	gp_db_interface_rel;
	Relation	gp_interface_rel;
	HeapTuple	tuple;
	ScanKeyData	key;
	SysScanDesc	dbscan, ifacescan;

	int			j, i=0;
	struct priority_iface *ifaces=NULL;
	int			iface_count, iface_max=0;

	Datum		attr;
	bool		isNull;

	int			dbid;
	int			iface_id;
	int			priority;

	char		*name;

	Assert(c != NULL);
		
	gp_db_interface_rel = heap_open(GpDbInterfacesRelationId, AccessShareLock);

	/* CaQL UNDONE: no test coverage */
	ScanKeyInit(&key, Anum_gp_db_interfaces_dbid,
				BTEqualStrategyNumber, F_INT2EQ,
				ObjectIdGetDatum(0));

	dbscan = systable_beginscan(gp_db_interface_rel, GpDbInterfacesDbidIndexId,
								true, SnapshotNow, 1, &key);

	while (HeapTupleIsValid(tuple = systable_getnext(dbscan)))
	{
		i++;
		if (i > iface_max)
		{
			/* allocate 8-more slots */
			if (ifaces == NULL)
				ifaces = palloc((iface_max + 8) * sizeof(struct priority_iface));
			else
				ifaces = repalloc(ifaces, (iface_max + 8) * sizeof(struct priority_iface));

			memset(ifaces + iface_max, 0, 8 * sizeof(struct priority_iface));
			iface_max += 8;
		}

		/* dbid is for sanity-check on scan condition only */
		attr = heap_getattr(tuple, Anum_gp_db_interfaces_dbid, gp_db_interface_rel->rd_att, &isNull);
		Assert(!isNull);
		dbid = DatumGetInt16(attr);
		//Assert(dbid == c->dbid);

		attr = heap_getattr(tuple, Anum_gp_db_interfaces_interfaceid, gp_db_interface_rel->rd_att, &isNull);
		Assert(!isNull);
		iface_id = DatumGetInt16(attr);

		attr = heap_getattr(tuple, Anum_gp_db_interfaces_priority, gp_db_interface_rel->rd_att, &isNull);
		Assert(!isNull);
		priority = DatumGetInt16(attr);

		ifaces[i-1].priority = priority;
		ifaces[i-1].interface_id = iface_id;
	}
	iface_count = i;

	/* Finish up scan and close appendonly catalog. */
	systable_endscan(dbscan);

	heap_close(gp_db_interface_rel, AccessShareLock);

	/* we now have the unsorted list, or an empty list. */
	do
	{
		/* fallback to using hostname if our list is empty */
		if (iface_count == 0)
			break;

		qsort(ifaces, iface_count, sizeof(struct priority_iface), iface_priority_compare);

		/* we now have interfaces, sorted by priority. */

		gp_interface_rel = heap_open(GpInterfacesRelationId, AccessShareLock);

		j=0;
		for (i=0; i < iface_count; i++)
		{
			int status=0;

			/* CaQL UNDONE: no test coverage */
			/* Start a new scan. */
			ScanKeyInit(&key, Anum_gp_interfaces_interfaceid,
						BTEqualStrategyNumber, F_INT2EQ,
						ObjectIdGetDatum(ifaces[i].interface_id));

			ifacescan = systable_beginscan(gp_interface_rel, GpInterfacesInterfaceidIndexId,
										   true, SnapshotNow, 1, &key);

			tuple = systable_getnext(ifacescan);

			Assert(HeapTupleIsValid(tuple));

			/* iface_id is for sanity-check on scan condition only */
			attr = heap_getattr(tuple, Anum_gp_interfaces_interfaceid, gp_interface_rel->rd_att, &isNull);
			Assert(!isNull);
			iface_id = DatumGetInt16(attr);
			Assert(iface_id == ifaces[i].interface_id); 

			attr = heap_getattr(tuple, Anum_gp_interfaces_status, gp_interface_rel->rd_att, &isNull);
			Assert(!isNull);
			status = DatumGetInt16(attr);

			/* if the status is "alive" use the interface. */
			if (status == 1)
			{
				attr = heap_getattr(tuple, Anum_gp_interfaces_address, gp_interface_rel->rd_att, &isNull);
				Assert(!isNull);
				name = getDnsCachedAddress(DatumGetCString(attr), c->port, elevel);
				if (name)
					c->hostaddrs[j++] = pstrdup(name);
			}

			systable_endscan(ifacescan);
		}

		heap_close(gp_interface_rel, AccessShareLock);

		/* fallback to using hostname if our list is empty */
		if (j == 0)
			break;

		/* successfully retrieved at least one entry. */

		return;
	}
	while (0);

	/* fallback to using hostname */
	memset(c->hostaddrs, 0, COMPONENT_DBS_MAX_ADDRS * sizeof(char *));

	/*
	 * add an entry, using first the "address" and then the
	 * "hostname" as a fallback.
	 */
	name = getDnsCachedAddress(c->address, c->port, elevel);

	if (name)
	{
		c->hostaddrs[0] = pstrdup(name);
		return;
	}

	/* now the hostname. */
	name = getDnsCachedAddress(c->hostname, c->port, elevel);
	if (name)
	{
		c->hostaddrs[0] = pstrdup(name);
	}
	else
	{
		c->hostaddrs[0] = NULL;
	}

	return;
}
Example #23
/*
 * ErrorIfUnsupportedForeignConstraint runs checks related to foreign constraints and
 * errors out if it is not possible to create one of the foreign constraint in distributed
 * environment.
 *
 * To support foreign constraints, we require that;
 * - If referencing and referenced tables are hash-distributed
 *		- Referencing and referenced tables are co-located.
 *      - Foreign constraint is defined over distribution column.
 *		- ON DELETE/UPDATE SET NULL, ON DELETE/UPDATE SET DEFAULT and ON UPDATE CASCADE options
 *        are not used.
 *      - Replication factors of referencing and referenced table are 1.
 * - If referenced table is a reference table
 *      - ON DELETE/UPDATE SET NULL, ON DELETE/UPDATE SET DEFAULT and ON UPDATE CASCADE options
 *        are not used on the distribution key of the referencing column.
 * - If referencing table is a reference table, error out
 */
void
ErrorIfUnsupportedForeignConstraint(Relation relation, char distributionMethod,
									Var *distributionColumn, uint32 colocationId)
{
	Relation pgConstraint = NULL;
	SysScanDesc scanDescriptor = NULL;
	ScanKeyData scanKey[1];
	int scanKeyCount = 1;
	HeapTuple heapTuple = NULL;

	Oid referencingTableId = relation->rd_id;
	Oid referencedTableId = InvalidOid;
	uint32 referencedTableColocationId = INVALID_COLOCATION_ID;
	Var *referencedTablePartitionColumn = NULL;

	Datum referencingColumnsDatum = 0;
	Datum *referencingColumnArray = NULL;
	int referencingColumnCount = 0;
	Datum referencedColumnsDatum = 0;
	Datum *referencedColumnArray = NULL;
	int referencedColumnCount = 0;
	bool isNull = false;
	int attrIdx = 0;
	bool foreignConstraintOnPartitionColumn = false;
	bool selfReferencingTable = false;
	bool referencedTableIsAReferenceTable = false;
	bool referencingColumnsIncludeDistKey = false;

	pgConstraint = heap_open(ConstraintRelationId, AccessShareLock);
	ScanKeyInit(&scanKey[0], Anum_pg_constraint_conrelid, BTEqualStrategyNumber, F_OIDEQ,
				relation->rd_id);
	scanDescriptor = systable_beginscan(pgConstraint, ConstraintRelidIndexId, true, NULL,
										scanKeyCount, scanKey);

	heapTuple = systable_getnext(scanDescriptor);
	while (HeapTupleIsValid(heapTuple))
	{
		Form_pg_constraint constraintForm = (Form_pg_constraint) GETSTRUCT(heapTuple);
		bool singleReplicatedTable = true;

		if (constraintForm->contype != CONSTRAINT_FOREIGN)
		{
			heapTuple = systable_getnext(scanDescriptor);
			continue;
		}

		/*
		 * We should make this check in this loop because the error message will only
		 * be given if the table has a foreign constraint and the table is a reference
		 * table.
		 */
		if (distributionMethod == DISTRIBUTE_BY_NONE)
		{
			ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
							errmsg("cannot create foreign key constraint because "
								   "reference tables are not supported as the "
								   "referencing table of a foreign constraint"),
							errdetail("Reference tables are only supported as the "
									  "referenced table of a foreign key when the "
									  "referencing table is a hash distributed "
									  "table")));
		}

		referencedTableId = constraintForm->confrelid;
		selfReferencingTable = referencingTableId == referencedTableId;

		/*
		 * Some checks are not meaningful if foreign key references the table itself.
		 * Therefore we will skip those checks.
		 */
		if (!selfReferencingTable)
		{
			if (!IsDistributedTable(referencedTableId))
			{
				ereport(ERROR, (errcode(ERRCODE_INVALID_TABLE_DEFINITION),
								errmsg("cannot create foreign key constraint"),
								errdetail("Referenced table must be a distributed "
										  "table.")));
			}

			/*
			 * PartitionMethod errors out when it is called for non-distributed
			 * tables. This is why we make this check under !selfReferencingTable
			 * and after !IsDistributedTable(referencedTableId).
			 */
			if (PartitionMethod(referencedTableId) == DISTRIBUTE_BY_NONE)
			{
				referencedTableIsAReferenceTable = true;
			}

			/*
			 * To enforce foreign constraints, tables must be co-located unless a
			 * reference table is referenced.
			 */
			referencedTableColocationId = TableColocationId(referencedTableId);
			if (colocationId == INVALID_COLOCATION_ID ||
				(colocationId != referencedTableColocationId &&
				 !referencedTableIsAReferenceTable))
			{
				ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
								errmsg("cannot create foreign key constraint since "
									   "relations are not colocated or not referencing "
									   "a reference table"),
								errdetail(
									"A distributed table can only have foreign keys "
									"if it is referencing another colocated hash "
									"distributed table or a reference table")));
			}

			referencedTablePartitionColumn = DistPartitionKey(referencedTableId);
		}
		else
		{
			/*
			 * If the referenced table is not a reference table, the distribution
			 * column in referencing table should be the distribution column in
			 * referenced table as well.
			 */
			referencedTablePartitionColumn = distributionColumn;
		}

		/*
		 * Column attributes are not available in Form_pg_constraint, therefore we need
		 * to find them in the system catalog. After finding them, we iterate over column
		 * attributes together because partition column must be at the same place in both
		 * referencing and referenced side of the foreign key constraint
		 */
		referencingColumnsDatum = SysCacheGetAttr(CONSTROID, heapTuple,
												  Anum_pg_constraint_conkey, &isNull);
		referencedColumnsDatum = SysCacheGetAttr(CONSTROID, heapTuple,
												 Anum_pg_constraint_confkey, &isNull);

		deconstruct_array(DatumGetArrayTypeP(referencingColumnsDatum), INT2OID, 2, true,
						  's', &referencingColumnArray, NULL, &referencingColumnCount);
		deconstruct_array(DatumGetArrayTypeP(referencedColumnsDatum), INT2OID, 2, true,
						  's', &referencedColumnArray, NULL, &referencedColumnCount);

		Assert(referencingColumnCount == referencedColumnCount);

		for (attrIdx = 0; attrIdx < referencingColumnCount; ++attrIdx)
		{
			AttrNumber referencingAttrNo = DatumGetInt16(referencingColumnArray[attrIdx]);
			AttrNumber referencedAttrNo = DatumGetInt16(referencedColumnArray[attrIdx]);

			if (distributionColumn->varattno == referencingAttrNo &&
				(!referencedTableIsAReferenceTable &&
				 referencedTablePartitionColumn->varattno == referencedAttrNo))
			{
				foreignConstraintOnPartitionColumn = true;
			}

			if (distributionColumn->varattno == referencingAttrNo)
			{
				referencingColumnsIncludeDistKey = true;
			}
		}


		/*
		 * If columns in the foreign key includes the distribution key from the
		 * referencing side, we do not allow update/delete operations through
		 * foreign key constraints (e.g. ... ON UPDATE SET NULL)
		 */

		if (referencingColumnsIncludeDistKey)
		{
			/*
			 * ON DELETE SET NULL and ON DELETE SET DEFAULT is not supported. Because we do
			 * not want to set partition column to NULL or default value.
			 */
			if (constraintForm->confdeltype == FKCONSTR_ACTION_SETNULL ||
				constraintForm->confdeltype == FKCONSTR_ACTION_SETDEFAULT)
			{
				ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
								errmsg("cannot create foreign key constraint"),
								errdetail("SET NULL or SET DEFAULT is not supported"
										  " in ON DELETE operation when distribution "
										  "key is included in the foreign key constraint")));
			}

			/*
			 * ON UPDATE SET NULL, ON UPDATE SET DEFAULT and UPDATE CASCADE is not supported.
			 * Because we do not want to set partition column to NULL or default value. Also
			 * cascading update operation would require re-partitioning. Updating partition
			 * column value is not allowed anyway even outside of foreign key concept.
			 */
			if (constraintForm->confupdtype == FKCONSTR_ACTION_SETNULL ||
				constraintForm->confupdtype == FKCONSTR_ACTION_SETDEFAULT ||
				constraintForm->confupdtype == FKCONSTR_ACTION_CASCADE)
			{
				ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
								errmsg("cannot create foreign key constraint"),
								errdetail("SET NULL, SET DEFAULT or CASCADE is not "
										  "supported in ON UPDATE operation  when "
										  "distribution key included in the foreign "
										  "constraint.")));
			}
		}

		/*
		 * if tables are hash-distributed and colocated, we need to make sure that
		 * the distribution key is included in foreign constraint.
		 */
		if (!referencedTableIsAReferenceTable && !foreignConstraintOnPartitionColumn)
		{
			ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
							errmsg("cannot create foreign key constraint"),
							errdetail("Foreign keys are supported in two cases, "
									  "either in between two colocated tables including "
									  "partition column in the same ordinal in the both "
									  "tables or from distributed to reference tables")));
		}

		/*
		 * We do not allow to create foreign constraints if shard replication factor is
		 * greater than 1. Because in our current design, multiple replicas may cause
		 * locking problems and inconsistent shard contents.
		 *
		 * Note that we allow referenced table to be a reference table (e.g., not a
		 * single replicated table). This is allowed since (a) we are sure that
		 * placements always be in the same state (b) executors are aware of reference
		 * tables and handle concurrency related issues accordingly.
		 */
		if (IsDistributedTable(referencingTableId))
		{
			/* check whether ALTER TABLE command is applied over single replicated table */
			if (!SingleReplicatedTable(referencingTableId))
			{
				singleReplicatedTable = false;
			}
		}
		else
		{
			Assert(distributionMethod == DISTRIBUTE_BY_HASH);

			/* check whether creating single replicated table with foreign constraint */
			if (ShardReplicationFactor > 1)
			{
				singleReplicatedTable = false;
			}
		}

		if (!singleReplicatedTable)
		{
			ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
							errmsg("cannot create foreign key constraint"),
							errdetail("Citus Community Edition currently supports "
									  "foreign key constraints only for "
									  "\"citus.shard_replication_factor = 1\"."),
							errhint("Please change \"citus.shard_replication_factor to "
									"1\". To learn more about using foreign keys with "
									"other replication factors, please contact us at "
									"https://citusdata.com/about/contact_us.")));
		}

		heapTuple = systable_getnext(scanDescriptor);
	}

	/* clean up scan and close system catalog */
	systable_endscan(scanDescriptor);
	heap_close(pgConstraint, AccessShareLock);
}
Example #24
File: cdbutil.c  Project: AnLingm/gpdb
CdbComponentDatabaseInfo *
dbid_get_dbinfo(int16 dbid)
{
	HeapTuple tuple;
	Relation rel;
	cqContext	cqc;
	bool bOnly;
	CdbComponentDatabaseInfo *i = NULL;

	/*
	 * Can only run on a master node, this restriction is due to the reliance
	 * on the gp_segment_configuration table.  This may be able to be relaxed
	 * by switching to a different method of checking.
	 */
	if (GpIdentity.segindex != MASTER_CONTENT_ID)
		elog(ERROR, "dbid_get_dbinfo() executed on execution segment");

	rel = heap_open(GpSegmentConfigRelationId, AccessShareLock);

	tuple = caql_getfirst_only(
			caql_addrel(cqclr(&cqc), rel),
			&bOnly,
			cql("SELECT * FROM gp_segment_configuration "
				" WHERE dbid = :1 ",
				Int16GetDatum(dbid)));

	if (HeapTupleIsValid(tuple))
	{
		Datum attr;
		bool isNull;

		i = palloc(sizeof(CdbComponentDatabaseInfo));

		/*
		 * dbid
		 */
		attr = heap_getattr(tuple, Anum_gp_segment_configuration_dbid,
							RelationGetDescr(rel), &isNull);
		Assert(!isNull);
		i->dbid = DatumGetInt16(attr);

		/*
		 * content
		 */
		attr = heap_getattr(tuple, Anum_gp_segment_configuration_content,
							RelationGetDescr(rel), &isNull);
		Assert(!isNull);
		i->segindex = DatumGetInt16(attr);

		/*
		 * role
		 */
		attr = heap_getattr(tuple, Anum_gp_segment_configuration_role,
							RelationGetDescr(rel), &isNull);
		Assert(!isNull);
		i->role = DatumGetChar(attr);

		/*
		 * preferred-role
		 */
		attr = heap_getattr(tuple,
							Anum_gp_segment_configuration_preferred_role,
							RelationGetDescr(rel), &isNull);
		Assert(!isNull);
		i->preferred_role = DatumGetChar(attr);

		/*
		 * mode
		 */
		attr = heap_getattr(tuple, Anum_gp_segment_configuration_mode,
							RelationGetDescr(rel), &isNull);
		Assert(!isNull);
		i->mode = DatumGetChar(attr);

		/*
		 * status
		 */
		attr = heap_getattr(tuple, Anum_gp_segment_configuration_status,
							RelationGetDescr(rel), &isNull);
		Assert(!isNull);
		i->status = DatumGetChar(attr);

		/*
		 * hostname
		 */
		attr = heap_getattr(tuple, Anum_gp_segment_configuration_hostname,
							RelationGetDescr(rel), &isNull);
		Assert(!isNull);
		i->hostname = TextDatumGetCString(attr);

		/*
		 * address
		 */
		attr = heap_getattr(tuple, Anum_gp_segment_configuration_address,
							RelationGetDescr(rel), &isNull);
		Assert(!isNull);
		i->address = TextDatumGetCString(attr);
		
		/*
		 * port
		 */
		attr = heap_getattr(tuple, Anum_gp_segment_configuration_port,
							RelationGetDescr(rel), &isNull);
		Assert(!isNull);
		i->port = DatumGetInt32(attr);

		/*
		 * Filerep_port
		 */
		attr = heap_getattr(tuple, Anum_gp_segment_configuration_replication_port,
							RelationGetDescr(rel), &isNull);
		if (!isNull)
			i->filerep_port = DatumGetInt32(attr);
		else
			i->filerep_port = -1;

		Assert(bOnly); /* should be only 1 */
	}
	else
	{
		elog(ERROR, "could not find configuration entry for dbid %i", dbid);
	}

	heap_close(rel, NoLock);

	return i;
}
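A minimal caller sketch (not part of the original file; the function name report_segment_endpoint is assumed for illustration). It only reads the fields that dbid_get_dbinfo() fills in above, and like that function it must run on the master:

/*
 * Assumed helper: look up one segment's connection endpoint by dbid and
 * log it.  Relies solely on fields populated by dbid_get_dbinfo().
 */
static void
report_segment_endpoint(int16 dbid)
{
	CdbComponentDatabaseInfo *info = dbid_get_dbinfo(dbid);

	elog(INFO, "dbid %d is %s:%d (content %d, role %c)",
		 info->dbid, info->hostname, info->port, info->segindex, info->role);

	pfree(info);
}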
Example #25
File: cdbutil.c  Project: AnLingm/gpdb
/*
 * getCdbComponentInfo
 *
 * Storage for the SegmentInstances block and all subsidiary
 * structures is allocated from the caller's context.
 */
CdbComponentDatabases *
getCdbComponentInfo(bool DNSLookupAsError)
{
	CdbComponentDatabaseInfo *pOld = NULL;
	CdbComponentDatabases *component_databases = NULL;

	Relation gp_seg_config_rel;
	HeapTuple gp_seg_config_tuple = NULL;
	HeapScanDesc gp_seg_config_scan;

	/*
	 * Initial size for info arrays.
	 */
	int			segment_array_size = 500;
	int			entry_array_size = 4; /* we currently support a max of 2 */

	/*
	 * isNull and attr are used when getting the data for a specific column from a HeapTuple
	 */
	bool		isNull;
	Datum		attr;

	/*
	 * Local variables for fields from the rows of the tables that we are reading.
	 */
	int			dbid;
	int			content;

	char		role;
	char		preferred_role;
	char		mode = 0;
	char		status = 0;

	int			i;
	int			x = 0;

	/*
	 * Allocate the component_databases return structure and the
	 * component_databases->segment_db_info array with an initial size of
	 * segment_array_size (500), and component_databases->entry_db_info with
	 * an initial size of 4.  If necessary during row fetching, we grow these
	 * by doubling each time we run out.
	 */
	component_databases = palloc0(sizeof(CdbComponentDatabases));

	component_databases->segment_db_info =
		(CdbComponentDatabaseInfo *) palloc0(sizeof(CdbComponentDatabaseInfo) * segment_array_size);

	component_databases->entry_db_info =
		(CdbComponentDatabaseInfo *) palloc0(sizeof(CdbComponentDatabaseInfo) * entry_array_size);

	gp_seg_config_rel = heap_open(GpSegmentConfigRelationId, AccessShareLock);

	gp_seg_config_scan = heap_beginscan(gp_seg_config_rel, SnapshotNow, 0, NULL);

	while (HeapTupleIsValid(gp_seg_config_tuple = heap_getnext(gp_seg_config_scan, ForwardScanDirection)))
	{
		/*
		 * Grab the fields that we need from gp_configuration.  We do
		 * this first, because until we read them, we don't know
		 * whether this is an entry database row or a segment database
		 * row.
		 */
		CdbComponentDatabaseInfo *pRow;

		/*
		 * dbid
		 */
		attr = heap_getattr(gp_seg_config_tuple, Anum_gp_segment_configuration_dbid, RelationGetDescr(gp_seg_config_rel), &isNull);
		Assert(!isNull);
		dbid = DatumGetInt16(attr);

		/*
		 * content
		 */
		attr = heap_getattr(gp_seg_config_tuple, Anum_gp_segment_configuration_content, RelationGetDescr(gp_seg_config_rel), &isNull);
		Assert(!isNull);
		content = DatumGetInt16(attr);

		/*
		 * role
		 */
		attr = heap_getattr(gp_seg_config_tuple, Anum_gp_segment_configuration_role, RelationGetDescr(gp_seg_config_rel), &isNull);
		Assert(!isNull);
		role = DatumGetChar(attr);

		/*
		 * preferred-role
		 */
		attr = heap_getattr(gp_seg_config_tuple, Anum_gp_segment_configuration_preferred_role, RelationGetDescr(gp_seg_config_rel), &isNull);
		Assert(!isNull);
		preferred_role = DatumGetChar(attr);

		/*
		 * mode
		 */
		attr = heap_getattr(gp_seg_config_tuple, Anum_gp_segment_configuration_mode, RelationGetDescr(gp_seg_config_rel), &isNull);
		Assert(!isNull);
		mode = DatumGetChar(attr);

		/*
		 * status
		 */
		attr = heap_getattr(gp_seg_config_tuple, Anum_gp_segment_configuration_status, RelationGetDescr(gp_seg_config_rel), &isNull);
		Assert(!isNull);
		status = DatumGetChar(attr);

		/*
		 * Determine which array to place this rows data in: entry or
		 * segment, based on the content field.
		 */
		if (content >= 0)
		{
			/* if we have a dbid bigger than our array we'll have to grow the array. (MPP-2104) */
			if (dbid >= segment_array_size || component_databases->total_segment_dbs >= segment_array_size)
			{
				/*
				 * Expand CdbComponentDatabaseInfo array if we've used up currently allocated space
				 */
				segment_array_size = Max((segment_array_size * 2), dbid * 2);
				pOld = component_databases->segment_db_info;
				component_databases->segment_db_info = (CdbComponentDatabaseInfo *)
					repalloc(pOld, sizeof(CdbComponentDatabaseInfo) * segment_array_size);
			}

			pRow = &component_databases->segment_db_info[component_databases->total_segment_dbs];
			component_databases->total_segment_dbs++;
		}
		else
		{
			if (component_databases->total_entry_dbs >= entry_array_size)
			{
				/*
				 * Expand CdbComponentDatabaseInfo array if we've used up currently allocated space
				 */
				entry_array_size *= 2;
				pOld = component_databases->entry_db_info;
				component_databases->entry_db_info = (CdbComponentDatabaseInfo *)
					repalloc(pOld, sizeof(CdbComponentDatabaseInfo) * entry_array_size);
			}

			pRow = &component_databases->entry_db_info[component_databases->total_entry_dbs];
			component_databases->total_entry_dbs++;
		}

		pRow->dbid = dbid;
		pRow->segindex = content;
		pRow->role = role;
		pRow->preferred_role = preferred_role;
		pRow->mode = mode;
		pRow->status = status;

		/*
		 * hostname
		 */
		attr = heap_getattr(gp_seg_config_tuple, Anum_gp_segment_configuration_hostname, RelationGetDescr(gp_seg_config_rel), &isNull);
		Assert(!isNull);
		pRow->hostname = TextDatumGetCString(attr);

		/*
		 * address
		 */
		attr = heap_getattr(gp_seg_config_tuple, Anum_gp_segment_configuration_address, RelationGetDescr(gp_seg_config_rel), &isNull);
		Assert(!isNull);
		pRow->address = TextDatumGetCString(attr);
		
		/*
		 * port
		 */
		attr = heap_getattr(gp_seg_config_tuple, Anum_gp_segment_configuration_port, RelationGetDescr(gp_seg_config_rel), &isNull);
		Assert(!isNull);
		pRow->port = DatumGetInt32(attr);

		/*
		 * Filerep_port
		 */
		attr = heap_getattr(gp_seg_config_tuple, Anum_gp_segment_configuration_replication_port, RelationGetDescr(gp_seg_config_rel), &isNull);
		if (!isNull)
			pRow->filerep_port = DatumGetInt32(attr);
		else
			pRow->filerep_port = -1;

		getAddressesForDBid(pRow, DNSLookupAsError ? ERROR : LOG);
		pRow->hostip = pRow->hostaddrs[0];
	}

	/*
	 * We're done with the catalog entries; clean them up by closing
	 * all the relations we opened.
	 */
	heap_endscan(gp_seg_config_scan);
	heap_close(gp_seg_config_rel, AccessShareLock);

	/*
	 * Validate that there exists at least one entry and one segment
	 * database in the configuration
	 */
	if (component_databases->total_segment_dbs == 0)
	{
		ereport(ERROR,
				(errcode(ERRCODE_CARDINALITY_VIOLATION),
				 errmsg("Greenplum Database number of segment databases cannot be 0")));
	}
	if (component_databases->total_entry_dbs == 0)
	{
		ereport(ERROR,
				(errcode(ERRCODE_CARDINALITY_VIOLATION),
				 errmsg("Greenplum Database number of entry databases cannot be 0")));
	}

	/*
	 * Now sort the data by segindex, isprimary desc
	 */
	qsort(component_databases->segment_db_info,
		  component_databases->total_segment_dbs, sizeof(CdbComponentDatabaseInfo),
		  CdbComponentDatabaseInfoCompare);

	qsort(component_databases->entry_db_info,
		  component_databases->total_entry_dbs, sizeof(CdbComponentDatabaseInfo),
		  CdbComponentDatabaseInfoCompare);

	/*
	 * Now count the number of distinct segindexes.
	 * Since it's sorted, this is easy.
	 */
	for (i = 0; i < component_databases->total_segment_dbs; i++)
	{
		if (i == 0 ||
			(component_databases->segment_db_info[i].segindex != component_databases->segment_db_info[i - 1].segindex))
		{
			component_databases->total_segments++;
		}
	}

	/*
	 * Validate that getgpsegmentCount() == component_databases->total_segments
	 */
	if (getgpsegmentCount() != component_databases->total_segments)
	{
		ereport(ERROR,
				(errcode(ERRCODE_DATA_EXCEPTION),
				 errmsg("Greenplum Database number of segments inconsistency: count is %d from pg_catalog.%s table, but %d from getCdbComponentDatabases()",
						getgpsegmentCount(),  GpIdRelationName, component_databases->total_segments)));
	}

	/*
	 * Now validate that our identity is present in the entry databases
	 */
	for (i = 0; i < component_databases->total_entry_dbs; i++)
	{
		CdbComponentDatabaseInfo *pInfo = &component_databases->entry_db_info[i];

		if (pInfo->dbid == GpIdentity.dbid && pInfo->segindex == Gp_segment)
		{
			break;
		}
	}
	if (i == component_databases->total_entry_dbs)
	{
		ereport(ERROR,
				(errcode(ERRCODE_DATA_EXCEPTION),
				 errmsg("Cannot locate entry database represented by this db in gp_segment_configuration: dbid %d content %d",
						GpIdentity.dbid, Gp_segment)));
	}

	/*
	 * Now validate that the segindexes for the segment databases are
	 * between 0 and (GpIdentity.numsegments - 1) inclusive, and that we
	 * hit them all. Since it's sorted, this is relatively easy.
	 */
	x = 0;
	for (i = 0; i < getgpsegmentCount(); i++)
	{
		int			this_segindex = -1;

		while (x < component_databases->total_segment_dbs)
		{
			this_segindex = component_databases->segment_db_info[x].segindex;
			if (this_segindex < i)
				x++;
			else if (this_segindex == i)
				break;
			else if (this_segindex > i)
			{
				ereport(ERROR,
						(errcode(ERRCODE_DATA_EXCEPTION),
						 errmsg("Content values not valid in %s table.  They must be in the range 0 to %d inclusive",
								GpSegmentConfigRelationName, getgpsegmentCount() - 1)));
			}
		}
		if (this_segindex != i)
		{
			ereport(ERROR,
					(errcode(ERRCODE_DATA_EXCEPTION),
					 errmsg("Content values not valid in %s table.  They must be in the range 0 to %d inclusive",
							GpSegmentConfigRelationName, getgpsegmentCount() - 1)));
		}
	}

	return component_databases;
}
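As a rough illustration of how the populated structure might be consumed (the function name log_primary_segments is assumed, not from the original file), a caller could walk the sorted segment_db_info array and report each primary, assuming role 'p' marks a primary in gp_segment_configuration:

static void
log_primary_segments(CdbComponentDatabases *cdbs)
{
	int			i;

	for (i = 0; i < cdbs->total_segment_dbs; i++)
	{
		CdbComponentDatabaseInfo *seg = &cdbs->segment_db_info[i];

		if (seg->role == 'p')	/* primary, per gp_segment_configuration */
			elog(INFO, "content %d primary: dbid %d at %s:%d",
				 seg->segindex, seg->dbid, seg->hostname, seg->port);
	}
}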
Example #26
/**
 * Returns a histogram from an array of numbers.
 * by Paul A. Jungwirth
 */
Datum
array_to_hist(PG_FUNCTION_ARGS)
{
  // Our arguments:
  ArrayType *vals;
  pgnum bucketsStart;
  pgnum bucketsSize;
  int32 bucketsCount;

  // The array element type:
  Oid valsType;

  // The array element type widths for our input and output arrays:
  int16 valsTypeWidth;
  int16 histTypeWidth;

  // The array element type "is passed by value" flags (not really used):
  bool valsTypeByValue;
  bool histTypeByValue;

  // The array element type alignment codes (not really used):
  char valsTypeAlignmentCode;
  char histTypeAlignmentCode;

  // The array contents, as PostgreSQL "Datum" objects:
  Datum *valsContent;
  Datum *histContent;

  // List of "is null" flags for the array contents (not used):
  bool *valsNullFlags;

  // The size of the input array:
  int valsLength;

  // The output array:
  ArrayType* histArray;

  pgnum histMax;
  pgnum v;
  int i;

  if (PG_ARGISNULL(0) || PG_ARGISNULL(1) || PG_ARGISNULL(2) || PG_ARGISNULL(3)) {
    ereport(ERROR, (errmsg("Null arguments not accepted")));
  }

  vals = PG_GETARG_ARRAYTYPE_P(0);

  if (ARR_NDIM(vals) > 1) {
    ereport(ERROR, (errmsg("One-dimesional arrays are required")));
  }

  if (array_contains_nulls(vals)) {
    ereport(ERROR, (errmsg("Array contains null elements")));
  }

  // Determine the array element types.
  valsType = ARR_ELEMTYPE(vals);

  if (valsType != INT2OID &&
      valsType != INT4OID &&
      valsType != INT8OID &&
      valsType != FLOAT4OID &&
      valsType != FLOAT8OID) {
    ereport(ERROR, (errmsg("Histogram subject must be SMALLINT, INTEGER, BIGINT, REAL, or DOUBLE PRECISION values")));
  }

  valsLength = (ARR_DIMS(vals))[0];

  switch (valsType) {
    case INT2OID:
      bucketsStart.i16 = PG_GETARG_INT16(1);
      bucketsSize.i16 = PG_GETARG_INT16(2);
      break;
    case INT4OID:
      bucketsStart.i32 = PG_GETARG_INT32(1);
      bucketsSize.i32 = PG_GETARG_INT32(2);
      break;
    case INT8OID:
      bucketsStart.i64 = PG_GETARG_INT64(1);
      bucketsSize.i64 = PG_GETARG_INT64(2);
      break;
    case FLOAT4OID:
      bucketsStart.f4 = PG_GETARG_FLOAT4(1);
      bucketsSize.f4 = PG_GETARG_FLOAT4(2);
      break;
    case FLOAT8OID:
      bucketsStart.f8 = PG_GETARG_FLOAT8(1);
      bucketsSize.f8 = PG_GETARG_FLOAT8(2);
      break;
    default:
      break;
  }
  bucketsCount = PG_GETARG_INT32(3);

  get_typlenbyvalalign(valsType, &valsTypeWidth, &valsTypeByValue, &valsTypeAlignmentCode);

  // Extract the array contents (as Datum objects).
  deconstruct_array(vals, valsType, valsTypeWidth, valsTypeByValue, valsTypeAlignmentCode,
                    &valsContent, &valsNullFlags, &valsLength);

  // Create a new array of histogram bins (as Datum objects).
  // Memory we palloc is freed automatically at the end of the transaction.
  histContent = palloc0(sizeof(Datum) * bucketsCount);

  // Generate the histogram
  switch (valsType) {
    case INT2OID:
      histMax.i16 = bucketsStart.i16 + (bucketsSize.i16 * bucketsCount);
      for (i = 0; i < valsLength; i++) {
        v.i16 = DatumGetInt16(valsContent[i]);
        if (v.i16 >= bucketsStart.i16 && v.i16 <= histMax.i16) {
          int b = (v.i16 - bucketsStart.i16) / bucketsSize.i16;
          if (b >= 0 && b < bucketsCount) {
            histContent[b] = Int32GetDatum(DatumGetInt32(histContent[b]) + 1);
          }
        }
      }
      break;
    case INT4OID:
      histMax.i32 = bucketsStart.i32 + (bucketsSize.i32 * bucketsCount);
      for (i = 0; i < valsLength; i++) {
        v.i32 = DatumGetInt32(valsContent[i]);
        if (v.i32 >= bucketsStart.i32 && v.i32 <= histMax.i32) {
          int b = (v.i32 - bucketsStart.i32) / bucketsSize.i32;
          if (b >= 0 && b < bucketsCount) {
            histContent[b] = Int32GetDatum(DatumGetInt32(histContent[b]) + 1);
          }
        }
      }
      break;
    case INT8OID:
      histMax.i64 = bucketsStart.i64 + (bucketsSize.i64 * bucketsCount);
      for (i = 0; i < valsLength; i++) {
        v.i64 = DatumGetInt64(valsContent[i]);
        if (v.i64 >= bucketsStart.i64 && v.i64 <= histMax.i64) {
          int b = (v.i64 - bucketsStart.i64) / bucketsSize.i64;
          if (b >= 0 && b < bucketsCount) {
            histContent[b] = Int32GetDatum(DatumGetInt32(histContent[b]) + 1);
          }
        }
      }
      break;
    case FLOAT4OID:
      histMax.f4 = bucketsStart.f4 + (bucketsSize.f4 * bucketsCount);
      for (i = 0; i < valsLength; i++) {
        v.f4 = DatumGetFloat4(valsContent[i]);
        if (v.f4 >= bucketsStart.f4 && v.f4 <= histMax.f4) {
          int b = (v.f4 - bucketsStart.f4) / bucketsSize.f4;
          if (b >= 0 && b < bucketsCount) {
            histContent[b] = Int32GetDatum(DatumGetInt32(histContent[b]) + 1);
          }
        }
      }
      break;
    case FLOAT8OID:
      histMax.f8 = bucketsStart.f8 + (bucketsSize.f8 * bucketsCount);
      for (i = 0; i < valsLength; i++) {
        v.f8 = DatumGetFloat8(valsContent[i]);
        if (v.f8 >= bucketsStart.f8 && v.f8 <= histMax.f8) {
          int b = (v.f8 - bucketsStart.f8) / bucketsSize.f8;
          if (b >= 0 && b < bucketsCount) {
            histContent[b] = Int32GetDatum(DatumGetInt32(histContent[b]) + 1);
          }
        }
      }
      break;
    default:
      break;
  }

  // Wrap the buckets in a new PostgreSQL array object.
  get_typlenbyvalalign(INT4OID, &histTypeWidth, &histTypeByValue, &histTypeAlignmentCode);
  histArray = construct_array(histContent, bucketsCount, INT4OID, histTypeWidth, histTypeByValue, histTypeAlignmentCode);

  // Return the final PostgreSQL array object.
  PG_RETURN_ARRAYTYPE_P(histArray);
}
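The bucketing rule above reduces to b = (v - bucketsStart) / bucketsSize, counted only when the index is in range. A standalone sketch of that rule for plain int values (outside PostgreSQL; all names here are assumptions for illustration, not from the original):

#include <stdio.h>

/* Count each value into bucket (v - start) / size when that index is in [0, count). */
static void
hist_int(const int *vals, int n, int start, int size, int count, int *out)
{
	int			i;

	for (i = 0; i < count; i++)
		out[i] = 0;

	for (i = 0; i < n; i++)
	{
		int			b = (vals[i] - start) / size;

		if (vals[i] >= start && b < count)
			out[b]++;
	}
}

int
main(void)
{
	int			vals[] = {1, 3, 4, 7, 12};
	int			out[3];

	hist_int(vals, 5, 0, 5, 3, out);	/* buckets [0,5), [5,10), [10,15) */
	printf("%d %d %d\n", out[0], out[1], out[2]);	/* prints: 3 1 1 */
	return 0;
}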
Example #27
int create_spatial_index(sqlite3 *db, char *dataset_name, char *idx_tbl, char *idx_geom, char *idx_id, char *sql_string, int create)
{
	char sql_txt_pg[SQLSTRLEN];
	char sql_txt_sqlite[SQLSTRLEN];
	int rc;
	char *err_msg;
	SPIPlanPtr plan;
	sqlite3_stmt *prepared_statement;
	Portal cur;
	char *pg_type;
	int val_int, proc, j;
	float8 val_float;
	int64 val_int64;
	bool null_check;
	TupleDesc tupdesc;
	SPITupleTable *tuptable;
	HeapTuple tuple;
	int tot_rows = 0;

	if (create)
	{
		snprintf(sql_txt_pg, sizeof(sql_txt_pg), " %s%s%s",
				 "CREATE VIRTUAL TABLE ",
				 dataset_name,
				 "_idx_geom USING rtree(id,minX, maxX,minY, maxY)");

		rc = sqlite3_exec(db, sql_txt_pg, NULL, 0, &err_msg);

		if (rc != SQLITE_OK)
		{
			sqlite3_close(db);
			/* err_msg is deliberately not freed here: ereport(ERROR) does not return */
			ereport(ERROR, (errmsg("Problem creating table: %s", err_msg)));
		}
		elog(INFO, "create table string: %s", sql_txt_pg);
	}
	snprintf(sql_txt_pg, sizeof(sql_txt_pg), " %s%s%s%s%s%s%s%s%s",
			 "with o as (",
			 sql_string,
			 "), g as( select ",
			 idx_id,
			 " id,",
			 idx_geom,
			 " geom from ",
			 idx_tbl,
			 ") select g.id, st_xmin(g.geom) minx,st_xmax(g.geom) maxx,st_ymin(g.geom) miny,st_ymax(g.geom) maxy from g inner join o on g.id=o.id");

	elog(INFO, "select table string: %s", sql_txt_pg);

	plan = SPI_prepare(sql_txt_pg, 0, NULL);
	cur = SPI_cursor_open("index_cursor", plan, NULL, NULL, true);

	snprintf(sql_txt_sqlite, sizeof(sql_txt_sqlite), " %s%s%s",
			 "insert into ",
			 dataset_name,
			 "_idx_geom  (id,minX, maxX,minY, maxY) values(?,?,?,?,?)");

	elog(INFO, "insert table string: %s", sql_txt_sqlite);

	sqlite3_prepare_v2(db, sql_txt_sqlite, strlen(sql_txt_sqlite), &prepared_statement, NULL);

	do
	{
		sqlite3_exec(db, "BEGIN TRANSACTION", NULL, NULL, &err_msg);

		SPI_cursor_fetch(cur, true, 100000);

		proc = SPI_processed;
		tot_rows += proc;

		tupdesc = SPI_tuptable->tupdesc;
		tuptable = SPI_tuptable;

		for (j = 0; j < proc; j++)
		{
			tuple = tuptable->vals[j];

			/* bind the id column (parameter 1) according to its PostgreSQL type */
			pg_type = SPI_gettype(tupdesc, 1);

			if (strcmp(pg_type, "int2") == 0)
			{
				val_int = (int) DatumGetInt16(SPI_getbinval(tuple, tupdesc, 1, &null_check));
				//TODO add error handling
				if (null_check)
					sqlite3_bind_null(prepared_statement, 1);
				else
					sqlite3_bind_int(prepared_statement, 1, val_int);
			}
			else if (strcmp(pg_type, "int4") == 0)
			{
				val_int = (int) DatumGetInt32(SPI_getbinval(tuple, tupdesc, 1, &null_check));
				//TODO add error handling
				if (null_check)
					sqlite3_bind_null(prepared_statement, 1);
				else
					sqlite3_bind_int(prepared_statement, 1, val_int);
			}
			else if (strcmp(pg_type, "int8") == 0)
			{
				val_int64 = (int64) DatumGetInt64(SPI_getbinval(tuple, tupdesc, 1, &null_check));
				//TODO add error handling
				if (null_check)
					sqlite3_bind_null(prepared_statement, 1);
				else
					sqlite3_bind_int64(prepared_statement, 1, val_int64);
			}

			/* bind minX, maxX, minY, maxY (parameters 2-5) as doubles */
			val_float = (float8) DatumGetFloat8(SPI_getbinval(tuple, tupdesc, 2, &null_check));
			//TODO add error handling
			if (null_check)
				sqlite3_bind_null(prepared_statement, 2);
			else
				sqlite3_bind_double(prepared_statement, 2, val_float);

			val_float = (float8) DatumGetFloat8(SPI_getbinval(tuple, tupdesc, 3, &null_check));
			//TODO add error handling
			if (null_check)
				sqlite3_bind_null(prepared_statement, 3);
			else
				sqlite3_bind_double(prepared_statement, 3, val_float);

			val_float = (float8) DatumGetFloat8(SPI_getbinval(tuple, tupdesc, 4, &null_check));
			//TODO add error handling
			if (null_check)
				sqlite3_bind_null(prepared_statement, 4);
			else
				sqlite3_bind_double(prepared_statement, 4, val_float);

			val_float = (float8) DatumGetFloat8(SPI_getbinval(tuple, tupdesc, 5, &null_check));
			//TODO add error handling
			if (null_check)
				sqlite3_bind_null(prepared_statement, 5);
			else
				sqlite3_bind_double(prepared_statement, 5, val_float);

			sqlite3_step(prepared_statement);
			sqlite3_clear_bindings(prepared_statement);
			sqlite3_reset(prepared_statement);
		}

		sqlite3_exec(db, "END TRANSACTION", NULL, NULL, &err_msg);
		elog(INFO, "inserted %d rows in index", tot_rows);
	}
	while (proc > 0);

	return 0;
}
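Example #28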
GISTENTRY *
gbt_num_compress(GISTENTRY *retval, GISTENTRY *entry, const gbtree_ninfo *tinfo)
{
	if (entry->leafkey)
	{
		union
		{
			int16		i2;
			int32		i4;
			int64		i8;
			float4		f4;
			float8		f8;
			DateADT		dt;
			TimeADT		tm;
			Timestamp	ts;
			Cash		ch;
		}			v;

		GBT_NUMKEY *r = (GBT_NUMKEY *) palloc0(2 * tinfo->size);
		void	   *leaf = NULL;

		switch (tinfo->t)
		{
			case gbt_t_int2:
				v.i2 = DatumGetInt16(entry->key);
				leaf = &v.i2;
				break;
			case gbt_t_int4:
				v.i4 = DatumGetInt32(entry->key);
				leaf = &v.i4;
				break;
			case gbt_t_int8:
				v.i8 = DatumGetInt64(entry->key);
				leaf = &v.i8;
				break;
			case gbt_t_oid:
				v.i4 = DatumGetObjectId(entry->key);
				leaf = &v.i4;
				break;
			case gbt_t_float4:
				v.f4 = DatumGetFloat4(entry->key);
				leaf = &v.f4;
				break;
			case gbt_t_float8:
				v.f8 = DatumGetFloat8(entry->key);
				leaf = &v.f8;
				break;
			case gbt_t_date:
				v.dt = DatumGetDateADT(entry->key);
				leaf = &v.dt;
				break;
			case gbt_t_time:
				v.tm = DatumGetTimeADT(entry->key);
				leaf = &v.tm;
				break;
			case gbt_t_ts:
				v.ts = DatumGetTimestamp(entry->key);
				leaf = &v.ts;
				break;
			case gbt_t_cash:
				v.ch = DatumGetCash(entry->key);
				leaf = &v.ch;
				break;
			default:
				leaf = DatumGetPointer(entry->key);
		}

		memcpy((void *) &r[0], leaf, tinfo->size);
		memcpy((void *) &r[tinfo->size], leaf, tinfo->size);
		retval = palloc(sizeof(GISTENTRY));
		gistentryinit(*retval, PointerGetDatum(r), entry->rel, entry->page,
					  entry->offset, FALSE);
	}
	else
		retval = entry;

	return retval;
}
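For the int2 case, the compressed key is simply the leaf value stored twice, lower bound first and upper bound second, tinfo->size bytes each. A small assumed helper (not from btree_gist; shown as a sketch only) that reads those halves back out of such a key:

#include <string.h>

static void
gbt_int2_key_bounds(const GBT_NUMKEY *key, int16 *lower, int16 *upper)
{
	/* layout produced above: [0 .. size) = lower, [size .. 2*size) = upper */
	memcpy(lower, &key[0], sizeof(int16));
	memcpy(upper, &key[sizeof(int16)], sizeof(int16));
}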
Example #29
static PyObject *
PLyInt_FromInt16(PLyDatumToOb *arg, Datum d)
{
	return PyInt_FromLong(DatumGetInt16(d));
}
Example #30
int write2sqlite(char *sqlitedb_name, char *dataset_name, char *sql_string, char *twkb_name, char *id_name, char *idx_geom, char *idx_tbl, char *idx_id, int create)
{
	char *err_msg;
	int spi_conn;
	int proc, rc;
	/*Sqlite*/
	sqlite3 *db;
	TupleDesc tupdesc;
	SPITupleTable *tuptable;
	HeapTuple tuple;
	int i, j;
	SPIPlanPtr plan;
	char insert_str[SQLSTRLEN];
	Portal cur;
	void *val_p;
	int val_int;
	int64 val_int64;
	float8 val_float;
	bool null_check;
	char *pg_type;
	int tot_rows = 0;

	sqlite3_stmt *prepared_statement;

	spi_conn = SPI_connect();
	if (spi_conn != SPI_OK_CONNECT)
		ereport(ERROR, (errmsg("Failed to open SPI Connection")));

	/*Open the sqlite db to write to*/
	rc = sqlite3_open(sqlitedb_name, &db);

	if (rc != SQLITE_OK)
	{
		sqlite3_close(db);
		ereport(ERROR, (errmsg("Cannot open SQLite database")));
	}

	plan = SPI_prepare(sql_string, 0, NULL);
	cur = SPI_cursor_open("our_cursor", plan, NULL, NULL, true);

	elog(INFO, "build sql-strings and create table if : %d", create);
	create_sqlite_table(&cur, db, insert_str, dataset_name, twkb_name, id_name, create);

	elog(INFO, "back from creating table");
	elog(INFO, "inserted sql = %s", insert_str);
	//TODO add error handling
	sqlite3_prepare_v2(db, insert_str, strlen(insert_str), &prepared_statement, NULL);

	do
	{
		sqlite3_exec(db, "BEGIN TRANSACTION", NULL, NULL, &err_msg);

		SPI_cursor_fetch(cur, true, 10000);

		proc = SPI_processed;
		tot_rows += proc;

		tupdesc = SPI_tuptable->tupdesc;
		tuptable = SPI_tuptable;

		for (j = 0; j < proc; j++)
		{
			tuple = tuptable->vals[j];

			/* bind each output column according to its PostgreSQL type */
			for (i = 1; i <= tupdesc->natts; i++)
			{
				pg_type = SPI_gettype(tupdesc, i);

				if (strcmp(pg_type, "bool") == 0)
				{
					val_int = (bool) (DatumGetBool(SPI_getbinval(tuple, tupdesc, i, &null_check)) ? 1 : 0);
					if (null_check)
						sqlite3_bind_null(prepared_statement, i);
					else
						sqlite3_bind_int(prepared_statement, i, (int) val_int);
				}
				else if (strcmp(pg_type, "int2") == 0)
				{
					val_int = (int) DatumGetInt16(SPI_getbinval(tuple, tupdesc, i, &null_check));
					//TODO add error handling
					if (null_check)
						sqlite3_bind_null(prepared_statement, i);
					else
						sqlite3_bind_int(prepared_statement, i, val_int);
				}
				else if (strcmp(pg_type, "int4") == 0)
				{
					val_int = (int) DatumGetInt32(SPI_getbinval(tuple, tupdesc, i, &null_check));
					//TODO add error handling
					if (null_check)
						sqlite3_bind_null(prepared_statement, i);
					else
						sqlite3_bind_int(prepared_statement, i, val_int);
				}
				else if (strcmp(pg_type, "int8") == 0)
				{
					val_int64 = (int64) DatumGetInt64(SPI_getbinval(tuple, tupdesc, i, &null_check));
					//TODO add error handling
					if (null_check)
						sqlite3_bind_null(prepared_statement, i);
					else
						sqlite3_bind_int64(prepared_statement, i, val_int64);
				}
				else if (strcmp(pg_type, "float4") == 0)
				{
					val_float = (float8) DatumGetFloat4(SPI_getbinval(tuple, tupdesc, i, &null_check));
					//TODO add error handling
					if (null_check)
						sqlite3_bind_null(prepared_statement, i);
					else
						sqlite3_bind_double(prepared_statement, i, val_float);
				}
				else if (strcmp(pg_type, "float8") == 0)
				{
					val_float = (float8) DatumGetFloat8(SPI_getbinval(tuple, tupdesc, i, &null_check));
					//TODO add error handling
					if (null_check)
						sqlite3_bind_null(prepared_statement, i);
					else
						sqlite3_bind_double(prepared_statement, i, val_float);
				}
				else if (strcmp(pg_type, "bytea") == 0)
				{
					val_p = (void *) PG_DETOAST_DATUM(SPI_getbinval(tuple, tupdesc, i, &null_check));
					//TODO add error handling
					if (null_check)
						sqlite3_bind_null(prepared_statement, i);
					else
						sqlite3_bind_blob(prepared_statement, i, (const void *) VARDATA_ANY(val_p), VARSIZE_ANY(val_p) - VARHDRSZ, SQLITE_TRANSIENT);
				}
				else
				{
					//TODO add error handling
					sqlite3_bind_text(prepared_statement, i, SPI_getvalue(tuple, tupdesc, i), -1, NULL);
				}
			}
			sqlite3_step(prepared_statement);
			sqlite3_clear_bindings(prepared_statement);
			sqlite3_reset(prepared_statement);
		}

		sqlite3_exec(db, "END TRANSACTION", NULL, NULL, &err_msg);

		elog(INFO, "inserted %d rows in table", tot_rows);
	}
	while (proc > 0);

	if (dataset_name && idx_geom && idx_id)
		create_spatial_index(db, dataset_name, idx_tbl, idx_geom, idx_id, sql_string, create);
	else
		elog(INFO, "Finishing without spatial index");

	SPI_finish();
	sqlite3_close(db);

	return 0;
}
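Both export functions above follow the same batching shape. A distilled skeleton of that pattern (illustrative only; the name copy_cursor_to_sqlite is assumed, error handling and per-column binding are elided):

static void
copy_cursor_to_sqlite(Portal cur, sqlite3 *db, sqlite3_stmt *stmt)
{
	char	   *err_msg = NULL;
	int			proc;
	int			j;

	do
	{
		/* one SQLite transaction per fetched batch keeps inserts fast */
		sqlite3_exec(db, "BEGIN TRANSACTION", NULL, NULL, &err_msg);

		SPI_cursor_fetch(cur, true, 10000);
		proc = SPI_processed;

		for (j = 0; j < proc; j++)
		{
			/* bind the columns of SPI_tuptable->vals[j] here, then: */
			sqlite3_step(stmt);
			sqlite3_clear_bindings(stmt);
			sqlite3_reset(stmt);
		}

		sqlite3_exec(db, "END TRANSACTION", NULL, NULL, &err_msg);
	} while (proc > 0);
}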