/**
 * Infer the result schema for a projection: keep only the attributes named
 * by _parameters, renumbered from zero in parameter order. If none of the
 * selected attributes is the empty-tag indicator, the input's empty bitmap
 * attribute (when present) is appended at the end so emptiness is preserved.
 */
ArrayDesc inferSchema(std::vector< ArrayDesc> schemas, std::shared_ptr< Query> query)
{
    assert(schemas.size() == 1);
    // Every parameter must be an attribute reference.
    for (Parameters::const_iterator it = _parameters.begin(); it != _parameters.end(); ++it) {
        assert(((std::shared_ptr<OperatorParamReference>&)*it)->getParamType() == PARAM_ATTRIBUTE_REF);
    }

    const Attributes& sourceAttrs = schemas[0].getAttributes();
    Attributes projectedAttrs;
    bool sawIndicator = false;
    const size_t paramCount = _parameters.size();

    for (size_t outId = 0; outId < paramCount; outId++) {
        const AttributeDesc& src =
            sourceAttrs[((std::shared_ptr<OperatorParamReference>&)_parameters[outId])->getObjectNo()];
        projectedAttrs.push_back(AttributeDesc(outId, src.getName(), src.getType(), src.getFlags(),
                                               src.getDefaultCompressionMethod(), src.getAliases(),
                                               &src.getDefaultValue(), src.getDefaultValueExpr()));
        sawIndicator = sawIndicator || src.isEmptyIndicator();
    }

    // Carry the empty bitmap along if the projection did not already include it.
    if (!sawIndicator) {
        AttributeDesc const* bitmap = schemas[0].getEmptyBitmapAttribute();
        if (bitmap != NULL) {
            projectedAttrs.push_back(AttributeDesc(paramCount, bitmap->getName(), bitmap->getType(),
                                                   bitmap->getFlags(), bitmap->getDefaultCompressionMethod(),
                                                   bitmap->getAliases()));
        }
    }

    return ArrayDesc(schemas[0].getName(), projectedAttrs, schemas[0].getDimensions(), defaultPartitioning());
}
/**
 * Infer the result schema for match(pattern, catalog, maxCollisions).
 *
 * The result has the pattern's dimensions plus an extra "collision"
 * dimension, and its attributes are: all pattern attributes (names kept),
 * all catalog attributes and catalog dimensions (prefixed "match_"),
 * and finally the empty-tag indicator.
 *
 * @throws SCIDB_LE_DIMENSION_COUNT_MISMATCH if dimension counts differ
 * @throws SCIDB_LE_ARRAYS_NOT_CONFORMANT if origin/chunking/overlap differ
 * @throws SCIDB_LE_WRONG_OPERATOR_ARGUMENT2 if maxCollisions is not a positive int32
 */
ArrayDesc inferSchema(std::vector< ArrayDesc> schemas, boost::shared_ptr< Query> query) {
    assert(schemas.size() == 2);
    ArrayDesc const& patternDesc = schemas[0];
    ArrayDesc const& catalogDesc = schemas[1];
    Attributes const& catalogAttributes = catalogDesc.getAttributes(true);
    Dimensions const& catalogDimensions = catalogDesc.getDimensions();
    Attributes const& patternAttributes = patternDesc.getAttributes(true);
    Dimensions resultDimensions = patternDesc.getDimensions();
    // Output attribute count: pattern attrs + catalog attrs + one int64 per
    // catalog dimension + the empty tag.
    size_t totalAttributes = catalogAttributes.size() + patternAttributes.size() + 1 + catalogDimensions.size();
    Attributes matchAttributes(totalAttributes);
    // Pattern and catalog must have the same number of dimensions.
    if (catalogDimensions.size() != resultDimensions.size()) {
        stringstream left, right;
        printDimNames(left, resultDimensions);
        printDimNames(right, catalogDimensions);
        throw USER_EXCEPTION(SCIDB_SE_INFER_SCHEMA, SCIDB_LE_DIMENSION_COUNT_MISMATCH) << "match" << left.str() << right.str();
    }
    // Dimensions must also agree on origin, chunk interval and chunk overlap.
    for (size_t i = 0, n = catalogDimensions.size(); i < n; i++) {
        if (!(catalogDimensions[i].getStartMin() == resultDimensions[i].getStartMin()
              && catalogDimensions[i].getChunkInterval() == resultDimensions[i].getChunkInterval()
              && catalogDimensions[i].getChunkOverlap() == resultDimensions[i].getChunkOverlap())) {
            // XXX To do: implement requiresRepart() method, remove interval/overlap checks
            // above, use SCIDB_LE_START_INDEX_MISMATCH here.
            throw USER_EXCEPTION(SCIDB_SE_INFER_SCHEMA, SCIDB_LE_ARRAYS_NOT_CONFORMANT);
        }
    }
    // Pattern attributes come first and keep their names ...
    size_t j = 0;
    for (size_t i = 0, n = patternAttributes.size(); i < n; i++, j++) {
        AttributeDesc const& attr = patternAttributes[i];
        matchAttributes[j] = AttributeDesc(j, attr.getName(), attr.getType(), attr.getFlags(), attr.getDefaultCompressionMethod(), attr.getAliases(), &attr.getDefaultValue(), attr.getDefaultValueExpr());
    }
    // ... then catalog attributes, renamed with a "match_" prefix ...
    for (size_t i = 0, n = catalogAttributes.size(); i < n; i++, j++) {
        AttributeDesc const& attr = catalogAttributes[i];
        matchAttributes[j] = AttributeDesc(j, "match_" + attr.getName(), attr.getType(), attr.getFlags(), attr.getDefaultCompressionMethod(), attr.getAliases(), &attr.getDefaultValue(), attr.getDefaultValueExpr());
    }
    // ... then one int64 attribute per catalog dimension (matched coordinates) ...
    for (size_t i = 0, n = catalogDimensions.size(); i < n; i++, j++) {
        matchAttributes[j] = AttributeDesc(j, "match_" + catalogDimensions[i].getBaseName(), TID_INT64, 0, 0);
    }
    // ... and finally the empty-tag indicator.
    matchAttributes[j] = AttributeDesc(j, DEFAULT_EMPTY_TAG_ATTRIBUTE_NAME, TID_INDICATOR, AttributeDesc::IS_EMPTY_INDICATOR, 0);
    // Second operator parameter: maximum collisions per cell; must be a
    // positive value that fits in int32 (the cast round-trip checks the range).
    int64_t maxCollisions = evaluate(((boost::shared_ptr<OperatorParamLogicalExpression>&)_parameters[1])->getExpression(), query, TID_INT64).getInt64();
    if (maxCollisions <= 0 || (int32_t)maxCollisions != maxCollisions) {
        throw USER_EXCEPTION(SCIDB_SE_INFER_SCHEMA, SCIDB_LE_WRONG_OPERATOR_ARGUMENT2) << "positive";
    }
    // Extra dimension distinguishes multiple matches falling on the same cell.
    resultDimensions.push_back(DimensionDesc("collision", 0, 0, maxCollisions-1, maxCollisions-1, (uint32_t)maxCollisions, 0));
    return ArrayDesc("match", matchAttributes, resultDimensions);
}
void Blittable::init() { auto as = DisplayDevice::createAttributeSet(); attribs_.reset(new Attribute<vertex_texcoord>(AccessFreqHint::DYNAMIC, AccessTypeHint::DRAW)); attribs_->addAttributeDesc(AttributeDesc(AttrType::POSITION, 2, AttrFormat::FLOAT, false, sizeof(vertex_texcoord), offsetof(vertex_texcoord, vtx))); attribs_->addAttributeDesc(AttributeDesc(AttrType::TEXTURE, 2, AttrFormat::FLOAT, false, sizeof(vertex_texcoord), offsetof(vertex_texcoord, tc))); as->addAttribute(AttributeBasePtr(attribs_)); as->setDrawMode(DrawMode::TRIANGLE_STRIP); addAttributeSet(as); }
/**
 * Construct a RowCollection backed by a 2-D MemArray (Row x Column).
 * The caller's attributes are extended with an empty-tag attribute, and
 * one array iterator is opened per caller-visible attribute.
 */
RowCollection<Group,Hash>::RowCollection(boost::shared_ptr<Query> const& query, const string& name, const Attributes& attributes, size_t chunkSize)
    : _query(query), _attributes(attributes), _chunkSize(chunkSize), _sizeBuffered(0), _mode(RowCollectionModeAppend)
{
    assert(!attributes.empty());
    assert(chunkSize >= 2);

    // Allow unflushed items to occupy at most one tenth of the mem-array threshold.
    _maxSizeBuffered = Config::getInstance()->getOption<size_t>(CONFIG_MEM_ARRAY_THRESHOLD) * MiB / 10;

    // Append the empty-tag attribute after the caller's attributes.
    Attributes attributesWithET(attributes);
    attributesWithET.push_back(AttributeDesc(attributes.size(), DEFAULT_EMPTY_TAG_ATTRIBUTE_NAME, TID_BOOL, AttributeDesc::IS_EMPTY_INDICATOR, 0));

    // Schema: unbounded rows (chunk interval 1), columns chunked by _chunkSize.
    Dimensions dims(2);
    dims[0] = DimensionDesc("Row", 0, MAX_COORDINATE, 1, 0);
    dims[1] = DimensionDesc("Column", 0, MAX_COORDINATE, _chunkSize, 0);
    ArrayDesc schema(name, attributesWithET, dims);

    // Back the collection with a MemArray and open an iterator per attribute
    // (the empty tag gets no iterator of its own).
    _theArray = make_shared<MemArray>(schema,query);
    _arrayIterators.reserve(attributes.size());
    for (size_t attrId = 0; attrId < attributes.size(); ++attrId) {
        _arrayIterators.push_back(_theArray->getIterator(attrId));
    }
}
// Schema for test_cache: a single nullable double attribute "dummy"
// over one small dimension "i".
ArrayDesc inferSchema(std::vector< ArrayDesc> schemas, boost::shared_ptr< Query> query)
{
    Dimensions outputDims;
    outputDims.push_back(DimensionDesc("i",0,0,1,0));

    Attributes outputAttrs;
    outputAttrs.push_back(AttributeDesc(0, "dummy", TID_DOUBLE, AttributeDesc::IS_NULLABLE, 0));

    return ArrayDesc("test_cache", outputAttrs, outputDims);
}
/**
 * Schema for split(): one string attribute "value" holding each chunk of
 * the split input, addressed by (source_instance_id, chunk_no).
 */
ArrayDesc inferSchema(std::vector< ArrayDesc> schemas, boost::shared_ptr< Query> query)
{
    // Construct the settings up front so invalid parameters are rejected here.
    SplitSettings settings (_parameters, true, query);

    vector<AttributeDesc> outputAttrs(1);
    outputAttrs[0] = AttributeDesc((AttributeID)0, "value", TID_STRING, 0, 0);

    vector<DimensionDesc> outputDims(2);
    outputDims[0] = DimensionDesc("source_instance_id", 0, 0, MAX_COORDINATE, MAX_COORDINATE, 1, 0);
    outputDims[1] = DimensionDesc("chunk_no", 0, 0, MAX_COORDINATE, MAX_COORDINATE, 1, 0);

    return ArrayDesc("split", outputAttrs, outputDims);
}
/**
 * Schema for fast_count: a one-dimensional array holding a single nullable
 * uint64 "count" attribute, using the default partitioning but keeping the
 * input array's residency.
 *
 * Fix: the original computed `nInstances = query->getInstancesCount()` but
 * never used it (it fed only a commented-out alternative), producing an
 * unused-variable warning; the dead local is removed and the alternative
 * kept as a comment.
 */
ArrayDesc inferSchema(std::vector< ArrayDesc> schemas, shared_ptr< Query> query)
{
    ArrayDesc const& inputSchema = schemas[0];
    //FastCountSettings settings (_parameters, true, query);

    vector<DimensionDesc> dimensions(1);
    dimensions[0] = DimensionDesc("i", 0, 0, CoordinateBounds::getMax(), CoordinateBounds::getMax(), 1, 0);
    // Alternative bounded by instance count:
    // dimensions[0] = DimensionDesc("i", 0, 0, query->getInstancesCount()-1, query->getInstancesCount()-1, 1, 0);

    vector<AttributeDesc> attributes;
    attributes.push_back(AttributeDesc((AttributeID)0, "count", TID_UINT64, AttributeDesc::IS_NULLABLE, 0));

    return ArrayDesc("fast_count", attributes, dimensions, defaultPartitioning(), inputSchema.getResidency(), false);
}
// Schema for a unit-test operator: one boolean "success" cell at i=0,
// default-partitioned over the query's default residency.
ArrayDesc inferSchema(vector<ArrayDesc> inputSchemas, shared_ptr<Query> query)
{
    Dimensions dims(1);
    dims[0] = DimensionDesc("i", 0, 0, 0, 0, 1, 0);

    Attributes atts(1);
    atts[0] = AttributeDesc((AttributeID)0, "success", TID_BOOL, 0, CompressorType::NONE);

    return ArrayDesc("", atts, dims, defaultPartitioning(), query->getDefaultArrayResidency());
}
/**
 * Infer the schema for a "max" aggregate over one attribute.
 * With at most one parameter the result is a single-cell array holding
 * "<attr>_max"; additional parameters name grouping dimensions, which are
 * carried into the result (overlap dropped).
 */
ArrayDesc inferSchema(std::vector< ArrayDesc> schemas, boost::shared_ptr< Query> query)
{
    assert(schemas.size() == 1);
    ArrayDesc const& inputDesc = schemas[0];
    Dimensions const& inputDims = inputDesc.getDimensions();
    Attributes const& inputAttrs = inputDesc.getAttributes();

    // Aggregated attribute: first parameter if given, else attribute 0.
    AttributeID targetAttr = 0;
    if (!_parameters.empty()) {
        targetAttr = ((boost::shared_ptr<OperatorParamReference>&)_parameters[0])->getObjectNo();
    }

    AggregatePtr maxAggregate =
        AggregateLibrary::getInstance()->createAggregate("max", TypeLibrary::getType(inputAttrs[targetAttr].getType()));
    Attributes aggAttrs(1);
    aggAttrs[0] = AttributeDesc((AttributeID)0, inputAttrs[targetAttr].getName() + "_max",
                                maxAggregate->getResultType().typeId(), AttributeDesc::IS_NULLABLE, 0);

    if (_parameters.size() <= 1) {
        // Grand aggregate: single-cell result.
        Dimensions aggDims(1);
        aggDims[0] = DimensionDesc("i", 0, 0, 0, 0, 1, 0);
        return ArrayDesc(inputDesc.getName(), aggAttrs, aggDims);
    }

    // Group-by aggregate: the remaining parameters name grouping dimensions.
    vector<int> groupBy(_parameters.size()-1);
    for (size_t g = 0; g < groupBy.size(); g++) {
        groupBy[g] = ((boost::shared_ptr<OperatorParamReference>&)_parameters[g + 1])->getObjectNo();
    }

    Dimensions aggDims(groupBy.size());
    for (size_t g = 0, n = aggDims.size(); g < n; g++) {
        DimensionDesc const& srcDim = inputDims[groupBy[g]];
        // Keep source chunking only when grouping starts with dimension 0;
        // otherwise collapse the dimension to a single chunk of its current length.
        aggDims[g] = DimensionDesc(srcDim.getBaseName(),
                                   srcDim.getStartMin(),
                                   srcDim.getCurrStart(),
                                   srcDim.getCurrEnd(),
                                   srcDim.getEndMax(),
                                   g == 0 && groupBy[g] == 0 ? srcDim.getChunkInterval() : srcDim.getCurrLength(),
                                   0,
                                   srcDim.getType(),
                                   srcDim.getFlags(),
                                   srcDim.getMappingArrayName(),
                                   srcDim.getComment(),
                                   srcDim.getFuncMapOffset(),
                                   srcDim.getFuncMapScale());
    }
    return ArrayDesc(inputDesc.getName(), aggAttrs, aggDims);
}
/**
 * Infer the schema for attribute renaming: parameters come in pairs of
 * (attribute reference, new name); each referenced attribute is rebuilt with
 * the new name while every other property is preserved. Array identity
 * (id/uaid/version) is carried through unchanged.
 *
 * Fix: bind the source descriptor by const reference instead of copying a
 * full AttributeDesc (name, aliases, default value, expression) per pair;
 * the reference targets oldAttributes, which is distinct from the
 * newAttributes container being written, so this is safe.
 */
ArrayDesc inferSchema(std::vector< ArrayDesc> schemas, boost::shared_ptr< Query> query)
{
    assert(schemas.size() == 1);
    const ArrayDesc& desc = schemas[0];
    const Attributes &oldAttributes = desc.getAttributes();
    Attributes newAttributes = desc.getAttributes();

    for (size_t paramNo = 0, paramCount = _parameters.size(); paramNo < paramCount; paramNo += 2) {
        int32_t attNo = ((boost::shared_ptr<OperatorParamReference>&)_parameters[paramNo])->getObjectNo();
        const AttributeDesc& attr = oldAttributes[attNo];  // no copy needed
        newAttributes[attNo] = AttributeDesc(attNo,
                                             ((boost::shared_ptr<OperatorParamReference>&)_parameters[paramNo + 1])->getObjectName(),
                                             attr.getType(), attr.getFlags(),
                                             attr.getDefaultCompressionMethod(), attr.getAliases(),
                                             &attr.getDefaultValue(), attr.getDefaultValueExpr());
    }
    return ArrayDesc(desc.getId(), desc.getUAId(), desc.getVersionId(), desc.getName(), newAttributes, desc.getDimensions());
}
/**
 * Build the output descriptor for a window aggregate: the input's dimensions
 * with chunk overlap forced to zero, one attribute per aggregate-call
 * parameter, plus the input's empty bitmap attribute when present.
 */
inline ArrayDesc createWindowDesc(ArrayDesc const& desc)
{
    Dimensions const& srcDims = desc.getDimensions();
    Dimensions outDims(srcDims.size());
    for (size_t d = 0, nDims = srcDims.size(); d < nDims; d++) {
        DimensionDesc const& dim = srcDims[d];
        // Same dimension, but with chunk overlap zeroed.
        outDims[d] = DimensionDesc(dim.getBaseName(), dim.getNamesAndAliases(),
                                   dim.getStartMin(), dim.getCurrStart(),
                                   dim.getCurrEnd(), dim.getEndMax(),
                                   dim.getChunkInterval(), 0,
                                   dim.getType(), dim.getFlags(),
                                   dim.getMappingArrayName(), dim.getComment(),
                                   dim.getFuncMapOffset(), dim.getFuncMapScale());
    }

    ArrayDesc output (desc.getName(), Attributes(), outDims);

    // The first 2*nDims parameters are the window bounds; the rest are
    // aggregate calls, each contributing one output attribute.
    for (size_t p = srcDims.size() * 2, nParams = _parameters.size(); p < nParams; p++) {
        addAggregatedAttribute( (shared_ptr <OperatorParamAggregateCall> &) _parameters[p], desc, output);
    }

    // Carry the empty bitmap attribute through, if the input has one.
    AttributeDesc const* eAtt = desc.getEmptyBitmapAttribute();
    if (eAtt) {
        output.addAttribute(AttributeDesc(output.getAttributes().size(), eAtt->getName(), eAtt->getType(),
                                          eAtt->getFlags(), eAtt->getDefaultCompressionMethod()));
    }
    return output;
}
/** * Test sort array once. * The method sets the chunk limit to the indicated number, * then tries to create a chunk of the inidicated size and * type, using the indicated mode. If "expectFail" is true * then the method looks for the "CHUNK_TOO_LARGE" exception, * and fails if it does not see it. If "expectFail" is false, * the method does the opposite. Before exiting, the method * always resets the chunk limit to the original value. * * @param[in] query * @param[in] limit the desired chunk limit (as a string) * @param[in] type the value type * @param[in] count how many values * @param[in] mode iteration mode * @param[in] expectFail is an error expected? * * @throw SCIDB_SE_INTERNAL::SCIDB_LE_UNITTEST_FAILED */ void testOnce_ChunkLimit(std::shared_ptr<Query>& query, string const& limit, TypeId const& type, int count, int mode, bool expectFail) { bool failed = false; LOG4CXX_DEBUG(logger, "ChunkLimit UnitTest Attempt [type=" << type << "][count=" << count << "][mode=" << mode << "][expectFail=" << expectFail << "]"); // Array schema vector<AttributeDesc> attributes(1); attributes[0] = AttributeDesc((AttributeID)0, "X", type, AttributeDesc::IS_NULLABLE, 0); vector<DimensionDesc> dimensions(1); dimensions[0] = DimensionDesc(string("dummy_dimension"), 0, count, count, 0); ArrayDesc schema("dummy_array", addEmptyTagAttribute(attributes), dimensions, defaultPartitioning(), query->getDefaultArrayResidency()); // Test array std::shared_ptr<MemArray> array(new MemArray(schema, query)); // set the chunk size limit std::string oldLimit; try { oldLimit = Config::getInstance()->setOptionValue("chunk-size-limit-mb", limit); } catch (Exception const& e) { LOG4CXX_DEBUG(logger, "ChunkLimit UnitTest unexpected exception: " << e.getStringifiedLongErrorCode()); throw SYSTEM_EXCEPTION(SCIDB_SE_INTERNAL, SCIDB_LE_UNITTEST_FAILED) << "UnitTestChunkLimitPhysical" << "setOptionValue"; } // try to create the chunk try { buildRandomArrayChunk(query, *array, type, count, mode); } catch 
(Exception const& x) { if (!expectFail) { LOG4CXX_DEBUG(logger, "ChunkLimit UnitTest unexpected exception: " << x.getStringifiedLongErrorCode()); failed = true; } else if (x.getLongErrorCode() != SCIDB_LE_CHUNK_TOO_LARGE) { LOG4CXX_DEBUG(logger, "ChunkLimit UnitTest incorrect exception: " << x.getStringifiedLongErrorCode()); failed = true; } } // set the chunk size limit back try { Config::getInstance()->setOptionValue("chunk-size-limit-mb", oldLimit); } catch (Exception const& e) { LOG4CXX_DEBUG(logger, "ChunkLimit UnitTest unexpected exception: " << e.getStringifiedLongErrorCode()); throw SYSTEM_EXCEPTION(SCIDB_SE_INTERNAL, SCIDB_LE_UNITTEST_FAILED) << "UnitTestChunkLimitPhysical" << "setOptionValue2"; } if (failed) { LOG4CXX_DEBUG(logger, "ChunkLimit UnitTest Failed [type=" << type << "][count=" << count << "][mode=" << mode << "][expectFail=" << expectFail << "]"); throw SYSTEM_EXCEPTION(SCIDB_SE_INTERNAL, SCIDB_LE_UNITTEST_FAILED) << "UnitTestChunkLimitPhysical" << "unexpected status"; } else { LOG4CXX_DEBUG(logger, "ChunkLimit UnitTest Success [type=" << type << "][count=" << count << "][mode=" << mode << "][expectFail=" << expectFail << "]"); } }
/**
 * Infer the schema for tile_apply: the input's attributes (minus any
 * indicator-typed ones) followed by one new attribute per
 * (name, expression) parameter pair, with the input's empty bitmap
 * attribute re-appended last when present.
 *
 * @throws SCIDB_LE_WRONG_OPERATOR_ARGUMENTS_COUNT2 if parameters are not paired
 * @throws SCIDB_LE_DUPLICATE_ATTRIBUTE_NAME if a new name collides with an existing one
 */
ArrayDesc inferSchema(std::vector< ArrayDesc> schemas, boost::shared_ptr< Query> query) {
    assert(schemas.size() == 1);
    assert(_parameters[0]->getParamType() == PARAM_ATTRIBUTE_REF);
    assert(_parameters[1]->getParamType() == PARAM_LOGICAL_EXPRESSION);
    // Parameters must come in (attribute name, expression) pairs.
    if ( _parameters.size() % 2 != 0 ) {
        throw USER_EXCEPTION(SCIDB_SE_INFER_SCHEMA, SCIDB_LE_WRONG_OPERATOR_ARGUMENTS_COUNT2) << "tile_apply";
    }
    Attributes outAttrs;
    AttributeID nextAttrId =0;
    // Copy the input attributes, skipping indicator-typed ones; the empty
    // bitmap (if any) is re-added at the very end instead.
    for (size_t i=0; i<schemas[0].getAttributes().size(); i++) {
        AttributeDesc const& attr = schemas[0].getAttributes()[i];
        if(attr.getType()!=TID_INDICATOR) {
            outAttrs.push_back( AttributeDesc(nextAttrId++, attr.getName(), attr.getType(), attr.getFlags(), attr.getDefaultCompressionMethod(), attr.getAliases(), attr.getReserve(), &attr.getDefaultValue(), attr.getDefaultValueExpr(), attr.getVarSize()));
        }
    }
    // One new attribute per parameter pair, typed by its compiled expression.
    size_t k;
    for (k=0; k<_parameters.size(); k+=2) {
        const string &attributeName = ((boost::shared_ptr<OperatorParamReference>&)_parameters[k])->getObjectName();
        // Compile the expression to learn its result type and nullability.
        Expression expr;
        expr.compile(((boost::shared_ptr<OperatorParamLogicalExpression>&)_parameters[k+1])->getExpression(), query, _properties.tile, TID_VOID, schemas);
        assert(!_properties.tile);
        int flags = 0;
        if (expr.isNullable()) {
            flags = (int)AttributeDesc::IS_NULLABLE;
        }
        // Reject a new name that duplicates any attribute already emitted.
        for (size_t j = 0; j < nextAttrId; j++) {
            AttributeDesc const& attr = outAttrs[j];
            if (attr.getName() == attributeName) {
                throw USER_EXCEPTION(SCIDB_SE_INFER_SCHEMA, SCIDB_LE_DUPLICATE_ATTRIBUTE_NAME) << attributeName;
            }
        }
        outAttrs.push_back(AttributeDesc(nextAttrId++, attributeName, expr.getType(), flags, 0));
    }
    // Re-append the input's empty bitmap attribute as the last attribute,
    // checking it does not collide with anything emitted above.
    if(schemas[0].getEmptyBitmapAttribute()) {
        AttributeDesc const* emptyTag = schemas[0].getEmptyBitmapAttribute();
        for (size_t j = 0; j < nextAttrId; j++) {
            AttributeDesc const& attr = outAttrs[j];
            if (attr.getName() == emptyTag->getName()) {
                throw USER_EXCEPTION(SCIDB_SE_INFER_SCHEMA, SCIDB_LE_DUPLICATE_ATTRIBUTE_NAME) << attr.getName();
            }
        }
        outAttrs.push_back( AttributeDesc(nextAttrId,
            emptyTag->getName(), emptyTag->getType(), emptyTag->getFlags(), emptyTag->getDefaultCompressionMethod(), emptyTag->getAliases(), emptyTag->getReserve(), &emptyTag->getDefaultValue(), emptyTag->getDefaultValueExpr(), emptyTag->getVarSize()));
    }
    return ArrayDesc(schemas[0].getName(), outAttrs, schemas[0].getDimensions());
}