/**
 * Fixed output schema for the test_cache operator: a single nullable
 * double attribute named "dummy" over a one-cell dimension "i".
 *
 * @param schemas input schemas (unused)
 * @param query   current query context (unused)
 * @return the constant test_cache descriptor
 */
ArrayDesc inferSchema(std::vector< ArrayDesc> schemas, std::shared_ptr< Query> query)
{
    Attributes attrs;
    attrs.push_back(AttributeDesc(0, "dummy", TID_DOUBLE, AttributeDesc::IS_NULLABLE, 0));

    Dimensions dims;
    dims.push_back(DimensionDesc("i", 0, 0, 1, 0));

    return ArrayDesc("test_cache", attrs, dims, defaultPartitioning());
}
/**
 * Output schema for fast_count: one nullable uint64 attribute "count" on an
 * unbounded dimension "i", inheriting the input array's residency.
 *
 * Cleanup: removed the unused local `nInstances` (computed via
 * query->getInstancesCount() but never read) and the dead commented-out
 * dimension/settings code that referenced it.
 *
 * @param schemas exactly one input schema; only its residency is used
 * @param query   current query context (no longer consulted)
 * @return the fast_count result descriptor
 */
ArrayDesc inferSchema(std::vector< ArrayDesc> schemas, shared_ptr< Query> query)
{
    ArrayDesc const& inputSchema = schemas[0];

    vector<DimensionDesc> dimensions(1);
    // Unbounded dimension: the count is produced per instance at run time.
    dimensions[0] = DimensionDesc("i", 0, 0,
                                  CoordinateBounds::getMax(), CoordinateBounds::getMax(),
                                  1, 0);

    vector<AttributeDesc> attributes;
    attributes.push_back(AttributeDesc((AttributeID)0, "count", TID_UINT64,
                                       AttributeDesc::IS_NULLABLE, 0));

    return ArrayDesc("fast_count", attributes, dimensions,
                     defaultPartitioning(), inputSchema.getResidency(), false);
}
/**
 * Single-cell result schema: one uncompressed bool attribute "success"
 * at coordinate {0} on dimension "i".
 *
 * @param inputSchemas input schemas (unused)
 * @param query        supplies the default array residency
 * @return an unnamed single-cell descriptor
 */
ArrayDesc inferSchema(vector<ArrayDesc> inputSchemas, shared_ptr<Query> query)
{
    Attributes atts(1);
    atts[0] = AttributeDesc((AttributeID)0, "success", TID_BOOL, 0, CompressorType::NONE);

    Dimensions dims(1);
    dims[0] = DimensionDesc("i", 0, 0, 0, 0, 1, 0);

    return ArrayDesc("", atts, dims,
                     defaultPartitioning(),
                     query->getDefaultArrayResidency());
}
/**
 * Validate a rename operation: both parameters must be array references,
 * source and destination must live in the same namespace, and the
 * destination name must not already exist there.
 *
 * @param schemas must be empty (rename takes no input arrays)
 * @param query   used to resolve namespace-qualified names
 * @return a placeholder descriptor carrying only distribution and residency
 * @throws SCIDB_LE_CANNOT_RENAME_ACROSS_NAMESPACES, SCIDB_LE_ARRAY_ALREADY_EXIST
 */
ArrayDesc inferSchema(std::vector<ArrayDesc> schemas, std::shared_ptr<Query> query)
{
    assert(schemas.size() == 0);
    assert(_parameters.size() == 2);
    assert(((std::shared_ptr<OperatorParam>&)_parameters[0])->getParamType() == PARAM_ARRAY_REF);
    assert(((std::shared_ptr<OperatorParam>&)_parameters[1])->getParamType() == PARAM_ARRAY_REF);

    // Resolve the (namespace, array) pair for the source ...
    std::string oldNamespaceName;
    std::string oldArrayName;
    const string& oldArrayNameOrg =
        ((std::shared_ptr<OperatorParamReference>&)_parameters[0])->getObjectName();
    query->getNamespaceArrayNames(oldArrayNameOrg, oldNamespaceName, oldArrayName);

    // ... and for the destination.
    std::string newNamespaceName;
    std::string newArrayName;
    const string& newArrayNameOrg =
        ((std::shared_ptr<OperatorParamReference>&)_parameters[1])->getObjectName();
    query->getNamespaceArrayNames(newArrayNameOrg, newNamespaceName, newArrayName);

    // Renames may not move an array between namespaces.
    if (newNamespaceName != oldNamespaceName)
    {
        throw USER_QUERY_EXCEPTION(SCIDB_SE_INFER_SCHEMA,
                                   SCIDB_LE_CANNOT_RENAME_ACROSS_NAMESPACES,
                                   _parameters[1]->getParsingContext())
            << ArrayDesc::makeQualifiedArrayName(oldNamespaceName, oldArrayName)
            << ArrayDesc::makeQualifiedArrayName(newNamespaceName, newArrayName);
    }

    // The target name must be free.
    if (scidb::namespaces::Communicator::containsArray(newNamespaceName, newArrayName))
    {
        throw USER_QUERY_EXCEPTION(SCIDB_SE_INFER_SCHEMA,
                                   SCIDB_LE_ARRAY_ALREADY_EXIST,
                                   _parameters[1]->getParsingContext())
            << newArrayName;
    }

    ArrayDesc result;
    result.setDistribution(defaultPartitioning());
    result.setResidency(query->getDefaultArrayResidency());
    return result;
}
/** * Test sort array once. * The method sets the chunk limit to the indicated number, * then tries to create a chunk of the inidicated size and * type, using the indicated mode. If "expectFail" is true * then the method looks for the "CHUNK_TOO_LARGE" exception, * and fails if it does not see it. If "expectFail" is false, * the method does the opposite. Before exiting, the method * always resets the chunk limit to the original value. * * @param[in] query * @param[in] limit the desired chunk limit (as a string) * @param[in] type the value type * @param[in] count how many values * @param[in] mode iteration mode * @param[in] expectFail is an error expected? * * @throw SCIDB_SE_INTERNAL::SCIDB_LE_UNITTEST_FAILED */ void testOnce_ChunkLimit(std::shared_ptr<Query>& query, string const& limit, TypeId const& type, int count, int mode, bool expectFail) { bool failed = false; LOG4CXX_DEBUG(logger, "ChunkLimit UnitTest Attempt [type=" << type << "][count=" << count << "][mode=" << mode << "][expectFail=" << expectFail << "]"); // Array schema vector<AttributeDesc> attributes(1); attributes[0] = AttributeDesc((AttributeID)0, "X", type, AttributeDesc::IS_NULLABLE, 0); vector<DimensionDesc> dimensions(1); dimensions[0] = DimensionDesc(string("dummy_dimension"), 0, count, count, 0); ArrayDesc schema("dummy_array", addEmptyTagAttribute(attributes), dimensions, defaultPartitioning(), query->getDefaultArrayResidency()); // Test array std::shared_ptr<MemArray> array(new MemArray(schema, query)); // set the chunk size limit std::string oldLimit; try { oldLimit = Config::getInstance()->setOptionValue("chunk-size-limit-mb", limit); } catch (Exception const& e) { LOG4CXX_DEBUG(logger, "ChunkLimit UnitTest unexpected exception: " << e.getStringifiedLongErrorCode()); throw SYSTEM_EXCEPTION(SCIDB_SE_INTERNAL, SCIDB_LE_UNITTEST_FAILED) << "UnitTestChunkLimitPhysical" << "setOptionValue"; } // try to create the chunk try { buildRandomArrayChunk(query, *array, type, count, mode); } catch 
(Exception const& x) { if (!expectFail) { LOG4CXX_DEBUG(logger, "ChunkLimit UnitTest unexpected exception: " << x.getStringifiedLongErrorCode()); failed = true; } else if (x.getLongErrorCode() != SCIDB_LE_CHUNK_TOO_LARGE) { LOG4CXX_DEBUG(logger, "ChunkLimit UnitTest incorrect exception: " << x.getStringifiedLongErrorCode()); failed = true; } } // set the chunk size limit back try { Config::getInstance()->setOptionValue("chunk-size-limit-mb", oldLimit); } catch (Exception const& e) { LOG4CXX_DEBUG(logger, "ChunkLimit UnitTest unexpected exception: " << e.getStringifiedLongErrorCode()); throw SYSTEM_EXCEPTION(SCIDB_SE_INTERNAL, SCIDB_LE_UNITTEST_FAILED) << "UnitTestChunkLimitPhysical" << "setOptionValue2"; } if (failed) { LOG4CXX_DEBUG(logger, "ChunkLimit UnitTest Failed [type=" << type << "][count=" << count << "][mode=" << mode << "][expectFail=" << expectFail << "]"); throw SYSTEM_EXCEPTION(SCIDB_SE_INTERNAL, SCIDB_LE_UNITTEST_FAILED) << "UnitTestChunkLimitPhysical" << "unexpected status"; } else { LOG4CXX_DEBUG(logger, "ChunkLimit UnitTest Success [type=" << type << "][count=" << count << "][mode=" << mode << "][expectFail=" << expectFail << "]"); } }
/**
 * Infer the tile_apply output schema: the input attributes (minus any
 * empty-tag indicator) followed by one computed attribute per
 * (name, expression) parameter pair, with the input's empty-tag attribute
 * re-appended last if present.  Duplicate attribute names are rejected.
 *
 * @param schemas exactly one input schema
 * @param query   used to compile the logical expressions
 * @return the output descriptor, same name/dimensions as the input
 * @throws SCIDB_LE_WRONG_OPERATOR_ARGUMENTS_COUNT2, SCIDB_LE_DUPLICATE_ATTRIBUTE_NAME
 */
ArrayDesc inferSchema(std::vector< ArrayDesc> schemas, std::shared_ptr< Query> query)
{
    assert(schemas.size() == 1);
    assert(_parameters[0]->getParamType() == PARAM_ATTRIBUTE_REF);
    assert(_parameters[1]->getParamType() == PARAM_LOGICAL_EXPRESSION);

    // Parameters come in (attribute name, expression) pairs.
    if (_parameters.size() % 2 != 0)
    {
        throw USER_EXCEPTION(SCIDB_SE_INFER_SCHEMA,
                             SCIDB_LE_WRONG_OPERATOR_ARGUMENTS_COUNT2) << "tile_apply";
    }

    Attributes outAttrs;
    AttributeID nextAttrId = 0;

    // Copy every input attribute except the empty-tag indicator.
    for (size_t i = 0; i < schemas[0].getAttributes().size(); ++i)
    {
        AttributeDesc const& src = schemas[0].getAttributes()[i];
        if (src.getType() == TID_INDICATOR)
        {
            continue;
        }
        outAttrs.push_back(AttributeDesc(nextAttrId++,
                                         src.getName(),
                                         src.getType(),
                                         src.getFlags(),
                                         src.getDefaultCompressionMethod(),
                                         src.getAliases(),
                                         src.getReserve(),
                                         &src.getDefaultValue(),
                                         src.getDefaultValueExpr(),
                                         src.getVarSize()));
    }

    // Append one computed attribute per parameter pair.
    for (size_t k = 0; k < _parameters.size(); k += 2)
    {
        const string& newName =
            ((std::shared_ptr<OperatorParamReference>&)_parameters[k])->getObjectName();

        Expression expr;
        expr.compile(((std::shared_ptr<OperatorParamLogicalExpression>&)_parameters[k+1])->getExpression(),
                     query, _properties.tile, TID_VOID, schemas);
        assert(!_properties.tile);

        int flags = expr.isNullable() ? (int)AttributeDesc::IS_NULLABLE : 0;

        // Reject a name that collides with anything already emitted.
        for (size_t j = 0; j < nextAttrId; ++j)
        {
            if (outAttrs[j].getName() == newName)
            {
                throw USER_EXCEPTION(SCIDB_SE_INFER_SCHEMA,
                                     SCIDB_LE_DUPLICATE_ATTRIBUTE_NAME) << newName;
            }
        }
        outAttrs.push_back(AttributeDesc(nextAttrId++, newName, expr.getType(), flags, 0));
    }

    // Re-append the input's empty-tag attribute, if it had one.
    if (schemas[0].getEmptyBitmapAttribute())
    {
        AttributeDesc const* emptyTag = schemas[0].getEmptyBitmapAttribute();
        for (size_t j = 0; j < nextAttrId; ++j)
        {
            if (outAttrs[j].getName() == emptyTag->getName())
            {
                throw USER_EXCEPTION(SCIDB_SE_INFER_SCHEMA,
                                     SCIDB_LE_DUPLICATE_ATTRIBUTE_NAME)
                    << outAttrs[j].getName();
            }
        }
        outAttrs.push_back(AttributeDesc(nextAttrId,
                                         emptyTag->getName(),
                                         emptyTag->getType(),
                                         emptyTag->getFlags(),
                                         emptyTag->getDefaultCompressionMethod(),
                                         emptyTag->getAliases(),
                                         emptyTag->getReserve(),
                                         &emptyTag->getDefaultValue(),
                                         emptyTag->getDefaultValueExpr(),
                                         emptyTag->getVarSize()));
    }

    return ArrayDesc(schemas[0].getName(), outAttrs, schemas[0].getDimensions(),
                     defaultPartitioning());
}
/**
 * Infer the regrid() output schema.  Each dimension is shrunk by its block
 * size (first nDims parameters); aggregate-call parameters add output
 * attributes; optional trailing chunk-size parameters (all-or-none, one per
 * dimension) override the source chunk intervals.  An empty-tag attribute is
 * always appended.
 *
 * @param schemas exactly one input schema
 * @param query   used to evaluate block-size/chunk-size expressions
 * @return the regridded descriptor
 * @throws SCIDB_LE_NUM_CHUNKSIZES_NOT_MATCH_NUM_DIMS,
 *         SCIDB_LE_OP_REGRID_ERROR1, SCIDB_LE_CHUNK_SIZE_MUST_BE_POSITIVE
 */
ArrayDesc inferSchema(std::vector<ArrayDesc> schemas, std::shared_ptr<Query> query)
{
    assert(schemas.size() == 1);
    ArrayDesc const& inputDesc = schemas[0];
    size_t const nDims = inputDesc.getDimensions().size();
    Dimensions outDims(nDims);

    // Count the trailing parameters by kind: aggregate calls vs. chunk sizes.
    size_t numAggregateCalls = 0;
    size_t numChunkSizes = 0;
    for (size_t i = nDims; i < _parameters.size(); ++i)
    {
        if (_parameters[i]->getParamType() == PARAM_AGGREGATE_CALL)
        {
            ++numAggregateCalls;
        }
        else  // chunk size
        {
            ++numChunkSizes;
        }
    }

    // Chunk sizes, if supplied at all, must cover every dimension.
    if (numChunkSizes && numChunkSizes != nDims)
    {
        throw USER_EXCEPTION(SCIDB_SE_INFER_SCHEMA,
                             SCIDB_LE_NUM_CHUNKSIZES_NOT_MATCH_NUM_DIMS) << "regrid()";
    }

    // Generate the output dims.
    for (size_t i = 0; i < nDims; ++i)
    {
        int64_t const blockSize =
            evaluate(((std::shared_ptr<OperatorParamLogicalExpression>&)_parameters[i])->getExpression(),
                     query, TID_INT64).getInt64();
        if (blockSize <= 0)
        {
            throw USER_QUERY_EXCEPTION(SCIDB_SE_INFER_SCHEMA,
                                       SCIDB_LE_OP_REGRID_ERROR1,
                                       _parameters[i]->getParsingContext());
        }

        DimensionDesc const& srcDim = inputDesc.getDimensions()[i];

        // Chunk interval: explicit override if given, else keep the source's.
        int64_t chunkSize = srcDim.getRawChunkInterval();
        if (numChunkSizes)
        {
            size_t const index = i + nDims + numAggregateCalls;
            chunkSize =
                evaluate(((std::shared_ptr<OperatorParamLogicalExpression>&)_parameters[index])->getExpression(),
                         query, TID_INT64).getInt64();
            if (chunkSize <= 0)
            {
                throw USER_EXCEPTION(SCIDB_SE_INFER_SCHEMA,
                                     SCIDB_LE_CHUNK_SIZE_MUST_BE_POSITIVE);
            }
        }

        // New end coordinate: unbounded stays unbounded; otherwise the block
        // count (length rounded up by blockSize), offset from the start.
        int64_t const newEnd =
            srcDim.getEndMax() == CoordinateBounds::getMax()
                ? CoordinateBounds::getMax()
                : srcDim.getStartMin() + (srcDim.getLength() + blockSize - 1) / blockSize - 1;

        outDims[i] = DimensionDesc(srcDim.getBaseName(),
                                   srcDim.getNamesAndAliases(),
                                   srcDim.getStartMin(),
                                   srcDim.getStartMin(),
                                   newEnd,
                                   newEnd,
                                   chunkSize,
                                   0);
    }

    // Input and output dimensions are 1-to-1, so...
    _fixer.takeAllDimensions(inputDesc.getDimensions());

    ArrayDesc outSchema(inputDesc.getName(), Attributes(), outDims,
                        defaultPartitioning(),
                        query->getDefaultArrayResidency());

    // One output attribute per aggregate call.
    for (size_t i = nDims; i < nDims + numAggregateCalls; ++i)
    {
        bool isInOrderAggregation = false;
        addAggregatedAttribute((std::shared_ptr<OperatorParamAggregateCall>&)_parameters[i],
                               inputDesc, outSchema, isInOrderAggregation);
    }

    AttributeDesc emptyTag((AttributeID)outSchema.getAttributes().size(),
                           DEFAULT_EMPTY_TAG_ATTRIBUTE_NAME,
                           TID_INDICATOR,
                           AttributeDesc::IS_EMPTY_INDICATOR,
                           0);
    outSchema.addAttribute(emptyTag);
    return outSchema;
}
/**
 * Projection schema: keep only the attributes named by the parameters,
 * renumbered 0..n-1 in parameter order; if the empty-tag indicator was not
 * among them, append the input's empty-bitmap attribute at the end.
 *
 * @param schemas exactly one input schema
 * @param query   current query context (unused)
 * @return the projected descriptor, same name/dimensions as the input
 */
ArrayDesc inferSchema(std::vector< ArrayDesc> schemas, std::shared_ptr< Query> query)
{
    assert(schemas.size() == 1);
    for (Parameters::const_iterator it = _parameters.begin(); it != _parameters.end(); ++it)
    {
        assert(((std::shared_ptr<OperatorParamReference>&)*it)->getParamType() == PARAM_ATTRIBUTE_REF);
    }

    const Attributes& inAttrs = schemas[0].getAttributes();
    Attributes outAttrs;
    bool keptIndicator = false;

    size_t const n = _parameters.size();
    for (size_t i = 0; i < n; ++i)
    {
        const AttributeDesc& src =
            inAttrs[((std::shared_ptr<OperatorParamReference>&)_parameters[i])->getObjectNo()];
        outAttrs.push_back(AttributeDesc(i,
                                         src.getName(),
                                         src.getType(),
                                         src.getFlags(),
                                         src.getDefaultCompressionMethod(),
                                         src.getAliases(),
                                         &src.getDefaultValue(),
                                         src.getDefaultValueExpr()));
        keptIndicator |= src.isEmptyIndicator();
    }

    // If the caller did not explicitly keep the indicator, carry it over.
    if (!keptIndicator)
    {
        AttributeDesc const* indicator = schemas[0].getEmptyBitmapAttribute();
        if (indicator != NULL)
        {
            outAttrs.push_back(AttributeDesc(n,
                                             indicator->getName(),
                                             indicator->getType(),
                                             indicator->getFlags(),
                                             indicator->getDefaultCompressionMethod(),
                                             indicator->getAliases()));
        }
    }

    return ArrayDesc(schemas[0].getName(), outAttrs, schemas[0].getDimensions(),
                     defaultPartitioning());
}