/**
 * @see ConstIterator::getPosition()
 *
 * Returns the iterator's current position, or raises
 * SCIDB_LE_NO_CURRENT_ELEMENT when the iterator is exhausted.
 */
Coordinates const& WindowArrayIterator::getPosition()
{
    if (hasCurrent) {
        return currPos;
    }
    throw USER_EXCEPTION(SCIDB_SE_EXECUTION, SCIDB_LE_NO_CURRENT_ELEMENT);
}
/**
 * @see ConstChunkIterator::getItem()
 *
 * Returns the value at the current position, or raises
 * SCIDB_LE_NO_CURRENT_ELEMENT once the iterator has run off the end.
 */
Value const& MaterializedWindowChunkIterator::getItem()
{
    if (!end()) {
        return _nextValue;
    }
    throw USER_EXCEPTION(SCIDB_SE_EXECUTION, SCIDB_LE_NO_CURRENT_ELEMENT);
}
/**
 * @see ConstChunkIterator::getItem()
 *
 * Returns the value at the current position, or raises
 * SCIDB_LE_NO_CURRENT_ELEMENT when no current element exists.
 */
Value const& WindowChunkIterator::getItem()
{
    if (_hasCurrent) {
        return _nextValue;
    }
    throw USER_EXCEPTION(SCIDB_SE_EXECUTION, SCIDB_LE_NO_CURRENT_ELEMENT);
}
/**
 * Report a parse error by converting it into a SciDB syntax exception.
 *
 * @param loc parser location of the error (currently unused; the exception
 *            carries only the message text)
 * @param msg human-readable error description appended to the exception
 * @throws USER_EXCEPTION(SCIDB_SE_SYNTAX, SCIDB_LE_IQUERY_PARSER_ERROR) always
 */
void IqueryParser::error2(const class location& loc, const std::string& msg)
{
    throw USER_EXCEPTION(scidb::SCIDB_SE_SYNTAX, scidb::SCIDB_LE_IQUERY_PARSER_ERROR) << msg;
}
/**
 * Infer the output schema for tile_apply(input, attr1, expr1 [, attr2, expr2, ...]).
 *
 * The output keeps every non-indicator attribute of the input, then appends
 * one computed attribute per (name, expression) parameter pair, and finally
 * re-appends the input's empty bitmap attribute (if any).
 *
 * @param schemas the single input schema
 * @param query   current query context, used to compile the expressions
 * @return        the inferred output ArrayDesc (same name and dimensions as input)
 * @throws SCIDB_LE_WRONG_OPERATOR_ARGUMENTS_COUNT2 when parameters do not come in pairs
 * @throws SCIDB_LE_DUPLICATE_ATTRIBUTE_NAME when a computed attribute name collides
 */
ArrayDesc inferSchema(std::vector< ArrayDesc> schemas, boost::shared_ptr< Query> query)
{
    assert(schemas.size() == 1);

    // Validate the parameter count *before* touching individual parameters:
    // the asserts below index _parameters[0] and _parameters[1], which in a
    // debug build would fire (or index out of bounds) on a malformed call
    // before the intended user-facing exception could be thrown.
    if ( _parameters.size() % 2 != 0 )
    {
        throw USER_EXCEPTION(SCIDB_SE_INFER_SCHEMA, SCIDB_LE_WRONG_OPERATOR_ARGUMENTS_COUNT2)
            << "tile_apply";
    }
    assert(_parameters[0]->getParamType() == PARAM_ATTRIBUTE_REF);
    assert(_parameters[1]->getParamType() == PARAM_LOGICAL_EXPRESSION);

    Attributes outAttrs;
    AttributeID nextAttrId = 0;

    // Copy all input attributes except the empty-tag indicator (it is
    // re-appended last, below).
    for (size_t i = 0; i < schemas[0].getAttributes().size(); i++) {
        AttributeDesc const& attr = schemas[0].getAttributes()[i];
        if (attr.getType() != TID_INDICATOR) {
            outAttrs.push_back( AttributeDesc(nextAttrId++,
                                              attr.getName(),
                                              attr.getType(),
                                              attr.getFlags(),
                                              attr.getDefaultCompressionMethod(),
                                              attr.getAliases(),
                                              attr.getReserve(),
                                              &attr.getDefaultValue(),
                                              attr.getDefaultValueExpr(),
                                              attr.getVarSize()));
        }
    }

    // Append one attribute per (name, expression) pair.
    for (size_t k = 0; k < _parameters.size(); k += 2) {
        const string &attributeName =
            ((boost::shared_ptr<OperatorParamReference>&)_parameters[k])->getObjectName();
        Expression expr;
        // Compile in the operator's tile mode against the input schema; the
        // result type of the expression becomes the new attribute's type.
        expr.compile(((boost::shared_ptr<OperatorParamLogicalExpression>&)_parameters[k+1])->getExpression(),
                     query, _properties.tile, TID_VOID, schemas);
        assert(!_properties.tile);

        int flags = 0;
        if (expr.isNullable()) {
            flags = (int)AttributeDesc::IS_NULLABLE;
        }

        // Reject collisions with both original and previously-added computed
        // attributes (outAttrs grows as we go, so this covers both).
        for (size_t j = 0; j < nextAttrId; j++) {
            AttributeDesc const& attr = outAttrs[j];
            if (attr.getName() == attributeName) {
                throw USER_EXCEPTION(SCIDB_SE_INFER_SCHEMA, SCIDB_LE_DUPLICATE_ATTRIBUTE_NAME)
                    << attributeName;
            }
        }
        outAttrs.push_back(AttributeDesc(nextAttrId++, attributeName, expr.getType(), flags, 0));
    }

    // Re-append the input's empty bitmap attribute, if present, after checking
    // that no computed attribute stole its name.
    if (schemas[0].getEmptyBitmapAttribute()) {
        AttributeDesc const* emptyTag = schemas[0].getEmptyBitmapAttribute();
        for (size_t j = 0; j < nextAttrId; j++) {
            AttributeDesc const& attr = outAttrs[j];
            if (attr.getName() == emptyTag->getName()) {
                throw USER_EXCEPTION(SCIDB_SE_INFER_SCHEMA, SCIDB_LE_DUPLICATE_ATTRIBUTE_NAME)
                    << attr.getName();
            }
        }
        outAttrs.push_back( AttributeDesc(nextAttrId,
                                          emptyTag->getName(),
                                          emptyTag->getType(),
                                          emptyTag->getFlags(),
                                          emptyTag->getDefaultCompressionMethod(),
                                          emptyTag->getAliases(),
                                          emptyTag->getReserve(),
                                          &emptyTag->getDefaultValue(),
                                          emptyTag->getDefaultValueExpr(),
                                          emptyTag->getVarSize()));
    }
    return ArrayDesc(schemas[0].getName(), outAttrs, schemas[0].getDimensions());
}
/**
 * Infer the output schema for regrid().
 *
 * Parameter layout (positional): the first nDims parameters are per-dimension
 * block sizes; the next numAggregateCalls are aggregate calls; optionally a
 * final group of exactly nDims explicit output chunk sizes follows.
 *
 * Each output dimension spans ceil(inputLength / blockSize) cells starting at
 * the input's startMin; unbounded input dimensions stay unbounded.  Output
 * attributes are one per aggregate call, plus an empty-tag indicator.
 *
 * @param schemas the single input schema
 * @param query   current query context, used to evaluate the size expressions
 * @return        the inferred output ArrayDesc
 * @throws SCIDB_LE_NUM_CHUNKSIZES_NOT_MATCH_NUM_DIMS if chunk sizes are given
 *         but their count differs from the dimension count
 * @throws SCIDB_LE_OP_REGRID_ERROR1 / SCIDB_LE_CHUNK_SIZE_MUST_BE_POSITIVE on
 *         non-positive block / chunk sizes
 */
ArrayDesc inferSchema(std::vector<ArrayDesc> schemas, std::shared_ptr<Query> query)
{
    assert(schemas.size() == 1);
    ArrayDesc const& inputDesc = schemas[0];
    size_t nDims = inputDesc.getDimensions().size();
    Dimensions outDims(nDims);

    // How many parameters are of each type.
    size_t numAggregateCalls = 0;
    size_t numChunkSizes = 0;
    for (size_t i = nDims, n = _parameters.size(); i < n; ++i)
    {
        if (_parameters[i]->getParamType() == PARAM_AGGREGATE_CALL)
        {
            ++numAggregateCalls;
        }
        else // chunk size
        {
            ++numChunkSizes;
        }
    }
    // Explicit chunk sizes are all-or-nothing: either one per dimension or none.
    if (numChunkSizes && numChunkSizes != nDims)
    {
        throw USER_EXCEPTION(SCIDB_SE_INFER_SCHEMA, SCIDB_LE_NUM_CHUNKSIZES_NOT_MATCH_NUM_DIMS)
            << "regrid()";
    }

    // Generate the output dims.
    for (size_t i = 0; i < nDims; i++)
    {
        // Parameter i is the block (regrid cell) size for dimension i.
        int64_t blockSize = evaluate(((std::shared_ptr<OperatorParamLogicalExpression>&)_parameters[i])->getExpression(),
                                     query, TID_INT64).getInt64();
        if (blockSize <= 0)
        {
            throw USER_QUERY_EXCEPTION(SCIDB_SE_INFER_SCHEMA, SCIDB_LE_OP_REGRID_ERROR1,
                                       _parameters[i]->getParsingContext());
        }
        DimensionDesc const& srcDim = inputDesc.getDimensions()[i];

        // Default to the source chunk interval unless explicit chunk sizes
        // were supplied; those sit after the block sizes and aggregate calls.
        int64_t chunkSize = srcDim.getRawChunkInterval();
        if (numChunkSizes)
        {
            size_t index = i + nDims + numAggregateCalls;
            chunkSize = evaluate(((std::shared_ptr<OperatorParamLogicalExpression>&)_parameters[index])->getExpression(),
                                 query, TID_INT64).getInt64();
            if (chunkSize <= 0)
            {
                throw USER_EXCEPTION(SCIDB_SE_INFER_SCHEMA, SCIDB_LE_CHUNK_SIZE_MUST_BE_POSITIVE);
            }
        }

        // Output extent is ceil(length / blockSize) cells from startMin;
        // an unbounded ('*') source dimension stays unbounded.  currEnd and
        // endMax are set to the same value here.
        outDims[i] = DimensionDesc( srcDim.getBaseName(),
                                    srcDim.getNamesAndAliases(),
                                    srcDim.getStartMin(),
                                    srcDim.getStartMin(),
                                    srcDim.getEndMax() == CoordinateBounds::getMax()
                                        ? CoordinateBounds::getMax()
                                        : srcDim.getStartMin() + (srcDim.getLength() + blockSize - 1)/blockSize - 1,
                                    srcDim.getEndMax() == CoordinateBounds::getMax()
                                        ? CoordinateBounds::getMax()
                                        : srcDim.getStartMin() + (srcDim.getLength() + blockSize - 1)/blockSize - 1,
                                    chunkSize,
                                    0 );
    }

    // Input and output dimensions are 1-to-1, so...
    _fixer.takeAllDimensions(inputDesc.getDimensions());

    ArrayDesc outSchema(inputDesc.getName(), Attributes(), outDims,
                        defaultPartitioning(),
                        query->getDefaultArrayResidency() );

    // One output attribute per aggregate call parameter.
    for (size_t i = nDims, j = nDims + numAggregateCalls; i < j; i++)
    {
        bool isInOrderAggregation = false;
        addAggregatedAttribute( (std::shared_ptr <OperatorParamAggregateCall> &) _parameters[i],
                                inputDesc, outSchema, isInOrderAggregation);
    }

    // Result is emptyable: append the empty-tag indicator attribute.
    AttributeDesc et ((AttributeID) outSchema.getAttributes().size(),
                      DEFAULT_EMPTY_TAG_ATTRIBUTE_NAME,
                      TID_INDICATOR,
                      AttributeDesc::IS_EMPTY_INDICATOR,
                      0);
    outSchema.addAttribute(et);

    return outSchema;
}
/**
 * Infer the output schema for the multiply operator.
 *
 * Validates that both inputs are single-attribute, 2-D, bounded matrices with
 * matching second dimensions (length, start, and - for now - chunk interval),
 * identical non-nullable attribute types; then builds a 2-D result whose
 * dimensions are the first dimensions of the two inputs and whose single
 * attribute "multiply" carries the common input type.
 *
 * @param schemas the two input schemas
 * @param query   current query context (unused here)
 * @return        the inferred "MultiplyRow" ArrayDesc
 * @throws SCIDB_LE_OP_MULTIPLY_ERROR2..8 on each validation failure
 */
ArrayDesc inferSchema(std::vector< ArrayDesc> schemas, boost::shared_ptr< Query> query)
{
    assert(schemas.size() == 2);

    // Both inputs must have exactly one (real) attribute...
    if (!hasSingleAttribute(schemas[0]) || !hasSingleAttribute(schemas[1]))
        throw USER_EXCEPTION(SCIDB_SE_INFER_SCHEMA, SCIDB_LE_OP_MULTIPLY_ERROR2);
    // ...and be two-dimensional...
    if (schemas[0].getDimensions().size() != 2 || schemas[1].getDimensions().size() != 2)
        throw USER_EXCEPTION(SCIDB_SE_INFER_SCHEMA, SCIDB_LE_OP_MULTIPLY_ERROR3);
    // ...with all four dimensions bounded.
    if (schemas[0].getDimensions()[0].getLength() == INFINITE_LENGTH
        || schemas[0].getDimensions()[1].getLength() == INFINITE_LENGTH
        || schemas[1].getDimensions()[0].getLength() == INFINITE_LENGTH
        || schemas[1].getDimensions()[1].getLength() == INFINITE_LENGTH)
        throw USER_EXCEPTION(SCIDB_SE_INFER_SCHEMA, SCIDB_LE_OP_MULTIPLY_ERROR4);
    // The joined (second) dimensions must agree in length and origin.
    if (schemas[0].getDimensions()[1].getLength() != schemas[1].getDimensions()[1].getLength()
        || schemas[0].getDimensions()[1].getStart() != schemas[1].getDimensions()[1].getStart())
        throw USER_EXCEPTION(SCIDB_SE_INFER_SCHEMA, SCIDB_LE_OP_MULTIPLY_ERROR5);
    // FIXME: This condition needs to go away later
    if (schemas[0].getDimensions()[1].getChunkInterval() != schemas[1].getDimensions()[1].getChunkInterval())
        throw USER_EXCEPTION(SCIDB_SE_INFER_SCHEMA, SCIDB_LE_OP_MULTIPLY_ERROR6);
    // Attribute types must match exactly and be non-nullable.
    if (schemas[0].getAttributes()[0].getType() != schemas[1].getAttributes()[0].getType())
        throw USER_EXCEPTION(SCIDB_SE_INFER_SCHEMA, SCIDB_LE_OP_MULTIPLY_ERROR7);
    if (schemas[0].getAttributes()[0].isNullable() || schemas[1].getAttributes()[0].isNullable())
        throw USER_EXCEPTION(SCIDB_SE_INFER_SCHEMA, SCIDB_LE_OP_MULTIPLY_ERROR8);

    // Single output attribute "multiply", same type as the inputs.
    Attributes atts(1);
    TypeId type = schemas[0].getAttributes()[0].getType();
    AttributeDesc multAttr((AttributeID)0, "multiply", type, 0, 0);
    atts[0] = multAttr;

    // Output dimensions: first dimension of each input, copied field-by-field.
    Dimensions dims(2);
    DimensionDesc const& d1 = schemas[0].getDimensions()[0];
    dims[0] = DimensionDesc(d1.getBaseName(),
                            d1.getNamesAndAliases(),
                            d1.getStartMin(), d1.getCurrStart(),
                            d1.getCurrEnd(), d1.getEndMax(),
                            d1.getChunkInterval(), 0,
                            d1.getType(), d1.getFlags(),
                            d1.getMappingArrayName(), d1.getComment(),
                            d1.getFuncMapOffset(), d1.getFuncMapScale());
    DimensionDesc const& d2 = schemas[1].getDimensions()[0];
    // Disambiguate the second dimension's name if both inputs use the same one.
    dims[1] = DimensionDesc(d1.getBaseName() == d2.getBaseName() ? d1.getBaseName() + "2" : d2.getBaseName(),
                            d2.getNamesAndAliases(),
                            d2.getStartMin(), d2.getCurrStart(),
                            d2.getCurrEnd(), d2.getEndMax(),
                            d2.getChunkInterval(), 0,
                            d2.getType(), d2.getFlags(),
                            d2.getMappingArrayName(), d2.getComment(),
                            d2.getFuncMapOffset(), d2.getFuncMapScale());
    return ArrayDesc("MultiplyRow",atts,dims);
}
/**
 * Execute an XQuery-update REPLACE statement.
 *
 * The child operator (arg) yields, per replacement target: the node to be
 * replaced, the nodes to replace it with, and a separator item.  This routine
 * gathers those into sequences, copies any replacement node that is itself a
 * replacement target into temp space (so it is not destroyed before use),
 * then for each target deletes it and deep-copies the replacement nodes into
 * its place, firing triggers and full-text-search hooks where enabled.
 *
 * @param arg child operator producing targets / replacements / separators
 * @throws SE2020 / SE2021 on invalid input items (unless IGNORE_UPDATE_ERRORS)
 */
void replace(PPOpIn arg)
{
    xptr node, tmp_node, attr_node;
    schema_node_xptr scm_node;
    xqp_tuple t(arg.ts);
    xptr_sequence arg1seq;      // Indirection of nodes which are going to be replaced
    xptr_sequence arg1seq_tmp;  // Nodes which are going to be replaced
    xptr_sequence arg2seq;      // Nodes to replace with (both persistent and temp)

    /* Persistent nodes to replace with (+ theirs position in arg2seq) */
    descript_sequence arg3seq(2);

    upd_ns_map* ins_swiz = NULL;  // namespace remapping built during deep copies
    bool is_node_updated = true;  // true => next node item is a replacement target

    /*
     * Fill up sequences with nodes to update and update with,
     * child (arg) returns the following sequence of items:
     * 1. node to be replaced (1)
     * 2. nodes to replace with (2)
     * 3. special tuple which contains separator value (3)
     */
    arg.op->next(t);
    while (!t.is_eos())
    {
        if (t.cells[0].is_node())
        {
            node = t.cells[0].get_node();
            CHECKP(node);
            /*
             * In (1) case node must be persistent (is_node_updated is true)
             * In (2) case it can be temporary
             * In both cases document nodes are not allowed
             */
            if ((!is_node_updated || is_node_persistent(node)) && !is_node_document(node))
            {
                xptr indir = nodeGetIndirection(node);
                if (is_node_updated)
                {
                    /* Case (1) - fill up sequence with nodes to be replaced */
                    is_node_updated=false;
                    /* Next nodes from arg are case (2) nodes, so we can use shared lock */
                    local_lock_mrg->lock(lm_s);
                    arg1seq.add(indir);
                    arg1seq_tmp.add(node);
                }
                else
                {
                    /* Case (2) - fill up sequence with nodes to replace with */
                    if (is_node_persistent(node))
                    {
                        // Remember persistent replacement nodes with their slot
                        // in arg2seq; they may need copying to temp below.
                        xqp_tuple tup(2);
                        tup.copy(tuple_cell::node(node),tuple_cell((int64_t)(arg2seq.size())));
                        arg3seq.add(tup);
                    }
                    arg2seq.add(indir);
                }
            }
#ifndef IGNORE_UPDATE_ERRORS
            else
            {
                throw USER_EXCEPTION(SE2020);
            }
#endif
        }
        else
        {
            /* Must be separator in this case (3) */
            if (t.cells[0].get_atomic_type() == se_separator)
            {
                // XNULL marks the end of one target's replacement group.
                arg2seq.add(XNULL);
                is_node_updated=true;
                /* Next nodes from arg are case (1) node, so we can use shared lock */
                local_lock_mrg->lock(lm_x);
            }
#ifndef IGNORE_UPDATE_ERRORS
            else throw USER_EXCEPTION(SE2021);
#endif
        }
        arg.op->next(t);
    }

    /* Nothing to do in this case */
    if (arg1seq.size()<=0) return;

    /* Checking authorization */
    if (is_auth_check_needed(REPLACE_STATEMENT))
        auth_for_update(&arg1seq, REPLACE_STATEMENT, false);

    /* Find all common nodes in agr3seq (nodes to replace with) and
     * arg1seq_tmp (nodes to be replaced). Make a copy of all such nodes.
     */
    arg1seq_tmp.sort();
    arg3seq.sort();
    descript_sequence::iterator it3 = arg3seq.begin();
    xptr_sequence::iterator it1 = arg1seq_tmp.begin();
    while(it3 != arg3seq.end() && it1 != arg1seq_tmp.end())
    {
        switch(nid_cmp_effective((*it3).cells[0].get_node(), *it1))
        {
        case 0: case -2:
            {
                // Replacement node is (inside) a target: copy it to temp space
                // and patch its slot in arg2seq to point at the copy.
                node = copy_to_temp((*it3).cells[0].get_node());
                xptr indir=nodeGetIndirection(node);
                arg2seq.set(indir,(*it3).cells[1].get_xs_integer());
                ++it3;
            }
            break;
        case 1: ++it1; break;
        case 2: ++it1; break;
        case -1: ++it3; break;
        }
    }

#ifdef SE_ENABLE_TRIGGERS
    apply_per_statement_triggers(&arg1seq, false, NULL, false, TRIGGER_BEFORE, TRIGGER_REPLACE_EVENT);
#endif

    arg3seq.clear();
    xptr_sequence::iterator it = arg1seq.begin();
    xptr_sequence::iterator sit = arg2seq.begin();
    int ctr=0;
    do
    {
        xqp_tuple tup(2);
        /* arg3seq will contain pairs: node -> int, namely
         * node to be replaced -> place in sequence of nodes to replace with */
        tup.copy(tuple_cell::node(indirectionDereferenceCP(*it)),tuple_cell((int64_t)ctr));
        arg3seq.add(tup);
        /* XNULL separates nodes in arg2seq (nodes replace with) per each
         * node in arg1seq (nodes to be replaced) */
        while(*sit!=XNULL)
        {
            sit++;
            ctr++;
        }
        sit++;
        ctr++;
        it++;
    } while (it != arg1seq.end());

    // Re-sort by document order and convert nodes to safenodes so that
    // deletions during the main loop do not invalidate the references.
    arg3seq.sort();
    it3=arg3seq.begin();
    descript_sequence arg4seq(2);
    do
    {
        node = (*it3).cells[0].get_node();
        xqp_tuple t = (*it3);
        t.cells[0].set_safenode(node);
        ++it3;
        arg4seq.add(t);
    } while (it3!=arg3seq.end());

    /* Deleting, inserting new nodes */
    // Walk targets in reverse document order.
    it3 = arg4seq.end();
    do
    {
        --it3;
        node = (*it3).cells[0].get_safenode();
        int pos = (*it3).cells[1].get_xs_integer();
        sit = arg2seq.begin() + pos;  // start of this target's replacement group
        CHECKP(node);
        // Capture neighbors before any deletion; they anchor the insert position.
        xptr leftn = nodeGetLeftSibling(node);
        xptr rightn = nodeGetRightSibling(node);
        xptr par_ind = nodeGetParentIndirection(node);
        bool a_m = is_node_attribute(node);
        bool d_m = a_m || is_node_text(node);  // delete-before-insert for attr/text targets

#ifdef SE_ENABLE_TRIGGERS
        scm_node = getSchemaPointer(node);
        tmp_node = prepare_old_node(node, scm_node, TRIGGER_REPLACE_EVENT);

        /* Before-for-each-node triggers (cycle for all inserted nodes) */
        xptr_sequence::iterator tr_it = sit;
        while(*tr_it != XNULL)
        {
            // An XNULL trigger result vetoes this whole replacement.
            if(apply_per_node_triggers(indirectionDereferenceCP(*tr_it), node, indirectionDereferenceCP(par_ind), scm_node, TRIGGER_BEFORE, TRIGGER_REPLACE_EVENT) == XNULL)
            {
                goto next_replacement;
            }
            tr_it++;
        }
#endif /* SE_ENABLE_TRIGGERS */

        //pre_deletion
        if (d_m)
        {
            delete_node(node, &delete_node_context);
        }

        //1.inserting attributes from sequence
        while(*sit != XNULL)
        {
            xptr node_child = indirectionDereferenceCP(*sit);
            CHECKP(node_child);
            if (is_node_attribute(node_child))
            {
                attr_node = deep_copy_node(XNULL, XNULL, indirectionDereferenceCP(par_ind), node_child, is_node_persistent(node_child) ? NULL : &ins_swiz, true);
#ifdef SE_ENABLE_TRIGGERS
                apply_per_node_triggers(attr_node, tmp_node, indirectionDereferenceCP(par_ind), scm_node, TRIGGER_AFTER, TRIGGER_REPLACE_EVENT);
#endif
            }
            sit++;
        }

        //2. finding place of insertion
        if (a_m)
        {
            // Attribute target: non-attribute replacements go before the
            // parent's first element child (or first among children).
            node = getFirstChildNode(indirectionDereferenceCP(par_ind));
            if (node != XNULL)
            {
                CHECKP(node);
                if (is_node_element(node))
                {
                    rightn=node;
                    node=XNULL;
                }
                else
                {
                    rightn=XNULL;
                }
            }
        }
        else
        {
            if (d_m)
            {
                // Text target already deleted: anchor on a surviving sibling.
                if (rightn==XNULL)
                    node=leftn;
                else
                    node=XNULL;
            }
        }

        //3.main insert cycle
        sit = arg2seq.begin() + pos;
        while(*sit != XNULL)
        {
            xptr node_child = indirectionDereferenceCP(*sit);
            CHECKP(node_child);
            if (!is_node_attribute(node_child))
            {
                node = deep_copy_node(node, rightn, indirectionDereferenceCP(par_ind), node_child, is_node_persistent(node_child) ? NULL : &ins_swiz, true);
#ifdef SE_ENABLE_TRIGGERS
                apply_per_node_triggers(node, tmp_node, indirectionDereferenceCP(par_ind), scm_node, TRIGGER_AFTER, TRIGGER_REPLACE_EVENT);
#endif
            }
            sit++;
        }

        //post_deletion
        // Element/other targets are deleted only after their replacements are in.
        if (!d_m)
        {
            xptr del_node = (*it3).cells[0].get_safenode();
            delete_node(del_node, &delete_node_context);
        }

next_replacement:;
    } while (it3 != arg4seq.begin());

    if (ins_swiz != NULL)
    {
        delete ins_swiz;
    }
#ifdef SE_ENABLE_FTSEARCH
    execute_modifications();
#endif
#ifdef SE_ENABLE_TRIGGERS
    apply_per_statement_triggers(NULL, false, NULL, false, TRIGGER_AFTER, TRIGGER_REPLACE_EVENT);
#endif
}
/**
 * Render an XSL-FO document to PDF (or another FOP output format) via Apache
 * FOP, driven through JNI.
 *
 * args[0] is the FOP output-format MIME string; args[1] is the XSL-FO item,
 * which is serialized to XML, fed through a JAXP identity Transformer into
 * FOP's SAX handler, and the resulting bytes are returned as a single
 * base64Binary item.
 *
 * @param args            external-function arguments (format, FO document)
 * @param aStaticContext  used to locate/start the singleton JVM
 * @param aDynamincContext unused
 * @return single-item sequence holding the base64-encoded output
 * @throws USER_EXCEPTION JVM-NOT-STARTED if the JVM cannot be created;
 *         JAVA-EXCEPTION carrying the Java stack trace on any Java error
 */
ItemSequence_t GeneratePDFFunction::evaluate(const ExternalFunction::Arguments_t& args,
                                             const zorba::StaticContext* aStaticContext,
                                             const zorba::DynamicContext* aDynamincContext) const
{
    // First argument: the requested FOP output format.
    Iterator_t lIter = args[0]->getIterator();
    lIter->open();
    Item outputFormat;
    lIter->next(outputFormat);
    lIter->close();

    // NOTE(review): lException is never assigned here; presumably the
    // CHECK_EXCEPTION macro captures the pending jthrowable into it before
    // throwing JavaException — confirm against the macro definition.
    jthrowable lException = 0;
    // NOTE(review): `static` makes env shared across calls (and threads);
    // it is re-fetched on every call, but this looks thread-unsafe — verify.
    static JNIEnv* env;
    try {
        env = zorba::jvm::JavaVMSingleton::getInstance(aStaticContext)->getEnv();
        jstring outFotmatString = env->NewStringUTF(outputFormat.getStringValue().c_str());

        // Local variables
        std::ostringstream os;
        Zorba_SerializerOptions_t lOptions;
        Serializer_t lSerializer = Serializer::createSerializer(lOptions);
        jclass fopFactoryClass;
        jobject fopFactory;
        jmethodID fopFactoryNewInstance;
        jclass byteArrayOutputStreamClass;
        jobject byteArrayOutputStream;
        jobject fop;
        jmethodID newFop;
        jclass transformerFactoryClass;
        jobject transformerFactory;
        jobject transormer;
        jclass stringReaderClass;
        jobject stringReader;
        jstring xmlUTF;
        const char* xml;
        std::string xmlString;
        jclass streamSourceClass;
        jobject streamSource;
        jobject defaultHandler;
        jclass saxResultClass;
        jobject saxResult;
        jboolean isCopy;
        jbyteArray res;
        Item base64;
        String resStore;
        jsize dataSize;
        jbyte* dataElements;

        // Second argument: the XSL-FO document to render.
        Item item;
        lIter = args[1]->getIterator();
        lIter->open();
        lIter->next(item);
        lIter->close();

        // Serialize the item to XML text for the Java side.
        SingletonItemSequence lSequence(item);
        lSerializer->serialize(&lSequence, os);
        xmlString = os.str();
        xml = xmlString.c_str();

        // Create an OutputStream
        byteArrayOutputStreamClass = env->FindClass("java/io/ByteArrayOutputStream");
        CHECK_EXCEPTION(env);
        byteArrayOutputStream = env->NewObject(byteArrayOutputStreamClass,
                                               env->GetMethodID(byteArrayOutputStreamClass, "<init>", "()V"));
        CHECK_EXCEPTION(env);

        // Create a FopFactory instance
        fopFactoryClass = env->FindClass("org/apache/fop/apps/FopFactory");
        CHECK_EXCEPTION(env);
        fopFactoryNewInstance = env->GetStaticMethodID(fopFactoryClass, "newInstance", "()Lorg/apache/fop/apps/FopFactory;");
        CHECK_EXCEPTION(env);
        fopFactory = env->CallStaticObjectMethod(fopFactoryClass, fopFactoryNewInstance);
        CHECK_EXCEPTION(env);

        // Create the Fop writing to the in-memory output stream.
        newFop = env->GetMethodID(fopFactoryClass, "newFop", "(Ljava/lang/String;Ljava/io/OutputStream;)Lorg/apache/fop/apps/Fop;");
        CHECK_EXCEPTION(env);
        fop = env->CallObjectMethod(fopFactory, newFop, outFotmatString, byteArrayOutputStream);
        CHECK_EXCEPTION(env);

        // Create the (identity) Transformer
        transformerFactoryClass = env->FindClass("javax/xml/transform/TransformerFactory");
        CHECK_EXCEPTION(env);
        transformerFactory = env->CallStaticObjectMethod(transformerFactoryClass,
                                                         env->GetStaticMethodID(transformerFactoryClass, "newInstance", "()Ljavax/xml/transform/TransformerFactory;"));
        CHECK_EXCEPTION(env);
        transormer = env->CallObjectMethod(transformerFactory,
                                           env->GetMethodID(transformerFactoryClass, "newTransformer", "()Ljavax/xml/transform/Transformer;"));
        CHECK_EXCEPTION(env);

        // Create Source: StreamSource(StringReader(xml))
        xmlUTF = env->NewStringUTF(xml);
        stringReaderClass = env->FindClass("java/io/StringReader");
        CHECK_EXCEPTION(env);
        stringReader = env->NewObject(stringReaderClass,
                                      env->GetMethodID(stringReaderClass, "<init>", "(Ljava/lang/String;)V"), xmlUTF);
        CHECK_EXCEPTION(env);
        streamSourceClass = env->FindClass("javax/xml/transform/stream/StreamSource");
        CHECK_EXCEPTION(env);
        streamSource = env->NewObject(streamSourceClass,
                                      env->GetMethodID(streamSourceClass, "<init>", "(Ljava/io/Reader;)V"), stringReader);
        CHECK_EXCEPTION(env);

        // Create the SAXResult wrapping FOP's default content handler.
        defaultHandler = env->CallObjectMethod(fop,
                                               env->GetMethodID(env->FindClass("org/apache/fop/apps/Fop"), "getDefaultHandler", "()Lorg/xml/sax/helpers/DefaultHandler;"));
        CHECK_EXCEPTION(env);
        saxResultClass = env->FindClass("javax/xml/transform/sax/SAXResult");
        CHECK_EXCEPTION(env);
        saxResult = env->NewObject(saxResultClass,
                                   env->GetMethodID(saxResultClass, "<init>", "(Lorg/xml/sax/ContentHandler;)V"), defaultHandler);
        CHECK_EXCEPTION(env);

        // Transform: drives the FO document through FOP's handler.
        env->CallObjectMethod(transormer,
                              env->GetMethodID(env->FindClass("javax/xml/transform/Transformer"), "transform", "(Ljavax/xml/transform/Source;Ljavax/xml/transform/Result;)V"),
                              streamSource, saxResult);
        CHECK_EXCEPTION(env);

        // Close outputstream
        env->CallObjectMethod(byteArrayOutputStream,
                              env->GetMethodID(env->FindClass("java/io/OutputStream"), "close", "()V"));
        CHECK_EXCEPTION(env);
        // NOTE(review): this second SAXResult FindClass looks redundant
        // (saxResultClass is not used again) — candidate for removal.
        saxResultClass = env->FindClass("javax/xml/transform/sax/SAXResult");
        CHECK_EXCEPTION(env);

        // Get the byte array
        res = (jbyteArray) env->CallObjectMethod(byteArrayOutputStream,
                                                 env->GetMethodID(byteArrayOutputStreamClass, "toByteArray", "()[B"));
        CHECK_EXCEPTION(env);

        // Create the result: copy the bytes out and base64-encode them.
        dataSize = env->GetArrayLength(res);
        dataElements = env->GetByteArrayElements(res, &isCopy);
        std::string lBinaryString((const char*) dataElements, dataSize);
        std::stringstream lStream(lBinaryString);
        String base64S;
        base64::encode(lStream, &base64S);
        Item lRes( theFactory->createBase64Binary(base64S.data(), base64S.size(), true) );
        return ItemSequence_t(new SingletonItemSequence(lRes));
    } catch (zorba::jvm::VMOpenException&) {
        Item lQName = theFactory->createQName("http://zorba.io/modules/xsl-fo",
                                              "JVM-NOT-STARTED");
        throw USER_EXCEPTION(lQName, "Could not start the Java VM (is the classpath set?)");
    } catch (JavaException&) {
        // Format the Java stack trace via Throwable.printStackTrace(PrintWriter)
        // into a StringWriter, then re-throw as a Zorba user exception.
        jclass stringWriterClass = env->FindClass("java/io/StringWriter");
        jclass printWriterClass = env->FindClass("java/io/PrintWriter");
        jclass throwableClass = env->FindClass("java/lang/Throwable");
        jobject stringWriter = env->NewObject(
            stringWriterClass,
            env->GetMethodID(stringWriterClass, "<init>", "()V"));
        jobject printWriter = env->NewObject(
            printWriterClass,
            env->GetMethodID(printWriterClass, "<init>", "(Ljava/io/Writer;)V"),
            stringWriter);
        env->CallObjectMethod(lException,
                              env->GetMethodID(throwableClass, "printStackTrace", "(Ljava/io/PrintWriter;)V"),
                              printWriter);
        //env->CallObjectMethod(printWriter, env->GetMethodID(printWriterClass, "flush", "()V"));
        jmethodID toStringMethod = env->GetMethodID(stringWriterClass, "toString", "()Ljava/lang/String;");
        jobject errorMessageObj = env->CallObjectMethod(stringWriter, toStringMethod);
        jstring errorMessage = (jstring) errorMessageObj;
        const char *errMsg = env->GetStringUTFChars(errorMessage, 0);
        std::stringstream s;
        s << "A Java Exception was thrown:" << std::endl << errMsg;
        env->ReleaseStringUTFChars(errorMessage, errMsg);
        std::string err("");
        err += s.str();
        env->ExceptionClear();
        Item lQName = theFactory->createQName("http://zorba.io/modules/xsl-fo",
                                              "JAVA-EXCEPTION");
        throw USER_EXCEPTION(lQName, err);
    }
    return ItemSequence_t(new EmptySequence());
}
/**
 * Load one chunk's worth of CSV records into per-attribute lookahead chunks.
 *
 * Reads records from the CSV parser until the first attribute's chunk is
 * full (or EOF), writing one value per attribute per record.  Records with
 * too few fields raise an import error (routed through handleError); extra
 * trailing fields only trigger a one-time warning.
 *
 * @param query      current query (for chunk iterators and warnings)
 * @param chunkIndex index of the lookahead chunk set to fill
 * @return true iff any field data was consumed (false signals end of input)
 */
bool CsvChunkLoader::loadChunk(boost::shared_ptr<Query>& query, size_t chunkIndex)
{
    // Must do EOF check *before* nextImplicitChunkPosition() call, or
    // we risk stepping out of bounds.
    if (_csvParser.empty()) {
        int ch = ::getc(fp());
        if (ch == EOF) {
            return false;
        }
        ::ungetc(ch, fp());
    }

    // Reposition and make sure all is cool.
    nextImplicitChunkPosition(MY_CHUNK);
    enforceChunkOrder("csv loader");

    // Initialize a chunk and chunk iterator for each attribute.
    Attributes const& attrs = schema().getAttributes();
    size_t nAttrs = attrs.size();
    vector< boost::shared_ptr<ChunkIterator> > chunkIterators(nAttrs);
    for (size_t i = 0; i < nAttrs; i++) {
        Address addr(i, _chunkPos);
        MemChunk& chunk = getLookaheadChunk(i, chunkIndex);
        chunk.initialize(array(), &schema(), addr, attrs[i].getDefaultCompressionMethod());
        chunkIterators[i] = chunk.getIterator(query,
                                              ChunkIterator::NO_EMPTY_CHECK |
                                              ConstChunkIterator::SEQUENTIAL_WRITE);
    }

    char const *field = 0;
    int rc = 0;
    bool sawData = false;
    bool sawEof = false;
    // One iteration per record; stop when the chunk is full or at EOF.
    while (!chunkIterators[0]->end()) {
        _column = 0;
        array()->countCell();

        // Parse and write out a line's worth of fields.  NB if you
        // have to 'continue;' after a writeItem() call, make sure the
        // iterator (and possibly the _column) gets incremented.
        //
        for (size_t i = 0; i < nAttrs; ++i) {
            try {
                // Handle empty tag...
                if (i == emptyTagAttrId()) {
                    attrVal(i).setBool(true);
                    chunkIterators[i]->writeItem(attrVal(i));
                    ++(*chunkIterators[i]); // ...but don't increment _column.
                    continue;
                }

                // Parse out next input field.
                rc = _csvParser.getField(field);
                if (rc == CsvParser::END_OF_FILE) {
                    sawEof = true;
                    break;
                }
                if (rc == CsvParser::END_OF_RECORD) {
                    // Got record terminator, but we have more attributes!
                    throw USER_EXCEPTION(SCIDB_SE_IMPORT_ERROR, SCIDB_LE_OP_INPUT_TOO_FEW_FIELDS)
                        << _csvParser.getFileOffset() << _csvParser.getRecordNumber() << _column;
                }
                if (rc > 0) {
                    // So long as we never call _csvParser.setStrict(true), we should never see this.
                    throw USER_EXCEPTION(SCIDB_SE_IMPORT_ERROR, SCIDB_LE_CSV_PARSE_ERROR)
                        << _csvParser.getFileOffset() << _csvParser.getRecordNumber()
                        << _column << csv_strerror(rc);
                }
                SCIDB_ASSERT(rc == CsvParser::OK);
                SCIDB_ASSERT(field);
                sawData = true;

                // Process input field: null spellings first, for nullable attrs.
                if (mightBeNull(field) && attrs[i].isNullable()) {
                    int8_t missingReason = parseNullField(field);
                    if (missingReason >= 0) {
                        attrVal(i).setNull(missingReason);
                        chunkIterators[i]->writeItem(attrVal(i));
                        ++(*chunkIterators[i]);
                        _column += 1;
                        continue;
                    }
                }
                if (converter(i)) {
                    // User-supplied conversion function from string to attr type.
                    Value v;
                    v.setString(field);
                    const Value* vp = &v;
                    (*converter(i))(&vp, &attrVal(i), NULL);
                    chunkIterators[i]->writeItem(attrVal(i));
                }
                else {
                    TypeId const &tid = typeIdOfAttr(i);
                    if (attrs[i].isNullable() &&
                        (*field == '\0' || (iswhitespace(field) && IS_NUMERIC(tid)))) {
                        // [csv2scidb compat] With csv2scidb, empty strings (or for numeric
                        // fields, whitespace) became nulls if the target attribute was
                        // nullable.  We keep the same behavior.  (We should *not* do this for
                        // TSV, that format requires explicit nulls!)
                        attrVal(i).setNull();
                    } else {
                        StringToValue(tid, field, attrVal(i));
                    }
                    chunkIterators[i]->writeItem(attrVal(i));
                }
            }
            catch (Exception& ex) {
                // Record context for error reporting, then let the shadow
                // array decide whether to rethrow or log-and-continue.
                _badField = field;
                _fileOffset = _csvParser.getFileOffset();
                array()->handleError(ex, chunkIterators[i], i);
            }

            _column += 1;
            ++(*chunkIterators[i]);
        }

        if (sawEof) {
            break;
        }

        // We should be at EOL now, otherwise there are too many fields on this line.  Post a
        // warning: it seems useful not to complain too loudly about this or to abort the load, but
        // we do want to mention it.
        //
        rc = _csvParser.getField(field);
        if (!_tooManyWarning && (rc != CsvParser::END_OF_RECORD)) {
            _tooManyWarning = true;
            query->postWarning(SCIDB_WARNING(SCIDB_LE_OP_INPUT_TOO_MANY_FIELDS)
                               << _csvParser.getFileOffset() << _csvParser.getRecordNumber() << _column);
        }

        array()->completeShadowArrayRow(); // done with cell/record
    }

    // Flush whatever was written, including a final partial chunk.
    for (size_t i = 0; i < nAttrs; i++) {
        if (chunkIterators[i]) {
            chunkIterators[i]->flush();
        }
    }

    return sawData;
}
/// Return the item at the current position by delegating to the wrapped
/// input iterator; raises SCIDB_LE_NO_CURRENT_ELEMENT when exhausted.
Value& ReverseChunkIterator::getItem()
{
    if (hasCurrent) {
        return inputIterator->getItem();
    }
    throw USER_EXCEPTION(SCIDB_SE_EXECUTION, SCIDB_LE_NO_CURRENT_ELEMENT);
}
/// Advance to the next available chunk position; raises
/// SCIDB_LE_NO_CURRENT_ELEMENT if the iterator is already exhausted.
void ReverseArrayIterator::operator ++()
{
    if (hasCurrent) {
        // nextAvailable() reports whether another position exists.
        hasCurrent = nextAvailable();
        return;
    }
    throw USER_EXCEPTION(SCIDB_SE_EXECUTION, SCIDB_LE_NO_CURRENT_ELEMENT);
}
/// Report whether the current cell is empty, delegating to the wrapped
/// input iterator; raises SCIDB_LE_NO_CURRENT_ELEMENT when exhausted.
bool ReverseChunkIterator::isEmpty()
{
    if (hasCurrent) {
        return inputIterator->isEmpty();
    }
    throw USER_EXCEPTION(SCIDB_SE_EXECUTION, SCIDB_LE_NO_CURRENT_ELEMENT);
}
/**
 * Materialize and return the chunk for the current position of this iterator.
 *
 * The result chunk has one extra trailing dimension (match sequence number,
 * starting at 0).  Depending on which attribute this iterator serves, each
 * match emits: the pattern item itself, the matching catalog item, one of the
 * matched catalog coordinates, or a boolean 'true' marker.  The materialized
 * chunk is cached and reused while the position is unchanged.
 */
ConstChunk const& MatchArrayIterator::getChunk()
{
    Coordinates const& currPos = inputIterator->getPosition();
    // Cache hit: chunk already materialized for this position.
    if (chunk.isInitialized() && currPos == chunk.getFirstPosition(false)) {
        return chunk;
    }
    ConstChunk const& srcChunk = inputIterator->getChunk();
    MatchArray& array = (MatchArray&)this->array;
    match = array.findMatch(currPos);

    // Output chunk position = input position + 0 in the extra match dimension.
    Coordinates chunkPos = currPos;
    chunkPos.push_back(0);
    Address addr(attr, chunkPos);
    chunk.initialize(&array, &array.getArrayDesc(), addr, 0);
    std::shared_ptr<Query> emptyQuery;
    std::shared_ptr<ChunkIterator> dst =
        chunk.getIterator(emptyQuery, ChunkIterator::SEQUENTIAL_WRITE|ChunkIterator::NO_EMPTY_CHECK);

    if (match->initialized) {
        std::shared_ptr<ConstChunkIterator> src =
            srcChunk.getConstIterator(ConstChunkIterator::IGNORE_EMPTY_CELLS);
        // itemNo enumerates pattern items in chunk-iteration order; the match
        // hash is keyed by this ordinal.
        int64_t itemNo = 0;
        if (attr < array.nPatternAttributes) {
            // Pattern attribute: copy the pattern item once per match,
            // walking the hash bucket's collision chain.
            for (; !src->end(); ++(*src), ++itemNo) {
                MatchHash::Elem* elem = match->find(itemNo);
                if (elem != NULL) {
                    Coordinates elemPos(src->getPosition());
                    elemPos.push_back(0);  // match counter in the extra dimension
                    do {
                        if (elem->hash == itemNo) {
                            if (!dst->setPosition(elemPos)) {
                                throw USER_EXCEPTION(SCIDB_SE_EXECUTION, SCIDB_LE_NO_CURRENT_POSITION);
                            }
                            dst->writeItem(src->getItem());
                            elemPos.back() += 1;
                        }
                        elem = elem->collisionChain;
                    } while (elem != NULL);
                }
            }
        } else if (attr < array.nPatternAttributes + array.nCatalogAttributes) {
            // Catalog attribute: fetch each matching catalog item by its
            // stored coordinates.
            if (catalogIterator->setPosition(currPos)) {
                std::shared_ptr<ConstChunkIterator> ci =
                    catalogIterator->getChunk().getConstIterator(ConstChunkIterator::IGNORE_EMPTY_CELLS);
                for (; !src->end(); ++(*src), ++itemNo) {
                    MatchHash::Elem* elem = match->find(itemNo);
                    if (elem != NULL) {
                        Coordinates elemPos(src->getPosition());
                        elemPos.push_back(0);
                        do {
                            if (elem->hash == itemNo) {
                                if (!dst->setPosition(elemPos)) {
                                    throw USER_EXCEPTION(SCIDB_SE_EXECUTION, SCIDB_LE_NO_CURRENT_POSITION);
                                }
                                if (!ci->setPosition(elem->coords)) {
                                    throw USER_EXCEPTION(SCIDB_SE_EXECUTION, SCIDB_LE_NO_CURRENT_POSITION);
                                }
                                dst->writeItem(ci->getItem());
                                elemPos.back() += 1;
                            }
                            elem = elem->collisionChain;
                        } while (elem != NULL);
                    }
                }
            }
        } else if (attr < array.nPatternAttributes + array.nCatalogAttributes + currPos.size()) {
            // Synthetic coordinate attribute: emit one matched catalog
            // coordinate (dimension dimNo) per match.
            size_t dimNo = attr - array.nPatternAttributes - array.nCatalogAttributes;
            Value coordValue;
            for (; !src->end(); ++(*src), ++itemNo) {
                MatchHash::Elem* elem = match->find(itemNo);
                if (elem != NULL) {
                    Coordinates elemPos(src->getPosition());
                    elemPos.push_back(0);
                    do {
                        if (elem->hash == itemNo) {
                            if (!dst->setPosition(elemPos)) {
                                throw USER_EXCEPTION(SCIDB_SE_EXECUTION, SCIDB_LE_NO_CURRENT_POSITION);
                            }
                            coordValue.setInt64(elem->coords[dimNo]);
                            dst->writeItem(coordValue);
                            elemPos.back() += 1;
                        }
                        elem = elem->collisionChain;
                    } while (elem != NULL);
                }
            }
        } else {
            // Remaining attribute (empty-tag slot): mark each match with true.
            Value trueValue;
            trueValue.setBool(true);
            for (; !src->end(); ++(*src), ++itemNo) {
                MatchHash::Elem* elem = match->find(itemNo);
                if (elem != NULL) {
                    Coordinates elemPos(src->getPosition());
                    elemPos.push_back(0);
                    do {
                        if (elem->hash == itemNo) {
                            if (!dst->setPosition(elemPos)) {
                                throw USER_EXCEPTION(SCIDB_SE_EXECUTION, SCIDB_LE_NO_CURRENT_POSITION);
                            }
                            dst->writeItem(trueValue);
                            elemPos.back() += 1;
                        }
                        elem = elem->collisionChain;
                    } while (elem != NULL);
                }
            }
        }
    }
    dst->flush();
    return chunk;
}