// Demonstrates the Array class, including implicit conversion from int.
int main(int argc, const char* argv[]) {
    Array integers1(7);      // construct an Array holding 7 elements
    outputArray(integers1);  // print its contents
    // The int argument is implicitly converted to a temporary Array via
    // Array's single-int constructor, then printed.
    // NOTE(review): the original comment claimed the value 3 was converted,
    // but the code passes 1.
    outputArray(1); // convert 1 to an Array and output Array's contents
    return 0;
}
/*
 * Prints a small fixed array, bubble-sorts it, then prints it again.
 * Fix: the array size previously repeated the magic number 7 instead of
 * reusing the constant n, so the two could silently drift apart.
 */
int main() {
    const int n = 7;                        // element count
    int a[n] = {10, 1, 3, 12, -8, 15, 27};  // was int a[7]; n keeps size and count in sync
    outputArray(a, n);      // before sorting
    sortBubbleArray(a, n);
    outputArray(a, n);      // after sorting
    return 0;
}
/*
 * Fills a heap-allocated array of 20 ints with random values in [0, 99],
 * prints it, sorts it with sortBuble using the 'less' comparator, and
 * prints it again.
 * Fix: the malloc result was previously used unchecked; allocation failure
 * would have crashed in generateArray.
 */
int main() {
    srand(time(NULL));                  // seed the PRNG once per run
    int *array = NULL, size = 20;
    array = (int*)malloc(size * sizeof(int));
    if (array == NULL) {                // malloc can fail; bail out cleanly
        return 1;
    }
    generateArray(array, size, 0, 99);  // random values in [0, 99]
    outputArray(array, size);
    sortBuble(array, size, less);       // 'less' presumably selects ascending order -- TODO confirm
    outputArray(array, size);
    free(array);
    array = NULL;                       // avoid a dangling pointer
    return 0;
}
/**
 * Serialize the accumulated NLQ summary (count N, linear sums L, squared
 * sums Q) into a 1-D output MemArray with a single chunk.
 *
 * Chunk layout (1-based coordinates):
 *   cell 1            : nlq.N
 *   cells 2 .. d+2    : nlq.L[1..d+1]
 *   cells d+3 .. 2d+3 : nlq.Q[1..d+1]
 *
 * @param query the current query context
 * @return the populated output array
 *
 * Note: cells are written in strictly increasing coordinate order, as
 * required by ChunkIterator::SEQUENTIAL_WRITE.
 */
shared_ptr<Array> writeGamma(shared_ptr<Query> query) {
    // Output array and its iterator for all the chunks inside that array.
    shared_ptr<Array> outputArray(new MemArray(_schema, query));
    shared_ptr<ArrayIterator> outputArrayIter = outputArray->getIterator(0);
    shared_ptr<ChunkIterator> outputChunkIter;
    Coordinates position(1, 1);  // 1-D coordinate, starting at cell 1
    outputChunkIter = outputArrayIter->newChunk(position).getIterator(query, ChunkIterator::SEQUENTIAL_WRITE);
    size_t i;
    Value valGamma;
    // Cell 1: the observation count N.
    valGamma.setDouble(nlq.N);
    outputChunkIter->setPosition(position);
    outputChunkIter->writeItem(valGamma);
    // Cells 2 .. d+2: the linear sums L[1..d+1].
    for(i=1; i<=nlq.d+1; i++) {
        position[0] = i+1;
        valGamma.setDouble(nlq.L[i]);
        outputChunkIter->setPosition(position);
        outputChunkIter->writeItem(valGamma);
    }
    // Cells d+3 .. 2d+3: the squared sums Q[1..d+1].
    for(i=1; i<=nlq.d+1; i++) {
        position[0] = i+nlq.d+2;
        valGamma.setDouble(nlq.Q[i]);
        outputChunkIter->setPosition(position);
        outputChunkIter->writeItem(valGamma);
    }
    outputChunkIter->flush();  // commit the chunk to the array
    return outputArray;
}
/**
 * Materialize the (d+2)x(d+2) Gamma matrix into a 2-D output MemArray.
 * Only the lower triangle of the member matrix Gamma is stored; the upper
 * triangle is reconstructed by symmetry (cell (i,j) with i<j reads
 * Gamma[j][i]).
 *
 * @param d     number of data dimensions (output is (d+2) x (d+2))
 * @param query the current query context
 * @return the populated output array
 *
 * Note: cells are written row-major in increasing coordinate order, as
 * required by ChunkIterator::SEQUENTIAL_WRITE.
 */
shared_ptr<Array> writeGamma(size_t d, shared_ptr<Query> query) {
    // Output array and its iterator for all the chunks inside that array.
    shared_ptr<Array> outputArray(new MemArray(_schema, query));
    shared_ptr<ArrayIterator> outputArrayIter = outputArray->getIterator(0);
    shared_ptr<ChunkIterator> outputChunkIter;
    Coordinates position(2, 1);  // 2-D coordinate, starting at (1,1)
    // The output array has only one chunk.
    outputChunkIter = outputArrayIter->newChunk(position).getIterator(query, ChunkIterator::SEQUENTIAL_WRITE);
    size_t i, j;
    Value valGamma;
    double value;
    for(i=0; i<d+2; i++) {
        position[0] = i+1;  // rows are 1-based in the output schema
        for(j=0; j<d+2; j++) {
            if(i>=j) {
                value = Gamma[i][j];  // lower triangle stored directly
            } else {
                value = Gamma[j][i];  // upper triangle mirrored by symmetry
            }
            position[1] = j+1;
            outputChunkIter->setPosition(position);
            valGamma.setDouble(value);
            outputChunkIter->writeItem(valGamma);
        }
    }
    outputChunkIter->flush();  // commit the single chunk
    return outputArray;
}
/*
 * Drives the encrypt/decrypt round trip: read a key array and an input
 * string, encrypt the string with the key, print the cipher text, then
 * decrypt it back.
 */
int main() {
    int key[10];     // encryption key, filled by inputArray
    int plain[30];   // raw input characters
    int cipher[30];  // encrypted output
    int length;      // number of characters read
    int result;      // value produced by decrypting

    inputArray(key);
    length = inputString(plain);
    encrypting(key, plain, cipher, length);
    outputArray(cipher, length);
    result = decrypting(cipher, key, length);
    return 0;
}
/*
 * Exercises the hand-rolled memory/string routines (myMemcpy, myMemcmp,
 * myMemmove, myMemset, myStrlen, myStrcat, myStrcmp, myStrcpy, myStrmove)
 * against fixed data, printing each result so the output can be compared
 * with the standard-library equivalents by eye.
 */
int _tmain(int argc, _TCHAR* argv[]) {
    int c[10] = { 1,2,3,4,5,6,7,8,9,10 };
    int d[10];
    // Copy c into d, show both, then compare them (expected equal).
    myMemcpy( d, c, sizeof( int )*10 );
    outputArray( c, 10 );
    outputArray( d, 10 );
    printf( "%d\n", myMemcmp( c, d, sizeof( int )*10 ) );
    // Overlapping moves in both directions (the memmove-style cases).
    myMemmove( c, &c[2], sizeof( int )*8 );
    outputArray( c, 10 );
    myMemmove( &c[2], c, sizeof( int )*8 );
    outputArray( c, 10 );
    // Zero-fill d and show it.
    myMemset( d, 0, sizeof( int )*10 );
    outputArray( d, 10 );
    char a[100] = "123456789";
    char b[100] = "abcdefghijklmn";
    printf( "%d\n", myStrlen( a ) );
    printf( "%d\n", myStrlen( b ) );
    printf( "%s\n", myStrcat( b, a ) );
    // Overlapping strcat cases left disabled (undefined behavior for strcat).
    //printf( "%s\n", myStrcat( a, &a[2] ) );
    //printf( "%s\n", myStrcat( &a[2], a ) );
    printf( "%s\n", a );
    printf( "%d\n", myStrcmp( b, a ) );
    printf( "%s\n", myStrcpy( b, a ) );
    // Overlapping string moves in both directions.
    printf( "%s\n", myStrmove( a, &a[3] ) );
    printf( "%s\n", myStrmove( &a[4], a ) );
    printf( "%s\n", a );
    return 0;
}
int main() { srand(time(NULL)); int intArray[ARRAY_SIZE]; float floatArray[ARRAY_SIZE]; populateArray(intArray); populateArray(floatArray); cout << "Your arrays BEFORE sorting: " << endl; outputArray(intArray); outputArray(floatArray); cout << endl; sort(intArray); sort(floatArray); cout << "Your arrays AFTER sorting: " << endl; outputArray(intArray); outputArray(floatArray); cout << "\nThat's all, folks!" << endl; return 0; }
/**
 * Serialize the per-class NLQ summaries (map 'nlq') into a 2-D output
 * MemArray: one row per class (up to the member limit k), with columns
 *   1           : N (observation count for the class)
 *   2 .. d+2    : L[1..d+1]
 *   d+3 .. 2d+3 : Q[1..d+1]
 *
 * @param query the current query context
 * @return the populated output array
 *
 * Note: rows/columns are written in increasing coordinate order, as
 * required by ChunkIterator::SEQUENTIAL_WRITE.
 */
shared_ptr<Array> writeGamma(shared_ptr<Query> query) {
    // Output array and its iterator for all the chunks inside that array.
    shared_ptr<Array> outputArray(new MemArray(_schema, query));
    shared_ptr<ArrayIterator> outputArrayIter = outputArray->getIterator(0);
    shared_ptr<ChunkIterator> outputChunkIter;
    Coordinates position(2, 1);
    outputChunkIter = outputArrayIter->newChunk(position).getIterator(query, ChunkIterator::SEQUENTIAL_WRITE);
    int64_t i, j;
    Value valGamma;
    map<double, struct NLQ>::iterator it;
    // One row per class; stop after k rows even if more classes exist.
    for(it = nlq.begin(), i=1; it != nlq.end() && i<=k; it++, i++) {
        position[0] = i;
        position[1] = 1;
        valGamma.setDouble(it->second.N);
#ifdef DEBUG
        log << "Entry for class " << i << ", n = " << it->second.N << endl;
#endif
        outputChunkIter->setPosition(position);
        outputChunkIter->writeItem(valGamma);
#ifdef DEBUG
        log << "Writing L." << endl;
#endif
        // Columns 2 .. d+2: linear sums.
        for(j=1; j<=d+1; j++) {
            position[1] = j+1;
            valGamma.setDouble(it->second.L[j]);
            outputChunkIter->setPosition(position);
            outputChunkIter->writeItem(valGamma);
        }
#ifdef DEBUG
        log << "Writing Q." << endl;
#endif
        // Columns d+3 .. 2d+3: squared sums.
        for(j=1; j<=d+1; j++) {
            position[1] = j+d+2;
            valGamma.setDouble(it->second.Q[j]);
            outputChunkIter->setPosition(position);
            outputChunkIter->writeItem(valGamma);
        }
    }
    outputChunkIter->flush();  // commit the single chunk
    return outputArray;
}
/**
 * Maya dependency-node compute: builds a double array of size aSize from
 * the sparse aInput angle array, converting each element to the current
 * UI unit (radians or degrees), and stores it on the aOutput plug.
 *
 * @param plug the plug being evaluated; anything but aOutput is rejected
 * @param data the node's data block
 * @return MS::kSuccess, or MS::kUnknownParameter for other plugs
 */
MStatus ArrayAngleConstructorNode::compute(const MPlug& plug, MDataBlock& data) {
    if (plug != aOutput) return MS::kUnknownParameter;
    MStatus status;
    int index;
    MArrayDataHandle inputArrayHandle = data.inputArrayValue(aInput);
    int inputSize = inputArrayHandle.elementCount();
    int outputSize = data.inputValue(aSize).asInt();
    MDoubleArray outputArray(outputSize);  // default-filled; sparse inputs leave gaps at 0.0
    MAngle::Unit uiUnit = MAngle::uiUnit();
    for (int i = 0; i < inputSize; i++) {
        // aInput is a sparse array: elementIndex() gives the logical index.
        index = inputArrayHandle.elementIndex();
        if (index >= outputSize) break;  // logical indices beyond the output are ignored
        if (uiUnit == MAngle::kRadians) {
            outputArray[index] = inputArrayHandle.inputValue().asAngle().asRadians();
        } else {
            outputArray[index] = inputArrayHandle.inputValue().asAngle().asDegrees();
        }
        if (!inputArrayHandle.next()) break;  // no more populated elements
    }
    MFnDoubleArrayData outputArrayData;
    MObject outputData = outputArrayData.create(outputArray, &status);
    CHECK_MSTATUS_AND_RETURN_IT(status);
    MDataHandle outputHandle = data.outputValue(aOutput);
    outputHandle.setMObject(outputData);
    outputHandle.setClean();  // mark the plug as evaluated
    return MS::kSuccess;
}
/**
 * Run the three tile-static memory bandwidth benchmarks in sequence:
 * single-access read, linear read, and linear write. Each benchmark gets
 * a fresh device array initialized from the same host buffer so earlier
 * runs cannot affect later ones.
 *
 * @return AMP_SUCCESS on completion
 */
int TileStaticBandwidth<T>::run() {
    array<T,1> outputArray(outputLength,output.begin());
    cout << "\nTile Static Memory Read\nAccessType\t: single\n"<< endl;
    cout << "Bandwidth\t";
    measureReadSingle(outputArray);
    // Fresh array for the second benchmark.
    array<T,1> outputArray2(outputLength,output.begin());
    cout << "\nTile Static Memory Read\nAccessType\t: linear\n"<< endl;
    cout << "Bandwidth\t";
    measureReadLinear(outputArray2);
    // Fresh array for the third benchmark.
    array<T,1> outputArray3(outputLength,output.begin());
    cout << "\nTile Static Memory Write\nAccessType\t: linear\n"<< endl;
    cout << "Bandwidth\t";
    measureWriteLinear(outputArray3);
    return AMP_SUCCESS;
}
int main(int argc, char **argv) { char a[1024]; char b[1024]; fscanf(stdin, "%s %s", a, b); assert(a != NULL); assert(b != NULL); int maxSize = strlen(a); if (maxSize < strlen(b)) { maxSize = strlen(b); } maxSize += 1; // terminating zero int cSize = maxSize * 2; char *c = (char *) calloc(cSize, sizeof(char)); assert(c != NULL); char *ai = a; char *bi = b; for (int i = 0; i < cSize; i++) { if (i % 2) { c[i] = *bi == '\0' ? *bi : *bi++; } else { c[i] = *ai == '\0' ? *ai : *ai++; } } outputArray(c, cSize); free(c); }
/*
 * Pretty-print a single Fudge message field: indentation, type, optional
 * name and ordinal, then the value formatted according to the field type.
 * Array-typed fields are delegated to outputArray with the element printf
 * format, element width in bytes, and elements-per-line; nested messages
 * recurse via outputMessage with one extra indent level.
 *
 * field  - the field to print (not NULL)
 * indent - current indentation depth in levels
 */
void outputField ( FudgeField * field, unsigned int indent )
{
    /* Output the field's type, name (if present) and ordinal (if present) */
    outputIndent ( indent );
    outputType ( field->type );
    printf ( " " );
    if ( field->flags & FUDGE_FIELD_HAS_NAME )
    {
        outputString ( field->name );
        /* Separator only when both name and ordinal are present */
        if ( field->flags & FUDGE_FIELD_HAS_ORDINAL )
            printf ( "/" );
    }
    if ( field->flags & FUDGE_FIELD_HAS_ORDINAL )
        printf ( "ord(%d)", field->ordinal );
    printf ( ": " );

    /* Output the field contents */
    switch ( field->type )
    {
        case FUDGE_TYPE_INDICATOR:  break;  /* indicator carries no value */
        case FUDGE_TYPE_BOOLEAN:    printf ( field->data.boolean ? "true" : "false" ); break;
        case FUDGE_TYPE_BYTE:       printf ( "%d", field->data.byte ); break;
        case FUDGE_TYPE_SHORT:      printf ( "%d", field->data.i16 ); break;
        case FUDGE_TYPE_INT:        printf ( "%d", field->data.i32 ); break;
        case FUDGE_TYPE_LONG:       printf ( "%ld", ( long int ) field->data.i64 ); break;
        case FUDGE_TYPE_FLOAT:      printf ( "%f", field->data.f32 ); break;
        case FUDGE_TYPE_DOUBLE:     printf ( "%f", field->data.f64 ); break;

        /* outputArray args: raw bytes, byte count, per-element format,
           element size in bytes, elements per output line. */
        case FUDGE_TYPE_SHORT_ARRAY:  outputArray ( field->data.bytes, field->numbytes, "%d", 2, 8 ); break;
        case FUDGE_TYPE_INT_ARRAY:    outputArray ( field->data.bytes, field->numbytes, "%d", 4, 8 ); break;
        /* NOTE(review): "%lu" prints the 8-byte elements unsigned while
           FUDGE_TYPE_LONG above treats i64 as signed -- confirm intended. */
        case FUDGE_TYPE_LONG_ARRAY:   outputArray ( field->data.bytes, field->numbytes, "%lu", 8, 4 ); break;
        case FUDGE_TYPE_FLOAT_ARRAY:  outputArray ( field->data.bytes, field->numbytes, "%f", 4, 4 ); break;
        case FUDGE_TYPE_DOUBLE_ARRAY: outputArray ( field->data.bytes, field->numbytes, "%f", 8, 4 ); break;

        case FUDGE_TYPE_STRING:     outputString ( field->data.string ); break;

        /* All fixed- and variable-width byte arrays print as 1-byte ints,
           ten per line. */
        case FUDGE_TYPE_BYTE_ARRAY:
        case FUDGE_TYPE_BYTE_ARRAY_4:
        case FUDGE_TYPE_BYTE_ARRAY_8:
        case FUDGE_TYPE_BYTE_ARRAY_16:
        case FUDGE_TYPE_BYTE_ARRAY_20:
        case FUDGE_TYPE_BYTE_ARRAY_32:
        case FUDGE_TYPE_BYTE_ARRAY_64:
        case FUDGE_TYPE_BYTE_ARRAY_128:
        case FUDGE_TYPE_BYTE_ARRAY_256:
        case FUDGE_TYPE_BYTE_ARRAY_512:
            outputArray ( field->data.bytes, field->numbytes, "%d", 1, 10 );
            break;

        /* Nested message: recurse with one extra indent level */
        case FUDGE_TYPE_FUDGE_MSG:
            printf ( "\n" );
            outputIndent ( indent );
            printf ( "{\n" );
            outputMessage ( field->data.message, indent + 1 );
            outputIndent ( indent );
            printf ( "}" );
            break;

        /* Unknown type: dump the raw bytes */
        default:
            printf ( "%d bytes ", field->numbytes );
            outputArray ( field->data.bytes, field->numbytes, "%d", 1, 8 );
            break;
    }
    printf ( "\n" );
}
/**
 * Pre-select the d columns most correlated with Y: read the correlation
 * array (inputArrays[1]), sort column ids by correlation, build a filter
 * expression keeping the top-d column ids (plus column p+1, the Y column),
 * and return a FilterArray over the original input. If the correlation
 * array is empty on this instance, an empty MemArray is returned instead.
 *
 * Fix: the 'corr' buffer allocated with new[] was never freed on the
 * FilterArray return path (memory leak); it is now released once the
 * filter expression has been compiled.
 *
 * @param inputArrays [0] = original data array, [1] = correlation array
 * @param query       the current query context
 */
shared_ptr< Array > execute(vector< shared_ptr< Array> >& inputArrays, shared_ptr<Query> query) {
    // I maintain the log of the operator in a local file named after Correlation_N.log, N is the instance ID.
    stringstream logFileName;
    logFileName << "/home/scidb/preselect_" << query->getInstanceID() << ".log";
    FILE *logFile;
    // NOTE(review): fopen can return NULL (e.g. missing directory); the
    // fprintf calls below would then be undefined behavior -- confirm the
    // path is guaranteed to exist in deployment.
    logFile = fopen(logFileName.str().c_str(), "w");
    shared_ptr<Array> originalArray = inputArrays[0];
    shared_ptr<Array> correlationArray = inputArrays[1];
    ArrayDesc originalSchema = originalArray->getArrayDesc();
    ArrayDesc corrSchema = correlationArray->getArrayDesc();
    Dimensions originalDims = originalSchema.getDimensions();
    Dimensions corrDims = corrSchema.getDimensions();
    DimensionDesc originalDimsP = originalDims[1];
    DimensionDesc corrDimsP = corrDims[0];
    // Note the correlation array doesn't have Y column.
    Coordinate p = corrDimsP.getCurrLength();
    fprintf(logFile, "p = %ld\n # of chunk = %ld\n", p, corrSchema.getNumberOfChunks());
    fflush(logFile);
    shared_ptr<ConstArrayIterator> corrArrayIter = correlationArray->getIterator(0);
    if(! corrArrayIter->end() ) {
        correlation *corr = new correlation[p];
        // The correlation array will always have only 1 chunk (we designed
        // correlation array like this), so no loops here.
        shared_ptr<ConstChunkIterator> corrChunkIter = corrArrayIter->getChunk().getConstIterator();
        for(Coordinate i=0; i<p; ++i) {
            corr[i].id = i+1;  // column ids are 1-based
            corr[i].corr = corrChunkIter->getItem().getDouble();
            //fprintf(logFile, "%d, %f\n", corr[i].id, corr[i].corr);
            ++(*corrChunkIter);
        }
        //fflush(logFile);
        // Sort columns by correlation using the comparator 'comp'.
        qsort(corr, p, sizeof(correlation), &comp);
        for(Coordinate i=0; i<p; ++i) {
            fprintf(logFile, "%d, %f\n", corr[i].id, corr[i].corr);
        }
        fflush(logFile);
        // d = how many top columns to keep (operator parameter 0).
        Coordinate d = ((boost::shared_ptr<OperatorParamPhysicalExpression>&)_parameters[0])->getExpression()->evaluate().getInt64();
        fprintf(logFile, "d=%ld\n", d);
        // Build "j=<id1> or j=<id2> or ... or j=<p+1>"; p+1 keeps the Y column.
        stringstream ss;
        vector<string> names;
        names.push_back("j");
        vector<TypeId> types;
        types.push_back(TID_INT64);
        for(Coordinate i=0; i<d; ++i) {
            ss << "j=" << corr[i].id << " or ";
        }
        ss << "j=" << p+1;
        fprintf(logFile, "%s\n", ss.str().c_str());
        fflush(logFile);
        Expression e;
        e.compile(ss.str(), names, types);
        delete[] corr;  // fix: was leaked on this path
        fclose(logFile);
        boost::shared_ptr<scidb::Query> emptyQuery;
        return boost::shared_ptr<Array>(new FilterArray(_schema, inputArrays[0], boost::make_shared<Expression>(e), emptyQuery, _tileMode));
    } else {
        // No correlation data on this instance: return an empty result.
        shared_ptr<Array> outputArray(new MemArray(_schema, query));
        fclose(logFile);
        return outputArray;
    }
}
/*
 * River-boat terminal animation: parses command-line options, builds the
 * first frame of the river grid, then renders frames forever at a rate
 * derived from the boat speed. (Comments translated from Portuguese.)
 */
int main (int argc, char *argv[]) {
    /* Variable declarations */
    float velocidadeDoBarco = velocidadeDoBarcoInicial;     /* boat speed */
    int larguraDoRio = larguraDoRioInicial;                 /* river width */
    int fluxoDesejado = fluxoDesejadoInicial;               /* desired water flow */
    int dIlha = distanciaEntreIlhasInicial;                 /* min distance between islands */
    float pIlha = probabilidadeDeObstaculosInicial;         /* obstacle probability */
    float limiteMargens = limiteDasMargens;                 /* river-bank size limit */
    struct timespec tim2;
    struct timespec tim;
    int seed = 1;
    int verbose = 0;
    int indice = 0;                                         /* index of the current top row */
    pixel **grade;                                          /* the frame grid */
    /* Parameter parsing */
    getArgs(argc, argv, &velocidadeDoBarco, &larguraDoRio, &seed, &fluxoDesejado, &verbose, &dIlha, &pIlha, &limiteMargens);
    corrigeArgs(argc, argv, &velocidadeDoBarco, &larguraDoRio, &seed, &fluxoDesejado, &verbose, &dIlha, &pIlha, &limiteMargens);
    if (verbose) {
        /* Echo the effective options (user-facing text kept in Portuguese). */
        printf ("\t \t Opcoes disponiveis: \n"
                "-b = %f - Velocidade do barco\n"
                "-l = %d - Largura do Rio\n"
                "-s = %d - Semente para o gerador aleatorio\n"
                "-f = %d - Fluxo da agua\n"
                "-v = %d - Verbose\n"
                "-pI = %f - Probabilidade de haver obstaculos\n"
                "-dI = %d - Distancia minima entre obstaculos\n"
                "-lM = %f - Limite de tamanho das margens (de 0 a 1)\n"
                "Pressione Enter para continuar...\n",
                velocidadeDoBarco, larguraDoRio, seed, fluxoDesejado, verbose, pIlha, dIlha, limiteMargens);
        getchar();
    }
    /* Initialization: frame period is inversely proportional to boat speed. */
    tim.tv_sec = 0;
    tim.tv_nsec = 100000000/velocidadeDoBarco;
    /* Seed (0 means "derive from current time") */
    if (seed == 0) seed = time(NULL);
    srand(seed);
    /* Create the first frame */
    grade = initGrade(alturaDaGrade, larguraDoRio);
    criaPrimeiroFrame(grade, alturaDaGrade, larguraDoRio, limiteMargens, fluxoDesejado, dIlha, pIlha);
    outputArray(grade, alturaDaGrade, larguraDoRio, indice);
    clearScreen();
    /* Subsequent frames: the grid is used as a ring buffer over rows,
       so only the new top row is generated each frame. */
    for(;;){
        indice = (indice - 1+alturaDaGrade) % alturaDaGrade;
        criaProximoFrame(grade, alturaDaGrade, larguraDoRio, limiteMargens, fluxoDesejado, indice, dIlha, pIlha);
        outputArray(grade, alturaDaGrade, larguraDoRio, indice);
        clearScreen();
        nanosleep(&tim, &tim2);
    }
    /* Cleanup -- NOTE(review): unreachable, the loop above never exits;
       the OS reclaims the memory on termination. */
    freeGrade(grade, alturaDaGrade, larguraDoRio);
    return 0;
}
/**
 * Record a set of statistics into a MemArray.
 *
 * Writes one cell per attribute, all at the coordinate equal to this
 * instance's ID, so each instance contributes one row of stats:
 *   attr 0: chunk count (uint32)
 *   attr 1: cell count (uint64)
 *   attr 2: min cells per chunk (uint64, null when no cells)
 *   attr 3: max cells per chunk (uint64, null when no cells)
 *   attr 4: average cells per chunk (double, null when no cells)
 *
 * @param stats the statistics to record
 * @param query the query context
 * @return the populated single-cell-per-attribute array
 */
shared_ptr<Array> writeStatsToMemArray(Stats const& stats, shared_ptr<Query>& query) {
    /* This is very similar to the write code seen in PhysicalHelloInstances, except we are writing multiple
     * attributes - all at the same position. */
    shared_ptr<Array> outputArray(new MemArray(_schema, query));
    shared_ptr<ArrayIterator> outputArrayIter = outputArray->getIterator(0);
    Coordinates position(1, query->getInstanceID());
    /* The first attribute is opened with only SEQUENTIAL_WRITE. Other attributes are also opened with
     * NO_EMPTY_CHECK. So the empty tag is populated implicitly from the first attribute.
     *
     * Note: since there's only one cell to write, SEQUENTIAL_WRITE is not so relevant, though it is faster. */
    //chunk count
    shared_ptr<ChunkIterator> outputChunkIter = outputArrayIter->newChunk(position).getIterator(query, ChunkIterator::SEQUENTIAL_WRITE);
    outputChunkIter->setPosition(position);
    Value value;
    value.setUint32(stats.chunkCount);
    outputChunkIter->writeItem(value);
    outputChunkIter->flush();
    //cell count
    outputArrayIter = outputArray->getIterator(1);
    outputChunkIter = outputArrayIter->newChunk(position).getIterator(query, ChunkIterator::SEQUENTIAL_WRITE | ChunkIterator::NO_EMPTY_CHECK);
    outputChunkIter->setPosition(position);
    value.setUint64(stats.cellCount);
    outputChunkIter->writeItem(value);
    outputChunkIter->flush();
    //min cells per chunk (null when the array holds no cells)
    outputArrayIter = outputArray->getIterator(2);
    outputChunkIter = outputArrayIter->newChunk(position).getIterator(query, ChunkIterator::SEQUENTIAL_WRITE | ChunkIterator::NO_EMPTY_CHECK);
    outputChunkIter->setPosition(position);
    if (stats.cellCount > 0) {
        value.setUint64(stats.minCellsPerChunk);
    } else {
        value.setNull();
    }
    outputChunkIter->writeItem(value);
    outputChunkIter->flush();
    //max cells per chunk (null when the array holds no cells)
    outputArrayIter = outputArray->getIterator(3);
    outputChunkIter = outputArrayIter->newChunk(position).getIterator(query, ChunkIterator::SEQUENTIAL_WRITE | ChunkIterator::NO_EMPTY_CHECK);
    outputChunkIter->setPosition(position);
    if (stats.cellCount > 0) {
        value.setUint64(stats.maxCellsPerChunk);
    } else {
        value.setNull();
    }
    outputChunkIter->writeItem(value);
    outputChunkIter->flush();
    //avg cells per chunk (null when the array holds no cells)
    outputArrayIter = outputArray->getIterator(4);
    outputChunkIter = outputArrayIter->newChunk(position).getIterator(query, ChunkIterator::SEQUENTIAL_WRITE | ChunkIterator::NO_EMPTY_CHECK);
    outputChunkIter->setPosition(position);
    if (stats.cellCount > 0) {
        value.setDouble(stats.cellCount * 1.0 / stats.chunkCount);
    } else {
        value.setNull();
    }
    outputChunkIter->writeItem(value);
    outputChunkIter->flush();
    return outputArray;
}
/**
 * Accumulate the Gamma matrix (lower triangle of sums of cross products
 * z_i * z_i^T, where z_i = [1, row values]) over all local chunks, then
 * combine the partial matrices across instances: workers ship their lower
 * triangle to instance 0, the coordinator sums them and materializes the
 * final matrix via writeGamma(d, query).
 *
 * @param inputArrays [0] = the n x (d+1) input data array (last column Y)
 * @param query       the current query context
 * @return the Gamma matrix array on the coordinator; an empty MemArray on workers
 */
shared_ptr< Array > execute(vector< shared_ptr< Array> >& inputArrays, shared_ptr<Query> query) {
    shared_ptr<Array> outputArray(new MemArray(_schema, query));
    shared_ptr<Array> inputArray = inputArrays[0];
    ArrayDesc inputSchema = inputArray->getArrayDesc();
    // Get descriptor of two dimensions d and n.
    DimensionDesc dimsN = inputSchema.getDimensions()[0];
    DimensionDesc dimsD = inputSchema.getDimensions()[1];
    size_t n = dimsN.getCurrEnd() - dimsN.getCurrStart() + 1;
    // Note: the input data set should have d+1 dimensions (including Y)
    size_t d = dimsD.getCurrEnd() - dimsD.getCurrStart();
    size_t nStart = dimsN.getCurrStart();
    size_t dStart = dimsD.getCurrStart();
    // Get chunk size of n.
    size_t nChunkSize = dimsN.getChunkInterval();
    // Helps to accumulate the n and L: z_i[0] is the constant-1 entry.
    z_i[0] = 1.0;
    shared_ptr<ConstArrayIterator> inputArrayIter = inputArray->getConstIterator(0);
    Coordinates chunkPosition;
    size_t i, j, k, m;
    while(! inputArrayIter->end() ) {
        shared_ptr<ConstChunkIterator> chunkIter = inputArrayIter->getChunk().getConstIterator();
        chunkPosition = inputArrayIter->getPosition();
        // Walk the chunk row by row.
        for(i=chunkPosition[0]; i<chunkPosition[0] + nChunkSize; i++) {
            // In case the chunk is partially filled.
            if(i == n + nStart) {
                break;
            }
            // Load the row into z_i[1..d+1].
            for(j=chunkPosition[1], m=1; j<=chunkPosition[1]+d; j++, m++) {
                // In case the chunk is partially filled.
                if(j == d + 1 + dStart) {
                    break;
                }
                z_i[m] = chunkIter->getItem().getDouble();
                ++(*chunkIter);
            }
            // Accumulate the lower triangle of z_i * z_i^T into Gamma.
            for(k=0; k<=d+1; ++k) {
                // This operator is not optimized for entries with value zero.
                // TODO: should use fabs(z_i[k]) < 10e-6
                // if(z_i[k] == 0.0) {
                //     continue;
                // }
                for(m=0; m<=k; ++m) {
                    Gamma[k][m] += z_i[k]*z_i[m];
                }
            }
        }
        ++(*inputArrayIter);
    }
    /**
     * The "logical" instance ID of the instance responsible for coordination of query.
     * COORDINATOR_INSTANCE if instance execute this query itself.
     */
    if(query->getInstancesCount() > 1) {
        if(query->getInstanceID() != 0) {
            // I am not the coordinator, I should send my Gamma matrix out.
            // Buffer holds the packed lower triangle: (d+2)(d+3)/2 doubles.
            shared_ptr <SharedBuffer> buf ( new MemoryBuffer(NULL, sizeof(double) * (d+3) * (d+2) / 2) );
            double *Gammabuf = static_cast<double*> (buf->getData());
            for(size_t i=0; i<d+2; ++i) {
                for(size_t j=0; j<=i; ++j) {
                    *Gammabuf = Gamma[i][j];
                    ++Gammabuf;
                }
            }
            BufSend(0, buf, query);
            return outputArray;  // workers return an empty array
        } else {
            // I am the coordinator, I should collect Gamma matrix from workers.
            for(InstanceID l = 1; l<query->getInstancesCount(); ++l) {
                shared_ptr<SharedBuffer> buf = BufReceive(l, query);
                double *Gammabuf = static_cast<double*> (buf->getData());
                // Unpack in the same triangular order it was packed.
                for(size_t i=0; i<d+2; ++i) {
                    for(size_t j=0; j<=i; ++j) {
                        Gamma[i][j] += *Gammabuf;
                        ++Gammabuf;
                    }
                }
            }
        } // end if getInstanceID() != 0
    } //end if InstancesCount() > 1
    return writeGamma(d, query);
}
/**
 * Accumulate per-column sums (L) and sums of squares (Q) plus the row
 * count N over the local chunks of the input, then merge the partial
 * sums across instances: workers ship L and Q to instance 0, the
 * coordinator adds them up and serializes the result via writeGamma.
 *
 * @param inputArrays [0] = the n x (d+1) input data array (last column Y)
 * @param query       the current query context
 * @return the NLQ summary array on the coordinator; an empty MemArray on workers
 */
shared_ptr< Array > execute(vector< shared_ptr< Array> >& inputArrays, shared_ptr<Query> query) {
    shared_ptr<Array> outputArray(new MemArray(_schema, query));
    shared_ptr<Array> inputArray = inputArrays[0];
    ArrayDesc inputSchema = inputArray->getArrayDesc();
    // Get descriptor of two dimensions d and n.
    DimensionDesc dimsN = inputSchema.getDimensions()[0];
    DimensionDesc dimsD = inputSchema.getDimensions()[1];
    size_t n = dimsN.getCurrLength();
    // Note: the input data set should have d+1 dimensions (including Y)
    size_t d = dimsD.getCurrLength() - 1;
    nlq.N = n;
    nlq.d = d;
    shared_ptr<ConstArrayIterator> inputArrayIter = inputArray->getConstIterator(0);
    Coordinates cellPosition;
    size_t i;
    double value;
    while(! inputArrayIter->end() ) {
        shared_ptr<ConstChunkIterator> chunkIter = inputArrayIter->getChunk().getConstIterator();
        // For each cell in the current chunk.
        // This will skip the empty cells.
        while(! chunkIter->end() ) {
            cellPosition = chunkIter->getPosition();
            value = chunkIter->getItem().getDouble();
            // Column index (cellPosition[1]) selects the L/Q accumulator.
            nlq.L[ cellPosition[1] ] += value;
            nlq.Q[ cellPosition[1] ] += value * value;
            ++(*chunkIter);
        }
        ++(*inputArrayIter);
    }
    /**
     * The "logical" instance ID of the instance responsible for coordination of query.
     * COORDINATOR_INSTANCE if instance execute this query itself.
     */
    if(query->getInstancesCount() > 1) {
        if(query->getInstanceID() != 0) {
            // I am not the coordinator, I should send my Gamma matrix out.
            // Buffer layout: L[1..d+1] followed by Q[1..d+1].
            shared_ptr <SharedBuffer> buf ( new MemoryBuffer(NULL, sizeof(double) * (d*2+2) ));
            double *Gammabuf = static_cast<double*> (buf->getData());
            for(i=1; i<=d+1; ++i) {
                *Gammabuf = nlq.L[i];
                ++Gammabuf;
            }
            for(i=1; i<=d+1; ++i) {
                *Gammabuf = nlq.Q[i];
                ++Gammabuf;
            }
            BufSend(0, buf, query);
            return outputArray;  // workers return an empty array
        } else {
            // I am the coordinator, I should collect Gamma matrix from workers.
            for(InstanceID l = 1; l<query->getInstancesCount(); ++l) {
                shared_ptr<SharedBuffer> buf = BufReceive(l, query);
                double *Gammabuf = static_cast<double*> (buf->getData());
                // Unpack in the same order it was packed: L first, then Q.
                for(i=1; i<=d+1; ++i) {
                    nlq.L[i] += *Gammabuf;
                    ++Gammabuf;
                }
                for(i=1; i<=d+1; ++i) {
                    nlq.Q[i] += *Gammabuf;
                    ++Gammabuf;
                }
            }
        }// end if getInstanceID() != 0
    }//end if InstancesCount() > 1
    return writeGamma(query);
}
/**
 * Grouped NLQ accumulation: for every input row, the Y column (dimension
 * index idY, default d+1, overridable by parameter 1) selects a class;
 * per class this accumulates the row count N, per-column sums L and sums
 * of squares Q (with the Y column moved to slot d+1). Workers then ship
 * their per-class map to instance 0, which merges the entries and
 * materializes up to k class rows via writeGamma(query).
 *
 * @param inputArrays [0] = the n x (d+1) input data array
 * @param query       the current query context
 * @return the grouped summary on the coordinator; an empty MemArray on workers
 *
 * NOTE(review): the member 'k' is assigned from parameter 0 below, and is
 * afterwards shadowed by the local 'int64_t ... k ...' declaration used as
 * a loop index; the member assignment happens before the local comes into
 * scope, so this works, but the shadowing is fragile -- the local should
 * ideally be renamed.
 */
shared_ptr< Array > execute(vector< shared_ptr< Array> >& inputArrays, shared_ptr<Query> query) {
    shared_ptr<Array> outputArray(new MemArray(_schema, query));
    shared_ptr<Array> inputArray = inputArrays[0];
    ArrayDesc inputSchema = inputArray->getArrayDesc();
    // Get descriptor of two dimensions d and n.
    DimensionDesc dimsN = inputSchema.getDimensions()[0];
    DimensionDesc dimsD = inputSchema.getDimensions()[1];
    int64_t n = dimsN.getCurrEnd() - dimsN.getCurrStart() + 1;
    // Note: the input data set should have d+1 dimensions (including Y)
    d = dimsD.getCurrEnd() - dimsD.getCurrStart();
    idY = d+1;  // default: last column is Y
    int64_t nStart = dimsN.getCurrStart();
    int64_t dStart = dimsD.getCurrStart();
    // Get chunk size of n.
    int64_t nChunkSize = dimsN.getChunkInterval();
    // Member k: maximum number of class rows written by writeGamma.
    k = ((shared_ptr<OperatorParamPhysicalExpression>&)_parameters[0])->getExpression()->evaluate().getInt64();
    if (_parameters.size() == 2) {
        // Optional parameter 1 overrides which column is treated as Y.
        idY = ((shared_ptr<OperatorParamPhysicalExpression>&)_parameters[1])->getExpression()->evaluate().getInt64();
    }
#ifdef DEBUG
    stringstream ss;
    ss << getenv("HOME") << "/groupdiagdensegamma-instance-" << query->getInstanceID() << ".log";
    log.open(ss.str().c_str(), ios::out);
    log << "n = " << n << endl << "d = " << d << endl << "k = " << k << endl;
    log << "nStart = " << nStart << endl << "dStart = " << dStart << endl;
    log << "nChunkSize = " << nChunkSize << endl;
    log << "idY = " << idY << endl;
#endif
    shared_ptr<ConstArrayIterator> inputArrayIter = inputArray->getConstIterator(0);
    Coordinates chunkPosition;
    int64_t i, j, k, m, l;  // local k shadows the member set above (see NOTE)
    double value;
    NLQ tmp;
    map<double, struct NLQ>::iterator it;
    while(! inputArrayIter->end() ) {
        shared_ptr<ConstChunkIterator> chunkIter = inputArrayIter->getChunk().getConstIterator();
        chunkPosition = inputArrayIter->getPosition();
#ifdef DEBUG
        log << "Getting into chunk (" << chunkPosition[0] << ", " << chunkPosition[1] << ")." << endl;
#endif
        // Walk the chunk row by row.
        for(i=chunkPosition[0]; i<chunkPosition[0] + nChunkSize; i++) {
            // In case the chunk is partially filled.
            if(i == n + nStart) {
#ifdef DEBUG
                log << "Reaching row " << i << ", exiting." << endl;
#endif
                break;
            }
            // Load the row into tmp.L[1..d+1] / tmp.Q[1..d+1].
            for(j=chunkPosition[1], m=1; j<=chunkPosition[1]+d; j++, m++) {
                if(j == d + 1 + dStart) {
#ifdef DEBUG
                    log << "Reaching column " << j << ", exiting." << endl;
#endif
                    break;
                }
                value = chunkIter->getItem().getDouble();
                tmp.L[m] = value;
                tmp.Q[m] = value * value;
                ++(*chunkIter);
            }
            // The Y value keys the per-class map entry.
            double Y = tmp.L[idY];
            it = nlq.find(Y);
            if (it == nlq.end()) {
#ifdef DEBUG
                log << "Cannot find NLQ entry for class " << Y << ", creating new." << endl;
#endif
                nlq[Y].N = 1;
                nlq[Y].groupId = Y;
            } else {
                nlq[Y].N++;
            }
            // Accumulate non-Y columns into slots 1..d, then Y into slot d+1.
            for (k=1, l=1; k<=d+1; k++) {
                if (k == idY) {
                    continue;
                }
                nlq[Y].L[l] += tmp.L[k];
                nlq[Y].Q[l] += tmp.Q[k];
                l++;
            }
            nlq[Y].L[d+1] += tmp.L[idY];
            nlq[Y].Q[d+1] += tmp.Q[idY];
        }
        ++(*inputArrayIter);
    }
    /**
     * The "logical" instance ID of the instance responsible for coordination of query.
     * COORDINATOR_INSTANCE if instance execute this query itself.
     */
    size_t localClassCount = nlq.size();
#ifdef DEBUG
    log << "localClassCount = " << localClassCount << endl;
#endif
    if(query->getInstancesCount() > 1) {
        if(query->getInstanceID() != 0) {
            // I am not the coordinator, I should send my NLQ out.
#ifdef DEBUG
            log << "I am not the coordinator, I should send my NLQ out." << endl;
#endif
            // Ship the whole per-class map as an array of NLQ structs.
            shared_ptr <SharedBuffer> buf ( new MemoryBuffer(NULL, sizeof(struct NLQ) * localClassCount ));
            struct NLQ *NLQbuf = static_cast<struct NLQ*> (buf->getData());
            for(it = nlq.begin(); it != nlq.end(); it++) {
                *NLQbuf = it->second;
                ++NLQbuf;
            }
            BufSend(0, buf, query);
#ifdef DEBUG
            log << "Exiting." << endl;
#endif
            return outputArray;  // workers return an empty array
        } else {
            // I am the coordinator, I should collect NLQ from workers.
#ifdef DEBUG
            log << "I am the coordinator, I should collect NLQ from workers." << endl;
#endif
            for(InstanceID l = 1; l<query->getInstancesCount(); ++l) {
                shared_ptr<SharedBuffer> buf = BufReceive(l, query);
                if(! buf) {
#ifdef DEBUG
                    log << "Nothing from instance " << l << ", continue." << endl;
#endif
                    continue;
                }
                // Entry count is implied by the buffer size.
                int64_t remoteClassCount = buf->getSize() / sizeof(struct NLQ);
                struct NLQ* NLQbuf = static_cast<struct NLQ*> (buf->getData());
#ifdef DEBUG
                log << "Received " << remoteClassCount << " entries from instance " << l << endl;
#endif
                // Merge each remote class entry into the local map.
                for(i=0; i<remoteClassCount; ++i) {
                    it = nlq.find(NLQbuf->groupId);
                    if( it == nlq.end() ) {
#ifdef DEBUG
                        log << "Cannot find NLQ entry for class " << NLQbuf->groupId << ", creating new." << endl;
#endif
                        nlq[NLQbuf->groupId] = *NLQbuf;
                    } else {
                        it->second.N += NLQbuf->N;
                        for(j=1; j<=d+1; ++j) {
                            it->second.L[j] += NLQbuf->L[j];
                            it->second.Q[j] += NLQbuf->Q[j];
                        }
                    }
                    ++NLQbuf;
                }
#ifdef DEBUG
                log << "Merge complete." << endl;
#endif
            }
        }// end if getInstanceID() != 0
    }//end if InstancesCount() > 1
    return writeGamma(query);
}
/**
 * Parallel loader: reads an n x d matrix of doubles from a binary file
 * (parameters: file name, n, d, optional 1-based index start) and writes
 * it into the output array, one chunk of up to CHUNK_SIZE rows at a time.
 * Chunks are partitioned contiguously across instances; the last instance
 * takes any remainder. Reads wrap to the beginning of the file when it is
 * shorter than the requested region (the "rollback" path), so the file is
 * effectively treated as cyclic -- presumably to let a small sample file
 * stand in for a large array; TODO confirm that intent.
 *
 * @param inputArrays unused (this operator generates data)
 * @param query       the current query context
 * @return the output array holding this instance's chunks
 */
shared_ptr<Array> execute(vector< shared_ptr< Array>>& inputArrays, shared_ptr< Query> query) {
    shared_ptr< Array> outputArray(new MemArray(_schema, query));
    shared_ptr< ArrayIterator> outputArrayIter = outputArray->getIterator(0);
    shared_ptr< ChunkIterator> outputChunkIter;
    string::iterator iter;
    Value oneValue;
    Coordinates position(2, 1);
    ifstream fin;
    ofstream log;
    string inputLine;
    // Operator parameters: file name, row count n, column count d.
    string fname = ((shared_ptr< OperatorParamPhysicalExpression>&)_parameters[0])->getExpression()->evaluate().getString();
    int64_t n = ((shared_ptr< OperatorParamPhysicalExpression>&)_parameters[1])->getExpression()->evaluate().getInt64();
    int64_t d = ((shared_ptr< OperatorParamPhysicalExpression>&)_parameters[2])->getExpression()->evaluate().getInt64();
    int64_t indexStart = 1;
    if (_parameters.size() == 4) {
        // Optional parameter 3: first coordinate value of the output array.
        indexStart = ((shared_ptr< OperatorParamPhysicalExpression>&)_parameters[3])->getExpression()->evaluate().getInt64();
    }
    int64_t chunkSize = n < CHUNK_SIZE ? n : CHUNK_SIZE;
    double *buf = new double[d];  // one row of doubles per read
    // Distribute chunks evenly; the last instance absorbs the remainder.
    int64_t totalChunks = (n-1) / chunkSize + 1;
    int64_t chunksPerInstance = (totalChunks-1) / query->getInstancesCount() + 1;
    int64_t myStartChunkId = query->getInstanceID() * chunksPerInstance;
    int64_t myEndChunkId = myStartChunkId + chunksPerInstance - 1;
    int64_t currChunkId, currRowId, currColId;
    Value valueToWrite;
    fin.open(fname.c_str(), ios::in | ios::binary);
    if (query->getInstanceID() == query->getInstancesCount() - 1) {
        // Last instance: extend to the final chunk (clamps to totalChunks-1).
        myEndChunkId += totalChunks - myEndChunkId - 1;
    }
#ifdef DEBUG
    stringstream ss;
    ss << getenv("HOME") << "/load2d-instance-" << query->getInstanceID() << ".log";
    log.open(ss.str().c_str(), ios::out);
    log << "File name is " << fname << endl;
    log << "n = " << n << endl << "d = " << d << endl;
    log << "indexStart = " << indexStart << endl;
    log << "totalChunks = " << totalChunks << endl;
    log << "chunksPerInstance = " << chunksPerInstance << endl;
    log << "I am instance " << query->getInstanceID() << ", there are " << query->getInstancesCount() << " instances." << endl;
    log << "myStartChunkId = " << myStartChunkId << endl;
    log << "myEndChunkId = " << myEndChunkId << endl;
#endif
    if (myStartChunkId >= totalChunks) {
        // More instances than chunks: nothing assigned to this instance.
#ifdef DEBUG
        log << "Nothing to be done on this instance, return." << endl;
        fin.close();
        log.close();
#endif
        delete[] buf;
        return outputArray;
    }
    // Seek to this instance's starting row, wrapping over the file length.
    fin.seekg(0, ios::end);
    int64_t flen = fin.tellg();
    int64_t offset = myStartChunkId * chunkSize * d * sizeof(double);
    offset = offset % flen;
    int64_t readLen = d * sizeof(double);
    fin.seekg(offset, ios::beg);
#ifdef DEBUG
    log << "Seek to " << offset << endl;
    bool rollback = false;
#endif
    for (currChunkId=myStartChunkId; currChunkId<=myEndChunkId; currChunkId++) {
        position[0] = currChunkId * chunkSize + indexStart;
        position[1] = indexStart;
        outputChunkIter = outputArrayIter->newChunk(position).getIterator(query, ChunkIterator::SEQUENTIAL_WRITE);
#ifdef DEBUG
        log << "Create chunk at (" << position[0] << ", " << position[1] << ")" << endl;
#endif
        for (currRowId=0; currRowId<chunkSize && position[0] < n+indexStart; currRowId++, position[0]++) {
            position[1] = indexStart;
            fin.read((char*)buf, readLen);
            if (fin.gcount() != readLen) {
                // Short read: hit EOF; wrap to the start and retry this row.
#ifdef DEBUG
                log << "Rolling back to the beginning of the file" << endl;
                rollback = true;
#endif
                currRowId--;
                position[0]--;
                fin.clear();
                fin.seekg(0, ios::beg);
                continue;
            }
#ifdef DEBUG
            if (rollback) {
                log << "Read data after rolling back to the beginning successfully" << endl;
            }
#endif
            // Write the row's d values at consecutive column positions.
            for (currColId=0; currColId<d; currColId++, position[1]++) {
#ifdef DEBUG
                if (rollback) {
                    log << "Write " << buf[currColId] << " to (" << position[0] << ", " << position[1] << ")" << endl;
                }
#endif
                outputChunkIter->setPosition(position);
                valueToWrite.setDouble(buf[currColId]);
                outputChunkIter->writeItem(valueToWrite);
            }
#ifdef DEBUG
            if (rollback) {
                rollback = false;
            }
#endif
        }
        outputChunkIter->flush();  // commit each chunk before starting the next
    }
#ifdef DEBUG
    log << "Cleanning up..." << endl;
    log.close();
#endif
    fin.close();
    delete[] buf;
    return outputArray;
}