void test_phase4(void)
{
    struct pptoken *token;
    struct list *source;
    struct list *p1;
    struct list *p2;
    struct list *p3;
    struct list *p4;
    struct hash_table *macros;

    //source = sourceCharsFromFile("tests/trigraphs.c");
    //source = sourceCharsFromFile("tests/doubledefine.c");
    //source = sourceCharsFromFile("tests/recursivedefine.c");
    source = sourceCharsFromFile("tests/functionmacro.c");
    p1 = phase1(source);
    p2 = phase2(p1);
    p3 = phase3(p2);
    macros = newHashTable();
    assert(macros != NULL);
    p4 = phase4(macros, p3);

    while(listDequeue(p4, (void**)&token) == 0){
        fputs(token->whiteSpace, stdout);
        if(token->name != PPTN_EOF){
            putchar('{');
            printf("\033[35m");
            fputs(token->lexeme, stdout);
            printf("\033[39m");
            putchar('}');
        } else {
            break;
        }
    }
}
Foam::tmp<Foam::volScalarField> Foam::phasePair::E() const
{
    FatalErrorIn("Foam::phasePair::E() const")
        << "Requested aspect ratio of the dispersed phase in an unordered pair"
        << exit(FatalError);

    return phase1();
}
int main(void)
{
    if (phase1() == 0) {
        printf("Phase 1: End of Phase 1 for File Server 2.\n");
    }
    phase3();
    exit(0);
}
Foam::tmp<Foam::volScalarField> Foam::phasePair::sigma() const
{
    return phase1().mesh().lookupObject<surfaceTensionModel>
    (
        IOobject::groupName
        (
            surfaceTensionModel::typeName,
            phasePair::name()
        )
    ).sigma();
}
void test_phase1(void)
{
    struct list *source;
    struct list *p1;
    struct source_char *sc;

    source = sourceCharsFromFile("tests/trigraphs.c");
    p1 = phase1(source);

    while(listDequeue(p1, (void**)&sc) == 0){
        if(sc->c > 0){
            putchar(sc->c);
        }
    }
}
void MainWindow::connectToHost()
{
    client = new AnonymousClient(this);
    connect(client, SIGNAL(sslErrors(QList<QSslError>)),
            SLOT(sslErrors(QList<QSslError>)));
    connect(client, SIGNAL(messageAvailable(QString)),
            SLOT(displayMessage(QString)));
    connect(client, SIGNAL(debugAvailable(QString)),
            debugConsole, SLOT(displayDebug(QString)));
    connect(client, SIGNAL(setPhase0()), SLOT(phase0()));
    connect(client, SIGNAL(setPhase1()), SLOT(phase1()));
    connect(client, SIGNAL(disconnected()), SLOT(disconnectFromhost()));

    connectButton->setEnabled(false);
    disconnectButton->setEnabled(true);
    displayMessage(tr("Connecting..."));

    client->connectToHostEncrypted(settings->value("HostAddress", "localhost").toString(),
                                   settings->value("port", 9001).toInt());
}
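The string-based SIGNAL/SLOT connections above are only resolved at run time, so a mistyped signature fails silently with a console warning. If the project targets Qt 5 or later, the pointer-to-member overload of connect() catches such mismatches at compile time. A minimal sketch, assuming AnonymousClient declares the same signals and MainWindow the same slots implied by the strings above:

// Hypothetical Qt 5 rewrite of two of the connections above; the member
// names are taken from the SIGNAL/SLOT strings and are otherwise assumed.
connect(client, &AnonymousClient::messageAvailable,
        this,   &MainWindow::displayMessage);
connect(client, &AnonymousClient::setPhase1,
        this,   &MainWindow::phase1);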
int main(void)
{
    /* using shared mapping */
    av = (AVAILABILITIES *)mmap(NULL, sizeof *av, PROT_READ | PROT_WRITE,
                                MAP_SHARED | MAP_ANONYMOUS, -1, 0);
    if (av == MAP_FAILED)
        return 1;

    /* using shared mapping; the mutex is initialised as process-shared so
     * that processes created in phase1() synchronise on the same lock */
    m = (pthread_mutex_t *)mmap(NULL, sizeof *m, PROT_READ | PROT_WRITE,
                                MAP_SHARED | MAP_ANONYMOUS, -1, 0);
    if (m == MAP_FAILED)
        return 1;

    pthread_mutexattr_t mattr;
    pthread_mutexattr_init(&mattr);
    pthread_mutexattr_setpshared(&mattr, PTHREAD_PROCESS_SHARED);
    pthread_mutex_init(m, &mattr);
    pthread_mutexattr_destroy(&mattr);

    int retVal = 0;
    if ((retVal = phase1()) <= 0)
        return retVal;

    /* while(waitpid(-1, NULL, WNOHANG) > 0); */

    munmap((void *)av, sizeof(*av));
    munmap((void *)m, sizeof(*m));
    return 0;
}
Foam::tmp<Foam::volScalarField> Foam::phasePair::rho() const
{
    // Phase-fraction-weighted mixture density: alpha1*rho1 + alpha2*rho2
    return phase1()*phase1().rho() + phase2()*phase2().rho();
}
Foam::word Foam::phasePair::name() const
{
    word name2(phase2().name());
    name2[0] = toupper(name2[0]);
    return phase1().name() + "And" + name2;
}
int main(unsigned long long speid, addr64 argp, addr64 envp) { unsigned long long dummy; int l ; int p0, p1 ; int i1, i2, i3 ; int j1, j2, j3 ; dummy = envp.ull ; dummy = speid ; // get arguments mfc_get((void*)&args, argp.ull, 128, 31, 0, 0); waitfor_matrix_io ( 31 ); // printf("SPE(%lld): Data received is: %d %d %d %d\n", speid, (int)args.inA // , (int)args.out, (int)args.i_initial, (int)args.i_final ); // printf("SPE(%lld): Data received is: %x %x %d %d\n", speid, (int)args.inA // , (int)args.out, (int)args.i_initial, (int)args.i_final ); // printf("SPE(%lld): size= %d \n", speid, (int)args.out-(int)args.inA ); // fflush(stdout); if ( args.sortType == 0 ) { for( l=args.i_initial; l<args.i_final; l++ ) { getlarge( (void*)&darrayA0, (unsigned long)(args.inA)+(l*bsize*sizeof(record)), bsize*sizeof(record), 31 ); waitfor_matrix_io ( 31 ); array0 = &darrayC0 ; array1 = &darrayA0 ; # ifdef NEVER { int j, k, k2 ; for(k=2; k<=bsize; k<<=1 ) { k2 = k/2 ; array2 = array0 ; array0 = array1 ; array1 = array2 ; for(j=0; j<bsize; j+=k ) { j1=j; j2=j+k2; j3=j; while ( j1<j+k2 && j2<j+k ) { if ( (*array0)[j1].key > (*array0)[j2].key ) { (*array1)[j3] = (*array0)[j1]; j3++; j1++; } else { (*array1)[j3] = (*array0)[j2]; j3++; j2++; } } while ( j1<j+k2 ) { (*array1)[j3] = (*array0)[j1]; j3++; j1++; } while ( j2<j+k ) { (*array1)[j3] = (*array0)[j2]; j3++; j2++; } } } } # else # ifdef NEVER array1 = phase1C(array0,array1,bsize); # else array1 = phase1(array0,array1); # endif # endif putlarge( (void*)&((*array1)[0]), (unsigned long)(args.out)+(l*bsize*sizeof(record)), bsize*sizeof(record), 31 ); waitfor_matrix_io ( 31 ); } } else if ( args.sortType == 1 ) { arrayA = (unsigned long)args.out; arrayB = (unsigned long)args.inA; for( p0=1; p0<args.blocks; p0<<=1 ) { arrayC=arrayA; arrayA=arrayB; arrayB=arrayC; for( p1=args.i_initial; p1<args.i_final; p1+=(p0*2) ) { i1 = p1 ; i2 = p1+p0; i3 = p1; getlarge( (void*)&darrayA0, (unsigned long)(arrayA)+(i1*bsize*sizeof(record)), bsize*sizeof(record), 31 ); array0 = &darrayA0 ; j1=0; getlarge( (void*)&darrayB0, (unsigned long)(arrayA)+(i2*bsize*sizeof(record)), bsize*sizeof(record), 31 ); array1 = &darrayB0 ; j2=0; waitfor_matrix_io ( 31 ); array2 = &darrayC0 ; j3=0; while ( i1<(p1+p0) && i2<(p1+2*p0) ) { # ifdef NEVER if ( (*array0)[j1].key > (*array1)[j2].key ) { (*array2)[j3] = (*array0)[j1]; j3++; j1++; } else { (*array2)[j3] = (*array1)[j2]; j3++; j2++; } # else # ifdef NEVER phase22C(array0,array1,array2,&j1,&j2,&j3,bsize); # else phase22(array0,array1,array2,&j1,&j2,&j3,bsize); # endif # endif if ( j1>=bsize ) { i1++; if ( i1<(p1+p0) ) { getlarge( (void*)&darrayA0, (unsigned long)(arrayA)+(i1*bsize*sizeof(record)), bsize*sizeof(record), 31 ); waitfor_matrix_io ( 31 ); j1=0; } } if ( j2>=bsize ) { i2++; if ( i2<(p1+2*p0) ) { getlarge( (void*)&darrayB0, (unsigned long)(arrayA)+(i2*bsize*sizeof(record)), bsize*sizeof(record), 31 ); waitfor_matrix_io ( 31 ); j2=0; } } if ( j3>=bsize ) { if ( i3<(p1+2*p0) ) { putlarge( (void*)&darrayC0, (unsigned long)(arrayB)+(i3*bsize*sizeof(record)), bsize*sizeof(record), 31 ); waitfor_matrix_io ( 31 ); j3=0; } i3++; } } while ( i1<(p1+p0) ) { # ifdef NEVER (*array2)[j3] = (*array0)[j1]; j3++; j1++; # else # ifdef NEVER phase21C(array0,array2,&j1,&j3,bsize); # else phase21(array0,array2,&j1,&j3,bsize); # endif # endif if ( j1>=bsize ) { i1++; if ( i1<(p1+p0) ) { getlarge( (void*)&darrayA0, (unsigned long)(arrayA)+(i1*bsize*sizeof(record)), bsize*sizeof(record), 31 ); waitfor_matrix_io ( 31 ); j1=0; } } if ( j3>=bsize ) { if ( 
i3<(p1+2*p0) ) { putlarge( (void*)&darrayC0, (unsigned long)(arrayB)+(i3*bsize*sizeof(record)), bsize*sizeof(record), 31 ); waitfor_matrix_io ( 31 ); j3=0; } i3++; } } while ( i2<(p1+2*p0) ) { # ifdef NEVER (*array2)[j3] = (*array1)[j2]; j3++; j2++; # else # ifdef NEVER phase21C(array1,array2,&j2,&j3,bsize); # else phase21(array1,array2,&j2,&j3,bsize); # endif # endif if ( j2>=bsize ) { i2++; if ( i2<(p1+2*p0) ) { getlarge( (void*)&darrayB0, (unsigned long)(arrayA)+(i2*bsize*sizeof(record)), bsize*sizeof(record), 31 ); waitfor_matrix_io ( 31 ); j2=0; } } if ( j3>=bsize ) { if ( i3<(p1+2*p0) ) { putlarge( (void*)&darrayC0, (unsigned long)(arrayB)+(i3*bsize*sizeof(record)), bsize*sizeof(record), 31 ); waitfor_matrix_io ( 31 ); j3=0; } i3++; } } }} } else if ( args.sortType == 2 ) { arrayA = (unsigned long)args.inA; arrayB = (unsigned long)args.out; p0 = args.blocks/2 ; p1 = args.i_initial ; i1 = p1; i2 = p1+p0; i3 = p1; getlarge( (void*)&darrayA0, (unsigned long)(arrayA)+(i1*bsize*sizeof(record)), bsize*sizeof(record), 31 ); array0 = &darrayA0 ; j1=0; getlarge( (void*)&darrayB0, (unsigned long)(arrayA)+(i2*bsize*sizeof(record)), bsize*sizeof(record), 31 ); array1 = &darrayB0 ; j2=0; waitfor_matrix_io ( 31 ); array2 = &darrayC0 ; j3=0; while ( i1<(p1+p0) && i2<(p1+2*p0) ) { # ifdef NEVER if ( (*array0)[j1].key > (*array1)[j2].key ) { (*array2)[j3] = (*array0)[j1]; j3++; j1++; } else { (*array2)[j3] = (*array1)[j2]; j3++; j2++; } # else # ifdef NEVER phase22C(array0,array1,array2,&j1,&j2,&j3,bsize); # else phase22(array0,array1,array2,&j1,&j2,&j3,bsize); # endif # endif if ( j1>=bsize ) { i1++; if ( i1<(p1+p0) ) { getlarge( (void*)&darrayA0, (unsigned long)(arrayA)+(i1*bsize*sizeof(record)), bsize*sizeof(record), 31 ); waitfor_matrix_io ( 31 ); j1=0; } } if ( j2>=bsize ) { i2++; if ( i2<(p1+2*p0) ) { getlarge( (void*)&darrayB0, (unsigned long)(arrayA)+(i2*bsize*sizeof(record)), bsize*sizeof(record), 31 ); waitfor_matrix_io ( 31 ); j2=0; } } if ( j3>=bsize ) { if ( i3<(p1+2*p0) ) { putlarge( (void*)&darrayC0, (unsigned long)(arrayB)+(i3*bsize*sizeof(record)), bsize*sizeof(record), 31 ); waitfor_matrix_io ( 31 ); j3=0; } i3++; } } while ( i1<(p1+p0) ) { # ifdef NEVER (*array2)[j3] = (*array0)[j1]; j3++; j1++; # else # ifdef NEVER phase21C(array0,array2,&j1,&j3,bsize); # else phase21(array0,array2,&j1,&j3,bsize); # endif # endif if ( j1>=bsize ) { i1++; if ( i1<(p1+p0) ) { getlarge( (void*)&darrayA0, (unsigned long)(arrayA)+(i1*bsize*sizeof(record)), bsize*sizeof(record), 31 ); waitfor_matrix_io ( 31 ); j1=0; } } if ( j3>=bsize ) { if ( i3<(p1+2*p0) ) { putlarge( (void*)&darrayC0, (unsigned long)(arrayB)+(i3*bsize*sizeof(record)), bsize*sizeof(record), 31 ); waitfor_matrix_io ( 31 ); j3=0; } i3++; } } while ( i2<(p1+2*p0) ) { # ifdef NEVER (*array2)[j3] = (*array1)[j2]; j3++; j2++; # else # ifdef NEVER phase21C(array1,array2,&j2,&j3,bsize); # else phase21(array1,array2,&j2,&j3,bsize); # endif # endif if ( j2>=bsize ) { i2++; if ( i2<(p1+2*p0) ) { getlarge( (void*)&darrayB0, (unsigned long)(arrayA)+(i2*bsize*sizeof(record)), bsize*sizeof(record), 31 ); waitfor_matrix_io ( 31 ); j2=0; } } if ( j3>=bsize ) { if ( i3<(p1+2*p0) ) { putlarge( (void*)&darrayC0, (unsigned long)(arrayB)+(i3*bsize*sizeof(record)), bsize*sizeof(record), 31 ); waitfor_matrix_io ( 31 ); j3=0; } i3++; } } } return 0; }
int solve_fr(mpfr_t *result, int n0, int m0, mpfr_t **a0, int *ineq0, mpfr_t *c0)
{
    int i, j;

    m = m0;     // number of inequations
    n = n0 + 1; // number of variables
    init(n, m);

    mpfr_t csum;
    mpfr_zinit(csum);

    for (j = 0; j < n0 + 1; j++) {
        mpfr_set(c[j], c0[j], GMP_RNDN);
    }
    for (j = 1; j < n0 + 1; j++) {
        mpfr_add(csum, csum, c0[j], GMP_RNDN);
    }
    mpfr_set(c[n], csum, GMP_RNDN);
    mpfr_neg(c[n], c[n], GMP_RNDN);

    for (i = 0; i < m; i++) {
        mpfr_set_d(csum, 0, GMP_RNDN);
        for (j = 0; j < n0 + 1; j++)
            mpfr_set(a[i+1][j], a0[i][j], GMP_RNDN);
        mpfr_neg(a[i+1][0], a[i+1][0], GMP_RNDN);
        for (j = 1; j < n0 + 1; j++) {
            mpfr_add(csum, csum, a0[i][j], GMP_RNDN);
        }
        mpfr_set(a[i+1][n], csum, GMP_RNDN);
        mpfr_neg(a[i+1][n], a[i+1][n], GMP_RNDN);
        inequality[i+1] = ineq0[i];
        if (mpfr_cmp_d(a[i+1][0], 0) < 0) {
            if (inequality[i+1] == GEQ) inequality[i+1] = LEQ;
            else if (inequality[i+1] == LEQ) inequality[i+1] = GEQ;
            for (j = 0; j <= n; j++)
                mpfr_neg(a[i+1][j], a[i+1][j], GMP_RNDN);
        } else if (mpfr_cmp_d(a[i+1][0], 0) == 0 && inequality[i+1] == GEQ) {
            inequality[i+1] = LEQ;
            for (j = 1; j <= n; j++)
                mpfr_neg(a[i+1][j], a[i+1][j], GMP_RNDN);
        }
    }

    int p1r = 1;
    prepare();
    if (n3 != n2) p1r = phase1();
    if (!p1r) {
        dispose();
        return NOT_FEASIBLE;
    }
    int b = phase2();

    mpfr_t *s = calloc(n, sizeof(mpfr_t));
    for (j = 0; j < n; j++) {
        mpfr_zinit(s[j]);
    }
    for (j = 1; j < n; j++) {
        if ((i = row[j]) != 0) {
            tableau(s[j], i, 0);
        }
    }

    mpfr_t cs;
    mpfr_zinit(cs);
    if (row[n] != 0) tableau(cs, row[n], 0);
    for (j = 1; j < n; j++) {
        mpfr_sub(s[j], s[j], cs, GMP_RNDN);
    }
    for (j = 0; j < n; j++) {
        mpfr_set(result[j], s[j], GMP_RNDN);
    }

    mpfr_clear(cs);
    for (j = 0; j < n; j++) mpfr_clear(s[j]);
    free(s);
    dispose();

    return b ? OK : MAXIMIZABLE_TO_INFINITY;
}
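mpfr_zinit() is not part of the MPFR API, so it is presumably a project-local helper that both initialises a variable and sets it to zero; the code above relies on csum starting at zero before the first mpfr_add(). A minimal sketch under that assumption:

/* Hypothetical helper, assumed semantics: initialise at the default
 * precision and set the value to zero. */
static void mpfr_zinit(mpfr_t x)
{
    mpfr_init(x);                 /* default precision */
    mpfr_set_d(x, 0.0, GMP_RNDN); /* value 0 */
}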
bool _compact(const char *ns, NamespaceDetails *d, string& errmsg, bool validate, BSONObjBuilder& result, double pf, int pb) { // this is a big job, so might as well make things tidy before we start just to be nice. getDur().commitIfNeeded(); list<DiskLoc> extents; for( DiskLoc L = d->firstExtent; !L.isNull(); L = L.ext()->xnext ) extents.push_back(L); log() << "compact " << extents.size() << " extents" << endl; ProgressMeterHolder pm( cc().curop()->setMessage( "compact extent" , extents.size() ) ); // same data, but might perform a little different after compact? NamespaceDetailsTransient::get(ns).clearQueryCache(); int nidx = d->nIndexes; scoped_array<IndexSpec> indexSpecs( new IndexSpec[nidx] ); scoped_array<SortPhaseOne> phase1( new SortPhaseOne[nidx] ); { NamespaceDetails::IndexIterator ii = d->ii(); // For each existing index... for( int idxNo = 0; ii.more(); ++idxNo ) { // Build a new index spec based on the old index spec. BSONObjBuilder b; BSONObj::iterator i(ii.next().info.obj()); while( i.more() ) { BSONElement e = i.next(); if ( str::equals( e.fieldName(), "v" ) ) { // Drop any preexisting index version spec. The default index version will // be used instead for the new index. continue; } if ( str::equals( e.fieldName(), "background" ) ) { // Create the new index in the foreground. continue; } // Pass the element through to the new index spec. b.append(e); } // Add the new index spec to 'indexSpecs'. BSONObj o = b.obj().getOwned(); indexSpecs[idxNo].reset(o); // Create an external sorter. phase1[idxNo].sorter.reset ( new BSONObjExternalSorter // Use the default index interface, since the new index will be created // with the default index version. ( IndexInterface::defaultVersion(), o.getObjectField("key") ) ); phase1[idxNo].sorter->hintNumObjects( d->stats.nrecords ); } } log() << "compact orphan deleted lists" << endl; for( int i = 0; i < Buckets; i++ ) { d->deletedList[i].writing().Null(); } // Start over from scratch with our extent sizing and growth d->lastExtentSize=0; // before dropping indexes, at least make sure we can allocate one extent! uassert(14025, "compact error no space available to allocate", !allocateSpaceForANewRecord(ns, d, Record::HeaderSize+1, false).isNull()); // note that the drop indexes call also invalidates all clientcursors for the namespace, which is important and wanted here log() << "compact dropping indexes" << endl; BSONObjBuilder b; if( !dropIndexes(d, ns, "*", errmsg, b, true) ) { errmsg = "compact drop indexes failed"; log() << errmsg << endl; return false; } getDur().commitIfNeeded(); long long skipped = 0; int n = 0; // reset data size and record counts to 0 for this namespace // as we're about to tally them up again for each new extent { NamespaceDetails::Stats *s = getDur().writing(&d->stats); s->datasize = 0; s->nrecords = 0; } for( list<DiskLoc>::iterator i = extents.begin(); i != extents.end(); i++ ) { skipped += compactExtent(ns, d, *i, n++, indexSpecs, phase1, nidx, validate, pf, pb); pm.hit(); } if( skipped ) { result.append("invalidObjects", skipped); } verify( d->firstExtent.ext()->xprev.isNull() ); // indexes will do their own progress meter? pm.finished(); // build indexes NamespaceString s(ns); string si = s.db + ".system.indexes"; for( int i = 0; i < nidx; i++ ) { killCurrentOp.checkForInterrupt(false); BSONObj info = indexSpecs[i].info; log() << "compact create index " << info["key"].Obj().toString() << endl; try { precalced = &phase1[i]; theDataFileMgr.insert(si.c_str(), info.objdata(), info.objsize()); } catch(...) 
{ precalced = 0; throw; } precalced = 0; } return true; }
int SubdivideExtrudedMesh(GModel *m) { // get all non-recombined extruded regions and vertices; also, // create a vector of quadToTri regions that have NOT been meshed // yet std::vector<GRegion*> regions, regions_quadToTri; MVertexRTree pos(CTX::instance()->geom.tolerance * CTX::instance()->lc); for(GModel::riter it = m->firstRegion(); it != m->lastRegion(); it++){ ExtrudeParams *ep = (*it)->meshAttributes.extrude; if(ep && ep->mesh.ExtrudeMesh && ep->geo.Mode == EXTRUDED_ENTITY && !ep->mesh.Recombine){ regions.push_back(*it); insertAllVertices(*it, pos); } // create vector of valid quadToTri regions...not all will necessarily be meshed here. if(ep && ep->mesh.ExtrudeMesh && ep->geo.Mode == EXTRUDED_ENTITY && ep->mesh.Recombine && ep->mesh.QuadToTri){ regions_quadToTri.push_back(*it); } } if(regions.empty()) return 0; Msg::Info("Subdividing extruded mesh"); // create edges on lateral sides of "prisms" std::set<std::pair<MVertex*, MVertex*> > edges; for(unsigned int i = 0; i < regions.size(); i++) phase1(regions[i], pos, edges); // swap lateral edges to make them "tet-compatible" int j = 0, swap; std::set<std::pair<MVertex*, MVertex*> > edges_swap; do { swap = 0; for(unsigned int i = 0; i < regions.size(); i++) phase2(regions[i], pos, edges, edges_swap, swap); Msg::Info("Swapping %d", swap); if(j && j == swap) { Msg::Error("Unable to subdivide extruded mesh: change surface mesh or"); Msg::Error("recombine extrusion instead"); return -1; } j = swap; } while(swap); // delete volume elements and create tetrahedra instead for(unsigned int i = 0; i < regions.size(); i++){ GRegion *gr = regions[i]; for(unsigned int i = 0; i < gr->tetrahedra.size(); i++) delete gr->tetrahedra[i]; gr->tetrahedra.clear(); for(unsigned int i = 0; i < gr->hexahedra.size(); i++) delete gr->hexahedra[i]; gr->hexahedra.clear(); for(unsigned int i = 0; i < gr->prisms.size(); i++) delete gr->prisms[i]; gr->prisms.clear(); for(unsigned int i = 0; i < gr->pyramids.size(); i++) delete gr->pyramids[i]; gr->pyramids.clear(); phase3(gr, pos, edges); // re-Extrude bounding surfaces using edges as constraint std::list<GFace*> faces = gr->faces(); for(std::list<GFace*>::iterator it = faces.begin(); it != faces.end(); it++){ ExtrudeParams *ep = (*it)->meshAttributes.extrude; if(ep && ep->mesh.ExtrudeMesh && ep->geo.Mode == EXTRUDED_ENTITY && !ep->mesh.Recombine){ GFace *gf = *it; Msg::Info("Remeshing surface %d", gf->tag()); for(unsigned int i = 0; i < gf->triangles.size(); i++) delete gf->triangles[i]; gf->triangles.clear(); for(unsigned int i = 0; i < gf->quadrangles.size(); i++) delete gf->quadrangles[i]; gf->quadrangles.clear(); MeshExtrudedSurface(gf, &edges); } } } // now mesh the QuadToTri regions. Everything can be done locally // for each quadToTri region, but still use edge set from above just // to make sure laterals get remeshed properly ( // QuadToTriEdgeGenerator detects if the neighbor has been meshed or // if a lateral surface should remain static for any other reason). // If this function detects allNonGlobalSharedLaterals, it won't // mesh the region (should already be done in ExtrudeMesh). 
for(unsigned int i = 0; i < regions_quadToTri.size(); i++){ GRegion *gr = regions_quadToTri[i]; MVertexRTree pos_local(CTX::instance()->geom.tolerance * CTX::instance()->lc); insertAllVertices(gr, pos_local); meshQuadToTriRegionAfterGlobalSubdivide(gr, &edges, pos_local); } // carve holes if any // TODO: update extrusion information for(unsigned int i = 0; i < regions.size(); i++){ GRegion *gr = regions[i]; ExtrudeParams *ep = gr->meshAttributes.extrude; if(ep->mesh.Holes.size()){ std::map<int, std::pair<double, std::vector<int> > >::iterator it; for(it = ep->mesh.Holes.begin(); it != ep->mesh.Holes.end(); it++) carveHole(gr, it->first, it->second.first, it->second.second); } } for(unsigned int i = 0; i < regions_quadToTri.size(); i++){ GRegion *gr = regions_quadToTri[i]; ExtrudeParams *ep = gr->meshAttributes.extrude; if(ep->mesh.Holes.size()){ std::map<int, std::pair<double, std::vector<int> > >::iterator it; for(it = ep->mesh.Holes.begin(); it != ep->mesh.Holes.end(); it++) carveHole(gr, it->first, it->second.first, it->second.second); } } return 1; }
int main(int argc,char **argv) { #ifdef HAVE_MCHECK mtrace(); #endif /* setup */ feature_recorder::set_main_threadid(); const char *progname = argv[0]; word_and_context_list alert_list; /* shold be flagged */ word_and_context_list stop_list; /* should be ignored */ scanner_info::scanner_config s_config; // the bulk extractor phase 1 config created from the command line BulkExtractor_Phase1::Config cfg; cfg.num_threads = threadpool::numCPU(); /* Options */ const char *opt_path = 0; int opt_recurse = 0; int opt_zap = 0; int opt_h = 0; int opt_H = 0; std::string opt_sampling_params; std::string opt_outdir; bool opt_write_feature_files = true; bool opt_write_sqlite3 = false; bool opt_enable_histograms=true; /* Startup */ setvbuf(stdout,0,_IONBF,0); // don't buffer stdout std::string command_line = dfxml_writer::make_command_line(argc,argv); std::vector<std::string> scanner_dirs; // where to look for scanners /* Add the default plugin_path */ add_if_present(scanner_dirs,"/usr/local/lib/bulk_extractor"); add_if_present(scanner_dirs,"/usr/lib/bulk_extractor"); add_if_present(scanner_dirs,"."); if (getenv("BE_PATH")) { std::vector<std::string> dirs = split(getenv("BE_PATH"),':'); for(std::vector<std::string>::const_iterator it = dirs.begin(); it!=dirs.end(); it++){ add_if_present(scanner_dirs,*it); } } #ifdef WIN32 setmode(1,O_BINARY); // make stdout binary threadpool::win32_init(); #endif /* look for usage first */ if(argc==1) opt_h=1; /* Process options */ int ch; while ((ch = getopt(argc, argv, "A:B:b:C:d:E:e:F:f:G:g:Hhij:M:m:o:P:p:q:Rr:S:s:VW:w:x:Y:z:Z")) != -1) { switch (ch) { case 'A': feature_recorder::offset_add = stoi64(optarg);break; case 'b': feature_recorder::banner_file = optarg; break; case 'C': feature_recorder::context_window_default = atoi(optarg);break; case 'd': { if(strcmp(optarg,"h")==0) debug_help(); int d = atoi(optarg); switch(d){ case DEBUG_ALLOCATE_512MiB: if(calloc(1024*1024*512,1)){ std::cerr << "-d1002 -- Allocating 512MB of RAM; may be repeated\n"; } else { std::cerr << "-d1002 -- CANNOT ALLOCATE MORE RAM\n"; } break; default: cfg.debug = d; break; } be13::plugin::set_scanner_debug(cfg.debug); } break; case 'E': be13::plugin::scanners_disable_all(); be13::plugin::scanners_enable(optarg); break; case 'e': be13::plugin::scanners_enable(optarg); break; case 'F': FindOpts::get().Files.push_back(optarg); break; case 'f': FindOpts::get().Patterns.push_back(optarg); break; case 'G': cfg.opt_pagesize = scaled_stoi64(optarg); break; case 'g': cfg.opt_marginsize = scaled_stoi64(optarg); break; case 'i': std::cout << "info mode:\n"; cfg.opt_info = true; break; case 'j': cfg.num_threads = atoi(optarg); break; case 'M': scanner_def::max_depth = atoi(optarg); break; case 'm': cfg.max_bad_alloc_errors = atoi(optarg); break; case 'o': opt_outdir = optarg;break; case 'P': scanner_dirs.push_back(optarg);break; case 'p': opt_path = optarg; break; case 'q': if(atoi(optarg)==-1) cfg.opt_quiet = 1;// -q -1 turns off notifications else cfg.opt_notify_rate = atoi(optarg); break; case 'r': if(alert_list.readfile(optarg)){ err(1,"Cannot read alert list %s",optarg); } break; case 'R': opt_recurse = 1; break; case 'S': { std::vector<std::string> params = split(optarg,'='); if(params.size()!=2){ std::cerr << "Invalid paramter: " << optarg << "\n"; exit(1); } s_config.namevals[params[0]] = params[1]; continue; } case 's': #if defined(HAVE_SRANDOM) && !defined(HAVE_SRANDOMDEV) srandom(time(0)); #endif #if defined(HAVE_SRANDOMDEV) srandomdev(); // if we are sampling initialize #endif 
opt_sampling_params = optarg; break; case 'V': std::cout << "bulk_extractor " << PACKAGE_VERSION << "\n"; exit (1); case 'W': fprintf(stderr,"-W has been deprecated. Specify with -S word_min=NN and -S word_max=NN\n"); exit(1); break; case 'w': if(stop_list.readfile(optarg)){ err(1,"Cannot read stop list %s",optarg); } break; case 'x': be13::plugin::scanners_disable(optarg); break; case 'Y': { std::string optargs = optarg; size_t dash = optargs.find('-'); if(dash==std::string::npos){ cfg.opt_offset_start = stoi64(optargs); } else { cfg.opt_offset_start = scaled_stoi64(optargs.substr(0,dash)); cfg.opt_offset_end = scaled_stoi64(optargs.substr(dash+1)); } break; } case 'z': cfg.opt_page_start = stoi64(optarg);break; case 'Z': opt_zap=true;break; case 'H': opt_H++;continue; case 'h': opt_h++;continue; } } cfg.validate(); argc -= optind; argv += optind; if(cfg.debug & DEBUG_PRINT_STEPS) std::cerr << "DEBUG: DEBUG_PRINT_STEPS\n"; if(cfg.debug & DEBUG_PEDANTIC) validateOrEscapeUTF8_validate = true; /* Create a configuration that will be used to initialize the scanners */ scanner_info si; s_config.debug = cfg.debug; si.config = &s_config; /* Make individual configuration options appear on the command line interface. */ si.get_config("work_start_work_end",&worker::opt_work_start_work_end, "Record work start and end of each scanner in report.xml file"); si.get_config("enable_histograms",&opt_enable_histograms, "Disable generation of histograms"); si.get_config("debug_histogram_malloc_fail_frequency",&HistogramMaker::debug_histogram_malloc_fail_frequency, "Set >0 to make histogram maker fail with memory allocations"); si.get_config("hash_alg",&be_hash_name,"Specifies hash algorithm to be used for all hash calculations"); si.get_config("dup_data_alerts",&be13::plugin::dup_data_alerts,"Notify when duplicate data is not processed"); si.get_config("write_feature_files",&opt_write_feature_files,"Write features to flat files"); si.get_config("write_feature_sqlite3",&opt_write_sqlite3,"Write feature files to report.sqlite3"); /* Make sure that the user selected a valid hash */ { uint8_t buf[1]; be_hash_func(buf,0); } /* Load all the scanners and enable the ones we care about */ be13::plugin::load_scanner_directories(scanner_dirs,s_config); be13::plugin::load_scanners(scanners_builtin,s_config); be13::plugin::scanners_process_enable_disable_commands(); /* Print usage if necessary */ if(opt_H){ be13::plugin::info_scanners(true,true,scanners_builtin,'e','x'); exit(0);} if(opt_h){ usage(progname);be13::plugin::info_scanners(false,true,scanners_builtin,'e','x'); exit(0);} /* Give an error if a find list was specified * but no scanner that uses the find list is enabled. */ if(!FindOpts::get().empty()) { /* Look through the enabled scanners and make sure that * at least one of them is a FIND scanner */ if(!be13::plugin::find_scanner_enabled()){ errx(1,"find words are specified with -F but no find scanner is enabled.\n"); } } if(opt_path){ if(argc!=1) errx(1,"-p requires a single argument."); process_path(argv[0],opt_path,cfg.opt_pagesize,cfg.opt_marginsize); exit(0); } if(opt_outdir.size()==0) errx(1,"error: -o outdir must be specified"); /* The zap option wipes the contents of a directory, useful for debugging */ if(opt_zap){ DIR *dirp = opendir(opt_outdir.c_str()); if(dirp){ struct dirent *dp; while ((dp = readdir(dirp)) != NULL){ std::string name = dp->d_name; if(name=="." 
|| name=="..") continue; std::string fname = opt_outdir + std::string("/") + name; unlink(fname.c_str()); std::cout << "erasing " << fname << "\n"; } } if(rmdir(opt_outdir.c_str())){ std::cout << "rmdir " << opt_outdir << "\n"; } } /* Start the clock */ aftimer timer; timer.start(); /* If output directory does not exist, we are not restarting! */ std::string reportfilename = opt_outdir + "/report.xml"; BulkExtractor_Phase1::seen_page_ids_t seen_page_ids; // pages that do not need re-processing image_process *p = 0; // the image process iterator /* Get image or directory */ if (*argv == NULL) { if (opt_recurse) { fprintf(stderr,"filedir not provided\n"); } else { fprintf(stderr,"imagefile not provided\n"); } exit(1); } std::string image_fname = *argv; if(opt_outdir.size()==0){ fprintf(stderr,"output directory not provided\n"); exit(1); } if(directory_missing(opt_outdir) || directory_empty(opt_outdir)){ /* First time running */ /* Validate the args */ if ( argc !=1 ) errx(1,"Disk image option not provided. Run with -h for help."); validate_fn(image_fname); if (directory_missing(opt_outdir)) be_mkdir(opt_outdir); } else { /* Restarting */ std::cout << "Restarting from " << opt_outdir << "\n"; bulk_extractor_restarter r(opt_outdir,reportfilename,image_fname,seen_page_ids); /* Rename the old report and create a new one */ std::string old_reportfilename = reportfilename + "." + itos(time(0)); if(rename(reportfilename.c_str(),old_reportfilename.c_str())){ std::cerr << "Could not rename " << reportfilename << " to " << old_reportfilename << ": " << strerror(errno) << "\n"; exit(1); } } /* Open the image file (or the device) now */ p = image_process::open(image_fname,opt_recurse,cfg.opt_pagesize,cfg.opt_marginsize); if(!p) err(1,"Cannot open %s: ",image_fname.c_str()); /*** *** Create the feature recording set. *** Initialize the scanners. ****/ /* Determine the feature files that will be used */ feature_file_names_t feature_file_names; be13::plugin::get_scanner_feature_file_names(feature_file_names); uint32_t flags = 0; if (stop_list.size()>0) flags |= feature_recorder_set::CREATE_STOP_LIST_RECORDERS; if (opt_write_sqlite3) flags |= feature_recorder_set::ENABLE_SQLITE3_RECORDERS; if (!opt_write_feature_files) flags |= feature_recorder_set::DISABLE_FILE_RECORDERS; { feature_recorder_set fs(flags,be_hash,image_fname,opt_outdir); fs.init(feature_file_names); if(opt_enable_histograms) be13::plugin::add_enabled_scanner_histograms_to_feature_recorder_set(fs); be13::plugin::scanners_init(fs); fs.set_stop_list(&stop_list); fs.set_alert_list(&alert_list); /* Look for commands that impact per-recorders */ for(scanner_info::config_t::const_iterator it=s_config.namevals.begin();it!=s_config.namevals.end();it++){ /* see if there is a <recorder>: */ std::vector<std::string> params = split(it->first,':'); if(params.size()>=3 && params.at(0)=="fr"){ feature_recorder *fr = fs.get_name(params.at(1)); const std::string &cmd = params.at(2); if(fr){ if(cmd=="window") fr->set_context_window(stoi64(it->second)); if(cmd=="window_before") fr->set_context_window_before(stoi64(it->second)); if(cmd=="window_after") fr->set_context_window_after(stoi64(it->second)); } } /* See if there is a scanner? 
*/ } /* Store the configuration in the XML file */ dfxml_writer *xreport = new dfxml_writer(reportfilename,false); dfxml_create(*xreport,command_line,cfg); xreport->xmlout("provided_filename",image_fname); // save this information /* provide documentation to the user; the DFXML information comes from elsewhere */ if(!cfg.opt_quiet){ std::cout << "bulk_extractor version: " << PACKAGE_VERSION << "\n"; #ifdef HAVE_GETHOSTNAME char hostname[1024]; gethostname(hostname,sizeof(hostname)); std::cout << "Hostname: " << hostname << "\n"; #endif std::cout << "Input file: " << image_fname << "\n"; std::cout << "Output directory: " << opt_outdir << "\n"; std::cout << "Disk Size: " << p->image_size() << "\n"; std::cout << "Threads: " << cfg.num_threads << "\n"; } /**************************************************************** *** THIS IS IT! PHASE 1! ****************************************************************/ if ( fs.flag_set(feature_recorder_set::ENABLE_SQLITE3_RECORDERS )) { fs.db_transaction_begin(); } BulkExtractor_Phase1 phase1(*xreport,timer,cfg); if(cfg.debug & DEBUG_PRINT_STEPS) std::cerr << "DEBUG: STARTING PHASE 1\n"; if(opt_sampling_params.size()>0) BulkExtractor_Phase1::set_sampling_parameters(cfg,opt_sampling_params); xreport->add_timestamp("phase1 start"); phase1.run(*p,fs,seen_page_ids); if(cfg.debug & DEBUG_PRINT_STEPS) std::cerr << "DEBUG: WAITING FOR WORKERS\n"; std::string md5_string; phase1.wait_for_workers(*p,&md5_string); delete p; // not strictly needed, but why not? p = 0; if ( fs.flag_set(feature_recorder_set::ENABLE_SQLITE3_RECORDERS )) { fs.db_transaction_commit(); } xreport->add_timestamp("phase1 end"); if(md5_string.size()>0){ std::cout << "MD5 of Disk Image: " << md5_string << "\n"; } /*** PHASE 2 --- Shutdown ***/ if(cfg.opt_quiet==0) std::cout << "Phase 2. Shutting down scanners\n"; xreport->add_timestamp("phase2 start"); be13::plugin::phase_shutdown(fs); xreport->add_timestamp("phase2 end"); /*** PHASE 3 --- Create Histograms ***/ if(cfg.opt_quiet==0) std::cout << "Phase 3. Creating Histograms\n"; xreport->add_timestamp("phase3 start"); if(opt_enable_histograms) fs.dump_histograms(0,histogram_dump_callback,0); // TK - add an xml error notifier! xreport->add_timestamp("phase3 end"); /*** PHASE 4 --- report and then print final usage information ***/ xreport->push("report"); xreport->xmlout("total_bytes",phase1.total_bytes); xreport->xmlout("elapsed_seconds",timer.elapsed_seconds()); xreport->xmlout("max_depth_seen",be13::plugin::get_max_depth_seen()); xreport->xmlout("dup_data_encountered",be13::plugin::dup_data_encountered); xreport->pop(); // report xreport->flush(); xreport->push("scanner_times"); fs.get_stats(xreport,stat_callback); xreport->pop(); xreport->add_rusage(); xreport->pop(); // bulk_extractor xreport->close(); if(cfg.opt_quiet==0){ float mb_per_sec = (phase1.total_bytes / 1000000.0) / timer.elapsed_seconds(); std::cout.precision(4); printf("Elapsed time: %g sec.\n",timer.elapsed_seconds()); printf("Total MB processed: %d\n",int(phase1.total_bytes / 100000)); printf("Overall performance: %g MBytes/sec (%g MBytes/sec/thread)\n", mb_per_sec,mb_per_sec/cfg.num_threads); if (fs.has_name("email")) { feature_recorder *fr = fs.get_name("email"); if(fr){ std::cout << "Total " << fr->name << " features found: " << fr->count() << "\n"; } } } } #ifdef HAVE_MCHECK muntrace(); #endif exit(0); }
int main(int argc, char **argv) { if (argc != 4) { printf("usage: %s <server-address> <server-port> <phase[-1 - 6]>\n", argv[0]); return EXIT_FAILURE; } int start_phase = atoi(argv[3]); bool res; test_state_t *state = NULL; rvm_cfg_t *cfg; if(start_phase >= 0) { /* Try to recover from server */ cfg = initialize_rvm(argv[1], argv[2], true, create_rmem_layer); /* Recover the state (if any) */ state = (test_state_t*)rvm_get_usr_data(cfg); } else { /* Starting from scratch */ cfg = initialize_rvm(argv[1], argv[2], false, create_rmem_layer); CHECK_ERROR(cfg == NULL, ("Failed to initialize rvm\n")); state = NULL; } rvm_txid_t txid; /*==================================================================== * TX 0 - Allocate and Initialize Arrays *===================================================================*/ /* If state is NULL then we are starting from scratch or recovering from an early error */ if(state == NULL) { LOG(1,("Phase 0:\n")); TX_START; /* Allocate a "state" structure to test pointers */ state = (test_state_t*)rvm_alloc(cfg, sizeof(test_state_t)); CHECK_ERROR(state == NULL, ("FAILURE: Couldn't allocate state\n")); /* Initialize the arrays */ res = phase0(cfg, state); CHECK_ERROR(!res, ("FAILURE: Phase 0 Failure\n")); if(start_phase == -1) { LOG(1, ("SUCCESS: Phase 0, simulating failure\n")); return EXIT_SUCCESS; } /* End of first txn */ state->phase = PHASE1; TX_COMMIT; } switch(state->phase) { case PHASE1: /*==================================================================== * TX 1 Increment arrays, don't mess with LL *===================================================================*/ LOG(1, ("Phase 1:\n")); TX_START; res = phase1(cfg, state); CHECK_ERROR(!res, ("FAILURE: Phase 1 failed\n")); /* Simulate Failure */ if(start_phase == 0) { LOG(1, ("SUCCESS: Phase 1, simulating failure\n")); return EXIT_SUCCESS; } state->phase = PHASE2; TX_COMMIT; case PHASE2: //Fallthrough /*==================================================================== * TX 2 Free Arrays *===================================================================*/ LOG(1, ("Phase 2:\n")); TX_START; res = phase2(cfg, state); CHECK_ERROR(!res, ("FAILURE: Phase 2 failed\n")); /* Simulate Failure */ if(start_phase == 1) { LOG(1, ("SUCCESS: Phase 2, simulating failure\n")); return EXIT_SUCCESS; } state->phase = PHASE3; TX_COMMIT; case PHASE3: //Fallthrough /*==================================================================== * TX 3 Fill in Linked list *===================================================================*/ LOG(1, ("Phase 3:\n")); TX_START; res = phase3(cfg, state); CHECK_ERROR(!res, ("FAILURE: Phase 3 failed\n")); /* Simulate Failure */ if(start_phase == 2) { LOG(1, ("SUCCESS: Phase 3, simulating failure\n")); return EXIT_SUCCESS; } state->phase = PHASE4; TX_COMMIT; case PHASE4: /*==================================================================== * TX 4 Free Half the linked list *===================================================================*/ LOG(1, ("Phase 4:\n")); TX_START; res = phase4(cfg, state); CHECK_ERROR(!res, ("FAILURE: Phase 4 failed\n")); /* Simulate Failure */ if(start_phase == 3) { LOG(1, ("SUCCESS: Phase 4, simulating failure\n")); return EXIT_SUCCESS; } state->phase = PHASE5; TX_COMMIT; case PHASE5: /*==================================================================== * TX 5 Re-allocate half of the linked list *===================================================================*/ LOG(1, ("Phase 5:\n")); TX_START; res = phase5(cfg, state); CHECK_ERROR(!res, ("FAILURE: Phase 5 
failed\n")); /* Simulate Failure */ if(start_phase == 4) { LOG(1, ("SUCCESS: Phase5, simulating failure\n")); return EXIT_SUCCESS; } state->phase = PHASE6; TX_COMMIT; case PHASE6: /*==================================================================== * TX 6 Free whole linked list *===================================================================*/ LOG(1, ("Phase 6:\n")); TX_START; res = phase6(cfg, state); CHECK_ERROR(!res, ("FAILURE: Phase 6 failed\n")); /* Simulate Failure */ if(start_phase == 5) { LOG(1, ("SUCCESS: Phase 6, simulating failure\n")); return EXIT_SUCCESS; } state->phase = DONE; TX_COMMIT; case DONE: res = rvm_cfg_destroy(cfg); CHECK_ERROR(!res, ("FAILURE: Failed to destroy rvm state\n")); LOG(1, ("SUCCESS: Got through all phases\n")); break; default: LOG(1, ("FAILURE: Corrupted State, tried phase %d\n", state->phase)); return EXIT_FAILURE; } return EXIT_SUCCESS; }
void gen_schedule_plan(){
#ifdef DEVELOPING
    numVcore = 8;
    numPcore = 6;
#else
    // get from hypervisor
    // numVcore =;
    // numPcore =;
#endif
    vcoreContainer = (VIRT_CORE**)malloc(sizeof(VIRT_CORE*)*numVcore);
    pcoreContainer = (PHYS_CORE**)malloc(sizeof(PHYS_CORE*)*numPcore);

#ifdef DEVELOPING
    VIRT_CORE* temp_c;
    for(int i=0;i<numVcore;i++){
        temp_c = (VIRT_CORE*)malloc(sizeof(VIRT_CORE));
        temp_c->domain = i/4;
        temp_c->num = i%4;
        temp_c->requ = 100000*(i+1);
        temp_c->code = i;
        vcoreContainer[i] = temp_c;
    }
    PHYS_CORE* temp_p;
    for(int i=0;i<numPcore;i++){
        temp_p = (PHYS_CORE*)malloc(sizeof(PHYS_CORE));
        temp_p->freq = (i < 2) ? 1200000 : 600000;
        temp_p->type = (i < 2) ? TYPE_BIG : TYPE_LITTLE;
        temp_p->efficient = temp_p->type;
        temp_p->load = 0;
        temp_p->num = i;
        temp_p->workload = (int*)malloc(sizeof(int)*numVcore);
        for(int j=0;j<numVcore;j++){
            temp_p->workload[j] = 0;
        }
        pcoreContainer[i] = temp_p;
    }
#else
    // fetch vcpu and pcpu info
#endif

#ifdef DEVELOPING
    clock_t t;

    t = clock();
    // phase 1
    phase1();
    t = clock() - t;
    fprintf(stderr, "phase 1: %ld ticks (%.3lf seconds).\n",
            (long)t, ((double)t)/CLOCKS_PER_SEC);

    t = clock();
    // phase 2
    eSlice.next = NULL;
    phase2();
    t = clock() - t;
    fprintf(stderr, "phase 2: %ld ticks (%.3lf seconds).\n",
            (long)t, ((double)t)/CLOCKS_PER_SEC);

    t = clock();
    // phase 3
    phase3();
    t = clock() - t;
    fprintf(stderr, "phase 3: %ld ticks (%.3lf seconds).\n",
            (long)t, ((double)t)/CLOCKS_PER_SEC);
#else
    // phase 1
    phase1();
    // phase 2
    eSlice.next = NULL;
    phase2();
    // phase 3
    phase3();
#endif

    // clean up
    ExecutionSlice* target;
    while(exePlan.next != NULL){
        target = exePlan.next;
        exePlan.next = exePlan.next->next;
        free(target);
    }
    for(int i=0;i<numPcore;i++){
        free(pcoreContainer[i]);
    }
    free(pcoreContainer);
    for(int i=0;i<numVcore;i++){
        free(vcoreContainer[i]);
    }
    free(vcoreContainer);
}
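Note that clock() measures CPU time consumed by the process, which can differ noticeably from elapsed wall time once the phases block or sleep. If wall-clock timing of the phases is wanted instead, a POSIX monotonic-clock helper is one option; a small sketch (the helper name is made up for illustration):

#include <time.h>

/* Hypothetical helper: elapsed wall-clock seconds since *start, using the
 * monotonic clock so it is unaffected by system time adjustments. */
static double elapsed_seconds(const struct timespec *start)
{
    struct timespec now;
    clock_gettime(CLOCK_MONOTONIC, &now);
    return (now.tv_sec - start->tv_sec)
         + (now.tv_nsec - start->tv_nsec) / 1e9;
}

/* Usage:
 *   struct timespec t0;
 *   clock_gettime(CLOCK_MONOTONIC, &t0);
 *   phase1();
 *   fprintf(stderr, "phase 1: %.3f seconds.\n", elapsed_seconds(&t0));
 */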
Foam::tmp<Foam::volScalarField> Foam::phasePair::magUr() const
{
    return mag(phase1().U() - phase2().U());
}
bool _compact(const char *ns, NamespaceDetails *d, string& errmsg, bool validate, BSONObjBuilder& result, double pf, int pb) { //int les = d->lastExtentSize; // this is a big job, so might as well make things tidy before we start just to be nice. getDur().commitNow(); list<DiskLoc> extents; for( DiskLoc L = d->firstExtent; !L.isNull(); L = L.ext()->xnext ) extents.push_back(L); log() << "compact " << extents.size() << " extents" << endl; ProgressMeterHolder pm( cc().curop()->setMessage( "compact extent" , extents.size() ) ); // same data, but might perform a little different after compact? NamespaceDetailsTransient::get(ns).clearQueryCache(); int nidx = d->nIndexes; scoped_array<IndexSpec> indexSpecs( new IndexSpec[nidx] ); scoped_array<SortPhaseOne> phase1( new SortPhaseOne[nidx] ); { NamespaceDetails::IndexIterator ii = d->ii(); int x = 0; while( ii.more() ) { BSONObjBuilder b; IndexDetails& idx = ii.next(); BSONObj::iterator i(idx.info.obj()); while( i.more() ) { BSONElement e = i.next(); if( !str::equals(e.fieldName(), "v") && !str::equals(e.fieldName(), "background") ) { b.append(e); } } BSONObj o = b.obj().getOwned(); phase1[x].sorter.reset( new BSONObjExternalSorter( idx.idxInterface(), o.getObjectField("key") ) ); phase1[x].sorter->hintNumObjects( d->stats.nrecords ); indexSpecs[x++].reset(o); } } log() << "compact orphan deleted lists" << endl; for( int i = 0; i < Buckets; i++ ) { d->deletedList[i].writing().Null(); } // before dropping indexes, at least make sure we can allocate one extent! uassert(14025, "compact error no space available to allocate", !allocateSpaceForANewRecord(ns, d, Record::HeaderSize+1, false).isNull()); // note that the drop indexes call also invalidates all clientcursors for the namespace, which is important and wanted here log() << "compact dropping indexes" << endl; BSONObjBuilder b; if( !dropIndexes(d, ns, "*", errmsg, b, true) ) { errmsg = "compact drop indexes failed"; log() << errmsg << endl; return false; } getDur().commitNow(); long long skipped = 0; int n = 0; for( list<DiskLoc>::iterator i = extents.begin(); i != extents.end(); i++ ) { skipped += compactExtent(ns, d, *i, n++, indexSpecs, phase1, nidx, validate, pf, pb); pm.hit(); } if( skipped ) { result.append("invalidObjects", skipped); } assert( d->firstExtent.ext()->xprev.isNull() ); // indexes will do their own progress meter? pm.finished(); // build indexes NamespaceString s(ns); string si = s.db + ".system.indexes"; for( int i = 0; i < nidx; i++ ) { killCurrentOp.checkForInterrupt(false); BSONObj info = indexSpecs[i].info; log() << "compact create index " << info["key"].Obj().toString() << endl; try { precalced = &phase1[i]; theDataFileMgr.insert(si.c_str(), info.objdata(), info.objsize()); } catch(...) { precalced = 0; throw; } precalced = 0; } return true; }
void psrs(int size, int rank, int nInts, int *toSort, char *fname)
{
    char hname[256];
    double start, end, total = 0, algStart;
    int i, *privateInts, *regularSamples, *collectedSamples, *pivots,
        *partitionIndices, *localPartitionSizes, *incomingPartitionSizes,
        **partitions, *mergedPartitions, *partitionSizes, *sortedArray;
    FILE *fptr = NULL;

    if (rank == MASTER)
        fptr = fopen(fname, "a");

    memset(hname, '\0', sizeof(unsigned char) * 256);
    gethostname(hname, 255);
    DBPRINT(("%d of %d running on pid %d %s\n", rank, size, getpid(), hname));

    if (rank == MASTER) {
        collectedSamples = calloc(1, sizeof(int) * size * size);
        if (!collectedSamples) {
            err(1, "Failed to allocate memory for collected samples "
                   "array for master process");
            MPI_Finalize();
            exit(1);
        }
    }

    MPI_Barrier(MPI_COMM_WORLD);

    if (nInts < VALIDATION_THRESHOLD)
        validateResults = 1;

    /*
     * "Phase 0" in which the array is split into contiguous chunks
     * distributed amongst the processes.
     */
    phase0(size, rank, nInts, toSort, &privateInts);

    /* Phase 1 */
    algStart = MPI_Wtime();
    START_TIMER((start));
    phase1(size, rank, nInts, toSort, privateInts, &regularSamples);
    MPI_Barrier(MPI_COMM_WORLD);
    STOP_TIMER((1), (start), (end), (total), (rank), (fptr));

    /* Phase 2 */
    START_TIMER((start));
    phase2(size, rank, regularSamples, &collectedSamples, &pivots);
    MPI_Barrier(MPI_COMM_WORLD);
    STOP_TIMER((2), (start), (end), (total), (rank), (fptr));

    /* Phase 3 */
    START_TIMER((start));
    phase3(size, rank, nInts, privateInts, pivots, &localPartitionSizes,
           &partitionIndices, &incomingPartitionSizes, &partitions);
    MPI_Barrier(MPI_COMM_WORLD);
    STOP_TIMER((3), (start), (end), (total), (rank), (fptr));

    /* Phase 4 */
    START_TIMER((start));
    phase4(size, incomingPartitionSizes, partitions, &mergedPartitions);
    MPI_Barrier(MPI_COMM_WORLD);
    STOP_TIMER((4), (start), (end), (total), (rank), (fptr));

    if (rank == MASTER) {
        total = end - algStart;
        fprintf(fptr, "The algorithm took %f seconds in total\n", total);
    }

    if (!validateResults)
        return;

    /* Run validation test */

    /* "Phase 5" concatenate the lists back at the master */
    phase5(size, rank, nInts, incomingPartitionSizes, mergedPartitions,
           &partitionSizes, &sortedArray);

    /*
     * Assert that the array is equivalent to the sorted original array
     * where we sort the original array using a known, proven, sequential,
     * method.
     */
    if (rank == MASTER) {
        qsort(toSort, nInts, sizeof(int), intComp);
    }
    for (i = 0; i < nInts; i++) {
        if (rank == MASTER) {
            if (toSort[i] != sortedArray[i]) {
                printf("OH NO, got %d at pos %d, expected %d\n",
                       sortedArray[i], i, toSort[i]);
            }
        }
    }

    if (rank == MASTER)
        fclose(fptr);
}
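intComp is used as the qsort comparator in the validation step but is not shown here; a minimal comparator consistent with that use (hypothetical, the real project may differ):

/* Hypothetical qsort comparator for ints; the (x > y) - (x < y) form
 * avoids the overflow risk of returning x - y. */
static int intComp(const void *a, const void *b)
{
    int x = *(const int *)a;
    int y = *(const int *)b;
    return (x > y) - (x < y);
}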