vec3 TreeNode::getGlobalPosition(const vec3& rootPoint) {
    Context ctx(rootPoint);
    getContextForNode(ctx);
    return ctx.getCurrentOrigin();
}
// returns true on success, false on failure
bool apply(const BSONObj& op) {
    Client::Context ctx( _cappedNs );
    // in an annoying twist of api, returns true on failure
    return !applyOperation_inlock(op, true);
}
bool Helpers::getLast(const char *ns, BSONObj& result) {
    Client::Context ctx(ns);
    auto_ptr<Runner> runner(InternalPlanner::collectionScan(ns, InternalPlanner::BACKWARD));
    Runner::RunnerState state = runner->getNext(&result, NULL);
    return Runner::RUNNER_ADVANCED == state;
}
/* * Runs the command object cmdobj on the db with name dbname and puts result in result. * @param dbname, name of db * @param cmdobj, object that contains entire command * @param options * @param errmsg, reference to error message * @param result, reference to builder for result * @param fromRepl * @return true if successful, false otherwise */ bool FTSCommand::_run(OperationContext* txn, const string& dbname, BSONObj& cmdObj, int cmdOptions, const string& ns, const string& searchString, string language, // "" for not-set int limit, BSONObj& filter, BSONObj& projection, string& errmsg, BSONObjBuilder& result ) { Timer comm; // Rewrite the cmd as a normal query. BSONObjBuilder queryBob; queryBob.appendElements(filter); BSONObjBuilder textBob; textBob.append("$search", searchString); if (!language.empty()) { textBob.append("$language", language); } queryBob.append("$text", textBob.obj()); // This is the query we exec. BSONObj queryObj = queryBob.obj(); // We sort by the score. BSONObj sortSpec = BSON("$s" << BSON("$meta" << LiteParsedQuery::metaTextScore)); // We also project the score into the document and strip it out later during the reformatting // of the results. BSONObjBuilder projBob; projBob.appendElements(projection); projBob.appendElements(sortSpec); BSONObj projObj = projBob.obj(); Client::ReadContext ctx(txn, ns); CanonicalQuery* cq; Status canonicalizeStatus = CanonicalQuery::canonicalize(ns, queryObj, sortSpec, projObj, 0, limit, BSONObj(), &cq, WhereCallbackReal(txn, StringData(dbname))); if (!canonicalizeStatus.isOK()) { errmsg = canonicalizeStatus.reason(); return false; } Runner* rawRunner; Status getRunnerStatus = getRunner(txn, ctx.ctx().db()->getCollection(txn, ns), cq, &rawRunner); if (!getRunnerStatus.isOK()) { errmsg = getRunnerStatus.reason(); return false; } auto_ptr<Runner> runner(rawRunner); BSONArrayBuilder resultBuilder(result.subarrayStart("results")); // Quoth: "leave a mb for other things" int resultSize = 1024 * 1024; int numReturned = 0; BSONObj obj; while (Runner::RUNNER_ADVANCED == runner->getNext(&obj, NULL)) { if ((resultSize + obj.objsize()) >= BSONObjMaxUserSize) { break; } // We return an array of results. Add another element. BSONObjBuilder oneResultBuilder(resultBuilder.subobjStart()); oneResultBuilder.append("score", obj["$s"].number()); // Strip out the score from the returned obj. BSONObjIterator resIt(obj); BSONObjBuilder resBob; while (resIt.more()) { BSONElement elt = resIt.next(); if (!mongoutils::str::equals("$s", elt.fieldName())) { resBob.append(elt); } } oneResultBuilder.append("obj", resBob.obj()); BSONObj addedArrayObj = oneResultBuilder.done(); resultSize += addedArrayObj.objsize(); numReturned++; } resultBuilder.done(); // returns some stats to the user BSONObjBuilder stats(result.subobjStart("stats")); // Fill in nscanned from the explain. TypeExplain* bareExplain; Status res = runner->getInfo(&bareExplain, NULL); if (res.isOK()) { auto_ptr<TypeExplain> explain(bareExplain); stats.append("nscanned", explain->getNScanned()); stats.append("nscannedObjects", explain->getNScannedObjects()); } stats.appendNumber( "n" , numReturned ); stats.append( "timeMicros", (int)comm.micros() ); stats.done(); return true; }
void appendReplicationInfo(OperationContext* txn, BSONObjBuilder& result, int level) { ReplicationCoordinator* replCoord = getGlobalReplicationCoordinator(); if (replCoord->getSettings().usingReplSets()) { IsMasterResponse isMasterResponse; replCoord->fillIsMasterForReplSet(&isMasterResponse); result.appendElements(isMasterResponse.toBSON()); return; } // TODO(dannenberg) replAllDead is bad and should be removed when master slave is removed if (replAllDead) { result.append("ismaster", 0); string s = string("dead: ") + replAllDead; result.append("info", s); } else { result.appendBool("ismaster", getGlobalReplicationCoordinator()->isMasterForReportingPurposes()); } if (level) { BSONObjBuilder sources( result.subarrayStart( "sources" ) ); int n = 0; list<BSONObj> src; { const char* localSources = "local.sources"; Client::ReadContext ctx(txn, localSources); auto_ptr<PlanExecutor> exec( InternalPlanner::collectionScan(txn, localSources, ctx.ctx().db()->getCollection(txn, localSources))); BSONObj obj; PlanExecutor::ExecState state; while (PlanExecutor::ADVANCED == (state = exec->getNext(&obj, NULL))) { src.push_back(obj); } } for( list<BSONObj>::const_iterator i = src.begin(); i != src.end(); i++ ) { BSONObj s = *i; BSONObjBuilder bb; bb.append( s["host"] ); string sourcename = s["source"].valuestr(); if ( sourcename != "main" ) bb.append( s["source"] ); { BSONElement e = s["syncedTo"]; BSONObjBuilder t( bb.subobjStart( "syncedTo" ) ); t.appendDate( "time" , e.timestampTime() ); t.append( "inc" , e.timestampInc() ); t.done(); } if ( level > 1 ) { wassert(!txn->lockState()->isLocked()); // note: there is no so-style timeout on this connection; perhaps we should have one. ScopedDbConnection conn(s["host"].valuestr()); DBClientConnection *cliConn = dynamic_cast< DBClientConnection* >( &conn.conn() ); if ( cliConn && replAuthenticate(cliConn) ) { BSONObj first = conn->findOne( (string)"local.oplog.$" + sourcename, Query().sort( BSON( "$natural" << 1 ) ) ); BSONObj last = conn->findOne( (string)"local.oplog.$" + sourcename, Query().sort( BSON( "$natural" << -1 ) ) ); bb.appendDate( "masterFirst" , first["ts"].timestampTime() ); bb.appendDate( "masterLast" , last["ts"].timestampTime() ); double lag = (double) (last["ts"].timestampTime() - s["syncedTo"].timestampTime()); bb.append( "lagSeconds" , lag / 1000 ); } conn.done(); } sources.append( BSONObjBuilder::numStr( n++ ) , bb.obj() ); } sources.done(); } }
/** * Runs a query using the following steps: * --Parsing. * --Acquire locks. * --Plan query, obtaining an executor that can run it. * --Generate the first batch. * --Save state for getMore, transferring ownership of the executor to a ClientCursor. * --Generate response to send to the client. */ bool run(OperationContext* txn, const std::string& dbname, BSONObj& cmdObj, int options, std::string& errmsg, BSONObjBuilder& result) override { const NamespaceString nss(parseNs(dbname, cmdObj)); if (!nss.isValid() || nss.isCommand() || nss.isSpecialCommand()) { return appendCommandStatus(result, {ErrorCodes::InvalidNamespace, str::stream() << "Invalid collection name: " << nss.ns()}); } // Although it is a command, a find command gets counted as a query. globalOpCounters.gotQuery(); if (txn->getClient()->isInDirectClient()) { return appendCommandStatus( result, Status(ErrorCodes::IllegalOperation, "Cannot run find command from eval()")); } // Parse the command BSON to a LiteParsedQuery. const bool isExplain = false; auto lpqStatus = LiteParsedQuery::makeFromFindCommand(nss, cmdObj, isExplain); if (!lpqStatus.isOK()) { return appendCommandStatus(result, lpqStatus.getStatus()); } auto& lpq = lpqStatus.getValue(); // Validate term before acquiring locks, if provided. if (auto term = lpq->getReplicationTerm()) { auto replCoord = repl::ReplicationCoordinator::get(txn); Status status = replCoord->updateTerm(txn, *term); // Note: updateTerm returns ok if term stayed the same. if (!status.isOK()) { return appendCommandStatus(result, status); } } // Fill out curop information. // // We pass negative values for 'ntoreturn' and 'ntoskip' to indicate that these values // should be omitted from the log line. Limit and skip information is already present in the // find command parameters, so these fields are redundant. const int ntoreturn = -1; const int ntoskip = -1; beginQueryOp(txn, nss, cmdObj, ntoreturn, ntoskip); // Finish the parsing step by using the LiteParsedQuery to create a CanonicalQuery. ExtensionsCallbackReal extensionsCallback(txn, &nss); auto statusWithCQ = CanonicalQuery::canonicalize(lpq.release(), extensionsCallback); if (!statusWithCQ.isOK()) { return appendCommandStatus(result, statusWithCQ.getStatus()); } std::unique_ptr<CanonicalQuery> cq = std::move(statusWithCQ.getValue()); // Acquire locks. AutoGetCollectionForRead ctx(txn, nss); Collection* collection = ctx.getCollection(); const int dbProfilingLevel = ctx.getDb() ? ctx.getDb()->getProfilingLevel() : serverGlobalParams.defaultProfile; // Get the execution plan for the query. auto statusWithPlanExecutor = getExecutorFind(txn, collection, nss, std::move(cq), PlanExecutor::YIELD_AUTO); if (!statusWithPlanExecutor.isOK()) { return appendCommandStatus(result, statusWithPlanExecutor.getStatus()); } std::unique_ptr<PlanExecutor> exec = std::move(statusWithPlanExecutor.getValue()); if (!collection) { // No collection. Just fill out curop indicating that there were zero results and // there is no ClientCursor id, and then return. const long long numResults = 0; const CursorId cursorId = 0; endQueryOp(txn, collection, *exec, dbProfilingLevel, numResults, cursorId); appendCursorResponseObject(cursorId, nss.ns(), BSONArray(), &result); return true; } const LiteParsedQuery& pq = exec->getCanonicalQuery()->getParsed(); // Stream query results, adding them to a BSONArray as we go. 
CursorResponseBuilder firstBatch(/*isInitialResponse*/ true, &result); BSONObj obj; PlanExecutor::ExecState state = PlanExecutor::ADVANCED; long long numResults = 0; while (!FindCommon::enoughForFirstBatch(pq, numResults) && PlanExecutor::ADVANCED == (state = exec->getNext(&obj, NULL))) { // If we can't fit this result inside the current batch, then we stash it for later. if (!FindCommon::haveSpaceForNext(obj, numResults, firstBatch.bytesUsed())) { exec->enqueue(obj); break; } // Add result to output buffer. firstBatch.append(obj); numResults++; } // Throw an assertion if query execution fails for any reason. if (PlanExecutor::FAILURE == state || PlanExecutor::DEAD == state) { firstBatch.abandon(); error() << "Plan executor error during find command: " << PlanExecutor::statestr(state) << ", stats: " << Explain::getWinningPlanStats(exec.get()); return appendCommandStatus(result, Status(ErrorCodes::OperationFailed, str::stream() << "Executor error during find command: " << WorkingSetCommon::toStatusString(obj))); } // Before saving the cursor, ensure that whatever plan we established happened with the // expected collection version auto css = CollectionShardingState::get(txn, nss); css->checkShardVersionOrThrow(txn); // Set up the cursor for getMore. CursorId cursorId = 0; if (shouldSaveCursor(txn, collection, state, exec.get())) { // Register the execution plan inside a ClientCursor. Ownership of the PlanExecutor is // transferred to the ClientCursor. // // First unregister the PlanExecutor so it can be re-registered with ClientCursor. exec->deregisterExec(); // Create a ClientCursor containing this plan executor. We don't have to worry about // leaking it as it's inserted into a global map by its ctor. ClientCursor* cursor = new ClientCursor(collection->getCursorManager(), exec.release(), nss.ns(), txn->recoveryUnit()->isReadingFromMajorityCommittedSnapshot(), pq.getOptions(), pq.getFilter()); cursorId = cursor->cursorid(); invariant(!exec); PlanExecutor* cursorExec = cursor->getExecutor(); // State will be restored on getMore. cursorExec->saveState(); cursorExec->detachFromOperationContext(); cursor->setLeftoverMaxTimeMicros(CurOp::get(txn)->getRemainingMaxTimeMicros()); cursor->setPos(numResults); // Fill out curop based on the results. endQueryOp(txn, collection, *cursorExec, dbProfilingLevel, numResults, cursorId); } else { endQueryOp(txn, collection, *exec, dbProfilingLevel, numResults, cursorId); } // Generate the response object to send to the client. firstBatch.done(cursorId, nss.ns()); return true; }
int be_visitor_sequence_ch::visit_sequence (be_sequence *node) { if (node->defined_in () == 0) { // The node is a nested sequence, and has had no scope defined. node->set_defined_in (DeclAsScope (this->ctx_->scope ()->decl ())); } // First create a name for ourselves. if (node->create_name (this->ctx_->tdef ()) == -1) { ACE_ERROR_RETURN ((LM_ERROR, ACE_TEXT ("be_visitor_sequence_ch::") ACE_TEXT ("visit_sequence - ") ACE_TEXT ("failed creating name\n")), -1); } // We don't check cli_hdr_gen() here. If we are generated more // than once as an anonymous sequence, the name guard will cause // the C++ preprocessor to catch it. If we are generated more than // once as a typedef (caused by a comma separated list of // typedefs), our name will be changed by the call above and the // name guard will not catch it, but that's ok - we want to // be generated for each typedef. if (node->imported ()) { return 0; } TAO_OutStream *os = this->ctx_->stream (); // Retrieve the base type since we may need to do some code // generation for the base type. be_type *bt = be_type::narrow_from_decl (node->base_type ()); if (bt == 0) { ACE_ERROR_RETURN ((LM_ERROR, ACE_TEXT ("be_visitor_sequence_ch::") ACE_TEXT ("visit_sequence - ") ACE_TEXT ("Bad element type\n")), -1); } bt->seen_in_sequence (true); AST_Decl::NodeType nt = bt->node_type (); // If our base type is an anonymous sequence, we must create a name // and generate a class declaration for it as well. if (nt == AST_Decl::NT_sequence) { // Temporarily make the context's tdef node 0 so the nested call // to create_name will not get confused and give our anonymous // sequence element type the same name as we have. be_typedef *tmp = this->ctx_->tdef (); this->ctx_->tdef (0); if (bt->accept (this) != 0) { ACE_ERROR_RETURN ((LM_ERROR, ACE_TEXT ("be_visitor_sequence_ch::") ACE_TEXT ("visit_sequence - ") ACE_TEXT ("codegen for anonymous ") ACE_TEXT ("base type failed\n")), -1); } // Restore the tdef value. this->ctx_->tdef (tmp); } *os << be_nl_2; *os << "// TAO_IDL - Generated from" << be_nl << "// " << __FILE__ << ":" << __LINE__; if (idl_global->dcps_sequence_type_defined (node->full_name ())) { // generate the sequence declaration as if it was native. This // to satisfy DDS // strip the "Seq" ending to get the sample's name const char * node_name = node->full_name (); const size_t max_name_length = 2000; if (ACE_OS::strlen (node_name) >= max_name_length) { return -1; } char sample_name[max_name_length]; ACE_OS::strncpy (sample_name, node_name, ACE_OS::strlen (node_name) - 3); sample_name[ACE_OS::strlen (node_name) - 3] = '\0'; *os << be_nl_2 << "typedef ::TAO::DCPS::ZeroCopyDataSeq< " << sample_name << ", DCPS_ZERO_COPY_SEQ_DEFAULT_SIZE> " << node->local_name () << ";" << be_nl; } else { os->gen_ifdef_macro (node->flat_name ()); *os << be_nl_2; /// If we are using std::vector, we won't be using _vars /// and _outs. They may get redefined and reinstated later. if (!be_global->alt_mapping () || !node->unbounded ()) { if (this->ctx_->tdef () != 0) { *os << "class " << node->local_name () << ";"; } if (this->ctx_->tdef () != 0) { this->gen_varout_typedefs (node, bt); } } else { *os << "typedef std::vector< "; // Generate the base type for the buffer. 
be_visitor_context ctx (*this->ctx_); ctx.state (TAO_CodeGen::TAO_SEQUENCE_BUFFER_TYPE_CH); be_visitor_sequence_buffer_type bt_visitor (&ctx); if (bt->accept (&bt_visitor) == -1) { ACE_ERROR_RETURN ((LM_ERROR, ACE_TEXT ("be_visitor_sequence_ch::") ACE_TEXT ("visit_sequence - ") ACE_TEXT ("buffer type visit failed\n")), -1); } *os << "> " << node->local_name () << ";"; os->gen_endif (); node->cli_hdr_gen (true); return 0; } *os << be_nl_2 << "class " << be_global->stub_export_macro () << " " << node->local_name () << be_idt_nl << ": public" << be_idt << be_idt_nl; int status = node->gen_base_class_name (os, "", this->ctx_->scope ()->decl ()); if (status == -1) { ACE_ERROR_RETURN ((LM_ERROR, ACE_TEXT ("be_visitor_sequence_ch::") ACE_TEXT ("visit_sequence - ") ACE_TEXT ("Base class name ") ACE_TEXT ("generation failed\n")), -1); } *os << be_uidt << be_uidt << be_uidt; *os << be_nl << "{" << be_nl << "public:" << be_idt; *os << be_nl << node->local_name () << " (void);"; if (node->unbounded ()) { *os << be_nl << node->local_name () << " ( ::CORBA::ULong max);"; } /// If we are using std::vector, we can't implement this /// constructor. if (!be_global->alt_mapping () || !node->unbounded ()) { *os << be_nl << node->local_name () << " (" << be_idt; if (node->unbounded ()) { *os << be_nl << "::CORBA::ULong max,"; } *os << be_nl << "::CORBA::ULong length," << be_nl; // Generate the base type for the buffer. be_visitor_context ctx (*this->ctx_); ctx.state (TAO_CodeGen::TAO_SEQUENCE_BUFFER_TYPE_CH); be_visitor_sequence_buffer_type bt_visitor (&ctx); if (bt->accept (&bt_visitor) == -1) { ACE_ERROR_RETURN ((LM_ERROR, ACE_TEXT ("be_visitor_sequence_ch::") ACE_TEXT ("visit_sequence - ") ACE_TEXT ("buffer type visit failed\n")), -1); } *os << "* buffer," << be_nl << "::CORBA::Boolean release = false);" << be_uidt; } *os << be_nl << node->local_name () << " (const " << node->local_name () << " &);" << be_nl; *os << "virtual ~" << node->local_name () << " (void);"; if (be_global->alt_mapping () && node->unbounded ()) { *os << be_nl_2 << "virtual ::CORBA::ULong length (void) const;" << be_nl << "virtual void length ( ::CORBA::ULong);" << be_nl_2 << "virtual ::CORBA::ULong maximum (void) const;"; } *os << be_nl; node->gen_stub_decls (os); // TAO provides extensions for octet sequences, first find out if // the base type is an octet (or an alias for octet). be_predefined_type *predef = 0; if (bt->base_node_type () == AST_Type::NT_pre_defined) { be_typedef* alias = be_typedef::narrow_from_decl (bt); if (alias == 0) { predef = be_predefined_type::narrow_from_decl (bt); } else { predef = be_predefined_type::narrow_from_decl ( alias->primitive_base_type () ); } } // Now generate the extension... if (predef != 0 && predef->pt () == AST_PredefinedType::PT_octet && node->unbounded () && !be_global->alt_mapping ()) { *os << be_nl_2 << "\n\n#if (TAO_NO_COPY_OCTET_SEQUENCES == 1)" << be_nl << node->local_name () << " (" << be_idt << be_idt_nl << "::CORBA::ULong length," << be_nl << "const ACE_Message_Block* mb" << be_uidt_nl << ")" << be_uidt_nl << " : ::TAO::unbounded_value_sequence< ::CORBA::Octet>" << " (length, mb) {}" << "\n" << "#endif /* TAO_NO_COPY_OCTET_SEQUENCE == 1 */"; } *os << be_uidt_nl << "};"; os->gen_endif (); } node->cli_hdr_gen (true); return 0; }
static jboolean NativeBN_BN_div(JNIEnv* env, jclass, BIGNUM* dv, BIGNUM* rem, BIGNUM* m, BIGNUM* d) {
    if (!fourValidHandles(env, (rem ? rem : dv), (dv ? dv : rem), m, d)) return JNI_FALSE;
    Unique_BN_CTX ctx(BN_CTX_new());
    return BN_div(dv, rem, m, d, ctx.get());
}
static jboolean NativeBN_BN_nnmod(JNIEnv* env, jclass, BIGNUM* r, BIGNUM* a, BIGNUM* m) {
    if (!threeValidHandles(env, r, a, m)) return JNI_FALSE;
    Unique_BN_CTX ctx(BN_CTX_new());
    return BN_nnmod(r, a, m, ctx.get());
}
/** * This is called by db/ops/query.cpp. This is the entry point for answering a query. */ string newRunQuery(Message& m, QueryMessage& q, CurOp& curop, Message &result) { // This is a read lock. Client::ReadContext ctx(q.ns, dbpath); // Parse, canonicalize, plan, transcribe, and get a runner. Runner* rawRunner; Status status = getRunner(q, &rawRunner); if (!status.isOK()) { uasserted(17007, "Couldn't process query " + q.query.toString() + " why: " + status.reason()); } verify(NULL != rawRunner); auto_ptr<Runner> runner(rawRunner); // We freak out later if this changes before we're done with the query. const ChunkVersion shardingVersionAtStart = shardingState.getVersion(q.ns); // We use this a lot below. const ParsedQuery& pq = runner->getQuery().getParsed(); // TODO: Document why we do this. replVerifyReadsOk(&pq); // If this exists, the collection is sharded. // If it doesn't exist, we can assume we're not sharded. // If we're sharded, we might encounter data that is not consistent with our sharding state. // We must ignore this data. CollectionMetadataPtr collMetadata; if (!shardingState.needCollectionMetadata(pq.ns())) { collMetadata = CollectionMetadataPtr(); } else { collMetadata = shardingState.getCollectionMetadata(pq.ns()); } // Run the query. BufBuilder bb(32768); bb.skip(sizeof(QueryResult)); // How many results have we obtained from the runner? int numResults = 0; // If we're replaying the oplog, we save the last time that we read. OpTime slaveReadTill; // Do we save the Runner in a ClientCursor for getMore calls later? bool saveClientCursor = false; BSONObj obj; // TODO: Differentiate EOF from error. while (runner->getNext(&obj)) { // If we're sharded make sure that we don't return any data that hasn't been migrated // off of our shared yet. if (collMetadata) { // This information can change if we yield and as such we must make sure to re-fetch // it if we yield. KeyPattern kp(collMetadata->getKeyPattern()); // This performs excessive BSONObj creation but that's OK for now. if (!collMetadata->keyBelongsToMe(kp.extractSingleKey(obj))) { continue; } } // Add result to output buffer. bb.appendBuf((void*)obj.objdata(), obj.objsize()); // Count the result. ++numResults; // Possibly note slave's position in the oplog. if (pq.hasOption(QueryOption_OplogReplay)) { BSONElement e = obj["ts"]; if (Date == e.type() || Timestamp == e.type()) { slaveReadTill = e._opTime(); } } // TODO: only one type of 2d search doesn't support this. We need a way to pull it out // of CanonicalQuery. :( const bool supportsGetMore = true; const bool isExplain = pq.isExplain(); if (isExplain && pq.enoughForExplain(numResults)) { break; } else if (!supportsGetMore && (pq.enough(numResults) || bb.len() >= MaxBytesToReturnToClientAtOnce)) { break; } else if (pq.enoughForFirstBatch(numResults, bb.len())) { // If only one result requested assume it's a findOne() and don't save the cursor. if (pq.wantMore() && 1 != pq.getNumToReturn()) { saveClientCursor = true; } break; } } // TODO: Stage creation can set tailable depending on what's in the parsed query. We have // the full parsed query available during planning...set it there. // // TODO: If we're tailable we want to save the client cursor. Make sure we do this later. //if (pq.hasOption(QueryOption_CursorTailable) && pq.getNumToReturn() != 1) { ... } // TODO(greg): This will go away soon. 
if (!shardingState.getVersion(pq.ns()).isWriteCompatibleWith(shardingVersionAtStart)) { // if the version changed during the query we might be missing some data and its safe to // send this as mongos can resend at this point throw SendStaleConfigException(pq.ns(), "version changed during initial query", shardingVersionAtStart, shardingState.getVersion(pq.ns())); } long long ccId = 0; if (saveClientCursor) { // Allocate a new ClientCursor. ClientCursorHolder ccHolder; ccHolder.reset(new ClientCursor(runner.get())); ccId = ccHolder->cursorid(); // We won't use the runner until it's getMore'd. runner->saveState(); // ClientCursor takes ownership of runner. Release to make sure it's not deleted. runner.release(); if (pq.hasOption(QueryOption_OplogReplay) && !slaveReadTill.isNull()) { ccHolder->slaveReadTill(slaveReadTill); } if (pq.hasOption(QueryOption_Exhaust)) { curop.debug().exhaust = true; } // Set attributes for getMore. ccHolder->setCollMetadata(collMetadata); ccHolder->setPos(numResults); // If the query had a time limit, remaining time is "rolled over" to the cursor (for // use by future getmore ops). ccHolder->setLeftoverMaxTimeMicros(curop.getRemainingMaxTimeMicros()); // Give up our reference to the CC. ccHolder.release(); } // Add the results from the query into the output buffer. result.appendData(bb.buf(), bb.len()); bb.decouple(); // Fill out the output buffer's header. QueryResult* qr = static_cast<QueryResult*>(result.header()); qr->cursorId = ccId; curop.debug().cursorid = (0 == ccId ? -1 : ccId); qr->setResultFlagsToOk(); qr->setOperation(opReply); qr->startingFrom = 0; qr->nReturned = numResults; // TODO: nscanned is bogus. // curop.debug().nscanned = ( cursor ? cursor->nscanned() : 0LL ); curop.debug().ntoskip = pq.getSkip(); curop.debug().nreturned = numResults; // curop.debug().exhaust is set above. return curop.debug().exhaust ? pq.ns() : ""; }
int main(int argc, char *argv[]) { if (argc < 2) { std::cerr << "Usage: " << argv[0] << " <problem.dat>" << std::endl; return 1; } amgcl::profiler<> prof(argv[0]); // Read matrix and rhs from a binary file. std::vector<int> row; std::vector<int> col; std::vector<real> val; std::vector<real> rhs; int n = read_problem(argv[1], row, col, val, rhs); // Initialize VexCL context. vex::Context ctx( vex::Filter::Env && vex::Filter::DoublePrecision ); if (!ctx.size()) { std::cerr << "No GPUs" << std::endl; return 1; } std::cout << ctx << std::endl; // Wrap the matrix into amgcl::sparse::map: amgcl::sparse::matrix_map<real, int> A( n, n, row.data(), col.data(), val.data() ); // Build the preconditioner. typedef amgcl::solver< real, int, amgcl::interp::smoothed_aggregation<amgcl::aggr::plain>, amgcl::level::vexcl > AMG; typename AMG::params prm; // Provide vex::Context for level construction: prm.level.ctx = &ctx; // Use K-Cycle on each level to improve convergence: prm.level.kcycle = 1; prof.tic("setup"); AMG amg(A, prm); prof.toc("setup"); std::cout << amg << std::endl; // Copy matrix and rhs to GPU(s). vex::SpMat<real, int, int> Agpu( ctx.queue(), n, n, row.data(), col.data(), val.data() ); vex::vector<real> f(ctx.queue(), rhs); // Solve the problem with CG method. Use AMG as a preconditioner: vex::vector<real> x(ctx.queue(), n); x = 0; prof.tic("solve (cg)"); auto cnv = amgcl::solve(Agpu, f, amg, x, amgcl::cg_tag()); prof.toc("solve (cg)"); std::cout << "Iterations: " << std::get<0>(cnv) << std::endl << "Error: " << std::get<1>(cnv) << std::endl << std::endl; std::cout << prof; }
/** * Also called by db/ops/query.cpp. This is the new getMore entry point. */ QueryResult* newGetMore(const char* ns, int ntoreturn, long long cursorid, CurOp& curop, int pass, bool& exhaust, bool* isCursorAuthorized) { exhaust = false; int bufSize = 512 + sizeof(QueryResult) + MaxBytesToReturnToClientAtOnce; BufBuilder bb(bufSize); bb.skip(sizeof(QueryResult)); // This is a read lock. TODO: There is a cursor flag for not needing this. Do we care? Client::ReadContext ctx(ns); // TODO: Document. replVerifyReadsOk(); ClientCursorPin ccPin(cursorid); ClientCursor* cc = ccPin.c(); // These are set in the QueryResult msg we return. int resultFlags = ResultFlag_AwaitCapable; int numResults = 0; int startingResult = 0; if (NULL == cc) { cursorid = 0; resultFlags = ResultFlag_CursorNotFound; } else { // Quote: check for spoofing of the ns such that it does not match the one originally // there for the cursor uassert(17011, "auth error", str::equals(ns, cc->ns().c_str())); *isCursorAuthorized = true; // TODO: fail point? // If the operation that spawned this cursor had a time limit set, apply leftover // time to this getmore. curop.setMaxTimeMicros(cc->getLeftoverMaxTimeMicros()); // TODO: // curop.debug().query = BSONForQuery // curop.setQuery(curop.debug().query); // TODO: What is pass? if (0 == pass) { cc->updateSlaveLocation(curop); } CollectionMetadataPtr collMetadata = cc->getCollMetadata(); // If we're replaying the oplog, we save the last time that we read. OpTime slaveReadTill; startingResult = cc->pos(); Runner* runner = cc->getRunner(); const ParsedQuery& pq = runner->getQuery().getParsed(); // Get results out of the runner. // TODO: There may be special handling required for tailable cursors? runner->restoreState(); BSONObj obj; // TODO: Differentiate EOF from error. while (runner->getNext(&obj)) { // If we're sharded make sure that we don't return any data that hasn't been // migrated off of our shard yet. if (collMetadata) { KeyPattern kp(collMetadata->getKeyPattern()); if (!collMetadata->keyBelongsToMe(kp.extractSingleKey(obj))) { continue; } } // Add result to output buffer. bb.appendBuf((void*)obj.objdata(), obj.objsize()); // Count the result. ++numResults; // Possibly note slave's position in the oplog. if (pq.hasOption(QueryOption_OplogReplay)) { BSONElement e = obj["ts"]; if (Date == e.type() || Timestamp == e.type()) { slaveReadTill = e._opTime(); } } if ((numResults && numResults >= ntoreturn) || bb.len() > MaxBytesToReturnToClientAtOnce) { break; } } cc->incPos(numResults); runner->saveState(); // Possibly note slave's position in the oplog. if (pq.hasOption(QueryOption_OplogReplay) && !slaveReadTill.isNull()) { cc->slaveReadTill(slaveReadTill); } exhaust = pq.hasOption(QueryOption_Exhaust); // If the getmore had a time limit, remaining time is "rolled over" back to the // cursor (for use by future getmore ops). cc->setLeftoverMaxTimeMicros( curop.getRemainingMaxTimeMicros() ); } QueryResult* qr = reinterpret_cast<QueryResult*>(bb.buf()); qr->len = bb.len(); qr->setOperation(opReply); qr->_resultFlags() = resultFlags; qr->cursorId = cursorid; qr->startingFrom = startingResult; qr->nReturned = numResults; bb.decouple(); return qr; }
int
be_visitor_operation_ih::visit_operation (be_operation *node)
{
  // Impl classes shouldn't have implied AMI operations.
  if (node->is_sendc_ami ())
    {
      return 0;
    }

  TAO_OutStream *os = this->ctx_->stream ();
  this->ctx_->node (node);

  *os << be_nl_2;

  if (be_global->gen_impl_debug_info ())
    {
      *os << "// TAO_IDL - Generated from" << be_nl
          << "// " << __FILE__ << ":" << __LINE__ << be_nl_2;
    }

  // Every operation is declared virtual in the client code.
  *os << "virtual" << be_nl;

  // STEP 1: generate the return type.
  be_type *bt = be_type::narrow_from_decl (node->return_type ());

  if (!bt)
    {
      ACE_ERROR_RETURN ((LM_ERROR,
                         "(%N:%l) be_visitor_operation_ih::"
                         "visit_operation - "
                         "Bad return type\n"),
                        -1);
    }

  // Grab the right visitor to generate the return type.
  be_visitor_context ctx (*this->ctx_);
  be_visitor_operation_rettype oro_visitor (&ctx);

  if (bt->accept (&oro_visitor) == -1)
    {
      ACE_ERROR_RETURN ((LM_ERROR,
                         "(%N:%l) be_visitor_operation_ih::"
                         "visit_operation - "
                         "codegen for return type failed\n"),
                        -1);
    }

  // STEP 2: generate the operation name.
  *os << " " << node->local_name ();

  // STEP 3: generate the argument list with the appropriate mapping. For these
  // we grab a visitor that generates the parameter listing.
  ctx = *this->ctx_;
  ctx.state (TAO_CodeGen::TAO_OPERATION_ARGLIST_IH);
  be_visitor_operation_arglist oa_visitor (&ctx);

  if (node->accept (&oa_visitor) == -1)
    {
      ACE_ERROR_RETURN ((LM_ERROR,
                         "(%N:%l) be_visitor_operation_ih::"
                         "visit_operation - "
                         "codegen for argument list failed\n"),
                        -1);
    }

  return 0;
}
int main(int argc, char *argv[]) { OptimizationSettings settings; Context ctx(settings); consoleHelper::parseConsoleOptions(ctx, argc, argv); ProblemData<int, double> instance; double omega = 1; std::vector<int> rowCounts; switch (ctx.settings.lossFunction) { case 0: case 1: InputOuputHelper::loadCSCData(ctx, instance); std::cout << "Data loaded with size: " << instance.m << " x " << instance.n << std::endl; rowCounts.resize(instance.m, 0); for (unsigned long i = 0; i < instance.A_csc_row_idx.size(); i++) { rowCounts[instance.A_csc_row_idx[i]]++; if (rowCounts[instance.A_csc_row_idx[i]] > omega) { omega = rowCounts[instance.A_csc_row_idx[i]]; } } break; case 2: loadDistributedSparseSVMRowData(ctx.matrixAFile, -1, -1, instance, false); if (ctx.isTestErrorFileAvailable) { ProblemData<int, double> testInstance; loadDistributedSparseSVMRowData(ctx.matrixATestFile, -1, -1, testInstance, false); instance.A_test_csr_col_idx = testInstance.A_csr_col_idx; instance.A_test_csr_row_ptr = testInstance.A_csr_row_ptr; instance.A_test_csr_values = testInstance.A_csr_values; instance.test_b = testInstance.b; } instance.lambda = ctx.lambda; rowCounts.resize(instance.m, 0); for (unsigned long i = 0; i < instance.A_csr_col_idx.size(); i++) { rowCounts[instance.A_csr_col_idx[i]]++; if (rowCounts[instance.A_csr_col_idx[i]] > omega) { omega = rowCounts[instance.A_csr_col_idx[i]]; } } instance.omegaAvg = 0; instance.omegaMin = rowCounts[0]; for (unsigned long i = 0; i < rowCounts.size(); i++) { instance.omegaAvg += rowCounts[i] / (0.0 + instance.n); if (rowCounts[i] < instance.omegaMin) { instance.omegaMin = rowCounts[i]; } } std::cout << "Omega: " << omega << std::endl; std::cout << "Omega-avg: " << instance.omegaAvg << std::endl; std::cout << "Omega-min: " << instance.omegaMin << std::endl; break; case 3: case 4: loadDistributedSparseSVMRowData(ctx.matrixAFile, -1, -1, instance, false); instance.lambda = ctx.lambda; if (ctx.isTestErrorFileAvailable) { ProblemData<int, double> testInstance; loadDistributedSparseSVMRowData(ctx.matrixATestFile, -1, -1, testInstance, false); instance.A_test_csr_col_idx = testInstance.A_csr_col_idx; instance.A_test_csr_row_ptr = testInstance.A_csr_row_ptr; instance.A_test_csr_values = testInstance.A_csr_values; instance.test_b = testInstance.b; } getCSR_from_CSC( instance.A_csr_values, //Input instance.A_csr_col_idx, instance.A_csr_row_ptr, instance.A_csc_values, //Output instance.A_csc_row_idx, instance.A_csc_col_ptr, instance.m, instance.n); int tmp=instance.n; instance.n=instance.m; instance.m=tmp; for (int i=0;i<instance.m;i++){ if (instance.A_csr_row_ptr[i+1]-instance.A_csr_row_ptr[i]>omega){ omega=instance.A_csr_row_ptr[i+1]-instance.A_csr_row_ptr[i]; } } std::cout<<"Omega is "<<omega<<std::endl; break; } instance.sigma = 1 + (omega - 1) * (ctx.settings.totalThreads - 1) / (instance.n - 1.0); MulticoreEngineExecutor<int, double> executor(instance, &(ctx.settings)); Solver<int, double> solver(executor); solver.runSolver(); }
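// A small illustrative helper (hypothetical, not part of the original program) that
// performs the same "omega" computation as the case 3/4 branch above: the largest
// number of nonzeros in any CSR row, read directly from the row-pointer array.
#include <algorithm>
#include <cstddef>
#include <vector>

int maxNonzerosPerRow(const std::vector<int>& csrRowPtr) {
    int omega = 0;
    for (std::size_t i = 0; i + 1 < csrRowPtr.size(); ++i) {
        omega = std::max(omega, csrRowPtr[i + 1] - csrRowPtr[i]);
    }
    return omega;
}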
int be_visitor_union_branch_public_ch::visit_array (be_array *node) { be_decl *ub = this->ctx_->node (); be_decl *bu = this->ctx_->scope ()->decl (); be_type *bt = 0; // Check if we are visiting this via a visit to a typedef node. if (this->ctx_->alias ()) { bt = this->ctx_->alias (); } else { bt = node; } if (!ub || !bu) { ACE_ERROR_RETURN ((LM_ERROR, "(%N:%l) be_visitor_union_branch_public_ch::" "visit_array - " "bad context information\n"), -1); } TAO_OutStream *os = this->ctx_->stream (); // Not a typedef and bt is defined inside the union. if (bt->node_type () != AST_Decl::NT_typedef && bt->is_child (bu)) { // This is the case of an anonymous array inside a union. be_visitor_context ctx (*this->ctx_); ctx.node (node); ctx.state (TAO_CodeGen::TAO_ARRAY_CH); be_visitor_array_ch visitor (&ctx); if (node->accept (&visitor) == -1) { ACE_ERROR_RETURN ((LM_ERROR, "(%N:%l) be_visitor_union_branch_public_ch::" "visit_array - " "codegen failed\n" ), -1); } ctx.state (TAO_CodeGen::TAO_ROOT_CH); *os << be_nl_2 << "// TAO_IDL - Generated from" << be_nl << "// " << __FILE__ << ":" << __LINE__; // Now use this array as a "type" for the subsequent declarator // the set method. *os << be_nl_2 << "void " << ub->local_name () << " (" << "_" << bt->local_name () << ");" << be_nl; // The get method. *os << "_" << bt->local_name () << "_slice * " << ub->local_name () << " (void) const; // get method"; } else { *os << be_nl_2 << "// TAO_IDL - Generated from" << be_nl << "// " << __FILE__ << ":" << __LINE__; // Now use this array as a "type" for the subsequent declarator // the set method. *os << be_nl_2 << "void " << ub->local_name () << " (" << bt->nested_type_name (bu) << ");" << be_nl; // The get method. *os << bt->nested_type_name (bu, "_slice *") << " " << ub->local_name () << " (void) const;"; } return 0; }
static jboolean NativeBN_BN_mod_exp(JNIEnv* env, jclass, BIGNUM* r, BIGNUM* a, BIGNUM* p, BIGNUM* m) {
    if (!fourValidHandles(env, r, a, p, m)) return JNI_FALSE;
    Unique_BN_CTX ctx(BN_CTX_new());
    return BN_mod_exp(r, a, p, m, ctx.get());
}
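// For reference, a minimal standalone sketch (not part of the original sources; it
// assumes only the OpenSSL BN API) of the BN_CTX pattern these JNI wrappers rely on:
// a scratch context is created for the call and handed to the arithmetic routine.
#include <openssl/bn.h>
#include <openssl/crypto.h>
#include <cstdio>

int main() {
    BN_CTX* ctx = BN_CTX_new();            // scratch space for temporaries
    BIGNUM* r = BN_new();
    BIGNUM* a = BN_new();
    BIGNUM* p = BN_new();
    BIGNUM* m = BN_new();
    BN_set_word(a, 4);
    BN_set_word(p, 13);
    BN_set_word(m, 497);
    if (BN_mod_exp(r, a, p, m, ctx)) {     // r = a^p mod m
        char* s = BN_bn2dec(r);
        std::printf("4^13 mod 497 = %s\n", s);   // prints 445
        OPENSSL_free(s);
    }
    BN_free(m); BN_free(p); BN_free(a); BN_free(r);
    BN_CTX_free(ctx);
    return 0;
}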
int be_visitor_union_branch_public_ch::visit_union (be_union *node) { be_decl *ub = this->ctx_->node (); be_decl *bu = this->ctx_->scope ()->decl (); be_type *bt = 0; // Check if we are visiting this via a visit to a typedef node. if (this->ctx_->alias ()) { bt = this->ctx_->alias (); } else { bt = node; } if (!ub || !bu) { ACE_ERROR_RETURN ((LM_ERROR, "(%N:%l) be_visitor_union_branch_public_ch::" "visit_union - " "bad context information\n" ), -1); } TAO_OutStream *os = this->ctx_->stream (); // Not a typedef and bt is defined inside the union. if (bt->node_type () != AST_Decl::NT_typedef && bt->is_child (bu)) { be_visitor_context ctx (*this->ctx_); ctx.node (node); be_visitor_union_ch visitor (&ctx); if (node->accept (&visitor) == -1) { ACE_ERROR_RETURN ((LM_ERROR, "(%N:%l) be_visitor_union_branch_public_ch::" "visit_union - " "codegen failed\n"), -1); } } *os << be_nl_2 << "// TAO_IDL - Generated from" << be_nl << "// " << __FILE__ << ":" << __LINE__; *os << be_nl_2 << "void " << ub->local_name () << " (const " << bt->nested_type_name (bu) << " &);" << be_nl << "const " << bt->nested_type_name (bu) << " &" << ub->local_name () << " (void) const;" << be_nl << bt->nested_type_name (bu) << " &" << ub->local_name () << " (void);"; return 0; }
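// Illustration with hypothetical names (not output of the original tool run): for an
// IDL union branch "Foo val", the stream statements above emit accessor declarations
// of roughly this shape into the generated C++ header.
class Foo { };

class GeneratedUnion {
public:
    void val (const Foo &);          // modifier
    const Foo & val (void) const;    // const accessor
    Foo & val (void);                // non-const accessor
};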
static jboolean NativeBN_BN_mod_inverse(JNIEnv* env, jclass, BIGNUM* ret, BIGNUM* a, BIGNUM* n) {
    if (!threeValidHandles(env, ret, a, n)) return JNI_FALSE;
    Unique_BN_CTX ctx(BN_CTX_new());
    return (BN_mod_inverse(ret, a, n, ctx.get()) != NULL);
}
void omxInvokeNLOPT(double *est, GradientOptimizerContext &goc) { goc.optName = "SLSQP"; goc.setupSimpleBounds(); goc.useGradient = true; FitContext *fc = goc.fc; int oldWanted = fc->wanted; fc->wanted = 0; omxState *globalState = fc->state; nlopt_opt opt = nlopt_create(NLOPT_LD_SLSQP, fc->numParam); goc.extraData = opt; //local_opt = nlopt_create(NLOPT_LD_SLSQP, n); // Subsidiary algorithm //nlopt_set_local_optimizer(opt, local_opt); nlopt_set_lower_bounds(opt, goc.solLB.data()); nlopt_set_upper_bounds(opt, goc.solUB.data()); int eq, ieq; globalState->countNonlinearConstraints(eq, ieq); if (fc->CI) { nlopt_set_xtol_rel(opt, 5e-3); std::vector<double> tol(fc->numParam, std::numeric_limits<double>::epsilon()); nlopt_set_xtol_abs(opt, tol.data()); } else { // The *2 is there to roughly equate accuracy with NPSOL. nlopt_set_ftol_rel(opt, goc.ControlTolerance * 2); nlopt_set_ftol_abs(opt, std::numeric_limits<double>::epsilon()); } nlopt_set_min_objective(opt, SLSQP::nloptObjectiveFunction, &goc); double feasibilityTolerance = Global->feasibilityTolerance; SLSQP::context ctx(goc); if (eq + ieq) { ctx.origeq = eq; if (ieq > 0){ goc.inequality.resize(ieq); std::vector<double> tol(ieq, feasibilityTolerance); nlopt_add_inequality_mconstraint(opt, ieq, SLSQP::nloptInequalityFunction, &goc, tol.data()); } if (eq > 0){ goc.equality.resize(eq); std::vector<double> tol(eq, feasibilityTolerance); nlopt_add_equality_mconstraint(opt, eq, SLSQP::nloptEqualityFunction, &ctx, tol.data()); } } int priorIterations = fc->iterations; int code = nlopt_optimize(opt, est, &fc->fit); if (ctx.eqredundent) { nlopt_remove_equality_constraints(opt); eq -= ctx.eqredundent; std::vector<double> tol(eq, feasibilityTolerance); nlopt_add_equality_mconstraint(opt, eq, SLSQP::nloptEqualityFunction, &ctx, tol.data()); code = nlopt_optimize(opt, est, &fc->fit); } if (goc.verbose >= 2) mxLog("nlopt_optimize returned %d", code); nlopt_destroy(opt); fc->wanted = oldWanted; if (code == NLOPT_INVALID_ARGS) { Rf_error("NLOPT invoked with invalid arguments"); } else if (code == NLOPT_OUT_OF_MEMORY) { Rf_error("NLOPT ran out of memory"); } else if (code == NLOPT_FORCED_STOP) { if (fc->iterations - priorIterations <= 1) { goc.informOut = INFORM_STARTING_VALUES_INFEASIBLE; } else { goc.informOut = INFORM_ITERATION_LIMIT; } } else if (code == NLOPT_ROUNDOFF_LIMITED) { if (fc->iterations - priorIterations <= 2) { Rf_error("%s: Failed due to singular matrix E or C in LSQ subproblem or " "rank-deficient equality constraint subproblem or " "positive directional derivative in line search", goc.optName); } else { goc.informOut = INFORM_NOT_AT_OPTIMUM; // is this correct? TODO } } else if (code < 0) { Rf_error("NLOPT fatal error %d", code); } else if (code == NLOPT_MAXEVAL_REACHED) { goc.informOut = INFORM_ITERATION_LIMIT; } else { goc.informOut = INFORM_CONVERGED_OPTIMUM; } }
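// A minimal self-contained sketch (illustrative objective, not part of the original
// code) of the nlopt_* call sequence used above: create an SLSQP optimizer, register
// a gradient-aware objective, set tolerances, optimize, and destroy.
#include <nlopt.h>
#include <cstdio>

static double quadratic(unsigned n, const double* x, double* grad, void* /*data*/) {
    (void)n;
    if (grad) grad[0] = 2.0 * (x[0] - 3.0);   // SLSQP is gradient-based
    return (x[0] - 3.0) * (x[0] - 3.0);
}

int main() {
    nlopt_opt opt = nlopt_create(NLOPT_LD_SLSQP, 1);
    nlopt_set_min_objective(opt, quadratic, NULL);
    nlopt_set_ftol_rel(opt, 1e-8);

    double x[1] = {0.0};
    double fval = 0.0;
    nlopt_result code = nlopt_optimize(opt, x, &fval);
    std::printf("nlopt_optimize returned %d, x = %g, f = %g\n", (int)code, x[0], fval);

    nlopt_destroy(opt);
    return 0;
}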
static jboolean NativeBN_BN_is_prime_ex(JNIEnv* env, jclass, BIGNUM* p, int nchecks, jint cb) {
    if (!oneValidHandle(env, p)) return JNI_FALSE;
    Unique_BN_CTX ctx(BN_CTX_new());
    return BN_is_prime_ex(p, nchecks, ctx.get(), reinterpret_cast<BN_GENCB*>(cb));
}
Status MMAPV1Engine::repairDatabase( OperationContext* txn, const std::string& dbName, bool preserveClonedFilesOnFailure, bool backupOriginalFiles ) { // We must hold some form of lock here invariant(txn->lockState()->threadState()); invariant( dbName.find( '.' ) == string::npos ); scoped_ptr<RepairFileDeleter> repairFileDeleter; doingRepair dr; log() << "repairDatabase " << dbName << endl; BackgroundOperation::assertNoBgOpInProgForDb(dbName); txn->recoveryUnit()->syncDataAndTruncateJournal(); // Must be done before and after repair intmax_t totalSize = dbSize( dbName ); intmax_t freeSize = File::freeSpace(storageGlobalParams.repairpath); if ( freeSize > -1 && freeSize < totalSize ) { return Status( ErrorCodes::OutOfDiskSpace, str::stream() << "Cannot repair database " << dbName << " having size: " << totalSize << " (bytes) because free disk space is: " << freeSize << " (bytes)" ); } txn->checkForInterrupt(); Path reservedPath = uniqueReservedPath( ( preserveClonedFilesOnFailure || backupOriginalFiles ) ? "backup" : "_tmp" ); MONGO_ASSERT_ON_EXCEPTION( boost::filesystem::create_directory( reservedPath ) ); string reservedPathString = reservedPath.string(); if ( !preserveClonedFilesOnFailure ) repairFileDeleter.reset( new RepairFileDeleter( txn, dbName, reservedPathString, reservedPath ) ); { Database* originalDatabase = dbHolder().get(txn, dbName); if (originalDatabase == NULL) { return Status(ErrorCodes::NamespaceNotFound, "database does not exist to repair"); } scoped_ptr<Database> tempDatabase; { MMAPV1DatabaseCatalogEntry* entry = new MMAPV1DatabaseCatalogEntry( txn, dbName, reservedPathString, storageGlobalParams.directoryperdb, true ); invariant( !entry->exists() ); tempDatabase.reset( new Database( txn, dbName, entry ) ); } map<string,CollectionOptions> namespacesToCopy; { string ns = dbName + ".system.namespaces"; Client::Context ctx(txn, ns ); Collection* coll = originalDatabase->getCollection( txn, ns ); if ( coll ) { scoped_ptr<RecordIterator> it( coll->getIterator( DiskLoc(), false, CollectionScanParams::FORWARD ) ); while ( !it->isEOF() ) { DiskLoc loc = it->getNext(); BSONObj obj = coll->docFor( loc ); string ns = obj["name"].String(); NamespaceString nss( ns ); if ( nss.isSystem() ) { if ( nss.isSystemDotIndexes() ) continue; if ( nss.coll() == "system.namespaces" ) continue; } if ( !nss.isNormal() ) continue; CollectionOptions options; if ( obj["options"].isABSONObj() ) { Status status = options.parse( obj["options"].Obj() ); if ( !status.isOK() ) return status; } namespacesToCopy[ns] = options; } } } for ( map<string,CollectionOptions>::const_iterator i = namespacesToCopy.begin(); i != namespacesToCopy.end(); ++i ) { string ns = i->first; CollectionOptions options = i->second; Collection* tempCollection = NULL; { Client::Context tempContext(txn, ns, tempDatabase ); tempCollection = tempDatabase->createCollection( txn, ns, options, true, false ); } Client::Context readContext(txn, ns, originalDatabase); Collection* originalCollection = originalDatabase->getCollection( txn, ns ); invariant( originalCollection ); // data MultiIndexBlock indexBlock(txn, tempCollection ); { vector<BSONObj> indexes; IndexCatalog::IndexIterator ii = originalCollection->getIndexCatalog()->getIndexIterator( false ); while ( ii.more() ) { IndexDescriptor* desc = ii.next(); indexes.push_back( desc->infoObj() ); } Client::Context tempContext(txn, ns, tempDatabase); Status status = indexBlock.init( indexes ); if ( !status.isOK() ) return status; } scoped_ptr<RecordIterator> iterator( 
originalCollection->getIterator( DiskLoc(), false, CollectionScanParams::FORWARD ) ); while ( !iterator->isEOF() ) { DiskLoc loc = iterator->getNext(); invariant( !loc.isNull() ); BSONObj doc = originalCollection->docFor( loc ); Client::Context tempContext(txn, ns, tempDatabase); StatusWith<DiskLoc> result = tempCollection->insertDocument( txn, doc, indexBlock ); if ( !result.isOK() ) return result.getStatus(); txn->recoveryUnit()->commitIfNeeded(); txn->checkForInterrupt(false); } { Client::Context tempContext(txn, ns, tempDatabase); Status status = indexBlock.commit(); if ( !status.isOK() ) return status; } } txn->recoveryUnit()->syncDataAndTruncateJournal(); globalStorageEngine->flushAllFiles(true); // need both in case journaling is disabled txn->checkForInterrupt(false); } // at this point if we abort, we don't want to delete new files // as they might be the only copies if ( repairFileDeleter.get() ) repairFileDeleter->success(); Client::Context ctx(txn, dbName); Database::closeDatabase(txn, dbName); if ( backupOriginalFiles ) { _renameForBackup( dbName, reservedPath ); } else { // first make new directory before deleting data Path newDir = Path(storageGlobalParams.dbpath) / dbName; MONGO_ASSERT_ON_EXCEPTION(boost::filesystem::create_directory(newDir)); // this deletes old files _deleteDataFiles( dbName ); if ( !boost::filesystem::exists(newDir) ) { // we deleted because of directoryperdb // re-create MONGO_ASSERT_ON_EXCEPTION(boost::filesystem::create_directory(newDir)); } } _replaceWithRecovered( dbName, reservedPathString.c_str() ); if ( !backupOriginalFiles ) MONGO_ASSERT_ON_EXCEPTION( boost::filesystem::remove_all( reservedPath ) ); return Status::OK(); }
std::shared_ptr<room_entity> create_room_entity( ID3D11Device* device, const gx::shader_database* database, std::istream& in ) { uint32_t version = 0; int8_t header[9] = {"AtiModel"}; int8_t read_header[9]; int32_t animation_type = 0; offset_table offsets = {}; thread_local_context_initializer initializer(device); concurrency::combinable<thread_local_context> ctx ( initializer ); concurrency::structured_task_group tasks; d3d11::itexture2d_ptr m_textures[7]; auto task0 = concurrency::make_task( wic_loader( device, &m_textures[0], L"media/giroom/lopal.bmp", &ctx )); auto task1 = concurrency::make_task( wic_loader( device, &m_textures[1], L"media/giroom/headpal.bmp", &ctx )); auto task2 = concurrency::make_task( dds_loader( device, &m_textures[2], L"media/giroom/picture.dds") ); auto task3 = concurrency::make_task( dds_loader( device, &m_textures[3], L"media/giroom/floor.dds") ) ; auto task4 = concurrency::make_task( dds_loader( device, &m_textures[4], L"media/giroom/globe.dds") ) ; auto task5 = concurrency::make_task( wic_loader( device, &m_textures[5], L"media/giroom/wall_lm.bmp", &ctx )); auto task6 = concurrency::make_task( dds_loader( device, &m_textures[6], L"media/giroom/ceiling_lm.dds")); tasks.run(task0); tasks.run(task1); tasks.run(task2); tasks.run(task3); tasks.run(task4); tasks.run(task5); tasks.run(task6); in.read((char*) &read_header[0], 8); if ( in.eof() ) { return nullptr; } read_header[8] = '\0'; if ( _strnicmp( (char*) &read_header[0], (char*) &header[0], 8 ) !=0 ) { return nullptr; } in.read( (char*) &animation_type, sizeof(animation_type)); in.read( (char*) &version, sizeof(version)); in.read( (char*) &offsets, sizeof(offsets) ); in.seekg(offsets.m_material_chunk, std::ios_base::beg); uint32_t material_count = 0; in.read( (char*) &material_count, sizeof(material_count) ); std::vector<material> materials( material_count ); for (uint32_t i = 0 ; i < material_count; ++i) { in.seekg(64, std::ios_base::cur); in.seekg(sizeof(float4), std::ios_base::cur); in.seekg(sizeof(float4), std::ios_base::cur); in.seekg(sizeof(float), std::ios_base::cur); in.read( (char*) &materials[i].diffuse_mp_file, 64 ); in.seekg(64, std::ios_base::cur); in.read( (char*) &materials[i].bump_map_file, 64 ); in.seekg(64, std::ios_base::cur); in.seekg(64, std::ios_base::cur); } in.seekg(offsets.m_vertex_chunk, std::ios_base::beg); uint32_t vertex_count = 0; in.read( (char*) &vertex_count, sizeof(vertex_count) ); std::vector<float3> positions( vertex_count ); std::vector<float3> normal( vertex_count ); std::vector<float3> tangent( vertex_count ); std::vector<float3> binormal( vertex_count ); std::vector<float2> uv( vertex_count ); std::vector<float4> color( vertex_count ); in.read( (char*) &positions[0], vertex_count * sizeof(float3) ); in.read( (char*) &normal[0], vertex_count * sizeof(float3) ); in.read( (char*) &tangent[0], vertex_count * sizeof(float3) ); in.read( (char*) &binormal[0], vertex_count * sizeof(float3) ); in.read( (char*) &uv[0], vertex_count * sizeof(float2) ); in.read( (char*) &color[0], vertex_count * sizeof(float4) ); size_t size = positions.size(); size_t padded_size = 24 * ( ( size + 23 ) / 24 ) ; for (uint32_t i = 0; i < padded_size - size; ++i) { float3 pad3 = {0.0f, 0.0f, 0.0f}; float2 pad2 = {0.0f, 0.0f}; float4 pad4 = {1.0f, 1.0f,1.0f,1.0f}; positions.push_back(pad3); normal.push_back(pad3); tangent.push_back(pad3); binormal.push_back(pad3); uv.push_back(pad2); color.push_back(pad4); } in.seekg(offsets.m_triangle_chunk, std::ios_base::beg); uint32_t triangle_count = 0; in.read( 
(char*) &triangle_count, sizeof(triangle_count) ); std::vector<triangle> triangles(triangle_count); in.read( (char*) &triangles[0], triangle_count * sizeof(triangle) ); in.seekg(offsets.m_mesh_chunk, std::ios_base::beg); uint32_t mesh_count = 0; in.read( (char*) &mesh_count, sizeof(mesh_count) ); std::vector<mesh> meshes(mesh_count); for ( uint32_t i = 0; i < mesh_count; ++i) { meshes[i].m_id = i; in.read( (char*) &meshes[i].m_name[0], 64 ); in.read( (char*) &meshes[i].m_material_index, sizeof(uint32_t) ); in.read( (char*) &meshes[i].m_base_vertex, sizeof(uint32_t) ); in.read( (char*) &meshes[i].m_vertex_count, sizeof(uint32_t) ); in.read( (char*) &meshes[i].m_base_triangle, sizeof(uint32_t) ); in.read( (char*) &meshes[i].m_triangle_count, sizeof(uint32_t) ); uint32_t bone_count = 0; in.read( (char*) &bone_count, sizeof(uint32_t) ); meshes[i].m_bones.resize(bone_count); if (bone_count > 0 ) { in.read( (char*) &meshes[i].m_bones[0], bone_count * sizeof(uint32_t) ); } in.read( (char*) &meshes[i].m_parent_id, sizeof(uint32_t) ); uint32_t child_count = 0; in.read( (char*) &child_count, sizeof(uint32_t) ); meshes[i].m_children.resize(child_count); if (child_count > 0) { in.read( (char*) &meshes[i].m_children[0], child_count * sizeof(uint32_t) ); } uint32_t primitive_count = 0; in.read( (char*) &primitive_count, sizeof(uint32_t) ); meshes[i].m_primitives.resize(primitive_count); for(uint32_t j = 0; j < primitive_count;++j) { uint32_t type = 0; in.read( (char*) &type, sizeof(uint32_t) ); switch (type) { case PL_TRIANGLE_STRIP: meshes[i].m_primitives[j].m_type = D3D11_PRIMITIVE_TOPOLOGY_TRIANGLESTRIP; break; case PL_TRIANGLE_LIST: default: meshes[i].m_primitives[j].m_type = D3D11_PRIMITIVE_TOPOLOGY_TRIANGLELIST; break; } uint32_t indices_count = 0; in.read( (char*) &indices_count, sizeof(uint32_t) ); meshes[i].m_primitives[j].m_indices.resize(indices_count); if (indices_count > 0) { in.read( (char*) &meshes[i].m_primitives[j].m_indices[0], indices_count * sizeof(uint32_t) ); } } uint32_t position_key_count = 0; in.read( (char*) &position_key_count, sizeof(uint32_t) ); meshes[i].m_position_keys.resize(position_key_count); if (position_key_count > 0) { in.read( (char*) &meshes[i].m_position_keys[0], position_key_count * sizeof(animation_key) ); } uint32_t rotation_key_count = 0; in.read( (char*) &rotation_key_count, sizeof(uint32_t) ); meshes[i].m_rotation_keys.resize(rotation_key_count); if ( rotation_key_count > 0) { in.read( (char*) &meshes[i].m_rotation_keys[0], rotation_key_count * sizeof(animation_key) ); } uint32_t scale_key_count = 0; in.read( (char*) &scale_key_count, sizeof(uint32_t) ); meshes[i].m_scale_keys.resize(scale_key_count); if (scale_key_count > 0) { in.read( (char*) &meshes[i].m_scale_keys[0], scale_key_count * sizeof(animation_key) ); } } std::vector<math::half> positions_h( 4 * padded_size ); std::vector<math::half> normals_h( 4 * padded_size ); std::vector<math::half> uv_h( 2 * padded_size ); math::convert_3_x_f32_f16_stream( reinterpret_cast<float*> ( &positions[0]), 3 * padded_size, 1.0f, &positions_h[0]); math::convert_3_x_f32_f16_stream( reinterpret_cast<float*> ( &normal[0]), 3 * padded_size, 1.0f, &normals_h[0]); math::convert_f32_f16_stream( reinterpret_cast<float*> ( &uv[0]), 2 * padded_size, &uv_h[0]); //combine normals and uvs std::vector<math::half> normals_uv( 6 * padded_size); uint32_t j = 0; uint32_t k = 0; for ( uint32_t i = 0; i < 6 * padded_size; i+=6, j+=4, k+=2) { normals_uv[i] = normals_h[j]; normals_uv[i+1] = normals_h[j+1]; normals_uv[i+2] = 
normals_h[j+2]; normals_uv[i+3] = normals_h[j+3]; normals_uv[i+4] = uv_h[k]; normals_uv[i+5] = uv_h[k+1]; } uint32_t material_range[14]; std::vector<uint32_t> indices( triangle_count * 3); uint32_t index = 0; material_range[0] = 0; // Untextured materials add_to_material_range(&indices[0], index, 0, 19, 1, &meshes[0], &triangles[0], &material_range[0]); // Hand add_to_material_range(&indices[0], index, 1, 20, 1, &meshes[0], &triangles[0], &material_range[0]); // Ball add_to_material_range(&indices[0], index, 1, 23, 1, &meshes[0], &triangles[0], &material_range[0]); // Horse add_to_material_range(&indices[0], index, 1, 25, 1, &meshes[0], &triangles[0], &material_range[0]); // Sci-Fi weirdo add_to_material_range(&indices[0], index, 1, 28, 1, &meshes[0], &triangles[0], &material_range[0]); // Bench add_to_material_range(&indices[0], index, 1, 30, 1, &meshes[0], &triangles[0], &material_range[0]); // Frame add_to_material_range(&indices[0], index, 2, 24, 1, &meshes[0], &triangles[0], &material_range[0]); // Horse stand add_to_material_range(&indices[0], index, 2, 26, 1, &meshes[0], &triangles[0], &material_range[0]); // Sci-Fi weirdo stand add_to_material_range(&indices[0], index, 2, 32, 1, &meshes[0], &triangles[0], &material_range[0]); // Globe stand add_to_material_range(&indices[0], index, 3, 3, 15, &meshes[0], &triangles[0], &material_range[0]); // Ceiling, Pillars, Stands, Wall lights add_to_material_range(&indices[0], index, 4, 0, 1, &meshes[0], &triangles[0], &material_range[0]); // Walls add_to_material_range(&indices[0], index, 5, 21, 1, &meshes[0], &triangles[0], &material_range[0]); // Teapot // Masked materials add_to_material_range(&indices[0], index, 6, 27, 1, &meshes[0], &triangles[0], &material_range[0]); // Globe // Textured materials add_to_material_range(&indices[0], index, 7, 18, 1, &meshes[0], &triangles[0], &material_range[0]); // Ball-horse add_to_material_range(&indices[0], index, 8, 22, 1, &meshes[0], &triangles[0], &material_range[0]); // Head add_to_material_range(&indices[0], index, 9, 29, 1, &meshes[0], &triangles[0], &material_range[0]); // Picture add_to_material_range(&indices[0], index, 10, 1, 1, &meshes[0], &triangles[0], &material_range[0]); // Floor // Lightmapped materials add_to_material_range(&indices[0], index, 11, 2, 1, &meshes[0], &triangles[0], &material_range[0]); // Ceiling add_to_material_range(&indices[0], index, 12, 31, 1, &meshes[0], &triangles[0], &material_range[0]); // Wall light quads //create buffers auto positions_vb = d3d11::create_immutable_vertex_buffer( device, &positions_h[0], positions_h.size() * sizeof(math::half) ); auto normals_uvs_vb = d3d11::create_immutable_vertex_buffer( device, &normals_uv[0], normals_uv.size() * sizeof(math::half) ); auto indices_ib = d3d11::create_immutable_index_buffer( device, &indices[0], indices.size() * sizeof(uint32_t) ); //wait until initializer lists are supported in vs2012 std::vector<room_entity::material> materials_shade; room_entity::material materials_shade_[10]= { { math::set( 0.816f, 0.216f, 0.227f, 0.0f), math::set( 0.45f, 0.15f, 0.15f, gx::encode_specular_power(16.0f)) }, { math::set( 0.435f, 0.443f, 0.682f, 0.0f), math::set( 0.3f, 0.3f, 0.6f, gx::encode_specular_power(16.0f)) }, { math::set( 0.29f, 0.482f, 0.298f, 0.0f), math::set( 0.15f, 0.3f, 0.15f, gx::encode_specular_power(16.0f)) }, { math::set( 0.973f, 0.894f, 0.8f, 0.0f), math::set( 0.5f, 0.5f, 0.5f, gx::encode_specular_power(16.0f)) }, { math::set( 1.0f, 0.6f, 0.2f, 0.0f), math::set( 4.0f, 2.4f, 1.6f, 
gx::encode_specular_power(24.0f)) }, { math::set( 1.0f, 1.0f, 1.0f, 0.0f), math::set( 0.3f, 0.4f, 0.6f, gx::encode_specular_power(4.0f)) }, { math::set( 0.25f, 0.7f, 0.8f, 0.0f), math::set( 0.7f, 0.7f, 0.8f, gx::encode_specular_power(4.0f)) }, { math::set( 0.2f, 0.2f, 0.2f, 0.0f), math::set( 0.7f, 0.7f, 0.7f, gx::encode_specular_power(16.0f)) }, { math::set( 0.616f, 0.494f, 0.361f, 0.0f), math::set( 0.1f, 0.1f, 0.1f, gx::encode_specular_power(32.0f)) }, { math::set( 0.5f, 0.5f, 0.5f, 0.0f), math::set( 0.7f, 0.7f, 0.7f, gx::encode_specular_power(16.0f)) } }; std::copy ( &materials_shade_[0], &materials_shade_[0] + 10, std::back_insert_iterator<std::vector<room_entity::material>>(materials_shade)); std::vector<room_entity::draw_call> indexed_draw_calls; indexed_draw_calls.reserve(13); for(uint32_t i = 0; i < 13; ++i) { room_entity::draw_call::index_info info = {}; info.m_vertex_size[0] = 8; info.m_vertex_size[1] = 12; uint32_t start = material_range[i]; uint32_t end = material_range[i+1]; info.m_start_index_location = start; info.m_index_count = end - start; d3d11::ibuffer_ptr vertex_buffer_s[] = { positions_vb, normals_uvs_vb } ; indexed_draw_calls.push_back ( room_entity::draw_call( info, vertex_buffer_s, indices_ib ) ) ; } if ( in.eof() ) { return nullptr; } tasks.wait(); ctx.combine_each([&](thread_local_context& local) { d3d11::icommandlist_ptr command_list; local.m_device_context->FinishCommandList( false, dx::get_pointer(command_list)); d3d11::idevicecontext_ptr immediate_context; device->GetImmediateContext(dx::get_pointer(immediate_context) ); immediate_context->ExecuteCommandList(command_list.get(), true ); } ); float specular_power = gx::encode_specular_power(25.0f); std::vector<gx::gbuffer_dt_ng_sc_gc_material> textures_materials; textures_materials.push_back( gx::create_gbuffer_dt_ng_sc_gc_material( device, database, m_textures[0], math::set(0.05f, 0.05f, 0.05f, specular_power ), false ) ) ; textures_materials.push_back( gx::create_gbuffer_dt_ng_sc_gc_material( device, database, m_textures[1], math::set(0.05f, 0.05f, 0.05f, specular_power ), false ) ) ; textures_materials.push_back( gx::create_gbuffer_dt_ng_sc_gc_material( device, database, m_textures[2], math::set(0.05f, 0.05f, 0.05f, specular_power ), false ) ) ; textures_materials.push_back( gx::create_gbuffer_dt_ng_sc_gc_material( device, database, m_textures[3], math::set(0.05f, 0.05f, 0.05f, specular_power ), false ) ) ; textures_materials.push_back( gx::create_gbuffer_dt_ng_sc_gc_material( device, database, m_textures[4], math::set(0.05f, 0.05f, 0.05f, specular_power ), true ) ) ; textures_materials.push_back( gx::create_gbuffer_dt_ng_sc_gc_material( device, database, m_textures[5], math::set(0.05f, 0.05f, 0.05f, specular_power ), false ) ) ; textures_materials.push_back( gx::create_gbuffer_dt_ng_sc_gc_material( device, database, m_textures[6], math::set(0.05f, 0.05f, 0.05f, specular_power ), false ) ) ; return std::make_shared<room_entity> ( std::begin(indexed_draw_calls), std::end(indexed_draw_calls), gx::create_blinn_phong_shift_invairant_material( database, math::set( 1.0f, 0.0f, 0.0f, 0.0f), math::set(0.05f, 0.05f, 0.05f, specular_power )), std::move(materials_shade), std::move(textures_materials) ); }
void tst_dl_rule_set() { enable_trace("mk_filter_rules"); front_end_params params; ast_manager m; smtlib::parser * parser = smtlib::parser::create(m); parser->initialize_smtlib(); datalog::context ctx(m, params); datalog::rule_set rs(ctx); datalog::rule_manager& rm = ctx.get_rule_manager(); datalog::rule_ref_vector rv(rm); if (!parser->parse_string( "(benchmark test\n" ":extrapreds ((T Int Int) (Q Int Int) (R Int Int Int) (S Int Int Int) (DynActual Int Int Int) (GlobalSym Int Int) (HeapPointsTo Int Int Int) (Calls Int Int)) \n" ":extrapreds ((Actual Int Int Int) (PointsTo Int Int) (PointsTo0 Int Int) (FuncDecl0 Int Int) (Assign Int Int) (Load Int Int Int))\n" ":formula (forall (x Int) (=> (Q x 1) (T x x)))\n" ":formula (forall (v Int) (h Int) (=> (PointsTo0 v h) (PointsTo v h)))\n" ":formula (forall (v Int) (h Int) (=> (FuncDecl0 v h) (PointsTo v h)))\n" ":formula (forall (v Int) (h Int) (=> (FuncDecl0 v h) (PointsTo v h)))\n" ":formula (forall (v1 Int) (v2 Int) (h Int) (=> (and (PointsTo v2 h) (Assign v1 v2)) (PointsTo v1 h)))\n" ":formula (forall (x Int) (y Int) (z Int) (=> (and (Q x y) (T y z)) (T x y)))\n" ":formula (forall (i1 Int) (v Int) (fun Int) (c Int) (v1 Int) (h Int) (h1 Int) (=> (and (GlobalSym 0 fun) (HeapPointsTo fun 1 c) (Calls i1 c) (Actual i1 3 v1) (PointsTo v1 h) (HeapPointsTo h 0 h1) (PointsTo v h1)) (DynActual i1 2 v)))\n" ":formula (forall (i1 Int) (v Int) (fun Int) (c Int) (v1 Int) (h Int) (h1 Int) (=> (and (GlobalSym 0 fun) (HeapPointsTo fun 1 c) (Calls i1 c) (Actual i1 3 v1) (PointsTo v1 h) (HeapPointsTo h 1 h1) (PointsTo v h1)) (DynActual i1 3 v)))\n" ":formula (forall (i1 Int) (v Int) (fun Int) (c Int) (v1 Int) (h Int) (h1 Int) (=> (and (GlobalSym 0 fun) (HeapPointsTo fun 1 c) (Calls i1 c) (Actual i1 3 v1) (PointsTo v1 h) (HeapPointsTo h 2 h1) (PointsTo v h1)) (DynActual i1 4 v)))\n" ":formula (forall (v1 Int) (v2 Int) (h1 Int) (h2 Int) (f Int) (=> (and (Load v2 v1 f) (PointsTo v1 h1) (HeapPointsTo h1 f h2)) (PointsTo v2 h1)))\n" ":formula (forall (v1 Int) (v2 Int) (h1 Int) (h2 Int) (f Int) (=> (and (Load v2 v1 0) (HeapPointsTo h1 f h2)) (PointsTo v2 h1)))\n" ":formula (forall (v1 Int) (v2 Int) (h1 Int) (h2 Int) (f Int) (=> (and (not (Load v2 v1 0)) (HeapPointsTo h1 f h2)) (PointsTo v2 h1)))\n" ")")) { SASSERT(false); dealloc(parser); return; } smtlib::benchmark * b = parser->get_benchmark(); for (unsigned j = 0; j < b->get_num_formulas(); ++j) { expr * e = b->begin_formulas()[j]; ptr_vector<expr> todo; todo.push_back(e); while (!todo.empty()) { e = todo.back(); todo.pop_back(); if (is_quantifier(e)) { e = to_quantifier(e)->get_expr(); todo.push_back(e); } else if (is_app(e)) { app* a = to_app(e); if (is_uninterp(e) && !ctx.is_predicate(a->get_decl())) { std::cout << "registering " << a->get_decl()->get_name() << "\n"; ctx.register_predicate(a->get_decl()); } else { todo.append(a->get_num_args(), a->get_args()); } } } } for (unsigned j = 0; j < b->get_num_formulas(); ++j) { expr * e = b->begin_formulas()[j]; if (is_quantifier(e)) { try { rm.mk_rule(e, rv); } catch(...) 
{ std::cerr << "ERROR: it is not a valid Datalog rule:\n" << mk_pp(e, m) << "\n"; } } } rs.add_rules(rv.size(), rv.c_ptr()); rs.display(std::cout); datalog::mk_filter_rules p(ctx); model_converter_ref mc; proof_converter_ref pc; datalog::rule_set * new_rs = p(rs, mc, pc); std::cout << "\nAfter mk_filter:\n"; new_rs->display(std::cout); datalog::mk_simple_joins p2(ctx); datalog::rule_set * new_rs2 = p2(*new_rs, mc, pc); std::cout << "\nAfter mk_simple_joins:\n"; new_rs2->display(std::cout); dealloc(new_rs); dealloc(new_rs2); dealloc(parser); }
// // Add a rule to the policy database // CFDictionaryRef PolicyEngine::add(CFTypeRef inTarget, AuthorityType type, SecAssessmentFlags flags, CFDictionaryRef context) { // default type to execution if (type == kAuthorityInvalid) type = kAuthorityExecute; authorizeUpdate(flags, context); CFDictionary ctx(context, errSecCSInvalidAttributeValues); CFCopyRef<CFTypeRef> target = inTarget; CFRef<CFDataRef> bookmark = NULL; std::string filter_unsigned; switch (type) { case kAuthorityExecute: normalizeTarget(target, ctx, &filter_unsigned); // bookmarks are untrusted and just a hint to callers bookmark = ctx.get<CFDataRef>(kSecAssessmentRuleKeyBookmark); break; case kAuthorityInstall: if (inTarget && CFGetTypeID(inTarget) == CFURLGetTypeID()) { // no good way to turn an installer file into a requirement. Pretend to succeed so caller proceeds return cfmake<CFDictionaryRef>("{%O=%O}", kSecAssessmentAssessmentAuthorityOverride, CFSTR("virtual install")); } break; case kAuthorityOpenDoc: // handle document-open differently: use quarantine flags for whitelisting if (!target || CFGetTypeID(target) != CFURLGetTypeID()) // can only "add" file paths MacOSError::throwMe(errSecCSInvalidObjectRef); try { std::string spath = cfString(target.as<CFURLRef>()); FileQuarantine qtn(spath.c_str()); qtn.setFlag(QTN_FLAG_ASSESSMENT_OK); qtn.applyTo(spath.c_str()); } catch (const CommonError &error) { // could not set quarantine flag - report qualified success return cfmake<CFDictionaryRef>("{%O=%O,'assessment:error'=%d}", kSecAssessmentAssessmentAuthorityOverride, CFSTR("error setting quarantine"), error.osStatus()); } catch (...) { return cfmake<CFDictionaryRef>("{%O=%O}", kSecAssessmentAssessmentAuthorityOverride, CFSTR("unable to set quarantine")); } return NULL; } // if we now have anything else, we're busted if (!target || CFGetTypeID(target) != SecRequirementGetTypeID()) MacOSError::throwMe(errSecCSInvalidObjectRef); double priority = 0; string label; bool allow = true; double expires = never; string remarks; if (CFNumberRef pri = ctx.get<CFNumberRef>(kSecAssessmentUpdateKeyPriority)) CFNumberGetValue(pri, kCFNumberDoubleType, &priority); if (CFStringRef lab = ctx.get<CFStringRef>(kSecAssessmentUpdateKeyLabel)) label = cfString(lab); if (CFDateRef time = ctx.get<CFDateRef>(kSecAssessmentUpdateKeyExpires)) // we're using Julian dates here; convert from CFDate expires = dateToJulian(time); if (CFBooleanRef allowing = ctx.get<CFBooleanRef>(kSecAssessmentUpdateKeyAllow)) allow = allowing == kCFBooleanTrue; if (CFStringRef rem = ctx.get<CFStringRef>(kSecAssessmentUpdateKeyRemarks)) remarks = cfString(rem); CFRef<CFStringRef> requirementText; MacOSError::check(SecRequirementCopyString(target.as<SecRequirementRef>(), kSecCSDefaultFlags, &requirementText.aref())); SQLite::Transaction xact(*this, SQLite3::Transaction::deferred, "add_rule"); SQLite::Statement insert(*this, "INSERT INTO authority (type, allow, requirement, priority, label, expires, filter_unsigned, remarks)" " VALUES (:type, :allow, :requirement, :priority, :label, :expires, :filter_unsigned, :remarks);"); insert.bind(":type").integer(type); insert.bind(":allow").integer(allow); insert.bind(":requirement") = requirementText.get(); insert.bind(":priority") = priority; if (!label.empty()) insert.bind(":label") = label; insert.bind(":expires") = expires; insert.bind(":filter_unsigned") = filter_unsigned.empty() ?
NULL : filter_unsigned.c_str(); if (!remarks.empty()) insert.bind(":remarks") = remarks; insert.execute(); SQLite::int64 newRow = this->lastInsert(); if (bookmark) { SQLite::Statement bi(*this, "INSERT INTO bookmarkhints (bookmark, authority) VALUES (:bookmark, :authority)"); bi.bind(":bookmark") = CFDataRef(bookmark); bi.bind(":authority").integer(newRow); bi.execute(); } this->purgeObjects(priority); xact.commit(); notify_post(kNotifySecAssessmentUpdate); return cfmake<CFDictionaryRef>("{%O=%d}", kSecAssessmentUpdateKeyRow, newRow); }
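The dateToJulian helper used above is not shown in this listing. A plausible sketch, assuming it performs the standard conversion from the CFAbsoluteTime epoch (2001-01-01 00:00:00 UTC, whose Julian date is 2451910.5) to a Julian day number, would be:

#include <CoreFoundation/CoreFoundation.h>

// Hypothetical sketch of dateToJulian; the real helper may differ.
// 2451910.5 is the Julian date of the CFAbsoluteTime epoch (2001-01-01 00:00:00 UTC).
static double dateToJulian(CFDateRef time)
{
    static const double julianOfCFEpoch = 2451910.5;
    return julianOfCFEpoch + CFDateGetAbsoluteTime(time) / 86400.0; // 86400 seconds per day
}

This matches how the "expires" column is consumed above: a plain double, with the sentinel value "never" used when no expiry date is supplied.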
static void insert( const BSONObj &o, bool god = false ) { Lock::DBWrite lk(ns()); Client::Context ctx( ns() ); theDataFileMgr.insert( ns(), o.objdata(), o.objsize(), god ); }
bool wrappedRun(OperationContext* txn, const string& dbname, BSONObj& jsobj, string& errmsg, BSONObjBuilder& anObjBuilder) { BSONElement e = jsobj.firstElement(); const string toDeleteNs = dbname + '.' + e.valuestr(); if (!serverGlobalParams.quiet) { LOG(0) << "CMD: dropIndexes " << toDeleteNs << endl; } Client::Context ctx(txn, toDeleteNs); Database* db = ctx.db(); Collection* collection = db->getCollection( txn, toDeleteNs ); if ( ! collection ) { errmsg = "ns not found"; return false; } stopIndexBuilds(txn, db, jsobj); IndexCatalog* indexCatalog = collection->getIndexCatalog(); anObjBuilder.appendNumber("nIndexesWas", indexCatalog->numIndexesTotal(txn) ); BSONElement f = jsobj.getField("index"); if ( f.type() == String ) { string indexToDelete = f.valuestr(); if ( indexToDelete == "*" ) { Status s = indexCatalog->dropAllIndexes(txn, false); if ( !s.isOK() ) { appendCommandStatus( anObjBuilder, s ); return false; } anObjBuilder.append("msg", "non-_id indexes dropped for collection"); return true; } IndexDescriptor* desc = collection->getIndexCatalog()->findIndexByName( txn, indexToDelete ); if ( desc == NULL ) { errmsg = str::stream() << "index not found with name [" << indexToDelete << "]"; return false; } if ( desc->isIdIndex() ) { errmsg = "cannot drop _id index"; return false; } Status s = indexCatalog->dropIndex(txn, desc); if ( !s.isOK() ) { appendCommandStatus( anObjBuilder, s ); return false; } return true; } if ( f.type() == Object ) { IndexDescriptor* desc = collection->getIndexCatalog()->findIndexByKeyPattern( txn, f.embeddedObject() ); if ( desc == NULL ) { errmsg = "can't find index with key:"; errmsg += f.embeddedObject().toString(); return false; } if ( desc->isIdIndex() ) { errmsg = "cannot drop _id index"; return false; } Status s = indexCatalog->dropIndex(txn, desc); if ( !s.isOK() ) { appendCommandStatus( anObjBuilder, s ); return false; } return true; } errmsg = "invalid index name spec"; return false; }
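For reference, wrappedRun() takes the collection name from the command object's first element and the index selector from its "index" field, which may be either a string (an index name, or "*" for all non-_id indexes) or a key-pattern object. A minimal illustration of both accepted shapes, built with the MongoDB BSON() macro; the collection name "coll" and the key pattern are illustrative only:

// Illustrative command objects for the handler above (fragment, not a full program).
BSONObj dropByName = BSON("dropIndexes" << "coll" << "index" << "a_1");          // drop one index by name; "*" drops all non-_id indexes
BSONObj dropByKey  = BSON("dropIndexes" << "coll" << "index" << BSON("a" << 1)); // drop one index by key pattern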
void insert() { Client::Context ctx( cappedNs() ); BSONObj o = BSON(GENOID << "x" << 456); DiskLoc loc = theDataFileMgr.insert( cappedNs().c_str(), o.objdata(), o.objsize(), false ); verify(!loc.isNull()); }
bool run(OperationContext* txn, const string& dbname , BSONObj& jsobj, int, string& errmsg, BSONObjBuilder& result, bool /*fromRepl*/) { DBDirectClient db(txn); BSONElement e = jsobj.firstElement(); string toDeleteNs = dbname + '.' + e.valuestr(); LOG(0) << "CMD: reIndex " << toDeleteNs << endl; Lock::DBWrite dbXLock(txn->lockState(), dbname); Client::Context ctx(txn, toDeleteNs); Collection* collection = ctx.db()->getCollection( txn, toDeleteNs ); if ( !collection ) { errmsg = "ns not found"; return false; } BackgroundOperation::assertNoBgOpInProgForNs( toDeleteNs ); std::vector<BSONObj> indexesInProg = stopIndexBuilds(txn, ctx.db(), jsobj); vector<BSONObj> all; { vector<string> indexNames; collection->getCatalogEntry()->getAllIndexes( txn, &indexNames ); for ( size_t i = 0; i < indexNames.size(); i++ ) { const string& name = indexNames[i]; BSONObj spec = collection->getCatalogEntry()->getIndexSpec( txn, name ); all.push_back(spec.removeField("v").getOwned()); const BSONObj key = spec.getObjectField("key"); const Status keyStatus = validateKeyPattern(key); if (!keyStatus.isOK()) { errmsg = str::stream() << "Cannot rebuild index " << spec << ": " << keyStatus.reason() << " For more info see http://dochub.mongodb.org/core/index-validation"; return false; } } } result.appendNumber( "nIndexesWas", all.size() ); { WriteUnitOfWork wunit(txn); Status s = collection->getIndexCatalog()->dropAllIndexes(txn, true); if ( !s.isOK() ) { errmsg = "dropIndexes failed"; return appendCommandStatus( result, s ); } wunit.commit(); } MultiIndexBlock indexer(txn, collection); indexer.allowBackgroundBuilding(); // do not want interruption as that will leave us without indexes. Status status = indexer.init(all); if (!status.isOK()) return appendCommandStatus( result, status ); status = indexer.insertAllDocumentsInCollection(); if (!status.isOK()) return appendCommandStatus( result, status ); { WriteUnitOfWork wunit(txn); indexer.commit(); wunit.commit(); } result.append( "nIndexes", (int)all.size() ); result.append( "indexes", all ); IndexBuilder::restoreIndexes(indexesInProg); return true; }
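As with dropIndexes above, this handler reads the target collection from the command object's first element; everything else is rebuilt from the catalog's stored index specs (with the "v" version field stripped, as the code shows). An illustrative invocation, with a hypothetical collection name:

// Illustrative only: asks the handler above to drop and rebuild every index on "coll".
BSONObj cmd = BSON("reIndex" << "coll");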
void IndexRebuilder::checkNS(const std::list<std::string>& nsToCheck) { bool firstTime = true; for (std::list<std::string>::const_iterator it = nsToCheck.begin(); it != nsToCheck.end(); ++it) { string ns = *it; LOG(3) << "IndexRebuilder::checkNS: " << ns; // This write lock is held throughout the index building process // for this namespace. Client::WriteContext ctx(ns); Collection* collection = ctx.ctx().db()->getCollection( ns ); if ( collection == NULL ) continue; IndexCatalog* indexCatalog = collection->getIndexCatalog(); if ( collection->ns().isOplog() && indexCatalog->numIndexesTotal() > 0 ) { warning() << ns << " had illegal indexes, removing"; indexCatalog->dropAllIndexes( true ); continue; } vector<BSONObj> indexesToBuild = indexCatalog->getAndClearUnfinishedIndexes(); // The indexes have now been removed from system.indexes, so the only record is // in-memory. If there is a journal commit between now and when insert() rewrites // the entry and the db crashes before the new system.indexes entry is journalled, // the index will be lost forever. Thus, we're assuming no journaling will happen // between now and the entry being re-written. if ( indexesToBuild.size() == 0 ) { continue; } log() << "found " << indexesToBuild.size() << " interrupted index build(s) on " << ns; if (firstTime) { log() << "note: restart the server with --noIndexBuildRetry to skip index rebuilds"; firstTime = false; } if (!serverGlobalParams.indexBuildRetry) { log() << " not rebuilding interrupted indexes"; continue; } // TODO: these can/should/must be done in parallel for ( size_t i = 0; i < indexesToBuild.size(); i++ ) { BSONObj indexObj = indexesToBuild[i]; log() << "going to rebuild: " << indexObj; Status status = indexCatalog->createIndex( indexObj, false ); if ( !status.isOK() ) { log() << "building index failed: " << status.toString() << " index: " << indexObj; } } } }
void appendReplicationInfo(OperationContext* opCtx, BSONObjBuilder& result, int level) { ReplicationCoordinator* replCoord = ReplicationCoordinator::get(opCtx); if (replCoord->getSettings().usingReplSets()) { IsMasterResponse isMasterResponse; replCoord->fillIsMasterForReplSet(&isMasterResponse); result.appendElements(isMasterResponse.toBSON()); if (level) { replCoord->appendSlaveInfoData(&result); } return; } result.appendBool("ismaster", ReplicationCoordinator::get(opCtx)->isMasterForReportingPurposes()); if (level) { BSONObjBuilder sources(result.subarrayStart("sources")); int n = 0; list<BSONObj> src; { const NamespaceString localSources{"local.sources"}; AutoGetCollectionForReadCommand ctx(opCtx, localSources); auto exec = InternalPlanner::collectionScan( opCtx, localSources.ns(), ctx.getCollection(), PlanExecutor::NO_YIELD); BSONObj obj; PlanExecutor::ExecState state; while (PlanExecutor::ADVANCED == (state = exec->getNext(&obj, NULL))) { src.push_back(obj.getOwned()); } // Non-yielding collection scans from InternalPlanner will never error. invariant(PlanExecutor::IS_EOF == state); } for (list<BSONObj>::const_iterator i = src.begin(); i != src.end(); i++) { BSONObj s = *i; BSONObjBuilder bb; bb.append(s["host"]); string sourcename = s["source"].valuestr(); if (sourcename != "main") bb.append(s["source"]); { BSONElement e = s["syncedTo"]; BSONObjBuilder t(bb.subobjStart("syncedTo")); t.appendDate("time", e.timestampTime()); t.append("inc", e.timestampInc()); t.done(); } if (level > 1) { invariant(!opCtx->lockState()->isLocked()); // note: there is no so-style timeout on this connection; perhaps we should have // one. ScopedDbConnection conn(s["host"].valuestr()); DBClientConnection* cliConn = dynamic_cast<DBClientConnection*>(&conn.conn()); if (cliConn && replAuthenticate(cliConn)) { BSONObj first = conn->findOne((string) "local.oplog.$" + sourcename, Query().sort(BSON("$natural" << 1))); BSONObj last = conn->findOne((string) "local.oplog.$" + sourcename, Query().sort(BSON("$natural" << -1))); bb.appendDate("masterFirst", first["ts"].timestampTime()); bb.appendDate("masterLast", last["ts"].timestampTime()); const auto lag = (last["ts"].timestampTime() - s["syncedTo"].timestampTime()); bb.append("lagSeconds", durationCount<Milliseconds>(lag) / 1000.0); } conn.done(); } sources.append(BSONObjBuilder::numStr(n++), bb.obj()); } sources.done(); replCoord->appendSlaveInfoData(&result); } }