Example #1
 void init(){
     serverID.init();
     setupSIGTRAPforGDB();
 }
Example #2
 void init() {
     type = static_cast<char>(jstOID);
     strcpy( id, "_id" );
     oid.init();
     verify( size() == 17 );  // 1 type byte + 4 bytes for "_id\0" + 12-byte OID = 17
 }
Example #3
/* ****************************************************************************
*
* mongoUpdateContextSubscription - 
*/
HttpStatusCode mongoUpdateContextSubscription(UpdateContextSubscriptionRequest* requestP, UpdateContextSubscriptionResponse* responseP, Format inFormat, const std::string& tenant)
{
  reqSemTake(__FUNCTION__, "ngsi10 update subscription request");

  LM_T(LmtMongo, ("Update Context Subscription"));

  DBClientBase* connection = getMongoConnection();

  /* Look for document */
  BSONObj  sub;
  try
  {
      OID id = OID(requestP->subscriptionId.get());

      mongoSemTake(__FUNCTION__, "findOne in SubscribeContextCollection");
      sub = connection->findOne(getSubscribeContextCollectionName(tenant).c_str(), BSON("_id" << id));
      mongoSemGive(__FUNCTION__, "findOne in SubscribeContextCollection");
      LM_I(("Database Operation Successful (findOne _id: %s)", id.toString().c_str()));
  }
  catch (const AssertionException &e)
  {
      /* This happens when OID format is wrong */
      // FIXME P4: this checking should be done at the parsing stage, without progressing to
      // mongoBackend. For the moment we can leave this here, but we should remove it in the future
      // (old issue #95)
      mongoSemGive(__FUNCTION__, "findOne in SubscribeContextCollection (mongo assertion exception)");
      reqSemGive(__FUNCTION__, "ngsi10 update subscription request (mongo assertion exception)");

      responseP->subscribeError.errorCode.fill(SccContextElementNotFound);
      LM_W(("Bad Input (invalid OID format)"));
      return SccOk;
  }
  catch (const DBException &e)
  {
      mongoSemGive(__FUNCTION__, "findOne in SubscribeContextCollection (mongo db exception)");
      reqSemGive(__FUNCTION__, "ngsi10 update subscription request (mongo db exception)");

      responseP->subscribeError.errorCode.fill(SccReceiverInternalError,
                                               std::string("collection: ") + getSubscribeContextCollectionName(tenant).c_str() +
                                               " - findOne() _id: " + requestP->subscriptionId.get() +
                                               " - exception: " + e.what());
      LM_E(("Database Error (%s)", responseP->subscribeError.errorCode.details.c_str()));
      return SccOk;
  }
  catch (...)
  {
      mongoSemGive(__FUNCTION__, "findOne in SubscribeContextCollection (mongo generic exception)");
      reqSemGive(__FUNCTION__, "ngsi10 update subscription request (mongo generic exception)");

      responseP->subscribeError.errorCode.fill(SccReceiverInternalError,
                                               std::string("collection: ") + getSubscribeContextCollectionName(tenant).c_str() +
                                               " - findOne() _id: " + requestP->subscriptionId.get() +
                                               " - exception: " + "generic");
      LM_E(("Database Error (%s)", responseP->subscribeError.errorCode.details.c_str()));
      return SccOk;
  }


  if (sub.isEmpty()) {
      responseP->subscribeError.errorCode.fill(SccContextElementNotFound);
      reqSemGive(__FUNCTION__, "ngsi10 update subscription request (no subscriptions found)");
      return SccOk;
  }

  /* We start with an empty BSONObjBuilder and process requestP for all the fields that can
   * be updated. I don't like this strategy too much (I would have preferred to start with
   * a copy of the original document, then modify it as needed, but that doesn't seem easy
   * using the API provided by the Mongo C++ driver).
   *
   * FIXME: a better implementation strategy could be doing a findAndModify() query to do the
   * update, thus detecting whether the document was not found, instead of using findOne() +
   * update() with a $set operation. One operation to MongoDB vs. two operations.
   */
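  /* A minimal sketch of the findAndModify() alternative mentioned in the FIXME above.
   * This is an assumption-laden illustration, not actual Orion code: 'dbName', 'collName'
   * and 'updatedFields' are hypothetical placeholders.
   *
   *   BSONObj result;
   *   connection->runCommand(dbName,
   *                          BSON("findAndModify" << collName <<
   *                               "query"  << BSON("_id" << id) <<
   *                               "update" << BSON("$set" << updatedFields) <<
   *                               "new"    << true),
   *                          result);
   *
   *   // result["value"] is null when no document matched, so the not-found case is
   *   // detected in the same round trip that performs the update.
   */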
  BSONObjBuilder newSub;

  /* Entities, attribute list and reference are not updatable, so they are appended directly */
  newSub.appendArray(CSUB_ENTITIES, sub.getField(CSUB_ENTITIES).Obj());
  newSub.appendArray(CSUB_ATTRS, sub.getField(CSUB_ATTRS).Obj());
  newSub.append(CSUB_REFERENCE, STR_FIELD(sub, CSUB_REFERENCE));

  /* Duration update */
  if (requestP->duration.isEmpty()) {      
      newSub.append(CSUB_EXPIRATION, sub.getField(CSUB_EXPIRATION).numberLong());
  }
  else {
      long long expiration = getCurrentTime() + requestP->duration.parse();
      newSub.append(CSUB_EXPIRATION, expiration);
      LM_T(LmtMongo, ("New subscription expiration: %l", expiration));
  }

  /* Restriction update */
  // FIXME: Restrictions not implemented yet

  /* Throttling update */
  if (!requestP->throttling.isEmpty()) {
      /* Throttling equal to 0 removes throttling */
      long long throttling = requestP->throttling.parse();
      if (throttling != 0) {
          newSub.append(CSUB_THROTTLING, throttling);
      }
  }
  else {
      /* The hasField check is needed because throttling might not be present in the original doc */
      if (sub.hasField(CSUB_THROTTLING)) {
          newSub.append(CSUB_THROTTLING, sub.getField(CSUB_THROTTLING).numberLong());
      }
  }

  /* Notify conditions */
  bool notificationDone = false;
  if (requestP->notifyConditionVector.size() == 0) {
      newSub.appendArray(CSUB_CONDITIONS, sub.getField(CSUB_CONDITIONS).embeddedObject());
  }
  else {
      /* Destroy any previous ONTIMEINTERVAL thread */
      getNotifier()->destroyOntimeIntervalThreads(requestP->subscriptionId.get());

      /* Build conditions array (including side-effect notifications and thread creation).
       * In order to do so, we have to create an EntityIdVector and an AttributeList from the
       * sub document, given the processConditionVector() signature */
       EntityIdVector enV = subToEntityIdVector(sub);
       AttributeList attrL = subToAttributeList(sub);

       BSONArray conds = processConditionVector(&requestP->notifyConditionVector,
                                                enV,
                                                attrL,
                                                requestP->subscriptionId.get(),
                                                C_STR_FIELD(sub, CSUB_REFERENCE),
                                                &notificationDone,
                                                inFormat,
                                                tenant);
       newSub.appendArray(CSUB_CONDITIONS, conds);

       /* Remove EntityIdVector and AttributeList dynamic memory */
       enV.release();
       attrL.release();
  }

  int count = sub.hasField(CSUB_COUNT) ? sub.getIntField(CSUB_COUNT) : 0;

  /* Last notification */
  if (notificationDone) {
      newSub.append(CSUB_LASTNOTIFICATION, getCurrentTime());
      newSub.append(CSUB_COUNT, count + 1);
  }
  else {
      /* The hasField check is needed because lastNotification/count might not be present in the original doc */
      if (sub.hasField(CSUB_LASTNOTIFICATION)) {
          newSub.append(CSUB_LASTNOTIFICATION, sub.getIntField(CSUB_LASTNOTIFICATION));
      }
      if (sub.hasField(CSUB_COUNT)) {
          newSub.append(CSUB_COUNT, count);
      }
  }

  /* Adding format to use in notifications */
  newSub.append(CSUB_FORMAT, std::string(formatToString(inFormat)));

  /* Update document in MongoDB */
  BSONObj update = newSub.obj();
  try
  {
      LM_T(LmtMongo, ("update() in '%s' collection _id '%s': %s}", getSubscribeContextCollectionName(tenant).c_str(),
                         requestP->subscriptionId.get().c_str(),
                         update.toString().c_str()));
      mongoSemTake(__FUNCTION__, "update in SubscribeContextCollection");
      connection->update(getSubscribeContextCollectionName(tenant).c_str(), BSON("_id" << OID(requestP->subscriptionId.get())), update);
      mongoSemGive(__FUNCTION__, "update in SubscribeContextCollection");
      LM_I(("Database Operation Successful (update _id: %s, %s)", requestP->subscriptionId.get().c_str(), update.toString().c_str()));
  }
  catch (const DBException &e)
  {
      mongoSemGive(__FUNCTION__, "update in SubscribeContextCollection (mongo db exception)");
      reqSemGive(__FUNCTION__, "ngsi10 update subscription request (mongo db exception)");
      responseP->subscribeError.errorCode.fill(SccReceiverInternalError,
                                               std::string("collection: ") + getSubscribeContextCollectionName(tenant).c_str() +
                                               " - update() _id: " + requestP->subscriptionId.get().c_str() +
                                               " - update() doc: " + update.toString() +
                                               " - exception: " + e.what());

      LM_E(("Database Error (%s)", responseP->subscribeError.errorCode.details.c_str()));
      return SccOk;
  }
  catch (...)
  {
      mongoSemGive(__FUNCTION__, "update in SubscribeContextCollection (mongo generic exception)");
      reqSemGive(__FUNCTION__, "ngsi10 update subscription request (mongo generic exception)");
      responseP->subscribeError.errorCode.fill(SccReceiverInternalError,
                                               std::string("collection: ") + getSubscribeContextCollectionName(tenant).c_str() +
                                               " - update() _id: " + requestP->subscriptionId.get().c_str() +
                                               " - update() doc: " + update.toString() +
                                               " - exception: " + "generic");

      LM_E(("Database Error (%s)", responseP->subscribeError.errorCode.details.c_str()));
      return SccOk;
  }

  /* Duration and throttling are optional parameters; they are only added to the response
   * if they were used in the update */
  if (!requestP->duration.isEmpty()) {      
      responseP->subscribeResponse.duration = requestP->duration;
  }
  if (!requestP->throttling.isEmpty()) {      
      responseP->subscribeResponse.throttling = requestP->throttling;
  }  
  responseP->subscribeResponse.subscriptionId = requestP->subscriptionId;

  reqSemGive(__FUNCTION__, "ngsi10 update subscription request");
  return SccOk;
}
Example #4
    /**
     * Upgrade v3 to v4 described here.
     *
     * This upgrade takes a config server that (potentially) lacks collection epochs and
     * adds epochs to all mongo processes.
     *
     */
    bool doUpgradeV3ToV4(const ConnectionString& configLoc,
                         const VersionType& lastVersionInfo,
                         string* errMsg)
    {
        string dummy;
        if (!errMsg) errMsg = &dummy;

        verify(lastVersionInfo.getCurrentVersion() == UpgradeHistory_NoEpochVersion);

        if (lastVersionInfo.isUpgradeIdSet() && lastVersionInfo.getUpgradeId().isSet()) {

            //
            // Another upgrade failed, so cleanup may be necessary
            //

            BSONObj lastUpgradeState = lastVersionInfo.getUpgradeState();

            bool inCriticalSection;
            if (!FieldParser::extract(lastUpgradeState,
                                      inCriticalSectionField,
                                      &inCriticalSection,
                                      errMsg))
            {

                *errMsg = stream() << "problem reading previous upgrade state" << causedBy(errMsg);

                return false;
            }

            if (inCriticalSection) {

                // Manual intervention is needed here.  Somehow our upgrade didn't get applied
                // consistently across config servers.

                *errMsg = cannotCleanupMessage;

                return false;
            }

            if (!_cleanupUpgradeState(configLoc, lastVersionInfo.getUpgradeId(), errMsg)) {
                
                // If we can't cleanup the old upgrade state, the user might have done it for us,
                // not a fatal problem (we'll just end up with extra collections).
                
                warning() << "could not cleanup previous upgrade state" << causedBy(errMsg) << endl;
                *errMsg = "";
            }
        }

        //
        // Check the versions of other mongo processes in the cluster before upgrade.
        // We can't upgrade if there are active pre-v2.2 processes in the cluster
        //

        Status mongoVersionStatus = checkClusterMongoVersions(configLoc,
                                                              string(minMongoProcessVersion));

        if (!mongoVersionStatus.isOK()) {

            *errMsg = stream() << "cannot upgrade with pre-v" << minMongoProcessVersion
                               << " mongo processes active in the cluster"
                               << causedBy(mongoVersionStatus);

            return false;
        }

        VersionType newVersionInfo;
        lastVersionInfo.cloneTo(&newVersionInfo);

        // Set our upgrade id and state
        OID upgradeId = OID::gen();
        newVersionInfo.setUpgradeId(upgradeId);
        newVersionInfo.setUpgradeState(BSONObj());

        // Write our upgrade id and state
        {
            scoped_ptr<ScopedDbConnection> connPtr;

            try {
                connPtr.reset(ScopedDbConnection::getInternalScopedDbConnection(configLoc, 30));
                ScopedDbConnection& conn = *connPtr;

                verify(newVersionInfo.isValid(NULL));

                conn->update(VersionType::ConfigNS,
                             BSON("_id" << 1 << VersionType::version_DEPRECATED(3)),
                             newVersionInfo.toBSON());
                _checkGLE(conn);
            }
            catch (const DBException& e) {

                *errMsg = stream() << "could not initialize version info for upgrade"
                                   << causedBy(e);

                return false;
            }

            connPtr->done();
        }

        //
        // First lock all collection namespaces that exist
        //

        OwnedPointerMap<string, CollectionType> ownedCollections;
        const map<string, CollectionType*>& collections = ownedCollections.map();

        Status findCollectionsStatus = findAllCollectionsV3(configLoc, &ownedCollections);

        if (!findCollectionsStatus.isOK()) {

            *errMsg = stream() << "could not read collections from config server"
                               << causedBy(findCollectionsStatus);

            return false;
        }

        //
        // Acquire locks for all sharded collections
        // Something that didn't involve getting thousands of locks would be better.
        //

        OwnedPointerVector<ScopedDistributedLock> collectionLocks;

        log() << "acquiring locks for " << collections.size() << " sharded collections..." << endl;
        
        // WARNING - this string is used programmatically when forcing locks, be careful when
        // changing!
        // TODO: Add programmatic "why" field to lock collection
        string lockMessage = str::stream() << "ensuring epochs for config upgrade"
                                           << " (" << upgradeId.toString() << ")";
        
        if (!_acquireAllCollectionLocks(configLoc,
                                        collections,
                                        lockMessage,
                                        20 * 60 * 1000,
                                        &collectionLocks,
                                        errMsg))
        {

            *errMsg = stream() << "could not acquire all namespace locks for upgrade" 
                               << " (" << upgradeId.toString() << ")"
                               << causedBy(errMsg);

            return false;
        }

        // We are now preventing all splits and migrates for all sharded collections

        // Get working and backup suffixes
        string workingSuffix = genWorkingSuffix(upgradeId);
        string backupSuffix = genBackupSuffix(upgradeId);

        log() << "copying collection and chunk metadata to working and backup collections..."
              << endl;

        // Get a backup and working copy of the config.collections and config.chunks collections

        Status copyStatus = copyFrozenCollection(configLoc,
                                                 CollectionType::ConfigNS,
                                                 CollectionType::ConfigNS + workingSuffix);

        if (!copyStatus.isOK()) {

            *errMsg = stream() << "could not copy " << CollectionType::ConfigNS << " to "
                               << (CollectionType::ConfigNS + workingSuffix)
                               << causedBy(copyStatus);

            return false;
        }

        copyStatus = copyFrozenCollection(configLoc,
                                          CollectionType::ConfigNS,
                                          CollectionType::ConfigNS + backupSuffix);

        if (!copyStatus.isOK()) {

            *errMsg = stream() << "could not copy " << CollectionType::ConfigNS << " to "
                               << (CollectionType::ConfigNS + backupSuffix) << causedBy(copyStatus);

            return false;
        }

        copyStatus = copyFrozenCollection(configLoc,
                                          ChunkType::ConfigNS,
                                          ChunkType::ConfigNS + workingSuffix);

        if (!copyStatus.isOK()) {

            *errMsg = stream() << "could not copy " << ChunkType::ConfigNS << " to "
                               << (ChunkType::ConfigNS + workingSuffix) << causedBy(copyStatus);

            return false;
        }

        copyStatus = copyFrozenCollection(configLoc,
                                          ChunkType::ConfigNS,
                                          ChunkType::ConfigNS + backupSuffix);

        if (!copyStatus.isOK()) {

            *errMsg = stream() << "could not copy " << ChunkType::ConfigNS << " to "
                               << (ChunkType::ConfigNS + backupSuffix) << causedBy(copyStatus);

            return false;
        }

        //
        // Go through sharded collections one-by-one and add epochs where missing
        //

        for (map<string, CollectionType*>::const_iterator it = collections.begin();
                it != collections.end(); ++it)
        {
            // Create a copy so that we can change the epoch later
            CollectionType collection;
            it->second->cloneTo(&collection);

            log() << "checking epochs for " << collection.getNS() << " collection..." << endl;

            OID epoch = collection.getEpoch();

            //
            // Go through chunks to find epoch if we haven't found it or to verify epoch is the same
            //

            OwnedPointerVector<ChunkType> ownedChunks;
            const vector<ChunkType*>& chunks = ownedChunks.vector();

            Status findChunksStatus = findAllChunks(configLoc, collection.getNS(), &ownedChunks);

            if (!findChunksStatus.isOK()) {

                *errMsg = stream() << "could not read chunks from config server"
                                   << causedBy(findChunksStatus);

                return false;
            }

            for (vector<ChunkType*>::const_iterator chunkIt = chunks.begin();
                    chunkIt != chunks.end(); ++chunkIt)
            {
                const ChunkType& chunk = *(*chunkIt);

                // If our chunk epoch is set and doesn't match
                if (epoch.isSet() && chunk.getVersion().epoch().isSet()
                    && chunk.getVersion().epoch() != epoch)
                {

                    *errMsg = stream() << "chunk epoch for " << chunk.toString() << " in "
                                       << collection.getNS() << " does not match found epoch "
                                       << epoch;

                    return false;
                }
                else if (!epoch.isSet() && chunk.getVersion().epoch().isSet()) {
                    epoch = chunk.getVersion().epoch();
                }
            }

            //
            // Write collection epoch if needed
            //

            if (!collection.getEpoch().isSet()) {

                OID newEpoch = OID::gen();

                log() << "writing new epoch " << newEpoch << " for " << collection.getNS()
                      << " collection..." << endl;

                scoped_ptr<ScopedDbConnection> connPtr;

                try {
                    connPtr.reset(ScopedDbConnection::getInternalScopedDbConnection(configLoc, 30));
                    ScopedDbConnection& conn = *connPtr;

                    conn->update(CollectionType::ConfigNS + workingSuffix,
                                 BSON(CollectionType::ns(collection.getNS())),
                                 BSON("$set" << BSON(CollectionType::DEPRECATED_lastmodEpoch(newEpoch))));
                    _checkGLE(conn);
                }
                catch (const DBException& e) {

                    *errMsg = stream() << "could not write a new epoch for " << collection.getNS()
                                       << causedBy(e);

                    return false;
                }

                connPtr->done();
                collection.setEpoch(newEpoch);
            }

            epoch = collection.getEpoch();
            verify(epoch.isSet());

            //
            // Now write verified epoch to all chunks
            //

            log() << "writing epoch " << epoch << " for " << chunks.size() << " chunks in "
                  << collection.getNS() << " collection..." << endl;

            {
                scoped_ptr<ScopedDbConnection> connPtr;

                try {
                    connPtr.reset(ScopedDbConnection::getInternalScopedDbConnection(configLoc, 30));
                    ScopedDbConnection& conn = *connPtr;

                    // Multi-update of all chunks
                    conn->update(ChunkType::ConfigNS + workingSuffix,
                                 BSON(ChunkType::ns(collection.getNS())),
                                 BSON("$set" << BSON(ChunkType::DEPRECATED_epoch(epoch))),
                                 false,
                                 true); // multi
                    _checkGLE(conn);
                }
                catch (const DBException& e) {

                    *errMsg = stream() << "could not write a new epoch " << epoch.toString()
                                       << " for chunks in " << collection.getNS() << causedBy(e);

                    return false;
                }

                connPtr->done();
            }
        }

        //
        // Paranoid verify the collection writes
        //

        {
            scoped_ptr<ScopedDbConnection> connPtr;

            try {
                connPtr.reset(ScopedDbConnection::getInternalScopedDbConnection(configLoc, 30));
                ScopedDbConnection& conn = *connPtr;

                // Find collections with no epochs
                BSONObj emptyDoc =
                        conn->findOne(CollectionType::ConfigNS + workingSuffix,
                                      BSON(CollectionType::DEPRECATED_lastmodEpoch() << BSON("$exists" << false)));

                if (!emptyDoc.isEmpty()) {

                    *errMsg = stream() << "collection " << emptyDoc
                                       << " is still missing epoch after config upgrade";

                    connPtr->done();
                    return false;
                }

                // Find collections with empty epochs
                emptyDoc = conn->findOne(CollectionType::ConfigNS + workingSuffix,
                                         BSON(CollectionType::DEPRECATED_lastmodEpoch(OID())));

                if (!emptyDoc.isEmpty()) {

                    *errMsg = stream() << "collection " << emptyDoc
                                       << " still has empty epoch after config upgrade";

                    connPtr->done();
                    return false;
                }

                // Find chunks with no epochs
                emptyDoc =
                        conn->findOne(ChunkType::ConfigNS + workingSuffix,
                                      BSON(ChunkType::DEPRECATED_epoch() << BSON("$exists" << false)));

                if (!emptyDoc.isEmpty()) {

                    *errMsg = stream() << "chunk " << emptyDoc
                                       << " is still missing epoch after config upgrade";

                    connPtr->done();
                    return false;
                }

                // Find chunks with empty epochs
                emptyDoc = conn->findOne(ChunkType::ConfigNS + workingSuffix,
                                         BSON(ChunkType::DEPRECATED_epoch(OID())));

                if (!emptyDoc.isEmpty()) {

                    *errMsg = stream() << "chunk " << emptyDoc
                                       << " still has empty epoch after config upgrade";

                    connPtr->done();
                    return false;
                }
            }
            catch (const DBException& e) {

                *errMsg = stream() << "could not verify epoch writes" << causedBy(e);

                return false;
            }

            connPtr->done();
        }

        //
        // Double check that our collections haven't changed
        //

        Status idCheckStatus = checkIdsTheSame(configLoc,
                                               CollectionType::ConfigNS,
                                               CollectionType::ConfigNS + workingSuffix);

        if (!idCheckStatus.isOK()) {

            *errMsg = stream() << CollectionType::ConfigNS
                               << " was modified while working on upgrade"
                               << causedBy(idCheckStatus);

            return false;
        }

        idCheckStatus = checkIdsTheSame(configLoc,
                                        ChunkType::ConfigNS,
                                        ChunkType::ConfigNS + workingSuffix);

        if (!idCheckStatus.isOK()) {

            *errMsg = stream() << ChunkType::ConfigNS << " was modified while working on upgrade"
                               << causedBy(idCheckStatus);

            return false;
        }

        //
        // ENTER CRITICAL SECTION
        //

        newVersionInfo.setUpgradeState(BSON(inCriticalSectionField(true)));

        {
            scoped_ptr<ScopedDbConnection> connPtr;

            try {
                connPtr.reset(ScopedDbConnection::getInternalScopedDbConnection(configLoc, 30));
                ScopedDbConnection& conn = *connPtr;

                verify(newVersionInfo.isValid(NULL));

                conn->update(VersionType::ConfigNS,
                             BSON("_id" << 1 << VersionType::version_DEPRECATED(3)),
                             newVersionInfo.toBSON());
                _checkGLE(conn);
            }
            catch (const DBException& e) {

                // No cleanup message here since we're not sure if we wrote or not, and
                // not dangerous either way except to prevent further updates (at which point
                // the message is printed)
                *errMsg = stream()
                        << "could not update version info to enter critical update section"
                        << causedBy(e);

                return false;
            }

            // AT THIS POINT ANY FAILURE REQUIRES MANUAL INTERVENTION!
            connPtr->done();
        }

        log() << "entered critical section for config upgrade" << endl;

        Status overwriteStatus = overwriteCollection(configLoc,
                                                     CollectionType::ConfigNS + workingSuffix,
                                                     CollectionType::ConfigNS);

        if (!overwriteStatus.isOK()) {

            error() << cleanupMessage << endl;
            *errMsg = stream() << "could not overwrite collection " << CollectionType::ConfigNS
                               << " with working collection "
                               << (CollectionType::ConfigNS + workingSuffix)
                               << causedBy(overwriteStatus);

            return false;
        }

        overwriteStatus = overwriteCollection(configLoc,
                                              ChunkType::ConfigNS + workingSuffix,
                                              ChunkType::ConfigNS);

        if (!overwriteStatus.isOK()) {

            error() << cleanupMessage << endl;
            *errMsg = stream() << "could not overwrite collection " << ChunkType::ConfigNS
                               << " with working collection "
                               << (ChunkType::ConfigNS + workingSuffix)
                               << causedBy(overwriteStatus);

            return false;
        }

        //
        // Finally update the version to latest and add clusterId to version
        //

        OID newClusterId = OID::gen();

        // Note: hardcoded versions, since this is a very particular upgrade
        // Note: DO NOT CLEAR the config version unless bumping the minCompatibleVersion,
        // we want to save the excludes that were set.

        newVersionInfo.setMinCompatibleVersion(UpgradeHistory_NoEpochVersion);
        newVersionInfo.setCurrentVersion(UpgradeHistory_MandatoryEpochVersion);
        newVersionInfo.setClusterId(newClusterId);

        // Leave critical section
        newVersionInfo.unsetUpgradeId();
        newVersionInfo.unsetUpgradeState();

        log() << "writing new version info and clusterId " << newClusterId << "..." << endl;

        {
            scoped_ptr<ScopedDbConnection> connPtr;

            try {
                connPtr.reset(ScopedDbConnection::getInternalScopedDbConnection(configLoc, 30));
                ScopedDbConnection& conn = *connPtr;

                verify(newVersionInfo.isValid(NULL));

                conn->update(VersionType::ConfigNS,
                             BSON("_id" << 1 << VersionType::version_DEPRECATED(UpgradeHistory_NoEpochVersion)),
                             newVersionInfo.toBSON());
                _checkGLE(conn);
            }
            catch (const DBException& e) {

                error() << cleanupMessage << endl;
                *errMsg = stream() << "could not write new version info "
                                   << "and exit critical upgrade section" << causedBy(e);

                return false;
            }

            connPtr->done();
        }

        //
        // END CRITICAL SECTION
        //

        return true;
    }
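A plausible sketch of the _checkGLE() helper the upgrade code calls after each write (the real implementation may differ; the error code and message here are invented):

    static void _checkGLE(ScopedDbConnection& conn) {
        // Force a getLastError round trip so that write errors surface as exceptions,
        // which the callers' catch (const DBException&) blocks then handle.
        string err = conn->getLastError();
        if (!err.empty()) {
            uasserted(16621, str::stream() << "write to config server failed: " << err);
        }
    }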
Example #5
File: d_logic.cpp Project: Eric-Lu/mongo
    bool _handlePossibleShardedMessage( Message &m, DbResponse* dbresponse ) {
        DEV assert( shardingState.enabled() );

        int op = m.operation();
        if ( op < 2000
                || op >= 3000
                || op == dbGetMore  // cursors are weird
           )
            return false;

        DbMessage d(m);
        const char *ns = d.getns();
        string errmsg;
        // We don't care about the version here, since we're returning it later in the writeback
        ConfigVersion received, wanted;
        if ( shardVersionOk( ns , errmsg, received, wanted ) ) {
            return false;
        }

        LOG(1) << "connection meta data too old - will retry ns:(" << ns << ") op:(" << opToString(op) << ") " << errmsg << endl;

        if ( doesOpGetAResponse( op ) ) {
            assert( dbresponse );
            BufBuilder b( 32768 );
            b.skip( sizeof( QueryResult ) );
            {
                BSONObj obj = BSON( "$err" << errmsg << "ns" << ns );
                b.appendBuf( obj.objdata() , obj.objsize() );
            }

            QueryResult *qr = (QueryResult*)b.buf();
            qr->_resultFlags() = ResultFlag_ErrSet | ResultFlag_ShardConfigStale;
            qr->len = b.len();
            qr->setOperation( opReply );
            qr->cursorId = 0;
            qr->startingFrom = 0;
            qr->nReturned = 1;
            b.decouple();

            Message * resp = new Message();
            resp->setData( qr , true );

            dbresponse->response = resp;
            dbresponse->responseTo = m.header()->id;
            return true;
        }
        
        uassert( 9517 , "writeback" , ( d.reservedField() & DbMessage::Reserved_FromWriteback ) == 0 );

        OID writebackID;
        writebackID.init();
        lastError.getSafe()->writeback( writebackID );

        const OID& clientID = ShardedConnectionInfo::get(false)->getID();
        massert( 10422 ,  "write with bad shard config and no server id!" , clientID.isSet() );

        LOG(1) << "got write with an old config - writing back ns: " << ns << endl;
        LOG(1) << m.toString() << endl;

        BSONObjBuilder b;
        b.appendBool( "writeBack" , true );
        b.append( "ns" , ns );
        b.append( "id" , writebackID );
        b.append( "connectionId" , cc().getConnectionId() );
        b.append( "instanceIdent" , prettyHostName() );
        b.appendTimestamp( "version" , shardingState.getVersion( ns ) );
        
        ShardedConnectionInfo* info = ShardedConnectionInfo::get( false );
        b.appendTimestamp( "yourVersion" , info ? info->getVersion(ns) : (ConfigVersion)0 );

        b.appendBinData( "msg" , m.header()->len , bdtCustom , (char*)(m.singleData()) );
        LOG(2) << "writing back msg with len: " << m.header()->len << " op: " << m.operation() << endl;
        writeBackManager.queueWriteBack( clientID.str() , b.obj() );

        return true;
    }
Example #6
/* ****************************************************************************
*
* processRegisterContext -
*
* This function has a slightly different behaviour depending on whether the id
* parameter is null (new registration case) or not null (update case), in
* particular:
*
* - In the new registration case, the _id is generated and the document is created in
*   the DB (through the upsert described below).
* - In the update case, the _id is set according to the argument 'id', so the same
*   upsert updates the existing document in the DB.
*
*/
HttpStatusCode processRegisterContext
(
  RegisterContextRequest*   requestP,
  RegisterContextResponse*  responseP,
  OID*                      id,
  const std::string&        tenant,
  const std::string&        servicePath,
  const std::string&        format,
  const std::string&        fiwareCorrelator
)
{
  std::string err;

  /* If expiration is not present, then use a default one */
  if (requestP->duration.isEmpty())
  {
    requestP->duration.set(DEFAULT_DURATION);
  }

  /* Calculate expiration (using the current time and the duration field in the request) */
  long long expiration = getCurrentTime() + requestP->duration.parse();
  LM_T(LmtMongo, ("Registration expiration: %lu", expiration));

  /* Create the mongoDB registration document */
  BSONObjBuilder reg;
  OID oid;
  if (id == NULL)
  {
    oid.init();
  }
  else
  {
    oid = *id;
  }
  reg.append("_id", oid);
  reg.append(REG_EXPIRATION, expiration);
  reg.append(REG_SERVICE_PATH, servicePath == "" ? DEFAULT_SERVICE_PATH_UPDATES : servicePath);
  reg.append(REG_FORMAT, format);


  //
  // We accumulate the subscriptions in a map. The key of the map is the string representing
  // subscription id
  //
  std::map<string, TriggeredSubscription*> subsToNotify;

  // This vector is used to define which entities to include in notifications
  EntityIdVector triggerEntitiesV;

  BSONArrayBuilder contextRegistration;
  for (unsigned int ix = 0; ix < requestP->contextRegistrationVector.size(); ++ix)
  {
    ContextRegistration* cr = requestP->contextRegistrationVector[ix];

    BSONArrayBuilder entities;
    for (unsigned int jx = 0; jx < cr->entityIdVector.size(); ++jx)
    {
      EntityId* en = cr->entityIdVector[jx];
      triggerEntitiesV.push_back(en);

      if (en->type == "")
      {
        entities.append(BSON(REG_ENTITY_ID << en->id));
        LM_T(LmtMongo, ("Entity registration: {id: %s}", en->id.c_str()));
      }
      else
      {
        entities.append(BSON(REG_ENTITY_ID << en->id << REG_ENTITY_TYPE << en->type));
        LM_T(LmtMongo, ("Entity registration: {id: %s, type: %s}", en->id.c_str(), en->type.c_str()));
      }
    }

    BSONArrayBuilder attrs;
    for (unsigned int jx = 0; jx < cr->contextRegistrationAttributeVector.size(); ++jx)
    {
      ContextRegistrationAttribute* cra = cr->contextRegistrationAttributeVector[jx];
      attrs.append(BSON(REG_ATTRS_NAME << cra->name << REG_ATTRS_TYPE << cra->type << "isDomain" << cra->isDomain));
      LM_T(LmtMongo, ("Attribute registration: {name: %s, type: %s, isDomain: %s}",
                      cra->name.c_str(),
                      cra->type.c_str(),
                      cra->isDomain.c_str()));

      for (unsigned int kx = 0;
           kx < requestP->contextRegistrationVector[ix]->contextRegistrationAttributeVector[jx]->metadataVector.size();
           ++kx)
      {
        // FIXME: metadata not supported at the moment
      }
    }

    contextRegistration.append(
      BSON(
        REG_ENTITIES << entities.arr() <<
        REG_ATTRS << attrs.arr() <<
        REG_PROVIDING_APPLICATION << requestP->contextRegistrationVector[ix]->providingApplication.get()));

    LM_T(LmtMongo, ("providingApplication registration: %s",
                    requestP->contextRegistrationVector[ix]->providingApplication.c_str()));

    std::string err;

    if (!addTriggeredSubscriptions(*cr, subsToNotify, err, tenant))
    {
      responseP->errorCode.fill(SccReceiverInternalError, err);
      return SccOk;
    }
  }
  reg.append(REG_CONTEXT_REGISTRATION, contextRegistration.arr());

  /* Note that we are using upsert = "true". This means that if the document doesn't
   * previously exist in the collection, it is created. Thus, both uses of registerContext
   * (new registration and update of an existing one) are covered. */
  if (!collectionUpdate(getRegistrationsCollectionName(tenant), BSON("_id" << oid), reg.obj(), true, &err))
  {
    responseP->errorCode.fill(SccReceiverInternalError, err);
    releaseTriggeredSubscriptions(subsToNotify);
    return SccOk;
  }

  //
  // Send notifications for each one of the subscriptions accumulated by
  // previous addTriggeredSubscriptions() invocations
  //
  processSubscriptions(triggerEntitiesV, subsToNotify, err, tenant, fiwareCorrelator);

  // Fill the response element
  responseP->duration = requestP->duration;
  responseP->registrationId.set(oid.toString());
  responseP->errorCode.fill(SccOk);

  return SccOk;
}
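For context, a sketch of the two calling modes described in the header comment (the surrounding variables are hypothetical placeholders, not actual Orion call sites):

  // New registration: pass NULL, so a fresh OID is generated and the upsert inserts.
  processRegisterContext(&req, &resp, NULL, tenant, servicePath, format, correlator);

  // Update: parse the incoming registration id into an OID and pass its address, so the
  // upsert matches the existing _id and modifies the document in place.
  OID existingId(registrationIdString);  // assumed to hold a valid 24-char hex id
  processRegisterContext(&req, &resp, &existingId, tenant, servicePath, format, correlator);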
Example #7
Status MetadataLoader::initChunks(OperationContext* txn,
                                  ShardingCatalogClient* catalogClient,
                                  const string& ns,
                                  const string& shard,
                                  const CollectionMetadata* oldMetadata,
                                  CollectionMetadata* metadata) const {
    map<ShardId, ChunkVersion> versionMap;  // TODO: use .h defined type

    // Preserve the epoch
    versionMap[shard] = metadata->_shardVersion;
    OID epoch = metadata->getCollVersion().epoch();
    bool fullReload = true;

    // Check to see if we should use the old version or not.
    if (oldMetadata) {
        // If our epochs are compatible, it's useful to use the old metadata for diffs: this leads
        // to a performance gain because not all the chunks must be reloaded, just the ones this
        // shard has not seen -- they will have higher versions than present in oldMetadata.
        if (oldMetadata->getCollVersion().hasEqualEpoch(epoch)) {
            fullReload = false;
            invariant(oldMetadata->isValid());

            versionMap[shard] = oldMetadata->_shardVersion;
            metadata->_collVersion = oldMetadata->_collVersion;

            // TODO: This could be made more efficient if copying were not required, but
            // metadata is not reloaded here as frequently as in mongos.
            metadata->_chunksMap = oldMetadata->_chunksMap;

            LOG(2) << "loading new chunks for collection " << ns
                   << " using old metadata w/ version " << oldMetadata->getShardVersion() << " and "
                   << metadata->_chunksMap.size() << " chunks";
        } else {
            warning() << "reloading collection metadata for " << ns << " with new epoch "
                      << epoch.toString() << ", the current epoch is "
                      << oldMetadata->getCollVersion().epoch().toString();
        }
    }


    // Exposes the new metadata's range map and version to the "differ," which is
    // ultimately responsible for filling them in.
    SCMConfigDiffTracker differ(shard);
    differ.attach(ns, metadata->_chunksMap, metadata->_collVersion, versionMap);

    try {
        std::vector<ChunkType> chunks;
        const auto diffQuery = differ.configDiffQuery();
        Status status = catalogClient->getChunks(
            txn, diffQuery.query, diffQuery.sort, boost::none, &chunks, nullptr);
        if (!status.isOK()) {
            if (status == ErrorCodes::HostUnreachable) {
                // Make our metadata invalid
                metadata->_collVersion = ChunkVersion(0, 0, OID());
                metadata->_chunksMap.clear();
            }
            return status;
        }

        //
        // The diff tracker should always find at least one chunk (the highest chunk we saw
        // last time).  If not, something has changed on the config server (potentially between
        // when we read the collection data and when we read the chunks data).
        //
        int diffsApplied = differ.calculateConfigDiff(txn, chunks);
        if (diffsApplied > 0) {
            // Chunks found, return ok
            LOG(2) << "loaded " << diffsApplied << " chunks into new metadata for " << ns
                   << " with version " << metadata->_collVersion;

            // If the last chunk was moved off of this shard, the shardVersion should be reset to
            // zero (if we did not conduct a full reload and oldMetadata was present,
            // versionMap[shard] was previously set to the oldMetadata's shardVersion for
            // performance gains).
            if (!fullReload && metadata->_chunksMap.size() == 0) {
                versionMap[shard] = ChunkVersion(0, 0, epoch);
            }

            metadata->_shardVersion = versionMap[shard];
            metadata->fillRanges();

            invariant(metadata->isValid());
            return Status::OK();
        } else if (diffsApplied == 0) {
            // No chunks found, so the collection is being dropped or we're confused
            // If this is a full reload, assume it is a drop for backwards compatibility
            // TODO: drop the config.collections entry *before* the chunks and eliminate this
            // ambiguity

            string errMsg = str::stream()
                << "no chunks found when reloading " << ns << ", previous version was "
                << metadata->_collVersion.toString() << (fullReload ? ", this is a drop" : "");

            warning() << errMsg;

            metadata->_collVersion = ChunkVersion(0, 0, OID());
            metadata->_chunksMap.clear();

            return fullReload ? Status(ErrorCodes::NamespaceNotFound, errMsg)
                              : Status(ErrorCodes::RemoteChangeDetected, errMsg);
        } else {
            // Invalid chunks found, our epoch may have changed because we dropped/recreated
            // the collection.
            string errMsg = str::stream()
                << "invalid chunks found when reloading " << ns << ", previous version was "
                << metadata->_collVersion.toString() << ", this should be rare";
            warning() << errMsg;

            metadata->_collVersion = ChunkVersion(0, 0, OID());
            metadata->_chunksMap.clear();

            return Status(ErrorCodes::RemoteChangeDetected, errMsg);
        }
    } catch (const DBException& e) {
        // The exception most likely indicates a connectivity problem with the config
        // servers, so surface it as HostUnreachable.
        return Status(ErrorCodes::HostUnreachable,
                      str::stream() << "problem querying chunks metadata" << causedBy(e));
    }
}
Example #8
 string genWorkingSuffix(const OID& lastUpgradeId) {
     return "-upgrade-" + lastUpgradeId.toString();
 }
Example #9
 string genBackupSuffix(const OID& lastUpgradeId) {
     return "-backup-" + lastUpgradeId.toString();
 }
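For illustration, here is how these helpers combine with the namespaces used in doUpgradeV3ToV4() above (the OID value is invented, and "config.collections" is an assumption about CollectionType::ConfigNS):

 OID upgradeId = OID::gen();                     // e.g. 522bb79455449d881b004d27
 string working = genWorkingSuffix(upgradeId);   // "-upgrade-522bb794..."
 string backup  = genBackupSuffix(upgradeId);    // "-backup-522bb794..."
 // The upgrade then copies e.g. "config.collections" to
 // "config.collections-upgrade-522bb794..." and performs all edits on that copy.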
Example #10
    void v8ToMongoElement( BSONObjBuilder & b , v8::Handle<v8::String> name , const string sname , v8::Handle<v8::Value> value ){
        
        if ( value->IsString() ){
            b.append( sname.c_str() , toSTLString( value ).c_str() );
            return;
        }
        
        if ( value->IsFunction() ){
            b.appendCode( sname.c_str() , toSTLString( value ).c_str() );
            return;
        }
    
        if ( value->IsNumber() ){
            if ( value->IsInt32() )
                b.append( sname.c_str(), int( value->ToInt32()->Value() ) );
            else
                b.append( sname.c_str() , value->ToNumber()->Value() );
            return;
        }
    
        if ( value->IsArray() ){
            BSONObj sub = v8ToMongo( value->ToObject() );
            b.appendArray( sname.c_str() , sub );
            return;
        }
    
        if ( value->IsDate() ){
            b.appendDate( sname.c_str() , Date_t(v8::Date::Cast( *value )->NumberValue()) );
            return;
        }

        if ( value->IsExternal() )
            return;
        
        if ( value->IsObject() ){
            // The user could potentially modify the fields of these special objects,
            // wreaking havoc when we attempt to reinterpret them.  Not doing any validation
            // for now...
            Local< v8::Object > obj = value->ToObject();
            if ( obj->InternalFieldCount() && obj->GetInternalField( 0 )->IsNumber() ) {
                switch( obj->GetInternalField( 0 )->ToInt32()->Value() ) { // NOTE Uint32's Value() gave me a linking error, so going with this instead
                    case Timestamp:
                        b.appendTimestamp( sname.c_str(),
                                          Date_t( v8::Date::Cast( *obj->Get( v8::String::New( "time" ) ) )->NumberValue() ),
                                          obj->Get( v8::String::New( "i" ) )->ToInt32()->Value() );
                        return;
                    case MinKey:
                        b.appendMinKey( sname.c_str() );
                        return;
                    case MaxKey:
                        b.appendMaxKey( sname.c_str() );
                        return;
                    default:
                        assert( "invalid internal field" == 0 );
                }
            }
            string s = toSTLString( value );
            if ( s.size() && s[0] == '/' ){
                s = s.substr( 1 );
                string r = s.substr( 0 , s.rfind( "/" ) );
                string o = s.substr( s.rfind( "/" ) + 1 );
                b.appendRegex( sname.c_str() , r.c_str() , o.c_str() );
            }
            else if ( value->ToObject()->GetPrototype()->IsObject() &&
                      value->ToObject()->GetPrototype()->ToObject()->HasRealNamedProperty( v8::String::New( "isObjectId" ) ) ){
                OID oid;
                oid.init( toSTLString( value ) );
                b.appendOID( sname.c_str() , &oid );
            }
            else if ( !value->ToObject()->GetHiddenValue( v8::String::New( "__NumberLong" ) ).IsEmpty() ) {
                // TODO might be nice to potentially speed this up with an indexed internal
                // field, but I don't yet know how to use an ObjectTemplate with a
                // constructor.
                unsigned long long val =
                ( (unsigned long long)( value->ToObject()->Get( v8::String::New( "top" ) )->ToInt32()->Value() ) << 32 ) +
                (unsigned)( value->ToObject()->Get( v8::String::New( "bottom" ) )->ToInt32()->Value() );
                b.append( sname.c_str(), (long long)val );
            }
            else if ( !value->ToObject()->GetHiddenValue( v8::String::New( "__DBPointer" ) ).IsEmpty() ) {
                OID oid;
                oid.init( toSTLString( value->ToObject()->Get( v8::String::New( "id" ) ) ) );
                string ns = toSTLString( value->ToObject()->Get( v8::String::New( "ns" ) ) );
                b.appendDBRef( sname.c_str(), ns.c_str(), oid );                
            }
            else if ( !value->ToObject()->GetHiddenValue( v8::String::New( "__BinData" ) ).IsEmpty() ) {
                int len = obj->Get( v8::String::New( "len" ) )->ToInt32()->Value();
                v8::String::Utf8Value data( obj->Get( v8::String::New( "data" ) ) );
                const char *dataArray = *data;
                assert( data.length() == len );
                b.appendBinData( sname.c_str(),
                                len,
                                mongo::BinDataType( obj->Get( v8::String::New( "type" ) )->ToInt32()->Value() ),
                                dataArray );
            } else {
                BSONObj sub = v8ToMongo( value->ToObject() );
                b.append( sname.c_str() , sub );
            }
            return;
        }
    
        if ( value->IsBoolean() ){
            b.appendBool( sname.c_str() , value->ToBoolean()->Value() );
            return;
        }
    
        else if ( value->IsUndefined() ){
            b.appendUndefined( sname.c_str() );
            return;
        }
    
        else if ( value->IsNull() ){
            b.appendNull( sname.c_str() );
            return;
        }

        cout << "don't know how to convert to mongo field [" << name << "]\t" << value << endl;
    }
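For completeness, a simplified sketch of the v8ToMongo() driver loop that calls the helper above (an assumption-based illustration of the old V8 API usage, not the exact shell code):

    BSONObj v8ToMongo( v8::Handle<v8::Object> o ) {
        BSONObjBuilder b;
        v8::Handle<v8::Array> names = o->GetPropertyNames();
        for ( unsigned int i = 0; i < names->Length(); i++ ) {
            v8::Handle<v8::String> name = names->Get( i )->ToString();
            // Convert each own property into the corresponding BSON element.
            v8ToMongoElement( b , name , toSTLString( name ) , o->Get( name ) );
        }
        return b.obj();
    }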
Example #11
 Local< v8::Value > newId( const OID &id ) {
     v8::Function * idCons = getObjectIdCons();
     v8::Handle<v8::Value> argv[1];
     argv[0] = v8::String::New( id.str().c_str() );
     return idCons->NewInstance( 1 , argv );        
 }
Example #12
File: model.cpp Project: 7segments/mongo
    void Model::save( bool safe ) {
        scoped_ptr<ScopedDbConnection> conn(
                ScopedDbConnection::getScopedDbConnection (modelServer() ) );

        BSONObjBuilder b;
        serialize( b );

        BSONElement myId;
        {
            BSONObjIterator i = b.iterator();
            while ( i.more() ) {
                BSONElement e = i.next();
                if ( strcmp( e.fieldName() , "_id" ) == 0 ) {
                    myId = e;
                    break;
                }
            }
        }

        if ( myId.type() ) {
            if ( _id.isEmpty() ) {
                _id = myId.wrap();
            }
            else if ( myId.woCompare( _id.firstElement() ) ) {
                stringstream ss;
                ss << "_id from serialize and stored differ: ";
                ss << '[' << myId << "] != ";
                ss << '[' << _id.firstElement() << ']';
                throw UserException( 13121 , ss.str() );
            }
        }

        if ( _id.isEmpty() ) {
            OID oid;
            oid.init();
            b.appendOID( "_id" , &oid );

            BSONObj o = b.obj();
            conn->get()->insert( getNS() , o );
            _id = o["_id"].wrap().getOwned();

            LOG(4) << "inserted new model " << getNS() << "  " << o << endl;
        }
        else {
            if ( myId.eoo() ) {
                myId = _id["_id"];
                b.append( myId );
            }

            verify( ! myId.eoo() );

            BSONObjBuilder qb;
            qb.append( myId );

            BSONObj q = qb.obj();
            BSONObj o = b.obj();

            LOG(4) << "updated model" << getNS() << "  " << q << " " << o << endl;

            conn->get()->update( getNS() , q , o , true );

        }

        string errmsg = "";
        if ( safe )
            errmsg = conn->get()->getLastError();

        conn->done();

        if ( safe && errmsg.size() )
            throw UserException( 9003 , (string)"error on Model::save: " + errmsg );
    }
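A minimal sketch of a Model subclass, to show the hooks that save() relies on (the class name, namespace string and field are invented; unserialize() is assumed to be part of the Model interface):

    class Counter : public Model {
    public:
        int n;
        Counter() : n( 0 ) {}
        // Namespace the model persists to; save() passes this to insert()/update().
        virtual const char * getNS() { return "test.counters"; }
        // Serializes the fields that save() writes; _id handling stays in Model::save().
        virtual void serialize( BSONObjBuilder &b ) { b.append( "n" , n ); }
        virtual void unserialize( const BSONObj &o ) { n = o["n"].numberInt(); }
    };
    // The first save() inserts and remembers the generated _id;
    // subsequent save() calls update by that _id.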
Example #13
Status updateShardChunks(OperationContext* opCtx,
                         const NamespaceString& nss,
                         const std::vector<ChunkType>& chunks,
                         const OID& currEpoch) {
    invariant(!chunks.empty());

    NamespaceString chunkMetadataNss(ChunkType::ShardNSPrefix + nss.ns());

    try {
        DBDirectClient client(opCtx);

        /**
         * Here are examples of the operations that can happen on the config server to update
         * the config.chunks collection. 'chunks' only includes the chunks that result from the
         * operations, which can be read from the config server, not any that were removed, so
         * we must delete any chunks that overlap with the new 'chunks'.
         *
         * CollectionVersion = 10.3
         *
         * moveChunk
         * {_id: 3, max: 5, version: 10.1} --> {_id: 3, max: 5, version: 11.0}
         *
         * splitChunk
         * {_id: 3, max: 9, version 10.3} --> {_id: 3, max: 5, version 10.4}
         *                                    {_id: 5, max: 8, version 10.5}
         *                                    {_id: 8, max: 9, version 10.6}
         *
         * mergeChunk
         * {_id: 10, max: 14, version 4.3} --> {_id: 10, max: 22, version 10.4}
         * {_id: 14, max: 19, version 7.1}
         * {_id: 19, max: 22, version 2.0}
         *
         */
        for (auto& chunk : chunks) {
            // Check for a different epoch.
            if (!chunk.getVersion().hasEqualEpoch(currEpoch)) {
                return Status{ErrorCodes::ConflictingOperationInProgress,
                              str::stream() << "Invalid chunks found when reloading '"
                                            << nss.toString()
                                            << "'. Previous collection epoch was '"
                                            << currEpoch.toString()
                                            << "', but unexpectedly found a new epoch '"
                                            << chunk.getVersion().epoch().toString()
                                            << "'. Collection was dropped and recreated."};
            }

            // Delete any overlapping chunk ranges. Overlapping chunks will have a min value
            // ("_id") in [chunk.min, chunk.max).
            //
            // query: { "_id" : {"$gte": chunk.min, "$lt": chunk.max}}
            auto deleteDocs(stdx::make_unique<BatchedDeleteDocument>());
            deleteDocs->setQuery(BSON(ChunkType::minShardID << BSON(
                                          "$gte" << chunk.getMin() << "$lt" << chunk.getMax())));
            deleteDocs->setLimit(0);

            auto deleteRequest(stdx::make_unique<BatchedDeleteRequest>());
            deleteRequest->addToDeletes(deleteDocs.release());

            BatchedCommandRequest batchedDeleteRequest(deleteRequest.release());
            batchedDeleteRequest.setNS(chunkMetadataNss);
            const BSONObj deleteCmdObj = batchedDeleteRequest.toBSON();

            rpc::UniqueReply deleteCommandResponse =
                client.runCommandWithMetadata(chunkMetadataNss.db().toString(),
                                              deleteCmdObj.firstElementFieldName(),
                                              rpc::makeEmptyMetadata(),
                                              deleteCmdObj);
            auto deleteStatus =
                getStatusFromCommandResult(deleteCommandResponse->getCommandReply());

            if (!deleteStatus.isOK()) {
                return deleteStatus;
            }

            // Now the document can be expected to cleanly insert without overlap.
            auto insert(stdx::make_unique<BatchedInsertRequest>());
            insert->addToDocuments(chunk.toShardBSON());

            BatchedCommandRequest insertRequest(insert.release());
            insertRequest.setNS(chunkMetadataNss);
            const BSONObj insertCmdObj = insertRequest.toBSON();

            rpc::UniqueReply commandResponse =
                client.runCommandWithMetadata(chunkMetadataNss.db().toString(),
                                              insertCmdObj.firstElementFieldName(),
                                              rpc::makeEmptyMetadata(),
                                              insertCmdObj);
            auto insertStatus = getStatusFromCommandResult(commandResponse->getCommandReply());

            if (!insertStatus.isOK()) {
                return insertStatus;
            }
        }

        return Status::OK();
    } catch (const DBException& ex) {
        return ex.toStatus();
    }
}
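To make the overlap-delete step concrete: for a refreshed chunk covering [{x: 5}, {x: 10}), the query built above removes every shard-chunk document whose _id (its min key) falls inside the half-open range, including pre-split remnants, so the subsequent insert cannot hit a duplicate key. A minimal sketch using the same BSON macros (shard key and values are illustrative):

BSONObj chunkMin = BSON("x" << 5);
BSONObj chunkMax = BSON("x" << 10);
// => { _id: { $gte: {x: 5}, $lt: {x: 10} } }
BSONObj query = BSON("_id" << BSON("$gte" << chunkMin << "$lt" << chunkMax));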
Example #14
0
    bool handlePossibleShardedMessage( Message &m, DbResponse* dbresponse ){
        if ( ! shardingState.enabled() )
            return false;

        int op = m.operation();
        if ( op < 2000 
             || op >= 3000 
             || op == dbGetMore  // cursors are weird
             )
            return false;
        
        DbMessage d(m);        
        const char *ns = d.getns();
        string errmsg;
        if ( shardVersionOk( ns , errmsg ) ){
            return false;
        }

        log() << "shardVersionOk failed  ns:(" << ns << ") op:(" << opToString(op) << ") " << errmsg << endl;
        
        if ( doesOpGetAResponse( op ) ){
            assert( dbresponse );
            BufBuilder b( 32768 );
            b.skip( sizeof( QueryResult ) );
            {
                BSONObj obj = BSON( "$err" << errmsg );
                b.appendBuf( obj.objdata() , obj.objsize() );
            }
            
            QueryResult *qr = (QueryResult*)b.buf();
            qr->_resultFlags() = ResultFlag_ErrSet | ResultFlag_ShardConfigStale;
            qr->len = b.len();
            qr->setOperation( opReply );
            qr->cursorId = 0;
            qr->startingFrom = 0;
            qr->nReturned = 1;
            b.decouple();

            Message * resp = new Message();
            resp->setData( qr , true );
            
            dbresponse->response = resp;
            dbresponse->responseTo = m.header()->id;
            return true;
        }
        
        OID writebackID;
        writebackID.init();
        lastError.getSafe()->writeback( writebackID );

        const OID& clientID = ShardedConnectionInfo::get(false)->getID();
        massert( 10422 ,  "write with bad shard config and no server id!" , clientID.isSet() );
        
        log(1) << "got write with an old config - writing back ns: " << ns << endl;
        if ( logLevel ) log(1) << debugString( m ) << endl;

        BSONObjBuilder b;
        b.appendBool( "writeBack" , true );
        b.append( "ns" , ns );
        b.append( "id" , writebackID );
        b.appendTimestamp( "version" , shardingState.getVersion( ns ) );
        b.appendTimestamp( "yourVersion" , ShardedConnectionInfo::get( true )->getVersion( ns ) );
        b.appendBinData( "msg" , m.header()->len , bdtCustom , (char*)(m.singleData()) );
        log(2) << "writing back msg with len: " << m.header()->len << " op: " << m.operation() << endl;
        queueWriteBack( clientID.str() , b.obj() );

        return true;
    }
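For reference, the writeback document queued at the end of the function mirrors the appends above; a hedged sketch of its shape (values illustrative):

// { writeBack: true,
//   ns: "test.foo",                // namespace the stale write targeted
//   id: ObjectId(...),             // the freshly generated writebackID
//   version: Timestamp(...),       // this shard's version for ns
//   yourVersion: Timestamp(...),   // the stale client's version for ns
//   msg: BinData(...) }            // the original wire message, to be replayed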
Example #15
0
static void lua_append_bson(lua_State *L, const char *key, int stackpos, BSONObjBuilder *builder) {
    int type = lua_type(L, stackpos);

    if (type == LUA_TTABLE) {
        int bsontype_found = luaL_getmetafield(L, stackpos, "__bsontype");
        if (!bsontype_found) {
            // not a special bsontype
            // handle as a regular table, iterating keys
            BSONObjBuilder b;

            int arraylen = check_array(L, stackpos);

            if (arraylen) {
                for (int i = 0; i < arraylen; i++) {
                    lua_rawgeti(L, stackpos, i+1);

                    stringstream ss;
                    ss << i;

                    lua_append_bson(L, ss.str().c_str(), -1, &b);
                    lua_pop(L, 1);
                }

                builder->appendArray(key, b.obj());
            } else {
                lua_pushnil(L);
                while (lua_next(L, stackpos-1) != 0) {
                    if (lua_isnumber(L, -2)) {
                        stringstream ss;
                        ss << lua_tonumber(L, -2);

                        lua_append_bson(L, ss.str().c_str(), -1, &b);
                    } else {
                        const char *k = lua_tostring(L, -2);
                        lua_append_bson(L, k, -1, &b);
                    }

                    lua_pop(L, 1);
                }

                builder->append(key, b.obj());
            }
        } else {
            int bson_type = lua_tointeger(L, -1);
            lua_pop(L, 1);
            lua_rawgeti(L, -1, 1);
            switch (bson_type) {
            case mongo::Date:
                builder->appendDate(key, lua_tonumber(L, -1));
                break;
            case mongo::Timestamp:
                builder->appendTimestamp(key);
                break;
            case mongo::RegEx: {
                const char* regex = lua_tostring(L, -1);
                lua_rawgeti(L, -2, 2); // options
                const char* options = lua_tostring(L, -1);
                lua_pop(L, 1);
                builder->appendRegex(key, regex, options);
                break;
                }
            case mongo::NumberInt:
                builder->append(key, static_cast<int32_t>(lua_tointeger(L, -1)));
                break;
            case mongo::NumberLong:
                builder->append(key, static_cast<long long int>(lua_tonumber(L, -1)));
                break;
            case mongo::Symbol:
                builder->appendSymbol(key, lua_tostring(L, -1));
                break;
            case mongo::jstOID: {
                OID oid;
                oid.init(lua_tostring(L, -1));
                builder->appendOID(key, &oid);
                break;
            }
            case mongo::jstNULL:
                builder->appendNull(key);
                break;

            default:
                luaL_error(L, LUAMONGO_UNSUPPORTED_BSON_TYPE, luaL_typename(L, stackpos));
            }
            lua_pop(L, 1);
        }
    } else if (type == LUA_TNIL) {
        builder->appendNull(key);
    } else if (type == LUA_TNUMBER) {
        double numval = lua_tonumber(L, stackpos);
        if (numval == floor(numval)) {
            // The numeric value looks like an integer, treat it as such.
            // This is closer to how JSON datatypes behave.
            int intval = lua_tointeger(L, stackpos);
            builder->append(key, static_cast<int32_t>(intval));
        } else {
            builder->append(key, numval);
        }
    } else if (type == LUA_TBOOLEAN) {
        builder->appendBool(key, lua_toboolean(L, stackpos));
    } else if (type == LUA_TSTRING) {
        builder->append(key, lua_tostring(L, stackpos));
    } else {
        luaL_error(L, LUAMONGO_UNSUPPORTED_LUA_TYPE, luaL_typename(L, stackpos));
    }
}
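A hypothetical top-level driver for the function above, assuming tablepos is an absolute (positive) stack index; the key is copied before stringifying because lua_tostring would mutate a numeric key in place and break the following lua_next:

static BSONObj lua_table_to_bson(lua_State *L, int tablepos) {
    BSONObjBuilder builder;
    lua_pushnil(L);
    while (lua_next(L, tablepos) != 0) {
        lua_pushvalue(L, -2);                 // copy the key...
        lua_insert(L, -2);                    // ...and tuck it under the value
        const char *k = lua_tostring(L, -2);  // stringify only the copy
        lua_append_bson(L, k, -1, &builder);  // value on top, as the code expects
        lua_pop(L, 2);                        // drop value and key copy
    }
    return builder.obj();
}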
Example #16
0
File: d_logic.cpp Project: fes/mongo
        bool run(const char *cmdns, BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool){
            
            string configdb = cmdObj["configdb"].valuestrsafe();
            { // configdb checking
                if ( configdb.size() == 0 ){
                    errmsg = "no configdb";
                    return false;
                }
                
                if ( shardConfigServer.size() == 0 ){
                    if ( ! cmdObj.getBoolField( "authoritative" ) ){
                        result.appendBool( "need_authoritative" , true );
                        errmsg = "first setShardVersion";
                        return false;
                    }
                    shardConfigServer = configdb;
                }
                else if ( shardConfigServer != configdb ){
                    errmsg = "specified a different configdb!";
                    return false;
                }
            }
            
            { // setting up ids
                if ( cmdObj["serverID"].type() != jstOID ){
                    // TODO: fix this
                    //errmsg = "need serverID to be an OID";
                    //return 0;
                }
                else {
                    OID clientId = cmdObj["serverID"].__oid();
                    if ( ! clientServerIds.get() ){
                        string s = clientId.str();
                        
                        OID * nid = new OID();
                        nid->init( s );
                        clientServerIds.reset( nid );
                        
                        if ( ! clientQueues[s] )
                            clientQueues[s] = new BlockingQueue<BSONObj>();
                    }
                    else if ( clientId != *clientServerIds.get() ){
                        errmsg = "server id has changed!";
                        return false;
                    }
                }
            }
            
            unsigned long long version = getVersion( cmdObj["version"] , errmsg );
            if ( ! version )
                return false;

            NSVersions * versions = clientShardVersions.get();
            
            if ( ! versions ){
                log(1) << "entering shard mode for connection" << endl;
                versions = new NSVersions();
                clientShardVersions.reset( versions );
            }
            
            string ns = cmdObj["setShardVersion"].valuestrsafe();
            if ( ns.size() == 0 ){
                errmsg = "need to speciy fully namespace";
                return false;
            }

            unsigned long long& oldVersion = (*versions)[ns];
            if ( version < oldVersion ){
                errmsg = "you already have a newer version";
                result.appendTimestamp( "oldVersion" , oldVersion );
                result.appendTimestamp( "newVersion" , version );
                return false;
            }

            unsigned long long& myVersion = myVersions[ns];
            if ( version < myVersion ){
                errmsg = "going to older version for global";
                return false;
            }
            
            if ( myVersion == 0 && ! cmdObj.getBoolField( "authoritative" ) ){
                // need authoritative for first look
                result.appendBool( "need_authoritative" , true );
                result.append( "ns" , ns );
                errmsg = "first time for this ns";
                return false;
            }

            result.appendTimestamp( "oldVersion" , oldVersion );
            oldVersion = version;
            myVersion = version;

            result.append( "ok" , 1 );
            return true;
        }
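The fields this handler reads (setShardVersion, configdb, serverID, version, authoritative) suggest the following command shape; a hedged BSONObjBuilder sketch with illustrative values, where serverID is the caller's OID (compare the global serverID in the server.cpp example below):

BSONObjBuilder b;
b.append("setShardVersion", "test.foo");     // fully qualified namespace
b.append("configdb", "cfgA,cfgB,cfgC");      // must match shardConfigServer
b.appendTimestamp("version", 5ULL << 32);    // major 5, minor 0 (illustrative)
b.append("serverID", serverID);              // OID identifying the mongos
b.appendBool("authoritative", true);         // required on first contact
BSONObj cmd = b.obj();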
Example #17
0
bool mergeChunks(OperationContext* txn,
                 const NamespaceString& nss,
                 const BSONObj& minKey,
                 const BSONObj& maxKey,
                 const OID& epoch,
                 string* errMsg) {
    // Get the distributed lock
    string whyMessage = stream() << "merging chunks in " << nss.ns() << " from " << minKey << " to "
                                 << maxKey;
    auto scopedDistLock = grid.catalogManager(txn)->distLock(
        txn, nss.ns(), whyMessage, DistLockManager::kSingleLockAttemptTimeout);

    if (!scopedDistLock.isOK()) {
        *errMsg = stream() << "could not acquire collection lock for " << nss.ns()
                           << " to merge chunks in [" << minKey << "," << maxKey << ")"
                           << causedBy(scopedDistLock.getStatus());

        warning() << *errMsg;
        return false;
    }

    ShardingState* shardingState = ShardingState::get(txn);

    //
    // We now have the collection lock, refresh metadata to latest version and sanity check
    //

    ChunkVersion shardVersion;
    Status status = shardingState->refreshMetadataNow(txn, nss.ns(), &shardVersion);

    if (!status.isOK()) {
        *errMsg = str::stream() << "could not merge chunks, failed to refresh metadata for "
                                << nss.ns() << causedBy(status.reason());

        warning() << *errMsg;
        return false;
    }

    if (epoch.isSet() && shardVersion.epoch() != epoch) {
        *errMsg = stream() << "could not merge chunks, collection " << nss.ns() << " has changed"
                           << " since merge was sent"
                           << "(sent epoch : " << epoch.toString()
                           << ", current epoch : " << shardVersion.epoch().toString() << ")";

        warning() << *errMsg;
        return false;
    }

    shared_ptr<CollectionMetadata> metadata = shardingState->getCollectionMetadata(nss.ns());

    if (!metadata || metadata->getKeyPattern().isEmpty()) {
        *errMsg = stream() << "could not merge chunks, collection " << nss.ns()
                           << " is not sharded";

        warning() << *errMsg;
        return false;
    }

    dassert(metadata->getShardVersion().equals(shardVersion));

    if (!metadata->isValidKey(minKey) || !metadata->isValidKey(maxKey)) {
        *errMsg = stream() << "could not merge chunks, the range " << rangeToString(minKey, maxKey)
                           << " is not valid"
                           << " for collection " << nss.ns() << " with key pattern "
                           << metadata->getKeyPattern();

        warning() << *errMsg;
        return false;
    }

    //
    // Get merged chunk information
    //

    ChunkVersion mergeVersion = metadata->getCollVersion();
    mergeVersion.incMinor();

    std::vector<ChunkType> chunksToMerge;

    ChunkType itChunk;
    itChunk.setMin(minKey);
    itChunk.setMax(minKey);
    itChunk.setNS(nss.ns());
    itChunk.setShard(shardingState->getShardName());

    while (itChunk.getMax().woCompare(maxKey) < 0 &&
           metadata->getNextChunk(itChunk.getMax(), &itChunk)) {
        chunksToMerge.push_back(itChunk);
    }

    if (chunksToMerge.empty()) {
        *errMsg = stream() << "could not merge chunks, collection " << nss.ns()
                           << " range starting at " << minKey << " and ending at " << maxKey
                           << " does not belong to shard " << shardingState->getShardName();

        warning() << *errMsg;
        return false;
    }

    //
    // Validate the range starts and ends at chunks and has no holes, error if not valid
    //

    BSONObj firstDocMin = chunksToMerge.front().getMin();
    BSONObj firstDocMax = chunksToMerge.front().getMax();
    // minKey is inclusive
    bool minKeyInRange = rangeContains(firstDocMin, firstDocMax, minKey);

    if (!minKeyInRange) {
        *errMsg = stream() << "could not merge chunks, collection " << nss.ns()
                           << " range starting at " << minKey << " does not belong to shard "
                           << shardingState->getShardName();

        warning() << *errMsg;
        return false;
    }

    BSONObj lastDocMin = chunksToMerge.back().getMin();
    BSONObj lastDocMax = chunksToMerge.back().getMax();
    // maxKey is exclusive
    bool maxKeyInRange = lastDocMin.woCompare(maxKey) < 0 && lastDocMax.woCompare(maxKey) >= 0;

    if (!maxKeyInRange) {
        *errMsg = stream() << "could not merge chunks, collection " << nss.ns()
                           << " range ending at " << maxKey << " does not belong to shard "
                           << shardingState->getShardName();

        warning() << *errMsg;
        return false;
    }

    bool validRangeStartKey = firstDocMin.woCompare(minKey) == 0;
    bool validRangeEndKey = lastDocMax.woCompare(maxKey) == 0;

    if (!validRangeStartKey || !validRangeEndKey) {
        *errMsg = stream() << "could not merge chunks, collection " << nss.ns()
                           << " does not contain a chunk "
                           << (!validRangeStartKey ? "starting at " + minKey.toString() : "")
                           << (!validRangeStartKey && !validRangeEndKey ? " or " : "")
                           << (!validRangeEndKey ? "ending at " + maxKey.toString() : "");

        warning() << *errMsg;
        return false;
    }

    if (chunksToMerge.size() == 1) {
        *errMsg = stream() << "could not merge chunks, collection " << nss.ns()
                           << " already contains chunk for " << rangeToString(minKey, maxKey);

        warning() << *errMsg;
        return false;
    }

    // Look for hole in range
    for (size_t i = 1; i < chunksToMerge.size(); ++i) {
        if (chunksToMerge[i - 1].getMax().woCompare(chunksToMerge[i].getMin()) != 0) {
            *errMsg =
                stream() << "could not merge chunks, collection " << nss.ns()
                         << " has a hole in the range " << rangeToString(minKey, maxKey) << " at "
                         << rangeToString(chunksToMerge[i - 1].getMax(), chunksToMerge[i].getMin());

            warning() << *errMsg;
            return false;
        }
    }

    //
    // Run apply ops command
    //
    Status applyOpsStatus = runApplyOpsCmd(txn, chunksToMerge, shardVersion, mergeVersion);
    if (!applyOpsStatus.isOK()) {
        warning() << applyOpsStatus;
        return false;
    }

    //
    // Install merged chunk metadata
    //

    {
        ScopedTransaction transaction(txn, MODE_IX);
        Lock::DBLock writeLk(txn->lockState(), nss.db(), MODE_IX);
        Lock::CollectionLock collLock(txn->lockState(), nss.ns(), MODE_X);

        shardingState->mergeChunks(txn, nss.ns(), minKey, maxKey, mergeVersion);
    }

    //
    // Log change
    //

    BSONObj mergeLogEntry = buildMergeLogEntry(chunksToMerge, shardVersion, mergeVersion);

    grid.catalogManager(txn)->logChange(txn, "merge", nss.ns(), mergeLogEntry);

    return true;
}
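A hypothetical call-site sketch for the function above (txn assumed in scope): the key range is half-open, and passing an unset OID for epoch skips the staleness check:

std::string errMsg;
bool ok = mergeChunks(txn,
                      NamespaceString("test.foo"),
                      BSON("x" << 0),    // minKey, inclusive
                      BSON("x" << 100),  // maxKey, exclusive
                      OID(),             // unset epoch: no staleness check
                      &errMsg);
if (!ok) {
    warning() << "merge failed: " << errMsg;
}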
Example #18
/* ****************************************************************************
*
* mongoSubscribeContext - 
*/
HttpStatusCode mongoSubscribeContext
(
  SubscribeContextRequest*             requestP,
  SubscribeContextResponse*            responseP,
  const std::string&                   tenant,
  std::map<std::string, std::string>&  uriParam,
  const std::string&                   xauthToken,
  const std::vector<std::string>&      servicePathV
)
{
    const std::string  notifyFormatAsString  = uriParam[URI_PARAM_NOTIFY_FORMAT];
    Format             notifyFormat          = stringToFormat(notifyFormatAsString);
    std::string        servicePath           = (servicePathV.size() == 0)? "" : servicePathV[0];    
    bool               reqSemTaken           = false;

    LM_T(LmtMongo, ("Subscribe Context Request: notifications sent in '%s' format", notifyFormatAsString.c_str()));

    reqSemTake(__FUNCTION__, "ngsi10 subscribe request", SemWriteOp, &reqSemTaken);

    //
    // Calculate expiration (using the current time and the duration field in the request).
    // If expiration is not present, use a default value
    //
    long long expiration = -1;
    if (requestP->expires > 0)
    {
      expiration = requestP->expires;
    }
    else
    {
      if (requestP->duration.isEmpty())
      {
        requestP->duration.set(DEFAULT_DURATION);
      }

      expiration = getCurrentTime() + requestP->duration.parse();
    }
    LM_T(LmtMongo, ("Subscription expiration: %lu", expiration));

    /* Create the mongoDB subscription document */
    BSONObjBuilder  sub;
    OID             oid;

    oid.init();

    sub.append("_id", oid);
    sub.append(CSUB_EXPIRATION, expiration);
    sub.append(CSUB_REFERENCE,  requestP->reference.get());


    /* Throttling */
    long long throttling = 0;
    if (requestP->throttling.seconds != -1)
    {
      throttling = (long long) requestP->throttling.seconds;
      sub.append(CSUB_THROTTLING, throttling);
    }
    else if (!requestP->throttling.isEmpty())
    {
      throttling = (long long) requestP->throttling.parse();
      sub.append(CSUB_THROTTLING, throttling);
    }

    if (servicePath != "")
    {
      sub.append(CSUB_SERVICE_PATH, servicePath);
    }

    
    /* Build entities array */
    BSONArrayBuilder entities;
    for (unsigned int ix = 0; ix < requestP->entityIdVector.size(); ++ix)
    {
        EntityId* en = requestP->entityIdVector[ix];

        if (en->type == "")
        {
          entities.append(BSON(CSUB_ENTITY_ID << en->id <<
                               CSUB_ENTITY_ISPATTERN << en->isPattern));
        }
        else
        {
          entities.append(BSON(CSUB_ENTITY_ID << en->id <<
                               CSUB_ENTITY_TYPE << en->type <<
                               CSUB_ENTITY_ISPATTERN << en->isPattern));
        }
    }
    sub.append(CSUB_ENTITIES, entities.arr());

    /* Build attributes array */
    BSONArrayBuilder attrs;
    for (unsigned int ix = 0; ix < requestP->attributeList.size(); ++ix) {
        attrs.append(requestP->attributeList[ix]);
    }
    sub.append(CSUB_ATTRS, attrs.arr());

    /* Build conditions array (including side-effect notifications and threads creation) */
    bool notificationDone = false;
    BSONArray conds = processConditionVector(&requestP->notifyConditionVector,
                                             requestP->entityIdVector,
                                             requestP->attributeList, oid.toString(),
                                             requestP->reference.get(),
                                             &notificationDone,
                                             notifyFormat,
                                             tenant,
                                             xauthToken,
                                             servicePathV,
                                             requestP->expression.q);
    sub.append(CSUB_CONDITIONS, conds);

    /* Build expression */
    BSONObjBuilder expression;

    expression << CSUB_EXPR_Q << requestP->expression.q
               << CSUB_EXPR_GEOM << requestP->expression.geometry
               << CSUB_EXPR_COORDS << requestP->expression.coords
               << CSUB_EXPR_GEOREL << requestP->expression.georel;
    sub.append(CSUB_EXPR, expression.obj());

    /* Last notification */
    long long lastNotificationTime = 0;
    if (notificationDone)
    {
      lastNotificationTime = (long long) getCurrentTime();

      sub.append(CSUB_LASTNOTIFICATION, lastNotificationTime);
      sub.append(CSUB_COUNT, 1);
    }

    /* Adding format to use in notifications */
    sub.append(CSUB_FORMAT, notifyFormatAsString);

    /* Insert document in database */
    std::string err;
    if (!collectionInsert(getSubscribeContextCollectionName(tenant), sub.obj(), &err))
    {
      reqSemGive(__FUNCTION__, "ngsi10 subscribe request (mongo db exception)", reqSemTaken);
      responseP->subscribeError.errorCode.fill(SccReceiverInternalError, err);
      return SccOk;
    }

    //
    // 3. Create Subscription for the cache
    //
    std::string oidString = oid.toString();

    LM_T(LmtSubCache, ("inserting a new sub in cache (%s)", oidString.c_str()));

    cacheSemTake(__FUNCTION__, "Inserting subscription in cache");
    subCacheItemInsert(tenant.c_str(),
                       servicePath.c_str(),
                       requestP,
                       oidString.c_str(),
                       expiration,
                       throttling,
                       notifyFormat,
                       notificationDone,
                       lastNotificationTime,
                       requestP->expression.q,
                       requestP->expression.geometry,
                       requestP->expression.coords,
                       requestP->expression.georel);

    cacheSemGive(__FUNCTION__, "Inserting subscription in cache");

    reqSemGive(__FUNCTION__, "ngsi10 subscribe request", reqSemTaken);

    /* Fill in the response element */
    responseP->subscribeResponse.duration = requestP->duration;
    responseP->subscribeResponse.subscriptionId.set(oid.toString());
    responseP->subscribeResponse.throttling = requestP->throttling;

    return SccOk;
}
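For reference, a hedged sketch of the subscription document the builder above assembles (field names follow the CSUB_* constants; values illustrative, optional fields marked):

// { _id: ObjectId(...),
//   expiration: NumberLong,            // now + duration, or explicit expires
//   reference: "http://host/notify",
//   throttling: NumberLong,            // only if present in the request
//   servicePath: "/...",               // only if non-empty
//   entities: [ {id, type?, isPattern}, ... ],
//   attrs: [ "temperature", ... ],
//   conditions: [ ... ],               // from processConditionVector()
//   expression: { q, geometry, coords, georel },
//   lastNotification: NumberLong,      // only if an initial notification fired
//   count: 1,                          // ditto
//   format: "JSON" }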
Example #19
0
void LocksType::setLockID(const OID& lockID) {
    invariant(lockID.isSet());
    _lockID = lockID;
}
Example #20
0
void SoftAgent::onExecute() {
	//Loop through code
	std::cout << "EXECUTE\n";
	
	OID c = code();
	int size = c.get(Size);
	int t1,t2,t3;
	
	OID reg[MAX_REGISTERS];
	
	for (int i=0; i<size; i++) {
		switch ((int)c.get(i)) {
		case SSET:	t1 = (int)c.get(++i);
				t2 = (int)c.get(++i);
				t3 = (int)c.get(++i);
				reg[t1].set(reg[t2], reg[t3]); break;
				
		case ASET:	t1 = (int)c.get(++i);
				t2 = (int)c.get(++i);
				t3 = (int)c.get(++i);
				reg[t1].set(reg[t2], reg[t3], true); break;

		case SGET:	t1 = (int)c.get(++i);
				t2 = (int)c.get(++i);
				t3 = (int)c.get(++i);
				reg[t1] = reg[t2].get(reg[t3]); break;

		case SDEF:	t1 = (int)c.get(++i);
				t2 = (int)c.get(++i);
				t3 = (int)c.get(++i);
				reg[t1].definefuture(reg[t2], reg[t3]); break;

		case ADEF:	t1 = (int)c.get(++i);
				t2 = (int)c.get(++i);
				t3 = (int)c.get(++i);
				reg[t1].definefuture(reg[t2], reg[t3], true); break;

		case GDEF:	break; //reg[c.get(++i)] = reg[c.get(++i)].get(reg[c.get(++i)]); break;
		case GFUN:	break; //reg[c.get(++i)] = reg[c.get(++i)].get(reg[c.get(++i)]); break;

		case SFUN:	t1 = (int)c.get(++i);
				t2 = (int)c.get(++i);
				t3 = (int)c.get(++i);
				reg[t1].define(reg[t2], reg[t3]); break;

		case AFUN:	t1 = (int)c.get(++i);
				t2 = (int)c.get(++i);
				t3 = (int)c.get(++i);
				reg[t1].define(reg[t2], reg[t3], true); break;

		case CREA:	t1 = (int)c.get(++i);
				reg[t1] = OID::create(); break;
		
		case ISET:	t1 = (int)c.get(++i);
				reg[t1] = c.get(++i); break;
				
		default:	Error(0, "An invalid agent instruction was encountered");
		}
	}
}
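Inferred from the dispatch loop above, where each opcode consumes its operand slots via ++i, a hypothetical instruction stream could look like this:

// [ CREA, 0,          // reg[0] = OID::create()
//   ISET, 1, <imm>,   // reg[1] = immediate value read from the stream
//   SSET, 2, 0, 1 ]   // reg[2].set(reg[0], reg[1])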
Example #21
0
void wgd::Material::bind() {
	if (s_current != this) {
		GLfloat mat[4];
		colour c = specular();
		c.toArray(mat);
		glMaterialfv(GL_FRONT, GL_SPECULAR, mat);
		c = diffuse();
		
		//if diffuse is null, use white (rather than transparent)
		if(get(ix::diffuse)==Null) { c.r=1.0; c.g=1.0; c.b=1.0; c.a=1.0; }
		
		//if (Lighting::enabled() == false) {
		//	glColor4f(c.r,c.g,c.b,c.a);
		//}

		//std::cout << "Colours: " << c.r << " " << c.g << " " << c.b << " " << c.a << "\n";

		c.toArray(mat);
		glMaterialfv(GL_FRONT, GL_DIFFUSE, mat);
		c = ambient();
		c.toArray(mat);
		glMaterialfv(GL_FRONT, GL_AMBIENT, mat);
		mat[0] = shininess();
		glMaterialfv(GL_FRONT, GL_SHININESS, mat);
		c = emission();
		c.toArray(mat);
		glMaterialfv(GL_FRONT, GL_EMISSION, mat);
	
	
		s_current = this;

		Texture *tex;

		//if (get(ix::textures) != Null) {
			for (int i=0; i<Texture::maxTextureUnits(); i++) {
				tex = texture(i);
				if (tex == 0) continue;
				tex->bind(i);
			}
		//} else {
		//	if (get(ix::texture) != Null) {
		//		tex = texture();
		//		if(tex!=NULL)
		//			tex->bind();
		//	}
		//}

		if (get(ix::shader) != Null) {
			Shader *shade = shader();
			shade->bind();

			if (Shader::current() == shade) {
				//Set shader variables.
				OID vars = get(ix::variables);
				OID val;
				char sbuf[50];

				if (vars != Null) {
					for (OID::iterator i=vars.begin(); i!=vars.end(); i++) {
						(*i).toString(sbuf, 50);
						val = vars[*i];
						
						if (val.isDouble()) shade->setVariable(sbuf, (float)val);
						if (val.isLongLong()) shade->setVariable(sbuf, (int)val);
						
						//also need arrays
						/*if (val.isObject()) {
							//val is an array of n values
							//either need to send them individually, or
							//create an array and send them all at once
							strcat(sbuf, "[0]");
							
							//get number of values
							int n=0;
							for(n=0; val[n]!=Null; n++);
							
							//floats or ints?
							if (val[0].IsFloat()){
								float *array = new float[n];
								for(int i=0; i<n; i++) array[i] = (float)val[i];
								shade->setVariable(sbuf, n, array);
								delete [] array;
								//cout << "Array " << sbuf << " = ";
								//for(int i=0; i<n; i++) cout << array[i] << ", ";
								//cout <<  " (" << n << " values)\n"; 
								
							}
							if (val.IsInt()){
								int *array = new int[n];
								for(int i=0; i<n; i++) array[i] = (int)val[i];
								shade->setVariable(sbuf, n, array);
								delete [] array;
							}	
						}*/
					}
				}
			}
		}

		OID blnd = blending();
		if (blnd == BLEND_MULTIPLY) {
			glBlendFunc (GL_DST_COLOR, GL_ZERO);
		} else if (blnd == BLEND_ONE) {
			glBlendFunc(GL_ONE, GL_ONE);
		} else if (blnd == BLEND_ADD) {
			glBlendFunc(GL_SRC_ALPHA, GL_ONE);
		} else {
			//glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA);
		}
		//} else {
		//	glBlendFunc(GL_SRC_ALPHA, GL_ZERO);
		//}
	}
}
Example #22
0
File: server.cpp Project: xrogaan/mongo
namespace mongo {
    
    Database *database = 0;
    string mongosCommand;
    string ourHostname;
    OID serverID;
    bool dbexitCalled = false;
    
    bool inShutdown(){
        return dbexitCalled;
    }
    
    string getDbContext() {
        return "?";
    }

    bool haveLocalShardingInfo( const string& ns ){
        assert( 0 );
        return false;
    }
    
    void usage( char * argv[] ){
        out() << argv[0] << " usage:\n\n";
        out() << " -v+  verbose 1: general 2: more 3: per request 4: more\n";
        out() << " --port <portno>\n";
        out() << " --configdb <configdbname>,[<configdbname>,<configdbname>]\n";
        out() << endl;
    }

    class ShardingConnectionHook : public DBConnectionHook {
    public:
        virtual void onCreate( DBClientBase * conn ){
            conn->simpleCommand( "admin" , 0 , "switchtoclienterrors" );
        }
        virtual void onHandedOut( DBClientBase * conn ){
            ClientInfo::get()->addShard( conn->getServerAddress() );
        }
    } shardingConnectionHook;
    
    class ShardedMessageHandler : public MessageHandler {
    public:
        virtual ~ShardedMessageHandler(){}
        virtual void process( Message& m , AbstractMessagingPort* p ){
            Request r( m , p );
            if ( logLevel > 5 ){
                log(5) << "client id: " << hex << r.getClientId() << "\t" << r.getns() << "\t" << dec << r.op() << endl;
            }
            try {
                setClientId( r.getClientId() );
                r.process();
            }
            catch ( DBException& e ){
                m.data->id = r.id();
                log() << "UserException: " << e.what() << endl;
                if ( r.expectResponse() ){
                    BSONObj err = BSON( "$err" << e.what() );
                    replyToQuery( QueryResult::ResultFlag_ErrSet, p , m , err );
                }
            }
        }
    };

    void sighandler(int sig){
        dbexit(EXIT_CLEAN, (string("received signal ") + BSONObjBuilder::numStr(sig)).c_str());
    }
    
    void setupSignals(){
        // needed for cmdLine, but we do it in init()
    }

    void init(){
        serverID.init();
        setupSIGTRAPforGDB();
        signal(SIGTERM, sighandler);
        signal(SIGINT, sighandler);
    }

    void start() {
        log() << "waiting for connections on port " << cmdLine.port << endl;
        //DbGridListener l(port);
        //l.listen();
        ShardedMessageHandler handler;
        MessageServer * server = createServer( cmdLine.port , &handler );
        server->run();
    }

    DBClientBase *createDirectClient(){
        uassert( 10197 ,  "createDirectClient not implemented for sharding yet" , 0 );
        return 0;
    }

    void printShardingVersionInfo(){
        log() << mongosCommand << " v0.3- (alpha 3t) starting (--help for usage)" << endl;
        printGitVersion();
        printSysInfo();
    }

} // namespace mongo
Example #23
    bool _cleanupUpgradeState(const ConnectionString& configLoc,
                              const OID& lastUpgradeId,
                              string* errMsg)
    {
        string dummy;
        if (!errMsg) errMsg = &dummy;

        scoped_ptr<ScopedDbConnection> connPtr;

        string workingSuffix = genWorkingSuffix(lastUpgradeId);

        try {
            connPtr.reset(ScopedDbConnection::getInternalScopedDbConnection(configLoc, 30));
            ScopedDbConnection& conn = *connPtr;

            // Drop old upgrade collections on config server

            bool resultOk;
            BSONObj dropResult;

            resultOk = conn->dropCollection(CollectionType::ConfigNS + workingSuffix, &dropResult);

            if (!resultOk) {

                *errMsg = stream() << "could not drop collection "
                                   << (CollectionType::ConfigNS + workingSuffix)
                                   << causedBy(dropResult.toString());

                return false;
            }

            resultOk = conn->dropCollection(ChunkType::ConfigNS + workingSuffix, &dropResult);

            if (!resultOk) {

                *errMsg = stream() << "could not drop collection "
                                   << (ChunkType::ConfigNS + workingSuffix)
                                   << causedBy(dropResult.toString());

                return false;
            }

            // Force old locks taken by previous upgrade process on config server
            // This is safe because no previous upgrade process can be active while we hold the
            // upgrade lock.

            log() << "forcing upgrade locks of previous failed upgrade with id "
                  << lastUpgradeId.toString() << endl;

            // Explicit builder needed b/c of regex
            BSONObjBuilder lockQueryB;
            lockQueryB.appendRegex(LocksType::why(), 
                                   pcrecpp::RE::QuoteMeta("(" + lastUpgradeId.toString() + ")"));

            conn->update(LocksType::ConfigNS,
                         lockQueryB.obj(),
                         BSON("$set" << BSON(LocksType::state(0))),
                         false, true); // multi
            _checkGLE(conn);

        }
        catch (const DBException& e) {

            *errMsg = stream() << "could not drop collections during cleanup of upgrade "
                               << lastUpgradeId << causedBy(e);

            return false;
        }

        connPtr->done();
        return true;
    }
Example #24
0
File: server.cpp Project: xrogaan/mongo
 void init(){
     serverID.init();
     setupSIGTRAPforGDB();
     signal(SIGTERM, sighandler);
     signal(SIGINT, sighandler);
 }
Example #25
0
void v8ToMongoElement( BSONObjBuilder & b , v8::Handle<v8::String> name , const string sname , v8::Handle<v8::Value> value ){
        
    if ( value->IsString() ){
        if ( sname == "$where" )
            b.appendCode( sname.c_str() , toSTLString( value ).c_str() );
        else
            b.append( sname.c_str() , toSTLString( value ).c_str() );
        return;
    }
        
    if ( value->IsFunction() ){
        b.appendCode( sname.c_str() , toSTLString( value ).c_str() );
        return;
    }
    
    if ( value->IsNumber() ){
        b.append( sname.c_str() , value->ToNumber()->Value() );
        return;
    }
    
    if ( value->IsArray() ){
        BSONObj sub = v8ToMongo( value->ToObject() );
        b.appendArray( sname.c_str() , sub );
        return;
    }
    
    if ( value->IsDate() ){
        b.appendDate( sname.c_str() , (unsigned long long )(v8::Date::Cast( *value )->NumberValue()) );
        return;
    }
    
    if ( value->IsObject() ){
        string s = toSTLString( value );
        if ( s.size() && s[0] == '/' ){
            s = s.substr( 1 );
            string r = s.substr( 0 , s.find( "/" ) );
            string o = s.substr( s.find( "/" ) + 1 );
            b.appendRegex( sname.c_str() , r.c_str() , o.c_str() );
        }
        else if ( value->ToObject()->GetPrototype()->IsObject() &&
                  value->ToObject()->GetPrototype()->ToObject()->HasRealNamedProperty( String::New( "isObjectId" ) ) ){
            OID oid;
            oid.init( toSTLString( value ) );
            b.appendOID( sname.c_str() , &oid );
        }
        else {
            BSONObj sub = v8ToMongo( value->ToObject() );
            b.append( sname.c_str() , sub );
        }
        return;
    }
    
    if ( value->IsBoolean() ){
        b.appendBool( sname.c_str() , value->ToBoolean()->Value() );
        return;
    }
    
    else if ( value->IsUndefined() ){
        return;
    }
    
    else if ( value->IsNull() ){
        b.appendNull( sname.c_str() );
        return;
    }

    cout << "don't know how to covert to mongo field [" << name << "]\t" << value << endl;
}
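To make the regex branch above concrete, a worked sketch of the split; note it cuts at the first '/' after position 0, so a '/' inside the pattern would truncate it, a limitation of this parse:

std::string s = "/^a.c$/i";                 // how v8 stringifies a RegExp
s = s.substr(1);                            // "^a.c$/i"
std::string r = s.substr(0, s.find("/"));   // pattern: "^a.c$"
std::string o = s.substr(s.find("/") + 1);  // options: "i"
// b.appendRegex(sname.c_str(), r.c_str(), o.c_str());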
Example #26
0
void
FilePrivateKeyStorage::generateKeyPair
  (const Name& keyName, const KeyParams& params)
{
  if (doesKeyExist(keyName, KEY_CLASS_PUBLIC))
    throw SecurityException("Public Key already exists");
  if (doesKeyExist(keyName, KEY_CLASS_PRIVATE))
    throw SecurityException("Private Key already exists");

  Blob publicKeyDer;
  Blob privateKeyDer;

  if (params.getKeyType() == KEY_TYPE_RSA) {
    const RsaKeyParams& rsaParams = static_cast<const RsaKeyParams&>(params);

    BIGNUM* exponent = 0;
    RSA* rsa = 0;

    exponent = BN_new();
    if (BN_set_word(exponent, RSA_F4) == 1) {
      rsa = RSA_new();
      if (RSA_generate_key_ex(rsa, rsaParams.getKeySize(), exponent, NULL) == 1) {
        // Encode the public key.
        int length = i2d_RSA_PUBKEY(rsa, NULL);
        publicKeyDer = Blob(ptr_lib::make_shared<vector<uint8_t> >(length), false);
        uint8_t* derPointer = const_cast<uint8_t*>(publicKeyDer.buf());
        i2d_RSA_PUBKEY(rsa, &derPointer);

        // Encode the private key.
        length = i2d_RSAPrivateKey(rsa, NULL);
        vector<uint8_t> pkcs1PrivateKeyDer(length);
        derPointer = &pkcs1PrivateKeyDer[0];
        i2d_RSAPrivateKey(rsa, &derPointer);
        privateKeyDer = encodePkcs8PrivateKey
          (pkcs1PrivateKeyDer, OID(RSA_ENCRYPTION_OID),
           ptr_lib::make_shared<DerNode::DerNull>());
      }
    }

    BN_free(exponent);
    RSA_free(rsa);
  }
  else if (params.getKeyType() == KEY_TYPE_ECDSA) {
    const EcdsaKeyParams& ecdsaParams = static_cast<const EcdsaKeyParams&>(params);

    OID parametersOid;
    int curveId = -1;

    // Find the entry in EC_KEY_INFO.
    for (size_t i = 0 ; i < sizeof(EC_KEY_INFO) / sizeof(EC_KEY_INFO[0]); ++i) {
      if (EC_KEY_INFO[i].keySize == ecdsaParams.getKeySize()) {
        curveId = EC_KEY_INFO[i].curveId;
        parametersOid.setIntegerList
          (EC_KEY_INFO[i].oidIntegerList, EC_KEY_INFO[i].oidIntegerListLength);

        break;
      }
    }
    if (curveId == -1)
      throw SecurityException("Unsupported keySize for KEY_TYPE_ECDSA");

    EC_KEY* ecKey = EC_KEY_new_by_curve_name(curveId);
    if (ecKey != NULL) {
      if (EC_KEY_generate_key(ecKey) == 1) {
        // Encode the public key.
        int length = i2d_EC_PUBKEY(ecKey, NULL);
        vector<uint8_t> opensslPublicKeyDer(length);
        uint8_t* derPointer = &opensslPublicKeyDer[0];
        i2d_EC_PUBKEY(ecKey, &derPointer);
        // Convert the openssl style to ndn-cxx which has the simple AlgorithmIdentifier.
        // Find the bit string which is the second child.
        ptr_lib::shared_ptr<DerNode> parsedNode = DerNode::parse
          (&opensslPublicKeyDer[0], 0);
        const std::vector<ptr_lib::shared_ptr<DerNode> >& children =
          parsedNode->getChildren();
        publicKeyDer = encodeSubjectPublicKeyInfo
          (OID(EC_ENCRYPTION_OID),
           ptr_lib::make_shared<DerNode::DerOid>(parametersOid), children[1]);

        // Encode the private key.
        EC_KEY_set_enc_flags(ecKey, EC_PKEY_NO_PARAMETERS | EC_PKEY_NO_PUBKEY);
        length = i2d_ECPrivateKey(ecKey, NULL);
        vector<uint8_t> pkcs1PrivateKeyDer(length);
        derPointer = &pkcs1PrivateKeyDer[0];
        i2d_ECPrivateKey(ecKey, &derPointer);
        privateKeyDer = encodePkcs8PrivateKey
          (pkcs1PrivateKeyDer, OID(EC_ENCRYPTION_OID),
           ptr_lib::make_shared<DerNode::DerOid>(parametersOid));
      }
    }

    EC_KEY_free(ecKey);
  }
  else
    throw SecurityException("Unsupported key type");

  string keyUri = keyName.toUri();
  string publicKeyFilePath = nameTransform(keyUri, ".pub");
  string privateKeyFilePath = nameTransform(keyUri, ".pri");

  ofstream publicKeyFile(publicKeyFilePath.c_str());
  publicKeyFile << toBase64(publicKeyDer.buf(), publicKeyDer.size(), true);
  ofstream privateKeyFile(privateKeyFilePath.c_str());
  privateKeyFile << toBase64(privateKeyDer.buf(), privateKeyDer.size(), true);

  ::chmod(publicKeyFilePath.c_str(),  S_IRUSR | S_IRGRP | S_IROTH);
  ::chmod(privateKeyFilePath.c_str(), S_IRUSR);
}
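A hypothetical usage sketch; the exact KeyParams constructors vary by library version, so RsaKeyParams(2048) is an assumption:

FilePrivateKeyStorage storage;
Name keyName("/example/identity/ksk-123");
storage.generateKeyPair(keyName, RsaKeyParams(2048));  // assumed ctor signature
// Afterwards the .pub file is world-readable and the .pri file owner-only,
// per the chmod calls above.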
Example #27
0
    bool mergeChunks( OperationContext* txn,
                      const NamespaceString& nss,
                      const BSONObj& minKey,
                      const BSONObj& maxKey,
                      const OID& epoch,
                      string* errMsg ) {

        //
        // Get sharding state up-to-date
        //

        ConnectionString configLoc = ConnectionString::parse( shardingState.getConfigServer(),
                                                              *errMsg );
        if ( !configLoc.isValid() ){
            warning() << *errMsg << endl;
            return false;
        }

        //
        // Get the distributed lock
        //

        ScopedDistributedLock collLock( configLoc, nss.ns() );
        collLock.setLockMessage( stream() << "merging chunks in " << nss.ns() << " from "
                                          << minKey << " to " << maxKey );

        Status acquisitionStatus = collLock.tryAcquire();
        if (!acquisitionStatus.isOK()) {
            *errMsg = stream() << "could not acquire collection lock for " << nss.ns()
                               << " to merge chunks in [" << minKey << "," << maxKey << ")"
                               << causedBy(acquisitionStatus);

            warning() << *errMsg << endl;
            return false;
        }

        //
        // We now have the collection lock, refresh metadata to latest version and sanity check
        //

        ChunkVersion shardVersion;
        Status status = shardingState.refreshMetadataNow(txn, nss.ns(), &shardVersion);

        if ( !status.isOK() ) {

            *errMsg = str::stream() << "could not merge chunks, failed to refresh metadata for "
                                    << nss.ns() << causedBy( status.reason() );

            warning() << *errMsg << endl;
            return false;
        }

        if ( epoch.isSet() && shardVersion.epoch() != epoch ) {

            *errMsg = stream() << "could not merge chunks, collection " << nss.ns()
                               << " has changed" << " since merge was sent" << "(sent epoch : "
                               << epoch.toString()
                               << ", current epoch : " << shardVersion.epoch().toString() << ")";

            warning() << *errMsg << endl;
            return false;
        }

        CollectionMetadataPtr metadata = shardingState.getCollectionMetadata( nss.ns() );

        if ( !metadata || metadata->getKeyPattern().isEmpty() ) {

            *errMsg = stream() << "could not merge chunks, collection " << nss.ns()
                               << " is not sharded";

            warning() << *errMsg << endl;
            return false;
        }

        dassert( metadata->getShardVersion().equals( shardVersion ) );

        if ( !metadata->isValidKey( minKey ) || !metadata->isValidKey( maxKey ) ) {

            *errMsg = stream() << "could not merge chunks, the range "
                               << rangeToString( minKey, maxKey ) << " is not valid"
                               << " for collection " << nss.ns() << " with key pattern "
                               << metadata->getKeyPattern();

            warning() << *errMsg << endl;
            return false;
        }

        //
        // Get merged chunk information
        //

        ChunkVersion mergeVersion = metadata->getCollVersion();
        mergeVersion.incMinor();

        OwnedPointerVector<ChunkType> chunksToMerge;

        ChunkType itChunk;
        itChunk.setMin( minKey );
        itChunk.setMax( minKey );
        itChunk.setNS( nss.ns() );
        itChunk.setShard( shardingState.getShardName() );

        while ( itChunk.getMax().woCompare( maxKey ) < 0 &&
                metadata->getNextChunk( itChunk.getMax(), &itChunk ) ) {
            auto_ptr<ChunkType> saved( new ChunkType );
            itChunk.cloneTo( saved.get() );
            chunksToMerge.mutableVector().push_back( saved.release() );
        }

        if ( chunksToMerge.empty() ) {

            *errMsg = stream() << "could not merge chunks, collection " << nss.ns()
                               << " range starting at " << minKey
                               << " and ending at " << maxKey
                               << " does not belong to shard " << shardingState.getShardName();

            warning() << *errMsg << endl;
            return false;
        }

        //
        // Validate the range starts and ends at chunks and has no holes, error if not valid
        //

        BSONObj firstDocMin = ( *chunksToMerge.begin() )->getMin();
        BSONObj firstDocMax = ( *chunksToMerge.begin() )->getMax();
        // minKey is inclusive
        bool minKeyInRange = rangeContains( firstDocMin, firstDocMax, minKey );

        if ( !minKeyInRange ) {

            *errMsg = stream() << "could not merge chunks, collection " << nss.ns()
                               << " range starting at " << minKey
                               << " does not belong to shard " << shardingState.getShardName();

            warning() << *errMsg << endl;
            return false;
        }

        BSONObj lastDocMin = ( *chunksToMerge.rbegin() )->getMin();
        BSONObj lastDocMax = ( *chunksToMerge.rbegin() )->getMax();
        // maxKey is exclusive
        bool maxKeyInRange = lastDocMin.woCompare( maxKey ) < 0 &&
                lastDocMax.woCompare( maxKey ) >= 0;

        if ( !maxKeyInRange ) {
            *errMsg = stream() << "could not merge chunks, collection " << nss.ns()
                               << " range ending at " << maxKey
                               << " does not belong to shard " << shardingState.getShardName();

            warning() << *errMsg << endl;
            return false;
        }

        bool validRangeStartKey = firstDocMin.woCompare( minKey ) == 0;
        bool validRangeEndKey = lastDocMax.woCompare( maxKey ) == 0;

        if ( !validRangeStartKey || !validRangeEndKey ) {

            *errMsg = stream() << "could not merge chunks, collection " << nss.ns()
                               << " does not contain a chunk "
                               << ( !validRangeStartKey ? "starting at " + minKey.toString() : "" )
                               << ( !validRangeStartKey && !validRangeEndKey ? " or " : "" )
                               << ( !validRangeEndKey ? "ending at " + maxKey.toString() : "" );

            warning() << *errMsg << endl;
            return false;
        }

        if ( chunksToMerge.size() == 1 ) {

            *errMsg = stream() << "could not merge chunks, collection " << nss.ns()
                               << " already contains chunk for " << rangeToString( minKey, maxKey );

            warning() << *errMsg << endl;
            return false;
        }

        bool holeInRange = false;

        // Look for hole in range
        ChunkType* prevChunk = *chunksToMerge.begin();
        ChunkType* nextChunk = NULL;
        for ( OwnedPointerVector<ChunkType>::const_iterator it = chunksToMerge.begin();
                it != chunksToMerge.end(); ++it ) {
            if ( it == chunksToMerge.begin() ) continue;

            nextChunk = *it;
            if ( prevChunk->getMax().woCompare( nextChunk->getMin() ) != 0 ) {
                holeInRange = true;
                break;
            }
            prevChunk = nextChunk;
        }

        if ( holeInRange ) {

            dassert( NULL != nextChunk );
            *errMsg = stream() << "could not merge chunks, collection " << nss.ns()
                               << " has a hole in the range " << rangeToString( minKey, maxKey )
                               << " at " << rangeToString( prevChunk->getMax(),
                                                           nextChunk->getMin() );

            warning() << *errMsg << endl;
            return false;
        }

        //
        // Run apply ops command
        //

        BSONObj applyOpsCmd = buildApplyOpsCmd( chunksToMerge,
                                                shardVersion,
                                                mergeVersion );

        bool ok;
        BSONObj result;
        try {
            ScopedDbConnection conn( configLoc, 30.0 );
            ok = conn->runCommand( "config", applyOpsCmd, result );
            if ( !ok ) *errMsg = result.toString();
            conn.done();
        }
        catch( const DBException& ex ) {
            ok = false;
            *errMsg = ex.toString();
        }

        if ( !ok ) {
            *errMsg = stream() << "could not merge chunks for " << nss.ns()
                               << ", writing to config failed" << causedBy( errMsg );

            warning() << *errMsg << endl;
            return false;
        }

        //
        // Install merged chunk metadata
        //

        {
            Lock::DBLock writeLk(txn->lockState(), nss.db(), newlm::MODE_X);
            shardingState.mergeChunks(txn, nss.ns(), minKey, maxKey, mergeVersion);
        }

        //
        // Log change
        //

        BSONObj mergeLogEntry = buildMergeLogEntry( chunksToMerge,
                                                    shardVersion,
                                                    mergeVersion );

        configServer.logChange( "merge", nss.ns(), mergeLogEntry );

        return true;
    }
Example #28
0
File: utils.cpp Project: Sgenmi/luamongo
static void lua_append_bson(lua_State *L, const char *key, int stackpos, BSONObjBuilder *builder, int ref) {
    int type = lua_type(L, stackpos);

    if (type == LUA_TTABLE) {
        if (stackpos < 0) stackpos = lua_gettop(L) + stackpos + 1;
        lua_checkstack(L, 3);

        int bsontype_found = luaL_getmetafield(L, stackpos, "__bsontype");
        if (!bsontype_found) {
            // not a special bsontype
            // handle as a regular table, iterating keys

            lua_rawgeti(L, LUA_REGISTRYINDEX, ref);
            lua_pushvalue(L, stackpos);
            lua_rawget(L, -2);
            if (lua_toboolean(L, -1)) { // do nothing if the same table encountered
                lua_pop(L, 2);
            } else {
                lua_pop(L, 1);
                lua_pushvalue(L, stackpos);
                lua_pushboolean(L, 1);
                lua_rawset(L, -3);
                lua_pop(L, 1);

                BSONObjBuilder b;

                bool dense = true;
                int len = 0;
                for (lua_pushnil(L); lua_next(L, stackpos); lua_pop(L, 1)) {
                    ++len;
                    if ((lua_type(L, -2) != LUA_TNUMBER) || (lua_tointeger(L, -2) != len)) {
                        lua_pop(L, 2);
                        dense = false;
                        break;
                    }
                }

                if (dense) {
                    for (int i = 0; i < len; i++) {
                        lua_rawgeti(L, stackpos, i+1);
                        std::stringstream ss;
                        ss << i;

                        lua_append_bson(L, ss.str().c_str(), -1, &b, ref);
                        lua_pop(L, 1);
                    }

                    builder->appendArray(key, b.obj());
                } else {
                    for (lua_pushnil(L); lua_next(L, stackpos); lua_pop(L, 1)) {
                        switch (lua_type(L, -2)) { // key type
                            case LUA_TNUMBER: {
                                std::stringstream ss;
                                ss << lua_tonumber(L, -2);
                                lua_append_bson(L, ss.str().c_str(), -1, &b, ref);
                                break;
                            }
                            case LUA_TSTRING: {
                                lua_append_bson(L, lua_tostring(L, -2), -1, &b, ref);
                                break;
                            }
                        }
                    }

                    builder->append(key, b.obj());
                }
            }
        } else {
            int bson_type = lua_tointeger(L, -1);
            lua_pop(L, 1);
            lua_rawgeti(L, -1, 1);
            switch (bson_type) {
            case mongo::Date:
                builder->appendDate(key, lua_tonumber(L, -1));
                break;
            case mongo::Timestamp:
                builder->appendTimestamp(key);
                break;
            case mongo::RegEx: {
                const char* regex = lua_tostring(L, -1);
                lua_rawgeti(L, -2, 2); // options
                const char* options = lua_tostring(L, -1);
                lua_pop(L, 1);
                if (regex && options) builder->appendRegex(key, regex, options);
                break;
            }
            case mongo::NumberInt:
                builder->append(key, static_cast<int32_t>(lua_tointeger(L, -1)));
                break;
            case mongo::NumberLong:
                builder->append(key, static_cast<long long int>(lua_tonumber(L, -1)));
                break;
            case mongo::Symbol: {
                const char* c = lua_tostring(L, -1);
                if (c) builder->appendSymbol(key, c);
                break;
            }
            case mongo::BinData: {
                size_t l;
                const char* c = lua_tolstring(L, -1, &l);
                if (c) builder->appendBinData(key, l, mongo::BinDataGeneral, c);
                break;
            }
            case mongo::jstOID: {
                OID oid;
                const char* c = lua_tostring(L, -1);
                if (c) {
                    oid.init(c);
                    builder->appendOID(key, &oid);
                }
                break;
            }
            case mongo::jstNULL:
                builder->appendNull(key);
                break;

            /*default:
                luaL_error(L, LUAMONGO_UNSUPPORTED_BSON_TYPE, luaL_typename(L, stackpos));*/
            }
            lua_pop(L, 1);
        }
    } else if (type == LUA_TNIL) {
        builder->appendNull(key);
    } else if (type == LUA_TNUMBER) {
        double numval = lua_tonumber(L, stackpos);
        if ((numval == floor(numval)) && fabs(numval)< INT_MAX ) {
            // The numeric value looks like an integer, treat it as such.
            // This is closer to how JSON datatypes behave.
            int intval = lua_tointeger(L, stackpos);
            builder->append(key, static_cast<int32_t>(intval));
        } else {
            builder->append(key, numval);
        }
    } else if (type == LUA_TBOOLEAN) {
        builder->appendBool(key, lua_toboolean(L, stackpos));
    } else if (type == LUA_TSTRING) {
        builder->append(key, lua_tostring(L, stackpos));
    }/* else {
        luaL_error(L, LUAMONGO_UNSUPPORTED_LUA_TYPE, luaL_typename(L, stackpos));
    }*/
}
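A hedged sketch of how a caller might set up the registry-anchored seen-set that ref names, assuming the Lua table to convert is on top of the stack:

lua_newtable(L);                           // fresh seen-set for cycle detection
int ref = luaL_ref(L, LUA_REGISTRYINDEX);  // pops it, anchors it in the registry
BSONObjBuilder builder;
lua_append_bson(L, "doc", lua_gettop(L), &builder, ref);
luaL_unref(L, LUA_REGISTRYINDEX, ref);     // release the seen-set when done
BSONObj result = builder.obj();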
Example #29
0
std::string lookup(const OID& oid)
   {
   const std::string oid_str = oid.as_string();
   if(oid_str == "1.0.14888.3.0.5") return "ECKCDSA";
   if(oid_str == "1.2.156.10197.1.301") return "sm2p256v1";
   if(oid_str == "1.2.156.10197.1.301.1") return "SM2_Sig";
   if(oid_str == "1.2.156.10197.1.301.2") return "SM2_Kex";
   if(oid_str == "1.2.156.10197.1.301.3") return "SM2_Enc";
   if(oid_str == "1.2.156.10197.1.401") return "SM3";
   if(oid_str == "1.2.156.10197.1.504") return "RSA/EMSA3(SM3)";
   if(oid_str == "1.2.250.1.223.101.256.1") return "frp256v1";
   if(oid_str == "1.2.410.200004.1.100.4.3") return "ECKCDSA/EMSA1(SHA-1)";
   if(oid_str == "1.2.410.200004.1.100.4.4") return "ECKCDSA/EMSA1(SHA-224)";
   if(oid_str == "1.2.410.200004.1.100.4.5") return "ECKCDSA/EMSA1(SHA-256)";
   if(oid_str == "1.2.410.200004.1.4") return "SEED/CBC";
   if(oid_str == "1.2.643.2.2.19") return "GOST-34.10";
   if(oid_str == "1.2.643.2.2.3") return "GOST-34.10/EMSA1(GOST-R-34.11-94)";
   if(oid_str == "1.2.643.2.2.35.1") return "gost_256A";
   if(oid_str == "1.2.643.2.2.36.0") return "gost_256A";
   if(oid_str == "1.2.643.7.1.1.2.2") return "Streebog-256";
   if(oid_str == "1.2.643.7.1.1.2.3") return "Streebog-512";
   if(oid_str == "1.2.840.10040.4.1") return "DSA";
   if(oid_str == "1.2.840.10040.4.3") return "DSA/EMSA1(SHA-160)";
   if(oid_str == "1.2.840.10045.2.1") return "ECDSA";
   if(oid_str == "1.2.840.10045.3.1.1") return "secp192r1";
   if(oid_str == "1.2.840.10045.3.1.2") return "x962_p192v2";
   if(oid_str == "1.2.840.10045.3.1.3") return "x962_p192v3";
   if(oid_str == "1.2.840.10045.3.1.4") return "x962_p239v1";
   if(oid_str == "1.2.840.10045.3.1.5") return "x962_p239v2";
   if(oid_str == "1.2.840.10045.3.1.6") return "x962_p239v3";
   if(oid_str == "1.2.840.10045.3.1.7") return "secp256r1";
   if(oid_str == "1.2.840.10045.4.1") return "ECDSA/EMSA1(SHA-160)";
   if(oid_str == "1.2.840.10045.4.3.1") return "ECDSA/EMSA1(SHA-224)";
   if(oid_str == "1.2.840.10045.4.3.2") return "ECDSA/EMSA1(SHA-256)";
   if(oid_str == "1.2.840.10045.4.3.3") return "ECDSA/EMSA1(SHA-384)";
   if(oid_str == "1.2.840.10045.4.3.4") return "ECDSA/EMSA1(SHA-512)";
   if(oid_str == "1.2.840.10046.2.1") return "DH";
   if(oid_str == "1.2.840.113533.7.66.10") return "CAST-128/CBC";
   if(oid_str == "1.2.840.113533.7.66.15") return "KeyWrap.CAST-128";
   if(oid_str == "1.2.840.113549.1.1.1") return "RSA";
   if(oid_str == "1.2.840.113549.1.1.10") return "RSA/EMSA4";
   if(oid_str == "1.2.840.113549.1.1.11") return "RSA/EMSA3(SHA-256)";
   if(oid_str == "1.2.840.113549.1.1.12") return "RSA/EMSA3(SHA-384)";
   if(oid_str == "1.2.840.113549.1.1.13") return "RSA/EMSA3(SHA-512)";
   if(oid_str == "1.2.840.113549.1.1.14") return "RSA/EMSA3(SHA-224)";
   if(oid_str == "1.2.840.113549.1.1.16") return "RSA/EMSA3(SHA-512-256)";
   if(oid_str == "1.2.840.113549.1.1.4") return "RSA/EMSA3(MD5)";
   if(oid_str == "1.2.840.113549.1.1.5") return "RSA/EMSA3(SHA-160)";
   if(oid_str == "1.2.840.113549.1.1.7") return "RSA/OAEP";
   if(oid_str == "1.2.840.113549.1.1.8") return "MGF1";
   if(oid_str == "1.2.840.113549.1.5.12") return "PKCS5.PBKDF2";
   if(oid_str == "1.2.840.113549.1.5.13") return "PBE-PKCS5v20";
   if(oid_str == "1.2.840.113549.1.9.1") return "PKCS9.EmailAddress";
   if(oid_str == "1.2.840.113549.1.9.14") return "PKCS9.ExtensionRequest";
   if(oid_str == "1.2.840.113549.1.9.16.3.6") return "KeyWrap.TripleDES";
   if(oid_str == "1.2.840.113549.1.9.16.3.7") return "KeyWrap.RC2";
   if(oid_str == "1.2.840.113549.1.9.16.3.8") return "Compression.Zlib";
   if(oid_str == "1.2.840.113549.1.9.2") return "PKCS9.UnstructuredName";
   if(oid_str == "1.2.840.113549.1.9.3") return "PKCS9.ContentType";
   if(oid_str == "1.2.840.113549.1.9.4") return "PKCS9.MessageDigest";
   if(oid_str == "1.2.840.113549.1.9.7") return "PKCS9.ChallengePassword";
   if(oid_str == "1.2.840.113549.2.10") return "HMAC(SHA-384)";
   if(oid_str == "1.2.840.113549.2.11") return "HMAC(SHA-512)";
   if(oid_str == "1.2.840.113549.2.5") return "MD5";
   if(oid_str == "1.2.840.113549.2.7") return "HMAC(SHA-160)";
   if(oid_str == "1.2.840.113549.2.8") return "HMAC(SHA-224)";
   if(oid_str == "1.2.840.113549.2.9") return "HMAC(SHA-256)";
   if(oid_str == "1.2.840.113549.3.2") return "RC2/CBC";
   if(oid_str == "1.2.840.113549.3.7") return "TripleDES/CBC";
   if(oid_str == "1.3.101.110") return "Curve25519";
   if(oid_str == "1.3.101.112") return "Ed25519";
   if(oid_str == "1.3.132.0.10") return "secp256k1";
   if(oid_str == "1.3.132.0.30") return "secp160r2";
   if(oid_str == "1.3.132.0.31") return "secp192k1";
   if(oid_str == "1.3.132.0.32") return "secp224k1";
   if(oid_str == "1.3.132.0.33") return "secp224r1";
   if(oid_str == "1.3.132.0.34") return "secp384r1";
   if(oid_str == "1.3.132.0.35") return "secp521r1";
   if(oid_str == "1.3.132.0.8") return "secp160r1";
   if(oid_str == "1.3.132.0.9") return "secp160k1";
   if(oid_str == "1.3.132.1.12") return "ECDH";
   if(oid_str == "1.3.14.3.2.26") return "SHA-160";
   if(oid_str == "1.3.14.3.2.7") return "DES/CBC";
   if(oid_str == "1.3.36.3.2.1") return "RIPEMD-160";
   if(oid_str == "1.3.36.3.3.1.2") return "RSA/EMSA3(RIPEMD-160)";
   if(oid_str == "1.3.36.3.3.2.5.2.1") return "ECGDSA";
   if(oid_str == "1.3.36.3.3.2.5.4.1") return "ECGDSA/EMSA1(RIPEMD-160)";
   if(oid_str == "1.3.36.3.3.2.5.4.2") return "ECGDSA/EMSA1(SHA-160)";
   if(oid_str == "1.3.36.3.3.2.5.4.3") return "ECGDSA/EMSA1(SHA-224)";
   if(oid_str == "1.3.36.3.3.2.5.4.4") return "ECGDSA/EMSA1(SHA-256)";
   if(oid_str == "1.3.36.3.3.2.5.4.5") return "ECGDSA/EMSA1(SHA-384)";
   if(oid_str == "1.3.36.3.3.2.5.4.6") return "ECGDSA/EMSA1(SHA-512)";
   if(oid_str == "1.3.36.3.3.2.8.1.1.1") return "brainpool160r1";
   if(oid_str == "1.3.36.3.3.2.8.1.1.11") return "brainpool384r1";
   if(oid_str == "1.3.36.3.3.2.8.1.1.13") return "brainpool512r1";
   if(oid_str == "1.3.36.3.3.2.8.1.1.3") return "brainpool192r1";
   if(oid_str == "1.3.36.3.3.2.8.1.1.5") return "brainpool224r1";
   if(oid_str == "1.3.36.3.3.2.8.1.1.7") return "brainpool256r1";
   if(oid_str == "1.3.36.3.3.2.8.1.1.9") return "brainpool320r1";
   if(oid_str == "1.3.6.1.4.1.11591.12.2") return "Tiger(24,3)";
   if(oid_str == "1.3.6.1.4.1.25258.1.3") return "McEliece";
   if(oid_str == "1.3.6.1.4.1.25258.1.5") return "XMSS";
   if(oid_str == "1.3.6.1.4.1.25258.1.6.1") return "GOST-34.10/EMSA1(SHA-256)";
   if(oid_str == "1.3.6.1.4.1.25258.3.1") return "Serpent/CBC";
   if(oid_str == "1.3.6.1.4.1.25258.3.101") return "Serpent/GCM";
   if(oid_str == "1.3.6.1.4.1.25258.3.102") return "Twofish/GCM";
   if(oid_str == "1.3.6.1.4.1.25258.3.2") return "Threefish-512/CBC";
   if(oid_str == "1.3.6.1.4.1.25258.3.2.1") return "AES-128/OCB";
   if(oid_str == "1.3.6.1.4.1.25258.3.2.2") return "AES-192/OCB";
   if(oid_str == "1.3.6.1.4.1.25258.3.2.3") return "AES-256/OCB";
   if(oid_str == "1.3.6.1.4.1.25258.3.2.4") return "Serpent/OCB";
   if(oid_str == "1.3.6.1.4.1.25258.3.2.5") return "Twofish/OCB";
   if(oid_str == "1.3.6.1.4.1.25258.3.3") return "Twofish/CBC";
   if(oid_str == "1.3.6.1.4.1.3029.1.2.1") return "ElGamal";
   if(oid_str == "1.3.6.1.4.1.311.20.2.2") return "Microsoft SmartcardLogon";
   if(oid_str == "1.3.6.1.4.1.8301.3.1.2.9.0.38") return "secp521r1";
   if(oid_str == "1.3.6.1.5.5.7.1.1") return "PKIX.AuthorityInformationAccess";
   if(oid_str == "1.3.6.1.5.5.7.3.1") return "PKIX.ServerAuth";
   if(oid_str == "1.3.6.1.5.5.7.3.2") return "PKIX.ClientAuth";
   if(oid_str == "1.3.6.1.5.5.7.3.3") return "PKIX.CodeSigning";
   if(oid_str == "1.3.6.1.5.5.7.3.4") return "PKIX.EmailProtection";
   if(oid_str == "1.3.6.1.5.5.7.3.5") return "PKIX.IPsecEndSystem";
   if(oid_str == "1.3.6.1.5.5.7.3.6") return "PKIX.IPsecTunnel";
   if(oid_str == "1.3.6.1.5.5.7.3.7") return "PKIX.IPsecUser";
   if(oid_str == "1.3.6.1.5.5.7.3.8") return "PKIX.TimeStamping";
   if(oid_str == "1.3.6.1.5.5.7.3.9") return "PKIX.OCSPSigning";
   if(oid_str == "1.3.6.1.5.5.7.48.1") return "PKIX.OCSP";
   if(oid_str == "1.3.6.1.5.5.7.48.1.1") return "PKIX.OCSP.BasicResponse";
   if(oid_str == "1.3.6.1.5.5.7.48.2") return "PKIX.CertificateAuthorityIssuers";
   if(oid_str == "1.3.6.1.5.5.7.8.5") return "PKIX.XMPPAddr";
   if(oid_str == "2.16.840.1.101.3.4.1.2") return "AES-128/CBC";
   if(oid_str == "2.16.840.1.101.3.4.1.22") return "AES-192/CBC";
   if(oid_str == "2.16.840.1.101.3.4.1.25") return "KeyWrap.AES-192";
   if(oid_str == "2.16.840.1.101.3.4.1.26") return "AES-192/GCM";
   if(oid_str == "2.16.840.1.101.3.4.1.27") return "AES-192/CCM";
   if(oid_str == "2.16.840.1.101.3.4.1.42") return "AES-256/CBC";
   if(oid_str == "2.16.840.1.101.3.4.1.45") return "KeyWrap.AES-256";
   if(oid_str == "2.16.840.1.101.3.4.1.46") return "AES-256/GCM";
   if(oid_str == "2.16.840.1.101.3.4.1.47") return "AES-256/CCM";
   if(oid_str == "2.16.840.1.101.3.4.1.5") return "KeyWrap.AES-128";
   if(oid_str == "2.16.840.1.101.3.4.1.6") return "AES-128/GCM";
   if(oid_str == "2.16.840.1.101.3.4.1.7") return "AES-128/CCM";
   if(oid_str == "2.16.840.1.101.3.4.2.1") return "SHA-256";
   if(oid_str == "2.16.840.1.101.3.4.2.10") return "SHA-3(512)";
   if(oid_str == "2.16.840.1.101.3.4.2.11") return "SHAKE-128";
   if(oid_str == "2.16.840.1.101.3.4.2.12") return "SHAKE-256";
   if(oid_str == "2.16.840.1.101.3.4.2.2") return "SHA-384";
   if(oid_str == "2.16.840.1.101.3.4.2.3") return "SHA-512";
   if(oid_str == "2.16.840.1.101.3.4.2.4") return "SHA-224";
   if(oid_str == "2.16.840.1.101.3.4.2.6") return "SHA-512-256";
   if(oid_str == "2.16.840.1.101.3.4.2.7") return "SHA-3(224)";
   if(oid_str == "2.16.840.1.101.3.4.2.8") return "SHA-3(256)";
   if(oid_str == "2.16.840.1.101.3.4.2.9") return "SHA-3(384)";
   if(oid_str == "2.16.840.1.101.3.4.3.1") return "DSA/EMSA1(SHA-224)";
   if(oid_str == "2.16.840.1.101.3.4.3.10") return "ECDSA/EMSA1(SHA-3(256))";
   if(oid_str == "2.16.840.1.101.3.4.3.11") return "ECDSA/EMSA1(SHA-3(384))";
   if(oid_str == "2.16.840.1.101.3.4.3.12") return "ECDSA/EMSA1(SHA-3(512))";
   if(oid_str == "2.16.840.1.101.3.4.3.13") return "RSA/EMSA3(SHA-3(224))";
   if(oid_str == "2.16.840.1.101.3.4.3.14") return "RSA/EMSA3(SHA-3(256))";
   if(oid_str == "2.16.840.1.101.3.4.3.15") return "RSA/EMSA3(SHA-3(384))";
   if(oid_str == "2.16.840.1.101.3.4.3.16") return "RSA/EMSA3(SHA-3(512))";
   if(oid_str == "2.16.840.1.101.3.4.3.2") return "DSA/EMSA1(SHA-256)";
   if(oid_str == "2.16.840.1.101.3.4.3.3") return "DSA/EMSA1(SHA-384)";
   if(oid_str == "2.16.840.1.101.3.4.3.4") return "DSA/EMSA1(SHA-512)";
   if(oid_str == "2.16.840.1.101.3.4.3.5") return "DSA/EMSA1(SHA-3(224))";
   if(oid_str == "2.16.840.1.101.3.4.3.6") return "DSA/EMSA1(SHA-3(256))";
   if(oid_str == "2.16.840.1.101.3.4.3.7") return "DSA/EMSA1(SHA-3(384))";
   if(oid_str == "2.16.840.1.101.3.4.3.8") return "DSA/EMSA1(SHA-3(512))";
   if(oid_str == "2.16.840.1.101.3.4.3.9") return "ECDSA/EMSA1(SHA-3(224))";
   if(oid_str == "2.5.29.14") return "X509v3.SubjectKeyIdentifier";
   if(oid_str == "2.5.29.15") return "X509v3.KeyUsage";
   if(oid_str == "2.5.29.17") return "X509v3.SubjectAlternativeName";
   if(oid_str == "2.5.29.18") return "X509v3.IssuerAlternativeName";
   if(oid_str == "2.5.29.19") return "X509v3.BasicConstraints";
   if(oid_str == "2.5.29.20") return "X509v3.CRLNumber";
   if(oid_str == "2.5.29.21") return "X509v3.ReasonCode";
   if(oid_str == "2.5.29.23") return "X509v3.HoldInstructionCode";
   if(oid_str == "2.5.29.24") return "X509v3.InvalidityDate";
   if(oid_str == "2.5.29.28") return "X509v3.CRLIssuingDistributionPoint";
   if(oid_str == "2.5.29.30") return "X509v3.NameConstraints";
   if(oid_str == "2.5.29.31") return "X509v3.CRLDistributionPoints";
   if(oid_str == "2.5.29.32") return "X509v3.CertificatePolicies";
   if(oid_str == "2.5.29.32.0") return "X509v3.AnyPolicy";
   if(oid_str == "2.5.29.35") return "X509v3.AuthorityKeyIdentifier";
   if(oid_str == "2.5.29.36") return "X509v3.PolicyConstraints";
   if(oid_str == "2.5.29.37") return "X509v3.ExtendedKeyUsage";
   if(oid_str == "2.5.4.10") return "X520.Organization";
   if(oid_str == "2.5.4.11") return "X520.OrganizationalUnit";
   if(oid_str == "2.5.4.12") return "X520.Title";
   if(oid_str == "2.5.4.3") return "X520.CommonName";
   if(oid_str == "2.5.4.4") return "X520.Surname";
   if(oid_str == "2.5.4.42") return "X520.GivenName";
   if(oid_str == "2.5.4.43") return "X520.Initials";
   if(oid_str == "2.5.4.44") return "X520.GenerationalQualifier";
   if(oid_str == "2.5.4.46") return "X520.DNQualifier";
   if(oid_str == "2.5.4.5") return "X520.SerialNumber";
   if(oid_str == "2.5.4.6") return "X520.Country";
   if(oid_str == "2.5.4.65") return "X520.Pseudonym";
   if(oid_str == "2.5.4.7") return "X520.Locality";
   if(oid_str == "2.5.4.8") return "X520.State";
   if(oid_str == "2.5.8.1.1") return "RSA";

#if defined(BOTAN_HOUSE_ECC_CURVE_NAME)
   if(oid_str == BOTAN_HOUSE_ECC_CURVE_OID) return BOTAN_HOUSE_ECC_CURVE_NAME;
#endif

   return std::string();
   }
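As a quick illustration of the table above, a minimal sketch, assuming Botan 2's <botan/asn1_oid.h> header and that lookup() is visible from the calling scope:

#include <botan/asn1_oid.h>

#include <iostream>

int main()
   {
   // PKCS#1 RSA: the chain above maps "1.2.840.113549.1.1.1" to "RSA".
   std::cout << lookup(Botan::OID("1.2.840.113549.1.1.1")) << "\n";

   // An OID missing from the table falls through to the empty string.
   std::cout << (lookup(Botan::OID("1.2.3.4")).empty() ? "(unknown)" : "mapped") << "\n";
   return 0;
   }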
Example #30
namespace mongo {
    
    Database *database = 0;
    string ourHostname;
    OID serverID;
    bool dbexitCalled = false;
    CmdLine cmdLine;
    
    bool inShutdown(){
        return dbexitCalled;
    }
    
    string getDbContext() {
        return "?";
    }

    bool haveLocalShardingInfo( const string& ns ){
        assert( 0 );
        return false;
    }

    void usage( char * argv[] ){
        out() << argv[0] << " usage:\n\n";
        out() << " -v+  verbose\n";
        out() << " --port <portno>\n";
        out() << " --configdb <configdbname> [<configdbname>...]\n";
        out() << endl;
    }

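    // Pool hook: freshly created connections are switched to client-error
    // reporting, and every handed-out connection is recorded against the
    // calling client's set of contacted shards.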
    class ShardingConnectionHook : public DBConnectionHook {
    public:
        virtual void onCreate( DBClientBase * conn ){
            conn->simpleCommand( "admin" , 0 , "switchtoclienterrors" );
        }
        virtual void onHandedOut( DBClientBase * conn ){
            ClientInfo::get()->addShard( conn->getServerAddress() );
        }
    } shardingConnectionHook;
    
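    // One handler per incoming wire message: tag the connection with the
    // client id, let Request::process() route it, and turn DBExceptions
    // into $err replies when the client expects a response.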
    class ShardedMessageHandler : public MessageHandler {
    public:
        virtual ~ShardedMessageHandler(){}
        virtual void process( Message& m , AbstractMessagingPort* p ){
            Request r( m , p );
            if ( logLevel > 5 ){
                log(5) << "client id: " << hex << r.getClientId() << "\t" << r.getns() << "\t" << dec << r.op() << endl;
            }
            try {
                setClientId( r.getClientId() );
                r.process();
            }
            catch ( DBException& e ){
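                // put the request id back on the message so replyToQuery()
                // stamps the error reply with the right responseTo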
                m.data->id = r.id();
                log() << "UserException: " << e.what() << endl;
                if ( r.expectResponse() ){
                    BSONObj err = BSON( "$err" << e.what() );
                    replyToQuery( QueryResult::ResultFlag_ErrSet, p , m , err );
                }
            }
        }
    };
    
    void init(){
        serverID.init();
        setupSIGTRAPforGDB();
    }

    void start() {
        log() << "waiting for connections on port " << cmdLine.port << endl;
        //DbGridListener l(port);
        //l.listen();
        ShardedMessageHandler handler;
        MessageServer * server = createServer( cmdLine.port , &handler );
        server->run();
    }

    DBClientBase *createDirectClient(){
        uassert( 10197 ,  "createDirectClient not implemented for sharding yet" , 0 );
        return 0;
    }

} // namespace mongo
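A hedged sketch of an entry point that could drive the pieces above; the real entry point parses more options (--port, --configdb, and so on), which is omitted here for brevity, and the mongo headers providing usage(), cmdLine, init(), and start() are assumed to be included:

#include <cstring>

int main(int argc, char* argv[])
{
    using namespace mongo;

    if (argc > 1 && strcmp(argv[1], "--help") == 0) {
        usage(argv);            // the option summary defined above
        return 0;
    }

    cmdLine.port = 27017;       // illustrative default; a real --port flag would override it
    init();                     // seeds serverID and installs the SIGTRAP handler for GDB
    start();                    // blocks inside server->run(), dispatching sharded requests
    return 0;
}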