int DocumentDatabase::insertDocument(std::string Author, std::string Title, std::string PublishDate) { sqlite3* db = openDB(); int documentID = getDocumentID(db,Author, Title); if (documentID == -1) { int styleID = getStyleID(db,Author); if (styleID == -1) { styleID = insertStyle(db,Author); documentID = insertDocument(db, styleID, Title, PublishDate); //documentID = getDocumentID(Author, Title); } else { documentID = insertDocument(db,styleID, Title, PublishDate); //documentID = getDocumentID(Author, Title); } } else { // Just Return DocumentID } close(db); return documentID; }
/* Serialises one Valeur to the globally open collection file 'fichier'.
 * On-disk layout: the Type_Valeur tag first, then a type-dependent payload:
 *   T_ENTIER  -> raw int
 *   T_DECIMAL -> raw double
 *   T_CHAINE / T_REGEXP -> int byte length, then the characters (no NUL)
 *   T_BOOL    -> raw boolean
 *   T_DOC / T_TAB -> delegated to insertDocument / insertTableau
 * Emits debug traces when the global 'debug' flag is set.
 *
 * Fix: the payload fwrite for strings/regexps recomputed strlen() even though
 * the length had just been stored in 'taille'; 'taille' is now reused, which
 * also guarantees the written length prefix and payload size always agree. */
void insertValeur(Valeur valeur)
{
    if(debug) printf("le type de la valeur est %d\n",valeur.type);
    fwrite(&(valeur.type),sizeof(Type_Valeur),1,fichier);
    int taille;
    switch(valeur.type)
    {
    case T_ENTIER :
        if(debug) printf("l'entier à insérer est %d\n",valeur.parametre.entier);
        fwrite(&(valeur.parametre.entier),sizeof(int),1,fichier);
        break;
    case T_DECIMAL:
        if(debug) printf("le décimal à insérer est %lf\n",valeur.parametre.decimal);
        fwrite(&(valeur.parametre.decimal),sizeof(double),1,fichier);
        break;
    case T_CHAINE:
        taille=strlen(valeur.parametre.chaine);
        if(debug) printf("la taille de la chaine est %d\n",taille);
        fwrite(&taille,sizeof(int),1,fichier);
        if(debug) printf("la chaine à insérer est %s\n",valeur.parametre.chaine);
        /* reuse 'taille' instead of a second strlen() pass */
        fwrite(valeur.parametre.chaine,sizeof(char),taille,fichier);
        break;
    case T_REGEXP:
        taille=strlen(valeur.parametre.chaine);
        if(debug) printf("la taille de la regexp est %d\n",taille);
        fwrite(&taille,sizeof(int),1,fichier);
        if(debug) printf("la regexp à insérer est %s\n",valeur.parametre.chaine);
        /* reuse 'taille' instead of a second strlen() pass */
        fwrite(valeur.parametre.chaine,sizeof(char),taille,fichier);
        break;
    case T_BOOL:
        if(debug) printf("le boolean à insérer est %d\n",valeur.parametre.bool);
        fwrite(&(valeur.parametre.bool),sizeof(boolean),1,fichier);
        break;
    case T_DOC:
        insertDocument(valeur.parametre.document);
        break;
    case T_TAB:
        insertTableau(valeur.parametre.tableau);
        break;
    }
}
// Test helper: writes 'entry' straight into the local oplog collection,
// asserting (rather than returning an error) if the collection is missing
// or the insert fails.
void MockReplCoordServerFixture::insertOplogEntry(const repl::OplogEntry& entry) {
    // Take the oplog collection with an intent-exclusive lock for the write.
    AutoGetCollection autoColl(opCtx(), NamespaceString::kRsOplogNamespace, MODE_IX);
    auto oplogColl = autoColl.getCollection();
    ASSERT_TRUE(oplogColl != nullptr);

    const auto insertStatus = oplogColl->insertDocument(opCtx(),
                                                        InsertStatement(entry.toBSON()),
                                                        &CurOp::get(opCtx())->debug(),
                                                        /* fromMigrate */ false);
    ASSERT_OK(insertStatus);
}
/* Appends one document to the named collection's file.
 * Opens the file in binary-append mode, writes via insertDocument(), and
 * closes it again; prints a message and does nothing if the open fails. */
void insert(char* collection, Document doc)
{
    fichier = fopen(getNomFichier(collection), "ab");
    if (fichier == NULL) {
        printf("impossible d'ouvrir la collection\n");
        return;
    }
    insertDocument(doc);
    fclose(fichier);
}
QueryStageCachedPlanBase() { // If collection exists already, we need to drop it. dropCollection(); // Add indices. addIndex(BSON("a" << 1)); addIndex(BSON("b" << 1)); OldClientWriteContext ctx(&_txn, nss.ns()); Collection* collection = ctx.getCollection(); ASSERT(collection); // Add data. for (int i = 0; i < 10; i++) { insertDocument(collection, BSON("_id" << i << "a" << i << "b" << 1)); } }
// Reacts to a document being opened: an already-tracked document triggers a
// re-sort of the model; a new non-null document is appended at the end.
//
// Fix: the original's else-branch re-tested mDocuments.contains( document ),
// which is necessarily false at that point (we only reach the branch when the
// first contains() check failed); the dead re-check is removed and the null
// guard expressed as a plain else-if.
void pOpenedFileModel::documentOpened( pAbstractChild* document )
{
    if ( mDocuments.contains( document ) ) {
        // Known document: positions may have changed, so re-sort the model.
        sortDocuments();
    }
    else if ( document ) {
        // New document: insert it at the end of the list.
        const int index = mDocuments.count();
        insertDocument( document, index );
    }
    // Null documents are ignored.
}
void setUp() { // If collection exists already, we need to drop it. dropCollection(); // Add indices. addIndex(BSON("a" << 1)); addIndex(BSON("b" << 1)); dbtests::WriteContextForTests ctx(&_opCtx, nss.ns()); Collection* collection = ctx.getCollection(); ASSERT(collection); // Add data. for (int i = 0; i < 10; i++) { insertDocument(collection, BSON("_id" << i << "a" << i << "b" << 1)); } }
// Inserts every object in 'objCont' into db/collection by forwarding each
// one to the single-document insertDocument() overload.
void MongoServer::insertDocuments(const std::vector<mongo::BSONObj> &objCont,
                                  const std::string &db,
                                  const std::string &collection)
{
    typedef std::vector<mongo::BSONObj>::size_type SizeType;
    for (SizeType idx = 0; idx < objCont.size(); ++idx) {
        insertDocument(objCont[idx], db, collection);
    }
}
// Inserts every object in 'objCont' into the given namespace by forwarding
// each one to the single-document insertDocument() overload.
void MongoServer::insertDocuments(const std::vector<mongo::BSONObj> &objCont,
                                  const MongoNamespace &ns)
{
    typedef std::vector<mongo::BSONObj>::size_type SizeType;
    for (SizeType idx = 0; idx < objCont.size(); ++idx) {
        insertDocument(objCont[idx], ns);
    }
}
// Updates the document at 'oldLocation' with 'objNew'.
// - If the new object still fits in the old record, it is patched in place
//   (after applying the prepared index updates).
// - If it has grown, the document is moved: the old record is unindexed,
//   the new object inserted elsewhere, and the old record deallocated on
//   success (or re-indexed on failure, restoring the previous state).
// Returns the document's (possibly new) DiskLoc, or an error status.
// NOTE(review): callers presumably hold the appropriate write lock — this
// function does no locking of its own; confirm against call sites.
StatusWith<DiskLoc> Collection::updateDocument( const DiskLoc& oldLocation,
                                                const BSONObj& objNew,
                                                bool enforceQuota,
                                                OpDebug* debug ) {
    Record* oldRecord = getExtentManager()->recordFor( oldLocation );
    BSONObj objOld = BSONObj::make( oldRecord );

    // An update must never change a document's _id.
    if ( objOld.hasElement( "_id" ) ) {
        BSONElement oldId = objOld["_id"];
        BSONElement newId = objNew["_id"];
        if ( oldId != newId )
            return StatusWith<DiskLoc>( ErrorCodes::InternalError,
                                        "in Collection::updateDocument _id mismatch",
                                        13596 );
    }

    // Extra schema validation for the privileged user-document collection.
    if ( ns().coll() == "system.users" ) {
        // XXX - andy and spencer think this should go away now
        V2UserDocumentParser parser;
        Status s = parser.checkValidUserDocument(objNew);
        if ( !s.isOK() )
            return StatusWith<DiskLoc>( s );
    }

    /* duplicate key check. we descend the btree twice - once for this check,
       and once for the actual inserts, further below. that is suboptimal, but
       it's pretty complicated to do it the other way without rollbacks... */
    // Pre-validate the update against every index, collecting per-index
    // UpdateTickets that are executed later (in the in-place path).
    OwnedPointerVector<UpdateTicket> updateTickets;
    updateTickets.mutableVector().resize(_indexCatalog.numIndexesTotal());
    for (int i = 0; i < _indexCatalog.numIndexesTotal(); ++i) {
        IndexDescriptor* descriptor = _indexCatalog.getDescriptor( i );
        IndexAccessMethod* iam = _indexCatalog.getIndex( descriptor );
        InsertDeleteOptions options;
        options.logIfError = false;
        // Duplicates are allowed unless this is the _id index or a unique
        // index (and uniqueness isn't being explicitly ignored).
        options.dupsAllowed =
            !(KeyPattern::isIdKeyPattern(descriptor->keyPattern()) || descriptor->unique())
            || ignoreUniqueIndex(descriptor);
        updateTickets.mutableVector()[i] = new UpdateTicket();
        Status ret = iam->validateUpdate(objOld, objNew, oldLocation, options,
                                         updateTickets.mutableVector()[i]);
        if ( !ret.isOK() ) {
            return StatusWith<DiskLoc>( ret );
        }
    }

    if ( oldRecord->netLength() < objNew.objsize() ) {
        // doesn't fit, have to move to new location
        if ( _details->isCapped() )
            return StatusWith<DiskLoc>( ErrorCodes::InternalError,
                                        "failing update: objects in a capped ns cannot grow",
                                        10003 );
        moveCounter.increment();
        _details->paddingTooSmall();

        // unindex old record, don't delete
        // this way, if inserting new doc fails, we can re-index this one
        ClientCursor::aboutToDelete(_ns.ns(), _details, oldLocation);
        _indexCatalog.unindexRecord( objOld, oldLocation, true );

        if ( debug ) {
            if (debug->nmoved == -1) // default of -1 rather than 0
                debug->nmoved = 1;
            else
                debug->nmoved += 1;
        }

        StatusWith<DiskLoc> loc = insertDocument( objNew, enforceQuota );

        if ( loc.isOK() ) {
            // insert successful, now lets deallocate the old location
            // remember its already unindexed
            _recordStore.deallocRecord( oldLocation, oldRecord );
        }
        else {
            // new doc insert failed, so lets re-index the old document and location
            _indexCatalog.indexRecord( objOld, oldLocation );
        }

        return loc;
    }

    // In-place path: the new object fits inside the existing record.
    _infoCache.notifyOfWriteOp();
    _details->paddingFits();

    if ( debug )
        debug->keyUpdates = 0;

    // Apply the pre-validated index changes collected above.
    for (int i = 0; i < _indexCatalog.numIndexesTotal(); ++i) {
        IndexDescriptor* descriptor = _indexCatalog.getDescriptor( i );
        IndexAccessMethod* iam = _indexCatalog.getIndex( descriptor );

        int64_t updatedKeys;
        Status ret = iam->update(*updateTickets.vector()[i], &updatedKeys);
        if ( !ret.isOK() )
            return StatusWith<DiskLoc>( ret );

        if ( debug )
            debug->keyUpdates += updatedKeys;
    }

    // update in place
    int sz = objNew.objsize();
    memcpy(getDur().writingPtr(oldRecord->data(), sz), objNew.objdata(), sz);

    return StatusWith<DiskLoc>( oldLocation );
}
// Persists a session's transaction state into the session transactions table
// as a full-document replacement keyed by _id: inserts when no matching
// record exists (upsert), otherwise updates the existing record in place.
// Throws WriteConflictException when a concurrent writer is detected (either
// a duplicate-key race on insert, or the stored document no longer matching
// the query), so the caller can retry.
void updateSessionEntry(OperationContext* opCtx, const UpdateRequest& updateRequest) {
    // Current code only supports replacement update.
    dassert(UpdateDriver::isDocReplacement(updateRequest.getUpdates()));

    AutoGetCollection autoColl(opCtx, NamespaceString::kSessionTransactionsTableNamespace, MODE_IX);

    uassert(40527,
            str::stream() << "Unable to persist transaction state because the session transaction "
                             "collection is missing. This indicates that the "
                          << NamespaceString::kSessionTransactionsTableNamespace.ns()
                          << " collection has been manually deleted.",
            autoColl.getCollection());

    WriteUnitOfWork wuow(opCtx);

    auto collection = autoColl.getCollection();
    auto idIndex = collection->getIndexCatalog()->findIdIndex(opCtx);

    uassert(40672,
            str::stream() << "Failed to fetch _id index for "
                          << NamespaceString::kSessionTransactionsTableNamespace.ns(),
            idIndex);

    auto indexAccess = collection->getIndexCatalog()->getIndex(idIndex);
    // Since we are looking up a key inside the _id index, create a key object consisting of only
    // the _id field.
    auto idToFetch = updateRequest.getQuery().firstElement();
    auto toUpdateIdDoc = idToFetch.wrap();
    dassert(idToFetch.fieldNameStringData() == "_id"_sd);
    auto recordId = indexAccess->findSingle(opCtx, toUpdateIdDoc);
    auto startingSnapshotId = opCtx->recoveryUnit()->getSnapshotId();

    if (recordId.isNull()) {
        // Upsert case.
        auto status = collection->insertDocument(
            opCtx, InsertStatement(updateRequest.getUpdates()), nullptr, true, false);

        // A duplicate key here means another writer inserted the same session
        // entry concurrently; surface it as a write conflict so the caller retries.
        if (status == ErrorCodes::DuplicateKey) {
            throw WriteConflictException();
        }

        uassertStatusOK(status);
        wuow.commit();
        return;
    }

    auto originalRecordData = collection->getRecordStore()->dataFor(opCtx, recordId);
    auto originalDoc = originalRecordData.toBson();

    // The session table is expected to use the simple (null) collation.
    invariant(collection->getDefaultCollator() == nullptr);
    boost::intrusive_ptr<ExpressionContext> expCtx(new ExpressionContext(opCtx, nullptr));

    auto matcher = fassertStatusOK(
        40673, MatchExpressionParser::parse(updateRequest.getQuery(), std::move(expCtx)));
    if (!matcher->matchesBSON(originalDoc)) {
        // Document no longer match what we expect so throw WCE to make the caller re-examine.
        throw WriteConflictException();
    }

    // Describe the update for oplog purposes.
    OplogUpdateEntryArgs args;
    args.nss = NamespaceString::kSessionTransactionsTableNamespace;
    args.uuid = collection->uuid();
    args.update = updateRequest.getUpdates();
    args.criteria = toUpdateIdDoc;
    args.fromMigrate = false;

    collection->updateDocument(opCtx,
                               recordId,
                               Snapshotted<BSONObj>(startingSnapshotId, originalDoc),
                               updateRequest.getUpdates(),
                               true,   // enforceQuota
                               false,  // indexesAffected = false because _id is the only index
                               nullptr,
                               &args);

    wuow.commit();
}