Example #1
/** basic write ops / write intents.  note there is no particular order to these: if we have
    two writes to the same location during the group commit interval, it is likely
    (although not assured) that it is journaled here only once.
*/
static void prepBasicWrites(AlignedBuilder& bb, const std::vector<WriteIntent>& intents) {
    stdx::lock_guard<stdx::mutex> lk(privateViews._mutex());

    // Each time write intents switch to a different database we journal a JDbContext.
    // Switches will be rare as we sort by memory location first and we batch commit.
    RelativePath lastDbPath;

    invariant(!intents.empty());

    WriteIntent last;
    for (std::vector<WriteIntent>::const_iterator i = intents.begin(); i != intents.end(); i++) {
        if (i->start() < last.end()) {
            // overlaps
            last.absorb(*i);
        } else {
            // discontinuous
            if (i != intents.begin()) {
                prepBasicWrite_inlock(bb, &last, lastDbPath);
            }

            last = *i;
        }
    }

    prepBasicWrite_inlock(bb, &last, lastDbPath);
}
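
The loop above relies on the intents being presorted by memory location: overlapping or adjacent intents are merged into last via absorb(), and a merged range is only written out once the next intent is discontinuous. Below is a minimal standalone sketch of that coalescing pattern, with a hypothetical Intent struct and a plain flush() standing in for WriteIntent and prepBasicWrite_inlock; names and details are illustrative, not MongoDB's actual types.

#include <algorithm>
#include <cstddef>
#include <cstdio>
#include <vector>

// Hypothetical stand-in for WriteIntent: a half-open [start, end) byte range.
struct Intent {
    const char* startPtr;
    size_t len;
    const char* start() const { return startPtr; }
    const char* end() const { return startPtr + len; }
    // Grow this intent so it also covers the overlapping range of 'other'.
    void absorb(const Intent& other) {
        const char* newEnd = std::max(end(), other.end());
        startPtr = std::min(startPtr, other.startPtr);
        len = static_cast<size_t>(newEnd - startPtr);
    }
};

// Stand-in for prepBasicWrite_inlock: just report the coalesced range.
static void flush(const Intent& i) {
    std::printf("journal %zu bytes at %p\n", i.len, static_cast<const void*>(i.start()));
}

// Same shape as the loop in prepBasicWrites: 'intents' must already be sorted by start().
static void coalesceAndFlush(const std::vector<Intent>& intents) {
    if (intents.empty())
        return;
    Intent last = intents.front();
    for (size_t k = 1; k < intents.size(); k++) {
        if (intents[k].start() < last.end()) {
            last.absorb(intents[k]);   // overlaps: extend the running range
        } else {
            flush(last);               // discontinuous: emit the merged range
            last = intents[k];
        }
    }
    flush(last);                       // emit the final range
}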
Example #2
        /** basic write ops / write intents.  note there is no particular order to these: if we have
            two writes to the same location during the group commit interval, it is likely
            (although not assured) that it is journaled here only once.
        */
        static void prepBasicWrites(AlignedBuilder& bb) {
            scoped_lock lk(privateViews._mutex());

            // each time events switch to a different database we journal a JDbContext
            // switches will be rare as we sort by memory location first and we batch commit.
            RelativePath lastDbPath;

            assertNothingSpooled();
            const vector<WriteIntent>& _intents = commitJob.getIntentsSorted();
            assert( !_intents.empty() );

            WriteIntent last;
            for( vector<WriteIntent>::const_iterator i = _intents.begin(); i != _intents.end(); i++ ) { 
                if( i->start() < last.end() ) { 
                    // overlaps
                    last.absorb(*i);
                }
                else { 
                    // discontinuous
                    if( i != _intents.begin() )
                        prepBasicWrite_inlock(bb, &last, lastDbPath);
                    last = *i;
                }
            }
            prepBasicWrite_inlock(bb, &last, lastDbPath);
        }
Example #3
        /** basic write ops / write intents.  note there is no particular order to these: if we have
            two writes to the same location during the group commit interval, it is likely
            (although not assured) that it is journaled here only once.
        */
        static void prepBasicWrites(AlignedBuilder& bb) {
            scoped_lock lk(privateViews._mutex());

            // each time events switch to a different database we journal a JDbContext
            // switches will be rare as we sort by memory location first and we batch commit.
            RelativePath lastDbPath;

            const vector<WriteIntent>& _intents = commitJob.getIntentsSorted();

            // right now the durability code assumes there is at least one write intent.
            // in theory this does not have to be true, as one could just add or delete a file.
            // callers have to ensure they do at least something for now, even though it's ugly,
            // until this can be addressed
            fassert( 17388, !_intents.empty() );

            WriteIntent last;
            for( vector<WriteIntent>::const_iterator i = _intents.begin(); i != _intents.end(); i++ ) { 
                if( i->start() < last.end() ) { 
                    // overlaps
                    last.absorb(*i);
                }
                else { 
                    // discontinuous
                    if( i != _intents.begin() )
                        prepBasicWrite_inlock(bb, &last, lastDbPath);
                    last = *i;
                }
            }
            prepBasicWrite_inlock(bb, &last, lastDbPath);
        }
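
Examples #2 and #3 obtain the intents through commitJob.getIntentsSorted(); the discontinuity check i->start() < last.end() is only meaningful on a sequence ordered by memory location. The following is a hedged sketch of that precondition, sorting hypothetical intents by start address before handing them to a merge loop like the one above (the struct and the tie-breaking rule are illustrative, not MongoDB's actual ordering):

#include <algorithm>
#include <cstddef>
#include <vector>

// Hypothetical intent: a [start, start + len) byte range in a memory-mapped view.
struct SortableIntent {
    const char* start;
    size_t len;
};

// Order by start address so that overlapping or adjacent ranges become neighbours,
// which is what the coalescing loop in prepBasicWrites() assumes; ties are broken
// by longer range first so the merge step sees the widest extent early.
static void sortIntentsByLocation(std::vector<SortableIntent>& intents) {
    std::sort(intents.begin(), intents.end(),
              [](const SortableIntent& a, const SortableIntent& b) {
                  if (a.start != b.start)
                      return a.start < b.start;
                  return a.len > b.len;  // widest range first on ties
              });
}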
Example #4
        /** put the basic write operation into the buffer (bb) to be journaled */
        static void prepBasicWrite_inlock(AlignedBuilder& bb, const WriteIntent *i, RelativePath& lastDbPath) {
            size_t ofs = 1;
            MongoMMF *mmf = findMMF_inlock(i->start(), /*out*/ofs);

            if( unlikely(!mmf->willNeedRemap()) ) {
                // tag this mmf as needing a remap of its private view later.
                // usually it will already be dirty/already set, so we do the if above first
                // to avoid the possibility of cpu cache line contention
                mmf->willNeedRemap() = true;
            }

            // since we have already looked up the mmf, we go ahead and remember the write view location
            // so we don't have to find the MongoMMF again later in WRITETODATAFILES()
            // 
            // this was for WRITETODATAFILES_Impl2 so commented out now
            //
            /*
            dassert( i->w_ptr == 0 );
            i->w_ptr = ((char*)mmf->view_write()) + ofs;
            */

            JEntry e;
            e.len = min(i->length(), (unsigned)(mmf->length() - ofs)); // don't write past the end of the file
            assert( ofs <= 0x80000000 );
            e.ofs = (unsigned) ofs;
            e.setFileNo( mmf->fileSuffixNo() );
            if( mmf->relativePath() == local ) {
                e.setLocalDbContextBit();
            }
            else if( mmf->relativePath() != lastDbPath ) {
                lastDbPath = mmf->relativePath();
                JDbContext c;
                bb.appendStruct(c);
                bb.appendStr(lastDbPath.toString());
            }
            bb.appendStruct(e);
#if defined(_EXPERIMENTAL)
            i->ofsInJournalBuffer = bb.len();
#endif
            bb.appendBuf(i->start(), e.len);

            if (unlikely(e.len != (unsigned)i->length())) {
                log() << "journal info splitting prepBasicWrite at boundary" << endl;

                // This only happens if we write to the last byte in a file and
                // the first byte in another file that is mapped adjacently. I
                // think most OSs leave at least a one page gap between
                // mappings, but better to be safe.

                WriteIntent next ((char*)i->start() + e.len, i->length() - e.len);
                prepBasicWrite_inlock(bb, &next, lastDbPath);
            }
        }
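
For orientation, the record layout this function produces is roughly: a database-context marker plus the relative path whenever the target database changes, then a fixed-size entry header (length, offset within the file, file number) followed by the raw bytes copied from the private view, with a recursive call emitting a second record if the intent crosses a file boundary. Below is a simplified sketch of that layout with hypothetical stand-ins for JDbContext, JEntry, and AlignedBuilder; it omits the local-database bit and the MMF lookup of the real code, and all names and field layouts are illustrative only.

#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <string>
#include <vector>

// Hypothetical stand-ins for the journal records; not MongoDB's real definitions.
struct FakeDbContext { uint32_t sentinel; };                        // "database changed" marker
struct FakeEntry { uint32_t len; uint32_t ofs; uint32_t fileNo; };  // header before the raw bytes

// Hypothetical stand-in for AlignedBuilder: an append-only byte buffer.
struct ByteBuilder {
    std::vector<char> buf;
    template <typename T>
    void appendStruct(const T& t) { appendBuf(&t, sizeof(t)); }
    void appendStr(const std::string& s) { appendBuf(s.c_str(), s.size() + 1); }  // NUL-terminated
    void appendBuf(const void* p, size_t n) {
        const char* c = static_cast<const char*>(p);
        buf.insert(buf.end(), c, c + n);
    }
};

// Emit one write: an optional db-context record when the database changes, then the
// entry header, then the data itself. If the write runs past the end of the mapped
// file, emit the remainder as a second record, mirroring the recursion at the bottom
// of prepBasicWrite_inlock (the real code re-resolves the MongoMMF; fileNo + 1 here
// is only a placeholder for "the adjacently mapped next file").
static void appendWrite(ByteBuilder& bb,
                        const char* data, uint32_t dataLen,
                        uint32_t ofsInFile, uint32_t fileLen, uint32_t fileNo,
                        const std::string& dbPath, std::string& lastDbPath) {
    if (dbPath != lastDbPath) {
        lastDbPath = dbPath;
        FakeDbContext c = {0};          // arbitrary sentinel for the sketch
        bb.appendStruct(c);
        bb.appendStr(lastDbPath);
    }
    FakeEntry e;
    e.len = std::min(dataLen, fileLen - ofsInFile);  // don't write past the end of the file
    e.ofs = ofsInFile;
    e.fileNo = fileNo;
    bb.appendStruct(e);
    bb.appendBuf(data, e.len);

    if (e.len != dataLen) {
        // Remainder falls into the next (hypothetically adjacent) file.
        appendWrite(bb, data + e.len, dataLen - e.len, 0, fileLen, fileNo + 1,
                    dbPath, lastDbPath);
    }
}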