////////////////////////////////////////////////////////////////////////////////
/// @brief checks the compactor files of a document collection
///
/// closes every compactor file that has become full, and creates a fresh
/// compactor if none is left afterwards.
///
/// @param sim the document collection to check
///
/// @return true if a compactor was closed or created, false otherwise
///
/// note: only this thread modifies the _compactors vector, so reading it
/// here requires no lock; the journal-entries lock is still taken around
/// the calls that mutate collection state.
////////////////////////////////////////////////////////////////////////////////

static bool CheckCompactorDocumentCollection (TRI_document_collection_t* sim) {
  TRI_collection_t* base = &sim->base.base;
  TRI_datafile_t* compactor;
  bool didWork = false;
  size_t idx;
  size_t total;

  // .............................................................................
  // the only thread MODIFYING the _compactor variable is this thread,
  // therefore no locking is required to access the _compactors
  // .............................................................................

  idx = 0;
  total = base->_compactors._length;

  while (idx < total) {
    compactor = base->_compactors._buffer[idx];

    if (! compactor->_full) {
      // compactor still has room, inspect the next one
      ++idx;
      continue;
    }

    didWork = true;

    LOG_DEBUG("closing full compactor '%s'", compactor->_filename);

    TRI_LOCK_JOURNAL_ENTRIES_DOC_COLLECTION(sim);
    TRI_CloseCompactorPrimaryCollection(&sim->base, idx);
    TRI_UNLOCK_JOURNAL_ENTRIES_DOC_COLLECTION(sim);

    // closing may have shuffled the vector, so restart the scan
    total = base->_compactors._length;
    idx = 0;
  }

  if (base->_compactors._length == 0) {
    // no compactor left: try to create a new one
    TRI_LOCK_JOURNAL_ENTRIES_DOC_COLLECTION(sim);

    compactor = TRI_CreateCompactorPrimaryCollection(&sim->base);

    if (compactor == NULL) {
      // an error occurred when creating the compactor file
      LOG_ERROR("could not create compactor file");
    }
    else {
      didWork = true;
      LOG_DEBUG("created new compactor '%s'", compactor->_filename);

      // signal threads waiting for journal entries
      TRI_BROADCAST_JOURNAL_ENTRIES_DOC_COLLECTION(sim);
    }

    TRI_UNLOCK_JOURNAL_ENTRIES_DOC_COLLECTION(sim);
  }

  return didWork;
}
// -----------------------------------------------------------------------------
////////////////////////////////////////////////////////////////////////////////
/// @brief creates a ".dead" marker file next to each physical datafile that
/// took part in a compaction run, starting at vector index @a start
///
/// the marker files indicate on a later startup that the original datafiles
/// have been fully compacted and can be removed
////////////////////////////////////////////////////////////////////////////////

static void CreateDeadMarkerFiles (TRI_vector_t const* compactions,
                                   size_t start) {
  size_t i;
  size_t n = compactions->_length;

  for (i = start; i < n; ++i) {
    compaction_info_t* compaction;
    TRI_datafile_t* datafile;

    compaction = TRI_AtVector(compactions, i);
    datafile = compaction->_datafile;

    if (datafile->isPhysical(datafile)) {
      char* filename = TRI_Concatenate2String(datafile->getName(datafile), ".dead");

      if (filename != NULL) {
        // an empty file is sufficient; only its existence matters
        TRI_WriteFile(filename, "", 0);
        TRI_FreeString(TRI_CORE_MEM_ZONE, filename);
      }
    }
  }
}

////////////////////////////////////////////////////////////////////////////////
/// @brief compacts the given list of datafiles of a collection into a single
/// new compactor file
///
/// @param document    the collection being compacted
/// @param compactions non-empty vector of compaction_info_t entries
///
/// on any error the function logs and returns early; a partially written
/// compactor file is cleaned up on the next startup
////////////////////////////////////////////////////////////////////////////////

static void CompactifyDatafiles (TRI_document_collection_t* document, 
                                 TRI_vector_t const* compactions) {
  TRI_datafile_t* compactor;
  TRI_primary_collection_t* primary;
  compaction_initial_context_t initial;
  compaction_context_t context;
  size_t i, j, n;
  
  n = compactions->_length;
  assert(n > 0);

  initial = InitCompaction(document, compactions);

  if (initial._failed) {
    LOG_ERROR("could not initialise compaction");

    return;
  }

  LOG_TRACE("compactify called for collection '%llu' for %d datafiles of total size %llu", 
            (unsigned long long) document->base.base._info._cid, 
            (int) n,
            (unsigned long long) initial._targetSize);

  // now create a new compactor file 
  // we are re-using the _fid of the first original datafile!
  compactor = CreateCompactor(document, initial._fid, initial._targetSize);
  
  if (compactor == NULL) {
    // some error occurred
    LOG_ERROR("could not create compactor file");

    return;
  }
    
  LOG_DEBUG("created new compactor file '%s'", compactor->getName(compactor));
 
  memset(&context._dfi, 0, sizeof(TRI_doc_datafile_info_t));
  // these attributes remain the same for all datafiles we collect 
  context._document  = document;
  context._compactor = compactor;
  context._dfi._fid  = compactor->_fid;
  
  // now compact all datafiles
  for (i = 0; i < n; ++i) {
    compaction_info_t* compaction;
    TRI_datafile_t* df;
    bool ok;
    
    compaction = TRI_AtVector(compactions, i);
    df = compaction->_datafile;
    
    LOG_DEBUG("compacting datafile '%s' into '%s', number: %d, keep deletions: %d", 
              df->getName(df), 
              compactor->getName(compactor),
              (int) i,
              (int) compaction->_keepDeletions);

    // if this is the first datafile in the list of datafiles, we can also collect
    // deletion markers
    context._keepDeletions = compaction->_keepDeletions;

    // run the actual compaction of a single datafile
    ok = TRI_IterateDatafile(df, Compactifier, &context, false, false);
  
    if (! ok) {
      LOG_WARNING("failed to compact datafile '%s'", df->getName(df));
      // compactor file does not need to be removed now. will be removed on next startup
      // TODO: Remove
      return;
    }
  } // next file
    
  
  // locate the compactor
  // must acquire a write-lock as we're about to change the datafiles vector 
  primary = &document->base;
  TRI_WRITE_LOCK_DATAFILES_DOC_COLLECTION(primary);

  if (! LocateDatafile(&primary->base._compactors, compactor->_fid, &j)) {
    // not found
    TRI_WRITE_UNLOCK_DATAFILES_DOC_COLLECTION(primary);
 
    LOG_ERROR("logic error in CompactifyDatafiles: could not find compactor");
    return;
  }

  if (! TRI_CloseCompactorPrimaryCollection(primary, j)) {
    TRI_WRITE_UNLOCK_DATAFILES_DOC_COLLECTION(primary);

    LOG_ERROR("could not close compactor file");
    // TODO: how do we recover from this state?
    return;
  }
  
  TRI_WRITE_UNLOCK_DATAFILES_DOC_COLLECTION(primary);

  
  if (context._dfi._numberAlive == 0 &&
      context._dfi._numberDead == 0 &&
      context._dfi._numberDeletion == 0 &&
      context._dfi._numberTransaction == 0) {

    TRI_barrier_t* b;
   
    if (n > 1) {
      // create .dead files for all collected files 
      CreateDeadMarkerFiles(compactions, 0);
    }

    // compactor is fully empty. remove it
    RemoveCompactor(document, compactor);

    for (i = 0; i < n; ++i) {
      compaction_info_t* compaction;

      compaction = TRI_AtVector(compactions, i);
    
      // datafile is also empty after compaction and thus useless
      RemoveDatafile(document, compaction->_datafile);
  
      // add a deletion marker to the result set container
      b = TRI_CreateBarrierDropDatafile(&primary->_barrierList, compaction->_datafile, DropDatafileCallback, primary);

      if (b == NULL) {
        LOG_ERROR("out of memory when creating datafile-drop barrier");
      }
    }
  }
  else {
    if (n > 1) {
      // create .dead files for all collected files but the first 
      CreateDeadMarkerFiles(compactions, 1);
    }

    for (i = 0; i < n; ++i) {
      TRI_barrier_t* b;
      compaction_info_t* compaction;

      compaction = TRI_AtVector(compactions, i);

      if (i == 0) {
        // add a rename marker 
        void* copy;
     
        copy = TRI_Allocate(TRI_CORE_MEM_ZONE, sizeof(compaction_context_t), false);

        if (copy == NULL) {
          // allocation failed: skip the rename barrier instead of
          // dereferencing a NULL pointer. the data itself has already been
          // compacted successfully
          LOG_ERROR("out of memory when creating datafile-rename context");
        }
        else {
          memcpy(copy, &context, sizeof(compaction_context_t));
    
          b = TRI_CreateBarrierRenameDatafile(&primary->_barrierList, compaction->_datafile, RenameDatafileCallback, copy);
      
          if (b == NULL) {
            LOG_ERROR("out of memory when creating datafile-rename barrier");
            TRI_Free(TRI_CORE_MEM_ZONE, copy);
          }
        }
      }
      else {
        // datafile is empty after compaction and thus useless
        RemoveDatafile(document, compaction->_datafile);

        // add a drop datafile marker
        b = TRI_CreateBarrierDropDatafile(&primary->_barrierList, compaction->_datafile, DropDatafileCallback, primary);

        if (b == NULL) {
          LOG_ERROR("out of memory when creating datafile-drop barrier");
        }
      }
    }
  }
}