Code example #1
File: collection.c  Project: eranshir/ArangoDB
static bool IterateDatafilesVector (const TRI_vector_pointer_t* const files,
                                    bool (*iterator)(TRI_df_marker_t const*, void*, TRI_datafile_t*, bool),
                                    void* data) {
  size_t i, n;

  n = files->_length;

  // visit each datafile in turn; abort the whole iteration on the first failure
  for (i = 0;  i < n;  ++i) {
    TRI_datafile_t* datafile;
    bool result;

    datafile = (TRI_datafile_t*) TRI_AtVectorPointer(files, i);
    result = TRI_IterateDatafile(datafile, iterator, data, false);

    if (! result) {
      return false;
    }
  }

  return true;
}
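
TRI_IterateDatafile walks every marker in a datafile and reports false as soon as the supplied iterator aborts; IterateDatafilesVector simply lifts that early-abort semantics to a whole vector of datafiles. Below is a minimal, self-contained sketch of the same callback pattern in plain C; datafile_t, datafile_visitor_t, and print_fid are hypothetical stand-ins, not part of the ArangoDB API.

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

/* hypothetical stand-in for TRI_datafile_t */
typedef struct datafile_s {
  int fid;
} datafile_t;

/* visit one datafile; returning false aborts the whole iteration */
typedef bool (*datafile_visitor_t)(datafile_t* df, void* data);

static bool iterate_datafiles (datafile_t** files, size_t n,
                               datafile_visitor_t visitor, void* data) {
  for (size_t i = 0; i < n; ++i) {
    if (! visitor(files[i], data)) {
      return false;  /* propagate failure, as IterateDatafilesVector does */
    }
  }
  return true;
}

static bool print_fid (datafile_t* df, void* data) {
  (void) data;
  printf("datafile %d\n", df->fid);
  return true;
}

int main (void) {
  datafile_t a = { 1 }, b = { 2 };
  datafile_t* files[] = { &a, &b };
  return iterate_datafiles(files, 2, print_fid, NULL) ? 0 : 1;
}
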
Code example #2
File: compactor.c  Project: FikiHafana/ArangoDB
static compaction_initial_context_t InitCompaction (TRI_document_collection_t* document,
                                                    TRI_vector_t const* compactions) {
  compaction_initial_context_t context;
  size_t i, n;

  memset(&context, 0, sizeof(compaction_initial_context_t));
  context._failed = false;
  context._document = document;

  // this is the minimum required size
  context._targetSize = sizeof(TRI_df_header_marker_t) + 
                        sizeof(TRI_col_header_marker_t) + 
                        sizeof(TRI_df_footer_marker_t) + 
                        256; // allow for some overhead

  n = compactions->_length;
  for (i = 0; i < n; ++i) {
    TRI_datafile_t* df;
    compaction_info_t* compaction;
    bool ok;

    compaction = TRI_AtVector(compactions, i);
    df = compaction->_datafile;

    if (i == 0) {
      // extract and store fid
      context._fid = compaction->_datafile->_fid;
    }

    context._keepDeletions = compaction->_keepDeletions;
    
    ok = TRI_IterateDatafile(df, CalculateSize, &context, false, false);

    if (! ok) {
      context._failed = true;
      break;
    }
  }

  return context;
}
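
InitCompaction sizes the compactor file up front: it starts from a fixed minimum (datafile header, collection header, footer, plus 256 bytes of slack) and then lets the CalculateSize iterator accumulate the rest into the context as it visits each datafile. A rough sketch of that accumulate-into-a-context callback shape, using hypothetical plain-C stand-ins rather than ArangoDB's marker types:

#include <stdbool.h>
#include <stddef.h>

/* hypothetical stand-in for a datafile marker */
typedef struct marker_s {
  size_t size;
  bool   alive;
} marker_t;

/* mirrors the role of compaction_initial_context_t above */
typedef struct size_context_s {
  size_t target_size;  /* accumulated size of everything worth keeping */
  bool   failed;
} size_context_t;

/* iterator in the spirit of CalculateSize: add up the live markers;
   the context is threaded through the void* parameter, as in the C code above */
static bool calculate_size (marker_t const* m, void* data) {
  size_context_t* ctx = (size_context_t*) data;

  if (m->alive) {
    ctx->target_size += m->size;
  }
  return true;  /* keep iterating */
}
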
Code example #3
File: compactor.c  Project: FikiHafana/ArangoDB
static void CompactifyDatafiles (TRI_document_collection_t* document, 
                                 TRI_vector_t const* compactions) {
  TRI_datafile_t* compactor;
  TRI_primary_collection_t* primary;
  compaction_initial_context_t initial;
  compaction_context_t context;
  size_t i, j, n;
  
  n = compactions->_length;
  assert(n > 0);

  initial = InitCompaction(document, compactions);

  if (initial._failed) {
    LOG_ERROR("could not create initialise compaction");

    return;
  }

  LOG_TRACE("compactify called for collection '%llu' for %d datafiles of total size %llu", 
            (unsigned long long) document->base.base._info._cid, 
            (int) n,
            (unsigned long long) initial._targetSize);

  // now create a new compactor file 
  // we are re-using the _fid of the first original datafile!
  compactor = CreateCompactor(document, initial._fid, initial._targetSize);
  
  if (compactor == NULL) {
    // some error occurred
    LOG_ERROR("could not create compactor file");

    return;
  }
    
  LOG_DEBUG("created new compactor file '%s'", compactor->getName(compactor));
 
  memset(&context._dfi, 0, sizeof(TRI_doc_datafile_info_t));
  // these attributes remain the same for all datafiles we collect 
  context._document  = document;
  context._compactor = compactor;
  context._dfi._fid  = compactor->_fid;
  
  // now compact all datafiles
  for (i = 0; i < n; ++i) {
    compaction_info_t* compaction;
    TRI_datafile_t* df;
    bool ok;
    
    compaction = TRI_AtVector(compactions, i);
    df = compaction->_datafile;
    
    LOG_DEBUG("compacting datafile '%s' into '%s', number: %d, keep deletions: %d", 
              df->getName(df), 
              compactor->getName(compactor),
              (int) i,
              (int) compaction->_keepDeletions);

    // if this is the first datafile in the list of datafiles, we can also collect
    // deletion markers
    context._keepDeletions = compaction->_keepDeletions;

    // run the actual compaction of a single datafile
    ok = TRI_IterateDatafile(df, Compactifier, &context, false, false);
  
    if (! ok) {
      LOG_WARNING("failed to compact datafile '%s'", df->getName(df));
      // compactor file does not need to be removed now. will be removed on next startup
      // TODO: Remove
      return;
    }
  } // next file
    
  
  // locate the compactor
  // must acquire a write-lock as we're about to change the datafiles vector 
  primary = &document->base;
  TRI_WRITE_LOCK_DATAFILES_DOC_COLLECTION(primary);

  if (! LocateDatafile(&primary->base._compactors, compactor->_fid, &j)) {
    // not found
    TRI_WRITE_UNLOCK_DATAFILES_DOC_COLLECTION(primary);
 
    LOG_ERROR("logic error in CompactifyDatafiles: could not find compactor");
    return;
  }

  if (! TRI_CloseCompactorPrimaryCollection(primary, j)) {
    TRI_WRITE_UNLOCK_DATAFILES_DOC_COLLECTION(primary);

    LOG_ERROR("could not close compactor file");
    // TODO: how do we recover from this state?
    return;
  }
  
  TRI_WRITE_UNLOCK_DATAFILES_DOC_COLLECTION(primary);

  
  if (context._dfi._numberAlive == 0 &&
      context._dfi._numberDead == 0 &&
      context._dfi._numberDeletion == 0 &&
      context._dfi._numberTransaction == 0) {

    TRI_barrier_t* b;
   
    if (n > 1) {
      // create .dead files for all collected files 
      for (i = 0; i < n; ++i) {
        compaction_info_t* compaction;
        TRI_datafile_t* datafile;

        compaction = TRI_AtVector(compactions, i);
        datafile = compaction->_datafile;

        if (datafile->isPhysical(datafile)) {
          char* filename = TRI_Concatenate2String(datafile->getName(datafile), ".dead");

          if (filename != NULL) {
            TRI_WriteFile(filename, "", 0);
            TRI_FreeString(TRI_CORE_MEM_ZONE, filename);
          }
        }
      }
    }

    // compactor is fully empty. remove it
    RemoveCompactor(document, compactor);

    for (i = 0; i < n; ++i) {
      compaction_info_t* compaction;

      compaction = TRI_AtVector(compactions, i);
    
      // datafile is also empty after compaction and thus useless
      RemoveDatafile(document, compaction->_datafile);
  
      // add a deletion marker to the result set container
      b = TRI_CreateBarrierDropDatafile(&primary->_barrierList, compaction->_datafile, DropDatafileCallback, primary);

      if (b == NULL) {
        LOG_ERROR("out of memory when creating datafile-drop barrier");
      }
    }
  }
  else {
    if (n > 1) {
      // create .dead files for all collected files but the first 
      for (i = 1; i < n; ++i) {
        compaction_info_t* compaction;
        TRI_datafile_t* datafile;

        compaction = TRI_AtVector(compactions, i);
        datafile = compaction->_datafile;

        if (datafile->isPhysical(datafile)) {
          char* filename = TRI_Concatenate2String(datafile->getName(datafile), ".dead");

          if (filename != NULL) {
            TRI_WriteFile(filename, "", 0);
            TRI_FreeString(TRI_CORE_MEM_ZONE, filename);
          }
        }
      }
    }

    for (i = 0; i < n; ++i) {
      TRI_barrier_t* b;
      compaction_info_t* compaction;

      compaction = TRI_AtVector(compactions, i);

      if (i == 0) {
        // add a rename marker 
        void* copy;
     
        copy = TRI_Allocate(TRI_CORE_MEM_ZONE, sizeof(compaction_context_t), false);

        if (copy == NULL) {
          // allocation failed: log and skip the rename barrier, in line with
          // the error handling for the barrier itself below
          LOG_ERROR("out of memory when copying the compaction context");
          continue;
        }

        memcpy(copy, &context, sizeof(compaction_context_t));
    
        b = TRI_CreateBarrierRenameDatafile(&primary->_barrierList, compaction->_datafile, RenameDatafileCallback, copy);
      
        if (b == NULL) {
          LOG_ERROR("out of memory when creating datafile-rename barrier");
          TRI_Free(TRI_CORE_MEM_ZONE, copy);
        }
      }
      else {
        // datafile is empty after compaction and thus useless
        RemoveDatafile(document, compaction->_datafile);

        // add a drop datafile marker
        b = TRI_CreateBarrierDropDatafile(&primary->_barrierList, compaction->_datafile, DropDatafileCallback, primary);

        if (b == NULL) {
          LOG_ERROR("out of memory when creating datafile-drop barrier");
        }
      }
    }
  }
}
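
One detail worth noting above: fully compacted datafiles are not deleted immediately. Instead an empty '<name>.dead' sentinel is written next to each of them, so a server that crashes before the barriers fire can still identify and remove the leftovers on the next startup. A portable sketch of that sentinel trick (mark_dead is a hypothetical name; the original uses TRI_Concatenate2String and TRI_WriteFile):

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* mark a datafile as safe to delete by creating "<name>.dead" next to it */
static bool mark_dead (const char* datafile_name) {
  /* sizeof(".dead") includes the terminating NUL byte */
  size_t len = strlen(datafile_name) + sizeof(".dead");
  char* sentinel = malloc(len);

  if (sentinel == NULL) {
    return false;
  }
  snprintf(sentinel, len, "%s.dead", datafile_name);

  FILE* fp = fopen(sentinel, "w");  /* zero-byte marker file */
  free(sentinel);

  if (fp == NULL) {
    return false;
  }
  fclose(fp);
  return true;
}
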
Code example #4
static void CompactifyDatafile (TRI_sim_collection_t* sim, TRI_voc_fid_t fid) {
  TRI_datafile_t* df;
  bool ok;
  size_t n;
  size_t i;

  // locate the datafile
  TRI_READ_LOCK_DATAFILES_SIM_COLLECTION(sim);

  n = sim->base.base._datafiles._length;

  for (i = 0;  i < n;  ++i) {
    df = sim->base.base._datafiles._buffer[i];

    if (df->_fid == fid) {
      break;
    }
  }

  TRI_READ_UNLOCK_DATAFILES_SIM_COLLECTION(sim);

  if (i == n) {
    return;
  }

  // now compactify the datafile
  LOG_DEBUG("starting to compactify datafile '%s'", df->_filename);

  ok = TRI_IterateDatafile(df, Compactifier, sim, false);

  if (! ok) {
    LOG_WARNING("failed to compactify the datafile '%s'", df->_filename);
    return;
  }

  // wait for the journals to sync
  WaitCompactSync(sim, df);

  // remove the datafile from the list of datafiles
  TRI_WRITE_LOCK_DATAFILES_SIM_COLLECTION(sim);

  n = sim->base.base._datafiles._length;

  for (i = 0;  i < n;  ++i) {
    df = sim->base.base._datafiles._buffer[i];

    if (df->_fid == fid) {
      TRI_RemoveVectorPointer(&sim->base.base._datafiles, i);
      break;
    }
  }

  TRI_WRITE_UNLOCK_DATAFILES_SIM_COLLECTION(sim);

  if (i == n) {
    LOG_WARNING("failed to locate the datafile '%lu'", (unsigned long) df->_fid);
    return;
  }

  // add a deletion marker to the result set container
  TRI_CreateBarrierDatafile(&sim->base._barrierList, df, RemoveDatafileCallback, &sim->base.base);
}
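
Note the locking discipline here: the datafile is located under a read lock, the lock is released for the long-running compaction, and the removal then re-searches the vector by fid under a write lock, because the vector may have changed in between. A self-contained sketch of that re-locate-then-remove step with a pthreads rwlock (all names hypothetical, not the ArangoDB macros):

#include <pthread.h>
#include <stdbool.h>
#include <stddef.h>

typedef struct datafile_s {
  unsigned long fid;
} datafile_t;

typedef struct collection_s {
  pthread_rwlock_t lock;       /* guards the datafiles vector */
  datafile_t**     datafiles;
  size_t           length;
} collection_t;

/* re-locate by fid under the write lock before removing: the entry may
   have moved (or vanished) while the lock was not held */
static bool remove_datafile (collection_t* col, unsigned long fid) {
  bool removed = false;

  pthread_rwlock_wrlock(&col->lock);

  for (size_t i = 0; i < col->length; ++i) {
    if (col->datafiles[i]->fid == fid) {
      /* compact the vector by shifting the tail down one slot */
      for (size_t j = i + 1; j < col->length; ++j) {
        col->datafiles[j - 1] = col->datafiles[j];
      }
      col->length--;
      removed = true;
      break;
    }
  }

  pthread_rwlock_unlock(&col->lock);
  return removed;
}
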