Code example #1
File: hotcopy.c Project: 2asoft/freebsd
/* Copy an un-packed revision or revprop file for revision REV from SRC_SUBDIR
 * to DST_SUBDIR. Assume a sharding layout based on MAX_FILES_PER_DIR.
 * Set *SKIPPED_P to FALSE only if the file was copied; do not change the
 * value in *SKIPPED_P otherwise. SKIPPED_P may be NULL if not required.
 * Use SCRATCH_POOL for temporary allocations. */
static svn_error_t *
hotcopy_copy_shard_file(svn_boolean_t *skipped_p,
                        const char *src_subdir,
                        const char *dst_subdir,
                        svn_revnum_t rev,
                        int max_files_per_dir,
                        apr_pool_t *scratch_pool)
{
  const char *src_subdir_shard = src_subdir,
             *dst_subdir_shard = dst_subdir;

  if (max_files_per_dir)
    {
      const char *shard = apr_psprintf(scratch_pool, "%ld",
                                       rev / max_files_per_dir);
      src_subdir_shard = svn_dirent_join(src_subdir, shard, scratch_pool);
      dst_subdir_shard = svn_dirent_join(dst_subdir, shard, scratch_pool);

      if (rev % max_files_per_dir == 0)
        {
          SVN_ERR(svn_io_make_dir_recursively(dst_subdir_shard, scratch_pool));
          SVN_ERR(svn_io_copy_perms(dst_subdir, dst_subdir_shard,
                                    scratch_pool));
        }
    }

  SVN_ERR(hotcopy_io_dir_file_copy(skipped_p,
                                   src_subdir_shard, dst_subdir_shard,
                                   apr_psprintf(scratch_pool, "%ld", rev),
                                   scratch_pool));

  return SVN_NO_ERROR;
}
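
For context, a hotcopy driver would call hotcopy_copy_shard_file() once per revision for both the revision and revprop trees. The sketch below is illustrative only: copy_revision_files(), src_revs_dir, dst_revs_dir and youngest are hypothetical names, not part of the Subversion source shown here.

static svn_error_t *
copy_revision_files(const char *src_revs_dir,    /* hypothetical */
                    const char *dst_revs_dir,    /* hypothetical */
                    svn_revnum_t youngest,
                    int max_files_per_dir,
                    apr_pool_t *scratch_pool)
{
  apr_pool_t *iterpool = svn_pool_create(scratch_pool);
  svn_revnum_t rev;

  for (rev = 0; rev <= youngest; rev++)
    {
      svn_boolean_t skipped = TRUE;

      svn_pool_clear(iterpool);

      /* SKIPPED becomes FALSE only if the file was actually copied. */
      SVN_ERR(hotcopy_copy_shard_file(&skipped,
                                      src_revs_dir, dst_revs_dir,
                                      rev, max_files_per_dir,
                                      iterpool));
    }

  svn_pool_destroy(iterpool);
  return SVN_NO_ERROR;
}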
Code example #2
File: rep-cache.c Project: kwitaszczyk/freebsd
/* Body of svn_fs_fs__open_rep_cache().
   Implements svn_atomic__init_once().init_func.
 */
static svn_error_t *
open_rep_cache(void *baton,
               apr_pool_t *pool)
{
  svn_fs_t *fs = baton;
  fs_fs_data_t *ffd = fs->fsap_data;
  svn_sqlite__db_t *sdb;
  const char *db_path;
  int version;

  /* Open (or create) the sqlite database.  It will be automatically
     closed when fs->pool is destroyed. */
  db_path = path_rep_cache_db(fs->path, pool);
#ifndef WIN32
  {
    /* We want to extend the permissions that apply to the repository
       as a whole when creating a new rep cache and not simply default
       to umask. */
    svn_boolean_t exists;

    SVN_ERR(svn_fs_fs__exists_rep_cache(&exists, fs, pool));
    if (!exists)
      {
        const char *current = svn_fs_fs__path_current(fs, pool);
        svn_error_t *err = svn_io_file_create_empty(db_path, pool);

        if (err && !APR_STATUS_IS_EEXIST(err->apr_err))
          /* A real error. */
          return svn_error_trace(err);
        else if (err)
          /* Some other thread/process created the file. */
          svn_error_clear(err);
        else
          /* We created the file. */
          SVN_ERR(svn_io_copy_perms(current, db_path, pool));
      }
  }
#endif
  SVN_ERR(svn_sqlite__open(&sdb, db_path,
                           svn_sqlite__mode_rwcreate, statements,
                           0, NULL, 0,
                           fs->pool, pool));

  SVN_ERR_CLOSE(svn_sqlite__read_schema_version(&version, sdb, pool), sdb);
  if (version < REP_CACHE_SCHEMA_FORMAT)
    {
      /* Must be 0 -- an uninitialized (no schema) database. Create
         the schema. Results in schema version of 1.  */
      SVN_ERR_CLOSE(svn_sqlite__exec_statements(sdb, STMT_CREATE_SCHEMA), sdb);
    }

  /* This is used as a flag that the database is available, so don't
     set it earlier. */
  ffd->rep_cache_db = sdb;

  return SVN_NO_ERROR;
}
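
Because open_rep_cache() is written as an svn_atomic__init_once() init function, the public entry point only has to route the filesystem baton through that helper. A minimal sketch, assuming fs_fs_data_t carries an svn_atomic_t field named rep_cache_db_opened (the field name is an assumption here):

svn_error_t *
svn_fs_fs__open_rep_cache(svn_fs_t *fs,
                          apr_pool_t *pool)
{
  fs_fs_data_t *ffd = fs->fsap_data;

  /* Run open_rep_cache() at most once per filesystem object; concurrent
     callers wait until the initialization has completed. */
  return svn_error_trace(svn_atomic__init_once(&ffd->rep_cache_db_opened,
                                               open_rep_cache, fs, pool));
}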
Code example #3
File: sqlite.c Project: 2trill2spill/freebsd
svn_error_t *
svn_sqlite__hotcopy(const char *src_path,
                    const char *dst_path,
                    apr_pool_t *scratch_pool)
{
  svn_sqlite__db_t *src_db;

  SVN_ERR(svn_sqlite__open(&src_db, src_path, svn_sqlite__mode_readonly,
                           NULL, 0, NULL, 0,
                           scratch_pool, scratch_pool));

  {
    svn_sqlite__db_t *dst_db;
    sqlite3_backup *backup;
    int rc1, rc2;

    SVN_ERR(svn_sqlite__open(&dst_db, dst_path, svn_sqlite__mode_rwcreate,
                             NULL, 0, NULL, 0, scratch_pool, scratch_pool));
    backup = sqlite3_backup_init(dst_db->db3, "main", src_db->db3, "main");
    if (!backup)
      return svn_error_createf(SVN_ERR_SQLITE_ERROR, NULL,
                               _("SQLite hotcopy failed for %s"), src_path);
    do
      {
        /* Pages are usually 1024 bytes (SQLite docs). On my laptop
           copying gets faster as the number of pages is increased up
           to about 64; beyond that, speed levels off.  Let's put the
           number of pages an order of magnitude higher; this is still
           likely to be a fraction of large databases. */
        rc1 = sqlite3_backup_step(backup, 1024);

        /* Should we sleep on SQLITE_OK?  That would make copying a
           large database take much longer.  When we do sleep, how
           long should we sleep?  Should the sleep get longer if we
           keep getting BUSY/LOCKED?  I have no real reason for
           choosing 25. */
        if (rc1 == SQLITE_BUSY || rc1 == SQLITE_LOCKED)
          sqlite3_sleep(25);
      }
    while (rc1 == SQLITE_OK || rc1 == SQLITE_BUSY || rc1 == SQLITE_LOCKED);
    rc2 = sqlite3_backup_finish(backup);
    if (rc1 != SQLITE_DONE)
      SQLITE_ERR(rc1, dst_db);
    SQLITE_ERR(rc2, dst_db);
    SVN_ERR(svn_sqlite__close(dst_db));
  }

  SVN_ERR(svn_sqlite__close(src_db));

  SVN_ERR(svn_io_copy_perms(src_path, dst_path, scratch_pool));

  return SVN_NO_ERROR;
}
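
Usage is a single call: pass the source and destination database paths plus a scratch pool, and the function handles opening both databases, stepping the backup and copying the permissions. A hedged usage sketch; the rep-cache.db paths below are placeholders:

static svn_error_t *
copy_rep_cache_db(apr_pool_t *scratch_pool)
{
  /* Placeholder paths; real callers derive these from the repository layout. */
  const char *src_path = "/repos/source/db/rep-cache.db";
  const char *dst_path = "/repos/backup/db/rep-cache.db";

  SVN_ERR(svn_sqlite__hotcopy(src_path, dst_path, scratch_pool));

  return SVN_NO_ERROR;
}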
Code example #4
/* Write to DIGEST_PATH a representation of CHILDREN (which may be
   empty, if the versioned path in FS represented by DIGEST_PATH has
   no children) and LOCK (which may be NULL if that versioned path is
   not itself locked).  Set the permissions of DIGEST_PATH to those of
   PERMS_REFERENCE.  Use POOL for all allocations.
 */
static svn_error_t *
write_digest_file(apr_hash_t *children,
                  svn_lock_t *lock,
                  const char *fs_path,
                  const char *digest_path,
                  const char *perms_reference,
                  apr_pool_t *pool)
{
  svn_error_t *err = SVN_NO_ERROR;
  svn_stream_t *stream;
  apr_hash_index_t *hi;
  apr_hash_t *hash = apr_hash_make(pool);
  const char *tmp_path;

  SVN_ERR(svn_fs_fs__ensure_dir_exists(svn_dirent_join(fs_path, PATH_LOCKS_DIR,
                                                       pool), fs_path, pool));
  SVN_ERR(svn_fs_fs__ensure_dir_exists(svn_dirent_dirname(digest_path, pool),
                                       fs_path, pool));

  if (lock)
    {
      const char *creation_date = NULL, *expiration_date = NULL;
      if (lock->creation_date)
        creation_date = svn_time_to_cstring(lock->creation_date, pool);
      if (lock->expiration_date)
        expiration_date = svn_time_to_cstring(lock->expiration_date, pool);
      hash_store(hash, PATH_KEY, sizeof(PATH_KEY)-1,
                 lock->path, APR_HASH_KEY_STRING, pool);
      hash_store(hash, TOKEN_KEY, sizeof(TOKEN_KEY)-1,
                 lock->token, APR_HASH_KEY_STRING, pool);
      hash_store(hash, OWNER_KEY, sizeof(OWNER_KEY)-1,
                 lock->owner, APR_HASH_KEY_STRING, pool);
      hash_store(hash, COMMENT_KEY, sizeof(COMMENT_KEY)-1,
                 lock->comment, APR_HASH_KEY_STRING, pool);
      hash_store(hash, IS_DAV_COMMENT_KEY, sizeof(IS_DAV_COMMENT_KEY)-1,
                 lock->is_dav_comment ? "1" : "0", 1, pool);
      hash_store(hash, CREATION_DATE_KEY, sizeof(CREATION_DATE_KEY)-1,
                 creation_date, APR_HASH_KEY_STRING, pool);
      hash_store(hash, EXPIRATION_DATE_KEY, sizeof(EXPIRATION_DATE_KEY)-1,
                 expiration_date, APR_HASH_KEY_STRING, pool);
    }
  if (apr_hash_count(children))
    {
      svn_stringbuf_t *children_list = svn_stringbuf_create("", pool);
      for (hi = apr_hash_first(pool, children); hi; hi = apr_hash_next(hi))
        {
          svn_stringbuf_appendbytes(children_list,
                                    svn__apr_hash_index_key(hi),
                                    svn__apr_hash_index_klen(hi));
          svn_stringbuf_appendbyte(children_list, '\n');
        }
      hash_store(hash, CHILDREN_KEY, sizeof(CHILDREN_KEY)-1,
                 children_list->data, children_list->len, pool);
    }

  SVN_ERR(svn_stream_open_unique(&stream, &tmp_path,
                                 svn_dirent_dirname(digest_path, pool),
                                 svn_io_file_del_none, pool, pool));
  if ((err = svn_hash_write2(hash, stream, SVN_HASH_TERMINATOR, pool)))
    {
      svn_error_clear(svn_stream_close(stream));
      return svn_error_createf(err->apr_err,
                               err,
                               _("Cannot write lock/entries hashfile '%s'"),
                               svn_dirent_local_style(tmp_path, pool));
    }

  SVN_ERR(svn_stream_close(stream));
  SVN_ERR(svn_io_file_rename(tmp_path, digest_path, pool));
  SVN_ERR(svn_io_copy_perms(perms_reference, digest_path, pool));
  return SVN_NO_ERROR;
}
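
write_digest_file() depends on a hash_store() helper that is not shown above. The following is a plausible reconstruction, not the verbatim Subversion code, assuming the helper skips NULL values and wraps each value in an svn_string_t so that svn_hash_write2() can serialize it:

static void
hash_store(apr_hash_t *hash,
           const char *key,
           apr_ssize_t klen,
           const char *value,
           apr_ssize_t vlen,
           apr_pool_t *pool)
{
  /* Ignore absent values (e.g. a lock without a comment). */
  if (! (key && value))
    return;

  if (vlen == APR_HASH_KEY_STRING)
    vlen = strlen(value);

  apr_hash_set(hash, key, klen, svn_string_ncreate(value, vlen, pool));
}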