Code example #1
File: cleanup.c  Project: dtrebbien/subversion
/* Do a modified check for LOCAL_ABSPATH, and all working children, to force
   timestamp repair. */
static svn_error_t *
repair_timestamps(svn_wc__db_t *db,
                  const char *local_abspath,
                  svn_cancel_func_t cancel_func,
                  void *cancel_baton,
                  apr_pool_t *scratch_pool)
{
  svn_kind_t kind;
  svn_wc__db_status_t status;

  if (cancel_func)
    SVN_ERR(cancel_func(cancel_baton));

  SVN_ERR(svn_wc__db_read_info(&status, &kind,
                               NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
                               NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
                               NULL, NULL, NULL, NULL, NULL, NULL,
                               NULL, NULL, NULL,
                               db, local_abspath, scratch_pool, scratch_pool));

  if (status == svn_wc__db_status_server_excluded
      || status == svn_wc__db_status_deleted
      || status == svn_wc__db_status_excluded
      || status == svn_wc__db_status_not_present)
    return SVN_NO_ERROR;

  if (kind == svn_kind_file
      || kind == svn_kind_symlink)
    {
      svn_boolean_t modified;
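      /* NOTE: the result is deliberately discarded; the modification check
         is run only for its side effect of repairing the recorded
         timestamp. */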
      SVN_ERR(svn_wc__internal_file_modified_p(&modified,
                                               db, local_abspath, FALSE,
                                               scratch_pool));
    }
  else if (kind == svn_kind_dir)
    {
      apr_pool_t *iterpool = svn_pool_create(scratch_pool);
      const apr_array_header_t *children;
      int i;

      SVN_ERR(svn_wc__db_read_children_of_working_node(&children, db,
                                                       local_abspath,
                                                       scratch_pool,
                                                       iterpool));
      for (i = 0; i < children->nelts; ++i)
        {
          const char *child_abspath;

          svn_pool_clear(iterpool);

          child_abspath = svn_dirent_join(local_abspath,
                                          APR_ARRAY_IDX(children, i,
                                                        const char *),
                                          iterpool);

          SVN_ERR(repair_timestamps(db, child_abspath,
                                    cancel_func, cancel_baton, iterpool));
        }
      svn_pool_destroy(iterpool);
    }

  return SVN_NO_ERROR;
}
Code example #2
File: fs.c  Project: FreeBSDFoundation/freebsd
/* This implements the fs_library_vtable_t.hotcopy() API.  Copy a
   possibly live Subversion filesystem SRC_FS from SRC_PATH to a
   DST_FS at DEST_PATH. If INCREMENTAL is TRUE, make an effort not to
   re-copy data which already exists in DST_FS.
   The CLEAN_LOGS argument is ignored and included for Subversion
   1.0.x compatibility.  The NOTIFY_FUNC and NOTIFY_BATON arguments
   are also currently ignored.
   Perform all temporary allocations in SCRATCH_POOL. */
static svn_error_t *
x_hotcopy(svn_fs_t *src_fs,
          svn_fs_t *dst_fs,
          const char *src_path,
          const char *dst_path,
          svn_boolean_t clean_logs,
          svn_boolean_t incremental,
          svn_fs_hotcopy_notify_t notify_func,
          void *notify_baton,
          svn_cancel_func_t cancel_func,
          void *cancel_baton,
          svn_mutex__t *common_pool_lock,
          apr_pool_t *scratch_pool,
          apr_pool_t *common_pool)
{
  /* Open the source repo as usual. */
  SVN_ERR(x_open(src_fs, src_path, common_pool_lock, scratch_pool,
                 common_pool));
  if (cancel_func)
    SVN_ERR(cancel_func(cancel_baton));

  SVN_ERR(svn_fs__check_fs(dst_fs, FALSE));
  SVN_ERR(initialize_fs_struct(dst_fs));

  /* In INCREMENTAL mode, svn_fs_x__hotcopy() will open DST_FS.
     Otherwise, it's not an FS yet --- possibly just an empty dir --- so
     can't be opened.
   */
  return svn_fs_x__hotcopy(src_fs, dst_fs, src_path, dst_path,
                            incremental, notify_func, notify_baton,
                            cancel_func, cancel_baton, common_pool_lock,
                            scratch_pool, common_pool);
}
Code example #3
File: fs.c  Project: 2trill2spill/freebsd
/* This implements the fs_library_vtable_t.hotcopy() API.  Copy a
   possibly live Subversion filesystem SRC_FS from SRC_PATH to a
   DST_FS at DEST_PATH. If INCREMENTAL is TRUE, make an effort not to
   re-copy data which already exists in DST_FS.
   The CLEAN_LOGS argument is ignored and included for Subversion
   1.0.x compatibility.  Indicate progress via the optional NOTIFY_FUNC
   callback using NOTIFY_BATON.  Perform all temporary allocations in POOL. */
static svn_error_t *
fs_hotcopy(svn_fs_t *src_fs,
           svn_fs_t *dst_fs,
           const char *src_path,
           const char *dst_path,
           svn_boolean_t clean_logs,
           svn_boolean_t incremental,
           svn_fs_hotcopy_notify_t notify_func,
           void *notify_baton,
           svn_cancel_func_t cancel_func,
           void *cancel_baton,
           svn_mutex__t *common_pool_lock,
           apr_pool_t *pool,
           apr_pool_t *common_pool)
{
  /* Open the source repo as usual. */
  SVN_ERR(fs_open(src_fs, src_path, common_pool_lock, pool, common_pool));
  if (cancel_func)
    SVN_ERR(cancel_func(cancel_baton));

  /* Test target repo when in INCREMENTAL mode, initialize it when not.
   * For this, we need our FS internal data structures to be temporarily
   * available. */
  SVN_ERR(initialize_fs_struct(dst_fs));
  SVN_ERR(svn_fs_fs__hotcopy_prepare_target(src_fs, dst_fs, dst_path,
                                            incremental, pool));
  uninitialize_fs_struct(dst_fs);

  /* Now, the destination repo should open just fine. */
  SVN_ERR(fs_open(dst_fs, dst_path, common_pool_lock, pool, common_pool));
  if (cancel_func)
    SVN_ERR(cancel_func(cancel_baton));

  /* Now, we may copy data as needed ... */
  return svn_fs_fs__hotcopy(src_fs, dst_fs, incremental,
                            notify_func, notify_baton,
                            cancel_func, cancel_baton, pool);
}
Code example #4
/* Copies the data from ORIGINAL_STREAM to a temporary file, returning both
   the original and compressed size. */
static svn_error_t *
create_compressed(apr_file_t **result,
                  svn_filesize_t *full_size,
                  svn_filesize_t *compressed_size,
                  svn_stream_t *original_stream,
                  svn_cancel_func_t cancel_func,
                  void *cancel_baton,
                  apr_pool_t *result_pool,
                  apr_pool_t *scratch_pool)
{
  svn_stream_t *compressed;
  svn_filesize_t bytes_read = 0;
  apr_size_t rd;

  SVN_ERR(svn_io_open_uniquely_named(result, NULL, NULL, "diffgz",
                                     NULL, svn_io_file_del_on_pool_cleanup,
                                     result_pool, scratch_pool));

  compressed = svn_stream_compressed(
                  svn_stream_from_aprfile2(*result, TRUE, scratch_pool),
                  scratch_pool);

  if (original_stream)
    do
    {
      char buffer[SVN__STREAM_CHUNK_SIZE];
      rd = sizeof(buffer);

      if (cancel_func)
        SVN_ERR(cancel_func(cancel_baton));

      SVN_ERR(svn_stream_read_full(original_stream, buffer, &rd));

      bytes_read += rd;
      SVN_ERR(svn_stream_write(compressed, buffer, &rd));
    }
    while(rd == SVN__STREAM_CHUNK_SIZE);
  else
    {
      apr_size_t zero = 0;
      SVN_ERR(svn_stream_write(compressed, NULL, &zero));
    }

  SVN_ERR(svn_stream_close(compressed)); /* Flush compression */

  *full_size = bytes_read;
  SVN_ERR(svn_io_file_size_get(compressed_size, *result, scratch_pool));

  return SVN_NO_ERROR;
}
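
The helper above is file-local, so the caller below is purely illustrative: the wrapper name, the string-buffer input, and the output stream are assumptions, not part of the original file.

/* Illustrative caller (hypothetical): compress DATA into a temporary file
   and print how well it compressed.  Reuses create_compressed() from above. */
static svn_error_t *
show_compression_ratio(svn_stream_t *out,
                       const svn_string_t *data,
                       svn_cancel_func_t cancel_func,
                       void *cancel_baton,
                       apr_pool_t *pool)
{
  apr_file_t *compressed_file;
  svn_filesize_t full_size;
  svn_filesize_t compressed_size;

  SVN_ERR(create_compressed(&compressed_file, &full_size, &compressed_size,
                            svn_stream_from_string(data, pool),
                            cancel_func, cancel_baton, pool, pool));

  return svn_stream_printf(out, pool,
                           "%" SVN_FILESIZE_T_FMT " -> %" SVN_FILESIZE_T_FMT
                           " bytes" APR_EOL_STR,
                           full_size, compressed_size);
}
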
Code example #5
svn_error_t *
svn_txdelta_run(svn_stream_t *source,
                svn_stream_t *target,
                svn_txdelta_window_handler_t handler,
                void *handler_baton,
                svn_checksum_kind_t checksum_kind,
                svn_checksum_t **checksum,
                svn_cancel_func_t cancel_func,
                void *cancel_baton,
                apr_pool_t *result_pool,
                apr_pool_t *scratch_pool)
{
  apr_pool_t *iterpool = svn_pool_create(scratch_pool);
  struct txdelta_baton tb = { 0 };
  svn_txdelta_window_t *window;

  tb.source = source;
  tb.target = target;
  tb.more_source = TRUE;
  tb.more = TRUE;
  tb.pos = 0;
  tb.buf = apr_palloc(scratch_pool, 2 * SVN_DELTA_WINDOW_SIZE);
  tb.result_pool = result_pool;

  if (checksum != NULL)
    tb.context = svn_checksum_ctx_create(checksum_kind, scratch_pool);

  do
    {
      /* free the window (if any) */
      svn_pool_clear(iterpool);

      /* read in a single delta window */
      SVN_ERR(txdelta_next_window(&window, &tb, iterpool));

      /* shove it at the handler */
      SVN_ERR((*handler)(window, handler_baton));

      if (cancel_func)
        SVN_ERR(cancel_func(cancel_baton));
    }
  while (window != NULL);

  svn_pool_destroy(iterpool);

  if (checksum != NULL)
    *checksum = tb.checksum;  /* should be there! */

  return SVN_NO_ERROR;
}
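
The handler passed to svn_txdelta_run() receives every window, including a final NULL window that ends the loop above. A minimal handler might look like the sketch below; the counting baton is an assumption made for illustration.

/* Hypothetical handler baton: just counts the windows produced. */
struct window_count_baton
{
  int windows;
};

/* Implements svn_txdelta_window_handler_t.  The last invocation receives
   WINDOW == NULL, which marks the end of the delta stream. */
static svn_error_t *
count_windows(svn_txdelta_window_t *window, void *baton)
{
  struct window_count_baton *wcb = baton;

  if (window != NULL)
    wcb->windows++;

  return SVN_NO_ERROR;
}
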
Code example #6
File: hotcopy.c  Project: 2asoft/freebsd
/* Shortcut for the revision and revprop copying for old (1 or 2) format
 * filesystems without sharding and packing.  Copy the non-sharded revision
 * and revprop files from SRC_FS to DST_FS.  Do not re-copy data which
 * already exists in DST_FS.  Do not somehow checkpoint the results in
 * the 'current' file in DST_FS.  Indicate progress via the optional
 * NOTIFY_FUNC callback using NOTIFY_BATON.  Use POOL for temporary
 * allocations.  Also see hotcopy_revisions().
 */
static svn_error_t *
hotcopy_revisions_old(svn_fs_t *src_fs,
                      svn_fs_t *dst_fs,
                      svn_revnum_t src_youngest,
                      const char *src_revs_dir,
                      const char *dst_revs_dir,
                      const char *src_revprops_dir,
                      const char *dst_revprops_dir,
                      svn_fs_hotcopy_notify_t notify_func,
                      void* notify_baton,
                      svn_cancel_func_t cancel_func,
                      void* cancel_baton,
                      apr_pool_t *pool)
{
  apr_pool_t *iterpool = svn_pool_create(pool);
  svn_revnum_t rev;

  for (rev = 0; rev <= src_youngest; rev++)
    {
      svn_boolean_t skipped = TRUE;

      svn_pool_clear(iterpool);

      if (cancel_func)
        SVN_ERR(cancel_func(cancel_baton));

      SVN_ERR(hotcopy_io_dir_file_copy(&skipped, src_revs_dir, dst_revs_dir,
                                       apr_psprintf(iterpool, "%ld", rev),
                                       iterpool));
      SVN_ERR(hotcopy_io_dir_file_copy(&skipped, src_revprops_dir,
                                       dst_revprops_dir,
                                       apr_psprintf(iterpool, "%ld", rev),
                                       iterpool));

      if (notify_func && !skipped)
        notify_func(notify_baton, rev, rev, iterpool);
    }
  svn_pool_destroy(iterpool);

  return SVN_NO_ERROR;
}
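
NOTIFY_FUNC above is invoked with the inclusive bounds of the revision range just copied (a single revision here). A sketch of a matching progress callback follows; the baton layout is an assumption made for illustration.

/* Hypothetical progress baton for a hotcopy NOTIFY_FUNC. */
struct hotcopy_progress_baton
{
  apr_int64_t revisions_copied;
  svn_revnum_t last_notified_rev;
};

/* Matches the way NOTIFY_FUNC is called above: START_REVISION and
   END_REVISION are the inclusive bounds of the range just copied. */
static void
track_hotcopy_progress(void *baton,
                       svn_revnum_t start_revision,
                       svn_revnum_t end_revision,
                       apr_pool_t *scratch_pool)
{
  struct hotcopy_progress_baton *pb = baton;

  pb->revisions_copied += end_revision - start_revision + 1;
  pb->last_notified_rev = end_revision;
}
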
Code example #7
svn_error_t *svn_stream_copy3(svn_stream_t *from, svn_stream_t *to,
                              svn_cancel_func_t cancel_func,
                              void *cancel_baton,
                              apr_pool_t *scratch_pool)
{
  char *buf = apr_palloc(scratch_pool, SVN__STREAM_CHUNK_SIZE);
  svn_error_t *err;
  svn_error_t *err2;

  /* Read and write chunks until we get a short read, indicating the
     end of the stream.  (We can't get a short write without an
     associated error.) */
  while (1)
    {
      apr_size_t len = SVN__STREAM_CHUNK_SIZE;

      if (cancel_func)
        {
          err = cancel_func(cancel_baton);
          if (err)
             break;
        }

      err = svn_stream_read(from, buf, &len);
      if (err)
         break;

      if (len > 0)
        err = svn_stream_write(to, buf, &len);

      if (err || (len != SVN__STREAM_CHUNK_SIZE))
          break;
    }

  err2 = svn_error_compose_create(svn_stream_close(from),
                                  svn_stream_close(to));

  return svn_error_compose_create(err, err2);
}
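
Every example on this page threads a svn_cancel_func_t / cancel baton pair down to its innermost loop. A cancellation callback conventionally returns an SVN_ERR_CANCELLED error to abort the operation; the flag-based baton in this sketch is an assumption, not part of the Subversion API.

/* Hypothetical cancellation callback: aborts as soon as the flag pointed
   to by CANCEL_BATON becomes non-zero. */
static svn_error_t *
check_cancel_flag(void *cancel_baton)
{
  volatile int *cancelled = cancel_baton;

  if (*cancelled)
    return svn_error_create(SVN_ERR_CANCELLED, NULL, NULL);

  return SVN_NO_ERROR;
}

/* Copy FROM to TO, honouring the shared CANCELLED flag. */
static svn_error_t *
copy_with_cancel(svn_stream_t *from,
                 svn_stream_t *to,
                 volatile int *cancelled,
                 apr_pool_t *scratch_pool)
{
  return svn_stream_copy3(from, to, check_cancel_flag, (void *)cancelled,
                          scratch_pool);
}
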
Code example #8
/* Writes out a git-like literal output of the compressed data in
   COMPRESSED_DATA to OUTPUT_STREAM, describing that its normal length is
   UNCOMPRESSED_SIZE. */
static svn_error_t *
write_literal(svn_filesize_t uncompressed_size,
              svn_stream_t *compressed_data,
              svn_stream_t *output_stream,
              svn_cancel_func_t cancel_func,
              void *cancel_baton,
              apr_pool_t *scratch_pool)
{
  apr_size_t rd;
  SVN_ERR(svn_stream_seek(compressed_data, NULL)); /* Seek to start */

  SVN_ERR(svn_stream_printf(output_stream, scratch_pool,
                            "literal %" SVN_FILESIZE_T_FMT APR_EOL_STR,
                            uncompressed_size));

  do
    {
      char chunk[GIT_BASE85_CHUNKSIZE];
      const unsigned char *next;
      apr_size_t left;

      rd = sizeof(chunk);

      if (cancel_func)
        SVN_ERR(cancel_func(cancel_baton));

      SVN_ERR(svn_stream_read_full(compressed_data, chunk, &rd));

      {
        apr_size_t one = 1;
        SVN_ERR(svn_stream_write(output_stream, &b85lenstr[rd-1], &one));
      }

      left = rd;
      next = (void*)chunk;
      while (left)
      {
        char five[5];
        unsigned info = 0;
        int n;
        apr_size_t five_sz;

        /* Push 4 bytes into the 32 bit info, when available */
        for (n = 24; n >= 0 && left; n -= 8, next++, left--)
        {
            info |= (*next) << n;
        }

        /* Write out info as base85 */
        for (n = 4; n >= 0; n--)
        {
            five[n] = b85str[info % 85];
            info /= 85;
        }

        five_sz = 5;
        SVN_ERR(svn_stream_write(output_stream, five, &five_sz));
      }

      SVN_ERR(svn_stream_puts(output_stream, APR_EOL_STR));
    }
  while (rd == GIT_BASE85_CHUNKSIZE);

  return SVN_NO_ERROR;
}
Code example #9
svn_error_t *
svn_fs_fs__walk_rep_reference(svn_fs_t *fs,
                              svn_error_t *(*walker)(representation_t *,
                                                     void *,
                                                     svn_fs_t *, 
                                                     apr_pool_t *),
                              void *walker_baton,
                              svn_cancel_func_t cancel_func,
                              void *cancel_baton,
                              apr_pool_t *pool)
{
  fs_fs_data_t *ffd = fs->fsap_data;
  svn_sqlite__stmt_t *stmt;
  svn_boolean_t have_row;
  int iterations = 0;

  apr_pool_t *iterpool = svn_pool_create(pool);

  /* Don't check ffd->rep_sharing_allowed. */
  SVN_ERR_ASSERT(ffd->format >= SVN_FS_FS__MIN_REP_SHARING_FORMAT);

  if (! ffd->rep_cache_db)
    SVN_ERR(svn_fs_fs__open_rep_cache(fs, pool));

  /* Get the statement. (There are no arguments to bind.) */
  SVN_ERR(svn_sqlite__get_statement(&stmt, ffd->rep_cache_db,
                                    STMT_GET_ALL_REPS));

  /* Walk the cache entries. */
  SVN_ERR(svn_sqlite__step(&have_row, stmt));
  while (have_row)
    {
      representation_t *rep;
      const char *sha1_digest;
      
      /* Clear ITERPOOL occasionally. */
      if (iterations++ % 16 == 0)
        svn_pool_clear(iterpool);

      /* Check for cancellation. */
      if (cancel_func)
        SVN_ERR(cancel_func(cancel_baton));

      /* Construct a representation_t. */
      rep = apr_pcalloc(iterpool, sizeof(*rep));
      sha1_digest = svn_sqlite__column_text(stmt, 0, iterpool);
      SVN_ERR(svn_checksum_parse_hex(&rep->sha1_checksum,
                                     svn_checksum_sha1, sha1_digest,
                                     iterpool));
      rep->revision = svn_sqlite__column_revnum(stmt, 1);
      rep->offset = svn_sqlite__column_int64(stmt, 2);
      rep->size = svn_sqlite__column_int64(stmt, 3);
      rep->expanded_size = svn_sqlite__column_int64(stmt, 4);

      /* Sanity check. */
      if (rep)
        SVN_ERR(rep_has_been_born(rep, fs, iterpool));

      /* Walk. */
      SVN_ERR(walker(rep, walker_baton, fs, iterpool));

      SVN_ERR(svn_sqlite__step(&have_row, stmt));
    }

  SVN_ERR(svn_sqlite__reset(stmt));
  svn_pool_destroy(iterpool);

  return SVN_NO_ERROR;
}
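
The WALKER callback receives one freshly constructed representation_t per rep-cache row. For illustration, a walker that merely accumulates statistics could look like the sketch below; the baton layout is an assumption.

/* Hypothetical walker baton: simple statistics over the rep cache. */
struct rep_stats_baton
{
  apr_int64_t reps;
  apr_int64_t packed_size;
  apr_int64_t expanded_size;
};

/* Matches the walker signature expected by svn_fs_fs__walk_rep_reference(). */
static svn_error_t *
collect_rep_stats(representation_t *rep,
                  void *baton,
                  svn_fs_t *fs,
                  apr_pool_t *scratch_pool)
{
  struct rep_stats_baton *b = baton;

  b->reps++;
  b->packed_size += rep->size;
  b->expanded_size += rep->expanded_size;

  return SVN_NO_ERROR;
}
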
Code example #10
File: dump.c  Project: mommel/alien-svn
/* The main dumper. */
svn_error_t *
svn_repos_dump_fs3(svn_repos_t *repos,
                   svn_stream_t *stream,
                   svn_revnum_t start_rev,
                   svn_revnum_t end_rev,
                   svn_boolean_t incremental,
                   svn_boolean_t use_deltas,
                   svn_repos_notify_func_t notify_func,
                   void *notify_baton,
                   svn_cancel_func_t cancel_func,
                   void *cancel_baton,
                   apr_pool_t *pool)
{
  const svn_delta_editor_t *dump_editor;
  void *dump_edit_baton = NULL;
  svn_revnum_t i;
  svn_fs_t *fs = svn_repos_fs(repos);
  apr_pool_t *subpool = svn_pool_create(pool);
  svn_revnum_t youngest;
  const char *uuid;
  int version;
  svn_boolean_t found_old_reference = FALSE;
  svn_boolean_t found_old_mergeinfo = FALSE;
  svn_repos_notify_t *notify;

  /* Determine the current youngest revision of the filesystem. */
  SVN_ERR(svn_fs_youngest_rev(&youngest, fs, pool));

  /* Use default vals if necessary. */
  if (! SVN_IS_VALID_REVNUM(start_rev))
    start_rev = 0;
  if (! SVN_IS_VALID_REVNUM(end_rev))
    end_rev = youngest;
  if (! stream)
    stream = svn_stream_empty(pool);

  /* Validate the revisions. */
  if (start_rev > end_rev)
    return svn_error_createf(SVN_ERR_REPOS_BAD_ARGS, NULL,
                             _("Start revision %ld"
                               " is greater than end revision %ld"),
                             start_rev, end_rev);
  if (end_rev > youngest)
    return svn_error_createf(SVN_ERR_REPOS_BAD_ARGS, NULL,
                             _("End revision %ld is invalid "
                               "(youngest revision is %ld)"),
                             end_rev, youngest);
  if ((start_rev == 0) && incremental)
    incremental = FALSE; /* revision 0 looks the same regardless of
                            whether or not this is an incremental
                            dump, so just simplify things. */

  /* Write out the UUID. */
  SVN_ERR(svn_fs_get_uuid(fs, &uuid, pool));

  /* If we're not using deltas, use the previous version, for
     compatibility with svn 1.0.x. */
  version = SVN_REPOS_DUMPFILE_FORMAT_VERSION;
  if (!use_deltas)
    version--;

  /* Write out "general" metadata for the dumpfile.  In this case, a
     magic header followed by a dumpfile format version. */
  SVN_ERR(svn_stream_printf(stream, pool,
                            SVN_REPOS_DUMPFILE_MAGIC_HEADER ": %d\n\n",
                            version));
  SVN_ERR(svn_stream_printf(stream, pool, SVN_REPOS_DUMPFILE_UUID
                            ": %s\n\n", uuid));

  /* Create a notify object that we can reuse in the loop. */
  if (notify_func)
    notify = svn_repos_notify_create(svn_repos_notify_dump_rev_end,
                                     pool);

  /* Main loop:  we're going to dump revision i.  */
  for (i = start_rev; i <= end_rev; i++)
    {
      svn_revnum_t from_rev, to_rev;
      svn_fs_root_t *to_root;
      svn_boolean_t use_deltas_for_rev;

      svn_pool_clear(subpool);

      /* Check for cancellation. */
      if (cancel_func)
        SVN_ERR(cancel_func(cancel_baton));

      /* Special-case the initial revision dump: it needs to contain
         *all* nodes, because it's the foundation of all future
         revisions in the dumpfile. */
      if ((i == start_rev) && (! incremental))
        {
          /* Special-special-case a dump of revision 0. */
          if (i == 0)
            {
              /* Just write out the one revision 0 record and move on.
                 The parser might want to use its properties. */
              SVN_ERR(write_revision_record(stream, fs, 0, subpool));
              to_rev = 0;
              goto loop_end;
            }

          /* Compare START_REV to revision 0, so that everything
             appears to be added.  */
          from_rev = 0;
          to_rev = i;
        }
      else
        {
          /* In the normal case, we want to compare consecutive revs. */
          from_rev = i - 1;
          to_rev = i;
        }

      /* Write the revision record. */
      SVN_ERR(write_revision_record(stream, fs, to_rev, subpool));

      /* Fetch the editor which dumps nodes to a file.  Regardless of
         what we've been told, don't use deltas for the first rev of a
         non-incremental dump. */
      use_deltas_for_rev = use_deltas && (incremental || i != start_rev);
      SVN_ERR(get_dump_editor(&dump_editor, &dump_edit_baton, fs, to_rev,
                              "", stream, notify_func, notify_baton,
                              start_rev, use_deltas_for_rev, FALSE, subpool));

      /* Drive the editor in one way or another. */
      SVN_ERR(svn_fs_revision_root(&to_root, fs, to_rev, subpool));

      /* If this is the first revision of a non-incremental dump,
         we're in for a full tree dump.  Otherwise, we want to simply
         replay the revision.  */
      if ((i == start_rev) && (! incremental))
        {
          svn_fs_root_t *from_root;
          SVN_ERR(svn_fs_revision_root(&from_root, fs, from_rev, subpool));
          SVN_ERR(svn_repos_dir_delta2(from_root, "", "",
                                       to_root, "",
                                       dump_editor, dump_edit_baton,
                                       NULL,
                                       NULL,
                                       FALSE, /* don't send text-deltas */
                                       svn_depth_infinity,
                                       FALSE, /* don't send entry props */
                                       FALSE, /* don't ignore ancestry */
                                       subpool));
        }
      else
        {
          SVN_ERR(svn_repos_replay2(to_root, "", SVN_INVALID_REVNUM, FALSE,
                                    dump_editor, dump_edit_baton,
                                    NULL, NULL, subpool));
        }

    loop_end:
      if (notify_func)
        {
          notify->revision = to_rev;
          notify_func(notify_baton, notify, subpool);
        }

      if (dump_edit_baton) /* We never get an edit baton for r0. */
        {
          if (((struct edit_baton *)dump_edit_baton)->found_old_reference)
            found_old_reference = TRUE;
          if (((struct edit_baton *)dump_edit_baton)->found_old_mergeinfo)
            found_old_mergeinfo = TRUE;
        }
    }

  if (notify_func)
    {
      /* Did we issue any warnings about references to revisions older than
         the oldest dumped revision?  If so, then issue a final generic
         warning, since the inline warnings already issued might easily be
         missed. */

      notify = svn_repos_notify_create(svn_repos_notify_dump_end, subpool);
      notify_func(notify_baton, notify, subpool);

      if (found_old_reference)
        {
          notify = svn_repos_notify_create(svn_repos_notify_warning, subpool);

          notify->warning = svn_repos_notify_warning_found_old_reference;
          notify->warning_str = _("The range of revisions dumped "
                                  "contained references to "
                                  "copy sources outside that "
                                  "range.");
          notify_func(notify_baton, notify, subpool);
        }

      /* Ditto if we issued any warnings about old revisions referenced
         in dumped mergeinfo. */
      if (found_old_mergeinfo)
        {
          notify = svn_repos_notify_create(svn_repos_notify_warning, subpool);

          notify->warning = svn_repos_notify_warning_found_old_mergeinfo;
          notify->warning_str = _("The range of revisions dumped "
                                  "contained mergeinfo "
                                  "which reference revisions outside "
                                  "that range.");
          notify_func(notify_baton, notify, subpool);
        }
    }

  svn_pool_destroy(subpool);

  return SVN_NO_ERROR;
}
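
Because svn_repos_dump_fs3() substitutes defaults for invalid revision numbers and a bit-bucket stream for a NULL STREAM, a full deltified dump of an already-open repository reduces to a single call. The wrapper below is a sketch; its name and the assumption that REPOS was opened elsewhere are not part of the original file.

/* Illustrative wrapper: dump every revision of REPOS to TO_STREAM as a
   complete (non-incremental), deltified dump. */
static svn_error_t *
dump_everything(svn_repos_t *repos,
                svn_stream_t *to_stream,
                svn_cancel_func_t cancel_func,
                void *cancel_baton,
                apr_pool_t *pool)
{
  return svn_repos_dump_fs3(repos, to_stream,
                            SVN_INVALID_REVNUM /* start_rev => 0 */,
                            SVN_INVALID_REVNUM /* end_rev => youngest */,
                            FALSE /* incremental */,
                            TRUE /* use_deltas */,
                            NULL, NULL /* no notification */,
                            cancel_func, cancel_baton,
                            pool);
}
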
Code example #11
/* ### TODO: Handle depth. */
static svn_error_t *
diff_deleted_dir(const char *dir,
                 svn_revnum_t revision,
                 svn_ra_session_t *ra_session,
                 void *edit_baton,
                 svn_cancel_func_t cancel_func,
                 void *cancel_baton,
                 apr_pool_t *pool)
{
  struct edit_baton *eb = edit_baton;
  apr_hash_t *dirents;
  apr_pool_t *iterpool = svn_pool_create(pool);
  apr_hash_index_t *hi;

  if (cancel_func)
    SVN_ERR(cancel_func(cancel_baton));

  SVN_ERR(svn_ra_get_dir2(ra_session,
                          &dirents,
                          NULL, NULL,
                          dir,
                          revision,
                          SVN_DIRENT_KIND,
                          pool));

  for (hi = apr_hash_first(pool, dirents); hi;
       hi = apr_hash_next(hi))
    {
      const char *path;
      const char *name = svn__apr_hash_index_key(hi);
      svn_dirent_t *dirent = svn__apr_hash_index_val(hi);
      svn_node_kind_t kind;
      svn_client_diff_summarize_t *sum;

      svn_pool_clear(iterpool);

      path = svn_relpath_join(dir, name, iterpool);

      SVN_ERR(svn_ra_check_path(eb->ra_session,
                                path,
                                eb->revision,
                                &kind,
                                iterpool));

      sum = apr_pcalloc(iterpool, sizeof(*sum));
      sum->summarize_kind = svn_client_diff_summarize_kind_deleted;
      sum->path = path;
      sum->node_kind = kind;

      SVN_ERR(eb->summarize_func(sum,
                                 eb->summarize_func_baton,
                                 iterpool));

      if (dirent->kind == svn_node_dir)
        SVN_ERR(diff_deleted_dir(path,
                                 revision,
                                 ra_session,
                                 eb,
                                 cancel_func,
                                 cancel_baton,
                                 iterpool));
    }

  svn_pool_destroy(iterpool);
  return SVN_NO_ERROR;
}
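
EB->summarize_func is fed one svn_client_diff_summarize_t per deleted path. A minimal consumer might look like the sketch below, assuming (purely for illustration) that the baton is an output stream.

/* Hypothetical summarize callback: prints one line per reported path.
   Assumes the baton passed in is an svn_stream_t *. */
static svn_error_t *
print_deleted_path(const svn_client_diff_summarize_t *summary,
                   void *baton,
                   apr_pool_t *scratch_pool)
{
  svn_stream_t *out = baton;

  return svn_stream_printf(out, scratch_pool, "D   %s" APR_EOL_STR,
                           summary->path);
}
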
Code example #12
File: hotcopy.c  Project: 2asoft/freebsd
/* Perform a hotcopy, either normal or incremental.
 *
 * Normal hotcopy assumes that the destination exists as an empty
 * directory. It behaves like an incremental hotcopy except that
 * none of the copied files already exist in the destination.
 *
 * An incremental hotcopy copies only changed or new files to the destination,
 * and removes files from the destination no longer present in the source.
 * While the incremental hotcopy is running, readers should still be able
 * to access the destination repository without error and should not see
 * revisions that are still being copied. Readers are able to see
 * new fully copied revisions even if the entire incremental hotcopy procedure
 * has not yet completed.
 *
 * Writers are blocked out completely during the entire incremental hotcopy
 * process to ensure consistency. This function assumes that the repository
 * write-lock is held.
 */
static svn_error_t *
hotcopy_body(void *baton, apr_pool_t *pool)
{
  struct hotcopy_body_baton *hbb = baton;
  svn_fs_t *src_fs = hbb->src_fs;
  fs_fs_data_t *src_ffd = src_fs->fsap_data;
  svn_fs_t *dst_fs = hbb->dst_fs;
  fs_fs_data_t *dst_ffd = dst_fs->fsap_data;
  svn_boolean_t incremental = hbb->incremental;
  svn_fs_hotcopy_notify_t notify_func = hbb->notify_func;
  void* notify_baton = hbb->notify_baton;
  svn_cancel_func_t cancel_func = hbb->cancel_func;
  void* cancel_baton = hbb->cancel_baton;
  svn_revnum_t src_youngest;
  apr_uint64_t src_next_node_id;
  apr_uint64_t src_next_copy_id;
  svn_revnum_t dst_youngest;
  const char *src_revprops_dir;
  const char *dst_revprops_dir;
  const char *src_revs_dir;
  const char *dst_revs_dir;
  const char *src_subdir;
  const char *dst_subdir;
  svn_node_kind_t kind;

  /* Try to copy the config.
   *
   * ### We try copying the config file before doing anything else,
   * ### because higher layers will abort the hotcopy if we throw
   * ### an error from this function, and that renders the hotcopy
   * ### unusable anyway. */
  if (src_ffd->format >= SVN_FS_FS__MIN_CONFIG_FILE)
    {
      svn_error_t *err;

      err = svn_io_dir_file_copy(src_fs->path, dst_fs->path, PATH_CONFIG,
                                 pool);
      if (err)
        {
          if (APR_STATUS_IS_ENOENT(err->apr_err))
            {
              /* 1.6.0 to 1.6.11 did not copy the configuration file during
               * hotcopy. So if we're hotcopying a repository which has been
               * created as a hotcopy itself, it's possible that fsfs.conf
               * does not exist. Ask the user to re-create it.
               *
               * ### It would be nice to make this a non-fatal error,
               * ### but this function does not get an svn_fs_t object
               * ### so we have no way of just printing a warning via
               * ### the fs->warning() callback. */

              const char *src_abspath;
              const char *dst_abspath;
              const char *config_relpath;
              svn_error_t *err2;

              config_relpath = svn_dirent_join(src_fs->path, PATH_CONFIG, pool);
              err2 = svn_dirent_get_absolute(&src_abspath, src_fs->path, pool);
              if (err2)
                return svn_error_trace(svn_error_compose_create(err, err2));
              err2 = svn_dirent_get_absolute(&dst_abspath, dst_fs->path, pool);
              if (err2)
                return svn_error_trace(svn_error_compose_create(err, err2));

              /* ### hack: strip off the 'db/' directory from paths so
               * ### they make sense to the user */
              src_abspath = svn_dirent_dirname(src_abspath, pool);
              dst_abspath = svn_dirent_dirname(dst_abspath, pool);

              return svn_error_quick_wrapf(err,
                                 _("Failed to create hotcopy at '%s'. "
                                   "The file '%s' is missing from the source "
                                   "repository. Please create this file, for "
                                   "instance by running 'svnadmin upgrade %s'"),
                                 dst_abspath, config_relpath, src_abspath);
            }
          else
            return svn_error_trace(err);
        }
    }

  if (cancel_func)
    SVN_ERR(cancel_func(cancel_baton));

  /* Find the youngest revision in the source and destination.
   * We only support hotcopies from sources with an equal or greater amount
   * of revisions than the destination.
   * This also catches the case where users accidentally swap the
   * source and destination arguments. */
  SVN_ERR(svn_fs_fs__read_current(&src_youngest, &src_next_node_id,
                                  &src_next_copy_id, src_fs, pool));
  if (incremental)
    {
      SVN_ERR(svn_fs_fs__youngest_rev(&dst_youngest, dst_fs, pool));
      if (src_youngest < dst_youngest)
        return svn_error_createf(SVN_ERR_UNSUPPORTED_FEATURE, NULL,
                 _("The hotcopy destination already contains more revisions "
                   "(%lu) than the hotcopy source contains (%lu); are source "
                   "and destination swapped?"),
                   dst_youngest, src_youngest);
    }
  else
    dst_youngest = 0;

  src_revs_dir = svn_dirent_join(src_fs->path, PATH_REVS_DIR, pool);
  dst_revs_dir = svn_dirent_join(dst_fs->path, PATH_REVS_DIR, pool);
  src_revprops_dir = svn_dirent_join(src_fs->path, PATH_REVPROPS_DIR, pool);
  dst_revprops_dir = svn_dirent_join(dst_fs->path, PATH_REVPROPS_DIR, pool);

  /* Ensure that the required folders exist in the destination
   * before actually copying the revisions and revprops. */
  SVN_ERR(svn_io_make_dir_recursively(dst_revs_dir, pool));
  SVN_ERR(svn_io_make_dir_recursively(dst_revprops_dir, pool));

  if (cancel_func)
    SVN_ERR(cancel_func(cancel_baton));

  /* Split the logic for new and old FS formats. The latter is much simpler
   * due to the absence of sharding and packing. However, it requires special
   * care when updating the 'current' file (which contains not just the
   * revision number, but also the next-ID counters). */
  if (src_ffd->format >= SVN_FS_FS__MIN_NO_GLOBAL_IDS_FORMAT)
    {
      SVN_ERR(hotcopy_revisions(src_fs, dst_fs, src_youngest, dst_youngest,
                                incremental, src_revs_dir, dst_revs_dir,
                                src_revprops_dir, dst_revprops_dir,
                                notify_func, notify_baton,
                                cancel_func, cancel_baton, pool));
      SVN_ERR(svn_fs_fs__write_current(dst_fs, src_youngest, 0, 0, pool));
    }
  else
    {
      SVN_ERR(hotcopy_revisions_old(src_fs, dst_fs, src_youngest,
                                    src_revs_dir, dst_revs_dir,
                                    src_revprops_dir, dst_revprops_dir,
                                    notify_func, notify_baton,
                                    cancel_func, cancel_baton, pool));
      SVN_ERR(svn_fs_fs__write_current(dst_fs, src_youngest, src_next_node_id,
                                       src_next_copy_id, pool));
    }

  /* Replace the locks tree.
   * This is racy in case readers are currently trying to list locks in
   * the destination. However, we need to get rid of stale locks.
   * This is the simplest way of doing this, so we accept this small race. */
  dst_subdir = svn_dirent_join(dst_fs->path, PATH_LOCKS_DIR, pool);
  SVN_ERR(svn_io_remove_dir2(dst_subdir, TRUE, cancel_func, cancel_baton,
                             pool));
  src_subdir = svn_dirent_join(src_fs->path, PATH_LOCKS_DIR, pool);
  SVN_ERR(svn_io_check_path(src_subdir, &kind, pool));
  if (kind == svn_node_dir)
    SVN_ERR(svn_io_copy_dir_recursively(src_subdir, dst_fs->path,
                                        PATH_LOCKS_DIR, TRUE,
                                        cancel_func, cancel_baton, pool));

  /* Now copy the node-origins cache tree. */
  src_subdir = svn_dirent_join(src_fs->path, PATH_NODE_ORIGINS_DIR, pool);
  SVN_ERR(svn_io_check_path(src_subdir, &kind, pool));
  if (kind == svn_node_dir)
    SVN_ERR(hotcopy_io_copy_dir_recursively(NULL, src_subdir, dst_fs->path,
                                            PATH_NODE_ORIGINS_DIR, TRUE,
                                            cancel_func, cancel_baton, pool));

  /*
   * NB: Data copied below is only read by writers, not readers.
   *     Writers are still locked out at this point.
   */

  if (dst_ffd->format >= SVN_FS_FS__MIN_REP_SHARING_FORMAT)
    {
      /* Copy the rep cache and then remove entries for revisions
       * that did not make it into the destination. */
      src_subdir = svn_dirent_join(src_fs->path, REP_CACHE_DB_NAME, pool);
      dst_subdir = svn_dirent_join(dst_fs->path, REP_CACHE_DB_NAME, pool);
      SVN_ERR(svn_io_check_path(src_subdir, &kind, pool));
      if (kind == svn_node_file)
        {
          SVN_ERR(svn_sqlite__hotcopy(src_subdir, dst_subdir, pool));

          /* The source might have r/o flags set on it - which would be
             carried over to the copy. */
          SVN_ERR(svn_io_set_file_read_write(dst_subdir, FALSE, pool));
          SVN_ERR(svn_fs_fs__del_rep_reference(dst_fs, src_youngest, pool));
        }
    }

  /* Copy the txn-current file. */
  if (dst_ffd->format >= SVN_FS_FS__MIN_TXN_CURRENT_FORMAT)
    SVN_ERR(svn_io_dir_file_copy(src_fs->path, dst_fs->path,
                                 PATH_TXN_CURRENT, pool));

  return SVN_NO_ERROR;
}
Code example #13
File: hotcopy.c  Project: 2asoft/freebsd
/* Copy the revision and revprop files (possibly sharded / packed) from
 * SRC_FS to DST_FS.  Do not re-copy data which already exists in DST_FS.
 * When copying packed or unpacked shards, checkpoint the result in DST_FS
 * for every shard by updating the 'current' file if necessary.  Assume
 * the >= SVN_FS_FS__MIN_NO_GLOBAL_IDS_FORMAT filesystem format without
 * global next-ID counters.  Indicate progress via the optional NOTIFY_FUNC
 * callback using NOTIFY_BATON.  Use POOL for temporary allocations.
 */
static svn_error_t *
hotcopy_revisions(svn_fs_t *src_fs,
                  svn_fs_t *dst_fs,
                  svn_revnum_t src_youngest,
                  svn_revnum_t dst_youngest,
                  svn_boolean_t incremental,
                  const char *src_revs_dir,
                  const char *dst_revs_dir,
                  const char *src_revprops_dir,
                  const char *dst_revprops_dir,
                  svn_fs_hotcopy_notify_t notify_func,
                  void* notify_baton,
                  svn_cancel_func_t cancel_func,
                  void* cancel_baton,
                  apr_pool_t *pool)
{
  fs_fs_data_t *src_ffd = src_fs->fsap_data;
  fs_fs_data_t *dst_ffd = dst_fs->fsap_data;
  int max_files_per_dir = src_ffd->max_files_per_dir;
  svn_revnum_t src_min_unpacked_rev;
  svn_revnum_t dst_min_unpacked_rev;
  svn_revnum_t rev;
  apr_pool_t *iterpool;

  /* Copy the min unpacked rev, and read its value. */
  if (src_ffd->format >= SVN_FS_FS__MIN_PACKED_FORMAT)
    {
      SVN_ERR(svn_fs_fs__read_min_unpacked_rev(&src_min_unpacked_rev,
                                               src_fs, pool));
      SVN_ERR(svn_fs_fs__read_min_unpacked_rev(&dst_min_unpacked_rev,
                                               dst_fs, pool));

      /* We only support packs coming from the hotcopy source.
       * The destination should not be packed independently from
       * the source. This also catches the case where users accidentally
       * swap the source and destination arguments. */
      if (src_min_unpacked_rev < dst_min_unpacked_rev)
        return svn_error_createf(SVN_ERR_UNSUPPORTED_FEATURE, NULL,
                                 _("The hotcopy destination already contains "
                                   "more packed revisions (%lu) than the "
                                   "hotcopy source contains (%lu)"),
                                   dst_min_unpacked_rev - 1,
                                   src_min_unpacked_rev - 1);

      SVN_ERR(svn_io_dir_file_copy(src_fs->path, dst_fs->path,
                                   PATH_MIN_UNPACKED_REV, pool));
    }
  else
    {
      src_min_unpacked_rev = 0;
      dst_min_unpacked_rev = 0;
    }

  if (cancel_func)
    SVN_ERR(cancel_func(cancel_baton));

  /*
   * Copy the necessary rev files.
   */

  iterpool = svn_pool_create(pool);
  /* First, copy packed shards. */
  for (rev = 0; rev < src_min_unpacked_rev; rev += max_files_per_dir)
    {
      svn_boolean_t skipped = TRUE;
      svn_revnum_t pack_end_rev;

      svn_pool_clear(iterpool);

      if (cancel_func)
        SVN_ERR(cancel_func(cancel_baton));

      /* Copy the packed shard. */
      SVN_ERR(hotcopy_copy_packed_shard(&skipped, &dst_min_unpacked_rev,
                                        src_fs, dst_fs,
                                        rev, max_files_per_dir,
                                        iterpool));

      pack_end_rev = rev + max_files_per_dir - 1;

      /* Whenever this pack did not previously exist in the destination,
       * update 'current' to the most recent packed rev (so readers can see
       * new revisions which arrived in this pack). */
      if (pack_end_rev > dst_youngest)
        {
          SVN_ERR(svn_fs_fs__write_current(dst_fs, pack_end_rev, 0, 0,
                                           iterpool));
        }

      /* When notifying about packed shards, make things simpler by either
       * reporting a full revision range, i.e. [pack start, pack end], or
       * reporting nothing. There is one case when this approach might not
       * be exact (incremental hotcopy with a pack replacing last unpacked
       * revisions), but generally this is good enough. */
      if (notify_func && !skipped)
        notify_func(notify_baton, rev, pack_end_rev, iterpool);

      /* Remove revision files which are now packed. */
      if (incremental)
        {
          SVN_ERR(hotcopy_remove_rev_files(dst_fs, rev,
                                           rev + max_files_per_dir,
                                           max_files_per_dir, iterpool));
          if (dst_ffd->format >= SVN_FS_FS__MIN_PACKED_REVPROP_FORMAT)
            SVN_ERR(hotcopy_remove_revprop_files(dst_fs, rev,
                                                 rev + max_files_per_dir,
                                                 max_files_per_dir,
                                                 iterpool));
        }

      /* Now that all revisions have moved into the pack, the original
       * rev dir can be removed. */
      SVN_ERR(remove_folder(svn_fs_fs__path_rev_shard(dst_fs, rev, iterpool),
                            cancel_func, cancel_baton, iterpool));
      if (rev > 0 && dst_ffd->format >= SVN_FS_FS__MIN_PACKED_REVPROP_FORMAT)
        SVN_ERR(remove_folder(svn_fs_fs__path_revprops_shard(dst_fs, rev,
                                                             iterpool),
                              cancel_func, cancel_baton, iterpool));
    }

  if (cancel_func)
    SVN_ERR(cancel_func(cancel_baton));

  SVN_ERR_ASSERT(rev == src_min_unpacked_rev);
  SVN_ERR_ASSERT(src_min_unpacked_rev == dst_min_unpacked_rev);

  /* Now, copy pairs of non-packed revisions and revprop files.
   * If necessary, update 'current' after copying all files from a shard. */
  for (; rev <= src_youngest; rev++)
    {
      svn_boolean_t skipped = TRUE;

      svn_pool_clear(iterpool);

      if (cancel_func)
        SVN_ERR(cancel_func(cancel_baton));

      /* Copying non-packed revisions is racy in case the source repository is
       * being packed concurrently with this hotcopy operation. The race can
       * happen with FS formats prior to SVN_FS_FS__MIN_PACK_LOCK_FORMAT that
       * support packed revisions. With the pack lock, however, the race is
       * impossible, because hotcopy and pack operations block each other.
       *
       * We assume that all revisions coming after 'min-unpacked-rev' really
       * are unpacked and that's not necessarily true with concurrent packing.
       * Don't try to be smart in this edge case, because handling it properly
       * might require copying *everything* from the start. Just abort the
       * hotcopy with an ENOENT (revision file moved to a pack, so it is no
       * longer where we expect it to be). */

      /* Copy the rev file. */
      SVN_ERR(hotcopy_copy_shard_file(&skipped,
                                      src_revs_dir, dst_revs_dir, rev,
                                      max_files_per_dir,
                                      iterpool));
      /* Copy the revprop file. */
      SVN_ERR(hotcopy_copy_shard_file(&skipped,
                                      src_revprops_dir, dst_revprops_dir,
                                      rev, max_files_per_dir,
                                      iterpool));

      /* Whenever this revision did not previously exist in the destination,
       * checkpoint the progress via 'current' (do that once per full shard
       * in order not to slow things down). */
      if (rev > dst_youngest)
        {
          if (max_files_per_dir && (rev % max_files_per_dir == 0))
            {
              SVN_ERR(svn_fs_fs__write_current(dst_fs, rev, 0, 0,
                                               iterpool));
            }
        }

      if (notify_func && !skipped)
        notify_func(notify_baton, rev, rev, iterpool);
    }
  svn_pool_destroy(iterpool);

  /* We assume that all revisions were copied now, i.e. we didn't exit the
   * above loop early. 'rev' was last incremented during exit of the loop. */
  SVN_ERR_ASSERT(rev == src_youngest + 1);

  return SVN_NO_ERROR;
}
Code example #14
File: hotcopy.c  Project: 2asoft/freebsd
/* Like svn_io_copy_dir_recursively() but doesn't copy regular files that
 * exist in the destination and do not differ from the source in terms of
 * kind, size, and mtime. Set *SKIPPED_P to FALSE only if at least one
 * file was copied, do not change the value in *SKIPPED_P otherwise.
 * SKIPPED_P may be NULL if not required. */
static svn_error_t *
hotcopy_io_copy_dir_recursively(svn_boolean_t *skipped_p,
                                const char *src,
                                const char *dst_parent,
                                const char *dst_basename,
                                svn_boolean_t copy_perms,
                                svn_cancel_func_t cancel_func,
                                void *cancel_baton,
                                apr_pool_t *pool)
{
  svn_node_kind_t kind;
  apr_status_t status;
  const char *dst_path;
  apr_dir_t *this_dir;
  apr_finfo_t this_entry;
  apr_int32_t flags = APR_FINFO_TYPE | APR_FINFO_NAME;

  /* Make a subpool for recursion */
  apr_pool_t *subpool = svn_pool_create(pool);

  /* The 'dst_path' is simply dst_parent/dst_basename */
  dst_path = svn_dirent_join(dst_parent, dst_basename, pool);

  /* Sanity checks:  SRC and DST_PARENT are directories, and
     DST_BASENAME doesn't already exist in DST_PARENT. */
  SVN_ERR(svn_io_check_path(src, &kind, subpool));
  if (kind != svn_node_dir)
    return svn_error_createf(SVN_ERR_NODE_UNEXPECTED_KIND, NULL,
                             _("Source '%s' is not a directory"),
                             svn_dirent_local_style(src, pool));

  SVN_ERR(svn_io_check_path(dst_parent, &kind, subpool));
  if (kind != svn_node_dir)
    return svn_error_createf(SVN_ERR_NODE_UNEXPECTED_KIND, NULL,
                             _("Destination '%s' is not a directory"),
                             svn_dirent_local_style(dst_parent, pool));

  SVN_ERR(svn_io_check_path(dst_path, &kind, subpool));

  /* Create the new directory. */
  /* ### TODO: copy permissions (needs apr_file_attrs_get()) */
  SVN_ERR(svn_io_make_dir_recursively(dst_path, pool));

  /* Loop over the dirents in SRC.  ('.' and '..' are auto-excluded) */
  SVN_ERR(svn_io_dir_open(&this_dir, src, subpool));

  for (status = apr_dir_read(&this_entry, flags, this_dir);
       status == APR_SUCCESS;
       status = apr_dir_read(&this_entry, flags, this_dir))
    {
      if ((this_entry.name[0] == '.')
          && ((this_entry.name[1] == '\0')
              || ((this_entry.name[1] == '.')
                  && (this_entry.name[2] == '\0'))))
        {
          continue;
        }
      else
        {
          const char *entryname_utf8;

          if (cancel_func)
            SVN_ERR(cancel_func(cancel_baton));

          SVN_ERR(entry_name_to_utf8(&entryname_utf8, this_entry.name,
                                     src, subpool));
          if (this_entry.filetype == APR_REG) /* regular file */
            {
              SVN_ERR(hotcopy_io_dir_file_copy(skipped_p, src, dst_path,
                                               entryname_utf8, subpool));
            }
          else if (this_entry.filetype == APR_LNK) /* symlink */
            {
              const char *src_target = svn_dirent_join(src, entryname_utf8,
                                                       subpool);
              const char *dst_target = svn_dirent_join(dst_path,
                                                       entryname_utf8,
                                                       subpool);
              SVN_ERR(svn_io_copy_link(src_target, dst_target,
                                       subpool));
            }
          else if (this_entry.filetype == APR_DIR) /* recurse */
            {
              const char *src_target;

              /* Prevent infinite recursion by filtering off our
                 newly created destination path. */
              if (strcmp(src, dst_parent) == 0
                  && strcmp(entryname_utf8, dst_basename) == 0)
                continue;

              src_target = svn_dirent_join(src, entryname_utf8, subpool);
              SVN_ERR(hotcopy_io_copy_dir_recursively(skipped_p,
                                                      src_target,
                                                      dst_path,
                                                      entryname_utf8,
                                                      copy_perms,
                                                      cancel_func,
                                                      cancel_baton,
                                                      subpool));
            }
          /* ### support other APR node types someday?? */

        }
    }

  if (! (APR_STATUS_IS_ENOENT(status)))
    return svn_error_wrap_apr(status, _("Can't read directory '%s'"),
                              svn_dirent_local_style(src, pool));

  status = apr_dir_close(this_dir);
  if (status)
    return svn_error_wrap_apr(status, _("Error closing directory '%s'"),
                              svn_dirent_local_style(src, pool));

  /* Free any memory used by recursion */
  svn_pool_destroy(subpool);

  return SVN_NO_ERROR;
}
Code example #15
/* Helper function that crops the children of LOCAL_ABSPATH under the
 * constraint of NEW_DEPTH.  LOCAL_ABSPATH itself will never be cropped.
 * The whole subtree should have been locked.
 *
 * DIR_DEPTH is the current depth of LOCAL_ABSPATH as stored in DB.
 *
 * If NOTIFY_FUNC is not null, each removed file and the root of each
 * removed subtree is reported via NOTIFY_FUNC and NOTIFY_BATON.
 */
static svn_error_t *
crop_children(svn_wc__db_t *db,
              const char *local_abspath,
              svn_depth_t dir_depth,
              svn_depth_t new_depth,
              svn_wc_notify_func2_t notify_func,
              void *notify_baton,
              svn_cancel_func_t cancel_func,
              void *cancel_baton,
              apr_pool_t *pool)
{
  const apr_array_header_t *children;
  apr_pool_t *iterpool;
  int i;

  SVN_ERR_ASSERT(new_depth >= svn_depth_empty
                 && new_depth <= svn_depth_infinity);

  if (cancel_func)
    SVN_ERR(cancel_func(cancel_baton));

  iterpool = svn_pool_create(pool);

  if (dir_depth == svn_depth_unknown)
    dir_depth = svn_depth_infinity;

  /* Update the depth of target first, if needed. */
  if (dir_depth > new_depth)
    SVN_ERR(svn_wc__db_op_set_base_depth(db, local_abspath, new_depth,
                                         iterpool));

  /* Looping over current directory's SVN entries: */
  SVN_ERR(svn_wc__db_read_children(&children, db, local_abspath, pool,
                                   iterpool));

  for (i = 0; i < children->nelts; i++)
    {
      const char *child_name = APR_ARRAY_IDX(children, i, const char *);
      const char *child_abspath;
      svn_wc__db_status_t child_status;
      svn_wc__db_kind_t kind;
      svn_depth_t child_depth;

      svn_pool_clear(iterpool);

      /* Get the next node */
      child_abspath = svn_dirent_join(local_abspath, child_name, iterpool);

      SVN_ERR(svn_wc__db_read_info(&child_status, &kind, NULL, NULL, NULL,
                                   NULL,NULL, NULL, NULL, &child_depth,
                                   NULL, NULL, NULL, NULL, NULL, NULL,
                                   NULL, NULL, NULL, NULL, NULL, NULL,
                                   NULL, NULL, NULL, NULL, NULL,
                                   db, child_abspath, iterpool, iterpool));

      if (child_status == svn_wc__db_status_server_excluded ||
          child_status == svn_wc__db_status_excluded ||
          child_status == svn_wc__db_status_not_present)
        {
          svn_depth_t remove_below = (kind == svn_wc__db_kind_dir)
                                            ? svn_depth_immediates
                                            : svn_depth_files;
          if (new_depth < remove_below)
            SVN_ERR(svn_wc__db_op_remove_node(db, child_abspath,
                                              SVN_INVALID_REVNUM,
                                              svn_wc__db_kind_unknown,
                                              iterpool));

          continue;
        }
      else if (kind == svn_wc__db_kind_file)
        {
          /* We currently crop on a directory basis. So don't worry about
             svn_depth_exclude here. And even we permit excluding a single
             file in the future, svn_wc_remove_from_revision_control() can
             also handle it. We only need to skip the notification in that
             case. */
          if (new_depth == svn_depth_empty)
            IGNORE_LOCAL_MOD(
              svn_wc__internal_remove_from_revision_control(
                                                   db,
                                                   child_abspath,
                                                   TRUE, /* destroy */
                                                   FALSE, /* instant error */
                                                   cancel_func, cancel_baton,
                                                   iterpool));
          else
            continue;

        }
      else if (kind == svn_wc__db_kind_dir)
        {
          if (new_depth < svn_depth_immediates)
            {
              IGNORE_LOCAL_MOD(
                svn_wc__internal_remove_from_revision_control(
                                                     db,
                                                     child_abspath,
                                                     TRUE, /* destroy */
                                                     FALSE, /* instant error */
                                                     cancel_func,
                                                     cancel_baton,
                                                     iterpool));
            }
          else
            {
              SVN_ERR(crop_children(db,
                                    child_abspath,
                                    child_depth,
                                    svn_depth_empty,
                                    notify_func,
                                    notify_baton,
                                    cancel_func,
                                    cancel_baton,
                                    iterpool));
              continue;
            }
        }
      else
        {
          return svn_error_createf
            (SVN_ERR_NODE_UNKNOWN_KIND, NULL, _("Unknown node kind for '%s'"),
             svn_dirent_local_style(child_abspath, iterpool));
        }

      if (notify_func)
        {
          svn_wc_notify_t *notify;
          notify = svn_wc_create_notify(child_abspath,
                                        svn_wc_notify_delete,
                                        iterpool);
          (*notify_func)(notify_baton, notify, iterpool);
        }
    }

  svn_pool_destroy(iterpool);

  return SVN_NO_ERROR;
}
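
crop_children() reports each removed file and subtree root through a standard svn_wc_notify_func2_t callback. A minimal consumer could simply count the deletions, as in this sketch; the int-counter baton is an assumption.

/* Hypothetical notification callback: counts svn_wc_notify_delete events
   emitted while cropping. */
static void
count_crop_deletions(void *baton,
                     const svn_wc_notify_t *notify,
                     apr_pool_t *scratch_pool)
{
  int *deletions = baton;

  if (notify->action == svn_wc_notify_delete)
    (*deletions)++;
}
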
Code example #16
File: copy.c  Project: geofft/subversion
/* Copy the versioned dir SRC_ABSPATH in DB to the path DST_ABSPATH in DB,
   recursively.  If METADATA_ONLY is true, copy only the versioned metadata,
   otherwise copy both the versioned metadata and the filesystem nodes (even
   if they are the wrong kind, and including unversioned children).
   If IS_MOVE is true, record move information in the working copy metadata
   in addition to copying the directory.

   WITHIN_ONE_WC is TRUE if the copy/move is within a single working copy (root)
 */
static svn_error_t *
copy_versioned_dir(svn_wc__db_t *db,
                   const char *src_abspath,
                   const char *dst_abspath,
                   const char *dst_op_root_abspath,
                   const char *tmpdir_abspath,
                   svn_boolean_t metadata_only,
                   svn_boolean_t is_move,
                   svn_cancel_func_t cancel_func,
                   void *cancel_baton,
                   svn_wc_notify_func2_t notify_func,
                   void *notify_baton,
                   apr_pool_t *scratch_pool)
{
  svn_skel_t *work_items = NULL;
  const char *dir_abspath = svn_dirent_dirname(dst_abspath, scratch_pool);
  apr_hash_t *versioned_children;
  apr_hash_t *conflicted_children;
  apr_hash_t *disk_children;
  apr_hash_index_t *hi;
  svn_node_kind_t disk_kind;
  apr_pool_t *iterpool;

  /* Prepare a temp copy of the single filesystem node (usually a dir). */
  if (!metadata_only)
    {
      SVN_ERR(copy_to_tmpdir(&work_items, &disk_kind,
                             db, src_abspath, dst_abspath,
                             tmpdir_abspath,
                             FALSE /* file_copy */,
                             FALSE /* unversioned */,
                             cancel_func, cancel_baton,
                             scratch_pool, scratch_pool));
    }

  /* Copy the (single) node's metadata, and move the new filesystem node
     into place. */
  SVN_ERR(svn_wc__db_op_copy(db, src_abspath, dst_abspath,
                             dst_op_root_abspath, is_move, work_items,
                             scratch_pool));

  if (notify_func)
    {
      svn_wc_notify_t *notify
        = svn_wc_create_notify(dst_abspath, svn_wc_notify_add,
                               scratch_pool);
      notify->kind = svn_node_dir;

      /* Before notifying that we performed the copy, make sure the queued
         work that actually performs it has run. */
      if (work_items != NULL)
        SVN_ERR(svn_wc__wq_run(db, dir_abspath,
                               cancel_func, cancel_baton, scratch_pool));

      (*notify_func)(notify_baton, notify, scratch_pool);
    }

  if (!metadata_only && disk_kind == svn_node_dir)
    /* All filesystem children, versioned and unversioned.  We're only
       interested in their names, so we can pass TRUE as the only_check_type
       param. */
    SVN_ERR(svn_io_get_dirents3(&disk_children, src_abspath, TRUE,
                                scratch_pool, scratch_pool));
  else
    disk_children = NULL;

  /* Copy all the versioned children */
  iterpool = svn_pool_create(scratch_pool);
  SVN_ERR(svn_wc__db_read_children_info(&versioned_children,
                                        &conflicted_children,
                                        db, src_abspath,
                                        scratch_pool, iterpool));
  for (hi = apr_hash_first(scratch_pool, versioned_children);
       hi;
       hi = apr_hash_next(hi))
    {
      const char *child_name, *child_src_abspath, *child_dst_abspath;
      struct svn_wc__db_info_t *info;

      svn_pool_clear(iterpool);

      if (cancel_func)
        SVN_ERR(cancel_func(cancel_baton));

      child_name = svn__apr_hash_index_key(hi);
      info = svn__apr_hash_index_val(hi);
      child_src_abspath = svn_dirent_join(src_abspath, child_name, iterpool);
      child_dst_abspath = svn_dirent_join(dst_abspath, child_name, iterpool);

      if (info->op_root)
        SVN_ERR(svn_wc__db_op_copy_shadowed_layer(db,
                                                  child_src_abspath,
                                                  child_dst_abspath,
                                                  is_move,
                                                  scratch_pool));

      if (info->status == svn_wc__db_status_normal
          || info->status == svn_wc__db_status_added)
        {
          /* We have more work to do than just changing the DB */
          if (info->kind == svn_node_file)
            {
              /* Skip this child if it is a file external
                 (issues #3589, #4000). */
              if (!info->file_external)
                SVN_ERR(copy_versioned_file(db,
                                            child_src_abspath,
                                            child_dst_abspath,
                                            dst_op_root_abspath,
                                            tmpdir_abspath,
                                            metadata_only, info->conflicted,
                                            is_move,
                                            cancel_func, cancel_baton,
                                            NULL, NULL,
                                            iterpool));
            }
          else if (info->kind == svn_node_dir)
            SVN_ERR(copy_versioned_dir(db,
                                       child_src_abspath, child_dst_abspath,
                                       dst_op_root_abspath, tmpdir_abspath,
                                       metadata_only, is_move,
                                       cancel_func, cancel_baton, NULL, NULL,
                                       iterpool));
          else
            return svn_error_createf(SVN_ERR_NODE_UNEXPECTED_KIND, NULL,
                                     _("cannot handle node kind for '%s'"),
                                     svn_dirent_local_style(child_src_abspath,
                                                            scratch_pool));
        }
      else if (info->status == svn_wc__db_status_deleted
          || info->status == svn_wc__db_status_not_present
          || info->status == svn_wc__db_status_excluded)
        {
          /* This will be copied as some kind of deletion. Don't touch
             any actual files */
          SVN_ERR(svn_wc__db_op_copy(db, child_src_abspath,
                                     child_dst_abspath, dst_op_root_abspath,
                                     is_move, NULL, iterpool));

          /* Don't recurse into the children: all that would do is create
             more not-present children. */
        }
      else if (info->status == svn_wc__db_status_incomplete)
        {
          /* Should go ahead and copy incomplete to incomplete? Try to
             copy as much as possible, or give up early? */
          return svn_error_createf(SVN_ERR_WC_PATH_UNEXPECTED_STATUS, NULL,
                                   _("Cannot handle status of '%s'"),
                                   svn_dirent_local_style(child_src_abspath,
                                                          iterpool));
        }
      else
        {
          SVN_ERR_ASSERT(info->status == svn_wc__db_status_server_excluded);

          return svn_error_createf(SVN_ERR_WC_PATH_UNEXPECTED_STATUS, NULL,
                                   _("Cannot copy '%s' excluded by server"),
                                   svn_dirent_local_style(child_src_abspath,
                                                          iterpool));
        }

      if (disk_children
          && (info->status == svn_wc__db_status_normal
              || info->status == svn_wc__db_status_added))
        {
          /* Remove versioned child as it has been handled */
          svn_hash_sets(disk_children, child_name, NULL);
        }
    }

  /* Copy the remaining filesystem children, which are unversioned, skipping
     any conflict-marker files. */
  if (disk_children && apr_hash_count(disk_children))
    {
      apr_hash_t *marker_files;

      SVN_ERR(svn_wc__db_get_conflict_marker_files(&marker_files, db,
                                                   src_abspath, scratch_pool,
                                                   scratch_pool));

      work_items = NULL;

      for (hi = apr_hash_first(scratch_pool, disk_children); hi;
           hi = apr_hash_next(hi))
        {
          const char *name = svn__apr_hash_index_key(hi);
          const char *unver_src_abspath, *unver_dst_abspath;
          svn_skel_t *work_item;

          if (svn_wc_is_adm_dir(name, iterpool))
            continue;

          if (cancel_func)
            SVN_ERR(cancel_func(cancel_baton));

          svn_pool_clear(iterpool);
          unver_src_abspath = svn_dirent_join(src_abspath, name, iterpool);
          unver_dst_abspath = svn_dirent_join(dst_abspath, name, iterpool);

          if (marker_files &&
              svn_hash_gets(marker_files, unver_src_abspath))
            continue;

          SVN_ERR(copy_to_tmpdir(&work_item, NULL, db, unver_src_abspath,
                                 unver_dst_abspath, tmpdir_abspath,
                                 TRUE /* recursive */, TRUE /* unversioned */,
                                 cancel_func, cancel_baton,
                                 scratch_pool, iterpool));

          if (work_item)
            work_items = svn_wc__wq_merge(work_items, work_item, scratch_pool);
        }
      SVN_ERR(svn_wc__db_wq_add(db, dst_abspath, work_items, iterpool));
    }

  svn_pool_destroy(iterpool);

  return SVN_NO_ERROR;
}
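
The doc comment for copy_versioned_dir() describes the METADATA_ONLY and IS_MOVE flags, but the function itself is a private helper inside libsvn_wc. For orientation only, here is a minimal sketch of reaching this code path through the public working-copy API; it assumes the 1.7/1.8-era declarations of svn_wc_context_create() and svn_wc_copy3() from svn_wc.h, so verify the signatures against the headers in your tree.

#include "svn_error.h"
#include "svn_dirent_uri.h"
#include "svn_wc.h"

/* Copy SRC_PATH to DST_PATH in a working copy via the public API, which
   (in this tree) eventually reaches copy_versioned_dir()/copy_versioned_file().
   Sketch only; configuration and error handling are kept minimal. */
static svn_error_t *
copy_example(const char *src_path,
             const char *dst_path,
             apr_pool_t *pool)
{
  svn_wc_context_t *wc_ctx;
  const char *src_abspath, *dst_abspath;

  /* The libsvn_wc entry points want absolute paths. */
  SVN_ERR(svn_dirent_get_absolute(&src_abspath, src_path, pool));
  SVN_ERR(svn_dirent_get_absolute(&dst_abspath, dst_path, pool));

  SVN_ERR(svn_wc_context_create(&wc_ctx, NULL /* config */, pool, pool));

  /* metadata_only = FALSE: copy both the metadata and the on-disk nodes,
     i.e. the branch exercised by copy_versioned_dir() above. */
  SVN_ERR(svn_wc_copy3(wc_ctx, src_abspath, dst_abspath,
                       FALSE /* metadata_only */,
                       NULL, NULL,    /* cancel_func, cancel_baton */
                       NULL, NULL,    /* notify_func, notify_baton */
                       pool));

  return SVN_NO_ERROR;
}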
Code example #17
File: load.c  Project: Ranga123/test1
svn_error_t *
svn_repos_parse_dumpstream3(svn_stream_t *stream,
                            const svn_repos_parse_fns3_t *parse_fns,
                            void *parse_baton,
                            svn_boolean_t deltas_are_text,
                            svn_cancel_func_t cancel_func,
                            void *cancel_baton,
                            apr_pool_t *pool)
{
    svn_boolean_t eof;
    svn_stringbuf_t *linebuf;
    void *rev_baton = NULL;
    char *buffer = apr_palloc(pool, SVN__STREAM_CHUNK_SIZE);
    apr_size_t buflen = SVN__STREAM_CHUNK_SIZE;
    apr_pool_t *linepool = svn_pool_create(pool);
    apr_pool_t *revpool = svn_pool_create(pool);
    apr_pool_t *nodepool = svn_pool_create(pool);
    int version;

    SVN_ERR(svn_stream_readline(stream, &linebuf, "\n", &eof, linepool));
    if (eof)
        return stream_ran_dry();

    /* The first two lines of the stream are the dumpfile-format version
       number, and a blank line.  To preserve backward compatibility,
       don't assume the existence of newer parser-vtable functions. */
    SVN_ERR(parse_format_version(&version, linebuf->data));
    if (parse_fns->magic_header_record != NULL)
        SVN_ERR(parse_fns->magic_header_record(version, parse_baton, pool));

    /* A dumpfile "record" is defined to be a header-block of
       rfc822-style headers, possibly followed by a content-block.

         - A header-block is always terminated by a single blank line (\n\n)

         - We know whether the record has a content-block by looking for
           a 'Content-length:' header.  The content-block will always be
           of a specific length, plus an extra newline.

       Once a record is fully sucked from the stream, an indeterminate
       number of blank lines (or lines that begin with whitespace) may
       follow before the next record (or the end of the stream.)
    */
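
    /* Illustration only (not part of the original comment): a typical
       format-3 node record might look like

           Node-path: trunk/foo.c
           Node-kind: file
           Node-action: change
           Prop-content-length: 10
           Text-content-length: 12
           Content-length: 22
           <blank line>
           ...22 bytes of property and text content...

       whereas a revision record begins with a Revision-number header and
       carries only a property block. */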

    while (1)
    {
        apr_hash_t *headers;
        void *node_baton;
        svn_boolean_t found_node = FALSE;
        svn_boolean_t old_v1_with_cl = FALSE;
        const char *content_length;
        const char *prop_cl;
        const char *text_cl;
        const char *value;
        svn_filesize_t actual_prop_length;

        /* Clear our per-line pool. */
        svn_pool_clear(linepool);

        /* Check for cancellation. */
        if (cancel_func)
            SVN_ERR(cancel_func(cancel_baton));

        /* Keep reading blank lines until we discover a new record, or until
           the stream runs out. */
        SVN_ERR(svn_stream_readline(stream, &linebuf, "\n", &eof, linepool));

        if (eof)
        {
            if (svn_stringbuf_isempty(linebuf))
                break;   /* end of stream, go home. */
            else
                return stream_ran_dry();
        }

        if ((linebuf->len == 0) || (svn_ctype_isspace(linebuf->data[0])))
            continue; /* empty line ... loop */

        /*** Found the beginning of a new record. ***/

        /* The last line we read better be a header of some sort.
           Read the whole header-block into a hash. */
        SVN_ERR(read_header_block(stream, linebuf, &headers, linepool));

        /*** Handle the various header blocks. ***/

        /* Is this a revision record? */
        if (svn_hash_gets(headers, SVN_REPOS_DUMPFILE_REVISION_NUMBER))
        {
            /* If we already have a rev_baton open, we need to close it
               and clear the per-revision subpool. */
            if (rev_baton != NULL)
            {
                SVN_ERR(parse_fns->close_revision(rev_baton));
                svn_pool_clear(revpool);
            }

            SVN_ERR(parse_fns->new_revision_record(&rev_baton,
                                                   headers, parse_baton,
                                                   revpool));
        }
        /* Or is this, perhaps, a node record? */
        else if (svn_hash_gets(headers, SVN_REPOS_DUMPFILE_NODE_PATH))
        {
            SVN_ERR(parse_fns->new_node_record(&node_baton,
                                               headers,
                                               rev_baton,
                                               nodepool));
            found_node = TRUE;
        }
        /* Or is this the repos UUID? */
        else if ((value = svn_hash_gets(headers, SVN_REPOS_DUMPFILE_UUID)))
        {
            SVN_ERR(parse_fns->uuid_record(value, parse_baton, pool));
        }
        /* Or perhaps a dumpfile format? */
        /* ### TODO: use parse_format_version */
        else if ((value = svn_hash_gets(headers,
                                        SVN_REPOS_DUMPFILE_MAGIC_HEADER)))
        {
            /* ### someday, switch modes of operation here. */
            SVN_ERR(svn_cstring_atoi(&version, value));
        }
        /* Or is this bogosity?! */
        else
        {
            /* What the heck is this record?!? */
            return svn_error_create(SVN_ERR_STREAM_MALFORMED_DATA, NULL,
                                    _("Unrecognized record type in stream"));
        }

        /* Need 3 values below to determine v1 dump type

           Old (pre-0.14?) v1 dumps don't have Prop-content-length
           and Text-content-length fields, but they always have a
           properties block inside a content block with Content-length > 0. */

        content_length = svn_hash_gets(headers,
                                       SVN_REPOS_DUMPFILE_CONTENT_LENGTH);
        prop_cl = svn_hash_gets(headers, SVN_REPOS_DUMPFILE_PROP_CONTENT_LENGTH);
        text_cl = svn_hash_gets(headers, SVN_REPOS_DUMPFILE_TEXT_CONTENT_LENGTH);
        old_v1_with_cl =
            version == 1 && content_length && ! prop_cl && ! text_cl;

        /* Is there a props content-block to parse? */
        if (prop_cl || old_v1_with_cl)
        {
            const char *delta = svn_hash_gets(headers,
                                              SVN_REPOS_DUMPFILE_PROP_DELTA);
            svn_boolean_t is_delta = (delta && strcmp(delta, "true") == 0);

            /* First, remove all node properties, unless this is a delta
               property block. */
            if (found_node && !is_delta)
                SVN_ERR(parse_fns->remove_node_props(node_baton));

            SVN_ERR(parse_property_block
                    (stream,
                     svn__atoui64(prop_cl ? prop_cl : content_length),
                     parse_fns,
                     found_node ? node_baton : rev_baton,
                     parse_baton,
                     found_node,
                     &actual_prop_length,
                     found_node ? nodepool : revpool));
        }

        /* Is there a text content-block to parse? */
        if (text_cl)
        {
            const char *delta = svn_hash_gets(headers,
                                              SVN_REPOS_DUMPFILE_TEXT_DELTA);
            svn_boolean_t is_delta = FALSE;
            if (! deltas_are_text)
                is_delta = (delta && strcmp(delta, "true") == 0);

            SVN_ERR(parse_text_block(stream,
                                     svn__atoui64(text_cl),
                                     is_delta,
                                     parse_fns,
                                     found_node ? node_baton : rev_baton,
                                     buffer,
                                     buflen,
                                     found_node ? nodepool : revpool));
        }
        else if (old_v1_with_cl)
        {
            /* An old-v1 block with a Content-length might have a text block.
               If the property block did not consume all the bytes of the
               Content-length, then it clearly does have a text block.
               If not, then we must deduce whether we have an *empty* text
               block or an *absent* text block.  The rules are:
               - "Node-kind: file" blocks have an empty (i.e. present, but
                 zero-length) text block, since they represent a file
                 modification.  Note that file-copied-text-unmodified blocks
                 have no Content-length - even if they should have contained
                 a modified property block, the pre-0.14 dumper forgets to
                 dump the modified properties.
               - If it is not a file node, then it is a revision or directory,
                 and so has an absent text block.
            */
            const char *node_kind;
            svn_filesize_t cl_value = svn__atoui64(content_length)
                                      - actual_prop_length;

            if (cl_value ||
                    ((node_kind = svn_hash_gets(headers,
                                                SVN_REPOS_DUMPFILE_NODE_KIND))
                     && strcmp(node_kind, "file") == 0)
               )
                SVN_ERR(parse_text_block(stream,
                                         cl_value,
                                         FALSE,
                                         parse_fns,
                                         found_node ? node_baton : rev_baton,
                                         buffer,
                                         buflen,
                                         found_node ? nodepool : revpool));
        }

        /* if we have a content-length header, did we read all of it?
           in case of an old v1, we *always* read all of it, because
           text-content-length == content-length - prop-content-length
        */
        if (content_length && ! old_v1_with_cl)
        {
            apr_size_t rlen, num_to_read;
            svn_filesize_t remaining =
                svn__atoui64(content_length) -
                (prop_cl ? svn__atoui64(prop_cl) : 0) -
                (text_cl ? svn__atoui64(text_cl) : 0);


            if (remaining < 0)
                return svn_error_create(SVN_ERR_STREAM_MALFORMED_DATA, NULL,
                                        _("Sum of subblock sizes larger than "
                                          "total block content length"));

            /* Consume remaining bytes in this content block */
            while (remaining > 0)
            {
                if (remaining >= (svn_filesize_t)buflen)
                    rlen = buflen;
                else
                    rlen = (apr_size_t) remaining;

                num_to_read = rlen;
                SVN_ERR(svn_stream_read(stream, buffer, &rlen));
                remaining -= rlen;
                if (rlen != num_to_read)
                    return stream_ran_dry();
            }
        }

        /* If we just finished processing a node record, we need to
           close the node record and clear the per-node subpool. */
        if (found_node)
        {
            SVN_ERR(parse_fns->close_node(node_baton));
            svn_pool_clear(nodepool);
        }

        /*** End of processing for one record. ***/

    } /* end of stream */

    /* Close out whatever revision we're in. */
    if (rev_baton != NULL)
        SVN_ERR(parse_fns->close_revision(rev_baton));

    svn_pool_destroy(linepool);
    svn_pool_destroy(revpool);
    svn_pool_destroy(nodepool);
    return SVN_NO_ERROR;
}
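
The long comment near the top of the parsing loop describes the record layout: an rfc822-style header block terminated by a blank line, optionally followed by a content block whose size is announced by a Content-length header. read_header_block() itself is not part of this excerpt, so the following is only a simplified, self-contained sketch of that idea, built from svn_stream_readline() (used above) and plain APR hash and string routines.

#include <string.h>
#include <apr_hash.h>
#include <apr_strings.h>
#include "svn_error.h"
#include "svn_io.h"
#include "svn_string.h"

/* Read "Key: value" header lines from STREAM into a hash allocated in POOL,
   stopping at the blank line that terminates the header block.  Simplified
   sketch; the real read_header_block() also takes the first, already-read
   header line as an argument and uses gettext'd error messages. */
static svn_error_t *
read_headers_sketch(apr_hash_t **headers,
                    svn_stream_t *stream,
                    apr_pool_t *pool)
{
  apr_hash_t *hash = apr_hash_make(pool);
  svn_stringbuf_t *line;
  svn_boolean_t eof;

  while (1)
    {
      const char *colon;

      SVN_ERR(svn_stream_readline(stream, &line, "\n", &eof, pool));
      if (eof)
        return svn_error_create(SVN_ERR_STREAM_MALFORMED_DATA, NULL,
                                "Stream ended inside a header block");
      if (line->len == 0)
        break;                  /* the blank line ends the header block */

      colon = strchr(line->data, ':');
      if (colon == NULL || colon[1] != ' ')
        return svn_error_create(SVN_ERR_STREAM_MALFORMED_DATA, NULL,
                                "Malformed header line");

      apr_hash_set(hash,
                   apr_pstrndup(pool, line->data,
                                (apr_size_t)(colon - line->data)),
                   APR_HASH_KEY_STRING,
                   apr_pstrdup(pool, colon + 2));
    }

  *headers = hash;
  return SVN_NO_ERROR;
}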
Code example #18
File: rep-cache.c  Project: kwitaszczyk/freebsd
svn_error_t *
svn_fs_fs__walk_rep_reference(svn_fs_t *fs,
                              svn_revnum_t start,
                              svn_revnum_t end,
                              svn_error_t *(*walker)(representation_t *,
                                                     void *,
                                                     svn_fs_t *,
                                                     apr_pool_t *),
                              void *walker_baton,
                              svn_cancel_func_t cancel_func,
                              void *cancel_baton,
                              apr_pool_t *pool)
{
  fs_fs_data_t *ffd = fs->fsap_data;
  svn_sqlite__stmt_t *stmt;
  svn_boolean_t have_row;
  int iterations = 0;

  apr_pool_t *iterpool = svn_pool_create(pool);

  /* Don't check ffd->rep_sharing_allowed. */
  SVN_ERR_ASSERT(ffd->format >= SVN_FS_FS__MIN_REP_SHARING_FORMAT);

  if (! ffd->rep_cache_db)
    SVN_ERR(svn_fs_fs__open_rep_cache(fs, pool));

  /* Check global invariants. */
  if (start == 0)
    {
      svn_revnum_t max;

      SVN_ERR(svn_sqlite__get_statement(&stmt, ffd->rep_cache_db,
                                        STMT_GET_MAX_REV));
      SVN_ERR(svn_sqlite__step(&have_row, stmt));
      max = svn_sqlite__column_revnum(stmt, 0);
      SVN_ERR(svn_sqlite__reset(stmt));
      if (SVN_IS_VALID_REVNUM(max))  /* The rep-cache could be empty. */
        SVN_ERR(svn_fs_fs__ensure_revision_exists(max, fs, iterpool));
    }

  SVN_ERR(svn_sqlite__get_statement(&stmt, ffd->rep_cache_db,
                                    STMT_GET_REPS_FOR_RANGE));
  SVN_ERR(svn_sqlite__bindf(stmt, "rr",
                            start, end));

  /* Walk the cache entries. */
  SVN_ERR(svn_sqlite__step(&have_row, stmt));
  while (have_row)
    {
      representation_t *rep;
      const char *sha1_digest;
      svn_error_t *err;
      svn_checksum_t *checksum;

      /* Clear ITERPOOL occasionally. */
      if (iterations++ % 16 == 0)
        svn_pool_clear(iterpool);

      /* Check for cancellation. */
      if (cancel_func)
        {
          err = cancel_func(cancel_baton);
          if (err)
            return svn_error_compose_create(err, svn_sqlite__reset(stmt));
        }

      /* Construct a representation_t. */
      rep = apr_pcalloc(iterpool, sizeof(*rep));
      svn_fs_fs__id_txn_reset(&rep->txn_id);
      sha1_digest = svn_sqlite__column_text(stmt, 0, iterpool);
      err = svn_checksum_parse_hex(&checksum, svn_checksum_sha1,
                                   sha1_digest, iterpool);
      if (err)
        return svn_error_compose_create(err, svn_sqlite__reset(stmt));

      rep->has_sha1 = TRUE;
      memcpy(rep->sha1_digest, checksum->digest, sizeof(rep->sha1_digest));
      rep->revision = svn_sqlite__column_revnum(stmt, 1);
      rep->item_index = svn_sqlite__column_int64(stmt, 2);
      rep->size = svn_sqlite__column_int64(stmt, 3);
      rep->expanded_size = svn_sqlite__column_int64(stmt, 4);

      /* Walk. */
      err = walker(rep, walker_baton, fs, iterpool);
      if (err)
        return svn_error_compose_create(err, svn_sqlite__reset(stmt));

      SVN_ERR(svn_sqlite__step(&have_row, stmt));
    }

  SVN_ERR(svn_sqlite__reset(stmt));
  svn_pool_destroy(iterpool);

  return SVN_NO_ERROR;
}
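
The walker's expected signature is spelled out in the parameter list above, and the loop shows which representation_t fields it can rely on (revision, item_index, size, expanded_size, sha1_digest). Below is a hedged sketch of a matching callback plus an invocation. representation_t and svn_fs_fs__walk_rep_reference() are FSFS-internal (declared in libsvn_fs_fs's fs.h and rep-cache.h in this tree), so a real caller would live inside that library; the baton type and function names here are invented for the example.

/* Internal FSFS headers, as found in the Subversion tree this excerpt
   comes from. */
#include "fs.h"          /* representation_t */
#include "rep-cache.h"   /* svn_fs_fs__walk_rep_reference() */

/* Hypothetical baton: count cache entries and sum their on-disk sizes. */
struct rep_stats_baton
{
  apr_int64_t rep_count;
  svn_filesize_t total_size;
};

/* Matches the walker signature expected by svn_fs_fs__walk_rep_reference().
   Returning an error aborts the walk (the caller composes the error with
   the statement reset, as shown above). */
static svn_error_t *
count_reps_walker(representation_t *rep,
                  void *baton,
                  svn_fs_t *fs,
                  apr_pool_t *scratch_pool)
{
  struct rep_stats_baton *b = baton;

  b->rep_count++;
  b->total_size += rep->size;

  return SVN_NO_ERROR;
}

/* Walk every rep-cache entry from r0 up to YOUNGEST. */
static svn_error_t *
count_reps(svn_fs_t *fs,
           svn_revnum_t youngest,
           struct rep_stats_baton *stats,
           apr_pool_t *pool)
{
  stats->rep_count = 0;
  stats->total_size = 0;

  SVN_ERR(svn_fs_fs__walk_rep_reference(fs, 0, youngest,
                                        count_reps_walker, stats,
                                        NULL, NULL,   /* no cancellation */
                                        pool));
  return SVN_NO_ERROR;
}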