#include <stdio.h>
#include <apr_general.h>
#include <apr_time.h>
#include <apr_thread_pool.h>

int main(int argc, char **argv)
{
    apr_pool_t *pool;
    apr_thread_pool_t *tpl;
    apr_status_t st;
    apr_size_t n;

    apr_initialize();
    apr_pool_create(&pool, NULL);

    /* pool with at least 8 and at most 128 worker threads */
    st = apr_thread_pool_create(&tpl, 8, 128, pool);
    if (st != APR_SUCCESS) {
        apr_pool_destroy(pool);
        apr_terminate();
        return 1;
    }

    for (long i = 0; i < 30; i++) {
        apr_thread_pool_push(tpl, thread_func, (void *)i,
                             APR_THREAD_TASK_PRIORITY_NORMAL, NULL);
        apr_sleep(50000);
    }
    for (long i = 0; i < 10; i++) {
        n = apr_thread_pool_idle_count(tpl);
        printf("idle threads: %lu\n", (unsigned long)n); /* apr_size_t needs a cast for printf */
        apr_sleep(1000000);
    }
    for (long i = 100; i < 130; i++) {
        apr_thread_pool_push(tpl, thread_func, (void *)i,
                             APR_THREAD_TASK_PRIORITY_NORMAL, NULL);
        apr_sleep(50000);
    }
    for (long i = 0; i < 10; i++) {
        n = apr_thread_pool_idle_count(tpl);
        printf("idle threads: %lu\n", (unsigned long)n);
        apr_sleep(1000000);
    }

    apr_thread_pool_destroy(tpl);
    apr_pool_destroy(pool);
    apr_terminate();
    return 0;
}
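The loops above push a thread_func that this example does not define. A minimal sketch of a compatible worker, assuming it only needs the loop index it receives: any task handed to apr_thread_pool_push() must match APR's apr_thread_start_t signature.

#include <stdio.h>
#include <apr_time.h>
#include <apr_thread_proc.h>

/* Hypothetical worker: the value passed to apr_thread_pool_push()
 * arrives here as the data argument. */
static void * APR_THREAD_FUNC thread_func(apr_thread_t *thd, void *data)
{
    long id = (long)data;              /* loop index cast back from void* */
    printf("task %ld running\n", id);
    apr_sleep(apr_time_from_sec(2));   /* simulate work so the idle count varies */
    return NULL;
}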
Example #2
gop_thread_pool_context_t *gop_tp_context_create(char *tp_name, int min_threads, int max_threads, int max_recursion_depth)
{
//  char buffer[1024];
    gop_thread_pool_context_t *tpc;
    apr_interval_time_t dt;
    int i;

    log_printf(15, "count=%d\n", _tp_context_count);

    tbx_type_malloc_clear(tpc, gop_thread_pool_context_t, 1);

    if (tbx_atomic_inc(_tp_context_count) == 0) {
        apr_pool_create(&_tp_pool, NULL);
        apr_thread_mutex_create(&_tp_lock, APR_THREAD_MUTEX_DEFAULT, _tp_pool);
        thread_pool_stats_init();
    }

    if (thread_local_depth_key == NULL) apr_threadkey_private_create(&thread_local_depth_key,_thread_pool_destructor, _tp_pool);
    tpc->pc = gop_hp_context_create(&_tp_base_portal);  //** Really just used for the submit

    default_thread_pool_config(tpc);
    if (min_threads > 0) tpc->min_threads = min_threads;
    if (max_threads > 0) tpc->max_threads = max_threads + 1;  //** Add one for the recursion depth starting offset being 1
    tpc->recursion_depth = max_recursion_depth + 1;  //** The min recursion depth normally starts at 1, so add an extra level and we don't care whether the starting location is 0 or 1
    tpc->max_concurrency = tpc->max_threads - tpc->recursion_depth;
    if (tpc->max_concurrency <= 0) {
        tpc->max_threads += 5 - tpc->max_concurrency;  //** Make sure we have at least 5 threads for work
        tpc->max_concurrency = tpc->max_threads - tpc->recursion_depth;
        log_printf(0, "Specified max threads and recursion depth don't work. Adjusting max_threads=%d\n", tpc->max_threads);
    }

    dt = tpc->min_idle * 1000000;
    assert_result(apr_thread_pool_create(&(tpc->tp), tpc->min_threads, tpc->max_threads, _tp_pool), APR_SUCCESS);
    apr_thread_pool_idle_wait_set(tpc->tp, dt);
    apr_thread_pool_threshold_set(tpc->tp, 0);

    tpc->name = (tp_name == NULL) ? NULL : strdup(tp_name);
    tbx_atomic_set(tpc->n_ops, 0);
    tbx_atomic_set(tpc->n_completed, 0);
    tbx_atomic_set(tpc->n_started, 0);
    tbx_atomic_set(tpc->n_submitted, 0);
    tbx_atomic_set(tpc->n_running, 0);

    tbx_type_malloc(tpc->overflow_running_depth, int, tpc->recursion_depth);
    tbx_type_malloc(tpc->reserve_stack, tbx_stack_t *, tpc->recursion_depth);
    for (i=0; i<tpc->recursion_depth; i++) {
        tpc->overflow_running_depth[i] = -1;
        tpc->reserve_stack[i] = tbx_stack_new();
    }

    return(tpc);
}
Example #3
void initialize_apr(void) {

    apr_status_t rv;
    apr_initialize();
    atexit(deinitialize_apr);

    // create the memory pool, thread pool, and queue.
    rv = apr_pool_create(&mem_pool, NULL);
    check_error(rv);
    rv = apr_thread_pool_create(&thrp, 1, 1, mem_pool);
    check_error(rv);
    rv = apr_queue_create(&queue, QUEUE_SIZE, mem_pool);
    check_error(rv);
}
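initialize_apr() above only builds the machinery. A minimal sketch of a consumer task that could be pushed onto the single-threaded pool to drain the queue; process_item() is a hypothetical handler, and queue/thrp are the globals created above.

#include <apr_queue.h>
#include <apr_thread_proc.h>

/* Hypothetical consumer: blocks in apr_queue_pop() until an item
 * arrives, retries on interrupt, and exits once the queue has been
 * terminated (apr_queue_term() makes pop return APR_EOF). */
static void * APR_THREAD_FUNC queue_consumer(apr_thread_t *thd, void *data)
{
    void *item;
    apr_status_t rv;

    for (;;) {
        rv = apr_queue_pop(queue, &item);
        if (rv == APR_EINTR)
            continue;          /* interrupted, try again */
        if (rv != APR_SUCCESS)
            break;             /* APR_EOF after termination, or a real error */
        process_item(item);    /* hypothetical per-item handler */
    }
    return NULL;
}

/* pushed once, e.g. right after initialize_apr():
 *   apr_thread_pool_push(thrp, queue_consumer, NULL, 0, NULL);
 */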
Example #4
void server_create(server_t **svr, poll_mgr_t *mgr) {
    *svr = (server_t*) malloc(sizeof(server_t));
    server_t *s = *svr;
    rpc_common_create(&s->comm);
    poll_job_create(&s->pjob);
    s->pjob->do_read = &handle_server_accept;
    s->pjob->holder = s;
    s->pjob->mgr = (mgr != NULL) ? mgr : mgr_;

    s->is_start = false;
    mpr_hash_create(&s->ht_conn);

    apr_thread_pool_create(&s->tp, 10, 10, s->comm->mp);
}
Example #5
/* test for threadsafe crypt() */
static void test_threadsafe(abts_case *tc, void *data)
{
    int i;
    apr_status_t rv;
    apr_thread_pool_t *thrp;

    rv = apr_thread_pool_create(&thrp, NUM_THR/2, NUM_THR, p);
    ABTS_INT_EQUAL(tc, APR_SUCCESS, rv);

    for (i = 0; i < NUM_THR; i++) {
        rv = apr_thread_pool_push(thrp, testing_thread, tc, 0, NULL);
        ABTS_INT_EQUAL(tc, APR_SUCCESS, rv);
    }

    apr_thread_pool_destroy(thrp);
}
Example #6
static void test_reslist(abts_case *tc, void *data)
{
    int i;
    apr_status_t rv;
    apr_reslist_t *rl;
    my_parameters_t *params;
    apr_thread_pool_t *thrp;
    my_thread_info_t thread_info[CONSUMER_THREADS];

    rv = apr_thread_pool_create(&thrp, CONSUMER_THREADS/2, CONSUMER_THREADS, p);
    ABTS_INT_EQUAL(tc, APR_SUCCESS, rv);

    /* Create some parameters that will be passed into each
     * constructor and destructor call. */
    params = apr_pcalloc(p, sizeof(*params));
    params->sleep_upon_construct = CONSTRUCT_SLEEP_TIME;
    params->sleep_upon_destruct = DESTRUCT_SLEEP_TIME;

    /* Create the resource list that the consumer threads will exercise. */
    rv = apr_reslist_create(&rl, RESLIST_MIN, RESLIST_SMAX, RESLIST_HMAX,
                            RESLIST_TTL, my_constructor, my_destructor,
                            params, p);
    ABTS_INT_EQUAL(tc, APR_SUCCESS, rv);

    for (i = 0; i < CONSUMER_THREADS; i++) {
        thread_info[i].tid = i;
        thread_info[i].tc = tc;
        thread_info[i].reslist = rl;
        thread_info[i].work_delay_sleep = WORK_DELAY_SLEEP_TIME;
        rv = apr_thread_pool_push(thrp, resource_consuming_thread,
                                  &thread_info[i], 0, NULL);
        ABTS_INT_EQUAL(tc, APR_SUCCESS, rv);
    }

    rv = apr_thread_pool_destroy(thrp);
    ABTS_INT_EQUAL(tc, APR_SUCCESS, rv);

    test_timeout(tc, rl);

    test_shrinking(tc, rl);
    ABTS_INT_EQUAL(tc, RESLIST_SMAX, params->c_count - params->d_count);

    rv = apr_reslist_destroy(rl);
    ABTS_INT_EQUAL(tc, APR_SUCCESS, rv);
}
Example #7
void http_server_init()
{
    apr_status_t st;

    evthread_use_pthreads(); //ok if called from other places

    apr_initialize(); //ok if called from other places

    CREATE_POOL(global_pool, NULL);

    st = apr_thread_pool_create(&thread_pool, 20, 1000, global_pool);
    if (st != APR_SUCCESS) {
        LOG4C_FATAL(logger, "error creating thread pool\n");
        exit(1);
    }

    if (signal(SIGPIPE, SIG_IGN) == SIG_ERR)
        exit(1);
}
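http_server_init() only creates the pool. A hedged sketch of how a request might later be handed to it; handle_request and request_t are hypothetical, and thread_pool is the global created above.

/* Hypothetical per-request job: the request object travels through the
 * void* task parameter of apr_thread_pool_push(). */
static void * APR_THREAD_FUNC handle_request(apr_thread_t *thd, void *data)
{
    request_t *req = data;     /* hypothetical request type */
    /* ... parse the request, dispatch it, write the response ... */
    (void)req;
    return NULL;
}

/* from the accept/dispatch path, something like:
 *   apr_thread_pool_push(thread_pool, handle_request, req,
 *                        APR_THREAD_TASK_PRIORITY_NORMAL, NULL);
 */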
Example #8
void mapcache_prefetch_tiles(mapcache_context *ctx, mapcache_tile **tiles, int ntiles)
{

  apr_thread_t **threads;
  apr_threadattr_t *thread_attrs;
  int nthreads;
#if !APR_HAS_THREADS
  int i;
  for(i=0; i<ntiles; i++) {
    mapcache_tileset_tile_get(ctx, tiles[i]);
    GC_CHECK_ERROR(ctx);
  }
#else
  int i,rv;
  _thread_tile* thread_tiles;
  if(ntiles==1 || ctx->config->threaded_fetching == 0) {
    /* if threads disabled, or only fetching a single tile, don't launch a thread for the operation */
    for(i=0; i<ntiles; i++) {
      mapcache_tileset_tile_get(ctx, tiles[i]);
      GC_CHECK_ERROR(ctx);
    }
    return;
  }


  /* allocate a thread struct for each tile. Not all will be used */
  thread_tiles = (_thread_tile*)apr_pcalloc(ctx->pool,ntiles*sizeof(_thread_tile));
#if 1 || !USE_THREADPOOL
  /* use multiple threads, to fetch from multiple metatiles and/or multiple tilesets */
  apr_threadattr_create(&thread_attrs, ctx->pool);
  threads = (apr_thread_t**)apr_pcalloc(ctx->pool, ntiles*sizeof(apr_thread_t*));
  nthreads = 0;
  for(i=0; i<ntiles; i++) {
    int j;
    thread_tiles[i].tile = tiles[i];
    thread_tiles[i].launch = 1;
    j=i-1;
    /*
     * we only launch one thread per metatile: in the unseeded case the threads
     * for the same metatile would block while a single thread launches the actual
     * rendering request
     */
    while(j>=0) {
      /* check that the given metatile hasn't been rendered yet */
      if(thread_tiles[j].launch &&
          (thread_tiles[i].tile->tileset == thread_tiles[j].tile->tileset) &&
          (thread_tiles[i].tile->x / thread_tiles[i].tile->tileset->metasize_x  ==
           thread_tiles[j].tile->x / thread_tiles[j].tile->tileset->metasize_x) &&
          (thread_tiles[i].tile->y / thread_tiles[i].tile->tileset->metasize_y  ==
           thread_tiles[j].tile->y / thread_tiles[j].tile->tileset->metasize_y)) {
        thread_tiles[i].launch = 0; /* this tile will not have a thread spawned for it */
        break;
      }
      j--;
    }
    if(thread_tiles[i].launch)
      thread_tiles[i].ctx = ctx->clone(ctx);
  }
  for(i=0; i<ntiles; i++) {
    if(!thread_tiles[i].launch) continue; /* skip tiles that have been marked */
    rv = apr_thread_create(&threads[i], thread_attrs, _thread_get_tile, (void*)&(thread_tiles[i]), thread_tiles[i].ctx->pool);
    if(rv != APR_SUCCESS) {
      ctx->set_error(ctx,500, "failed to create thread %d of %d\n",i,ntiles);
      break;
    }
    nthreads++;
  }

  /* wait for launched threads to finish */
  for(i=0; i<ntiles; i++) {
    if(!thread_tiles[i].launch) continue;
    apr_thread_join(&rv, threads[i]);
    if(rv != APR_SUCCESS) {
      ctx->set_error(ctx,500, "thread %d of %d failed on exit\n",i,ntiles);
    }
    if(GC_HAS_ERROR(thread_tiles[i].ctx)) {
      /* transfer error message from child thread to main context */
      ctx->set_error(ctx,thread_tiles[i].ctx->get_error(thread_tiles[i].ctx),
                     thread_tiles[i].ctx->get_error_message(thread_tiles[i].ctx));
    }
  }
  for(i=0; i<ntiles; i++) {
    /* fetch the tiles that did not get a thread launched for them */
    if(thread_tiles[i].launch) continue;
    mapcache_tileset_tile_get(ctx, tiles[i]);
    GC_CHECK_ERROR(ctx);
  }
#else
  /* experimental version using a threadpool, disabled for stability reasons */
  apr_thread_pool_t *thread_pool;
  apr_thread_pool_create(&thread_pool,2,ctx->config->download_threads,ctx->pool);
  for(i=0; i<ntiles; i++) {
    ctx->log(ctx,MAPCACHE_DEBUG,"starting thread for tile %s",tiles[i]->tileset->name);
    thread_tiles[i].tile = tiles[i];
    thread_tiles[i].ctx = ctx->clone(ctx);
    rv = apr_thread_pool_push(thread_pool,_thread_get_tile,(void*)&(thread_tiles[i]), 0,NULL);
    if(rv != APR_SUCCESS) {
      ctx->set_error(ctx,500, "failed to push thread %d of %d in thread pool\n",i,ntiles);
      break;
    }
  }
  GC_CHECK_ERROR(ctx);
  while(apr_thread_pool_tasks_run_count(thread_pool) != ntiles || apr_thread_pool_busy_count(thread_pool)>0)
    apr_sleep(10000);
  apr_thread_pool_destroy(thread_pool);
  for(i=0; i<ntiles; i++) {
    if(GC_HAS_ERROR(thread_tiles[i].ctx)) {
      ctx->set_error(ctx,thread_tiles[i].ctx->get_error(thread_tiles[i].ctx),
                     thread_tiles[i].ctx->get_error_message(thread_tiles[i].ctx));
    }
  }
#endif

#endif

}
Example #9
/*
 * On success, leave *EXIT_CODE untouched and return SVN_NO_ERROR. On error,
 * either return an error to be displayed, or set *EXIT_CODE to non-zero and
 * return SVN_NO_ERROR.
 */
static svn_error_t *
sub_main(int *exit_code, int argc, const char *argv[], apr_pool_t *pool)
{
  enum run_mode run_mode = run_mode_unspecified;
  svn_boolean_t foreground = FALSE;
  apr_socket_t *sock;
  apr_sockaddr_t *sa;
  svn_error_t *err;
  apr_getopt_t *os;
  int opt;
  serve_params_t params;
  const char *arg;
  apr_status_t status;
#ifndef WIN32
  apr_proc_t proc;
#endif
  svn_boolean_t is_multi_threaded;
  enum connection_handling_mode handling_mode = CONNECTION_DEFAULT;
  svn_boolean_t cache_fulltexts = TRUE;
  svn_boolean_t cache_txdeltas = TRUE;
  svn_boolean_t cache_revprops = FALSE;
  svn_boolean_t use_block_read = FALSE;
  apr_uint16_t port = SVN_RA_SVN_PORT;
  const char *host = NULL;
  int family = APR_INET;
  apr_int32_t sockaddr_info_flags = 0;
#if APR_HAVE_IPV6
  svn_boolean_t prefer_v6 = FALSE;
#endif
  svn_boolean_t quiet = FALSE;
  svn_boolean_t is_version = FALSE;
  int mode_opt_count = 0;
  int handling_opt_count = 0;
  const char *config_filename = NULL;
  const char *pid_filename = NULL;
  const char *log_filename = NULL;
  svn_node_kind_t kind;
  apr_size_t min_thread_count = THREADPOOL_MIN_SIZE;
  apr_size_t max_thread_count = THREADPOOL_MAX_SIZE;
#ifdef SVN_HAVE_SASL
  SVN_ERR(cyrus_init(pool));
#endif

  /* Check library versions */
  SVN_ERR(check_lib_versions());

  /* Initialize the FS library. */
  SVN_ERR(svn_fs_initialize(pool));

  SVN_ERR(svn_cmdline__getopt_init(&os, argc, argv, pool));

  params.root = "/";
  params.tunnel = FALSE;
  params.tunnel_user = NULL;
  params.read_only = FALSE;
  params.base = NULL;
  params.cfg = NULL;
  params.compression_level = SVN_DELTA_COMPRESSION_LEVEL_DEFAULT;
  params.logger = NULL;
  params.config_pool = NULL;
  params.authz_pool = NULL;
  params.fs_config = NULL;
  params.vhost = FALSE;
  params.username_case = CASE_ASIS;
  params.memory_cache_size = (apr_uint64_t)-1;
  params.zero_copy_limit = 0;
  params.error_check_interval = 4096;

  while (1)
    {
      status = apr_getopt_long(os, svnserve__options, &opt, &arg);
      if (APR_STATUS_IS_EOF(status))
        break;
      if (status != APR_SUCCESS)
        {
          usage(argv[0], pool);
          *exit_code = EXIT_FAILURE;
          return SVN_NO_ERROR;
        }
      switch (opt)
        {
        case '6':
#if APR_HAVE_IPV6
          prefer_v6 = TRUE;
#endif
          /* ### Maybe error here if we don't have IPV6 support? */
          break;

        case 'h':
          help(pool);
          return SVN_NO_ERROR;

        case 'q':
          quiet = TRUE;
          break;

        case SVNSERVE_OPT_VERSION:
          is_version = TRUE;
          break;

        case 'd':
          if (run_mode != run_mode_daemon)
            {
              run_mode = run_mode_daemon;
              mode_opt_count++;
            }
          break;

        case SVNSERVE_OPT_FOREGROUND:
          foreground = TRUE;
          break;

        case SVNSERVE_OPT_SINGLE_CONN:
          handling_mode = connection_mode_single;
          handling_opt_count++;
          break;

        case 'i':
          if (run_mode != run_mode_inetd)
            {
              run_mode = run_mode_inetd;
              mode_opt_count++;
            }
          break;

        case SVNSERVE_OPT_LISTEN_PORT:
          {
            apr_uint64_t val;

            err = svn_cstring_strtoui64(&val, arg, 0, APR_UINT16_MAX, 10);
            if (err)
              return svn_error_createf(SVN_ERR_CL_ARG_PARSING_ERROR, err,
                                       _("Invalid port '%s'"), arg);
            port = (apr_uint16_t)val;
          }
          break;

        case SVNSERVE_OPT_LISTEN_HOST:
          host = arg;
          break;

        case 't':
          if (run_mode != run_mode_tunnel)
            {
              run_mode = run_mode_tunnel;
              mode_opt_count++;
            }
          break;

        case SVNSERVE_OPT_TUNNEL_USER:
          params.tunnel_user = arg;
          break;

        case 'X':
          if (run_mode != run_mode_listen_once)
            {
              run_mode = run_mode_listen_once;
              mode_opt_count++;
            }
          break;

        case 'r':
          SVN_ERR(svn_utf_cstring_to_utf8(&params.root, arg, pool));

          SVN_ERR(svn_io_check_resolved_path(params.root, &kind, pool));
          if (kind != svn_node_dir)
            {
              return svn_error_createf(SVN_ERR_ILLEGAL_TARGET, NULL,
                       _("Root path '%s' does not exist "
                         "or is not a directory"), params.root);
            }

          params.root = svn_dirent_internal_style(params.root, pool);
          SVN_ERR(svn_dirent_get_absolute(&params.root, params.root, pool));
          break;

        case 'R':
          params.read_only = TRUE;
          break;

        case 'T':
          handling_mode = connection_mode_thread;
          handling_opt_count++;
          break;

        case 'c':
          params.compression_level = atoi(arg);
          if (params.compression_level < SVN_DELTA_COMPRESSION_LEVEL_NONE)
            params.compression_level = SVN_DELTA_COMPRESSION_LEVEL_NONE;
          if (params.compression_level > SVN_DELTA_COMPRESSION_LEVEL_MAX)
            params.compression_level = SVN_DELTA_COMPRESSION_LEVEL_MAX;
          break;

        case 'M':
          params.memory_cache_size = 0x100000 * apr_strtoi64(arg, NULL, 0);
          break;

        case SVNSERVE_OPT_CACHE_TXDELTAS:
          cache_txdeltas = svn_tristate__from_word(arg) == svn_tristate_true;
          break;

        case SVNSERVE_OPT_CACHE_FULLTEXTS:
          cache_fulltexts = svn_tristate__from_word(arg) == svn_tristate_true;
          break;

        case SVNSERVE_OPT_CACHE_REVPROPS:
          cache_revprops = svn_tristate__from_word(arg) == svn_tristate_true;
          break;

        case SVNSERVE_OPT_BLOCK_READ:
          use_block_read = svn_tristate__from_word(arg) == svn_tristate_true;
          break;

        case SVNSERVE_OPT_CLIENT_SPEED:
          {
            apr_size_t bandwidth = (apr_size_t)apr_strtoi64(arg, NULL, 0);

            /* for slower clients, don't try anything fancy */
            if (bandwidth >= 1000)
              {
                /* block other clients for at most 1 ms (at full bandwidth).
                   Note that the send buffer is 16kB anyways. */
                params.zero_copy_limit = bandwidth * 120;

                /* check for aborted connections at the same rate */
                params.error_check_interval = bandwidth * 120;
              }
          }
          break;

        case SVNSERVE_OPT_MIN_THREADS:
          min_thread_count = (apr_size_t)apr_strtoi64(arg, NULL, 0);
          break;

        case SVNSERVE_OPT_MAX_THREADS:
          max_thread_count = (apr_size_t)apr_strtoi64(arg, NULL, 0);
          break;

#ifdef WIN32
        case SVNSERVE_OPT_SERVICE:
          if (run_mode != run_mode_service)
            {
              run_mode = run_mode_service;
              mode_opt_count++;
            }
          break;
#endif

        case SVNSERVE_OPT_CONFIG_FILE:
          SVN_ERR(svn_utf_cstring_to_utf8(&config_filename, arg, pool));
          config_filename = svn_dirent_internal_style(config_filename, pool);
          SVN_ERR(svn_dirent_get_absolute(&config_filename, config_filename,
                                          pool));
          break;

        case SVNSERVE_OPT_PID_FILE:
          SVN_ERR(svn_utf_cstring_to_utf8(&pid_filename, arg, pool));
          pid_filename = svn_dirent_internal_style(pid_filename, pool);
          SVN_ERR(svn_dirent_get_absolute(&pid_filename, pid_filename, pool));
          break;

        case SVNSERVE_OPT_VIRTUAL_HOST:
          params.vhost = TRUE;
          break;

        case SVNSERVE_OPT_LOG_FILE:
          SVN_ERR(svn_utf_cstring_to_utf8(&log_filename, arg, pool));
          log_filename = svn_dirent_internal_style(log_filename, pool);
          SVN_ERR(svn_dirent_get_absolute(&log_filename, log_filename, pool));
          break;

        }
    }

  if (is_version)
    {
      SVN_ERR(version(quiet, pool));
      return SVN_NO_ERROR;
    }

  if (os->ind != argc)
    {
      usage(argv[0], pool);
      *exit_code = EXIT_FAILURE;
      return SVN_NO_ERROR;
    }

  if (mode_opt_count != 1)
    {
      svn_error_clear(svn_cmdline_fputs(
#ifdef WIN32
                      _("You must specify exactly one of -d, -i, -t, "
                        "--service or -X.\n"),
#else
                      _("You must specify exactly one of -d, -i, -t or -X.\n"),
#endif
                       stderr, pool));
      usage(argv[0], pool);
      *exit_code = EXIT_FAILURE;
      return SVN_NO_ERROR;
    }

  if (handling_opt_count > 1)
    {
      svn_error_clear(svn_cmdline_fputs(
                      _("You may only specify one of -T or --single-thread\n"),
                      stderr, pool));
      usage(argv[0], pool);
      *exit_code = EXIT_FAILURE;
      return SVN_NO_ERROR;
    }

  /* construct object pools */
  is_multi_threaded = handling_mode == connection_mode_thread;
  params.fs_config = apr_hash_make(pool);
  svn_hash_sets(params.fs_config, SVN_FS_CONFIG_FSFS_CACHE_DELTAS,
                cache_txdeltas ? "1" :"0");
  svn_hash_sets(params.fs_config, SVN_FS_CONFIG_FSFS_CACHE_FULLTEXTS,
                cache_fulltexts ? "1" :"0");
  svn_hash_sets(params.fs_config, SVN_FS_CONFIG_FSFS_CACHE_REVPROPS,
                cache_revprops ? "2" :"0");
  svn_hash_sets(params.fs_config, SVN_FS_CONFIG_FSFS_BLOCK_READ,
                use_block_read ? "1" :"0");

  SVN_ERR(svn_repos__config_pool_create(&params.config_pool,
                                        is_multi_threaded,
                                        pool));
  SVN_ERR(svn_repos__authz_pool_create(&params.authz_pool,
                                       params.config_pool,
                                       is_multi_threaded,
                                       pool));

  /* If a configuration file is specified, load it and any referenced
   * password and authorization files. */
  if (config_filename)
    {
      params.base = svn_dirent_dirname(config_filename, pool);

      SVN_ERR(svn_repos__config_pool_get(&params.cfg, NULL,
                                         params.config_pool,
                                         config_filename,
                                         TRUE, /* must_exist */
                                         FALSE, /* names_case_sensitive */
                                         NULL,
                                         pool));
    }

  if (log_filename)
    SVN_ERR(logger__create(&params.logger, log_filename, pool));
  else if (run_mode == run_mode_listen_once)
    SVN_ERR(logger__create_for_stderr(&params.logger, pool));

  if (params.tunnel_user && run_mode != run_mode_tunnel)
    {
      return svn_error_create(SVN_ERR_CL_ARG_PARSING_ERROR, NULL,
               _("Option --tunnel-user is only valid in tunnel mode"));
    }

  if (run_mode == run_mode_inetd || run_mode == run_mode_tunnel)
    {
      apr_pool_t *connection_pool;
      svn_ra_svn_conn_t *conn;
      svn_stream_t *stdin_stream;
      svn_stream_t *stdout_stream;

      params.tunnel = (run_mode == run_mode_tunnel);
      apr_pool_cleanup_register(pool, pool, apr_pool_cleanup_null,
                                redirect_stdout);

      SVN_ERR(svn_stream_for_stdin(&stdin_stream, pool));
      SVN_ERR(svn_stream_for_stdout(&stdout_stream, pool));

      /* Use a subpool for the connection to ensure that if SASL is used
       * the pool cleanup handlers that call sasl_dispose() (connection_pool)
       * and sasl_done() (pool) are run in the right order. See issue #3664. */
      connection_pool = svn_pool_create(pool);
      conn = svn_ra_svn_create_conn4(NULL, stdin_stream, stdout_stream,
                                     params.compression_level,
                                     params.zero_copy_limit,
                                     params.error_check_interval,
                                     connection_pool);
      err = serve(conn, &params, connection_pool);
      svn_pool_destroy(connection_pool);

      return err;
    }

#ifdef WIN32
  /* If svnserve needs to run as a Win32 service, then we need to
     coordinate with the Service Control Manager (SCM) before
     continuing.  This function call registers the svnserve.exe
     process with the SCM, waits for the "start" command from the SCM
     (which will come very quickly), and confirms that those steps
     succeeded.

     After this call succeeds, the service is free to run.  At some
     point in the future, the SCM will send a message to the service,
     requesting that it stop.  This is translated into a call to
     winservice_notify_stop().  The service is then responsible for
     cleanly terminating.

     We need to do this before actually starting the service logic
     (opening files, sockets, etc.) because the SCM wants you to
     connect *first*, then do your service-specific logic.  If the
     service process takes too long to connect to the SCM, then the
     SCM will decide that the service is busted, and will give up on
     it.
     */
  if (run_mode == run_mode_service)
    {
      err = winservice_start();
      if (err)
        {
          svn_handle_error2(err, stderr, FALSE, "svnserve: ");

          /* This is the most common error.  It means the user started
             svnserve from a shell, and specified the --service
             argument.  svnserve cannot be started, as a service, in
             this way.  The --service argument is only valid if
             svnserve is started by the SCM. */
          if (err->apr_err ==
              APR_FROM_OS_ERROR(ERROR_FAILED_SERVICE_CONTROLLER_CONNECT))
            {
              svn_error_clear(svn_cmdline_fprintf(stderr, pool,
                  _("svnserve: The --service flag is only valid if the"
                    " process is started by the Service Control Manager.\n")));
            }

          svn_error_clear(err);
          *exit_code = EXIT_FAILURE;
          return SVN_NO_ERROR;
        }

      /* The service is now in the "starting" state.  Before the SCM will
         consider the service "started", this thread must call the
         winservice_running() function. */
    }
#endif /* WIN32 */

  /* Make sure we have IPV6 support first before giving apr_sockaddr_info_get
     APR_UNSPEC, because it may give us back an IPV6 address even if we can't
     create IPV6 sockets. */

#if APR_HAVE_IPV6
#ifdef MAX_SECS_TO_LINGER
  /* ### old APR interface */
  status = apr_socket_create(&sock, APR_INET6, SOCK_STREAM, pool);
#else
  status = apr_socket_create(&sock, APR_INET6, SOCK_STREAM, APR_PROTO_TCP,
                             pool);
#endif
  if (status == 0)
    {
      apr_socket_close(sock);
      family = APR_UNSPEC;

      if (prefer_v6)
        {
          if (host == NULL)
            host = "::";
          sockaddr_info_flags = APR_IPV6_ADDR_OK;
        }
      else
        {
          if (host == NULL)
            host = "0.0.0.0";
          sockaddr_info_flags = APR_IPV4_ADDR_OK;
        }
    }
#endif

  status = apr_sockaddr_info_get(&sa, host, family, port,
                                 sockaddr_info_flags, pool);
  if (status)
    {
      return svn_error_wrap_apr(status, _("Can't get address info"));
    }


#ifdef MAX_SECS_TO_LINGER
  /* ### old APR interface */
  status = apr_socket_create(&sock, sa->family, SOCK_STREAM, pool);
#else
  status = apr_socket_create(&sock, sa->family, SOCK_STREAM, APR_PROTO_TCP,
                             pool);
#endif
  if (status)
    {
      return svn_error_wrap_apr(status, _("Can't create server socket"));
    }

  /* Prevents "socket in use" errors when server is killed and quickly
   * restarted. */
  status = apr_socket_opt_set(sock, APR_SO_REUSEADDR, 1);
  if (status)
    {
      return svn_error_wrap_apr(status, _("Can't set options on server socket"));
    }

  status = apr_socket_bind(sock, sa);
  if (status)
    {
      return svn_error_wrap_apr(status, _("Can't bind server socket"));
    }

  status = apr_socket_listen(sock, ACCEPT_BACKLOG);
  if (status)
    {
      return svn_error_wrap_apr(status, _("Can't listen on server socket"));
    }

#if APR_HAS_FORK
  if (run_mode != run_mode_listen_once && !foreground)
    /* ### ignoring errors... */
    apr_proc_detach(APR_PROC_DETACH_DAEMONIZE);

  apr_signal(SIGCHLD, sigchld_handler);
#endif

#ifdef SIGPIPE
  /* Disable SIGPIPE generation for the platforms that have it. */
  apr_signal(SIGPIPE, SIG_IGN);
#endif

#ifdef SIGXFSZ
  /* Disable SIGXFSZ generation for the platforms that have it, otherwise
   * working with large files when compiled against an APR that doesn't have
   * large file support will crash the program, which is uncool. */
  apr_signal(SIGXFSZ, SIG_IGN);
#endif

  if (pid_filename)
    SVN_ERR(write_pid_file(pid_filename, pool));

#ifdef WIN32
  status = apr_os_sock_get(&winservice_svnserve_accept_socket, sock);
  if (status)
    winservice_svnserve_accept_socket = INVALID_SOCKET;

  /* At this point, the service is "running".  Notify the SCM. */
  if (run_mode == run_mode_service)
    winservice_running();
#endif

  /* Configure FS caches for maximum efficiency with svnserve.
   * For pre-forked (i.e. multi-processed) mode of operation,
   * keep the per-process caches smaller than the default.
   * Also, apply the respective command line parameters, if given. */
  {
    svn_cache_config_t settings = *svn_cache_config_get();

    if (params.memory_cache_size != -1)
      settings.cache_size = params.memory_cache_size;

    settings.single_threaded = TRUE;
    if (handling_mode == connection_mode_thread)
      {
#if APR_HAS_THREADS
        settings.single_threaded = FALSE;
#else
        /* No requests will be processed at all
         * (see "switch (handling_mode)" code further down).
         * But if they were, some other synchronization code
         * would need to take care of securing integrity of
         * APR-based structures. That would include our caches.
         */
#endif
      }

    svn_cache_config_set(&settings);
  }

#if APR_HAS_THREADS
  SVN_ERR(svn_root_pools__create(&connection_pools));

  if (handling_mode == connection_mode_thread)
    {
      /* create the thread pool with a valid range of threads */
      if (max_thread_count < 1)
        max_thread_count = 1;
      if (min_thread_count > max_thread_count)
        min_thread_count = max_thread_count;

      status = apr_thread_pool_create(&threads,
                                      min_thread_count,
                                      max_thread_count,
                                      pool);
      if (status)
        {
          return svn_error_wrap_apr(status, _("Can't create thread pool"));
        }

      /* let idle threads linger for a while in case more requests are
         coming in */
      apr_thread_pool_idle_wait_set(threads, THREADPOOL_THREAD_IDLE_LIMIT);

      /* don't queue requests unless we reached the worker thread limit */
      apr_thread_pool_threshold_set(threads, 0);
    }
  else
    {
      threads = NULL;
    }
#endif

  while (1)
    {
      connection_t *connection = NULL;
      SVN_ERR(accept_connection(&connection, sock, &params, handling_mode,
                                pool));
      if (run_mode == run_mode_listen_once)
        {
          err = serve_socket(connection, connection->pool);
          close_connection(connection);
          return err;
        }

      switch (handling_mode)
        {
        case connection_mode_fork:
#if APR_HAS_FORK
          status = apr_proc_fork(&proc, connection->pool);
          if (status == APR_INCHILD)
            {
              /* the child won't listen on the main server's socket */
              apr_socket_close(sock);

              /* serve_socket() logs any error it returns, so ignore it. */
              svn_error_clear(serve_socket(connection, connection->pool));
              close_connection(connection);
              return SVN_NO_ERROR;
            }
          else if (status != APR_INPARENT)
            {
              err = svn_error_wrap_apr(status, "apr_proc_fork");
              logger__log_error(params.logger, err, NULL, NULL);
              svn_error_clear(err);
            }
#endif
          break;

        case connection_mode_thread:
          /* Create a detached thread for each connection.  That's not a
             particularly sophisticated strategy for a threaded server, it's
             little different from forking one process per connection. */
#if APR_HAS_THREADS
          attach_connection(connection);

          status = apr_thread_pool_push(threads, serve_thread, connection,
                                        0, NULL);
          if (status)
            {
              return svn_error_wrap_apr(status, _("Can't push task"));
            }
#endif
          break;

        case connection_mode_single:
          /* Serve one connection at a time. */
          /* serve_socket() logs any error it returns, so ignore it. */
          svn_error_clear(serve_socket(connection, connection->pool));
        }

      close_connection(connection);
    }

  /* NOTREACHED */
}