void pocketvox_controller_start(PocketvoxController *controller)
{
	GList* modules = NULL;
    gint i, n_threads;
    GThreadPool *thread_pool = NULL;

    g_return_if_fail(NULL != controller);

	controller->priv = G_TYPE_INSTANCE_GET_PRIVATE (controller,
			TYPE_POCKETVOX_CONTROLLER, PocketvoxControllerPrivate);
	PocketvoxControllerPrivate *priv = controller->priv;

    modules   = g_hash_table_get_values(priv->modules);

    //create a GThreadPool to make dictionary loading smoother
    n_threads   = g_get_num_processors();
    thread_pool = g_thread_pool_new((GFunc)pocketvox_module_build_dictionnary, NULL, n_threads, TRUE, NULL);

    for(i = 0; i < g_list_length(modules); i++)
    {
        g_thread_pool_push(thread_pool, (PocketvoxModule *)g_list_nth_data(modules, i), NULL);
        //pocketvox_module_build_dictionnary((PocketvoxModule *)g_list_nth_data(modules, i));
    }
    g_thread_pool_free(thread_pool, FALSE, TRUE);
    g_list_free(modules);

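    // Dictionary building has finished (the pool was freed with wait=TRUE); hand control over to the main loop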
	priv->loop = g_main_loop_new(NULL, FALSE);
	g_main_loop_run(priv->loop);
}
static gboolean
gst_transcoder_adjust_wait_time (GstClock * sync_clock, GstClockTime time,
    GstClockID id, GstCpuThrottlingClock * self)
{
  struct rusage ru;
  float delta_usage, usage, coef;

  GstCpuThrottlingClockPrivate *priv = self->priv;

  getrusage (RUSAGE_SELF, &ru);
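  /* Express the user CPU time consumed since the last evaluation as a
   * percentage of the whole machine's capacity. */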
  delta_usage = GST_TIMEVAL_TO_TIME (ru.ru_utime) -
      GST_TIMEVAL_TO_TIME (self->priv->last_usage.ru_utime);
  usage =
      ((float) delta_usage / self->priv->time_between_evals * 100) /
      g_get_num_processors ();

  self->priv->last_usage = ru;

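  /* Nudge the wait time by 0.1 ms per evaluation: longer when usage is above
   * the target, shorter when it is below. */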
  coef = GST_MSECOND / 10;
  if (usage < (gfloat) priv->wanted_cpu_usage) {
    coef = -coef;
  }

  priv->current_wait_time = CLAMP (priv->current_wait_time + coef,
      0, GST_SECOND);

  GST_DEBUG_OBJECT (self,
      "Avg is %f (wanted %d) => %" GST_TIME_FORMAT, usage,
      self->priv->wanted_cpu_usage, GST_TIME_ARGS (priv->current_wait_time));

  return TRUE;
}
Example #3
static void
test_xattr_races (void)
{
  /* If for some reason we're built in a VM which only has one vcpu, let's still
   * at least make the test do something.
   */
  /* FIXME - this deadlocks for me on 4.9.4-201.fc25.x86_64, whether
   * using overlayfs or xfs as source/dest.
   */
  const guint nprocs = MAX (4, g_get_num_processors ());
  struct XattrWorker wdata[nprocs];
  GThread *threads[nprocs];
  g_autoptr(GError) local_error = NULL;
  GError **error = &local_error;
  g_auto(GLnxTmpDir) tmpdir = { 0, };
  g_autofree char *tmpdir_path = g_strdup_printf ("%s/libglnx-xattrs-XXXXXX",
                                                  getenv ("TMPDIR") ?: "/var/tmp");
  guint nread = 0;

  if (!glnx_mkdtempat (AT_FDCWD, tmpdir_path, 0700,
                       &tmpdir, error))
    goto out;

  /* Support people building/testing on tmpfs https://github.com/flatpak/flatpak/issues/686 */
  if (fsetxattr (tmpdir.fd, "user.test", "novalue", strlen ("novalue"), 0) < 0)
    {
      if (errno == EOPNOTSUPP)
        {
          g_test_skip ("no xattr support");
          return;
        }
      else
        {
          glnx_set_error_from_errno (error);
          goto out;
        }
    }

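  /* One worker thread per processor (at least four): even-numbered workers
   * write xattrs, odd-numbered ones read them. */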
  for (guint i = 0; i < nprocs; i++)
    {
      struct XattrWorker *worker = &wdata[i];
      worker->dfd = tmpdir.fd;
      worker->is_writer = i % 2 == 0;
      threads[i] = g_thread_new (NULL, xattr_thread, worker);
    }

  for (guint i = 0; i < nprocs; i++)
    {
      if (wdata[i].is_writer)
        (void) g_thread_join (threads[i]);
      else
        nread += GPOINTER_TO_UINT (g_thread_join (threads[i]));
    }

  g_print ("Read %u xattrs race free!\n", nread);

 out:
  g_assert_no_error (local_error);
}
Example #4
static void
sp_hostinfo_source_prepare (SpSource *source)
{
  SpHostinfoSource *self = (SpHostinfoSource *)source;
  SpCaptureCounter *counters;

  g_assert (SP_IS_HOSTINFO_SOURCE (self));

  self->stat_fd = open ("/proc/stat", O_RDONLY);
  self->n_cpu = g_get_num_processors ();

  g_array_set_size (self->cpu_info, 0);

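  /* Stack-allocate two capture counters (usage and frequency) per CPU; they
   * are registered with the capture writer below. */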
  counters = alloca (sizeof *counters * self->n_cpu * 2);

  for (guint i = 0; i < self->n_cpu; i++)
    {
      SpCaptureCounter *ctr = &counters[i*2];
      CpuInfo info = { 0 };

      /*
       * Request 2 counter values.
       * One for CPU and one for Frequency.
       */
      info.counter_base = sp_capture_writer_request_counter (self->writer, 2);

      /*
       * Define counters for capture file.
       */
      ctr->id = info.counter_base;
      ctr->type = SP_CAPTURE_COUNTER_DOUBLE;
      ctr->value.vdbl = 0;
      g_strlcpy (ctr->category, "CPU Percent", sizeof ctr->category);
      g_snprintf (ctr->name, sizeof ctr->name, "Total CPU %d", i);
      g_snprintf (ctr->description, sizeof ctr->description,
                  "Total CPU usage %d", i);

      ctr++;

      ctr->id = info.counter_base + 1;
      ctr->type = SP_CAPTURE_COUNTER_DOUBLE;
      ctr->value.vdbl = 0;
      g_strlcpy (ctr->category, "CPU Frequency", sizeof ctr->category);
      g_snprintf (ctr->name, sizeof ctr->name, "CPU %d", i);
      g_snprintf (ctr->description, sizeof ctr->description,
                  "Frequency of CPU %d", i);

      g_array_append_val (self->cpu_info, info);
    }

  sp_capture_writer_define_counters (self->writer,
                                     SP_CAPTURE_CURRENT_TIME,
                                     -1,
                                     getpid (),
                                     counters,
                                     self->n_cpu * 2);

  sp_source_emit_ready (SP_SOURCE (self));
}
static void
gt_resource_downloader_class_init(GtResourceDownloaderClass* klass)
{
    G_OBJECT_CLASS(klass)->finalize = finalize;
    G_OBJECT_CLASS(klass)->dispose = dispose;

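    /* Shared download pool, sized to the number of available processors. */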
    dl_pool = g_thread_pool_new((GFunc) download_cb, NULL, g_get_num_processors(), FALSE, NULL);
}
Example #6
void
egg_counter_reset (EggCounter *counter)
{
  guint ncpu;
  guint i;

  g_return_if_fail (counter);

  ncpu = g_get_num_processors ();

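  /* Zero this counter's per-CPU cells. */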
  for (i = 0; i < ncpu; i++)
    counter->values [i].value = 0;

  EGG_MEMORY_BARRIER;
}
Example #7
File: grustna.c    Project: sp3d/grust
static gpointer
create_call_minder_pool ()
{
  g_message ("Taking an inefficient, lock-prone call path"
             " -- consider avoiding migration of object references between tasks");

  return g_thread_pool_new (call_minder, NULL,
#if GLIB_CHECK_VERSION(2, 36, 0)
                            g_get_num_processors (),
#else
                            12,
#endif
                            FALSE,
                            NULL);
}
Example #8
gint64
egg_counter_get (EggCounter *counter)
{
  gint64 value = 0;
  guint ncpu;
  guint i;

  g_return_val_if_fail (counter, G_GINT64_CONSTANT (-1));

  ncpu = g_get_num_processors ();

  EGG_MEMORY_BARRIER;

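  /* Sum the per-CPU cells to obtain the counter's current value. */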
  for (i = 0; i < ncpu; i++)
    value += counter->values [i].value;

  return value;
}
Example #9
File: util.c    Project: LebedevRI/viking
guint util_get_number_of_cpus ()
{
#if GLIB_CHECK_VERSION (2, 36, 0)
  return g_get_num_processors();
#else
  long nprocs = 1;
#ifdef WINDOWS
  SYSTEM_INFO info;
  GetSystemInfo(&info);
  nprocs = info.dwNumberOfProcessors;
#else
#ifdef _SC_NPROCESSORS_ONLN
  nprocs = sysconf(_SC_NPROCESSORS_ONLN);
  if (nprocs < 1)
    nprocs = 1;
#endif
#endif
  return nprocs;
#endif
}
Example #10
static gboolean
gst_libde265_dec_start (GstVideoDecoder * decoder)
{
  GstLibde265Dec *dec = GST_LIBDE265_DEC (decoder);
  int threads = dec->max_threads;
  struct de265_image_allocation allocation;

  _gst_libde265_dec_free_decoder (dec);
  dec->ctx = de265_new_decoder ();
  if (dec->ctx == NULL) {
    return FALSE;
  }
  if (threads == 0) {
    threads = g_get_num_processors ();

    /* NOTE: We start more threads than cores for now, as some threads
     * might get blocked while waiting for dependent data. Having more
     * threads increases decoding speed by about 10% */
    threads *= 2;
  }
  if (threads > 1) {
    if (threads > 32) {
      /* TODO: this limit should come from the libde265 headers */
      threads = 32;
    }
    de265_start_worker_threads (dec->ctx, threads);
  }
  GST_INFO_OBJECT (dec, "Using libde265 %s with %d worker threads",
      de265_get_version (), threads);

  allocation.get_buffer = gst_libde265_dec_get_buffer;
  allocation.release_buffer = gst_libde265_dec_release_buffer;
  de265_set_image_allocation_functions (dec->ctx, &allocation, decoder);
  /* NOTE: we explicitly disable hash checks for now */
  de265_set_parameter_bool (dec->ctx, DE265_DECODER_PARAM_BOOL_SEI_CHECK_HASH,
      0);
  return TRUE;
}
void pocketvox_controller_on_request(PocketvoxController *controller, gpointer hyp, gpointer user_data)
{
    GList *modules = NULL;
    gchar *request = (gchar *)hyp;
    gint i = 0, j = 0, n_threads;
    gdouble mindist = -1.0f, dist;
    GThreadPool *thread_pool = NULL;
    gboolean first_module = FALSE;

	g_return_if_fail(NULL != controller);
	g_return_if_fail(NULL != hyp);

	controller->priv = G_TYPE_INSTANCE_GET_PRIVATE (controller,
			TYPE_POCKETVOX_CONTROLLER, PocketvoxControllerPrivate);
	PocketvoxControllerPrivate *priv = controller->priv;

	gchar* window = pocketvox_xmanager_get_window(priv->xmanager);

    g_warning("WINDOW: %s", window);

	// Activate the app modules that match the currently focused window
	g_hash_table_foreach(priv->modules, pocketvox_module_manage_apps, window);

	//make request
	//g_hash_table_foreach(priv->modules, pocketvox_module_make_request, request);

	modules     = g_hash_table_get_values(priv->modules);

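    // Score every module against the recognized request, one pool task per module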
    n_threads   = g_get_num_processors();
    thread_pool = g_thread_pool_new((GFunc)pocketvox_module_threaded_request, request, n_threads, TRUE, NULL );

    for(i = 0; i < g_list_length(modules); i++)
    {
        g_thread_pool_push(thread_pool, (PocketvoxModule *)g_list_nth_data(modules,i), NULL);
    }
    g_thread_pool_free(thread_pool, FALSE, TRUE);

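    // Keep the activated module with the lowest score (closest match)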
    for(i = 0; i< g_list_length(modules); i++)
	{
		PocketvoxModule *module = g_list_nth_data(modules, i);

		dist = pocketvox_module_get_score(module);

        g_warning("%d %s %d %d %s %.5f",
                  i,
                  pocketvox_module_get_id(module),
                  pocketvox_module_is_apps(module),
                  pocketvox_module_get_activated(module),
                  pocketvox_module_get_command(module),
                  pocketvox_module_get_score(module));

		if(pocketvox_module_get_activated(module) == TRUE && (dist < mindist || first_module==FALSE ))
		{
			mindist = dist;
			j = i;

            first_module = TRUE;
		}
	}

	// Only execute a module when at least one activated module was found
	if (first_module == TRUE)
	{
		PocketvoxModule *m = g_list_nth_data(modules, j);

		pocketvox_module_execute(m);
	}

	g_list_free(modules);
}
Example #12
static void
gimp_gegl_config_class_init (GimpGeglConfigClass *klass)
{
  GObjectClass *object_class = G_OBJECT_CLASS (klass);
  gint          num_processors;
  guint64       memory_size;

  parent_class = g_type_class_peek_parent (klass);

  object_class->finalize     = gimp_gegl_config_finalize;
  object_class->set_property = gimp_gegl_config_set_property;
  object_class->get_property = gimp_gegl_config_get_property;

  GIMP_CONFIG_PROP_PATH (object_class, PROP_TEMP_PATH,
                         "temp-path",
                         "Temp path",
                         TEMP_PATH_BLURB,
                         GIMP_CONFIG_PATH_DIR,
                         "${gimp_dir}" G_DIR_SEPARATOR_S "tmp",
                         GIMP_PARAM_STATIC_STRINGS |
                         GIMP_CONFIG_PARAM_RESTART);

  GIMP_CONFIG_PROP_PATH (object_class, PROP_SWAP_PATH,
                         "swap-path",
                         "Swap path",
                         SWAP_PATH_BLURB,
                         GIMP_CONFIG_PATH_DIR,
                         "${gimp_dir}",
                         GIMP_PARAM_STATIC_STRINGS |
                         GIMP_CONFIG_PARAM_RESTART);

  num_processors = g_get_num_processors ();

#ifdef GIMP_UNSTABLE
  num_processors = num_processors * 2;
#endif

  num_processors = MIN (num_processors, GIMP_MAX_NUM_THREADS);

  GIMP_CONFIG_PROP_UINT (object_class, PROP_NUM_PROCESSORS,
                         "num-processors",
                         "Number of processors to use",
                         NUM_PROCESSORS_BLURB,
                         1, GIMP_MAX_NUM_THREADS, num_processors,
                         GIMP_PARAM_STATIC_STRINGS);

  memory_size = gimp_get_physical_memory_size ();

  /* limit to the amount one process can handle */
  memory_size = MIN (GIMP_MAX_MEM_PROCESS, memory_size);

  if (memory_size > 0)
    memory_size = memory_size / 2; /* half the memory */
  else
    memory_size = 1 << 30; /* 1GB */

  GIMP_CONFIG_PROP_MEMSIZE (object_class, PROP_TILE_CACHE_SIZE,
                            "tile-cache-size",
                            "Tile cache size",
                            TILE_CACHE_SIZE_BLURB,
                            0, GIMP_MAX_MEM_PROCESS,
                            memory_size,
                            GIMP_PARAM_STATIC_STRINGS |
                            GIMP_CONFIG_PARAM_CONFIRM);

  GIMP_CONFIG_PROP_BOOLEAN (object_class, PROP_USE_OPENCL,
                            "use-opencl",
                            "Use OpenCL",
                            USE_OPENCL_BLURB,
                            TRUE,
                            GIMP_PARAM_STATIC_STRINGS);

  /*  only for backward compatibility:  */
  GIMP_CONFIG_PROP_BOOLEAN (object_class, PROP_STINGY_MEMORY_USE,
                            "stingy-memory-use",
                            NULL, NULL,
                            FALSE,
                            GIMP_CONFIG_PARAM_IGNORE);
}
Example #13
void
egg_counter_arena_register (EggCounterArena *arena,
                            EggCounter      *counter)
{
  CounterInfo *info;
  guint group;
  guint ncpu;
  guint position;
  guint group_start_cell;

  g_return_if_fail (arena != NULL);
  g_return_if_fail (counter != NULL);

  if (!arena->is_local_arena)
    {
      g_warning ("Cannot add counters to a remote arena.");
      return;
    }

  ncpu = g_get_num_processors ();

  G_LOCK (reglock);

  /*
   * Get the counter group and position within the group of the counter.
   */
  group = arena->n_counters / COUNTERS_PER_GROUP;
  position = arena->n_counters % COUNTERS_PER_GROUP;

  /*
   * Get the starting cell for this group. Cells roughly map to cachelines.
   */
  group_start_cell = CELLS_PER_HEADER + (CELLS_PER_GROUP (ncpu) * group);
  info = &((CounterInfo *)&arena->cells [group_start_cell])[position];

  g_assert (position < COUNTERS_PER_GROUP);
  g_assert (group_start_cell < arena->n_cells);

  /*
   * Store information about the counter in the SHM area. Also, update
   * the counter values pointer to map to the right cell in the SHM zone.
   */
  info->cell = group_start_cell + (COUNTERS_PER_GROUP * CELLS_PER_INFO);
  info->position = position;
  g_snprintf (info->category, sizeof info->category, "%s", counter->category);
  g_snprintf (info->description, sizeof info->description, "%s", counter->description);
  g_snprintf (info->name, sizeof info->name, "%s", counter->name);
  counter->values = (EggCounterValue *)&arena->cells [info->cell].values[info->position];

#if 0
  g_print ("Counter registered: cell=%u position=%u category=%s name=%s\n",
           info->cell, info->position, info->category, info->name);
#endif

  /*
   * Track the counter address, so we can _foreach() them.
   */
  arena->counters = g_list_append (arena->counters, counter);
  arena->n_counters++;

  /*
   * Now notify remote processes of the counter.
   */
  EGG_MEMORY_BARRIER;
  ((ShmHeader *)&arena->cells[0])->n_counters++;

  G_UNLOCK (reglock);
}
Example #14
static gboolean
_egg_counter_arena_init_remote (EggCounterArena *arena,
                                GPid             pid)
{
  ShmHeader header;
  gssize count;
  gchar name [32];
  void *mem = NULL;
  guint ncpu;
  guint n_counters;
  int i;
  int fd = -1;

  g_assert (arena != NULL);

  ncpu = g_get_num_processors ();

  arena->ref_count = 1;
  arena->pid = pid;

  g_snprintf (name, sizeof name, NAME_FORMAT, (int)pid);

  fd = shm_open (name, O_RDONLY, 0);
  if (fd < 0)
    return FALSE;

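  /* Read and sanity-check the shared-memory header before mapping the full
   * region. */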
  count = pread (fd, &header, sizeof header, 0);

  if ((count != sizeof header) ||
      (header.magic != MAGIC) ||
      (header.size > COUNTER_MAX_SHM) ||
      (header.ncpu > g_get_num_processors ()))
    goto failure;

  n_counters = header.n_counters;

  if (header.size <
      CELLS_PER_HEADER + (((n_counters / COUNTERS_PER_GROUP) + 1) * CELLS_PER_GROUP(header.ncpu)))
    goto failure;

  mem = mmap (NULL, header.size, PROT_READ, MAP_SHARED, fd, 0);

  if (mem == MAP_FAILED)
    goto failure;

  arena->is_local_arena = FALSE;
  arena->data_is_mmapped = TRUE;
  arena->cells = mem;
  arena->n_cells = header.size / DATA_CELL_SIZE;
  arena->data_length = header.size;
  arena->counters = NULL;

  /* Not strictly required, but helpful for now */
  if (header.first_offset != CELLS_PER_HEADER)
    goto failure;

  for (i = 0; i < n_counters; i++)
    {
      CounterInfo *info;
      EggCounter *counter;
      guint group_start_cell;
      guint group;
      guint position;

      group = i / COUNTERS_PER_GROUP;
      position = i % COUNTERS_PER_GROUP;
      group_start_cell = header.first_offset + (CELLS_PER_GROUP (ncpu) * group);

      if (group_start_cell + CELLS_PER_GROUP (ncpu) >= arena->n_cells)
        goto failure;

      info = &(((CounterInfo *)&arena->cells[group_start_cell])[position]);

      counter = g_new0 (EggCounter, 1);
      counter->category = g_strndup (info->category, sizeof info->category);
      counter->name = g_strndup (info->name, sizeof info->name);
      counter->description = g_strndup (info->description, sizeof info->description);
      counter->values = (EggCounterValue *)&arena->cells [info->cell].values[info->position];

#if 0
      g_print ("Counter discovered: cell=%u position=%u category=%s name=%s values=%p offset=%lu\n",
               info->cell, info->position, info->category, info->name, counter->values,
               (guint8*)counter->values - (guint8*)mem);
#endif

      arena->counters = g_list_prepend (arena->counters, counter);
    }

  close (fd);

  return TRUE;

failure:
  close (fd);

  if ((mem != NULL) && (mem != MAP_FAILED))
    munmap (mem, header.size);

  return FALSE;
}
Example #15
static void
_egg_counter_arena_init_local (EggCounterArena *arena)
{
  ShmHeader *header;
  gpointer mem;
  unsigned pid;
  gsize size;
  gint page_size;
  gint fd;
  gchar name [32];

  page_size = sysconf (_SC_PAGE_SIZE);

  /* Implausible, but squashes warnings. Initialize the arena fields that the
   * normal path sets further down before jumping to the malloc fallback. */
  if (page_size < 4096)
    {
      page_size = 4096;
      size = page_size * 4;
      arena->ref_count = 1;
      arena->is_local_arena = TRUE;
      goto use_malloc;
    }

  /*
   * FIXME: https://bugzilla.gnome.org/show_bug.cgi?id=749280
   *
   * We have some very tricky work ahead of us to add unlimited numbers
   * of counters at runtime. We basically need to avoid placing counters
   * that could overlap a page.
   */
  size = page_size * 4;

  arena->ref_count = 1;
  arena->is_local_arena = TRUE;

  if (getenv ("EGG_COUNTER_DISABLE_SHM"))
    goto use_malloc;

  pid = getpid ();
  g_snprintf (name, sizeof name, NAME_FORMAT, pid);

  if (-1 == (fd = shm_open (name, O_CREAT|O_RDWR, S_IRUSR|S_IWUSR|S_IRGRP)))
    goto use_malloc;

  /*
   * ftruncate() will cause reads to be zero. Therefore, we don't need to
   * do write() of zeroes to initialize the shared memory area.
   */
  if (-1 == ftruncate (fd, size))
    goto failure;

  /*
   * Memory map the shared memory segment so that we can store our counters
   * within it. We need to lay out the counters into the segment so that other
   * processes can traverse and read the values by loading the shared page.
   */
  mem = mmap (NULL, size, PROT_READ|PROT_WRITE, MAP_SHARED, fd, 0);
  if (mem == MAP_FAILED)
    goto failure;

  close (fd);
  atexit (_egg_counter_arena_atexit);

  arena->data_is_mmapped = TRUE;
  arena->cells = mem;
  arena->n_cells = (size / DATA_CELL_SIZE);
  arena->data_length = size;

  header = mem;
  header->magic = MAGIC;
  header->ncpu = g_get_num_processors ();
  header->first_offset = CELLS_PER_HEADER;

  EGG_MEMORY_BARRIER;

  header->size = (guint32)arena->data_length;

  return;

failure:
  shm_unlink (name);
  close (fd);

use_malloc:
  g_warning ("Failed to allocate shared memory for counters. "
             "Counters will not be available to external processes.");

  arena->data_is_mmapped = FALSE;
  arena->n_cells = (size / DATA_CELL_SIZE);
  arena->data_length = size;

  /*
   * Allocate the cells page-aligned so counter groups never straddle a page
   * boundary. posix_memalign() does not zero the memory, so clear it
   * explicitly before handing cells out to counters.
   */
  if (posix_memalign ((void *)&arena->cells, page_size, size << 1) != 0)
    {
      perror ("posix_memalign()");
      abort ();
    }

  memset (arena->cells, 0, size << 1);

  header = (void *)arena->cells;
  header->magic = MAGIC;
  header->ncpu = g_get_num_processors ();
  header->first_offset = CELLS_PER_HEADER;

  EGG_MEMORY_BARRIER;

  header->size = (guint32)arena->data_length;
}
gboolean
flatpak_builtin_build_update_repo (int argc, char **argv,
                                   GCancellable *cancellable, GError **error)
{
  g_autoptr(GOptionContext) context = NULL;
  g_autoptr(GFile) repofile = NULL;
  g_autoptr(OstreeRepo) repo = NULL;
  const char *location;
  g_autoptr(GPtrArray) unwanted_deltas = NULL;

  context = g_option_context_new (_("LOCATION - Update repository metadata"));
  g_option_context_set_translation_domain (context, GETTEXT_PACKAGE);

  if (!flatpak_option_context_parse (context, options, &argc, &argv, FLATPAK_BUILTIN_FLAG_NO_DIR, NULL, cancellable, error))
    return FALSE;

  if (argc < 2)
    return usage_error (context, _("LOCATION must be specified"), error);

  if (opt_static_delta_jobs <= 0)
    opt_static_delta_jobs = g_get_num_processors ();

  location = argv[1];

  repofile = g_file_new_for_commandline_arg (location);
  repo = ostree_repo_new (repofile);

  if (!ostree_repo_open (repo, cancellable, error))
    return FALSE;

  if (opt_generate_delta_to)
    {
      if (!generate_one_delta (repo, opt_generate_delta_from, opt_generate_delta_to, opt_generate_delta_ref, cancellable, error))
        return FALSE;
      return TRUE;
    }

  if (opt_title &&
      !flatpak_repo_set_title (repo, opt_title[0] ? opt_title : NULL, error))
    return FALSE;

  if (opt_comment &&
      !flatpak_repo_set_comment (repo, opt_comment[0] ? opt_comment : NULL, error))
    return FALSE;

  if (opt_description &&
      !flatpak_repo_set_description (repo, opt_description[0] ? opt_description : NULL, error))
    return FALSE;

  if (opt_homepage &&
      !flatpak_repo_set_homepage (repo, opt_homepage[0] ? opt_homepage : NULL, error))
    return FALSE;

  if (opt_icon &&
      !flatpak_repo_set_icon (repo, opt_icon[0] ? opt_icon : NULL, error))
    return FALSE;

  if (opt_redirect_url &&
      !flatpak_repo_set_redirect_url (repo, opt_redirect_url[0] ? opt_redirect_url : NULL, error))
    return FALSE;

  if (opt_default_branch &&
      !flatpak_repo_set_default_branch (repo, opt_default_branch[0] ? opt_default_branch : NULL, error))
    return FALSE;

  if (opt_collection_id != NULL)
    {
      /* Only allow a transition from no collection ID to a non-empty collection ID.
       * Changing the collection ID between two different non-empty values is too
       * dangerous: it will break all clients who have previously pulled from the repository.
       * Require the user to recreate the repository from scratch in that case. */
      const char *old_collection_id = ostree_repo_get_collection_id (repo);
      const char *new_collection_id = opt_collection_id[0] ? opt_collection_id : NULL;

      if (old_collection_id != NULL &&
          g_strcmp0 (old_collection_id, new_collection_id) != 0)
        return flatpak_fail (error, "The collection ID of an existing repository cannot be changed. "
                                    "Recreate the repository to change or clear its collection ID.");

      if (!flatpak_repo_set_collection_id (repo, new_collection_id, error))
        return FALSE;
    }

  if (opt_deploy_collection_id &&
      !flatpak_repo_set_deploy_collection_id (repo, TRUE, error))
    return FALSE;

  if (opt_gpg_import)
    {
      g_autoptr(GBytes) gpg_data = flatpak_load_gpg_keys (opt_gpg_import, cancellable, error);
      if (gpg_data == NULL)
        return FALSE;

      if (!flatpak_repo_set_gpg_keys (repo, gpg_data, error))
        return FALSE;
    }

  if (!opt_no_update_appstream)
    {
      g_print (_("Updating appstream branch\n"));
      if (!flatpak_repo_generate_appstream (repo, (const char **) opt_gpg_key_ids, opt_gpg_homedir, 0, cancellable, error))
        return FALSE;
    }

  if (opt_generate_deltas &&
      !generate_all_deltas (repo, &unwanted_deltas, cancellable, error))
    return FALSE;

  if (unwanted_deltas != NULL)
    {
      int i;
      for (i = 0; i < unwanted_deltas->len; i++)
        {
          const char *delta = g_ptr_array_index (unwanted_deltas, i);
          g_print ("Deleting unwanted delta: %s\n", delta);
          g_autoptr(GError) my_error = NULL;
          if (!_ostree_repo_static_delta_delete (repo, delta, cancellable, &my_error))
            g_printerr ("Unable to delete delta %s: %s\n", delta, my_error->message);
        }
    }

  if (!opt_no_update_summary)
    {
      g_print (_("Updating summary\n"));
      if (!flatpak_repo_update (repo, (const char **) opt_gpg_key_ids, opt_gpg_homedir, cancellable, error))
        return FALSE;
    }

  if (opt_prune)
    {
      gint n_objects_total;
      gint n_objects_pruned;
      guint64 objsize_total;
      g_autofree char *formatted_freed_size = NULL;

      g_print ("Pruning old commits\n");
      if (!ostree_repo_prune (repo, OSTREE_REPO_PRUNE_FLAGS_REFS_ONLY, opt_prune_depth,
                              &n_objects_total, &n_objects_pruned, &objsize_total,
                              cancellable, error))
        return FALSE;

      formatted_freed_size = g_format_size_full (objsize_total, 0);

      g_print (_("Total objects: %u\n"), n_objects_total);
      if (n_objects_pruned == 0)
        g_print (_("No unreachable objects\n"));
      else
        g_print (_("Deleted %u objects, %s freed\n"),
                 n_objects_pruned, formatted_freed_size);
    }

  return TRUE;
}