Example No. 1
void GC::collect(int generation) {
    int i;
    GCList *young = &generations[generation]; // the generation we are examining
    GCList *old; // next older generation
    GCList unreachable; // unreachable trash

    collect_mutex.lock();
    
    // update collection and allocation counters
    if (generation+1 < NUM_GENERATIONS)
        generations[generation+1].count += 1;

    for (i = 0; i <= generation; i++)
        generations[i].count = 0;

    // merge younger generations with one we are currently collecting
    for (i = 0; i < generation; i++) {
        generations[generation].merge(&generations[i]);
    }

    if (generation < NUM_GENERATIONS-1)
        old = &generations[generation+1];
    else
        old = young;

    // Using refcount and gc_refs, calculate which objects in the
    // container set are reachable from outside the set (i.e., have a
    // refcount greater than 0 when all the references within the
    // set are taken into account).

    update_refs(young);
    subtract_refs(young);

    // Leave everything reachable from outside young in young, and move
    // everything else (in young) to unreachable.
    // NOTE:  This used to move the reachable objects into a reachable
    // set instead.  But most things usually turn out to be reachable,
    // so it's more efficient to move the unreachable things.

    move_unreachable(young, &unreachable);

    // Move reachable objects to next generation.
    if (young != old)
        old->merge(young);

    // Call clear on objects in the unreachable set.  This will cause
    // the reference cycles to be broken and the trash to be freed.
    delete_garbage(&unreachable, old);

    collect_mutex.unlock();
}
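
The snippet above depends on a handful of declarations that are not shown: NUM_GENERATIONS, the generations array, GCList (including its merge() method and a default constructor that leaves a new list empty, which is why unreachable needs no explicit init), and collect_mutex. A minimal sketch of what those declarations might look like; every name and field here is an assumption reconstructed from how it is used, not the project's actual definitions:

// Hypothetical supporting declarations for the collect() snippet above.
// Reconstructed from usage; the real project may define them differently.
#include <mutex>

const int NUM_GENERATIONS = 3;      // CPython-style default of three generations

struct GCObject;                    // forward-declared tracked object type

struct GCList {
    GCObject *head = nullptr;       // default-constructed list starts out empty
    long count = 0;                 // collections/allocations since last collect
    void merge(GCList *other);      // splice other's objects onto this list
};

class GC {
public:
    void collect(int generation);
private:
    GCList generations[NUM_GENERATIONS];
    std::mutex collect_mutex;       // serializes concurrent collections
};

The helper routines it calls (update_refs, subtract_refs, move_unreachable, delete_garbage) mirror the CPython functions of the same names shown in Example No. 3.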
Example No. 2
int cmd_reset(int argc, const char **argv, const char *prefix)
{
	int reset_type = NONE, update_ref_status = 0, quiet = 0;
	int patch_mode = 0, unborn;
	const char *rev;
	unsigned char sha1[20];
	const char **pathspec = NULL;
	const struct option options[] = {
		OPT__QUIET(&quiet, N_("be quiet, only report errors")),
		OPT_SET_INT(0, "mixed", &reset_type,
						N_("reset HEAD and index"), MIXED),
		OPT_SET_INT(0, "soft", &reset_type, N_("reset only HEAD"), SOFT),
		OPT_SET_INT(0, "hard", &reset_type,
				N_("reset HEAD, index and working tree"), HARD),
		OPT_SET_INT(0, "merge", &reset_type,
				N_("reset HEAD, index and working tree"), MERGE),
		OPT_SET_INT(0, "keep", &reset_type,
				N_("reset HEAD but keep local changes"), KEEP),
		OPT_BOOL('p', "patch", &patch_mode, N_("select hunks interactively")),
		OPT_END()
	};

	git_config(git_default_config, NULL);

	argc = parse_options(argc, argv, prefix, options, git_reset_usage,
						PARSE_OPT_KEEP_DASHDASH);
	pathspec = parse_args(argv, prefix, &rev);

	unborn = !strcmp(rev, "HEAD") && get_sha1("HEAD", sha1);
	if (unborn) {
		/* reset on unborn branch: treat as reset to empty tree */
		hashcpy(sha1, EMPTY_TREE_SHA1_BIN);
	} else if (!pathspec) {
		struct commit *commit;
		if (get_sha1_committish(rev, sha1))
			die(_("Failed to resolve '%s' as a valid revision."), rev);
		commit = lookup_commit_reference(sha1);
		if (!commit)
			die(_("Could not parse object '%s'."), rev);
		hashcpy(sha1, commit->object.sha1);
	} else {
		struct tree *tree;
		if (get_sha1_treeish(rev, sha1))
			die(_("Failed to resolve '%s' as a valid tree."), rev);
		tree = parse_tree_indirect(sha1);
		if (!tree)
			die(_("Could not parse object '%s'."), rev);
		hashcpy(sha1, tree->object.sha1);
	}

	if (patch_mode) {
		if (reset_type != NONE)
			die(_("--patch is incompatible with --{hard,mixed,soft}"));
		return run_add_interactive(sha1_to_hex(sha1), "--patch=reset", pathspec);
	}

	/* git reset tree [--] paths... can be used to
	 * load chosen paths from the tree into the index without
	 * affecting the working tree nor HEAD. */
	if (pathspec) {
		if (reset_type == MIXED)
			warning(_("--mixed with paths is deprecated; use 'git reset -- <paths>' instead."));
		else if (reset_type != NONE)
			die(_("Cannot do %s reset with paths."),
					_(reset_type_names[reset_type]));
	}
	if (reset_type == NONE)
		reset_type = MIXED; /* by default */

	if (reset_type != SOFT && reset_type != MIXED)
		setup_work_tree();

	if (reset_type == MIXED && is_bare_repository())
		die(_("%s reset is not allowed in a bare repository"),
		    _(reset_type_names[reset_type]));

	/* Soft reset does not touch the index file nor the working tree
	 * at all, but requires them in a good order.  Other resets reset
	 * the index file to the tree object we are switching to. */
	if (reset_type == SOFT || reset_type == KEEP)
		die_if_unmerged_cache(reset_type);

	if (reset_type != SOFT) {
		struct lock_file *lock = xcalloc(1, sizeof(struct lock_file));
		int newfd = hold_locked_index(lock, 1);
		if (reset_type == MIXED) {
			if (read_from_tree(pathspec, sha1))
				return 1;
		} else {
			int err = reset_index(sha1, reset_type, quiet);
			if (reset_type == KEEP && !err)
				err = reset_index(sha1, MIXED, quiet);
			if (err)
				die(_("Could not reset index file to revision '%s'."), rev);
		}

		if (reset_type == MIXED) { /* Report what has not been updated. */
			int flags = quiet ? REFRESH_QUIET : REFRESH_IN_PORCELAIN;
			refresh_index(&the_index, flags, NULL, NULL,
				      _("Unstaged changes after reset:"));
		}

		if (write_cache(newfd, active_cache, active_nr) ||
		    commit_locked_index(lock))
			die(_("Could not write new index file."));
	}

	if (!pathspec && !unborn) {
		/* Any resets without paths update HEAD to the head being
		 * switched to, saving the previous head in ORIG_HEAD before. */
		update_ref_status = update_refs(rev, sha1);

		if (reset_type == HARD && !update_ref_status && !quiet)
			print_new_head_line(lookup_commit_reference(sha1));
	}
	if (!pathspec)
		remove_branch_state();

	return update_ref_status;
}
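
cmd_reset() dispatches on a reset_type enumeration and prints its name through a reset_type_names table, both defined earlier in builtin/reset.c. A rough reconstruction from how they are used above (an approximation, not a verbatim copy of the git source):

/* Approximate declarations, inferred from cmd_reset() above; the actual
 * definitions sit near the top of builtin/reset.c. */
enum reset_type { MIXED, SOFT, HARD, MERGE, KEEP, NONE };

static const char *reset_type_names[] = {
	N_("mixed"), N_("soft"), N_("hard"), N_("merge"), N_("keep"), NULL
};

Each OPT_SET_INT entry in the options table simply stores one of these values into reset_type, so `git reset --keep`, for example, reaches the later checks with reset_type == KEEP.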
Example No. 3
/* This is the main function.  Read this to understand how the
 * collection process works. */
static long
collect(int generation)
{
	int i;
	long m = 0;	/* # objects collected */
	long n = 0;	/* # unreachable objects that couldn't be collected */
	PyGC_Head *young; /* the generation we are examining */
	PyGC_Head *old; /* next older generation */
	PyGC_Head unreachable; /* non-problematic unreachable trash */
	PyGC_Head finalizers;  /* objects with, & reachable from, __del__ */
	PyGC_Head *gc;

	if (delstr == NULL) {
		delstr = PyString_InternFromString("__del__");
		if (delstr == NULL)
			Py_FatalError("gc couldn't allocate \"__del__\"");
	}

	if (debug & DEBUG_STATS) {
		PySys_WriteStderr("gc: collecting generation %d...\n",
				  generation);
		PySys_WriteStderr("gc: objects in each generation:");
		for (i = 0; i < NUM_GENERATIONS; i++) {
			PySys_WriteStderr(" %ld", gc_list_size(GEN_HEAD(i)));
		}
		PySys_WriteStderr("\n");
	}

	/* update collection and allocation counters */
	if (generation+1 < NUM_GENERATIONS)
		generations[generation+1].count += 1;
	for (i = 0; i <= generation; i++)
		generations[i].count = 0;

	/* merge younger generations with one we are currently collecting */
	for (i = 0; i < generation; i++) {
		gc_list_merge(GEN_HEAD(i), GEN_HEAD(generation));
	}

	/* handy references */
	young = GEN_HEAD(generation);
	if (generation < NUM_GENERATIONS-1)
		old = GEN_HEAD(generation+1);
	else
		old = young;

	/* Using ob_refcnt and gc_refs, calculate which objects in the
	 * container set are reachable from outside the set (i.e., have a
	 * refcount greater than 0 when all the references within the
	 * set are taken into account).
	 */
	update_refs(young);
	subtract_refs(young);

	/* Leave everything reachable from outside young in young, and move
	 * everything else (in young) to unreachable.
	 * NOTE:  This used to move the reachable objects into a reachable
	 * set instead.  But most things usually turn out to be reachable,
	 * so it's more efficient to move the unreachable things.
	 */
	gc_list_init(&unreachable);
	move_unreachable(young, &unreachable);

	/* Move reachable objects to next generation. */
	if (young != old)
		gc_list_merge(young, old);

	/* All objects in unreachable are trash, but objects reachable from
	 * finalizers can't safely be deleted.  Python programmers should take
	 * care not to create such things.  For Python, finalizers means
	 * instance objects with __del__ methods.  Weakrefs with callbacks
	 * can also call arbitrary Python code but they will be dealt with by
	 * handle_weakrefs().
 	 */
	gc_list_init(&finalizers);
	move_finalizers(&unreachable, &finalizers);
	/* finalizers contains the unreachable objects with a finalizer;
	 * unreachable objects reachable *from* those are also uncollectable,
	 * and we move those into the finalizers list too.
	 */
	move_finalizer_reachable(&finalizers);

	/* Collect statistics on collectable objects found and print
	 * debugging information.
	 */
	for (gc = unreachable.gc.gc_next; gc != &unreachable;
			gc = gc->gc.gc_next) {
		m++;
		if (debug & DEBUG_COLLECTABLE) {
			debug_cycle("collectable", FROM_GC(gc));
		}
	}

	/* Clear weakrefs and invoke callbacks as necessary. */
	m += handle_weakrefs(&unreachable, old);

	/* Call tp_clear on objects in the unreachable set.  This will cause
	 * the reference cycles to be broken.  It may also cause some objects
	 * in finalizers to be freed.
	 */
	delete_garbage(&unreachable, old);

	/* Collect statistics on uncollectable objects found and print
	 * debugging information. */
	for (gc = finalizers.gc.gc_next;
	     gc != &finalizers;
	     gc = gc->gc.gc_next) {
		n++;
		if (debug & DEBUG_UNCOLLECTABLE)
			debug_cycle("uncollectable", FROM_GC(gc));
	}
	if (debug & DEBUG_STATS) {
		if (m == 0 && n == 0) {
			PySys_WriteStderr("gc: done.\n");
		}
		else {
			PySys_WriteStderr(
			    "gc: done, %ld unreachable, %ld uncollectable.\n",
			    n+m, n);
		}
	}

	/* Append instances in the uncollectable set to a Python
	 * reachable list of garbage.  The programmer has to deal with
	 * this if they insist on creating this type of structure.
	 */
	(void)handle_finalizers(&finalizers, old);

	if (PyErr_Occurred()) {
		if (gc_str == NULL)
			gc_str = PyString_FromString("garbage collection");
		PyErr_WriteUnraisable(gc_str);
		Py_FatalError("unexpected exception during garbage collection");
	}
	return n+m;
}
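
collect() is rarely called directly; it is normally driven by an allocation-time check that walks the generations from oldest to youngest and collects the oldest one whose counter has crossed its threshold. A sketch of that driver, modeled on CPython's collect_generations() in the same file and assuming each generation carries a configurable threshold field:

/* Sketch of the usual caller, modeled on collect_generations().  The
 * threshold field is assumed to be the per-generation limit that
 * gc.set_threshold() adjusts. */
static long
collect_generations(void)
{
	int i;
	long n = 0;

	/* Find the oldest generation (highest numbered) whose count exceeds
	 * its threshold; collecting it also collects every younger
	 * generation, because collect() merges them in first. */
	for (i = NUM_GENERATIONS-1; i >= 0; i--) {
		if (generations[i].count > generations[i].threshold) {
			n = collect(i);
			break;
		}
	}
	return n;
}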