Example #1
static PyObject *
gc_get_referrers(PyObject *self, PyObject *args)
{
	int i;
	PyObject *result = PyList_New(0);
	if (!result) return NULL;
	for (i = 0; i < NUM_GENERATIONS; i++) {
		if (!(gc_referrers_for(args, GEN_HEAD(i), result))) {
			Py_DECREF(result);
			return NULL;
		}
	}
	return result;
}
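
The helper gc_referrers_for() is not shown in this listing; only its
success convention (true on success, false on error) is visible from the
call site above. A minimal sketch of what it might look like, assuming a
referrersvisit visitor that reports whether a traversed object is one of
the targets in args (both the body and the visitor name are assumptions,
not taken from the listing):

/* Hypothetical sketch: walk one generation's list and append every object
 * that refers to something in `objs`.  Returns 1 on success, 0 on error,
 * matching the truth test in gc_get_referrers() above. */
static int
gc_referrers_for(PyObject *objs, PyGC_Head *list, PyObject *resultlist)
{
	PyGC_Head *gc;
	for (gc = list->gc.gc_next; gc != list; gc = gc->gc.gc_next) {
		PyObject *obj = FROM_GC(gc);
		/* assumed visitor: returns nonzero iff its argument is in objs */
		if (obj->ob_type->tp_traverse(obj, (visitproc)referrersvisit, objs)) {
			if (PyList_Append(resultlist, obj) < 0)
				return 0;	/* error propagates to the caller */
		}
	}
	return 1;
}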
Example #2
static PyObject *
gc_get_objects(PyObject *self, PyObject *noargs)
{
	int i;
	PyObject* result;

	result = PyList_New(0);
	if (result == NULL)
		return NULL;
	for (i = 0; i < NUM_GENERATIONS; i++) {
		if (append_objects(result, GEN_HEAD(i))) {
			Py_DECREF(result);
			return NULL;
		}
	}
	return result;
}
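
Note that append_objects() uses the opposite convention from
gc_referrers_for(): it returns nonzero on failure, which is why the test
above is not negated. A sketch under that assumption (the body is an
assumption, not taken from the listing):

/* Hypothetical sketch: append every object in one generation's list to
 * py_list.  Returns 0 on success, nonzero on error, matching the test in
 * gc_get_objects() above. */
static int
append_objects(PyObject *py_list, PyGC_Head *gc_list)
{
	PyGC_Head *gc;
	for (gc = gc_list->gc.gc_next; gc != gc_list; gc = gc->gc.gc_next) {
		PyObject *op = FROM_GC(gc);
		if (op != py_list && PyList_Append(py_list, op))
			return -1;	/* caller DECREFs the partial list */
	}
	return 0;
}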
Example #3

/* The FE must generate padding here both at the end of each PyGC_Head and
 * between array elements.  Reduced from Python. */

typedef union _gc_head {
  struct {
    union _gc_head *gc_next;
    union _gc_head *gc_prev;
    long gc_refs;
  } gc;
  int dummy __attribute__((aligned(16)));
} PyGC_Head;

struct gc_generation {
  PyGC_Head head;
  int threshold;
  int count;
};

#define GEN_HEAD(n) (&generations[n].head)

// The idea is that there are 6 undefs in this structure initializer to cover
// the padding at the end of each PyGC_Head and between array elements.
// CHECK: @generations = global [3 x %struct._Z13gc_generation] [%struct._Z13gc_generation { %union._Z8_gc_head { %struct._ZN8_gc_headUt7_3_E { %union._Z8_gc_head* getelementptr inbounds ([3 x %struct._Z13gc_generation], [3 x %struct._Z13gc_generation]* @generations, i32 0, i32 0, i32 0), %union._Z8_gc_head* getelementptr inbounds ([3 x %struct._Z13gc_generation], [3 x %struct._Z13gc_generation]* @generations, i32 0, i32 0, i32 0), i64 0 }, [8 x i8] undef }, i32 700, i32 0, [8 x i8] undef }, %struct._Z13gc_generation { %union._Z8_gc_head { %struct._ZN8_gc_headUt7_3_E { %union._Z8_gc_head* getelementptr inbounds ([3 x %struct._Z13gc_generation], [3 x %struct._Z13gc_generation]* @generations, i32 0, i32 1, i32 0), %union._Z8_gc_head* getelementptr inbounds ([3 x %struct._Z13gc_generation], [3 x %struct._Z13gc_generation]* @generations, i32 0, i32 1, i32 0), i64 0 }, [8 x i8] undef }, i32 10, i32 0, [8 x i8] undef }, %struct._Z13gc_generation { %union._Z8_gc_head { %struct._ZN8_gc_headUt7_3_E { %union._Z8_gc_head* getelementptr inbounds ([3 x %struct._Z13gc_generation], [3 x %struct._Z13gc_generation]* @generations, i32 0, i32 2, i32 0), %union._Z8_gc_head* getelementptr inbounds ([3 x %struct._Z13gc_generation], [3 x %struct._Z13gc_generation]* @generations, i32 0, i32 2, i32 0), i64 0 }, [8 x i8] undef }, i32 10, i32 0, [8 x i8] undef }]
/* linked lists of container objects */
struct gc_generation generations[3] = {
        /* PyGC_Head,                           threshold,      count */
        {{{GEN_HEAD(0), GEN_HEAD(0), 0}},       700,            0},
        {{{GEN_HEAD(1), GEN_HEAD(1), 0}},       10,             0},
        {{{GEN_HEAD(2), GEN_HEAD(2), 0}},       10,             0},
};
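
To see where the six undefs come from, assuming an LP64 target (8-byte
pointers and longs): the pointers-and-long payload of PyGC_Head is 24
bytes, but aligned(16) rounds the union up to 32, leaving 8 bytes of tail
padding; gc_generation is then 32 + 4 + 4 = 40 bytes, rounded up to 48,
leaving another 8 bytes at the end of each array element. Two 8-byte pads
in each of three elements gives the six undefs. The layout can be checked
with C11 static assertions:

#include <stddef.h>

/* Layout checks for the padding described above; sizes assume LP64. */
_Static_assert(sizeof(PyGC_Head) == 32,
               "24-byte payload rounded up to 16-byte alignment");
_Static_assert(offsetof(struct gc_generation, threshold) == 32,
               "threshold starts after the padded head");
_Static_assert(sizeof(struct gc_generation) == 48,
               "40 bytes of fields rounded up to 16-byte alignment");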
Example #4
/* This is the main function.  Read this to understand how the
 * collection process works. */
static long
collect(int generation)
{
	int i;
	long m = 0;	/* # objects collected */
	long n = 0;	/* # unreachable objects that couldn't be collected */
	PyGC_Head *young; /* the generation we are examining */
	PyGC_Head *old; /* next older generation */
	PyGC_Head unreachable; /* non-problematic unreachable trash */
	PyGC_Head finalizers;  /* objects with, & reachable from, __del__ */
	PyGC_Head *gc;

	if (delstr == NULL) {
		delstr = PyString_InternFromString("__del__");
		if (delstr == NULL)
			Py_FatalError("gc couldn't allocate \"__del__\"");
	}

	if (debug & DEBUG_STATS) {
		PySys_WriteStderr("gc: collecting generation %d...\n",
				  generation);
		PySys_WriteStderr("gc: objects in each generation:");
		for (i = 0; i < NUM_GENERATIONS; i++) {
			PySys_WriteStderr(" %ld", gc_list_size(GEN_HEAD(i)));
		}
		PySys_WriteStderr("\n");
	}

	/* update collection and allocation counters */
	if (generation+1 < NUM_GENERATIONS)
		generations[generation+1].count += 1;
	for (i = 0; i <= generation; i++)
		generations[i].count = 0;

	/* merge younger generations with one we are currently collecting */
	for (i = 0; i < generation; i++) {
		gc_list_merge(GEN_HEAD(i), GEN_HEAD(generation));
	}

	/* handy references */
	young = GEN_HEAD(generation);
	if (generation < NUM_GENERATIONS-1)
		old = GEN_HEAD(generation+1);
	else
		old = young;

	/* Using ob_refcnt and gc_refs, calculate which objects in the
	 * container set are reachable from outside the set (i.e., have a
	 * refcount greater than 0 when all the references within the
	 * set are taken into account).
	 */
	update_refs(young);
	subtract_refs(young);

	/* Leave everything reachable from outside young in young, and move
	 * everything else (in young) to unreachable.
	 * NOTE:  This used to move the reachable objects into a reachable
	 * set instead.  But most things usually turn out to be reachable,
	 * so it's more efficient to move the unreachable things.
	 */
	gc_list_init(&unreachable);
	move_unreachable(young, &unreachable);

	/* Move reachable objects to next generation. */
	if (young != old)
		gc_list_merge(young, old);

	/* All objects in unreachable are trash, but objects reachable from
	 * finalizers can't safely be deleted.  Python programmers should take
	 * care not to create such things.  For Python, finalizers means
	 * instance objects with __del__ methods.  Weakrefs with callbacks
	 * can also call arbitrary Python code but they will be dealt with by
	 * handle_weakrefs().
 	 */
	gc_list_init(&finalizers);
	move_finalizers(&unreachable, &finalizers);
	/* finalizers contains the unreachable objects with a finalizer;
	 * unreachable objects reachable *from* those are also uncollectable,
	 * and we move those into the finalizers list too.
	 */
	move_finalizer_reachable(&finalizers);

	/* Collect statistics on collectable objects found and print
	 * debugging information.
	 */
	for (gc = unreachable.gc.gc_next; gc != &unreachable;
			gc = gc->gc.gc_next) {
		m++;
		if (debug & DEBUG_COLLECTABLE) {
			debug_cycle("collectable", FROM_GC(gc));
		}
	}

	/* Clear weakrefs and invoke callbacks as necessary. */
	m += handle_weakrefs(&unreachable, old);

	/* Call tp_clear on objects in the unreachable set.  This will cause
	 * the reference cycles to be broken.  It may also cause some objects
	 * in finalizers to be freed.
	 */
	delete_garbage(&unreachable, old);

	/* Collect statistics on uncollectable objects found and print
	 * debugging information. */
	for (gc = finalizers.gc.gc_next;
	     gc != &finalizers;
	     gc = gc->gc.gc_next) {
		n++;
		if (debug & DEBUG_UNCOLLECTABLE)
			debug_cycle("uncollectable", FROM_GC(gc));
	}
	if (debug & DEBUG_STATS) {
		if (m == 0 && n == 0) {
			PySys_WriteStderr("gc: done.\n");
		}
		else {
			PySys_WriteStderr(
			    "gc: done, %ld unreachable, %ld uncollectable.\n",
			    n+m, n);
		}
	}

	/* Append instances in the uncollectable set to a Python
	 * reachable list of garbage.  The programmer has to deal with
	 * this if they insist on creating this type of structure.
	 */
	(void)handle_finalizers(&finalizers, old);

	if (PyErr_Occurred()) {
		if (gc_str == NULL)
			gc_str = PyString_FromString("garbage collection");
		PyErr_WriteUnraisable(gc_str);
		Py_FatalError("unexpected exception during garbage collection");
	}
	return n+m;
}
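
The reachability comments above compress the collector's core trick:
update_refs() copies each object's ob_refcnt into gc_refs, and
subtract_refs() then cancels every reference that originates inside the
set, so only objects referenced from outside keep a positive gc_refs. A
hedged sketch of the subtraction pass, close in spirit to what the
comments describe (visit_decref and AS_GC are assumed names here):

static int
visit_decref(PyObject *op, void *data)
{
	/* One internal reference to op found: cancel it out of gc_refs. */
	if (PyObject_IS_GC(op)) {
		PyGC_Head *gc = AS_GC(op);	/* assumed inverse of FROM_GC */
		if (gc->gc.gc_refs > 0)
			gc->gc.gc_refs--;
	}
	return 0;
}

/* Traverse every object in the list, decrementing gc_refs once per
 * reference held by another object in the same list.  Afterwards,
 * gc_refs > 0 means "reachable from outside the set". */
static void
subtract_refs(PyGC_Head *containers)
{
	PyGC_Head *gc;
	for (gc = containers->gc.gc_next; gc != containers; gc = gc->gc.gc_next) {
		traverseproc traverse = FROM_GC(gc)->ob_type->tp_traverse;
		(void) traverse(FROM_GC(gc), (visitproc)visit_decref, NULL);
	}
}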
Example #5
/*** Global GC state ***/

struct gc_generation {
	PyGC_Head head;
	int threshold; /* collection threshold */
	int count; /* count of allocations or collections of younger
		      generations */
};

#define NUM_GENERATIONS 3
#define GEN_HEAD(n) (&generations[n].head)

/* linked lists of container objects */
static struct gc_generation generations[NUM_GENERATIONS] = {
	/* PyGC_Head,				threshold,	count */
	{{{GEN_HEAD(0), GEN_HEAD(0), 0}},	700,		0},
	{{{GEN_HEAD(1), GEN_HEAD(1), 0}},	10,		0},
	{{{GEN_HEAD(2), GEN_HEAD(2), 0}},	10,		0},
};

PyGC_Head *_PyGC_generation0 = GEN_HEAD(0);

static int enabled = 1; /* automatic collection enabled? */

/* true if we are currently running the collector */
static int collecting = 0;

/* list of uncollectable objects */
static PyObject *garbage = NULL;

/* Python string to use if an unhandled exception occurs */
static PyObject *gc_str = NULL;
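
The threshold and count fields above are what drive automatic collection:
when a generation's count exceeds its threshold, that generation (and, via
the merging at the top of collect() in Example #4, every younger one) is
collected. A sketch of that scheduling logic, assuming it mirrors the
fields exactly as commented:

/* Hypothetical sketch of the collection trigger: collect the oldest
 * generation whose count exceeds its threshold; collect() merges the
 * younger generations in, so one call covers them all. */
static long
collect_generations(void)
{
	int i;
	long n = 0;
	for (i = NUM_GENERATIONS - 1; i >= 0; i--) {
		if (generations[i].count > generations[i].threshold) {
			n = collect(i);
			break;
		}
	}
	return n;
}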