Example #1
static void
env_mark(void * const ptr)
{
    RUBY_MARK_ENTER("env");
    if (ptr) {
	const rb_env_t * const env = ptr;

	if (env->env) {
	    /* TODO: should mark more restricted range */
	    RUBY_GC_INFO("env->env\n");
	    rb_gc_mark_locations(env->env, env->env + env->env_size);
	}

	RUBY_GC_INFO("env->prev_envval\n");
	RUBY_MARK_UNLESS_NULL(env->prev_envval);
	RUBY_MARK_UNLESS_NULL(env->block.self);
	RUBY_MARK_UNLESS_NULL(env->block.proc);

	if (env->block.iseq) {
	    if (BUILTIN_TYPE(env->block.iseq) == T_NODE) {
		RUBY_MARK_UNLESS_NULL((VALUE)env->block.iseq);
	    }
	    else {
		RUBY_MARK_UNLESS_NULL(env->block.iseq->self);
	    }
	}
    }
    RUBY_MARK_LEAVE("env");
}
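A note on the helper macros used throughout these examples: RUBY_MARK_ENTER and RUBY_MARK_LEAVE are GC-debug traces that compile to nothing in normal builds, and RUBY_MARK_UNLESS_NULL marks a VALUE only when the slot is actually set. A rough sketch of their intent, approximating CRuby's vm_core.h (not verbatim; definitions vary by version):

#define RUBY_MARK_UNLESS_NULL(ptr) \
    if (RTEST(ptr)) { rb_gc_mark(ptr); }  /* skip 0/Qfalse/Qnil slots */
#define RUBY_MARK_ENTER(msg)  /* debug trace on entering a mark function */
#define RUBY_MARK_LEAVE(msg)  /* debug trace on leaving a mark function */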
Example #2
static void
cont_mark(void *ptr)
{
    RUBY_MARK_ENTER("cont");
    if (ptr) {
	rb_context_t *cont = ptr;
	rb_gc_mark(cont->value);
	rb_thread_mark(&cont->saved_thread);

	if (cont->vm_stack) {
#ifdef CAPTURE_JUST_VALID_VM_STACK
	    rb_gc_mark_locations(cont->vm_stack,
				 cont->vm_stack + cont->vm_stack_slen + cont->vm_stack_clen);
#else
	    rb_gc_mark_locations(cont->vm_stack,
				 cont->vm_stack + cont->saved_thread.stack_size);
#endif
	}

	if (cont->machine_stack) {
	    rb_gc_mark_locations(cont->machine_stack,
				 cont->machine_stack + cont->machine_stack_size);
	}
#ifdef __ia64
	if (cont->machine_register_stack) {
	    rb_gc_mark_locations(cont->machine_register_stack,
				 cont->machine_register_stack + cont->machine_register_stack_size);
	}
#endif
    }
    RUBY_MARK_LEAVE("cont");
}
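rb_gc_mark_locations, which these functions use for stack ranges, marks every slot in the half-open range [start, end). Conceptually it behaves like the loop below; this is a sketch of the semantics rather than gc.c verbatim, since the real implementation checks conservatively whether each word looks like a heap object:

void
rb_gc_mark_locations(VALUE *start, VALUE *end)
{
    VALUE *p;
    for (p = start; p < end; p++) {
        rb_gc_mark_maybe(*p);  /* mark only plausible object references */
    }
}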
Example #3
void
rb_vm_mark(void *ptr)
{
    int i;

    RUBY_MARK_ENTER("vm");
    RUBY_GC_INFO("-------------------------------------------------\n");
    if (ptr) {
	rb_vm_t *vm = ptr;
	if (vm->living_threads) {
	    st_foreach(vm->living_threads, vm_mark_each_thread_func, 0);
	}
	RUBY_MARK_UNLESS_NULL(vm->thgroup_default);
	RUBY_MARK_UNLESS_NULL(vm->mark_object_ary);
	RUBY_MARK_UNLESS_NULL(vm->load_path);
	RUBY_MARK_UNLESS_NULL(vm->loaded_features);
	RUBY_MARK_UNLESS_NULL(vm->top_self);
	RUBY_MARK_UNLESS_NULL(vm->coverages);
	rb_gc_mark_locations(vm->special_exceptions, vm->special_exceptions + ruby_special_error_count);

	if (vm->loading_table) {
	    rb_mark_tbl(vm->loading_table);
	}

	mark_event_hooks(vm->event_hooks);

	for (i = 0; i < RUBY_NSIG; i++) {
	    if (vm->trap_list[i].cmd)
		rb_gc_mark(vm->trap_list[i].cmd);
	}
    }

    RUBY_MARK_LEAVE("vm");
}
Example #4
File: cont.c Project: genki/ruby
static void
cont_mark(void *ptr)
{
    RUBY_MARK_ENTER("cont");
    if (ptr) {
	rb_context_t *cont = ptr;
	rb_gc_mark(cont->value);
	rb_gc_mark(cont->prev);
	rb_thread_mark(&cont->saved_thread);

	if (cont->vm_stack) {
	    rb_gc_mark_locations(cont->vm_stack,
				 cont->vm_stack + cont->saved_thread.stack_size);
	}

	if (cont->machine_stack) {
	    rb_gc_mark_locations(cont->machine_stack,
				 cont->machine_stack + cont->machine_stack_size);
	}
#ifdef __ia64
	if (cont->machine_register_stack) {
	    rb_gc_mark_locations(cont->machine_register_stack,
				 cont->machine_register_stack + cont->machine_register_stack_size);
	}
#endif
    }
    RUBY_MARK_LEAVE("cont");
}
Example #5
static void
fiber_mark(void *ptr)
{
    RUBY_MARK_ENTER("cont");
    if (ptr) {
	rb_fiber_t *fib = ptr;
	rb_gc_mark(fib->prev);
	cont_mark(&fib->cont);
    }
    RUBY_MARK_LEAVE("cont");
}
Example #6
static void
binding_mark(void *ptr)
{
    rb_binding_t *bind;
    RUBY_MARK_ENTER("binding");
    if (ptr) {
	bind = ptr;
	RUBY_MARK_UNLESS_NULL(bind->env);
    }
    RUBY_MARK_LEAVE("binding");
}
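A mark function takes effect only once it is registered as the GC mark callback of a wrapped object. A minimal sketch of that wiring using CRuby's Data_Make_Struct; binding_alloc and binding_free here are illustrative, not copied from proc.c:

static void
binding_free(void *ptr)
{
    if (ptr) ruby_xfree(ptr);  /* release the C struct; GC reclaims the wrapper */
}

static VALUE
binding_alloc(VALUE klass)
{
    rb_binding_t *bind;
    /* allocates a zero-filled rb_binding_t and registers binding_mark and
       binding_free as the wrapper object's GC callbacks */
    return Data_Make_Struct(klass, rb_binding_t, binding_mark, binding_free, bind);
}

The allocator would then be attached with rb_define_alloc_func(rb_cBinding, binding_alloc).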
Example #7
static void
binding_mark(void *ptr)
{
  rb_binding_t *bind;
  RUBY_MARK_ENTER("binding");
  if (ptr) {
    bind = ptr;
    RUBY_MARK_UNLESS_NULL(bind->env);

#ifdef RUBY_192
    RUBY_MARK_UNLESS_NULL(bind->filename);
#endif

  }
  RUBY_MARK_LEAVE("binding");
}
Example #8
File: cont.c Project: rhenium/ruby
static void
cont_mark(void *ptr)
{
    RUBY_MARK_ENTER("cont");
    if (ptr) {
	rb_context_t *cont = ptr;
	rb_gc_mark(cont->value);

	rb_thread_mark(&cont->saved_thread);
	rb_gc_mark(cont->saved_thread.self);

	if (cont->vm_stack) {
#ifdef CAPTURE_JUST_VALID_VM_STACK
	    rb_gc_mark_locations(cont->vm_stack,
				 cont->vm_stack + cont->vm_stack_slen + cont->vm_stack_clen);
#else
	    rb_gc_mark_locations(cont->vm_stack,
				 cont->vm_stack + cont->saved_thread.stack_size);
#endif
	}

	if (cont->machine.stack) {
	    if (cont->type == CONTINUATION_CONTEXT) {
		/* cont */
		rb_gc_mark_locations(cont->machine.stack,
				     cont->machine.stack + cont->machine.stack_size);
	    }
	    else {
		/* fiber */
		rb_thread_t *th;
		rb_fiber_t *fib = (rb_fiber_t*)cont;
		GetThreadPtr(cont->saved_thread.self, th);
		if ((th->fiber != fib) && fib->status == RUNNING) {
		    rb_gc_mark_locations(cont->machine.stack,
					 cont->machine.stack + cont->machine.stack_size);
		}
	    }
	}
#ifdef __ia64
	if (cont->machine.register_stack) {
	    rb_gc_mark_locations(cont->machine.register_stack,
				 cont->machine.register_stack + cont->machine.register_stack_size);
	}
#endif
    }
    RUBY_MARK_LEAVE("cont");
}
Example #9
static void
proc_mark(void *ptr)
{
    rb_proc_t *proc;
    RUBY_MARK_ENTER("proc");
    if (ptr) {
	proc = ptr;
	RUBY_MARK_UNLESS_NULL(proc->envval);
	RUBY_MARK_UNLESS_NULL(proc->blockprocval);
	RUBY_MARK_UNLESS_NULL(proc->block.proc);
	RUBY_MARK_UNLESS_NULL(proc->block.self);
	if (proc->block.iseq && RUBY_VM_IFUNC_P(proc->block.iseq)) {
	    RUBY_MARK_UNLESS_NULL((VALUE)(proc->block.iseq));
	}
    }
    RUBY_MARK_LEAVE("proc");
}
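The RUBY_VM_IFUNC_P test mirrors the BUILTIN_TYPE(...) == T_NODE check in Example #1: a block's iseq field can hold either a real instruction sequence or an IFUNC (a T_NODE wrapping a C function), and only the former has a self VALUE to mark. A sketch of the macros, approximating vm_core.h (not verbatim):

#define RUBY_VM_IFUNC_P(ptr)       (BUILTIN_TYPE(ptr) == T_NODE)
#define RUBY_VM_NORMAL_ISEQ_P(ptr) ((ptr) && !RUBY_VM_IFUNC_P(ptr))  /* see Example #10 */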
Example #10
void
rb_thread_mark(void *ptr)
{
    rb_thread_t *th = NULL;
    RUBY_MARK_ENTER("thread");
    if (ptr) {
	th = ptr;
	if (th->stack) {
	    VALUE *p = th->stack;
	    VALUE *sp = th->cfp->sp;
	    rb_control_frame_t *cfp = th->cfp;
	    rb_control_frame_t *limit_cfp = (void *)(th->stack + th->stack_size);

	    while (p < sp) {
		rb_gc_mark(*p++);
	    }
	    rb_gc_mark_locations(p, p + th->mark_stack_len);

	    while (cfp != limit_cfp) {
		rb_iseq_t *iseq = cfp->iseq;
		rb_gc_mark(cfp->proc);
		if (iseq) {
		    rb_gc_mark(RUBY_VM_NORMAL_ISEQ_P(iseq) ? iseq->self : (VALUE)iseq);
		}
		if (cfp->me) ((rb_method_entry_t *)cfp->me)->mark = 1;
		cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp);
	    }
	}

	/* mark ruby objects */
	RUBY_MARK_UNLESS_NULL(th->first_proc);
	if (th->first_proc) RUBY_MARK_UNLESS_NULL(th->first_args);

	RUBY_MARK_UNLESS_NULL(th->thgroup);
	RUBY_MARK_UNLESS_NULL(th->value);
	RUBY_MARK_UNLESS_NULL(th->errinfo);
	RUBY_MARK_UNLESS_NULL(th->thrown_errinfo);
	RUBY_MARK_UNLESS_NULL(th->local_svar);
	RUBY_MARK_UNLESS_NULL(th->top_self);
	RUBY_MARK_UNLESS_NULL(th->top_wrapper);
	RUBY_MARK_UNLESS_NULL(th->fiber);
	RUBY_MARK_UNLESS_NULL(th->root_fiber);
	RUBY_MARK_UNLESS_NULL(th->stat_insn_usage);
	RUBY_MARK_UNLESS_NULL(th->last_status);

	RUBY_MARK_UNLESS_NULL(th->locking_mutex);

	rb_mark_tbl(th->local_storage);

	if (GET_THREAD() != th && th->machine_stack_start && th->machine_stack_end) {
	    rb_gc_mark_machine_stack(th);
	    rb_gc_mark_locations((VALUE *)&th->machine_regs,
				 (VALUE *)(&th->machine_regs) +
				 sizeof(th->machine_regs) / sizeof(VALUE));
	}

	mark_event_hooks(th->event_hooks);
    }

    RUBY_MARK_LEAVE("thread");
}