Example #1
/* Emit a load instruction (I_LOA) that reads variable vr into a new register. */
struct instr * iload(struct var *vr)
{
	struct instr *i;
	struct reg *reg = reg_new(vr);

	i = instr_new(I_LOA, vr, elt_new(E_REG, reg), NULL, NULL);

	return i;
}
Example #2
/*
 * Build a three-address instruction: constrain the operand types for the
 * operation class, then allocate a new result register with those types.
 */
struct instr * i3addr(char optype, struct elt *e1, struct elt *e2)
{
	struct reg *reg;
	struct instr *i;
	struct stack *types;


	/* DEBUG STUFF
	printf("Opcode: %#x\n", optype);
	elt_dump(e1);
	elt_dump(e2);
	*/

	if (optype & I_ARI) {
		types = type_constrain_ari(e1, e2);
		if (types == NULL) { 
			fprintf(stderr, "Invalid type for arithmetic operation.\n");
			return NULL;
		}
	}
	
	else if (optype & I_CMP) {
		types = type_constrain_cmp(e1, e2);
		if (types == NULL) { 
			fprintf(stderr, "Invalid type for comparison operation.\n");
			return NULL;
		}
	}

	else if (optype & I_BOO) {
		types = type_constrain_boo(e1, e2);
		if (types == NULL) { 
			fprintf(stderr, "Invalid type for boolean operation.\n");
			return NULL;
		}
	}
	
	else {
		fprintf(stderr, "Unrecognized opcode.\n");
		return NULL;
	}

	reg = reg_new(NULL);
	reg_settypes(reg, types);
	i = instr_new(optype, NULL, elt_new(E_REG, reg), e1, e2);

	stack_free(&types, NULL);
	return i;
}
Example #3
/* Build a cast instruction; its result register is constrained to the single type t. */
struct instr * icast(
	void (*cast_func)(struct elt *, struct elt **),
	struct elt *tocast, type_t t)
{
	struct instr *i = malloc(sizeof *i);

	if (i == NULL) {
		perror("malloc");
		return NULL;
	}

	i->optype = I_CAST;
	i->cast_func = cast_func;
	i->tocast = tocast;
	i->res = elt_new(E_REG, reg_new(NULL));

	stack_push(i->res->reg->types, &possible_types[t]);
	 
	return i;
}
Example #4
/*
 * Build a call instruction; the return value goes into a register bound to
 * a variable created from the callee's name.
 */
struct instr * icall(char *fn, struct stack * args)
{
	struct instr *i = malloc(sizeof *i);

	if (i == NULL) {
		perror("malloc");
		return NULL;
	}

	i->optype = I_CAL;
	i->fn = fn;
	i->args = args;
	i->ret = elt_new(E_REG, reg_new(var_new(fn)));

	return i;
}
Example #5
/*
 * The worker thread function. Takes a task from the queue and performs it if
 * one is available; otherwise, puts itself on the idle thread list and waits
 * for a signal to wake up.
 * If the thread is asked to stop after finishing a task, it terminates
 * directly by detaching and exiting. Otherwise, the thread should be on the
 * idle thread list and should be joined.
 */
static void *APR_THREAD_FUNC thread_pool_func(apr_thread_t * t, void *param)
{
    apr_thread_pool_t *me = param;
    apr_thread_pool_task_t *task = NULL;
    apr_interval_time_t wait;
    struct apr_thread_list_elt *elt;

    apr_thread_mutex_lock(me->lock);

    --me->spawning_cnt;

    elt = elt_new(me, t);
    if (!elt) {
        apr_thread_mutex_unlock(me->lock);
        apr_thread_exit(t, APR_ENOMEM);
    }

    while (!me->terminated && elt->state != TH_STOP) {
        /* If this is not a new element, it was woken from the idle list: unlink it */
        if (APR_RING_NEXT(elt, link) != elt) {
            --me->idle_cnt;
            APR_RING_REMOVE(elt, link);
        }

        APR_RING_INSERT_TAIL(me->busy_thds, elt, apr_thread_list_elt, link);
        task = pop_task(me);
        while (NULL != task && !me->terminated) {
            ++me->tasks_run;
            elt->current_owner = task->owner;
            apr_thread_mutex_unlock(me->lock);
            apr_thread_data_set(task, "apr_thread_pool_task", NULL, t);
            task->func(t, task->param);
            apr_thread_mutex_lock(me->lock);
            APR_RING_INSERT_TAIL(me->recycled_tasks, task,
                                 apr_thread_pool_task, link);
            elt->current_owner = NULL;
            if (TH_STOP == elt->state) {
                break;
            }
            task = pop_task(me);
        }
        assert(NULL == elt->current_owner);
        if (TH_STOP != elt->state)
            APR_RING_REMOVE(elt, link);

        /* Test whether this busy thread has been asked to stop; it is not joinable */
        if ((me->idle_cnt >= me->idle_max
             && !(me->scheduled_task_cnt && 0 >= me->idle_max)
             && !me->idle_wait)
            || me->terminated || elt->state != TH_RUN) {
            --me->thd_cnt;
            if ((TH_PROBATION == elt->state) && me->idle_wait)
                ++me->thd_timed_out;
            APR_RING_INSERT_TAIL(me->recycled_thds, elt,
                                 apr_thread_list_elt, link);
            apr_thread_mutex_unlock(me->lock);
            apr_thread_detach(t);
            apr_thread_exit(t, APR_SUCCESS);
            return NULL;        /* should not be reached; safety net */
        }

        /* busy thread becomes idle */
        ++me->idle_cnt;
        APR_RING_INSERT_TAIL(me->idle_thds, elt, apr_thread_list_elt, link);

        /*
         * If there is a scheduled task, always use a timed wait so this
         * thread can perform it, since there is no guarantee that the
         * currently idle threads will be the ones signalled for the next
         * scheduled task.
         */
        if (me->scheduled_task_cnt)
            wait = waiting_time(me);
        else if (me->idle_cnt > me->idle_max) {
            wait = me->idle_wait;
            elt->state = TH_PROBATION;
        }
        else
            wait = -1;

        if (wait >= 0) {
            apr_thread_cond_timedwait(me->cond, me->lock, wait);
        }
        else {
            apr_thread_cond_wait(me->cond, me->lock);
        }
    }

    /* an idle thread has been asked to stop; it will be joined */
    --me->thd_cnt;
    apr_thread_mutex_unlock(me->lock);
    apr_thread_exit(t, APR_SUCCESS);
    return NULL;                /* should not be reached; safety net */
}
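
Note: the worker loop above backs the public APR-Util thread-pool API (apr_thread_pool_create, apr_thread_pool_push, apr_thread_pool_destroy). The sketch below is a minimal caller, not part of the example above; the task function, thread counts, priority and the one-second sleep are illustrative assumptions, and error handling is omitted for brevity.

#include <stdio.h>
#include <apr_general.h>
#include <apr_time.h>
#include <apr_thread_pool.h>

/* Illustrative task; each pushed task is eventually run by thread_pool_func. */
static void *APR_THREAD_FUNC hello_task(apr_thread_t *t, void *param)
{
    printf("task ran with param: %s\n", (const char *)param);
    return NULL;
}

int main(void)
{
    apr_pool_t *pool;
    apr_thread_pool_t *tp;

    apr_initialize();
    apr_pool_create(&pool, NULL);

    /* start with 2 worker threads, allow the pool to grow to 8 */
    apr_thread_pool_create(&tp, 2, 8, pool);

    apr_thread_pool_push(tp, hello_task, "hello",
                         APR_THREAD_TASK_PRIORITY_NORMAL, NULL);

    apr_sleep(apr_time_from_sec(1));   /* crude: give the task time to finish */

    apr_thread_pool_destroy(tp);       /* shuts down and reclaims the workers */
    apr_pool_destroy(pool);
    apr_terminate();
    return 0;
}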