Example #1
int _lru_free_mem(cache_t *c, segment_t *pseg, ex_off_t bytes_to_free)
{
    cache_lru_t *cp = (cache_lru_t *)c->fn.priv;
    cache_segment_t *s;
    cache_page_t *p;
    page_lru_t *lp;
    Stack_ele_t *ele;
    apr_thread_mutex_t *plock;
    ex_off_t total_bytes, pending_bytes;
    int gotlock, count, bits, err;

    total_bytes = 0;
    err = 0;

    log_printf(15, "START seg=" XIDT " bytes_to_free=" XOT " bytes_used=" XOT " stack_size=%d\n", segment_id(pseg), bytes_to_free, cp->bytes_used, stack_size(cp->stack));

    move_to_bottom(cp->stack);
    ele = get_ptr(cp->stack);
    while ((total_bytes < bytes_to_free) && (ele != NULL) && (err == 0)) {
        p = (cache_page_t *)get_stack_ele_data(ele);
        lp = (page_lru_t *)p->priv;
        plock = p->seg->lock;
        gotlock = apr_thread_mutex_trylock(plock);
        if ((gotlock == APR_SUCCESS) || (p->seg == pseg)) {
            bits = atomic_get(p->bit_fields);
            if ((bits & C_TORELEASE) == 0) { //** Skip it if already flagged for removal
                count = atomic_get(p->access_pending[CACHE_READ]) + atomic_get(p->access_pending[CACHE_WRITE]) + atomic_get(p->access_pending[CACHE_FLUSH]);
                if (count == 0) { //** No one is using it
                    s = (cache_segment_t *)p->seg->priv;
                    if ((bits & C_ISDIRTY) == 0) {  //** Don't have to flush it
                        total_bytes += s->page_size;
                        log_printf(15, "lru_free_mem: freeing page seg=" XIDT " p->offset=" XOT " bits=%d\n", segment_id(p->seg), p->offset, bits);
                        list_remove(s->pages, &(p->offset), p);  //** Have to do this here because p->offset is the key var
                        delete_current(cp->stack, 1, 0);
                        if (p->data[0].ptr) free(p->data[0].ptr);
                        if (p->data[1].ptr) free(p->data[1].ptr);
                        free(lp);
                    } else {         //** Got to flush the page first
                        err = 1;
                    }
                } else {
                    err = 1;
                }
            }
            if (gotlock == APR_SUCCESS) apr_thread_mutex_unlock(plock);
        } else {
            err = 1;
        }

        if ((total_bytes < bytes_to_free) && (err == 0)) ele = get_ptr(cp->stack);
    }

    cp->bytes_used -= total_bytes;
    pending_bytes = bytes_to_free - total_bytes;
    log_printf(15, "END seg=" XIDT " bytes_to_free=" XOT " pending_bytes=" XOT " bytes_used=" XOT "\n", segment_id(pseg), bytes_to_free, pending_bytes, cp->bytes_used);

    return(pending_bytes);
}
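
Examples #1 and #8 both rely on the toolbox stack's cursor idiom: move_to_bottom() parks a cursor at the LRU end, get_ptr() reads the element under it, and delete_current() unlinks it. As a rough, self-contained sketch of those assumed semantics (the real toolbox delete_current() takes extra direction and data-free arguments that are omitted here), a minimal cursor stack might look like this:

#include <stdio.h>
#include <stdlib.h>

typedef struct xelem {             /* one stack element */
    struct xelem *up, *down;
    void *data;
} xelem_t;

typedef struct {                   /* stack with a movable cursor */
    xelem_t *top, *bottom, *cur;
} xstack_t;

void move_to_bottom(xstack_t *s) { s->cur = s->bottom; }

void *get_ptr(xstack_t *s) { return (s->cur != NULL) ? s->cur->data : NULL; }

/* Remove the element under the cursor and step the cursor toward the top */
void delete_current(xstack_t *s)
{
    xelem_t *e = s->cur;
    if (e == NULL) return;
    if (e->up != NULL)   e->up->down = e->down; else s->top    = e->down;
    if (e->down != NULL) e->down->up = e->up;   else s->bottom = e->up;
    s->cur = e->up;
    free(e);
}

void push_top(xstack_t *s, void *data)   /* helper to build a test stack */
{
    xelem_t *e = calloc(1, sizeof(*e));
    e->data = data;
    e->down = s->top;
    if (s->top != NULL) s->top->up = e; else s->bottom = e;
    s->top = e;
}

int main(void)
{
    xstack_t s = { NULL, NULL, NULL };
    int v[3] = { 1, 2, 3 };
    void *d;

    push_top(&s, &v[0]); push_top(&s, &v[1]); push_top(&s, &v[2]);

    move_to_bottom(&s);                  /* start at the LRU end */
    while ((d = get_ptr(&s)) != NULL) {  /* walk upward, evicting as we go */
        printf("evict %d\n", *(int *)d);
        delete_current(&s);
    }
    return 0;
}

Under these semantics the loop in Example #1 walks upward from the least recently used page, and re-reading the cursor with get_ptr() after a delete_current() yields the next candidate rather than the removed one.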
Example #2
ex_off_t _lru_force_free_mem(cache_t *c, segment_t *page_seg, ex_off_t bytes_to_free, int check_waiters)
{
    cache_segment_t *s = (cache_segment_t *)page_seg->priv;
    cache_lru_t *cp = (cache_lru_t *)c->fn.priv;
    ex_off_t freed_bytes, bytes_left;
    int top, finished;
    pigeon_coop_hole_t pch;
    cache_cond_t *cache_cond;

    //** I'm holding this coming in but don't need it because I can touch all segs
    segment_unlock(page_seg);

    top = 0;
    bytes_left = bytes_to_free;
    freed_bytes = _lru_attempt_free_mem(c, page_seg, bytes_left);
    finished = 0;

    while ((freed_bytes < bytes_to_free) && (finished == 0)) {  //** Keep trying to mark space as free until I get enough
        if (top == 0) {
            top = 1;
            pch = reserve_pigeon_coop_hole(s->c->cond_coop);
            cache_cond = (cache_cond_t *)pigeon_coop_hole_data(&pch);
            cache_cond->count = 0;

            move_to_bottom(cp->pending_free_tasks);
            insert_below(cp->pending_free_tasks, cache_cond);  //** Add myself to the bottom
        } else {
            push(cp->pending_free_tasks, cache_cond);  //** I go on the top
        }

        log_printf(15, "not enough space so waiting cache_cond=%p freed_bytes=" XOT " bytes_to_free=" XOT "\n", cache_cond, freed_bytes, bytes_to_free);
        //** Now wait until it's my turn
        apr_thread_cond_wait(cache_cond->cond, c->lock);

        bytes_left -= freed_bytes;
        freed_bytes = _lru_attempt_free_mem(c, page_seg, bytes_left);
        finished = 1;
    }

    //** Now check if we can handle some waiters
    if (check_waiters == 1) _lru_process_waiters(c);

    cache_unlock(c);  //** Drop the cache lock so both locks can be reacquired in the proper order
    segment_lock(page_seg);  //** Reacquire the segment lock because I had it coming in
    cache_lock(c);

    if (top == 1) release_pigeon_coop_hole(s->c->cond_coop, &pch);

    freed_bytes = bytes_to_free - bytes_left;
//NEW  freed_bytes = bytes_left - freed_bytes;

    return(freed_bytes);
}
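
The waiting half of _lru_force_free_mem is a classic condition-variable loop: a thread that could not free enough space parks itself and sleeps on c->lock until a page release wakes it. Stripped of the pigeon-coop bookkeeping, the shape is roughly this (plain pthreads, with a hypothetical byte counter standing in for the cache accounting):

#include <pthread.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  cond = PTHREAD_COND_INITIALIZER;
static long bytes_free = 0;    /* hypothetical stand-in for the cache counter */

/* Block until at least `need` bytes are free, then claim them. */
void wait_for_bytes(long need)
{
    pthread_mutex_lock(&lock);
    while (bytes_free < need)
        pthread_cond_wait(&cond, &lock);   /* releases lock while sleeping */
    bytes_free -= need;
    pthread_mutex_unlock(&lock);
}

/* Called by the releasing side; wakes every waiter to re-check its need. */
void release_bytes(long n)
{
    pthread_mutex_lock(&lock);
    bytes_free += n;
    pthread_cond_broadcast(&cond);
    pthread_mutex_unlock(&lock);
}

The real code adds fairness on top of this: waiters are kept on the pending_free_tasks stack (insert_below() for a first attempt, push() for retries) so they can be woken in a chosen order, which a bare broadcast does not provide.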
Example #3
void _lru_wait_for_page(cache_t *c, segment_t *seg, int ontop)
{
    cache_lru_t *cp = (cache_lru_t *)c->fn.priv;
    cache_segment_t *s = (cache_segment_t *)seg->priv;
    lru_page_wait_t pw;
    pigeon_coop_hole_t pch;
    cache_cond_t *cc;
    ex_off_t bytes_free, bytes_needed, n;
    int check_waiters_first;

    check_waiters_first = (ontop == 0) ? 1 : 0;
    pch = reserve_pigeon_coop_hole(c->cond_coop);
    cc = (cache_cond_t *)pigeon_coop_hole_data(&pch);
    pw.cond = cc->cond;
    pw.bytes_needed = s->page_size;

    bytes_free = _lru_max_bytes(c) - cp->bytes_used;
    while (s->page_size > bytes_free) {
        //** Attempt to free pages
        bytes_needed = s->page_size - bytes_free;
        n = _lru_force_free_mem(c, seg, bytes_needed, check_waiters_first);

        if (n > 0) { //** Didn't make it so wait
            if (ontop == 0) {
                move_to_bottom(cp->waiting_stack);
                insert_below(cp->waiting_stack, &pw);
            } else {
                push(cp->waiting_stack, &pw);
            }

            segment_unlock(seg);  //** Unlock the segment to prevent deadlocks

            apr_thread_cond_wait(pw.cond, c->lock);  //** Wait for the space to become available

            //** Have to reacquire both locks in the correct order
            cache_unlock(c);
            segment_lock(seg);
            cache_lock(c);

            ontop = 1;  //** 2nd time we are always placed on the top of the stack
            check_waiters_first = 0;  //** And don't check on waiters
        }

        bytes_free = _lru_max_bytes(c) - cp->bytes_used;
    }

    release_pigeon_coop_hole(c->cond_coop, &pch);

    return;
}
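
Examples #2 and #3 both perform the same re-lock dance before returning: drop the cache lock, take the segment lock, then retake the cache lock, so the two are only ever acquired segment-before-cache. A minimal sketch of why the order matters (mutex names are illustrative):

#include <pthread.h>

pthread_mutex_t segment_lock = PTHREAD_MUTEX_INITIALIZER;  /* illustrative */
pthread_mutex_t cache_lock   = PTHREAD_MUTEX_INITIALIZER;

/* Global order: segment_lock before cache_lock.  A thread holding only
 * cache_lock must not block on segment_lock, or it can deadlock against
 * a thread doing the normal segment-then-cache acquisition.  So: */
void reacquire_in_order(void)
{
    pthread_mutex_unlock(&cache_lock);   /* give up the out-of-order lock */
    pthread_mutex_lock(&segment_lock);   /* retake both in the global order */
    pthread_mutex_lock(&cache_lock);
}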
Example #4
int internal_opque_add(opque_t *que, op_generic_t *gop, int dolock)
{
    int err = 0;
    callback_t *cb;
    que_data_t *q = &(que->qd);

    //** Create the callback **
    type_malloc(cb, callback_t, 1);
    callback_set(cb, _opque_cb, (void *)gop); //** Set the global callback for the list

    if (dolock != 0) lock_opque(q);

    log_printf(15, "opque_add: qid=%d gid=%d\n", que->op.base.id, gop_get_id(gop));

    //** Add the list CB to the op
//  lock_gop(gop)
    gop->base.parent_q = que;
    callback_append(&(gop->base.cb), cb);
//  unlock_gop(gop)

    //** Add the op to the q
    q->nsubmitted++;
    q->nleft++;
    if (q->opque->op.base.started_execution == 0) {
        move_to_bottom(q->list);
        insert_below(q->list, (void *)cb);
    }

    if (dolock != 0) unlock_opque(q);

    if (q->opque->op.base.started_execution == 1) {
        if (gop->type == Q_TYPE_OPERATION) {
            log_printf(15, "gid=%d started_execution=%d\n", gop_get_id(gop), gop->base.started_execution);
            gop->base.started_execution = 1;
            gop->base.pc->fn->submit(gop->base.pc->arg, gop);
        } else {  //** It's a queue
            opque_start_execution(gop->q->opque);
        }
    }

    return(err);
}
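
callback_set() and callback_append() above maintain a per-op list of completion callbacks; _opque_cb from Example #7 is installed through exactly this path. The snippet below is a hypothetical, simplified reconstruction of that pattern, not the library's actual implementation:

#include <stddef.h>

typedef struct cb {
    void (*fn)(void *priv, int mode);  /* e.g. _opque_cb */
    void *priv;                        /* e.g. the gop being tracked */
    struct cb *next;
} cb_t;

void cb_set(cb_t *cb, void (*fn)(void *, int), void *priv)
{
    cb->fn = fn;
    cb->priv = priv;
    cb->next = NULL;
}

/* Append cb at the tail of the list rooted at *root (cf. callback_append). */
void cb_append(cb_t **root, cb_t *cb)
{
    while (*root != NULL) root = &(*root)->next;
    *root = cb;
}

/* Fire every callback in order with the completion mode (cf. callback_execute). */
void cb_execute(cb_t *cb, int mode)
{
    for (; cb != NULL; cb = cb->next)
        cb->fn(cb->priv, mode);
}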
Example #5
task main()
{
	move_to_bottom();

	wait1Msec(2000);

	do {
		if(SensorValue(floor_1) == 1){
			move_to_floor(current_floor(), FIRST);
		}
		else if(SensorValue(floor_2) == 1){
			move_to_floor(current_floor(), SECOND);
		}
		else if(SensorValue(floor_3) == 1){
			move_to_floor(current_floor(), THIRD);
		}
	} while(true);

	move_motors(0);
}
Example #6
static cb_ret_t
help_execute_cmd (unsigned long command)
{
    cb_ret_t ret = MSG_HANDLED;

    switch (command)
    {
    case CK_Help:
        help_help (whelp);
        break;
    case CK_Index:
        help_index (whelp);
        break;
    case CK_Back:
        help_back (whelp);
        break;
    case CK_Up:
        help_prev_link (TRUE);
        break;
    case CK_Down:
        help_next_link (TRUE);
        break;
    case CK_PageDown:
        move_forward (help_lines - 1);
        break;
    case CK_PageUp:
        move_backward (help_lines - 1);
        break;
    case CK_HalfPageDown:
        move_forward (help_lines / 2);
        break;
    case CK_HalfPageUp:
        move_backward (help_lines / 2);
        break;
    case CK_Top:
        move_to_top ();
        break;
    case CK_Bottom:
        move_to_bottom ();
        break;
    case CK_Enter:
        help_select_link ();
        break;
    case CK_LinkNext:
        help_next_link (FALSE);
        break;
    case CK_LinkPrev:
        help_prev_link (FALSE);
        break;
    case CK_NodeNext:
        help_next_node ();
        break;
    case CK_NodePrev:
        help_prev_node ();
        break;
    case CK_Quit:
        dlg_stop (whelp);
        break;
    default:
        ret = MSG_NOT_HANDLED;
    }

    return ret;
}
Example #7
void _opque_cb(void *v, int mode)
{
    int type, n;
    op_status_t success;
    op_generic_t *gop = (op_generic_t *)v;
    que_data_t *q = &(gop->base.parent_q->qd);

    log_printf(15, "_opque_cb: START qid=%d gid=%d\n", gop_id(&(q->opque->op)), gop_id(gop));

    //** Get the status (gop is already locked)
    type = gop_get_type(gop);
    if (type == Q_TYPE_QUE) {
        n = stack_size(gop->q->failed);
        log_printf(15, "_opque_cb: qid=%d gid=%d  stack_size(q->failed)=%d gop->status=%d\n", gop_id(&(q->opque->op)), gop_id(gop), n, gop->base.status.op_status);
        success = (n == 0) ? gop->base.status : op_failure_status;
    } else {
        success = gop->base.status;
    }

    lock_opque(q);

    log_printf(15, "_opque_cb: qid=%d gid=%d success=%d gop_type(gop)=%d\n", gop_id(&(q->opque->op)), gop_id(gop), success, gop_get_type(gop));


    //** It always goes on the finished list
    move_to_bottom(q->finished);
    insert_below(q->finished, gop);

    log_printf(15, "PUSH finished gid=%d qid=%d\n", gop_id(gop), gop_id(&(q->opque->op)));
    log_printf(15, "Printing finished stack for qid=%d\n", gop_id(&(q->opque->op)));
    if (log_level() > 15) _opque_print_stack(q->finished);

    if (success.op_status == OP_STATE_FAILURE) push(q->failed, gop); //** Push it on the failed list if needed

    q->nleft--;
    log_printf(15, "_opque_cb: qid=%d gid=%d nleft=%d stack_size(q->failed)=%d stack_size(q->finished)=%d\n", gop_id(&(q->opque->op)), gop_id(gop), q->nleft, stack_size(q->failed), stack_size(q->finished));
    flush_log();

    if (q->nleft <= 0) {  //** we're finished
        if (stack_size(q->failed) == 0) {
            q->opque->op.base.status = op_success_status;
            callback_execute(q->opque->op.base.cb, OP_STATE_SUCCESS);

            //** Lastly trigger the signal for anybody listening
            apr_thread_cond_broadcast(q->opque->op.base.ctl->cond);
    } else if (q->opque->op.base.retries == 0) {  //** Haven't retried yet so attempt a recovery
            //** Trigger the callbacks
            q->opque->op.base.retries++;
            q->nleft = 0;
            q->opque->op.base.failure_mode = 0;
            callback_execute(&(q->failure_cb), OP_STATE_FAILURE);  //** Attempt to fix things

            if (q->opque->op.base.failure_mode == 0) {  //** No retry
                q->opque->op.base.status = op_failure_status;
                callback_execute(q->opque->op.base.cb, OP_STATE_FAILURE);  //**Execute the other CBs
                apr_thread_cond_broadcast(q->opque->op.base.ctl->cond);  //** and fail for good
            }

            //** If retrying don't send the broadcast
            log_printf(15, "_opque_cb: RETRY END qid=%d gid=%d\n", gop_id(&(q->opque->op)), gop_id(gop));
            flush_log();
        } else {
            //** Finished with errors but trigger the signal for anybody listening
            apr_thread_cond_broadcast(q->opque->op.base.ctl->cond);
        }
    } else {
        //** Not finished but trigger the signal for anybody listening
        apr_thread_cond_broadcast(q->opque->op.base.ctl->cond);
    }

    log_printf(15, "_opque_cb: END qid=%d gid=%d\n", gop_id(&(q->opque->op)), gop_id(gop));
    flush_log();

    unlock_opque(q);
}
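
The completion accounting in _opque_cb boils down to two counters: the queue finishes when nleft reaches zero, and its status is success only if the failed stack stayed empty. A hypothetical reduction of that logic:

/* Hypothetical reduction of the completion accounting in _opque_cb; the
 * real code also runs callbacks, retries, and condition broadcasts. */
typedef struct {
    int nleft;      /* ops still outstanding (q->nleft) */
    int nfailed;    /* size of the failed stack */
} que_state_t;

/* Record one op completion; returns +1 queue success, -1 queue failure,
 * 0 still running. */
int que_progress(que_state_t *q, int op_failed)
{
    if (op_failed) q->nfailed++;
    q->nleft--;
    if (q->nleft > 0) return 0;
    return (q->nfailed == 0) ? +1 : -1;
}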
Example #8
ex_off_t _lru_attempt_free_mem(cache_t *c, segment_t *page_seg, ex_off_t bytes_to_free)
{
    cache_lru_t *cp = (cache_lru_t *)c->fn.priv;
    cache_segment_t *s;
    segment_t *pseg;
    cache_page_t *p;
    page_lru_t *lp;
    Stack_ele_t *ele;
    op_generic_t *gop;
    opque_t *q;
    ex_off_t total_bytes, freed_bytes, pending_bytes, *poff;
    ex_off_t *segid;
    ex_off_t min_off, max_off;
    list_iter_t sit;
    int count, bits, cw, flush_count;
    list_t *table;
    page_table_t *ptable;
    pigeon_coop_hole_t pch, pt_pch;

    log_printf(15, "START seg=" XIDT " bytes_to_free=" XOT " bytes_used=" XOT " stack_size=%d\n", segment_id(page_seg), bytes_to_free, cp->bytes_used, stack_size(cp->stack));

    freed_bytes = 0;
    pending_bytes = 0;
    total_bytes = 0;

    //** cache_lock(c) is already acquired
    pch = reserve_pigeon_coop_hole(cp->free_pending_tables);
    table = *(list_t **)pigeon_coop_hole_data(&pch);

    //** Get the list of pages to free
    move_to_bottom(cp->stack);
    ele = stack_unlink_current(cp->stack, 1);
    while ((total_bytes < bytes_to_free) && (ele != NULL)) {
        p = (cache_page_t *)get_stack_ele_data(ele);
        lp = (page_lru_t *)p->priv;

        bits = atomic_get(p->bit_fields);
        log_printf(15, "checking page for release seg=" XIDT " p->offset=" XOT " bits=%d\n", segment_id(p->seg), p->offset, bits);
        flush_log();

        if ((bits & C_TORELEASE) == 0) { //** Skip it if already flagged for removal
            ptable = (page_table_t *)list_search(table, (list_key_t *)&(segment_id(p->seg)));
            if (ptable == NULL) {  //** Have to make a new segment entry
                pt_pch = reserve_pigeon_coop_hole(cp->free_page_tables);
                ptable = (page_table_t *)pigeon_coop_hole_data(&pt_pch);
                ptable->seg = p->seg;
                ptable->id = segment_id(p->seg);
                ptable->pch = pt_pch;
                list_insert(table, &(ptable->id), ptable);
            }

            cp->limbo_pages++;
            log_printf(15, "UNLINKING seg=" XIDT " p->offset=" XOT " bits=%d limbo=%d\n", segment_id(p->seg), p->offset, bits, cp->limbo_pages);

            atomic_inc(p->access_pending[CACHE_READ]);  //** Do this so it's not accidentally deleted
            push(ptable->stack, p);
            s = (cache_segment_t *)p->seg->priv;
            total_bytes += s->page_size;
            free(lp->ele);
            lp->ele = NULL;  //** Mark it as removed from the list so a page_release doesn't free also
        }

        if (total_bytes < bytes_to_free) ele = stack_unlink_current(cp->stack, 1);
    }


    if (total_bytes == 0) {  //** Nothing to do so exit
        log_printf(15, "Nothing to do so exiting\n");
        release_pigeon_coop_hole(cp->free_pending_tables, &pch);
        return(0);
    }

    cache_unlock(c);  //** Don't need the cache lock for the next part

    q = new_opque();
    opque_start_execution(q);

    //** Now cycle through the segments to be freed
    pending_bytes = 0;
    sit = list_iter_search(table, list_first_key(table), 0);
    list_next(&sit, (list_key_t **)&segid, (list_data_t **)&ptable);
    while (ptable != NULL) {
        //** Verify the segment is still valid.  If not then just delete everything
        pseg = list_search(c->segments, segid);
        if (pseg != NULL) {
            segment_lock(ptable->seg);
            s = (cache_segment_t *)ptable->seg->priv;  //** Must be set before it's used below
            min_off = s->total_size;
            max_off = -1;
            while ((p = pop(ptable->stack)) != NULL) {
                atomic_dec(p->access_pending[CACHE_READ]); //** Remove the access hold taken earlier
                flush_count = atomic_get(p->access_pending[CACHE_FLUSH]);
                cw = atomic_get(p->access_pending[CACHE_WRITE]);
                count = atomic_get(p->access_pending[CACHE_READ]) + cw + flush_count;
                bits = atomic_get(p->bit_fields);
                if (count != 0) { //** Currently in use so wait for it to be released
                    if (cw > 0) {  //** Got writes so need to wait until they complete otherwise the page may not get released
                        bits = bits | C_TORELEASE;  //** Mark it for release
                        atomic_set(p->bit_fields, bits);
                        _cache_drain_writes(p->seg, p);  //** Drain the write ops
                        bits = atomic_get(p->bit_fields);  //** Get the bit fields to see if it's dirty
                    }

                    if (flush_count == 0) {  //** Make sure it's not already being flushed
                        if ((bits & C_ISDIRTY) != 0) {  //** Have to flush it don't have to track it cause the flush will do the release
                            if (min_off > p->offset) min_off = p->offset;
                            if (max_off < p->offset) max_off = p->offset;
                        }
                    }
                    bits = bits | C_TORELEASE;

                    log_printf(15, "in use tagging for release seg=" XIDT " p->offset=" XOT " bits=%d\n", segment_id(p->seg), p->offset, bits);
                    atomic_set(p->bit_fields, bits);

                    pending_bytes += s->page_size;
                } else {  //** Not in use
                    if ((bits & (C_ISDIRTY|C_EMPTY)) == 0) {  //** Don't have to flush it just drop the page
                        cp->limbo_pages--;
                        log_printf(15, "FREEING page seg=" XIDT " p->offset=" XOT " bits=%d limbo=%d\n", segment_id(p->seg), p->offset, bits, cp->limbo_pages);
                        list_remove(s->pages, &(p->offset), p);  //** Have to do this here cause p->offset is the key var
                        if (p->data[0].ptr) free(p->data[0].ptr);
                        if (p->data[1].ptr) free(p->data[1].ptr);
                        lp = (page_lru_t *)p->priv;
                        free(lp);
                        freed_bytes += s->page_size;
                    } else {         //** Got to flush the page first, but don't have to track it because the flush will do the release
                        if (p->offset > -1) { //** Skip blank pages
                            if (min_off > p->offset) min_off = p->offset;
                            if (max_off < p->offset) max_off = p->offset;
                        }

                        bits = bits | C_TORELEASE;
                        atomic_set(p->bit_fields, bits);

                        pending_bytes += s->page_size;
                        if (p->offset > -1) {
                            log_printf(15, "FLUSHING page seg=" XIDT " p->offset=" XOT " bits=%d\n", segment_id(p->seg), p->offset, bits);
                        } else {
                            log_printf(15, "RELEASE trigger for empty page seg=" XIDT " p->offset=" XOT " bits=%d\n", segment_id(p->seg), p->offset, bits);
                        }
                    }
                }

                list_next(&sit, (list_key_t **)&poff, (list_data_t **)&p);
            }

            segment_unlock(ptable->seg);

            if (max_off > -1) {
                gop = cache_flush_range(ptable->seg, s->c->da, min_off, max_off + s->page_size - 1, s->c->timeout);
                opque_add(q, gop);
            }
        } else {  //** Segment has been deleted so drop everything because it's already freed
            empty_stack(ptable->stack, 0);
        }

        cache_lock(c);
        release_pigeon_coop_hole(cp->free_page_tables, &(ptable->pch));
        cache_unlock(c);

        list_next(&sit, (skiplist_key_t **)&pseg, (skiplist_data_t **)&ptable);
    }

    cache_lock(c);
    log_printf(15, "BEFORE waitall seg=" XIDT " bytes_to_free=" XOT " bytes_used=" XOT " freed_bytes=" XOT " pending_bytes=" XOT "\n",
               segment_id(page_seg), bytes_to_free, cp->bytes_used, freed_bytes, pending_bytes);
    cache_unlock(c);


    //** Wait for any tasks to complete
    opque_waitall(q);
    opque_free(q, OP_DESTROY);

    //** Reacquire the lock; we held it coming in
    cache_lock(c);

    log_printf(15, "AFTER waitall seg=" XIDT " bytes_used=" XOT "\n", segment_id(page_seg), cp->bytes_used);

    cp->bytes_used -= freed_bytes;  //** Update how much I directly freed

    log_printf(15, "AFTER used update seg=" XIDT " bytes_used=" XOT "\n", segment_id(page_seg), cp->bytes_used);

    //** Clean up
    empty_skiplist(table);
    release_pigeon_coop_hole(cp->free_pending_tables, &pch);

    log_printf(15, "total_bytes marked for removal =" XOT "\n", total_bytes);

    return(total_bytes);
}
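
One detail worth pulling out of Example #8: dirty pages are not flushed individually. The loop only widens a [min_off, max_off] window per segment and then issues a single cache_flush_range() covering it. The accumulation reduces to the following (offsets and sizes are made up for illustration):

#include <stdio.h>

int main(void)
{
    long page_size = 4096, total_size = 1 << 20;  /* made-up segment geometry */
    long dirty[] = { 8192, 4096, 40960 };         /* made-up dirty page offsets */
    long min_off = total_size, max_off = -1;      /* empty-window sentinels */
    int i;

    for (i = 0; i < 3; i++) {        /* widen the window for each dirty page */
        if (min_off > dirty[i]) min_off = dirty[i];
        if (max_off < dirty[i]) max_off = dirty[i];
    }

    if (max_off > -1)                /* flush once, only if a page qualified */
        printf("flush [%ld, %ld]\n", min_off, max_off + page_size - 1);
    return 0;
}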
Example #9
/*
 *  A round is defined as playing to top and then to bottom until there are no
 *  more cards in the hand deck.
 */
void play_round(deck_t *handdeck, deck_t *tabledeck) {
    while (handdeck->top != NULL) {
        move_to_top(handdeck, tabledeck);
        move_to_bottom(handdeck);
    }
}
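
The deck types behind Example #9 are not shown, so here is a hedged sketch of what the comment implies: a singly linked deck with a top pointer, where move_to_top() plays the hand's top card onto the table deck and move_to_bottom() rotates the next card underneath. These definitions are assumptions for illustration, not the original program's headers:

#include <stdlib.h>

typedef struct card {
    int value;
    struct card *next;
} card_t;

typedef struct deck {
    card_t *top;
} deck_t;

/* Assumed: play the hand's top card onto the table deck */
void move_to_top(deck_t *hand, deck_t *table)
{
    card_t *c = hand->top;
    if (c == NULL) return;
    hand->top = c->next;
    c->next = table->top;
    table->top = c;
}

/* Assumed: rotate the hand's top card to the bottom of the hand */
void move_to_bottom(deck_t *hand)
{
    card_t *c = hand->top, *last;
    if (c == NULL || c->next == NULL) return;
    hand->top = c->next;
    for (last = hand->top; last->next != NULL; last = last->next)
        ;
    last->next = c;
    c->next = NULL;
}

Under these assumed semantics play_round() terminates: every move_to_top() removes exactly one card from the hand, and move_to_bottom() only reorders what is left.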