Example #1
ex_off_t _lru_force_free_mem(cache_t *c, segment_t *page_seg, ex_off_t bytes_to_free, int check_waiters)
{
    cache_segment_t *s = (cache_segment_t *)page_seg->priv;
    cache_lru_t *cp = (cache_lru_t *)c->fn.priv;
    ex_off_t freed_bytes, bytes_left;
    int top, finished;
    pigeon_coop_hole_t pch;
    cache_cond_t *cache_cond;

    //** I'm holding this coming in but don't need it cause I can touch all segs
    segment_unlock(page_seg);

    top = 0;
    bytes_left = bytes_to_free;
    freed_bytes = _lru_attempt_free_mem(c, page_seg, bytes_left);
    finished = 0;

    while ((freed_bytes < bytes_to_free) && (finished == 0)) {  //** Keep trying to mark space as free until I get enough
        if (top == 0) {
            top = 1;
            pch = reserve_pigeon_coop_hole(s->c->cond_coop);
            cache_cond = (cache_cond_t *)pigeon_coop_hole_data(&pch);
            cache_cond->count = 0;

            move_to_bottom(cp->pending_free_tasks);
            insert_below(cp->pending_free_tasks, cache_cond);  //** Add myself to the bottom
        } else {
            push(cp->pending_free_tasks, cache_cond);  //** I go on the top
        }

        log_printf(15, "not enough space so waiting cache_cond=%p freed_bytes=" XOT " bytes_to_free=" XOT "\n", cache_cond, freed_bytes, bytes_to_free);
        //** Now wait until it's my turn
        apr_thread_cond_wait(cache_cond->cond, c->lock);

        bytes_left -= freed_bytes;
        freed_bytes = _lru_attempt_free_mem(c, page_seg, bytes_left);
        finished = 1;
    }

    //** Now check if we can handle some waiters
    if (check_waiters == 1) _lru_process_waiters(c);

    cache_unlock(c);  //** Drop the cache lock so both locks can be reacquired in the proper order
    segment_lock(page_seg);  //** Reacquire the segment lock since it was held coming in
    cache_lock(c);

    if (top == 1) release_pigeon_coop_hole(s->c->cond_coop, &pch);

    freed_bytes = bytes_to_free - bytes_left;
//NEW  freed_bytes = bytes_left - freed_bytes;

    return(freed_bytes);
}
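The example above blocks on an APR condition variable until earlier eviction attempts have freed enough cache space, then retries. Below is a minimal sketch of that wait-until-enough-space-is-free idea, assuming plain pthreads and a hypothetical mem_pool_t with pool_release()/pool_reserve() helpers instead of the cache/pigeon-coop types used above, and dropping the FIFO ordering that pending_free_tasks provides:

#include <pthread.h>
#include <stddef.h>

typedef struct {
    pthread_mutex_t lock;         /* protects bytes_used */
    pthread_cond_t  space_freed;  /* signalled whenever space is released */
    size_t bytes_used;
    size_t max_bytes;
} mem_pool_t;

/* Hypothetical helper: give back n bytes and wake one waiter. */
static void pool_release(mem_pool_t *mp, size_t n)
{
    pthread_mutex_lock(&mp->lock);
    mp->bytes_used -= n;
    pthread_cond_signal(&mp->space_freed);
    pthread_mutex_unlock(&mp->lock);
}

/* Block until `needed` bytes fit under the cap, then account for them. */
static void pool_reserve(mem_pool_t *mp, size_t needed)
{
    pthread_mutex_lock(&mp->lock);
    while (mp->max_bytes - mp->bytes_used < needed) {
        /* Not enough room yet: sleep until someone releases space, then recheck. */
        pthread_cond_wait(&mp->space_freed, &mp->lock);
    }
    mp->bytes_used += needed;
    pthread_mutex_unlock(&mp->lock);
}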
Example #2
void _lru_wait_for_page(cache_t *c, segment_t *seg, int ontop)
{
    cache_lru_t *cp = (cache_lru_t *)c->fn.priv;
    cache_segment_t *s = (cache_segment_t *)seg->priv;
    lru_page_wait_t pw;
    pigeon_coop_hole_t pch;
    cache_cond_t *cc;
    ex_off_t bytes_free, bytes_needed, n;
    int check_waiters_first;

    check_waiters_first = (ontop == 0) ? 1 : 0;
    pch = reserve_pigeon_coop_hole(c->cond_coop);
    cc = (cache_cond_t *)pigeon_coop_hole_data(&pch);
    pw.cond = cc->cond;
    pw.bytes_needed = s->page_size;

    bytes_free = _lru_max_bytes(c) - cp->bytes_used;
    while (s->page_size > bytes_free) {
        //** Attempt to free pages
        bytes_needed = s->page_size - bytes_free;
        n = _lru_force_free_mem(c, seg, bytes_needed, check_waiters_first);

        if (n > 0) { //** Didn't make it so wait
            if (ontop == 0) {
                move_to_bottom(cp->waiting_stack);
                insert_below(cp->waiting_stack, &pw);
            } else {
                push(cp->waiting_stack, &pw);
            }

            segment_unlock(seg);  //** Unlock the segment to prevent deadlocks

            apr_thread_cond_wait(pw.cond, c->lock);  //** Wait for the space to become available

            //** Have to reacquire both locks in the correct order
            cache_unlock(c);
            segment_lock(seg);
            cache_lock(c);

            ontop = 1;  //** 2nd time we are always placed on the top of the stack
            check_waiters_first = 0;  //** And don't check on waiters
        }

        bytes_free = _lru_max_bytes(c) - cp->bytes_used;
    }

    release_pigeon_coop_hole(c->cond_coop, &pch);

    return;
}
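When _lru_wait_for_page has to sleep, it first drops the segment lock, waits on the cache lock, and then reacquires both locks in the documented order (segment first, then cache). Here is a minimal sketch of just that lock-ordering dance, assuming generic pthread mutexes and a hypothetical locks_t rather than the segment_lock()/cache_lock() wrappers above:

#include <pthread.h>

typedef struct {
    pthread_mutex_t seg_lock;    /* rule: always taken before cache_lock */
    pthread_mutex_t cache_lock;
    pthread_cond_t  space_ready; /* signalled when space becomes available */
} locks_t;

/* Called with both locks held (segment first, then cache). */
static void wait_for_space(locks_t *lk)
{
    pthread_mutex_unlock(&lk->seg_lock);   /* unlock the segment while sleeping to prevent deadlocks */

    /* The wait releases cache_lock while sleeping and returns holding only cache_lock. */
    pthread_cond_wait(&lk->space_ready, &lk->cache_lock);

    /* Reacquire both locks in the required order: segment first, then cache. */
    pthread_mutex_unlock(&lk->cache_lock);
    pthread_mutex_lock(&lk->seg_lock);
    pthread_mutex_lock(&lk->cache_lock);
}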
Example #3
void gop_init(op_generic_t *gop)
{
    pigeon_coop_hole_t pch;

    op_common_t *base = &(gop->base);

    type_memclear(gop, op_generic_t, 1);

    base->id = atomic_global_counter();

    log_printf(15, "gop ptr=%p gid=%d\n", gop, gop_id(gop));

    //** Get the control struct
    pch = reserve_pigeon_coop_hole(_gop_control);
    gop->base.ctl = (gop_control_t *)pigeon_coop_hole_data(&pch);
    gop->base.ctl->pch = pch;
}
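All of these examples follow the same reserve -> pigeon_coop_hole_data -> release pattern: claim a preallocated slot from a pool, use the storage it hands back, and return the slot later. Below is a minimal sketch of that pattern with a toy fixed-size pool; slot_pool_t, slot_handle_t, and the pool_* functions are hypothetical stand-ins, not the real pigeon-coop API, and locking is omitted:

#include <stddef.h>

#define POOL_SLOTS 16

typedef struct {
    void *data[POOL_SLOTS];    /* per-slot storage, preallocated at pool setup */
    int   in_use[POOL_SLOTS];
} slot_pool_t;

typedef struct { int index; } slot_handle_t;   /* plays the role of pigeon_coop_hole_t */

/* Claim a free slot; index == -1 means the pool is exhausted. */
static slot_handle_t pool_reserve_slot(slot_pool_t *p)
{
    slot_handle_t h = { -1 };
    for (int i = 0; i < POOL_SLOTS; i++) {
        if (!p->in_use[i]) { p->in_use[i] = 1; h.index = i; break; }
    }
    return h;
}

/* Storage attached to the slot (what pigeon_coop_hole_data() returns above). */
static void *pool_slot_data(slot_pool_t *p, slot_handle_t *h)
{
    return (h->index >= 0) ? p->data[h->index] : NULL;
}

/* Hand the slot back so it can be reused. */
static void pool_release_slot(slot_pool_t *p, slot_handle_t *h)
{
    if (h->index >= 0) p->in_use[h->index] = 0;
}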
Example #4
ex_off_t _lru_attempt_free_mem(cache_t *c, segment_t *page_seg, ex_off_t bytes_to_free)
{
    cache_lru_t *cp = (cache_lru_t *)c->fn.priv;
    cache_segment_t *s;
    segment_t *pseg;
    cache_page_t *p;
    page_lru_t *lp;
    Stack_ele_t *ele;
    op_generic_t *gop;
    opque_t *q;
    ex_off_t total_bytes, freed_bytes, pending_bytes, *poff;
    ex_off_t *segid;
    ex_off_t min_off, max_off;
    list_iter_t sit;
    int count, bits, cw, flush_count;
    list_t *table;
    page_table_t *ptable;
    pigeon_coop_hole_t pch, pt_pch;

    log_printf(15, "START seg=" XIDT " bytes_to_free=" XOT " bytes_used=" XOT " stack_size=%d\n", segment_id(page_seg), bytes_to_free, cp->bytes_used, stack_size(cp->stack));

    freed_bytes = 0;
    pending_bytes = 0;
    total_bytes = 0;

    //** cache_lock(c) is already acquired
    pch = reserve_pigeon_coop_hole(cp->free_pending_tables);
    table = *(list_t **)pigeon_coop_hole_data(&pch);

    //** Get the list of pages to free
    move_to_bottom(cp->stack);
    ele = stack_unlink_current(cp->stack, 1);
    while ((total_bytes < bytes_to_free) && (ele != NULL)) {
        p = (cache_page_t *)get_stack_ele_data(ele);
        lp = (page_lru_t *)p->priv;

        bits = atomic_get(p->bit_fields);
        log_printf(15, "checking page for release seg=" XIDT " p->offset=" XOT " bits=%d\n", segment_id(p->seg), p->offset, bits);
        flush_log();

        if ((bits & C_TORELEASE) == 0) { //** Skip it if already flagged for removal
            ptable = (page_table_t *)list_search(table, (list_key_t *)&(segment_id(p->seg)));
            if (ptable == NULL) {  //** Have to make a new segment entry
                pt_pch = reserve_pigeon_coop_hole(cp->free_page_tables);
                ptable = (page_table_t *)pigeon_coop_hole_data(&pt_pch);
                ptable->seg = p->seg;
                ptable->id = segment_id(p->seg);
                ptable->pch = pt_pch;
                list_insert(table, &(ptable->id), ptable);
            }

            cp->limbo_pages++;
            log_printf(15, "UNLINKING seg=" XIDT " p->offset=" XOT " bits=%d limbo=%d\n", segment_id(p->seg), p->offset, bits, cp->limbo_pages);

            atomic_inc(p->access_pending[CACHE_READ]);  //** Do this so it's not accidentally deleted
            push(ptable->stack, p);
            s = (cache_segment_t *)p->seg->priv;
            total_bytes += s->page_size;
            free(lp->ele);
            lp->ele = NULL;  //** Mark it as removed from the list so a page_release doesn't free also
        }

        if (total_bytes < bytes_to_free) ele = stack_unlink_current(cp->stack, 1);
    }


    if (total_bytes == 0) {  //** Nothing to do so exit
        log_printf(15, "Nothing to do so exiting\n");
        release_pigeon_coop_hole(cp->free_pending_tables, &pch);
        return(0);
    }

    cache_unlock(c);  //** Don't need the cache lock for the next part

    q = new_opque();
    opque_start_execution(q);

    //** Now cycle through the segments to be freed
    pending_bytes = 0;
    sit = list_iter_search(table, list_first_key(table), 0);
    list_next(&sit, (list_key_t **)&segid, (list_data_t **)&ptable);
    while (ptable != NULL) {
        //** Verify the segment is still valid.  If not then just delete everything
        pseg = list_search(c->segments, segid);
        if (pseg != NULL) {
            segment_lock(ptable->seg);
            s = (cache_segment_t *)ptable->seg->priv;  //** Must be set before s->total_size is used below
            min_off = s->total_size;
            max_off = -1;
            while ((p = pop(ptable->stack)) != NULL) {
                atomic_dec(p->access_pending[CACHE_READ]); //** Removed my access control from earlier
                flush_count = atomic_get(p->access_pending[CACHE_FLUSH]);
                cw = atomic_get(p->access_pending[CACHE_WRITE]);
                count = atomic_get(p->access_pending[CACHE_READ]) + cw + flush_count;
                bits = atomic_get(p->bit_fields);
                if (count != 0) { //** Currently in use so wait for it to be released
                    if (cw > 0) {  //** Got writes so need to wait until they complete otherwise the page may not get released
                        bits = bits | C_TORELEASE;  //** Mark it for release
                        atomic_set(p->bit_fields, bits);
                        _cache_drain_writes(p->seg, p);  //** Drain the write ops
                        bits = atomic_get(p->bit_fields);  //** Get the bit fields to see if it's dirty
                    }

                    if (flush_count == 0) {  //** Make sure it's not already being flushed
                        if ((bits & C_ISDIRTY) != 0) {  //** Have to flush it don't have to track it cause the flush will do the release
                            if (min_off > p->offset) min_off = p->offset;
                            if (max_off < p->offset) max_off = p->offset;
                        }
                    }
                    bits = bits | C_TORELEASE;

                    log_printf(15, "in use tagging for release seg=" XIDT " p->offset=" XOT " bits=%d\n", segment_id(p->seg), p->offset, bits);
                    atomic_set(p->bit_fields, bits);

                    pending_bytes += s->page_size;
                } else {  //** Not in use
                    if ((bits & (C_ISDIRTY|C_EMPTY)) == 0) {  //** Don't have to flush it just drop the page
                        cp->limbo_pages--;
                        log_printf(15, "FREEING page seg=" XIDT " p->offset=" XOT " bits=%d limbo=%d\n", segment_id(p->seg), p->offset, bits, cp->limbo_pages);
                        list_remove(s->pages, &(p->offset), p);  //** Have to do this here cause p->offset is the key var
                        if (p->data[0].ptr) free(p->data[0].ptr);
                        if (p->data[1].ptr) free(p->data[1].ptr);
                        lp = (page_lru_t *)p->priv;
                        free(lp);
                        freed_bytes += s->page_size;
                    } else {         //** Got to flush the page first but don't have to track it cause the flush will do the release
                        if (p->offset > -1) { //** Skip blank pages
                            if (min_off > p->offset) min_off = p->offset;
                            if (max_off < p->offset) max_off = p->offset;
                        }

                        bits = bits | C_TORELEASE;
                        atomic_set(p->bit_fields, bits);

                        pending_bytes += s->page_size;
                        if (p->offset > -1) {
                            log_printf(15, "FLUSHING page seg=" XIDT " p->offset=" XOT " bits=%d\n", segment_id(p->seg), p->offset, bits);
                        } else {
                            log_printf(15, "RELEASE trigger for empty page seg=" XIDT " p->offset=" XOT " bits=%d\n", segment_id(p->seg), p->offset, bits);
                        }
                    }
                }

                list_next(&sit, (list_key_t **)&poff, (list_data_t **)&p);
            }

            segment_unlock(ptable->seg);

            if (max_off > -1) {
                gop = cache_flush_range(ptable->seg, s->c->da, min_off, max_off + s->page_size - 1, s->c->timeout);
                opque_add(q, gop);
            }
        } else {  //** Segment has been deleted so drop everything since it's already freed
            empty_stack(ptable->stack, 0);
        }

        cache_lock(c);
        release_pigeon_coop_hole(cp->free_page_tables, &(ptable->pch));
        cache_unlock(c);

        list_next(&sit, (skiplist_key_t **)&pseg, (skiplist_data_t **)&ptable);
    }

    cache_lock(c);
    log_printf(15, "BEFORE waitall seg=" XIDT " bytes_to_free=" XOT " bytes_used=" XOT " freed_bytes=" XOT " pending_bytes=" XOT "\n",
               segment_id(page_seg), bytes_to_free, cp->bytes_used, freed_bytes, pending_bytes);
    cache_unlock(c);


    //** Wait for any tasks to complete
    opque_waitall(q);
    opque_free(q, OP_DESTROY);

    //** Reacquire the cache lock since it was held coming in
    cache_lock(c);

    log_printf(15, "AFTER waitall seg=" XIDT " bytes_used=" XOT "\n", segment_id(page_seg), cp->bytes_used);

    cp->bytes_used -= freed_bytes;  //** Update how much I directly freed

    log_printf(15, "AFTER used update seg=" XIDT " bytes_used=" XOT "\n", segment_id(page_seg), cp->bytes_used);

    //** Clean up
    empty_skiplist(table);
    release_pigeon_coop_hole(cp->free_pending_tables, &pch);

    log_printf(15, "total_bytes marked for removal =" XOT "\n", total_bytes);

    return(total_bytes);
}
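The first loop in _lru_attempt_free_mem walks the LRU stack from the least-recently-used end and pulls pages off until enough bytes have been collected, skipping pages already tagged for release. Below is a minimal sketch of that selection step, assuming a bare doubly linked list and a hypothetical lru_page_t instead of the Stack_ele_t/page_table_t machinery above:

#include <stddef.h>

typedef struct lru_page {
    struct lru_page *prev, *next;
    size_t page_size;
    int    to_release;            /* already tagged for release: skip it */
} lru_page_t;

typedef struct {
    lru_page_t *head, *tail;      /* head = most recently used, tail = least */
} lru_list_t;

static void lru_unlink(lru_list_t *l, lru_page_t *p)
{
    if (p->prev) p->prev->next = p->next; else l->head = p->next;
    if (p->next) p->next->prev = p->prev; else l->tail = p->prev;
    p->prev = p->next = NULL;
}

/* Walk from the cold end, unlinking victims until `target` bytes are covered.
 * `claim` is a caller-supplied callback that queues the page for flush/free.
 * Returns how many bytes the chosen pages account for. */
static size_t lru_collect_victims(lru_list_t *l, size_t target,
                                  void (*claim)(lru_page_t *p, void *arg), void *arg)
{
    size_t total = 0;
    lru_page_t *p = l->tail;
    while (total < target && p != NULL) {
        lru_page_t *warmer = p->prev;   /* remember the next candidate before unlinking */
        if (!p->to_release) {
            lru_unlink(l, p);
            claim(p, arg);
            total += p->page_size;
        }
        p = warmer;
    }
    return total;
}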