Example #1
/* Called by a thread when it thinks it is done with GC. It may get some more
 * work yet, though. */
static void finish_gc(MVMThreadContext *tc, MVMuint8 gen) {
    MVMuint32 put_vote = 1, i;

    /* Loop until other threads have terminated, processing any extra work
     * that we are given. */
    while (tc->instance->gc_finish) {
        MVMuint32 failed = 0;
        MVMuint32 i = 0;

        for ( ; i < tc->gc_work_count; i++) {
            process_in_tray(tc->gc_work[i].tc, gen, &put_vote);
            failed = process_sent_items(tc->gc_work[i].tc, &put_vote) | failed;
        }

        if (!failed && put_vote) {
            MVM_atomic_decr(&tc->instance->gc_finish);
            put_vote = 0;
        }
    }
/*    GCORCH_LOG(tc, "Thread %d run %d : Discovered GC termination\n");*/

    /* Reset GC status flags and cleanup sent items for any work threads. */
    /* This is also where thread destruction happens, and it needs to happen
     * before we acknowledge this GC run is finished. */
    for (i = 0; i < tc->gc_work_count; i++) {
        MVMThreadContext *other = tc->gc_work[i].tc;
        MVMThread *thread_obj = other->thread_obj;
        cleanup_sent_items(other);
        if (thread_obj->body.stage == MVM_thread_stage_clearing_nursery) {
            GCORCH_LOG(tc, "Thread %d run %d : freeing gen2 of thread %d\n", other->thread_id);
            /* always free gen2 */
            MVM_gc_collect_free_gen2_unmarked(other);
            GCORCH_LOG(tc, "Thread %d run %d : transferring gen2 of thread %d\n", other->thread_id);
            MVM_gc_gen2_transfer(other, tc);
            GCORCH_LOG(tc, "Thread %d run %d : destroying thread %d\n", other->thread_id);
            MVM_tc_destroy(other);
            tc->gc_work[i].tc = thread_obj->body.tc = NULL;
            thread_obj->body.stage = MVM_thread_stage_destroyed;
        }
        else {
            if (thread_obj->body.stage == MVM_thread_stage_exited) {
                /* don't bother freeing gen2; we'll do it next time */
                thread_obj->body.stage = MVM_thread_stage_clearing_nursery;
//                    GCORCH_LOG(tc, "Thread %d run %d : set thread %d clearing nursery stage to %d\n", other->thread_id, thread_obj->body.stage);
            }
            apr_atomic_cas32(&other->gc_status, MVMGCStatus_UNABLE,
                MVMGCStatus_STOLEN);
            apr_atomic_cas32(&other->gc_status, MVMGCStatus_NONE,
                MVMGCStatus_INTERRUPT);
        }
    }
    MVM_atomic_decr(&tc->instance->gc_ack);
}
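The two CAS calls near the end of the loop only fire when the target thread is still in the expected state; otherwise the swap fails silently and the status word is left alone. A minimal standalone sketch of that conditional-transition idiom, with status constants that are assumed stand-ins for MoarVM's MVMGCStatus_* values:

#include <apr_atomic.h>

/* Stand-ins for MoarVM's MVMGCStatus_* values (assumed, for illustration). */
enum { STATUS_NONE, STATUS_INTERRUPT, STATUS_UNABLE, STATUS_STOLEN };

/* Release a thread after a GC run: each CAS only applies if the status
 * word still holds the expected "from" value, so at most one transition
 * takes effect and unrelated states are never clobbered. */
static void release_thread(volatile apr_uint32_t *status) {
    apr_atomic_cas32(status, STATUS_UNABLE, STATUS_STOLEN);   /* stolen -> blocked    */
    apr_atomic_cas32(status, STATUS_NONE, STATUS_INTERRUPT);  /* interrupt -> running */
}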
Example #2
apr_status_t ap_queue_info_set_idle(fd_queue_info_t *queue_info,
                                    apr_pool_t *pool_to_recycle)
{
    apr_status_t rv;
    int prev_idlers;

    /* If we have been given a pool to recycle, atomically link
     * it into the queue_info's list of recycled pools
     */
    if (pool_to_recycle) {
        struct recycled_pool *new_recycle;
        new_recycle = (struct recycled_pool *)apr_palloc(pool_to_recycle,
                                                         sizeof(*new_recycle));
        new_recycle->pool = pool_to_recycle;
        for (;;) {
            /* Save queue_info->recycled_pool in local variable next because
             * new_recycle->next can be changed after apr_atomic_casptr
             * function call. For gory details see PR 44402.
             */
            struct recycled_pool *next = queue_info->recycled_pools;
            new_recycle->next = next;
            if (apr_atomic_casptr((void*)&(queue_info->recycled_pools),
                                  new_recycle, next) == next) {
                break;
            }
        }
    }

    /* Atomically increment the count of idle workers */
    for (;;) {
        prev_idlers = queue_info->idlers;
        if (apr_atomic_cas32(&(queue_info->idlers), prev_idlers + 1,
                             prev_idlers) == prev_idlers) {
            break;
        }
    }

    /* If this thread just made the idle worker count nonzero,
     * wake up the listener. */
    if (prev_idlers == 0) {
        rv = apr_thread_mutex_lock(queue_info->idlers_mutex);
        if (rv != APR_SUCCESS) {
            return rv;
        }
        rv = apr_thread_cond_signal(queue_info->wait_for_idler);
        if (rv != APR_SUCCESS) {
            apr_thread_mutex_unlock(queue_info->idlers_mutex);
            return rv;
        }
        rv = apr_thread_mutex_unlock(queue_info->idlers_mutex);
        if (rv != APR_SUCCESS) {
            return rv;
        }
    }

    return APR_SUCCESS;
}
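The idle-worker bump above is the classic CAS retry loop: snapshot the counter, try to install snapshot + 1, and retry if another thread won the race. A minimal standalone sketch of the same idiom; note that APR also ships apr_atomic_inc32 for exactly this case:

#include <apr_atomic.h>

/* Atomically increment *counter, returning the value we replaced.
 * Equivalent in effect to apr_atomic_inc32(counter). */
static apr_uint32_t cas_increment(volatile apr_uint32_t *counter)
{
    apr_uint32_t prev;
    for (;;) {
        prev = *counter;
        /* Install prev + 1 only if nobody changed *counter since the read. */
        if (apr_atomic_cas32(counter, prev + 1, prev) == prev)
            return prev;
    }
}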
Example #3
static void test_cas_equal_nonnull(abts_case *tc, void *data)
{
    apr_uint32_t casval = 12;
    apr_uint32_t oldval;

    oldval = apr_atomic_cas32(&casval, 23, 12);
    ABTS_INT_EQUAL(tc, 12, oldval);
    ABTS_INT_EQUAL(tc, 23, casval);
}
Example #4
static void test_cas_equal(abts_case *tc, void *data)
{
    apr_uint32_t casval = 0;
    apr_uint32_t oldval;

    oldval = apr_atomic_cas32(&casval, 12, 0);
    ABTS_INT_EQUAL(tc, 0, oldval);
    ABTS_INT_EQUAL(tc, 12, casval);
}
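Examples #3 and #4 only exercise the success path. A complementary sketch in the same abts style covering the failure path, where the comparand does not match, so apr_atomic_cas32 leaves the value untouched and returns the current contents (the test name is an assumption, not taken from the suite shown here):

static void test_cas_notequal(abts_case *tc, void *data)
{
    apr_uint32_t casval = 12;
    apr_uint32_t oldval;

    /* Comparand 2 does not match the stored 12, so no swap happens. */
    oldval = apr_atomic_cas32(&casval, 23, 2);
    ABTS_INT_EQUAL(tc, 12, oldval);
    ABTS_INT_EQUAL(tc, 12, casval);
}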
Example #5
/* Called by a thread to indicate it has completed a blocking operation and
 * is thus able to participate in a GC run again. Note that this needs some
 * special handling if the thread unblocks while a GC run is taking place. */
void MVM_gc_mark_thread_unblocked(MVMThreadContext *tc) {
    /* Try to set it from unable to running. */
    while (apr_atomic_cas32(&tc->gc_status, MVMGCStatus_NONE,
            MVMGCStatus_UNABLE) != MVMGCStatus_UNABLE) {
        /* We can't, presumably because a GC run is going on. We should wait
         * for that to finish before we go on, but without chewing CPU. */
        apr_thread_yield();
    }
}
Example #6
void
rawx_stats_rrd_lock(struct rawx_stats_rrd_s *rsr)
{
    /* Spin until we atomically flip the lock word from 0 (free) to 1 (held). */
    do {
        if (0 == apr_atomic_cas32(&(rsr->lock), 1, 0))
            return;
        apr_sleep(100); /* back off briefly before retrying */
    } while (1);
}
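This is a plain test-and-set spinlock: the CAS returns the previous value, so only the one caller that flips the word from 0 to 1 sees 0 and proceeds; everyone else sleeps briefly and retries. Paired with the unlock in Example #11, usage would look like this sketch (update_stats and the mutation are hypothetical):

static void update_stats(struct rawx_stats_rrd_s *rsr)
{
    rawx_stats_rrd_lock(rsr);    /* spin until we own rsr->lock   */
    /* ... mutate the shared RRD statistics here ... */
    rawx_stats_rrd_unlock(rsr);  /* CAS the word back from 1 to 0 */
}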
Example #7
/* Notifies a single thread that a GC run is starting. A thread that is
 * blocked is considered excluded from the run and is not counted. Returns
 * 1 if the signalled thread should be added to the finished countdown,
 * 0 otherwise. */
static MVMuint32 signal_one_thread(MVMThreadContext *tc, MVMThreadContext *to_signal) {

    /* Loop here since we may not succeed first time (e.g. the status of the
     * thread may change between the two ways we try to twiddle it). */
    while (1) {
        switch (to_signal->gc_status) {
            case MVMGCStatus_NONE:
                /* Try to set it from running to interrupted - the common case. */
                if (apr_atomic_cas32(&to_signal->gc_status, MVMGCStatus_INTERRUPT,
                        MVMGCStatus_NONE) == MVMGCStatus_NONE) {
                    GCORCH_LOG(tc, "Thread %d run %d : Signalled thread %d to interrupt\n", to_signal->thread_id);
                    return 1;
                }
                break;
            case MVMGCStatus_INTERRUPT:
                GCORCH_LOG(tc, "Thread %d run %d : thread %d already interrupted\n", to_signal->thread_id);
                return 0;
            case MVMGCStatus_UNABLE:
                /* Otherwise it's blocked; try to mark it stolen and take
                 * over its GC work ourselves. */
                if (apr_atomic_cas32(&to_signal->gc_status, MVMGCStatus_STOLEN,
                        MVMGCStatus_UNABLE) == MVMGCStatus_UNABLE) {
                    GCORCH_LOG(tc, "Thread %d run %d : A blocked thread %d spotted; work stolen\n", to_signal->thread_id);
                    add_work(tc, to_signal);
                    return 0;
                }
                break;
            /* this case occurs if a child thread is Stolen by its parent
             * before we get to it in the chain. */
            case MVMGCStatus_STOLEN:
                GCORCH_LOG(tc, "Thread %d run %d : thread %d already stolen (it was a spawning child)\n", to_signal->thread_id);
                return 0;
            default:
                MVM_panic(MVM_exitcode_gcorch, "invalid status %d in GC orchestrate\n", to_signal->gc_status);
                return 0;
        }
    }
}
Example #8
static void busyloop_cas32(tbox_t *tbox)
{
    apr_uint32_t val;

    do {
        /* Spin until we observe preval in *mem and succeed in swapping in
         * postval; yield the CPU after each failed attempt. */
        do {
            val = apr_atomic_cas32(tbox->mem, tbox->postval, tbox->preval);

            if (val != tbox->preval)
                apr_thread_yield();
            else
                break;
        } while (1);
    } while (--tbox->loop);
}
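tbox_t is the harness's parameter box; its fields can be inferred from the calls above (mem, preval, postval, loop), but the layout below is an assumption, not the suite's actual definition. A hedged sketch of a single-shot driver:

/* Assumed shape of the parameter box used by busyloop_cas32. */
typedef struct tbox_t {
    volatile apr_uint32_t *mem;  /* shared word the threads CAS on */
    apr_uint32_t preval;         /* value the CAS expects to find  */
    apr_uint32_t postval;        /* value installed on success     */
    apr_uint32_t loop;           /* number of successful swaps     */
} tbox_t;

static volatile apr_uint32_t shared_word = 0;

static void run_busyloop_demo(void)
{
    tbox_t tbox = { &shared_word, 0, 1, 1 };
    busyloop_cas32(&tbox);  /* one successful swap: shared_word goes 0 -> 1 */
}

With more than one iteration, a second box with mirrored preval/postval would typically run on another thread so the swaps can alternate.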
Example #9
/* Called by a thread to indicate it is about to enter a blocking operation.
 * This tells any thread that is coordinating a GC run that this thread will
 * be unable to participate. */
void MVM_gc_mark_thread_blocked(MVMThreadContext *tc) {
    /* This may need more than one attempt. */
    while (1) {
        /* Try to set it from running to unable - the common case. */
        if (apr_atomic_cas32(&tc->gc_status, MVMGCStatus_UNABLE,
                MVMGCStatus_NONE) == MVMGCStatus_NONE)
            return;

        /* The only way this can fail is if another thread just decided we're to
         * participate in a GC run. */
        if (tc->gc_status == MVMGCStatus_INTERRUPT)
            MVM_gc_enter_from_interrupt(tc);
        else
            MVM_panic(MVM_exitcode_gcorch, "Invalid GC status observed; aborting");
    }
}
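Together with MVM_gc_mark_thread_unblocked from Example #5, this brackets blocking operations so a GC run never has to wait on a thread stuck in a syscall. A hedged usage sketch (perform_blocking_io is hypothetical):

extern void perform_blocking_io(void);  /* hypothetical blocking call */

static void do_blocking_call(MVMThreadContext *tc) {
    MVM_gc_mark_thread_blocked(tc);    /* NONE -> UNABLE: opt out of GC   */
    perform_blocking_io();             /* may block for an arbitrary time */
    MVM_gc_mark_thread_unblocked(tc);  /* UNABLE -> NONE: rejoin, yielding
                                          until any GC run has finished   */
}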
Example #10
/**
 * Unreserves a thin-monitor lockword that was reserved by another thread.
 * Part of the thin monitor functions backing Java monitors.
 */
IDATA VMCALL hythread_unreserve_lock(hythread_thin_monitor_t *lockword_ptr) {
    U_32 lockword = *lockword_ptr;
    U_32 lockword_new;
    uint16 lock_id;
    hythread_t owner;
    IDATA status;
    I_32 append;

    // trylock used to prevent cyclic suspend deadlock
    // the java_monitor_enter calls safe_point between attempts.
    /*status = port_mutex_trylock(&TM_LOCK);
      if (status !=TM_ERROR_NONE) {
      return status;
      }*/
    
    if (IS_FAT_LOCK(lockword)) {
        return TM_ERROR_NONE;
    }
    lock_id = THREAD_ID(lockword);
    owner = hythread_get_thread(lock_id);
    CTRACE(("Unreserved other %d \n", ++unreserve_count/*, vm_get_object_class_name(lockword_ptr-1)*/));
    if (!IS_RESERVED(lockword) || IS_FAT_LOCK(lockword)) {
        // port_mutex_unlock(&TM_LOCK);
        return TM_ERROR_NONE;
    }
    // suspend owner 
    if (owner) {
        assert(owner);
        assert(hythread_get_id(owner) == lock_id);
        assert(owner != hythread_self());
        if (owner->state
                & (TM_THREAD_STATE_TERMINATED
                    | TM_THREAD_STATE_WAITING
                    | TM_THREAD_STATE_WAITING_INDEFINITELY
                    | TM_THREAD_STATE_WAITING_WITH_TIMEOUT
                    | TM_THREAD_STATE_SLEEPING
                    | TM_THREAD_STATE_PARKED
                    | TM_THREAD_STATE_SUSPENDED
                    | TM_THREAD_STATE_IN_MONITOR_WAIT))
        {
            append = 0;
        } else {
            append = RESERVED_BITMASK;
        }

        status = hythread_suspend_other(owner);
        if (status != TM_ERROR_NONE) {
            return status;
        }
    } else {
        append = 0;
    }

    if (!tm_properties || !tm_properties->use_soft_unreservation) {
        append = RESERVED_BITMASK;
    }

    // prepare new unreserved lockword and try to CAS it with old one.
    while (IS_RESERVED(lockword)) {
        assert(!IS_FAT_LOCK(lockword));
        CTRACE(("unreserving lock"));
        if (RECURSION(lockword) != 0) {
            lockword_new = (lockword | RESERVED_BITMASK);
            assert(RECURSION(lockword) > 0);
            assert(RECURSION(lockword_new) > 0);
            RECURSION_DEC(&lockword_new, lockword_new);
        } else {
            lockword_new = (lockword | append);
            lockword_new = lockword_new & 0x0000ffff;
        }
        if (lockword == apr_atomic_cas32((volatile apr_uint32_t *)lockword_ptr,
                                         (apr_uint32_t)lockword_new, lockword)) {
            CTRACE(("unreserved lock"));
            break;
        }
        lockword = *lockword_ptr;
    }

    // resume owner
    if (owner) {
        hythread_yield_other(owner);
        hythread_resume(owner);
    }

    /* status = port_mutex_unlock(&TM_LOCK);*/

    // Gregory - This lock, right after it was unreserved, may be
    // inflated by another thread and therefore instead of recursion
    // count and reserved flag it will have the fat monitor ID. The
    // assertion !IS_RESERVED(lockword) fails in this case. So it is
    // necessary to check first that monitor is not fat.
    // To avoid a race condition between checking two different
    // conditions inside the assert, the lockword contents have to be
    // loaded before checking.
//    lockword = *lockword_ptr;
//    assert(IS_FAT_LOCK(lockword) || !IS_RESERVED(lockword));
    return TM_ERROR_NONE;
}
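The publish step above is the standard CAS retry loop over a snapshot: compute a new lockword from the value last read, try to install it, and re-read on failure. A minimal standalone sketch of that loop with the transform factored out (the bitmask value is an assumption standing in for the real layout):

#include <apr_atomic.h>

#define DEMO_RESERVED_BITMASK 0x10000000u  /* assumed bit, for illustration */

static apr_uint32_t clear_reserved(apr_uint32_t word) {
    return word & ~DEMO_RESERVED_BITMASK;
}

static void publish_lockword(volatile apr_uint32_t *lockword_ptr) {
    apr_uint32_t lockword = *lockword_ptr;
    for (;;) {
        apr_uint32_t lockword_new = clear_reserved(lockword);
        if (apr_atomic_cas32(lockword_ptr, lockword_new, lockword) == lockword)
            break;                 /* nobody raced us; the new word is in  */
        lockword = *lockword_ptr;  /* lost the race: re-read and recompute */
    }
}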
Example #11
void
rawx_stats_rrd_unlock(struct rawx_stats_rrd_s *rsr)
{
    /* Release the spinlock taken in rawx_stats_rrd_lock: swap 1 back to 0. */
    apr_atomic_cas32(&(rsr->lock), 0, 1);
}
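Using CAS rather than a plain store for the release means unlocking a lock that is not held is a harmless no-op: the swap only fires when the word is currently 1. A hedged variant that reports whether the release actually happened (this helper is hypothetical, not part of the rawx source above):

static int rawx_stats_rrd_try_unlock(struct rawx_stats_rrd_s *rsr)
{
    /* Returns 1 if we flipped 1 -> 0, 0 if the lock was not held. */
    return apr_atomic_cas32(&(rsr->lock), 0, 1) == 1;
}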