// void Push(WorkStealQueue *q, Obj* elem)       // Obj is track
void Push(WorkStealQueue *q, int elem)       // Obj is track
{
    // Owner-thread push at the tail end (lock-free fast path).
    long t = readV(&q->tail);
    // Careful here since we might interleave with Steal.
    // This is no problem since we just conservatively check if there is
    // enough space left (t < head + size). However, Steal might just have
    // incremented head and we could potentially overwrite the old head
    // entry, so we always leave at least one extra 'buffer' element and
    // check (tail < head + size - 1). This also plays nicely with our
    // initial mask of 0, where size is 2^0 == 1, but the tasks array is
    // still null.
    //
    // FIX: the BUG3 variant checked (t < head + mask + 1), i.e. t < head + size,
    // which lets the spare slot be overwritten while a concurrent Steal is
    // still reading it. Use the documented correct bound instead.
    if (t < readV(&q->head) + q->mask   // == t < head + size - 1
            && t < q->MaxSize)
    {
        // q->elems[t & q->mask] = elem;       // they are all tracked
        MEMORY_VALUE[q->elems + (t & q->mask)] = elem;       // they are all tracked
        writeV(&q->tail, t + 1);       // only increment once we have initialized the task entry.
    }
    else
    {
        // failure: we need to resize or re-index
        //
        SyncPush(q, elem);      // second parameter needs to be tracked
    }
}
// _Bool Pop(WorkStealQueue *q, Obj **result)   // track results
_Bool Pop(WorkStealQueue *q, int * result)   // track results
{
    // Owner-thread pop from the tail end. Lock-free fast path: publish the
    // decremented tail FIRST, then verify against head; on an (almost)
    // empty queue, restore the tail and fall back to the locked SyncPop.
    // decrement the tail. Use local t for efficiency.
    //
    long t = readV(&q->tail) - 1;
    writeV(&q->tail, t);

    // insert a memory fence here if memory is not sequentially consistent
    //
    if (readV(&q->head) <= t)
    {
        // BUG:  writeV(&q->tail, t);

        // == (head <= tail)
        //
        // *result = q->elems[t & q->mask];    // result need to be tracked
        // (MEMORY_VALUE appears to be the verifier's flat heap model, with
        // q->elems holding a base offset rather than a real pointer.)
        *result = MEMORY_VALUE[q->elems + (t & q->mask)];    // result need to be tracked
        return 1;
    }
    else
    {
        // failure: either empty or single element interleaving with take
        //
        writeV(&q->tail, t + 1);             // restore the tail
        return SyncPop(q, result);   // do a single-threaded pop
    }
}
// _Bool Steal(WorkStealQueue *q, Obj **result)   // result is track
_Bool Steal(WorkStealQueue *q, int * result)   // result is track
{
    // Thief-side take from the head end. The mutex serializes thieves with
    // each other (and with the Sync* slow paths). Head is advanced BEFORE
    // the tail check so a racing owner Pop can observe the claim (the BUG
    // comment below marks the incorrect late-publish variant).
    _Bool found;
    pthread_mutex_lock(&q->cs);

    // ensure that at most one (foreign) thread writes to head
    // increment the head. Save in local h for efficiency
    //
    long h = readV(&q->head);
    writeV(&q->head, h + 1);

    // insert a memory fence here if memory is not sequentially consistent
    //
    if (h < readV(&q->tail)) {
        // == (h+1 <= tail) == (head <= tail)
        //
        // BUG: writeV(&q->head, h + 1);
        // *result = q->elems[h & q->mask];
        *result = MEMORY_VALUE[q->elems + (h & q->mask)];
        found = 1;
    }
    else {
        // failure: either empty or single element interleaving with pop
        //
        writeV(&q->head, h);              // restore the head
        found = 0;
    }
    pthread_mutex_unlock(&q->cs);
    return found;
}
// _Bool SyncPop(WorkStealQueue* q, Obj **result)   // result is tracked
_Bool SyncPop(WorkStealQueue* q, int * result)   // result is tracked
{
    // Locked slow-path pop, used by Pop() when the lock-free attempt fails.
    // Also normalizes an empty queue back to head == tail == 0.
    _Bool found;

    pthread_mutex_lock(&q->cs);

    // ensure that no Steal interleaves with this pop
    //
    long t = readV(&q->tail) - 1;
    writeV(&q->tail, t);
    if (readV(&q->head) <= t)
    {
        // == (head <= tail)
        //
        // *result = q->elems[t & q->mask];
        *result = MEMORY_VALUE[q->elems + (t & q->mask)];
        found = 1;
    }
    else
    {
        writeV(&q->tail, t + 1);       // restore tail
        found = 0;
    }
    if (readV(&q->head) > t)
    {
        // queue is empty: reset head and tail
        //
        writeV(&q->head, 0);
        writeV(&q->tail, 0);
        found = 0;
    }
    pthread_mutex_unlock(&q->cs);
    return found;
}
_Bool SyncPop(WorkStealQueue* q, Obj **result)
{
    // Locked slow-path pop (Obj* variant), used when the lock-free Pop()
    // attempt fails; also resets head/tail to 0 once the queue is empty.
    _Bool found;

    pthread_mutex_lock(&q->cs);

    // the lock ensures no Steal interleaves with this pop;
    // tentatively claim the last slot by decrementing the tail
    long t = readV(&q->tail) - 1;
    writeV(&q->tail, t);
    if (readV(&q->head) <= t)
    {
        // == (head <= tail): an element remains, take it
        *result = q->elems[t & q->mask];
        found = 1;
    }
    else
    {
        writeV(&q->tail, t + 1);   // nothing to take: restore the tail
        found = 0;
    }
    if (readV(&q->head) > t)
    {
        // queue is empty: normalize head and tail back to 0
        writeV(&q->head, 0);
        writeV(&q->tail, 0);
        found = 0;
    }
    pthread_mutex_unlock(&q->cs);
    return found;
}
_Bool Pop(WorkStealQueue *q, Obj **result)
{
    // Owner-thread pop from the tail end (Obj* variant). Lock-free fast
    // path: publish the decremented tail first, then verify against head.
    long t = readV(&q->tail) - 1;
    writeV(&q->tail, t);

    // a memory fence is needed here if memory is not sequentially consistent

    if (readV(&q->head) <= t)
    {
        // == (head <= tail): at least one element remains, take it
        *result = q->elems[t & q->mask];
        return 1;
    }
    else
    {
        // empty, or a single element racing with a Steal:
        // restore the tail and retry under the lock
        writeV(&q->tail, t + 1);
        return SyncPop(q, result);
    }
}
void Push(int* elem)
{
    // Owner-thread push at the tail of the global queue 'q' (lock-free fast path).
    long t = readV(&q.tail);
    // Careful here since we might interleave with Steal.
    // This is no problem since we just conservatively check if there is
    // enough space left (t < head + size). However, Steal might just have
    // incremented head and we could potentially overwrite the old head
    // entry, so we always leave at least one extra 'buffer' element and
    // check (tail < head + size - 1). This also plays nicely with our
    // initial mask of 0, where size is 2^0 == 1, but the tasks array is
    // still null.
    //
    // FIX: dropped the deliberate BUG3 bound (t < head + mask + 1, i.e.
    // t < head + size), which could overwrite the spare slot a concurrent
    // Steal is still reading.
    if (t < readV(&q.head) + q.mask   // == t < head + size - 1
            && t < q.MaxSize)
    {
        q.elems[t & q.mask] = elem;
        writeV(&q.tail, t + 1);       // only increment once we have initialized the task entry.
    }
    else
    {
        // failure: we need to resize or re-index
        //
        // FIX: this variant's SyncPush operates on the global 'q' and takes
        // only the element (see SyncPush(int*)); passing 'q' as a first
        // argument did not match that signature.
        SyncPush(elem);
    }
}
_Bool Steal(WorkStealQueue *q, Obj **result)
{
    // Thief-side take from the head end (Obj* variant). The mutex
    // serializes concurrent thieves; head is advanced before the tail
    // check so a racing owner Pop observes the claim.
    _Bool found;
    pthread_mutex_lock(&q->cs);

    // claim the head slot first; keep the old head in h

    long h = readV(&q->head);
    writeV(&q->head, h + 1);

    // a memory fence is needed here if memory is not sequentially consistent

    if (h < readV(&q->tail)) {
        // == (h+1 <= tail) == (head <= tail): the claimed slot is valid
        *result = q->elems[h & q->mask];
        found = 1;
    }
    else {
        // empty, or a single element racing with a pop: undo the claim
        writeV(&q->head, h);
        found = 0;
    }
    pthread_mutex_unlock(&q->cs);
    return found;
}
_Bool Pop(int **result)
{
    // Owner-thread pop from the tail of the global queue 'q'.
    // decrement the tail. Use local t for efficiency.
    //
    long t = readV(&q.tail) - 1;
    writeV(&q.tail, t);

    // insert a memory fence here if memory is not sequentially consistent
    //
    if (readV(&q.head) <= t)
    {
        // == (head <= tail)
        //
        *result = q.elems[t & q.mask];
        return 1;
    }
    else
    {
        // failure: either empty or single element interleaving with take
        //
        writeV(&q.tail, t + 1);             // restore the tail
        // FIX: this variant's SyncPop operates on the global 'q' and takes
        // only the result pointer (see SyncPop(int**)); passing 'q' as a
        // first argument did not match that signature.
        return SyncPop(result);   // do a single-threaded pop
    }
}
_Bool SyncPop(int **result)
{
    // Locked slow-path pop on the global queue 'q', used when the lock-free
    // Pop() attempt fails; also resets head/tail to 0 when the queue empties.
    _Bool found;

    pthread_mutex_lock(&q.cs);

    // ensure that no Steal interleaves with this pop
    //
    long t = readV(&q.tail) - 1;
    writeV(&q.tail, t);
    if (readV(&q.head) <= t)
    {
        // == (head <= tail)
        //
        *result = q.elems[t & q.mask];
        found = 1;
    }
    else
    {
        writeV(&q.tail, t + 1);       // restore tail
        found = 0;
    }
    if (readV(&q.head) > t)
    {
        // queue is empty: reset head and tail
        //
        writeV(&q.head, 0);
        writeV(&q.tail, 0);
        found = 0;
    }
    pthread_mutex_unlock(&q.cs);
    return found;
}
_Bool Steal(int **result)
{
    // Thief-side take from the head of the global queue 'q'. The mutex
    // serializes thieves; head is published before the tail check so a
    // racing owner Pop can see the claim (see the BUG marker below).
    _Bool found;
    pthread_mutex_lock(&q.cs);

    // ensure that at most one (foreign) thread writes to head
    // increment the head. Save in local h for efficiency
    //
    long h = readV(&q.head);
    writeV(&q.head, h + 1);

    // insert a memory fence here if memory is not sequentially consistent
    //
    if (h < readV(&q.tail)) {
        // == (h+1 <= tail) == (head <= tail)
        //
        // BUG: writeV(&q.head, h + 1);
        *result = q.elems[h & q.mask];
        found = 1;
    }
    else {
        // failure: either empty or single element interleaving with pop
        //
        writeV(&q.head, h);              // restore the head
        found = 0;
    }
    pthread_mutex_unlock(&q.cs);
    return found;
}
// void SyncPush(WorkStealQueue *q, Obj* elem)     // elem is tracked
void SyncPush(WorkStealQueue *q, int elem)     // elem is tracked
{
    // Locked slow-path push, used by Push() when the bound check fails.
    // Normalizes head/tail, doubles the buffer when (nearly) full, then
    // appends the element.
    pthread_mutex_lock(&q->cs);
    // ensure that no Steal interleaves here
    // cache head, and calculate number of tasks
    //
    long h = readV(&q->head);
    long count = readV(&q->tail) - h;

    // normalize indices
    //
    h = h & q->mask;           // normalize head
    writeV(&q->head, h);
    writeV(&q->tail, h + count);

    // check if we need to enlarge the tasks
    //
    if (count >= q->mask)
    {
        // == (count >= size-1)
        //
        long newsize = (q->mask == 0 ? q->InitialSize : 2 * (q->mask + 1));

        assert(newsize < q->MaxSize);

        // Obj ** newtasks = malloc(newsize * sizeof(Obj*));    // newtasks is track
        // (element size is 1 here, presumably because the flat MEMORY_VALUE
        // array models one cell per element -- TODO confirm against the
        // MEMORY_VALUE / __VERIFIER_atomic_malloc declarations)
        int newtasks = __VERIFIER_atomic_malloc(newsize * 1);    // newtasks is track
        long i;
        for (i = 0; i < count; i++)
        {
            // newtasks[i] = q->elems[(h + i) & q->mask];
            MEMORY_VALUE[newtasks + i] = MEMORY_VALUE[q->elems + ((h + i) & q->mask)];
        }
        // free(q->elems);       // is track
        __VERIFIER_atomic_free(q->elems);       // is track
        q->elems = newtasks;
        q->mask = newsize - 1;
        writeV(&q->head, 0);
        writeV(&q->tail, count);
    }

    assert(count < q->mask);

    // push the element
    //
    long t = readV(&q->tail);
    // q->elems[t & q->mask] = elem;    // they are all tracked
    MEMORY_VALUE[q->elems + (t & q->mask)] = elem;    // they are all tracked
    writeV(&q->tail, t + 1);
    pthread_mutex_unlock(&q->cs);
}
void SyncPush(WorkStealQueue *q, Obj* elem)
{
    // Locked slow-path push (Obj* variant): normalizes head/tail, doubles
    // the task buffer when (nearly) full, then appends the element.
    pthread_mutex_lock(&q->cs);

    // the lock ensures no Steal interleaves here;
    // cache head and compute the number of queued tasks
    long h = readV(&q->head);
    long count = readV(&q->tail) - h;

    // normalize indices into [0, mask]

    h = h & q->mask;
    writeV(&q->head, h);
    writeV(&q->tail, h + count);

    // check whether the task buffer must be enlarged

    if (count >= q->mask)
    {
        // == (count >= size - 1): grow to the next power of two
        long newsize = (q->mask == 0 ? q->InitialSize : 2 * (q->mask + 1));

        assert(newsize < q->MaxSize);

        // NOTE(review): malloc result is not checked for NULL
        Obj ** newtasks = malloc(newsize * sizeof(Obj*));
        long i;
        for (i = 0; i < count; i++)
        {
            newtasks[i] = q->elems[(h + i) & q->mask];
        }
        free(q->elems);
        q->elems = newtasks;
        q->mask = newsize - 1;
        writeV(&q->head, 0);
        writeV(&q->tail, count);
    }

    assert(count < q->mask);

    // append the element at the (possibly relocated) tail

    long t = readV(&q->tail);
    q->elems[t & q->mask] = elem;
    writeV(&q->tail, t + 1);
    pthread_mutex_unlock(&q->cs);
}
void SyncPush(int* elem)
{
    // Locked slow-path push on the global queue 'q': normalizes head/tail,
    // doubles the buffer when (nearly) full, then appends the element.
    pthread_mutex_lock(&q.cs);
    // ensure that no Steal interleaves here
    // cache head, and calculate number of tasks
    //
    long h = readV(&q.head);
    long count = readV(&q.tail) - h;

    // normalize indices
    //
    h = h & q.mask;           // normalize head
    writeV(&q.head, h);
    writeV(&q.tail, h + count);

    // check if we need to enlarge the tasks
    //
    if (count >= q.mask)
    {
        // == (count >= size-1)
        //
        long newsize = (q.mask == 0 ? q.InitialSize : 2 * (q.mask + 1));

        assert(newsize < q.MaxSize);

        // NOTE(review): malloc result is not checked for NULL
        int ** newtasks = malloc(newsize * sizeof(int*));
        long i;
        for (i = 0; i < count; i++)
        {
            newtasks[i] = q.elems[(h + i) & q.mask];
        }
        free(q.elems);
        q.elems = newtasks;
        q.mask = newsize - 1;
        writeV(&q.head, 0);
        writeV(&q.tail, count);
    }

    assert(count < q.mask);

    // push the element
    //
    long t = readV(&q.tail);
    q.elems[t & q.mask] = elem;
    writeV(&q.tail, t + 1);
    pthread_mutex_unlock(&q.cs);
}
void Push(WorkStealQueue *q, Obj* elem)
{
    // Owner-thread push at the tail end (Obj* variant, lock-free fast path).
    long t = readV(&q->tail);
    // Leave one spare 'buffer' slot: a concurrent Steal may already have
    // advanced head, and writing at head+size could clobber the entry it is
    // still reading.
    // FIX: the preprocessed (buggy) check allowed t < head + mask + 1,
    // i.e. t < head + size; use t < head + size - 1 == t < head + mask.
    if (t < readV(&q->head) + q->mask && t < q->MaxSize)
    {
        q->elems[t & q->mask] = elem;
        writeV(&q->tail, t + 1);   // publish only after the slot is initialized
    }
    else
    {
        // slow path: grow / re-index the queue under the lock
        SyncPush(q, elem);
    }
}
// Beispiel #16 ("Example #16") -- residue of a snippet-corpus separator; the
// "0" below is separator metadata, and the following ODMatrix code appears to
// belong to a different project (SUMO).
// 0
void
ODMatrix::loadMatrix(OptionsCont& oc) {
    /// Loads all configured O/D matrices: first the PTV-format files listed
    /// under "od-matrix-files", then the Amitran XML files listed under
    /// "od-amitran-files". Throws ProcessError on unreadable files or
    /// unsupported matrix types.
    const std::vector<std::string> matrixFiles = oc.getStringVector("od-matrix-files");
    for (const std::string& file : matrixFiles) {
        LineReader lr(file);
        if (!lr.good()) {
            throw ProcessError("Could not open '" + file + "'.");
        }
        // The first line announces the matrix format; keep only the part
        // before the first ';'.
        std::string type = lr.readLine();
        const std::string::size_type sep = type.find(';');
        if (sep != std::string::npos) {
            type = type.substr(0, sep);
        }
        const bool haveM = type.find('M') != std::string::npos;
        const bool haveN = type.find('N') != std::string::npos;
        if (type.length() > 1 && type[1] == 'V') {
            // PTV 'V'-matrix
            if (haveN) {
                throw ProcessError("'" + file + "' does not contain the needed information about the time described.");
            }
            readV(lr, oc.getFloat("scale"), oc.getString("vtype"), haveM);
        } else if (type.length() > 1 && type[1] == 'O') {
            // PTV 'O'-matrix
            if (haveN) {
                throw ProcessError("'" + file + "' does not contain the needed information about the time described.");
            }
            readO(lr, oc.getFloat("scale"), oc.getString("vtype"), haveM);
        } else {
            throw ProcessError("'" + file + "' uses an unknown matrix type '" + type + "'.");
        }
    }
    const std::vector<std::string> amitranFiles = oc.getStringVector("od-amitran-files");
    for (const std::string& file : amitranFiles) {
        if (!FileHelpers::isReadable(file)) {
            throw ProcessError("Could not access matrix file '" + file + "' to load.");
        }
        PROGRESS_BEGIN_MESSAGE("Loading matrix in Amitran format from '" + file + "'");
        ODAmitranHandler handler(*this, file);
        if (!XMLSubSys::runParser(handler, file)) {
            PROGRESS_FAILED_MESSAGE();
        } else {
            PROGRESS_DONE_MESSAGE();
        }
    }
}