Code example #1
File: heap.c Project: Ikulagin/transmem
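/* =============================================================================
 * TMheap_insert
 * -- Returns TRUE on success, else FALSE
 * =============================================================================
 */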
TM_SAFE
bool_t
TMheap_insert (  heap_t* heapPtr, void* dataPtr)
{
    long size = (long)TM_SHARED_READ(heapPtr->size);
    long capacity = (long)TM_SHARED_READ(heapPtr->capacity);

    if ((size + 1) >= capacity) {
        long newCapacity = capacity * 2;
        void** newElements = (void**)TM_MALLOC(newCapacity * sizeof(void*));
        if (newElements == NULL) {
            return FALSE;
        }
        TM_SHARED_WRITE(heapPtr->capacity, newCapacity);
        long i;
        void** elements = (void **)TM_SHARED_READ_P(heapPtr->elements);
        for (i = 0; i <= size; i++) {
            newElements[i] = (void*)TM_SHARED_READ_P(elements[i]);
        }
        TM_FREE(elements); /* release the old array via the TM-aware allocator */
        TM_SHARED_WRITE_P(heapPtr->elements, newElements);
    }

    size++;
    TM_SHARED_WRITE(heapPtr->size, size);
    void** elements = (void**)TM_SHARED_READ_P(heapPtr->elements);
    TM_SHARED_WRITE_P(elements[size], dataPtr);
    siftUp(heapPtr, size);

    return TRUE;
}
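A minimal usage sketch, assuming the GCC TM dialect this port uses elsewhere (cf. the __transaction_atomic block in code example #15 from the same project); heap_insert_atomic is an illustrative name:

bool_t
heap_insert_atomic (heap_t* heapPtr, void* itemPtr)
{
    bool_t ok;
    __transaction_atomic {
        /* The reads, the optional resize, and the sift-up commit as one step. */
        ok = TMheap_insert(heapPtr, itemPtr);
    }
    return ok;
}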
Code example #2
File: queue.c Project: nathanielherman/sto-stamp
/* =============================================================================
 * TMqueue_push
 * =============================================================================
 */
bool_t
TMqueue_push (TM_ARGDECL  queue_t* queuePtr, void* dataPtr)
{
    long pop      = (long)TM_SHARED_READ(queuePtr->pop);
    long push     = (long)TM_SHARED_READ(queuePtr->push);
    long capacity = (long)TM_SHARED_READ(queuePtr->capacity);

    assert(pop != push);

    /* Check whether we need to resize */
    long newPush = (push + 1) % capacity;
    if (newPush == pop) {
        long newCapacity = capacity * QUEUE_GROWTH_FACTOR;
        void** newElements = (void**)TM_MALLOC(newCapacity * sizeof(void*));
        if (newElements == NULL) {
            return FALSE;
        }

        long dst = 0;
        void** elements = (void**)TM_SHARED_READ_P(queuePtr->elements);
        if (pop < push) {
            long src;
            for (src = (pop + 1); src < push; src++, dst++) {
                newElements[dst] = (void*)TM_SHARED_READ_P(elements[src]);
            }
        } else {
            long src;
            for (src = (pop + 1); src < capacity; src++, dst++) {
                newElements[dst] = (void*)TM_SHARED_READ_P(elements[src]);
            }
            for (src = 0; src < push; src++, dst++) {
                newElements[dst] = (void*)TM_SHARED_READ_P(elements[src]);
            }
        }

        TM_FREE(elements);
        TM_SHARED_WRITE_P(queuePtr->elements, newElements);
        TM_SHARED_WRITE(queuePtr->pop,      newCapacity - 1);
        TM_SHARED_WRITE(queuePtr->capacity, newCapacity);
        push = dst;
        newPush = push + 1; /* no modulo needed: the queue was just enlarged */

    }

    void** elements = (void**)TM_SHARED_READ_P(queuePtr->elements);
    TM_SHARED_WRITE_P(elements[push], dataPtr);
    TM_SHARED_WRITE(queuePtr->push, newPush);

    return TRUE;
}
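A usage sketch, assuming TM_BEGIN/TM_END and the TMQUEUE_PUSH wrapper (which splices in TM_ARG) are the usual STAMP macros; here FALSE can only mean TM_MALLOC failed while growing the queue:

    TM_BEGIN();
    if (TMQUEUE_PUSH(queuePtr, dataPtr) == FALSE) {
        /* allocation failure inside the transaction */
    }
    TM_END();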
Code example #3
File: hashtable.c Project: kohler/stamp-mp
/* =============================================================================
 * TMhashtable_remove
 * -- Returns TRUE if successful, else FALSE
 * =============================================================================
 */
bool_t
TMhashtable_remove (TM_ARGDECL  hashtable_t* hashtablePtr, void* keyPtr)
{
    long numBucket = hashtablePtr->numBucket;
    long i = hashtablePtr->hash(keyPtr) % numBucket;
    list_t* chainPtr = hashtablePtr->buckets[i];
    pair_t* pairPtr;
    pair_t removePair;

    removePair.firstPtr = keyPtr;
    pairPtr = (pair_t*)TMLIST_FIND(chainPtr, &removePair);
    if (pairPtr == NULL) {
        return FALSE;
    }

    bool_t status = TMLIST_REMOVE(chainPtr, &removePair);
    assert(status);
    TMPAIR_FREE(pairPtr);

#ifdef HASHTABLE_SIZE_FIELD
    TM_SHARED_WRITE(hashtablePtr->size,
                    (long)TM_SHARED_READ(hashtablePtr->size)-1);
    assert(hashtablePtr->size >= 0);
#endif

    return TRUE;
}
Code example #4
File: hashtable.c Project: kohler/stamp-mp
/* =============================================================================
 * TMhashtable_insert
 * =============================================================================
 */
bool_t
TMhashtable_insert (TM_ARGDECL
                    hashtable_t* hashtablePtr, void* keyPtr, void* dataPtr)
{
    long numBucket = hashtablePtr->numBucket;
    long i = hashtablePtr->hash(keyPtr) % numBucket;

    pair_t findPair;
    findPair.firstPtr = keyPtr;
    pair_t* pairPtr = (pair_t*)TMLIST_FIND(hashtablePtr->buckets[i], &findPair);
    if (pairPtr != NULL) {
        return FALSE;
    }

    pair_t* insertPtr = TMPAIR_ALLOC(keyPtr, dataPtr);
    if (insertPtr == NULL) {
        return FALSE;
    }

    /* Add new entry  */
    if (TMLIST_INSERT(hashtablePtr->buckets[i], insertPtr) == FALSE) {
        TMPAIR_FREE(insertPtr);
        return FALSE;
    }

#ifdef HASHTABLE_SIZE_FIELD
    long newSize = (long)TM_SHARED_READ(hashtablePtr->size) + 1;
    assert(newSize > 0);
    TM_SHARED_WRITE(hashtablePtr->size, newSize);
#endif

    return TRUE;
}
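Both hashtable operations compute the bucket index outside any TM accessor, which presumes numBucket and buckets[] are fixed after construction, and then delegate the chain update to the transactional list. A minimal sketch, assuming the usual STAMP-style TMHASHTABLE_INSERT wrapper that splices in TM_ARG:

    TM_BEGIN();
    if (TMHASHTABLE_INSERT(hashtablePtr, keyPtr, dataPtr) == FALSE) {
        /* duplicate key, or pair allocation failed */
    }
    TM_END();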
Code example #5
File: list.c Project: nmldiegues/proteustm
/* =============================================================================
 * TMlist_insert
 * -- Return TRUE on success, else FALSE
 * =============================================================================
 */
bool_t
TMlist_insert (TM_ARGDECL  list_t* listPtr, void* dataPtr)
{
    list_node_t* prevPtr;
    list_node_t* nodePtr;
    list_node_t* currPtr;

    prevPtr = TMfindPrevious(TM_ARG  listPtr, dataPtr);
    currPtr = (list_node_t*)TM_SHARED_READ_P(prevPtr->nextPtr);

#ifdef LIST_NO_DUPLICATES
    if ((currPtr != NULL) &&
        listPtr->compare(currPtr->dataPtr, dataPtr) == 0) {
        return FALSE;
    }
#endif

    nodePtr = TMallocNode(TM_ARG  dataPtr);
    if (nodePtr == NULL) {
        return FALSE;
    }

    nodePtr->nextPtr = currPtr;
    TM_SHARED_WRITE_P(prevPtr->nextPtr, nodePtr);
    TM_SHARED_WRITE(listPtr->size, (TM_SHARED_READ(listPtr->size) + 1));

    return TRUE;
}
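In the STAMP lineage these list functions come from, the TMLIST_FIND / TMLIST_INSERT / TMLIST_REMOVE macros used by the hashtable examples above are thin wrappers that splice TM_ARG into calls to exactly these functions, so a whole bucket update runs inside the caller's transaction.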
Code example #6
File: tm_counter.c Project: rennery/shootgameproject
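/* =============================================================================
 * func_count
 * -- Transactionally increments shared 'my_counter' until it passes 'max_count'
 * =============================================================================
 */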
void
func_count (void* argPtr)
{

  TM_THREAD_ENTER();

  while (1) {
    int stop_counting = 0;

    //pair_t* coordinatePairPtr;
    TM_BEGIN();

    long local_counter = (long)TM_SHARED_READ(my_counter);
    local_counter++;

    if (local_counter > max_count)
      stop_counting = 1;

    TM_SHARED_WRITE(my_counter, local_counter);

    TM_END();

    //pthread_yield();

    if (stop_counting == 1) {
      break;
    }

  } /* end of while */

  TM_THREAD_EXIT();
}
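Note the shape of the loop: the shared read, the increment, and the write-back all sit between TM_BEGIN() and TM_END(), while the decision to leave the loop is taken on the thread-local flag stop_counting only after the transaction commits, so no control flow escapes the atomic section.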
Code example #7
/* =============================================================================
 * reservation_addToTotal
 * -- Adds if 'num' > 0, removes if 'num' < 0;
 * -- Returns TRUE on success, else FALSE
 * =============================================================================
 */
bool_t
reservation_addToTotal (TM_ARGDECL  reservation_t* reservationPtr, int num)
{
#ifdef reservation2
    return reservationPtr->reservation_addToTotal(TM_ARG num);
#else
    int numFree = TM_SHARED_READ(reservationPtr->numFree);

    if (numFree + num < 0) {
        return FALSE;
    }
    
    int numTotal = TM_SHARED_READ(reservationPtr->numTotal);

    TM_SHARED_WRITE(reservationPtr->numTotal, numTotal + num);
    TM_SHARED_WRITE(reservationPtr->numFree, numFree + num);
    CHECK_RESERVATION(reservationPtr);
    return TRUE;
#endif
}
Code example #8
/* =============================================================================
 * reservation_cancel
 * -- Returns TRUE on success, else FALSE
 * =============================================================================
 */
bool_t
reservation_cancel (TM_ARGDECL  reservation_t* reservationPtr)
{
#ifdef reservation2
    return reservationPtr->reservation_cancel(TM_ARG_ALONE);
#else
    int numUsed = TM_SHARED_READ(reservationPtr->numUsed);

    if (numUsed < 1) {
        return FALSE;
    }

    TM_SHARED_WRITE(reservationPtr->numUsed, numUsed-1);
    TM_SHARED_WRITE(reservationPtr->numFree,
                    TM_SHARED_READ(reservationPtr->numFree)+1);

    CHECK_RESERVATION(reservationPtr);

    return TRUE;
#endif
}
Code example #9
/* =============================================================================
 * reservation_updatePrice
 * -- Failure if 'price' < 0
 * -- Returns TRUE on success, else FALSE
 * =============================================================================
 */
bool_t
reservation_updatePrice (TM_ARGDECL  reservation_t* reservationPtr, int newPrice)
{
#ifdef reservation2
    return reservationPtr->reservation_update_price(TM_ARG newPrice);
#else
    if (newPrice < 0) {
        return FALSE;
    }
    TM_SHARED_WRITE(reservationPtr->price, newPrice);
    CHECK_RESERVATION(reservationPtr);
    return TRUE;
#endif
}
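A sketch of composing these reservation operations in a single transaction, using the same TM_ARG call convention seen in the examples above (the argument values are illustrative):

    TM_BEGIN();
    if (reservation_addToTotal(TM_ARG  reservationPtr, 10) == TRUE) {
        /* the price update commits atomically with the capacity change */
        reservation_updatePrice(TM_ARG  reservationPtr, 150);
    }
    TM_END();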
Code example #10
File: queue.c Project: nathanielherman/sto-stamp
/* =============================================================================
 * TMqueue_pop
 * =============================================================================
 */
void*
TMqueue_pop (TM_ARGDECL  queue_t* queuePtr)
{
    long pop      = (long)TM_SHARED_READ(queuePtr->pop);
    long push     = (long)TM_SHARED_READ(queuePtr->push);
    long capacity = (long)TM_SHARED_READ(queuePtr->capacity);

    long newPop = (pop + 1) % capacity;
    if (newPop == push) {
        return NULL;
    }

    void** elements = (void**)TM_SHARED_READ_P(queuePtr->elements);
    void* dataPtr = (void*)TM_SHARED_READ_P(elements[newPop]);
    TM_SHARED_WRITE(queuePtr->pop, newPop);

    return dataPtr;
}
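Together with TMqueue_push (code example #2) this pins down the representation: pop indexes the slot just before the head, so the queue is empty when (pop + 1) % capacity == push and full when (push + 1) % capacity == pop, which is why one slot always stays unused and why the push path resizes on exactly that condition.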
Code example #11
File: list.c Project: nmldiegues/proteustm
/* =============================================================================
 * TMlist_remove
 * -- Returns TRUE if successful, else FALSE
 * =============================================================================
 */
bool_t
TMlist_remove (TM_ARGDECL  list_t* listPtr, void* dataPtr)
{
    list_node_t* prevPtr;
    list_node_t* nodePtr;

    prevPtr = TMfindPrevious(TM_ARG  listPtr, dataPtr);

    nodePtr = (list_node_t*)TM_SHARED_READ_P(prevPtr->nextPtr);
    if ((nodePtr != NULL) &&
        (listPtr->compare(nodePtr->dataPtr, dataPtr) == 0))
    {
        TM_SHARED_WRITE_P(prevPtr->nextPtr, TM_SHARED_READ_P(nodePtr->nextPtr));
        TM_SHARED_WRITE_P(nodePtr->nextPtr, (struct list_node*)NULL);
        TMfreeNode(TM_ARG  nodePtr);
        TM_SHARED_WRITE(listPtr->size, (TM_SHARED_READ(listPtr->size) - 1));
        assert(listPtr->size >= 0);
        return TRUE;
    }

    return FALSE;
}
Code example #12
File: array.c Project: nmldiegues/stamp-rtm
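/* =============================================================================
 * client_run
 * -- Per-thread benchmark loop: each transaction increments one random array
 *    slot, decrements another, and does tunable extra reads and local work
 * =============================================================================
 */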
void client_run (void* argPtr) {
    TM_THREAD_ENTER();

    /*long id = thread_getId();

    volatile long* ptr1 = &(global_array[0].value);
    volatile long* ptr2 = &(global_array[100].value);
    long tt = 0;
    if (id == 0) {
        while (1) {
            long v1 = 0;
            long v2 = 0;
            acquire_write(&(local_th_data[phys_id]), &the_lock);
            *ptr1 = (*ptr1) + 1;

            int f = 1;
            int ii;
            for(ii = 1; ii <= 100000000; ii++)
            {
                f *= ii;
            }
            tt += f;

            *ptr2 = (*ptr2) + 1;
            v1 = global_array[0].value;
            v2 = global_array[100].value;
            release_write(cluster_id, &(local_th_data[phys_id]), &the_lock); \
                if (v1 != v2) {
                    printf("different2! %ld %ld\n", v1, v2);
                    exit(1);
                }

        }
    } else {
        while (1) {
            int i = 0;
            long sum = 0;
            for (; i < 100000; i++) {
                int status = _xbegin();
                if (status == _XBEGIN_STARTED) {
                    sum += *ptr1;
                    sum += *ptr2;
                    _xend();
                }
            }
            while(1) {
                long v1 = 0;
                long v2 = 0;
                int status = _xbegin();
                if (status == _XBEGIN_STARTED) {
                    v1 = *ptr1;
                    v2 = *ptr2;
                    _xend();
                if (v1 != v2) {
                    printf("different! %ld %ld\n", v1, v2);
                    exit(1);
                }

                }
            }
        }
    }
    printf("%ld", tt);*/


    random_t* randomPtr = random_alloc();
    random_seed(randomPtr, time(0));

    // unsigned long myId = thread_getId();
    // long numThread = *((long*)argPtr);
    long operations = (long)global_params[PARAM_OPERATIONS] / (long)global_params[PARAM_THREADS];
    long interval = (long)global_params[PARAM_INTERVAL];
    printf("operations: %ld \tinterval: %ld\n", operations, interval);

    long total = 0;
    long total2 = 0;

    long i = 0;
    for (; i < operations; i++) {
        long random_number = ((long) random_generate(randomPtr)) % ((long)global_params[PARAM_SIZE]);
        long random_number2 = ((long) random_generate(randomPtr)) % ((long)global_params[PARAM_SIZE]);
        if (random_number == random_number2) {
            random_number2 = (random_number2 + 1) % ((long)global_params[PARAM_SIZE]);
        }
        TM_BEGIN();
        long r1 = (long)TM_SHARED_READ(global_array[random_number].value);
        long r2 = (long)TM_SHARED_READ(global_array[random_number2].value);

        int repeat = 0;
        for (; repeat < (long) global_params[PARAM_CONTENTION]; repeat++) {
            total2 += (long) TM_SHARED_READ(global_array[((long) random_generate(randomPtr)) % ((long)global_params[PARAM_SIZE])].value);
        }
        r1 = r1 + 1;
        r2 = r2 - 1;

        int f = 1;
        int ii;
        for(ii = 1; ii <= ((unsigned int) global_params[PARAM_WORK]); ii++)
        {
            f *= ii;
        }
        total += f / 1000000;

        TM_SHARED_WRITE(global_array[random_number].value, r1);
        TM_SHARED_WRITE(global_array[random_number2].value, r2);
        TM_END();

        long k = 0;
        for (;k < (long)global_params[PARAM_INTERVAL]; k++) {
            long ru = ((long) random_generate(randomPtr)) % 2;
            total += ru;
        }

    }

    TM_THREAD_EXIT();
    printf("ru ignore %ld - %ld\n", total, total2);
}
Code example #13
File: genScalData.c Project: shadyalaa/GreenCM
/* =============================================================================
 * genScalData
 * =============================================================================
 */
void
genScalData (void* argPtr)
{
    TM_THREAD_ENTER();

    graphSDG* SDGdataPtr = (graphSDG*)argPtr;

    long myId = thread_getId();
    long numThread = thread_getNumThread();

    /*
     * STEP 0: Create the permutations required to randomize the vertices
     */

    random_t* stream = PRANDOM_ALLOC();
    assert(stream);
    PRANDOM_SEED(stream, myId);

    ULONGINT_T* permV; /* the vars associated with the graph tuple */

    if (myId == 0) {
        permV = (ULONGINT_T*)P_MALLOC(TOT_VERTICES * sizeof(ULONGINT_T));
        assert(permV);
        global_permV = permV;
    }

    thread_barrier_wait();

    permV = global_permV;

    long i;
    long i_start;
    long i_stop;
    createPartition(0, TOT_VERTICES, myId, numThread, &i_start, &i_stop);

    /* Initialize the array */
    for (i = i_start; i < i_stop; i++) {
        permV[i] = i;
    }

    thread_barrier_wait();

    for (i = i_start; i < i_stop; i++) {
        long t1 = PRANDOM_GENERATE(stream);
        long t = i + t1 % (TOT_VERTICES - i);
        if (t != i) {
        	AL_LOCK(0);
            TM_BEGIN();
            long t2 = (long)TM_SHARED_READ(permV[t]);
            TM_SHARED_WRITE(permV[t], TM_SHARED_READ(permV[i]));
            TM_SHARED_WRITE(permV[i], t2);
            TM_END();
        }
    }

    /*
     * STEP 1: Create Cliques
     */

    long* cliqueSizes;

    long estTotCliques = ceil(1.5 * TOT_VERTICES / ((1+MAX_CLIQUE_SIZE)/2));

    /*
     * Allocate mem for Clique array
     * Estimate number of clique required and pad by 50%
     */
    if (myId == 0) {
        cliqueSizes = (long*)P_MALLOC(estTotCliques * sizeof(long));
        assert(cliqueSizes);
        global_cliqueSizes = cliqueSizes;
    }

    thread_barrier_wait();

    cliqueSizes = global_cliqueSizes;

    createPartition(0, estTotCliques, myId, numThread, &i_start, &i_stop);

    /* Generate random clique sizes. */
    for (i = i_start; i < i_stop; i++) {
        cliqueSizes[i] = 1 + (PRANDOM_GENERATE(stream) % MAX_CLIQUE_SIZE);
    }

    thread_barrier_wait();

    long totCliques = 0;

    /*
     * Allocate memory for cliqueList
     */

    ULONGINT_T* lastVsInCliques;
    ULONGINT_T* firstVsInCliques;

    if (myId == 0) {

        lastVsInCliques = (ULONGINT_T*)P_MALLOC(estTotCliques * sizeof(ULONGINT_T));
        assert(lastVsInCliques);
        global_lastVsInCliques = lastVsInCliques;
        firstVsInCliques = (ULONGINT_T*)P_MALLOC(estTotCliques * sizeof(ULONGINT_T));
        assert(firstVsInCliques);
        global_firstVsInCliques = firstVsInCliques;

        /*
         * Sum up vertices in each clique to determine the lastVsInCliques array
         */

        lastVsInCliques[0] = cliqueSizes[0] - 1;
        for (i = 1; i < estTotCliques; i++) {
            lastVsInCliques[i] = cliqueSizes[i] + lastVsInCliques[i-1];
            if (lastVsInCliques[i] >= TOT_VERTICES-1) {
                break;
            }
        }
        totCliques = i + 1;

        global_totCliques = totCliques;

        /*
         * Fix the size of the last clique
         */
        cliqueSizes[totCliques-1] =
            TOT_VERTICES - lastVsInCliques[totCliques-2] - 1;
        lastVsInCliques[totCliques-1] = TOT_VERTICES - 1;

        firstVsInCliques[0] = 0;

    }

    thread_barrier_wait();

    lastVsInCliques  = global_lastVsInCliques;
    firstVsInCliques = global_firstVsInCliques;
    totCliques = global_totCliques;

    /* Compute start Vertices in cliques. */
    createPartition(1, totCliques, myId, numThread, &i_start, &i_stop);
    for (i = i_start; i < i_stop; i++) {
        firstVsInCliques[i] = lastVsInCliques[i-1] + 1;
    }

#ifdef WRITE_RESULT_FILES
    thread_barrier_wait();

    /* Write the generated cliques to file for comparison with Kernel 4 */
    if (myId == 0) {
        FILE* outfp = fopen("cliques.txt", "w");
        fprintf(outfp, "No. of cliques - %lu\n", totCliques);
        for (i = 0; i < totCliques; i++) {
            fprintf(outfp, "Clq %lu - ", i);
            long j;
            for (j = firstVsInCliques[i]; j <= lastVsInCliques[i]; j++) {
                fprintf(outfp, "%lu ", permV[j]);
            }
            fprintf(outfp, "\n");
        }
        fclose(outfp);
    }

    thread_barrier_wait();
#endif

    /*
     * STEP 2: Create the edges within the cliques
     */

    /*
     * Estimate number of edges - using an empirical measure
     */
    long estTotEdges;
    if (SCALE >= 12) {
        estTotEdges = ceil(((MAX_CLIQUE_SIZE-1) * TOT_VERTICES));
    } else {
        estTotEdges = ceil(1.2 * (((MAX_CLIQUE_SIZE-1)*TOT_VERTICES)
                                  * ((1 + MAX_PARAL_EDGES)/2) + TOT_VERTICES*2));
    }

    /*
     * Initialize edge counter
     */
    long i_edgePtr = 0;
    float p = PROB_UNIDIRECTIONAL;

    /*
     * Partial edgeLists
     */

    ULONGINT_T* startV;
    ULONGINT_T* endV;

    if (numThread > 3) {
        long numByte = 1.5 * (estTotEdges/numThread) * sizeof(ULONGINT_T);
        startV = (ULONGINT_T*)P_MALLOC(numByte);
        endV = (ULONGINT_T*)P_MALLOC(numByte);
    } else  {
        long numByte = (estTotEdges/numThread) * sizeof(ULONGINT_T);
        startV = (ULONGINT_T*)P_MALLOC(numByte);
        endV = (ULONGINT_T*)P_MALLOC(numByte);
    }
    assert(startV);
    assert(endV);

    /*
     * Tmp array to keep track of the no. of parallel edges in each direction
     */
    ULONGINT_T** tmpEdgeCounter =
        (ULONGINT_T**)P_MALLOC(MAX_CLIQUE_SIZE * sizeof(ULONGINT_T *));
    assert(tmpEdgeCounter);
    for (i = 0; i < MAX_CLIQUE_SIZE; i++) {
        tmpEdgeCounter[i] =
            (ULONGINT_T*)P_MALLOC(MAX_CLIQUE_SIZE * sizeof(ULONGINT_T));
        assert(tmpEdgeCounter[i]);
    }

    /*
     * Create edges in parallel
     */
    long i_clique;
    createPartition(0, totCliques, myId, numThread, &i_start, &i_stop);

    for (i_clique = i_start; i_clique < i_stop; i_clique++) {

        /*
         * Get current clique parameters
         */

        long i_cliqueSize = cliqueSizes[i_clique];
        long i_firstVsInClique = firstVsInCliques[i_clique];

        /*
         * First create at least one edge between two vertices in a clique
         */

        for (i = 0; i < i_cliqueSize; i++) {

            long j;
            for (j = 0; j < i; j++) {

                float r = (float)(PRANDOM_GENERATE(stream) % 1000) / (float)1000;
                if (r >= p) {

                    startV[i_edgePtr] = i + i_firstVsInClique;
                    endV[i_edgePtr] = j + i_firstVsInClique;
                    i_edgePtr++;
                    tmpEdgeCounter[i][j] = 1;

                    startV[i_edgePtr] = j + i_firstVsInClique;
                    endV[i_edgePtr] = i + i_firstVsInClique;
                    i_edgePtr++;
                    tmpEdgeCounter[j][i] = 1;

                } else  if (r >= 0.5) {

                    startV[i_edgePtr] = i + i_firstVsInClique;
                    endV[i_edgePtr] = j + i_firstVsInClique;
                    i_edgePtr++;
                    tmpEdgeCounter[i][j] = 1;
                    tmpEdgeCounter[j][i] = 0;

                } else {

                    startV[i_edgePtr] = j + i_firstVsInClique;
                    endV[i_edgePtr] = i + i_firstVsInClique;
                    i_edgePtr++;
                    tmpEdgeCounter[j][i] = 1;
                    tmpEdgeCounter[i][j] = 0;

                }

            } /* for j */
        } /* for i */

        if (i_cliqueSize != 1) {
            long randNumEdges = (long)(PRANDOM_GENERATE(stream)
                                       % (2*i_cliqueSize*MAX_PARAL_EDGES));
            long i_paralEdge;
            for (i_paralEdge = 0; i_paralEdge < randNumEdges; i_paralEdge++) {
                i = (PRANDOM_GENERATE(stream) % i_cliqueSize);
                long j = (PRANDOM_GENERATE(stream) % i_cliqueSize);
                if ((i != j) && (tmpEdgeCounter[i][j] < MAX_PARAL_EDGES)) {
                    float r = (float)(PRANDOM_GENERATE(stream) % 1000) / (float)1000;
                    if (r >= p) {
                        /* Copy to edge structure. */
                        startV[i_edgePtr] = i + i_firstVsInClique;
                        endV[i_edgePtr] = j + i_firstVsInClique;
                        i_edgePtr++;
                        tmpEdgeCounter[i][j]++;
                    }
                }
            }
        }

    } /* for i_clique */

    for (i = 0; i < MAX_CLIQUE_SIZE; i++) {
        P_FREE(tmpEdgeCounter[i]);
    }

    P_FREE(tmpEdgeCounter);


    /*
     * Merge partial edge lists
     */

    ULONGINT_T* i_edgeStartCounter;
    ULONGINT_T* i_edgeEndCounter;

    if (myId == 0) {
        i_edgeStartCounter = (ULONGINT_T*)P_MALLOC(numThread * sizeof(ULONGINT_T));
        assert(i_edgeStartCounter);
        global_i_edgeStartCounter = i_edgeStartCounter;
        i_edgeEndCounter = (ULONGINT_T*)P_MALLOC(numThread * sizeof(ULONGINT_T));
        assert(i_edgeEndCounter);
        global_i_edgeEndCounter = i_edgeEndCounter;
    }

    thread_barrier_wait();

    i_edgeStartCounter = global_i_edgeStartCounter;
    i_edgeEndCounter   = global_i_edgeEndCounter;

    i_edgeEndCounter[myId] = i_edgePtr;
    i_edgeStartCounter[myId] = 0;

    thread_barrier_wait();

    if (myId == 0) {
        for (i = 1; i < numThread; i++) {
            i_edgeEndCounter[i] = i_edgeEndCounter[i-1] + i_edgeEndCounter[i];
            i_edgeStartCounter[i] = i_edgeEndCounter[i-1];
        }
    }

    AL_LOCK(0);
    TM_BEGIN();
    TM_SHARED_WRITE(global_edgeNum,
                    ((long)TM_SHARED_READ(global_edgeNum) + i_edgePtr));
    TM_END();

    thread_barrier_wait();

    long edgeNum = global_edgeNum;

    /*
     * Initialize edge list arrays
     */

    ULONGINT_T* startVertex;
    ULONGINT_T* endVertex;

    if (myId == 0) {
        if (SCALE < 10) {
            long numByte = 2 * edgeNum * sizeof(ULONGINT_T);
            startVertex = (ULONGINT_T*)P_MALLOC(numByte);
            endVertex = (ULONGINT_T*)P_MALLOC(numByte);
        } else {
            long numByte = (edgeNum + MAX_PARAL_EDGES * TOT_VERTICES)
                           * sizeof(ULONGINT_T);
            startVertex = (ULONGINT_T*)P_MALLOC(numByte);
            endVertex = (ULONGINT_T*)P_MALLOC(numByte);
        }
        assert(startVertex);
        assert(endVertex);
        global_startVertex = startVertex;
        global_endVertex = endVertex;
    }

    thread_barrier_wait();

    startVertex = global_startVertex;
    endVertex = global_endVertex;

    for (i = i_edgeStartCounter[myId]; i < i_edgeEndCounter[myId]; i++) {
        startVertex[i] = startV[i-i_edgeStartCounter[myId]];
        endVertex[i] = endV[i-i_edgeStartCounter[myId]];
    }

    ULONGINT_T numEdgesPlacedInCliques = edgeNum;

    thread_barrier_wait();

    /*
     * STEP 3: Connect the cliques
     */

    i_edgePtr = 0;
    p = PROB_INTERCL_EDGES;

    /*
     * Generating inter-clique edges as given in the specs
     */

    createPartition(0, TOT_VERTICES, myId, numThread, &i_start, &i_stop);

    for (i = i_start; i < i_stop; i++) {

        ULONGINT_T tempVertex1 = i;
        long h = totCliques;
        long l = 0;
        long t = -1;
        while (h - l > 1) {
            long m = (h + l) / 2;
            if (tempVertex1 >= firstVsInCliques[m]) {
                l = m;
            } else {
                if ((tempVertex1 < firstVsInCliques[m]) && (m > 0)) {
                    if (tempVertex1 >= firstVsInCliques[m-1]) {
                        t = m - 1;
                        break;
                    } else {
                        h = m;
                    }
                }
            }
        }

        if (t == -1) {
            long m;
            for (m = (l + 1); m < h; m++) {
                if (tempVertex1<firstVsInCliques[m]) {
                    break;
                }
            }
            t = m-1;
        }

        long t1 = firstVsInCliques[t];

        ULONGINT_T d;
        for (d = 1, p = PROB_INTERCL_EDGES; d < TOT_VERTICES; d *= 2, p /= 2) {

            float r = (float)(PRANDOM_GENERATE(stream) % 1000) / (float)1000;

            if (r <= p) {

                ULONGINT_T tempVertex2 = (i+d) % TOT_VERTICES;

                h = totCliques;
                l = 0;
                t = -1;
                while (h - l > 1) {
                    long m = (h + l) / 2;
                    if (tempVertex2 >= firstVsInCliques[m]) {
                        l = m;
                    } else {
                        if ((tempVertex2 < firstVsInCliques[m]) && (m > 0)) {
                            if (firstVsInCliques[m-1] <= tempVertex2) {
                                t = m - 1;
                                break;
                            } else {
                                h = m;
                            }
                        }
                    }
                }

                if (t == -1) {
                    long m;
                    for (m = (l + 1); m < h; m++) {
                        if (tempVertex2 < firstVsInCliques[m]) {
                            break;
                        }
                    }
                    t = m - 1;
                }

                long t2 = firstVsInCliques[t];

                if (t1 != t2) {
                    long randNumEdges =
                        PRANDOM_GENERATE(stream) % MAX_PARAL_EDGES + 1;
                    long j;
                    for (j = 0; j < randNumEdges; j++) {
                        startV[i_edgePtr] = tempVertex1;
                        endV[i_edgePtr] = tempVertex2;
                        i_edgePtr++;
                    }
                }

            } /* r <= p */

            float r0 = (float)(PRANDOM_GENERATE(stream) % 1000) / (float)1000;

            if ((r0 <= p) && (i-d>=0)) {

                ULONGINT_T tempVertex2 = (i-d) % TOT_VERTICES;

                h = totCliques;
                l = 0;
                t = -1;
                while (h - l > 1) {
                    long m = (h + l) / 2;
                    if (tempVertex2 >= firstVsInCliques[m]) {
                        l = m;
                    } else {
                        if ((tempVertex2 < firstVsInCliques[m]) && (m > 0)) {
                            if (firstVsInCliques[m-1] <= tempVertex2) {
                                t = m - 1;
                                break;
                            } else {
                                h = m;
                            }
                        }
                    }
                }

                if (t == -1) {
                    long m;
                    for (m = (l + 1); m < h; m++) {
                        if (tempVertex2 < firstVsInCliques[m]) {
                            break;
                        }
                    }
                    t = m - 1;
                }

                long t2 = firstVsInCliques[t];

                if (t1 != t2) {
                    long randNumEdges =
                        PRANDOM_GENERATE(stream) % MAX_PARAL_EDGES + 1;
                    long j;
                    for (j = 0; j < randNumEdges; j++) {
                        startV[i_edgePtr] = tempVertex1;
                        endV[i_edgePtr] = tempVertex2;
                        i_edgePtr++;
                    }
                }

            } /* r0 <= p && (i-d) >= 0 */

        } /* for d, p */

    } /* for i */


    i_edgeEndCounter[myId] = i_edgePtr;
    i_edgeStartCounter[myId] = 0;

    if (myId == 0) {
        global_edgeNum = 0;
    }

    thread_barrier_wait();

    if (myId == 0) {
        for (i = 1; i < numThread; i++) {
            i_edgeEndCounter[i] = i_edgeEndCounter[i-1] + i_edgeEndCounter[i];
            i_edgeStartCounter[i] = i_edgeEndCounter[i-1];
        }
    }

    AL_LOCK(0);
    TM_BEGIN();
    TM_SHARED_WRITE(global_edgeNum,
                    ((long)TM_SHARED_READ(global_edgeNum) + i_edgePtr));
    TM_END();


    thread_barrier_wait();

    edgeNum = global_edgeNum;
    ULONGINT_T numEdgesPlacedOutside = global_edgeNum;

    for (i = i_edgeStartCounter[myId]; i < i_edgeEndCounter[myId]; i++) {
        startVertex[i+numEdgesPlacedInCliques] = startV[i-i_edgeStartCounter[myId]];
        endVertex[i+numEdgesPlacedInCliques] = endV[i-i_edgeStartCounter[myId]];
    }

    thread_barrier_wait();

    ULONGINT_T  numEdgesPlaced = numEdgesPlacedInCliques + numEdgesPlacedOutside;

    if (myId == 0) {

        SDGdataPtr->numEdgesPlaced = numEdgesPlaced;

        printf("Finished generating edges\n");
        printf("No. of intra-clique edges - %lu\n", numEdgesPlacedInCliques);
        printf("No. of inter-clique edges - %lu\n", numEdgesPlacedOutside);
        printf("Total no. of edges        - %lu\n", numEdgesPlaced);

        P_FREE(i_edgeStartCounter);
        P_FREE(i_edgeEndCounter);

        P_FREE(cliqueSizes);
        P_FREE(firstVsInCliques);
        P_FREE(lastVsInCliques);
    }

    thread_barrier_wait();

    P_FREE(startV);
    P_FREE(endV);

    /*
     * STEP 4: Generate edge weights
     */

    if (myId == 0) {
        SDGdataPtr->intWeight =
            (LONGINT_T*)P_MALLOC(numEdgesPlaced * sizeof(LONGINT_T));
        assert(SDGdataPtr->intWeight);
    }

    thread_barrier_wait();

    p = PERC_INT_WEIGHTS;
    ULONGINT_T numStrWtEdges  = 0;

    createPartition(0, numEdgesPlaced, myId, numThread, &i_start, &i_stop);

    for (i = i_start; i < i_stop; i++) {
        float r = (float)(PRANDOM_GENERATE(stream) % 1000) / (float)1000;
        if (r <= p) {
            SDGdataPtr->intWeight[i] =
                1 + (PRANDOM_GENERATE(stream) % (MAX_INT_WEIGHT-1));
        } else {
            SDGdataPtr->intWeight[i] = -1;
            numStrWtEdges++;
        }
    }

    thread_barrier_wait();

    if (myId == 0) {
        long t = 0;
        for (i = 0; i < numEdgesPlaced; i++) {
            if (SDGdataPtr->intWeight[i] < 0) {
                SDGdataPtr->intWeight[i] = -t;
                t++;
            }
        }
    }

    AL_LOCK(0);
    TM_BEGIN();
    TM_SHARED_WRITE(global_numStrWtEdges,
                    ((long)TM_SHARED_READ(global_numStrWtEdges) + numStrWtEdges));
    TM_END();

    thread_barrier_wait();

    numStrWtEdges = global_numStrWtEdges;

    if (myId == 0) {
        SDGdataPtr->strWeight =
            (char*)P_MALLOC(numStrWtEdges * MAX_STRLEN * sizeof(char));
        assert(SDGdataPtr->strWeight);
    }

    thread_barrier_wait();

    createPartition(0, numEdgesPlaced, myId, numThread, &i_start, &i_stop);

    for (i = i_start; i < i_stop; i++) {
        if (SDGdataPtr->intWeight[i] <= 0) {
            long j;
            for (j = 0; j < MAX_STRLEN; j++) {
                SDGdataPtr->strWeight[(-SDGdataPtr->intWeight[i])*MAX_STRLEN+j] =
                    (char) (1 + PRANDOM_GENERATE(stream) % 127);
            }
        }
    }

    /*
     * Choose SOUGHT STRING randomly if not assigned
     */

    if (myId == 0) {

        if (strlen(SOUGHT_STRING) != MAX_STRLEN) {
            SOUGHT_STRING = (char*)P_MALLOC(MAX_STRLEN * sizeof(char));
            assert(SOUGHT_STRING);
        }

        long t = PRANDOM_GENERATE(stream) % numStrWtEdges;
        long j;
        for (j = 0; j < MAX_STRLEN; j++) {
            SOUGHT_STRING[j] =
                (char) ((long) SDGdataPtr->strWeight[t*MAX_STRLEN+j]);
        }

    }

    thread_barrier_wait();

    /*
     * STEP 5: Permute Vertices
     */

    for (i = i_start; i < i_stop; i++) {
        startVertex[i] = permV[(startVertex[i])];
        endVertex[i] = permV[(endVertex[i])];
    }

    thread_barrier_wait();

    /*
     * STEP 6: Sort Vertices
     */

    /*
     * Radix sort with StartVertex as primary key
     */

    if (myId == 0) {
        long numByte = numEdgesPlaced * sizeof(ULONGINT_T);
        SDGdataPtr->startVertex = (ULONGINT_T*)P_MALLOC(numByte);
        assert(SDGdataPtr->startVertex);
        SDGdataPtr->endVertex = (ULONGINT_T*)P_MALLOC(numByte);
        assert(SDGdataPtr->endVertex);
    }

    thread_barrier_wait();

    all_radixsort_node_aux_s3(numEdgesPlaced,
                              startVertex,
                              SDGdataPtr->startVertex,
                              endVertex,
                              SDGdataPtr->endVertex);

    thread_barrier_wait();

    if (myId == 0) {
        P_FREE(startVertex);
        P_FREE(endVertex);
    }

    thread_barrier_wait();

    if (SCALE < 12) {

        /*
         * Sort with endVertex as secondary key
         */

        if (myId == 0) {

            long i0 = 0;
            long i1 = 0;
            i = 0;

            while (i < numEdgesPlaced) {

                for (i = i0; i < numEdgesPlaced; i++) {
                    if (SDGdataPtr->startVertex[i] !=
                        SDGdataPtr->startVertex[i1])
                    {
                        i1 = i;
                        break;
                    }
                }

                long j;
                for (j = i0; j < i1; j++) {
                    long k;
                    for (k = j+1; k < i1; k++) {
                        if (SDGdataPtr->endVertex[k] <
                            SDGdataPtr->endVertex[j])
                        {
                            long t = SDGdataPtr->endVertex[j];
                            SDGdataPtr->endVertex[j] = SDGdataPtr->endVertex[k];
                            SDGdataPtr->endVertex[k] = t;
                        }
                    }
                }

                if (SDGdataPtr->startVertex[i0] != TOT_VERTICES-1) {
                    i0 = i1;
                } else {
                    long j;
                    for (j=i0; j<numEdgesPlaced; j++) {
                        long k;
                        for (k=j+1; k<numEdgesPlaced; k++) {
                            if (SDGdataPtr->endVertex[k] <
                                SDGdataPtr->endVertex[j])
                            {
                                long t = SDGdataPtr->endVertex[j];
                                SDGdataPtr->endVertex[j] = SDGdataPtr->endVertex[k];
                                SDGdataPtr->endVertex[k] = t;
                            }
                        }
                    }
                }

            } /* while i < numEdgesPlaced */

        }

    } else {

        ULONGINT_T* tempIndex;

        if (myId == 0) {

            tempIndex =
                (ULONGINT_T*)P_MALLOC((TOT_VERTICES + 1) * sizeof(ULONGINT_T));
            assert(tempIndex);
            global_tempIndex = tempIndex;

            /*
             * Update degree of each vertex
             */

            tempIndex[0] = 0;
            tempIndex[TOT_VERTICES] = numEdgesPlaced;
            long i0 = 0;

            for (i=0; i < TOT_VERTICES; i++) {
                tempIndex[i+1] = tempIndex[i];
                long j;
                for (j = i0; j < numEdgesPlaced; j++) {
                    if (SDGdataPtr->startVertex[j] !=
                        SDGdataPtr->startVertex[i0])
                    {
                        if (SDGdataPtr->startVertex[i0] == i) {
                            tempIndex[i+1] = j;
                            i0 = j;
                            break;
                        }
                    }
                }
            }
        }

        thread_barrier_wait();

        tempIndex = global_tempIndex;

        /*
         * Insertion sort for now, replace with something better later on
         */
#if 0
        createPartition(0, TOT_VERTICES, myId, numThread, &i_start, &i_stop);

        for (i = i_start; i < i_stop; i++) {
            long j;
            for (j = tempIndex[i]; j < tempIndex[i+1]; j++) {
                long k;
                for (k = (j + 1); k < tempIndex[i+1]; k++) {
                    if (SDGdataPtr->endVertex[k] <
                        SDGdataPtr->endVertex[j])
                    {
                        long t = SDGdataPtr->endVertex[j];
                        SDGdataPtr->endVertex[j] = SDGdataPtr->endVertex[k];
                        SDGdataPtr->endVertex[k] = t;
                    }
                }
            }
        }
#else
        if (myId == 0) {
            for (i = 0; i < TOT_VERTICES; i++) {
                long j;
                for (j = tempIndex[i]; j < tempIndex[i+1]; j++) {
                    long k;
                    for (k = (j + 1); k < tempIndex[i+1]; k++) {
                        if (SDGdataPtr->endVertex[k] <
                            SDGdataPtr->endVertex[j])
                        {
                            long t = SDGdataPtr->endVertex[j];
                            SDGdataPtr->endVertex[j] = SDGdataPtr->endVertex[k];
                            SDGdataPtr->endVertex[k] = t;
                        }
                    }
                }
            }
        }
#endif

        if (myId == 0) {
            P_FREE(tempIndex);
        }

    } /* SCALE >= 12 */

    PRANDOM_FREE(stream);
    if (myId == 0) {
        P_FREE(permV);
    }

    TM_THREAD_EXIT();
}
Code example #14
/* =============================================================================
 * cutClusters
 * =============================================================================
 */
void
cutClusters (void* argPtr)
{
    TM_THREAD_ENTER();

    graph* GPtr = (graph*)argPtr;

    long myId = thread_getId();
    long numThread = thread_getNumThread();

    /*
     * Sort the vertex list by their degree
     */

    ULONGINT_T* Index;
    ULONGINT_T* neighbourArray;
    ULONGINT_T* IndexSorted;
    ULONGINT_T* neighbourArraySorted;

    if (myId == 0) {
        long numByte = GPtr->numVertices * sizeof(ULONGINT_T);
        Index = (ULONGINT_T*)P_MALLOC(numByte);
        assert(Index);
        global_Index = Index;
        neighbourArray = (ULONGINT_T*)P_MALLOC(numByte);
        assert(neighbourArray);
        global_neighbourArray = neighbourArray;
        IndexSorted = (ULONGINT_T*)P_MALLOC(numByte);
        assert(IndexSorted);
        global_IndexSorted = IndexSorted;
        neighbourArraySorted = (ULONGINT_T*)P_MALLOC(numByte);
        assert(neighbourArraySorted);
        global_neighbourArraySorted = neighbourArraySorted;
    }

    thread_barrier_wait();

    Index = global_Index;
    neighbourArray = global_neighbourArray;
    IndexSorted = global_IndexSorted;
    neighbourArraySorted = global_neighbourArraySorted;

    long i;
    long i_start;
    long i_stop;
    createPartition(0, GPtr->numVertices, myId, numThread, &i_start, &i_stop);

    for (i = i_start; i < i_stop; i++) {
        neighbourArray[i] = GPtr->inDegree[i] + GPtr->outDegree[i];
        Index[i] = i;
    }

    thread_barrier_wait();


    all_radixsort_node_aux_s3(GPtr->numVertices,
                              neighbourArray,
                              neighbourArraySorted,
                              Index,
                              IndexSorted);

    thread_barrier_wait();

    /*
     * Global array to keep track of vertex status:
     * -1 if a vertex hasn't been assigned to a cluster yet
     * t if it belongs to a cluster; t = iteration*numThread + myId
     */
    long* vStatus;

    edge* pCutSet;
    ULONGINT_T* startV;
    ULONGINT_T* clusterSize;

    if (myId == 0) {

        P_FREE(Index);
        P_FREE(neighbourArray);

        vStatus = (long*)P_MALLOC(GPtr->numVertices * sizeof(long));
        assert(vStatus);
        global_vStatus = vStatus;

        /*
         * Allocate mem. for the cut set list
         * Maintain local arrays initially and merge them in the end
         */

        if (SCALE < 12) {
            pCutSet = (edge*)P_MALLOC((1*(GPtr->numDirectedEdges)/numThread)
                                      * sizeof(edge));
        } else {
            pCutSet = (edge*)P_MALLOC((0.2*(GPtr->numDirectedEdges)/numThread)
                                      * sizeof(edge));
        }
        assert(pCutSet);
        global_pCutSet = pCutSet;

        /*
         * Vertex to start from, on each thread
         */
        startV = (ULONGINT_T*)P_MALLOC(numThread * sizeof(ULONGINT_T));
        assert(startV);
        global_startV = startV;
        clusterSize = (ULONGINT_T*)P_MALLOC(numThread * sizeof(ULONGINT_T));
        assert(clusterSize);
        global_clusterSize = clusterSize;
    }

    thread_barrier_wait();

    vStatus     = global_vStatus;
    pCutSet     = global_pCutSet;
    startV      = global_startV;
    clusterSize = global_clusterSize;

    for (i = i_start; i < i_stop; i++) {
        vStatus[i] = -1;
    }

    thread_barrier_wait();

    ULONGINT_T verticesVisited = 0;

#ifdef WRITE_RESULT_FILES
    FILE* outfp1 = NULL;
    if (myId == 0) {
        outfp1 = fopen("clusters.txt", "w");
        fprintf(outfp1, "\nKernel 4 - Extracted Clusters\n");
    }
#endif

    long iter = 0;
    ULONGINT_T currIndex = 0;
    ULONGINT_T cutSetIndex = 0;

    while (verticesVisited < GPtr->numVertices) {

        /* Clear start vertex array */
        startV[myId] = -1;
        clusterSize[myId] = 0;

        if (currIndex == GPtr->numVertices) {
            currIndex = 0;
        }

        thread_barrier_wait();

        /*
         * Choose vertices to start from
         * Done sequentially right now, can be parallelized
         */
        if (myId == 0) {
            long t;
            for (t = 0; t < numThread; t++) {
                long r;
                for (r = currIndex; r < GPtr->numVertices; r++) {
                    if (vStatus[IndexSorted[GPtr->numVertices - r - 1]] == -1) {
                        startV[t] = IndexSorted[GPtr->numVertices - r - 1];
                        vStatus[startV[t]] = iter * numThread + t;
                        long j;
                        for (j = 0; j < GPtr->outDegree[startV[t]]; j++) {
                            long outVertexListIndex =
                                j+GPtr->outVertexIndex[startV[t]];
                            long vStatusIndex =
                                GPtr->outVertexList[outVertexListIndex];
                            if (vStatus[vStatusIndex] == -1) {
                                vStatus[vStatusIndex] = iter * numThread + t;
                                clusterSize[t]++;
                            }
                        }
                        for (j = 0; j < GPtr->inDegree[startV[t]]; j++) {
                            long inVertexIndex = j+GPtr->inVertexIndex[startV[t]];
                            long vStatusIndex = GPtr->inVertexList[inVertexIndex];
                            if (vStatus[vStatusIndex] == -1) {
                                vStatus[vStatusIndex] = iter * numThread + t;
                                clusterSize[t]++;
                            }
                        }
                        currIndex = r+1;
                        break;
                    }
                }
            }
        }

        thread_barrier_wait();

        /*
         * Determine clusters and cut sets in parallel
         */

        i = startV[myId];

        ULONGINT_T cliqueSize = 0;

        /* If the thread has some vertex to start from */
        if (i != -1)  {

            cliqueSize = 1;

            /* clusterSize[myId] gives the no. of 'unassigned' vertices adjacent to the current vertex */
            if ((clusterSize[myId] >= 0.6*(GPtr->inDegree[i]+GPtr->outDegree[i])) ||
                ((iter > (GPtr->numVertices)/(numThread*MAX_CLUSTER_SIZE)) &&
                 (clusterSize[myId] > 0)))
            {

                /*
                 * Most of the adjacent vertices are unassigned,
                 * should be able to extract a cluster easily
                 */

                /* Inspect adjacency list */
                long j;
                for (j = 0; j < GPtr->outDegree[i]; j++) {

                    ULONGINT_T clusterCounter = 0;
                    ULONGINT_T cutSetIndexPrev = cutSetIndex;
                    ULONGINT_T cutSetCounter = 0;

                    if (vStatus[GPtr->outVertexList[j+GPtr->outVertexIndex[i]]] ==
                        iter * numThread + myId)
                    {

                        long v = GPtr->outVertexList[j+GPtr->outVertexIndex[i]];

                        /*
                         * Inspect vertices adjacent to v and determine if it belongs
                         * to a cluster or not
                         */
                        long k;
                        for (k = 0; k < GPtr->outDegree[v]; k++) {
                            long outVertexListIndex = k+GPtr->outVertexIndex[v];
                            long vStatusIndex = GPtr->outVertexList[outVertexListIndex];
                            if (vStatus[vStatusIndex] == (iter * numThread + myId)) {
                                clusterCounter++;
                            } else {
                                cutSetCounter++;
                                if (vStatus[vStatusIndex] == -1) {
                                    /* Ensure that an edge is not added twice to the list */
                                    pCutSet[cutSetIndex].startVertex = v;
                                    pCutSet[cutSetIndex].endVertex = vStatusIndex;
                                    cutSetIndex++;
                                }
                            }
                        }

                        if ((cutSetCounter >= clusterCounter) ||
                            ((SCALE < 9) &&
                             (clusterCounter <= 2) &&
                             (GPtr->inDegree[v]+GPtr->outDegree[v] >
                              clusterCounter + cutSetCounter) &&
                             (clusterSize[myId] > clusterCounter + 2)) ||
                            ((SCALE > 9) &&
                             (clusterCounter < 0.5*clusterSize[myId])))
                        {

                            /* v doesn't belong to this clique, free it */
                            vStatus[v] = -1;

                            /* Also add this edge to cutset list, removing previously added edges */
                            cutSetIndex = cutSetIndexPrev;
                            pCutSet[cutSetIndex].startVertex = i;
                            pCutSet[cutSetIndex].endVertex = v;
                            cutSetIndex++;

                        } else {

                            cliqueSize++;
                             /* Add edges in inVertexList also to cut Set */
                            for (k = 0; k < GPtr->inDegree[v]; k++) {
                                long inVertexListIndex = k+GPtr->inVertexIndex[v];
                                long vStatusIndex = GPtr->inVertexList[inVertexListIndex];
                                if (vStatus[vStatusIndex] == -1) {
                                    pCutSet[cutSetIndex].startVertex = v;
                                    pCutSet[cutSetIndex].endVertex = vStatusIndex;
                                    cutSetIndex++;
                                }
                            }

                        }

                    }

                }

                /* Do the same for the implied edges too */
                for (j = 0; j < GPtr->inDegree[i]; j++) {

                    ULONGINT_T clusterCounter = 0;
                    ULONGINT_T cutSetIndexPrev = cutSetIndex;
                    ULONGINT_T cutSetCounter = 0;

                    if (vStatus[GPtr->inVertexList[j+GPtr->inVertexIndex[i]]] ==
                        iter*numThread+myId)
                    {
                        long v = GPtr->inVertexList[j+GPtr->inVertexIndex[i]];

                        /* Inspect vertices adjacent to v and determine if it belongs to a cluster or not */
                        long k;
                        for (k = 0; k < GPtr->outDegree[v]; k++) {
                            long outVertexListIndex = k+GPtr->outVertexIndex[v];
                            long vStatusIndex = GPtr->outVertexList[outVertexListIndex];
                            if (vStatus[vStatusIndex] == iter*numThread+myId) {
                                clusterCounter++;
                            } else {
                                cutSetCounter++;
                                if (vStatus[vStatusIndex] == -1) {
                                    /* To ensure that an edge is not added twice to the list */
                                    pCutSet[cutSetIndex].startVertex = v;
                                    pCutSet[cutSetIndex].endVertex = vStatusIndex;
                                    cutSetIndex++;
                                }
                            }
                        }

                        if ((cutSetCounter >= clusterCounter) ||
                            ((SCALE < 9) &&
                             (clusterCounter <= 2) &&
                             (GPtr->inDegree[v]+GPtr->outDegree[v] >
                              clusterCounter + cutSetCounter)  &&
                             (clusterSize[myId] > clusterCounter + 2)) ||
                            ((SCALE > 9) &&
                             (clusterCounter < 0.5*clusterSize[myId])))
                        {
                            /* v doesn't belong to this clique, free it */
                            vStatus[v] = -1;
                            cutSetIndex = cutSetIndexPrev;
                            pCutSet[cutSetIndex].startVertex = i;
                            pCutSet[cutSetIndex].endVertex = v;
                            cutSetIndex++;

                        } else {

                            cliqueSize++;
                            /* Add edges in inVertexList also to cut Set */
                            for (k = 0; k < GPtr->inDegree[v]; k++) {
                                long inVertexListIndex = k+GPtr->inVertexIndex[v];
                                long vStatusIndex = GPtr->inVertexList[inVertexListIndex];
                                if (vStatus[vStatusIndex] == -1) {
                                    pCutSet[cutSetIndex].startVertex = v;
                                    pCutSet[cutSetIndex].endVertex = vStatusIndex;
                                    cutSetIndex++;
                                }
                            }

                        }

                    }

                }

            } /* if most adjacent vertices were unassigned */

            if (clusterSize[myId] == 0)  {

              /* Only one vertex in cluster */
              cliqueSize = 1;

            } else {

                if ((clusterSize[myId] < 0.6*(GPtr->inDegree[i]+GPtr->outDegree[i])) &&
                    (iter <= GPtr->numVertices/(numThread*MAX_CLUSTER_SIZE)))
                {
                    /* High perc. of intra-clique edges, do not commit clique */
                    cliqueSize = 0;
                    vStatus[i] = -1;

                    long j;
                    for (j=0; j<GPtr->outDegree[i]; j++) {
                        long outVertexListIndex = j+GPtr->outVertexIndex[i];
                        long vStatusIndex = GPtr->outVertexList[outVertexListIndex];
                        if (vStatus[vStatusIndex] == iter*numThread+myId) {
                            vStatus[vStatusIndex] = -1;
                        }
                    }

                    for (j=0; j<GPtr->inDegree[i]; j++) {
                        long inVertexListIndex = j+GPtr->inVertexIndex[i];
                        long vStatusIndex = GPtr->inVertexList[inVertexListIndex];
                        if (vStatus[vStatusIndex] == iter*numThread+myId) {
                            vStatus[vStatusIndex] = -1;
                        }
                    }
                }

            }
        } /* if i != -1 */

        if (myId == 0) {
            global_cliqueSize = 0;
        }

        thread_barrier_wait();

#ifdef WRITE_RESULT_FILES
        /* Print to results.clq file */

        if (myId == 0) {
            long t;
            for (t = 0; t < numThread; t++) {
                if (startV[t] != -1) {
                    if (vStatus[startV[t]] == iter*numThread+t) {
                        fprintf(outfp1, "%lu ", startV[t]);
                        long j;
                        for (j = 0; j < GPtr->outDegree[startV[t]]; j++) {
                            long outVertexListIndex = j+GPtr->outVertexIndex[startV[t]];
                            long vStatusIndex = GPtr->outVertexList[outVertexListIndex];
                            if (vStatus[vStatusIndex] == iter*numThread+t) {
                                fprintf(outfp1, "%lu ", vStatusIndex);
                            }
                        }
                        for (j = 0; j < GPtr->inDegree[startV[t]]; j++) {
                            long inVertexListIndex = j+GPtr->inVertexIndex[startV[t]];
                            long vStatusIndex = GPtr->inVertexList[inVertexListIndex];
                            if (vStatus[vStatusIndex] == iter*numThread+t) {
                                fprintf(outfp1, "%lu ", vStatusIndex);
                            }
                        }
                        fprintf(outfp1, "\n");
                    }
                }
            }
        }

        thread_barrier_wait();
#endif /* WRITE_RESULTS_FILE */

        if (myId == 0) {
            iter++;
            global_iter = iter;
        }

        TM_BEGIN();
        long tmp_cliqueSize = (long)TM_SHARED_READ(global_cliqueSize);
        TM_SHARED_WRITE(global_cliqueSize, (tmp_cliqueSize + cliqueSize));
        TM_END();

        thread_barrier_wait();

        iter = global_iter;
        verticesVisited += global_cliqueSize;

        if ((verticesVisited >= 0.95*GPtr->numVertices) ||
            (iter > GPtr->numVertices/2))
        {
            break;
        }

    } /* while (verticesVisited < GPtr->numVertices) */

    thread_barrier_wait();

#ifdef WRITE_RESULT_FILES
    /* Take care of unmarked vertices */
    if (myId == 0) {
        if (verticesVisited < GPtr->numVertices) {
            for(i = 0; i < GPtr->numVertices; i++) {
                if (vStatus[i] == -1) {
                    vStatus[i] = iter*numThread+myId;
                    fprintf(outfp1, "%lu\n", i);
                    iter++;
                }
            }
        }
    }

    thread_barrier_wait();
#endif

    /*
     * Merge partial Cutset Lists
     */

    /* Temp vars for merging edge lists */
    ULONGINT_T* edgeStartCounter;
    ULONGINT_T* edgeEndCounter;

    if (myId == 0) {
        edgeStartCounter = (ULONGINT_T*)P_MALLOC(numThread * sizeof(ULONGINT_T));
        assert(edgeStartCounter);
        global_edgeStartCounter = edgeStartCounter;
        edgeEndCounter = (ULONGINT_T*)P_MALLOC(numThread * sizeof(ULONGINT_T));
        assert(edgeEndCounter);
        global_edgeEndCounter = edgeEndCounter;
    }

    thread_barrier_wait();

    edgeStartCounter = global_edgeStartCounter;
    edgeEndCounter   = global_edgeEndCounter;

    edgeEndCounter[myId] = cutSetIndex;
    edgeStartCounter[myId] = 0;

    thread_barrier_wait();

    if (myId == 0) {
        long t;
        for (t = 1; t < numThread; t++) {
            edgeEndCounter[t] = edgeEndCounter[t-1] + edgeEndCounter[t];
            edgeStartCounter[t] = edgeEndCounter[t-1];
        }
    }

    TM_BEGIN();
    long tmp_cutSetIndex = (long)TM_SHARED_READ(global_cutSetIndex);
    TM_SHARED_WRITE(global_cutSetIndex, (tmp_cutSetIndex + cutSetIndex));
    TM_END();

    thread_barrier_wait();

    cutSetIndex = global_cutSetIndex;
    ULONGINT_T cutSetCounter = cutSetIndex;

    /* Data struct. for storing edgeCut */
    edge* cutSet;

    if (myId == 0) {
        cutSet = (edge*)P_MALLOC(cutSetCounter * sizeof(edge));
        assert(cutSet);
        global_cutSet = cutSet;
    }

    thread_barrier_wait();

    cutSet = global_cutSet;

    long j;
    for (j = edgeStartCounter[myId]; j < edgeEndCounter[myId]; j++) {
        cutSet[j].startVertex = pCutSet[j-edgeStartCounter[myId]].startVertex;
        cutSet[j].endVertex = pCutSet[j-edgeStartCounter[myId]].endVertex;
    }

    thread_barrier_wait();

#ifdef WRITE_RESULT_FILES
    FILE* outfp2 = NULL;
    if (myId == 0) {
        outfp2 = fopen("edgeCut.txt", "w");
        fprintf(outfp2, "\nEdges in Cut Set - \n");
        for (i = 0; i < cutSetCounter; i++) {
            fprintf(outfp2, "[%lu %lu] ",
                    cutSet[i].startVertex, cutSet[i].endVertex);
        }
        fclose(outfp2);
        fclose(outfp1);
    }
#endif

    if (myId == 0) {
        P_FREE(edgeStartCounter);
        P_FREE(edgeEndCounter);
        P_FREE(pCutSet);
        P_FREE(IndexSorted);
        P_FREE(neighbourArraySorted);
        P_FREE(startV);
        P_FREE(clusterSize);
        P_FREE(cutSet);
        P_FREE(vStatus);
    }

    TM_THREAD_EXIT();
}
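The cut-set merge above (edgeStartCounter/edgeEndCounter) is the recurring idiom in these kernels for concatenating per-thread buffers: each thread publishes its local count, thread 0 turns the counts into exclusive start / inclusive end offsets, and every thread then copies its buffer into a disjoint slice of the shared array. A minimal standalone sketch of just that offset computation (illustrative names, not benchmark code):

#include <stdio.h>

/* Turn per-thread counts into [start, end) offsets, exactly like the
 * edgeStartCounter/edgeEndCounter loop above. */
static void merge_offsets (long numThread, const long* count,
                           long* start, long* end)
{
    long t;
    start[0] = 0;
    end[0]   = count[0];
    for (t = 1; t < numThread; t++) {
        end[t]   = end[t-1] + count[t];
        start[t] = end[t-1];
    }
}

int main (void)
{
    long count[4] = { 3, 0, 5, 2 };
    long start[4];
    long end[4];
    long t;
    merge_offsets(4, count, start, end);
    /* Thread t may now copy its buffer into slice [start[t], end[t]) of
     * the shared array without further synchronization. */
    for (t = 0; t < 4; t++) {
        printf("thread %ld -> [%ld, %ld)\n", t, start[t], end[t]);
    }
    return 0;
}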
Code example #15
File: computeGraph.c Project: Ikulagin/transmem
/* =============================================================================
 * computeGraph
 * =============================================================================
 */
void
computeGraph (void* argPtr)
{
    graph*    GPtr       = ((computeGraph_arg_t*)argPtr)->GPtr;
    graphSDG* SDGdataPtr = ((computeGraph_arg_t*)argPtr)->SDGdataPtr;

    long myId = thread_getId();
    long numThread = thread_getNumThread();

    ULONGINT_T j;
    ULONGINT_T maxNumVertices = 0;
    //[wer210] comparison below ">= 0" of unsigned type is always true?
    ULONGINT_T numEdgesPlaced = SDGdataPtr->numEdgesPlaced;

    /*
     * First determine the number of vertices by scanning the tuple
     * startVertex list
     */

    long i;
    long i_start;
    long i_stop;
    createPartition(0, numEdgesPlaced, myId, numThread, &i_start, &i_stop);

    for (i = i_start; i < i_stop; i++) {
        if (SDGdataPtr->startVertex[i] > maxNumVertices) {
            maxNumVertices = SDGdataPtr->startVertex[i];
        }
    }

    __transaction_atomic {
      //long tmp_maxNumVertices = (long)TM_SHARED_READ(global_maxNumVertices);
      //long new_maxNumVertices = MAX(tmp_maxNumVertices, maxNumVertices + 1);
      //TM_SHARED_WRITE(global_maxNumVertices, (unsigned long)new_maxNumVertices);
      if (global_maxNumVertices < maxNumVertices + 1)
        global_maxNumVertices = maxNumVertices + 1;
    }

    thread_barrier_wait();

    maxNumVertices = global_maxNumVertices;

    if (myId == 0) {

        GPtr->numVertices = maxNumVertices;
        GPtr->numEdges    = numEdgesPlaced;
        GPtr->intWeight   = SDGdataPtr->intWeight;
        GPtr->strWeight   = SDGdataPtr->strWeight;

        for (i = 0; i < numEdgesPlaced; i++) {
            if (GPtr->intWeight[numEdgesPlaced-i-1] < 0) {
                GPtr->numStrEdges = -(GPtr->intWeight[numEdgesPlaced-i-1]) + 1;
                GPtr->numIntEdges = numEdgesPlaced - GPtr->numStrEdges;
                break;
            }
        }

        GPtr->outDegree =
            (LONGINT_T*)malloc((GPtr->numVertices) * sizeof(LONGINT_T));
        assert(GPtr->outDegree);

        GPtr->outVertexIndex =
            (ULONGINT_T*)malloc((GPtr->numVertices) * sizeof(ULONGINT_T));
        assert(GPtr->outVertexIndex);
    }

    thread_barrier_wait();

    createPartition(0, GPtr->numVertices, myId, numThread, &i_start, &i_stop);

    for (i = i_start; i < i_stop; i++) {
        GPtr->outDegree[i] = 0;
        GPtr->outVertexIndex[i] = 0;
    }

    ULONGINT_T outVertexListSize = 0;

    thread_barrier_wait();

    //[wer210] unsigned -1 ?
    ULONGINT_T i0 = -1UL;

    for (i = i_start; i < i_stop; i++) {

        ULONGINT_T k = i;
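        /* On this thread's first vertices (before any out-edges have been
         * counted), recover i0 by scanning back from i for the nearest
         * vertex that appears as a startVertex in the tuple list. */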
        if ((outVertexListSize == 0) && (k != 0)) {
            while (i0 == -1UL) {
                for (j = 0; j < numEdgesPlaced; j++) {
                    if (k == SDGdataPtr->startVertex[j]) {
                        i0 = j;
                        break;
                    }

                }
                k--;
            }
        }

        if ((outVertexListSize == 0) && (k == 0)) {
            i0 = 0;
        }

        for (j = i0; j < numEdgesPlaced; j++) {
            if (i == GPtr->numVertices-1) {
                break;
            }
            if ((i != SDGdataPtr->startVertex[j])) {
                if ((j > 0) && (i == SDGdataPtr->startVertex[j-1])) {
                    if (j-i0 >= 1) {
                        outVertexListSize++;
                        GPtr->outDegree[i]++;
                        ULONGINT_T t;
                        for (t = i0+1; t < j; t++) {
                            if (SDGdataPtr->endVertex[t] !=
                                SDGdataPtr->endVertex[t-1])
                            {
                                outVertexListSize++;
                                GPtr->outDegree[i] = GPtr->outDegree[i]+1;
                            }
                        }
                    }
                }
                i0 = j;
                break;
            }
        }

        if (i == GPtr->numVertices-1) {
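            /* NB: numEdgesPlaced and i0 are unsigned, so this ">= 0" test is
             * always true -- the [wer210] note near the top flags this. */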
            if (numEdgesPlaced-i0 >= 0) {
                outVertexListSize++;
                GPtr->outDegree[i]++;
                ULONGINT_T t;
                for (t = i0+1; t < numEdgesPlaced; t++) {
                    if (SDGdataPtr->endVertex[t] != SDGdataPtr->endVertex[t-1]) {
                        outVertexListSize++;
                        GPtr->outDegree[i]++;
                    }
                }
            }
        }

    } /* for i */

    thread_barrier_wait();

    prefix_sums(GPtr->outVertexIndex, GPtr->outDegree, GPtr->numVertices);

    thread_barrier_wait();

    __transaction_atomic {
      //TM_SHARED_WRITE( global_outVertexListSize,
      //                 ((long)TM_SHARED_READ(global_outVertexListSize) + outVertexListSize));
      global_outVertexListSize += outVertexListSize;
    }

    thread_barrier_wait();

    outVertexListSize = global_outVertexListSize;

    if (myId == 0) {
        GPtr->numDirectedEdges = outVertexListSize;
        GPtr->outVertexList =
            (ULONGINT_T*)malloc(outVertexListSize * sizeof(ULONGINT_T));
        assert(GPtr->outVertexList);
        GPtr->paralEdgeIndex =
            (ULONGINT_T*)malloc(outVertexListSize * sizeof(ULONGINT_T));
        assert(GPtr->paralEdgeIndex);
        GPtr->outVertexList[0] = SDGdataPtr->endVertex[0];
    }

    thread_barrier_wait();

    /*
     * Evaluate outVertexList
     */

    i0 = -1UL;

    for (i = i_start; i < i_stop; i++) {

        ULONGINT_T k = i;
        while ((i0 == -1UL) && (k != 0)) {
            for (j = 0; j < numEdgesPlaced; j++) {
                if (k == SDGdataPtr->startVertex[j]) {
                    i0 = j;
                    break;
                }
            }
            k--;
        }

        if ((i0 == -1UL) && (k == 0)) {
            i0 = 0;
        }

        for (j = i0; j < numEdgesPlaced; j++) {
            if (i == GPtr->numVertices-1) {
                break;
            }
            if (i != SDGdataPtr->startVertex[j]) {
                if ((j > 0) && (i == SDGdataPtr->startVertex[j-1])) {
                    if (j-i0 >= 1) {
                        long ii = GPtr->outVertexIndex[i];
                        ULONGINT_T r = 0;
                        GPtr->paralEdgeIndex[ii] = i0;
                        GPtr->outVertexList[ii] = SDGdataPtr->endVertex[i0];
                        r++;
                        ULONGINT_T t;
                        for (t = i0+1; t < j; t++) {
                            if (SDGdataPtr->endVertex[t] !=
                                SDGdataPtr->endVertex[t-1])
                            {
                                GPtr->paralEdgeIndex[ii+r] = t;
                                GPtr->outVertexList[ii+r] = SDGdataPtr->endVertex[t];
                                r++;
                            }
                        }

                    }
                }
                i0 = j;
                break;
            }
        } /* for j */

        if (i == GPtr->numVertices-1) {
            ULONGINT_T r = 0;
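            /* same always-true unsigned ">= 0" test as flagged above */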
            if (numEdgesPlaced - i0 >= 0) {
                long ii = GPtr->outVertexIndex[i];
                GPtr->paralEdgeIndex[ii+r] = i0;
                GPtr->outVertexList[ii+r] = SDGdataPtr->endVertex[i0];
                r++;
                ULONGINT_T t;
                for (t = i0 + 1; t < numEdgesPlaced; t++) {
                    if (SDGdataPtr->endVertex[t] != SDGdataPtr->endVertex[t-1]) {
                        GPtr->paralEdgeIndex[ii+r] = t;
                        GPtr->outVertexList[ii+r] = SDGdataPtr->endVertex[t];
                        r++;
                    }
                }
            }
        }

    } /* for i */

    thread_barrier_wait();

    if (myId == 0) {
        free(SDGdataPtr->startVertex);
        free(SDGdataPtr->endVertex);
        GPtr->inDegree =
            (LONGINT_T*)malloc(GPtr->numVertices * sizeof(LONGINT_T));
        assert(GPtr->inDegree);
        GPtr->inVertexIndex =
            (ULONGINT_T*)malloc(GPtr->numVertices * sizeof(ULONGINT_T));
        assert(GPtr->inVertexIndex);
    }

    thread_barrier_wait();

    for (i = i_start; i < i_stop; i++) {
        GPtr->inDegree[i] = 0;
        GPtr->inVertexIndex[i] = 0;
    }

    /* A temp. array to store the implied edges */
    ULONGINT_T* impliedEdgeList;
    if (myId == 0) {
        impliedEdgeList = (ULONGINT_T*)malloc(GPtr->numVertices
                                                * MAX_CLUSTER_SIZE
                                                * sizeof(ULONGINT_T));
        assert(impliedEdgeList);
        global_impliedEdgeList = impliedEdgeList;
    }

    thread_barrier_wait();

    impliedEdgeList = global_impliedEdgeList;

    createPartition(0,
                    (GPtr->numVertices * MAX_CLUSTER_SIZE),
                    myId,
                    numThread,
                    &i_start,
                    &i_stop);

    for (i = i_start; i < i_stop; i++) {
        impliedEdgeList[i] = 0;
    }

    /*
     * An auxiliary array to store implied edges, in case we overshoot
     * MAX_CLUSTER_SIZE
     */

    ULONGINT_T** auxArr;
    if (myId == 0) {
        auxArr = (ULONGINT_T**)malloc(GPtr->numVertices * sizeof(ULONGINT_T*));
        assert(auxArr);
        global_auxArr = auxArr;
    }

    thread_barrier_wait();

    auxArr = global_auxArr;

    createPartition(0, GPtr->numVertices, myId, numThread, &i_start, &i_stop);

    for (i = i_start; i < i_stop; i++) {
        /* Inspect adjacency list of vertex i */
        for (j = GPtr->outVertexIndex[i];
             j < (GPtr->outVertexIndex[i] + GPtr->outDegree[i]);
             j++)
        {
            ULONGINT_T v = GPtr->outVertexList[j];
            ULONGINT_T k;
            for (k = GPtr->outVertexIndex[v];
                 k < (GPtr->outVertexIndex[v] + GPtr->outDegree[v]);
                 k++)
            {
                if (GPtr->outVertexList[k] == i) {
                    break;
                }
            }
            if (k == GPtr->outVertexIndex[v]+GPtr->outDegree[v]) {
              __transaction_atomic {
                /* Add i to the impliedEdgeList of v */

                long inDegree = (long)TM_SHARED_READ(GPtr->inDegree[v]);
                TM_SHARED_WRITE(GPtr->inDegree[v], (inDegree + 1));

                if (inDegree < MAX_CLUSTER_SIZE) {
                  TM_SHARED_WRITE(impliedEdgeList[v*MAX_CLUSTER_SIZE+inDegree],
                                  (unsigned long)i);
                } else {
                  /* Use auxiliary array to store the implied edge */
                  /* Create an array if it's not present already */
                  ULONGINT_T* a = NULL;
                  if ((inDegree % MAX_CLUSTER_SIZE) == 0) {
                    a = (ULONGINT_T*)malloc(MAX_CLUSTER_SIZE * sizeof(ULONGINT_T));
                    assert(a);
                    TM_SHARED_WRITE_P(auxArr[v], a);
                  } else {
                    a = auxArr[v];
                  }
                  TM_SHARED_WRITE(a[inDegree % MAX_CLUSTER_SIZE], (unsigned long)i);
                }
              } // TM_END
            }
        }
    } /* for i */
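createPartition() is called throughout these kernels but its body is not part of the listing. Judging from the explicit partitioning code in code example #19 below (partitionSize = (num + numThread/2) / numThread, with the last thread taking the tail), it presumably looks roughly like the following sketch; treat this as an inference from the call sites, not the project's actual source:

/* Hypothetical reconstruction: split [min, max) into numThread contiguous
 * blocks; thread `id` receives [*startPtr, *stopPtr). The last thread
 * absorbs any rounding remainder. */
static void
createPartition (long min, long max, long id, long numThread,
                 long* startPtr, long* stopPtr)
{
    long partitionSize = (max - min + numThread/2) / numThread; /* rounded */

    *startPtr = min + id * partitionSize;
    if (id == (numThread - 1)) {
        *stopPtr = max;
    } else {
        *stopPtr = *startPtr + partitionSize;
    }
}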
Code example #16
File: yada.c Project: albertghtoun/transmem
/* =============================================================================
 * process
 * =============================================================================
 */
static void
process ()
{
    heap_t* workHeapPtr = global_workHeapPtr;
    mesh_t* meshPtr = global_meshPtr;
    region_t* regionPtr;
    long totalNumAdded = 0;
    long numProcess = 0;

    regionPtr = PREGION_ALLOC();
    assert(regionPtr);

    while (1) {

        element_t* elementPtr;

        __transaction_atomic {
            elementPtr = (element_t*)TMHEAP_REMOVE(workHeapPtr);
        }

        if (elementPtr == NULL) {
            break;
        }

        bool_t isGarbage;
        __transaction_atomic {
            isGarbage = TMELEMENT_ISGARBAGE(elementPtr);
        }
        if (isGarbage) {
            /*
             * Handle delayed deallocation
             */
            TMELEMENT_FREE(elementPtr);
            continue;
        }

        long numAdded;
        //[wer210] changed the control flow to get rid of self-abort
        bool_t success = TRUE;
        while (1) {
            __transaction_atomic {
                // TM_SAFE: PVECTOR_CLEAR (regionPtr->badVectorPtr);
                PREGION_CLEARBAD(regionPtr);
                //[wer210] problematic function!
                numAdded = TMREGION_REFINE(regionPtr, elementPtr, meshPtr, &success);
                if (success) break;
                else __transaction_cancel;
            }
        }

        __transaction_atomic {
            TMELEMENT_SETISREFERENCED(elementPtr, FALSE);
            isGarbage = TMELEMENT_ISGARBAGE(elementPtr);
        }
        if (isGarbage) {
            /*
             * Handle delayed deallocation
             */
            TMELEMENT_FREE(elementPtr);
        }

        totalNumAdded += numAdded;

        __transaction_atomic {
            TMREGION_TRANSFERBAD(regionPtr, workHeapPtr);
        }

        numProcess++;

    }

    __transaction_atomic {
        TM_SHARED_WRITE(global_totalNumAdded,
                        TM_SHARED_READ(global_totalNumAdded) + totalNumAdded);
        TM_SHARED_WRITE(global_numProcess,
                        TM_SHARED_READ(global_numProcess) + numProcess);
    }

    PREGION_FREE(regionPtr);
}
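One pattern worth noting in this version: all counting happens in thread-private variables (totalNumAdded, numProcess), which are folded into the shared globals in a single short transaction at the end, rather than transacting on every iteration. A minimal GCC-TM sketch of that reduction idiom (illustrative names; compile with -fgnu-tm):

static long global_total = 0;

static void
worker (const long* chunk, long chunkLen)
{
    long localTotal = 0;
    long i;
    for (i = 0; i < chunkLen; i++) {
        localTotal += chunk[i];      /* all work on private data */
    }
    __transaction_atomic {           /* one short transaction per thread */
        global_total += localTotal;
    }
}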
Code example #17
File: getStartLists.c Project: HPDCS/htmMCATS
/* =============================================================================
 * getStartLists
 * =============================================================================
 */
void
getStartLists (void* argPtr)
{
    TM_THREAD_ENTER();

    graph* GPtr                = ((getStartLists_arg_t*)argPtr)->GPtr;
    edge** maxIntWtListPtr     = ((getStartLists_arg_t*)argPtr)->maxIntWtListPtr;
    long*  maxIntWtListSize    = ((getStartLists_arg_t*)argPtr)->maxIntWtListSize;
    edge** soughtStrWtListPtr  = ((getStartLists_arg_t*)argPtr)->soughtStrWtListPtr;
    long*  soughtStrWtListSize = ((getStartLists_arg_t*)argPtr)->soughtStrWtListSize;

    long myId = thread_getId();
    long numThread = thread_getNumThread();

    /*
     * Find Max Wt on each thread
     */

    LONGINT_T maxWeight = 0;

    long i;
    long i_start;
    long i_stop;
    createPartition(0, GPtr->numEdges, myId, numThread, &i_start, &i_stop);

    for (i = i_start; i < i_stop; i++) {
        if (GPtr->intWeight[i] > maxWeight) {
            maxWeight = GPtr->intWeight[i];
        }
    }

    AL_LOCK(0);
    TM_BEGIN(9);
    long tmp_maxWeight = (long)TM_SHARED_READ(global_maxWeight);
    if (maxWeight > tmp_maxWeight) {
        TM_SHARED_WRITE(global_maxWeight, maxWeight);
    }
    TM_END();

    thread_barrier_wait();

    maxWeight = global_maxWeight;

    /*
     * Create partial lists
     */

    /*
     * Allocate mem. for temp edge list for each thread
     */
    long numTmpEdge = (5+ceil(1.5*(GPtr->numIntEdges)/MAX_INT_WEIGHT));
    edge* tmpEdgeList = (edge*)P_MALLOC(numTmpEdge * sizeof(edge));
    assert(tmpEdgeList);

    long i_edgeCounter = 0;

    for (i = i_start; i < i_stop; i++) {

        if (GPtr->intWeight[i] == maxWeight) {

            /* Find the corresponding endVertex */
            long j;
            for (j = 0; j < GPtr->numDirectedEdges; j++) {
                if (GPtr->paralEdgeIndex[j] > i) {
                    break;
                }
            }
            tmpEdgeList[i_edgeCounter].endVertex = GPtr->outVertexList[j-1];
            tmpEdgeList[i_edgeCounter].edgeNum = j-1;

            long t;
            for (t = 0; t < GPtr->numVertices; t++) {
                if (GPtr->outVertexIndex[t] > j-1) {
                    break;
                }
            }
            tmpEdgeList[i_edgeCounter].startVertex = t-1;

            i_edgeCounter++;

        }
    }

    /*
     * Merge partial edge lists
     */

    long* i_edgeStartCounter;
    long* i_edgeEndCounter;

    if (myId == 0) {
        i_edgeStartCounter = (long*)P_MALLOC(numThread * sizeof(long));
        assert(i_edgeStartCounter);
        global_i_edgeStartCounter = i_edgeStartCounter;
        i_edgeEndCounter = (long*)P_MALLOC(numThread * sizeof(long));
        assert(i_edgeEndCounter);
        global_i_edgeEndCounter = i_edgeEndCounter;

        *maxIntWtListSize = 0;
    }

    thread_barrier_wait();

    i_edgeStartCounter = global_i_edgeStartCounter;
    i_edgeEndCounter = global_i_edgeEndCounter;

    i_edgeEndCounter[myId] = i_edgeCounter;
    i_edgeStartCounter[myId] = 0;

    thread_barrier_wait();

    if (myId == 0) {
        for (i = 1; i < numThread; i++) {
            i_edgeEndCounter[i] = i_edgeEndCounter[i-1] + i_edgeEndCounter[i];
            i_edgeStartCounter[i] = i_edgeEndCounter[i-1];
        }
    }

    *maxIntWtListSize += i_edgeCounter;

    thread_barrier_wait();

    edge* maxIntWtList;

    if (myId == 0) {
        P_FREE(*maxIntWtListPtr);
        maxIntWtList = (edge*)P_MALLOC((*maxIntWtListSize) * sizeof(edge));
        assert(maxIntWtList);
        global_maxIntWtList = maxIntWtList;
    }

    thread_barrier_wait();

    maxIntWtList = global_maxIntWtList;

    for (i = i_edgeStartCounter[myId]; i<i_edgeEndCounter[myId]; i++) {
      (maxIntWtList[i]).startVertex = tmpEdgeList[i-i_edgeStartCounter[myId]].startVertex;
      (maxIntWtList[i]).endVertex = tmpEdgeList[i-i_edgeStartCounter[myId]].endVertex;
      (maxIntWtList[i]).edgeNum = tmpEdgeList[i-i_edgeStartCounter[myId]].edgeNum;
    }

    if (myId == 0) {
        *maxIntWtListPtr = maxIntWtList;
    }

    i_edgeCounter = 0;

    createPartition(0, GPtr->numStrEdges, myId, numThread, &i_start, &i_stop);

    for (i = i_start; i < i_stop; i++) {

        if (strncmp(GPtr->strWeight+i*MAX_STRLEN,
                    SOUGHT_STRING,
                    MAX_STRLEN) == 0)
        {
            /*
             * Find the corresponding endVertex
             */

            long t;
            for (t = 0; t < GPtr->numEdges; t++) {
                if (GPtr->intWeight[t] == -i) {
                    break;
                }
            }

            long j;
            for (j = 0; j < GPtr->numDirectedEdges; j++) {
                if (GPtr->paralEdgeIndex[j] > t) {
                    break;
                }
            }
            tmpEdgeList[i_edgeCounter].endVertex = GPtr->outVertexList[j-1];
            tmpEdgeList[i_edgeCounter].edgeNum = j-1;

            for (t = 0; t < GPtr->numVertices; t++) {
                if (GPtr->outVertexIndex[t] > j-1) {
                    break;
                }
            }
            tmpEdgeList[i_edgeCounter].startVertex = t-1;
            i_edgeCounter++;
        }

    }

    thread_barrier_wait();

    i_edgeEndCounter[myId] = i_edgeCounter;
    i_edgeStartCounter[myId] = 0;

    if (myId == 0) {
        *soughtStrWtListSize = 0;
    }

    thread_barrier_wait();

    if (myId == 0) {
        for (i = 1; i < numThread; i++) {
            i_edgeEndCounter[i] = i_edgeEndCounter[i-1] + i_edgeEndCounter[i];
            i_edgeStartCounter[i] = i_edgeEndCounter[i-1];
        }
    }

    *soughtStrWtListSize += i_edgeCounter;

    thread_barrier_wait();

    edge* soughtStrWtList;

    if (myId == 0) {
        P_FREE(*soughtStrWtListPtr);
        soughtStrWtList = (edge*)P_MALLOC((*soughtStrWtListSize) * sizeof(edge));
        assert(soughtStrWtList);
        global_soughtStrWtList = soughtStrWtList;
    }

    thread_barrier_wait();

    soughtStrWtList = global_soughtStrWtList;

    for (i = i_edgeStartCounter[myId]; i < i_edgeEndCounter[myId]; i++) {
        (soughtStrWtList[i]).startVertex =
            tmpEdgeList[i-i_edgeStartCounter[myId]].startVertex;
        (soughtStrWtList[i]).endVertex =
            tmpEdgeList[i-i_edgeStartCounter[myId]].endVertex;
        (soughtStrWtList[i]).edgeNum =
            tmpEdgeList[i-i_edgeStartCounter[myId]].edgeNum;
    }

    thread_barrier_wait();

    if (myId == 0) {
        *soughtStrWtListPtr = soughtStrWtList;
        P_FREE(i_edgeStartCounter);
        P_FREE(i_edgeEndCounter);
    }

    P_FREE(tmpEdgeList);

    TM_THREAD_EXIT();
}
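The endVertex/startVertex recovery loops above scan paralEdgeIndex and outVertexIndex linearly for the first element greater than a key. Both arrays are built non-decreasing in computeGraph, so an upper-bound binary search would yield the same j and t in logarithmic time; a sketch under that sortedness assumption (using unsigned long where the benchmark uses its ULONGINT_T typedef):

/* Return the smallest index j in [0, n) with a[j] > key, or n if none --
 * a drop-in replacement for the
 * `for (j = 0; j < n; j++) if (a[j] > key) break;` scans above. */
static long
upperBound (const unsigned long* a, long n, unsigned long key)
{
    long lo = 0;
    long hi = n;
    while (lo < hi) {
        long mid = lo + (hi - lo) / 2;
        if (a[mid] > key) {
            hi = mid;
        } else {
            lo = mid + 1;
        }
    }
    return lo;
}

With this, j = upperBound(GPtr->paralEdgeIndex, GPtr->numDirectedEdges, i) and the code would use j-1 exactly as the linear loops do.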
Code example #18
File: yada.c Project: HPDCS/htmMCATS
/* =============================================================================
 * process
 * =============================================================================
 */
void
process ()
{
    TM_THREAD_ENTER();

    heap_t* workHeapPtr = global_workHeapPtr;
    mesh_t* meshPtr = global_meshPtr;
    region_t* regionPtr;
    long totalNumAdded = 0;
    long numProcess = 0;

    regionPtr = PREGION_ALLOC();
    assert(regionPtr);

    while (1) {

        element_t* elementPtr;

        AL_LOCK(0);
        TM_BEGIN(0);
        elementPtr = TMHEAP_REMOVE(workHeapPtr);
        TM_END();
        if (elementPtr == NULL) {
            break;
        }

        bool_t isGarbage;
        AL_LOCK(0);
        TM_BEGIN(1);
        isGarbage = TMELEMENT_ISGARBAGE(elementPtr);
        TM_END();
        if (isGarbage) {
            /*
             * Handle delayed deallocation
             */
            PELEMENT_FREE(elementPtr);
            continue;
        }

        long numAdded;

        AL_LOCK(0);
        TM_BEGIN(2);
        PREGION_CLEARBAD(regionPtr);
        numAdded = TMREGION_REFINE(regionPtr, elementPtr, meshPtr);
        TM_END();

        AL_LOCK(0);
        TM_BEGIN(3);
        TMELEMENT_SETISREFERENCED(elementPtr, FALSE);
        isGarbage = TMELEMENT_ISGARBAGE(elementPtr);
        TM_END();
        if (isGarbage) {
            /*
             * Handle delayed deallocation
             */
            PELEMENT_FREE(elementPtr);
        }

        totalNumAdded += numAdded;

        AL_LOCK(0);
        TM_BEGIN(4);
        TMREGION_TRANSFERBAD(regionPtr, workHeapPtr);
        TM_END();

        numProcess++;

    }

    AL_LOCK(0);
    TM_BEGIN(5);
    TM_SHARED_WRITE(global_totalNumAdded,
                    TM_SHARED_READ(global_totalNumAdded) + totalNumAdded);
    TM_SHARED_WRITE(global_numProcess,
                    TM_SHARED_READ(global_numProcess) + numProcess);
    TM_END();

    PREGION_FREE(regionPtr);

    TM_THREAD_EXIT();
}
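Unlike the __transaction_atomic version in code example #16, this port brackets each section with AL_LOCK(0) (always lock 0, i.e., one global lock) and numbered TM_BEGIN(n)/TM_END() macros. Their definitions are not part of this listing; in RTM-based STAMP ports such macros commonly expand to a hardware transaction with a spinlock fallback, roughly as sketched below. This is an assumption about the general technique, not htmMCATS's actual macros:

#include <immintrin.h>   /* _xbegin/_xend/_xabort/_xtest; compile with -mrtm */

static volatile int fallback_lock = 0;

static void
tx_begin (void)
{
    int retries = 3;
    while (retries-- > 0) {
        while (fallback_lock) {
            /* wait for the lock holder before (re)starting */
        }
        unsigned status = _xbegin();
        if (status == _XBEGIN_STARTED) {
            /* Reading the lock adds it to our read set, so a later
             * lock acquisition aborts this transaction. */
            if (fallback_lock) {
                _xabort(0xff);
            }
            return;              /* now speculating in hardware */
        }
    }
    /* Retries exhausted: take the fallback lock non-speculatively. */
    while (__sync_lock_test_and_set(&fallback_lock, 1)) {
        /* spin */
    }
}

static void
tx_end (void)
{
    if (_xtest()) {
        _xend();                 /* commit the hardware transaction */
    } else {
        __sync_lock_release(&fallback_lock);
    }
}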
Code example #19
File: sequencer.c Project: YunlongXu/tm-study-pact14
/* =============================================================================
 * sequencer_run
 * =============================================================================
 */
void
sequencer_run (void* argPtr)
{
    TM_THREAD_ENTER();

    long threadId = thread_getId();

    sequencer_t* sequencerPtr = (sequencer_t*)argPtr;

    hashtable_t*      uniqueSegmentsPtr;
    endInfoEntry_t*   endInfoEntries;
    table_t**         startHashToConstructEntryTables;
    constructEntry_t* constructEntries;
    table_t*          hashToConstructEntryTable;

    uniqueSegmentsPtr               = sequencerPtr->uniqueSegmentsPtr;
    endInfoEntries                  = sequencerPtr->endInfoEntries;
    startHashToConstructEntryTables = sequencerPtr->startHashToConstructEntryTables;
    constructEntries                = sequencerPtr->constructEntries;
    hashToConstructEntryTable       = sequencerPtr->hashToConstructEntryTable;

    segments_t* segmentsPtr         = sequencerPtr->segmentsPtr;
    assert(segmentsPtr);
    vector_t*   segmentsContentsPtr = segmentsPtr->contentsPtr;
    long        numSegment          = vector_getSize(segmentsContentsPtr);
    long        segmentLength       = segmentsPtr->length;

    long i;
    long j;
    long i_start;
    long i_stop;
    long numUniqueSegment;
    long substringLength;
    long entryIndex;

    /*
     * Step 1: Remove duplicate segments
     */
// #if defined(HTM) || defined(STM)
    long numThread = thread_getNumThread();
    {
        /* Choose disjoint segments [i_start,i_stop) for each thread */
        long partitionSize = (numSegment + numThread/2) / numThread; /* with rounding */
        i_start = threadId * partitionSize;
        if (threadId == (numThread - 1)) {
            i_stop = numSegment;
        } else {
            i_stop = i_start + partitionSize;
        }
    }
// #else /* !(HTM || STM) */
//     i_start = 0;
//     i_stop = numSegment;
// #endif /* !(HTM || STM) */
    for (i = i_start; i < i_stop; i+=CHUNK_STEP1) {
        TM_BEGIN();
        {
            long ii;
            long ii_stop = MIN(i_stop, (i+CHUNK_STEP1));
            for (ii = i; ii < ii_stop; ii++) {
                void* segment = vector_at(segmentsContentsPtr, ii);
                TMHASHTABLE_INSERT(uniqueSegmentsPtr,
                                   segment,
                                   segment);
            } /* ii */
        }
        TM_END();
    }

    thread_barrier_wait();

    /*
     * Step 2a: Iterate over unique segments and compute hashes.
     *
     * For the gene "atcg", the hashes for the end would be:
     *
     *     "t", "tc", and "tcg"
     *
     * And for the gene "tcgg", the hashes for the start would be:
     *
     *    "t", "tc", and "tcg"
     *
     * The names are "end" and "start" because if a matching pair is found,
     * they are the substring of the end part of the pair and the start
     * part of the pair respectively. In the above example, "tcg" is the
     * matching substring so:
     *
     *     (end)    (start)
     *     a[tcg] + [tcg]g  = a[tcg]g    (overlap = "tcg")
     */

    /* uniqueSegmentsPtr is constant now */
    numUniqueSegment = hashtable_getSize(uniqueSegmentsPtr);
    entryIndex = 0;

// #if defined(HTM) || defined(STM)
    {
        /* Choose disjoint segments [i_start,i_stop) for each thread */
        long num = uniqueSegmentsPtr->numBucket;
        long partitionSize = (num + numThread/2) / numThread; /* with rounding */
        i_start = threadId * partitionSize;
        if (threadId == (numThread - 1)) {
            i_stop = num;
        } else {
            i_stop = i_start + partitionSize;
        }
    }
    {
        /* Approximate disjoint segments of element allocation in constructEntries */
        long partitionSize = (numUniqueSegment + numThread/2) / numThread; /* with rounding */
        entryIndex = threadId * partitionSize;
    }
// #else /* !(HTM || STM) */
//    i_start = 0;
//    i_stop = uniqueSegmentsPtr->numBucket;
//    entryIndex = 0;
//#endif /* !(HTM || STM) */

    for (i = i_start; i < i_stop; i++) {

        list_t* chainPtr = uniqueSegmentsPtr->buckets[i];
        list_iter_t it;
        list_iter_reset(&it, chainPtr);

        while (list_iter_hasNext(&it, chainPtr)) {

            char* segment =
                (char*)((pair_t*)list_iter_next(&it, chainPtr))->firstPtr;
            constructEntry_t* constructEntryPtr;
            long j;
            ulong_t startHash;
            bool_t status;

            /* Find an empty constructEntries entry */
            TM_BEGIN();
            while (((void*)TM_SHARED_READ_P(constructEntries[entryIndex].segment)) != NULL) {
                entryIndex = (entryIndex + 1) % numUniqueSegment; /* look for empty */
            }
            constructEntryPtr = &constructEntries[entryIndex];
            TM_SHARED_WRITE_P(constructEntryPtr->segment, segment);
            TM_END();
            entryIndex = (entryIndex + 1) % numUniqueSegment;

            /*
             * Save hashes (sdbm algorithm) of segment substrings
             *
             * endHashes will be computed for shorter substrings after matches
             * have been made (in the next phase of the code). This will reduce
             * the number of substrings for which hashes need to be computed.
             *
             * Since we can compute startHashes incrementally, we go ahead
             * and compute all of them here.
             */
            /* constructEntryPtr is local now */
            constructEntryPtr->endHash = (ulong_t)hashString(&segment[1]);

            startHash = 0;
            for (j = 1; j < segmentLength; j++) {
                startHash = (ulong_t)segment[j-1] +
                            (startHash << 6) + (startHash << 16) - startHash;
                TM_BEGIN();
                status = TMTABLE_INSERT(startHashToConstructEntryTables[j],
                                        (ulong_t)startHash,
                                        (void*)constructEntryPtr );
                TM_END();
                assert(status);
            }

            /*
             * For looking up construct entries quickly
             */
            startHash = (ulong_t)segment[j-1] +
                        (startHash << 6) + (startHash << 16) - startHash;
            TM_BEGIN();
            status = TMTABLE_INSERT(hashToConstructEntryTable,
                                    (ulong_t)startHash,
                                    (void*)constructEntryPtr);
            TM_END();
            assert(status);
        }
    }

    thread_barrier_wait();

    /*
     * Step 2b: Match ends to starts by using hash-based string comparison.
     */
    for (substringLength = segmentLength-1; substringLength > 0; substringLength--) {

        table_t* startHashToConstructEntryTablePtr =
            startHashToConstructEntryTables[substringLength];
        list_t** buckets = startHashToConstructEntryTablePtr->buckets;
        long numBucket = startHashToConstructEntryTablePtr->numBucket;

        long index_start;
        long index_stop;

// #if defined(HTM) || defined(STM)
        {
            /* Choose disjoint segments [index_start,index_stop) for each thread */
            long partitionSize = (numUniqueSegment + numThread/2) / numThread; /* with rounding */
            index_start = threadId * partitionSize;
            if (threadId == (numThread - 1)) {
                index_stop = numUniqueSegment;
            } else {
                index_stop = index_start + partitionSize;
            }
        }
// #else /* !(HTM || STM) */
//        index_start = 0;
//        index_stop = numUniqueSegment;
//#endif /* !(HTM || STM) */

        /* Iterating over disjoint intervals in the range [0, numUniqueSegment) */
        for (entryIndex = index_start;
             entryIndex < index_stop;
             entryIndex += endInfoEntries[entryIndex].jumpToNext)
        {
            if (!endInfoEntries[entryIndex].isEnd) {
                continue;
            }

            /*  ConstructEntries[entryIndex] is local data */
            constructEntry_t* endConstructEntryPtr =
                &constructEntries[entryIndex];
            char* endSegment = endConstructEntryPtr->segment;
            ulong_t endHash = endConstructEntryPtr->endHash;

            list_t* chainPtr = buckets[endHash % numBucket]; /* buckets: constant data */
            list_iter_t it;
            list_iter_reset(&it, chainPtr);

            /* Linked list at chainPtr is constant */
            while (list_iter_hasNext(&it, chainPtr)) {

                constructEntry_t* startConstructEntryPtr =
                    (constructEntry_t*)list_iter_next(&it, chainPtr);
                char* startSegment = startConstructEntryPtr->segment;
                long newLength = 0;

                /* endConstructEntryPtr is local except for properties startPtr/endPtr/length */
                TM_BEGIN();

                /* Check if matches */
                if (TM_SHARED_READ(startConstructEntryPtr->isStart) &&
                    (TM_SHARED_READ_P(endConstructEntryPtr->startPtr) != startConstructEntryPtr) &&
                    (strncmp(startSegment,
                             &endSegment[segmentLength - substringLength],
                             substringLength) == 0))
                {
                    TM_SHARED_WRITE(startConstructEntryPtr->isStart, FALSE);

                    constructEntry_t* startConstructEntry_endPtr;
                    constructEntry_t* endConstructEntry_startPtr;

                    /* Update endInfo (appended something so no longer end) */
                    TM_LOCAL_WRITE(endInfoEntries[entryIndex].isEnd, FALSE);

                    /* Update segment chain construct info */
                    startConstructEntry_endPtr =
                        (constructEntry_t*)TM_SHARED_READ_P(startConstructEntryPtr->endPtr);
                    endConstructEntry_startPtr =
                        (constructEntry_t*)TM_SHARED_READ_P(endConstructEntryPtr->startPtr);

                    assert(startConstructEntry_endPtr);
                    assert(endConstructEntry_startPtr);
                    TM_SHARED_WRITE_P(startConstructEntry_endPtr->startPtr,
                                      endConstructEntry_startPtr);
                    TM_LOCAL_WRITE_P(endConstructEntryPtr->nextPtr,
                                     startConstructEntryPtr);
                    TM_SHARED_WRITE_P(endConstructEntry_startPtr->endPtr,
                                      startConstructEntry_endPtr);
                    TM_SHARED_WRITE(endConstructEntryPtr->overlap, substringLength);
                    newLength = (long)TM_SHARED_READ(endConstructEntry_startPtr->length) +
                                (long)TM_SHARED_READ(startConstructEntryPtr->length) -
                                substringLength;
                    TM_SHARED_WRITE(endConstructEntry_startPtr->length, newLength);
                } /* if (matched) */

                TM_END();

                if (!endInfoEntries[entryIndex].isEnd) { /* if there was a match */
                    break;
                }
            } /* iterate over chain */

        } /* for (endIndex < numUniqueSegment) */

        thread_barrier_wait();

        /*
         * Step 2c: Update jump values and hashes
         *
         * endHash entries of all remaining ends are updated to the next
         * substringLength. Additionally jumpToNext entries are updated such
         * that they allow skipping non-end entries. Currently this is sequential
         * because parallelization did not perform better.
         */

        if (threadId == 0) {
            if (substringLength > 1) {
                long index = segmentLength - substringLength + 1;
                /* initialization of j and i: i is the next end after j=0 */
                for (i = 1; !endInfoEntries[i].isEnd; i+=endInfoEntries[i].jumpToNext) {
                    /* find the first end entry */
                }
                /* entry 0 is handled separately from the loop below */
                endInfoEntries[0].jumpToNext = i;
                if (endInfoEntries[0].isEnd) {
                    constructEntry_t* constructEntryPtr = &constructEntries[0];
                    char* segment = constructEntryPtr->segment;
                    constructEntryPtr->endHash = (ulong_t)hashString(&segment[index]);
                }
                /* Continue scanning (do not reset i) */
                for (j = 0; i < numUniqueSegment; i+=endInfoEntries[i].jumpToNext) {
                    if (endInfoEntries[i].isEnd) {
                        constructEntry_t* constructEntryPtr = &constructEntries[i];
                        char* segment = constructEntryPtr->segment;
                        constructEntryPtr->endHash = (ulong_t)hashString(&segment[index]);
                        endInfoEntries[j].jumpToNext = MAX(1, (i - j));
                        j = i;
                    }
                }
                endInfoEntries[j].jumpToNext = i - j;
            }
        }

        thread_barrier_wait();

    } /* for (substringLength > 0) */


    thread_barrier_wait();

    /*
     * Step 3: Build sequence string
     */
    if (threadId == 0) {

        long totalLength = 0;

        for (i = 0; i < numUniqueSegment; i++) {
            constructEntry_t* constructEntryPtr = &constructEntries[i];
            if (constructEntryPtr->isStart) {
              totalLength += constructEntryPtr->length;
            }
        }

        sequencerPtr->sequence = (char*)P_MALLOC((totalLength+1) * sizeof(char));
        char* sequence = sequencerPtr->sequence;
        assert(sequence);

        char* copyPtr = sequence;
        long sequenceLength = 0;

        for (i = 0; i < numUniqueSegment; i++) {
            constructEntry_t* constructEntryPtr = &constructEntries[i];
            /* If there are several start segments, we append in arbitrary order  */
            if (constructEntryPtr->isStart) {
                long newSequenceLength = sequenceLength + constructEntryPtr->length;
                assert( newSequenceLength <= totalLength );
                copyPtr = sequence + sequenceLength;
                sequenceLength = newSequenceLength;
                do {
                    long numChar = segmentLength - constructEntryPtr->overlap;
                    if ((copyPtr + numChar) > (sequence + newSequenceLength)) {
                        TM_PRINT0("ERROR: sequence length != actual length\n");
                        break;
                    }
                    memcpy(copyPtr,
                           constructEntryPtr->segment,
                           (numChar * sizeof(char)));
                    copyPtr += numChar;
                } while ((constructEntryPtr = constructEntryPtr->nextPtr) != NULL);
                assert(copyPtr <= (sequence + sequenceLength));
            }
        }

        assert(sequence != NULL);
        sequence[sequenceLength] = '\0';
    }

    TM_THREAD_EXIT();
}
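The comment in Step 2a names the sdbm string hash, and the incremental startHash updates above spell out its recurrence, hash = c + (hash << 6) + (hash << 16) - hash. The hashString() helper is therefore presumably the standard sdbm loop; a sketch (using unsigned long where the benchmark uses its ulong_t typedef):

/* Presumed definition of hashString(): the sdbm hash, matching the
 * incremental startHash computation in Step 2a. */
static unsigned long
hashString (const char* str)
{
    unsigned long hash = 0;
    int c;
    while ((c = *str++) != '\0') {
        hash = (unsigned long)c + (hash << 6) + (hash << 16) - hash;
    }
    return hash;
}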
Code example #20
File: array-locks.c Project: nmldiegues/stamp-rtm
void client_run (void* argPtr) {
    TM_THREAD_ENTER();

    random_t* randomPtr = random_alloc();
    random_seed(randomPtr, time(0));

    // unsigned long myId = thread_getId();
    // long numThread = *((long*)argPtr);
    long operations = (long)global_params[PARAM_OPERATIONS] / (long)global_params[PARAM_THREADS];
    long interval = (long)global_params[PARAM_INTERVAL];
    printf("operations: %ld \tinterval: %ld\n", operations, interval);

    long total = 0;
    long total2 = 0;

    long i = 0;
    unsigned int cont_size = (unsigned int) global_params[PARAM_CONTENTION];
    unsigned int* sorted_locks = (unsigned int*) malloc((2 + cont_size) * sizeof(int));
    unsigned int* read_idxs = (unsigned int*) malloc(cont_size * sizeof(int));

    for (; i < operations; i++) {
        long random_number = ((long) random_generate(randomPtr)) % ((long)global_params[PARAM_SIZE]);
        long random_number2 = ((long) random_generate(randomPtr)) % ((long)global_params[PARAM_SIZE]);
        if (random_number == random_number2) {
            random_number2 = (random_number2 + 1) % ((long)global_params[PARAM_SIZE]);
        }

        int repeat = 0;
        for (; repeat < cont_size; repeat++) {
            read_idxs[repeat] = ((unsigned int) random_generate(randomPtr)) % ((unsigned int)global_params[PARAM_SIZE]);
            LI_HASH(&global_array[read_idxs[repeat]], &sorted_locks[repeat + 2]);
        }

        // TM_BEGIN();
        LI_HASH(&global_array[random_number], &sorted_locks[0]);
        LI_HASH(&global_array[random_number2], &sorted_locks[1]);
        TM_BEGIN_ARGS(sorted_locks, cont_size + 2);

        long r1 = (long)TM_SHARED_READ(global_array[random_number].value);
        long r2 = (long)TM_SHARED_READ(global_array[random_number2].value);

        for (repeat--; repeat >= 0; repeat--) {
            total2 += (long) TM_SHARED_READ(global_array[read_idxs[repeat]].value);
        }
        r1 = r1 + 1;
        r2 = r2 - 1;

        int f = 1;
        int ii;
        for(ii = 1; ii <= ((unsigned int) global_params[PARAM_WORK]); ii++)
        {
            f *= ii;
        }
        total += f / 1000000;

        TM_SHARED_WRITE(global_array[random_number].value, r1);
        TM_SHARED_WRITE(global_array[random_number2].value, r2);
        TM_END_ARGS(sorted_locks, cont_size + 2);

        long k = 0;
        for (;k < (long)global_params[PARAM_INTERVAL]; k++) {
            long ru = ((long) random_generate(randomPtr)) % 2;
            total += ru;
        }

    }

    TM_THREAD_EXIT();
    printf("ru ignore %ld - %ld\n", total, total2);
}
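The buffer passed to TM_BEGIN_ARGS is literally named sorted_locks, and LI_HASH maps an element address to a slot in it, which suggests the macros acquire the per-address locks in sorted index order -- the classic total-order discipline that rules out deadlock when several locks are taken at once. Neither LI_HASH nor TM_BEGIN_ARGS is defined in this listing, so the following is only a sketch of that discipline with assumed names:

#include <stdlib.h>

static int
cmp_uint (const void* a, const void* b)
{
    unsigned int x = *(const unsigned int*)a;
    unsigned int y = *(const unsigned int*)b;
    return (x > y) - (x < y);
}

/* Acquire every lock in ascending index order; imposing one global order
 * on lock indices prevents circular waiting between threads. */
static void
acquire_sorted (unsigned int* lockIdx, unsigned int n,
                volatile int* lockTable)
{
    unsigned int i;
    qsort(lockIdx, n, sizeof(unsigned int), cmp_uint);
    for (i = 0; i < n; i++) {
        if (i > 0 && lockIdx[i] == lockIdx[i-1]) {
            continue;            /* skip duplicate lock indices */
        }
        while (__sync_lock_test_and_set(&lockTable[lockIdx[i]], 1)) {
            /* spin */
        }
    }
}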