Example #1
0
long
TMelement_compare (TM_ARGDECL element_t* aElementPtr, element_t* bElementPtr)
{
    element_t *aEP = NULL, *bEP = NULL;
    aEP = TM_SHARED_READ_P(aElementPtr);
    bEP = TM_SHARED_READ_P(bElementPtr);
    long aNumCoordinate = TM_SHARED_READ_L(aEP->numCoordinate);
    long bNumCoordinate = TM_SHARED_READ_L(bEP->numCoordinate);
    coordinate_t* aCoordinates = aEP->coordinates;
    coordinate_t* bCoordinates = bEP->coordinates;

    if (aNumCoordinate < bNumCoordinate) {
        return -1;
    } else if (aNumCoordinate > bNumCoordinate) {
        return 1;
    }

    long i;
    for (i = 0; i < aNumCoordinate; i++) {
        long compareCoordinate =
            TMcoordinate_compare(TM_ARG &aCoordinates[i], &bCoordinates[i]);
        if (compareCoordinate != 0) {
            return compareCoordinate;
        }
    }

    return 0;
}
Example #2
0
/* =============================================================================
 * TMheap_insert
 * -- Returns false on failure
 * =============================================================================
 */
bool
TMheap_insert (TM_ARGDECL  heap_t* heapPtr, void* dataPtr)
{
    long size = (long)TM_SHARED_READ_L(heapPtr->size);
    long capacity = (long)TM_SHARED_READ_L(heapPtr->capacity);
    if ((size + 1) >= capacity) {
        long newCapacity = capacity * 2;
        void** newElements = (void**)TM_MALLOC(newCapacity * sizeof(void*));
        if (newElements == NULL) {
            return false;
        }
        TM_SHARED_WRITE_L(heapPtr->capacity, newCapacity);
        long i;
        void** elements = TM_SHARED_READ_P(heapPtr->elements);
        for (i = 0; i <= size; i++) {
            newElements[i] = (void*)TM_SHARED_READ_P(elements[i]);
        }
        TM_FREE(elements);
        TM_SHARED_WRITE_P(heapPtr->elements, newElements);
    }

    size++;
    TM_SHARED_WRITE_L(heapPtr->size, size);
    void** elements = (void**)TM_SHARED_READ_P(heapPtr->elements);
    TM_SHARED_WRITE_P(elements[size], dataPtr);
    TMsiftUp(TM_ARG heapPtr, size);

    return true;
}
Example #3
0
/* =============================================================================
 * TMlist_insert
 * -- Return TRUE on success, else FALSE
 * =============================================================================
 */
bool_t
TMlist_insert (TM_ARGDECL  list_t* listPtr, void* dataPtr)
{
    list_node_t* prevPtr;
    list_node_t* nodePtr;
    list_node_t* currPtr;

    prevPtr = TMfindPrevious(TM_ARG  listPtr, dataPtr);
    currPtr = (list_node_t*)TM_SHARED_READ_P(prevPtr->nextPtr);

#ifdef LIST_NO_DUPLICATES
    if ((currPtr != NULL) &&
        listPtr->comparator->compare_tm(TM_ARG TM_SHARED_READ_P(currPtr->dataPtr), dataPtr) == 0) {
        return FALSE;
    }
#endif

    nodePtr = TMallocNode(TM_ARG  dataPtr);
    if (nodePtr == NULL) {
        return FALSE;
    }

    TM_SHARED_WRITE_P(nodePtr->nextPtr, currPtr);
    TM_SHARED_WRITE_P(prevPtr->nextPtr, nodePtr);
    TM_SHARED_WRITE_L(listPtr->size, (TM_SHARED_READ_L(listPtr->size) + 1));

    return TRUE;
}
Example #4
0
/* =============================================================================
 * customer_removeReservationInfo
 * -- Returns TRUE if success, else FALSE
 * =============================================================================
 */
bool_t
customer_removeReservationInfo (TM_ARGDECL
                                customer_t* customerPtr,
                                reservation_type_t type, long id)
{
    reservation_info_t findReservationInfo;

    findReservationInfo.type = type;
    findReservationInfo.id = id;
    /* price not used to compare reservation infos */

    list_t* reservationInfoListPtr =
        (list_t*)TM_SHARED_READ_L(customerPtr->reservationInfoListPtr);

    reservation_info_t* reservationInfoPtr =
        (reservation_info_t*)TMLIST_FIND(reservationInfoListPtr,
                                         &findReservationInfo);

    if (reservationInfoPtr == NULL) {
        return FALSE;
    }

    bool_t status = TMLIST_REMOVE(reservationInfoListPtr,
                                  (void*)&findReservationInfo);
    if (status == FALSE) {
        TM_RESTART();
    }

    RESERVATION_INFO_FREE(reservationInfoPtr);

    return TRUE;
}
Example #5
0
/* =============================================================================
 * addReservation
 * -- If 'num' > 0 then add, if < 0 remove
 * -- Adding 0 seats is error if does not exist
 * -- If 'price' < 0, do not update price
 * -- Returns true on success, else false
 * =============================================================================
 */
bool
addReservation (TM_ARGDECL  MAP_T* tablePtr, long id, long num, long price)
{
    reservation_t* reservationPtr;

    reservationPtr = (reservation_t*)TMMAP_FIND(tablePtr, id);
    if (reservationPtr == NULL) {
        /* Create new reservation */
        if (num < 1 || price < 0) {
            return false;
        }
        reservationPtr = RESERVATION_ALLOC(id, num, price);
        assert(reservationPtr != NULL);
        TMMAP_INSERT(tablePtr, id, reservationPtr);
    } else {
        /* Update existing reservation */
        if (!RESERVATION_ADD_TO_TOTAL(reservationPtr, num)) {
            return false;
        }
        if ((long)TM_SHARED_READ_L(reservationPtr->numTotal) == 0) {
            bool status = TMMAP_REMOVE(tablePtr, id);
            if (!status) {
                TM_RESTART();
            }
            RESERVATION_FREE(reservationPtr);
        } else {
            RESERVATION_UPDATE_PRICE(reservationPtr, price);
        }
    }

    return true;
}
Example #6
0
/* =============================================================================
 * customer_free
 * =============================================================================
 */
void
customer_free (TM_ARGDECL  customer_t* customerPtr)
{
    list_t* reservationInfoListPtr =
        (list_t*)TM_SHARED_READ_L(customerPtr->reservationInfoListPtr);
    TMLIST_FREE(reservationInfoListPtr);
    TM_FREE(customerPtr);
}
Example #7
0
/* =============================================================================
 * manager_deleteFlight
 * -- Delete an entire flight
 * -- Fails if customer has reservation on this flight
 * -- Returns true on success, else false
 * =============================================================================
 */
bool
manager_deleteFlight (TM_ARGDECL  manager_t* managerPtr, long flightId)
{
    reservation_t* reservationPtr;

    reservationPtr = (reservation_t*)TMMAP_FIND(managerPtr->flightTablePtr, flightId);
    if (reservationPtr == NULL) {
        return false;
    }

    if ((long)TM_SHARED_READ_L(reservationPtr->numUsed) > 0) {
        return false; /* somebody has a reservation */
    }

    return addReservation(TM_ARG
                          managerPtr->flightTablePtr,
                          flightId,
                          -1*(long)TM_SHARED_READ_L(reservationPtr->numTotal),
                          -1 /* -1 keeps old price */);
}
Example #8
0
/* =============================================================================
 * queryPrice
 * -- Return price of a reservation, -1 if failure
 * =============================================================================
 */
static long
queryPrice (TM_ARGDECL  MAP_T* tablePtr, long id)
{
    long price = -1;
    reservation_t* reservationPtr;

    reservationPtr = (reservation_t*)TMMAP_FIND(tablePtr, id);
    if (reservationPtr != NULL) {
        price = (long)TM_SHARED_READ_L(reservationPtr->price);
    }

    return price;
}
Example #9
0
/* =============================================================================
 * queryNumFree
 * -- Return numFree of a reservation, -1 if failure
 * =============================================================================
 */
static long
queryNumFree (TM_ARGDECL  MAP_T* tablePtr, long id)
{
    long numFree = -1;
    reservation_t* reservationPtr;

    reservationPtr = (reservation_t*)TMMAP_FIND(tablePtr, id);
    if (reservationPtr != NULL) {
        numFree = (long)TM_SHARED_READ_L(reservationPtr->numFree);
    }

    return numFree;
}
Example #10
0
/* =============================================================================
 * customer_addReservationInfo
 * -- Returns TRUE if success, else FALSE
 * =============================================================================
 */
bool_t
customer_addReservationInfo (TM_ARGDECL
                             customer_t* customerPtr,
                             reservation_type_t type, long id, long price)
{
    reservation_info_t* reservationInfoPtr;

    reservationInfoPtr = RESERVATION_INFO_ALLOC(type, id, price);
    assert(reservationInfoPtr != NULL);

    list_t* reservationInfoListPtr =
        (list_t*)TM_SHARED_READ_L(customerPtr->reservationInfoListPtr);

    return TMLIST_INSERT(reservationInfoListPtr, (void*)reservationInfoPtr);
}
Example #11
0
/* =============================================================================
 * TMgrid_addPath
 * =============================================================================
 */
void
TMgrid_addPath (TM_ARGDECL  grid_t* gridPtr, vector_t* pointVectorPtr)
{
    long i;
    long n = vector_getSize(pointVectorPtr);

    for (i = 1; i < (n-1); i++) {
        long* gridPointPtr = (long*)vector_at(pointVectorPtr, i);
        long value = (long)TM_SHARED_READ_L(*gridPointPtr);
        if (value != GRID_POINT_EMPTY) {
            TM_RESTART();
        }
        TM_SHARED_WRITE_L(*gridPointPtr, (long)GRID_POINT_FULL);
    }
}
Example #12
0
/* =============================================================================
 * TMheap_remove
 * -- Returns NULL if empty
 * =============================================================================
 */
void*
TMheap_remove (TM_ARGDECL  heap_t* heapPtr)
{
    long size = (long)TM_SHARED_READ_L(heapPtr->size);

    if (size < 1) {
        return NULL;
    }

    void** elements = (void**)TM_SHARED_READ_P(heapPtr->elements);
    void* dataPtr = (void*)TM_SHARED_READ_P(elements[1]);
    TM_SHARED_WRITE_P(elements[1], TM_SHARED_READ_P(elements[size]));
    TM_SHARED_WRITE_L(heapPtr->size, (size - 1));
    TMheapify(TM_ARG  heapPtr, 1);

    return dataPtr;
}
Example #13
0
/* =============================================================================
 * customer_getBill
 * -- Returns total cost of reservations
 * =============================================================================
 */
long
customer_getBill (TM_ARGDECL  customer_t* customerPtr)
{
    long bill = 0;
    list_iter_t it;
    list_t* reservationInfoListPtr =
        (list_t*)TM_SHARED_READ_L(customerPtr->reservationInfoListPtr);

    TMLIST_ITER_RESET(&it, reservationInfoListPtr);
    while (TMLIST_ITER_HASNEXT(&it, reservationInfoListPtr)) {
        reservation_info_t* reservationInfoPtr =
            (reservation_info_t*)TMLIST_ITER_NEXT(&it, reservationInfoListPtr);
        bill += reservationInfoPtr->price;
    }

    return bill;
}
Example #14
0
/* =============================================================================
 * TMheapify
 * =============================================================================
 */
static void
TMheapify (TM_ARGDECL  heap_t* heapPtr, long startIndex)
{
    void** elements = (void**)TM_SHARED_READ_P(heapPtr->elements);
    long (*compare)(TM_ARGDECL const void*, const void*) = heapPtr->compare->compare_tm;

    long size = (long)TM_SHARED_READ_L(heapPtr->size);
    long index = startIndex;

    while (1) {

        long leftIndex = LEFT_CHILD(index);
        long rightIndex = RIGHT_CHILD(index);
        long maxIndex = -1;

        if ((leftIndex <= size) &&
            (compare(TM_ARG
                     (void*)TM_SHARED_READ_P(elements[leftIndex]),
                     (void*)TM_SHARED_READ_P(elements[index])) > 0))
        {
            maxIndex = leftIndex;
        } else {
            maxIndex = index;
        }

        if ((rightIndex <= size) &&
            (compare(TM_ARG
                     (void*)TM_SHARED_READ_P(elements[rightIndex]),
                     (void*)TM_SHARED_READ_P(elements[maxIndex])) > 0))
        {
            maxIndex = rightIndex;
        }

        if (maxIndex == index) {
            break;
        } else {
            void* tmpPtr = (void*)TM_SHARED_READ_P(elements[index]);
            TM_SHARED_WRITE_P(elements[index],
                              (void*)TM_SHARED_READ_P(elements[maxIndex]));
            TM_SHARED_WRITE_P(elements[maxIndex], tmpPtr);
            index = maxIndex;
        }
    }
}
Example #15
0
coordinate_t
TMelement_getNewPoint (TM_ARGDECL element_t* elementPtr)
{
    edge_t* encroachedEdgePtr = TM_SHARED_READ_P(elementPtr->encroachedEdgePtr);
    void *edge;
    if (encroachedEdgePtr) {
        long e;
        long numEdge = TM_SHARED_READ_L(elementPtr->numEdge);
        edge_t *edges = elementPtr->edges;
        for (e = 0; e < numEdge; e++) {
            edge = &edges[e];
            edge = TM_SHARED_READ_P(edge);
            if (TMcompareEdge(TM_ARG encroachedEdgePtr, (edge_t*)edge) == 0) {
                return elementPtr->midpoints[e];
            }
        }
        assert(0);
    }

    return elementPtr->circumCenter;
}
Example #16
0
/* =============================================================================
 * reserve
 * -- Customer is not allowed to reserve same (type, id) multiple times
 * -- Returns true on success, else false
 * =============================================================================
 */
static bool
reserve (TM_ARGDECL
         MAP_T* tablePtr, MAP_T* customerTablePtr,
         long customerId, long id, reservation_type_t type)
{
    customer_t* customerPtr;
    reservation_t* reservationPtr;

    customerPtr = (customer_t*)TMMAP_FIND(customerTablePtr, customerId);
    if (customerPtr == NULL) {
        return false;
    }

    reservationPtr = (reservation_t*)TMMAP_FIND(tablePtr, id);
    if (reservationPtr == NULL) {
        return false;
    }

    if (!RESERVATION_MAKE(reservationPtr)) {
        return false;
    }

    if (!CUSTOMER_ADD_RESERVATION_INFO(
            customerPtr,
            type,
            id,
            (long)TM_SHARED_READ_L(reservationPtr->price)))
    {
        /* Undo previous successful reservation */
        bool status = RESERVATION_CANCEL(reservationPtr);
        if (!status) {
            TM_RESTART();
        }
        return false;
    }

    return true;
}
Example #17
0
/* =============================================================================
 * TMlist_remove
 * -- Returns TRUE if successful, else FALSE
 * =============================================================================
 */
bool_t
TMlist_remove (TM_ARGDECL  list_t* listPtr, void* dataPtr)
{
    list_node_t* prevPtr;
    list_node_t* nodePtr;

    prevPtr = TMfindPrevious(TM_ARG  listPtr, dataPtr);

    nodePtr = (list_node_t*)TM_SHARED_READ_P(prevPtr->nextPtr);
    if ((nodePtr != NULL) &&
        (listPtr->comparator->compare_tm(TM_ARG TM_SHARED_READ_P(nodePtr->dataPtr), dataPtr) == 0))
    {
        TM_SHARED_WRITE_P(prevPtr->nextPtr, TM_SHARED_READ_P(nodePtr->nextPtr));
        TM_SHARED_WRITE_P(nodePtr->nextPtr, (struct list_node*)NULL);
        TMfreeNode(TM_ARG  nodePtr);
        TM_SHARED_WRITE_L(listPtr->size, (TM_SHARED_READ_L(listPtr->size) - 1));
        assert(listPtr->size >= 0);

        return TRUE;
    }

    return FALSE;
}
Example #18
0
/* =============================================================================
 * getStartLists
 * =============================================================================
 */
void
getStartLists (void* argPtr)
{
    TM_THREAD_ENTER();

    graph* GPtr                = ((getStartLists_arg_t*)argPtr)->GPtr;
    edge** maxIntWtListPtr     = ((getStartLists_arg_t*)argPtr)->maxIntWtListPtr;
    long*  maxIntWtListSize    = ((getStartLists_arg_t*)argPtr)->maxIntWtListSize;
    edge** soughtStrWtListPtr  = ((getStartLists_arg_t*)argPtr)->soughtStrWtListPtr;
    long*  soughtStrWtListSize = ((getStartLists_arg_t*)argPtr)->soughtStrWtListSize;

    long myId = thread_getId();
    long numThread = thread_getNumThread();

    /*
     * Find Max Wt on each thread
     */

    LONGINT_T maxWeight = 0;

    long i;
    long i_start;
    long i_stop;
    createPartition(0, GPtr->numEdges, myId, numThread, &i_start, &i_stop);

    for (i = i_start; i < i_stop; i++) {
        if (GPtr->intWeight[i] > maxWeight) {
            maxWeight = GPtr->intWeight[i];
        }
    }

    TM_BEGIN();
    long tmp_maxWeight = (long)TM_SHARED_READ_L(*global_maxWeight);
    if (maxWeight > tmp_maxWeight) {
        TM_SHARED_WRITE_L(*global_maxWeight, maxWeight);
    }
    TM_END();

    thread_barrier_wait();

    maxWeight = *global_maxWeight;

    /*
     * Create partial lists
     */

    /*
     * Allocate mem. for temp edge list for each thread
     */
    long numTmpEdge = (5+ceil(1.5*(GPtr->numIntEdges)/MAX_INT_WEIGHT));
    edge* tmpEdgeList = (edge*)P_MALLOC(numTmpEdge * sizeof(edge));

    long i_edgeCounter = 0;

    for (i = i_start; i < i_stop; i++) {

        if (GPtr->intWeight[i] == maxWeight) {

            /* Find the corresponding endVertex */
            unsigned long j;
            for (j = 0; j < GPtr->numDirectedEdges; j++) {
                if (GPtr->paralEdgeIndex[j] > (unsigned long)i) {
                    break;
                }
            }
            tmpEdgeList[i_edgeCounter].endVertex = GPtr->outVertexList[j-1];
            tmpEdgeList[i_edgeCounter].edgeNum = j-1;

            unsigned long t;
            for (t = 0; t < GPtr->numVertices; t++) {
                if (GPtr->outVertexIndex[t] > j-1) {
                    break;
                }
            }
            tmpEdgeList[i_edgeCounter].startVertex = t-1;

            i_edgeCounter++;

        }
    }

    /*
     * Merge partial edge lists
     */

    long* i_edgeStartCounter;
    long* i_edgeEndCounter;

    if (myId == 0) {
        i_edgeStartCounter = (long*)P_MALLOC(numThread * sizeof(long));
        assert(i_edgeStartCounter);
        global_i_edgeStartCounter = i_edgeStartCounter;
        i_edgeEndCounter = (long*)P_MALLOC(numThread * sizeof(long));
        assert(i_edgeEndCounter);
        global_i_edgeEndCounter = i_edgeEndCounter;

        *maxIntWtListSize = 0;
    }

    thread_barrier_wait();

    i_edgeStartCounter = global_i_edgeStartCounter;
    i_edgeEndCounter = global_i_edgeEndCounter;

    i_edgeEndCounter[myId] = i_edgeCounter;
    i_edgeStartCounter[myId] = 0;

    thread_barrier_wait();

    if (myId == 0) {
        for (i = 1; i < numThread; i++) {
            i_edgeEndCounter[i] = i_edgeEndCounter[i-1] + i_edgeEndCounter[i];
            i_edgeStartCounter[i] = i_edgeEndCounter[i-1];
        }
    }

    *maxIntWtListSize += i_edgeCounter;

    thread_barrier_wait();

    edge* maxIntWtList;

    if (myId == 0) {
        P_FREE(*maxIntWtListPtr);
        maxIntWtList = (edge*)P_MALLOC((*maxIntWtListSize) * sizeof(edge));
        assert(maxIntWtList);
        global_maxIntWtList = maxIntWtList;
    }

    thread_barrier_wait();

    maxIntWtList = global_maxIntWtList;

    for (i = i_edgeStartCounter[myId]; i < i_edgeEndCounter[myId]; i++) {
        (maxIntWtList[i]).startVertex =
            tmpEdgeList[i-i_edgeStartCounter[myId]].startVertex;
        (maxIntWtList[i]).endVertex =
            tmpEdgeList[i-i_edgeStartCounter[myId]].endVertex;
        (maxIntWtList[i]).edgeNum =
            tmpEdgeList[i-i_edgeStartCounter[myId]].edgeNum;
    }

    if (myId == 0) {
        *maxIntWtListPtr = maxIntWtList;
    }

    i_edgeCounter = 0;

    createPartition(0, GPtr->numStrEdges, myId, numThread, &i_start, &i_stop);

    for (i = i_start; i < i_stop; i++) {

        if (strncmp(GPtr->strWeight+i*MAX_STRLEN,
                    SOUGHT_STRING,
                    MAX_STRLEN) == 0)
        {
            /*
             * Find the corresponding endVertex
             */

            unsigned long t;
            for (t = 0; t < GPtr->numEdges; t++) {
                if (GPtr->intWeight[t] == -i) {
                    break;
                }
            }

            unsigned long j;
            for (j = 0; j < GPtr->numDirectedEdges; j++) {
                if (GPtr->paralEdgeIndex[j] > t) {
                    break;
                }
            }
            tmpEdgeList[i_edgeCounter].endVertex = GPtr->outVertexList[j-1];
            tmpEdgeList[i_edgeCounter].edgeNum = j-1;

            for (t = 0; t < GPtr->numVertices; t++) {
                if (GPtr->outVertexIndex[t] > j-1) {
                    break;
                }
            }
            tmpEdgeList[i_edgeCounter].startVertex = t-1;
            i_edgeCounter++;
        }

    }

    thread_barrier_wait();

    i_edgeEndCounter[myId] = i_edgeCounter;
    i_edgeStartCounter[myId] = 0;

    if (myId == 0) {
        *soughtStrWtListSize = 0;
    }

    thread_barrier_wait();

    if (myId == 0) {
        for (i = 1; i < numThread; i++) {
            i_edgeEndCounter[i] = i_edgeEndCounter[i-1] + i_edgeEndCounter[i];
            i_edgeStartCounter[i] = i_edgeEndCounter[i-1];
        }
    }

    *soughtStrWtListSize += i_edgeCounter;

    thread_barrier_wait();

    edge* soughtStrWtList;

    if (myId == 0) {
        P_FREE(*soughtStrWtListPtr);
        soughtStrWtList = (edge*)P_MALLOC((*soughtStrWtListSize) * sizeof(edge));
        assert(soughtStrWtList);
        global_soughtStrWtList = soughtStrWtList;
    }

    thread_barrier_wait();

    soughtStrWtList = global_soughtStrWtList;

    for (i = i_edgeStartCounter[myId]; i < i_edgeEndCounter[myId]; i++) {
        (soughtStrWtList[i]).startVertex =
            tmpEdgeList[i-i_edgeStartCounter[myId]].startVertex;
        (soughtStrWtList[i]).endVertex =
            tmpEdgeList[i-i_edgeStartCounter[myId]].endVertex;
        (soughtStrWtList[i]).edgeNum =
            tmpEdgeList[i-i_edgeStartCounter[myId]].edgeNum;
    }

    thread_barrier_wait();

    if (myId == 0) {
        *soughtStrWtListPtr = soughtStrWtList;
        P_FREE(i_edgeStartCounter);
        P_FREE(i_edgeEndCounter);
    }

    P_FREE(tmpEdgeList);

    TM_THREAD_EXIT();
}
Example #19
0
void client_run (void* argPtr) {
    TM_THREAD_ENTER();

    /*long id = thread_getId();

    volatile long* ptr1 = &(global_array[0].value);
    volatile long* ptr2 = &(global_array[100].value);
    long tt = 0;
    if (id == 0) {
        while (1) {
            long v1 = 0;
            long v2 = 0;
            acquire_write(&(local_th_data[phys_id]), &the_lock);
            *ptr1 = (*ptr1) + 1;

            int f = 1;
            int ii;
            for(ii = 1; ii <= 100000000; ii++)
            {
                f *= ii;
            }
            tt += f;

            *ptr2 = (*ptr2) + 1;
            v1 = global_array[0].value;
            v2 = global_array[100].value;
            release_write(cluster_id, &(local_th_data[phys_id]), &the_lock); \
                if (v1 != v2) {
                    printf("different2! %ld %ld\n", v1, v2);
                    exit(1);
                }

        }
    } else {
        while (1) {
            int i = 0;
            long sum = 0;
            for (; i < 100000; i++) {
                int status = _xbegin();
                if (status == _XBEGIN_STARTED) {
                    sum += *ptr1;
                    sum += *ptr2;
                    _xend();
                }
            }
            while(1) {
                long v1 = 0;
                long v2 = 0;
                int status = _xbegin();
                if (status == _XBEGIN_STARTED) {
                    v1 = *ptr1;
                    v2 = *ptr2;
                    _xend();
                if (v1 != v2) {
                    printf("different! %ld %ld\n", v1, v2);
                    exit(1);
                }

                }
            }
        }
    }
    printf("%ld", tt);*/


    random_t* randomPtr = random_alloc();
    random_seed(randomPtr, time(0));

    // unsigned long myId = thread_getId();
    // long numThread = *((long*)argPtr);
    long operations = (long)global_params[PARAM_OPERATIONS] / (long)global_params[PARAM_THREADS];
    long interval = (long)global_params[PARAM_INTERVAL];
    printf("operations: %ld \tinterval: %ld\n", operations, interval);

    long total = 0;
    long total2 = 0;

    long i = 0;
    for (; i < operations; i++) {
        long random_number = ((long) random_generate(randomPtr)) % ((long)global_params[PARAM_SIZE]);
        long random_number2 = ((long) random_generate(randomPtr)) % ((long)global_params[PARAM_SIZE]);
        if (random_number == random_number2) {
            random_number2 = (random_number2 + 1) % ((long)global_params[PARAM_SIZE]);
        }
        TM_BEGIN();
        long r1 = (long)TM_SHARED_READ_L(global_array[random_number].value);
        long r2 = (long)TM_SHARED_READ_L(global_array[random_number2].value);

        int repeat = 0;
        for (; repeat < (long) global_params[PARAM_CONTENTION]; repeat++) {
            total2 += (long) TM_SHARED_READ_L(global_array[((long) random_generate(randomPtr)) % ((long)global_params[PARAM_SIZE])].value);
        }
        r1 = r1 + 1;
        r2 = r2 - 1;

        int f = 1;
        int ii;
        for(ii = 1; ii <= ((unsigned int) global_params[PARAM_WORK]); ii++)
        {
            f *= ii;
        }
        total += f / 1000000;

        TM_SHARED_WRITE_L(global_array[random_number].value, r1);
        TM_SHARED_WRITE_L(global_array[random_number2].value, r2);
        TM_END();

        long k = 0;
        for (;k < (long)global_params[PARAM_INTERVAL]; k++) {
            long ru = ((long) random_generate(randomPtr)) % 2;
            total += ru;
        }

    }

    TM_THREAD_EXIT();
    printf("ru ignore %ld - %ld\n", total, total2);
}
Example #20
0
/* =============================================================================
 * work
 * =============================================================================
 */
static void
work (void* argPtr)
{
    
    TM_THREAD_ENTER();

    args_t* args = (args_t*)argPtr;
    float** feature         = args->feature;
    int     nfeatures       = args->nfeatures;
    int     npoints         = args->npoints;
    int     nclusters       = args->nclusters;
    int*    membership      = args->membership;
    float** clusters        = args->clusters;
    long long int**   new_centers_len = args->new_centers_len;
    float** new_centers     = args->new_centers;
    float delta = 0.0;
    int index;
    int i;
    int j;
    int start;
    int stop;
    int myId;
    bool indexx[1000];

    myId = thread_getId();

    start = myId * CHUNK;
    while (start < npoints) {
        stop = (((start + CHUNK) < npoints) ? (start + CHUNK) : npoints);
        for (i = start; i < stop; i++) {

            index = common_findNearestPoint(feature[i],
                                            nfeatures,
                                            clusters,
                                            nclusters);
            /*
             * If membership changes, increase delta by 1.
             * membership[i] cannot be changed by other threads
             */
            if (membership[i] != index) {
                delta += 1.0;
            }

            /* Assign the membership to object i */
            /* membership[i] can't be changed by other thread */
            membership[i] = index;

            /* Update new cluster centers : sum of objects located within */
            TM_BEGIN();
            TM_SHARED_WRITE_I(*new_centers_len[index],
                              TM_SHARED_READ_I(*new_centers_len[index]) + 1);
            indexx[index] = true;
            for (j = 0; j < nfeatures; j++) {
                TM_SHARED_WRITE_F(
                    new_centers[index][j],
                    (TM_SHARED_READ_F(new_centers[index][j]) + feature[i][j])
                );
            }
            TM_END();
        }
	//printf("update \n");
        /* Update task queue */
	if (start + CHUNK < npoints) {
	  TM_BEGIN();
	  start = (int)TM_SHARED_READ_L(*global_i);
	  TM_SHARED_WRITE_L(*global_i, (long)(start + CHUNK));
	  TM_END();
        } else {
            break;
        }
    }

    TM_BEGIN();
    TM_SHARED_WRITE_F(*global_delta, TM_SHARED_READ_F(*global_delta) + delta);
    TM_END();
    TM_THREAD_EXIT();
}
Example #21
0
/* =============================================================================
 * sequencer_run
 * =============================================================================
 */
void
sequencer_run (void* argPtr)
{
    TM_THREAD_ENTER();

    long threadId = thread_getId();

    sequencer_t* sequencerPtr = (sequencer_t*)argPtr;

    hashtable_t*      uniqueSegmentsPtr;
    endInfoEntry_t*   endInfoEntries;
    table_t**         startHashToConstructEntryTables;
    constructEntry_t* constructEntries;
    table_t*          hashToConstructEntryTable;

    uniqueSegmentsPtr               = sequencerPtr->uniqueSegmentsPtr;
    endInfoEntries                  = sequencerPtr->endInfoEntries;
    startHashToConstructEntryTables = sequencerPtr->startHashToConstructEntryTables;
    constructEntries                = sequencerPtr->constructEntries;
    hashToConstructEntryTable       = sequencerPtr->hashToConstructEntryTable;

    segments_t* segmentsPtr         = sequencerPtr->segmentsPtr;
    assert(segmentsPtr);
    vector_t*   segmentsContentsPtr = segmentsPtr->contentsPtr;
    long        numSegment          = vector_getSize(segmentsContentsPtr);
    long        segmentLength       = segmentsPtr->length;

    long i;
    long j;
    long i_start;
    long i_stop;
    long numUniqueSegment;
    long substringLength;
    long entryIndex;

    /*
     * Step 1: Remove duplicate segments
     */
#if defined(HTM) || defined(STM)
    long numThread = thread_getNumThread();
    {
        /* Choose disjoint segments [i_start,i_stop) for each thread */
        long partitionSize = (numSegment + numThread/2) / numThread; /* with rounding */
        i_start = threadId * partitionSize;
        if (threadId == (numThread - 1)) {
            i_stop = numSegment;
        } else {
            i_stop = i_start + partitionSize;
        }
    }
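    /*
     * Illustrative arithmetic (an assumed example, not from the original
     * code): with numSegment = 10 and numThread = 4, partitionSize =
     * (10 + 2) / 4 = 3, so threads 0..2 work on [0,3), [3,6), [6,9) and
     * the last thread takes the remainder [9,10).
     */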
#else /* !(HTM || STM) */
    i_start = 0;
    i_stop = numSegment;
#endif /* !(HTM || STM) */
    for (i = i_start; i < i_stop; i+=CHUNK_STEP1) {
        TM_BEGIN();
        {
            long ii;
            long ii_stop = MIN(i_stop, (i+CHUNK_STEP1));
            for (ii = i; ii < ii_stop; ii++) {
                void* segment = vector_at(segmentsContentsPtr, ii);
                TMHASHTABLE_INSERT(uniqueSegmentsPtr,
                                   segment,
                                   segment);
            } /* ii */
        }
        TM_END();
    }

    thread_barrier_wait();

    /*
     * Step 2a: Iterate over unique segments and compute hashes.
     *
     * For the gene "atcg", the hashes for the end would be:
     *
     *     "t", "tc", and "tcg"
     *
     * And for the gene "tcgg", the hashes for the start would be:
     *
     *    "t", "tc", and "tcg"
     *
     * The names are "end" and "start" because if a matching pair is found,
     * they are the substring of the end part of the pair and the start
     * part of the pair respectively. In the above example, "tcg" is the
     * matching substring so:
     *
     *     (end)    (start)
     *     a[tcg] + [tcg]g  = a[tcg]g    (overlap = "tcg")
     */

    /* uniqueSegmentsPtr is constant now */
    numUniqueSegment = hashtable_getSize(uniqueSegmentsPtr);
    entryIndex = 0;

#if defined(HTM) || defined(STM)
    {
        /* Choose disjoint segments [i_start,i_stop) for each thread */
        long num = uniqueSegmentsPtr->numBucket;
        long partitionSize = (num + numThread/2) / numThread; /* with rounding */
        i_start = threadId * partitionSize;
        if (threadId == (numThread - 1)) {
            i_stop = num;
        } else {
            i_stop = i_start + partitionSize;
        }
    }
    {
        /* Approximate disjoint segments of element allocation in constructEntries */
        long partitionSize = (numUniqueSegment + numThread/2) / numThread; /* with rounding */
        entryIndex = threadId * partitionSize;
    }
#else /* !(HTM || STM) */
    i_start = 0;
    i_stop = uniqueSegmentsPtr->numBucket;
    entryIndex = 0;
#endif /* !(HTM || STM) */

    for (i = i_start; i < i_stop; i++) {

        list_t* chainPtr = uniqueSegmentsPtr->buckets[i];
        list_iter_t it;
        list_iter_reset(&it, chainPtr);

        while (list_iter_hasNext(&it, chainPtr)) {

            char* segment =
                (char*)((pair_t*)list_iter_next(&it, chainPtr))->firstPtr;
            constructEntry_t* constructEntryPtr;
            long j;
            unsigned long startHash;
            bool status;

            /* Find an empty constructEntries entry */
            TM_BEGIN();
            while (((void*)TM_SHARED_READ_P(constructEntries[entryIndex].segment)) != NULL) {
                entryIndex = (entryIndex + 1) % numUniqueSegment; /* look for empty */
            }
            constructEntryPtr = &constructEntries[entryIndex];
            TM_SHARED_WRITE_P(constructEntryPtr->segment, segment);
            TM_END();
            entryIndex = (entryIndex + 1) % numUniqueSegment;

            /*
             * Save hashes (sdbm algorithm) of segment substrings
             *
             * endHashes will be computed for shorter substrings after matches
             * have been made (in the next phase of the code). This will reduce
             * the number of substrings for which hashes need to be computed.
             *
             * Since we can compute startHashes incrementally, we go ahead
             * and compute all of them here.
             */
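            /*
             * A minimal sketch of the sdbm hash that hashString() is assumed
             * to compute (hypothetical body, shown only to relate endHash to
             * the incremental startHash update below):
             *
             *     unsigned long hash = 0;
             *     while (*str != '\0') {
             *         hash = (unsigned long)*str++ + (hash << 6)
             *                + (hash << 16) - hash;
             *     }
             *     return hash;
             */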
            /* constructEntryPtr is local now */
            constructEntryPtr->endHash = hashString(&segment[1]);

            startHash = 0;
            for (j = 1; j < segmentLength; j++) {
                startHash = (unsigned long)segment[j-1] +
                            (startHash << 6) + (startHash << 16) - startHash;
                TM_BEGIN();
                status = TMTABLE_INSERT(startHashToConstructEntryTables[j],
                                        (unsigned long)startHash,
                                        (void*)constructEntryPtr );
                TM_END();
                assert(status);
            }

            /*
             * For looking up construct entries quickly
             */
            startHash = (unsigned long)segment[j-1] +
                        (startHash << 6) + (startHash << 16) - startHash;
            TM_BEGIN();
            status = TMTABLE_INSERT(hashToConstructEntryTable,
                                    (unsigned long)startHash,
                                    (void*)constructEntryPtr);
            TM_END();
            assert(status);
        }
    }

    thread_barrier_wait();

    /*
     * Step 2b: Match ends to starts by using hash-based string comparison.
     */
    for (substringLength = segmentLength-1; substringLength > 0; substringLength--) {

        table_t* startHashToConstructEntryTablePtr =
            startHashToConstructEntryTables[substringLength];
        list_t** buckets = startHashToConstructEntryTablePtr->buckets;
        long numBucket = startHashToConstructEntryTablePtr->numBucket;

        long index_start;
        long index_stop;

#if defined(HTM) || defined(STM)
        {
            /* Choose disjoint segments [index_start,index_stop) for each thread */
            long partitionSize = (numUniqueSegment + numThread/2) / numThread; /* with rounding */
            index_start = threadId * partitionSize;
            if (threadId == (numThread - 1)) {
                index_stop = numUniqueSegment;
            } else {
                index_stop = index_start + partitionSize;
            }
        }
#else /* !(HTM || STM) */
        index_start = 0;
        index_stop = numUniqueSegment;
#endif /* !(HTM || STM) */

        /* Iterating over disjoint intervals in the range [0, numUniqueSegment) */
        for (entryIndex = index_start;
             entryIndex < index_stop;
             entryIndex += endInfoEntries[entryIndex].jumpToNext)
        {
            if (!endInfoEntries[entryIndex].isEnd) {
                continue;
            }

            /*  ConstructEntries[entryIndex] is local data */
            constructEntry_t* endConstructEntryPtr =
                &constructEntries[entryIndex];
            char* endSegment = endConstructEntryPtr->segment;
            unsigned long endHash = endConstructEntryPtr->endHash;

            list_t* chainPtr = buckets[endHash % numBucket]; /* buckets: constant data */
            list_iter_t it;
            list_iter_reset(&it, chainPtr);

            /* Linked list at chainPtr is constant */
            while (list_iter_hasNext(&it, chainPtr)) {

                constructEntry_t* startConstructEntryPtr =
                    (constructEntry_t*)list_iter_next(&it, chainPtr);
                char* startSegment = startConstructEntryPtr->segment;
                long newLength = 0;

                /* endConstructEntryPtr is local except for properties startPtr/endPtr/length */
                TM_BEGIN();

                /* Check if matches */
                if (TM_SHARED_READ_L(startConstructEntryPtr->isStart) &&
                    (TM_SHARED_READ_P(endConstructEntryPtr->startPtr) != startConstructEntryPtr) &&
                    (strncmp(startSegment,
                             &endSegment[segmentLength - substringLength],
                             substringLength) == 0))
                {
                    TM_SHARED_WRITE_L(startConstructEntryPtr->isStart, false);

                    constructEntry_t* startConstructEntry_endPtr;
                    constructEntry_t* endConstructEntry_startPtr;

                    /* Update endInfo (appended something so no longer end) */
                    TM_LOCAL_WRITE_L(endInfoEntries[entryIndex].isEnd, false);

                    /* Update segment chain construct info */
                    startConstructEntry_endPtr =
                        (constructEntry_t*)TM_SHARED_READ_P(startConstructEntryPtr->endPtr);
                    endConstructEntry_startPtr =
                        (constructEntry_t*)TM_SHARED_READ_P(endConstructEntryPtr->startPtr);

                    assert(startConstructEntry_endPtr);
                    assert(endConstructEntry_startPtr);
                    TM_SHARED_WRITE_P(startConstructEntry_endPtr->startPtr,
                                      endConstructEntry_startPtr);
                    TM_LOCAL_WRITE_P(endConstructEntryPtr->nextPtr,
                                     startConstructEntryPtr);
                    TM_SHARED_WRITE_P(endConstructEntry_startPtr->endPtr,
                                      startConstructEntry_endPtr);
                    TM_SHARED_WRITE_L(endConstructEntryPtr->overlap, substringLength);
                    newLength = (long)TM_SHARED_READ_L(endConstructEntry_startPtr->length) +
                                (long)TM_SHARED_READ_L(startConstructEntryPtr->length) -
                                substringLength;
                    TM_SHARED_WRITE_L(endConstructEntry_startPtr->length, newLength);
                } /* if (matched) */

                TM_END();

                if (!endInfoEntries[entryIndex].isEnd) { /* if there was a match */
                    break;
                }
            } /* iterate over chain */

        } /* for (endIndex < numUniqueSegment) */

        thread_barrier_wait();

        /*
         * Step 2c: Update jump values and hashes
         *
         * endHash entries of all remaining ends are updated to the next
         * substringLength. Additionally jumpToNext entries are updated such
         * that they allow to skip non-end entries. Currently this is sequential
         * because parallelization did not perform better.
         */
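        /*
         * Illustrative example (an assumption, not from the original code):
         * if the remaining ends after this round sit at indices 0, 3 and 7,
         * the scan below leaves jumpToNext = 3 at entry 0 and 4 at entry 3,
         * so the next round can hop directly from one end to the next; the
         * last end's jumpToNext points past the remaining entries.
         */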

        if (threadId == 0) {
            if (substringLength > 1) {
                long index = segmentLength - substringLength + 1;
                /* initialization of j and i: with i being the next end after j=0 */
                for (i = 1; !endInfoEntries[i].isEnd; i+=endInfoEntries[i].jumpToNext) {
                    /* find first non-null */
                }
                /* entry 0 is handled separately from the loop below */
                endInfoEntries[0].jumpToNext = i;
                if (endInfoEntries[0].isEnd) {
                    constructEntry_t* constructEntryPtr = &constructEntries[0];
                    char* segment = constructEntryPtr->segment;
                    constructEntryPtr->endHash = hashString(&segment[index]);
                }
                /* Continue scanning (do not reset i) */
                for (j = 0; i < numUniqueSegment; i+=endInfoEntries[i].jumpToNext) {
                    if (endInfoEntries[i].isEnd) {
                        constructEntry_t* constructEntryPtr = &constructEntries[i];
                        char* segment = constructEntryPtr->segment;
                        constructEntryPtr->endHash = hashString(&segment[index]);
                        endInfoEntries[j].jumpToNext = MAX(1, (i - j));
                        j = i;
                    }
                }
                endInfoEntries[j].jumpToNext = i - j;
            }
        }

        thread_barrier_wait();

    } /* for (substringLength > 0) */


    thread_barrier_wait();

    /*
     * Step 3: Build sequence string
     */
    if (threadId == 0) {

        long totalLength = 0;

        for (i = 0; i < numUniqueSegment; i++) {
            constructEntry_t* constructEntryPtr = &constructEntries[i];
            if (constructEntryPtr->isStart) {
                totalLength += constructEntryPtr->length;
            }
        }

        sequencerPtr->sequence = (char*)P_MALLOC((totalLength+1) * sizeof(char));
        char* sequence = sequencerPtr->sequence;
        assert(sequence);

        char* copyPtr = sequence;
        long sequenceLength = 0;

        for (i = 0; i < numUniqueSegment; i++) {
            constructEntry_t* constructEntryPtr = &constructEntries[i];
            /* If there are several start segments, we append in arbitrary order  */
            if (constructEntryPtr->isStart) {
                long newSequenceLength = sequenceLength + constructEntryPtr->length;
                assert( newSequenceLength <= totalLength );
                copyPtr = sequence + sequenceLength;
                sequenceLength = newSequenceLength;
                do {
                    long numChar = segmentLength - constructEntryPtr->overlap;
                    if ((copyPtr + numChar) > (sequence + newSequenceLength)) {
                        TM_PRINT0("ERROR: sequence length != actual length\n");
                        break;
                    }
                    memcpy(copyPtr,
                           constructEntryPtr->segment,
                           (numChar * sizeof(char)));
                    copyPtr += numChar;
                } while ((constructEntryPtr = constructEntryPtr->nextPtr) != NULL);
                assert(copyPtr <= (sequence + sequenceLength));
            }
        }

        assert(sequence != NULL);
        sequence[sequenceLength] = '\0';
    }

    TM_THREAD_EXIT();
}
Example #22
0
/* =============================================================================
 * TMelement_isGarbage
 * -- Can we deallocate?
 * =============================================================================
 */
bool
TMelement_isGarbage (TM_ARGDECL  element_t* elementPtr)
{
    return (bool)TM_SHARED_READ_L(elementPtr->isGarbage);
}
Example #23
0
/* =============================================================================
 * TMelement_isReferenced
 * -- Held by another data structure?
 * =============================================================================
 */
bool
TMelement_isReferenced (TM_ARGDECL  element_t* elementPtr)
{
    return (bool)TM_SHARED_READ_L(elementPtr->isReferenced);
}
Example #24
0
/* =============================================================================
 * computeGraph
 * =============================================================================
 */
void
computeGraph (void* argPtr)
{
    TM_THREAD_ENTER();

    graph*    GPtr       = ((computeGraph_arg_t*)argPtr)->GPtr;
    graphSDG* SDGdataPtr = ((computeGraph_arg_t*)argPtr)->SDGdataPtr;

    long myId = thread_getId();
    long numThread = thread_getNumThread();

    ULONGINT_T j;
    ULONGINT_T maxNumVertices = 0;
    ULONGINT_T numEdgesPlaced = SDGdataPtr->numEdgesPlaced;

    /*
     * First determine the number of vertices by scanning the tuple
     * startVertex list
     */

    long i;
    long i_start;
    long i_stop;
    createPartition(0, numEdgesPlaced, myId, numThread, &i_start, &i_stop);

    for (i = i_start; i < i_stop; i++) {
        if (SDGdataPtr->startVertex[i] > maxNumVertices) {
            maxNumVertices = SDGdataPtr->startVertex[i];
        }
    }

    TM_BEGIN();
    long tmp_maxNumVertices = (long)TM_SHARED_READ_L(global_maxNumVertices);
    long new_maxNumVertices = MAX(tmp_maxNumVertices, maxNumVertices) + 1;
    TM_SHARED_WRITE_L(global_maxNumVertices, new_maxNumVertices);
    TM_END();

    thread_barrier_wait();

    maxNumVertices = global_maxNumVertices;

    if (myId == 0) {

        GPtr->numVertices = maxNumVertices;
        GPtr->numEdges    = numEdgesPlaced;
        GPtr->intWeight   = SDGdataPtr->intWeight;
        GPtr->strWeight   = SDGdataPtr->strWeight;

        for (i = 0; i < numEdgesPlaced; i++) {
            if (GPtr->intWeight[numEdgesPlaced-i-1] < 0) {
                GPtr->numStrEdges = -(GPtr->intWeight[numEdgesPlaced-i-1]) + 1;
                GPtr->numIntEdges = numEdgesPlaced - GPtr->numStrEdges;
                break;
            }
        }

        GPtr->outDegree =
            (LONGINT_T*)P_MALLOC((GPtr->numVertices) * sizeof(LONGINT_T));
        assert(GPtr->outDegree);

        GPtr->outVertexIndex =
            (ULONGINT_T*)P_MALLOC((GPtr->numVertices) * sizeof(ULONGINT_T));
        assert(GPtr->outVertexIndex);
    }

    thread_barrier_wait();

    createPartition(0, GPtr->numVertices, myId, numThread, &i_start, &i_stop);

    for (i = i_start; i < i_stop; i++) {
        GPtr->outDegree[i] = 0;
        GPtr->outVertexIndex[i] = 0;
    }

    ULONGINT_T outVertexListSize = 0;

    thread_barrier_wait();

    ULONGINT_T i0 = -1UL;

    for (i = i_start; i < i_stop; i++) {

        ULONGINT_T k = i;
        if ((outVertexListSize == 0) && (k != 0)) {
            while (i0 == -1UL) {
                for (j = 0; j < numEdgesPlaced; j++) {
                    if (k == SDGdataPtr->startVertex[j]) {
                        i0 = j;
                        break;
                    }

                }
                k--;
            }
        }

        if ((outVertexListSize == 0) && (k == 0)) {
            i0 = 0;
        }

        for (j = i0; j < numEdgesPlaced; j++) {
            if (i == GPtr->numVertices-1) {
                break;
            }
            if ((i != SDGdataPtr->startVertex[j])) {
                if ((j > 0) && (i == SDGdataPtr->startVertex[j-1])) {
                    if (j-i0 >= 1) {
                        outVertexListSize++;
                        GPtr->outDegree[i]++;
                        ULONGINT_T t;
                        for (t = i0+1; t < j; t++) {
                            if (SDGdataPtr->endVertex[t] !=
                                SDGdataPtr->endVertex[t-1])
                            {
                                outVertexListSize++;
                                GPtr->outDegree[i] = GPtr->outDegree[i]+1;
                            }
                        }
                    }
                }
                i0 = j;
                break;
            }
        }

        if (i == GPtr->numVertices-1) {
            if (numEdgesPlaced-i0 >= 0) {
                outVertexListSize++;
                GPtr->outDegree[i]++;
                ULONGINT_T t;
                for (t = i0+1; t < numEdgesPlaced; t++) {
                    if (SDGdataPtr->endVertex[t] != SDGdataPtr->endVertex[t-1]) {
                        outVertexListSize++;
                        GPtr->outDegree[i]++;
                    }
                }
            }
        }

    } /* for i */

    thread_barrier_wait();

    prefix_sums(GPtr->outVertexIndex, GPtr->outDegree, GPtr->numVertices);

    thread_barrier_wait();

    TM_BEGIN();
    TM_SHARED_WRITE_L(
        global_outVertexListSize,
        ((long)TM_SHARED_READ_L(global_outVertexListSize) + outVertexListSize)
    );
    TM_END();

    thread_barrier_wait();

    outVertexListSize = global_outVertexListSize;

    if (myId == 0) {
        GPtr->numDirectedEdges = outVertexListSize;
        GPtr->outVertexList =
            (ULONGINT_T*)P_MALLOC(outVertexListSize * sizeof(ULONGINT_T));
        assert(GPtr->outVertexList);
        GPtr->paralEdgeIndex =
            (ULONGINT_T*)P_MALLOC(outVertexListSize * sizeof(ULONGINT_T));
        assert(GPtr->paralEdgeIndex);
        GPtr->outVertexList[0] = SDGdataPtr->endVertex[0];
    }

    thread_barrier_wait();

    /*
     * Evaluate outVertexList
     */

    i0 = -1UL;

    for (i = i_start; i < i_stop; i++) {

        ULONGINT_T k = i;
        while ((i0 == -1UL) && (k != 0)) {
            for (j = 0; j < numEdgesPlaced; j++) {
                if (k == SDGdataPtr->startVertex[j]) {
                    i0 = j;
                    break;
                }
            }
            k--;
        }

        if ((i0 == -1UL) && (k == 0)) {
            i0 = 0;
        }

        for (j = i0; j < numEdgesPlaced; j++) {
            if (i == GPtr->numVertices-1) {
                break;
            }
            if (i != SDGdataPtr->startVertex[j]) {
                if ((j > 0) && (i == SDGdataPtr->startVertex[j-1])) {
                    if (j-i0 >= 1) {
                        long ii = GPtr->outVertexIndex[i];
                        ULONGINT_T r = 0;
                        GPtr->paralEdgeIndex[ii] = i0;
                        GPtr->outVertexList[ii] = SDGdataPtr->endVertex[i0];
                        r++;
                        ULONGINT_T t;
                        for (t = i0+1; t < j; t++) {
                            if (SDGdataPtr->endVertex[t] !=
                                SDGdataPtr->endVertex[t-1])
                            {
                                GPtr->paralEdgeIndex[ii+r] = t;
                                GPtr->outVertexList[ii+r] = SDGdataPtr->endVertex[t];
                                r++;
                            }
                        }

                    }
                }
                i0 = j;
                break;
            }
        } /* for j */

        if (i == GPtr->numVertices-1) {
            ULONGINT_T r = 0;
            if (numEdgesPlaced-i0 >= 0) {
                long ii = GPtr->outVertexIndex[i];
                GPtr->paralEdgeIndex[ii+r] = i0;
                GPtr->outVertexList[ii+r] = SDGdataPtr->endVertex[i0];
                r++;
                ULONGINT_T t;
                for (t = i0+1; t < numEdgesPlaced; t++) {
                    if (SDGdataPtr->endVertex[t] != SDGdataPtr->endVertex[t-1]) {
                        GPtr->paralEdgeIndex[ii+r] = t;
                        GPtr->outVertexList[ii+r] = SDGdataPtr->endVertex[t];
                        r++;
                    }
                }
            }
        }

    } /* for i */

    thread_barrier_wait();

    if (myId == 0) {
        P_FREE(SDGdataPtr->startVertex);
        P_FREE(SDGdataPtr->endVertex);
        GPtr->inDegree =
            (LONGINT_T*)P_MALLOC(GPtr->numVertices * sizeof(LONGINT_T));
        assert(GPtr->inDegree);
        GPtr->inVertexIndex =
            (ULONGINT_T*)P_MALLOC(GPtr->numVertices * sizeof(ULONGINT_T));
        assert(GPtr->inVertexIndex);
    }

    thread_barrier_wait();

    for (i = i_start; i < i_stop; i++) {
        GPtr->inDegree[i] = 0;
        GPtr->inVertexIndex[i] = 0;
    }

    /* A temp. array to store the implied edges */
    ULONGINT_T* impliedEdgeList;
    if (myId == 0) {
        impliedEdgeList = (ULONGINT_T*)P_MALLOC(GPtr->numVertices
                                                * MAX_CLUSTER_SIZE
                                                * sizeof(ULONGINT_T));
        global_impliedEdgeList = impliedEdgeList;
    }

    thread_barrier_wait();

    impliedEdgeList = global_impliedEdgeList;

    createPartition(0,
                    (GPtr->numVertices * MAX_CLUSTER_SIZE),
                    myId,
                    numThread,
                    &i_start,
                    &i_stop);

    for (i = i_start; i < i_stop; i++) {
        impliedEdgeList[i] = 0;
    }

    /*
     * An auxiliary array to store implied edges, in case we overshoot
     * MAX_CLUSTER_SIZE
     */

    ULONGINT_T** auxArr;
    if (myId == 0) {
        auxArr = (ULONGINT_T**)P_MALLOC(GPtr->numVertices * sizeof(ULONGINT_T*));
        assert(auxArr);
        global_auxArr = auxArr;
    }

    thread_barrier_wait();

    auxArr = global_auxArr;

    createPartition(0, GPtr->numVertices, myId, numThread, &i_start, &i_stop);

    for (i = i_start; i < i_stop; i++) {
        /* Inspect adjacency list of vertex i */
        for (j = GPtr->outVertexIndex[i];
             j < (GPtr->outVertexIndex[i] + GPtr->outDegree[i]);
             j++)
        {
            ULONGINT_T v = GPtr->outVertexList[j];
            ULONGINT_T k;
            for (k = GPtr->outVertexIndex[v];
                 k < (GPtr->outVertexIndex[v] + GPtr->outDegree[v]);
                 k++)
            {
                if (GPtr->outVertexList[k] == i) {
                    break;
                }
            }
            if (k == GPtr->outVertexIndex[v]+GPtr->outDegree[v]) {
                TM_BEGIN();
                /* Add i to the impliedEdgeList of v */
                long inDegree = (long)TM_SHARED_READ_L(GPtr->inDegree[v]);
                TM_SHARED_WRITE_L(GPtr->inDegree[v], (inDegree + 1));
                if (inDegree < MAX_CLUSTER_SIZE) {
                    TM_SHARED_WRITE_L(impliedEdgeList[v*MAX_CLUSTER_SIZE+inDegree],
                                    i);
                } else {
                    /* Use auxiliary array to store the implied edge */
                    /* Create an array if it's not present already */
                    ULONGINT_T* a = NULL;
                    if ((inDegree % MAX_CLUSTER_SIZE) == 0) {
                        a = (ULONGINT_T*)TM_MALLOC(MAX_CLUSTER_SIZE
                                                   * sizeof(ULONGINT_T));
                        assert(a);
                        TM_SHARED_WRITE_P(auxArr[v], a);
                    } else {
                        a = auxArr[v];
                    }
                    TM_SHARED_WRITE_L(a[inDegree % MAX_CLUSTER_SIZE], i);
                }
                TM_END();
            }
        }
    } /* for i */

    thread_barrier_wait();

    prefix_sums(GPtr->inVertexIndex, GPtr->inDegree, GPtr->numVertices);

    if (myId == 0) {
        GPtr->numUndirectedEdges = GPtr->inVertexIndex[GPtr->numVertices-1]
                                   + GPtr->inDegree[GPtr->numVertices-1];
        GPtr->inVertexList =
            (ULONGINT_T *)P_MALLOC(GPtr->numUndirectedEdges * sizeof(ULONGINT_T));
    }

    thread_barrier_wait();

    /*
     * Create the inVertex List
     */

    for (i = i_start; i < i_stop; i++) {
        for (j = GPtr->inVertexIndex[i];
             j < (GPtr->inVertexIndex[i] + GPtr->inDegree[i]);
             j++)
        {
            if ((j - GPtr->inVertexIndex[i]) < MAX_CLUSTER_SIZE) {
                GPtr->inVertexList[j] =
                    impliedEdgeList[i*MAX_CLUSTER_SIZE+j-GPtr->inVertexIndex[i]];
            } else {
                GPtr->inVertexList[j] =
                    auxArr[i][(j-GPtr->inVertexIndex[i]) % MAX_CLUSTER_SIZE];
            }
        }
    }

    thread_barrier_wait();

    if (myId == 0) {
        P_FREE(impliedEdgeList);
    }

    for (i = i_start; i < i_stop; i++) {
        if (GPtr->inDegree[i] > MAX_CLUSTER_SIZE) {
            P_FREE(auxArr[i]);
        }
    }

    thread_barrier_wait();

    if (myId == 0) {
        P_FREE(auxArr);
    }

    TM_THREAD_EXIT();
}
Example #25
0
/* =============================================================================
 * process
 * =============================================================================
 */
void
process (void* argPtr)
{
    TM_THREAD_ENTER();

    heap_t* workHeapPtr = global_workHeapPtr;
    mesh_t* meshPtr = global_meshPtr;
    region_t* regionPtr;
    long totalNumAdded = 0;
    long numProcess = 0;

    regionPtr = PREGION_ALLOC();
    assert(regionPtr);

    while (1) {

        element_t* elementPtr;

        TM_BEGIN();
        elementPtr = (element_t*)TMHEAP_REMOVE(workHeapPtr);
        TM_END();
        if (elementPtr == NULL) {
            break;
        }

        bool isGarbage;
        TM_BEGIN();
        isGarbage = TMELEMENT_ISGARBAGE(elementPtr);
        TM_END();
        if (isGarbage) {
            /*
             * Handle delayed deallocation
             */
            PELEMENT_FREE(elementPtr);
            continue;
        }

        long numAdded;

        TM_BEGIN();
        PREGION_CLEARBAD(regionPtr);
        numAdded = TMREGION_REFINE(regionPtr, elementPtr, meshPtr);
        TM_END();

        TM_BEGIN();
        TMELEMENT_SETISREFERENCED(elementPtr, false);
        isGarbage = TMELEMENT_ISGARBAGE(elementPtr);
        TM_END();
        if (isGarbage) {
            /*
             * Handle delayed deallocation
             */
            PELEMENT_FREE(elementPtr);
        }

        totalNumAdded += numAdded;

        TM_BEGIN();
        TMREGION_TRANSFERBAD(regionPtr, workHeapPtr);
        TM_END();

        numProcess++;

    }

    TM_BEGIN();
    TM_SHARED_WRITE_L(global_totalNumAdded,
                    TM_SHARED_READ_L(global_totalNumAdded) + totalNumAdded);
    TM_SHARED_WRITE_L(global_numProcess,
                    TM_SHARED_READ_L(global_numProcess) + numProcess);
    TM_END();

    PREGION_FREE(regionPtr);
    TM_THREAD_EXIT();
}
Example #26
0
/* =============================================================================
 * work
 * =============================================================================
 */
static void
work (void* argPtr)
{
    TM_THREAD_ENTER();

    args_t* args = (args_t*)argPtr;
    float** feature         = args->feature;
    int     nfeatures       = args->nfeatures;
    int     npoints         = args->npoints;
    int     nclusters       = args->nclusters;
    int*    membership      = args->membership;
    float** clusters        = args->clusters;
    int**   new_centers_len = args->new_centers_len;
    float** new_centers     = args->new_centers;
    float delta = 0.0;
    int index;
    long i;
    int j;
    int start;
    int stop;
    int myId;

    myId = thread_getId();

    start = myId * CHUNK;

    while (start < npoints) {
        stop = (((start + CHUNK) < npoints) ? (start + CHUNK) : npoints);
        for (i = start; i < stop; TMHT_LOCAL_WRITE(i, i+1)) {

            index = common_findNearestPoint(feature[i],
                                            nfeatures,
                                            clusters,
                                            nclusters);
            /*
             * If membership changes, increase delta by 1.
             * membership[i] cannot be changed by other threads
             */
            if (membership[i] != index) {
                delta += 1.0;
            }

            /* Assign the membership to object i */
            /* membership[i] can't be changed by other thread */
            membership[i] = index;

            /* Update new cluster centers : sum of objects located within */
            TM_BEGIN();
            TM_SHARED_WRITE_I(*new_centers_len[index],
                              TM_SHARED_READ_I(*new_centers_len[index]) + 1);
            for (j = 0; j < nfeatures; j++) {
                TM_SHARED_WRITE_F(
                    new_centers[index][j],
                    (TM_SHARED_READ_F(new_centers[index][j]) + feature[i][j])
                );
            }
            TM_END();
        }

        /* Update task queue */
        if (start + CHUNK < npoints) {
            TM_BEGIN();
            start = (int)TM_SHARED_READ_L(global_i);
            TM_SHARED_WRITE_L(global_i, (long)(start + CHUNK));
            TM_END();
        } else {
            break;
        }
    }

    TM_BEGIN();
    TM_SHARED_WRITE_F(global_delta, TM_SHARED_READ_F(global_delta) + delta);
    TM_END();

    TM_THREAD_EXIT();
}
Example #27
0
/* =============================================================================
 * TMlist_getSize
 * -- Returns the size of the list
 * =============================================================================
 */
long
TMlist_getSize (TM_ARGDECL  list_t* listPtr)
{
    return (long)TM_SHARED_READ_L(listPtr->size);
}