Example #1
long
TMelement_compare (TM_ARGDECL element_t* aElementPtr, element_t* bElementPtr)
{
    /* The parameters are transaction-local; only the element fields
     * live in shared memory and need TM read barriers. */
    long aNumCoordinate = (long)TM_SHARED_READ_L(aElementPtr->numCoordinate);
    long bNumCoordinate = (long)TM_SHARED_READ_L(bElementPtr->numCoordinate);
    coordinate_t* aCoordinates = aElementPtr->coordinates;
    coordinate_t* bCoordinates = bElementPtr->coordinates;

    if (aNumCoordinate < bNumCoordinate) {
        return -1;
    } else if (aNumCoordinate > bNumCoordinate) {
        return 1;
    }

    long i;
    for (i = 0; i < aNumCoordinate; i++) {
        long compareCoordinate =
            TMcoordinate_compare(TM_ARG &aCoordinates[i], &bCoordinates[i]);
        if (compareCoordinate != 0) {
            return compareCoordinate;
        }
    }

    return 0;
}
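Several of the examples below dispatch through a comparator object (listPtr->comparator->compare_tm, heapPtr->compare->compare_tm) that pairs a sequential and a transactional compare function. A minimal sketch of what such an object might look like, using this codebase's TM_ARGDECL convention; the struct and field layout are an assumption for illustration, not taken from the source:

typedef struct comparator {
    long (*compare)(const void* aPtr, const void* bPtr);               /* sequential version */
    long (*compare_tm)(TM_ARGDECL const void* aPtr, const void* bPtr); /* transactional version */
} comparator_t;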
Example #2
/* =============================================================================
 * TMheap_insert
 * -- Returns false on failure
 * =============================================================================
 */
bool
TMheap_insert (TM_ARGDECL  heap_t* heapPtr, void* dataPtr)
{
    long size = (long)TM_SHARED_READ_L(heapPtr->size);
    long capacity = (long)TM_SHARED_READ_L(heapPtr->capacity);
    if ((size + 1) >= capacity) {
        long newCapacity = capacity * 2;
        void** newElements = (void**)TM_MALLOC(newCapacity * sizeof(void*));
        if (newElements == NULL) {
            return false;
        }
        TM_SHARED_WRITE_L(heapPtr->capacity, newCapacity);
        long i;
        void** elements = (void**)TM_SHARED_READ_P(heapPtr->elements);
        for (i = 0; i <= size; i++) {
            newElements[i] = (void*)TM_SHARED_READ_P(elements[i]);
        }
        TM_FREE(elements);
        TM_SHARED_WRITE_P(heapPtr->elements, newElements);
    }

    size++;
    TM_SHARED_WRITE_L(heapPtr->size, size);
    void** elements = (void**)TM_SHARED_READ_P(heapPtr->elements);
    TM_SHARED_WRITE_P(elements[size], dataPtr);
    TMsiftUp(TM_ARG heapPtr, size);

    return true;
}
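A minimal usage sketch for the insert above, assuming the STAMP-style TM_BEGIN/TM_END/TM_RESTART macros seen elsewhere in these examples and a hypothetical TMHEAP_INSERT wrapper that supplies TM_ARG (the wrapper name and taskPtr are assumptions for illustration):

    /* Hypothetical caller: insert taskPtr into a shared heap inside a transaction */
    TM_BEGIN();
    if (!TMHEAP_INSERT(heapPtr, (void*)taskPtr)) {
        /* allocation failed inside the transaction; abort and retry */
        TM_RESTART();
    }
    TM_END();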
Example #3
TM_SAFE
bool_t
TMheap_insert (heap_t* heapPtr, void* dataPtr)
{
    long size = (long)TM_SHARED_READ(heapPtr->size);
    long capacity = (long)TM_SHARED_READ(heapPtr->capacity);

    if ((size + 1) >= capacity) {
        long newCapacity = capacity * 2;
        void** newElements = (void**)TM_MALLOC(newCapacity * sizeof(void*));
        if (newElements == NULL) {
            return FALSE;
        }
        TM_SHARED_WRITE(heapPtr->capacity, newCapacity);
        long i;
        void** elements = (void **)TM_SHARED_READ_P(heapPtr->elements);
        for (i = 0; i <= size; i++) {
            newElements[i] = (void*)TM_SHARED_READ_P(elements[i]);
        }
        free(elements); /* free the old array read above, not the raw shared field */
        TM_SHARED_WRITE_P(heapPtr->elements, newElements);
    }

    size++;
    TM_SHARED_WRITE(heapPtr->size, size);
    void** elements = (void**)TM_SHARED_READ_P(heapPtr->elements);
    TM_SHARED_WRITE_P(elements[size], dataPtr);
    siftUp(heapPtr, size);

    return TRUE;
}
Example #4
/* =============================================================================
 * TMlist_insert
 * -- Return TRUE on success, else FALSE
 * =============================================================================
 */
bool_t
TMlist_insert (TM_ARGDECL  list_t* listPtr, void* dataPtr)
{
    list_node_t* prevPtr;
    list_node_t* nodePtr;
    list_node_t* currPtr;

    prevPtr = TMfindPrevious(TM_ARG  listPtr, dataPtr);
    currPtr = (list_node_t*)TM_SHARED_READ_P(prevPtr->nextPtr);

#ifdef LIST_NO_DUPLICATES
    if ((currPtr != NULL) &&
        listPtr->comparator->compare_tm(TM_ARG TM_SHARED_READ_P(currPtr->dataPtr), dataPtr) == 0) {
        return FALSE;
    }
#endif

    nodePtr = TMallocNode(TM_ARG  dataPtr);
    if (nodePtr == NULL) {
        return FALSE;
    }

    TM_SHARED_WRITE_P(nodePtr->nextPtr, currPtr);
    TM_SHARED_WRITE_P(prevPtr->nextPtr, nodePtr);
    TM_SHARED_WRITE_L(listPtr->size, (TM_SHARED_READ_L(listPtr->size) + 1));

    return TRUE;
}
Example #5
/* =============================================================================
 * TMlist_iter_next
 * =============================================================================
 */
void*
TMlist_iter_next (TM_ARGDECL  list_iter_t* itPtr, list_t* listPtr)
{
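    /* Note: assumes the caller has already checked TMlist_iter_hasNext;
     * `next` is dereferenced below without a NULL check. */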
    list_iter_t next = (list_iter_t)TM_SHARED_READ_P((*itPtr)->nextPtr);
    TM_LOCAL_WRITE_P(*itPtr, next);

    return TM_SHARED_READ_P(next->dataPtr);
}
Example #6
static long
TMcompareEdge (TM_ARGDECL edge_t* aEdgePtr, edge_t* bEdgePtr)
{
    long diffFirst = TMcoordinate_compare(TM_ARG (coordinate_t*)TM_SHARED_READ_P(aEdgePtr->firstPtr),
                                          (coordinate_t*)TM_SHARED_READ_P(bEdgePtr->firstPtr));

    return ((diffFirst != 0) ?
            (diffFirst) :
            (TMcoordinate_compare(TM_ARG (coordinate_t*)TM_SHARED_READ_P(aEdgePtr->secondPtr),
                                  (coordinate_t*)TM_SHARED_READ_P(bEdgePtr->secondPtr))));
}
Example #7
/* =============================================================================
 * TMqueue_push
 * =============================================================================
 */
bool_t
TMqueue_push (TM_ARGDECL  queue_t* queuePtr, void* dataPtr)
{
    long pop      = (long)TM_SHARED_READ(queuePtr->pop);
    long push     = (long)TM_SHARED_READ(queuePtr->push);
    long capacity = (long)TM_SHARED_READ(queuePtr->capacity);

    assert(pop != push);

    /* Need to resize */
    long newPush = (push + 1) % capacity;
    if (newPush == pop) {
        long newCapacity = capacity * QUEUE_GROWTH_FACTOR;
        void** newElements = (void**)TM_MALLOC(newCapacity * sizeof(void*));
        if (newElements == NULL) {
            return FALSE;
        }

        long dst = 0;
        void** elements = (void**)TM_SHARED_READ_P(queuePtr->elements);
        if (pop < push) {
            long src;
            for (src = (pop + 1); src < push; src++, dst++) {
                newElements[dst] = (void*)TM_SHARED_READ_P(elements[src]);
            }
        } else {
            long src;
            for (src = (pop + 1); src < capacity; src++, dst++) {
                newElements[dst] = (void*)TM_SHARED_READ_P(elements[src]);
            }
            for (src = 0; src < push; src++, dst++) {
                newElements[dst] = (void*)TM_SHARED_READ_P(elements[src]);
            }
        }

        TM_FREE(elements);
        TM_SHARED_WRITE_P(queuePtr->elements, newElements);
        TM_SHARED_WRITE(queuePtr->pop,      newCapacity - 1);
        TM_SHARED_WRITE(queuePtr->capacity, newCapacity);
        push = dst;
        newPush = push + 1; /* no modulo needed: push < newCapacity here */
    }

    void** elements = (void**)TM_SHARED_READ_P(queuePtr->elements);
    TM_SHARED_WRITE_P(elements[push], dataPtr);
    TM_SHARED_WRITE(queuePtr->push, newPush);

    return TRUE;
}
Example #8
/* =============================================================================
 * customer_removeReservationInfo
 * -- Returns TRUE if success, else FALSE
 * =============================================================================
 */
bool_t
customer_removeReservationInfo (TM_ARGDECL
                                customer_t* customerPtr,
                                reservation_type_t type, long id)
{
    reservation_info_t findReservationInfo;

    findReservationInfo.type = type;
    findReservationInfo.id = id;
    /* price not used to compare reservation infos */

    list_t* reservationInfoListPtr =
        (list_t*)TM_SHARED_READ_P(customerPtr->reservationInfoListPtr);

    reservation_info_t* reservationInfoPtr =
        (reservation_info_t*)TMLIST_FIND(reservationInfoListPtr,
                                         &findReservationInfo);

    if (reservationInfoPtr == NULL) {
        return FALSE;
    }

    bool_t status = TMLIST_REMOVE(reservationInfoListPtr,
                                  (void*)&findReservationInfo);
    if (status == FALSE) {
        TM_RESTART();
    }

    RESERVATION_INFO_FREE(reservationInfoPtr);

    return TRUE;
}
Example #9
/* =============================================================================
 * TMlist_free
 * =============================================================================
 */
void
TMlist_free (TM_ARGDECL  list_t* listPtr)
{
    list_node_t* nextPtr = (list_node_t*)TM_SHARED_READ_P(listPtr->head.nextPtr);
    TMfreeList(TM_ARG  nextPtr);
    TM_FREE(listPtr);
}
Example #10
/* =============================================================================
 * TMlist_iter_hasNext
 * =============================================================================
 */
bool_t
TMlist_iter_hasNext (TM_ARGDECL  list_iter_t* itPtr, list_t* listPtr)
{
    list_iter_t next = (list_iter_t)TM_SHARED_READ_P((*itPtr)->nextPtr);

    return ((next != NULL) ? TRUE : FALSE);
}
Example #11
/* =============================================================================
 * customer_free
 * =============================================================================
 */
void
customer_free (TM_ARGDECL  customer_t* customerPtr)
{
    list_t* reservationInfoListPtr =
        (list_t*)TM_SHARED_READ_P(customerPtr->reservationInfoListPtr);
    TMLIST_FREE(reservationInfoListPtr);
    TM_FREE(customerPtr);
}
Example #12
/* =============================================================================
 * TMheap_remove
 * -- Returns NULL if empty
 * =============================================================================
 */
void*
TMheap_remove (TM_ARGDECL  heap_t* heapPtr)
{
    long size = (long)TM_SHARED_READ_L(heapPtr->size);

    if (size < 1) {
        return NULL;
    }

    void** elements = (void**)TM_SHARED_READ_P(heapPtr->elements);
    void* dataPtr = (void*)TM_SHARED_READ_P(elements[1]);
    TM_SHARED_WRITE_P(elements[1], TM_SHARED_READ_P(elements[size]));
    TM_SHARED_WRITE_L(heapPtr->size, (size - 1));
    TMheapify(TM_ARG  heapPtr, 1);

    return dataPtr;
}
Example #13
/* =============================================================================
 * TMqueue_pop
 * =============================================================================
 */
void*
TMqueue_pop (TM_ARGDECL  queue_t* queuePtr)
{
    long pop      = (long)TM_SHARED_READ(queuePtr->pop);
    long push     = (long)TM_SHARED_READ(queuePtr->push);
    long capacity = (long)TM_SHARED_READ(queuePtr->capacity);

    long newPop = (pop + 1) % capacity;
    if (newPop == push) {
        return NULL;
    }

    void** elements = (void**)TM_SHARED_READ_P(queuePtr->elements);
    void* dataPtr = (void*)TM_SHARED_READ_P(elements[newPop]);
    TM_SHARED_WRITE(queuePtr->pop, newPop);

    return dataPtr;
}
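The pop above encodes the ring-buffer convention used throughout these queue examples: pop names the slot before the first live element, so the queue is empty exactly when the slot after pop is push. A sketch of the corresponding empty test, under the assumption that the rest of the queue API follows this convention:

bool_t
TMqueue_isEmpty (TM_ARGDECL  queue_t* queuePtr)
{
    long pop      = (long)TM_SHARED_READ(queuePtr->pop);
    long push     = (long)TM_SHARED_READ(queuePtr->push);
    long capacity = (long)TM_SHARED_READ(queuePtr->capacity);

    /* Empty when advancing pop by one slot would reach push */
    return ((((pop + 1) % capacity) == push) ? TRUE : FALSE);
}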
Example #14
/* =============================================================================
 * TMfreeList
 * =============================================================================
 */
static void
TMfreeList (TM_ARGDECL  list_node_t* nodePtr)
{
    if (nodePtr != NULL) {
        list_node_t* nextPtr = (list_node_t*)TM_SHARED_READ_P(nodePtr->nextPtr);
        TMfreeList(TM_ARG  nextPtr);
        TMfreeNode(TM_ARG  nodePtr);
    }
}
Example #15
/* =============================================================================
 * TMfindPrevious
 * =============================================================================
 */
static list_node_t*
TMfindPrevious (TM_ARGDECL  list_t* listPtr, void* dataPtr)
{
    list_node_t* prevPtr = &(listPtr->head);
    list_node_t* nodePtr;

    for (nodePtr = (list_node_t*)TM_SHARED_READ_P(prevPtr->nextPtr);
         nodePtr != NULL;
         nodePtr = (list_node_t*)TM_SHARED_READ_P(nodePtr->nextPtr))
    {
        if (listPtr->compare(nodePtr->dataPtr, dataPtr) >= 0) {
            return prevPtr;
        }
        prevPtr = nodePtr;
    }

    return prevPtr;
}
Example #16
/* =============================================================================
 * TMsiftUp
 * =============================================================================
 */
static void
TMsiftUp (TM_ARGDECL  heap_t* heapPtr, long startIndex)
{
    void** elements = (void**)TM_SHARED_READ_P(heapPtr->elements);
    long (*compare)(TM_ARGDECL const void*, const void*) = heapPtr->compare->compare_tm;
    long index = startIndex;
    while (index > 1) {
        long parentIndex = PARENT(index);
        void* parentPtr = (void*)TM_SHARED_READ_P(elements[parentIndex]);
        void* thisPtr   = (void*)TM_SHARED_READ_P(elements[index]);
        if (compare(TM_ARG parentPtr, thisPtr) >= 0) {
            break;
        }
        void* tmpPtr = parentPtr;
        TM_SHARED_WRITE_P(elements[parentIndex], thisPtr);
        TM_SHARED_WRITE_P(elements[index], tmpPtr);
        index = parentIndex;
    }
}
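TMsiftUp and the TMheapify variants below rely on PARENT/LEFT_CHILD/RIGHT_CHILD index macros that are not shown. For the 1-based layout these functions use (the root lives at elements[1], and TMheap_insert copies indices 0..size), the conventional definitions would be the standard binary-heap index equations; treat this as an assumption, since the macros' actual definitions are outside this excerpt:

#define PARENT(i)       ((i) / 2)
#define LEFT_CHILD(i)   (2 * (i))
#define RIGHT_CHILD(i)  (2 * (i) + 1)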
Example #17
/* =============================================================================
 * TMheapify
 * =============================================================================
 */
TM_SAFE
void
TMheapify (heap_t* heapPtr, long startIndex)
{
    void** elements = (void**)TM_SHARED_READ_P(heapPtr->elements);
    long (*compare)(const void*, const void*) TM_SAFE = heapPtr->compare;

    long size = (long)TM_SHARED_READ(heapPtr->size);
    long index = startIndex;

    while (1) {
        long leftIndex = LEFT_CHILD(index);
        long rightIndex = RIGHT_CHILD(index);
        long maxIndex = -1;

        if (leftIndex <= size)
        {
            long ret;
            void *e1, *e2;

            e1 = (void*)TM_SHARED_READ_P(elements[leftIndex]);
            e2 = (void*)TM_SHARED_READ_P(elements[index]);
            TM_IFUNC_CALL2(ret, compare, e1, e2);

            if (ret > 0)
                maxIndex = leftIndex;
            else
                maxIndex = index;
        } else {
            maxIndex = index;
        }

        if (rightIndex <= size)
        {
            long ret;
            void *e1, *e2;

            e1 = (void*)TM_SHARED_READ_P(elements[rightIndex]);
            e2 = (void*)TM_SHARED_READ_P(elements[maxIndex]);
            TM_IFUNC_CALL2(ret, compare, e1, e2);

            if (ret > 0)
                maxIndex = rightIndex;
        }

        if (maxIndex == index) {
            break;
        } else {
            void* tmpPtr = (void*)TM_SHARED_READ_P(elements[index]);
            TM_SHARED_WRITE_P(elements[index],
                              (void*)TM_SHARED_READ_P(elements[maxIndex]));
            TM_SHARED_WRITE_P(elements[maxIndex], tmpPtr);
            index = maxIndex;
        }
    }
}
Example #18
coordinate_t
TMelement_getNewPoint (TM_ARGDECL element_t* elementPtr)
{
    edge_t* encroachedEdgePtr =
        (edge_t*)TM_SHARED_READ_P(elementPtr->encroachedEdgePtr);
    if (encroachedEdgePtr) {
        long e;
        long numEdge = (long)TM_SHARED_READ_L(elementPtr->numEdge);
        edge_t* edges = elementPtr->edges;
        for (e = 0; e < numEdge; e++) {
            /* TMcompareEdge reads the edge fields through TM barriers,
             * so the edge can be passed by address directly. */
            if (TMcompareEdge(TM_ARG encroachedEdgePtr, &edges[e]) == 0) {
                return elementPtr->midpoints[e];
            }
        }
        assert(0);
    }

    return elementPtr->circumCenter;
}
Example #19
/* =============================================================================
 * TMfindPrevious
 * =============================================================================
 */
static list_node_t*
TMfindPrevious (TM_ARGDECL  list_t* listPtr, void* dataPtr)
{
    list_node_t* prevPtr = &(listPtr->head);
    list_node_t* nodePtr;

    // cache the comparison function
    long int (*compare)(TM_ARGDECL const void*, const void*) =
      listPtr->comparator->compare_tm;

    for (nodePtr = (list_node_t*)TM_SHARED_READ_P(prevPtr->nextPtr);
         nodePtr != NULL;
         nodePtr = (list_node_t*)TM_SHARED_READ_P(nodePtr->nextPtr))
    {
        if (compare(TM_ARG TM_SHARED_READ_P(nodePtr->dataPtr), dataPtr) >= 0) {
            return prevPtr;
        }
        prevPtr = nodePtr;
    }

    return prevPtr;
}
Example #20
/* =============================================================================
 * TMlist_remove
 * -- Returns TRUE if successful, else FALSE
 * =============================================================================
 */
bool_t
TMlist_remove (TM_ARGDECL  list_t* listPtr, void* dataPtr)
{
    list_node_t* prevPtr;
    list_node_t* nodePtr;

    prevPtr = TMfindPrevious(TM_ARG  listPtr, dataPtr);

    nodePtr = (list_node_t*)TM_SHARED_READ_P(prevPtr->nextPtr);
    if ((nodePtr != NULL) &&
        (listPtr->compare(nodePtr->dataPtr, dataPtr) == 0))
    {
        TM_SHARED_WRITE_P(prevPtr->nextPtr, TM_SHARED_READ_P(nodePtr->nextPtr));
        TM_SHARED_WRITE_P(nodePtr->nextPtr, (struct list_node*)NULL);
        TMfreeNode(TM_ARG  nodePtr);
        TM_SHARED_WRITE(listPtr->size, (TM_SHARED_READ(listPtr->size) - 1));
        assert(listPtr->size >= 0);
        return TRUE;
    }

    return FALSE;
}
Example #21
/* =============================================================================
 * customer_addReservationInfo
 * -- Returns TRUE if success, else FALSE
 * =============================================================================
 */
bool_t
customer_addReservationInfo (TM_ARGDECL
                             customer_t* customerPtr,
                             reservation_type_t type, long id, long price)
{
    reservation_info_t* reservationInfoPtr;

    reservationInfoPtr = RESERVATION_INFO_ALLOC(type, id, price);
    assert(reservationInfoPtr != NULL);

    list_t* reservationInfoListPtr =
        (list_t*)TM_SHARED_READ_P(customerPtr->reservationInfoListPtr);

    return TMLIST_INSERT(reservationInfoListPtr, (void*)reservationInfoPtr);
}
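A minimal sketch of how this might be called, assuming the STAMP-style TM_BEGIN/TM_END/TM_RESTART macros from the other examples and a RESERVATION_CAR value of reservation_type_t; the enclosing transaction and the carId/price identifiers are illustrative assumptions:

    /* Hypothetical caller: record a car reservation for customerPtr */
    TM_BEGIN();
    bool_t added = customer_addReservationInfo(TM_ARG
                                               customerPtr,
                                               RESERVATION_CAR, carId, price);
    if (added == FALSE) {
        TM_RESTART(); /* mirror customer_removeReservationInfo's failure handling */
    }
    TM_END();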
Example #22
/* =============================================================================
 * TMlist_find
 * -- Returns NULL if not found, else returns pointer to data
 * =============================================================================
 */
void*
TMlist_find (TM_ARGDECL  list_t* listPtr, void* dataPtr)
{
    list_node_t* nodePtr;
    list_node_t* prevPtr = TMfindPrevious(TM_ARG  listPtr, dataPtr);

    nodePtr = (list_node_t*)TM_SHARED_READ_P(prevPtr->nextPtr);

    if ((nodePtr == NULL) ||
        (listPtr->compare(nodePtr->dataPtr, dataPtr) != 0)) {
        return NULL;
    }

    return (nodePtr->dataPtr);
}
Example #23
/* =============================================================================
 * customer_getBill
 * -- Returns total cost of reservations
 * =============================================================================
 */
long
customer_getBill (TM_ARGDECL  customer_t* customerPtr)
{
    long bill = 0;
    list_iter_t it;
    list_t* reservationInfoListPtr =
        (list_t*)TM_SHARED_READ_P(customerPtr->reservationInfoListPtr);

    TMLIST_ITER_RESET(&it, reservationInfoListPtr);
    while (TMLIST_ITER_HASNEXT(&it, reservationInfoListPtr)) {
        reservation_info_t* reservationInfoPtr =
            (reservation_info_t*)TMLIST_ITER_NEXT(&it, reservationInfoListPtr);
        bill += reservationInfoPtr->price;
    }

    return bill;
}
Example #24
/* =============================================================================
 * TMheapify
 * =============================================================================
 */
static void
TMheapify (TM_ARGDECL  heap_t* heapPtr, long startIndex)
{
    void** elements = (void**)TM_SHARED_READ_P(heapPtr->elements);
    long (*compare)(TM_ARGDECL const void*, const void*) = heapPtr->compare->compare_tm;

    long size = (long)TM_SHARED_READ_L(heapPtr->size);
    long index = startIndex;

    while (1) {

        long leftIndex = LEFT_CHILD(index);
        long rightIndex = RIGHT_CHILD(index);
        long maxIndex = -1;

        if ((leftIndex <= size) &&
            (compare(TM_ARG
                     (void*)TM_SHARED_READ_P(elements[leftIndex]),
                     (void*)TM_SHARED_READ_P(elements[index])) > 0))
        {
            maxIndex = leftIndex;
        } else {
            maxIndex = index;
        }

        if ((rightIndex <= size) &&
            (compare(TM_ARG
                     (void*)TM_SHARED_READ_P(elements[rightIndex]),
                     (void*)TM_SHARED_READ_P(elements[maxIndex])) > 0))
        {
            maxIndex = rightIndex;
        }

        if (maxIndex == index) {
            break;
        } else {
            void* tmpPtr = (void*)TM_SHARED_READ_P(elements[index]);
            TM_SHARED_WRITE_P(elements[index],
                              (void*)TM_SHARED_READ_P(elements[maxIndex]));
            TM_SHARED_WRITE_P(elements[maxIndex], tmpPtr);
            index = maxIndex;
        }
    }
}
Example #25
/* =============================================================================
 * sequencer_run
 * =============================================================================
 */
void
sequencer_run (void* argPtr)
{
    TM_THREAD_ENTER();

    long threadId = thread_getId();

    sequencer_t* sequencerPtr = (sequencer_t*)argPtr;

    hashtable_t*      uniqueSegmentsPtr;
    endInfoEntry_t*   endInfoEntries;
    table_t**         startHashToConstructEntryTables;
    constructEntry_t* constructEntries;
    table_t*          hashToConstructEntryTable;

    uniqueSegmentsPtr               = sequencerPtr->uniqueSegmentsPtr;
    endInfoEntries                  = sequencerPtr->endInfoEntries;
    startHashToConstructEntryTables = sequencerPtr->startHashToConstructEntryTables;
    constructEntries                = sequencerPtr->constructEntries;
    hashToConstructEntryTable       = sequencerPtr->hashToConstructEntryTable;

    segments_t* segmentsPtr         = sequencerPtr->segmentsPtr;
    assert(segmentsPtr);
    vector_t*   segmentsContentsPtr = segmentsPtr->contentsPtr;
    long        numSegment          = vector_getSize(segmentsContentsPtr);
    long        segmentLength       = segmentsPtr->length;

    long i;
    long j;
    long i_start;
    long i_stop;
    long numUniqueSegment;
    long substringLength;
    long entryIndex;

    /*
     * Step 1: Remove duplicate segments
     */
// #if defined(HTM) || defined(STM)
    long numThread = thread_getNumThread();
    {
        /* Choose disjoint segments [i_start,i_stop) for each thread */
        long partitionSize = (numSegment + numThread/2) / numThread; /* with rounding */
        i_start = threadId * partitionSize;
        if (threadId == (numThread - 1)) {
            i_stop = numSegment;
        } else {
            i_stop = i_start + partitionSize;
        }
    }
// #else /* !(HTM || STM) */
//     i_start = 0;
//     i_stop = numSegment;
// #endif /* !(HTM || STM) */
    for (i = i_start; i < i_stop; i+=CHUNK_STEP1) {
        TM_BEGIN();
        {
            long ii;
            long ii_stop = MIN(i_stop, (i+CHUNK_STEP1));
            for (ii = i; ii < ii_stop; ii++) {
                void* segment = vector_at(segmentsContentsPtr, ii);
                TMHASHTABLE_INSERT(uniqueSegmentsPtr,
                                   segment,
                                   segment);
            } /* ii */
        }
        TM_END();
    }

    thread_barrier_wait();

    /*
     * Step 2a: Iterate over unique segments and compute hashes.
     *
     * For the gene "atcg", the hashes for the end would be:
     *
     *     "t", "tc", and "tcg"
     *
     * And for the gene "tcgg", the hashes for the start would be:
     *
     *    "t", "tc", and "tcg"
     *
     * The names are "end" and "start" because if a matching pair is found,
     * they are the substring of the end part of the pair and the start
     * part of the pair respectively. In the above example, "tcg" is the
     * matching substring so:
     *
     *     (end)    (start)
     *     a[tcg] + [tcg]g  = a[tcg]g    (overlap = "tcg")
     */

    /* uniqueSegmentsPtr is constant now */
    numUniqueSegment = hashtable_getSize(uniqueSegmentsPtr);
    entryIndex = 0;

// #if defined(HTM) || defined(STM)
    {
        /* Choose disjoint segments [i_start,i_stop) for each thread */
        long num = uniqueSegmentsPtr->numBucket;
        long partitionSize = (num + numThread/2) / numThread; /* with rounding */
        i_start = threadId * partitionSize;
        if (threadId == (numThread - 1)) {
            i_stop = num;
        } else {
            i_stop = i_start + partitionSize;
        }
    }
    {
        /* Approximate disjoint segments of element allocation in constructEntries */
        long partitionSize = (numUniqueSegment + numThread/2) / numThread; /* with rounding */
        entryIndex = threadId * partitionSize;
    }
// #else /* !(HTM || STM) */
//    i_start = 0;
//    i_stop = uniqueSegmentsPtr->numBucket;
//    entryIndex = 0;
//#endif /* !(HTM || STM) */

    for (i = i_start; i < i_stop; i++) {

        list_t* chainPtr = uniqueSegmentsPtr->buckets[i];
        list_iter_t it;
        list_iter_reset(&it, chainPtr);

        while (list_iter_hasNext(&it, chainPtr)) {

            char* segment =
                (char*)((pair_t*)list_iter_next(&it, chainPtr))->firstPtr;
            constructEntry_t* constructEntryPtr;
            long j;
            ulong_t startHash;
            bool_t status;

            /* Find an empty constructEntries entry */
            TM_BEGIN();
            while (((void*)TM_SHARED_READ_P(constructEntries[entryIndex].segment)) != NULL) {
                entryIndex = (entryIndex + 1) % numUniqueSegment; /* look for empty */
            }
            constructEntryPtr = &constructEntries[entryIndex];
            TM_SHARED_WRITE_P(constructEntryPtr->segment, segment);
            TM_END();
            entryIndex = (entryIndex + 1) % numUniqueSegment;

            /*
             * Save hashes (sdbm algorithm) of segment substrings
             *
             * endHashes will be computed for shorter substrings after matches
             * have been made (in the next phase of the code). This will reduce
             * the number of substrings for which hashes need to be computed.
             *
             * Since we can compute startHashes incrementally, we go ahead
             * and compute all of them here.
             */
            /* constructEntryPtr is local now */
            constructEntryPtr->endHash = (ulong_t)hashString(&segment[1]);

            startHash = 0;
            for (j = 1; j < segmentLength; j++) {
                startHash = (ulong_t)segment[j-1] +
                            (startHash << 6) + (startHash << 16) - startHash;
                TM_BEGIN();
                status = TMTABLE_INSERT(startHashToConstructEntryTables[j],
                                        (ulong_t)startHash,
                                        (void*)constructEntryPtr );
                TM_END();
                assert(status);
            }

            /*
             * For looking up construct entries quickly
             */
            startHash = (ulong_t)segment[j-1] +
                        (startHash << 6) + (startHash << 16) - startHash;
            TM_BEGIN();
            status = TMTABLE_INSERT(hashToConstructEntryTable,
                                    (ulong_t)startHash,
                                    (void*)constructEntryPtr);
            TM_END();
            assert(status);
        }
    }

    thread_barrier_wait();

    /*
     * Step 2b: Match ends to starts by using hash-based string comparison.
     */
    for (substringLength = segmentLength-1; substringLength > 0; substringLength--) {

        table_t* startHashToConstructEntryTablePtr =
            startHashToConstructEntryTables[substringLength];
        list_t** buckets = startHashToConstructEntryTablePtr->buckets;
        long numBucket = startHashToConstructEntryTablePtr->numBucket;

        long index_start;
        long index_stop;

// #if defined(HTM) || defined(STM)
        {
            /* Choose disjoint segments [index_start,index_stop) for each thread */
            long partitionSize = (numUniqueSegment + numThread/2) / numThread; /* with rounding */
            index_start = threadId * partitionSize;
            if (threadId == (numThread - 1)) {
                index_stop = numUniqueSegment;
            } else {
                index_stop = index_start + partitionSize;
            }
        }
// #else /* !(HTM || STM) */
//        index_start = 0;
//        index_stop = numUniqueSegment;
//#endif /* !(HTM || STM) */

        /* Iterating over disjoint intervals in the range [0, numUniqueSegment) */
        for (entryIndex = index_start;
             entryIndex < index_stop;
             entryIndex += endInfoEntries[entryIndex].jumpToNext)
        {
            if (!endInfoEntries[entryIndex].isEnd) {
                continue;
            }

            /*  ConstructEntries[entryIndex] is local data */
            constructEntry_t* endConstructEntryPtr =
                &constructEntries[entryIndex];
            char* endSegment = endConstructEntryPtr->segment;
            ulong_t endHash = endConstructEntryPtr->endHash;

            list_t* chainPtr = buckets[endHash % numBucket]; /* buckets: constant data */
            list_iter_t it;
            list_iter_reset(&it, chainPtr);

            /* Linked list at chainPtr is constant */
            while (list_iter_hasNext(&it, chainPtr)) {

                constructEntry_t* startConstructEntryPtr =
                    (constructEntry_t*)list_iter_next(&it, chainPtr);
                char* startSegment = startConstructEntryPtr->segment;
                long newLength = 0;

                /* endConstructEntryPtr is local except for properties startPtr/endPtr/length */
                TM_BEGIN();

                /* Check if matches */
                if (TM_SHARED_READ(startConstructEntryPtr->isStart) &&
                    (TM_SHARED_READ_P(endConstructEntryPtr->startPtr) != startConstructEntryPtr) &&
                    (strncmp(startSegment,
                             &endSegment[segmentLength - substringLength],
                             substringLength) == 0))
                {
                    TM_SHARED_WRITE(startConstructEntryPtr->isStart, FALSE);

                    constructEntry_t* startConstructEntry_endPtr;
                    constructEntry_t* endConstructEntry_startPtr;

                    /* Update endInfo (appended something so no longer end) */
                    TM_LOCAL_WRITE(endInfoEntries[entryIndex].isEnd, FALSE);

                    /* Update segment chain construct info */
                    startConstructEntry_endPtr =
                        (constructEntry_t*)TM_SHARED_READ_P(startConstructEntryPtr->endPtr);
                    endConstructEntry_startPtr =
                        (constructEntry_t*)TM_SHARED_READ_P(endConstructEntryPtr->startPtr);

                    assert(startConstructEntry_endPtr);
                    assert(endConstructEntry_startPtr);
                    TM_SHARED_WRITE_P(startConstructEntry_endPtr->startPtr,
                                      endConstructEntry_startPtr);
                    TM_LOCAL_WRITE_P(endConstructEntryPtr->nextPtr,
                                     startConstructEntryPtr);
                    TM_SHARED_WRITE_P(endConstructEntry_startPtr->endPtr,
                                      startConstructEntry_endPtr);
                    TM_SHARED_WRITE(endConstructEntryPtr->overlap, substringLength);
                    newLength = (long)TM_SHARED_READ(endConstructEntry_startPtr->length) +
                                (long)TM_SHARED_READ(startConstructEntryPtr->length) -
                                substringLength;
                    TM_SHARED_WRITE(endConstructEntry_startPtr->length, newLength);
                } /* if (matched) */

                TM_END();

                if (!endInfoEntries[entryIndex].isEnd) { /* if there was a match */
                    break;
                }
            } /* iterate over chain */

        } /* for (endIndex < numUniqueSegment) */

        thread_barrier_wait();

        /*
         * Step 2c: Update jump values and hashes
         *
         * endHash entries of all remaining ends are updated to the next
         * substringLength. Additionally jumpToNext entries are updated such
         * that they allow to skip non-end entries. Currently this is sequential
         * because parallelization did not perform better.
         */

        if (threadId == 0) {
            if (substringLength > 1) {
                long index = segmentLength - substringLength + 1;
                /* initialization of j and i: i is the next end after j=0 */
                for (i = 1; !endInfoEntries[i].isEnd; i+=endInfoEntries[i].jumpToNext) {
                    /* find first non-null */
                }
                /* entry 0 is handled separately from the loop below */
                endInfoEntries[0].jumpToNext = i;
                if (endInfoEntries[0].isEnd) {
                    constructEntry_t* constructEntryPtr = &constructEntries[0];
                    char* segment = constructEntryPtr->segment;
                    constructEntryPtr->endHash = (ulong_t)hashString(&segment[index]);
                }
                /* Continue scanning (do not reset i) */
                for (j = 0; i < numUniqueSegment; i+=endInfoEntries[i].jumpToNext) {
                    if (endInfoEntries[i].isEnd) {
                        constructEntry_t* constructEntryPtr = &constructEntries[i];
                        char* segment = constructEntryPtr->segment;
                        constructEntryPtr->endHash = (ulong_t)hashString(&segment[index]);
                        endInfoEntries[j].jumpToNext = MAX(1, (i - j));
                        j = i;
                    }
                }
                endInfoEntries[j].jumpToNext = i - j;
            }
        }

        thread_barrier_wait();

    } /* for (substringLength > 0) */


    thread_barrier_wait();

    /*
     * Step 3: Build sequence string
     */
    if (threadId == 0) {

        long totalLength = 0;

        for (i = 0; i < numUniqueSegment; i++) {
            constructEntry_t* constructEntryPtr = &constructEntries[i];
            if (constructEntryPtr->isStart) {
              totalLength += constructEntryPtr->length;
            }
        }

        sequencerPtr->sequence = (char*)P_MALLOC((totalLength+1) * sizeof(char));
        char* sequence = sequencerPtr->sequence;
        assert(sequence);

        char* copyPtr = sequence;
        long sequenceLength = 0;

        for (i = 0; i < numUniqueSegment; i++) {
            constructEntry_t* constructEntryPtr = &constructEntries[i];
            /* If there are several start segments, we append in arbitrary order  */
            if (constructEntryPtr->isStart) {
                long newSequenceLength = sequenceLength + constructEntryPtr->length;
                assert( newSequenceLength <= totalLength );
                copyPtr = sequence + sequenceLength;
                sequenceLength = newSequenceLength;
                do {
                    long numChar = segmentLength - constructEntryPtr->overlap;
                    if ((copyPtr + numChar) > (sequence + newSequenceLength)) {
                        TM_PRINT0("ERROR: sequence length != actual length\n");
                        break;
                    }
                    memcpy(copyPtr,
                           constructEntryPtr->segment,
                           (numChar * sizeof(char)));
                    copyPtr += numChar;
                } while ((constructEntryPtr = constructEntryPtr->nextPtr) != NULL);
                assert(copyPtr <= (sequence + sequenceLength));
            }
        }

        assert(sequence != NULL);
        sequence[sequenceLength] = '\0';
    }

    TM_THREAD_EXIT();
}
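The incremental startHash update in Step 2a, (ulong_t)segment[j-1] + (startHash << 6) + (startHash << 16) - startHash, is the sdbm string hash written with shifts (equivalently hash = hash * 65599 + c), so hashString is presumably the same recurrence applied to a whole NUL-terminated suffix. A sketch under that assumption:

static ulong_t
hashString (char* str)
{
    ulong_t hash = 0;
    long c;

    /* sdbm: hash = hash * 65599 + c, written with shifts */
    while ((c = *str++) != '\0') {
        hash = c + (hash << 6) + (hash << 16) - hash;
    }

    return hash;
}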
Example #26
/* =============================================================================
 * TMlist_isEmpty
 * -- Return TRUE if list is empty, else FALSE
 * =============================================================================
 */
bool_t
TMlist_isEmpty (TM_ARGDECL  list_t* listPtr)
{
    return (((void*)TM_SHARED_READ_P(listPtr->head.nextPtr) == NULL) ?
            TRUE : FALSE);
}
Example #27
list_t*
TMelement_getNeighborListPtr (TM_ARGDECL element_t* elementPtr)
{
    return (list_t*)TM_SHARED_READ_P(elementPtr->neighborListPtr);
}
Example #28
/* =============================================================================
 * TMqueue_free
 * =============================================================================
 */
void
TMqueue_free (TM_ARGDECL  queue_t* queuePtr)
{
    TM_FREE((void**)TM_SHARED_READ_P(queuePtr->elements));
    TM_FREE(queuePtr);
}