Example 1
void mrqd_delegate(void* lock,
                   void (*funPtr)(unsigned int, void *),
                   unsigned int messageSize,
                   void * messageAddress) {
    MRQDLock *l = (MRQDLock*)lock;
    /* Back off while a writer has raised the write barrier. */
    while(atomic_load_explicit(&l->writeBarrier.value, memory_order_seq_cst) > 0){
        thread_yield();
    }
    while(true) {
        if(tatas_try_lock(&l->mutexLock)) {
            /* Became the combiner: open the delegation queue, wait for
               concurrent readers to drain, run the request, then help
               execute queued requests before releasing the lock. */
            qdq_open(&l->queue);
            rgri_wait_all_readers_gone(&l->readIndicator);
            funPtr(messageSize, messageAddress);
            qdq_flush(&l->queue);
            tatas_unlock(&l->mutexLock);
            return;
        } else if(qdq_enqueue(&l->queue,
                              funPtr,
                              messageSize,
                              messageAddress)){
            /* Request handed off to the current combiner. */
            return;
        }
        /* Queue closed and lock busy: yield and retry. */
        thread_yield();
    }
}
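
Each delegate function in these examples shares the same calling convention: the request is a function of type void (*)(unsigned int, void *) plus a message that the lock implementation may copy and execute later on another thread. A minimal usage sketch, assuming an already initialized MRQDLock; shared_counter, increment_cs, and add_one are hypothetical names introduced for illustration:

static long shared_counter = 0;

/* Matches the delegated-request signature void (*)(unsigned int, void *). */
static void increment_cs(unsigned int messageSize, void *messageAddress) {
    (void)messageSize;
    shared_counter += *(long *)messageAddress; /* runs under the lock */
}

static void add_one(MRQDLock *lock) {
    long amount = 1;
    /* The message may be copied into the delegation queue, so this call
       can return before the critical section has executed. */
    mrqd_delegate(lock, increment_cs, sizeof(amount), &amount);
}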
Example 2
void ccsynch_close_delegate_buffer(void * buffer,
                                   void (*funPtr)(unsigned int, void *)){
    CCSynchLockNode *tmpNode;
    void (*tmpFunPtr)(unsigned int, void *);
    CCSynchLockNode *tmpNodeNext;
    int counter = 0;
    CCSynchLockNode *curNode = ccsynchNextLocalNode;
    /* Publish the request in this thread's queue node. */
    curNode->buffer = buffer;
    curNode->requestFunction = funPtr;
    atomic_thread_fence( memory_order_release );
    /* Spin until a predecessor either executes the request or hands
       over the combiner role. */
    while (atomic_load_explicit(&curNode->wait, memory_order_acquire) == 1){
        thread_yield();
    }
    if(curNode->completed==true){
        /* A helping thread already executed the request. */
        return;
    }else{
        funPtr(curNode->messageSize, buffer);
    }
    /* Became the combiner: help execute queued requests, up to
       CCSYNCH_HAND_OFF_LIMIT of them. */
    tmpNode = (CCSynchLockNode *)atomic_load_explicit(&curNode->next, memory_order_acquire);

    while ((tmpNodeNext=(CCSynchLockNode *)atomic_load_explicit(&tmpNode->next, memory_order_acquire)) != NULL && counter < CCSYNCH_HAND_OFF_LIMIT) {
        counter = counter + 1;
        tmpFunPtr = tmpNode->requestFunction;
        if(tmpFunPtr==NULL){
            break;
        }
        tmpFunPtr(tmpNode->messageSize, tmpNode->buffer);
        tmpNode->completed = true;
        atomic_store_explicit(&tmpNode->wait, 0, memory_order_release);
        tmpNode = tmpNodeNext;
    }
    /* Hand the combiner role to the next waiting thread. */
    atomic_store_explicit(&tmpNode->wait, 0, memory_order_release);
}
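
A plausible layout for CCSynchLockNode, inferred only from the fields these CCSynch examples touch (see also Example 6); the library's actual definition may differ in field order, types, and padding:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

typedef struct CCSynchLockNodeSketch {
    _Atomic(uintptr_t) next;   /* successor node in the queue */
    _Atomic(int) wait;         /* 1 while the owning thread must spin */
    bool completed;            /* set once a helper ran the request */
    unsigned int messageSize;
    void *buffer;              /* message payload */
    void (*requestFunction)(unsigned int, void *); /* delegated operation */
} CCSynchLockNodeSketch;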
Example 3
void mrqd_delegate_wait(void* lock,
                        void (*funPtr)(unsigned int, void *),
                        unsigned int messageSize,
                        void * messageAddress) {
    volatile atomic_int waitVar = ATOMIC_VAR_INIT(1);
    /* The delegated buffer starts with a pointer to the caller's wait
       flag followed by the function pointer; the message comes after. */
    unsigned int metaDataSize = sizeof(volatile atomic_int *) +
        sizeof(void (*)(unsigned int, void *));
    char * buff = mrqd_delegate_or_lock(lock,
                                        metaDataSize + messageSize);
    if(buff==NULL){
        /* Acquired the lock instead: run the request directly. */
        funPtr(messageSize, messageAddress);
        mrqd_delegate_unlock(lock);
    }else{
        /* Pack the wait-flag pointer, the function pointer, and the
           message into the delegation buffer. */
        volatile atomic_int ** waitVarPtrAddress = (volatile atomic_int **)buff;
        *waitVarPtrAddress = &waitVar;
        void (**funPtrAddress)(unsigned int, void *) =
            (void (**)(unsigned int, void *))&buff[sizeof(volatile atomic_int *)];
        *funPtrAddress = funPtr;
        char * msgBuffer = (char *)messageAddress;
        for(unsigned int i = metaDataSize; i < (messageSize + metaDataSize); i++){
            buff[i] = msgBuffer[i - metaDataSize];
        }
        mrqd_close_delegate_buffer((void *)buff, mrqd_executeAndWaitCS);
        /* Block until the combiner has executed the request and reset
           the wait flag. */
        while(atomic_load_explicit(&waitVar, memory_order_acquire)){
            thread_yield();
        }
    }
}
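
For reference, a hedged sketch of what a wrapper like mrqd_executeAndWaitCS could do, reconstructed purely from the buffer layout built above (wait-flag pointer, then function pointer, then message); the real implementation lives in the library, and the assumption that the size argument covers the metadata is mine:

static void mrqd_executeAndWaitCS_sketch(unsigned int bufferSize, void *buffer) {
    char *buff = (char *)buffer;
    /* Recover the metadata packed by mrqd_delegate_wait. */
    volatile atomic_int *waitVar = *(volatile atomic_int **)buff;
    void (*funPtr)(unsigned int, void *) =
        *(void (**)(unsigned int, void *))&buff[sizeof(volatile atomic_int *)];
    unsigned int metaDataSize = sizeof(volatile atomic_int *) +
        sizeof(void (*)(unsigned int, void *));
    /* Run the request on the message that follows the metadata
       (assuming bufferSize includes the metadata). */
    funPtr(bufferSize - metaDataSize, &buff[metaDataSize]);
    /* Wake the caller spinning in mrqd_delegate_wait. */
    atomic_store_explicit(waitVar, 0, memory_order_release);
}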
Example 4
void tatas_delegate(void * lock,
                    void (*funPtr)(unsigned int, void *),
                    unsigned int messageSize,
                    void * messageAddress){
    TATASLock *l = (TATASLock*)lock;
    /* No delegation queue: simply run the request under the lock. */
    tatas_lock(l);
    funPtr(messageSize, messageAddress);
    tatas_unlock(l);
}
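
Because tatas_delegate runs funPtr in the calling thread before returning, any result written through messageAddress is visible to the caller immediately afterwards, unlike the queue-based variants above. A small sketch, assuming an initialized TATASLock; square_cs and square_locked are hypothetical:

static void square_cs(unsigned int messageSize, void *messageAddress) {
    (void)messageSize;
    long *p = (long *)messageAddress;
    *p = *p * *p; /* runs while the lock is held */
}

static long square_locked(TATASLock *lock, long x) {
    tatas_delegate(lock, square_cs, sizeof(x), &x);
    return x; /* safe: the request completed before tatas_delegate returned */
}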
Example 5
void BSTree::reverseOrder (BSTNode* node, void (*funPtr)(int)) {
    if (node != NULL) {
        // Visit the right subtree first, then the node, then the left
        // subtree, yielding descending order.
        if (node->getRightChild() != NULL) {
            reverseOrder(node->getRightChild(), funPtr);
        }

        funPtr(node->getContents());

        if (node->getLeftChild() != NULL) {
            reverseOrder(node->getLeftChild(), funPtr);
        }
    }
}
Example 6
void ccsynch_delegate(void* lock,
                      void (*funPtr)(unsigned int, void *),
                      unsigned int messageSize,
                      void * messageAddress) {
    CCSynchLock *l = (CCSynchLock*)lock;
    unsigned char * messageBuffer = (unsigned char *) messageAddress;
    CCSynchLockNode *nextNode;
    CCSynchLockNode *curNode;
    CCSynchLockNode *tmpNode;
    void (*tmpFunPtr)(unsigned int, void *);
    CCSynchLockNode *tmpNodeNext;
    int counter = 0;
    ccsynchlock_initLocalIfNeeded();
    /* Prepare a fresh node and swap it in as the new queue tail; the
       node received in exchange carries this thread's request. */
    nextNode = ccsynchNextLocalNode;
    atomic_store_explicit(&nextNode->next, (uintptr_t)NULL, memory_order_relaxed);
    atomic_store_explicit(&nextNode->wait, 1, memory_order_relaxed);
    nextNode->completed = false;
    curNode = (CCSynchLockNode *)atomic_exchange_explicit(&l->tailPtr.value, (uintptr_t)nextNode, memory_order_release);
    curNode->buffer = messageBuffer;
    curNode->messageSize = messageSize;
    curNode->requestFunction = funPtr;
    atomic_store_explicit(&curNode->next, (uintptr_t)nextNode, memory_order_release);
    ccsynchNextLocalNode = curNode;
    /* Spin until a helping thread executes the request or hands over
       the combiner role. */
    while (atomic_load_explicit(&curNode->wait, memory_order_acquire) == 1){
        thread_yield();
    }
    if(curNode->completed==true){
        /* A helping thread already executed the request. */
        return;
    }else{
        funPtr(messageSize, messageBuffer);
    }
    /* Became the combiner: help execute queued requests, up to
       CCSYNCH_HAND_OFF_LIMIT of them, then pass the role on. */
    tmpNode = (CCSynchLockNode *)atomic_load_explicit(&curNode->next, memory_order_acquire);
    while ((tmpNodeNext=(CCSynchLockNode *)atomic_load_explicit(&tmpNode->next, memory_order_acquire)) != NULL && counter < CCSYNCH_HAND_OFF_LIMIT) {
        counter = counter + 1;
        tmpFunPtr = tmpNode->requestFunction;
        if(tmpFunPtr==NULL){
            break;
        }
        tmpFunPtr(tmpNode->messageSize, tmpNode->buffer);
        tmpNode->completed = true;
        atomic_store_explicit(&tmpNode->wait, 0, memory_order_release);
        tmpNode = tmpNodeNext;
    }
    atomic_store_explicit(&tmpNode->wait, 0, memory_order_release);
}
Example 7
void qd_delegate(void* lock,
                 void (*funPtr)(unsigned int, void *),
                 unsigned int messageSize,
                 void * messageAddress) {
    QDLock *l = (QDLock*)lock;
    while(true) {
        if(tatas_try_lock(&l->mutexLock)) {
            /* Became the combiner: open the delegation queue, run the
               request, then help execute queued requests before
               releasing the lock. */
            qdq_open(&l->queue);
            funPtr(messageSize, messageAddress);
            qdq_flush(&l->queue);
            tatas_unlock(&l->mutexLock);
            return;
        } else if(qdq_enqueue(&l->queue,
                              funPtr,
                              messageSize,
                              messageAddress)){
            /* Request handed off to the current combiner. */
            return;
        }
        /* Queue closed and lock busy: yield and retry. */
        thread_yield();
    }
}