Exemple #1
0
void* operator_new_arr(size_t sz) throw (std::bad_alloc) {
    // Array-new replacement routed through the scalable allocator.
    // Follows the standard operator new[] contract: on allocation
    // failure it throws std::bad_alloc instead of returning NULL.
    void* ptr = scalable_malloc(sz);
    if (!ptr)
        throw std::bad_alloc();
    return ptr;
}
Exemple #2
0
void* operator_new_arr_t(std::size_t sz, const std::nothrow_t&) throw() {
    // nothrow array-new variant: delegate to scalable_malloc and let a
    // NULL result propagate to the caller (no exception is ever thrown).
    void* const ptr = scalable_malloc(sz);
    return ptr;
}
 explicit AllocInfo(int sz) : p((int*)scalable_malloc(sz*sizeof(int))),
                                val(rand()), size(sz) {
     // Stamp every slot of the freshly allocated array with the random
     // marker value `val`; later checks can compare slots against it to
     // detect memory corruption.
     ASSERT(p, NULL);
     int idx = 0;
     while (idx < size) {
         p[idx++] = val;
     }
 }
Exemple #4
0
void* operator new(size_t sz, const std::nothrow_t&) throw() {
    // nothrow scalar operator new: forward to the scalable allocator;
    // failure is reported by returning NULL, never by throwing.
    void* const ptr = scalable_malloc(sz);
    return ptr;
}
Exemple #5
0
void * __TBB_internal_malloc(size_t size)
{
    // Thin wrapper that maps the internal malloc entry point onto the
    // scalable memory pool; a NULL result is passed through unchanged.
    void* const mem = scalable_malloc(size);
    return mem;
}
void TestObjectRecognition() {
    // Whitebox check that size queries reject pointers which merely *look*
    // like allocator-owned objects.  Two forgeries are built by hand -- a
    // fake slab Block and a fake large-object header -- and
    // safer_scalable_msize must return 0 for both under every
    // back-reference bit pattern exercised below.
    size_t headersSize = sizeof(LargeMemoryBlock)+sizeof(LargeObjectHdr);
    unsigned falseObjectSize = 113; // unsigned is the type expected by getObjectSize
    size_t obtainedSize;

    ASSERT(sizeof(BackRefIdx)==4, "Unexpected size of BackRefIdx");
    ASSERT(getObjectSize(falseObjectSize)!=falseObjectSize, "Error in test: bad choice for false object size");

    // Forge a slab block: carve a slab-aligned Block header out of a raw
    // buffer and stamp it with the chosen object size.
    void* mem = scalable_malloc(2*slabSize);
    ASSERT(mem, "Memory was not allocated");
    Block* falseBlock = (Block*)alignUp((uintptr_t)mem, slabSize);
    falseBlock->objectSize = falseObjectSize;
    // A pointer inside the forged slab, as if it were one of its objects.
    char* falseSO = (char*)falseBlock + falseObjectSize*7;
    ASSERT(alignDown(falseSO, slabSize)==(void*)falseBlock, "Error in test: false object offset is too big");

    // Forge a large object: place a LargeObjectHdr just below a slab-aligned
    // address and give it a genuine back reference, so that plain
    // scalable_msize is fooled (sanity-checked by the ASSERT below).
    void* bufferLOH = scalable_malloc(2*slabSize + headersSize);
    ASSERT(bufferLOH, "Memory was not allocated");
    LargeObjectHdr* falseLO = 
        (LargeObjectHdr*)alignUp((uintptr_t)bufferLOH + headersSize, slabSize);
    LargeObjectHdr* headerLO = (LargeObjectHdr*)falseLO-1;
    headerLO->memoryBlock = (LargeMemoryBlock*)bufferLOH;
    headerLO->memoryBlock->unalignedSize = 2*slabSize + headersSize;
    headerLO->memoryBlock->objectSize = slabSize + headersSize;
    headerLO->backRefIdx = BackRefIdx::newBackRef(/*largeObj=*/true);
    setBackRef(headerLO->backRefIdx, headerLO);
    ASSERT(scalable_msize(falseLO) == slabSize + headersSize,
           "Error in test: LOH falsification failed");
    // Invalidate the back reference: from here on both forgeries must fail.
    removeBackRef(headerLO->backRefIdx);

    // Sweep (master, offset, largeObj) back-reference combinations -- mostly
    // out-of-range, some colliding with live indices -- and expect every
    // forged pointer to be rejected.  Pass cnt==0 runs against the table as
    // is; before pass cnt==1 the table is grown past BR_MAX_CNT with
    // NULL-target entries, the sweep repeats, then the entries are removed.
    const int NUM_OF_IDX = BR_MAX_CNT+2;
    BackRefIdx idxs[NUM_OF_IDX];
    for (int cnt=0; cnt<2; cnt++) {
        for (int master = -10; master<10; master++) {
            falseBlock->backRefIdx.master = (uint16_t)master;
            headerLO->backRefIdx.master = (uint16_t)master;
        
            for (int bl = -10; bl<BR_MAX_CNT+10; bl++) {
                falseBlock->backRefIdx.offset = (uint16_t)bl;
                headerLO->backRefIdx.offset = (uint16_t)bl;

                for (int largeObj = 0; largeObj<2; largeObj++) {
                    falseBlock->backRefIdx.largeObj = largeObj;
                    headerLO->backRefIdx.largeObj = largeObj;

                    obtainedSize = safer_scalable_msize(falseSO, NULL);
                    ASSERT(obtainedSize==0, "Incorrect pointer accepted");
                    obtainedSize = safer_scalable_msize(falseLO, NULL);
                    ASSERT(obtainedSize==0, "Incorrect pointer accepted");
                }
            }
        }
        if (cnt == 1) {
            for (int i=0; i<NUM_OF_IDX; i++)
                removeBackRef(idxs[i]);
            break;
        }
        for (int i=0; i<NUM_OF_IDX; i++) {
            idxs[i] = BackRefIdx::newBackRef(/*largeObj=*/false);
            setBackRef(idxs[i], NULL);
        }
    }
    // Genuine pointers must still be recognized and report correct sizes.
    char *smallPtr = (char*)scalable_malloc(falseObjectSize);
    obtainedSize = safer_scalable_msize(smallPtr, NULL);
    ASSERT(obtainedSize==getObjectSize(falseObjectSize), "Correct pointer not accepted?");
    scalable_free(smallPtr);

    obtainedSize = safer_scalable_msize(mem, NULL);
    ASSERT(obtainedSize>=2*slabSize, "Correct pointer not accepted?");
    scalable_free(mem);
    scalable_free(bufferLOH);
}
 void operator() (int) const {
     // Perform one small allocation from this worker, storing the result
     // in gPtr, then rendezvous with the other threads at the barrier
     // before bumping the shared completion counter.
     // NOTE(review): gPtr is presumably freed/inspected by the test driver
     // after all tasks finish -- confirm against the caller.
     gPtr = scalable_malloc(8);
     my_barr->wait();
     ++FinishedTasks;
 }
 void operator() (bool do_malloc) const {
     // Wait for all threads to reach the barrier, then optionally issue an
     // allocation and record completion.
     // NOTE(review): the scalable_malloc(10) result is ignored (leaked) --
     // presumably intentional for this test scenario; confirm.
     my_barr->wait();
     if (do_malloc) scalable_malloc(10);
     ++FinishedTasks;
 }
Exemple #9
0
void* operator new[](size_t sz)
{
    // Global array operator new backed by the scalable allocator.
    // Standard contract: never returns NULL -- throws std::bad_alloc
    // when the allocation cannot be satisfied.
    void* area = scalable_malloc(sz);
    if (area == NULL)
        throw std::bad_alloc();
    return area;
}
Exemple #10
0
void ser_nqueens_rec(int column, list_node* head) {
    int i;
    for (i=0; i<SIZE; i++) {
        list_node *node;
        if (addok(head, i, column)) {
            // add the node
            node = (list_node*)scalable_malloc(sizeof(list_node));
#ifdef DEBUG
            if(node==NULL) out_of_memory = 1;
#endif
#ifdef QUIT_ON_SOLUTION // QUIT as soon as a solution is found
            if (node!=NULL && solution == NULL) {
#else 			// Don't quit. Instead keep scanning the whole search space.
            if (node!=NULL) {
#endif
                node->next = head;
                node->row  = i;
                if (column+1<SIZE) {
                    ser_nqueens_rec(column+1, node);
                } else { // found a solution 
                  //solution = node;
                  solution_count++; // atomic
                }
            }
        }
        // else do nothing -- dead computation branch
    }
}

void nqueens_rec(int column, list_node* head);

// parallel_for body: explores one column of the n-queens search tree,
// trying each row index of `range` as the placement for `column`.
class nqueensBody {
public:
    int column;        // column currently being filled
    list_node* head;   // rows chosen for the previous columns (linked list)

    void operator () (const tbb::blocked_range<int> &range) const {
        for(int i=range.begin(); i<range.end(); i++) {
            list_node *node;
            if (addok(head, i, column)) {
                // add the node for this candidate row
                node = (list_node*)scalable_malloc(sizeof(list_node));
#ifdef DEBUG
                if(node==NULL) out_of_memory = 1;
#endif
#ifdef QUIT_ON_SOLUTION // QUIT as soon as a solution is found
                if (node!=NULL && solution == NULL) {
#else           // Don't quit. Instead keep scanning the whole search space.
                if (node!=NULL) {
#endif
                    node->next = head;
                    node->row  = i;
                    if (column+1<SIZE) {
#ifdef CUTOFF
                        // Below CUTOFF_LEVEL switch to the serial version
                        // to avoid oversplitting near the leaves.
                        if (column+1>=CUTOFF_LEVEL)
                            ser_nqueens_rec(column+1, node);
                        else
                            nqueens_rec(column+1, node);
#else
                        nqueens_rec(column+1, node);
#endif

                    } else { // found a solution 
                        //solution = node;
                        solution_count++; //atomic
                        //abort()
                    }
                    // FIX: both recursion paths have fully explored the
                    // subtree by the time they return (parallel_for blocks
                    // until its tasks complete), so the node can be freed
                    // here; previously it leaked on every visited state.
                    scalable_free(node);
                }
            } // end if addok
        // else do nothing -- dead computation branch
        }
    }

    // Constructor
    nqueensBody(int col, list_node* hd) : column(col), head(hd) { }
};

void nqueens_rec(int column, list_node* head) {
    // Parallel step of the n-queens search: fan the candidate rows of
    // `column` out across tasks.  parallel_for blocks until all subtasks
    // complete, so each call fully explores its subtree before returning.
#ifdef TWOLVL_SCHED
    // Two-level scheduling: column 0 is split with auto_partitioner, all
    // deeper columns with the compile-time configured PARTITIONER type.
    tbb::auto_partitioner firstLevelPartitioner;
    PARTITIONER partitioner;
//    tbb::parallel_for (tbb::blocked_range<int>(0,SIZE,GRAINSIZE), nqueensBody(column,head), ((column==0)?firstLevelPartitioner:partitioner) );
    // NOTE(review): the commented call above cannot compile -- the ?:
    // operands have different partitioner types -- hence the if/else below.
    if (column==0)
        tbb::parallel_for (tbb::blocked_range<int>(0,SIZE,GRAINSIZE), nqueensBody(column,head), firstLevelPartitioner );
    else
        tbb::parallel_for (tbb::blocked_range<int>(0,SIZE,GRAINSIZE), nqueensBody(column,head), partitioner );
#else
    PARTITIONER partitioner;
    tbb::parallel_for (tbb::blocked_range<int>(0,SIZE,GRAINSIZE), nqueensBody(column,head), partitioner);
#endif
}
Exemple #11
0
// Serial branch-and-bound TSP search.  `partial_solution` is a linked list
// of the cities visited so far (most recent first), `position` the number
// of edges already placed, `partial_weight` the accumulated tour weight.
// NOTE(review): partial_solution is dereferenced below, so it must be
// non-NULL on entry -- confirm against the callers.
void ser_solve_tsp_rec(const int (*A)[SIZE], list_node* partial_solution, int position, int partial_weight) 
{

#ifdef PARTIAL_WEIGHT_CUTOFF 
    // Prune branches that are already at least as heavy as the best tour.
#ifdef NO_LOCKS
    if (partial_weight >= my_weight.local() ) return;
#else
    if (partial_weight >= solution_weight) return;
#endif
#endif

    int i;
    for(i=0; i<SIZE; i++){
        // 1. check if i was already used in the solution so far.
        //    if so, skip it.
        int used = 0;
        list_node* tmp_p = partial_solution;

        while(tmp_p != NULL) {
            if (tmp_p->val == i) { 
                used = 1;
                break;
            }
            tmp_p = tmp_p->next;
        }

        if (!used) {
            // allocate a node for city i
            tmp_p = (list_node*) scalable_malloc(sizeof(list_node));
#ifdef DEBUG
            // FIX: the closing brace used to be swallowed by the trailing
            // "//atomic" comment, breaking compilation whenever DEBUG was
            // defined; use a plain statement instead.
            if (tmp_p==NULL) out_of_memory++; // atomic
#endif
            if (tmp_p != NULL) {
                int myweight;
                tmp_p->next = partial_solution;
                tmp_p->val  = i;

                myweight = partial_weight + A[partial_solution->val][i];
                if (position==SIZE-1) {
                    // 2a. termination: close the tour back to city 0
#ifdef NO_LOCKS
                    myweight += A[i][0];
                    if (myweight < my_weight.local() ) {
                        my_weight.local() = myweight;
                    }
#else
                    myweight += A[i][0];

#ifdef DOUBLE_CHECK
                    // Cheap unguarded test first; re-check under the lock.
                    if (myweight < solution_weight) {
#endif
                        // acquire lock
                        tbb::mutex::scoped_lock myLock(solution_lock);
                        if (myweight < solution_weight) {
                            // update solution
                            solution_weight = myweight;
                            // NOTE(review): tmp_p is freed below, so the
                            // global `solution` ends up dangling -- confirm
                            // it is never dereferenced afterwards.
                            solution = tmp_p;
                        }
                        // implicit lock release
#ifdef DOUBLE_CHECK
                    }
#endif
#endif

                } else {
                    // 2b. recursion
                    ser_solve_tsp_rec(A, tmp_p, position+1, myweight);
                }
                // Subtree fully explored: release the node.
                scalable_free (tmp_p);
            }
        } // end if(!used)
    } // end loop over candidate cities
}

void solve_tsp_rec(const int (*A)[SIZE], list_node* partial_solution, int position, int partial_weight);


// parallel_for body: explores one level of the TSP search tree, trying
// each city index in `range` as the next stop of the tour.
class tspBody {
public:
    //int* A[SIZE];
    const int (*A)[SIZE];          // distance matrix
    list_node* partial_solution;   // cities visited so far (most recent first)
    int position;                  // number of edges already placed
    int partial_weight;            // accumulated weight of the partial tour

    void operator () (const tbb::blocked_range<int> &range) const {
        int i;
        for (i=range.begin(); i<range.end(); i++) {
            // 1. check if node i was already used in the solution so far.
            //    if so, skip it.
            int used = 0;
            list_node* tmp_p = partial_solution;

            while(tmp_p != NULL) {
                if (tmp_p->val == i) { 
                    used = 1;
                    break;
                }
                tmp_p = tmp_p->next;
            }
 
            if (!used) {
                // allocate a node for city i
                tmp_p = (list_node*) scalable_malloc( sizeof(list_node) );
#ifdef DEBUG
                // FIX: the closing brace used to be swallowed by the
                // trailing "//atomic" comment, breaking compilation
                // whenever DEBUG was defined; use a plain statement.
                if (tmp_p==NULL) out_of_memory++; // atomic
#endif
                if (tmp_p != NULL) {
                    int myweight;
                    tmp_p->next = partial_solution;
                    tmp_p->val  = i;

                    myweight = partial_weight + A[partial_solution->val][i];
                    if (position==SIZE-1) {
                        // 2a. termination: close the tour back to city 0
#ifdef NO_LOCKS
                        myweight += A[i][0];
                        if (myweight < my_weight.local() ) {
                            my_weight.local() = myweight;
                        }
#else
                        myweight += A[i][0];
#ifdef DOUBLE_CHECK
                        // Cheap unguarded test first; re-check under lock.
                        if (myweight < solution_weight) {
#endif 
                            tbb::mutex::scoped_lock myLock(solution_lock);
                            if (myweight < solution_weight) {
                                solution_weight = myweight;
                                // NOTE(review): tmp_p is freed below, so
                                // the global `solution` ends up dangling --
                                // confirm it is never dereferenced later.
                                solution = tmp_p;
                            }
                            // implicit lock release
#ifdef DOUBLE_CHECK
                        }
#endif
#endif

                    } else {
                        // 2b. recursion
#ifdef CUTOFF
                        // Below CUTOFF_LEVEL switch to the serial version
                        // to avoid oversplitting near the leaves.
                        if (position+1>=CUTOFF_LEVEL) 
                            ser_solve_tsp_rec(A, tmp_p, position+1, myweight);
                        else
                            solve_tsp_rec(A, tmp_p, position+1, myweight);
#else
                        solve_tsp_rec(A, tmp_p, position+1, myweight);
#endif
                    }
                    // Subtree fully explored: release the node.
                    scalable_free (tmp_p);
                }
            } // end if(!used)
        } // end for loop
    }

    // Constructor
    tspBody(const int A[SIZE][SIZE], list_node* partial_solution, int position, int partial_weight):
        A(A), partial_solution(partial_solution), position(position), partial_weight(partial_weight) {}

};