Example #1
void func(int i)
{
	g_lock.lock();
	std::this_thread::sleep_for(std::chrono::milliseconds(10));
	std::cout << std::this_thread::get_id() << "add : " << i << std::endl;
	sum++;
	g_lock.unlock();
}
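
This example assumes a global lock g_lock and a shared counter sum defined elsewhere in the original program. A minimal, self-contained sketch of how such a program might drive func from several threads (the globals, the std::mutex type, and the thread count are assumptions, not part of the original source) could look like this:

#include <chrono>
#include <iostream>
#include <mutex>
#include <thread>
#include <vector>

std::mutex g_lock;  // assumed: any lockable type with lock()/unlock() would do
int sum = 0;        // assumed: shared counter protected by g_lock

void func(int i)
{
    g_lock.lock();
    std::this_thread::sleep_for(std::chrono::milliseconds(10));
    std::cout << std::this_thread::get_id() << "add : " << i << std::endl;
    sum++;
    g_lock.unlock();
}

int main()
{
    std::vector<std::thread> workers;
    for (int i = 0; i < 4; ++i)          // thread count chosen arbitrarily
        workers.emplace_back(func, i);
    for (auto &t : workers)
        t.join();
    std::cout << "sum = " << sum << std::endl;
    return 0;
}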
Example #2
    void init_master() {
        Options::ThreadAffinity::pin_main_thread();
        num_cpus = decide_num_cpus();
        threads = new TaskExecutor<Options> *[num_cpus];
        task_queues = new TaskQueue*[num_cpus];

        threads[0] = new TaskExecutor<Options>(0, *this);
        task_queues[0] = &threads[0]->get_task_queue();

        startup_lock.unlock();

        while (start_counter != num_cpus)
            Atomic::rep_nop();

        if (!workers_start_paused())
            lock_workers_initialized.unlock();
    }
Example #3
    void init_worker(int id) {
        Options::ThreadAffinity::pin_workerthread(id);
        // allocate Worker on thread
        TaskExecutor<Options> *te = new TaskExecutor<Options>(id, *this);

        // wait until main thread has initialized the threadmanager
        startup_lock.lock();
        startup_lock.unlock();

        threads[id] = te;
        task_queues[id] = &te->get_task_queue();

        Atomic::increase(&start_counter);

        lock_workers_initialized.lock();
        lock_workers_initialized.unlock();

        te->work_loop();
    }
Example #4
    void stop() {
        if (get_thread_num() != 0) {
            
            // workers don't come here until terminate() has been called

            int nv = Atomic::decrease_nv(&start_counter);

            // wait until all workers reached this step
            // all threads must agree that we are shutting
            // down before we can continue and invoke the
            // destructor
            startup_lock.lock();
            startup_lock.unlock();
            return;
        }

        start_executing(); // make sure threads have been started, or we will wait forever in barrier
        barrier_protocol.barrier(*threads[0]);

        startup_lock.lock();

        for (int i = 1; i < get_num_cpus(); ++i)
            threads[i]->terminate();

        // wait for all threads to join
        while (start_counter != 1)
            Atomic::rep_nop();

        // signal that threads can destruct
        startup_lock.unlock();

        for (int i = 1; i < get_num_cpus(); ++i)
            delete threads[i];

        delete [] threads;
        delete [] task_queues;
    }
Example #5
 /**
  * Return the number of bytes consumed by all free list blocks.
  *
  * This does not define the number of bytes available for actual usable allocation, and should not be used
  * by non-implementation code outside of unit tests or debugging.
  */
 vm_size_t debug_bytes_free () {
     vm_size_t bytes_free = 0;
     
     _lock.lock();
     control_block *first = _free_list;
     for (control_block *b = _free_list; b != NULL; b = b->_next) {
         bytes_free += b->_size;
         
         if (b->_next == first)
             break;
     }
     _lock.unlock();
     
     return bytes_free;
 }
Example #6
    TEST(ParallelUtils, SpinLock) {

        const int N = 1000000;

        SpinLock lock;

        volatile int c = 0;

        #pragma omp parallel for num_threads(4)
        for (int i = 0; i < N; i++) {
            lock.lock();
            c++;
            lock.unlock();
        }

        EXPECT_EQ(N, c);

    }
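
The SpinLock class exercised by this test is not shown in the example. A minimal sketch of a test-and-set spinlock built on std::atomic_flag (an assumption about the implementation, not the actual class from the library under test) could look like this:

#include <atomic>

class SpinLock {
    std::atomic_flag flag = ATOMIC_FLAG_INIT;
public:
    void lock() {
        // spin until the previous value of the flag was clear
        while (flag.test_and_set(std::memory_order_acquire))
            ;  // busy-wait
    }
    void unlock() {
        flag.clear(std::memory_order_release);
    }
};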
Example #7
	~SpinGuard(){
		_mutex->unlock();
	}
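
Only the destructor of SpinGuard appears in this example. A sketch of the complete RAII guard (the constructor and the non-copyable design are assumptions inferred from the destructor, assuming a SpinLock type with lock()/unlock() as in Example #6) might be:

class SpinGuard {
public:
    explicit SpinGuard(SpinLock *mutex) : _mutex(mutex) {
        _mutex->lock();    // acquire on construction
    }
    ~SpinGuard() {
        _mutex->unlock();  // release on destruction
    }
    // the guard owns the lock for its lifetime, so copying is disabled
    SpinGuard(const SpinGuard &) = delete;
    SpinGuard &operator=(const SpinGuard &) = delete;
private:
    SpinLock *_mutex;
};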
Example #8
void func(int n)
{
    g_Lock.lock();
    std::cout << "Output from thread: " << n << std::endl;
    g_Lock.unlock();
}
Example #9
 void start_executing() {
     if (workers_start_paused()) {
         if (lock_workers_initialized.is_locked())
             lock_workers_initialized.unlock();
     }
 }