TEST(ResStringPool, AppendToExistingUTF16) {
  const std::array<uint8_t, 116> data{{
      0x01, 0x00, 0x1C, 0x00, 0x74, 0x00, 0x00, 0x00, 0x05, 0x00, 0x00, 0x00,
      0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x30, 0x00, 0x00, 0x00,
      0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0E, 0x00, 0x00, 0x00,
      0x1C, 0x00, 0x00, 0x00, 0x24, 0x00, 0x00, 0x00, 0x34, 0x00, 0x00, 0x00,
      0x05, 0x00, 0x63, 0x00, 0x6F, 0x00, 0x6C, 0x00, 0x6F, 0x00, 0x72, 0x00,
      0x00, 0x00, 0x05, 0x00, 0x64, 0x00, 0x69, 0x00, 0x6D, 0x00, 0x65, 0x00,
      0x6E, 0x00, 0x00, 0x00, 0x02, 0x00, 0x69, 0x00, 0x64, 0x00, 0x00, 0x00,
      0x06, 0x00, 0x6C, 0x00, 0x61, 0x00, 0x79, 0x00, 0x6F, 0x00, 0x75, 0x00,
      0x74, 0x00, 0x00, 0x00, 0x06, 0x00, 0x73, 0x00, 0x74, 0x00, 0x72, 0x00,
      0x69, 0x00, 0x6E, 0x00, 0x67, 0x00, 0x00, 0x00}};
  android::ResStringPool pool(data.data(), data.size(), false);
  ASSERT_TRUE(!pool.isUTF8());
  size_t out_len;
  auto s = pool.stringAt(0, &out_len);
  assert_u16_string(s, "color");
  ASSERT_EQ(out_len, 5);

  // Make sure the size encoding works for large values.
  auto big_string = make_big_string(35000);
  auto big_chars = big_string.c_str();
  pool.appendString(android::String8(big_chars));
  pool.appendString(android::String8("more more more"));
  android::Vector<char> v;
  pool.serialize(v);
  android::ResStringPool after((void*)v.array(), v.size(), false);

  assert_u16_string(after.stringAt(0, &out_len), "color");
  ASSERT_EQ(out_len, 5);
  assert_u16_string(after.stringAt(1, &out_len), "dimen");
  ASSERT_EQ(out_len, 5);
  assert_u16_string(after.stringAt(2, &out_len), "id");
  ASSERT_EQ(out_len, 2);
  assert_u16_string(after.stringAt(3, &out_len), "layout");
  ASSERT_EQ(out_len, 6);
  assert_u16_string(after.stringAt(4, &out_len), "string");
  ASSERT_EQ(out_len, 6);
  assert_u16_string(after.stringAt(5, &out_len), big_chars);
  ASSERT_EQ(out_len, 35000);
  assert_u16_string(after.stringAt(6, &out_len), "more more more");
  ASSERT_EQ(out_len, 14);
}
void PushingToViewsBlockOutputStream::write(const Block & block)
{
    /** Throw an exception if the sizes of arrays - elements of nested data structures - don't match.
      * We have to make this assertion before writing to the table, because the storage engine may assume that they have equal sizes.
      * NOTE: It would be better to do this check during serialization of nested structures (at the point where this assumption is required),
      * but currently we don't have methods for serializing nested structures "as a whole".
      */
    Nested::validateArraySizes(block);

    if (output)
        output->write(block);

    /// Don't process materialized views if this block is a duplicate
    if (replicated_output && replicated_output->lastBlockIsDuplicate())
        return;

    // Insert data into materialized views only after successful insert into main table
    const Settings & settings = context.getSettingsRef();
    if (settings.parallel_view_processing && views.size() > 1)
    {
        // Push to views concurrently if enabled, and more than one view is attached
        ThreadPool pool(std::min(size_t(settings.max_threads), views.size()));
        for (size_t view_num = 0; view_num < views.size(); ++view_num)
        {
            auto thread_group = CurrentThread::getGroup();
            pool.schedule([=]
            {
                setThreadName("PushingToViews");
                if (thread_group)
                    CurrentThread::attachToIfDetached(thread_group);
                process(block, view_num);
            });
        }
        // Wait for concurrent view processing
        pool.wait();
    }
    else
    {
        // Process sequentially
        for (size_t view_num = 0; view_num < views.size(); ++view_num)
            process(block, view_num);
    }
}
Example #3
        void test( size_t nThreadCount )
        {
            ALLOC alloc;

            CPPUNIT_MSG( "Thread count=" << nThreadCount );
            CPPUNIT_MSG("Initialize data..." );

            randomGen<unsigned int> rndGen;

            s_nPassPerThread = s_nPassCount / nThreadCount;

            size_t nThread;
            m_aThreadData = new thread_data[ nThreadCount ];
            for ( nThread = 0; nThread < nThreadCount; ++nThread ) {
                thread_data thData
                    = m_aThreadData[nThread]
                    = new char *[ s_nBlocksPerThread ];
                for ( size_t i = 0; i < s_nBlocksPerThread; ++i ) {
                    thData[i] = reinterpret_cast<char *>(alloc.allocate( rndGen( s_nMinBlockSize, s_nMaxBlockSize ), nullptr ));
                    CPPUNIT_ASSERT( (reinterpret_cast<uintptr_t>(thData[i]) & (ALLOC::alignment - 1)) == 0 );
                }
            }
            CPPUNIT_MSG("Initializatin done" );

            CppUnitMini::ThreadPool pool( *this );
            pool.add( new Thread<ALLOC>( pool, alloc ), nThreadCount );
            nThread = 0;
            for ( CppUnitMini::ThreadPool::iterator it = pool.begin(); it != pool.end(); ++it )
                static_cast<Thread<ALLOC> *>(*it)->m_arr = m_aThreadData[nThread++];

            cds::OS::Timer    timer;
            pool.run();
            CPPUNIT_MSG( "  Duration=" << pool.avgDuration() );

            for ( nThread = 0; nThread < nThreadCount; ++nThread ) {
                thread_data thData = m_aThreadData[nThread];
                for ( size_t i = 0; i < s_nBlocksPerThread; ++i ) {
                    alloc.deallocate( reinterpret_cast<typename ALLOC::value_type *>(thData[i]), 1 );
                }
                delete [] thData;
            }
            delete [] m_aThreadData;
        }
vector<string> RepositoryDependencyAnalyser::GetDependency(string modulePath)
{
	ThreadPool pool(5);
	auto metadata = RepositoryMetadataHelper::GetMetadata(modulePath);
	vector<string> fileList;
	vector<string> result;
	for (auto file : metadata.FileList) {
		fileList.push_back(RepositoryMetadataHelper::repository_path + "/" + metadata.getFullName() + "/" + file);
	}
	auto dependencies = test_getDependency(pool, fileList, &this->typeTable);
	for (size_t i = 0; i < dependencies.Size(); i++) {
		auto moduleIt = this->moduleLookupTable.find(dependencies[i].toFile);
		if (moduleIt != this->moduleLookupTable.end() && moduleIt->second != metadata.getFullName() 
			&& !RepositoryMetadata::VersionCompared(moduleIt->second, metadata.getFullName())) {
			result.push_back(moduleIt->second);
		}
	}
	return result;
}
Example #5
bool
DocCacheMemory::loadDoc(const TagKey *key, CacheContext *cache_ctx,
    Tag &tag, boost::shared_ptr<CacheData> &cache_data) {

    log()->debug("loading doc in memory cache");
    DocPool *mpool = pool(key);
    assert(NULL != mpool);

    DocCleaner cleaner(cache_ctx->context());
    if (!mpool->loadDoc(key->asString(), tag, cache_data, cleaner)) {
        return false;
    }

    if (!DocCacheBase::checkTag(NULL, NULL, tag, "loading doc from memory cache")) {
        return false;
    }
    
    return true;
}
Example #6
void ElementShadow::distributeV0()
{
    host()->setNeedsStyleRecalc(SubtreeStyleChange, StyleChangeReasonForTracing::create(StyleChangeReason::Shadow));
    WillBeHeapVector<RawPtrWillBeMember<HTMLShadowElement>, 32> shadowInsertionPoints;
    DistributionPool pool(*host());

    for (ShadowRoot* root = &youngestShadowRoot(); root; root = root->olderShadowRoot()) {
        HTMLShadowElement* shadowInsertionPoint = 0;
        const WillBeHeapVector<RefPtrWillBeMember<InsertionPoint>>& insertionPoints = root->descendantInsertionPoints();
        for (size_t i = 0; i < insertionPoints.size(); ++i) {
            InsertionPoint* point = insertionPoints[i].get();
            if (!point->isActive())
                continue;
            if (isHTMLShadowElement(*point)) {
                ASSERT(!shadowInsertionPoint);
                shadowInsertionPoint = toHTMLShadowElement(point);
                shadowInsertionPoints.append(shadowInsertionPoint);
            } else {
                pool.distributeTo(point, this);
                if (ElementShadow* shadow = shadowWhereNodeCanBeDistributed(*point))
                    shadow->setNeedsDistributionRecalc();
            }
        }
    }

    for (size_t i = shadowInsertionPoints.size(); i > 0; --i) {
        HTMLShadowElement* shadowInsertionPoint = shadowInsertionPoints[i - 1];
        ShadowRoot* root = shadowInsertionPoint->containingShadowRoot();
        ASSERT(root);
        if (root->isOldest()) {
            pool.distributeTo(shadowInsertionPoint, this);
        } else if (root->olderShadowRoot()->type() == root->type()) {
            // Only allow reprojecting older shadow roots between the same type to
            // disallow reprojecting UA elements into author shadows.
            DistributionPool olderShadowRootPool(*root->olderShadowRoot());
            olderShadowRootPool.distributeTo(shadowInsertionPoint, this);
            root->olderShadowRoot()->setShadowInsertionPointOfYoungerShadowRoot(shadowInsertionPoint);
        }
        if (ElementShadow* shadow = shadowWhereNodeCanBeDistributed(*shadowInsertionPoint))
            shadow->setNeedsDistributionRecalc();
    }
    InspectorInstrumentation::didPerformElementShadowDistribution(host());
}
int main()
{
    stxxl::prefetch_pool<block_type> pool(2);
    pool.resize(10);
    pool.resize(5);

    block_type* blk = new block_type;
    (*blk)[0].integer = 42;
    block_type::bid_type bids[2];
    stxxl::block_manager::get_instance()->new_blocks(stxxl::single_disk(), bids, bids + 2);
    blk->write(bids[0])->wait();
    blk->write(bids[1])->wait();

    pool.hint(bids[0]);
    pool.read(blk, bids[0])->wait();
    pool.read(blk, bids[1])->wait();

    delete blk;
}
Example #8
static void testShortProd() {
  DigitPool pool(-5);
//  for(;;) {
//    int useReferenceVersion = inputInt(_T("Use reference version (1=reference, 2=debug"));

//    BigReal::setUseShortProdRefenceVersion(useReferenceVersion == 1);

//    float maxError32Ref = getRelativeError32(FLT_MIN,&pool);
//    BigReal::setUseShortProdRefenceVersion(false);
//    float maxError32FPU = getRelativeError32(FLT_MIN,&pool);
//    BigReal::setUseShortProdRefenceVersion(true);

    const FullFormatBigReal x = BigReal(spaceString(14700,'9')); // inputBigReal(pool, _T("Enter x:"));
    const FullFormatBigReal y = BigReal(spaceString(14700,'9')); // inputBigReal(pool, _T("Enter y:"));

    _tprintf(_T("X:%s\nY:%s\n"), x.toString().cstr(), y.toString().cstr());

    FullFormatBigReal p1(&pool), p2(&pool);

    p1 = BigReal::shortProd(x, y, BIGREAL_0, &pool);

    _tprintf(_T("p1:%s\n"), p1.toString().cstr());

    BigReal::setUseShortProdRefenceVersion(false);

    p2 = BigReal::shortProd(x, y, BIGREAL_0, &pool);

    BigReal::setUseShortProdRefenceVersion(true);

    _tprintf(_T("p2:%s\n"), p2.toString().cstr());

    try {
      p1.assertIsValidBigReal();
    } catch(Exception e) {
      _tprintf(_T("p1 failed:%s\n"), e.what());
    }
    try {
      p2.assertIsValidBigReal();
    } catch(Exception e) {
      _tprintf(_T("p2 failed:%s\n"), e.what());
    }
//  }
}
Example #9
/**
 *  This tests the basic functionality of SkDiscardablePixelRef with a
 *  basic SkImageGenerator implementation and several
 *  SkDiscardableMemory::Factory choices.
 */
DEF_TEST(DiscardableAndCachingPixelRef, reporter) {
    check_pixelref(TestImageGenerator::kFailGetPixels_TestType, reporter, nullptr);
    check_pixelref(TestImageGenerator::kSucceedGetPixels_TestType, reporter, nullptr);

    SkAutoTUnref<SkDiscardableMemoryPool> pool(
        SkDiscardableMemoryPool::Create(1, nullptr));
    REPORTER_ASSERT(reporter, 0 == pool->getRAMUsed());
    check_pixelref(TestImageGenerator::kFailGetPixels_TestType, reporter, pool);
    REPORTER_ASSERT(reporter, 0 == pool->getRAMUsed());
    check_pixelref(TestImageGenerator::kSucceedGetPixels_TestType, reporter, pool);
    REPORTER_ASSERT(reporter, 0 == pool->getRAMUsed());

    SkDiscardableMemoryPool* globalPool = SkGetGlobalDiscardableMemoryPool();
    // Only acts differently from nullptr on a platform that has a
    // default discardable memory implementation that differs from the
    // global DM pool.
    check_pixelref(TestImageGenerator::kFailGetPixels_TestType, reporter, globalPool);
    check_pixelref(TestImageGenerator::kSucceedGetPixels_TestType, reporter, globalPool);
}
Example #10
static void testAPCSum() {
  DigitPool pool(-5);
  for(;;) {
    const FullFormatBigReal x = inputBigReal(pool, _T("Enter x:"));
    const FullFormatBigReal y = inputBigReal(pool, _T("Enter y:"));
    _tprintf(_T("Enter bias ('<','>','#'):"));
    char bias = getchar();
    FullFormatBigReal p = BigReal::apcSum(bias, x, y, &pool);

    _tprintf(_T("x:%50s y:%50s\n"), x.toString().cstr(), y.toString().cstr());
    _tprintf(_T("APCSum(>,x,y) = %s\n"), p.toString().cstr());

    try {
      p.assertIsValidBigReal();
    } catch(Exception e) {
      _tprintf(_T("%s\n"), e.what());
    }
  }
}
Example #11
void LoadTable() {
  const oid_t col_count = state.attribute_count + 1;
  const int tuple_count = state.scale_factor * state.tuples_per_tilegroup;

  auto table_schema = sdbench_table->GetSchema();

  /////////////////////////////////////////////////////////
  // Load in the data
  /////////////////////////////////////////////////////////

  // Insert tuples into tile_group.
  auto &txn_manager = concurrency::TransactionManagerFactory::GetInstance();
  const bool allocate = true;
  auto txn = txn_manager.BeginTransaction();
  std::unique_ptr<type::AbstractPool> pool(new type::EphemeralPool());

  std::unique_ptr<executor::ExecutorContext> context(
      new executor::ExecutorContext(txn));

  for (int rowid = 0; rowid < tuple_count; rowid++) {
    int populate_value = rowid;

    std::unique_ptr<storage::Tuple> tuple(new storage::Tuple(table_schema, allocate));

    for (oid_t col_itr = 0; col_itr < col_count; col_itr++) {
      auto value = type::ValueFactory::GetIntegerValue(populate_value);
      tuple->SetValue(col_itr, value, pool.get());
    }

    planner::InsertPlan node(sdbench_table.get(), std::move(tuple));
    executor::InsertExecutor executor(&node, context.get());
    executor.Execute();
  }

  auto result = txn_manager.CommitTransaction(txn);

  if (result == ResultType::SUCCESS) {
    LOG_TRACE("commit successfully");
  } else {
    LOG_TRACE("commit failed");
  }
}
Example #12
int main( int argc, char *argv[])
{
	try
	{
		tsk::static_pool< tsk::unbounded_prio_queue< int > > pool( tsk::poolsize( 3) ); 

		tsk::task< int > t1( fibonacci_fn, 10);
		tsk::task< int > t2( fibonacci_fn, 10);
		tsk::task< int > t3( fibonacci_fn, 10);
		tsk::task< int > t4( fibonacci_fn, 10);

		tsk::handle< int > h1(
			tsk::async( boost::move( t1) ) );
		tsk::handle< int > h2(
			tsk::async(
				boost::move( t2),
				tsk::new_thread() ) );
		tsk::handle< int > h3(
			tsk::async(
				boost::move( t3),
				2,
				pool) );
		tsk::handle< int > h4(
			tsk::async(
				boost::move( t4),
				2,
				pool) );

		std::cout << h1.get() << std::endl;
		std::cout << h2.get() << std::endl;
		std::cout << h3.get() << std::endl;
		std::cout << h4.get() << std::endl;

		return EXIT_SUCCESS;
	}
	catch ( std::exception const& e)
	{ std::cerr << "exception: " << e.what() << std::endl; }
	catch ( ... )
	{ std::cerr << "unhandled" << std::endl; }

	return EXIT_FAILURE;
}
Example #13
TEST(Parallel, mutexRegion) {
    uint32_t var = 0;
    lock_t mutex;

    auto threadFunc = [&var, &mutex](uint32_t idx){
        for (uint32_t i = 0; i < itCnt; i++) {
            mutex_begin(uqlk, mutex);
            var += idx;
            mutex_end();
        }
    };

    thread_pool_t pool(thCnt);
    for (uint32_t idx = 0; idx < thCnt; idx++) {
        pool.add_task(std::bind(threadFunc, idx));
    }
    pool.wait_all();

    ASSERT_EQ((0+7)*8/2 * itCnt, var);
}
Example #14
int main()
{
    ThreadPool pool(16);

    Test t;
    for(int i = 0; i < 10; i ++)
    {
        pool.put(boost::bind(&Test::doit, t, i));
    }

    pool.start(3);
    sleep(1);

    printf("new data\n");
    pool.put(boost::bind(&Test::doit, t, 21));
    pool.put(boost::bind(&Test::doit, t, 22));
    sleep(1);

    return 0;
}
Example #15
File: pool.cpp Project: kevinic/gtl
	template <class T, class Func_T> void test(Test_Context& tc, Func_T func)
	{
		Node_Pool<T> pool(&m_context, 5);
		T* ptr[5];
		for(int i = 0; i < 5; ++i)
		{
			GTL_TEST_VERIFY(tc, !pool.empty());
			ptr[i] = pool.create(func);
			*ptr[i] =  i;
		}
			
		GTL_TEST_VERIFY(tc, pool.empty());
			
		//Read back and destroy
		for(int i = 0; i < 5; ++i)
		{
			GTL_TEST_EQ(tc, (int) *ptr[i], i);
			pool.destroy(ptr[i]);
		}
	}
Example #16
void
test_thread_pool_recursion ()
{
    std::cout << "\nTesting thread pool recursion" << std::endl;
    static spin_mutex print_mutex;
    thread_pool *pool (default_thread_pool());
    pool->resize (2);
    parallel_for (0, 10, [&](int id, int64_t i){
        // sleep long enough that we can push all the jobs before any get
        // done.
        Sysutil::usleep (10);
        // then run something else that itself will push jobs onto the
        // thread pool queue.
        parallel_for (0, 10, [&](int id, int64_t i){
            Sysutil::usleep (2);
            spin_lock lock (print_mutex);
            std::cout << "  recursive running thread " << id << std::endl;
        });
    });
}
Example #17
/**
 * DataLoader Constructor
 */
DataLoader::DataLoader(
    PrestoWorker* presto_worker, int32_t port, int32_t sock_fd_, uint64_t split_size) :
  presto_worker_(presto_worker),
  port_(port), sock_fd(sock_fd_),
  DR_partition_size(split_size) {
  total_data_size = 0;
  total_nrows = 0;
  file_id = 0;
  vnode_EOFs.clear();
  read_pool = pool(10);

  //Initialize buffer
  buffer.buf = NULL;
  if(buffer.buf != NULL) {
    free(buffer.buf);
    buffer.buf = NULL;
  }
  buffer.size = 0;
  buffer.buffersize = 0;
}
Example #18
    bool RequestIntRedirFiltCond::match(ReqCtx *reqCtx,
                                     FiltCondMatch **fcMatch) const
    {
        RUM_PTRC_COND(pool(),
                      "RequestIntRedirFiltCond::match(ReqCtx *reqCtx, "
                      "FiltCondMatch **fcMatch), "
                      "this: " << (void *)this
                      << ", reqCtx: " << (void *)reqCtx
                      << ", fcMatch: " << (void *)fcMatch);

        bool isMatch = ((reqCtx->req()->prev != NULL) == isIntRedir_);

        RUM_LOG_COND(reqCtx->logger(), APLOG_DEBUG,
                     "isIntRedir: " << isIntRedir_
                     << ", isMatch: " << isMatch);

        apr_pool_t *fcPool = reqCtx->filtCondMatches()->pool();
        *fcMatch = new (fcPool) FiltCondMatch(0, isMatch);

        return isMatch;
    }
Example #19
Int_t mp201_parallelHistoFill()
{
   TH1::AddDirectory(false);
   const UInt_t poolSize = 4U; // assumed value; the original tutorial defines poolSize outside this snippet
   ROOT::TProcessExecutor pool(poolSize);
   auto fillRandomHisto = [](int seed = 0) {
      TRandom3 rndm(seed);
      auto h = new TH1F("myHist", "Filled in parallel", 128, -8, 8);
      for (auto i : ROOT::TSeqI(1000000)) {
         h->Fill(rndm.Gaus(0,1));
      }
      return h;
   };

   auto seeds = ROOT::TSeqI(23);
   ROOT::ExecutorUtils::ReduceObjects<TH1F *> redfunc;
   auto sumRandomHisto = pool.MapReduce(fillRandomHisto, seeds, redfunc);

   auto c = new TCanvas();
   sumRandomHisto->Draw();
   return 0;
}
Example #20
File: random.cpp Project: IMCG/CDS
        void test( size_t nThreadCount )
        {
            ALLOC alloc ;

            CPPUNIT_MSG( "Thread count=" << nThreadCount )  ;
            s_nPassPerThread = s_nPassCount / nThreadCount  ;

            CppUnitMini::ThreadPool pool( *this )   ;
            pool.add( new Thread<ALLOC>( pool, alloc ), nThreadCount ) ;

            cds::OS::Timer    timer    ;
            pool.run()  ;
            CPPUNIT_MSG( "  Duration=" << pool.avgDuration() ) ;

            for ( size_t i = 0; i < m_Data.size(); ++i ) {
                if ( m_Data[i].m_pszBlock ) {
                    alloc.deallocate( m_Data[i].m_pszBlock, 1 ) ;
                    m_Data[i].m_pszBlock = NULL ;
                }
            }
        }
Example #21
void IOWorker::add_pool(const Address& address, bool is_initial_connection) {
  if (is_closing_) return;

  PoolMap::iterator it = pools_.find(address);
  if (it == pools_.end()) {
    LOG_INFO("Adding pool for host %s io_worker(%p)",
             address.to_string(true).c_str(), static_cast<void*>(this));

    set_host_is_available(address, false);

    SharedRefPtr<Pool> pool(new Pool(this, address, is_initial_connection));
    pools_[address] = pool;
    pool->connect();
  } else  {
    // We could have a connection that's waiting to reconnect. In that case,
    // this will start to connect immediately.
    LOG_DEBUG("Host %s already present attempting to initiate immediate connection",
              address.to_string().c_str());
    it->second->connect();
  }
}
Example #22
Int_t mp201_parallelHistoFill(UInt_t poolSize = 4)
{
   TH1::AddDirectory(false);
   TProcPool pool(poolSize);
   auto fillRandomHisto = [](int seed = 0) {
      TRandom3 rndm(seed);
      auto h = new TH1F("myHist", "Filled in parallel", 128, -8, 8);
      for (auto i : ROOT::TSeqI(1000000)) {
         h->Fill(rndm.Gaus(0,1));
      }
      return h;
   };

   TimerRAII timer("Filling Histogram in parallel and drawing it.");
   auto seeds = ROOT::TSeqI(23);
   auto sumRandomHisto = pool.MapReduce(fillRandomHisto, seeds, PoolUtils::ReduceObjects);

   auto c = new TCanvas();
   sumRandomHisto->Draw();
   return 0;
}
Example #23
/*
 *  SkCachedData behaves differently (regarding its locked/unlocked state) depending on
 *  when it is in the cache or not. Being in the cache is signaled by calling attachToCacheAndRef()
 *  instead of ref(). (and balanced by detachFromCacheAndUnref).
 *
 *  Thus, among other things, we test the end-of-life behavior when the client is the last owner
 *  and when the cache is.
 */
DEF_TEST(CachedData, reporter) {
    SkAutoTUnref<SkDiscardableMemoryPool> pool(SkDiscardableMemoryPool::Create(1000));

    for (int useDiscardable = 0; useDiscardable <= 1; ++useDiscardable) {
        const size_t size = 100;

        // test with client as last owner
        SkCachedData* data = test_locking(reporter, size, useDiscardable ? pool.get() : nullptr);
        check_data(reporter, data, 2, kInCache, kLocked);
        data->detachFromCacheAndUnref();
        check_data(reporter, data, 1, kNotInCache, kLocked);
        data->unref();

        // test with cache as last owner
        data = test_locking(reporter, size, useDiscardable ? pool.get() : nullptr);
        check_data(reporter, data, 2, kInCache, kLocked);
        data->unref();
        check_data(reporter, data, 1, kInCache, kUnlocked);
        data->detachFromCacheAndUnref();
    }
}
Example #24
TEST(Parallel, threadPool) {
    const uint32_t val = 123;

    std::array<uint32_t, thCnt> spawned;
    std::fill(spawned.begin(), spawned.end(), 0);

    auto threadFunc = [&val, &spawned](uint32_t idx){
        ASSERT_EQ(123, val);
        spawned[idx] = 1;
    };

    thread_pool_t pool(thCnt);
    for (uint32_t idx = 0; idx < thCnt; idx++) {
        pool.add_task(std::bind(threadFunc, idx));
    }
    pool.wait_all();

    for (const auto s : spawned) {
        ASSERT_EQ(1, s);
    }
}
Example #25
File: main.cpp Project: zhekan/4Fun
int main()
{
    float elapsed_seconds = 0;
    Taimer taimer(&elapsed_seconds);

    ThreadPool pool(3);

    std::cout <<"Start"<< std::endl;
    taimer.start();

    std::list<std::shared_ptr<AData<long>>> pool_weit;
    for (int i=0; i<10; i++)
    {
        int n = std::rand() % 3 + 40;
        std::cout <<n<< std::endl;
        pool_weit.push_back(pool.run_async(&fibonacci, n));
    }

//    pool_weit.push_back(pool.run_async(&fibonacci, 44));
//    pool_weit.push_back(pool.run_async(&fibonacci, 43));
//    pool_weit.push_back(pool.run_async(&fibonacci, 45));
//    pool_weit.push_back(pool.run_async(&fibonacci, 44));

    while(!pool_weit.empty()){
        for (auto it = pool_weit.begin(); it != pool_weit.end(); ){
            if((*it)->ready){
                std::cout << (*it)->data << std::endl;
                // std::list::erase returns the next valid iterator; calling
                // remove(*it) and then decrementing the erased iterator was undefined behaviour
                it = pool_weit.erase(it);
            } else {
                ++it;
            }
        }

        std::this_thread::sleep_for(std::chrono::microseconds(500));
    }

    taimer.stop();
    std::cout <<"\nElapsed time: " << elapsed_seconds << "sec."<< std::endl;

    return 0;
}
void test_case_4()
{
	tsk::static_pool<
		tsk::unbounded_fifo
	> pool( tsk::poolsize( 2) );

	tsk::spin::unbounded_channel< int > buf;

	int n = 37;

	send_data sender( buf);
	recv_data receiver( buf);
	BOOST_CHECK_EQUAL( 0, receiver.value);

	tsk::handle< void > h1 =
		tsk::async(
			tsk::make_task(
				& recv_data::operator(),
				boost::ref( receiver) ),
			pool);

	boost::this_thread::sleep(
			pt::milliseconds( 250) );
	BOOST_CHECK_EQUAL( false, h1.is_ready() );

	tsk::handle< void > h2 =
		tsk::async(
			tsk::make_task(
				& send_data::operator(),
				boost::ref( sender),
				n),
			pool);

	h2.wait();
	BOOST_CHECK_EQUAL( true, h2.is_ready() );
	h1.wait();
	BOOST_CHECK_EQUAL( true, h1.is_ready() );

	BOOST_CHECK_EQUAL( n, receiver.value);
}
Example #27
TYPED_TEST_P(HeartBeatTest, Ping) 
{
    typedef TypeParam socket;
    auto log = redhorn::logging::get_logger("main");

    redhorn::iopool pool(redhorn::logging::get_logger("iopool"));
    redhorn::heart_beat heart(redhorn::logging::get_logger("heart"),
                              std::chrono::milliseconds(20),
                              std::chrono::milliseconds(20));
    
    pool.start(2);
    heart.start();

    redhorn::listener listener(port);

    std::thread connect_thread(connect_ping_thread<socket>, redhorn::logging::get_logger("connect_thread"));

    log.info("accepting");
    socket sock = listener.accept<socket>();
    ASSERT_TRUE(static_cast<bool>(sock));

    log.info("accepted");

    pool.bind(sock);
    heart.bind(sock, 
               std::chrono::milliseconds::zero(),
               std::function<message()>(drummer));

    log.info("joining");
    connect_thread.join();
    log.info("joined");
    heart.unbind(sock);

    heart.stop();
    pool.stop();
    heart.wait();
    pool.wait();
    
}
Example #28
int main() {
    wtl::ThreadPool pool(2);
    std::vector<std::future<size_t>> futures;
    for (size_t j=0; j<2ul; ++j) {
        for (size_t i=0; i<3ul; ++i) {
            futures.push_back(pool.submit([](const size_t k) {
                std::this_thread::sleep_for(std::chrono::milliseconds(250));
                std::ostringstream oss;
                oss << std::this_thread::get_id() << ": " << k << "\n";
                std::cout << oss.str() << std::flush;
                return k;
            }, i));
        }
        std::cout << std::this_thread::get_id() << ": main queued\n" << std::flush;
        pool.wait();
        std::cout << std::this_thread::get_id() << ": main finished\n" << std::flush;
    }
    for (auto& f: futures) {
        std::cout << f.get() << " ";
    }
    return 0;
}
Example #29
// Starts a server listening on a port and handles connections on
// multiple threads using a thread pool.
void with_server_socket(const int port, const int n_threads,
                        void (*action)(ClientSocket sock))
{
    int _socket = socket_utils::init_socket(INADDR_ANY, port, socket_utils::SERVER_SOCKET);
    ThreadPool pool(n_threads);
    
    socket_utils::listen(_socket, 0);
    
    for (;;) {
        // Handle the client connection in a new thread.
        int client_fd = socket_utils::accept(_socket, NULL, NULL);
    
        thread_args *args = new thread_args;
        args->action = action;
        args->client_fd = client_fd;
        
        pool.inject(_thread_routine, (void *) args);
    }
    
    shutdown(_socket, SHUT_RDWR);
    close(_socket);
}
Example #30
int main()
{
    // a memory pool RawAllocator
    // allocates a memory block - initially 4KiB - and splits it into chunks of list_node_size<int>::value big
    // list_node_size<int>::value is the size of each node of a std::list
    memory::memory_pool<> pool(memory::list_node_size<int>::value, 4096u);

    // alias for std::list<int, memory::std_allocator<int, memory::memory_pool<>>
    // a std::list using a memory_pool
    // std_allocator stores a reference to a RawAllocator and provides the Allocator interface
    memory::list<int, memory::memory_pool<>> list(pool);
    list.push_back(3);
    list.push_back(2);
    list.push_back(1);

    merge_sort(list.begin(), list.end());

    // allocate a std::unique_ptr using the pool
    // memory::allocate_shared is also available
    auto ptr = memory::allocate_unique<int>(pool, *list.begin());
    std::cout << *ptr << '\n';
}