Code example #1
File: sched.cpp  Project: iopenstack/mesos
  void statusUpdate(const StatusUpdate& update, const UPID& pid)
  {
    const TaskStatus& status = update.status();

    if (aborted) {
      VLOG(1) << "Ignoring task status update message because "
              << "the driver is aborted!";
      return;
    }

    VLOG(2) << "Received status update " << update << " from " << pid;

    CHECK(framework.id() == update.framework_id());

    // TODO(benh): Note that this may be a duplicate status update!
    // Once we get support to try and have a more consistent view
    // of what's running in the cluster, we'll just let this one
    // slide. The alternative is possibly dealing with a scheduler
    // failover and not correctly giving the scheduler its status
    // update, which seems worse than giving a status update
    // multiple times (of course, if a scheduler re-uses a TaskID,
    // that could be bad).

    Stopwatch stopwatch;
    if (FLAGS_v >= 1) {
      stopwatch.start();
    }

    scheduler->statusUpdate(driver, status);

    VLOG(1) << "Scheduler::statusUpdate took " << stopwatch.elapsed();

    // Acknowledge the status update.
    // NOTE: We do a dispatch here instead of directly sending the ACK because
    // we want to avoid sending the ACK if the driver was aborted when we
    // made the statusUpdate call. This works because the 'abort' message will
    // be enqueued before the ACK message is processed.
    if (pid > 0) {
      dispatch(self(), &Self::statusUpdateAcknowledgement, update, pid);
    }
  }
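
All of the Mesos callbacks in these examples time the user's Scheduler/Executor callback with the same guard: the Stopwatch is started only when FLAGS_v >= 1, since the VLOG(1) line that reports the elapsed time only fires at that verbosity anyway. Below is a minimal sketch of that pattern, using std::chrono and hypothetical names (SimpleStopwatch, verbosity, timedInvoke) in place of the stout Stopwatch and glog flags, which are not part of this excerpt.

#include <chrono>
#include <iostream>

// Hypothetical stand-in for stout's Stopwatch, for illustration only.
struct SimpleStopwatch
{
  std::chrono::steady_clock::time_point begin;
  void start() { begin = std::chrono::steady_clock::now(); }
  double elapsedSecs() const
  {
    return std::chrono::duration<double>(
        std::chrono::steady_clock::now() - begin).count();
  }
};

int verbosity = 1; // stands in for FLAGS_v

template <typename Callback>
void timedInvoke(const char* name, Callback&& callback)
{
  SimpleStopwatch stopwatch;
  if (verbosity >= 1) {
    stopwatch.start(); // only pay for timing when it will actually be logged
  }

  callback(); // e.g. scheduler->statusUpdate(driver, status)

  if (verbosity >= 1) {
    std::cout << name << " took " << stopwatch.elapsedSecs() << "s" << std::endl;
  }
}

int main()
{
  timedInvoke("Scheduler::statusUpdate", [] { /* the scheduler callback */ });
  return 0;
}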
Code example #2
File: sched.cpp  Project: iopenstack/mesos
  void rescindOffer(const OfferID& offerId)
  {
    if (aborted) {
      VLOG(1) << "Ignoring rescind offer message because "
              << "the driver is aborted!";
      return;
    }

    VLOG(1) << "Rescinded offer " << offerId;

    savedOffers.erase(offerId);

    Stopwatch stopwatch;
    if (FLAGS_v >= 1) {
      stopwatch.start();
    }

    scheduler->offerRescinded(driver, offerId);

    VLOG(1) << "Scheduler::offerRescinded took " << stopwatch.elapsed();
  }
Code example #3
File: sched.cpp  Project: iopenstack/mesos
  void frameworkMessage(const SlaveID& slaveId,
                        const FrameworkID& frameworkId,
                        const ExecutorID& executorId,
                        const string& data)
  {
    if (aborted) {
      VLOG(1) << "Ignoring framework message because the driver is aborted!";
      return;
    }

    VLOG(2) << "Received framework message";

    Stopwatch stopwatch;
    if (FLAGS_v >= 1) {
      stopwatch.start();
    }

    scheduler->frameworkMessage(driver, executorId, slaveId, data);

    VLOG(1) << "Scheduler::frameworkMessage took " << stopwatch.elapsed();
  }
Code example #4
File: variant.cpp  Project: aleks-f/articles
void doVariant(const std::vector<std::string>& strvec)
{
	MyObj o;

	std::cout << sizeof(o) << std::endl;
	boost::variant<std::string, double> u(1.1);
	/*
	std::cout << "boost::variant" << std::endl;
	std::cout << "==============" << std::endl;
	
	printSize<char>();
	printSize<int>();
	printSize<float>();
	printSize<double>();
	printSize<std::string>();
	*/
	std::vector<std::string>::const_iterator it = strvec.begin();
	std::vector<std::string>::const_iterator end = strvec.end();

	Stopwatch sw;
	sw.start();
	for (; it != end; ++it)
	{
		boost::variant<int, double, std::string> u(*it);
		
		int i = boost::apply_visitor(string_int_converter(), u);
	
		double d = boost::apply_visitor(string_dbl_converter(), u);

		u = i;
		std::string s = boost::apply_visitor(num_string_converter(), u);

		u = d;
		s = boost::apply_visitor(num_string_converter(), u);
	}
	sw.stop();
	std::cout << "variant: " << sw.elapsed()/1000.0 << " [ms]" << std::endl;
	
	std::cout << "==============" << std::endl;
}
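
The visitors used above (string_int_converter, string_dbl_converter, num_string_converter) are not part of this excerpt. As a rough guess at their shape, not the project's actual code, a boost::static_visitor that converts whatever the variant holds into an int could look like this:

#include <boost/variant.hpp>
#include <boost/lexical_cast.hpp>
#include <string>

// Hypothetical converter visitor: returns the held value as an int.
struct string_int_converter_sketch : boost::static_visitor<int>
{
	int operator()(int value) const { return value; }
	int operator()(double value) const { return static_cast<int>(value); }
	int operator()(const std::string& value) const
	{
		return boost::lexical_cast<int>(value);
	}
};

int main()
{
	boost::variant<int, double, std::string> u(std::string("42"));
	int i = boost::apply_visitor(string_int_converter_sketch(), u); // 42
	return i == 42 ? 0 : 1;
}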
Code example #5
File: sched.cpp  Project: iopenstack/mesos
  void noMasterDetected()
  {
    VLOG(1) << "No master detected, waiting for another master";

    // Inform the scheduler about the disconnection if the driver
    // was previously registered with the master.
    if (connected) {
      Stopwatch stopwatch;
      if (FLAGS_v >= 1) {
        stopwatch.start();
      }

      scheduler->disconnected(driver);

      VLOG(1) << "Scheduler::disconnected took " << stopwatch.elapsed();
    }

    // In this case, we don't actually invoke Scheduler::error
    // since we might get reconnected to a master imminently.
    connected = false;
    master = UPID();
  }
Code example #6
void LocalSocketTest::testTimeout()
{
	SocketAddress sas("/tmp/poco.server.tcp.sock");
	EchoServer echoServer(sas);
	SocketAddress sac("/tmp/poco.client.tcp.sock");
	StreamSocket ss(sas, &sac);
	
	Timespan timeout0 = ss.getReceiveTimeout();
	Timespan timeout(250000);
	ss.setReceiveTimeout(timeout);
	Timespan timeout1 = ss.getReceiveTimeout();
	std::cout << "original receive timeout:  " << timeout0.totalMicroseconds() << std::endl;
	std::cout << "requested receive timeout: " << timeout.totalMicroseconds() << std::endl;
	std::cout << "actual receive timeout:    " << timeout1.totalMicroseconds() << std::endl;
	
	// some socket implementations adjust the timeout value
	// assert (ss.getReceiveTimeout() == timeout);
	Stopwatch sw;
	try
	{
		char buffer[256];
		sw.start();
		ss.receiveBytes(buffer, sizeof(buffer));
		fail("nothing to receive - must timeout");
	}
	catch (TimeoutException&)
	{
	}
	assert (sw.elapsed() < 1000000);
	
	timeout0 = ss.getSendTimeout();
	ss.setSendTimeout(timeout);
	timeout1 = ss.getSendTimeout();
	std::cout << "original send timeout:  " << timeout0.totalMicroseconds() << std::endl;
	std::cout << "requested send timeout: " << timeout.totalMicroseconds() << std::endl;
	std::cout << "actual send timeout:    " << timeout1.totalMicroseconds() << std::endl;
	// assert (ss.getSendTimeout() == timeout);
}
Code example #7
File: exec.cpp  Project: CruncherBigData/mesos
  void sendStatusUpdate(const TaskStatus& status)
  {
    if (status.state() == TASK_STAGING) {
      VLOG(1) << "Executor is not allowed to send "
              << "TASK_STAGING status update. Aborting!";

      driver->abort();

      Stopwatch stopwatch;
      if (FLAGS_v >= 1) {
        stopwatch.start();
      }

      executor->error(driver, "Attempted to send TASK_STAGING status update");

      VLOG(1) << "Executor::error took " << stopwatch.elapsed();

      return;
    }

    StatusUpdateMessage message;
    StatusUpdate* update = message.mutable_update();
    update->mutable_framework_id()->MergeFrom(frameworkId);
    update->mutable_executor_id()->MergeFrom(executorId);
    update->mutable_slave_id()->MergeFrom(slaveId);
    update->mutable_status()->MergeFrom(status);
    update->set_timestamp(Clock::now().secs());
    update->set_uuid(UUID::random().toBytes());
    message.set_pid(self());

    VLOG(1) << "Executor sending status update " << *update;

    // Capture the status update.
    updates[UUID::fromBytes(update->uuid())] = *update;

    send(slave, message);
  }
Code example #8
File: TimerTest.cpp  Project: as2120/ZPoco
void TimerTest::testTimer()
{
    Timer t(100, 200);
    assert (t.getStartInterval() == 100);
    assert (t.getPeriodicInterval() == 200);

    Stopwatch sw;
    TimerCallback<TimerTest> tc(*this, &TimerTest::onTimer);
    sw.start();
    t.start(tc);
    _event.wait();
    sw.stop();
    assert (sw.elapsed() >= 80000 && sw.elapsed() < 250000);
    sw.restart();
    _event.wait();
    sw.stop();
    assert (sw.elapsed() >= 180000 && sw.elapsed() < 250000);
    sw.restart();
    _event.wait();
    sw.stop();
    assert (sw.elapsed() >= 180000 && sw.elapsed() < 250000);
    t.stop();
}
Code example #9
void OsmAnd::MapRasterizer_P::rasterize(
    const AreaI area31,
    const std::shared_ptr<const MapPrimitiviser::PrimitivisedObjects>& primitivisedObjects,
    SkCanvas& canvas,
    const bool fillBackground,
    const AreaI* const pDestinationArea,
    MapRasterizer_Metrics::Metric_rasterize* const metric,
    const std::shared_ptr<const IQueryController>& queryController)
{
    const Stopwatch totalStopwatch(metric != nullptr);

    const Context context(
        area31,
        primitivisedObjects,
        pDestinationArea ? *pDestinationArea : AreaI(0, 0, canvas.imageInfo().height(), canvas.imageInfo().width()));

    // Deal with background
    if (fillBackground)
    {
        // Get default background color
        const auto defaultBackgroundColor = context.env->getDefaultBackgroundColor(context.zoom);

        if (pDestinationArea)
        {
            // If destination area is specified, fill only it with background
            SkPaint bgPaint;
            bgPaint.setColor(defaultBackgroundColor.toSkColor());
            bgPaint.setStyle(SkPaint::kFill_Style);
            canvas.drawRectCoords(
                pDestinationArea->top(),
                pDestinationArea->left(),
                pDestinationArea->right(),
                pDestinationArea->bottom(),
                bgPaint);
        }
        else
        {
            // Since no destination area is specified, erase the whole canvas with the default background color
            canvas.clear(defaultBackgroundColor.toSkColor());
        }
    }

    AreaI destinationArea;
    if (pDestinationArea)
    {
        destinationArea = *pDestinationArea;
    }
    else
    {
        const auto targetSize = canvas.getDeviceSize();
        destinationArea = AreaI(0, 0, targetSize.height(), targetSize.width());
    }

    // Rasterize layers of map:
    rasterizeMapPrimitives(context, canvas, primitivisedObjects->polygons, PrimitivesType::Polygons, queryController);
    if (context.shadowMode != MapPresentationEnvironment::ShadowMode::NoShadow)
        rasterizeMapPrimitives(context, canvas, primitivisedObjects->polylines, PrimitivesType::Polylines_ShadowOnly, queryController);
    rasterizeMapPrimitives(context, canvas, primitivisedObjects->polylines, PrimitivesType::Polylines, queryController);

    if (metric)
        metric->elapsedTime += totalStopwatch.elapsed();
}
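
Unlike the glog-guarded Mesos examples, the OsmAnd code constructs its stopwatch with a flag, Stopwatch totalStopwatch(metric != nullptr), so timing only happens when a metrics sink was passed in. OsmAnd's actual Stopwatch is not shown in this excerpt; the following is a minimal sketch of a stopwatch with that constructor shape, assuming a std::chrono steady_clock implementation.

#include <chrono>

// Sketch of a stopwatch that only measures when told to, mirroring the way
// the OsmAnd code above constructs one with (metric != nullptr).
class GuardedStopwatch
{
public:
    explicit GuardedStopwatch(bool enabled)
        : _enabled(enabled)
        , _start(enabled ? std::chrono::steady_clock::now()
                         : std::chrono::steady_clock::time_point())
    {
    }

    // Returns elapsed seconds, or 0 when timing was disabled.
    float elapsed() const
    {
        if (!_enabled)
            return 0.0f;
        const auto now = std::chrono::steady_clock::now();
        return std::chrono::duration<float>(now - _start).count();
    }

private:
    const bool _enabled;
    const std::chrono::steady_clock::time_point _start;
};

// Usage mirroring rasterize():
//   const GuardedStopwatch totalStopwatch(metric != nullptr);
//   ...
//   if (metric)
//       metric->elapsedTime += totalStopwatch.elapsed();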
Code example #10
File: HTTPLoadTest.cpp  Project: carvalhomb/tsmells
	int main(const std::vector<std::string>& args)
	{
		if (_helpRequested)
		{
			displayHelp();
		}
		else
		{
			URI uri(_uri);
			std::vector<Thread*> threads;

			Stopwatch sw;
			sw.start();
			for (int i = 0; i < _threads; ++i)
			{
				Thread* pt = new Thread(_uri);
				poco_check_ptr(pt);
				threads.push_back(pt);
				HTTPClient* pHTTPClient = new HTTPClient(uri, _repetitions, _cookies, _verbose);
				poco_check_ptr(pHTTPClient);
				threads.back()->start(*pHTTPClient);
			}

			std::vector<Thread*>::iterator it = threads.begin();
			for(; it != threads.end(); ++it)
			{
				(*it)->join();
				delete *it;
			}
			sw.stop();

			HTTPClient::printStats(_uri, HTTPClient::totalAttempts(), HTTPClient::totalSuccessCount(), sw.elapsed());
		}
		
		return Application::EXIT_OK;
	}
Code example #11
File: HTTPLoadTest.cpp  Project: carvalhomb/tsmells
	void run()
	{
		Stopwatch sw;
		std::vector<HTTPCookie> cookies;

		for (int i = 0; i < _repetitions; ++i)
		{
			try
			{
				int usec = 0;
				std::string path(_uri.getPathAndQuery());
				if (path.empty()) path = "/";

				HTTPClientSession session(_uri.getHost(), _uri.getPort());
				HTTPRequest req(HTTPRequest::HTTP_GET, path, HTTPMessage::HTTP_1_1);
				
				if (_cookies)
				{
					NameValueCollection nvc;
					std::vector<HTTPCookie>::iterator it = cookies.begin();
					for(; it != cookies.end(); ++it)
						nvc.add((*it).getName(), (*it).getValue());

					req.setCookies(nvc);
				}

				HTTPResponse res;
				sw.restart();
				session.sendRequest(req);
				std::istream& rs = session.receiveResponse(res);
				NullOutputStream nos;
				StreamCopier::copyStream(rs, nos);
				sw.stop();
				_success += HTTPResponse::HTTP_OK == res.getStatus() ? 1 : 0;
				if (_cookies) res.getCookies(cookies);
				usec = int(sw.elapsed());

				if (_verbose)
				{
					FastMutex::ScopedLock lock(_mutex);
					std::cout 
					<< _uri.toString() << ' ' << res.getStatus() << ' ' << res.getReason() 
					<< ' ' << usec/1000.0 << "ms" << std::endl;
				}

				_usec += usec;
			}
			catch (Exception& exc)
			{
				FastMutex::ScopedLock lock(_mutex);
				std::cerr << exc.displayText() << std::endl;
			}
		}
		
		{
			FastMutex::ScopedLock lock(_mutex);
			_gSuccess += _success;
			_gUsec += _usec;
		}
		if (_verbose)
			printStats(_uri.toString(), _repetitions, _success, _usec);
	}
Code example #12
bool OsmAnd::MapPrimitivesProvider_P::obtainData(
    const TileId tileId,
    const ZoomLevel zoom,
    std::shared_ptr<MapPrimitivesProvider::Data>& outTiledData,
    MapPrimitivesProvider_Metrics::Metric_obtainData* const metric_,
    const IQueryController* const queryController)
{
#if OSMAND_PERFORMANCE_METRICS
    MapPrimitivesProvider_Metrics::Metric_obtainData localMetric;
    const auto metric = metric_ ? metric_ : &localMetric;
#else
    const auto metric = metric_;
#endif

    const Stopwatch totalStopwatch(metric != nullptr);

    std::shared_ptr<TileEntry> tileEntry;

    for (;;)
    {
        // Try to obtain previous instance of tile
        _tileReferences.obtainOrAllocateEntry(tileEntry, tileId, zoom,
            []
            (const TiledEntriesCollection<TileEntry>& collection, const TileId tileId, const ZoomLevel zoom) -> TileEntry*
            {
                return new TileEntry(collection, tileId, zoom);
            });

        // If state is "Undefined", change it to "Loading" and proceed with loading
        if (tileEntry->setStateIf(TileState::Undefined, TileState::Loading))
            break;

        // In case the tile entry is being loaded, wait until it finishes loading
        if (tileEntry->getState() == TileState::Loading)
        {
            QReadLocker scopedLocker(&tileEntry->loadedConditionLock);

            // If the tile is in 'Loading' state, wait until it becomes 'Loaded'
            while (tileEntry->getState() != TileState::Loaded)
                REPEAT_UNTIL(tileEntry->loadedCondition.wait(&tileEntry->loadedConditionLock));
        }

        if (!tileEntry->dataIsPresent)
        {
            // If there was no data, return same
            outTiledData.reset();
            return true;
        }
        else
        {
            // Otherwise, try to lock tile reference
            outTiledData = tileEntry->dataWeakRef.lock();

            // If successfully locked, just return it
            if (outTiledData)
                return true;

            // Otherwise consider this tile entry as expired, remove it from collection (it's safe to do that right now)
            // This will enable creation of new entry on next loop cycle
            _tileReferences.removeEntry(tileId, zoom);
            tileEntry.reset();
        }
    }

    const Stopwatch totalTimeStopwatch(
#if OSMAND_PERFORMANCE_METRICS
        true
#else
        metric != nullptr
#endif // OSMAND_PERFORMANCE_METRICS
        );

    // Obtain map objects data tile
    std::shared_ptr<IMapObjectsProvider::Data> dataTile;
    std::shared_ptr<Metric> submetric;
    owner->mapObjectsProvider->obtainData(
        tileId,
        zoom,
        dataTile,
        metric ? &submetric : nullptr,
        nullptr);
    if (metric && submetric)
        metric->addOrReplaceSubmetric(submetric);
    if (!dataTile)
    {
        // Store flag that there was no data and mark tile entry as 'Loaded'
        tileEntry->dataIsPresent = false;
        tileEntry->setState(TileState::Loaded);

        // Notify that tile has been loaded
        {
            QWriteLocker scopedLocker(&tileEntry->loadedConditionLock);
            tileEntry->loadedCondition.wakeAll();
        }

        outTiledData.reset();
        return true;
    }

    // Get primitivised objects
    std::shared_ptr<MapPrimitiviser::PrimitivisedObjects> primitivisedObjects;
    if (owner->mode == MapPrimitivesProvider::Mode::AllObjectsWithoutPolygonFiltering)
    {
        primitivisedObjects = owner->primitiviser->primitiviseAllMapObjects(
            zoom,
            dataTile->mapObjects,
            //NOTE: So far it's safe to turn off this cache. But it has to be rewritten. Since lock/unlock occurs too often, this kills entire performance
            //NOTE: Maybe a QuadTree-based cache with leaf-only locking will save up much. Or use supernodes, like DataBlock
            nullptr, //_primitiviserCache,
            nullptr,
            metric ? metric->findOrAddSubmetricOfType<MapPrimitiviser_Metrics::Metric_primitiviseAllMapObjects>().get() : nullptr);
    }
    else if (owner->mode == MapPrimitivesProvider::Mode::AllObjectsWithPolygonFiltering)
    {
        primitivisedObjects = owner->primitiviser->primitiviseAllMapObjects(
            Utilities::getScaleDivisor31ToPixel(PointI(owner->tileSize, owner->tileSize), zoom),
            zoom,
            dataTile->mapObjects,
            //NOTE: So far it's safe to turn off this cache. But it has to be rewritten. Since lock/unlock occurs too often, this kills entire performance
            //NOTE: Maybe a QuadTree-based cache with leaf-only locking will save up much. Or use supernodes, like DataBlock
            nullptr, //_primitiviserCache,
            nullptr,
            metric ? metric->findOrAddSubmetricOfType<MapPrimitiviser_Metrics::Metric_primitiviseAllMapObjects>().get() : nullptr);
    }
    else if (owner->mode == MapPrimitivesProvider::Mode::WithoutSurface)
    {
        primitivisedObjects = owner->primitiviser->primitiviseWithoutSurface(
            Utilities::getScaleDivisor31ToPixel(PointI(owner->tileSize, owner->tileSize), zoom),
            zoom,
            dataTile->mapObjects,
            //NOTE: So far it's safe to turn off this cache. But it has to be rewritten. Since lock/unlock occurs too often, this kills entire performance
            //NOTE: Maybe a QuadTree-based cache with leaf-only locking will save up much. Or use supernodes, like DataBlock
            nullptr, //_primitiviserCache,
            nullptr,
            metric ? metric->findOrAddSubmetricOfType<MapPrimitiviser_Metrics::Metric_primitiviseWithoutSurface>().get() : nullptr);
    }
    else // if (owner->mode == MapPrimitivesProvider::Mode::WithSurface)
    {
        const auto tileBBox31 = Utilities::tileBoundingBox31(tileId, zoom);
        primitivisedObjects = owner->primitiviser->primitiviseWithSurface(
            tileBBox31,
            PointI(owner->tileSize, owner->tileSize),
            zoom,
            dataTile->tileSurfaceType,
            dataTile->mapObjects,
            //NOTE: So far it's safe to turn off this cache. But it has to be rewritten. Since lock/unlock occurs too often, this kills entire performance
            //NOTE: Maybe a QuadTree-based cache with leaf-only locking will save up much. Or use supernodes, like DataBlock
            nullptr, //_primitiviserCache,
            nullptr,
            metric ? metric->findOrAddSubmetricOfType<MapPrimitiviser_Metrics::Metric_primitiviseWithSurface>().get() : nullptr);
    }

    // Create tile
    const std::shared_ptr<MapPrimitivesProvider::Data> newTiledData(new MapPrimitivesProvider::Data(
        tileId,
        zoom,
        dataTile,
        primitivisedObjects,
        new RetainableCacheMetadata(tileEntry, dataTile->retainableCacheMetadata)));

    // Publish new tile
    outTiledData = newTiledData;

    // Store weak reference to new tile and mark it as 'Loaded'
    tileEntry->dataIsPresent = true;
    tileEntry->dataWeakRef = newTiledData;
    tileEntry->setState(TileState::Loaded);

    // Notify that tile has been loaded
    {
        QWriteLocker scopedLocker(&tileEntry->loadedConditionLock);
        tileEntry->loadedCondition.wakeAll();
    }

    if (metric)
        metric->elapsedTime = totalStopwatch.elapsed();

#if OSMAND_PERFORMANCE_METRICS
#if OSMAND_PERFORMANCE_METRICS <= 1
    LogPrintf(LogSeverityLevel::Info,
        "%d polygons, %d polylines, %d points primitivised from %dx%d@%d in %fs",
        primitivisedObjects->polygons.size(),
        primitivisedObjects->polylines.size(),
        primitivisedObjects->polygons.size(),
        tileId.x,
        tileId.y,
        zoom,
        totalStopwatch.elapsed());
#else
    LogPrintf(LogSeverityLevel::Info,
        "%d polygons, %d polylines, %d points primitivised from %dx%d@%d in %fs:\n%s",
        primitivisedObjects->polygons.size(),
        primitivisedObjects->polylines.size(),
        primitivisedObjects->polygons.size(),
        tileId.x,
        tileId.y,
        zoom,
        totalStopwatch.elapsed(),
        qPrintable(metric ? metric->toString(QLatin1String("\t - ")) : QLatin1String("(null)")));
#endif // OSMAND_PERFORMANCE_METRICS <= 1
#endif // OSMAND_PERFORMANCE_METRICS
    
    return true;
}
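
The for (;;) loop at the top of obtainData() implements a small handshake on the shared tile entry: the first caller flips the state from Undefined to Loading and does the work, later callers wait on loadedCondition until the state becomes Loaded, and an expired entry is removed so the loop can allocate a fresh one. Here is a sketch of that handshake with std::mutex and std::condition_variable standing in for the Qt primitives (QReadLocker/QWriteLocker and the wait condition) used above; the names are illustrative only.

#include <condition_variable>
#include <mutex>

enum class TileState { Undefined, Loading, Loaded };

struct TileEntrySketch
{
    std::mutex mutex;
    std::condition_variable loadedCondition;
    TileState state = TileState::Undefined;

    // Returns true if the caller won the right to load the tile.
    bool tryStartLoading()
    {
        std::lock_guard<std::mutex> lock(mutex);
        if (state != TileState::Undefined)
            return false;
        state = TileState::Loading;
        return true;
    }

    // Losers block here until the winner publishes the tile.
    void waitUntilLoaded()
    {
        std::unique_lock<std::mutex> lock(mutex);
        loadedCondition.wait(lock, [this] { return state == TileState::Loaded; });
    }

    // Winner marks the entry loaded and wakes every waiter.
    void markLoaded()
    {
        {
            std::lock_guard<std::mutex> lock(mutex);
            state = TileState::Loaded;
        }
        loadedCondition.notify_all();
    }
};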
Code example #13
int main(int argc, char ** argv)
try
{
    using namespace DB;

    /** -3 - use reference implementation of LZ4
      * -2 - run all algorithms in round robin fashion
      * -1 - automatically detect best algorithm based on statistics
      * 0..3 - run specified algorithm
      */
    ssize_t variant = argc < 2 ? -1 : parse<ssize_t>(argv[1]);

    MMapReadBufferFromFileDescriptor in(STDIN_FILENO, 0);
//    ReadBufferFromFileDescriptor in(STDIN_FILENO);
    FasterCompressedReadBuffer decompressing_in(in, variant);
//    WriteBufferFromFileDescriptor out(STDOUT_FILENO);
//    HashingWriteBuffer hashing_out(out);

    Stopwatch watch;
//    copyData(decompressing_in, /*hashing_*/out);
    while (!decompressing_in.eof())
    {
        decompressing_in.position() = decompressing_in.buffer().end();
        decompressing_in.next();
    }
    watch.stop();

    std::cout << std::fixed << std::setprecision(3)
        << watch.elapsed() * 1000 / decompressing_in.count()
        << '\n';

/*
//    auto hash = hashing_out.getHash();

    double seconds = watch.elapsedSeconds();
    std::cerr << std::fixed << std::setprecision(3)
        << "Elapsed: " << seconds
        << ", " << formatReadableSizeWithBinarySuffix(in.count()) << " compressed"
        << ", " << formatReadableSizeWithBinarySuffix(decompressing_in.count()) << " decompressed"
        << ", ratio: " << static_cast<double>(decompressing_in.count()) / in.count()
        << ", " << formatReadableSizeWithBinarySuffix(in.count() / seconds) << "/sec. compressed"
        << ", " << formatReadableSizeWithBinarySuffix(decompressing_in.count() / seconds) << "/sec. decompressed"
//        << ", checksum: " << hash.first << "_" << hash.second
        << "\n";

//    decompressing_in.getStatistics().print();

    LZ4::PerformanceStatistics perf_stat = decompressing_in.getPerformanceStatistics();

    std::optional<size_t> best_variant;
    double best_variant_mean = 0;

    for (size_t i = 0; i < LZ4::PerformanceStatistics::NUM_ELEMENTS; ++i)
    {
        const LZ4::PerformanceStatistics::Element & elem = perf_stat.data[i];

        if (elem.count)
        {
            double mean = elem.mean();

            std::cerr << "Variant " << i << ": "
                << "count: " << elem.count
                << ", mean ns/b: " << 1000000000.0 * mean << " (" << formatReadableSizeWithBinarySuffix(1 / mean) << "/sec.)"
                << ", sigma ns/b: " << 1000000000.0 * elem.sigma()
                << "\n";

            if (!best_variant || mean < best_variant_mean)
            {
                best_variant_mean = mean;
                best_variant = i;
            }
        }
    }

    if (best_variant)
        std::cerr << "Best variant: " << *best_variant << "\n";
*/

    return 0;
}
catch (...)
{
    std::cerr << DB::getCurrentExceptionMessage(true);
    return DB::getCurrentExceptionCode();
}
Code example #14
File: bogowinplayer.cpp  Project: domino14/quackle
MoveList SmartBogowin::moves(int nmoves)
{
	Stopwatch stopwatch;
	
	if (currentPosition().bag().empty())
    {
        signalFractionDone(0);
		EndgamePlayer endgame;
		endgame.setPosition(currentPosition());
		return endgame.moves(nmoves);
	}

    // TODO
    // Move this all to an Inferrer class
    //
    // Generate all moves for all racks opp could have had.
    // This can be done efficiently by making a big "rack" out of the bag (with the computer
    // player's own rack removed) and generating moves from that big "rack" with some smarts
    // added to the move generator to not create moves that have so many tiles not in the
    // opp's play that they wouldn't have been possible from any of the racks we're looking
    // for. Make a ProbableRackList of racks from which the opp's play is best or nearly
    // (really what we're looking for is least bad). Most heavily weight the leaves for which
    // the play is as close to optimal as possible (an x-point static mistake), and let the
    // weights taper off to zero as the mistakes approach say x+7.
    //
    // Make the Simulator able to select racks randomly from the ProbableRackList
    if (m_parameters.inferring) {
        int numPlayers = currentPosition().players().size();
        UVcout << "numPlayers: " << numPlayers << endl;
        if (numPlayers == 2) {
            bool hasPreviousPosition;
            GamePosition previous = m_simulator.history().previousPosition(&hasPreviousPosition);
            if (hasPreviousPosition) {
                UVcout << "previous position:" << endl;
                UVcout << previous << endl;
            } else {
                UVcout << "no previous position" << endl;
            }
        }
    }

	UVcout << "SmartBogowin generating move from position:" << endl;
	UVcout << currentPosition() << endl;

	const int zerothPrune = 33;
	int plies = 2;
	
	if (currentPosition().bag().size() <= QUACKLE_PARAMETERS->rackSize() * 2)
		plies = -1;

	const int initialCandidates = m_additionalInitialCandidates + nmoves;
	
	currentPosition().kibitz(initialCandidates);
	
	m_simulator.setIncludedMoves(m_simulator.currentPosition().moves());
	m_simulator.pruneTo(zerothPrune, initialCandidates);
	m_simulator.makeSureConsideredMovesAreIncluded();
	m_simulator.setIgnoreOppos(false);
	
	MoveList staticMoves = m_simulator.moves(/* prune */ true, /* sort by equity */ false);
	m_simulator.moveConsideredMovesToBeginning(staticMoves);
	
	//UVcout << "Bogo static moves: " << staticMoves << endl;
	//UVcout << "Bogo considered moves: " << m_simulator.consideredMoves() << endl;
	
	MoveList firstMove;
	MoveList simmedMoves;

	MoveList::const_iterator it = staticMoves.begin();
	
	firstMove.push_back(*it);

	signalFractionDone(0);

	m_simulator.setIncludedMoves(firstMove);
	m_simulator.simulate(plies, minIterations());
	
	Move best = *m_simulator.moves(/* prune */ true, /* sort by win */ true).begin();
	simmedMoves.push_back(best);
	
	double bestbp = bogopoints(best);
	
	//UVcout << "firstMove: " << best << endl;

	for (++it; it != staticMoves.end(); ++it)
	{
		signalFractionDone(max(static_cast<float>(simmedMoves.size()) / static_cast<float>(staticMoves.size()), static_cast<float>(stopwatch.elapsed()) / static_cast<float>(m_parameters.secondsPerTurn)));

		if (shouldAbort())
			goto sort_and_return;

		//UVcout << "best move: " << best << " with " << bestbp  << " bogopoints." << endl;
		MoveList lookFurther;
		lookFurther.push_back(*it);
		m_simulator.setIncludedMoves(lookFurther);
		m_simulator.simulate(plies, minIterations());
		Move move = *m_simulator.moves(/* prune */ true, /* sort by win */ true).begin();
		double movebp = bogopoints(move);
		//UVcout << "we just simmed " << move << "; bogopoints: " << movebp << endl;
	
		if (movebp + 1.96 * 35.0 / sqrt((double)minIterations()) > bestbp)
		{
			m_simulator.simulate(plies, maxIterations() - minIterations());
			Move move2 = *m_simulator.moves(true, true).begin();
			movebp = bogopoints(move2);
			//UVcout << "sim it some more: " << move2 << " bogopoints: " << movebp << endl;
			simmedMoves.push_back(move2);
	
			if (move2.win > best.win) 
			{
				best = move2;
				bestbp = movebp;
			}
		}
		else
		{
			simmedMoves.push_back(move);
		}
		
		if (stopwatch.exceeded(m_parameters.secondsPerTurn))
		{
			//UVcout << "Bogowinplayer stopwatch exceeded its limit " << m_parameters.secondsPerTurn << ". Returning early." << endl;
			goto sort_and_return;
		}
	}	

	//UVcout << "We had extra time! whoopee!" << endl;

	sort_and_return:
	MoveList::sort(simmedMoves, MoveList::Win);
	MoveList ret;
	MoveList::const_iterator simmedEnd = simmedMoves.end();
	int i = 0;
	for (MoveList::const_iterator simmedIt = simmedMoves.begin();
	     (simmedIt != simmedEnd); ++i, ++simmedIt)
	{
		if (i < nmoves || m_simulator.isConsideredMove(*simmedIt))
			ret.push_back(*simmedIt);
	}

	//UVcout << "bogo returning moves:\n" << ret << endl;
	return ret;
}
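
The pruning test above, movebp + 1.96 * 35.0 / sqrt(minIterations()) > bestbp, reads as a rough 95% upper confidence bound on the candidate's bogopoints: 1.96 standard errors, with a standard deviation assumed to be about 35 per iteration, so a candidate only gets the longer simulation if its optimistic estimate could still beat the current best. A small sketch of that margin follows; the 1.96 and 35.0 constants come from the code above, while the iteration count used in main() is just an example value.

#include <cmath>
#include <iostream>

// Rough 95% upper bound used to decide whether a move deserves more
// simulation iterations: mean + 1.96 * sigma / sqrt(n).
double upperConfidenceBound(double meanBogopoints, int iterations,
                            double assumedSigma = 35.0)
{
    return meanBogopoints + 1.96 * assumedSigma / std::sqrt(double(iterations));
}

int main()
{
    // With 30 iterations, a move about 12.5 bogopoints behind the leader
    // would still qualify for the longer simulation.
    std::cout << upperConfidenceBound(100.0, 30) << std::endl; // ~112.5
    return 0;
}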
Code example #15
void OsmAnd::ObfMapSectionReader_P::readMapObject(
    const ObfReader_P& reader,
    const std::shared_ptr<const ObfMapSectionInfo>& section,
    uint64_t baseId,
    const std::shared_ptr<const ObfMapSectionLevelTreeNode>& treeNode,
    std::shared_ptr<OsmAnd::BinaryMapObject>& mapObject,
    const AreaI* bbox31,
    ObfMapSectionReader_Metrics::Metric_loadMapObjects* const metric)
{
    const auto cis = reader.getCodedInputStream().get();
    const auto baseOffset = cis->CurrentPosition();

    for (;;)
    {
        const auto tag = cis->ReadTag();
        const auto tgn = gpb::internal::WireFormatLite::GetTagFieldNumber(tag);
        switch (tgn)
        {
            case 0:
            {
                if (!ObfReaderUtilities::reachedDataEnd(cis))
                    return;

                if (mapObject && mapObject->points31.isEmpty())
                {
                    LogPrintf(LogSeverityLevel::Warning,
                        "Empty BinaryMapObject %s detected in section '%s'",
                        qPrintable(mapObject->id.toString()),
                        qPrintable(section->name));
                    mapObject.reset();
                }

                return;
            }
            case OBF::MapData::kAreaCoordinatesFieldNumber:
            case OBF::MapData::kCoordinatesFieldNumber:
            {
                const Stopwatch mapObjectPointsStopwatch(metric != nullptr);

                gpb::uint32 length;
                cis->ReadVarint32(&length);
                const auto oldLimit = cis->PushLimit(length);

                PointI p;
                p.x = treeNode->area31.left() & MaskToRead;
                p.y = treeNode->area31.top() & MaskToRead;

                AreaI objectBBox;
                objectBBox.top() = objectBBox.left() = std::numeric_limits<int32_t>::max();
                objectBBox.bottom() = objectBBox.right() = 0;
                auto lastUnprocessedVertexForBBox = 0;

                // In protobuf, a sint32 can be encoded using [1..4] bytes,
                // so try to guess size of array, and preallocate it.
                // (BytesUntilLimit/2) is ~= number of vertices, and is always larger than needed.
                // So it's impossible that a buffer overflow will ever happen. But assert on that.
                const auto probableVerticesCount = (cis->BytesUntilLimit() / 2);
                QVector< PointI > points31(probableVerticesCount);

                auto pPoint = points31.data();
                auto verticesCount = 0;
                bool shouldNotSkip = (bbox31 == nullptr);
                while (cis->BytesUntilLimit() > 0)
                {
                    PointI d;
                    d.x = (ObfReaderUtilities::readSInt32(cis) << ShiftCoordinates);
                    d.y = (ObfReaderUtilities::readSInt32(cis) << ShiftCoordinates);

                    p += d;

                    // Save point into storage
                    assert(points31.size() > verticesCount);
                    *(pPoint++) = p;
                    verticesCount++;

                    // Check if map object should be maintained
                    if (!shouldNotSkip && bbox31)
                    {
                        const Stopwatch mapObjectBboxStopwatch(metric != nullptr);

                        shouldNotSkip = bbox31->contains(p);
                        objectBBox.enlargeToInclude(p);

                        if (metric)
                            metric->elapsedTimeForMapObjectsBbox += mapObjectBboxStopwatch.elapsed();

                        lastUnprocessedVertexForBBox = verticesCount;
                    }
                }

                cis->PopLimit(oldLimit);

                // Since reserved space may be larger than actual amount of data,
                // shrink the vertices array
                points31.resize(verticesCount);

                // If the map object has no vertices, retain it in a special way to report later, when
                // its identifier is known
                if (points31.isEmpty())
                {
                    // Fake that this object is inside bbox
                    shouldNotSkip = true;
                    objectBBox = treeNode->area31;
                }

                // Even if no vertex lies inside the bbox, an edge
                // may still intersect the bbox
                if (!shouldNotSkip && bbox31)
                {
                    assert(lastUnprocessedVertexForBBox == points31.size());

                    shouldNotSkip =
                        objectBBox.contains(*bbox31) ||
                        bbox31->intersects(objectBBox);
                }

                // If the map object didn't fit, skip its entire content
                if (!shouldNotSkip)
                {
                    if (metric)
                    {
                        metric->elapsedTimeForSkippedMapObjectsPoints += mapObjectPointsStopwatch.elapsed();
                        metric->skippedMapObjectsPoints += points31.size();
                    }

                    cis->Skip(cis->BytesUntilLimit());
                    break;
                }

                // Update metric
                if (metric)
                {
                    metric->elapsedTimeForNotSkippedMapObjectsPoints += mapObjectPointsStopwatch.elapsed();
                    metric->notSkippedMapObjectsPoints += points31.size();
                }

                // In case bbox is not fully calculated, complete this task
                auto pPointForBBox = points31.data() + lastUnprocessedVertexForBBox;
                while (lastUnprocessedVertexForBBox < points31.size())
                {
                    const Stopwatch mapObjectBboxStopwatch(metric != nullptr);

                    objectBBox.enlargeToInclude(*pPointForBBox);

                    if (metric)
                        metric->elapsedTimeForMapObjectsBbox += mapObjectBboxStopwatch.elapsed();

                    lastUnprocessedVertexForBBox++;
                    pPointForBBox++;
                }

                // Finally, create the object
                if (!mapObject)
                    mapObject.reset(new OsmAnd::BinaryMapObject(section, treeNode->level));
                mapObject->isArea = (tgn == OBF::MapData::kAreaCoordinatesFieldNumber);
                mapObject->points31 = qMove(points31);
                mapObject->bbox31 = objectBBox;
                assert(treeNode->area31.top() - mapObject->bbox31.top() <= 32);
                assert(treeNode->area31.left() - mapObject->bbox31.left() <= 32);
                assert(mapObject->bbox31.bottom() - treeNode->area31.bottom() <= 1);
                assert(mapObject->bbox31.right() - treeNode->area31.right() <= 1);
                assert(mapObject->bbox31.right() >= mapObject->bbox31.left());
                assert(mapObject->bbox31.bottom() >= mapObject->bbox31.top());

                break;
            }
            case OBF::MapData::kPolygonInnerCoordinatesFieldNumber:
            {
                if (!mapObject)
                    mapObject.reset(new OsmAnd::BinaryMapObject(section, treeNode->level));

                gpb::uint32 length;
                cis->ReadVarint32(&length);
                auto oldLimit = cis->PushLimit(length);

                PointI p;
                p.x = treeNode->area31.left() & MaskToRead;
                p.y = treeNode->area31.top() & MaskToRead;

                // Preallocate memory
                const auto probableVerticesCount = (cis->BytesUntilLimit() / 2);
                mapObject->innerPolygonsPoints31.push_back(qMove(QVector< PointI >(probableVerticesCount)));
                auto& polygon = mapObject->innerPolygonsPoints31.last();

                auto pPoint = polygon.data();
                auto verticesCount = 0;
                while (cis->BytesUntilLimit() > 0)
                {
                    PointI d;
                    d.x = (ObfReaderUtilities::readSInt32(cis) << ShiftCoordinates);
                    d.y = (ObfReaderUtilities::readSInt32(cis) << ShiftCoordinates);

                    p += d;

                    // Save point into storage
                    assert(polygon.size() > verticesCount);
                    *(pPoint++) = p;
                    verticesCount++;
                }

                // Shrink memory
                polygon.resize(verticesCount);

                cis->PopLimit(oldLimit);

                break;
            }
            case OBF::MapData::kAdditionalTypesFieldNumber:
            case OBF::MapData::kTypesFieldNumber:
            {
                if (!mapObject)
                    mapObject.reset(new OsmAnd::BinaryMapObject(section, treeNode->level));

                auto& attributeIds = (tgn == OBF::MapData::kAdditionalTypesFieldNumber)
                    ? mapObject->additionalAttributeIds
                    : mapObject->attributeIds;

                gpb::uint32 length;
                cis->ReadVarint32(&length);
                auto oldLimit = cis->PushLimit(length);

                // Preallocate space
                attributeIds.reserve(cis->BytesUntilLimit());

                while (cis->BytesUntilLimit() > 0)
                {
                    gpb::uint32 attributeId;
                    cis->ReadVarint32(&attributeId);

                    attributeIds.push_back(attributeId);
                }

                // Shrink preallocated space
                attributeIds.squeeze();

                cis->PopLimit(oldLimit);

                break;
            }
            case OBF::MapData::kStringNamesFieldNumber:
            {
                gpb::uint32 length;
                cis->ReadVarint32(&length);
                auto oldLimit = cis->PushLimit(length);

                while (cis->BytesUntilLimit() > 0)
                {
                    bool ok;

                    gpb::uint32 stringRuleId;
                    ok = cis->ReadVarint32(&stringRuleId);
                    assert(ok);
                    gpb::uint32 stringId;
                    ok = cis->ReadVarint32(&stringId);
                    assert(ok);

                    mapObject->captions.insert(stringRuleId, qMove(ObfReaderUtilities::encodeIntegerToString(stringId)));
                    mapObject->captionsOrder.push_back(stringRuleId);
                }

                cis->PopLimit(oldLimit);

                break;
            }
            case OBF::MapData::kIdFieldNumber:
            {
                const auto d = ObfReaderUtilities::readSInt64(cis);
                const auto rawId = static_cast<uint64_t>(d + baseId);
                mapObject->id = ObfObjectId::generateUniqueId(rawId, baseOffset, section);

                //////////////////////////////////////////////////////////////////////////
                //if (mapObject->id.getOsmId() == 49048972u)
                //{
                //    int i = 5;
                //}
                //////////////////////////////////////////////////////////////////////////

                break;
            }
            default:
                ObfReaderUtilities::skipUnknownField(cis, tag);
                break;
        }
    }
}
Code example #16
File: leveldb.cpp  Project: EronWright/mesos
Try<Storage::State> LevelDBStorage::restore(const string& path)
{
  leveldb::Options options;
  options.create_if_missing = true;

  // TODO(benh): Can't use varint comparator until bug discussed at
  // groups.google.com/group/leveldb/browse_thread/thread/17eac39168909ba7
  // gets fixed. For now, we are using the default byte-wise
  // comparator and *assuming* that the encoding from unsigned long to
  // string produces a stable ordering. Checks below.
  // options.comparator = &comparator;

  const string& one = encode(1);
  const string& two = encode(2);
  const string& ten = encode(10);

  CHECK(leveldb::BytewiseComparator()->Compare(one, two) < 0);
  CHECK(leveldb::BytewiseComparator()->Compare(two, one) > 0);
  CHECK(leveldb::BytewiseComparator()->Compare(one, ten) < 0);
  CHECK(leveldb::BytewiseComparator()->Compare(ten, two) > 0);
  CHECK(leveldb::BytewiseComparator()->Compare(ten, ten) == 0);

  Stopwatch stopwatch;
  stopwatch.start();

  leveldb::Status status = leveldb::DB::Open(options, path, &db);

  if (!status.ok()) {
    // TODO(benh): Consider trying to repair the DB.
    return Error(status.ToString());
  }

  LOG(INFO) << "Opened db in " << stopwatch.elapsed();

  stopwatch.start(); // Restart the stopwatch.

  // TODO(benh): Conditionally compact to avoid long recovery times?
  db->CompactRange(nullptr, nullptr);

  LOG(INFO) << "Compacted db in " << stopwatch.elapsed();

  State state;
  state.begin = 0;
  state.end = 0;

  // TODO(benh): Consider just reading the "promise" record (e.g.,
  // 'encode(0, false)') and then iterating over the rest of the
  // records and confirming that they are all indeed of type
  // Record::Action.

  stopwatch.start(); // Restart the stopwatch.

  leveldb::Iterator* iterator = db->NewIterator(leveldb::ReadOptions());

  LOG(INFO) << "Created db iterator in " << stopwatch.elapsed();

  stopwatch.start(); // Restart the stopwatch.

  iterator->SeekToFirst();

  LOG(INFO) << "Seeked to beginning of db in " << stopwatch.elapsed();

  stopwatch.start(); // Restart the stopwatch.

  uint64_t keys = 0;

  while (iterator->Valid()) {
    keys++;
    const leveldb::Slice& slice = iterator->value();

    google::protobuf::io::ArrayInputStream stream(slice.data(), slice.size());

    Record record;

    if (!record.ParseFromZeroCopyStream(&stream)) {
      return Error("Failed to deserialize record");
    }

    switch (record.type()) {
      case Record::METADATA: {
        CHECK(record.has_metadata());
        state.metadata.CopyFrom(record.metadata());
        break;
      }

      // DEPRECATED!
      case Record::PROMISE: {
        CHECK(record.has_promise());
        // This replica is in old format. Set its status to VOTING
        // since there is no catch-up logic in the old code and this
        // replica is obviously not empty.
        state.metadata.set_status(Metadata::VOTING);
        state.metadata.set_promised(record.promise().proposal());
        break;
      }

      case Record::ACTION: {
        CHECK(record.has_action());
        const Action& action = record.action();
        if (action.has_learned() && action.learned()) {
          state.learned.insert(action.position());
          state.unlearned.erase(action.position());
          if (action.has_type() && action.type() == Action::TRUNCATE) {
            state.begin = std::max(state.begin, action.truncate().to());
          }
        } else {
          state.learned.erase(action.position());
          state.unlearned.insert(action.position());
        }
        state.end = std::max(state.end, action.position());

        // Cache the first position in this replica so during a
        // truncation, we can attempt to delete all positions from the
        // first position up to the truncate position. Note that this
        // is not the beginning position of the log, but rather the
        // first position that remains (i.e., hasn't been deleted) in
        // leveldb.
        first = min(first, action.position());
        break;
      }

      default: {
        return Error("Bad record");
      }
    }

    iterator->Next();
  }

  LOG(INFO) << "Iterated through " << keys
            << " keys in the db in " << stopwatch.elapsed();

  delete iterator;

  return state;
}
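
The five CHECKs at the top of restore() are the "checks below" promised in the comment: they verify that encode() produces keys whose byte-wise order matches numeric order, which the default BytewiseComparator relies on. Mesos' actual encode() is not part of this excerpt; a zero-padded, fixed-width decimal key is one encoding with that property, sketched here with a hypothetical encodeSketch().

#include <cassert>
#include <cstdint>
#include <cstdio>
#include <string>

// Hypothetical encoding whose lexicographic (byte-wise) order matches
// numeric order: fixed-width, zero-padded decimal.
static std::string encodeSketch(uint64_t position)
{
  char buffer[21]; // 20 digits is enough for any uint64_t, plus the NUL
  std::snprintf(buffer, sizeof(buffer), "%020llu",
                (unsigned long long) position);
  return std::string(buffer);
}

int main()
{
  assert(encodeSketch(1) < encodeSketch(2));
  assert(encodeSketch(2) < encodeSketch(10));   // "...02" < "...10"
  assert(encodeSketch(10) == encodeSketch(10));
  return 0;
}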
Code example #17
bool OsmAnd::BinaryMapDataProvider_P::obtainData(
    const TileId tileId,
    const ZoomLevel zoom,
    std::shared_ptr<MapTiledData>& outTiledData,
    BinaryMapDataProvider_Metrics::Metric_obtainData* const metric_,
    const IQueryController* const queryController)
{
#if OSMAND_PERFORMANCE_METRICS
    BinaryMapDataProvider_Metrics::Metric_obtainData localMetric;
    const auto metric = metric_ ? metric_ : &localMetric;
#else
    const auto metric = metric_;
#endif

    std::shared_ptr<TileEntry> tileEntry;

    for (;;)
    {
        // Try to obtain previous instance of tile
        _tileReferences.obtainOrAllocateEntry(tileEntry, tileId, zoom,
            []
            (const TiledEntriesCollection<TileEntry>& collection, const TileId tileId, const ZoomLevel zoom) -> TileEntry*
            {
                return new TileEntry(collection, tileId, zoom);
            });

        // If state is "Undefined", change it to "Loading" and proceed with loading
        if (tileEntry->setStateIf(TileState::Undefined, TileState::Loading))
            break;

        // In case the tile entry is being loaded, wait until it finishes loading
        if (tileEntry->getState() == TileState::Loading)
        {
            QReadLocker scopedLocker(&tileEntry->_loadedConditionLock);

            // If the tile is in 'Loading' state, wait until it becomes 'Loaded'
            while (tileEntry->getState() != TileState::Loaded)
                REPEAT_UNTIL(tileEntry->_loadedCondition.wait(&tileEntry->_loadedConditionLock));
        }

        // Try to lock tile reference
        outTiledData = tileEntry->_tile.lock();

        // If successfully locked, just return it
        if (outTiledData)
            return true;

        // Otherwise consider this tile entry as expired, remove it from collection (it's safe to do that right now)
        // This will enable creation of new entry on next loop cycle
        _tileReferences.removeEntry(tileId, zoom);
        tileEntry.reset();
    }

    const Stopwatch totalTimeStopwatch(
#if OSMAND_PERFORMANCE_METRICS
        true
#else
        metric != nullptr
#endif // OSMAND_PERFORMANCE_METRICS
        );

    // Obtain OBF data interface
    const Stopwatch obtainObfInterfaceStopwatch(metric != nullptr);
    const auto& dataInterface = owner->obfsCollection->obtainDataInterface();
    if (metric)
        metric->elapsedTimeForObtainingObfInterface += obtainObfInterfaceStopwatch.elapsed();

    // Get bounding box that covers this tile
    const auto tileBBox31 = Utilities::tileBoundingBox31(tileId, zoom);

    // Perform read-out
    const Stopwatch totalReadTimeStopwatch(metric != nullptr);
    QList< std::shared_ptr< const ObfMapSectionReader::DataBlock > > referencedMapDataBlocks;
    QList< std::shared_ptr<const Model::BinaryMapObject> > referencedMapObjects;
    QList< proper::shared_future< std::shared_ptr<const Model::BinaryMapObject> > > futureReferencedMapObjects;
    QList< std::shared_ptr<const Model::BinaryMapObject> > loadedMapObjects;
    QSet< uint64_t > loadedSharedMapObjects;
    auto tileFoundation = MapFoundationType::Undefined;
    dataInterface->loadMapObjects(
        &loadedMapObjects,
        &tileFoundation,
        zoom,
        &tileBBox31,
        [this, zoom, &referencedMapObjects, &futureReferencedMapObjects, &loadedSharedMapObjects, tileBBox31, metric]
        (const std::shared_ptr<const ObfMapSectionInfo>& section, const uint64_t id, const AreaI& bbox, const ZoomLevel firstZoomLevel, const ZoomLevel lastZoomLevel) -> bool
        {
            const Stopwatch objectsFilteringStopwatch(metric != nullptr);

            // This map object may be shared only if it crosses the bounds of a tile
            const auto canNotBeShared = tileBBox31.contains(bbox);

            // If the map object cannot be shared, just read it
            if (canNotBeShared)
            {
                if (metric)
                    metric->elapsedTimeForObjectsFiltering += objectsFilteringStopwatch.elapsed();

                return true;
            }

            // Otherwise, this map object can be shared, so it should be checked for
            // being present in shared mapObjects storage, or be reserved there
            std::shared_ptr<const Model::BinaryMapObject> sharedMapObjectReference;
            proper::shared_future< std::shared_ptr<const Model::BinaryMapObject> > futureSharedMapObjectReference;
            if (_sharedMapObjects.obtainReferenceOrFutureReferenceOrMakePromise(id, zoom, Utilities::enumerateZoomLevels(firstZoomLevel, lastZoomLevel), sharedMapObjectReference, futureSharedMapObjectReference))
            {
                if (sharedMapObjectReference)
                {
                    // If map object is already in shared objects cache and is available, use that one
                    referencedMapObjects.push_back(qMove(sharedMapObjectReference));
                }
                else
                {
                    futureReferencedMapObjects.push_back(qMove(futureSharedMapObjectReference));
                }

                if (metric)
                    metric->elapsedTimeForObjectsFiltering += objectsFilteringStopwatch.elapsed();

                return false;
            }

            // This map object was reserved, and is going to be shared, but needs to be loaded
            loadedSharedMapObjects.insert(id);
            return true;
        },
        _dataBlocksCache.get(),
        &referencedMapDataBlocks,
        nullptr,// query controller
        metric ? &metric->loadMapObjectsMetric : nullptr);

    // Process loaded-and-shared map objects
    for (auto& mapObject : loadedMapObjects)
    {
        // Check if this map object is shared
        if (!loadedSharedMapObjects.contains(mapObject->id))
            continue;

        // Add unique map object under lock to all zoom levels, for which this map object is valid
        assert(mapObject->level);
        _sharedMapObjects.fulfilPromiseAndReference(
            mapObject->id,
            Utilities::enumerateZoomLevels(mapObject->level->minZoom, mapObject->level->maxZoom),
            mapObject);
    }

    for (auto& futureMapObject : futureReferencedMapObjects)
    {
        auto mapObject = futureMapObject.get();

        referencedMapObjects.push_back(qMove(mapObject));
    }

    if (metric)
        metric->elapsedTimeForRead += totalReadTimeStopwatch.elapsed();

    // Prepare data for the tile
    const auto sharedMapObjectsCount = referencedMapObjects.size() + loadedSharedMapObjects.size();
    const auto allMapObjects = loadedMapObjects + referencedMapObjects;

    // Create tile
    const std::shared_ptr<BinaryMapDataTile> newTile(new BinaryMapDataTile(
        _dataBlocksCache,
        referencedMapDataBlocks,
        tileFoundation,
        allMapObjects,
        tileId,
        zoom));
    newTile->_p->_weakLink = _link->getWeak();
    newTile->_p->_refEntry = tileEntry;

    // Publish new tile
    outTiledData = newTile;

    // Store weak reference to new tile and mark it as 'Loaded'
    tileEntry->_tile = newTile;
    tileEntry->setState(TileState::Loaded);

    // Notify that tile has been loaded
    {
        QWriteLocker scopedLocker(&tileEntry->_loadedConditionLock);
        tileEntry->_loadedCondition.wakeAll();
    }

    if (metric)
    {
        metric->elapsedTime += totalTimeStopwatch.elapsed();
        metric->objectsCount += allMapObjects.size();
        metric->uniqueObjectsCount += allMapObjects.size() - sharedMapObjectsCount;
        metric->sharedObjectsCount += sharedMapObjectsCount;
    }

#if OSMAND_PERFORMANCE_METRICS
#if OSMAND_PERFORMANCE_METRICS <= 1
    LogPrintf(LogSeverityLevel::Info,
        "%d map objects (%d unique, %d shared) read from %dx%d@%d in %fs",
        allMapObjects.size(),
        allMapObjects.size() - sharedMapObjectsCount,
        sharedMapObjectsCount,
        tileId.x,
        tileId.y,
        zoom,
        totalTimeStopwatch.elapsed());
#else
    LogPrintf(LogSeverityLevel::Info,
        "%d map objects (%d unique, %d shared) read from %dx%d@%d in %fs:\n%s",
        allMapObjects.size(),
        allMapObjects.size() - sharedMapObjectsCount,
        sharedMapObjectsCount,
        tileId.x,
        tileId.y,
        zoom,
        totalTimeStopwatch.elapsed(),
        qPrintable(metric ? metric->toString(QLatin1String("\t - ")) : QLatin1String("(null)")));
#endif // OSMAND_PERFORMANCE_METRICS <= 1
#endif // OSMAND_PERFORMANCE_METRICS

    return true;
}
Code example #18
File: DynamicAny.cpp  Project: aleks-f/articles
void doPerformance(int count)
{
	std::cout << "Loop count: " << count << std::endl;

	std::cout << "Static cast Int32 to double:" << std::endl;
	std::cout << "--------------------------" << std::endl;
	fos << "Static cast Int32 to double" << std::endl;

	Int32 i = 0;
	double d;
	Stopwatch sw; sw.start();
	do { staticCastInt32ToDouble(d, i); }
	while (++i < count); sw.stop();
	print("static_cast<double>(Int32)", sw.elapsed());

	Any a = 1.0;
	i = 0; sw.start();
	do { unsafeAnyCastAnyToDouble(d, a); }
	while (++i < count); sw.stop();
	print("UnsafeAnyCast<double>(Int32)", sw.elapsed());

	std::cout << std::endl
		 << "Conversion Int32 to double:" << std::endl;
	std::cout << "--------------------------" << std::endl;
	fos << "Conversion Int32 to double" << std::endl;

	i = 0; sw.start();
	do { lexicalCastInt32ToDouble(d, i); }
	while (++i < count); sw.stop();
	print("boost::lexical_cast<double>(Int32)", sw.elapsed());

	DynamicAny da = 1;
	i = 0; sw.restart();
	do { convertInt32ToDouble(d, da); }
	while (++i < count); sw.stop();
	print("DynamicAny<Int32>::convert<double>()", sw.elapsed());

	i = 0; sw.restart();
	do { assignInt32ToDouble(d, da); }
	while (++i < count); sw.stop();
	print("operator=(double, DynamicAny<Int32>)", sw.elapsed());


	std::cout << std::endl
		 << "Conversion signed Int32 to UInt16:" << std::endl;
	std::cout << "--------------------------" << std::endl;
	fos << "Conversion signed Int32 to UInt16" << std::endl;

	UInt16 us = 0; Int32 j = 1; i = 0; sw.start();
	do { lexicalCastInt32toUInt16(us, j); }
	while (++i < count); sw.stop();
	print("boost::lexical_cast<UInt16>(Int32)", sw.elapsed());

	i = 0; sw.restart();
	do { convertInt32toUInt16(us, da); }
	while (++i < count); sw.stop();
	print("DynamicAny<Int32>::convert<UInt16>()", sw.elapsed());

	i = 0; sw.restart();
	do { assignInt32toUInt16(us, da); }
	while (++i < count); sw.stop();
	print("operator=(UInt16, DynamicAny<Int32>)", sw.elapsed());

	std::cout << std::endl
		<< "Conversion string to double:" << std::endl;
	std::cout << "-----------" << std::endl;
	fos << "Conversion string to double" << std::endl;

	std::string s = "1234.5";
	i = 0;
	sw.start();
	do { lexicalCastStringToDouble(d, s); }
	while (++i < count); sw.stop();
	print("boost::lexical_cast<double>(string)", sw.elapsed());

	DynamicAny ds = "1234.5";
	i = 0; sw.restart();
	do { convertStringToDouble(d, ds); }
	while (++i < count); sw.stop();
	print("DynamicAny<string>::convert<double>()", sw.elapsed());

	i = 0; sw.restart();
	do { assignStringToDouble(d, ds); }
	while (++i < count); sw.stop();
	print("operator=(double, DynamicAny<string>)", sw.elapsed());

	std::cout << std::endl
		<< "Extraction double:" << std::endl;
	std::cout << "-----------" << std::endl;
	fos << "Extraction double" << std::endl;

	a = 1.0;
	i = 0; sw.restart();
	do { anyCastRefDouble(d, a); }
	while (++i < count); sw.stop();
	print("RefAnyCast<double>(Any&)", sw.elapsed());

	i = 0; sw.restart();
	do { anyCastPtrDouble(d, a); }
	while (++i < count); sw.stop();
	print("AnyCast<double>(Any*)", sw.elapsed());

	da = 1.0; i = 0;
	sw.restart();
	do { extractDouble(d, da); }
	while (++i < count); sw.stop();
	print("DynamicAny::extract<double>()", sw.elapsed());


	std::cout << std::endl
		<< "Extraction string:" << std::endl;
	std::cout << "-----------" << std::endl;
	fos << "Extraction string" << std::endl;

	Any as = std::string("1234.5");
	i = 0; sw.restart();
	do { anyCastRefString(s, as); }
	while (++i < count); sw.stop();
	print("RefAnyCast<std::string>(Any&)", sw.elapsed());

	i = 0; sw.restart();
	do { anyCastPtrString(s, as); }
	while (++i < count); sw.stop();
	print("AnyCast<std::string>(Any*)", sw.elapsed());

	ds = "1234.5"; i = 0;
	sw.restart();
	do { extractString(s, ds); }
	while (++i < count); sw.stop();
	print("DynamicAny::extract<std::string>()", sw.elapsed());

	fos.close();
}
Code example #19
void PollSetTest::testPoll()
{
	EchoServer echoServer1;
	EchoServer echoServer2;
	StreamSocket ss1;
	StreamSocket ss2;

	ss1.connect(SocketAddress("127.0.0.1", echoServer1.port()));
	ss2.connect(SocketAddress("127.0.0.1", echoServer2.port()));

	PollSet ps;
	ps.add(ss1, PollSet::POLL_READ);

	// nothing readable
	Stopwatch sw;
	sw.start();
	Timespan timeout(1000000);
	assert (ps.poll(timeout).empty());
	assert (sw.elapsed() >= 900000);
	sw.restart();

	ps.add(ss2, PollSet::POLL_READ);

	// ss1 must be writable, if polled for
	ps.update(ss1, PollSet::POLL_READ | PollSet::POLL_WRITE);
	PollSet::SocketModeMap sm = ps.poll(timeout);
	assert (sm.find(ss1) != sm.end());
	assert (sm.find(ss2) == sm.end());
	assert (sm.find(ss1)->second == PollSet::POLL_WRITE);
	assert (sw.elapsed() < 100000);

	ps.update(ss1, PollSet::POLL_READ);

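	// Data sent to echoServer1 is echoed back, so ss1 should become readable.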
	ss1.sendBytes("hello", 5);
	char buffer[256];
	sw.restart();
	sm = ps.poll(timeout);
	assert (sm.find(ss1) != sm.end());
	assert (sm.find(ss2) == sm.end());
	assert (sm.find(ss1)->second == PollSet::POLL_READ);
	assert (sw.elapsed() < 100000);

	int n = ss1.receiveBytes(buffer, sizeof(buffer));
	assert (n == 5);
	assert (std::string(buffer, n) == "hello");


	ss2.sendBytes("HELLO", 5);
	sw.restart();
	sm = ps.poll(timeout);
	assert (sm.find(ss1) == sm.end());
	assert (sm.find(ss2) != sm.end());
	assert (sm.find(ss2)->second == PollSet::POLL_READ);
	assert (sw.elapsed() < 100000);

	n = ss2.receiveBytes(buffer, sizeof(buffer));
	assert (n == 5);
	assert (std::string(buffer, n) == "HELLO");

	ps.remove(ss2);

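	// ss2 is no longer in the poll set, so its pending data must not be reported.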
	ss2.sendBytes("HELLO", 5);
	sw.restart();
	sm = ps.poll(timeout);
	assert (sm.empty());

	n = ss2.receiveBytes(buffer, sizeof(buffer));
	assert (n == 5);
	assert (std::string(buffer, n) == "HELLO");

	ss1.close();
	ss2.close();
}
Code example #20
bool OsmAnd::BinaryMapRasterBitmapTileProvider_GPU_P::obtainData(
    const TileId tileId,
    const ZoomLevel zoom,
    std::shared_ptr<MapTiledData>& outTiledData,
    BinaryMapRasterBitmapTileProvider_Metrics::Metric_obtainData* const metric_,
    const IQueryController* const queryController)
{
#if OSMAND_PERFORMANCE_METRICS
    BinaryMapRasterBitmapTileProvider_Metrics::Metric_obtainData localMetric;
    const auto metric = metric_ ? metric_ : &localMetric;
#else
    const auto metric = metric_;
#endif

    const Stopwatch totalStopwatch(
#if OSMAND_PERFORMANCE_METRICS
        true
#else
        metric != nullptr
#endif // OSMAND_PERFORMANCE_METRICS
        );

    // Obtain offline map primitives tile
    std::shared_ptr<MapTiledData> primitivesTile_;
    owner->primitivesProvider->obtainData(tileId, zoom, primitivesTile_);
    if (!primitivesTile_)
    {
        outTiledData.reset();
        return true;
    }
    const auto primitivesTile = std::static_pointer_cast<BinaryMapPrimitivesTile>(primitivesTile_);

    //TODO: SkGpuDevice
    // Allocate rasterization target
    const auto tileSize = owner->getTileSize();
    const std::shared_ptr<SkBitmap> rasterizationSurface(new SkBitmap());
    rasterizationSurface->setConfig(SkBitmap::kARGB_8888_Config, tileSize, tileSize);
    if (!rasterizationSurface->allocPixels())
    {
        LogPrintf(LogSeverityLevel::Error, "Failed to allocate buffer for ARGB8888 rasterization surface %dx%d", tileSize, tileSize);
        return false;
    }
    SkBitmapDevice rasterizationTarget(*rasterizationSurface);

    // Create rasterization canvas
    SkCanvas canvas(&rasterizationTarget);

    // Perform actual rendering
    if (!primitivesTile->primitivisedArea->isEmpty())
    {
        _mapRasterizer->rasterize(
            primitivesTile->primitivisedArea,
            canvas,
            true,
            nullptr,
            metric ? &metric->rasterizeMetric : nullptr,
            queryController);
    }
    else
    {
        // If there is no data to rasterize, tell that this tile is not available
        outTiledData.reset();
        return true;
    }

    // Or supply newly rasterized tile
    outTiledData.reset(new BinaryMapRasterizedTile(
        primitivesTile,
        rasterizationSurface,
        AlphaChannelData::NotPresent,
        owner->getTileDensityFactor(),
        tileId,
        zoom));

    if (metric)
        metric->elapsedTime += totalStopwatch.elapsed();

#if OSMAND_PERFORMANCE_METRICS
#if OSMAND_PERFORMANCE_METRICS <= 1
    LogPrintf(LogSeverityLevel::Info,
        "%dx%d@%d rasterized on GPU in %fs",
        tileId.x,
        tileId.y,
        zoom,
        totalStopwatch.elapsed());
#else
    LogPrintf(LogSeverityLevel::Info,
        "%dx%d@%d rasterized on GPU in %fs:\n%s",
        tileId.x,
        tileId.y,
        zoom,
        totalStopwatch.elapsed(),
        qPrintable(metric ? metric->toString(QLatin1String("\t - ")) : QLatin1String("(null)")));
#endif // OSMAND_PERFORMANCE_METRICS <= 1
#endif // OSMAND_PERFORMANCE_METRICS

    return true;
}
Code example #21
MergeTreeData::MutableDataPartPtr MergeTreeDataWriter::writeTempPart(BlockWithDateInterval & block_with_dates, Int64 temp_index)
{
    /// For logging
    Stopwatch stopwatch;

    Block & block = block_with_dates.block;
    UInt16 min_date = block_with_dates.min_date;
    UInt16 max_date = block_with_dates.max_date;

    const auto & date_lut = DateLUT::instance();

    DayNum_t min_month = date_lut.toFirstDayNumOfMonth(DayNum_t(min_date));
    DayNum_t max_month = date_lut.toFirstDayNumOfMonth(DayNum_t(max_date));

    if (min_month != max_month)
        throw Exception("Logical error: part spans more than one month.");

    size_t part_size = (block.rows() + data.index_granularity - 1) / data.index_granularity;

    static const String TMP_PREFIX = "tmp_insert_";
    String part_name = ActiveDataPartSet::getPartName(DayNum_t(min_date), DayNum_t(max_date), temp_index, temp_index, 0);

    MergeTreeData::MutableDataPartPtr new_data_part = std::make_shared<MergeTreeData::DataPart>(data);
    new_data_part->name = part_name;
    new_data_part->relative_path = TMP_PREFIX + part_name;
    new_data_part->is_temp = true;

    Poco::File(new_data_part->getFullPath()).createDirectories();

    /// If some columns need to be computed for sorting, do it here.
    if (data.merging_params.mode != MergeTreeData::MergingParams::Unsorted)
        data.getPrimaryExpression()->execute(block);

    SortDescription sort_descr = data.getSortDescription();

    ProfileEvents::increment(ProfileEvents::MergeTreeDataWriterBlocks);

    /// Sort.
    IColumn::Permutation * perm_ptr = nullptr;
    IColumn::Permutation perm;
    if (data.merging_params.mode != MergeTreeData::MergingParams::Unsorted)
    {
        if (!isAlreadySorted(block, sort_descr))
        {
            stableGetPermutation(block, sort_descr, perm);
            perm_ptr = &perm;
        }
        else
            ProfileEvents::increment(ProfileEvents::MergeTreeDataWriterBlocksAlreadySorted);
    }

    NamesAndTypesList columns = data.getColumnsList().filter(block.getColumnsList().getNames());
    MergedBlockOutputStream out(data, new_data_part->getFullPath(), columns, CompressionMethod::LZ4);

    out.writePrefix();
    out.writeWithPermutation(block, perm_ptr);
    MergeTreeData::DataPart::Checksums checksums = out.writeSuffixAndGetChecksums();

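    /// Fill in the metadata of the new part.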
    new_data_part->left_date = DayNum_t(min_date);
    new_data_part->right_date = DayNum_t(max_date);
    new_data_part->left = temp_index;
    new_data_part->right = temp_index;
    new_data_part->level = 0;
    new_data_part->size = part_size;
    new_data_part->modification_time = time(0);
    new_data_part->month = min_month;
    new_data_part->columns = columns;
    new_data_part->checksums = checksums;
    new_data_part->index.swap(out.getIndex());
    new_data_part->size_in_bytes = MergeTreeData::DataPart::calcTotalSize(new_data_part->getFullPath());

    ProfileEvents::increment(ProfileEvents::MergeTreeDataWriterRows, block.rows());
    ProfileEvents::increment(ProfileEvents::MergeTreeDataWriterUncompressedBytes, block.bytes());
    ProfileEvents::increment(ProfileEvents::MergeTreeDataWriterCompressedBytes, new_data_part->size_in_bytes);

    if (auto part_log = context.getPartLog(data.getDatabaseName(), data.getTableName()))
    {
        PartLogElement elem;
        elem.event_time = time(0);

        elem.event_type = PartLogElement::NEW_PART;
        elem.size_in_bytes = new_data_part->size_in_bytes;
        elem.duration_ms = stopwatch.elapsed() / 1000000;

        elem.database_name = new_data_part->storage.getDatabaseName();
        elem.table_name = new_data_part->storage.getTableName();
        elem.part_name = part_name;

        part_log->add(elem);
    }

    return new_data_part;
}
Code example #22
void LocalSocketTest::testSocketsPerformance()
{
	Timestamp::TimeDiff local = 0, net = 0;
	std::size_t initBufSize = 1;
	std::size_t initReps = 1;
	bool noDelay[2] = { true, false };

	std::cout << std::endl << "OS Name:         " << Environment::osName() << std::endl;
	std::cout << "OS Version:      " << Environment::osVersion() << std::endl;
	std::cout << "OS Architecture: " << Environment::osArchitecture() << std::endl;

	for (int d = 0; d < 2; ++d)
	{
		double locData = 0.0, locTime = 0.0, netData = 0.0, netTime = 0.0;
		std::ostringstream os;
		os << Environment::osName() << '-' 
			<< Environment::osVersion() << '-' 
			<< Environment::osArchitecture() << "-TCP";
		if (noDelay[d]) os << "-nodelay";
		os << ".csv";
		File f(os.str());
		if (f.exists()) f.remove();
		FileOutputStream fos(os.str());

		for (std::size_t repetitions = initReps;
				repetitions <= 100000;
				repetitions *= 10)
		{
			for (std::size_t bufSize = initBufSize; bufSize < 20000; bufSize *= 2)
			{
				char* pBuf = new char[bufSize];
				{
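					// Local (Unix domain) TCP socket: time the send/receive round trips.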
					SocketAddress sas("/tmp/poco.server.tcp.sock");
					EchoServer echoServer(sas, bufSize);
					SocketAddress sac("/tmp/poco.client.tcp.sock");
					StreamSocket ss(sas, &sac);
					int recv = 0, sent = 0;
					Stopwatch sw; int i = 0;
					for (; i < repetitions; ++i)
					{
						sent = 0; recv = 0; local = 0;

						do
						{
							int s;
							sw.restart();
							s = ss.sendBytes(pBuf + sent, bufSize - sent);
							sw.stop();
							local += sw.elapsed();
							sent += s;
						} while (sent < bufSize);

						do
						{
							int r;
							sw.restart();
							r = ss.receiveBytes(pBuf + recv, bufSize - recv);
							sw.stop();
							local += sw.elapsed();
							recv += r;
						} while (recv < bufSize);

						locData += sent;
						locData += recv;
						locTime += local;

						poco_assert (sent == bufSize && recv == bufSize);
					}
					
					std::cout << "Local TCP socket, " << i << " repetitions, " << bufSize << " bytes, " 
						<< local << " [us]." << std::endl;
					ss.close();
				}

				{
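					// Network TCP socket over loopback: time the send/receive round trips.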
					SocketAddress sa("localhost", 12345);
					EchoServer echoServer(sa, bufSize);
					StreamSocket ss;
					ss.connect(SocketAddress(sa.host(), echoServer.port()));
					if (noDelay[d]) ss.setNoDelay(true);
					int recv = 0, sent = 0;
					Stopwatch sw; int i = 0; 
					for (; i < repetitions; ++i)
					{
						sent = 0; recv = 0; net = 0;
						do
						{
							int s;
							sw.restart();
							s = ss.sendBytes(pBuf + sent, bufSize - sent);
							sw.stop();
							net += sw.elapsed();
							sent += s;
						} while (sent < bufSize);

						do
						{
							int r;
							sw.restart();
							r = ss.receiveBytes(pBuf + recv, bufSize - recv);
							sw.stop();
							net += sw.elapsed();
							recv += r;
						} while (recv < bufSize);

						netData += sent;
						netData += recv;
						netTime += net;

						poco_assert (sent == bufSize && recv == bufSize);
					}
					
					std::cout << "Network TCP socket, " << i << " repetitions, " << bufSize << " bytes, " 
						<< net << " [us]." << std::endl;
					fos << i << ',' << bufSize << ',';
					ss.close();
				}
				delete [] pBuf;

				double ratio = local ? ((double) net) / ((double) local) : (double) net;
				double diff = ((double) net) - ((double) local);
				std::cout << "Ratio: " << ratio << "(" << diff / 1000.0 << "[us/msg]" << ')' << std::endl;

				fos << ratio << std::endl;
			}
		}
		poco_assert (locData == netData);

		double locDTR = ((locData / (locTime / Timestamp::resolution())) * 8) / 1000000;
		double netDTR = ((netData / (netTime / Timestamp::resolution())) * 8) / 1000000;

		std::cout << (noDelay[d] ? "NO DELAY" : "DELAY") << std::endl
			<< "=================================" << std::endl
			<< "Local DTR: " << locDTR << " [Mbit/s]" << std::endl
			<< "Network DTR: " << netDTR << " [Mbit/s]" << std::endl
			<< "Local sockets speedup: " << ((locDTR / netDTR) * 100) - 100 << '%' << std::endl
			<< "=================================" << std::endl;

		fos << "=================================" << std::endl
			<< "Local DTR: " << locDTR << " [Mbit/s]" << std::endl
			<< "Network DTR: " << netDTR << " [Mbit/s]" << std::endl
			<< "Local sockets speedup: " << ((locDTR / netDTR) * 100) - 100 << '%' << std::endl
			<< "=================================" << std::endl;

		fos.close();
	}
}
Code example #23
void OsmAnd::ObfMapSectionReader_P::readMapObjectsBlock(
    const ObfReader_P& reader,
    const std::shared_ptr<const ObfMapSectionInfo>& section,
    const std::shared_ptr<const ObfMapSectionLevelTreeNode>& tree,
    QList< std::shared_ptr<const OsmAnd::BinaryMapObject> >* resultOut,
    const AreaI* bbox31,
    const FilterReadingByIdFunction filterById,
    const VisitorFunction visitor,
    const std::shared_ptr<const IQueryController>& queryController,
    ObfMapSectionReader_Metrics::Metric_loadMapObjects* const metric)
{
    const auto cis = reader.getCodedInputStream().get();

    QList< std::shared_ptr<BinaryMapObject> > intermediateResult;
    QStringList mapObjectsCaptionsTable;
    gpb::uint64 baseId = 0;
    for (;;)
    {
        const auto tag = cis->ReadTag();
        switch (gpb::internal::WireFormatLite::GetTagFieldNumber(tag))
        {
            case 0:
            {
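                // End of the block: resolve object captions from the string table and emit the results.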
                if (!ObfReaderUtilities::reachedDataEnd(cis))
                    return;

                for (const auto& mapObject : constOf(intermediateResult))
                {
                    // Fill mapObject captions from string-table
                    for (auto& caption : mapObject->captions)
                    {
                        const auto stringId = ObfReaderUtilities::decodeIntegerFromString(caption);

                        if (stringId >= mapObjectsCaptionsTable.size())
                        {
                            LogPrintf(LogSeverityLevel::Error,
                                "Data mismatch: string #%d (map object %s) not found in string table (size %d) in section '%s'",
                                stringId,
                                qPrintable(mapObject->id.toString()),
                                mapObjectsCaptionsTable.size(), qPrintable(section->name));
                            caption = QString::fromLatin1("#%1 NOT FOUND").arg(stringId);
                            continue;
                        }
                        caption = mapObjectsCaptionsTable[stringId];
                    }

                    //////////////////////////////////////////////////////////////////////////
                    //if (mapObject->id.getOsmId() == 49048972u)
                    //{
                    //    int i = 5;
                    //}
                    //////////////////////////////////////////////////////////////////////////

                    if (!visitor || visitor(mapObject))
                    {
                        if (resultOut)
                            resultOut->push_back(qMove(mapObject));
                    }
                }

                return;
            }
            case OBF::MapDataBlock::kBaseIdFieldNumber:
            {
                cis->ReadVarint64(&baseId);
                //////////////////////////////////////////////////////////////////////////
                //if (bbox31)
                //    LogPrintf(LogSeverityLevel::Debug, "BBOX %d %d %d %d - MAP BLOCK %" PRIi64, bbox31->top, bbox31->left, bbox31->bottom, bbox31->right, baseId);
                //////////////////////////////////////////////////////////////////////////
                break;
            }
            case OBF::MapDataBlock::kDataObjectsFieldNumber:
            {
                gpb::uint32 length;
                cis->ReadVarint32(&length);
                const auto offset = cis->CurrentPosition();

                // Read map object content
                const Stopwatch readMapObjectStopwatch(metric != nullptr);
                std::shared_ptr<OsmAnd::BinaryMapObject> mapObject;
                auto oldLimit = cis->PushLimit(length);
                
                readMapObject(reader, section, baseId, tree, mapObject, bbox31, metric);

                ObfReaderUtilities::ensureAllDataWasRead(cis);
                cis->PopLimit(oldLimit);

                // Update metric
                if (metric)
                    metric->visitedMapObjects++;

                // If map object was not read, skip it
                if (!mapObject)
                {
                    if (metric)
                        metric->elapsedTimeForOnlyVisitedMapObjects += readMapObjectStopwatch.elapsed();

                    break;
                }

                // Update metric
                if (metric)
                {
                    metric->elapsedTimeForOnlyAcceptedMapObjects += readMapObjectStopwatch.elapsed();

                    metric->acceptedMapObjects++;
                }

                //////////////////////////////////////////////////////////////////////////
                //if (mapObject->id.getOsmId() == 49048972u)
                //{
                //    int i = 5;
                //}
                //////////////////////////////////////////////////////////////////////////

                // Check if map object is desired
                const auto shouldReject = filterById && !filterById(
                    section,
                    mapObject->id,
                    mapObject->bbox31,
                    mapObject->level->minZoom,
                    mapObject->level->maxZoom);
                if (shouldReject)
                    break;

                // Save object
                intermediateResult.push_back(qMove(mapObject));

                break;
            }
            case OBF::MapDataBlock::kStringTableFieldNumber:
            {
                gpb::uint32 length;
                cis->ReadVarint32(&length);
                const auto offset = cis->CurrentPosition();
                auto oldLimit = cis->PushLimit(length);
                if (intermediateResult.isEmpty())
                {
                    cis->Skip(cis->BytesUntilLimit());
                    cis->PopLimit(oldLimit);
                    break;
                }
                
                ObfReaderUtilities::readStringTable(cis, mapObjectsCaptionsTable);

                ObfReaderUtilities::ensureAllDataWasRead(cis);
                cis->PopLimit(oldLimit);

                break;
            }
            default:
                ObfReaderUtilities::skipUnknownField(cis, tag);
                break;
        }
    }
}
Code example #24
void DatagramLocalSocketTest::testDatagramSocketPerformance()
{
	Timestamp::TimeDiff local, net;
	std::size_t initBufSize = 1;
	std::size_t initReps = 1;
	double locData = 0.0, locTime = 0.0, netData = 0.0, netTime = 0.0;

	std::cout << std::endl << "OS Name:         " << Environment::osName() << std::endl;
	std::cout << "OS Version:      " << Environment::osVersion() << std::endl;
	std::cout << "OS Architecture: " << Environment::osArchitecture() << std::endl;

	std::ostringstream os;
	os << Environment::osName() << '-' 
		<< Environment::osVersion() << '-' 
		<< Environment::osArchitecture()
		<< "-UDP.csv";
	File f(os.str());
	if (f.exists()) f.remove();
	FileOutputStream fos(os.str());

	for (std::size_t repetitions = initReps;
			repetitions <= 100000;
			repetitions *= 10)
	{
		for (std::size_t bufSize = initBufSize; bufSize < 20000; bufSize *= 2)
		{
			char* pBuf = new char[bufSize];
			{
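				// Local (Unix domain) UDP socket: time the send/receive round trips.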
				UDPLocalEchoServer echoServer(bufSize);
				DatagramSocket ss(SocketAddress("/tmp/poco.client.udp.sock"), true);
				
				SocketAddress addr(echoServer.address().toString());
				ss.connect(addr);
				int recv = 0, sent = 0;
				Stopwatch sw; int i = 0;
				for (; i < repetitions; ++i)
				{
					sent = 0; recv = 0; local = 0;

					do
					{
						int s;
						sw.restart();
						s = ss.sendBytes(pBuf + sent, bufSize - sent);
						sw.stop();
						local += sw.elapsed();
						sent += s;
					} while (sent < bufSize);

					do
					{
						int r;
						sw.restart();
						r = ss.receiveBytes(pBuf + recv, bufSize - recv);
						sw.stop();
						local += sw.elapsed();
						recv += r;
					} while (recv < bufSize);

					locData += sent;
					locData += recv;
					locTime += local;

					poco_assert (sent == bufSize && recv == bufSize);
				}

				std::cout << "Local UDP socket, " << i << " repetitions, " << bufSize << " bytes, " 
					<< local << " [us]." << std::endl;
				ss.close();
			}

			{
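				// Network UDP socket over loopback: time the send/receive round trips.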
				UDPEchoServer echoServer(bufSize);
				DatagramSocket ss;
				ss.connect(SocketAddress("localhost", echoServer.port()));
				int recv = 0, sent = 0;
				Stopwatch sw; int i = 0; 
				for (; i < repetitions; ++i)
				{
					sent = 0; recv = 0; net = 0;

					do
					{
						int s;
						sw.restart();
						s = ss.sendBytes(pBuf + sent, bufSize - sent);
						sw.stop();
						net += sw.elapsed();
						sent += s;
					} while (sent < bufSize);

					do
					{
						int r;
						sw.restart();
						r = ss.receiveBytes(pBuf + recv, bufSize - recv);
						sw.stop();
						net += sw.elapsed();
						recv += r;
					} while (recv < bufSize);

					netData += sent;
					netData += recv;
					netTime += net;

					poco_assert (sent == bufSize && recv == bufSize);
				}
				
				std::cout << "Network UDP socket, " << i << " repetitions, " << bufSize << " bytes, " 
					<< net << " [us]." << std::endl;
				fos << i << ',' << bufSize << ',';
				ss.close();
			}
			delete [] pBuf;

			double ratio = local ? ((double) net) / ((double) local) : (double) net;
			double diff = ((double) net) - ((double) local);
			std::cout << "Ratio: " << ratio << "(" << diff / 1000.0 << "[us/msg]" << ')' << std::endl;

			fos << ratio << std::endl;
		}
	}

	poco_assert (locData == netData);

	double locDTR = ((locData / (locTime / Timestamp::resolution())) * 8) / 1000000;
	double netDTR = ((netData / (netTime / Timestamp::resolution())) * 8) / 1000000;

	std::cout << "=================================" << std::endl
		<< "Local DTR: " << locDTR << " [Mbit/s]" << std::endl
		<< "Network DTR: " << netDTR << " [Mbit/s]" << std::endl
		<< "Local sockets speedup: " << ((locDTR / netDTR) * 100) - 100 << '%' << std::endl
		<< "=================================" << std::endl;

	fos << "=================================" << std::endl
		<< "Local DTR: " << locDTR << " [Mbit/s]" << std::endl
		<< "Network DTR: " << netDTR << " [Mbit/s]" << std::endl
		<< "Local sockets speedup: " << ((locDTR / netDTR) * 100) - 100 << '%' << std::endl
		<< "=================================" << std::endl;

	fos.close();
}
Code example #25
void OsmAnd::ObfMapSectionReader_P::loadMapObjects(
    const ObfReader_P& reader,
    const std::shared_ptr<const ObfMapSectionInfo>& section,
    ZoomLevel zoom,
    const AreaI* bbox31,
    QList< std::shared_ptr<const OsmAnd::BinaryMapObject> >* resultOut,
    MapSurfaceType* outBBoxOrSectionSurfaceType,
    const ObfMapSectionReader::FilterByIdFunction filterById,
    const VisitorFunction visitor,
    DataBlocksCache* cache,
    QList< std::shared_ptr<const DataBlock> >* outReferencedCacheEntries,
    const std::shared_ptr<const IQueryController>& queryController,
    ObfMapSectionReader_Metrics::Metric_loadMapObjects* const metric)
{
    const auto cis = reader.getCodedInputStream().get();

    const auto filterReadById =
        [filterById, zoom]
        (const std::shared_ptr<const ObfMapSectionInfo>& section,
        const ObfObjectId mapObjectId,
        const AreaI& bbox,
        const ZoomLevel firstZoomLevel,
        const ZoomLevel lastZoomLevel) -> bool
        {
            return filterById(section, mapObjectId, bbox, firstZoomLevel, lastZoomLevel, zoom);
        };

    // Ensure encoding/decoding rules are read
    if (section->_p->_attributeMappingLoaded.loadAcquire() == 0)
    {
        QMutexLocker scopedLocker(&section->_p->_attributeMappingLoadMutex);
        if (!section->_p->_attributeMapping)
        {
            // Read encoding/decoding rules
            cis->Seek(section->offset);
            auto oldLimit = cis->PushLimit(section->length);

            const std::shared_ptr<ObfMapSectionAttributeMapping> attributeMapping(new ObfMapSectionAttributeMapping());
            readAttributeMapping(reader, section, attributeMapping);
            section->_p->_attributeMapping = attributeMapping;

            cis->PopLimit(oldLimit);

            section->_p->_attributeMappingLoaded.storeRelease(1);
        }
    }

    ObfMapSectionReader_Metrics::Metric_loadMapObjects localMetric;

    auto bboxOrSectionSurfaceType = MapSurfaceType::Undefined;
    if (outBBoxOrSectionSurfaceType)
        *outBBoxOrSectionSurfaceType = bboxOrSectionSurfaceType;
    QList< std::shared_ptr<const DataBlock> > danglingReferencedCacheEntries;
    for (const auto& mapLevel : constOf(section->levels))
    {
        // Update metric
        if (metric)
            metric->visitedLevels++;

        if (mapLevel->minZoom > zoom || mapLevel->maxZoom < zoom)
            continue;

        if (bbox31)
        {
            const Stopwatch bboxLevelCheckStopwatch(metric != nullptr);

            const auto shouldSkip =
                !bbox31->contains(mapLevel->area31) &&
                !mapLevel->area31.contains(*bbox31) &&
                !bbox31->intersects(mapLevel->area31);

            if (metric)
                metric->elapsedTimeForLevelsBbox += bboxLevelCheckStopwatch.elapsed();

            if (shouldSkip)
                continue;
        }

        const Stopwatch treeNodesStopwatch(metric != nullptr);
        if (metric)
            metric->acceptedLevels++;

        // If there are no tree nodes in the map level, they have not been loaded yet.
        // Since loading may be called from multiple threads, loading of the root nodes needs synchronization.
        if (mapLevel->_p->_rootNodesLoaded.loadAcquire() == 0)
        {
            QMutexLocker scopedLocker(&mapLevel->_p->_rootNodesLoadMutex);
            if (!mapLevel->_p->_rootNodes)
            {
                cis->Seek(mapLevel->offset);
                auto oldLimit = cis->PushLimit(mapLevel->length);

                cis->Skip(mapLevel->firstDataBoxInnerOffset);
                const std::shared_ptr< QList< std::shared_ptr<const ObfMapSectionLevelTreeNode> > > rootNodes(
                    new QList< std::shared_ptr<const ObfMapSectionLevelTreeNode> >());
                readMapLevelTreeNodes(reader, section, mapLevel, *rootNodes);
                mapLevel->_p->_rootNodes = rootNodes;

                cis->PopLimit(oldLimit);

                mapLevel->_p->_rootNodesLoaded.storeRelease(1);
            }
        }

        // Collect tree nodes with data
        QList< std::shared_ptr<const ObfMapSectionLevelTreeNode> > treeNodesWithData;
        for (const auto& rootNode : constOf(*mapLevel->_p->_rootNodes))
        {
            // Update metric
            if (metric)
                metric->visitedNodes++;

            if (bbox31)
            {
                const Stopwatch bboxNodeCheckStopwatch(metric != nullptr);

                const auto shouldSkip =
                    !bbox31->contains(rootNode->area31) &&
                    !rootNode->area31.contains(*bbox31) &&
                    !bbox31->intersects(rootNode->area31);

                // Update metric
                if (metric)
                    metric->elapsedTimeForNodesBbox += bboxNodeCheckStopwatch.elapsed();

                if (shouldSkip)
                    continue;
            }

            // Update metric
            if (metric)
                metric->acceptedNodes++;

            if (rootNode->dataOffset > 0)
                treeNodesWithData.push_back(rootNode);

            auto rootSubnodesSurfaceType = MapSurfaceType::Undefined;
            if (rootNode->hasChildrenDataBoxes)
            {
                cis->Seek(rootNode->offset);
                auto oldLimit = cis->PushLimit(rootNode->length);

                cis->Skip(rootNode->firstDataBoxInnerOffset);
                readTreeNodeChildren(reader, section, rootNode, rootSubnodesSurfaceType, &treeNodesWithData, bbox31, queryController, metric);
                
                ObfReaderUtilities::ensureAllDataWasRead(cis);
                cis->PopLimit(oldLimit);
            }

            const auto surfaceTypeToMerge = (rootSubnodesSurfaceType != MapSurfaceType::Undefined) ? rootSubnodesSurfaceType : rootNode->surfaceType;
            if (surfaceTypeToMerge != MapSurfaceType::Undefined)
            {
                if (bboxOrSectionSurfaceType == MapSurfaceType::Undefined)
                    bboxOrSectionSurfaceType = surfaceTypeToMerge;
                else if (bboxOrSectionSurfaceType != surfaceTypeToMerge)
                    bboxOrSectionSurfaceType = MapSurfaceType::Mixed;
            }
        }

        // Sort blocks by data offset to force forward-only seeking
        std::sort(treeNodesWithData.begin(), treeNodesWithData.end(),
            []
            (const std::shared_ptr<const ObfMapSectionLevelTreeNode>& l, const std::shared_ptr<const ObfMapSectionLevelTreeNode>& r) -> bool
            {
                return l->dataOffset < r->dataOffset;
            });

        // Update metric
        const Stopwatch mapObjectsStopwatch(metric != nullptr);
        if (metric)
            metric->elapsedTimeForNodes += treeNodesStopwatch.elapsed();

        // Read map objects from their blocks
        for (const auto& treeNode : constOf(treeNodesWithData))
        {
            if (queryController && queryController->isAborted())
                break;

            DataBlockId blockId;
            blockId.sectionRuntimeGeneratedId = section->runtimeGeneratedId;
            blockId.offset = treeNode->dataOffset;

            if (cache && cache->shouldCacheBlock(blockId, treeNode->area31, bbox31))
            {
                // In case cache is provided, read and cache
                const auto levelZooms = Utilities::enumerateZoomLevels(treeNode->level->minZoom, treeNode->level->maxZoom);

                std::shared_ptr<const DataBlock> dataBlock;
                std::shared_ptr<const DataBlock> sharedBlockReference;
                proper::shared_future< std::shared_ptr<const DataBlock> > futureSharedBlockReference;
                if (cache->obtainReferenceOrFutureReferenceOrMakePromise(blockId, zoom, levelZooms, sharedBlockReference, futureSharedBlockReference))
                {
                    // Got reference or future reference

                    // Update metric
                    if (metric)
                        metric->mapObjectsBlocksReferenced++;

                    if (sharedBlockReference)
                    {
                        // Ok, this block was already loaded, just use it
                        dataBlock = sharedBlockReference;
                    }
                    else
                    {
                        // Wait until it will be loaded
                        dataBlock = futureSharedBlockReference.get();
                    }
                }
                else
                {
                    // Made a promise, so load entire block into temporary storage
                    QList< std::shared_ptr<const BinaryMapObject> > mapObjects;

                    cis->Seek(treeNode->dataOffset);

                    gpb::uint32 length;
                    cis->ReadVarint32(&length);
                    const auto oldLimit = cis->PushLimit(length);

                    readMapObjectsBlock(
                        reader,
                        section,
                        treeNode,
                        &mapObjects,
                        nullptr,
                        nullptr,
                        nullptr,
                        nullptr,
                        metric ? &localMetric : nullptr);

                    ObfReaderUtilities::ensureAllDataWasRead(cis);
                    cis->PopLimit(oldLimit);

                    // Update metric
                    if (metric)
                        metric->mapObjectsBlocksRead++;

                    // Create a data block and share it
                    dataBlock.reset(new DataBlock(blockId, treeNode->area31, treeNode->surfaceType, mapObjects));
                    cache->fulfilPromiseAndReference(blockId, levelZooms, dataBlock);
                }

                if (outReferencedCacheEntries)
                    outReferencedCacheEntries->push_back(dataBlock);
                else
                    danglingReferencedCacheEntries.push_back(dataBlock);

                // Process data block
                for (const auto& mapObject : constOf(dataBlock->mapObjects))
                {
                    //////////////////////////////////////////////////////////////////////////
                    //if (mapObject->id.getOsmId() == 49048972u)
                    //{
                    //    if (bbox31 && *bbox31 == Utilities::tileBoundingBox31(TileId::fromXY(1052, 673), ZoomLevel11))
                    //    {
                    //        const auto t = mapObject->toString();
                    //        int i = 5;
                    //    }
                    //}
                    //////////////////////////////////////////////////////////////////////////

                    if (metric)
                        metric->visitedMapObjects++;

                    if (bbox31)
                    {
                        const auto shouldNotSkip =
                            mapObject->bbox31.contains(*bbox31) ||
                            bbox31->intersects(mapObject->bbox31);

                        if (!shouldNotSkip)
                            continue;
                    }

                    // Check if map object is desired
                    const auto shouldReject = filterById && !filterById(
                        section,
                        mapObject->id,
                        mapObject->bbox31,
                        mapObject->level->minZoom,
                        mapObject->level->maxZoom,
                        zoom);
                    if (shouldReject)
                        continue;

                    if (!visitor || visitor(mapObject))
                    {
                        if (metric)
                            metric->acceptedMapObjects++;

                        if (resultOut)
                            resultOut->push_back(qMove(mapObject));
                    }
                }
            }
            else
            {
                // In case there's no cache, simply read

                cis->Seek(treeNode->dataOffset);

                gpb::uint32 length;
                cis->ReadVarint32(&length);
                const auto oldLimit = cis->PushLimit(length);

                readMapObjectsBlock(
                    reader,
                    section,
                    treeNode,
                    resultOut,
                    bbox31,
                    filterById != nullptr ? filterReadById : FilterReadingByIdFunction(),
                    visitor,
                    queryController,
                    metric);

                ObfReaderUtilities::ensureAllDataWasRead(cis);
                cis->PopLimit(oldLimit);

                // Update metric
                if (metric)
                    metric->mapObjectsBlocksRead++;
            }

            // Update metric
            if (metric)
                metric->mapObjectsBlocksProcessed++;
        }

        // Update metric
        if (metric)
            metric->elapsedTimeForMapObjectsBlocks += mapObjectsStopwatch.elapsed();
    }

    // In case cache was supplied, but referenced cache entries output collection was not specified,
    // release all dangling references
    if (cache && !outReferencedCacheEntries)
    {
        for (auto& referencedCacheEntry : danglingReferencedCacheEntries)
            cache->releaseReference(referencedCacheEntry->id, zoom, referencedCacheEntry);
        danglingReferencedCacheEntries.clear();
    }

    if (outBBoxOrSectionSurfaceType)
        *outBBoxOrSectionSurfaceType = bboxOrSectionSurfaceType;

    // In case cache was used, and metric was requested, some values must be taken from different parts
    if (cache && metric)
    {
        metric->elapsedTimeForOnlyAcceptedMapObjects += localMetric.elapsedTimeForOnlyAcceptedMapObjects;
    }
}
Code example #26
File: sysutils.cpp Project: Bhushan1002/SFrame
UInt64
timeElapsed()  // nofail, in milliseconds.
{
    return s_stopwatch.elapsed();
}
Code example #27
File: benchmark.cpp Project: 447327642/mesos
Try<Nothing> Benchmark::execute(int argc, char** argv)
{
  flags.setUsageMessage(
      "Usage: " + name() + " [options]\n"
      "\n"
      "This command is used to run a performance test on the\n"
      "replicated log. It takes a trace file of write sizes\n"
      "and replays that trace to measure the latency of each\n"
      "write. The data to be written for each write can be\n"
      "specified using the --type flag.\n"
      "\n");

  // Configure the tool by parsing command line arguments.
  if (argc > 0 && argv != NULL) {
    Try<Nothing> load = flags.load(None(), argc, argv);
    if (load.isError()) {
      return Error(flags.usage(load.error()));
    }

    if (flags.help) {
      return Error(flags.usage());
    }

    process::initialize();
    logging::initialize(argv[0], flags);
  }

  if (flags.quorum.isNone()) {
    return Error(flags.usage("Missing required option --quorum"));
  }

  if (flags.path.isNone()) {
    return Error(flags.usage("Missing required option --path"));
  }

  if (flags.servers.isNone()) {
    return Error(flags.usage("Missing required option --servers"));
  }

  if (flags.znode.isNone()) {
    return Error(flags.usage("Missing required option --znode"));
  }

  if (flags.input.isNone()) {
    return Error(flags.usage("Missing required option --input"));
  }

  if (flags.output.isNone()) {
    return Error(flags.usage("Missing required option --output"));
  }

  // Initialize the log.
  if (flags.initialize) {
    Initialize initialize;
    initialize.flags.path = flags.path;

    Try<Nothing> execution = initialize.execute();
    if (execution.isError()) {
      return Error(execution.error());
    }
  }

  // Create the log.
  Log log(
      flags.quorum.get(),
      flags.path.get(),
      flags.servers.get(),
      Seconds(10),
      flags.znode.get());

  // Create the log writer.
  Log::Writer writer(&log);

  Future<Option<Log::Position> > position = writer.start();

  if (!position.await(Seconds(15))) {
    return Error("Failed to start a log writer: timed out");
  } else if (!position.isReady()) {
    return Error("Failed to start a log writer: " +
                 (position.isFailed()
                  ? position.failure()
                  : "Discarded future"));
  }

  // Statistics to output.
  vector<Bytes> sizes;
  vector<Duration> durations;
  vector<Time> timestamps;

  // Read sizes from the input trace file.
  ifstream input(flags.input.get().c_str());
  if (!input.is_open()) {
    return Error("Failed to open the trace file " + flags.input.get());
  }

  string line;
  while (getline(input, line)) {
    Try<Bytes> size = Bytes::parse(strings::trim(line));
    if (size.isError()) {
      return Error("Failed to parse the trace file: " + size.error());
    }

    sizes.push_back(size.get());
  }

  input.close();

  // Generate the data to be written.
  vector<string> data;
  for (size_t i = 0; i < sizes.size(); i++) {
    if (flags.type == "one") {
      data.push_back(string(sizes[i].bytes(), static_cast<char>(0xff)));
    } else if (flags.type == "random") {
      data.push_back(string(sizes[i].bytes(), ::random() % 256));
    } else {
      data.push_back(string(sizes[i].bytes(), 0));
    }
  }

  Stopwatch stopwatch;
  stopwatch.start();

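  // Replay the trace: time each append individually while the outer stopwatch tracks the total.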
  for (size_t i = 0; i < sizes.size(); i++) {
    Stopwatch stopwatch;
    stopwatch.start();

    position = writer.append(data[i]);

    if (!position.await(Seconds(10))) {
      return Error("Failed to append: timed out");
    } else if (!position.isReady()) {
      return Error("Failed to append: " +
                   (position.isFailed()
                    ? position.failure()
                    : "Discarded future"));
    } else if (position.get().isNone()) {
      return Error("Failed to append: exclusive write promise lost");
    }

    durations.push_back(stopwatch.elapsed());
    timestamps.push_back(Clock::now());
  }

  cout << "Total number of appends: " << sizes.size() << endl;
  cout << "Total time used: " << stopwatch.elapsed() << endl;

  // Output statistics.
  ofstream output(flags.output.get().c_str());
  if (!output.is_open()) {
    return Error("Failed to open the output file " + flags.output.get());
  }

  for (size_t i = 0; i < sizes.size(); i++) {
    output << timestamps[i]
           << " Appended " << sizes[i].bytes() << " bytes"
           << " in " << durations[i].ms() << " ms" << endl;
  }

  return Nothing();
}
Code example #28
std::shared_ptr<SkBitmap> OsmAnd::MapRasterLayerProvider_GPU_P::rasterize(
    const TileId tileId,
    const ZoomLevel zoom,
    const std::shared_ptr<const MapPrimitivesProvider::Data>& primitivesTile,
    MapRasterLayerProvider_Metrics::Metric_obtainData* const metric_,
    const IQueryController* const queryController)
{
#if OSMAND_PERFORMANCE_METRICS
    MapRasterLayerProvider_Metrics::Metric_obtainData localMetric;
    const auto metric = metric_ ? metric_ : &localMetric;
#else
    const auto metric = metric_;
#endif

    const Stopwatch totalStopwatch(
#if OSMAND_PERFORMANCE_METRICS
        true
#else
        metric != nullptr
#endif // OSMAND_PERFORMANCE_METRICS
        );

    //TODO: SkGpuDevice
    // Allocate rasterization target
    const auto tileSize = owner->getTileSize();
    const std::shared_ptr<SkBitmap> rasterizationSurface(new SkBitmap());
    rasterizationSurface->setConfig(SkBitmap::kARGB_8888_Config, tileSize, tileSize);
    if (!rasterizationSurface->allocPixels())
    {
        LogPrintf(LogSeverityLevel::Error, "Failed to allocate buffer for ARGB8888 rasterization surface %dx%d", tileSize, tileSize);
        return nullptr;
    }
    SkBitmapDevice rasterizationTarget(*rasterizationSurface);

    // Create rasterization canvas
    SkCanvas canvas(&rasterizationTarget);

    // Perform actual rasterization
    if (!owner->fillBackground)
        canvas.clear(SK_ColorTRANSPARENT);
    _mapRasterizer->rasterize(
        Utilities::tileBoundingBox31(tileId, zoom),
        primitivesTile->primitivisedObjects,
        canvas,
        owner->fillBackground,
        nullptr,
        metric ? metric->findOrAddSubmetricOfType<MapRasterizer_Metrics::Metric_rasterize>().get() : nullptr,
        queryController);

    if (metric)
        metric->elapsedTime += totalStopwatch.elapsed();

#if OSMAND_PERFORMANCE_METRICS
#if OSMAND_PERFORMANCE_METRICS <= 1
    LogPrintf(LogSeverityLevel::Info,
        "%dx%d@%d rasterized on CPU in %fs",
        tileId.x,
        tileId.y,
        zoom,
        totalStopwatch.elapsed());
#else
    LogPrintf(LogSeverityLevel::Info,
        "%dx%d@%d rasterized on CPU in %fs:\n%s",
        tileId.x,
        tileId.y,
        zoom,
        totalStopwatch.elapsed(),
        qPrintable(metric ? metric->toString(QLatin1String("\t - ")) : QLatin1String("(null)")));
#endif // OSMAND_PERFORMANCE_METRICS <= 1
#endif // OSMAND_PERFORMANCE_METRICS

    return rasterizationSurface;
}
Code example #29
File: leveldb.cpp Project: EronWright/mesos
Try<Nothing> LevelDBStorage::persist(const Action& action)
{
  Stopwatch stopwatch;
  stopwatch.start();

  Record record;
  record.set_type(Record::ACTION);
  record.mutable_action()->MergeFrom(action);

  string value;

  if (!record.SerializeToString(&value)) {
    return Error("Failed to serialize record");
  }

  leveldb::WriteOptions options;
  options.sync = true;

  leveldb::Status status = db->Put(options, encode(action.position()), value);

  if (!status.ok()) {
    return Error(status.ToString());
  }

  // Updated the first position. Notice that we use 'min' here instead
  // of checking 'isNone()' because it's likely that log entries are
  // written out of order during catch-up (e.g. if a random bulk
  // catch-up policy is used).
  first = min(first, action.position());

  LOG(INFO) << "Persisting action (" << value.size()
            << " bytes) to leveldb took " << stopwatch.elapsed();

  // Delete positions if a truncate action has been *learned*. Note
  // that we do this in a best-effort fashion (i.e., we ignore any
  // failures to the database since we can always try again).
  if (action.has_type() && action.type() == Action::TRUNCATE &&
      action.has_learned() && action.learned()) {
    CHECK(action.has_truncate());

    stopwatch.start(); // Restart the stopwatch.

    // To actually perform the truncation in leveldb we need to remove
    // all the keys that represent positions no longer in the log. We
    // do this by attempting to delete all keys that represent the
    // first position we know is still in leveldb up to (but
    // excluding) the truncate position. Note that this works because
    // the semantics of WriteBatch are such that even if the position
    // doesn't exist (which is possible because this replica has some
    // holes), we can attempt to delete the key that represents it and
    // it will just ignore that key. This is *much* cheaper than
    // actually iterating through the entire database instead (which
    // was, for posterity, the original implementation). In addition,
    // caching the "first" position we know is in the database is
    // cheaper than using an iterator to determine the first position
    // (which was, for posterity, the second implementation).

    leveldb::WriteBatch batch;

    CHECK_SOME(first);

    // Add positions up to (but excluding) the truncate position to
    // the batch starting at the first position still in leveldb. It's
    // likely that the first position is greater than the truncate
    // position (e.g., during catch-up). In that case, we do nothing
    // because there is nothing we can truncate.
    // TODO(jieyu): We might miss a truncation if we do random (i.e.,
    // out of order) bulk catch-up and the truncate operation is
    // caught up first.
    uint64_t index = 0;
    while ((first.get() + index) < action.truncate().to()) {
      batch.Delete(encode(first.get() + index));
      index++;
    }

    // If we added any positions, attempt to delete them!
    if (index > 0) {
      // We do this write asynchronously (e.g., using default options).
      leveldb::Status status = db->Write(leveldb::WriteOptions(), &batch);

      if (!status.ok()) {
        LOG(WARNING) << "Ignoring leveldb batch delete failure: "
                     << status.ToString();
      } else {
        // Save the new first position!
        CHECK_LT(first.get(), action.truncate().to());
        first = action.truncate().to();

        LOG(INFO) << "Deleting ~" << index
                  << " keys from leveldb took " << stopwatch.elapsed();
      }
    }
  }

  return Nothing();
}
Code example #30
void ReplicatedMergeTreeBlockOutputStream::write(const Block & block)
{
    last_block_is_duplicate = false;

    /// TODO Is it possible to not lock the table structure here?
    storage.data.delayInsertOrThrowIfNeeded(&storage.partial_shutdown_event);

    auto zookeeper = storage.getZooKeeper();
    assertSessionIsNotExpired(zookeeper);

    /** If write is with quorum, then we check that the required number of replicas is now live,
      *  and also that for all previous parts for which quorum is required, this quorum is reached.
      * And also check that during the insertion, the replica was not reinitialized or disabled (by the value of `is_active` node).
      * TODO Too complex logic, you can do better.
      */
    if (quorum)
        checkQuorumPrecondition(zookeeper);

    auto part_blocks = storage.writer.splitBlockIntoParts(block);

    for (auto & current_block : part_blocks)
    {
        Stopwatch watch;

        /// Write part to the filesystem under temporary name. Calculate a checksum.

        MergeTreeData::MutableDataPartPtr part = storage.writer.writeTempPart(current_block);

        String block_id;

        if (deduplicate)
        {
            SipHash hash;
            part->checksums.computeTotalChecksumDataOnly(hash);
            union
            {
                char bytes[16];
                UInt64 words[2];
            } hash_value;
            hash.get128(hash_value.bytes);

            /// We add the hash from the data and partition identifier to deduplication ID.
            /// That is, do not insert the same data to the same partition twice.
            block_id = part->info.partition_id + "_" + toString(hash_value.words[0]) + "_" + toString(hash_value.words[1]);

            LOG_DEBUG(log, "Wrote block with ID '" << block_id << "', " << block.rows() << " rows");
        }
        else
        {
            LOG_DEBUG(log, "Wrote block with " << block.rows() << " rows");
        }

        try
        {
            commitPart(zookeeper, part, block_id);

            /// Set a special error code if the block is duplicate
            int error = (deduplicate && last_block_is_duplicate) ? ErrorCodes::INSERT_WAS_DEDUPLICATED : 0;
            PartLog::addNewPart(storage.context, part, watch.elapsed(), ExecutionStatus(error));
        }
        catch (...)
        {
            PartLog::addNewPart(storage.context, part, watch.elapsed(), ExecutionStatus::fromCurrentException(__PRETTY_FUNCTION__));
            throw;
        }
    }
}