Example #1
void TileCacheTests::testPerformance()
{
    std::string documentPath, documentURL;
    getDocumentPathAndURL("hello.odt", documentPath, documentURL);
    Poco::Net::HTTPRequest request(Poco::Net::HTTPRequest::HTTP_GET, documentURL);

    auto socket = *loadDocAndGetSocket(_uri, documentURL, "tile-performance ");
    getResponseMessage(socket, "invalidatetiles:");

    Poco::Timestamp timestamp;
    for (auto x = 0; x < 5; ++x)
    {
        sendTextFrame(socket, "tilecombine part=0 width=256 height=256 tileposx=0,3840,7680,11520,0,3840,7680,11520 tileposy=0,0,0,0,3840,3840,3840,3840 tilewidth=3840 tileheight=3840");
        for (auto i = 0; i < 8; ++i)
        {
            auto tile = getResponseMessage(socket, "tile:", "tile-performance ");
            CPPUNIT_ASSERT_MESSAGE("did not receive a tile: message as expected", !tile.empty());
        }
    }

    std::cerr << "Tile rendering roundtrip for 5 x 8 tiles combined: " << timestamp.elapsed() / 1000.
              << " ms. Per-tilecombine: " << timestamp.elapsed() / (1000. * 5)
              << " ms. Per-tile: " << timestamp.elapsed() / (1000. * 5 * 8) << "ms."
              << std::endl;

    socket.shutdown();
}
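Note: Poco::Timestamp::elapsed() reports microseconds, which is why the test divides by 1000. to print milliseconds. A minimal scoped-timer sketch of the same pattern (ScopedTimer is an illustrative name, not part of the test suite):

#include <iostream>
#include <string>
#include <Poco/Timestamp.h>

// Prints the wall-clock time spent in a scope when the timer is destroyed.
class ScopedTimer
{
public:
    explicit ScopedTimer(const std::string& label) : _label(label) {}
    ~ScopedTimer()
    {
        // elapsed() is in microseconds; divide by 1000.0 for milliseconds.
        std::cerr << _label << ": " << _timestamp.elapsed() / 1000.0 << " ms" << std::endl;
    }
private:
    std::string _label;
    Poco::Timestamp _timestamp; // starts counting at construction
};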
Example #2
int
FFmpegTagger::IORead(void *opaque, uint8_t *buf, int buf_size)
{
    URLContext* pUrlContext = (URLContext*)opaque;

    std::istream* pInputStream = (std::istream*)pUrlContext->priv_data;
    if (!pInputStream) {
        LOGNS(Omm::AvStream, avstream, error, "IORead failed, std::istream not set");
        return -1;
    }

    pInputStream->read((char*)buf, buf_size);
    Poco::Timestamp::TimeDiff time = _timestamp.elapsed();
    _timestamp.update();
    Poco::Timestamp::TimeDiff startTime = _startTimestamp.elapsed();
    int bytes = pInputStream->gcount();
    _totalBytes += bytes;
    if (!pInputStream->good()) {
        LOGNS(Omm::AvStream, avstream, error, "IORead failed to read from std::istream");
        return -1;
    }
    // Guard against a zero elapsed time (two reads within the same microsecond)
    // before using it as a divisor.
    LOGNS(Omm::AvStream, avstream, trace, "IORead() bytes read: " + Poco::NumberFormatter::format(bytes)
        + " in " + Poco::NumberFormatter::format(time / 1000.0, 3) + " msec ("
        + Poco::NumberFormatter::format(time > 0 ? bytes * 1000 / time : 0) + " kB/s), total: "
        + Poco::NumberFormatter::format(_totalBytes / 1000) + " kB in "
        + Poco::NumberFormatter::format(startTime / (Poco::Timestamp::TimeDiff)1000000) + " sec ("
        + Poco::NumberFormatter::format(startTime > 0 ? (Poco::Timestamp::TimeDiff)(_totalBytes * 1000) / startTime : 0) + " kB/s)");
    return bytes;
}
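The kB/s figures follow from elapsed() being in microseconds: throughput in bytes/s is bytes * 10^6 / time_us, and dividing by 1000 to get kilobytes leaves bytes * 1000 / time_us, exactly the bytes*1000/time expression in the trace (and likewise _totalBytes*1000/startTime for the running total). For example, 8192 bytes read in 2000 us gives 8192 * 1000 / 2000 = 4096 kB/s.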
Example #3
void ThreadImpl::sleepImpl(long milliseconds)
{
#if defined(__digital__)
	// This is specific to DECThreads
	struct timespec interval;
	interval.tv_sec  = milliseconds / 1000;
	interval.tv_nsec = (milliseconds % 1000)*1000000;
	pthread_delay_np(&interval);
#elif POCO_OS == POCO_OS_LINUX || POCO_OS == POCO_OS_ANDROID || POCO_OS == POCO_OS_MAC_OS_X || POCO_OS == POCO_OS_QNX || POCO_OS == POCO_OS_VXWORKS
	Poco::Timespan remainingTime(1000*Poco::Timespan::TimeDiff(milliseconds));
	int rc;
	do
	{
		struct timespec ts;
		ts.tv_sec  = (long) remainingTime.totalSeconds();
		ts.tv_nsec = (long) remainingTime.useconds()*1000;
		Poco::Timestamp start;
		rc = ::nanosleep(&ts, 0);
		if (rc < 0 && errno == EINTR)
		{
			Poco::Timespan waited = start.elapsed();
			if (waited < remainingTime)
				remainingTime -= waited;
			else
				remainingTime = 0;
		}
	}
	while (remainingTime > 0 && rc < 0 && errno == EINTR);
	if (rc < 0 && remainingTime > 0) throw Poco::SystemException("Thread::sleep(): nanosleep() failed");
#else
	Poco::Timespan remainingTime(1000*Poco::Timespan::TimeDiff(milliseconds));
	int rc;
	do
	{
		struct timeval tv;
		tv.tv_sec  = (long) remainingTime.totalSeconds();
		tv.tv_usec = (long) remainingTime.useconds();
		Poco::Timestamp start;
		rc = ::select(0, NULL, NULL, NULL, &tv);
		if (rc < 0 && errno == EINTR)
		{
			Poco::Timespan waited = start.elapsed();
			if (waited < remainingTime)
				remainingTime -= waited;
			else
				remainingTime = 0;
		}
	}
	while (remainingTime > 0 && rc < 0 && errno == EINTR);
	if (rc < 0 && remainingTime > 0) throw Poco::SystemException("Thread::sleep(): select() failed");
#endif
}
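The measure-and-subtract retry after EINTR generalizes beyond Poco. A hedged sketch of the same loop with std::chrono, assuming a POSIX nanosleep() (sleepFully is a made-up name):

#include <cerrno>
#include <chrono>
#include <time.h>

// Sleeps for the requested duration, resuming after EINTR with the time
// actually waited subtracted from the remainder, as in sleepImpl() above.
void sleepFully(std::chrono::microseconds remaining)
{
    using Clock = std::chrono::steady_clock;
    int rc;
    do
    {
        struct timespec ts;
        ts.tv_sec  = static_cast<time_t>(remaining.count() / 1000000);
        ts.tv_nsec = static_cast<long>((remaining.count() % 1000000) * 1000);
        const Clock::time_point start = Clock::now();
        rc = ::nanosleep(&ts, nullptr);
        if (rc < 0 && errno == EINTR)
        {
            const auto waited = std::chrono::duration_cast<std::chrono::microseconds>(Clock::now() - start);
            remaining = waited < remaining ? remaining - waited : std::chrono::microseconds(0);
        }
    }
    while (remaining.count() > 0 && rc < 0 && errno == EINTR);
}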
Example #4
void ThreadTest::testSleep()
{
	Poco::Timestamp start;
	Thread::sleep(200);
	Poco::Timespan elapsed = start.elapsed();
	assert (elapsed.totalMilliseconds() >= 190 && elapsed.totalMilliseconds() < 250);
}
Example #5
void StartupStore(const TCHAR *Str, ...)
{
  TCHAR buf[(MAX_PATH*2)+1]; // 260 chars normally  FIX 100205
  va_list ap;

  va_start(ap, Str);
  _vsntprintf(buf, countof(buf), Str, ap);
  va_end(ap);

  LockStartupStore();

  FILE *startupStoreFile = NULL;
  static TCHAR szFileName[MAX_PATH];
  static Poco::Timestamp StartTime;
  static bool initialised = false;
  if (!initialised) {
	LocalPath(szFileName, TEXT(LKF_RUNLOG));
	initialised = true;
  } 

  startupStoreFile = _tfopen(szFileName, TEXT("ab+"));
  if (startupStoreFile != NULL) {
    char sbuf[(MAX_PATH*2)+1]; // FIX 100205
    
    int i = TCHAR2utf(buf, sbuf, sizeof(sbuf));
    
    if (i > 0) {
      if (sbuf[i - 1] == 0x0a && (i == 1 || (i > 1 && sbuf[i-2] != 0x0d)))
        sprintf(sbuf + i - 1, SNEWLINE);
      // Divide before narrowing: casting elapsed() (Int64 microseconds) to
      // unsigned int first would wrap after about 71 minutes of uptime.
      fprintf(startupStoreFile, "[%09u] %s", (unsigned int)(StartTime.elapsed() / 1000), sbuf);
    }
    fclose(startupStoreFile);
  }
  UnlockStartupStore();
}
Example #6
void TemplateMatchingTrainer::evaluateBaseline(const std::string &meanForAlign, const std::string &dataPath, const std::string &pcaDepthmapPath)
{
    FaceAligner aligner(Mesh::fromFile(meanForAlign), std::string());
    std::vector<std::string> fileNames = Face::LinAlg::Loader::listFiles(dataPath, "*.binz", Face::LinAlg::Loader::Filename);
    int n = fileNames.size();
    std::vector<int> ids = Face::Biometrics::BioDataProcessing::getIds(fileNames, "-");
    std::vector<Mesh> meshes(n);
    std::vector<double> times(n);

    #pragma omp parallel for
    for (int i = 0; i < n; i++)
    {
        //if (ids[i] >= 1002) break;
        //if (ids[i] < 5000) continue;

        Mesh m = Mesh::fromFile(dataPath + Poco::Path::separator() + fileNames[i]);
        SurfaceProcessor::mdenoising(m, 0.01, 10, 0.01);

        Poco::Timestamp before;
        aligner.icpAlign(m, 50, FaceAligner::TemplateMatching);
        times[i] = before.elapsed()/1000;
        meshes[i] = m;
    }

    std::cout << "mean align time (ms) :" << (std::accumulate(times.begin(), times.end(), 0.0)/n) << std::endl;
    evaluate(meshes, ids, pcaDepthmapPath);
}
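The parallel section above is race-free without locks because every result vector is preallocated to n elements and iteration i writes only to slot i. A minimal sketch of the pattern (timedParallelWork is a made-up name):

#include <iostream>
#include <numeric>
#include <vector>
#include <Poco/Timestamp.h>

// Times n independent work items in parallel; each thread writes only times[i].
void timedParallelWork(int n)
{
    std::vector<double> times(n); // one slot per iteration, no shared writes
    #pragma omp parallel for
    for (int i = 0; i < n; i++)
    {
        Poco::Timestamp before;
        // ... per-item work goes here ...
        times[i] = before.elapsed() / 1000.0; // microseconds -> milliseconds
    }
    std::cout << "mean time (ms): " << (std::accumulate(times.begin(), times.end(), 0.0) / n) << std::endl;
}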
Example #7
Notification* TimedNotificationQueue::waitDequeueNotification(long milliseconds)
{
	while (milliseconds >= 0)
	{
		_mutex.lock();
		NfQueue::iterator it = _nfQueue.begin();
		if (it != _nfQueue.end())
		{
			_mutex.unlock();
			Poco::Timestamp now;
			Timestamp::TimeDiff sleep = it->first - now;
			if (sleep <= 0)
			{
				return dequeueOne(it).duplicate();
			}
			else if (sleep <= 1000*Timestamp::TimeDiff(milliseconds))
			{
				if (!wait(sleep))
				{
					return dequeueOne(it).duplicate();
				}
				else 
				{
					milliseconds -= static_cast<long>((now.elapsed() + 999)/1000);
					continue;
				}
			}
		}
		else
		{
			_mutex.unlock();
		}
		if (milliseconds > 0)
		{
			Poco::Timestamp now;
			_nfAvailable.tryWait(milliseconds);
			milliseconds -= static_cast<long>((now.elapsed() + 999)/1000);
		}
		else return 0;
	}
	return 0;
}
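A hedged usage sketch of this queue, assuming the stock Poco::Foundation API (enqueueNotification() keys entries by an absolute Poco::Timestamp, and Timestamp arithmetic is in microseconds):

#include <iostream>
#include <Poco/Notification.h>
#include <Poco/TimedNotificationQueue.h>
#include <Poco/Timestamp.h>

int main()
{
    Poco::TimedNotificationQueue queue;

    // Schedule a notification to become due 100 ms from now.
    Poco::Timestamp due;
    due += 100 * 1000; // microseconds
    queue.enqueueNotification(new Poco::Notification, due);

    // Blocks until the notification is due, or gives up after 500 ms.
    Poco::Notification::Ptr pNf = queue.waitDequeueNotification(500);
    std::cout << (pNf ? "dequeued" : "timed out") << std::endl;
    return 0;
}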
Example #8
void MultiBiomertricsAutoTuner::fillEvaluations(const Input &sourceDatabaseTrainData,
                                                const Input &targetDatabaseTrainData,
                                                const Settings &settings, std::vector<Evaluation> &evaluations)
{
    int allUnitsCount = settings.params.size();
    Poco::Timestamp stamp;
    #pragma omp parallel for
    for (int i = 0; i < allUnitsCount; i++)
    {
        trainUnit(settings.params[i], sourceDatabaseTrainData, targetDatabaseTrainData, evaluations, i);
    }
    std::cout << "Training individual classifiers took " << (stamp.elapsed()/1000) << "ms";
}
Example #9
void RealSenseSensorImpl::faceDetect(bool depthMask)
{
	Poco::Int64 maskTime = 0;
	Poco::Int64 detectTime = 0;

	Poco::Timestamp now;
	ImageGrayscale mask(grayscalePreview.clone());
	if (depthMask) {
		PXCCapture::Sample *sample = senseManager->QuerySample();
		mask = RealSenseProcessor::depthMask(sample->depth, sample->color, projection, grayscalePreview);
		//cv::imshow("Preview mask", mask);
	}
	maskTime = now.elapsed();

	now.update();
	cv::resize(mask, resizedPreview, cv::Size(320, 240), 0, 0, CV_INTER_NN);
	roiPreview = resizedPreview(detectRoi);
	tracker->detect(roiPreview);
	detectTime = now.elapsed();

	if (depthMask && tracker->isLastDetect()) {
		cv::Rect faceRoi = toRealSizeRoi(tracker->getLastRegion());
		mask = RealSenseProcessor::fillHoles(faceRoi, mask, grayscalePreview, 0.0);
		//cv::imshow("grayscalePreview mask 2", mask);
	}

	//std::cout << std::dec << "Mask time: " << maskTime << "; Face detect time: " << detectTime << std::endl;

	/*
	if (Face::Settings::instance().debug()) {
		if (tracker->isLastDetect()) {
			cv::rectangle(colorPreview, toRealSizeRoi(tracker->getLastRegion()), 255);
		}
		//cv::imshow("detection", roiPreview);
	}*/
}
Example #10
    virtual void newChild(const std::shared_ptr<Poco::Net::WebSocket> &socket) override
    {
        _childSockets.push_back(socket);
        if (_childSockets.size() > NumToPrefork)
        {
            Poco::Timestamp::TimeDiff elapsed = _startTime.elapsed();

            auto totalTime = (1000. * elapsed)/Poco::Timestamp::resolution();
            Log::info() << "Launched " << _childSockets.size() << " in "
                        << totalTime << Log::end;
            size_t totalPSSKb = 0;
            size_t totalDirtyKb = 0;
            // Skip the last one as it's not completely initialized yet.
            for (size_t i = 0; i < _childSockets.size() - 1; ++i)
            {
                Log::info() << "Getting memory of child #" << i + 1 << " of " << _childSockets.size() << Log::end;
                if (!getMemory(_childSockets[i], totalPSSKb, totalDirtyKb))
                {
                    exitTest(TestResult::TEST_FAILED);
                    return;
                }
            }

            Log::info() << "Memory use total   " << totalPSSKb << "k shared "
                        << totalDirtyKb << "k dirty" << Log::end;

            totalPSSKb /= _childSockets.size();
            totalDirtyKb /= _childSockets.size();
            Log::info() << "Memory use average " << totalPSSKb << "k shared "
                        << totalDirtyKb << "k dirty" << Log::end;

            Log::info() << "Launch time total   " << totalTime << " ms" << Log::end;
            totalTime /= _childSockets.size();
            Log::info() << "Launch time average " << totalTime << " ms" << Log::end;

            if (!_failure.empty())
            {
                Log::error("UnitPrefork failed due to: " + _failure);
                exitTest(TestResult::TEST_FAILED);
            }
            else
            {
                Log::error("UnitPrefork success.");
                exitTest(TestResult::TEST_OK);
            }
        }
    }
Example #11
    virtual void newChild(WebSocketHandler &) override
    {
        _childSockets++;
        LOG_INF("Unit-prefork: got new child, have " << _childSockets << " of " << NumToPrefork);

        if (_childSockets >= NumToPrefork)
        {
            Poco::Timestamp::TimeDiff elapsed = _startTime.elapsed();

            const double totalTime = (1000. * elapsed)/Poco::Timestamp::resolution();
            LOG_INF("Launched " << _childSockets << " in " << totalTime);
            std::cerr << "Launch time total   " << totalTime << " ms" << std::endl;
            std::cerr << "Launch time average " << (totalTime / _childSockets) << " ms" << std::endl;

            exitTest(TestResult::Ok);
        }
    }
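Poco::Timestamp::resolution() is the tick rate per second; since timestamps have microsecond resolution it is 1,000,000, so (1000. * elapsed) / resolution() converts a microsecond TimeDiff to milliseconds. For example, elapsed = 2,500,000 us gives 1000 * 2500000 / 1000000 = 2500 ms.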
Example #12
RealSenseSensor::State RealSenseSensorImpl::go()
{
	try {
		if (brightnessToSet != 0) {
			Poco::Timestamp now;
			check(device->SetColorBrightness(brightnessToSet), "Set color brightness failed");
			printTimeIfBig(now.elapsed(), "setColorBrightnessTime");
			brightnessToSet = 0;
		}

		switch (state) {
			case RealSenseSensor::STATE_OFF:
				// do nothing
				break;

			case RealSenseSensor::STATE_IDLE:
				// Check for face presence every 5th frame
				// Start positioning if face is present long enough
				doIdle();
				break;

			case RealSenseSensor::STATE_POSITIONING:
				// Navigate user to proper position
				doPositioning();
				break;

			case RealSenseSensor::STATE_CAPTURING:
				// Just convert current depth and color buffer to mesh
				doCapturing();
				break;

			case RealSenseSensor::STATE_CAPTURING_DONE:
				// do nothing
				doCapturingDone();
				break;

			default:
				break;
		}
	} catch (std::exception& e) {
		std::cerr << "go() exception in state " << getState() << ": " << e.what() << std::endl;
	}

	return getState();
}
Example #13
void ChildProcessSession::sendFontRendering(const char* /*buffer*/, int /*length*/, StringTokenizer& tokens)
{
    std::string font, decodedFont;
    int width, height;
    unsigned char *pixmap;

    if (tokens.count() < 2 ||
        !getTokenString(tokens[1], "font", font))
    {
        sendTextFrame("error: cmd=renderfont kind=syntax");
        return;
    }

    Poco::Mutex::ScopedLock lock(_mutex);

    _loKitDocument->pClass->setView(_loKitDocument, _viewId);

    URI::decode(font, decodedFont);
    std::string response = "renderfont: " + Poco::cat(std::string(" "), tokens.begin() + 1, tokens.end()) + "\n";

    std::vector<char> output;
    output.resize(response.size());
    std::memcpy(output.data(), response.data(), response.size());

    Poco::Timestamp timestamp;
    pixmap = _loKitDocument->pClass->renderFont(_loKitDocument, decodedFont.c_str(), &width, &height);
    Log::trace("renderFont [" + font + "] rendered in " + std::to_string(timestamp.elapsed()/1000.) + "ms");

    if (pixmap != nullptr)
    {
        if (!Util::encodePNGAndAppendToBuffer(pixmap, width, height, output, LOK_TILEMODE_RGBA))
        {
            sendTextFrame("error: cmd=renderfont kind=failure");
            delete[] pixmap;
            return;
        }
        delete[] pixmap;
    }

    sendBinaryFrame(output.data(), output.size());
}
Example #14
void Topology::flushCache() {
#ifdef DEBUG_TFC
  StartupStore(TEXT("---flushCache() starts%s"),NEWLINE);
  Poco::Timestamp starttick;
#endif
  switch (cache_mode) {
	case 0:  // Original
	case 1:  // Bounds array in memory
		for (int i=0; i<shpfile.numshapes; i++) {
			removeShape(i);
		}
		break;
	case 2:  // Shapes in memory
		for (int i=0; i<shpfile.numshapes; i++) {
			shpCache[i] = NULL;
		}
		break;
  }//sw		
  shapes_visible_count = 0;
#ifdef DEBUG_TFC
  StartupStore(TEXT("   flushCache() ends (%dms)%s"),Poco::Timespan(starttick.elapsed()).totalMilliseconds(),NEWLINE);
#endif
}
Example #15
void ThreadImpl::sleepImpl(long milliseconds)
{
    Poco::Timespan remainingTime(1000*Poco::Timespan::TimeDiff(milliseconds));
    int rc;
    do
    {
        struct timespec ts;
        ts.tv_sec  = (long) remainingTime.totalSeconds();
        ts.tv_nsec = (long) remainingTime.useconds()*1000;
        Poco::Timestamp start;
        rc = ::nanosleep(&ts, 0);
        if (rc < 0 && errno == EINTR)
        {
            Poco::Timespan waited = start.elapsed();
            if (waited < remainingTime)
                remainingTime -= waited;
            else
                remainingTime = 0;
        }
    }
    while (remainingTime > 0 && rc < 0 && errno == EINTR);
    if (rc < 0 && remainingTime > 0) throw Poco::SystemException("Thread::sleep(): nanosleep() failed");
}
Example #16
int main (int argc, char** argv) {

    const bool USE_OPENCV_DISPLAY = false; // Use OpenCV or CImg for display?

    pI::BaseApplication app;
    pI::Runtime runtime (app.GetRuntime());
    LoadPlugins (app);

    // Creates and initializes all the necessary plugins.
    const int DeviceIndex = 0;
    const bool ShowRGB = true;

    // Freenect/GetVideoAndDepth returns both the RGB image and the depth map captured by a Kinect.
    boost::shared_ptr<pI::pIn> getImageAndDepth
        = app.SpawnAndInitialize (runtime,
                                  "Freenect/GetVideoAndDepth",
                                  DeviceIndex,
                                  ShowRGB ? 0 : 2,   // Video format: FREENECT_VIDEO_RGB or FREENECT_VIDEO_IR_8BIT
                                  0                  // Depth format: FREENECT_DEPTH_11BIT
                                 );
    pI::Arguments getImageAndDepth_in = getImageAndDepth->GetInputSignature(),
                  getImageAndDepth_out = getImageAndDepth->GetOutputSignature();

    // Freenect/SetTiltAndLED sets the tilt angle and LED status of a Kinect.
    boost::shared_ptr<pI::pIn> setTiltAndLED = app.SpawnAndInitialize (runtime, "Freenect/SetTiltAndLED", DeviceIndex);
    pI::Arguments setTiltAndLED_in = setTiltAndLED->GetInputSignature(),
                  setTiltAndLED_out = setTiltAndLED->GetOutputSignature();

    // Color/ApplyRGBPalette applies an arbitrary RGB palette onto a matrix
    boost::shared_ptr<pI::pIn> applyRGB (runtime.SpawnPlugin ("pIn/Color/ApplyRGBPalette"));
    pI::Arguments applyRGB_params = applyRGB->GetParameterSignature();

    pI::RGBPalette rgb (applyRGB_params[0]);
    rgb.SetCount (2048);

    std::map<size_t, uint32_t> freenect_glview_colors; // Coloring as used in Freenect's glview example
    freenect_glview_colors[0]    = (0xFFFFFF); // white
    freenect_glview_colors[256]  = (0xFF0000); // red
    freenect_glview_colors[512]  = (0xFFFF00); // yellow
    freenect_glview_colors[768]  = (0x00FF00); // green
    freenect_glview_colors[1024] = (0x00FFFF); // cyan
    freenect_glview_colors[1280] = (0x0000FF); // blue
    freenect_glview_colors[1536] = (0x000000); // black

    std::map<size_t, uint32_t> terrain; // Alternative coloring; names taken from http://en.wikipedia.org/wiki/List_of_colors
    terrain[0]     = (0xFFFFFF); // White
    terrain[256]   = (0x964B00); // Brown (traditional)
    terrain[512]   = (0xFBEC5D); // Maize
    terrain[768]   = (0x66FF00); // Bright green
    terrain[1024]  = (0x014421); // Forest green (traditional)
    terrain[1025]  = (0x87CEFA); // Light sky blue
    terrain[1280]  = (0x00008B); // Dark blue
    terrain[2047]  = (0x000000); // Black
    terrain[2048]  = (0x696969); // Dim gray

    ColorRamp ramp (terrain);

    for (int i = 0; i < 2048; i++) {
        float v = powf ( (i / 2048.), 3);
        uint16_t gamma = 9216 * v;

        uint32_t c = ramp.GetColor (gamma);
        rgb.SetR (i, red (c));
        rgb.SetG (i, green (c));
        rgb.SetB (i, blue (c));
    }

    applyRGB->Initialize (applyRGB_params);

    pI::Arguments applyRGB_in = applyRGB->GetInputSignature(),
                  applyRGB_out = applyRGB->GetOutputSignature();

    // OpenCV/Draw/Rectangle draws the inner rectangle.
    boost::shared_ptr<pI::pIn> drawRectImage = app.SpawnAndInitialize (runtime, "OpenCV/Draw/Rectangle");
    pI::Arguments drawRectImage_in = drawRectImage->GetInputSignature(),
                  drawRectImage_out = drawRectImage->GetOutputSignature();

    boost::shared_ptr<pI::pIn> drawRectDepth = app.SpawnAndInitialize (runtime, "OpenCV/Draw/Rectangle");
    pI::Arguments drawRectDepth_in = drawRectDepth->GetInputSignature(),
                  drawRectDepth_out = drawRectDepth->GetOutputSignature();

    // OpenCV/Draw/PutText draws a text string into an image.
    boost::shared_ptr<pI::pIn> puttext
        = app.SpawnAndInitialize (runtime,
                                  "OpenCV/Draw/PutText",
                                  0,                // font type: CV_FONT_HERSHEY_SIMPLEX
                                  pI_FALSE,         // render font italic: no
                                  0.75, 0.75,       // horizontal and vertical scale
                                  0.0,              // shear: no shear
                                  1,                // thickness of the text strokes: 1
                                  2                 // type of the line: CV_AA
                                 );
    pI::Arguments puttext_in = puttext->GetInputSignature(),
                  puttext_out = puttext->GetOutputSignature();

    // OpenCV/IO/SaveImage saves images to disk.
    boost::shared_ptr<pI::pIn> saveImage (runtime.SpawnPlugin ("OpenCV/IO/SaveImage"));
    saveImage->Initialize (saveImage->GetParameterSignature());
    pI::Arguments saveImage_in = saveImage->GetInputSignature(),
                  saveImage_out = saveImage->GetOutputSignature();

    // OpenCV/IO/ShowImage or CImg/Display displays the image and the depth map.
    boost::shared_ptr<pI::pIn> displayImage, displayDepth;

    if (USE_OPENCV_DISPLAY) {
        displayImage = app.SpawnAndInitialize (runtime, "OpenCV/IO/ShowImage",
                                               "RGB image window", false, 640, 480, 0, 0);
        displayDepth = app.SpawnAndInitialize (runtime, "OpenCV/IO/ShowImage",
                                               "Depth map window", false, 640, 480, 650, 0);
    } else {
        displayImage   = app.SpawnAndInitialize (runtime, "CImg/Display", pI_TRUE);
        displayDepth = app.SpawnAndInitialize (runtime, "CImg/Display", pI_TRUE);
    }

    pI::Arguments displayImage_in = displayImage->GetInputSignature(),
                  displayImage_out = displayImage->GetOutputSignature();

    pI::Arguments displayDepth_in = displayDepth->GetInputSignature(),
                  displayDepth_out = displayDepth->GetOutputSignature();


    // Now that all plugins are initialized, connect them.

    drawRectImage_in[0] = getImageAndDepth_out[0];
    pI::IntValue (drawRectImage_in[1]).SetData (310);
    pI::IntValue (drawRectImage_in[2]).SetData (230);
    pI::IntValue (drawRectImage_in[3]).SetData (330);
    pI::IntValue (drawRectImage_in[4]).SetData (250);
    pI::IntValue (drawRectImage_in[5]).SetData (255);
    pI::IntValue (drawRectImage_in[6]).SetData (255);
    pI::IntValue (drawRectImage_in[7]).SetData (255);
    pI::IntValue (drawRectImage_in[8]).SetData (1);
    pI::StringSelection (drawRectImage_in[9]).SetIndex (0);
    pI::IntValue (drawRectImage_in[10]).SetData (0);

    displayImage_in[0] = drawRectImage_in[0];

    pI::StringValue fileName (saveImage_in[0]);
    saveImage_in[1] = getImageAndDepth_out[0];

    applyRGB_in[0] =  getImageAndDepth_out[1];
    drawRectDepth_in[0] = applyRGB_out[0];
    pI::IntValue (drawRectDepth_in[1]).SetData (310);
    pI::IntValue (drawRectDepth_in[2]).SetData (230);
    pI::IntValue (drawRectDepth_in[3]).SetData (330);
    pI::IntValue (drawRectDepth_in[4]).SetData (250);
    pI::IntValue (drawRectDepth_in[5]).SetData (255);
    pI::IntValue (drawRectDepth_in[6]).SetData (255);
    pI::IntValue (drawRectDepth_in[7]).SetData (255);
    pI::IntValue (drawRectDepth_in[8]).SetData (1);
    pI::StringSelection (drawRectDepth_in[9]).SetIndex (0);
    pI::IntValue (drawRectDepth_in[10]).SetData (0);
    displayDepth_in[0] = drawRectDepth_in[0];

    pI::StringValue puttext_text (puttext_in[1]);
    pI::IntValue puttext_x (puttext_in[2]);
    pI::IntValue puttext_y (puttext_in[3]);
    pI::IntValue puttext_R (puttext_in[4]);
    pI::IntValue puttext_G (puttext_in[5]);
    pI::IntValue puttext_B (puttext_in[6]);
    pI::BoolValue puttext_do_draw (puttext_in[7]);

    puttext_in[0] = drawRectDepth_in[0];
    puttext_R.SetData (255);
    puttext_G.SetData (255);
    puttext_B.SetData (255);
    puttext_do_draw.SetData (pI_TRUE);

    puttext_x.SetData (285);
    puttext_y.SetData (272);

    // Executes getImageAndDepth and setTiltAndLED a first time to initialize Kinect.

    getImageAndDepth->Execute (getImageAndDepth_in, getImageAndDepth_out);

    pI::IntValue newTilt (setTiltAndLED_in[0]);
    pI::IntValue newLED (setTiltAndLED_in[1]);

    newTilt.SetData (0); // Tilt angle 0 degree
    newLED.SetData (1); // LED status Green

    setTiltAndLED->Execute (setTiltAndLED_in, setTiltAndLED_out);

    Poco::Timestamp now;

    bool doLoop = true;
    bool showDistance = false;

    try {
        while (doLoop) {
            while (!kbhit()) {

                // Get Depth and RGB data, ...
                getImageAndDepth->Execute (getImageAndDepth_in, getImageAndDepth_out);

                // ... apply depth to color image conversion, ...
                applyRGB->Execute (applyRGB_in, applyRGB_out);

                // ... draw a white rectangle into the center of both image and map, ...
                if (showDistance) {
                    drawRectImage->Execute (drawRectImage_in, drawRectImage_out);
                    drawRectDepth->Execute (drawRectDepth_in, drawRectDepth_out);
                }

                // ... and perhaps the result of distance measurement, ...
                double avgDistance = 0.;

                if (showDistance) {
                    pI::ShortMatrix depth (getImageAndDepth_out[1]);
                    double sumDistance = 0.;
                    int sumCount = 0;

                    for (int j = 230; j <= 250; ++j) {
                        for (int i = 310; i <= 330; ++i) {
                            int dep = depth.GetData (j, i);

                            if (dep < 2047) {
                                sumDistance += RawDepthToMeters (dep);
                                ++sumCount;
                            }
                        }
                    }

                    if (sumCount > 0) {
                        avgDistance = sumDistance / (double) sumCount;
                    }

                    char str[32];
                    sprintf (str, "%.2f m", avgDistance);
                    puttext_text.SetData (str);

                    puttext->Execute (puttext_in, puttext_out);
                }

                // ... and display both the image and the depth map.
                displayImage->Execute (displayImage_in, displayImage_out);
                displayDepth->Execute (displayDepth_in, displayDepth_out);

                std::cout << "Tilt " << (int) newTilt.GetData();
                std::cout << "; " << (1000000. / now.elapsed()) << " frames/sec          \r";

                now.update();
            }

            int ch = getch();

            switch (ch) {
            case 'q':
            case 'Q':
            case 27 /* ESC */:
                // Terminates
                newTilt.SetData (0); // Tilt angle 0 degree
                newLED.SetData (0); // LED status off

                setTiltAndLED->Execute (setTiltAndLED_in, setTiltAndLED_out);

                doLoop = false;
                break;

            case 'm':
            case 'M': // Measurement mode
                showDistance = !showDistance;
                break;

            case 's':
            case 'S': { // Takes snapshot

                std::string dateTime = Poco::DateTimeFormatter::format (Poco::LocalDateTime(), "%Y-%m-%d_%H-%M-%S");

                std::string name = std::string ("KinectImage_") + dateTime + std::string (".png");
                fileName.SetData (const_cast<char*> (name.c_str()));

                saveImage->Execute (saveImage_in);

                std::cout << "Saved " << name << std::endl;

                // HACK Experimental: saves PCL point cloud

                pI::ShortMatrix depth (getImageAndDepth_out[1]);
                int width = depth.GetCols();
                int height = depth.GetRows();

                pcl::PointCloud<pcl::PointXYZ> cloud;

                for (int j = 0; j < height; ++j) {
                    for (int i = 0; i < width; ++i) {
                        int dep = depth.GetData (j, i);

                        if (dep < 2048) {
                            cloud.push_back (DepthToWorld (i, j, dep));
                        }
                    }
                }

                name = std::string ("PointCloud_") + dateTime + std::string (".pcd");
                pcl::io::savePCDFileASCII (name, cloud);

                std::cout << "Saved " << name << std::endl;
            }
            break;
            case '+':
            case 'u':
            case 'U':
                //std::cout << std::endl << "Tilt: " << newTilt.GetData();
                newTilt.SetData (newTilt.GetData() + 1);
                //std::cout << " -> " << newTilt.GetData() << std::endl;
                setTiltAndLED->Execute (setTiltAndLED_in, setTiltAndLED_out);
                break;
            case '-':
            case 'd':
            case 'D':
                //std::cout << std::endl << "Tilt: " << newTilt.GetData();
                newTilt.SetData (newTilt.GetData() - 1);
                //std::cout << " -> " << newTilt.GetData() << std::endl;
                setTiltAndLED->Execute (setTiltAndLED_in, setTiltAndLED_out);
                break;
            default:
                break;
            }
        }
    } catch (pI::exception::ExecutionException& e) {
        std::cerr << e.what() << std::endl;
    }
}
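The palette loop above compresses the Kinect's 11-bit depth range (0..2047) through a cubic curve before the ramp lookup. Worked through for i = 1024: v = (1024 / 2048.)^3 = 0.125 and gamma = 9216 * 0.125 = 1152, which lands between the terrain ramp's stops at 1025 (light sky blue) and 1280 (dark blue); small i values stay near the white/brown end, while far readings saturate toward black.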
Example #17
void TemplateMatchingTrainer::train(const std::string &meanForAlign, const std::string &dataPath, const std::string &pcaDepthmapPath)
{
    FaceAligner aligner(Mesh::fromFile(meanForAlign), std::string());
    std::vector<std::string> fileNames = Face::LinAlg::Loader::listFiles(dataPath, "*.binz", Face::LinAlg::Loader::BaseFilename);
    std::vector<int> ids = Face::Biometrics::BioDataProcessing::getIds(fileNames, "-");
    int n = fileNames.size();

    cv::FileStorage storage(pcaDepthmapPath, cv::FileStorage::READ);
    auto unit = Face::Biometrics::MultiExtractor::Unit::parse("image depth scale-0.5 zpca correlation");
    unit->featureExtractor->deserialize(storage);

    std::vector<cv::Size> templateSizes;
    templateSizes.push_back(cv::Size(60, 90));    // alternatives tried: cv::Size(40, 70), cv::Size(60, 70), cv::Size(90, 90)
    std::vector<cv::Point> templateCenters;
    templateCenters.push_back(cv::Point(30, 60)); // alternatives tried: cv::Point(20, 50), cv::Point(30, 50), cv::Point(45, 60)
    for (int method = cv::TM_CCOEFF_NORMED; method <= cv::TM_CCOEFF_NORMED; method++)
    {
        std::cout << "method" << method << std::endl;
        for (int type = FaceAligner::CVTemplateMatchingSettings::Mean; type <= FaceAligner::CVTemplateMatchingSettings::Mean; type++)
        {
            switch (type)
            {
            case FaceAligner::CVTemplateMatchingSettings::Texture:
                std::cout << "Texture" << std::endl;
                break;
            case FaceAligner::CVTemplateMatchingSettings::Depth:
                std::cout << "Depth" << std::endl;
                break;
            case FaceAligner::CVTemplateMatchingSettings::Mean:
                std::cout << "Mean" << std::endl;
                break;
            case FaceAligner::CVTemplateMatchingSettings::Gauss:
                std::cout << "Gauss" << std::endl;
                break;
            case FaceAligner::CVTemplateMatchingSettings::Index:
                std::cout << "Index" << std::endl;
                break;
            case FaceAligner::CVTemplateMatchingSettings::Eigen:
                std::cout << "Eigen" << std::endl;
                break;
            }

            for (unsigned int templateIndex = 0; templateIndex < templateSizes.size(); templateIndex++)
            {
                std::cout << "template " << templateSizes[templateIndex].width << " " << templateSizes[templateIndex].height << std::endl;

                std::vector<Mesh> meshes(n);
                std::vector<Landmarks> landmarks(n);
                #pragma omp parallel for
                for (int i = 0; i < n; i++)
                {
                    Mesh m = Mesh::fromFile(dataPath + Poco::Path::separator() + fileNames[i] + ".binz");
                    landmarks[i] = Landmarks(dataPath + Poco::Path::separator() + fileNames[i] + ".yml");
                    m.translate(-landmarks[i].get(Landmarks::Nosetip));
                    SurfaceProcessor::mdenoising(m, 0.01, 10, 0.01);
                    meshes[i] = m;
                }

                //qDebug() << "mean template";
                int meanTemplateCount = 50;
                cv::Mat_<float> templateImg = cv::Mat_<float>::zeros(templateSizes[templateIndex]);
                for (int i = 0; i < meanTemplateCount; i++)
                {
                    MapConverter mc;
                    Map depth = SurfaceProcessor::depthmap(meshes[i], mc, 1.0, SurfaceProcessor::ZCoord);
                    Map blurredDepth = depth;
                    blurredDepth.applyCvGaussBlur(7, 3);

                    cv::Point2d noseTip = mc.MeshToMapCoords(depth, cv::Point2d(0.0, 0.0));
                    cv::Rect roi(noseTip.x - templateCenters[templateIndex].x,
                                 noseTip.y - templateCenters[templateIndex].y,
                                 templateSizes[templateIndex].width, templateSizes[templateIndex].height);

                    cv::Mat_<float> img;

                    CurvatureStruct cs = SurfaceProcessor::calculateCurvatures(blurredDepth);
                    switch (type)
                    {
                    case FaceAligner::CVTemplateMatchingSettings::Texture:
                        SurfaceProcessor::depthmap(meshes[i], mc, 1.0, SurfaceProcessor::Texture_I).toMatrix(0, 0, 255)(roi).convertTo(img, CV_32F);
                        break;
                    case FaceAligner::CVTemplateMatchingSettings::Depth:
                        depth.values(roi).convertTo(img, CV_32F);
                        break;
                    case FaceAligner::CVTemplateMatchingSettings::Mean:
                        img = cs.meanMatrix()(roi);
                        break;
                    case FaceAligner::CVTemplateMatchingSettings::Gauss:
                        img = cs.gaussMatrix()(roi);
                        break;
                    case FaceAligner::CVTemplateMatchingSettings::Index:
                        img = cs.indexMatrix()(roi);
                        break;
                    case FaceAligner::CVTemplateMatchingSettings::Eigen:
                        img = cs.pclMatrix()(roi);
                        break;
                    }

                    templateImg += img;
                }
                templateImg /= meanTemplateCount;
                //double min, max; cv::minMaxIdx(templateImg, &min, &max);
                //cv::imshow("mean", templateImg); //(meanDepth - min)/(max - min));
                //cv::waitKey();

                aligner.cvTemplateMatchingSettings.center = templateCenters[templateIndex];
                aligner.cvTemplateMatchingSettings.comparisonMethod = method;
                aligner.cvTemplateMatchingSettings.templateImage = templateImg;
                aligner.cvTemplateMatchingSettings.inputImageType = (FaceAligner::CVTemplateMatchingSettings::InputImageType)type;

                cv::FileStorage storage("../../test/preAlignTemplate.yml", cv::FileStorage::WRITE);
                aligner.cvTemplateMatchingSettings.serialize(storage);

                //qDebug() << "align";
                std::vector<double> times(n);
                #pragma omp parallel for
                for (int i = 0; i < n; i++)
                {
                    meshes[i].translate(landmarks[i].get(Landmarks::Nosetip));

                    Poco::Timestamp before;
                    aligner.icpAlign(meshes[i], 50, FaceAligner::CVTemplateMatching);
                    times[i] = before.elapsed()/1000;
                }

                std::cout << "mean align time (ms): " << (std::accumulate(times.begin(), times.end(), 0.0)/n) << std::endl;
                evaluate(meshes, ids, pcaDepthmapPath);
            }
        }
    }
}
Example #18
void RealSenseSensorImpl::singleCapture(bool calcUvMap, bool calcLandmarks, bool releaseFrame)
{
	Poco::Int64 acquireTime = 0;
	Poco::Int64 getBuffersTime = 0;
	
	Poco::Timestamp now;
	sts = senseManager->AcquireFrame(true);

	if (sts != PXC_STATUS_NO_ERROR) {
		senseManager->ReleaseFrame();
		std::string msg("Can't acquire frame from camera (" + std::to_string(sts) + ")");
		std::cerr << msg << std::endl;
		throw FACELIB_EXCEPTION(msg);
	}
	PXCCapture::Sample *sample = senseManager->QuerySample();
	acquireTime = now.elapsed();
	
	now.update();
	getBuffers(sample->depth, sample->color, calcUvMap, calcLandmarks);
	getBuffersTime = now.elapsed();

	try {
		brightnessToSet = 0;
		Poco::Int64 detectFaceRoiTime = 0;
		Poco::Int64 meanCalcTime = 0;
		Poco::Int64 queryColorBrightnessTime = 0;
		Poco::Int64 queryColorBrightnessInfoTime = 0;
		Poco::Int64 setColorBrightnessTime = 0;

		now.update();
		cv::Rect croi = realsenseDepthFaceTracker->detectFaceRoi(depthMatrix, sample->depth, RealSenseDepthFaceTracker::Domain::Color, false /*debug*/);
		detectFaceRoiTime = now.elapsed();

		// brightness control
		if ((settings.brightnessControl == static_cast<int>(RealSenseSensor::BrightnessControlType::HeadRoiBased) && croi.area() && 
			(state == RealSenseSensor::STATE_POSITIONING || state == RealSenseSensor::STATE_IDLE))) {
			now.update();
			cv::Mat mask = cv::Mat::zeros(grayscalePreview.size(), CV_8U);
			cv::rectangle(mask, croi, cv::Scalar(255), CV_FILLED);
			int mean = cv::mean(grayscalePreview, mask)[0];
			meanCalcTime = now.elapsed();

//			if (Face::Settings::instance().debug()) {
//				cv::Mat maskDrawing = grayscalePreview.clone();
//				cv::rectangle(maskDrawing, croi, cv::Scalar(255));
//				cv::imshow("Depth roi for gain projected to color", maskDrawing);
//			}

			now.update();
			pxcI32 brightness = device->QueryColorBrightness();
			queryColorBrightnessTime = now.elapsed();
			diff = targetColorMean - mean;

			auto newValue = [=](const PXCCapture::Device::PropertyInfo& info, pxcI32 val, pxcI32 newVal) -> pxcI32 {
				pxcI32 newV = std::max(info.range.min, std::min(static_cast<pxcF32>(newVal), info.range.max));
				if (Face::Settings::instance().debug()) {
					std::stringstream sstr;
					sstr << "Val: " << val << "; adjusted val: " << newV << "; mean: " << mean << "/" << targetColorMean;
					shadowText(grayscalePreview, sstr.str(), cv::Point(150, 30), 0.5);
				}
				return newV;
			};

			int _diff = diff == 0 ? 0 : diff / abs(diff);
			pxcI32 newBrightness = newValue(device->QueryColorBrightnessInfo(), brightness, brightness + _diff);
			if (abs(diff) > 15) {
				brightnessToSet = newBrightness;
			}
		}
	} catch (std::exception& e) {
		std::cout << " --- Brightness control exception: " << e.what() << std::endl;
	}

	//std::cout << std::dec << "Acquire time: " << acquireTime << "; getBuffers time: " << getBuffersTime << std::endl;

	if (releaseFrame) {
		senseManager->ReleaseFrame();
	}
}
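The brightness control above is a one-step-per-frame controller: diff / abs(diff) reduces the error to its sign (+1, 0, or -1), so each captured frame nudges the camera brightness by at most one unit, and only while the grayscale mean inside the face ROI is more than 15 levels from targetColorMean. For example, a target of 128 against a measured mean of 96 gives diff = 32, so brightnessToSet becomes brightness + 1 on successive frames until the mean is back inside the +/-15 band.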
Example #19
		void PrioNotificationBasedRunnable::run() {
			threadDebug();

			LOG_STREAM_DEBUG << thread.name() << ": bg thread started" << LE;
			try {
				Poco::Notification::Ptr pNf(queue.waitDequeueNotification());

				while (pNf) {
					Poco::Timestamp t;
					LOG_NAMED_STREAM_INFO(LOG_THREAD) << thread.name() << "(>): Accept notification '" << pNf->name() << "'" << LE;

					// quit notification
					QuitNotification::Ptr pQuitNf = pNf.cast<QuitNotification>();
					if (pQuitNf) {
						LOG_NAMED_STREAM_INFO(LOG_THREAD) << thread.name() << "(>): Quit" << LE;
						break;
					}

					this->processNotification(pNf);

					LOG_NAMED_STREAM_INFO(LOG_THREAD) << thread.name() << "(>): Process notification '" << pNf->name() << "' finished in " << t.elapsed() / 1000 << "ms" << LE;
					pNf = queue.waitDequeueNotification();
					LOG_NAMED_STREAM_INFO(LOG_THREAD) << thread.name() << "(>): After waitDequeueNotification, ptr: " << (long int) pNf.get() << LE;
					if (!pNf.isNull()) {
						LOG_NAMED_STREAM_INFO(LOG_THREAD) << thread.name() << "(>): After waitDequeueNotification, name: '" << pNf->name() << "'" << LE;
					}
				}
				LOG_NAMED_STREAM_INFO(LOG_THREAD) << thread.name() << "(>): Out from while cycle; ptr: " << (long int) pNf.get() << LE;
			} catch (Poco::Exception & e) {
				LOG_NAMED_STREAM_FATAL(LOG_THREAD) << thread.name() << ": Worker thread finished with exception " << e.displayText() << LE;
			} catch (std::runtime_error & e) {
				LOG_NAMED_STREAM_FATAL(LOG_THREAD) << thread.name() << ": Worker thread finished with std runtime exception " << e.what() << LE;
			} catch (std::exception & e) {
				LOG_NAMED_STREAM_FATAL(LOG_THREAD) << thread.name() << ": Worker thread finished with std exception " << e.what() << LE;
			} catch (...) {
				LOG_NAMED_STREAM_FATAL(LOG_THREAD) << thread.name() << ": Worker thread finished with unknown exception" << LE;
			}
			LOG_STREAM_DEBUG << thread.name() << ": bg thread finished" << LE;
		}
Example #20
void RealSenseSensorImpl::doPositioning()
{
	Poco::Int64 singleCaptureTime = 0;
	Poco::Int64 faceDetectTime = 0;
	Poco::Int64 pose1Time = 0;
	Poco::Int64 pose2Time = 0;
	Poco::Int64 frameReleaseTime = 0;
	Poco::Int64 frameTime = 0;

	static Poco::Timestamp frameTimer;

	RealSenseSensor::PositioningOutput prevPosOutput = positioningOutput;
	positioningOutput = RealSenseSensor::POS_NONE;

	Poco::Timestamp now;
	singleCapture(false, false, false);
	singleCaptureTime = now.elapsed();
	
	now.update();
	faceDetect(settings.hasPosFeature(RealSenseSensor::PositioningFeature::DepthMask));
	faceDetectTime = now.elapsed();

	frameTime = frameTimer.elapsed();
	frameTimer.update();

	calcMinZ();

	if (tracker->getConsecutiveNonDetects() >= settings.consecutiveNonDetectsToStopPositioning ||
			(tracker->isLastDetect() && nearestZpoint > settings.maximalPositioningDistance)) {
		setState(RealSenseSensor::STATE_IDLE);
		senseManager->ReleaseFrame();
		return;
	}

	if (tracker->getConsecutiveNonDetects() > 10) {
		positioningOutput = RealSenseSensor::POS_NONE;
		senseManager->ReleaseFrame();
		return;
	}

	if (!tracker->isLastDetect()) {
		senseManager->ReleaseFrame();
		return;
	}

	cv::Rect reg = toRealSizeRoi(tracker->getLastRegion());
	double distance = calcDistance(reg);

	now.update();

	FacePoseEstimator::Pose pose = FacePoseEstimator::Pose::UnknownPose();

	if (settings.hasPosFeature(RealSenseSensor::PositioningFeature::PoseEstimation) && tracker->isLastDetect()) {
		landmarks = lmDetector.getLandmarks(grayscalePreview, toRealSizeRoi4LMDetector(tracker->getLastRegion()), false);
		std::map<std::string, std::vector<cv::Point2d> > selectedLMPoints = lmDetector.lmGroupPoints(landmarks);
		pose1Time = now.elapsed();
		
		now.update();
		pose = facePoseEstimator->facePose(selectedLMPoints, realsenseProjection);
		pose2Time = now.elapsed();

		if (Face::Settings::instance().debug() && pose != FacePoseEstimator::Pose::UnknownPose()) {
			cv::Scalar red(0, 0, 255);
			cv::Scalar green(0, 255, 0);
			cv::Scalar white(255, 255, 255);
			cv::Scalar black(0, 0, 0);

			{
				positioningFrameStats.update();
				std::stringstream sstr;
				sstr.precision(1);
				sstr.setf(std::ios::fixed, std::ios::floatfield);
				sstr << "Pitch: " << pose.pitch - pose.relativePitch << "/" << facePoseEstimator->getSettings().pitchZeroAngle <<
					", Yaw: " << pose.yaw - pose.relativeYaw << "/" << facePoseEstimator->getSettings().yawZeroAngle <<
					", FPS: " << positioningFrameStats.fps();
				sstr.flush();
				shadowText(grayscalePreview, sstr.str(), cv::Point(150, 15), 0.5);

				{
					std::stringstream sstrDebug;
					sstrDebug << std::dec << "Capture time: " << singleCaptureTime / 1000 << "; FaceDetect time: " << faceDetectTime / 1000;
					shadowText(grayscalePreview, sstrDebug.str(), cv::Point(150, 45), 0.5);
				}

				{
					std::stringstream sstrDebug;
					sstrDebug << std::dec << "pose time: " << pose1Time / 1000 << "/" << pose2Time / 1000 << "; frame time: " << frameTime / 1000;
					shadowText(grayscalePreview, sstrDebug.str(), cv::Point(150, 60), 0.5);
				}

				auto mirror = [](const cv::Mat& img, const cv::Point& p) -> cv::Point {
					return cv::Point(img.cols - p.x, p.y);
				};

				cv::line(grayscalePreview, mirror(grayscalePreview, pose.imageAxisX.from), mirror(grayscalePreview, pose.imageAxisX.to), black);
				cv::line(grayscalePreview, mirror(grayscalePreview, pose.imageAxisY.from), mirror(grayscalePreview, pose.imageAxisY.to), black);
			}

			cv::rectangle(grayscalePreview, toRealSizeRoi4LMDetector(tracker->getLastRegion()), 255);

			for (const auto& lm : landmarks) {
				cv::circle(grayscalePreview, lm, 2, black);
			}
		}

		landmarks.clear();
	}

	// add hysteresis for distance
	double targetMaxDistance = settings.maximalDistance;
	if (prevPosOutput == RealSenseSensor::POS_MOVE_CLOSER) {
		targetMaxDistance *= 0.96;
	}
	double targetMinDistance = settings.minimalDistance;
	if (prevPosOutput == RealSenseSensor::POS_MOVE_FAR) {
		targetMinDistance *= 1.04;
	}

	if (distance > targetMaxDistance) {
		positioningOutput = RealSenseSensor::POS_MOVE_CLOSER;
	}
	else if (distance < targetMinDistance) {
		positioningOutput = RealSenseSensor::POS_MOVE_FAR;
	}
	else {
		FacePoseEstimator::AlignInstruction instruction = facePoseEstimator->giveFaceAlignInstruction(pose);
		convert(instruction, positioningOutput);
	}

	if (tracker->positionUnsteady() || positioningOutput != RealSenseSensor::POS_DONTMOVE) {
		tracker->init();
	}

	if (positioningOutput == RealSenseSensor::POS_DONTMOVE && tracker->getConsecutiveDetects() >= settings.consecutiveDetectsToStartCapturing) {
		setState(RealSenseSensor::STATE_CAPTURING);
	}

	senseManager->ReleaseFrame();
//	std::cout << std::dec << "SingleCapture time: " << singleCaptureTime << "; FaceDetect time: " << faceDetectTime;
//	std::cout << std::dec << "; pose time: " << pose1Time << "/" << pose2Time << "; frame time: " << frameTime << std::endl;
}
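The 0.96 and 1.04 factors add hysteresis to the distance band: once POS_MOVE_CLOSER has been issued, the user must come inside 0.96 * maximalDistance (e.g. 768 for a configured limit of 800, in whatever units calcDistance() returns) before the instruction clears, and symmetrically a POS_MOVE_FAR target is raised to 1.04 * minimalDistance, so measurement jitter around a threshold cannot make the guidance flip-flop between frames.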
Example #21
void ChildProcessSession::sendTile(const char* /*buffer*/, int /*length*/, StringTokenizer& tokens)
{
    Poco::Mutex::ScopedLock lock(_mutex);

    int part, width, height, tilePosX, tilePosY, tileWidth, tileHeight;

    if (tokens.count() < 8 ||
        !getTokenInteger(tokens[1], "part", part) ||
        !getTokenInteger(tokens[2], "width", width) ||
        !getTokenInteger(tokens[3], "height", height) ||
        !getTokenInteger(tokens[4], "tileposx", tilePosX) ||
        !getTokenInteger(tokens[5], "tileposy", tilePosY) ||
        !getTokenInteger(tokens[6], "tilewidth", tileWidth) ||
        !getTokenInteger(tokens[7], "tileheight", tileHeight))
    {
        sendTextFrame("error: cmd=tile kind=syntax");
        return;
    }

    if (part < 0 ||
        width <= 0 ||
        height <= 0 ||
        tilePosX < 0 ||
        tilePosY < 0 ||
        tileWidth <= 0 ||
        tileHeight <= 0)
    {
        sendTextFrame("error: cmd=tile kind=invalid");
        return;
    }

    _loKitDocument->pClass->setView(_loKitDocument, _viewId);

    std::string response = "tile: " + Poco::cat(std::string(" "), tokens.begin() + 1, tokens.end()) + "\n";

    std::vector<char> output;
    output.reserve(4 * width * height);
    output.resize(response.size());
    std::memcpy(output.data(), response.data(), response.size());

    unsigned char *pixmap = new unsigned char[4 * width * height];
    memset(pixmap, 0, 4 * width * height);

    if (_docType != "text" && part != _loKitDocument->pClass->getPart(_loKitDocument))
    {
        _loKitDocument->pClass->setPart(_loKitDocument, part);
    }

    Poco::Timestamp timestamp;
    _loKitDocument->pClass->paintTile(_loKitDocument, pixmap, width, height, tilePosX, tilePosY, tileWidth, tileHeight);
    Log::trace() << "paintTile at [" << tilePosX << ", " << tilePosY
                 << "] rendered in " << (timestamp.elapsed()/1000.) << " ms" << Log::end;

    LibreOfficeKitTileMode mode = static_cast<LibreOfficeKitTileMode>(_loKitDocument->pClass->getTileMode(_loKitDocument));
    if (!Util::encodePNGAndAppendToBuffer(pixmap, width, height, output, mode))
    {
        sendTextFrame("error: cmd=tile kind=failure");
        delete[] pixmap; // do not leak the pixmap on the error path
        return;
    }

    delete[] pixmap;

    sendBinaryFrame(output.data(), output.size());
}
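The reply is framed as the textual "tile:" header immediately followed by the PNG bytes in a single binary message: output is resized to hold just the header text, encodePNGAndAppendToBuffer() then appends the image data, and the earlier reserve(4 * width * height) (the worst-case uncompressed RGBA size) only pre-allocates so the append does not trigger a reallocation.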
Example #22
void Topology::updateCache(rectObj thebounds, bool purgeonly) {
  if (!triggerUpdateCache) return;

  if (!shapefileopen) return;

  in_scale = CheckScale();

  if (!in_scale) {
    // not visible, so flush the cache
    // otherwise we waste time on looking up which shapes are in bounds
    flushCache();
    triggerUpdateCache = false;
    in_scale_last = false;
    return;
  }

  if (purgeonly) {
    in_scale_last = in_scale;
    return;
  }

  triggerUpdateCache = false;

#ifdef DEBUG_TFC
  StartupStore(TEXT("---UpdateCache() starts, mode%d%s"),cache_mode,NEWLINE);
  Poco::Timestamp starttick;
#endif

  if(msRectOverlap(&shpfile.bounds, &thebounds) != MS_TRUE) {
    // this happens if entire shape is out of range
    // so clear buffer.
    flushCache();
    in_scale_last = in_scale;
    return;
  }

  bool smaller = false;
  bool bigger = false;
  bool in_scale_again = in_scale && !in_scale_last;
  int shapes_loaded = 0;
  shapes_visible_count = 0;
  in_scale_last = in_scale;
  
  switch (cache_mode) {
    case 0: // Original code plus one special case
      smaller = (msRectContained(&thebounds, &lastBounds) == MS_TRUE);
      if (smaller) { //Special case, search inside, we don't need to load additional shapes, just remove
        shapes_visible_count = 0;
        for (int i=0; i<shpfile.numshapes; i++) {
          if (shpCache[i]) {
            if(msRectOverlap(&(shpCache[i]->shape.bounds), &thebounds) != MS_TRUE) {
              removeShape(i);
            } else shapes_visible_count++;
          }
        }//for
      } else { 
        // In this case we have to run the original algorithm
        msSHPWhichShapes(&shpfile, thebounds, 0);
        shapes_visible_count = 0;
        for (int i=0; i<shpfile.numshapes; i++) {
          if (msGetBit(shpfile.status, i)) {
            if (shpCache[i]==NULL) {
              // shape is now in range, and wasn't before
              shpCache[i] = addShape(i);
              shapes_loaded++;
            }
            shapes_visible_count++;
          } else {
            removeShape(i);
          }
        }//for
      }
      break;

    case 1:  // Bounds array in memory
      bigger = (msRectContained(&lastBounds, &thebounds) == MS_TRUE);
      smaller = (msRectContained(&thebounds, &lastBounds) == MS_TRUE);
      if (bigger || in_scale_again) { //We don't need to remove shapes, just load, so skip loaded ones
        for (int i=0; i<shpfile.numshapes; i++) {
          if (shpCache[i]) continue;
          if(msRectOverlap(&shpBounds[i], &thebounds) == MS_TRUE) {
            // shape is now in range, and wasn't before
            shpCache[i] = addShape(i);
            shapes_loaded++;
          }
        }//for
        shapes_visible_count+=shapes_loaded;
      } else
      if (smaller) { //Search inside, we don't need to load additional shapes, just remove
        for (int i=0; i<shpfile.numshapes; i++) {
          if (shpCache[i]==NULL) continue;
          if(msRectOverlap(&shpBounds[i], &thebounds) != MS_TRUE) {
            removeShape(i);
          } else shapes_visible_count++;
        }//for
      } else { 
        //Otherwise we have to search the all array
        for (int i=0; i<shpfile.numshapes; i++) {
          if(msRectOverlap(&shpBounds[i], &thebounds) == MS_TRUE) {
            if (shpCache[i]==NULL) {
              // shape is now in range, and wasn't before
              shpCache[i] = addShape(i);
              shapes_loaded++;
            }
            shapes_visible_count++;
          } else {
            removeShape(i);
          }
        }//for
      }
      break;

    case 2: // All shapes in memory	
      XShape *pshp;
      shapes_visible_count = 0;
      for (int i=0; i<shpfile.numshapes; i++) {
        pshp = shps[i];
        if(msRectOverlap(&(pshp->shape.bounds), &thebounds) == MS_TRUE) {
          shpCache[i] = pshp;
          shapes_visible_count++;
        } else {
          shpCache[i] = NULL;
        }
      }//for
      break;
  }//sw

  lastBounds = thebounds;

#ifdef DEBUG_TFC
  long free_size = CheckFreeRam();
  StartupStore(TEXT("   UpdateCache() ends, shps_visible=%d ram=%luM (%dms)%s"),shapes_visible_count, free_size/(1024*1024), Poco::Timespan(starttick.elapsed()).totalMilliseconds(),NEWLINE);
#endif
}
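For reference, the three cache_mode values shared by flushCache() and updateCache() trade memory for lookup speed: mode 0 keeps only the shapes currently in bounds and re-queries the shapefile (msSHPWhichShapes) when the view grows; mode 1 additionally holds a bounds array (shpBounds) in memory so overlap tests need no file access; and mode 2 preloads every shape (shps) and merely switches shpCache[i] between NULL and the resident object.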