コード例 #1
0
// Constructs a session owned by the given instance.
// The session id handed to SessionBase is "<instance-id>::<fresh-uuid>", so
// each SessionImpl gets a globally unique, instance-scoped identifier.
SessionImpl::SessionImpl(const InstanceImpl& instance_impl):
		SessionBase{ instance_impl.id() + "::" + newUUID() },
		m_instance{ instance_impl },      // back-reference to the owning instance (held by const ref)
		m_uplink{ SessionBase::m_pointer }  // NOTE(review): presumably binds the uplink to this session's base pointer — confirm against SessionBase
{
	// Announce creation; m_id is the composite id built in the initializer list above.
	env().logInfo(
			"New FreeAX25_TCPServer::SessionImpl(" + m_id + ")");
}
コード例 #2
0
ファイル: daemon.cpp プロジェクト: Simage/openalpr
void streamRecognitionThread(void* arg)
{
  CaptureThreadData* tdata = (CaptureThreadData*) arg;
  
  LOG4CPLUS_INFO(logger, "country: " << tdata->country_code << " -- config file: " << tdata->config_file );
  LOG4CPLUS_INFO(logger, "Stream " << tdata->camera_id << ": " << tdata->stream_url);
  
  Alpr alpr(tdata->country_code, tdata->config_file);
  alpr.setTopN(tdata->top_n);
  
  
  int framenum = 0;
  
  LoggingVideoBuffer videoBuffer(logger);
  
  videoBuffer.connect(tdata->stream_url, 5);
  
  cv::Mat latestFrame;
  
  std::vector<uchar> buffer;
  
  LOG4CPLUS_INFO(logger, "Starting camera " << tdata->camera_id);
  
  while (daemon_active)
  {
    int response = videoBuffer.getLatestFrame(&latestFrame);
    
    if (response != -1)
    {
      
      timespec startTime;
      getTime(&startTime);
      cv::imencode(".bmp", latestFrame, buffer );
      std::vector<AlprResult> results = alpr.recognize(buffer);
      
      timespec endTime;
      getTime(&endTime);
      double totalProcessingTime = diffclock(startTime, endTime);
      
      if (tdata->clock_on)
      {
	LOG4CPLUS_INFO(logger, "Camera " << tdata->camera_id << " processed frame in: " << totalProcessingTime << " ms.");
      }
      
      if (results.size() > 0)
      {
	// Create a UUID for the image
	std::string uuid = newUUID();
	
	// Save the image to disk (using the UUID)
	if (tdata->output_images)
	{
	  std::stringstream ss;
	  ss << tdata->output_image_folder << "/" << uuid << ".jpg";
	  
	  cv::imwrite(ss.str(), latestFrame);
	}
	
	// Update the JSON content to include UUID and camera ID
  
	std::string json = alpr.toJson(results, totalProcessingTime);
	
	cJSON *root = cJSON_Parse(json.c_str());
	cJSON_AddStringToObject(root,	"uuid",		uuid.c_str());
	cJSON_AddNumberToObject(root,	"camera_id",	tdata->camera_id);
	cJSON_AddStringToObject(root, 	"site_id", 	tdata->site_id.c_str());
	cJSON_AddNumberToObject(root,	"img_width",	latestFrame.cols);
	cJSON_AddNumberToObject(root,	"img_height",	latestFrame.rows);

	char *out;
	out=cJSON_PrintUnformatted(root);
	cJSON_Delete(root);
	
	std::string response(out);
	
	free(out);
	
	// Push the results to the Beanstalk queue
	for (int j = 0; j < results.size(); j++)
	{
	  LOG4CPLUS_DEBUG(logger, "Writing plate " << results[j].bestPlate.characters << " (" <<  uuid << ") to queue.");
	}
	
	writeToQueue(response);
      }
    }
    
    usleep(10000);
  }
  
  
  videoBuffer.disconnect();
  
  LOG4CPLUS_INFO(logger, "Video processing ended");
  
  delete tdata;
}
コード例 #3
0
ファイル: internaltracker.cpp プロジェクト: telnykha/VideoA
// Feeds one frame into the face tracker pipeline:
//   1. runs the base-class face detector on a grayscale copy of the frame,
//   2. matches each detection against currently tracked faces by rectangle
//      overlap (matched tracks are updated in place),
//   3. creates a new track object — and fires faceFound — for every
//      detection that matched no existing track,
//   4. retires tracks whose health() dropped below 0 (optionally saving the
//      track to disk and firing saveTrack), and fires facePos for the rest.
// Returns the number of faces currently tracked, or 0 if this call only
// (re)initialized the internal model.
int TInternalFaceTracker::SetImage(awpImage* pImage)
{

    // Lazy (re)initialization path: build the model from this frame and bail out.
    if (this->m_model == NULL || this->m_needInit)
    {
        this->Init(pImage);
        return 0;
    }


    // Work on a grayscale copy so the caller's image is left untouched.
    awpImage* copy = NULL;
    awpCopyImage(pImage, &copy);
    awpConvert(copy, AWP_CONVERT_3TO1_BYTE);

    // Run face detection (base class fills this->m_objects).
    int num = TInternalFaceDetector::SetImage(copy);
    // Advance tracking of the already-known faces on this frame.
    this->track(copy);
    // Distribute the fresh detections over the existing tracks.
    if ( num > 0)
    {     // For every detection, update the existing face array:
          // a detection overlapping a tracked face refreshes that track.
          for (int i =0; i < num; i++)
          {
            awpRect r1 = this->m_objects[i].rect;
            for (int j = 0; j < m_faces.GetCount(); j++)
            {
               IFaceObject* o = (IFaceObject*)m_faces.Get(j);
               awpRect r2;
               memcpy(&r2, o->bounds(), sizeof(awpRect));

               if (_awpRectsOverlap(r1, r2) > 0)
               {
                  o->update(copy, &r1);
                  // Mark the detection as consumed so it won't spawn a new track below.
                  this->m_objects[i].hasObject = false;
               }
            }
          }
          // Emit a "new face found" event for each unconsumed detection.
          for (int i = 0; i < num; i++)
          {
             if (this->m_objects[i].hasObject)
             {
                TFlowFaceObject* o = new TFlowFaceObject(this->m_next_id++,
                copy, &this->m_objects[i].rect);
                m_faces.Add(o);
                //todo: face-found event.
                if (this->faceFound != NULL)
                        this->faceFound(o->GetImage(0), o->id());
             }
          }

    }
    // Sweep the existing face array: any track whose health() has gone
    // negative is removed from observation (iterating backwards so Delete(i)
    // doesn't shift unvisited indices).
    for (int i = m_faces.GetCount() -1 ; i >= 0; i--)
    {
       IFaceObject* o = (IFaceObject*)m_faces.Get(i);
       if (o->health() < 0)
       {
          string strFileName = "";
          if (this->m_save_tracks)
          {
            // NOTE(review): backslash separator makes this Windows-only — confirm intended.
            strFileName = this->m_outPath;
            strFileName += "\\";
            strFileName += newUUID();
            strFileName += ".awp";
            o->SaveTrack(strFileName.c_str());
          }
          // The object is lost — notify the callback (file name is "" if not saved).
          if (this->saveTrack != NULL)
              this->saveTrack(strFileName.c_str(), o->id());
          m_faces.Delete(i);
       }
       else
       {
        // The object is still alive: report its (possibly new) position.
        if (this->facePos != NULL)
        {
            awpRect* r = o->bounds();
            if (r != NULL)
            {
                AWPDOUBLE v, vv, vvv;
                awpDetectItem item;
                item.rect = *r;
                item.hasObject = true;

                // Reference point: horizontal center of the rect, at its top edge.
                awpPoint pp;
                pp.X = (item.rect.left + item.rect.right) / 2;
                pp.Y = item.rect.top;

                // NOTE(review): 1800 looks like an assumed real-world object
                // height (mm?) used for the camera-geometry estimates — confirm.
                awpImageYHToLength(&camera, copy, item.rect.top, 1800, &vv);
                awpImageObjectHWidth(&camera, copy, &item.rect, 1800, &v);
                awpImagePointToShiftHX(&camera, copy, &pp, 1800, &vvv);

                item.Distance = vv;
                item.Width    = v;
                item.Shift    = vvv;
                item.Height   = 1800;

                this->facePos( NULL, &item, o->id());


            }
         }
       }
    }
    awpReleaseImage(&copy);

    return m_faces.GetCount();
}