//----------------------------------------------------------------------------
// Tests CObjects::Save on a named object: first save, redundant re-save, and
// re-save after attaching/replacing an embedded string, asserting database
// object counts and serialised-size cache entries at each step.
//----------------------------------------------------------------------------
void TestObjectsObjectSave(void)
{
	CFileUtil						cFileUtil;
	Ptr<CTestDoubleNamedString>		pDouble;
	BOOL							bResult;
	CIndexedConfig					cConfig;

	// Fresh on-disk database under Output/ObjectSave with a large object cache.
	cFileUtil.RemoveDir("Output");
	cFileUtil.MakeDir("Output/ObjectSave");
	cConfig.OptimiseForStreaming("Output/ObjectSave");
	cConfig.SetObjectCacheSize(128 MB);

	ObjectsInit(&cConfig);

	pDouble = SetupObjectsForDehollowfication();

	// Nothing saved yet: all objects exist only in memory.
	AssertLongLongInt(0, gcObjects.NumDatabaseObjects());
	AssertLongLongInt(9, gcObjects.NumMemoryIndexes());
	AssertLongLongInt(6, gcObjects.NumMemoryNames());
	AssertTrue(pDouble.IsDirty());

	// First save writes one object (106 bytes serialised) to the database.
	bResult = gcObjects.Save(pDouble.BaseObject());
	AssertTrue(bResult);
	AssertTrue(pDouble.IsDirty());  //This object is *still* dirty after save.  Almost no objects will answer true to IsDirty.

	AssertLongLongInt(1, gcObjects.NumDatabaseObjects());
	AssertLongLongInt(9, gcObjects.NumMemoryIndexes());
	AssertLongLongInt(6, gcObjects.NumMemoryNames());
	AssertInt(106, pDouble->SerialisedSize());
	AssertLongLongInt(1, gcObjects.NumDatabaseObjectsCached(106));
	AssertLongLongInt(0, gcObjects.NumDatabaseObjectsCached(118));

	// Saving again without modification: counts and cached size are unchanged.
	bResult = gcObjects.Save(pDouble.BaseObject());
	AssertTrue(bResult);
	AssertLongLongInt(1, gcObjects.NumDatabaseObjects());
	AssertInt(106, pDouble->SerialisedSize());
	AssertLongLongInt(1, gcObjects.NumDatabaseObjectsCached(106));
	AssertLongLongInt(0, gcObjects.NumDatabaseObjectsCached(118));

	// Attach a string member: the serialised form grows to 118 bytes and the
	// cached 106-byte entry is superseded by a 118-byte entry on save.
	pDouble->mszString = OMalloc(CString);
	pDouble->mszString->Init("A String");

	bResult = gcObjects.Save(pDouble.BaseObject());
	AssertTrue(bResult);
	AssertLongLongInt(1, gcObjects.NumDatabaseObjects());
	AssertInt(118, pDouble->SerialisedSize());
	AssertLongLongInt(0, gcObjects.NumDatabaseObjectsCached(106));
	AssertLongLongInt(1, gcObjects.NumDatabaseObjectsCached(118));

	// Replace the string member with a different CString; the asserts show
	// the serialised size and cache entries remain at 118 after saving.
	pDouble->mszString = OMalloc(CString);
	pDouble->mszString->Init("Different Object");

	AssertInt(118, pDouble->SerialisedSize());
	bResult = gcObjects.Save(pDouble.BaseObject());
	AssertTrue(bResult);
	AssertLongLongInt(1, gcObjects.NumDatabaseObjects());
	AssertInt(118, pDouble->SerialisedSize());
	AssertLongLongInt(0, gcObjects.NumDatabaseObjectsCached(106));
	AssertLongLongInt(1, gcObjects.NumDatabaseObjectsCached(118));

	ObjectsKill();
}
Example #2
0
 /// Discards the current context by releasing whatever it holds.
 void clear() { context.reset(); }
Example #3
0
int main(int argc, char *argv[]) {
    Ptr<Instance::Manager> manager = shippingInstanceManager();

    if (manager == NULL) {
        cerr << "Unexpected NULL manager." << endl;
        return 1;
    }

    Ptr<Instance> stats = manager->instanceNew("myStats", "Stats");

    if (stats == NULL) {
        cerr << "Unexpected NULL stats." << endl;
        return 1;
    }

    Ptr<Instance> fleet = manager->instanceNew("myFleet", "Fleet");

    if (fleet == NULL) {
        cerr << "Unexpected NULL." << endl;
        return 1;
    }

    fleet->attributeIs("Boat, speed", "60");
    fleet->attributeIs("Truck, capacity", "50");
    fleet->attributeIs("Plane, cost", "20");
    cout << "fleet->attribute('Boat, speed'): " << fleet->attribute("Boat, speed") << endl;

    // --- Create instances
    // -- Locations
    // customers
    Ptr<Instance> customer1 = manager->instanceNew("customer1", "Customer");  
    Ptr<Instance> customer2 = manager->instanceNew("customer2", "Customer");  
    // ports
    Ptr<Instance> port1 = manager->instanceNew("port1", "Port");  

    if (customer1 == NULL || customer2 == NULL || port1 == NULL) {
        cerr << "Unexpected NULL customer or port." << endl;
	return 1;
    }

    // -- Segments
    // boat
    Ptr<Instance> boatSeg1 = manager->instanceNew("boatSeg1", "Boat segment");  
    Ptr<Instance> boatSeg2 = manager->instanceNew("boatSeg2", "Boat segment");  
    // truck
    Ptr<Instance> truckSeg1 = manager->instanceNew("truckSeg1", "Truck segment");  
    Ptr<Instance> truckSeg2 = manager->instanceNew("truckSeg2", "Truck segment");  

    if (boatSeg1 == NULL || boatSeg2 == NULL || truckSeg1 == NULL || truckSeg2 == NULL) {
        cerr << "Unexpected NULL segment." << endl;
        return 1;
    }

    // connections
    // customer1 <---> port1
    truckSeg1->attributeIs("source", "customer1");
    truckSeg2->attributeIs("source", "port1");
    truckSeg1->attributeIs("return segment", "truckSeg2");
    cout << "truckSeg1->attribute('source'): " << truckSeg1->attribute("source") << endl;
    
    // customer2 <---> port1
    boatSeg1->attributeIs("source", "customer2");
    boatSeg2->attributeIs("source", "port1");
    boatSeg1->attributeIs("return segment", "boatSeg2");
    cout << "boatSeg1->attribute('return segment'): " << boatSeg1->attribute("return segment") << endl;

    // -- Segment lengths
    boatSeg1->attributeIs("length", "400");
    boatSeg2->attributeIs("length", "400");
    truckSeg1->attributeIs("length", "900");
    truckSeg2->attributeIs("length", "900");

    // -- Segment difficulties
    boatSeg1->attributeIs("difficulty", "1");
    boatSeg2->attributeIs("difficulty", "1");
    truckSeg1->attributeIs("difficulty", "1");
    truckSeg2->attributeIs("difficulty", "1");
    
    // -- Segment expedite support
    boatSeg1->attributeIs("expedite support", "yes");
    boatSeg2->attributeIs("expedite support", "yes");
    truckSeg1->attributeIs("expedite support", "yes");
    truckSeg2->attributeIs("expedite support", "yes");

    // -- Connectivity queries
    Ptr<Instance> conn = manager->instanceNew("myConn", "Conn");

    if (conn == NULL) {
        cerr << "Unexpected NULL conn." << endl;
        return 1;
    }

    cout << "**** explore customer1 : distance 1500 ****" << endl;
    cout << conn->attribute("explore customer1 : distance 1500") << endl;
    cout << endl;

    cout << "*** connect customer2 : customer1 ****" << endl;
    cout << conn->attribute("connect customer2 : customer1") << endl;
    cout << endl;

    // -- Statistics queries
    cout << "===== Stats attributes =====" << endl;
    cout << " --- Segments --- " << endl;
    cout << "# Truck segments : " << stats->attribute("Truck segment") << endl;
    cout << "Expediting %     : " << stats->attribute("expedite percentage") << endl;
    cout << " --- Terminals --- " << endl;
    cout << "# Plane terminals: " << stats->attribute("Plane terminal") << endl;
    cout << " --- Ports and customers --- " << endl;
    cout << "# Ports          : " << stats->attribute("Port") << endl;

    cerr << "Done!" << endl;

    return 0;
}
Example #4
0
 // Constructor: creates a data point labelled `lab` and links it after
 // `last` in the chain (sets this->prev and last->next); `grow` is
 // forwarded to the CD base class.
 // NOTE(review): assumes `last` is non-null — last->key() and
 // last->set_next() dereference it unconditionally; confirm callers
 // guarantee this.
 MD::MarkovData(const string &lab, Ptr<MarkovData> last, bool grow)
   : CD(lab, last->key(), grow)
 {
   set_prev(last);
   last->set_next(this);
 }
Example #5
0
/// Forces registration of the non-free SIFT and SURF algorithms.
/// Returns true only when both report a valid info structure.
bool initModule_nonfree(void)
{
    Ptr<Algorithm> sift = createSIFT();
    Ptr<Algorithm> surf = createSURF();
    // Keep the original short-circuit: surf->info() is consulted only
    // when the SIFT check already succeeded.
    if (sift->info() == 0)
        return false;
    return surf->info() != 0;
}
Example #6
0
// Model factory: builds a ModelBase from its string `type` name, configured
// by `options` and specialized for `use` (e.g. training vs. scoring).
// The expression graph is not available at this stage, so every branch
// constructs with a null graph; the graph is attached later.
Ptr<ModelBase> by_type(std::string type, usage use, Ptr<Options> options) {
  Ptr<ExpressionGraph> graph = nullptr; // graph unknown at this stage
  // clang-format off
  // Plain s2s encoder-decoder; "amun" and "nematus" are aliases that keep
  // their original name in "original-type".
  if(type == "s2s" || type == "amun" || type == "nematus") {
    return models::encoder_decoder()(options)
        ("usage", use)
        ("original-type", type)
            .push_back(models::encoder()("type", "s2s"))
            .push_back(models::decoder()("type", "s2s"))
            .construct(graph);
  }

  // Transformer encoder + transformer decoder.
  if(type == "transformer") {
    return models::encoder_decoder()(options)
        ("usage", use)
        .push_back(models::encoder()("type", "transformer"))
        .push_back(models::decoder()("type", "transformer"))
        .construct(graph);
  }

  // Hybrid: transformer encoder with an s2s decoder.
  if(type == "transformer_s2s") {
    return models::encoder_decoder()(options)
        ("usage", use)
        ("original-type", type)
            .push_back(models::encoder()("type", "transformer"))
            .push_back(models::decoder()("type", "s2s"))
            .construct(graph);
  }

  // Decoder-only language model built on the s2s decoder.
  if(type == "lm") {
    auto idx = options->has("index") ? options->get<size_t>("index") : 0;
    std::vector<int> dimVocabs = options->get<std::vector<int>>("dim-vocabs");
    int vocab = dimVocabs[0];
    // Replicate the first vocabulary size into every slot up to `idx`.
    dimVocabs.resize(idx + 1);
    std::fill(dimVocabs.begin(), dimVocabs.end(), vocab);

    return models::encoder_decoder()(options)
        ("usage", use)
        ("type", "s2s")
        ("original-type", type)
            .push_back(models::decoder()
                       ("index", idx)
                       ("dim-vocabs", dimVocabs))
            .construct(graph);
  }

  // Two s2s encoders with distinct prefixes ("encoder1", "encoder2"),
  // i.e. separate parameters, feeding one decoder.
  if(type == "multi-s2s") {
    size_t numEncoders = 2;
    auto ms2sFactory = models::encoder_decoder()(options)
        ("usage", use)
        ("type", "s2s")
        ("original-type", type);

    for(size_t i = 0; i < numEncoders; ++i) {
      auto prefix = "encoder" + std::to_string(i + 1);
      ms2sFactory.push_back(models::encoder()("prefix", prefix)("index", i));
    }

    ms2sFactory.push_back(models::decoder()("index", numEncoders));

    return ms2sFactory.construct(graph);
  }

  // As multi-s2s, but both encoders use the same "encoder" prefix.
  if(type == "shared-multi-s2s") {
    size_t numEncoders = 2;
    auto ms2sFactory = models::encoder_decoder()(options)
        ("usage", use)
        ("type", "s2s")
        ("original-type", type);

    for(size_t i = 0; i < numEncoders; ++i) {
      auto prefix = "encoder";
      ms2sFactory.push_back(models::encoder()("prefix", prefix)("index", i));
    }

    ms2sFactory.push_back(models::decoder()("index", numEncoders));

    return ms2sFactory.construct(graph);
  }

  // Transformer analogue of multi-s2s: two encoders, distinct prefixes.
  if(type == "multi-transformer") {
    size_t numEncoders = 2;
    auto mtransFactory = models::encoder_decoder()(options)
        ("usage", use)
        ("type", "transformer")
        ("original-type", type);

    for(size_t i = 0; i < numEncoders; ++i) {
      auto prefix = "encoder" + std::to_string(i + 1);
      mtransFactory.push_back(models::encoder()("prefix", prefix)("index", i));
    }
    mtransFactory.push_back(models::decoder()("index", numEncoders));

    return mtransFactory.construct(graph);
  }

  // Transformer analogue of shared-multi-s2s: encoders share one prefix.
  if(type == "shared-multi-transformer") {
    size_t numEncoders = 2;
    auto mtransFactory = models::encoder_decoder()(options)
        ("usage", use)
        ("type", "transformer")
        ("original-type", type);

    for(size_t i = 0; i < numEncoders; ++i) {
      auto prefix = "encoder";
      mtransFactory.push_back(models::encoder()("prefix", prefix)("index", i));
    }
    mtransFactory.push_back(models::decoder()("index", numEncoders));

    return mtransFactory.construct(graph);
  }

  // Decoder-only language model built on the transformer decoder; same
  // vocab-replication scheme as "lm" above.
  if(type == "lm-transformer") {
    auto idx = options->has("index") ? options->get<size_t>("index") : 0;
    std::vector<int> dimVocabs = options->get<std::vector<int>>("dim-vocabs");
    int vocab = dimVocabs[0];
    dimVocabs.resize(idx + 1);
    std::fill(dimVocabs.begin(), dimVocabs.end(), vocab);

    return models::encoder_decoder()(options)
        ("usage", use)
        ("type", "transformer")
        ("original-type", type)
            .push_back(models::decoder()
                       ("index", idx)
                       ("dim-vocabs", dimVocabs))
            .construct(graph);
  }

  if(type == "bert") {                           // for full BERT training
    return models::encoder_classifier()(options) //
        ("original-type", "bert")                // so we can query this
        ("usage", use)                           //
        .push_back(models::encoder()             //
                    ("type", "bert-encoder")     // close to original transformer encoder
                    ("index", 0))                //
        .push_back(models::classifier()          //
                    ("prefix", "masked-lm")      // prefix for parameter names
                    ("type", "bert-masked-lm")   //
                    ("index", 0))                // multi-task learning with MaskedLM
        .push_back(models::classifier()          //
                    ("prefix", "next-sentence")  // prefix for parameter names
                    ("type", "bert-classifier")  //
                    ("index", 1))                // next sentence prediction
        .construct(graph);
  }

  if(type == "bert-classifier") {                // for BERT fine-tuning on non-BERT classification task
    return models::encoder_classifier()(options) //
        ("original-type", "bert-classifier")     // so we can query this if needed
        ("usage", use)                           //
        .push_back(models::encoder()             //
                    ("type", "bert-encoder")     //
                    ("index", 0))                // close to original transformer encoder
        .push_back(models::classifier()          //
                    ("type", "bert-classifier")  //
                    ("index", 1))                // next sentence prediction
        .construct(graph);
  }

#ifdef COMPILE_EXAMPLES
  // @TODO: examples should be compiled optionally
  // Example MNIST feed-forward network; wrapped differently per usage.
  if(type == "mnist-ffnn") {
    auto mnist = New<MnistFeedForwardNet>(options);
    if(use == usage::scoring)
      return New<Scorer>(mnist, New<MNISTLogsoftmax>());
    else if(use == usage::training)
      return New<Trainer>(mnist, New<MNISTCrossEntropyCost>());
    else
      return mnist;
  }
#endif

#ifdef CUDNN
#ifdef COMPILE_EXAMPLES
  // Example MNIST LeNet (convolutional), only with cuDNN available.
  if(type == "mnist-lenet") {
    auto mnist = New<MnistLeNet>(options);
    if(use == usage::scoring)
      return New<Scorer>(mnist, New<MNISTLogsoftmax>());
    else if(use == usage::training)
      return New<Trainer>(mnist, New<MNISTCrossEntropyCost>());
    else
      return mnist;
  }
#endif
  // Character-level encoder (requires cuDNN) with an s2s decoder.
  if(type == "char-s2s") {
    return models::encoder_decoder()(options)
        ("usage", use)
        ("original-type", type)
            .push_back(models::encoder()("type", "char-s2s"))
            .push_back(models::decoder()("type", "s2s"))
            .construct(graph);
  }
#endif

  // clang-format on
  // No branch matched: unknown type is a hard error.
  ABORT("Unknown model type: {}", type);
}
Example #7
0
 // Density of the data point `dp`: the transition probability
 // Q(prev, current) when a predecessor exists, otherwise the initial
 // distribution pi0(current). Returned on the log scale when `logscale`.
 double MarkovModel::pdf(Ptr<DataPointType> dp, bool logscale)const{
   double density;
   if (!!dp->prev()) {
     density = Q(dp->prev()->value(), dp->value());
   } else {
     density = pi0(dp->value());
   }
   if (logscale) {
     return safelog(density);
   }
   return density;
 }
Example #8
0
 bool train(const Ptr<TrainData>& data, int)
 {
     Mat samples = data->getTrainSamples(), labels;
     return trainEM(samples, labels, noArray(), noArray());
 }
Example #9
0
///----------------------------------------------------------------------------
/// Authenticates a user and optionally checks/returns roles.
///
/// With no session id, authentication is by user name + password against the
/// security cache; with a session id, the user is resolved through
/// MgSessionManager (which also refreshes the session's last-accessed time).
/// If `requiredRoles` is non-NULL the user must belong to them, otherwise
/// MgUnauthorizedAccessException is thrown. Returns the user's assigned
/// roles when `returnAssignedRoles` is true (caller owns the detached
/// collection), otherwise NULL. Failures are logged and rethrown.
///----------------------------------------------------------------------------
MgStringCollection* MgSecurityManager::Authenticate(
    MgUserInformation* userInformation, MgStringCollection* requiredRoles,
    bool returnAssignedRoles)
{
    // Serialize access to the shared security cache.
    ACE_MT(ACE_GUARD_RETURN(ACE_Recursive_Thread_Mutex, ace_mon, sm_mutex, NULL));

    Ptr<MgStringCollection> assignedRoles;

    MG_TRY()

    if (NULL == userInformation)
    {
        throw new MgAuthenticationFailedException(
            L"MgSecurityManager.Authenticate", __LINE__, __WFILE__, NULL, L"", NULL);
    }

    STRING user = userInformation->GetUserName();
    STRING session = userInformation->GetMgSessionId();

    if (session.empty())
    {
        // No session: authenticate by user name + password.
        if (user.empty())
        {
            throw new MgAuthenticationFailedException(
                L"MgSecurityManager.Authenticate", __LINE__, __WFILE__, NULL, L"", NULL);
        }

        const MgUserInfo* userInfo = sm_securityCache->GetUserInfo(user);
        assert(NULL != userInfo);

        if (userInformation->GetPassword() != userInfo->GetPassword())
        {
            throw new MgAuthenticationFailedException(
                L"MgSecurityManager.Authenticate", __LINE__, __WFILE__, NULL, L"", NULL);
        }
    }
    else
    {
        // Session id present: resolve the user from the session and
        // refresh its last-accessed time.
        user = MgSessionManager::UpdateLastAccessedTime(session);
    }

    // Role membership is only enforced when the caller supplies roles.
    if (NULL != requiredRoles &&
            !sm_securityCache->IsUserInRoles(user, requiredRoles))
    {
        MG_LOG_AUTHENTICATION_ENTRY(MgResources::UnauthorizedAccess.c_str());

        throw new MgUnauthorizedAccessException(
            L"MgSecurityManager.Authenticate", __LINE__, __WFILE__, NULL, L"", NULL);
    }

    if (returnAssignedRoles)
    {
        assignedRoles = sm_securityCache->EnumerateRoles(user);
    }

    // Commented out logging of successful authentication because it creates lots of entries in the Authentication.log
//    MG_LOG_AUTHENTICATION_ENTRY(MgResources::Success.c_str());

    MG_CATCH(L"MgSecurityManager.Authenticate")

    if (mgException != NULL)
    {
        MG_LOG_AUTHENTICATION_ENTRY(MgResources::Failure.c_str());
    }

    MG_THROW()

    // Caller takes ownership; NULL when returnAssignedRoles was false.
    return assignedRoles.Detach();
}
Example #10
0
//----------------------------------------------------------------------------
// Visual-odometry driver: loads calibration, images and ground truth from
// argv[1], estimates frame-to-frame motion with the method named by argv[2]
// ("essential_matrix" or "homography") and prints the resulting trajectory.
//----------------------------------------------------------------------------
int main(int _nargs, char **_vargs)
{
	DebugLevel debugLevel = LOW;
	bool showImages = false;

	// Delete previous results
	system("rm -rf ./output/*");

	// argv[1] = input file, argv[2] = method name — both are required.
	// (The old usage string omitted the mandatory <method> argument.)
	if (_nargs < 3)
	{
		cout << "Not enough arguments\nUsage:\n\tHomework1 <input_file> <method>\n\n";
		return EXIT_FAILURE;
	}

	// Initialize printer
	Printer::getInstance()->calculateConversionRate(1000, 1000);

	// Method to be used to solve the visual odometry problem
	string method = _vargs[2];
	cout << "Using method " << method << "\n";

	// Get the calibration matrix
	Mat K = Mat::zeros(3, 3, CV_64FC1);
	Loader::loadCalibrationMatrix(K, _vargs[1]);

	// Load images
	vector<Mat> images;
	Loader::loadInput(images, _vargs[1]);

	// Get ground truth
	vector<Mat> groundTruth;
	Loader::loadGroundTruth(groundTruth, _vargs[1], images.size());

	// Print groundtruth trajectory
	Printer::printTrajectory(groundTruth, "groundTruth.png", RED);

	Ptr<FeatureDetector> featureExtractor = FeatureDetector::create("HARRIS");
	Ptr<DescriptorExtractor> descriptorExtractor = DescriptorExtractor::create("SIFT");
	FlannBasedMatcher matcher;

	vector<vector<KeyPoint>> keypoints;
	vector<Mat> descriptors;
	vector<Mat> trajectory;
	// Start pose is the homogeneous origin [0 0 0 1]^T. Mat::at takes
	// (row, col): the previous at<double>(0, 3) indexed out of bounds on
	// this 4x1 matrix (it only worked by accident on contiguous storage
	// and asserts in debug builds).
	Mat start = Mat::zeros(4, 1, CV_64FC1);
	start.at<double>(3, 0) = 1;
	trajectory.push_back(start);

	// Process data
	initModule_nonfree();
	for (size_t j = 0; j < images.size(); j++)
	{
		cout << "*** Processing image " << j << " ***\n";
		Mat image = images[j];

		// Keypoints extraction
		keypoints.push_back(vector<KeyPoint>());
		featureExtractor->detect(image, keypoints.back());

		// Feature extraction
		descriptors.push_back(Mat());
		descriptorExtractor->compute(image, keypoints.back(), descriptors.back());

		if (j > 0)
		{
			int train = keypoints.size() - 1;
			int query = train - 1;

			// Match points between images
			vector<DMatch> matches = Helper::getMatches(descriptors[query], descriptors[train], matcher);

			if (showImages)
				Helper::showMatches(images[query], images[train], keypoints[query], keypoints[train], matches);

			if (matches.size() >= 8)
			{
				Mat transformation;
				if (method.compare("essential_matrix") == 0)
				{
					/** RESOLUTION USING ESSENTIAL MATRIX */

					// Calculate the fundamental matrix
					Mat F;
					Helper::getFundamentalMatrix(F, matches, keypoints[query], keypoints[train]);
					if (debugLevel >= LOW)
						Printer::printMatrix<double>(F, 3, "F:");

					// Calculate E
					//Mat E;
					//Helper::getEssentialMatrix(E, K, F);
					//if (debugLevel >= MEDIUM)
					//Printer::printMatrix<double>(E, 3, "E:");

					// Calculate the motion between the two images
					Mat R, tx, points3D;
					Helper::calculateMotion(F, K, R, tx, points3D, keypoints[query], keypoints[train], matches);

					hconcat(R, tx, transformation);
				}
				else if (method.compare("homography") == 0)
				{
					/** RESOLUTION USING THE HOMOGRAPHY */
					vector<unsigned char> matchesUsedMask;
					vector<Point2f> trainPoints, queryPoints;
					Helper::extractPoints(keypoints[train], keypoints[query], matches, trainPoints, queryPoints);
					Mat H = findHomography(trainPoints, queryPoints, RANSAC, 4, matchesUsedMask);

					// Recover an approximate [R|t] from the homography: the
					// first two columns are the normalized first two columns
					// of H, the third is their cross product, and the
					// translation is H's last column scaled by the mean norm.
					transformation = Mat::eye(3, 4, CV_64FC1);
					double norm1 = (double) norm(H.col(0));
					double norm2 = (double) norm(H.col(1));
					double tnorm = (norm1 + norm2) / 2.0f;

					Mat v1 = H.col(0);
					Mat v2 = transformation.col(0);
					cv::normalize(v1, v2);

					v1 = H.col(1);
					v2 = transformation.col(1);
					cv::normalize(v1, v2);

					v1 = transformation.col(0);
					v2 = transformation.col(1);
					Mat v3 = v1.cross(v2);
					Mat c2 = transformation.col(2);
					v3.copyTo(c2);

					transformation.col(3) = H.col(2) / tnorm;
				}

				// Chain the new relative transformation onto the last pose
				// and re-append the homogeneous coordinate.
				Mat pose = transformation * trajectory.back().clone();
				vconcat(pose, Mat::eye(1, 1, CV_64FC1), pose);
				trajectory.push_back(pose);

				if (debugLevel >= LOW)
					Printer::printMatrix<double>(trajectory.back(), 3, "Pose:");
			}
			else
			{
				// Too few matches: discard this frame from BOTH containers.
				// The original erased only `keypoints`, leaving `descriptors`
				// one entry longer and desynchronizing the query/train
				// indices for every subsequent frame.
				keypoints.erase(keypoints.begin() + keypoints.size() - 1, keypoints.end());
				descriptors.erase(descriptors.begin() + descriptors.size() - 1, descriptors.end());
			}
		}
	}

	Printer::getInstance()->calculateConversionRate(400, 400);

	cout << "Printing final trajectory\n";
	Printer::printTrajectory2(trajectory, "trajectory.png", GREEN);

	return EXIT_SUCCESS;
}
Example #11
0
int main(int argc, char *argv[])
{
    // Command-line handling: -p/--path points at the dataset root.
    const char *keys =
        "{ help h usage ? |    | show this message }"
        "{ path p         |true| path to dataset (bounding/, camera/, P/, png/ folders) }";
    CommandLineParser parser(argc, argv, keys);
    string path(parser.get<string>("path"));
    if (parser.has("help") || path=="true")
    {
        parser.printMessage();
        return -1;
    }

    // Load the EPFL multi-view stereo dataset from the given root.
    Ptr<MSM_epfl> dataset = MSM_epfl::create();
    dataset->load(path);

    // Each train entry carries the full information for one image; print
    // the dataset size, then dump every field of the first entry.
    printf("dataset size: %u\n", (unsigned int)dataset->getTrain().size());
    MSM_epflObj *example = static_cast<MSM_epflObj *>(dataset->getTrain()[0].get());
    printf("first image:\nname: %s\n", example->imageName.c_str());

    // 2x3 bounding matrix.
    printf("\nbounding:\n");
    for (int row = 0; row < 2; ++row)
    {
        for (int col = 0; col < 3; ++col)
            printf("%f ", example->bounding(row, col));
        printf("\n");
    }

    // Camera fields, printed in order: mat1 (3x3), mat2 (3-vector),
    // mat3 (3x3), mat4 (3-vector), then the image dimensions.
    printf("\ncamera:\n");
    for (int row = 0; row < 3; ++row)
    {
        for (int col = 0; col < 3; ++col)
            printf("%f ", example->camera.mat1(row, col));
        printf("\n");
    }
    printf("\n");

    for (int idx = 0; idx < 3; ++idx)
        printf("%f ", example->camera.mat2[idx]);
    printf("\n\n");

    for (int row = 0; row < 3; ++row)
    {
        for (int col = 0; col < 3; ++col)
            printf("%f ", example->camera.mat3(row, col));
        printf("\n");
    }
    printf("\n");

    for (int idx = 0; idx < 3; ++idx)
        printf("%f ", example->camera.mat4[idx]);
    printf("\n\n");

    printf("image width: %u, height: %u\n", example->camera.imageWidth, example->camera.imageHeight);

    // 3x4 projection matrix P.
    printf("\nP:\n");
    for (int row = 0; row < 3; ++row)
    {
        for (int col = 0; col < 4; ++col)
            printf("%f ", example->p(row, col));
        printf("\n");
    }

    return 0;
}
Example #12
0
// Removes the FIB route for `prefix` on `node` that runs over the direct
// point-to-point link between `node` and `otherNode`.
// Scans every net device on `node`; for each point-to-point device it
// follows the channel and checks whether either endpoint belongs to
// `otherNode`. On the first match it resolves the NDN face for that device
// and delegates to the (node, prefix, face) overload. Fails fatally when
// the two nodes are not directly connected.
void
FibHelper::RemoveRoute(Ptr<Node> node, const Name& prefix, Ptr<Node> otherNode)
{
  for (uint32_t deviceId = 0; deviceId < node->GetNDevices(); deviceId++) {
    // Only point-to-point devices can form the direct link we are after;
    // the DynamicCast filters out everything else.
    Ptr<PointToPointNetDevice> netDevice =
      DynamicCast<PointToPointNetDevice>(node->GetDevice(deviceId));
    if (netDevice == 0)
      continue;

    Ptr<Channel> channel = netDevice->GetChannel();
    if (channel == 0)
      continue;

    // A p2p channel has exactly two endpoints; match either side.
    if (channel->GetDevice(0)->GetNode() == otherNode
        || channel->GetDevice(1)->GetNode() == otherNode) {
      Ptr<L3Protocol> ndn = node->GetObject<L3Protocol>();
      NS_ASSERT_MSG(ndn != 0, "Ndn stack should be installed on the node");

      shared_ptr<Face> face = ndn->getFaceByNetDevice(netDevice);
      NS_ASSERT_MSG(face != 0, "There is no face associated with the p2p link");

      // Delegate the actual FIB manipulation to the face-based overload.
      RemoveRoute(node, prefix, face);

      return;
    }
  }

  NS_FATAL_ERROR("Cannot remove route: Node# " << node->GetId() << " and Node# " << otherNode->GetId()
                                            << " are not connected");
}
Example #13
0
///----------------------------------------------------------------------------
/// <summary>
/// Executes the operation.
/// </summary>
///
/// <exceptions>
/// MgException
/// </exceptions>
///----------------------------------------------------------------------------
void MgOpGetLayer::Execute()
{
    ACE_DEBUG((LM_DEBUG, ACE_TEXT("  (%t) MgOpGetLayer::Execute()\n")));

    MG_LOG_OPERATION_MESSAGE(L"GetLayer");

    MG_SERVER_DRAWING_SERVICE_TRY()

    MG_LOG_OPERATION_MESSAGE_INIT(m_packet.m_OperationVersion, m_packet.m_NumArguments);

    ACE_ASSERT(m_stream != NULL);

    // The GetLayer packet carries exactly 3 arguments: the resource
    // identifier, the section name and the layer name.
    if (3 == m_packet.m_NumArguments)
    {
        Ptr<MgResourceIdentifier> identifier = (MgResourceIdentifier*)m_stream->GetObject();

        STRING sectionName;
        m_stream->GetString(sectionName);

        STRING layerName;
        m_stream->GetString(layerName);

        BeginExecution();

        // Record the operation parameters for the access log.
        MG_LOG_OPERATION_MESSAGE_PARAMETERS_START();
        MG_LOG_OPERATION_MESSAGE_ADD_STRING((NULL == identifier) ? L"MgResourceIdentifier" : identifier->ToString().c_str());
        MG_LOG_OPERATION_MESSAGE_ADD_SEPARATOR();
        MG_LOG_OPERATION_MESSAGE_ADD_STRING(sectionName.c_str());
        MG_LOG_OPERATION_MESSAGE_ADD_SEPARATOR();
        MG_LOG_OPERATION_MESSAGE_ADD_STRING(layerName.c_str());
        MG_LOG_OPERATION_MESSAGE_PARAMETERS_END();

        Validate();

        // Delegate to the drawing service and stream the layer back.
        Ptr<MgByteReader> byteReader = m_service->GetLayer(identifier, sectionName, layerName);

        EndExecution(byteReader);
    }
    else
    {
        // Wrong argument count: log empty parameters; m_argsRead stays
        // false and the exception below is raised.
        MG_LOG_OPERATION_MESSAGE_PARAMETERS_START();
        MG_LOG_OPERATION_MESSAGE_PARAMETERS_END();
    }

    if (!m_argsRead)
    {
        throw new MgOperationProcessingException(L"MgOpGetLayer.Execute",
            __LINE__, __WFILE__, NULL, L"", NULL);
    }

    // Successful operation
    MG_LOG_OPERATION_MESSAGE_ADD_STRING(MgResources::Success.c_str());

    MG_SERVER_DRAWING_SERVICE_CATCH(L"MgOpGetLayer.Execute");

    if (mgException != NULL)
    {
        // Failed operation
        MG_LOG_OPERATION_MESSAGE_ADD_STRING(MgResources::Failure.c_str());
    }

    // Add access log entry for operation
    MG_LOG_OPERATION_MESSAGE_ACCESS_ENTRY();

    MG_SERVER_DRAWING_SERVICE_THROW()
}
Example #14
0
bool stitch(vector<Mat> orig_images) {
// Check if have enough images
try {
    int num_images = static_cast<int>(orig_images.size());
    if (num_images < 1)
    {
        return 1;
    }

    if (num_images < 2) {
        imwrite(result_name, orig_images[0]);
	return 1;
    }

    double work_scale = 1, seam_scale = 1, compose_scale = 1;
    bool is_work_scale_set = false, is_seam_scale_set = false, is_compose_scale_set = false;

    LOGLN("Finding features...");
#if ENABLE_LOG
    int64 t = getTickCount();
#endif

    Ptr<FeaturesFinder> finder;
    if (features_type == "surf")
    {
#if defined(HAVE_OPENCV_NONFREE) && defined(HAVE_OPENCV_GPU)
        if (try_gpu && gpu::getCudaEnabledDeviceCount() > 0)
            finder = new SurfFeaturesFinderGpu();
        else
#endif
            finder = new SurfFeaturesFinder();
    }
    else if (features_type == "orb")
    {
        finder = new OrbFeaturesFinder();
    }
    else
    {
        cout << "Unknown 2D features type: '" << features_type << "'.\n";
        return -1;
    }

    Mat full_img, img;
    vector<ImageFeatures> features(num_images);
    vector<Mat> images(num_images);
    vector<Size> full_img_sizes(num_images);
    double seam_work_aspect = 1;

    for (int i = 0; i < num_images; ++i)
    {
        full_img = orig_images[i];
        full_img_sizes[i] = full_img.size();

        if (full_img.empty())
        {
            // LOGLN("Can't open image " << img_names[i]);
            return -1;
        }
        if (work_megapix < 0)
        {
            img = full_img;
            work_scale = 1;
            is_work_scale_set = true;
        }
        else
        {
            if (!is_work_scale_set)
            {
                work_scale = min(1.0, sqrt(work_megapix * 1e6 / full_img.size().area()));
                is_work_scale_set = true;
            }
            resize(full_img, img, Size(), work_scale, work_scale);
        }
        if (!is_seam_scale_set)
        {
            seam_scale = min(1.0, sqrt(seam_megapix * 1e6 / full_img.size().area()));
            seam_work_aspect = seam_scale / work_scale;
            is_seam_scale_set = true;
        }

        (*finder)(img, features[i]);
        features[i].img_idx = i;
        LOGLN("Features in image #" << i+1 << ": " << features[i].keypoints.size());

        resize(full_img, img, Size(), seam_scale, seam_scale);
        images[i] = img.clone();
    }

    finder->collectGarbage();
    full_img.release();
    img.release();

    LOGLN("Finding features, time: " << ((getTickCount() - t) / getTickFrequency()) << " sec");

    LOG("Pairwise matching");
#if ENABLE_LOG
    t = getTickCount();
#endif
    vector<MatchesInfo> pairwise_matches;
    BestOf2NearestMatcher matcher(try_gpu, match_conf);
    Mat matchMask(features.size(),features.size(),CV_8U,Scalar(0));
    for (int i = 0; i < num_images -1; ++i)
    {
        matchMask.at<char>(i,i+1) =1;
    }
    matcher(features, pairwise_matches,matchMask);
    matcher.collectGarbage();

    LOGLN("Pairwise matching, time: " << ((getTickCount() - t) / getTickFrequency()) << " sec");

    // Check if we should save matches graph
    if (save_graph)
    {
        LOGLN("Saving matches graph...");
        ofstream f(save_graph_to.c_str());
        // f << matchesGraphAsString(img_names, pairwise_matches, conf_thresh);
    }

    // Leave only images we are sure are from the same panorama
    vector<int> indices = leaveBiggestComponent(features, pairwise_matches, conf_thresh);
    vector<Mat> img_subset, untouched_images;
    vector<Size> full_img_sizes_subset;
    for (size_t i = 0; i < indices.size(); ++i)
    {
        untouched_images.push_back(orig_images[indices[i]]);
        img_subset.push_back(images[indices[i]]);
        full_img_sizes_subset.push_back(full_img_sizes[indices[i]]);
    }

    images = img_subset;
    full_img_sizes = full_img_sizes_subset;

    // Check if we still have enough images
    num_images = static_cast<int>(images.size());
    if (num_images < 2)
    {
        LOGLN("Need more images");
        return -1;
    }

    HomographyBasedEstimator estimator;
    vector<CameraParams> cameras;
    estimator(features, pairwise_matches, cameras);

    for (size_t i = 0; i < cameras.size(); ++i)
    {
        Mat R;
        cameras[i].R.convertTo(R, CV_32F);
        cameras[i].R = R;
        LOGLN("Initial intrinsics #" << indices[i]+1 << ":\n" << cameras[i].K());
    }

    Ptr<detail::BundleAdjusterBase> adjuster;
    if (ba_cost_func == "reproj") adjuster = new detail::BundleAdjusterReproj();
    else if (ba_cost_func == "ray") adjuster = new detail::BundleAdjusterRay();
    else
    {
        cout << "Unknown bundle adjustment cost function: '" << ba_cost_func << "'.\n";
        return -1;
    }
    adjuster->setConfThresh(conf_thresh);
    Mat_<uchar> refine_mask = Mat::zeros(3, 3, CV_8U);
    if (ba_refine_mask[0] == 'x') refine_mask(0,0) = 1;
    if (ba_refine_mask[1] == 'x') refine_mask(0,1) = 1;
    if (ba_refine_mask[2] == 'x') refine_mask(0,2) = 1;
    if (ba_refine_mask[3] == 'x') refine_mask(1,1) = 1;
    if (ba_refine_mask[4] == 'x') refine_mask(1,2) = 1;
    adjuster->setRefinementMask(refine_mask);
    (*adjuster)(features, pairwise_matches, cameras);

    // Find median focal length

    vector<double> focals;
    for (size_t i = 0; i < cameras.size(); ++i)
    {
        LOGLN("Camera #" << indices[i]+1 << ":\n" << cameras[i].K());
        focals.push_back(cameras[i].focal);
    }

    sort(focals.begin(), focals.end());
    float warped_image_scale;
    if (focals.size() % 2 == 1)
        warped_image_scale = static_cast<float>(focals[focals.size() / 2]);
    else
        warped_image_scale = static_cast<float>(focals[focals.size() / 2 - 1] + focals[focals.size() / 2]) * 0.5f;

    if (do_wave_correct)
    {
        vector<Mat> rmats;
        for (size_t i = 0; i < cameras.size(); ++i)
            rmats.push_back(cameras[i].R);
        waveCorrect(rmats, wave_correct);
        for (size_t i = 0; i < cameras.size(); ++i)
            cameras[i].R = rmats[i];
    }

    LOGLN("Warping images (auxiliary)... ");
#if ENABLE_LOG
    t = getTickCount();
#endif

    vector<Point> corners(num_images);
    vector<Mat> masks_warped(num_images);
    vector<Mat> images_warped(num_images);
    vector<Size> sizes(num_images);
    vector<Mat> masks(num_images);

    // Preapre images masks
    for (int i = 0; i < num_images; ++i)
    {
        masks[i].create(images[i].size(), CV_8U);
        masks[i].setTo(Scalar::all(255));
    }

    // Warp images and their masks

    Ptr<WarperCreator> warper_creator;
#if defined(HAVE_OPENCV_GPU)
    if (try_gpu && gpu::getCudaEnabledDeviceCount() > 0)
    {
        if (warp_type == "plane") warper_creator = new cv::PlaneWarperGpu();
        else if (warp_type == "cylindrical") warper_creator = new cv::CylindricalWarperGpu();
        else if (warp_type == "spherical") warper_creator = new cv::SphericalWarperGpu();
    }
    else
#endif
    {
        if (warp_type == "plane") warper_creator = new cv::PlaneWarper();
        else if (warp_type == "cylindrical") warper_creator = new cv::CylindricalWarper();
        else if (warp_type == "spherical") warper_creator = new cv::SphericalWarper();
        else if (warp_type == "fisheye") warper_creator = new cv::FisheyeWarper();
        else if (warp_type == "stereographic") warper_creator = new cv::StereographicWarper();
        else if (warp_type == "compressedPlaneA2B1") warper_creator = new cv::CompressedRectilinearWarper(2, 1);
        else if (warp_type == "compressedPlaneA1.5B1") warper_creator = new cv::CompressedRectilinearWarper(1.5, 1);
        else if (warp_type == "compressedPlanePortraitA2B1") warper_creator = new cv::CompressedRectilinearPortraitWarper(2, 1);
        else if (warp_type == "compressedPlanePortraitA1.5B1") warper_creator = new cv::CompressedRectilinearPortraitWarper(1.5, 1);
        else if (warp_type == "paniniA2B1") warper_creator = new cv::PaniniWarper(2, 1);
        else if (warp_type == "paniniA1.5B1") warper_creator = new cv::PaniniWarper(1.5, 1);
        else if (warp_type == "paniniPortraitA2B1") warper_creator = new cv::PaniniPortraitWarper(2, 1);
        else if (warp_type == "paniniPortraitA1.5B1") warper_creator = new cv::PaniniPortraitWarper(1.5, 1);
        else if (warp_type == "mercator") warper_creator = new cv::MercatorWarper();
        else if (warp_type == "transverseMercator") warper_creator = new cv::TransverseMercatorWarper();
    }

    if (warper_creator.empty())
    {
        cout << "Can't create the following warper '" << warp_type << "'\n";
        return 1;
    }

    Ptr<RotationWarper> warper = warper_creator->create(static_cast<float>(warped_image_scale * seam_work_aspect));

    for (int i = 0; i < num_images; ++i)
    {
        Mat_<float> K;
        cameras[i].K().convertTo(K, CV_32F);
        float swa = (float)seam_work_aspect;
        K(0,0) *= swa; K(0,2) *= swa;
        K(1,1) *= swa; K(1,2) *= swa;

        corners[i] = warper->warp(images[i], K, cameras[i].R, INTER_LINEAR, BORDER_REFLECT, images_warped[i]);
        sizes[i] = images_warped[i].size();

        warper->warp(masks[i], K, cameras[i].R, INTER_NEAREST, BORDER_CONSTANT, masks_warped[i]);
    }

    vector<Mat> images_warped_f(num_images);
    for (int i = 0; i < num_images; ++i)
        images_warped[i].convertTo(images_warped_f[i], CV_32F);

    LOGLN("Warping images, time: " << ((getTickCount() - t) / getTickFrequency()) << " sec");

    Ptr<ExposureCompensator> compensator = ExposureCompensator::createDefault(expos_comp_type);
    compensator->feed(corners, images_warped, masks_warped);

    Ptr<SeamFinder> seam_finder;
    if (seam_find_type == "no")
        seam_finder = new detail::NoSeamFinder();
    else if (seam_find_type == "voronoi")
        seam_finder = new detail::VoronoiSeamFinder();
    else if (seam_find_type == "gc_color")
    {
#if defined(HAVE_OPENCV_GPU)
        if (try_gpu && gpu::getCudaEnabledDeviceCount() > 0)
            seam_finder = new detail::GraphCutSeamFinderGpu(GraphCutSeamFinderBase::COST_COLOR);
        else
#endif
            seam_finder = new detail::GraphCutSeamFinder(GraphCutSeamFinderBase::COST_COLOR);
    }
    else if (seam_find_type == "gc_colorgrad")
    {
#if defined(HAVE_OPENCV_GPU)
        if (try_gpu && gpu::getCudaEnabledDeviceCount() > 0)
            seam_finder = new detail::GraphCutSeamFinderGpu(GraphCutSeamFinderBase::COST_COLOR_GRAD);
        else
#endif
            seam_finder = new detail::GraphCutSeamFinder(GraphCutSeamFinderBase::COST_COLOR_GRAD);
    }
    else if (seam_find_type == "dp_color")
        seam_finder = new detail::DpSeamFinder(DpSeamFinder::COLOR);
    else if (seam_find_type == "dp_colorgrad")
        seam_finder = new detail::DpSeamFinder(DpSeamFinder::COLOR_GRAD);
    if (seam_finder.empty())
    {
        cout << "Can't create the following seam finder '" << seam_find_type << "'\n";
        return 1;
    }

    seam_finder->find(images_warped_f, corners, masks_warped);

    // Release unused memory
    images.clear();
    images_warped.clear();
    images_warped_f.clear();
    masks.clear();

    LOGLN("Compositing...");
#if ENABLE_LOG
    t = getTickCount();
#endif

    Mat img_warped, img_warped_s;
    Mat dilated_mask, seam_mask, mask, mask_warped;
    Ptr<Blender> blender;
    //double compose_seam_aspect = 1;
    double compose_work_aspect = 1;

    for (int img_idx = 0; img_idx < num_images; ++img_idx)
    {
        LOGLN("Compositing image #" << indices[img_idx]+1);

        // Read image and resize it if necessary
        full_img = untouched_images[img_idx];
        if (!is_compose_scale_set)
        {
            if (compose_megapix > 0)
                compose_scale = min(1.0, sqrt(compose_megapix * 1e6 / full_img.size().area()));
            is_compose_scale_set = true;

            // Compute relative scales
            //compose_seam_aspect = compose_scale / seam_scale;
            compose_work_aspect = compose_scale / work_scale;

            // Update warped image scale
            warped_image_scale *= static_cast<float>(compose_work_aspect);
            warper = warper_creator->create(warped_image_scale);

            // Update corners and sizes
            for (int i = 0; i < num_images; ++i)
            {
                // Update intrinsics
                cameras[i].focal *= compose_work_aspect;
                cameras[i].ppx *= compose_work_aspect;
                cameras[i].ppy *= compose_work_aspect;

                // Update corner and size
                Size sz = full_img_sizes[i];
                if (std::abs(compose_scale - 1) > 1e-1)
                {
                    sz.width = cvRound(full_img_sizes[i].width * compose_scale);
                    sz.height = cvRound(full_img_sizes[i].height * compose_scale);
                }

                Mat K;
                cameras[i].K().convertTo(K, CV_32F);
                Rect roi = warper->warpRoi(sz, K, cameras[i].R);
                corners[i] = roi.tl();
                sizes[i] = roi.size();
            }
        }
        if (abs(compose_scale - 1) > 1e-1)
            resize(full_img, img, Size(), compose_scale, compose_scale);
        else
            img = full_img;
        full_img.release();
        Size img_size = img.size();

        Mat K;
        cameras[img_idx].K().convertTo(K, CV_32F);

        // Warp the current image
        warper->warp(img, K, cameras[img_idx].R, INTER_LINEAR, BORDER_REFLECT, img_warped);

        // Warp the current image mask
        mask.create(img_size, CV_8U);
        mask.setTo(Scalar::all(255));
        warper->warp(mask, K, cameras[img_idx].R, INTER_NEAREST, BORDER_CONSTANT, mask_warped);

        // Compensate exposure
        compensator->apply(img_idx, corners[img_idx], img_warped, mask_warped);

        img_warped.convertTo(img_warped_s, CV_16S);
        img_warped.release();
        img.release();
        mask.release();

        dilate(masks_warped[img_idx], dilated_mask, Mat());
        resize(dilated_mask, seam_mask, mask_warped.size());
        mask_warped = seam_mask & mask_warped;

        if (blender.empty())
        {
            blender = Blender::createDefault(blend_type, try_gpu);
            Size dst_sz = resultRoi(corners, sizes).size();
            float blend_width = sqrt(static_cast<float>(dst_sz.area())) * blend_strength / 100.f;
            if (blend_width < 1.f)
                blender = Blender::createDefault(Blender::NO, try_gpu);
            else if (blend_type == Blender::MULTI_BAND)
            {
                MultiBandBlender* mb = dynamic_cast<MultiBandBlender*>(static_cast<Blender*>(blender));
                mb->setNumBands(static_cast<int>(ceil(log(blend_width)/log(2.)) - 1.));
                LOGLN("Multi-band blender, number of bands: " << mb->numBands());
            }
            else if (blend_type == Blender::FEATHER)
            {
                FeatherBlender* fb = dynamic_cast<FeatherBlender*>(static_cast<Blender*>(blender));
                fb->setSharpness(1.f/blend_width);
                LOGLN("Feather blender, sharpness: " << fb->sharpness());
            }
            blender->prepare(corners, sizes);
        }

        // Blend the current image
        blender->feed(img_warped_s, mask_warped, corners[img_idx]);
    }

    Mat result;
    Mat result_mask;
    blender->blend(result, result_mask);

    LOGLN("Compositing, time: " << ((getTickCount() - t) / getTickFrequency()) << " sec");

    imwrite(result_name, result);

    LOGLN("Finished, total time: " << ((getTickCount() - app_start_time) / getTickFrequency()) << " sec");
    return 1;
}catch (cv::Exception &e) {
	const char* err_msg = e.what();
	cout << err_msg << endl;
	return -1;
}
}
Example #15
0
// Track the reference object (captured in first_frame) in the given frame.
//
// Pipeline: detect keypoints/descriptors, 2-NN match against the reference
// descriptors with Lowe's ratio test, estimate a homography with RANSAC,
// then render either a plain side-by-side (tracking failed) or the inlier
// matches plus bounding box (tracking succeeded).
//
// @param frame  current video frame (BGR image)
// @param stats  out: keypoint/match/inlier counts, inlier ratio and fps
// @return       visualisation image (reference frame | current frame)
Mat Tracker::process(const Mat frame, Stats& stats)
{
    TickMeter tm;
    vector<KeyPoint> kp;
    Mat desc;

    tm.start();
    detector->detectAndCompute(frame, noArray(), kp, desc);
    stats.keypoints = (int)kp.size();

    vector< vector<DMatch> > matches;
    vector<KeyPoint> matched1, matched2;
    matcher->knnMatch(first_desc, desc, matches, 2);
    for(unsigned i = 0; i < matches.size(); i++) {
        // Lowe's ratio test.  Guard against knnMatch returning fewer than
        // two neighbours for a query (possible with few train descriptors);
        // indexing matches[i][1] unconditionally would be undefined behavior.
        if(matches[i].size() >= 2 &&
           matches[i][0].distance < nn_match_ratio * matches[i][1].distance) {
            matched1.push_back(first_kp[matches[i][0].queryIdx]);
            matched2.push_back(      kp[matches[i][0].trainIdx]);
        }
    }
    stats.matches = (int)matched1.size();

    Mat inlier_mask, homography;
    vector<KeyPoint> inliers1, inliers2;
    vector<DMatch> inlier_matches;
    // findHomography needs at least 4 point correspondences.
    if(matched1.size() >= 4) {
        homography = findHomography(Points(matched1), Points(matched2),
                                    RANSAC, ransac_thresh, inlier_mask);
    }
    tm.stop();
    stats.fps = 1. / tm.getTimeSec();

    if(matched1.size() < 4 || homography.empty()) {
        // Tracking failed: return the two frames side by side, no overlay.
        Mat res;
        hconcat(first_frame, frame, res);
        stats.inliers = 0;
        stats.ratio = 0;
        return res;
    }
    // Collect RANSAC inliers; re-index matches so drawMatches pairs them 1:1.
    for(unsigned i = 0; i < matched1.size(); i++) {
        if(inlier_mask.at<uchar>(i)) {
            int new_i = static_cast<int>(inliers1.size());
            inliers1.push_back(matched1[i]);
            inliers2.push_back(matched2[i]);
            inlier_matches.push_back(DMatch(new_i, new_i, 0));
        }
    }
    stats.inliers = (int)inliers1.size();
    stats.ratio = stats.inliers * 1.0 / stats.matches;

    // Project the reference bounding box into the current frame.
    vector<Point2f> new_bb;
    perspectiveTransform(object_bb, new_bb, homography);
    Mat frame_with_bb = frame.clone();
    if(stats.inliers >= bb_min_inliers) {
        drawBoundingBox(frame_with_bb, new_bb);
    }
    Mat res;
    drawMatches(first_frame, inliers1, frame_with_bb, inliers2,
                inlier_matches, res,
                Scalar(255, 0, 0), Scalar(255, 0, 0));
    return res;
}
Example #16
0
// Predicate: true when the smart pointer holds no Patch.
// Suitable for use with std::remove_if / std::count_if over Ptr<Patch> ranges.
bool _isempty (Ptr<Patch> i)
{
  const bool holds_nothing = i.empty();
  return holds_nothing;
}
Example #17
0
int main(int argc, char **argv)
{
    if( argc < 2 )
    {
        help();
        return -1;
    }
    structured_light::SinusoidalPattern::Params params;
    phase_unwrapping::HistogramPhaseUnwrapping::Params paramsUnwrapping;

    // Retrieve parameters written in the command line
    CommandLineParser parser(argc, argv, keys);
    params.width = parser.get<int>(0);
    params.height = parser.get<int>(1);
    params.nbrOfPeriods = parser.get<int>(2);
    params.setMarkers = parser.get<bool>(3);
    params.horizontal = parser.get<bool>(4);
    params.methodId = parser.get<int>(5);
    String outputCapturePath = parser.get<String>(6);

    params.shiftValue = static_cast<float>(2 * CV_PI / 3);
    params.nbrOfPixelsBetweenMarkers = 70;
    String outputPatternPath = parser.get<String>(7);
    String outputWrappedPhasePath = parser.get<String>(8);
    String outputUnwrappedPhasePath = parser.get<String>(9);
    String reliabilitiesPath = parser.get<String>(10);

    Ptr<structured_light::SinusoidalPattern> sinus = structured_light::SinusoidalPattern::create(params);
    Ptr<phase_unwrapping::HistogramPhaseUnwrapping> phaseUnwrapping;

    vector<Mat> patterns;
    Mat shadowMask;
    Mat unwrappedPhaseMap, unwrappedPhaseMap8;
    Mat wrappedPhaseMap, wrappedPhaseMap8;
    //Generate sinusoidal patterns
    sinus->generate(patterns);


    VideoCapture cap(CAP_PVAPI);
    if( !cap.isOpened() )
    {
        cout << "Camera could not be opened" << endl;
        return -1;
    }
    cap.set(CAP_PROP_PVAPI_PIXELFORMAT, CAP_PVAPI_PIXELFORMAT_MONO8);

    namedWindow("pattern", WINDOW_NORMAL);
    setWindowProperty("pattern", WND_PROP_FULLSCREEN, WINDOW_FULLSCREEN);
    imshow("pattern", patterns[0]);
    cout << "Press any key when ready" << endl;
    waitKey(0);

    int nbrOfImages = 30;
    int count = 0;

    vector<Mat> img(nbrOfImages);
    Size camSize(-1, -1);

    while( count < nbrOfImages )
    {
        for(int i = 0; i < (int)patterns.size(); ++i )
        {
            imshow("pattern", patterns[i]);
            waitKey(300);
            cap >> img[count];
            count += 1;
        }
    }

    cout << "press enter when ready" << endl;
    bool loop = true;
    while ( loop )
    {
        char c = (char) waitKey(0);
        if( c == 10 )
        {
            loop = false;
        }
    }

    switch(params.methodId)
    {
    case structured_light::FTP:
        for( int i = 0; i < nbrOfImages; ++i )
        {
            /*We need three images to compute the shadow mask, as described in the reference paper
             * even if the phase map is computed from one pattern only
            */
            vector<Mat> captures;
            if( i == nbrOfImages - 2 )
            {
                captures.push_back(img[i]);
                captures.push_back(img[i-1]);
                captures.push_back(img[i+1]);
            }
            else if( i == nbrOfImages - 1 )
            {
                captures.push_back(img[i]);
                captures.push_back(img[i-1]);
                captures.push_back(img[i-2]);
            }
            else
            {
                captures.push_back(img[i]);
                captures.push_back(img[i+1]);
                captures.push_back(img[i+2]);
            }
            sinus->computePhaseMap(captures, wrappedPhaseMap, shadowMask);
            if( camSize.height == -1 )
            {
                camSize.height = img[i].rows;
                camSize.width = img[i].cols;
                paramsUnwrapping.height = camSize.height;
                paramsUnwrapping.width = camSize.width;
                phaseUnwrapping =
                    phase_unwrapping::HistogramPhaseUnwrapping::create(paramsUnwrapping);
            }
            sinus->unwrapPhaseMap(wrappedPhaseMap, unwrappedPhaseMap, camSize, shadowMask);

            phaseUnwrapping->unwrapPhaseMap(wrappedPhaseMap, unwrappedPhaseMap, shadowMask);
            Mat reliabilities, reliabilities8;
            phaseUnwrapping->getInverseReliabilityMap(reliabilities);
            reliabilities.convertTo(reliabilities8, CV_8U, 255,128);

            ostringstream tt;
            tt << i;
            imwrite(reliabilitiesPath + tt.str() + ".png", reliabilities8);

            unwrappedPhaseMap.convertTo(unwrappedPhaseMap8, CV_8U, 1, 128);
            wrappedPhaseMap.convertTo(wrappedPhaseMap8, CV_8U, 255, 128);

            if( !outputUnwrappedPhasePath.empty() )
            {
                ostringstream name;
                name << i;
                imwrite(outputUnwrappedPhasePath + "_FTP_" + name.str() + ".png", unwrappedPhaseMap8);
            }

            if( !outputWrappedPhasePath.empty() )
            {
                ostringstream name;
                name << i;
                imwrite(outputWrappedPhasePath + "_FTP_" + name.str() + ".png", wrappedPhaseMap8);
            }
        }
        break;
    case structured_light::PSP:
    case structured_light::FAPS:
        for( int i = 0; i < nbrOfImages - 2; ++i )
        {
            vector<Mat> captures;
            captures.push_back(img[i]);
            captures.push_back(img[i+1]);
            captures.push_back(img[i+2]);

            sinus->computePhaseMap(captures, wrappedPhaseMap, shadowMask);

            if( camSize.height == -1 )
            {
                camSize.height = img[i].rows;
                camSize.width = img[i].cols;
                paramsUnwrapping.height = camSize.height;
                paramsUnwrapping.width = camSize.width;
                phaseUnwrapping =
                    phase_unwrapping::HistogramPhaseUnwrapping::create(paramsUnwrapping);
            }
            sinus->unwrapPhaseMap(wrappedPhaseMap, unwrappedPhaseMap, camSize, shadowMask);
            unwrappedPhaseMap.convertTo(unwrappedPhaseMap8, CV_8U, 1, 128);
            wrappedPhaseMap.convertTo(wrappedPhaseMap8, CV_8U, 255, 128);

            phaseUnwrapping->unwrapPhaseMap(wrappedPhaseMap, unwrappedPhaseMap, shadowMask);
            Mat reliabilities, reliabilities8;
            phaseUnwrapping->getInverseReliabilityMap(reliabilities);
            reliabilities.convertTo(reliabilities8, CV_8U, 255,128);

            ostringstream tt;
            tt << i;
            imwrite(reliabilitiesPath + tt.str() + ".png", reliabilities8);

            if( !outputUnwrappedPhasePath.empty() )
            {
                ostringstream name;
                name << i;
                if( params.methodId == structured_light::PSP )
                    imwrite(outputUnwrappedPhasePath + "_PSP_" + name.str() + ".png", unwrappedPhaseMap8);
                else
                    imwrite(outputUnwrappedPhasePath + "_FAPS_" + name.str() + ".png", unwrappedPhaseMap8);
            }

            if( !outputWrappedPhasePath.empty() )
            {
                ostringstream name;
                name << i;
                if( params.methodId == structured_light::PSP )
                    imwrite(outputWrappedPhasePath + "_PSP_" + name.str() + ".png", wrappedPhaseMap8);
                else
                    imwrite(outputWrappedPhasePath + "_FAPS_" + name.str() + ".png", wrappedPhaseMap8);
            }

            if( !outputCapturePath.empty() )
            {
                ostringstream name;
                name << i;
                if( params.methodId == structured_light::PSP )
                    imwrite(outputCapturePath + "_PSP_" + name.str() + ".png", img[i]);
                else
                    imwrite(outputCapturePath + "_FAPS_" + name.str() + ".png", img[i]);
                if( i == nbrOfImages - 3 )
                {
                    if( params.methodId == structured_light::PSP )
                    {
                        ostringstream nameBis;
                        nameBis << i+1;
                        ostringstream nameTer;
                        nameTer << i+2;
                        imwrite(outputCapturePath + "_PSP_" + nameBis.str() + ".png", img[i+1]);
                        imwrite(outputCapturePath + "_PSP_" + nameTer.str() + ".png", img[i+2]);
                    }
                    else
                    {
                        ostringstream nameBis;
                        nameBis << i+1;
                        ostringstream nameTer;
                        nameTer << i+2;
                        imwrite(outputCapturePath + "_FAPS_" + nameBis.str() + ".png", img[i+1]);
                        imwrite(outputCapturePath + "_FAPS_" + nameTer.str() + ".png", img[i+2]);
                    }
                }
            }
        }
        break;
    default:
        cout << "error" << endl;
    }
    cout << "done" << endl;

    if( !outputPatternPath.empty() )
    {
        for( int i = 0; i < 3; ++ i )
        {
            ostringstream name;
            name << i + 1;
            imwrite(outputPatternPath + name.str() + ".png", patterns[i]);
        }
    }

    loop = true;
    while( loop )
    {
        char key = (char) waitKey(0);
        if( key == 27 )
        {
            loop = false;
        }
    }
    return 0;
}
Example #18
0
 /// Thin forwarding wrapper: runs the `surf` member's detectAndCompute on
 /// `in`, restricted to `mask`, writing keypoints to `pts` and descriptors
 /// to `descriptors`.  When `useProvided` is true the keypoints already in
 /// `pts` are used instead of detecting new ones (SURF semantics).
 /// NOTE(review): T is presumably cv::Mat or cv::UMat — confirm at the
 /// enclosing template's instantiation sites.
 void operator()(const T& in, const T& mask, std::vector<cv::KeyPoint>& pts, T& descriptors, bool useProvided = false)
 {
     surf->detectAndCompute(in, mask, pts, descriptors, useProvided);
 }
/// <summary>
/// Executes the specific request.
/// </summary>
/// <returns>
/// MgHttpResponse
/// This contains the response (including MgHttpResult and StatusCode) from the server.
/// </returns>
// Handles a WFS DescribeFeatureType request: resolves the requested feature
// types, asks the feature service for their schema, and writes the result
// (or an OGC error document) into hResponse's MgHttpResult.
void MgHttpWfsDescribeFeatureType::Execute(MgHttpResponse& hResponse)
{
    Ptr<MgHttpResult> hResult = hResponse.GetResult();

    MG_HTTP_HANDLER_TRY()

    // We have to wrap the request parameters, since the outside
    // world is case-sensitive (with respect to names,) but
    // we need our parameters NOT to be so.
    Ptr<MgHttpRequestParam> origReqParams = m_hRequest->GetRequestParam();
    MgHttpRequestParameters Parms(origReqParams);
    MgHttpResponseStream Out;

    MgOgcServer::SetLoader(GetDocument);

    MgUserInformation::SetCurrentUserInfo(m_userInfo);

    // Instance a server-lette
    MgOgcWfsServer Wfs(Parms,Out);

    // Determine required feature types
    CPSZ pszFeatureTypes = Wfs.RequestParameter(MgHttpResourceStrings::reqWfsTypeName.c_str());
    STRING sFeatureTypes = pszFeatureTypes? pszFeatureTypes : _("");
    Ptr<MgStringCollection> featureTypeList;
    if(sFeatureTypes.empty())
    {
        // No TYPENAME parameter: describe all published feature types.
        featureTypeList = NULL;
    }
    else
    {
        // TYPENAME is a comma-separated list of qualified class names.
        featureTypeList = MgStringCollection::ParseCollection(sFeatureTypes, L",");
    }

    Ptr<MgResourceService> pResourceService = (MgResourceService*)(CreateService(MgServiceType::ResourceService));
    Ptr<MgFeatureService> pFeatureService = (MgFeatureService*)(CreateService(MgServiceType::FeatureService));

    // Retrieve feature definitions
    auto_ptr<MgWfsFeatureDefinitions> pFeatureTypes;
    if(NULL == featureTypeList)
    {
        pFeatureTypes.reset(new MgWfsFeatureDefinitions(pResourceService,pFeatureService));
    }
    else
    {
        pFeatureTypes.reset(new MgWfsFeatureDefinitions(pResourceService,pFeatureService,featureTypeList));
    }
    Wfs.SetFeatureDefinitions(pFeatureTypes.get());

    // In order to validate request we have to invoke the ProcessRequest
    if(!Wfs.ProcessRequest(this))
    {
        // Obtain the response byte reader
        Ptr<MgByteReader> errorResponse = Out.Stream().GetReader();

        // Set the result
        hResult->SetResultObject(errorResponse, errorResponse->GetMimeType());
        return;
    }

    // Determine required output format
    // This part must behind the Wfs.ProcessRequest, where parameters have been validated.
    CPSZ pszOutputFormat = Wfs.RequestParameter(MgHttpResourceStrings::reqWfsOutputFormat.c_str());
    STRING sOutputFormat = pszOutputFormat? pszOutputFormat : _("");
    if(sOutputFormat.empty())
    {
        sOutputFormat = Wfs.GetDefaultDescribeFeatureTypeOutputFormat(STRING(Wfs.RequestParameter(MgHttpResourceStrings::reqWfsVersion.c_str())));
    }

    // Fast path: all requested types share one namespace/feature source,
    // so a single DescribeWfsFeatureType call can serve them all.
    if(pFeatureTypes->InSameNamespace()) 
    {
        STRING sPrefix = L"";
        STRING sUrl = L"";
        STRING sResource = L""; // TODO: look for this in arg, since POST may put it there to save us trouble.
        STRING sSchemaHash = L"";
        Ptr<MgResourceIdentifier> idResource;
        Ptr<MgStringCollection> pFeatureClasses = new MgStringCollection();

        while(pFeatureTypes->ReadNext())
        {
            STRING sClassFullName = pFeatureTypes->GetClassFullName();
            
            // Skip classes the caller did not ask for (substring filter).
            if(!sFeatureTypes.empty() && STRING::npos == sFeatureTypes.find(sClassFullName))
            {
                continue;
            }

            STRING::size_type iPos = sClassFullName.find(_(":")); //NOXLATE
            if(iPos != STRING::npos)
            {
                // First qualified name seen fixes the namespace prefix.
                if(sPrefix.empty())
                {
                    sPrefix = sClassFullName.substr(0,iPos);
                }

                STRING sClass = sClassFullName.substr(iPos+1);

                sUrl = pFeatureTypes->GetNamespaceUrl();

                // Resolve the backing feature source once, on first use.
                if(NULL == idResource)
                {
                    if(pFeatureTypes->PrefixToFeatureSource(sPrefix, sResource, sSchemaHash)) {
                        idResource = new MgResourceIdentifier(sResource);
                    }
                    else
                    {
                        // Badly formed feature type?  Throw an exception.
                        GenerateTypeNameException(hResult,sFeatureTypes);
                        return;
                    }
                }

                pFeatureClasses->Add(((sSchemaHash.size()==0) ? sClass : sSchemaHash + _(":") + sClass)); //NOXLATE
            }
            else {
                // Badly formed feature type?  Throw an exception.
                GenerateTypeNameException(hResult,sFeatureTypes);
                return;
            }
        }

        if(pFeatureClasses->GetCount() == 0)
        {
            // Badly formed feature type?  Throw an exception.
            GenerateTypeNameException(hResult,sFeatureTypes);
            return;
        }

        Ptr<MgByteReader> response  = pFeatureService->DescribeWfsFeatureType(idResource, pFeatureClasses, sPrefix, sUrl);

        // Set the result
        hResult->SetResultObject(response, sOutputFormat);
    }
    else {
        // There's more than one feature, so we need to enumerate
        // them and have each get imported.
        //
        if(!pFeatureTypes->SubsetFeatureList(sFeatureTypes.c_str()))
            GenerateTypeNameException(hResult,sFeatureTypes);
        else {

#ifdef _WFS_LOGGING
            MyLog.Write(_("WFS::DescribeFeatureType\r\n"));
#endif
            // Execute the request
            //Wfs.ProcessRequest(this);

            // Slurp the results.
            Ptr<MgByteReader> capabilities = Out.Stream().GetReader();

            // Set the result
            hResult->SetResultObject(capabilities, capabilities->GetMimeType());
        }
    }

    MG_HTTP_HANDLER_CATCH_AND_THROW_EX(L"MgHttpWfsDescribeFeatureType.Execute")
}
// HMDB51 action-recognition baseline: builds a bag-of-words vocabulary from
// STIP features (hog+hof), trains an SVM per dataset split, and reports the
// per-split and average classification accuracy.
int main(int argc, char *argv[])
{
    const char *keys =
            "{ help h usage ? |    | show this message }"
            "{ path p         |true| path to dataset }";
    CommandLineParser parser(argc, argv, keys);
    string path(parser.get<string>("path"));
    if (parser.has("help") || path=="true")
    {
        parser.printMessage();
        return -1;
    }

    // loading dataset
    Ptr<AR_hmdb> dataset = AR_hmdb::create();
    dataset->load(path);

    int numSplits = dataset->getNumSplits();
    // %d: numSplits is a signed int ("%u" here would be undefined behavior).
    printf("splits number: %d\n", numSplits);


    const unsigned int descriptorNum = 162;
    const unsigned int clusterNum = 4000;
    const unsigned int sampleNum = 5613856; // max for all 3 splits

    vector<double> res;
    for (int currSplit=0; currSplit<numSplits; ++currSplit)
    {

        Mat1f samples(sampleNum, descriptorNum);
        unsigned int currSample = 0;
        vector< Ptr<Object> > &curr = dataset->getTrain(currSplit);
        unsigned int numTrainFiles = getNumFiles(curr);
        unsigned int numFeatures = 0;
        for (unsigned int i=0; i<curr.size(); ++i)
        {
            AR_hmdbObj *example = static_cast<AR_hmdbObj *>(curr[i].get());
            vector<string> &videoNames = example->videoNames;
            for (vector<string>::iterator it=videoNames.begin(); it!=videoNames.end(); ++it)
            {
                string featuresFile = *it + ".txt";
                string featuresFullPath = path + "hmdb51_org_stips/" + example->name + "/" + featuresFile;

                ifstream infile(featuresFullPath.c_str());
                string line;
                // skip header
                for (unsigned int j=0; j<3; ++j)
                {
                    getline(infile, line);
                }
                while (getline(infile, line))
                {
                    numFeatures++;
                    // Cap the number of collected samples at sampleNum.
                    if (currSample < sampleNum)
                    {
                        // 7 skip, hog+hof: 72+90 read
                        vector<string> elems;
                        split(line, elems, '\t');

                        for (unsigned int j=0; j<descriptorNum; ++j)
                        {
                            samples(currSample, j) = (float)atof(elems[j+7].c_str());
                        }
                        currSample++;
                    }
                }
            }
        }
        // %d for the signed loop index; the counters are unsigned.
        printf("split %d, train features number: %u, samples number: %u\n", currSplit, numFeatures, currSample);

        // clustering
        Mat1f centers(clusterNum, descriptorNum);
        ::cvflann::KMeansIndexParams kmean_params;
        unsigned int resultClusters = hierarchicalClustering< L2<float> >(samples, centers, kmean_params);
        if (resultClusters < clusterNum)
        {
            centers = centers.rowRange(Range(0, resultClusters));
        }
        Index flann_index(centers, KDTreeIndexParams());
        printf("resulted clusters number: %u\n", resultClusters);


        Mat1f trainData(numTrainFiles, resultClusters);
        Mat1i trainLabels(numTrainFiles, 1);

        // Zero the histograms before accumulation.
        for (unsigned int i=0; i<numTrainFiles; ++i)
        {
            for (unsigned int j=0; j<resultClusters; ++j)
            {
                trainData(i, j) = 0;
            }
        }

        printf("calculating train histograms\n");
        fillData(path, curr, flann_index, trainData, trainLabels);

        printf("train svm\n");
        SVM::Params params;
        params.svmType = SVM::C_SVC;
        params.kernelType = SVM::POLY; //SVM::RBF;
        params.degree = 0.5;
        params.gamma = 1;
        params.coef0 = 1;
        params.C = 1;
        params.nu = 0.5;
        params.p = 0;
        params.termCrit = TermCriteria(TermCriteria::MAX_ITER+TermCriteria::EPS, 1000, 0.01);
        Ptr<SVM> svm = SVM::create(params);
        svm->train(trainData, ROW_SAMPLE, trainLabels);

        // prepare to predict
        curr = dataset->getTest(currSplit);
        unsigned int numTestFiles = getNumFiles(curr);
        Mat1f testData(numTestFiles, resultClusters);
        Mat1i testLabels(numTestFiles, 1); // ground true

        for (unsigned int i=0; i<numTestFiles; ++i)
        {
            for (unsigned int j=0; j<resultClusters; ++j)
            {
                testData(i, j) = 0;
            }
        }

        printf("calculating test histograms\n");
        fillData(path, curr, flann_index, testData, testLabels);

        printf("predicting\n");
        Mat1f testPredicted(numTestFiles, 1);
        svm->predict(testData, testPredicted);

        unsigned int correct = 0;
        for (unsigned int i=0; i<numTestFiles; ++i)
        {
            if ((int)testPredicted(i, 0) == testLabels(i, 0))
            {
                correct++;
            }
        }
        double accuracy = 1.0*correct/numTestFiles;
        printf("correctly recognized actions: %f\n", accuracy);
        res.push_back(accuracy);

    }

    // Average over splits; guard against a dataset reporting zero splits
    // (would otherwise divide by zero).
    if (!res.empty())
    {
        double accuracy = 0.0;
        for (unsigned int i=0; i<res.size(); ++i)
        {
            accuracy += res[i];
        }
        printf("average: %f\n", accuracy/res.size());
    }

    return 0;
}
Example #21
0
 // Construct a MarkovData node holding `val` and splice it after `last`
 // in the doubly linked chain: this node's prev becomes `last`, and
 // `last`'s next becomes this node.
 // Precondition: `last` must be non-null — both last->key() and
 // last->set_next() are called unconditionally.
 MD::MarkovData(uint val, Ptr<MarkovData> last)
   : CD(val, last->key())
 {
   set_prev(last);
   last->set_next(this);
 }
// Create the MobilityModel for one test-case position, identified by
// `index` (1..11).  Returns a ConstantPositionMobilityModel placed at the
// corresponding coordinates, with a MobilityBuildingInfo aggregated onto it
// (as BuildingsHelper::Install would normally do), or a null Ptr for an
// unknown index.
Ptr<MobilityModel>
BuildingsPathlossTestCase::CreateMobilityModel (uint16_t index)
{

  /*
   * The purpose of this method is to defer the creation of the
   * MobilityModel instances to when DoRun() is called. In a previous
   * version, MobilityModel instances where created directly in the
   * constructor of the test suite, which caused subtle bugs due to
   * "static initialization order fiasco". An example of such a subtle
   * bug is that logging via NS_LOG failed for some modules.
   * 
   */

  double hm = 1;            // mobile (UE) antenna height [m]
  double hb = 30;           // base-station antenna height [m]
  double henbHeight = 10.0; // home-eNB antenna height [m]

  Ptr<MobilityModel> mm;
  
  switch (index)
    {
    case 1:
      mm = CreateObject<ConstantPositionMobilityModel> ();
      mm->SetPosition (Vector (0.0, 0.0, hb));
      break;

    case 2:
      mm = CreateObject<ConstantPositionMobilityModel> ();
      mm->SetPosition (Vector (2000, 0.0, hm));
      break;

    case 3:
      mm = CreateObject<ConstantPositionMobilityModel> ();
      mm->SetPosition (Vector (100, 0.0, hm));
      break;

    case 4:
      mm = CreateObject<ConstantPositionMobilityModel> ();
      mm->SetPosition (Vector (900, 0.0, hm));
      break;

    case 5:
      mm = CreateObject<ConstantPositionMobilityModel> ();
      mm->SetPosition (Vector (-5, 0.0, hm));
      break;

    case 6:
      mm = CreateObject<ConstantPositionMobilityModel> ();
      mm->SetPosition (Vector (-5, 30, henbHeight));
      break;

    case 7:
      mm = CreateObject<ConstantPositionMobilityModel> ();
      mm->SetPosition (Vector (-2000, 0.0, hm));
      break;

    case 8:
      mm = CreateObject<ConstantPositionMobilityModel> ();
      mm->SetPosition (Vector (-100, 0.0, hm));
      break;

    case 9:
      mm = CreateObject<ConstantPositionMobilityModel> ();
      mm->SetPosition (Vector (0, 0.0, hm));
      break;

    case 10:
      mm = CreateObject<ConstantPositionMobilityModel> ();
      mm->SetPosition (Vector (-100, 0.0, henbHeight));
      break;

    case 11:
      mm = CreateObject<ConstantPositionMobilityModel> ();
      mm->SetPosition (Vector (-500, 0.0, henbHeight));
      break;

    default:
      mm = 0;
      break;
    }
  // Only aggregate building info when a model was actually created; the
  // default branch leaves mm null and dereferencing it would crash.
  if (mm != 0)
    {
      Ptr<MobilityBuildingInfo> buildingInfo = CreateObject<MobilityBuildingInfo> ();
      mm->AggregateObject (buildingInfo); // operation usually done by BuildingsHelper::Install
      BuildingsHelper::MakeConsistent (mm);
    }
  return mm;
}
Example #23
0
// Bag-of-visual-words image retrieval demo: loads a precomputed inverted
// index, vocabulary and per-image BoW vectors from YAML, computes the BoW
// histogram of a query image, shortlists candidates via the inverted index,
// ranks the top 8 by cosine similarity and writes them out as
// output0.jpg..output7.jpg (best match last).
int main(){
	string dir = "Caltech_11classes/test1", filepath;
	string dir1 = "Caltech_11classes/test";
	DIR *dp;
	struct dirent *dirp;
	struct stat filestat;

	// Precomputed data: inverse_index is (words x images); fs2 holds one
	// stored BoW vector per database image under keys "img0", "img1", ...
	Mat inverse_index,vocabulary,img_vector;
	FileStorage fs("inverse_index.yml", FileStorage::READ);
	fs["inv_index"] >> inverse_index;
	FileStorage fs1("vocabulary.yml", FileStorage::READ);
	fs1["vocabulary"] >> vocabulary;
	FileStorage fs2("image_vector.yml", FileStorage::READ);


	int no_words = inverse_index.rows;   // NOTE(review): never used below
	int no_images = inverse_index.cols;
	// Opponent-color SIFT descriptors feed the BoW extractor.
	Ptr<DescriptorExtractor > extractor(
			new OpponentColorDescriptorExtractor(
				Ptr<DescriptorExtractor>(new SiftDescriptorExtractor())
				)
			);

	Ptr<FeatureDetector> featureDetector = FeatureDetector::create( "SIFT");
	Ptr<BOWImgDescriptorExtractor> bowExtractor;
	Ptr<DescriptorMatcher> descMatcher = DescriptorMatcher::create( "BruteForce" );
	bowExtractor = new BOWImgDescriptorExtractor( extractor, descMatcher );
	bowExtractor->setVocabulary( vocabulary );
	vector<KeyPoint> keypoints;
	SiftFeatureDetector detector(100);
	int True = 0;   // NOTE(review): True/False are never used
	int False = 0;
	DIR *dp1;
	struct dirent *dirp1;
	struct stat filestat1;
	dp1 = opendir( dir1.c_str() );
	// Iterate the test directory; note the unconditional `break` at the
	// bottom of this loop, so only the first regular file is processed.
	while (dirp1 = readdir( dp1 ))
	{
		Mat img,response_hist;
		filepath = dir1 + "/" + dirp1->d_name;
		if (stat( filepath.c_str(), &filestat )) continue;
		if (S_ISDIR( filestat.st_mode ))         continue;

		img = imread("test_file.jpg");   // NOTE(review): ignores `filepath` built just above -- probably meant imread(filepath); verify
//		string in_class(filepath,23,3);

		if (!img.data) {
			continue;
		}
	// Compute the query image's BoW histogram.
	detector.detect(img,keypoints);
	bowExtractor->compute(img, keypoints, response_hist);
	//cout << response_hist << endl;
	int i,j;
	char c[100];
	// NOTE(review): variable-length array is a GCC extension, not standard C++.
	int wordcount[no_images];
	for(i=0;i<no_images;i++)
		wordcount[i] = 0;
	int count = 0;
	//cout << inverse_index << endl;
	// Voting: for every strong word in the query, credit every database
	// image in which that word is also strong.
	// NOTE(review): BOWImgDescriptorExtractor normally yields a CV_32F
	// histogram; reading it with at<int>() looks type-mismatched -- confirm
	// how these matrices were written.
	for(i=0;i<response_hist.cols;i++){
		//	cout << "res = " << response_hist.at<int>(0,i) << endl;	
		if(response_hist.at<int>(0,i) > 100){
			//	cout << "word " << i << " " ;
			count++;
			for(j=0;j<no_images;j++){
				if(inverse_index.at<int>(i,j) > 100 ){
					//		cout << j << " ";
					//			cout << inverse_index.at<int>(i,j) << " ";
					wordcount[j]++;
				}
			}
		}
	}
	// Keep the 8 best cosine similarities (top_val/index_val), tracking the
	// current minimum so it can be evicted when a better candidate appears.
	double min_val = 10000000000;
	double max_val = -1;
	double val;
	int imno,im_no;
	int min_index;
	double top_val[8];
	int index_val[8];
	int pos = 0;
	for(i=0;i<no_images;i++){
		// Only score images sharing at least a quarter of the query's strong words.
		if(wordcount[i] > count/4){
			sprintf(c,"img%d",i);
			fs2[string(c)] >> img_vector;
			// Cosine similarity between the stored vector and the query histogram.
			val = img_vector.dot(response_hist)/(norm(img_vector) * norm(response_hist));
			if(pos < 8 ){
				if(min_val > val){
					min_val = val;
					min_index = pos;
				}
				top_val[pos] = val;
				index_val[pos] = i;
				pos++;
			}
			else{
				if(min_val < val ){
					// Evict the current minimum, then rescan for the new one.
					top_val[min_index] = val;
					index_val[min_index] = i;
					min_val = 10000000000;
					for(int com = 0 ; com < pos ; com++){
						if(top_val[com] < min_val){
							min_val = top_val[com];
							min_index = com;
						}
					}
				}
			}
			if(val > max_val){
					max_val = val;
					im_no = i;
			}
			//		cout << i << endl;
		}
	}
	dp = opendir( dir.c_str() );
	//cout << im_no << endl;
	//namedWindow("input",-1);
	//imshow("input",img);
	i = 0;
	imno = 0;
	int flag = 0;
	int fin;
	// Sort a copy of the top similarities so each hit can be named by rank.
	vector<double> sorted_val;
	sorted_val.assign(top_val,top_val+8);
	sort(sorted_val.begin(),sorted_val.end());
	// Walk the database directory in order; `imno` is the image's index.
	while (dirp = readdir( dp ))
	{
		filepath = dir + "/" + dirp->d_name;
		if (stat( filepath.c_str(), &filestat )) continue;
		if (S_ISDIR( filestat.st_mode ))         continue;

		img = imread(filepath);
		if (!img.data) {
			continue;
		}
		
		if(in_check(imno,index_val,pos) /*|| imno == im_no*/){
			//cout << string(filepath,24,3) << endl;
			//if(out_class == in_class)
			//	flag = 1;
	//		cout<<top_val[i]<<endl;
			// Rank among the sorted similarities decides the output filename.
			for(fin = 0 ; fin < 8 ; fin++)
				if(sorted_val[fin] == top_val[i])
					break;	
	//		cout<< fin <<endl;	
			sprintf(c,"output%d.jpg",7-fin);
		//	namedWindow(c,-1);
		//	imshow(c,img);
		//	cvWaitKey(1000);
			imwrite(c,img);
			i++;
		}
		if( i> pos)
			break;
		imno++;
	}
	break;   // NOTE(review): unconditional -- the outer readdir loop runs at most once
	}
// NOTE(review): main's closing brace and return are missing from this
// excerpt; the brace above closes the outer while loop only.
// Generic OpenCV Tracking-API demo: opens a video, lets the user draw a
// bounding box with the mouse (onMouse callback), initializes the requested
// tracker on it, then updates and draws the track on every frame.
// Globals (paused, image, selectObject, boundingBox, keys, help, onMouse)
// are defined elsewhere in this file.
// NOTE(review): this excerpt is truncated -- main's closing brace and
// return statement are not part of it.
int main( int argc, char** argv )
{
  CommandLineParser parser( argc, argv, keys );

  // First positional argument: tracker algorithm name; second: video file.
  String tracker_algorithm = parser.get<String>( 0 );
  String video_name = parser.get<String>( 1 );

  if( tracker_algorithm.empty() || video_name.empty() )
  {
    help();
    return -1;
  }

  //open the capture
  VideoCapture cap;
  cap.open( video_name );

  if( !cap.isOpened() )
  {
    help();
    cout << "***Could not initialize capturing...***\n";
    cout << "Current parameter's value: \n";
    parser.printMessage();
    return -1;
  }

  Mat frame;
  paused = true;   // start paused so the user can select the object first
  namedWindow( "Tracking API", 1 );
  setMouseCallback( "Tracking API", onMouse, 0 );

  //instantiates the specific Tracker
  Ptr<Tracker> tracker = Tracker::create( tracker_algorithm );
  if( tracker == NULL )
  {
    cout << "***Error in the instantiation of the tracker...***\n";
    return -1;
  }

  //get the first frame
  cap >> frame;
  frame.copyTo( image );
  imshow( "Tracking API", image );

  bool initialized = false;
  for ( ;; )
  {
    if( !paused )
    {
      cap >> frame;
      frame.copyTo( image );

      // One-shot initialization once the mouse selection is complete.
      if( !initialized && selectObject )
      {
        //initializes the tracker
        if( !tracker->init( frame, boundingBox ) )
        {
          cout << "***Could not initialize tracker...***\n";
          return -1;
        }
        initialized = true;
      }
      else if( initialized )
      {
        //updates the tracker
        if( tracker->update( frame, boundingBox ) )
        {
          rectangle( image, boundingBox, Scalar( 255, 0, 0 ), 2, 1 );
        }
      }
      imshow( "Tracking API", image );
    }

    // 'q' quits, 'p' toggles pause.
    char c = (char) waitKey( 2 );
    if( c == 'q' )
      break;
    if( c == 'p' )
      paused = !paused;

  }
Example #25
0
	// Accepts an item when no type filter is configured, or when the item's
	// type name matches the configured filter (filteredFileType).
	bool ItemFilter(Ptr<FileProperties> a)
	{
		if (filteredFileType == L"")
		{
			return true;
		}
		return a->GetTypeName() == filteredFileType;
	}
// GOTURN tracker demo on the ALOV300++ dataset: loads the sequence selected
// by datasetID, shows the first frame so the user can adjust the target
// (onMouse callback), then tracks it frame by frame while measuring
// per-frame and average processing time.
// Globals (paused, image, boundingBox, selectObjects, keys, help, onMouse)
// are defined elsewhere in this file.
int main(int argc, char *argv[])
{
    CommandLineParser parser(argc, argv, keys);
    string datasetRootPath = parser.get<string>(0);
    int datasetID = parser.get<int>(1);

    if (datasetRootPath.empty())
    {
        help();
        return -1;
    }

    Mat frame;
    paused = false;
    namedWindow("GOTURN Tracking", 0);
    setMouseCallback("GOTURN Tracking", onMouse, 0);

    //Create GOTURN tracker
    Ptr<Tracker> tracker = Tracker::create("GOTURN");

    //Load and init full ALOV300++ dataset with a given datasetID, as alternative you can use loadAnnotatedOnly(..)
    //to load only frames with labled ground truth ~ every 5-th frame
    Ptr<cv::datasets::TRACK_alov> dataset = TRACK_alov::create();
    dataset->load(datasetRootPath);
    dataset->initDataset(datasetID);

    //Read first frame
    dataset->getNextFrame(frame);
    frame.copyTo(image);
    rectangle(image, boundingBox, Scalar(255, 0, 0), 2, 1);
    imshow("GOTURN Tracking", image);

    bool initialized = false;
    paused = true;   // pause so the user can adjust the initial bounding box
    int frameCounter = 0;

    //Time measurment
    int64 e3 = getTickCount();

    for (;;)
    {
        if (!paused)
        {
            //Time measurment
            int64 e1 = getTickCount();
            // After initialization, advance to the next frame; stop at the
            // end of the sequence.
            if (initialized){
                if (!dataset->getNextFrame(frame))
                    break;
                frame.copyTo(image);
            }

            if (!initialized && selectObjects)
            {
                //Initialize the tracker and add targets
                if (!tracker->init(frame, boundingBox))
                {
                    cout << "Tracker Init Error!!!";
                    return 0;
                }
                rectangle(frame, boundingBox, Scalar(0, 0, 255), 2, 1);
                initialized = true;
            }
            else if (initialized)
            {
                //Update all targets
                if (tracker->update(frame, boundingBox))
                {
                    rectangle(frame, boundingBox, Scalar(0, 0, 255), 2, 1);
                }
            }
            imshow("GOTURN Tracking", frame);
            frameCounter++;
            //Time measurment
            int64 e2 = getTickCount();
            double t1 = (e2 - e1) / getTickFrequency();
            cout << frameCounter << "\tframe :  " << t1 * 1000.0 << "ms" << endl;
        }

        // 'q' quits, 'p' toggles pause.
        char c = (char)waitKey(2);
        if (c == 'q')
            break;
        if (c == 'p')
            paused = !paused;
    }

    //Time measurment
    int64 e4 = getTickCount();
    double t2 = (e4 - e3) / getTickFrequency();
    // NOTE(review): frameCounter is 0 if the user quits before unpausing,
    // making these averages divide by zero -- guard if that matters.
    cout << "Average Time for Frame:  " << t2 * 1000.0 / frameCounter << "ms" << endl;
    cout << "Average FPS:  " << 1.0 / t2*frameCounter << endl;


    waitKey(0);

    return 0;
}
Example #27
0
// Loads every saved face-recognizer model from disk, preprocesses the face
// found in the image at `folderpath`, and tries each model in turn until one
// recognizes it (similarity below UNKNOWN_PERSON_THRESHOLD).
// Returns the predicted identity, or -2 when no model matches.
int predictUsingFile(string folderpath,CascadeClassifier faceCascade,CascadeClassifier eyeCascade1,CascadeClassifier eyeCascade2){

	Ptr<BasicFaceRecognizer> model;
	vector<string> folders;
	vector<string> models;
	int identity=-1;

	// Fill `folders` with every subfolder of the saved-models directory...
	getdir(modelsaved,folders);
	vector<string>::size_type n_folder=folders.size();
	// ...then `models` with the saved model paths found in each subfolder.
	// FIX: the original ran this loop a second time, bounded by models.size()
	// while still indexing folders[i] -- that duplicated every entry and read
	// past the end of `folders` whenever there were more models than folders.
	for(uint i=0;i<n_folder;i++){
		getdir(folders[i],models);
	}
	vector<string>::size_type n_models=models.size();

	Mat imageOriginal=imread(folderpath);
	if(imageOriginal.empty()){
		cerr << "ERROR: Couldn't read the file." << endl;
	    exit(1);
	}
	// Get a copy of the camera frame that we can draw onto.
	Mat displayedFrame;
	imageOriginal.copyTo(displayedFrame);

    // Find a face and preprocess it to have a standard size and contrast & brightness.
    Rect faceRect;  // Position of detected face.
    Rect searchedLeftEye, searchedRightEye; // top-left and top-right regions of the face, where eyes were searched.
    Point leftEye, rightEye;    // Position of the detected eyes.
    Mat preprocessedFace = getPreprocessedFace(displayedFrame, faceWidth, faceCascade, eyeCascade1, eyeCascade2, preprocessLeftAndRightSeparately, &faceRect, &leftEye, &rightEye, &searchedLeftEye, &searchedRightEye);
    bool gotFaceAndEyes = false;
    if (preprocessedFace.data)
       gotFaceAndEyes = true;
	Mat reconstructedFace;
    string outputStr="Unknown";
    double similarity=100.0;
    bool trovato=false;   // "found" -- set once some model recognizes the face

    if(gotFaceAndEyes){
    	for(unsigned int i=0;i<n_models && !trovato;i++){
    		model=createEigenFaceRecognizer();
    		string next=models[i];
    		model->load(next);
    		// Generate a face approximation by back-projecting the eigenvectors & eigenvalues.
    		reconstructedFace = reconstructFace(model, preprocessedFace);
    		// Verify whether the reconstructed face looks like the preprocessed face, otherwise it is probably an unknown person.
    		similarity = getSimilarity(preprocessedFace, reconstructedFace);
    		if(similarity<UNKNOWN_PERSON_THRESHOLD){
    			// Identify who the person is in the preprocessed face image.
    			identity = model->predict(preprocessedFace);
    			outputStr = toString(identity);
    			trovato=true;
    		}
    		// FIX: the original called model.~Ptr() explicitly; invoking the
    		// destructor by hand and then re-assigning the live smart pointer
    		// is undefined behaviour (double release). release() drops the
    		// reference safely before the next iteration's assignment.
    		model.release();
    	}
    }
   	// Since the confidence is low, assume it is an unknown person.
    //cout << "Identity: " << "§"<<outputStr<<"§" << ". Similarity: " << similarity << endl;
    cout<<outputStr<<endl;
	if(outputStr.compare("Unknown")==0)
		return -2;
	else
		return atoi(outputStr.c_str());
}
Example #28
0
// Side-by-side comparison of AKAZE and ORB planar tracking.
// Opens a camera (single-digit argument) or a video file, previews frames
// until a key is pressed, lets the user select a bounding box, then runs
// both trackers on every frame, drawing per-tracker statistics and printing
// averages at the end.
int main(int argc, char **argv)
{
    CommandLineParser parser(argc, argv, "{@input_path |0|input path can be a camera id, like 0,1,2 or a video filename}");
    parser.printMessage();
    string input_path = parser.get<string>(0);
    string video_name = input_path;

    VideoCapture video_in;

    // A single digit is interpreted as a camera index, anything else as a filename.
    if ( ( isdigit(input_path[0]) && input_path.size() == 1 ) )
    {
    int camera_no = input_path[0] - '0';
        video_in.open( camera_no );
    }
    else {
        video_in.open(video_name);
    }

    if(!video_in.isOpened()) {
        cerr << "Couldn't open " << video_name << endl;
        return 1;
    }

    Stats stats, akaze_stats, orb_stats;
    Ptr<AKAZE> akaze = AKAZE::create();
    akaze->setThreshold(akaze_thresh);
    Ptr<ORB> orb = ORB::create();
    Ptr<DescriptorMatcher> matcher = DescriptorMatcher::create("BruteForce-Hamming");
    example::Tracker akaze_tracker(akaze, matcher);
    example::Tracker orb_tracker(orb, matcher);

    Mat frame;
    namedWindow(video_name, WINDOW_NORMAL);
    cout << "\nPress any key to stop the video and select a bounding box" << endl;

    // Preview loop: show frames until the user presses a key.
    while ( waitKey(1) < 1 )
    {
        video_in >> frame;
        // FIX: the original used the grabbed frame unconditionally; an
        // exhausted stream (or failing camera) yields an empty Mat and
        // resizeWindow/imshow would throw. Exit cleanly instead.
        if (frame.empty()) {
            cerr << "No frame available from " << video_name << endl;
            return 1;
        }
        cv::resizeWindow(video_name, frame.size());
        imshow(video_name, frame);
    }

    // Turn the user's rectangle into the four corner points the trackers expect.
    vector<Point2f> bb;
    cv::Rect uBox = cv::selectROI(video_name, frame);
    bb.push_back(cv::Point2f(static_cast<float>(uBox.x), static_cast<float>(uBox.y)));
    bb.push_back(cv::Point2f(static_cast<float>(uBox.x+uBox.width), static_cast<float>(uBox.y)));
    bb.push_back(cv::Point2f(static_cast<float>(uBox.x+uBox.width), static_cast<float>(uBox.y+uBox.height)));
    bb.push_back(cv::Point2f(static_cast<float>(uBox.x), static_cast<float>(uBox.y+uBox.height)));

    akaze_tracker.setFirstFrame(frame, bb, "AKAZE", stats);
    orb_tracker.setFirstFrame(frame, bb, "ORB", stats);

    Stats akaze_draw_stats, orb_draw_stats;
    Mat akaze_res, orb_res, res_frame;
    int i = 0;
    for(;;) {
        i++;
        bool update_stats = (i % stats_update_period == 0);
        video_in >> frame;
        // stop the program if no more images
        if(frame.empty()) break;

        akaze_res = akaze_tracker.process(frame, stats);
        akaze_stats += stats;
        if(update_stats) {
            akaze_draw_stats = stats;
        }

        // Cap ORB's feature budget to what AKAZE found so the comparison stays fair.
        orb->setMaxFeatures(stats.keypoints);
        orb_res = orb_tracker.process(frame, stats);
        orb_stats += stats;
        if(update_stats) {
            orb_draw_stats = stats;
        }

        drawStatistics(akaze_res, akaze_draw_stats);
        drawStatistics(orb_res, orb_draw_stats);
        vconcat(akaze_res, orb_res, res_frame);
        cv::imshow(video_name, res_frame);
        if(waitKey(1)==27) break; //quit on ESC button
    }
    // FIX: the original divided by (i - 1) unconditionally, a division by
    // zero when the stream ends on the very first iteration.
    if (i > 1) {
        akaze_stats /= i - 1;
        orb_stats /= i - 1;
    }
    printStatistics("AKAZE", akaze_stats);
    printStatistics("ORB", orb_stats);
    return 0;
}
 // Constructs a regression model from its coefficient vector `beta` and the
 // `sigsq` parameter (presumably the residual variance, per the name --
 // confirm against the class declaration). The sufficient statistics are
 // sized to the number of possible predictors in `beta`.
 RM::RegressionModel(Ptr<GlmCoefs> beta, Ptr<UnivParams> sigsq)
   : GlmModel(),
     ParamPolicy(beta, sigsq),
     DataPolicy(new NeRegSuf(beta->nvars_possible()))
 {}
/**
 * Main entry called from Matlab
 * @param nlhs number of left-hand-side arguments
 * @param plhs pointers to mxArrays in the left-hand-side
 * @param nrhs number of right-hand-side arguments
 * @param prhs pointers to mxArrays in the right-hand-side
 */
void mexFunction(int nlhs, mxArray *plhs[], int nrhs, const mxArray *prhs[])
{
    // Calling convention from MATLAB: prhs = (id, methodName, ...).
    // `obj_` (id -> instance map) and `last_id` are module-level state,
    // presumably declared earlier in this file -- verify.

    // Check the number of arguments
    nargchk(nrhs>=2 && nlhs<=2);

    // Argument vector
    vector<MxArray> rhs(prhs, prhs+nrhs);
    int id = rhs[0].toInt();
    string method(rhs[1].toString());

    // Constructor is called. Create a new object from argument
    if (method == "new") {
        nargchk(nrhs==2 && nlhs<=1);
        obj_[++last_id] = LogisticRegression::create();
        plhs[0] = MxArray(last_id);
        return;
    }

    // Big operation switch
    Ptr<LogisticRegression> obj = obj_[id];
    if (method == "delete") {
        // Destroy the instance and drop it from the map.
        nargchk(nrhs==2 && nlhs==0);
        obj_.erase(id);
    }
    else if (method == "clear") {
        nargchk(nrhs==2 && nlhs==0);
        obj->clear();
    }
    else if (method == "load") {
        // Load a model from a file (or from a serialized string when
        // FromString is set). rhs[2] is the filename/string; name/value
        // options start at rhs[3].
        nargchk(nrhs>=3 && (nrhs%2)==1 && nlhs==0);
        string objname;
        bool loadFromString = false;
        for (int i=3; i<nrhs; i+=2) {
            string key(rhs[i].toString());
            if (key == "ObjName")
                objname = rhs[i+1].toString();
            else if (key == "FromString")
                loadFromString = rhs[i+1].toBool();
            else
                mexErrMsgIdAndTxt("mexopencv:error",
                    "Unrecognized option %s", key.c_str());
        }
        obj_[id] = (loadFromString ?
            Algorithm::loadFromString<LogisticRegression>(rhs[2].toString(), objname) :
            Algorithm::load<LogisticRegression>(rhs[2].toString(), objname));
    }
    else if (method == "save") {
        nargchk(nrhs==3 && nlhs<=1);
        string fname(rhs[2].toString());
        if (nlhs > 0) {
            // write to memory, and return string
            FileStorage fs(fname, FileStorage::WRITE + FileStorage::MEMORY);
            fs << obj->getDefaultName() << "{";
            fs << "format" << 3;
            obj->write(fs);
            fs << "}";
            plhs[0] = MxArray(fs.releaseAndGetString());
        }
        else
            // write to disk
            obj->save(fname);
    }
    else if (method == "empty") {
        nargchk(nrhs==2 && nlhs<=1);
        plhs[0] = MxArray(obj->empty());
    }
    else if (method == "getDefaultName") {
        nargchk(nrhs==2 && nlhs<=1);
        plhs[0] = MxArray(obj->getDefaultName());
    }
    else if (method == "getVarCount") {
        nargchk(nrhs==2 && nlhs<=1);
        plhs[0] = MxArray(obj->getVarCount());
    }
    else if (method == "isClassifier") {
        nargchk(nrhs==2 && nlhs<=1);
        plhs[0] = MxArray(obj->isClassifier());
    }
    else if (method == "isTrained") {
        nargchk(nrhs==2 && nlhs<=1);
        plhs[0] = MxArray(obj->isTrained());
    }
    else if (method == "train") {
        // rhs[2]: samples matrix, or a filename to load TrainData from;
        // rhs[3]: responses; name/value options start at rhs[4].
        nargchk(nrhs>=4 && (nrhs%2)==0 && nlhs<=1);
        vector<MxArray> dataOptions;
        int flags = 0;
        for (int i=4; i<nrhs; i+=2) {
            string key(rhs[i].toString());
            if (key == "Data")
                dataOptions = rhs[i+1].toVector<MxArray>();
            else if (key == "Flags")
                flags = rhs[i+1].toInt();
            else
                mexErrMsgIdAndTxt("mexopencv:error",
                    "Unrecognized option %s", key.c_str());
        }
        Ptr<TrainData> data;
        if (rhs[2].isChar())
            data = loadTrainData(rhs[2].toString(),
                dataOptions.begin(), dataOptions.end());
        else
            data = createTrainData(
                rhs[2].toMat(CV_32F),
                rhs[3].toMat(CV_32F),
                dataOptions.begin(), dataOptions.end());
        bool b = obj->train(data, flags);
        plhs[0] = MxArray(b);
    }
    else if (method == "calcError") {
        // Same data handling as "train"; returns the error and, optionally,
        // the per-sample responses as a second output.
        nargchk(nrhs>=4 && (nrhs%2)==0 && nlhs<=2);
        vector<MxArray> dataOptions;
        bool test = false;
        for (int i=4; i<nrhs; i+=2) {
            string key(rhs[i].toString());
            if (key == "Data")
                dataOptions = rhs[i+1].toVector<MxArray>();
            else if (key == "TestError")
                test = rhs[i+1].toBool();
            else
                mexErrMsgIdAndTxt("mexopencv:error",
                    "Unrecognized option %s", key.c_str());
        }
        Ptr<TrainData> data;
        if (rhs[2].isChar())
            data = loadTrainData(rhs[2].toString(),
                dataOptions.begin(), dataOptions.end());
        else
            data = createTrainData(
                rhs[2].toMat(CV_32F),
                rhs[3].toMat(CV_32F),
                dataOptions.begin(), dataOptions.end());
        Mat resp;
        float err = obj->calcError(data, test, (nlhs>1 ? resp : noArray()));
        plhs[0] = MxArray(err);
        if (nlhs>1)
            plhs[1] = MxArray(resp);
    }
    else if (method == "predict") {
        // rhs[2]: samples matrix; name/value options start at rhs[3].
        nargchk(nrhs>=3 && (nrhs%2)==1 && nlhs<=2);
        int flags = 0;
        for (int i=3; i<nrhs; i+=2) {
            string key(rhs[i].toString());
            if (key == "Flags")
                flags = rhs[i+1].toInt();
            else
                mexErrMsgIdAndTxt("mexopencv:error",
                    "Unrecognized option %s", key.c_str());
        }
        Mat samples(rhs[2].toMat(CV_32F)),
            results;
        float f = obj->predict(samples, results, flags);
        plhs[0] = MxArray(results);
        if (nlhs>1)
            plhs[1] = MxArray(f);
    }
    else if (method == "get_learnt_thetas") {
        nargchk(nrhs==2 && nlhs<=1);
        plhs[0] = MxArray(obj->get_learnt_thetas());
    }
    else if (method == "get") {
        // Property getter: rhs[2] is the property name.
        nargchk(nrhs==3 && nlhs<=1);
        string prop(rhs[2].toString());
        if (prop == "Iterations")
            plhs[0] = MxArray(obj->getIterations());
        else if (prop == "LearningRate")
            plhs[0] = MxArray(obj->getLearningRate());
        else if (prop == "MiniBatchSize")
            plhs[0] = MxArray(obj->getMiniBatchSize());
        else if (prop == "Regularization")
            plhs[0] = MxArray(InvRegularizationType[obj->getRegularization()]);
        else if (prop == "TermCriteria")
            plhs[0] = MxArray(obj->getTermCriteria());
        else if (prop == "TrainMethod")
            plhs[0] = MxArray(InvTrainingMethodType[obj->getTrainMethod()]);
        else
            mexErrMsgIdAndTxt("mexopencv:error",
                "Unrecognized property %s", prop.c_str());
    }
    else if (method == "set") {
        // Property setter: rhs[2] is the property name, rhs[3] the value.
        nargchk(nrhs==4 && nlhs==0);
        string prop(rhs[2].toString());
        if (prop == "Iterations")
            obj->setIterations(rhs[3].toInt());
        else if (prop == "LearningRate")
            obj->setLearningRate(rhs[3].toDouble());
        else if (prop == "MiniBatchSize")
            obj->setMiniBatchSize(rhs[3].toInt());
        else if (prop == "Regularization")
            obj->setRegularization(RegularizationType[rhs[3].toString()]);
        else if (prop == "TermCriteria")
            obj->setTermCriteria(rhs[3].toTermCriteria());
        else if (prop == "TrainMethod")
            obj->setTrainMethod(TrainingMethodType[rhs[3].toString()]);
        else
            mexErrMsgIdAndTxt("mexopencv:error",
                "Unrecognized property %s", prop.c_str());
    }
    else
        mexErrMsgIdAndTxt("mexopencv:error","Unrecognized operation");
}