Code example #1
void FacenetClassifier::create_input_tensor (long start_index, long end_index) {
	cout << "Using " << input_images.size() << " images" << endl;
	cout << "Start Index:" << start_index << " End Index:" << end_index << endl;
	Tensor input_tensor(DT_FLOAT, TensorShape({(int) (end_index - start_index), 160, 160, 3}));
	// get pointer to memory for that Tensor
	float *p = input_tensor.flat<float>().data();
	int i;
	
	for (i = 0; i < (end_index - start_index) ; i++) {
		// Wrap the tensor memory for image i in a cv::Mat header (no copy),
		// then convert the source image directly into that buffer.
		Mat camera_image(160, 160, CV_32FC3, p + i*160*160*3);
		input_images[i + start_index].convertTo(camera_image, CV_32FC3);
	}
	cout << input_tensor.DebugString() << endl;
	this->input_tensor = Tensor (input_tensor);
}
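
The point of the loop above is that the cv::Mat built each iteration is only a header over the tensor's own float buffer, so convertTo writes the image data straight into the tensorflow::Tensor without an extra copy. Below is a minimal, self-contained sketch of the same idea; the fixed 160x160 size and the image_to_tensor helper name are assumptions for illustration, not part of the original project.

#include <opencv2/opencv.hpp>
#include "tensorflow/core/framework/tensor.h"

// Hypothetical helper: convert one 3-channel 8-bit image into a 1x160x160x3
// float tensor by writing directly into the tensor's buffer.
tensorflow::Tensor image_to_tensor(const cv::Mat &image) {
	tensorflow::Tensor t(tensorflow::DT_FLOAT,
	                     tensorflow::TensorShape({1, 160, 160, 3}));
	// cv::Mat header over the tensor memory -- no allocation, no copy.
	cv::Mat view(160, 160, CV_32FC3, t.flat<float>().data());
	cv::Mat resized;
	cv::resize(image, resized, cv::Size(160, 160));
	// 'view' already has the right size and type, so convertTo fills it in place.
	resized.convertTo(view, CV_32FC3);
	return t;
}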
Code example #2
File: TensorFlowEngine.cpp  Project: smistad/FAST
void TensorFlowEngine::run() {
	if(mInputNodes.empty())
		throw Exception("At least one input node has to be given to the NeuralNetwork before execution");
	if(mOutputNodes.empty())
		throw Exception("At least one output node has to be given to the NeuralNetwork before execution");

	// For each input, create a tensorflow tensor:
	std::vector <std::pair<std::string, tensorflow::Tensor>> input_tensors;
	for(auto inputNode : mInputNodes) {
		const std::string name = inputNode.first;
		if(!inputNode.second.data)
			throw Exception("Input node " + name + " has not received any data");
		auto shape = inputNode.second.data->getShape();
		if(shape.getUnknownDimensions() > 0)
		    throw Exception("Input shape must be fully known when executing NN");

		// Construct tensorflow tensor
        tensorflow::TensorShape tensorShape;
        for(auto i : shape.getAll()) {
            tensorShape.AddDim(i);
        }
        tensorflow::Tensor input_tensor(
                tensorflow::DT_FLOAT,
                tensorShape
        );

        // Give tensor data to tensorflow
        // TODO is the data here actually moved?
        TensorAccess::pointer access = inputNode.second.data->getAccess(ACCESS_READ);
        switch(shape.getDimensions()) {
            case 2:
                input_tensor.tensor<float, 2>() = std::move(access->getData<2>());
                break;
            case 3:
                input_tensor.tensor<float, 3>() = std::move(access->getData<3>());
                break;
            case 4:
                input_tensor.tensor<float, 4>() = std::move(access->getData<4>());
                break;
            case 5:
                input_tensor.tensor<float, 5>() = std::move(access->getData<5>());
                break;
            case 6:
                input_tensor.tensor<float, 6>() = std::move(access->getData<6>());
                break;
            default:
                throw Exception("Invalid tensor dimension size");
        }

		// Add tensorflow tensor to list of input tensors
		input_tensors.push_back(std::make_pair(name, input_tensor));
	}

    for(std::string name : mLearningPhaseTensors) {
        // Create a scalar tensor which tells the system we are NOT doing training
        tensorflow::Tensor input_tensor2(
                tensorflow::DT_BOOL,
                tensorflow::TensorShape() // Scalar
        );
        auto input_tensor_mapped2 = input_tensor2.tensor<bool, 0>();
        input_tensor_mapped2(0) = false;
        input_tensors.push_back(std::make_pair(name, input_tensor2));
    }

	std::vector<tensorflow::Tensor> output_tensors;

	reportInfo() << "Running network" << reportEnd();
	tensorflow::Status s;
	//mRuntimeManager->startRegularTimer("network_execution");
	std::vector<std::string> outputNames;
	for(auto node : mOutputNodes)
	    outputNames.push_back(node.first);
	s = mSession->Run(input_tensors, outputNames, {}, &output_tensors);
	//mRuntimeManager->stopRegularTimer("network_execution");

	if (!s.ok()) {
		throw Exception("Error during inference: " + s.ToString());
	}
	reportInfo() << "Finished executing network" << reportEnd();

    // Collect all output data as FAST tensors
    for(size_t j = 0; j < outputNames.size(); ++j) {
        const std::string outputName = outputNames[j];
        const NetworkNode node = mOutputNodes[outputName];
        auto tensor = TensorflowTensor::New();
        tensor->create(std::move(output_tensors[j]));
        mOutputNodes[outputName].data = tensor;
	}
	reportInfo() << "Finished parsing output" << reportEnd();
}
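
This example shows the general feed/fetch pattern of the TensorFlow C++ session API: build a TensorShape one dimension at a time, copy the input data into the tensor, and pass (node name, tensor) pairs to Session::Run together with the names of the tensors to fetch. A minimal sketch of just that pattern follows; the node names "input" and "output" and the run_once helper are placeholders, the real names depend on the loaded graph.

#include <algorithm>
#include <string>
#include <utility>
#include <vector>
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/public/session.h"

// Hypothetical helper: feed one float tensor of the given shape and fetch one output.
tensorflow::Status run_once(tensorflow::Session *session,
                            const std::vector<tensorflow::int64> &dims,
                            const std::vector<float> &values,
                            std::vector<tensorflow::Tensor> *outputs) {
    // Build the shape dimension by dimension, as the FAST example does.
    tensorflow::TensorShape shape;
    for (auto d : dims)
        shape.AddDim(d);

    tensorflow::Tensor input(tensorflow::DT_FLOAT, shape);
    std::copy(values.begin(), values.end(), input.flat<float>().data());

    // Feeds are (node name, tensor) pairs; the empty list means no extra target nodes.
    std::vector<std::pair<std::string, tensorflow::Tensor>> feeds = {{"input", input}};
    return session->Run(feeds, {"output"}, {}, outputs);
}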
Code example #3
File: Tensorflow.cpp  Project: hcmlab/mobileSSI
    void Tensorflow::consume(IConsumer::info consume_info,
                             ssi_size_t stream_in_num,
                             ssi_stream_t stream_in[]) {

        ssi_real_t *dataptr = ssi_pcast(ssi_real_t, stream_in[0].ptr);

        tensorflow::Session *session;
        tensorflow::Status status = NewSession(tensorflow::SessionOptions(), &session);
        if (!status.ok()) {
            ssi_wrn("status: %s \n", status.ToString().c_str());
            return;
        }

        tensorflow::GraphDef graph_def;

#if __ANDROID__
        status = ReadTextProto(tensorflow::Env::Default(), "/sdcard/android_xmlpipe/frozen_graph.pb", &graph_def);
#else
        status = ReadTextProto(tensorflow::Env::Default(),
                               "/home/mainuser/code/SSI/mobileSSI/plugins/tensorflow/test_files/frozen_graph.pb",
                               &graph_def);
#endif

        if (!status.ok()) {
            ssi_wrn("status: %s \n", status.ToString().c_str());
            return;
        }

        status = session->Create(graph_def);
        if (!status.ok()) {
            ssi_wrn("status: %s \n", status.ToString().c_str());
            return;
        }

        int number_dim = stream_in[0].dim;
        int number_test = stream_in[0].num; // 4072
        int number_classes = 4;

        tensorflow::Tensor input_tensor(tensorflow::DT_FLOAT, tensorflow::TensorShape({number_test, number_dim}));
        auto dst = input_tensor.flat<float>().data();
        for (int i = 0; i < stream_in[0].num; i++) {
            std::copy_n(dataptr + i * number_dim, number_dim, dst);
            dst += number_dim;
        }

        std::vector<std::pair<std::string, tensorflow::Tensor>> inputs = {{"input_TT", input_tensor}};
        std::vector<tensorflow::Tensor> outputs;
        status = session->Run(inputs, {"output_TT"}, {}, &outputs);
        if (!status.ok()) {
            ssi_wrn("status: %s \n", status.ToString().c_str());
            return;
        }

        std::vector<int> number_hits(number_classes, 0);
        for (std::vector<tensorflow::Tensor>::iterator it = outputs.begin(); it != outputs.end(); ++it) {
            auto items = it->shaped<float, 2>({number_test, number_classes});
            for (int i = 0; i < number_test; i++) {
                int arg_max = 0;
                float val_max = items(i, 0);
                for (int j = 0; j < number_classes; j++) {
                    if (items(i, j) > val_max) {
                        arg_max = j;
                        val_max = items(i, j);
                    }
                }
                // Tally which class won the argmax for this sample.
                number_hits[arg_max]++;
            }
        }

        std::string classes[] = {"ambient_animals", "rain", "running_water", "traffic"};
        for (int i = 0; i < number_classes; i++) {
            // Fraction of frames predicted as each class (the original code reports it as "accuracy").
            float accuracy = (float) number_hits[i] / number_test;
            ssi_wrn("accuracy for class %s : %f \n", classes[i].c_str(), accuracy);
            _probs[i] = accuracy;
        }
        session->Close();
        delete session;  // NewSession() transfers ownership of the session to the caller

        _handler->handle(consume_info.time, consume_info.dur, 4, 0, _probs, _class_names, 0, 0);
    }
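
The classification part of this example is a per-row argmax over a [number_test, number_classes] output tensor followed by a tally of how many rows each class won. Below is a minimal sketch of just the argmax step, assuming a 2-D float output tensor; argmax_rows is a hypothetical helper name.

#include <vector>
#include "tensorflow/core/framework/tensor.h"

// Hypothetical helper: per-row argmax over a [num_samples, num_classes] float tensor.
std::vector<int> argmax_rows(const tensorflow::Tensor &output,
                             int num_samples, int num_classes) {
    auto items = output.shaped<float, 2>({num_samples, num_classes});
    std::vector<int> result(num_samples, 0);
    for (int i = 0; i < num_samples; i++) {
        int arg_max = 0;
        for (int j = 1; j < num_classes; j++) {
            if (items(i, j) > items(i, arg_max))
                arg_max = j;
        }
        result[i] = arg_max;
    }
    return result;
}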