/// <summary>
/// The example shows
/// - how to load a pretrained model and evaluate several nodes by combining their outputs.
/// Note: The example uses the model trained by <CNTK>/Examples/Image/Classification/ResNet/Python/TrainResNet_CIFAR10.py.
/// Please see README.md in <CNTK>/Examples/Image/Classification/ResNet on how to train the model.
/// The parameter 'modelFilePath' specifies the path to the model.
/// </summary>
void EvaluateCombinedOutputs(const wchar_t* modelFilePath, const DeviceDescriptor& device)
{
    printf("\n===== Evaluate combined outputs =====\n");

    // Load the model.
    FunctionPtr modelFunc = Function::Load(modelFilePath, device);

    // Get the node of interest.
    std::wstring intermediateLayerName = L"final_avg_pooling";
    FunctionPtr interLayerPrimitiveFunc = modelFunc->FindByName(intermediateLayerName);
    Variable poolingOutput = interLayerPrimitiveFunc->Output();

    // Create a function that combines the outputs of the node "final_avg_pooling" and the final layer of the model.
    FunctionPtr evalFunc = Combine({ modelFunc->Output(), poolingOutput });
    Variable inputVar = evalFunc->Arguments()[0];

    // Prepare input data.
    // For evaluating an image, you first need to perform some image preprocessing to make sure that the input image
    // has the correct size and layout matching the model inputs.
    // Please note that the model used by this example expects the CHW image layout.
    // inputVar.Shape()[0] is the image width, inputVar.Shape()[1] is the image height, and inputVar.Shape()[2] is the number of channels.
    // For simplicity and to avoid external dependencies, we skip the preprocessing step here and just use artificially created data as input.
    std::vector<float> inputData(inputVar.Shape().TotalSize());
    for (size_t i = 0; i < inputData.size(); ++i)
    {
        inputData[i] = static_cast<float>(i % 255);
    }

    // Create the input value and the input data map.
    ValuePtr inputVal = Value::CreateBatch(inputVar.Shape(), inputData, device);
    std::unordered_map<Variable, ValuePtr> inputDataMap = { { inputVar, inputVal } };

    // Create the output data map. Using null as the Value indicates that system-allocated memory should be used.
    // Alternatively, create a Value object and add it to the data map.
    Variable modelOutput = evalFunc->Outputs()[0];
    Variable interLayerOutput = evalFunc->Outputs()[1];

    std::unordered_map<Variable, ValuePtr> outputDataMap = { { modelOutput, nullptr }, { interLayerOutput, nullptr } };

    // Start evaluation on the device.
    evalFunc->Evaluate(inputDataMap, outputDataMap, device);

    // Get the evaluation results as dense output.
    for (auto& outputVariableValuePair : outputDataMap)
    {
        auto variable = outputVariableValuePair.first;
        auto value = outputVariableValuePair.second;
        std::vector<std::vector<float>> outputData;
        value->CopyVariableValueTo(variable, outputData);
        PrintOutput<float>(variable.Shape().TotalSize(), outputData);
    }
}
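// Usage sketch (not part of the original example): one way EvaluateCombinedOutputs might be driven
// from a caller. It assumes the same includes as the surrounding examples ("CNTKLibrary.h" and
// using namespace CNTK). The model path and the helper name 'RunCombinedOutputsExample' are
// assumptions for illustration only.
void RunCombinedOutputsExample()
{
    // Hypothetical path to the model produced by TrainResNet_CIFAR10.py.
    const wchar_t* modelFilePath = L"resnet20.dnn";

    // Evaluate on the CPU; a GPU-enabled build could pass DeviceDescriptor::GPUDevice(0) instead.
    EvaluateCombinedOutputs(modelFilePath, DeviceDescriptor::CPUDevice());
}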
void OutputFunctionInfo(FunctionPtr func)
{
    auto inputVariables = func->Arguments();
    fprintf(stderr, "Function '%S': Input Variables (count=%zu)\n", func->Name().c_str(), inputVariables.size());
    for (const auto& v : inputVariables)
    {
        fprintf(stderr, "    name=%S, kind=%d\n", v.Name().c_str(), static_cast<int>(v.Kind()));
    }

    auto outputVariables = func->Outputs();
    fprintf(stderr, "Function '%S': Output Variables (count=%zu)\n", func->Name().c_str(), outputVariables.size());
    for (const auto& v : outputVariables)
    {
        fprintf(stderr, "    name=%S, kind=%d\n", v.Name().c_str(), static_cast<int>(v.Kind()));
    }
}
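// Usage sketch (not part of the original example): load a model and dump its input and output
// variables with OutputFunctionInfo. The helper name 'InspectModel' is an assumption for
// illustration only.
void InspectModel(const wchar_t* modelFilePath, const DeviceDescriptor& device)
{
    // Load the model and print its argument and output variables to stderr.
    FunctionPtr modelFunc = Function::Load(modelFilePath, device);
    OutputFunctionInfo(modelFunc);
}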
static void PopulateNodeDef(const std::wstring& scope, const FunctionPtr& src, tensorflow::NodeDef& dst)
{
    PopulateNodeDef(GetScopedName(scope, src), src->OpName(), src->Output().GetDataType(), src->Outputs(), dst);
}
inline bool GetOutputVaraiableByName(FunctionPtr evalFunc, std::wstring varName, Variable& var)
{
    return GetVariableByName(evalFunc->Outputs(), varName, var);
}
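// The lookup helper GetVariableByName used above is defined elsewhere in the examples. A plausible
// implementation, assuming it simply scans the variable list for a matching name, is sketched below
// for reference; the real helper may differ, and the name 'GetVariableByNameSketch' is used here to
// avoid any clash with it.
inline bool GetVariableByNameSketch(const std::vector<Variable>& variableList, const std::wstring& varName, Variable& var)
{
    for (const auto& candidate : variableList)
    {
        // Compare the variable's name against the requested name and return the first match.
        if (candidate.Name().compare(varName) == 0)
        {
            var = candidate;
            return true;
        }
    }
    return false;
}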