Example #1
File: Variable.cpp Project: rlugojr/CNTK
    void Variable::SetValue(const NDArrayViewPtr& value)
    {
        if (!IsParameter())
            LogicError("Variable::SetValue can be only invoked on a Parameter variable!");
        else if (GetDataType() != value->GetDataType()) 
            LogicError("Variable::SetValue: 'source' and 'destination' have different data types!");
        else if (Shape() != value->Shape() && (AsTensorShape(Shape()) != AsTensorShape(value->Shape())))
            LogicError("Variable::SetValue: 'source' and 'destination' have different shapes!");

        bool alreadySet = false;
        if (m_dataFields->m_initValueFlag)
        {
            // In the case of lazy initialization, try to avoid the redundant call to the initializer. 
            std::call_once(*m_dataFields->m_initValueFlag, [=, &value, &alreadySet] {
                // If the variable hasn't been initialized yet, clone the content of the supplied value and delete the initializer.
                m_dataFields->m_value = value->DeepClone(*m_dataFields->m_valueInitializationDevice, false);
                m_dataFields->m_valueInitializer = nullptr;
                m_dataFields->m_valueInitializationDevice = nullptr;
                alreadySet = true;
            });
        }

        assert(m_dataFields->m_value != nullptr);
        if (!alreadySet)
        {
            // alreadySet is false: the lambda above was not invoked because the variable
            // was already initialized earlier, so simply copy the supplied value's content.
            m_dataFields->m_value->CopyFrom(*value);
        }
    }
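
A minimal usage sketch (hypothetical shapes and names, assuming the public CNTK C++ API, where Parameter exposes SetValue): the supplied view must match the parameter's data type and shape, and cloning the current value is an easy way to guarantee that.

    // Hypothetical sketch: overwrite a Parameter's value in place.
    using namespace CNTK;
    auto device = DeviceDescriptor::CPUDevice();
    Parameter w(NDShape({ 3, 4 }), DataType::Float, 0.0, device, L"w");
    // DeepClone yields a view with the same data type, shape, and device,
    // so the checks at the top of SetValue are satisfied by construction.
    NDArrayViewPtr v = w.Value()->DeepClone();
    v->SetValue(1.0f);  // fill the clone with ones
    w.SetValue(v);      // copies v's contents into the parameter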
Example #2
        void TensorBoardFileWriter::WriteImage(const std::wstring& name, NDArrayViewPtr imageData, uint64_t step)
        {
            assert(imageData != nullptr);
            tensorflow::Event event;
            event.set_wall_time(static_cast<double>(std::time(0)));
            tensorflow::Summary* summary = event.mutable_summary();

            std::vector<size_t> dimensions = imageData->Shape().Dimensions();
            const size_t batch_size = dimensions.at(3);
            const size_t depth = dimensions.at(2);
            const size_t width = dimensions.at(1);
            const size_t height = dimensions.at(0);
            const DataType dtype = imageData->GetDataType();

            std::vector<size_t> start(4, 0);
            std::vector<size_t> extent;
            extent.push_back(height);
            extent.push_back(width);
            extent.push_back(depth);
            extent.push_back(1);
            const int compression = -1;
            
            const std::vector<size_t> imageDim({height, width, depth});
            NDShape imageShape(imageDim);

            for (size_t i = 0; i < batch_size; i++) {
                tensorflow::Summary::Value* summaryValue = summary->add_value();
                summaryValue->set_tag(ToString(name) + "/image/" + std::to_string(i));

                tensorflow::Summary::Image* summaryImage = summaryValue->mutable_image();
                summaryImage->set_height(height);
                summaryImage->set_width(width);
                summaryImage->set_colorspace(depth);
                start.back() = static_cast<size_t>(i);
                auto image = imageData->SliceView(start, extent)->AsShape(imageDim);
                vector<uchar> buffer;

                switch (dtype)
                {
                case DataType::Float:
                    WriteImageToBuffer(image->WritableDataBuffer<float>(), height, width, CV_32FC(depth), buffer);
                    break;
                
                case DataType::Double:
                    WriteImageToBuffer(image->WritableDataBuffer<double>(), height, width, CV_64FC(depth), buffer);
                    break;

                default:
                    fprintf(stderr, "TensorBoardFileWriter: Unsupported data type: %d\n", static_cast<int>(dtype));
                    break;
                }

                string str(buffer.begin(), buffer.end());
                summaryImage->set_encoded_image_string(str);
            }
            
            WriteRecord(Serialize(event));
        }
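
A hypothetical call site, assuming a TensorBoardFileWriter named writer already exists and the batch is stored densely in the [height, width, channels, batch] layout the function expects:

    // Hypothetical: log a batch of 8 RGB 32x32 images under the tag "inputs".
    NDShape batchShape({ 32, 32, 3, 8 });   // [height, width, channels, batch]
    NDArrayViewPtr batch = MakeSharedObject<NDArrayView>(DataType::Float, batchShape, DeviceDescriptor::CPUDevice());
    // ... fill 'batch' with pixel data ...
    writer.WriteImage(L"inputs", batch, /*step=*/100);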
Example #3
File: Value.cpp Project: 1132520084/CNTK
    Value::Value(const NDArrayViewPtr& data, const NDMaskPtr& mask)
        : m_data(data), m_mask(mask)
    {
        if (mask != nullptr)
        {
            auto dataShape = data->Shape();
            auto maskShape = mask->Shape();

            if (maskShape.NumAxes() > dataShape.NumAxes())
                InvalidArgument("The number of axes of the mask of a Value object cannot exceed the number of axes of the data NDArrayView object");

            if (dataShape.SubShape(dataShape.NumAxes() - maskShape.NumAxes()) != maskShape)
                InvalidArgument("Invalid Value object; the data and mask are incompatible. The trailing dimensions of the data do not match the dimensions of the mask");
        }
    }
Example #4
File: Value.cpp Project: hahatt/CNTK
    Value::Value(const NDArrayViewPtr& data, const NDMaskPtr& mask)
        : m_data(data), m_mask(mask)
    {
        if (mask != nullptr)
        {
            auto dataShape = data->Shape();
            auto maskShape = mask->Shape();

            if (maskShape.Rank() > dataShape.Rank())
                InvalidArgument("The rank (%d) of the mask of a Value object cannot exceed the rank (%d) of the data NDArrayView object", (int)maskShape.Rank(), (int)dataShape.Rank());

            if (dataShape.SubShape(dataShape.Rank() - maskShape.Rank()) != maskShape)
                InvalidArgument("Invalid Value object; the data and mask are incompatible. The trailing dimensions of the data with shape %S do not match the dimensions of the mask with shape %S", AsStringForErrorReporting(dataShape).c_str(), AsStringForErrorReporting(maskShape).c_str());
        }
    }
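
A hypothetical shape pairing that passes both checks: the mask's dimensions must equal the trailing dimensions of the data, e.g. data of shape [featureDim, maxSteps, numSequences] with a mask of shape [maxSteps, numSequences].

    // Hypothetical sketch: a [100, 20, 4] data view masked per step and sequence.
    auto device = DeviceDescriptor::CPUDevice();
    NDArrayViewPtr data = MakeSharedObject<NDArrayView>(DataType::Float, NDShape({ 100, 20, 4 }), device);
    NDMaskPtr mask = MakeSharedObject<NDMask>(NDShape({ 20, 4 }), device);   // trailing dims of 'data'
    ValuePtr value = MakeSharedObject<Value>(data, mask);                    // rank 2 <= rank 3, shapes line up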
Example #5
    void DistributedLearnerBase::ConvertToOrdered(const std::unordered_map<Parameter, NDArrayViewPtr>& gradientValues, std::vector<std::pair<Parameter, NDArrayViewPtr>>& result, std::unordered_map<Parameter, NDArrayViewPtr>* convertedGradientValues)
    {
        result.reserve(gradientValues.size());
        result.clear();

        if (convertedGradientValues)
            convertedGradientValues->clear();

        for (const auto& g : gradientValues)
        {
            NDArrayViewPtr p = g.second;
            // convert sparse gradient to dense for accumulation
            if (m_convertSparseToDense && p->GetStorageFormat() != StorageFormat::Dense)
            {
                NDArrayViewPtr pDense = MakeSharedObject<NDArrayView>(0, p->GetDataType(), p->Shape(), p->Device());
                pDense->CopyFrom(*p);
                p = pDense;
            }
            auto pair = std::make_pair(g.first, p);
            result.push_back(pair);

            if (convertedGradientValues)
                convertedGradientValues->insert(pair);
        }

        std::sort(result.begin(), result.end(),
            [](const std::pair<Parameter, NDArrayViewPtr>& a, const std::pair<Parameter, NDArrayViewPtr>& b) { return a.first.Uid() < b.first.Uid(); });
    }
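
The sparse-to-dense conversion above is a reusable pattern: allocate a zero-filled dense view with the same type, shape, and device, then CopyFrom the sparse view (CopyFrom copies across storage formats). A standalone sketch, assuming a sparse NDArrayViewPtr named sparseGrad already exists:

    // Hypothetical: densify a sparse gradient so it can be accumulated element-wise.
    NDArrayViewPtr dense = MakeSharedObject<NDArrayView>(0, sparseGrad->GetDataType(), sparseGrad->Shape(), sparseGrad->Device());
    dense->CopyFrom(*sparseGrad);   // dense now holds the same values in dense storage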
Example #6
    inline size_t GetBufferSize(const NDArrayViewPtr& viewPtr)
    {
        return viewPtr->Shape().TotalSize() * DataTypeSize(viewPtr->GetDataType());
    }
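
For example, a float view of shape [3, 4] has TotalSize() == 12 and DataTypeSize(DataType::Float) == 4, so GetBufferSize returns 48 bytes (hypothetical check):

    // Hypothetical sanity check of the computation above.
    auto view = MakeSharedObject<NDArrayView>(DataType::Float, NDShape({ 3, 4 }), DeviceDescriptor::CPUDevice());
    assert(GetBufferSize(view) == 3 * 4 * sizeof(float));   // 48 bytes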