void NDMask::MaskSection(const std::vector<size_t>& sectionOffset, const NDShape& sectionShape)
{
    // TODO: Implement batching of the masking operation for masks residing on GPUs to avoid making
    // a separate GPU invocation for each MaskSection call.
    if (sectionOffset.size() > m_maskShape.NumAxes())
        LogicError("NDMask::MaskSection: The sectionOffset cannot have dimensionality higher than the number of axes of 'this' mask");

    if (sectionShape.NumAxes() > m_maskShape.NumAxes())
        LogicError("NDMask::MaskSection: The section shape cannot have an axes count higher than the number of axes of 'this' mask");

    std::vector<size_t> offset(m_maskShape.NumAxes(), 0);
    for (size_t i = 0; i < sectionOffset.size(); ++i)
        offset[i] = sectionOffset[i];

    NDShape shape = sectionShape.AppendShape(NDShape(m_maskShape.NumAxes() - sectionShape.NumAxes(), NDShape::InferredDimension));
    auto maskMatrix = GetMatrix();
    size_t rowOffset = offset[0];
    size_t colOffset = offset[1];
    size_t sliceRowLength = (shape[0] != NDShape::InferredDimension) ? shape[0] : (maskMatrix->GetNumRows() - rowOffset);
    size_t sliceColLength = (shape[1] != NDShape::InferredDimension) ? shape[1] : (maskMatrix->GetNumCols() - colOffset);
    if ((rowOffset == 0) && (sliceRowLength == maskMatrix->GetNumRows()))
        maskMatrix->ColumnSlice(colOffset, sliceColLength).SetValue(0);
    else
    {
        // Since Matrix does not support strides in the row dimension, we need to create a separate slice for each column.
        for (size_t i = colOffset; i < (colOffset + sliceColLength); ++i)
        {
            auto column = maskMatrix->ColumnSlice(i, 1);
            column.Reshape(1, maskMatrix->GetNumRows());
            column.ColumnSlice(rowOffset, sliceRowLength).SetValue(0);
        }
    }
}
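// Illustrative sketch (not part of NDMask): the same rectangular-masking logic applied to a plain
// column-major buffer, where only contiguous column ranges can be written in one go. It mirrors the
// fast path and the per-column fallback above. All names below are hypothetical and for illustration only.
#include <cstddef>
#include <cstring>
#include <vector>

static void MaskRectColumnMajor(std::vector<char>& mask, size_t numRows, size_t numCols,
                                size_t rowOffset, size_t numMaskRows,
                                size_t colOffset, size_t numMaskCols)
{
    if ((rowOffset == 0) && (numMaskRows == numRows))
    {
        // Fast path: the section covers whole columns, so it is one contiguous block in column-major storage.
        std::memset(mask.data() + (colOffset * numRows), 0, numMaskCols * numRows);
    }
    else
    {
        // Fallback: the section covers only part of each column, so each column is written separately,
        // analogous to the per-column ColumnSlice loop above.
        for (size_t col = colOffset; col < (colOffset + numMaskCols); ++col)
            std::memset(mask.data() + (col * numRows) + rowOffset, 0, numMaskRows);
    }
}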
// Describes how the input stream is transformed into the output stream. Called once per applied stream.
// The scale transformer resizes the stream so that all samples are of the same size.
StreamInformation ScaleTransformer::Transform(const StreamInformation& inputStream)
{
    TransformBase::Transform(inputStream);

    auto dims = ImageDimensions(m_imgWidth, m_imgHeight, m_imgChannels).AsTensorShape(HWC).GetDims();
    m_outputStream.m_sampleLayout = NDShape(std::vector<size_t>(dims.begin(), dims.end()));
    return m_outputStream;
}
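// Illustrative sketch (not the actual ScaleTransformer, which above only rewrites the stream
// description; the pixel resizing itself is performed elsewhere in the transformer): a minimal
// nearest-neighbor resize of an interleaved HWC sample to a fixed target size, showing why every
// output sample ends up with the same width/height/channel layout. All names are hypothetical.
#include <cstddef>
#include <vector>

static std::vector<float> ResizeNearestHWC(const std::vector<float>& src,
                                           size_t srcWidth, size_t srcHeight, size_t channels,
                                           size_t dstWidth, size_t dstHeight)
{
    std::vector<float> dst(dstWidth * dstHeight * channels);
    for (size_t y = 0; y < dstHeight; ++y)
    {
        size_t srcY = (y * srcHeight) / dstHeight; // nearest source row
        for (size_t x = 0; x < dstWidth; ++x)
        {
            size_t srcX = (x * srcWidth) / dstWidth; // nearest source column
            for (size_t c = 0; c < channels; ++c)
                dst[(y * dstWidth + x) * channels + c] = src[(srcY * srcWidth + srcX) * channels + c];
        }
    }
    return dst;
}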
// Describes how the input stream is transformed into the output stream. Called once per applied stream.
// The transpose transformer expects a dense input stream with samples in HWC layout and outputs CHW.
StreamInformation TransposeTransformer::Transform(const StreamInformation& inputStream)
{
    m_outputStream = TransformBase::Transform(inputStream);

    // Changing from NHWC to NCHW
    m_outputStream.m_elementType = m_precision;
    if (!m_inputStream.m_sampleLayout.IsUnknown())
    {
        ImageDimensions dimensions(TensorShape(m_inputStream.m_sampleLayout.Dimensions()), HWC);
        auto dims = dimensions.AsTensorShape(CHW).GetDims();
        m_outputStream.m_sampleLayout = NDShape(std::vector<size_t>(dims.begin(), dims.end()));
    }
    return m_outputStream;
}
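// Illustrative sketch (not the actual TransposeTransformer, which above only rewrites the stream
// description; the per-sample data movement is done elsewhere in the transformer): converting one
// interleaved HWC sample into planar CHW layout, i.e. the reordering the output layout implies.
// All names are hypothetical.
#include <cstddef>
#include <vector>

static std::vector<float> TransposeHWCToCHW(const std::vector<float>& src,
                                            size_t width, size_t height, size_t channels)
{
    std::vector<float> dst(src.size());
    for (size_t c = 0; c < channels; ++c)
        for (size_t y = 0; y < height; ++y)
            for (size_t x = 0; x < width; ++x)
                // HWC index: (y * width + x) * channels + c; CHW index: (c * height + y) * width + x
                dst[(c * height + y) * width + x] = src[(y * width + x) * channels + c];
    return dst;
}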