Example #1
// binary zip operation, e.g. Plus
// If allowBroadcast then one can be a sub-dimension of the other (if layout then only for rows, otherwise for cols, too).
// This also helpfully resizes the children if not yet sized.
void ComputationNodeBase::ValidateBinaryZip(bool isFinalValidationPass, bool allowBroadcast)
{
    assert(m_inputs.size() == 2);
    ComputationNodeBase::Validate(isFinalValidationPass);
    InferMBLayoutFromInputsForStandardCase(isFinalValidationPass);

    ValidateInferBinaryInputDims();

    if (isFinalValidationPass)
        ValidateMBLayout(Input(0), Input(1));

    // result has tensor shape with dimensions being the max over both
    let shape0 = GetInputSampleLayout(0);
    let shape1 = GetInputSampleLayout(1);
    SmallVector<size_t> dims = shape0.GetDims();
    if (shape1.GetRank() > dims.size())
        dims.resize(shape1.GetRank(), 1); // pad with ones

    // If rank of [0] is higher, then we only need to take the max over the rank of [1].
    // If rank of [1] is higher, then we have padded [0] to equal length.
    for (size_t k = 0; k < shape1.GetRank(); k++)
    {
        size_t dim1 = shape1[k];
        // BUGBUG: We must consider the allowBroadcast flag here.
        if (dims[k] <= 1 && dim1 != 0)                     // is [0] broadcasting (1) or unspecified (0)?
            dims[k] = dim1;                                // then use dimension we broadcast to
        else if (dim1 <= 1 && dims[k] != 0)                // if [1] is broadcasting or unspecified
            ;                                              // then dims is already correct
        else if (isFinalValidationPass && dim1 != dims[k]) // no broadcasting or unspecified: they must match
            InvalidArgument("%ls: Input dimensions [%s] and [%s] are not compatible.",
                            NodeDescription().c_str(), string(shape0).c_str(), string(shape1).c_str());
    }

    SetDims(TensorShape(dims), HasMBLayout());
}
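A minimal standalone sketch of the dimension-matching rule above, outside the CNTK class hierarchy; broadcastShape and the use of plain std::vector are illustrative assumptions, not CNTK API.

#include <cstddef>
#include <stdexcept>
#include <vector>

// Combine two shapes elementwise, mirroring the loop in ValidateBinaryZip:
// 1 broadcasts, 0 means "not inferred yet", anything else must match.
std::vector<size_t> broadcastShape(std::vector<size_t> dims0, const std::vector<size_t>& dims1)
{
    if (dims1.size() > dims0.size())
        dims0.resize(dims1.size(), 1); // pad with ones, as above
    for (size_t k = 0; k < dims1.size(); k++)
    {
        if (dims0[k] <= 1 && dims1[k] != 0)      // is [0] broadcasting (1) or unspecified (0)?
            dims0[k] = dims1[k];                 // then take the dimension of [1]
        else if (dims1[k] <= 1 && dims0[k] != 0) // [1] broadcasts or is unspecified
            ;                                    // dims0 is already correct
        else if (dims0[k] != dims1[k])           // neither broadcasts: they must match
            throw std::invalid_argument("input dimensions are not compatible");
    }
    return dims0;
}

// e.g. broadcastShape({ 3, 1 }, { 3, 4 }) yields { 3, 4 }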
Example #2
/*virtual*/ void ReduceElementsNode<ElemType>::Validate(bool isFinalValidationPass) /*override*/
{
    Base::Validate(isFinalValidationPass);
    InferMBLayoutFromInputsForStandardCase(isFinalValidationPass);

    // validate the opcode (in case we got instantiated empty and never updated)
    ValidateOp();

    let shape = Input(0)->GetSampleLayout();
    auto dims = shape.GetDims();
    size_t reducedDim = 0; // (init to keep compiler happy)
    if (m_axis == 0)
    {
        reducedDim = shape.GetNumElements();
        dims = { 1 };                       // entire sample is reduced to a scalar
    }
    else if (m_axis - 1 >= 0 && m_axis - 1 < dims.size())
    {
        reducedDim = dims[m_axis - 1];
        dims[m_axis - 1] = 1;               // one axis is reduced to a scalar
    }
    else if (isFinalValidationPass)
        InvalidArgument("The shape of %ls [%s] has no axis %d", NodeDescription().c_str(), string(shape).c_str(), m_axis);

    // for "Mean", we must divide by #elements
    if (isFinalValidationPass && m_operation == L"Mean")
        m_scale = (ElemType)(1.0 / reducedDim);
    else
        m_scale = (ElemType)1;

    SetDims(TensorShape(dims), Input(0)->HasMBLayout());
}
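A sketch of the axis convention used above, with hypothetical standalone names: axis 0 reduces the whole sample to a scalar, axis k >= 1 reduces dims[k - 1], and "Mean" scales the sum by 1/#reduced elements.

#include <functional>
#include <numeric>
#include <stdexcept>
#include <vector>

struct ReductionPlan { std::vector<size_t> outDims; double scale; };

ReductionPlan planReduction(std::vector<size_t> dims, int axis, bool isMean)
{
    size_t reducedDim;
    if (axis == 0)
    {
        // entire sample is reduced to a scalar
        reducedDim = std::accumulate(dims.begin(), dims.end(), (size_t)1, std::multiplies<size_t>());
        dims = { 1 };
    }
    else if (axis >= 1 && (size_t)(axis - 1) < dims.size())
    {
        reducedDim = dims[axis - 1]; // one axis is reduced to a scalar
        dims[axis - 1] = 1;
    }
    else
        throw std::invalid_argument("the shape has no such axis");
    return { dims, isMean ? 1.0 / reducedDim : 1.0 };
}

// e.g. planReduction({ 2, 3, 4 }, 2, /*isMean=*/true) yields outDims { 2, 1, 4 } and scale 1/3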
Example #3
// binary reduce-to-(1,1) operation, e.g. CrossEntropyWithSoftmaxNode
// Currently only called by criterion nodes.
// This function also infers child LearnableParameters. In case you wonder why this is needed for criterion nodes, there are edge cases, e.g. a
// learnable parameter being regularized by a criterion node, where the learnable parameter is fed both into that criterion node and other places.
void ComputationNodeBase::ValidateBinaryReduce(bool isFinalValidationPass)
{
    ComputationNodeBase::Validate(isFinalValidationPass);
    m_pMBLayout = nullptr; // this node does not hold mini-batch data
    ValidateInferBinaryInputDims();

    if (isFinalValidationPass)
    {
        if (!(Input(0)->GetSampleLayout().IsElementwiseCompatibleWith(Input(1)->GetSampleLayout())))
        {
            string s1 = Input(0)->GetSampleLayout();
            string s2 = Input(1)->GetSampleLayout();
            // BUGBUG: Allow broadcasting?
            LogicError("%ls: The tensor dimensions in the inputs do not match. %s != %s", NodeDescription().c_str(), s1.c_str(), s2.c_str());
        }
        else if (!(Input(0)->HasMBLayout()))
            LogicError("%ls: Expected MBLayout in Input 0.", NodeDescription().c_str());
        else if (!(Input(1)->HasMBLayout()))
            LogicError("%ls: Expected MBLayout in Input 1.", NodeDescription().c_str());
        // Shape of the MBLayouts is checked at runtime.
    }
    SetDims(TensorShape(1), false);
}
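For reference, a minimal sketch of what an elementwise-compatibility test such as IsElementwiseCompatibleWith might check. The exact CNTK semantics are not shown in this snippet, so the rule below (dimensions must agree once the shorter shape is padded with ones) is an assumption.

#include <cstddef>
#include <vector>

// assumed rule: shapes are compatible iff they match dimension by dimension
// after the shorter one is padded with trailing singleton dimensions
bool elementwiseCompatible(std::vector<size_t> a, std::vector<size_t> b)
{
    if (a.size() < b.size()) a.resize(b.size(), 1);
    if (b.size() < a.size()) b.resize(a.size(), 1);
    for (size_t k = 0; k < a.size(); k++)
        if (a[k] != b[k])
            return false;
    return true;
}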
Example #4
/*virtual*/ void ReduceElementsNode<ElemType>::Validate(bool isFinalValidationPass) /*override*/
{
    Base::Validate(isFinalValidationPass);
    InferMBLayoutFromInputsForStandardCase(isFinalValidationPass);

    // validate the opcode (in case we got instantiated empty and never updated)
    ValidateOp();

    let shape = Input(0)->GetSampleLayout();
    auto dims = shape.GetDims();
    if (m_axis == 0)
        dims = { 1 };                       // entire sample is reduced to a scalar
    else if (m_axis - 1 >= 0 && m_axis - 1 < dims.size())
        dims[m_axis - 1] = 1;               // one axis is reduced to a scalar
    else if (isFinalValidationPass)
        InvalidArgument("The shape of %ls [%s] has no axis %d", NodeDescription().c_str(), string(shape).c_str(), m_axis);

    SetDims(TensorShape(dims), Input(0)->HasMBLayout());
}
Example #5
static bool AddNodeValue (TRI_json_t* row, TRI_aql_node_t* const node) {
  TRI_json_t* result;
  TRI_json_t* type;
  TRI_json_t* value;

  result = TRI_CreateArrayJson(TRI_UNKNOWN_MEM_ZONE);

  if (result == NULL) {
    return false;
  }

  type = NodeType(node);

  if (type != NULL) {
    TRI_Insert3ArrayJson(TRI_UNKNOWN_MEM_ZONE,
                         result,
                         "type",
                         type);
  }

  value = NodeDescription(node);
  if (value != NULL) {
    TRI_Insert3ArrayJson(TRI_UNKNOWN_MEM_ZONE,
                         result,
                         "value",
                         value);
  }

  if (node->_type == TRI_AQL_NODE_COLLECTION) {
    TRI_json_t* extra = TRI_GetJsonCollectionHintAql(TRI_AQL_NODE_DATA(node));

    if (extra != NULL) {
      TRI_Insert3ArrayJson(TRI_UNKNOWN_MEM_ZONE,
                           result,
                           "extra",
                           extra);
    }
  }

  TRI_Insert3ArrayJson(TRI_UNKNOWN_MEM_ZONE, row, "expression", result);

  return true;
}
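AddNodeValue builds a JSON object and attaches the optional fields "type", "value", and "extra" only when they could be produced. A compact C++ sketch of that optional-field pattern, with a hypothetical string map standing in for the TRI_json_t object:

#include <map>
#include <optional>
#include <string>

using JsonObject = std::map<std::string, std::string>;

// insert a field only when a value exists; absent fields are simply omitted,
// as in AddNodeValue above
void insertIfPresent(JsonObject& obj, const std::string& key,
                     const std::optional<std::string>& value)
{
    if (value)
        obj[key] = *value;
}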
Example #6
// binary zip operation, e.g. Plus
// If allowBroadcast then one can be a sub-dimension of the other (if layout then only for rows, otherwise for cols, too).
// This also helpfully resizes the children if not yet sized.
void ComputationNodeBase::ValidateBinaryZip(bool isFinalValidationPass, bool allowBroadcast)
{
    assert(m_inputs.size() == 2);
    ComputationNodeBase::Validate(isFinalValidationPass);
    InferMBLayoutFromInputsForStandardCase(isFinalValidationPass);

    ValidateInferBinaryInputDims();

    if (isFinalValidationPass &&
        Input(0)->GetMBLayout() != Input(1)->GetMBLayout() && Input(0)->HasMBLayout() && Input(1)->HasMBLayout())
    {
        LogicError("%ls: Minibatch layouts are not the same between arguments and might get out of sync during runtime. If this is by design, use ReconcileDynamicAxis() to forward layouts between nodes.", NodeDescription().c_str());
    }

    // result has tensor shape with dimensions being the max over both
    let shape0 = GetInputSampleLayout(0);
    let shape1 = GetInputSampleLayout(1);
    SmallVector<size_t> dims = shape0.GetDims();
    if (shape1.GetRank() > dims.size())
        dims.resize(shape1.GetRank(), 1); // pad with ones

    // If rank of [0] is higher, then we only need to take the max over the rank of [1].
    // If rank of [1] is higher, then we have padded [0] to equal length.
    for (size_t k = 0; k < shape1.GetRank(); k++)
    {
        size_t dim1 = shape1[k];
        // BUGBUG: We must consider the allowBroadcast flag here.
        if (dims[k] == 1)                                  // is [0] broadcasting?
            dims[k] = dim1;                                // then use dimension we broadcast to
        else if (dim1 == 1)                                // if [1] is broadcasting
            ;                                              // dims is already correct
        else if (isFinalValidationPass && dim1 != dims[k]) // no broadcasting: they must match
            InvalidArgument("%ls: Input dimensions [%s] and [%s] are not compatible.",
                            NodeDescription().c_str(), string(shape0).c_str(), string(shape1).c_str());
    }

    SetDims(TensorShape(dims), HasMBLayout());
}
Example #7
	void DescriptionsManager::processMessage(const Message* message)
	{
		// if we have a disconnection message
		{
			const Disconnected *disconnected = dynamic_cast<const Disconnected *>(message);
			if (disconnected)
			{
				NodesDescriptionsMap::iterator it = nodesDescriptions.find(disconnected->source);
				if (it != nodesDescriptions.end())
					nodesDescriptions.erase(it);
			}
		}
		
		// if we have an initial description
		{
			const Description *description = dynamic_cast<const Description *>(message);
			if (description)
			{
				NodesDescriptionsMap::iterator it = nodesDescriptions.find(description->source);
				
				// We can receive a description twice, for instance if there is another IDE connected
				if (it != nodesDescriptions.end())
					return;
				
				// Call a user function when a node protocol version mismatches
				if (description->protocolVersion != ASEBA_PROTOCOL_VERSION)
				{
					nodeProtocolVersionMismatch(description->name, description->protocolVersion);
					return;
				}
				
				// create node and copy description into it
				nodesDescriptions[description->source] = NodeDescription(*description);
				checkIfNodeDescriptionComplete(description->source, nodesDescriptions[description->source]);
			}
		}
		
		// if we have a named variable description
		{
			const NamedVariableDescription *description = dynamic_cast<const NamedVariableDescription *>(message);
			if (description)
			{
				NodesDescriptionsMap::iterator it = nodesDescriptions.find(description->source);
				
				// we must have received a description first
				if (it == nodesDescriptions.end())
					return;
				
				// copy description into array if not all named variables have been received yet
				if (it->second.namedVariablesReceptionCounter < it->second.namedVariables.size())
				{
					it->second.namedVariables[it->second.namedVariablesReceptionCounter++] = *description;
					checkIfNodeDescriptionComplete(it->first, it->second);
				}
			}
		}
		
		// if we have a local event description
		{
			const LocalEventDescription *description = dynamic_cast<const LocalEventDescription *>(message);
			if (description)
			{
				NodesDescriptionsMap::iterator it = nodesDescriptions.find(description->source);
				
				// we must have received a description first
				if (it == nodesDescriptions.end())
					return;
				
				// copy description into array if not all local events have been received yet
				if (it->second.localEventsReceptionCounter < it->second.localEvents.size())
				{
					it->second.localEvents[it->second.localEventsReceptionCounter++] = *description;
					checkIfNodeDescriptionComplete(it->first, it->second);
				}
			}
		}
		
		// if we have a native function description
		{
			const NativeFunctionDescription *description = dynamic_cast<const NativeFunctionDescription *>(message);
			if (description)
			{
				NodesDescriptionsMap::iterator it = nodesDescriptions.find(description->source);
				
				// we must have received a description first
				if (it == nodesDescriptions.end())
					return;
				
				// copy description into array
				if (it->second.nativeFunctionReceptionCounter < it->second.nativeFunctions.size())
				{
					it->second.nativeFunctions[it->second.nativeFunctionReceptionCounter++] = *description;
					checkIfNodeDescriptionComplete(it->first, it->second);
				}
			}
		}
	}
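processMessage dispatches on the concrete message type through repeated dynamic_casts. A sketch of how that dispatch can be factored with a template helper; handleAs and the stripped-down Message types are hypothetical, not Aseba API:

#include <functional>

struct Message { virtual ~Message() {} };
struct Disconnected : Message { };

// run the handler only if the message actually is of type T
template <typename T>
bool handleAs(const Message* message, const std::function<void(const T&)>& handler)
{
    if (const T* typed = dynamic_cast<const T*>(message))
    {
        handler(*typed);
        return true;
    }
    return false;
}

// usage: handleAs<Disconnected>(message, [&](const Disconnected& d) { /* erase its description */ });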
Example #8
/*virtual*/ void GatherPackedNode<ElemType>::Validate(bool isFinalValidationPass) /*override*/
{
    ComputationNodeBase::Validate(isFinalValidationPass);

    // inherit MBLayout from indexData
    m_pMBLayout = Input(INDEXDATA)->GetMBLayout();
    if (isFinalValidationPass && (!Input(INDEXDATA)->HasMBLayout()))
        LogicError("%ls requires first argument (index data) to have a time dimension.", NodeDescription().c_str());

    bool sourceHasTimeDimension = Input(SOURCEDATA)->HasMBLayout();

    if (isFinalValidationPass && Input(INDEXDATA)->GetSampleLayout().GetNumElements() != 1)
        InvalidArgument("%ls requires the first argument (index data) to be a scalar time sequence.", NodeDescription().c_str());

    // inherit tensor dimension from sourceData, minus the last (column or time) dimension. TODO this needs to become simpler...
    if (sourceHasTimeDimension)
        SetDims(Input(SOURCEDATA)->GetSampleLayout(), HasMBLayout());
    else
    {
        SmallVector<size_t> layout = { 1 }; // Scalar
        if (Input(SOURCEDATA)->GetSampleLayout().GetRank() > 1)
        {
            auto srcLayout = Input(SOURCEDATA)->GetSampleLayout().GetDims();
            layout.assign(srcLayout.begin(), srcLayout.end() - 1);
        }
        SetDims(TensorShape(layout), HasMBLayout());
    }
}
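A sketch of the "drop the trailing (column or time) dimension" step above with a plain std::vector; stripTrailingDim is a hypothetical helper name:

#include <cstddef>
#include <vector>

std::vector<size_t> stripTrailingDim(const std::vector<size_t>& dims)
{
    if (dims.size() <= 1)
        return { 1 }; // a rank-0 or rank-1 source collapses to a scalar
    return std::vector<size_t>(dims.begin(), dims.end() - 1); // keep all but the last dimension
}

// e.g. stripTrailingDim({ 13, 42, 50 }) yields { 13, 42 }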
Example #9
void ReduceElementsNode<ElemType>::ValidateOp()
{
#if 1 // legacy with initial experiments, delete this soon
    if (m_operation == L"Plus") m_reductionOp = ElementWiseOperator::opSum;
    else
#endif
    if (m_operation == L"Sum"     ) m_reductionOp = ElementWiseOperator::opSum;
    else if (m_operation == L"Max") m_reductionOp = ElementWiseOperator::opMax;
    else if (m_operation == L"Min") m_reductionOp = ElementWiseOperator::opMin;

    // more here
    else InvalidArgument("%ls was given an invalid operation code '%ls'. Allowed are: 'Sum', 'Max', 'Min'.", NodeDescription().c_str(), m_operation.c_str());
}
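The if/else chain above can also be written table-driven, which keeps the legacy "Plus" alias in one place. A sketch with a stand-in enum (the real ElementWiseOperator lives in CNTK):

#include <map>
#include <stdexcept>
#include <string>

enum class ReductionOp { Sum, Max, Min };

ReductionOp parseReductionOp(const std::wstring& name)
{
    static const std::map<std::wstring, ReductionOp> ops = {
        { L"Sum", ReductionOp::Sum }, { L"Plus", ReductionOp::Sum }, // "Plus" kept as a legacy alias
        { L"Max", ReductionOp::Max }, { L"Min", ReductionOp::Min },
    };
    auto it = ops.find(name);
    if (it == ops.end())
        throw std::invalid_argument("invalid operation code; allowed: 'Sum', 'Max', 'Min'");
    return it->second;
}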
Example #10
// N-ary zip operation, e.g. a ternary zip for clip()
// If allowBroadcast then one can be a sub-dimension of the other (if layout then only for rows, otherwise for cols, too).
// This also helpfully resizes the children if not yet sized.
void ComputationNodeBase::ValidateNaryZip(bool isFinalValidationPass, bool allowBroadcast, size_t numInputs)
{
    assert(m_inputs.size() == numInputs);
    ComputationNodeBase::Validate(isFinalValidationPass);
    InferMBLayoutFromInputsForStandardCase(isFinalValidationPass);

    ValidateInferNaryInputDims(numInputs);

    // check minibatch layout consistency for all possible pairs (n choose 2)
    if (isFinalValidationPass)
        for (size_t i = 0; i < numInputs; i++)
            for (size_t j = i + 1; j < numInputs; j++)
                ValidateMBLayout(Input(i), Input(j));

    // result has tensor shape with dimensions being the max over all inputs
    let shape0 = GetInputSampleLayout(0);

    // dims is max over all inputs
    size_t maxRank = shape0.GetRank();    
    for (size_t i = 1; i < numInputs; i++)
    {
        let shape = GetInputSampleLayout(i);
        if (shape.GetRank() > maxRank)
            maxRank = shape.GetRank();
    }        
    SmallVector<size_t> dims = shape0.GetDims();
    dims.resize(maxRank, 1); // pad with 1

    // first check for invalid dimensions
    for (size_t k = 0; k < maxRank; k++)
    {
        size_t maxDim = 0;
        TensorShape maxShape = shape0; // arbitrary; this is just used for the error message
        for (size_t i = 0; i < numInputs; i++)
        {
            let currentShape = GetInputSampleLayout(i);
            size_t currentRank = currentShape.GetRank();
            // make sure that the rank of this input is bigger than the current index (otherwise, these are implied singleton dimensions that do not need to be checked)
            if (currentRank > k)
            {
                size_t currentDim = currentShape[k];
                if (currentDim > 1 && maxDim != currentDim && maxDim > 1) // 1=broadcasting, 0=not known yet, meant to be inferred
                {
                    InvalidArgument("%ls: Input dimensions [%s] and [%s] are not compatible.",
                        NodeDescription().c_str(), string(maxShape).c_str(), string(currentShape).c_str());
                }
                else if (currentDim > maxDim)
                {
                    maxDim = currentDim;
                    maxShape = currentShape;
                }
            }
        }
    }

    // now set up the right dims
    for (size_t k = 0; k < maxRank; k++)
    {
        for (size_t i = 0; i < numInputs; i++)
        {
            let shape = GetInputSampleLayout(i);

            if (shape.GetRank() > k)
            {
                size_t dim = shape[k];
                if (dims[k] <= 1 && dim != 0)
                    dims[k] = dim;
            }
        }
    }

    SetDims(TensorShape(dims), HasMBLayout());
}
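The n-ary dimension inference above amounts to folding the binary broadcast rule over all inputs (up to error-message details). A sketch reusing broadcastShape from the note after Example #1:

#include <cstddef>
#include <vector>

std::vector<size_t> broadcastShape(std::vector<size_t>, const std::vector<size_t>&); // from the Example #1 sketch

std::vector<size_t> broadcastShapeNary(const std::vector<std::vector<size_t>>& shapes)
{
    std::vector<size_t> dims = shapes.at(0);
    for (size_t i = 1; i < shapes.size(); i++)
        dims = broadcastShape(dims, shapes[i]); // pairwise max-with-broadcast
    return dims;
}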
Example #11
void ReduceElementsNode<ElemType>::ValidateOp()
{
    if (m_operation == L"Plus") m_op = ElementWiseOperator::opSum;
    // more here
    else InvalidArgument("%ls was given an invalid operation code '%ls'. Allowed are: 'Plus'. And a few more soon.", NodeDescription().c_str(), m_operation.c_str());
}