static NeuralNetwork extractTileFromNetwork(const NeuralNetwork& network,
	unsigned int outputNeuron)
{
	// Get the connected subgraph
	auto newNetwork = network.getSubgraphConnectedToThisOutput(outputNeuron); 

	util::log("NeuronVisualizer")
		<< "sliced out tile with shape: " << newNetwork.shapeString() << ".\n";
		
	// Remove all other connections from the final layer
	// Map the flat output neuron index to a (block, offset) pair within
	// the output layer
	size_t block  = (outputNeuron % newNetwork.getOutputNeurons())
		/ newNetwork.getOutputBlockingFactor();
	size_t offset = (outputNeuron % newNetwork.getOutputNeurons())
		% newNetwork.getOutputBlockingFactor();

	auto& outputLayer = newNetwork.back();
	
	assert(block < outputLayer.blocks());
	
	Matrix weights = outputLayer[block].slice(0, offset, outputLayer.getInputBlockingFactor(), 1);
	Matrix bias    = outputLayer.at_bias(block).slice(0, offset, 1, 1);
	
	outputLayer.resize(1, outputLayer.getInputBlockingFactor(), 1);
	
	outputLayer[0]         = weights;
	outputLayer.at_bias(0) = bias;

	util::log("NeuronVisualizer")
		<< " trimmed to: " << newNetwork.shapeString() << ".\n";
	return newNetwork;
}
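
The (block, offset) arithmetic above can be checked in isolation: the flat neuron index is first wrapped into the layer's output-neuron range, then split by the blocking factor. A minimal self-contained sketch, using illustrative sizes that are assumptions rather than values from the source:

#include <cstddef>
#include <iostream>

int main()
{
	// Illustrative layer geometry (not from the source)
	const std::size_t outputNeurons  = 64;
	const std::size_t blockingFactor = 16;

	const std::size_t outputNeuron = 70;

	const std::size_t wrapped = outputNeuron % outputNeurons; // 70 % 64 = 6
	const std::size_t block   = wrapped / blockingFactor;     //  6 / 16 = 0
	const std::size_t offset  = wrapped % blockingFactor;     //  6 % 16 = 6

	std::cout << "neuron " << outputNeuron << " -> block " << block
		<< ", offset " << offset << "\n";

	return 0;
}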
Example #2
void SoftmaxLayer::runForwardImplementation(Bundle& bundle)
{
    auto& inputActivationsVector  = bundle[ "inputActivations"].get<MatrixVector>();
    auto& outputActivationsVector = bundle["outputActivations"].get<MatrixVector>();

    assert(inputActivationsVector.size() == 1);

    auto inputActivations = foldTime(inputActivationsVector.back());

    util::log("SoftmaxLayer") << " Running forward propagation of matrix "
        << inputActivations.shapeString() << "\n";

    if(util::isLogEnabled("SoftmaxLayer::Detail"))
    {
        util::log("SoftmaxLayer::Detail") << " input: "
            << inputActivations.debugString();
    }

    auto outputActivations = softmax(inputActivations);

    if(util::isLogEnabled("SoftmaxLayer::Detail"))
    {
        util::log("SoftmaxLayer::Detail") << " outputs: "
            << outputActivations.debugString();
    }

    saveMatrix("outputActivations", outputActivations);

    outputActivationsVector.push_back(unfoldTime(outputActivations,
        inputActivationsVector.front().size()));
}
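
For reference, the softmax(...) helper above presumably reduces to the standard max-subtracted exponentiation followed by normalization; this plain-vector sketch illustrates only that math, not the framework's Matrix API:

#include <algorithm>
#include <cmath>
#include <cstddef>
#include <iostream>
#include <vector>

std::vector<double> softmaxSketch(const std::vector<double>& x)
{
    // Subtracting the maximum before exponentiating avoids overflow
    // without changing the result
    const double maxValue = *std::max_element(x.begin(), x.end());

    std::vector<double> result(x.size());
    double sum = 0.0;

    for(std::size_t i = 0; i < x.size(); ++i)
    {
        result[i] = std::exp(x[i] - maxValue);
        sum += result[i];
    }

    for(auto& value : result)
    {
        value /= sum; // normalize so the outputs sum to 1
    }

    return result;
}

int main()
{
    for(double p : softmaxSketch({1.0, 2.0, 3.0}))
    {
        std::cout << p << " "; // ~0.090 0.245 0.665
    }
    std::cout << "\n";

    return 0;
}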
Example #3
Layer::BlockSparseMatrix Layer::runReverse(const BlockSparseMatrix& m) const
{
	if(util::isLogEnabled("Layer"))
	{
		util::log("Layer") << " Running reverse propagation on matrix (" << m.rows()
			<< " rows, " << m.columns() << " columns) through layer with dimensions ("
			<< blocks() << " blocks, "
			<< getInputCount() << " inputs, " << getOutputCount()
			<< " outputs, " << blockStep() << " block step).\n";
		util::log("Layer") << "  layer: " << m_sparseMatrix.shapeString() << "\n";
	}
 
	auto result = m.reverseConvolutionalMultiply(m_sparseMatrix.transpose());

	if(util::isLogEnabled("Layer"))
	{
		util::log("Layer") << "  output: " << result.shapeString() << "\n";
	}
	
	if(util::isLogEnabled("Layer::Detail"))
	{
		util::log("Layer::Detail") << "  output: " << result.debugString() << "\n";
	}

	return result;
}
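
The reverse pass above is the block-sparse form of the standard linear-layer backprop rule: multiply the incoming deltas by the transposed weights. A minimal dense sketch of that rule (names and dimensions are illustrative assumptions, not the source's API):

#include <cstddef>
#include <iostream>
#include <vector>

std::vector<double> reverseDense(const std::vector<double>& deltaOut,
	const std::vector<std::vector<double>>& weights) // weights[input][output]
{
	std::vector<double> deltaIn(weights.size(), 0.0);

	for(std::size_t i = 0; i < weights.size(); ++i)
	{
		// deltaIn = deltaOut * W^T: each input delta is the dot product
		// of the output deltas with that input's weight row
		for(std::size_t o = 0; o < deltaOut.size(); ++o)
		{
			deltaIn[i] += deltaOut[o] * weights[i][o];
		}
	}

	return deltaIn;
}

int main()
{
	auto deltaIn = reverseDense({1.0, -1.0}, {{0.5, 0.25}, {-0.5, 0.75}});

	std::cout << deltaIn[0] << " " << deltaIn[1] << "\n"; // 0.25 -1.25

	return 0;
}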
Example #4
Layer::BlockSparseMatrix Layer::runInputs(const BlockSparseMatrix& m) const
{
	if(util::isLogEnabled("Layer"))
	{
		util::log("Layer") << " Running forward propagation on matrix (" << m.rows()
			<< " rows, " << m.columns() << " columns) through layer with dimensions ("
			<< blocks() << " blocks, "
			<< getInputCount() << " inputs, " << getOutputCount()
			<< " outputs, " << blockStep() << " block step).\n";
		util::log("Layer") << "  layer: " << m_sparseMatrix.shapeString() << "\n";
	}
	
	if(util::isLogEnabled("Layer::Detail"))
	{
		util::log("Layer::Detail") << "  input: " << m.debugString() << "\n";
		util::log("Layer::Detail") << "  layer: " << m_sparseMatrix.debugString() << "\n";
		util::log("Layer::Detail") << "  bias:  " << m_bias.debugString() << "\n";
	}
	
	auto unbiasedOutput = m.convolutionalMultiply(m_sparseMatrix, blockStep());
	auto output = unbiasedOutput.convolutionalAddBroadcastRow(m_bias);
	
	output.sigmoidSelf();
	
	if(util::isLogEnabled("Layer"))
	{
		util::log("Layer") << "  output: " << output.shapeString() << "\n";
	}
	
	if(util::isLogEnabled("Layer::Detail"))
	{
		util::log("Layer::Detail") << "  output: " << output.debugString() << "\n";
	}

	return output;
}
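
Forward propagation above is three steps: multiply by the weights, broadcast-add the bias, then squash with a sigmoid in place. A minimal dense sketch of the same pipeline (illustrative only; the real code operates on block-sparse matrices):

#include <cmath>
#include <cstddef>
#include <iostream>
#include <vector>

std::vector<double> forwardDense(const std::vector<double>& input,
	const std::vector<std::vector<double>>& weights, // weights[input][output]
	const std::vector<double>& bias)
{
	// Start from the bias, accumulate the multiply, then apply the
	// logistic sigmoid, mirroring convolutionalMultiply,
	// convolutionalAddBroadcastRow, and sigmoidSelf above
	std::vector<double> output(bias);

	for(std::size_t o = 0; o < output.size(); ++o)
	{
		for(std::size_t i = 0; i < input.size(); ++i)
		{
			output[o] += input[i] * weights[i][o];
		}

		output[o] = 1.0 / (1.0 + std::exp(-output[o]));
	}

	return output;
}

int main()
{
	for(double y : forwardDense({1.0, 2.0}, {{0.1, 0.2}, {0.3, 0.4}}, {0.0, 0.0}))
	{
		std::cout << y << " "; // sigmoid(0.7) and sigmoid(1.0)
	}
	std::cout << "\n";

	return 0;
}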
Example #5
std::string Matrix::debugString() const
{
    return shapeString() + "\n" + matrix::toString(*this, 16) + "\n";
}

void CTCDecoderLayer::runReverseImplementation(Bundle& bundle)
{
    auto& inputDeltaVector = bundle["inputDeltas"].get<matrix::MatrixVector>();
    auto& outputActivationWeightDeltas =
        bundle["outputActivationWeightDeltas"].get<matrix::Matrix>();

    auto outputActivationWeights = loadMatrix("outputActivationWeights");
    auto inputPaths = loadMatrix("inputPaths");

    auto inputActivations = loadMatrix("inputActivations");

    Matrix inputDeltas = zeros(inputActivations.size(), inputActivations.precision());

    matrix::ctcBeamSearchInputGradients(inputDeltas, outputActivationWeights,
        inputPaths, outputActivationWeightDeltas, inputActivations);

    if(util::isLogEnabled("CTCDecoderLayer::Detail"))
    {
        util::log("CTCDecoderLayer::Detail") << "  beam search input deltas: "
            << inputDeltas.debugString();
    }
    else
    {
        util::log("CTCDecoderLayer") << "  beam search input deltas size: "
            << inputDeltas.shapeString() << "\n";
    }

    if(_implementation->hasCostFunction())
    {
        auto inputTimesteps = _implementation->getInputTimesteps();
        auto inputLabels    = _implementation->getInputLabels();

        auto ctcInputDeltas = computeCtcInputDeltas(_implementation->getCostFunctionName(),
            _implementation->getCostFunctionWeight(),
            inputActivations, inputLabels, inputTimesteps);

        // TODO: see if it is possible to remove this line; it is only necessary
        //       because CTC can't tell the difference between a beam and a minibatch
        size_t miniBatchSize = inputActivations.size()[1];

        apply(ctcInputDeltas, ctcInputDeltas, matrix::Multiply(miniBatchSize));

        if(util::isLogEnabled("CTCDecoderLayer::Detail"))
        {
            util::log("CTCDecoderLayer::Detail") << "  ctc input deltas: "
                << ctcInputDeltas.debugString();
        }
        else
        {
            util::log("CTCDecoderLayer") << "  ctc input deltas size: "
                << ctcInputDeltas.shapeString() << "\n";
        }

        apply(inputDeltas, inputDeltas, ctcInputDeltas, matrix::Add());

        if(util::isLogEnabled("CTCDecoderLayer::Detail"))
        {
            util::log("CTCDecoderLayer::Detail") << "  combined input deltas: "
                << inputDeltas.debugString();
        }
        else
        {
            util::log("CTCDecoderLayer") << "  combined input deltas size: "
                << inputDeltas.shapeString() << "\n";
        }
    }

    inputDeltaVector.push_back(inputDeltas);
}
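
The final combination above (apply with Multiply, then apply with Add) amounts to an elementwise accumulation of two gradient contributions into one delta tensor. A plain-vector sketch of that step, with the matrix API stripped away:

#include <cstddef>
#include <vector>

// inputDeltas[i] += scale * ctcDeltas[i]; the real code does the same with
// matrix::apply(..., Multiply(miniBatchSize)) followed by
// matrix::apply(..., Add())
void accumulateDeltas(std::vector<float>& inputDeltas,
    const std::vector<float>& ctcDeltas, float scale)
{
    for(std::size_t i = 0; i < inputDeltas.size(); ++i)
    {
        inputDeltas[i] += scale * ctcDeltas[i];
    }
}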