Code Example #1
int Network::Train(vector<long double> inputs,vector<long double> outputs){ //The standard Backprop Learning algorithm
    int i,j,k;
    double Target, Actual, Delta;
    SetInputs(inputs); //Set the inputs
    Update(); //Update all the values
    //SetOutputs(outputs); //Set the outputs
    for(i=net_tot_layers-1;i>0;i--){ //Go from last layer to first layer
        for(j=0;j<net_layers[i];j++) { // Go through every neuron in this layer
            if(i==net_tot_layers-1){ // Output layer needs special attention
                Target=outputs[j]; //Target value
                Actual=Layers[i].Neurons[j].n_value; //Actual value
                Delta= (Target - Actual) * Actual * (1 - Actual); // Error term: sigmoid derivative times the output error
                Layers[i].Neurons[j].n_delta=Delta; // Store the delta
                for(k=0;k<net_layers[i-1];k++) {
                    Layers[i-1].Neurons[k].Dendrites[j].d_weight += Delta*net_learning_rate*Layers[i-1].Neurons[k].n_value; //Calculate the new weights
                }

                Layers[i].Neurons[j].n_bias = Layers[i].Neurons[j].n_bias + Delta*net_learning_rate*1; //n_value is always 1 for bias
            } else { // Hidden layers
                //Target value
                Actual=Layers[i].Neurons[j].n_value; // Actual value of the neuron
                Delta=  Actual * (1 - Actual)* SigmaWeightDelta(i,j); // Error term: sigmoid derivative times the weighted sum of the deltas from the next layer
                for(k=0;k<net_layers[i-1];k++){
                    Layers[i-1].Neurons[k].Dendrites[j].d_weight += Delta*net_learning_rate*Layers[i-1].Neurons[k].n_value; //Calculate the new weights
                }
                if(i!=0) // Input layer has no bias (redundant check here, since the loop already stops at i > 0)
                    Layers[i].Neurons[j].n_bias = Layers[i].Neurons[j].n_bias + Delta*net_learning_rate*1; // n_value is always 1 for the bias
            }
        }
    }
    return 0;
}
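A minimal usage sketch for the Train method above (the helper, dataset, and epoch count are hypothetical and not part of the original code; it assumes the network was constructed with two inputs and one output): one backpropagation step per sample, repeated over an XOR-style dataset.

#include <vector>
using std::vector;

void TrainXorSketch(Network &net, int epochs) { // hypothetical helper
    vector<vector<long double>> in  = { {0.0L,0.0L}, {0.0L,1.0L}, {1.0L,0.0L}, {1.0L,1.0L} };
    vector<vector<long double>> out = { {0.0L}, {1.0L}, {1.0L}, {0.0L} };
    for (int e = 0; e < epochs; ++e)          // full passes over the data
        for (size_t s = 0; s < in.size(); ++s)
            net.Train(in[s], out[s]);         // one backprop step per sample
}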
Code Example #2
File: metamodel.cpp  Project: EISALab/AMGAgroundwater
void CRegionalNeuralModel::SetLayerNodes( int* prLayerNodes, int nLayers )
{
	ASSERT( nLayers>=3 );
	m_vcLayerNodes.assign( prLayerNodes, prLayerNodes+nLayers );

	SetInputs( m_vcLayerNodes[0] );
	SetOutputs( m_vcLayerNodes[nLayers-1] );
}
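A hypothetical call sketch (the layer sizes are made up; model stands for a CRegionalNeuralModel instance obtained elsewhere): with at least three layers, as the ASSERT requires, the first and last entries become the input and output counts.

int layerNodes[] = { 4, 8, 2 };          // 4 inputs, one hidden layer of 8, 2 outputs
model.SetLayerNodes( layerNodes, 3 );    // nLayers must be >= 3 per the ASSERT above
// => SetInputs(4) and SetOutputs(2) are forwarded internally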
Code Example #3
    JackPortAudioAdapter::JackPortAudioAdapter ( jack_nframes_t buffer_size, jack_nframes_t sample_rate, const JSList* params )
            : JackAudioAdapterInterface ( buffer_size, sample_rate )
    {
        jack_log ( "JackPortAudioAdapter::JackPortAudioAdapter buffer_size = %d, sample_rate = %d", buffer_size, sample_rate );

        const JSList* node;
        const jack_driver_param_t* param;
        int in_max = 0;
        int out_max = 0;

        fInputDevice = Pa_GetDefaultInputDevice();
        fOutputDevice = Pa_GetDefaultOutputDevice();

        for (node = params; node; node = jack_slist_next(node))
        {
            param = (const jack_driver_param_t*) node->data;

            switch (param->character)
            {
            case 'i' :
                fCaptureChannels = param->value.ui;
                break;
            case 'o' :
                fPlaybackChannels = param->value.ui;
                break;
            case 'C' :
                if ( fPaDevices.GetInputDeviceFromName(param->value.str, fInputDevice, in_max) < 0 )
                {
                    jack_error ( "Can't use %s, taking default input device", param->value.str );
                    fInputDevice = Pa_GetDefaultInputDevice();
                }
                break;
            case 'P' :
                if ( fPaDevices.GetOutputDeviceFromName(param->value.str, fOutputDevice, out_max) < 0 )
                {
                    jack_error ( "Can't use %s, taking default output device", param->value.str );
                    fOutputDevice = Pa_GetDefaultOutputDevice();
                }
                break;
            case 'r' :
                SetAdaptedSampleRate ( param->value.ui );
                break;
            case 'p' :
                SetAdaptedBufferSize ( param->value.ui );
                break;
            case 'd' :
                if ( fPaDevices.GetInputDeviceFromName ( param->value.str, fInputDevice, in_max ) < 0 )
                    jack_error ( "Can't use %s, taking default input device", param->value.str );
                if ( fPaDevices.GetOutputDeviceFromName ( param->value.str, fOutputDevice, out_max ) < 0 )
                    jack_error ( "Can't use %s, taking default output device", param->value.str );
                break;
            case 'l' :
                fPaDevices.DisplayDevicesNames();
                break;
            case 'q':
                fQuality = param->value.ui;
                break;
            case 'g':
                fRingbufferCurSize = param->value.ui;
                fAdaptative = false;
                break;
            }
        }

        //max channels
        if ( in_max == 0 )
            in_max = fPaDevices.GetDeviceInfo ( fInputDevice )->maxInputChannels;
        if ( out_max == 0 )
            out_max = fPaDevices.GetDeviceInfo ( fOutputDevice )->maxOutputChannels;

        //effective channels
        if ( ( fCaptureChannels == 0 ) || ( fCaptureChannels > in_max ) )
            fCaptureChannels = in_max;
        if ( ( fPlaybackChannels == 0 ) || ( fPlaybackChannels > out_max ) )
            fPlaybackChannels = out_max;

        //set adapter interface channels
        SetInputs ( fCaptureChannels );
        SetOutputs ( fPlaybackChannels );
    }
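A hypothetical sketch of how the params list consumed above might be built, assuming the standard jack_driver_param_t and JSList/jack_slist_append types from JACK's driver interface headers (the channel counts, buffer size, and sample rate are made-up example values):

jack_driver_param_t in_channels;
in_channels.character = 'i';             // capture channel count
in_channels.value.ui  = 2;

jack_driver_param_t out_channels;
out_channels.character = 'o';            // playback channel count
out_channels.value.ui  = 2;

JSList* params = NULL;
params = jack_slist_append(params, &in_channels);
params = jack_slist_append(params, &out_channels);

JackPortAudioAdapter adapter(1024, 48000, params);  // hypothetical buffer size / sample rate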
Code Example #4
static void TestModeTransMsg()
{
  Setup();
  SetInputs();
  m_NavigationDataToIGTLMessageFilter->SetOperationMode(
    mitk::NavigationDataToIGTLMessageFilter::ModeSendTransMsg);

  //Process
  mitk::IGTLMessage::Pointer msg0 = m_NavigationDataToIGTLMessageFilter->GetOutput();
  mitk::IGTLMessage::Pointer msg1 = m_NavigationDataToIGTLMessageFilter->GetOutput(1);
  mitk::IGTLMessage::Pointer msg2 = m_NavigationDataToIGTLMessageFilter->GetOutput(2);
  mitk::IGTLMessage::Pointer msg3 = m_NavigationDataToIGTLMessageFilter->GetOutput(3);

  msg0->Update();

  igtl::TransformMessage::Pointer igtlMsg0 =
    dynamic_cast<igtl::TransformMessage*>(msg0->GetMessage().GetPointer());
  igtl::TransformMessage::Pointer igtlMsg3 =
    dynamic_cast<igtl::TransformMessage*>(msg3->GetMessage().GetPointer());

  MITK_TEST_OUTPUT(<< "Testing the converted OpenIGTLink messages:");
  MITK_TEST_CONDITION(igtlMsg0.IsNotNull(), "Message0 is not null?");
  MITK_TEST_CONDITION(igtlMsg3.IsNotNull(), "Message3 is not null?");

  //Convert the data from the igtl message back to mitk types
  mitk::AffineTransform3D::Pointer affineTransformation0 =
    mitk::AffineTransform3D::New();
  igtl::Matrix4x4 transformation0_;
  mitk::Matrix3D  transformation0;
  mitk::Vector3D  offset0;
  igtlMsg0->GetMatrix(transformation0_);
  for (unsigned int r = 0; r < 3; r++)
  {
    for (unsigned int c = 0; c < 3; c++)
    {
      transformation0.GetVnlMatrix().set(r, c, transformation0_[r][c]);
    }
    offset0.SetElement(r, transformation0_[r][3]);
  }
  //convert the igtl matrix here and set it in the affine transformation
  affineTransformation0->SetMatrix(transformation0);
  affineTransformation0->SetOffset(offset0);
  //the easiest way to convert the affine transform to position and quaternion
  mitk::NavigationData::Pointer nd0 =
    mitk::NavigationData::New(affineTransformation0, true);

  mitk::AffineTransform3D::Pointer affineTransformation3 =
    mitk::AffineTransform3D::New();
  igtl::Matrix4x4 transformation3_;
  mitk::Matrix3D  transformation3;
  mitk::Vector3D  offset3;
  igtlMsg3->GetMatrix(transformation3_);
  for (unsigned int r = 0; r < 3; r++)
  {
    for (unsigned int c = 0; c < 3; c++)
    {
      transformation3.GetVnlMatrix().set(r, c, transformation3_[r][c]);
    }
    offset3.SetElement(r, transformation3_[r][3]);
  }
  //convert the igtl matrix here and set it in the affine transformation
  affineTransformation3->SetMatrix(transformation3);
  affineTransformation3->SetOffset(offset3);
  //the easiest way to convert the affine transform to position and quaternion
  mitk::NavigationData::Pointer nd3 =
    mitk::NavigationData::New(affineTransformation3, true);

  MITK_TEST_OUTPUT(<< "Testing the conversion of navigation data object to Trans OpenIGTLink messages:");
  MITK_TEST_CONDITION(mitk::Equal(nd0->GetPosition(), m_NavigationDataToIGTLMessageFilter->GetInput(0)->GetPosition()), "Position0 correct?");
  MITK_TEST_CONDITION(mitk::Equal(nd3->GetPosition(), m_NavigationDataToIGTLMessageFilter->GetInput(3)->GetPosition()), "Position3 correct?");
  MITK_TEST_CONDITION(mitk::Equal(nd0->GetOrientation(), m_NavigationDataToIGTLMessageFilter->GetInput(0)->GetOrientation()), "Orientation0 correct?");
  MITK_TEST_CONDITION(mitk::Equal(nd3->GetOrientation(), m_NavigationDataToIGTLMessageFilter->GetInput(3)->GetOrientation()), "Orientation3 correct?");
}
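The matrix-to-NavigationData conversion is performed twice in the test above; a small helper extracted from that logic (hypothetical, not part of the original test) could look like this:

static mitk::NavigationData::Pointer IgtlMatrixToNavigationData(const igtl::Matrix4x4& m)
{
  mitk::Matrix3D rotation;
  mitk::Vector3D offset;
  for (unsigned int r = 0; r < 3; r++)
  {
    for (unsigned int c = 0; c < 3; c++)
      rotation.GetVnlMatrix().set(r, c, m[r][c]);   // copy the 3x3 rotation part
    offset.SetElement(r, m[r][3]);                  // copy the translation column
  }
  mitk::AffineTransform3D::Pointer transform = mitk::AffineTransform3D::New();
  transform->SetMatrix(rotation);
  transform->SetOffset(offset);
  // NavigationData performs the affine-to-position/quaternion conversion internally
  return mitk::NavigationData::New(transform, true);
}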
Code Example #5
static void TestModeQTransMsg()
{
  Setup();
  SetInputs();
  m_NavigationDataToIGTLMessageFilter->SetOperationMode(
    mitk::NavigationDataToIGTLMessageFilter::ModeSendQTransMsg);

  //Process
  mitk::IGTLMessage::Pointer msg0 = m_NavigationDataToIGTLMessageFilter->GetOutput();
  mitk::IGTLMessage::Pointer msg1 = m_NavigationDataToIGTLMessageFilter->GetOutput(1);
  mitk::IGTLMessage::Pointer msg2 = m_NavigationDataToIGTLMessageFilter->GetOutput(2);
  mitk::IGTLMessage::Pointer msg3 = m_NavigationDataToIGTLMessageFilter->GetOutput(3);

  MITK_INFO << "In: " << msg0->GetSource()->GetNumberOfIndexedInputs() << " Out: " << msg0->GetSource()->GetNumberOfIndexedOutputs();
  MITK_INFO << msg0->GetMessage().GetPointer();

  msg0->Update();

  igtl::PositionMessage::Pointer igtlMsg0 =
    dynamic_cast<igtl::PositionMessage*>(msg0->GetMessage().GetPointer());
  igtl::PositionMessage::Pointer igtlMsg3 =
    dynamic_cast<igtl::PositionMessage*>(msg3->GetMessage().GetPointer());

  MITK_TEST_OUTPUT(<< "Testing the converted OpenIGTLink messages:");
  MITK_TEST_CONDITION(igtlMsg0.IsNotNull(), "Message0 is not null?");
  MITK_TEST_CONDITION(igtlMsg3.IsNotNull(), "Message3 is not null?");

  //Convert the data from the igtl message back to mitk types
  float pos0_[3];
  float orientation0_[4];
  igtlMsg0->GetPosition(pos0_);
  igtlMsg0->GetQuaternion(orientation0_);
  mitk::NavigationData::PositionType pos0;
  pos0[0] = pos0_[0];
  pos0[1] = pos0_[1];
  pos0[2] = pos0_[2];
  mitk::NavigationData::OrientationType orientation0;
  orientation0[0] = orientation0_[0];
  orientation0[1] = orientation0_[1];
  orientation0[2] = orientation0_[2];
  orientation0[3] = orientation0_[3];
  float pos3_[3];
  float orientation3_[4];
  igtlMsg3->GetPosition(pos3_);
  igtlMsg3->GetQuaternion(orientation3_);
  mitk::NavigationData::PositionType pos3;
  pos3[0] = pos3_[0];
  pos3[1] = pos3_[1];
  pos3[2] = pos3_[2];
  mitk::NavigationData::OrientationType orientation3;
  orientation3[0] = orientation3_[0];
  orientation3[1] = orientation3_[1];
  orientation3[2] = orientation3_[2];
  orientation3[3] = orientation3_[3];

  MITK_TEST_OUTPUT(<< "Testing the conversion of navigation data object to QTrans OpenIGTLink messages:");
  MITK_TEST_CONDITION(mitk::Equal(pos0, m_NavigationDataToIGTLMessageFilter->GetInput(0)->GetPosition()), "Position0 correct?");
  MITK_TEST_CONDITION(mitk::Equal(pos3, m_NavigationDataToIGTLMessageFilter->GetInput(3)->GetPosition()), "Position3 correct?");
  MITK_TEST_CONDITION(mitk::Equal(orientation0, m_NavigationDataToIGTLMessageFilter->GetInput(0)->GetOrientation()), "Orientation0 correct?");
  MITK_TEST_CONDITION(mitk::Equal(orientation3, m_NavigationDataToIGTLMessageFilter->GetInput(3)->GetOrientation()), "Orientation3 correct?");
}
Code Example #6
File: JackNetAdapter.cpp  Project: antkazam/jack2
    JackNetAdapter::JackNetAdapter(jack_client_t* jack_client, jack_nframes_t buffer_size, jack_nframes_t sample_rate, const JSList* params)
            : JackAudioAdapterInterface(buffer_size, sample_rate), JackNetSlaveInterface(), fThread(this)
    {
        jack_log("JackNetAdapter::JackNetAdapter");

        /*
        Global parameter setup: we cannot pass everything to the JackNetSlaveInterface constructor up front,
        because the full configuration is not known yet. Parameters are parsed from the param list below,
        and JackNetSlaveInterface is then filled with the proper values.
        */
        char multicast_ip[32];
        uint udp_port;
        GetHostName(fParams.fName, JACK_CLIENT_NAME_SIZE);
        fSocket.GetName(fParams.fSlaveNetName);
        fParams.fMtu = DEFAULT_MTU;
        // Deactivated for now...
        fParams.fTransportSync = 0;
        int send_audio = -1;
        int return_audio = -1;
        fParams.fSendMidiChannels = 0;
        fParams.fReturnMidiChannels = 0;
        fParams.fSampleRate = sample_rate;
        fParams.fPeriodSize = buffer_size;
        fParams.fSlaveSyncMode = 1;
        fParams.fNetworkLatency = 2;
        fParams.fSampleEncoder = JackFloatEncoder;
        fClient = jack_client;
    
        // Possibly use env variable
        const char* default_udp_port = getenv("JACK_NETJACK_PORT");
        udp_port = (default_udp_port) ? atoi(default_udp_port) : DEFAULT_PORT;

        const char* default_multicast_ip = getenv("JACK_NETJACK_MULTICAST");
        if (default_multicast_ip) {
            strcpy(multicast_ip, default_multicast_ip);
        } else {
            strcpy(multicast_ip, DEFAULT_MULTICAST_IP);
        }

        //options parsing
        const JSList* node;
        const jack_driver_param_t* param;
        for (node = params; node; node = jack_slist_next(node))
        {
            param = (const jack_driver_param_t*) node->data;

            switch (param->character) {
                case 'a' :
                    assert(strlen(param->value.str) < 32);
                    strcpy(multicast_ip, param->value.str);
                    break;
                case 'p' :
                    udp_port = param->value.ui;
                    break;
                case 'M' :
                    fParams.fMtu = param->value.i;
                    break;
                case 'C' :
                    send_audio = param->value.i;
                    break;
                case 'P' :
                    return_audio = param->value.i;
                    break;
                case 'n' :
                    strncpy(fParams.fName, param->value.str, JACK_CLIENT_NAME_SIZE);
                    break;
                case 't' :
                    fParams.fTransportSync = param->value.ui;
                    break;
            #if HAVE_CELT
                case 'c':
                    if (param->value.i > 0) {
                        fParams.fSampleEncoder = JackCeltEncoder;
                        fParams.fKBps = param->value.i;
                    }
                    break;
            #endif
            #if HAVE_OPUS
                case 'O':
                    if (param->value.i > 0) {
                        fParams.fSampleEncoder = JackOpusEncoder;
                        fParams.fKBps = param->value.i;
                    }
                    break;
            #endif
                case 'l' :
                    fParams.fNetworkLatency = param->value.i;
                    if (fParams.fNetworkLatency > NETWORK_MAX_LATENCY) {
                        jack_error("Error : network latency is limited to %d\n", NETWORK_MAX_LATENCY);
                        throw std::bad_alloc();
                    }
                    break;
                case 'q':
                    fQuality = param->value.ui;
                    break;
                case 'g':
                    fRingbufferCurSize = param->value.ui;
                    fAdaptative = false;
                    break;
             }
        }

        strcpy(fMulticastIP, multicast_ip);

        // Set the socket parameters
        fSocket.SetPort(udp_port);
        fSocket.SetAddress(fMulticastIP, udp_port);

        // If not set, takes default
        fParams.fSendAudioChannels = (send_audio == -1) ? 2 : send_audio;

        // If not set, takes default
        fParams.fReturnAudioChannels = (return_audio == -1) ? 2 : return_audio;

        // Set the audio adapter interface channel values
        SetInputs(fParams.fSendAudioChannels);
        SetOutputs(fParams.fReturnAudioChannels);

        // Soft buffers will be allocated later (once network initialization done)
        fSoftCaptureBuffer = NULL;
        fSoftPlaybackBuffer = NULL;
    }
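A hypothetical sketch of the environment-variable defaults read by the constructor above (the helper name, port, and multicast address are made-up example values; setenv assumes a POSIX system):

#include <cstdlib>

void ConfigureNetAdapterEnvDefaults() {               // hypothetical helper
    setenv("JACK_NETJACK_PORT", "19000", 1);          // becomes the default UDP port
    setenv("JACK_NETJACK_MULTICAST", "239.0.0.1", 1); // becomes the default multicast IP
    // Explicit 'p' (port) or 'a' (multicast address) entries in the params list still override these.
}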