Esempio n. 1
0
/*****************************************************************************
  FUNCTION : propagateTDNetBackward

  PURPOSE  : Time Delay Backward error propagation (topological).
  NOTES    : Start calculating the average of the corresponding links in 
             all TD-steps. This average is used to update the links of the 
	     1st. receptive field.
  RETURNS  : network error
  UPDATE   : 19.02.1993
******************************************************************************/
	float SnnsCLib::propagateTDNetBackward(int pattern_no, int sub_pat_no,
				     float learn_parameter, 
				     float delta_max)
{ 
    /* Backward error-propagation pass for a time-delay network.
       pattern_no / sub_pat_no : select the target output sub pattern
       learn_parameter         : learning rate (eta)
       delta_max               : output deviations with |devit| <= delta_max
                                 are tolerated and contribute no error
       Returns the network error (sum of squared output deviations).
       NOTE(review): this excerpt ends mid-function (inside the "unit has
       sites" branch); the remainder of the body is not visible here. */
    register struct Link   *link_ptr;
    register struct Site   *site_ptr;
    register struct Unit   *unit_ptr, *unit_ptr1 ;
    register struct Unit   *ref_unit;
    register Patterns      out_pat;
    register float         error,  sum_error, eta, devit, learn_error;
    register TopoPtrArray  topo_ptr;
    int                    i; //last_log_layer,
    int                    size;

    sum_error = 0.0;		/*  reset network error  */
    eta = learn_parameter;	/*  store learn_parameter in CPU register  */

    /*  calculate address of the output pattern (with number pattern_no + 1) */

    topo_ptr = topo_ptr_array + (no_of_topo_units + 2);
    //last_log_layer = (*topo_ptr)->lln;
    out_pat = kr_getSubPatData(pattern_no,sub_pat_no,OUTPUT,&size);
    /* start one past the last value: the pattern is consumed back to front */
    out_pat += size;

    /* calculate output units only: begin at the end of topo_pointer_array */
    unit_ptr = *topo_ptr;
    while (unit_ptr != (struct Unit *) NULL){
	devit = *(--out_pat) - unit_ptr->Out.output; /*  calc. deviation  */

	/* deviations inside the tolerance band add no error; skip the unit */
	if ( (float) fabs( devit ) <= delta_max ){
	    unit_ptr = *--topo_ptr;
	    continue;
	}

	sum_error += devit * devit; /*  sum up the error of the network  */

	/* calculate error for output units	 */
	/* output layer cannot have time delay structure, so no 
	   distinction is necessary*/
	error = devit * (this->*unit_ptr->act_deriv_func) ( unit_ptr ); 

	/* calc. the error for adjusting weights and bias of pred. units  */
	learn_error = eta * error; 

	/* accumulate bias change; value_c counts the contributing updates */
	unit_ptr->value_b += learn_error;
	unit_ptr->value_c += 1.0;

	if (UNIT_HAS_DIRECT_INPUTS( unit_ptr )){ /* the unit has direct links */
	    /* error must be saved for each unit of the hidden layer */
	    FOR_ALL_LINKS( unit_ptr, link_ptr ){
		/* adjust link weights and calc. sum of errors of pred. units*/
		link_ptr->to->Aux.flint_no += link_ptr->weight * error;
		link_ptr->value_b += learn_error * link_ptr->to->Out.output;
		link_ptr->value_c += 1.0;
	    }
	}else{ /* the unit has sites: not necessary for TD-Network  */
Esempio n. 2
0
/*****************************************************************************
  FUNCTION : cc_getErr
  PURPOSE  : get sum of squared errors (sse) = (o_actual - y_desired)^2
  NOTES    :

  UPDATE   : 19.01.96
******************************************************************************/
float cc_getErr (int StartPattern, int EndPattern)
{
    /* Returns the summed squared error sse = sum (o_actual - t_desired)^2
       of the output layer over the sub patterns in [StartPattern,EndPattern].
       Side effects: resets and accumulates SumSqError with the
       derivative-weighted error (flat-spot constant cc_fse added to the
       derivative) and sets cc_actualNetSaved. */
    int p=0, sub, start, end, n,  pat, dummy;
    float sse=0, devit,error;
    register Patterns out_pat;
    register struct Unit *OutputUnitPtr;
    int Correct;
    int WhichWin,CorrWin;
    float MaxAct;

    KernelErrorCode = kr_initSubPatternOrder(StartPattern,EndPattern);
    ERROR_CHECK;
    cc_getPatternParameter(StartPattern,EndPattern,&start,&end,&n);
    ERROR_CHECK;
    SumSqError = 0.0;

    for(p=start; p<=end;p++){
	Correct=TRUE;
	MaxAct=0.0;
	cc_getActivationsForActualPattern(p,start,&pat,&sub);
	PROPAGATE_THROUGH_OUTPUT_LAYER(OutputUnitPtr,dummy,p);

	out_pat = kr_getSubPatData(pat,sub,OUTPUT,NULL);

	FOR_ALL_OUTPUT_UNITS(OutputUnitPtr,dummy){
	    /* index of the unit that SHOULD win (target value > 0.5) */
	    if (*out_pat > 0.5) CorrWin = dummy;
	    devit =  OutputUnitPtr->Out.output - *(out_pat++);
	    /* track the most active output unit (the actual winner) */
	    if  (OutputUnitPtr->Out.output > MaxAct)
	    {
		MaxAct=OutputUnitPtr->Out.output;
		WhichWin=dummy;
	    }
	    /* BUG FIX: abs() is the integer function and truncated the float
	       deviation to 0, so the 0.2 tolerance test never fired; use
	       fabs() (the member version of this routine already does). */
	    if (fabs(devit) > 0.2) Correct=FALSE;
	    sse += devit*devit;
	    error = devit * 
		(((OutputUnitPtr->act_deriv_func == ACT_DERIV_Custom_Python) ? 
			kr_PythonActFunction(OutputUnitPtr->python_act_deriv_func,
						OutputUnitPtr) :
			(OutputUnitPtr->act_deriv_func) (OutputUnitPtr))  + cc_fse);
	    SumSqError += error*error;
	}
    }
    cc_actualNetSaved=TRUE;
    return sse;
}
Esempio n. 3
0
/*****************************************************************************
  FUNCTION : cc_getErr
  PURPOSE  : get sum of squared errors (sse) = (o_actual - y_desired)^2
  NOTES    :

  UPDATE   : 19.01.96
******************************************************************************/
float SnnsCLib::cc_getErr (int StartPattern, int EndPattern)
{
    /* Returns the summed squared error sse = sum (o_actual - t_desired)^2
       of the output layer over the sub patterns in [StartPattern,EndPattern].
       Also resets and accumulates the member SumSqError with the
       derivative-weighted error (flat-spot constant cc_fse added to the
       derivative).
       NOTE(review): this excerpt ends right after the pattern loop; the
       function's closing statements are not visible here. */
    int p=0, sub, start, end, n,  pat, dummy;
    float sse=0, devit,error;
    register Patterns out_pat;
    register struct Unit *OutputUnitPtr;
    //int Correct;
    //int WhichWin,CorrWin;
    float MaxAct;

    KernelErrorCode = kr_initSubPatternOrder(StartPattern,EndPattern);
    ERROR_CHECK;
    cc_getPatternParameter(StartPattern,EndPattern,&start,&end,&n);
    ERROR_CHECK;
    SumSqError = 0.0;	/* re-computed for this pattern range */

    for(p=start; p<=end;p++){
	//Correct=TRUE;
	MaxAct=0.0;
	cc_getActivationsForActualPattern(p,start,&pat,&sub);
	PROPAGATE_THROUGH_OUTPUT_LAYER(OutputUnitPtr,dummy,p);

	out_pat = kr_getSubPatData(pat,sub,OUTPUT,NULL);

	FOR_ALL_OUTPUT_UNITS(OutputUnitPtr,dummy){
	    //if (*out_pat > 0.5) CorrWin = dummy;
	    devit =  OutputUnitPtr->Out.output - *(out_pat++);
	    /* track the most active output unit (winner) */
	    if  (OutputUnitPtr->Out.output > MaxAct)
	    {
		MaxAct=OutputUnitPtr->Out.output;
		//WhichWin=dummy;
	    }
	    //if (fabs(devit) > 0.2) Correct=FALSE;
	    sse += devit*devit;
	    error = devit * 
		((this->*OutputUnitPtr->act_deriv_func)(OutputUnitPtr) + cc_fse);
	    SumSqError += error*error;
	}
    }
Esempio n. 4
0
/*****************************************************************************
  FUNCTION : propagateTDNetForward

  PURPOSE  : topological TimeDelay forward propagation
  NOTES    : needs only the weight matrix of one receptive field for 
	     propagating one pattern through the net
	     If the provided pattern_no is < 0, no pattern is loaded into
             the input layer but all other layers are propagated as usual
  RETURNS  :
  UPDATE   : 19.02.1993
******************************************************************************/
void  SnnsCLib::propagateTDNetForward(int pattern_no, int sub_pat_no)
{ 
  register struct Unit    *unit_ptr;
  register struct Link    *link_ptr;
  register Patterns       in_pat;
  register TopoPtrArray   topo_ptr;
  int                     i;

  if (pattern_no >= 0){
      /*  calculate startaddress for input pattern array  */

      in_pat = kr_getSubPatData(pattern_no,sub_pat_no,INPUT,NULL);
      topo_ptr = topo_ptr_array;

      /* copy pattern into input unit's activation and calculate output of the 
	 input units. */
      /* order of the topoptrarray: input-, hidden- and then outputunits */    

      unit_ptr = *++topo_ptr;
      while (unit_ptr != (struct Unit *) NULL){
	  /*  topo_ptr points to a (topological sorted) unit stucture 
	      (input units first)  */

	  if (unit_ptr->out_func == OUT_IDENTITY){
	      /*  identity output function: don't call the output function  */
	      unit_ptr->Out.output = unit_ptr->act = *in_pat++;
	  }else{
	      /*  no identity output function: calculate unit's output also  */
	      unit_ptr->Out.output = 
		  (this->*unit_ptr->out_func) (unit_ptr->act = *in_pat++);
	  } /*if*/
	  unit_ptr = *++topo_ptr;
      }
  }else{
      /* set unit_ptr and topo_ptr as if an input pattern was provided */
      topo_ptr = topo_ptr_array;
      unit_ptr = *++topo_ptr;
      while (unit_ptr != (struct Unit *) NULL)
      {
	  unit_ptr = *++topo_ptr;
      }
  }
      
  /* Propagate input to hidden, hidden to hidden and hidden to output */

  for (i=0; i<2; i++){
      unit_ptr = *++topo_ptr;
      while (unit_ptr != NULL){
	  /*  initialization for propagating hidden units  */
	  /*  clear error values */
	  unit_ptr->Aux.flint_no = 0.0;

	  if (UNIT_HAS_DIRECT_INPUTS(unit_ptr)){
	      /* this is a reference unit, initialize link weight change */
	      /* and counter of link usage */
	      FOR_ALL_LINKS(unit_ptr, link_ptr){
		  link_ptr->value_b = link_ptr->value_c = 0.0;
	      }
	  }

	  /* reset bias-change and td-step-counter before each lerning epoch */
	  unit_ptr->value_b = unit_ptr->value_c = 0.0;

	  unit_ptr->act = (this->*unit_ptr->act_func) (unit_ptr);
	  if (unit_ptr->out_func == OUT_IDENTITY){
	      /*  identity output function: don't call the output function  */
	      unit_ptr->Out.output = unit_ptr->act;
	  }else{
	      /*  no identity output function: calculate unit's output also  */
	      unit_ptr->Out.output = (this->*unit_ptr->out_func) (unit_ptr->act);
	  }      
	  unit_ptr = *++topo_ptr;
      }
  }
} /*endfunction*/