void FB_CTUD::executeEvent(AppBlocInfo pa_stAppBlocInfo, EVENT_UID pa_unEIID) {

	if (m_unEventREQID == pa_unEIID) {
		if (true == R()) {
			CV() = 0;
		}
		else if (true == LD()) {
			CV() = PV();
		}
		else if (!(CU() && CD())) {
			if (CU() && (CV() < CIEC_DT_INT::scm_nMaxVal)) {
				CV() = static_cast<FzrteInt16>(CV() + 1);
			}
			else if (CD() && (CV() > CIEC_DT_INT::scm_nMinVal)) {
				CV() = static_cast<FzrteInt16>(CV() - 1);
			}
		}
		QU() = (CV() >= PV());
		QD() = (CV() <= 0);

		SendOutput(m_unEventCNFID);
	}
}
void FB_CTUD_DINT::executeEvent(int pa_nEIID) {
  if (pa_nEIID == scm_nEventREQID) {
    if (true == R()) {
      CV() = 0;
    }
    else if (true == LD()) {
      CV() = PV();
    }
    else if (!(CU() && CD())) {
      if (CU() && (CV() < CIEC_DINT::scm_nMaxVal)) {
        CV() = CV() + 1;
      }
      else if (CD() && (CV() > CIEC_DINT::scm_nMinVal)) {
        CV() = CV() - 1;
      }
    }
    QU() = (CV() >= PV());
    QD() = (CV() <= 0);
    sendOutputEvent(scm_nEventCNFID);
  }
}
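
Both blocks implement the IEC 61131-3 CTUD (count up/down) semantics: R resets CV to zero, LD loads the preset PV, and otherwise a CU/CD event increments or decrements CV, saturating at the type's limits; QU reports CV >= PV and QD reports CV <= 0. The following is a minimal stand-alone sketch of the same behavior in plain C++, with ordinary struct members standing in for the CIEC_* accessors (all names here are illustrative, not taken from either codebase):

#include <cstdint>
#include <iostream>
#include <limits>

// Illustrative stand-in for the CTUD function blocks above (assumed field
// names; the real blocks expose these values through accessor methods).
struct Ctud {
  bool R = false, LD = false, CU = false, CD = false;  // inputs
  int16_t PV = 0;                                      // preset value
  int16_t CV = 0;                                      // current count
  bool QU = false, QD = false;                         // outputs

  void executeEvent() {
    if (R) {
      CV = 0;                       // reset dominates everything else
    } else if (LD) {
      CV = PV;                      // load the preset
    } else if (!(CU && CD)) {       // simultaneous up+down is a no-op
      if (CU && CV < std::numeric_limits<int16_t>::max()) {
        ++CV;                       // count up, saturating at the max
      } else if (CD && CV > std::numeric_limits<int16_t>::min()) {
        --CV;                       // count down, saturating at the min
      }
    }
    QU = (CV >= PV);
    QD = (CV <= 0);
  }
};

int main() {
  Ctud c;
  c.PV = 2;
  c.CU = true;
  c.executeEvent();                 // CV = 1
  c.executeEvent();                 // CV = 2 -> QU becomes true
  std::cout << c.CV << " QU=" << c.QU << " QD=" << c.QD << std::endl;
  return 0;
}

Driving it with PV = 2 and CU held true, two executeEvent() calls raise CV to 2 and set QU, mirroring what the function blocks above do on their REQ event.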
Example #3
bool svm_train(size_t l,
               size_t n,
               const float *y,
               const FeatureNode **x,
               double C,
               double *w) {
  size_t active_size = l;
  double PGmax_old = kINF;
  double PGmin_old = -kINF;
  std::vector<double> QD(l);
  std::vector<size_t> index(l);
  std::vector<double> alpha(l);

  std::fill(w, w + n, 0.0);
  std::fill(alpha.begin(), alpha.end(), 0.0);

  for (size_t i = 0; i < l; ++i) {
    index[i] = i;
    QD[i] = 0;
    for (const FeatureNode *f = x[i]; f->index >= 0; ++f) {
      QD[i] += (f->value * f->value);
    }
  }

  static const size_t kMaxIteration = 2000;
  for (size_t iter = 0; iter < kMaxIteration; ++iter) {
    double PGmax_new = -kINF;
    double PGmin_new = kINF;
    std::random_shuffle(index.begin(), index.begin() + active_size);

    for (size_t s = 0; s < active_size; ++s) {
      const size_t i = index[s];
      double G = 0;

      for (const FeatureNode *f = x[i]; f->index >= 0; ++f) {
        G += w[f->index] * f->value;
      }

      G = G * y[i] - 1;
      double PG = 0.0;

      if (alpha[i] == 0.0) {
        if (G > PGmax_old) {
          active_size--;
          std::swap(index[s], index[active_size]);
          s--;
          continue;
        } else if (G < 0.0) {
          PG = G;
        }
      } else if (alpha[i] == C) {
        if (G < PGmin_old) {
          active_size--;
          std::swap(index[s], index[active_size]);
          s--;
          continue;
        } else if (G > 0.0) {
          PG = G;
        }
      } else {
        PG = G;
      }

      PGmax_new = std::max(PGmax_new, PG);
      PGmin_new = std::min(PGmin_new, PG);

      if (std::abs(PG) > 1.0e-12) {
        const double alpha_old = alpha[i];
        alpha[i] = std::min(std::max(alpha[i] - G/QD[i], 0.0), C);
        const double d = (alpha[i] - alpha_old) * y[i];
        for (const FeatureNode *f = x[i]; f->index >= 0; ++f) {
          w[f->index] += d * f->value;
        }
      }
    }

    if (iter % 4 == 0) {
      std::cout << "." << std::flush;
    }

    if ((PGmax_new - PGmin_new) <= kEPS) {
      if (active_size == l) {
        break;
      } else {
        active_size = l;
        PGmax_old = kINF;
        PGmin_old = -kINF;
        continue;
      }
    }

    PGmax_old = PGmax_new;
    PGmin_old = PGmin_new;
    if (PGmax_old <= 0) {
      PGmax_old = kINF;
    }
    if (PGmin_old >= 0) {
      PGmin_old = -kINF;
    }
  }

  std::cout << std::endl;

  return true;
}
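
For orientation, here is a minimal driver for this routine. The excerpt does not show FeatureNode, kINF, or kEPS, so the definitions below are assumptions reconstructed from how the code uses them (a negative index terminates a feature list); the training data is a hypothetical toy set.

#include <cstddef>
#include <iostream>
#include <limits>

// Assumed declarations, inferred from usage in svm_train above (not shown in
// the excerpt):
struct FeatureNode {
  int index;    // position in w; a negative index terminates the list
  float value;  // feature value
};
static const double kINF = std::numeric_limits<double>::infinity();
static const double kEPS = 0.1;  // assumed stopping tolerance

bool svm_train(size_t l, size_t n, const float *y, const FeatureNode **x,
               double C, double *w);  // the routine above

int main() {
  // Two toy examples in a 3-dimensional feature space (hypothetical data).
  FeatureNode x0[] = {{0, 1.0f}, {1, 2.0f}, {-1, 0.0f}};
  FeatureNode x1[] = {{1, 1.0f}, {2, 3.0f}, {-1, 0.0f}};
  const FeatureNode *x[] = {x0, x1};
  const float y[] = {+1.0f, -1.0f};

  double w[3];
  svm_train(2, 3, y, x, /*C=*/1.0, w);

  for (size_t j = 0; j < 3; ++j) {
    std::cout << "w[" << j << "] = " << w[j] << std::endl;
  }
  return 0;
}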
Example #4
bool svm_train(size_t l,
               size_t n,
               const float *y,
               const FeatureNode **x,
               double C,
               double *w) {
  //l: number of training examples (characters), i.e. y.size
  //n: number of weights, i.e. w.size
  //*y: labels, a vector of +1.0 and -1.0 entries
  //**x: per-example feature vectors, each terminated by a node with index < 0
  //C: regularization parameter (1.0 here)
  //*w: output weight vector of length n, zero-filled below
  size_t active_size = l;
  double PGmax_old = kINF;
  double PGmin_old = -kINF;
  std::vector<double> QD(l);
  std::vector<size_t> index(l);
  std::vector<double> alpha(l);

  //length n is max_dim + 1, set in Trainer::add(character)
  std::fill(w, w + n, 0.0);
  std::fill(alpha.begin(), alpha.end(), 0.0);

  //A projected one-variable Newton step (dual coordinate descent) is used
  //below to determine each alpha under the box constraint 0 <= alpha <= C.

  //QD[i] = x_i' * x_i, the dot product of each feature vector with itself.
  //This is the diagonal entry Q_ii of the matrix Q_ij = y_i y_j x_i' x_j
  //(since y_i^2 = 1), i.e. the second partial derivative of the dual
  //objective with respect to alpha_i, and serves as the Newton denominator.
  for (size_t i = 0; i < l; ++i) {
    index[i] = i;
    QD[i] = 0;
    for (const FeatureNode *f = x[i]; f->index >= 0; ++f) {
      QD[i] += (f->value * f->value);
    }
  }

  static const size_t kMaxIteration = 2000;
  for (size_t iter = 0; iter < kMaxIteration; ++iter) {
    double PGmax_new = -kINF;
    double PGmin_new = kINF;
    //shuffle the first active_size entries of index: visiting coordinates in
    //random order avoids the systematic bias of a fixed sweep and typically
    //speeds up convergence of coordinate descent
    std::random_shuffle(index.begin(), index.begin() + active_size);

    //for each example i in the active set, take one Newton step on its
    //coefficient, alpha_new = alpha_old - f'(alpha_i)/f''(alpha_i), projected
    //onto [0, C], and keep w = sum_j(alpha_j * y_j * x_j) consistent
    for (size_t s = 0; s < active_size; ++s) {
      const size_t i = index[s];
      double G = 0;

      std::cout << "Index" << i << "\n";
      for (const FeatureNode *f = x[i]; f->index >= 0; ++f) {
        G += w[f->index] * f->value;
      }

      G = G * y[i] - 1;
      double PG = 0.0;

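      //PG is the projected gradient: zero when alpha_i sits at a bound and G
      //points further outside the box (no feasible progress), otherwise G.
      //Shrinking heuristic: if alpha_i is at a bound and G lies beyond last
      //pass's extreme projected gradients, drop i from the active set, since
      //it is likely to stay at that bound.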
      if (alpha[i] == 0.0) {
        if (G > PGmax_old) {
          active_size--;
          std::swap(index[s], index[active_size]);
          s--;
          continue;
        } else if (G < 0.0) {
          PG = G;
        }
      } else if (alpha[i] == C) {
        if (G < PGmin_old) {
          active_size--;
          std::swap(index[s], index[active_size]);
          s--;
          continue;
        } else if (G > 0.0) {
          PG = G;
        }
      } else {
        PG = G;
      } 

      PGmax_new = std::max(PGmax_new, PG);
      PGmin_new = std::min(PGmin_new, PG);

      if (std::abs(PG) > 1.0e-12) {
        const double alpha_old = alpha[i];
        //Newton step alpha_new = alpha_old - G/QD[i], clipped back onto the
        //constraint box 0 <= alpha <= C
        alpha[i] = std::min(std::max(alpha[i] - G/QD[i], 0.0), C);

        //alpha_old is subtracted because w is maintained incrementally:
        //w = sum_j(alpha_j * y_j * x_j), so a change of (alpha_new - alpha_old)
        //in alpha_i moves w by (alpha_new - alpha_old) * y_i * x_i
        const double d = (alpha[i] - alpha_old) * y[i];
        for (const FeatureNode *f = x[i]; f->index >= 0; ++f) {
          w[f->index] += d * f->value;
        }
      }
      
    }

    if (iter % 4 == 0) {
      std::cout << "." << std::flush;
    }

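    //converged when the projected gradients span less than kEPS; if the
    //problem was shrunk, first restore the full active set and re-check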
    if ((PGmax_new - PGmin_new) <= kEPS) {
      if (active_size == l) {
        break;
      } else {
        active_size = l;
        PGmax_old = kINF;
        PGmin_old = -kINF;
        continue;
      }
    }

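    //carry this pass's extremes into the next shrinking test; if every
    //projected gradient had the same sign, disable that side of the test so
    //the whole problem is not shrunk away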
    PGmax_old = PGmax_new;
    PGmin_old = PGmin_new;
    if (PGmax_old <= 0) {
      PGmax_old = kINF;
    }
    if (PGmin_old >= 0) {
      PGmin_old = -kINF;
    }
  }

  std::cout << std::endl;

  return true;
}
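
For reference, the optimization problem both svm_train listings solve is the dual of the L2-regularized L1-loss (hinge) SVM. Writing it out lines the code's quantities up with the math; this is the standard formulation, stated as a sketch rather than taken from the excerpts:

\min_{\alpha} \; f(\alpha) = \tfrac{1}{2}\,\alpha^{\top} Q \alpha - \mathbf{e}^{\top}\alpha
\quad \text{s.t.} \quad 0 \le \alpha_i \le C, \qquad Q_{ij} = y_i y_j \, x_i^{\top} x_j .

With w = \sum_j \alpha_j y_j x_j maintained incrementally, the per-coordinate derivatives are

\frac{\partial f}{\partial \alpha_i} = (Q\alpha)_i - 1 = y_i \, w^{\top} x_i - 1 \;(= G),
\qquad
\frac{\partial^2 f}{\partial \alpha_i^2} = Q_{ii} = x_i^{\top} x_i \;(= \texttt{QD[i]}),

so each inner step is the projected Newton update

\alpha_i \leftarrow \min\!\bigl(\max\!\bigl(\alpha_i - G/Q_{ii},\, 0\bigr),\, C\bigr),
\qquad
w \leftarrow w + (\alpha_i^{\text{new}} - \alpha_i^{\text{old}})\, y_i x_i .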