Example #1
0
/*
 * Store a block of data in the log-structured store.
 *
 * Hashes buf, and if a block with that hash is not already present,
 * appends a log entry (header + payload) to the log file and links a
 * new node into the on-disk index tree. The block's address is copied
 * into *at when at is non-NULL.
 *
 * Returns 0 on success (including the already-present case), -1 with
 * errno set on failure.
 */
static int put(struct store *st, const void *buf, size_t len, struct addr *at)
{
    struct fstore *fst;
    struct addr pa;
    idx_t i, pi;
    struct idxent ie;
    loff_t leoff;
    int c, rv;
    struct logent le;
    
    if(len > STORE_MAXBLSZ) {
	errno = E2BIG;
	return(-1);
    }

    fst = st->pdata;
    hash(buf, len, &pa);
    if(at != NULL)
	memcpy(at->hash, pa.hash, 32);
    
    /* Content-addressed: if the block already exists, there is nothing
     * to do. On a miss, lookup leaves the would-be parent index in pi
     * (-1 for an empty tree). */
    if(lookup(fst, &pa, &pi) != -1)
	return(0);
    
    memcpy(le.magic, LOGENTMAGIC, 4);
    le.name = pa;
    le.len = len;
    le.fl = 0;
    /* XXX: Thread safety { */
    leoff = fst->logsize;
    fst->logsize += sizeof(le) + len;
    /* } */
    /* XXX: Handle data with embedded LOGENTMAGIC */
    /* Propagate write failures instead of silently reporting success;
     * writeall is expected to set errno. */
    if(writeall(fst->logfd, &le, sizeof(le), leoff) < 0)
	return(-1);
    if(writeall(fst->logfd, buf, len, leoff + sizeof(le)) < 0)
	return(-1);

    /* The index calls must NOT live inside assert(): with NDEBUG they
     * would be compiled out entirely and the index never updated. */
    i = newindex(fst);
    rv = getidx(fst, i, &ie);
    assert(!rv);
    ie.addr = pa;
    ie.off = leoff;
    rv = putidx(fst, i, &ie);
    assert(!rv);
    if(pi != -1) {
	/* Hook the new node under its parent on the side given by the
	 * hash comparison. */
	rv = getidx(fst, pi, &ie);
	assert(!rv);
	c = addrcmp(&pa, &ie.addr);
	if(c < 0)
	    ie.l = i;
	else
	    ie.r = i;
	rv = putidx(fst, pi, &ie);
	assert(!rv);
    }
    (void)rv; /* silence unused warning under NDEBUG */
    
    return(0);
}
Example #2
0
  /**
   * Trains one binary SVM per class with an SMO-style loop.
   *
   * Copies the training data, builds the target matrix from the ids,
   * and for every class alternates between sweeping all examples and
   * sweeping only the non-bound (0 < alpha < C) examples until no
   * multiplier changes.
   *
   * @param input training vectors, one per row
   * @param ids   class id for each row of input
   * @return true if training completed, false if aborted via the
   *         progress object
   */
  bool svm::genericTrain(const dmatrix& input, const ivector& ids) {

    char buffer[80];

    if (validProgressObject()) {
      getProgressObject().reset();
      getProgressObject().setTitle("SVM: Training");
      getProgressObject().setMaxSteps(nClasses);
    }

    bias.resize(nClasses,getParameters().bias,false,true);
    // NOTE(review): trainData is a raw owning pointer; presumably freed
    // elsewhere in the class -- verify there is no leak on retrain.
    trainData=new dmatrix(input);
    alpha.resize(nClasses,input.rows(),0,false,true);
    makeTargets(ids);
    errorCache.resize(input.rows());

    const parameters& param=getParameters();

    C=param.C;
    tolerance=param.tolerance;
    epsilon=param.epsilon;
    bool abort=false;

    // train one SVM for each class
    for (int cid=0; cid<nClasses && !abort; cid++) {
      int numChanged=0;
      bool examineAll=true;

      currentTarget=&target->getRow(cid);
      currentClass=cid;
      currentAlpha=&alpha.getRow(cid);

      _lti_debug("Training class " << cid << "\n");

      fillErrorCache();

      while ((numChanged > 0 || examineAll) && !abort) {
        numChanged=0;
        if (examineAll) {
          // iterate over all alphas
          for (int i=0; i<trainData->rows(); i++) {
            if (examineExample(i)) {
              numChanged++;
            }
          }
          // next turn, look only at non-bound alphas
          examineAll=false;
        } else {
          // iterate over all non-0 and non-C alphas.
          // BUG FIX: previously tmpAlpha was delete[]d before the second
          // loop ran, and the loop called examineExample(i) over 0..j-1,
          // so the collected non-bound indices were never actually used.
          int *tmpAlpha=new int[alpha.getRow(cid).size()];
          int j=0,i=0;
          for (i=0; i<alpha.getRow(cid).size(); i++) {
            if (alpha.getRow(cid).at(i) != 0.0 &&
                alpha.getRow(cid).at(i) != C) {
              tmpAlpha[j++]=i;
            }
          }
          for (i=0; i<j; i++) {
            if (examineExample(tmpAlpha[i])) {
              numChanged++;
            }
          }
          delete[] tmpAlpha;
          // next turn, examine all if we did not succeed this time
          if (numChanged == 0) {
            examineAll=true;
          }
        }
      }
      // update progress info object
      if (validProgressObject()) {
        // snprintf: never overrun the fixed-size progress buffer
        snprintf(buffer,sizeof(buffer),"numChanged=%d, error=%f",
                 numChanged,errorSum);
        getProgressObject().step(buffer);
        abort=abort || getProgressObject().breakRequested();
      }

      // now limit the number of support vectors
      // does not work yet, so disable it
      if (0) {
        int supnum=0;
        ivector index(currentAlpha->size());
        ivector newindex(currentAlpha->size());
        dvector newkey(currentAlpha->size());
        for (int i=0; i<currentAlpha->size(); i++) {
          if (currentAlpha->at(i) > 0) {
            supnum++;
          }
          index[i]=i;
        }
        if (supnum > param.nSupport && param.nSupport > 0) {
          lti::sort2<double> sorter;
          sorter.apply(*currentAlpha,index,newkey,newindex);

          // skip the (effectively zero) multipliers, then clear the
          // smallest remaining ones until only nSupport survive
          int i;
          for (i=0; i<newkey.size() &&
                 lti::abs(newkey[i]) > std::numeric_limits<double>::epsilon(); i++) {
          }
          for (int j=i; j<currentAlpha->size()-param.nSupport; j++) {
            currentAlpha->at(newindex[j])=0;
          }
          _lti_debug("Final alpha: " << *currentAlpha << std::endl);
        }
      }
    }

    defineOutputTemplate();

    _lti_debug("alpha:\n" << alpha << "\n");

    // make sure that all lagrange multipliers are larger than
    // zero, otherwise we might get into trouble later
    alpha.apply(rectify);

    if (abort) {
      setStatusString("Training aborted by user!");
    }
    return !abort;
  }