Example #1
/**
 * This should allocate memory for sf_inode_info, compute a unique inode
 * number, get an inode from the VFS, initialize the inode info, and
 * instantiate the dentry.
 *
 * @param parent        inode entry of the directory
 * @param dentry        directory cache entry
 * @param path          path name
 * @param info          file information
 * @param handle        handle
 * @returns 0 on success, Linux error code otherwise
 */
static int sf_instantiate(struct inode *parent, struct dentry *dentry,
                          SHFLSTRING *path, PSHFLFSOBJINFO info, SHFLHANDLE handle)
{
    int err;
    ino_t ino;
    struct inode *inode;
    struct sf_inode_info *sf_new_i;
    struct sf_glob_info *sf_g = GET_GLOB_INFO(parent->i_sb);

    TRACE();
    BUG_ON(!sf_g);

    sf_new_i = kmalloc(sizeof(*sf_new_i), GFP_KERNEL);
    if (!sf_new_i)
    {
        LogRelFunc(("could not allocate inode info.\n"));
        err = -ENOMEM;
        goto fail0;
    }

    ino = iunique(parent->i_sb, 1);
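    /* iunique() returns an inode number that is currently unused on this
     * superblock and greater than the reserved value (1 here); the VFS
     * provides it for file systems without a natural, persistent inode
     * numbering scheme, and the number is passed to iget_locked()/iget()
     * below. */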
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 4, 25)
    inode = iget_locked(parent->i_sb, ino);
#else
    inode = iget(parent->i_sb, ino);
#endif
    if (!inode)
    {
        LogFunc(("iget failed\n"));
        err = -ENOMEM;
        goto fail1;
    }

    sf_init_inode(sf_g, inode, info);
    sf_new_i->path = path;
    SET_INODE_INFO(inode, sf_new_i);
    sf_new_i->force_restat = 1;
    sf_new_i->force_reread = 0;

    d_instantiate(dentry, inode);

#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 4, 25)
    unlock_new_inode(inode);
#endif

    /* Store this handle if we leave the handle open. */
    sf_new_i->handle = handle;
    return 0;

fail1:
    kfree(sf_new_i);

fail0:
    return err;

}
Example #2
static ino_t
HgfsGetFileInode(HgfsAttrInfo const *attr,     // IN: Attrs to use
                 struct super_block *sb)       // IN: Superblock of this fs
{
   ino_t inodeEntry;

   ASSERT(attr);
   ASSERT(sb);

   if (attr->mask & HGFS_ATTR_VALID_FILEID) {
      inodeEntry = attr->hostFileId;
   } else {
      inodeEntry = iunique(sb, HGFS_RESERVED_INO);
   }

   LOG(4, (KERN_DEBUG "VMware hgfs: %s: return %lu\n", __func__, inodeEntry));
   return inodeEntry;
}
/* Connect a wrapfs inode dentry/inode with several lower ones.  This is
 * the classic stackable file system "vnode interposition" action.
 *
 * @dentry: wrapfs's dentry which interposes on lower one
 * @sb: wrapfs's super_block
 * @lower_path: the lower path (caller does path_get/put)
 */
int wrapfs_interpose(struct dentry *dentry, struct super_block *sb,
			struct path *lower_path)
{
	int err = 0;
	struct inode *inode;

	struct inode *lower_inode;
	struct super_block *lower_sb;


	lower_inode = lower_path->dentry->d_inode;
	lower_sb = wrapfs_lower_super(sb);

	/* check that the lower file system didn't cross a mount point */
	if (lower_inode->i_sb != lower_sb) {
		err = -EXDEV;
		goto out;
	}

	/*
	* We allocate our new inode below by calling wrapfs_iget,
	* which will initialize some of the new inode's fields
	*/
	/* inherit lower inode number for wrapfs's inode */
	inode = wrapfs_new_iget(sb, iunique(sb, 1));
	if (IS_ERR(inode)) {
		err = PTR_ERR(inode);
		goto out;
	}
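	/* if the inode returned above is already referenced elsewhere
	 * (i_count > 1), it has been filled in before, so skip
	 * wrapfs_fill_inode() and just add the dentry */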
	if (atomic_read(&inode->i_count) > 1)
		goto out_add;
	wrapfs_fill_inode(dentry, inode);
	printk(KERN_INFO "wrapfs_interpose: success\n");
out_add:
	d_add(dentry, inode);
out:
	return err;
}
Example #4
static int cifs_filldir(char *find_entry, struct file *file,
		struct dir_context *ctx,
		char *scratch_buf, unsigned int max_len)
{
	struct cifsFileInfo *file_info = file->private_data;
	struct super_block *sb = file_inode(file)->i_sb;
	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
	struct cifs_dirent de = { NULL, };
	struct cifs_fattr fattr;
	struct qstr name;
	int rc = 0;
	ino_t ino;

	rc = cifs_fill_dirent(&de, find_entry, file_info->srch_inf.info_level,
			      file_info->srch_inf.unicode);
	if (rc)
		return rc;

	if (de.namelen > max_len) {
		cifs_dbg(VFS, "bad search response length %zd past smb end\n",
			 de.namelen);
		return -EINVAL;
	}

	/* skip . and .. since we added them first */
	if (cifs_entry_is_dot(&de, file_info->srch_inf.unicode))
		return 0;

	if (file_info->srch_inf.unicode) {
		struct nls_table *nlt = cifs_sb->local_nls;
		int map_type;

		map_type = cifs_remap(cifs_sb);
		name.name = scratch_buf;
		name.len =
			cifs_from_utf16((char *)name.name, (__le16 *)de.name,
					UNICODE_NAME_MAX,
					min_t(size_t, de.namelen,
					      (size_t)max_len), nlt, map_type);
		name.len -= nls_nullsize(nlt);
	} else {
		name.name = de.name;
		name.len = de.namelen;
	}

	switch (file_info->srch_inf.info_level) {
	case SMB_FIND_FILE_UNIX:
		cifs_unix_basic_to_fattr(&fattr,
					 &((FILE_UNIX_INFO *)find_entry)->basic,
					 cifs_sb);
		break;
	case SMB_FIND_FILE_INFO_STANDARD:
		cifs_std_info_to_fattr(&fattr,
				       (FIND_FILE_STANDARD_INFO *)find_entry,
				       cifs_sb);
		break;
	default:
		cifs_dir_info_to_fattr(&fattr,
				       (FILE_DIRECTORY_INFO *)find_entry,
				       cifs_sb);
		break;
	}

	if (de.ino && (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM)) {
		fattr.cf_uniqueid = de.ino;
	} else {
		fattr.cf_uniqueid = iunique(sb, ROOT_I);
		cifs_autodisable_serverino(cifs_sb);
	}

	if ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MF_SYMLINKS) &&
	    couldbe_mf_symlink(&fattr))
		/*
		 * trying to get the type and mode can be slow,
		 * so just call those regular files for now, and mark
		 * for reval
		 */
		fattr.cf_flags |= CIFS_FATTR_NEED_REVAL;

	cifs_prime_dcache(file->f_path.dentry, &name, &fattr);

	ino = cifs_uniqueid_to_ino_t(fattr.cf_uniqueid);
	return !dir_emit(ctx, name.name, name.len, ino, fattr.cf_dtype);
}
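
/*
 * The variant below is an older revision of the same routine: it takes the
 * legacy filldir_t callback and a dirent cookie instead of the struct
 * dir_context used above, and emits entries through filldir() rather than
 * dir_emit().
 */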
static int cifs_filldir(char *find_entry, struct file *file, filldir_t filldir,
		void *dirent, char *scratch_buf, unsigned int max_len)
{
	struct cifsFileInfo *file_info = file->private_data;
	struct super_block *sb = file->f_path.dentry->d_sb;
	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
	struct cifs_dirent de = { NULL, };
	struct cifs_fattr fattr;
	struct dentry *dentry;
	struct qstr name;
	int rc = 0;
	ino_t ino;

	rc = cifs_fill_dirent(&de, find_entry, file_info->srch_inf.info_level,
			      file_info->srch_inf.unicode);
	if (rc)
		return rc;

	if (de.namelen > max_len) {
		cERROR(1, "bad search response length %zd past smb end",
			  de.namelen);
		return -EINVAL;
	}

	
	/* skip . and .. since we added them first */
	if (cifs_entry_is_dot(&de, file_info->srch_inf.unicode))
		return 0;

	if (file_info->srch_inf.unicode) {
		struct nls_table *nlt = cifs_sb->local_nls;

		name.name = scratch_buf;
		name.len =
			cifs_from_utf16((char *)name.name, (__le16 *)de.name,
					UNICODE_NAME_MAX,
					min_t(size_t, de.namelen,
					      (size_t)max_len), nlt,
					cifs_sb->mnt_cifs_flags &
						CIFS_MOUNT_MAP_SPECIAL_CHR);
		name.len -= nls_nullsize(nlt);
	} else {
		name.name = de.name;
		name.len = de.namelen;
	}

	switch (file_info->srch_inf.info_level) {
	case SMB_FIND_FILE_UNIX:
		cifs_unix_basic_to_fattr(&fattr,
					 &((FILE_UNIX_INFO *)find_entry)->basic,
					 cifs_sb);
		break;
	case SMB_FIND_FILE_INFO_STANDARD:
		cifs_std_info_to_fattr(&fattr,
				       (FIND_FILE_STANDARD_INFO *)find_entry,
				       cifs_sb);
		break;
	default:
		cifs_dir_info_to_fattr(&fattr,
				       (FILE_DIRECTORY_INFO *)find_entry,
				       cifs_sb);
		break;
	}

	if (de.ino && (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM)) {
		fattr.cf_uniqueid = de.ino;
	} else {
		fattr.cf_uniqueid = iunique(sb, ROOT_I);
		cifs_autodisable_serverino(cifs_sb);
	}

	if ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MF_SYMLINKS) &&
	    CIFSCouldBeMFSymlink(&fattr))
		fattr.cf_flags |= CIFS_FATTR_NEED_REVAL;

	ino = cifs_uniqueid_to_ino_t(fattr.cf_uniqueid);
	dentry = cifs_readdir_lookup(file->f_dentry, &name, &fattr);

	rc = filldir(dirent, name.name, name.len, file->f_pos, ino,
		     fattr.cf_dtype);

	dput(dentry);
	return rc;
}
Example #6
/**
 * This is called when the VFS fails to locate a dentry in the cache. The
 * job of this function is to allocate an inode and link it to the dentry.
 * [dentry] contains the name to be looked up in the [parent] directory.
 * Failure to locate the name is not a "hard" error; in this case a NULL
 * inode is added to [dentry] and the VFS should proceed trying to create
 * the entry via other means. NULL (or a "positive" pointer) ought to be
 * returned in case of success and a "negative" pointer on error.
 */
static struct dentry *sf_lookup(struct inode *parent, struct dentry *dentry
#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 6, 0)
                                , unsigned int flags
#elif LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)
                                , struct nameidata *nd
#endif
                               )
{
    int err;
    struct sf_inode_info *sf_i, *sf_new_i;
    struct sf_glob_info *sf_g;
    SHFLSTRING *path;
    struct inode *inode;
    ino_t ino;
    SHFLFSOBJINFO fsinfo;

    TRACE();
    sf_g = GET_GLOB_INFO(parent->i_sb);
    sf_i = GET_INODE_INFO(parent);

    BUG_ON(!sf_g);
    BUG_ON(!sf_i);

    err = sf_path_from_dentry(__func__, sf_g, sf_i, dentry, &path);
    if (err)
        goto fail0;

    err = sf_stat(__func__, sf_g, path, &fsinfo, 1);
    if (err)
    {
        if (err == -ENOENT)
        {
            /* -ENOENT: add NULL inode to dentry so it later can be
               created via call to create/mkdir/open */
            kfree(path);
            inode = NULL;
        }
        else
            goto fail1;
    }
    else
    {
        sf_new_i = kmalloc(sizeof(*sf_new_i), GFP_KERNEL);
        if (!sf_new_i)
        {
            LogRelFunc(("could not allocate memory for new inode info\n"));
            err = -ENOMEM;
            goto fail1;
        }
        sf_new_i->handle = SHFL_HANDLE_NIL;
        sf_new_i->force_reread = 0;

        ino = iunique(parent->i_sb, 1);
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 4, 25)
        inode = iget_locked(parent->i_sb, ino);
#else
        inode = iget(parent->i_sb, ino);
#endif
        if (!inode)
        {
            LogFunc(("iget failed\n"));
            err = -ENOMEM;          /* XXX: ??? */
            goto fail2;
        }

        SET_INODE_INFO(inode, sf_new_i);
        sf_init_inode(sf_g, inode, &fsinfo);
        sf_new_i->path = path;

#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 4, 25)
        unlock_new_inode(inode);
#endif
    }

    sf_i->force_restat = 0;
    dentry->d_time = jiffies;
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 38)
    d_set_d_op(dentry, &sf_dentry_ops);
#else
    dentry->d_op = &sf_dentry_ops;
#endif
    d_add(dentry, inode);
    return NULL;

fail2:
    kfree(sf_new_i);

fail1:
    kfree(path);

fail0:
    return ERR_PTR(err);
}
Example #7
static int cifs_filldir(char *pfindEntry, struct file *file, filldir_t filldir,
                        void *direntry, char *scratch_buf, unsigned int max_len)
{
    int rc = 0;
    struct qstr qstring;
    struct cifsFileInfo *pCifsF;
    u64    inum;
    ino_t  ino;
    struct super_block *sb;
    struct cifs_sb_info *cifs_sb;
    struct dentry *tmp_dentry;
    struct cifs_fattr fattr;

    /* get filename and len into qstring */
    /* get dentry */
    /* decide whether to create and populate inode */
    if ((direntry == NULL) || (file == NULL))
        return -EINVAL;

    pCifsF = file->private_data;

    if ((scratch_buf == NULL) || (pfindEntry == NULL) || (pCifsF == NULL))
        return -ENOENT;

    rc = cifs_entry_is_dot(pfindEntry, pCifsF);
    /* skip . and .. since we added them first */
    if (rc != 0)
        return 0;

    sb = file->f_path.dentry->d_sb;
    cifs_sb = CIFS_SB(sb);

    qstring.name = scratch_buf;
    rc = cifs_get_name_from_search_buf(&qstring, pfindEntry,
                                       pCifsF->srch_inf.info_level,
                                       pCifsF->srch_inf.unicode, cifs_sb,
                                       max_len, &inum /* returned */);

    if (rc)
        return rc;

    if (pCifsF->srch_inf.info_level == SMB_FIND_FILE_UNIX)
        cifs_unix_basic_to_fattr(&fattr,
                                 &((FILE_UNIX_INFO *) pfindEntry)->basic,
                                 cifs_sb);
    else if (pCifsF->srch_inf.info_level == SMB_FIND_FILE_INFO_STANDARD)
        cifs_std_info_to_fattr(&fattr, (FIND_FILE_STANDARD_INFO *)
                               pfindEntry, cifs_sb);
    else
        cifs_dir_info_to_fattr(&fattr, (FILE_DIRECTORY_INFO *)
                               pfindEntry, cifs_sb);

    if (inum && (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM)) {
        fattr.cf_uniqueid = inum;
    } else {
        fattr.cf_uniqueid = iunique(sb, ROOT_I);
        cifs_autodisable_serverino(cifs_sb);
    }

    if ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MF_SYMLINKS) &&
            CIFSCouldBeMFSymlink(&fattr))
        /*
         * trying to get the type and mode can be slow,
         * so just call those regular files for now, and mark
         * for reval
         */
        fattr.cf_flags |= CIFS_FATTR_NEED_REVAL;

    ino = cifs_uniqueid_to_ino_t(fattr.cf_uniqueid);
    tmp_dentry = cifs_readdir_lookup(file->f_dentry, &qstring, &fattr);

    rc = filldir(direntry, qstring.name, qstring.len, file->f_pos,
                 ino, fattr.cf_dtype);

    dput(tmp_dentry);
    return rc;
}
Example #8
static int compute_svm_adaboost(ESupportVectorMachine *esvm,int n,int d,
				double *x[],int y[],int nmodels,int kernel,
				double kp,double C,double tol,double eps,
				int maxloops,int verbose)
{
  int i,b;
  int *samples;
  double **trx;
  int *try;
  double *prob;
  double *prob_copy;
  double sumalpha;
  double epsilon;
  int *pred;
  double *margin;
  double sumprob;
  int nclasses;
  int *classes; 

  if(nmodels<1){
    fprintf(stderr,"compute_svm_adaboost: nmodels must be greater than 0\n");
    return 1;
  }

  if(C<=0){
    fprintf(stderr,"compute_svm_adaboost: regularization parameter C must be > 0\n");
    return 1;
  }
  if(eps<=0){
    fprintf(stderr,"compute_svm_adaboost: parameter eps must be > 0\n");
    return 1;
  }
  if(tol<=0){
    fprintf(stderr,"compute_svm_adaboost: parameter tol must be > 0\n");
    return 1;
  }
  if(maxloops<=0){
    fprintf(stderr,"compute_svm_adaboost: parameter maxloops must be > 0\n");
    return 1;
  }

  switch(kernel){
  case SVM_KERNEL_LINEAR:
    break;
  case SVM_KERNEL_GAUSSIAN:
    if(kp <=0){
      fprintf(stderr,"compute_svm_adaboost: parameter kp must be > 0\n");
      return 1;
    }
    break;
  case SVM_KERNEL_POLINOMIAL:
    if(kp <=0){
      fprintf(stderr,"compute_svm_adaboost: parameter kp must be > 0\n");
      return 1;
    }
    break;
  default:
    fprintf(stderr,"compute_svm_adaboost: kernel not recognized\n");
    return 1;
  }

  nclasses=iunique(y,n, &classes);

  if(nclasses<=0){
    fprintf(stderr,"compute_svm_adaboost: iunique error\n");
    return 1;
  }
  if(nclasses==1){
    fprintf(stderr,"compute_svm_adaboost: only 1 class recognized\n");
    return 1;
  }
  if(nclasses==2)
    if(classes[0] != -1 || classes[1] != 1){
      fprintf(stderr,"compute_svm_adaboost: for binary classification classes must be -1,1\n");
      return 1;
    }
  if(nclasses>2){
    fprintf(stderr,"compute_svm_adaboost: multiclass classification not allowed\n");
    return 1;
  }

  if(!(esvm->svm=(SupportVectorMachine *)
       calloc(nmodels,sizeof(SupportVectorMachine)))){
    fprintf(stderr,"compute_svm_adaboost: out of memory\n");
    return 1;
  }

  if(!(esvm->weights=dvector(nmodels))){
    fprintf(stderr,"compute_svm_adaboost: out of memory\n");
    return 1;
  }

  if(!(trx=(double **)calloc(n,sizeof(double*)))){
    fprintf(stderr,"compute_svm_adaboost: out of memory\n");
    return 1;
  }
  if(!(try=ivector(n))){
    fprintf(stderr,"compute_svm_adaboost: out of memory\n");
    return 1;
  }
  
  if(!(prob_copy=dvector(n))){
    fprintf(stderr,"compute_svm_adaboost: out of memory\n");
    return 1;
  }
  if(!(prob=dvector(n))){
    fprintf(stderr,"compute_svm_adaboost: out of memory\n");
    return 1;
  }

  if(!(pred=ivector(n))){
    fprintf(stderr,"compute_svm_adaboost: out of memory\n");
    return 1;
  }

  for(i =0;i<n;i++)
    prob[i]=1.0/(double)n;

  esvm->nmodels=nmodels;
  sumalpha=0.0;
  for(b=0;b<nmodels;b++){

    for(i =0;i<n;i++)
      prob_copy[i]=prob[i];
    if(sample(n, prob_copy, n, &samples, TRUE,b)!=0){
      fprintf(stderr,"compute_svm_adaboost: sample error\n");
      return 1;
    }

    for(i =0;i<n;i++){
      trx[i] = x[samples[i]];
      try[i] = y[samples[i]];
    }
    
    if(compute_svm(&(esvm->svm[b]),n,d,trx,try,kernel,kp,C,
		   tol,eps,maxloops,verbose,NULL)!=0){
      fprintf(stderr,"compute_svm_adaboost: compute_svm error\n");
      return 1;
    }
    free_ivector(samples);

    epsilon=0.0;
    for(i=0;i<n;i++){
      pred[i]=predict_svm(&(esvm->svm[b]),x[i],&margin);
      if(pred[i] < -1 ){
	fprintf(stderr,"compute_svm_adaboost: predict_svm error\n");
	return 1;
      }
      if(pred[i]==0 || pred[i] != y[i])
	epsilon += prob[i];
      free_dvector(margin);
    }
    
    if(epsilon > 0 && epsilon < 0.5){
      esvm->weights[b]=0.5 *log((1.0-epsilon)/epsilon);
      sumalpha+=esvm->weights[b];
    }else{
      esvm->nmodels=b;
      break;
    }
      
    sumprob=0.0;
    for(i=0;i<n;i++){
      prob[i]=prob[i]*exp(-esvm->weights[b]*y[i]*pred[i]);
      sumprob+=prob[i];
    }

    if(sumprob <=0){
      fprintf(stderr,"compute_svm_adaboost: sumprob = 0\n");
      return 1;
    }
    for(i=0;i<n;i++)
      prob[i] /= sumprob;
    
  }
  
  if(esvm->nmodels<=0){
    fprintf(stderr,"compute_svm_adaboost: no models produced\n");
    return 1;
  }

  if(sumalpha <=0){
      fprintf(stderr,"compute_svm_adaboost: sumalpha = 0\n");
      return 1;
  }
  for(b=0;b<esvm->nmodels;b++)
    esvm->weights[b] /= sumalpha;
  
  free(trx);
  free_ivector(classes);
  free_ivector(try);
  free_ivector(pred);
  free_dvector(prob);
  free_dvector(prob_copy);
  return 0;

}
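
/*
 * Note on the boosting loop above: each round b trains an SVM on a sample
 * drawn according to the current probabilities prob[], then sets the model
 * weight to 0.5 * log((1 - epsilon) / epsilon), where epsilon is the
 * probability-weighted training error.  Points with pred[i] != y[i] get
 * their probability multiplied by exp(+weights[b]) and correctly classified
 * points by exp(-weights[b]), after which prob[] is renormalized -- the
 * usual discrete AdaBoost update.  Training stops early if epsilon leaves
 * the interval (0, 0.5).
 */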





static void svm_smo(SupportVectorMachine *svm)
{
  int i,k;
  int numChanged;
  int examineAll;
  int nloops=0;


  svm->end_support_i=svm->n;

  if(svm->kernel_type==SVM_KERNEL_LINEAR){
    svm->kernel_func=dot_product_func;
    svm->learned_func=learned_func_linear;
  }

  if(svm->kernel_type==SVM_KERNEL_POLINOMIAL){
    svm->kernel_func=polinomial_kernel;
    svm->learned_func=learned_func_nonlinear;
  }

  if(svm->kernel_type==SVM_KERNEL_GAUSSIAN){
    /*
    svm->precomputed_self_dot_product=(double *)calloc(svm->n,sizeof(double));
    */
    for(i=0;i<svm->n;i++)
      svm->precomputed_self_dot_product[i] = dot_product_func(i,i,svm);
    svm->kernel_func=rbf_kernel;
    svm->learned_func=learned_func_nonlinear;
  }

  numChanged=0;
  examineAll=1;

  svm->convergence=1;
  while(svm->convergence==1 &&(numChanged>0 || examineAll)){
    numChanged=0;
    if(examineAll){
      for(k=0;k<svm->n;k++)
	numChanged += examineExample(k,svm);
    }else{
      for(k=0;k<svm->n;k++)
	if(svm->alph[k] > 0 && svm->alph[k] < svm->Cw[k])
	  numChanged += examineExample(k,svm);
    }
    if(examineAll==1)
      examineAll=0;
    else if(numChanged==0)
      examineAll=1;

    nloops+=1;
    if(nloops==svm->maxloops)
      svm->convergence=0;
    if(svm->verbose==1)
      fprintf(stdout,"%6d\b\b\b\b\b\b\b",nloops);
  }

}
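
/*
 * Note on svm_smo() above: the outer loop alternates between sweeping all
 * training points (examineAll == 1) and sweeping only the non-bound points
 * (0 < alph[k] < Cw[k]); it stops when a full sweep changes nothing, or when
 * maxloops sweeps have been performed, in which case convergence is set to 0.
 * This is the usual structure of an SMO outer loop.
 */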
Example #9
int  compute_tree(Tree *tree,int n,int d,double *x[],
		   int y[],int stumps,int minsize)
     /*
       compute tree model. x,y,n,d are the input data.
       stumps takes values 1 (compute a single split) or
       0 (standard tree). minsize is the minimum number of
       cases required to split a leaf.

       Return value: 0 on success, 1 otherwise.
     */
{
  int i,j;
  int node_class_index;
  int max_node_points;
  int cn;
  double sumpriors;

  tree->n=n;
  tree->d=d;


  if(stumps != 0 && stumps != 1){
    fprintf(stderr,"compute_tree: parameter stumps must be 0 or 1\n");
    return 1;
  }

  if(minsize < 0){
    fprintf(stderr,"compute_tree: parameter minsize must be >= 0\n");
    return 1;
  }

  tree->nclasses=iunique(y,tree->n, &(tree->classes));

  if(tree->nclasses<=0){
    fprintf(stderr,"compute_tree: iunique error\n");
    return 1;
  }

  if(tree->nclasses==1){
    fprintf(stderr,"compute_tree: only 1 class recognized\n");
    return 1;
  }

  if(tree->nclasses==2)
    if(tree->classes[0] != -1 || tree->classes[1] != 1){
      fprintf(stderr,"compute_tree: for binary classification classes must be -1,1\n");
      return 1;
    }

  if(tree->nclasses>2)
    for(i=0;i<tree->nclasses;i++)
      if(tree->classes[i] != i+1){
        fprintf(stderr,"compute_tree: for %d-class classification classes must be 1,...,%d\n",tree->nclasses,tree->nclasses);
        return 1;
      }


  
  if(!(tree->x=dmatrix(n,d))){
    fprintf(stderr,"compute_tree: out of memory\n");
    return 1;
  }
  if(!(tree->y=ivector(n))){
    fprintf(stderr,"compute_tree: out of memory\n");
    return 1;
  }
  for(i=0;i<n;i++){
    for(j=0;j<d;j++)
      tree->x[i][j]=x[i][j];
    tree->y[i]=y[i];
  }

  tree->stumps = stumps;
  tree->minsize = minsize;
  
  tree->node=(Node *)malloc(sizeof(Node));

  tree->node[0].nclasses=tree->nclasses;
  tree->node[0].npoints = tree->n;
  tree->node[0].nvar = tree->d;
  tree->node[0].data=tree->x;
  tree->node[0].classes=tree->y;
  

  tree->node[0].npoints_for_class=ivector(tree->nclasses);
  tree->node[0].priors=dvector(tree->nclasses);

  
  for(i=0;i<tree->node[0].npoints;i++){
    for(j = 0; j < tree->nclasses;j++)
      if(tree->classes[j]==tree->node[0].classes[i]){
	tree->node[0].npoints_for_class[j] += 1;
	break;
      }
  }

  node_class_index=0;
  max_node_points=0;
  for(j = 0; j < tree->nclasses;j++)
    if(tree->node[0].npoints_for_class[j] > max_node_points){
      max_node_points = tree->node[0].npoints_for_class[j];
      node_class_index = j;
    }
  tree->node[0].node_class = tree->classes[node_class_index];
  
  sumpriors=.0;
  for(j=0;j < tree->nclasses;j++)
    sumpriors += tree->node[0].npoints_for_class[j];
  for(j = 0; j < tree->nclasses;j++)
    tree->node[0].priors[j] = tree->node[0].npoints_for_class[j]/sumpriors;
  
  tree->node[0].terminal=TRUE;
  if(gini_index(tree->node[0].priors,tree->nclasses)>0)
    tree->node[0].terminal=FALSE;

  tree->nnodes=1;
  for(cn=0;cn<tree->nnodes;cn++)
    if(!tree->node[cn].terminal){
      tree->node[cn].left=tree->nnodes;
      tree->node[cn].right=tree->nnodes+1;
      tree->node=(Node *)realloc(tree->node,(tree->nnodes+2)*sizeof(Node));
      split_node(&(tree->node[cn]),&(tree->node[tree->nnodes]),
		 &(tree->node[tree->nnodes+1]),tree->classes,tree->nclasses);
      
      if(tree->minsize>0){
	if(tree->node[tree->nnodes].npoints < tree->minsize)
	  tree->node[tree->nnodes].terminal = TRUE;
	if(tree->node[tree->nnodes+1].npoints < tree->minsize)
	  tree->node[tree->nnodes+1].terminal = TRUE;
      }
      if(tree->stumps){
	tree->node[tree->nnodes].terminal = TRUE;
	tree->node[tree->nnodes+1].terminal = TRUE;
      }
      tree->nnodes += 2;
    }

  return 0;
  
}
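
/*
 * A minimal usage sketch for compute_tree() above, assuming (hypothetically)
 * that the Tree type and the compute_tree() prototype are exposed through the
 * library's ml.h header; the data layout follows the doc comment: n points of
 * dimension d in x[], binary labels -1/+1 in y[].  The function and file
 * names below are illustrative only.
 */
#include <stdio.h>
#include "ml.h"

int example_compute_tree(void)
{
  /* four 1-dimensional points, two per class */
  double p0[1] = {0.0}, p1[1] = {1.0}, p2[1] = {2.0}, p3[1] = {3.0};
  double *x[4] = {p0, p1, p2, p3};
  int y[4] = {-1, -1, 1, 1};
  Tree tree;

  /* stumps = 0 (full tree), minsize = 0 (no minimum leaf size) */
  if(compute_tree(&tree, 4, 1, x, y, 0, 0) != 0){
    fprintf(stderr,"example_compute_tree: compute_tree error\n");
    return 1;
  }
  printf("tree built with %d nodes and %d classes\n",
         tree.nnodes, tree.nclasses);
  return 0;
}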
Example #10
/*
 * Connect a unionfs inode dentry/inode with several lower ones.  This is
 * the classic stackable file system "vnode interposition" action.
 *
 * @sb: unionfs's super_block
 */
struct dentry *unionfs_interpose(struct dentry *dentry, struct super_block *sb,
                                 int flag)
{
    int err = 0;
    struct inode *inode;
    int need_fill_inode = 1;
    struct dentry *spliced = NULL;

    verify_locked(dentry);

    /*
     * We allocate our new inode below, by calling iget.
     * iget will call our read_inode which will initialize some
     * of the new inode's fields
     */

    /*
     * On revalidate we've already got our own inode and just need
     * to fix it up.
     */
    if (flag == INTERPOSE_REVAL) {
        inode = dentry->d_inode;
        UNIONFS_I(inode)->bstart = -1;
        UNIONFS_I(inode)->bend = -1;
        atomic_set(&UNIONFS_I(inode)->generation,
                   atomic_read(&UNIONFS_SB(sb)->generation));

        UNIONFS_I(inode)->lower_inodes =
            kcalloc(sbmax(sb), sizeof(struct inode *), GFP_KERNEL);
        if (unlikely(!UNIONFS_I(inode)->lower_inodes)) {
            err = -ENOMEM;
            goto out;
        }
    } else {
        /* get unique inode number for unionfs */
        inode = iget(sb, iunique(sb, UNIONFS_ROOT_INO));
        if (!inode) {
            err = -EACCES;
            goto out;
        }
        if (atomic_read(&inode->i_count) > 1)
            goto skip;
    }

    need_fill_inode = 0;
    unionfs_fill_inode(dentry, inode);

skip:
    /* only (our) lookup wants to do a d_add */
    switch (flag) {
    case INTERPOSE_DEFAULT:
        /* for operations which create new inodes */
        d_add(dentry, inode);
        break;
    case INTERPOSE_REVAL_NEG:
        d_instantiate(dentry, inode);
        break;
    case INTERPOSE_LOOKUP:
        spliced = d_splice_alias(inode, dentry);
        if (spliced && spliced != dentry) {
            /*
             * d_splice can return a dentry if it was
             * disconnected and had to be moved.  We must ensure
             * that the private data of the new dentry is
             * correct and that the inode info was filled
             * properly.  Finally we must return this new
             * dentry.
             */
            spliced->d_op = &unionfs_dops;
            spliced->d_fsdata = dentry->d_fsdata;
            dentry->d_fsdata = NULL;
            dentry = spliced;
            if (need_fill_inode) {
                need_fill_inode = 0;
                unionfs_fill_inode(dentry, inode);
            }
            goto out_spliced;
        } else if (!spliced) {
            if (need_fill_inode) {
                need_fill_inode = 0;
                unionfs_fill_inode(dentry, inode);
                goto out_spliced;
            }
        }
        break;
    case INTERPOSE_REVAL:
        /* Do nothing. */
        break;
    default:
        printk(KERN_CRIT "unionfs: invalid interpose flag passed!\n");
        BUG();
    }
    goto out;

out_spliced:
    if (!err)
        return spliced;
out:
    return ERR_PTR(err);
}
/*
 * There is no need to lock the wrapfs_super_info's rwsem as there is no
 * way anyone can have a reference to the superblock at this point in time.
 */
static int wrapfs_read_super(struct super_block *sb, void *raw_data, int silent)
{
	int err = 0, i = 0;
	struct wrapfs_dentry_info *lower_root_info = NULL;
	struct inode *inode = NULL;
	if (!raw_data) {
		printk(KERN_ERR
			"u2fs: read_super: missing data argument\n");
		err = -EINVAL;
		goto out;
	}

	/* allocate superblock private data */

	sb->s_fs_info = kzalloc(sizeof(struct wrapfs_sb_info), GFP_KERNEL);
	if (!WRAPFS_SB(sb)) {
		printk(KERN_CRIT "u2fs: read_super: out of memory\n");
		err = -ENOMEM;
		goto out_free;
	}

	atomic_set(&WRAPFS_SB(sb)->generation, 1);
	WRAPFS_SB(sb)->high_branch_id = -1;
	/* parse the options */
	lower_root_info = wrapfs_parse_options(sb, raw_data);
	if (IS_ERR(lower_root_info)) {
		printk(KERN_ERR
			"u2fs: read_super: error while parsing options"
			"(err = %ld)\n", PTR_ERR(lower_root_info));

		err = PTR_ERR(lower_root_info);
		lower_root_info = NULL;
		goto out_free;
	}

	/* set the lower superblock field of upper superblock */
	for (i = 0; i <= 1; i++) {
		struct dentry *d = lower_root_info->lower_paths[i].dentry;
		atomic_inc(&d->d_sb->s_active);
		wrapfs_set_lower_super_idx(sb, i, d->d_sb);
	}

	/* inherit maxbytes from highest priority branch */
	sb->s_maxbytes = wrapfs_lower_super_idx(sb, 0)->s_maxbytes;

	/*
	* Our c/m/atime granularity is 1 ns because we may stack on file
	* systems whose granularity is as good.
	*/
	sb->s_time_gran = 1;
	sb->s_op = &wrapfs_sops;

	/* get a new inode and allocate our root dentry */

	inode = wrapfs_new_iget(sb, iunique(sb, 1));
	if (IS_ERR(inode)) {
		err = PTR_ERR(inode);
		goto out_sput;
	}

	sb->s_root = d_alloc_root(inode);
	if (unlikely(!sb->s_root)) {
		err = -ENOMEM;
		goto out_iput;
	}

	d_set_d_op(sb->s_root, &wrapfs_dops);

	/* link the upper and lower dentries */
	sb->s_root->d_fsdata = NULL;
	err = new_dentry_private_data(sb->s_root);
	if (unlikely(err))
		goto out_freeroot;

	/* if we get here: cannot have error */
	/* set the lower dentries for s_root */

	for (i = 0; i <= 1 ; i++) {
		struct dentry *d;
		struct vfsmount *m;
		d = lower_root_info->lower_paths[i].dentry;
		m = lower_root_info->lower_paths[i].mnt;
		wrapfs_set_lower_dentry_idx(sb->s_root, i, d);
		wrapfs_set_lower_mnt_idx(sb->s_root, i, m);
	}
	atomic_set(&WRAPFS_D(sb->s_root)->generation, 1);
	if (atomic_read(&inode->i_count) <= 1)
		wrapfs_fill_inode(sb->s_root, inode);
	/*
	* No need to call interpose because we already have a positive
	* dentry, which was instantiated by d_alloc_root.  Just need to
	* d_rehash it.
	*/
	d_rehash(sb->s_root);
	if (!silent)
		printk(KERN_INFO
			"u2fs: mounted on top of type\n");
	goto out;

	/* all is well */
	/* no longer needed: free_dentry_private_data(sb->s_root); */
out_freeroot:
	if (WRAPFS_D(sb->s_root)) {
		kfree(WRAPFS_D(sb->s_root)->lower_paths);
		free_dentry_private_data(sb->s_root);
	}
	dput(sb->s_root);
out_iput:
	iput(inode);
out_sput:
	/* drop refs we took earlier */
	if (lower_root_info && !IS_ERR(lower_root_info)) {
		for (i = 0; i <= 1; i++) {
			struct dentry *d;
			d = lower_root_info->lower_paths[i].dentry;
			atomic_dec(&d->d_sb->s_active);
			path_put(&lower_root_info->lower_paths[i]);
		}
		kfree(lower_root_info->lower_paths);
		kfree(lower_root_info);
		lower_root_info = NULL;
	}
out_free:
	kfree(WRAPFS_SB(sb)->data);
	kfree(WRAPFS_SB(sb));
	sb->s_fs_info = NULL;
out:
	if (lower_root_info && !IS_ERR(lower_root_info)) {
		kfree(lower_root_info->lower_paths);
		kfree(lower_root_info);
	}
	return err;
}
Example #12
/*
 * There is no need to lock the unionfs_super_info's rwsem as there is no
 * way anyone can have a reference to the superblock at this point in time.
 */
static int unionfs_read_super(struct super_block *sb, void *raw_data,
			      int silent)
{
	int err = 0;
	struct unionfs_dentry_info *lower_root_info = NULL;
	int bindex, bstart, bend;
	struct inode *inode = NULL;

	if (!raw_data) {
		printk(KERN_ERR
		       "unionfs: read_super: missing data argument\n");
		err = -EINVAL;
		goto out;
	}

	/* Allocate superblock private data */
	sb->s_fs_info = kzalloc(sizeof(struct unionfs_sb_info), GFP_KERNEL);
	if (unlikely(!UNIONFS_SB(sb))) {
		printk(KERN_CRIT "unionfs: read_super: out of memory\n");
		err = -ENOMEM;
		goto out;
	}

	UNIONFS_SB(sb)->bend = -1;
	atomic_set(&UNIONFS_SB(sb)->generation, 1);
	init_rwsem(&UNIONFS_SB(sb)->rwsem);
	UNIONFS_SB(sb)->high_branch_id = -1; /* -1 == invalid branch ID */

	lower_root_info = unionfs_parse_options(sb, raw_data);
	if (IS_ERR(lower_root_info)) {
		printk(KERN_ERR
		       "unionfs: read_super: error while parsing options "
		       "(err = %ld)\n", PTR_ERR(lower_root_info));
		err = PTR_ERR(lower_root_info);
		lower_root_info = NULL;
		goto out_free;
	}
	if (lower_root_info->bstart == -1) {
		err = -ENOENT;
		goto out_free;
	}

	/* set the lower superblock field of upper superblock */
	bstart = lower_root_info->bstart;
	BUG_ON(bstart != 0);
	sbend(sb) = bend = lower_root_info->bend;
	for (bindex = bstart; bindex <= bend; bindex++) {
		struct dentry *d = lower_root_info->lower_paths[bindex].dentry;
		atomic_inc(&d->d_sb->s_active);
		unionfs_set_lower_super_idx(sb, bindex, d->d_sb);
	}

	/* inherit s_maxbytes from the highest priority branch */
	sb->s_maxbytes = unionfs_lower_super_idx(sb, 0)->s_maxbytes;

	/*
	 * Our c/m/atime granularity is 1 ns because we may stack on file
	 * systems whose granularity is as good.  This is important for our
	 * time-based cache coherency.
	 */
	sb->s_time_gran = 1;

	sb->s_op = &unionfs_sops;

	/* get a new inode and allocate our root dentry */
	inode = unionfs_iget(sb, iunique(sb, UNIONFS_ROOT_INO));
	if (IS_ERR(inode)) {
		err = PTR_ERR(inode);
		goto out_dput;
	}
	sb->s_root = d_make_root(inode);
	if (unlikely(!sb->s_root)) {
		err = -ENOMEM;
		goto out_iput;
	}
	d_set_d_op(sb->s_root, &unionfs_dops);

	/* link the upper and lower dentries */
	sb->s_root->d_fsdata = NULL;
	err = new_dentry_private_data(sb->s_root, UNIONFS_DMUTEX_ROOT);
	if (unlikely(err))
		goto out_freedpd;

	/* if we get here: cannot have error */

	/* Set the lower dentries for s_root */
	for (bindex = bstart; bindex <= bend; bindex++) {
		struct dentry *d;
		struct vfsmount *m;

		d = lower_root_info->lower_paths[bindex].dentry;
		m = lower_root_info->lower_paths[bindex].mnt;

		unionfs_set_lower_dentry_idx(sb->s_root, bindex, d);
		unionfs_set_lower_mnt_idx(sb->s_root, bindex, m);
	}
	dbstart(sb->s_root) = bstart;
	dbend(sb->s_root) = bend;

	/* Set the generation number to one, since this is for the mount. */
	atomic_set(&UNIONFS_D(sb->s_root)->generation, 1);

	if (atomic_read(&inode->i_count) <= 1)
		unionfs_fill_inode(sb->s_root, inode);

	/*
	 * No need to call interpose because we already have a positive
	 * dentry, which was instantiated by d_alloc_root.  Just need to
	 * d_rehash it.
	 */
	d_rehash(sb->s_root);

	unionfs_unlock_dentry(sb->s_root);
	goto out; /* all is well */

out_freedpd:
	if (UNIONFS_D(sb->s_root)) {
		kfree(UNIONFS_D(sb->s_root)->lower_paths);
		free_dentry_private_data(sb->s_root);
	}
	dput(sb->s_root);

out_iput:
	iput(inode);

out_dput:
	if (lower_root_info && !IS_ERR(lower_root_info)) {
		for (bindex = lower_root_info->bstart;
		     bindex <= lower_root_info->bend; bindex++) {
			struct dentry *d;
			d = lower_root_info->lower_paths[bindex].dentry;
			/* drop refs we took earlier */
			atomic_dec(&d->d_sb->s_active);
			path_put(&lower_root_info->lower_paths[bindex]);
		}
		kfree(lower_root_info->lower_paths);
		kfree(lower_root_info);
		lower_root_info = NULL;
	}

out_free:
	kfree(UNIONFS_SB(sb)->data);
	kfree(UNIONFS_SB(sb));
	sb->s_fs_info = NULL;

out:
	if (lower_root_info && !IS_ERR(lower_root_info)) {
		kfree(lower_root_info->lower_paths);
		kfree(lower_root_info);
	}
	return err;
}
Example #13
/* sb we pass is unionfs's super_block */
int unionfs_interpose(struct dentry *dentry, struct super_block *sb, int flag)
{
	struct inode *hidden_inode;
	struct dentry *hidden_dentry;
	int err = 0;
	struct inode *inode;
	int is_negative_dentry = 1;
	int bindex, bstart, bend;

	print_entry("flag = %d", flag);

	verify_locked(dentry);

	fist_print_dentry("In unionfs_interpose", dentry);

	bstart = dbstart(dentry);
	bend = dbend(dentry);

	/* Make sure that we didn't get a negative dentry. */
	for (bindex = bstart; bindex <= bend; bindex++) {
		if (dtohd_index(dentry, bindex) &&
		    dtohd_index(dentry, bindex)->d_inode) {
			is_negative_dentry = 0;
			break;
		}
	}
	BUG_ON(is_negative_dentry);

	/* We allocate our new inode below, by calling iget.
	 * iget will call our read_inode which will initialize some
	 * of the new inode's fields
	 */

	/* On revalidate we've already got our own inode and just need
	 * to fix it up. */
	if (flag == INTERPOSE_REVAL) {
		inode = dentry->d_inode;
		itopd(inode)->b_start = -1;
		itopd(inode)->b_end = -1;
		atomic_set(&itopd(inode)->uii_generation,
			   atomic_read(&stopd(sb)->usi_generation));

		itohi_ptr(inode) =
		    KZALLOC(sbmax(sb) * sizeof(struct inode *), GFP_KERNEL);
		if (!itohi_ptr(inode)) {
			err = -ENOMEM;
			goto out;
		}
	} else {
		ino_t ino;
		/* get unique inode number for unionfs */
#ifdef UNIONFS_IMAP
		if (stopd(sb)->usi_persistent) {
			err = read_uin(sb, bindex,
				       dtohd_index(dentry,
						   bindex)->d_inode->i_ino,
				       O_CREAT, &ino);
			if (err)
				goto out;
		} else
#endif
			ino = iunique(sb, UNIONFS_ROOT_INO);

		inode = IGET(sb, ino);
		if (!inode) {
			err = -EACCES;	/* should be impossible??? */
			goto out;
		}
	}

	down(&inode->i_sem);
	if (atomic_read(&inode->i_count) > 1)
		goto skip;

	for (bindex = bstart; bindex <= bend; bindex++) {
		hidden_dentry = dtohd_index(dentry, bindex);
		if (!hidden_dentry) {
			set_itohi_index(inode, bindex, NULL);
			continue;
		}
		/* Initialize the hidden inode to the new hidden inode. */
		if (!hidden_dentry->d_inode)
			continue;
		set_itohi_index(inode, bindex, IGRAB(hidden_dentry->d_inode));
	}

	ibstart(inode) = dbstart(dentry);
	ibend(inode) = dbend(dentry);

	/* Use attributes from the first branch. */
	hidden_inode = itohi(inode);

	/* Use different set of inode ops for symlinks & directories */
	if (S_ISLNK(hidden_inode->i_mode))
		inode->i_op = &unionfs_symlink_iops;
	else if (S_ISDIR(hidden_inode->i_mode))
		inode->i_op = &unionfs_dir_iops;

	/* Use different set of file ops for directories */
	if (S_ISDIR(hidden_inode->i_mode))
		inode->i_fop = &unionfs_dir_fops;

	/* properly initialize special inodes */
	if (S_ISBLK(hidden_inode->i_mode) || S_ISCHR(hidden_inode->i_mode) ||
	    S_ISFIFO(hidden_inode->i_mode) || S_ISSOCK(hidden_inode->i_mode))
		init_special_inode(inode, hidden_inode->i_mode,
				   hidden_inode->i_rdev);

	/*
	 * Fix our inode's address operations to that of the lower inode
	 * (Unionfs is FiST-Lite).
	 */
	if (inode->i_mapping->a_ops != hidden_inode->i_mapping->a_ops) {
		fist_dprint(7, "fixing inode 0x%p a_ops (0x%p -> 0x%p)\n",
			    inode, inode->i_mapping->a_ops,
			    hidden_inode->i_mapping->a_ops);
		inode->i_mapping->a_ops = hidden_inode->i_mapping->a_ops;
	}

	/* all well, copy inode attributes */
	fist_copy_attr_all(inode, hidden_inode);

      skip:
	/* only (our) lookup wants to do a d_add */
	switch (flag) {
	case INTERPOSE_DEFAULT:
	case INTERPOSE_REVAL_NEG:
		d_instantiate(dentry, inode);
		break;
	case INTERPOSE_LOOKUP:
		err = PTR_ERR(d_splice_alias(inode, dentry));
		break;
	case INTERPOSE_REVAL:
		/* Do nothing. */
		break;
	default:
		printk(KERN_ERR "Invalid interpose flag passed!\n");
		BUG();
	}

	fist_print_dentry("Leaving unionfs_interpose", dentry);
	fist_print_inode("Leaving unionfs_interpose", inode);
	up(&inode->i_sem);

      out:
	print_exit_status(err);
	return err;
}
Example #14
int compute_svm(SupportVectorMachine *svm,int n,int d,double *x[],int y[],
		int kernel,double kp,double C,double tol,
		double eps,int maxloops,int verbose,double W[])
     /*
       compute svm model. x,y,n,d are the input data.
       kernel is the kernel type (see ml.h), kp is the kernel parameter
       (for gaussian and polynomial kernels), C is the regularization parameter.
       eps and tol determine convergence, maxloops is the maximum number
       of optimization loops, W is an array (of length n) of weights for
       cost-sensitive classification.

       Return value: 0 on success, 1 otherwise.
     */
{
  int i,j;
  int nclasses;
  int *classes;

  svm->n=n;
  svm->d=d;
  svm->C=C;
  svm->tolerance=tol;
  svm->eps=eps;
  svm->two_sigma_squared=kp;
  svm->kernel_type=kernel;
  svm->maxloops=maxloops;
  svm->verbose=verbose;

  svm->b=0.0;

  if(C<=0){
    fprintf(stderr,"compute_svm: regularization parameter C must be > 0\n");
    return 1;
  }
  if(eps<=0){
    fprintf(stderr,"compute_svm: parameter eps must be > 0\n");
    return 1;
  }
  if(tol<=0){
    fprintf(stderr,"compute_svm: parameter tol must be > 0\n");
    return 1;
  }
  if(maxloops<=0){
    fprintf(stderr,"compute_svm: parameter maxloops must be > 0\n");
    return 1;
  }
  if(W){
    for(i=0;i<n;i++)
      if(W[i]<=0){
	fprintf(stderr,"compute_svm: parameter W[%d] must be > 0\n",i);
	return 1;
      }
  }

  switch(kernel){
  case SVM_KERNEL_LINEAR:
    break;
  case SVM_KERNEL_GAUSSIAN:
    if(kp <=0){
      fprintf(stderr,"compute_svm: parameter kp must be > 0\n");
      return 1;
    }
    break;
  case SVM_KERNEL_POLINOMIAL:
    if(kp <=0){
      fprintf(stderr,"compute_svm: parameter kp must be > 0\n");
      return 1;
    }
    break;
  default:
    fprintf(stderr,"compute_svm: kernel not recognized\n");
    return 1;
  }

  nclasses=iunique(y,n, &classes);

  if(nclasses<=0){
    fprintf(stderr,"compute_svm: iunique error\n");
    return 1;
  }
  if(nclasses==1){
    fprintf(stderr,"compute_svm: only 1 class recognized\n");
    return 1;
  }
  if(nclasses==2)
    if(classes[0] != -1 || classes[1] != 1){
      fprintf(stderr,"compute_svm: for binary classification classes must be -1,1\n");
      return 1;
    }
  if(nclasses>2){
    fprintf(stderr,"compute_svm: multiclass classification not allowed\n");
    return 1;
  }

  if(kernel==SVM_KERNEL_LINEAR)
    if(!(svm->w=dvector(d))){
      fprintf(stderr,"compute_svm: out of memory\n");
      return 1;
    }
  if(!(svm->Cw=dvector(n))){
    fprintf(stderr,"compute_svm: out of memory\n");
    return 1;
  }
  if(!(svm->alph=dvector(n))){
    fprintf(stderr,"compute_svm: out of memory\n");
    return 1;
  }
  if(!(svm->error_cache=dvector(n))){
    fprintf(stderr,"compute_svm: out of memory\n");
    return 1;
  }
  if(!(svm->precomputed_self_dot_product=dvector(n))){
    fprintf(stderr,"compute_svm: out of memory\n");
    return 1;
  }
  
  for(i=0;i<n;i++)
    svm->error_cache[i]=-y[i];

  if(W){
    for(i=0;i<n;i++)
      svm->Cw[i]=svm->C * W[i];
  }else{
    for(i=0;i<n;i++)
      svm->Cw[i]=svm->C;
  }    
  

  if(!(svm->x=dmatrix(n,d))){
    fprintf(stderr,"compute_svm: out of memory\n");
    return 1;
  }
  if(!(svm->y=ivector(n))){
    fprintf(stderr,"compute_svm: out of memory\n");
    return 1;
  }

  for(i=0;i<n;i++){
    for(j=0;j<d;j++)
      svm->x[i][j]=x[i][j];
    svm->y[i]=y[i];
  }

  svm_smo(svm);
  
  svm->non_bound_support=svm->bound_support=0;
  for(i=0;i<n;i++){
    if(svm->alph[i]>0){
      if(svm->alph[i]< svm->Cw[i])
	svm->non_bound_support++;
      else
	svm->bound_support++;
    }
  }
  
  free_ivector(classes);

  return 0;
}
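
/*
 * A minimal usage sketch for compute_svm() above, again assuming
 * (hypothetically) that the SupportVectorMachine type, the SVM_KERNEL_*
 * constants and the prototype are visible through the library's ml.h header.
 * Labels must be -1/+1 for binary classification, W may be NULL for uniform
 * costs, and kp is not checked for the linear kernel (see the switch above).
 * The function name below is illustrative only.
 */
#include <stdio.h>
#include "ml.h"

int example_compute_svm(void)
{
  /* four 2-dimensional points, two per class, linearly separable */
  double p0[2] = {0.0, 0.0}, p1[2] = {0.0, 1.0};
  double p2[2] = {3.0, 3.0}, p3[2] = {3.0, 4.0};
  double *x[4] = {p0, p1, p2, p3};
  int y[4] = {-1, -1, 1, 1};
  SupportVectorMachine svm;

  if(compute_svm(&svm, 4, 2, x, y, SVM_KERNEL_LINEAR, 0.0, 1.0,
                 1e-3, 1e-3, 1000, 0, NULL) != 0){
    fprintf(stderr,"example_compute_svm: compute_svm error\n");
    return 1;
  }
  printf("b = %f, non-bound SV = %d, bound SV = %d\n",
         svm.b, svm.non_bound_support, svm.bound_support);
  return 0;
}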
Example #15
static int compute_tree_bagging(ETree *etree,int n,int d,double *x[],
				int y[], int nmodels,int stumps, int minsize)
{
  int i,b;
  int *samples;
  double **trx;
  int *try;

  if(nmodels<1){
    fprintf(stderr,"compute_tree_bagging: nmodels must be greater than 0\n");
    return 1;
  }

  if(stumps != 0 && stumps != 1){
    fprintf(stderr,"compute_tree_bagging: parameter stumps must be 0 or 1\n");
    return 1;
  }

  if(minsize < 0){
    fprintf(stderr,"compute_tree_bagging: parameter minsize must be >= 0\n");
    return 1;
  }

  etree->nclasses=iunique(y,n, &(etree->classes));


  if(etree->nclasses<=0){
    fprintf(stderr,"compute_tree_bagging: iunique error\n");
    return 1;
  }
  if(etree->nclasses==1){
    fprintf(stderr,"compute_tree_bagging: only 1 class recognized\n");
    return 1;
  }

  if(etree->nclasses==2)
    if(etree->classes[0] != -1 || etree->classes[1] != 1){
      fprintf(stderr,"compute_tree_bagging: for binary classification classes must be -1,1\n");
      return 1;
    }
  
  if(etree->nclasses>2)
    for(i=0;i<etree->nclasses;i++)
      if(etree->classes[i] != i+1){
	fprintf(stderr,"compute_tree_bagging: for %d-class classification classes must be 1,...,%d\n",etree->nclasses,etree->nclasses);
	return 1;
      }

  if(!(etree->tree=(Tree *)calloc(nmodels,sizeof(Tree)))){
    fprintf(stderr,"compute_tree_bagging: out of memory\n");
    return 1;
  }
  etree->nmodels=nmodels;
  if(!(etree->weights=dvector(nmodels))){
    fprintf(stderr,"compute_tree_bagging: out of memory\n");
    return 1;
  }

  for(b=0;b<nmodels;b++)
    etree->weights[b]=1.0 / (double) nmodels;
  
  if(!(trx=(double **)calloc(n,sizeof(double*)))){
    fprintf(stderr,"compute_tree_bagging: out of memory\n");
    return 1;
  }
  if(!(try=ivector(n))){
    fprintf(stderr,"compute_tree_bagging: out of memory\n");
    return 1;
  }
  
  for(b=0;b<nmodels;b++){
    if(sample(n, NULL, n, &samples, TRUE,b)!=0){
       fprintf(stderr,"compute_tree_bagging: sample error\n");
       return 1;
    }

    for(i =0;i<n;i++){
      trx[i] = x[samples[i]];
      try[i] = y[samples[i]];
    }

    if(compute_tree(&(etree->tree[b]),n,d,trx,try,stumps,minsize)!=0){
      fprintf(stderr,"compute_tree_bagging: compute_tree error\n");
      return 1;
    }
    free_ivector(samples);

  }

  free(trx);
  free_ivector(try);
    
  return 0;

}



static int compute_tree_aggregate(ETree *etree,int n,int d,double *x[],int y[],
				  int nmodels,int stumps, int minsize)
{
  int i,b;
  int *samples;
  double **trx;
  int *try;
  int indx;

  if(nmodels<1){
    fprintf(stderr,"compute_tree_aggregate: nmodels must be greater than 0\n");
    return 1;
  }

  if(nmodels > n){
    fprintf(stderr,"compute_tree_aggregate: nmodels must be less than n\n");
    return 1;
  }

  if(stumps != 0 && stumps != 1){
    fprintf(stderr,"compute_tree_aggregate: parameter stumps must be 0 or 1\n");
    return 1;
  }

  if(minsize < 0){
    fprintf(stderr,"compute_tree_aggregate: parameter minsize must be >= 0\n");
    return 1;
  }

  etree->nclasses=iunique(y,n, &(etree->classes));

  if(etree->nclasses<=0){
    fprintf(stderr,"compute_tree_aggregate: iunique error\n");
    return 1;
  }
  if(etree->nclasses==1){
    fprintf(stderr,"compute_tree_aggregate: only 1 class recognized\n");
    return 1;
  }

  if(etree->nclasses==2)
    if(etree->classes[0] != -1 || etree->classes[1] != 1){
      fprintf(stderr,"compute_tree_aggregate: for binary classification classes must be -1,1\n");
      return 1;
    }
  
  if(etree->nclasses>2)
    for(i=0;i<etree->nclasses;i++)
      if(etree->classes[i] != i+1){
	fprintf(stderr,"compute_tree_aggregate: for %d-class classification classes must be 1,...,%d\n",etree->nclasses,etree->nclasses);
	return 1;
      }

  if(!(etree->tree=(Tree *)calloc(nmodels,sizeof(Tree)))){
    fprintf(stderr,"compute_tree_aggregate: out of memory\n");
    return 1;
  }
  etree->nmodels=nmodels;
  if(!(etree->weights=dvector(nmodels))){
    fprintf(stderr,"compute_tree_aggregate: out of memory\n");
    return 1;
  }

  for(b=0;b<nmodels;b++)
    etree->weights[b]=1.0 / (double) nmodels;
  
  if(!(trx=(double **)calloc(n,sizeof(double*)))){
    fprintf(stderr,"compute_tree_aggregate: out of memory\n");
    return 1;
  }
  if(!(try=ivector(n))){
    fprintf(stderr,"compute_tree_aggregate: out of memory\n");
    return 1;
  }
  
  if(sample(nmodels, NULL, n, &samples, TRUE,0)!=0){
    fprintf(stderr,"compute_tree_aggregate: sample error\n");
    return 1;
  }

  for(b=0;b<nmodels;b++){
  
    indx=0;
    for(i=0;i<n;i++)
      if(samples[i] == b){
	trx[indx] = x[i];
	try[indx++] = y[i];
      }

    if(compute_tree(&(etree->tree[b]),indx,d,trx,try,stumps,minsize)!=0){
      fprintf(stderr,"compute_tree_aggregate: compute_tree error\n");
      return 1;
    }

  }

  free_ivector(samples);
  free(trx);
  free_ivector(try);
    
  return 0;

}
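
/*
 * Note on the two ensembles above: compute_tree_bagging() draws, for each of
 * the nmodels trees, a fresh sample of n points (sample() over all n indices,
 * apparently with replacement, i.e. a bootstrap replicate), while
 * compute_tree_aggregate() calls sample() once to assign every point to one
 * of the nmodels trees and then trains each tree on its own subset.  In both
 * cases the trees are weighted uniformly with 1/nmodels.
 */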
Example #16
static int compute_svm_bagging(ESupportVectorMachine *esvm,int n,int d,
			       double *x[],int y[],int nmodels,int kernel,
			       double kp,double C,double tol,double eps,
			       int maxloops,int verbose)
{
  int i,b;
  int *samples;
  double **trx;
  int *try;
  int nclasses;
  int *classes;

  if(nmodels<1){
    fprintf(stderr,"compute_svm_bagging: nmodels must be greater than 0\n");
    return 1;
  }

  if(C<=0){
    fprintf(stderr,"compute_svm_bagging: regularization parameter C must be > 0\n");
    return 1;
  }
  if(eps<=0){
    fprintf(stderr,"compute_svm_bagging: parameter eps must be > 0\n");
    return 1;
  }
  if(tol<=0){
    fprintf(stderr,"compute_svm_bagging: parameter tol must be > 0\n");
    return 1;
  }
  if(maxloops<=0){
    fprintf(stderr,"compute_svm_bagging: parameter maxloops must be > 0\n");
    return 1;
  }

  switch(kernel){
  case SVM_KERNEL_LINEAR:
    break;
  case SVM_KERNEL_GAUSSIAN:
    if(kp <=0){
      fprintf(stderr,"compute_svm_bagging: parameter kp must be > 0\n");
      return 1;
    }
    break;
  case SVM_KERNEL_POLINOMIAL:
    if(kp <=0){
      fprintf(stderr,"compute_svm_bagging: parameter kp must be > 0\n");
      return 1;
    }
    break;
  default:
    fprintf(stderr,"compute_svm_bagging: kernel not recognized\n");
    return 1;
  }

  nclasses=iunique(y,n, &classes);

  if(nclasses<=0){
    fprintf(stderr,"compute_svm_bagging: iunique error\n");
    return 1;
  }
  if(nclasses==1){
    fprintf(stderr,"compute_svm_bagging: only 1 class recognized\n");
    return 1;
  }
  if(nclasses==2)
    if(classes[0] != -1 || classes[1] != 1){
      fprintf(stderr,"compute_svm_bagging: for binary classification classes must be -1,1\n");
      return 1;
    }
  if(nclasses>2){
    fprintf(stderr,"compute_svm_bagging: multiclass classification not allowed\n");
    return 1;
  }


  if(!(esvm->svm=(SupportVectorMachine *)
       calloc(nmodels,sizeof(SupportVectorMachine)))){
    fprintf(stderr,"compute_svm_bagging: out of memory\n");
    return 1;
  }
  esvm->nmodels=nmodels;
  if(!(esvm->weights=dvector(nmodels))){
    fprintf(stderr,"compute_svm_bagging: out of memory\n");
    return 1;
  }

  for(b=0;b<nmodels;b++)
    esvm->weights[b]=1.0 / (double) nmodels;
  
  if(!(trx=(double **)calloc(n,sizeof(double*)))){
    fprintf(stderr,"compute_svm_bagging: out of memory\n");
    return 1;
  }
  if(!(try=ivector(n))){
    fprintf(stderr,"compute_svm_bagging: out of memory\n");
    return 1;
  }
  
  for(b=0;b<nmodels;b++){
    if(sample(n, NULL, n, &samples, TRUE,b)!=0){
       fprintf(stderr,"compute_svm_bagging: sample error\n");
       return 1;
    }

    for(i =0;i<n;i++){
      trx[i] = x[samples[i]];
      try[i] = y[samples[i]];
    }

    if(compute_svm(&(esvm->svm[b]),n,d,trx,try,kernel,kp,C,
		   tol,eps,maxloops,verbose,NULL)!=0){
      fprintf(stderr,"compute_svm_bagging: compute_svm error\n");
      return 1;
    }
    free_ivector(samples);

  }

  free(trx);
  free_ivector(classes);
  free_ivector(try);
    
  return 0;

}



static int compute_svm_aggregate(ESupportVectorMachine *esvm,int n,int d,
				 double *x[],int y[],int nmodels,int kernel,
				 double kp,double C,double tol,double eps,
				 int maxloops,int verbose)
{
  int i,b;
  int *samples;
  double **trx;
  int *try;
  int indx;
  int nclasses;
  int *classes;

  if(nmodels<1){
    fprintf(stderr,"compute_svm_aggregate: nmodels must be greater than 0\n");
    return 1;
  }

  if(nmodels > n){
    fprintf(stderr,"compute_svm_aggregate: nmodels must be less than n\n");
    return 1;
  }

  if(C<=0){
    fprintf(stderr,"compute_svm_aggregate: regularization parameter C must be > 0\n");
    return 1;
  }
  if(eps<=0){
    fprintf(stderr,"compute_svm_aggregate: parameter eps must be > 0\n");
    return 1;
  }
  if(tol<=0){
    fprintf(stderr,"compute_svm_aggregate: parameter tol must be > 0\n");
    return 1;
  }
  if(maxloops<=0){
    fprintf(stderr,"compute_svm_aggregate: parameter maxloops must be > 0\n");
    return 1;
  }

  switch(kernel){
  case SVM_KERNEL_LINEAR:
    break;
  case SVM_KERNEL_GAUSSIAN:
    if(kp <=0){
      fprintf(stderr,"compute_svm_aggregate: parameter kp must be > 0\n");
      return 1;
    }
    break;
  case SVM_KERNEL_POLINOMIAL:
    if(kp <=0){
      fprintf(stderr,"compute_svm_aggregate: parameter kp must be > 0\n");
      return 1;
    }
    break;
  default:
    fprintf(stderr,"compute_svm_aggregate: kernel not recognized\n");
    return 1;
  }

  nclasses=iunique(y,n, &classes);

  if(nclasses<=0){
    fprintf(stderr,"compute_svm_aggregate: iunique error\n");
    return 1;
  }
  if(nclasses==1){
    fprintf(stderr,"compute_svm_aggregate: only 1 class recognized\n");
    return 1;
  }
  if(nclasses==2)
    if(classes[0] != -1 || classes[1] != 1){
      fprintf(stderr,"compute_svm_aggregate: for binary classification classes must be -1,1\n");
      return 1;
    }
  if(nclasses>2){
    fprintf(stderr,"compute_svm_aggregate: multiclass classification not allowed\n");
    return 1;
  }

  if(!(esvm->svm=(SupportVectorMachine *)
       calloc(nmodels,sizeof(SupportVectorMachine)))){
    fprintf(stderr,"compute_svm_aggregate: out of memory\n");
    return 1;
  }
  esvm->nmodels=nmodels;
  if(!(esvm->weights=dvector(nmodels))){
    fprintf(stderr,"compute_svm_aggregate: out of memory\n");
    return 1;
  }

  for(b=0;b<nmodels;b++)
    esvm->weights[b]=1.0 / (double) nmodels;
  
  if(!(trx=(double **)calloc(n,sizeof(double*)))){
    fprintf(stderr,"compute_svm_aggregate: out of memory\n");
    return 1;
  }
  if(!(try=ivector(n))){
    fprintf(stderr,"compute_svm_aggregate: out of memory\n");
    return 1;
  }
  
  if(sample(nmodels, NULL, n, &samples, TRUE,0)!=0){
    fprintf(stderr,"compute_svm_aggregate: sample error\n");
    return 1;
  }

  for(b=0;b<nmodels;b++){
  
    indx=0;
    for(i=0;i<n;i++)
      if(samples[i] == b){
	trx[indx] = x[i];
	try[indx++] = y[i];
      }

    if(compute_svm(&(esvm->svm[b]),indx,d,trx,try,kernel,kp,C,
		   tol,eps,maxloops,verbose,NULL)!=0){
      fprintf(stderr,"compute_svm_aggregate: compute_svm error\n");
      return 1;
    }

  }

  free_ivector(samples);
  free(trx);
  free_ivector(classes);
  free_ivector(try);
    
  return 0;

}
Example #17
static int compute_tree_adaboost(ETree *etree,int n,int d,double *x[],int y[],
				 int nmodels,int stumps, int minsize)
{
  int i,b;
  int *samples;
  double **trx;
  int *try;
  double *prob;
  double *prob_copy;
  double sumalpha;
  double eps;
  int *pred;
  double *margin;
  double sumprob;
  

  if(nmodels<1){
    fprintf(stderr,"compute_tree_adaboost: nmodels must be greater than 0\n");
    return 1;
  }

  if(stumps != 0 && stumps != 1){
    fprintf(stderr,"compute_tree_adaboost: parameter stumps must be 0 or 1\n");
    return 1;
  }

  if(minsize < 0){
    fprintf(stderr,"compute_tree_adaboost: parameter minsize must be >= 0\n");
    return 1;
  }

  etree->nclasses=iunique(y,n, &(etree->classes));

  if(etree->nclasses<=0){
    fprintf(stderr,"compute_tree_adaboost: iunique error\n");
    return 1;
  }
  if(etree->nclasses==1){
    fprintf(stderr,"compute_tree_adaboost: only 1 class recognized\n");
    return 1;
  }

  if(etree->nclasses==2)
    if(etree->classes[0] != -1 || etree->classes[1] != 1){
      fprintf(stderr,"compute_tree_adaboost: for binary classification classes must be -1,1\n");
      return 1;
    }
  
  if(etree->nclasses>2){
    fprintf(stderr,"compute_tree_adaboost: multiclass classification not allowed\n");
    return 1;
  }

  if(!(etree->tree=(Tree *)calloc(nmodels,sizeof(Tree)))){
    fprintf(stderr,"compute_tree_adaboost: out of memory\n");
    return 1;
  }

  if(!(etree->weights=dvector(nmodels))){
    fprintf(stderr,"compute_tree_adaboost: out of memory\n");
    return 1;
  }

  if(!(trx=(double **)calloc(n,sizeof(double*)))){
    fprintf(stderr,"compute_tree_adaboost: out of memory\n");
    return 1;
  }
  if(!(try=ivector(n))){
    fprintf(stderr,"compute_tree_adaboost: out of memory\n");
    return 1;
  }
  
  if(!(prob_copy=dvector(n))){
    fprintf(stderr,"compute_tree_adaboost: out of memory\n");
    return 1;
  }
  if(!(prob=dvector(n))){
    fprintf(stderr,"compute_tree_adaboost: out of memory\n");
    return 1;
  }

  if(!(pred=ivector(n))){
    fprintf(stderr,"compute_tree_adaboost: out of memory\n");
    return 1;
  }

  for(i =0;i<n;i++)
    prob[i]=1.0/(double)n;

  etree->nmodels=nmodels;
  sumalpha=0.0;
  for(b=0;b<nmodels;b++){

    for(i =0;i<n;i++)
      prob_copy[i]=prob[i];
    if(sample(n, prob_copy, n, &samples, TRUE,b)!=0){
      fprintf(stderr,"compute_tree_adaboost: sample error\n");
      return 1;
    }

    for(i=0;i<n;i++){
      trx[i] = x[samples[i]];
      try[i] = y[samples[i]];
    }
    
    if(compute_tree(&(etree->tree[b]),n,d,trx,try,stumps,minsize)!=0){
      fprintf(stderr,"compute_tree_adaboost: compute_tree error\n");
      return 1;
    }
    free_ivector(samples);

    eps=0.0;
    for(i=0;i<n;i++){
      pred[i]=predict_tree(&(etree->tree[b]),x[i],&margin);
      if(pred[i] < -1 ){
	fprintf(stderr,"compute_tree_adaboost: predict_tree error\n");
	return 1;
      }
      if(pred[i]==0 || pred[i] != y[i])
	eps += prob[i];
      free_dvector(margin);
    }
    
    if(eps > 0.0 && eps < 0.5){
      etree->weights[b]=0.5 *log((1.0-eps)/eps);
      sumalpha+=etree->weights[b];
    }else{
      etree->nmodels=b;
      break;
    }
      
    sumprob=0.0;
    for(i=0;i<n;i++){
      prob[i]=prob[i]*exp(-etree->weights[b]*y[i]*pred[i]);
      sumprob+=prob[i];
    }

    if(sumprob <=0.0){
      fprintf(stderr,"compute_tree_adaboost: sumprob = 0\n");
      return 1;
    }
    for(i=0;i<n;i++)
      prob[i] /= sumprob;
    
  }
  
  if(etree->nmodels<=0){
    fprintf(stderr,"compute_tree_adaboost: no models produced\n");
    return 1;
  }

  if(sumalpha <=0){
      fprintf(stderr,"compute_tree_adaboost: sumalpha = 0\n");
      return 1;
  }
  for(b=0;b<etree->nmodels;b++)
    etree->weights[b] /= sumalpha;
  
  free(trx);
  free_ivector(try);
  free_ivector(pred);
  free_dvector(prob);
  free_dvector(prob_copy);
  return 0;

}



static void split_node(Node *node,Node *nodeL,Node *nodeR,int classes[],
		       int nclasses)
{
  int **indx;
  double *tmpvar;
  int i,j,k;
  int **npL , **npR;
  double **prL , **prR;
  int totL,totR;
  double a,b;
  double *decrease_in_inpurity;
  double max_decrease=0;
  int splitvar;
  int splitvalue;
  int morenumerous;

  nodeL->priors=dvector(nclasses);
  nodeR->priors=dvector(nclasses);
  nodeL->npoints_for_class=ivector(nclasses);
  nodeR->npoints_for_class=ivector(nclasses);
  indx=imatrix(node->nvar,node->npoints);
  tmpvar=dvector(node->npoints);
  decrease_in_inpurity=dvector(node->npoints-1);
  npL=imatrix(node->npoints,nclasses);
  npR=imatrix(node->npoints,nclasses);
  prL=dmatrix(node->npoints,nclasses);
  prR=dmatrix(node->npoints,nclasses);

  splitvar=0;
  splitvalue=0;
  max_decrease=0;

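  /* Exhaustive split search: for each variable, sort the points on that
     variable and evaluate every candidate cut point by its decrease in
     Gini impurity, keeping the (variable, threshold) pair with the largest
     decrease. */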
  for(i=0;i<node->nvar;i++){
    for(j=0;j<node->npoints;j++)
      tmpvar[j]=node->data[j][i];
    
    for(j=0;j<node->npoints;j++)
      indx[i][j]=j;
    dsort(tmpvar,indx[i],node->npoints,SORT_ASCENDING);

    for(k=0;k<nclasses;k++)
      if(node->classes[indx[i][0]]==classes[k]){
	npL[0][k] = 1;
	npR[0][k] = node->npoints_for_class[k]-npL[0][k];
      } else{
	npL[0][k] = 0;
	npR[0][k] = node->npoints_for_class[k];
      }
    
    for(j=1;j<node->npoints-1;j++)
      for(k=0;k<nclasses;k++)
	if(node->classes[indx[i][j]]==classes[k]){
	  npL[j][k] = npL[j-1][k] +1;
	  npR[j][k] = node->npoints_for_class[k] - npL[j][k];
	}
	else {
	  npL[j][k] = npL[j-1][k];
	  npR[j][k] = node->npoints_for_class[k] - npL[j][k];
	}


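    /* decrease_in_inpurity[j] = Gini(node) - pL*Gini(left) - pR*Gini(right),
       with pL and pR the fractions of points sent to each side; only
       positions where consecutive sorted values differ are valid cut points. */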
    for(j=0;j<node->npoints-1;j++){
      if(node->data[indx[i][j]][i] != node->data[indx[i][j+1]][i]){
	totL = totR = 0;
	
	for(k=0;k<nclasses;k++)
	  totL += npL[j][k];
	for(k=0;k<nclasses;k++)
	  prL[j][k] =  (double) npL[j][k] / (double) totL;
	
	for(k=0;k<nclasses;k++)
	  totR += npR[j][k];
	for(k=0;k<nclasses;k++)
	  prR[j][k] =  (double) npR[j][k] /(double)  totR;
	
	a = (double) totL / (double) node->npoints;
	b = (double) totR / (double) node->npoints ;
	
	decrease_in_inpurity[j] = gini_index(node->priors,nclasses) - 
	  a * gini_index(prL[j],nclasses) - b * gini_index(prR[j],nclasses);
      }
    }

    for(j=0;j<node->npoints-1;j++)
      if(decrease_in_inpurity[j] > max_decrease){
	max_decrease = decrease_in_inpurity[j];
	
	splitvar=i;
	splitvalue=j;

	for(k=0;k<nclasses;k++){
	  nodeL->priors[k]=prL[splitvalue][k];
	  nodeR->priors[k]=prR[splitvalue][k];
	  nodeL->npoints_for_class[k]=npL[splitvalue][k];
	  nodeR->npoints_for_class[k]=npR[splitvalue][k];
	}
      }
  }
  
  
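  /* The split threshold is the midpoint between the two consecutive sorted
     values of the winning variable that straddle the best cut point. */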
  node->var=splitvar;
  node->value=(node->data[indx[splitvar][splitvalue]][node->var]+      
	       node->data[indx[splitvar][splitvalue+1]][node->var])/2.;

  nodeL->nvar=node->nvar;
  nodeL->nclasses=node->nclasses;
  nodeL->npoints=splitvalue+1;

  nodeL->terminal=TRUE;
  if(gini_index(nodeL->priors,nclasses) >0)
    nodeL->terminal=FALSE;

  nodeL->data=(double **) calloc(nodeL->npoints,sizeof(double *));
  nodeL->classes=ivector(nodeL->npoints);

  for(i=0;i<nodeL->npoints;i++){
    nodeL->data[i] = node->data[indx[splitvar][i]];
    nodeL->classes[i] = node->classes[indx[splitvar][i]];
  }
  
  
  morenumerous=0;
  for(k=0;k<nclasses;k++)
    if(nodeL->npoints_for_class[k] > morenumerous){
      morenumerous = nodeL->npoints_for_class[k];
      nodeL->node_class=classes[k];
    }
  


  nodeR->nvar=node->nvar;
  nodeR->nclasses=node->nclasses;
  nodeR->npoints=node->npoints-nodeL->npoints;

  nodeR->terminal=TRUE;
  if(gini_index(nodeR->priors,nclasses) >0)
    nodeR->terminal=FALSE;

  nodeR->data=(double **) calloc(nodeR->npoints,sizeof(double *));
  nodeR->classes=ivector(nodeR->npoints);

  for(i=0;i<nodeR->npoints;i++){
    nodeR->data[i] = node->data[indx[splitvar][nodeL->npoints+i]];
    nodeR->classes[i] = node->classes[indx[splitvar][nodeL->npoints+i]];
  }
  
  morenumerous=0;
  for(k=0;k<nclasses;k++)
    if(nodeR->npoints_for_class[k] > morenumerous){
      morenumerous = nodeR->npoints_for_class[k];
      nodeR->node_class=classes[k];
    }

  free_imatrix(indx,  node->nvar,node->npoints);
  free_imatrix(npL, node->npoints,nclasses);
  free_imatrix(npR, node->npoints,nclasses);
  free_dmatrix(prL, node->npoints,nclasses);
  free_dmatrix(prR, node->npoints,nclasses);
  free_dvector(tmpvar);
  free_dvector(decrease_in_inpurity);

}
Exemple #18
0
/*
 * Create dentry/inode for this file and add it to the dircache.
 */
int
smb_fill_cache(struct file *filp, void *dirent, filldir_t filldir,
	       struct smb_cache_control *ctrl, struct qstr *qname,
	       struct smb_fattr *entry)
{
	struct dentry *newdent, *dentry = filp->f_path.dentry;
	struct inode *newino, *inode = dentry->d_inode;
	struct smb_cache_control ctl = *ctrl;
	int valid = 0;
	int hashed = 0;
	ino_t ino = 0;

	qname->hash = full_name_hash(qname->name, qname->len);

	if (dentry->d_op && dentry->d_op->d_hash)
		if (dentry->d_op->d_hash(dentry, qname) != 0)
			goto end_advance;

	newdent = d_lookup(dentry, qname);

	if (!newdent) {
		newdent = d_alloc(dentry, qname);
		if (!newdent)
			goto end_advance;
	} else {
		hashed = 1;
		memcpy((char *) newdent->d_name.name, qname->name,
		       newdent->d_name.len);
	}

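	/*
	 * No inode attached to this dentry yet: pick an unused inode number
	 * with iunique() and build a new inode from the server attributes
	 * before instantiating the dentry.
	 */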
	if (!newdent->d_inode) {
		smb_renew_times(newdent);
		entry->f_ino = iunique(inode->i_sb, 2);
		newino = smb_iget(inode->i_sb, entry);
		if (newino) {
			smb_new_dentry(newdent);
			d_instantiate(newdent, newino);
			if (!hashed)
				d_rehash(newdent);
		}
	} else
		smb_set_inode_attr(newdent->d_inode, entry);

	if (newdent->d_inode) {
		ino = newdent->d_inode->i_ino;
		newdent->d_fsdata = (void *) ctl.fpos;
		smb_new_dentry(newdent);
	}

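	/*
	 * The current dircache page is full: finalize it and grab the next
	 * page of the directory's page cache for the following entries.
	 */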
	if (ctl.idx >= SMB_DIRCACHE_SIZE) {
		if (ctl.page) {
			kunmap(ctl.page);
			SetPageUptodate(ctl.page);
			unlock_page(ctl.page);
			page_cache_release(ctl.page);
		}
		ctl.cache = NULL;
		ctl.idx  -= SMB_DIRCACHE_SIZE;
		ctl.ofs  += 1;
		ctl.page  = grab_cache_page(&inode->i_data, ctl.ofs);
		if (ctl.page)
			ctl.cache = kmap(ctl.page);
	}
	if (ctl.cache) {
		ctl.cache->dentry[ctl.idx] = newdent;
		valid = 1;
	}
	dput(newdent);

end_advance:
	if (!valid)
		ctl.valid = 0;
	if (!ctl.filled && (ctl.fpos == filp->f_pos)) {
		if (!ino)
			ino = find_inode_number(dentry, qname);
		if (!ino)
			ino = iunique(inode->i_sb, 2);
		ctl.filled = filldir(dirent, qname->name, qname->len,
				     filp->f_pos, ino, DT_UNKNOWN);
		if (!ctl.filled)
			filp->f_pos += 1;
	}
	ctl.fpos += 1;
	ctl.idx  += 1;
	*ctrl = ctl;
	return (ctl.valid || !ctl.filled);
}
Exemple #19
0
static int cifs_filldir(char *pfindEntry, struct file *file, filldir_t filldir,
			void *direntry, char *scratch_buf, unsigned int max_len)
{
	int rc = 0;
	struct qstr qstring;
	struct cifsFileInfo *pCifsF;
	u64    inum;
	ino_t  ino;
	struct super_block *sb;
	struct cifs_sb_info *cifs_sb;
	struct dentry *tmp_dentry;
	struct cifs_fattr fattr;

	/* get filename and len into qstring */
	/* get dentry */
	/* decide whether to create and populate inode */
	if ((direntry == NULL) || (file == NULL))
		return -EINVAL;

	pCifsF = file->private_data;

	if ((scratch_buf == NULL) || (pfindEntry == NULL) || (pCifsF == NULL))
		return -ENOENT;

	rc = cifs_entry_is_dot(pfindEntry, pCifsF);
	/* skip . and .. since we added them first */
	if (rc != 0)
		return 0;

	sb = file->f_path.dentry->d_sb;
	cifs_sb = CIFS_SB(sb);

	qstring.name = scratch_buf;
	rc = cifs_get_name_from_search_buf(&qstring, pfindEntry,
			pCifsF->srch_inf.info_level,
			pCifsF->srch_inf.unicode, cifs_sb,
			max_len, &inum /* returned */);

	if (rc)
		return rc;

	if (pCifsF->srch_inf.info_level == SMB_FIND_FILE_UNIX)
		cifs_unix_basic_to_fattr(&fattr,
				 &((FILE_UNIX_INFO *) pfindEntry)->basic,
				 cifs_sb);
	else if (pCifsF->srch_inf.info_level == SMB_FIND_FILE_INFO_STANDARD)
		cifs_std_info_to_fattr(&fattr, (FIND_FILE_STANDARD_INFO *)
					pfindEntry, cifs_sb);
	else
		cifs_dir_info_to_fattr(&fattr, (FILE_DIRECTORY_INFO *)
					pfindEntry, cifs_sb);

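	/*
	 * Use the server-provided unique id when available and allowed by the
	 * mount options; otherwise generate one with iunique() (above ROOT_I)
	 * and disable server inode numbers for this mount.
	 */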
	if (inum && (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM)) {
		fattr.cf_uniqueid = inum;
	} else {
		fattr.cf_uniqueid = iunique(sb, ROOT_I);
		cifs_autodisable_serverino(cifs_sb);
	}

	ino = cifs_uniqueid_to_ino_t(fattr.cf_uniqueid);
	tmp_dentry = cifs_readdir_lookup(file->f_dentry, &qstring, &fattr);

	rc = filldir(direntry, qstring.name, qstring.len, file->f_pos,
		     ino, fattr.cf_dtype);

	/*
	 * we can not return filldir errors to the caller since they are
	 * "normal" when the stat blocksize is too small - we return remapped
	 * error instead
	 *
	 * FIXME: This looks bogus. filldir returns -EOVERFLOW in the above
	 * case already. Why should we be clobbering other errors from it?
	 */
	if (rc) {
		cFYI(1, ("filldir rc = %d", rc));
		rc = -EOVERFLOW;
	}
	dput(tmp_dentry);
	return rc;
}