/*
 * Exponential-smoothing update step.
 *
 * Folds the incoming value into the predictor state:
 *     next = last_pred + gain * (value - last_pred)
 * Special case: while the series holds at most one entry, the predictor
 * is seeded with the first value in the series instead.  The real
 * timestamp argument is accepted for interface uniformity but unused.
 */
void
UpdateExpSmooth(char *state, double ts, double value)
{
    struct exp_smooth_state *es = (struct exp_smooth_state *)state;
    double next_pred = es->last_pred + es->gain * (value - es->last_pred);

    if (F_COUNT(es->series) <= 1) {
        /* With a single sample, the best prediction is that sample. */
        es->last_pred = F_VAL(es->series, F_FIRST(es->series));
    } else {
        es->last_pred = next_pred;
    }
}
/*
 * This is VOP_CLOSE().  Called when a file pointer is being cleaned
 * up--guaranteed only once!
 *
 * Forwards the final close of an MVFS file to the vnode layer via
 * VOP_CLOSE() and maps the resulting UNIX errno to a Linux errno.
 * Shadow (non-MVFS) inodes are a no-op and return 0.
 */
int
vnode_fop_release(
    INODE_T *ino_p,
    FILE_T *file_p
)
{
    int status = 0;
    VNODE_T *vp;
    MOFFSET_T off = 0;
    CALL_DATA_T cd;
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,18) || defined(SLES10SP2)
    /* Newer kernels: file pointer and lock owner travel in a context
     * struct instead of being smuggled through the file_ctx cast. */
    mdki_vop_close_ctx_t ctx;
#endif

    ASSERT_KERNEL_UNLOCKED();
    ASSERT(MDKI_INOISOURS(ino_p));
    if (!MDKI_INOISMVFS(ino_p)) {
        /* Shadow inode: nothing of ours to release. */
        MDKI_TRACE(TRACE_CLOSE, "shadow no-op fp=%p ip=%p\n",
                   file_p, ino_p);
        return 0;                       /* XXX shadow something? */
    }

    mdki_linux_init_call_data(&cd);
    vp = ITOV(ino_p);
    MDKI_TRACE(TRACE_CLOSE,
              "%s: fp=%p vp=%p fcount=%ld pvt=%p rfcount=%ld pid=%ld\n",
               __func__, file_p, vp, (long)F_COUNT(file_p),
               REALFILE(file_p),
               REALFILE(file_p) ? (long)F_COUNT(REALFILE(file_p)) : 0,
               (long)mdki_curpid());

    /* release() is called exactly once, at last close: pass
     * VNODE_LASTCLOSE_COUNT so the vnode layer knows this is final. */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,18) || defined(SLES10SP2)
    ctx.file_p = file_p;
    ctx.owner_id = NULL;                /* no lock owner at release time */
    status = VOP_CLOSE(vp, vnlayer_filep_to_flags(file_p),
                       VNODE_LASTCLOSE_COUNT, off, &cd, (file_ctx *)&ctx);
#else
    status = VOP_CLOSE(vp, vnlayer_filep_to_flags(file_p),
                       VNODE_LASTCLOSE_COUNT, off, &cd, (file_ctx *)file_p);
#endif
    /* Translate UNIX-style error to the Linux negative-errno convention. */
    status = mdki_errno_unix_to_linux(status);
    mdki_linux_destroy_call_data(&cd);
    return status;
}
/*
 * flush() file operation: forwards a non-final close (e.g. close() of one
 * of several references, or kernel-initiated flush) to VOP_CLOSE() with
 * the current reference count, so the vnode layer can distinguish it from
 * the last close done in vnode_fop_release().
 */
extern int
vnode_fop_flush(
    FILE_T *fp
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,18) || defined(SLES10SP2)
    , fl_owner_t id
#endif
)
{
    INODE_T *ip = fp->f_dentry->d_inode;
    int err;
    CALL_DATA_T cd;
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,18) || defined(SLES10SP2)
    /* Newer kernels: carry file pointer and lock owner in a context. */
    mdki_vop_close_ctx_t ctx;
#endif

    ASSERT(MDKI_INOISOURS(ip));
    if (!MDKI_INOISMVFS(ip)) {
        /* Shadow files are swapped to the real object at open time, so a
         * flush on a shadow inode is unexpected; log but succeed. */
        MDKI_VFS_LOG(VFS_LOG_ERR,
                    "%s shouldn't be called? (files swapped at open): fp %p\n",
                     __func__, fp);
        return 0;                       /* don't fail the operation, though */
    }
    mdki_linux_init_call_data(&cd);
    /* flush() runs while the file is still referenced; the final close
     * goes through vnode_fop_release() instead. */
    ASSERT(F_COUNT(fp) != VNODE_LASTCLOSE_COUNT);
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,18) || defined(SLES10SP2)
    ctx.file_p = fp;
    ctx.owner_id = id;
    err = VOP_CLOSE(ITOV(ip), vnlayer_filep_to_flags(fp), F_COUNT(fp),
                    (MOFFSET_T) 0, &cd, (file_ctx *)&ctx);
#else
    err = VOP_CLOSE(ITOV(ip), vnlayer_filep_to_flags(fp), F_COUNT(fp),
                    (MOFFSET_T) 0, &cd, (file_ctx *)fp);
#endif
    /* Translate UNIX-style error to the Linux negative-errno convention. */
    err = mdki_errno_unix_to_linux(err);
    mdki_linux_destroy_call_data(&cd);
    return err;
}
/*
 * open() file operation for MVFS inodes: validates the inode, runs the
 * generic Linux open checks, then forwards to VOP_OPEN().  If VOP_OPEN()
 * were to hand back a different vnode than it was given, that is treated
 * as fatal (BUG()).
 */
int
vnode_fop_open(
    INODE_T *ino_p,
    FILE_T *file_p
)
{
    VNODE_T *orig_vp;
    VNODE_T *opened_vp;
    CALL_DATA_T cd;
    int err;

    /* No asserts on BKL; locking protocol is changing */
    ASSERT(MDKI_INOISOURS(ino_p));
    if (!MDKI_INOISMVFS(ino_p)) {
        /* Shadow files are swapped to the real object at open time, so we
         * should never see one here. */
        MDKI_VFS_LOG(VFS_LOG_ERR,
                     "%s shouldn't be called on shadow?"
                     " (files swapped at open): vp %p fp %p\n",
                     __func__, ino_p, file_p);
        return -ENOSYS;
    }

    /* Standard Linux open-time checks (e.g. large-file handling). */
    err = generic_file_open(ino_p, file_p);
    if (err != 0) {
        return err;
    }

    orig_vp = ITOV(ino_p);
    opened_vp = orig_vp;
    mdki_linux_init_call_data(&cd);
    err = VOP_OPEN(&opened_vp, vnlayer_filep_to_flags(file_p), &cd,
                   (file_ctx *)file_p);
    /* Translate UNIX-style error to the Linux negative-errno convention. */
    err = mdki_errno_unix_to_linux(err);
    mdki_linux_destroy_call_data(&cd);

    MDKI_TRACE(TRACE_OPEN, "%s opened vp=%p fp=%p pvt=%p pcnt=%ld\n",
               __func__, opened_vp, file_p, REALFILE(file_p),
               REALFILE(file_p) ? (long)F_COUNT(REALFILE(file_p)) : 0);

    if (orig_vp != opened_vp) {
        /* VOP_OPEN must not substitute a different vnode. */
        printk("switcheroo on open? %p became %p\n", orig_vp, opened_vp); /* XXX */
        BUG();
    }
    return err;
}
/*
 * Sliding-window median update step.
 *
 * Clamps the live sample count to the configured buffer size, advances an
 * artificial entry-based clock, and inserts the new value into the sorted
 * median buffer via MSort().  The real timestamp argument is accepted for
 * interface uniformity but unused.
 */
void
UpdateMedian(char *state, double ts, double value)
{
    struct median_state *ms = (struct median_state *)state;
    int live = F_COUNT(ms->series);

    /* M_count holds the amount of data currently in the median buffer,
     * capped at the configured window size M_size. */
    if (live > ms->M_size) {
        live = ms->M_size;
    }
    ms->M_count = live;

    /*
     * Use an artificial timestamp instead of the real one so aging is
     * measured in entries rather than seconds.
     */
    ms->artificial_time += 1;

    /* Keep the median buffer sorted with the new value folded in. */
    MSort(ms->M_array, ms->M_ts, value, ms->artificial_time, live);
}
/*
 * Adaptive-window median update step.
 *
 * Maintains a sorted median buffer like UpdateMedian(), then adapts the
 * window size s->win by +/-1 toward whichever of three candidate medians
 * (smaller window, current window, larger window) would have predicted
 * the incoming value with the smallest squared error.  The real
 * timestamp argument is accepted but unused.
 */
void
UpdateAdMedian(char *state, double ts, double value)
{
    struct ad_median_state *s = (struct ad_median_state *)state;
    int curr_size;
    int win;
    double less_val;
    double eq_val;
    double more_val;
    double less_err;
    double eq_err;
    double more_err;
    int lo_offset;
    int hi_offset;

    curr_size = F_COUNT(s->series);
    /*
     * Clamp the live sample count to the maximum buffer size s->max;
     * M_count is the current amount of data in the median buffer.
     */
    if(curr_size > s->max) {
        s->M_count = curr_size = s->max;
    } else {
        s->M_count = curr_size;
    }
    /*
     * update the sorted list
     */
    /*
     * increment the artificial time stamp
     */
    s->artificial_time = s->artificial_time + 1;
    /*
     * use artificial time stamp instead of real one to
     * keep things in terms of entries instead of seconds
     */
    MSort(s->M_array,s->M_ts,value,s->artificial_time,curr_size);
    /*
     * calculate the window based on how much data there is
     */
    if(curr_size > s->win) {
        win = s->win;
    } else {
        win = curr_size;
    }
    /*
     * find the median using the current
     * window size
     */
    eq_val = FindMedian(s->M_array,
                        s->M_ts,
                        s->M_count,
                        s->artificial_time,
                        win);
    /*
     * we want to wait until there is enough data before we start
     * to adapt.  We don't start to adjust s->win until there is
     * enough data to get out to the max window size
     */
    if(curr_size < s->max) {
        return;
    }
    /* Clamp the downward probe so the smaller candidate stays valid. */
    if((win - s->offset) < 0) {
        lo_offset = win - 1;
    } else {
        lo_offset = s->offset;
    }
    /* Clamp the upward probe so the larger candidate stays within the
     * buffered data.  NOTE(review): when win == M_count this yields -1;
     * confirm FindMedian() tolerates a non-positive window argument. */
    if((win + s->offset) > s->M_count) {
        hi_offset = s->M_count - win - 1;
    } else {
        hi_offset = s->offset;
    }
    /*
     * find the median for a smaller window -- offset
     * controls how much smaller or bigger the window should be
     * that we consider
     *
     * NOTE(review): lo_offset (typically s->offset itself) is passed
     * directly as the window size here; given the comments, the intent
     * may have been a window of (win - lo_offset).  Confirm against
     * FindMedian()'s contract before changing.
     */
    less_val = FindMedian(s->M_array,
                          s->M_ts,
                          s->M_count,
                          s->artificial_time,
                          lo_offset);
    /*
     * find the median for a bigger window -- offset
     * controls how much smaller or bigger the window should be
     * that we consider
     *
     * NOTE(review): same concern as above -- possibly intended
     * (win + hi_offset) rather than hi_offset as the window size.
     */
    more_val = FindMedian(s->M_array,
                          s->M_ts,
                          s->M_count,
                          s->artificial_time,
                          hi_offset);
    /*
     * now, calculate the errors (squared prediction error of each
     * candidate median against the value that just arrived)
     */
    less_err = (value - less_val) * (value - less_val);
    more_err = (value - more_val) * (value - more_val);
    eq_err = (value - eq_val) * (value - eq_val);
    /*
     * adapt the window according to the direction giving us the
     * smallest error
     */
    if(less_err < eq_err) {
        if(less_err < more_err) {
            win = win - 1;
        } else if(more_err < eq_err) {
            win = win + 1;
        }
    } else if(more_err < eq_err) {
        if(more_err < less_err) {
            win = win + 1;
        } else if(less_err < eq_err) {
            win = win - 1;
        }
    }
    s->win = win;
    return;
}
/*
 * open() file operation for shadow files: swaps the file structure over
 * from the shadow dentry/mount/fops to the real underlying object, then
 * invokes the real open routine.  On any failure the original shadow
 * state is restored so the upper layer's reference accounting stays
 * balanced.
 *
 * Fix: write access on an inode is only ever acquired for FMODE_WRITE
 * opens (see the get_write_access() call below, and the VFS does the
 * same for the shadow inode before calling us).  The put_write_access()
 * calls on both the error and success paths were unconditional, which
 * would underflow i_writecount for read-only opens; they are now guarded
 * by the same FMODE_WRITE test.
 */
extern int
vnode_shadow_fop_open(
    INODE_T *inode,
    FILE_T *file
)
{
    int err = 0;
    INODE_T *real_inode;
    DENT_T *rdentry = NULL;
    DENT_T *oldent;
    struct file_operations *oldfops;
    struct vfsmount *oldmnt, *newmnt;
    VNODE_T *cvp;

    oldmnt = file->f_vfsmnt;
    oldent = file->f_dentry;
    ASSERT(D_COUNT(oldent));

    /* The Linux kernel has stopped ignoring the O_DIRECT flag.
     * The problem is that they wait until after they call the fop open
     * function to check the inode to see if it will support direct I/O.
     * But they get the inode pointer before they call us and check the
     * inode after we return so they never check the actual inode we open
     * but only the shadow one.  Their error handling never comes back to
     * us and they release their old pointers and not our new ones.  The
     * only choice we have is to not allow O_DIRECT on shadow files.
     */
    if (file->f_flags & O_DIRECT) {
        err = -EINVAL;
        goto out_nolock;
    }

    /* Get the real dentry */
    rdentry = REALDENTRY_LOCKED(oldent, &cvp);
    if (rdentry == NULL) {
        err = -ENOENT;
        goto out_nolock;
    }
    VNODE_DGET(rdentry);                /* protect rdentry->d_inode */
    if (rdentry->d_inode == NULL) {
        /* delete race */
        err = -ENOENT;
        goto out;
    }
    newmnt = MDKI_MNTGET(REALVFSMNT(oldent));
    if (newmnt == NULL) {
        err = -EOPNOTSUPP;              /* XXX */
        goto out;
    }

    /* Check that we can write to this file.  Clean up the count on the
     * shadow inode.  Write access is taken ONLY for write-mode opens;
     * every put_write_access() below must honor the same condition.
     */
    if (file->f_mode & FMODE_WRITE) {
        err = get_write_access(rdentry->d_inode);
        if (err) {
            MDKI_MNTPUT(newmnt);
            goto out;
        }
    }

    real_inode = rdentry->d_inode;

    /*
     * Swap the file structure contents to point at the underlying object.
     */
    /* In Linux 2.6 they added the mapping stuff to the file so we have to
     * set that up here, too.
     */
    file->f_mapping = real_inode->i_mapping;
    VNLAYER_RA_STATE_INIT(&(file->f_ra), file->f_mapping);
    file->f_dentry = VNODE_DGET(rdentry);
    oldfops = file->f_op;
    file->f_vfsmnt = newmnt;
    file->f_op = fops_get(real_inode->i_fop);
    if (real_inode->i_fop && !file->f_op)
        /* If we failed to get the reference to a non-NULL op, bail out */
        err = -EIO;                     /* XXX? better error code */
    if (!err) {
        /* Move the file to the file list for the real superblock
         * and remove it from the shadow list */
        /* It would be better to use file_move() but it's not exported */
        file_list_lock();
        list_del(&file->f_list);
        list_add(&file->f_list, &real_inode->i_sb->s_files);
        file_list_unlock();
        if (file->f_op && file->f_op->open) {
            err = (*file->f_op->open)(real_inode, file);
            if (err) {
                /* restore our file to the list on our super block */
                file_list_lock();
                list_del(&file->f_list);
                list_add(&file->f_list, &oldent->d_inode->i_sb->s_files);
                file_list_unlock();
            }
        }
    }
    if (err) {
        /* MUST put back old dentry/fops to get accounting right in upper
         * layer.
         */
        /* Release the write access taken above -- but only if we actually
         * took it (write-mode open); otherwise i_writecount underflows. */
        if (file->f_mode & FMODE_WRITE)
            put_write_access(rdentry->d_inode);
        if (file->f_dentry)
            VNODE_DPUT(file->f_dentry);
        if (file->f_op)
            fops_put(file->f_op);
        MDKI_MNTPUT(file->f_vfsmnt);
        file->f_vfsmnt = oldmnt;
        file->f_dentry = oldent;
        file->f_op = oldfops;
    } else {
        /* The VFS took write access on the shadow inode only for
         * write-mode opens; release it under the same condition. */
        if (file->f_mode & FMODE_WRITE)
            put_write_access(oldent->d_inode);
        VNODE_DPUT(oldent);
        /* Drop reference now that we've dropped our use of the file ops */
        fops_put(oldfops);
        MDKI_MNTPUT(oldmnt);
    }
  out:
    VNODE_DPUT(rdentry);
    REALDENTRY_UNLOCK(oldent, cvp);
  out_nolock:
    MDKI_TRACE(TRACE_OPEN,
               "%s: opened vp=%p fp=%p rdent=%p rdcnt=%d fcnt=%d"
               ", err %d\n", __func__, inode, file, rdentry,
               rdentry ? D_COUNT(rdentry) : 0, F_COUNT(file), -err);
    return(err);
}