Example 1
void
ptyfs_itimes(struct ptyfsnode *ptyfs, const struct timespec *acc,
    const struct timespec *mod, const struct timespec *cre)
{
	struct timespec now;
 
	KASSERT(ptyfs->ptyfs_flag & (PTYFS_ACCESS|PTYFS_CHANGE|PTYFS_MODIFY));

	getnanotime(&now);
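	/* any timestamp the caller leaves NULL falls back to the current time */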
	if (ptyfs->ptyfs_flag & PTYFS_ACCESS) {
		if (acc == NULL)
			acc = &now;
		ptyfs->ptyfs_atime = *acc;
	}
	if (ptyfs->ptyfs_flag & PTYFS_MODIFY) {
		if (mod == NULL)
			mod = &now;
		ptyfs->ptyfs_mtime = *mod;
	}
	if (ptyfs->ptyfs_flag & PTYFS_CHANGE) {
		if (cre == NULL)
			cre = &now;
		ptyfs->ptyfs_ctime = *cre;
	}
	ptyfs->ptyfs_flag &= ~(PTYFS_ACCESS|PTYFS_CHANGE|PTYFS_MODIFY);
}
Example 2
int
ptyfs_read(void *v)
{
	struct vop_read_args /* {
		struct vnode *a_vp;
		struct uio *a_uio;
		int  a_ioflag;
		kauth_cred_t a_cred;
	} */ *ap = v;
	struct timespec ts;
	struct vnode *vp = ap->a_vp;
	struct ptyfsnode *ptyfs = VTOPTYFS(vp);
	int error;

	if (vp->v_type == VDIR)
		return EISDIR;

	ptyfs->ptyfs_status |= PTYFS_ACCESS;
	/* hardclock() resolution is good enough for ptyfs */
	getnanotime(&ts);
	(void)ptyfs_update(vp, &ts, &ts, 0);

	switch (ptyfs->ptyfs_type) {
	case PTYFSpts:
	case PTYFSptc:
		VOP_UNLOCK(vp);
		error = cdev_read(vp->v_rdev, ap->a_uio, ap->a_ioflag);
		vn_lock(vp, LK_RETRY|LK_EXCLUSIVE);
		return error;
	default:
		return EOPNOTSUPP;
	}
}
Example 3
/*
 * initialize and allocate VM and memory for pipe
 */
int
pipe_create(struct pipe *cpipe)
{
	int error;

	/* so pipe_free_kmem() doesn't follow junk pointer */
	cpipe->pipe_buffer.buffer = NULL;
	/*
	 * protect so pipeclose() doesn't follow a junk pointer
	 * if pipespace() fails.
	 */
	bzero(&cpipe->pipe_sel, sizeof cpipe->pipe_sel);
	cpipe->pipe_state = 0;
	cpipe->pipe_peer = NULL;
	cpipe->pipe_busy = 0;

	error = pipespace(cpipe, PIPE_SIZE);
	if (error != 0)
		return (error);

	getnanotime(&cpipe->pipe_ctime);
	cpipe->pipe_atime = cpipe->pipe_ctime;
	cpipe->pipe_mtime = cpipe->pipe_ctime;
	cpipe->pipe_pgid = NO_PID;

	return (0);
}
Example 4
int
deupdat(struct denode *dep, int waitfor)
{
	struct buf *bp;
	struct direntry *dirp;
	int error;
	struct timespec ts;

	if (DETOV(dep)->v_mount->mnt_flag & MNT_RDONLY)
		return (0);
	getnanotime(&ts);
	DETIMES(dep, &ts, &ts, &ts);
	if ((dep->de_flag & DE_MODIFIED) == 0)
		return (0);
	dep->de_flag &= ~DE_MODIFIED;
	if (dep->de_Attributes & ATTR_DIRECTORY)
		return (0);
	if (dep->de_refcnt <= 0)
		return (0);
	error = readde(dep, &bp, &dirp);
	if (error)
		return (error);
	DE_EXTERNALIZE(dirp, dep);
	if (waitfor)
		return (bwrite(bp));
	else {
		bdwrite(bp);
		return (0);
	}
}
Example 5
int
ptyfs_write(void *v)
{
	struct vop_write_args /* {
		struct vnode *a_vp;
		struct uio *a_uio;
		int  a_ioflag;
		kauth_cred_t a_cred;
	} */ *ap = v;
	struct timespec ts;
	struct vnode *vp = ap->a_vp;
	struct ptyfsnode *ptyfs = VTOPTYFS(vp);
	int error;

	ptyfs->ptyfs_flag |= PTYFS_MODIFY;
	getnanotime(&ts);
	(void)ptyfs_update(vp, &ts, &ts, 0);

	switch (ptyfs->ptyfs_type) {
	case PTYFSpts:
	case PTYFSptc:
		VOP_UNLOCK(vp, 0);
		error = cdev_write(vp->v_rdev, ap->a_uio, ap->a_ioflag);
		vn_lock(vp, LK_RETRY|LK_EXCLUSIVE);
		return error;
	default:
		return EOPNOTSUPP;
	}
}
Example 6
static __inline void
acpi_cmbat_info_updated(struct timespec *lastupdated)
{

	if (lastupdated != NULL) {
		getnanotime(lastupdated);
	}
}
Example 7
uint64_t
dtrace_gethrestime(void)
{
	struct      timespec curtime;

	getnanotime(&curtime);

	return (curtime.tv_sec * 1000000000UL + curtime.tv_nsec);
}
Example 8
static void
acpi_cmbat_info_updated(struct timespec *lastupdated)
{

    ACPI_SERIAL_ASSERT(cmbat);

    if (lastupdated != NULL)
	getnanotime(lastupdated);
}
Example 9
static void wt_status_collect_untracked(struct wt_status *s)
{
	int i;
	struct dir_struct dir;
	uint64_t t_begin = getnanotime();

	if (!s->show_untracked_files)
		return;

	memset(&dir, 0, sizeof(dir));
	if (s->show_untracked_files != SHOW_ALL_UNTRACKED_FILES)
		dir.flags |=
			DIR_SHOW_OTHER_DIRECTORIES | DIR_HIDE_EMPTY_DIRECTORIES;
	if (s->show_ignored_files)
		dir.flags |= DIR_SHOW_IGNORED_TOO;
	else
		dir.untracked = the_index.untracked;
	setup_standard_excludes(&dir);

	fill_directory(&dir, &s->pathspec);

	for (i = 0; i < dir.nr; i++) {
		struct dir_entry *ent = dir.entries[i];
		if (cache_name_is_other(ent->name, ent->len) &&
		    dir_path_match(ent, &s->pathspec, 0, NULL))
			string_list_insert(&s->untracked, ent->name);
		free(ent);
	}

	for (i = 0; i < dir.ignored_nr; i++) {
		struct dir_entry *ent = dir.ignored[i];
		if (cache_name_is_other(ent->name, ent->len) &&
		    dir_path_match(ent, &s->pathspec, 0, NULL))
			string_list_insert(&s->ignored, ent->name);
		free(ent);
	}

	free(dir.entries);
	free(dir.ignored);
	clear_directory(&dir);

	if (advice_status_u_option)
		s->untracked_in_ms = (getnanotime() - t_begin) / 1000000;
}
Example 10
int
lwp_park(struct timespec *ts, const void *hint)
{
	struct timespec tsx;
	sleepq_t *sq;
	kmutex_t *mp;
	wchan_t wchan;
	int timo, error;
	lwp_t *l;

	/* Fix up the given timeout value. */
	if (ts != NULL) {
		getnanotime(&tsx);
		timespecsub(ts, &tsx, &tsx);
		if (tsx.tv_sec < 0 || (tsx.tv_sec == 0 && tsx.tv_nsec <= 0))
			return ETIMEDOUT;
		if ((error = itimespecfix(&tsx)) != 0)
			return error;
		timo = tstohz(&tsx);
		KASSERT(timo != 0);
	} else
		timo = 0;

	/* Find and lock the sleep queue. */
	l = curlwp;
	wchan = lwp_park_wchan(l->l_proc, hint);
	sq = sleeptab_lookup(&lwp_park_tab, wchan, &mp);

	/*
	 * Before going the full route and blocking, check to see if an
	 * unpark op is pending.
	 */
	lwp_lock(l);
	if ((l->l_flag & (LW_CANCELLED | LW_UNPARKED)) != 0) {
		l->l_flag &= ~(LW_CANCELLED | LW_UNPARKED);
		lwp_unlock(l);
		mutex_spin_exit(mp);
		return EALREADY;
	}
	lwp_unlock_to(l, mp);
	l->l_biglocks = 0;
	sleepq_enqueue(sq, wchan, "parked", &lwp_park_sobj);
	error = sleepq_block(timo, true);
	switch (error) {
	case EWOULDBLOCK:
		error = ETIMEDOUT;
		break;
	case ERESTART:
		error = EINTR;
		break;
	default:
		/* nothing */
		break;
	}
	return error;
}
Example 11
static int
tap_dev_write(int unit, struct uio *uio, int flags)
{
	struct tap_softc *sc =
	    device_lookup_private(&tap_cd, unit);
	struct ifnet *ifp;
	struct mbuf *m, **mp;
	int error = 0;
	int s;

	if (sc == NULL)
		return (ENXIO);

	getnanotime(&sc->sc_mtime);
	ifp = &sc->sc_ec.ec_if;

	/* One write, one packet, that's the rule */
	MGETHDR(m, M_DONTWAIT, MT_DATA);
	if (m == NULL) {
		ifp->if_ierrors++;
		return (ENOBUFS);
	}
	m->m_pkthdr.len = uio->uio_resid;

	mp = &m;
	while (error == 0 && uio->uio_resid > 0) {
		if (*mp != m) {
			MGET(*mp, M_DONTWAIT, MT_DATA);
			if (*mp == NULL) {
				error = ENOBUFS;
				break;
			}
		}
		(*mp)->m_len = min(MHLEN, uio->uio_resid);
		error = uiomove(mtod(*mp, void *), (*mp)->m_len, uio);
		mp = &(*mp)->m_next;
	}
	if (error) {
		ifp->if_ierrors++;
		m_freem(m);
		return (error);
	}

	ifp->if_ipackets++;
	m_set_rcvif(m, ifp);

	bpf_mtap(ifp, m);
	s = splnet();
	if_input(ifp, m);
	splx(s);

	return (0);
}
Example 12
int
msdosfs_close(void *v)
{
	struct vop_close_args *ap = v;
	struct vnode *vp = ap->a_vp;
	struct denode *dep = VTODE(vp);
	struct timespec ts;

	if (vp->v_usecount > 1 && !VOP_ISLOCKED(vp)) {
		getnanotime(&ts);
		DETIMES(dep, &ts, &ts, &ts);
	}
	return (0);
}
Example 13
int
smbfs_smb_create(struct smbnode *dnp, const char *name, int nmlen,
	struct smb_cred *scred)
{
	struct smb_rq *rqp;
	struct smb_share *ssp = dnp->n_mount->sm_share;
	struct mbchain *mbp;
	struct mdchain *mdp;
	struct timespec ctime;
	u_int8_t wc;
	u_int16_t fid;
	u_long tm;
	int error;

	error = smb_rq_alloc(SSTOCP(ssp), SMB_COM_CREATE_NEW, scred, &rqp);
	if (error)
		return error;
	smb_rq_getrequest(rqp, &mbp);

	/* get current time */
	getnanotime(&ctime);
	smb_time_local2server(&ctime, SSTOVC(ssp)->vc_sopt.sv_tz, &tm);

	smb_rq_wstart(rqp);
	mb_put_uint16le(mbp, SMB_FA_ARCHIVE);	/* attributes  */
	mb_put_uint32le(mbp, tm);
	smb_rq_wend(rqp);

	smb_rq_bstart(rqp);
	mb_put_uint8(mbp, SMB_DT_ASCII);
	error = smbfs_fullpath(mbp, SSTOVC(ssp), dnp, name, nmlen);
	if (!error) {
		smb_rq_bend(rqp);
		error = smb_rq_simple(rqp);
		if (!error) {
			smb_rq_getreply(rqp, &mdp);
			md_get_uint8(mdp, &wc);
			if (wc == 1)
				md_get_uint16(mdp, &fid);
			else
				error = EBADRPC;
		}
	}

	smb_rq_done(rqp);
	if (!error)
		smbfs_smb_close(ssp, fid, &ctime, scred);

	return (error);
}
Example 14
static __inline int
acpi_cmbat_info_expired(struct timespec *lastupdated)
{
	struct timespec	curtime;

	if (lastupdated == NULL) {
		return (1);
	}

	if (!timespecisset(lastupdated)) {
		return (1);
	}

	getnanotime(&curtime);
	timespecsub(&curtime, lastupdated);
	return ((curtime.tv_sec < 0 || curtime.tv_sec > acpi_battery_get_info_expire()));
}
Example 15
N8_Status_t
n8_gettime( n8_timeval_t *n8_timeResults_p )
{

   struct timespec  ts;
   N8_Status_t returnResults = N8_STATUS_OK;

   getnanotime(&ts);

   /* Timespec has a seconds portion and a nano seconds portion.        */
   /* Thus we need to divide to convert nanoseconds to microseconds.    */
   n8_timeResults_p->tv_sec = ts.tv_sec;
   n8_timeResults_p->tv_usec = ts.tv_nsec / 1000;

   return returnResults;

} /* n8_gettime */
Example 16
static int
acpi_cmbat_info_expired(struct timespec *lastupdated)
{
    struct timespec	curtime;

    ACPI_SERIAL_ASSERT(cmbat);

    if (lastupdated == NULL)
	return (TRUE);
    if (!timespecisset(lastupdated))
	return (TRUE);

    getnanotime(&curtime);
    timespecsub(&curtime, lastupdated);
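    /* treat the data as stale if the clock stepped backwards or the expiry interval elapsed */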
    return (curtime.tv_sec < 0 ||
	    curtime.tv_sec > acpi_battery_get_info_expire());
}
Example 17
/*
 * Write system time back to RTC
 */
void
resettodr(void)
{
	struct timespec ts;
	int error;

	if (disable_rtc_set || clock_dev == NULL)
		return;

	getnanotime(&ts);
	timespecadd(&ts, &clock_adj);
	ts.tv_sec -= utc_offset();
	/* XXX: We should really set all registered RTCs */
	if ((error = CLOCK_SETTIME(clock_dev, &ts)) != 0)
		printf("warning: clock_settime failed (%d), time-of-day clock "
		    "not adjusted to system time\n", error);
}
Example 18
int
deupdat(struct denode *dep, int waitfor)
{
	struct direntry dir;
	struct timespec ts;
	struct buf *bp;
	struct direntry *dirp;
	int error;

	if (DETOV(dep)->v_mount->mnt_flag & MNT_RDONLY) {
		dep->de_flag &= ~(DE_UPDATE | DE_CREATE | DE_ACCESS |
		    DE_MODIFIED);
		return (0);
	}
	getnanotime(&ts);
	DETIMES(dep, &ts, &ts, &ts);
	if ((dep->de_flag & DE_MODIFIED) == 0 && waitfor == 0)
		return (0);
	dep->de_flag &= ~DE_MODIFIED;
	if (DETOV(dep)->v_vflag & VV_ROOT)
		return (EINVAL);
	if (dep->de_refcnt <= 0)
		return (0);
	error = readde(dep, &bp, &dirp);
	if (error)
		return (error);
	DE_EXTERNALIZE(&dir, dep);
	if (bcmp(dirp, &dir, sizeof(dir)) == 0) {
		if (waitfor == 0 || (bp->b_flags & B_DELWRI) == 0) {
			brelse(bp);
			return (0);
		}
	} else
		*dirp = dir;
	if ((DETOV(dep)->v_mount->mnt_flag & MNT_NOCLUSTERW) == 0)
		bp->b_flags |= B_CLUSTEROK;
	if (waitfor)
		error = bwrite(bp);
	else if (vm_page_count_severe() || buf_dirty_count_severe())
		bawrite(bp);
	else
		bdwrite(bp);
	return (error);
}
Example 19
/*
 * Last reference to an inode.  If necessary, write or delete it.
 */
int
ext2fs_inactive(void *v)
{
	struct vop_inactive_args *ap = v;
	struct vnode *vp = ap->a_vp;
	struct inode *ip = VTOI(vp);
	struct proc *p = ap->a_p;
	struct timespec ts;
	int error = 0;
#ifdef DIAGNOSTIC
	extern int prtactive;

	if (prtactive && vp->v_usecount != 0)
		vprint("ext2fs_inactive: pushing active", vp);
#endif

	/* Get rid of inodes related to stale file handles. */
	if (ip->i_e2din == NULL || ip->i_e2fs_mode == 0 || ip->i_e2fs_dtime)
		goto out;

	error = 0;
	if (ip->i_e2fs_nlink == 0 && (vp->v_mount->mnt_flag & MNT_RDONLY) == 0) {
		if (ext2fs_size(ip) != 0) {
			error = ext2fs_truncate(ip, (off_t)0, 0, NOCRED);
		}
		getnanotime(&ts);
		ip->i_e2fs_dtime = ts.tv_sec;
		ip->i_flag |= IN_CHANGE | IN_UPDATE;
		ext2fs_inode_free(ip, ip->i_number, ip->i_e2fs_mode);
	}
	if (ip->i_flag & (IN_ACCESS | IN_CHANGE | IN_MODIFIED | IN_UPDATE)) {
		ext2fs_update(ip, 0);
	}
out:
	VOP_UNLOCK(vp, p);
	/*
	 * If we are done with the inode, reclaim it
	 * so that it can be reused immediately.
	 */
	if (ip->i_e2din == NULL || ip->i_e2fs_dtime != 0)
		vrecycle(vp, p);
	return (error);
}
Example 20
/*
 * Hacked up version of vn_open. We _only_ handle ptys and only open
 * them with FREAD|FWRITE and never deal with creat or stuff like that.
 *
 * We need it because we have to fake up root credentials to open the pty.
 */
static int
ptm_vn_open(struct nameidata *ndp)
{
	struct proc *p = ndp->ni_cnd.cn_proc;
	struct ucred *cred;
	struct vattr vattr;
	struct vnode *vp;
	int error;

	if ((error = namei(ndp)) != 0)
		return (error);
	vp = ndp->ni_vp;
	if (vp->v_type != VCHR) {
		error = EINVAL;
		goto bad;
	}

	/*
	 * Get us a fresh cred with root privileges.
	 */
	cred = crget();
	error = VOP_OPEN(vp, FREAD|FWRITE, cred, p);
	if (!error) {
		/* update atime/mtime */
		VATTR_NULL(&vattr);
		getnanotime(&vattr.va_atime);
		vattr.va_mtime = vattr.va_atime;
		vattr.va_vaflags |= VA_UTIMES_NULL;
		(void)VOP_SETATTR(vp, &vattr, p->p_ucred, p);
	}
	crfree(cred);

	if (error)
		goto bad;

	vp->v_writecount++;

	return (0);
bad:
	vput(vp);
	return (error);
}
Example 21
static struct progress *start_progress_delay(const char *title, unsigned total,
					     unsigned percent_threshold, unsigned delay)
{
	struct progress *progress = malloc(sizeof(*progress));
	if (!progress) {
		/* unlikely, but here's a good fallback */
		fprintf(stderr, "%s...\n", title);
		fflush(stderr);
		return NULL;
	}
	progress->title = title;
	progress->total = total;
	progress->last_value = -1;
	progress->last_percent = -1;
	progress->delayed_percent_threshold = percent_threshold;
	progress->delay = delay;
	progress->throughput = NULL;
	progress->start_ns = getnanotime();
	set_progress_signal();
	return progress;
}
Example 22
static void preload_index(struct index_state *index,
			  const struct pathspec *pathspec)
{
	int threads, i, work, offset;
	struct thread_data data[MAX_PARALLEL];
	uint64_t start = getnanotime();

	if (!core_preload_index)
		return;

	threads = index->cache_nr / THREAD_COST;
	if ((index->cache_nr > 1) && (threads < 2) && getenv("GIT_FORCE_PRELOAD_TEST"))
		threads = 2;
	if (threads < 2)
		return;
	if (threads > MAX_PARALLEL)
		threads = MAX_PARALLEL;
	offset = 0;
	work = DIV_ROUND_UP(index->cache_nr, threads);
	memset(&data, 0, sizeof(data));
	for (i = 0; i < threads; i++) {
		struct thread_data *p = data+i;
		p->index = index;
		if (pathspec)
			copy_pathspec(&p->pathspec, pathspec);
		p->offset = offset;
		p->nr = work;
		offset += work;
		if (pthread_create(&p->pthread, NULL, preload_thread, p))
			die("unable to create threaded lstat");
	}
	for (i = 0; i < threads; i++) {
		struct thread_data *p = data+i;
		if (pthread_join(p->pthread, NULL))
			die("unable to join threaded lstat");
	}
	trace_performance_since(start, "preload index");
}
Example 23
void stop_progress_msg(struct progress **p_progress, const char *msg)
{
	struct progress *progress = *p_progress;
	if (!progress)
		return;
	*p_progress = NULL;
	if (progress->last_value != -1) {
		/* Force the last update */
		char *buf;
		struct throughput *tp = progress->throughput;

		if (tp) {
			uint64_t now_ns = getnanotime();
			unsigned int misecs, rate;
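			/*
			 * (now_ns - start_ns) * 4398 >> 32 approximates * 1024 / 10^9:
			 * it converts elapsed nanoseconds into 1/1024ths of a second
			 * ("misecs"), so bytes divided by misecs reads as KiB/s.
			 */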
			misecs = ((now_ns - progress->start_ns) * 4398) >> 32;
			rate = tp->curr_total / (misecs ? misecs : 1);
			throughput_string(&tp->display, tp->curr_total, rate);
		}
		progress_update = 1;
		buf = xstrfmt(", %s.\n", msg);
		display(progress, progress->last_value, buf);
		free(buf);
	}
	/* assumed tail: upstream progress.c drops the signal handler and frees the state */
	clear_progress_signal();
	free(progress->throughput);
	free(progress);
}
Example 24
void add_fsmonitor(struct index_state *istate)
{
	int i;

	if (!istate->fsmonitor_last_update) {
		trace_printf_key(&trace_fsmonitor, "add fsmonitor");
		istate->cache_changed |= FSMONITOR_CHANGED;
		istate->fsmonitor_last_update = getnanotime();

		/* reset the fsmonitor state */
		for (i = 0; i < istate->cache_nr; i++)
			istate->cache[i]->ce_flags &= ~CE_FSMONITOR_VALID;

		/* reset the untracked cache */
		if (istate->untracked) {
			add_untracked_cache(istate);
			istate->untracked->use_fsmonitor = 1;
		}

		/* Update the fsmonitor state */
		refresh_fsmonitor(istate);
	}
}
Example 25
int
xtaf_deupdat(struct denode *dep, int waitfor)
{
	int error;
	struct buf *bp;
	struct direntry *ep;
	struct timespec ts;

	if (DETOV(dep)->v_mount->mnt_flag & MNT_RDONLY)
		return (0);
	getnanotime(&ts);
	DETIMES(dep, &ts, &ts, &ts);
	if ((dep->de_flag & DE_MODIFIED) == 0)
		return (0);
	dep->de_flag &= ~DE_MODIFIED;
	if (dep->de_Attributes & ATTR_DIRECTORY)
		return (0);
	/*
	 * NOTE: The check for de_refcnt > 0 below ensures the denode being
	 * examined does not represent an unlinked but still open file.
	 * These files are not to be accessible even when the directory
	 * entry that represented the file happens to be reused while the
	 * deleted file is still open.
	*/
	if (dep->de_refcnt <= 0)
		return (0);
	error = xtaf_readde(dep, &bp, &ep);
	if (error)
		return (error);
	DE_EXTERNALIZE(ep, dep);
	if (waitfor)
		return (bwrite(bp));
	else {
		bdwrite(bp);
		return (0);
	}
}
Example 26
int
pipe_write(struct file *fp, off_t *poff, struct uio *uio, struct ucred *cred)
{
	int error = 0;
	int orig_resid;

	struct pipe *wpipe, *rpipe;

	rpipe = (struct pipe *) fp->f_data;
	wpipe = rpipe->pipe_peer;

	/*
	 * detect loss of pipe read side, issue SIGPIPE if lost.
	 */
	if ((wpipe == NULL) || (wpipe->pipe_state & PIPE_EOF)) {
		return (EPIPE);
	}
	++wpipe->pipe_busy;

	/*
	 * If it is advantageous to resize the pipe buffer, do
	 * so.
	 */
	if ((uio->uio_resid > PIPE_SIZE) &&
	    (nbigpipe < LIMITBIGPIPES) &&
	    (wpipe->pipe_buffer.size <= PIPE_SIZE) &&
	    (wpipe->pipe_buffer.cnt == 0)) {

		if ((error = pipelock(wpipe)) == 0) {
			if (pipespace(wpipe, BIG_PIPE_SIZE) == 0)
				nbigpipe++;
			pipeunlock(wpipe);
		}
	}

	/*
	 * If an early error occurred unbusy and return, waking up any pending
	 * readers.
	 */
	if (error) {
		--wpipe->pipe_busy;
		if ((wpipe->pipe_busy == 0) &&
		    (wpipe->pipe_state & PIPE_WANT)) {
			wpipe->pipe_state &= ~(PIPE_WANT | PIPE_WANTR);
			wakeup(wpipe);
		}
		return (error);
	}

	orig_resid = uio->uio_resid;

	while (uio->uio_resid) {
		int space;

retrywrite:
		if (wpipe->pipe_state & PIPE_EOF) {
			error = EPIPE;
			break;
		}

		space = wpipe->pipe_buffer.size - wpipe->pipe_buffer.cnt;

		/* Writes of size <= PIPE_BUF must be atomic. */
		if ((space < uio->uio_resid) && (orig_resid <= PIPE_BUF))
			space = 0;

		if (space > 0) {
			if ((error = pipelock(wpipe)) == 0) {
				int size;	/* Transfer size */
				int segsize;	/* first segment to transfer */

				/*
				 * If a process blocked in uiomove, our
				 * value for space might be bad.
				 *
				 * XXX will we be ok if the reader has gone
				 * away here?
				 */
				if (space > wpipe->pipe_buffer.size -
				    wpipe->pipe_buffer.cnt) {
					pipeunlock(wpipe);
					goto retrywrite;
				}

				/*
				 * Transfer size is minimum of uio transfer
				 * and free space in pipe buffer.
				 */
				if (space > uio->uio_resid)
					size = uio->uio_resid;
				else
					size = space;
				/*
				 * First segment to transfer is minimum of
				 * transfer size and contiguous space in
				 * pipe buffer.  If first segment to transfer
				 * is less than the transfer size, we've got
				 * a wraparound in the buffer.
				 */
				segsize = wpipe->pipe_buffer.size -
					wpipe->pipe_buffer.in;
				if (segsize > size)
					segsize = size;

				/* Transfer first segment */

				error = uiomove(&wpipe->pipe_buffer.buffer[wpipe->pipe_buffer.in], 
						segsize, uio);

				if (error == 0 && segsize < size) {
					/*
					 * Transfer remaining part now, to
					 * support atomic writes.  Wraparound
					 * happened.
					 */
#ifdef DIAGNOSTIC
					if (wpipe->pipe_buffer.in + segsize !=
					    wpipe->pipe_buffer.size)
						panic("Expected pipe buffer wraparound disappeared");
#endif

					error = uiomove(&wpipe->pipe_buffer.buffer[0],
							size - segsize, uio);
				}
				if (error == 0) {
					wpipe->pipe_buffer.in += size;
					if (wpipe->pipe_buffer.in >=
					    wpipe->pipe_buffer.size) {
#ifdef DIAGNOSTIC
						if (wpipe->pipe_buffer.in != size - segsize + wpipe->pipe_buffer.size)
							panic("Expected wraparound bad");
#endif
						wpipe->pipe_buffer.in = size - segsize;
					}

					wpipe->pipe_buffer.cnt += size;
#ifdef DIAGNOSTIC
					if (wpipe->pipe_buffer.cnt > wpipe->pipe_buffer.size)
						panic("Pipe buffer overflow");
#endif
				}
				pipeunlock(wpipe);
			}
			if (error)
				break;
		} else {
			/*
			 * If the "read-side" has been blocked, wake it up now.
			 */
			if (wpipe->pipe_state & PIPE_WANTR) {
				wpipe->pipe_state &= ~PIPE_WANTR;
				wakeup(wpipe);
			}

			/*
			 * don't block on non-blocking I/O
			 */
			if (fp->f_flag & FNONBLOCK) {
				error = EAGAIN;
				break;
			}

			/*
			 * We have no more space and have something to offer,
			 * wake up select/poll.
			 */
			pipeselwakeup(wpipe);

			wpipe->pipe_state |= PIPE_WANTW;
			error = tsleep(wpipe, (PRIBIO + 1)|PCATCH,
			    "pipewr", 0);
			if (error)
				break;
			/*
			 * If read side wants to go away, we just issue a
			 * signal to ourselves.
			 */
			if (wpipe->pipe_state & PIPE_EOF) {
				error = EPIPE;
				break;
			}	
		}
	}

	--wpipe->pipe_busy;

	if ((wpipe->pipe_busy == 0) && (wpipe->pipe_state & PIPE_WANT)) {
		wpipe->pipe_state &= ~(PIPE_WANT | PIPE_WANTR);
		wakeup(wpipe);
	} else if (wpipe->pipe_buffer.cnt > 0) {
		/*
		 * If we have put any characters in the buffer, we wake up
		 * the reader.
		 */
		if (wpipe->pipe_state & PIPE_WANTR) {
			wpipe->pipe_state &= ~PIPE_WANTR;
			wakeup(wpipe);
		}
	}

	/*
	 * Don't return EPIPE if I/O was successful
	 */
	if ((wpipe->pipe_buffer.cnt == 0) &&
	    (uio->uio_resid == 0) &&
	    (error == EPIPE)) {
		error = 0;
	}

	if (error == 0)
		getnanotime(&wpipe->pipe_mtime);
	/*
	 * We have something to offer, wake up select/poll.
	 */
	if (wpipe->pipe_buffer.cnt)
		pipeselwakeup(wpipe);

	return (error);
}
Example 27
/* ARGSUSED */
int
pipe_read(struct file *fp, off_t *poff, struct uio *uio, struct ucred *cred)
{
	struct pipe *rpipe = (struct pipe *) fp->f_data;
	int error;
	int nread = 0;
	int size;

	error = pipelock(rpipe);
	if (error)
		return (error);

	++rpipe->pipe_busy;

	while (uio->uio_resid) {
		/*
		 * normal pipe buffer receive
		 */
		if (rpipe->pipe_buffer.cnt > 0) {
			size = rpipe->pipe_buffer.size - rpipe->pipe_buffer.out;
			if (size > rpipe->pipe_buffer.cnt)
				size = rpipe->pipe_buffer.cnt;
			if (size > uio->uio_resid)
				size = uio->uio_resid;
			error = uiomove(&rpipe->pipe_buffer.buffer[rpipe->pipe_buffer.out],
					size, uio);
			if (error) {
				break;
			}
			rpipe->pipe_buffer.out += size;
			if (rpipe->pipe_buffer.out >= rpipe->pipe_buffer.size)
				rpipe->pipe_buffer.out = 0;

			rpipe->pipe_buffer.cnt -= size;
			/*
			 * If there is no more to read in the pipe, reset
			 * its pointers to the beginning.  This improves
			 * cache hit stats.
			 */
			if (rpipe->pipe_buffer.cnt == 0) {
				rpipe->pipe_buffer.in = 0;
				rpipe->pipe_buffer.out = 0;
			}
			nread += size;
		} else {
			/*
			 * detect EOF condition
			 * read returns 0 on EOF, no need to set error
			 */
			if (rpipe->pipe_state & PIPE_EOF)
				break;

			/*
			 * If the "write-side" has been blocked, wake it up now.
			 */
			if (rpipe->pipe_state & PIPE_WANTW) {
				rpipe->pipe_state &= ~PIPE_WANTW;
				wakeup(rpipe);
			}

			/*
			 * Break if some data was read.
			 */
			if (nread > 0)
				break;

			/*
			 * Unlock the pipe buffer for our remaining processing.
			 * We will either break out with an error or we will
			 * sleep and relock to loop.
			 */
			pipeunlock(rpipe);

			/*
			 * Handle non-blocking mode operation or
			 * wait for more data.
			 */
			if (fp->f_flag & FNONBLOCK) {
				error = EAGAIN;
			} else {
				rpipe->pipe_state |= PIPE_WANTR;
				if ((error = tsleep(rpipe, PRIBIO|PCATCH, "piperd", 0)) == 0)
					error = pipelock(rpipe);
			}
			if (error)
				goto unlocked_error;
		}
	}
	pipeunlock(rpipe);

	if (error == 0)
		getnanotime(&rpipe->pipe_atime);
unlocked_error:
	--rpipe->pipe_busy;

	/*
	 * PIPE_WANT processing only makes sense if pipe_busy is 0.
	 */
	if ((rpipe->pipe_busy == 0) && (rpipe->pipe_state & PIPE_WANT)) {
		rpipe->pipe_state &= ~(PIPE_WANT|PIPE_WANTW);
		wakeup(rpipe);
	} else if (rpipe->pipe_buffer.cnt < MINPIPESIZE) {
		/*
		 * Handle write blocking hysteresis.
		 */
		if (rpipe->pipe_state & PIPE_WANTW) {
			rpipe->pipe_state &= ~PIPE_WANTW;
			wakeup(rpipe);
		}
	}

	if ((rpipe->pipe_buffer.size - rpipe->pipe_buffer.cnt) >= PIPE_BUF)
		pipeselwakeup(rpipe);

	return (error);
}
Example 28
void refresh_fsmonitor(struct index_state *istate)
{
	static int has_run_once = 0;
	struct strbuf query_result = STRBUF_INIT;
	int query_success = 0;
	size_t bol; /* beginning of line */
	uint64_t last_update;
	char *buf;
	int i;

	if (!core_fsmonitor || has_run_once)
		return;
	has_run_once = 1;

	trace_printf_key(&trace_fsmonitor, "refresh fsmonitor");
	/*
	 * This could be racy so save the date/time now and query_fsmonitor
	 * should be inclusive to ensure we don't miss potential changes.
	 */
	last_update = getnanotime();

	/*
	 * If we have a last update time, call query_fsmonitor for the set of
	 * changes since that time, else assume everything is possibly dirty
	 * and check it all.
	 */
	if (istate->fsmonitor_last_update) {
		query_success = !query_fsmonitor(HOOK_INTERFACE_VERSION,
			istate->fsmonitor_last_update, &query_result);
		trace_performance_since(last_update, "fsmonitor process '%s'", core_fsmonitor);
		trace_printf_key(&trace_fsmonitor, "fsmonitor process '%s' returned %s",
			core_fsmonitor, query_success ? "success" : "failure");
	}

	/* a fsmonitor process can return '/' to indicate all entries are invalid */
	if (query_success && query_result.buf[0] != '/') {
		/* Mark all entries returned by the monitor as dirty */
		buf = query_result.buf;
		bol = 0;
		for (i = 0; i < query_result.len; i++) {
			if (buf[i] != '\0')
				continue;
			fsmonitor_refresh_callback(istate, buf + bol);
			bol = i + 1;
		}
		if (bol < query_result.len)
			fsmonitor_refresh_callback(istate, buf + bol);
	} else {
		/* Mark all entries invalid */
		for (i = 0; i < istate->cache_nr; i++)
			istate->cache[i]->ce_flags &= ~CE_FSMONITOR_VALID;

		/* If we're going to check every file, ensure we save the results */
		istate->cache_changed |= FSMONITOR_CHANGED;

		if (istate->untracked)
			istate->untracked->use_fsmonitor = 0;
	}
	strbuf_release(&query_result);

	/* Now that we've updated istate, save the last_update time */
	istate->fsmonitor_last_update = last_update;
}
Example 29
static uint32_t *
pmclog_reserve(struct pmc_owner *po, int length)
{
	uintptr_t newptr, oldptr;
	uint32_t *lh;
	struct timespec ts;

	PMCDBG(LOG,ALL,1, "po=%p len=%d", po, length);

	KASSERT(length % sizeof(uint32_t) == 0,
	    ("[pmclog,%d] length not a multiple of word size", __LINE__));

	mtx_lock_spin(&po->po_mtx);

	/* No more data when shutdown in progress. */
	if (po->po_flags & PMC_PO_SHUTDOWN) {
		mtx_unlock_spin(&po->po_mtx);
		return (NULL);
	}

	if (po->po_curbuf == NULL)
		if (pmclog_get_buffer(po) != 0) {
			mtx_unlock_spin(&po->po_mtx);
			return (NULL);
		}

	KASSERT(po->po_curbuf != NULL,
	    ("[pmclog,%d] po=%p no current buffer", __LINE__, po));

	KASSERT(po->po_curbuf->plb_ptr >= po->po_curbuf->plb_base &&
	    po->po_curbuf->plb_ptr <= po->po_curbuf->plb_fence,
	    ("[pmclog,%d] po=%p buffer invariants: ptr=%p base=%p fence=%p",
		__LINE__, po, po->po_curbuf->plb_ptr, po->po_curbuf->plb_base,
		po->po_curbuf->plb_fence));

	oldptr = (uintptr_t) po->po_curbuf->plb_ptr;
	newptr = oldptr + length;

	KASSERT(oldptr != (uintptr_t) NULL,
	    ("[pmclog,%d] po=%p Null log buffer pointer", __LINE__, po));

	/*
	 * If we have space in the current buffer, return a pointer to
	 * available space with the PO structure locked.
	 */
	if (newptr <= (uintptr_t) po->po_curbuf->plb_fence) {
		po->po_curbuf->plb_ptr = (char *) newptr;
		goto done;
	}

	/*
	 * Otherwise, schedule the current buffer for output and get a
	 * fresh buffer.
	 */
	pmclog_schedule_io(po);

	if (pmclog_get_buffer(po) != 0) {
		mtx_unlock_spin(&po->po_mtx);
		return (NULL);
	}

	KASSERT(po->po_curbuf != NULL,
	    ("[pmclog,%d] po=%p no current buffer", __LINE__, po));

	KASSERT(po->po_curbuf->plb_ptr != NULL,
	    ("[pmclog,%d] null return from pmc_get_log_buffer", __LINE__));

	KASSERT(po->po_curbuf->plb_ptr == po->po_curbuf->plb_base &&
	    po->po_curbuf->plb_ptr <= po->po_curbuf->plb_fence,
	    ("[pmclog,%d] po=%p buffer invariants: ptr=%p base=%p fence=%p",
		__LINE__, po, po->po_curbuf->plb_ptr, po->po_curbuf->plb_base,
		po->po_curbuf->plb_fence));

	oldptr = (uintptr_t) po->po_curbuf->plb_ptr;

 done:
	lh = (uint32_t *) oldptr;
	lh++;				/* skip header */
	getnanotime(&ts);		/* fill in the timestamp */
	*lh++ = ts.tv_sec & 0xFFFFFFFF;
	*lh++ = ts.tv_nsec & 0xFFFFFFF;
	return ((uint32_t *) oldptr);
}
Example 30
/*
 * Update the access, modified, and inode change times as specified by the
 * IN_ACCESS, IN_UPDATE, and IN_CHANGE flags respectively. The IN_MODIFIED
 * flag is used to specify that the inode needs to be updated but that the
 * times have already been set. The access and modified times are taken from
 * the second and third parameters; the inode change time is always taken
 * from the current time. If waitfor is set, then wait for the disk write
 * of the inode to complete.
 */
int
ffs_update(struct inode *ip, struct timespec *atime, 
    struct timespec *mtime, int waitfor)
{
	struct vnode *vp;
	struct fs *fs;
	struct buf *bp;
	int error;
	struct timespec ts;

	vp = ITOV(ip);
	if (vp->v_mount->mnt_flag & MNT_RDONLY) {
		ip->i_flag &=
		    ~(IN_ACCESS | IN_CHANGE | IN_MODIFIED | IN_UPDATE);
		return (0);
	}

	if ((ip->i_flag &
	    (IN_ACCESS | IN_CHANGE | IN_MODIFIED | IN_UPDATE)) == 0 &&
	    waitfor != MNT_WAIT)
		return (0);

	getnanotime(&ts);

	if (ip->i_flag & IN_ACCESS) {
		DIP_ASSIGN(ip, atime, atime ? atime->tv_sec : ts.tv_sec);
		DIP_ASSIGN(ip, atimensec, atime ? atime->tv_nsec : ts.tv_nsec);
	}

	if (ip->i_flag & IN_UPDATE) {
		DIP_ASSIGN(ip, mtime, mtime ? mtime->tv_sec : ts.tv_sec);
		DIP_ASSIGN(ip, mtimensec, mtime ? mtime->tv_nsec : ts.tv_nsec);
		ip->i_modrev++;
	}

	if (ip->i_flag & IN_CHANGE) {
		DIP_ASSIGN(ip, ctime, ts.tv_sec);
		DIP_ASSIGN(ip, ctimensec, ts.tv_nsec);
	}

	ip->i_flag &= ~(IN_ACCESS | IN_CHANGE | IN_MODIFIED | IN_UPDATE);
	fs = ip->i_fs;

	/*
	 * Ensure that uid and gid are correct. This is a temporary
	 * fix until fsck has been changed to do the update.
	 */
	if (fs->fs_magic == FS_UFS1_MAGIC && fs->fs_inodefmt < FS_44INODEFMT) {
		ip->i_din1->di_ouid = ip->i_ffs1_uid;
		ip->i_din1->di_ogid = ip->i_ffs1_gid;
	}

	error = bread(ip->i_devvp, fsbtodb(fs, ino_to_fsba(fs, ip->i_number)),
	    (int)fs->fs_bsize, &bp);
	if (error) {
		brelse(bp);
		return (error);
	}

	if (DOINGSOFTDEP(vp))
		softdep_update_inodeblock(ip, bp, waitfor);
	else if (ip->i_effnlink != DIP(ip, nlink))
		panic("ffs_update: bad link cnt");

#ifdef FFS2
	if (ip->i_ump->um_fstype == UM_UFS2)
		*((struct ufs2_dinode *)bp->b_data +
		    ino_to_fsbo(fs, ip->i_number)) = *ip->i_din2;
	else
#endif
		*((struct ufs1_dinode *)bp->b_data +
		    ino_to_fsbo(fs, ip->i_number)) = *ip->i_din1;

	if (waitfor && !DOINGASYNC(vp)) {
		return (bwrite(bp));
	} else {
		bdwrite(bp);
		return (0);
	}
}