Example no. 1
/*
 * This is the central function for every thread.
 * For each invocation, its role is set by (a pointer to) a stage_info_t.
 */
void *
manager_fn(void *arg)
{
	worker_info_t	*wp = (worker_info_t *) arg;
	stage_info_t	*sp = wp->stage;
	boolean_t	is_producer = (sp->stagenum == 0);
	long		iteration = 0;
	int		current_tag = 0;

	kern_return_t			ret;
	thread_extended_policy_data_t	epolicy;
	epolicy.timeshare = FALSE;
	ret = thread_policy_set(
			mach_thread_self(), THREAD_EXTENDED_POLICY,
			(thread_policy_t) &epolicy,
			THREAD_EXTENDED_POLICY_COUNT);
	if (ret != KERN_SUCCESS)
		printf("thread_policy_set(THREAD_EXTENDED_POLICY) returned %d\n", ret);
	
	/*
	 * If we're using affinity sets and we're a producer,
	 * set our tag to our thread set number.
	 */
	if (affinity && is_producer) {
		affinity_set(wp->setnum);
		current_tag = wp->setnum;
	}

	DBG("Starting %s %d, stage: %d\n", sp->name, wp->setnum, sp->stagenum);

	/*
	 * Start barrier.
	 * The last thread to get here releases everyone and starts the timer.
	 */
	pthread_mutex_lock(&funnel);
	threads_ready++;
	if (threads_ready == threads) {
		pthread_mutex_unlock(&funnel);
		if (halting) {
			printf("  all threads ready for process %d, "
				"hit any key to start", getpid());
			fflush(stdout);
			(void) getchar();
		}
		pthread_cond_broadcast(&barrier);
		timer = mach_absolute_time();
	} else {
		pthread_cond_wait(&barrier, &funnel);
		pthread_mutex_unlock(&funnel);
	}

	do {
		work_t		*workp;

		/*
		 * Get a buffer from the input queue.
		 * Block if none.
		 * Quit if all work done.
		 */
		pthread_mutex_lock(&sp->input->mtx);
		while (1) {
			if (sp->work_todo == 0) {
				pthread_mutex_unlock(&sp->input->mtx);
				goto out;
			}
			workp = TAILQ_FIRST(&(sp->input->queue));
			if (workp != NULL)
				break;
			DBG("    %s[%d,%d] todo %d waiting for buffer\n",
				sp->name, wp->setnum, sp->stagenum, sp->work_todo);
			sp->input->waiters++;
			pthread_cond_wait(&sp->input->cnd, &sp->input->mtx);
			sp->input->waiters--;
		}
		TAILQ_REMOVE(&(sp->input->queue), workp, link);
		iteration = sp->work_todo--;
		pthread_mutex_unlock(&sp->input->mtx);

		if (is_producer) {
			workp->number = iteration;
			workp->tag = wp->setnum;
		} else {
			if (affinity && current_tag != workp->tag) {
				affinity_set(workp->tag);
				current_tag = workp->tag;
			}
		}

		DBG("  %s[%d,%d] todo %d work %p data %p\n",
			sp->name, wp->setnum, sp->stagenum, iteration, workp, workp->data);

		/* Do our stuff with the buffer */
		(void) sp->fn(workp->data, workp->isize);

		/*
		 * Place the buffer on the input queue of the next stage.
		 * Signal waiters if required.
		 */
		pthread_mutex_lock(&sp->output->mtx);
		TAILQ_INSERT_TAIL(&(sp->output->queue), workp, link);
		if (sp->output->waiters) {
			DBG("    %s[%d,%d] todo %d signaling work\n",
				sp->name, wp->setnum, sp->stagenum, iteration);
			pthread_cond_signal(&sp->output->cnd);
		}
		pthread_mutex_unlock(&sp->output->mtx);

	} while (1);

out:
	pthread_cond_broadcast(&sp->output->cnd);

	DBG("Ending %s[%d,%d]\n", sp->name, wp->setnum, sp->stagenum);

	return (void *) iteration;
}
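
The start barrier in manager_fn() is the classic last-arrival-releases-all idiom: every thread bumps a shared counter under a mutex, and the thread that completes the count broadcasts to the rest and starts the clock. A minimal self-contained sketch of the same pattern (all names here are illustrative, not from the example; unlike the code above, it re-checks the predicate so it also tolerates spurious wakeups):

#include <pthread.h>

static pthread_mutex_t barrier_mtx = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  barrier_cnd = PTHREAD_COND_INITIALIZER;
static int arrived;
static const int expected = 4;	/* number of participating threads */

static void
start_barrier(void)
{
	pthread_mutex_lock(&barrier_mtx);
	if (++arrived == expected) {
		/* Last arrival releases everyone else. */
		pthread_cond_broadcast(&barrier_cnd);
	} else {
		/* Wait, re-checking to tolerate spurious wakeups. */
		while (arrived < expected)
			pthread_cond_wait(&barrier_cnd, &barrier_mtx);
	}
	pthread_mutex_unlock(&barrier_mtx);
}
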
Example no. 2
void 
funlockfile(FILE * fp)
{
	int	idx = file_idx(fp);
	struct	file_lock	*p;

	/*
	 * Defer signals to protect the scheduling queues from
	 * access by the signal handler:
	 */
	_thread_kern_sig_defer();

	/* Lock the hash table: */
	_SPINLOCK(&hash_lock);

	/*
	 * Get a pointer to the lock for the file and check that
	 * the running thread is the one with the lock:
	 */
	if ((p = find_lock(idx, fp)) != NULL &&
	    p->owner == _thread_run) {
		/*
		 * Check if this thread has locked the FILE
		 * more than once:
		 */
		if (p->count > 1)
			/*
			 * Decrement the count of the number of
			 * times the running thread has locked this
			 * file:
			 */
			p->count--;
		else {
			/*
			 * The running thread will release the
			 * lock now:
			 */
			p->count = 0;

			/* Get the new owner of the lock: */
			if ((p->owner = TAILQ_FIRST(&p->l_head)) != NULL) {
				/* Pop the thread off the queue: */
				TAILQ_REMOVE(&p->l_head,p->owner,qe);

				/*
				 * This is the first lock for the new
				 * owner:
				 */
				p->count = 1;

				/* Allow the new owner to run: */
				PTHREAD_NEW_STATE(p->owner,PS_RUNNING);
			}
		}
	}

	/* Unlock the hash table: */
	_SPINUNLOCK(&hash_lock);

	/*
	 * Undefer and handle pending signals, yielding if
	 * necessary:
	 */
	_thread_kern_sig_undefer();
}
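
funlockfile() is a recursive-lock release: the owner's hold count is decremented, and only when it reaches zero is the lock handed to the first queued waiter with a fresh count of one. A reduced sketch of just that bookkeeping (the types and field names are invented for illustration; actually waking the new owner, as PTHREAD_NEW_STATE() does above, is omitted):

#include <stddef.h>

struct waiter {
	struct waiter	*next;
};

struct rlock {
	struct waiter	*owner;		/* current holder, NULL if free */
	int		count;		/* recursion depth for the owner */
	struct waiter	*queue;		/* head of the wait list */
};

static void
runlock(struct rlock *lk, struct waiter *self)
{
	if (lk->owner != self)
		return;				/* not ours; ignore, as above */
	if (lk->count > 1) {
		lk->count--;			/* still held recursively */
		return;
	}
	lk->count = 0;
	if ((lk->owner = lk->queue) != NULL) {
		lk->queue = lk->owner->next;	/* pop the new owner */
		lk->count = 1;			/* their first hold */
	}
}
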
Example no. 3
/**
 * @brief Check for expired TCP DRCs.
 */
static inline void drc_free_expired(void)
{
	drc_t *drc;
	time_t now = time(NULL);
	struct rbtree_x_part *t;
	struct opr_rbtree_node *odrc = NULL;

	DRC_ST_LOCK();

	if ((drc_st->tcp_drc_recycle_qlen < 1) ||
	    (now - drc_st->last_expire_check) < 600) /* 10m */
		goto unlock;

	do {
		drc = TAILQ_FIRST(&drc_st->tcp_drc_recycle_q);
		if (drc && (drc->d_u.tcp.recycle_time > 0)
		    && ((now - drc->d_u.tcp.recycle_time) >
			drc_st->expire_delta) && (drc->refcnt == 0)) {
			LogFullDebug(COMPONENT_DUPREQ,
				     "remove expired drc %p from "
				     "recycle queue", drc);
			t = rbtx_partition_of_scalar(&drc_st->tcp_drc_recycle_t,
						     drc->d_u.tcp.hk);

			odrc =
			    opr_rbtree_lookup(&t->t, &drc->d_u.tcp.recycle_k);
			if (!odrc) {
				LogCrit(COMPONENT_DUPREQ,
					"BUG: asked to dequeue DRC not on "
					" queue");
			} else {
				(void)opr_rbtree_remove(&t->t,
							&drc->d_u.tcp.
							recycle_k);
			}
			TAILQ_REMOVE(&drc_st->tcp_drc_recycle_q, drc,
				     d_u.tcp.recycle_q);
			--(drc_st->tcp_drc_recycle_qlen);
			/* expect DRC to be reachable from some xprt(s) */
			pthread_mutex_lock(&drc->mtx);
			drc->flags &= ~DRC_FLAG_RECYCLE;
			/* but if not, dispose it */
			if (drc->refcnt == 0) {
				pthread_mutex_unlock(&drc->mtx);
				free_tcp_drc(drc);
				continue;
			}
			pthread_mutex_unlock(&drc->mtx);
		} else {
			LogFullDebug(COMPONENT_DUPREQ,
				     "unexpired drc %p in recycle queue "
				     "expire check (nothing happens)", drc);
			drc_st->last_expire_check = now;
			break;
		}

	} while (1);

 unlock:
	DRC_ST_UNLOCK();
}
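
drc_free_expired() repeatedly examines the head of the recycle queue, dropping entries that are both old enough and unreferenced, and stops at the first entry that is still fresh. The same head-of-queue expiry loop in isolation, assuming an oldest-first queue (the entry type is invented for illustration):

#include <sys/queue.h>
#include <stdlib.h>
#include <time.h>

struct entry {
	time_t			expire_at;
	TAILQ_ENTRY(entry)	link;
};
TAILQ_HEAD(entry_q, entry);

/* Free expired entries from the head of an age-ordered queue;
 * stop at the first one that has not yet expired. */
static void
free_expired(struct entry_q *q, time_t now)
{
	struct entry *e;

	while ((e = TAILQ_FIRST(q)) != NULL && e->expire_at <= now) {
		TAILQ_REMOVE(q, e, link);
		free(e);
	}
}
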
Example no. 4
/*
 * dmio_read:
 *
 *	Read file op.
 */
static int
dmio_read(struct file *fp, off_t *offp, struct uio *uio,
    kauth_cred_t cred, int flags)
{
	struct dmio_state *ds = (struct dmio_state *) fp->f_data;
	struct dmio_usrreq_state *dus;
	struct dmover_request *dreq;
	struct dmio_usrresp resp;
	int s, error = 0, progress = 0;

	if ((uio->uio_resid % sizeof(resp)) != 0)
		return (EINVAL);

	if (ds->ds_session == NULL)
		return (ENXIO);

	s = splsoftclock();
	simple_lock(&ds->ds_slock);

	while (uio->uio_resid != 0) {

		for (;;) {
			dus = TAILQ_FIRST(&ds->ds_complete);
			if (dus == NULL) {
				if (fp->f_flag & FNONBLOCK) {
					error = progress ? 0 : EWOULDBLOCK;
					goto out;
				}
				ds->ds_flags |= DMIO_STATE_READ_WAIT;
				error = ltsleep(&ds->ds_complete,
				    PRIBIO | PCATCH, "dmvrrd", 0,
				    &ds->ds_slock);
				if (error)
					goto out;
				continue;
			}
			/* Have a completed request. */
			TAILQ_REMOVE(&ds->ds_complete, dus, dus_q);
			ds->ds_nreqs--;
			if (ds->ds_flags & DMIO_STATE_WRITE_WAIT) {
				ds->ds_flags &= ~DMIO_STATE_WRITE_WAIT;
				wakeup(&ds->ds_nreqs);
			}
			if (ds->ds_flags & DMIO_STATE_SEL) {
				ds->ds_flags &= ~DMIO_STATE_SEL;
				selnotify(&ds->ds_selq, POLLIN | POLLRDNORM, 0);
			}
			break;
		}

		simple_unlock(&ds->ds_slock);

		dreq = dus->dus_req;
		resp.resp_id = dus->dus_id;
		if (dreq->dreq_flags & DMOVER_REQ_ERROR)
			resp.resp_error = dreq->dreq_error;
		else {
			resp.resp_error = 0;
			memcpy(resp.resp_immediate, dreq->dreq_immediate,
			    sizeof(resp.resp_immediate));
		}

		dmio_usrreq_fini(ds, dus);

		splx(s);

		progress = 1;

		dmover_request_free(dreq);

		error = uiomove(&resp, sizeof(resp), uio);
		if (error)
			return (error);

		s = splsoftclock();
		simple_lock(&ds->ds_slock);
	}

 out:
	simple_unlock(&ds->ds_slock);
	splx(s);

	return (error);
}
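
Note how dmio_read() treats non-blocking reads: once at least one response has been copied out, an empty completion queue means "done for now" (return 0) rather than EWOULDBLOCK. That progress refinement in miniature (queue_empty() and wait_for_work() are hypothetical stand-ins for the driver's queue test and its ltsleep() call):

#include <errno.h>

extern int queue_empty(void);
extern int wait_for_work(void);		/* 0, or an errno if interrupted */

/* Returns 0 when work is available, or an errno. */
static int
next_completion(int nonblock, int progress)
{
	int error;

	while (queue_empty()) {
		if (nonblock)
			return (progress ? 0 : EWOULDBLOCK);
		error = wait_for_work();
		if (error)
			return (error);
	}
	return (0);
}
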
Example no. 5
/* 
 * This routine frees all of the BSD context in the uthread except the
 * credential; it does not free the uthread structure itself.
 */
void
uthread_cleanup(task_t task, void *uthread, void * bsd_info, boolean_t is_corpse)
{
	struct _select *sel;
	uthread_t uth = (uthread_t)uthread;
	proc_t p = (proc_t)bsd_info;

	if (uth->uu_lowpri_window || uth->uu_throttle_info) {
		/*
		 * task is marked as a low priority I/O type
		 * and we've somehow managed to not dismiss the throttle
		 * through the normal exit paths back to user space...
	 * no need to throttle this thread since it's going away,
	 * but we do need to update our bookkeeping w.r.t. throttled threads.
	 *
	 * Calling this routine will clean up any throttle info reference
	 * still in use by the thread.
		 */
		throttle_lowpri_io(0);
	}
	/*
	 * Per-thread audit state should never last beyond system
	 * call return.  Since we don't audit the thread creation/
	 * removal, the thread state pointer should never be
	 * non-NULL when we get here.
	 */
	assert(uth->uu_ar == NULL);

	sel = &uth->uu_select;
	/* cleanup the select bit space */
	if (sel->nbytes) {
		FREE(sel->ibits, M_TEMP);
		FREE(sel->obits, M_TEMP);
		sel->nbytes = 0;
	}

	if (uth->uu_cdir) {
		vnode_rele(uth->uu_cdir);
		uth->uu_cdir = NULLVP;
	}

	if (uth->uu_wqset) {
		if (waitq_set_is_valid(uth->uu_wqset))
			waitq_set_deinit(uth->uu_wqset);
		FREE(uth->uu_wqset, M_SELECT);
		uth->uu_wqset = NULL;
		uth->uu_wqstate_sz = 0;
	}

	/*
	 * defer the removal of the thread name on process corpses until the corpse has
	 * been autopsied.
	 */
	if (!is_corpse) {
		uthread_cleanup_name(uth);
	}

	if ((task != kernel_task) && p) {

		if (((uth->uu_flag & UT_VFORK) == UT_VFORK) && (uth->uu_proc != PROC_NULL))  {
			vfork_exit_internal(uth->uu_proc, 0, 1);
		}
		/*
		 * Remove the thread from the process list and
		 * transfer [appropriate] pending signals to the process.
		 */
		if (get_bsdtask_info(task) == p) { 
			proc_lock(p);
			TAILQ_REMOVE(&p->p_uthlist, uth, uu_list);
			p->p_siglist |= (uth->uu_siglist & execmask & (~p->p_sigignore | sigcantmask));
			proc_unlock(p);
		}
#if CONFIG_DTRACE
		struct dtrace_ptss_page_entry *tmpptr = uth->t_dtrace_scratch;
		uth->t_dtrace_scratch = NULL;
		if (tmpptr != NULL) {
			dtrace_ptss_release_entry(p, tmpptr);
		}
#endif
	}
}
Example no. 6
/*
 * Look for the request in the cache
 * If found then
 *    return action and optionally reply
 * else
 *    insert it in the cache
 *
 * The rules are as follows:
 * - if in progress, return DROP request
 * - if completed within DELAY of the current time, return DROP it
 * - if completed a longer time ago return REPLY if the reply was cached or
 *   return DOIT
 * Update/add new request at end of lru list
 */
int
nfsrv_getcache(
    struct nfsrv_descript *nd,
    struct nfsrv_sock *slp,
    mbuf_t *mrepp)
{
    struct nfsrvcache *rp;
    struct nfsm_chain nmrep;
    struct sockaddr *saddr;
    int ret, error;

    /*
     * Don't cache recent requests for reliable transport protocols.
     * (Maybe we should for the case of a reconnect, but..)
     */
    if (!nd->nd_nam2)
        return (RC_DOIT);
    lck_mtx_lock(nfsrv_reqcache_mutex);
loop:
    for (rp = NFSRCHASH(nd->nd_retxid)->lh_first; rp != 0;
            rp = rp->rc_hash.le_next) {
        if (nd->nd_retxid == rp->rc_xid && nd->nd_procnum == rp->rc_proc &&
                netaddr_match(rp->rc_family, &rp->rc_haddr, nd->nd_nam)) {
            if ((rp->rc_flag & RC_LOCKED) != 0) {
                rp->rc_flag |= RC_WANTED;
                msleep(rp, nfsrv_reqcache_mutex, PZERO-1, "nfsrc", NULL);
                goto loop;
            }
            rp->rc_flag |= RC_LOCKED;
            /* If not at end of LRU chain, move it there */
            if (rp->rc_lru.tqe_next) {
                TAILQ_REMOVE(&nfsrv_reqcache_lruhead, rp, rc_lru);
                TAILQ_INSERT_TAIL(&nfsrv_reqcache_lruhead, rp, rc_lru);
            }
            if (rp->rc_state == RC_UNUSED)
                panic("nfsrv cache");
            if (rp->rc_state == RC_INPROG) {
                OSAddAtomic(1, &nfsstats.srvcache_inproghits);
                ret = RC_DROPIT;
            } else if (rp->rc_flag & RC_REPSTATUS) {
                OSAddAtomic(1, &nfsstats.srvcache_nonidemdonehits);
                nd->nd_repstat = rp->rc_status;
                error = nfsrv_rephead(nd, slp, &nmrep, 0);
                if (error) {
                    printf("nfsrv cache: reply alloc failed for nonidem request hit\n");
                    ret = RC_DROPIT;
                    *mrepp = NULL;
                } else {
                    ret = RC_REPLY;
                    *mrepp = nmrep.nmc_mhead;
                }
            } else if (rp->rc_flag & RC_REPMBUF) {
                OSAddAtomic(1, &nfsstats.srvcache_nonidemdonehits);
                error = mbuf_copym(rp->rc_reply, 0, MBUF_COPYALL, MBUF_WAITOK, mrepp);
                if (error) {
                    printf("nfsrv cache: reply copym failed for nonidem request hit\n");
                    ret = RC_DROPIT;
                } else {
                    ret = RC_REPLY;
                }
            } else {
                OSAddAtomic(1, &nfsstats.srvcache_idemdonehits);
                rp->rc_state = RC_INPROG;
                ret = RC_DOIT;
            }
            rp->rc_flag &= ~RC_LOCKED;
            if (rp->rc_flag & RC_WANTED) {
                rp->rc_flag &= ~RC_WANTED;
                wakeup(rp);
            }
            lck_mtx_unlock(nfsrv_reqcache_mutex);
            return (ret);
        }
    }
    OSAddAtomic(1, &nfsstats.srvcache_misses);
    if (nfsrv_reqcache_count < nfsrv_reqcache_size) {
        /* try to allocate a new entry */
        MALLOC(rp, struct nfsrvcache *, sizeof *rp, M_NFSD, M_WAITOK);
        if (rp) {
            bzero((char *)rp, sizeof *rp);
            nfsrv_reqcache_count++;
            rp->rc_flag = RC_LOCKED;
        }
    } else {
Example no. 7
/* Save mbox changes. */
int
fetch_mbox_save(struct account *a, struct fetch_mbox_mbox *fmbox)
{
	struct fetch_mbox_data	*data = a->data;
	struct fetch_mbox_mail	*aux, *this;
	char			 path[MAXPATHLEN], saved[MAXPATHLEN], c;
	int			 fd;
	ssize_t			 n;
	struct iovec		 iov[2];

	log_debug2("%s: %s: saving mbox: %u kept, %u total",
	    a->name, fmbox->path, fmbox->reference, fmbox->total);
	fd = -1;

	/*
	 * If the reference count is 0, no mails were kept, so the mbox can
	 * just be truncated.
	 */
	if (fmbox->reference == 0) {
		if (fmbox->total != 0 && ftruncate(fmbox->fd, 0) != 0)
			goto error;
		goto free_all;
	}

	/* If all the mails were kept, do nothing. */
	if (fmbox->reference == fmbox->total)
		goto free_all;

	/*
	 * Otherwise, things get complicated. data->kept is a list of all the
	 * mails (struct fetch_mbox_mail) which were kept for ALL mailboxes.
	 * There is no guarantee it is ordered by offset. Rather than try to be
	 * clever and save disk space, just create a new mbox and copy all the
	 * kept mails into it.
	 */
	if (ppath(path, sizeof path, "%s.XXXXXXXXXX", fmbox->path) != 0)
		goto error;
	if (ppath(saved, sizeof saved, "%s.XXXXXXXXXX", fmbox->path) != 0)
		goto error;
	if ((fd = mkstemp(path)) == -1)
		goto error;

	aux = TAILQ_FIRST(&data->kept);
	while (aux != NULL) {
		this = aux;
		aux = TAILQ_NEXT(aux, entry);

		if (this->fmbox != fmbox)
			continue;

		log_debug2("%s: writing message from %zu, size %zu",
		    a->name, this->off, this->size);
		c = '\n';
		iov[0].iov_base = fmbox->base + this->off;
		iov[0].iov_len = this->size;
		iov[1].iov_base = &c;
		iov[1].iov_len = 1;
		if ((n = writev(fd, iov, 2)) < 0)
			goto error;
		if ((size_t) n != this->size + 1) {
			errno = EIO;
			goto error;
		}

		fetch_mbox_free(this);
		TAILQ_REMOVE(&data->kept, this, entry);
	}

	if (fsync(fd) != 0)
		goto error;
	close(fd);

	/*
	 * Do the replacement dance: create a backup copy of the mbox, remove
	 * the mbox, link in the temporary file, unlink the temporary file,
	 * then unlink the backup mbox. We don't try to recover if anything
	 * fails on the grounds that it could just make things worse, just
	 * die and let the user sort it out.
	 */
	if (link(fmbox->path, saved) != 0)
		goto error;
	if (unlink(fmbox->path) != 0)
		goto error;
	if (link(path, fmbox->path) != 0)
		goto error;
	if (unlink(path) != 0)
		goto error;
	if (unlink(saved) != 0)
		goto error;

free_all:
	aux = TAILQ_FIRST(&data->kept);
	while (aux != NULL) {
		this = aux;
		aux = TAILQ_NEXT(aux, entry);

		if (this->fmbox == fmbox)
			fetch_mbox_free(this);
	}

	if (fmbox->reference != 0)
		fatalx("dangling reference");

	return (0);

error:
	if (fd != -1) {
		close(fd);
		unlink(path);
	}
	log_warn("%s: %s", a->name, fmbox->path);
	return (-1);
}
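
The "replacement dance" trades atomicity for recoverability: at every step the data exists under at least one of the three names, so a crash leaves something for the user to salvage. A condensed sketch of the same sequence (as in the original, nothing is rolled back on failure):

#include <unistd.h>

/* Replace 'target' with 'tmpfile', holding 'backup' as a spare
 * link while the swap is in flight.  Returns 0, or -1 with errno
 * set; intermediate files are left in place on failure. */
static int
replace_with_backup(const char *target, const char *tmpfile,
    const char *backup)
{
	if (link(target, backup) != 0)
		return (-1);
	if (unlink(target) != 0)
		return (-1);
	if (link(tmpfile, target) != 0)
		return (-1);
	if (unlink(tmpfile) != 0)
		return (-1);
	return (unlink(backup));
}
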
Example no. 8
/*
 * Obtain a dquot structure for the specified identifier and quota file
 * reading the information from the file if necessary.
 */
static int
dqget(struct vnode *vp, u_long id, struct ufsmount *ump, int type,
    struct dquot **dqp)
{
	uint8_t buf[sizeof(struct dqblk64)];
	off_t base, recsize;
	struct dquot *dq, *dq1;
	struct dqhash *dqh;
	struct vnode *dqvp;
	struct iovec aiov;
	struct uio auio;
	int dqvplocked, error;

#ifdef DEBUG_VFS_LOCKS
	if (vp != NULLVP)
		ASSERT_VOP_ELOCKED(vp, "dqget");
#endif

	if (vp != NULLVP && *dqp != NODQUOT) {
		return (0);
	}

	/*
	 * XXX: Disallow negative id values to prevent the
	 * creation of 100GB+ quota data files.
	 */
	if ((int)id < 0)
		return (EINVAL);

	UFS_LOCK(ump);
	dqvp = ump->um_quotas[type];
	if (dqvp == NULLVP || (ump->um_qflags[type] & QTF_CLOSING)) {
		*dqp = NODQUOT;
		UFS_UNLOCK(ump);
		return (EINVAL);
	}
	vref(dqvp);
	UFS_UNLOCK(ump);
	error = 0;
	dqvplocked = 0;

	/*
	 * Check the cache first.
	 */
	dqh = DQHASH(dqvp, id);
	DQH_LOCK();
	dq = dqhashfind(dqh, id, dqvp);
	if (dq != NULL) {
		DQH_UNLOCK();
hfound:		DQI_LOCK(dq);
		DQI_WAIT(dq, PINOD+1, "dqget");
		DQI_UNLOCK(dq);
		if (dq->dq_ump == NULL) {
			dqrele(vp, dq);
			dq = NODQUOT;
			error = EIO;
		}
		*dqp = dq;
		if (dqvplocked)
			vput(dqvp);
		else
			vrele(dqvp);
		return (error);
	}

	/*
	 * Quota vnode lock is before DQ_LOCK. Acquire dqvp lock there
	 * since new dq will appear on the hash chain DQ_LOCKed.
	 */
	if (vp != dqvp) {
		DQH_UNLOCK();
		vn_lock(dqvp, LK_SHARED | LK_RETRY);
		dqvplocked = 1;
		DQH_LOCK();
		/*
		 * Recheck the cache after sleep for quota vnode lock.
		 */
		dq = dqhashfind(dqh, id, dqvp);
		if (dq != NULL) {
			DQH_UNLOCK();
			goto hfound;
		}
	}

	/*
	 * Not in cache, allocate a new one or take it from the
	 * free list.
	 */
	if (TAILQ_FIRST(&dqfreelist) == NODQUOT &&
	    numdquot < MAXQUOTAS * desiredvnodes)
		desireddquot += DQUOTINC;
	if (numdquot < desireddquot) {
		numdquot++;
		DQH_UNLOCK();
		dq1 = malloc(sizeof *dq1, M_DQUOT, M_WAITOK | M_ZERO);
		mtx_init(&dq1->dq_lock, "dqlock", NULL, MTX_DEF);
		DQH_LOCK();
		/*
		 * Recheck the cache after sleep for memory.
		 */
		dq = dqhashfind(dqh, id, dqvp);
		if (dq != NULL) {
			numdquot--;
			DQH_UNLOCK();
			mtx_destroy(&dq1->dq_lock);
			free(dq1, M_DQUOT);
			goto hfound;
		}
		dq = dq1;
	} else {
		if ((dq = TAILQ_FIRST(&dqfreelist)) == NULL) {
			DQH_UNLOCK();
			tablefull("dquot");
			*dqp = NODQUOT;
			if (dqvplocked)
				vput(dqvp);
			else
				vrele(dqvp);
			return (EUSERS);
		}
		if (dq->dq_cnt || (dq->dq_flags & DQ_MOD))
			panic("dqget: free dquot isn't %p", dq);
		TAILQ_REMOVE(&dqfreelist, dq, dq_freelist);
		if (dq->dq_ump != NULL)
			LIST_REMOVE(dq, dq_hash);
	}

	/*
	 * Dq is put into hash already locked to prevent parallel
	 * usage while it is being read from file.
	 */
	dq->dq_flags = DQ_LOCK;
	dq->dq_id = id;
	dq->dq_type = type;
	dq->dq_ump = ump;
	LIST_INSERT_HEAD(dqh, dq, dq_hash);
	DQREF(dq);
	DQH_UNLOCK();

	/*
	 * Read the requested quota record from the quota file, performing
	 * any necessary conversions.
	 */
	if (ump->um_qflags[type] & QTF_64BIT) {
		recsize = sizeof(struct dqblk64);
		base = sizeof(struct dqhdr64);
	} else {
		recsize = sizeof(struct dqblk32);
		base = 0;
	}
	auio.uio_iov = &aiov;
	auio.uio_iovcnt = 1;
	aiov.iov_base = buf;
	aiov.iov_len = recsize;
	auio.uio_resid = recsize;
	auio.uio_offset = base + id * recsize;
	auio.uio_segflg = UIO_SYSSPACE;
	auio.uio_rw = UIO_READ;
	auio.uio_td = (struct thread *)0;

	error = VOP_READ(dqvp, &auio, 0, ump->um_cred[type]);
	if (auio.uio_resid == recsize && error == 0) {
		bzero(&dq->dq_dqb, sizeof(dq->dq_dqb));
	} else {
		if (ump->um_qflags[type] & QTF_64BIT)
			dqb64_dq((struct dqblk64 *)buf, dq);
		else
			dqb32_dq((struct dqblk32 *)buf, dq);
	}
	if (dqvplocked)
		vput(dqvp);
	else
		vrele(dqvp);
	/*
	 * I/O error in reading quota file, release
	 * quota structure and reflect problem to caller.
	 */
	if (error) {
		DQH_LOCK();
		dq->dq_ump = NULL;
		LIST_REMOVE(dq, dq_hash);
		DQH_UNLOCK();
		DQI_LOCK(dq);
		if (dq->dq_flags & DQ_WANT)
			wakeup(dq);
		dq->dq_flags = 0;
		DQI_UNLOCK(dq);
		dqrele(vp, dq);
		*dqp = NODQUOT;
		return (error);
	}
	DQI_LOCK(dq);
	/*
	 * Check for no limit to enforce.
	 * Initialize time values if necessary.
	 */
	if (dq->dq_isoftlimit == 0 && dq->dq_bsoftlimit == 0 &&
	    dq->dq_ihardlimit == 0 && dq->dq_bhardlimit == 0)
		dq->dq_flags |= DQ_FAKE;
	if (dq->dq_id != 0) {
		if (dq->dq_btime == 0) {
			dq->dq_btime = time_second + ump->um_btime[type];
			if (dq->dq_bsoftlimit &&
			    dq->dq_curblocks >= dq->dq_bsoftlimit)
				dq->dq_flags |= DQ_MOD;
		}
		if (dq->dq_itime == 0) {
			dq->dq_itime = time_second + ump->um_itime[type];
			if (dq->dq_isoftlimit &&
			    dq->dq_curinodes >= dq->dq_isoftlimit)
				dq->dq_flags |= DQ_MOD;
		}
	}
	DQI_WAKEUP(dq);
	DQI_UNLOCK(dq);
	*dqp = dq;
	return (0);
}
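
Twice above, dqget() drops the hash lock to sleep (first for the quota vnode lock, then for the allocation) and re-runs dqhashfind() afterwards, discarding its own work if another thread raced in. A minimal sketch of that unlock-allocate-relock-recheck pattern (the list-based cache and all names are invented for illustration):

#include <pthread.h>
#include <stdlib.h>

struct centry {
	long		key;
	struct centry	*next;
};

static pthread_mutex_t cache_mtx = PTHREAD_MUTEX_INITIALIZER;
static struct centry *cache_head;

static struct centry *
cache_lookup(long key)
{
	struct centry *e;

	for (e = cache_head; e != NULL; e = e->next)
		if (e->key == key)
			return (e);
	return (NULL);
}

static struct centry *
cache_get(long key)
{
	struct centry *e, *e1;

	pthread_mutex_lock(&cache_mtx);
	e = cache_lookup(key);
	if (e != NULL)
		goto out;
	pthread_mutex_unlock(&cache_mtx);
	e1 = calloc(1, sizeof(*e1));	/* may sleep; lock is dropped */
	pthread_mutex_lock(&cache_mtx);
	if (e1 == NULL)
		goto out;		/* allocation failed; return NULL */
	e = cache_lookup(key);		/* recheck after the sleep */
	if (e != NULL) {
		free(e1);		/* lost the race; discard ours */
	} else {
		e1->key = key;
		e1->next = cache_head;
		cache_head = e1;
		e = e1;
	}
out:
	pthread_mutex_unlock(&cache_mtx);
	return (e);
}
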
Example no. 9
enum cmd_retval
cmd_swap_pane_exec(struct cmd *self, struct cmd_q *cmdq)
{
	struct args		*args = self->args;
	struct winlink		*src_wl, *dst_wl;
	struct window		*src_w, *dst_w;
	struct window_pane	*tmp_wp, *src_wp, *dst_wp;
	struct layout_cell	*src_lc, *dst_lc;
	u_int			 sx, sy, xoff, yoff;

	dst_wl = cmd_find_pane(cmdq, args_get(args, 't'), NULL, &dst_wp);
	if (dst_wl == NULL)
		return (CMD_RETURN_ERROR);
	dst_w = dst_wl->window;
	server_unzoom_window(dst_w);

	if (!args_has(args, 's')) {
		src_w = dst_w;
		if (args_has(self->args, 'D')) {
			src_wp = TAILQ_NEXT(dst_wp, entry);
			if (src_wp == NULL)
				src_wp = TAILQ_FIRST(&dst_w->panes);
		} else if (args_has(self->args, 'U')) {
			src_wp = TAILQ_PREV(dst_wp, window_panes, entry);
			if (src_wp == NULL)
				src_wp = TAILQ_LAST(&dst_w->panes, window_panes);
		} else {
			src_wl = cmd_find_pane_marked(cmdq, NULL, NULL,
			    &src_wp);
			if (src_wl == NULL)
				return (CMD_RETURN_ERROR);
			src_w = src_wl->window;
		}
	} else {
		src_wl = cmd_find_pane_marked(cmdq, args_get(args, 's'), NULL,
		    &src_wp);
		if (src_wl == NULL)
			return (CMD_RETURN_ERROR);
		src_w = src_wl->window;
	}
	server_unzoom_window(src_w);

	if (src_wp == dst_wp)
		return (CMD_RETURN_NORMAL);

	tmp_wp = TAILQ_PREV(dst_wp, window_panes, entry);
	TAILQ_REMOVE(&dst_w->panes, dst_wp, entry);
	TAILQ_REPLACE(&src_w->panes, src_wp, dst_wp, entry);
	if (tmp_wp == src_wp)
		tmp_wp = dst_wp;
	if (tmp_wp == NULL)
		TAILQ_INSERT_HEAD(&dst_w->panes, src_wp, entry);
	else
		TAILQ_INSERT_AFTER(&dst_w->panes, tmp_wp, src_wp, entry);

	src_lc = src_wp->layout_cell;
	dst_lc = dst_wp->layout_cell;
	src_lc->wp = dst_wp;
	dst_wp->layout_cell = src_lc;
	dst_lc->wp = src_wp;
	src_wp->layout_cell = dst_lc;

	src_wp->window = dst_w;
	dst_wp->window = src_w;

	sx = src_wp->sx; sy = src_wp->sy;
	xoff = src_wp->xoff; yoff = src_wp->yoff;
	src_wp->xoff = dst_wp->xoff; src_wp->yoff = dst_wp->yoff;
	window_pane_resize(src_wp, dst_wp->sx, dst_wp->sy);
	dst_wp->xoff = xoff; dst_wp->yoff = yoff;
	window_pane_resize(dst_wp, sx, sy);

	if (!args_has(self->args, 'd')) {
		if (src_w != dst_w) {
			window_set_active_pane(src_w, dst_wp);
			window_set_active_pane(dst_w, src_wp);
		} else {
			tmp_wp = dst_wp;
			if (!window_pane_visible(tmp_wp))
				tmp_wp = src_wp;
			window_set_active_pane(src_w, tmp_wp);
		}
	} else {
		if (src_w->active == src_wp)
			window_set_active_pane(src_w, dst_wp);
		if (dst_w->active == dst_wp)
			window_set_active_pane(dst_w, src_wp);
	}
	if (src_w != dst_w) {
		if (src_w->last == src_wp)
			src_w->last = NULL;
		if (dst_w->last == dst_wp)
			dst_w->last = NULL;
	}
	server_redraw_window(src_w);
	server_redraw_window(dst_w);

	return (CMD_RETURN_NORMAL);
}
Example no. 10
static int
pft_refresh(void)
{
	struct pfioc_table io;
	struct pfr_tstats *t = NULL;
	struct pft_entry *e;
	int i, numtbls = 1;

	if (started && this_tick <= pf_tick)
		return (0);

	while (!TAILQ_EMPTY(&pft_table)) {
		e = TAILQ_FIRST(&pft_table);
		TAILQ_REMOVE(&pft_table, e, link);
		free(e);
	}

	bzero(&io, sizeof(io));
	io.pfrio_esize = sizeof(struct pfr_tstats);

	for (;;) {
		t = reallocf(t, numtbls * sizeof(struct pfr_tstats));
		if (t == NULL) {
			syslog(LOG_ERR, "pft_refresh(): reallocf() numtbls=%d: %s",
			    numtbls, strerror(errno));
			goto err2;
		}
		io.pfrio_size = numtbls;
		io.pfrio_buffer = t;

		if (ioctl(dev, DIOCRGETTSTATS, &io)) {
			syslog(LOG_ERR, "pft_refresh(): ioctl(): %s",
			    strerror(errno));
			goto err2;
		}

		if (numtbls >= io.pfrio_size)
			break;

		numtbls = io.pfrio_size;
	}

	for (i = 0; i < numtbls; i++) {
		e = malloc(sizeof(struct pft_entry));
		if (e == NULL)
			goto err1;
		e->index = i + 1;
		memcpy(&e->pft, t+i, sizeof(struct pfr_tstats));
		TAILQ_INSERT_TAIL(&pft_table, e, link);
	}

	pft_table_age = time(NULL);
	pft_table_count = numtbls;
	pf_tick = this_tick;

	free(t);
	return (0);
err1:
	while (!TAILQ_EMPTY(&pft_table)) {
		e = TAILQ_FIRST(&pft_table);
		TAILQ_REMOVE(&pft_table, e, link);
		free(e);
	}
err2:
	free(t);
	return(-1);
}
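
pft_refresh(), like pfa_refresh() and pfi_refresh() below, sizes its buffer by negotiation: pass a one-record buffer, let the kernel report the true count back in pfrio_size, grow, and retry until the guess was large enough. The idiom in isolation (fetch() is a hypothetical stand-in for the ioctl; note that unlike reallocf(), plain realloc() does not free the old buffer on failure, so the sketch does it by hand):

#include <stdlib.h>

/* fetch(buf, np) is assumed to copy out at most *np records, store
 * the total available count back into *np, and return 0 on success. */
extern int fetch(void *buf, int *np);

static void *
fetch_all(size_t recsize, int *countp)
{
	void *buf = NULL, *nbuf;
	int want = 1, have;

	for (;;) {
		nbuf = realloc(buf, (size_t)want * recsize);
		if (nbuf == NULL) {
			free(buf);
			return (NULL);
		}
		buf = nbuf;
		have = want;
		if (fetch(buf, &have) != 0) {
			free(buf);
			return (NULL);
		}
		if (want >= have)
			break;		/* buffer was large enough */
		want = have;		/* grow to the reported count, retry */
	}
	*countp = have;
	return (buf);
}
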
Example no. 11
static int
pfa_refresh(void)
{
	struct pfioc_table io;
	struct pfr_table *pt = NULL, *it = NULL;
	struct pfa_entry *e;
	int i, numtbls = 1, cidx, naddrs;

	if (started && this_tick <= pf_tick)
		return (0);

	while (!TAILQ_EMPTY(&pfa_table)) {
		e = TAILQ_FIRST(&pfa_table);
		TAILQ_REMOVE(&pfa_table, e, link);
		free(e);
	}

	memset(&io, 0, sizeof(io));
	io.pfrio_esize = sizeof(struct pfr_table);

	for (;;) {
		pt = reallocf(pt, numtbls * sizeof(struct pfr_table));
		if (pt == NULL) {
			syslog(LOG_ERR, "pfa_refresh(): reallocf() %s",
			    strerror(errno));
			return (-1);
		}
		memset(pt, 0, sizeof(*pt));
		io.pfrio_size = numtbls;
		io.pfrio_buffer = pt;

		if (ioctl(dev, DIOCRGETTABLES, &io)) {
			syslog(LOG_ERR, "pfa_refresh(): ioctl(): %s",
			    strerror(errno));
			goto err2;
		}

		if (numtbls >= io.pfrio_size)
			break;

		numtbls = io.pfrio_size;
	}

	cidx = 1;

	for (it = pt, i = 0; i < numtbls; it++, i++) {
		/*
		 * Skip the table if not active - ioctl(DIOCRGETASTATS) will
		 * return ESRCH for this entry anyway.
		 */
		if (!(it->pfrt_flags & PFR_TFLAG_ACTIVE))
			continue;

		if ((naddrs = pfa_table_addrs(cidx, it)) < 0)
			goto err1;

		cidx += naddrs;
	}

	pfa_table_age = time(NULL);
	pfa_table_count = cidx;
	pf_tick = this_tick;

	free(pt);
	return (0);
err1:
	while (!TAILQ_EMPTY(&pfa_table)) {
		e = TAILQ_FIRST(&pfa_table);
		TAILQ_REMOVE(&pfa_table, e, link);
		free(e);
	}

err2:
	free(pt);
	return (-1);
}
Example no. 12
static int
pfq_refresh(void)
{
	struct pfioc_altq pa;
	struct pfq_entry *e;
	int i, numqs, ticket;

	if (started && this_tick <= pf_tick)
		return (0);

	while (!TAILQ_EMPTY(&pfq_table)) {
		e = TAILQ_FIRST(&pfq_table);
		TAILQ_REMOVE(&pfq_table, e, link);
		free(e);
	}

	bzero(&pa, sizeof(pa));
	
	if (ioctl(dev, DIOCGETALTQS, &pa)) {
		syslog(LOG_ERR, "pfq_refresh: ioctl(DIOCGETALTQS): %s",
		    strerror(errno));
		return (-1);
	}

	numqs = pa.nr;
	ticket = pa.ticket;

	for (i = 0; i < numqs; i++) {
		e = malloc(sizeof(struct pfq_entry));
		if (e == NULL) {
			syslog(LOG_ERR, "pfq_refresh(): "
			    "malloc(): %s",
			    strerror(errno));
			goto err;
		}
		pa.ticket = ticket;
		pa.nr = i;

		if (ioctl(dev, DIOCGETALTQ, &pa)) {
			syslog(LOG_ERR, "pfq_refresh(): "
			    "ioctl(DIOCGETALTQ): %s",
			    strerror(errno));
			goto err;
		}

		if (pa.altq.qid > 0) {
			memcpy(&e->altq, &pa.altq, sizeof(struct pf_altq));
			e->index = pa.altq.qid;
			pfq_table_count = i;
			INSERT_OBJECT_INT_LINK_INDEX(e, &pfq_table, link, index);
		}
	}
	
	pfq_table_age = time(NULL);
	pf_tick = this_tick;

	return (0);
err:
	free(e);
	while (!TAILQ_EMPTY(&pfq_table)) {
		e = TAILQ_FIRST(&pfq_table);
		TAILQ_REMOVE(&pfq_table, e, link);
		free(e);
	}
	return(-1);
}
Example no. 13
static int
pfi_refresh(void)
{
	struct pfioc_iface io;
	struct pfi_kif *p = NULL;
	struct pfi_entry *e;
	int i, numifs = 1;

	if (started && this_tick <= pf_tick)
		return (0);

	while (!TAILQ_EMPTY(&pfi_table)) {
		e = TAILQ_FIRST(&pfi_table);
		TAILQ_REMOVE(&pfi_table, e, link);
		free(e);
	}

	bzero(&io, sizeof(io));
	io.pfiio_esize = sizeof(struct pfi_kif);

	for (;;) {
		p = reallocf(p, numifs * sizeof(struct pfi_kif));
		if (p == NULL) {
			syslog(LOG_ERR, "pfi_refresh(): reallocf() numifs=%d: %s",
			    numifs, strerror(errno));
			goto err2;
		}
		io.pfiio_size = numifs;
		io.pfiio_buffer = p;

		if (ioctl(dev, DIOCIGETIFACES, &io)) {
			syslog(LOG_ERR, "pfi_refresh(): ioctl(): %s",
			    strerror(errno));
			goto err2;
		}

		if (numifs >= io.pfiio_size)
			break;

		numifs = io.pfiio_size;
	}

	for (i = 0; i < numifs; i++) {
		e = malloc(sizeof(struct pfi_entry));
		if (e == NULL)
			goto err1;
		e->index = i + 1;
		memcpy(&e->pfi, p+i, sizeof(struct pfi_kif));
		TAILQ_INSERT_TAIL(&pfi_table, e, link);
	}

	pfi_table_age = time(NULL);
	pfi_table_count = numifs;
	pf_tick = this_tick;

	free(p);
	return (0);

err1:
	while (!TAILQ_EMPTY(&pfi_table)) {
		e = TAILQ_FIRST(&pfi_table);
		TAILQ_REMOVE(&pfi_table, e, link);
		free(e);
	}
err2:
	free(p);
	return(-1);
}
Example no. 14
static enum cmd_retval
cmd_join_pane_exec(struct cmd *self, struct cmdq_item *item)
{
	struct args		*args = self->args;
	struct session		*dst_s;
	struct winlink		*src_wl, *dst_wl;
	struct window		*src_w, *dst_w;
	struct window_pane	*src_wp, *dst_wp;
	char			*cause;
	int			 size, percentage, dst_idx;
	enum layout_type	 type;
	struct layout_cell	*lc;
	int			 not_same_window;

	if (self->entry == &cmd_join_pane_entry)
		not_same_window = 1;
	else
		not_same_window = 0;

	dst_s = item->state.tflag.s;
	dst_wl = item->state.tflag.wl;
	dst_wp = item->state.tflag.wp;
	dst_w = dst_wl->window;
	dst_idx = dst_wl->idx;
	server_unzoom_window(dst_w);

	src_wl = item->state.sflag.wl;
	src_wp = item->state.sflag.wp;
	src_w = src_wl->window;
	server_unzoom_window(src_w);

	if (not_same_window && src_w == dst_w) {
		cmdq_error(item, "can't join a pane to its own window");
		return (CMD_RETURN_ERROR);
	}
	if (!not_same_window && src_wp == dst_wp) {
		cmdq_error(item, "source and target panes must be different");
		return (CMD_RETURN_ERROR);
	}

	type = LAYOUT_TOPBOTTOM;
	if (args_has(args, 'h'))
		type = LAYOUT_LEFTRIGHT;

	size = -1;
	if (args_has(args, 'l')) {
		size = args_strtonum(args, 'l', 0, INT_MAX, &cause);
		if (cause != NULL) {
			cmdq_error(item, "size %s", cause);
			free(cause);
			return (CMD_RETURN_ERROR);
		}
	} else if (args_has(args, 'p')) {
		percentage = args_strtonum(args, 'p', 0, 100, &cause);
		if (cause != NULL) {
			cmdq_error(item, "percentage %s", cause);
			free(cause);
			return (CMD_RETURN_ERROR);
		}
		if (type == LAYOUT_TOPBOTTOM)
			size = (dst_wp->sy * percentage) / 100;
		else
			size = (dst_wp->sx * percentage) / 100;
	}
	lc = layout_split_pane(dst_wp, type, size, args_has(args, 'b'), 0);
	if (lc == NULL) {
		cmdq_error(item, "create pane failed: pane too small");
		return (CMD_RETURN_ERROR);
	}

	layout_close_pane(src_wp);

	window_lost_pane(src_w, src_wp);
	TAILQ_REMOVE(&src_w->panes, src_wp, entry);

	src_wp->window = dst_w;
	TAILQ_INSERT_AFTER(&dst_w->panes, dst_wp, src_wp, entry);
	layout_assign_pane(lc, src_wp);

	recalculate_sizes();

	server_redraw_window(src_w);
	server_redraw_window(dst_w);

	if (!args_has(args, 'd')) {
		window_set_active_pane(dst_w, src_wp);
		session_select(dst_s, dst_idx);
		server_redraw_session(dst_s);
	} else
		server_status_session(dst_s);

	if (window_count_panes(src_w) == 0)
		server_kill_window(src_w);
	else
		notify_window("window-layout-changed", src_w);
	notify_window("window-layout-changed", dst_w);

	return (CMD_RETURN_NORMAL);
}
Example no. 15
static void *
dvr_thread(void *aux)
{
  dvr_entry_t *de = aux;
  dvr_config_t *cfg = de->de_config;
  profile_chain_t *prch = de->de_chain;
  streaming_queue_t *sq = &prch->prch_sq;
  streaming_message_t *sm;
  th_subscription_t *ts;
  th_pkt_t *pkt;
  int run = 1;
  int started = 0;
  int comm_skip = cfg->dvr_skip_commercials;
  int commercial = COMMERCIAL_UNKNOWN;

  pthread_mutex_lock(&sq->sq_mutex);

  while(run) {
    sm = TAILQ_FIRST(&sq->sq_queue);
    if(sm == NULL) {
      pthread_cond_wait(&sq->sq_cond, &sq->sq_mutex);
      continue;
    }

    if ((ts = de->de_s) != NULL && started) {
      pktbuf_t *pb = NULL;
      if (sm->sm_type == SMT_PACKET) {
        pb = ((th_pkt_t*)sm->sm_data)->pkt_payload;
        if (((th_pkt_t*)sm->sm_data)->pkt_err) {
          de->de_data_errors += ((th_pkt_t*)sm->sm_data)->pkt_err;
          dvr_notify(de, 0);
        }
      }
      else if (sm->sm_type == SMT_MPEGTS) {
        pb = sm->sm_data;
        if (pb->pb_err) {
          de->de_data_errors += pb->pb_err;
          dvr_notify(de, 0);
        }
      }
      if (pb)
        atomic_add(&ts->ths_bytes_out, pktbuf_len(pb));
    }

    TAILQ_REMOVE(&sq->sq_queue, sm, sm_link);

    pthread_mutex_unlock(&sq->sq_mutex);

    switch(sm->sm_type) {

    case SMT_PACKET:
      pkt = sm->sm_data;
      if(pkt->pkt_commercial == COMMERCIAL_YES)
	dvr_rec_set_state(de, DVR_RS_COMMERCIAL, 0);
      else
	dvr_rec_set_state(de, DVR_RS_RUNNING, 0);

      if(pkt->pkt_commercial == COMMERCIAL_YES && comm_skip)
	break;

      if(commercial != pkt->pkt_commercial)
	muxer_add_marker(prch->prch_muxer);

      commercial = pkt->pkt_commercial;

      if(started) {
	muxer_write_pkt(prch->prch_muxer, sm->sm_type, sm->sm_data);
	sm->sm_data = NULL;
	dvr_notify(de, 0);
      }
      break;

    case SMT_MPEGTS:
      if(started) {
	dvr_rec_set_state(de, DVR_RS_RUNNING, 0);
	muxer_write_pkt(prch->prch_muxer, sm->sm_type, sm->sm_data);
	sm->sm_data = NULL;
	dvr_notify(de, 0);
      }
      break;

    case SMT_START:
      if(started &&
	 muxer_reconfigure(prch->prch_muxer, sm->sm_data) < 0) {
	tvhlog(LOG_WARNING,
	       "dvr", "Unable to reconfigure \"%s\"",
	       de->de_filename ?: lang_str_get(de->de_title, NULL));

	// Try to restart the recording if the muxer doesn't
	// support reconfiguration of the streams.
	dvr_thread_epilog(de);
	started = 0;
      }

      if(!started) {
        pthread_mutex_lock(&global_lock);
        dvr_rec_set_state(de, DVR_RS_WAIT_PROGRAM_START, 0);
        if(dvr_rec_start(de, sm->sm_data) == 0) {
          started = 1;
          idnode_changed(&de->de_id);
          htsp_dvr_entry_update(de);
        }
        pthread_mutex_unlock(&global_lock);
      } 
      break;

    case SMT_STOP:
       if(sm->sm_code == SM_CODE_SOURCE_RECONFIGURED) {
	 // Subscription is restarting, wait for SMT_START

       } else if(sm->sm_code == 0) {
	 // Recording is completed

	de->de_last_error = 0;
	tvhlog(LOG_INFO, 
	       "dvr", "Recording completed: \"%s\"",
	       de->de_filename ?: lang_str_get(de->de_title, NULL));

	dvr_thread_epilog(de);
	started = 0;

      } else if (de->de_last_error != sm->sm_code) {
Example no. 16
/*
 * Handle a receive on a queue servicing a message endpoint
 */
static inline void
usdf_msg_handle_recv(struct usdf_domain *udp, struct usd_completion *comp)
{
	struct rudp_pkt *pkt;
	struct usdf_msg_qe *rqe;
	struct usdf_ep *ep;
	struct usd_qp *qp;
	struct usdf_rx *rx;
	uint32_t peer_id;
	uint32_t opcode;
	uint8_t *rx_ptr;
	uint8_t *rqe_ptr;
	size_t cur_iov;
	size_t iov_resid;
	size_t ms_resid;
	size_t rxlen;
	size_t copylen;
	int ret;

	pkt = comp->uc_context;
	opcode = ntohs(pkt->msg.opcode);
	peer_id = ntohs(pkt->msg.src_peer_id);
	if (peer_id > USDF_MAX_PEERS) {
		qp = comp->uc_qp;
		rx = qp->uq_context;
		goto dropit;
	}
	ep = udp->dom_peer_tab[peer_id];
	if (ep == NULL) {
		qp = comp->uc_qp;
		rx = qp->uq_context;
		goto dropit;
	}
	rx = ep->ep_rx;

	if (comp->uc_status != USD_COMPSTAT_SUCCESS)
		goto dropit;

	switch (opcode) {
	case RUDP_OP_ACK:
		usdf_msg_rx_ack(ep, pkt);
		goto dropit;
	case RUDP_OP_NAK:
		usdf_msg_rx_nak(ep, pkt);
		goto dropit;
	case RUDP_OP_FIRST:
	case RUDP_OP_LAST:
		break;
	default:
		USDF_DBG_SYS(EP_DATA,
				"encountered unexpected opcode %" PRIu32 "\n",
				opcode);
		goto dropit;
	}

	ret = usdf_msg_check_seq(ep, pkt);
	if (ret == -1) {
		goto dropit;
	}

	rqe = ep->e.msg.ep_cur_recv;
	if (rqe == NULL) {
		if (TAILQ_EMPTY(&rx->r.msg.rx_posted_rqe)) {
			goto dropit;
		}
		rqe = TAILQ_FIRST(&rx->r.msg.rx_posted_rqe);
		TAILQ_REMOVE(&rx->r.msg.rx_posted_rqe, rqe, ms_link);
		ep->e.msg.ep_cur_recv = rqe;
	}

	rx_ptr = (uint8_t *)(pkt + 1);
	rxlen = ntohs(pkt->msg.m.rc_data.length);
	rqe->ms_length += rxlen;
	rqe_ptr = (uint8_t *)rqe->ms_cur_ptr;
	iov_resid = rqe->ms_iov_resid;
	cur_iov = rqe->ms_cur_iov;
	ms_resid = rqe->ms_resid;
	while (rxlen > 0) {
		copylen = MIN(rxlen, iov_resid);
		memcpy(rqe_ptr, rx_ptr, copylen);
		rx_ptr += copylen;
		rxlen -= copylen;
		iov_resid -= copylen;
		ms_resid -= copylen;
		if (iov_resid == 0) {
			if (cur_iov == rqe->ms_last_iov) {
				break;
			}
			++cur_iov;
			rqe_ptr = rqe->ms_iov[cur_iov].iov_base;
			iov_resid = rqe->ms_iov[cur_iov].iov_len;
		} else {
			rqe_ptr += copylen;
		}
	}

	if (opcode & RUDP_OP_LAST) {
		/*
		* Normally we need to store back the updated values of
		* ms_resid, ms_cur_iov, ms_cur_ptr and ms_iov_resid. But
		* as this is the last step of the process, updating these
		* values is not necessary.
		*/
		if (rxlen > 0) {
			USDF_DBG_SYS(EP_DATA, "message truncated by %zu bytes",
					rxlen);
			rqe->ms_length -= rxlen;
			usdf_msg_recv_complete(ep, rqe, FI_ETRUNC);
		} else {
			usdf_msg_recv_complete(ep, rqe, FI_SUCCESS);
		}

		ep->e.msg.ep_cur_recv = NULL;
	} else {
		rqe->ms_cur_ptr = rqe_ptr;
		rqe->ms_iov_resid = iov_resid;
		rqe->ms_cur_iov = cur_iov;
		rqe->ms_resid = ms_resid;
	}

dropit:
	/* repost buffer */
	_usdf_msg_post_recv(rx, pkt,
			rx->rx_domain->dom_fabric->fab_dev_attrs->uda_mtu);
}
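
The inner while loop above scatters the packet payload across the posted iovec array, advancing to the next iovec whenever the current one fills. The same scatter-copy in isolation (starting from the first iovec rather than from saved cursor state):

#include <stdint.h>
#include <string.h>
#include <sys/uio.h>

/* Scatter 'len' bytes from 'src' across 'niov' iovecs; returns the
 * number of bytes copied, which is short if the iovecs run out. */
static size_t
scatter_copy(const uint8_t *src, size_t len,
    const struct iovec *iov, size_t niov)
{
	size_t i, n, done = 0;

	for (i = 0; i < niov && done < len; i++) {
		n = len - done;
		if (n > iov[i].iov_len)
			n = iov[i].iov_len;
		memcpy(iov[i].iov_base, src + done, n);
		done += n;
	}
	return (done);
}
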
Example no. 17
/*
 * vm_contig_pg_clean:
 * 
 * Do a thorough cleanup of the specified 'queue', which can be either
 * PQ_ACTIVE or PQ_INACTIVE by doing a walkthrough.  If the page is not
 * marked dirty, it is shoved into the page cache, provided no one has
 * currently acquired it; otherwise, localized action per object type
 * is taken for cleanup:
 *
 * 	In the OBJT_VNODE case, the whole page range is cleaned up
 * 	using the vm_object_page_clean() routine, by specifying a
 * 	start and end of '0'.
 *
 * 	Otherwise if the object is of any other type, the generic
 * 	pageout (daemon) flush routine is invoked.
 */
static void
vm_contig_pg_clean(int queue, int count)
{
	vm_object_t object;
	vm_page_t m, m_tmp;
	struct vm_page marker;
	struct vpgqueues *pq = &vm_page_queues[queue];

	/*
	 * Setup a local marker
	 */
	bzero(&marker, sizeof(marker));
	marker.flags = PG_BUSY | PG_FICTITIOUS | PG_MARKER;
	marker.queue = queue;
	marker.wire_count = 1;

	vm_page_queues_spin_lock(queue);
	TAILQ_INSERT_HEAD(&pq->pl, &marker, pageq);
	vm_page_queues_spin_unlock(queue);

	/*
	 * Iterate the queue.  Note that the vm_page spinlock must be
	 * acquired before the pageq spinlock so it's easiest to simply
	 * not hold it in the loop iteration.
	 */
	while (count-- > 0 && (m = TAILQ_NEXT(&marker, pageq)) != NULL) {
		vm_page_and_queue_spin_lock(m);
		if (m != TAILQ_NEXT(&marker, pageq)) {
			vm_page_and_queue_spin_unlock(m);
			++count;
			continue;
		}
		KKASSERT(m->queue == queue);

		TAILQ_REMOVE(&pq->pl, &marker, pageq);
		TAILQ_INSERT_AFTER(&pq->pl, m, &marker, pageq);

		if (m->flags & PG_MARKER) {
			vm_page_and_queue_spin_unlock(m);
			continue;
		}
		if (vm_page_busy_try(m, TRUE)) {
			vm_page_and_queue_spin_unlock(m);
			continue;
		}
		vm_page_and_queue_spin_unlock(m);

		/*
		 * We've successfully busied the page
		 */
		if (m->queue - m->pc != queue) {
			vm_page_wakeup(m);
			continue;
		}
		if (m->wire_count || m->hold_count) {
			vm_page_wakeup(m);
			continue;
		}
		if ((object = m->object) == NULL) {
			vm_page_wakeup(m);
			continue;
		}
		vm_page_test_dirty(m);
		if (m->dirty || (m->flags & PG_NEED_COMMIT)) {
			vm_object_hold(object);
			KKASSERT(m->object == object);

			if (object->type == OBJT_VNODE) {
				vm_page_wakeup(m);
				vn_lock(object->handle, LK_EXCLUSIVE|LK_RETRY);
				vm_object_page_clean(object, 0, 0, OBJPC_SYNC);
				vn_unlock(((struct vnode *)object->handle));
			} else if (object->type == OBJT_SWAP ||
					object->type == OBJT_DEFAULT) {
				m_tmp = m;
				vm_pageout_flush(&m_tmp, 1, 0);
			} else {
				vm_page_wakeup(m);
			}
			vm_object_drop(object);
		} else if (m->hold_count == 0) {
			vm_page_cache(m);
		} else {
			vm_page_wakeup(m);
		}
	}

	/*
	 * Scrap our local marker
	 */
	vm_page_queues_spin_lock(queue);
	TAILQ_REMOVE(&pq->pl, &marker, pageq);
	vm_page_queues_spin_unlock(queue);
}
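
The marker is what makes this scan safe: it is re-inserted after the current page before the locks are dropped, so the traversal resumes at TAILQ_NEXT(&marker, ...) no matter what happened to the rest of the queue in the meantime. The skeleton of the technique (locking elided; PG_MARKER and the page type are reduced to the minimum):

#include <sys/queue.h>

#define PG_MARKER	0x1

struct page;
TAILQ_HEAD(pageq, page);
struct page {
	int			flags;
	TAILQ_ENTRY(page)	pageq;
};

static void
scan_with_marker(struct pageq *q)
{
	struct page marker, *m;

	marker.flags = PG_MARKER;
	TAILQ_INSERT_HEAD(q, &marker, pageq);
	while ((m = TAILQ_NEXT(&marker, pageq)) != NULL) {
		/* Advance the marker past 'm' before processing it. */
		TAILQ_REMOVE(q, &marker, pageq);
		TAILQ_INSERT_AFTER(q, m, &marker, pageq);
		if (m->flags & PG_MARKER)
			continue;	/* skip other scanners' markers */
		/* ... drop the queue lock, process 'm', retake it ... */
	}
	TAILQ_REMOVE(q, &marker, pageq);
}
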
Example no. 18
/*
 *  Discovery thread
 */
static void *
upnp_thread( void *aux )
{
  char *bindaddr = aux;
  tvhpoll_t *poll = tvhpoll_create(2);
  tvhpoll_event_t ev[2];
  upnp_data_t *data;
  udp_connection_t *multicast = NULL, *unicast = NULL;
  udp_connection_t *conn;
  unsigned char buf[16384];
  upnp_service_t *us;
  struct sockaddr_storage ip;
  socklen_t iplen;
  size_t size;
  int r;

  multicast = udp_bind("upnp", "upnp_thread_multicast",
                       "239.255.255.250", 1900,
                       NULL, 32*1024);
  if (multicast == NULL || multicast == UDP_FATAL_ERROR)
    goto error;
  unicast = udp_bind("upnp", "upnp_thread_unicast", bindaddr, 0,
                     NULL, 32*1024);
  if (unicast == NULL || unicast == UDP_FATAL_ERROR)
    goto error;

  memset(&ev, 0, sizeof(ev));
  ev[0].fd       = multicast->fd;
  ev[0].events   = TVHPOLL_IN;
  ev[0].data.ptr = multicast;
  ev[1].fd       = unicast->fd;
  ev[1].events   = TVHPOLL_IN;
  ev[1].data.ptr = unicast;
  tvhpoll_add(poll, ev, 2);

  while (upnp_running && multicast->fd >= 0) {
    r = tvhpoll_wait(poll, ev, 2, 1000);

    while (r-- > 0) {
      if ((ev[r].events & TVHPOLL_IN) != 0) {
        conn = ev[r].data.ptr;
        iplen = sizeof(ip);
        size = recvfrom(conn->fd, buf, sizeof(buf), 0,
                                           (struct sockaddr *)&ip, &iplen);
#if ENABLE_TRACE
        if (size > 0) {
          char tbuf[256];
          inet_ntop(ip.ss_family, IP_IN_ADDR(ip), tbuf, sizeof(tbuf));
          tvhtrace("upnp", "%s - received data from %s:%hu [size=%zi]",
                   conn == multicast ? "multicast" : "unicast",
                   tbuf, (unsigned short) IP_PORT(ip), size);
          tvhlog_hexdump("upnp", buf, size);
        }
#endif
        /* TODO: a filter */
        TAILQ_FOREACH(us, &upnp_services, us_link)
          us->us_received(buf, size, conn, &ip);
      }
    }

    while (1) {
      pthread_mutex_lock(&upnp_lock);
      data = TAILQ_FIRST(&upnp_data_write);
      if (data)
        TAILQ_REMOVE(&upnp_data_write, data, data_link);
      pthread_mutex_unlock(&upnp_lock);
      if (data == NULL)
        break;
      udp_write_queue(unicast, &data->queue, &data->storage);
      htsbuf_queue_flush(&data->queue);
      free(data);
    }
  }

error:
  upnp_running = 0;
  tvhpoll_destroy(poll);
  udp_close(unicast);
  udp_close(multicast);
  return NULL;
}
Example no. 19
static void rd_kafka_timer_unschedule (rd_kafka_timers_t *rkts,
                                       rd_kafka_timer_t *rtmr) {
	TAILQ_REMOVE(&rkts->rkts_timers, rtmr, rtmr_link);
	rtmr->rtmr_next = 0;
}
Example no. 20
void
req_recv_done(struct context *ctx, struct conn *conn, struct msg *msg,
              struct msg *nmsg)
{
    rstatus_t status;
    struct server_pool *pool;
    struct msg_tqh frag_msgq;
    struct msg *sub_msg;
    struct msg *tmsg; 			/* tmp next message */

    ASSERT(conn->client && !conn->proxy);
    ASSERT(msg->request);
    ASSERT(msg->owner == conn);
    ASSERT(conn->rmsg == msg);
    ASSERT(nmsg == NULL || nmsg->request);

    /* enqueue next message (request), if any */
    conn->rmsg = nmsg;

    if (req_filter(ctx, conn, msg)) {
        return;
    }

    if (msg->noforward) {
        status = req_make_reply(ctx, conn, msg);
        if (status != NC_OK) {
            conn->err = errno;
            return;
        }

        status = msg->reply(msg);
        if (status != NC_OK) {
            conn->err = errno;
            return;
        }

        status = event_add_out(ctx->evb, conn);
        if (status != NC_OK) {
            conn->err = errno;
        }

        return;
    }

    /* do fragment */
    pool = conn->owner;
    TAILQ_INIT(&frag_msgq);
    status = msg->fragment(msg, pool->ncontinuum, &frag_msgq);
    if (status != NC_OK) {
        if (!msg->noreply) {
            conn->enqueue_outq(ctx, conn, msg);
        }
        req_forward_error(ctx, conn, msg);
    }

    /* if no fragment happened */
    if (TAILQ_EMPTY(&frag_msgq)) {
        req_forward(ctx, conn, msg);
        return;
    }

    status = req_make_reply(ctx, conn, msg);
    if (status != NC_OK) {
        if (!msg->noreply) {
            conn->enqueue_outq(ctx, conn, msg);
        }
        req_forward_error(ctx, conn, msg);
    }

    for (sub_msg = TAILQ_FIRST(&frag_msgq); sub_msg != NULL; sub_msg = tmsg) {
        tmsg = TAILQ_NEXT(sub_msg, m_tqe);

        TAILQ_REMOVE(&frag_msgq, sub_msg, m_tqe);
        req_forward(ctx, conn, sub_msg);
    }

    ASSERT(TAILQ_EMPTY(&frag_msgq));
    return;
}
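
The final loop fetches TAILQ_NEXT() before removing each fragment, because removal invalidates the removed node's linkage. BSD sys/queue.h packages that pattern as TAILQ_FOREACH_SAFE (not every libc ships it); a sketch with a placeholder handler:

#include <sys/queue.h>

struct msg;
TAILQ_HEAD(msg_tqh, msg);
struct msg {
	TAILQ_ENTRY(msg) m_tqe;
};

extern void handle(struct msg *);	/* stand-in for req_forward() */

static void
drain(struct msg_tqh *q)
{
	struct msg *m, *next;

	TAILQ_FOREACH_SAFE(m, q, m_tqe, next) {
		TAILQ_REMOVE(q, m, m_tqe);
		handle(m);
	}
}
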
Example no. 21
/*
 * Shutdown the system cleanly to prepare for reboot, halt, or power off.
 */
void
kern_reboot(int howto)
{
	static int first_buf_printf = 1;

#if defined(SMP)
	/*
	 * Bind us to CPU 0 so that all shutdown code runs there.  Some
	 * systems don't shutdown properly (i.e., ACPI power off) if we
	 * run on another processor.
	 */
	if (!SCHEDULER_STOPPED()) {
		thread_lock(curthread);
		sched_bind(curthread, 0);
		thread_unlock(curthread);
		KASSERT(PCPU_GET(cpuid) == 0, ("boot: not running on cpu 0"));
	}
#endif
	/* We're in the process of rebooting. */
	rebooting = 1;

	/* collect extra flags that shutdown_nice might have set */
	howto |= shutdown_howto;

	/* We are out of the debugger now. */
	kdb_active = 0;

	/*
	 * Do any callouts that should be done BEFORE syncing the filesystems.
	 */
	EVENTHANDLER_INVOKE(shutdown_pre_sync, howto);

	/* 
	 * Now sync filesystems
	 */
	if (!cold && (howto & RB_NOSYNC) == 0 && waittime < 0) {
		register struct buf *bp;
		int iter, nbusy, pbusy;
#ifndef PREEMPTION
		int subiter;
#endif

		waittime = 0;

		wdog_kern_pat(WD_LASTVAL);
		sys_sync(curthread, NULL);

		/*
		 * With soft updates, some buffers that are
		 * written will be remarked as dirty until other
		 * buffers are written.
		 */
		for (iter = pbusy = 0; iter < 20; iter++) {
			nbusy = 0;
			for (bp = &buf[nbuf]; --bp >= buf; )
				if (isbufbusy(bp))
					nbusy++;
			if (nbusy == 0) {
				if (first_buf_printf)
					printf("All buffers synced.");
				break;
			}
			if (first_buf_printf) {
				printf("Syncing disks, buffers remaining... ");
				first_buf_printf = 0;
			}
			printf("%d ", nbusy);
			if (nbusy < pbusy)
				iter = 0;
			pbusy = nbusy;

			wdog_kern_pat(WD_LASTVAL);
			sys_sync(curthread, NULL);

#ifdef PREEMPTION
			/*
			 * Drop Giant and spin for a while to allow
			 * interrupt threads to run.
			 */
			DROP_GIANT();
			DELAY(50000 * iter);
			PICKUP_GIANT();
#else
			/*
			 * Drop Giant and context switch several times to
			 * allow interrupt threads to run.
			 */
			DROP_GIANT();
			for (subiter = 0; subiter < 50 * iter; subiter++) {
				thread_lock(curthread);
				mi_switch(SW_VOL, NULL);
				thread_unlock(curthread);
				DELAY(1000);
			}
			PICKUP_GIANT();
#endif
		}
		printf("\n");
		/*
		 * Count only busy local buffers to prevent forcing 
		 * a fsck if we're just a client of a wedged NFS server
		 */
		nbusy = 0;
		for (bp = &buf[nbuf]; --bp >= buf; ) {
			if (isbufbusy(bp)) {
#if 0
/* XXX: This is bogus.  We should probably have a BO_REMOTE flag instead */
				if (bp->b_dev == NULL) {
					TAILQ_REMOVE(&mountlist,
					    bp->b_vp->v_mount, mnt_list);
					continue;
				}
#endif
				nbusy++;
				if (show_busybufs > 0) {
					printf(
	    "%d: buf:%p, vnode:%p, flags:%0x, blkno:%jd, lblkno:%jd, buflock:",
					    nbusy, bp, bp->b_vp, bp->b_flags,
					    (intmax_t)bp->b_blkno,
					    (intmax_t)bp->b_lblkno);
					BUF_LOCKPRINTINFO(bp);
					if (show_busybufs > 1)
						vn_printf(bp->b_vp,
						    "vnode content: ");
				}
			}
		}
		if (nbusy) {
			/*
			 * Failed to sync all blocks. Indicate this and don't
			 * unmount filesystems (thus forcing an fsck on reboot).
			 */
			printf("Giving up on %d buffers\n", nbusy);
			DELAY(5000000);	/* 5 seconds */
		} else {
			if (!first_buf_printf)
				printf("Final sync complete\n");
			/*
			 * Unmount filesystems
			 */
			if (panicstr == 0)
				vfs_unmountall();
		}
		swapoff_all();
		DELAY(100000);		/* wait for console output to finish */
	}

	print_uptime();

	cngrab();

	/*
	 * Ok, now do things that assume all filesystem activity has
	 * been completed.
	 */
	EVENTHANDLER_INVOKE(shutdown_post_sync, howto);

	if ((howto & (RB_HALT|RB_DUMP)) == RB_DUMP && !cold && !dumping) 
		doadump(TRUE);

	/* Now that we're going to really halt the system... */
	EVENTHANDLER_INVOKE(shutdown_final, howto);

	for(;;) ;	/* safety against shutdown_reset not working */
	/* NOTREACHED */
}
Example no. 22
int
accept (int s, struct sockaddr *name, int *namelen)
{
	int fd;
	struct socket *head, *so;
	struct mbuf *nam;

	rtems_bsdnet_semaphore_obtain ();
	if ((head = rtems_bsdnet_fdToSocket (s)) == NULL) {
		rtems_bsdnet_semaphore_release ();
		return -1;
	}
	if ((head->so_options & SO_ACCEPTCONN) == 0) {
		errno = EINVAL;
		rtems_bsdnet_semaphore_release ();
		return -1;
	}
	if ((head->so_state & SS_NBIO) && head->so_comp.tqh_first == NULL) {
		errno = EWOULDBLOCK;
		rtems_bsdnet_semaphore_release ();
		return -1;
	}
	while (head->so_comp.tqh_first == NULL && head->so_error == 0) {
		if (head->so_state & SS_CANTRCVMORE) {
			head->so_error = ECONNABORTED;
			break;
		}
		head->so_error = soconnsleep (head);
	}
	if (head->so_error) {
		errno = head->so_error;
		head->so_error = 0;
		rtems_bsdnet_semaphore_release ();
		return -1;
	}

	so = head->so_comp.tqh_first;
	TAILQ_REMOVE(&head->so_comp, so, so_list);
	head->so_qlen--;

	fd = rtems_bsdnet_makeFdForSocket (so);
	if (fd < 0) {
		TAILQ_INSERT_HEAD(&head->so_comp, so, so_list);
		head->so_qlen++;
		soconnwakeup (head);
		rtems_bsdnet_semaphore_release ();
		return -1;
	}
	so->so_state &= ~SS_COMP;
	so->so_head = NULL;

	nam = m_get(M_WAIT, MT_SONAME);
	(void) soaccept(so, nam);
	if (name) {
		 /* check length before it is destroyed */
		if (*namelen > nam->m_len)
			*namelen = nam->m_len;
		memcpy (name, mtod(nam, caddr_t), *namelen);
	}
	m_freem(nam);
	rtems_bsdnet_semaphore_release ();
	return (fd);

}
Example no. 23
static void
lka_resume(struct lka_session *lks)
{
	struct envelope		*ep;
	struct expandnode	*xn;

	if (lks->error)
		goto error;

	/* pop next node and expand it */
	while ((xn = TAILQ_FIRST(&lks->nodes))) {
		TAILQ_REMOVE(&lks->nodes, xn, tq_entry);
		lka_expand(lks, xn->rule, xn);
		if (lks->flags & F_WAITING)
			return;
		if (lks->error)
			goto error;
	}

	/* delivery list is empty, reject */
	if (TAILQ_FIRST(&lks->deliverylist) == NULL) {
		log_trace(TRACE_EXPAND, "expand: lka_done: expanded to empty "
		    "delivery list");
		lks->error = LKA_PERMFAIL;
	}
    error:
	if (lks->error) {
		m_create(p_pony, IMSG_SMTP_EXPAND_RCPT, 0, 0, -1);
		m_add_id(p_pony, lks->id);
		m_add_int(p_pony, lks->error);

		if (lks->errormsg)
			m_add_string(p_pony, lks->errormsg);
		else {
			if (lks->error == LKA_PERMFAIL)
				m_add_string(p_pony, "550 Invalid recipient");
			else if (lks->error == LKA_TEMPFAIL)
				m_add_string(p_pony, "451 Temporary failure");
		}

		m_close(p_pony);
		while ((ep = TAILQ_FIRST(&lks->deliverylist)) != NULL) {
			TAILQ_REMOVE(&lks->deliverylist, ep, entry);
			free(ep);
		}
	}
	else {
		/* Process the delivery list and submit envelopes to queue */
		while ((ep = TAILQ_FIRST(&lks->deliverylist)) != NULL) {
			TAILQ_REMOVE(&lks->deliverylist, ep, entry);
			m_create(p_queue, IMSG_LKA_ENVELOPE_SUBMIT, 0, 0, -1);
			m_add_id(p_queue, lks->id);
			m_add_envelope(p_queue, ep);
			m_close(p_queue);
			free(ep);
		}

		m_create(p_queue, IMSG_LKA_ENVELOPE_COMMIT, 0, 0, -1);
		m_add_id(p_queue, lks->id);
		m_close(p_queue);
	}

	expand_clear(&lks->expand);
	tree_xpop(&sessions, lks->id);
	free(lks);
}
Example no. 24
/*
 * Asynchronous I/O daemons for client nfs.
 * They do read-ahead and write-behind operations on the block I/O cache.
 * Returns if we hit the timeout defined by the iodmaxidle sysctl.
 */
static void
nfssvc_iod(void *instance)
{
	struct buf *bp;
	struct nfsmount *nmp;
	int myiod, timo;
	int error = 0;

	mtx_lock(&ncl_iod_mutex);
	myiod = (int *)instance - nfs_asyncdaemon;
	/*
	 * Main loop
	 */
	for (;;) {
	    while (((nmp = ncl_iodmount[myiod]) == NULL)
		   || !TAILQ_FIRST(&nmp->nm_bufq)) {
		if (myiod >= ncl_iodmax)
			goto finish;
		if (nmp)
			nmp->nm_bufqiods--;
		ncl_iodwant[myiod] = curthread->td_proc;
		ncl_iodmount[myiod] = NULL;
		/*
		 * Always keep at least nfs_iodmin kthreads.
		 */
		timo = (myiod < nfs_iodmin) ? 0 : ncl_iodmaxidle * hz;
		error = msleep(&ncl_iodwant[myiod], &ncl_iod_mutex, PWAIT | PCATCH,
		    "-", timo);
		if (error) {
			nmp = ncl_iodmount[myiod];
			/*
			 * Rechecking the nm_bufq closes a rare race where the 
			 * nfsiod is woken up at the exact time the idle timeout
			 * fires
			 */
			if (nmp && TAILQ_FIRST(&nmp->nm_bufq))
				error = 0;
			break;
		}
	    }
	    if (error)
		    break;
	    while ((bp = TAILQ_FIRST(&nmp->nm_bufq)) != NULL) {
		    
		/* Take one off the front of the list */
		TAILQ_REMOVE(&nmp->nm_bufq, bp, b_freelist);
		nmp->nm_bufqlen--;
		if (nmp->nm_bufqwant && nmp->nm_bufqlen <= ncl_numasync) {
		    nmp->nm_bufqwant = 0;
		    wakeup(&nmp->nm_bufq);
		}
		mtx_unlock(&ncl_iod_mutex);
		if (bp->b_flags & B_DIRECT) {
			KASSERT((bp->b_iocmd == BIO_WRITE), ("nfssvc_iod: BIO_WRITE not set"));
			(void)ncl_doio_directwrite(bp);
		} else {
			if (bp->b_iocmd == BIO_READ)
				(void) ncl_doio(bp->b_vp, bp, bp->b_rcred, NULL);
			else
				(void) ncl_doio(bp->b_vp, bp, bp->b_wcred, NULL);
		}
		mtx_lock(&ncl_iod_mutex);
		/*
		 * If there are more than one iod on this mount, then defect
		 * so that the iods can be shared out fairly between the mounts
		 */
		if (nfs_defect && nmp->nm_bufqiods > 1) {
		    NFS_DPF(ASYNCIO,
			    ("nfssvc_iod: iod %d defecting from mount %p\n",
			     myiod, nmp));
		    ncl_iodmount[myiod] = NULL;
		    nmp->nm_bufqiods--;
		    break;
		}
	    }
	}
finish:
	nfs_asyncdaemon[myiod] = 0;
	if (nmp)
	    nmp->nm_bufqiods--;
	ncl_iodwant[myiod] = NULL;
	ncl_iodmount[myiod] = NULL;
	/* Someone may be waiting for the last nfsiod to terminate. */
	if (--ncl_numasync == 0)
		wakeup(&ncl_numasync);
	mtx_unlock(&ncl_iod_mutex);
	if ((error == 0) || (error == EWOULDBLOCK))
		kproc_exit(0);
	/* Abnormal termination */
	kproc_exit(1);
}
Example no. 25
static int 
bsd_accept(cyg_file *fp, cyg_file *new_fp,
           struct sockaddr *name, socklen_t *anamelen)
{
    socklen_t namelen = 0;
    int error = 0, s;
    struct socket *head, *so;
    struct sockaddr *sa;

    if( anamelen != NULL)
        namelen = *anamelen;

    s = splsoftnet();
    head = (struct socket *)fp->f_data;

    if ((head->so_options & SO_ACCEPTCONN) == 0) {
        splx(s);
        return (EINVAL);
    }

    if ((head->so_state & SS_NBIO) && TAILQ_EMPTY(&head->so_comp)) {
        splx(s);
        return (EWOULDBLOCK);
    }

    while (TAILQ_EMPTY(&head->so_comp) && head->so_error == 0) {
        if (head->so_state & SS_CANTRCVMORE) {
            head->so_error = ECONNABORTED;
            break;
        }
        error = tsleep((caddr_t)&head->so_timeo, PSOCK | PCATCH,
                       "netcon", 0);
        if (error) {
            splx(s);
            return (error);
        }
    }

    if (head->so_error) {
        error = head->so_error;
        head->so_error = 0;
        splx(s);
        return (error);
    }

    /*
     * At this point we know that there is at least one connection
     * ready to be accepted. Remove it from the queue prior to
     * allocating the file descriptor for it since falloc() may
     * block allowing another process to accept the connection
     * instead.
     */
    so = TAILQ_FIRST(&head->so_comp);
    TAILQ_REMOVE(&head->so_comp, so, so_list);
    head->so_qlen--;

#if 0 // FIXME
    fflag = lfp->f_flag;
    error = falloc(p, &nfp, &fd);
    if (error) {
        /*
         * Probably ran out of file descriptors. Put the
         * unaccepted connection back onto the queue and
         * do another wakeup so some other process might
         * have a chance at it.
         */
        TAILQ_INSERT_HEAD(&head->so_comp, so, so_list);
        head->so_qlen++;
        wakeup_one(&head->so_timeo);
        splx(s);
        goto done;
    }
    fhold(nfp);
    p->p_retval[0] = fd;

    /* connection has been removed from the listen queue */
    KNOTE(&head->so_rcv.sb_sel.si_note, 0);
#endif

    so->so_state &= ~SS_COMP;
    so->so_head = NULL;

    cyg_selinit(&so->so_rcv.sb_sel);
    cyg_selinit(&so->so_snd.sb_sel);
    
    new_fp->f_type      = DTYPE_SOCKET;
    new_fp->f_flag     |= FREAD|FWRITE;
    new_fp->f_offset    = 0;
    new_fp->f_ops       = &bsd_sock_fileops;
    new_fp->f_data      = (CYG_ADDRWORD)so;
    new_fp->f_xops      = (CYG_ADDRWORD)&bsd_sockops;
    
    sa = 0;
    error = soaccept(so, &sa);
    if (error) {
        /*
         * return a namelen of zero for older code which might
         * ignore the return value from accept.
         */	
        if (name != NULL) {
            *anamelen = 0;
        }
        goto noconnection;
    }
    if (sa == NULL) {
        namelen = 0;
        if (name)
            goto gotnoname;
        splx(s);
        error = 0;
        goto done;
    }
    if (name) {
        if (namelen > sa->sa_len)
            namelen = sa->sa_len;
#ifdef COMPAT_OLDSOCK
        if (compat)
            ((struct osockaddr *)sa)->sa_family = sa->sa_family;
#endif
        error = copyout(sa, (caddr_t)name, namelen);
        if (!error)
gotnoname:
        *anamelen = namelen;
    }
noconnection:

#if 0 // FIXME
	/*
	 * close the new descriptor, assuming someone hasn't ripped it
	 * out from under us.
	 */
	if (error) {
		if (fdp->fd_ofiles[fd] == nfp) {
			fdp->fd_ofiles[fd] = NULL;
			fdrop(nfp, p);
		}
	}
	splx(s);

	/*
	 * Release explicitly held references before returning.
	 */
done:
	if (nfp != NULL)
		fdrop(nfp, p);
	fdrop(lfp, p);
	return (error);
    m_freem(nam);
#else
 done:
#endif
    splx(s);
    
    return (error);
}
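The SS_NBIO test above is the kernel half of non-blocking accept(2): an empty completed-connection queue yields EWOULDBLOCK instead of a sleep on so_timeo. A minimal user-space view of that behavior (the loopback port is arbitrary, and error checking is trimmed to the call being demonstrated):

#include <sys/socket.h>
#include <netinet/in.h>
#include <arpa/inet.h>
#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <string.h>

int
main(void)
{
	struct sockaddr_in sin;
	int s = socket(AF_INET, SOCK_STREAM, 0);

	memset(&sin, 0, sizeof(sin));
	sin.sin_family = AF_INET;
	sin.sin_addr.s_addr = htonl(INADDR_LOOPBACK);
	sin.sin_port = htons(40123);		/* arbitrary test port */

	bind(s, (struct sockaddr *)&sin, sizeof(sin));
	listen(s, 8);
	fcntl(s, F_SETFL, O_NONBLOCK);		/* the SS_NBIO equivalent */

	/* Nothing on the completion queue yet: accept() fails immediately. */
	if (accept(s, NULL, NULL) < 0 && errno == EWOULDBLOCK)
		printf("empty completion queue: EWOULDBLOCK\n");
	return (0);
}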
Example n. 26
static void
pmclog_loop(void *arg)
{
	struct pmclog_proc_init_args *ia;
	struct pmc_owner *po;
	struct pmclog_buffer *lb;
	struct proc *p;
	struct ucred *ownercred;
	struct ucred *mycred;
	struct thread *td;
	sigset_t unb;
	struct uio auio;
	struct iovec aiov;
	size_t nbytes;
	int error;

	td = curthread;

	SIGEMPTYSET(unb);
	SIGADDSET(unb, SIGHUP);
	(void)kern_sigprocmask(td, SIG_UNBLOCK, &unb, NULL, 0);

	ia = arg;
	MPASS(ia->kthr == curproc);
	MPASS(!ia->acted);
	mtx_lock(&pmc_kthread_mtx);
	while (ia->po == NULL && !ia->exit)
		msleep(ia, &pmc_kthread_mtx, PWAIT, "pmclogi", 0);
	if (ia->exit) {
		ia->acted = true;
		wakeup(ia);
		mtx_unlock(&pmc_kthread_mtx);
		kproc_exit(0);
	}
	MPASS(ia->po != NULL);
	po = ia->po;
	ia->acted = true;
	wakeup(ia);
	mtx_unlock(&pmc_kthread_mtx);
	ia = NULL;

	p = po->po_owner;
	mycred = td->td_ucred;

	PROC_LOCK(p);
	ownercred = crhold(p->p_ucred);
	PROC_UNLOCK(p);

	PMCDBG2(LOG,INI,1, "po=%p kt=%p", po, po->po_kthread);
	KASSERT(po->po_kthread == curthread->td_proc,
	    ("[pmclog,%d] proc mismatch po=%p po/kt=%p curproc=%p", __LINE__,
		po, po->po_kthread, curthread->td_proc));

	lb = NULL;


	/*
	 * Loop waiting for I/O requests to be added to the owner
	 * struct's queue.  The loop is exited when the log file
	 * is deconfigured.
	 */

	mtx_lock(&pmc_kthread_mtx);

	for (;;) {

		/* check if we've been asked to exit */
		if ((po->po_flags & PMC_PO_OWNS_LOGFILE) == 0)
			break;

		if (lb == NULL) { /* look for a fresh buffer to write */
			mtx_lock_spin(&po->po_mtx);
			if ((lb = TAILQ_FIRST(&po->po_logbuffers)) == NULL) {
				mtx_unlock_spin(&po->po_mtx);

				/* No more buffers and shutdown required. */
				if (po->po_flags & PMC_PO_SHUTDOWN)
					break;

				(void) msleep(po, &pmc_kthread_mtx, PWAIT,
				    "pmcloop", 0);
				continue;
			}

			TAILQ_REMOVE(&po->po_logbuffers, lb, plb_next);
			mtx_unlock_spin(&po->po_mtx);
		}

		mtx_unlock(&pmc_kthread_mtx);

		/* process the request */
		PMCDBG3(LOG,WRI,2, "po=%p base=%p ptr=%p", po,
		    lb->plb_base, lb->plb_ptr);
		/* change our thread's credentials before issuing the I/O */

		aiov.iov_base = lb->plb_base;
		aiov.iov_len  = nbytes = lb->plb_ptr - lb->plb_base;

		auio.uio_iov    = &aiov;
		auio.uio_iovcnt = 1;
		auio.uio_offset = -1;
		auio.uio_resid  = nbytes;
		auio.uio_rw     = UIO_WRITE;
		auio.uio_segflg = UIO_SYSSPACE;
		auio.uio_td     = td;

		/* switch thread credentials -- see kern_ktrace.c */
		td->td_ucred = ownercred;
		error = fo_write(po->po_file, &auio, ownercred, 0, td);
		td->td_ucred = mycred;

		if (error) {
			/* XXX some errors are recoverable */
			/* send a SIGIO to the owner and exit */
			PROC_LOCK(p);
			kern_psignal(p, SIGIO);
			PROC_UNLOCK(p);

			mtx_lock(&pmc_kthread_mtx);

			po->po_error = error; /* save for flush log */

			PMCDBG2(LOG,WRI,2, "po=%p error=%d", po, error);

			break;
		}

		mtx_lock(&pmc_kthread_mtx);

		/* put the used buffer back into the global pool */
		PMCLOG_INIT_BUFFER_DESCRIPTOR(lb);

		mtx_lock_spin(&pmc_bufferlist_mtx);
		TAILQ_INSERT_HEAD(&pmc_bufferlist, lb, plb_next);
		mtx_unlock_spin(&pmc_bufferlist_mtx);

		lb = NULL;
	}

	wakeup_one(po->po_kthread);
	po->po_kthread = NULL;

	mtx_unlock(&pmc_kthread_mtx);

	/* return the current I/O buffer to the global pool */
	if (lb) {
		PMCLOG_INIT_BUFFER_DESCRIPTOR(lb);

		mtx_lock_spin(&pmc_bufferlist_mtx);
		TAILQ_INSERT_HEAD(&pmc_bufferlist, lb, plb_next);
		mtx_unlock_spin(&pmc_bufferlist_mtx);
	}

	/*
	 * Exit this thread, signalling the waiter
	 */

	crfree(ownercred);

	kproc_exit(0);
}
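The aiov/auio setup in pmclog_loop is the in-kernel form of a one-element gather write. A user-space sketch of the same shape using writev(2) (the output path is illustrative):

#include <sys/uio.h>
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int
main(void)
{
	char buf[] = "log record\n";
	struct iovec iov;
	int fd = open("/tmp/pmclog-demo", O_WRONLY | O_CREAT | O_APPEND, 0644);

	if (fd < 0)
		return (1);
	iov.iov_base = buf;		/* mirrors aiov.iov_base = lb->plb_base */
	iov.iov_len  = strlen(buf);	/* mirrors lb->plb_ptr - lb->plb_base */
	if (writev(fd, &iov, 1) < 0)	/* kernel path uses fo_write(..., &auio, ...) */
		perror("writev");
	close(fd);
	return (0);
}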
Example n. 27
/**
 * @brief Completes a request in the cache
 *
 * Completes a cache insertion operation begun in nfs_dupreq_start.
 * The refcnt of the corresponding duplicate request entry is unchanged
 * (ie, the caller must still call nfs_dupreq_rele).
 *
 * In contrast with the prior DRC implementation, completing a request
 * in the current implementation may under normal conditions cause one
 * or more cached requests to be retired.  Requests are retired in the
 * order they were inserted.  The primary retire algorithm combines a
 * high water mark with a windowing heuristic.  One or more requests
 * will be retired if the water mark/timeout is exceeded and no
 * duplicate requests have been found in the cache within a
 * configurable window of immediately preceding requests.  A timeout
 * may supplement the water mark in future.
 *
 * req->rq_u1 has either a magic value, or points to a duplicate request
 * cache entry allocated in nfs_dupreq_start.
 *
 * @param[in] req     The request
 * @param[in] res_nfs The response
 *
 * @return DUPREQ_SUCCESS if successful.
 * @return DUPREQ_INSERT_MALLOC_ERROR if an error occurred.
 */
dupreq_status_t nfs_dupreq_finish(struct svc_req *req, nfs_res_t *res_nfs)
{
	dupreq_entry_t *ov = NULL, *dv = (dupreq_entry_t *)req->rq_u1;
	dupreq_status_t status = DUPREQ_SUCCESS;
	struct rbtree_x_part *t;
	drc_t *drc = NULL;

	/* do nothing if req is marked no-cache */
	if (dv == (void *)DUPREQ_NOCACHE)
		goto out;

	/* do nothing if nfs_dupreq_start failed completely */
	if (dv == (void *)DUPREQ_BAD_ADDR1)
		goto out;

	pthread_mutex_lock(&dv->mtx);
	dv->res = res_nfs;
	dv->timestamp = time(NULL);
	dv->state = DUPREQ_COMPLETE;
	drc = dv->hin.drc;
	pthread_mutex_unlock(&dv->mtx);

	/* cond. remove from q head */
	pthread_mutex_lock(&drc->mtx);

	LogFullDebug(COMPONENT_DUPREQ,
		     "completing dv=%p xid=%u on DRC=%p state=%s, status=%s, "
		     "refcnt=%d", dv, dv->hin.tcp.rq_xid, drc,
		     dupreq_state_table[dv->state], dupreq_status_table[status],
		     dv->refcnt);

	/* ok, do the new retwnd calculation here.  then, put drc only if
	 * we retire an entry */
	if (drc_should_retire(drc)) {
		/* again: */
		ov = TAILQ_FIRST(&drc->dupreq_q);
		if (likely(ov)) {
			/* finished request count against retwnd */
			drc_dec_retwnd(drc);
			/* check refcnt */
			if (ov->refcnt > 0) {
				/* ov still in use, apparently */
				goto unlock;
			}
			/* remove q entry */
			TAILQ_REMOVE(&drc->dupreq_q, ov, fifo_q);
			--(drc->size);

			/* remove dict entry */
			t = rbtx_partition_of_scalar(&drc->xt, ov->hk);
			/* interlock */
			pthread_mutex_unlock(&drc->mtx);
			pthread_mutex_lock(&t->mtx);	/* partition lock */
			rbtree_x_cached_remove(&drc->xt, t, &ov->rbt_k, ov->hk);
			pthread_mutex_unlock(&t->mtx);

			LogDebug(COMPONENT_DUPREQ,
				 "retiring ov=%p xid=%u on DRC=%p state=%s, "
				 "status=%s, refcnt=%d", ov, ov->hin.tcp.rq_xid,
				 ov->hin.drc, dupreq_state_table[dv->state],
				 dupreq_status_table[status], ov->refcnt);

			/* deep free ov */
			nfs_dupreq_free_dupreq(ov);
			goto out;
		}
	}

 unlock:
	pthread_mutex_unlock(&drc->mtx);

 out:
	return status;
}
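The header comment describes retirement as a high water mark gated by a retire window. A hedged reconstruction of that predicate, using a minimal model struct; "maxsize" is an assumed field name (the code above only shows drc->size and the retwnd helpers), so the real drc_should_retire() may differ:

/*
 * A minimal model of the fields the predicate reads; the real drc_t has
 * many more members.
 */
struct drc_model {
	unsigned size;		/* entries currently cached */
	unsigned maxsize;	/* high water mark (assumed name) */
	int retwnd;		/* >0 while duplicates were seen recently */
};

/*
 * Retire only when the cache is over its high water mark AND the
 * recent-duplicate window has drained.
 */
static inline int
drc_should_retire_sketch(const struct drc_model *drc)
{
	if (drc->size <= drc->maxsize)	/* below high water mark: keep all */
		return (0);
	if (drc->retwnd > 0)		/* recent duplicate hits: hold off */
		return (0);
	return (1);
}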
Example n. 28
/*
 * Look for a name in the cache. We don't do this if the segment name is
 * long, simply so the cache can avoid holding long names (which would
 * either waste space, or add greatly to the complexity).
 *
 * Lookup is called with ni_dvp pointing to the directory to search,
 * ni_ptr pointing to the name of the entry being sought, ni_namelen
 * tells the length of the name, and ni_hash contains a hash of
 * the name. If the lookup succeeds, the vnode is returned in ni_vp
 * and a status of 0 is returned. If the locking fails for whatever
 * reason, the vnode is unlocked and the error is returned to the caller.
 * If the lookup determines that the name does not exist (negative caching),
 * a status of ENOENT is returned. If the lookup fails, a status of -1
 * is returned.
 */
int
cache_lookup(struct vnode *dvp, struct vnode **vpp, struct componentname *cnp)
{
	struct namecache *ncp;
	struct nchashhead *ncpp;
	struct vnode *vp;
	struct proc *p = curproc;
	u_long vpid;
	int error;

	*vpp = NULL;

	if (!doingcache) {
		cnp->cn_flags &= ~MAKEENTRY;
		return (-1);
	}
	if (cnp->cn_namelen > NCHNAMLEN) {
		nchstats.ncs_long++;
		cnp->cn_flags &= ~MAKEENTRY;
		return (-1);
	}

	ncpp = &nchashtbl[NCHASH(dvp, cnp)];
	LIST_FOREACH(ncp, ncpp, nc_hash) {
		if (ncp->nc_dvp == dvp &&
		    ncp->nc_dvpid == dvp->v_id &&
		    ncp->nc_nlen == cnp->cn_namelen &&
		    !memcmp(ncp->nc_name, cnp->cn_nameptr, (u_int)ncp->nc_nlen))
			break;
	}
	if (ncp == NULL) {
		nchstats.ncs_miss++;
		return (-1);
	}
	if ((cnp->cn_flags & MAKEENTRY) == 0) {
		nchstats.ncs_badhits++;
		goto remove;
	} else if (ncp->nc_vp == NULL) {
		if (cnp->cn_nameiop != CREATE ||
		    (cnp->cn_flags & ISLASTCN) == 0) {
			nchstats.ncs_neghits++;
			/*
			 * Move this slot to end of LRU chain,
			 * if not already there.
			 */
			if (TAILQ_NEXT(ncp, nc_lru) != NULL) {
				TAILQ_REMOVE(&nclruhead, ncp, nc_lru);
				TAILQ_INSERT_TAIL(&nclruhead, ncp, nc_lru);
			}
			return (ENOENT);
		} else {
			nchstats.ncs_badhits++;
			goto remove;
		}
	} else if (ncp->nc_vpid != ncp->nc_vp->v_id) {
		nchstats.ncs_falsehits++;
		goto remove;
	}

	vp = ncp->nc_vp;
	vpid = vp->v_id;
	if (vp == dvp) {	/* lookup on "." */
		VREF(dvp);
		error = 0;
	} else if (cnp->cn_flags & ISDOTDOT) {
		VOP_UNLOCK(dvp, 0, p);
		cnp->cn_flags |= PDIRUNLOCK;
		error = vget(vp, LK_EXCLUSIVE, p);
		/*
		 * If the above vget() succeeded and both LOCKPARENT and
		 * ISLASTCN is set, lock the directory vnode as well.
		 */
		if (!error && (~cnp->cn_flags & (LOCKPARENT|ISLASTCN)) == 0) {
			if ((error = vn_lock(dvp, LK_EXCLUSIVE, p)) != 0) {
				vput(vp);
				return (error);
			}
			cnp->cn_flags &= ~PDIRUNLOCK;
		}
	} else {
		error = vget(vp, LK_EXCLUSIVE, p);
		/*
		 * If the above vget() failed or either of LOCKPARENT or
		 * ISLASTCN is set, unlock the directory vnode.
		 */
		if (error || (~cnp->cn_flags & (LOCKPARENT|ISLASTCN)) != 0) {
			VOP_UNLOCK(dvp, 0, p);
			cnp->cn_flags |= PDIRUNLOCK;
		}
	}

	/*
	 * Check that the lock succeeded, and that the capability number did
	 * not change while we were waiting for the lock.
	 */
	if (error || vpid != vp->v_id) {
		if (!error) {
			vput(vp);
			nchstats.ncs_falsehits++;
		} else
			nchstats.ncs_badhits++;
		/*
		 * The parent needs to be locked when we return to VOP_LOOKUP().
		 * The `.' case here should be extremely rare (if it can happen
		 * at all), so we don't bother optimizing out the unlock/relock.
		 */
		if (vp == dvp || error ||
		    (~cnp->cn_flags & (LOCKPARENT|ISLASTCN)) != 0) {
			if ((error = vn_lock(dvp, LK_EXCLUSIVE, p)) != 0)
				return (error);
			cnp->cn_flags &= ~PDIRUNLOCK;
		}
		return (-1);
	}

	nchstats.ncs_goodhits++;
	/*
	 * Move this slot to end of LRU chain, if not already there.
	 */
	if (TAILQ_NEXT(ncp, nc_lru) != NULL) {
		TAILQ_REMOVE(&nclruhead, ncp, nc_lru);
		TAILQ_INSERT_TAIL(&nclruhead, ncp, nc_lru);
	}
	*vpp = vp;
	return (0);

remove:
	/*
	 * Last component and we are renaming or deleting,
	 * the cache entry is invalid, or otherwise don't
	 * want cache entry to exist.
	 */
	TAILQ_REMOVE(&nclruhead, ncp, nc_lru);
	LIST_REMOVE(ncp, nc_hash);
	ncp->nc_hash.le_prev = NULL;

	if (ncp->nc_vhash.le_prev != NULL) {
		LIST_REMOVE(ncp, nc_vhash);
		ncp->nc_vhash.le_prev = NULL;
	}

	TAILQ_INSERT_HEAD(&nclruhead, ncp, nc_lru);
	return (-1);
}
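The move-to-tail step appears twice in cache_lookup and is the standard TAILQ LRU idiom; the TAILQ_NEXT() guard just skips the unlink/re-insert when the entry is already last. A self-contained sketch with <sys/queue.h> (the entry type and field names are illustrative):

#include <sys/queue.h>
#include <stdio.h>

struct entry {
	int key;
	TAILQ_ENTRY(entry) lru;
};
TAILQ_HEAD(lruhead, entry);

/*
 * Mark an entry most-recently-used: unlink it and re-insert at the tail.
 * When the entry is already last, TAILQ_NEXT() is NULL and this is a no-op.
 */
static void
lru_touch(struct lruhead *head, struct entry *e)
{
	if (TAILQ_NEXT(e, lru) != NULL) {
		TAILQ_REMOVE(head, e, lru);
		TAILQ_INSERT_TAIL(head, e, lru);
	}
}

int
main(void)
{
	struct lruhead head = TAILQ_HEAD_INITIALIZER(head);
	struct entry a = { .key = 1 }, b = { .key = 2 }, *p;

	TAILQ_INSERT_TAIL(&head, &a, lru);
	TAILQ_INSERT_TAIL(&head, &b, lru);
	lru_touch(&head, &a);			/* a becomes most recent */
	TAILQ_FOREACH(p, &head, lru)
		printf("%d\n", p->key);		/* prints 2 then 1 */
	return (0);
}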
Example n. 29
/**
 * @brief Find and reference a DRC to process the supplied svc_req.
 *
 * @param[in] req  The svc_req being processed.
 *
 * @return The ref'd DRC if successfully located, else NULL.
 */
static /* inline */ drc_t *
nfs_dupreq_get_drc(struct svc_req *req)
{
	enum drc_type dtype = get_drc_type(req);
	gsh_xprt_private_t *xu = (gsh_xprt_private_t *) req->rq_xprt->xp_u1;
	drc_t *drc = NULL;
	bool drc_check_expired = false;

	switch (dtype) {
	case DRC_UDP_V234:
		LogFullDebug(COMPONENT_DUPREQ, "ref shared UDP DRC");
		drc = &(drc_st->udp_drc);
		DRC_ST_LOCK();
		(void)nfs_dupreq_ref_drc(drc);
		DRC_ST_UNLOCK();
		goto out;
		break;
	case DRC_TCP_V4:
	case DRC_TCP_V3:
		pthread_mutex_lock(&req->rq_xprt->xp_lock);
		if (xu->drc) {
			drc = xu->drc;
			LogFullDebug(COMPONENT_DUPREQ, "ref DRC=%p for xprt=%p",
				     drc, req->rq_xprt);
			pthread_mutex_lock(&drc->mtx);	/* LOCKED */
		} else {
			drc_t drc_k;
			struct rbtree_x_part *t = NULL;
			struct opr_rbtree_node *ndrc = NULL;
			drc_t *tdrc = NULL;

			memset(&drc_k, 0, sizeof(drc_k));

			drc_k.type = dtype;
			(void)copy_xprt_addr(&drc_k.d_u.tcp.addr, req->rq_xprt);

			drc_k.d_u.tcp.hk =
			    CityHash64WithSeed((char *)&drc_k.d_u.tcp.addr,
					       sizeof(sockaddr_t), 911);
			{
				char str[512];
				sprint_sockaddr(&drc_k.d_u.tcp.addr, str, 512);
				LogFullDebug(COMPONENT_DUPREQ,
					     "get drc for addr: %s", str);
			}

			t = rbtx_partition_of_scalar(&drc_st->tcp_drc_recycle_t,
						     drc_k.d_u.tcp.hk);
			DRC_ST_LOCK();
			ndrc =
			    opr_rbtree_lookup(&t->t, &drc_k.d_u.tcp.recycle_k);
			if (ndrc) {
				/* reuse old DRC */
				tdrc =
				    opr_containerof(ndrc, drc_t,
						    d_u.tcp.recycle_k);
				pthread_mutex_lock(&tdrc->mtx);	/* LOCKED */
				if (tdrc->flags & DRC_FLAG_RECYCLE) {
					TAILQ_REMOVE(&drc_st->tcp_drc_recycle_q,
						     tdrc, d_u.tcp.recycle_q);
					--(drc_st->tcp_drc_recycle_qlen);
					tdrc->flags &= ~DRC_FLAG_RECYCLE;
				}
				drc = tdrc;
				LogFullDebug(COMPONENT_DUPREQ,
					     "recycle TCP DRC=%p for xprt=%p",
					     tdrc, req->rq_xprt);
			}
			if (!drc) {
				drc = alloc_tcp_drc(dtype);
				LogFullDebug(COMPONENT_DUPREQ,
					     "alloc new TCP DRC=%p for xprt=%p",
					     drc, req->rq_xprt);
				/* assign addr */
				memcpy(&drc->d_u.tcp.addr, &drc_k.d_u.tcp.addr,
				       sizeof(sockaddr_t));
				/* assign already-computed hash */
				drc->d_u.tcp.hk = drc_k.d_u.tcp.hk;
				pthread_mutex_lock(&drc->mtx);	/* LOCKED */
				/* xprt ref */
				drc->refcnt = 1;
				/* insert dict */
				opr_rbtree_insert(&t->t,
						  &drc->d_u.tcp.recycle_k);
			}
			DRC_ST_UNLOCK();
			drc->d_u.tcp.recycle_time = 0;
			/* xprt drc */
			(void)nfs_dupreq_ref_drc(drc);	/* xu ref */

			/* try to expire unused DRCs somewhat in proportion to
			 * new connection arrivals */
			drc_check_expired = true;

			LogFullDebug(COMPONENT_DUPREQ,
				     "after ref drc %p refcnt==%u ", drc,
				     drc->refcnt);

			xu->drc = drc;
		}
		pthread_mutex_unlock(&req->rq_xprt->xp_lock);
		break;
	default:
		/* XXX error */
		break;
	}

	/* call path ref */
	(void)nfs_dupreq_ref_drc(drc);
	pthread_mutex_unlock(&drc->mtx);

	if (drc_check_expired)
		drc_free_expired();

out:
	return drc;
}
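Stripped of the rbtree, recycle queue, and logging, the DRC_TCP_* branch above is a lookup-or-create under a global lock that returns a referenced object. A sketch of that core pattern with invented names (a one-slot "table" stands in for the partitioned rbtree; this is not the Ganesha API):

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct cache_obj {
	int refcnt;
};

static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;
static struct cache_obj *slot;		/* one-slot stand-in for the rbtree */

/*
 * Find the cached object, creating it on first use, and return it with
 * the caller's reference already taken -- all under the table lock, so
 * the object cannot be retired between lookup and ref.
 */
static struct cache_obj *
get_or_create_ref(void)
{
	struct cache_obj *obj;

	pthread_mutex_lock(&table_lock);
	obj = slot;
	if (obj == NULL) {
		obj = calloc(1, sizeof(*obj));
		obj->refcnt = 1;	/* the table's own reference */
		slot = obj;
	}
	obj->refcnt++;			/* the caller's reference */
	pthread_mutex_unlock(&table_lock);
	return (obj);
}

int
main(void)
{
	struct cache_obj *o1 = get_or_create_ref();
	struct cache_obj *o2 = get_or_create_ref();

	printf("same=%d refcnt=%d\n", o1 == o2, o2->refcnt);	/* same=1 refcnt=3 */
	return (0);
}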
Example n. 30
static int
vfs_mountroot_shuffle(struct thread *td, struct mount *mpdevfs)
{
	struct nameidata nd;
	struct mount *mporoot, *mpnroot;
	struct vnode *vp, *vporoot, *vpdevfs;
	char *fspath;
	int error;

	mpnroot = TAILQ_NEXT(mpdevfs, mnt_list);

	/* Shuffle the mountlist. */
	mtx_lock(&mountlist_mtx);
	mporoot = TAILQ_FIRST(&mountlist);
	TAILQ_REMOVE(&mountlist, mpdevfs, mnt_list);
	if (mporoot != mpdevfs) {
		TAILQ_REMOVE(&mountlist, mpnroot, mnt_list);
		TAILQ_INSERT_HEAD(&mountlist, mpnroot, mnt_list);
	}
	TAILQ_INSERT_TAIL(&mountlist, mpdevfs, mnt_list);
	mtx_unlock(&mountlist_mtx);

	cache_purgevfs(mporoot);
	if (mporoot != mpdevfs)
		cache_purgevfs(mpdevfs);

	VFS_ROOT(mporoot, LK_EXCLUSIVE, &vporoot);

	VI_LOCK(vporoot);
	vporoot->v_iflag &= ~VI_MOUNT;
	VI_UNLOCK(vporoot);
	vporoot->v_mountedhere = NULL;
	mporoot->mnt_flag &= ~MNT_ROOTFS;
	mporoot->mnt_vnodecovered = NULL;
	vput(vporoot);

	/* Set up the new rootvnode, and purge the cache */
	mpnroot->mnt_vnodecovered = NULL;
	set_rootvnode();
	cache_purgevfs(rootvnode->v_mount);

	if (mporoot != mpdevfs) {
		/* Remount old root under /.mount or /mnt */
		fspath = "/.mount";
		NDINIT(&nd, LOOKUP, FOLLOW | LOCKLEAF, UIO_SYSSPACE,
		    fspath, td);
		error = namei(&nd);
		if (error) {
			NDFREE(&nd, NDF_ONLY_PNBUF);
			fspath = "/mnt";
			NDINIT(&nd, LOOKUP, FOLLOW | LOCKLEAF, UIO_SYSSPACE,
			    fspath, td);
			error = namei(&nd);
		}
		if (!error) {
			vp = nd.ni_vp;
			error = (vp->v_type == VDIR) ? 0 : ENOTDIR;
			if (!error)
				error = vinvalbuf(vp, V_SAVE, 0, 0);
			if (!error) {
				cache_purge(vp);
				mporoot->mnt_vnodecovered = vp;
				vp->v_mountedhere = mporoot;
				strlcpy(mporoot->mnt_stat.f_mntonname,
				    fspath, MNAMELEN);
				VOP_UNLOCK(vp, 0);
			} else
				vput(vp);
		}
		NDFREE(&nd, NDF_ONLY_PNBUF);

		if (error && bootverbose)
			printf("mountroot: unable to remount previous root "
			    "under /.mount or /mnt (error %d).\n", error);
	}

	/* Remount devfs under /dev */
	NDINIT(&nd, LOOKUP, FOLLOW | LOCKLEAF, UIO_SYSSPACE, "/dev", td);
	error = namei(&nd);
	if (!error) {
		vp = nd.ni_vp;
		error = (vp->v_type == VDIR) ? 0 : ENOTDIR;
		if (!error)
			error = vinvalbuf(vp, V_SAVE, 0, 0);
		if (!error) {
			vpdevfs = mpdevfs->mnt_vnodecovered;
			if (vpdevfs != NULL) {
				cache_purge(vpdevfs);
				vpdevfs->v_mountedhere = NULL;
				vrele(vpdevfs);
			}
			mpdevfs->mnt_vnodecovered = vp;
			vp->v_mountedhere = mpdevfs;
			VOP_UNLOCK(vp, 0);
		} else
			vput(vp);
	}
	if (error && bootverbose)
		printf("mountroot: unable to remount devfs under /dev "
		    "(error %d).\n", error);
	NDFREE(&nd, NDF_ONLY_PNBUF);

	if (mporoot == mpdevfs) {
		vfs_unbusy(mpdevfs);
		/* Unlink the no longer needed /dev/dev -> / symlink */
		error = kern_unlink(td, "/dev/dev", UIO_SYSSPACE);
		if (error && bootverbose)
			printf("mountroot: unable to unlink /dev/dev "
			    "(error %d)\n", error);
	}

	return (0);
}