Example #1
/*
 * Force all CPUs through cpu_switchto(), waiting until complete.
 * Context switching will drain the write buffer on the calling
 * CPU.
 */
static void
ras_sync(void)
{

	/* No need to sync if exiting or single threaded. */
	if (curproc->p_nlwps > 1 && ncpu > 1) {
#ifdef NO_SOFTWARE_PATENTS
		uint64_t where;
		where = xc_broadcast(0, (xcfunc_t)nullop, NULL, NULL);
		xc_wait(where);
#else
		/*
		 * Assumptions:
		 *
		 * o preemption is disabled by the thread in
		 *   ras_lookup().
		 * o proc::p_raslist is only inspected with
		 *   preemption disabled.
		 * o ras_lookup() plus loads reordered in advance
		 *   will take no longer than 1/8s to complete.
		 */
		const int delta = hz >> 3;
		int target = hardclock_ticks + delta;
		do {
			kpause("ras", false, delta, NULL);
		} while (hardclock_ticks < target);
#endif
	}
}
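The #else branch above is, in effect, "sleep for at least delta ticks even if kpause() returns early". A minimal sketch of that pattern, assuming only the kpause(), hz and hardclock_ticks interfaces already used in this example; the helper name is mine and not part of the original file.

/*
 * Sketch: block the caller for at least "ticks" clock ticks, re-issuing
 * kpause() with the remaining time if it wakes early.  Hypothetical
 * helper, shown only to isolate the pattern used in ras_sync().
 */
static void
sleep_at_least(int ticks)
{
	int target = hardclock_ticks + ticks;
	int remaining;

	while ((remaining = target - hardclock_ticks) > 0) {
		/* uninterruptible, no interlock; remaining is always >= 1 here */
		kpause("slpmin", false, remaining, NULL);
	}
}

Re-computing the remainder keeps an early wakeup from stretching the total wait by another full delta, which the fixed-delta loop in ras_sync() accepts for simplicity.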
Example #2
static bt_t *
bt_alloc(vmem_t *vm, vm_flag_t flags)
{
	bt_t *bt;
	VMEM_LOCK(vm);
	while (vm->vm_nfreetags <= BT_MINRESERVE && (flags & VM_POPULATING) == 0) {
		VMEM_UNLOCK(vm);
		if (bt_refill(vm)) {
			if ((flags & VM_NOSLEEP) != 0) {
				return NULL;
			}

			/*
			 * It would be nice to wait for something specific here
			 * but there are multiple ways that a retry could
			 * succeed and we can't wait for multiple things
			 * simultaneously.  So we'll just sleep for an arbitrary
			 * short period of time and retry regardless.
			 * This should be a very rare case.
			 */

			vmem_kick_pdaemon();
			kpause("btalloc", false, 1, NULL);
		}
		VMEM_LOCK(vm);
	}
	bt = LIST_FIRST(&vm->vm_freetags);
	LIST_REMOVE(bt, bt_freelist);
	vm->vm_nfreetags--;
	VMEM_UNLOCK(vm);

	return bt;
}
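The shape of bt_alloc() recurs throughout these examples: drop the lock, sleep one tick as an arbitrary backoff, re-take the lock and re-check the condition. A minimal sketch of just that shape, assuming a kmutex_t-protected condition; the flag and the function name are stand-ins of mine, not vmem internals.

static bool resource_ready;		/* hypothetical condition guarded by *lock */

static void
wait_for_resource(kmutex_t *lock)
{

	mutex_enter(lock);
	while (!resource_ready) {
		mutex_exit(lock);
		/* arbitrary one-tick backoff before re-checking */
		kpause("rsrcwt", false, 1, NULL);
		mutex_enter(lock);
	}
	mutex_exit(lock);
}

Where nothing else has to happen between the unlock and the sleep, the interlock form of kpause() (see Examples #3 and #16) lets kpause() drop and re-take the lock itself.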
Example #3
/* Delay for a certain number of ms */
void
usb_delay_ms_locked(struct usbd_bus *bus, u_int ms, kmutex_t *lock)
{
	/* Wait at least two clock ticks so we know the time has passed. */
	if (bus->ub_usepolling || cold)
		delay((ms+1) * 1000);
	else
		kpause("usbdly", false, (ms*hz+999)/1000 + 1, lock);
}
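The timeout expression converts milliseconds to clock ticks, rounding up, and adds one extra tick because the current tick is already partly elapsed when kpause() is called. For example, with hz = 100 and ms = 1, (1*100 + 999)/1000 + 1 = 2 ticks: somewhere between one and two tick periods (10 to 20 ms) of sleep, but never less than the requested 1 ms. A hedged sketch of the conversion on its own; the helper name is mine.

/*
 * Sketch: turn a millisecond count into a kpause() timeout covering at
 * least "ms" milliseconds.  Same arithmetic as usb_delay_ms_locked().
 */
static int
ms_to_timo(u_int ms)
{

	return (ms * hz + 999) / 1000 + 1;
}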
Example #4
static void
awin_hdmi_thread(void *priv)
{
	struct awin_hdmi_softc *sc = priv;

	for (;;) {
		awin_hdmi_hpd(sc);
		kpause("hdmihotplug", false, mstohz(1000), NULL);
	}
}
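This polling thread runs forever. A hedged variant with a shutdown flag, using only the kthread and kpause() calls already seen in these examples; the flag is a hypothetical addition, not part of the driver.

static volatile bool hpd_poller_stop;	/* hypothetical: set at detach time */

static void
awin_hdmi_thread_stoppable(void *priv)
{
	struct awin_hdmi_softc *sc = priv;

	while (!hpd_poller_stop) {
		awin_hdmi_hpd(sc);		/* poll the hot-plug state */
		kpause("hdmihotplug", false, mstohz(1000), NULL);
	}
	kthread_exit(0);
}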
Example #5
/*
 * check if the scanner is ready to take commands
 *   wait timeout seconds and try only every second
 *   if update, then update picture size info
 *
 *   returns EBUSY if scanner not ready
 */
static int
mustek_get_status(struct ss_softc *ss, int timeout, int update)
{
    struct mustek_get_status_cmd cmd;
    struct mustek_get_status_data data;
    struct scsipi_periph *periph = ss->sc_periph;
    int error, lines, bytes_per_line;

    memset(&cmd, 0, sizeof(cmd));
    cmd.opcode = MUSTEK_GET_STATUS;
    cmd.length = sizeof(data);

    while (1) {
        SC_DEBUG(periph, SCSIPI_DB1, ("mustek_get_status: stat_cmd\n"));
        error = scsipi_command(periph, (void *)&cmd, sizeof(cmd),
                               (void *)&data, sizeof(data),
                               MUSTEK_RETRIES, 5000, NULL, XS_CTL_DATA_IN);
        if (error)
            return error;
        if ((data.ready_busy == MUSTEK_READY) ||
                (timeout-- <= 0))
            break;
        /* please wait a second */
        kpause("mtkrdy", false, hz, NULL);
    }

    if (update) {
        bytes_per_line = _2ltol(data.bytes_per_line);
        lines = _3ltol(data.lines);
        if (lines != ss->sio.scan_lines) {
            printf("mustek: lines actual(%d) != computed(%ld)\n",
                   lines, ss->sio.scan_lines);
            return EIO;
        }
        if (bytes_per_line * lines != ss->sio.scan_window_size) {
            printf("mustek: win-size actual(%d) != computed(%ld)\n",
                   bytes_per_line * lines, ss->sio.scan_window_size);
            return EIO;
        }

        SC_DEBUG(periph, SCSIPI_DB1,
                 ("mustek_get_size: bpl=%ld, lines=%ld\n",
                  (ss->sio.scan_pixels_per_line * ss->sio.scan_bits_per_pixel)
                  / 8, ss->sio.scan_lines));
        SC_DEBUG(periph, SCSIPI_DB1, ("window size = %ld\n",
                                      ss->sio.scan_window_size));
    }

    SC_DEBUG(periph, SCSIPI_DB1, ("mustek_get_status: end\n"));
    if (data.ready_busy == MUSTEK_READY)
        return 0;
    else
        return EBUSY;
}
Example #6
/*
 * Read requests from /dev/puffs and forward them to comfd
 *
 * XXX: the init detection is really sucky, but let's not
 * waste too much energy for a better one here
 */
static void
readthread(void *arg)
{
	struct ptargs *pap = arg;
	struct file *fp;
	register_t rv;
	char *buf;
	off_t off;
	int error, inited;

	buf = kmem_alloc(BUFSIZE, KM_SLEEP);
	inited = 0;

 retry:
	kpause(NULL, 0, hz/4, NULL);

	for (;;) {
		size_t n;

		off = 0;
		fp = fd_getfile(pap->fpfd);
		if (fp == NULL)
			error = EINVAL;
		else
			error = dofileread(pap->fpfd, fp, buf, BUFSIZE,
			    &off, 0, &rv);
		if (error) {
			if (error == ENOENT && inited == 0)
				goto retry;
			if (error == ENXIO)
				break;
			panic("fileread failed: %d", error);
		}
		inited = 1;

		while (rv) {
			struct rumpuser_iovec iov;

			iov.iov_base = buf;
			iov.iov_len = rv;

			error = rumpuser_iovwrite(pap->comfd, &iov, 1,
			    RUMPUSER_IOV_NOSEEK, &n);
			if (error)
				panic("fileread failed: %d", error);
			if (n == 0)
				panic("fileread failed: closed");
			rv -= n;
		}
	}

	kthread_exit(0);
}
Example #7
int
nanosleep1(struct lwp *l, clockid_t clock_id, int flags, struct timespec *rqt,
    struct timespec *rmt)
{
	struct timespec rmtstart;
	int error, timo;

	if ((error = ts2timo(clock_id, flags, rqt, &timo, &rmtstart)) != 0) {
		if (error == ETIMEDOUT) {
			error = 0;
			if (rmt != NULL)
				rmt->tv_sec = rmt->tv_nsec = 0;
		}
		return error;
	}

	/*
	 * Avoid inadvertently sleeping forever
	 */
	if (timo == 0)
		timo = 1;
again:
	error = kpause("nanoslp", true, timo, NULL);
	if (rmt != NULL || error == 0) {
		struct timespec rmtend;
		struct timespec t0;
		struct timespec *t;

		(void)clock_gettime1(clock_id, &rmtend);
		t = (rmt != NULL) ? rmt : &t0;
		if (flags & TIMER_ABSTIME) {
			timespecsub(rqt, &rmtend, t);
		} else {
			timespecsub(&rmtend, &rmtstart, t);
			timespecsub(rqt, t, t);
		}
		if (t->tv_sec < 0)
			timespecclear(t);
		if (error == 0) {
			timo = tstohz(t);
			if (timo > 0)
				goto again;
		}
	}

	if (error == ERESTART)
		error = EINTR;
	if (error == EWOULDBLOCK)
		error = 0;

	return error;
}
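Two details of nanosleep1() generalize well. First, a computed timeout of zero must be bumped to one tick, because kpause() with timo == 0 has no timeout at all (the indefinite, signal-interruptible sleep that sigsuspend1() in the next example relies on). Second, ERESTART and EWOULDBLOCK from kpause() are mapped to the values user space expects. A small sketch of the clamping part, assuming tstohz() behaves as used above; the helper name is mine.

/*
 * Sketch: sleep for roughly the duration of *ts, clamping the converted
 * timeout to at least one tick so we never inadvertently sleep forever.
 */
static int
kpause_ts(const struct timespec *ts, bool intr)
{
	int timo = tstohz(ts);

	if (timo == 0)
		timo = 1;
	return kpause("tsslp", intr, timo, NULL);
}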
Example #8
int
sigsuspend1(struct lwp *l, const sigset_t *ss)
{

	if (ss)
		sigsuspendsetup(l, ss);

	while (kpause("pause", true, 0, NULL) == 0)
		;

	/* always return EINTR rather than ERESTART... */
	return EINTR;
}
Example #9
/*
 * pserialize_perform:
 *
 *	Perform the write side of passive serialization.  The calling
 *	thread holds an exclusive lock on the data object(s) being updated.
 *	We wait until every processor in the system has made at least two
 *	passes through cpu_switchto().  The wait is made with the caller's
 *	update lock held, but is short term.
 */
void
pserialize_perform(pserialize_t psz)
{
	uint64_t xc;

	KASSERT(!cpu_intr_p());
	KASSERT(!cpu_softintr_p());

	if (__predict_false(panicstr != NULL)) {
		return;
	}
	KASSERT(psz->psz_owner == NULL);
	KASSERT(ncpu > 0);

	/*
	 * Set up the object and put it onto the queue.  The lock
	 * activity here provides the necessary memory barrier to
	 * make the caller's data update completely visible to
	 * other processors.
	 */
	psz->psz_owner = curlwp;
	kcpuset_copy(psz->psz_target, kcpuset_running);
	kcpuset_zero(psz->psz_pass);

	mutex_spin_enter(&psz_lock);
	TAILQ_INSERT_TAIL(&psz_queue0, psz, psz_chain);
	psz_work_todo++;

	do {
		mutex_spin_exit(&psz_lock);

		/*
		 * Force some context switch activity on every CPU, as
		 * the system may not be busy.  Pause to not flood.
		 */
		xc = xc_broadcast(XC_HIGHPRI, (xcfunc_t)nullop, NULL, NULL);
		xc_wait(xc);
		kpause("psrlz", false, 1, NULL);

		mutex_spin_enter(&psz_lock);
	} while (!kcpuset_iszero(psz->psz_target));

	psz_ev_excl.ev_count++;
	mutex_spin_exit(&psz_lock);

	psz->psz_owner = NULL;
}
Example #10
static int
awin_hdmi_i2c_xfer_1_4(void *priv, i2c_addr_t addr, uint8_t block, uint8_t reg,
    size_t len, int type, int flags)
{
	struct awin_hdmi_softc *sc = priv;
	uint32_t val;
	int retry;

	val = HDMI_READ(sc, AWIN_A31_HDMI_DDC_FIFO_CTRL_REG);
	val |= AWIN_A31_HDMI_DDC_FIFO_CTRL_RST;
	HDMI_WRITE(sc, AWIN_A31_HDMI_DDC_FIFO_CTRL_REG, val);

	val = __SHIFTIN(block, AWIN_A31_HDMI_DDC_SLAVE_ADDR_SEG_PTR);
	val |= __SHIFTIN(0x60, AWIN_A31_HDMI_DDC_SLAVE_ADDR_DDC_CMD);
	val |= __SHIFTIN(reg, AWIN_A31_HDMI_DDC_SLAVE_ADDR_OFF_ADR);
	val |= __SHIFTIN(addr, AWIN_A31_HDMI_DDC_SLAVE_ADDR_DEV_ADR);
	HDMI_WRITE(sc, AWIN_A31_HDMI_DDC_SLAVE_ADDR_REG, val);

	HDMI_WRITE(sc, AWIN_A31_HDMI_DDC_COMMAND_REG,
	    __SHIFTIN(len, AWIN_A31_HDMI_DDC_COMMAND_DTC) |
	    __SHIFTIN(type, AWIN_A31_HDMI_DDC_COMMAND_CMD));

	val = HDMI_READ(sc, AWIN_A31_HDMI_DDC_CTRL_REG);
	val |= AWIN_A31_HDMI_DDC_CTRL_ACCESS_CMD_START;
	HDMI_WRITE(sc, AWIN_A31_HDMI_DDC_CTRL_REG, val);

	retry = 1000;
	while (--retry > 0) {
		val = HDMI_READ(sc, AWIN_A31_HDMI_DDC_CTRL_REG);
		if ((val & AWIN_A31_HDMI_DDC_CTRL_ACCESS_CMD_START) == 0)
			break;
		if (cold)
			delay(1000);
		else
			kpause("hdmiddc", false, mstohz(10), &sc->sc_ic_lock);
	}
	if (retry == 0)
		return ETIMEDOUT;

	return 0;
}
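Checking cold before choosing between delay() and kpause() is the same guard usb_delay_ms_locked() applies in Example #3: before the scheduler is running (or while polling), sleeping is not an option, so the driver spins instead. A minimal, hypothetical helper capturing that choice.

/*
 * Sketch: wait roughly "ms" milliseconds, spinning with delay() when the
 * system is cold or polling, sleeping with kpause() otherwise.
 */
static void
wait_ms(u_int ms, bool polling)
{

	if (cold || polling) {
		delay(ms * 1000);		/* delay() takes microseconds */
	} else {
		int timo = mstohz(ms);

		if (timo == 0)
			timo = 1;		/* never hand kpause() a zero timeout */
		kpause("waitms", false, timo, NULL);
	}
}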
Example #11
int
rump_netconfig_ipv6_ifaddr(const char *ifname, const char *addr, int prefixlen)
{
	struct sockaddr_in6 *sin6;
	struct in6_aliasreq ia;
	int rv;

	CHECKDOMAIN(in6so);

	/* pfft, you do the bitnibbling */
	if (prefixlen % 8)
		return EINVAL;

	memset(&ia, 0, sizeof(ia));
	strlcpy(ia.ifra_name, ifname, sizeof(ia.ifra_name));

	ia.ifra_lifetime.ia6t_pltime = ND6_INFINITE_LIFETIME;
	ia.ifra_lifetime.ia6t_vltime = ND6_INFINITE_LIFETIME;

	sin6 = (struct sockaddr_in6 *)&ia.ifra_addr;
	sin6->sin6_family = AF_INET6;
	sin6->sin6_len = sizeof(*sin6);
	netconfig_inet_pton6(addr, &sin6->sin6_addr);

	sin6 = (struct sockaddr_in6 *)&ia.ifra_prefixmask;
	sin6->sin6_family = AF_INET6;
	sin6->sin6_len = sizeof(*sin6);
	memset(&sin6->sin6_addr, 0, sizeof(sin6->sin6_addr));
	memset(&sin6->sin6_addr, 0xff, prefixlen / 8);

	rv = wrapifioctl(in6so, SIOCAIFADDR_IN6, &ia);
	/*
	 * small pause so that we can assume interface is usable when
	 * we return (ARPs have trickled through, etc.)
	 */
	if (rv == 0)
		kpause("ramasee", false, mstohz(50), NULL);
	return rv;
}
Example #12
/*
 * Free Ring Buffers.
 *
 * Because we used external memory for the tx mbufs, we don't
 * want to free the memory until all the mbufs are done with it.
 *
 * Just to be sure, don't free if something is still pending.
 * This would be a memory leak, but at least there is a warning.
 */
static void
btsco_freem(void *hdl, void *addr, size_t size)
{
	struct btsco_softc *sc = hdl;
	int count = hz / 2;

	if (addr == sc->sc_tx_buf) {
		DPRINTF("%s: tx_refcnt=%d\n", sc->sc_name, sc->sc_tx_refcnt);

		sc->sc_tx_buf = NULL;

		while (sc->sc_tx_refcnt > 0 && count-- > 0)
			kpause("drain", false, 1, NULL);

		if (sc->sc_tx_refcnt > 0) {
			aprint_error("%s: ring buffer unreleased!\n", sc->sc_name);
			return;
		}
	}

	kmem_free(addr, size);
}
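The drain loop bounds the wait to about half a second: hz/2 one-tick sleeps. A sketch of that shape in isolation, with hypothetical names.

/*
 * Sketch: poll once per tick, for at most "maxticks" ticks, until the
 * reference count drains to zero.  Returns true if it drained in time.
 */
static bool
drain_refcnt(volatile int *refcnt, int maxticks)
{

	while (*refcnt > 0 && maxticks-- > 0)
		kpause("drain", false, 1, NULL);

	return *refcnt == 0;
}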
Example #13
void
npf_test_conc(bool st, unsigned nthreads)
{
	uint64_t total = 0;
	int error;
	lwp_t **l;

	printf("THREADS\tPKTS\n");
	stateful = st;
	done = false;
	run = false;

	npackets = kmem_zalloc(sizeof(uint64_t) * nthreads, KM_SLEEP);
	l = kmem_zalloc(sizeof(lwp_t *) * nthreads, KM_SLEEP);

	for (unsigned i = 0; i < nthreads; i++) {
		const int flags = KTHREAD_MUSTJOIN | KTHREAD_MPSAFE;
		error = kthread_create(PRI_NONE, flags, NULL,
		    worker, (void *)(uintptr_t)i, &l[i], "npfperf");
		KASSERT(error == 0);
	}

	/* Let them spin! */
	run = true;
	kpause("perf", false, NSECS * hz, NULL);
	done = true;

	/* Wait until all threads exit and sum the counts. */
	for (unsigned i = 0; i < nthreads; i++) {
		kthread_join(l[i]);
		total += npackets[i];
	}
	kmem_free(npackets, sizeof(uint64_t) * nthreads);
	kmem_free(l, sizeof(lwp_t *) * nthreads);

	printf("%u\t%" PRIu64 "\n", nthreads, total / NSECS);
}
Example #14
static int
cfg_ipv4(const char *ifname, const char *addr, in_addr_t m_addr)
{
	struct ifaliasreq ia;
	struct sockaddr_in *sin;
	int rv;

	CHECKDOMAIN(in4so);

	memset(&ia, 0, sizeof(ia));
	strlcpy(ia.ifra_name, ifname, sizeof(ia.ifra_name));

	sin = (struct sockaddr_in *)&ia.ifra_addr;
	sin->sin_family = AF_INET;
	sin->sin_len = sizeof(*sin);
	sin->sin_addr.s_addr = inet_addr(addr);

	sin = (struct sockaddr_in *)&ia.ifra_mask;
	sin->sin_family = AF_INET;
	sin->sin_len = sizeof(*sin);
	sin->sin_addr.s_addr = m_addr;

	sin = (struct sockaddr_in *)&ia.ifra_broadaddr;
	sin->sin_family = AF_INET;
	sin->sin_len = sizeof(*sin);
	sin->sin_addr.s_addr = inet_addr(addr) | ~m_addr;

	rv = wrapifioctl(in4so, SIOCAIFADDR, &ia);
	/*
	 * small pause so that we can assume interface is usable when
	 * we return (ARPs have trickled through, etc.)
	 */
	if (rv == 0)
		kpause("ramasee", false, mstohz(50), NULL);
	return rv;
}
Example #15
int
uvm_loananon(struct uvm_faultinfo *ufi, void ***output, int flags,
    struct vm_anon *anon)
{
	struct vm_page *pg;
	int error;

	UVMHIST_FUNC(__func__); UVMHIST_CALLED(loanhist);

	/*
	 * if we are loaning to "another" anon then it is easy, we just
	 * bump the reference count on the current anon and return a
	 * pointer to it (it becomes copy-on-write shared).
	 */

	if (flags & UVM_LOAN_TOANON) {
		KASSERT(mutex_owned(anon->an_lock));
		pg = anon->an_page;
		if (pg && (pg->pqflags & PQ_ANON) != 0 && anon->an_ref == 1) {
			if (pg->wire_count > 0) {
				UVMHIST_LOG(loanhist, "->A wired %p", pg,0,0,0);
				uvmfault_unlockall(ufi,
				    ufi->entry->aref.ar_amap,
				    ufi->entry->object.uvm_obj);
				return (-1);
			}
			pmap_page_protect(pg, VM_PROT_READ);
		}
		anon->an_ref++;
		**output = anon;
		(*output)++;
		UVMHIST_LOG(loanhist, "->A done", 0,0,0,0);
		return (1);
	}

	/*
	 * we are loaning to a kernel-page.   we need to get the page
	 * resident so we can wire it.   uvmfault_anonget will handle
	 * this for us.
	 */

	KASSERT(mutex_owned(anon->an_lock));
	error = uvmfault_anonget(ufi, ufi->entry->aref.ar_amap, anon);

	/*
	 * if we were unable to get the anon, then uvmfault_anonget has
	 * unlocked everything and returned an error code.
	 */

	if (error) {
		UVMHIST_LOG(loanhist, "error %d", error,0,0,0);

		/* need to refault (i.e. refresh our lookup) ? */
		if (error == ERESTART) {
			return (0);
		}

		/* "try again"?   sleep a bit and retry ... */
		if (error == EAGAIN) {
			kpause("loanagain", false, hz/2, NULL);
			return (0);
		}

		/* otherwise flag it as an error */
		return (-1);
	}

	/*
	 * we have the page and its owner locked: do the loan now.
	 */

	pg = anon->an_page;
	mutex_enter(&uvm_pageqlock);
	if (pg->wire_count > 0) {
		mutex_exit(&uvm_pageqlock);
		UVMHIST_LOG(loanhist, "->K wired %p", pg,0,0,0);
		KASSERT(pg->uobject == NULL);
		uvmfault_unlockall(ufi, ufi->entry->aref.ar_amap, NULL);
		return (-1);
	}
	if (pg->loan_count == 0) {
		pmap_page_protect(pg, VM_PROT_READ);
	}
	pg->loan_count++;
	uvm_pageactivate(pg);
	mutex_exit(&uvm_pageqlock);
	**output = pg;
	(*output)++;

	/* unlock and return success */
	if (pg->uobject)
		mutex_exit(pg->uobject->vmobjlock);
	UVMHIST_LOG(loanhist, "->K done", 0,0,0,0);
	return (1);
}
Example #16
/*
 * System filesystem synchronizer daemon.
 */
void
sched_sync(void *arg)
{
	synclist_t *slp;
	struct vnode *vp;
	time_t starttime;
	bool synced;

	for (;;) {
		mutex_enter(&syncer_mutex);
		mutex_enter(&syncer_data_lock);

		starttime = time_second;

		/*
		 * Push files whose dirty time has expired.
		 */
		slp = &syncer_workitem_pending[syncer_delayno];
		syncer_delayno += 1;
		if (syncer_delayno >= syncer_last)
			syncer_delayno = 0;

		while ((vp = TAILQ_FIRST(slp)) != NULL) {
			/* We are locking in the wrong direction. */
			synced = false;
			if (mutex_tryenter(vp->v_interlock)) {
				mutex_exit(&syncer_data_lock);
				if (vget(vp, LK_EXCLUSIVE | LK_NOWAIT) == 0) {
					synced = true;
					(void) VOP_FSYNC(vp, curlwp->l_cred,
					    FSYNC_LAZY, 0, 0);
					vput(vp);
				}
				mutex_enter(&syncer_data_lock);
			}

			/*
			 * XXX The vnode may have been recycled, in which
			 * case it may have a new identity.
			 */
			if (TAILQ_FIRST(slp) == vp) {
				/*
				 * Put us back on the worklist.  The worklist
				 * routine will remove us from our current
				 * position and then add us back in at a later
				 * position.
				 *
				 * Try again sooner rather than later if
				 * we were unable to lock the vnode.  Lock
				 * failure should not prevent us from doing
				 * the sync "soon".
				 *
				 * If we did lock it and still arrive here, it's
				 * likely that lazy sync is in progress and
				 * so the vnode still has dirty metadata.
				 * syncdelay is mainly to get this vnode out
				 * of the way so we do not consider it again
				 * "soon" in this loop, so the delay time is
				 * not critical as long as it is not "soon". 
				 * While write-back strategy is the file
				 * system's domain, we expect write-back to
				 * occur no later than syncdelay seconds
				 * into the future.
				 */
				vn_syncer_add1(vp,
				    synced ? syncdelay : lockdelay);
			}
		}
		mutex_exit(&syncer_mutex);

		/*
		 * If it has taken us less than a second to process the
		 * current work, then wait.  Otherwise start right over
		 * again.  We can still lose time if any single round
		 * takes more than two seconds, but it does not really
		 * matter as we are just trying to generally pace the
		 * filesystem activity.
		 */
		if (time_second == starttime) {
			kpause("syncer", false, hz, &syncer_data_lock);
		}
		mutex_exit(&syncer_data_lock);
	}
}
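Note the fourth argument to kpause(): passing syncer_data_lock makes kpause() release that mutex for the duration of the sleep and re-acquire it before returning, which is why the mutex_exit() after the loop is still balanced. A minimal sketch of that interlock usage, under my reading of the kpause() calls in these examples; the names are hypothetical.

/*
 * Sketch: sleep "ticks" ticks while temporarily giving up "lock".  The
 * caller must hold the lock; kpause() drops it while asleep and returns
 * with it held again.
 */
static void
pause_unlocked(kmutex_t *lock, int ticks)
{

	KASSERT(mutex_owned(lock));
	kpause("pauselk", false, ticks, lock);
	/* "lock" is held again here */
}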
Example #17
static int
uvm_loanuobj(struct uvm_faultinfo *ufi, void ***output, int flags, vaddr_t va)
{
	struct vm_amap *amap = ufi->entry->aref.ar_amap;
	struct uvm_object *uobj = ufi->entry->object.uvm_obj;
	struct vm_page *pg;
	int error, npages;
	bool locked;

	UVMHIST_FUNC(__func__); UVMHIST_CALLED(loanhist);

	/*
	 * first we must make sure the page is resident.
	 *
	 * XXXCDC: duplicate code with uvm_fault().
	 */

	/* locked: maps(read), amap(if there) */
	mutex_enter(uobj->vmobjlock);
	/* locked: maps(read), amap(if there), uobj */

	if (uobj->pgops->pgo_get) {	/* try locked pgo_get */
		npages = 1;
		pg = NULL;
		error = (*uobj->pgops->pgo_get)(uobj,
		    va - ufi->entry->start + ufi->entry->offset,
		    &pg, &npages, 0, VM_PROT_READ, MADV_NORMAL, PGO_LOCKED);
	} else {
		error = EIO;		/* must have pgo_get op */
	}

	/*
	 * check the result of the locked pgo_get.  if there is a problem,
	 * then we fail the loan.
	 */

	if (error && error != EBUSY) {
		uvmfault_unlockall(ufi, amap, uobj);
		return (-1);
	}

	/*
	 * if we need to unlock for I/O, do so now.
	 */

	if (error == EBUSY) {
		uvmfault_unlockall(ufi, amap, NULL);

		/* locked: uobj */
		npages = 1;
		error = (*uobj->pgops->pgo_get)(uobj,
		    va - ufi->entry->start + ufi->entry->offset,
		    &pg, &npages, 0, VM_PROT_READ, MADV_NORMAL, PGO_SYNCIO);
		/* locked: <nothing> */

		if (error) {
			if (error == EAGAIN) {
				kpause("fltagain2", false, hz/2, NULL);
				return (0);
			}
			return (-1);
		}

		/*
		 * pgo_get was a success.   attempt to relock everything.
		 */

		locked = uvmfault_relock(ufi);
		if (locked && amap)
			amap_lock(amap);
		uobj = pg->uobject;
		mutex_enter(uobj->vmobjlock);

		/*
		 * verify that the page has not been released and re-verify
		 * that amap slot is still free.   if there is a problem we
		 * drop our lock (thus force a lookup refresh/retry).
		 */

		if ((pg->flags & PG_RELEASED) != 0 ||
		    (locked && amap && amap_lookup(&ufi->entry->aref,
		    ufi->orig_rvaddr - ufi->entry->start))) {
			if (locked)
				uvmfault_unlockall(ufi, amap, NULL);
			locked = false;
		}

		/*
		 * didn't get the lock?   release the page and retry.
		 */

		if (locked == false) {
			if (pg->flags & PG_WANTED) {
				wakeup(pg);
			}
			if (pg->flags & PG_RELEASED) {
				mutex_enter(&uvm_pageqlock);
				uvm_pagefree(pg);
				mutex_exit(&uvm_pageqlock);
				mutex_exit(uobj->vmobjlock);
				return (0);
			}
			mutex_enter(&uvm_pageqlock);
			uvm_pageactivate(pg);
			mutex_exit(&uvm_pageqlock);
			pg->flags &= ~(PG_BUSY|PG_WANTED);
			UVM_PAGE_OWN(pg, NULL);
			mutex_exit(uobj->vmobjlock);
			return (0);
		}
	}

	KASSERT(uobj == pg->uobject);

	/*
	 * at this point we have the page we want ("pg") marked PG_BUSY for us
	 * and we have all data structures locked.  do the loanout.  page can
	 * not be PG_RELEASED (we caught this above).
	 */

	if ((flags & UVM_LOAN_TOANON) == 0) {
		if (uvm_loanpage(&pg, 1)) {
			uvmfault_unlockall(ufi, amap, uobj);
			return (-1);
		}
		mutex_exit(uobj->vmobjlock);
		**output = pg;
		(*output)++;
		return (1);
	}

#ifdef notdef
	/*
	 * must be a loan to an anon.   check to see if there is already
	 * an anon associated with this page.  if so, then just return
	 * a reference to this object.   the page should already be
	 * mapped read-only because it is already on loan.
	 */

	if (pg->uanon) {
		/* XXX: locking */
		anon = pg->uanon;
		anon->an_ref++;
		if (pg->flags & PG_WANTED) {
			wakeup(pg);
		}
		pg->flags &= ~(PG_WANTED|PG_BUSY);
		UVM_PAGE_OWN(pg, NULL);
		mutex_exit(uobj->vmobjlock);
		**output = anon;
		(*output)++;
		return (1);
	}

	/*
	 * need to allocate a new anon
	 */

	anon = uvm_analloc();
	if (anon == NULL) {
		goto fail;
	}
	mutex_enter(&uvm_pageqlock);
	if (pg->wire_count > 0) {
		mutex_exit(&uvm_pageqlock);
		UVMHIST_LOG(loanhist, "wired %p", pg,0,0,0);
		goto fail;
	}
	if (pg->loan_count == 0) {
		pmap_page_protect(pg, VM_PROT_READ);
	}
	pg->loan_count++;
	pg->uanon = anon;
	anon->an_page = pg;
	anon->an_lock = /* TODO: share amap lock */
	uvm_pageactivate(pg);
	mutex_exit(&uvm_pageqlock);
	if (pg->flags & PG_WANTED) {
		wakeup(pg);
	}
	pg->flags &= ~(PG_WANTED|PG_BUSY);
	UVM_PAGE_OWN(pg, NULL);
	mutex_exit(uobj->vmobjlock);
	mutex_exit(&anon->an_lock);
	**output = anon;
	(*output)++;
	return (1);

fail:
	UVMHIST_LOG(loanhist, "fail", 0,0,0,0);
	/*
	 * unlock everything and bail out.
	 */
	if (pg->flags & PG_WANTED) {
		wakeup(pg);
	}
	pg->flags &= ~(PG_WANTED|PG_BUSY);
	UVM_PAGE_OWN(pg, NULL);
	uvmfault_unlockall(ufi, amap, uobj, NULL);
	if (anon) {
		anon->an_ref--;
		uvm_anon_free(anon);
	}
#endif	/* notdef */
	return (-1);
}
Example #18
/*
 * General fork call.  Note that another LWP in the process may call exec()
 * or exit() while we are forking.  It's safe to continue here, because
 * neither operation will complete until all LWPs have exited the process.
 */
int
fork1(struct lwp *l1, int flags, int exitsig, void *stack, size_t stacksize,
    void (*func)(void *), void *arg, register_t *retval,
    struct proc **rnewprocp)
{
	struct proc	*p1, *p2, *parent;
	struct plimit   *p1_lim;
	uid_t		uid;
	struct lwp	*l2;
	int		count;
	vaddr_t		uaddr;
	int		tnprocs;
	int		tracefork;
	int		error = 0;

	p1 = l1->l_proc;
	uid = kauth_cred_getuid(l1->l_cred);
	tnprocs = atomic_inc_uint_nv(&nprocs);

	/*
	 * Although process entries are dynamically created, we still keep
	 * a global limit on the maximum number we will create.
	 */
	if (__predict_false(tnprocs >= maxproc))
		error = -1;
	else
		error = kauth_authorize_process(l1->l_cred,
		    KAUTH_PROCESS_FORK, p1, KAUTH_ARG(tnprocs), NULL, NULL);

	if (error) {
		static struct timeval lasttfm;
		atomic_dec_uint(&nprocs);
		if (ratecheck(&lasttfm, &fork_tfmrate))
			tablefull("proc", "increase kern.maxproc or NPROC");
		if (forkfsleep)
			kpause("forkmx", false, forkfsleep, NULL);
		return EAGAIN;
	}

	/*
	 * Enforce limits.
	 */
	count = chgproccnt(uid, 1);
	if (__predict_false(count > p1->p_rlimit[RLIMIT_NPROC].rlim_cur)) {
		if (kauth_authorize_process(l1->l_cred, KAUTH_PROCESS_RLIMIT,
		    p1, KAUTH_ARG(KAUTH_REQ_PROCESS_RLIMIT_BYPASS),
		    &p1->p_rlimit[RLIMIT_NPROC], KAUTH_ARG(RLIMIT_NPROC)) != 0) {
			(void)chgproccnt(uid, -1);
			atomic_dec_uint(&nprocs);
			if (forkfsleep)
				kpause("forkulim", false, forkfsleep, NULL);
			return EAGAIN;
		}
	}

	/*
	 * Allocate virtual address space for the U-area now, while it
	 * is still easy to abort the fork operation if we're out of
	 * kernel virtual address space.
	 */
	uaddr = uvm_uarea_alloc();
	if (__predict_false(uaddr == 0)) {
		(void)chgproccnt(uid, -1);
		atomic_dec_uint(&nprocs);
		return ENOMEM;
	}

	/*
	 * We are now committed to the fork.  From here on, we may
	 * block on resources, but resource allocation may NOT fail.
	 */

	/* Allocate new proc. */
	p2 = proc_alloc();

	/*
	 * Make a proc table entry for the new process.
	 * Start by zeroing the section of proc that is zero-initialized,
	 * then copy the section that is copied directly from the parent.
	 */
	memset(&p2->p_startzero, 0,
	    (unsigned) ((char *)&p2->p_endzero - (char *)&p2->p_startzero));
	memcpy(&p2->p_startcopy, &p1->p_startcopy,
	    (unsigned) ((char *)&p2->p_endcopy - (char *)&p2->p_startcopy));

	TAILQ_INIT(&p2->p_sigpend.sp_info);

	LIST_INIT(&p2->p_lwps);
	LIST_INIT(&p2->p_sigwaiters);

	/*
	 * Duplicate sub-structures as needed.
	 * Increase reference counts on shared objects.
	 * Inherit flags we want to keep.  The flags related to SIGCHLD
	 * handling are important in order to keep a consistent behaviour
	 * for the child after the fork.  If we are a 32-bit process, the
	 * child will be too.
	 */
	p2->p_flag =
	    p1->p_flag & (PK_SUGID | PK_NOCLDWAIT | PK_CLDSIGIGN | PK_32);
	p2->p_emul = p1->p_emul;
	p2->p_execsw = p1->p_execsw;

	if (flags & FORK_SYSTEM) {
		/*
		 * Mark it as a system process.  Set P_NOCLDWAIT so that
		 * children are reparented to init(8) when they exit.
		 * init(8) can easily wait them out for us.
		 */
		p2->p_flag |= (PK_SYSTEM | PK_NOCLDWAIT);
	}

	mutex_init(&p2->p_stmutex, MUTEX_DEFAULT, IPL_HIGH);
	mutex_init(&p2->p_auxlock, MUTEX_DEFAULT, IPL_NONE);
	rw_init(&p2->p_reflock);
	cv_init(&p2->p_waitcv, "wait");
	cv_init(&p2->p_lwpcv, "lwpwait");

	/*
	 * Share a lock between the processes if they are to share signal
	 * state: we must synchronize access to it.
	 */
	if (flags & FORK_SHARESIGS) {
		p2->p_lock = p1->p_lock;
		mutex_obj_hold(p1->p_lock);
	} else
		p2->p_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NONE);

	kauth_proc_fork(p1, p2);

	p2->p_raslist = NULL;
#if defined(__HAVE_RAS)
	ras_fork(p1, p2);
#endif

	/* bump references to the text vnode (for procfs) */
	p2->p_textvp = p1->p_textvp;
	if (p2->p_textvp)
		vref(p2->p_textvp);

	if (flags & FORK_SHAREFILES)
		fd_share(p2);
	else if (flags & FORK_CLEANFILES)
		p2->p_fd = fd_init(NULL);
	else
		p2->p_fd = fd_copy();

	/* XXX racy */
	p2->p_mqueue_cnt = p1->p_mqueue_cnt;

	if (flags & FORK_SHARECWD)
		cwdshare(p2);
	else
		p2->p_cwdi = cwdinit();

	/*
	 * Note: p_limit (rlimit stuff) is copy-on-write, so normally
	 * we just need to increase pl_refcnt.
	 */
	p1_lim = p1->p_limit;
	if (!p1_lim->pl_writeable) {
		lim_addref(p1_lim);
		p2->p_limit = p1_lim;
	} else {
		p2->p_limit = lim_copy(p1_lim);
	}

	if (flags & FORK_PPWAIT) {
		/* Mark ourselves as waiting for a child. */
		l1->l_pflag |= LP_VFORKWAIT;
		p2->p_lflag = PL_PPWAIT;
		p2->p_vforklwp = l1;
	} else {
		p2->p_lflag = 0;
	}
	p2->p_sflag = 0;
	p2->p_slflag = 0;
	parent = (flags & FORK_NOWAIT) ? initproc : p1;
	p2->p_pptr = parent;
	p2->p_ppid = parent->p_pid;
	LIST_INIT(&p2->p_children);

	p2->p_aio = NULL;

#ifdef KTRACE
	/*
	 * Copy traceflag and tracefile if enabled.
	 * If not inherited, these were zeroed above.
	 */
	if (p1->p_traceflag & KTRFAC_INHERIT) {
		mutex_enter(&ktrace_lock);
		p2->p_traceflag = p1->p_traceflag;
		if ((p2->p_tracep = p1->p_tracep) != NULL)
			ktradref(p2);
		mutex_exit(&ktrace_lock);
	}
#endif

	/*
	 * Create signal actions for the child process.
	 */
	p2->p_sigacts = sigactsinit(p1, flags & FORK_SHARESIGS);
	mutex_enter(p1->p_lock);
	p2->p_sflag |=
	    (p1->p_sflag & (PS_STOPFORK | PS_STOPEXEC | PS_NOCLDSTOP));
	sched_proc_fork(p1, p2);
	mutex_exit(p1->p_lock);

	p2->p_stflag = p1->p_stflag;

	/*
	 * p_stats.
	 * Copy parts of p_stats, and zero out the rest.
	 */
	p2->p_stats = pstatscopy(p1->p_stats);

	/*
	 * Set up the new process address space.
	 */
	uvm_proc_fork(p1, p2, (flags & FORK_SHAREVM) ? true : false);

	/*
	 * Finish creating the child process.
	 * It will return through a different path later.
	 */
	lwp_create(l1, p2, uaddr, (flags & FORK_PPWAIT) ? LWP_VFORK : 0,
	    stack, stacksize, (func != NULL) ? func : child_return, arg, &l2,
	    l1->l_class);

	/*
	 * Inherit l_private from the parent.
	 * Note that we cannot use lwp_setprivate() here since that
	 * also sets the CPU TLS register, which is incorrect if the
	 * process has changed that without letting the kernel know.
	 */
	l2->l_private = l1->l_private;

	/*
	 * If emulation has a process fork hook, call it now.
	 */
	if (p2->p_emul->e_proc_fork)
		(*p2->p_emul->e_proc_fork)(p2, l1, flags);

	/*
	 * ...and finally, any other random fork hooks that subsystems
	 * might have registered.
	 */
	doforkhooks(p2, p1);

	SDT_PROBE(proc,,,create, p2, p1, flags, 0, 0);

	/*
	 * It's now safe for the scheduler and other processes to see the
	 * child process.
	 */
	mutex_enter(proc_lock);

	if (p1->p_session->s_ttyvp != NULL && p1->p_lflag & PL_CONTROLT)
		p2->p_lflag |= PL_CONTROLT;

	LIST_INSERT_HEAD(&parent->p_children, p2, p_sibling);
	p2->p_exitsig = exitsig;		/* signal for parent on exit */

	/*
	 * We don't want to tracefork vfork()ed processes because they
	 * will not receive the SIGTRAP until it is too late.
	 */
	tracefork = (p1->p_slflag & (PSL_TRACEFORK|PSL_TRACED)) ==
	    (PSL_TRACEFORK|PSL_TRACED) && (flags & FORK_PPWAIT) == 0;
	if (tracefork) {
		p2->p_slflag |= PSL_TRACED;
		p2->p_opptr = p2->p_pptr;
		if (p2->p_pptr != p1->p_pptr) {
			struct proc *parent1 = p2->p_pptr;

			if (parent1->p_lock < p2->p_lock) {
				if (!mutex_tryenter(parent1->p_lock)) {
					mutex_exit(p2->p_lock);
					mutex_enter(parent1->p_lock);
				}
			} else if (parent1->p_lock > p2->p_lock) {
				mutex_enter(parent1->p_lock);
			}
			parent1->p_slflag |= PSL_CHTRACED;
			proc_reparent(p2, p1->p_pptr);
			if (parent1->p_lock != p2->p_lock)
				mutex_exit(parent1->p_lock);
		}

		/*
		 * Set ptrace status.
		 */
		p1->p_fpid = p2->p_pid;
		p2->p_fpid = p1->p_pid;
	}

	LIST_INSERT_AFTER(p1, p2, p_pglist);
	LIST_INSERT_HEAD(&allproc, p2, p_list);

	p2->p_trace_enabled = trace_is_enabled(p2);
#ifdef __HAVE_SYSCALL_INTERN
	(*p2->p_emul->e_syscall_intern)(p2);
#endif

	/*
	 * Update stats now that we know the fork was successful.
	 */
	uvmexp.forks++;
	if (flags & FORK_PPWAIT)
		uvmexp.forks_ppwait++;
	if (flags & FORK_SHAREVM)
		uvmexp.forks_sharevm++;

	/*
	 * Pass a pointer to the new process to the caller.
	 */
	if (rnewprocp != NULL)
		*rnewprocp = p2;

	if (ktrpoint(KTR_EMUL))
		p2->p_traceflag |= KTRFAC_TRC_EMUL;

	/*
	 * Notify any interested parties about the new process.
	 */
	if (!SLIST_EMPTY(&p1->p_klist)) {
		mutex_exit(proc_lock);
		KNOTE(&p1->p_klist, NOTE_FORK | p2->p_pid);
		mutex_enter(proc_lock);
	}

	/*
	 * Make child runnable, set start time, and add to run queue except
	 * if the parent requested the child to start in SSTOP state.
	 */
	mutex_enter(p2->p_lock);

	/*
	 * Start profiling.
	 */
	if ((p2->p_stflag & PST_PROFIL) != 0) {
		mutex_spin_enter(&p2->p_stmutex);
		startprofclock(p2);
		mutex_spin_exit(&p2->p_stmutex);
	}

	getmicrotime(&p2->p_stats->p_start);
	p2->p_acflag = AFORK;
	lwp_lock(l2);
	KASSERT(p2->p_nrlwps == 1);
	if (p2->p_sflag & PS_STOPFORK) {
		struct schedstate_percpu *spc = &l2->l_cpu->ci_schedstate;
		p2->p_nrlwps = 0;
		p2->p_stat = SSTOP;
		p2->p_waited = 0;
		p1->p_nstopchild++;
		l2->l_stat = LSSTOP;
		KASSERT(l2->l_wchan == NULL);
		lwp_unlock_to(l2, spc->spc_lwplock);
	} else {
		p2->p_nrlwps = 1;
		p2->p_stat = SACTIVE;
		l2->l_stat = LSRUN;
		sched_enqueue(l2, false);
		lwp_unlock(l2);
	}

	/*
	 * Return child pid to parent process,
	 * marking us as parent via retval[1].
	 */
	if (retval != NULL) {
		retval[0] = p2->p_pid;
		retval[1] = 0;
	}
	mutex_exit(p2->p_lock);

	/*
	 * Preserve synchronization semantics of vfork.  If waiting for
	 * child to exec or exit, sleep until it clears LP_VFORKWAIT.
	 */
#if 0
	while (l1->l_pflag & LP_VFORKWAIT) {
		cv_wait(&l1->l_waitcv, proc_lock);
	}
#else
	while (p2->p_lflag & PL_PPWAIT)
		cv_wait(&p1->p_waitcv, proc_lock);
#endif

	/*
	 * Let the parent know that we are tracing its child.
	 */
	if (tracefork) {
		ksiginfo_t ksi;

		KSI_INIT_EMPTY(&ksi);
		ksi.ksi_signo = SIGTRAP;
		ksi.ksi_lid = l1->l_lid;
		kpsignal(p1, &ksi, NULL);
	}
	mutex_exit(proc_lock);

	return 0;
}
Example #19
static void
uvm_unloanpage(struct vm_page **ploans, int npages)
{
	struct vm_page *pg;
	kmutex_t *slock;

	mutex_enter(&uvm_pageqlock);
	while (npages-- > 0) {
		pg = *ploans++;

		/*
		 * do a little dance to acquire the object or anon lock
		 * as appropriate.  we are locking in the wrong order,
		 * so we have to do a try-lock here.
		 */

		slock = NULL;
		while (pg->uobject != NULL || pg->uanon != NULL) {
			if (pg->uobject != NULL) {
				slock = &pg->uobject->vmobjlock;
			} else {
				slock = &pg->uanon->an_lock;
			}
			if (mutex_tryenter(slock)) {
				break;
			}
			mutex_exit(&uvm_pageqlock);
			/* XXX Better than yielding but inadequate. */
			kpause("livelock", false, 1, NULL);
			mutex_enter(&uvm_pageqlock);
			slock = NULL;
		}

		/*
		 * drop our loan.  if page is owned by an anon but
		 * PQ_ANON is not set, the page was loaned to the anon
		 * from an object which dropped ownership, so resolve
		 * this by turning the anon's loan into real ownership
		 * (ie. decrement loan_count again and set PQ_ANON).
		 * after all this, if there are no loans left, put the
		 * page back a paging queue (if the page is owned by
		 * an anon) or free it (if the page is now unowned).
		 */

		KASSERT(pg->loan_count > 0);
		pg->loan_count--;
		if (pg->uobject == NULL && pg->uanon != NULL &&
		    (pg->pqflags & PQ_ANON) == 0) {
			KASSERT(pg->loan_count > 0);
			pg->loan_count--;
			pg->pqflags |= PQ_ANON;
		}
		if (pg->loan_count == 0 && pg->uobject == NULL &&
		    pg->uanon == NULL) {
			KASSERT((pg->flags & PG_BUSY) == 0);
			uvm_pagefree(pg);
		}
		if (slock != NULL) {
			mutex_exit(slock);
		}
	}
	mutex_exit(&uvm_pageqlock);
}
Example #20
int
mtopen(dev_t dev, int flag, int mode, struct lwp *l)
{
	struct mt_softc *sc;
	int req_den;
	int error;

	sc = device_lookup_private(&mt_cd, MTUNIT(dev));
	if (sc == NULL || (sc->sc_flags & MTF_EXISTS) == 0)
		return (ENXIO);

	if (sc->sc_flags & MTF_OPEN)
		return (EBUSY);

	DPRINTF(MDB_ANY, ("%s open: flags 0x%x", device_xname(sc->sc_dev),
	    sc->sc_flags));

	sc->sc_flags |= MTF_OPEN;
	sc->sc_ttyp = tprintf_open(l->l_proc);
	if ((sc->sc_flags & MTF_ALIVE) == 0) {
		error = mtcommand(dev, MTRESET, 0);
		if (error != 0 || (sc->sc_flags & MTF_ALIVE) == 0)
			goto errout;
		if ((sc->sc_stat1 & (SR1_BOT | SR1_ONLINE)) == SR1_ONLINE)
			(void) mtcommand(dev, MTREW, 0);
	}
	for (;;) {
		if ((error = mtcommand(dev, MTNOP, 0)) != 0)
			goto errout;
		if (!(sc->sc_flags & MTF_REW))
			break;
		error = kpause("mt", true, hz, NULL);
		if (error != 0 && error != EWOULDBLOCK) {
			error = EINTR;
			goto errout;
		}
	}
	if ((flag & FWRITE) && (sc->sc_stat1 & SR1_RO)) {
		error = EROFS;
		goto errout;
	}
	if (!(sc->sc_stat1 & SR1_ONLINE)) {
		uprintf("%s: not online\n", device_xname(sc->sc_dev));
		error = EIO;
		goto errout;
	}
	/*
	 * Select density:
	 *  - find out what density the drive is set to
	 *	(i.e. the density of the current tape)
	 *  - if we are going to write
	 *    - if we're not at the beginning of the tape
	 *      - complain if we want to change densities
	 *    - otherwise, select the mtcommand to set the density
	 *
	 * If the drive doesn't support it then don't change the recorded
	 * density.
	 *
	 * The original MOREbsd code had these additional conditions
	 * for the mid-tape change
	 *
	 *	req_den != T_BADBPI &&
	 *	sc->sc_density != T_6250BPI
	 *
	 * which suggests that it would be possible to write multiple
	 * densities if req_den == T_BAD_BPI or the current tape
	 * density was 6250.  Testing of our 7980 suggests that the
	 * device cannot change densities mid-tape.
	 *
	 * [email protected]
	 */
	sc->sc_density = (sc->sc_stat2 & SR2_6250) ? T_6250BPI : (
			 (sc->sc_stat3 & SR3_1600) ? T_1600BPI : (
			 (sc->sc_stat3 & SR3_800) ? T_800BPI : -1));
	req_den = (dev & T_DENSEL);

	if (flag & FWRITE) {
		if (!(sc->sc_stat1 & SR1_BOT)) {
			if (sc->sc_density != req_den) {
				uprintf("%s: can't change density mid-tape\n",
				    device_xname(sc->sc_dev));
				error = EIO;
				goto errout;
			}
		}
		else {
			int mtset_density =
			    (req_den == T_800BPI  ? MTSET800BPI : (
			     req_den == T_1600BPI ? MTSET1600BPI : (
			     req_den == T_6250BPI ? MTSET6250BPI : (
			     sc->sc_type == MT7980ID
						  ? MTSET6250DC
						  : MTSET6250BPI))));
			if (mtcommand(dev, mtset_density, 0) == 0)
				sc->sc_density = req_den;
		}
	}
	return (0);
errout:
	sc->sc_flags &= ~MTF_OPEN;
	return (error);
}
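The rewind-wait loop in mtopen() shows the error convention for an interruptible kpause() with a timeout, at least as these examples use it: EWOULDBLOCK means the tick timeout expired (poll again), while any other non-zero return means a signal arrived. A sketch of that convention with a hypothetical condition.

static volatile bool device_busy;	/* hypothetical stand-in condition */

/*
 * Sketch: wait, interruptibly, until the device is no longer busy,
 * polling about once per second.  A timeout (EWOULDBLOCK) is the normal
 * case; anything else is treated as interruption by a signal.
 */
static int
wait_until_idle(void)
{
	int error;

	while (device_busy) {
		error = kpause("devidle", true, hz, NULL);
		if (error != 0 && error != EWOULDBLOCK)
			return EINTR;
	}
	return 0;
}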
Example #21
/*
 * uvm_loanuobjpages: loan pages from a uobj out (O->K)
 *
 * => uobj shouldn't be locked.  (we'll lock it)
 * => fail with EBUSY if we meet a wired page.
 */
int
uvm_loanuobjpages(struct uvm_object *uobj, voff_t pgoff, int orignpages,
    struct vm_page **origpgpp)
{
	int ndone; /* # of pages loaned out */
	struct vm_page **pgpp;
	int error;
	int i;
	kmutex_t *slock;

	pgpp = origpgpp;
	for (ndone = 0; ndone < orignpages; ) {
		int npages;
		/* npendloan: # of pages busied but not loaned out yet. */
		int npendloan = 0xdead; /* XXX gcc */
reget:
		npages = MIN(UVM_LOAN_GET_CHUNK, orignpages - ndone);
		mutex_enter(uobj->vmobjlock);
		error = (*uobj->pgops->pgo_get)(uobj,
		    pgoff + (ndone << PAGE_SHIFT), pgpp, &npages, 0,
		    VM_PROT_READ, 0, PGO_SYNCIO);
		if (error == EAGAIN) {
			kpause("loanuopg", false, hz/2, NULL);
			continue;
		}
		if (error)
			goto fail;

		KASSERT(npages > 0);

		/* loan and unbusy pages */
		slock = NULL;
		for (i = 0; i < npages; i++) {
			kmutex_t *nextslock; /* slock for next page */
			struct vm_page *pg = *pgpp;

			/* XXX assuming that the page is owned by uobj */
			KASSERT(pg->uobject != NULL);
			nextslock = pg->uobject->vmobjlock;

			if (slock != nextslock) {
				if (slock) {
					KASSERT(npendloan > 0);
					error = uvm_loanpage(pgpp - npendloan,
					    npendloan);
					mutex_exit(slock);
					if (error)
						goto fail;
					ndone += npendloan;
					KASSERT(origpgpp + ndone == pgpp);
				}
				slock = nextslock;
				npendloan = 0;
				mutex_enter(slock);
			}

			if ((pg->flags & PG_RELEASED) != 0) {
				/*
				 * release pages and try again.
				 */
				mutex_exit(slock);
				for (; i < npages; i++) {
					pg = pgpp[i];
					slock = pg->uobject->vmobjlock;

					mutex_enter(slock);
					mutex_enter(&uvm_pageqlock);
					uvm_page_unbusy(&pg, 1);
					mutex_exit(&uvm_pageqlock);
					mutex_exit(slock);
				}
				goto reget;
			}

			npendloan++;
			pgpp++;
			KASSERT(origpgpp + ndone + npendloan == pgpp);
		}
		KASSERT(slock != NULL);
		KASSERT(npendloan > 0);
		error = uvm_loanpage(pgpp - npendloan, npendloan);
		mutex_exit(slock);
		if (error)
			goto fail;
		ndone += npendloan;
		KASSERT(origpgpp + ndone == pgpp);
	}

	return 0;

fail:
	uvm_unloan(origpgpp, ndone, UVM_LOAN_TOPAGE);

	return error;
}
Example #22
void
nfs_decode_args(struct nfsmount *nmp, struct nfs_args *argp, struct lwp *l)
{
	int s;
	int adjsock;
	int maxio;

	s = splsoftnet();

	/*
	 * Silently clear NFSMNT_NOCONN if it's a TCP mount, it makes
	 * no sense in that context.
	 */
	if (argp->sotype == SOCK_STREAM)
		argp->flags &= ~NFSMNT_NOCONN;

	/*
	 * Cookie translation is not needed for v2, silently ignore it.
	 */
	if ((argp->flags & (NFSMNT_XLATECOOKIE|NFSMNT_NFSV3)) ==
	    NFSMNT_XLATECOOKIE)
		argp->flags &= ~NFSMNT_XLATECOOKIE;

	/* Re-bind if rsrvd port requested and wasn't on one */
	adjsock = !(nmp->nm_flag & NFSMNT_RESVPORT)
		  && (argp->flags & NFSMNT_RESVPORT);
	/* Also re-bind if we're switching to/from a connected UDP socket */
	adjsock |= ((nmp->nm_flag & NFSMNT_NOCONN) !=
		    (argp->flags & NFSMNT_NOCONN));

	/* Update flags. */
	nmp->nm_flag = argp->flags;
	splx(s);

	if ((argp->flags & NFSMNT_TIMEO) && argp->timeo > 0) {
		nmp->nm_timeo = (argp->timeo * NFS_HZ + 5) / 10;
		if (nmp->nm_timeo < NFS_MINTIMEO)
			nmp->nm_timeo = NFS_MINTIMEO;
		else if (nmp->nm_timeo > NFS_MAXTIMEO)
			nmp->nm_timeo = NFS_MAXTIMEO;
	}

	if ((argp->flags & NFSMNT_RETRANS) && argp->retrans > 1) {
		nmp->nm_retry = argp->retrans;
		if (nmp->nm_retry > NFS_MAXREXMIT)
			nmp->nm_retry = NFS_MAXREXMIT;
	}

#ifndef NFS_V2_ONLY
	if (argp->flags & NFSMNT_NFSV3) {
		if (argp->sotype == SOCK_DGRAM)
			maxio = NFS_MAXDGRAMDATA;
		else
			maxio = NFS_MAXDATA;
	} else
#endif
		maxio = NFS_V2MAXDATA;

	if ((argp->flags & NFSMNT_WSIZE) && argp->wsize > 0) {
		int osize = nmp->nm_wsize;
		nmp->nm_wsize = argp->wsize;
		/* Round down to multiple of blocksize */
		nmp->nm_wsize &= ~(NFS_FABLKSIZE - 1);
		if (nmp->nm_wsize <= 0)
			nmp->nm_wsize = NFS_FABLKSIZE;
		adjsock |= (nmp->nm_wsize != osize);
	}
	if (nmp->nm_wsize > maxio)
		nmp->nm_wsize = maxio;
	if (nmp->nm_wsize > MAXBSIZE)
		nmp->nm_wsize = MAXBSIZE;

	if ((argp->flags & NFSMNT_RSIZE) && argp->rsize > 0) {
		int osize = nmp->nm_rsize;
		nmp->nm_rsize = argp->rsize;
		/* Round down to multiple of blocksize */
		nmp->nm_rsize &= ~(NFS_FABLKSIZE - 1);
		if (nmp->nm_rsize <= 0)
			nmp->nm_rsize = NFS_FABLKSIZE;
		adjsock |= (nmp->nm_rsize != osize);
	}
	if (nmp->nm_rsize > maxio)
		nmp->nm_rsize = maxio;
	if (nmp->nm_rsize > MAXBSIZE)
		nmp->nm_rsize = MAXBSIZE;

	if ((argp->flags & NFSMNT_READDIRSIZE) && argp->readdirsize > 0) {
		nmp->nm_readdirsize = argp->readdirsize;
		/* Round down to multiple of minimum blocksize */
		nmp->nm_readdirsize &= ~(NFS_DIRFRAGSIZ - 1);
		if (nmp->nm_readdirsize < NFS_DIRFRAGSIZ)
			nmp->nm_readdirsize = NFS_DIRFRAGSIZ;
		/* Bigger than buffer size makes no sense */
		if (nmp->nm_readdirsize > NFS_DIRBLKSIZ)
			nmp->nm_readdirsize = NFS_DIRBLKSIZ;
	} else if (argp->flags & NFSMNT_RSIZE)
		nmp->nm_readdirsize = nmp->nm_rsize;

	if (nmp->nm_readdirsize > maxio)
		nmp->nm_readdirsize = maxio;

	if ((argp->flags & NFSMNT_MAXGRPS) && argp->maxgrouplist >= 0 &&
		argp->maxgrouplist <= NFS_MAXGRPS)
		nmp->nm_numgrps = argp->maxgrouplist;
	if ((argp->flags & NFSMNT_READAHEAD) && argp->readahead >= 0 &&
		argp->readahead <= NFS_MAXRAHEAD)
		nmp->nm_readahead = argp->readahead;
	if ((argp->flags & NFSMNT_DEADTHRESH) && argp->deadthresh >= 1 &&
		argp->deadthresh <= NFS_NEVERDEAD)
		nmp->nm_deadthresh = argp->deadthresh;

	adjsock |= ((nmp->nm_sotype != argp->sotype) ||
		    (nmp->nm_soproto != argp->proto));
	nmp->nm_sotype = argp->sotype;
	nmp->nm_soproto = argp->proto;

	if (nmp->nm_so && adjsock) {
		nfs_safedisconnect(nmp);
		if (nmp->nm_sotype == SOCK_DGRAM)
			while (nfs_connect(nmp, (struct nfsreq *)0, l)) {
				printf("nfs_args: retrying connect\n");
				kpause("nfscn3", false, hz, NULL);
			}
	}
}