Example #1
static void
random_kthread(void)
{
	u_int maxloop, ring_out, i;

	/*
	 * Locking is not needed as this is the only place we modify ring.out, and
	 * we only examine ring.in without changing it. Both of these are volatile,
	 * and this is a unique thread.
	 */
	for (random_kthread_control = 1; random_kthread_control;) {
		/* Deal with events, if any. Restrict the number we do in one go. */
		maxloop = RANDOM_RING_MAX;
		while (harvest_context.hc_entropy_ring.out != harvest_context.hc_entropy_ring.in) {
			ring_out = (harvest_context.hc_entropy_ring.out + 1)%RANDOM_RING_MAX;
			random_harvestq_fast_process_event(harvest_context.hc_entropy_ring.ring + ring_out);
			harvest_context.hc_entropy_ring.out = ring_out;
			if (!--maxloop)
				break;
		}
		random_sources_feed();
		/* XXX: FIX!! Increase the high-performance data rate? Need some measurements first. */
		for (i = 0; i < RANDOM_ACCUM_MAX; i++) {
			if (harvest_context.hc_entropy_fast_accumulator.buf[i]) {
				random_harvest_direct(harvest_context.hc_entropy_fast_accumulator.buf + i, sizeof(harvest_context.hc_entropy_fast_accumulator.buf[0]), 4, RANDOM_FAST);
				harvest_context.hc_entropy_fast_accumulator.buf[i] = 0;
			}
		}
		/* XXX: FIX!! This is a *great* place to pass hardware/live entropy to random(9) */
		tsleep_sbt(&harvest_context.hc_kthread_proc, 0, "-", SBT_1S/10, 0, C_PREL(1));
	}
	wakeup(&harvest_context.hc_kthread_proc);
	kproc_exit(0);
	/* NOTREACHED */
}
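
The locking comment above works because the ring has exactly one producer and one consumer: this kthread only ever writes ring.out and only reads ring.in. Below is a minimal, generic sketch of that single-producer/single-consumer discipline; the types and names are hypothetical (not the actual harvest-queue code), and memory-ordering concerns beyond volatile are glossed over, just as in the comment.

#include <sys/param.h>

#define RING_MAX	128

struct spsc_ring {
	volatile u_int	in;		/* advanced only by the producer */
	volatile u_int	out;		/* advanced only by the consumer */
	void		*slot[RING_MAX];
};

/* Producer: fill the slot first, then publish it by advancing 'in'. */
static int
ring_put(struct spsc_ring *r, void *ev)
{
	u_int next = (r->in + 1) % RING_MAX;

	if (next == r->out)		/* full: one slot stays unused */
		return (0);
	r->slot[next] = ev;
	r->in = next;
	return (1);
}

/* Consumer (the kthread's role): reads 'in', writes only 'out'. */
static void *
ring_get(struct spsc_ring *r)
{
	u_int next;
	void *ev;

	if (r->out == r->in)		/* empty */
		return (NULL);
	next = (r->out + 1) % RING_MAX;
	ev = r->slot[next];		/* consume before releasing the slot */
	r->out = next;
	return (ev);
}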
Example #2
void
kthread_exit(void)
{
	struct proc *p;

	p = curthread->td_proc;

	/* A module may be waiting for us to exit. */
	wakeup(curthread);

	/*
	 * The last exiting thread in a kernel process must tear down
	 * the whole process.
	 */
	rw_wlock(&tidhash_lock);
	PROC_LOCK(p);
	if (p->p_numthreads == 1) {
		PROC_UNLOCK(p);
		rw_wunlock(&tidhash_lock);
		kproc_exit(0);
	}
	LIST_REMOVE(curthread, td_hash);
	rw_wunlock(&tidhash_lock);
	umtx_thread_exit(curthread);
	PROC_SLOCK(p);
	thread_exit();
}
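
For contrast with kproc_exit(), here is a minimal sketch of a thread that lives inside an existing kernel process and ends with kthread_exit(). The softc, its field, and the helper names are hypothetical, not taken from any driver.

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/kthread.h>

struct my_softc {
	volatile int	shutting_down;	/* hypothetical shutdown flag */
};

static void
my_worker(void *arg)
{
	struct my_softc *sc = arg;

	while (!sc->shutting_down)
		pause("mywork", hz);	/* placeholder for real work */
	/*
	 * Tear down only this thread.  If it happens to be the last
	 * thread in the kernel process, kthread_exit() falls through
	 * to kproc_exit() exactly as shown above.
	 */
	kthread_exit();
}

static int
my_start(struct my_softc *sc, struct proc *kp)
{
	/* Add one more thread to the existing kernel process 'kp'. */
	return (kthread_add(my_worker, sc, kp, NULL, 0, 0, "my_worker"));
}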
Example #3
void
g_uzip_wrkthr(void *arg)
{
	struct g_uzip_softc *sc;
	struct bio *bp;

	sc = (struct g_uzip_softc *)arg;
	thread_lock(curthread);
	sched_prio(curthread, PRIBIO);
	thread_unlock(curthread);

	for (;;) {
		mtx_lock(&sc->queue_mtx);
		if (sc->wrkthr_flags & GUZ_SHUTDOWN) {
			sc->wrkthr_flags |= GUZ_EXITING;
			mtx_unlock(&sc->queue_mtx);
			kproc_exit(0);
		}
		bp = bioq_takefirst(&sc->bio_queue);
		if (!bp) {
			msleep(sc, &sc->queue_mtx, PRIBIO | PDROP,
			    "wrkwait", 0);
			continue;
		}
		mtx_unlock(&sc->queue_mtx);
		sc->uzip_do(sc, bp);
	}
}
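
The worker above exits only after acknowledging GUZ_SHUTDOWN with GUZ_EXITING. A sketch of the matching shutdown side under that protocol follows; the function name is assumed, and the real g_uzip teardown code may differ in detail. Polling with a short timeout avoids depending on a wakeup from the exiting worker, which never sleeps again once GUZ_EXITING is set.

static void
g_uzip_shutdown_wrkthr(struct g_uzip_softc *sc)
{
	mtx_lock(&sc->queue_mtx);
	sc->wrkthr_flags |= GUZ_SHUTDOWN;
	wakeup(sc);			/* the worker msleep()s on 'sc' */
	while ((sc->wrkthr_flags & GUZ_EXITING) == 0)
		msleep(sc, &sc->queue_mtx, PRIBIO, "guzfree", hz / 10);
	mtx_unlock(&sc->queue_mtx);
}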
Example #4
/* Helper routine to enable kproc_exit() to work while the module is
 * being (or has been) unloaded.
 * This routine is in this file because it is always linked into the kernel,
 * and will thus never be unloaded. This is critical for unloadable modules
 * that have threads.
 */
void
random_set_wakeup_exit(void *control)
{
	wakeup(control);
	kproc_exit(0);
	/* NOTREACHED */
}
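
A hedged sketch of how a loadable module's kthread could use this helper (all module-side names here are hypothetical): by making random_set_wakeup_exit() its very last call, the thread finishes inside permanently resident kernel text, so the module can be unloaded safely once the wakeup() reaches the unload path.

struct mod_softc {			/* hypothetical module state */
	volatile int	unloading;
	struct proc	*worker_proc;	/* handle from kproc_create() */
};

static void
mod_worker(void *arg)
{
	struct mod_softc *sc = arg;

	while (!sc->unloading)
		pause("modwrk", hz);	/* placeholder for real work */
	/*
	 * Never returns: wakes whoever sleeps on &sc->worker_proc in
	 * the unload path, then exits via code that is never unloaded.
	 */
	random_set_wakeup_exit(&sc->worker_proc);
}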
Example #5
static void
bcm2835_audio_worker(void *data)
{
	struct bcm2835_audio_info *sc = (struct bcm2835_audio_info *)data;
	struct bcm2835_audio_chinfo *ch = &sc->pch;
	mtx_lock(&sc->data_lock);
	while (1) {

		if (sc->unloading)
			break;

		if ((ch->playback_state == PLAYBACK_PLAYING) &&
		    (vchiq_unbuffered_bytes(ch) >= VCHIQ_AUDIO_PACKET_SIZE)
		    && (ch->free_buffer >= VCHIQ_AUDIO_PACKET_SIZE)) {
			bcm2835_audio_write_samples(ch);
		} else {
			if (ch->playback_state == PLAYBACK_STOPPING) {
				bcm2835_audio_reset_channel(&sc->pch);
				ch->playback_state = PLAYBACK_IDLE;
			}

			cv_wait_sig(&sc->data_cv, &sc->data_lock);

			if (ch->playback_state == PLAYBACK_STARTING) {
				/* Give it initial kick */
				chn_intr(sc->pch.channel);
				ch->playback_state = PLAYBACK_PLAYING;
			}
		}
	}
	mtx_unlock(&sc->data_lock);

	kproc_exit(0);
}
Example #6
static void
soaio_kproc_loop(void *arg)
{
	struct proc *p;
	struct vmspace *myvm;
	struct task *task;
	int error, id, pending;

	id = (intptr_t)arg;

	/*
	 * Grab an extra reference on the daemon's vmspace so that it
	 * doesn't get freed by jobs that switch to a different
	 * vmspace.
	 */
	p = curproc;
	myvm = vmspace_acquire_ref(p);

	mtx_lock(&soaio_jobs_lock);
	MPASS(soaio_starting > 0);
	soaio_starting--;
	for (;;) {
		while (!STAILQ_EMPTY(&soaio_jobs)) {
			task = STAILQ_FIRST(&soaio_jobs);
			STAILQ_REMOVE_HEAD(&soaio_jobs, ta_link);
			soaio_queued--;
			pending = task->ta_pending;
			task->ta_pending = 0;
			mtx_unlock(&soaio_jobs_lock);

			task->ta_func(task->ta_context, pending);

			mtx_lock(&soaio_jobs_lock);
		}
		MPASS(soaio_queued == 0);

		if (p->p_vmspace != myvm) {
			mtx_unlock(&soaio_jobs_lock);
			vmspace_switch_aio(myvm);
			mtx_lock(&soaio_jobs_lock);
			continue;
		}

		soaio_idle++;
		error = mtx_sleep(&soaio_idle, &soaio_jobs_lock, 0, "-",
		    soaio_lifetime);
		soaio_idle--;
		if (error == EWOULDBLOCK && STAILQ_EMPTY(&soaio_jobs) &&
		    soaio_num_procs > soaio_target_procs)
			break;
	}
	soaio_num_procs--;
	mtx_unlock(&soaio_jobs_lock);
	free_unr(soaio_kproc_unr, id);
	kproc_exit(0);
}
Example #7
static void
ow_temp_event_thread(void *arg)
{
	struct ow_temp_softc *sc;
	uint8_t scratch[8 + 1];
	uint8_t crc;
	int retries, rv;

	sc = arg;
	pause("owtstart", device_get_unit(sc->dev) * hz / 100);	// 10ms stagger
	mtx_lock(&sc->temp_lock);
	sc->flags |= OW_TEMP_RUNNING;
	ow_temp_read_power_supply(sc->dev, &sc->parasite);
	if (sc->parasite)
		device_printf(sc->dev, "Running in parasitic mode unsupported\n");
	while ((sc->flags & OW_TEMP_DONE) == 0) {
		mtx_unlock(&sc->temp_lock);
		ow_temp_convert_t(sc->dev);
		mtx_lock(&sc->temp_lock);
		msleep(sc, &sc->temp_lock, 0, "owtcvt", hz);
		if (sc->flags & OW_TEMP_DONE)
			break;
		for (retries = 5; retries > 0; retries--) {
			mtx_unlock(&sc->temp_lock);
			rv = ow_temp_read_scratchpad(sc->dev, scratch, sizeof(scratch));
			mtx_lock(&sc->temp_lock);
			if (rv == 0) {
				crc = own_crc(sc->dev, scratch, sizeof(scratch) - 1);
				if (crc == scratch[8]) {
					if (sc->type == OWT_DS1820) {
						if (scratch[7]) {
							/*
							 * Formula from DS18S20 datasheet, page 6
							 * DS18S20 datasheet says count_per_c is 16, DS1820 does not
							 */
							sc->temp = (int16_t)((scratch[0] & 0xfe) |
							    (scratch[1] << 8)) << 3;
							sc->temp += 16 - scratch[6] - 4; /* count_per_c == 16 */
						} else
							sc->temp = (int16_t)(scratch[0] | (scratch[1] << 8)) << 3;
					} else
						sc->temp = (int16_t)(scratch[0] | (scratch[1] << 8));
					sc->temp = sc->temp * 1000 / 16 + 273150;
					break;
				}
				sc->bad_crc++;
			} else
				sc->bad_reads++;
		}
		msleep(sc, &sc->temp_lock, 0, "owtcvt", sc->reading_interval);
	}
	sc->flags &= ~OW_TEMP_RUNNING;
	mtx_unlock(&sc->temp_lock);
	kproc_exit(0);
}
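
The DS1820/DS18S20 branch above applies the extended-resolution formula from the datasheet in 1/16 degree C fixed point; a worked example with assumed scratchpad values shows how the millikelvin result comes out.

/*
 * Assumed sample scratchpad:
 *   scratch[0] = 0x32, scratch[1] = 0x00   raw reading in 0.5 C units (25.0 C)
 *   scratch[6] = 4                         COUNT_REMAIN
 *   scratch[7] = 16                        COUNT_PER_C (non-zero, extended path)
 *
 *   (0x32 & 0xfe) << 3   = 400    1/16 C units, i.e. 25.0000 C
 *   + (16 - 4 - 4)       = 408    adds COUNT_PER_C - COUNT_REMAIN and
 *                                 subtracts the fixed 0.25 C (4/16)
 *   408 * 1000 / 16      = 25500  milli-degrees Celsius
 *   + 273150             = 298650 millikelvin, i.e. 25.5 C
 *
 * which matches the datasheet formula
 *   T = T_truncated - 0.25 + (COUNT_PER_C - COUNT_REMAIN) / COUNT_PER_C.
 */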
Example #8
/**
 * Native thread main function.
 *
 * @param   pvThreadInt     The thread structure.
 */
static void rtThreadNativeMain(void *pvThreadInt)
{
    const struct thread *Self = curthread;
    PRTTHREADINT pThreadInt = (PRTTHREADINT)pvThreadInt;
    int rc;

    rc = rtThreadMain(pThreadInt, (RTNATIVETHREAD)Self, &pThreadInt->szName[0]);

#if __FreeBSD_version >= 800002
    kproc_exit(rc);
#else
    kthread_exit(rc);
#endif
}
Example #9
/*
 * Passive cooling thread; monitors current temperature according to the
 * cooling interval and calculates whether to scale back CPU frequency.
 */
static void
acpi_tz_cooling_thread(void *arg)
{
    struct acpi_tz_softc *sc;
    int error, perf, curr_temp, prev_temp;

    ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);

    sc = (struct acpi_tz_softc *)arg;

    prev_temp = sc->tz_temperature;
    while (sc->tz_cooling_enabled) {
	if (sc->tz_cooling_active)
	    (void)acpi_tz_get_temperature(sc);
	curr_temp = sc->tz_temperature;
	if (curr_temp >= sc->tz_zone.psv)
	    sc->tz_cooling_active = TRUE;
	if (sc->tz_cooling_active) {
	    perf = sc->tz_zone.tc1 * (curr_temp - prev_temp) +
		   sc->tz_zone.tc2 * (curr_temp - sc->tz_zone.psv);
	    perf /= 10;

	    if (perf != 0) {
		error = acpi_tz_cpufreq_update(sc, perf);

		/*
		 * If we hit an error that was not simply a higher-priority
		 * setting being active, disable cooling.
		 */
		if (error != 0 && error != EPERM) {
		    device_printf(sc->tz_dev,
			"failed to set new freq, disabling passive cooling\n");
		    sc->tz_cooling_enabled = FALSE;
		}
	    }
	}
	prev_temp = curr_temp;
	tsleep(&sc->tz_cooling_proc, PZERO, "cooling",
	    hz * sc->tz_zone.tsp / 10);
    }
    if (sc->tz_cooling_active) {
	acpi_tz_cpufreq_restore(sc);
	sc->tz_cooling_active = FALSE;
    }
    sc->tz_cooling_proc = NULL;
    ACPI_LOCK(thermal);
    sc->tz_cooling_proc_running = FALSE;
    ACPI_UNLOCK(thermal);
    kproc_exit(0);
}
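
The perf computation is the ACPI passive-cooling equation deltaP = _TC1 * (Tn - Tn-1) + _TC2 * (Tn - _PSV). A short worked example with assumed readings follows; thermal-zone values are in tenths of a Kelvin, which is what the final division by 10 compensates for.

/*
 * Assumed readings:
 *   _TC1 = 4, _TC2 = 3, _PSV = 3531   (353.1 K)
 *   prev_temp = 3541, curr_temp = 3551
 *
 *   perf = 4 * (3551 - 3541) + 3 * (3551 - 3531) = 40 + 60 = 100
 *   perf /= 10                                   ->  10
 *
 * so roughly a 10% performance reduction is requested via
 * acpi_tz_cpufreq_update(); a negative result as the zone cools
 * lets the frequency be raised again.
 */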
Example #10
static void
kcs_loop(void *arg)
{
	struct ipmi_softc *sc = arg;
	struct ipmi_request *req;
	int i, ok;

	IPMI_LOCK(sc);
	while ((req = ipmi_dequeue_request(sc)) != NULL) {
		ok = 0;
		for (i = 0; i < 3 && !ok; i++)
			ok = kcs_polled_request(sc, req);
		if (ok)
			req->ir_error = 0;
		else
			req->ir_error = EIO;
		ipmi_complete_request(sc, req);
	}
	IPMI_UNLOCK(sc);
	kproc_exit(0);
}
Example #11
static void
ssif_loop(void *arg)
{
	struct ipmi_softc *sc = arg;
	struct ipmi_request *req;
	int i, ok;

	IPMI_LOCK(sc);
	while ((req = ipmi_dequeue_request(sc)) != NULL) {
		IPMI_UNLOCK(sc);
		ok = 0;
		for (i = 0; i < 5; i++) {
			ok = ssif_polled_request(sc, req);
			if (ok)
				break;

			/* Wait 60 ms between retries. */
			pause("retry", 60 * hz / 1000);
#ifdef SSIF_RETRY_DEBUG
			device_printf(sc->ipmi_dev,
			    "SSIF: Retrying request (%d)\n", i + 1);
#endif
		}
		if (ok)
			req->ir_error = 0;
		else
			req->ir_error = EIO;
		IPMI_LOCK(sc);
		ipmi_complete_request(sc, req);
		IPMI_UNLOCK(sc);

		/* Enforce 10ms between requests. */
		pause("delay", hz / 100);

		IPMI_LOCK(sc);
	}
	IPMI_UNLOCK(sc);
	kproc_exit(0);
}
Example #12
static void
ald_daemon(void)
{
    int needwakeup;
    struct alq *alq;

    ald_thread = FIRST_THREAD_IN_PROC(ald_proc);

    alq_eventhandler_tag = EVENTHANDLER_REGISTER(shutdown_pre_sync,
                           ald_shutdown, NULL, SHUTDOWN_PRI_FIRST);

    ALD_LOCK();

    for (;;) {
        while ((alq = BSD_LIST_FIRST(&ald_active)) == NULL &&
                !ald_shutingdown)
            mtx_sleep(&ald_active, &ald_mtx, PWAIT, "aldslp", 0);

        /* Don't shutdown until all active ALQs are flushed. */
        if (ald_shutingdown && alq == NULL) {
            ALD_UNLOCK();
            break;
        }

        ALQ_LOCK(alq);
        ald_deactivate(alq);
        ALD_UNLOCK();
        needwakeup = alq_doio(alq);
        ALQ_UNLOCK(alq);
        if (needwakeup)
            wakeup_one(alq);
        ALD_LOCK();
    }

    kproc_exit(0);
}
Example #13
/*
 * Asynchronous I/O daemons for client nfs.
 * They do read-ahead and write-behind operations on the block I/O cache.
 * Returns if we hit the timeout defined by the iodmaxidle sysctl.
 */
static void
nfssvc_iod(void *instance)
{
	struct buf *bp;
	struct nfsmount *nmp;
	int myiod, timo;
	int error = 0;

	mtx_lock(&nfs_iod_mtx);
	myiod = (int *)instance - nfs_asyncdaemon;
	/*
	 * Main loop
	 */
	for (;;) {
	    while (((nmp = nfs_iodmount[myiod]) == NULL)
		   || !TAILQ_FIRST(&nmp->nm_bufq)) {
		if (myiod >= nfs_iodmax)
			goto finish;
		if (nmp)
			nmp->nm_bufqiods--;
		if (nfs_iodwant[myiod] == NFSIOD_NOT_AVAILABLE)
			nfs_iodwant[myiod] = NFSIOD_AVAILABLE;
		nfs_iodmount[myiod] = NULL;
		/*
		 * Always keep at least nfs_iodmin kthreads.
		 */
		timo = (myiod < nfs_iodmin) ? 0 : nfs_iodmaxidle * hz;
		error = msleep(&nfs_iodwant[myiod], &nfs_iod_mtx, PWAIT | PCATCH,
		    "-", timo);
		if (error) {
			nmp = nfs_iodmount[myiod];
			/*
			 * Rechecking the nm_bufq closes a rare race where the 
			 * nfsiod is woken up at the exact time the idle timeout
			 * fires
			 */
			if (nmp && TAILQ_FIRST(&nmp->nm_bufq))
				error = 0;
			break;
		}
	    }
	    if (error)
		    break;
	    while ((bp = TAILQ_FIRST(&nmp->nm_bufq)) != NULL) {
	        int giant_locked = 0;
		    
		/* Take one off the front of the list */
		TAILQ_REMOVE(&nmp->nm_bufq, bp, b_freelist);
		nmp->nm_bufqlen--;
		if (nmp->nm_bufqwant && nmp->nm_bufqlen <= nfs_numasync) {
		    nmp->nm_bufqwant = 0;
		    wakeup(&nmp->nm_bufq);
		}
		mtx_unlock(&nfs_iod_mtx);
		if (NFS_ISV4(bp->b_vp)) {
			giant_locked = 1;
			mtx_lock(&Giant);
		}
		if (bp->b_flags & B_DIRECT) {
			KASSERT((bp->b_iocmd == BIO_WRITE), ("nfssvc_iod: BIO_WRITE not set"));
			(void)nfs_doio_directwrite(bp);
		} else {
			if (bp->b_iocmd == BIO_READ)
				(void) nfs_doio(bp->b_vp, bp, bp->b_rcred, NULL);
			else
				(void) nfs_doio(bp->b_vp, bp, bp->b_wcred, NULL);
		}
		if (giant_locked)
			mtx_unlock(&Giant);
		mtx_lock(&nfs_iod_mtx);
		/*
		 * If there is more than one iod on this mount, then defect
		 * so that the iods can be shared out fairly between the mounts.
		 */
		if (nfs_defect && nmp->nm_bufqiods > 1) {
		    NFS_DPF(ASYNCIO,
			    ("nfssvc_iod: iod %d defecting from mount %p\n",
			     myiod, nmp));
		    nfs_iodmount[myiod] = NULL;
		    nmp->nm_bufqiods--;
		    break;
		}
	    }
	}
finish:
	nfs_asyncdaemon[myiod] = 0;
	if (nmp)
	    nmp->nm_bufqiods--;
	nfs_iodwant[myiod] = NFSIOD_NOT_AVAILABLE;
	nfs_iodmount[myiod] = NULL;
	/* Someone may be waiting for the last nfsiod to terminate. */
	if (--nfs_numasync == 0)
		wakeup(&nfs_numasync);
	mtx_unlock(&nfs_iod_mtx);
	if ((error == 0) || (error == EWOULDBLOCK))
		kproc_exit(0);
	/* Abnormal termination */
	kproc_exit(1);
}
Example #14
static void
mambodisk_task(void *arg)
{
	struct mambodisk_softc *sc = (struct mambodisk_softc*)arg;
	struct bio *bp;
	size_t sz;
	int result;
	daddr_t block, end;
	device_t dev;
	u_long unit;

	dev = sc->dev;
	unit = device_get_unit(dev);

	while (sc->running) {
		MBODISK_LOCK(sc);
		do {
			bp = bioq_first(&sc->bio_queue);
			if (bp == NULL)
				msleep(sc, &sc->sc_mtx, PRIBIO, "jobqueue", 0);
		} while (bp == NULL && sc->running);
		if (bp)
			bioq_remove(&sc->bio_queue, bp);
		MBODISK_UNLOCK(sc);
		if (!sc->running)
			break;
		sz = sc->disk->d_sectorsize;
		end = bp->bio_pblkno + (bp->bio_bcount / sz);
		for (block = bp->bio_pblkno; block < end;) {
			u_long numblocks;
			char *vaddr = bp->bio_data + 
			    (block - bp->bio_pblkno) * sz;

			numblocks = end - block;
			if (numblocks > sc->maxblocks)
				numblocks = sc->maxblocks;

			if (bp->bio_cmd == BIO_READ) {
				result = mambocall(MAMBO_DISK_READ, vaddr, 
				  (u_long)block, (numblocks << 16) | unit);
			} else if (bp->bio_cmd == BIO_WRITE) {
				result = mambocall(MAMBO_DISK_WRITE, vaddr, 
				  (u_long)block, (numblocks << 16) | unit);
			} else {
				result = 1;
			}
		
			if (result)
				break;

			block += numblocks;
		}
		if (block < end) {
			bp->bio_error = EIO;
			bp->bio_resid = (end - block) * sz;
			bp->bio_flags |= BIO_ERROR;
		}
		biodone(bp);
	}

	/* tell parent we're done */
	MBODISK_LOCK(sc);
	sc->running = -1;
	wakeup(sc);
	MBODISK_UNLOCK(sc);

	kproc_exit(0);
}
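
A sketch of the lifecycle code this handshake pairs with (the function names and the sc->p kproc handle are assumed here): attach starts the kproc, and detach clears sc->running, wakes the worker, and waits for it to set running to -1 just before kproc_exit().

static int
mambodisk_start_task(struct mambodisk_softc *sc)
{
	sc->running = 1;
	/* sc->p (assumed field) receives the new kernel process. */
	return (kproc_create(mambodisk_task, sc, &sc->p, 0, 0,
	    "task: mambo hd"));
}

static void
mambodisk_stop_task(struct mambodisk_softc *sc)
{
	MBODISK_LOCK(sc);
	sc->running = 0;
	wakeup(sc);			/* the worker msleep()s on 'sc' */
	while (sc->running != -1)
		msleep(sc, &sc->sc_mtx, PRIBIO, "mbowait", 0);
	MBODISK_UNLOCK(sc);
}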
Example #15
static void
bcm2835_audio_worker(void *data)
{
	struct bcm2835_audio_info *sc = (struct bcm2835_audio_info *)data;
	struct bcm2835_audio_chinfo *ch = &sc->pch;
	uint32_t speed, format;
	uint32_t volume, dest;
	uint32_t flags;
	uint32_t count, size, readyptr;
	uint8_t *buf;

	ch->playback_state = PLAYBACK_IDLE;

	while (1) {
		if (sc->worker_state != WORKER_RUNNING)
			break;

		BCM2835_AUDIO_LOCK(sc);
		/*
		 * wait until there are flags set or buffer is ready
		 * to consume more samples
		 */
		while ((sc->flags_pending == 0) &&
		    bcm2835_audio_buffer_should_sleep(ch)) {
			cv_wait_sig(&sc->worker_cv, &sc->lock);
		}
		flags = sc->flags_pending;
		/* Clear pending flags */
		sc->flags_pending = 0;
		BCM2835_AUDIO_UNLOCK(sc);

		/* Requested to change parameters */
		if (flags & AUDIO_PARAMS) {
			BCM2835_AUDIO_LOCK(sc);
			speed = ch->spd;
			format = ch->fmt;
			volume = sc->volume;
			dest = sc->dest;
			BCM2835_AUDIO_UNLOCK(sc);
			if (ch->playback_state == PLAYBACK_IDLE)
				bcm2835_audio_update_params(sc, format, speed);
			bcm2835_audio_update_controls(sc, volume, dest);
		}

		/* Requested to stop playback */
		if ((flags & AUDIO_STOP) &&
		    (ch->playback_state == PLAYBACK_PLAYING)) {
			bcm2835_audio_stop(ch);
			BCM2835_AUDIO_LOCK(sc);
			bcm2835_audio_reset_channel(&sc->pch);
			ch->playback_state = PLAYBACK_IDLE;
			BCM2835_AUDIO_UNLOCK(sc);
			continue;
		}

		/* Requested to start playback */
		if ((flags & AUDIO_PLAY) &&
		    (ch->playback_state == PLAYBACK_IDLE)) {
			BCM2835_AUDIO_LOCK(sc);
			ch->playback_state = PLAYBACK_PLAYING;
			BCM2835_AUDIO_UNLOCK(sc);
			bcm2835_audio_start(ch);
		}

		if (ch->playback_state == PLAYBACK_IDLE)
			continue;

		if (sndbuf_getready(ch->buffer) == 0)
			continue;

		count = sndbuf_getready(ch->buffer);
		size = sndbuf_getsize(ch->buffer);
		readyptr = sndbuf_getreadyptr(ch->buffer);

		BCM2835_AUDIO_LOCK(sc);
		if (readyptr + count > size)
			count = size - readyptr;
		count = min(count, ch->available_space);
		count -= (count % VCHIQ_AUDIO_PACKET_SIZE);
		BCM2835_AUDIO_UNLOCK(sc);

		if (count < VCHIQ_AUDIO_PACKET_SIZE)
			continue;

		buf = (uint8_t*)sndbuf_getbuf(ch->buffer) + readyptr;

		bcm2835_audio_write_samples(ch, buf, count);
		BCM2835_AUDIO_LOCK(sc);
		ch->unsubmittedptr = (ch->unsubmittedptr + count) % sndbuf_getsize(ch->buffer);
		ch->available_space -= count;
		ch->submitted_samples += count;
		KASSERT(ch->available_space >= 0, ("ch->available_space == %d\n", ch->available_space));
		BCM2835_AUDIO_UNLOCK(sc);
	}

	BCM2835_AUDIO_LOCK(sc);
	sc->worker_state = WORKER_STOPPED;
	cv_signal(&sc->worker_cv);
	BCM2835_AUDIO_UNLOCK(sc);

	kproc_exit(0);
}
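
The cv_wait_sig() above is woken by whoever posts work; here is a sketch of that producer side under the same flag protocol (the helper name is assumed). Note that the buffer-consumption path needs no flag at all: once bcm2835_audio_buffer_should_sleep() turns false, a plain cv_signal() is enough to restart the loop.

static void
bcm2835_audio_post(struct bcm2835_audio_info *sc, uint32_t flag)
{
	BCM2835_AUDIO_LOCK(sc);
	sc->flags_pending |= flag;	/* e.g. AUDIO_PLAY, AUDIO_STOP, AUDIO_PARAMS */
	cv_signal(&sc->worker_cv);
	BCM2835_AUDIO_UNLOCK(sc);
}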
Example #16
static void
isf_task(void *arg)
{
	struct isf_softc	*sc = arg;
	struct bio		*bp;
	int			ss = sc->isf_disk->d_sectorsize;
	int			error, i;

	for (;;) {
		ISF_LOCK(sc);
		do {
			bp = bioq_first(&sc->isf_bioq);
			if (bp == NULL) {
				if (sc->isf_doomed)
					kproc_exit(0);
				else
					ISF_SLEEP(sc, sc, 0);
			}
		} while (bp == NULL);
		bioq_remove(&sc->isf_bioq, bp);

		error = 0;
		switch (bp->bio_cmd) {
		case BIO_READ:
			isf_read(sc, bp->bio_pblkno * ss, bp->bio_data,
			    bp->bio_bcount);
			break;

		case BIO_WRITE:
			/*
			 * In principle one could suspend the in-progress
			 * erase, process any pending writes to other
			 * blocks and then proceed, but that seems
			 * overly complex for the likely usage modes.
			 */
			if (sc->isf_erasing) {
				error = EBUSY;
				break;
			}

			/*
			 * Read in the block we want to write and check that
			 * we're only setting bits to 0.  If an erase would
			 * be required, return an I/O error.
			 */
			isf_read(sc, bp->bio_pblkno * ss, sc->isf_rbuf,
			    bp->bio_bcount);
			for (i = 0; i < bp->bio_bcount / 2; i++)
				if ((sc->isf_rbuf[i] &
				    ((uint16_t *)bp->bio_data)[i]) !=
				    ((uint16_t *)bp->bio_data)[i]) {
					device_printf(sc->isf_dev, "write"
					    " requires erase at 0x%08jx\n",
					    bp->bio_pblkno * ss);
					error = EIO;
					break;
				}
			if (error != 0)
				break;

			error = isf_write(sc, bp->bio_pblkno * ss,
			    bp->bio_data, bp->bio_bcount);
			break;

		default:
			panic("%s: unsupported I/O operation %d", __func__,
			    bp->bio_cmd);
		}
		if (error == 0)
			biodone(bp);
		else
			biofinish(bp, NULL, error);
		ISF_UNLOCK(sc);
	}
}
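
The write-path check relies on NOR flash only being able to clear bits (1 -> 0) on a program operation; an erase is what sets them back to 1. Two assumed sample words make the test concrete.

/*
 *   old = 0xF0FF, new = 0xF0F7 : old & new = 0xF0F7 == new
 *       only 1 -> 0 transitions, so a plain write succeeds
 *   old = 0xF0F7, new = 0xF0FF : old & new = 0xF0F7 != new
 *       bit 3 would have to go 0 -> 1, so the task reports EIO
 */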
Example #17
static void
pmclog_loop(void *arg)
{
	int error;
	struct pmc_owner *po;
	struct pmclog_buffer *lb;
	struct proc *p;
	struct ucred *ownercred;
	struct ucred *mycred;
	struct thread *td;
	struct uio auio;
	struct iovec aiov;
	size_t nbytes;

	po = (struct pmc_owner *) arg;
	p = po->po_owner;
	td = curthread;
	mycred = td->td_ucred;

	PROC_LOCK(p);
	ownercred = crhold(p->p_ucred);
	PROC_UNLOCK(p);

	PMCDBG(LOG,INI,1, "po=%p kt=%p", po, po->po_kthread);
	KASSERT(po->po_kthread == curthread->td_proc,
	    ("[pmclog,%d] proc mismatch po=%p po/kt=%p curproc=%p", __LINE__,
		po, po->po_kthread, curthread->td_proc));

	lb = NULL;


	/*
	 * Loop waiting for I/O requests to be added to the owner
	 * struct's queue.  The loop is exited when the log file
	 * is deconfigured.
	 */

	mtx_lock(&pmc_kthread_mtx);

	for (;;) {

		/* check if we've been asked to exit */
		if ((po->po_flags & PMC_PO_OWNS_LOGFILE) == 0)
			break;

		if (lb == NULL) { /* look for a fresh buffer to write */
			mtx_lock_spin(&po->po_mtx);
			if ((lb = TAILQ_FIRST(&po->po_logbuffers)) == NULL) {
				mtx_unlock_spin(&po->po_mtx);

				/* No more buffers and shutdown required. */
				if (po->po_flags & PMC_PO_SHUTDOWN) {
					mtx_unlock(&pmc_kthread_mtx);
					/*
					 * Close the file to get PMCLOG_EOF
					 * error in pmclog(3).
					 */
					fo_close(po->po_file, curthread);
					mtx_lock(&pmc_kthread_mtx);
					break;
				}

				(void) msleep(po, &pmc_kthread_mtx, PWAIT,
				    "pmcloop", 0);
				continue;
			}

			TAILQ_REMOVE(&po->po_logbuffers, lb, plb_next);
			mtx_unlock_spin(&po->po_mtx);
		}

		mtx_unlock(&pmc_kthread_mtx);

		/* process the request */
		PMCDBG(LOG,WRI,2, "po=%p base=%p ptr=%p", po,
		    lb->plb_base, lb->plb_ptr);
		/* change our thread's credentials before issuing the I/O */

		aiov.iov_base = lb->plb_base;
		aiov.iov_len  = nbytes = lb->plb_ptr - lb->plb_base;

		auio.uio_iov    = &aiov;
		auio.uio_iovcnt = 1;
		auio.uio_offset = -1;
		auio.uio_resid  = nbytes;
		auio.uio_rw     = UIO_WRITE;
		auio.uio_segflg = UIO_SYSSPACE;
		auio.uio_td     = td;

		/* switch thread credentials -- see kern_ktrace.c */
		td->td_ucred = ownercred;
		error = fo_write(po->po_file, &auio, ownercred, 0, td);
		td->td_ucred = mycred;

		if (error) {
			/* XXX some errors are recoverable */
			/* send a SIGIO to the owner and exit */
			PROC_LOCK(p);
			kern_psignal(p, SIGIO);
			PROC_UNLOCK(p);

			mtx_lock(&pmc_kthread_mtx);

			po->po_error = error; /* save for flush log */

			PMCDBG(LOG,WRI,2, "po=%p error=%d", po, error);

			break;
		}

		mtx_lock(&pmc_kthread_mtx);

		/* put the used buffer back into the global pool */
		PMCLOG_INIT_BUFFER_DESCRIPTOR(lb);

		mtx_lock_spin(&pmc_bufferlist_mtx);
		TAILQ_INSERT_HEAD(&pmc_bufferlist, lb, plb_next);
		mtx_unlock_spin(&pmc_bufferlist_mtx);

		lb = NULL;
	}

	wakeup_one(po->po_kthread);
	po->po_kthread = NULL;

	mtx_unlock(&pmc_kthread_mtx);

	/* return the current I/O buffer to the global pool */
	if (lb) {
		PMCLOG_INIT_BUFFER_DESCRIPTOR(lb);

		mtx_lock_spin(&pmc_bufferlist_mtx);
		TAILQ_INSERT_HEAD(&pmc_bufferlist, lb, plb_next);
		mtx_unlock_spin(&pmc_bufferlist_mtx);
	}

	/*
	 * Exit this thread, signalling the waiter
	 */

	crfree(ownercred);

	kproc_exit(0);
}