Code example #1
File: icl_proxy.c Project: ele7enxxh/dtrace-pf
/*
 * XXX: Doing accept in a separate thread in each socket might not be the best way
 * 	to do stuff, but it's pretty clean and debuggable - and you probably won't
 * 	have hundreds of listening sockets anyway.
 */
static void
icl_accept_thread(void *arg)
{
	struct icl_listen_sock *ils;
	struct socket *head, *so;
	struct sockaddr *sa;
	int error;

	ils = arg;
	head = ils->ils_socket;

	ils->ils_running = true;

	for (;;) {
		ACCEPT_LOCK();
		while (TAILQ_EMPTY(&head->so_comp) && head->so_error == 0 &&
		    ils->ils_disconnecting == false) {
			if (head->so_rcv.sb_state & SBS_CANTRCVMORE) {
				head->so_error = ECONNABORTED;
				break;
			}
			error = msleep(&head->so_timeo, &accept_mtx, PSOCK | PCATCH,
			    "accept", 0);
			if (error) {
				ACCEPT_UNLOCK();
				ICL_WARN("msleep failed with error %d", error);
				continue;
			}
			if (ils->ils_disconnecting) {
				ACCEPT_UNLOCK();
				ICL_DEBUG("terminating");
				ils->ils_running = false;
				kthread_exit();
				return;
			}
		}
		if (head->so_error) {
			error = head->so_error;
			head->so_error = 0;
			ACCEPT_UNLOCK();
			ICL_WARN("socket error %d", error);
			continue;
		}
		so = TAILQ_FIRST(&head->so_comp);
		KASSERT(so != NULL, ("NULL so"));
		KASSERT(!(so->so_qstate & SQ_INCOMP), ("accept1: so SQ_INCOMP"));
		KASSERT(so->so_qstate & SQ_COMP, ("accept1: so not SQ_COMP"));

		/*
		 * Before changing the flags on the socket, we have to bump the
		 * reference count.  Otherwise, if the protocol calls sofree(),
		 * the socket will be released due to a zero refcount.
		 */
		SOCK_LOCK(so);			/* soref() and so_state update */
		soref(so);			/* file descriptor reference */

		TAILQ_REMOVE(&head->so_comp, so, so_list);
		head->so_qlen--;
		so->so_state |= (head->so_state & SS_NBIO);
		so->so_qstate &= ~SQ_COMP;
		so->so_head = NULL;

		SOCK_UNLOCK(so);
		ACCEPT_UNLOCK();

		sa = NULL;
		error = soaccept(so, &sa);
		if (error != 0) {
			ICL_WARN("soaccept error %d", error);
			if (sa != NULL)
				free(sa, M_SONAME);
			soclose(so);
			continue;
		}

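		/* Hand the new socket and its address to the accept callback. */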
		(ils->ils_listen->il_accept)(so, sa, ils->ils_id);
	}
}
Code example #2
static void
shmif_rcv(void *arg)
{
	struct ifnet *ifp = arg;
	struct shmif_sc *sc = ifp->if_softc;
	struct shmif_mem *busmem;
	struct mbuf *m = NULL;
	struct ether_header *eth;
	uint32_t nextpkt;
	bool wrap, passup;
	int error;
	const int align
	    = ALIGN(sizeof(struct ether_header)) - sizeof(struct ether_header);

 reup:
	mutex_enter(&sc->sc_mtx);
	while ((ifp->if_flags & IFF_RUNNING) == 0 && !sc->sc_dying)
		cv_wait(&sc->sc_cv, &sc->sc_mtx);
	mutex_exit(&sc->sc_mtx);

	busmem = sc->sc_busmem;

	while (ifp->if_flags & IFF_RUNNING) {
		struct shmif_pkthdr sp;

		if (m == NULL) {
			m = m_gethdr(M_WAIT, MT_DATA);
			MCLGET(m, M_WAIT);
			m->m_data += align;
		}

		DPRINTF(("waiting %d/%" PRIu64 "\n",
		    sc->sc_nextpacket, sc->sc_devgen));
		KASSERT(m->m_flags & M_EXT);

		shmif_lockbus(busmem);
		KASSERT(busmem->shm_magic == SHMIF_MAGIC);
		KASSERT(busmem->shm_gen >= sc->sc_devgen);

		/* need more data? */
		if (sc->sc_devgen == busmem->shm_gen && 
		    shmif_nextpktoff(busmem, busmem->shm_last)
		     == sc->sc_nextpacket) {
			shmif_unlockbus(busmem);
			error = rumpcomp_shmif_watchwait(sc->sc_kq);
			if (__predict_false(error))
				printf("shmif_rcv: wait failed %d\n", error);
			membar_consumer();
			continue;
		}

		if (stillvalid_p(sc)) {
			nextpkt = sc->sc_nextpacket;
		} else {
			KASSERT(busmem->shm_gen > 0);
			nextpkt = busmem->shm_first;
			if (busmem->shm_first > busmem->shm_last)
				sc->sc_devgen = busmem->shm_gen - 1;
			else
				sc->sc_devgen = busmem->shm_gen;
			DPRINTF(("dev %p overrun, new data: %d/%" PRIu64 "\n",
			    sc, nextpkt, sc->sc_devgen));
		}

		/*
		 * If our read pointer is ahead of the bus's last write, our
		 * generation must be one behind.
		 */
		KASSERT(!(nextpkt > busmem->shm_last
		    && sc->sc_devgen == busmem->shm_gen));

		wrap = false;
		nextpkt = shmif_busread(busmem, &sp,
		    nextpkt, sizeof(sp), &wrap);
		KASSERT(sp.sp_len <= ETHERMTU + ETHER_HDR_LEN);
		nextpkt = shmif_busread(busmem, mtod(m, void *),
		    nextpkt, sp.sp_len, &wrap);

		DPRINTF(("shmif_rcv: read packet of length %d at %d\n",
		    sp.sp_len, nextpkt));

		sc->sc_nextpacket = nextpkt;
		shmif_unlockbus(sc->sc_busmem);

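		/* Wrapping around the bus means a new generation began. */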
		if (wrap) {
			sc->sc_devgen++;
			DPRINTF(("dev %p generation now %" PRIu64 "\n",
			    sc, sc->sc_devgen));
		}

		/*
		 * Ignore packets too short to possibly be valid.
		 * This is hit at least for the first frame on a new bus.
		 */
		if (__predict_false(sp.sp_len < ETHER_HDR_LEN)) {
			DPRINTF(("shmif read packet len %d < ETHER_HDR_LEN\n",
			    sp.sp_len));
			continue;
		}

		m->m_len = m->m_pkthdr.len = sp.sp_len;
		m->m_pkthdr.rcvif = ifp;

		/*
		 * Test if we want to pass the packet upwards
		 */
		eth = mtod(m, struct ether_header *);
		if (memcmp(eth->ether_dhost, CLLADDR(ifp->if_sadl),
		    ETHER_ADDR_LEN) == 0) {
			passup = true;
		} else if (ETHER_IS_MULTICAST(eth->ether_dhost)) {
			passup = true;
		} else if (ifp->if_flags & IFF_PROMISC) {
			m->m_flags |= M_PROMISC;
			passup = true;
		} else {
			passup = false;
		}

		if (passup) {
			KERNEL_LOCK(1, NULL);
			bpf_mtap(ifp, m);
			ifp->if_input(ifp, m);
			KERNEL_UNLOCK_ONE(NULL);
			m = NULL;
		}
		/* else: reuse mbuf for a future packet */
	}
	m_freem(m);
	m = NULL;

	if (!sc->sc_dying)
		goto reup;

	kthread_exit(0);
}
Code example #3
File: vfs_sync.c Project: mihaicarabas/dragonfly
/*
 * System filesystem synchronizer daemon.
 */
static void
syncer_thread(void *_ctx)
{
	struct syncer_ctx *ctx = _ctx;
	struct synclist *slp;
	struct vnode *vp;
	long starttime;
	int *sc_flagsp;
	int sc_flags;
	int vnodes_synced = 0;
	int delta;
	int dummy = 0;

	for (;;) {
		kproc_suspend_loop();

		starttime = time_uptime;
		lwkt_gettoken(&ctx->sc_token);

		/*
		 * Push files whose dirty time has expired.  Be careful
		 * of interrupt race on slp queue.
		 */
		slp = &ctx->syncer_workitem_pending[ctx->syncer_delayno];
		ctx->syncer_delayno = (ctx->syncer_delayno + 1) &
				      ctx->syncer_mask;

		while ((vp = LIST_FIRST(slp)) != NULL) {
			if (ctx->syncer_forced) {
				if (vget(vp, LK_EXCLUSIVE) == 0) {
					VOP_FSYNC(vp, MNT_NOWAIT, 0);
					vput(vp);
					vnodes_synced++;
				}
			} else {
				if (vget(vp, LK_EXCLUSIVE | LK_NOWAIT) == 0) {
					VOP_FSYNC(vp, MNT_LAZY, 0);
					vput(vp);
					vnodes_synced++;
				}
			}

			/*
			 * vp is stale but can still be used if we can
			 * verify that it remains at the head of the list.
			 * Be careful not to try to get vp->v_token as
			 * vp can become stale if this blocks.
			 *
			 * If the vp is still at the head of the list we were
			 * unable to completely flush it, so we move it to
			 * a later slot to give other vnodes a fair shot.
			 *
			 * Note that v_tag VT_VFS vnodes can remain on the
			 * worklist with no dirty blocks, but sync_fsync()
			 * moves them to a later slot so we will never see
			 * them here.
			 *
			 * It is possible to race a vnode with no dirty
			 * buffers being removed from the list.  If this
			 * occurs we will move the vnode in the synclist
			 * and then the other thread will remove it.  Do
			 * not try to remove it here.
			 */
			if (LIST_FIRST(slp) == vp)
				vn_syncer_add(vp, syncdelay);
		}

		sc_flags = ctx->sc_flags;

		/* Exit on unmount */
		if (sc_flags & SC_FLAG_EXIT)
			break;

		lwkt_reltoken(&ctx->sc_token);

		/*
		 * Do sync processing for each mount.
		 */
		if (ctx->sc_mp)
			bio_ops_sync(ctx->sc_mp);

		/*
		 * The variable rushjob allows the kernel to speed up the
		 * processing of the filesystem syncer process. A rushjob
		 * value of N tells the filesystem syncer to process the next
		 * N seconds worth of work on its queue ASAP. Currently rushjob
		 * is used by the soft update code to speed up the filesystem
		 * syncer process when the incore state is getting so far
		 * ahead of the disk that the kernel memory pool is being
		 * threatened with exhaustion.
		 */
		delta = rushjob - ctx->syncer_rushjob;
		if ((u_int)delta > syncdelay / 2) {
			ctx->syncer_rushjob = rushjob - syncdelay / 2;
			tsleep(&dummy, 0, "rush", 1);
			continue;
		}
		if (delta) {
			++ctx->syncer_rushjob;
			tsleep(&dummy, 0, "rush", 1);
			continue;
		}

		/*
		 * If it has taken us less than a second to process the
		 * current work, then wait. Otherwise start right over
		 * again. We can still lose time if any single round
		 * takes more than two seconds, but it does not really
		 * matter as we are just trying to generally pace the
		 * filesystem activity.
		 */
		if (time_uptime == starttime)
			tsleep(ctx, 0, "syncer", hz);
	}

	/*
	 * Unmount/exit path for per-filesystem syncers; sc_token held
	 */
	ctx->sc_flags |= SC_FLAG_DONE;
	sc_flagsp = &ctx->sc_flags;
	lwkt_reltoken(&ctx->sc_token);
	wakeup(sc_flagsp);

	kthread_exit();
}
Code example #4
File: sa11xx_pcic.c Project: ryo/netbsd-src
static void
sapcic_event_thread(void *arg)
{
    struct sapcic_socket *so = arg;
    int newstatus, s;

    while (so->shutdown == 0) {
        /*
         * Serialize event processing on the PCIC.  We may
         * sleep while we hold this lock.
         */
        mutex_enter(&so->sc->sc_lock);

        /* sleep .25s to let chattering interrupts queue up */
        (void) tsleep(sapcic_event_thread, PWAIT, "pcicss", hz / 4);

        s = splhigh();
        so->event = 0;

        /* we don't rely on interrupt type */
        newstatus = (so->pcictag->read)(so, SAPCIC_STATUS_CARD);
        splx(s);

        if (so->laststatus == newstatus) {
            /*
             * No events to process; release the PCIC lock.
             */
            mutex_exit(&so->sc->sc_lock);
            (void) tsleep(&so->event, PWAIT, "pcicev", hz);
            continue;
        }

        so->laststatus = newstatus;
        switch (newstatus) {
        case SAPCIC_CARD_VALID:
            aprint_normal_dev(so->sc->sc_dev, "insertion event\n");

            pcmcia_card_attach(so->pcmcia);
            break;

        case SAPCIC_CARD_INVALID:
            aprint_normal_dev(so->sc->sc_dev, "removal event\n");

            pcmcia_card_detach(so->pcmcia, DETACH_FORCE);
            break;

        default:
            panic("sapcic_event_thread: unknown status %d",
                  newstatus);
        }

        mutex_exit(&so->sc->sc_lock);
    }

    so->event_thread = NULL;

    /* In case parent is waiting for us to exit. */
    wakeup(so->sc);

    kthread_exit(0);
}
Code example #5
File: ata.c Project: goroutines/rumprun
/*
 * atabusconfig_thread: finish attach of atabus's children, in a separate
 * kernel thread.
 */
static void
atabusconfig_thread(void *arg)
{
	struct atabus_softc *atabus_sc = arg;
	struct ata_channel *chp = atabus_sc->sc_chan;
	struct atac_softc *atac = chp->ch_atac;
	struct atabus_initq *atabus_initq = NULL;
	int i, s;

	/* XXX seems wrong */
	mutex_enter(&atabus_qlock);
	atabus_initq = TAILQ_FIRST(&atabus_initq_head);
	KASSERT(atabus_initq->atabus_sc == atabus_sc);
	mutex_exit(&atabus_qlock);

	/*
	 * First look for a port multiplier
	 */
	if (chp->ch_ndrives == PMP_MAX_DRIVES &&
	    chp->ch_drive[PMP_PORT_CTL].drive_type == ATA_DRIVET_PM) {
#if NSATA_PMP > 0
		satapmp_attach(chp);
#else
		aprint_error_dev(atabus_sc->sc_dev,
		    "SATA port multiplier not supported\n");
		/* no problems going on, all drives are ATA_DRIVET_NONE */
#endif
	}

	/*
	 * Attach an ATAPI bus, if needed.
	 */
	KASSERT(chp->ch_ndrives == 0 || chp->ch_drive != NULL);
	for (i = 0; i < chp->ch_ndrives && chp->atapibus == NULL; i++) {
		if (chp->ch_drive[i].drive_type == ATA_DRIVET_ATAPI) {
#if NATAPIBUS > 0
			(*atac->atac_atapibus_attach)(atabus_sc);
#else
			/*
			 * Fake the autoconfig "not configured" message
			 */
			aprint_normal("atapibus at %s not configured\n",
			    device_xname(atac->atac_dev));
			chp->atapibus = NULL;
			s = splbio();
			for (i = 0; i < chp->ch_ndrives; i++) {
				if (chp->ch_drive[i].drive_type == ATA_DRIVET_ATAPI)
					chp->ch_drive[i].drive_type = ATA_DRIVET_NONE;
			}
			splx(s);
#endif
			break;
		}
	}

	for (i = 0; i < chp->ch_ndrives; i++) {
		struct ata_device adev;
		if (chp->ch_drive[i].drive_type != ATA_DRIVET_ATA &&
		    chp->ch_drive[i].drive_type != ATA_DRIVET_OLD) {
			continue;
		}
		if (chp->ch_drive[i].drv_softc != NULL)
			continue;
		memset(&adev, 0, sizeof(struct ata_device));
		adev.adev_bustype = atac->atac_bustype_ata;
		adev.adev_channel = chp->ch_channel;
		adev.adev_openings = 1;
		adev.adev_drv_data = &chp->ch_drive[i];
		chp->ch_drive[i].drv_softc = config_found_ia(atabus_sc->sc_dev,
		    "ata_hl", &adev, ataprint);
		if (chp->ch_drive[i].drv_softc != NULL) {
			ata_probe_caps(&chp->ch_drive[i]);
		} else {
			s = splbio();
			chp->ch_drive[i].drive_type = ATA_DRIVET_NONE;
			splx(s);
		}
	}

	/* now that we know the drives, the controller can set its modes */
	if (atac->atac_set_modes) {
		(*atac->atac_set_modes)(chp);
		ata_print_modes(chp);
	}
#if NATARAID > 0
	if (atac->atac_cap & ATAC_CAP_RAID) {
		for (i = 0; i < chp->ch_ndrives; i++) {
			if (chp->ch_drive[i].drive_type == ATA_DRIVET_ATA) {
				ata_raid_check_component(
				    chp->ch_drive[i].drv_softc);
			}
		}
	}
#endif /* NATARAID > 0 */

	/*
	 * reset drive_flags for unattached devices, reset state for attached
	 * ones
	 */
	s = splbio();
	for (i = 0; i < chp->ch_ndrives; i++) {
		if (chp->ch_drive[i].drive_type == ATA_DRIVET_PM)
			continue;
		if (chp->ch_drive[i].drv_softc == NULL) {
			chp->ch_drive[i].drive_flags = 0;
			chp->ch_drive[i].drive_type = ATA_DRIVET_NONE;
		} else
			chp->ch_drive[i].state = 0;
	}
	splx(s);

	mutex_enter(&atabus_qlock);
	TAILQ_REMOVE(&atabus_initq_head, atabus_initq, atabus_initq);
	cv_broadcast(&atabus_qcv);
	mutex_exit(&atabus_qlock);

	free(atabus_initq, M_DEVBUF);

	ata_delref(chp);

	config_pending_decr(atac->atac_dev);
	kthread_exit(0);
}
Code example #6
File: ata.c Project: goroutines/rumprun
/*
 * atabus_thread:
 *
 *	Worker thread for the ATA bus.
 */
static void
atabus_thread(void *arg)
{
	struct atabus_softc *sc = arg;
	struct ata_channel *chp = sc->sc_chan;
	struct ata_xfer *xfer;
	int i, s;

	s = splbio();
	chp->ch_flags |= ATACH_TH_RUN;

	/*
	 * Probe the drives.  Reset the type to indicate to controllers
	 * that can re-probe that all drives must be probed.
	 *
	 * Note: ch_ndrives may be changed during the probe.
	 */
	KASSERT(chp->ch_ndrives == 0 || chp->ch_drive != NULL);
	for (i = 0; i < chp->ch_ndrives; i++) {
		chp->ch_drive[i].drive_flags = 0;
		chp->ch_drive[i].drive_type = ATA_DRIVET_NONE;
	}
	splx(s);

	atabusconfig(sc);

	s = splbio();
	for (;;) {
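		/*
		 * Sleep unless a reset or shutdown has been requested, or
		 * an active transfer is waiting on a frozen queue.
		 */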
		if ((chp->ch_flags & (ATACH_TH_RESET | ATACH_SHUTDOWN)) == 0 &&
		    (chp->ch_queue->active_xfer == NULL ||
		     chp->ch_queue->queue_freeze == 0)) {
			chp->ch_flags &= ~ATACH_TH_RUN;
			(void) tsleep(&chp->ch_thread, PRIBIO, "atath", 0);
			chp->ch_flags |= ATACH_TH_RUN;
		}
		if (chp->ch_flags & ATACH_SHUTDOWN) {
			break;
		}
		if (chp->ch_flags & ATACH_TH_RESCAN) {
			atabusconfig(sc);
			chp->ch_flags &= ~ATACH_TH_RESCAN;
		}
		if (chp->ch_flags & ATACH_TH_RESET) {
			/*
			 * ata_reset_channel() will freeze 2 times, so
			 * unfreeze one time. Not a problem as we're at splbio
			 */
			chp->ch_queue->queue_freeze--;
			ata_reset_channel(chp, AT_WAIT | chp->ch_reset_flags);
		} else if (chp->ch_queue->active_xfer != NULL &&
			   chp->ch_queue->queue_freeze == 1) {
			/*
			 * Caller has bumped queue_freeze, decrease it.
			 */
			chp->ch_queue->queue_freeze--;
			xfer = chp->ch_queue->active_xfer;
			KASSERT(xfer != NULL);
			(*xfer->c_start)(xfer->c_chp, xfer);
		} else if (chp->ch_queue->queue_freeze > 1)
			panic("ata_thread: queue_freeze");
	}
	splx(s);
	chp->ch_thread = NULL;
	wakeup(&chp->ch_flags);
	kthread_exit(0);
}
Code example #7
File: cardslot.c Project: avsm/openbsd-xen-sys
/*
 * static void cardslot_event_thread(void *arg)
 *
 *   This function is the main routine handling cardslot events such as
 *   insertions and removals.
 *
 */
static void
cardslot_event_thread(void *arg)
{
	struct cardslot_softc *sc = arg;
	struct cardslot_event *ce;
	int s;
	static int antonym_ev[4] = {
		CARDSLOT_EVENT_REMOVAL_16, CARDSLOT_EVENT_INSERTION_16,
		CARDSLOT_EVENT_REMOVAL_CB, CARDSLOT_EVENT_INSERTION_CB
	};

	while (sc->sc_th_enable) {
		s = spltty();
		if ((ce = SIMPLEQ_FIRST(&sc->sc_events)) == NULL) {
			splx(s);
			(void) tsleep(&sc->sc_events, PWAIT, "cardslotev", 0);
			continue;
		}
		SIMPLEQ_REMOVE_HEAD(&sc->sc_events, ce_q);
		splx(s);

		if (IS_CARDSLOT_INSERT_REMOVE_EV(ce->ce_type)) {
			/* Chattering suppression */
			s = spltty();
			while (1) {
				struct cardslot_event *ce1, *ce2;

				if ((ce1 = SIMPLEQ_FIRST(&sc->sc_events)) ==
				    NULL)
					break;
				if (ce1->ce_type != antonym_ev[ce->ce_type])
					break;
				if ((ce2 = SIMPLEQ_NEXT(ce1, ce_q)) == NULL)
					break;
				if (ce2->ce_type == ce->ce_type) {
					SIMPLEQ_REMOVE_HEAD(&sc->sc_events,
					    ce_q);
					free(ce1, M_TEMP);
					SIMPLEQ_REMOVE_HEAD(&sc->sc_events,
					    ce_q);
					free(ce2, M_TEMP);
				}
			}
			splx(s);
		}

		switch (ce->ce_type) {
		case CARDSLOT_EVENT_INSERTION_CB:
			if ((CARDSLOT_CARDTYPE(sc->sc_status) ==
			     CARDSLOT_STATUS_CARD_CB) ||
			    (CARDSLOT_CARDTYPE(sc->sc_status) ==
			     CARDSLOT_STATUS_CARD_16)) {
				if (CARDSLOT_WORK(sc->sc_status) ==
				    CARDSLOT_STATUS_WORKING) {
					/* A card has already been inserted
					 * and works.
					 */
					break;
				}
			}

			if (sc->sc_cb_softc) {
				CARDSLOT_SET_CARDTYPE(sc->sc_status,
				    CARDSLOT_STATUS_CARD_CB);
				if (cardbus_attach_card(sc->sc_cb_softc) > 0) {
					/* At least one function works */
					CARDSLOT_SET_WORK(sc->sc_status,
					    CARDSLOT_STATUS_WORKING);
				} else {
					/* No functions work or this card is
					 * not known
					 */
					CARDSLOT_SET_WORK(sc->sc_status,
					    CARDSLOT_STATUS_NOTWORK);
				}
			} else {
				panic("no cardbus on %s", sc->sc_dev.dv_xname);
			}

			break;

		case CARDSLOT_EVENT_INSERTION_16:
			if ((CARDSLOT_CARDTYPE(sc->sc_status) ==
			     CARDSLOT_STATUS_CARD_CB) ||
			    (CARDSLOT_CARDTYPE(sc->sc_status) ==
			     CARDSLOT_STATUS_CARD_16)) {
				if (CARDSLOT_WORK(sc->sc_status) ==
				    CARDSLOT_STATUS_WORKING) {
					/* A card has already been inserted
					 * and works.
					 */
					break;
				}
			}
			if (sc->sc_16_softc) {
				CARDSLOT_SET_CARDTYPE(sc->sc_status,
				    CARDSLOT_STATUS_CARD_16);
				if (pcmcia_card_attach(
				    (struct device *)sc->sc_16_softc)) {
					/* Do not attach */
					CARDSLOT_SET_WORK(sc->sc_status,
					    CARDSLOT_STATUS_NOTWORK);
				} else {
					/* working */
					CARDSLOT_SET_WORK(sc->sc_status,
					    CARDSLOT_STATUS_WORKING);
				}
			} else {
				panic("no 16-bit pcmcia on %s",
				    sc->sc_dev.dv_xname);
			}

			break;

		case CARDSLOT_EVENT_REMOVAL_CB:
			if (CARDSLOT_CARDTYPE(sc->sc_status) ==
			    CARDSLOT_STATUS_CARD_CB) {
				/* CardBus card has not been inserted. */
				if (CARDSLOT_WORK(sc->sc_status) ==
				    CARDSLOT_STATUS_WORKING) {
					cardbus_detach_card(sc->sc_cb_softc);
					CARDSLOT_SET_WORK(sc->sc_status,
					    CARDSLOT_STATUS_NOTWORK);
					CARDSLOT_SET_WORK(sc->sc_status,
					    CARDSLOT_STATUS_CARD_NONE);
				}
				CARDSLOT_SET_CARDTYPE(sc->sc_status,
				    CARDSLOT_STATUS_CARD_NONE);
			} else if (CARDSLOT_CARDTYPE(sc->sc_status) !=
			    CARDSLOT_STATUS_CARD_16) {
				/* Unknown card... */
				CARDSLOT_SET_CARDTYPE(sc->sc_status,
				    CARDSLOT_STATUS_CARD_NONE);
			}
			CARDSLOT_SET_WORK(sc->sc_status,
			    CARDSLOT_STATUS_NOTWORK);
			break;

		case CARDSLOT_EVENT_REMOVAL_16:
			DPRINTF(("%s: removal event\n", sc->sc_dev.dv_xname));
			if (CARDSLOT_CARDTYPE(sc->sc_status) !=
			    CARDSLOT_STATUS_CARD_16) {
				/* 16-bit card has not been inserted. */
				break;
			}
			if ((sc->sc_16_softc != NULL) &&
			    (CARDSLOT_WORK(sc->sc_status) ==
			     CARDSLOT_STATUS_WORKING)) {
				struct pcmcia_softc *psc = sc->sc_16_softc;

				pcmcia_card_deactivate((struct device *)psc);
				pcmcia_chip_socket_disable(psc->pct, psc->pch);
				pcmcia_card_detach((struct device *)psc,
				    DETACH_FORCE);
			}
			CARDSLOT_SET_CARDTYPE(sc->sc_status,
			    CARDSLOT_STATUS_CARD_NONE);
			CARDSLOT_SET_WORK(sc->sc_status,
			    CARDSLOT_STATUS_NOTWORK);
			break;

		default:
			panic("cardslot_event_thread: unknown event %d",
			    ce->ce_type);
		}
		free(ce, M_TEMP);
	}

	sc->sc_event_thread = NULL;

	/* In case the parent device is waiting for us to exit. */
	wakeup(sc);

	kthread_exit(0);
}
Code example #8
File: proc.c Project: zvikadori/assignment2
void manage_exits(void)
{
   kthread_exit();
}
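Most of the driver threads above share one shutdown protocol: loop on a "dying" flag in the softc, clear the thread pointer, wake any parent that is waiting in detach, and make kthread_exit() the last thing the thread does. A minimal sketch of that pattern, assuming NetBSD-style kthread(9) semantics (example_thread, example_softc, sc_dying and sc_thread are hypothetical names, not taken from any project above):

static void
example_thread(void *arg)
{
	struct example_softc *sc = arg;		/* hypothetical softc */

	while (!sc->sc_dying) {
		/* do one unit of work, or tsleep() until there is some */
	}

	/* Announce our exit before the thread vanishes. */
	sc->sc_thread = NULL;
	wakeup(sc);		/* parent may be waiting for us in detach */
	kthread_exit(0);	/* never returns */
}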
Code example #9
File: nfs_nfsiod.c Project: MarginC/kame
/*
 * Asynchronous I/O daemons for client nfs.
 * They do read-ahead and write-behind operations on the block I/O cache.
 * Returns if we hit the timeout defined by the iodmaxidle sysctl.
 */
static void
nfssvc_iod(void *instance)
{
	struct buf *bp;
	struct nfsmount *nmp;
	int myiod, timo;
	int error = 0;

	mtx_lock(&Giant);
	myiod = (int *)instance - nfs_asyncdaemon;
	/*
	 * Main loop
	 */
	for (;;) {
	    while (((nmp = nfs_iodmount[myiod]) == NULL
		   || !TAILQ_FIRST(&nmp->nm_bufq))
		   && error == 0) {
		if (myiod >= nfs_iodmax)
			goto finish;
		if (nmp)
			nmp->nm_bufqiods--;
		nfs_iodwant[myiod] = curthread->td_proc;
		nfs_iodmount[myiod] = NULL;
		/*
		 * Always keep at least nfs_iodmin kthreads.
		 */
		timo = (myiod < nfs_iodmin) ? 0 : nfs_iodmaxidle * hz;
		error = tsleep((caddr_t)&nfs_iodwant[myiod], PWAIT | PCATCH,
		    "nfsidl", timo);
	    }
	    if (error)
		    break;
	    while ((bp = TAILQ_FIRST(&nmp->nm_bufq)) != NULL) {
		/* Take one off the front of the list */
		TAILQ_REMOVE(&nmp->nm_bufq, bp, b_freelist);
		nmp->nm_bufqlen--;
		if (nmp->nm_bufqwant && nmp->nm_bufqlen <= nfs_numasync) {
		    nmp->nm_bufqwant = 0;
		    wakeup(&nmp->nm_bufq);
		}
		if (bp->b_iocmd == BIO_READ)
		    (void) nfs_doio(bp, bp->b_rcred, NULL);
		else
		    (void) nfs_doio(bp, bp->b_wcred, NULL);
		/*
		 * If there is more than one iod on this mount, then defect
		 * so that the iods can be shared out fairly between the mounts.
		 */
		if (nfs_defect && nmp->nm_bufqiods > 1) {
		    NFS_DPF(ASYNCIO,
			    ("nfssvc_iod: iod %d defecting from mount %p\n",
			     myiod, nmp));
		    nfs_iodmount[myiod] = NULL;
		    nmp->nm_bufqiods--;
		    break;
		}
	    }
	}
finish:
	nfs_asyncdaemon[myiod] = 0;
	if (nmp)
	    nmp->nm_bufqiods--;
	nfs_iodwant[myiod] = NULL;
	nfs_iodmount[myiod] = NULL;
	nfs_numasync--;
	if ((error == 0) || (error == EWOULDBLOCK))
		kthread_exit(0);
	/* Abnormal termination */
	kthread_exit(1);
}
Code example #10
File: rf_engine.c Project: Tommmster/netbsd-avr32
static void
DAGExecutionThread(RF_ThreadArg_t arg)
{
	RF_DagNode_t *nd, *local_nq, *term_nq, *fire_nq;
	RF_Raid_t *raidPtr;
	int     ks;
	int     s;

	raidPtr = (RF_Raid_t *) arg;

#if RF_DEBUG_ENGINE
	if (rf_engineDebug) {
		printf("raid%d: Engine thread is running\n", raidPtr->raidid);
	}
#endif
	s = splbio();

	DO_LOCK(raidPtr);
	while (!raidPtr->shutdown_engine) {

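		/* Claim the whole node queue, then process it unlocked. */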
		while (raidPtr->node_queue != NULL) {
			local_nq = raidPtr->node_queue;
			fire_nq = NULL;
			term_nq = NULL;
			raidPtr->node_queue = NULL;
			DO_UNLOCK(raidPtr);

			/* first, strip out the terminal nodes */
			while (local_nq) {
				nd = local_nq;
				local_nq = local_nq->next;
				switch (nd->dagHdr->status) {
				case rf_enable:
				case rf_rollForward:
					if (nd->numSuccedents == 0) {
						/* end of the dag, add to
						 * callback list */
						nd->next = term_nq;
						term_nq = nd;
					} else {
						/* not the end, add to the
						 * fire queue */
						nd->next = fire_nq;
						fire_nq = nd;
					}
					break;
				case rf_rollBackward:
					if (nd->numAntecedents == 0) {
						/* end of the dag, add to the
						 * callback list */
						nd->next = term_nq;
						term_nq = nd;
					} else {
						/* not the end, add to the
						 * fire queue */
						nd->next = fire_nq;
						fire_nq = nd;
					}
					break;
				default:
					RF_PANIC();
					break;
				}
			}

			/* execute callback of dags which have reached the
			 * terminal node */
			while (term_nq) {
				nd = term_nq;
				term_nq = term_nq->next;
				nd->next = NULL;
				(nd->dagHdr->cbFunc) (nd->dagHdr->cbArg);
				raidPtr->dags_in_flight--;	/* debug only */
			}

			/* fire remaining nodes */
			FireNodeList(fire_nq);

			DO_LOCK(raidPtr);
		}
		while (!raidPtr->shutdown_engine &&
		       raidPtr->node_queue == NULL) {
			DO_WAIT(raidPtr);
		}
	}
	DO_UNLOCK(raidPtr);

	splx(s);
	kthread_exit(0);
}
Code example #11
void
wi_usb_thread(void *arg)
{
	struct wi_usb_softc *sc = arg;
	struct wi_usb_thread_info *wi_thread_info;
	int s;

	wi_thread_info = malloc(sizeof(*wi_thread_info), M_DEVBUF, M_WAITOK);

	/*
	 * is there a remote possibility that the device could
	 * be removed before the kernel thread starts up?
	 */

	sc->wi_usb_refcnt++;

	sc->wi_thread_info = wi_thread_info;
	wi_thread_info->dying = 0;
	wi_thread_info->status = 0;

	wi_usb_ctl_lock(sc);

	wi_attach(&sc->sc_wi, &wi_func_usb);

	wi_usb_ctl_unlock(sc);

	for (;;) {
		if (wi_thread_info->dying) {
			if (--sc->wi_usb_refcnt < 0)
				usb_detach_wakeup(&sc->wi_usb_dev);
			kthread_exit(0);
		}

		DPRINTFN(5,("%s: %s: dying %x status %x\n",
		    sc->wi_usb_dev.dv_xname, __func__,
			wi_thread_info->dying, wi_thread_info->status));

		wi_usb_ctl_lock(sc);

		DPRINTFN(5,("%s: %s: starting %x\n",
		    sc->wi_usb_dev.dv_xname, __func__,
		    wi_thread_info->status));

		s = splusb();
		if (wi_thread_info->status & WI_START) {
			wi_thread_info->status &= ~WI_START;
			wi_usb_tx_lock(sc);
			wi_func_io.f_start(&sc->sc_wi.sc_ic.ic_if);
			/*
			 * tx_unlock is explicitly missing here
			 * it is done in txeof_frm
			 */
		} else if (wi_thread_info->status & WI_INQUIRE) {
			wi_thread_info->status &= ~WI_INQUIRE;
			wi_func_io.f_inquire(&sc->sc_wi);
		} else if (wi_thread_info->status & WI_WATCHDOG) {
			wi_thread_info->status &= ~WI_WATCHDOG;
			wi_func_io.f_watchdog(&sc->sc_wi.sc_ic.ic_if);
		}
		splx(s);

		DPRINTFN(5,("%s: %s: ending %x\n",
		    sc->wi_usb_dev.dv_xname, __func__,
		    wi_thread_info->status));
		wi_usb_ctl_unlock(sc);

		if (wi_thread_info->status == 0) {
			s = splnet();
			wi_thread_info->idle = 1;
			tsleep(wi_thread_info, PRIBIO, "wiIDL", 0);
			wi_thread_info->idle = 0;
			splx(s);
		}
	}
}
Code example #12
File: sched_edf.c Project: bkolobara/Benu-pi
static void edf_deadline_alarm ( sigval_t sigev_value )
{
	kthread_t *kthread = sigev_value.sival_ptr, *test;
	kthread_sched2_t *tsched;
	ksched_t *ksched;
	itimerspec_t alarm;

	ASSERT ( kthread );

	ksched = ksched2_get ( kthread_get_sched_policy (kthread) );
	tsched = kthread_get_sched2_param ( kthread );

	test = kthreadq_remove ( &ksched->params.edf.wait, kthread );

	EDF_LOG ( "%x %x [Deadline alarm]", kthread, test );

	if( test == kthread )
	{
		EDF_LOG ( "%x [Waked, but too late]", kthread );

		kthread_set_syscall_retval ( kthread, EXIT_FAILURE );
		kthread_move_to_ready ( kthread, LAST );

		if ( tsched->params.edf.flags & EDF_TERMINATE )
		{
			EDF_LOG ( "%x [EDF_TERMINATE]", kthread );
			ktimer_delete ( tsched->params.edf.period_alarm );
			tsched->params.edf.period_alarm = NULL;
			ktimer_delete ( tsched->params.edf.deadline_alarm );
			tsched->params.edf.deadline_alarm = NULL;
			kthread_set_errno ( kthread, ETIMEDOUT );
			kthread_exit ( kthread, NULL, TRUE );
		}
		else {
			edf_schedule (ksched);
		}
	}
	else {
	/*
	 * thread is not in edf.wait queue, but might be running or be
	 * blocked - it is probable (almost certain) that it missed a deadline
	 */
	EDF_LOG ( "%x [Not in edf.wait. Missed deadline?]", kthread );

	if ( edf_check_deadline ( kthread ) )
	{
		/* what to do if its missed? kill thread? */
		if ( tsched->params.edf.flags & EDF_TERMINATE )
		{
			EDF_LOG ( "%x [EDF_TERMINATE]", kthread );
			ktimer_delete (tsched->params.edf.period_alarm);
			tsched->params.edf.period_alarm = NULL;
			ktimer_delete ( tsched->params.edf.deadline_alarm );
			tsched->params.edf.deadline_alarm = NULL;
			kthread_set_errno ( kthread, ETIMEDOUT );
			kthread_exit ( kthread, NULL, TRUE );
		}
		else if ( tsched->params.edf.flags & EDF_CONTINUE )
		{
			/* continue as deadline is not missed */
			EDF_LOG ( "%x [EDF_CONTINUE]", kthread );
		}
		else if ( tsched->params.edf.flags & EDF_SKIP )
		{
			/* skip deadline */
			/* set times for next period */
			EDF_LOG ( "%x [EDF_SKIP]", kthread );

			time_add ( &tsched->params.edf.next_run,
				   &tsched->params.edf.period );

			tsched->params.edf.active_deadline =
					tsched->params.edf.next_run;
			time_add ( &tsched->params.edf.active_deadline,
					&tsched->params.edf.relative_deadline );

			if ( kthread == ksched->params.edf.active )
				ksched->params.edf.active = NULL;

			TIME_RESET ( &alarm.it_interval );
			alarm.it_value = tsched->params.edf.active_deadline;
			ktimer_settime ( tsched->params.edf.deadline_alarm,
					 TIMER_ABSTIME, &alarm, NULL );

			alarm.it_interval = tsched->params.edf.period;
			alarm.it_value = tsched->params.edf.next_run;
			ktimer_settime ( tsched->params.edf.period_alarm,
					 TIMER_ABSTIME, &alarm, NULL );

			kthread_enqueue (kthread, &ksched->params.edf.ready);
			edf_schedule (ksched);
		}
	} /* moved 1 tab left for readability */
	}
}
Code example #13
static void
vndthread(void *arg)
{
	struct vnd_softc *vnd = arg;
	int s;

	/* Determine whether we can *use* VOP_BMAP and VOP_STRATEGY to
	 * directly access the backing vnode.  If we can, use these two
	 * operations to avoid messing with the local buffer cache.
	 * Otherwise fall back to regular VOP_READ/VOP_WRITE operations
	 * which are guaranteed to work with any file system. */
	if ((vnd->sc_flags & VNF_USE_VN_RDWR) == 0 &&
	    ! vnode_has_strategy(vnd))
		vnd->sc_flags |= VNF_USE_VN_RDWR;

#ifdef DEBUG
	if (vnddebug & VDB_INIT)
		printf("vndthread: vp %p, %s\n", vnd->sc_vp,
		    (vnd->sc_flags & VNF_USE_VN_RDWR) == 0 ?
		    "using bmap/strategy operations" :
		    "using read/write operations");
#endif

	s = splbio();
	vnd->sc_flags |= VNF_KTHREAD;
	wakeup(&vnd->sc_kthread);

	/*
	 * Dequeue requests and serve them depending on the available
	 * vnode operations.
	 */
	while ((vnd->sc_flags & VNF_VUNCONF) == 0) {
		struct vndxfer *vnx;
		int flags;
		struct buf *obp;
		struct buf *bp;

		obp = bufq_get(vnd->sc_tab);
		if (obp == NULL) {
			tsleep(&vnd->sc_tab, PRIBIO, "vndbp", 0);
			continue;
		}
		if ((vnd->sc_flags & VNF_USE_VN_RDWR)) {
			KASSERT(vnd->sc_pending > 0 &&
			    vnd->sc_pending <= VND_MAXPENDING(vnd));
			if (vnd->sc_pending-- == VND_MAXPENDING(vnd))
				wakeup(&vnd->sc_pending);
		}
		splx(s);
		flags = obp->b_flags;
#ifdef DEBUG
		if (vnddebug & VDB_FOLLOW)
			printf("vndthread(%p)\n", obp);
#endif

		if (vnd->sc_vp->v_mount == NULL) {
			obp->b_error = ENXIO;
			goto done;
		}
#ifdef VND_COMPRESSION
		/* handle a compressed read */
		if ((flags & B_READ) != 0 && (vnd->sc_flags & VNF_COMP)) {
			off_t bn;
			
			/* Convert to a byte offset within the file. */
			bn = obp->b_rawblkno *
			    vnd->sc_dkdev.dk_label->d_secsize;

			compstrategy(obp, bn);
			goto done;
		}
#endif /* VND_COMPRESSION */
		
		/*
		 * Allocate a header for this transfer and link it to the
		 * buffer
		 */
		s = splbio();
		vnx = VND_GETXFER(vnd);
		splx(s);
		vnx->vx_vnd = vnd;

		s = splbio();
		while (vnd->sc_active >= vnd->sc_maxactive) {
			tsleep(&vnd->sc_tab, PRIBIO, "vndac", 0);
		}
		vnd->sc_active++;
		splx(s);

		/* Instrumentation. */
		disk_busy(&vnd->sc_dkdev);

		bp = &vnx->vx_buf;
		buf_init(bp);
		bp->b_flags = (obp->b_flags & B_READ);
		bp->b_oflags = obp->b_oflags;
		bp->b_cflags = obp->b_cflags;
		bp->b_iodone = vndiodone;
		bp->b_private = obp;
		bp->b_vp = vnd->sc_vp;
		bp->b_objlock = bp->b_vp->v_interlock;
		bp->b_data = obp->b_data;
		bp->b_bcount = obp->b_bcount;
		BIO_COPYPRIO(bp, obp);

		/* Handle the request using the appropriate operations. */
		if ((vnd->sc_flags & VNF_USE_VN_RDWR) == 0)
			handle_with_strategy(vnd, obp, bp);
		else
			handle_with_rdwr(vnd, obp, bp);

		s = splbio();
		continue;

done:
		biodone(obp);
		s = splbio();
	}

	vnd->sc_flags &= (~VNF_KTHREAD | VNF_VUNCONF);
	wakeup(&vnd->sc_kthread);
	splx(s);
	kthread_exit(0);
}
Code example #14
static void
nvme_ns_bio_test(void *arg)
{
	struct nvme_io_test_internal	*io_test = arg;
	struct cdevsw			*csw;
	struct mtx			*mtx;
	struct bio			*bio;
	struct cdev			*dev;
	void				*buf;
	struct timeval			t;
	uint64_t			offset;
	uint32_t			idx, io_completed = 0;
#if __FreeBSD_version >= 900017
	int				ref;
#endif

	buf = malloc(io_test->size, M_NVME, M_WAITOK);
	idx = atomic_fetchadd_int(&io_test->td_idx, 1);
	dev = io_test->ns->cdev;

	offset = idx * 2048 * nvme_ns_get_sector_size(io_test->ns);

	while (1) {

		bio = g_alloc_bio();

		memset(bio, 0, sizeof(*bio));
		bio->bio_cmd = (io_test->opc == NVME_OPC_READ) ?
		    BIO_READ : BIO_WRITE;
		bio->bio_done = nvme_ns_bio_test_cb;
		bio->bio_dev = dev;
		bio->bio_offset = offset;
		bio->bio_data = buf;
		bio->bio_bcount = io_test->size;

		if (io_test->flags & NVME_TEST_FLAG_REFTHREAD) {
#if __FreeBSD_version >= 900017
			csw = dev_refthread(dev, &ref);
#else
			csw = dev_refthread(dev);
#endif
		} else
			csw = dev->si_devsw;

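		/* Wait for completion; the bio_done callback wakes us on the bio. */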
		mtx = mtx_pool_find(mtxpool_sleep, bio);
		mtx_lock(mtx);
		(*csw->d_strategy)(bio);
		msleep(bio, mtx, PRIBIO, "biotestwait", 0);
		mtx_unlock(mtx);

		if (io_test->flags & NVME_TEST_FLAG_REFTHREAD) {
#if __FreeBSD_version >= 900017
			dev_relthread(dev, ref);
#else
			dev_relthread(dev);
#endif
		}

		if ((bio->bio_flags & BIO_ERROR) || (bio->bio_resid > 0))
			break;

		g_destroy_bio(bio);

		io_completed++;

		getmicrouptime(&t);
		timevalsub(&t, &io_test->start);

		if (t.tv_sec >= io_test->time)
			break;

		offset += io_test->size;
		if ((offset + io_test->size) > nvme_ns_get_size(io_test->ns))
			offset = 0;
	}

	io_test->io_completed[idx] = io_completed;
	wakeup_one(io_test);

	free(buf, M_NVME);

	atomic_subtract_int(&io_test->td_active, 1);
	mb();

#if __FreeBSD_version >= 800000
	kthread_exit();
#else
	kthread_exit(0);
#endif
}
Code example #15
/*
 * The kernel thread (one for every active snapshot).
 *
 * After wakeup it cleans the cache and runs the I/O requests.
 */
static void
fss_bs_thread(void *arg)
{
	bool thread_idle, is_valid;
	int error, i, todo, len, crotor, is_read;
	long off;
	char *addr;
	u_int32_t c, cl, ch, *indirp;
	struct buf *bp, *nbp;
	struct fss_softc *sc;
	struct fss_cache *scp, *scl;

	sc = arg;
	scl = sc->sc_cache+sc->sc_cache_size;
	crotor = 0;
	thread_idle = false;

	mutex_enter(&sc->sc_slock);

	for (;;) {
		if (thread_idle)
			cv_wait(&sc->sc_work_cv, &sc->sc_slock);
		thread_idle = true;
		if ((sc->sc_flags & FSS_BS_THREAD) == 0) {
			mutex_exit(&sc->sc_slock);
			kthread_exit(0);
		}

		/*
		 * Process I/O requests (persistent)
		 */

		if (sc->sc_flags & FSS_PERSISTENT) {
			if ((bp = bufq_get(sc->sc_bufq)) == NULL)
				continue;
			is_valid = FSS_ISVALID(sc);
			is_read = (bp->b_flags & B_READ);
			thread_idle = false;
			mutex_exit(&sc->sc_slock);

			if (is_valid) {
				disk_busy(sc->sc_dkdev);
				error = fss_bs_io(sc, FSS_READ, 0,
				    dbtob(bp->b_blkno), bp->b_bcount,
				    bp->b_data);
				disk_unbusy(sc->sc_dkdev,
				    (error ? 0 : bp->b_bcount), is_read);
			} else
				error = ENXIO;

			bp->b_error = error;
			bp->b_resid = (error ? bp->b_bcount : 0);
			biodone(bp);

			mutex_enter(&sc->sc_slock);
			continue;
		}

		/*
		 * Clean the cache
		 */
		for (i = 0; i < sc->sc_cache_size; i++) {
			crotor = (crotor + 1) % sc->sc_cache_size;
			scp = sc->sc_cache + crotor;
			if (scp->fc_type != FSS_CACHE_VALID)
				continue;
			mutex_exit(&sc->sc_slock);

			thread_idle = false;
			indirp = fss_bs_indir(sc, scp->fc_cluster);
			if (indirp != NULL) {
				error = fss_bs_io(sc, FSS_WRITE, sc->sc_clnext,
				    0, FSS_CLSIZE(sc), scp->fc_data);
			} else
				error = EIO;

			mutex_enter(&sc->sc_slock);
			if (error == 0) {
				*indirp = sc->sc_clnext++;
				sc->sc_indir_dirty = 1;
			} else
				fss_error(sc, "write error on backing store");

			scp->fc_type = FSS_CACHE_FREE;
			cv_broadcast(&sc->sc_cache_cv);
			break;
		}

		/*
		 * Process I/O requests
		 */
		if ((bp = bufq_get(sc->sc_bufq)) == NULL)
			continue;
		is_valid = FSS_ISVALID(sc);
		is_read = (bp->b_flags & B_READ);
		thread_idle = false;

		if (!is_valid) {
			mutex_exit(&sc->sc_slock);

			bp->b_error = ENXIO;
			bp->b_resid = bp->b_bcount;
			biodone(bp);

			mutex_enter(&sc->sc_slock);
			continue;
		}

		disk_busy(sc->sc_dkdev);

		/*
		 * First read from the snapshotted block device unless
		 * this request is completely covered by backing store.
		 */

		cl = FSS_BTOCL(sc, dbtob(bp->b_blkno));
		off = FSS_CLOFF(sc, dbtob(bp->b_blkno));
		ch = FSS_BTOCL(sc, dbtob(bp->b_blkno)+bp->b_bcount-1);
		error = 0;
		bp->b_resid = 0;
		bp->b_error = 0;
		for (c = cl; c <= ch; c++) {
			if (isset(sc->sc_copied, c))
				continue;
			mutex_exit(&sc->sc_slock);

			/* Not on backing store, read from device. */
			nbp = getiobuf(NULL, true);
			nbp->b_flags = B_READ;
			nbp->b_resid = nbp->b_bcount = bp->b_bcount;
			nbp->b_bufsize = bp->b_bcount;
			nbp->b_data = bp->b_data;
			nbp->b_blkno = bp->b_blkno;
			nbp->b_lblkno = 0;
			nbp->b_dev = sc->sc_bdev;
			SET(nbp->b_cflags, BC_BUSY);	/* mark buffer busy */

			bdev_strategy(nbp);

			error = biowait(nbp);
			if (error != 0) {
				bp->b_resid = bp->b_bcount;
				bp->b_error = nbp->b_error;
				disk_unbusy(sc->sc_dkdev, 0, is_read);
				biodone(bp);
			}
			putiobuf(nbp);

			mutex_enter(&sc->sc_slock);
			break;
		}
		if (error)
			continue;

		/*
		 * Replace those parts that have been saved to backing store.
		 */

		addr = bp->b_data;
		todo = bp->b_bcount;
		for (c = cl; c <= ch; c++, off = 0, todo -= len, addr += len) {
			len = FSS_CLSIZE(sc)-off;
			if (len > todo)
				len = todo;
			if (isclr(sc->sc_copied, c))
				continue;
			mutex_exit(&sc->sc_slock);

			indirp = fss_bs_indir(sc, c);
			if (indirp == NULL || *indirp == 0) {
				/*
				 * Not on backing store. Either in cache
				 * or hole in the snapshotted block device.
				 */

				mutex_enter(&sc->sc_slock);
				for (scp = sc->sc_cache; scp < scl; scp++)
					if (scp->fc_type == FSS_CACHE_VALID &&
					    scp->fc_cluster == c)
						break;
				if (scp < scl)
					memcpy(addr, (char *)scp->fc_data+off,
					    len);
				else
					memset(addr, 0, len);
				continue;
			}

			/*
			 * Read from backing store.
			 */
			error =
			    fss_bs_io(sc, FSS_READ, *indirp, off, len, addr);

			mutex_enter(&sc->sc_slock);
			if (error) {
				bp->b_resid = bp->b_bcount;
				bp->b_error = error;
				break;
			}
		}
		mutex_exit(&sc->sc_slock);

		disk_unbusy(sc->sc_dkdev, (error ? 0 : bp->b_bcount), is_read);
		biodone(bp);

		mutex_enter(&sc->sc_slock);
	}
}
Code example #16
File: syscall.c Project: FrankSzn/USC-Projects
static int syscall_dispatch(uint32_t sysnum, uint32_t args, regs_t *regs)
{
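        /* "args" points at the user-supplied argument block, cast per call. */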
        switch (sysnum) {
                case SYS_waitpid:
                        return sys_waitpid((waitpid_args_t *)args);

                case SYS_exit:
                        do_exit((int)args);
                        panic("exit failed!\n");
                        return 0;

                case SYS_thr_exit:
                        kthread_exit((void *)args);
                        panic("thr_exit failed!\n");
                        return 0;

                case SYS_thr_yield:
                        sched_make_runnable(curthr);
                        sched_switch();
                        return 0;

                case SYS_fork:
                        return sys_fork(regs);

                case SYS_getpid:
                        return curproc->p_pid;

                case SYS_sync:
                        sys_sync();
                        return 0;

#ifdef __MOUNTING__
                case SYS_mount:
                        return sys_mount((mount_args_t *) args);

                case SYS_umount:
                        return sys_umount((argstr_t *) args);
#endif

                case SYS_mmap:
                        return (int) sys_mmap((mmap_args_t *) args);

                case SYS_munmap:
                        return sys_munmap((munmap_args_t *) args);

                case SYS_open:
                        return sys_open((open_args_t *) args);

                case SYS_close:
                        return sys_close((int)args);

                case SYS_read:
                        return sys_read((read_args_t *)args);

                case SYS_write:
                        return sys_write((write_args_t *)args);

                case SYS_dup:
                        return sys_dup((int)args);

                case SYS_dup2:
                        return sys_dup2((dup2_args_t *)args);

                case SYS_mkdir:
                        return sys_mkdir((mkdir_args_t *)args);

                case SYS_rmdir:
                        return sys_rmdir((argstr_t *)args);

                case SYS_unlink:
                        return sys_unlink((argstr_t *)args);

                case SYS_link:
                        return sys_link((link_args_t *)args);

                case SYS_rename:
                        return sys_rename((rename_args_t *)args);

                case SYS_chdir:
                        return sys_chdir((argstr_t *)args);

                case SYS_getdents:
                        return sys_getdents((getdents_args_t *)args);

                case SYS_brk:
                        return (int) sys_brk((void *)args);

                case SYS_lseek:
                        return sys_lseek((lseek_args_t *)args);

                case SYS_halt:
                        sys_halt();
                        return -1;

                case SYS_set_errno:
                        curthr->kt_errno = (int)args;
                        return 0;

                case SYS_errno:
                        return curthr->kt_errno;

                case SYS_execve:
                        return sys_execve((execve_args_t *)args, regs);

                case SYS_stat:
                        return sys_stat((stat_args_t *)args);

                case SYS_uname:
                        return sys_uname((struct utsname *)args);

                case SYS_debug:
                        return sys_debug((argstr_t *)args);
                case SYS_kshell:
                        return sys_kshell((int)args);
                default:
                        dbg(DBG_ERROR, "ERROR: unknown system call: %d (args: %#08x)\n", sysnum, args);
                        curthr->kt_errno = ENOSYS;
                        return -1;
        }
}
Code example #17
File: khttpd_log.c Project: Taketsuru/khttpd
static void
khttpd_log_main(void *arg)
{
	struct iovec iovs[64];
	struct uio auio;
	struct thread *td;
	struct khttpd_log *l;
	struct mbuf *pkt, *m;
	ssize_t resid;
	int error, fd, i, niov;

	td = curthread;
	niov = sizeof(iovs) / sizeof(iovs[0]);
	error = 0;

	mtx_lock(&khttpd_log_lock);

	for (;;) {
		while (!khttpd_log_shutdown && TAILQ_EMPTY(&khttpd_busy_logs))
			mtx_sleep(&khttpd_busy_logs, &khttpd_log_lock, 0,
			    "log", 0);

		l = TAILQ_FIRST(&khttpd_busy_logs);
		if (l == NULL && khttpd_log_shutdown) {
			khttpd_log_shutdown = FALSE;
			wakeup(&khttpd_log_shutdown);
			break;
		}

		TAILQ_REMOVE(&khttpd_busy_logs, l, link);

		pkt = mbufq_flush(&l->queue);
		if (l->draining) {
			l->draining = FALSE;
			wakeup(l);
		}

		mtx_unlock(&khttpd_log_lock);

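		/* Each packet is an mbuf chain; gather it into iovecs for writev. */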
		while (pkt != NULL) {
			m = pkt;

			while (m != NULL && error == 0) {
				resid = 0;
				for (i = 0; i < niov && m != NULL;
				     ++i, m = m->m_next) {
					iovs[i].iov_base = mtod(m, void *);
					iovs[i].iov_len = m->m_len;
					resid += m->m_len;
				}

				auio.uio_iov = iovs;
				auio.uio_iovcnt = i;
				auio.uio_offset = 0;
				auio.uio_resid = resid;
				auio.uio_segflg = UIO_SYSSPACE;
				auio.uio_td = td;
				error = kern_writev(td, l->fd, &auio);

				if (error != 0) {
					fd = l->fd;
					khttpd_log_abort(l);
					log(LOG_WARNING, "khttpd: "
					    "write to log %d failed (%d)",
					    fd, error);
				}
			}

			m = pkt;
			pkt = STAILQ_NEXT(pkt, m_stailqpkt);
			m_freem(m);
		}

		mtx_lock(&khttpd_log_lock);
	}

	KASSERT(TAILQ_EMPTY(&khttpd_busy_logs),
	    ("khttpd_busy_logs is not empty"));

	mtx_unlock(&khttpd_log_lock);

	kthread_exit();
}