Example #1
/*
 * Shutdown the system cleanly to prepare for reboot, halt, or power off.
 */
void
kern_reboot(int howto)
{
	static int once = 0;

#if defined(SMP)
	/*
	 * Bind us to CPU 0 so that all shutdown code runs there.  Some
	 * systems don't shut down properly (e.g., ACPI power off) if we
	 * run on another processor.
	 */
	if (!SCHEDULER_STOPPED()) {
		thread_lock(curthread);
		sched_bind(curthread, 0);
		thread_unlock(curthread);
		KASSERT(PCPU_GET(cpuid) == 0, ("boot: not running on cpu 0"));
	}
#endif
	/* We're in the process of rebooting. */
	rebooting = 1;

	/* We are out of the debugger now. */
	kdb_active = 0;

	/*
	 * Do any callouts that should be done BEFORE syncing the filesystems.
	 */
	EVENTHANDLER_INVOKE(shutdown_pre_sync, howto);

	/*
	 * Now sync filesystems.
	 */
	if (!cold && (howto & RB_NOSYNC) == 0 && once == 0) {
		once = 1;
		bufshutdown(show_busybufs);
	}

	print_uptime();

	cngrab();

	/*
	 * Ok, now do things that assume all filesystem activity has
	 * been completed.
	 */
	EVENTHANDLER_INVOKE(shutdown_post_sync, howto);

	if ((howto & (RB_HALT|RB_DUMP)) == RB_DUMP && !cold && !dumping) 
		doadump(TRUE);

	/* Now that we're going to really halt the system... */
	EVENTHANDLER_INVOKE(shutdown_final, howto);

	for(;;) ;	/* safety against shutdown_reset not working */
	/* NOTREACHED */
}
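
Example #1 drives three shutdown eventhandler stages (shutdown_pre_sync, shutdown_post_sync, shutdown_final). Below is a minimal sketch of how a driver subscribes to one of those stages; the mydrv_* names are illustrative, while EVENTHANDLER_REGISTER, the shutdown_fn signature (void *, int), and SHUTDOWN_PRI_DEFAULT are the stock FreeBSD API.

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/eventhandler.h>
#include <sys/reboot.h>

/* Illustrative handler: runs when kern_reboot() reaches shutdown_final. */
static void
mydrv_shutdown_final(void *arg, int howto)
{

	if ((howto & RB_POWEROFF) != 0) {
		/* Hypothetical: cut power to the device here. */
	}
}

/* Typically called from the driver's attach routine. */
static void
mydrv_register_shutdown(void)
{

	EVENTHANDLER_REGISTER(shutdown_final, mydrv_shutdown_final,
	    NULL, SHUTDOWN_PRI_DEFAULT);
}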
Example #2
/*
 * Switch the thread to run on the target CPU.
 */
static void
set_cpu(int cpu, struct thread *td)
{

	KASSERT(cpu >= 0 && cpu < mp_ncpus && cpu_enabled(cpu),
	    ("[cpuctl,%d]: bad cpu number %d", __LINE__, cpu));
	thread_lock(td);
	sched_bind(td, cpu);
	thread_unlock(td);
	KASSERT(td->td_oncpu == cpu,
	    ("[cpuctl,%d]: cannot bind to target cpu %d", __LINE__, cpu));
}
Example #3
/*
 * Put all CPUs into the given Px-state, honoring the current limit.
 */
static int
hwpstate_goto_pstate(device_t dev, int pstate)
{
	int i;
	uint64_t msr;
	int j;
	int limit;
	int id = pstate;
	int error;
	
	/* Get the current P-state limit. */
	msr = rdmsr(MSR_AMD_10H_11H_LIMIT);
	limit = AMD_10H_11H_GET_PSTATE_LIMIT(msr);
	if (limit > id)
		id = limit;

	/*
	 * We are going to the same Px-state on all cpus.
	 * Probably should take _PSD into account.
	 */
	error = 0;
	CPU_FOREACH(i) {
		/* Bind to each cpu. */
		thread_lock(curthread);
		sched_bind(curthread, i);
		thread_unlock(curthread);
		HWPSTATE_DEBUG(dev, "setting P%d-state on cpu%d\n",
			id, PCPU_GET(cpuid));
		/* Go To Px-state */
		wrmsr(MSR_AMD_10H_11H_CONTROL, id);
		/* wait loop (100*100 usec is enough ?) */
		for(j = 0; j < 100; j++){
			msr = rdmsr(MSR_AMD_10H_11H_STATUS);
			if(msr == id){
				break;
			}
			DELAY(100);
		}
		/* get the result. not assure msr=id */
		msr = rdmsr(MSR_AMD_10H_11H_STATUS);
		HWPSTATE_DEBUG(dev, "result  P%d-state on cpu%d\n",
		    (int)msr, PCPU_GET(cpuid));
		if (msr != id) {
			HWPSTATE_DEBUG(dev, "error: loop is not enough.\n");
			error = ENXIO;
		}
	}
	thread_lock(curthread);
	sched_unbind(curthread);
	thread_unlock(curthread);
	return (error);
}
Example #4
static void
restore_cpu(int oldcpu, int is_bound, struct thread *td)
{

	KASSERT(oldcpu >= 0 && oldcpu < mp_ncpus && cpu_enabled(oldcpu),
	    ("[cpuctl,%d]: bad cpu number %d", __LINE__, oldcpu));
	thread_lock(td);
	if (is_bound == 0)
		sched_unbind(td);
	else
		sched_bind(td, oldcpu);
	thread_unlock(td);
}
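
Examples #2 and #4 are a matched pair from cpuctl: set_cpu() migrates a thread to the target CPU and restore_cpu() rebinds it to its old CPU, or unbinds it if it was not bound to begin with. A hedged sketch of the calling pattern follows; read_msr_on_cpu() and its rdmsr() payload are illustrative, while td_oncpu and sched_is_bound() are the stock scheduler interfaces.

/* Sketch: bracket one per-CPU operation with set_cpu()/restore_cpu(). */
static uint64_t
read_msr_on_cpu(int cpu, u_int msr_no, struct thread *td)
{
	uint64_t val;
	int oldcpu, is_bound;

	oldcpu = td->td_oncpu;			/* remember where we ran */
	thread_lock(td);
	is_bound = sched_is_bound(td);		/* were we already pinned? */
	thread_unlock(td);

	set_cpu(cpu, td);			/* migrate to the target CPU */
	val = rdmsr(msr_no);			/* per-CPU work (illustrative) */
	restore_cpu(oldcpu, is_bound, td);	/* rebind or unbind as before */

	return (val);
}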
Example #5
int
bman_pool_destroy(t_Handle pool)
{
	struct bman_softc *sc;

	sc = bman_sc;
	thread_lock(curthread);
	sched_bind(curthread, sc->sc_bpool_cpu[BM_POOL_GetId(pool)]);
	thread_unlock(curthread);

	BM_POOL_Free(pool);

	thread_lock(curthread);
	sched_unbind(curthread);
	thread_unlock(curthread);

	return (0);
}
Example #6
t_Error
qman_fqr_free(t_Handle fqr)
{
	struct qman_softc *sc;
	t_Error error;

	sc = qman_sc;
	thread_lock(curthread);
	sched_bind(curthread, sc->sc_fqr_cpu[QM_FQR_GetFqid(fqr)]);
	thread_unlock(curthread);

	error = QM_FQR_Free(fqr);

	thread_lock(curthread);
	sched_unbind(curthread);
	thread_unlock(curthread);

	return (error);
}
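
Examples #5 and #6 (and #8 below) open-code the same bracket: bind curthread to the portal's CPU, call into the DPAA layer, unbind. As a sketch, here is the idiom factored into a helper; run_on_cpu() is an illustrative name, not something these drivers actually share.

/* Sketch: run fn(arg) with curthread temporarily pinned to 'cpu'. */
static void
run_on_cpu(int cpu, void (*fn)(void *), void *arg)
{

	thread_lock(curthread);
	sched_bind(curthread, cpu);
	thread_unlock(curthread);

	fn(arg);		/* executes on 'cpu' */

	thread_lock(curthread);
	sched_unbind(curthread);
	thread_unlock(curthread);
}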
Example #7
static void
if_pcap_send(void *arg)
{
	struct mbuf *m;
	struct if_pcap_softc *sc = (struct if_pcap_softc *)arg;
	struct ifnet *ifp = sc->ifp;
	uint8_t copybuf[2048];
	uint8_t *pkt;
	unsigned int pktlen;

	if (sc->uif->cpu >= 0)
		sched_bind(sc->tx_thread, sc->uif->cpu);

	while (1) {
		mtx_lock(&sc->tx_lock);
		while (IFQ_DRV_IS_EMPTY(&ifp->if_snd)) {
			ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
			mtx_sleep(&ifp->if_drv_flags, &sc->tx_lock, 0, "wtxlk", 0);
		}
		mtx_unlock(&sc->tx_lock);
	
		while (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) {
			IFQ_DRV_DEQUEUE(&ifp->if_snd, m);
			pktlen = m_length(m, NULL);

			ifp->if_opackets++;

			if (!sc->isfile && (pktlen <= sizeof(copybuf))) {			
				if (NULL == m->m_next) {
					/* all in one piece - avoid copy */
					pkt = mtod(m, uint8_t *);
					ifp->if_ozcopies++;
				} else {
					pkt = copybuf;
					m_copydata(m, 0, pktlen, pkt);
					ifp->if_ocopies++;
				}

				if (0 != if_pcap_sendpacket(sc->pcap_host_ctx, pkt, pktlen))
					ifp->if_oerrors++;
			} else {
				if (sc->isfile)
Example #8
int
bman_portals_detach(device_t dev)
{
    struct dpaa_portals_softc *sc;
    int i;

    bp_sc = NULL;
    sc = device_get_softc(dev);

    for (i = 0; i < ARRAY_SIZE(sc->sc_dp); i++) {
        if (sc->sc_dp[i].dp_ph != NULL) {
            thread_lock(curthread);
            sched_bind(curthread, i);
            thread_unlock(curthread);

            BM_PORTAL_Free(sc->sc_dp[i].dp_ph);

            thread_lock(curthread);
            sched_unbind(curthread);
            thread_unlock(curthread);
        }

        if (sc->sc_dp[i].dp_ires != NULL) {
            XX_DeallocIntr((int)sc->sc_dp[i].dp_ires);
            bus_release_resource(dev, SYS_RES_IRQ,
                                 sc->sc_dp[i].dp_irid, sc->sc_dp[i].dp_ires);
        }
    }
    for (i = 0; i < ARRAY_SIZE(sc->sc_rres); i++) {
        if (sc->sc_rres[i] != NULL)
            bus_release_resource(dev, SYS_RES_MEMORY,
                                 sc->sc_rrid[i],
                                 sc->sc_rres[i]);
    }

    return (0);
}
Example #9
/*
 * Wait for the specified idle threads to switch once.  This ensures
 * that even preempted threads have cycled through the switch function
 * once, exiting their codepaths.  This allows us to change global
 * pointers with no other synchronization.
 */
int
quiesce_cpus(cpuset_t map, const char *wmesg, int prio)
{
	struct pcpu *pcpu;
	u_int gen[MAXCPU];
	int error;
	int cpu;

	error = 0;
	for (cpu = 0; cpu <= mp_maxid; cpu++) {
		if (!CPU_ISSET(cpu, &map) || CPU_ABSENT(cpu))
			continue;
		pcpu = pcpu_find(cpu);
		gen[cpu] = pcpu->pc_idlethread->td_generation;
	}
	for (cpu = 0; cpu <= mp_maxid; cpu++) {
		if (!CPU_ISSET(cpu, &map) || CPU_ABSENT(cpu))
			continue;
		pcpu = pcpu_find(cpu);
		thread_lock(curthread);
		sched_bind(curthread, cpu);
		thread_unlock(curthread);
		while (gen[cpu] == pcpu->pc_idlethread->td_generation) {
			error = tsleep(quiesce_cpus, prio, wmesg, 1);
			if (error != EWOULDBLOCK)
				goto out;
			error = 0;
		}
	}
out:
	thread_lock(curthread);
	sched_unbind(curthread);
	thread_unlock(curthread);

	return (error);
}
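
A sketch of the pointer-swap pattern that the quiesce_cpus() comment describes: publish the new pointer, wait for every CPU's idle thread to switch once, and only then tear down the old target. quiesce_cpus(), all_cpus, and PZERO are the stock kernel symbols; everything else here is illustrative.

typedef void (*hook_fn)(void);

static hook_fn global_hook;	/* illustrative global pointer */

static int
swap_global_hook(hook_fn new_hook)
{
	int error;

	global_hook = new_hook;	/* new callers now see the new pointer */

	/* Wait until every CPU's idle thread has cycled once. */
	error = quiesce_cpus(all_cpus, "hokswp", PZERO);
	if (error != 0)
		return (error);

	/*
	 * The old hook can no longer be running anywhere, so whatever
	 * backed it may now be freed or unloaded.
	 */
	return (0);
}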
Example #10
static void
XX_Dispatch(void *arg)
{
	struct XX_IntrInfo *info;

	info = arg;

	/* Bind this thread to the proper CPU once SMP has started. */
	if ((info->flags & XX_INTR_FLAG_BOUND) == 0 && smp_started &&
	    info->cpu >= 0) {
		thread_lock(curthread);
		sched_bind(curthread, info->cpu);
		thread_unlock(curthread);

		info->flags |= XX_INTR_FLAG_BOUND;
	}

	if (info->handler == NULL) {
		printf("%s(): IRQ handler is NULL!\n", __func__);
		return;
	}

	info->handler(info->arg);
}
Example #11
static void
if_netmap_receive(void *arg)
{
	struct if_netmap_softc *sc;
	struct ifnet *ifp;
	struct uhi_pollfd pfd;
	struct mbuf *m;
	struct if_netmap_bufinfo *bi;
	void *slotbuf;
	uint32_t slotindex;
	uint32_t pktlen;
	uint32_t cur;
	uint32_t avail;
	uint32_t reserved;
	uint32_t returned;
	uint32_t new_reserved;
	unsigned int n;
	int rv;
	int done;


	/* Zero-copy receive
	 *
	 * A packet header mbuf is allocated for each received netmap
	 * buffer, and the netmap buffer is attached to this mbuf as
	 * external storage, along with a free routine and piece of context
	 * that enables the free routine to move the netmap buffer on its
	 * way back to the receive ring.  The per-buffer context objects
	 * (struct if_netmap_bufinfo) are managed by this driver.
	 *
	 * When the mbuf layer calls the free routine for an mbuf-attached
	 * netmap buffer, its associated context object is added to a list
	 * that is part of the pool of those objects.  On each pass through
	 * the receive loop below, all of the context objects that have been
	 * returned to the list since the last pass are processed, and their
	 * associated netmap buffers are returned to the receive ring.
	 *
	 * With this approach, a given netmap buffer may be available for
	 * netmap's use on the ring, may be newly available for our
	 * consumption on the ring, may have been passed to the stack for
	 * processing and not yet returned, or may have been returned to us
	 * from the stack but not yet returned to the netmap ring.
	 */

	sc = (struct if_netmap_softc *)arg;
	ifp = sc->ifp;

	if (sc->cfg->cpu >= 0)
		sched_bind(sc->rx_thread.thr, sc->cfg->cpu);

	rv = if_netmap_rxsync(sc->nm_host_ctx, NULL, NULL, NULL);
	if (rv == -1)
		printf("could not sync rx descriptors before receive loop\n");

	reserved = if_netmap_rxreserved(sc->nm_host_ctx);
	sc->hw_rx_rsvd_begin = if_netmap_rxcur(sc->nm_host_ctx);

	sc->rx_thread.last_stop_check = ticks;
	done = 0;
	for (;;) {
		while (!done && (0 == (avail = if_netmap_rxavail(sc->nm_host_ctx)))) {
			memset(&pfd, 0, sizeof pfd);

			pfd.fd = sc->fd;
			pfd.events = UHI_POLLIN;

			rv = uhi_poll(&pfd, 1, IF_NETMAP_THREAD_STOP_CHECK_MS);
			if (rv == 0) {
				done = if_netmap_stoppable_thread_check(&sc->rx_thread);
			} else if (rv == -1)
				printf("error from poll for receive\n");
		}

		if (ticks - sc->rx_thread.last_stop_check >= sc->stop_check_ticks) {
			done = if_netmap_stoppable_thread_check(&sc->rx_thread);
		}

		if (done)
			break;

		cur = if_netmap_rxcur(sc->nm_host_ctx);
		new_reserved = 0;
		for (n = 0; n < avail; n++) {
			slotbuf = if_netmap_rxslot(sc->nm_host_ctx, &cur, &pktlen, &slotindex);

			ifp->if_ipackets++;
			ifp->if_ibytes += pktlen;

			bi = if_netmap_bufinfo_alloc(&sc->rx_bufinfo, slotindex);
			if (NULL == bi) {
				/* copy receive */
				ifp->if_icopies++;

				/* could streamline this a little since we
				 * know the data is going to fit in a
				 * cluster
				 */
				m = m_devget(slotbuf, pktlen, ETHER_ALIGN, sc->ifp, NULL);

				/* Recover this buffer at the far end of the
				 * reserved trail from prior zero-copy
				 * activity.
				 */
				if_netmap_rxsetslot(sc->nm_host_ctx, &sc->hw_rx_rsvd_begin, slotindex);
			} else {
				/* zero-copy receive */
				ifp->if_izcopies++;

				m = m_gethdr(M_DONTWAIT, MT_DATA);
				if (NULL == m) {
					if_netmap_bufinfo_unalloc(&sc->rx_bufinfo);
					if_netmap_rxsetslot(sc->nm_host_ctx, &sc->hw_rx_rsvd_begin, slotindex);
				} else {
					/* XXX presumably in this path the
					 * IP header isn't aligned on a
					 * 32-bit boundary because the
					 * ethernet header is and there is
					 * no ETHER_ALIGN adjustment?  this
					 * would be an issue for ip_src and
					 * ip_dst on platforms that don't
					 * support 16-bit aligned access to
					 * 32-bit values.
					 */
					
					m->m_pkthdr.len = m->m_len = pktlen;
					m->m_pkthdr.rcvif = sc->ifp;
					m->m_ext.ref_cnt = &bi->refcnt;
					m_extadd(m, slotbuf, if_netmap_rxbufsize(sc->nm_host_ctx),
						 if_netmap_free, sc, bi, 0, EXT_EXTREF);

					new_reserved++;
				}

			}

			if (m) {
				sc->ifp->if_input(sc->ifp, m);
			} else {
				ifp->if_iqdrops++;				
			}
		}

		avail -= n;
		reserved += new_reserved;

		/* Return any netmap buffers freed by the stack to the ring */
		returned = if_netmap_sweep_trail(sc);
		reserved -= returned;

		rv = if_netmap_rxsync(sc->nm_host_ctx, &avail, &cur, &reserved);
		if (rv == -1)
			printf("could not sync rx descriptors after receive\n");

	}

	if_netmap_stoppable_thread_done(&sc->rx_thread);
}
Example #12
static void
if_netmap_send(void *arg)
{
	struct mbuf *m;
	struct if_netmap_softc *sc = (struct if_netmap_softc *)arg;
	struct ifnet *ifp = sc->ifp;
	struct uhi_pollfd pfd;
	uint32_t avail;
	uint32_t cur;
	u_int pktlen;
	int rv;
	int done;
	int pkts_sent;

	if (sc->cfg->cpu >= 0)
		sched_bind(sc->tx_thread.thr, sc->cfg->cpu);

	rv = if_netmap_txsync(sc->nm_host_ctx, NULL, NULL);
	if (rv == -1) {
		printf("could not sync tx descriptors before transmit\n");
	}

	avail = if_netmap_txavail(sc->nm_host_ctx);

	sc->tx_thread.last_stop_check = ticks;
	done = 0;
	pkts_sent = 0;
	do {
		mtx_lock(&sc->tx_lock);
		sc->tx_pkts_to_send -= pkts_sent;
		while ((sc->tx_pkts_to_send == 0) && !done)
			if (EWOULDBLOCK == cv_timedwait(&sc->tx_cv, &sc->tx_lock, sc->stop_check_ticks))
				done = if_netmap_stoppable_thread_check(&sc->tx_thread);
		mtx_unlock(&sc->tx_lock);
	
		if (done)
			break;

		pkts_sent = 0;

		IFQ_DRV_DEQUEUE(&ifp->if_snd, m);
		while (m) {
			while (0 == avail && !done) {
				memset(&pfd, 0, sizeof(pfd));

				pfd.fd = sc->fd;
				pfd.events = UHI_POLLOUT;
				
				rv = uhi_poll(&pfd, 1, IF_NETMAP_THREAD_STOP_CHECK_MS);
				if (rv == 0)
					done = if_netmap_stoppable_thread_check(&sc->tx_thread);	
				else if (rv == -1)
					printf("error from poll for transmit\n");
					
				avail = if_netmap_txavail(sc->nm_host_ctx);
			}

			if (ticks - sc->tx_thread.last_stop_check >= sc->stop_check_ticks)
				done = if_netmap_stoppable_thread_check(&sc->tx_thread);

			if (done)
				break;

			cur = if_netmap_txcur(sc->nm_host_ctx);

			while (m && avail) {
				ifp->if_ocopies++;
				ifp->if_opackets++;

				avail--;
				pkts_sent++;

				pktlen = m_length(m, NULL);

				m_copydata(m, 0, pktlen,
					   if_netmap_txslot(sc->nm_host_ctx, &cur, pktlen)); 
				m_freem(m);

				IFQ_DRV_DEQUEUE(&ifp->if_snd, m);
			}

			rv = if_netmap_txsync(sc->nm_host_ctx, &avail, &cur);
			if (rv == -1) {
				printf("could not sync tx descriptors after transmit\n");
			}
			avail = if_netmap_txavail(sc->nm_host_ctx);
		}

	} while (!done);

	if_netmap_stoppable_thread_done(&sc->tx_thread);
}
Example #13
static void
xctrl_suspend(void)
{
#ifdef SMP
	cpuset_t cpu_suspend_map;
#endif
	int suspend_cancelled;

	EVENTHANDLER_INVOKE(power_suspend);

	if (smp_started) {
		thread_lock(curthread);
		sched_bind(curthread, 0);
		thread_unlock(curthread);
	}
	KASSERT((PCPU_GET(cpuid) == 0), ("Not running on CPU#0"));

	/*
	 * Clear our XenStore node so the toolstack knows we are
	 * responding to the suspend request.
	 */
	xs_write(XST_NIL, "control", "shutdown", "");

	/*
	 * Be sure to hold Giant across DEVICE_SUSPEND/RESUME since non-MPSAFE
	 * drivers need this.
	 */
	mtx_lock(&Giant);
	if (DEVICE_SUSPEND(root_bus) != 0) {
		mtx_unlock(&Giant);
		printf("%s: device_suspend failed\n", __func__);
		return;
	}
	mtx_unlock(&Giant);

#ifdef SMP
	CPU_ZERO(&cpu_suspend_map);	/* silence gcc */
	if (smp_started) {
		/*
		 * Suspend other CPUs. This prevents IPIs while we
		 * are resuming, and will allow us to reset per-cpu
		 * vcpu_info on resume.
		 */
		cpu_suspend_map = all_cpus;
		CPU_CLR(PCPU_GET(cpuid), &cpu_suspend_map);
		if (!CPU_EMPTY(&cpu_suspend_map))
			suspend_cpus(cpu_suspend_map);
	}
#endif

	/*
	 * Prevent any races with evtchn_interrupt() handler.
	 */
	disable_intr();
	intr_suspend();
	xen_hvm_suspend();

	suspend_cancelled = HYPERVISOR_suspend(0);

	xen_hvm_resume(suspend_cancelled != 0);
	intr_resume(suspend_cancelled != 0);
	enable_intr();

	/*
	 * Reset grant table info.
	 */
	gnttab_resume(NULL);

#ifdef SMP
	/* Send an IPI_BITMAP in case there are pending bitmap IPIs. */
	lapic_ipi_vectored(IPI_BITMAP_VECTOR, APIC_IPI_DEST_ALL);
	if (smp_started && !CPU_EMPTY(&cpu_suspend_map)) {
		/*
		 * Now that event channels have been initialized,
		 * resume CPUs.
		 */
		resume_cpus(cpu_suspend_map);
	}
#endif

	/*
	 * FreeBSD really needs to add DEVICE_SUSPEND_CANCEL or
	 * similar.
	 */
	mtx_lock(&Giant);
	DEVICE_RESUME(root_bus);
	mtx_unlock(&Giant);

	if (smp_started) {
		thread_lock(curthread);
		sched_unbind(curthread);
		thread_unlock(curthread);
	}

	EVENTHANDLER_INVOKE(power_resume);

	if (bootverbose)
		printf("System resumed after suspension\n");

}
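
Examples #13 and #15 bracket the suspend path with the power_suspend and power_resume eventhandlers. A minimal sketch of subscribing to them follows; the mydrv_* names are illustrative, while the power_change_fn handlers receive only the argument given at registration time.

#include <sys/eventhandler.h>

static void
mydrv_power_suspend(void *arg)
{

	/* Hypothetical: quiesce hardware before the platform suspends. */
}

static void
mydrv_power_resume(void *arg)
{

	/* Hypothetical: reprogram hardware after resume. */
}

static void
mydrv_register_power_hooks(void)
{

	EVENTHANDLER_REGISTER(power_suspend, mydrv_power_suspend, NULL,
	    EVENTHANDLER_PRI_ANY);
	EVENTHANDLER_REGISTER(power_resume, mydrv_power_resume, NULL,
	    EVENTHANDLER_PRI_ANY);
}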
Example #14
/*
 * Shutdown the system cleanly to prepare for reboot, halt, or power off.
 */
void
kern_reboot(int howto)
{
	static int first_buf_printf = 1;

#if defined(SMP)
	/*
	 * Bind us to CPU 0 so that all shutdown code runs there.  Some
	 * systems don't shut down properly (e.g., ACPI power off) if we
	 * run on another processor.
	 */
	if (!SCHEDULER_STOPPED()) {
		thread_lock(curthread);
		sched_bind(curthread, 0);
		thread_unlock(curthread);
		KASSERT(PCPU_GET(cpuid) == 0, ("boot: not running on cpu 0"));
	}
#endif
	/* We're in the process of rebooting. */
	rebooting = 1;

	/* collect extra flags that shutdown_nice might have set */
	howto |= shutdown_howto;

	/* We are out of the debugger now. */
	kdb_active = 0;

	/*
	 * Do any callouts that should be done BEFORE syncing the filesystems.
	 */
	EVENTHANDLER_INVOKE(shutdown_pre_sync, howto);

	/*
	 * Now sync filesystems.
	 */
	if (!cold && (howto & RB_NOSYNC) == 0 && waittime < 0) {
		register struct buf *bp;
		int iter, nbusy, pbusy;
#ifndef PREEMPTION
		int subiter;
#endif

		waittime = 0;

		wdog_kern_pat(WD_LASTVAL);
		sys_sync(curthread, NULL);

		/*
		 * With soft updates, some buffers that are
		 * written will be remarked as dirty until other
		 * buffers are written.
		 */
		for (iter = pbusy = 0; iter < 20; iter++) {
			nbusy = 0;
			for (bp = &buf[nbuf]; --bp >= buf; )
				if (isbufbusy(bp))
					nbusy++;
			if (nbusy == 0) {
				if (first_buf_printf)
					printf("All buffers synced.");
				break;
			}
			if (first_buf_printf) {
				printf("Syncing disks, buffers remaining... ");
				first_buf_printf = 0;
			}
			printf("%d ", nbusy);
			if (nbusy < pbusy)
				iter = 0;
			pbusy = nbusy;

			wdog_kern_pat(WD_LASTVAL);
			sys_sync(curthread, NULL);

#ifdef PREEMPTION
			/*
			 * Drop Giant and spin for a while to allow
			 * interrupt threads to run.
			 */
			DROP_GIANT();
			DELAY(50000 * iter);
			PICKUP_GIANT();
#else
			/*
			 * Drop Giant and context switch several times to
			 * allow interrupt threads to run.
			 */
			DROP_GIANT();
			for (subiter = 0; subiter < 50 * iter; subiter++) {
				thread_lock(curthread);
				mi_switch(SW_VOL, NULL);
				thread_unlock(curthread);
				DELAY(1000);
			}
			PICKUP_GIANT();
#endif
		}
		printf("\n");
		/*
		 * Count only busy local buffers to prevent forcing
		 * a fsck if we're just a client of a wedged NFS server.
		 */
		nbusy = 0;
		for (bp = &buf[nbuf]; --bp >= buf; ) {
			if (isbufbusy(bp)) {
#if 0
/* XXX: This is bogus.  We should probably have a BO_REMOTE flag instead */
				if (bp->b_dev == NULL) {
					TAILQ_REMOVE(&mountlist,
					    bp->b_vp->v_mount, mnt_list);
					continue;
				}
#endif
				nbusy++;
				if (show_busybufs > 0) {
					printf(
	    "%d: buf:%p, vnode:%p, flags:%0x, blkno:%jd, lblkno:%jd, buflock:",
					    nbusy, bp, bp->b_vp, bp->b_flags,
					    (intmax_t)bp->b_blkno,
					    (intmax_t)bp->b_lblkno);
					BUF_LOCKPRINTINFO(bp);
					if (show_busybufs > 1)
						vn_printf(bp->b_vp,
						    "vnode content: ");
				}
			}
		}
		if (nbusy) {
			/*
			 * Failed to sync all blocks. Indicate this and don't
			 * unmount filesystems (thus forcing an fsck on reboot).
			 */
			printf("Giving up on %d buffers\n", nbusy);
			DELAY(5000000);	/* 5 seconds */
		} else {
			if (!first_buf_printf)
				printf("Final sync complete\n");
			/*
			 * Unmount filesystems
			 */
			if (panicstr == 0)
				vfs_unmountall();
		}
		swapoff_all();
		DELAY(100000);		/* wait for console output to finish */
	}

	print_uptime();

	cngrab();

	/*
	 * Ok, now do things that assume all filesystem activity has
	 * been completed.
	 */
	EVENTHANDLER_INVOKE(shutdown_post_sync, howto);

	if ((howto & (RB_HALT|RB_DUMP)) == RB_DUMP && !cold && !dumping) 
		doadump(TRUE);

	/* Now that we're going to really halt the system... */
	EVENTHANDLER_INVOKE(shutdown_final, howto);

	for(;;) ;	/* safety against shutdown_reset not working */
	/* NOTREACHED */
}
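
Example #14's sync loop decides when the disks have settled via isbufbusy(). A hedged sketch of what that predicate checks follows; the exact flag set is an assumption from memory, so treat sys/kern/kern_shutdown.c as the authoritative version.

/*
 * Sketch: a buffer counts as busy if it is locked and not invalidated,
 * or if it still holds delayed-write data awaiting a flush.
 */
static int
isbufbusy(struct buf *bp)
{

	if (((bp->b_flags & B_INVAL) == 0 && BUF_ISLOCKED(bp)) ||
	    ((bp->b_flags & (B_DELWRI | B_INVAL)) == B_DELWRI))
		return (1);
	return (0);
}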
Example #15
/* Full PV mode suspension. */
static void
xctrl_suspend(void)
{
	int i, j, k, fpp, suspend_cancelled;
	unsigned long max_pfn, start_info_mfn;

	EVENTHANDLER_INVOKE(power_suspend);

#ifdef SMP
	struct thread *td;
	cpuset_t map;
	u_int cpuid;

	/*
	 * Bind us to CPU 0 and stop any other VCPUs.
	 */
	td = curthread;
	thread_lock(td);
	sched_bind(td, 0);
	thread_unlock(td);
	cpuid = PCPU_GET(cpuid);
	KASSERT(cpuid == 0, ("xen_suspend: not running on cpu 0"));

	map = all_cpus;
	CPU_CLR(cpuid, &map);
	CPU_NAND(&map, &stopped_cpus);
	if (!CPU_EMPTY(&map))
		stop_cpus(map);
#endif

	/*
	 * Be sure to hold Giant across DEVICE_SUSPEND/RESUME since non-MPSAFE
	 * drivers need this.
	 */
	mtx_lock(&Giant);
	if (DEVICE_SUSPEND(root_bus) != 0) {
		mtx_unlock(&Giant);
		printf("%s: device_suspend failed\n", __func__);
#ifdef SMP
		if (!CPU_EMPTY(&map))
			restart_cpus(map);
#endif
		return;
	}
	mtx_unlock(&Giant);

	local_irq_disable();

	xencons_suspend();
	gnttab_suspend();
	intr_suspend();

	max_pfn = HYPERVISOR_shared_info->arch.max_pfn;

	void *shared_info = HYPERVISOR_shared_info;
	HYPERVISOR_shared_info = NULL;
	pmap_kremove((vm_offset_t) shared_info);
	PT_UPDATES_FLUSH();

	xen_start_info->store_mfn = MFNTOPFN(xen_start_info->store_mfn);
	xen_start_info->console.domU.mfn = MFNTOPFN(xen_start_info->console.domU.mfn);

	/*
	 * We'll stop somewhere inside this hypercall. When it returns,
	 * we'll start resuming after the restore.
	 */
	start_info_mfn = VTOMFN(xen_start_info);
	pmap_suspend();
	suspend_cancelled = HYPERVISOR_suspend(start_info_mfn);
	pmap_resume();

	pmap_kenter_ma((vm_offset_t) shared_info, xen_start_info->shared_info);
	HYPERVISOR_shared_info = shared_info;

	HYPERVISOR_shared_info->arch.pfn_to_mfn_frame_list_list =
		VTOMFN(xen_pfn_to_mfn_frame_list_list);
  
	fpp = PAGE_SIZE/sizeof(unsigned long);
	for (i = 0, j = 0, k = -1; i < max_pfn; i += fpp, j++) {
		if ((j % fpp) == 0) {
			k++;
			xen_pfn_to_mfn_frame_list_list[k] = 
				VTOMFN(xen_pfn_to_mfn_frame_list[k]);
			j = 0;
		}
		xen_pfn_to_mfn_frame_list[k][j] = 
			VTOMFN(&xen_phys_machine[i]);
	}
	HYPERVISOR_shared_info->arch.max_pfn = max_pfn;

	gnttab_resume();
	intr_resume(suspend_cancelled != 0);
	local_irq_enable();
	xencons_resume();

#ifdef CONFIG_SMP
	for_each_cpu(i)
		vcpu_prepare(i);

#endif

	/* 
	 * Only resume xenbus /after/ we've prepared our VCPUs; otherwise
	 * the VCPU hotplug callback can race with our vcpu_prepare
	 */
	mtx_lock(&Giant);
	DEVICE_RESUME(root_bus);
	mtx_unlock(&Giant);

#ifdef SMP
	thread_lock(curthread);
	sched_unbind(curthread);
	thread_unlock(curthread);
	if (!CPU_EMPTY(&map))
		restart_cpus(map);
#endif
	EVENTHANDLER_INVOKE(power_resume);
}