Example #1
static void
vcons_copycols_buffer(void *cookie, int row, int srccol, int dstcol, int ncols)
{
	struct rasops_info *ri = cookie;
	struct vcons_screen *scr = ri->ri_hw;
	int from = srccol + row * ri->ri_cols;
	int to = dstcol + row * ri->ri_cols;

#ifdef WSDISPLAY_SCROLLSUPPORT
	int offset;
	offset = scr->scr_offset_to_zero;

	memmove(&scr->scr_attrs[offset + to], &scr->scr_attrs[offset + from],
	    ncols * sizeof(long));
	memmove(&scr->scr_chars[offset + to], &scr->scr_chars[offset + from],
	    ncols * sizeof(uint32_t));
#else
	memmove(&scr->scr_attrs[to], &scr->scr_attrs[from],
	    ncols * sizeof(long));
	memmove(&scr->scr_chars[to], &scr->scr_chars[from],
	    ncols * sizeof(uint32_t));
#endif

#ifdef VCONS_DRAW_INTR
	atomic_inc_uint(&scr->scr_dirty);
#endif
}
Example #2
static void
vcons_erasecols_buffer(void *cookie, int row, int startcol, int ncols, long fillattr)
{
	struct rasops_info *ri = cookie;
	struct vcons_screen *scr = ri->ri_hw;
	int start = startcol + row * ri->ri_cols;
	int end = start + ncols, i;

#ifdef WSDISPLAY_SCROLLSUPPORT
	int offset;
	offset = scr->scr_offset_to_zero;

	for (i = start; i < end; i++) {
		scr->scr_attrs[offset + i] = fillattr;
		scr->scr_chars[offset + i] = 0x20;
	}
#else
	for (i = start; i < end; i++) {
		scr->scr_attrs[i] = fillattr;
		scr->scr_chars[i] = 0x20;
	}
#endif

#ifdef VCONS_DRAW_INTR
	atomic_inc_uint(&scr->scr_dirty);
#endif
}
Example #3
static void
vcons_putchar_buffer(void *cookie, int row, int col, u_int c, long attr)
{
	struct rasops_info *ri = cookie;
	struct vcons_screen *scr = ri->ri_hw;
	int pos;
	
#ifdef WSDISPLAY_SCROLLSUPPORT
	int offset;
	offset = scr->scr_offset_to_zero;

	if ((row >= 0) && (row < ri->ri_rows) && (col >= 0) && 
	     (col < ri->ri_cols)) {
		pos = col + row * ri->ri_cols;
		scr->scr_attrs[pos + offset] = attr;
		scr->scr_chars[pos + offset] = c;
	}
#else
	if ((row >= 0) && (row < ri->ri_rows) && (col >= 0) && 
	     (col < ri->ri_cols)) {
		pos = col + row * ri->ri_cols;
		scr->scr_attrs[pos] = attr;
		scr->scr_chars[pos] = c;
	}
#endif

#ifdef VCONS_DRAW_INTR
	atomic_inc_uint(&scr->scr_dirty);
#endif
}
Example #4
static void
vcons_cursor(void *cookie, int on, int row, int col)
{
	struct rasops_info *ri = cookie;
	struct vcons_screen *scr = ri->ri_hw;

#if defined(VCONS_DRAW_INTR)
	if (scr->scr_vd->use_intr) {
		vcons_lock(scr);
		if (scr->scr_ri.ri_crow != row || scr->scr_ri.ri_ccol != col) {
			scr->scr_ri.ri_crow = row;
			scr->scr_ri.ri_ccol = col;
			atomic_inc_uint(&scr->scr_dirty);
		}
		vcons_unlock(scr);
		return;
	}
#endif

	vcons_lock(scr);

	if (SCREEN_IS_VISIBLE(scr) && SCREEN_CAN_DRAW(scr)) {
		scr->scr_vd->cursor(cookie, on, row, col);
	} else {
		scr->scr_ri.ri_crow = row;
		scr->scr_ri.ri_ccol = col;
	}
	vcons_unlock(scr);
}
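Under VCONS_DRAW_INTR the callbacks above only bump scr_dirty and return; the repaint happens later from a soft interrupt that drains the counter. A minimal sketch of that consumer, loosely modeled on NetBSD's vcons soft-interrupt path (the callout field, the ~30 Hz tick, and the helper wiring are assumptions, not the verbatim driver):
static void
vcons_softintr_sketch(void *cookie)
{
	struct vcons_data *vd = cookie;
	struct vcons_screen *scr = vd->active;
	unsigned int dirty;

	if (scr != NULL && vd->use_intr) {
		/* atomically fetch and reset the dirty counter */
		dirty = atomic_swap_uint(&scr->scr_dirty, 0);
		if (dirty > 0 && SCREEN_IS_VISIBLE(scr) && SCREEN_CAN_DRAW(scr))
			vcons_redraw_screen(scr);	/* repaint from the back buffers */
	}
	callout_schedule(&vd->intr, mstohz(33));	/* reschedule; interval assumed */
}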
Example #5
void
mutex_obj_hold(kmutex_t *lock)
{
	struct kmutexobj *mo = (struct kmutexobj *)lock;

	atomic_inc_uint(&mo->mo_refcnt);
}
Example #6
static int
audio_stropen(queue_t *rq, dev_t *devp, int oflag, int sflag, cred_t *credp)
{
	int			rv;
	audio_client_t		*c;

	if (sflag != 0) {
		/* no direct clone or module opens */
		return (ENXIO);
	}

	/*
	 * Make sure it's a STREAMS personality - only legacy Sun API uses
	 * STREAMS.
	 */
	switch (AUDIO_MN_TYPE_MASK & getminor(*devp)) {
	case AUDIO_MINOR_DEVAUDIO:
	case AUDIO_MINOR_DEVAUDIOCTL:
		break;
	default:
		return (ENOSTR);
	}

	if ((c = auimpl_client_create(*devp)) == NULL) {
		audio_dev_warn(NULL, "client create failed");
		return (ENXIO);
	}

	rq->q_ptr = WR(rq)->q_ptr = c;
	c->c_omode = oflag;
	c->c_pid = ddi_get_pid();
	c->c_cred = credp;
	c->c_rq = rq;
	c->c_wq = WR(rq);

	/*
	 * Call client/personality specific open handler.  Note that
	 * we "insist" that there is an open.  The personality layer
	 * will initialize/allocate any engines required.
	 *
	 * Hmm... do we need to pass in the cred?
	 */
	if ((rv = c->c_open(c, oflag)) != 0) {
		audio_dev_warn(c->c_dev, "open failed (rv %d)", rv);
		auimpl_client_destroy(c);
		return (rv);
	}

	/* we do device cloning! */
	*devp = makedevice(c->c_major, c->c_minor);

	qprocson(rq);

	/* now we can receive upcalls */
	auimpl_client_activate(c);

	atomic_inc_uint(&c->c_dev->d_serial);

	return (0);
}
Example #7
File: if_mpls.c Project: ryo/netbsd-src
static int
mpls_clone_create(struct if_clone *ifc, int unit)
{
	struct mpls_softc *sc;

	atomic_inc_uint(&mpls_count);
	sc = malloc(sizeof(*sc), M_DEVBUF, M_WAITOK | M_ZERO);

	if_initname(&sc->sc_if, ifc->ifc_name, unit);
	sc->sc_if.if_softc = sc;
	sc->sc_if.if_type = IFT_MPLS;
	sc->sc_if.if_addrlen = 0;
	sc->sc_if.if_hdrlen = sizeof(union mpls_shim);
	sc->sc_if.if_dlt = DLT_NULL;
	sc->sc_if.if_mtu = 1500;
	sc->sc_if.if_flags = 0;
	sc->sc_if._if_input = mpls_input;
	sc->sc_if.if_output = mpls_output;
	sc->sc_if.if_ioctl = mpls_ioctl;

	if_attach(&sc->sc_if);
	if_alloc_sadl(&sc->sc_if);
	bpf_attach(&sc->sc_if, DLT_NULL, sizeof(uint32_t));
	return 0;
}
Example #8
static void
vcons_eraserows_buffer(void *cookie, int row, int nrows, long fillattr)
{
	struct rasops_info *ri = cookie;
	struct vcons_screen *scr = ri->ri_hw;
	int start, end, i;

#ifdef WSDISPLAY_SCROLLSUPPORT
	int offset;
	offset = scr->scr_offset_to_zero;

	start = ri->ri_cols * row + offset;
	end = ri->ri_cols * (row + nrows) + offset;
#else
	start = ri->ri_cols * row;
	end = ri->ri_cols * (row + nrows);
#endif

	for (i = start; i < end; i++) {
		scr->scr_attrs[i] = fillattr;
		scr->scr_chars[i] = 0x20;
	}

#ifdef VCONS_DRAW_INTR
	atomic_inc_uint(&scr->scr_dirty);
#endif
}
Example #9
File: if_tap.c Project: ryo/netbsd-src
/*
 * The 'create' command of ifconfig can be used to create
 * any numbered instance of a given device.  Thus we have to
 * make sure we have enough room in cd_devs to create the
 * user-specified instance.  config_attach_pseudo will do this
 * for us.
 */
static int
tap_clone_create(struct if_clone *ifc, int unit)
{
	if (tap_clone_creator(unit) == NULL) {
		aprint_error("%s%d: unable to attach an instance\n",
                    tap_cd.cd_name, unit);
		return (ENXIO);
	}
	atomic_inc_uint(&tap_count);
	return (0);
}
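For symmetry, the destroy path drops the instance count the create path bumped. A sketch modeled on if_tap.c's matching tap_clone_destroy (the exact detach and error handling are simplified and assumed):
static int
tap_clone_destroy_sketch(struct ifnet *ifp)
{
	struct tap_softc *sc = ifp->if_softc;

	tap_clone_destroyer(sc->sc_dev);	/* detach the instance */
	atomic_dec_uint(&tap_count);		/* balances create's inc */
	return (0);
}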
Example #10
int
iommulib_nex_open(dev_info_t *dip, dev_info_t *rdip)
{
	iommulib_unit_t *unitp;
	int instance = ddi_get_instance(rdip);
	const char *driver = ddi_driver_name(rdip);
	const char *f = "iommulib_nex_open";

	ASSERT(DEVI(dip)->devi_iommulib_nex_handle != NULL);
	ASSERT(DEVI(rdip)->devi_iommulib_handle == NULL);

	/* prevent use of IOMMU for AMD IOMMU's DMA */
	if (strcmp(driver, "amd_iommu") == 0) {
		DEVI(rdip)->devi_iommulib_handle = IOMMU_HANDLE_UNUSED;
		return (DDI_ENOTSUP);
	}

	/*
	 * Use the probe entry point to determine in a hardware specific
	 * manner whether this dip is controlled by an IOMMU. If yes,
	 * return the handle corresponding to the IOMMU unit.
	 */

	mutex_enter(&iommulib_lock);
	for (unitp = iommulib_list; unitp; unitp = unitp->ilu_next) {
		if (unitp->ilu_ops->ilops_probe(unitp, rdip) == DDI_SUCCESS)
			break;
	}

	if (unitp == NULL) {
		mutex_exit(&iommulib_lock);
		if (iommulib_debug) {
			char *buf = kmem_alloc(MAXPATHLEN, KM_SLEEP);
			cmn_err(CE_WARN, "%s: %s%d: devinfo node (%p): is not "
			    "controlled by an IOMMU: path=%s", f, driver,
			    instance, (void *)rdip, ddi_pathname(rdip, buf));
			kmem_free(buf, MAXPATHLEN);
		}
		DEVI(rdip)->devi_iommulib_handle = IOMMU_HANDLE_UNUSED;
		return (DDI_ENOTSUP);
	}

	mutex_enter(&unitp->ilu_lock);
	unitp->ilu_nex = DEVI(dip)->devi_iommulib_nex_handle;
	unitp->ilu_ref++;
	DEVI(rdip)->devi_iommulib_handle = unitp;
	mutex_exit(&unitp->ilu_lock);
	mutex_exit(&iommulib_lock);

	atomic_inc_uint(&DEVI(dip)->devi_iommulib_nex_handle->nex_ref);

	return (DDI_SUCCESS);
}
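Hedged sketch of the matching release path. The real iommulib_nex_close() differs in detail; the field names follow the open path above and the rest, including the nexus reference drop, is an assumption:
static void
iommulib_nex_close_sketch(dev_info_t *rdip)
{
	iommulib_unit_t *unitp = DEVI(rdip)->devi_iommulib_handle;

	if (unitp == NULL || unitp == IOMMU_HANDLE_UNUSED)
		return;

	mutex_enter(&iommulib_lock);
	mutex_enter(&unitp->ilu_lock);
	unitp->ilu_ref--;			/* balances ilu_ref++ in open */
	DEVI(rdip)->devi_iommulib_handle = NULL;
	mutex_exit(&unitp->ilu_lock);
	mutex_exit(&iommulib_lock);

	atomic_dec_uint(&unitp->ilu_nex->nex_ref);	/* balances open's inc */
}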
Example #11
/*
 * mutex_obj_hold:
 *
 *	Add a single reference to a lock object.  A reference to the object
 *	must already be held, and must be held across this call.
 */
void
mutex_obj_hold(kmutex_t *lock)
{
	struct kmutexobj *mo = (struct kmutexobj *)lock;

	KASSERTMSG(mo->mo_magic == MUTEX_OBJ_MAGIC,
	    "%s: lock %p: mo->mo_magic (%#x) != MUTEX_OBJ_MAGIC (%#x)",
	     __func__, mo, mo->mo_magic, MUTEX_OBJ_MAGIC);
	KASSERTMSG(mo->mo_refcnt > 0,
	    "%s: lock %p: mo->mo_refcnt (%#x) == 0",
	     __func__, mo, mo->mo_refcnt);

	atomic_inc_uint(&mo->mo_refcnt);
}
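The release side that balances mutex_obj_hold() runs the same pattern in reverse: drop the reference with atomic_dec_uint_nv() and destroy the object only when the count reaches zero. A sketch after NetBSD's kern_mutex_obj.c (the pool cache name is an assumption here):
bool
mutex_obj_free_sketch(kmutex_t *lock)
{
	struct kmutexobj *mo = (struct kmutexobj *)lock;

	KASSERTMSG(mo->mo_refcnt > 0,
	    "%s: lock %p: mo->mo_refcnt (%#x) == 0",
	     __func__, mo, mo->mo_refcnt);

	if (atomic_dec_uint_nv(&mo->mo_refcnt) > 0)
		return false;			/* other holders remain */
	mutex_destroy(&mo->mo_lock);
	pool_cache_put(mutex_obj_cache, mo);	/* assumed backing cache */
	return true;
}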
Example #12
static int
audio_close(dev_t dev, int flag, int otyp, cred_t *credp)
{
	audio_client_t	*c;
	audio_dev_t	*d;

	_NOTE(ARGUNUSED(flag));
	_NOTE(ARGUNUSED(credp));
	_NOTE(ARGUNUSED(otyp));

	if ((c = auclnt_hold_by_devt(dev)) == NULL) {
		audio_dev_warn(NULL, "close on bogus devt %x,%x",
		    getmajor(dev), getminor(dev));
		return (ENXIO);
	}

	/* we don't want any upcalls anymore */
	auimpl_client_deactivate(c);

	/*
	 * Pick up any data sitting around in input buffers.  This
	 * avoids leaving record data stuck in queues.
	 */
	if (c->c_istream.s_engine != NULL)
		audio_engine_produce(c->c_istream.s_engine);

	/* get a local hold on the device */
	d = c->c_dev;
	auimpl_dev_hold(c->c_dev);

	/*
	 * NB: This must be done before c->c_close, since it calls
	 * auclnt_close which will block waiting for the reference count
	 * to drop to zero.
	 */
	auclnt_release(c);

	/* Call personality specific close handler */
	c->c_close(c);

	auimpl_client_destroy(c);

	/* notify peers that a change has occurred */
	atomic_inc_uint(&d->d_serial);

	/* now we can drop the release we had on the device */
	auimpl_dev_release(d);

	return (0);
}
Example #13
static void
smbd_sig_handler(int sigval)
{
	if (smbd.s_sigval == 0)
		(void) atomic_swap_uint(&smbd.s_sigval, sigval);

	if (sigval == SIGHUP) {
		atomic_inc_uint(&smbd.s_refreshes);
		(void) pthread_cond_signal(&refresh_cond);
	}

	if (sigval == SIGINT || sigval == SIGTERM) {
		smbd.s_shutting_down = B_TRUE;
		(void) pthread_cond_signal(&refresh_cond);
	}
}
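Hypothetical sketch of the worker side of this handshake: a refresh thread sleeps on refresh_cond and drains the counter the handler bumps with atomic_inc_uint(). refresh_mutex and smbd_refresh() are assumed names; the real smbd refresh loop is organized differently:
static void *
smbd_refresh_monitor(void *arg)
{
	(void) pthread_mutex_lock(&refresh_mutex);
	while (!smbd.s_shutting_down) {
		(void) pthread_cond_wait(&refresh_cond, &refresh_mutex);
		/* consume every refresh queued by SIGHUP in one pass */
		if (atomic_swap_uint(&smbd.s_refreshes, 0) != 0)
			smbd_refresh();		/* assumed reload routine */
	}
	(void) pthread_mutex_unlock(&refresh_mutex);
	return (NULL);
}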
Example #14
static int
audio_strclose(queue_t *rq, int flag, cred_t *credp)
{
	audio_client_t	*c;
	audio_dev_t	*d;
	int		rv = 0;

	_NOTE(ARGUNUSED(flag));
	_NOTE(ARGUNUSED(credp));

	if ((c = rq->q_ptr) == NULL) {
		return (ENXIO);
	}
	if (ddi_can_receive_sig() || (ddi_get_pid() == 0)) {
		rv = auclnt_drain(c);
	}

	/* make sure we won't get any upcalls */
	auimpl_client_deactivate(c);

	/*
	 * Pick up any data sitting around in input buffers.  This
	 * avoids leaving record data stuck in queues.
	 */
	if (c->c_istream.s_engine != NULL)
		audio_engine_produce(c->c_istream.s_engine);

	/* get a local hold on the device */
	d = c->c_dev;
	auimpl_dev_hold(c->c_dev);

	/* Turn off queue processing... */
	qprocsoff(rq);

	/* Call personality specific close handler */
	c->c_close(c);

	auimpl_client_destroy(c);

	/* notify peers that a change has occurred */
	atomic_inc_uint(&d->d_serial);

	/* now we can drop the release we had on the device */
	auimpl_dev_release(d);

	return (rv);
}
Example #15
static int
audio_open(dev_t *devp, int oflag, int otyp, cred_t *credp)
{
	int			rv;
	audio_client_t		*c;

	if (otyp == OTYP_BLK) {
		return (ENXIO);
	}

	if ((c = auimpl_client_create(*devp)) == NULL) {
		audio_dev_warn(NULL, "client create failed");
		return (ENXIO);
	}

	c->c_omode = oflag;
	c->c_pid = ddi_get_pid();
	c->c_cred = credp;

	/*
	 * Call client/personality specific open handler.  Note that
	 * we "insist" that there is an open.  The personality layer
	 * will initialize/allocate any engines required.
	 *
	 * Hmm... do we need to pass in the cred?
	 */
	if ((rv = c->c_open(c, oflag)) != 0) {
		audio_dev_warn(c->c_dev, "open failed (rv %d)", rv);
		auimpl_client_destroy(c);
		return (rv);
	}

	/* we do device cloning! */
	*devp = makedevice(c->c_major, c->c_minor);

	/* now we can receive upcalls */
	auimpl_client_activate(c);

	atomic_inc_uint(&c->c_dev->d_serial);

	return (0);
}
Example #16
static void
vcons_copyrows_buffer(void *cookie, int srcrow, int dstrow, int nrows)
{
	struct rasops_info *ri = cookie;
	struct vcons_screen *scr = ri->ri_hw;
	int from, to, len;

#ifdef WSDISPLAY_SCROLLSUPPORT
	int offset;
	offset = scr->scr_offset_to_zero;

	/* do we need to scroll the back buffer? */
	if (dstrow == 0) {
		from = ri->ri_cols * srcrow;
		to = ri->ri_cols * dstrow;

		memmove(&scr->scr_attrs[to], &scr->scr_attrs[from],
		    scr->scr_offset_to_zero * sizeof(long));
		memmove(&scr->scr_chars[to], &scr->scr_chars[from],
		    scr->scr_offset_to_zero * sizeof(uint32_t));
	}
	from = ri->ri_cols * srcrow + offset;
	to = ri->ri_cols * dstrow + offset;
	len = ri->ri_cols * nrows;

#else
	from = ri->ri_cols * srcrow;
	to = ri->ri_cols * dstrow;
	len = ri->ri_cols * nrows;
#endif
	memmove(&scr->scr_attrs[to], &scr->scr_attrs[from],
	    len * sizeof(long));
	memmove(&scr->scr_chars[to], &scr->scr_chars[from],
	    len * sizeof(uint32_t));

#ifdef VCONS_DRAW_INTR
	atomic_inc_uint(&scr->scr_dirty);
#endif
}
Example #17
static void
acpinex_event_system_handler(ACPI_HANDLE hdl, UINT32 type, void *arg)
{
	acpinex_softstate_t *sp;

	ASSERT(hdl != NULL);
	ASSERT(arg != NULL);
	sp = (acpinex_softstate_t *)arg;

	acpidev_dr_lock_all();
	mutex_enter(&sp->ans_lock);

	switch (type) {
	case ACPI_NOTIFY_BUS_CHECK:
		/*
		 * Bus Check. This notification is performed on a device object
		 * to indicate to OSPM that it needs to perform the Plug and
		 * Play re-enumeration operation on the device tree starting
		 * from the point where it has been notified. OSPM will only
		 * perform this operation at boot, and when notified. It is
		 * the responsibility of the ACPI AML code to notify OSPM at
		 * any other times that this operation is required. The more
		 * accurately and closer to the actual device tree change the
		 * notification can be done, the more efficient the operating
		 * system response will be; however, it can also be an issue
		 * when a device change cannot be confirmed. For example, if
		 * the hardware cannot notice a device change for a particular
		 * location during a system sleeping state, it issues a Bus
		 * Check notification on wake to inform OSPM that it needs to
		 * check the configuration for a device change.
		 */
		/*FALLTHROUGH*/
	case ACPI_NOTIFY_DEVICE_CHECK:
		/*
		 * Device Check. Used to notify OSPM that the device either
		 * appeared or disappeared. If the device has appeared, OSPM
		 * will re-enumerate from the parent. If the device has
		 * disappeared, OSPM will invalidate the state of the device.
		 * OSPM may optimize out re-enumeration. If _DCK is present,
		 * then Notify(object,1) is assumed to indicate an undock
		 * request.
		 */
		/*FALLTHROUGH*/
	case ACPI_NOTIFY_DEVICE_CHECK_LIGHT:
		/*
		 * Device Check Light. Used to notify OSPM that the device
		 * either appeared or disappeared. If the device has appeared,
		 * OSPM will re-enumerate from the device itself, not the
		 * parent. If the device has disappeared, OSPM will invalidate
		 * the state of the device.
		 */
		atomic_inc_uint(&acpinex_dr_event_cnt);
		acpinex_event_handle_check_request(type, hdl, sp, B_TRUE);
		break;

	case ACPI_NOTIFY_EJECT_REQUEST:
		/*
		 * Eject Request. Used to notify OSPM that the device should
		 * be ejected, and that OSPM needs to perform the Plug and Play
		 * ejection operation. OSPM will run the _EJx method.
		 */
		atomic_inc_uint(&acpinex_dr_event_cnt);
		acpinex_event_handle_eject_request(hdl, sp, B_TRUE);
		break;

	default:
		ACPINEX_DEBUG(CE_NOTE,
		    "!acpinex: unhandled event(%d) on hdl %p under %s.",
		    type, hdl, sp->ans_path);
		(void) acpidev_eval_ost(hdl, type, ACPI_OST_STA_NOT_SUPPORT,
		    NULL, 0);
		break;
	}

	if (acpinex_dr_event_cnt != 0) {
		/*
		 * Disable fast reboot if a CPU/MEM/IOH hotplug event happens.
		 * Note: this is a temporary solution and will be revised when
		 * fast reboot can support CPU/MEM/IOH DR operations in the
		 * future.
		 *
		 * ACPI BIOS generates some static ACPI tables, such as MADT,
		 * SRAT and SLIT, to describe the system hardware configuration
		 * on power-on. When a CPU/MEM/IOH hotplug event happens, those
		 * static tables won't be updated and will become stale.
		 *
		 * If we reset the system by fast reboot, BIOS will have no
		 * chance to regenerate those staled static tables. Fast reboot
		 * can't tolerate such inconsistency between staled ACPI tables
		 * and real hardware configuration yet.
		 *
		 * A temporary solution is introduced to disable fast reboot if
		 * CPU/MEM/IOH hotplug event happens. This solution should be
		 * revised when fast reboot is enhanced to support CPU/MEM/IOH
		 * DR operations.
		 */
		fastreboot_disable(FBNS_HOTPLUG);
	}

	mutex_exit(&sp->ans_lock);
	acpidev_dr_unlock_all();
}
Example #18
File: cpuset.c Project: 0xffea/MINIX3
void
kcpuset_use(kcpuset_t *c)
{

	atomic_inc_uint(&c->nused);
}
Example #19
/*
 * This is pretty much a CD target for now
 */
static void
scsitest_request(struct scsipi_channel *chan,
	scsipi_adapter_req_t req, void *arg)
{
	struct scsipi_xfer *xs = arg;
	struct scsipi_generic *cmd = xs->cmd;
#ifdef USE_TOSI_ISO
	int error;
#endif

	if (req != ADAPTER_REQ_RUN_XFER)
		return;

	//show_scsipi_xs(xs);

	switch (cmd->opcode) {
	case SCSI_TEST_UNIT_READY:
		if (isofd == -1)
			sense_notready(xs);

		break;
	case INQUIRY: {
		struct scsipi_inquiry_data *inqbuf = (void *)xs->data;

		memset(inqbuf, 0, sizeof(*inqbuf));
		inqbuf->device = T_CDROM;
		inqbuf->dev_qual2 = SID_REMOVABLE;
		strcpy(inqbuf->vendor, "RUMPHOBO");
		strcpy(inqbuf->product, "It's a LIE");
		strcpy(inqbuf->revision, "0.00");
		break;
	}
	case READ_CD_CAPACITY: {
		struct scsipi_read_cd_cap_data *ret = (void *)xs->data;

		_lto4b(CDBLOCKSIZE, ret->length);
		_lto4b(mycdsize, ret->addr);

		break;
	}
	case READ_DISCINFO: {
		struct scsipi_read_discinfo_data *ret = (void *)xs->data;

		memset(ret, 0, sizeof(*ret));
		break;
	}
	case READ_TRACKINFO: {
		struct scsipi_read_trackinfo_data *ret = (void *)xs->data;

		_lto4b(mycdsize, ret->track_size);
		break;
	}
	case READ_TOC: {
		struct scsipi_toc_header *ret = (void *)xs->data;

		memset(ret, 0, sizeof(*ret));
		break;
	}
	case START_STOP: {
		struct scsipi_start_stop *param = (void *)cmd;

		if (param->how & SSS_LOEJ) {
#ifdef USE_TOSI_ISO
			rumpuser_close(isofd, &error);
#endif
			isofd = -1;
		}
		break;
	}
	case SCSI_SYNCHRONIZE_CACHE_10: {
		if (isofd == -1) {
			if ((xs->xs_control & XS_CTL_SILENT) == 0)
				atomic_inc_uint(&rump_scsitest_err
				    [RUMP_SCSITEST_NOISYSYNC]);
			
			sense_notready(xs);
		}

		break;
	}
	case GET_CONFIGURATION: {
		memset(xs->data, 0, sizeof(struct scsipi_get_conf_data));
		break;
	}
	case SCSI_READ_6_COMMAND: {
#ifdef USE_TOSI_ISO
		struct scsi_rw_6 *param = (void *)cmd;

		printf("reading %d bytes from %d\n",
		    param->length * CDBLOCKSIZE,
		    _3btol(param->addr) * CDBLOCKSIZE);
		rumpuser_pread(isofd, xs->data,
		     param->length * CDBLOCKSIZE,
		     _3btol(param->addr) * CDBLOCKSIZE,
		     &error);
#endif

		break;
	}
	case SCSI_PREVENT_ALLOW_MEDIUM_REMOVAL:
		/* hardcoded for now */
		break;
	default:
		printf("unhandled opcode 0x%x\n", cmd->opcode);
		break;
	}

	scsipi_done(xs);
}
Example #20
/*
 * Statistics clock.  Grab profile sample, and if divider reaches 0,
 * do process and kernel statistics.
 */
void
statclock(struct clockframe *frame)
{
#ifdef GPROF
	struct gmonparam *g;
	intptr_t i;
#endif
	struct cpu_info *ci = curcpu();
	struct schedstate_percpu *spc = &ci->ci_schedstate;
	struct proc *p;
	struct lwp *l;

	/*
	 * Notice changes in divisor frequency, and adjust clock
	 * frequency accordingly.
	 */
	if (spc->spc_psdiv != psdiv) {
		spc->spc_psdiv = psdiv;
		spc->spc_pscnt = psdiv;
		if (psdiv == 1) {
			setstatclockrate(stathz);
		} else {
			setstatclockrate(profhz);
		}
	}
	l = ci->ci_data.cpu_onproc;
	if ((l->l_flag & LW_IDLE) != 0) {
		/*
		 * don't account idle lwps as swapper.
		 */
		p = NULL;
	} else {
		p = l->l_proc;
		mutex_spin_enter(&p->p_stmutex);
	}

	if (CLKF_USERMODE(frame)) {
		if ((p->p_stflag & PST_PROFIL) && profsrc == PROFSRC_CLOCK)
			addupc_intr(l, CLKF_PC(frame));
		if (--spc->spc_pscnt > 0) {
			mutex_spin_exit(&p->p_stmutex);
			return;
		}

		/*
		 * Came from user mode; CPU was in user state.
		 * If this process is being profiled record the tick.
		 */
		p->p_uticks++;
		if (p->p_nice > NZERO)
			spc->spc_cp_time[CP_NICE]++;
		else
			spc->spc_cp_time[CP_USER]++;
	} else {
#ifdef GPROF
		/*
		 * Kernel statistics are just like addupc_intr, only easier.
		 */
		g = &_gmonparam;
		if (profsrc == PROFSRC_CLOCK && g->state == GMON_PROF_ON) {
			i = CLKF_PC(frame) - g->lowpc;
			if (i < g->textsize) {
				i /= HISTFRACTION * sizeof(*g->kcount);
				g->kcount[i]++;
			}
		}
#endif
#ifdef LWP_PC
		if (p != NULL && profsrc == PROFSRC_CLOCK &&
		    (p->p_stflag & PST_PROFIL)) {
			addupc_intr(l, LWP_PC(l));
		}
#endif
		if (--spc->spc_pscnt > 0) {
			if (p != NULL)
				mutex_spin_exit(&p->p_stmutex);
			return;
		}
		/*
		 * Came from kernel mode, so we were:
		 * - handling an interrupt,
		 * - doing syscall or trap work on behalf of the current
		 *   user process, or
		 * - spinning in the idle loop.
		 * Whichever it is, charge the time as appropriate.
		 * Note that we charge interrupts to the current process,
		 * regardless of whether they are ``for'' that process,
		 * so that we know how much of its real time was spent
		 * in ``non-process'' (i.e., interrupt) work.
		 */
		if (CLKF_INTR(frame) || (curlwp->l_pflag & LP_INTR) != 0) {
			if (p != NULL) {
				p->p_iticks++;
			}
			spc->spc_cp_time[CP_INTR]++;
		} else if (p != NULL) {
			p->p_sticks++;
			spc->spc_cp_time[CP_SYS]++;
		} else {
			spc->spc_cp_time[CP_IDLE]++;
		}
	}
	spc->spc_pscnt = psdiv;

	if (p != NULL) {
		atomic_inc_uint(&l->l_cpticks);
		mutex_spin_exit(&p->p_stmutex);
	}
}