Example #1
0
/*
 * Common code for mount and mountroot
 */
static int
mountnfs(struct nfs_args *argp, struct mount *mp, struct sockaddr *nam,
    char *hst, struct vnode **vpp, struct ucred *cred, int nametimeo,
    int negnametimeo)
{
	struct nfsmount *nmp;
	struct nfsnode *np;
	int error;
	struct vattr attrs;

	if (mp->mnt_flag & MNT_UPDATE) {
		nmp = VFSTONFS(mp);
		printf("%s: MNT_UPDATE is no longer handled here\n", __func__);
		free(nam, M_SONAME);
		return (0);
	} else {
		nmp = uma_zalloc(nfsmount_zone, M_WAITOK);
		bzero((caddr_t)nmp, sizeof (struct nfsmount));
		TAILQ_INIT(&nmp->nm_bufq);
		mp->mnt_data = nmp;
		nmp->nm_getinfo = nfs_getnlminfo;
		nmp->nm_vinvalbuf = nfs_vinvalbuf;
	}
	vfs_getnewfsid(mp);
	nmp->nm_mountp = mp;
	mtx_init(&nmp->nm_mtx, "NFSmount lock", NULL, MTX_DEF);			

	/*
	 * V2 can only handle 32 bit filesizes.  A 4GB-1 limit may be too
	 * high, depending on whether we end up with negative offsets in
	 * the client or server somewhere.  2GB-1 may be safer.
	 *
	 * For V3, nfs_fsinfo will adjust this as necessary.  Assume maximum
	 * that we can handle until we find out otherwise.
	 */
	if ((argp->flags & NFSMNT_NFSV3) == 0)
		nmp->nm_maxfilesize = 0xffffffffLL;
	else
		nmp->nm_maxfilesize = OFF_MAX;

	nmp->nm_timeo = NFS_TIMEO;
	nmp->nm_retry = NFS_RETRANS;
	if ((argp->flags & NFSMNT_NFSV3) && argp->sotype == SOCK_STREAM) {
		nmp->nm_wsize = nmp->nm_rsize = NFS_MAXDATA;
	} else {
		nmp->nm_wsize = NFS_WSIZE;
		nmp->nm_rsize = NFS_RSIZE;
	}
	nmp->nm_wcommitsize = hibufspace / (desiredvnodes / 1000);
	nmp->nm_readdirsize = NFS_READDIRSIZE;
	nmp->nm_numgrps = NFS_MAXGRPS;
	nmp->nm_readahead = NFS_DEFRAHEAD;
	nmp->nm_deadthresh = NFS_MAXDEADTHRESH;
	nmp->nm_nametimeo = nametimeo;
	nmp->nm_negnametimeo = negnametimeo;
	nmp->nm_tprintf_delay = nfs_tprintf_delay;
	if (nmp->nm_tprintf_delay < 0)
		nmp->nm_tprintf_delay = 0;
	nmp->nm_tprintf_initial_delay = nfs_tprintf_initial_delay;
	if (nmp->nm_tprintf_initial_delay < 0)
		nmp->nm_tprintf_initial_delay = 0;
	nmp->nm_fhsize = argp->fhsize;
	bcopy((caddr_t)argp->fh, (caddr_t)nmp->nm_fh, argp->fhsize);
	bcopy(hst, mp->mnt_stat.f_mntfromname, MNAMELEN);
	nmp->nm_nam = nam;
	/* Set up the sockets and per-host congestion */
	nmp->nm_sotype = argp->sotype;
	nmp->nm_soproto = argp->proto;
	nmp->nm_rpcops = &nfs_rpcops;

	nfs_decode_args(mp, nmp, argp, hst);

	/*
	 * For Connection based sockets (TCP,...) defer the connect until
	 * the first request, in case the server is not responding.
	 */
	if (nmp->nm_sotype == SOCK_DGRAM &&
		(error = nfs_connect(nmp)))
		goto bad;

	/*
	 * This is silly, but it has to be set so that vinifod() works.
	 * We do not want to do an nfs_statfs() here since we can get
	 * stuck on a dead server and we are holding a lock on the mount
	 * point.
	 */
	mtx_lock(&nmp->nm_mtx);
	mp->mnt_stat.f_iosize = nfs_iosize(nmp);
	mtx_unlock(&nmp->nm_mtx);
	/*
	 * A reference count is needed on the nfsnode representing the
	 * remote root.  If this object is not persistent, then backward
	 * traversals of the mount point (i.e. "..") will not work if
	 * the nfsnode gets flushed out of the cache. Ufs does not have
	 * this problem, because one can identify root inodes by their
	 * number == ROOTINO (2).
	 */
	error = nfs_nget(mp, (nfsfh_t *)nmp->nm_fh, nmp->nm_fhsize, &np, LK_EXCLUSIVE);
	if (error)
		goto bad;
	*vpp = NFSTOV(np);

	/*
	 * Get file attributes and transfer parameters for the
	 * mountpoint.  This has the side effect of filling in
	 * (*vpp)->v_type with the correct value.
	 */
	if (argp->flags & NFSMNT_NFSV3)
		nfs_fsinfo(nmp, *vpp, curthread->td_ucred, curthread);
	else
		VOP_GETATTR(*vpp, &attrs, curthread->td_ucred);

	/*
	 * Lose the lock but keep the ref.
	 */
	VOP_UNLOCK(*vpp, 0);

	return (0);
bad:
	nfs_disconnect(nmp);
	mtx_destroy(&nmp->nm_mtx);
	uma_zfree(nfsmount_zone, nmp);
	free(nam, M_SONAME);
	return (error);
}
Example #2
0
/*
 * Process an asynchronous software trap.
 * This is relatively easy.
 * This function will return with preemption disabled.
 */
void
ast(struct trapframe *framep)
{
	struct thread *td;
	struct proc *p;
	int flags;
	int sig;

	td = curthread;
	p = td->td_proc;

	CTR3(KTR_SYSC, "ast: thread %p (pid %d, %s)", td, p->p_pid,
            p->p_comm);
	KASSERT(TRAPF_USERMODE(framep), ("ast in kernel mode"));
	WITNESS_WARN(WARN_PANIC, NULL, "Returning to user mode");
	mtx_assert(&Giant, MA_NOTOWNED);
	THREAD_LOCK_ASSERT(td, MA_NOTOWNED);
	td->td_frame = framep;
	td->td_pticks = 0;

	/*
	 * This updates the td_flags for the checks below in one
	 * "atomic" operation with turning off the astpending flag.
	 * If another AST is triggered while we are handling the
	 * AST's saved in flags, the astpending flag will be set and
	 * ast() will be called again.
	 */
	thread_lock(td);
	flags = td->td_flags;
	td->td_flags &= ~(TDF_ASTPENDING | TDF_NEEDSIGCHK | TDF_NEEDSUSPCHK |
	    TDF_NEEDRESCHED | TDF_ALRMPEND | TDF_PROFPEND | TDF_MACPEND);
	thread_unlock(td);
	PCPU_INC(cnt.v_trap);

	if (td->td_ucred != p->p_ucred) 
		cred_update_thread(td);
	if (td->td_pflags & TDP_OWEUPC && p->p_flag & P_PROFIL) {
		addupc_task(td, td->td_profil_addr, td->td_profil_ticks);
		td->td_profil_ticks = 0;
		td->td_pflags &= ~TDP_OWEUPC;
	}
#ifdef HWPMC_HOOKS
	/* Handle Software PMC callchain capture. */
	if (PMC_IS_PENDING_CALLCHAIN(td))
		PMC_CALL_HOOK_UNLOCKED(td, PMC_FN_USER_CALLCHAIN_SOFT, (void *) framep);
#endif
	if (flags & TDF_ALRMPEND) {
		PROC_LOCK(p);
		kern_psignal(p, SIGVTALRM);
		PROC_UNLOCK(p);
	}
	if (flags & TDF_PROFPEND) {
		PROC_LOCK(p);
		kern_psignal(p, SIGPROF);
		PROC_UNLOCK(p);
	}
#ifdef MAC
	if (flags & TDF_MACPEND)
		mac_thread_userret(td);
#endif
	if (flags & TDF_NEEDRESCHED) {
#ifdef KTRACE
		if (KTRPOINT(td, KTR_CSW))
			ktrcsw(1, 1, __func__);
#endif
		thread_lock(td);
		sched_prio(td, td->td_user_pri);
		mi_switch(SW_INVOL | SWT_NEEDRESCHED, NULL);
		thread_unlock(td);
#ifdef KTRACE
		if (KTRPOINT(td, KTR_CSW))
			ktrcsw(0, 1, __func__);
#endif
	}

	/*
	 * Check for signals. Unlocked reads of p_pendingcnt or
	 * p_siglist might cause process-directed signal to be handled
	 * later.
	 */
	if (flags & TDF_NEEDSIGCHK || p->p_pendingcnt > 0 ||
	    !SIGISEMPTY(p->p_siglist)) {
		PROC_LOCK(p);
		mtx_lock(&p->p_sigacts->ps_mtx);
		while ((sig = cursig(td, SIG_STOP_ALLOWED)) != 0)
			postsig(sig);
		mtx_unlock(&p->p_sigacts->ps_mtx);
		PROC_UNLOCK(p);
	}
	/*
	 * We need to check to see if we have to exit or wait due to a
	 * single threading requirement or some other STOP condition.
	 */
	if (flags & TDF_NEEDSUSPCHK) {
		PROC_LOCK(p);
		thread_suspend_check(0);
		PROC_UNLOCK(p);
	}

	if (td->td_pflags & TDP_OLDMASK) {
		td->td_pflags &= ~TDP_OLDMASK;
		kern_sigprocmask(td, SIG_SETMASK, &td->td_oldsigmask, NULL, 0);
	}

	userret(td, framep);
	mtx_assert(&Giant, MA_NOTOWNED);
}
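/*
 * Illustrative userspace sketch (not kernel code) of the snapshot-and-clear
 * pattern used in ast() above: the pending-work flags are read and cleared in
 * one locked step, then each flagged item is handled outside the lock.  The
 * flag names and the pthread mutex are assumptions made for this example.
 */
#include <pthread.h>
#include <stdio.h>

#define F_RESCHED 0x1
#define F_SIGNAL  0x2

static pthread_mutex_t flag_mtx = PTHREAD_MUTEX_INITIALIZER;
static int pending_flags = F_RESCHED | F_SIGNAL;

static void
handle_pending(void)
{
	int flags;

	/* Snapshot and clear the pending bits in a single locked step. */
	pthread_mutex_lock(&flag_mtx);
	flags = pending_flags;
	pending_flags = 0;
	pthread_mutex_unlock(&flag_mtx);

	/* Work on the snapshot; flags set meanwhile trigger another pass. */
	if (flags & F_RESCHED)
		printf("reschedule requested\n");
	if (flags & F_SIGNAL)
		printf("signal check requested\n");
}

int
main(void)
{
	handle_pending();
	return (0);
}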
Example #3
0
static int
hpt_set_array_state(DEVICEID idArray, DWORD state)
{
	IAL_ADAPTER_T *pAdapter;
	PVDevice pVDevice = ID_TO_VDEV(idArray);
	int	i;

	if(idArray == 0 || check_VDevice_valid(pVDevice))	return -1;
	if(!mIsArray(pVDevice))
		return -1;
	if(!pVDevice->vf_online || pVDevice->u.array.rf_broken) return -1;

	pAdapter=(IAL_ADAPTER_T *)pVDevice->pVBus->OsExt;
	
	switch(state)
	{
		case MIRROR_REBUILD_START:
		{
			mtx_lock(&pAdapter->lock);
			if (pVDevice->u.array.rf_rebuilding ||
				pVDevice->u.array.rf_verifying ||
				pVDevice->u.array.rf_initializing) {
				mtx_unlock(&pAdapter->lock);
				return -1;
			}
			
			pVDevice->u.array.rf_auto_rebuild = 0;
			pVDevice->u.array.rf_abort_rebuild = 0;

			hpt_queue_dpc((HPT_DPC)hpt_rebuild_data_block, pAdapter, pVDevice, 
				(UCHAR)((pVDevice->u.array.CriticalMembers || pVDevice->VDeviceType == VD_RAID_1)? DUPLICATE : REBUILD_PARITY));

			while (!pVDevice->u.array.rf_rebuilding)
			{
				if (mtx_sleep(pVDevice, &pAdapter->lock, 0,
				    "hptwait", hz * 20) != 0)
					break;
			}
			mtx_unlock(&pAdapter->lock);
		}

		break;

		case MIRROR_REBUILD_ABORT:
		{
			for(i = 0; i < pVDevice->u.array.bArnMember; i++) {
				if(pVDevice->u.array.pMember[i] != 0 && pVDevice->u.array.pMember[i]->VDeviceType == VD_RAID_1)
					hpt_set_array_state(VDEV_TO_ID(pVDevice->u.array.pMember[i]), state);
			}

			mtx_lock(&pAdapter->lock);
			if(pVDevice->u.array.rf_rebuilding != 1) {
				mtx_unlock(&pAdapter->lock);
				return -1;
			}
			
			pVDevice->u.array.rf_abort_rebuild = 1;
			
			while (pVDevice->u.array.rf_abort_rebuild)
			{
				if (mtx_sleep(pVDevice, &pAdapter->lock, 0,
				    "hptabrt", hz * 20) != 0)
					break;
			}
			mtx_unlock(&pAdapter->lock);
		}
		break;

		case AS_VERIFY_START:
		{
			/*if(pVDevice->u.array.rf_verifying)
				return -1;*/
			mtx_lock(&pAdapter->lock);
			if (pVDevice->u.array.rf_rebuilding ||
				pVDevice->u.array.rf_verifying ||
				pVDevice->u.array.rf_initializing) {
				mtx_unlock(&pAdapter->lock);
				return -1;
			}

			pVDevice->u.array.RebuildSectors = 0;
			hpt_queue_dpc((HPT_DPC)hpt_rebuild_data_block, pAdapter, pVDevice, VERIFY);
			
			while (!pVDevice->u.array.rf_verifying)
			{
				if (mtx_sleep(pVDevice, &pAdapter->lock, 0,
				    "hptvrfy", hz * 20) != 0)
					break;
			}
			mtx_unlock(&pAdapter->lock);
		}
		break;

		case AS_VERIFY_ABORT:
		{
			mtx_lock(&pAdapter->lock);
			if(pVDevice->u.array.rf_verifying != 1) {
				mtx_unlock(&pAdapter->lock);
				return -1;
			}
				
			pVDevice->u.array.rf_abort_rebuild = 1;
			
			while (pVDevice->u.array.rf_abort_rebuild)
			{
				if (mtx_sleep(pVDevice, &pAdapter->lock, 0,
				    "hptvrfy", hz * 80) != 0)
					break;
			}
			mtx_unlock(&pAdapter->lock);
		}
		break;

		case AS_INITIALIZE_START:
		{
			mtx_lock(&pAdapter->lock);
			if (pVDevice->u.array.rf_rebuilding ||
				pVDevice->u.array.rf_verifying ||
				pVDevice->u.array.rf_initializing) {
				mtx_unlock(&pAdapter->lock);
				return -1;
			}

			hpt_queue_dpc((HPT_DPC)hpt_rebuild_data_block, pAdapter, pVDevice, VERIFY);
			
			while (!pVDevice->u.array.rf_initializing)
			{
				if (mtx_sleep(pVDevice, &pAdapter->lock, 0,
				    "hptinit", hz * 80) != 0)
					break;
			}
			mtx_unlock(&pAdapter->lock);
		}
		break;

		case AS_INITIALIZE_ABORT:
		{
			mtx_lock(&pAdapter->lock);
			if(pVDevice->u.array.rf_initializing != 1) {
				mtx_unlock(&pAdapter->lock);
				return -1;
			}
				
			pVDevice->u.array.rf_abort_rebuild = 1;
			
			while (pVDevice->u.array.rf_abort_rebuild)
			{
				if (mtx_sleep(pVDevice, &pAdapter->lock, 0,
				    "hptinit", hz * 80) != 0)
					break;
			}
			mtx_unlock(&pAdapter->lock);
		}
		break;

		default:
			return -1;
	}

	return 0;
}
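/*
 * Illustrative userspace sketch of the wait pattern used above: kick off a
 * background task, then poll a state flag under a mutex with a bounded
 * condition-variable wait in place of mtx_sleep().  All names and the
 * 20-second timeout are assumptions made for this example.
 */
#include <pthread.h>
#include <stdio.h>
#include <time.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
static int rebuilding = 0;

static void *
worker(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&lock);
	rebuilding = 1;		/* analogous to rf_rebuilding being set */
	pthread_cond_broadcast(&cond);
	pthread_mutex_unlock(&lock);
	return (NULL);
}

int
main(void)
{
	pthread_t tid;
	struct timespec deadline;

	pthread_create(&tid, NULL, worker, NULL);

	pthread_mutex_lock(&lock);
	while (!rebuilding) {
		clock_gettime(CLOCK_REALTIME, &deadline);
		deadline.tv_sec += 20;	/* cf. the hz * 20 timeout above */
		/* Give up if the worker does not report progress in time. */
		if (pthread_cond_timedwait(&cond, &lock, &deadline) != 0)
			break;
	}
	pthread_mutex_unlock(&lock);

	printf("rebuilding=%d\n", rebuilding);
	pthread_join(tid, NULL);
	return (0);
}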
Example #4
0
/*
 * The caller must make sure the protocol and its functions correctly shut down
 * all sockets and release all locks and memory references.
 */
int
pf_proto_unregister(int family, int protocol, int type)
{
	struct domain *dp;
	struct protosw *pr, *dpr;

	/* Sanity checks. */
	if (family == 0)
		return (EPFNOSUPPORT);
	if (protocol == 0)
		return (EPROTONOSUPPORT);
	if (type == 0)
		return (EPROTOTYPE);

	/* Try to find the specified domain based on the family type. */
	dp = pffinddomain(family);
	if (dp == NULL)
		return (EPFNOSUPPORT);

	dpr = NULL;

	/* Lock out everyone else while we are manipulating the protosw. */
	mtx_lock(&dom_mtx);

	/* The protocol must exist, and it must exist only once. */
	for (pr = dp->dom_protosw; pr < dp->dom_protoswNPROTOSW; pr++) {
		if ((pr->pr_type == type) && (pr->pr_protocol == protocol)) {
			if (dpr != NULL) {
				mtx_unlock(&dom_mtx);
				return (EMLINK);   /* Should not happen! */
			} else
				dpr = pr;
		}
	}

	/* Protocol does not exist. */
	if (dpr == NULL) {
		mtx_unlock(&dom_mtx);
		return (EPROTONOSUPPORT);
	}

	/* De-orbit the protocol and make the slot available again. */
	dpr->pr_type = 0;
	dpr->pr_domain = dp;
	dpr->pr_protocol = PROTO_SPACER;
	dpr->pr_flags = 0;
	dpr->pr_input = NULL;
	dpr->pr_output = NULL;
	dpr->pr_ctlinput = NULL;
	dpr->pr_ctloutput = NULL;
	dpr->pr_init = NULL;
	dpr->pr_fasttimo = NULL;
	dpr->pr_slowtimo = NULL;
	dpr->pr_drain = NULL;
	dpr->pr_usrreqs = &nousrreqs;

	/* Job is done, no more protection required. */
	mtx_unlock(&dom_mtx);

	return (0);
}
Example #5
int
cfcs_init(void)
{
	struct cfcs_softc *softc;
	struct ccb_setasync csa;
	struct ctl_port *port;
#ifdef NEEDTOPORT
	char wwnn[8];
#endif
	int retval;

	softc = &cfcs_softc;
	retval = 0;
	bzero(softc, sizeof(*softc));
	mtx_init(&softc->lock, "ctl2cam", NULL, MTX_DEF);
	port = &softc->port;

	port->frontend = &cfcs_frontend;
	port->port_type = CTL_PORT_INTERNAL;
	/* XXX KDM what should the real number be here? */
	port->num_requested_ctl_io = 4096;
	snprintf(softc->port_name, sizeof(softc->port_name), "camsim");
	port->port_name = softc->port_name;
	port->port_online = cfcs_online;
	port->port_offline = cfcs_offline;
	port->onoff_arg = softc;
	port->lun_enable = cfcs_lun_enable;
	port->lun_disable = cfcs_lun_disable;
	port->targ_lun_arg = softc;
	port->fe_datamove = cfcs_datamove;
	port->fe_done = cfcs_done;

	/* XXX KDM what should we report here? */
	/* XXX These should probably be fetched from CTL. */
	port->max_targets = 1;
	port->max_target_id = 15;

	retval = ctl_port_register(port);
	if (retval != 0) {
		printf("%s: ctl_port_register() failed with error %d!\n",
		       __func__, retval);
		mtx_destroy(&softc->lock);
		return (retval);
	}

	/*
	 * Get the WWNN out of the database, and create a WWPN as well.
	 */
#ifdef NEEDTOPORT
	ddb_GetWWNN((char *)wwnn);
	softc->wwnn = be64dec(wwnn);
	softc->wwpn = softc->wwnn + (softc->port.targ_port & 0xff);
#endif

	/*
	 * If the CTL frontend didn't tell us what our WWNN/WWPN is, go
	 * ahead and set something random.
	 */
	if (port->wwnn == 0) {
		uint64_t random_bits;

		arc4rand(&random_bits, sizeof(random_bits), 0);
		softc->wwnn = (random_bits & 0x0000000fffffff00ULL) |
			/* Company ID */ 0x5000000000000000ULL |
			/* NL-Port */    0x0300;
		softc->wwpn = softc->wwnn + port->targ_port + 1;
		ctl_port_set_wwns(port, true, softc->wwnn, true, softc->wwpn);
	} else {
		softc->wwnn = port->wwnn;
		softc->wwpn = port->wwpn;
	}

	mtx_lock(&softc->lock);
	softc->devq = cam_simq_alloc(port->num_requested_ctl_io);
	if (softc->devq == NULL) {
		printf("%s: error allocating devq\n", __func__);
		retval = ENOMEM;
		goto bailout;
	}

	softc->sim = cam_sim_alloc(cfcs_action, cfcs_poll, softc->port_name,
				   softc, /*unit*/ 0, &softc->lock, 1,
				   port->num_requested_ctl_io, softc->devq);
	if (softc->sim == NULL) {
		printf("%s: error allocating SIM\n", __func__);
		retval = ENOMEM;
		goto bailout;
	}

	if (xpt_bus_register(softc->sim, NULL, 0) != CAM_SUCCESS) {
		printf("%s: error registering SIM\n", __func__);
		retval = ENOMEM;
		goto bailout;
	}

	if (xpt_create_path(&softc->path, /*periph*/NULL,
			    cam_sim_path(softc->sim),
			    CAM_TARGET_WILDCARD,
			    CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
		printf("%s: error creating path\n", __func__);
		xpt_bus_deregister(cam_sim_path(softc->sim));
		retval = EINVAL;
		goto bailout;
	}

	xpt_setup_ccb(&csa.ccb_h, softc->path, CAM_PRIORITY_NONE);
	csa.ccb_h.func_code = XPT_SASYNC_CB;
	csa.event_enable = AC_LOST_DEVICE;
	csa.callback = cfcs_async;
	csa.callback_arg = softc->sim;
	xpt_action((union ccb *)&csa);

	mtx_unlock(&softc->lock);

	return (retval);

bailout:
	if (softc->sim)
		cam_sim_free(softc->sim, /*free_devq*/ TRUE);
	else if (softc->devq)
		cam_simq_free(softc->devq);
	mtx_unlock(&softc->lock);
	mtx_destroy(&softc->lock);

	return (retval);
}
Example #6
0
int
ata_attach(device_t dev)
{
    struct ata_channel *ch = device_get_softc(dev);
    int error, rid;
    struct cam_devq *devq;
    const char *res;
    char buf[64];
    int i, mode;

    /* check that we have a virgin channel to attach */
    if (ch->r_irq)
	return EEXIST;

    /* initialize the softc basics */
    ch->dev = dev;
    ch->state = ATA_IDLE;
    bzero(&ch->state_mtx, sizeof(struct mtx));
    mtx_init(&ch->state_mtx, "ATA state lock", NULL, MTX_DEF);
    TASK_INIT(&ch->conntask, 0, ata_conn_event, dev);
	for (i = 0; i < 16; i++) {
		ch->user[i].revision = 0;
		snprintf(buf, sizeof(buf), "dev%d.sata_rev", i);
		if (resource_int_value(device_get_name(dev),
		    device_get_unit(dev), buf, &mode) != 0 &&
		    resource_int_value(device_get_name(dev),
		    device_get_unit(dev), "sata_rev", &mode) != 0)
			mode = -1;
		if (mode >= 0)
			ch->user[i].revision = mode;
		ch->user[i].mode = 0;
		snprintf(buf, sizeof(buf), "dev%d.mode", i);
		if (resource_string_value(device_get_name(dev),
		    device_get_unit(dev), buf, &res) == 0)
			mode = ata_str2mode(res);
		else if (resource_string_value(device_get_name(dev),
		    device_get_unit(dev), "mode", &res) == 0)
			mode = ata_str2mode(res);
		else
			mode = -1;
		if (mode >= 0)
			ch->user[i].mode = mode;
		if (ch->flags & ATA_SATA)
			ch->user[i].bytecount = 8192;
		else
			ch->user[i].bytecount = MAXPHYS;
		ch->user[i].caps = 0;
		ch->curr[i] = ch->user[i];
		if (ch->flags & ATA_SATA) {
			if (ch->pm_level > 0)
				ch->user[i].caps |= CTS_SATA_CAPS_H_PMREQ;
			if (ch->pm_level > 1)
				ch->user[i].caps |= CTS_SATA_CAPS_D_PMREQ;
		} else {
			if (!(ch->flags & ATA_NO_48BIT_DMA))
				ch->user[i].caps |= CTS_ATA_CAPS_H_DMA48;
		}
	}
	callout_init(&ch->poll_callout, 1);

    /* allocate DMA resources if DMA HW present*/
    if (ch->dma.alloc)
	ch->dma.alloc(dev);

    /* setup interrupt delivery */
    rid = ATA_IRQ_RID;
    ch->r_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
				       RF_SHAREABLE | RF_ACTIVE);
    if (!ch->r_irq) {
	device_printf(dev, "unable to allocate interrupt\n");
	return ENXIO;
    }
    if ((error = bus_setup_intr(dev, ch->r_irq, ATA_INTR_FLAGS, NULL,
				ata_interrupt, ch, &ch->ih))) {
	bus_release_resource(dev, SYS_RES_IRQ, rid, ch->r_irq);
	device_printf(dev, "unable to setup interrupt\n");
	return error;
    }

	if (ch->flags & ATA_PERIODIC_POLL)
		callout_reset(&ch->poll_callout, hz, ata_periodic_poll, ch);
	mtx_lock(&ch->state_mtx);
	/* Create the device queue for our SIM. */
	devq = cam_simq_alloc(1);
	if (devq == NULL) {
		device_printf(dev, "Unable to allocate simq\n");
		error = ENOMEM;
		goto err1;
	}
	/* Construct SIM entry */
	ch->sim = cam_sim_alloc(ataaction, atapoll, "ata", ch,
	    device_get_unit(dev), &ch->state_mtx, 1, 0, devq);
	if (ch->sim == NULL) {
		device_printf(dev, "unable to allocate sim\n");
		cam_simq_free(devq);
		error = ENOMEM;
		goto err1;
	}
	if (xpt_bus_register(ch->sim, dev, 0) != CAM_SUCCESS) {
		device_printf(dev, "unable to register xpt bus\n");
		error = ENXIO;
		goto err2;
	}
	if (xpt_create_path(&ch->path, /*periph*/NULL, cam_sim_path(ch->sim),
	    CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
		device_printf(dev, "unable to create path\n");
		error = ENXIO;
		goto err3;
	}
	mtx_unlock(&ch->state_mtx);
	return (0);

err3:
	xpt_bus_deregister(cam_sim_path(ch->sim));
err2:
	cam_sim_free(ch->sim, /*free_devq*/TRUE);
	ch->sim = NULL;
err1:
	bus_release_resource(dev, SYS_RES_IRQ, rid, ch->r_irq);
	mtx_unlock(&ch->state_mtx);
	if (ch->flags & ATA_PERIODIC_POLL)
		callout_drain(&ch->poll_callout);
	return (error);
}
Example #7
0
static int
shm_dotruncate(struct shmfd *shmfd, off_t length)
{
	vm_object_t object;
	vm_page_t m, ma[1];
	vm_pindex_t idx, nobjsize;
	vm_ooffset_t delta;
	int base, rv;

	object = shmfd->shm_object;
	VM_OBJECT_LOCK(object);
	if (length == shmfd->shm_size) {
		VM_OBJECT_UNLOCK(object);
		return (0);
	}
	nobjsize = OFF_TO_IDX(length + PAGE_MASK);

	/* Are we shrinking?  If so, trim the end. */
	if (length < shmfd->shm_size) {
		/*
		 * Disallow any requests to shrink the size if this
		 * object is mapped into the kernel.
		 */
		if (shmfd->shm_kmappings > 0) {
			VM_OBJECT_UNLOCK(object);
			return (EBUSY);
		}

		/*
		 * Zero the truncated part of the last page.
		 */
		base = length & PAGE_MASK;
		if (base != 0) {
			idx = OFF_TO_IDX(length);
retry:
			m = vm_page_lookup(object, idx);
			if (m != NULL) {
				if ((m->oflags & VPO_BUSY) != 0 ||
				    m->busy != 0) {
					vm_page_sleep(m, "shmtrc");
					goto retry;
				}
			} else if (vm_pager_has_page(object, idx, NULL, NULL)) {
				m = vm_page_alloc(object, idx, VM_ALLOC_NORMAL);
				if (m == NULL) {
					VM_OBJECT_UNLOCK(object);
					VM_WAIT;
					VM_OBJECT_LOCK(object);
					goto retry;
				} else if (m->valid != VM_PAGE_BITS_ALL) {
					ma[0] = m;
					rv = vm_pager_get_pages(object, ma, 1,
					    0);
					m = vm_page_lookup(object, idx);
				} else
					/* A cached page was reactivated. */
					rv = VM_PAGER_OK;
				vm_page_lock(m);
				if (rv == VM_PAGER_OK) {
					vm_page_deactivate(m);
					vm_page_unlock(m);
					vm_page_wakeup(m);
				} else {
					vm_page_free(m);
					vm_page_unlock(m);
					VM_OBJECT_UNLOCK(object);
					return (EIO);
				}
			}
			if (m != NULL) {
				pmap_zero_page_area(m, base, PAGE_SIZE - base);
				KASSERT(m->valid == VM_PAGE_BITS_ALL,
				    ("shm_dotruncate: page %p is invalid", m));
				vm_page_dirty(m);
				vm_pager_page_unswapped(m);
			}
		}
		delta = ptoa(object->size - nobjsize);

		/* Toss in memory pages. */
		if (nobjsize < object->size)
			vm_object_page_remove(object, nobjsize, object->size,
			    0);

		/* Toss pages from swap. */
		if (object->type == OBJT_SWAP)
			swap_pager_freespace(object, nobjsize, delta);

		/* Free the swap accounted for shm */
		swap_release_by_cred(delta, object->cred);
		object->charge -= delta;
	} else {
		/* Attempt to reserve the swap */
		delta = ptoa(nobjsize - object->size);
		if (!swap_reserve_by_cred(delta, object->cred)) {
			VM_OBJECT_UNLOCK(object);
			return (ENOMEM);
		}
		object->charge += delta;
	}
	shmfd->shm_size = length;
	mtx_lock(&shm_timestamp_lock);
	vfs_timestamp(&shmfd->shm_ctime);
	shmfd->shm_mtime = shmfd->shm_ctime;
	mtx_unlock(&shm_timestamp_lock);
	object->size = nobjsize;
	VM_OBJECT_UNLOCK(object);
	return (0);
}
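/*
 * Tiny standalone sketch of the partial-page arithmetic used when shrinking
 * above: 'base' is the offset of the new end within its page, and the tail
 * of that page (PAGE_SIZE - base bytes) is what gets zeroed.  PAGE_SIZE and
 * the sample length are assumed values for illustration only.
 */
#include <stdio.h>

#define PAGE_SIZE 4096
#define PAGE_MASK (PAGE_SIZE - 1)

int
main(void)
{
	long length = 10000;		/* hypothetical new object size */
	long base = length & PAGE_MASK;	/* offset into the last page */

	if (base != 0)
		printf("zero %ld bytes at page offset %ld\n",
		    (long)PAGE_SIZE - base, base);
	return (0);
}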
Example #8
0
/*
 * Asynchronous I/O daemons for client nfs.
 * They do read-ahead and write-behind operations on the block I/O cache.
 * Returns if we hit the timeout defined by the iodmaxidle sysctl.
 */
static void
nfssvc_iod(void *instance)
{
    struct buf *bp;
    struct nfsmount *nmp;
    int myiod, timo;
    int error = 0;

    mtx_lock(&nfs_iod_mtx);
    myiod = (int *)instance - nfs_asyncdaemon;
    /*
     * Main loop
     */
    for (;;) {
        while (((nmp = nfs_iodmount[myiod]) == NULL)
                || !TAILQ_FIRST(&nmp->nm_bufq)) {
            if (myiod >= nfs_iodmax)
                goto finish;
            if (nmp)
                nmp->nm_bufqiods--;
            if (nfs_iodwant[myiod] == NFSIOD_NOT_AVAILABLE)
                nfs_iodwant[myiod] = NFSIOD_AVAILABLE;
            nfs_iodmount[myiod] = NULL;
            /*
             * Always keep at least nfs_iodmin kthreads.
             */
            timo = (myiod < nfs_iodmin) ? 0 : nfs_iodmaxidle * hz;
            error = msleep(&nfs_iodwant[myiod], &nfs_iod_mtx, PWAIT | PCATCH,
                           "-", timo);
            if (error) {
                nmp = nfs_iodmount[myiod];
                /*
                 * Rechecking the nm_bufq closes a rare race where the
                 * nfsiod is woken up at the exact time the idle timeout
                 * fires
                 */
                if (nmp && TAILQ_FIRST(&nmp->nm_bufq))
                    error = 0;
                break;
            }
        }
        if (error)
            break;
        while ((bp = TAILQ_FIRST(&nmp->nm_bufq)) != NULL) {
            int giant_locked = 0;

            /* Take one off the front of the list */
            TAILQ_REMOVE(&nmp->nm_bufq, bp, b_freelist);
            nmp->nm_bufqlen--;
            if (nmp->nm_bufqwant && nmp->nm_bufqlen <= nfs_numasync) {
                nmp->nm_bufqwant = 0;
                wakeup(&nmp->nm_bufq);
            }
            mtx_unlock(&nfs_iod_mtx);
            if (NFS_ISV4(bp->b_vp)) {
                giant_locked = 1;
                mtx_lock(&Giant);
            }
            if (bp->b_flags & B_DIRECT) {
                KASSERT((bp->b_iocmd == BIO_WRITE), ("nfssvc_iod: BIO_WRITE not set"));
                (void)nfs_doio_directwrite(bp);
            } else {
                if (bp->b_iocmd == BIO_READ)
                    (void) nfs_doio(bp->b_vp, bp, bp->b_rcred, NULL);
                else
                    (void) nfs_doio(bp->b_vp, bp, bp->b_wcred, NULL);
            }
            if (giant_locked)
                mtx_unlock(&Giant);
            mtx_lock(&nfs_iod_mtx);
            /*
             * Make sure the nmp hasn't been dismounted as soon as
             * nfs_doio() completes for the last buffer.
             */
            nmp = nfs_iodmount[myiod];
            if (nmp == NULL)
                break;

            /*
             * If there is more than one iod on this mount, then defect
             * so that the iods can be shared out fairly between the mounts.
             */
            if (nfs_defect && nmp->nm_bufqiods > 1) {
                NFS_DPF(ASYNCIO,
                        ("nfssvc_iod: iod %d defecting from mount %p\n",
                         myiod, nmp));
                nfs_iodmount[myiod] = NULL;
                nmp->nm_bufqiods--;
                break;
            }
        }
    }
finish:
    nfs_asyncdaemon[myiod] = 0;
    if (nmp)
        nmp->nm_bufqiods--;
    nfs_iodwant[myiod] = NFSIOD_NOT_AVAILABLE;
    nfs_iodmount[myiod] = NULL;
    /* Someone may be waiting for the last nfsiod to terminate. */
    if (--nfs_numasync == 0)
        wakeup(&nfs_numasync);
    mtx_unlock(&nfs_iod_mtx);
    if ((error == 0) || (error == EWOULDBLOCK))
        kproc_exit(0);
    /* Abnormal termination */
    kproc_exit(1);
}
Example #9
0
/*
 * Evil wildcarding resource string lookup.
 * This walks the supplied env string table and returns a match.
 * The start point can be remembered for incremental searches.
 */
static int
res_find(int *line, int *startln,
         const char *name, int *unit, const char *resname, const char *value,
         const char **ret_name, int *ret_namelen, int *ret_unit,
         const char **ret_resname, int *ret_resnamelen, const char **ret_value)
{
    int n = 0, hit, i = 0;
    char r_name[32];
    int r_unit;
    char r_resname[32];
    char r_value[128];
    const char *s, *cp;
    char *p;

    if (checkmethod) {
        hintp = NULL;

        switch (hintmode) {
        case 0:		/* loader hints in environment only */
            break;
        case 1:		/* static hints only */
            hintp = static_hints;
            checkmethod = 0;
            break;
        case 2:		/* fallback mode */
            if (dynamic_kenv) {
                mtx_lock(&kenv_lock);
                cp = kenvp[0];
                for (i = 0; cp != NULL; cp = kenvp[++i]) {
                    if (!strncmp(cp, "hint.", 5)) {
                        use_kenv = 1;
                        checkmethod = 0;
                        break;
                    }
                }
                mtx_unlock(&kenv_lock);
            } else {
                cp = kern_envp;
                while (cp) {
                    if (strncmp(cp, "hint.", 5) == 0) {
                        cp = NULL;
                        hintp = kern_envp;
                        break;
                    }
                    while (*cp != '\0')
                        cp++;
                    cp++;
                    if (*cp == '\0') {
                        cp = NULL;
                        hintp = static_hints;
                        break;
                    }
                }
            }
            break;
        default:
            break;
        }
        if (hintp == NULL) {
            if (dynamic_kenv) {
                use_kenv = 1;
                checkmethod = 0;
            } else
                hintp = kern_envp;
        }
    }

    if (use_kenv) {
        mtx_lock(&kenv_lock);
        i = 0;
        cp = kenvp[0];
        if (cp == NULL) {
            mtx_unlock(&kenv_lock);
            return (ENOENT);
        }
    } else
        cp = hintp;
    while (cp) {
        hit = 1;
        (*line)++;
        if (strncmp(cp, "hint.", 5) != 0)
            hit = 0;
        else
            n = sscanf(cp, "hint.%32[^.].%d.%32[^=]=%128s",
                       r_name, &r_unit, r_resname, r_value);
        if (hit && n != 4) {
            printf("CONFIG: invalid hint '%s'\n", cp);
            p = strchr(cp, 'h');
            *p = 'H';
            hit = 0;
        }
        if (hit && startln && *startln >= 0 && *line < *startln)
            hit = 0;
        if (hit && name && strcmp(name, r_name) != 0)
            hit = 0;
        if (hit && unit && *unit != r_unit)
            hit = 0;
        if (hit && resname && strcmp(resname, r_resname) != 0)
            hit = 0;
        if (hit && value && strcmp(value, r_value) != 0)
            hit = 0;
        if (hit)
            break;
        if (use_kenv) {
            cp = kenvp[++i];
            if (cp == NULL)
                break;
        } else {
            while (*cp != '\0')
                cp++;
            cp++;
            if (*cp == '\0') {
                cp = NULL;
                break;
            }
        }
    }
    if (use_kenv)
        mtx_unlock(&kenv_lock);
    if (cp == NULL)
        return ENOENT;

    s = cp;
    /* This is a bit of a hack, but at least it is reentrant. */
    /* Note that it returns some !unterminated! strings. */
    s = strchr(s, '.') + 1;		/* start of device */
    if (ret_name)
        *ret_name = s;
    s = strchr(s, '.') + 1;		/* start of unit */
    if (ret_namelen && ret_name)
        *ret_namelen = s - *ret_name - 1; /* device length */
    if (ret_unit)
        *ret_unit = r_unit;
    s = strchr(s, '.') + 1;		/* start of resname */
    if (ret_resname)
        *ret_resname = s;
    s = strchr(s, '=') + 1;		/* start of value */
    if (ret_resnamelen && ret_resname)
        *ret_resnamelen = s - *ret_resname - 1; /* value len */
    if (ret_value)
        *ret_value = s;
    if (startln)			/* line number for anchor */
        *startln = *line + 1;
    return 0;
}
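/*
 * Small standalone sketch of the hint parsing done by res_find() above,
 * using the same sscanf() format string.  The sample hint string and the
 * buffer sizes are assumptions chosen for this example; it is not part of
 * the kernel code.
 */
#include <stdio.h>

int
main(void)
{
	const char *hint = "hint.sio.0.irq=4";	/* hypothetical hint */
	char name[33], resname[33], value[129];
	int unit, n;

	/* Same field widths as the kernel format string. */
	n = sscanf(hint, "hint.%32[^.].%d.%32[^=]=%128s",
	    name, &unit, resname, value);
	if (n != 4) {
		printf("invalid hint '%s'\n", hint);
		return (1);
	}
	printf("name=%s unit=%d resname=%s value=%s\n",
	    name, unit, resname, value);
	return (0);
}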
Example #10
0
static int
iir_ioctl(struct cdev *dev, u_long cmd, caddr_t cmdarg, int flags, struct thread * p)
{
    GDT_DPRINTF(GDT_D_DEBUG, ("iir_ioctl() cmd 0x%lx\n",cmd));

    ++gdt_stat.io_count_act;
    if (gdt_stat.io_count_act > gdt_stat.io_count_max)
        gdt_stat.io_count_max = gdt_stat.io_count_act;

    switch (cmd) {
      case GDT_IOCTL_GENERAL:
        {
            gdt_ucmd_t *ucmd;
            struct gdt_softc *gdt;

            ucmd = (gdt_ucmd_t *)cmdarg;
            gdt = gdt_minor2softc(dev, ucmd->io_node);
            if (gdt == NULL)
                return (ENXIO);
	    mtx_lock(&gdt->sc_lock);
            TAILQ_INSERT_TAIL(&gdt->sc_ucmd_queue, ucmd, links);
            ucmd->complete_flag = FALSE;
            gdt_next(gdt);
            if (!ucmd->complete_flag)
                (void) mtx_sleep(ucmd, &gdt->sc_lock, PCATCH | PRIBIO, "iirucw",
		    0);
	    mtx_unlock(&gdt->sc_lock);
            break;
        }

      case GDT_IOCTL_DRVERS:
      case GDT_IOCTL_DRVERS_OLD:
        *(int *)cmdarg = 
            (IIR_DRIVER_VERSION << 8) | IIR_DRIVER_SUBVERSION;
        break;

      case GDT_IOCTL_CTRTYPE:
      case GDT_IOCTL_CTRTYPE_OLD:
        {
            gdt_ctrt_t *p;
            struct gdt_softc *gdt; 
            
            p = (gdt_ctrt_t *)cmdarg;
            gdt = gdt_minor2softc(dev, p->io_node);
            if (gdt == NULL)
                return (ENXIO);
            /* only RP controllers */
            p->ext_type = 0x6000 | gdt->sc_device;
            if (gdt->sc_vendor == INTEL_VENDOR_ID_IIR) {
                p->oem_id = OEM_ID_INTEL;
                p->type = 0xfd;
                /* new -> subdevice into ext_type */
                if (gdt->sc_device >= 0x600)
                    p->ext_type = 0x6000 | gdt->sc_subdevice;
            } else {
                p->oem_id = OEM_ID_ICP;
                p->type = 0xfe;
                /* new -> subdevice into ext_type */
                if (gdt->sc_device >= 0x300)
                    p->ext_type = 0x6000 | gdt->sc_subdevice;
            }
            p->info = (gdt->sc_bus << 8) | (gdt->sc_slot << 3);
            p->device_id = gdt->sc_device;
            p->sub_device_id = gdt->sc_subdevice;
            break;
        }

      case GDT_IOCTL_OSVERS:
        {
            gdt_osv_t *p;

            p = (gdt_osv_t *)cmdarg;
            p->oscode = 10;
	    p->version = osreldate / 100000;
	    p->subversion = osreldate / 1000 % 100;
	    p->revision = 0;
            strcpy(p->name, ostype);
            break;
        }

      case GDT_IOCTL_CTRCNT:
        *(int *)cmdarg = gdt_cnt;
        break;

      case GDT_IOCTL_EVENT:
        {
            gdt_event_t *p;

            p = (gdt_event_t *)cmdarg;
            if (p->erase == 0xff) {
                if (p->dvr.event_source == GDT_ES_TEST)
                    p->dvr.event_data.size = sizeof(p->dvr.event_data.eu.test);
                else if (p->dvr.event_source == GDT_ES_DRIVER)
                    p->dvr.event_data.size= sizeof(p->dvr.event_data.eu.driver);
                else if (p->dvr.event_source == GDT_ES_SYNC)
                    p->dvr.event_data.size = sizeof(p->dvr.event_data.eu.sync);
                else
                    p->dvr.event_data.size = sizeof(p->dvr.event_data.eu.async);
                gdt_store_event(p->dvr.event_source, p->dvr.event_idx,
                                &p->dvr.event_data);
            } else if (p->erase == 0xfe) {
                gdt_clear_events();
            } else if (p->erase == 0) {
                p->handle = gdt_read_event(p->handle, &p->dvr);
            } else {
                gdt_readapp_event((u_int8_t)p->erase, &p->dvr);
            }
            break;
        }
        
      case GDT_IOCTL_STATIST:
        {
            gdt_statist_t *p;
            
            p = (gdt_statist_t *)cmdarg;
            bcopy(&gdt_stat, p, sizeof(gdt_statist_t));
            break;
        }

      default:
        break;
    }

    --gdt_stat.io_count_act;
    return (0);
}
Example #11
0
static int
ahci_em_attach(device_t dev)
{
	device_t parent = device_get_parent(dev);
	struct ahci_controller *ctlr = device_get_softc(parent);
	struct ahci_enclosure *enc = device_get_softc(dev);
	struct cam_devq *devq;
	int i, c, rid, error;
	char buf[32];

	enc->dev = dev;
	enc->quirks = ctlr->quirks;
	enc->channels = ctlr->channels;
	enc->ichannels = ctlr->ichannels;
	mtx_init(&enc->mtx, "AHCI enclosure lock", NULL, MTX_DEF);
	rid = 0;
	if (!(enc->r_memc = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
	    &rid, RF_ACTIVE)))
		return (ENXIO);
	enc->capsem = ATA_INL(enc->r_memc, 0);
	rid = 1;
	if (!(enc->r_memt = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
	    &rid, RF_ACTIVE))) {
		error = ENXIO;
		goto err0;
	}
	if ((enc->capsem & (AHCI_EM_XMT | AHCI_EM_SMB)) == 0) {
		rid = 2;
		if (!(enc->r_memr = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
		    &rid, RF_ACTIVE))) {
			error = ENXIO;
			goto err0;
		}
	} else
		enc->r_memr = NULL;
	mtx_lock(&enc->mtx);
	ahci_em_reset(dev);
	rid = ATA_IRQ_RID;
	/* Create the device queue for our SIM. */
	devq = cam_simq_alloc(1);
	if (devq == NULL) {
		device_printf(dev, "Unable to allocate SIM queue\n");
		error = ENOMEM;
		goto err1;
	}
	/* Construct SIM entry */
	enc->sim = cam_sim_alloc(ahciemaction, ahciempoll, "ahciem", enc,
	    device_get_unit(dev), &enc->mtx,
	    1, 0, devq);
	if (enc->sim == NULL) {
		cam_simq_free(devq);
		device_printf(dev, "Unable to allocate SIM\n");
		error = ENOMEM;
		goto err1;
	}
	if (xpt_bus_register(enc->sim, dev, 0) != CAM_SUCCESS) {
		device_printf(dev, "unable to register xpt bus\n");
		error = ENXIO;
		goto err2;
	}
	if (xpt_create_path(&enc->path, /*periph*/NULL, cam_sim_path(enc->sim),
	    CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
		device_printf(dev, "Unable to create path\n");
		error = ENXIO;
		goto err3;
	}
	mtx_unlock(&enc->mtx);
	if (bootverbose) {
		device_printf(dev, "Caps:%s%s%s%s%s%s%s%s\n",
		    (enc->capsem & AHCI_EM_PM) ? " PM":"",
		    (enc->capsem & AHCI_EM_ALHD) ? " ALHD":"",
		    (enc->capsem & AHCI_EM_XMT) ? " XMT":"",
		    (enc->capsem & AHCI_EM_SMB) ? " SMB":"",
		    (enc->capsem & AHCI_EM_SGPIO) ? " SGPIO":"",
		    (enc->capsem & AHCI_EM_SES2) ? " SES-2":"",
		    (enc->capsem & AHCI_EM_SAFTE) ? " SAF-TE":"",
		    (enc->capsem & AHCI_EM_LED) ? " LED":"");
	}
	if ((enc->capsem & AHCI_EM_LED)) {
		for (c = 0; c < enc->channels; c++) {
			if ((enc->ichannels & (1 << c)) == 0)
				continue;
			for (i = 0; i < AHCI_NUM_LEDS; i++) {
				enc->leds[c * AHCI_NUM_LEDS + i].dev = dev;
				enc->leds[c * AHCI_NUM_LEDS + i].num =
				    c * AHCI_NUM_LEDS + i;
			}
			if ((enc->capsem & AHCI_EM_ALHD) == 0) {
				snprintf(buf, sizeof(buf), "%s.%d.act",
				    device_get_nameunit(parent), c);
				enc->leds[c * AHCI_NUM_LEDS + 0].led =
				    led_create(ahci_em_led,
				    &enc->leds[c * AHCI_NUM_LEDS + 0], buf);
			}
			snprintf(buf, sizeof(buf), "%s.%d.locate",
			    device_get_nameunit(parent), c);
			enc->leds[c * AHCI_NUM_LEDS + 1].led =
			    led_create(ahci_em_led,
			    &enc->leds[c * AHCI_NUM_LEDS + 1], buf);
			snprintf(buf, sizeof(buf), "%s.%d.fault",
			    device_get_nameunit(parent), c);
			enc->leds[c * AHCI_NUM_LEDS + 2].led =
			    led_create(ahci_em_led,
			    &enc->leds[c * AHCI_NUM_LEDS + 2], buf);
		}
	}
	return (0);

err3:
	xpt_bus_deregister(cam_sim_path(enc->sim));
err2:
	cam_sim_free(enc->sim, /*free_devq*/TRUE);
err1:
	mtx_unlock(&enc->mtx);
	if (enc->r_memr)
		bus_release_resource(dev, SYS_RES_MEMORY, 2, enc->r_memr);
err0:
	if (enc->r_memt)
		bus_release_resource(dev, SYS_RES_MEMORY, 1, enc->r_memt);
	bus_release_resource(dev, SYS_RES_MEMORY, 0, enc->r_memc);
	mtx_destroy(&enc->mtx);
	return (error);
}
Example #12
0
/*
 * Mount a remote root fs via NFS. This depends on the info in the
 * nfs_diskless structure that has been filled in properly by some primary
 * bootstrap.
 * It goes something like this:
 * - do enough of "ifconfig" by calling ifioctl() so that the system
 *   can talk to the server
 * - If nfs_diskless.mygateway is filled in, use that address as
 *   a default gateway.
 * - build the rootfs mount point and call mountnfs() to do the rest.
 *
 * It is assumed to be safe to read, modify, and write the nfsv3_diskless
 * structure, as well as other global NFS client variables here, as
 * nfs_mountroot() will be called once in the boot before any other NFS
 * client activity occurs.
 */
int
nfs_mountroot(struct mount *mp)
{
	struct thread *td = curthread;
	struct nfsv3_diskless *nd = &nfsv3_diskless;
	struct socket *so;
	struct vnode *vp;
	struct ifreq ir;
	int error;
	u_long l;
	char buf[128];
	char *cp;


#if defined(BOOTP_NFSROOT) && defined(BOOTP)
	bootpc_init();		/* use bootp to get nfs_diskless filled in */
#elif defined(NFS_ROOT)
	nfs_setup_diskless();
#endif

	if (nfs_diskless_valid == 0) {
		return (-1);
	}
	if (nfs_diskless_valid == 1)
		nfs_convert_diskless();

	/*
	 * XXX splnet, so networks will receive...
	 */
	splnet();

	/*
	 * Do enough of ifconfig(8) so that the critical net interface can
	 * talk to the server.
	 */
	error = socreate(nd->myif.ifra_addr.sa_family, &so, nd->root_args.sotype, 0,
	    td->td_ucred, td);
	if (error)
		panic("nfs_mountroot: socreate(%04x): %d",
			nd->myif.ifra_addr.sa_family, error);

#if 0 /* XXX Bad idea */
	/*
	 * We might not have been told the right interface, so we pass
	 * over the first ten interfaces of the same kind, until we get
	 * one of them configured.
	 */

	for (i = strlen(nd->myif.ifra_name) - 1;
		nd->myif.ifra_name[i] >= '0' &&
		nd->myif.ifra_name[i] <= '9';
		nd->myif.ifra_name[i] ++) {
		error = ifioctl(so, SIOCAIFADDR, (caddr_t)&nd->myif, td);
		if(!error)
			break;
	}
#endif

	error = ifioctl(so, SIOCAIFADDR, (caddr_t)&nd->myif, td);
	if (error)
		panic("nfs_mountroot: SIOCAIFADDR: %d", error);

	if ((cp = getenv("boot.netif.mtu")) != NULL) {
		ir.ifr_mtu = strtol(cp, NULL, 10);
		bcopy(nd->myif.ifra_name, ir.ifr_name, IFNAMSIZ);
		freeenv(cp);
		error = ifioctl(so, SIOCSIFMTU, (caddr_t)&ir, td);
		if (error)
			printf("nfs_mountroot: SIOCSIFMTU: %d", error);
	}
	soclose(so);

	/*
	 * If the gateway field is filled in, set it as the default route.
	 * Note that pxeboot will set a default route of 0 if the route
	 * is not set by the DHCP server.  Check also for a value of 0
	 * to avoid panicking inappropriately in that situation.
	 */
	if (nd->mygateway.sin_len != 0 &&
	    nd->mygateway.sin_addr.s_addr != 0) {
		struct sockaddr_in mask, sin;

		bzero((caddr_t)&mask, sizeof(mask));
		sin = mask;
		sin.sin_family = AF_INET;
		sin.sin_len = sizeof(sin);
                /* XXX MRT use table 0 for this sort of thing */
		CURVNET_SET(TD_TO_VNET(td));
		error = rtrequest_fib(RTM_ADD, (struct sockaddr *)&sin,
		    (struct sockaddr *)&nd->mygateway,
		    (struct sockaddr *)&mask,
		    RTF_UP | RTF_GATEWAY, NULL, RT_DEFAULT_FIB);
		CURVNET_RESTORE();
		if (error)
			panic("nfs_mountroot: RTM_ADD: %d", error);
	}

	/*
	 * Create the rootfs mount point.
	 */
	nd->root_args.fh = nd->root_fh;
	nd->root_args.fhsize = nd->root_fhsize;
	l = ntohl(nd->root_saddr.sin_addr.s_addr);
	snprintf(buf, sizeof(buf), "%ld.%ld.%ld.%ld:%s",
		(l >> 24) & 0xff, (l >> 16) & 0xff,
		(l >>  8) & 0xff, (l >>  0) & 0xff, nd->root_hostnam);
	printf("NFS ROOT: %s\n", buf);
	nd->root_args.hostname = buf;
	if ((error = nfs_mountdiskless(buf,
	    &nd->root_saddr, &nd->root_args, td, &vp, mp)) != 0) {
		return (error);
	}

	/*
	 * This is not really an nfs issue, but it is much easier to
	 * set hostname here and then let the "/etc/rc.xxx" files
	 * mount the right /var based upon its preset value.
	 */
	mtx_lock(&prison0.pr_mtx);
	strlcpy(prison0.pr_hostname, nd->my_hostnam,
	    sizeof (prison0.pr_hostname));
	mtx_unlock(&prison0.pr_mtx);
	inittodr(ntohl(nd->root_time));
	return (0);
}
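/*
 * Standalone sketch of the root-path formatting done above: turn a 32-bit
 * IPv4 address in host byte order plus a path into "a.b.c.d:/path".  The
 * sample address and export path are invented for this example.
 */
#include <stdio.h>

int
main(void)
{
	unsigned long l = 0xc0a80102UL;	/* 192.168.1.2, host byte order */
	char buf[128];

	snprintf(buf, sizeof(buf), "%lu.%lu.%lu.%lu:%s",
	    (l >> 24) & 0xff, (l >> 16) & 0xff,
	    (l >> 8) & 0xff, l & 0xff, "/export/root");
	printf("NFS ROOT: %s\n", buf);
	return (0);
}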
Example #13
0
/*
 * nfs statfs call
 */
static int
nfs_statfs(struct mount *mp, struct statfs *sbp)
{
	struct vnode *vp;
	struct thread *td;
	struct nfs_statfs *sfp;
	caddr_t bpos, dpos;
	struct nfsmount *nmp = VFSTONFS(mp);
	int error = 0, v3 = (nmp->nm_flag & NFSMNT_NFSV3), retattr;
	struct mbuf *mreq, *mrep, *md, *mb;
	struct nfsnode *np;
	u_quad_t tquad;

	td = curthread;
#ifndef nolint
	sfp = NULL;
#endif
	error = vfs_busy(mp, MBF_NOWAIT);
	if (error)
		return (error);
	error = nfs_nget(mp, (nfsfh_t *)nmp->nm_fh, nmp->nm_fhsize, &np, LK_EXCLUSIVE);
	if (error) {
		vfs_unbusy(mp);
		return (error);
	}
	vp = NFSTOV(np);
	mtx_lock(&nmp->nm_mtx);
	if (v3 && (nmp->nm_state & NFSSTA_GOTFSINFO) == 0) {
		mtx_unlock(&nmp->nm_mtx);		
		(void)nfs_fsinfo(nmp, vp, td->td_ucred, td);
	} else
		mtx_unlock(&nmp->nm_mtx);
	nfsstats.rpccnt[NFSPROC_FSSTAT]++;
	mreq = nfsm_reqhead(vp, NFSPROC_FSSTAT, NFSX_FH(v3));
	mb = mreq;
	bpos = mtod(mb, caddr_t);
	nfsm_fhtom(vp, v3);
	nfsm_request(vp, NFSPROC_FSSTAT, td, td->td_ucred);
	if (v3)
		nfsm_postop_attr(vp, retattr);
	if (error) {
		if (mrep != NULL)
			m_freem(mrep);
		goto nfsmout;
	}
	sfp = nfsm_dissect(struct nfs_statfs *, NFSX_STATFS(v3));
	mtx_lock(&nmp->nm_mtx);
	sbp->f_iosize = nfs_iosize(nmp);
	mtx_unlock(&nmp->nm_mtx);
	if (v3) {
		sbp->f_bsize = NFS_FABLKSIZE;
		tquad = fxdr_hyper(&sfp->sf_tbytes);
		sbp->f_blocks = tquad / NFS_FABLKSIZE;
		tquad = fxdr_hyper(&sfp->sf_fbytes);
		sbp->f_bfree = tquad / NFS_FABLKSIZE;
		tquad = fxdr_hyper(&sfp->sf_abytes);
		sbp->f_bavail = tquad / NFS_FABLKSIZE;
		sbp->f_files = (fxdr_unsigned(int32_t,
		    sfp->sf_tfiles.nfsuquad[1]) & 0x7fffffff);
		sbp->f_ffree = (fxdr_unsigned(int32_t,
		    sfp->sf_ffiles.nfsuquad[1]) & 0x7fffffff);
	} else {
		sbp->f_bsize = fxdr_unsigned(int32_t, sfp->sf_bsize);
		sbp->f_blocks = fxdr_unsigned(int32_t, sfp->sf_blocks);
		sbp->f_bfree = fxdr_unsigned(int32_t, sfp->sf_bfree);
		sbp->f_bavail = fxdr_unsigned(int32_t, sfp->sf_bavail);
		sbp->f_files = 0;
		sbp->f_ffree = 0;
	}
	m_freem(mrep);
nfsmout:
	vput(vp);
	vfs_unbusy(mp);
	return (error);
}
Example #14
0
static int
nfs_sysctl(struct mount *mp, fsctlop_t op, struct sysctl_req *req)
{
	struct nfsmount *nmp = VFSTONFS(mp);
	struct vfsquery vq;
	int error;

	bzero(&vq, sizeof(vq));
	switch (op) {
#if 0
	case VFS_CTL_NOLOCKS:
		val = (nmp->nm_flag & NFSMNT_NOLOCKS) ? 1 : 0;
 		if (req->oldptr != NULL) {
 			error = SYSCTL_OUT(req, &val, sizeof(val));
 			if (error)
 				return (error);
 		}
 		if (req->newptr != NULL) {
 			error = SYSCTL_IN(req, &val, sizeof(val));
 			if (error)
 				return (error);
			if (val)
				nmp->nm_flag |= NFSMNT_NOLOCKS;
			else
				nmp->nm_flag &= ~NFSMNT_NOLOCKS;
 		}
		break;
#endif
	case VFS_CTL_QUERY:
		mtx_lock(&nmp->nm_mtx);
		if (nmp->nm_state & NFSSTA_TIMEO)
			vq.vq_flags |= VQ_NOTRESP;
		mtx_unlock(&nmp->nm_mtx);
#if 0
		if (!(nmp->nm_flag & NFSMNT_NOLOCKS) &&
		    (nmp->nm_state & NFSSTA_LOCKTIMEO))
			vq.vq_flags |= VQ_NOTRESPLOCK;
#endif
		error = SYSCTL_OUT(req, &vq, sizeof(vq));
		break;
 	case VFS_CTL_TIMEO:
 		if (req->oldptr != NULL) {
 			error = SYSCTL_OUT(req, &nmp->nm_tprintf_initial_delay,
 			    sizeof(nmp->nm_tprintf_initial_delay));
 			if (error)
 				return (error);
 		}
 		if (req->newptr != NULL) {
			error = vfs_suser(mp, req->td);
			if (error)
				return (error);
 			error = SYSCTL_IN(req, &nmp->nm_tprintf_initial_delay,
 			    sizeof(nmp->nm_tprintf_initial_delay));
 			if (error)
 				return (error);
 			if (nmp->nm_tprintf_initial_delay < 0)
 				nmp->nm_tprintf_initial_delay = 0;
 		}
		break;
	default:
		return (ENOTSUP);
	}
	return (0);
}
Example #15
0
/*
 * Allocate a vnode
 */
int
pfs_vncache_alloc(struct mount *mp, struct vnode **vpp,
		  struct pfs_node *pn, pid_t pid)
{
	struct pfs_vdata *pvd;
	int error;

	/*
	 * See if the vnode is in the cache.
	 * XXX linear search is not very efficient.
	 */
	mtx_lock(&pfs_vncache_mutex);
	for (pvd = pfs_vncache; pvd; pvd = pvd->pvd_next) {
		if (pvd->pvd_pn == pn && pvd->pvd_pid == pid) {
			if (vget(pvd->pvd_vnode, 0, curthread) == 0) {
				++pfs_vncache_hits;
				*vpp = pvd->pvd_vnode;
				mtx_unlock(&pfs_vncache_mutex);
				/* XXX see comment at top of pfs_lookup() */
				cache_purge(*vpp);
				vn_lock(*vpp, LK_RETRY | LK_EXCLUSIVE,
				    curthread);
				return (0);
			}
			/* XXX if this can happen, we're in trouble */
			break;
		}
	}
	mtx_unlock(&pfs_vncache_mutex);
	++pfs_vncache_misses;

	/* nope, get a new one */
	MALLOC(pvd, struct pfs_vdata *, sizeof *pvd, M_PFSVNCACHE, M_WAITOK);
	if (++pfs_vncache_entries > pfs_vncache_maxentries)
		pfs_vncache_maxentries = pfs_vncache_entries;
	error = getnewvnode("pseudofs", mp, pfs_vnodeop_p, vpp);
	if (error) {
		FREE(pvd, M_PFSVNCACHE);
		return (error);
	}
	pvd->pvd_pn = pn;
	pvd->pvd_pid = pid;
	(*vpp)->v_data = pvd;
	switch (pn->pn_type) {
	case pfstype_root:
		(*vpp)->v_vflag = VV_ROOT;
#if 0
		printf("root vnode allocated\n");
#endif
		/* fall through */
	case pfstype_dir:
	case pfstype_this:
	case pfstype_parent:
	case pfstype_procdir:
		(*vpp)->v_type = VDIR;
		break;
	case pfstype_file:
		(*vpp)->v_type = VREG;
		break;
	case pfstype_symlink:
		(*vpp)->v_type = VLNK;
		break;
	case pfstype_none:
		KASSERT(0, ("pfs_vncache_alloc called for null node\n"));
	default:
		panic("%s has unexpected type: %d", pn->pn_name, pn->pn_type);
	}
	/*
	 * Propagate flag through to vnode so users know it can change
	 * if the process changes (i.e. execve)
	 */
	if ((pn->pn_flags & PFS_PROCDEP) != 0)
		(*vpp)->v_vflag |= VV_PROCDEP;
	pvd->pvd_vnode = *vpp;
	mtx_lock(&pfs_vncache_mutex);
	pvd->pvd_prev = NULL;
	pvd->pvd_next = pfs_vncache;
	if (pvd->pvd_next)
		pvd->pvd_next->pvd_prev = pvd;
	pfs_vncache = pvd;
	mtx_unlock(&pfs_vncache_mutex);
        (*vpp)->v_vnlock->lk_flags |= LK_CANRECURSE;
	vn_lock(*vpp, LK_RETRY | LK_EXCLUSIVE, curthread);
	return (0);
}
Example #16
0
static u_int 
adb_mouse_receive_packet(device_t dev, u_char status, u_char command, 
    u_char reg, int len, u_char *data) 
{
	struct adb_mouse_softc *sc;
	int i = 0;
	int xdelta, ydelta;
	int buttons, tmp_buttons;

	sc = device_get_softc(dev);

	if (command != ADB_COMMAND_TALK || reg != 0 || len < 2)
		return (0);

	ydelta = data[0] & 0x7f;
	xdelta = data[1] & 0x7f;

	buttons = 0;
	buttons |= !(data[0] & 0x80);
	buttons |= !(data[1] & 0x80) << 1;

	if (sc->flags & AMS_EXTENDED) {
		for (i = 2; i < len && i < 5; i++) {
			xdelta |= (data[i] & 0x07) << (3*i + 1);
			ydelta |= (data[i] & 0x70) << (3*i - 3);

			buttons |= !(data[i] & 0x08) << (2*i - 2);
			buttons |= !(data[i] & 0x80) << (2*i - 1);
		}
	} else {
		len = 2; /* Ignore extra data */
	}

	/* Do sign extension as necessary */
	if (xdelta & (0x40 << 3*(len-2)))
		xdelta |= 0xffffffc0 << 3*(len - 2);
	if (ydelta & (0x40 << 3*(len-2)))
		ydelta |= 0xffffffc0 << 3*(len - 2);

	if ((sc->flags & AMS_TOUCHPAD) && (sc->sc_tapping == 1)) {
		tmp_buttons = buttons;
		if (buttons == 0x12) {
			/* Map a double tap on button 3.
			   Keep the button state for the next sequence.
			   A double tap sequence is followed by a single tap
			   sequence.
			*/
			tmp_buttons = 0x3;
			sc->button_buf = tmp_buttons;
		} else if (buttons == 0x2) {
			/* Map a single tap on button 2. But only if it is
			   not a successor from a double tap.
			*/
			if (sc->button_buf != 0x3) 
				tmp_buttons = 0x2;
			else
				tmp_buttons = 0;

			sc->button_buf = 0;
		}
		buttons = tmp_buttons;
	}

	/*
	 * Some mice report high-numbered buttons on the wrong button number,
	 * so set the highest-numbered real button as pressed if there are
	 * mysterious high-numbered ones set.
	 *
	 * Don't do this for touchpads, because touchpads also trigger
	 * high button events when they are touched.
	 */

	if (rounddown2(buttons, 1 << sc->hw.buttons)
	    && !(sc->flags & AMS_TOUCHPAD)) {
		buttons |= 1 << (sc->hw.buttons - 1);
	}
	buttons &= (1 << sc->hw.buttons) - 1;

	mtx_lock(&sc->sc_mtx);

	/* Add in our new deltas, and take into account
	   Apple's opposite meaning for Y axis motion */

	sc->xdelta += xdelta;
	sc->ydelta -= ydelta;

	sc->buttons = buttons;

	mtx_unlock(&sc->sc_mtx);

	cv_broadcast(&sc->sc_cv);
	selwakeuppri(&sc->rsel, PZERO);

	return (0);
}
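/*
 * Standalone sketch of the 7-bit sign extension performed on the motion
 * deltas above.  The kernel code ORs in 0xffffffc0; ~0x7f has the same
 * effect here because bit 6 is already known to be set.  Sample values are
 * arbitrary and only illustrative.
 */
#include <stdio.h>

/* Sign-extend the low 7 bits of 'v' into a full int. */
static int
sext7(int v)
{
	v &= 0x7f;
	if (v & 0x40)
		v |= ~0x7f;
	return (v);
}

int
main(void)
{
	printf("%d %d %d\n", sext7(0x01), sext7(0x7f), sext7(0x40));
	/* Expected output: 1 -1 -64 */
	return (0);
}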
Example #17
0
int
mk48txx_attach(device_t dev)
{
	struct mk48txx_softc *sc;
	int i;
	uint8_t wday;

	sc = device_get_softc(dev);

	if (mtx_initialized(&sc->sc_mtx) == 0) {
		device_printf(dev, "%s: mutex not initialized\n", __func__);
		return (ENXIO);
	}

	device_printf(dev, "model %s", sc->sc_model);
	i = sizeof(mk48txx_models) / sizeof(mk48txx_models[0]);
	while (--i >= 0) {
		if (strcmp(sc->sc_model, mk48txx_models[i].name) == 0) {
			break;
		}
	}
	if (i < 0) {
		device_printf(dev, " (unsupported)\n");
		return (ENXIO);
	}
	printf("\n");
	sc->sc_nvramsz = mk48txx_models[i].nvramsz;
	sc->sc_clkoffset = mk48txx_models[i].clkoff;

	if (sc->sc_nvrd == NULL)
		sc->sc_nvrd = mk48txx_def_nvrd;
	if (sc->sc_nvwr == NULL)
		sc->sc_nvwr = mk48txx_def_nvwr;

	if (mk48txx_models[i].flags & MK48TXX_EXT_REGISTERS) {
		mtx_lock(&sc->sc_mtx);
		if ((*sc->sc_nvrd)(dev, sc->sc_clkoffset + MK48TXX_FLAGS) &
		    MK48TXX_FLAGS_BL) {
			mtx_unlock(&sc->sc_mtx);
			device_printf(dev, "%s: battery low\n", __func__);
			return (ENXIO);
		}
		mtx_unlock(&sc->sc_mtx);
	}

	if (sc->sc_flag & MK48TXX_NO_CENT_ADJUST) {
		/*
		 * Use MK48TXX_WDAY_CB instead of manually adjusting the
		 * century.
		 */
		if (!(mk48txx_models[i].flags & MK48TXX_EXT_REGISTERS)) {
			device_printf(dev, "%s: no century bit\n", __func__);
			return (ENXIO);
		} else {
			mtx_lock(&sc->sc_mtx);
			wday = (*sc->sc_nvrd)
			    (dev, sc->sc_clkoffset + MK48TXX_IWDAY);
			wday |= MK48TXX_WDAY_CEB;
			(*sc->sc_nvwr)
			    (dev, sc->sc_clkoffset + MK48TXX_IWDAY, wday);
			mtx_unlock(&sc->sc_mtx);
		}
	}

	clock_register(dev, 1000000);	/* 1 second resolution */

	if ((sc->sc_flag & MK48TXX_WDOG_REGISTER) &&
	    (mk48txx_models[i].flags & MK48TXX_EXT_REGISTERS)) {
		sc->sc_wet = EVENTHANDLER_REGISTER(watchdog_list,
		    mk48txx_watchdog, dev, 0);
		device_printf(dev,
		    "watchdog registered, timeout interval max. 128 sec\n");
	}

	return (0);
}
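/*
 * Minimal sketch of the model-table lookup done above: walk a name/size
 * table backwards and stop on the first strcmp() match.  The table contents
 * and the requested model name are made up for this example.
 */
#include <stdio.h>
#include <string.h>

struct model {
	const char *name;
	int nvramsz;
};

static const struct model models[] = {
	{ "mk48t02", 2048 },
	{ "mk48t08", 8192 },
	{ "mk48t59", 8192 },
};

int
main(void)
{
	const char *want = "mk48t08";	/* hypothetical sc_model value */
	int i = sizeof(models) / sizeof(models[0]);

	while (--i >= 0) {
		if (strcmp(want, models[i].name) == 0)
			break;
	}
	if (i < 0)
		printf("%s (unsupported)\n", want);
	else
		printf("%s: %d bytes of NVRAM\n", want, models[i].nvramsz);
	return (0);
}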
Example #18
0
static int
ams_read(struct cdev *dev, struct uio *uio, int flag)
{
	struct adb_mouse_softc *sc;
	size_t len;
	int8_t outpacket[8];
	int error;

	sc = CDEV_GET_SOFTC(dev);
	if (sc == NULL)
		return (EIO);

	if (uio->uio_resid <= 0)
		return (0);

	mtx_lock(&sc->sc_mtx);

	if (!sc->packet_read_len) {
		if (sc->xdelta == 0 && sc->ydelta == 0 && 
		   sc->buttons == sc->last_buttons) {

			if (flag & O_NONBLOCK) {
				mtx_unlock(&sc->sc_mtx);
				return EWOULDBLOCK;
			}

	
			/* Otherwise, block on new data */
			error = cv_wait_sig(&sc->sc_cv, &sc->sc_mtx);
			if (error) {
				mtx_unlock(&sc->sc_mtx);
				return (error);
			}
		}

		sc->packet[0] = 1 << 7;
		sc->packet[0] |= (!(sc->buttons & 1)) << 2;
		sc->packet[0] |= (!(sc->buttons & 4)) << 1;
		sc->packet[0] |= (!(sc->buttons & 2));

		if (sc->xdelta > 127) {
			sc->packet[1] = 127;
			sc->packet[3] = sc->xdelta - 127;
		} else if (sc->xdelta < -127) {
			sc->packet[1] = -127;
			sc->packet[3] = sc->xdelta + 127;
		} else {
			sc->packet[1] = sc->xdelta;
			sc->packet[3] = 0;
		}

		if (sc->ydelta > 127) {
			sc->packet[2] = 127;
			sc->packet[4] = sc->ydelta - 127;
		} else if (sc->ydelta < -127) {
			sc->packet[2] = -127;
			sc->packet[4] = sc->ydelta + 127;
		} else {
			sc->packet[2] = sc->ydelta;
			sc->packet[4] = 0;
		}

		/* No Z movement */
		sc->packet[5] = 0;
		sc->packet[6] = 0; 

		sc->packet[7] = ~((uint8_t)(sc->buttons >> 3)) & 0x7f;


		sc->last_buttons = sc->buttons;
		sc->xdelta = 0;
		sc->ydelta = 0;

		sc->packet_read_len = sc->mode.packetsize;
	}

	len = (sc->packet_read_len > uio->uio_resid) ? 
		uio->uio_resid : sc->packet_read_len;

	memcpy(outpacket,sc->packet + 
		(sc->mode.packetsize - sc->packet_read_len),len);
	sc->packet_read_len -= len;

	mtx_unlock(&sc->sc_mtx);

	error = uiomove(outpacket,len,uio);

	return (error);
}
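/*
 * Standalone sketch of the delta-splitting logic above: clamp a movement
 * delta to the signed byte carried in the basic packet and put the overflow
 * into the extended-level byte.  The sample deltas are arbitrary.
 */
#include <stdio.h>

static void
split_delta(int delta, int *basic, int *extra)
{
	if (delta > 127) {
		*basic = 127;
		*extra = delta - 127;
	} else if (delta < -127) {
		*basic = -127;
		*extra = delta + 127;
	} else {
		*basic = delta;
		*extra = 0;
	}
}

int
main(void)
{
	int b, e;

	split_delta(200, &b, &e);
	printf("200 -> %d + %d\n", b, e);	/* 127 + 73 */
	split_delta(-5, &b, &e);
	printf("-5 -> %d + %d\n", b, e);	/* -5 + 0 */
	return (0);
}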
Example #19
0
void
audit_commit(struct kaudit_record *ar, int error, int retval)
{
	au_event_t event;
	au_class_t class;
	au_id_t auid;
	int sorf;
	struct au_mask *aumask;

	if (ar == NULL)
		return;

	/*
	 * Decide whether to commit the audit record by checking the error
	 * value from the system call and using the appropriate audit mask.
	 */
	if (ar->k_ar.ar_subj_auid == AU_DEFAUDITID)
		aumask = &audit_nae_mask;
	else
		aumask = &ar->k_ar.ar_subj_amask;

	if (error)
		sorf = AU_PRS_FAILURE;
	else
		sorf = AU_PRS_SUCCESS;

	/*
	 * syscalls.master sometimes contains a prototype event number, which
	 * we will transform into a more specific event number now that we
	 * have more complete information gathered during the system call.
	 */
	switch(ar->k_ar.ar_event) {
	case AUE_OPEN_RWTC:
		ar->k_ar.ar_event = audit_flags_and_error_to_openevent(
		    ar->k_ar.ar_arg_fflags, error);
		break;

	case AUE_OPENAT_RWTC:
		ar->k_ar.ar_event = audit_flags_and_error_to_openatevent(
		    ar->k_ar.ar_arg_fflags, error);
		break;

	case AUE_SYSCTL:
		ar->k_ar.ar_event = audit_ctlname_to_sysctlevent(
		    ar->k_ar.ar_arg_ctlname, ar->k_ar.ar_valid_arg);
		break;

	case AUE_AUDITON:
		/* Convert the auditon() command to an event. */
		ar->k_ar.ar_event = auditon_command_event(ar->k_ar.ar_arg_cmd);
		break;
	}

	auid = ar->k_ar.ar_subj_auid;
	event = ar->k_ar.ar_event;
	class = au_event_class(event);

	ar->k_ar_commit |= AR_COMMIT_KERNEL;
	if (au_preselect(event, class, aumask, sorf) != 0)
		ar->k_ar_commit |= AR_PRESELECT_TRAIL;
	if (audit_pipe_preselect(auid, event, class, sorf,
	    ar->k_ar_commit & AR_PRESELECT_TRAIL) != 0)
		ar->k_ar_commit |= AR_PRESELECT_PIPE;
	if ((ar->k_ar_commit & (AR_PRESELECT_TRAIL | AR_PRESELECT_PIPE |
	    AR_PRESELECT_USER_TRAIL | AR_PRESELECT_USER_PIPE)) == 0) {
		mtx_lock(&audit_mtx);
		audit_pre_q_len--;
		mtx_unlock(&audit_mtx);
		audit_free(ar);
		return;
	}

	ar->k_ar.ar_errno = error;
	ar->k_ar.ar_retval = retval;
	nanotime(&ar->k_ar.ar_endtime);

	/*
	 * Note: it could be that some records initiated while audit was
	 * enabled should still be committed?
	 */
	mtx_lock(&audit_mtx);
	if (audit_suspended || !audit_enabled) {
		audit_pre_q_len--;
		mtx_unlock(&audit_mtx);
		audit_free(ar);
		return;
	}

	/*
	 * Constrain the number of committed audit records based on the
	 * configurable parameter.
	 */
	while (audit_q_len >= audit_qctrl.aq_hiwater)
		cv_wait(&audit_watermark_cv, &audit_mtx);

	TAILQ_INSERT_TAIL(&audit_q, ar, k_q);
	audit_q_len++;
	audit_pre_q_len--;
	cv_signal(&audit_worker_cv);
	mtx_unlock(&audit_mtx);
}
Example #20
0
static int
ams_ioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag, 
    struct thread *p)
{
	struct adb_mouse_softc *sc;
	mousemode_t mode;

	sc = CDEV_GET_SOFTC(dev);
	if (sc == NULL)
		return (EIO);

	switch (cmd) {
	case MOUSE_GETHWINFO:
		*(mousehw_t *)addr = sc->hw;
		break;
	case MOUSE_GETMODE:
		*(mousemode_t *)addr = sc->mode;
		break;
	case MOUSE_SETMODE:
		mode = *(mousemode_t *)addr;
		addr = (caddr_t)&mode.level;

		/* Fallthrough */

	case MOUSE_SETLEVEL:
		if (*(int *)addr == -1)
			break;
		else if (*(int *)addr == 1) {
			sc->mode.level = 1;
			sc->mode.packetsize = 8;
			break;
		} else if (*(int *)addr == 0) {
			sc->mode.level = 0;
			sc->mode.packetsize = 5;
			break;
		}
	
		return EINVAL;
	case MOUSE_GETLEVEL:
		*(int *)addr = sc->mode.level;
		break;
	
	case MOUSE_GETSTATUS: {
		mousestatus_t *status = (mousestatus_t *) addr;

		mtx_lock(&sc->sc_mtx);

		status->button = sc->buttons;
		status->obutton = sc->last_buttons;
		
		status->flags = status->button ^ status->obutton;

		if (sc->xdelta != 0 || sc->ydelta != 0)
			status->flags |= MOUSE_POSCHANGED;
		if (status->button != status->obutton)
			status->flags |= MOUSE_BUTTONSCHANGED;

		status->dx = sc->xdelta;
		status->dy = sc->ydelta;
		status->dz = 0;

		sc->xdelta = 0;
		sc->ydelta = 0;
		sc->last_buttons = sc->buttons;

		mtx_unlock(&sc->sc_mtx);

		break; }
	default:
		return ENOTTY;
	}

	return (0);
}
Example #21
0
/*
 * The caller must make sure that the new protocol is fully set up and ready to
 * accept requests before it is registered.
 */
int
pf_proto_register(int family, struct protosw *npr)
{
	VNET_ITERATOR_DECL(vnet_iter);
	struct domain *dp;
	struct protosw *pr, *fpr;

	/* Sanity checks. */
	if (family == 0)
		return (EPFNOSUPPORT);
	if (npr->pr_type == 0)
		return (EPROTOTYPE);
	if (npr->pr_protocol == 0)
		return (EPROTONOSUPPORT);
	if (npr->pr_usrreqs == NULL)
		return (ENXIO);

	/* Try to find the specified domain based on the family. */
	dp = pffinddomain(family);
	if (dp == NULL)
		return (EPFNOSUPPORT);

	/* Initialize backpointer to struct domain. */
	npr->pr_domain = dp;
	fpr = NULL;

	/*
	 * Protect us against races when two protocol registrations for
	 * the same protocol happen at the same time.
	 */
	mtx_lock(&dom_mtx);

	/* The new protocol must not yet exist. */
	for (pr = dp->dom_protosw; pr < dp->dom_protoswNPROTOSW; pr++) {
		if ((pr->pr_type == npr->pr_type) &&
		    (pr->pr_protocol == npr->pr_protocol)) {
			mtx_unlock(&dom_mtx);
			return (EEXIST);	/* XXX: Check only protocol? */
		}
		/* While here, remember the first free spacer. */
		if ((fpr == NULL) && (pr->pr_protocol == PROTO_SPACER))
			fpr = pr;
	}

	/* If no free spacer is found we can't add the new protocol. */
	if (fpr == NULL) {
		mtx_unlock(&dom_mtx);
		return (ENOMEM);
	}

	/* Copy the new struct protosw over the spacer. */
	bcopy(npr, fpr, sizeof(*fpr));

	/* Job is done, no more protection required. */
	mtx_unlock(&dom_mtx);

	/* Initialize and activate the protocol. */
	VNET_LIST_RLOCK();
	VNET_FOREACH(vnet_iter) {
		CURVNET_SET_QUIET(vnet_iter);
		protosw_init(fpr);
		CURVNET_RESTORE();
	}
	VNET_LIST_RUNLOCK();

	return (0);
}
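pf_proto_register() scans the domain's protosw table under dom_mtx, failing with EEXIST if the type/protocol pair already exists and otherwise copying the new entry over the first PROTO_SPACER slot. A minimal sketch of the same claim-a-free-slot-under-a-lock pattern, with a hypothetical fixed table instead of struct protosw:

/*
 * Sketch only: scan a fixed table under a lock, reject duplicates, and
 * claim the first free slot.  Callers must not pass proto == 0, since a
 * zero protocol marks a free slot here (the real code rejects it upfront).
 */
#include <errno.h>
#include <pthread.h>

#define TABLE_SIZE 32

struct entry { int type; int proto; };

static struct entry table[TABLE_SIZE];	/* proto == 0 marks a free slot */
static pthread_mutex_t table_mtx = PTHREAD_MUTEX_INITIALIZER;

static int
table_register(const struct entry *ne)
{
	struct entry *e, *free_slot = NULL;

	pthread_mutex_lock(&table_mtx);
	for (e = table; e < table + TABLE_SIZE; e++) {
		if (e->type == ne->type && e->proto == ne->proto) {
			pthread_mutex_unlock(&table_mtx);
			return (EEXIST);
		}
		if (free_slot == NULL && e->proto == 0)
			free_slot = e;
	}
	if (free_slot == NULL) {
		pthread_mutex_unlock(&table_mtx);
		return (ENOMEM);
	}
	*free_slot = *ne;			/* copy over the spacer */
	pthread_mutex_unlock(&table_mtx);
	return (0);
}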
Example #22
0
void
sendsig(sig_t catcher, ksiginfo_t *ksi, sigset_t *mask)
{
	struct sigframe *fp, frame;
	struct sysentvec *sysent;
	struct trapframe *tf;
	struct sigacts *psp;
	struct thread *td;
	struct proc *p;
	int onstack;
	int code;
	int sig;

	td = curthread;
	p = td->td_proc;
	PROC_LOCK_ASSERT(p, MA_OWNED);

	sig = ksi->ksi_signo;
	code = ksi->ksi_code;
	psp = p->p_sigacts;
	mtx_assert(&psp->ps_mtx, MA_OWNED);

	tf = td->td_frame;
	onstack = sigonstack(tf->tf_sp);

	CTR4(KTR_SIG, "sendsig: td=%p (%s) catcher=%p sig=%d", td, p->p_comm,
	    catcher, sig);

	/* Allocate and validate space for the signal handler context. */
	if ((td->td_pflags & TDP_ALTSTACK) != 0 && !onstack &&
	    SIGISMEMBER(psp->ps_sigonstack, sig)) {
		fp = (struct sigframe *)((uintptr_t)td->td_sigstk.ss_sp +
		    td->td_sigstk.ss_size);
	} else {
		fp = (struct sigframe *)td->td_frame->tf_sp;
	}

	/* Make room, keeping the stack aligned */
	fp--;
	fp = (struct sigframe *)STACKALIGN(fp);

	/* Fill in the frame to copy out */
	get_mcontext(td, &frame.sf_uc.uc_mcontext, 0);
	get_fpcontext(td, &frame.sf_uc.uc_mcontext);
	frame.sf_si = ksi->ksi_info;
	frame.sf_uc.uc_sigmask = *mask;
	frame.sf_uc.uc_stack = td->td_sigstk;
	frame.sf_uc.uc_stack.ss_flags = (td->td_pflags & TDP_ALTSTACK) ?
	    ((onstack) ? SS_ONSTACK : 0) : SS_DISABLE;
	mtx_unlock(&psp->ps_mtx);
	PROC_UNLOCK(td->td_proc);

	/* Copy the sigframe out to the user's stack. */
	if (copyout(&frame, fp, sizeof(*fp)) != 0) {
		/* Process has trashed its stack. Kill it. */
		CTR2(KTR_SIG, "sendsig: sigexit td=%p fp=%p", td, fp);
		PROC_LOCK(p);
		sigexit(td, SIGILL);
	}

	tf->tf_a[0] = sig;
	tf->tf_a[1] = (register_t)&fp->sf_si;
	tf->tf_a[2] = (register_t)&fp->sf_uc;

	tf->tf_sepc = (register_t)catcher;
	tf->tf_sp = (register_t)fp;

	sysent = p->p_sysent;
	if (sysent->sv_sigcode_base != 0)
		tf->tf_ra = (register_t)sysent->sv_sigcode_base;
	else
		tf->tf_ra = (register_t)(sysent->sv_psstrings -
		    *(sysent->sv_szsigcode));

	CTR3(KTR_SIG, "sendsig: return td=%p pc=%#x sp=%#x", td, tf->tf_sepc,
	    tf->tf_sp);

	PROC_LOCK(p);
	mtx_lock(&psp->ps_mtx);
}
Example #23
0
static int
ttm_bo_vm_fault(vm_object_t vm_obj, vm_ooffset_t offset,
    int prot, vm_page_t *mres)
{

	struct ttm_buffer_object *bo = vm_obj->handle;
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_tt *ttm = NULL;
	vm_page_t m, m1, oldm;
	int ret;
	int retval = VM_PAGER_OK;
	struct ttm_mem_type_manager *man =
		&bdev->man[bo->mem.mem_type];

	vm_object_pip_add(vm_obj, 1);
	oldm = *mres;
	if (oldm != NULL) {
		vm_page_lock(oldm);
		vm_page_remove(oldm);
		vm_page_unlock(oldm);
		*mres = NULL;
	} else
		oldm = NULL;
retry:
	VM_OBJECT_WUNLOCK(vm_obj);
	m = NULL;

reserve:
	ret = ttm_bo_reserve(bo, false, false, false, 0);
	if (unlikely(ret != 0)) {
		if (ret == -EBUSY) {
			kern_yield(0);
			goto reserve;
		}
	}

	if (bdev->driver->fault_reserve_notify) {
		ret = bdev->driver->fault_reserve_notify(bo);
		switch (ret) {
		case 0:
			break;
		case -EBUSY:
		case -ERESTARTSYS:
		case -EINTR:
			kern_yield(0);
			goto reserve;
		default:
			retval = VM_PAGER_ERROR;
			goto out_unlock;
		}
	}

	/*
	 * Wait for buffer data in transit, due to a pipelined
	 * move.
	 */

	mtx_lock(&bdev->fence_lock);
	if (test_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags)) {
		/*
		 * Here, the behavior differs between Linux and FreeBSD.
		 *
		 * On Linux, the wait is interruptible (3rd argument to
		 * ttm_bo_wait). There must be some mechanism to resume
		 * page fault handling, once the signal is processed.
		 *
		 * On FreeBSD, the wait is uninterruptible. This is not a
		 * problem as we can't end up with an unkillable process
		 * here, because the wait will eventually time out.
		 *
		 * An example of this situation is the Xorg process
		 * which uses SIGALRM internally. The signal could
		 * interrupt the wait, causing the page fault to fail
		 * and the process to receive SIGSEGV.
		 */
		ret = ttm_bo_wait(bo, false, false, false);
		mtx_unlock(&bdev->fence_lock);
		if (unlikely(ret != 0)) {
			retval = VM_PAGER_ERROR;
			goto out_unlock;
		}
	} else
		mtx_unlock(&bdev->fence_lock);

	ret = ttm_mem_io_lock(man, true);
	if (unlikely(ret != 0)) {
		retval = VM_PAGER_ERROR;
		goto out_unlock;
	}
	ret = ttm_mem_io_reserve_vm(bo);
	if (unlikely(ret != 0)) {
		retval = VM_PAGER_ERROR;
		goto out_io_unlock;
	}

	/*
	 * Strictly, we're not allowed to modify vma->vm_page_prot here,
	 * since the mmap_sem is only held in read mode. However, we
	 * modify only the caching bits of vma->vm_page_prot and
	 * consider those bits protected by
	 * the bo->mutex, as we should be the only writers.
	 * There shouldn't really be any readers of these bits except
	 * within vm_insert_mixed()? fork?
	 *
	 * TODO: Add a list of vmas to the bo, and change the
	 * vma->vm_page_prot when the object changes caching policy, with
	 * the correct locks held.
	 */
	if (!bo->mem.bus.is_iomem) {
		/* Allocate all pages at once, most common usage */
		ttm = bo->ttm;
		if (ttm->bdev->driver->ttm_tt_populate(ttm)) {
			retval = VM_PAGER_ERROR;
			goto out_io_unlock;
		}
	}

	if (bo->mem.bus.is_iomem) {
		m = PHYS_TO_VM_PAGE(bo->mem.bus.base + bo->mem.bus.offset +
		    offset);
		KASSERT((m->flags & PG_FICTITIOUS) != 0,
		    ("physical address %#jx not fictitious",
		    (uintmax_t)(bo->mem.bus.base + bo->mem.bus.offset
		    + offset)));
		pmap_page_set_memattr(m, ttm_io_prot(bo->mem.placement));
	} else {
		ttm = bo->ttm;
		m = ttm->pages[OFF_TO_IDX(offset)];
		if (unlikely(!m)) {
			retval = VM_PAGER_ERROR;
			goto out_io_unlock;
		}
		pmap_page_set_memattr(m,
		    (bo->mem.placement & TTM_PL_FLAG_CACHED) ?
		    VM_MEMATTR_WRITE_BACK : ttm_io_prot(bo->mem.placement));
	}

	VM_OBJECT_WLOCK(vm_obj);
	if (vm_page_busied(m)) {
		vm_page_lock(m);
		VM_OBJECT_WUNLOCK(vm_obj);
		vm_page_busy_sleep(m, "ttmpbs");
		VM_OBJECT_WLOCK(vm_obj);
		ttm_mem_io_unlock(man);
		ttm_bo_unreserve(bo);
		goto retry;
	}
	m1 = vm_page_lookup(vm_obj, OFF_TO_IDX(offset));
	if (m1 == NULL) {
		if (vm_page_insert(m, vm_obj, OFF_TO_IDX(offset))) {
			VM_OBJECT_WUNLOCK(vm_obj);
			VM_WAIT;
			VM_OBJECT_WLOCK(vm_obj);
			ttm_mem_io_unlock(man);
			ttm_bo_unreserve(bo);
			goto retry;
		}
	} else {
		KASSERT(m == m1,
		    ("inconsistent insert bo %p m %p m1 %p offset %jx",
		    bo, m, m1, (uintmax_t)offset));
	}
	m->valid = VM_PAGE_BITS_ALL;
	*mres = m;
	vm_page_xbusy(m);

	if (oldm != NULL) {
		vm_page_lock(oldm);
		vm_page_free(oldm);
		vm_page_unlock(oldm);
	}

out_io_unlock1:
	ttm_mem_io_unlock(man);
out_unlock1:
	ttm_bo_unreserve(bo);
	vm_object_pip_wakeup(vm_obj);
	return (retval);

out_io_unlock:
	VM_OBJECT_WLOCK(vm_obj);
	goto out_io_unlock1;

out_unlock:
	VM_OBJECT_WLOCK(vm_obj);
	goto out_unlock1;
}
Example #24
0
/**
 * @brief Release the lock on a file
 *
 * This function releases the lock acquired by @ref squash_file_lock.
 * If you have not called that function *do not call this one*.
 *
 * @param file the file to release the lock on
 */
void
squash_file_unlock (SquashFile* file) {
  assert (file != NULL);

  mtx_unlock (&(file->mtx));
}
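Per the comment above, squash_file_unlock() must only follow a matching squash_file_lock() on the same file. A small usage sketch (the do_locked_io helper is hypothetical):

/* Sketch only: pair the documented lock/unlock calls around file I/O. */
static void
do_locked_io (SquashFile* file) {
  squash_file_lock (file);
  /* ... perform reads/writes on the locked file ... */
  squash_file_unlock (file);
}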
Example #25
0
static int
dma_memcpy(void *dst, void *src, int len, int flags)
{
    struct i80321_dma_softc *sc;
    i80321_dmadesc_t *desc;
    int ret;
    int csr;
    int descnb = 0;
    int tmplen = len;
    int to_nextpagesrc, to_nextpagedst;
    int min_hop;
    vm_paddr_t pa, pa2, tmppa;
    pmap_t pmap = vmspace_pmap(curthread->td_proc->p_vmspace);

    if (!softcs[0] || !softcs[1])
        return (-1);
    mtx_lock_spin(&softcs[0]->mtx);
    if (softcs[0]->flags & BUSY) {
        mtx_unlock_spin(&softcs[0]->mtx);
        mtx_lock_spin(&softcs[1]->mtx);
        if (softcs[1]->flags & BUSY) {
            mtx_unlock_spin(&softcs[1]->mtx);
            return (-1);
        }
        sc = softcs[1];
    } else
        sc = softcs[0];
    sc->flags |= BUSY;
    mtx_unlock_spin(&sc->mtx);
    desc = sc->dmaring[0].desc;
    if (flags & IS_PHYSICAL) {
        desc->next_desc = 0;
        desc->low_pciaddr = (vm_paddr_t)src;
        desc->high_pciaddr = 0;
        desc->local_addr = (vm_paddr_t)dst;
        desc->count = len;
        desc->descr_ctrl = 1 << 6; /* Local memory to local memory. */
        bus_dmamap_sync(sc->dmatag,
                        sc->dmaring[0].map,
                        BUS_DMASYNC_PREWRITE);
    } else {
        if (!virt_addr_is_valid(dst, len, 1, !(flags & DST_IS_USER)) ||
                !virt_addr_is_valid(src, len, 0, !(flags & SRC_IS_USER))) {
            mtx_lock_spin(&sc->mtx);
            sc->flags &= ~BUSY;
            mtx_unlock_spin(&sc->mtx);
            return (-1);
        }
        cpu_dcache_wb_range((vm_offset_t)src, len);
        if ((vm_offset_t)dst & (31))
            cpu_dcache_wb_range((vm_offset_t)dst & ~31, 32);
        if (((vm_offset_t)dst + len) & 31)
            cpu_dcache_wb_range(((vm_offset_t)dst + len) & ~31,
                                32);
        cpu_dcache_inv_range((vm_offset_t)dst, len);
        while (tmplen > 0) {
            pa = (flags & SRC_IS_USER) ?
                 pmap_extract(pmap, (vm_offset_t)src) :
                 vtophys(src);
            pa2 = (flags & DST_IS_USER) ?
                  pmap_extract(pmap, (vm_offset_t)dst) :
                  vtophys(dst);
            to_nextpagesrc = ((vm_offset_t)src & ~PAGE_MASK) +
                             PAGE_SIZE - (vm_offset_t)src;
            to_nextpagedst = ((vm_offset_t)dst & ~PAGE_MASK) +
                             PAGE_SIZE - (vm_offset_t)dst;
            while (to_nextpagesrc < tmplen) {
                tmppa = (flags & SRC_IS_USER) ?
                        pmap_extract(pmap, (vm_offset_t)src +
                                     to_nextpagesrc) :
                        vtophys((vm_offset_t)src +
                                to_nextpagesrc);
                if (tmppa != pa + to_nextpagesrc)
                    break;
                to_nextpagesrc += PAGE_SIZE;
            }
            while (to_nextpagedst < tmplen) {
                tmppa = (flags & DST_IS_USER) ?
                        pmap_extract(pmap, (vm_offset_t)dst +
                                     to_nextpagedst) :
                        vtophys((vm_offset_t)dst +
                                to_nextpagedst);
                if (tmppa != pa2 + to_nextpagedst)
                    break;
                to_nextpagedst += PAGE_SIZE;
            }
            min_hop = to_nextpagedst > to_nextpagesrc ?
                      to_nextpagesrc : to_nextpagedst;
            if (min_hop < 64) {
                tmplen -= min_hop;
                memcpy(dst, src, min_hop);
                cpu_dcache_wbinv_range((vm_offset_t)dst,
                                       min_hop);

                src = (void *)((vm_offset_t)src + min_hop);
                dst = (void *)((vm_offset_t)dst + min_hop);
                if (tmplen <= 0 && descnb > 0) {
                    sc->dmaring[descnb - 1].desc->next_desc
                        = 0;
                    bus_dmamap_sync(sc->dmatag,
                                    sc->dmaring[descnb - 1].map,
                                    BUS_DMASYNC_PREWRITE);
                }
                continue;
            }
            desc->low_pciaddr = pa;
            desc->high_pciaddr = 0;
            desc->local_addr = pa2;
            desc->count = tmplen > min_hop ? min_hop : tmplen;
            desc->descr_ctrl = 1 << 6;
            if (min_hop < tmplen) {
                tmplen -= min_hop;
                src = (void *)((vm_offset_t)src + min_hop);
                dst = (void *)((vm_offset_t)dst + min_hop);
            } else
                tmplen = 0;
            if (descnb + 1 >= DMA_RING_SIZE) {
                mtx_lock_spin(&sc->mtx);
                sc->flags &= ~BUSY;
                mtx_unlock_spin(&sc->mtx);
                return (-1);
            }
            if (tmplen > 0) {
                desc->next_desc = sc->dmaring[descnb + 1].
                                  phys_addr;
                bus_dmamap_sync(sc->dmatag,
                                sc->dmaring[descnb].map,
                                BUS_DMASYNC_PREWRITE);
                desc = sc->dmaring[descnb + 1].desc;
                descnb++;
            } else {
                desc->next_desc = 0;
                bus_dmamap_sync(sc->dmatag,
                                sc->dmaring[descnb].map,
                                BUS_DMASYNC_PREWRITE);
            }

        }

    }
    DMA_REG_WRITE(sc, 4 /* Status register */,
                  DMA_REG_READ(sc, 4) | DMA_CLEAN_MASK);
    DMA_REG_WRITE(sc, 0x10 /* Descriptor addr */,
                  sc->dmaring[0].phys_addr);
    DMA_REG_WRITE(sc, 0 /* Control register */, 1 | 2/* Start transfer */);
    while ((csr = DMA_REG_READ(sc, 0x4)) & (1 << 10));
    /* Wait until it's done. */
    if (csr & 0x2e) /* error */
        ret = -1;
    else
        ret = 0;
    DMA_REG_WRITE(sc, 0, 0);
    mtx_lock_spin(&sc->mtx);
    sc->flags &= ~BUSY;
    mtx_unlock_spin(&sc->mtx);
    return (ret);
}
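The inner loops of dma_memcpy() decide how far a transfer can run before the source or destination crosses into a physical page that is no longer contiguous. A minimal sketch of that page-run computation, assuming a virt_to_phys() translation hook in place of pmap_extract()/vtophys():

/*
 * Sketch only: starting from a virtual address, count how many bytes are
 * physically contiguous by probing one page at a time.
 */
#include <stddef.h>
#include <stdint.h>

#define PAGE_SIZE 4096
#define PAGE_MASK (PAGE_SIZE - 1)

/* Assumed translation hook; stands in for pmap_extract()/vtophys(). */
extern uint64_t virt_to_phys(uintptr_t va);

static size_t
contiguous_run(uintptr_t va, size_t len)
{
	uint64_t pa = virt_to_phys(va);
	/* Bytes left until the next page boundary. */
	size_t run = (va & ~(uintptr_t)PAGE_MASK) + PAGE_SIZE - va;

	while (run < len && virt_to_phys(va + run) == pa + run)
		run += PAGE_SIZE;
	return (run < len ? run : len);
}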
Example #26
0
/*
 * Probes an interface for its particular capabilities and attaches if
 * it's a supported interface.
 */
static int
uhso_probe_iface(struct uhso_softc *sc, int index,
    int (*probe)(struct usb_device *, int))
{
	struct usb_interface *iface;
	int type, error;

	UHSO_DPRINTF(1, "Probing for interface %d, probe_func=%p\n", index, probe);

	type = probe(sc->sc_udev, index);
	UHSO_DPRINTF(1, "Probe result %x\n", type);
	if (type <= 0)
		return (ENXIO);

	sc->sc_type = type;
	iface = usbd_get_iface(sc->sc_udev, index);

	if (UHSO_IFACE_PORT_TYPE(type) == UHSO_PORT_TYPE_NETWORK) {
		error = uhso_attach_ifnet(sc, iface, type);
		if (error) {
			UHSO_DPRINTF(1, "uhso_attach_ifnet failed");
			return (ENXIO);
		}

		/*
		 * If there is an additional interrupt endpoint on this
		 * interface then we most likely have a multiplexed serial port
		 * available.
		 */
		if (iface->idesc->bNumEndpoints < 3) {
			sc->sc_type = UHSO_IFACE_SPEC( 
			    UHSO_IFACE_USB_TYPE(type) & ~UHSO_IF_MUX,
			    UHSO_IFACE_PORT(type) & ~UHSO_PORT_SERIAL,
			    UHSO_IFACE_PORT_TYPE(type));
			return (0);
		}

		UHSO_DPRINTF(1, "Trying to attach mux. serial\n");
		error = uhso_attach_muxserial(sc, iface, type);
		if (error == 0 && sc->sc_ttys > 0) {
			error = ucom_attach(&sc->sc_super_ucom, sc->sc_ucom,
			    sc->sc_ttys, sc, &uhso_ucom_callback, &sc->sc_mtx);
			if (error) {
				device_printf(sc->sc_dev, "ucom_attach failed\n");
				return (ENXIO);
			}
			ucom_set_pnpinfo_usb(&sc->sc_super_ucom, sc->sc_dev);

			mtx_lock(&sc->sc_mtx);
			usbd_transfer_start(sc->sc_xfer[UHSO_MUX_ENDPT_INTR]);
			mtx_unlock(&sc->sc_mtx);
		}
	} else if ((UHSO_IFACE_USB_TYPE(type) & UHSO_IF_BULK) &&
	    UHSO_IFACE_PORT(type) & UHSO_PORT_SERIAL) {

		error = uhso_attach_bulkserial(sc, iface, type);
		if (error)
			return (ENXIO);

		error = ucom_attach(&sc->sc_super_ucom, sc->sc_ucom,
		    sc->sc_ttys, sc, &uhso_ucom_callback, &sc->sc_mtx);
		if (error) {
			device_printf(sc->sc_dev, "ucom_attach failed\n");
			return (ENXIO);
		}
		ucom_set_pnpinfo_usb(&sc->sc_super_ucom, sc->sc_dev);
	}
	else {
		UHSO_DPRINTF(0, "Unknown type %x\n", type);
		return (ENXIO);
	}

	return (0);
}
Example #27
0
int Kernel_DeviceIoControl(_VBUS_ARG
							DWORD dwIoControlCode,       	/* operation control code */
							PVOID lpInBuffer,            	/* input data buffer */
							DWORD nInBufferSize,         	/* size of input data buffer */
							PVOID lpOutBuffer,           	/* output data buffer */
							DWORD nOutBufferSize,        	/* size of output data buffer */
							PDWORD lpBytesReturned      	/* byte count */
						)
{
	IAL_ADAPTER_T *pAdapter;

	switch(dwIoControlCode)	{
		case HPT_IOCTL_DELETE_ARRAY:
		{
			DEVICEID idArray;
			int iSuccess;			
	        int i;
			PVDevice pArray;
			PVBus _vbus_p;
			struct cam_periph *periph = NULL;

			if (nInBufferSize!=sizeof(DEVICEID)+sizeof(DWORD)) return -1;
			if (nOutBufferSize!=sizeof(int)) return -1;
			idArray = *(DEVICEID *)lpInBuffer;

			pArray = ID_TO_VDEV(idArray);

			if ((idArray == 0) || check_VDevice_valid(pArray))
				return -1;

			if (!mIsArray(pArray))
				return -1;

			_vbus_p=pArray->pVBus;
			pAdapter = (IAL_ADAPTER_T *)_vbus_p->OsExt;

	        for(i = 0; i < MAX_VDEVICE_PER_VBUS; i++) {
				if(pArray == _vbus_p->pVDevice[i])
				{
					periph = hpt_get_periph(pAdapter->mvSataAdapter.adapterId, i);
					if (periph != NULL && periph->refcount >= 1)
					{
						hpt_printk(("Can not delete a mounted device.\n"));
	                    return -1;	
					}
				}
				/* A mounted disk is not deleted. */
			} 

			iSuccess = hpt_delete_array(_VBUS_P idArray, *(DWORD*)((DEVICEID *)lpInBuffer+1));

			*(int*)lpOutBuffer = iSuccess;

			if(iSuccess != 0)
				return -1;
			break;
		}

		case HPT_IOCTL_GET_EVENT:
		{
			PHPT_EVENT pInfo;

			if (nInBufferSize!=0) return -1;
			if (nOutBufferSize!=sizeof(HPT_EVENT)) return -1;

			pInfo = (PHPT_EVENT)lpOutBuffer;

			if (hpt_get_event(pInfo)!=0)
				return -1;
		}
		break;

		case HPT_IOCTL_SET_ARRAY_STATE:
		{
			DEVICEID idArray;
			DWORD state;

			if (nInBufferSize!=sizeof(HPT_SET_STATE_PARAM)) return -1;
			if (nOutBufferSize!=0) return -1;

			idArray = ((PHPT_SET_STATE_PARAM)lpInBuffer)->idArray;
			state = ((PHPT_SET_STATE_PARAM)lpInBuffer)->state;

			if(hpt_set_array_state(idArray, state)!=0)
				return -1;
		}
		break;

		case HPT_IOCTL_RESCAN_DEVICES:
		{
			if (nInBufferSize!=0) return -1;
			if (nOutBufferSize!=0) return -1;

#ifndef FOR_DEMO
			/* stop buzzer if user perform rescan */
			for (pAdapter=gIal_Adapter; pAdapter; pAdapter=pAdapter->next) {
				if (pAdapter->beeping) {
					pAdapter->beeping = 0;
					BeepOff(pAdapter->mvSataAdapter.adapterIoBaseAddress);
				}
			}
#endif
		}
		break;

		default:
		{
			PVDevice pVDev;

			switch(dwIoControlCode) {
			/* read-only ioctl functions can be called directly. */
			case HPT_IOCTL_GET_VERSION:
			case HPT_IOCTL_GET_CONTROLLER_IDS:
			case HPT_IOCTL_GET_CONTROLLER_COUNT:
			case HPT_IOCTL_GET_CONTROLLER_INFO:
			case HPT_IOCTL_GET_CHANNEL_INFO:
			case HPT_IOCTL_GET_LOGICAL_DEVICES:
			case HPT_IOCTL_GET_DEVICE_INFO:
			case HPT_IOCTL_GET_DEVICE_INFO_V2:				
			case HPT_IOCTL_GET_EVENT:
			case HPT_IOCTL_GET_DRIVER_CAPABILITIES:
				if(hpt_default_ioctl(_VBUS_P dwIoControlCode, lpInBuffer, nInBufferSize, 
					lpOutBuffer, nOutBufferSize, lpBytesReturned) == -1) return -1;
				break;

			default:
				/* 
				 * GUI always use /proc/scsi/hptmv/0, so the _vbus_p param will be 
				 * wrong for second controller. 
				 */
				switch(dwIoControlCode) {
				case HPT_IOCTL_CREATE_ARRAY:
					pVDev = ID_TO_VDEV(((PCREATE_ARRAY_PARAMS)lpInBuffer)->Members[0]); break;
				case HPT_IOCTL_CREATE_ARRAY_V2:
					pVDev = ID_TO_VDEV(((PCREATE_ARRAY_PARAMS_V2)lpInBuffer)->Members[0]); break;
				case HPT_IOCTL_SET_ARRAY_INFO:
					pVDev = ID_TO_VDEV(((PHPT_SET_ARRAY_INFO)lpInBuffer)->idArray); break;
				case HPT_IOCTL_SET_DEVICE_INFO:
					pVDev = ID_TO_VDEV(((PHPT_SET_DEVICE_INFO)lpInBuffer)->idDisk); break;
				case HPT_IOCTL_SET_DEVICE_INFO_V2:
					pVDev = ID_TO_VDEV(((PHPT_SET_DEVICE_INFO_V2)lpInBuffer)->idDisk); break;
				case HPT_IOCTL_SET_BOOT_MARK:
				case HPT_IOCTL_ADD_SPARE_DISK:
				case HPT_IOCTL_REMOVE_SPARE_DISK:
					pVDev = ID_TO_VDEV(*(DEVICEID *)lpInBuffer); break;
				case HPT_IOCTL_ADD_DISK_TO_ARRAY:
					pVDev = ID_TO_VDEV(((PHPT_ADD_DISK_TO_ARRAY)lpInBuffer)->idArray); break;
				default:
					pVDev = 0;
				}
				
				if (pVDev && !check_VDevice_valid(pVDev)){
					_vbus_p = pVDev->pVBus;

					pAdapter = (IAL_ADAPTER_T *)_vbus_p->OsExt;
					/* 
					 * create_array, and other functions can't be executed while channel is 
					 * perform I/O commands. Wait until driver is idle.
					 */
					lock_driver_idle(pAdapter);
					if (hpt_default_ioctl(_VBUS_P dwIoControlCode, lpInBuffer, nInBufferSize, 
						lpOutBuffer, nOutBufferSize, lpBytesReturned) == -1) {
						mtx_unlock(&pAdapter->lock);
						return -1;
					}
					mtx_unlock(&pAdapter->lock);
				}
				else
					return -1;
				break;
			}
			
#ifdef SUPPORT_ARRAY
			switch(dwIoControlCode)
			{
				case HPT_IOCTL_CREATE_ARRAY:
				{
					pAdapter=(IAL_ADAPTER_T *)(ID_TO_VDEV(*(DEVICEID *)lpOutBuffer))->pVBus->OsExt;
					mtx_lock(&pAdapter->lock);
                    if(((PCREATE_ARRAY_PARAMS)lpInBuffer)->CreateFlags & CAF_CREATE_AND_DUPLICATE)
				    {
						  (ID_TO_VDEV(*(DEVICEID *)lpOutBuffer))->u.array.rf_auto_rebuild = 0;
                          hpt_queue_dpc((HPT_DPC)hpt_rebuild_data_block, pAdapter, ID_TO_VDEV(*(DEVICEID *)lpOutBuffer), DUPLICATE);
					}
					else if(((PCREATE_ARRAY_PARAMS)lpInBuffer)->CreateFlags & CAF_CREATE_R5_ZERO_INIT)
				    {
                          hpt_queue_dpc((HPT_DPC)hpt_rebuild_data_block, pAdapter, ID_TO_VDEV(*(DEVICEID *)lpOutBuffer), INITIALIZE);
					}
					else if(((PCREATE_ARRAY_PARAMS)lpInBuffer)->CreateFlags & CAF_CREATE_R5_BUILD_PARITY)
				    {
                          hpt_queue_dpc((HPT_DPC)hpt_rebuild_data_block, pAdapter, ID_TO_VDEV(*(DEVICEID *)lpOutBuffer), REBUILD_PARITY);
					}
					mtx_unlock(&pAdapter->lock);
                    break;
				}


				case HPT_IOCTL_CREATE_ARRAY_V2:
				{
					pAdapter=(IAL_ADAPTER_T *)(ID_TO_VDEV(*(DEVICEID *)lpOutBuffer))->pVBus->OsExt;
					mtx_lock(&pAdapter->lock);
					if (((PCREATE_ARRAY_PARAMS_V2)lpInBuffer)->CreateFlags & CAF_CREATE_AND_DUPLICATE) {
						(ID_TO_VDEV(*(DEVICEID *)lpOutBuffer))->u.array.rf_auto_rebuild = 0;
						hpt_queue_dpc((HPT_DPC)hpt_rebuild_data_block, pAdapter, ID_TO_VDEV(*(DEVICEID *)lpOutBuffer), DUPLICATE);
					} else if (((PCREATE_ARRAY_PARAMS_V2)lpInBuffer)->CreateFlags & CAF_CREATE_R5_ZERO_INIT) {
						hpt_queue_dpc((HPT_DPC)hpt_rebuild_data_block, pAdapter, ID_TO_VDEV(*(DEVICEID *)lpOutBuffer), INITIALIZE);
					} else if (((PCREATE_ARRAY_PARAMS_V2)lpInBuffer)->CreateFlags & CAF_CREATE_R5_BUILD_PARITY) {
						hpt_queue_dpc((HPT_DPC)hpt_rebuild_data_block, pAdapter, ID_TO_VDEV(*(DEVICEID *)lpOutBuffer), REBUILD_PARITY);
					}
					mtx_unlock(&pAdapter->lock);
					break;
				}
				case HPT_IOCTL_ADD_DISK_TO_ARRAY:
				{
					PVDevice pArray = ID_TO_VDEV(((PHPT_ADD_DISK_TO_ARRAY)lpInBuffer)->idArray);
					pAdapter=(IAL_ADAPTER_T *)pArray->pVBus->OsExt;
					if(pArray->u.array.rf_rebuilding == 0)
					{		
						mtx_lock(&pAdapter->lock);
						pArray->u.array.rf_auto_rebuild = 0;
						pArray->u.array.rf_abort_rebuild = 0;
						hpt_queue_dpc((HPT_DPC)hpt_rebuild_data_block, pAdapter, pArray, DUPLICATE);
						while (!pArray->u.array.rf_rebuilding)
						{
							if (mtx_sleep(pArray, &pAdapter->lock, 0, "hptwait", hz * 3) != 0)
								break;
						}
						mtx_unlock(&pAdapter->lock);
					}
					break;
				}
			}
#endif
            return 0;
		}
	}

	if (lpBytesReturned)
		*lpBytesReturned = nOutBufferSize;
	return 0;
}
Example #28
0
static int
mfi_disk_attach(device_t dev)
{
	struct mfi_disk *sc;
	struct mfi_ld_info *ld_info;
	uint64_t sectors;
	uint32_t secsize;
	char *state;

	sc = device_get_softc(dev);
	ld_info = device_get_ivars(dev);

	sc->ld_dev = dev;
	sc->ld_id = ld_info->ld_config.properties.ld.v.target_id;
	sc->ld_unit = device_get_unit(dev);
	sc->ld_info = ld_info;
	sc->ld_controller = device_get_softc(device_get_parent(dev));
	sc->ld_flags = 0;

	sectors = ld_info->size;
	secsize = MFI_SECTOR_LEN;
	mtx_lock(&sc->ld_controller->mfi_io_lock);
	TAILQ_INSERT_TAIL(&sc->ld_controller->mfi_ld_tqh, sc, ld_link);
	mtx_unlock(&sc->ld_controller->mfi_io_lock);

	switch (ld_info->ld_config.params.state) {
	case MFI_LD_STATE_OFFLINE:
		state = "offline";
		break;
	case MFI_LD_STATE_PARTIALLY_DEGRADED:
		state = "partially degraded";
		break;
	case MFI_LD_STATE_DEGRADED:
		state = "degraded";
		break;
	case MFI_LD_STATE_OPTIMAL:
		state = "optimal";
		break;
	default:
		state = "unknown";
		break;
	}
	device_printf(dev, "%juMB (%ju sectors) RAID volume '%s' is %s\n",
		      sectors / (1024 * 1024 / secsize), sectors,
		      ld_info->ld_config.properties.name,
		      state);

	sc->ld_disk = disk_alloc();
	sc->ld_disk->d_drv1 = sc;
	sc->ld_disk->d_maxsize = min(sc->ld_controller->mfi_max_io * secsize,
	    (sc->ld_controller->mfi_max_sge - 1) * PAGE_SIZE);
	sc->ld_disk->d_name = "mfid";
	sc->ld_disk->d_open = mfi_disk_open;
	sc->ld_disk->d_close = mfi_disk_close;
	sc->ld_disk->d_strategy = mfi_disk_strategy;
	sc->ld_disk->d_dump = mfi_disk_dump;
	sc->ld_disk->d_unit = sc->ld_unit;
	sc->ld_disk->d_sectorsize = secsize;
	sc->ld_disk->d_mediasize = sectors * secsize;
	if (sc->ld_disk->d_mediasize >= (1 * 1024 * 1024)) {
		sc->ld_disk->d_fwheads = 255;
		sc->ld_disk->d_fwsectors = 63;
	} else {
		sc->ld_disk->d_fwheads = 64;
		sc->ld_disk->d_fwsectors = 32;
	}
	disk_create(sc->ld_disk, DISK_VERSION);

	return (0);
}
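The size line printed above converts a sector count to megabytes by dividing by the number of sectors per megabyte, keeping the math in integer arithmetic. A standalone sketch of the same calculation, assuming 512-byte sectors as with MFI_SECTOR_LEN:

/* Sketch only: sectors to whole megabytes for a given sector size. */
#include <stdint.h>

static uint64_t
sectors_to_mb(uint64_t sectors, uint32_t secsize)
{
	/* e.g. 2097152 sectors * 512 bytes = 1 GiB -> returns 1024 */
	return (sectors / ((1024 * 1024) / secsize));
}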
Example #29
0
struct svga_sampler_view *
svga_get_tex_sampler_view(struct pipe_context *pipe,
			  struct pipe_resource *pt,
                          unsigned min_lod, unsigned max_lod)
{
   struct svga_context *svga = svga_context(pipe);
   struct svga_screen *ss = svga_screen(pipe->screen);
   struct svga_texture *tex = svga_texture(pt);
   struct svga_sampler_view *sv = NULL;
   SVGA3dSurfaceFlags flags = SVGA3D_SURFACE_HINT_TEXTURE;
   SVGA3dSurfaceFormat format = svga_translate_format(ss, pt->format,
                                                      PIPE_BIND_SAMPLER_VIEW);
   boolean view = TRUE;

   assert(pt);
   assert(min_lod <= max_lod);
   assert(max_lod <= pt->last_level);
   assert(!svga_have_vgpu10(svga));

   /* Is a view needed */
   {
      /*
       * Can't control max lod. For first level views and when we only
       * look at one level we disable mip filtering to achieve the same
       * results as a view.
       */
      if (min_lod == 0 && max_lod >= pt->last_level)
         view = FALSE;

      if (ss->debug.no_sampler_view)
         view = FALSE;

      if (ss->debug.force_sampler_view)
         view = TRUE;
   }

   /* First try the cache */
   if (view) {
      mtx_lock(&ss->tex_mutex);
      if (tex->cached_view &&
          tex->cached_view->min_lod == min_lod &&
          tex->cached_view->max_lod == max_lod) {
         svga_sampler_view_reference(&sv, tex->cached_view);
         mtx_unlock(&ss->tex_mutex);
         SVGA_DBG(DEBUG_VIEWS, "svga: Sampler view: reuse %p, %u %u, last %u\n",
                              pt, min_lod, max_lod, pt->last_level);
         svga_validate_sampler_view(svga_context(pipe), sv);
         return sv;
      }
      mtx_unlock(&ss->tex_mutex);
   }

   sv = CALLOC_STRUCT(svga_sampler_view);
   if (!sv)
      return NULL;

   pipe_reference_init(&sv->reference, 1);

   /* Note: we're not refcounting the texture resource here to avoid
    * a circular dependency.
    */
   sv->texture = pt;

   sv->min_lod = min_lod;
   sv->max_lod = max_lod;

   /* No view needed, just use the whole texture */
   if (!view) {
      SVGA_DBG(DEBUG_VIEWS,
               "svga: Sampler view: no %p, mips %u..%u, nr %u, size (%ux%ux%u), last %u\n",
               pt, min_lod, max_lod,
               max_lod - min_lod + 1,
               pt->width0,
               pt->height0,
               pt->depth0,
               pt->last_level);
      sv->key.cachable = 0;
      sv->handle = tex->handle;
      debug_reference(&sv->reference,
                      (debug_reference_descriptor)svga_debug_describe_sampler_view, 0);
      return sv;
   }

   SVGA_DBG(DEBUG_VIEWS,
            "svga: Sampler view: yes %p, mips %u..%u, nr %u, size (%ux%ux%u), last %u\n",
            pt, min_lod, max_lod,
            max_lod - min_lod + 1,
            pt->width0,
            pt->height0,
            pt->depth0,
            pt->last_level);

   sv->age = tex->age;
   sv->handle = svga_texture_view_surface(svga, tex,
                                          PIPE_BIND_SAMPLER_VIEW,
                                          flags, format,
                                          min_lod,
                                          max_lod - min_lod + 1,
                                          -1, 1, -1, FALSE,
                                          &sv->key);

   if (!sv->handle) {
      sv->key.cachable = 0;
      sv->handle = tex->handle;
      debug_reference(&sv->reference,
                      (debug_reference_descriptor)
                      svga_debug_describe_sampler_view, 0);
      return sv;
   }

   mtx_lock(&ss->tex_mutex);
   svga_sampler_view_reference(&tex->cached_view, sv);
   mtx_unlock(&ss->tex_mutex);

   debug_reference(&sv->reference,
                   (debug_reference_descriptor)
                   svga_debug_describe_sampler_view, 0);

   return sv;
}
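The sampler-view cache above is probed under ss->tex_mutex, the replacement view is built with the mutex dropped, and the result is published under the mutex again. A minimal pthread sketch of that probe/build/publish discipline with a hypothetical cache type (the real code also reference-counts the cached view, which this sketch omits):

/* Sketch only: check the cache under the lock, build outside it, publish. */
#include <pthread.h>
#include <stdlib.h>

struct view { int min_lod, max_lod; };

static pthread_mutex_t cache_mtx = PTHREAD_MUTEX_INITIALIZER;
static struct view *cached_view;

static struct view *
get_view(int min_lod, int max_lod)
{
	struct view *v;

	pthread_mutex_lock(&cache_mtx);
	if (cached_view != NULL && cached_view->min_lod == min_lod &&
	    cached_view->max_lod == max_lod) {
		v = cached_view;			/* cache hit */
		pthread_mutex_unlock(&cache_mtx);
		return (v);
	}
	pthread_mutex_unlock(&cache_mtx);

	v = malloc(sizeof(*v));				/* build outside the lock */
	if (v == NULL)
		return (NULL);
	v->min_lod = min_lod;
	v->max_lod = max_lod;

	pthread_mutex_lock(&cache_mtx);
	cached_view = v;				/* publish new view */
	pthread_mutex_unlock(&cache_mtx);
	return (v);
}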
Example #30
0
/*
 * Software (low priority) clock interrupt.
 * Run periodic events from timeout queue.
 */
void
softclock(void *dummy)
{
	struct callout *c;
	struct callout_tailq *bucket;
	int curticks;
	int steps;	/* #steps since we last allowed interrupts */
	int depth;
	int mpcalls;
	int gcalls;
	int wakeup_cookie;
#ifdef DIAGNOSTIC
	struct bintime bt1, bt2;
	struct timespec ts2;
	static uint64_t maxdt = 36893488147419102LL;	/* 2 msec */
	static timeout_t *lastfunc;
#endif

#ifndef MAX_SOFTCLOCK_STEPS
#define MAX_SOFTCLOCK_STEPS 100 /* Maximum allowed value of steps. */
#endif /* MAX_SOFTCLOCK_STEPS */

	mpcalls = 0;
	gcalls = 0;
	depth = 0;
	steps = 0;
	mtx_lock_spin(&callout_lock);
	while (softticks != ticks) {
		softticks++;
		/*
		 * softticks may be modified by hard clock, so cache
		 * it while we work on a given bucket.
		 */
		curticks = softticks;
		bucket = &callwheel[curticks & callwheelmask];
		c = TAILQ_FIRST(bucket);
		while (c) {
			depth++;
			if (c->c_time != curticks) {
				c = TAILQ_NEXT(c, c_links.tqe);
				++steps;
				if (steps >= MAX_SOFTCLOCK_STEPS) {
					nextsoftcheck = c;
					/* Give interrupts a chance. */
					mtx_unlock_spin(&callout_lock);
					;	/* nothing */
					mtx_lock_spin(&callout_lock);
					c = nextsoftcheck;
					steps = 0;
				}
			} else {
				void (*c_func)(void *);
				void *c_arg;
				int c_flags;

				nextsoftcheck = TAILQ_NEXT(c, c_links.tqe);
				TAILQ_REMOVE(bucket, c, c_links.tqe);
				c_func = c->c_func;
				c_arg = c->c_arg;
				c_flags = c->c_flags;
				c->c_func = NULL;
				if (c->c_flags & CALLOUT_LOCAL_ALLOC) {
					c->c_flags = CALLOUT_LOCAL_ALLOC;
					SLIST_INSERT_HEAD(&callfree, c,
							  c_links.sle);
				} else {
					c->c_flags =
					    (c->c_flags & ~CALLOUT_PENDING);
				}
				curr_callout = c;
				mtx_unlock_spin(&callout_lock);
				if (!(c_flags & CALLOUT_MPSAFE)) {
					mtx_lock(&Giant);
					gcalls++;
					CTR1(KTR_CALLOUT, "callout %p", c_func);
				} else {
					mpcalls++;
					CTR1(KTR_CALLOUT, "callout mpsafe %p",
					    c_func);
				}
#ifdef DIAGNOSTIC
				binuptime(&bt1);
				mtx_lock(&dont_sleep_in_callout);
#endif
				c_func(c_arg);
#ifdef DIAGNOSTIC
				mtx_unlock(&dont_sleep_in_callout);
				binuptime(&bt2);
				bintime_sub(&bt2, &bt1);
				if (bt2.frac > maxdt) {
					if (lastfunc != c_func ||
					    bt2.frac > maxdt * 2) {
						bintime2timespec(&bt2, &ts2);
						printf(
			"Expensive timeout(9) function: %p(%p) %jd.%09ld s\n",
						    c_func, c_arg,
						    (intmax_t)ts2.tv_sec,
						    ts2.tv_nsec);
					}
					maxdt = bt2.frac;
					lastfunc = c_func;
				}
#endif
				if (!(c_flags & CALLOUT_MPSAFE))
					mtx_unlock(&Giant);
				mtx_lock_spin(&callout_lock);
				curr_callout = NULL;
				if (wakeup_needed) {
					/*
					 * There might be someone waiting
					 * for the callout to complete.
					 */
					wakeup_cookie = wakeup_ctr;
					mtx_unlock_spin(&callout_lock);
					mtx_lock(&callout_wait_lock);
					cv_broadcast(&callout_wait);
					wakeup_done_ctr = wakeup_cookie;
					mtx_unlock(&callout_wait_lock);
					mtx_lock_spin(&callout_lock);
					wakeup_needed = 0;
				}
				steps = 0;
				c = nextsoftcheck;
			}
		}
	}
	avg_depth += (depth * 1000 - avg_depth) >> 8;
	avg_mpcalls += (mpcalls * 1000 - avg_mpcalls) >> 8;
	avg_gcalls += (gcalls * 1000 - avg_gcalls) >> 8;
	nextsoftcheck = NULL;
	mtx_unlock_spin(&callout_lock);
}
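The central discipline in softclock() is that callout_lock is dropped around every handler invocation and reacquired afterwards, so a slow callout never stalls other code contending for the queue lock. A minimal pthread sketch of that drop-the-lock-around-the-callback pattern, with assumed names:

/*
 * Sketch only: dequeue under the lock, run the callback unlocked,
 * then take the lock again before looking at the queue.
 */
#include <pthread.h>
#include <stddef.h>

struct work {
	struct work *next;
	void (*func)(void *);
	void *arg;
};

static pthread_mutex_t wq_mtx = PTHREAD_MUTEX_INITIALIZER;
static struct work *wq_head;

static void
run_pending(void)
{
	struct work *w;

	pthread_mutex_lock(&wq_mtx);
	while ((w = wq_head) != NULL) {
		wq_head = w->next;		/* dequeue under the lock */
		pthread_mutex_unlock(&wq_mtx);
		w->func(w->arg);		/* run callback unlocked */
		pthread_mutex_lock(&wq_mtx);
	}
	pthread_mutex_unlock(&wq_mtx);
}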