Example #1
static dm_fsreg_t *
dm_find_fsreg_and_lock(
	fsid_t		*fsidp,
	int		*lcp)		/* address of returned lock cookie */
{
	dm_fsreg_t	*fsrp;

	for (;;) {
		*lcp = mutex_spinlock(&dm_reg_lock);

		if ((fsrp = dm_find_fsreg(fsidp)) == NULL) {
			mutex_spinunlock(&dm_reg_lock, *lcp);
			return(NULL);
		}
		if (nested_spintrylock(&fsrp->fr_lock)) {
			nested_spinunlock(&dm_reg_lock);
			return(fsrp);   /* success */
		}

		/* If the second lock is not available, drop the first and
		   start over.  This gives the CPU a chance to process any
		   interrupts, and also allows processes which want a fr_lock
		   for a different filesystem to proceed.
		*/

		mutex_spinunlock(&dm_reg_lock, *lcp);
	}
}
Example #2
int
dm_find_session_and_lock(
	dm_sessid_t	sid,
	dm_session_t	**sessionpp,
	int		*lcp)		/* addr of returned lock cookie */
{
	int		error;

	for (;;) {
		*lcp = mutex_spinlock(&dm_session_lock);

		if ((error = dm_find_session(sid, sessionpp)) != 0) {
			mutex_spinunlock(&dm_session_lock, *lcp);
			return(error);
		}
		if (nested_spintrylock(&(*sessionpp)->sn_qlock)) {
			nested_spinunlock(&dm_session_lock);
			return(0);	/* success */
		}

		/* If the second lock is not available, drop the first and
		   start over.  This gives the CPU a chance to process any
		   interrupts, and also allows processes which want a sn_qlock
		   for a different session to proceed.
		*/

		mutex_spinunlock(&dm_session_lock, *lcp);
	}
}
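Examples #1 and #2 share one contract: the helper returns with the per-object nested lock held and passes back the lock cookie obtained from the outer mutex_spinlock(), which the caller later hands to mutex_spinunlock() when releasing the inner lock (see dm_handle_to_vp() in Example #9). A minimal caller-side sketch of that contract; the wrapper function body here is hypothetical:

static void
example_fsreg_caller(fsid_t *fsidp)
{
	dm_fsreg_t	*fsrp;
	int		lc;		/* lock cookie from the helper */

	/* On success, fsrp->fr_lock is held and lc holds the cookie that
	   was returned by mutex_spinlock(&dm_reg_lock) inside the helper.
	*/
	if ((fsrp = dm_find_fsreg_and_lock(fsidp, &lc)) == NULL)
		return;

	/* ... examine or update the dm_fsreg_t while fr_lock is held ... */

	/* Release the nested lock using the cookie from the outer lock. */
	mutex_spinunlock(&fsrp->fr_lock, lc);
}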
Example #3
/*
 * hub_piomap_free destroys a caddr_t-to-xtalk pio mapping and frees
 * any associated mapping resources.
 *
 * If this piomap was handled with a small window, or if it was handled
 * in a big window that's still in use by someone else, then there's
 * nothing to do.  On the other hand, if this mapping was handled
 * with a big window, AND if we were the final user of that mapping,
 * then destroy the mapping.
 */
void
hub_piomap_free(hub_piomap_t hub_piomap)
{
    devfs_handle_t hubv;
    hubinfo_t hubinfo;
    nasid_t nasid;
    int s;

    /*
     * Small windows are permanently mapped to corresponding widgets,
     * so there are no resources to free.
     */
    if (!(hub_piomap->hpio_flags & HUB_PIOMAP_IS_BIGWINDOW))
        return;

    ASSERT(hub_piomap->hpio_flags & HUB_PIOMAP_IS_VALID);
    ASSERT(hub_piomap->hpio_holdcnt > 0);

    hubv = hub_piomap->hpio_hub;
    hubinfo_get(hubv, &hubinfo);
    nasid = hubinfo->h_nasid;

    s = mutex_spinlock(&hubinfo->h_bwlock);

    /*
     * If this is the last hold on this mapping, free it.
     */
    if (--hub_piomap->hpio_holdcnt == 0) {
        IIO_ITTE_DISABLE(nasid, hub_piomap->hpio_bigwin_num );

        if (hub_piomap->hpio_flags & HUB_PIOMAP_IS_FIXED) {
            hub_piomap->hpio_flags &= ~(HUB_PIOMAP_IS_VALID | HUB_PIOMAP_IS_FIXED);
            hubinfo->h_num_big_window_fixed--;
            ASSERT(hubinfo->h_num_big_window_fixed >= 0);
        } else
            hub_piomap->hpio_flags &= ~HUB_PIOMAP_IS_VALID;

        (void)sv_signal(&hubinfo->h_bwwait);
    }

    mutex_spinunlock(&hubinfo->h_bwlock, s);
}
Example #4
ulong_t
atealloc(
	struct map *mp,
	size_t size)
{
	register unsigned int a;
	register struct map *bp;
	register unsigned long s;

	ASSERT(size >= 0);

	if (size == 0)
		return((ulong_t) NULL);

	s = mutex_spinlock(maplock(mp));

	for (bp = mapstart(mp); bp->m_size; bp++) {
		if (bp->m_size >= size) {
			a = bp->m_addr;
			bp->m_addr += size;
			if ((bp->m_size -= size) == 0) {
				do {
					bp++;
					(bp-1)->m_addr = bp->m_addr;
				} while ((((bp-1)->m_size) = (bp->m_size)));
				mapsize(mp)++;
			}

			ASSERT(bp->m_size < 0x80000000);
			mutex_spinunlock(maplock(mp), s);
			return(a);
		}
	}

	/*
	 * We did not get what we need and we cannot sleep.
	 */
	mutex_spinunlock(maplock(mp), s);
	return(0);
}
Example #5
int
hubii_ixtt_get(devfs_handle_t widget_vhdl, ii_ixtt_u_t *ixtt)
{
    xwidget_info_t		widget_info = xwidget_info_get(widget_vhdl);
    devfs_handle_t		hub_vhdl    = xwidget_info_master_get(widget_info);
    hubinfo_t		hub_info = 0;
    nasid_t			nasid;
    int			s;

    /* Use the nasid from the hub info hanging off the hub vertex
     * and widget number from the widget vertex
     */
    hubinfo_get(hub_vhdl, &hub_info);
    /* Being over cautious by grabbing a lock */
    s 	= mutex_spinlock(&hub_info->h_bwlock);
    nasid 	= hub_info->h_nasid;

    ixtt->ii_ixtt_regval = REMOTE_HUB_L(nasid, IIO_IXTT);

    mutex_spinunlock(&hub_info->h_bwlock, s);
    return 0;
}
Example #6
/* Interface to allow special drivers to set hub specific
 * device flags.
 * Return 0 on failure, 1 on success
 */
int
hub_device_flags_set(devfs_handle_t	widget_vhdl,
                     hub_widget_flags_t	flags)
{
    xwidget_info_t		widget_info = xwidget_info_get(widget_vhdl);
    xwidgetnum_t		widget_num  = xwidget_info_id_get(widget_info);
    devfs_handle_t		hub_vhdl    = xwidget_info_master_get(widget_info);
    hubinfo_t		hub_info = 0;
    nasid_t			nasid;
    int			s,rv;

    /* Use the nasid from the hub info hanging off the hub vertex
     * and widget number from the widget vertex
     */
    hubinfo_get(hub_vhdl, &hub_info);
    /* Being over cautious by grabbing a lock */
    s 	= mutex_spinlock(&hub_info->h_bwlock);
    nasid 	= hub_info->h_nasid;
    rv 	= hub_widget_flags_set(nasid,widget_num,flags);
    mutex_spinunlock(&hub_info->h_bwlock, s);

    return rv;
}
Example #7
void
dm_clear_fsreg(
	dm_session_t	*s)
{
	dm_fsreg_t	*fsrp;
	int		event;
	int		lc;			/* lock cookie */

	lc = mutex_spinlock(&dm_reg_lock);

	for (fsrp = dm_registers; fsrp != NULL; fsrp = fsrp->fr_next) {
		nested_spinlock(&fsrp->fr_lock);
		for (event = 0; event < DM_EVENT_MAX; event++) {
			if (fsrp->fr_sessp[event] != s)
				continue;
			fsrp->fr_sessp[event] = NULL;
			if (event == DM_EVENT_DESTROY)
				bzero(&fsrp->fr_rattr, sizeof(fsrp->fr_rattr));
		}
		nested_spinunlock(&fsrp->fr_lock);
	}

	mutex_spinunlock(&dm_reg_lock, lc);
}
Example #8
/*
 * Look up an inode by number in the given file system.
 * The inode is looked up in the hash table for the file system
 * represented by the mount point parameter mp.  Each bucket of
 * the hash table is guarded by an individual semaphore.
 *
 * If the inode is found in the hash table, its corresponding vnode
 * is obtained with a call to vn_get().  This call takes care of
 * coordination with the reclamation of the inode and vnode.  Note
 * that the vmap structure is filled in while holding the hash lock.
 * This gives us the state of the inode/vnode when we found it and
 * is used for coordination in vn_get().
 *
 * If it is not in core, read it in from the file system's device and
 * add the inode into the hash table.
 *
 * The inode is locked according to the value of the lock_flags parameter.
 * This flag parameter indicates how and if the inode's IO lock and inode lock
 * should be taken.
 *
 * mp -- the mount point structure for the current file system.  It points
 *       to the inode hash table.
 * tp -- a pointer to the current transaction if there is one.  This is
 *       simply passed through to the xfs_iread() call.
 * ino -- the number of the inode desired.  This is the unique identifier
 *        within the file system for the inode being requested.
 * lock_flags -- flags indicating how to lock the inode.  See the comment
 *		 for xfs_ilock() for a list of valid values.
 * bno -- the block number starting the buffer containing the inode,
 *	  if known (as by bulkstat), else 0.
 */
int
xfs_iget(
	xfs_mount_t	*mp,
	xfs_trans_t	*tp,
	xfs_ino_t	ino,
	uint		flags,
	uint		lock_flags,
	xfs_inode_t	**ipp,
	xfs_daddr_t	bno)
{
	xfs_ihash_t	*ih;
	xfs_inode_t	*ip;
	xfs_inode_t	*iq;
	xfs_vnode_t	*vp;
	ulong		version;
	int		error;
	/* REFERENCED */
	int		newnode;
	xfs_chash_t	*ch;
	xfs_chashlist_t	*chl, *chlnew;
	vmap_t		vmap;
	SPLDECL(s);

	XFS_STATS_INC(xs_ig_attempts);

	ih = XFS_IHASH(mp, ino);

again:
	read_lock(&ih->ih_lock);

	for (ip = ih->ih_next; ip != NULL; ip = ip->i_next) {
		if (ip->i_ino == ino) {
			vp = XFS_ITOV(ip);
			VMAP(vp, vmap);
			/*
			 * Inode cache hit: if ip is not at the front of
			 * its hash chain, move it there now.
			 * Do this with the lock held for update, but
			 * do statistics after releasing the lock.
			 */
			if (ip->i_prevp != &ih->ih_next
			    && rwlock_trypromote(&ih->ih_lock)) {

				if ((iq = ip->i_next)) {
					iq->i_prevp = ip->i_prevp;
				}
				*ip->i_prevp = iq;
				iq = ih->ih_next;
				iq->i_prevp = &ip->i_next;
				ip->i_next = iq;
				ip->i_prevp = &ih->ih_next;
				ih->ih_next = ip;
				write_unlock(&ih->ih_lock);
			} else {
				read_unlock(&ih->ih_lock);
			}

			XFS_STATS_INC(xs_ig_found);

			/*
			 * Get a reference to the vnode/inode.
			 * vn_get() takes care of coordination with
			 * the file system inode release and reclaim
			 * functions.  If it returns NULL, the inode
			 * has been reclaimed so just start the search
			 * over again.  We probably won't find it,
			 * but we could be racing with another cpu
			 * looking for the same inode so we have to at
			 * least look.
			 */
			if (!(vp = vn_get(vp, &vmap))) {
				XFS_STATS_INC(xs_ig_frecycle);
				goto again;
			}

			if (lock_flags != 0) {
				ip->i_flags &= ~XFS_IRECLAIM;
				xfs_ilock(ip, lock_flags);
			}

			newnode = (ip->i_d.di_mode == 0);
			if (newnode) {
				xfs_iocore_inode_reinit(ip);
			}
			ip->i_flags &= ~XFS_ISTALE;

			vn_trace_exit(vp, "xfs_iget.found",
						(inst_t *)__return_address);
			goto return_ip;
		}
	}

	/*
	 * Inode cache miss: save the hash chain version stamp and unlock
	 * the chain, so we don't deadlock in vn_alloc.
	 */
	XFS_STATS_INC(xs_ig_missed);

	version = ih->ih_version;

	read_unlock(&ih->ih_lock);

	/*
	 * Read the disk inode attributes into a new inode structure and get
	 * a new vnode for it. This should also initialize i_ino and i_mount.
	 */
	error = xfs_iread(mp, tp, ino, &ip, bno);
	if (error) {
		return error;
	}

	error = xfs_vn_allocate(mp, ip, &vp);
	if (error) {
		return error;
	}
	vn_trace_exit(vp, "xfs_iget.alloc", (inst_t *)__return_address);

	xfs_inode_lock_init(ip, vp);
	xfs_iocore_inode_init(ip);

	if (lock_flags != 0) {
		xfs_ilock(ip, lock_flags);
	}

	/*
	 * Put ip on its hash chain, unless someone else hashed a duplicate
	 * after we released the hash lock.
	 */
	write_lock(&ih->ih_lock);

	if (ih->ih_version != version) {
		for (iq = ih->ih_next; iq != NULL; iq = iq->i_next) {
			if (iq->i_ino == ino) {
				write_unlock(&ih->ih_lock);
				xfs_idestroy(ip);

				XFS_STATS_INC(xs_ig_dup);
				goto again;
			}
		}
	}

	/*
	 * These values _must_ be set before releasing ihlock!
	 */
	ip->i_hash = ih;
	if ((iq = ih->ih_next)) {
		iq->i_prevp = &ip->i_next;
	}
	ip->i_next = iq;
	ip->i_prevp = &ih->ih_next;
	ih->ih_next = ip;
	ip->i_udquot = ip->i_gdquot = NULL;
	ih->ih_version++;

	write_unlock(&ih->ih_lock);

	/*
	 * put ip on its cluster's hash chain
	 */
	ASSERT(ip->i_chash == NULL && ip->i_cprev == NULL &&
	       ip->i_cnext == NULL);

	chlnew = NULL;
	ch = XFS_CHASH(mp, ip->i_blkno);
 chlredo:
	s = mutex_spinlock(&ch->ch_lock);
	for (chl = ch->ch_list; chl != NULL; chl = chl->chl_next) {
		if (chl->chl_blkno == ip->i_blkno) {

			/* insert this inode into the doubly-linked list
			 * where chl points */
			if ((iq = chl->chl_ip)) {
				ip->i_cprev = iq->i_cprev;
				iq->i_cprev->i_cnext = ip;
				iq->i_cprev = ip;
				ip->i_cnext = iq;
			} else {
				ip->i_cnext = ip;
				ip->i_cprev = ip;
			}
			chl->chl_ip = ip;
			ip->i_chash = chl;
			break;
		}
	}

	/* no hash list found for this block; add a new hash list */
	if (chl == NULL)  {
		if (chlnew == NULL) {
			mutex_spinunlock(&ch->ch_lock, s);
			ASSERT(xfs_chashlist_zone != NULL);
			chlnew = (xfs_chashlist_t *)
					kmem_zone_alloc(xfs_chashlist_zone,
						KM_SLEEP);
			ASSERT(chlnew != NULL);
			goto chlredo;
		} else {
			ip->i_cnext = ip;
			ip->i_cprev = ip;
			ip->i_chash = chlnew;
			chlnew->chl_ip = ip;
			chlnew->chl_blkno = ip->i_blkno;
			chlnew->chl_next = ch->ch_list;
			ch->ch_list = chlnew;
			chlnew = NULL;
		}
	} else {
		if (chlnew != NULL) {
			kmem_zone_free(xfs_chashlist_zone, chlnew);
		}
	}

	mutex_spinunlock(&ch->ch_lock, s);

	/*
	 * Link ip to its mount and thread it on the mount's inode list.
	 */
	XFS_MOUNT_ILOCK(mp);
	if ((iq = mp->m_inodes)) {
		ASSERT(iq->i_mprev->i_mnext == iq);
		ip->i_mprev = iq->i_mprev;
		iq->i_mprev->i_mnext = ip;
		iq->i_mprev = ip;
		ip->i_mnext = iq;
	} else {
		ip->i_mnext = ip;
		ip->i_mprev = ip;
	}
	mp->m_inodes = ip;

	XFS_MOUNT_IUNLOCK(mp);

	newnode = 1;

 return_ip:
	ASSERT(ip->i_df.if_ext_max ==
	       XFS_IFORK_DSIZE(ip) / sizeof(xfs_bmbt_rec_t));

	ASSERT(((ip->i_d.di_flags & XFS_DIFLAG_REALTIME) != 0) ==
	       ((ip->i_iocore.io_flags & XFS_IOCORE_RT) != 0));

	*ipp = ip;

	/*
	 * If we have a real type for an on-disk inode, we can set ops(&unlock)
	 * now.	 If it's a new inode being created, xfs_ialloc will handle it.
	 */
	XVFS_INIT_VNODE(XFS_MTOVFS(mp), vp, XFS_ITOBHV(ip), 1);

	return 0;
}
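A hedged caller-side sketch of the interface described in the header comment above. The xfs_iget() signature follows this example; the wrapper function, the XFS_ILOCK_SHARED flag value, and the xfs_iput() release call are assumptions about the surrounding XFS code rather than anything shown here:

/* Hypothetical caller: look up inode `ino' in mount `mp' and inspect its
 * on-disk fields while holding the inode lock shared.
 */
int
example_inode_lookup(xfs_mount_t *mp, xfs_ino_t ino)
{
	xfs_inode_t	*ip;
	int		error;

	/* No transaction, no special flags, inode buffer block unknown (0). */
	error = xfs_iget(mp, NULL, ino, 0, XFS_ILOCK_SHARED, &ip,
			 (xfs_daddr_t)0);
	if (error)
		return error;

	/* ... read ip->i_d.* under the shared inode lock ... */

	/* Assumption: xfs_iput() drops the lock and the vnode reference. */
	xfs_iput(ip, XFS_ILOCK_SHARED);
	return 0;
}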
Example #9
vnode_t	*
dm_handle_to_vp(
	xfs_handle_t	*handlep,
	short		*typep)
{
	dm_fsreg_t	*fsrp;
	vnode_t		*vp;
	short		type;
	int		lc;			/* lock cookie */
	int		error;
	fid_t		*fidp;

	if ((fsrp = dm_find_fsreg_and_lock((fsid_t*)&handlep->ha_fsid, &lc)) == NULL)
		return(NULL);

	if (fsrp->fr_state == DM_STATE_MOUNTING) {
		mutex_spinunlock(&fsrp->fr_lock, lc);
		return(NULL);
	}

	for (;;) {
		if (fsrp->fr_state == DM_STATE_MOUNTED)
			break;
		if (fsrp->fr_state == DM_STATE_UNMOUNTED) {
			if (fsrp->fr_unmount && fsrp->fr_hdlcnt == 0)
				sv_broadcast(&fsrp->fr_queue);
			mutex_spinunlock(&fsrp->fr_lock, lc);
			return(NULL);
		}

		/* Must be DM_STATE_UNMOUNTING. */

		fsrp->fr_hdlcnt++;
		sv_wait(&fsrp->fr_queue, 1, &fsrp->fr_lock, lc);
		lc = mutex_spinlock(&fsrp->fr_lock);
		fsrp->fr_hdlcnt--;
	}

	fsrp->fr_vfscnt++;
	mutex_spinunlock(&fsrp->fr_lock, lc);

	/* Now that the mutex is released, wait until we have access to the
	   vnode.
	*/

	fidp = (fid_t*)&handlep->ha_fid;
	if (fidp->fid_len == 0) {	/* filesystem handle */
		VFS_ROOT(fsrp->fr_vfsp, &vp, error);
	} else {				/* file object handle */
		VFS_VGET(fsrp->fr_vfsp, &vp, fidp, error);
	}

	lc = mutex_spinlock(&fsrp->fr_lock);

	fsrp->fr_vfscnt--;
	if (fsrp->fr_unmount && fsrp->fr_vfscnt == 0)
		sv_broadcast(&fsrp->fr_queue);

	mutex_spinunlock(&fsrp->fr_lock, lc);
	if (error || vp == NULL)
		return(NULL);

	if (fidp->fid_len == 0) {
		type = DM_TDT_VFS;
	} else if (vp->v_type == VREG) {
		type = DM_TDT_REG;
	} else if (vp->v_type == VDIR) {
		type = DM_TDT_DIR;
	} else if (vp->v_type == VLNK) {
		type = DM_TDT_LNK;
	} else {
		type = DM_TDT_OTH;
	}
	*typep = type;
	return(vp);
}
Example #10
static int
dm_waitfor_disp(
	vfs_t		*vfsp,
	dm_tokevent_t	*tevp,
	dm_fsreg_t	**fsrpp,
	int		*lc1p,		/* addr of first returned lock cookie */
	dm_session_t	**sessionpp,
	int		*lc2p)		/* addr of 2nd returned lock cookie */
{
	dm_eventtype_t	event = tevp->te_msg.ev_type;
	dm_session_t	*s;
	dm_fsreg_t	*fsrp;

	if ((fsrp = dm_find_fsreg_and_lock(vfsp->vfs_altfsid, lc1p)) == NULL)
		return(ENOENT);

	/* If no session is registered for this event in the specified
	   filesystem, then sleep interruptibly until one does.
	*/

	for (;;) {
		int	rc = 0;

		/* The dm_find_session_and_lock() call is needed because a
		   session that is in the process of being removed might still
		   be in the dm_fsreg_t structure but won't be in the
		   dm_sessions list.
		*/

		if ((s = fsrp->fr_sessp[event]) != NULL &&
	            dm_find_session_and_lock(s->sn_sessid, &s, lc2p) == 0) {
			break;
		}

		/* No one is currently registered.  DM_EVENT_UNMOUNT events
		   don't wait for anyone to register because the unmount is
		   already past the point of no return.
		*/

		if (event == DM_EVENT_UNMOUNT) {
			mutex_spinunlock(&fsrp->fr_lock, *lc1p);
			return(ENOENT);
		}

		/* Wait until a session registers for disposition of this
		   event.
		*/

		fsrp->fr_dispcnt++;
		dm_link_event(tevp, &fsrp->fr_evt_dispq);

		mp_sv_wait_sig(&fsrp->fr_dispq, 1, &fsrp->fr_lock, *lc1p);
		rc = signal_pending(current);

		*lc1p = mutex_spinlock(&fsrp->fr_lock);
		fsrp->fr_dispcnt--;
		dm_unlink_event(tevp, &fsrp->fr_evt_dispq);
		if (rc) {		/* if signal was received */
			mutex_spinunlock(&fsrp->fr_lock, *lc1p);
			return(EINTR);
		}
	}
	*sessionpp = s;
	*fsrpp = fsrp;
	return(0);
}
Example #11
void
dm_change_fsys_entry(
	vfs_t		*vfsp,
	dm_fsstate_t	newstate)
{
	dm_fsreg_t	*fsrp;
	int		seq_error;
	int		lc;			/* lock cookie */

	/* Find the filesystem referenced by the vfsp's fsid_t.  This should
	   always succeed.
	*/

	if ((fsrp = dm_find_fsreg_and_lock(vfsp->vfs_altfsid, &lc)) == NULL) {
		panic("dm_change_fsys_entry: can't find DMAPI fsrp for "
			"vfsp %p\n", vfsp);
	}

	/* Make sure that the new state is acceptable given the current state
	   of the filesystem.  Any error here is a major DMAPI/filesystem
	   screwup.
	*/

	seq_error = 0;
	switch (newstate) {
	case DM_STATE_MOUNTED:
		if (fsrp->fr_state != DM_STATE_MOUNTING &&
		    fsrp->fr_state != DM_STATE_UNMOUNTING) {
			seq_error++;
		}
		break;
	case DM_STATE_UNMOUNTING:
		if (fsrp->fr_state != DM_STATE_MOUNTED)
			seq_error++;
		break;
	case DM_STATE_UNMOUNTED:
		if (fsrp->fr_state != DM_STATE_UNMOUNTING)
			seq_error++;
		break;
	default:
		seq_error++;
		break;
	}
	if (seq_error) {
		panic("dm_change_fsys_entry: DMAPI sequence error: old state "
			"%d, new state %d, fsrp %p\n", fsrp->fr_state,
			newstate, fsrp);
	}

	/* If the old state was DM_STATE_UNMOUNTING, then processes could be
	   sleeping in dm_handle_to_vp() waiting for their DM_NO_TOKEN handles
	   to be translated to vnodes.  Wake them up so that they either
	   continue (new state is DM_STATE_MOUNTED) or fail (new state is
	   DM_STATE_UNMOUNTED).
	*/

	if (fsrp->fr_state == DM_STATE_UNMOUNTING) {
		if (fsrp->fr_hdlcnt)
			sv_broadcast(&fsrp->fr_queue);
	}

	/* Change the filesystem's mount state to its new value. */

	fsrp->fr_state = newstate;
	fsrp->fr_tevp = NULL;		/* not valid after DM_STATE_MOUNTING */

	/* If the new state is DM_STATE_UNMOUNTING, wait until any application
	   threads currently in the process of making VFS_VGET and VFS_ROOT
	   calls are done before we let this unmount thread continue the
	   unmount.  (We want to make sure that the unmount will see these
	   vnode references during its scan.)
	*/

	if (newstate == DM_STATE_UNMOUNTING) {
		while (fsrp->fr_vfscnt) {
			fsrp->fr_unmount++;
			sv_wait(&fsrp->fr_queue, 1, &fsrp->fr_lock, lc);
			lc = mutex_spinlock(&fsrp->fr_lock);
			fsrp->fr_unmount--;
		}
	}

	mutex_spinunlock(&fsrp->fr_lock, lc);
}
Example #12
void
dm_remove_fsys_entry(
	vfs_t		*vfsp)
{
	dm_fsreg_t	**fsrpp;
	dm_fsreg_t	*fsrp;
	int		lc;			/* lock cookie */

	/* Find the filesystem referenced by the vfsp's fsid_t and dequeue
	   it after verifying that the fr_state shows a filesystem that is
	   either mounting or unmounted.
	*/

	lc = mutex_spinlock(&dm_reg_lock);

	fsrpp = &dm_registers;
	while ((fsrp = *fsrpp) != NULL) {
		if (!bcmp(&fsrp->fr_fsid, vfsp->vfs_altfsid, sizeof(fsrp->fr_fsid)))
			break;
		fsrpp = &fsrp->fr_next;
	}
	if (fsrp == NULL) {
		mutex_spinunlock(&dm_reg_lock, lc);
		panic("dm_remove_fsys_entry: can't find DMAPI fsrp for "
			"vfsp %p\n", vfsp);
	}

	nested_spinlock(&fsrp->fr_lock);

	/* Verify that it makes sense to remove this entry. */

	if (fsrp->fr_state != DM_STATE_MOUNTING &&
	    fsrp->fr_state != DM_STATE_UNMOUNTED) {
		nested_spinunlock(&fsrp->fr_lock);
		mutex_spinunlock(&dm_reg_lock, lc);
		panic("dm_remove_fsys_entry: DMAPI sequence error: old state "
			"%d, fsrp %p\n", fsrp->fr_state, fsrp);
	}

	*fsrpp = fsrp->fr_next;
	dm_fsys_cnt--;

	nested_spinunlock(&dm_reg_lock);

	/* Since the filesystem is about to finish unmounting, we must be sure
	   that no vnodes are being referenced within the filesystem before we
	   let this event thread continue.  If the filesystem is currently in
	   state DM_STATE_MOUNTING, then we know by definition that there can't
	   be any references.  If the filesystem is DM_STATE_UNMOUNTED, then
	   any application threads referencing handles with DM_NO_TOKEN should
	   have already been awakened by dm_change_fsys_entry and should be
	   long gone by now.  Just in case they haven't yet left, sleep here
	   until they are really gone.
	*/

	while (fsrp->fr_hdlcnt) {
		fsrp->fr_unmount++;
		sv_wait(&fsrp->fr_queue, 1, &fsrp->fr_lock, lc);
		lc = mutex_spinlock(&fsrp->fr_lock);
		fsrp->fr_unmount--;
	}
	mutex_spinunlock(&fsrp->fr_lock, lc);

	/* Release all memory. */

#ifdef CONFIG_PROC_FS
	{
	char buf[100];
	sprintf(buf, DMAPI_DBG_PROCFS "/fsreg/0x%p", fsrp);
	remove_proc_entry(buf, NULL);
	}
#endif
	sv_destroy(&fsrp->fr_dispq);
	sv_destroy(&fsrp->fr_queue);
	spinlock_destroy(&fsrp->fr_lock);
	kmem_free(fsrp->fr_msg, fsrp->fr_msgsize);
	kmem_free(fsrp, sizeof(*fsrp));
}
Example #13
int
dm_add_fsys_entry(
	vfs_t		*vfsp,
	dm_tokevent_t	*tevp)
{
	dm_fsreg_t	*fsrp;
	int		msgsize;
	void		*msg;
	int		lc;			/* lock cookie */

	/* Allocate and initialize a dm_fsreg_t structure for the filesystem. */

	msgsize = tevp->te_allocsize - offsetof(dm_tokevent_t, te_event);
	msg = kmem_alloc(msgsize, KM_SLEEP);
	bcopy(&tevp->te_event, msg, msgsize);

	fsrp = kmem_zalloc(sizeof(*fsrp), KM_SLEEP);
	fsrp->fr_vfsp = vfsp;
	fsrp->fr_tevp = tevp;
	fsrp->fr_fsid = *vfsp->vfs_altfsid;
	fsrp->fr_msg = msg;
	fsrp->fr_msgsize = msgsize;
	fsrp->fr_state = DM_STATE_MOUNTING;
	sv_init(&fsrp->fr_dispq, SV_DEFAULT, "fr_dispq");
	sv_init(&fsrp->fr_queue, SV_DEFAULT, "fr_queue");
	spinlock_init(&fsrp->fr_lock, "fr_lock");

	/* If no other mounted DMAPI filesystem already has this same
	   fsid_t, then add this filesystem to the list.
	*/

	lc = mutex_spinlock(&dm_reg_lock);

	if (!dm_find_fsreg(vfsp->vfs_altfsid)) {
		fsrp->fr_next = dm_registers;
		dm_registers = fsrp;
		dm_fsys_cnt++;
#ifdef CONFIG_PROC_FS
		{
		char buf[100];
		struct proc_dir_entry *entry;

		sprintf(buf, DMAPI_DBG_PROCFS "/fsreg/0x%p", fsrp);
		entry = create_proc_read_entry(buf, 0, 0, fsreg_read_pfs, fsrp);
		entry->owner = THIS_MODULE;
		}
#endif
		mutex_spinunlock(&dm_reg_lock, lc);
		return(0);
	}

	/* A fsid_t collision occurred, so prevent this new filesystem from
	   mounting.
	*/

	mutex_spinunlock(&dm_reg_lock, lc);

	sv_destroy(&fsrp->fr_dispq);
	sv_destroy(&fsrp->fr_queue);
	spinlock_destroy(&fsrp->fr_lock);
	kmem_free(fsrp->fr_msg, fsrp->fr_msgsize);
	kmem_free(fsrp, sizeof(*fsrp));
	return(EBUSY);
}
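Taken together, Examples #11 through #13 imply a fixed call ordering over a filesystem's lifetime: register in DM_STATE_MOUNTING, move to DM_STATE_MOUNTED, later to DM_STATE_UNMOUNTING and DM_STATE_UNMOUNTED, and only then remove the entry. The sketch below only restates the transitions that dm_change_fsys_entry() and dm_remove_fsys_entry() assert; the mount and unmount hooks themselves are hypothetical:

/* Hypothetical mount/unmount paths showing the registration order that the
 * state checks in dm_change_fsys_entry() and dm_remove_fsys_entry() expect.
 */
static int
example_mount_path(vfs_t *vfsp, dm_tokevent_t *tevp)
{
	int	error;

	/* Registers the filesystem in state DM_STATE_MOUNTING. */
	if ((error = dm_add_fsys_entry(vfsp, tevp)) != 0)
		return error;		/* EBUSY on an fsid_t collision */

	/* ... deliver the mount event; once the mount is accepted ... */
	dm_change_fsys_entry(vfsp, DM_STATE_MOUNTED);
	return 0;
}

static void
example_unmount_path(vfs_t *vfsp)
{
	dm_change_fsys_entry(vfsp, DM_STATE_UNMOUNTING);
	/* ... perform the unmount; once it is past the point of no return ... */
	dm_change_fsys_entry(vfsp, DM_STATE_UNMOUNTED);
	dm_remove_fsys_entry(vfsp);	/* requires MOUNTING or UNMOUNTED */
}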
Example #14
/* ARGSUSED */
hub_piomap_t
hub_piomap_alloc(devfs_handle_t dev,	/* set up mapping for this device */
                 device_desc_t dev_desc,	/* device descriptor */
                 iopaddr_t xtalk_addr,	/* map for this xtalk_addr range */
                 size_t byte_count,
                 size_t byte_count_max, 	/* maximum size of a mapping */
                 unsigned flags)		/* defined in sys/pio.h */
{
    xwidget_info_t widget_info = xwidget_info_get(dev);
    xwidgetnum_t widget = xwidget_info_id_get(widget_info);
    devfs_handle_t hubv = xwidget_info_master_get(widget_info);
    hubinfo_t hubinfo;
    hub_piomap_t bw_piomap;
    int bigwin, free_bw_index;
    nasid_t nasid;
    volatile hubreg_t junk;
    int s;

    /* sanity check */
    if (byte_count_max > byte_count)
        return(NULL);

    hubinfo_get(hubv, &hubinfo);

    /* If xtalk_addr range is mapped by a small window, we don't have
     * to do much
     */
    if (xtalk_addr + byte_count <= SWIN_SIZE)
        return(hubinfo_swin_piomap_get(hubinfo, (int)widget));

    /* We need to use a big window mapping.  */

    /*
     * TBD: Allow requests that would consume multiple big windows --
     * split the request up and use multiple mapping entries.
     * For now, reject requests that span big windows.
     */
    if ((xtalk_addr % BWIN_SIZE) + byte_count > BWIN_SIZE)
        return(NULL);


    /* Round xtalk address down for big window alignment */
    xtalk_addr = xtalk_addr & ~(BWIN_SIZE-1);

    /*
     * Check to see if an existing big window mapping will suffice.
     */
tryagain:
    free_bw_index = -1;
    s = mutex_spinlock(&hubinfo->h_bwlock);
    for (bigwin=0; bigwin < HUB_NUM_BIG_WINDOW; bigwin++) {
        bw_piomap = hubinfo_bwin_piomap_get(hubinfo, bigwin);

        /* If mapping is not valid, skip it */
        if (!(bw_piomap->hpio_flags & HUB_PIOMAP_IS_VALID)) {
            free_bw_index = bigwin;
            continue;
        }

        /*
         * If mapping is UNFIXED, skip it.  We don't allow sharing
         * of UNFIXED mappings, because this would allow starvation.
         */
        if (!(bw_piomap->hpio_flags & HUB_PIOMAP_IS_FIXED))
            continue;

        if ( xtalk_addr == bw_piomap->hpio_xtalk_info.xp_xtalk_addr &&
                widget == bw_piomap->hpio_xtalk_info.xp_target) {
            bw_piomap->hpio_holdcnt++;
            mutex_spinunlock(&hubinfo->h_bwlock, s);
            return(bw_piomap);
        }
    }

    /*
     * None of the existing big window mappings will work for us --
     * we need to establish a new mapping.
     */

    /* Ensure that we don't consume all big windows with FIXED mappings */
    if (flags & PIOMAP_FIXED) {
        if (hubinfo->h_num_big_window_fixed < HUB_NUM_BIG_WINDOW-1) {
            ASSERT(free_bw_index >= 0);
            hubinfo->h_num_big_window_fixed++;
        } else {
            bw_piomap = NULL;
            goto done;
        }
    } else { /* PIOMAP_UNFIXED */
        if (free_bw_index < 0) {
            if (flags & PIOMAP_NOSLEEP) {
                bw_piomap = NULL;
                goto done;
            }

            sv_wait(&hubinfo->h_bwwait, PZERO, &hubinfo->h_bwlock, s);
            goto tryagain;
        }
    }


    /* OK!  Allocate big window free_bw_index for this mapping. */
    /*
     * The code below does a PIO write to setup an ITTE entry.
     * We need to prevent other CPUs from seeing our updated memory
     * shadow of the ITTE (in the piomap) until the ITTE entry is
     * actually set up; otherwise, another CPU might attempt a PIO
     * prematurely.
     *
     * Also, the only way we can know that an entry has been received
     * by the hub and can be used by future PIO reads/writes is by
     * reading back the ITTE entry after writing it.
     *
     * For these two reasons, we PIO read back the ITTE entry after
     * we write it.
     */

    nasid = hubinfo->h_nasid;
    IIO_ITTE_PUT(nasid, free_bw_index, HUB_PIO_MAP_TO_MEM, widget, xtalk_addr);
    junk = HUB_L(IIO_ITTE_GET(nasid, free_bw_index));

    bw_piomap = hubinfo_bwin_piomap_get(hubinfo, free_bw_index);
    bw_piomap->hpio_xtalk_info.xp_dev = dev;
    bw_piomap->hpio_xtalk_info.xp_target = widget;
    bw_piomap->hpio_xtalk_info.xp_xtalk_addr = xtalk_addr;
    bw_piomap->hpio_xtalk_info.xp_kvaddr = (caddr_t)NODE_BWIN_BASE(nasid, free_bw_index);
    bw_piomap->hpio_holdcnt++;
    bw_piomap->hpio_bigwin_num = free_bw_index;

    if (flags & PIOMAP_FIXED)
        bw_piomap->hpio_flags |= HUB_PIOMAP_IS_VALID | HUB_PIOMAP_IS_FIXED;
    else
        bw_piomap->hpio_flags |= HUB_PIOMAP_IS_VALID;

done:
    mutex_spinunlock(&hubinfo->h_bwlock, s);
    return(bw_piomap);
}
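A short pairing sketch for the allocator above and hub_piomap_free() from Example #3. A big-window hub_piomap_alloc() takes a hold on the mapping (hpio_holdcnt) that must be matched by a hub_piomap_free(); small-window mappings are permanent, so the free is a no-op for them. The wrapper function and its parameters below are placeholders:

/* Hypothetical big-window PIO user: map an xtalk address range, perform the
 * PIOs, then drop the hold so the window can be torn down on the last free.
 */
static void
example_pio_window(devfs_handle_t dev, device_desc_t dev_desc,
                   iopaddr_t xtalk_addr, size_t len)
{
    hub_piomap_t piomap;

    /* byte_count_max may not exceed byte_count (see the sanity check). */
    piomap = hub_piomap_alloc(dev, dev_desc, xtalk_addr, len, len,
                              PIOMAP_FIXED);
    if (piomap == NULL)
        return;

    /* ... issue PIOs through the mapped range ... */

    hub_piomap_free(piomap);    /* drops the hold taken by the alloc */
}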
Example #15
int
dm_getall_disp(
	dm_sessid_t	sid,
	size_t		buflen,
	void		*bufp,
	size_t		*rlenp)
{
	dm_session_t	*s;		/* pointer to session given by sid */
	int		lc1;		/* first lock cookie */
	int		lc2;		/* second lock cookie */
	int		totalsize;
	int		msgsize;
	int		fsyscnt;
	dm_dispinfo_t	*prevmsg;
	dm_fsreg_t	*fsrp;
	int		error;
	char		*kbuf;

	int tmp3;
	int tmp4;

	/* Because the dm_getall_disp structure contains a __u64 field,
	   make sure that the buffer provided by the caller is aligned so
	   that such fields can be read successfully.
	*/

	if (((__psint_t)bufp & (sizeof(__u64) - 1)) != 0)
		return(EFAULT);

	/* Compute the size of a dm_dispinfo structure, rounding up to an
	   8-byte boundary so that any subsequent structures will also be
	   aligned.
	*/

#if 0
	/* XXX  ug, what is going on here? */
	msgsize = (sizeof(dm_dispinfo_t) + FSHSIZE + sizeof(uint64_t) - 1) &
		~(sizeof(uint64_t) - 1);
#else
	tmp3 = sizeof(dm_dispinfo_t) + FSHSIZE;
	tmp3 += sizeof(__u64);
	tmp3 -= 1;
	tmp4 = ~(sizeof(__u64) - 1);
	msgsize = tmp3 & tmp4;
#endif

	/* Loop until we can get the right amount of temp space, being careful
	   not to hold a mutex during the allocation.  Usually only one trip.
	*/

	for (;;) {
		if ((fsyscnt = dm_fsys_cnt) == 0) {
			/*if (dm_cpoutsizet(rlenp, 0))*/
			if (put_user(0,rlenp))
				return(EFAULT);
			return(0);
		}
		kbuf = kmem_alloc(fsyscnt * msgsize, KM_SLEEP);

		lc1 = mutex_spinlock(&dm_reg_lock);
		if (fsyscnt == dm_fsys_cnt)
			break;

		mutex_spinunlock(&dm_reg_lock, lc1);
		kmem_free(kbuf, fsyscnt * msgsize);
	}

	/* Find the indicated session and lock it. */

	if ((error = dm_find_session_and_lock(sid, &s, &lc2)) != 0) {
		mutex_spinunlock(&dm_reg_lock, lc1);
		kmem_free(kbuf, fsyscnt * msgsize);
		return(error);
	}

	/* Create a dm_dispinfo structure for each filesystem in which
	   this session has at least one event selected for disposition.
	*/

	totalsize = 0;		/* total bytes to transfer to the user */
	prevmsg = NULL;

	for (fsrp = dm_registers; fsrp; fsrp = fsrp->fr_next) {
		dm_dispinfo_t	*disp;
		int		event;
		int		found;

		disp = (dm_dispinfo_t *)(kbuf + totalsize);

		DMEV_ZERO(disp->di_eventset);

		for (event = 0, found = 0; event < DM_EVENT_MAX; event++) {
			if (fsrp->fr_sessp[event] != s)
				continue;
			DMEV_SET(event, disp->di_eventset);
			found++;
		}
		if (!found)
			continue;

		disp->_link = 0;
		disp->di_fshandle.vd_offset = sizeof(dm_dispinfo_t);
		disp->di_fshandle.vd_length = FSHSIZE;

		bcopy(&fsrp->fr_fsid,
			(char *)disp + disp->di_fshandle.vd_offset,
			disp->di_fshandle.vd_length);

		if (prevmsg)
			prevmsg->_link = msgsize;

		prevmsg = disp;
		totalsize += msgsize;
	}
	mutex_spinunlock(&s->sn_qlock, lc2);	/* reverse cookie order */
	mutex_spinunlock(&dm_reg_lock, lc1);

	if (put_user(totalsize, rlenp)) {
		error = EFAULT;
	} else if (totalsize > buflen) {	/* no more room */
		error = E2BIG;
	} else if (totalsize && copy_to_user(bufp, kbuf, totalsize)) {
		error = EFAULT;
	} else {
		error = 0;
	}

	kmem_free(kbuf, fsyscnt * msgsize);
	return(error);
}
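For reference, the tmp3/tmp4 computation above is simply a round-up of sizeof(dm_dispinfo_t) + FSHSIZE to the next multiple of sizeof(__u64). Assuming, purely for illustration, that the sum came to 52 bytes, msgsize would be (52 + 8 - 1) & ~(8 - 1) = 59 & ~7 = 56, so each per-filesystem record placed in kbuf starts on an 8-byte boundary as the earlier alignment comment requires.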
Example #16
void
dm_uninit(void)
{
	int lc;
	dm_session_t *s;

	static void unlink_session( dm_session_t *s);

	if(dm_sessions_active) {
		printk(KERN_ERR "xfs dmapi is being unloaded while there are active sessions\n");

		while( dm_sessions_active ) {
			/* this for-loop mostly from dm_find_session_and_lock() */
			for (;;) {
				s = dm_sessions;
				lc = mutex_spinlock(&dm_session_lock);

				if (nested_spintrylock(&s->sn_qlock)) {
					nested_spinunlock(&dm_session_lock);
					break; /* success */
				}
				mutex_spinunlock(&dm_session_lock, lc);
			}/* for */

			/* this cleanup stuff mostly from dm_destroy_session() */
			if (s->sn_newq.eq_head || s->sn_readercnt || s->sn_delq.eq_head) {
				/* busy session */
				printk(KERN_ERR "   sessid %d (%s) is busy\n", s->sn_sessid, s->sn_info);
				nested_spinunlock(&s->sn_qlock);
				mutex_spinunlock(&dm_session_lock, lc);
				break; /* do not continue */
			}
			else {
				unlink_session(s);
				nested_spinunlock(&s->sn_qlock);
				mutex_spinunlock(&dm_session_lock, lc);
				dm_clear_fsreg(s);
				spinlock_destroy(&s->sn_qlock);
				sv_destroy(&s->sn_readerq);
				sv_destroy(&s->sn_writerq);
				kmem_free(s, sizeof *s);
				printk(KERN_ERR "   sessid %d (%s) destroyed\n", s->sn_sessid, s->sn_info);
			}
		}/*while*/
	}

	/* If any of these are still locked, then we should not allow
	 * an unload.
	 * XXX can any of these be held when no sessions exist?
	 *   - yes, dm_session_lock is acquired prior to adding a new session
	 *   - no, dm_token_lock is only held when a session is locked
	 *   - ?, dm_reg_lock (XXX lookup this one)
	 */

	if( spin_is_locked(&dm_session_lock) )
		printk(KERN_ERR "xfs dmapi is being unloaded while dm_session_lock is held\n");
	if( spin_is_locked(&dm_token_lock) )
		printk(KERN_ERR "xfs dmapi is being unloaded while dm_token_lock is held\n");
	if( spin_is_locked(&dm_reg_lock) )
		printk(KERN_ERR "xfs dmapi is being unloaded while dm_reg_lock is held\n");

	spinlock_destroy(&dm_session_lock);
	spinlock_destroy(&dm_token_lock);
	spinlock_destroy(&dm_reg_lock);
}
Example #17
/*
 * Free the previously allocated space ``a'' of ``size'' units back into
 * the specified map.  Sort ``a'' into the map and combine with adjacent
 * free entries on one or both ends if possible.
 */
void
atefree(struct map *mp, size_t size, ulong_t a)
{
	register struct map *bp;
	register unsigned int t;
	register unsigned long s;

	ASSERT(size >= 0);

	if (size == 0)
		return;

	bp = mapstart(mp);
	s = mutex_spinlock(maplock(mp));

	for ( ; bp->m_addr<=a && bp->m_size!=0; bp++)
		;
	if (bp>mapstart(mp) && (bp-1)->m_addr+(bp-1)->m_size == a) {
		(bp-1)->m_size += size;
		if (bp->m_addr) {	
			/* m_addr==0 end of map table */
			ASSERT(a+size <= bp->m_addr);
			if (a+size == bp->m_addr) { 

				/* compress adjacent map addr entries */
				(bp-1)->m_size += bp->m_size;
				while (bp->m_size) {
					bp++;
					(bp-1)->m_addr = bp->m_addr;
					(bp-1)->m_size = bp->m_size;
				}
				mapsize(mp)++;
			}
		}
	} else {
		if (a+size == bp->m_addr && bp->m_size) {
			bp->m_addr -= size;
			bp->m_size += size;
		} else {
			ASSERT(size);
			if (mapsize(mp) == 0) {
				mutex_spinunlock(maplock(mp), s);
				printk("atefree : map overflow 0x%p Lost 0x%lx items at 0x%lx",
						(void *)mp, size, a) ;
				return ;
			}
			do {
				t = bp->m_addr;
				bp->m_addr = a;
				a = t;
				t = bp->m_size;
				bp->m_size = size;
				bp++;
			} while ((size = t));
			mapsize(mp)--;
		}
	}
	mutex_spinunlock(maplock(mp), s);
	/*
	 * wake up everyone waiting for space
	 */
	if (mapout(mp))
		;
		/* sv_broadcast(mapout(mp)); */
}
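A minimal pairing sketch for the first-fit resource map handled by atealloc() (Example #4) and atefree() above; the map handle and entry count are placeholders:

/* Hypothetical use of the ATE resource map: carve out a run of entries,
 * program them, then return the run so it can coalesce with its neighbours.
 */
static void
example_ate_usage(struct map *mp)
{
	ulong_t	base;
	size_t	count = 4;		/* arbitrary number of entries */

	base = atealloc(mp, count);
	if (base == 0)			/* map exhausted; atealloc cannot sleep */
		return;

	/* ... program the hardware entries [base, base + count) ... */

	atefree(mp, count, base);	/* merges with adjacent free runs if possible */
}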