Example #1
static int interrupt_test_worker(void *unused) 
{
	int id = ++irqtestcount;
	int it = 0;
	unsigned long flags, flags2;

	printk("ITW: thread %d started.\n", id);

	while(1) {
		__save_flags(flags2);
		if(jiffies % 3) {
			printk("ITW %2d %5d: irqsaving          (%lx)\n", id, it, flags2);
			spin_lock_irqsave(&int_test_spin, flags);
		} else {
			printk("ITW %2d %5d: spin_lock_irqing   (%lx)\n", id, it, flags2);
			spin_lock_irq(&int_test_spin);
		}

		__save_flags(flags2);
		printk("ITW %2d %5d: locked, sv_waiting (%lx).\n", id, it, flags2);
		sv_wait(&int_test_sv, 0, 0);

		__save_flags(flags2);
		printk("ITW %2d %5d: wait finished      (%lx), pausing\n", id, it, flags2);
		set_current_state(TASK_INTERRUPTIBLE);
		schedule_timeout(jiffies & 0xf);
		if(current->state != TASK_RUNNING)
		  printk("ITW:  current->state isn't RUNNING after schedule!\n");
		it++;
	}
}
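
The sv_wait() call in this worker takes no lock argument, so the spinlock was presumably bound to int_test_sv at sv_init() time: the worker enters sv_wait() with the lock held, and sv_wait() queues the thread and releases the lock before sleeping, which is why no explicit unlock appears in the loop. A minimal sketch of the signalling side under that assumption (the function name and calling context are hypothetical):

static void interrupt_test_trigger(void)
{
	/* Wake every worker queued in sv_wait().  With this sv_t API the
	 * broadcaster presumably need not hold the associated lock;
	 * sv_broadcast() synchronizes with sv_wait() internally.
	 */
	sv_broadcast(&int_test_sv);
}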
Example #2
/*
 * purge a vnode from the cache
 * At this point the vnode is guaranteed to have no references (vn_count == 0)
 * The caller has to make sure that there are no ways someone could
 * get a handle (via vn_get) on the vnode (usually done via a mount/vfs lock).
 */
void
vn_purge(
	struct vnode	*vp,
	vmap_t		*vmap)
{
	vn_trace_entry(vp, "vn_purge", (inst_t *)__return_address);

again:
	/*
	 * Check whether vp has already been reclaimed since our caller
	 * sampled its version number; the caller sampled v_number while
	 * holding a filesystem cache lock that vp's VOP_RECLAIM routine
	 * also acquires.
	 */
	VN_LOCK(vp);
	if (vp->v_number != vmap->v_number) {
		VN_UNLOCK(vp, 0);
		return;
	}

	/*
	 * If vp is being reclaimed or inactivated, wait until it is inert,
	 * then proceed.  Can't assume that vnode is actually reclaimed
	 * just because the reclaimed flag is asserted -- a vn_alloc
	 * reclaim can fail.
	 */
	if (vp->v_flag & (VINACT | VRECLM)) {
		ASSERT(vn_count(vp) == 0);
		vp->v_flag |= VWAIT;
		sv_wait(vptosync(vp), PINOD, &vp->v_lock, 0);
		goto again;
	}

	/*
	 * Another process could have raced in and gotten this vnode...
	 */
	if (vn_count(vp) > 0) {
		VN_UNLOCK(vp, 0);
		return;
	}

	XFS_STATS_DEC(vn_active);
	vp->v_flag |= VRECLM;
	VN_UNLOCK(vp, 0);

	/*
	 * Call VOP_RECLAIM and clean vp. The FSYNC_INVAL flag tells
	 * vp's filesystem to flush and invalidate all cached resources.
	 * When vn_reclaim returns, vp should have no private data,
	 * either in a system cache or attached to v_data.
	 */
	if (vn_reclaim(vp) != 0)
		panic("vn_purge: cannot reclaim");

	/*
	 * Wake up anyone waiting for vp to be reclaimed.
	 */
	vn_wakeup(vp);
}
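
vn_wakeup() itself is not shown in this listing. Below is a sketch consistent with the flag protocol above (VWAIT set by sleepers, VRECLM set by the reclaimer); the real function may clear additional flags:

/* Sketch of vn_wakeup(): wake the vn_purge()/vn_wait() sleepers
 * and mark vp inert again.  Illustrative, not the source's body.
 */
void
vn_wakeup(
	struct vnode	*vp)
{
	VN_LOCK(vp);
	if (vp->v_flag & VWAIT)
		sv_broadcast(vptosync(vp));
	vp->v_flag &= ~(VRECLM | VWAIT);
	VN_UNLOCK(vp, 0);
}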
Example #3
int
vn_wait(
	struct vnode	*vp)
{
	VN_LOCK(vp);
	if (vp->v_flag & (VINACT | VRECLM)) {
		vp->v_flag |= VWAIT;
		sv_wait(vptosync(vp), PINOD, &vp->v_lock, 0);
		return 1;
	}
	VN_UNLOCK(vp, 0);
	return 0;
}
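
Note the asymmetry in lock handling: the fast path drops v_lock via VN_UNLOCK(), while the sleeping path hands it to sv_wait(), which releases it, so the caller gets v_lock back unlocked either way. The return value reports whether the function slept, which makes the typical call site a retry loop; a hypothetical caller:

/* Hypothetical call site: VINACT/VRECLM may be set again between
 * the wake-up and the next check, so loop until vp is seen inert.
 */
while (vn_wait(vp))
	continue;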
Example #4
/*
 * This is called to wait for the given dquot to be unpinned.
 * Most of these pin/unpin routines are plagiarized from inode code.
 */
void
xfs_qm_dqunpin_wait(
	xfs_dquot_t	*dqp)
{
	int		s;	/* lock cookie for qi_pinlock; sv_wait() consumes it */

	ASSERT(XFS_DQ_IS_LOCKED(dqp));
	if (dqp->q_pincount == 0) {
		return;
	}

	/*
	 * Give the log a push so we don't wait here too long.
	 */
	xfs_log_force(dqp->q_mount, (xfs_lsn_t)0, XFS_LOG_FORCE);
	s = mutex_spinlock(&(XFS_DQ_TO_QINF(dqp)->qi_pinlock));
	if (dqp->q_pincount == 0) {
		mutex_spinunlock(&(XFS_DQ_TO_QINF(dqp)->qi_pinlock), s);
		return;
	}
	sv_wait(&(dqp->q_pinwait), PINOD,
		&(XFS_DQ_TO_QINF(dqp)->qi_pinlock), s);
}
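
As in the vnode examples, sv_wait() consumes qi_pinlock, so the function returns with no locks held once the dquot is unpinned. The wake-up has to come from the path that drops the last pin reference. A sketch of that counterpart, assuming q_pincount is protected by qi_pinlock as the code above implies (the body is illustrative, not the source's xfs_qm_dqunpin()):

/* Sketch: drop one pin reference; the thread that takes the count
 * to zero wakes everyone sleeping in xfs_qm_dqunpin_wait().
 */
void
xfs_qm_dqunpin(
	xfs_dquot_t	*dqp)
{
	int	s;

	s = mutex_spinlock(&(XFS_DQ_TO_QINF(dqp)->qi_pinlock));
	ASSERT(dqp->q_pincount > 0);
	if (--dqp->q_pincount == 0)
		sv_broadcast(&(dqp->q_pinwait));
	mutex_spinunlock(&(XFS_DQ_TO_QINF(dqp)->qi_pinlock), s);
}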
Example #5
vnode_t	*
dm_handle_to_vp(
	xfs_handle_t	*handlep,
	short		*typep)
{
	dm_fsreg_t	*fsrp;
	vnode_t		*vp;
	short		type;
	int		lc;			/* lock cookie */
	int		error;
	fid_t		*fidp;

	if ((fsrp = dm_find_fsreg_and_lock((fsid_t*)&handlep->ha_fsid, &lc)) == NULL)
		return(NULL);

	if (fsrp->fr_state == DM_STATE_MOUNTING) {
		mutex_spinunlock(&fsrp->fr_lock, lc);
		return(NULL);
	}

	for (;;) {
		if (fsrp->fr_state == DM_STATE_MOUNTED)
			break;
		if (fsrp->fr_state == DM_STATE_UNMOUNTED) {
			if (fsrp->fr_unmount && fsrp->fr_hdlcnt == 0)
				sv_broadcast(&fsrp->fr_queue);
			mutex_spinunlock(&fsrp->fr_lock, lc);
			return(NULL);
		}

		/* Must be DM_STATE_UNMOUNTING. */

		fsrp->fr_hdlcnt++;
		sv_wait(&fsrp->fr_queue, 1, &fsrp->fr_lock, lc);
		lc = mutex_spinlock(&fsrp->fr_lock);
		fsrp->fr_hdlcnt--;
	}

	fsrp->fr_vfscnt++;
	mutex_spinunlock(&fsrp->fr_lock, lc);

	/* Now that the mutex is released, wait until we have access to the
	   vnode.
	*/

	fidp = (fid_t*)&handlep->ha_fid;
	if (fidp->fid_len == 0) {	/* filesystem handle */
		VFS_ROOT(fsrp->fr_vfsp, &vp, error);
	} else {				/* file object handle */
		VFS_VGET(fsrp->fr_vfsp, &vp, fidp, error);
	}

	lc = mutex_spinlock(&fsrp->fr_lock);

	fsrp->fr_vfscnt--;
	if (fsrp->fr_unmount && fsrp->fr_vfscnt == 0)
		sv_broadcast(&fsrp->fr_queue);

	mutex_spinunlock(&fsrp->fr_lock, lc);
	if (error || vp == NULL)
		return(NULL);

	if (fidp->fid_len == 0) {
		type = DM_TDT_VFS;
	} else if (vp->v_type == VREG) {
		type = DM_TDT_REG;
	} else if (vp->v_type == VDIR) {
		type = DM_TDT_DIR;
	} else if (vp->v_type == VLNK) {
		type = DM_TDT_LNK;
	} else {
		type = DM_TDT_OTH;
	}
	*typep = type;
	return(vp);
}
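
On success the returned vnode carries the reference taken by VFS_ROOT/VFS_VGET, which the caller must eventually drop. A hypothetical call site (the error value and the use of VN_RELE() are assumptions):

short	type;
vnode_t	*vp;

if ((vp = dm_handle_to_vp(handlep, &type)) == NULL)
	return -EBADF;	/* stale handle, or filesystem not fully mounted */

/* ... operate on vp according to type (DM_TDT_REG, DM_TDT_DIR, ...) ... */

VN_RELE(vp);		/* drop the reference VFS_VGET/VFS_ROOT took */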
Example #6
void
dm_remove_fsys_entry(
	vfs_t		*vfsp)
{
	dm_fsreg_t	**fsrpp;
	dm_fsreg_t	*fsrp;
	int		lc;			/* lock cookie */

	/* Find the filesystem referenced by the vfsp's fsid_t and dequeue
	   it after verifying that the fr_state shows a filesystem that is
	   either mounting or unmounted.
	*/

	lc = mutex_spinlock(&dm_reg_lock);

	fsrpp = &dm_registers;
	while ((fsrp = *fsrpp) != NULL) {
		if (!bcmp(&fsrp->fr_fsid, vfsp->vfs_altfsid, sizeof(fsrp->fr_fsid)))
			break;
		fsrpp = &fsrp->fr_next;
	}
	if (fsrp == NULL) {
		mutex_spinunlock(&dm_reg_lock, lc);
		panic("dm_remove_fsys_entry: can't find DMAPI fsrp for "
			"vfsp %p\n", vfsp);
	}

	nested_spinlock(&fsrp->fr_lock);

	/* Verify that it makes sense to remove this entry. */

	if (fsrp->fr_state != DM_STATE_MOUNTING &&
	    fsrp->fr_state != DM_STATE_UNMOUNTED) {
		nested_spinunlock(&fsrp->fr_lock);
		mutex_spinunlock(&dm_reg_lock, lc);
		panic("dm_remove_fsys_entry: DMAPI sequence error: old state "
			"%d, fsrp %p\n", fsrp->fr_state, fsrp);
	}

	*fsrpp = fsrp->fr_next;
	dm_fsys_cnt--;

	nested_spinunlock(&dm_reg_lock);

	/* Since the filesystem is about to finish unmounting, we must be sure
	   that no vnodes are being referenced within the filesystem before we
	   let this event thread continue.  If the filesystem is currently in
	   state DM_STATE_MOUNTING, then we know by definition that there can't
	   be any references.  If the filesystem is DM_STATE_UNMOUNTED, then
	   any application threads referencing handles with DM_NO_TOKEN should
	   have already been awakened by dm_change_fsys_entry and should be
	   long gone by now.  Just in case they haven't yet left, sleep here
	   until they are really gone.
	*/

	while (fsrp->fr_hdlcnt) {
		fsrp->fr_unmount++;
		sv_wait(&fsrp->fr_queue, 1, &fsrp->fr_lock, lc);
		lc = mutex_spinlock(&fsrp->fr_lock);
		fsrp->fr_unmount--;
	}
	mutex_spinunlock(&fsrp->fr_lock, lc);

	/* Release all memory. */

#ifdef CONFIG_PROC_FS
	{
	char buf[100];
	sprintf(buf, DMAPI_DBG_PROCFS "/fsreg/0x%p", fsrp);
	remove_proc_entry(buf, NULL);
	}
#endif
	sv_destroy(&fsrp->fr_dispq);
	sv_destroy(&fsrp->fr_queue);
	spinlock_destroy(&fsrp->fr_lock);
	kmem_free(fsrp->fr_msg, fsrp->fr_msgsize);
	kmem_free(fsrp, sizeof(*fsrp));
}
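
The drain loop above cannot miss its wake-up because the sleepers in dm_handle_to_vp() (Example #5) broadcast on fr_queue as they leave; the relevant lines from that function are:

if (fsrp->fr_state == DM_STATE_UNMOUNTED) {
	if (fsrp->fr_unmount && fsrp->fr_hdlcnt == 0)
		sv_broadcast(&fsrp->fr_queue);
	...
}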
Example #7
void
dm_change_fsys_entry(
	vfs_t		*vfsp,
	dm_fsstate_t	newstate)
{
	dm_fsreg_t	*fsrp;
	int		seq_error;
	int		lc;			/* lock cookie */

	/* Find the filesystem referenced by the vfsp's fsid_t.  This should
	   always succeed.
	*/

	if ((fsrp = dm_find_fsreg_and_lock(vfsp->vfs_altfsid, &lc)) == NULL) {
		panic("dm_change_fsys_entry: can't find DMAPI fsrp for "
			"vfsp %p\n", vfsp);
	}

	/* Make sure that the new state is acceptable given the current state
	   of the filesystem.  Any error here is a major DMAPI/filesystem
	   screwup.
	*/

	seq_error = 0;
	switch (newstate) {
	case DM_STATE_MOUNTED:
		if (fsrp->fr_state != DM_STATE_MOUNTING &&
		    fsrp->fr_state != DM_STATE_UNMOUNTING) {
			seq_error++;
		}
		break;
	case DM_STATE_UNMOUNTING:
		if (fsrp->fr_state != DM_STATE_MOUNTED)
			seq_error++;
		break;
	case DM_STATE_UNMOUNTED:
		if (fsrp->fr_state != DM_STATE_UNMOUNTING)
			seq_error++;
		break;
	default:
		seq_error++;
		break;
	}
	if (seq_error) {
		panic("dm_change_fsys_entry: DMAPI sequence error: old state "
			"%d, new state %d, fsrp %p\n", fsrp->fr_state,
			newstate, fsrp);
	}

	/* If the old state was DM_STATE_UNMOUNTING, then processes could be
	   sleeping in dm_handle_to_vp() waiting for their DM_NO_TOKEN handles
	   to be translated to vnodes.  Wake them up so that they either
	   continue (new state is DM_STATE_MOUNTED) or fail (new state is
	   DM_STATE_UNMOUNTED).
	*/

	if (fsrp->fr_state == DM_STATE_UNMOUNTING) {
		if (fsrp->fr_hdlcnt)
			sv_broadcast(&fsrp->fr_queue);
	}

	/* Change the filesystem's mount state to its new value. */

	fsrp->fr_state = newstate;
	fsrp->fr_tevp = NULL;		/* not valid after DM_STATE_MOUNTING */

	/* If the new state is DM_STATE_UNMOUNTING, wait until any application
	   threads currently in the process of making VFS_VGET and VFS_ROOT
	   calls are done before we let this unmount thread continue the
	   unmount.  (We want to make sure that the unmount will see these
	   vnode references during its scan.)
	*/

	if (newstate == DM_STATE_UNMOUNTING) {
		while (fsrp->fr_vfscnt) {
			fsrp->fr_unmount++;
			sv_wait(&fsrp->fr_queue, 1, &fsrp->fr_lock, lc);
			lc = mutex_spinlock(&fsrp->fr_lock);
			fsrp->fr_unmount--;
		}
	}

	mutex_spinunlock(&fsrp->fr_lock, lc);
}
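
The switch near the top of this function encodes a small mount-state machine; summarized:

/* Accepted fr_state transitions (anything else panics):
 *
 *	DM_STATE_MOUNTING   -> DM_STATE_MOUNTED
 *	DM_STATE_MOUNTED    -> DM_STATE_UNMOUNTING
 *	DM_STATE_UNMOUNTING -> DM_STATE_MOUNTED	  (unmount abandoned)
 *	DM_STATE_UNMOUNTING -> DM_STATE_UNMOUNTED
 */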
Example #8
/* ARGSUSED */
hub_piomap_t
hub_piomap_alloc(devfs_handle_t dev,	/* set up mapping for this device */
                 device_desc_t dev_desc,	/* device descriptor */
                 iopaddr_t xtalk_addr,	/* map for this xtalk_addr range */
                 size_t byte_count,
                 size_t byte_count_max, 	/* maximum size of a mapping */
                 unsigned flags)		/* defined in sys/pio.h */
{
    xwidget_info_t widget_info = xwidget_info_get(dev);
    xwidgetnum_t widget = xwidget_info_id_get(widget_info);
    devfs_handle_t hubv = xwidget_info_master_get(widget_info);
    hubinfo_t hubinfo;
    hub_piomap_t bw_piomap;
    int bigwin, free_bw_index;
    nasid_t nasid;
    volatile hubreg_t junk;
    int s;

    /* sanity check */
    if (byte_count_max > byte_count)
        return(NULL);

    hubinfo_get(hubv, &hubinfo);

    /* If xtalk_addr range is mapped by a small window, we don't have
     * to do much
     */
    if (xtalk_addr + byte_count <= SWIN_SIZE)
        return(hubinfo_swin_piomap_get(hubinfo, (int)widget));

    /* We need to use a big window mapping.  */

    /*
     * TBD: Allow requests that would consume multiple big windows --
     * split the request up and use multiple mapping entries.
     * For now, reject requests that span big windows.
     */
    if ((xtalk_addr % BWIN_SIZE) + byte_count > BWIN_SIZE)
        return(NULL);


    /* Round xtalk address down for big window alignment */
    xtalk_addr = xtalk_addr & ~(BWIN_SIZE-1);

    /*
     * Check to see if an existing big window mapping will suffice.
     */
tryagain:
    free_bw_index = -1;
    s = mutex_spinlock(&hubinfo->h_bwlock);
    for (bigwin=0; bigwin < HUB_NUM_BIG_WINDOW; bigwin++) {
        bw_piomap = hubinfo_bwin_piomap_get(hubinfo, bigwin);

        /* If mapping is not valid, skip it */
        if (!(bw_piomap->hpio_flags & HUB_PIOMAP_IS_VALID)) {
            free_bw_index = bigwin;
            continue;
        }

        /*
         * If mapping is UNFIXED, skip it.  We don't allow sharing
         * of UNFIXED mappings, because this would allow starvation.
         */
        if (!(bw_piomap->hpio_flags & HUB_PIOMAP_IS_FIXED))
            continue;

        if ( xtalk_addr == bw_piomap->hpio_xtalk_info.xp_xtalk_addr &&
                widget == bw_piomap->hpio_xtalk_info.xp_target) {
            bw_piomap->hpio_holdcnt++;
            mutex_spinunlock(&hubinfo->h_bwlock, s);
            return(bw_piomap);
        }
    }

    /*
     * None of the existing big window mappings will work for us --
     * we need to establish a new mapping.
     */

    /* Ensure that we don't consume all big windows with FIXED mappings */
    if (flags & PIOMAP_FIXED) {
        if (hubinfo->h_num_big_window_fixed < HUB_NUM_BIG_WINDOW-1) {
            ASSERT(free_bw_index >= 0);
            hubinfo->h_num_big_window_fixed++;
        } else {
            bw_piomap = NULL;
            goto done;
        }
    } else { /* PIOMAP_UNFIXED */
        if (free_bw_index < 0) {
            if (flags & PIOMAP_NOSLEEP) {
                bw_piomap = NULL;
                goto done;
            }

            sv_wait(&hubinfo->h_bwwait, PZERO, &hubinfo->h_bwlock, s);
            goto tryagain;
        }
    }


    /* OK!  Allocate big window free_bw_index for this mapping. */
    /*
     * The code below does a PIO write to setup an ITTE entry.
     * We need to prevent other CPUs from seeing our updated memory
     * shadow of the ITTE (in the piomap) until the ITTE entry is
     * actually set up; otherwise, another CPU might attempt a PIO
     * prematurely.
     *
     * Also, the only way we can know that an entry has been received
     * by the hub and can be used by future PIO reads/writes is by
     * reading back the ITTE entry after writing it.
     *
     * For these two reasons, we PIO read back the ITTE entry after
     * we write it.
     */

    nasid = hubinfo->h_nasid;
    IIO_ITTE_PUT(nasid, free_bw_index, HUB_PIO_MAP_TO_MEM, widget, xtalk_addr);
    junk = HUB_L(IIO_ITTE_GET(nasid, free_bw_index));

    bw_piomap = hubinfo_bwin_piomap_get(hubinfo, free_bw_index);
    bw_piomap->hpio_xtalk_info.xp_dev = dev;
    bw_piomap->hpio_xtalk_info.xp_target = widget;
    bw_piomap->hpio_xtalk_info.xp_xtalk_addr = xtalk_addr;
    bw_piomap->hpio_xtalk_info.xp_kvaddr = (caddr_t)NODE_BWIN_BASE(nasid, free_bw_index);
    bw_piomap->hpio_holdcnt++;
    bw_piomap->hpio_bigwin_num = free_bw_index;

    if (flags & PIOMAP_FIXED)
        bw_piomap->hpio_flags |= HUB_PIOMAP_IS_VALID | HUB_PIOMAP_IS_FIXED;
    else
        bw_piomap->hpio_flags |= HUB_PIOMAP_IS_VALID;

done:
    mutex_spinunlock(&hubinfo->h_bwlock, s);
    return(bw_piomap);
}
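
Allocators that sleep at the sv_wait() above retry from tryagain, so the release path must wake h_bwwait whenever a big window may have become free. A sketch of that critical section, using only the fields visible above (how the caller reaches hubinfo, and whether the source uses sv_signal() or sv_broadcast(), are assumptions):

/* Sketch of the release side: drop one hold on a big-window
 * mapping.  Releasing the last hold invalidates the mapping and
 * wakes allocators so they can retry from "tryagain".
 */
s = mutex_spinlock(&hubinfo->h_bwlock);
if (--bw_piomap->hpio_holdcnt == 0) {
    if (bw_piomap->hpio_flags & HUB_PIOMAP_IS_FIXED) {
        bw_piomap->hpio_flags &= ~(HUB_PIOMAP_IS_VALID | HUB_PIOMAP_IS_FIXED);
        hubinfo->h_num_big_window_fixed--;
    } else {
        bw_piomap->hpio_flags &= ~HUB_PIOMAP_IS_VALID;
    }
    sv_broadcast(&hubinfo->h_bwwait);
}
mutex_spinunlock(&hubinfo->h_bwlock, s);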