Example #1
static dm_fsreg_t *
dm_find_fsreg_and_lock(
	fsid_t		*fsidp,
	int		*lcp)		/* address of returned lock cookie */
{
	dm_fsreg_t	*fsrp;

	for (;;) {
		*lcp = mutex_spinlock(&dm_reg_lock);

		if ((fsrp = dm_find_fsreg(fsidp)) == NULL) {
			mutex_spinunlock(&dm_reg_lock, *lcp);
			return(NULL);
		}
		if (nested_spintrylock(&fsrp->fr_lock)) {
			nested_spinunlock(&dm_reg_lock);
			return(fsrp);   /* success */
		}

		/* If the second lock is not available, drop the first and
		   start over.  This gives the CPU a chance to process any
		   interrupts, and also allows processes which want a fr_lock
		   for a different filesystem to proceed.
		*/

		mutex_spinunlock(&dm_reg_lock, *lcp);
	}
}
Example #2
int
dm_find_session_and_lock(
	dm_sessid_t	sid,
	dm_session_t	**sessionpp,
	int		*lcp)		/* addr of returned lock cookie */
{
	int		error;

	for (;;) {
		*lcp = mutex_spinlock(&dm_session_lock);

		if ((error = dm_find_session(sid, sessionpp)) != 0) {
			mutex_spinunlock(&dm_session_lock, *lcp);
			return(error);
		}
		if (nested_spintrylock(&(*sessionpp)->sn_qlock)) {
			nested_spinunlock(&dm_session_lock);
			return(0);	/* success */
		}

		/* If the second lock is not available, drop the first and
		   start over.  This gives the CPU a chance to process any
		   interrupts, and also allows processes which want a sn_qlock
		   for a different session to proceed.
		*/

		mutex_spinunlock(&dm_session_lock, *lcp);
	}
}
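Examples #1 and #2 are the same deadlock-avoidance idiom: take the global list lock, try-lock the per-object lock, and if the inner lock is contended, drop the outer lock and restart from the top instead of blocking with both held. Below is a minimal user-space sketch of the idiom using POSIX mutexes; the registry/object names are invented for illustration and are not part of the DMAPI sources.

#include <pthread.h>
#include <stddef.h>

/* Illustrative types, not from the DMAPI code. */
typedef struct object {
	pthread_mutex_t	o_lock;
	struct object	*o_next;
	int		o_id;
} object_t;

static pthread_mutex_t	registry_lock = PTHREAD_MUTEX_INITIALIZER;
static object_t		*registry;

/* Look up an object by id and return it with o_lock held; the caller
   must pthread_mutex_unlock(&obj->o_lock) when finished. */
static object_t *
find_and_lock(int id)
{
	object_t	*o;

	for (;;) {
		pthread_mutex_lock(&registry_lock);
		for (o = registry; o != NULL; o = o->o_next) {
			if (o->o_id == id)
				break;
		}
		if (o == NULL) {
			pthread_mutex_unlock(&registry_lock);
			return NULL;
		}
		if (pthread_mutex_trylock(&o->o_lock) == 0) {
			pthread_mutex_unlock(&registry_lock);
			return o;	/* success: only o_lock is held */
		}
		/* Inner lock contended: back off completely so a thread
		   holding o_lock while waiting for registry_lock can
		   never deadlock against us. */
		pthread_mutex_unlock(&registry_lock);
	}
}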
Example #3
static void
notify_irq(u8 vector)
{
	u32 n = cpuno();
	vm_t *v = &cpu_vm[n];

	nested_spinlock(&v->v_notes_lock);

	list_t *elem;
	while ((elem = list_remove_head(&v->v_notes_list)) != NULL) {
		nested_spinunlock(&v->v_notes_lock);

		note_t *note = super(elem, note_t, nb_list[n]);
		if (note->nb_func) {
			note->nb_func(v, note->nb_arg0, note->nb_arg1);
		}
		fence();
		atomic_inc(&note->nb_completion_count);

		nested_spinlock(&v->v_notes_lock);
	}
	nested_spinunlock(&v->v_notes_lock);
}
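Example #3 drains the per-CPU note list while holding v_notes_lock only across the list manipulation itself: the lock is dropped before nb_func runs, so a callback may queue further notes or take other locks without deadlocking, and the fence() before atomic_inc() ensures the sender observes all of the callback's side effects no later than the completion count. A hedged C11 sketch of the same drain pattern follows; the queue and work-item names are invented for illustration.

#include <pthread.h>
#include <stdatomic.h>
#include <stddef.h>

/* Illustrative work item, not from the original sources. */
typedef struct work {
	struct work	*w_next;
	void		(*w_func)(void *);
	void		*w_arg;
	atomic_uint	*w_done;	/* sender's completion counter */
} work_t;

static pthread_mutex_t	queue_lock = PTHREAD_MUTEX_INITIALIZER;
static work_t		*queue_head;

/* Pop and run every queued item, dropping queue_lock around each
   callback exactly as notify_irq() drops v_notes_lock around nb_func. */
static void
drain(void)
{
	work_t	*w;

	pthread_mutex_lock(&queue_lock);
	while ((w = queue_head) != NULL) {
		queue_head = w->w_next;
		pthread_mutex_unlock(&queue_lock);

		w->w_func(w->w_arg);
		/* Release ordering publishes the callback's effects
		   before the waiter sees the counter advance. */
		atomic_fetch_add_explicit(w->w_done, 1, memory_order_release);

		pthread_mutex_lock(&queue_lock);
	}
	pthread_mutex_unlock(&queue_lock);
}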
Example #4
dm_tokevent_t *
dm_find_mount_tevp_and_lock(
	fsid_t		*fsidp,
	int		*lcp)		/* address of returned lock cookie */
{
	dm_fsreg_t	*fsrp;

	if ((fsrp = dm_find_fsreg_and_lock(fsidp, lcp)) == NULL)
		return(NULL);

	if (!fsrp->fr_tevp || fsrp->fr_state != DM_STATE_MOUNTING) {
		mutex_spinunlock(&fsrp->fr_lock, *lcp);
		return(NULL);
	}
	nested_spinlock(&fsrp->fr_tevp->te_lock);
	nested_spinunlock(&fsrp->fr_lock);
	return(fsrp->fr_tevp);
}
Example #5
int
dm_find_msg_and_lock(
	dm_sessid_t	sid,
	dm_token_t	token,
	dm_tokevent_t	**tevpp,
	int		*lcp)		/* address of returned lock cookie */
{
	dm_session_t	*s;
	int		error;

	if ((error = dm_find_session_and_lock(sid, &s, lcp)) != 0)
		return(error);

	if ((error = dm_find_msg(s, token, tevpp)) != 0) {
		mutex_spinunlock(&s->sn_qlock, *lcp);
		return(error);
	}
	nested_spinlock(&(*tevpp)->te_lock);
	nested_spinunlock(&s->sn_qlock);
	return(0);
}
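Examples #4 and #5 build on #1 and #2 by handing the lock cookie down a lock chain: the cookie minted by the outermost mutex_spinlock() stays live while nested_spinlock() takes the inner te_lock and nested_spinunlock() drops the outer lock, so the caller finally pairs the original cookie with the innermost lock. A hypothetical caller sketch, grounded only in the signatures shown above (error handling elided):

	dm_tokevent_t	*tevp;
	int		lc;
	int		error;

	if ((error = dm_find_msg_and_lock(sid, token, &tevp, &lc)) != 0)
		return(error);
	/* ... operate on *tevp with te_lock held ... */
	mutex_spinunlock(&tevp->te_lock, lc);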
Example #6
void
dm_clear_fsreg(
	dm_session_t	*s)
{
	dm_fsreg_t	*fsrp;
	int		event;
	int		lc;			/* lock cookie */

	lc = mutex_spinlock(&dm_reg_lock);

	for (fsrp = dm_registers; fsrp != NULL; fsrp = fsrp->fr_next) {
		nested_spinlock(&fsrp->fr_lock);
		for (event = 0; event < DM_EVENT_MAX; event++) {
			if (fsrp->fr_sessp[event] != s)
				continue;
			fsrp->fr_sessp[event] = NULL;
			if (event == DM_EVENT_DESTROY)
				bzero(&fsrp->fr_rattr, sizeof(fsrp->fr_rattr));
		}
		nested_spinunlock(&fsrp->fr_lock);
	}

	mutex_spinunlock(&dm_reg_lock, lc);
}
Example #7
void
dm_remove_fsys_entry(
	vfs_t		*vfsp)
{
	dm_fsreg_t	**fsrpp;
	dm_fsreg_t	*fsrp;
	int		lc;			/* lock cookie */

	/* Find the filesystem referenced by the vfsp's fsid_t and dequeue
	   it after verifying that the fr_state shows a filesystem that is
	   either mounting or unmounted.
	*/

	lc = mutex_spinlock(&dm_reg_lock);

	fsrpp = &dm_registers;
	while ((fsrp = *fsrpp) != NULL) {
		if (!bcmp(&fsrp->fr_fsid, vfsp->vfs_altfsid, sizeof(fsrp->fr_fsid)))
			break;
		fsrpp = &fsrp->fr_next;
	}
	if (fsrp == NULL) {
		mutex_spinunlock(&dm_reg_lock, lc);
		panic("dm_remove_fsys_entry: can't find DMAPI fsrp for "
			"vfsp %p\n", vfsp);
	}

	nested_spinlock(&fsrp->fr_lock);

	/* Verify that it makes sense to remove this entry. */

	if (fsrp->fr_state != DM_STATE_MOUNTING &&
	    fsrp->fr_state != DM_STATE_UNMOUNTED) {
		nested_spinunlock(&fsrp->fr_lock);
		mutex_spinunlock(&dm_reg_lock, lc);
		panic("dm_remove_fsys_entry: DMAPI sequence error: old state "
			"%d, fsrp %p\n", fsrp->fr_state, fsrp);
	}

	*fsrpp = fsrp->fr_next;
	dm_fsys_cnt--;

	nested_spinunlock(&dm_reg_lock);

	/* Since the filesystem is about to finish unmounting, we must be sure
	   that no vnodes are being referenced within the filesystem before we
	   let this event thread continue.  If the filesystem is currently in
	   state DM_STATE_MOUNTING, then we know by definition that there can't
	   be any references.  If the filesystem is DM_STATE_UNMOUNTED, then
	   any application threads referencing handles with DM_NO_TOKEN should
	   have already been awakened by dm_change_fsys_entry and should be
	   long gone by now.  Just in case they haven't yet left, sleep here
	   until they are really gone.
	*/

	while (fsrp->fr_hdlcnt) {
		fsrp->fr_unmount++;
		sv_wait(&fsrp->fr_queue, 1, &fsrp->fr_lock, lc);
		lc = mutex_spinlock(&fsrp->fr_lock);
		fsrp->fr_unmount--;
	}
	mutex_spinunlock(&fsrp->fr_lock, lc);

	/* Release all memory. */

#ifdef CONFIG_PROC_FS
	{
	char buf[100];
	sprintf(buf, DMAPI_DBG_PROCFS "/fsreg/0x%p", fsrp);
	remove_proc_entry(buf, NULL);
	}
#endif
	sv_destroy(&fsrp->fr_dispq);
	sv_destroy(&fsrp->fr_queue);
	spinlock_destroy(&fsrp->fr_lock);
	kmem_free(fsrp->fr_msg, fsrp->fr_msgsize);
	kmem_free(fsrp, sizeof(*fsrp));
}
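The drain loop in Example #7 relies on sv_wait() atomically releasing fr_lock while the thread sleeps on fr_queue; unlike pthread_cond_wait(), sv_wait() returns with the lock already dropped, which is why the loop reacquires it explicitly. A portable sketch of the same "sleep until the reference count drains" pattern with a condition variable, all names illustrative:

#include <pthread.h>

static pthread_mutex_t	lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t	queue = PTHREAD_COND_INITIALIZER;
static int		hdlcnt;		/* outstanding handle references */

/* Block until every outstanding reference has been released.
   pthread_cond_wait() drops 'lock' while sleeping and reacquires it
   before returning, so hdlcnt is always examined under the lock. */
static void
wait_for_handles(void)
{
	pthread_mutex_lock(&lock);
	while (hdlcnt > 0)
		pthread_cond_wait(&queue, &lock);
	pthread_mutex_unlock(&lock);
}

/* Release one reference, waking the waiter when the count hits zero. */
static void
put_handle(void)
{
	pthread_mutex_lock(&lock);
	if (--hdlcnt == 0)
		pthread_cond_broadcast(&queue);
	pthread_mutex_unlock(&lock);
}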
Example #8
void
notify_all(nb_func_t func, nb_arg_t arg0, nb_arg_t arg1)
{
	sn this = cpuno();

	assert(irq_is_disabled());

	if (unlikely(notify_intvec == 0)) {
		func(&cpu_vm[this], arg0, arg1);
		return;
	}

	note_t note;
	un count = 0;

	note.nb_func = func;
	note.nb_arg0 = arg0;
	note.nb_arg1 = arg1;
	note.nb_completion_count = 0;
	/* No need to list_init(&note.nb_list[i]) because adding an element
	 * to a list will overwrite the element's prev and next ptrs.
	 */

	un notify_mask = 0;
	for_each_active_cpu(i) {
		if (i == this) {
			continue;
		}
		vm_t *v = &cpu_vm[i];
		nested_spinlock(&v->v_notes_lock);
		list_add_tail(&v->v_notes_list, &note.nb_list[i]);
		nested_spinunlock(&v->v_notes_lock);
		bit_set(notify_mask, i);

		apic_send_IPI(i, notify_intvec);
		count++;
	}

	func(&cpu_vm[this], arg0, arg1);

	fence();
	u64 timeout = rdtsc() + TSC_TIMEOUT;
	while (volatile_read(&note.nb_completion_count) < count) {
		/* Poll for incoming notifications */
		notify_irq(0);
		if (rdtsc() > timeout) {
			kprintf("notify_all>TIMEOUT %u %ld\n",
				volatile_read(&note.nb_completion_count),
				count /* ,
				note.nb_ack_map */);
			for_each_cpu(i, notify_mask) {
				vm_t *v = &cpu_vm[i];
				nested_spinlock(&v->v_notes_lock);
				/* If the element was already removed, this
				 * is a no-op.
				 */
				list_remove(&note.nb_list[i]);
				nested_spinunlock(&v->v_notes_lock);
			}
			return;
		}
	}
}
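Example #8 posts a note on every other active CPU's list, sends each an IPI, runs the function locally, and then spins until nb_completion_count reaches the number of notified CPUs. While spinning it calls notify_irq(0) so that two CPUs concurrently inside notify_all() keep servicing each other's notes rather than deadlocking, and on timeout it unlinks any notes still queued, which is safe only because the note lives in this stack frame. A compact sketch of the waiter side using C11 atomics; poll_own_queue() and read_tsc() are assumed stand-ins for notify_irq(0) and rdtsc().

#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

extern void	poll_own_queue(void);	/* assumed: drain our own note list */
extern uint64_t	read_tsc(void);		/* assumed: monotonic cycle counter */

/* Spin until 'expected' peers have acknowledged or 'deadline' passes.
   Returns false on timeout, in which case the caller must unlink any
   notes that are still queued before its stack frame goes away. */
static bool
wait_for_acks(atomic_uint *done, unsigned expected, uint64_t deadline)
{
	while (atomic_load_explicit(done, memory_order_acquire) < expected) {
		poll_own_queue();
		if (read_tsc() > deadline)
			return false;
	}
	return true;
}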
Example #9
void
dm_uninit(void)
{
	int lc;
	dm_session_t *s;

	static void unlink_session(dm_session_t *s);

	if (dm_sessions_active) {
		printk(KERN_ERR "xfs dmapi is being unloaded while there are active sessions\n");

		while (dm_sessions_active) {
			/* this for-loop mostly from dm_find_session_and_lock() */
			for (;;) {
				lc = mutex_spinlock(&dm_session_lock);
				s = dm_sessions;	/* read the list head under dm_session_lock */

				if (nested_spintrylock(&s->sn_qlock)) {
					nested_spinunlock(&dm_session_lock);
					break; /* success */
				}
				mutex_spinunlock(&dm_session_lock, lc);
			}	/* for */

			/* this cleanup stuff mostly from dm_destroy_session() */
			if (s->sn_newq.eq_head || s->sn_readercnt || s->sn_delq.eq_head) {
				/* busy session */
				printk(KERN_ERR "   sessid %d (%s) is busy\n", s->sn_sessid, s->sn_info);
				nested_spinunlock(&s->sn_qlock);
				mutex_spinunlock(&dm_session_lock, lc);
				break; /* do not continue */
			}
			else {
				unlink_session(s);
				nested_spinunlock(&s->sn_qlock);
				mutex_spinunlock(&dm_session_lock, lc);
				dm_clear_fsreg(s);
				spinlock_destroy(&s->sn_qlock);
				sv_destroy(&s->sn_readerq);
				sv_destroy(&s->sn_writerq);
				kmem_free(s, sizeof *s);
				printk(KERN_ERR "   sessid %d (%s) destroyed\n", s->sn_sessid, s->sn_info);
			}
		}	/* while */
	}

	/* If any of these are still locked, then we should not allow
	 * an unload.
	 * XXX can any of these be held when no sessions exist?
	 *   - yes, dm_session_lock is acquired prior to adding a new session
	 *   - no, dm_token_lock is only held when a session is locked
	 *   - ?, dm_reg_lock (XXX lookup this one)
	 */

	if (spin_is_locked(&dm_session_lock))
		printk(KERN_ERR "xfs dmapi is being unloaded while dm_session_lock is held\n");
	if (spin_is_locked(&dm_token_lock))
		printk(KERN_ERR "xfs dmapi is being unloaded while dm_token_lock is held\n");
	if (spin_is_locked(&dm_reg_lock))
		printk(KERN_ERR "xfs dmapi is being unloaded while dm_reg_lock is held\n");

	spinlock_destroy(&dm_session_lock);
	spinlock_destroy(&dm_token_lock);
	spinlock_destroy(&dm_reg_lock);
}