static dm_fsreg_t *
dm_find_fsreg_and_lock(
	fsid_t		*fsidp,
	int		*lcp)		/* address of returned lock cookie */
{
	dm_fsreg_t	*fsrp;

	for (;;) {
		*lcp = mutex_spinlock(&dm_reg_lock);

		if ((fsrp = dm_find_fsreg(fsidp)) == NULL) {
			mutex_spinunlock(&dm_reg_lock, *lcp);
			return(NULL);
		}
		if (nested_spintrylock(&fsrp->fr_lock)) {
			nested_spinunlock(&dm_reg_lock);
			return(fsrp);	/* success */
		}

		/* If the second lock is not available, drop the first and
		   start over.  This gives the CPU a chance to process any
		   interrupts, and also allows processes which want a
		   fr_lock for a different filesystem to proceed.
		*/

		mutex_spinunlock(&dm_reg_lock, *lcp);
	}
}
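/*
 * Illustrative only -- not dmapi code.  The trylock-and-retry
 * discipline above can be sketched in portable userspace C with
 * pthreads.  All names here (struct entry, registry_lock,
 * registry_find, find_entry_and_lock) are hypothetical: take the
 * outer lock, trylock the inner per-object lock, and on failure drop
 * the outer lock and loop so that other threads (and, in the kernel,
 * interrupts) can make progress instead of deadlocking.
 */
#include <pthread.h>
#include <stddef.h>

struct entry {
	pthread_mutex_t	lock;
	int		key;
};

static pthread_mutex_t	registry_lock = PTHREAD_MUTEX_INITIALIZER;

static struct entry	registry[2] = {
	{ PTHREAD_MUTEX_INITIALIZER, 0 },
	{ PTHREAD_MUTEX_INITIALIZER, 1 },
};

/* Trivial stand-in for dm_find_fsreg(): look the entry up by key. */
static struct entry *
registry_find(int key)
{
	return (key >= 0 && key < 2) ? &registry[key] : NULL;
}

/* Returns with e->lock held on success, NULL if the key is unknown. */
static struct entry *
find_entry_and_lock(int key)
{
	struct entry	*e;

	for (;;) {
		pthread_mutex_lock(&registry_lock);
		if ((e = registry_find(key)) == NULL) {
			pthread_mutex_unlock(&registry_lock);
			return NULL;
		}
		if (pthread_mutex_trylock(&e->lock) == 0) {
			pthread_mutex_unlock(&registry_lock);
			return e;	/* success */
		}
		/* Inner lock busy: drop the outer lock and retry. */
		pthread_mutex_unlock(&registry_lock);
	}
}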
int
dm_find_session_and_lock(
	dm_sessid_t	sid,
	dm_session_t	**sessionpp,
	int		*lcp)		/* addr of returned lock cookie */
{
	int		error;

	for (;;) {
		*lcp = mutex_spinlock(&dm_session_lock);

		if ((error = dm_find_session(sid, sessionpp)) != 0) {
			mutex_spinunlock(&dm_session_lock, *lcp);
			return(error);
		}
		if (nested_spintrylock(&(*sessionpp)->sn_qlock)) {
			nested_spinunlock(&dm_session_lock);
			return(0);	/* success */
		}

		/* If the second lock is not available, drop the first and
		   start over.  This gives the CPU a chance to process any
		   interrupts, and also allows processes which want a
		   sn_qlock for a different session to proceed.
		*/

		mutex_spinunlock(&dm_session_lock, *lcp);
	}
}
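/*
 * A hypothetical caller (not taken from the source) illustrating the
 * contract above: on success the session's sn_qlock is held and *lcp
 * carries the cookie from the outer mutex_spinlock(), so the caller
 * releases sn_qlock with that same cookie once it is done with the
 * session.  example_inspect_session is an assumed name.
 */
static int
example_inspect_session(dm_sessid_t sid)
{
	dm_session_t	*s;
	int		lc;
	int		error;

	if ((error = dm_find_session_and_lock(sid, &s, &lc)) != 0)
		return error;

	/* ... examine s->sn_newq / s->sn_delq under sn_qlock ... */

	mutex_spinunlock(&s->sn_qlock, lc);
	return 0;
}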
void
dm_uninit(void)
{
	int		lc;
	dm_session_t	*s;
	static void	unlink_session(dm_session_t *s);

	if (dm_sessions_active) {
		printk(KERN_ERR "xfs dmapi is being unloaded while there are active sessions\n");
		while (dm_sessions_active) {
			/* this for-loop mostly from dm_find_session_and_lock() */
			for (;;) {
				s = dm_sessions;
				lc = mutex_spinlock(&dm_session_lock);
				if (nested_spintrylock(&s->sn_qlock)) {
					nested_spinunlock(&dm_session_lock);
					break;	/* success */
				}
				mutex_spinunlock(&dm_session_lock, lc);
			} /* for */

			/* this cleanup stuff mostly from dm_destroy_session() */
			if (s->sn_newq.eq_head || s->sn_readercnt || s->sn_delq.eq_head) {
				/* busy session */
				printk(KERN_ERR "  sessid %d (%s) is busy\n",
					s->sn_sessid, s->sn_info);
				nested_spinunlock(&s->sn_qlock);
				mutex_spinunlock(&dm_session_lock, lc);
				break;	/* do not continue */
			} else {
				unlink_session(s);
				nested_spinunlock(&s->sn_qlock);
				mutex_spinunlock(&dm_session_lock, lc);

				dm_clear_fsreg(s);

				spinlock_destroy(&s->sn_qlock);
				sv_destroy(&s->sn_readerq);
				sv_destroy(&s->sn_writerq);
				kmem_free(s, sizeof *s);

				printk(KERN_ERR "  sessid %d (%s) destroyed\n",
					s->sn_sessid, s->sn_info);
			}
		} /* while */
	}

	/* If any of these are still locked, then we should not allow
	 * an unload.
	 * XXX can any of these be held when no sessions exist?
	 *   - yes, dm_session_lock is acquired prior to adding a new session
	 *   - no, dm_token_lock is only held when a session is locked
	 *   - ?, dm_reg_lock (XXX lookup this one)
	 */
	if (spin_is_locked(&dm_session_lock))
		printk(KERN_ERR "xfs dmapi is being unloaded while dm_session_lock is held\n");
	if (spin_is_locked(&dm_token_lock))
		printk(KERN_ERR "xfs dmapi is being unloaded while dm_token_lock is held\n");
	if (spin_is_locked(&dm_reg_lock))
		printk(KERN_ERR "xfs dmapi is being unloaded while dm_reg_lock is held\n");

	spinlock_destroy(&dm_session_lock);
	spinlock_destroy(&dm_token_lock);
	spinlock_destroy(&dm_reg_lock);
}
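/*
 * Illustrative only -- not dmapi code.  The unload-time checks above
 * guard against tearing down a lock that some path still holds; the
 * same idea in userspace pthreads (destroy_checked is a hypothetical
 * helper): destroying a locked mutex is undefined behaviour, so probe
 * it with trylock first and warn, leaking the lock rather than
 * corrupting it.
 */
#include <pthread.h>
#include <stdio.h>

static void
destroy_checked(pthread_mutex_t *m, const char *name)
{
	if (pthread_mutex_trylock(m) != 0) {
		fprintf(stderr, "unloading while %s is held\n", name);
		return;		/* leave it allocated rather than corrupt it */
	}
	pthread_mutex_unlock(m);
	pthread_mutex_destroy(m);
}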