static void lockres_free(struct dlm_lock_resource *res) { int ret; if (!res) return; /* cancel a lock request or a conversion request that is blocked */ res->flags |= DLM_LKF_CANCEL; retry: ret = dlm_unlock(res->ls, res->lksb.sb_lkid, 0, &res->lksb, res); if (unlikely(ret != 0)) { pr_info("%s: failed to unlock %s return %d\n", __func__, res->name, ret); /* if a lock conversion is cancelled, then the lock is put * back to grant queue, need to ensure it is unlocked */ if (ret == -DLM_ECANCEL) goto retry; } res->flags &= ~DLM_LKF_CANCEL; wait_for_completion(&res->completion); kfree(res->name); kfree(res->lksb.sb_lvbptr); kfree(res); }
/*
 * A variation of dlm_lock_sync() whose wait for the grant can be
 * interrupted: the caller stops waiting when the kthread is asked to
 * stop or when the array is being closed (MD_CLOSING).
 *
 * Returns 0 (with res->mode updated) on a successful grant, the
 * dlm_lock() error if the request could not be issued, -EPERM if the
 * wait was interrupted before the AST ran, or the AST status otherwise.
 */
static int dlm_lock_sync_interruptible(struct dlm_lock_resource *res, int mode,
				       struct mddev *mddev)
{
	int ret = 0;

	ret = dlm_lock(res->ls, mode, &res->lksb, res->flags, res->name,
		       strlen(res->name), 0, sync_ast, res, res->bast);
	if (ret)
		return ret;

	/* sleep until sync_ast() fires or the wait is interrupted */
	wait_event(res->sync_locking, res->sync_locking_done ||
		   kthread_should_stop() ||
		   test_bit(MD_CLOSING, &mddev->flags));
	if (!res->sync_locking_done) {
		/*
		 * the convert queue contains the lock request when request is
		 * interrupted, and sync_ast could still be run, so need to
		 * cancel the request and reset completion
		 */
		ret = dlm_unlock(res->ls, res->lksb.sb_lkid, DLM_LKF_CANCEL,
				 &res->lksb, res);
		/* reset for the next locking attempt on this resource */
		res->sync_locking_done = false;
		if (unlikely(ret != 0))
			pr_info("failed to cancel previous lock request "
				"%s return %d\n", res->name, ret);
		return -EPERM;
	} else
		res->sync_locking_done = false;
	if (res->lksb.sb_status == 0)
		/* remember the granted mode for later conversions */
		res->mode = mode;
	return res->lksb.sb_status;
}
/*
 * Synchronously release the lock identified by @lockid on the default
 * lockspace.
 *
 * Returns 0 on success.  On failure returns the dlm_unlock() status
 * (request rejected) or -1 with errno set (no lockspace, or the unlock
 * AST reported something other than DLM_EUNLOCK).
 *
 * Fixes: errno values are positive by convention -- the old code stored
 * -ENOTCONN; and the early error return used to leave lwait.mutex
 * locked.
 */
int unlock_resource(int lockid)
{
	struct lock_wait lwait;
	int status;

	if (default_ls == NULL) {
		errno = ENOTCONN;
		return -1;
	}

	pthread_cond_init(&lwait.cond, NULL);
	pthread_mutex_init(&lwait.mutex, NULL);
	pthread_mutex_lock(&lwait.mutex);

	status = dlm_unlock(lockid, 0, &lwait.lksb, &lwait);
	if (status) {
		/* don't hand a locked mutex back to the caller */
		pthread_mutex_unlock(&lwait.mutex);
		return status;
	}

	/*
	 * Wait for the unlock AST to signal lwait.cond.
	 * NOTE(review): a bare pthread_cond_wait() is susceptible to
	 * spurious wakeups; a done-flag predicate in struct lock_wait
	 * would make this robust -- confirm against the AST handler.
	 */
	pthread_cond_wait(&lwait.cond, &lwait.mutex);
	pthread_mutex_unlock(&lwait.mutex);

	errno = lwait.lksb.sb_status;
	if (lwait.lksb.sb_status != DLM_EUNLOCK)
		return -1;
	else
		return 0;
}
/*
 * Drop the DLM lock associated with a glock being disposed of.
 *
 * A glock that never obtained a DLM lock id (sb_lkid == 0) is freed
 * directly.  When SDF_SKIP_DLM_UNLOCK is set the unlock call is skipped
 * and the glock freed -- except for an EXCLUSIVE lock with an LVB,
 * whose value block must still be written back by dlm_unlock().  On
 * unlock failure the error is logged and the glock is deliberately not
 * freed here.
 */
static void gdlm_put_lock(struct gfs2_glock *gl)
{
	struct gfs2_sbd *sdp = gl->gl_sbd;
	struct lm_lockstruct *ls = &sdp->sd_lockstruct;
	int error;

	if (gl->gl_lksb.sb_lkid == 0) {
		/* no DLM lock was ever granted for this glock */
		gfs2_glock_free(gl);
		return;
	}

	clear_bit(GLF_BLOCKING, &gl->gl_flags);
	gfs2_glstats_inc(gl, GFS2_LKS_DCOUNT);
	gfs2_sbstats_inc(gl, GFS2_LKS_DCOUNT);
	gfs2_update_request_times(gl);

	/* don't want to skip dlm_unlock writing the lvb when lock is ex */
	if (test_bit(SDF_SKIP_DLM_UNLOCK, &sdp->sd_flags) &&
	    gl->gl_lksb.sb_lvbptr && (gl->gl_state != LM_ST_EXCLUSIVE)) {
		gfs2_glock_free(gl);
		return;
	}

	/* DLM_LKF_VALBLK: write the lock value block back on release */
	error = dlm_unlock(ls->ls_dlm, gl->gl_lksb.sb_lkid, DLM_LKF_VALBLK,
			   NULL, gl);
	if (error) {
		printk(KERN_ERR "gdlm_unlock %x,%llx err=%d\n",
		       gl->gl_name.ln_type,
		       (unsigned long long)gl->gl_name.ln_number, error);
		return;
	}
}
/* Drop the lock held in our_lksb; failures are reported via perror(). */
static void unlock(void)
{
	int rc = dlm_unlock(our_lksb.sb_lkid, 0, &our_lksb, 0);

	if (rc)
		perror("pinglock: unlock failed");
}
/*
 * Tear down a DLM lock resource and free its memory.
 *
 * Fix: the return value of dlm_unlock() was ignored.  If the unlock
 * request is rejected no AST ever runs, so the unconditional
 * wait_for_completion() would block forever.  Only wait when the
 * request was accepted, and log the failure otherwise.
 */
static void lockres_free(struct dlm_lock_resource *res)
{
	int ret;

	if (!res)
		return;

	init_completion(&res->completion);
	ret = dlm_unlock(res->ls, res->lksb.sb_lkid, 0, &res->lksb, res);
	if (unlikely(ret != 0))
		pr_err("failed to unlock %s return %d\n", res->name, ret);
	else
		/* the unlock AST completes res->completion */
		wait_for_completion(&res->completion);

	kfree(res->name);
	kfree(res->lksb.sb_lvbptr);
	kfree(res);
}
/*
 * Completion AST: a successful grant (status 0) is released right away;
 * an unlock completion (EUNLOCK) decrements the outstanding count.
 */
static void ast_routine(void *arg)
{
	struct dlm_lksb *blk = arg;

	switch (blk->sb_status) {
	case 0:
		dlm_unlock(blk->sb_lkid, 0, blk, blk);
		break;
	case EUNLOCK:
		count--;
		break;
	default:
		break;
	}
}
static void gdlm_put_lock(struct gfs2_glock *gl) { struct gfs2_sbd *sdp = gl->gl_sbd; struct lm_lockstruct *ls = &sdp->sd_lockstruct; int error; if (gl->gl_lksb.sb_lkid == 0) { gfs2_glock_free(gl); return; } error = dlm_unlock(ls->ls_dlm, gl->gl_lksb.sb_lkid, DLM_LKF_VALBLK, NULL, gl); if (error) { printk(KERN_ERR "gdlm_unlock %x,%llx err=%d\n", gl->gl_name.ln_type, (unsigned long long)gl->gl_name.ln_number, error); return; } }
static int sync_unlock(struct gfs2_sbd *sdp, struct dlm_lksb *lksb, char *name) { struct lm_lockstruct *ls = &sdp->sd_lockstruct; int error; error = dlm_unlock(ls->ls_dlm, lksb->sb_lkid, 0, lksb, ls); if (error) { fs_err(sdp, "%s lkid %x error %d\n", name, lksb->sb_lkid, error); return error; } wait_for_completion(&ls->ls_sync_wait); if (lksb->sb_status != -DLM_EUNLOCK) { fs_err(sdp, "%s lkid %x status %d\n", name, lksb->sb_lkid, lksb->sb_status); return -1; } return 0; }
static void gdlm_put_lock(struct kmem_cache *cachep, struct gfs2_glock *gl) { struct gfs2_sbd *sdp = gl->gl_sbd; struct lm_lockstruct *ls = &sdp->sd_lockstruct; int error; if (gl->gl_lksb.sb_lkid == 0) { kmem_cache_free(cachep, gl); if (atomic_dec_and_test(&sdp->sd_glock_disposal)) wake_up(&sdp->sd_glock_wait); return; } error = dlm_unlock(ls->ls_dlm, gl->gl_lksb.sb_lkid, DLM_LKF_VALBLK, NULL, gl); if (error) { printk(KERN_ERR "gdlm_unlock %x,%llx err=%d\n", gl->gl_name.ln_type, (unsigned long long)gl->gl_name.ln_number, error); return; } }
/*
 * Free a DLM lock resource: force-unlock it, wait for the unlock AST,
 * then release the memory.
 */
static void lockres_free(struct dlm_lock_resource *res)
{
	int ret = 0;

	if (!res)
		return;

	/*
	 * use FORCEUNLOCK flag, so we can unlock even the lock is on the
	 * waiting or convert queue
	 */
	ret = dlm_unlock(res->ls, res->lksb.sb_lkid, DLM_LKF_FORCEUNLOCK,
			 &res->lksb, res);
	if (unlikely(ret != 0))
		pr_err("failed to unlock %s return %d\n", res->name, ret);
	else
		/* sync_ast() sets sync_locking_done once the unlock completes;
		 * on failure no AST runs, so don't wait */
		wait_event(res->sync_locking, res->sync_locking_done);

	kfree(res->name);
	kfree(res->lksb.sb_lvbptr);
	kfree(res);
}
static void gdlm_put_lock(struct gfs2_glock *gl) { struct gfs2_sbd *sdp = gl->gl_sbd; struct lm_lockstruct *ls = &sdp->sd_lockstruct; int error; if (gl->gl_lksb.sb_lkid == 0) { gfs2_glock_free(gl); return; } clear_bit(GLF_BLOCKING, &gl->gl_flags); gfs2_glstats_inc(gl, GFS2_LKS_DCOUNT); gfs2_sbstats_inc(gl, GFS2_LKS_DCOUNT); gfs2_update_request_times(gl); error = dlm_unlock(ls->ls_dlm, gl->gl_lksb.sb_lkid, DLM_LKF_VALBLK, NULL, gl); if (error) { printk(KERN_ERR "gdlm_unlock %x,%llx err=%d\n", gl->gl_name.ln_type, (unsigned long long)gl->gl_name.ln_number, error); return; } }
static void gdlm_cancel(struct gfs2_glock *gl) { struct lm_lockstruct *ls = &gl->gl_sbd->sd_lockstruct; dlm_unlock(ls->ls_dlm, gl->gl_lksb.sb_lkid, DLM_LKF_CANCEL, NULL, gl); }
int main(int argc, char *argv[]) { const char *resource = "LOCK-NAME"; int flags = 0; int delay = 0; int status; int mode = LKM_EXMODE; int convmode = -1; int do_unlock = 1; int do_crash = 0; signed char opt; /* Deal with command-line arguments */ opterr = 0; optind = 0; while ((opt=getopt(argc,argv,"?m:nqupc:d:CvV")) != EOF) { switch(opt) { case 'h': usage(argv[0], stdout); exit(0); case '?': usage(argv[0], stderr); exit(0); case 'm': mode = modetonum(optarg); break; case 'c': convmode = modetonum(optarg); break; case 'p': use_threads++; break; case 'n': flags |= LKF_NOQUEUE; break; case 'q': quiet = 1; break; case 'u': do_unlock = 0; break; case 'C': do_crash = 1; break; case 'd': delay = atoi(optarg); break; case 'V': printf("\nasttest version 0.1\n\n"); exit(1); break; } } if (argv[optind]) resource = argv[optind]; if (!quiet) fprintf(stderr, "locking %s %s %s...", resource, numtomode(mode), (flags&LKF_NOQUEUE?"(NOQUEUE)":"")); fflush(stderr); if (use_threads) { pthread_cond_init(&cond, NULL); pthread_mutex_init(&mutex, NULL); pthread_mutex_lock(&mutex); dlm_pthread_init(); } status = dlm_lock(mode, &lksb, flags, resource, strlen(resource), 0, // Parent, ast_routine, &lksb, bast_routine, NULL); // Range if (status == -1) { if (!quiet) fprintf(stderr, "\n"); perror("lock"); return -1; } printf("(lkid=%x)", lksb.sb_lkid); if (do_crash) *(int *)0 = 0xdeadbeef; /* Wait */ if (use_threads) pthread_cond_wait(&cond, &mutex); else poll_for_ast(); if (delay) sleep(delay); if (!quiet) { fprintf(stderr, "unlocking %s...", resource); fflush(stderr); } if (do_unlock) { status = dlm_unlock(lksb.sb_lkid, 0, // flags &lksb, &lksb); // AST args if (status == -1) { if (!quiet) fprintf(stderr, "\n"); perror("unlock"); return -1; } /* Wait */ if (use_threads) pthread_cond_wait(&cond, &mutex); else poll_for_ast(); } return 0; }