/*
 * Try to claim a lock that was previously blocked.
 *
 * Note that we use both the RPC_GRANTED_MSG call _and_ an async
 * RPC thread when notifying the client. This seems like overkill...
 * Here's why:
 *  - we don't want to use a synchronous RPC thread, otherwise
 *    we might find ourselves hanging on a dead portmapper.
 *  - Some lockd implementations (e.g. HP) don't react to
 *    RPC_GRANTED calls; they seem to insist on RPC_GRANTED_MSG calls.
 *
 * Caller context: invoked for a block whose conflicting lock may have
 * gone away; the block is re-queued (never freed) on every path.
 */
static void
nlmsvc_grant_blocked(struct nlm_block *block)
{
	struct nlm_file		*file = block->b_file;
	struct nlm_lock		*lock = &block->b_call.a_args.lock;
	struct file_lock	*conflock;
	int			error;

	dprintk("lockd: grant blocked lock %p\n", block);

	/* First thing is lock the file */
	down(&file->f_sema);

	/* Unlink block request from list */
	nlmsvc_remove_block(block);

	/* If b_granted is true this means we've been here before.
	 * Just retry the grant callback, possibly refreshing the RPC
	 * binding */
	if (block->b_granted) {
		nlm_rebind_host(block->b_host);
		goto callback;
	}

	/* Try the lock operation again */
	if ((conflock = posix_test_lock(&file->f_file, &lock->fl)) != NULL) {
		/* Bummer, we blocked again: park the block indefinitely and
		 * re-register on the conflicting lock's block list so we get
		 * woken when it is released. */
		dprintk("lockd: lock still blocked\n");
		nlmsvc_insert_block(block, NLM_NEVER);
		posix_block_lock(conflock, &lock->fl);
		up(&file->f_sema);
		return;
	}

	/* Alright, no conflicting lock. Now lock it for real. If the
	 * following yields an error, this is most probably due to low
	 * memory. Retry the lock in a few seconds.
	 */
	if ((error = posix_lock_file(&file->f_file, &lock->fl, 0)) < 0) {
		printk(KERN_WARNING "lockd: unexpected error %d in %s!\n",
				-error, __FUNCTION__);
		nlmsvc_insert_block(block, jiffies + 10 * HZ);
		up(&file->f_sema);
		return;
	}

callback:
	/* Lock was granted by VFS. Flag the block so a later delete knows
	 * to unlock, and so this path retries the callback on re-entry. */
	dprintk("lockd: GRANTing blocked lock.\n");
	block->b_granted = 1;
	/* b_incall marks an outstanding GRANT RPC; nlmsvc_delete_block
	 * defers destruction while it is set. */
	block->b_incall = 1;

	/* Schedule next grant callback in 30 seconds */
	nlmsvc_insert_block(block, jiffies + 30 * HZ);

	/* Call the client asynchronously; the reply (or timeout) is handled
	 * by nlmsvc_grant_callback. */
	nlmclnt_async_call(&block->b_call, NLMPROC_GRANTED_MSG,
						nlmsvc_grant_callback);
	up(&file->f_sema);
}
/* Returns failure iff a successful lock operation should be canceled */
static int dlm_plock_callback(struct plock_op *op)
{
	struct file *file;
	struct file_lock *fl;
	struct file_lock *flc;
	int (*notify)(void *, void *, int) = NULL;
	struct plock_xop *xop = (struct plock_xop *)op;
	int rv = 0;

	/* Remove the op from the pending list if it is still queued;
	 * being on the list here is unexpected, hence the log message. */
	spin_lock(&ops_lock);
	if (!list_empty(&op->list)) {
		log_print("dlm_plock_callback: op on list %llx",
			  (unsigned long long)op->info.number);
		list_del(&op->list);
	}
	spin_unlock(&ops_lock);

	/* check if the following 2 are still valid or make a copy */
	file = xop->file;
	flc = &xop->flc;
	fl = xop->fl;
	notify = xop->callback;

	/* Cluster-wide lock request failed: just report the error to the
	 * waiter and free the op. */
	if (op->info.rv) {
		notify(fl, NULL, op->info.rv);
		goto out;
	}

	/* got fs lock; bookkeep locally as well: */
	flc->fl_flags &= ~FL_SLEEP;
	if (posix_lock_file(file, flc, NULL)) {
		/*
		 * This can only happen in the case of kmalloc() failure.
		 * The filesystem's own lock is the authoritative lock,
		 * so a failure to get the lock locally is not a disaster.
		 * As long as the fs cannot reliably cancel locks (especially
		 * in a low-memory situation), we're better off ignoring
		 * this failure than trying to recover.
		 */
		log_print("dlm_plock_callback: vfs lock error %llx file %p fl %p",
			  (unsigned long long)op->info.number, file, fl);
	}

	rv = notify(fl, NULL, 0);
	if (rv) {
		/* XXX: We need to cancel the fs lock here: */
		log_print("dlm_plock_callback: lock granted after lock request "
			  "failed; dangling lock!\n");
		goto out;
	}

out:
	kfree(xop);
	return rv;
}
/* Complete an asynchronous cluster plock request: mirror the granted lock
 * into the local VFS and notify the waiter. Returns failure iff a
 * successful lock operation should be canceled. */
static int dlm_plock_callback(struct plock_op *op)
{
	struct file *file;
	struct file_lock *fl;
	struct file_lock *flc;
	int (*notify)(void *, void *, int) = NULL;
	struct plock_xop *xop = (struct plock_xop *)op;
	int rv = 0;

	/* Unlink from the pending-op list if still queued; unexpected here,
	 * so it is logged. */
	spin_lock(&ops_lock);
	if (!list_empty(&op->list)) {
		log_print("dlm_plock_callback: op on list %llx",
			  (unsigned long long)op->info.number);
		list_del(&op->list);
	}
	spin_unlock(&ops_lock);

	/* NOTE(review): xop->file/fl may refer to state owned by the original
	 * requester — confirm they are still valid at callback time, or take
	 * copies (same concern as the older variant of this function). */
	file = xop->file;
	flc = &xop->flc;
	fl = xop->fl;
	notify = xop->callback;

	/* Cluster-wide request failed: just pass the error to the waiter. */
	if (op->info.rv) {
		notify(fl, NULL, op->info.rv);
		goto out;
	}

	/* Got the fs lock; bookkeep it locally as well. Clear FL_SLEEP so the
	 * VFS does not try to block here. */
	flc->fl_flags &= ~FL_SLEEP;
	if (posix_lock_file(file, flc, NULL)) {
		/* Local bookkeeping failed (presumably allocation failure).
		 * The filesystem's lock is authoritative, so log and carry
		 * on rather than attempting recovery. */
		log_print("dlm_plock_callback: vfs lock error %llx file %p fl %p",
			  (unsigned long long)op->info.number, file, fl);
	}

	rv = notify(fl, NULL, 0);
	if (rv) {
		/* The waiter rejected a granted lock; the fs-level lock is
		 * now dangling and cannot be cancelled here. */
		log_print("dlm_plock_callback: lock granted after lock request "
			  "failed; dangling lock!\n");
		goto out;
	}

out:
	kfree(xop);
	return rv;
}
/** * Attempt to set an fcntl lock. * For now, this just goes away to the server. Later it may be more awesome. */ int ceph_lock(struct file *file, int cmd, struct file_lock *fl) { u8 lock_cmd; int err; u8 wait = 0; u16 op = CEPH_MDS_OP_SETFILELOCK; if (!(fl->fl_flags & FL_POSIX)) return -ENOLCK; /* No mandatory locks */ if (__mandatory_lock(file->f_mapping->host) && fl->fl_type != F_UNLCK) return -ENOLCK; dout("ceph_lock, fl_owner: %p", fl->fl_owner); /* set wait bit as appropriate, then make command as Ceph expects it*/ if (IS_GETLK(cmd)) op = CEPH_MDS_OP_GETFILELOCK; else if (IS_SETLKW(cmd)) wait = 1; if (F_RDLCK == fl->fl_type) lock_cmd = CEPH_LOCK_SHARED; else if (F_WRLCK == fl->fl_type) lock_cmd = CEPH_LOCK_EXCL; else lock_cmd = CEPH_LOCK_UNLOCK; err = ceph_lock_message(CEPH_LOCK_FCNTL, op, file, lock_cmd, wait, fl); if (!err) { if (op != CEPH_MDS_OP_GETFILELOCK) { dout("mds locked, locking locally"); err = posix_lock_file(file, fl, NULL); if (err && (CEPH_MDS_OP_SETFILELOCK == op)) { /* undo! This should only happen if * the kernel detects local * deadlock. */ ceph_lock_message(CEPH_LOCK_FCNTL, op, file, CEPH_LOCK_UNLOCK, 0, fl); dout("got %d on posix_lock_file, undid lock", err); } } } else if (err == -ERESTARTSYS) { dout("undoing lock\n"); ceph_lock_message(CEPH_LOCK_FCNTL, op, file, CEPH_LOCK_UNLOCK, 0, fl); } return err; }
/*
 * Support local locks (locks that only this kernel knows about)
 * if Orangefs was mounted -o local_lock.
 */
static int orangefs_lock(struct file *filp, int cmd, struct file_lock *fl)
{
	/* Without -o local_lock, every lock request is rejected. */
	if (!(ORANGEFS_SB(filp->f_inode->i_sb)->flags &
	      ORANGEFS_OPT_LOCAL_LOCK))
		return -EINVAL;

	/* Everything except F_GETLK goes straight to the VFS lock table. */
	if (cmd != F_GETLK)
		return posix_lock_file(filp, fl, NULL);

	/* F_GETLK: probe only; result is reported through *fl. */
	posix_test_lock(filp, fl);
	return 0;
}
/**
 * Attempt to set an fcntl lock.
 * For now, this just goes away to the server. Later it may be more awesome.
 *
 * NOTE(review): unlike newer versions of this function, there is no
 * FL_POSIX or mandatory-lock check here — confirm that is intentional
 * for this codebase.
 */
int ceph_lock(struct file *file, int cmd, struct file_lock *fl)
{
	u8 lock_cmd;
	int err;
	u8 wait = 0;
	u16 op = CEPH_MDS_OP_SETFILELOCK;

	/* Record the locker's tgid pid struct; get_pid takes a reference.
	 * NOTE(review): the matching put_pid is not visible here — confirm
	 * it happens when the lock is released. */
	fl->fl_nspid = get_pid(task_tgid(current));
	dout("ceph_lock, fl_pid:%d", fl->fl_pid);

	/* set wait bit as appropriate, then make command as Ceph expects it*/
	if (F_SETLKW == cmd)
		wait = 1;
	if (F_GETLK == cmd)
		op = CEPH_MDS_OP_GETFILELOCK;

	/* Map the VFS lock type onto the Ceph lock command. */
	if (F_RDLCK == fl->fl_type)
		lock_cmd = CEPH_LOCK_SHARED;
	else if (F_WRLCK == fl->fl_type)
		lock_cmd = CEPH_LOCK_EXCL;
	else
		lock_cmd = CEPH_LOCK_UNLOCK;

	/* Ask the MDS first; it is authoritative for the lock. */
	err = ceph_lock_message(CEPH_LOCK_FCNTL, op, file, lock_cmd, wait, fl);
	if (!err) {
		if ( op != CEPH_MDS_OP_GETFILELOCK ){
			/* MDS granted the lock; mirror it locally. */
			dout("mds locked, locking locally");
			err = posix_lock_file(file, fl, NULL);
			if (err && (CEPH_MDS_OP_SETFILELOCK == op)) {
				/* undo! This should only happen if
				 * the kernel detects local
				 * deadlock. */
				ceph_lock_message(CEPH_LOCK_FCNTL, op, file,
						  CEPH_LOCK_UNLOCK, 0, fl);
				dout("got %d on posix_lock_file, undid lock",
				     err);
			}
		}
	} else if (err == -ERESTARTSYS) {
		/* Interrupted while waiting: ask the MDS to drop the lock. */
		dout("undoing lock\n");
		ceph_lock_message(CEPH_LOCK_FCNTL, op, file,
				  CEPH_LOCK_UNLOCK, 0, fl);
	}
	return err;
}
/*
 * Acquire an fcntl lock on an NFS file: flush dirty pages, ask the server
 * via the protocol-specific lock op, then use the lock as a cache
 * coherency point by zapping local caches.
 */
static int do_setlk(struct file *filp, int cmd, struct file_lock *fl)
{
	struct inode *inode = filp->f_mapping->host;
	int status;

	/*
	 * Flush all pending writes before doing anything
	 * with locks..
	 */
	status = filemap_fdatawrite(filp->f_mapping);
	if (status == 0) {
		down(&inode->i_sem);
		status = nfs_wb_all(inode);
		up(&inode->i_sem);
		if (status == 0)
			status = filemap_fdatawait(filp->f_mapping);
	}
	if (status < 0)
		return status;

	/* The server call runs under the BKL, matching the locking rules of
	 * this era's VFS lock code. */
	lock_kernel();
	status = NFS_PROTO(inode)->lock(filp, cmd, fl);
	/* If we were signalled we still need to ensure that
	 * we clean up any state on the server. We therefore
	 * record the lock call as having succeeded in order to
	 * ensure that locks_remove_posix() cleans it out when
	 * the process exits.
	 */
	if (status == -EINTR || status == -ERESTARTSYS)
		posix_lock_file(filp, fl);
	unlock_kernel();
	if (status < 0)
		return status;
	/*
	 * Make sure we clear the cache whenever we try to get the lock.
	 * This makes locking act as a cache coherency point.
	 */
	filemap_fdatawrite(filp->f_mapping);
	down(&inode->i_sem);
	nfs_wb_all(inode);	/* we may have slept */
	up(&inode->i_sem);
	filemap_fdatawait(filp->f_mapping);
	nfs_zap_caches(inode);
	return 0;
}
/*
 * Delete a block. If the lock was cancelled or the grant callback
 * failed, unlock is set to 1.
 * It is the caller's responsibility to check whether the file
 * can be closed hereafter.
 *
 * Caller must hold whatever serializes access to the block lists
 * (presumably file->f_sema — confirm at call sites).
 */
static void
nlmsvc_delete_block(struct nlm_block *block, int unlock)
{
	struct file_lock	*fl = &block->b_call.a_args.lock.fl;
	struct nlm_file		*file = block->b_file;
	struct nlm_block	**bp;

	dprintk("lockd: deleting block %p...\n", block);

	/* Remove block from list */
	nlmsvc_remove_block(block);

	/* If granted, unlock it, else remove from inode block list */
	if (unlock && block->b_granted) {
		dprintk("lockd: deleting granted lock\n");
		fl->fl_type = F_UNLCK;
		posix_lock_file(&block->b_file->f_file, fl, 0);
		block->b_granted = 0;
	} else {
		dprintk("lockd: unblocking blocked lock\n");
		posix_unblock_lock(fl);
	}

	/* If the block is in the middle of a GRANT callback,
	 * don't kill it yet. Park it forever and mark it done; the
	 * callback completion is expected to finish the destruction. */
	if (block->b_incall) {
		nlmsvc_insert_block(block, NLM_NEVER);
		block->b_done = 1;
		return;
	}

	/* Remove block from file's list of blocks (singly-linked walk
	 * through the b_fnext chain). */
	for (bp = &file->f_blocks; *bp; bp = &(*bp)->b_fnext) {
		if (*bp == block) {
			*bp = block->b_fnext;
			break;
		}
	}

	/* Drop the host reference and free the block itself. */
	if (block->b_host)
		nlm_release_host(block->b_host);
	nlmclnt_freegrantargs(&block->b_call);
	kfree(block);
}
/* * Remove a lock. * This implies a CANCEL call: We send a GRANT_MSG, the client replies * with a GRANT_RES call which gets lost, and calls UNLOCK immediately * afterwards. In this case the block will still be there, and hence * must be removed. */ u32 nlmsvc_unlock(struct nlm_file *file, struct nlm_lock *lock) { int error; dprintk("lockd: nlmsvc_unlock(%04x/%ld, pi=%d, %ld-%ld)\n", file->f_file.f_dentry->d_inode->i_dev, file->f_file.f_dentry->d_inode->i_ino, lock->fl.fl_pid, lock->fl.fl_start, lock->fl.fl_end); /* First, cancel any lock that might be there */ nlmsvc_cancel_blocked(file, lock); lock->fl.fl_type = F_UNLCK; error = posix_lock_file(&file->f_file, &lock->fl, 0); return (error < 0)? nlm_lck_denied_nolocks : nlm_granted; }
/* Apply the lock described by l to an open file descriptor.
 * This implements both the F_SETLK and F_SETLKW commands of fcntl().
 * It also emulates flock() in a pretty broken way for older C
 * libraries.
 *
 * Pre-copy_from_user era: user memory is accessed with verify_area()
 * + memcpy_fromfs().
 */
int fcntl_setlk(unsigned int fd, unsigned int cmd, struct flock *l)
{
	int error;
	struct file *filp;
	struct file_lock file_lock;
	struct flock flock;
	struct inode *inode;

	/* Get arguments and validate them ... */
	if ((fd >= NR_OPEN) || !(filp = current->files->fd[fd]))
		return (-EBADF);
	error = verify_area(VERIFY_READ, l, sizeof(*l));
	if (error)
		return (error);

	if (!(inode = filp->f_inode))
		return (-EINVAL);

	/*
	 * This might block, so we do it before checking the inode.
	 */
	memcpy_fromfs(&flock, l, sizeof(flock));

	/* Don't allow mandatory locks on files that may be memory mapped
	 * and shared. The i_mmap chain is circular, hence the do/while
	 * termination test against the head.
	 */
	if (IS_MANDLOCK(inode) &&
	    (inode->i_mode & (S_ISGID | S_IXGRP)) == S_ISGID &&
	    inode->i_mmap) {
		struct vm_area_struct *vma = inode->i_mmap;
		do {
			if (vma->vm_flags & VM_MAYSHARE)
				return (-EAGAIN);
			vma = vma->vm_next_share;
		} while (vma != inode->i_mmap);
	}

	/* Translate the userspace struct flock into a kernel file_lock. */
	if (!posix_make_lock(filp, &file_lock, &flock))
		return (-EINVAL);

	/* Verify the file was opened with the access mode the requested
	 * lock type needs (f_mode bit 1 = read, bit 2 = write). */
	switch (flock.l_type) {
	case F_RDLCK:
		if (!(filp->f_mode & 1))
			return (-EBADF);
		break;
	case F_WRLCK:
		if (!(filp->f_mode & 2))
			return (-EBADF);
		break;
	case F_UNLCK:
		break;
	case F_SHLCK:
	case F_EXLCK:
#if 1
/* warn a bit for now, but don't overdo it */
	{
		static int count = 0;
		if (!count) {
			count=1;
			printk(KERN_WARNING
			       "fcntl_setlk() called by process %d (%s) with broken flock() emulation\n",
			       current->pid, current->comm);
		}
	}
#endif
		if (!(filp->f_mode & 3))
			return (-EBADF);
		break;
	default:
		return (-EINVAL);
	}

	/* Third argument requests blocking behaviour for F_SETLKW. */
	return (posix_lock_file(filp, &file_lock, cmd == F_SETLKW));
}
/*
 * Attempt to establish a lock, and if it can't be granted, block it
 * if required.
 *
 * Returns an NLM status code; on nlm_lck_blocked the request is parked
 * on the conflicting lock and completed later by the grant machinery.
 */
u32
nlmsvc_lock(struct svc_rqst *rqstp, struct nlm_file *file,
			struct nlm_lock *lock, int wait, u32 cookie)
{
	struct file_lock	*conflock;
	struct nlm_block	*block;
	int			error;

	dprintk("lockd: nlmsvc_lock(%04x/%ld, ty=%d, pi=%d, %ld-%ld, bl=%d)\n",
				file->f_file.f_dentry->d_inode->i_dev,
				file->f_file.f_dentry->d_inode->i_ino,
				lock->fl.fl_type, lock->fl.fl_pid,
				lock->fl.fl_start, lock->fl.fl_end,
				wait);

	/* Lock file against concurrent access */
	down(&file->f_sema);

	/* Get existing block (in case client is busy-waiting) */
	block = nlmsvc_lookup_block(file, lock, 0);

	lock->fl.fl_flags |= FL_LOCKD;

again:
	if (!(conflock = posix_test_lock(&file->f_file, &lock->fl))) {
		/* No conflict: take the lock for real and clean up any
		 * leftover block from an earlier blocked attempt. */
		error = posix_lock_file(&file->f_file, &lock->fl, 0);

		if (block)
			nlmsvc_delete_block(block, 0);
		up(&file->f_sema);

		dprintk("lockd: posix_lock_file returned %d\n", -error);
		switch(-error) {
		case 0:
			return nlm_granted;
		case EDEADLK:		/* no applicable NLM status */
		case EAGAIN:
			return nlm_lck_denied;
		default:		/* includes ENOLCK */
			return nlm_lck_denied_nolocks;
		}
	}

	/* Conflicting lock exists and the client doesn't want to wait. */
	if (!wait) {
		up(&file->f_sema);
		return nlm_lck_denied;
	}

	/* If we don't have a block, create and initialize it. Then
	 * retry because we may have slept in kmalloc. */
	if (block == NULL) {
		dprintk("lockd: blocking on this lock (allocating).\n");
		if (!(block = nlmsvc_create_block(rqstp, file, lock, cookie))) {
			/* Fix: release f_sema on the allocation-failure
			 * path; previously it was returned still held,
			 * permanently wedging this file. */
			up(&file->f_sema);
			return nlm_lck_denied_nolocks;
		}
		goto again;
	}

	/* Append to list of blocked */
	nlmsvc_insert_block(block, NLM_NEVER);

	/* Now add block to block list of the conflicting lock */
	dprintk("lockd: blocking on this lock.\n");
	posix_block_lock(conflock, &block->b_call.a_args.lock.fl);

	up(&file->f_sema);
	return nlm_lck_blocked;
}
/*
 * Attempt to establish a lock, and if it can't be granted, block it
 * if required.
 *
 * Returns an NLM status; nlm_lck_blocked means the request has been
 * parked and will be granted asynchronously.
 */
u32
nlmsvc_lock(struct svc_rqst *rqstp, struct nlm_file *file,
			struct nlm_lock *lock, int wait,
			struct nlm_cookie *cookie)
{
	struct file_lock	*conflock;
	struct nlm_block	*block, *nblock = NULL;
	int			error;

	dprintk("lockd: nlmsvc_lock(%04x/%ld, ty=%d, pi=%d, %Ld-%Ld, bl=%d)\n",
				file->f_file.f_dentry->d_inode->i_dev,
				file->f_file.f_dentry->d_inode->i_ino,
				lock->fl.fl_type, lock->fl.fl_pid,
				(long long)lock->fl.fl_start,
				(long long)lock->fl.fl_end,
				wait);

	/* Get existing block (in case client is busy-waiting) */
	block = nlmsvc_lookup_block(file, lock, 0);

	lock->fl.fl_flags |= FL_LOCKD;

again:
	/* Lock file against concurrent access. The label sits above the
	 * down() because the retry path below releases f_sema first. */
	down(&file->f_sema);

	if (!(conflock = posix_test_lock(&file->f_file, &lock->fl))) {
		/* No conflict: take the lock for real and drop any block
		 * left over from an earlier blocked attempt. */
		error = posix_lock_file(&file->f_file, &lock->fl, 0);

		if (block)
			nlmsvc_delete_block(block, 0);
		up(&file->f_sema);

		dprintk("lockd: posix_lock_file returned %d\n", -error);
		switch(-error) {
		case 0:
			return nlm_granted;
		case EDEADLK:
			return nlm_deadlock;
		case EAGAIN:
			return nlm_lck_denied;
		default:		/* includes ENOLCK */
			return nlm_lck_denied_nolocks;
		}
	}

	/* Conflicting lock exists and the client doesn't want to wait. */
	if (!wait) {
		up(&file->f_sema);
		return nlm_lck_denied;
	}

	/* Blocking here would deadlock against the conflicting holder;
	 * discard any block we allocated on a previous pass. */
	if (posix_locks_deadlock(&lock->fl, conflock)) {
		if (nblock)
			nlmsvc_delete_block(nblock, 0);
		up(&file->f_sema);
		return nlm_deadlock;
	}

	/* If we don't have a block, create and initialize it. Then
	 * retry because we may have slept in kmalloc.
	 */
	/* We have to release f_sema as nlmsvc_create_block may try to
	 * to claim it while doing host garbage collection */
	if (block == NULL) {
		up(&file->f_sema);
		dprintk("lockd: blocking on this lock (allocating).\n");
		if (!(block = nlmsvc_create_block(rqstp, file, lock, cookie)))
			return nlm_lck_denied_nolocks;
		/* Remember the block we created so the deadlock path above
		 * can free it on retry. */
		nblock = block;
		goto again;
	}

	/* Append to list of blocked */
	nlmsvc_insert_block(block, NLM_NEVER);

	if (list_empty(&block->b_call.a_args.lock.fl.fl_block)) {
		/* Now add block to block list of the
		   conflicting lock if we haven't done so. */
		dprintk("lockd: blocking on this lock.\n");
		posix_block_lock(conflock, &block->b_call.a_args.lock.fl);
	}

	up(&file->f_sema);
	return nlm_lck_blocked;
}
/* Apply the lock described by l to an open file descriptor. * This implements both the F_SETLK and F_SETLKW commands of fcntl(). */ int fcntl_setlk(unsigned int fd, unsigned int cmd, struct flock *l) { struct file *filp; struct file_lock file_lock; struct flock flock; struct dentry * dentry; struct inode *inode; int error; /* * This might block, so we do it before checking the inode. */ error = -EFAULT; if (copy_from_user(&flock, l, sizeof(flock))) goto out; /* Get arguments and validate them ... */ error = -EBADF; filp = fget(fd); if (!filp) goto out; error = -EINVAL; if (!(dentry = filp->f_dentry)) goto out_putf; if (!(inode = dentry->d_inode)) goto out_putf; /* Don't allow mandatory locks on files that may be memory mapped * and shared. */ if (IS_MANDLOCK(inode) && (inode->i_mode & (S_ISGID | S_IXGRP)) == S_ISGID && inode->i_mmap) { struct vm_area_struct *vma = inode->i_mmap; error = -EAGAIN; do { if (vma->vm_flags & VM_MAYSHARE) goto out_putf; } while ((vma = vma->vm_next_share) != NULL); } error = -EINVAL; if (!posix_make_lock(filp, &file_lock, &flock)) goto out_putf; error = -EBADF; switch (flock.l_type) { case F_RDLCK: if (!(filp->f_mode & FMODE_READ)) goto out_putf; break; case F_WRLCK: if (!(filp->f_mode & FMODE_WRITE)) goto out_putf; break; case F_UNLCK: break; case F_SHLCK: case F_EXLCK: #ifdef __sparc__ /* warn a bit for now, but don't overdo it */ { static int count = 0; if (!count) { count=1; printk(KERN_WARNING "fcntl_setlk() called by process %d (%s) with broken flock() emulation\n", current->pid, current->comm); } } if (!(filp->f_mode & 3)) goto out_putf; break; #endif default: error = -EINVAL; goto out_putf; } if (filp->f_op->lock != NULL) { error = filp->f_op->lock(filp, cmd, &file_lock); if (error < 0) goto out_putf; } error = posix_lock_file(filp, &file_lock, cmd == F_SETLKW); out_putf: fput(filp); out: return error; }