/*
 * Drop the lock taken by the matching lock half. Returns true if the
 * seqlock-protected walk raced with a writer and must be retried.
 */
bool talpa_vfsmount_unlock(unsigned* m_seq)
{
#if defined TALPA_USE_VFSMOUNT_LOCK
# if defined TALPA_VFSMOUNT_LG_BRLOCK
    br_read_unlock(&vfsmount_lock);
# elif defined TALPA_VFSMOUNT_LOCK_BRLOCK
    br_read_unlock(vfsmount_lock);
# else
    spinlock_t* talpa_vfsmount_lock_addr =
        (spinlock_t *)talpa_get_symbol("vfsmount_lock", (void *)TALPA_VFSMOUNT_LOCK_ADDR);

    spin_unlock(talpa_vfsmount_lock_addr);
# endif
#elif defined TALPA_USE_MOUNT_LOCK
    seqlock_t* mount_lock_addr =
        (seqlock_t *)talpa_get_symbol("mount_lock", (void *)TALPA_MOUNT_LOCK_ADDR);

    if (need_seqretry(mount_lock_addr, *m_seq))
    {
        /* A writer intervened: switch to the locked pass and retry. */
        *m_seq = 1;
        return true;
    }
    done_seqretry(mount_lock_addr, *m_seq);
#else
    // On 2.4 we don't have vfsmount_lock - we use dcache_lock instead
    spin_unlock(&dcache_lock);
#endif
    return false;
}
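The unsigned *m_seq out-parameter carries the sequence cookie between the lock and unlock halves: it starts even for a lockless pass, and the unlock half flips it to 1 (odd) when the walk must be retried with the lock actually held. A minimal caller sketch, assuming a talpa_vfsmount_lock() counterpart that consumes the same cookie (the counterpart name and the loop body are illustrative, not quoted from Talpa):

/* Hypothetical caller: talpa_vfsmount_lock() is assumed to begin a
 * lockless pass when *m_seq is even and to take the lock outright once
 * the unlock half has set it odd. */
unsigned m_seq = 0;

do
{
    talpa_vfsmount_lock(&m_seq);
    /* ... walk the mount table; results are only trusted if no retry ... */
} while (talpa_vfsmount_unlock(&m_seq));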
/*
 * read the attributes of an inode
 */
int afs_getattr(const struct path *path, struct kstat *stat,
                u32 request_mask, unsigned int query_flags)
{
        struct inode *inode = d_inode(path->dentry);
        struct afs_vnode *vnode = AFS_FS_I(inode);
        int seq = 0;

        _enter("{ ino=%lu v=%u }", inode->i_ino, inode->i_generation);

        do {
                read_seqbegin_or_lock(&vnode->cb_lock, &seq);
                generic_fillattr(inode, stat);
        } while (need_seqretry(&vnode->cb_lock, seq));

        done_seqretry(&vnode->cb_lock, seq);
        return 0;
}
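The retry loop only works because every writer that changes the attributes guarded by cb_lock takes its write side, bumping the sequence that need_seqretry() checks. A minimal sketch of such a writer, assuming a simplified update path (example_update_status() is hypothetical; the real fs/afs callback-break code takes cb_lock the same way):

#include <linux/fs.h>
#include <linux/seqlock.h>
#include "internal.h"   /* assumed: fs/afs definition of struct afs_vnode */

/* Hypothetical writer sketch: taking the write side of cb_lock forces any
 * concurrent lockless generic_fillattr() in afs_getattr() to loop and
 * re-copy the attributes. */
static void example_update_status(struct afs_vnode *vnode, loff_t new_size)
{
        write_seqlock(&vnode->cb_lock);         /* sequence goes odd: readers retry */
        i_size_write(&vnode->vfs_inode, new_size);
        /* ... update the other fields generic_fillattr() copies ... */
        write_sequnlock(&vnode->cb_lock);       /* sequence even again: reads valid */
}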
/*
 * Find a service connection under RCU conditions.
 *
 * We could use a hash table, but that is subject to bucket stuffing by an
 * attacker as the client gets to pick the epoch and cid values and would know
 * the hash function. So, instead, we use a hash table for the peer and from
 * that an rbtree to find the service connection. Under ordinary circumstances
 * it might be slower than a large hash table, but it is at least limited in
 * depth.
 */
struct rxrpc_connection *rxrpc_find_service_conn_rcu(struct rxrpc_peer *peer,
                                                     struct sk_buff *skb)
{
        struct rxrpc_connection *conn = NULL;
        struct rxrpc_conn_proto k;
        struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
        struct rb_node *p;
        unsigned int seq = 0;

        k.epoch = sp->hdr.epoch;
        k.cid   = sp->hdr.cid & RXRPC_CIDMASK;

        do {
                /* Unfortunately, rbtree walking doesn't give reliable results
                 * under just the RCU read lock, so we have to check for
                 * changes.
                 */
                read_seqbegin_or_lock(&peer->service_conn_lock, &seq);

                p = rcu_dereference_raw(peer->service_conns.rb_node);
                while (p) {
                        conn = rb_entry(p, struct rxrpc_connection, service_node);

                        if (conn->proto.index_key < k.index_key)
                                p = rcu_dereference_raw(p->rb_left);
                        else if (conn->proto.index_key > k.index_key)
                                p = rcu_dereference_raw(p->rb_right);
                        else
                                goto done;
                        conn = NULL;
                }
        } while (need_seqretry(&peer->service_conn_lock, seq));

done:
        done_seqretry(&peer->service_conn_lock, seq);
        _leave(" = %d", conn ? conn->debug_id : -1);
        return conn;
}
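The reason a bare RCU walk is unreliable here is that rb_insert_color() rotates nodes, so a concurrent reader can be steered down a stale subtree. The lookup's seqlock check pairs with an insertion side that publishes nodes under the write side of the same lock. A sketch of that writer, modelled on rxrpc's publish path (simplified: duplicate-key handling omitted, example_publish_service_conn() and the includes are illustrative):

#include <linux/rbtree.h>
#include <linux/seqlock.h>
#include "ar-internal.h"        /* assumed: net/rxrpc internal definitions */

/* Sketch of the matching writer: the insert and any resulting rotations
 * happen entirely inside the seqlock write section, so readers that raced
 * with them see need_seqretry() fire and walk the tree again. */
static void example_publish_service_conn(struct rxrpc_peer *peer,
                                         struct rxrpc_connection *conn)
{
        struct rb_node **pp = &peer->service_conns.rb_node, *parent = NULL;

        write_seqlock_bh(&peer->service_conn_lock);
        while (*pp) {
                struct rxrpc_connection *cursor;

                parent = *pp;
                cursor = rb_entry(parent, struct rxrpc_connection, service_node);
                if (conn->proto.index_key < cursor->proto.index_key)
                        pp = &(*pp)->rb_left;
                else
                        pp = &(*pp)->rb_right;
        }
        rb_link_node_rcu(&conn->service_node, parent, pp);
        rb_insert_color(&conn->service_node, &peer->service_conns);
        write_sequnlock_bh(&peer->service_conn_lock);
}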
/*
 * Look up and get an activation reference on a cell record under RCU
 * conditions. The caller must hold the RCU read lock.
 */
struct afs_cell *afs_lookup_cell_rcu(struct afs_net *net,
                                     const char *name, unsigned int namesz)
{
        struct afs_cell *cell = NULL;
        struct rb_node *p;
        int n, seq = 0, ret = 0;

        _enter("%*.*s", namesz, namesz, name);

        if (name && namesz == 0)
                return ERR_PTR(-EINVAL);
        if (namesz > AFS_MAXCELLNAME)
                return ERR_PTR(-ENAMETOOLONG);

        do {
                /* Unfortunately, rbtree walking doesn't give reliable results
                 * under just the RCU read lock, so we have to check for
                 * changes.
                 */
                if (cell)
                        afs_put_cell(net, cell);
                cell = NULL;
                ret = -ENOENT;

                read_seqbegin_or_lock(&net->cells_lock, &seq);

                if (!name) {
                        cell = rcu_dereference_raw(net->ws_cell);
                        if (cell) {
                                afs_get_cell(cell);
                                ret = 0;
                                break;
                        }
                        ret = -EDESTADDRREQ;
                        continue;
                }

                p = rcu_dereference_raw(net->cells.rb_node);
                while (p) {
                        cell = rb_entry(p, struct afs_cell, net_node);

                        n = strncasecmp(cell->name, name,
                                        min_t(size_t, cell->name_len, namesz));
                        if (n == 0)
                                n = cell->name_len - namesz;
                        if (n < 0) {
                                p = rcu_dereference_raw(p->rb_left);
                        } else if (n > 0) {
                                p = rcu_dereference_raw(p->rb_right);
                        } else {
                                if (atomic_inc_not_zero(&cell->usage)) {
                                        ret = 0;
                                        break;
                                }
                                /* We want to repeat the search, this time with
                                 * the lock properly locked.
                                 */
                        }
                        cell = NULL;
                }
        } while (need_seqretry(&net->cells_lock, seq));

        done_seqretry(&net->cells_lock, seq);

        return ret == 0 ? cell : ERR_PTR(ret);
}
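A hypothetical caller sketch of the contract this function exposes: the lookup itself must run inside an RCU read-side critical section, the result is either a cell with its usage count raised or an ERR_PTR() value, and the reference must eventually be dropped with afs_put_cell(). The wrapper function and the cell name below are illustrative, not from fs/afs:

/* Hypothetical caller of afs_lookup_cell_rcu(). */
static int example_use_cell(struct afs_net *net)
{
        struct afs_cell *cell;

        rcu_read_lock();
        cell = afs_lookup_cell_rcu(net, "example.org", 11);
        rcu_read_unlock();

        if (IS_ERR(cell))
                return PTR_ERR(cell);

        /* ... use the cell ... */

        afs_put_cell(net, cell);        /* drop the reference the lookup took */
        return 0;
}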