/* return EEXIST if the same logger is registered, 0 on success. */
int nf_log_register(u_int8_t pf, struct nf_logger *logger)
{
        int i;
        int ret = 0;

        if (pf >= ARRAY_SIZE(init_net.nf.nf_loggers))
                return -EINVAL;

        mutex_lock(&nf_log_mutex);

        if (pf == NFPROTO_UNSPEC) {
                for (i = NFPROTO_UNSPEC; i < NFPROTO_NUMPROTO; i++) {
                        if (rcu_access_pointer(loggers[i][logger->type])) {
                                ret = -EEXIST;
                                goto unlock;
                        }
                }
                for (i = NFPROTO_UNSPEC; i < NFPROTO_NUMPROTO; i++)
                        rcu_assign_pointer(loggers[i][logger->type], logger);
        } else {
                if (rcu_access_pointer(loggers[pf][logger->type])) {
                        ret = -EEXIST;
                        goto unlock;
                }
                rcu_assign_pointer(loggers[pf][logger->type], logger);
        }

unlock:
        mutex_unlock(&nf_log_mutex);
        return ret;
}
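/*
 * A minimal sketch of what registering with nf_log_register() above looks
 * like from a logger backend, assuming the usual struct nf_logger layout
 * (name, type, logfn, me). The backend name, my_log_packet() and the
 * NF_LOG_TYPE_LOG choice are illustrative, not taken from any in-tree module.
 */
static void my_log_packet(struct net *net, u_int8_t pf, unsigned int hooknum,
                          const struct sk_buff *skb,
                          const struct net_device *in,
                          const struct net_device *out,
                          const struct nf_loginfo *li, const char *prefix)
{
        /* Emit the packet somewhere (printk, netlink, ...). */
}

static struct nf_logger my_logger __read_mostly = {
        .name  = "my-logger",
        .type  = NF_LOG_TYPE_LOG,
        .logfn = my_log_packet,
        .me    = THIS_MODULE,
};

static int __init my_logger_init(void)
{
        /* Register for every protocol family; returns -EEXIST if another
         * logger of the same type is already installed.
         */
        return nf_log_register(NFPROTO_UNSPEC, &my_logger);
}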
/**
 * bus1_queue_relink() - change sequence number of an entry
 * @queue:      queue to operate on
 * @entry:      entry to relink
 * @seq:        sequence number to set
 *
 * This changes the sequence number of @entry to @seq. The caller must
 * guarantee that the entry was already linked with an odd-numbered sequence
 * number. This will unlink the entry, change the sequence number and link it
 * again.
 *
 * The caller must hold the write-side peer-lock of the parent peer.
 *
 * Return: True if the queue became readable with this call.
 */
bool bus1_queue_relink(struct bus1_queue *queue,
                       struct bus1_queue_entry *entry, u64 seq)
{
        struct rb_node *front;

        if (WARN_ON(seq == 0 ||
                    RB_EMPTY_NODE(&entry->rb) ||
                    !(entry->seq & 1)))
                return false;

        bus1_queue_assert_held(queue);

        /* remember front, cannot point to @entry */
        front = rcu_access_pointer(queue->front);
        WARN_ON(front == &entry->rb);

        /* drop from rb-tree and insert again */
        rb_erase(&entry->rb, &queue->messages);
        RB_CLEAR_NODE(&entry->rb);
        bus1_queue_link(queue, entry, seq);

        /* if this uncovered a front, then the queue became readable */
        return !front && rcu_access_pointer(queue->front);
}
static int bnxt_unregister_dev(struct bnxt_en_dev *edev, int ulp_id)
{
        struct net_device *dev = edev->net;
        struct bnxt *bp = netdev_priv(dev);
        struct bnxt_ulp *ulp;
        int i = 0;

        ASSERT_RTNL();
        if (ulp_id >= BNXT_MAX_ULP)
                return -EINVAL;

        ulp = &edev->ulp_tbl[ulp_id];
        if (!rcu_access_pointer(ulp->ulp_ops)) {
                netdev_err(bp->dev, "ulp id %d not registered\n", ulp_id);
                return -EINVAL;
        }
        if (ulp_id == BNXT_ROCE_ULP && ulp->msix_requested)
                edev->en_ops->bnxt_free_msix(edev, ulp_id);

        if (ulp->max_async_event_id)
                bnxt_hwrm_func_rgtr_async_events(bp, NULL, 0);

        RCU_INIT_POINTER(ulp->ulp_ops, NULL);
        synchronize_rcu();
        ulp->max_async_event_id = 0;
        ulp->async_events_bmap = NULL;
        while (atomic_read(&ulp->ref_count) != 0 && i < 10) {
                msleep(100);
                i++;
        }
        return 0;
}
void ovs_flow_stats_update(struct sw_flow *flow, __be16 tcp_flags,
                           const struct sk_buff *skb)
{
        struct flow_stats *stats;
        int node = numa_node_id();

        stats = rcu_dereference(flow->stats[node]);

        /* Check if already have node-specific stats. */
        if (likely(stats)) {
                spin_lock(&stats->lock);
                /* Mark if we write on the pre-allocated stats. */
                if (node == 0 && unlikely(flow->stats_last_writer != node))
                        flow->stats_last_writer = node;
        } else {
                stats = rcu_dereference(flow->stats[0]); /* Pre-allocated. */
                spin_lock(&stats->lock);

                /* If the current NUMA-node is the only writer on the
                 * pre-allocated stats keep using them.
                 */
                if (unlikely(flow->stats_last_writer != node)) {
                        /* A previous locker may have already allocated the
                         * stats, so we need to check again.  If node-specific
                         * stats were already allocated, we update the pre-
                         * allocated stats as we have already locked them.
                         */
                        if (likely(flow->stats_last_writer != NUMA_NO_NODE) &&
                            likely(!rcu_access_pointer(flow->stats[node]))) {
                                /* Try to allocate node-specific stats. */
                                struct flow_stats *new_stats;

                                new_stats =
                                        kmem_cache_alloc_node(flow_stats_cache,
                                                              GFP_THISNODE |
                                                              __GFP_NOMEMALLOC,
                                                              node);
                                if (likely(new_stats)) {
                                        new_stats->used = jiffies;
                                        new_stats->packet_count = 1;
                                        new_stats->byte_count = skb->len;
                                        new_stats->tcp_flags = tcp_flags;
                                        spin_lock_init(&new_stats->lock);

                                        rcu_assign_pointer(flow->stats[node],
                                                           new_stats);
                                        goto unlock;
                                }
                        }
                        flow->stats_last_writer = node;
                }
        }

        stats->used = jiffies;
        stats->packet_count++;
        stats->byte_count += skb->len;
        stats->tcp_flags |= tcp_flags;
unlock:
        spin_unlock(&stats->lock);
}
static u16 netvsc_select_queue(struct net_device *ndev, struct sk_buff *skb)
{
        struct net_device_context *net_device_ctx = netdev_priv(ndev);
        struct netvsc_device *nvsc_dev = net_device_ctx->nvdev;
        struct sock *sk = skb->sk;
        int q_idx = sk_tx_queue_get(sk);

        if (q_idx < 0 || skb->ooo_okay || q_idx >= ndev->real_num_tx_queues) {
                u16 hash = __skb_tx_hash(ndev, skb, VRSS_SEND_TAB_SIZE);
                int new_idx;

                new_idx = nvsc_dev->send_table[hash] % nvsc_dev->num_chn;

                if (q_idx != new_idx && sk &&
                    sk_fullsock(sk) && rcu_access_pointer(sk->sk_dst_cache))
                        sk_tx_queue_set(sk, new_idx);

                q_idx = new_idx;
        }

        if (unlikely(!nvsc_dev->chan_table[q_idx].channel))
                q_idx = 0;

        return q_idx;
}
int reuseport_attach_prog(struct sock *sk, struct bpf_prog *prog)
{
        struct sock_reuseport *reuse;
        struct bpf_prog *old_prog;

        if (sk_unhashed(sk) && sk->sk_reuseport) {
                int err = reuseport_alloc(sk, false);

                if (err)
                        return err;
        } else if (!rcu_access_pointer(sk->sk_reuseport_cb)) {
                /* The socket wasn't bound with SO_REUSEPORT */
                return -EINVAL;
        }

        spin_lock_bh(&reuseport_lock);
        reuse = rcu_dereference_protected(sk->sk_reuseport_cb,
                                          lockdep_is_held(&reuseport_lock));
        old_prog = rcu_dereference_protected(reuse->prog,
                                             lockdep_is_held(&reuseport_lock));
        rcu_assign_pointer(reuse->prog, prog);
        spin_unlock_bh(&reuseport_lock);

        sk_reuseport_prog_free(old_prog);
        return 0;
}
int nf_logger_find_get(int pf, enum nf_log_type type)
{
        struct nf_logger *logger;
        int ret = -ENOENT;

        if (pf == NFPROTO_INET) {
                ret = nf_logger_find_get(NFPROTO_IPV4, type);
                if (ret < 0)
                        return ret;

                ret = nf_logger_find_get(NFPROTO_IPV6, type);
                if (ret < 0) {
                        nf_logger_put(NFPROTO_IPV4, type);
                        return ret;
                }

                return 0;
        }

        if (rcu_access_pointer(loggers[pf][type]) == NULL)
                request_module("nf-logger-%u-%u", pf, type);

        rcu_read_lock();
        logger = rcu_dereference(loggers[pf][type]);
        if (logger == NULL)
                goto out;

        if (try_module_get(logger->me))
                ret = 0;
out:
        rcu_read_unlock();
        return ret;
}
static void bss_free(struct cfg80211_internal_bss *bss)
{
        struct cfg80211_bss_ies *ies;

        if (WARN_ON(atomic_read(&bss->hold)))
                return;

        ies = (void *)rcu_access_pointer(bss->pub.beacon_ies);
        if (ies && !bss->pub.hidden_beacon_bss)
                kfree_rcu(ies, rcu_head);
        ies = (void *)rcu_access_pointer(bss->pub.proberesp_ies);
        if (ies)
                kfree_rcu(ies, rcu_head);

        if (!list_empty(&bss->hidden_list))
                list_del(&bss->hidden_list);

        kfree(bss);
}
static void trie_free(struct bpf_map *map)
{
        struct lpm_trie *trie = container_of(map, struct lpm_trie, map);
        struct lpm_trie_node __rcu **slot;
        struct lpm_trie_node *node;

        raw_spin_lock(&trie->lock);

        /* Always start at the root and walk down to a node that has no
         * children. Then free that node, nullify its reference in the parent
         * and start over.
         */

        for (;;) {
                slot = &trie->root;

                for (;;) {
                        node = rcu_dereference_protected(*slot,
                                        lockdep_is_held(&trie->lock));
                        if (!node)
                                goto unlock;

                        if (rcu_access_pointer(node->child[0])) {
                                slot = &node->child[0];
                                continue;
                        }

                        if (rcu_access_pointer(node->child[1])) {
                                slot = &node->child[1];
                                continue;
                        }

                        kfree(node);
                        RCU_INIT_POINTER(*slot, NULL);
                        break;
                }
        }

unlock:
        raw_spin_unlock(&trie->lock);
}
/*
 * release an RxRPC socket
 */
static int rxrpc_release_sock(struct sock *sk)
{
        struct rxrpc_sock *rx = rxrpc_sk(sk);
        struct rxrpc_net *rxnet = rxrpc_net(sock_net(&rx->sk));

        _enter("%p{%d,%d}", sk, sk->sk_state, refcount_read(&sk->sk_refcnt));

        /* declare the socket closed for business */
        sock_orphan(sk);
        sk->sk_shutdown = SHUTDOWN_MASK;

        /* We want to kill off all connections from a service socket
         * as fast as possible because we can't share these; client
         * sockets, on the other hand, can share an endpoint.
         */
        switch (sk->sk_state) {
        case RXRPC_SERVER_BOUND:
        case RXRPC_SERVER_BOUND2:
        case RXRPC_SERVER_LISTENING:
        case RXRPC_SERVER_LISTEN_DISABLED:
                rx->local->service_closed = true;
                break;
        }

        spin_lock_bh(&sk->sk_receive_queue.lock);
        sk->sk_state = RXRPC_CLOSE;
        spin_unlock_bh(&sk->sk_receive_queue.lock);

        if (rx->local && rcu_access_pointer(rx->local->service) == rx) {
                write_lock(&rx->local->services_lock);
                rcu_assign_pointer(rx->local->service, NULL);
                write_unlock(&rx->local->services_lock);
        }

        /* try to flush out this socket */
        rxrpc_discard_prealloc(rx);
        rxrpc_release_calls_on_socket(rx);
        flush_workqueue(rxrpc_workqueue);
        rxrpc_purge_queue(&sk->sk_receive_queue);
        rxrpc_queue_work(&rxnet->service_conn_reaper);
        rxrpc_queue_work(&rxnet->client_conn_reaper);

        rxrpc_put_local(rx->local);
        rx->local = NULL;
        key_put(rx->key);
        rx->key = NULL;
        key_put(rx->securities);
        rx->securities = NULL;
        sock_put(sk);

        _leave(" = 0");
        return 0;
}
/* by default VRF devices do not have a qdisc and are expected
 * to be created with only a single queue.
 */
static bool qdisc_tx_is_default(const struct net_device *dev)
{
        struct netdev_queue *txq;
        struct Qdisc *qdisc;

        if (dev->num_tx_queues > 1)
                return false;

        txq = netdev_get_tx_queue(dev, 0);
        qdisc = rcu_access_pointer(txq->qdisc);

        return !qdisc->enqueue;
}
/*
 * Destroy a cell record
 */
static void afs_cell_destroy(struct rcu_head *rcu)
{
        struct afs_cell *cell = container_of(rcu, struct afs_cell, rcu);

        _enter("%p{%s}", cell, cell->name);

        ASSERTCMP(atomic_read(&cell->usage), ==, 0);

        afs_put_vlserverlist(cell->net, rcu_access_pointer(cell->vl_servers));
        key_put(cell->anonymous_key);
        kfree(cell);

        _leave(" [destroyed]");
}
static void bss_free(struct cfg80211_internal_bss *bss)
{
        struct cfg80211_bss_ies *ies;

        if (WARN_ON(atomic_read(&bss->hold)))
                return;

        ies = (void *)rcu_access_pointer(bss->pub.beacon_ies);
        if (ies && !bss->pub.hidden_beacon_bss)
                kfree_rcu(ies, rcu_head);
        ies = (void *)rcu_access_pointer(bss->pub.proberesp_ies);
        if (ies)
                kfree_rcu(ies, rcu_head);

        /*
         * This happens when the module is removed, it doesn't
         * really matter any more save for completeness
         */
        if (!list_empty(&bss->hidden_list))
                list_del(&bss->hidden_list);

        kfree(bss);
}
/*
 * Return the number of omx ifaces.
 */
int omx_ifaces_get_count(void)
{
        int i, count = 0;

        /* no need to lock since the array of iface is always coherent
         * and we don't access the internals of the ifaces
         */
        for (i = 0; i < omx_iface_max; i++)
                if (rcu_access_pointer(omx_ifaces[i]) != NULL)
                        count++;

        return count;
}
/*
 * Compare new packet with random packet in queue
 * returns true if matched and sets *pidx
 */
static bool choke_match_random(const struct choke_sched_data *q,
                               struct sk_buff *nskb,
                               unsigned int *pidx)
{
        struct sk_buff *oskb;

        if (q->head == q->tail)
                return false;

        oskb = choke_peek_random(q, pidx);
        if (rcu_access_pointer(q->filter_list))
                return choke_get_classid(nskb) == choke_get_classid(oskb);

        return choke_match_flow(oskb, nskb);
}
/*
 * Display the name of the current workstation cell.
 */
static int afs_proc_rootcell_show(struct seq_file *m, void *v)
{
        struct afs_cell *cell;
        struct afs_net *net;

        net = afs_seq2net_single(m);
        if (rcu_access_pointer(net->ws_cell)) {
                rcu_read_lock();
                cell = rcu_dereference(net->ws_cell);
                if (cell)
                        seq_printf(m, "%s\n", cell->name);
                rcu_read_unlock();
        }
        return 0;
}
/* Returns null if this device is not attached to a datapath. */
struct vport *netdev_get_vport(struct net_device *dev)
{
#ifdef IFF_BRIDGE_PORT
#if IFF_BRIDGE_PORT != IFF_OVS_DATAPATH
        if (likely(dev->priv_flags & IFF_OVS_DATAPATH))
#else
        if (likely(rcu_access_pointer(dev->rx_handler) == netdev_frame_hook))
#endif
                return (struct vport *)rcu_dereference_rtnl(dev->rx_handler_data);
        else
                return NULL;
#else
        return (struct vport *)rcu_dereference_rtnl(dev->br_port);
#endif
}
/* Returns null if this device is not attached to a datapath. */
struct vport *ovs_netdev_get_vport(struct net_device *dev)
{
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,36)
#if IFF_OVS_DATAPATH != 0
        if (likely(dev->priv_flags & IFF_OVS_DATAPATH))
#else
        if (likely(rcu_access_pointer(dev->rx_handler) == netdev_frame_hook))
#endif
                return (struct vport *)rcu_dereference_rtnl(dev->rx_handler_data);
        else
                return NULL;
#else
        return (struct vport *)rcu_dereference_rtnl(dev->br_port);
#endif
}
/* Returns null if this device is not attached to a datapath. */
struct vport *ovs_netdev_get_vport(struct net_device *dev)
{
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,36) || \
    defined HAVE_RHEL_OVS_HOOK
#if IFF_OVS_DATAPATH != 0
        if (likely(dev->priv_flags & IFF_OVS_DATAPATH))
#else
        if (likely(rcu_access_pointer(dev->rx_handler) == netdev_frame_hook))
#endif
#ifdef HAVE_RHEL_OVS_HOOK
                return (struct vport *)rcu_dereference_rtnl(dev->ax25_ptr);
#else
                return (struct vport *)rcu_dereference_rtnl(dev->rx_handler_data);
#endif
        else
                return NULL;
#else
        return (struct vport *)rcu_dereference_rtnl(dev->br_port);
#endif
}
/*
 * set the root cell information
 * - can be called with a module parameter string
 * - can be called from a write to /proc/fs/afs/rootcell
 */
int afs_cell_init(struct afs_net *net, const char *rootcell)
{
        struct afs_cell *old_root, *new_root;
        const char *cp, *vllist;
        size_t len;

        _enter("");

        if (!rootcell) {
                /* module is loaded with no parameters, or built statically.
                 * - in the future we might initialize cell DB here.
                 */
                _leave(" = 0 [no root]");
                return 0;
        }

        cp = strchr(rootcell, ':');
        if (!cp) {
                _debug("kAFS: no VL server IP addresses specified");
                vllist = NULL;
                len = strlen(rootcell);
        } else {
                vllist = cp + 1;
                len = cp - rootcell;
        }

        /* allocate a cell record for the root cell */
        new_root = afs_lookup_cell(net, rootcell, len, vllist, false);
        if (IS_ERR(new_root)) {
                _leave(" = %ld", PTR_ERR(new_root));
                return PTR_ERR(new_root);
        }

        if (!test_and_set_bit(AFS_CELL_FL_NO_GC, &new_root->flags))
                afs_get_cell(new_root);

        /* install the new cell */
        write_seqlock(&net->cells_lock);
        old_root = rcu_access_pointer(net->ws_cell);
        rcu_assign_pointer(net->ws_cell, new_root);
        write_sequnlock(&net->cells_lock);

        afs_put_cell(net, old_root);
        _leave(" = 0");
        return 0;
}
/**
 * bus1_queue_link() - link entry into sorted queue
 * @queue:      queue to link into
 * @entry:      entry to link
 * @seq:        sequence number to use
 *
 * This links @entry into the message queue @queue. The caller must guarantee
 * that the entry is unlinked and was never marked as ready.
 *
 * The caller must hold the write-side peer-lock of the parent peer.
 *
 * Return: True if the queue became readable with this call.
 */
bool bus1_queue_link(struct bus1_queue *queue,
                     struct bus1_queue_entry *entry, u64 seq)
{
        struct bus1_queue_entry *iter;
        struct rb_node *prev, **slot;
        bool is_leftmost = true;

        if (WARN_ON(seq == 0 ||
                    !RB_EMPTY_NODE(&entry->rb) ||
                    (entry->seq > 0 && !(entry->seq & 1))))
                return false;

        bus1_queue_assert_held(queue);

        slot = &queue->messages.rb_node;
        prev = NULL;
        while (*slot) {
                prev = *slot;
                iter = bus1_queue_entry(prev);
                if (seq < iter->seq) {
                        slot = &prev->rb_left;
                } else /* if (seq >= iter->seq) */ {
                        slot = &prev->rb_right;
                        is_leftmost = false;
                }
        }

        entry->seq = seq;
        rb_link_node(&entry->rb, prev, slot);
        rb_insert_color(&entry->rb, &queue->messages);

        if (is_leftmost) {
                WARN_ON(rcu_access_pointer(queue->front));
                if (!(seq & 1))
                        rcu_assign_pointer(queue->front, &entry->rb);
        }

        /*
         * If we're linked leftmost, we're the new front. If we're ready to be
         * dequeued, the list has become readable via this entry. Note that
         * the previous front cannot be ready in this case, as we *never*
         * order ready entries in front of other ready entries.
         */
        return is_leftmost && !(seq & 1);
}
static inline int netvsc_get_tx_queue(struct net_device *ndev,
                                      struct sk_buff *skb, int old_idx)
{
        const struct net_device_context *ndc = netdev_priv(ndev);
        struct sock *sk = skb->sk;
        int q_idx;

        q_idx = ndc->tx_send_table[netvsc_get_hash(skb, sk) &
                                   (VRSS_SEND_TAB_SIZE - 1)];

        /* If queue index changed record the new value */
        if (q_idx != old_idx &&
            sk && sk_fullsock(sk) && rcu_access_pointer(sk->sk_dst_cache))
                sk_tx_queue_set(sk, q_idx);

        return q_idx;
}
int nf_logger_find_get(int pf, enum nf_log_type type)
{
        struct nf_logger *logger;
        int ret = -ENOENT;

        if (rcu_access_pointer(loggers[pf][type]) == NULL)
                request_module("nf-logger-%u-%u", pf, type);

        rcu_read_lock();
        logger = rcu_dereference(loggers[pf][type]);
        if (logger == NULL)
                goto out;

        if (logger && try_module_get(logger->me))
                ret = 0;
out:
        rcu_read_unlock();
        return ret;
}
/**
 * __blocking_notifier_call_chain - Call functions in a blocking notifier chain
 * @nh: Pointer to head of the blocking notifier chain
 * @val: Value passed unmodified to notifier function
 * @v: Pointer passed unmodified to notifier function
 * @nr_to_call: See comment for notifier_call_chain.
 * @nr_calls: See comment for notifier_call_chain.
 *
 * Calls each function in a notifier chain in turn.  The functions
 * run in a process context, so they are allowed to block.
 *
 * If the return value of the notifier can be and'ed
 * with %NOTIFY_STOP_MASK then blocking_notifier_call_chain()
 * will return immediately, with the return value of
 * the notifier function which halted execution.
 * Otherwise the return value is the return value
 * of the last notifier function called.
 */
int __blocking_notifier_call_chain(struct blocking_notifier_head *nh,
                                   unsigned long val, void *v,
                                   int nr_to_call, int *nr_calls)
{
        int ret = NOTIFY_DONE;

        /*
         * We check the head outside the lock, but if this access is
         * racy then it does not matter what the result of the test
         * is, we re-check the list after having taken the lock anyway:
         */
        if (rcu_access_pointer(nh->head)) {
                down_read(&nh->rwsem);
                ret = notifier_call_chain(&nh->head, val, v, nr_to_call,
                                          nr_calls);
                up_read(&nh->rwsem);
        }
        return ret;
}
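/*
 * A minimal, self-contained sketch of how the public wrappers around
 * __blocking_notifier_call_chain() are typically used from a module. The
 * chain name (example_chain), the callback and the event value are
 * hypothetical; BLOCKING_NOTIFIER_HEAD(), blocking_notifier_chain_register(),
 * blocking_notifier_call_chain() and blocking_notifier_chain_unregister()
 * are the standard <linux/notifier.h> API.
 */
#include <linux/notifier.h>

static BLOCKING_NOTIFIER_HEAD(example_chain);

static int example_event_cb(struct notifier_block *nb,
                            unsigned long event, void *data)
{
        /* May sleep here: blocking chains run in process context. */
        return NOTIFY_OK;
}

static struct notifier_block example_nb = {
        .notifier_call = example_event_cb,
};

static void example_register_and_fire(void)
{
        blocking_notifier_chain_register(&example_chain, &example_nb);

        /* Invokes every registered callback in turn, in process context. */
        blocking_notifier_call_chain(&example_chain, 1, NULL);

        blocking_notifier_chain_unregister(&example_chain, &example_nb);
}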
static u16 __netdev_pick_tx(struct net_device *dev, struct sk_buff *skb)
{
        struct sock *sk = skb->sk;
        int queue_index = sk_tx_queue_get(sk);

        if (queue_index < 0 || skb->ooo_okay ||
            queue_index >= dev->real_num_tx_queues) {
                int new_index = get_xps_queue(dev, skb);

                if (new_index < 0)
                        new_index = skb_tx_hash(dev, skb);

                if (queue_index != new_index && sk &&
                    rcu_access_pointer(sk->sk_dst_cache))
                        sk_tx_queue_set(sk, new_index);

                queue_index = new_index;
        }

        return queue_index;
}
static void lb_tx_hash_to_port_mapping_null_port(struct team *team,
                                                 struct team_port *port)
{
        struct lb_priv *lb_priv = get_lb_priv(team);
        bool changed = false;
        int i;

        for (i = 0; i < LB_TX_HASHTABLE_SIZE; i++) {
                struct lb_port_mapping *pm;

                pm = &lb_priv->ex->tx_hash_to_port_mapping[i];
                if (rcu_access_pointer(pm->port) == port) {
                        RCU_INIT_POINTER(pm->port, NULL);
                        team_option_inst_set_change(pm->opt_inst_info);
                        changed = true;
                }
        }
        if (changed)
                team_options_change_check(team);
}
/**
 * reuseport_add_sock - Add a socket to the reuseport group of another.
 * @sk:  New socket to add to the group.
 * @sk2: Socket belonging to the existing reuseport group.
 * @bind_inany: Whether or not the group is bound to a local INANY address.
 *
 * May return ENOMEM and not add socket to group under memory pressure.
 */
int reuseport_add_sock(struct sock *sk, struct sock *sk2, bool bind_inany)
{
        struct sock_reuseport *old_reuse, *reuse;

        if (!rcu_access_pointer(sk2->sk_reuseport_cb)) {
                int err = reuseport_alloc(sk2, bind_inany);

                if (err)
                        return err;
        }

        spin_lock_bh(&reuseport_lock);
        reuse = rcu_dereference_protected(sk2->sk_reuseport_cb,
                                          lockdep_is_held(&reuseport_lock));
        old_reuse = rcu_dereference_protected(sk->sk_reuseport_cb,
                                              lockdep_is_held(&reuseport_lock));
        if (old_reuse && old_reuse->num_socks != 1) {
                spin_unlock_bh(&reuseport_lock);
                return -EBUSY;
        }

        if (reuse->num_socks == reuse->max_socks) {
                reuse = reuseport_grow(reuse);
                if (!reuse) {
                        spin_unlock_bh(&reuseport_lock);
                        return -ENOMEM;
                }
        }

        reuse->socks[reuse->num_socks] = sk;
        /* paired with smp_rmb() in reuseport_select_sock() */
        smp_wmb();
        reuse->num_socks++;
        rcu_assign_pointer(sk->sk_reuseport_cb, reuse);

        spin_unlock_bh(&reuseport_lock);

        if (old_reuse)
                call_rcu(&old_reuse->rcu, reuseport_free_rcu);
        return 0;
}
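/*
 * A minimal userspace sketch of how a reuseport group comes into being: two
 * UDP sockets set SO_REUSEPORT and bind to the same address/port, which is
 * what drives reuseport_alloc()/reuseport_add_sock() on the kernel side.
 * Error handling is trimmed and the port number is arbitrary.
 */
#include <netinet/in.h>
#include <string.h>
#include <sys/socket.h>
#include <unistd.h>

static int make_reuseport_socket(unsigned short port)
{
        struct sockaddr_in addr;
        int one = 1;
        int fd = socket(AF_INET, SOCK_DGRAM, 0);

        setsockopt(fd, SOL_SOCKET, SO_REUSEPORT, &one, sizeof(one));

        memset(&addr, 0, sizeof(addr));
        addr.sin_family = AF_INET;
        addr.sin_addr.s_addr = htonl(INADDR_ANY);
        addr.sin_port = htons(port);
        bind(fd, (struct sockaddr *)&addr, sizeof(addr));

        return fd;
}

int main(void)
{
        /* First bind creates the group, second bind joins it. */
        int a = make_reuseport_socket(9999);
        int b = make_reuseport_socket(9999);

        close(b);
        close(a);
        return 0;
}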
void fl6_free_socklist(struct sock *sk)
{
        struct ipv6_pinfo *np = inet6_sk(sk);
        struct ipv6_fl_socklist *sfl;

        if (!rcu_access_pointer(np->ipv6_fl_list))
                return;

        spin_lock_bh(&ip6_sk_fl_lock);
        while ((sfl = rcu_dereference_protected(np->ipv6_fl_list,
                        lockdep_is_held(&ip6_sk_fl_lock))) != NULL) {
                np->ipv6_fl_list = sfl->next;
                spin_unlock_bh(&ip6_sk_fl_lock);

                fl_release(sfl->fl);
                kfree_rcu(sfl, rcu);

                spin_lock_bh(&ip6_sk_fl_lock);
        }
        spin_unlock_bh(&ip6_sk_fl_lock);
}
/* Returns null if this device is not attached to a datapath. */
struct vport *ovs_netdev_get_vport(struct net_device *dev)
{
#if defined HAVE_NETDEV_RX_HANDLER_REGISTER || \
    defined HAVE_RHEL_OVS_HOOK
#ifdef HAVE_OVS_DATAPATH
        if (likely(dev->priv_flags & IFF_OVS_DATAPATH))
#else
        if (likely(rcu_access_pointer(dev->rx_handler) == netdev_frame_hook))
#endif
#ifdef HAVE_RHEL_OVS_HOOK
                return (struct vport *)rcu_dereference_rtnl(dev->ax25_ptr);
#else
#ifdef HAVE_NET_DEVICE_EXTENDED
                return (struct vport *)
                        rcu_dereference_rtnl(netdev_extended(dev)->rx_handler_data);
#else
                return (struct vport *)rcu_dereference_rtnl(dev->rx_handler_data);
#endif
#endif
        else
                return NULL;
#else
        return (struct vport *)rcu_dereference_rtnl(dev->br_port);
#endif
}
bool wg_noise_received_with_keypair(struct noise_keypairs *keypairs,
                                    struct noise_keypair *received_keypair)
{
        struct noise_keypair *old_keypair;
        bool key_is_new;

        /* We first check without taking the spinlock. */
        key_is_new = received_keypair ==
                     rcu_access_pointer(keypairs->next_keypair);
        if (likely(!key_is_new))
                return false;

        spin_lock_bh(&keypairs->keypair_update_lock);

        /* After locking, we double check that things didn't change from
         * beneath us.
         */
        if (unlikely(received_keypair !=
                     rcu_dereference_protected(keypairs->next_keypair,
                             lockdep_is_held(&keypairs->keypair_update_lock)))) {
                spin_unlock_bh(&keypairs->keypair_update_lock);
                return false;
        }

        /* When we've finally received the confirmation, we slide the next
         * into the current, the current into the previous, and get rid of
         * the old previous.
         */
        old_keypair = rcu_dereference_protected(keypairs->previous_keypair,
                lockdep_is_held(&keypairs->keypair_update_lock));
        rcu_assign_pointer(keypairs->previous_keypair,
                rcu_dereference_protected(keypairs->current_keypair,
                        lockdep_is_held(&keypairs->keypair_update_lock)));
        wg_noise_keypair_put(old_keypair, true);
        rcu_assign_pointer(keypairs->current_keypair, received_keypair);
        RCU_INIT_POINTER(keypairs->next_keypair, NULL);

        spin_unlock_bh(&keypairs->keypair_update_lock);

        return true;
}