int pblk_write_to_cache(struct pblk *pblk, struct bio *bio, unsigned long flags)
{
	struct request_queue *q = pblk->dev->q;
	struct pblk_w_ctx w_ctx;
	sector_t lba = pblk_get_lba(bio);
	unsigned long start_time = jiffies;
	unsigned int bpos, pos;
	int nr_entries = pblk_get_secs(bio);
	int i, ret;

	generic_start_io_acct(q, WRITE, bio_sectors(bio), &pblk->disk->part0);

	/* Update the write buffer head (mem) with the entries that we can
	 * write. The write in itself cannot fail, so there is no need to
	 * rollback from here on.
	 */
retry:
	ret = pblk_rb_may_write_user(&pblk->rwb, bio, nr_entries, &bpos);
	switch (ret) {
	case NVM_IO_REQUEUE:
		io_schedule();
		goto retry;
	case NVM_IO_ERR:
		pblk_pipeline_stop(pblk);
		goto out;
	}

	if (unlikely(!bio_has_data(bio)))
		goto out;

	pblk_ppa_set_empty(&w_ctx.ppa);
	w_ctx.flags = flags;
	if (bio->bi_opf & REQ_PREFLUSH)
		w_ctx.flags |= PBLK_FLUSH_ENTRY;

	for (i = 0; i < nr_entries; i++) {
		void *data = bio_data(bio);

		w_ctx.lba = lba + i;

		pos = pblk_rb_wrap_pos(&pblk->rwb, bpos + i);
		pblk_rb_write_entry_user(&pblk->rwb, data, w_ctx, pos);

		bio_advance(bio, PBLK_EXPOSED_PAGE_SIZE);
	}

	atomic64_add(nr_entries, &pblk->user_wa);

#ifdef CONFIG_NVM_DEBUG
	atomic_long_add(nr_entries, &pblk->inflight_writes);
	atomic_long_add(nr_entries, &pblk->req_writes);
#endif

	pblk_rl_inserted(&pblk->rl, nr_entries);

out:
	generic_end_io_acct(q, WRITE, &pblk->disk->part0, start_time);
	pblk_write_should_kick(pblk);
	return ret;
}
static void mr_alrt_leave(void)
{
	unsigned long then;

	if (!atomic_xchg(&alrt_onoff, 0))
		return;

	then = atomic_long_xchg(&alrt_start, 0);
	atomic_inc(&alrt_count);

	/* An alert that starts and ends within the same jiffy would be
	 * accounted as zero time; credit it with half a jiffy instead.
	 */
	if (jiffies == then)
		atomic_long_add(jiffies_to_msecs(1) / 2, &alrt_time);
	else
		atomic_long_add(jiffies_to_msecs(jiffies - then), &alrt_time);
}
static void update_mcs_stats(enum mcs_op op, unsigned long clks)
{
	atomic_long_inc(&mcs_op_statistics[op].count);
	atomic_long_add(clks, &mcs_op_statistics[op].total);
	if (mcs_op_statistics[op].max < clks)
		mcs_op_statistics[op].max = clks;
}
/*
 * On GC the incoming lbas are not necessarily sequential. Also, some of the
 * lbas might not be valid entries, which are marked as empty by the GC thread
 */
int pblk_write_gc_to_cache(struct pblk *pblk, struct pblk_gc_rq *gc_rq)
{
	struct pblk_w_ctx w_ctx;
	unsigned int bpos, pos;
	void *data = gc_rq->data;
	int i, valid_entries;

	/* Update the write buffer head (mem) with the entries that we can
	 * write. The write in itself cannot fail, so there is no need to
	 * rollback from here on.
	 */
retry:
	if (!pblk_rb_may_write_gc(&pblk->rwb, gc_rq->secs_to_gc, &bpos)) {
		io_schedule();
		goto retry;
	}

	w_ctx.flags = PBLK_IOTYPE_GC;
	pblk_ppa_set_empty(&w_ctx.ppa);

	for (i = 0, valid_entries = 0; i < gc_rq->nr_secs; i++) {
		if (gc_rq->lba_list[i] == ADDR_EMPTY)
			continue;

		w_ctx.lba = gc_rq->lba_list[i];

		pos = pblk_rb_wrap_pos(&pblk->rwb, bpos + valid_entries);
		pblk_rb_write_entry_gc(&pblk->rwb, data, w_ctx, gc_rq->line,
				       gc_rq->paddr_list[i], pos);

		data += PBLK_EXPOSED_PAGE_SIZE;
		valid_entries++;
	}

	WARN_ONCE(gc_rq->secs_to_gc != valid_entries,
		  "pblk: inconsistent GC write\n");

	atomic64_add(valid_entries, &pblk->gc_wa);

#ifdef CONFIG_NVM_DEBUG
	atomic_long_add(valid_entries, &pblk->inflight_writes);
	atomic_long_add(valid_entries, &pblk->recov_gc_writes);
#endif

	pblk_write_should_kick(pblk);
	return NVM_IO_OK;
}
void pblk_submit_rec(struct work_struct *work)
{
	struct pblk_rec_ctx *recovery =
			container_of(work, struct pblk_rec_ctx, ws_rec);
	struct pblk *pblk = recovery->pblk;
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_rq *rqd = recovery->rqd;
	struct pblk_c_ctx *c_ctx = nvm_rq_to_pdu(rqd);
	int max_secs = nvm_max_phys_sects(dev);
	struct bio *bio;
	unsigned int nr_rec_secs;
	unsigned int pgs_read;
	int ret;

	nr_rec_secs = bitmap_weight((unsigned long *)&rqd->ppa_status,
				    max_secs);

	bio = bio_alloc(GFP_KERNEL, nr_rec_secs);
	if (!bio) {
		pr_err("pblk: not able to create recovery bio\n");
		return;
	}

	bio->bi_iter.bi_sector = 0;
	bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
	rqd->bio = bio;
	rqd->nr_ppas = nr_rec_secs;

	pgs_read = pblk_rb_read_to_bio_list(&pblk->rwb, bio, &recovery->failed,
					    nr_rec_secs);
	if (pgs_read != nr_rec_secs) {
		pr_err("pblk: could not read recovery entries\n");
		goto err;
	}

	if (pblk_setup_w_rec_rq(pblk, rqd, c_ctx)) {
		pr_err("pblk: could not setup recovery request\n");
		goto err;
	}

#ifdef CONFIG_NVM_DEBUG
	atomic_long_add(nr_rec_secs, &pblk->recov_writes);
#endif

	ret = pblk_submit_io(pblk, rqd);
	if (ret) {
		pr_err("pblk: I/O submission failed: %d\n", ret);
		goto err;
	}

	mempool_free(recovery, pblk->rec_pool);
	return;

err:
	bio_put(bio);
	pblk_free_rqd(pblk, rqd, WRITE);
}
static void update_mcs_stats(enum mcs_op op, unsigned long clks)
{
	unsigned long nsec;

	nsec = CLKS2NSEC(clks);
	atomic_long_inc(&mcs_op_statistics[op].count);
	atomic_long_add(nsec, &mcs_op_statistics[op].total);
	if (mcs_op_statistics[op].max < nsec)
		mcs_op_statistics[op].max = nsec;
}
static int l2tp_eth_dev_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct l2tp_eth *priv = netdev_priv(dev);
	struct l2tp_session *session = priv->session;

	atomic_long_add(skb->len, &priv->tx_bytes);
	atomic_long_inc(&priv->tx_packets);

	l2tp_xmit_skb(session, skb, session->hdr_len);

	return NETDEV_TX_OK;
}
void vm_acct_memory(long pages)
{
	long *local;

	preempt_disable();
	local = &__get_cpu_var(committed_space);
	*local += pages;
	if (*local > ACCT_THRESHOLD || *local < -ACCT_THRESHOLD) {
		atomic_long_add(*local, &vm_committed_space);
		*local = 0;
	}
	preempt_enable();
}
/* Drop the CPU's cached committed space back into the central pool. */
static int cpu_swap_callback(struct notifier_block *nfb,
			     unsigned long action,
			     void *hcpu)
{
	long *committed;

	committed = &per_cpu(committed_space, (long)hcpu);
	if (action == CPU_DEAD || action == CPU_DEAD_FROZEN) {
		atomic_long_add(*committed, &vm_committed_space);
		*committed = 0;
		drain_cpu_pagevecs((long)hcpu);
	}
	return NOTIFY_OK;
}
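/*
 * The two functions above implement threshold-batched accounting: each CPU
 * accumulates small deltas in a per-CPU slot and only touches the shared
 * atomic once the cached delta crosses ACCT_THRESHOLD, with the hotplug
 * callback flushing a dead CPU's remainder. Below is a minimal userspace
 * sketch of the same idea, assuming C11 atomics and thread-local storage in
 * place of per-CPU variables; all names here are illustrative, not part of
 * any kernel API. The preempt_disable()/preempt_enable() bracket is not
 * needed here: a thread owns its TLS slot outright, whereas the kernel
 * version must stay pinned to one CPU while it touches the per-CPU slot.
 */
#include <stdatomic.h>
#include <threads.h>

#define ACCT_THRESHOLD 64

static atomic_long committed;		/* shared pool */
static thread_local long local_delta;	/* per-thread cache */

static void acct_memory(long pages)
{
	local_delta += pages;
	if (local_delta > ACCT_THRESHOLD || local_delta < -ACCT_THRESHOLD) {
		/* Flush the batched delta to the shared counter. */
		atomic_fetch_add(&committed, local_delta);
		local_delta = 0;
	}
}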
static int l2tp_eth_dev_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct l2tp_eth *priv = netdev_priv(dev);
	struct l2tp_session *session = priv->session;
	/* Snapshot the length first: l2tp_xmit_skb() may have consumed
	 * the skb by the time it returns. */
	unsigned int len = skb->len;
	int ret = l2tp_xmit_skb(session, skb, session->hdr_len);

	/* Unlike the older variant above, only successful transmissions
	 * are accounted; failures are counted as drops. */
	if (likely(ret == NET_XMIT_SUCCESS)) {
		atomic_long_add(len, &priv->tx_bytes);
		atomic_long_inc(&priv->tx_packets);
	} else {
		atomic_long_inc(&priv->tx_dropped);
	}

	return NETDEV_TX_OK;
}
static inline void inc_slabs_node(struct kmem_cache *s, int node, int objects)
{
	struct kmem_cache_node *n = get_node(s, node);

	/*
	 * May be called early in order to allocate a slab for the
	 * kmem_cache_node structure. Solve the chicken-egg
	 * dilemma by deferring the increment of the count during
	 * bootstrap (see early_kmem_cache_node_alloc).
	 */
	if (n) {
		atomic_long_inc(&n->nr_slabs);
		atomic_long_add(objects, &n->total_objects);
	}
}
void calc_load_enter_idle(void)
{
	struct rq *this_rq = this_rq();
	long delta;

	/*
	 * We're going into NOHZ mode; if there's any pending delta, fold it
	 * into the pending idle delta.
	 */
	delta = calc_load_fold_active(this_rq);
	if (delta) {
		int idx = calc_load_write_idx();

		atomic_long_add(delta, &calc_load_idle[idx]);
	}
}
void calc_load_nohz_start(void)
{
	struct rq *this_rq = this_rq();
	long delta;

	/*
	 * We're going into NO_HZ mode; if there's any pending delta, fold it
	 * into the pending NO_HZ delta.
	 */
	delta = calc_load_fold_active(this_rq, 0);
	if (delta) {
		int idx = calc_load_write_idx();

		atomic_long_add(delta, &calc_load_nohz[idx]);
	}
}
static void l2tp_eth_dev_recv(struct l2tp_session *session, struct sk_buff *skb,
			      int data_len)
{
	struct l2tp_eth_sess *spriv = l2tp_session_priv(session);
	struct net_device *dev = spriv->dev;
	struct l2tp_eth *priv = netdev_priv(dev);

	if (session->debug & L2TP_MSG_DATA) {
		unsigned int length;

		length = min(32u, skb->len);
		if (!pskb_may_pull(skb, length))
			goto error;

		pr_debug("%s: eth recv\n", session->name);
		print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, skb->data, length);
	}

	if (!pskb_may_pull(skb, ETH_HLEN))
		goto error;

	secpath_reset(skb);

	/* checksums verified by L2TP */
	skb->ip_summed = CHECKSUM_NONE;

	skb_dst_drop(skb);
	nf_reset(skb);

	if (dev_forward_skb(dev, skb) == NET_RX_SUCCESS) {
		atomic_long_inc(&priv->rx_packets);
		atomic_long_add(data_len, &priv->rx_bytes);
	} else {
		atomic_long_inc(&priv->rx_errors);
	}
	return;

error:
	atomic_long_inc(&priv->rx_errors);
	kfree_skb(skb);
}
value_type operator+=(value_type value)
{
	atomic_long_add(value, &_rep);
	/* Note: the add and the read-back are two separate atomic
	 * operations, so a concurrent update in between can make the
	 * returned value differ from old + value. */
	return atomic_long_read(&_rep);
}
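/*
 * If callers expect += to return the value produced by this particular
 * addition, a fetch-and-add primitive closes the window noted above. A
 * minimal sketch using C11 atomics; the counter type and field names are
 * hypothetical stand-ins for whatever structure holds _rep.
 */
#include <stdatomic.h>

struct counter {
	atomic_long rep;
};

/* atomic_fetch_add() returns the value held immediately before the add,
 * so old + value is exactly the result of this increment, regardless of
 * concurrent updates. */
static long counter_add(struct counter *c, long value)
{
	return atomic_fetch_add(&c->rep, value) + value;
}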
/*
 * write to a file
 */
static int afs_store_data(struct address_space *mapping,
			  pgoff_t first, pgoff_t last,
			  unsigned offset, unsigned to)
{
	struct afs_vnode *vnode = AFS_FS_I(mapping->host);
	struct afs_fs_cursor fc;
	struct afs_wb_key *wbk = NULL;
	struct list_head *p;
	int ret = -ENOKEY, ret2;

	_enter("%s{%x:%u.%u},%lx,%lx,%x,%x",
	       vnode->volume->name,
	       vnode->fid.vid,
	       vnode->fid.vnode,
	       vnode->fid.unique,
	       first, last, offset, to);

	spin_lock(&vnode->wb_lock);
	p = vnode->wb_keys.next;

	/* Iterate through the list looking for a valid key to use. */
try_next_key:
	while (p != &vnode->wb_keys) {
		wbk = list_entry(p, struct afs_wb_key, vnode_link);
		_debug("wbk %u", key_serial(wbk->key));
		ret2 = key_validate(wbk->key);
		if (ret2 == 0)
			goto found_key;
		if (ret == -ENOKEY)
			ret = ret2;
		p = p->next;
	}

	spin_unlock(&vnode->wb_lock);
	afs_put_wb_key(wbk);
	_leave(" = %d [no keys]", ret);
	return ret;

found_key:
	refcount_inc(&wbk->usage);
	spin_unlock(&vnode->wb_lock);

	_debug("USE WB KEY %u", key_serial(wbk->key));

	ret = -ERESTARTSYS;
	if (afs_begin_vnode_operation(&fc, vnode, wbk->key)) {
		while (afs_select_fileserver(&fc)) {
			fc.cb_break = afs_calc_vnode_cb_break(vnode);
			afs_fs_store_data(&fc, mapping, first, last, offset, to);
		}

		afs_check_for_remote_deletion(&fc, fc.vnode);
		afs_vnode_commit_status(&fc, vnode, fc.cb_break);
		ret = afs_end_vnode_operation(&fc);
	}

	switch (ret) {
	case 0:
		afs_stat_v(vnode, n_stores);
		atomic_long_add((last * PAGE_SIZE + to) -
				(first * PAGE_SIZE + offset),
				&afs_v2net(vnode)->n_store_bytes);
		break;
	case -EACCES:
	case -EPERM:
	case -ENOKEY:
	case -EKEYEXPIRED:
	case -EKEYREJECTED:
	case -EKEYREVOKED:
		_debug("next");
		spin_lock(&vnode->wb_lock);
		p = wbk->vnode_link.next;
		afs_put_wb_key(wbk);
		goto try_next_key;
	}

	afs_put_wb_key(wbk);
	_leave(" = %d", ret);
	return ret;
}