Example No. 1
static long calc_load_nohz_fold(void)
{
	int idx = calc_load_read_idx();
	long delta = 0;

	if (atomic_long_read(&calc_load_nohz[idx]))
		delta = atomic_long_xchg(&calc_load_nohz[idx], 0);

	return delta;
}
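The guard before the exchange is the point of this example: the plain atomic_long_read() skips the more expensive atomic xchg when there is nothing to fold. A minimal user-space analogue of the same pattern, using C11 <stdatomic.h> and a hypothetical pending counter, might look like this:

#include <stdatomic.h>

/* Hypothetical counter that other contexts add to; fold_pending()
 * drains it.  The cheap load avoids the atomic exchange entirely when
 * the counter is already zero. */
static atomic_long pending;

static long fold_pending(void)
{
	long delta = 0;

	if (atomic_load(&pending))
		delta = atomic_exchange(&pending, 0);

	return delta;
}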
Example No. 2
static u64 tcp_read_usage(struct mem_cgroup *memcg)
{
	struct cg_proto *cg_proto;

	cg_proto = tcp_prot.proto_cgroup(memcg);
	if (!cg_proto)
		return atomic_long_read(&tcp_memory_allocated) << PAGE_SHIFT;

	return res_counter_read_u64(&cg_proto->memory_allocated, RES_USAGE);
}
Example No. 3
/**
 *	ovs_vport_get_stats - retrieve device stats
 *
 * @vport: vport from which to retrieve the stats
 * @stats: location to store stats
 *
 * Retrieves transmit, receive, and error stats for the given device.
 *
 * Must be called with ovs_mutex or rcu_read_lock.
 */
void ovs_vport_get_stats(struct vport *vport, struct ovs_vport_stats *stats)
{
	int i;

	/* We potentially have two sources of stats that need to be
	 * combined: those we have collected (split into err_stats and
	 * percpu_stats), and device error stats from netdev->get_stats()
	 * (for errors that happen downstream and therefore aren't
	 * reported through our vport_record_error() function).
	 * Stats from first source are reported by ovs over
	 * OVS_VPORT_ATTR_STATS.
	 * netdev-stats can be directly read over netlink-ioctl.
	 */

	stats->rx_errors  = atomic_long_read(&vport->err_stats.rx_errors);
	stats->tx_errors  = atomic_long_read(&vport->err_stats.tx_errors);
	stats->tx_dropped = atomic_long_read(&vport->err_stats.tx_dropped);
	stats->rx_dropped = atomic_long_read(&vport->err_stats.rx_dropped);

	stats->rx_bytes		= 0;
	stats->rx_packets	= 0;
	stats->tx_bytes		= 0;
	stats->tx_packets	= 0;

	for_each_possible_cpu(i) {
		const struct pcpu_sw_netstats *percpu_stats;
		struct pcpu_sw_netstats local_stats;
		unsigned int start;

		percpu_stats = per_cpu_ptr(vport->percpu_stats, i);

		do {
			start = u64_stats_fetch_begin_irq(&percpu_stats->syncp);
			local_stats = *percpu_stats;
		} while (u64_stats_fetch_retry_irq(&percpu_stats->syncp, start));

		stats->rx_bytes		+= local_stats.rx_bytes;
		stats->rx_packets	+= local_stats.rx_packets;
		stats->tx_bytes		+= local_stats.tx_bytes;
		stats->tx_packets	+= local_stats.tx_packets;
	}
}
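The per-CPU loop is a seqcount-style read: u64_stats_fetch_begin_irq() samples a sequence counter, the stats are copied, and u64_stats_fetch_retry_irq() restarts the copy if a writer ran in between. A user-space sketch of the same retry idea with C11 atomics (names are invented; see the caveat in the comment):

#include <stdatomic.h>

struct stats { unsigned long rx_bytes, rx_packets; };

struct pcpu_stats {
	struct stats data;
	atomic_uint  seq;	/* even = stable, odd = writer in progress */
};

/* Retry loop in the spirit of u64_stats_fetch_begin/retry: copy the
 * payload, then start over if the sequence counter was odd or changed.
 * (The plain reads of .data are formally a data race under strict C11;
 * the kernel helpers rely on their own primitives, so treat this only
 * as an illustration of the control flow.) */
static struct stats stats_read(const struct pcpu_stats *s)
{
	struct stats snap;
	unsigned int start;

	do {
		start = atomic_load_explicit(&s->seq, memory_order_acquire);
		snap = s->data;
		atomic_thread_fence(memory_order_acquire);
	} while ((start & 1) ||
		 start != atomic_load_explicit(&s->seq, memory_order_relaxed));

	return snap;
}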
Example No. 4
void copy_io_context(struct io_context **pdst, struct io_context **psrc)
{
	struct io_context *src = *psrc;
	struct io_context *dst = *pdst;

	if (src) {
		BUG_ON(atomic_long_read(&src->refcount) == 0);
		atomic_long_inc(&src->refcount);
		put_io_context(dst);
		*pdst = src;
	}
}
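The ordering here matters: the reference on src is taken before the old *pdst is dropped with put_io_context() (shown in Examples No. 21 and 27 below), so the destination never transiently points at an object whose last reference is gone. A small user-space analogue of that take-then-release pairing, built around a hypothetical struct obj:

#include <assert.h>
#include <stdatomic.h>
#include <stdlib.h>

struct obj {
	atomic_long refcount;	/* hypothetical refcounted object */
};

static void obj_put(struct obj *o)
{
	if (!o)
		return;
	assert(atomic_load(&o->refcount) > 0);
	if (atomic_fetch_sub(&o->refcount, 1) == 1)
		free(o);	/* last reference dropped */
}

/* Same shape as copy_io_context(): take the new reference first, then
 * release whatever the destination held, then update the destination. */
static void obj_copy(struct obj **pdst, struct obj **psrc)
{
	struct obj *src = *psrc;

	if (src) {
		atomic_fetch_add(&src->refcount, 1);
		obj_put(*pdst);
		*pdst = src;
	}
}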
Example No. 5
unsigned int lib_ring_buffer_poll(struct file *filp, poll_table *wait,
		struct lib_ring_buffer *buf)
{
	unsigned int mask = 0;
	struct channel *chan = buf->backend.chan;
	const struct lib_ring_buffer_config *config = &chan->backend.config;
	int finalized, disabled;

	if (filp->f_mode & FMODE_READ) {
		poll_wait_set_exclusive(wait);
		poll_wait(filp, &buf->read_wait, wait);

		finalized = lib_ring_buffer_is_finalized(config, buf);
		disabled = lib_ring_buffer_channel_is_disabled(chan);

		/*
		 * lib_ring_buffer_is_finalized() contains a smp_rmb() ordering
		 * finalized load before offsets loads.
		 */
		WARN_ON(atomic_long_read(&buf->active_readers) != 1);
retry:
		if (disabled)
			return POLLERR;

		if (subbuf_trunc(lib_ring_buffer_get_offset(config, buf), chan)
		  - subbuf_trunc(lib_ring_buffer_get_consumed(config, buf), chan)
		  == 0) {
			if (finalized)
				return POLLHUP;
			else {
				/*
				 * The memory barriers
				 * __wait_event()/wake_up_interruptible() take
				 * care of "raw_spin_is_locked" memory ordering.
				 */
				if (raw_spin_is_locked(&buf->raw_tick_nohz_spinlock))
					goto retry;
				else
					return 0;
			}
		} else {
			if (subbuf_trunc(lib_ring_buffer_get_offset(config, buf),
					 chan)
			  - subbuf_trunc(lib_ring_buffer_get_consumed(config, buf),
					 chan)
			  >= chan->backend.buf_size)
				return POLLPRI | POLLRDBAND;
			else
				return POLLIN | POLLRDNORM;
		}
	}
	return mask;
}
Example No. 6
static u64 tcp_read_usage(struct mem_cgroup *memcg)
{
	struct tcp_memcontrol *tcp;
	struct cg_proto *cg_proto;

	cg_proto = tcp_prot.proto_cgroup(memcg);
	if (!cg_proto)
		return atomic_long_read(&tcp_memory_allocated) << PAGE_SHIFT;

	tcp = tcp_from_cgproto(cg_proto);
	return res_counter_read_u64(&tcp->tcp_memory_allocated, RES_USAGE);
}
Example No. 7
struct pfq_sock *
pfq_get_sock_by_id(pfq_id_t id)
{
        struct pfq_sock *so;
        if (unlikely((__force int)id >= Q_MAX_ID)) {
                pr_devel("[PFQ] pfq_get_sock_by_id: bad id=%d!\n", id);
                return NULL;
        }
	so = (struct pfq_sock *)atomic_long_read(&pfq_sock_vector[(__force int)id]);
	smp_read_barrier_depends();
	return so;
}
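pfq_sock_vector[] stores pointers in atomic_long_t slots; the read followed by smp_read_barrier_depends() consumes a pointer that some other context published. In portable C11 the same publish/lookup pattern is usually expressed with an atomic pointer and release/acquire ordering; a minimal sketch (table size and names are invented):

#include <stdatomic.h>
#include <stddef.h>

#define MAX_ID 64				/* hypothetical table size */

struct sock_entry { int id; };

static _Atomic(struct sock_entry *) sock_table[MAX_ID];

/* Publisher: the release store makes the entry visible only after its
 * fields have been initialized. */
static void publish_sock(int id, struct sock_entry *so)
{
	atomic_store_explicit(&sock_table[id], so, memory_order_release);
}

/* Lookup: the acquire load pairs with the release above, so the caller
 * sees a fully initialized object (or NULL). */
static struct sock_entry *lookup_sock(int id)
{
	if (id < 0 || id >= MAX_ID)
		return NULL;
	return atomic_load_explicit(&sock_table[id], memory_order_acquire);
}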
Example No. 8
void task_mem(struct seq_file *m, struct mm_struct *mm)
{
	unsigned long data, text, lib, swap, ptes, pmds;
	unsigned long hiwater_vm, total_vm, hiwater_rss, total_rss;

	/*
	 * Note: to minimize their overhead, mm maintains hiwater_vm and
	 * hiwater_rss only when about to *lower* total_vm or rss.  Any
	 * collector of these hiwater stats must therefore get total_vm
	 * and rss too, which will usually be the higher.  Barriers? not
	 * worth the effort, such snapshots can always be inconsistent.
	 */
	hiwater_vm = total_vm = mm->total_vm;
	if (hiwater_vm < mm->hiwater_vm)
		hiwater_vm = mm->hiwater_vm;
	hiwater_rss = total_rss = get_mm_rss(mm);
	if (hiwater_rss < mm->hiwater_rss)
		hiwater_rss = mm->hiwater_rss;

	data = mm->total_vm - mm->shared_vm - mm->stack_vm;
	text = (PAGE_ALIGN(mm->end_code) - (mm->start_code & PAGE_MASK)) >> 10;
	lib = (mm->exec_vm << (PAGE_SHIFT-10)) - text;
	swap = get_mm_counter(mm, MM_SWAPENTS);
	ptes = PTRS_PER_PTE * sizeof(pte_t) * atomic_long_read(&mm->nr_ptes);
	pmds = PTRS_PER_PMD * sizeof(pmd_t) * mm_nr_pmds(mm);
	seq_printf(m,
		"VmPeak:\t%8lu kB\n"
		"VmSize:\t%8lu kB\n"
		"VmLck:\t%8lu kB\n"
		"VmPin:\t%8lu kB\n"
		"VmHWM:\t%8lu kB\n"
		"VmRSS:\t%8lu kB\n"
		"VmData:\t%8lu kB\n"
		"VmStk:\t%8lu kB\n"
		"VmExe:\t%8lu kB\n"
		"VmLib:\t%8lu kB\n"
		"VmPTE:\t%8lu kB\n"
		"VmPMD:\t%8lu kB\n"
		"VmSwap:\t%8lu kB\n",
		hiwater_vm << (PAGE_SHIFT-10),
		total_vm << (PAGE_SHIFT-10),
		mm->locked_vm << (PAGE_SHIFT-10),
		mm->pinned_vm << (PAGE_SHIFT-10),
		hiwater_rss << (PAGE_SHIFT-10),
		total_rss << (PAGE_SHIFT-10),
		data << (PAGE_SHIFT-10),
		mm->stack_vm << (PAGE_SHIFT-10), text, lib,
		ptes >> 10,
		pmds >> 10,
		swap << (PAGE_SHIFT-10));
}
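Every value passed to seq_printf() above is converted to kilobytes: quantities counted in pages are shifted left by PAGE_SHIFT - 10 (with 4 KiB pages PAGE_SHIFT is 12, so 100 pages print as 400 kB), while ptes and pmds are already byte totals and are only shifted right by 10. Hypothetical helpers equivalent to the open-coded shifts:

/* Hypothetical helpers, equivalent to the shifts used in task_mem(). */
#define PAGES_TO_KB(x)	((x) << (PAGE_SHIFT - 10))	/* page count -> kB */
#define BYTES_TO_KB(x)	((x) >> 10)			/* byte count -> kB */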
Example No. 9
File: fork.c  Project: 19Dan01/linux
static void check_mm(struct mm_struct *mm)
{
	int i;

	for (i = 0; i < NR_MM_COUNTERS; i++) {
		long x = atomic_long_read(&mm->rss_stat.count[i]);

		if (unlikely(x))
			printk(KERN_ALERT "BUG: Bad rss-counter state "
					  "mm:%p idx:%d val:%ld\n", mm, i, x);
	}

	if (atomic_long_read(&mm->nr_ptes))
		pr_alert("BUG: non-zero nr_ptes on freeing mm: %ld\n",
				atomic_long_read(&mm->nr_ptes));
	if (mm_nr_pmds(mm))
		pr_alert("BUG: non-zero nr_pmds on freeing mm: %ld\n",
				mm_nr_pmds(mm));

#if defined(CONFIG_TRANSPARENT_HUGEPAGE) && !USE_SPLIT_PMD_PTLOCKS
	VM_BUG_ON_MM(mm->pmd_huge_pte, mm);
#endif
}
Example No. 10
inline void doBarrier(size_t tid) {
    assert(tid < maxNThreads);
    // Sense reversal: each thread alternates between the two counters
    // B[0]/B[1], so consecutive barrier phases never share a counter.
    const int whichBar = (barArray[tid] ^= true);
    long c = atomic_long_inc_return(&B[whichBar]);
    if ((size_t)c == _barrier) {
        // Last thread to arrive resets the counter, releasing the others.
        atomic_long_set(&B[whichBar], 0);
        return;
    }
    // spin-wait until the last arrival resets the counter to zero
    while (c) {
        c = atomic_long_read(&B[whichBar]);
        PAUSE();  // TODO: define a spin policy!
    }
}
Example No. 11
/* can be called either with percpu mib (pcpumib != NULL),
 * or shared one (smib != NULL)
 */
static void snmp6_seq_show_item(struct seq_file *seq, void __percpu *pcpumib,
				atomic_long_t *smib,
				const struct snmp_mib *itemlist)
{
	int i;
	unsigned long val;

	for (i = 0; itemlist[i].name; i++) {
		val = pcpumib ?
			snmp_fold_field(pcpumib, itemlist[i].entry) :
			atomic_long_read(smib + itemlist[i].entry);
		seq_printf(seq, "%-32s\t%lu\n", itemlist[i].name, val);
	}
}
Example No. 12
void pfq_devmap_monitor_update(void)
{
    int i,j;
    for(i=0; i < Q_MAX_DEVICE; ++i)
    {
        unsigned long val = 0;
        for(j=0; j < Q_MAX_HW_QUEUE; ++j)
        {
            val |= atomic_long_read(&pfq_devmap[i][j]);
        }

        atomic_set(&pfq_devmap_monitor[i], val ? 1 : 0);
    }
}
Example No. 13
static int pfq_proc_groups(struct seq_file *m, void *v)
{
	size_t n;

	seq_printf(m, "group: recv      drop      forward   kernel    disc      aborted   pol pid   def.    uplane   cplane    ctrl\n");

	down(&group_sem);

	for(n = 0; n < Q_MAX_GID; n++)
	{
		pfq_gid_t gid = (__force pfq_gid_t)n;

		struct pfq_group *this_group = pfq_get_group(gid);
		if (!this_group->policy)
			continue;

		seq_printf(m, "%5zu: %-9lu %-9lu %-9lu %-9lu %-9lu %-9lu", n,
			   sparse_read(this_group->stats, recv),
			   sparse_read(this_group->stats, drop),
			   sparse_read(this_group->stats, frwd),
			   sparse_read(this_group->stats, kern),
			   sparse_read(this_group->stats, disc),
			   sparse_read(this_group->stats, abrt));

		seq_printf(m, "%3d %3d ", this_group->policy, this_group->pid);

		seq_printf(m, "%08lx %08lx %08lx %08lx \n",
			   atomic_long_read(&this_group->sock_mask[pfq_ctz(Q_CLASS_DEFAULT)]),
			   atomic_long_read(&this_group->sock_mask[pfq_ctz(Q_CLASS_USER_PLANE)]),
			   atomic_long_read(&this_group->sock_mask[pfq_ctz(Q_CLASS_CONTROL_PLANE)]),
			   atomic_long_read(&this_group->sock_mask[63]));

	}

	up(&group_sem);
	return 0;
}
Example No. 14
/**
 * oom_badness - heuristic function to determine which candidate task to kill
 * @p: task struct of which task we should calculate
 * @totalpages: total present RAM allowed for page allocation
 *
 * The heuristic for determining which task to kill is made to be as simple and
 * predictable as possible.  The goal is to return the highest value for the
 * task consuming the most memory to avoid subsequent oom failures.
 */
unsigned long oom_badness(struct task_struct *p, struct mem_cgroup *memcg,
			  const nodemask_t *nodemask, unsigned long totalpages)
{
	long points;
	long adj;

	if (oom_unkillable_task(p, memcg, nodemask))
		return 0;

	p = find_lock_task_mm(p);
	if (!p)
		return 0;

	/*
	 * Do not even consider tasks which are explicitly marked oom
	 * unkillable or have been already oom reaped.
	 */
	adj = (long)p->signal->oom_score_adj;
	if (adj == OOM_SCORE_ADJ_MIN ||
			test_bit(MMF_OOM_REAPED, &p->mm->flags)) {
		task_unlock(p);
		return 0;
	}

	/*
	 * The baseline for the badness score is the proportion of RAM that each
	 * task's rss, pagetable and swap space use.
	 */
	points = get_mm_rss(p->mm) + get_mm_counter(p->mm, MM_SWAPENTS) +
		atomic_long_read(&p->mm->nr_ptes) + mm_nr_pmds(p->mm);
	task_unlock(p);

	/*
	 * Root processes get 3% bonus, just like the __vm_enough_memory()
	 * implementation used by LSMs.
	 */
	if (has_capability_noaudit(p, CAP_SYS_ADMIN))
		points -= (points * 3) / 100;

	/* Normalize to oom_score_adj units */
	adj *= totalpages / 1000;
	points += adj;

	/*
	 * Never return 0 for an eligible task regardless of the root bonus and
	 * oom_score_adj (oom_score_adj can't be OOM_SCORE_ADJ_MIN here).
	 */
	return points > 0 ? points : 1;
}
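A worked pass through the arithmetic, with numbers chosen only for illustration (totalpages = 1,000,000; the task's rss, swap and page tables sum to 200,000 pages; the task has CAP_SYS_ADMIN and oom_score_adj = 500):

	points = 200000                                  /* rss + swap + ptes + pmds */
	points = 200000 - (200000 * 3) / 100 = 194000    /* 3% CAP_SYS_ADMIN bonus */
	adj    = 500 * (1000000 / 1000)      = 500000    /* normalized oom_score_adj */
	points = 194000 + 500000             = 694000    /* value returned */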
Example No. 15
static ssize_t target_stat_scsi_tgt_port_show_attr_in_cmds(
	struct se_port_stat_grps *pgrps, char *page)
{
	struct se_lun *lun = container_of(pgrps, struct se_lun, port_stat_grps);
	struct se_device *dev;
	ssize_t ret = -ENODEV;

	rcu_read_lock();
	dev = rcu_dereference(lun->lun_se_dev);
	if (dev)
		ret = snprintf(page, PAGE_SIZE, "%lu\n",
			       atomic_long_read(&lun->lun_stats.cmd_pdus));
	rcu_read_unlock();
	return ret;
}
Example No. 16
// consumer function
void * consumer(void * arg) {
    int myid = *(int*)arg;
    int * data;

    // q (shared queue), counter and SIZE are defined elsewhere in the program
    ff::Barrier::instance()->doBarrier(myid);
    while (1) {
        if (q->pop((void**)&data)) {
            printf("(%d %ld) ", myid, (long)data);
            atomic_long_inc(&counter);
        }
        if ((long)(atomic_long_read(&counter)) >= SIZE) break;
    }
    pthread_exit(NULL);
    return NULL;
}
Example No. 17
static int ccfs_flush(struct file *file, fl_owner_t td)
{
	int rc = 0;
	struct file *lower_file = NULL;

	mdbg(INFO3,"Flush file %p", file);
	lower_file = ccfs_get_nested_file(file);
	mdbg(INFO3,"Flush lower file %p (%ld)", lower_file, atomic_long_read(&lower_file->f_count));
	
	BUG_ON(!lower_file);
	
	if (lower_file->f_op && lower_file->f_op->flush)
		rc = lower_file->f_op->flush(lower_file, td);
	return rc;
}
Example No. 18
static ssize_t target_stat_tgt_port_in_cmds_show(struct config_item *item,
		char *page)
{
	struct se_lun *lun = to_stat_tgt_port(item);
	struct se_device *dev;
	ssize_t ret = -ENODEV;

	rcu_read_lock();
	dev = rcu_dereference(lun->lun_se_dev);
	if (dev)
		ret = snprintf(page, PAGE_SIZE, "%lu\n",
			       atomic_long_read(&lun->lun_stats.cmd_pdus));
	rcu_read_unlock();
	return ret;
}
Example No. 19
static void check_mm(struct mm_struct *mm)
{
	int i;

	for (i = 0; i < NR_MM_COUNTERS; i++) {
		long x = atomic_long_read(&mm->rss_stat.count[i]);

		if (unlikely(x))
			printk(KERN_ALERT "BUG: Bad rss-counter state "
					  "mm:%p idx:%d val:%ld\n", mm, i, x);
	}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	VM_BUG_ON(mm->pmd_huge_pte);
#endif
}
Example No. 20
void au_plink_maint_leave(struct file *file)
{
	struct au_sbinfo *sbinfo;
	int iam;

	AuDebugOn(atomic_long_read(&file->f_count));

	sbinfo = au_sbi(file->f_dentry->d_sb);
	spin_lock(&sbinfo->si_plink_maint_lock);
	iam = (sbinfo->si_plink_maint == file);
	if (iam)
		sbinfo->si_plink_maint = NULL;
	spin_unlock(&sbinfo->si_plink_maint_lock);
	if (iam)
		wake_up_all(&sbinfo->si_plink_wq);
}
Example No. 21
int put_io_context(struct io_context *ioc)
{
	if (ioc == NULL)
		return 1;

	BUG_ON(atomic_long_read(&ioc->refcount) == 0);

	if (atomic_long_dec_and_test(&ioc->refcount)) {
		rcu_read_lock();
		cfq_dtor(ioc);
		rcu_read_unlock();

		kmem_cache_free(iocontext_cachep, ioc);
		return 1;
	}
	return 0;
}
Example No. 22
static void l2tp_eth_get_stats64(struct net_device *dev,
				 struct rtnl_link_stats64 *stats)
{
	struct l2tp_eth *priv = netdev_priv(dev);

	stats->tx_bytes   = (unsigned long) atomic_long_read(&priv->tx_bytes);
	stats->tx_packets = (unsigned long) atomic_long_read(&priv->tx_packets);
	stats->tx_dropped = (unsigned long) atomic_long_read(&priv->tx_dropped);
	stats->rx_bytes   = (unsigned long) atomic_long_read(&priv->rx_bytes);
	stats->rx_packets = (unsigned long) atomic_long_read(&priv->rx_packets);
	stats->rx_errors  = (unsigned long) atomic_long_read(&priv->rx_errors);

}
Example No. 23
static struct rtnl_link_stats64 *l2tp_eth_get_stats64(struct net_device *dev,
						      struct rtnl_link_stats64 *stats)
{
	struct l2tp_eth *priv = netdev_priv(dev);

	stats->tx_bytes   = atomic_long_read(&priv->tx_bytes);
	stats->tx_packets = atomic_long_read(&priv->tx_packets);
	stats->tx_dropped = atomic_long_read(&priv->tx_dropped);
	stats->rx_bytes   = atomic_long_read(&priv->rx_bytes);
	stats->rx_packets = atomic_long_read(&priv->rx_packets);
	stats->rx_errors  = atomic_long_read(&priv->rx_errors);
	return stats;
}
Example No. 24
int ccfs_write_lower_page_segment(struct inode *ccfsinode,
				      struct page *page_for_lower,
				      size_t offset_in_page, size_t size)
{
	struct ccfs_inode *inode_info;
	char *virt;
	loff_t offset;
	int rc;
	inode_info = ccfs_inode_to_private(ccfsinode);

	mdbg(INFO3, "Inode %p has lower file: %p (%ld)", ccfsinode, inode_info->lower_file, atomic_long_read(&inode_info->lower_file->f_count));
	
	offset = ((((loff_t)page_for_lower->index) << PAGE_CACHE_SHIFT)
		  + offset_in_page);
	virt = kmap(page_for_lower);
	rc = ccfs_write_lower(ccfsinode, virt, offset, size);
	kunmap(page_for_lower);
	return rc;
}
Example No. 25
/* can be called either with percpu mib (pcpumib != NULL),
 * or shared one (smib != NULL)
 */
static void snmp6_seq_show_item(struct seq_file *seq, void __percpu *pcpumib,
				atomic_long_t *smib,
				const struct snmp_mib *itemlist)
{
	unsigned long buff[SNMP_MIB_MAX];
	int i;

	if (pcpumib) {
		memset(buff, 0, sizeof(unsigned long) * SNMP_MIB_MAX);

		snmp_get_cpu_field_batch(buff, itemlist, pcpumib);
		for (i = 0; itemlist[i].name; i++)
			seq_printf(seq, "%-32s\t%lu\n",
				   itemlist[i].name, buff[i]);
	} else {
		for (i = 0; itemlist[i].name; i++)
			seq_printf(seq, "%-32s\t%lu\n", itemlist[i].name,
				   atomic_long_read(smib + itemlist[i].entry));
	}
}
Example No. 26
static ssize_t target_stat_auth_num_cmds_show(struct config_item *item,
		char *page)
{
	struct se_lun_acl *lacl = auth_to_lacl(item);
	struct se_node_acl *nacl = lacl->se_lun_nacl;
	struct se_dev_entry *deve;
	ssize_t ret;

	rcu_read_lock();
	deve = target_nacl_find_deve(nacl, lacl->mapped_lun);
	if (!deve) {
		rcu_read_unlock();
		return -ENODEV;
	}
	/* scsiAuthIntrOutCommands */
	ret = snprintf(page, PAGE_SIZE, "%lu\n",
		       atomic_long_read(&deve->total_cmds));
	rcu_read_unlock();
	return ret;
}
Example No. 27
/*
 * IO Context helper functions. put_io_context() returns 1 if there are no
 * more users of this io context, 0 otherwise.
 */
int put_io_context(struct io_context *ioc)
{
	if (ioc == NULL)
		return 1;

	BUG_ON(atomic_long_read(&ioc->refcount) == 0);

	if (atomic_long_dec_and_test(&ioc->refcount)) {
		rcu_read_lock();
		if (ioc->aic && ioc->aic->dtor)
			ioc->aic->dtor(ioc->aic);
		hlist_sched_dtor(ioc, &ioc->cic_list);
		hlist_sched_dtor(ioc, &ioc->bfq_cic_list);
		rcu_read_unlock();

		kmem_cache_free(iocontext_cachep, ioc);
		return 1;
	}
	return 0;
}
Example No. 28
void show_mem(unsigned int filter)
{
	pg_data_t *pgdat;
	unsigned long total = 0, reserved = 0, highmem = 0;

	printk("Mem-Info:\n");
	show_free_areas(filter);

	for_each_online_pgdat(pgdat) {
		unsigned long flags;
		int zoneid;

		pgdat_resize_lock(pgdat, &flags);
		for (zoneid = 0; zoneid < MAX_NR_ZONES; zoneid++) {
			struct zone *zone = &pgdat->node_zones[zoneid];
			if (!populated_zone(zone))
				continue;

			total += zone->present_pages;
			reserved += zone->present_pages - zone->managed_pages;

			if (is_highmem_idx(zoneid))
				highmem += zone->present_pages;
		}
		pgdat_resize_unlock(pgdat, &flags);
	}

	printk("%lu pages RAM\n", total);
	printk("%lu pages HighMem/MovableOnly\n", highmem);
	printk("%lu pages reserved\n", reserved);
#ifdef CONFIG_CMA
	printk("%lu pages cma reserved\n", totalcma_pages);
#endif
#ifdef CONFIG_QUICKLIST
	printk("%lu pages in pagetable cache\n",
		quicklist_total_size());
#endif
#ifdef CONFIG_MEMORY_FAILURE
	printk("%lu pages hwpoisoned\n", atomic_long_read(&num_poisoned_pages));
#endif
}
Example No. 29
static ssize_t target_stat_scsi_auth_intr_show_attr_num_cmds(
	struct se_ml_stat_grps *lgrps, char *page)
{
	struct se_lun_acl *lacl = container_of(lgrps,
			struct se_lun_acl, ml_stat_grps);
	struct se_node_acl *nacl = lacl->se_lun_nacl;
	struct se_dev_entry *deve;
	ssize_t ret;

	rcu_read_lock();
	deve = target_nacl_find_deve(nacl, lacl->mapped_lun);
	if (!deve) {
		rcu_read_unlock();
		return -ENODEV;
	}
	/* scsiAuthIntrOutCommands */
	ret = snprintf(page, PAGE_SIZE, "%lu\n",
		       atomic_long_read(&deve->total_cmds));
	rcu_read_unlock();
	return ret;
}
Example No. 30
File: super.c  Project: mdamt/linux
static int ext2_freeze(struct super_block *sb)
{
	struct ext2_sb_info *sbi = EXT2_SB(sb);

	/*
	 * Open but unlinked files present? Keep EXT2_VALID_FS flag cleared
	 * because we have unattached inodes and thus filesystem is not fully
	 * consistent.
	 */
	if (atomic_long_read(&sb->s_remove_count)) {
		ext2_sync_fs(sb, 1);
		return 0;
	}
	/* Set EXT2_VALID_FS flag */
	spin_lock(&sbi->s_lock);
	sbi->s_es->s_state = cpu_to_le16(sbi->s_mount_state);
	spin_unlock(&sbi->s_lock);
	ext2_sync_super(sb, sbi->s_es, 1);

	return 0;
}