Example #1
static void
__pfq_group_ctor(int gid)
{
        struct pfq_group * that = &pfq_groups[gid];
        int i;

        that->pid = -1;
        that->policy = Q_GROUP_UNDEFINED;

        for(i = 0; i < Q_CLASS_MAX; i++)
        {
                atomic_long_set(&that->sock_mask[i], 0);
        }

        /* note the <= in the loop bound: one extra slot is initialized,
         * so that the last function pointer of the composition is always NULL
         */

        for(i = 0; i <= Q_FUN_MAX; i++)
        {
                atomic_long_set(&that->fun_ctx[i].function, 0L);
                atomic_long_set(&that->fun_ctx[i].context,    0L);
                spin_lock_init (&that->fun_ctx[i].lock);
        }

        atomic_long_set(&that->filter,   0L);

        sparse_set(&that->recv, 0);
        sparse_set(&that->lost, 0);
        sparse_set(&that->drop, 0);
}
Example #2
 inline int barrierSetup(size_t init) {
     assert(init>0);
     if (init == _barrier) return -1;
     for(size_t i=0; i<init; ++i) barArray[i]=false;
     atomic_long_set(&B[0],0);
     atomic_long_set(&B[1],0);
     _barrier = init; 
     return 0;
 }
Example #3
int pfq_devmap_update(int action, int index, int queue, pfq_gid_t gid)
{
    int n = 0, i,q;

    if (unlikely((__force int)gid >= Q_MAX_GID ||
		 (__force int)gid < 0)) {
        pr_devel("[PF_Q] devmap_update: bad gid (%u)\n",gid);
        return 0;
    }

    down(&devmap_sem);

    for(i=0; i < Q_MAX_DEVICE; ++i)
    {
        for(q=0; q < Q_MAX_HW_QUEUE; ++q)
        {
            unsigned long tmp;

            if (!pfq_devmap_equal(i, q, index, queue))
                continue;

            /* map_set... */
            if (action == map_set) {

                tmp = atomic_long_read(&pfq_devmap[i][q]);
                tmp |= 1L << (__force int)gid;
                atomic_long_set(&pfq_devmap[i][q], tmp);
                n++;
                continue;
            }

            /* map_reset */
            tmp = atomic_long_read(&pfq_devmap[i][q]);
            if (tmp & (1L << (__force int)gid)) {
                tmp &= ~(1L << (__force int)gid);
                atomic_long_set(&pfq_devmap[i][q], tmp);
                n++;
                continue;
            }
        }
    }

    /* update capture monitor filter... */

    pfq_devmap_monitor_update();

    up(&devmap_sem);

    return n;
}
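The map_set/map_reset branches above maintain a per-device, per-queue group bitmask: setting or clearing bit gid of an atomic long. Below is a minimal userspace sketch of the same pattern, assuming C11 <stdatomic.h> and hypothetical names (not PFQ code). Note that the kernel version can do a plain read-modify-write because devmap_sem serializes the updates, whereas fetch_or/fetch_and make the update atomic on their own.

#include <stdatomic.h>
#include <stdio.h>

static atomic_long devmap_slot;            /* one slot of a hypothetical dev/queue map */

static void slot_set_gid(int gid)          /* bind group 'gid' to this slot */
{
        atomic_fetch_or(&devmap_slot, 1L << gid);
}

static void slot_clear_gid(int gid)        /* unbind group 'gid' from this slot */
{
        atomic_fetch_and(&devmap_slot, ~(1L << gid));
}

int main(void)
{
        slot_set_gid(3);
        slot_set_gid(5);
        slot_clear_gid(3);
        printf("mask = 0x%lx\n", (unsigned long)atomic_load(&devmap_slot));  /* prints mask = 0x20 */
        return 0;
}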
Example #4
static int __init myworkqueue_init(void)
{
	int ret;
	unsigned long delay;
	printk(KERN_INFO "workqueue: %s\n", __FUNCTION__);

	/************************ static work *********************/
	atomic_long_set(&static_work.data, 10);
	ret = schedule_work(&static_work); /* schedule on kernel global workqueue */
	if (ret == 0) {
		printk(KERN_INFO "workqueue: static work already on the kernel-global workqueue\n");
	}

	/*********************** dynamic work *********************/
	dynamic_work = (struct work_struct *)kmalloc(sizeof(struct work_struct), GFP_KERNEL);
	if (!dynamic_work) {
		printk(KERN_INFO "workqueue: failed to create work\n");
		return 0;
	}
	INIT_WORK(dynamic_work, do_things);
	atomic_long_set(&dynamic_work->data, 20);
	ret = schedule_work(dynamic_work); /* schedule on kernel global workqueue */
	if (ret == 0) {
		printk(KERN_INFO "workqueue: dynamic work already on the kernel-global workqueue\n");
	}

	/******************* static delayed work *****************/
	/* delay is in jiffies; HZ jiffies = 1 second, so HZ * 10 delays by 10 seconds */
	delay = HZ * 10;
	ret = schedule_delayed_work(&static_delay_work, delay); /* schedule on kernel global workqueue */
	if (ret == 0) {
		printk(KERN_INFO "workqueue: static delayed work already on the kernel-global workqueue\n");
	}

	/************ static work on custom workqueue ************/
	wq = create_workqueue("myqueue");
	if (!wq) {
		printk(KERN_INFO "workqueue: failed to create workqueue\n");
		return 0;
	}
	atomic_long_set(&static_work.data, 40);
	ret = queue_work(wq, &static_work); /* schedule on custom workqueue */
	if (ret == 0) {
		printk(KERN_INFO "workqueue: static work already on the custom workqueue\n");
	}

	return 0;
}
Example #5
static void __update_writeback_rate(struct cached_dev *dc)
{
	/*
	 * PI controller:
	 * Figures out the amount that should be written per second.
	 *
	 * First, the error (number of sectors that are dirty beyond our
	 * target) is calculated.  The error is accumulated (numerically
	 * integrated).
	 *
	 * Then, the proportional value and integral value are scaled
	 * based on configured values.  These are stored as inverses to
	 * avoid fixed point math and to make configuration easy-- e.g.
	 * the default value of 40 for writeback_rate_p_term_inverse
	 * attempts to write at a rate that would retire all the dirty
	 * blocks in 40 seconds.
	 *
	 * The writeback_rate_i_inverse value of 10000 means that 1/10000th
	 * of the error is accumulated in the integral term per second.
	 * This acts as a slow, long-term average that is not subject to
	 * variations in usage like the p term.
	 */
	int64_t target = __calc_target_rate(dc);
	int64_t dirty = bcache_dev_sectors_dirty(&dc->disk);
	int64_t error = dirty - target;
	int64_t proportional_scaled =
		div_s64(error, dc->writeback_rate_p_term_inverse);
	int64_t integral_scaled;
	uint32_t new_rate;

	if ((error < 0 && dc->writeback_rate_integral > 0) ||
	    (error > 0 && time_before64(local_clock(),
			 dc->writeback_rate.next + NSEC_PER_MSEC))) {
		/*
		 * Only decrease the integral term if it's more than
		 * zero.  Only increase the integral term if the device
		 * is keeping up.  (Don't wind up the integral
		 * ineffectively in either case).
		 *
		 * It's necessary to scale this by
		 * writeback_rate_update_seconds to keep the integral
		 * term dimensioned properly.
		 */
		dc->writeback_rate_integral += error *
			dc->writeback_rate_update_seconds;
	}

	integral_scaled = div_s64(dc->writeback_rate_integral,
			dc->writeback_rate_i_term_inverse);

	new_rate = clamp_t(int32_t, (proportional_scaled + integral_scaled),
			dc->writeback_rate_minimum, NSEC_PER_SEC);

	dc->writeback_rate_proportional = proportional_scaled;
	dc->writeback_rate_integral_scaled = integral_scaled;
	dc->writeback_rate_change = new_rate -
			atomic_long_read(&dc->writeback_rate.rate);
	atomic_long_set(&dc->writeback_rate.rate, new_rate);
	dc->writeback_rate_target = target;
}
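As an editorial aside, the PI arithmetic described in the comment above can be reproduced in a few lines of plain C. The numbers below are made up (only the 40 and 10000 inverses come from the comment); the point is just to show how the proportional and integral shares are scaled.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        int64_t target = 100000, dirty = 140000;        /* hypothetical sector counts */
        int64_t error = dirty - target;                 /* 40000 sectors over target */
        int64_t p_inverse = 40, i_inverse = 10000;      /* inverses from the comment above */
        int64_t update_seconds = 5;                     /* hypothetical update period */
        static int64_t integral;                        /* accumulated (integrated) error */

        int64_t proportional_scaled = error / p_inverse;        /* 1000 sectors/s */

        integral += error * update_seconds;                     /* integrate one period */
        int64_t integral_scaled = integral / i_inverse;         /* 20 sectors/s so far */

        printf("new rate = %lld sectors/s\n",
               (long long)(proportional_scaled + integral_scaled));
        return 0;
}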
Example #6
File: sock.c Project: pfq/PFQ
int
pfq_sock_disable(struct pfq_sock *so)
{
	pr_devel("[PFQ|%d] leaving all groups...\n", so->id);
	pfq_group_leave_all(so->id);

	msleep(Q_GRACE_PERIOD);

	if (atomic_long_read(&so->shmem_addr)) {

		/* unbind Tx threads */

		pr_devel("[PFQ|%d] unbinding Tx threads...\n", so->id);
		pfq_sock_tx_unbind(so);

		msleep(Q_GRACE_PERIOD);

		pr_devel("[PFQ|%d] disabling shared queue...\n", so->id);
		atomic_long_set(&so->shmem_addr, 0);

		msleep(Q_GRACE_PERIOD);

		pr_devel("[PFQ|%d] unmapping shared queue...\n", so->id);
		pfq_shared_queue_unmap(so);
	}
	else {
		pr_devel("[PFQ|%d] socket (already) disabled.\n", so->id);
	}

	return 0;
}
Example #7
struct io_context *alloc_io_context(gfp_t gfp_flags, int node)
{
	struct io_context *ioc;

	ioc = kmem_cache_alloc_node(iocontext_cachep, gfp_flags, node);
	if (ioc) {
		atomic_long_set(&ioc->refcount, 1);
		atomic_set(&ioc->nr_tasks, 1);
		spin_lock_init(&ioc->lock);
		bitmap_zero(ioc->ioprio_changed, IOC_IOPRIO_CHANGED_BITS);
		ioc->ioprio = 0;
		ioc->last_waited = 0; /* doesn't matter... */
		ioc->nr_batch_requests = 0; /* because this is 0 */
		INIT_RADIX_TREE(&ioc->radix_root, GFP_ATOMIC | __GFP_HIGH);
		INIT_HLIST_HEAD(&ioc->cic_list);
		INIT_RADIX_TREE(&ioc->bfq_radix_root, GFP_ATOMIC | __GFP_HIGH);
		INIT_HLIST_HEAD(&ioc->bfq_cic_list);
		ioc->ioc_data = NULL;
#if defined(CONFIG_BLK_CGROUP) || defined(CONFIG_BLK_CGROUP_MODULE)
		ioc->cgroup_changed = 0;
#endif
	}

	return ioc;
}
Example #8
void create_io_context_slowpath(struct task_struct *task, gfp_t gfp_flags,
				int node)
{
	struct io_context *ioc;

	ioc = kmem_cache_alloc_node(iocontext_cachep, gfp_flags | __GFP_ZERO,
				    node);

	if (unlikely(!ioc))
		return;

	/* initialize */
	atomic_long_set(&ioc->refcount, 1);
	atomic_set(&ioc->nr_tasks, 1);
	spin_lock_init(&ioc->lock);
	INIT_RADIX_TREE(&ioc->icq_tree, GFP_ATOMIC | __GFP_HIGH);
	INIT_HLIST_HEAD(&ioc->icq_list);
	INIT_WORK(&ioc->release_work, ioc_release_fn);

	/*
	 * Try to install.  ioc shouldn't be installed if someone else
	 * already did or @task, which isn't %current, is exiting.  Note
	 * that we need to allow ioc creation on exiting %current as exit
	 * path may issue IOs from e.g. exit_files().  The exit path is
	 * responsible for not issuing IO after exit_io_context().
	 */
	task_lock(task);
	if (!task->io_context &&
	    (task == current || !(task->flags & PF_EXITING)))
		task->io_context = ioc;
	else
		kmem_cache_free(iocontext_cachep, ioc);

	task_unlock(task);
}
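The comment above describes a common allocate-then-install pattern: the allocation happens outside the lock, and the result is installed only if nobody else got there first; otherwise it is freed. A small userspace sketch of the same idea with pthreads, using hypothetical names rather than the block layer API:

#include <pthread.h>
#include <stdlib.h>

struct ctx { int id; };

static struct ctx *shared_ctx;                           /* the "task->io_context" analogue */
static pthread_mutex_t ctx_lock = PTHREAD_MUTEX_INITIALIZER;

static void ensure_ctx(void)
{
        struct ctx *c = calloc(1, sizeof(*c));           /* may block: done outside the lock */
        if (!c)
                return;

        pthread_mutex_lock(&ctx_lock);
        if (!shared_ctx)
                shared_ctx = c;                          /* we won the race: install it */
        else
                free(c);                                 /* someone beat us: discard ours */
        pthread_mutex_unlock(&ctx_lock);
}

int main(void)
{
        ensure_ctx();
        ensure_ctx();                                    /* second call discards its allocation */
        free(shared_ctx);
        return 0;
}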
Example #9
static struct mm_struct *mm_init(struct mm_struct *mm, struct task_struct *p)
{
	atomic_set(&mm->mm_users, 1);
	atomic_set(&mm->mm_count, 1);
	init_rwsem(&mm->mmap_sem);
	INIT_LIST_HEAD(&mm->mmlist);
	mm->flags = (current->mm) ?
		(current->mm->flags & MMF_INIT_MASK) : default_dump_filter;
	mm->core_state = NULL;
	atomic_long_set(&mm->nr_ptes, 0);
	memset(&mm->rss_stat, 0, sizeof(mm->rss_stat));
	spin_lock_init(&mm->page_table_lock);
	mm_init_aio(mm);
	mm_init_owner(mm, p);
	clear_tlb_flush_pending(mm);

	if (likely(!mm_alloc_pgd(mm))) {
		mm->def_flags = 0;
		mmu_notifier_mm_init(mm);
		return mm;
	}

	free_mm(mm);
	return NULL;
}
Example #10
/* Find an unused file structure and return a pointer to it.
 * Returns NULL if there are no more free file structures or
 * we run out of memory.
 *
 * Be very careful using this.  You are responsible for
 * getting write access to any mount that you might assign
 * to this filp, if it is opened for write.  If this is not
 * done, you will imbalance the mount's writer count
 * and get a warning at __fput() time.
 */
struct file *get_empty_filp(void)
{
	const struct cred *cred = current_cred();
	static long old_max;
	struct file * f;
	int acct;

	acct = (get_exec_ub() == get_ub0());
	/*
	 * Privileged users can go above max_files
	 */
	if (acct && get_nr_files() >= files_stat.max_files &&
			!capable(CAP_SYS_ADMIN)) {
		/*
		 * percpu_counters are inaccurate.  Do an expensive check before
		 * we go and fail.
		 */
		if (percpu_counter_sum_positive(&nr_files) >= files_stat.max_files)
			goto over;
	}

	f = kmem_cache_zalloc(filp_cachep, GFP_KERNEL);
	if (f == NULL)
		goto fail;

	if (ub_file_charge(f))
		goto fail_ch;
	if (acct)
		percpu_counter_inc(&nr_files);

	if (security_file_alloc(f))
		goto fail_sec;

	INIT_LIST_HEAD(&f->f_u.fu_list);
	atomic_long_set(&f->f_count, 1);
	rwlock_init(&f->f_owner.lock);
	f->f_cred = get_cred(cred);
	spin_lock_init(&f->f_lock);
	eventpoll_init_file(f);
	/* f->f_version: 0 */
	return f;

over:
	/* Ran out of filps - report that */
	if (get_nr_files() > old_max) {
		pr_info("VFS: file-max limit %lu reached\n", get_max_files());
		old_max = get_nr_files();
	}
	goto fail;

fail_sec:
	file_free(f);
fail:
	return NULL;

fail_ch:
	kmem_cache_free(filp_cachep, f);
	return NULL;
}
Example #11
static void ns_prune_dentry(struct dentry *dentry)
{
	struct inode *inode = d_inode(dentry);
	if (inode) {
		struct ns_common *ns = inode->i_private;
		atomic_long_set(&ns->stashed, 0);
	}
}
Example #12
static void
mr_alrt_enter(void)
{
  if (atomic_xchg(&alrt_onoff, 1))
    return;

  atomic_long_set(&alrt_start, jiffies);
}
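The atomic_xchg() above acts as an enter-once guard: only the first caller that flips alrt_onoff from 0 to 1 goes on to record the start time. A tiny userspace equivalent with C11 atomics, hypothetical names:

#include <stdatomic.h>
#include <stdio.h>

static atomic_int alrt_onoff;
static long alrt_start;

static void alrt_enter(long now)
{
        if (atomic_exchange(&alrt_onoff, 1))    /* already armed by another caller */
                return;
        alrt_start = now;                       /* first caller records the start time */
}

int main(void)
{
        alrt_enter(100);
        alrt_enter(200);                        /* no effect: the alert is already armed */
        printf("alrt_start = %ld\n", alrt_start);   /* prints 100 */
        return 0;
}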
Example #13
void pfq_sock_opt_init(struct pfq_sock_opt *that, size_t caplen, size_t maxlen)
{
        /* the queue is allocated later, when the socket is enabled */
        int n;

        atomic_long_set(&that->rxq.addr, 0);

        that->rxq.base_addr = NULL;

        /* disable timestamping by default */
        that->tstamp = false;

	/* Rx queue setup */

        /* set slots and caplen default values */

        that->caplen = caplen;
        that->rx_queue_size = 0;
        that->rx_slot_size = 0;

        /* initialize waitqueue */

        init_waitqueue_head(&that->waitqueue);

	/* Tx queues setup */

        that->tx_queue_size = 0;
        that->tx_slot_size  = Q_SPSC_QUEUE_SLOT_SIZE(maxlen);
	that->tx_num_queues = 0;

	for(n = 0; n < Q_MAX_TX_QUEUES; ++n)
	{
		atomic_long_set(&that->txq[n].addr, 0);

		that->txq[n].base_addr = NULL;
		that->txq[n].if_index  = -1;
		that->txq[n].queue     = -1;
		that->txq[n].cpu       = -1;
		that->txq[n].task      = NULL;
	}

}
Example #14
void pfq_release_sock_id(pfq_id_t id)
{
        if (unlikely(id.value >= Q_MAX_ID || id.value < 0)) {
                pr_devel("[PFQ] pfq_release_sock_by_id: bad id=%d!\n", id.value);
                return;
        }

        atomic_long_set(pfq_sock_vector + id.value, 0);
        if (atomic_dec_return(&pfq_sock_count) == 0)
		pfq_sock_finish();
}
Example #15
    /**
     *  Constructor
     *
     *  \param n the size of the buffer
     *  \param fixedsize a boolean flag that asserts whether the buffer can be
     *  resized. Default is \p false.
     *  \param fillcache a flag.
     */
    uSWSR_Ptr_Buffer(unsigned long n, const bool fixedsize=false, const bool fillcache=false):
        buf_r(0),buf_w(0),in_use_buffers(1),size(n),fixedsize(fixedsize),
        pool(CACHE_SIZE,fillcache,size) {
        init_unlocked(P_lock); init_unlocked(C_lock);
#if defined(UBUFFER_STATS)
        atomic_long_set(&numBuffers,0);
#endif
        // Avoid unused private field warning on padding fields
        (void)padding1; (void)padding2; (void)padding3; (void)padding4;

    }
Example #16
void pfq_release_sock_id(pfq_id_t id)
{
        if (unlikely((__force int)id >= Q_MAX_ID ||
		     (__force int)id < 0)) {
                pr_devel("[PFQ] pfq_release_sock_by_id: bad id=%d!\n", id);
                return;
        }

        atomic_long_set(pfq_sock_vector + (__force int)id, 0);
        if (atomic_dec_return(&pfq_sock_count) == 0)
		pfq_sock_finish_once();
}
Example #17
 inline void doBarrier(size_t tid) {
     assert(tid<maxNThreads);
     const int whichBar = (barArray[tid] ^= true); // toggle: alternates between 0 and 1
     long c = atomic_long_inc_return(&B[whichBar]);
     if ((size_t)c == _barrier) {
         atomic_long_set(&B[whichBar], 0);
         return;
     }
     // spin-wait
     while(c) { 
         c= atomic_long_read(&B[whichBar]);
         PAUSE();  // TODO: define a spin policy !
     }
 }
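Taken together, Example #2 (barrierSetup) and Example #17 (doBarrier) implement a two-counter, sense-reversing spin barrier: each thread toggles a private flag to pick which of the two counters it uses, the last arrival resets that counter to zero, and everyone else spins until the reset is observed. The following self-contained userspace sketch reproduces the same scheme with C11 atomics and pthreads; names are hypothetical, not the FastFlow API.

#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

#define NTHREADS 4

static atomic_long B[2];                   /* the two alternating counters */
static _Thread_local bool sense;           /* per-thread flag, like barArray[tid] */

static void do_barrier(void)
{
        int which = (sense ^= true);       /* alternate between B[0] and B[1] */
        long c = atomic_fetch_add(&B[which], 1) + 1;

        if (c == NTHREADS) {               /* last arrival: release the others */
                atomic_store(&B[which], 0);
                return;
        }
        while (atomic_load(&B[which]) != 0)
                ;                          /* spin until the counter is reset */
}

static void *worker(void *arg)
{
        (void)arg;
        for (int round = 0; round < 3; ++round)
                do_barrier();              /* nobody passes before all have arrived */
        return NULL;
}

int main(void)
{
        pthread_t t[NTHREADS];
        for (int i = 0; i < NTHREADS; ++i)
                pthread_create(&t[i], NULL, worker, NULL);
        for (int i = 0; i < NTHREADS; ++i)
                pthread_join(t[i], NULL);
        puts("all threads passed 3 barriers");
        return 0;
}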
Example #18
static void
__pfq_group_init(int gid)
{
        struct pfq_group * g = pfq_get_group(gid);
        int i;

        if (!g) {
                pr_devel("[PFQ] get_group: invalid group id %d!\n", gid);
                return;
        }

	g->pid = current->tgid;
        g->owner = -1;
        g->policy = Q_POLICY_GROUP_UNDEFINED;

        for(i = 0; i < Q_CLASS_MAX; i++)
        {
                atomic_long_set(&g->sock_mask[i], 0);
        }

        atomic_long_set(&g->bp_filter,0L);
        atomic_long_set(&g->comp,     0L);
        atomic_long_set(&g->comp_ctx, 0L);

	pfq_group_stats_reset(&g->stats);

        for(i = 0; i < Q_MAX_COUNTERS; i++)
        {
                sparse_set(&g->context.counter[i], 0);
        }

	for(i = 0; i < Q_MAX_PERSISTENT; i++)
	{
		spin_lock_init(&g->context.persistent[i].lock);
		memset(g->context.persistent[i].memory, 0, sizeof(g->context.persistent[i].memory));
	}
}
Example #19
File: fork.c Project: 19Dan01/linux
static struct mm_struct *mm_init(struct mm_struct *mm, struct task_struct *p)
{
	mm->mmap = NULL;
	mm->mm_rb = RB_ROOT;
	mm->vmacache_seqnum = 0;
	atomic_set(&mm->mm_users, 1);
	atomic_set(&mm->mm_count, 1);
	init_rwsem(&mm->mmap_sem);
	INIT_LIST_HEAD(&mm->mmlist);
	mm->core_state = NULL;
	atomic_long_set(&mm->nr_ptes, 0);
	mm_nr_pmds_init(mm);
	mm->map_count = 0;
	mm->locked_vm = 0;
	mm->pinned_vm = 0;
	memset(&mm->rss_stat, 0, sizeof(mm->rss_stat));
	spin_lock_init(&mm->page_table_lock);
	mm_init_cpumask(mm);
	mm_init_aio(mm);
	mm_init_owner(mm, p);
	mmu_notifier_mm_init(mm);
	clear_tlb_flush_pending(mm);
#if defined(CONFIG_TRANSPARENT_HUGEPAGE) && !USE_SPLIT_PMD_PTLOCKS
	mm->pmd_huge_pte = NULL;
#endif

	if (current->mm) {
		mm->flags = current->mm->flags & MMF_INIT_MASK;
		mm->def_flags = current->mm->def_flags & VM_INIT_DEF_MASK;
	} else {
		mm->flags = default_dump_filter;
		mm->def_flags = 0;
	}

	if (mm_alloc_pgd(mm))
		goto fail_nopgd;

	if (init_new_context(p, mm))
		goto fail_nocontext;

	return mm;

fail_nocontext:
	mm_free_pgd(mm);
fail_nopgd:
	free_mm(mm);
	return NULL;
}
Example #20
struct file *get_empty_filp(void)
{
	const struct cred *cred = current_cred();
	static long old_max;
	struct file * f;

	/*
	 * Privileged users can go above max_files
	 */
	if (get_nr_files() >= files_stat.max_files && !capable(CAP_SYS_ADMIN)) {
		/*
		 * percpu_counters are inaccurate.  Do an expensive check before
		 * we go and fail.
		 */
		if (percpu_counter_sum_positive(&nr_files) >= files_stat.max_files)
			goto over;
	}

	f = kmem_cache_zalloc(filp_cachep, GFP_KERNEL);
	if (f == NULL)
		goto fail;

	percpu_counter_inc(&nr_files);
	f->f_cred = get_cred(cred);
	if (security_file_alloc(f))
		goto fail_sec;

	INIT_LIST_HEAD(&f->f_u.fu_list);
	atomic_long_set(&f->f_count, 1);
	rwlock_init(&f->f_owner.lock);
	spin_lock_init(&f->f_lock);
	eventpoll_init_file(f);
	/* f->f_version: 0 */
	return f;

over:
	/* Ran out of filps - report that */
	if (get_nr_files() > old_max) {
		pr_info("VFS: file-max limit %lu reached\n", get_max_files());
		old_max = get_nr_files();
	}
	goto fail;

fail_sec:
	file_free(f);
fail:
	return NULL;
}
Example #21
File: sock.c Project: pfq/PFQ
void pfq_sock_release_id(pfq_id_t id)
{
        if ((__force int)id >= Q_MAX_ID ||
	    (__force int)id < 0) {
                pr_devel("[PFQ] pfq_release_sock_by_id: bad id=%d!\n", id);
                return;
        }

        atomic_long_set(global->socket_ptr + (__force int)id, 0);

        if (atomic_dec_return(&global->socket_count) == 0) {
		pr_devel("[PFQ] calling sock_fini_once...\n");
		msleep(Q_GRACE_PERIOD);
		pfq_sock_fini_once();
	}
}
Example #22
/* Find an unused file structure and return a pointer to it.
 * Returns an error pointer if some error happened, e.g. we are over the
 * file structures limit, run out of memory, or the operation is not permitted.
 *
 * Be very careful using this.  You are responsible for
 * getting write access to any mount that you might assign
 * to this filp, if it is opened for write.  If this is not
 * done, you will imbalance the mount's writer count
 * and get a warning at __fput() time.
 */
struct file *get_empty_filp(void)
{
    const struct cred *cred = current_cred();
    static long old_max;
    struct file *f;
    int error;

    /*
     * Privileged users can go above max_files
     */
    if (get_nr_files() >= files_stat.max_files && !capable(CAP_SYS_ADMIN)) {
        /*
         * percpu_counters are inaccurate.  Do an expensive check before
         * we go and fail.
         */
        if (percpu_counter_sum_positive(&nr_files) >= files_stat.max_files)
            goto over;
    }

    f = kmem_cache_zalloc(filp_cachep, GFP_KERNEL);
    if (unlikely(!f))
        return ERR_PTR(-ENOMEM);

    percpu_counter_inc(&nr_files);
    f->f_cred = get_cred(cred);
    error = security_file_alloc(f);
    if (unlikely(error)) {
        file_free(f);
        return ERR_PTR(error);
    }

    INIT_LIST_HEAD(&f->f_u.fu_list);
    atomic_long_set(&f->f_count, 1);
    rwlock_init(&f->f_owner.lock);
    spin_lock_init(&f->f_lock);
    eventpoll_init_file(f);
    /* f->f_version: 0 */
    return f;

over:
    /* Ran out of filps - report that */
    if (get_nr_files() > old_max) {
        pr_info("VFS: file-max limit %lu reached\n", get_max_files());
        old_max = get_nr_files();
    }
    return ERR_PTR(-ENFILE);
}
Example #23
/***
 * psrwlock_init - initialize the psrwlock
 * @lock: the psrwlock to be initialized
 * @key: the lock_class_key for the class; used by mutex lock debugging
 *
 * Initialize the psrwlock to unlocked state.
 *
 * It is not allowed to initialize an already locked psrwlock.
 */
void
__psrwlock_init(struct psrwlock *lock, const char *name,
		struct lock_class_key *key, u32 rctx, enum psrw_prio wctx)
{
	unsigned int i;

	atomic_set(&lock->uc, 0);
	atomic_set(&lock->ws, 0);
	for (i = 0; i < PSRW_NR_PRIO; i++)
		atomic_long_set(&lock->prio[i], 0);
	lock->rctx_bitmap = rctx;
	lock->wctx = wctx;
	INIT_LIST_HEAD(&lock->wait_list_r);
	INIT_LIST_HEAD(&lock->wait_list_w);

	debug_psrwlock_init(lock, name, key);
}
Example #24
int main(int argc, char* argv[]) {
    if (argc != 3) {
	std::cerr << "use: " << argv[0] 
		  << " queue-size #consumers\n";
	return -1;
    }

    SIZE= atoi(argv[1]);
    assert(SIZE>0);
    NTHREADS=atoi(argv[2]);
    assert(NTHREADS>0);

    q = new ff::MPMC_Ptr_Queue;
    assert(q);
    q->init(SIZE);

    for(int i=1;i<=SIZE;++i) 
        q->push((void*)i);

    atomic_long_set(&counter,0);

    pthread_t * C_handle;

    C_handle = (pthread_t *) malloc(sizeof(pthread_t)*NTHREADS);
	
    // define the number of threads that are going to participate....
    ff::Barrier::instance()->barrierSetup(NTHREADS);

    int * idC;
    idC = (int *) malloc(sizeof(int)*NTHREADS);
    for(int i=0;i<NTHREADS;++i) {
        idC[i]=i;
        if (pthread_create(&C_handle[i], NULL,consumer,&idC[i]) != 0) {
            abort();
        }
    }

    // wait all consumers
    for(int i=0;i<NTHREADS;++i) {
        pthread_join(C_handle[i],NULL);
    }
    printf("\n");
    return 0;

}
Example #25
/*
 * Initialize an rwsem:
 */
void __init_rwsem(struct rw_semaphore *sem, const char *name,
		  struct lock_class_key *key)
{
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	/*
	 * Make sure we are not reinitializing a held semaphore:
	 */
	debug_check_no_locks_freed((void *)sem, sizeof(*sem));
	lockdep_init_map(&sem->dep_map, name, key, 0);
#endif
	atomic_long_set(&sem->count, RWSEM_UNLOCKED_VALUE);
	raw_spin_lock_init(&sem->wait_lock);
	INIT_LIST_HEAD(&sem->wait_list);
#ifdef CONFIG_RWSEM_SPIN_ON_OWNER
	sem->owner = NULL;
	osq_lock_init(&sem->osq);
#endif
}
Example #26
static bool set_at_max_writeback_rate(struct cache_set *c,
				       struct cached_dev *dc)
{
	/*
	 * Idle_counter is increased every time update_writeback_rate() is
	 * called. If all backing devices attached to the same cache set have
	 * identical dc->writeback_rate_update_seconds values, it takes about 6
	 * rounds of update_writeback_rate() on each backing device before
	 * c->at_max_writeback_rate is set to 1, and then the max writeback rate
	 * is set on each dc->writeback_rate.rate.
	 * In order to avoid the extra locking cost of counting the exact number
	 * of dirty cached devices, c->attached_dev_nr is used to calculate the
	 * idle threshold. It might be bigger if not all cached devices are in
	 * writeback mode, but it still works well with a limited number of
	 * extra rounds of update_writeback_rate().
	 */
	if (atomic_inc_return(&c->idle_counter) <
	    atomic_read(&c->attached_dev_nr) * 6)
		return false;

	if (atomic_read(&c->at_max_writeback_rate) != 1)
		atomic_set(&c->at_max_writeback_rate, 1);

	atomic_long_set(&dc->writeback_rate.rate, INT_MAX);

	/* keep writeback_rate_target as existing value */
	dc->writeback_rate_proportional = 0;
	dc->writeback_rate_integral_scaled = 0;
	dc->writeback_rate_change = 0;

	/*
	 * Check c->idle_counter and c->at_max_writeback_rate again in case
	 * new I/O arrives before set_at_max_writeback_rate() returns.
	 * Then the writeback rate is set to 1, and its new value should be
	 * decided via __update_writeback_rate().
	 */
	if ((atomic_read(&c->idle_counter) <
	     atomic_read(&c->attached_dev_nr) * 6) ||
	    !atomic_read(&c->at_max_writeback_rate))
		return false;

	return true;
}
Example #27
struct io_context *alloc_io_context(gfp_t gfp_flags, int node)
{
	struct io_context *ret;

	ret = kmem_cache_alloc_node(iocontext_cachep, gfp_flags, node);
	if (ret) {
		atomic_long_set(&ret->refcount, 1);
		atomic_set(&ret->nr_tasks, 1);
		spin_lock_init(&ret->lock);
		ret->ioprio_changed = 0;
		ret->ioprio = 0;
		ret->last_waited = 0; /* doesn't matter... */
		ret->nr_batch_requests = 0; /* because this is 0 */
		INIT_RADIX_TREE(&ret->radix_root, GFP_ATOMIC | __GFP_HIGH);
		INIT_HLIST_HEAD(&ret->cic_list);
		ret->ioc_data = NULL;
	}

	return ret;
}
Example #28
static void
__pfq_group_dtor(int gid)
{
        struct pfq_group * that = &pfq_groups[gid];
        void *context[Q_FUN_MAX];

        struct sk_filter *filter;
        int i;

        /* remove this gid from demux matrix */

        pfq_devmap_update(map_reset, Q_ANY_DEVICE, Q_ANY_QUEUE, gid);

        that->pid = 0;
        that->policy = Q_GROUP_UNDEFINED;

        for(i = 0; i < Q_FUN_MAX; i++)
        {
		atomic_long_set(&pfq_groups[gid].fun_ctx[i].function, 0L);

		context[i] = (void *)atomic_long_xchg(&pfq_groups[gid].fun_ctx[i].context, 0L);
        }

        filter = (struct sk_filter *)atomic_long_xchg(&pfq_groups[gid].filter, 0L);

        msleep(Q_GRACE_PERIOD);   /* sleeping is possible here: user-context */

        for(i = 0; i < Q_FUN_MAX; i++)
        {
                kfree(context[i]);
        }

        pfq_free_sk_filter(filter);

        that->vlan_filt = false;

        pr_devel("[PFQ] group id:%d destroyed.\n", gid);
}
Example #29
int au_si_alloc(struct super_block *sb)
{
	int err;
	struct au_sbinfo *sbinfo;
	static struct lock_class_key aufs_si;

	err = -ENOMEM;
	sbinfo = kzalloc(sizeof(*sbinfo), GFP_NOFS);
	if (unlikely(!sbinfo))
		goto out;

	BUILD_BUG_ON(sizeof(unsigned long) !=
		     sizeof(*sbinfo->au_si_pid.bitmap));
	sbinfo->au_si_pid.bitmap = kcalloc(BITS_TO_LONGS(PID_MAX_DEFAULT),
					sizeof(*sbinfo->au_si_pid.bitmap),
					GFP_NOFS);
	if (unlikely(!sbinfo->au_si_pid.bitmap))
		goto out_sbinfo;

	/* will be reallocated separately */
	sbinfo->si_branch = kzalloc(sizeof(*sbinfo->si_branch), GFP_NOFS);
	if (unlikely(!sbinfo->si_branch))
		goto out_pidmap;

	err = sysaufs_si_init(sbinfo);
	if (unlikely(err))
		goto out_br;

	au_nwt_init(&sbinfo->si_nowait);
	au_rw_init_wlock(&sbinfo->si_rwsem);
	au_rw_class(&sbinfo->si_rwsem, &aufs_si);
	spin_lock_init(&sbinfo->au_si_pid.tree_lock);
	INIT_RADIX_TREE(&sbinfo->au_si_pid.tree, GFP_ATOMIC | __GFP_NOFAIL);

	atomic_long_set(&sbinfo->si_ninodes, 0);
	atomic_long_set(&sbinfo->si_nfiles, 0);

	sbinfo->si_bend = -1;

	sbinfo->si_wbr_copyup = AuWbrCopyup_Def;
	sbinfo->si_wbr_create = AuWbrCreate_Def;
	sbinfo->si_wbr_copyup_ops = au_wbr_copyup_ops + sbinfo->si_wbr_copyup;
	sbinfo->si_wbr_create_ops = au_wbr_create_ops + sbinfo->si_wbr_create;

	sbinfo->si_mntflags = au_opts_plink(AuOpt_Def);

	mutex_init(&sbinfo->si_xib_mtx);
	sbinfo->si_xino_brid = -1;
	/* leave si_xib_last_pindex and si_xib_next_bit */

	sbinfo->si_rdcache = msecs_to_jiffies(AUFS_RDCACHE_DEF * MSEC_PER_SEC);
	sbinfo->si_rdblk = AUFS_RDBLK_DEF;
	sbinfo->si_rdhash = AUFS_RDHASH_DEF;
	sbinfo->si_dirwh = AUFS_DIRWH_DEF;

	au_spl_init(&sbinfo->si_plink);
	init_waitqueue_head(&sbinfo->si_plink_wq);
	spin_lock_init(&sbinfo->si_plink_maint_lock);

	/* leave other members for sysaufs and si_mnt. */
	sbinfo->si_sb = sb;
	sb->s_fs_info = sbinfo;
	si_pid_set(sb);
	au_debug_sbinfo_init(sbinfo);
	return 0; /* success */

out_br:
	kfree(sbinfo->si_branch);
out_pidmap:
	kfree(sbinfo->au_si_pid.bitmap);
out_sbinfo:
	kfree(sbinfo);
out:
	return err;
}
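au_si_alloc() above is a textbook example of goto-based error unwinding: allocations are made in order, and each failure label frees exactly what was already set up, in reverse order. A compact userspace sketch of the same idiom, with hypothetical names:

#include <stdio.h>
#include <stdlib.h>

struct thing {
        int *a;
        int *b;
};

static struct thing *thing_alloc(void)
{
        struct thing *t = calloc(1, sizeof(*t));
        if (!t)
                goto out;

        t->a = malloc(64 * sizeof(*t->a));
        if (!t->a)
                goto out_thing;

        t->b = malloc(64 * sizeof(*t->b));
        if (!t->b)
                goto out_a;

        return t;               /* success: fully constructed */

out_a:                          /* unwind in reverse order of construction */
        free(t->a);
out_thing:
        free(t);
out:
        return NULL;
}

int main(void)
{
        struct thing *t = thing_alloc();
        if (!t) {
                fprintf(stderr, "allocation failed\n");
                return 1;
        }
        free(t->b);
        free(t->a);
        free(t);
        return 0;
}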
Example #30
/* Find an unused file structure and return a pointer to it.
 * Returns NULL if there are no more free file structures or
 * we run out of memory.
 *
 * Be very careful using this.  You are responsible for
 * getting write access to any mount that you might assign
 * to this filp, if it is opened for write.  If this is not
 * done, you will imbalance the mount's writer count
 * and get a warning at __fput() time.
 */
struct file *get_empty_filp(void)
{
	const struct cred *cred = current_cred();
	static long old_max;
	struct file * f;

	/*
	 * Privileged users can go above max_files
	 */
	if (get_nr_files() >= files_stat.max_files && !capable(CAP_SYS_ADMIN)) {
		/*
		 * percpu_counters are inaccurate.  Do an expensive check before
		 * we go and fail.
		 */
		if (percpu_counter_sum_positive(&nr_files) >= files_stat.max_files)
			goto over;
	}

	f = kmem_cache_zalloc(filp_cachep, GFP_KERNEL);
	if (f == NULL)
		goto fail;

	percpu_counter_inc(&nr_files);
	f->f_cred = get_cred(cred);
	if (security_file_alloc(f))
		goto fail_sec;

	INIT_LIST_HEAD(&f->f_u.fu_list);
	atomic_long_set(&f->f_count, 1);
	rwlock_init(&f->f_owner.lock);
	spin_lock_init(&f->f_lock);
	eventpoll_init_file(f);
	/* f->f_version: 0 */
	return f;

over:
	/* Ran out of filps - report that */
	if (get_nr_files() > old_max) {
#ifdef FILE_OVER_MAX
        static int fd_dump_all_files = 0;        
        if(!fd_dump_all_files) { 
	        struct task_struct *p;
	        xlog_printk(ANDROID_LOG_INFO, FS_TAG, "(PID:%d)files %d over old_max:%d", current->pid, get_nr_files(), old_max);
	        for_each_process(p) {
	            pid_t pid = p->pid;
	            struct files_struct *files = p->files;
	            struct fdtable *fdt = files_fdtable(files);
	            if(files && fdt) {
	                fd_show_open_files(pid, files, fdt);
	            }	        
	        }
	        fd_dump_all_files = 0x1;
        }
#endif	    
		pr_info("VFS: file-max limit %lu reached\n", get_max_files());
		old_max = get_nr_files();
	}
	goto fail;

fail_sec:
	file_free(f);
fail:
	return NULL;
}