static void free_pages_to_dynamic_pool(void *priv_data,
					struct hmm_page_object *page_obj)
{
	struct hmm_page *hmm_page;
	unsigned long flags;
	int ret;
	struct hmm_dynamic_pool_info *dypool_info;

	if (!priv_data)
		return;
	dypool_info = priv_data;

	spin_lock_irqsave(&dypool_info->list_lock, flags);
	if (dypool_info->flag != HMM_DYNAMIC_POOL_INITED) {
		spin_unlock_irqrestore(&dypool_info->list_lock, flags);
		return;
	}
	spin_unlock_irqrestore(&dypool_info->list_lock, flags);

	if (page_obj->type == HMM_PAGE_TYPE_RESERVED)
		return;
#ifdef USE_KMEM_CACHE
	hmm_page = kmem_cache_zalloc(dypool_info->pgptr_cache,
						GFP_KERNEL);
#else
	hmm_page = atomisp_kernel_malloc(sizeof(struct hmm_page));
#endif
	if (!hmm_page) {
		v4l2_err(&atomisp_dev, "out of memory for hmm_page.\n");

		/* free page directly */
		ret = set_pages_wb(page_obj->page, 1);
		if (ret)
			v4l2_err(&atomisp_dev,
					"set page to WB err ...\n");
		__free_pages(page_obj->page, 0);

		return;
	}

	hmm_page->page = page_obj->page;

	/*
	 * add to pages_list of pages_pool
	 */
	spin_lock_irqsave(&dypool_info->list_lock, flags);
	list_add_tail(&hmm_page->list, &dypool_info->pages_list);
	spin_unlock_irqrestore(&dypool_info->list_lock, flags);
}
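All of the examples on this page allocate zero-initialized objects from a slab cache that the surrounding subsystem has already set up. As a reference point, the following is a minimal sketch of that full cache lifecycle; the foo_item structure, the "foo_item" cache name and the helper functions are invented for illustration and are not taken from any of the examples.

#include <linux/slab.h>
#include <linux/list.h>
#include <linux/module.h>

/* hypothetical object type kept in a dedicated slab cache */
struct foo_item {
	struct list_head list;
	int value;
};

static struct kmem_cache *foo_cachep;

static int __init foo_cache_init(void)
{
	/* create the cache once, typically at module or subsystem init */
	foo_cachep = kmem_cache_create("foo_item", sizeof(struct foo_item),
				       0, SLAB_HWCACHE_ALIGN, NULL);
	return foo_cachep ? 0 : -ENOMEM;
}

static struct foo_item *foo_item_alloc(gfp_t gfp)
{
	/* zeroed allocation, as in the examples on this page */
	return kmem_cache_zalloc(foo_cachep, gfp);
}

static void foo_item_free(struct foo_item *item)
{
	kmem_cache_free(foo_cachep, item);
}

static void __exit foo_cache_exit(void)
{
	/* all objects must have been freed before the cache is destroyed */
	kmem_cache_destroy(foo_cachep);
}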
Example #2
static int __bprm_mm_init(struct linux_binprm *bprm)
{
	int err = -ENOMEM;
	struct vm_area_struct *vma = NULL;
	struct mm_struct *mm = bprm->mm;

	bprm->vma = vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
	if (!vma)
		goto err;

	down_write(&mm->mmap_sem);
	vma->vm_mm = mm;

	/*
	 * Place the stack at the largest stack address the architecture
	 * supports. Later, we'll move this to an appropriate place. We don't
	 * use STACK_TOP because that can depend on attributes which aren't
	 * configured yet.
	 */
	vma->vm_end = STACK_TOP_MAX;
	vma->vm_start = vma->vm_end - PAGE_SIZE;

	vma->vm_flags = VM_STACK_FLAGS;
	vma->vm_page_prot = protection_map[vma->vm_flags & 0x7];

	err = security_file_mmap(NULL, 0, 0, 0, vma->vm_start, 1);
	if (err)
		goto err;

	err = insert_vm_struct(mm, vma);
	if (err) {
		up_write(&mm->mmap_sem);
		goto err;
	}

	mm->stack_vm = mm->total_vm = 1;
	up_write(&mm->mmap_sem);

	bprm->p = vma->vm_end - sizeof(void *);

	return 0;

err:
	if (vma) {
		bprm->vma = NULL;
		kmem_cache_free(vm_area_cachep, vma);
	}

	return err;
}
Example #3
/* allocate new dentry private data */
int new_dentry_private_data(struct dentry *dentry)
{
	struct sdcardfs_dentry_info *info = SDCARDFS_D(dentry);

	/* use zalloc to init dentry_info.lower_path */
	info = kmem_cache_zalloc(sdcardfs_dentry_cachep, GFP_ATOMIC);
	if (!info)
		return -ENOMEM;

	spin_lock_init(&info->lock);
	dentry->d_fsdata = info;

	return 0;
}
Example #4
/* Find an unused file structure and return a pointer to it.
 * Returns NULL if there are no more free file structures or
 * we run out of memory.
 *
 * Be very careful using this.  You are responsible for
 * getting write access to any mount that you might assign
 * to this filp, if it is opened for write.  If this is not
 * done, you will imbalance the mount's writer count
 * and get a warning at __fput() time.
 */
struct file *get_empty_filp(void)
{
	const struct cred *cred = current_cred();
	static int old_max;
	struct file * f;

	/*
	 * Privileged users can go above max_files
	 */
	if (get_nr_files() >= files_stat.max_files && !capable(CAP_SYS_ADMIN)) {
		/*
		 * percpu_counters are inaccurate.  Do an expensive check before
		 * we go and fail.
		 */
		if (percpu_counter_sum_positive(&nr_files) >= files_stat.max_files)
			goto over;
	}

	f = kmem_cache_zalloc(filp_cachep, GFP_KERNEL);
	if (f == NULL)
		goto fail;

	percpu_counter_inc(&nr_files);
	if (security_file_alloc(f))
		goto fail_sec;

	INIT_LIST_HEAD(&f->f_u.fu_list);
	atomic_long_set(&f->f_count, 1);
	rwlock_init(&f->f_owner.lock);
	f->f_cred = get_cred(cred);
	spin_lock_init(&f->f_lock);
	eventpoll_init_file(f);
	/* f->f_version: 0 */
	return f;

over:
	/* Ran out of filps - report that */
	if (get_nr_files() > old_max) {
		printk(KERN_INFO "VFS: file-max limit %d reached\n",
					get_max_files());
		old_max = get_nr_files();
	}
	goto fail;

fail_sec:
	file_free(f);
fail:
	return NULL;
}
Example #5
static int copy_signal(unsigned long clone_flags, struct task_struct *tsk)
{
	struct signal_struct *sig;

	if (clone_flags & CLONE_THREAD)
		return 0;

	sig = kmem_cache_zalloc(signal_cachep, GFP_KERNEL);
	tsk->signal = sig;
	if (!sig)
		return -ENOMEM;

	sig->nr_threads = 1;
	atomic_set(&sig->live, 1);
	atomic_set(&sig->sigcnt, 1);
	init_waitqueue_head(&sig->wait_chldexit);
	if (clone_flags & CLONE_NEWPID)
		sig->flags |= SIGNAL_UNKILLABLE;
	sig->curr_target = tsk;
	init_sigpending(&sig->shared_pending);
	INIT_LIST_HEAD(&sig->posix_timers);

	hrtimer_init(&sig->real_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	sig->real_timer.function = it_real_fn;

	task_lock(current->group_leader);
	memcpy(sig->rlim, current->signal->rlim, sizeof sig->rlim);
	task_unlock(current->group_leader);

	posix_cpu_timers_init_group(sig);

	tty_audit_fork(sig);
	sched_autogroup_fork(sig);

#ifdef CONFIG_CGROUPS
	init_rwsem(&sig->group_rwsem);
#endif

	sig->oom_adj = current->signal->oom_adj;
	sig->oom_score_adj = current->signal->oom_score_adj;
	sig->oom_score_adj_min = current->signal->oom_score_adj_min;

	sig->has_child_subreaper = current->signal->has_child_subreaper ||
				   current->signal->is_child_subreaper;

	mutex_init(&sig->cred_guard_mutex);

	return 0;
}
Example #6
int vvp_req_init(const struct lu_env *env, struct cl_device *dev,
		 struct cl_req *req)
{
	struct vvp_req *vrq;
	int result;

	vrq = kmem_cache_zalloc(vvp_req_kmem, GFP_NOFS);
	if (vrq) {
		cl_req_slice_add(req, &vrq->vrq_cl, dev, &vvp_req_ops);
		result = 0;
	} else {
		result = -ENOMEM;
	}
	return result;
}
Example #7
static int copy_signal(unsigned long clone_flags, struct task_struct *tsk)
{
	struct signal_struct *sig;

	if (clone_flags & CLONE_THREAD)
		return 0;

	sig = kmem_cache_zalloc(signal_cachep, GFP_KERNEL);
	tsk->signal = sig;
	if (!sig)
		return -ENOMEM;

	sig->nr_threads = 1;
	atomic_set(&sig->live, 1);
	atomic_set(&sig->sigcnt, 1);

	/* list_add(thread_node, thread_head) without INIT_LIST_HEAD() */
	sig->thread_head = (struct list_head)LIST_HEAD_INIT(tsk->thread_node);
	tsk->thread_node = (struct list_head)LIST_HEAD_INIT(sig->thread_head);

	init_waitqueue_head(&sig->wait_chldexit);
	sig->curr_target = tsk;
	init_sigpending(&sig->shared_pending);
	INIT_LIST_HEAD(&sig->posix_timers);
	seqlock_init(&sig->stats_lock);

	hrtimer_init(&sig->real_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	sig->real_timer.function = it_real_fn;

	task_lock(current->group_leader);
	memcpy(sig->rlim, current->signal->rlim, sizeof sig->rlim);
	task_unlock(current->group_leader);

	posix_cpu_timers_init_group(sig);

	tty_audit_fork(sig);
	sched_autogroup_fork(sig);

	sig->oom_score_adj = current->signal->oom_score_adj;
	sig->oom_score_adj_min = current->signal->oom_score_adj_min;

	sig->has_child_subreaper = current->signal->has_child_subreaper ||
				   current->signal->is_child_subreaper;

	mutex_init(&sig->cred_guard_mutex);

	return 0;
}
Example #8
struct file *get_empty_filp(void)
{
	const struct cred *cred = current_cred();
	static long old_max;
	struct file * f;

	/*
	 * Privileged users can go above max_files
	 */
	if (get_nr_files() >= files_stat.max_files && !capable(CAP_SYS_ADMIN)) {
		/*
		 * percpu_counters are inaccurate.  Do an expensive check before
		 * we go and fail.
		 */
		if (percpu_counter_sum_positive(&nr_files) >= files_stat.max_files)
			goto over;
	}

	f = kmem_cache_zalloc(filp_cachep, GFP_KERNEL);
	if (f == NULL)
		goto fail;

	percpu_counter_inc(&nr_files);
	f->f_cred = get_cred(cred);
	if (security_file_alloc(f))
		goto fail_sec;

	INIT_LIST_HEAD(&f->f_u.fu_list);
	atomic_long_set(&f->f_count, 1);
	rwlock_init(&f->f_owner.lock);
	spin_lock_init(&f->f_lock);
	eventpoll_init_file(f);
	/* f->f_version: 0 */
	return f;

over:
	/* Ran out of filps - report that */
	if (get_nr_files() > old_max) {
		pr_info("VFS: file-max limit %lu reached\n", get_max_files());
		old_max = get_nr_files();
	}
	goto fail;

fail_sec:
	file_free(f);
fail:
	return NULL;
}
Example #9
/*
 * Allocates a new sysfs_dirent and links it to the parent sysfs_dirent
 */
static struct sysfs_dirent * __sysfs_new_dirent(void * element)
{
	struct sysfs_dirent * sd;

	sd = kmem_cache_zalloc(sysfs_dir_cachep, GFP_KERNEL);
	if (!sd)
		return NULL;

	atomic_set(&sd->s_count, 1);
	atomic_set(&sd->s_event, 1);
	INIT_LIST_HEAD(&sd->s_children);
	INIT_LIST_HEAD(&sd->s_sibling);
	sd->s_element = element;

	return sd;
}
Example #10
/* allocate new dentry private data */
int new_dentry_private_data(struct dentry *dentry)
{
	struct amfs_dentry_info *info = AMFS_D(dentry);

	/* use zalloc to init dentry_info.lower_path */
	info = kmem_cache_zalloc(amfs_dentry_cachep, GFP_ATOMIC);
	if (!info)
		return -ENOMEM;

	spin_lock_init(&info->lock);
	dentry->d_fsdata = info;

	return 0;
}
Example #11
/* Find an unused file structure and return a pointer to it.
 * Returns an error pointer if an error occurs, e.g. we are over the file
 * structure limit, run out of memory, or the operation is not permitted.
 *
 * Be very careful using this.  You are responsible for
 * getting write access to any mount that you might assign
 * to this filp, if it is opened for write.  If this is not
 * done, you will imbalance the mount's writer count
 * and get a warning at __fput() time.
 */
struct file *get_empty_filp(void)
{
    const struct cred *cred = current_cred();
    static long old_max;
    struct file *f;
    int error;

    /*
     * Privileged users can go above max_files
     */
    if (get_nr_files() >= files_stat.max_files && !capable(CAP_SYS_ADMIN)) {
        /*
         * percpu_counters are inaccurate.  Do an expensive check before
         * we go and fail.
         */
        if (percpu_counter_sum_positive(&nr_files) >= files_stat.max_files)
            goto over;
    }

    f = kmem_cache_zalloc(filp_cachep, GFP_KERNEL);
    if (unlikely(!f))
        return ERR_PTR(-ENOMEM);

    percpu_counter_inc(&nr_files);
    f->f_cred = get_cred(cred);
    error = security_file_alloc(f);
    if (unlikely(error)) {
        file_free(f);
        return ERR_PTR(error);
    }

    INIT_LIST_HEAD(&f->f_u.fu_list);
    atomic_long_set(&f->f_count, 1);
    rwlock_init(&f->f_owner.lock);
    spin_lock_init(&f->f_lock);
    eventpoll_init_file(f);
    /* f->f_version: 0 */
    return f;

over:
    /* Ran out of filps - report that */
    if (get_nr_files() > old_max) {
        pr_info("VFS: file-max limit %lu reached\n", get_max_files());
        old_max = get_nr_files();
    }
    return ERR_PTR(-ENFILE);
}
Example #12
int vmmr0_setup_async_pf(struct vmmr0_vcpu *vcpu, gva_t gva, gfn_t gfn,
		       struct vmmr0_arch_async_pf *arch)
{
	struct vmmr0_async_pf *work;

	if (vcpu->async_pf.queued >= ASYNC_PF_PER_VCPU)
		return 0;

	/* setup delayed work */

	/*
	 * do alloc nowait since if we are going to sleep anyway we
	 * may as well sleep faulting in page
	 */
	work = kmem_cache_zalloc(async_pf_cache, GFP_NOWAIT);
	if (!work)
		return 0;

	work->page = NULL;
	work->done = false;
	work->vcpu = vcpu;
	work->gva = gva;
	work->addr = mmu_gfn_to_hva(vcpu->pvm, gfn);
	work->arch = *arch;
	work->mm = current->mm;
	atomic_inc(&work->mm->mm_count);
	vmmr0_get_vm(work->vcpu->pvm);

	/* this can't really happen otherwise mmu_gfn_to_pfn_async
	   would succeed */
	if (unlikely(vmmr0_is_error_hva(work->addr)))
		goto retry_sync;

	INIT_WORK(&work->work, async_pf_execute);
	if (!schedule_work(&work->work))
		goto retry_sync;

	list_add_tail(&work->queue, &vcpu->async_pf.queue);
	vcpu->async_pf.queued++;
	vmmr0_arch_async_page_not_present(vcpu, work);
	return 1;
retry_sync:
	vmmr0_put_vm(work->vcpu->pvm);
	mmdrop(work->mm);
	kmem_cache_free(async_pf_cache, work);
	return 0;
}
Example #13
int kvm_setup_async_pf(struct kvm_vcpu *vcpu, gva_t gva, unsigned long hva,
		       struct kvm_arch_async_pf *arch)
{
	struct kvm_async_pf *work;

	if (vcpu->async_pf.queued >= ASYNC_PF_PER_VCPU)
		return 0;

	/* setup delayed work */

	/*
	 * do alloc nowait since if we are going to sleep anyway we
	 * may as well sleep faulting in page
	 */
	work = kmem_cache_zalloc(async_pf_cache, GFP_NOWAIT | __GFP_NOWARN);
	if (!work)
		return 0;

	work->wakeup_all = false;
	work->vcpu = vcpu;
	work->gva = gva;
	work->addr = hva;
	work->arch = *arch;
	work->mm = current->mm;
	atomic_inc(&work->mm->mm_users);
	kvm_get_kvm(work->vcpu->kvm);

	/* this can't really happen otherwise gfn_to_pfn_async
	   would succeed */
	if (unlikely(kvm_is_error_hva(work->addr)))
		goto retry_sync;

	INIT_WORK(&work->work, async_pf_execute);
	if (!schedule_work(&work->work))
		goto retry_sync;

	list_add_tail(&work->queue, &vcpu->async_pf.queue);
	vcpu->async_pf.queued++;
	kvm_arch_async_page_not_present(vcpu, work);
	return 1;
retry_sync:
	kvm_put_kvm(work->vcpu->kvm);
	mmput(work->mm);
	kmem_cache_free(async_pf_cache, work);
	return 0;
}
Example #14
/*
 * Allocates a new configfs_dirent and links it to the parent configfs_dirent
 */
static struct configfs_dirent *configfs_new_dirent(struct configfs_dirent * parent_sd,
						void * element)
{
	struct configfs_dirent * sd;

	sd = kmem_cache_zalloc(configfs_dir_cachep, GFP_KERNEL);
	if (!sd)
		return NULL;

	atomic_set(&sd->s_count, 1);
	INIT_LIST_HEAD(&sd->s_links);
	INIT_LIST_HEAD(&sd->s_children);
	list_add(&sd->s_sibling, &parent_sd->s_children);
	sd->s_element = element;

	return sd;
}
Example #15
/**
 * ext4_init_crypto() - Set up for ext4 encryption.
 *
 * We only call this when we start accessing encrypted files, since it
 * results in memory getting allocated that wouldn't otherwise be used.
 *
 * Return: Zero on success, non-zero otherwise.
 */
int ext4_init_crypto(void)
{
	int i, res = -ENOMEM;

	mutex_lock(&crypto_init);
	if (ext4_read_workqueue)
		goto already_initialized;
	ext4_read_workqueue = alloc_workqueue("ext4_crypto", WQ_HIGHPRI, 0);
	if (!ext4_read_workqueue)
		goto fail;

	ext4_crypto_ctx_cachep = KMEM_CACHE(ext4_crypto_ctx,
					    SLAB_RECLAIM_ACCOUNT);
	if (!ext4_crypto_ctx_cachep)
		goto fail;

	ext4_crypt_info_cachep = KMEM_CACHE(ext4_crypt_info,
					    SLAB_RECLAIM_ACCOUNT);
	if (!ext4_crypt_info_cachep)
		goto fail;

	for (i = 0; i < num_prealloc_crypto_ctxs; i++) {
		struct ext4_crypto_ctx *ctx;

		ctx = kmem_cache_zalloc(ext4_crypto_ctx_cachep, GFP_NOFS);
		if (!ctx) {
			res = -ENOMEM;
			goto fail;
		}
		list_add(&ctx->free_list, &ext4_free_crypto_ctxs);
	}

	ext4_bounce_page_pool =
		mempool_create_page_pool(num_prealloc_crypto_pages, 0);
	if (!ext4_bounce_page_pool) {
		res = -ENOMEM;
		goto fail;
	}
already_initialized:
	mutex_unlock(&crypto_init);
	return 0;
fail:
	ext4_exit_crypto();
	mutex_unlock(&crypto_init);
	return res;
}
Example #16
/**
 * ext4_get_crypto_ctx() - Gets an encryption context
 * @inode:       The inode for which we are doing the crypto
 *
 * Allocates and initializes an encryption context.
 *
 * Return: An allocated and initialized encryption context on success; error
 * value or NULL otherwise.
 */
struct ext4_crypto_ctx *ext4_get_crypto_ctx(struct inode *inode)
{
	struct ext4_crypto_ctx *ctx = NULL;
	int res = 0;
	unsigned long flags;
	struct ext4_crypt_info *ci = EXT4_I(inode)->i_crypt_info;

	if (ci == NULL)
		return ERR_PTR(-ENOKEY);

	/*
	 * We first try getting the ctx from a free list because in
	 * the common case the ctx will have an allocated and
	 * initialized crypto tfm, so it's probably a worthwhile
	 * optimization. For the bounce page, we first try getting it
	 * from the kernel allocator because that's just about as fast
	 * as getting it from a list and because a cache of free pages
	 * should generally be a "last resort" option for a filesystem
	 * to be able to do its job.
	 */
	spin_lock_irqsave(&ext4_crypto_ctx_lock, flags);
	ctx = list_first_entry_or_null(&ext4_free_crypto_ctxs,
				       struct ext4_crypto_ctx, free_list);
	if (ctx)
		list_del(&ctx->free_list);
	spin_unlock_irqrestore(&ext4_crypto_ctx_lock, flags);
	if (!ctx) {
		ctx = kmem_cache_zalloc(ext4_crypto_ctx_cachep, GFP_NOFS);
		if (!ctx) {
			res = -ENOMEM;
			goto out;
		}
		ctx->flags |= EXT4_CTX_REQUIRES_FREE_ENCRYPT_FL;
	} else {
		ctx->flags &= ~EXT4_CTX_REQUIRES_FREE_ENCRYPT_FL;
	}
	ctx->flags &= ~EXT4_WRITE_PATH_FL;

out:
	if (res) {
		if (!IS_ERR_OR_NULL(ctx))
			ext4_release_crypto_ctx(ctx);
		ctx = ERR_PTR(res);
	}
	return ctx;
}
Example #17
int vvp_lock_init(const struct lu_env *env, struct cl_object *obj,
		  struct cl_lock *lock, const struct cl_io *unused)
{
	struct vvp_lock *vlk;
	int result;

	CLOBINVRNT(env, obj, vvp_object_invariant(obj));

	vlk = kmem_cache_zalloc(vvp_lock_kmem, GFP_NOFS);
	if (vlk) {
		cl_lock_slice_add(lock, &vlk->vlk_cl, obj, &vvp_lock_ops);
		result = 0;
	} else {
		result = -ENOMEM;
	}
	return result;
}
Example #18
/*
 * allocate a new call
 */
static struct rxrpc_call *rxrpc_alloc_call(gfp_t gfp)
{
	struct rxrpc_call *call;

	call = kmem_cache_zalloc(rxrpc_call_jar, gfp);
	if (!call)
		return NULL;

	call->acks_winsz = 16;
	call->acks_window = kmalloc(call->acks_winsz * sizeof(unsigned long),
				    gfp);
	if (!call->acks_window) {
		kmem_cache_free(rxrpc_call_jar, call);
		return NULL;
	}

	setup_timer(&call->lifetimer, &rxrpc_call_life_expired,
		    (unsigned long) call);
	setup_timer(&call->deadspan, &rxrpc_dead_call_expired,
		    (unsigned long) call);
	setup_timer(&call->ack_timer, &rxrpc_ack_time_expired,
		    (unsigned long) call);
	setup_timer(&call->resend_timer, &rxrpc_resend_time_expired,
		    (unsigned long) call);
	INIT_WORK(&call->destroyer, &rxrpc_destroy_call);
	INIT_WORK(&call->processor, &rxrpc_process_call);
	INIT_LIST_HEAD(&call->accept_link);
	skb_queue_head_init(&call->rx_queue);
	skb_queue_head_init(&call->rx_oos_queue);
	init_waitqueue_head(&call->tx_waitq);
	spin_lock_init(&call->lock);
	rwlock_init(&call->state_lock);
	atomic_set(&call->usage, 1);
	call->debug_id = atomic_inc_return_unchecked(&rxrpc_debug_id);
	call->state = RXRPC_CALL_CLIENT_SEND_REQUEST;

	memset(&call->sock_node, 0xed, sizeof(call->sock_node));

	call->rx_data_expect = 1;
	call->rx_data_eaten = 0;
	call->rx_first_oos = 0;
	call->ackr_win_top = call->rx_data_eaten + 1 + RXRPC_MAXACKS;
	call->creation_jif = jiffies;
	return call;
}
Example #19
/*
 * initialize private struct file data.
 * if we fail, clean up by dropping fmode reference on the ceph_inode
 */
static int ceph_init_file(struct inode *inode, struct file *file, int fmode)
{
	struct ceph_file_info *cf;
	int ret = 0;

	switch (inode->i_mode & S_IFMT) {
	case S_IFREG:
		ceph_fscache_register_inode_cookie(inode);
		ceph_fscache_file_set_cookie(inode, file);
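		/* fall through: a regular file also gets the ceph_file_info set up below */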
	case S_IFDIR:
		dout("init_file %p %p 0%o (regular)\n", inode, file,
		     inode->i_mode);
		cf = kmem_cache_zalloc(ceph_file_cachep, GFP_KERNEL);
		if (cf == NULL) {
			ceph_put_fmode(ceph_inode(inode), fmode); /* clean up */
			return -ENOMEM;
		}
		cf->fmode = fmode;
		cf->next_offset = 2;
		cf->readdir_cache_idx = -1;
		file->private_data = cf;
		BUG_ON(inode->i_fop->release != ceph_release);
		break;

	case S_IFLNK:
		dout("init_file %p %p 0%o (symlink)\n", inode, file,
		     inode->i_mode);
		ceph_put_fmode(ceph_inode(inode), fmode); /* clean up */
		break;

	default:
		dout("init_file %p %p 0%o (special)\n", inode, file,
		     inode->i_mode);
		/*
		 * we need to drop the open ref now, since we don't
		 * have .release set to ceph_release.
		 */
		ceph_put_fmode(ceph_inode(inode), fmode); /* clean up */
		BUG_ON(inode->i_fop->release == ceph_release);

		/* call the proper open fop */
		ret = inode->i_fop->open(inode, file);
	}
	return ret;
}
Example #20
/*
 * Create a Greybus operation to be sent over the given connection.
 * The request buffer will be big enough for a payload of the given
 * size.
 *
 * For outgoing requests, the request message's header will be
 * initialized with the type of the request and the message size.
 * Outgoing operations must also specify the response buffer size,
 * which must be sufficient to hold all expected response data.  The
 * response message header will eventually be overwritten, so there's
 * no need to initialize it here.
 *
 * Request messages for incoming operations can arrive in interrupt
 * context, so they must be allocated with GFP_ATOMIC.  In this case
 * the request buffer will be immediately overwritten, so there is
 * no need to initialize the message header.  Responsibility for
 * allocating a response buffer lies with the incoming request
 * handler for a protocol.  So we don't allocate that here.
 *
 * Returns a pointer to the new operation or a null pointer if an
 * error occurs.
 */
static struct gb_operation *
gb_operation_create_common(struct gb_connection *connection, u8 type,
				size_t request_size, size_t response_size,
				unsigned long op_flags, gfp_t gfp_flags)
{
	struct gb_host_device *hd = connection->hd;
	struct gb_operation *operation;

	operation = kmem_cache_zalloc(gb_operation_cache, gfp_flags);
	if (!operation)
		return NULL;
	operation->connection = connection;

	operation->request = gb_operation_message_alloc(hd, type, request_size,
							gfp_flags);
	if (!operation->request)
		goto err_cache;
	operation->request->operation = operation;

	/* Allocate the response buffer for outgoing operations */
	if (!(op_flags & GB_OPERATION_FLAG_INCOMING)) {
		if (!gb_operation_response_alloc(operation, response_size,
						 gfp_flags)) {
			goto err_request;
		}
	}

	operation->flags = op_flags;
	operation->type = type;
	operation->errno = -EBADR;  /* Initial value--means "never set" */

	INIT_WORK(&operation->work, gb_operation_work);
	init_completion(&operation->completion);
	kref_init(&operation->kref);
	atomic_set(&operation->waiters, 0);

	return operation;

err_request:
	gb_operation_message_free(operation->request);
err_cache:
	kmem_cache_free(gb_operation_cache, operation);

	return NULL;
}
Example #21
int khashmap_add(struct khashmap *hlist, u64 key, void *val, gfp_t flags)
{
	struct khashmap_item *item = khashmap_find_item(hlist, key);

	if (item) {
		item->val = val;
		return 0;
	}

	item = kmem_cache_zalloc(hlist_cachep, flags);
	if (unlikely(!item))
		return -ENOMEM;
	item->key = key;
	item->val = val;
	hlist_add_head(&item->hlist, &hlist->hash[khashmap_hashfn(hlist, key)]);

	return 0;
}
Example #22
struct lu_object *osc_object_alloc(const struct lu_env *env,
				   const struct lu_object_header *unused,
				   struct lu_device *dev)
{
	struct osc_object *osc;
	struct lu_object *obj;

	osc = kmem_cache_zalloc(osc_object_kmem, GFP_NOFS);
	if (osc) {
		obj = osc2lu(osc);
		lu_object_init(obj, NULL, dev);
		osc->oo_cl.co_ops = &osc_ops;
		obj->lo_ops = &osc_lu_obj_ops;
	} else {
		obj = NULL;
	}
	return obj;
}
Example #23
static int appcl_lsm_file_alloc_security(struct file *file)
{
	struct file_security_label *flabel;

	flabel = kmem_cache_zalloc(sel_file_cache, GFP_NOFS);
	if (!flabel)
		return -ENOMEM;

	mutex_init(&flabel->lock);
	INIT_LIST_HEAD(&flabel->list);

	flabel->perms = 0x00;
	flabel->entries_count = 0;
	flabel->file = file;

	file->f_security = flabel;

	return 0;
}
Example #24
static struct kernfs_node *__kernfs_new_node(struct kernfs_root *root,
					     const char *name, umode_t mode,
					     unsigned flags)
{
	struct kernfs_node *kn;
	int ret;

	name = kstrdup_const(name, GFP_KERNEL);
	if (!name)
		return NULL;

	kn = kmem_cache_zalloc(kernfs_node_cache, GFP_KERNEL);
	if (!kn)
		goto err_out1;

	/*
	 * If the ino of the sysfs entry created for a kmem cache gets
	 * allocated from an ida layer, which is accounted to the memcg that
	 * owns the cache, the memcg will get pinned forever. So do not account
	 * ino ida allocations.
	 */
	ret = ida_simple_get(&root->ino_ida, 1, 0,
			     GFP_KERNEL | __GFP_NOACCOUNT);
	if (ret < 0)
		goto err_out2;
	kn->ino = ret;

	atomic_set(&kn->count, 1);
	atomic_set(&kn->active, KN_DEACTIVATED_BIAS);
	RB_CLEAR_NODE(&kn->rb);

	kn->name = name;
	kn->mode = mode;
	kn->flags = flags;

	return kn;

 err_out2:
	kmem_cache_free(kernfs_node_cache, kn);
 err_out1:
	kfree_const(name);
	return NULL;
}
Example #25
static struct xen_blkif *xen_blkif_alloc(domid_t domid)
{
	struct xen_blkif *blkif;

	blkif = kmem_cache_zalloc(xen_blkif_cachep, GFP_KERNEL);
	if (!blkif)
		return ERR_PTR(-ENOMEM);

	blkif->domid = domid;
	spin_lock_init(&blkif->blk_ring_lock);
	atomic_set(&blkif->refcnt, 1);
	init_waitqueue_head(&blkif->wq);
	init_completion(&blkif->drain_complete);
	atomic_set(&blkif->drain, 0);
	blkif->st_print = jiffies;
	init_waitqueue_head(&blkif->waiting_to_free);
	blkif->persistent_gnts.rb_node = NULL;

	return blkif;
}
Example #26
static int alloc_entries(struct mq_policy *mq, unsigned elts)
{
	unsigned u = mq->nr_entries;

	INIT_LIST_HEAD(&mq->free);
	mq->nr_entries_allocated = 0;

	while (u--) {
		struct entry *e = kmem_cache_zalloc(mq_entry_cache, GFP_KERNEL);

		if (!e) {
			free_entries(mq);
			return -ENOMEM;
		}

		list_add(&e->list, &mq->free);
	}

	return 0;
}
Example #27
int kvm_async_pf_wakeup_all(struct kvm_vcpu *vcpu)
{
	struct kvm_async_pf *work;

	if (!list_empty_careful(&vcpu->async_pf.done))
		return 0;

	work = kmem_cache_zalloc(async_pf_cache, GFP_ATOMIC);
	if (!work)
		return -ENOMEM;

	work->wakeup_all = true;
	INIT_LIST_HEAD(&work->queue); /* for list_del to work */

	spin_lock(&vcpu->async_pf.lock);
	list_add_tail(&work->link, &vcpu->async_pf.done);
	spin_unlock(&vcpu->async_pf.lock);

	vcpu->async_pf.queued++;
	return 0;
}
Example #28
void *dst_alloc(struct dst_ops *ops)
{
	struct dst_entry *dst;

	if (ops->gc && atomic_read(&ops->entries) > ops->gc_thresh) {
		if (ops->gc(ops))
			return NULL;
	}
	dst = kmem_cache_zalloc(ops->kmem_cachep, GFP_ATOMIC);
	if (!dst)
		return NULL;
	atomic_set(&dst->__refcnt, 0);
	dst->ops = ops;
	dst->lastuse = jiffies;
	dst->path = dst;
	dst->input = dst->output = dst_discard;
#if RT_CACHE_DEBUG >= 2
	atomic_inc(&dst_total);
#endif
	atomic_inc(&ops->entries);
	return dst;
}
Example #29
static struct avtab_node*
avtab_insert_node(struct avtab *h, int hvalue,
		  struct avtab_node *prev, struct avtab_node *cur,
		  struct avtab_key *key, struct avtab_datum *datum)
{
	struct avtab_node *newnode;
	newnode = kmem_cache_zalloc(avtab_node_cachep, GFP_KERNEL);
	if (newnode == NULL)
		return NULL;
	newnode->key = *key;
	newnode->datum = *datum;
	if (prev) {
		newnode->next = prev->next;
		prev->next = newnode;
	} else {
		newnode->next = h->htable[hvalue];
		h->htable[hvalue] = newnode;
	}

	h->nel++;
	return newnode;
}
Example #30
static int inode_alloc_security(struct inode *inode)
{
	struct inode_security_struct *isec;
	u32 sid = current_uid();
	int ret = 0;

	isec = kmem_cache_zalloc(kse_inode_cache, GFP_NOFS);
	if (!isec)
		return -ENOMEM;

	mutex_init(&isec->lock);
	isec->task_sid = sid;
	ret = init_mlevel(&isec->mlevel);
	if (ret) {
		/* don't leak the allocation if level init fails */
		kmem_cache_free(kse_inode_cache, isec);
		return ret;
	}

	isec->ilevel.level_value = 0;

	inode->i_security = isec;

	return 0;
}