Example #1
0
static inline void init_idle_pids(struct pid_link *links)
{
	enum pid_type type;

	for (type = PIDTYPE_PID; type < PIDTYPE_MAX; ++type) {
		INIT_HLIST_NODE(&links[type].node); /* not really needed */
		links[type].pid = &init_struct_pid;
	}
}
Example #2
static int msm_pmem_table_add(struct hlist_head *ptype,
	struct msm_pmem_info *info)
{
	struct file *file;
	unsigned long paddr;
#ifdef CONFIG_ANDROID_PMEM
	unsigned long kvstart;
	int rc;
#endif
	unsigned long len;
	struct msm_pmem_region *region;
#ifdef CONFIG_ANDROID_PMEM
	rc = get_pmem_file(info->fd, &paddr, &kvstart, &len, &file);
	if (rc < 0) {
		pr_err("%s: get_pmem_file fd %d error %d\n",
						__func__,
						info->fd, rc);
		return rc;
	}
	if (!info->len)
		info->len = len;

	rc = check_pmem_info(info, len);
	if (rc < 0)
		return rc;
#else
	paddr = 0;
	file = NULL;
#endif
	paddr += info->offset;
	len = info->len;

	if (check_overlap(ptype, paddr, len) < 0)
		return -EINVAL;

	CDBG("%s: type %d, active flag %d, paddr 0x%lx, vaddr 0x%lx\n",
		__func__, info->type, info->active, paddr,
		(unsigned long)info->vaddr);

	region = kmalloc(sizeof(struct msm_pmem_region), GFP_KERNEL);
	if (!region)
		return -ENOMEM;

	INIT_HLIST_NODE(&region->list);

	region->paddr = paddr;
	region->len = len;
	region->file = file;
	memcpy(&region->info, info, sizeof(region->info));
	D("%s Adding region to list with type %d\n", __func__,
						region->info.type);
	D("%s pmem_stats address is 0x%p\n", __func__, ptype);
	hlist_add_head(&(region->list), ptype);

	return 0;
}
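The examples above (and most that follow) share the same hlist life cycle: initialise the node with INIT_HLIST_NODE(), link it into a bucket with hlist_add_head(), walk the bucket to find it again, and unlink it when done. Below is a minimal, self-contained sketch of that cycle, assuming a recent kernel where hlist_for_each_entry() takes three arguments; struct item, item_add() and friends are illustrative names, not taken from any of the examples.

#include <linux/list.h>
#include <linux/slab.h>

struct item {
	int key;
	struct hlist_node node;
};

static HLIST_HEAD(items);		/* one empty bucket: .first == NULL */

static int item_add(int key)
{
	struct item *it = kmalloc(sizeof(*it), GFP_KERNEL);

	if (!it)
		return -ENOMEM;
	it->key = key;
	INIT_HLIST_NODE(&it->node);	/* node starts out unhashed */
	hlist_add_head(&it->node, &items);
	return 0;
}

static struct item *item_find(int key)
{
	struct item *it;

	hlist_for_each_entry(it, &items, node)
		if (it->key == key)
			return it;
	return NULL;
}

static void item_del(struct item *it)
{
	hlist_del_init(&it->node);	/* leaves the node unhashed again */
	kfree(it);
}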
Example #3
0
static struct nfulnl_instance *
instance_create(struct net *net, u_int16_t group_num,
		u32 portid, struct user_namespace *user_ns)
{
	struct nfulnl_instance *inst;
	struct nfnl_log_net *log = nfnl_log_pernet(net);
	int err;

	spin_lock_bh(&log->instances_lock);
	if (__instance_lookup(log, group_num)) {
		err = -EEXIST;
		goto out_unlock;
	}

	inst = kzalloc(sizeof(*inst), GFP_ATOMIC);
	if (!inst) {
		err = -ENOMEM;
		goto out_unlock;
	}

	if (!try_module_get(THIS_MODULE)) {
		kfree(inst);
		err = -EAGAIN;
		goto out_unlock;
	}

	INIT_HLIST_NODE(&inst->hlist);
	spin_lock_init(&inst->lock);
	/* needs to be two, since we _put() after creation */
	refcount_set(&inst->use, 2);

	timer_setup(&inst->timer, nfulnl_timer, 0);

	inst->net = get_net(net);
	inst->peer_user_ns = user_ns;
	inst->peer_portid = portid;
	inst->group_num = group_num;

	inst->qthreshold 	= NFULNL_QTHRESH_DEFAULT;
	inst->flushtimeout 	= NFULNL_TIMEOUT_DEFAULT;
	inst->nlbufsiz 		= NFULNL_NLBUFSIZ_DEFAULT;
	inst->copy_mode 	= NFULNL_COPY_PACKET;
	inst->copy_range 	= NFULNL_COPY_RANGE_MAX;

	hlist_add_head_rcu(&inst->hlist,
		       &log->instance_table[instance_hashfn(group_num)]);


	spin_unlock_bh(&log->instances_lock);

	return inst;

out_unlock:
	spin_unlock_bh(&log->instances_lock);
	return ERR_PTR(err);
}
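The "/* needs to be two, since we _put() after creation */" comment in instance_create() deserves a note: one reference belongs to the hash table, the other to the caller, and the _put() the caller performs after creation drops only its own, leaving the table's reference in place. A hedged sketch of that idiom using refcount_t (struct thing and its helpers are made-up names, not part of nfnetlink_log; the older variants further below use atomic_t the same way):

#include <linux/refcount.h>
#include <linux/slab.h>

struct thing {
	refcount_t use;
	/* ... table linkage, payload ... */
};

static void thing_put(struct thing *t)
{
	if (refcount_dec_and_test(&t->use))
		kfree(t);
}

static struct thing *thing_create(void)
{
	struct thing *t = kzalloc(sizeof(*t), GFP_KERNEL);

	if (!t)
		return NULL;
	/* one reference for the table, one for the caller */
	refcount_set(&t->use, 2);
	/* ... link t into the table here ... */
	return t;
}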
Example #4
0
/*
 * This assumes the cblock hasn't already been allocated.
 */
static struct entry *alloc_particular_entry(struct entry_pool *ep, dm_cblock_t cblock)
{
	struct entry *e = ep->entries + from_cblock(cblock);

	list_del_init(&e->list);
	INIT_HLIST_NODE(&e->hlist);
	ep->nr_allocated++;

	return e;
}
Example #5
0
static void gfs2_init_glock_once(void *foo)
{
	struct gfs2_glock *gl = foo;

	INIT_HLIST_NODE(&gl->gl_list);
	spin_lock_init(&gl->gl_spin);
	INIT_LIST_HEAD(&gl->gl_holders);
	INIT_LIST_HEAD(&gl->gl_lru);
	INIT_LIST_HEAD(&gl->gl_ail_list);
	atomic_set(&gl->gl_ail_count, 0);
}
Example #6
0
static struct nfulnl_instance *
instance_create(u_int16_t group_num, int pid)
{
	struct nfulnl_instance *inst;

	UDEBUG("entering (group_num=%u, pid=%d)\n", group_num,
		pid);

	write_lock_bh(&instances_lock);
	if (__instance_lookup(group_num)) {
		inst = NULL;
		UDEBUG("aborting, instance already exists\n");
		goto out_unlock;
	}

	inst = kzalloc(sizeof(*inst), GFP_ATOMIC);
	if (!inst)
		goto out_unlock;

	INIT_HLIST_NODE(&inst->hlist);
	spin_lock_init(&inst->lock);
	/* needs to be two, since we _put() after creation */
	atomic_set(&inst->use, 2);

	setup_timer(&inst->timer, nfulnl_timer, (unsigned long)inst);

	inst->peer_pid = pid;
	inst->group_num = group_num;

	inst->qthreshold 	= NFULNL_QTHRESH_DEFAULT;
	inst->flushtimeout 	= NFULNL_TIMEOUT_DEFAULT;
	inst->nlbufsiz 		= NFULNL_NLBUFSIZ_DEFAULT;
	inst->copy_mode 	= NFULNL_COPY_PACKET;
	inst->copy_range 	= 0xffff;

	if (!try_module_get(THIS_MODULE))
		goto out_free;

	hlist_add_head(&inst->hlist,
		       &instance_table[instance_hashfn(group_num)]);

	UDEBUG("newly added node: %p, next=%p\n", &inst->hlist,
		inst->hlist.next);

	write_unlock_bh(&instances_lock);

	return inst;

out_free:
	instance_put(inst);
out_unlock:
	write_unlock_bh(&instances_lock);
	return NULL;
}
Example #7
0
void
nfs4_init_deviceid_node(struct nfs4_deviceid_node *d,
			const struct pnfs_layoutdriver_type *ld,
			const struct nfs_client *nfs_client,
			const struct nfs4_deviceid *id)
{
	INIT_HLIST_NODE(&d->node);
	d->ld = ld;
	d->nfs_client = nfs_client;
	d->deviceid = *id;
	atomic_set(&d->ref, 1);
}
Example #8
0
static inline int hmap_entry_init(struct hash_entry *e, const char *key_str,
                                  unsigned int len)
{
    INIT_HLIST_NODE(&(e->head));
    if (key_str) {
        if ((e->key = (char *)malloc(len+1)) == NULL)
            return -1;
        strcpy((char *)e->key, (char *)key_str);
        /*		memcpy(e->key, key_str, len); */
        e->keylen = len;
    }
    return 0;
}
Example #9
0
static struct nfulnl_instance *
instance_create(u_int16_t group_num, int pid)
{
	struct nfulnl_instance *inst;
	int err;

	spin_lock_bh(&instances_lock);
	if (__instance_lookup(group_num)) {
		err = -EEXIST;
		goto out_unlock;
	}

	inst = kzalloc(sizeof(*inst), GFP_ATOMIC);
	if (!inst) {
		err = -ENOMEM;
		goto out_unlock;
	}

	if (!try_module_get(THIS_MODULE)) {
		kfree(inst);
		err = -EAGAIN;
		goto out_unlock;
	}

	INIT_HLIST_NODE(&inst->hlist);
	spin_lock_init(&inst->lock);
	/* needs to be two, since we _put() after creation */
	atomic_set(&inst->use, 2);

	setup_timer(&inst->timer, nfulnl_timer, (unsigned long)inst);

	inst->peer_pid = pid;
	inst->group_num = group_num;

	inst->qthreshold 	= NFULNL_QTHRESH_DEFAULT;
	inst->flushtimeout 	= NFULNL_TIMEOUT_DEFAULT;
	inst->nlbufsiz 		= NFULNL_NLBUFSIZ_DEFAULT;
	inst->copy_mode 	= NFULNL_COPY_PACKET;
	inst->copy_range 	= NFULNL_COPY_RANGE_MAX;

	hlist_add_head_rcu(&inst->hlist,
		       &instance_table[instance_hashfn(group_num)]);

	spin_unlock_bh(&instances_lock);

	return inst;

out_unlock:
	spin_unlock_bh(&instances_lock);
	return ERR_PTR(err);
}
Example #10
0
static struct entry *alloc_entry(struct entry_pool *ep)
{
	struct entry *e;

	if (list_empty(&ep->free))
		return NULL;

	e = list_entry(list_pop(&ep->free), struct entry, list);
	INIT_LIST_HEAD(&e->list);
	INIT_HLIST_NODE(&e->hlist);
	ep->nr_allocated++;

	return e;
}
Example #11
0
static void gfs2_init_glock_once(void *foo)
{
	struct gfs2_glock *gl = foo;

	INIT_HLIST_NODE(&gl->gl_list);
	spin_lock_init(&gl->gl_spin);
	INIT_LIST_HEAD(&gl->gl_holders);
	INIT_LIST_HEAD(&gl->gl_waiters1);
	INIT_LIST_HEAD(&gl->gl_waiters3);
	gl->gl_lvb = NULL;
	atomic_set(&gl->gl_lvb_count, 0);
	INIT_LIST_HEAD(&gl->gl_reclaim);
	INIT_LIST_HEAD(&gl->gl_ail_list);
	atomic_set(&gl->gl_ail_count, 0);
}
Example #12
0
/*
 * Allocates a new entry structure.  The memory is allocated in one lump,
 * so we just hand it out here.  Returns NULL if all entries have
 * already been allocated.  Cannot fail otherwise.
 */
static struct entry *alloc_entry(struct mq_policy *mq)
{
	struct entry *e;

	if (mq->nr_entries_allocated >= mq->nr_entries) {
		BUG_ON(!list_empty(&mq->free));
		return NULL;
	}

	e = list_entry(list_pop(&mq->free), struct entry, list);
	INIT_LIST_HEAD(&e->list);
	INIT_HLIST_NODE(&e->hlist);

	mq->nr_entries_allocated++;
	return e;
}
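Examples #4, #10 and #12 draw entries from storage that was allocated in one lump: a list_head threads the free entries, and the hlist_node is re-initialised whenever an entry is handed out so that it can later be hashed. list_pop() in those snippets is a helper local to dm-cache; the sketch below expresses the same pattern with only the standard list API (struct pool_entry and pool_alloc() are illustrative names).

#include <linux/list.h>

struct pool_entry {
	struct list_head list;		/* free-list linkage */
	struct hlist_node hlist;	/* hash-table linkage once in use */
};

struct entry_pool {
	struct pool_entry *entries;	/* backing array, allocated in one lump */
	struct list_head free;		/* threads the unused entries */
	unsigned int nr_allocated;
};

static struct pool_entry *pool_alloc(struct entry_pool *ep)
{
	struct pool_entry *e;

	if (list_empty(&ep->free))
		return NULL;		/* pool exhausted */

	e = list_first_entry(&ep->free, struct pool_entry, list);
	list_del_init(&e->list);
	INIT_HLIST_NODE(&e->hlist);
	ep->nr_allocated++;

	return e;
}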
Example #13
0
/**
 * ioc_create_icq - create and link io_cq
 * @q: request_queue of interest
 * @gfp_mask: allocation mask
 *
 * Make sure io_cq linking %current->io_context and @q exists.  If either
 * io_context and/or icq don't exist, they will be created using @gfp_mask.
 *
 * The caller is responsible for ensuring @ioc won't go away and @q is
 * alive and will stay alive until this function returns.
 */
struct io_cq *ioc_create_icq(struct request_queue *q, gfp_t gfp_mask)
{
	struct elevator_type *et = q->elevator->type;
	struct io_context *ioc;
	struct io_cq *icq;

	/* allocate stuff */
	ioc = create_io_context(current, gfp_mask, q->node);
	if (!ioc)
		return NULL;

	icq = kmem_cache_alloc_node(et->icq_cache, gfp_mask | __GFP_ZERO,
				    q->node);
	if (!icq)
		return NULL;

	if (radix_tree_preload(gfp_mask) < 0) {
		kmem_cache_free(et->icq_cache, icq);
		return NULL;
	}

	icq->ioc = ioc;
	icq->q = q;
	INIT_LIST_HEAD(&icq->q_node);
	INIT_HLIST_NODE(&icq->ioc_node);

	/* lock both q and ioc and try to link @icq */
	spin_lock_irq(q->queue_lock);
	spin_lock(&ioc->lock);

	if (likely(!radix_tree_insert(&ioc->icq_tree, q->id, icq))) {
		hlist_add_head(&icq->ioc_node, &ioc->icq_list);
		list_add(&icq->q_node, &q->icq_list);
		if (et->ops.elevator_init_icq_fn)
			et->ops.elevator_init_icq_fn(icq);
	} else {
		kmem_cache_free(et->icq_cache, icq);
		icq = ioc_lookup_icq(ioc, q);
		if (!icq)
			printk(KERN_ERR "cfq: icq link failed!\n");
	}

	spin_unlock(&ioc->lock);
	spin_unlock_irq(q->queue_lock);
	radix_tree_preload_end();
	return icq;
}
Example #14
0
static struct buffer_head *
buffer_alloc(int dev,uint32_t block)
{
	struct buffer_head *buf;
	buf = kmalloc(sizeof(struct buffer_head));

	if (!buf)
		return NULL;

	memset(buf,0x0,sizeof(struct buffer_head));
	buf->b_blocknr = block;
	buf->b_dev = dev;
	SPIN_LOCK_INIT(&buf->b_lock);
	INIT_HLIST_NODE(&buf->list_free);

	return buf;
}
Example #15
0
static void gfs2_init_glock_once(void *foo, struct kmem_cache *cachep, unsigned long flags)
{
	struct gfs2_glock *gl = foo;
	if (flags & SLAB_CTOR_CONSTRUCTOR) {
		INIT_HLIST_NODE(&gl->gl_list);
		spin_lock_init(&gl->gl_spin);
		INIT_LIST_HEAD(&gl->gl_holders);
		INIT_LIST_HEAD(&gl->gl_waiters1);
		INIT_LIST_HEAD(&gl->gl_waiters2);
		INIT_LIST_HEAD(&gl->gl_waiters3);
		gl->gl_lvb = NULL;
		atomic_set(&gl->gl_lvb_count, 0);
		INIT_LIST_HEAD(&gl->gl_reclaim);
		INIT_LIST_HEAD(&gl->gl_ail_list);
		atomic_set(&gl->gl_ail_count, 0);
	}
}
Example #16
0
static struct cfq_io_context *bfq_alloc_io_context(struct bfq_data *bfqd,
						   gfp_t gfp_mask)
{
	struct cfq_io_context *cic;

	cic = kmem_cache_alloc_node(bfq_ioc_pool, gfp_mask | __GFP_ZERO,
							bfqd->queue->node);
	if (cic != NULL) {
		cic->last_end_request = jiffies;
		INIT_LIST_HEAD(&cic->queue_list);
		INIT_HLIST_NODE(&cic->cic_list);
		cic->dtor = bfq_free_io_context;
		cic->exit = bfq_exit_io_context;
		elv_ioc_count_inc(bfq_ioc_count);
	}

	return cic;
}
Example #17
0
static uproc_dentry_t* __uproc_create(uproc_ctx_t *ctx,
                                      const char *name,
                                      mode_t mode,
                                      uproc_dentry_t **parent) {
    const char *lp;
    uproc_dentry_t *new_entry = NULL;
    size_t namelen;
    if (!name || !strlen(name)) {
        if (ctx->dbg)
            fprintf(stderr, "uproc: empty pathname!\n");
        goto out;
    }

    if (__find_last_part(ctx, name, parent, &lp)) {
        if (ctx->dbg)
            fprintf(stderr, "uproc: some parts of \"%s\" does not exist!\n", name);
        goto out;
    }

    if (!*lp) {
        if (ctx->dbg)
            fprintf(stderr, "uproc: invalid pathname \"%s\"\n", name);
        goto out;
    }

    namelen = strlen(lp);
    new_entry = malloc(sizeof(*new_entry) + namelen + 1);
    if (!new_entry) {
        if (ctx->dbg)
            fprintf(stderr, "uproc: memory shortage, can't allocate memory for entry \"%s\"\n", name);
        goto out;
    }

    memset(new_entry, 0, sizeof(*new_entry) + namelen + 1);
    new_entry->name = (char *)new_entry + sizeof(*new_entry);
    memcpy(new_entry->name, lp, namelen);
    new_entry->namelen = namelen;
    new_entry->uid = getuid();
    new_entry->gid = getgid();
    new_entry->mode = mode;
    INIT_HLIST_NODE(&new_entry->hlink);
out:
    return new_entry;
}
Example #18
0
struct ptlrpc_connection *
ptlrpc_connection_get(struct lnet_process_id peer, lnet_nid_t self,
		      struct obd_uuid *uuid)
{
	struct ptlrpc_connection *conn, *conn2;
	ENTRY;

	peer.nid = LNetPrimaryNID(peer.nid);
	conn = cfs_hash_lookup(conn_hash, &peer);
	if (conn)
		GOTO(out, conn);

	OBD_ALLOC_PTR(conn);
	if (!conn)
		RETURN(NULL);

	conn->c_peer = peer;
	conn->c_self = self;
	INIT_HLIST_NODE(&conn->c_hash);
	atomic_set(&conn->c_refcount, 1);
	if (uuid)
		obd_str2uuid(&conn->c_remote_uuid, uuid->uuid);

	/*
	 * Add the newly created conn to the hash, on key collision we
	 * lost a racing addition and must destroy our newly allocated
	 * connection.	The object which exists in the hash will be
	 * returned and may be compared against our object.
	 */
	/* In the function below, .hs_keycmp resolves to
	 * conn_keycmp() */
	/* coverity[overrun-buffer-val] */
	conn2 = cfs_hash_findadd_unique(conn_hash, &peer, &conn->c_hash);
	if (conn != conn2) {
		OBD_FREE_PTR(conn);
		conn = conn2;
	}
	EXIT;
out:
	CDEBUG(D_INFO, "conn=%p refcount %d to %s\n",
	       conn, atomic_read(&conn->c_refcount),
	       libcfs_nid2str(conn->c_peer.nid));
	return conn;
}
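ptlrpc_connection_get() above, like the nfnetlink instance_create() variants, follows a common idiom: build and initialise the object outside the lock, then insert it under the table lock and, if another thread won the race, drop the new object and use the existing one. A condensed sketch of that idiom (conn_get(), the single-bucket table and the spinlock are illustrative, not Lustre API):

#include <linux/list.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct conn {
	int key;
	struct hlist_node hash;
};

static HLIST_HEAD(conn_bucket);		/* a single bucket, for brevity */
static DEFINE_SPINLOCK(conn_lock);

static struct conn *__conn_lookup(int key)
{
	struct conn *c;

	hlist_for_each_entry(c, &conn_bucket, hash)
		if (c->key == key)
			return c;
	return NULL;
}

static struct conn *conn_get(int key)
{
	struct conn *c, *old;

	c = kzalloc(sizeof(*c), GFP_KERNEL);	/* built outside the lock */
	if (!c)
		return NULL;
	c->key = key;
	INIT_HLIST_NODE(&c->hash);

	spin_lock(&conn_lock);
	old = __conn_lookup(key);
	if (old) {
		kfree(c);		/* lost the race: keep the existing entry */
		c = old;
	} else {
		hlist_add_head(&c->hash, &conn_bucket);
	}
	spin_unlock(&conn_lock);

	return c;
}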
Example #19
0
static struct shim_ipc_port * __get_new_ipc_port (PAL_HANDLE hdl)
{
    struct shim_ipc_port * port =
                get_mem_obj_from_mgr_enlarge(port_mgr,
                                             size_align_up(PORT_MGR_ALLOC));

    if (!port)
        return NULL;

    memset(port, 0, sizeof(struct shim_ipc_port));
    port->pal_handle = hdl;
    port->update = true;
    INIT_HLIST_NODE(&port->hlist);
    INIT_LIST_HEAD(&port->list);
    INIT_LIST_HEAD(&port->msgs);
    REF_SET(port->ref_count, 1);
    create_lock(port->msgs_lock);
    return port;
}
Example #20
void blk_rq_init(struct request_queue *q, struct request *rq)
{
	memset(rq, 0, sizeof(*rq));

	INIT_LIST_HEAD(&rq->queuelist);
	INIT_LIST_HEAD(&rq->timeout_list);
	rq->cpu = -1;
	rq->q = q;
	rq->__sector = (sector_t) -1;
	INIT_HLIST_NODE(&rq->hash);
	RB_CLEAR_NODE(&rq->rb_node);
	rq->cmd = rq->__cmd;
	rq->cmd_len = BLK_MAX_CDB;
	rq->tag = -1;
	rq->ref_count = 1;
	rq->start_time = jiffies;
	set_start_time_ns(rq);
	rq->part = NULL;
}
Example #21
0
struct ptlrpc_connection *
ptlrpc_connection_get(lnet_process_id_t peer, lnet_nid_t self,
		      struct obd_uuid *uuid)
{
	struct ptlrpc_connection *conn, *conn2;

	conn = cfs_hash_lookup(conn_hash, &peer);
	if (conn)
		goto out;

	conn = kzalloc(sizeof(*conn), GFP_NOFS);
	if (!conn)
		return NULL;

	conn->c_peer = peer;
	conn->c_self = self;
	INIT_HLIST_NODE(&conn->c_hash);
	atomic_set(&conn->c_refcount, 1);
	if (uuid)
		obd_str2uuid(&conn->c_remote_uuid, uuid->uuid);

	/*
	 * Add the newly created conn to the hash, on key collision we
	 * lost a racing addition and must destroy our newly allocated
	 * connection.  The object which exists in the hash will be
	 * returned and may be compared against our object.
	 */
	/* In the function below, .hs_keycmp resolves to
	 * conn_keycmp()
	 */
	/* coverity[overrun-buffer-val] */
	conn2 = cfs_hash_findadd_unique(conn_hash, &peer, &conn->c_hash);
	if (conn != conn2) {
		kfree(conn);
		conn = conn2;
	}
out:
	CDEBUG(D_INFO, "conn=%p refcount %d to %s\n",
	       conn, atomic_read(&conn->c_refcount),
	       libcfs_nid2str(conn->c_peer.nid));
	return conn;
}
Example #22
0
struct nfc_llcp_sdp_tlv *nfc_llcp_build_sdreq_tlv(u8 tid, char *uri,
						  size_t uri_len)
{
	struct nfc_llcp_sdp_tlv *sdreq;

	pr_debug("uri: %s, len: %zu\n", uri, uri_len);

	/* sdreq->tlv_len is u8, takes uri_len, + 3 for header, + 1 for NULL */
	if (WARN_ON_ONCE(uri_len > U8_MAX - 4))
		return NULL;

	sdreq = kzalloc(sizeof(struct nfc_llcp_sdp_tlv), GFP_KERNEL);
	if (sdreq == NULL)
		return NULL;

	sdreq->tlv_len = uri_len + 3;

	if (uri[uri_len - 1] == 0)
		sdreq->tlv_len--;

	sdreq->tlv = kzalloc(sdreq->tlv_len + 1, GFP_KERNEL);
	if (sdreq->tlv == NULL) {
		kfree(sdreq);
		return NULL;
	}

	sdreq->tlv[0] = LLCP_TLV_SDREQ;
	sdreq->tlv[1] = sdreq->tlv_len - 2;
	sdreq->tlv[2] = tid;

	sdreq->tid = tid;
	sdreq->uri = sdreq->tlv + 3;
	memcpy(sdreq->uri, uri, uri_len);

	sdreq->time = jiffies;

	INIT_HLIST_NODE(&sdreq->node);

	return sdreq;
}
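For concreteness, here is the length bookkeeping worked through on an illustrative input (not part of the original source): with uri = "urn:nfc:sn:snep", uri_len = 15 and there is no trailing NUL, so tlv_len = uri_len + 3 = 18 (type, length and tid bytes plus the URI), tlv[1] = tlv_len - 2 = 16 (the value part: tid plus URI), and the kzalloc() size is tlv_len + 1 = 19 to leave room for a terminating NUL. The WARN_ON_ONCE(uri_len > U8_MAX - 4) check keeps uri_len + 3 + 1 within U8_MAX, which is exactly the budget the comment above describes for the u8 tlv_len field.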
Example #23
static int objhash_add_one(struct my_obj *obj)
{
	u32 hash_idx;

	if (obj == NULL) {
		pr_err("%s(): Failed, NULL object\n", __func__);
		return 0;
	}

	objhash_cnt++;
	INIT_HLIST_NODE(&obj->node);
	obj->page = virt_to_head_page(obj);

	/* Hash on the page address of the object */
	hash_idx = jhash(&obj->page, 8, 13);
	//pr_info("DEBUG: hash_idx=0x%x [%u] page=0x%p\n",
	//	hash_idx, hash_idx % HASHSZ, obj->page);
	hash_idx = hash_idx % HASHSZ;

	hlist_add_head(&obj->node, &objhash[hash_idx]);

	return 1;
}
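objhash_add_one() buckets objects by hashing the page pointer with jhash(): the hard-coded 8 is the pointer size on a 64-bit build and 13 is an arbitrary initial value. A small sketch of that bucket selection, using sizeof() instead of the literal and illustrative names (MY_HASHSZ, bucket_for()):

#include <linux/jhash.h>
#include <linux/list.h>

#define MY_HASHSZ 64

static struct hlist_head my_hash[MY_HASHSZ];

static struct hlist_head *bucket_for(const void *ptr)
{
	/* hash the pointer value itself, then fold it into a bucket index */
	u32 idx = jhash(&ptr, sizeof(ptr), 13) % MY_HASHSZ;

	return &my_hash[idx];
}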
Example #24
0
/**
 * fscache_object_init - Initialise a cache object description
 * @object: Object description
 * @cookie: Cookie object will be attached to
 * @cache: Cache in which backing object will be found
 *
 * Initialise a cache object description to its basic values.
 *
 * See Documentation/filesystems/caching/backend-api.txt for a complete
 * description.
 */
void fscache_object_init(struct fscache_object *object,
			 struct fscache_cookie *cookie,
			 struct fscache_cache *cache)
{
	const struct fscache_transition *t;

	atomic_inc(&cache->object_count);

	object->state = STATE(WAIT_FOR_INIT);
	object->oob_table = fscache_osm_init_oob;
	object->flags = 1 << FSCACHE_OBJECT_IS_LIVE;
	spin_lock_init(&object->lock);
	INIT_LIST_HEAD(&object->cache_link);
	INIT_HLIST_NODE(&object->cookie_link);
	INIT_WORK(&object->work, fscache_object_work_func);
	INIT_LIST_HEAD(&object->dependents);
	INIT_LIST_HEAD(&object->dep_link);
	INIT_LIST_HEAD(&object->pending_ops);
	object->n_children = 0;
	object->n_ops = object->n_in_progress = object->n_exclusive = 0;
	object->events = 0;
	object->store_limit = 0;
	object->store_limit_l = 0;
	object->cache = cache;
	object->cookie = cookie;
	object->parent = NULL;
#ifdef CONFIG_FSCACHE_OBJECT_LIST
	RB_CLEAR_NODE(&object->objlist_link);
#endif

	object->oob_event_mask = 0;
	for (t = object->oob_table; t->events; t++)
		object->oob_event_mask |= t->events;
	object->event_mask = object->oob_event_mask;
	for (t = object->state->transitions; t->events; t++)
		object->event_mask |= t->events;
}
Example #25
0
static struct cfq_io_context *bfq_alloc_io_context(struct bfq_data *bfqd,
        gfp_t gfp_mask)
{
    struct cfq_io_context *cic;

    cic = kmem_cache_alloc_node(bfq_ioc_pool, gfp_mask | __GFP_ZERO,
                                bfqd->queue->node);
    if (cic != NULL) {
        cic->last_end_request = jiffies;
        /*
         * A newly created cic indicates that the process has just
         * started doing I/O, and is probably mapping into memory its
         * executable and libraries: it definitely needs weight raising.
         * There is however the possibility that the process performs,
         * for a while, I/O close to some other process. EQM intercepts
         * this behavior and may merge the queue corresponding to the
         * process  with some other queue, BEFORE the weight of the queue
         * is raised. Merged queues are not weight-raised (they are assumed
         * to belong to processes that benefit only from high throughput).
         * If the merge is basically the consequence of an accident, then
         * the queue will be split soon and will get back its old weight.
         * It is then important to write down somewhere that this queue
         * does need weight raising, even if it did not make it to get its
         * weight raised before being merged. To this purpose, we overload
         * the field raising_time_left and assign 1 to it, to mark the queue
         * as needing weight raising.
         */
        cic->wr_time_left = 1;
        INIT_LIST_HEAD(&cic->queue_list);
        INIT_HLIST_NODE(&cic->cic_list);
        cic->dtor = bfq_free_io_context;
        cic->exit = bfq_exit_io_context;
        elv_ioc_count_inc(bfq_ioc_count);
    }

    return cic;
}
Example #26
0
static void dlm_init_mle(struct dlm_master_list_entry *mle,
			enum dlm_mle_type type,
			struct dlm_ctxt *dlm,
			struct dlm_lock_resource *res,
			const char *name,
			unsigned int namelen)
{
	assert_spin_locked(&dlm->spinlock);

	mle->dlm = dlm;
	mle->type = type;
	INIT_HLIST_NODE(&mle->master_hash_node);
	INIT_LIST_HEAD(&mle->hb_events);
	memset(mle->maybe_map, 0, sizeof(mle->maybe_map));
	spin_lock_init(&mle->spinlock);
	init_waitqueue_head(&mle->wq);
	atomic_set(&mle->woken, 0);
	kref_init(&mle->mle_refs);
	memset(mle->response_map, 0, sizeof(mle->response_map));
	mle->master = O2NM_MAX_NODES;
	mle->new_master = O2NM_MAX_NODES;
	mle->inuse = 0;

	BUG_ON(mle->type != DLM_MLE_BLOCK &&
	       mle->type != DLM_MLE_MASTER &&
	       mle->type != DLM_MLE_MIGRATION);
	/* rest of the initialisation is not included in this excerpt */
}
Example #27
0
/**
 * nfp_flower_add_offload() - Adds a new flow to hardware.
 * @app:	Pointer to the APP handle
 * @netdev:	netdev structure.
 * @flow:	TC flower classifier offload structure.
 * @egress:	NFP netdev is the egress.
 *
 * Adds a new flow to the repeated hash structure and action payload.
 *
 * Return: negative value on error, 0 if configured successfully.
 */
static int
nfp_flower_add_offload(struct nfp_app *app, struct net_device *netdev,
		       struct tc_cls_flower_offload *flow, bool egress)
{
	enum nfp_flower_tun_type tun_type = NFP_FL_TUNNEL_NONE;
	struct nfp_port *port = nfp_port_from_netdev(netdev);
	struct nfp_flower_priv *priv = app->priv;
	struct nfp_fl_payload *flow_pay;
	struct nfp_fl_key_ls *key_layer;
	struct net_device *ingr_dev;
	int err;

	ingr_dev = egress ? NULL : netdev;
	flow_pay = nfp_flower_search_fl_table(app, flow->cookie, ingr_dev,
					      NFP_FL_STATS_CTX_DONT_CARE);
	if (flow_pay) {
		/* Ignore as duplicate if it has been added by different cb. */
		if (flow_pay->ingress_offload && egress)
			return 0;
		else
			return -EOPNOTSUPP;
	}

	key_layer = kmalloc(sizeof(*key_layer), GFP_KERNEL);
	if (!key_layer)
		return -ENOMEM;

	err = nfp_flower_calculate_key_layers(app, key_layer, flow, egress,
					      &tun_type);
	if (err)
		goto err_free_key_ls;

	flow_pay = nfp_flower_allocate_new(key_layer, egress);
	if (!flow_pay) {
		err = -ENOMEM;
		goto err_free_key_ls;
	}

	flow_pay->ingress_dev = egress ? NULL : netdev;

	err = nfp_flower_compile_flow_match(flow, key_layer, netdev, flow_pay,
					    tun_type);
	if (err)
		goto err_destroy_flow;

	err = nfp_flower_compile_action(app, flow, netdev, flow_pay);
	if (err)
		goto err_destroy_flow;

	err = nfp_compile_flow_metadata(app, flow, flow_pay,
					flow_pay->ingress_dev);
	if (err)
		goto err_destroy_flow;

	err = nfp_flower_xmit_flow(netdev, flow_pay,
				   NFP_FLOWER_CMSG_TYPE_FLOW_ADD);
	if (err)
		goto err_destroy_flow;

	INIT_HLIST_NODE(&flow_pay->link);
	flow_pay->tc_flower_cookie = flow->cookie;
	hash_add_rcu(priv->flow_table, &flow_pay->link, flow->cookie);
	port->tc_offload_cnt++;

	/* Deallocate flow payload when flower rule has been destroyed. */
	kfree(key_layer);

	return 0;

err_destroy_flow:
	kfree(flow_pay->action_data);
	kfree(flow_pay->mask_data);
	kfree(flow_pay->unmasked_data);
	kfree(flow_pay);
err_free_key_ls:
	kfree(key_layer);
	return err;
}
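nfp_flower_add_offload() stores the flow in a <linux/hashtable.h> table: DEFINE_HASHTABLE() declares a fixed array of hlist heads, hash_add_rcu() folds the key into a bucket, and a lookup walks only the one bucket the key can live in. A hedged sketch of that API with made-up names (this flow_table is not the driver's field):

#include <linux/hashtable.h>
#include <linux/rcupdate.h>

struct flow {
	unsigned long cookie;
	struct hlist_node link;
};

static DEFINE_HASHTABLE(flow_table, 8);		/* 2^8 = 256 buckets */

static void flow_insert(struct flow *f, unsigned long cookie)
{
	f->cookie = cookie;
	INIT_HLIST_NODE(&f->link);
	hash_add_rcu(flow_table, &f->link, cookie);
}

/* caller must hold rcu_read_lock() */
static struct flow *flow_lookup(unsigned long cookie)
{
	struct flow *f;

	hash_for_each_possible_rcu(flow_table, f, link, cookie)
		if (f->cookie == cookie)
			return f;
	return NULL;
}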
Example #28
0
static int __add_msg_handle (unsigned long key, IDTYPE msqid, bool owned,
                             struct shim_msg_handle ** msghdl)
{
    struct hlist_head * key_head = (key != IPC_PRIVATE) ?
                                   &msgq_key_hlist[MSGQ_HASH(key)] :
                                   NULL;
    struct hlist_head * qid_head = msqid ?
                                   &msgq_qid_hlist[MSGQ_HASH(msqid)] :
                                   NULL;

    struct shim_msg_handle * tmp;
    struct hlist_node * pos;

    if (key_head)
        hlist_for_each_entry(tmp, pos, key_head, key_hlist)
            if (tmp->msqkey == key) {
                if (tmp->msqid == msqid) {
                    if (msghdl)
                        *msghdl = tmp;
                    return 0;
                }
                return -EEXIST;
            }

    if (qid_head)
        hlist_for_each_entry(tmp, pos, qid_head, qid_hlist)
            if (tmp->msqid == msqid) {
                if (key)
                    tmp->msqkey = key;
                if (msghdl)
                    *msghdl = tmp;
                return 0;
            }

    struct shim_handle * hdl = get_new_handle();
    if (!hdl)
        return -ENOMEM;

    struct shim_msg_handle * msgq = &hdl->info.msg;

    hdl->type       = TYPE_MSG;
    msgq->msqkey    = key;
    msgq->msqid     = msqid;
    msgq->owned     = owned;
    msgq->deleted   = false;
    msgq->currentsize = 0;
    msgq->event     = DkSynchronizationEventCreate(0);

    msgq->queue     = malloc(MSG_QOBJ_SIZE * DEFAULT_MSG_QUEUE_SIZE);
    msgq->queuesize = DEFAULT_MSG_QUEUE_SIZE;
    msgq->queueused = 0;
    msgq->freed     = NULL;

    msgq->ntypes    = 0;
    msgq->maxtypes  = INIT_MSG_TYPE_SIZE;
    msgq->types     = malloc(sizeof(struct msg_type) * INIT_MSG_TYPE_SIZE);

    INIT_LIST_HEAD(&msgq->list);
    get_handle(hdl);
    list_add_tail(&msgq->list, &msgq_list);

    INIT_HLIST_NODE(&msgq->key_hlist);
    if (key_head) {
        get_handle(hdl);
        hlist_add_head(&msgq->key_hlist, key_head);
    }
    INIT_HLIST_NODE(&msgq->qid_hlist);
    if (qid_head) {
        get_handle(hdl);
        hlist_add_head(&msgq->qid_hlist, qid_head);
    }

    if (!msghdl) {
        put_handle(hdl);
        return 0;
    }

    *msghdl = msgq;
    return 0;
}
Example #29
0
int ib_umem_odp_get(struct ib_ucontext *context, struct ib_umem *umem)
{
	int ret_val;
	struct pid *our_pid;
	struct mm_struct *mm = get_task_mm(current);
	BUG_ON(!mm);

	/* Prevent creating ODP MRs in child processes */
	rcu_read_lock();
	our_pid = get_task_pid(current->group_leader, PIDTYPE_PID);
	rcu_read_unlock();
	put_pid(our_pid);
	if (context->tgid != our_pid) {
		ret_val = -EINVAL;
		goto out_mm;
	}

	umem->hugetlb = 0;
	umem->odp_data = kzalloc(sizeof(*umem->odp_data), GFP_KERNEL);
	if (!umem->odp_data) {
		ret_val = -ENOMEM;
		goto out_mm;
	}
	umem->odp_data->umem = umem;

	mutex_init(&umem->odp_data->umem_mutex);

	init_completion(&umem->odp_data->notifier_completion);

	umem->odp_data->page_list = vzalloc(ib_umem_num_pages(umem) *
					    sizeof(*umem->odp_data->page_list));
	if (!umem->odp_data->page_list) {
		ret_val = -ENOMEM;
		goto out_odp_data;
	}

	umem->odp_data->dma_list = vzalloc(ib_umem_num_pages(umem) *
					  sizeof(*umem->odp_data->dma_list));
	if (!umem->odp_data->dma_list) {
		ret_val = -ENOMEM;
		goto out_page_list;
	}

	/*
	 * When using MMU notifiers, we will get a
	 * notification before the "current" task (and MM) is
	 * destroyed. We use the umem_mutex lock to synchronize.
	 */
	down_write(&context->umem_mutex);
	context->odp_mrs_count++;
	if (likely(ib_umem_start(umem) != ib_umem_end(umem)))
		rbt_ib_umem_insert(&umem->odp_data->interval_tree,
				   &context->umem_tree);
	downgrade_write(&context->umem_mutex);

	if (context->odp_mrs_count == 1) {
		/*
		 * Note that at this point, no MMU notifier is running
		 * for this context!
		 */
		INIT_HLIST_NODE(&context->mn.hlist);
		context->mn.ops = &ib_umem_notifiers;
		/*
		 * Lock-dep detects a false positive for mmap_sem vs.
		 * umem_mutex, due to not grasping downgrade_write correctly.
		 */
		lockdep_off();
		ret_val = mmu_notifier_register(&context->mn, mm);
		lockdep_on();
		if (ret_val) {
			pr_err("Failed to register mmu_notifier %d\n", ret_val);
			ret_val = -EBUSY;
			goto out_mutex;
		}
	}

	up_read(&context->umem_mutex);

	/*
	 * Note that doing an mmput can cause a notifier for the relevant mm.
	 * If the notifier is called while we hold the umem_mutex, this will
	 * cause a deadlock. Therefore, we release the reference only after we
	 * released the mutex.
	 */
	mmput(mm);
	return 0;

out_mutex:
	up_read(&context->umem_mutex);
	vfree(umem->odp_data->dma_list);
out_page_list:
	vfree(umem->odp_data->page_list);
out_odp_data:
	kfree(umem->odp_data);
out_mm:
	mmput(mm);
	return ret_val;
}
Example #30
0
static int msm_pmem_table_add(struct hlist_head *ptype,
	struct msm_pmem_info *info, struct ion_client *client)
{
	unsigned long paddr;
#ifndef CONFIG_MSM_MULTIMEDIA_USE_ION
	unsigned long kvstart;
	struct file *file;
#endif
	int rc = -ENOMEM;

	unsigned long len;
	struct msm_pmem_region *region;

	region = kmalloc(sizeof(struct msm_pmem_region), GFP_KERNEL);
	if (!region)
		goto out;
#ifdef CONFIG_MSM_MULTIMEDIA_USE_ION
	region->handle = ion_import_fd(client, info->fd);
	if (IS_ERR_OR_NULL(region->handle))
		goto out1;
	if (ion_map_iommu(client, region->handle, CAMERA_DOMAIN, GEN_POOL,
				  SZ_4K, 0, &paddr, &len, UNCACHED, 0) < 0)
		goto out2;
#elif CONFIG_ANDROID_PMEM
	rc = get_pmem_file(info->fd, &paddr, &kvstart, &len, &file);
	if (rc < 0) {
		pr_err("%s: get_pmem_file fd %d error %d\n",
				__func__, info->fd, rc);
		goto out1;
	}
	region->file = file;
#else
	paddr = 0;
	file = NULL;
	kvstart = 0;
#endif
	if (!info->len)
		info->len = len;
	rc = check_pmem_info(info, len);
	if (rc < 0)
		goto out3;
	paddr += info->offset;
	len = info->len;

	if (check_overlap(ptype, paddr, len) < 0) {
		rc = -EINVAL;
		goto out3;
	}

	CDBG("%s: type %d, active flag %d, paddr 0x%lx, vaddr 0x%lx\n",
		__func__, info->type, info->active, paddr,
		(unsigned long)info->vaddr);

	INIT_HLIST_NODE(&region->list);
	region->paddr = paddr;
	region->len = len;
	memcpy(&region->info, info, sizeof(region->info));
	D("%s Adding region to list with type %d\n", __func__,
						region->info.type);
	D("%s pmem_stats address is 0x%p\n", __func__, ptype);
	hlist_add_head(&(region->list), ptype);

	return 0;
out3:
#ifdef CONFIG_MSM_MULTIMEDIA_USE_ION
	ion_unmap_iommu(client, region->handle, CAMERA_DOMAIN, GEN_POOL);
#endif
#ifdef CONFIG_MSM_MULTIMEDIA_USE_ION
out2:
	ion_free(client, region->handle);
#elif CONFIG_ANDROID_PMEM
	put_pmem_file(region->file);
#endif
out1:
	kfree(region);
out:
	return rc;
}