/*
 * Allocate a new d_tree token holding a private copy of `digest`.
 *
 * The copy is `digest_len` bytes (file-scope length for all digests).
 * Returns NULL if either allocation fails; on success the caller owns
 * both the node and its digest buffer.
 */
static struct d_tree *digest_new(unsigned char *digest)
{
	struct d_tree *token = malloc(sizeof(*token));

	if (!token)
		return NULL;

	rb_init_node(&token->t_node);

	token->digest = malloc(digest_len);
	if (!token->digest) {
		free(token);
		return NULL;
	}
	memcpy(token->digest, digest, digest_len);

	return token;
}
/*
 * Create a handle binding `client` to `buffer`.
 *
 * The handle starts with a single kref and takes its own reference on
 * the buffer (ion_buffer_get).  Returns ERR_PTR(-ENOMEM) when the
 * allocation fails.
 */
static struct ion_handle *ion_handle_create(struct ion_client *client,
					    struct ion_buffer *buffer)
{
	struct ion_handle *h = kzalloc(sizeof(*h), GFP_KERNEL);

	if (!h)
		return ERR_PTR(-ENOMEM);

	kref_init(&h->ref);
	rb_init_node(&h->node);
	h->client = client;
	/* The handle holds its own reference to the buffer. */
	ion_buffer_get(buffer);
	h->buffer = buffer;

	return h;
}
/*
 * Insert or refresh a cache entry keyed by `hash`.
 *
 * Lookup order: search the rb-tree for an existing entry; if the cache
 * is full and the key is new, recycle the LRU tail node instead of
 * allocating.  The (possibly recycled) node is updated with the new
 * image data and moved to the head of the LRU queue.
 *
 * Returns 0 on success, -1 on a NULL handle or allocation failure.
 */
int PushCache(INT h, int hash, int width, int height, int format, unsigned char * data)
{
    LPCACHE_NODE pNode;
    LPCACHE_HANDLE handle = (LPCACHE_HANDLE)h;
    int ret = 0;

    if (handle == GNull) {
        return -1;
    }

    /* Search for an existing entry with this key in the rb-tree. */
    pNode = rbt_search(&handle->mRBRoot, hash);

    if (handle->mCurCount >= handle->mMaxCount && pNode == GNull) {
        /* Cache full and key is new: evict the LRU tail so its node can
         * be recycled below.
         * NOTE(review): this 4-argument container_of appears to be a
         * project macro that assigns the resolved node to its first
         * argument (pNode) -- confirm against the macro definition. */
        container_of(pNode, dl_last(&(handle->mDLRoot)), CACHE_NODE, mDLNode);
#if defined( _DEBUG )
        LOGI("replace get last 0x%X\n", pNode);
#endif
    }

    if (pNode != GNull) {
        /* Recycle the node: unlink from the LRU queue and the rb-tree,
         * then rekey it. */
        dl_remove_node(&(pNode->mDLNode), &(handle->mDLRoot));
        rb_erase(&pNode->mRBNode, &handle->mRBRoot);
        pNode->mKey = hash;
    } else {
        pNode = (LPCACHE_NODE)GMemMalloc(sizeof(CACHE_NODE));
        if (pNode == GNull) {
            /* Fix: the allocation result was previously dereferenced
             * without a check, crashing on OOM. */
            return -1;
        }
        pNode->mKey = hash;
        handle->mCurCount++;
        cache_data_initial(&(pNode->mData));
    }

    cache_data_update(&(pNode->mData), width, height, format, data);

    /* Move to the front of the LRU queue and (re)insert into the tree. */
    dl_insert_node(&(pNode->mDLNode), GNull, &(handle->mDLRoot));
    rb_init_node(&pNode->mRBNode);
    rbt_insert(&handle->mRBRoot, pNode);

    return ret;
}
/*
 * Insert chunk `c` into the rb-tree rooted at `root`.
 *
 * find_insert_location returns NULL when c->base falls inside an
 * existing chunk (`parent`).  With `coalesce` set, that case grows the
 * existing chunk to cover `c` and frees `c`; without it, it is an
 * assertion failure (a block already contained in the heap).
 */
static void insert_chunk(struct rb_root *root, struct chunk *c, bool coalesce)
{
	struct chunk *parent = NULL;
	struct rb_node **link = find_insert_location(root, c->base, &parent);

	// XXX: If inserting [x|y] into a heap that has [z|w] with x<z<x+y,
	// find_insert_location will have no clue. I can't imagine that this
	// would cause any sort of bug, though...
	if (link == NULL && coalesce) {
		assert(parent != NULL);
		/* Extend the enclosing chunk just far enough to contain c. */
		parent->len = MAX(parent->len, c->len + c->base - parent->base);
		MM_FREE(c);
		return;
	}

	assert(link != NULL && "allocated a block already contained in the heap?");

	rb_init_node(&c->nobe);
	rb_link_node(&c->nobe, parent == NULL ? NULL : &parent->nobe, link);
	rb_insert_color(&c->nobe, root);
}
/*
 * Allocate and register a dupe_extents record for a duplicated region.
 *
 * Copies `digest_len` bytes of `digest` into de_hash, records `len`,
 * initializes the extent list/tree/node, links the record into `res`
 * via insert_dupe_extents, and seeds de_score with `len`.
 * Returns NULL on allocation failure.
 */
static struct dupe_extents *dupe_extents_new(struct results_tree *res,
					     unsigned char *digest,
					     uint64_t len)
{
	struct dupe_extents *dext = calloc_dupe_extents(1);

	if (dext == NULL)
		return NULL;

	memcpy(dext->de_hash, digest, digest_len);
	dext->de_len = len;

	INIT_LIST_HEAD(&dext->de_extents);
	dext->de_extents_root = RB_ROOT;
	rb_init_node(&dext->de_node);

	insert_dupe_extents(res, dext);

	/* NOTE(review): de_score is assigned after insertion; confirm the
	 * insert path does not order records by de_score. */
	dext->de_score = len;

	return dext;
}
/*
 * Allocate and initialize an OSD request.
 *
 * The request struct comes from the client's mempool when `use_mempool`
 * is set, otherwise from kzalloc.  The function also allocates the
 * reply message, an optional trailing pagelist (when any op needs one),
 * and the request message itself (sized for the ops, object name, and
 * snap context).  On any failure after the struct is allocated, the
 * kref taken here is dropped via ceph_osdc_put_request, which frees
 * everything acquired so far.  Returns NULL on failure.
 */
struct ceph_osd_request *ceph_osdc_alloc_request(struct ceph_osd_client *osdc,
						 int flags,
						 struct ceph_snap_context *snapc,
						 struct ceph_osd_req_op *ops,
						 bool use_mempool,
						 gfp_t gfp_flags,
						 struct page **pages,
						 struct bio *bio)
{
	struct ceph_osd_request *req;
	struct ceph_msg *msg;
	int needs_trail;
	int num_op = get_num_ops(ops, &needs_trail);
	size_t msg_size = sizeof(struct ceph_osd_request_head);

	msg_size += num_op * sizeof(struct ceph_osd_op);

	if (use_mempool) {
		req = mempool_alloc(osdc->req_mempool, gfp_flags);
		/* Fix: only zero the struct if the allocation succeeded;
		 * previously a NULL return was dereferenced by memset
		 * before the NULL check below. */
		if (req)
			memset(req, 0, sizeof(*req));
	} else {
		req = kzalloc(sizeof(*req), gfp_flags);
	}
	if (req == NULL)
		return NULL;

	req->r_osdc = osdc;
	req->r_mempool = use_mempool;

	kref_init(&req->r_kref);
	init_completion(&req->r_completion);
	init_completion(&req->r_safe_completion);
	rb_init_node(&req->r_node);
	INIT_LIST_HEAD(&req->r_unsafe_item);
	INIT_LIST_HEAD(&req->r_linger_item);
	INIT_LIST_HEAD(&req->r_linger_osd);
	INIT_LIST_HEAD(&req->r_req_lru_item);
	INIT_LIST_HEAD(&req->r_osd_item);

	req->r_flags = flags;

	WARN_ON((flags & (CEPH_OSD_FLAG_READ|CEPH_OSD_FLAG_WRITE)) == 0);

	/* create reply message */
	if (use_mempool)
		msg = ceph_msgpool_get(&osdc->msgpool_op_reply, 0);
	else
		msg = ceph_msg_new(CEPH_MSG_OSD_OPREPLY,
				   OSD_OPREPLY_FRONT_LEN, gfp_flags, true);
	if (!msg) {
		ceph_osdc_put_request(req);
		return NULL;
	}
	req->r_reply = msg;

	/* allocate space for the trailing data */
	if (needs_trail) {
		req->r_trail = kmalloc(sizeof(struct ceph_pagelist), gfp_flags);
		if (!req->r_trail) {
			ceph_osdc_put_request(req);
			return NULL;
		}
		ceph_pagelist_init(req->r_trail);
	}

	/* create request message; allow space for oid */
	msg_size += MAX_OBJ_NAME_SIZE;
	if (snapc)
		msg_size += sizeof(u64) * snapc->num_snaps;
	if (use_mempool)
		msg = ceph_msgpool_get(&osdc->msgpool_op, 0);
	else
		msg = ceph_msg_new(CEPH_MSG_OSD_OP, msg_size, gfp_flags, true);
	if (!msg) {
		ceph_osdc_put_request(req);
		return NULL;
	}

	memset(msg->front.iov_base, 0, msg->front.iov_len);

	req->r_request = msg;
	req->r_pages = pages;
#ifdef CONFIG_BLOCK
	if (bio) {
		req->r_bio = bio;
		bio_get(req->r_bio);
	}
#endif

	return req;
}