/**
 * b1_node_free() - destroy a node
 * @node: node to destroy, or NULL
 *
 * This destroys the given node and releases all linked resources. This implies
 * a call to b1_node_destroy(), if not already done by the caller.
 *
 * Return: NULL is returned.
 */
_c_public_ B1Node *b1_node_free(B1Node *node) {
        CRBNode *n;

        /* NULL is a no-op, mirroring free() semantics */
        if (!node)
                return NULL;

        assert(node->owner);

        b1_node_release(node);

        /* drop every registered interface implementation: unlink from the
         * rbtree, drop the interface reference, free the wrapper */
        while ((n = c_rbtree_first(&node->implementations))) {
                B1Implementation *implementation = c_container_of(n, B1Implementation, rb);

                c_rbtree_remove(&node->implementations, n);
                b1_interface_unref(implementation->interface);
                free(implementation);
        }

        /* if the node name is set, it means this node is owned by a message or
         * peer object, which will be responsible for cleaning it up */
        if (!node->name && node->id != BUS1_HANDLE_INVALID) {
                b1_node_destroy(node);
                /* unlink from the owning peer's node index (keyed by id) */
                c_rbtree_remove(&node->owner->nodes, &node->rb);
        }

        b1_peer_unref(node->owner);
        free(node);

        return NULL;
}
/**
 * implementations_compare() - rbtree comparator for interface implementations
 * @t: tree being searched (unused)
 * @k: lookup key, an interface name (const char *)
 * @n: tree node embedded in a B1Implementation
 *
 * Orders implementations by their interface name. The key is compared
 * first, per the CRBTree comparator convention.
 *
 * Return: <0, 0, >0 if @k sorts before, equal to, or after @n.
 */
static int implementations_compare(CRBTree *t, void *k, CRBNode *n) {
        const char *key_name = k;
        B1Implementation *impl = c_container_of(n, B1Implementation, rb);

        return strcmp(key_name, impl->interface->name);
}
/**
 * _cg_memory_stack_rewind() - reset a memory stack for reuse
 * @stack: the memory stack to rewind
 *
 * Rewinds the allocator back to the first sub-stack with a zero offset,
 * so previously allocated memory can be recycled without freeing the
 * underlying sub-stacks.
 */
void
_cg_memory_stack_rewind(cg_memory_stack_t *stack)
{
    cg_memory_sub_stack_t *first =
        c_container_of(stack->sub_stacks.next, cg_memory_sub_stack_t, link);

    stack->sub_stack = first;
    stack->sub_stack_offset = 0;
}
/**
 * b1_handle_acquire() - acquire (or create) the handle for a handle id
 * @handlep:   output argument for the acquired handle
 * @peer:      peer the handle id belongs to
 * @handle_id: kernel handle id, or BUS1_HANDLE_INVALID
 *
 * NOTE(review): this file contains a SECOND definition of b1_handle_acquire()
 * with a different parameter order and body (the variant tracking
 * ref_kernel/live state). Two external definitions of the same symbol cannot
 * coexist in one translation unit — one of the two must be removed or
 * renamed; confirm which variant callers expect.
 *
 * Return: 0 on success, negative error code on failure.
 */
int b1_handle_acquire(B1Handle **handlep, B1Peer *peer, uint64_t handle_id) {
        B1Handle *handle;
        CRBNode **slot, *p;
        int r;

        assert(handlep);
        assert(peer);

        /* BUS1_HANDLE_INVALID means "no handle": report success with NULL */
        if (handle_id == BUS1_HANDLE_INVALID) {
                *handlep = NULL;
                return 0;
        }

        slot = c_rbtree_find_slot(&peer->handles, handles_compare, &handle_id, &p);
        if (slot) {
                /* id not indexed yet: create a fresh handle and insert it */
                r = b1_handle_new(peer, handle_id, &handle);
                if (r < 0)
                        return r;

                c_rbtree_add(&peer->handles, p, slot, &handle->rb);
        } else {
                /* id already known: take a userspace reference and then
                 * release — presumably dropping a now-redundant kernel
                 * reference on the already-tracked handle; TODO confirm
                 * against b1_handle_release() */
                handle = c_container_of(p, B1Handle, rb);
                b1_handle_ref(handle);
                b1_handle_release(handle);
        }

        *handlep = handle;

        return 0;
}
/**
 * _cg_memory_stack_alloc() - allocate @bytes from a memory stack
 * @stack: the memory stack to allocate from
 * @bytes: number of bytes requested
 *
 * Fast path: bump-allocates from the current sub-stack. Otherwise scans the
 * remaining sub-stacks for one large enough, and as a last resort grows the
 * stack with a new sub-stack.
 *
 * Return: pointer to the allocated region (valid until the stack is rewound
 * or freed).
 */
void *
_cg_memory_stack_alloc(cg_memory_stack_t *stack, size_t bytes)
{
    cg_memory_sub_stack_t *sub_stack;
    void *ret;

    /* fast path: room left in the current sub-stack
     * (assumes sub_stack_offset <= sub_stack->bytes, so the unsigned
     * subtraction cannot wrap — TODO confirm invariant) */
    sub_stack = stack->sub_stack;
    if (C_LIKELY(sub_stack->bytes - stack->sub_stack_offset >= bytes)) {
        ret = sub_stack->data + stack->sub_stack_offset;
        stack->sub_stack_offset += bytes;
        return ret;
    }

    /* If the stack has been rewound and then a large initial allocation
     * is made then we may need to skip over one or more of the
     * sub-stacks that are too small for the requested allocation
     * size... */
    for (c_list_set_iterator(sub_stack->link.next, sub_stack, link);
         &sub_stack->link != &stack->sub_stacks;
         c_list_set_iterator(sub_stack->link.next, sub_stack, link)) {
        if (sub_stack->bytes >= bytes) {
            ret = sub_stack->data;
            stack->sub_stack = sub_stack;
            stack->sub_stack_offset = bytes;
            return ret;
        }
    }

    /* Finally if we couldn't find a free sub-stack with enough space
     * for the requested allocation we allocate another sub-stack that's
     * twice as big as the last sub-stack or twice as big as the
     * requested allocation if that's bigger. */
    sub_stack = c_container_of(stack->sub_stacks.prev, cg_memory_sub_stack_t, link);
    _cg_memory_stack_add_sub_stack(stack, MAX(sub_stack->bytes, bytes) * 2);
    sub_stack = c_container_of(stack->sub_stacks.prev, cg_memory_sub_stack_t, link);
    /* NOTE(review): this path only works if _cg_memory_stack_add_sub_stack()
     * resets stack->sub_stack to the new sub-stack and sub_stack_offset to 0;
     * otherwise stack->sub_stack is left stale and the `+=` below records a
     * wrong offset — verify against the helper's implementation */
    stack->sub_stack_offset += bytes;
    return sub_stack->data;
}
/**
 * handles_compare() - rbtree comparator for B1Handle entries
 * @t: tree being searched (unused)
 * @k: lookup key, pointing at a uint64_t handle id
 * @n: tree node embedded in a B1Handle (member rb)
 *
 * Orders handles by their numeric id, key-first per the CRBTree
 * comparator convention.
 *
 * Return: <0, 0, >0 if the key id is less than, equal to, or greater
 * than the node's id.
 */
int handles_compare(CRBTree *t, void *k, CRBNode *n) {
        uint64_t key_id = *(uint64_t *)k;
        B1Handle *handle = c_container_of(n, B1Handle, rb);

        return (key_id < handle->id) ? -1 : (key_id > handle->id) ? 1 : 0;
}
/**
 * nodes_compare() - rbtree comparator for B1Node entries
 * @t: tree being searched (unused)
 * @k: lookup key, pointing at a uint64_t node id
 * @n: tree node embedded in a B1Node (member rb)
 *
 * Orders nodes by their numeric id, key-first per the CRBTree
 * comparator convention.
 *
 * Return: <0, 0, >0 if the key id is less than, equal to, or greater
 * than the node's id.
 */
int nodes_compare(CRBTree *t, void *k, CRBNode *n) {
        uint64_t key_id = *(uint64_t *)k;
        B1Node *node = c_container_of(n, B1Node, rb);

        return (key_id < node->id) ? -1 : (key_id > node->id) ? 1 : 0;
}
/**
 * b1_handle_lookup() - find a handle by id in a peer's handle index
 * @peer:      peer whose handle tree is searched; must not be NULL
 * @handle_id: handle id to look up
 *
 * Does not take a reference; the caller must manage lifetime.
 *
 * Return: the matching B1Handle, or NULL if @handle_id is not indexed.
 */
B1Handle *b1_handle_lookup(B1Peer *peer, uint64_t handle_id) {
        CRBNode *match;

        assert(peer);

        match = c_rbtree_find_node(&peer->handles, handles_compare, &handle_id);

        return match ? c_container_of(match, B1Handle, rb) : NULL;
}
/**
 * b1_node_lookup() - find a node by id in a peer's node index
 * @peer:    peer whose node tree is searched; must not be NULL
 * @node_id: node id to look up
 *
 * Does not take a reference; the caller must manage lifetime.
 *
 * Return: the matching B1Node, or NULL if @node_id is not indexed.
 */
B1Node *b1_node_lookup(B1Peer *peer, uint64_t node_id) {
        CRBNode *n;

        assert(peer);

        n = c_rbtree_find_node(&peer->nodes, nodes_compare, &node_id);
        if (!n)
                return NULL;

        /* Entries in peer->nodes are linked through B1Node.rb — the same
         * member nodes_compare() uses and that b1_node_free() removes via
         * c_rbtree_remove(&node->owner->nodes, &node->rb). The container
         * must be recovered through that same member; the previous
         * 'rb_nodes' offset computed a wrong base pointer. */
        return c_container_of(n, B1Node, rb);
}
/**
 * _cg_memory_stack_free() - destroy a memory stack
 * @stack: the memory stack to free
 *
 * Releases every sub-stack still linked into the stack, then frees the
 * stack object itself.
 */
void
_cg_memory_stack_free(cg_memory_stack_t *stack)
{
    for (;;) {
        cg_memory_sub_stack_t *sub;

        if (c_list_empty(&stack->sub_stacks))
            break;

        /* pop from the front of the list and release */
        sub = c_container_of(stack->sub_stacks.next, cg_memory_sub_stack_t, link);
        c_list_remove(&sub->link);
        _cg_memory_sub_stack_free(sub);
    }

    c_slice_free(cg_memory_stack_t, stack);
}
/**
 * b1_node_get_interface() - look up an interface implemented by a node
 * @node: node to query; must not be NULL
 * @name: interface name to search for; must not be NULL
 *
 * Searches the node's implementation tree by interface name. Does not
 * take a reference on the returned interface.
 *
 * Return: the implemented B1Interface, or NULL if @name is not implemented.
 */
B1Interface *b1_node_get_interface(B1Node *node, const char *name) {
        CRBNode *found;

        assert(node);
        assert(name);

        found = c_rbtree_find_node(&node->implementations, implementations_compare, name);

        return found ? c_container_of(found, B1Implementation, rb)->interface : NULL;
}
/**
 * b1_handle_acquire() - acquire (or create) the handle for a handle id
 * @peer:      peer the handle id belongs to
 * @handlep:   output argument for the acquired handle
 * @handle_id: kernel handle id, or BUS1_HANDLE_INVALID
 *
 * NOTE(review): this file also contains ANOTHER definition of
 * b1_handle_acquire() with a different parameter order and body. Two
 * external definitions of the same symbol cannot coexist in one
 * translation unit — one must be removed or renamed; confirm which
 * variant callers expect.
 *
 * Return: 0 on success, negative error code on failure.
 */
int b1_handle_acquire(B1Peer *peer, B1Handle **handlep, uint64_t handle_id) {
        B1Handle *handle;
        CRBNode **slot, *p;
        int r;

        assert(peer);
        assert(handlep);

        /* BUS1_HANDLE_INVALID means "no handle": report success with NULL */
        if (handle_id == BUS1_HANDLE_INVALID) {
                *handlep = NULL;
                return 0;
        }

        slot = c_rbtree_find_slot(&peer->handles, handles_compare, &handle_id, &p);
        if (slot) {
                /* id not indexed yet: create a handle, mark it live with a
                 * fresh kernel refcount, and insert it into the index */
                r = b1_handle_new(peer, &handle);
                if (r < 0)
                        return r;

                handle->ref_kernel = (CRef)C_REF_INIT;
                handle->live = true;
                handle->id = handle_id;
                c_rbtree_add(&peer->handles, p, slot, &handle->rb);
        } else {
                handle = c_container_of(p, B1Handle, rb);
                if (handle->live) {
                        c_ref_inc(&handle->ref_kernel);
                        /* reusing existing handle, drop redundant reference from kernel */
                        r = bus1_peer_handle_release(handle->holder->peer, handle->id);
                        if (r < 0)
                                return r;
                } else {
                        /* handle object existed but its kernel reference was
                         * gone: re-arm the kernel refcount and mark it live */
                        handle->ref_kernel = (CRef)C_REF_INIT;
                        handle->live = true;
                }
                /* userspace reference for the caller */
                c_ref_inc(&handle->ref);
        }

        *handlep = handle;

        return 0;
}
/**
 * root_nodes_compare() - rbtree comparator for named root nodes
 * @t: tree being searched (unused)
 * @k: lookup key, a node name (const char *)
 * @n: tree node embedded in a B1Node (member rb)
 *
 * Orders root nodes by name. The key is compared first, matching the
 * CRBTree comparator convention used by every other comparator in this
 * file (implementations_compare(), handles_compare(), nodes_compare()
 * all return <0 when the key sorts before the node). The previous
 * strcmp(node->name, name) inverted the sign, corrupting tree ordering
 * for inserts and lookups.
 *
 * Return: <0, 0, >0 if @k sorts before, equal to, or after @n.
 */
int root_nodes_compare(CRBTree *t, void *k, CRBNode *n) {
        B1Node *node = c_container_of(n, B1Node, rb);
        const char *name = k;

        return strcmp(name, node->name);
}