/*
 * Allocate a single object of the given type.
 *
 * @param allocator  Allocator to draw untyped memory and cap slots from.
 * @param item_type  seL4 object type to create.
 * @param item_size  Size argument for the object (interpreted per type).
 *
 * @return The cap to the newly created object, or 0 on failure (no untyped
 *         memory of the right size, or no free cap slots).
 */
seL4_CPtr allocator_alloc_kobject(struct allocator *allocator,
                                  seL4_Word item_type, seL4_Word item_size)
{
    unsigned long size_bits;
    seL4_CPtr untyped_memory;
    /* Zero-initialise so that 'cap_range.first' is a well-defined 0 — not an
     * uninitialised read — if the retype below allocates zero items. */
    struct cap_range cap_range = {0};
    int result;
    UNUSED_NDEBUG(result);

    /* Allocate an untyped memory item of the right size. */
    size_bits = vka_get_object_size(item_type, item_size);
    untyped_memory = allocator_alloc_untyped(allocator, size_bits);
    if (!untyped_memory) {
        return 0;
    }

    /* Retype the untyped memory into one object of the requested type. */
    result = allocator_retype_untyped_memory(allocator, untyped_memory,
                                             item_type, item_size, 1, &cap_range);

    /* We should have gotten either zero items (if we ran out of caps), or one
     * item (if everything went well). If we get more than one, we
     * miscalculated our sizes. */
    assert((result == 0) || (result == 1));

    /* Return the first item (or zero if none were allocated). */
    return cap_range.first;
}
uint32_t _utspace_trickle_alloc(struct allocman *alloc, void *_trickle, uint32_t size_bits, seL4_Word type, cspacepath_t *slot, int *error) { uint32_t sel4_size_bits; int _error; utspace_trickle_t *trickle = (utspace_trickle_t*)_trickle; struct utspace_trickle_node *node; uint32_t offset; uint32_t mem_offset; /* get size of untyped call */ sel4_size_bits = get_sel4_object_size(type, size_bits); if (size_bits != vka_get_object_size(type, sel4_size_bits) || size_bits == 0) { SET_ERROR(error, 1); return 0; } assert(size_bits < 32); if (!trickle->heads[size_bits]) { _error = _refill_pool(alloc, trickle, size_bits); if (_error) { SET_ERROR(error, _error); return 0; } } node = trickle->heads[size_bits]; offset = CLZ(node->bitmap); mem_offset = node->offset + (offset << size_bits); if (slot) { _error = seL4_Untyped_RetypeAtOffset(node->ut->capPtr, type, mem_offset, sel4_size_bits, slot->root, slot->dest, slot->destDepth, slot->offset, 1); if (_error != seL4_NoError) { SET_ERROR(error, 1); return 0; } } node->bitmap &= ~BIT(31 - offset); if (node->bitmap == 0) { _remove_node(&trickle->heads[size_bits], node); } SET_ERROR(error, 0); return _make_cookie(node, offset); }
uint32_t _utspace_split_alloc(allocman_t *alloc, void *_split, uint32_t size_bits, seL4_Word type, cspacepath_t *slot, int *error) { utspace_split_t *split = (utspace_split_t*)_split; uint32_t sel4_size_bits; int sel4_error; struct utspace_split_node *node; /* get size of untyped call */ sel4_size_bits = get_sel4_object_size(type, size_bits); if (size_bits != vka_get_object_size(type, sel4_size_bits) || size_bits == 0) { SET_ERROR(error, 1); return 0; } /* make sure we have an available untyped */ if (_refill_pool(alloc, split, size_bits)) { /* out of memory? */ SET_ERROR(error, 1); return 0; } /* use the first node for lack of a better one */ node = split->heads[size_bits]; /* Perform the untyped retype */ #if defined(CONFIG_KERNEL_STABLE) sel4_error = seL4_Untyped_RetypeAtOffset(node->ut.capPtr, type, 0, sel4_size_bits, slot->root, slot->dest, slot->destDepth, slot->offset, 1); #else sel4_error = seL4_Untyped_Retype(node->ut.capPtr, type, sel4_size_bits, slot->root, slot->dest, slot->destDepth, slot->offset, 1); #endif if (sel4_error != seL4_NoError) { /* Well this shouldn't happen */ SET_ERROR(error, 1); return 0; } /* remove the node */ _remove_node(&split->heads[size_bits], node); SET_ERROR(error, 0); /* return the node as a cookie */ return (uint32_t)node; }
/*
 * Allocate an object of 'type' from the split allocator's pools.
 *
 * @param alloc      Owning allocman instance (passed through to _refill_pool).
 * @param _split     Opaque utspace_split_t state for this allocator.
 * @param size_bits  Requested object size in bits; must round-trip through
 *                   vka_get_object_size and be non-zero.
 * @param type       seL4 object type to create.
 * @param slot       Destination cspace slot for the new cap.
 * @param paddr      Exact physical address to allocate at, or
 *                   ALLOCMAN_NO_PADDR for "anywhere".
 * @param canBeDev   Whether device / device-mem pools may be used.
 * @param error      Optional out-parameter: set to 0 on success, non-zero on
 *                   failure (via SET_ERROR).
 *
 * @return An opaque cookie (the free-list node pointer) on success, 0 on
 *         failure.
 */
seL4_Word _utspace_split_alloc(allocman_t *alloc, void *_split, size_t size_bits, seL4_Word type,
                               const cspacepath_t *slot, uintptr_t paddr, bool canBeDev, int *error)
{
    utspace_split_t *split = (utspace_split_t*)_split;
    size_t sel4_size_bits;
    int sel4_error;
    struct utspace_split_node *node;
    /* get size of untyped call */
    sel4_size_bits = get_sel4_object_size(type, size_bits);
    if (size_bits != vka_get_object_size(type, sel4_size_bits) || size_bits == 0) {
        SET_ERROR(error, 1);
        return 0;
    }
    struct utspace_split_node **head = NULL;
    /* if we're allocating at a particular paddr then we will just trawl through every pool
     * and see if we can find out which one has what we want */
    if (paddr != ALLOCMAN_NO_PADDR) {
        /* Search order: device pools first (if allowed), then regular RAM. */
        if (canBeDev) {
            head = find_head_for_paddr(split->dev_heads, paddr, size_bits);
            if (!head) {
                head = find_head_for_paddr(split->dev_mem_heads, paddr, size_bits);
            }
        }
        if (!head) {
            head = find_head_for_paddr(split->heads, paddr, size_bits);
        }
        if (!head) {
            SET_ERROR(error, 1);
            ZF_LOGE("Failed to find any untyped capable of creating an object at address %p", (void*)paddr);
            return 0;
        }
        /* Split untypeds in the chosen pool until one of exactly size_bits
         * covering 'paddr' exists. */
        if (_refill_pool(alloc, split, head, size_bits, paddr)) {
            /* out of memory? */
            SET_ERROR(error, 1);
            ZF_LOGV("Failed to refill pool to allocate object of size %zu", size_bits);
            return 0;
        }
        /* search for the node we want to use. We have the advantage of knowing that
         * due to objects being size aligned that the base paddr of the untyped will
         * be exactly the paddr we want */
        for (node = head[size_bits]; node && node->paddr != paddr; node = node->next);
        /* _refill_pool should not have returned if this wasn't possible */
        assert(node);
    } else {
        /* if we can use device memory then preference allocating from there */
        if (canBeDev) {
            /* NOTE(review): if the device-mem refill fails here we report
             * out-of-memory rather than falling back to the regular pool —
             * confirm this is the intended policy. */
            if (_refill_pool(alloc, split, split->dev_mem_heads, size_bits, ALLOCMAN_NO_PADDR)) {
                /* out of memory? */
                SET_ERROR(error, 1);
                ZF_LOGV("Failed to refill pool to allocate object of size %zu", size_bits);
                return 0;
            }
            head = split->dev_mem_heads;
        }
        if (!head) {
            /* Fall back to the regular (non-device) pool. */
            head = split->heads;
            if (_refill_pool(alloc, split, head, size_bits, ALLOCMAN_NO_PADDR)) {
                /* out of memory? */
                SET_ERROR(error, 1);
                ZF_LOGV("Failed to refill pool to allocate object of size %zu", size_bits);
                return 0;
            }
        }
        /* use the first node for lack of a better one */
        node = head[size_bits];
    }
    /* Perform the untyped retype */
    sel4_error = seL4_Untyped_Retype(node->ut.capPtr, type, sel4_size_bits,
                                     slot->root, slot->dest, slot->destDepth, slot->offset, 1);
    if (sel4_error != seL4_NoError) {
        /* Well this shouldn't happen: the pool claimed the untyped was free. */
        ZF_LOGE("Failed to retype untyped, error %d\n", sel4_error);
        SET_ERROR(error, 1);
        return 0;
    }
    /* remove the node from the free list — it now backs a live object */
    _remove_node(&head[size_bits], node);
    SET_ERROR(error, 0);
    /* return the node as a cookie */
    return (seL4_Word)node;
}