void task_manager::receive_result( IGenericStream* inStream )
{
	// A result message starts with the id of the pool it belongs to.
	// Read that id first, then hand the rest of the stream to the
	// matching pool so it can deserialize its own result payload.
	u8 poolIndex = u8(-1);  // sentinel: overwritten by read_task_pool
	read_task_pool( inStream, poolIndex );
	pools[poolIndex]->receive_result( inStream );
}
/* Allocate a block of at least @size bytes from memory pool @p.
 *
 * Implements a buddy-style allocation: levels are ordered from the
 * largest block size (level 0) down by a factor of 4 per level.
 * On success fills in @block (data pointer plus pool/level/block id)
 * and returns 0.  Returns -ENOMEM when no level can satisfy the
 * request, or -EAGAIN when a racing allocator stole the candidate
 * block (caller-side loop is expected to retry; see comment below).
 */
static int pool_alloc(struct k_mem_pool *p, struct k_mem_block *block,
		      size_t size)
{
	/* VLA keyed on the pool's level count: per-level block sizes,
	 * computed on the fly so they need not be stored in RAM.
	 */
	size_t lsizes[p->n_levels];
	int i, alloc_l = -1, free_l = -1, from_l;
	void *blk = NULL;

	/* Walk down through levels, finding the one from which we
	 * want to allocate and the smallest one with a free entry
	 * from which we can split an allocation if needed.  Along the
	 * way, we populate an array of sizes for each level so we
	 * don't need to waste RAM storing it.
	 */
	lsizes[0] = _ALIGN4(p->max_sz);
	for (i = 0; i < p->n_levels; i++) {
		if (i > 0) {
			/* Each level's blocks are a quarter the size of
			 * the level above, kept 4-byte aligned.
			 */
			lsizes[i] = _ALIGN4(lsizes[i-1] / 4);
		}

		/* Once blocks get smaller than the request, stop:
		 * alloc_l stays at the last level that still fits.
		 */
		if (lsizes[i] < size) {
			break;
		}

		alloc_l = i;
		if (!level_empty(p, i)) {
			free_l = i;
		}
	}

	/* No level fits the request, or no free block exists at any
	 * usable level.
	 */
	if (alloc_l < 0 || free_l < 0) {
		block->data = NULL;
		return -ENOMEM;
	}

	/* Iteratively break the smallest enclosing block... */
	blk = alloc_block(p, free_l, lsizes[free_l]);

	if (!blk) {
		/* This can happen if we race with another allocator.
		 * It's OK, just back out and the timeout code will
		 * retry.  Note mild overloading: -EAGAIN isn't for
		 * propagation to the caller, it's to tell the loop in
		 * k_mem_pool_alloc() to try again synchronously.  But
		 * it means exactly what it says.
		 */
		return -EAGAIN;
	}

	/* Split the larger free block repeatedly until a block of the
	 * target level exists.  Re-checks level_empty() each pass in
	 * case a concurrent free/alloc changed the level's state.
	 */
	for (from_l = free_l;
	     level_empty(p, alloc_l) && from_l < alloc_l;
	     from_l++) {
		blk = break_block(p, blk, from_l, lsizes);
	}

	/* ... until we have something to return */
	block->data = blk;
	block->id.pool = pool_id(p);
	block->id.level = alloc_l;
	block->id.block = block_num(p, block->data, lsizes[alloc_l]);
	return 0;
}
bool application::is_asynchronous()
{
	// A negative pool id marks the application as asynchronous.
	// NOTE(review): this assumes pool_id() returns a *signed* type —
	// if it returns an unsigned value this comparison is always
	// false; confirm against the declaration of pool_id().
	const auto id = pool_id();
	return id < 0;
}