void *kmalloc(int size) { AcquireSRWLockExclusive(&heap->rw_lock); /* Find pool */ int p = -1; for (int i = 0; i < POOL_COUNT; i++) if (size <= heap->pools[i].objsize) { p = i; break; } if (p == -1) { log_error("kmalloc(%d): size too large.\n", size); ReleaseSRWLockExclusive(&heap->rw_lock); return NULL; } /* Find a bucket with a free object slot */ if (!heap->pools[p].first) heap->pools[p].first = alloc_bucket(heap->pools[p].objsize); struct bucket *current = heap->pools[p].first; for (;;) { if (!current) { log_error("kmalloc(%d): out of memory\n", size); ReleaseSRWLockExclusive(&heap->rw_lock); return NULL; } /* Current bucket has a free object, return it */ if (current->first_free) { void *c = current->first_free; current->first_free = *(void**)c; current->ref_cnt++; ReleaseSRWLockExclusive(&heap->rw_lock); return c; } /* Next bucket does not exist, allocate one */ if (!current->next_bucket) current->next_bucket = alloc_bucket(heap->pools[p].objsize); /* Move to next bucket */ current = current->next_bucket; } }
/* Return a pointer to a free element slot in *bp, replacing the bucket with
 * a larger one when no slot is available.
 *
 * The bucket's element array holds nmax usable slots plus one trailing
 * sentinel slot that is never handed out (its key stays 0 so scans can
 * terminate on it).  When growing, capacity is doubled (rounded up), the
 * old elements are copied across, the old bucket is freed, and every new
 * slot — including the sentinel — has its key zeroed.
 */
elem *cilkred_map::grow(__cilkrts_worker *w, bucket **bp)
{
    bucket *old = *bp;
    size_t old_max = old ? old->nmax : 0;

    if (old) {
        /* Reuse an existing empty slot if there is one; the sentinel at
         * index nmax is deliberately excluded from the scan. */
        for (size_t j = 0; j < old_max; ++j) {
            if (old->el[j].key == 0)
                return &(old->el[j]);
        }
    }

    verify_current_wkr(w);

    /* No free slot: allocate a bucket with (at least) double the capacity. */
    size_t new_max = roundup(2 * old_max);
    bucket *fresh = alloc_bucket(w, new_max);

    /* Migrate the old contents into the new bucket. */
    for (size_t j = 0; j < old_max; ++j)
        fresh->el[j] = old->el[j];

    /* Release the old bucket (handles a NULL *bp) and install the new one. */
    free_bucket(w, bp);
    *bp = fresh;

    /* Zero the keys of every newly added slot, sentinel included. */
    for (size_t j = old_max; j <= new_max; ++j)
        fresh->el[j].key = 0;

    /* First freshly added slot is the caller's new element. */
    return &(fresh->el[old_max]);
}