Error PodVectorBase::_reserve(size_t n, size_t sizeOfT) { PodVectorData* d = _d; if (d->capacity >= n) return kErrorOk; size_t nBytes = sizeof(PodVectorData) + n * sizeOfT; if (nBytes < n) return kErrorNoHeapMemory; if (d == &_nullData) { d = static_cast<PodVectorData*>(ASMJIT_ALLOC(nBytes)); if (!d) return kErrorNoHeapMemory; d->length = 0; } else { d = static_cast<PodVectorData*>(ASMJIT_REALLOC(d, nBytes)); if (!d) return kErrorNoHeapMemory; } d->capacity = n; _d = d; return kErrorOk; }
Error PodVectorBase::_reserve(size_t n, size_t sizeOfT) noexcept { Data* d = _d; if (d->capacity >= n) return kErrorOk; size_t nBytes = sizeof(Data) + n * sizeOfT; if (ASMJIT_UNLIKELY(nBytes < n)) return kErrorNoHeapMemory; if (d == &_nullData) { d = static_cast<Data*>(ASMJIT_ALLOC(nBytes)); if (ASMJIT_UNLIKELY(d == nullptr)) return kErrorNoHeapMemory; d->length = 0; } else { if (isDataStatic(this, d)) { Data* oldD = d; d = static_cast<Data*>(ASMJIT_ALLOC(nBytes)); if (ASMJIT_UNLIKELY(d == nullptr)) return kErrorNoHeapMemory; size_t len = oldD->length; d->length = len; ::memcpy(d->getData(), oldD->getData(), len * sizeOfT); } else { d = static_cast<Data*>(ASMJIT_REALLOC(d, nBytes)); if (ASMJIT_UNLIKELY(d == nullptr)) return kErrorNoHeapMemory; } } d->capacity = n; _d = d; return kErrorOk; }
//! Slow path of `Zone::alloc()` - called only when the current block cannot
//! satisfy `size` bytes. Either reuses an already-allocated successor block
//! (left over after a `Zone` reset) or allocates and links a new one.
//! Returns a pointer to `size` bytes of storage, or `nullptr` on failure.
void* Zone::_alloc(size_t size) noexcept {
  Block* curBlock = _block;
  // New blocks are at least `_blockSize` bytes, but large requests get a
  // block sized to fit them.
  size_t blockSize = Utils::iMax<size_t>(_blockSize, size);

  // The `_alloc()` method can only be called if there is not enough space
  // in the current block, see `alloc()` implementation for more details.
  ASMJIT_ASSERT(curBlock == &Zone_zeroBlock || curBlock->getRemainingSize() < size);

  // If the `Zone` has been reset the current block doesn't have to be the
  // last one. Check if there is a block that can be used instead of allocating
  // a new one. If there is a `next` block it's completely unused, we don't have
  // to check for remaining bytes.
  Block* next = curBlock->next;
  if (next != nullptr && next->getBlockSize() >= size) {
    // Reuse: bump `pos` past the reservation and hand out the block's start.
    next->pos = next->data + size;
    _block = next;
    return static_cast<void*>(next->data);
  }

  // Prevent arithmetic overflow.
  if (blockSize > ~static_cast<size_t>(0) - sizeof(Block))
    return nullptr;

  // `sizeof(Block) - sizeof(void*)` - presumably `Block::data` is a one-element
  // tail array whose size is folded into `blockSize`; confirm against the
  // `Block` declaration.
  Block* newBlock = static_cast<Block*>(ASMJIT_ALLOC(sizeof(Block) - sizeof(void*) + blockSize));
  if (newBlock == nullptr)
    return nullptr;

  // Initialize the new block: `size` bytes immediately reserved, `end` marks
  // the block's capacity.
  newBlock->pos = newBlock->data + size;
  newBlock->end = newBlock->data + blockSize;
  newBlock->prev = nullptr;
  newBlock->next = nullptr;

  if (curBlock != &Zone_zeroBlock) {
    // Link behind the current block (the zero sentinel is never linked).
    newBlock->prev = curBlock;
    curBlock->next = newBlock;

    // Does only happen if there is a next block, but the requested memory
    // can't fit into it. In this case a new buffer is allocated and inserted
    // between the current block and the next one.
    if (next != nullptr) {
      newBlock->next = next;
      next->prev = newBlock;
    }
  }

  _block = newBlock;
  return static_cast<void*>(newBlock->data);
}
Error Assembler::_reserve(size_t n) { size_t capacity = getCapacity(); if (n <= capacity) return kErrorOk; uint8_t* newBuffer; if (_buffer == NULL) newBuffer = static_cast<uint8_t*>(ASMJIT_ALLOC(n)); else newBuffer = static_cast<uint8_t*>(ASMJIT_REALLOC(_buffer, n)); if (newBuffer == NULL) return setError(kErrorNoHeapMemory); size_t offset = getOffset(); _buffer = newBuffer; _end = _buffer + n; _cursor = newBuffer + offset; return kErrorOk; }