// Appends C to this output section, assigning it the next section-relative
// offset (aligned to the chunk's own alignment requirement) and growing the
// section's virtual size.  For chunks that carry file data, the raw-data size
// is rounded up to the sector size as required by the on-disk format.
void OutputSection::addChunk(Chunk *C) {
  Chunks.push_back(C);
  C->setOutputSection(this);
  uint64_t Offset = alignTo(Header.VirtualSize, C->getAlign());
  C->setRVA(Offset);
  C->setOutputSectionOff(Offset);
  Offset += C->getSize();
  Header.VirtualSize = Offset;
  if (C->hasData())
    Header.SizeOfRawData = alignTo(Offset, SectorSize);
}
// Reserves and commits a read/write block of `size` bytes (rounded up to
// ALIGNMENT) from which allocate() later carves suballocations.
//
// Fix: the previous version recorded `m_size = size` even when VirtualAlloc
// failed, leaving m_start == nullptr while the allocator still believed it
// had capacity — allocate() would then return pointers computed from a null
// base.  We now record zero capacity on failure so allocate() fails cleanly.
LinearAllocator::LinearAllocator(size_t size) {
  size = alignTo(size, ALIGNMENT);
  m_start = reinterpret_cast<uint8_t*>(
      VirtualAlloc(NULL, size, MEM_COMMIT | MEM_RESERVE, PAGE_READWRITE));
  m_size = (m_start != nullptr) ? size : 0;
  m_currentSize = 0;
}
//---------------------------------------------------------- void ofRectangle::scaleTo(const ofRectangle& targetRect, ofScaleMode scaleMode) { if(scaleMode == OF_SCALEMODE_FIT) { scaleTo(targetRect, OF_ASPECT_RATIO_KEEP, OF_ALIGN_HORZ_CENTER, OF_ALIGN_VERT_CENTER); } else if(scaleMode == OF_SCALEMODE_FILL) { scaleTo(targetRect, OF_ASPECT_RATIO_KEEP_BY_EXPANDING, OF_ALIGN_HORZ_CENTER, OF_ALIGN_VERT_CENTER); } else if(scaleMode == OF_SCALEMODE_CENTER) { alignTo(targetRect, OF_ALIGN_HORZ_CENTER, OF_ALIGN_VERT_CENTER); } else if(scaleMode == OF_SCALEMODE_STRETCH_TO_FILL) { scaleTo(targetRect, OF_ASPECT_RATIO_IGNORE, OF_ALIGN_HORZ_CENTER, OF_ALIGN_VERT_CENTER); } else { scaleTo(targetRect, OF_ASPECT_RATIO_KEEP); } }
// Allocates `amount` bytes at `alignment` from the arena.  When the object
// needs a disposer, extra space for an ObjectHeader is reserved immediately
// before the returned address; the caller is expected to fill in the header
// and link it into the object list afterwards.
void* Arena::allocateBytes(size_t amount, uint alignment, bool hasDisposer) {
  if (hasDisposer) {
    // The header must be at least as aligned as the object so the object's
    // address is still correctly aligned after skipping past the header.
    alignment = kj::max(alignment, __alignof(ObjectHeader));
    // Pad the request so both the header and the object fit.
    amount += alignTo(sizeof(ObjectHeader), alignment);
  }
  void* result = allocateBytesInternal(amount, alignment);
  if (hasDisposer) {
    // Reserve space for the ObjectHeader, but don't add it to the object list yet.
    result = alignTo(reinterpret_cast<byte*>(result) + sizeof(ObjectHeader), alignment);
  }
  KJ_DASSERT(reinterpret_cast<uintptr_t>(result) % alignment == 0);
  return result;
}
//---------------------------------------------------------- void ofRectangle::alignTo(const ofRectangle& targetRect, ofAlignHorz sharedHorzAnchor, ofAlignVert sharedVertAnchor) { alignTo(targetRect, sharedHorzAnchor, sharedVertAnchor, sharedHorzAnchor, sharedVertAnchor); }
// Carves `amount` bytes (at `alignment`) out of the current chunk, or — when
// there is no current chunk or it is too small — allocates a fresh,
// geometrically larger chunk and serves the request from its beginning.
void* Arena::allocateBytesInternal(size_t amount, uint alignment) {
  if (currentChunk != nullptr) {
    ChunkHeader* chunk = currentChunk;
    byte* alignedPos = alignTo(chunk->pos, alignment);

    // Careful about overflow here.  Comparing (padding + amount) against the
    // remaining span avoids computing alignedPos + amount, which could wrap.
    if (amount + (alignedPos - chunk->pos) <= chunk->end - chunk->pos) {
      // There's enough space in this chunk.
      chunk->pos = alignedPos + amount;
      return alignedPos;
    }
  }

  // Not enough space in the current chunk.  Allocate a new one.
  // We need to allocate at least enough space for the ChunkHeader and the
  // requested allocation.
  // If the alignment is less than that of the chunk header, we'll need to
  // increase it.
  alignment = kj::max(alignment, __alignof(ChunkHeader));
  // If the ChunkHeader size does not match the alignment, we'll need to pad it up.
  amount += alignTo(sizeof(ChunkHeader), alignment);

  // Make sure we're going to allocate enough space.
  while (nextChunkSize < amount) {
    nextChunkSize *= 2;
  }

  // Allocate.
  byte* bytes = reinterpret_cast<byte*>(operator new(nextChunkSize));

  // Set up the ChunkHeader at the beginning of the allocation and push it
  // onto the chunk list so the arena can free it later.
  ChunkHeader* newChunk = reinterpret_cast<ChunkHeader*>(bytes);
  newChunk->next = chunkList;
  newChunk->pos = bytes + amount;
  newChunk->end = bytes + nextChunkSize;
  currentChunk = newChunk;
  chunkList = newChunk;
  // Grow geometrically so a long run of small allocations needs O(log n) chunks.
  nextChunkSize *= 2;

  // Move past the ChunkHeader to find the position of the allocated object.
  return alignTo(bytes + sizeof(ChunkHeader), alignment);
}
// We want to add a full redzone after every variable. // The larger the variable Size the larger is the redzone. // The resulting frame size is a multiple of Alignment. static size_t VarAndRedzoneSize(size_t Size, size_t Granularity, size_t Alignment) { size_t Res = 0; if (Size <= 4) Res = 16; else if (Size <= 16) Res = 32; else if (Size <= 128) Res = Size + 32; else if (Size <= 512) Res = Size + 64; else if (Size <= 4096) Res = Size + 128; else Res = Size + 256; return alignTo(std::max(Res, 2 * Granularity), Alignment); }
// Bump-allocates `sizeInByte` bytes with the requested alignment, returning
// nullptr when the arena cannot satisfy the request.
//
// Fix: the previous version rounded the *size* up to `alignment` but handed
// out `m_start + m_currentSize` unmodified, so a request whose alignment was
// larger than that of the preceding allocations could receive a misaligned
// pointer.  We now round the current offset itself up to `alignment`;
// m_start comes from VirtualAlloc (see the constructor) and is therefore
// page-aligned, so an aligned offset yields an aligned pointer.
void* LinearAllocator::allocate(size_t sizeInByte, size_t alignment) {
  if (m_currentSize >= m_size)
    return nullptr;
  size_t alignedOffset = alignTo(m_currentSize, alignment);
  size_t newSize = alignedOffset + sizeInByte;
  // Reject both exhaustion and arithmetic wrap-around.
  if (newSize > m_size || newSize < alignedOffset)
    return nullptr;
  void* ptr = m_start + alignedOffset;
  m_currentSize = newSize;
  return ptr;
}
//---------------------------------------------------------- void ofRectangle::scaleTo(const ofRectangle& targetRect, ofAspectRatioMode aspectRatioMode, ofAlignHorz modelHorzAnchor, ofAlignVert modelVertAnchor, ofAlignHorz thisHorzAnchor, ofAlignVert thisVertAnchor) { float tw = targetRect.getWidth(); // target width float th = targetRect.getHeight(); // target height float sw = getWidth(); // subject width float sh = getHeight(); // subject height if(aspectRatioMode == OF_ASPECT_RATIO_KEEP_BY_EXPANDING || aspectRatioMode == OF_ASPECT_RATIO_KEEP) { if(fabs(sw) >= FLT_EPSILON || fabs(sh) >= FLT_EPSILON) { float wRatio = fabs(tw) / fabs(sw); float hRatio = fabs(th) / fabs(sh); if(aspectRatioMode == OF_ASPECT_RATIO_KEEP_BY_EXPANDING) { scale(MAX(wRatio,hRatio)); } else if(aspectRatioMode == OF_ASPECT_RATIO_KEEP) { scale(MIN(wRatio,hRatio)); } } else { ofLogWarning("ofRectangle") << "scaleTo(): no scaling applied to avoid divide by zero, rectangle has 0 width and/or height: " << sw << "x" << sh; } } else if(aspectRatioMode == OF_ASPECT_RATIO_IGNORE) { width = tw; height = th; } else { ofLogWarning("ofRectangle") << "scaleTo(): unknown ofAspectRatioMode = " << aspectRatioMode << ", using OF_ASPECT_RATIO_IGNORE"; width = tw; height = th; } // now align if anchors are not ignored. alignTo(targetRect, modelHorzAnchor, modelVertAnchor, thisHorzAnchor, thisVertAnchor); }
// Emits NumTrampolines identical AArch64 trampolines into TrampolineMem,
// followed by an 8-byte-aligned slot holding ResolverAddr.  Each trampoline
// saves the return address in x17, loads the resolver address from that
// shared slot, and branches to it.
void OrcAArch64::writeTrampolines(uint8_t *TrampolineMem, void *ResolverAddr, unsigned NumTrampolines) {
  // The resolver-pointer slot lives right after the trampoline array.
  unsigned OffsetToPtr = alignTo(NumTrampolines * TrampolineSize, 8);
  memcpy(TrampolineMem + OffsetToPtr, &ResolverAddr, sizeof(void *));
  // OffsetToPtr is actually the offset from the PC for the 2nd instruction, so
  // subtract 32-bits.
  OffsetToPtr -= 4;
  uint32_t *Trampolines = reinterpret_cast<uint32_t *>(TrampolineMem);
  // Each trampoline is one instruction further from the pointer slot than the
  // next, hence the per-iteration decrement of the PC-relative offset.
  for (unsigned I = 0; I < NumTrampolines; ++I, OffsetToPtr -= TrampolineSize) {
    Trampolines[3 * I + 0] = 0xaa1e03f1; // mov x17, x30
    // NOTE(review): 0x58000010 is LDR (literal), Rt = x16 — it LOADS the
    // resolver address stored at the slot, it does not compute its address.
    // imm19 sits at bit 5 and is scaled by 4, hence the combined << 3.
    Trampolines[3 * I + 1] = 0x58000010 | (OffsetToPtr << 3); // ldr x16, Lptr
    Trampolines[3 * I + 2] = 0xd63f0200; // blr x16
  }
}
// Formats `fmt`/`ap` into `buff` (capacity *_size).  If the result does not
// fit, the buffer is (re)allocated to a 1024-byte-aligned size, *_size is
// updated, and the text is reformatted into the new buffer.  When no buffer
// is supplied, an exactly-sized vasprintf allocation is returned instead.
//
// Fixes over the previous version:
//  - the va_copy'd list is now always released with va_end (required by the
//    C standard; leaks on some ABIs otherwise);
//  - a negative vsnprintf return (encoding error) no longer wraps into a
//    huge size_t and triggers a bogus reallocation;
//  - a failed realloc/malloc no longer leads to vsnprintf writing through a
//    null pointer.
char* vformat(char *buff, size_t *_size, const char *fmt, va_list ap) {
  if (!buff || !_size || !*_size) {
    char *res = nullptr;
    vasprintf(&res, fmt, ap);
    return res;
  }
  va_list _ap;
  va_copy(_ap, ap);
  size_t size = *_size;
  int written = vsnprintf(buff, size, fmt, ap);
  if (written < 0) {
    // Formatting error: leave the caller's buffer untouched.
    va_end(_ap);
    return buff;
  }
  size_t new_size = static_cast<size_t>(written) + 1;
  if (new_size > size) {
    new_size = alignTo(new_size, 1024);
    // NOTE(review): `allocated` is defined outside this block — presumably it
    // records whether `buff` is heap-owned; confirm against the declaration.
    if (allocated) {
      buff = (char*)realloc(buff, new_size);
    } else {
      buff = (char*)malloc(new_size);
    }
    *_size = new_size;
    if (buff)
      vsnprintf(buff, new_size, fmt, _ap);
  }
  va_end(_ap);
  return buff;
}
// Convenience overload: resolve the screen-aligned position with no
// additional offset applied.
CCPoint Utils::alignTo(ScreenAlign screenAlign) {
    return alignTo(screenAlign, CCPointZero);
}
// Converts a VGPR count into the encoded VGPR-block field: round the count
// (at least 1) up to the encoding granule, then report granules minus one.
unsigned getNumVGPRBlocks(const MCSubtargetInfo *STI, unsigned NumVGPRs) {
  unsigned Granule = getVGPREncodingGranule(STI);
  unsigned Rounded = alignTo(std::max(1u, NumVGPRs), Granule);
  // VGPRBlocks is actual number of VGPR blocks minus 1.
  return Rounded / Granule - 1;
}