void _SYS_image_memDrawRect(uint16_t *dest, _SYSCubeID destCID,
    const _SYSAssetImage *im, unsigned dest_stride, unsigned frame,
    struct _SYSInt2 *srcXY, struct _SYSInt2 *size)
{
    if (!isAligned(dest, 2) || !isAligned(im) || !isAligned(srcXY) || !isAligned(size))
        return SvmRuntime::fault(F_SYSCALL_ADDR_ALIGN);

    struct _SYSInt2 lSrcXY, lSize;
    if (!SvmMemory::copyROData(lSrcXY, srcXY))
        return SvmRuntime::fault(F_SYSCALL_ADDRESS);
    if (!SvmMemory::copyROData(lSize, size))
        return SvmRuntime::fault(F_SYSCALL_ADDRESS);

    ImageDecoder decoder;
    if (destCID == _SYS_CUBE_ID_INVALID) {
        // Relocation disabled
        if (!decoder.init(im))
            return SvmRuntime::fault(F_BAD_ASSET_IMAGE);
    } else {
        // Relocate to a specific cube (validated by decoder.init)
        if (!decoder.init(im, destCID))
            return SvmRuntime::fault(F_BAD_ASSET_IMAGE);
    }

    ImageIter iter(decoder, frame, lSrcXY.x, lSrcXY.y, lSize.x, lSize.y);

    if (!SvmMemory::mapRAM(dest, iter.getDestBytes(dest_stride)))
        return SvmRuntime::fault(F_SYSCALL_ADDRESS);

    iter.copyToMem(dest, dest_stride);
}
void _SYS_image_BG1DrawRect(struct _SYSAttachedVideoBuffer *vbuf,
    const _SYSAssetImage *im, struct _SYSInt2 *destXY, unsigned frame,
    struct _SYSInt2 *srcXY, struct _SYSInt2 *size)
{
    if (!isAligned(vbuf) || !isAligned(im) || !isAligned(destXY) ||
        !isAligned(srcXY) || !isAligned(size))
        return SvmRuntime::fault(F_SYSCALL_ADDR_ALIGN);

    if (!SvmMemory::mapRAM(vbuf))
        return SvmRuntime::fault(F_SYSCALL_ADDRESS);

    struct _SYSInt2 lDestXY, lSrcXY, lSize;
    if (!SvmMemory::copyROData(lDestXY, destXY))
        return SvmRuntime::fault(F_SYSCALL_ADDRESS);
    if (!SvmMemory::copyROData(lSrcXY, srcXY))
        return SvmRuntime::fault(F_SYSCALL_ADDRESS);
    if (!SvmMemory::copyROData(lSize, size))
        return SvmRuntime::fault(F_SYSCALL_ADDRESS);

    ImageDecoder decoder;
    if (!decoder.init(im, vbuf->cube))
        return SvmRuntime::fault(F_BAD_ASSET_IMAGE);

    ImageIter iter(decoder, frame, lSrcXY.x, lSrcXY.y, lSize.x, lSize.y);
    iter.copyToBG1(vbuf->vbuf, lDestXY.x, lDestXY.y);
}
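// Both syscalls above follow the same untrusted-pointer discipline: reject
// misaligned pointers, snapshot argument structs into trusted locals before
// using them (so the guest cannot change them mid-validation), then map the
// destination range. A minimal sketch of that copy-to-local step; the names
// Int2 and copyUserStruct are hypothetical stand-ins for the SVM types:
#include <cstdint>
#include <cstring>

struct Int2 { int32_t x, y; };

static bool copyUserStruct(Int2& local, const Int2* user)
{
    // A real runtime would also verify that [user, user + 1) lies inside
    // guest-accessible memory before touching it.
    if (reinterpret_cast<uintptr_t>(user) % alignof(Int2) != 0)
        return false;                            // alignment fault
    std::memcpy(&local, user, sizeof(Int2));     // snapshot the untrusted data
    return true;                                 // safe to use 'local' now
}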
static void alignMessage(struct Message* msg, uint32_t alignmentBytes)
{
    if (isAligned(msg->bytes, alignmentBytes)) {
        return;
    }
    uint8_t* bytes = msg->bytes;
    int length = msg->length;
    // Prepend zero bytes until the start pointer is aligned, then slide the
    // original content back to the new, aligned start.
    do {
        Message_push8(msg, 0, NULL);
    } while (!isAligned(msg->bytes, alignmentBytes));
    Bits_memmove(msg->bytes, bytes, length);
    msg->length = length;
}
int isValidBlock(char *bp)
{
    int size = GETSIZE(HDRP(bp));
    // bp + size gives the address of the next block's header
    int validLength = ((bp + size) == NEXT_BLKP(bp));
    validLength = validLength && (size >= MINBLOCKSIZE);
    ASSERT(isAligned(bp));
    ASSERT(hdequalft(bp));
    ASSERT(validCoalescing(bp));
    ASSERT(validLength);
    return isAligned(bp) && hdequalft(bp) && validCoalescing(bp) && validLength;
}
static bool checkArgs(KernelArgs* args)
{
    if (!isAligned(args->row)) {
        printf("Array not aligned (row)\n");
        return false;
    }
    if (!isAligned(args->col)) {
        printf("Array not aligned (col)\n");
        return false;
    }
    if (!isAligned(args->count)) {
        printf("Array not aligned (count)\n");
        return false;
    }
    return true;
}
/**
 * Allocate 'size' bytes of raw memory from the current page, or set up a new
 * page by allocating from our parent Arena if there is insufficient space in
 * the current page to satisfy the request.
 *
 * We write a custom finalizing header (class Page) at the front of each page
 * and push this onto the front of the finalizer list where reset() can later
 * find it. This neatly ensures that the pages are deallocated only after the
 * objects in them have been finalized.
 */
void* ScopedArena::doMalloc(size_t size)
{
    assert(size != 0 && isAligned(size));                // Is already aligned

    if (_next + size < _next)                            // Pointer overflows?
    {
        this->overflowed();                              // ...signal overflow
    }

    if (_next + size > _last)                            // Page out of space?
    {
        size_t n = std::max(_size, sizeof(Page) + size); // ...at least a page
        void*  m = LimitedArena::doMalloc(n);            // ...allocate memory
        Page*  p = new(m) Page(n, _parent.get());        // ...init the header

        _list.push_front(p->getPayload());               // ...add to the list

        _next = static_cast<byte_t*>(m) + sizeof(Page);  // ...aim past header
        _last = static_cast<byte_t*>(m) + n;             // ...end of the page
    }

    assert(_next + size <= _last);                       // Now there is room!

    byte_t* p = _next;                                   // Copy next pointer
    _next += size;                                       // Then step over it
    return p;                                            // Our new allocation
}
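// A minimal, self-contained sketch of the bump-pointer-with-page-fallback
// pattern that doMalloc implements above. The names SimpleArena and PAGE are
// hypothetical; unlike the real ScopedArena, this sketch never frees its
// pages and omits the finalizer-list bookkeeping entirely.
#include <cassert>
#include <cstddef>
#include <cstdlib>

class SimpleArena {
    static const size_t PAGE = 4096;
    char* _next = nullptr;                  // next free byte in current page
    char* _last = nullptr;                  // one past the end of the page
public:
    void* alloc(size_t size) {
        assert(size != 0);
        if (_last - _next < static_cast<ptrdiff_t>(size)) { // out of space?
            size_t n = size > PAGE ? size : PAGE;           // at least a page
            char* m = static_cast<char*>(std::malloc(n));   // fresh page
            assert(m != nullptr);
            _next = m;
            _last = m + n;
        }
        char* p = _next;                    // hand out the current position
        _next += size;                      // then bump past it
        return p;
    }
};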
void* Chunk::operator new (size_t unused, size_t size, Executable executable)
{
    ASSERT(isAligned(size, PAGESIZE));
    // TODO: support large allocations
    ASSERT(size == kDefaultSize);

    // We can't guarantee the kernel will give us an aligned chunk, so we ask
    // for some extra and align the chunk within the region given to us.
    // TODO: ASLR
    auto alignedSize = size + kDefaultSize;
    auto prot = kReadable | kWritable | (executable == EXECUTABLE ? kExecutable : 0);
    auto base = allocateMemory(alignedSize, prot);

    // Free the extra memory at the beginning and end.
    auto start = align(base, kDefaultSize);
    auto end = start + size;
    auto extraBefore = start - base;
    if (extraBefore > 0)
        releaseMemory(base, extraBefore);
    auto extraAfter = (base + alignedSize) - end;
    if (extraAfter > 0)
        releaseMemory(end, extraAfter);

    Chunk* chunk = reinterpret_cast<Chunk*>(start);
    chunk->size_ = size;
    chunk->executable_ = executable;
    return reinterpret_cast<void*>(start);
}
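// A minimal sketch of the same over-allocate-and-trim trick written directly
// against POSIX mmap/munmap; the real code above goes through its own
// allocateMemory/releaseMemory wrappers, and CHUNK_SIZE here is an
// illustrative stand-in for kDefaultSize.
#include <cstddef>
#include <cstdint>
#include <sys/mman.h>

static const size_t CHUNK_SIZE = 1 << 20;   // 1 MiB, must be a power of two

void* allocAlignedChunk()
{
    // Ask for one extra chunk so a fully aligned chunk must lie inside.
    size_t total = 2 * CHUNK_SIZE;
    void* mem = mmap(nullptr, total, PROT_READ | PROT_WRITE,
                     MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    if (mem == MAP_FAILED)
        return nullptr;

    // Round up to the next CHUNK_SIZE boundary.
    uintptr_t addr = reinterpret_cast<uintptr_t>(mem);
    uintptr_t start = (addr + CHUNK_SIZE - 1) & ~(uintptr_t)(CHUNK_SIZE - 1);

    // Return the unused slack before and after the aligned chunk.
    size_t before = start - addr;
    if (before > 0)
        munmap(mem, before);
    size_t after = total - before - CHUNK_SIZE;
    if (after > 0)
        munmap(reinterpret_cast<void*>(start + CHUNK_SIZE), after);

    return reinterpret_cast<void*>(start);
}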
void _SYS_image_BG0Draw(struct _SYSAttachedVideoBuffer *vbuf,
    const _SYSAssetImage *im, uint16_t addr, unsigned frame)
{
    if (!isAligned(vbuf) || !isAligned(im))
        return SvmRuntime::fault(F_SYSCALL_ADDR_ALIGN);

    if (!SvmMemory::mapRAM(vbuf))
        return SvmRuntime::fault(F_SYSCALL_ADDRESS);

    ImageDecoder decoder;
    if (!decoder.init(im, vbuf->cube))
        return SvmRuntime::fault(F_BAD_ASSET_IMAGE);

    ImageIter iter(decoder, frame);
    iter.copyToVRAM(vbuf->vbuf, addr, _SYS_VRAM_BG0_WIDTH);
}
void CommandBufferPtr::updateDynamicUniforms(void* data, U32 originalSize)
{
    ANKI_ASSERT(data);
    ANKI_ASSERT(originalSize > 0);
    ANKI_ASSERT(originalSize <= 1024 * 4 && "Too high?");

    GlState& state =
        get().getManager().getImplementation().getRenderingThread().getState();

    const U uboSize = state.m_globalUboSize;
    const U subUboSize = GlState::MAX_UBO_SIZE;

    // Get offset in the contiguous buffer
    U size = getAlignedRoundUp(state.m_uniBuffOffsetAlignment, originalSize);
    U offset = state.m_globalUboCurrentOffset.fetchAdd(size);
    offset = offset % uboSize;

    while((offset % subUboSize) + size > subUboSize)
    {
        // Update area will fall between UBOs, need to start over
        offset = state.m_globalUboCurrentOffset.fetchAdd(size);
        offset = offset % uboSize;
    }

    ANKI_ASSERT(isAligned(state.m_uniBuffOffsetAlignment, offset));
    ANKI_ASSERT(offset + size <= uboSize);

    // Get actual UBO address to write
    U uboIdx = offset / subUboSize;
    U subUboOffset = offset % subUboSize;
    ANKI_ASSERT(isAligned(state.m_uniBuffOffsetAlignment, subUboOffset));

    U8* addressToWrite = state.m_globalUboAddresses[uboIdx] + subUboOffset;

    // Write
    memcpy(addressToWrite, data, originalSize);

    // Push bind command
    get().pushBackNewCommand<UpdateUniformsCommand>(
        state.m_globalUbos[uboIdx], subUboOffset, originalSize);
}
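// A rough sketch of the lock-free ring-buffer suballocation performed above:
// round the request up to the UBO offset alignment, claim a slice with an
// atomic add, wrap modulo the buffer size, and retry if the slice would
// straddle a boundary. ALIGN and BUF_SIZE are illustrative constants, not
// AnKi's; a request must not exceed BUF_SIZE.
#include <atomic>
#include <cstddef>

static const size_t ALIGN = 256;            // typical UBO offset alignment
static const size_t BUF_SIZE = 1 << 20;     // ring buffer capacity

std::atomic<size_t> g_cursor{0};

size_t allocRingSlice(size_t requestedSize)
{
    size_t size = (requestedSize + ALIGN - 1) & ~(ALIGN - 1); // round up
    size_t offset = g_cursor.fetch_add(size) % BUF_SIZE;
    // If the slice would cross the end of the buffer, claim another slice
    // and try again, just as the code above retries across sub-UBO seams.
    while (offset + size > BUF_SIZE)
        offset = g_cursor.fetch_add(size) % BUF_SIZE;
    return offset;
}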
static Range<Type> findMinAndMax (const Type* src, int num) noexcept
{
    const int numLongOps = num / Mode::numParallel;
   #if JUCE_USE_SSE_INTRINSICS
    if (numLongOps > 1 && isSSE2Available())
   #else
    if (numLongOps > 1)
   #endif
    {
        ParallelType mn, mx;

       #if ! JUCE_USE_ARM_NEON
        if (isAligned (src))
        {
            mn = Mode::loadA (src);
            mx = mn;

            for (int i = 1; i < numLongOps; ++i)
            {
                src += Mode::numParallel;
                const ParallelType v = Mode::loadA (src);
                mn = Mode::min (mn, v);
                mx = Mode::max (mx, v);
            }
        }
        else
       #endif
        {
            mn = Mode::loadU (src);
            mx = mn;

            for (int i = 1; i < numLongOps; ++i)
            {
                src += Mode::numParallel;
                const ParallelType v = Mode::loadU (src);
                mn = Mode::min (mn, v);
                mx = Mode::max (mx, v);
            }
        }

        Range<Type> result (Mode::min (mn), Mode::max (mx));
        num &= (Mode::numParallel - 1);   // leftover scalar elements

        for (int i = 0; i < num; ++i)
            result = result.getUnionWith (src[i]);

        return result;
    }

    return Range<Type>::findMinAndMax (src, num);
}
void PMM::markUsed(physical_t page)
{
    uint32_t ptr = page.numeric();
    if (!isAligned(ptr)) {
        return; // TODO: report the unaligned address instead of ignoring it
    }
    uint32_t pageId = ptr / 4096;
    uint32_t idx = pageId / 32;
    uint32_t bit = pageId % 32;
    // Mark the selected bit as used.
    bitmap[idx] &= ~(1 << bit);
}
size_t add_zeros(std::vector<llvm::Type*>& defaultTypes, size_t startOffset,
                 size_t endOffset)
{
    size_t const oldLength = defaultTypes.size();
    llvm::Type* const eightByte = llvm::Type::getInt64Ty(gIR->context());
    llvm::Type* const fourByte = llvm::Type::getInt32Ty(gIR->context());
    llvm::Type* const twoByte = llvm::Type::getInt16Ty(gIR->context());

    assert(startOffset <= endOffset);
    size_t paddingLeft = endOffset - startOffset;
    while (paddingLeft)
    {
        if (global.params.is64bit && paddingLeft >= 8 &&
            isAligned(eightByte, startOffset))
        {
            defaultTypes.push_back(eightByte);
            startOffset += 8;
        }
        else if (paddingLeft >= 4 && isAligned(fourByte, startOffset))
        {
            defaultTypes.push_back(fourByte);
            startOffset += 4;
        }
        else if (paddingLeft >= 2 && isAligned(twoByte, startOffset))
        {
            defaultTypes.push_back(twoByte);
            startOffset += 2;
        }
        else
        {
            defaultTypes.push_back(llvm::Type::getInt8Ty(gIR->context()));
            startOffset += 1;
        }

        paddingLeft = endOffset - startOffset;
    }

    return defaultTypes.size() - oldLength;
}
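// Worked example for add_zeros above, assuming a 64-bit target: filling the
// gap from startOffset = 2 to endOffset = 16 (14 bytes of padding) proceeds
// greedily as
//   i16 at offset 2   (2 is 2-aligned but not 4-aligned, so i32 is rejected)
//   i32 at offset 4   (4 is 4-aligned but not 8-aligned, so i64 is rejected)
//   i64 at offset 8   (8 is 8-aligned with exactly 8 bytes left)
// yielding three zero-initialized members and a final offset of 16.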
void PMM::free(physical_t page)
{
    uint32_t ptr = page.numeric();
    if (!isAligned(ptr)) {
        return; // TODO: report the unaligned address instead of ignoring it
    }
    uint32_t pageId = ptr / 4096;
    uint32_t idx = pageId / 32;
    uint32_t bit = pageId % 32;
    // Mark the selected bit as free.
    bitmap[idx] |= (1 << bit);
}
void makePicture()
{
    float point[3];
    calcularattitude();
    point[0] = norm[0];
    point[1] = norm[1];
    point[2] = norm[2];
    // Keep adjusting attitude until the satellite is aligned with the target.
    while (!isAligned()) {
        Movimiento();
        api.setAttitudeTarget(point);
    }
    DEBUG(("It is properly aligned\n"));
    game.takePic(PoiID);
    DEBUG(("You have used ", game.getMemoryFilled(), " of 2 photos.\n"));
}
//==============================================================================
void Renderer::createRenderTarget(U32 w, U32 h, GLenum internalFormat,
    GLenum format, GLenum type, U32 samples, GlTextureHandle& rt)
{
    // Not very important, but keep the resolution of render targets
    // aligned to 16
    if(0)
    {
        ANKI_ASSERT(isAligned(16, w));
        ANKI_ASSERT(isAligned(16, h));
    }

    GlTextureHandle::Initializer init;

    init.m_width = w;
    init.m_height = h;
    init.m_depth = 0;
#if ANKI_GL == ANKI_GL_DESKTOP
    init.m_target = (samples == 1) ? GL_TEXTURE_2D : GL_TEXTURE_2D_MULTISAMPLE;
#else
    ANKI_ASSERT(samples == 1);
    init.m_target = GL_TEXTURE_2D;
#endif
    init.m_internalFormat = internalFormat;
    init.m_format = format;
    init.m_type = type;
    init.m_mipmapsCount = 1;
    init.m_filterType = GlTextureHandle::Filter::NEAREST;
    init.m_repeat = false;
    init.m_anisotropyLevel = 0;
    init.m_genMipmaps = false;
    init.m_samples = samples;

    GlDevice& gl = GlDeviceSingleton::get();
    GlCommandBufferHandle jobs(&gl);
    rt = GlTextureHandle(jobs, init);
    jobs.finish();
}
REO_POS getOrientWordModel(SentenceAlignment& sentence, REO_MODEL_TYPE modelType,
                           bool connectedLeftTop, bool connectedRightTop,
                           int startF, int endF, int startE, int endE,
                           int countF, int zero, int unit,
                           bool (*ge)(int, int), bool (*lt)(int, int))
{
    if (connectedLeftTop && !connectedRightTop)
        return LEFT;
    if (modelType == REO_MONO)
        return UNKNOWN;
    if (!connectedLeftTop && connectedRightTop)
        return RIGHT;
    if (modelType == REO_MSD)
        return UNKNOWN;

    for (int indexF = startF - 2 * unit;
         (*ge)(indexF, zero) && !connectedLeftTop; indexF = indexF - unit)
        connectedLeftTop = isAligned(sentence, indexF, startE - unit);
    for (int indexF = endF + 2 * unit;
         (*lt)(indexF, countF) && !connectedRightTop; indexF = indexF + unit)
        connectedRightTop = isAligned(sentence, indexF, startE - unit);

    if (connectedLeftTop && !connectedRightTop)
        return DRIGHT;
    else if (!connectedLeftTop && connectedRightTop)
        return DLEFT;
    return UNKNOWN;
}
void Data::calculateTargetAzEl()
{
    targetAz = NAN;
    targetEl = NAN;

    // If we have valid alignment data, recalculate everything based on the
    // current position and time, and update the form's result fields.
    if (isAligned() && finite(targetRA) && finite(targetDec)) {
        calc.EqToAzEl(targetRA, targetDec, Util::getEffectiveTime(), false,
                      &targetAz, &targetEl);
        setTargetAzEl(targetAz, targetEl);
    }
}
void DistanceDB::addSequence(Sequence seq)
{
    try {
        // are the template sequences aligned
        if (!isAligned(seq.getAligned())) {
            templateAligned = false;
            m->mothurOut(seq.getName() + " is not aligned. Sequences must be aligned to use the distance method.");
            m->mothurOutEndLine();
        }

        if (templateSeqsLength == 0) {
            templateSeqsLength = seq.getAligned().length();
        }

        data.push_back(seq);
    }
    catch (exception& e) {
        m->errorOut(e, "DistanceDB", "addSequence");
        exit(1);
    }
}
/* advanceToObjectData (s, p)
 *
 * If p points at the beginning of an object, then advanceToObjectData
 * returns a pointer to the start of the object data.
 */
pointer advanceToObjectData (ARG_USED_FOR_ASSERT GC_state s, pointer p) {
  GC_header header;
  pointer res;

  assert (isFrontierAligned (s, p));
  header = *(GC_header*)p;
  if (0 == header)
    /* Looking at the counter word in an array. */
    res = p + GC_ARRAY_HEADER_SIZE;
  else
    /* Looking at a header word. */
    res = p + GC_NORMAL_HEADER_SIZE;
  assert (isAligned ((uintptr_t)res, s->alignment));
  if (DEBUG_DETAILED)
    fprintf (stderr, FMTPTR" = advanceToObjectData ("FMTPTR")\n",
             (uintptr_t)res, (uintptr_t)p);
  return res;
}
size_t sizeofThread (GC_state s) {
  size_t res;

  res = GC_NORMAL_METADATA_SIZE + sizeof (struct GC_thread);
  res = align (res, s->alignment);
  if (DEBUG) {
    size_t check;
    uint16_t bytesNonObjptrs, numObjptrs;

    splitHeader (s, GC_THREAD_HEADER, NULL, NULL, &bytesNonObjptrs, &numObjptrs);
    check = GC_NORMAL_METADATA_SIZE + (bytesNonObjptrs + (numObjptrs * OBJPTR_SIZE));
    if (DEBUG_DETAILED)
      fprintf (stderr, "sizeofThread: res = %"PRIuMAX"  check = %"PRIuMAX"\n",
               (uintmax_t)res, (uintmax_t)check);
    assert (check == res);
  }
  assert (isAligned (res, s->alignment));
  return res;
}
//==============================================================================
RenderableDrawer::RenderableDrawer(Renderer* r)
    : m_r(r)
{
    // Create the uniform buffer
    GlDevice& gl = GlDeviceSingleton::get();
    GlCommandBufferHandle jobs(&gl);
    m_uniformBuff = GlBufferHandle(jobs, GL_UNIFORM_BUFFER,
        MAX_UNIFORM_BUFFER_SIZE,
        GL_MAP_WRITE_BIT | GL_MAP_PERSISTENT_BIT | GL_MAP_COHERENT_BIT);
    jobs.flush();

    m_uniformPtr = (U8*)m_uniformBuff.getPersistentMappingAddress();
    ANKI_ASSERT(m_uniformPtr != nullptr);
    ANKI_ASSERT(isAligned(
        gl.getBufferOffsetAlignment(m_uniformBuff.getTarget()), m_uniformPtr));

    // Set some other values
    m_uniformsUsedSize = 0;
    m_uniformsUsedSizeFrame = 0;
}
int GC_init (GC_state s, int argc, char **argv) {
  char *worldFile;
  int res;

  assert (s->alignment >= GC_MODEL_MINALIGN);
  assert (isAligned (sizeof (struct GC_stack), s->alignment));
  // While the following asserts are manifestly true,
  // they check the asserts in sizeofThread and sizeofWeak.
  assert (sizeofThread (s) == sizeofThread (s));
  assert (sizeofWeak (s) == sizeofWeak (s));

  s->amInGC = TRUE;
  s->amOriginal = TRUE;
  s->atomicState = 0;
  s->callFromCHandlerThread = BOGUS_OBJPTR;
  s->controls.fixedHeap = 0;
  s->controls.maxHeap = 0;
  s->controls.mayLoadWorld = TRUE;
  s->controls.mayPageHeap = FALSE;
  s->controls.mayProcessAtMLton = TRUE;
  s->controls.messages = FALSE;
  s->controls.oldGenSequenceSize = 0x100000;
  s->controls.ratios.copy = 4.0f;
  s->controls.ratios.copyGenerational = 4.0f;
  s->controls.ratios.grow = 8.0f;
  s->controls.ratios.hashCons = 0.0f;
  s->controls.ratios.live = 8.0f;
  s->controls.ratios.markCompact = 1.04f;
  s->controls.ratios.markCompactGenerational = 8.0f;
  s->controls.ratios.nursery = 10.0f;
  s->controls.ratios.ramSlop = 0.5f;
  s->controls.ratios.stackCurrentGrow = 2.0f;
  s->controls.ratios.stackCurrentMaxReserved = 32.0f;
  s->controls.ratios.stackCurrentPermitReserved = 4.0f;
  s->controls.ratios.stackCurrentShrink = 0.5f;
  s->controls.ratios.stackMaxReserved = 8.0f;
  s->controls.ratios.stackShrink = 0.5f;
  s->controls.summary = FALSE;
  s->controls.summaryFile = stderr;
  s->cumulativeStatistics.bytesAllocated = 0;
  s->cumulativeStatistics.bytesCopied = 0;
  s->cumulativeStatistics.bytesCopiedMinor = 0;
  s->cumulativeStatistics.bytesHashConsed = 0;
  s->cumulativeStatistics.bytesMarkCompacted = 0;
  s->cumulativeStatistics.bytesScannedMinor = 0;
  s->cumulativeStatistics.maxBytesLive = 0;
  s->cumulativeStatistics.maxHeapSize = 0;
  s->cumulativeStatistics.maxPauseTime = 0;
  s->cumulativeStatistics.maxStackSize = 0;
  s->cumulativeStatistics.numCardsMarked = 0;
  s->cumulativeStatistics.numCopyingGCs = 0;
  s->cumulativeStatistics.numHashConsGCs = 0;
  s->cumulativeStatistics.numMarkCompactGCs = 0;
  s->cumulativeStatistics.numMinorGCs = 0;
  rusageZero (&s->cumulativeStatistics.ru_gc);
  rusageZero (&s->cumulativeStatistics.ru_gcCopying);
  rusageZero (&s->cumulativeStatistics.ru_gcMarkCompact);
  rusageZero (&s->cumulativeStatistics.ru_gcMinor);
  s->currentThread = BOGUS_OBJPTR;
  s->hashConsDuringGC = FALSE;
  initHeap (s, &s->heap);
  s->lastMajorStatistics.bytesHashConsed = 0;
  s->lastMajorStatistics.bytesLive = 0;
  s->lastMajorStatistics.kind = GC_COPYING;
  s->lastMajorStatistics.numMinorGCs = 0;
  s->savedThread = BOGUS_OBJPTR;
  initHeap (s, &s->secondaryHeap);
  s->signalHandlerThread = BOGUS_OBJPTR;
  s->signalsInfo.amInSignalHandler = FALSE;
  s->signalsInfo.gcSignalHandled = FALSE;
  s->signalsInfo.gcSignalPending = FALSE;
  s->signalsInfo.signalIsPending = FALSE;
  sigemptyset (&s->signalsInfo.signalsHandled);
  sigemptyset (&s->signalsInfo.signalsPending);
  s->sysvals.pageSize = GC_pageSize ();
  s->sysvals.physMem = GC_physMem ();
  s->weaks = NULL;
  s->saveWorldStatus = true;

  initIntInf (s);
  initSignalStack (s);
  worldFile = NULL;

  unless (isAligned (s->sysvals.pageSize, CARD_SIZE))
    die ("Page size must be a multiple of card size.");
  processAtMLton (s, 0, s->atMLtonsLength, s->atMLtons, &worldFile);
  res = processAtMLton (s, 1, argc, argv, &worldFile);
  if (s->controls.fixedHeap > 0 and s->controls.maxHeap > 0)
    die ("Cannot use both fixed-heap and max-heap.");
  unless (s->controls.ratios.markCompact <= s->controls.ratios.copy
          and s->controls.ratios.copy <= s->controls.ratios.live)
    die ("Ratios must satisfy mark-compact-ratio <= copy-ratio <= live-ratio.");
  unless (s->controls.ratios.stackCurrentPermitReserved
          <= s->controls.ratios.stackCurrentMaxReserved)
    die ("Ratios must satisfy stack-current-permit-reserved <= stack-current-max-reserved.");
  /* We align s->sysvals.ram by s->sysvals.pageSize so that we can
   * test whether or not we are using mark-compact by comparing
   * heap size to ram size.  If we didn't round, the size might be
   * slightly off.
   */
  uintmax_t ram;
  ram = alignMax ((uintmax_t)(s->controls.ratios.ramSlop * (double)(s->sysvals.physMem)),
                  (uintmax_t)(s->sysvals.pageSize));
  ram = min (ram, alignMaxDown ((uintmax_t)SIZE_MAX, (uintmax_t)(s->sysvals.pageSize)));
  s->sysvals.ram = (size_t)ram;
  if (DEBUG or DEBUG_RESIZING or s->controls.messages)
    fprintf (stderr,
             "[GC: Found %s bytes of RAM; using %s bytes (%.1f%% of RAM).]\n",
             uintmaxToCommaString(s->sysvals.physMem),
             uintmaxToCommaString(s->sysvals.ram),
             100.0 * ((double)ram / (double)(s->sysvals.physMem)));
  if (DEBUG_SOURCES or DEBUG_PROFILE) {
    uint32_t i;
    for (i = 0; i < s->sourceMaps.frameSourcesLength; i++) {
      uint32_t j;
      uint32_t *sourceSeq;

      fprintf (stderr, "%"PRIu32"\n", i);
      sourceSeq = s->sourceMaps.sourceSeqs[s->sourceMaps.frameSources[i]];
      for (j = 1; j <= sourceSeq[0]; j++)
        fprintf (stderr, "\t%s\n",
                 s->sourceMaps.sourceNames[
                   s->sourceMaps.sources[sourceSeq[j]].sourceNameIndex]);
    }
  }
  /* Initialize profiling.  This must occur after processing
   * command-line arguments, because those may just be doing a
   * show-sources, in which case we don't want to initialize the
   * atExit.
   */
  initProfiling (s);
  if (s->amOriginal) {
    initWorld (s);
    /* The mutator stack invariant doesn't hold,
     * because the mutator has yet to run.
     */
    assert (invariantForMutator (s, TRUE, FALSE));
  } else {
    loadWorldFromFileName (s, worldFile);
    if (s->profiling.isOn and s->profiling.stack)
      foreachStackFrame (s, enterFrameForProfiling);
    assert (invariantForMutator (s, TRUE, TRUE));
  }
  s->amInGC = FALSE;
  return res;
}
static char* toAligned(char* p)
{
    while (! isAligned(p)) p++;
    return p;
}
static UintPtr toAligned(UintPtr x)
{
    while (! isAligned(x)) x++;
    return x;
}
static bool isAligned(char* p)
{
    return isAligned(UintPtr(p));
}
static Type findMinOrMax (const Type* src, int num, const bool isMinimum) noexcept
{
    const int numLongOps = num / Mode::numParallel;
   #if JUCE_USE_SSE_INTRINSICS
    if (numLongOps > 1 && isSSE2Available())
   #else
    if (numLongOps > 1)
   #endif
    {
        ParallelType val;

       #if ! JUCE_USE_ARM_NEON
        if (isAligned (src))
        {
            val = Mode::loadA (src);

            if (isMinimum)
            {
                for (int i = 1; i < numLongOps; ++i)
                {
                    src += Mode::numParallel;
                    val = Mode::min (val, Mode::loadA (src));
                }
            }
            else
            {
                for (int i = 1; i < numLongOps; ++i)
                {
                    src += Mode::numParallel;
                    val = Mode::max (val, Mode::loadA (src));
                }
            }
        }
        else
       #endif
        {
            val = Mode::loadU (src);

            if (isMinimum)
            {
                for (int i = 1; i < numLongOps; ++i)
                {
                    src += Mode::numParallel;
                    val = Mode::min (val, Mode::loadU (src));
                }
            }
            else
            {
                for (int i = 1; i < numLongOps; ++i)
                {
                    src += Mode::numParallel;
                    val = Mode::max (val, Mode::loadU (src));
                }
            }
        }

        Type result = isMinimum ? Mode::min (val) : Mode::max (val);
        num &= (Mode::numParallel - 1);

        for (int i = 0; i < num; ++i)
            result = isMinimum ? jmin (result, src[i]) : jmax (result, src[i]);

        return result;
    }

    return isMinimum ? juce::findMinimum (src, num) : juce::findMaximum (src, num);
}
word_t Chunk::bitIndexForAddress(Address addr) const
{
    ASSERT(isAligned(addr, kWordSize));
    return (addr - storageBase()) / kWordSize;
}
Address Chunk::storageBase() const
{
    ASSERT(isAligned(bitmapSize(), kWordSize));
    return bitmapBase() + bitmapSize();
}
DgSqrD4Grid2DS::DgSqrD4Grid2DS (DgRFNetwork& networkIn,
            const DgRF<DgDVec2D, long double>& backFrameIn, int nResIn,
            unsigned int apertureIn, bool isCongruentIn, bool isAlignedIn,
            const string& nameIn)
        : DgDiscRFS2D (networkIn, backFrameIn, nResIn, apertureIn,
                       isCongruentIn, isAlignedIn, nameIn)
{
   // determine the radix
   radix_ = static_cast<int>(sqrt(static_cast<float>(aperture())));
   if (static_cast<unsigned int>(radix() * radix()) != aperture())
   {
      report(
        "DgSqrD4Grid2DS::DgSqrD4Grid2DS() aperture must be a perfect square",
        DgBase::Fatal);
   }

   if (isAligned() && radix() != 2 && radix() != 3)
   {
      report("DgSqrD4Grid2DS::DgSqrD4Grid2DS() only aligned apertures 4 and 9 "
             "parent/children operators fully implemented", DgBase::Warning);
   }

   // do the grids
   long double fac = 1;
   DgDVec2D trans;
   if (isCongruent())
   {
      trans = DgDVec2D(-0.5, -0.5);
   }
   else if (isAligned())
   {
      trans = DgDVec2D(0.0, 0.0);
   }
   else
   {
      report("DgSqrD4Grid2DS::DgSqrD4Grid2DS() grid system must be either "
             "congruent, aligned, or both", DgBase::Fatal);
   }

   for (int i = 0; i < nRes(); i++)
   {
      string newName = name() + "_" + dgg::util::to_string(i);
      //cout << newName << " " << fac << ' ' << trans << endl;
      DgContCartRF* ccRF = new DgContCartRF(network(), newName + string("bf"));
      new Dg2WayContAffineConverter(backFrame(), *ccRF, (long double) fac,
                                    0.0, trans);
      (*grids_)[i] = new DgSqrD4Grid2D(network(), *ccRF, newName);
      new Dg2WayResAddConverter<DgIVec2D, DgDVec2D, long double>
                                                   (*this, *(grids()[i]), i);
      fac *= radix();
   }
} // DgSqrD4Grid2DS::DgSqrD4Grid2DS
static unsigned toAligned(unsigned x)
{
    while (! isAligned(x)) x++;
    return x;
}
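// The toAligned helpers above walk forward one unit at a time. For
// power-of-two alignments the same result is usually computed with the
// standard round-up bit trick; a minimal sketch, assuming ALIGNMENT is a
// power of two (as the isAligned definitions elsewhere imply):
#include <cstdint>

static const uintptr_t ALIGNMENT = 8;

static bool isAlignedBits(uintptr_t x)
{
    return (x & (ALIGNMENT - 1)) == 0;              // low bits clear => aligned
}

static uintptr_t roundUpAligned(uintptr_t x)
{
    return (x + ALIGNMENT - 1) & ~(ALIGNMENT - 1);  // next multiple of ALIGNMENT
}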