// Creates a StyleRule for the given source line, flattening the parsed
// selectors into one contiguous fastMalloc'ed CSSSelector array that the
// rule adopts (and will free) via parserAdoptSelectorArray.
PassRef<StyleRule> StyleRule::create(int sourceLine, const Vector<const CSSSelector*>& selectors, PassRef<StyleProperties> properties)
{
    ASSERT_WITH_SECURITY_IMPLICATION(!selectors.isEmpty());
    // Raw allocation; each element is copy-constructed in place below.
    CSSSelector* selectorListArray = reinterpret_cast<CSSSelector*>(fastMalloc(sizeof(CSSSelector) * selectors.size()));
    for (unsigned i = 0; i < selectors.size(); ++i)
        new (NotNull, &selectorListArray[i]) CSSSelector(*selectors.at(i));
    // Flag the final selector as the end of the list.
    selectorListArray[selectors.size() - 1].setLastInSelectorList();
    auto rule = StyleRule::create(sourceLine, std::move(properties));
    rule.get().parserAdoptSelectorArray(selectorListArray);
    return rule;
}
// Lazily builds the parallel `keys` array for this table: one slot per value,
// holding that entry's key pointer, or null for empty slots.
void HashTable::createTable() const
{
    ASSERT(!keys);
    keys = static_cast<const char**>(fastMalloc(sizeof(char*) * numberOfValues));
    for (int index = 0; index < numberOfValues; ++index)
        keys[index] = values[index].m_key ? values[index].m_key : 0;
}
// Hash-table 'translate' hook: fills the table slot `location` with a new
// UString::Rep built from a copy of `buf`, stamping it with the precomputed
// hash so it need not be recomputed.
static void translate(UString::Rep *& location, const UCharBuffer& buf, unsigned hash)
{
    // NOTE(review): sizeof(UChar) * buf.length is unchecked — presumably
    // buf.length is bounded upstream; verify against callers.
    UChar *d = static_cast<UChar *>(fastMalloc(sizeof(UChar) * buf.length));
    for (unsigned i = 0; i != buf.length; i++)
        d[i] = buf.s[i];
    // create() adopts `d`; releaseRef() hands us the raw pointer.
    UString::Rep *r = UString::Rep::create(d, buf.length).releaseRef();
    r->rc = 0; // table entries start at refcount 0 (the creation ref was released above)
    r->_hash = hash; // reuse the hash computed during the table lookup
    location = r;
}
// Copies the incoming message bytes into an owned buffer whose start is
// aligned for the largest primitive we decode.
void ArgumentDecoder::initialize(const uint8_t* buffer, size_t bufferSize)
{
    // This is the largest primitive type we expect to unpack from the message.
    const size_t expectedAlignment = sizeof(uint64_t);
    // Over-allocate by one alignment unit so the base can be rounded up.
    m_allocatedBase = static_cast<uint8_t*>(fastMalloc(bufferSize + expectedAlignment));
    m_buffer = roundUpToAlignment(m_allocatedBase, expectedAlignment);
    ASSERT(!(reinterpret_cast<uintptr_t>(m_buffer) % expectedAlignment));
    m_bufferPos = m_buffer;
    m_bufferEnd = m_buffer + bufferSize;
    memcpy(m_buffer, buffer, bufferSize);
}
// Copy constructor: deep-copies the other list's selector array, honoring the
// destructor's allocation contract (single selector via new, multiple via
// fastMalloc + placement new).
CSSSelectorList::CSSSelectorList(const CSSSelectorList& o)
{
    unsigned length = o.length();
    if (length == 1) {
        // Destructor expects a single selector to be allocated by new, multiple with fastMalloc.
        m_selectorArray = new CSSSelector(o.m_selectorArray[0]);
        return;
    }
    // Raw allocation; each element is copy-constructed in place below.
    m_selectorArray = reinterpret_cast<CSSSelector*>(fastMalloc(sizeof(CSSSelector) * length));
    for (unsigned i = 0; i < length; ++i)
        new (&m_selectorArray[i]) CSSSelector(o.m_selectorArray[i]);
}
// Reshapes UMat `m` to `_dims` dimensions with sizes `_sz` and (optionally)
// explicit or automatically computed steps. For <=2 dims the inline
// step.buf/&m.rows storage is reused; for >2 dims a single fastMalloc block
// holds both the step and size arrays.
static inline void setSize( UMat& m, int _dims, const int* _sz, const size_t* _steps, bool autoSteps=false )
{
    CV_Assert( 0 <= _dims && _dims <= CV_MAX_DIM );
    if( m.dims != _dims )
    {
        // Release any previously heap-allocated step/size storage and fall
        // back to the inline buffers before (possibly) reallocating.
        if( m.step.p != m.step.buf )
        {
            fastFree(m.step.p);
            m.step.p = m.step.buf;
            m.size.p = &m.rows;
        }
        if( _dims > 2 )
        {
            // One block: _dims steps, then (_dims+1) ints; size.p points past
            // a leading slot that stores the dimension count at size.p[-1].
            m.step.p = (size_t*)fastMalloc(_dims*sizeof(m.step.p[0]) + (_dims+1)*sizeof(m.size.p[0]));
            m.size.p = (int*)(m.step.p + _dims) + 1;
            m.size.p[-1] = _dims;
            m.rows = m.cols = -1;
        }
    }
    m.dims = _dims;
    if( !_sz )
        return;
    size_t esz = CV_ELEM_SIZE(m.flags), total = esz;
    int i;
    // Walk dimensions from innermost to outermost, filling sizes and steps.
    for( i = _dims-1; i >= 0; i-- )
    {
        int s = _sz[i];
        CV_Assert( s >= 0 );
        m.size.p[i] = s;
        if( _steps )
            m.step.p[i] = i < _dims-1 ? _steps[i] : esz; // innermost step is always the element size
        else if( autoSteps )
        {
            // Dense packing: each step is the running product of inner extents.
            m.step.p[i] = total;
            int64 total1 = (int64)total*s;
            // Detect size_t overflow of the running byte count.
            if( (uint64)total1 != (size_t)total1 )
                CV_Error( CV_StsOutOfRange, "The total matrix size does not fit to \"size_t\" type" );
            total = (size_t)total1;
        }
    }
    // 1-D data is represented as an Nx1 2-D matrix.
    if( _dims == 1 )
    {
        m.dims = 2;
        m.cols = 1;
        m.step[1] = esz;
    }
}
// Hash-table 'translate' hook: fills the table slot `location` with a new
// UString::Rep built from the NUL-terminated C string `c`, widening each
// byte to a UChar.
static void translate(UString::Rep*& location, const char *c, unsigned hash)
{
    size_t length = strlen(c);
    UChar *d = static_cast<UChar *>(fastMalloc(sizeof(UChar) * length));
    for (size_t i = 0; i != length; i++)
        d[i] = static_cast<unsigned char>(c[i]); // use unsigned char to zero-extend instead of sign-extend
    // create() adopts `d`; releaseRef() hands us the raw pointer.
    UString::Rep *r = UString::Rep::create(d, static_cast<int>(length)).releaseRef();
    r->rc = 0; // table entries start at refcount 0 (the creation ref was released above)
    r->_hash = hash; // reuse the hash computed during the table lookup
    location = r;
}
// Starts an asynchronous read of up to `size` bytes from the channel's input
// stream; the completion handler is invoked via inputStreamReadReadyCallback,
// which takes ownership of `asyncData`. `offset` is currently ignored.
void IOChannel::read(size_t offset, size_t size, WorkQueue*, std::function<void (Data&, int error)> completionHandler)
{
    ASSERT(m_inputStream);
    size_t bufferSize = std::min(size, gDefaultReadBufferSize);
    uint8_t* bufferData = static_cast<uint8_t*>(fastMalloc(bufferSize));
    // The SoupBuffer owns bufferData and releases it with fastFree.
    GRefPtr<SoupBuffer> buffer = adoptGRef(soup_buffer_new_with_owner(bufferData, bufferSize, bufferData, fastFree));
    ReadAsyncData* asyncData = new ReadAsyncData { this, buffer.get(), size, completionHandler, { } };
    // FIXME: implement offset.
    g_input_stream_read_async(m_inputStream.get(), const_cast<char*>(buffer->data), bufferSize, G_PRIORITY_DEFAULT, nullptr,
        reinterpret_cast<GAsyncReadyCallback>(inputStreamReadReadyCallback), asyncData);
}
// CFAllocator 'allocate' hook: prefixes every CF allocation with a hidden
// header slot recording the StringImpl (if any) whose buffer backs it, so the
// matching deallocate can deref that string later.
static void* allocate(CFIndex size, CFOptionFlags, void*)
{
    StringImpl* underlyingString = 0;
    if (isMainThread()) {
        // currentString is a main-thread-only hand-off slot; take it and clear it.
        underlyingString = currentString;
        if (underlyingString) {
            currentString = 0;
            underlyingString->ref(); // Balanced by call to deref in deallocate below.
        }
    }
    StringImpl** header = static_cast<StringImpl**>(fastMalloc(sizeof(StringImpl*) + size));
    *header = underlyingString;
    // Hand CF the memory just past our bookkeeping header.
    return header + 1;
}
// Allocates a zeroed HB_FontRec wired to the Skia-backed Harfbuzz callbacks.
// The values which harfbuzzSkiaClass returns are already scaled to pixel
// units, so all ppem/scale factors are set to one to disable further scaling.
HB_FontRec* allocHarfbuzzFont()
{
    void* storage = fastMalloc(sizeof(HB_FontRec));
    memset(storage, 0, sizeof(HB_FontRec));

    HB_FontRec* font = reinterpret_cast<HB_FontRec*>(storage);
    font->klass = &harfbuzzSkiaClass;
    font->userData = 0;
    font->x_ppem = 1;
    font->y_ppem = 1;
    font->x_scale = 1;
    font->y_scale = 1;
    return font;
}
// Creates a StringImpl with an uninitialized character buffer of `length`
// UChars, returning the buffer through `data` for the caller to fill.
// Returns the shared empty string for length == 0.
PassRefPtr<StringImpl> StringImpl::createUninitialized(unsigned length, UChar*& data)
{
    if (!length) {
        data = 0;
        return empty();
    }

    // Guard the size computation below against overflow: without this, a huge
    // length could wrap the multiplication and produce an under-sized
    // allocation, leading to a heap overflow when the caller writes `length`
    // characters. Mirrors the identical check in UStringImpl::createUninitialized.
    if (length > ((std::numeric_limits<size_t>::max() - sizeof(StringImpl)) / sizeof(UChar)))
        CRASH();

    // Allocate a single buffer large enough to contain the StringImpl
    // struct as well as the data which it contains. This removes one
    // heap allocation from this call.
    size_t size = sizeof(StringImpl) + length * sizeof(UChar);
    char* buffer = static_cast<char*>(fastMalloc(size));
    // Characters live immediately after the header.
    data = reinterpret_cast<UChar*>(buffer + sizeof(StringImpl));
    StringImpl* string = new (buffer) StringImpl(data, length, AdoptBuffer());
    string->m_bufferIsInternal = true;
    return adoptRef(string);
}
// Grows the backing buffer to hold at least `newSize` bytes, doubling the
// request to amortize repeated growth.
void StringPrintStream::increaseSize(size_t newSize)
{
    ASSERT(newSize > m_size);
    ASSERT(newSize > sizeof(m_inlineBuffer));
    // Use exponential resizing to reduce thrashing.
    m_size = newSize << 1;
    // Use fastMalloc instead of fastRealloc because we know that for the sizes we're using,
    // fastRealloc will just do malloc+free anyway. Also, this simplifies the code since
    // we can't realloc the inline buffer.
    char* newBuffer = static_cast<char*>(fastMalloc(m_size));
    // Copy m_next + 1 bytes — presumably the used bytes plus the terminator;
    // verify against the writer's invariant.
    memcpy(newBuffer, m_buffer, m_next + 1);
    // The inline buffer is part of this object and must never be freed.
    if (m_buffer != m_inlineBuffer)
        fastFree(m_buffer);
    m_buffer = newBuffer;
}
// Creates a SpaceSplitStringData for `keyString` with its `tokenCount`
// AtomicString tokens stored inline, in a single allocation after the header.
inline PassRefPtr<SpaceSplitStringData> SpaceSplitStringData::create(const AtomicString& keyString, unsigned tokenCount)
{
    ASSERT(tokenCount);
    // Reject token counts whose inline-array size would overflow the
    // allocation computation below.
    RELEASE_ASSERT(tokenCount < (std::numeric_limits<unsigned>::max() - sizeof(SpaceSplitStringData)) / sizeof(AtomicString));
    unsigned sizeToAllocate = sizeof(SpaceSplitStringData) + tokenCount * sizeof(AtomicString);
    SpaceSplitStringData* spaceSplitStringData = static_cast<SpaceSplitStringData*>(fastMalloc(sizeToAllocate));
    new (NotNull, spaceSplitStringData) SpaceSplitStringData(keyString, tokenCount);
    // Construct the AtomicString tokens in place directly after the header.
    AtomicString* tokenArrayStart = spaceSplitStringData->tokenArrayStart();
    TokenAtomicStringInitializer tokenInitializer(tokenArrayStart);
    tokenizeSpaceSplitString(tokenInitializer, keyString);
    // Sanity-check that tokenization filled exactly the space we allocated.
    ASSERT(static_cast<unsigned>(tokenInitializer.nextMemoryBucket() - tokenArrayStart) == tokenCount);
    ASSERT(reinterpret_cast<const char*>(tokenInitializer.nextMemoryBucket()) == reinterpret_cast<const char*>(spaceSplitStringData) + sizeToAllocate);
    return adoptRef(spaceSplitStringData);
}
// Returns the GL info log for `program`, or the empty string when there is
// no log.
String GraphicsContext3D::getProgramInfoLog(Platform3DObject program)
{
    ASSERT(program);
    makeContextCurrent();
    // Zero-initialize so a failed glGetProgramiv call (e.g. an invalid program
    // object) cannot leave `length` holding garbage that we'd then pass to
    // fastMalloc. Matches the other GraphicsContext3D log getters.
    GLint length = 0;
    ::glGetProgramiv(program, GL_INFO_LOG_LENGTH, &length);
    if (!length)
        return "";
    GLsizei size = 0;
    GLchar* info = (GLchar*) fastMalloc(length);
    // length includes the null terminator, so `info` is NUL-terminated here.
    ::glGetProgramInfoLog(program, length, &size, info);
    String s(info);
    fastFree(info);
    return s;
}
// Returns the per-thread (or, without WORKERS, process-wide) global data,
// constructing it on first use. The storage is intentionally never destroyed.
ThreadGlobalData& threadGlobalData()
{
    // FIXME: Workers are not necessarily the only feature that make per-thread global data necessary.
    // We need to check for e.g. database objects manipulating strings on secondary threads.
#if ENABLE(WORKERS)
    // ThreadGlobalData is used on main thread before it could possibly be used on secondary ones, so there is no need for synchronization here.
    static ThreadSpecific<ThreadGlobalData>* threadGlobalData = new ThreadSpecific<ThreadGlobalData>;
    return **threadGlobalData;
#else
    static ThreadGlobalData* staticData;
    if (!staticData) {
        // Two-phase construction: reserve the memory first, then placement-new.
        staticData = static_cast<ThreadGlobalData*>(fastMalloc(sizeof(ThreadGlobalData)));
        // ThreadGlobalData constructor indirectly uses staticData, so we need to set up the memory before invoking it.
        new (staticData) ThreadGlobalData;
    }
    return *staticData;
#endif
}
// Creates a UStringImpl with an uninitialized buffer of `length` UChars,
// handing the writable buffer back through `data`. Zero-length requests
// share the singleton empty string instead of allocating.
PassRefPtr<UStringImpl> UStringImpl::createUninitialized(unsigned length, UChar*& data)
{
    if (!length) {
        data = 0;
        return empty();
    }

    // One allocation holds both the UStringImpl header and its character
    // payload (saving a second heap allocation). Refuse lengths whose byte
    // size would overflow size_t.
    if (length > ((std::numeric_limits<size_t>::max() - sizeof(UStringImpl)) / sizeof(UChar)))
        CRASH();
    size_t totalSize = sizeof(UStringImpl) + length * sizeof(UChar);
    UStringImpl* string = static_cast<UStringImpl*>(fastMalloc(totalSize));
    // The characters live immediately after the header.
    data = reinterpret_cast<UChar*>(string + 1);
    return adoptRef(new (string) UStringImpl(length));
}
// Receive loop run on a dedicated thread: select()s on the socket and pulls
// incoming data until the connection drops or platform close is requested,
// then notifies the main thread that the loop stopped.
void SocketStreamHandle::runRecvThread()
{
    // Debug-only counter of live recv threads (not synchronized).
    static int runRecvThreadCounter = 0;
    runRecvThreadCounter++;
    LOG_CONNECT(Network, "SocketStreamHandleCurl: starting runRecvThread, count = %d [%p][thread=%d]\n", runRecvThreadCounter, this, GetCurrentThreadId());

    if (connect()) {
        fd_set fdread;
        fd_set fdwrite;
        fd_set fdexcep;

        while (isConnected() && !m_platformCloseRequested) {
            FD_ZERO(&fdread);
            FD_ZERO(&fdwrite);
            FD_ZERO(&fdexcep);
            // Watch the socket for readability and exceptional conditions only.
            FD_SET(m_socket, &fdexcep);
            FD_SET(m_socket, &fdread);

            // Lazily allocate the receive buffer on first use; its release is
            // not visible in this function.
            if (m_receive_buffer == NULL)
                m_receive_buffer = (char*)fastMalloc(receiveBufferSize);

            // Block (no timeout) until the socket becomes ready.
            int rc = ::select(m_socket + 1, &fdread, &fdwrite, &fdexcep, NULL);

            if (rc > 0 && !m_platformCloseRequested) {
                if (FD_ISSET(m_socket, &fdexcep)) {
                    LOG_CONNECT(Network, "SocketStreamHandleCurl: processActiveJobs, socket exception occured [%p][thread=%d]\n", this, GetCurrentThreadId());
                    // The remote side probably closed the connection.
                    m_curl_code = CURLE_UNSUPPORTED_PROTOCOL;
                    sendMessageToMainThread(DidClose);
                    continue;
                }
                if (FD_ISSET(m_socket, &fdread)) {
                    privateReceive();
                }
            }
        }
    }

    runRecvThreadCounter--;
    LOG_CONNECT(Network, "SocketStreamHandleCurl: exiting runRecvThread, count = %d [%p][thread=%d]\n", runRecvThreadCounter, this, GetCurrentThreadId());
    sendMessageToMainThread(DidStopRecvLoop);
}
// GstAllocator 'mem_share' vfunc: returns a new GstMemoryFastMalloc aliasing
// a sub-range of `mem` without copying; gst_memory_init records `parent` so
// the underlying storage stays alive while the share exists.
static GstMemoryFastMalloc* gst_allocator_fast_malloc_mem_share(GstMemoryFastMalloc* mem, gssize offset, gsize size)
{
    GstMemory* parent = mem->base.parent;
    if (!parent)
        parent = GST_MEMORY_CAST(mem);
    // A size of -1 means "to the end of the source memory".
    if (size == static_cast<gsize>(-1))
        size = mem->base.size - offset;

    GstMemoryFastMalloc* sharedMem = static_cast<GstMemoryFastMalloc*>(fastMalloc(sizeof(GstMemoryFastMalloc)));
    // Shared views are marked lock-read-only so writers must take a copy.
    gst_memory_init(GST_MEMORY_CAST(sharedMem),
        static_cast<GstMemoryFlags>(GST_MINI_OBJECT_FLAGS(parent) | GST_MINI_OBJECT_FLAG_LOCK_READONLY),
        mem->base.allocator, parent, mem->base.maxsize, mem->base.align, mem->base.offset + offset, size);
    // The share points at the same bytes as the source.
    sharedMem->data = mem->data;
    return sharedMem;
}
// Harfbuzz table-loading callback: copies the font table identified by `tag`
// out of the Skia typeface into a fastMalloc'ed buffer, wrapped in an hb_blob
// that frees it with fastFree when released. Returns 0 if the table is
// missing or cannot be read in full.
static hb_blob_t* harfBuzzSkiaGetTable(hb_face_t* face, hb_tag_t tag, void* userData)
{
    SkTypeface* typeface = reinterpret_cast<SkTypeface*>(userData);

    const size_t tableSize = typeface->getTableSize(tag);
    if (!tableSize)
        return 0;

    char* tableData = reinterpret_cast<char*>(fastMalloc(tableSize));
    if (!tableData)
        return 0;

    if (typeface->getTableData(tag, 0, tableSize, tableData) != tableSize) {
        // Short read: give the memory back and report no table.
        fastFree(tableData);
        return 0;
    }
    return hb_blob_create(const_cast<char*>(tableData), tableSize, HB_MEMORY_MODE_WRITABLE, tableData, fastFree);
}
// Returns the source previously attached to `shader`, or the empty string
// when the shader has no source.
String GraphicsContext3D::getShaderSource(Platform3DObject shader)
{
    makeContextCurrent();

    GLint length = 0;
    glGetShaderiv(shader, GraphicsContext3D::SHADER_SOURCE_LENGTH, &length);
    // A zero length means there is no source (or the query failed). Bail out
    // now: continuing would fastMalloc(0) and then construct a String from an
    // uninitialized, unterminated buffer. Matches getProgramInfoLog's guard.
    if (!length)
        return "";

    GLsizei size = 0;
    GLchar* info = (GLchar*) fastMalloc(length);
    if (!info)
        return "";

    // length includes the null terminator, so `info` is NUL-terminated here.
    glGetShaderSource(shader, length, &size, info);
    String result(info);
    fastFree(info);
    return result;
}
// Method: SetDragImage // Not part of Win32 API. // Notes in http://msdn.microsoft.com/en-us/library/windows/desktop/bb762034(v=vs.85).aspx say: // The drag-and-drop helper object calls IDataObject::SetData to load private formats—used for // cross-process support—into the data object. It later retrieves these formats by calling // IDataObject::GetData. To support the drag-and-drop helper object, the data object's SetData // and GetData implementations must be able to accept and return arbitrary private formats. // Because we don't know how the drag-and-drop helper is going to call SetData and GetData, // and we don't need to support drag and drop across processes, we'll just manage this ourselves. void WinCE_SetDragImage(SHDRAGIMAGE * dragImage) { memcpy(¤tDragImage, dragImage, sizeof(SHDRAGIMAGE)); BITMAP info; if (GetObject(currentDragImage.hbmpDragImage, sizeof(info), &info) == 0) return; unsigned char * bits = (unsigned char *) info.bmBits; // Windows flips bitmaps. Flip it back. // See Top-Down vs. Bottom-Up DIBs // http://msdn.microsoft.com/en-us/library/windows/desktop/dd407212(v=vs.85).aspx if (info.bmHeight > 0) { LONG bmSize = info.bmWidthBytes * info.bmHeight; bits = (unsigned char *)fastMalloc(bmSize); for (int y = 0; y < info.bmHeight; y++) { unsigned char * dst = bits + (info.bmHeight - y - 1) * info.bmWidthBytes; unsigned char * src = (unsigned char *)info.bmBits + y * info.bmWidthBytes; memcpy(dst, src, info.bmWidthBytes); } } cairo_surface_t* imageSurface = cairo_image_surface_create_for_data((unsigned char*)bits, info.bmBitsPixel == 32 ? CAIRO_FORMAT_ARGB32 : info.bmBitsPixel == 24 ? CAIRO_FORMAT_RGB24 : info.bmBitsPixel == 16 ? CAIRO_FORMAT_RGB16_565 : CAIRO_FORMAT_INVALID, info.bmWidth, info.bmHeight > 0 ? 
info.bmHeight : -info.bmHeight, info.bmWidthBytes); dragImageCairo = cairo_win32_surface_create_with_dib (CAIRO_FORMAT_ARGB32, dragImage->sizeDragImage.cx, dragImage->sizeDragImage.cy); cairo_t *cr = cairo_create(dragImageCairo); cairo_set_source_surface(cr, imageSurface, 0, 0); cairo_paint_with_alpha(cr, 0.5); cairo_destroy(cr); cairo_surface_destroy(imageSurface); if (bits != info.bmBits) fastFree(bits); }
// Harfbuzz table-loading callback backed by SkFontHost: copies the `tag`
// table of the platform font into a fastMalloc'ed buffer owned by the
// returned hb_blob (freed with fastFree). Returns 0 when the table is
// missing or cannot be read in full.
static hb_blob_t* harfbuzzSkiaGetTable(hb_face_t* face, hb_tag_t tag, void* userData)
{
    FontPlatformData* font = reinterpret_cast<FontPlatformData*>(userData);

    const size_t tableSize = SkFontHost::GetTableSize(font->uniqueID(), tag);
    if (!tableSize)
        return 0;

    char* tableData = reinterpret_cast<char*>(fastMalloc(tableSize));
    if (!tableData)
        return 0;

    if (SkFontHost::GetTableData(font->uniqueID(), tag, 0, tableSize, tableData) != tableSize) {
        // The font delivered fewer bytes than it advertised; treat as missing.
        fastFree(tableData);
        return 0;
    }
    return hb_blob_create(const_cast<char*>(tableData), tableSize, HB_MEMORY_MODE_WRITABLE, tableData, fastFree);
}
// Returns the info log for `program`, or the empty string when there is no
// log.
String GraphicsContext3D::getProgramInfoLog(Platform3DObject program)
{
    makeContextCurrent();

    GLint length = 0;
    glGetProgramiv(program, GraphicsContext3D::INFO_LOG_LENGTH, &length);
    // A zero length means there is no log (or the query failed). Bail out now:
    // continuing would fastMalloc(0) and then construct a String from an
    // uninitialized, unterminated buffer. Matches the other log getters.
    if (!length)
        return "";

    GLsizei size = 0;
    GLchar* info = (GLchar*) fastMalloc(length);
    if (!info)
        return "";

    // length includes the null terminator, so `info` is NUL-terminated here.
    glGetProgramInfoLog(program, length, &size, info);
    String result(info);
    fastFree(info);
    return result;
}
// Reserves `size` bytes starting at an `alignment` boundary, growing the
// backing buffer geometrically as needed, and returns a pointer to the start
// of the newly reserved region.
uint8_t* ArgumentEncoder::grow(unsigned alignment, size_t size)
{
    size_t alignedSize = roundUpToAlignment(m_bufferSize, alignment);
    if (alignedSize + size > m_bufferCapacity) {
        // Grow by at least 25% (minimum 32 bytes) or whatever the request needs.
        size_t newCapacity = std::max(alignedSize + size, std::max(static_cast<size_t>(32), m_bufferCapacity + m_bufferCapacity / 4 + 1));
        if (!m_buffer)
            m_buffer = static_cast<uint8_t*>(fastMalloc(newCapacity));
        else
            m_buffer = static_cast<uint8_t*>(fastRealloc(m_buffer, newCapacity));
        // FIXME: What should we do if allocating memory fails?
        m_bufferCapacity = newCapacity;
    }

    m_bufferSize = alignedSize + size;
    m_bufferPointer = m_buffer + alignedSize + size;

    // The caller writes into the alignment gap's far side: [alignedSize, alignedSize + size).
    return m_buffer + alignedSize;
}
// Copy constructor: duplicates the other table's index storage byte-for-byte
// and takes a reference on every key it now shares.
PropertyTable::PropertyTable(VM& vm, const PropertyTable& other)
    : JSCell(vm, vm.propertyTableStructure.get())
    , m_indexSize(other.m_indexSize)
    , m_indexMask(other.m_indexMask)
    // dataSize() presumably reads the members initialized just above
    // (m_indexSize et al.) — keep the declaration/initialization order.
    , m_index(static_cast<unsigned*>(fastMalloc(dataSize())))
    , m_keyCount(other.m_keyCount)
    , m_deletedCount(other.m_deletedCount)
{
    ASSERT(isPowerOf2(m_indexSize));

    memcpy(m_index, other.m_index, dataSize());

    // The copied entries share key pointers with `other`; ref each one.
    iterator end = this->end();
    for (iterator iter = begin(); iter != end; ++iter)
        iter->key->ref();

    // Copy the m_deletedOffsets vector.
    Vector<PropertyOffset>* otherDeletedOffsets = other.m_deletedOffsets.get();
    if (otherDeletedOffsets)
        m_deletedOffsets = std::make_unique<Vector<PropertyOffset>>(*otherDeletedOffsets);
}
// Takes ownership of the selectors in `selectorVector`, flattening them into
// one contiguous array (or adopting the single heap pointer when size == 1,
// matching the destructor's allocation contract).
void CSSSelectorList::adoptSelectorVector(Vector<CSSSelector*>& selectorVector)
{
    deleteSelectors();
    const size_t size = selectorVector.size();
    ASSERT(size);
    if (size == 1) {
        // Single selector: adopt the heap pointer directly.
        m_selectorArray = selectorVector[0];
        m_selectorArray->setLastInSelectorList();
        selectorVector.shrink(0);
        return;
    }
    m_selectorArray = reinterpret_cast<CSSSelector*>(fastMalloc(sizeof(CSSSelector) * selectorVector.size()));
    for (size_t i = 0; i < size; ++i) {
        // Bitwise move of each selector into the flat array.
        memcpy(&m_selectorArray[i], selectorVector[i], sizeof(CSSSelector));
        // We want to free the memory (which was allocated with fastNew), but we
        // don't want the destructor to run since it will affect the copy we've just made.
        fastDeleteSkippingDestructor(selectorVector[i]);
        ASSERT(!m_selectorArray[i].isLastInSelectorList());
    }
    // Flag the final selector as the end of the list.
    m_selectorArray[size - 1].setLastInSelectorList();
    selectorVector.shrink(0);
}
// Captures the vtable pointers of the core JSCell subclasses by constructing
// a throwaway instance of each in scratch storage, reading its vptr, and
// destroying it in place.
VPtrSet::VPtrSet()
{
    // Bizarrely, calling fastMalloc here is faster than allocating space on the stack.
    void* storage = fastMalloc(sizeof(CollectorBlock));

    JSCell* jsArray = new (storage) JSArray(JSArray::createStructure(jsNull()));
    jsArrayVPtr = jsArray->vptr();
    jsArray->~JSCell();

    JSCell* jsByteArray = new (storage) JSByteArray(JSByteArray::VPtrStealingHack);
    jsByteArrayVPtr = jsByteArray->vptr();
    jsByteArray->~JSCell();

    JSCell* jsString = new (storage) JSString(JSString::VPtrStealingHack);
    jsStringVPtr = jsString->vptr();
    jsString->~JSCell();

    JSCell* jsFunction = new (storage) JSFunction(JSFunction::createStructure(jsNull()));
    jsFunctionVPtr = jsFunction->vptr();
    jsFunction->~JSCell();

    fastFree(storage);
}
// Copy constructor (with owner): duplicates the other table's index storage,
// refs every shared key, and fires a write barrier for each specific value
// on behalf of `owner`.
PropertyTable::PropertyTable(VM& vm, JSCell* owner, const PropertyTable& other)
    : JSCell(vm, vm.propertyTableStructure.get())
    , m_indexSize(other.m_indexSize)
    , m_indexMask(other.m_indexMask)
    // dataSize() presumably reads the members initialized just above
    // (m_indexSize et al.) — keep the declaration/initialization order.
    , m_index(static_cast<unsigned*>(fastMalloc(dataSize())))
    , m_keyCount(other.m_keyCount)
    , m_deletedCount(other.m_deletedCount)
{
    ASSERT(isPowerOf2(m_indexSize));

    memcpy(m_index, other.m_index, dataSize());

    iterator end = this->end();
    for (iterator iter = begin(); iter != end; ++iter) {
        iter->key->ref();
        Heap::writeBarrier(owner, iter->specificValue.get());
    }

    // Copy the m_deletedOffsets vector.
    Vector<PropertyOffset>* otherDeletedOffsets = other.m_deletedOffsets.get();
    if (otherDeletedOffsets)
        m_deletedOffsets = adoptPtr(new Vector<PropertyOffset>(*otherDeletedOffsets));
}
// Reads the ProductVersion string from this module's VERSIONINFO resource,
// falling back to "420+" if any step fails. goto-based cleanup keeps the
// single fastFree covering every early exit.
static String userVisibleWebKitVersionString()
{
    String versionStr = "420+";
    void* data = 0;

    struct LANGANDCODEPAGE {
        WORD wLanguage;
        WORD wCodePage;
    } *lpTranslate;

    TCHAR path[MAX_PATH];
    ::GetModuleFileName(instanceHandle(), path, WTF_ARRAY_LENGTH(path));
    DWORD handle;
    DWORD versionSize = ::GetFileVersionInfoSize(path, &handle);
    if (!versionSize)
        goto exit;
    data = fastMalloc(versionSize);
    if (!data)
        goto exit;
    if (!::GetFileVersionInfo(path, 0, versionSize, data))
        goto exit;

    UINT cbTranslate;
    // Use the first language/codepage pair in the translation table.
    if (!::VerQueryValue(data, TEXT("\\VarFileInfo\\Translation"), (LPVOID*)&lpTranslate, &cbTranslate))
        goto exit;

    TCHAR key[256];
    _stprintf_s(key, WTF_ARRAY_LENGTH(key), TEXT("\\StringFileInfo\\%04x%04x\\ProductVersion"), lpTranslate[0].wLanguage, lpTranslate[0].wCodePage);

    LPCTSTR productVersion;
    UINT productVersionLength;
    if (!::VerQueryValue(data, (LPTSTR)(LPCTSTR)key, (void**)&productVersion, &productVersionLength))
        goto exit;
    // The -1 drops the final character — presumably the terminating null that
    // VerQueryValue includes in the length; verify against the API docs.
    versionStr = String(productVersion, productVersionLength - 1);

exit:
    if (data)
        fastFree(data);
    return versionStr;
}
// Takes ownership of the parser selectors, flattening every selector and its
// tag history into one contiguous CSSSelector array (or adopting the single
// heap pointer when there is exactly one selector overall).
void CSSSelectorList::adoptSelectorVector(Vector<OwnPtr<CSSParserSelector> >& selectorVector)
{
    deleteSelectors();
    const size_t vectorSize = selectorVector.size();
    // Count every selector including each one's tag-history chain; that is the
    // total number of flat array slots needed.
    size_t flattenedSize = 0;
    for (size_t i = 0; i < vectorSize; ++i) {
        for (CSSParserSelector* selector = selectorVector[i].get(); selector; selector = selector->tagHistory())
            ++flattenedSize;
    }
    ASSERT(flattenedSize);
    if (flattenedSize == 1) {
        // Single selector: adopt the heap pointer directly.
        m_selectorArray = selectorVector[0]->releaseSelector().leakPtr();
        m_selectorArray->setLastInSelectorList();
        ASSERT(m_selectorArray->isLastInTagHistory());
        selectorVector.shrink(0);
        return;
    }
    m_selectorArray = reinterpret_cast<CSSSelector*>(fastMalloc(sizeof(CSSSelector) * flattenedSize));
    size_t arrayIndex = 0;
    for (size_t i = 0; i < vectorSize; ++i) {
        CSSParserSelector* current = selectorVector[i].get();
        // Walk this selector's tag-history chain, moving each CSSSelector into
        // the next flat slot.
        while (current) {
            OwnPtr<CSSSelector> selector = current->releaseSelector();
            current = current->tagHistory();
            move(selector.release(), &m_selectorArray[arrayIndex]);
            ASSERT(!m_selectorArray[arrayIndex].isLastInSelectorList());
            // Chained selectors (those with a remaining tag history) must not
            // carry the end-of-history flag.
            if (current)
                m_selectorArray[arrayIndex].setNotLastInTagHistory();
            ++arrayIndex;
        }
        ASSERT(m_selectorArray[arrayIndex - 1].isLastInTagHistory());
    }
    ASSERT(flattenedSize == arrayIndex);
    // Flag the final selector as the end of the list.
    m_selectorArray[arrayIndex - 1].setLastInSelectorList();
    selectorVector.shrink(0);
}