// Test driver: runs single-threaded sanity checks on the concurrent queue,
// then stresses concurrent push/pop across a range of thread counts,
// prefill amounts, and queue capacities.
int main( int argc, char* argv[] ) {
    // Set default for minimum number of threads.
    MinThread = 1;
    ParseCommandLine(argc,argv);
    // Single-threaded checks first.
    TestEmptyQueue<char>();
    TestEmptyQueue<Foo>();
    TestFullQueue();
    // NOTE(review): spelling differs from "TestConcurrentQueueType" used
    // elsewhere — confirm the declared name before renaming.
    TestConcurrenetQueueType();
    TestIterator();
    // Test concurrent operations
    for( int nthread=MinThread; nthread<=MaxThread; ++nthread ) {
        TestNegativeQueue<Foo>(nthread);
        // prefill grows roughly geometrically: 0,1,2,3,4,6,8,...
        for( int prefill=0; prefill<64; prefill+=(1+prefill/3) ) {
            // Second argument is the queue capacity; -1 presumably means
            // unbounded — confirm against TestPushPop's contract.
            TestPushPop(prefill,ptrdiff_t(-1),nthread);
            TestPushPop(prefill,ptrdiff_t(1),nthread);
            TestPushPop(prefill,ptrdiff_t(2),nthread);
            TestPushPop(prefill,ptrdiff_t(10),nthread);
            TestPushPop(prefill,ptrdiff_t(100),nthread);
        }
    }
    printf("done\n");
    return 0;
}
// Verify: KOKKOS_INLINE_FUNCTION void operator()( size_t iwork, value_type & errors ) const { const size_t tile_dim0 = ( m_array.dimension_0() + TileLayout::N0 - 1 ) / TileLayout::N0; const size_t tile_dim1 = ( m_array.dimension_1() + TileLayout::N1 - 1 ) / TileLayout::N1; const size_t itile = iwork % tile_dim0; const size_t jtile = iwork / tile_dim0; if ( jtile < tile_dim1 ) { tile_type tile = Kokkos::tile_subview( m_array, itile, jtile ); if ( tile( 0, 0 ) != ptrdiff_t( ( itile + jtile * tile_dim0 ) * TileLayout::N0 * TileLayout::N1 ) ) { ++errors; } else { for ( size_t j = 0; j < size_t( TileLayout::N1 ); ++j ) { for ( size_t i = 0; i < size_t( TileLayout::N0 ); ++i ) { const size_t iglobal = i + itile * TileLayout::N0; const size_t jglobal = j + jtile * TileLayout::N1; if ( iglobal < m_array.dimension_0() && jglobal < m_array.dimension_1() ) { if ( tile( i, j ) != ptrdiff_t( tile( 0, 0 ) + i + j * TileLayout::N0 ) ) ++errors; //printf( "tile(%d, %d)(%d, %d) = %d\n", int( itile ), int( jtile ), int( i ), int( j ), int( tile( i, j ) ) ); } } } } } }
// Re-checks a candidate momentum collision (indexA, indexB) by recomputing
// the SHA512-derived "birthday" values for both indices.
// NOTE(review): this chunk is truncated — the remainder of the function
// body is outside this view.
bool protoshares_revalidateCollision(blockHeader_t* block, uint8_t* midHash, uint32_t indexA, uint32_t indexB, uint64_t birthdayB, CBlockProvider* bp, unsigned int thread_id)
{
        //if( indexA > MAX_MOMENTUM_NONCE )
        //        printf("indexA out of range\n");
        //if( indexB > MAX_MOMENTUM_NONCE )
        //        printf("indexB out of range\n");
        //if( indexA == indexB )
        //        printf("indexA == indexB");
        // Hash input layout: 4-byte nonce index followed by the 32-byte mid-hash.
        uint8_t tempHash[32+4];
        uint64_t resultHash[8];
        memcpy(tempHash+4, midHash, 32);
        uint64_t birthdayA;
        if (shamode == AVXSSE4 || shamode == AVX2) {
                // get birthday A
                // One SHA512 output covers 8 consecutive indices: hash the
                // 8-aligned base index, then pick this index's 64-bit word.
                *(uint32_t*)tempHash = indexA&~7;
                //AVX/SSE
                SHA512_Context c512_avxsse;
                SHA512_Init(&c512_avxsse);
                SHA512_Update(&c512_avxsse, tempHash, 32+4);
                SHA512_Final(&c512_avxsse, (unsigned char*)resultHash);
                // Keep only the top SEARCH_SPACE_BITS bits as the birthday.
                birthdayA = resultHash[ptrdiff_t(indexA&7)] >> (64ULL-SEARCH_SPACE_BITS);
                // birthdayB == 0 signals the caller did not precompute it;
                // derive it the same way.
                if (!birthdayB) {
                        *(uint32_t*)tempHash = indexB&~7;
                        SHA512_Init(&c512_avxsse);
                        SHA512_Update(&c512_avxsse, tempHash, 32+4);
                        SHA512_Final(&c512_avxsse, (unsigned char*)resultHash);
                        birthdayB = resultHash[ptrdiff_t(indexB&7)] >> (64ULL-SEARCH_SPACE_BITS);
                }
// Link the branch at 'branch' to 'label'. When the label is bound, returns
// the branch displacement in units of element_size. When unbound, threads
// the branch into the label's implicit linked list of uses and returns
// either INVALID_OFFSET (first use) or the delta to the previous use.
ptrdiff_t Assembler::LinkAndGetOffsetTo(BufferOffset branch, Label* label)
{
  if (armbuffer_.oom())
    return js::jit::LabelBase::INVALID_OFFSET;

  if (label->bound()) {
    // The label is bound: all uses are already linked.
    ptrdiff_t branch_offset = ptrdiff_t(branch.getOffset() / element_size);
    ptrdiff_t label_offset = ptrdiff_t(label->offset() / element_size);
    return label_offset - branch_offset;
  }

  if (!label->used()) {
    // The label is unbound and unused: store the offset in the label itself
    // for patching by bind().
    label->use(branch.getOffset());
    return js::jit::LabelBase::INVALID_OFFSET;
  }

  // The label is unbound but used. Create an implicit linked list between
  // the branches, and update the linked list head in the label struct.
  ptrdiff_t prevHeadOffset = static_cast<ptrdiff_t>(label->offset());
  label->use(branch.getOffset());
  // The returned delta doubles as the "next" link encoded in the branch, so
  // it must never collide with the INVALID_OFFSET sentinel.
  VIXL_ASSERT(prevHeadOffset - branch.getOffset() != js::jit::LabelBase::INVALID_OFFSET);
  return prevHeadOffset - branch.getOffset();
}
// Heuristic test for whether _ptr points into the current thread's stack:
// compares it against the address of a local variable and treats any
// address within 512 KiB as "on the stack".
XRCORE_API BOOL is_stack_ptr ( void* _ptr)
{
    int local_value = 0;
    void* ptr_refsound = _ptr;
    void* ptr_local = &local_value;
    // Absolute distance between the candidate and a known stack address.
    ptrdiff_t difference = (ptrdiff_t)_abs(s64(ptrdiff_t(ptr_local) - ptrdiff_t(ptr_refsound)));
    // NOTE(review): assumes thread stacks are at most ~512 KiB deep and
    // that nothing else lives that close to the stack — heuristic only.
    return (difference < (512*1024));
}
// // Return a pointer to the data at a given position. APTR At(ULONG x,ULONG y) const { if (ibm_ucPixelType == 0) return NULL; // Blank bitmaps keep blank assert(x < ibm_ulWidth && y < ibm_ulHeight); // return (((UBYTE *)(ibm_pData)) + (ptrdiff_t(ibm_cBytesPerPixel) * x) + (ptrdiff_t(ibm_lBytesPerRow) * y)); }
// Allocate a SharedPkt from the shared heap and fill it with a copy of the
// bytes in [begin, end). If the heap allocation fails (pkt.begin() is
// null), the packet is returned without copying.
SharedPkt MakeSharedPkt(const void* begin, const void* end)
{
    auto& heap = SharedPkt::GetHeap();
    auto size = size_t(ptrdiff_t(end) - ptrdiff_t(begin));
    SharedPkt pkt(heap.Allocate((unsigned)size), size);
    if (pkt.begin()) {
        XlCopyMemory(pkt.begin(), begin, size);
    }
    // Fix: plain return enables NRVO (or an implicit move); the original
    // "return std::move(pkt)" was a pessimizing move that blocks elision.
    return pkt;
}
// Construct a Serializer over a message block chain.
// The r/w alignment shifts record how far the chain's read and write
// pointers sit from a MAX_ALIGN boundary so later alignment logic can
// compensate; a null chain yields shifts of 0.
Serializer::Serializer(ACE_Message_Block* chain, bool swap_bytes, Alignment align)
  : current_(chain)
  , swap_bytes_(swap_bytes)
  , good_bit_(true)
  , alignment_(align)
  , align_rshift_(chain ? ptrdiff_t(chain->rd_ptr()) % MAX_ALIGN : 0)
  , align_wshift_(chain ? ptrdiff_t(chain->wr_ptr()) % MAX_ALIGN : 0)
{
}
// Assemble the output from column data c and row data r (nt2 expression
// templates): the tail of r is reversed and prepended to c, then gathered
// through the index matrices v1+v2. Presumably builds a Toeplitz-style
// matrix — confirm against the enclosing functor's contract.
static void compute(A0& out,const C& c, const R& r)
{
  size_t p = numel(r);
  size_t m = numel(c);
  // indices p, p-1, ..., 2 (step -1): r's tail, reversed, excluding r(1)
  BOOST_AUTO_TPL(idx, nt2::_(ptrdiff_t(p), ptrdiff_t(-1), ptrdiff_t(2)));
  BOOST_AUTO_TPL(ridx, nt2::colvect(r(idx)));
  BOOST_AUTO_TPL(x, catv(ridx, c)); //build vector of user data
  // v1/v2 form the per-element gather indices into x
  BOOST_AUTO_TPL(v1, nt2::fliplr(nt2::cif(m, p, meta::as_<ptrdiff_t>())));
  BOOST_AUTO_TPL(v2, nt2::ric(m, p, meta::as_<ptrdiff_t>()));
  out(nt2::_) = x(v1+v2);
}
// Resolve baseName against the configured search start points, writing the
// first existing match (simplified, with any ":parameter" suffix
// preserved) into destination. Falls back to a simplified copy of baseName
// when nothing matches. Supports destination aliasing baseName.
void DirectorySearchRules::ResolveFile(ResChar destination[], unsigned destinationCount, const ResChar baseName[]) const
{
    ResChar tempBuffer[MaxPath];

    auto splitter = MakeFileNameSplitter(baseName);

    // Test the name as given, stripped of any parameter suffix.
    bool baseFileExist = false;
    if (!splitter.ParametersWithDivider().Empty()) {
        XlCopyString(tempBuffer, splitter.AllExceptParameters());
        baseFileExist = DoesFileExist(tempBuffer);
    } else {
        baseFileExist = DoesFileExist(baseName);
    }

        // by definition, we always check the unmodified file name first
    if (!baseFileExist) {
        const ResChar* b = _buffer;
        if (!_bufferOverflow.empty()) {
            b = AsPointer(_bufferOverflow.begin());
        }

            // We want to support the case were destination == baseName
            //  But that cases requires another temporary buffer, because we
            //  don't want to trash "baseName" while searching for matches
        ResChar* workingBuffer = (baseName!=destination) ? destination : tempBuffer;
        unsigned workingBufferSize = (baseName!=destination) ? destinationCount : unsigned(dimof(tempBuffer));

        for (unsigned c=0; c<_startPointCount; ++c) {
            // Candidate path: start point + file name (no parameters).
            XlConcatPath(workingBuffer, workingBufferSize, &b[_startOffsets[c]],
                splitter.AllExceptParameters().begin(), splitter.AllExceptParameters().end());
            if (DoesFileExist(workingBuffer)) {
                SplitPath<ResChar>(workingBuffer).Simplify().Rebuild(workingBuffer, workingBufferSize);
                if (workingBuffer != destination) {
                    // Write the parameter suffix into place first, then copy
                    // the path in front of it; both are clamped so the total
                    // (plus terminator) fits in destinationCount.
                    auto workingBufferLen = std::min((ptrdiff_t)XlStringLen(workingBuffer), ptrdiff_t(destinationCount) - 1);
                    auto colonLen = (ptrdiff_t)splitter.ParametersWithDivider().Length();
                    auto colonCopy = std::min(ptrdiff_t(destinationCount) - workingBufferLen - 1, colonLen);
                    assert((workingBufferLen + colonCopy) < ptrdiff_t(destinationCount));
                    if (colonCopy > 0)
                        XlMoveMemory(&destination[workingBufferLen], splitter.ParametersWithDivider().begin(), colonCopy);
                    destination[workingBufferLen + colonCopy] = '\0';
                    assert(workingBufferLen < (ptrdiff_t(destinationCount)-1));
                    XlCopyMemory(destination, workingBuffer, workingBufferLen);
                } else {
                    // workingBuffer already is destination: just append the
                    // parameter suffix.
                    XlCatString(destination, destinationCount, splitter.ParametersWithDivider());
                }
                return;
            }
        }
    }

    // No match found: return the (simplified) original name.
    if (baseName != destination)
        XlCopyString(destination, destinationCount, baseName);
    SplitPath<ResChar>(destination).Simplify().Rebuild(destination, destinationCount);
}
// Align the list's vertical offset to item boundaries for a viewport of
// height h; returns the residual pixel delta (0 when already aligned or
// when the content fits entirely in the viewport).
SDst
AMUnitList::AdjustOffsetForHeight(SDst h, bool is_top)
{
	if(GetFullViewHeight() > h)
	{
		const SDst item_h(GetItemHeight());

		// Degenerate item height: nothing to align against.
		if(YB_UNLIKELY(item_h == 0))
			return 0;
		vwList.RestrictSelected();
		if(is_top)
			return AdjustBottomForHeight(item_h, h);
		else
		{
			// d: how far past an item boundary the bottom edge sits.
			const auto d((h + uTopOffset) % item_h);

			if(d != 0)
			{
				// Advance to the next boundary; fold whole items into the
				// head index and keep only the sub-item remainder.
				const auto tmp(uTopOffset + item_h - d);

				uTopOffset = tmp % item_h;
				AdjustViewLengthForHeight(item_h, h);
				// XXX: Conversion to 'ptrdiff_t' might be implementation-defined.
				vwList.IncreaseHead(ptrdiff_t(tmp / item_h), GetTotal());
			}
			return d;
		}
	}
	return 0;
}
/// Environ::FreeVec void Environ::FreeVec(void *mem) { if (mem) { size_t *sptr = (size_t *)(ptrdiff_t(mem) - sizeof(union Align)); CoreFreeMem(sptr,ULONG(*sptr)); } }
// Import one interpreter slot into the native stack frame: slots before
// the known failure point are skipped; the current slot gets a cloned
// side exit (stamped with its index/type) so a failure here is
// distinguishable from failures at other slots.
JS_REQUIRES_STACK void
TraceRecorder::slurpSlot(LIns* val_ins, jsval* vp, SlurpInfo* info)
{
    /* Don't re-read slots that aren't needed. */
    if (info->curSlot < info->slurpFailSlot) {
        info->curSlot++;
        return;
    }

    // Clone the exit and record which slot (and expected type) is being
    // checked at this point.
    VMSideExit* exit = copy(info->exit);
    exit->slurpFailSlot = info->curSlot;
    exit->slurpType = info->typeMap[info->curSlot];

#if defined DEBUG
    /* Make sure that we don't try and record infinity branches */
    JS_ASSERT_IF(anchor && anchor->exitType == RECURSIVE_SLURP_FAIL_EXIT && info->curSlot == info->slurpFailSlot, anchor->slurpType != exit->slurpType);
#endif

    // Read the value and store it at the slot's native-stack location
    // (slots are double-sized entries below nativeStackBase).
    LIns* val = slurpSlot(val_ins, vp, exit);
    lir->insStorei(val, lirbuf->sp, -tree->nativeStackBase + ptrdiff_t(info->curSlot) * sizeof(double));

    info->curSlot++;
}
/** Given an sarray, returns a small number uniquely associated with
 * that sarray. This number is unique over the course of the
 * program run.
 *
 * Keyed by the sarray's address; a weak_ptr guards against a later sarray
 * reusing a freed address, and expired entries are purged lazily every
 * 256 new assignments. Thread-safe via a function-local mutex.
 */
static size_t unique_sarray_tag(const std::shared_ptr<sarray<flexible_type> >& sa) {
  static mutex access_lock;
  std::lock_guard<mutex> _lg(access_lock);

  static size_t current_number = 0;
  static std::map<ptrdiff_t, std::pair<std::weak_ptr<sarray<flexible_type> >, size_t> > tracked_numbers;

  ptrdiff_t key = ptrdiff_t(sa.get());

  auto it = tracked_numbers.find(key);

  if(it != tracked_numbers.end()) {
    // Entry still alive: same sarray as before, reuse its number.
    if(!it->second.first.expired())
      return it->second.second;
    else
      // Address was recycled by a different sarray; drop the stale entry.
      tracked_numbers.erase(it);
  }

  ++current_number;

  // Purge out expired weak pointers if they are present
  if(current_number % 256 == 0) {
    for(auto it = tracked_numbers.begin(); it != tracked_numbers.end();) {
      if(it->second.first.expired())
        it = tracked_numbers.erase(it);
      else
        ++it;
    }
  }

  tracked_numbers[key] = {sa, current_number};

  return current_number;
}
// Decode one argument of type this->type_ from 'cdr' by copying its raw
// octets into a private, alignment-preserving message block owned by
// this->cdr_, carrying over translators, maps, and the GIOP version.
void
TAO::Unknown_IDL_Type::_tao_decode (TAO_InputCDR & cdr)
{
  // @@ (JP) The following code depends on the fact that
  //         TAO_InputCDR does not contain chained message blocks,
  //         otherwise <begin> and <end> could be part of
  //         different buffers!

  // This will be the start of a new message block.
  char const * const begin = cdr.rd_ptr ();

  // Skip over the next argument.
  TAO::traverse_status const status =
    TAO_Marshal_Object::perform_skip (this->type_, &cdr);

  if (status != TAO::TRAVERSE_CONTINUE)
    {
      throw ::CORBA::MARSHAL ();
    }

  // This will be the end of the new message block.
  char const * const end = cdr.rd_ptr ();

  // The ACE_CDR::mb_align() call can shift the rd_ptr by up to
  // ACE_CDR::MAX_ALIGNMENT - 1 bytes. Similarly, the offset adjustment
  // can move the rd_ptr by up to the same amount. We accommodate
  // this by including 2 * ACE_CDR::MAX_ALIGNMENT bytes of additional
  // space in the message block.
  size_t const size = end - begin;

  ACE_Message_Block new_mb (size + 2 * ACE_CDR::MAX_ALIGNMENT);

  ACE_CDR::mb_align (&new_mb);

  // Reproduce the source data's alignment phase modulo MAX_ALIGNMENT so
  // aligned reads from the copy land on the same boundaries.
  ptrdiff_t offset = ptrdiff_t (begin) % ACE_CDR::MAX_ALIGNMENT;

  if (offset < 0)
    {
      offset += ACE_CDR::MAX_ALIGNMENT;
    }

  new_mb.rd_ptr (offset);
  new_mb.wr_ptr (offset + size);

  ACE_OS::memcpy (new_mb.rd_ptr (), begin, size);

  this->cdr_.reset (&new_mb, cdr.byte_order ());
  // Propagate translators and maps so the copy decodes identically.
  this->cdr_.char_translator (cdr.char_translator ());
  this->cdr_.wchar_translator (cdr.wchar_translator ());

  this->cdr_.set_repo_id_map (cdr.get_repo_id_map ());
  this->cdr_.set_codebase_url_map (cdr.get_codebase_url_map ());
  this->cdr_.set_value_map (cdr.get_value_map ());

  // Take over the GIOP version, the input cdr can have a different
  // version then our current GIOP version.
  ACE_CDR::Octet major_version;
  ACE_CDR::Octet minor_version;

  cdr.get_version (major_version, minor_version);
  this->cdr_.set_version (major_version, minor_version);
}
// Evaluate find(a0, k): returns min(k, number of true elements in a0).
// k must be a positive scalar integer (asserted in debug builds).
BOOST_FORCEINLINE result_type operator()(Expr& e) const
{
  c0_t a0 = boost::proto::child_c<0>(e);
  ptrdiff_t k = boost::proto::value(boost::proto::child_c<1>(e));
  BOOST_ASSERT_MSG(k > 0, "Error using find: second argument must be a positive scalar integer.");
  // Total count of true (nonzero) elements, taken column-wise.
  ptrdiff_t n =ptrdiff_t(nt2::nbtrue(nt2::colvect(a0))(1));
  // k > 0 is re-checked because the assert is compiled out in release.
  return (k > 0)? nt2::min(k, n) : 0;
}
/// Environ::AllocVec without reqments void *Environ::AllocVec(size_t bytesize) { size_t *mem; // This is build directly on AllocMem bytesize += sizeof(union Align); mem = (size_t *)CoreAllocMem(ULONG(bytesize),0); *mem = bytesize; // enter the bytesize return (void *)(ptrdiff_t(mem) + sizeof(union Align)); }
// Read one line (including the trailing '\n' when present) from f into the
// internal buffer p, growing it as needed; returns the number of bytes
// read, or -1 on EOF/error. Uses the native ::getline where available.
ptrdiff_t LineBuf::getline(FILE* f) {
	assert(NULL != f);
#if defined(__USE_GNU) || defined(__CYGWIN__) || defined(__CYGWIN32__)
	// has ::getline
	return n = ::getline(&p, &capacity, f);
#else
//	#error only _GNU_SOURCE is supported
	// Portable fallback: emulate getline with fgets + doubling growth.
	if (NULL == p) {
		capacity = BUFSIZ;
		p = (char*)malloc(BUFSIZ);
		if (NULL == p)
			THROW_STD(runtime_error, "malloc(BUFSIZ=%d) failed", BUFSIZ);
	}
	n = 0;
	p[0] = '\0';
	for (;;) {
		assert(n < capacity);
		char* ret = ::fgets(p + n, capacity - n, f);
		size_t len = ::strlen(p + n);
		// Nothing read and the stream is exhausted or broken: failure.
		if (0 == len && (feof(f) || ferror(f)))
			return -1;
		n += len;
		if (ret) {
			// Buffer filled without reaching '\n': double and continue.
			if (capacity-1 == n && p[n-1] != '\n') {
				size_t newcap = capacity * 2;
				ret = (char*)realloc(p, newcap);
				if (NULL == ret)
					THROW_STD(runtime_error, "realloc(newcap=%zd)", newcap);
				p = ret;
				capacity = newcap;
			} else {
				return ptrdiff_t(n);
			}
		}
		else if (feof(f))
			// Partial last line with no terminating '\n'.
			return ptrdiff_t(n);
		else
			return -1;
	}
#endif
}
int TestMain () { TestEmptyQueue<char>(); TestEmptyQueue<Foo>(); TestFullQueue(); TestConcurrentQueueType(); TestIterator(); // Test concurrent operations for( int nthread=MinThread; nthread<=MaxThread; ++nthread ) { TestNegativeQueue<Foo>(nthread); for( int prefill=0; prefill<64; prefill+=(1+prefill/3) ) { TestPushPop(prefill,ptrdiff_t(-1),nthread); TestPushPop(prefill,ptrdiff_t(1),nthread); TestPushPop(prefill,ptrdiff_t(2),nthread); TestPushPop(prefill,ptrdiff_t(10),nthread); TestPushPop(prefill,ptrdiff_t(100),nthread); } } return Harness::Done; }
TempSpace(unsigned char** strs, size_t n) : strings(0), allocated(0), elements_in_strings(0) { debug()<<__PRETTY_FUNCTION__<<"\n"; char* raw = reinterpret_cast<char*>(strs); size_t rawbytes = n*sizeof(unsigned char*); if (ptrdiff_t(raw) % sizeof(cacheblock_t)) { unsigned diff = ptrdiff_t(raw) % sizeof(cacheblock_t); debug()<<"\t: alignment mismatch by "<<diff<<"bytes\n"; raw += diff; rawbytes -= diff; } if (rawbytes % sizeof(cacheblock_t)) { unsigned diff = rawbytes % sizeof(cacheblock_t); debug()<<"\t: truncate by "<<diff<<"bytes\n"; rawbytes -= diff; } strings = reinterpret_cast<cacheblock_t*>(raw); elements_in_strings = rawbytes / sizeof(cacheblock_t); }
// 9x9 array with 16x16 tiles: the whole array fits in one partial tile;
// the verification reduction must report zero mismatches.
TEST_F( serial, tile_16x16)
{
  static const size_t dim = 9;
  typedef Kokkos::LayoutTileLeft<16,16> tile_layout;
  typedef ReduceTileErrors< Kokkos::Serial, tile_layout > functor_type;

  functor_type::array_type array("",dim,dim);
  ptrdiff_t errors = 0 ;
  Kokkos::parallel_reduce(dim, functor_type(array) , errors );
  EXPECT_EQ( errors, ptrdiff_t(0) );
}
// Double the vector's storage (minimum one element): allocate a new block,
// copy the constructed elements across, destroy and release the old block,
// then rewire the data/avail/limit pointers.
template <class T> void Vec<T>::grow()
{
	// New capacity: twice the current capacity, but at least 1 so an
	// empty vector can still grow.
	size_type new_size = std::max(2 * (limit - data), ptrdiff_t(1));

	// Copy the constructed range [data, avail) into the new storage.
	iterator new_data = alloc.allocate(new_size);
	iterator new_avail = std::uninitialized_copy(data, avail, new_data);

	// Destroy old elements and free the old allocation.
	uncreate();

	data = new_data;
	avail = new_avail;
	limit = data + new_size;
}
// Recursive slice step, enabled while fewer than N sub-nodes have been
// collected: clamps the [start, start+len) window into the current node,
// appends the resulting sub-node, and recurses with 'start' rebased past
// this node's size.
constexpr static_string<N> slice(ptrdiff_t start, ptrdiff_t len,
                                 typename std::enable_if<(sizeof...(Node) < N)>::type*,
                                 Node const &...newnodes) const
{
    return slice(start - ptrdiff_t(nodes[sizeof...(Node)].size), len, nullptr, newnodes...,
                 // Sub-node: data offset clamped to [0, size]; length is the
                 // clamped end minus the (non-negative) clamped start.
                 node{nodes[sizeof...(Node)].data + static_min(static_max(start, 0), nodes[sizeof...(Node)].size),
                      static_min(static_max(start + len, 0), nodes[sizeof...(Node)].size) - static_max(start, 0)});
}
// Best-effort temporary buffer allocation: try to malloc __len objects,
// halving the request on each failure until an allocation succeeds or
// __len reaches 0. Returns {pointer, count obtained}, or {0, 0}.
pair<_Tp*, ptrdiff_t> _STLP_CALL
__get_temporary_buffer(ptrdiff_t __len, _Tp*)
{
  // Clamp so the byte count cannot exceed INT_MAX.
  if (__len > ptrdiff_t(INT_MAX / sizeof(_Tp)))
    __len = INT_MAX / sizeof(_Tp);

  while (__len > 0) {
    _Tp* __tmp = (_Tp*) malloc((size_t)__len * sizeof(_Tp));
    if (__tmp != 0)
      return pair<_Tp*, ptrdiff_t>(__tmp, __len);
    __len /= 2;  // retry with half the request
  }

  return pair<_Tp*, ptrdiff_t>((_Tp*)0, 0);
}
// Handle put-area overflow: append c, growing the dynamic buffer
// (doubling, minimum 1) when the put area is full and growth is allowed;
// returns c on success or eof() when the buffer cannot be extended.
_STLP_EXP_DECLSPEC strstreambuf::int_type strstreambuf::overflow(int_type c) {
  if (c == traits_type::eof())
    return traits_type::not_eof(c);
#ifdef __SYMBIAN32__
  // Fast path: room still available in the put area.
  if (pptr() != 0 && pptr() < epptr()) {
        *pptr() = c;
        pbump(1);
        return c;
    }
  if (!_M_dynamic || _M_constant || _M_frozen)
    return (EOF); // can't extend
#endif
  // Try to expand the buffer.
  if (pptr() == epptr() && _M_dynamic && !_M_frozen && !_M_constant) {
    ptrdiff_t old_size = epptr() - pbase();
    ptrdiff_t new_size = (max)(2 * old_size, ptrdiff_t(1));

    char* buf = _M_alloc(new_size);
    if (buf) {
      memcpy(buf, pbase(), old_size);

      char* old_buffer = pbase();
      bool reposition_get = false;
      ptrdiff_t old_get_offset;
      // Remember the get-area position so it can be re-established
      // relative to the new buffer.
      if (gptr() != 0) {
        reposition_get = true;
        old_get_offset = gptr() - eback();
      }

      setp(buf, buf + new_size);
      pbump((int)old_size);

      if (reposition_get)
        setg(buf, buf + old_get_offset, buf + (max)(old_get_offset, old_size));

      _M_free(old_buffer);
    }
  }

  // Store the character if the (possibly grown) put area has room.
  if (pptr() != epptr()) {
    *pptr() = c;
    pbump(1);
    return c;
  }
  else
    return traits_type::eof();
}
// One Gram-Schmidt re-orthonormalization pass over the num_of_lyap
// perturbation vectors stored after the base state in x (each of length
// n); the log of each vector's norm is accumulated into lyap for the
// Lyapunov exponent estimate. Throws std::domain_error on a size mismatch.
void gram_schmidt( StateType &x , LyapType &lyap , size_t n )
{
    if( !num_of_lyap ) return;
    // x must hold the base state plus num_of_lyap vectors of length n.
    if( ptrdiff_t( ( num_of_lyap + 1 ) * n ) != std::distance( x.begin() , x.end() ) )
        throw std::domain_error( "renormalization() : size of state does not match the number of lyapunov exponents." );

    typedef typename StateType::value_type value_type;
    typedef typename StateType::iterator iterator;

    // NOTE(review): assumes num_of_lyap is a compile-time constant; these
    // would otherwise be non-standard variable-length arrays — confirm.
    value_type norm[num_of_lyap];
    value_type tmp[num_of_lyap];
    // 'first' points past the base state, at the first perturbation vector.
    iterator first = x.begin() + n;
    iterator beg1 = first , end1 = first + n ;

    std::fill( norm , norm+num_of_lyap , 0.0 );

    // normalize first vector
    norm[0] = sqrt( std::inner_product( beg1 , end1 , beg1 , 0.0 ) );
    normalize( beg1 , end1 , norm[0] );

    beg1 += n;
    end1 += n;

    for( size_t j=1 ; j<num_of_lyap ; ++j , beg1+=n , end1+=n )
    {
        // Project out the components along all previously processed vectors.
        for( size_t k=0 ; k<j ; ++k )
        {
            tmp[k] = std::inner_product( beg1 , end1 , first + k*n , 0.0 );
            // clog << j << " " << k << " " << tmp[k] << "\n";
        }
        for( size_t k=0 ; k<j ; ++k )
            substract_vector( beg1 , end1 , first + k*n , tmp[k] );

        // normalize j-th vector
        norm[j] = sqrt( std::inner_product( beg1 , end1 , beg1 , 0.0 ) );
        // clog << j << " " << norm[j] << "\n";
        normalize( beg1 , end1 , norm[j] );
    }

    // Accumulate the stretching factors.
    for( size_t j=0 ; j<num_of_lyap ; j++ )
        lyap[j] += log( norm[j] );
}
// Construct a raycaster over a 3-D grid of the given per-axis sizes mapped
// onto the given spatial domain. Precomputes per-axis element strides, the
// domain's diagonal extent, and the diagonal size of one grid cell.
Raycaster::Raycaster(const unsigned int sDataSize[3],const Raycaster::Box& sDomain)
	:domain(sDomain),
	 domainExtent(0),cellSize(0),
	 renderDomain(Polyhedron<Scalar>::Point(domain.min),Polyhedron<Scalar>::Point(domain.max)),
	 stepSize(1)
	{
	/* Copy the data sizes and calculate the data strides and cell size: */
	ptrdiff_t stride=1;
	for(int i=0;i<3;++i)
		{
		dataSize[i]=sDataSize[i];
		dataStrides[i]=stride;
		stride*=ptrdiff_t(dataSize[i]);
		// Accumulate squared per-axis lengths; roots taken after the loop.
		domainExtent+=Math::sqr(domain.max[i]-domain.min[i]);
		// Cell size uses dataSize-1 intervals per axis.
		cellSize+=Math::sqr((domain.max[i]-domain.min[i])/Scalar(dataSize[i]-1));
		}
	domainExtent=Math::sqrt(domainExtent);
	cellSize=Math::sqrt(cellSize);
	}
// Run the tile check for a dim0 x dim1 array: one parallel_for work item
// per element (presumably fills the expected pattern — confirm against
// ReduceTileErrors), then one parallel_reduce work item per tile counting
// mismatches; the test requires zero errors.
void test( const size_t dim0, const size_t dim1 )
{
  typedef Kokkos::LayoutTileLeft< N0, N1 > array_layout;
  typedef ReduceTileErrors< Space, array_layout > functor_type;

  // Number of tiles per dimension, rounded up for partial tiles.
  const size_t tile_dim0 = ( dim0 + N0 - 1 ) / N0;
  const size_t tile_dim1 = ( dim1 + N1 - 1 ) / N1;

  typename functor_type::array_type array( "", dim0, dim1 );

  // One work item per element.
  Kokkos::parallel_for( Kokkos::RangePolicy< Space, size_t >( 0, dim0 * dim1 ), functor_type( array ) );

  // One work item per tile, reducing mismatch counts into 'error'.
  ptrdiff_t error = 0;
  Kokkos::parallel_reduce( Kokkos::RangePolicy< Space, size_t >( 0, tile_dim0 * tile_dim1 ), functor_type( array ), error );

  EXPECT_EQ( error, ptrdiff_t( 0 ) );
}
//-------------------------------------------------------------------------------- int CAtExit::Append( Function func ) { # if ( __CQOR_DEFINED_MULTITHREADED ) nsSync::CMutexLock __lock( m_Mutex ); # endif if ( !m_pBegin || m_pEnd - m_pBegin == ptrdiff_t( m_Size - 1 ) ) { if ( !Grow() ) { return 1; } } *m_pEnd++ = func; return 0; }
// Update per-vertex colors in the tristrip VBO from a range of vec3f
// colors. NOTE(review): the entire body is compiled out with '#if 0' —
// this function is currently a no-op.
void vbo_builder_tristrip::update_color_vec3fptr(const vec3f * const first, const vec3f * const last)
{
#if 0
    assert( tristrip_ != nullptr );
    //assert( 0 );
    // assert( std::distance( first, last ) == ptrdiff_t(tristrip_->idx_pairs().size()) );
    glBindBuffer( GL_ARRAY_BUFFER, buffers_[1] );

    GLubyte *b_base = (GLubyte*) glMapBuffer( GL_ARRAY_BUFFER, GL_WRITE_ONLY );
    check_gl_error;

    // Saturate the whole color buffer to opaque white (all bytes 255).
    std::fill( b_base, b_base + tristrip_->vecs().size() * 4 * sizeof(GLubyte), 255 );
    // glUnmapBuffer( GL_ARRAY_BUFFER );
    // return;

    assert( b_base != nullptr );
    //b += 4 * 3 * num_planes_;
#if 0
    const vec3f *cur = first;

    auto &ts = *tristrip_;
    const auto &idx_pairs = ts.idx_pairs();
    //assert( idx_pairs.size() == num_planes_ );
    for( ; cur != last; ++cur ) {
        auto idx = std::distance(first, cur);
        assert( idx < ptrdiff_t(idx_pairs.size()) );
        auto pair = idx_pairs[idx];

        // Write the clamped 8-bit RGBA to every vertex in this index range.
        for( uint32_t i = pair.first; i < pair.second; ++i ) {
            GLubyte *b = b_base + i * 4;
            *(b++) = gl_utils::clamp<0,255>(255 * cur->r);
            *(b++) = gl_utils::clamp<0,255>(255 * cur->g);
            *(b++) = gl_utils::clamp<0,255>(255 * cur->b);
            *(b++) = 255;
        }
    }
#endif
    glUnmapBuffer( GL_ARRAY_BUFFER );
#endif
}