/**
 * Checks to see if the new data is the next we expect to use
 *
 * When several interests are outstanding at the same time, it is possible that
 * the data for each arrives in a different order than one might expect. This is
 * more true in a widely distributed environment where more than one source of the
 * data exists to satisfy the request. And so we must be prepared for getting the
 * data out of order, and re-aligning it as necessary.
 *
 * Our context keeps track of what it expects to post to the pipeline buffers next
 * with the post_seg attribute. If we do not get this segment next, we copy the data
 * over to our own buffer [I think we only borrow the one on input], and mark the
 * interest array in a fashion that shows we have the data.
 *
 * If we do match the next segment, then we would process that data, making it
 * available for the pipeline. We then continue looking for the other segments
 * that may have arrived ahead of 'schedule'. Care is taken to skip segments
 * that have for some reason been dropped; likely excessive time outs.
 *
 * \todo at this time I have yet to test the out-of-order code...creating a test for
 * this would take some time because the producer needs to be savy to it.
 *
 * \param me source context for the gst element controlling this data
 * \param segment number of the segment that this data is for
 * \param data pointer into the data buffer
 * \param data_size how many bytes we are to use
 * \param b_last flag telling us this is the last block of data; which can still
 *        arrive out of order of course
 */
static void
process_or_queue (Gstccnxsrc * me, const uintmax_t segment,
    const guchar * data, const size_t data_size, const gboolean b_last)
{
  CcnxInterestState *istate = NULL;

  istate = fetchSegmentInterest (me, segment);
  if (NULL == istate) {
    /* %ju is the correct conversion for uintmax_t; %d was undefined behavior */
    GST_INFO ("failed to find segment in interest array: %ju", segment);
    return;
  }
  istate->state = OInterest_havedata;

  if (me->post_seg == segment) {
    /* This is the next segment we need */
    GST_INFO ("porq - got the segment we need: %ju", segment);
    process_segment (me, data, data_size, b_last);
    freeInterestState (me, istate);
    if (0 == segment)
      me->post_seg = me->i_seg; /* special case for segment zero */
    else
      me->post_seg++;

    /* Also look to see if other segments have arrived earlier that need to be
     * posted */
    istate = nextSegmentInterest (me, me->post_seg);
    while (istate && OInterest_havedata == istate->state) {
      GST_INFO ("porq - also processing extra segment: %ju",
          (uintmax_t) istate->seg);
      process_segment (me, istate->data, istate->size, istate->lastBlock);
      /* because we may skip some data, we use this segment to key off of */
      me->post_seg = 1 + istate->seg;
      freeInterestState (me, istate);
      istate = nextSegmentInterest (me, me->post_seg);
    }
  } else if (me->post_seg > segment) {
    /* this one is arriving very late, throw it out */
    freeInterestState (me, istate);
  } else {
    /* This segment needs to await processing in the queue */
    GST_INFO ("porq - segment needs to wait: %ju", segment);
    istate->size = data_size;
    istate->lastBlock = b_last;
    istate->data = calloc (1, data_size);       /* copy to our own buffer */
    if (NULL == istate->data && 0 != data_size) {
      /* Out of memory: drop the segment rather than leave a queued entry
       * whose data pointer is NULL and would be dereferenced when its turn
       * to be posted arrives. */
      GST_ERROR ("porq - failed to allocate %zu bytes for segment: %ju",
          data_size, segment);
      freeInterestState (me, istate);
      return;
    }
    memcpy (istate->data, data, data_size);
  }
}
void update() { if (object_vec.size() > 0) { if (ballCol) { vec<vec<int> > cont; if (use_quadtree) { quadtree.update(); quadtree.get(cont); } else if (use_fixedgrid) { fixedgrid.update(); fixedgrid.get(cont); } // If we´re using collision optimizations if (use_quadtree || use_fixedgrid) Calc(cont, 0, cont.size()); else if (numThreads > 0) { // Multithreading. DOESNT WORK. int total = object_vec.size(); int parts = total / numThreads; std::vector<std::thread> thread_pool(numThreads); process_segment(parts * numThreads, total); for (int i = 0; i < numThreads; ++i) { thread_pool[i] = std::thread(process_segment, parts * i, parts * (i + 1)); } for (auto &thread : thread_pool) thread.join(); } else process_segment(0, object_vec.size()); } // Update each objects position for (auto &object : object_vec) object->update(); } }
/*
 * Drive the decoder until end-of-image, then hand back the decoded frame.
 *
 * Repeatedly consumes JPEG segments via process_segment() until the global
 * decoder state (iceenv) signals EOI, propagating the first failure code.
 * On success the out-parameters are filled from the decoder state and
 * ERR_OK is returned.
 */
int icejpeg_read(unsigned char **buffer, int *width, int *height, int *num_components)
{
    while (!iceenv.eoi)
    {
        int status = process_segment();
        if (status != ERR_OK)
            return status;          /* abort on the first bad segment */
    }

    /* Publish the decoded image and its SOF0 geometry to the caller. */
    *buffer = iceenv.image;
    *width = iceenv.sof0.width;
    *height = iceenv.sof0.height;
    *num_components = iceenv.sof0.num_components;
    return ERR_OK;
}
/*
** Wrap `s` bytes at `data` into a raw zlib stream (RFC 1950).
**
** The input is chopped into <= 0xffff-byte stored segments by make_arr(),
** each segment is serialized by process_segment() into the buffer it
** accumulates in kek[0] (kek[] is the shared scratch state those helpers
** use; kek[2] temporarily stashes the segment array so it can be freed
** after the walk). The result is: 2-byte header "\x78\x01", the segment
** payload, and the big-endian adler32 of the original data.
**
** Fixes over the previous version:
**   - the trailing checksum was stored with `*(unsigned *)&buf[2 + os]`,
**     an unaligned store that also violates strict aliasing (UB); it is
**     now copied in with ft_memcpy through a properly typed temporary.
**   - malloc() failure is now detected instead of writing through NULL.
**
** data  : bytes to compress (stored, not actually deflated)
** s     : number of input bytes
** osize : out — total size of the returned stream, 0 on failure
** return: malloc'd stream (caller frees), or NULL on allocation failure
*/
void *zlib_stream(void *data, int s, int *osize)
{
	t_size_pair	**seg;
	char		*kek[3];
	char		*out;
	unsigned	sum;
	int			os;

	os = 0;
	seg = make_arr(data, s, 0xffff);
	ft_memset(kek, 0, sizeof(kek));
	kek[2] = (void*)seg;
	while (*seg)
	{
		process_segment(seg, kek, &os);
		++seg;
	}
	free(kek[2]);
	*osize = 2 + os + 4;
	out = malloc(*osize);
	if (out == NULL)
	{
		free(kek[0]);
		*osize = 0;
		return (NULL);
	}
	ft_memcpy(out, "\x78\x01", 2);
	ft_memcpy(&out[2], kek[0], os);
	free(kek[0]);
	sum = bswap_32(adler32(data, s));
	ft_memcpy(&out[2 + os], &sum, sizeof(sum));
	return (out);
}
// Replace every match Finder locates in Input with Formatter's output,
// in place.  The algorithm keeps two iterators into Input — InsertIt
// (write head: everything before it is final) and SearchIt (read head:
// where the next unprocessed text begins) — plus a deque Storage holding
// formatted replacement text that has not yet been written back.
// process_segment() (detail helper) shifts the unmatched text
// [SearchIt, match-begin) down to InsertIt, interleaving the pending
// Storage contents, and returns the advanced write head.
inline void find_format_all_impl2(
    InputT& Input,
    FinderT Finder,
    FormatterT Formatter,
    FindResultT FindResult,
    FormatResultT FormatResult)
{
    typedef BOOST_STRING_TYPENAME
        range_iterator<InputT>::type input_iterator_type;
    typedef find_format_store<
            input_iterator_type,
            FormatterT,
            FormatResultT > store_type;

    // Create store for the find result
    // (M holds the current match range and its formatted replacement)
    store_type M( FindResult, FormatResult, Formatter );

    // Instantiate replacement storage
    std::deque<
        BOOST_STRING_TYPENAME range_value<InputT>::type> Storage;

    // Initialize replacement iterators
    input_iterator_type InsertIt=::pdalboost::begin(Input);
    input_iterator_type SearchIt=::pdalboost::begin(Input);

    while( M )
    {
        // process the segment: copy [SearchIt, M.begin()) down to the
        // write head, draining Storage ahead of it
        InsertIt=process_segment(
            Storage,
            Input,
            InsertIt,
            SearchIt,
            M.begin() );

        // Adjust search iterator — skip over the matched text
        SearchIt=M.end();

        // Copy formatted replace to the storage
        // (queued; written out by the next process_segment call)
        ::pdalboost::algorithm::detail::copy_to_storage( Storage, M.format_result() );

        // Find range for a next match
        M=Finder( SearchIt, ::pdalboost::end(Input) );
    }

    // process the last segment — flush the tail [SearchIt, end) and any
    // remaining Storage that fits into the input's current extent
    InsertIt=::pdalboost::algorithm::detail::process_segment(
        Storage,
        Input,
        InsertIt,
        SearchIt,
        ::pdalboost::end(Input) );

    if ( Storage.empty() )
    {
        // Truncate input — replacements were shorter overall, so drop the
        // leftover gap between the write head and the end
        ::pdalboost::algorithm::detail::erase( Input, InsertIt, ::pdalboost::end(Input) );
    }
    else
    {
        // Copy remaining data to the end of input — replacements were
        // longer overall, so the input must grow
        ::pdalboost::algorithm::detail::insert( Input, ::pdalboost::end(Input), Storage.begin(), Storage.end() );
    }
}