/* Step the sequence backwards by @n positions (signed-count overload).
 *
 * Subtracting a negative count is equivalent to advancing forwards, so the
 * argument handed to next_()/prev_() must always be the non-negative
 * magnitude — mirroring the std::size_t overload, which calls prev_(n)
 * with a positive count.
 *
 * BUG FIX: the original negations were swapped — it called next_(n) with a
 * negative n and prev_(-n) with a positive n, passing a negative magnitude
 * to both helpers. */
infinite& operator-=(const T& n)
{
    if (n < 0) { next_(-n); }  // n < 0: -= (-|n|) means advance by |n|
    if (n > 0) { prev_(n);  }  // n > 0: step back by n
    return *this;
}
/* Record a cluster in the pts -> Cluster seek map and return the iterator to
 * its entry. The entry is built from the element's file position, its global
 * timecode scaled down by 1000, an unknown duration (-1) placeholder, and the
 * element's byte size. If an entry with the same pts already exists it is
 * reused rather than replaced. Whenever the (possibly new) entry is
 * byte-contiguous with its map neighbour, that neighbour pair's duration is
 * derived from the pts difference.
 *
 * NOTE(review): prev_()/next_() appear to return an adjusted copy of the
 * iterator without mutating their argument (next_( it ) is evaluated twice
 * in the same condition/body) — confirm against their definitions. */
SegmentSeeker::cluster_map_t::iterator
SegmentSeeker::add_cluster( KaxCluster * const p_cluster )
{
    Cluster cinfo = {
        /* fpos */ p_cluster->GetElementPosition(),
        /* pts  */ mtime_t( p_cluster->GlobalTimecode() / INT64_C( 1000 ) ),
        /* duration */ mtime_t( -1 ),
        /* size */ p_cluster->GetEndPosition() - p_cluster->GetElementPosition()
    };

    // Also index the cluster by file offset (separate structure).
    add_cluster_position( cinfo.fpos );

    // lower_bound: first entry with key >= cinfo.pts; a pts match means the
    // cluster is already known and its existing entry is kept.
    cluster_map_t::iterator it = _clusters.lower_bound( cinfo.pts );

    if( it != _clusters.end() && it->second.pts == cinfo.pts )
    {
        // cluster already known
    }
    else
    {
        it = _clusters.insert( cluster_map_t::value_type( cinfo.pts, cinfo ) ).first;
    }

    // ------------------------------------------------------------------
    // IF we have two adjacent clusters, update duration where applicable
    // ------------------------------------------------------------------

    struct Duration {
        static void fix( Cluster& prev, Cluster& next )
        {
            // Duration is only derivable when the clusters are contiguous
            // on disk (prev ends exactly where next begins).
            if( ( prev.fpos + prev.size) == next.fpos )
                prev.duration = next.pts - prev.pts;
        }
    };

    // Fix the predecessor's duration against the entry just added/found...
    if( it != _clusters.begin() )
    {
        Duration::fix( prev_( it )->second, it->second );
    }

    // ...and this entry's duration against its successor, if any.
    if( it != _clusters.end() && next_( it ) != _clusters.end() )
    {
        Duration::fix( it->second, next_( it )->second );
    }

    return it;
}
/* Append key @kd to the output key set, skipping the leading parts that are
 * already covered by the previously appended key (branch-prefix
 * deduplication).
 *
 * Returns the number of bytes the set grew by, i.e. size() delta; 0 means
 * the key was fully redundant w.r.t. the previous key (already matched, or
 * shadowed by an exclusive ancestor).
 *
 * NOTE(review): prev_[0] is presumed to be a sentinel/root part so that
 * prev_[anc] is always valid — confirm against the class invariant. */
size_t
KeySetOut::append (const KeyData& kd)
{
    int i(0);

#ifdef CHECK_PREVIOUS_KEY
    /* find common ancestor with the previous key */
    // prev_[0] is the root, so part i of kd is compared against prev_[i + 1].
    for (;
         i < kd.parts_num &&
             size_t(i + 1) < prev_.size() &&
             prev_[i + 1].match(kd.parts[i].ptr, kd.parts[i].len);
         ++i)
    {
#if 0
        log_info << "prev[" << (i+1) << "]\n" << prev_[i+1] << "\nmatches\n"
                 << gu::Hexdump(kd.parts[i].ptr, kd.parts[i].len, true);
#endif /* 0 */
    }
    // log_info << "matched " << i << " parts";

    /* if we have a fully matched key OR common ancestor is exclusive, return */
    if (i > 0)
    {
        assert (size_t(i) < prev_.size());

        if (prev_[i].exclusive())
        {
            // An exclusive ancestor already covers this key: nothing to add.
            assert (prev_.size() == (i + 1U)); // only leaf can be exclusive.
            // log_info << "Returning after matching exclusive key:\n"<< prev_[i];
            return 0;
        }

        if (kd.parts_num == i) /* leaf */
        {
            assert (prev_[i].shared());
            if (kd.shared())
            {
                // Same leaf, both shared: key is a duplicate.
                // log_info << "Returning after matching all " << i << " parts";
                return 0;
            }
            else /* need to add exclusive copy of the key */
                --i; // re-add the leaf part so it gets stored as exclusive
        }
    }

    int const anc(i); // index of the deepest common ancestor in prev_
    const KeyPart* parent(&prev_[anc]);

    // log_info << "Common ancestor: " << anc << ' ' << *parent;
#else
    KeyPart tmp(prev_[0]);
    const KeyPart* const parent(&tmp);
#endif /* CHECK_PREVIOUS_KEY */

    /* create parts that didn't match previous key and add to the set
     * of previously added keys. */
    size_t const old_size (size());
    int j(0); // number of newly created parts
    for (; i < kd.parts_num; ++i, ++j)
    {
        try
        {
            KeyPart kp(added_, *this, parent, kd, i);
#ifdef CHECK_PREVIOUS_KEY
            // Reuse an existing slot in new_ when available, grow otherwise.
            if (size_t(j) < new_.size())
            {
                new_[j] = kp;
            }
            else
            {
                new_().push_back (kp);
            }
            parent = &new_[j];
#else
            if (kd.copy) kp.acquire();
            if (i + 1 != kd.parts_num) tmp = kp; // <- updating parent for next iteration
#endif /* CHECK_PREVIOUS_KEY */
            // log_info << "pushed " << kp;
        }
        catch (KeyPart::DUPLICATE& e)
        {
            assert (i + 1 == kd.parts_num);
            /* There is a very small probability that child part throws DUPLICATE
             * even after parent was added as a new key. It does not matter:
             * a duplicate will be a duplicate in certification as well. */
#ifndef NDEBUG
            log_debug << "Returning after catching a DUPLICATE. Part: " << i;
#endif /* NDEBUG */
            goto out;
        }
    }

    assert (i == kd.parts_num);
    assert (anc + j == kd.parts_num);

#ifdef CHECK_PREVIOUS_KEY
    /* copy new parts to prev_ */
    // prev_ becomes [root, parts 0..anc-1 (kept), new parts anc..parts_num-1].
    prev_().resize(1 + kd.parts_num);
    std::copy(new_().begin(), new_().begin() + j, prev_().begin() + anc + 1);

    /* acquire key part value if it is volatile */
    if (kd.copy)
        for (int k(anc + 1); size_t(k) < prev_.size(); ++k)
        {
            prev_[k].acquire();
        }
#endif /* CHECK_PREVIOUS_KEY */

out:
    // Growth of the serialized set; 0 when DUPLICATE was caught before
    // anything was added.
    return size() - old_size;
}
/* Step the sequence backwards by @n positions (unsigned-count overload).
 * Delegates directly to prev_(), which accepts a non-negative magnitude. */
infinite& operator-=(std::size_t n)
{
    prev_(n);
    return *this;
}