Example #1
void
SynchronizationTrack::slotTagRemoved()
{
    SemaphoreReleaser releaser( &m_semaphore );
    QNetworkReply *reply = qobject_cast<QNetworkReply *>( sender() );
    if( !reply )
    {
        warning() << __PRETTY_FUNCTION__ << "cannot cast sender to QNetworkReply. (?)";
        return;
    }
    reply->deleteLater();

    lastfm::XmlQuery lfm;
    if( !lfm.parse( reply->readAll() ) )
    {
        warning() << __PRETTY_FUNCTION__ << "error removing a tag:" << lfm.parseError().message();
        return;
    }

    // remove the next one; sadly, the API only allows removing one tag at a time
    if( !m_tagsToRemove.isEmpty() )
    {
        releaser.dontRelease();
        emit startTagRemoval();
    }
}
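The SemaphoreReleaser guard used in Examples #1 and #4 is not defined in these snippets. A minimal sketch of what such a guard plausibly looks like, inferred only from the call sites above (the class name and members here are stand-ins, not Amarok's actual implementation): release one permit of a QSemaphore when the slot returns, unless dontRelease() is called because a chained request, such as the next tag removal, takes over responsibility for the permit.

#include <QSemaphore>

// Hypothetical guard in the spirit of SemaphoreReleaser: releases one permit
// of the semaphore on scope exit unless dontRelease() was called.
class SemaphoreReleaserSketch
{
public:
    explicit SemaphoreReleaserSketch( QSemaphore *semaphore )
        : m_semaphore( semaphore )
    {
    }

    ~SemaphoreReleaserSketch()
    {
        if( m_semaphore )
            m_semaphore->release();
    }

    // call when another party, e.g. the slot handling the next chained
    // request, becomes responsible for releasing the semaphore
    void dontRelease()
    {
        m_semaphore = 0;
    }

private:
    QSemaphore *m_semaphore;
};

Keeping the release in a destructor is what makes the early returns in the slots safe: the failed qobject_cast and the parse-error paths all release the semaphore without any extra code.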
Example #2
// Will check for an existing stream for the packet, or create a new one if none exists.
// When creating a new stream, also checks for an existing partner (reverse-direction) stream.
tcp_reassembler_t::stream_set_t::iterator
tcp_reassembler_t::find_or_create_stream(packet_t *packet, const layer_t *tcplay)
{
	assert(tcplay);

	tcp_stream_t *r = claim();
	auto_release_t<tcp_stream_t> releaser(r); // make sure it will be released if we don't use it

	r->set_src_dst_from_packet(packet, false);
	std::pair<stream_set_t::iterator,bool> ituple = d_streams.insert(*r);

	if (ituple.second) // new stream was inserted
	{
		r->init(d_listener);
		releaser.do_not_release();

		// find partner
		tcp_stream_t *pr = claim();
		auto_release_t<tcp_stream_t> partner_releaser(pr); // always release the probe stream used for the lookup

		pr->set_src_dst_from_packet(packet, true);
		stream_set_t::iterator pi = d_streams.find(*pr);
		if (pi != d_streams.end() && pi != ituple.first)
		{
			tcp_stream_t *partner = &*pi;

			// if we already trust sequence numbers, and the other side happens
			// to have seen acks, then its sequence numbers must be close to ours
			const tcphdr &hdr = reinterpret_cast<const tcphdr &>(*tcplay->data());
			bool seqs_are_close = true;

			if (partner->d_smallest_ack != 0 && !is_reasonable_seq(
						seq_nr_t(htonl(hdr.th_seq)), partner->d_smallest_ack))
				seqs_are_close = false;
			if (seqs_are_close && (hdr.th_flags & TH_ACK) && !is_reasonable_seq(
						seq_nr_t(htonl(hdr.th_ack)), partner->d_next_seq))
				seqs_are_close = false;
			if (seqs_are_close)
				r->found_partner(packet, &*pi);
			else
				d_listener->debug_packet(packet, "potential partner found, but sequence numbers too far apart");
		}
	}
	return ituple.first;
}
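The auto_release_t guard above is likewise not part of the snippet. A plausible minimal version, assuming the claimed tcp_stream_t exposes a release() member that hands the object back to the reassembler's pool (both the name and that assumption are illustrative only):

// Hypothetical scope guard in the spirit of auto_release_t: releases the
// claimed object at scope exit unless do_not_release() was invoked.
template <typename T>
struct auto_release_sketch_t
{
	explicit auto_release_sketch_t(T *obj) : d_obj(obj) {}

	~auto_release_sketch_t()
	{
		if (d_obj)
			d_obj->release();
	}

	void do_not_release() { d_obj = NULL; }

private:
	auto_release_sketch_t(const auto_release_sketch_t &);            // non-copyable
	auto_release_sketch_t &operator=(const auto_release_sketch_t &); // non-assignable

	T *d_obj;
};

The do_not_release() call in find_or_create_stream() is the key move: a freshly claimed stream is kept only after the insert into d_streams succeeds, so a duplicate insert or an early exit cannot leak it.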
Example #3
/* Free a key/value pair: the key is always owned by the pair, while the value
 * is handed to the caller-supplied releaser callback, if one was given. */
static void KVPairFree(KVPair * e, void (*releaser)(Type t)) {
	if (e) {
		free(e->key);
		if (releaser) {
			releaser(e->value);
		}
		CO_Free(e);
	}
}
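A hypothetical usage sketch follows. The KVPair layout, the Type alias, and CO_Free are not shown in the snippet, so the definitions below are stand-ins for illustration only; the casts keep it compiling as both C and C++.

#include <stdlib.h>
#include <string.h>

typedef void *Type;

typedef struct KVPair {
	char *key;
	Type value;
} KVPair;

#define CO_Free free /* stand-in for the library's allocator */

/* KVPairFree from Example #3, repeated so the sketch is self-contained */
static void KVPairFree(KVPair * e, void (*releaser)(Type t)) {
	if (e) {
		free(e->key);
		if (releaser) {
			releaser(e->value);
		}
		CO_Free(e);
	}
}

static void releaseValue(Type t) {
	free(t); /* assume the value is a single heap allocation */
}

int main(void) {
	KVPair *e = (KVPair *)malloc(sizeof *e);
	e->key = (char *)malloc(strlen("volume") + 1);
	strcpy(e->key, "volume");
	e->value = malloc(16);
	KVPairFree(e, releaseValue); /* frees key, value, and the pair itself */
	KVPairFree(NULL, releaseValue); /* a NULL pair is a safe no-op */
	return 0;
}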
Example #4
void
SynchronizationTrack::slotTagsAdded()
{
    SemaphoreReleaser releaser( &m_semaphore );
    QNetworkReply *reply = qobject_cast<QNetworkReply *>( sender() );
    if( !reply )
    {
        warning() << __PRETTY_FUNCTION__ << "cannot cast sender to QNetworkReply. (?)";
        return;
    }
    reply->deleteLater();

    lastfm::XmlQuery lfm;
    if( !lfm.parse( reply->readAll() ) )
    {
        warning() << __PRETTY_FUNCTION__ << "error adding tags:" << lfm.parseError().message();
        return;
    }
}
Example #5
    template <typename RepT, typename PeriodT>
    cv_status::cv_status wait_for(unique_lock<mutex>& lock,
                                  const chrono::duration<RepT, PeriodT>& d)
    {
        // First enqueue ourselves in the list of waiters.
        WaitingThread w;
        enqueue(w);

        // We can only unlock the lock when we are sure that a signal will
        // reach our thread.
        detail::lock_releaser<unique_lock<mutex> > releaser(lock);
        // Wait until we receive a signal, then re-lock the lock.
        bool gotSignal = w.signal.try_wait_for(d);

        // If we have received a signal, we have _always_ been dequeued.
        // In the other case, we might or might not have been dequeued, so we
        // have to check the dequeued flag.
        if (!gotSignal)
        {
            maybeDequeue(w);
            return cv_status::timeout;
        }

        return cv_status::no_timeout;
    }
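The detail::lock_releaser above is not shown either; it is the inverse of a lock guard. A minimal sketch under that assumption (hypothetical name, same contract): unlock on construction, re-lock on destruction, so the mutex is re-acquired on every path out of wait_for(), including the timeout return.

// Hypothetical inverse lock guard in the spirit of detail::lock_releaser:
// releases the lock for the duration of the wait and re-acquires it on
// scope exit.
template <typename LockT>
class lock_releaser_sketch
{
public:
    explicit lock_releaser_sketch(LockT& lock)
        : m_lock(lock)
    {
        m_lock.unlock();
    }

    ~lock_releaser_sketch()
    {
        m_lock.lock();
    }

private:
    // non-copyable
    lock_releaser_sketch(const lock_releaser_sketch&);
    lock_releaser_sketch& operator=(const lock_releaser_sketch&);

    LockT& m_lock;
};

Enqueuing the waiter before releasing the lock is what makes this safe: a signal sent between the unlock and the try_wait_for() call is not lost, because the WaitingThread is already registered.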
Example #6
void updateChunkWriteStatsAndSplitIfNeeded(OperationContext* opCtx,
                                           ChunkManager* manager,
                                           Chunk* chunk,
                                           long dataWritten) {
    // Disable lastError tracking so that any errors which occur during the auto-split do not
    // get bubbled up on the client connection doing a write.
    LastError::Disabled d(&LastError::get(cc()));

    const auto balancerConfig = Grid::get(opCtx)->getBalancerConfiguration();

    const bool minIsInf =
        (0 == manager->getShardKeyPattern().getKeyPattern().globalMin().woCompare(chunk->getMin()));
    const bool maxIsInf =
        (0 == manager->getShardKeyPattern().getKeyPattern().globalMax().woCompare(chunk->getMax()));

    const uint64_t chunkBytesWritten = chunk->addBytesWritten(dataWritten);

    const uint64_t desiredChunkSize =
        calculateDesiredChunkSize(balancerConfig->getMaxChunkSizeBytes(), manager->numChunks());

    if (!chunk->shouldSplit(desiredChunkSize, minIsInf, maxIsInf)) {
        return;
    }

    const NamespaceString nss(manager->getns());

    if (!manager->_autoSplitThrottle._splitTickets.tryAcquire()) {
        LOG(1) << "won't auto split because not enough tickets: " << nss;
        return;
    }

    TicketHolderReleaser releaser(&(manager->_autoSplitThrottle._splitTickets));

    const ChunkRange chunkRange(chunk->getMin(), chunk->getMax());

    try {
        // Ensure we have the most up-to-date balancer configuration
        uassertStatusOK(balancerConfig->refreshAndCheck(opCtx));

        if (!balancerConfig->getShouldAutoSplit()) {
            return;
        }

        LOG(1) << "about to initiate autosplit: " << redact(chunk->toString())
               << " dataWritten: " << chunkBytesWritten
               << " desiredChunkSize: " << desiredChunkSize;

        const uint64_t chunkSizeToUse = [&]() {
            const uint64_t estNumSplitPoints = chunkBytesWritten / desiredChunkSize * 2;

            if (estNumSplitPoints >= kTooManySplitPoints) {
                // The current desired chunk size will split the chunk into lots of small chunks;
                // in the worst case this can result in thousands of chunks. So check and see if a
                // bigger value can be used.
                return std::min(chunkBytesWritten, balancerConfig->getMaxChunkSizeBytes());
            } else {
                return desiredChunkSize;
            }
        }();

        auto splitPoints =
            uassertStatusOK(shardutil::selectChunkSplitPoints(opCtx,
                                                              chunk->getShardId(),
                                                              nss,
                                                              manager->getShardKeyPattern(),
                                                              chunkRange,
                                                              chunkSizeToUse,
                                                              boost::none));

        if (splitPoints.size() <= 1) {
            // No split points means there isn't enough data to split on; one split point means
            // the chunk is between half and a full chunk size, so there is no need to split yet
            chunk->clearBytesWritten();
            return;
        }

        if (minIsInf || maxIsInf) {
            // We don't want to reset _dataWritten since we want to check the other side right away
        } else {
            // We're splitting, so should wait a bit
            chunk->clearBytesWritten();
        }

        // We assume that if the chunk being split is the first (or last) one on the collection,
        // this chunk is likely to see more insertions. Instead of splitting mid-chunk, we use the
        // very first (or last) key as a split point.
        //
        // This heuristic is skipped for "special" shard key patterns that are not likely to produce
        // monotonically increasing or decreasing values (e.g. hashed shard keys).
        if (KeyPattern::isOrderedKeyPattern(manager->getShardKeyPattern().toBSON())) {
            if (minIsInf) {
                BSONObj key = findExtremeKeyForShard(
                    opCtx, nss, chunk->getShardId(), manager->getShardKeyPattern(), true);
                if (!key.isEmpty()) {
                    splitPoints.front() = key.getOwned();
                }
            } else if (maxIsInf) {
                BSONObj key = findExtremeKeyForShard(
                    opCtx, nss, chunk->getShardId(), manager->getShardKeyPattern(), false);
                if (!key.isEmpty()) {
                    splitPoints.back() = key.getOwned();
                }
            }
        }

        const auto suggestedMigrateChunk =
            uassertStatusOK(shardutil::splitChunkAtMultiplePoints(opCtx,
                                                                  chunk->getShardId(),
                                                                  nss,
                                                                  manager->getShardKeyPattern(),
                                                                  manager->getVersion(),
                                                                  chunkRange,
                                                                  splitPoints));

        // Balance the resulting chunks if the option is enabled and if the shard suggested a chunk
        // to balance
        const bool shouldBalance = [&]() {
            if (!balancerConfig->shouldBalanceForAutoSplit())
                return false;

            auto collStatus =
                Grid::get(opCtx)->catalogClient()->getCollection(opCtx, manager->getns());
            if (!collStatus.isOK()) {
                log() << "Auto-split for " << nss << " failed to load collection metadata"
                      << causedBy(redact(collStatus.getStatus()));
                return false;
            }

            return collStatus.getValue().value.getAllowBalance();
        }();

        log() << "autosplitted " << nss << " chunk: " << redact(chunk->toString()) << " into "
              << (splitPoints.size() + 1) << " parts (desiredChunkSize " << desiredChunkSize << ")"
              << (suggestedMigrateChunk ? (std::string) " (migrate suggested" +
                          (shouldBalance ? ")" : ", but no migrations allowed)") : "");

        // Reload the chunk manager after the split
        auto routingInfo = uassertStatusOK(
            Grid::get(opCtx)->catalogCache()->getShardedCollectionRoutingInfoWithRefresh(opCtx,
                                                                                         nss));

        if (!shouldBalance || !suggestedMigrateChunk) {
            return;
        }

        // Top chunk optimization - try to move the top chunk out of this shard to prevent the hot
        // spot from staying on a single shard. This is based on the assumption that succeeding
        // inserts will fall on the top chunk.

        // We need to use the latest chunk manager (after the split) in order to have the most
        // up-to-date view of the chunk we are about to move
        auto suggestedChunk = routingInfo.cm()->findIntersectingChunkWithSimpleCollation(
            suggestedMigrateChunk->getMin());

        ChunkType chunkToMove;
        chunkToMove.setNS(nss.ns());
        chunkToMove.setShard(suggestedChunk->getShardId());
        chunkToMove.setMin(suggestedChunk->getMin());
        chunkToMove.setMax(suggestedChunk->getMax());
        chunkToMove.setVersion(suggestedChunk->getLastmod());

        uassertStatusOK(configsvr_client::rebalanceChunk(opCtx, chunkToMove));

        // Ensure the collection gets reloaded because of the move
        Grid::get(opCtx)->catalogCache()->invalidateShardedCollection(nss);
    } catch (const DBException& ex) {
        chunk->clearBytesWritten();

        if (ErrorCodes::isStaleShardingError(ErrorCodes::Error(ex.getCode()))) {
            log() << "Unable to auto-split chunk " << redact(chunkRange.toString()) << causedBy(ex)
                  << ", going to invalidate routing table entry for " << nss;
            Grid::get(opCtx)->catalogCache()->invalidateShardedCollection(nss);
        }
    }
}
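TicketHolderReleaser pairs with the tryAcquire() call above: it returns the split ticket when the function exits, whether through one of the early returns or the catch block, which caps how many auto-splits can run concurrently. MongoDB's real TicketHolder is not shown here; a minimal self-contained sketch of the pattern (the Sketch suffix marks these as stand-ins):

#include <mutex>

// Stand-in for the TicketHolder used above; assumes only the tryAcquire()
// and release() calls visible in the snippet.
class TicketHolderSketch {
public:
    explicit TicketHolderSketch(int tickets) : _available(tickets) {}

    bool tryAcquire() {
        std::lock_guard<std::mutex> lk(_mutex);
        if (_available == 0)
            return false;
        --_available;
        return true;
    }

    void release() {
        std::lock_guard<std::mutex> lk(_mutex);
        ++_available;
    }

private:
    std::mutex _mutex;
    int _available;
};

// RAII guard in the spirit of TicketHolderReleaser: gives the acquired
// ticket back at scope exit, on normal returns and on exceptions alike.
class TicketReleaserSketch {
public:
    explicit TicketReleaserSketch(TicketHolderSketch* holder) : _holder(holder) {}

    ~TicketReleaserSketch() {
        _holder->release();
    }

    TicketReleaserSketch(const TicketReleaserSketch&) = delete;
    TicketReleaserSketch& operator=(const TicketReleaserSketch&) = delete;

private:
    TicketHolderSketch* const _holder;
};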