Example 1
0
/**
 * Socket incoming data signal handler
 * use for audio, control and timing socket
 *
 * Parses the RTP header of the incoming datagram, dispatches control
 * packets (sync, timing) immediately, and queues audio packets for
 * playback, requesting retransmission of any missing sequence numbers.
 */
void MythRAOPConnection::udpDataReady(QByteArray buf, QHostAddress peer,
                                      quint16 port)
{
    // restart the idle timer
    if (m_watchdogTimer)
        m_watchdogTimer->start(10000);

    if (!m_audio || !m_codec || !m_codeccontext)
        return;

    // Guard the buf[0] access in the log below: indexing an empty
    // QByteArray is undefined behaviour.
    if (buf.isEmpty())
        return;

    uint8_t  type;
    uint16_t seq;
    uint64_t timestamp;

    if (!GetPacketType(buf, type, seq, timestamp))
    {
        LOG(VB_GENERAL, LOG_DEBUG, LOC +
            QString("Packet doesn't start with valid Rtp Header (0x%1)")
            .arg((uint8_t)buf[0], 0, 16));
        return;
    }

    switch (type)
    {
        case SYNC:
        case FIRSTSYNC:
            ProcessSync(buf);
            ProcessAudio();
            return;

        case FIRSTAUDIO_DATA:
            m_nextSequence  = seq;
            m_nextTimestamp = timestamp;
            // With iTunes we know what the first sequence is going to be.
            // iOS device do not tell us before streaming start what the first
            // packet is going to be.
            m_streamingStarted = true;
            break;

        case AUDIO_DATA:
        case AUDIO_RESEND:
            break;

        case TIMING_RESPONSE:
            ProcessTimeResponse(buf);
            return;

        default:
            LOG(VB_GENERAL, LOG_DEBUG, LOC +
                QString("Packet type (0x%1) not handled")
                .arg(type, 0, 16));
            return;
    }

    timestamp = framesToMs(timestamp);
    if (timestamp < m_currentTimestamp)
    {
        LOG(VB_GENERAL, LOG_DEBUG, LOC +
            QString("Received packet %1 too late, ignoring")
            .arg(seq));
        return;
    }
    // regular data packet
    if (type == AUDIO_DATA || type == FIRSTAUDIO_DATA)
    {
        // A gap in the sequence numbers means packets were lost in
        // transit: ask the sender to resend the missing range.
        if (m_streamingStarted && seq != m_nextSequence)
            SendResendRequest(timestamp, m_nextSequence, seq);

        m_nextSequence     = seq + 1;
        m_nextTimestamp    = timestamp;
        m_streamingStarted = true;
    }

    if (!m_streamingStarted)
        return;

    // resent packet
    if (type == AUDIO_RESEND)
    {
        if (m_resends.contains(seq))
        {
            LOG(VB_GENERAL, LOG_DEBUG, LOC +
                QString("Received required resend %1 (with ts:%2 last:%3)")
                .arg(seq).arg(timestamp).arg(m_nextSequence));
            m_resends.remove(seq);
        }
        else
            LOG(VB_GENERAL, LOG_WARNING, LOC +
                QString("Received unexpected resent packet %1")
                .arg(seq));
    }

    // Check that the audio packet is valid, do so by decoding it. If an error
    // occurs, ask to resend it
    QList<AudioData> *decoded = new QList<AudioData>();
    int numframes = decodeAudioPacket(type, &buf, decoded);
    if (numframes < 0)
    {
        // an error occurred, ask for the audio packet once again.
        LOG(VB_GENERAL, LOG_ERR, LOC + QString("Error decoding audio"));
        SendResendRequest(timestamp, seq, seq+1);
        // FIX: 'decoded' was leaked on this error path.
        delete decoded;
        return;
    }
    AudioPacket frames;
    frames.seq    = seq;
    frames.data   = decoded; // ownership transfers to the audio queue
    m_audioQueue.insert(timestamp, frames);
    ProcessAudio();
}
//------------------------------------------------------------------------------
void videoDecodingThread(ThreadInfo* p_threadInfo)
{
    // Read ThreadInfo struct, then delete it
    FFmpegVideoPlayer* videoPlayer = p_threadInfo->videoPlayer;
    VideoInfo& videoInfo = videoPlayer->getVideoInfo();
    boost::mutex* playerMutex = p_threadInfo->playerMutex;
    boost::condition_variable* playerCondVar = p_threadInfo->playerCondVar;
    boost::mutex* decodeMutex = p_threadInfo->decodingMutex;
    boost::condition_variable* decodeCondVar = p_threadInfo->decodingCondVar;
    bool isLoop = p_threadInfo->isLoop;
    staticOgreLog = videoPlayer->getLog();
    delete p_threadInfo;
    
    // Initialize FFmpeg  
    av_register_all();
    av_log_set_callback(log_callback);
    av_log_set_level(AV_LOG_WARNING);
    
    // Initialize video decoding, filling the VideoInfo
    // Open the input file
    AVFormatContext* formatContext = NULL;
    const char* name = videoPlayer->getVideoFilename().c_str();
    if (avformat_open_input(&formatContext, name, NULL, NULL) < 0) 
    {
        videoInfo.error = "Could not open input: ";
        videoInfo.error.append(videoPlayer->getVideoFilename());
        playerCondVar->notify_all();
        return;
    }
    
    // Read stream information
    if (avformat_find_stream_info(formatContext, NULL) < 0) 
    {
        videoInfo.error = "Could not find stream information.";
        playerCondVar->notify_all();
        return;
    }
    
    // Get streams
    // Audio stream
    AVStream* audioStream = NULL;
    AVCodecContext* audioCodecContext = NULL;
    int audioStreamIndex = -1;
    if (!openCodecContext(formatContext, AVMEDIA_TYPE_AUDIO, videoInfo, audioStreamIndex)) 
    {
        // The error itself is set by openCodecContext
        playerCondVar->notify_all();
        return;
    }
    audioStream = formatContext->streams[audioStreamIndex];
    audioCodecContext = audioStream->codec;
    
    // Video stream
    AVStream* videoStream = NULL;
    AVCodecContext* videoCodecContext = NULL;
    int videoStreamIndex = -1;
    if (!openCodecContext(formatContext, AVMEDIA_TYPE_VIDEO, videoInfo, videoStreamIndex)) 
    {
        // The error itself is set by openCodecContext
        playerCondVar->notify_all();
        return;
    }
    videoStream = formatContext->streams[videoStreamIndex];
    videoCodecContext = videoStream->codec;
    
    // Dump information
    av_dump_format(formatContext, 0, videoPlayer->getVideoFilename().c_str(), 0);
    
    // Store useful information in VideoInfo struct
    double timeBase = ((double)audioStream->time_base.num) / (double)audioStream->time_base.den;
    videoInfo.audioDuration = audioStream->duration * timeBase;
    videoInfo.audioSampleRate = audioCodecContext->sample_rate;
    videoInfo.audioBitRate = audioCodecContext->bit_rate;
    videoInfo.audioNumChannels = 
            videoInfo.audioNumChannels > 0 ? videoInfo.audioNumChannels : audioCodecContext->channels;
    
    timeBase = ((double)videoStream->time_base.num) / (double)videoStream->time_base.den;
    videoInfo.videoDuration = videoStream->duration * timeBase;
    videoInfo.videoWidth = videoCodecContext->width;
    videoInfo.videoHeight = videoCodecContext->height;
    
    // If the a duration is below 0 seconds, something is very fishy. 
    // Use format duration instead, it's the best guess we have
    if (videoInfo.audioDuration < 0.0)
    {
        videoInfo.audioDuration = ((double)formatContext->duration) / AV_TIME_BASE;
    }
    if (videoInfo.videoDuration < 0.0)
    {
        videoInfo.videoDuration = ((double)formatContext->duration) / AV_TIME_BASE;
    }
 
    // Store the longer of both durations. This is what determines when looped videos
    // will begin anew
    videoInfo.longerDuration = videoInfo.videoDuration > videoInfo.audioDuration ? 
                                videoInfo.videoDuration : videoInfo.audioDuration;
            
    // Wake up video player
    videoInfo.infoFilled = true;
    playerCondVar->notify_all();
    
    // Initialize packet, set data to NULL, let the demuxer fill it
    AVPacket packet;
    av_init_packet(&packet);
    packet.data = NULL;
    packet.size = 0;
    
    // Initialize SWS context
    SwsContext* swsContext = NULL;
    swsContext = sws_getCachedContext(swsContext,
                                videoInfo.videoWidth, videoInfo.videoHeight, videoCodecContext->pix_fmt, 
                                videoInfo.videoWidth, videoInfo.videoHeight, PIX_FMT_RGBA, 
                                SWS_BICUBIC, NULL, NULL, NULL);
    
    // Create destination picture
    AVFrame* destPic = avcodec_alloc_frame();
    avpicture_alloc((AVPicture*)destPic, PIX_FMT_RGBA, videoInfo.videoWidth, videoInfo.videoHeight);
    
    // Get the correct target channel layout
    uint64_t targetChannelLayout;
    // Keep the source layout
    if (audioCodecContext->channels == videoInfo.audioNumChannels)
    {
        targetChannelLayout = audioCodecContext->channel_layout;
    }
    // Or determine a new one
    else
    {
        switch (videoInfo.audioNumChannels)
        {
            case 1:
                targetChannelLayout = AV_CH_LAYOUT_MONO;
                break;
                
            case 2:
                targetChannelLayout = AV_CH_LAYOUT_STEREO;
                break;
                
            default:
                targetChannelLayout = audioCodecContext->channel_layout;
                break;
        }
    }
    
    // Initialize SWR context
    SwrContext* swrContext = swr_alloc_set_opts(NULL, 
                targetChannelLayout, AV_SAMPLE_FMT_FLT, audioCodecContext->sample_rate,
                audioCodecContext->channel_layout, audioCodecContext->sample_fmt, audioCodecContext->sample_rate, 
                0, NULL);
    int result = swr_init(swrContext);
    if (result != 0) 
    {
        videoInfo.error = "Could not initialize swr context: " + boost::lexical_cast<std::string>(result);
        playerCondVar->notify_all();
        return;
    }
    
    // Create destination sample buffer
    uint8_t** destBuffer = NULL;
    int destBufferLinesize;
    av_samples_alloc_array_and_samples( &destBuffer,
                                        &destBufferLinesize,
                                        videoInfo.audioNumChannels,
                                        2048,
                                        AV_SAMPLE_FMT_FLT,
                                        0);
    
    // Main decoding loop
    // Read the input file frame by frame
    AVFrame* frame = NULL;
    while (av_read_frame(formatContext, &packet) >= 0) 
    {
        // Only start decoding when at least one of the buffers is not full
        while (videoPlayer->getVideoBufferIsFull() && videoPlayer->getAudioBufferIsFull())
        {
            boost::unique_lock<boost::mutex> lock(*decodeMutex);
            boost::chrono::steady_clock::time_point const timeOut = 
                boost::chrono::steady_clock::now() + boost::chrono::milliseconds((int)videoPlayer->getBufferTarget() * 1000);
            decodeCondVar->wait_until(lock, timeOut);
            
            if (videoInfo.decodingAborted)
            {
                break;
            }
        }
            
        // Break if the decoding was aborted
        if (videoInfo.decodingAborted)
        {
            break;
        }
        
        // Initialize frame
        if (!frame) 
        {
            if (!(frame = avcodec_alloc_frame())) 
            {
                videoInfo.error = "Out of memory.";
                return;
            }
        } 
        else
        {
            avcodec_get_frame_defaults(frame);
        }
        
        // Decode the packet
        AVPacket orig_pkt = packet;
        do 
        {
            int decoded = 0;
            if (packet.stream_index == audioStreamIndex)
            {
                decoded = decodeAudioPacket(packet, audioCodecContext, audioStream, frame, swrContext,
                                            destBuffer, destBufferLinesize, videoPlayer, videoInfo, isLoop);
            }
            else if (packet.stream_index == videoStreamIndex)
            {
                decoded = decodeVideoPacket(packet, videoCodecContext, videoStream, frame, swsContext, 
                                            (AVPicture*)destPic, videoPlayer, videoInfo, isLoop);
            }
            else
            {
                // This means that we have a stream that is neither our video nor audio stream
                // Just skip the package
                break;
            }
            
            // decoded will be negative on an error
            if (decoded < 0)
            {
                // The error itself is set by the decode functions
                playerCondVar->notify_all();
                return;
            }
            
            // Increment data pointer, subtract from size
            packet.data += decoded;
            packet.size -= decoded;
        } while (packet.size > 0);
        
        av_free_packet(&orig_pkt);
    }
    
    // We're done. Close everything
    avcodec_free_frame(&frame);
    avpicture_free((AVPicture*)destPic);
    avcodec_free_frame(&destPic);
    avcodec_close(videoCodecContext);
    avcodec_close(audioCodecContext);
    sws_freeContext(swsContext);
    av_freep(&destBuffer[0]);
    swr_free(&swrContext);
    avformat_close_input(&formatContext);
    
    videoInfo.audioDuration = videoInfo.audioDecodedDuration;
    videoInfo.decodingDone = videoInfo.decodingAborted ? false : true;
}