/*
 * Decode one video packet through the hardware-accelerated codec context.
 *
 * encodedPacket - compressed input packet; its timestamp is invalidated when
 *                 no frame was produced by this call.
 * decoded       - output buffer; filled via VideoFrame::create() when a frame
 *                 is ready.
 * flush         - forwarded to decodeFirstStep() (flush/drain handling).
 * (unnamed)     - hurry-up level, ignored by the hwaccel path.
 *
 * Returns the number of input bytes consumed (never negative).
 *
 * NOTE(review): `packet` is passed to avcodec_decode_video2() without '&',
 * so it is presumably an AVPacket* member of this class, primed by
 * decodeFirstStep() — confirm against the class declaration.
 */
int FFDecHWAccel::decode(Packet &encodedPacket, QByteArray &decoded, bool flush, unsigned)
{
	decodeFirstStep(encodedPacket, flush);

	int hasFrame = 0;
	const int consumed = avcodec_decode_video2(codec_ctx, frame, &hasFrame, packet);

	if (!hasFrame)
	{
		// No picture produced — the packet's timestamp no longer maps to output.
		encodedPacket.ts.setInvalid();
		return consumed < 0 ? 0 : consumed;
	}

	// Wrap the decoder-owned planes; hwaccel output needs no software scaling here.
	VideoFrame::create(decoded, frame->data, frame->linesize, frame->interlaced_frame, frame->top_field_first);
	decodeLastStep(encodedPacket, frame);

	return consumed < 0 ? 0 : consumed;
}
/*
 * Software decode of one packet (audio or video).
 *
 * encodedPacket - compressed input; its timestamp is replaced by the frame's
 *                 best-effort timestamp on success, or invalidated on failure.
 * decoded       - output buffer: interleaved float samples for audio, or a
 *                 VideoFrame (scaled through sws_ctx) for video.
 * flush         - forwarded to decodeFirstStep() (flush/drain handling).
 * hurry_up      - >0 enables fast/lossy decoding shortcuts for video;
 *                 NOTE(review): the `~hurry_up` test below suggests the
 *                 all-ones value (~0u) means "decode but don't output" —
 *                 confirm against callers.
 *
 * Returns the number of input bytes consumed (never negative).
 */
int FFDecSW::decode( Packet &encodedPacket, QByteArray &decoded, bool flush, unsigned hurry_up )
{
	int bytes_consumed = 0, frameFinished = 0;
	AVPacket packet;
	decodeFirstStep( packet, encodedPacket, flush );
	switch ( codec_ctx->codec_type )
	{
		case AVMEDIA_TYPE_AUDIO:
			bytes_consumed = avcodec_decode_audio4( codec_ctx, frame, &frameFinished, &packet );
			if ( frameFinished )
			{
				/* Convert whatever sample format the codec produced into
				   interleaved 32-bit float in `decoded`. */
				const int samples_with_channels = frame->nb_samples * codec_ctx->channels;
				const int decoded_size = samples_with_channels * sizeof( float );
				if ( decoded.size() != decoded_size )
					decoded.resize( decoded_size );
				float *decoded_data = ( float * )decoded.data();
				switch ( codec_ctx->sample_fmt )
				{
					/* Packed (interleaved) formats: a single plane in frame->data[0]. */
					case AV_SAMPLE_FMT_U8:
					{
						uint8_t *data = ( uint8_t * )*frame->data;
						for ( int i = 0 ; i < samples_with_channels ; i++ )
							/* NOTE(review): unsigned 8-bit midpoint is usually 0x80;
							   0x7F leaves a tiny DC offset — confirm intended. */
							decoded_data[ i ] = ( data[ i ] - 0x7F ) / 128.0f;
					} break;
					case AV_SAMPLE_FMT_S16:
					{
						int16_t *data = ( int16_t * )*frame->data;
						for ( int i = 0 ; i < samples_with_channels ; i++ )
							decoded_data[ i ] = data[ i ] / 32768.0f;
					} break;
					case AV_SAMPLE_FMT_S32:
					{
						int32_t *data = ( int32_t * )*frame->data;
						for ( int i = 0 ; i < samples_with_channels ; i++ )
							decoded_data[ i ] = data[ i ] / 2147483648.0f;
					} break;
					case AV_SAMPLE_FMT_FLT:
						/* Already interleaved float — straight copy. */
						memcpy( decoded_data, *frame->data, decoded_size );
						break;
					case AV_SAMPLE_FMT_DBL:
					{
						double *data = ( double * )*frame->data;
						for ( int i = 0 ; i < samples_with_channels ; i++ )
							decoded_data[ i ] = data[ i ];
					} break;
					/* Thanks Wang Bin for this patch */
					/* Planar formats: one plane per channel in frame->extended_data;
					   the loops interleave them while converting to float. */
					case AV_SAMPLE_FMT_U8P:
					{
						uint8_t **data = ( uint8_t ** )frame->extended_data;
						for ( int i = 0 ; i < frame->nb_samples ; ++i )
							for ( int ch = 0; ch < codec_ctx->channels; ++ch )
								*decoded_data++ = ( data[ ch ][ i ] - 0x7F ) / 128.0f;
					} break;
					case AV_SAMPLE_FMT_S16P:
					{
						int16_t **data = ( int16_t ** )frame->extended_data;
						for ( int i = 0; i < frame->nb_samples ; ++i )
							for ( int ch = 0 ; ch < codec_ctx->channels ; ++ch )
								*decoded_data++ = data[ ch ][ i ] / 32768.0f;
					} break;
					case AV_SAMPLE_FMT_S32P:
					{
						int32_t **data = ( int32_t ** )frame->extended_data;
						for ( int i = 0 ; i < frame->nb_samples ; ++i )
							for ( int ch = 0 ; ch < codec_ctx->channels ; ++ch )
								*decoded_data++ = data[ ch ][ i ] / 2147483648.0f;
					} break;
					case AV_SAMPLE_FMT_FLTP:
					{
						float **data = ( float ** )frame->extended_data;
						for ( int i = 0 ; i < frame->nb_samples ; ++i )
							for ( int ch = 0 ; ch < codec_ctx->channels ; ++ch )
								*decoded_data++ = data[ ch ][ i ];
					} break;
					case AV_SAMPLE_FMT_DBLP:
					{
						double **data = ( double ** )frame->extended_data;
						for ( int i = 0 ; i < frame->nb_samples ; ++i )
							for ( int ch = 0 ; ch < codec_ctx->channels ; ++ch )
								*decoded_data++ = data[ ch ][ i ];
					} break;
					/**/
					default:
						/* Unsupported sample format — emit no audio. */
						decoded.clear();
						break;
				}
			}
			break;
		case AVMEDIA_TYPE_VIDEO:
		{
			/* Configure lossy speed-ups BEFORE the decode call: skip non-reference
			   frames / loop filter / IDCT depending on how far behind we are. */
			if ( respectHurryUP && hurry_up )
			{
				if ( skipFrames && !forceSkipFrames && hurry_up > 1 )
					codec_ctx->skip_frame = AVDISCARD_NONREF;
				codec_ctx->skip_loop_filter = AVDISCARD_ALL;
				if ( hurry_up > 1 )
					codec_ctx->skip_idct = AVDISCARD_NONREF;
				codec_ctx->flags2 |= CODEC_FLAG2_FAST;
			}
			else
			{
				/* Not hurried — restore full-quality decoding. */
				if ( !forceSkipFrames )
					codec_ctx->skip_frame = AVDISCARD_DEFAULT;
				codec_ctx->skip_loop_filter = codec_ctx->skip_idct = AVDISCARD_DEFAULT;
				codec_ctx->flags2 &= ~CODEC_FLAG2_FAST;
			}
			bytes_consumed = avcodec_decode_video2( codec_ctx, frame, &frameFinished, &packet );
			if ( forceSkipFrames ) //We cannot skip on the first frame, because then the interlacing information may not be read
				codec_ctx->skip_frame = AVDISCARD_NONREF;
			/* `~hurry_up` is zero only when hurry_up == ~0u, so that sentinel
			   value decodes without producing output. */
			if ( frameFinished && ~hurry_up )
			{
				VideoFrame *videoFrame = VideoFrame::create( decoded, streamInfo->W, streamInfo->H, frame->interlaced_frame, frame->top_field_first );
				/* Convert/scale the decoded picture into the output frame. */
				sws_scale( sws_ctx, frame->data, frame->linesize, 0, frame->height, videoFrame->data, videoFrame->linesize );
			}
		} break;
		default:
			break;
	}
	/* Propagate the frame timestamp to the packet, or mark it invalid when
	   this call produced no frame. */
	if ( frameFinished )
	{
		if ( frame->best_effort_timestamp != QMPLAY2_NOPTS_VALUE )
			encodedPacket.ts = frame->best_effort_timestamp * time_base;
	}
	else
		encodedPacket.ts.setInvalid();
	/* FFmpeg returns a negative error code on failure — report 0 consumed. */
	if ( bytes_consumed < 0 )
		bytes_consumed = 0;
	return bytes_consumed;
}