static av_cold int libgsm_decode_init(AVCodecContext *avctx) {
    LibGSMDecodeContext *s = avctx->priv_data;

    if (avctx->channels > 1) {
        av_log(avctx, AV_LOG_ERROR, "Mono required for GSM, got %d channels\n",
               avctx->channels);
        return -1;
    }

    if (!avctx->channels)
        avctx->channels = 1;

    if (!avctx->sample_rate)
        avctx->sample_rate = 8000;

    avctx->sample_fmt = AV_SAMPLE_FMT_S16;

    s->state = gsm_create();

    switch (avctx->codec_id) {
    case CODEC_ID_GSM:
        avctx->frame_size  = GSM_FRAME_SIZE;
        avctx->block_align = GSM_BLOCK_SIZE;
        break;
    case CODEC_ID_GSM_MS: {
        int one = 1;
        gsm_option(s->state, GSM_OPT_WAV49, &one);
        avctx->frame_size  = 2 * GSM_FRAME_SIZE;
        avctx->block_align = GSM_MS_BLOCK_SIZE;
    }
    }

    avcodec_get_frame_defaults(&s->frame);
    avctx->coded_frame = &s->frame;

    return 0;
}
static av_cold int amrwb_decode_init(AVCodecContext *avctx) {
    AMRWBContext *ctx = avctx->priv_data;
    int i;

    if (avctx->channels > 1) {
        av_log_missing_feature(avctx, "multi-channel AMR", 0);
        return AVERROR_PATCHWELCOME;
    }

    avctx->channels       = 1;
    avctx->channel_layout = AV_CH_LAYOUT_MONO;
    if (!avctx->sample_rate)
        avctx->sample_rate = 16000;
    avctx->sample_fmt = AV_SAMPLE_FMT_FLT;

    av_lfg_init(&ctx->prng, 1);

    ctx->excitation  = &ctx->excitation_buf[AMRWB_P_DELAY_MAX + LP_ORDER + 1];
    ctx->first_frame = 1;

    for (i = 0; i < LP_ORDER; i++)
        ctx->isf_past_final[i] = isf_init[i] * (1.0f / (1 << 15));

    for (i = 0; i < 4; i++)
        ctx->prediction_error[i] = MIN_ENERGY;

    avcodec_get_frame_defaults(&ctx->avframe);
    avctx->coded_frame = &ctx->avframe;

    ff_acelp_filter_init(&ctx->acelpf_ctx);
    ff_acelp_vectors_init(&ctx->acelpv_ctx);
    ff_celp_filter_init(&ctx->celpf_ctx);
    ff_celp_math_init(&ctx->celpm_ctx);

    return 0;
}
static av_cold int aac_decode_init(AVCodecContext *avctx) {
    AACContext *s = avctx->priv_data;
    AAC_DECODER_ERROR err;
    int ret = AVERROR(EINVAL);

    s->handle = aacDecoder_Open(avctx->extradata_size ? TT_MP4_RAW : TT_MP4_ADTS, 1);
    if (!s->handle) {
        /* defensive check added: aacDecoder_Open() returns NULL on failure */
        av_log(avctx, AV_LOG_ERROR, "Error opening decoder\n");
        return AVERROR_UNKNOWN;
    }

    if (avctx->extradata_size) {
        if ((err = aacDecoder_ConfigRaw(s->handle, &avctx->extradata,
                                        &avctx->extradata_size)) != AAC_DEC_OK) {
            av_log(avctx, AV_LOG_WARNING, "Unable to set extradata\n");
            goto error;
        }
    }

    avcodec_get_frame_defaults(&s->frame);
    avctx->coded_frame = &s->frame;
    avctx->sample_fmt  = AV_SAMPLE_FMT_S16;

    return 0;
error:
    return ret;
}
static av_cold int bmp_encode_init(AVCodecContext *avctx) {
    BMPContext *s = avctx->priv_data;

    avcodec_get_frame_defaults(&s->picture);
    avctx->coded_frame = &s->picture;

    switch (avctx->pix_fmt) {
    case AV_PIX_FMT_BGRA:
        avctx->bits_per_coded_sample = 32;
        break;
    case AV_PIX_FMT_BGR24:
        avctx->bits_per_coded_sample = 24;
        break;
    case AV_PIX_FMT_RGB555:
    case AV_PIX_FMT_RGB565:
    case AV_PIX_FMT_RGB444:
        avctx->bits_per_coded_sample = 16;
        break;
    case AV_PIX_FMT_RGB8:
    case AV_PIX_FMT_BGR8:
    case AV_PIX_FMT_RGB4_BYTE:
    case AV_PIX_FMT_BGR4_BYTE:
    case AV_PIX_FMT_GRAY8:
    case AV_PIX_FMT_PAL8:
        avctx->bits_per_coded_sample = 8;
        break;
    case AV_PIX_FMT_MONOBLACK:
        avctx->bits_per_coded_sample = 1;
        break;
    default:
        av_log(avctx, AV_LOG_INFO, "unsupported pixel format\n");
        return -1;
    }

    return 0;
}
static av_cold int amrnb_decode_init(AVCodecContext *avctx) {
    AMRContext *p = avctx->priv_data;
    int i;

    if (avctx->channels > 1) {
        av_log_missing_feature(avctx, "multi-channel AMR", 0);
        return AVERROR_PATCHWELCOME;
    }

    avctx->channels       = 1;
    avctx->channel_layout = AV_CH_LAYOUT_MONO;
    avctx->sample_rate    = 8000;
    avctx->sample_fmt     = AV_SAMPLE_FMT_FLT;

    // p->excitation always points to the same position in p->excitation_buf
    p->excitation = &p->excitation_buf[PITCH_DELAY_MAX + LP_FILTER_ORDER + 1];

    for (i = 0; i < LP_FILTER_ORDER; i++) {
        p->prev_lsp_sub4[i] = lsp_sub4_init[i] * 1000 / (float)(1 << 15);
        p->lsf_avg[i] = p->lsf_q[3][i] = lsp_avg_init[i] / (float)(1 << 15);
    }

    for (i = 0; i < 4; i++)
        p->prediction_error[i] = MIN_ENERGY;

    avcodec_get_frame_defaults(&p->avframe);
    avctx->coded_frame = &p->avframe;

    ff_acelp_filter_init(&p->acelpf_ctx);
    ff_acelp_vectors_init(&p->acelpv_ctx);
    ff_celp_filter_init(&p->celpf_ctx);
    ff_celp_math_init(&p->celpm_ctx);

    return 0;
}
static av_cold int gsm_init(AVCodecContext *avctx) {
    GSMContext *s = avctx->priv_data;

    avctx->channels = 1;
    if (!avctx->sample_rate)
        avctx->sample_rate = 8000;
    avctx->sample_fmt = AV_SAMPLE_FMT_S16;

    switch (avctx->codec_id) {
    case AV_CODEC_ID_GSM:
        avctx->frame_size  = GSM_FRAME_SIZE;
        avctx->block_align = GSM_BLOCK_SIZE;
        break;
    case AV_CODEC_ID_GSM_MS:
        avctx->frame_size  = 2 * GSM_FRAME_SIZE;
        avctx->block_align = GSM_MS_BLOCK_SIZE;
    }

    avcodec_get_frame_defaults(&s->frame);
    avctx->coded_frame = &s->frame;

    return 0;
}
static av_cold int decode_init(AVCodecContext *avctx) {
    NellyMoserDecodeContext *s = avctx->priv_data;

    s->avctx      = avctx;
    s->imdct_out  = s->imdct_buf[0];
    s->imdct_prev = s->imdct_buf[1];
    av_lfg_init(&s->random_state, 0);
    ff_mdct_init(&s->imdct_ctx, 8, 1, 1.0);

    ff_dsputil_init(&s->dsp, avctx);

    if (avctx->request_sample_fmt == AV_SAMPLE_FMT_FLT) {
        s->scale_bias     = 1.0 / (32768 * 8);
        avctx->sample_fmt = AV_SAMPLE_FMT_FLT;
    } else {
        s->scale_bias     = 1.0 / (1 * 8);
        avctx->sample_fmt = AV_SAMPLE_FMT_S16;
        ff_fmt_convert_init(&s->fmt_conv, avctx);
        s->float_buf = av_mallocz(NELLY_SAMPLES * sizeof(*s->float_buf));
        if (!s->float_buf) {
            av_log(avctx, AV_LOG_ERROR, "error allocating float buffer\n");
            return AVERROR(ENOMEM);
        }
    }

    /* Generate overlap window */
    if (!ff_sine_128[127])
        ff_init_ff_sine_windows(7);

    avctx->channel_layout = AV_CH_LAYOUT_MONO;

    avcodec_get_frame_defaults(&s->frame);
    avctx->coded_frame = &s->frame;

    return 0;
}
static GstFlowReturn
gst_ffmpegaudenc_encode_audio (GstFFMpegAudEnc * ffmpegaudenc,
    guint8 * audio_in, guint in_size, gint * have_data)
{
  GstAudioEncoder *enc;
  AVCodecContext *ctx;
  gint res;
  GstFlowReturn ret;
  GstAudioInfo *info;
  AVPacket pkt;
  AVFrame frame;
  gboolean planar;

  enc = GST_AUDIO_ENCODER (ffmpegaudenc);

  ctx = ffmpegaudenc->context;

  GST_LOG_OBJECT (ffmpegaudenc, "encoding buffer ");

  memset (&pkt, 0, sizeof (pkt));
  memset (&frame, 0, sizeof (frame));
  avcodec_get_frame_defaults (&frame);

  info = gst_audio_encoder_get_audio_info (enc);
  planar = av_sample_fmt_is_planar (ffmpegaudenc->context->sample_fmt);

  if (planar && info->channels > 1) {
    gint channels, nsamples;
    gint i, j;

    nsamples = frame.nb_samples = in_size / info->bpf;
    channels = info->channels;

    if (info->channels > AV_NUM_DATA_POINTERS) {
      frame.extended_data = g_new (uint8_t *, info->channels);
    } else {
static int encode_init(AVCodecContext *avctx) {
    WMACodecContext *s = avctx->priv_data;
    int i, flags1, flags2, block_align;
    uint8_t *extradata;

    s->avctx = avctx;

    if (avctx->channels > MAX_CHANNELS) {
        av_log(avctx, AV_LOG_ERROR,
               "too many channels: got %i, need %i or fewer\n",
               avctx->channels, MAX_CHANNELS);
        return AVERROR(EINVAL);
    }

    if (avctx->sample_rate > 48000) {
        av_log(avctx, AV_LOG_ERROR, "sample rate is too high: %d > 48kHz\n",
               avctx->sample_rate);
        return AVERROR(EINVAL);
    }

    if (avctx->bit_rate < 24 * 1000) {
        av_log(avctx, AV_LOG_ERROR,
               "bitrate too low: got %i, need 24000 or higher\n",
               avctx->bit_rate);
        return AVERROR(EINVAL);
    }

    /* extract flag info */
    flags1 = 0;
    flags2 = 1;
    if (avctx->codec->id == AV_CODEC_ID_WMAV1) {
        extradata = av_malloc(4);
        if (!extradata) /* allocation check added; the original dereferenced unchecked */
            return AVERROR(ENOMEM);
        avctx->extradata_size = 4;
        AV_WL16(extradata, flags1);
        AV_WL16(extradata + 2, flags2);
    } else if (avctx->codec->id == AV_CODEC_ID_WMAV2) {
        extradata = av_mallocz(10);
        if (!extradata)
            return AVERROR(ENOMEM);
        avctx->extradata_size = 10;
        AV_WL32(extradata, flags1);
        AV_WL16(extradata + 4, flags2);
    } else
        av_assert0(0);
    avctx->extradata          = extradata;
    s->use_exp_vlc            = flags2 & 0x0001;
    s->use_bit_reservoir      = flags2 & 0x0002;
    s->use_variable_block_len = flags2 & 0x0004;

    if (avctx->channels == 2)
        s->ms_stereo = 1;

    ff_wma_init(avctx, flags2);

    /* init MDCT */
    for (i = 0; i < s->nb_block_sizes; i++)
        ff_mdct_init(&s->mdct_ctx[i], s->frame_len_bits - i + 1, 0, 1.0);

    block_align = avctx->bit_rate * (int64_t)s->frame_len /
                  (avctx->sample_rate * 8);
    block_align = FFMIN(block_align, MAX_CODED_SUPERFRAME_SIZE);
    avctx->block_align = block_align;

    avctx->frame_size = avctx->delay = s->frame_len;

#if FF_API_OLD_ENCODE_AUDIO
    avctx->coded_frame = &s->frame;
    avcodec_get_frame_defaults(avctx->coded_frame);
#endif

    return 0;
}
void Editor::readFrame(bool needKeyFrame)
{
    AVPacket packet;
    AVFrame frame;
    int frameFinished;
    bool gotKeyFramePacket = false;

    avcodec_get_frame_defaults(&frame);

    while(av_read_frame(m_stream, &packet) == 0)
    {
        if(packet.stream_index != m_videoID)
        {
            av_free_packet(&packet); // free added: the original leaked packets from other streams
            continue;
        }

#if PACKET_DEBUG
        if(needKeyFrame)
            printf("DTS = %'10lld\n", packet.dts);
#endif

        if(needKeyFrame && !gotKeyFramePacket)
        {
            if(packet.flags & AV_PKT_FLAG_KEY)
                gotKeyFramePacket = true;
            else
            {
                av_free_packet(&packet);
                continue;
            }
        }

        if(avcodec_decode_video2(m_videoCodecCtx, &frame, &frameFinished, &packet) < 0)
        {
            error("Could not decode packet");
            return;
        }

        if(!frameFinished)
        {
            av_free_packet(&packet); // free added here as well
            continue;
        }

        if(m_videoCodecCtx->pix_fmt != PIX_FMT_YUV420P)
        {
            error("Pixel format %d is unsupported.", m_videoCodecCtx->pix_fmt);
            return;
        }

        if(needKeyFrame && !frame.key_frame)
        {
            av_free_packet(&packet);
            continue;
        }

        m_frameTimestamps[m_headFrame] = packet.dts;

        av_picture_copy(
            (AVPicture*)m_frameBuffer[m_headFrame],
            (AVPicture*)&frame,
            PIX_FMT_YUV420P,
            m_videoCodecCtx->width,
            m_videoCodecCtx->height
        );
        m_frameBuffer[m_headFrame]->pict_type = frame.pict_type;

        av_free_packet(&packet);

        if(!needKeyFrame)
            return;

        if(frame.key_frame)
        {
            log_debug("key frame seek: got keyframe at %'10lld",
                pts_val(packet.dts - m_timeStampStart));
            return;
        }
    }
}
void Movie::EncodeAudio(bool last)
{
    AVStream *astream = av->fmt_ctx->streams[av->audio_stream_idx];
    AVCodecContext *acodec = astream->codec;

    av_fifo_generic_write(av->audio_fifo, &audiobuf[0], audiobuf.size(), NULL);

    // bps: bytes per sample
    int channels = acodec->channels;
    int read_bps = 2;
    int write_bps = av_get_bytes_per_sample(acodec->sample_fmt);

    int max_read = acodec->frame_size * read_bps * channels;
    int min_read = last ? read_bps * channels : max_read;
    while (av_fifo_size(av->audio_fifo) >= min_read)
    {
        int read_bytes = MIN(av_fifo_size(av->audio_fifo), max_read);
        av_fifo_generic_read(av->audio_fifo, av->audio_data, read_bytes, NULL);

        // convert
        int read_samples = read_bytes / (read_bps * channels);
        int write_samples = read_samples;
        if (read_samples < acodec->frame_size)
        {
            // shrink or pad audio frame
            if (acodec->codec->capabilities & CODEC_CAP_SMALL_LAST_FRAME)
                acodec->frame_size = write_samples;
            else
                write_samples = acodec->frame_size;
        }

        convert_audio(read_samples, acodec->channels, -1,
                      AV_SAMPLE_FMT_S16, av->audio_data,
                      write_samples, acodec->channels, write_samples * write_bps,
                      acodec->sample_fmt, av->audio_data_conv);

#if LIBAVCODEC_VERSION_INT < AV_VERSION_INT(55,28,1)
        avcodec_get_frame_defaults(av->audio_frame);
#else
        av_frame_unref(av->audio_frame);
#endif
        av->audio_frame->nb_samples = write_samples;
        av->audio_frame->pts = av_rescale_q(av->audio_counter,
                                            (AVRational){1, acodec->sample_rate},
                                            acodec->time_base);
        av->audio_counter += write_samples;
        int asize = avcodec_fill_audio_frame(av->audio_frame, acodec->channels,
                                             acodec->sample_fmt,
                                             av->audio_data_conv,
                                             write_samples * write_bps * channels, 1);
        if (asize >= 0)
        {
            AVPacket pkt;
            memset(&pkt, 0, sizeof(AVPacket));
            av_init_packet(&pkt);

            int got_pkt = 0;
            if (0 == avcodec_encode_audio2(acodec, &pkt, av->audio_frame, &got_pkt)
                && got_pkt)
            {
                if (pkt.pts != AV_NOPTS_VALUE && pkt.pts < pkt.dts)
                    pkt.pts = pkt.dts;
                if (pkt.pts != AV_NOPTS_VALUE)
                    pkt.pts = av_rescale_q(pkt.pts, acodec->time_base, astream->time_base);
                if (pkt.dts != AV_NOPTS_VALUE)
                    pkt.dts = av_rescale_q(pkt.dts, acodec->time_base, astream->time_base);
                pkt.duration = av_rescale_q(pkt.duration, acodec->time_base, astream->time_base);
                pkt.stream_index = astream->index;
                av_interleaved_write_frame(av->fmt_ctx, &pkt);
                av_free_packet(&pkt);
            }
        }
    }

    if (last)
    {
        // flush the encoder: feed NULL frames until no more packets come out
        bool done = false;
        while (!done)
        {
            AVPacket pkt;
            memset(&pkt, 0, sizeof(AVPacket));
            av_init_packet(&pkt);

            int got_pkt = 0;
            if (0 == avcodec_encode_audio2(acodec, &pkt, NULL, &got_pkt) && got_pkt)
            {
                if (pkt.pts != AV_NOPTS_VALUE && pkt.pts < pkt.dts)
                    pkt.pts = pkt.dts;
                if (pkt.pts != AV_NOPTS_VALUE)
                    pkt.pts = av_rescale_q(pkt.pts, acodec->time_base, astream->time_base);
                if (pkt.dts != AV_NOPTS_VALUE)
                    pkt.dts = av_rescale_q(pkt.dts, acodec->time_base, astream->time_base);
                pkt.duration = av_rescale_q(pkt.duration, acodec->time_base, astream->time_base);
                pkt.stream_index = astream->index;
                av_interleaved_write_frame(av->fmt_ctx, &pkt);
                av_free_packet(&pkt);
            }
            else
            {
                done = true;
            }
        }
    }
}
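/* The #if guard in the snippet above shows the API change this corpus
 * straddles: avcodec_get_frame_defaults() was deprecated and later removed,
 * with av_frame_unref() taking over the "reset for reuse" role. A minimal,
 * hedged sketch of a helper that a project supporting both API generations
 * could share (the helper name is ours, not from any quoted project):
 */
#include <libavcodec/avcodec.h>
#include <libavutil/frame.h>

static void reset_reusable_frame(AVFrame *frame)
{
#if LIBAVCODEC_VERSION_INT < AV_VERSION_INT(55, 28, 1)
    /* old API: re-initialize all fields to their defaults */
    avcodec_get_frame_defaults(frame);
#else
    /* new API: drop buffer references and reset the frame for reuse */
    av_frame_unref(frame);
#endif
}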
// Protected
void AVFile::decode()
{
    AVFrame frame;
    int got_frame;

    AVPacket packet;
    int packet_size;
    uint8_t *packet_data;
    av_init_packet(&packet);

    uint8_t *shadow = reinterpret_cast<uint8_t*>(av_malloc(192000 * 4));

    decoding = true;
    while (av_read_frame(formatCtx, &packet) == 0) {
        if (packet.stream_index == audioStream) {
            // make sure that we will be able to free it later
            packet_size = packet.size;
            packet_data = packet.data;

            // decode frames while the packet still contains data
            while (packet.size > 0) {
                avcodec_get_frame_defaults(&frame);

                int len = avcodec_decode_audio4(codecCtx, &frame, &got_frame, &packet);
                if (len < 0) {
                    break; // probably a corrupted packet
                }
                packet.data += len;
                packet.size -= len;

                if (got_frame) {
                    got_frame = 0;
                    if (swrCtx) {
                        uint8_t *shadow_array[] = { shadow };
                        const uint8_t **input_array = (const uint8_t **)frame.extended_data;
                        // TODO: double-check this conversion path against the original code
                        int ret = swr_convert(swrCtx, shadow_array, 192000,
                                              input_array, frame.nb_samples);
                        if (ret > 0) {
                            _output->push(reinterpret_cast<float *>(shadow), ret * _channels);
                        }
                    } else {
                        _output->push(reinterpret_cast<float *>(frame.data[0]),
                                      frame.nb_samples * _channels);
                    }

                    // update position
                    if (frame.pts != AV_NOPTS_VALUE) {
                        _position = frame.pts;
                    } else if (packet.pts != AV_NOPTS_VALUE) {
                        _position = packet.pts;
                    } else {
                        _position = 0;
                    }
                }

                // hurry up, no time to decode one more frame
                if (!decoding) {
                    break;
                }
            }

            // restore original size and pointer
            packet.size = packet_size;
            packet.data = packet_data;
        }

        // free packet data, reuse the structure
        av_free_packet(&packet);

        // complete decoding thread shutdown
        if (!decoding) {
            break;
        }

        if (_seek_to > -1) {
            int flags = AVSEEK_FLAG_ANY;
            if (_seek_to < _position)
                flags = flags | AVSEEK_FLAG_BACKWARD;
            av_seek_frame(formatCtx, audioStream, _seek_to, flags);
            _seek_to = -1;
        }
    }
    av_free(shadow);
}
uint8_t *getAVAudioData(StreamPtr stream, size_t *length)
{
    int got_frame;
    int len;

    if(length)
        *length = 0;

    if(!stream || stream->CodecCtx->codec_type != AVMEDIA_TYPE_AUDIO)
        return NULL;

next_packet:
    if(!stream->Packets && !getNextPacket(stream->parent, stream->StreamIdx))
        return NULL;

    /* Decode some data, and check for errors */
    avcodec_get_frame_defaults(stream->Frame);
    while((len=avcodec_decode_audio4(stream->CodecCtx, stream->Frame,
                                     &got_frame, &stream->Packets->pkt)) < 0)
    {
        struct PacketList *self;

        /* Error? Drop it and try the next, I guess... */
        self = stream->Packets;
        stream->Packets = self->next;

        av_free_packet(&self->pkt);
        av_free(self);

        if(!stream->Packets)
            goto next_packet;
    }

    if(len < stream->Packets->pkt.size)
    {
        /* Move the unread data to the front and clear the end bits */
        int remaining = stream->Packets->pkt.size - len;

        memmove(stream->Packets->pkt.data, &stream->Packets->pkt.data[len],
                remaining);
        memset(&stream->Packets->pkt.data[remaining], 0,
               stream->Packets->pkt.size - remaining);
        stream->Packets->pkt.size -= len;
    }
    else
    {
        struct PacketList *self;

        self = stream->Packets;
        stream->Packets = self->next;

        av_free_packet(&self->pkt);
        av_free(self);
    }

    if(!got_frame || stream->Frame->nb_samples == 0)
        goto next_packet;

    /* Set the output buffer size */
    *length = av_samples_get_buffer_size(NULL, stream->CodecCtx->channels,
                                         stream->Frame->nb_samples,
                                         stream->CodecCtx->sample_fmt, 1);

    return stream->Frame->data[0];
}
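/* A hedged usage sketch for the helper above, not part of the original file:
 * drain one audio stream into a file. The caller name and the FILE argument
 * are hypothetical. getAVAudioData() returns a pointer into the decoder's
 * own frame that is only valid until the next call, so the data must be
 * consumed immediately.
 */
#include <stdio.h>

static void dumpAVAudio(StreamPtr stream, FILE *out)
{
    size_t length;
    uint8_t *data;

    while((data = getAVAudioData(stream, &length)) != NULL && length > 0)
        fwrite(data, 1, length, out);
}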
int main(int argc, char **argv)
{
    // curses
    int color_pair;

    if(initscr() == NULL){
        fprintf(stderr, "init failure\n");
        exit(EXIT_FAILURE);
    }
    /* start_color() must always be called first when using color attributes;
       apparently it is good practice to call it right after initscr(). */
    if(has_colors() == FALSE || start_color() == ERR){
        endwin();
        fprintf(stderr, "This terminal does not seem to support color\n");
        exit(EXIT_FAILURE);
    }
    if(signal(SIGINT, sig_handler) == SIG_ERR ||
       signal(SIGQUIT, sig_handler) == SIG_ERR){
        fprintf(stderr, "signal failure\n");
        exit(EXIT_FAILURE);
    }
    curs_set(0);

    /* create the color pairs */
    color_pair = 1;
    for(color_pair = 1; color_pair < 256; color_pair++){
        init_pair(color_pair, color_pair, color_pair);
    }
    refresh();

    char filter_descr[10000];
    int w, h;
    w = 80; // COLS;
    h = 25; // LINES;
    /* parenthesized: the original "sizeof (int) * LINES*COLS+1" added only one byte */
    bak = malloc(sizeof(int) * (LINES * COLS + 1));
    sprintf(filter_descr, "scale=%d:%d", w, h);

    int ret;
    AVPacket packet;
    AVFrame frame;
    int got_frame;

    if (argc != 2) {
        fprintf(stderr, "Usage: %s file\n", argv[0]);
        exit(1);
    }

    avcodec_register_all();
    av_register_all();
    avfilter_register_all();

    if ((ret = open_input_file(argv[1])) < 0)
        goto end;
    if ((ret = init_filters(filter_descr)) < 0)
        goto end;

    /* read all packets */
    while (1) {
        AVFilterBufferRef *picref;
        if ((ret = av_read_frame(fmt_ctx, &packet)) < 0)
            break;

        if (packet.stream_index == video_stream_index) {
            avcodec_get_frame_defaults(&frame);
            got_frame = 0;
            ret = avcodec_decode_video2(dec_ctx, &frame, &got_frame, &packet);
            if (ret < 0) {
                av_log(NULL, AV_LOG_ERROR, "Error decoding video\n");
                break;
            }

            if (got_frame) {
                frame.pts = av_frame_get_best_effort_timestamp(&frame);

                /* push the decoded frame into the filtergraph */
                if (av_buffersrc_add_frame(buffersrc_ctx, &frame, 0) < 0) {
                    av_log(NULL, AV_LOG_ERROR, "Error while feeding the filtergraph\n");
                    break;
                }

                /* pull filtered pictures from the filtergraph */
                while (repeat_flag) {
                    ret = av_buffersink_get_buffer_ref(buffersink_ctx, &picref, 0);
                    if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
                        break;
                    if (ret < 0)
                        goto end;

                    if (picref) {
                        display_picref(picref, buffersink_ctx->inputs[0]->time_base);
                        avfilter_unref_bufferp(&picref);
                    }
                }
            }
        }
        av_free_packet(&packet);
    }
end:
    endwin();

    avfilter_graph_free(&filter_graph);
    if (dec_ctx)
        avcodec_close(dec_ctx);
    avformat_close_input(&fmt_ctx);

    if (ret < 0 && ret != AVERROR_EOF) {
        char buf[1024];
        av_strerror(ret, buf, sizeof(buf));
        fprintf(stderr, "Error occurred: %s\n", buf);
        exit(1);
    }
    exit(0);
}
static av_cold int mpc8_decode_init(AVCodecContext *avctx)
{
    int i;
    MPCContext *c = avctx->priv_data;
    GetBitContext gb;
    static int vlc_initialized = 0;
    int channels;

    static VLC_TYPE band_table[542][2];
    static VLC_TYPE q1_table[520][2];
    static VLC_TYPE q9up_table[524][2];
    static VLC_TYPE scfi0_table[1 << MPC8_SCFI0_BITS][2];
    static VLC_TYPE scfi1_table[1 << MPC8_SCFI1_BITS][2];
    static VLC_TYPE dscf0_table[560][2];
    static VLC_TYPE dscf1_table[598][2];
    static VLC_TYPE q3_0_table[512][2];
    static VLC_TYPE q3_1_table[516][2];
    static VLC_TYPE codes_table[5708][2];

    if(avctx->extradata_size < 2){
        av_log(avctx, AV_LOG_ERROR, "Too small extradata size (%i)!\n",
               avctx->extradata_size);
        return -1;
    }
    memset(c->oldDSCF, 0, sizeof(c->oldDSCF));
    av_lfg_init(&c->rnd, 0xDEADBEEF);
    ff_dsputil_init(&c->dsp, avctx);
    ff_mpadsp_init(&c->mpadsp);

    ff_mpc_init();

    init_get_bits(&gb, avctx->extradata, 16);

    skip_bits(&gb, 3); // sample rate
    c->maxbands = get_bits(&gb, 5) + 1;
    if (c->maxbands >= BANDS) {
        av_log(avctx, AV_LOG_ERROR, "maxbands %d too high\n", c->maxbands);
        return AVERROR_INVALIDDATA;
    }
    channels = get_bits(&gb, 4) + 1;
    if (channels > 2) {
        av_log_missing_feature(avctx, "Multichannel MPC SV8", 1);
        return -1;
    }
    c->MSS = get_bits1(&gb);
    c->frames = 1 << (get_bits(&gb, 3) * 2);

    avctx->sample_fmt = AV_SAMPLE_FMT_S16;
    avctx->channel_layout = (channels == 2) ? AV_CH_LAYOUT_STEREO : AV_CH_LAYOUT_MONO;
    avctx->channels = channels;

    avcodec_get_frame_defaults(&c->frame);
    avctx->coded_frame = &c->frame;

    if(vlc_initialized)
        return 0;

    av_log(avctx, AV_LOG_DEBUG, "Initing VLC\n");

    band_vlc.table = band_table;
    band_vlc.table_allocated = 542;
    init_vlc(&band_vlc, MPC8_BANDS_BITS, MPC8_BANDS_SIZE,
             mpc8_bands_bits,  1, 1,
             mpc8_bands_codes, 1, 1, INIT_VLC_USE_NEW_STATIC);

    q1_vlc.table = q1_table;
    q1_vlc.table_allocated = 520;
    init_vlc(&q1_vlc, MPC8_Q1_BITS, MPC8_Q1_SIZE,
             mpc8_q1_bits,  1, 1,
             mpc8_q1_codes, 1, 1, INIT_VLC_USE_NEW_STATIC);

    q9up_vlc.table = q9up_table;
    q9up_vlc.table_allocated = 524;
    init_vlc(&q9up_vlc, MPC8_Q9UP_BITS, MPC8_Q9UP_SIZE,
             mpc8_q9up_bits,  1, 1,
             mpc8_q9up_codes, 1, 1, INIT_VLC_USE_NEW_STATIC);

    scfi_vlc[0].table = scfi0_table;
    scfi_vlc[0].table_allocated = 1 << MPC8_SCFI0_BITS;
    init_vlc(&scfi_vlc[0], MPC8_SCFI0_BITS, MPC8_SCFI0_SIZE,
             mpc8_scfi0_bits,  1, 1,
             mpc8_scfi0_codes, 1, 1, INIT_VLC_USE_NEW_STATIC);

    scfi_vlc[1].table = scfi1_table;
    scfi_vlc[1].table_allocated = 1 << MPC8_SCFI1_BITS;
    init_vlc(&scfi_vlc[1], MPC8_SCFI1_BITS, MPC8_SCFI1_SIZE,
             mpc8_scfi1_bits,  1, 1,
             mpc8_scfi1_codes, 1, 1, INIT_VLC_USE_NEW_STATIC);

    dscf_vlc[0].table = dscf0_table;
    dscf_vlc[0].table_allocated = 560;
    init_vlc(&dscf_vlc[0], MPC8_DSCF0_BITS, MPC8_DSCF0_SIZE,
             mpc8_dscf0_bits,  1, 1,
             mpc8_dscf0_codes, 1, 1, INIT_VLC_USE_NEW_STATIC);

    dscf_vlc[1].table = dscf1_table;
    dscf_vlc[1].table_allocated = 598;
    init_vlc(&dscf_vlc[1], MPC8_DSCF1_BITS, MPC8_DSCF1_SIZE,
             mpc8_dscf1_bits,  1, 1,
             mpc8_dscf1_codes, 1, 1, INIT_VLC_USE_NEW_STATIC);

    q3_vlc[0].table = q3_0_table;
    q3_vlc[0].table_allocated = 512;
    ff_init_vlc_sparse(&q3_vlc[0], MPC8_Q3_BITS, MPC8_Q3_SIZE,
                       mpc8_q3_bits,  1, 1,
                       mpc8_q3_codes, 1, 1,
                       mpc8_q3_syms,  1, 1, INIT_VLC_USE_NEW_STATIC);

    q3_vlc[1].table = q3_1_table;
    q3_vlc[1].table_allocated = 516;
    ff_init_vlc_sparse(&q3_vlc[1], MPC8_Q4_BITS, MPC8_Q4_SIZE,
                       mpc8_q4_bits,  1, 1,
                       mpc8_q4_codes, 1, 1,
                       mpc8_q4_syms,  1, 1, INIT_VLC_USE_NEW_STATIC);

    for(i = 0; i < 2; i++){
        res_vlc[i].table = &codes_table[vlc_offsets[0+i]];
        res_vlc[i].table_allocated = vlc_offsets[1+i] - vlc_offsets[0+i];
        init_vlc(&res_vlc[i], MPC8_RES_BITS, MPC8_RES_SIZE,
                 &mpc8_res_bits[i],  1, 1,
                 &mpc8_res_codes[i], 1, 1, INIT_VLC_USE_NEW_STATIC);

        q2_vlc[i].table = &codes_table[vlc_offsets[2+i]];
        q2_vlc[i].table_allocated = vlc_offsets[3+i] - vlc_offsets[2+i];
        init_vlc(&q2_vlc[i], MPC8_Q2_BITS, MPC8_Q2_SIZE,
                 &mpc8_q2_bits[i],  1, 1,
                 &mpc8_q2_codes[i], 1, 1, INIT_VLC_USE_NEW_STATIC);

        quant_vlc[0][i].table = &codes_table[vlc_offsets[4+i]];
        quant_vlc[0][i].table_allocated = vlc_offsets[5+i] - vlc_offsets[4+i];
        init_vlc(&quant_vlc[0][i], MPC8_Q5_BITS, MPC8_Q5_SIZE,
                 &mpc8_q5_bits[i],  1, 1,
                 &mpc8_q5_codes[i], 1, 1, INIT_VLC_USE_NEW_STATIC);

        quant_vlc[1][i].table = &codes_table[vlc_offsets[6+i]];
        quant_vlc[1][i].table_allocated = vlc_offsets[7+i] - vlc_offsets[6+i];
        init_vlc(&quant_vlc[1][i], MPC8_Q6_BITS, MPC8_Q6_SIZE,
                 &mpc8_q6_bits[i],  1, 1,
                 &mpc8_q6_codes[i], 1, 1, INIT_VLC_USE_NEW_STATIC);

        quant_vlc[2][i].table = &codes_table[vlc_offsets[8+i]];
        quant_vlc[2][i].table_allocated = vlc_offsets[9+i] - vlc_offsets[8+i];
        init_vlc(&quant_vlc[2][i], MPC8_Q7_BITS, MPC8_Q7_SIZE,
                 &mpc8_q7_bits[i],  1, 1,
                 &mpc8_q7_codes[i], 1, 1, INIT_VLC_USE_NEW_STATIC);

        quant_vlc[3][i].table = &codes_table[vlc_offsets[10+i]];
        quant_vlc[3][i].table_allocated = vlc_offsets[11+i] - vlc_offsets[10+i];
        init_vlc(&quant_vlc[3][i], MPC8_Q8_BITS, MPC8_Q8_SIZE,
                 &mpc8_q8_bits[i],  1, 1,
                 &mpc8_q8_codes[i], 1, 1, INIT_VLC_USE_NEW_STATIC);
    }
    vlc_initialized = 1;

    return 0;
}
static av_cold int libopus_decode_init(AVCodecContext *avc)
{
    struct libopus_context *opus = avc->priv_data;
    int ret, channel_map = 0, gain_db = 0, nb_streams, nb_coupled;
    uint8_t mapping_arr[8] = { 0, 1 }, *mapping;

    avc->sample_rate    = 48000;
    avc->sample_fmt     = avc->request_sample_fmt == AV_SAMPLE_FMT_FLT ?
                          AV_SAMPLE_FMT_FLT : AV_SAMPLE_FMT_S16;
    avc->channel_layout = avc->channels > 8 ? 0 :
                          ff_vorbis_channel_layouts[avc->channels - 1];

    if (avc->extradata_size >= OPUS_HEAD_SIZE) {
        gain_db     = sign_extend(AV_RL16(avc->extradata + 16), 16);
        channel_map = AV_RL8 (avc->extradata + 18);
    }
    if (avc->extradata_size >= OPUS_HEAD_SIZE + 2 + avc->channels) {
        nb_streams = avc->extradata[OPUS_HEAD_SIZE + 0];
        nb_coupled = avc->extradata[OPUS_HEAD_SIZE + 1];
        if (nb_streams + nb_coupled != avc->channels)
            av_log(avc, AV_LOG_WARNING, "Inconsistent channel mapping.\n");
        mapping = avc->extradata + OPUS_HEAD_SIZE + 2;
    } else {
        if (avc->channels > 2 || channel_map) {
            av_log(avc, AV_LOG_ERROR,
                   "No channel mapping for %d channels.\n", avc->channels);
            return AVERROR(EINVAL);
        }
        nb_streams = 1;
        nb_coupled = avc->channels > 1;
        mapping    = mapping_arr;
    }

    if (avc->channels > 2 && avc->channels <= 8) {
        const uint8_t *vorbis_offset = ff_vorbis_channel_layout_offsets[avc->channels - 1];
        int ch;

        /* Remap channels from vorbis order to libav order */
        for (ch = 0; ch < avc->channels; ch++)
            mapping_arr[ch] = mapping[vorbis_offset[ch]];
        mapping = mapping_arr;
    }

    opus->dec = opus_multistream_decoder_create(avc->sample_rate, avc->channels,
                                                nb_streams, nb_coupled,
                                                mapping, &ret);
    if (!opus->dec) {
        av_log(avc, AV_LOG_ERROR, "Unable to create decoder: %s\n",
               opus_strerror(ret));
        return ff_opus_error_to_averror(ret);
    }

    ret = opus_multistream_decoder_ctl(opus->dec, OPUS_SET_GAIN(gain_db));
    if (ret != OPUS_OK)
        av_log(avc, AV_LOG_WARNING, "Failed to set gain: %s\n",
               opus_strerror(ret));

    avc->delay = 3840; /* Decoder delay (in samples) at 48kHz */

    avcodec_get_frame_defaults(&opus->frame);
    avc->coded_frame = &opus->frame;

    return 0;
}
int AUD_FFMPEGReader::decode(AVPacket& packet, AUD_Buffer& buffer)
{
#ifdef FFMPEG_HAVE_DECODE_AUDIO4
    AVFrame* frame = NULL;
    int got_frame;
    int read_length;
    uint8_t* orig_data = packet.data;
    int orig_size = packet.size;

    int buf_size = buffer.getSize();
    int buf_pos = 0;

    while(packet.size > 0)
    {
        got_frame = 0;

        if(!frame)
            frame = avcodec_alloc_frame();
        else
            avcodec_get_frame_defaults(frame);

        read_length = avcodec_decode_audio4(m_codecCtx, frame, &got_frame, &packet);
        if(read_length < 0)
            break;

        if(got_frame)
        {
            int data_size = av_samples_get_buffer_size(NULL, m_codecCtx->channels,
                                                       frame->nb_samples,
                                                       m_codecCtx->sample_fmt, 1);

            if(buf_size - buf_pos < data_size)
            {
                buffer.resize(buf_size + data_size, true);
                buf_size += data_size;
            }

            if(m_tointerleave)
            {
                int single_size = data_size / m_codecCtx->channels / frame->nb_samples;
                for(int channel = 0; channel < m_codecCtx->channels; channel++)
                {
                    for(int i = 0; i < frame->nb_samples; i++)
                    {
                        memcpy(((data_t*)buffer.getBuffer()) + buf_pos +
                                   ((m_codecCtx->channels * i) + channel) * single_size,
                               frame->data[channel] + i * single_size, single_size);
                    }
                }
            }
            else
                memcpy(((data_t*)buffer.getBuffer()) + buf_pos,
                       frame->data[0], data_size);

            buf_pos += data_size;
        }
        packet.size -= read_length;
        packet.data += read_length;
    }

    packet.data = orig_data;
    packet.size = orig_size;

    av_free(frame);

    return buf_pos;
#else
    // save packet parameters
    uint8_t *audio_pkg_data = packet.data;
    int audio_pkg_size = packet.size;

    int buf_size = buffer.getSize();
    int buf_pos = 0;

    int read_length, data_size;

    AVPacket tmp_pkt;
    av_init_packet(&tmp_pkt);

    // as long as there is still data in the package
    while(audio_pkg_size > 0)
    {
        // resize buffer if needed
        if(buf_size - buf_pos < AVCODEC_MAX_AUDIO_FRAME_SIZE)
        {
            buffer.resize(buf_size + AVCODEC_MAX_AUDIO_FRAME_SIZE, true);
            buf_size += AVCODEC_MAX_AUDIO_FRAME_SIZE;
        }

        // read samples from the packet
        data_size = buf_size - buf_pos;
        tmp_pkt.data = audio_pkg_data;
        tmp_pkt.size = audio_pkg_size;

        read_length = avcodec_decode_audio3(
            m_codecCtx,
            (int16_t*)(((data_t*)buffer.getBuffer()) + buf_pos),
            &data_size, &tmp_pkt);

        // read error, next packet!
        if(read_length < 0)
            break;

        buf_pos += data_size;

        // move packet parameters
        audio_pkg_data += read_length;
        audio_pkg_size -= read_length;
    }

    return buf_pos;
#endif
}
int audio_thr(LPVOID lpParam)
{
    int iRet = -1;
    // Audio test: play the file and display the waveform.
    AVFormatContext *pFmtCtx = NULL;
    AVFormatContext *pFOutmtCtx = NULL;
    AVInputFormat *pAudioInputFmt = NULL;
    AVOutputFormat *pAudioOutputFmt = NULL;
    AVCodecContext *pOutputCodecCtx = NULL;
    AVPacket *pAudioPacket = NULL;
    int iAudioIndex = -1;
    int data_size = 0;
    int resampled_data_size = 0;
    uint8_t *out_buffer = 0;
    int64_t dec_channel_layout = 0;
    double pts;

    CLS_DlgStreamPusher *pThis = (CLS_DlgStreamPusher*)lpParam;
    if (pThis == NULL || pThis->m_pStreamInfo == NULL){
        TRACE("audio_thr--pThis == NULL || pThis->m_pStreamInfo == NULL\n");
        return iRet;
    }
    struct_stream_info *strct_stream_info = pThis->m_pStreamInfo;

    pAudioInputFmt = av_find_input_format("dshow");
    if (pAudioInputFmt == NULL){
        TRACE("pAudioInputFmt == NULL\n");
        return iRet;
    }

    char *psDevName = pThis->GetDeviceName(n_Audio);
    if (psDevName == NULL){
        TRACE("audio_thr--psDevName == NULL");
        return iRet;
    }

    if (avformat_open_input(&pFmtCtx, psDevName, pAudioInputFmt, NULL) != 0){
        TRACE("avformat_open_input err!\n");
        goto END;
    }
    if (avformat_find_stream_info(pFmtCtx, NULL) < 0){
        TRACE("avformat_find_stream_info(pFmtCtx, NULL) < 0\n");
        goto END;
    }

    for (int i = 0; i < pFmtCtx->nb_streams; i++){
        if (pFmtCtx->streams[i]->codec->codec_type == AVMEDIA_TYPE_AUDIO){
            iAudioIndex = i;
            AVCodec *tmpCodec = avcodec_find_decoder(pFmtCtx->streams[i]->codec->codec_id);
            if (0 > avcodec_open2(pFmtCtx->streams[i]->codec, tmpCodec, NULL)){
                TRACE("can not find or open decoder!\n");
            }
            break;
        }
    }

    // Locate the audio stream.
    strct_stream_info->m_pAudioStream = pFmtCtx->streams[iAudioIndex];
    if (strct_stream_info->m_pAudioStream == NULL){
        TRACE("strct_stream_info->m_pAudioStream == NULL\n");
        goto END;
    }

    AVCodecContext *pAudioDec = strct_stream_info->m_pAudioStream->codec;
    if (NULL == pAudioDec){
        TRACE("NULL == pAudioDec\n");
        goto END;
    }

    AVCodec *audio_encoder = avcodec_find_encoder(AV_CODEC_ID_AAC);
    if (audio_encoder == NULL){
        TRACE("audio_encoder == NULL\r\n");
        goto END;
    }

    pOutputCodecCtx = avcodec_alloc_context3(audio_encoder);
    if (pOutputCodecCtx == NULL){
        TRACE("pOutputCodecCtx == NULL");
        goto END;
    }

    pOutputCodecCtx->sample_rate = pFmtCtx->streams[0]->codec->sample_rate;
    pOutputCodecCtx->channel_layout = AV_CH_LAYOUT_STEREO;
    pOutputCodecCtx->channels = av_get_channel_layout_nb_channels(pOutputCodecCtx->channel_layout);
    pOutputCodecCtx->sample_fmt = audio_encoder->sample_fmts[0];
    pOutputCodecCtx->codec = audio_encoder;
    pOutputCodecCtx->codec_tag = 0;
    if (avcodec_open2(pOutputCodecCtx, pOutputCodecCtx->codec, 0) < 0){
        // The encoder could not be opened; bail out.
        TRACE("Failed to open the audio encoder!\n");
        goto END;
    }

    // SDL_AudioSpec
    int out_nb_samples = AUDIO_BUF_SIZE;
    AVSampleFormat out_sample_fmt = AV_SAMPLE_FMT_S16;
    int out_buffer_size = av_samples_get_buffer_size(NULL, pOutputCodecCtx->channels,
                                                     out_nb_samples, out_sample_fmt, 1);

    SDL_AudioSpec wanted_spec, spec;
    wanted_spec.freq = pOutputCodecCtx->sample_rate;
    wanted_spec.format = AUDIO_S16SYS;
    wanted_spec.channels = pOutputCodecCtx->channels;
    wanted_spec.silence = 0;
    wanted_spec.samples = out_nb_samples;
    wanted_spec.callback = fill_audio; // &CLS_DlgStreamPusher::
    wanted_spec.userdata = strct_stream_info;
    strct_stream_info->m_content_out_channels = pOutputCodecCtx->channels;

    if (SDL_OpenAudio(&wanted_spec, &spec) < 0){
        TRACE("can't open audio.\n");
        goto END;
    }

    int audio_hw_buf_size = spec.size;
    if (audio_hw_buf_size < 0){
        TRACE("audio_hw_buf_size < 0\n");
        return -1;
    }
    strct_stream_info->m_audio_src.fmt = AV_SAMPLE_FMT_S16;
    strct_stream_info->m_audio_src.freq = spec.freq;
    strct_stream_info->m_audio_src.channel_layout = pOutputCodecCtx->channel_layout;
    strct_stream_info->m_audio_src.channels = spec.channels;
    strct_stream_info->m_audio_hw_buf_size = audio_hw_buf_size;
    strct_stream_info->m_audio_tgt = strct_stream_info->m_audio_src;

    AVPacket pkt;
    out_buffer = (uint8_t *)av_malloc(MAX_AUDIO_FRAME_SIZE * 2);
    strct_stream_info->m_audio_refresh_tid =
        SDL_CreateThread(audio_refresh_thread, NULL, strct_stream_info);

    while (av_read_frame(pFmtCtx, &pkt) == 0 && _kbhit() == 0){
        if (!pThis->m_blAudioShow){
            break;
        }
        if (pkt.stream_index != iAudioIndex){
            continue;
        }
        if (!strct_stream_info->m_pAudioFrame) {
            if (!(strct_stream_info->m_pAudioFrame = avcodec_alloc_frame())){
                TRACE("!(strct_stream_info->m_pAudioFrame = avcodec_alloc_frame())\n");
                goto END;
            }
        } else{
            avcodec_get_frame_defaults(strct_stream_info->m_pAudioFrame);
        }
        // NOTE: the original code re-allocated m_pAudioFrame with av_frame_alloc()
        // right here, leaking the frame prepared above; that line was dropped.
        int gotframe = -1;
        if (avcodec_decode_audio4(pAudioDec, strct_stream_info->m_pAudioFrame, &gotframe, &pkt) < 0){
            av_frame_free(&strct_stream_info->m_pAudioFrame);
            TRACE("can not decode a frame\n");
            break;
        }
        av_free_packet(&pkt);
        if (!gotframe){
            // No data decoded; continue with the next packet.
            continue;
        }

        strct_stream_info->m_pAudioFrame->nb_samples = 1024; // hard-coded for now

        data_size = av_samples_get_buffer_size(NULL, pOutputCodecCtx->channels,
                                               strct_stream_info->m_pAudioFrame->nb_samples,
                                               pOutputCodecCtx->sample_fmt, 1);
        dec_channel_layout =
            (pOutputCodecCtx->channel_layout &&
             pOutputCodecCtx->channels ==
                 av_get_channel_layout_nb_channels(pOutputCodecCtx->channel_layout)) ?
            pOutputCodecCtx->channel_layout :
            av_get_default_channel_layout(pOutputCodecCtx->channels);

        //wanted_nb_samples = SynAudio(strct_stream_info, strct_stream_info->m_pAudioFrame->nb_samples);

        /*if (pOutputCodecCtx->sample_fmt != strct_stream_info->m_audio_src.fmt ||
            dec_channel_layout != strct_stream_info->m_audio_src.channel_layout ||
            pOutputCodecCtx->sample_rate != strct_stream_info->m_audio_src.freq){*/
        swr_free(&strct_stream_info->m_audio_swr_ctx);
        strct_stream_info->m_audio_swr_ctx = swr_alloc_set_opts(NULL,
            strct_stream_info->m_audio_tgt.channel_layout,
            strct_stream_info->m_audio_tgt.fmt,
            strct_stream_info->m_audio_tgt.freq,
            dec_channel_layout,
            pOutputCodecCtx->sample_fmt,
            pOutputCodecCtx->sample_rate,
            0, NULL);
        if (!strct_stream_info->m_audio_swr_ctx ||
            swr_init(strct_stream_info->m_audio_swr_ctx) < 0){
            TRACE("!strct_stream_info->m_audio_swr_ctx || swr_init(strct_stream_info->m_audio_swr_ctx) < 0");
            break;
        }
        strct_stream_info->m_audio_src.channel_layout = dec_channel_layout;
        strct_stream_info->m_audio_src.channels = pOutputCodecCtx->channels;
        strct_stream_info->m_audio_src.freq = pOutputCodecCtx->sample_rate;
        strct_stream_info->m_audio_src.fmt = pOutputCodecCtx->sample_fmt;
        //}

        if (NULL != strct_stream_info->m_audio_swr_ctx){
            const uint8_t **in = (const uint8_t **)strct_stream_info->m_pAudioFrame->extended_data;
            uint8_t *out[] = { strct_stream_info->m_audio_buf2 };
            int out_count = sizeof(strct_stream_info->m_audio_buf2) /
                            strct_stream_info->m_audio_tgt.channels /
                            av_get_bytes_per_sample(strct_stream_info->m_audio_tgt.fmt);
            int iRet = swr_convert(strct_stream_info->m_audio_swr_ctx, out, out_count,
                                   in, strct_stream_info->m_pAudioFrame->nb_samples);
            if (iRet < 0){
                TRACE("swr_convert < 0\n");
                break;
            }
            if (iRet == out_count) {
                TRACE("warning: audio buffer is probably too small\n");
                swr_init(strct_stream_info->m_audio_swr_ctx);
            }
            strct_stream_info->m_audio_buf = strct_stream_info->m_audio_buf2;
            resampled_data_size = iRet * strct_stream_info->m_audio_tgt.channels *
                                  av_get_bytes_per_sample(strct_stream_info->m_audio_tgt.fmt);
        }
        else{
            strct_stream_info->m_audio_buf = strct_stream_info->m_pAudioFrame->data[0];
            resampled_data_size = data_size;
        }

        /* if no pts, then compute it */
        pts = strct_stream_info->m_audio_clock;
        //*pts_ptr = pts;
        strct_stream_info->m_audio_clock += (double)data_size /
            (pAudioDec->channels * pAudioDec->sample_rate *
             av_get_bytes_per_sample(pAudioDec->sample_fmt));
#ifdef DEBUG
        {
            static double last_clock;
            /*printf("audio: delay=%0.3f clock=%0.3f pts=%0.3f\n",
                   is->audio_clock - last_clock,
                   is->audio_clock, pts);*/
            last_clock = strct_stream_info->m_audio_clock;
        }
#endif

        // FIX: FLAC, MP3 and AAC use different numbers of samples per frame.
        /*if (wanted_spec.samples != strct_stream_info->m_pAudioFrame->nb_samples){
            SDL_CloseAudio();
            out_nb_samples = strct_stream_info->m_pAudioFrame->nb_samples;
            out_buffer_size = av_samples_get_buffer_size(NULL, pOutputCodecCtx->channels,
                                                         out_nb_samples, out_sample_fmt, 1);
            wanted_spec.samples = out_nb_samples;
            SDL_OpenAudio(&wanted_spec, NULL);
        }*/

        // Hand the PCM data to SDL.
        TRACE("----out_buffer_size---is [%ld]\n", out_buffer_size);
        audio_chunk = (Uint8 *)out_buffer;
        audio_len = out_buffer_size;
        audio_pos = audio_chunk;
        strct_stream_info->m_aduio_pkt_size = resampled_data_size; //audio_len;//

        // Dump the PCM to a file for testing.
        if (1){
            FILE *p = NULL;
            fopen_s(&p, "test.pcm", "a+b");
            if (p == NULL){
                continue;
            }
            // 16-bit sample depth is known from testing, hence the factor of 2.
            int tempLenght = 2 * strct_stream_info->m_pAudioFrame->nb_samples;
            uint8_t *tmpPtr = strct_stream_info->m_pAudioFrame->data[0];
            if (NULL != p)
            {
                while (tempLenght > 0)
                {
                    size_t temp = fwrite(tmpPtr, 1, tempLenght, p);
                    tmpPtr += temp;
                    tempLenght = tempLenght - temp;
                }
                fclose(p);
            }
        }

        SDL_PauseAudio(0);
        //while (audio_len > 0){
        //    // Wait until playback finishes
        //    SDL_Delay(1);
        //}

        //if (pFmtCtx->streams[iAudioIndex]->codec->sample_fmt != pOutputCodecCtx->sample_fmt
        //    || pFmtCtx->streams[iAudioIndex]->codec->channels != pOutputCodecCtx->channels
        //    || pFmtCtx->streams[iAudioIndex]->codec->sample_rate != pOutputCodecCtx->sample_rate){
        //    // TODO: if the input and output audio formats differ, resampling is needed;
        //    // here they are identical, so it was not done.
        //}

        //av_audio_fifo_realloc(fifo, av_audio_fifo_size(fifo) + frame->nb_samples);
        //av_audio_fifo_write(fifo, (void **)frame->data, frame->nb_samples);
        //// Keep reading until the FIFO holds fewer samples than one frame.
        //while (av_audio_fifo_size(fifo) >= (pOutputCodecCtx->frame_size > 0 ? pOutputCodecCtx->frame_size : AUDIO_BUF_SIZE))
        //{
        //    av_frame_free(&frame);
        //    frame = av_frame_alloc();
        //    frame->nb_samples = pOutputCodecCtx->frame_size > 0 ? pOutputCodecCtx->frame_size : AUDIO_BUF_SIZE;
        //    frame->channel_layout = pOutputCodecCtx->channel_layout;
        //    frame->format = pOutputCodecCtx->sample_fmt;
        //    frame->sample_rate = pOutputCodecCtx->sample_rate;
        //    av_frame_get_buffer(frame, 0);
        //    av_audio_fifo_read(fifo, (void **)frame->data,
        //                       (pOutputCodecCtx->frame_size > 0 ? pOutputCodecCtx->frame_size : AUDIO_BUF_SIZE));
        //    av_init_packet(&pkt_out);
        //    //frame->pts = pFrame->pts;
        //    int got_picture = -1;
        //    pkt_out.data = NULL;
        //    pkt_out.size = 0;
        //    if (avcodec_encode_audio2(pOutputCodecCtx, &pkt_out, frame, &got_picture) < 0){
        //        printf("can not encode a frame");
        //    }
        //    av_frame_free(&frame);
        //    if (got_picture)
        //    {
        //        pkt_out.pts = frameIndex * pOutputCodecCtx->frame_size;
        //        pkt_out.dts = frameIndex * pOutputCodecCtx->frame_size;
        //        pkt_out.duration = pOutputCodecCtx->frame_size;
        //        // TODO: feed the encoded result [pkt_out] into the muxing stage
        //        if (pFile != NULL){
        //            /*fwrite((uint8_t *)pDlg->m_streamstate->audio_buf + pDlg->m_streamstate->audio_buf_index, 1, len1, pFile);*/
        //        }
        //        frameIndex++;
        //    }
        //}
    }

    iRet = 1;

END:
    //swr_free(&au_convert_ctx);
    SDL_CloseAudio();
    SDL_Quit();

    av_free(out_buffer);
    avcodec_close(pOutputCodecCtx);

    return iRet;
}
/*
 * Audio encoding example
 */
static void audio_encode_example(const char *filename)
{
    AVCodec *codec;
    AVCodecContext *c = NULL;
    AVFrame *frame;
    AVPacket pkt;
    int i, j, k, ret, got_output;
    int buffer_size;
    FILE *f;
    uint16_t *samples;
    float t, tincr;

    printf("Encode audio file %s\n", filename);

    /* find the MP2 encoder */
    codec = avcodec_find_encoder(AV_CODEC_ID_MP2);
    if (!codec) {
        fprintf(stderr, "Codec not found\n");
        exit(1);
    }

    c = avcodec_alloc_context3(codec);
    if (!c) {
        fprintf(stderr, "Could not allocate audio codec context\n");
        exit(1);
    }

    /* put sample parameters */
    c->bit_rate = 64000;

    /* check that the encoder supports s16 pcm input */
    c->sample_fmt = AV_SAMPLE_FMT_S16;
    if (!check_sample_fmt(codec, c->sample_fmt)) {
        fprintf(stderr, "Encoder does not support sample format %s",
                av_get_sample_fmt_name(c->sample_fmt));
        exit(1);
    }

    /* select other audio parameters supported by the encoder */
    c->sample_rate    = select_sample_rate(codec);
    c->channel_layout = select_channel_layout(codec);
    c->channels       = av_get_channel_layout_nb_channels(c->channel_layout);
    printf("Channels:%d %d %d\n", c->channels, c->sample_rate, c->channel_layout);

    /* open it */
    if (avcodec_open2(c, codec, NULL) < 0) {
        fprintf(stderr, "Could not open codec\n");
        exit(1);
    }

    f = fopen(filename, "wb");
    if (!f) {
        fprintf(stderr, "Could not open %s\n", filename);
        exit(1);
    }

    /* frame containing input raw audio */
    frame = avcodec_alloc_frame();
    if (!frame) {
        fprintf(stderr, "Could not allocate audio frame\n");
        exit(1);
    }

    frame->nb_samples = c->frame_size;
    printf("FrameSize:%d\n", c->frame_size);
    frame->format         = c->sample_fmt;
    frame->channel_layout = c->channel_layout;

    /* the codec gives us the frame size, in samples,
     * we calculate the size of the samples buffer in bytes */
    buffer_size = av_samples_get_buffer_size(NULL, c->channels, c->frame_size,
                                             c->sample_fmt, 0);
    samples = (uint16_t *)av_malloc(buffer_size);
    if (!samples) {
        fprintf(stderr, "Could not allocate %d bytes for samples buffer\n",
                buffer_size);
        exit(1);
    }
    /* setup the data pointers in the AVFrame */
    ret = avcodec_fill_audio_frame(frame, c->channels, c->sample_fmt,
                                   (const uint8_t*)samples, buffer_size, 0);
    if (ret < 0) {
        fprintf(stderr, "Could not setup audio frame\n");
        exit(1);
    }

    int t1 = 0;
    /* encode a single tone sound */
    t = 0;
    tincr = 2 * M_PI * 440.0 / c->sample_rate;
    for (i = 0; i < 200; i++) {
        av_init_packet(&pkt);
        pkt.data = NULL; // packet data will be allocated by the encoder
        pkt.size = 0;

        for (j = 0; j < c->frame_size; j++) {
            samples[2*j] = (int)(sin(t) * 10000);

            for (k = 1; k < c->channels; k++)
                samples[2*j + k] = samples[2*j];
            t += tincr;
        }
        /* encode the samples */
        if (t1 == 0) {
            /* dump the first raw input frame for inspection */
            FILE *f = fopen("C:/o11.dump", "w");
            fwrite(samples, buffer_size, 1, f);
            fclose(f);
        }
        ret = avcodec_encode_audio2(c, &pkt, frame, &got_output);
        if (ret < 0) {
            fprintf(stderr, "Error encoding audio frame\n");
            exit(1);
        }
        if (got_output) {
            if (t1 == 1000000) { /* debug path, never taken with this counter */
                AVFrame *dframe = avcodec_alloc_frame();
                avcodec_get_frame_defaults(dframe);
                AVCodec *decoder = avcodec_find_decoder(AV_CODEC_ID_MP2);
                AVCodecContext *decodecontext = avcodec_alloc_context3(decoder);
                avcodec_open2(decodecontext, decoder, NULL);
                int pic;
                int ret = avcodec_decode_audio4(decodecontext, dframe, &pic, &pkt);
                if (ret < 0) {
                    printf("Decode failed\n");
                }
                printf("LINE SIZE:%d<->%d\n", dframe->linesize[0], dframe->nb_samples);
                FILE *f = fopen("c:/o12.dump", "w");
                fwrite(dframe->data[0], dframe->linesize[0], 1, f);
                fclose(f);
            }
            fwrite(pkt.data, 1, pkt.size, f);
            av_free_packet(&pkt);
        }
        t1++;
    }

    /* get the delayed frames */
    fclose(f);

    av_freep(&samples);
    avcodec_free_frame(&frame);
    avcodec_close(c);
    av_free(c);
    printf("OK");
}
status_t
AVCodecDecoder::_DecodeAudio(void* _buffer, int64* outFrameCount,
    media_header* mediaHeader, media_decode_info* info)
{
    TRACE_AUDIO("AVCodecDecoder::_DecodeAudio(audio start_time %.6fs)\n",
        mediaHeader->start_time / 1000000.0);

    *outFrameCount = 0;

    uint8* buffer = reinterpret_cast<uint8*>(_buffer);
    while (*outFrameCount < fOutputFrameCount) {
        // Check conditions which would hint at broken code below.
        if (fOutputBufferSize < 0) {
            fprintf(stderr, "Decoding read past the end of the output buffer! "
                "%ld\n", fOutputBufferSize);
            fOutputBufferSize = 0;
        }
        if (fChunkBufferSize < 0) {
            fprintf(stderr, "Decoding read past the end of the chunk buffer! "
                "%ld\n", fChunkBufferSize);
            fChunkBufferSize = 0;
        }

        if (fOutputBufferSize > 0) {
            // We still have decoded audio frames from the last
            // invocation, which start at fOutputBufferOffset
            // and are of fOutputBufferSize. Copy those into the buffer,
            // but not more than it can hold.
            int32 frames = min_c(fOutputFrameCount - *outFrameCount,
                fOutputBufferSize / fOutputFrameSize);
            if (frames == 0)
                debugger("fOutputBufferSize not multiple of frame size!");
            size_t remainingSize = frames * fOutputFrameSize;
            memcpy(buffer, fOutputFrame->data[0] + fOutputBufferOffset,
                remainingSize);
            fOutputBufferOffset += remainingSize;
            fOutputBufferSize -= remainingSize;
            buffer += remainingSize;
            *outFrameCount += frames;
            fStartTime += (bigtime_t)((1000000LL * frames) / fOutputFrameRate);
            continue;
        }
        if (fChunkBufferSize == 0) {
            // Time to read the next chunk buffer. We use a separate
            // media_header, since the chunk header may not belong to
            // the start of the decoded audio frames we return. For
            // example we may have used frames from a previous invocation,
            // or we may have to read several chunks until we fill up the
            // output buffer.
            media_header chunkMediaHeader;
            status_t err = GetNextChunk(&fChunkBuffer, &fChunkBufferSize,
                &chunkMediaHeader);
            if (err == B_LAST_BUFFER_ERROR) {
                TRACE_AUDIO("  Last Chunk with chunk size %ld\n",
                    fChunkBufferSize);
                fChunkBufferSize = 0;
                return err;
            }
            if (err != B_OK || fChunkBufferSize < 0) {
                printf("GetNextChunk error %ld\n", fChunkBufferSize);
                fChunkBufferSize = 0;
                break;
            }
            fChunkBufferOffset = 0;
            fStartTime = chunkMediaHeader.start_time;
        }

        fTempPacket.data = (uint8_t*)fChunkBuffer + fChunkBufferOffset;
        fTempPacket.size = fChunkBufferSize;

        avcodec_get_frame_defaults(fOutputFrame);
        int gotFrame = 0;
        int usedBytes = avcodec_decode_audio4(fContext, fOutputFrame,
            &gotFrame, &fTempPacket);
        if (usedBytes < 0 && !fAudioDecodeError) {
            // Report failure if not done already
            printf("########### audio decode error, "
                "fChunkBufferSize %ld, fChunkBufferOffset %ld\n",
                fChunkBufferSize, fChunkBufferOffset);
            fAudioDecodeError = true;
        }
        if (usedBytes <= 0) {
            // Error or failure to produce decompressed output.
            // Skip the chunk buffer data entirely.
            usedBytes = fChunkBufferSize;
            fOutputBufferSize = 0;
            // Assume the audio decoded until now is broken.
            memset(_buffer, 0, buffer - (uint8*)_buffer);
        } else {
            // Success
            fAudioDecodeError = false;
            if (gotFrame == 1) {
                fOutputBufferSize = av_samples_get_buffer_size(NULL,
                    fContext->channels, fOutputFrame->nb_samples,
                    fContext->sample_fmt, 1);
                if (fOutputBufferSize < 0)
                    fOutputBufferSize = 0;
            } else
                fOutputBufferSize = 0;
        }
//printf("  chunk size: %d, decoded: %d, used: %d\n",
//fTempPacket.size, decodedBytes, usedBytes);

        fChunkBufferOffset += usedBytes;
        fChunkBufferSize -= usedBytes;
        fOutputBufferOffset = 0;
    }
    fFrame += *outFrameCount;
    TRACE_AUDIO("  frame count: %lld current: %lld\n", *outFrameCount, fFrame);

    return B_OK;
}
int FileFFMPEG::read_frame(VFrame *frame)
{
    int error = 0;
    const int debug = 0;

    ffmpeg_lock->lock("FileFFMPEG::read_frame");
    FileFFMPEGStream *stream = video_streams.get(0);
    if(debug) printf("FileFFMPEG::read_frame %d stream=%p stream->ffmpeg_file_context=%p\n",
        __LINE__, stream, stream->ffmpeg_file_context);
    AVStream *ffmpeg_stream =
        ((AVFormatContext*)stream->ffmpeg_file_context)->streams[stream->index];
    AVCodecContext *decoder_context = ffmpeg_stream->codec;
    if(debug) printf("FileFFMPEG::read_frame %d\n", __LINE__);

// if(file->current_frame == 100)
// {
//     printf("FileFFMPEG::read_frame %d fake crash\n", __LINE__);
//     exit(1);
// }

//dump_context(stream->codec);
    if(stream->first_frame)
    {
        stream->first_frame = 0;
        int got_it = 0;

        while(!got_it && !error)
        {
            AVPacket packet;
            if(debug) printf("FileFFMPEG::read_frame %d\n", __LINE__);
            error = av_read_frame((AVFormatContext*)stream->ffmpeg_file_context, &packet);
            if(debug) printf("FileFFMPEG::read_frame %d\n", __LINE__);

            if(!error && packet.size > 0)
            {
                if(packet.stream_index == stream->index)
                {
                    if(!ffmpeg_frame) ffmpeg_frame = avcodec_alloc_frame();
                    int got_picture = 0;

                    if(debug) printf("FileFFMPEG::read_frame %d\n", __LINE__);
                    avcodec_get_frame_defaults((AVFrame*)ffmpeg_frame);
                    if(debug) printf("FileFFMPEG::read_frame %d decoder_context=%p ffmpeg_frame=%p\n",
                        __LINE__, decoder_context, ffmpeg_frame);
                    int result = avcodec_decode_video(
                        decoder_context,
                        (AVFrame*)ffmpeg_frame,
                        &got_picture,
                        packet.data,
                        packet.size);
                    if(debug) printf("FileFFMPEG::read_frame %d\n", __LINE__);
                    if(((AVFrame*)ffmpeg_frame)->data[0] && got_picture) got_it = 1;
                    if(debug) printf("FileFFMPEG::read_frame %d\n", __LINE__);
                }
            }
            av_free_packet(&packet);
        }
        error = 0;
    }
    if(debug) printf("FileFFMPEG::read_frame %d\n", __LINE__);

#define SEEK_THRESHOLD 16

// printf("FileFFMPEG::read_frame %d current_frame=%lld file->current_frame=%lld\n",
//     __LINE__, current_frame, file->current_frame);
    if(stream->current_frame != file->current_frame &&
        (file->current_frame < stream->current_frame ||
         file->current_frame > stream->current_frame + SEEK_THRESHOLD))
    {
        if(debug) printf("FileFFMPEG::read_frame %d stream->current_frame=%lld file->current_frame=%lld\n",
            __LINE__,
            (long long)stream->current_frame,
            (long long)file->current_frame);
        int64_t timestamp = (int64_t)((double)file->current_frame *
            ffmpeg_stream->time_base.den /
            ffmpeg_stream->time_base.num /
            asset->frame_rate);
// Want to seek to the nearest keyframe and read up to the current frame
// but ffmpeg doesn't support that kind of precision.
// Also, basing all the seeking on the same stream seems to be required for synchronization.
        av_seek_frame((AVFormatContext*)stream->ffmpeg_file_context,
            /* stream->index */ 0,
            timestamp,
            AVSEEK_FLAG_ANY);
        stream->current_frame = file->current_frame - 1;
    }
    if(debug) printf("FileFFMPEG::read_frame %d\n", __LINE__);

    int got_it = 0;

// Read frames until we catch up to the current position.
// if(current_frame >= file->current_frame - SEEK_THRESHOLD &&
//     current_frame < file->current_frame - 1)
// {
//     printf("FileFFMPEG::read_frame %d current_frame=%lld file->current_frame=%lld\n",
//         __LINE__, current_frame, file->current_frame);
// }
    while(stream->current_frame < file->current_frame && !error)
    {
        got_it = 0;
        if(debug) printf("FileFFMPEG::read_frame %d stream->current_frame=%lld file->current_frame=%lld\n",
            __LINE__,
            (long long)stream->current_frame,
            (long long)file->current_frame);

        while(!got_it && !error)
        {
            AVPacket packet;

            error = av_read_frame((AVFormatContext*)stream->ffmpeg_file_context, &packet);

            if(!error && packet.size > 0)
            {
                if(packet.stream_index == stream->index)
                {
                    if(!ffmpeg_frame) ffmpeg_frame = avcodec_alloc_frame();
                    int got_picture = 0;

                    avcodec_get_frame_defaults((AVFrame*)ffmpeg_frame);

// printf("FileFFMPEG::read_frame %d current_frame=%lld ffmpeg_frame=%p packet.data=%p packet.size=%d\n",
//     __LINE__, file->current_frame, ffmpeg_frame, packet.data, packet.size);
// for(int i = 0; i < decoder_context->extradata_size; i++)
//     printf("0x%02x, ", decoder_context->extradata[i]);
// printf("\n");
//
// if(file->current_frame >= 200 && file->current_frame < 280)
// {
//     char string[1024];
//     sprintf(string, "/tmp/debug%03lld", file->current_frame);
//     FILE *out = fopen(string, "w");
//     fwrite(packet.data, packet.size, 1, out);
//     fclose(out);
// }

                    int result = avcodec_decode_video(
                        decoder_context,
                        (AVFrame*)ffmpeg_frame,
                        &got_picture,
                        packet.data,
                        packet.size);

//printf("FileFFMPEG::read_frame %d result=%d\n", __LINE__, result);
                    if(((AVFrame*)ffmpeg_frame)->data[0] && got_picture) got_it = 1;
//printf("FileFFMPEG::read_frame %d result=%d got_it=%d\n", __LINE__, result, got_it);
                }
            }
            av_free_packet(&packet);
        }

        if(got_it) stream->current_frame++;
    }

//PRINT_TRACE
// printf("FileFFMPEG::read_frame %d current_frame=%lld file->current_frame=%lld got_it=%d\n",
//     __LINE__, current_frame, file->current_frame, got_it);
    if(debug) printf("FileFFMPEG::read_frame %d\n", __LINE__);

// Convert colormodel
    if(got_it)
    {
        int input_cmodel;
        AVFrame *input_frame = (AVFrame*)ffmpeg_frame;

        switch(decoder_context->pix_fmt)
        {
            case PIX_FMT_YUV420P:
                input_cmodel = BC_YUV420P;
                break;
#ifndef FFMPEG_2010
            case PIX_FMT_YUV422:
                input_cmodel = BC_YUV422;
                break;
#endif
            case PIX_FMT_YUV422P:
                input_cmodel = BC_YUV422P;
                break;
            case PIX_FMT_YUV410P:
                input_cmodel = BC_YUV9P;
                break;
            default:
                fprintf(stderr,
                    "quicktime_ffmpeg_decode: unrecognized color model %d\n",
                    decoder_context->pix_fmt);
                input_cmodel = BC_YUV420P;
                break;
        }

        unsigned char **input_rows =
            (unsigned char**)malloc(sizeof(unsigned char*) * decoder_context->height);

        for(int i = 0; i < decoder_context->height; i++)
            input_rows[i] = input_frame->data[0] +
                i * decoder_context->width *
                cmodel_calculate_pixelsize(input_cmodel);

        cmodel_transfer(frame->get_rows(), /* Leave NULL if non existent */
            input_rows,
            frame->get_y(), /* Leave NULL if non existent */
            frame->get_u(),
            frame->get_v(),
            input_frame->data[0], /* Leave NULL if non existent */
            input_frame->data[1],
            input_frame->data[2],
            0,      /* Dimensions to capture from input frame */
            0,
            decoder_context->width,
            decoder_context->height,
            0,      /* Dimensions to project on output frame */
            0,
            frame->get_w(),
            frame->get_h(),
            input_cmodel,
            frame->get_color_model(),
            0,      /* When transferring BC_RGBA8888 to non-alpha this is the background color in 0xRRGGBB hex */
            input_frame->linesize[0], /* For planar use the luma rowspan */
            frame->get_w());

        free(input_rows);
    }
//PRINT_TRACE

    ffmpeg_lock->unlock();
    if(debug) printf("FileFFMPEG::read_frame %d\n", __LINE__);
    return error;
}
int ac_decode_audio_package(lp_ac_package pPackage, lp_ac_audio_decoder pDecoder, lp_ac_decoder pDec)
{
    double pts;

    //Variables describing the destination buffer
    int dest_buffer_pos = pDecoder->decoder.buffer_size;

    //Make a copy of the package read by avformat, so that we can move the data pointers around
    AVPacket pkt_tmp = ((lp_ac_package_data)pPackage)->ffpackage;
    AVFrame *decoded_frame = NULL;

    if (pDecoder->tmp_data_length > 0) {
        //Prepend the data saved from the last call. The original code memcpy'd
        //from "&pkt_tmp.data" (the address of the pointer itself) with
        //overlapping regions; fixed with memmove and the data pointer.
        int old_size = pkt_tmp.size;
        pkt_tmp.data = av_realloc(pkt_tmp.data, old_size + pDecoder->tmp_data_length);
        pkt_tmp.size = old_size + pDecoder->tmp_data_length;
        memmove(pkt_tmp.data + pDecoder->tmp_data_length, pkt_tmp.data, old_size);
        memcpy(pkt_tmp.data, pDecoder->tmp_data, pDecoder->tmp_data_length);
    }

    while (pkt_tmp.size > 0) {
        if (!decoded_frame) {
            if (!(decoded_frame = avcodec_alloc_frame())) {
                return 0;
            }
        } else
            avcodec_get_frame_defaults(decoded_frame);

        int got_frame = 0;
        int len1 = avcodec_decode_audio4(pDecoder->pCodecCtx, decoded_frame,
                                         &got_frame, &pkt_tmp);

        //If an error occurred, skip the frame
        if (len1 < 0){
            return 0;
        }

        //Increment the source buffer pointers
        pkt_tmp.size -= len1;
        pkt_tmp.data += len1;

        if (got_frame){
            int data_size = av_samples_get_buffer_size(NULL,
                (pDecoder->pCodecCtx)->channels,
                decoded_frame->nb_samples,
                (pDecoder->pCodecCtx)->sample_fmt, 1);

            //Reserve enough memory for copying the result data
            if (dest_buffer_pos + data_size > pDecoder->max_buffer_size) {
                pDecoder->decoder.pBuffer =
                    av_realloc(pDecoder->decoder.pBuffer, dest_buffer_pos + data_size);
                pDecoder->max_buffer_size = dest_buffer_pos + data_size;
            }

            //Copy the result to the output buffer and increment the destination buffer pointers
            memcpy(pDecoder->decoder.pBuffer + dest_buffer_pos,
                   decoded_frame->data[0], data_size);
            dest_buffer_pos += data_size;
            pDecoder->decoder.buffer_size += data_size;

            pts = 0;
            if (((lp_ac_package_data)pPackage)->ffpackage.dts != AV_NOPTS_VALUE){
                pts = ((lp_ac_package_data)pPackage)->ffpackage.dts *
                    av_q2d(((lp_ac_data)pDec->pacInstance)->pFormatCtx->streams[pPackage->stream_index]->time_base);
                pDec->video_clock = pts;
            } else {
                pts = pDec->video_clock;
            }
            double bytes_per_second = 2 *
                pDec->stream_info.audio_info.samples_per_second *
                pDec->stream_info.audio_info.channel_count;
            if (bytes_per_second > 0)
                pDec->video_clock += data_size / bytes_per_second;

            pDec->timecode = pts;

            if (pkt_tmp.size > 0) {
                //Save the undecoded remainder for the next call
                pDecoder->tmp_data = av_malloc(pkt_tmp.size);
                memcpy(pDecoder->tmp_data, pkt_tmp.data, pkt_tmp.size);
                pDecoder->tmp_data_length = pkt_tmp.size;
            } else {
                av_free(pDecoder->tmp_data);
                pDecoder->tmp_data_length = 0;
            }
            return 1;
        }
    }
    av_free(pDecoder->tmp_data);
    pDecoder->tmp_data_length = 0; //the original ended with a no-op "pDecoder->tmp_data_length;"
    return 0;
}
static av_cold int flic_decode_init(AVCodecContext *avctx)
{
    FlicDecodeContext *s = avctx->priv_data;
    unsigned char *fli_header = (unsigned char *)avctx->extradata;
    int depth;

    if (avctx->extradata_size != 0 &&
        avctx->extradata_size != 12 &&
        avctx->extradata_size != 128 &&
        avctx->extradata_size != 256 &&
        avctx->extradata_size != 904 &&
        avctx->extradata_size != 1024) {
        av_log(avctx, AV_LOG_ERROR, "Unexpected extradata size %d\n",
               avctx->extradata_size);
        return AVERROR_INVALIDDATA;
    }

    s->avctx = avctx;

    if (s->avctx->extradata_size == 12) {
        /* special case for magic carpet FLIs */
        s->fli_type = FLC_MAGIC_CARPET_SYNTHETIC_TYPE_CODE;
        depth = 8;
    } else if (avctx->extradata_size == 1024) {
        uint8_t *ptr = avctx->extradata;
        int i;

        for (i = 0; i < 256; i++) {
            s->palette[i] = AV_RL32(ptr);
            ptr += 4;
        }
        depth = 8;
        /* FLI in MOV, see e.g. FFmpeg trac issue #626 */
    } else if (avctx->extradata_size == 0 ||
               avctx->extradata_size == 256 ||
               /* see FFmpeg ticket #1234 */
               avctx->extradata_size == 904) {
        s->fli_type = FLI_TYPE_CODE;
        depth = 8;
    } else {
        s->fli_type = AV_RL16(&fli_header[4]);
        depth = AV_RL16(&fli_header[12]);
    }

    if (depth == 0) {
        depth = 8; /* Some FLC generators set depth to zero, when they mean 8Bpp. Fix up here */
    }

    if ((s->fli_type == FLC_FLX_TYPE_CODE) && (depth == 16)) {
        depth = 15; /* Original Autodesk FLX's say the depth is 16Bpp when it is really 15Bpp */
    }

    switch (depth) {
    case 8:
        avctx->pix_fmt = AV_PIX_FMT_PAL8;
        break;
    case 15:
        avctx->pix_fmt = AV_PIX_FMT_RGB555;
        break;
    case 16:
        avctx->pix_fmt = AV_PIX_FMT_RGB565;
        break;
    case 24:
        avctx->pix_fmt = AV_PIX_FMT_BGR24; /* Supposedly BGR, but haven't any files to test with */
        av_log(avctx, AV_LOG_ERROR, "24Bpp FLC/FLX is unsupported due to no test files.\n");
        return AVERROR_PATCHWELCOME;
    default:
        av_log(avctx, AV_LOG_ERROR, "Unknown FLC/FLX depth of %d Bpp is unsupported.\n", depth);
        return AVERROR_INVALIDDATA;
    }

    avcodec_get_frame_defaults(&s->frame);
    s->frame.data[0] = NULL;
    s->new_palette = 0;

    return 0;
}
static void *start_encoder( void *ptr )
{
    obe_aud_enc_params_t *enc_params = ptr;
    obe_t *h = enc_params->h;
    obe_encoder_t *encoder = enc_params->encoder;
    obe_output_stream_t *stream = enc_params->stream;
    obe_raw_frame_t *raw_frame;
    obe_coded_frame_t *coded_frame;
    void *audio_buf = NULL;
    int64_t cur_pts = -1, pts_increment;
    int i, frame_size, ret, got_pkt, num_frames = 0, total_size = 0, audio_buf_len;
    AVFifoBuffer *out_fifo = NULL;
    AVAudioResampleContext *avr = NULL;
    AVPacket pkt;
    AVCodecContext *codec = NULL;
    AVFrame *frame = NULL;
    AVDictionary *opts = NULL;
    char is_latm[2];

    avcodec_register_all();

    codec = avcodec_alloc_context3( NULL );
    if( !codec )
    {
        fprintf( stderr, "Malloc failed\n" );
        goto finish;
    }

    for( i = 0; lavc_encoders[i].obe_name != -1; i++ )
    {
        if( lavc_encoders[i].obe_name == stream->stream_format )
            break;
    }

    if( lavc_encoders[i].obe_name == -1 )
    {
        fprintf( stderr, "[lavc] Could not find a mapping for the stream format\n" );
        goto finish;
    }

    AVCodec *enc = avcodec_find_encoder( lavc_encoders[i].lavc_name );
    if( !enc )
    {
        fprintf( stderr, "[lavc] Could not find encoder\n" );
        goto finish;
    }

    if( enc->sample_fmts[0] == -1 )
    {
        fprintf( stderr, "[lavc] No valid sample formats\n" );
        goto finish;
    }

    codec->sample_rate = enc_params->sample_rate;
    codec->bit_rate = stream->bitrate * 1000;
    codec->sample_fmt = enc->sample_fmts[0];
    codec->channels = av_get_channel_layout_nb_channels( stream->channel_layout );
    codec->channel_layout = stream->channel_layout;
    codec->time_base.num = 1;
    codec->time_base.den = OBE_CLOCK;
    codec->profile = stream->aac_opts.aac_profile == AAC_HE_V2 ? FF_PROFILE_AAC_HE_V2 :
                     stream->aac_opts.aac_profile == AAC_HE_V1 ? FF_PROFILE_AAC_HE :
                     FF_PROFILE_AAC_LOW;

    snprintf( is_latm, sizeof(is_latm), "%i", stream->aac_opts.latm_output );
    av_dict_set( &opts, "latm", is_latm, 0 );
    av_dict_set( &opts, "header_period", "2", 0 );

    if( avcodec_open2( codec, enc, &opts ) < 0 )
    {
        fprintf( stderr, "[lavc] Could not open encoder\n" );
        goto finish;
    }

    avr = avresample_alloc_context();
    if( !avr )
    {
        fprintf( stderr, "Malloc failed\n" );
        goto finish;
    }

    av_opt_set_int( avr, "in_channel_layout",  codec->channel_layout, 0 );
    av_opt_set_int( avr, "in_sample_fmt",      enc_params->input_sample_format, 0 );
    av_opt_set_int( avr, "in_sample_rate",     enc_params->sample_rate, 0 );
    av_opt_set_int( avr, "out_channel_layout", codec->channel_layout, 0 );
    av_opt_set_int( avr, "out_sample_fmt",     codec->sample_fmt, 0 );
    av_opt_set_int( avr, "dither_method",      AV_RESAMPLE_DITHER_TRIANGULAR_NS, 0 );

    if( avresample_open( avr ) < 0 )
    {
        fprintf( stderr, "Could not open AVResample\n" );
        goto finish;
    }

    /* The number of samples per E-AC3 frame is unknown until the encoder is ready */
    if( stream->stream_format == AUDIO_E_AC_3 || stream->stream_format == AUDIO_AAC )
    {
        pthread_mutex_lock( &encoder->queue.mutex );
        encoder->is_ready = 1;
        encoder->num_samples = codec->frame_size;
        /* Broadcast because input and muxer can be stuck waiting for encoder */
        pthread_cond_broadcast( &encoder->queue.in_cv );
        pthread_mutex_unlock( &encoder->queue.mutex );
    }

    frame_size = (double)codec->frame_size * 125 * stream->bitrate * enc_params->frames_per_pes / enc_params->sample_rate;
    /* NB: libfdk-aac already doubles the frame size appropriately */
    pts_increment = (double)codec->frame_size * OBE_CLOCK * enc_params->frames_per_pes / enc_params->sample_rate;

    out_fifo = av_fifo_alloc( frame_size );
    if( !out_fifo )
    {
        fprintf( stderr, "Malloc failed\n" );
        goto finish;
    }

    audio_buf_len = codec->frame_size * av_get_bytes_per_sample( codec->sample_fmt ) * codec->channels;
    audio_buf = av_malloc( audio_buf_len );
    if( !audio_buf )
    {
        fprintf( stderr, "Malloc failed\n" );
        goto finish;
    }

    frame = avcodec_alloc_frame();
    if( !frame )
    {
        fprintf( stderr, "Could not allocate frame\n" );
        goto finish;
    }

    while( 1 )
    {
        /* TODO: detect bitrate or channel reconfig */
        pthread_mutex_lock( &encoder->queue.mutex );
        while( !encoder->queue.size && !encoder->cancel_thread )
            pthread_cond_wait( &encoder->queue.in_cv, &encoder->queue.mutex );

        if( encoder->cancel_thread )
        {
            pthread_mutex_unlock( &encoder->queue.mutex );
            goto finish;
        }

        raw_frame = encoder->queue.queue[0];
        pthread_mutex_unlock( &encoder->queue.mutex );

        if( cur_pts == -1 )
            cur_pts = raw_frame->pts;

        if( avresample_convert( avr, NULL, 0, raw_frame->audio_frame.num_samples, raw_frame->audio_frame.audio_data,
                                raw_frame->audio_frame.linesize, raw_frame->audio_frame.num_samples ) < 0 )
        {
            syslog( LOG_ERR, "[lavc] Sample format conversion failed\n" );
            break;
        }

        raw_frame->release_data( raw_frame );
        raw_frame->release_frame( raw_frame );
        remove_from_queue( &encoder->queue );

        while( avresample_available( avr ) >= codec->frame_size )
        {
            got_pkt = 0;
            avcodec_get_frame_defaults( frame );
            frame->nb_samples = codec->frame_size;
            avresample_read( avr, &audio_buf, codec->frame_size );

            if( avcodec_fill_audio_frame( frame, codec->channels, codec->sample_fmt, audio_buf, audio_buf_len, 0 ) < 0 )
            {
                syslog( LOG_ERR, "[lavc] Could not fill audio frame\n" );
                break;
            }

            av_init_packet( &pkt );
            pkt.data = NULL;
            pkt.size = 0;

            ret = avcodec_encode_audio2( codec, &pkt, frame, &got_pkt );
            if( ret < 0 )
            {
                syslog( LOG_ERR, "[lavc] Audio encoding failed\n" );
                goto finish;
            }

            if( !got_pkt )
                continue;

            total_size += pkt.size;
            num_frames++;

            if( av_fifo_realloc2( out_fifo, av_fifo_size( out_fifo ) + pkt.size ) < 0 )
            {
                syslog( LOG_ERR, "Malloc failed\n" );
                break;
            }

            av_fifo_generic_write( out_fifo, pkt.data, pkt.size, NULL );
            obe_free_packet( &pkt );

            if( num_frames == enc_params->frames_per_pes )
            {
                coded_frame = new_coded_frame( encoder->output_stream_id, total_size );
                if( !coded_frame )
                {
                    syslog( LOG_ERR, "Malloc failed\n" );
                    goto finish;
                }

                av_fifo_generic_read( out_fifo, coded_frame->data, total_size, NULL );
                coded_frame->pts = cur_pts;
                coded_frame->random_access = 1; /* every frame output is a random access point */
                add_to_queue( &h->mux_queue, coded_frame );

                /* We need to generate PTS because frame sizes have changed */
                cur_pts += pts_increment;
                total_size = num_frames = 0;
            }
        }
    }

finish:
    if( frame )
        avcodec_free_frame( &frame );

    if( audio_buf )
        av_free( audio_buf );

    if( out_fifo )
        av_fifo_free( out_fifo );

    if( avr )
        avresample_free( &avr );

    if( codec )
    {
        avcodec_close( codec );
        av_free( codec );
    }

    free( enc_params );

    return NULL;
}
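/* A worked sketch of the sizing math above, under assumed numbers (this
 * function and its constants are illustrative, not part of OBE): AAC-LC with
 * 1024 samples per frame at 48 kHz, 128 kbps, one frame per PES. The factor
 * 125 converts kbps to bytes per second, so bytes per coded frame =
 * samples * kbps * 125 / sample_rate, and the PTS step is the frame duration
 * expressed on the (assumed) 27 MHz OBE_CLOCK. */
#include <inttypes.h>
#include <stdio.h>

static void sizing_example(void)
{
    int64_t clock = 27000000; /* assumed value of OBE_CLOCK */
    int frame_samples = 1024, sample_rate = 48000, bitrate_kbps = 128, frames_per_pes = 1;

    int frame_bytes = (double)frame_samples * 125 * bitrate_kbps * frames_per_pes / sample_rate;
    int64_t pts_inc = (double)frame_samples * clock * frames_per_pes / sample_rate;

    printf("bytes/frame=%d pts_increment=%"PRId64"\n", frame_bytes, pts_inc); /* 341 and 576000 */
}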
/*
 * Audio decoding.
 */
static void audio_decode_example(const char *outfilename, const char *filename)
{
    AVCodec *codec;
    AVCodecContext *c = NULL;
    int len;
    FILE *f, *outfile;
    uint8_t inbuf[AUDIO_INBUF_SIZE + FF_INPUT_BUFFER_PADDING_SIZE];
    AVPacket avpkt;
    AVFrame *decoded_frame = NULL;

    av_init_packet(&avpkt);

    printf("Decode audio file %s to %s\n", filename, outfilename);

    /* find the mpeg audio decoder */
    codec = avcodec_find_decoder(AV_CODEC_ID_MP2);
    if (!codec) {
        fprintf(stderr, "Codec not found\n");
        exit(1);
    }

    c = avcodec_alloc_context3(codec);
    if (!c) {
        fprintf(stderr, "Could not allocate audio codec context\n");
        exit(1);
    }

    /* open it */
    if (avcodec_open2(c, codec, NULL) < 0) {
        fprintf(stderr, "Could not open codec\n");
        exit(1);
    }

    f = fopen(filename, "rb");
    if (!f) {
        fprintf(stderr, "Could not open %s\n", filename);
        exit(1);
    }
    outfile = fopen(outfilename, "wb");
    if (!outfile) {
        av_free(c);
        exit(1);
    }

    /* decode until eof */
    avpkt.data = inbuf;
    avpkt.size = fread(inbuf, 1, AUDIO_INBUF_SIZE, f);

    while (avpkt.size > 0) {
        int got_frame = 0;

        if (!decoded_frame) {
            if (!(decoded_frame = avcodec_alloc_frame())) {
                fprintf(stderr, "Could not allocate audio frame\n");
                exit(1);
            }
        } else
            avcodec_get_frame_defaults(decoded_frame);

        len = avcodec_decode_audio4(c, decoded_frame, &got_frame, &avpkt);
        if (len < 0) {
            fprintf(stderr, "Error while decoding\n");
            exit(1);
        }
        if (got_frame) {
            /* if a frame has been decoded, output it */
            int data_size = av_samples_get_buffer_size(NULL, c->channels,
                                                       decoded_frame->nb_samples,
                                                       c->sample_fmt, 1);
            fwrite(decoded_frame->data[0], 1, data_size, outfile);
        }
        avpkt.size -= len;
        avpkt.data += len;
        avpkt.dts =
        avpkt.pts = AV_NOPTS_VALUE;
        if (avpkt.size < AUDIO_REFILL_THRESH) {
            /* Refill the input buffer, to avoid trying to decode
             * incomplete frames. Instead of this, one could also use
             * a parser, or use a proper container format through
             * libavformat. */
            memmove(inbuf, avpkt.data, avpkt.size);
            avpkt.data = inbuf;
            len = fread(avpkt.data + avpkt.size, 1,
                        AUDIO_INBUF_SIZE - avpkt.size, f);
            if (len > 0)
                avpkt.size += len;
        }
    }

    fclose(outfile);
    fclose(f);

    avcodec_close(c);
    av_free(c);
    avcodec_free_frame(&decoded_frame);
}
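/* The example above writes decoded_frame->data[0] only, which is fine for
 * packed formats such as the s16 output of the MP2 decoder. A hedged sketch
 * of what a planar-aware writer would look like (this helper is illustrative
 * and not part of the example): */
#include "libavutil/samplefmt.h"

static void write_samples(FILE *outfile, AVCodecContext *c, AVFrame *frame)
{
    int bps = av_get_bytes_per_sample(c->sample_fmt);

    if (!av_sample_fmt_is_planar(c->sample_fmt)) {
        /* packed: all channels interleaved in data[0] */
        int data_size = av_samples_get_buffer_size(NULL, c->channels,
                                                   frame->nb_samples, c->sample_fmt, 1);
        fwrite(frame->data[0], 1, data_size, outfile);
        return;
    }

    /* planar: one plane per channel, interleave by hand */
    for (int i = 0; i < frame->nb_samples; i++)
        for (int ch = 0; ch < c->channels; ch++)
            fwrite(frame->extended_data[ch] + i * bps, 1, bps, outfile);
}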
void *recorder_thread(void *ptr)
{
    struct camera *cam = (struct camera *)ptr;
    struct motion_detection md;
    AVPacket packet;
    AVFrame *frame;
    int got_frame, ret;
    unsigned int cnt = 0;
    time_t first_activity = 0;
    time_t last_activity = 0;

    if(open_camera(cam) < 0)
        return NULL;
    if(open_output(cam) < 0)
        return NULL;

    av_dump_format(cam->context, 0, cam->context->filename, 0);
    av_dump_format(cam->output_context, 0, cam->output_context->filename, 1);

    md.cam = cam;
    md.prev = cvCreateImage(cvSize(cam->codec->width, cam->codec->height), IPL_DEPTH_8U, 1);
    md.cur  = cvCreateImage(cvSize(cam->codec->width, cam->codec->height), IPL_DEPTH_8U, 1);
    md.silh = cvCreateImage(cvSize(cam->codec->width, cam->codec->height), IPL_DEPTH_8U, 1);
    cvZero(md.prev);
    cvZero(md.cur);
    cvZero(md.silh);
    md.img_convert_ctx = sws_getContext(
        cam->codec->width, cam->codec->height, cam->codec->pix_fmt,
        cam->codec->width, cam->codec->height, PIX_FMT_GRAY8,
        SWS_BICUBIC, NULL, NULL, NULL);
    md.buffer = (uint8_t*)av_malloc(3 * cam->codec->width * cam->codec->height);

    int got_key_frame = 0, first_detection = 1;
    frame = avcodec_alloc_frame();
    if(!frame) {
        av_err_msg("avcodec_alloc_frame", 0);
        return NULL;
    }

    while(1) {
        cam->last_io = time(NULL);
        if((ret = av_read_frame(cam->context, &packet)) < 0) {
            if(ret == AVERROR_EOF)
                break;
            else
                av_err_msg("av_read_frame", ret);
        }
        if(packet.stream_index == cam->video_stream_index) {
            // start on keyframe
            if(!got_key_frame && !(packet.flags & AV_PKT_FLAG_KEY)) {
                av_free_packet(&packet); // don't leak packets we skip
                continue;
            }
            got_key_frame = 1;

            avcodec_get_frame_defaults(frame);
            got_frame = 0;
            cnt = (cnt + 1) % cam->analize_frames;
            if(cnt == 0) {
                if((ret = avcodec_decode_video2(cam->codec, frame, &got_frame, &packet)) < 0)
                    av_err_msg("avcodec_decode_video2", ret);
                if(got_frame) {
                    if(detect_motion(&md, frame)) {
                        if(first_activity == 0)
                            first_activity = time(NULL);
                        last_activity = time(NULL);
                    } else {
                        if(first_activity > 0 && time(NULL) - last_activity > cam->motion_delay) {
                            if(!first_detection)
                                db_create_event(cam->id, first_activity, last_activity);
                            else
                                first_detection = 0;
                            first_activity = 0;
                        }
                    }
                }
                if(time(NULL) - cam->last_screenshot > 60 && (packet.flags & AV_PKT_FLAG_KEY)) {
                    char fname[128];
                    snprintf(fname, sizeof(fname), "%s/%s/screenshot.png", store_dir, cam->name);
                    cvSaveImage(fname, md.cur, 0);
                    cam->last_screenshot = time(NULL);
                }
            }

            packet.stream_index = cam->output_stream->id;
            if((ret = av_write_frame(cam->output_context, &packet)) < 0)
                av_err_msg("av_write_frame", ret);

            pthread_mutex_lock(&cam->consumers_lock);
            for(l1 *p = cam->cam_consumers_list; p != NULL; p = p->next) {
                struct cam_consumer *consumer = (struct cam_consumer *)p->value;
                if(!consumer->screen->active)
                    continue;
                if(consumer->screen->tmpl_size == 1) {
                    packet.stream_index = 0;
                    if((ret = av_write_frame(consumer->screen->rtp_context, &packet)) < 0)
                        av_err_msg("av_write_frame", ret);
                } else {
                    if(!got_frame) {
                        if((ret = avcodec_decode_video2(cam->codec, frame, &got_frame, &packet)) < 0) {
                            av_err_msg("avcodec_decode_video2", ret);
                            break;
                        }
                    }
                    if(got_frame)
                        copy_frame_to_consumer(frame, cam->codec->height, consumer);
                }
            }
            pthread_mutex_unlock(&cam->consumers_lock);
        }
        av_free_packet(&packet);

        if(!cam->active) {
            break;
        }
        if(time(NULL) - cam->file_started_at > 60 * 60) {
            db_update_videofile(cam);
            close_output(cam);
            open_output(cam);
            got_key_frame = 0;
        }
    }

    db_update_videofile(cam);
    close_output(cam);
    if((ret = avcodec_close(cam->codec)) < 0)
        av_err_msg("avcodec_close", ret);
    avformat_close_input(&cam->context);
    av_free(frame);
    cvReleaseImage(&md.prev);
    cvReleaseImage(&md.cur);
    cvReleaseImage(&md.silh);
    av_free(md.buffer);
    sws_freeContext(md.img_convert_ctx);
    return NULL;
}
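/* detect_motion() is project code that is not shown here. A plausible minimal
 * sketch, assuming the md->prev/md->cur/md->silh images and the GRAY8 sws
 * context set up in recorder_thread(): convert the decoded frame to
 * grayscale, diff it against the previous frame, threshold, and count changed
 * pixels. Both threshold constants are assumptions. */
static int detect_motion_sketch(struct motion_detection *md, AVFrame *frame)
{
    uint8_t *dst_data[4] = { (uint8_t *)md->cur->imageData };
    int dst_linesize[4]  = { md->cur->widthStep };

    sws_scale(md->img_convert_ctx, (const uint8_t * const *)frame->data,
              frame->linesize, 0, md->cam->codec->height, dst_data, dst_linesize);

    cvAbsDiff(md->cur, md->prev, md->silh);          /* per-pixel difference */
    cvThreshold(md->silh, md->silh, 30, 255, CV_THRESH_BINARY);
    cvCopy(md->cur, md->prev, NULL);                 /* current becomes previous */

    return cvCountNonZero(md->silh) > 500;           /* assumed activity cutoff */
}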
int main(int argc, char **argv)
{
    int ret;
    AVPacket packet;
    AVFrame *frame = av_frame_alloc();
    AVFrame *filt_frame = av_frame_alloc();
    int got_frame;

    if (!frame || !filt_frame) {
        perror("Could not allocate frame");
        exit(1);
    }
    if (argc != 2) {
        fprintf(stderr, "Usage: %s file\n", argv[0]);
        exit(1);
    }

    avcodec_register_all();
    av_register_all();
    avfilter_register_all();

    if ((ret = open_input_file(argv[1])) < 0)
        goto end;
    if ((ret = init_filters(filter_descr)) < 0)
        goto end;

    /* read all packets */
    while (1) {
        if ((ret = av_read_frame(fmt_ctx, &packet)) < 0)
            break;

        if (packet.stream_index == video_stream_index) {
            avcodec_get_frame_defaults(frame);
            got_frame = 0;
            ret = avcodec_decode_video2(dec_ctx, frame, &got_frame, &packet);
            if (ret < 0) {
                av_log(NULL, AV_LOG_ERROR, "Error decoding video\n");
                break;
            }

            if (got_frame) {
                frame->pts = av_frame_get_best_effort_timestamp(frame);

                /* push the decoded frame into the filtergraph */
                if (av_buffersrc_add_frame_flags(buffersrc_ctx, frame, AV_BUFFERSRC_FLAG_KEEP_REF) < 0) {
                    av_log(NULL, AV_LOG_ERROR, "Error while feeding the filtergraph\n");
                    break;
                }

                /* pull filtered frames from the filtergraph */
                while (1) {
                    ret = av_buffersink_get_frame(buffersink_ctx, filt_frame);
                    if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
                        break;
                    if (ret < 0)
                        goto end;
                    display_frame(filt_frame, buffersink_ctx->inputs[0]->time_base);
                    av_frame_unref(filt_frame);
                }
                av_frame_unref(frame);
            }
        }
        av_free_packet(&packet);
    }
end:
    avfilter_graph_free(&filter_graph);
    if (dec_ctx)
        avcodec_close(dec_ctx);
    avformat_close_input(&fmt_ctx);
    av_frame_free(&frame);
    av_frame_free(&filt_frame);

    if (ret < 0 && ret != AVERROR_EOF) {
        char buf[1024];
        av_strerror(ret, buf, sizeof(buf));
        fprintf(stderr, "Error occurred: %s\n", buf);
        exit(1);
    }

    exit(0);
}
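/* display_frame() is defined elsewhere in this example program. A hedged
 * sketch of the conventional implementation: sleep until the frame is due
 * (judged in the buffersink time base) and render the GRAY8 plane as ASCII
 * art. Requires <unistd.h> for usleep(). */
static void display_frame_sketch(const AVFrame *frame, AVRational time_base)
{
    static int64_t last_pts = AV_NOPTS_VALUE;

    if (frame->pts != AV_NOPTS_VALUE) {
        if (last_pts != AV_NOPTS_VALUE) {
            /* sleep roughly the wall-clock gap to the previous frame */
            int64_t delay = av_rescale_q(frame->pts - last_pts, time_base, AV_TIME_BASE_Q);
            if (delay > 0 && delay < 1000000)
                usleep(delay);
        }
        last_pts = frame->pts;
    }

    printf("\033c");                              /* clear the terminal */
    for (int y = 0; y < frame->height; y += 8) {  /* coarse subsampling */
        const uint8_t *p = frame->data[0] + y * frame->linesize[0];
        for (int x = 0; x < frame->width; x += 4)
            putchar(" .-+#"[p[x] / 52]);
        putchar('\n');
    }
    fflush(stdout);
}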
int decode_audio(AVCodecContext *ctx, queue_t *qa)
{
    static struct SwrContext *swr_ctx;
    static int64_t src_layout;
    static int src_freq;
    static int src_channels;
    static enum AVSampleFormat src_fmt = AV_SAMPLE_FMT_NONE;
    static AVFrame *aFrame;

    AVPacket pkt;
    AVPacket pkt_tmp;
    int64_t dec_channel_layout;
    int len, len2;
    int got_frame;

    if (astream.count > 192000 * 2)
        return -1;

    if (get_packet(qa, &pkt) == 0)
        return 0;

    if (!aFrame) {
        if (!(aFrame = avcodec_alloc_frame()))
            return -1;
    } else
        avcodec_get_frame_defaults(aFrame);

    pkt_tmp = pkt;

    while (pkt_tmp.size > 0) {
        got_frame = 0;
        len = avcodec_decode_audio4(ctx, aFrame, &got_frame, &pkt_tmp);

        if (len >= 0 && got_frame) {
            char *samples;
            int data_size;

            pkt_tmp.data += len;
            pkt_tmp.size -= len;

            dec_channel_layout =
                (aFrame->channel_layout &&
                 aFrame->channels == av_get_channel_layout_nb_channels(aFrame->channel_layout)) ?
                aFrame->channel_layout : av_get_default_channel_layout(aFrame->channels);

            if (aFrame->format != src_fmt || dec_channel_layout != src_layout ||
                aFrame->sample_rate != src_freq || !swr_ctx) {
                swr_free(&swr_ctx);
                swr_ctx = swr_alloc_set_opts(NULL, AV_CH_LAYOUT_STEREO, AV_SAMPLE_FMT_S16,
                                             aFrame->sample_rate,
                                             dec_channel_layout, aFrame->format,
                                             aFrame->sample_rate, 0, NULL);
                if (!swr_ctx || swr_init(swr_ctx) < 0) {
                    printf("Cannot create sample rate converter for conversion of "
                           "%d Hz %s %d channels to %d Hz %s %d channels!\n",
                           aFrame->sample_rate, av_get_sample_fmt_name(aFrame->format),
                           (int)aFrame->channels, aFrame->sample_rate,
                           av_get_sample_fmt_name(AV_SAMPLE_FMT_S16), 2);
                    break;
                }
                src_layout   = dec_channel_layout;
                src_channels = aFrame->channels;
                src_freq     = aFrame->sample_rate;
                src_fmt      = aFrame->format;
            }

            if (swr_ctx) {
                const uint8_t **in = (const uint8_t **)aFrame->extended_data;
                uint8_t *out[] = { decoder_buffer };
                int out_count = 192000 * 3 / 2 / av_get_bytes_per_sample(AV_SAMPLE_FMT_S16);

                len2 = swr_convert(swr_ctx, out, out_count, in, aFrame->nb_samples);
                if (len2 < 0) {
                    printf("swr_convert() failed\n");
                    break;
                }
                if (len2 == out_count) {
                    printf("warning: audio buffer is probably too small\n");
                    swr_init(swr_ctx);
                }

                /* converted data is always stereo s16: 2 channels * 2 bytes per sample */
                data_size = len2 * 2 * av_get_bytes_per_sample(AV_SAMPLE_FMT_S16);

                mutex_lock(&astream.lock);
                samples = astream.buffer + astream.count;
                memcpy(samples, decoder_buffer, data_size);
                astream.count += data_size;
                mutex_unlock(&astream.lock);
            }
        } else {
            /* decode error or no frame produced: drop the rest of this packet */
            pkt_tmp.size = 0;
        }
    }

    av_free_packet(&pkt);
    return 1;
}
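/* The dense ternary above guards against frames whose channel_layout field
 * disagrees with their channel count. The same check as a standalone helper
 * (the name is illustrative): */
static int64_t sane_channel_layout(const AVFrame *f)
{
    if (f->channel_layout &&
        f->channels == av_get_channel_layout_nb_channels(f->channel_layout))
        return f->channel_layout;                      /* layout is trustworthy */
    return av_get_default_channel_layout(f->channels); /* derive from the count */
}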
static bool encode_audio(ffemu_t *handle, AVPacket *pkt, bool dry)
{
    av_init_packet(pkt);
    pkt->data = handle->audio.outbuf;
    pkt->size = handle->audio.outbuf_size;

#ifdef HAVE_FFMPEG_AVCODEC_ENCODE_AUDIO2
    AVFrame frame;
    avcodec_get_frame_defaults(&frame);

    frame.nb_samples = handle->audio.frames_in_buffer;
    frame.pts = handle->audio.frame_cnt;

    int samples_size = frame.nb_samples * handle->audio.codec->channels * sizeof(int16_t);
    avcodec_fill_audio_frame(&frame, handle->audio.codec->channels,
                             handle->audio.codec->sample_fmt,
                             (const uint8_t*)handle->audio.buffer,
                             samples_size, 1);

    int got_packet = 0;
    if (avcodec_encode_audio2(handle->audio.codec, pkt, dry ? NULL : &frame, &got_packet) < 0)
        return false;

    if (!got_packet)
    {
        pkt->size = 0;
        pkt->pts = AV_NOPTS_VALUE;
        pkt->dts = AV_NOPTS_VALUE;
        return true;
    }

    if (pkt->pts != (int64_t)AV_NOPTS_VALUE)
    {
        pkt->pts = av_rescale_q(pkt->pts, handle->audio.codec->time_base,
                                handle->muxer.astream->time_base);
    }

    if (pkt->dts != (int64_t)AV_NOPTS_VALUE)
    {
        pkt->dts = av_rescale_q(pkt->dts, handle->audio.codec->time_base,
                                handle->muxer.astream->time_base);
    }
#else
    if (dry)
        return false;

    // Zero-pad the tail so the encoder always sees a full frame_size worth of samples.
    memset(handle->audio.buffer + handle->audio.frames_in_buffer * handle->audio.codec->channels,
           0,
           (handle->audio.codec->frame_size - handle->audio.frames_in_buffer) *
           handle->audio.codec->channels * sizeof(int16_t));

    int out_size = avcodec_encode_audio(handle->audio.codec,
                                        handle->audio.outbuf,
                                        handle->audio.outbuf_size,
                                        handle->audio.buffer);

    if (out_size < 0)
        return false;

    if (out_size == 0)
    {
        pkt->size = 0;
        return true;
    }

    pkt->size = out_size;

    if (handle->audio.codec->coded_frame->pts != (int64_t)AV_NOPTS_VALUE)
    {
        pkt->pts = av_rescale_q(handle->audio.codec->coded_frame->pts,
                                handle->audio.codec->time_base,
                                handle->muxer.astream->time_base);
    }
    else
        pkt->pts = AV_NOPTS_VALUE;

    if (handle->audio.codec->coded_frame->key_frame)
        pkt->flags |= AV_PKT_FLAG_KEY;
#endif

    pkt->stream_index = handle->muxer.astream->index;
    return true;
}
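/* Both branches above rescale packet timestamps from the codec time base to
 * the muxer stream time base with av_rescale_q(). A tiny worked sketch with
 * assumed bases -- 48 kHz codec ticks onto a 90 kHz stream clock: */
#include <stdio.h>
#include "libavutil/mathematics.h"

static void rescale_example(void)
{
    AVRational codec_tb  = { 1, 48000 };
    AVRational stream_tb = { 1, 90000 };
    int64_t pts = 1024; /* one 1024-sample audio frame */

    /* 1024 * (1/48000) / (1/90000) = 1920 */
    printf("rescaled pts = %lld\n", (long long)av_rescale_q(pts, codec_tb, stream_tb));
}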
int read_audio(char *fname, audio_data_t *ad)
{
    // It's important this be aligned correctly...
    // It must also start out NULL so avformat_open_input() allocates it.
    AVFormatContext *pFormatCtx __attribute__ ((aligned (16))) = NULL;

    if (avformat_open_input(&pFormatCtx, fname, NULL, 0) != 0) {
        return -1;
    }
    avformat_find_stream_info(pFormatCtx, NULL);

    if (pFormatCtx->streams[0]->codec->codec_type != AVMEDIA_TYPE_AUDIO) {
        avformat_close_input(&pFormatCtx);
        return -1;
    }

    AVPacket packet;
    av_init_packet(&packet);

    AVCodecContext *aCodecCtx = pFormatCtx->streams[0]->codec;
    AVCodec *aCodec = avcodec_find_decoder(aCodecCtx->codec_id);
    if (!aCodec) {
        avformat_close_input(&pFormatCtx);
        return -1;
    }
    if (avcodec_open2(aCodecCtx, aCodec, NULL) < 0) {
        avformat_close_input(&pFormatCtx);
        return -1;
    }

    int gotit = 0;
    AVFrame *frame = avcodec_alloc_frame();
    if (!frame) {
        avcodec_close(aCodecCtx);
        avformat_close_input(&pFormatCtx);
        return -1;
    }

    double sec_duration = pFormatCtx->duration / (double)AV_TIME_BASE;
    ad->duration = sec_duration;
    int brate = pFormatCtx->bit_rate;

    int total_data_size = 0;
    int total_samples = 0;
    // Rough starting estimate; the buffer grows below if it turns out too small.
    int estimated_buff_size = brate * (int)floor(sec_duration) / 2;
    int allocated_buffer = estimated_buff_size;
    ad->samples = malloc(allocated_buffer);
    ad->channels = aCodecCtx->channels;
    ad->sample_rate = aCodecCtx->sample_rate;

    int rv = av_read_frame(pFormatCtx, &packet);
    while (rv >= 0 && packet.size > 0) {
        avcodec_get_frame_defaults(frame);
        gotit = 0;
        int len = avcodec_decode_audio4(aCodecCtx, frame, &gotit, &packet);
        if (len >= 0 && gotit) {
            int plane_size;
            int data_size = av_samples_get_buffer_size(&plane_size, aCodecCtx->channels,
                                                       frame->nb_samples,
                                                       aCodecCtx->sample_fmt, 1);
            if (total_data_size + data_size > allocated_buffer) {
                allocated_buffer = allocated_buffer * 1.25;
                ad->samples = realloc(ad->samples, allocated_buffer);
            }
            if (av_sample_fmt_is_planar(aCodecCtx->sample_fmt) && aCodecCtx->channels > 1) {
                // Planar planes need not be contiguous; copy each channel in turn.
                uint8_t *out = (uint8_t *)ad->samples + total_data_size;
                for (int ch = 0; ch < aCodecCtx->channels; ch++) {
                    memcpy(out, frame->extended_data[ch], plane_size);
                    out += plane_size;
                }
            } else {
                memcpy(ad->samples + total_data_size, frame->extended_data[0], data_size);
            }
            total_data_size += data_size;
            total_samples += frame->nb_samples;
        }
        av_free_packet(&packet); // don't leak the demuxed packet
        rv = av_read_frame(pFormatCtx, &packet);
    }

    // Use the last frame to fill in the info needed
    ad->used_buffer_size = total_data_size;
    ad->planar = av_sample_fmt_is_planar(aCodecCtx->sample_fmt);
    ad->sample_size = av_get_bytes_per_sample(aCodecCtx->sample_fmt);
    ad->samples = realloc(ad->samples, total_data_size);
    ad->buffer_size = total_data_size;
    ad->num_samples = total_samples;

    avcodec_close(aCodecCtx);
    avformat_close_input(&pFormatCtx);
    return 1;
}
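/* A hedged sketch of how a consumer could address one sample in the buffer
 * read_audio() fills, given the planar flag it records. frame_offset is the
 * byte offset of a decoded frame's chunk within ad->samples and nb_samples
 * that frame's sample count; the helper name and parameters are illustrative. */
static const uint8_t *sample_ptr(const audio_data_t *ad, int frame_offset,
                                 int nb_samples, int i, int ch)
{
    const uint8_t *base = (const uint8_t *)ad->samples + frame_offset;

    if (ad->planar) /* plane for channel ch, then sample i within that plane */
        return base + (ch * nb_samples + i) * ad->sample_size;

    return base + (i * ad->channels + ch) * ad->sample_size; /* interleaved */
}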