status_t
AVCodecEncoder::SetUp(const media_format* inputFormat)
{
	TRACE("AVCodecEncoder::SetUp()\n");

	if (fContext == NULL)
		return B_NO_INIT;

	if (inputFormat == NULL)
		return B_BAD_VALUE;

	// For raw formats the codec ID is not known up front; derive it from
	// the input format and look up the matching FFmpeg encoder.
	if (fCodec == NULL && fCodecID == CODEC_ID_NONE) {
		fCodecID = raw_audio_codec_id_for(*inputFormat);
		if (fCodecID != CODEC_ID_NONE)
			fCodec = avcodec_find_encoder(fCodecID);
	}

	if (fCodec == NULL) {
		TRACE(" encoder not found!\n");
		return B_NO_INIT;
	}

	_CloseCodecIfNeeded();

	fInputFormat = *inputFormat;
	fFramesWritten = 0;

	// The FFmpeg Writer plugin stores a cookie in the user data section of
	// the media_format: a 'ffmp' tag, followed by the writer's team_id and
	// its AVCodecContext pointer.
	const uchar* cookie = inputFormat->user_data;
	if (*(const uint32*)cookie == 'ffmp') {
		cookie += sizeof(uint32);

		// The pointer is only usable when the Writer lives in our own
		// team, so compare the stored team_id against ours.
		app_info appInfo;
		bool sameTeam = be_app->GetAppInfo(&appInfo) == B_OK
			&& *(const team_id*)cookie == appInfo.team;
		if (sameTeam) {
			cookie += sizeof(team_id);
			// Use the AVCodecContext from the Writer. This works better
			// than using our own context with some encoders.
			// NOTE(review): this overwrites fContext without releasing the
			// previous context -- presumably its lifetime is managed
			// elsewhere; verify there is no leak.
			fContext = *(AVCodecContext**)cookie;
		}
	}

	return _Setup();
}
status_t
AVFormatWriter::StreamCookie::Init(media_format* format,
	const media_codec_info* codecInfo)
{
	TRACE("AVFormatWriter::StreamCookie::Init()\n");

	BAutolock _(fStreamLock);

	// Append a new stream to the container context; its index is also the
	// stream index used for every packet we write later.
	fPacket.stream_index = fContext->nb_streams;
	fStream = av_new_stream(fContext, fPacket.stream_index);
	if (fStream == NULL) {
		TRACE(" failed to add new stream\n");
		return B_ERROR;
	}

//	TRACE(" fStream->codec: %p\n", fStream->codec);

	AVCodecContext* codecContext = fStream->codec;

	// TODO: This is a hack for now! Use avcodec_find_encoder_by_name()
	// or something similar...
	codecContext->codec_id = (CodecID)codecInfo->sub_id;
	if (codecContext->codec_id == CODEC_ID_NONE)
		codecContext->codec_id = raw_audio_codec_id_for(*format);

	// Setup the stream according to the media format...
	if (format->type == B_MEDIA_RAW_VIDEO) {
		codecContext->codec_type = AVMEDIA_TYPE_VIDEO;
#if GET_CONTEXT_DEFAULTS
// NOTE: API example does not do this:
		avcodec_get_context_defaults(codecContext);
#endif
		const media_raw_video_format& video = format->u.raw_video;

		// frame rate
		codecContext->time_base.num = 1;
		codecContext->time_base.den = (int)video.field_rate;

		// video size
		codecContext->width = video.display.line_width;
		codecContext->height = video.display.line_count;

		// pixel aspect ratio; fall back to deriving it from the frame
		// dimensions when the format does not specify one
		fStream->sample_aspect_ratio.num = video.pixel_width_aspect;
		fStream->sample_aspect_ratio.den = video.pixel_height_aspect;
		if (fStream->sample_aspect_ratio.num == 0
			|| fStream->sample_aspect_ratio.den == 0) {
			av_reduce(&fStream->sample_aspect_ratio.num,
				&fStream->sample_aspect_ratio.den, codecContext->width,
				codecContext->height, 255);
		}

		codecContext->gop_size = 12;
		codecContext->sample_aspect_ratio = fStream->sample_aspect_ratio;

		// Use the last supported pixel format of the AVCodec, which we hope
		// is the one with the best quality (true for all currently supported
		// encoders).
//		AVCodec* codec = codecContext->codec;
//		for (int i = 0; codec->pix_fmts[i] != PIX_FMT_NONE; i++)
//			codecContext->pix_fmt = codec->pix_fmts[i];
		codecContext->pix_fmt = PIX_FMT_YUV420P;
	} else if (format->type == B_MEDIA_RAW_AUDIO) {
		codecContext->codec_type = AVMEDIA_TYPE_AUDIO;
#if GET_CONTEXT_DEFAULTS
// NOTE: API example does not do this:
		avcodec_get_context_defaults(codecContext);
#endif
		const media_raw_audio_format& audio = format->u.raw_audio;

		// frame rate
		codecContext->sample_rate = (int)audio.frame_rate;
		// channels
		codecContext->channels = audio.channel_count;

		// Map the Media Kit sample format onto the FFmpeg one; signed
		// 8 bit audio has no FFmpeg equivalent and is rejected.
		// NOTE(review): on this error path the stream added above stays
		// registered in fContext -- presumably harmless since the writer
		// is abandoned; verify against the caller.
		switch (audio.format) {
			case media_raw_audio_format::B_AUDIO_FLOAT:
				codecContext->sample_fmt = SAMPLE_FMT_FLT;
				break;
			case media_raw_audio_format::B_AUDIO_DOUBLE:
				codecContext->sample_fmt = SAMPLE_FMT_DBL;
				break;
			case media_raw_audio_format::B_AUDIO_INT:
				codecContext->sample_fmt = SAMPLE_FMT_S32;
				break;
			case media_raw_audio_format::B_AUDIO_SHORT:
				codecContext->sample_fmt = SAMPLE_FMT_S16;
				break;
			case media_raw_audio_format::B_AUDIO_UCHAR:
				codecContext->sample_fmt = SAMPLE_FMT_U8;
				break;
			case media_raw_audio_format::B_AUDIO_CHAR:
			default:
				return B_MEDIA_BAD_FORMAT;
		}

		if (audio.channel_mask == 0) {
			// guess the channel mask...
			switch (audio.channel_count) {
				case 1:
					codecContext->channel_layout = CH_LAYOUT_MONO;
					break;
				case 3:
					codecContext->channel_layout = CH_LAYOUT_SURROUND;
					break;
				case 4:
					codecContext->channel_layout = CH_LAYOUT_QUAD;
					break;
				case 5:
					codecContext->channel_layout = CH_LAYOUT_5POINT0;
					break;
				case 6:
					codecContext->channel_layout = CH_LAYOUT_5POINT1;
					break;
				case 8:
					codecContext->channel_layout = CH_LAYOUT_7POINT1;
					break;
				case 10:
					codecContext->channel_layout = CH_LAYOUT_7POINT1_WIDE;
					break;
				case 2:
				default:
					codecContext->channel_layout = CH_LAYOUT_STEREO;
					break;
			}
		} else {
			// The bits match 1:1 for media_multi_channels and FFmpeg
			// defines.
			codecContext->channel_layout = audio.channel_mask;
		}
	}

	// Some formats want stream headers to be separate
	if ((fContext->oformat->flags & AVFMT_GLOBALHEADER) != 0)
		codecContext->flags |= CODEC_FLAG_GLOBAL_HEADER;

	TRACE(" stream->time_base: (%d/%d), codec->time_base: (%d/%d))\n",
		fStream->time_base.num, fStream->time_base.den,
		codecContext->time_base.num, codecContext->time_base.den);

#if 0
	// Write the AVCodecContext pointer to the user data section of the
	// media_format. For some encoders, it seems to be necessary to use
	// the AVCodecContext of the AVStream in order to successfully encode
	// anything and write valid media files. For example some codecs need
	// to store meta data or global data in the container.
	app_info appInfo;
	if (be_app->GetAppInfo(&appInfo) == B_OK) {
		uchar* userData = format->user_data;
		*(uint32*)userData = 'ffmp';
		userData += sizeof(uint32);
		*(team_id*)userData = appInfo.team;
		userData += sizeof(team_id);
		*(AVCodecContext**)userData = fStream->codec;
	}
#endif

	return B_OK;
}