void register_encoder() { status_t err; media_format tempFormat, mediaFormat; media_format_description formatDescription; BMediaFormats formatObject; formatObject.Lock(); /* register as a WAV codec */ memset(&mediaFormat, 0, sizeof(media_format)); mediaFormat.type = B_MEDIA_ENCODED_AUDIO; mediaFormat.u.encoded_audio = media_encoded_audio_format::wildcard; memset(&formatDescription, 0, sizeof(media_format_description)); formatDescription.family = B_WAV_FORMAT_FAMILY; formatDescription.u.wav.codec = WAVE_FORMAT_MPEG; err = formatObject.MakeFormatFor(formatDescription, mediaFormat, &tempFormat); if (err == B_MEDIA_DUPLICATE_FORMAT) formatObject.GetFormatFor(formatDescription, &tempFormat); s_wavFormat = tempFormat; /* register as a MPEG codec */ memset(&mediaFormat, 0, sizeof(media_format)); mediaFormat.type = B_MEDIA_ENCODED_AUDIO; mediaFormat.u.encoded_audio = media_encoded_audio_format::wildcard; memset(&formatDescription, 0, sizeof(media_format_description)); formatDescription.family = B_MPEG_FORMAT_FAMILY; formatDescription.u.mpeg.id = B_MPEG_1_AUDIO_LAYER_3; err = formatObject.MakeFormatFor(formatDescription, mediaFormat, &tempFormat); if (err == B_MEDIA_DUPLICATE_FORMAT) formatObject.GetFormatFor(formatDescription, &tempFormat); s_mpegFormat = tempFormat; formatObject.Unlock(); }
/*!	Looks up the registered media_format for a B_BEOS_FORMAT_FAMILY code.
	\param format the BeOS family format code to look up.
	\param _format filled with the registered format on success.
	\param type if not B_MEDIA_UNKNOWN_TYPE, the found format must have
		this media type, otherwise B_BAD_TYPE is returned.
*/
status_t
BMediaFormats::GetBeOSFormatFor(uint32 format, media_format* _format,
	media_type type)
{
	media_format_description description;
	description.family = B_BEOS_FORMAT_FAMILY;
	description.u.beos.format = format;

	BMediaFormats formats;
	status_t status = formats.GetFormatFor(description, _format);
	if (status < B_OK)
		return status;

	// Accept any type, or only an exact match if one was requested.
	if (type == B_MEDIA_UNKNOWN_TYPE || type == _format->type)
		return B_OK;

	return B_BAD_TYPE;
}
/*!	Translates an OSS format bitmask into a media_format, either directly
	(raw PCM) or via the BMediaFormats database.
*/
status_t
OpenSoundDevice::get_media_format_for(int fmt, media_format &format)
{
	BMediaFormats formats;
	status_t err = formats.InitCheck();
	if (err < B_OK)
		return err;

	// shortcut for raw: no database lookup needed, return a wildcard
	if (fmt & AFMT_SUPPORTED_PCM) {
		format = media_format();
		format.type = B_MEDIA_RAW_AUDIO;
		format.u.raw_audio = media_raw_audio_format::wildcard;
		return B_OK;
	}

	// encoded formats go through the format database
	media_format_description desc;
	err = get_media_format_description_for(fmt, &desc);
	if (err < B_OK)
		return err;

	return formats.GetFormatFor(desc, &format);
}
/*!	Looks up the registered media_format for a QuickTime vendor/codec pair.
	\param vendor the QuickTime vendor identifier.
	\param codec the QuickTime codec identifier.
	\param _format filled with the registered format on success.
	\param type if not B_MEDIA_UNKNOWN_TYPE, the found format must have
		this media type, otherwise B_BAD_TYPE is returned.
*/
status_t
BMediaFormats::GetQuicktimeFormatFor(uint32 vendor, uint32 codec,
	media_format* _format, media_type type)
{
	media_format_description description;
	description.family = B_QUICKTIME_FORMAT_FAMILY;
	description.u.quicktime.vendor = vendor;
	description.u.quicktime.codec = codec;

	BMediaFormats formats;
	status_t status = formats.GetFormatFor(description, _format);
	if (status < B_OK)
		return status;

	// Accept any type, or only an exact match if one was requested.
	if (type == B_MEDIA_UNKNOWN_TYPE || type == _format->type)
		return B_OK;

	return B_BAD_TYPE;
}
/*!	Looks up the registered media_format for an AVI codec identifier.
	\param codec the AVI (fourcc) codec identifier.
	\param _format filled with the registered format on success.
	\param type if not B_MEDIA_UNKNOWN_TYPE, the found format must have
		this media type, otherwise B_BAD_TYPE is returned.

	Note: a stale UNIMPLEMENTED() marker was removed here -- the function
	is fully implemented and mirrors GetQuicktimeFormatFor(), which
	carries no such marker.
*/
status_t
BMediaFormats::GetAVIFormatFor(uint32 codec, media_format* _format,
	media_type type)
{
	BMediaFormats formats;
	media_format_description description;
	description.family = B_AVI_FORMAT_FAMILY;
	description.u.avi.codec = codec;

	status_t status = formats.GetFormatFor(description, _format);
	if (status < B_OK)
		return status;

	if (type != B_MEDIA_UNKNOWN_TYPE && type != _format->type)
		return B_BAD_TYPE;

	return B_OK;
}
/*!	Builds the media_format for a Tobias-header text stream.
	The format is looked up in the BMediaFormats database; on failure a
	generic encoded text format is used so the caller can still read raw
	chunks via the ReadChunk interface.
*/
static status_t
get_text_format(tobias_stream_header * header, media_format * format)
{
	TRACE(" get_text_format\n");

	// look up the registered format for the description
	media_format_description description = tobias_text_description();
	BMediaFormats formats;
	status_t result = formats.InitCheck();
	if (result == B_OK)
		result = formats.GetFormatFor(description, format);
	if (result != B_OK) {
		// ignore error, allow user to use ReadChunk interface
		*format = tobias_text_encoded_media_format();
	}

	// fill out format from header packet
	return B_OK;
}
/*!	Builds the media_format for a Tobias-header video stream.
	The format is looked up in the BMediaFormats database by AVI fourcc;
	on failure a generic encoded-video format is used so the caller can
	still read raw chunks via the ReadChunk interface.
*/
static status_t
get_video_format(tobias_stream_header * header, media_format * format)
{
	TRACE(" get_video_format\n");

	// assemble the fourcc codec identifier from the subtype bytes
	media_format_description description = tobias_video_description();
	description.u.avi.codec = header->subtype[3] << 24
		| header->subtype[2] << 16 | header->subtype[1] << 8
		| header->subtype[0];

	BMediaFormats formats;
	status_t result = formats.InitCheck();
	if (result == B_OK)
		result = formats.GetFormatFor(description, format);
	if (result != B_OK) {
		// ignore error, allow user to use ReadChunk interface
		*format = tobias_video_encoded_media_format();
	}

	// fill out format from header packet
	format->user_data_type = B_CODEC_TYPE_INFO;
	strncpy((char*)format->user_data, header->subtype, 4);

	media_encoded_video_format& encoded = format->u.encoded_video;
	encoded.frame_size = header->video.width * header->video.height;

	media_raw_video_format& output = encoded.output;
	output.field_rate = 10000000.0 / header->time_unit;
	output.interlace = 1;
	output.first_active = 0;
	output.last_active = header->video.height - 1;
	output.orientation = B_VIDEO_TOP_LEFT_RIGHT;
	output.pixel_width_aspect = 1;
	output.pixel_height_aspect = 1;
	output.display.line_width = header->video.width;
	output.display.line_count = header->video.height;
	output.display.bytes_per_row = 0;
	output.display.pixel_offset = 0;
	output.display.line_offset = 0;
	output.display.flags = 0;

	// TODO: wring more info out of the headers

	return B_OK;
}
/*!	Builds the media_format for a Tobias-header audio stream.
	The WAV codec id is stored as four hex digits in header->subtype; the
	format is looked up in the BMediaFormats database, falling back to a
	generic encoded-audio format so the caller can still read raw chunks.
*/
static status_t
get_audio_format(tobias_stream_header * header, media_format * format)
{
	TRACE(" get_audio_format\n");

	// header->subtype is a 4-byte tag that is not guaranteed to be
	// NUL-terminated (get_video_format() reads exactly 4 bytes of it),
	// but sscanf() requires a terminated C string -- copy it into a
	// terminated local buffer before parsing.
	char subtype[5];
	memcpy(subtype, header->subtype, 4);
	subtype[4] = '\0';
	unsigned int wav_id = 0;
	sscanf(subtype, "%04x", &wav_id);

	// get the format for the description
	media_format_description description = tobias_audio_description();
	description.u.wav.codec = wav_id;
	BMediaFormats formats;
	status_t result = formats.InitCheck();
	if (result == B_OK) {
		result = formats.GetFormatFor(description, format);
	}
	if (result != B_OK) {
		*format = tobias_audio_encoded_media_format();
		// ignore error, allow user to use ReadChunk interface
	}

	// fill out format from header packet
	format->user_data_type = B_CODEC_TYPE_INFO;
	strncpy((char*)format->user_data, header->subtype, 4);
	format->u.encoded_audio.bit_rate = header->audio.avgbytespersec * 8;
	if (header->audio.channels == 1) {
		format->u.encoded_audio.multi_info.channel_mask = B_CHANNEL_LEFT;
	} else {
		format->u.encoded_audio.multi_info.channel_mask
			= B_CHANNEL_LEFT | B_CHANNEL_RIGHT;
	}
	format->u.encoded_audio.output.frame_rate
		= header->samples_per_unit * 10000000.0 / header->time_unit;
	format->u.encoded_audio.output.channel_count = header->audio.channels;
	format->u.encoded_audio.output.buffer_size
		= AudioBufferSize(&format->u.encoded_audio.output);
	// TODO: wring more info out of the headers
	return B_OK;
}
/*!	Initializes this stream for the given virtual index and translates the
	stream's FFmpeg codec parameters (AVCodecContext) into a Haiku
	media_format stored in fFormat.
	\return B_OK on success, B_ERROR for unknown stream types, and
		B_NOT_SUPPORTED for raw-audio sample formats that cannot be
		described by media_raw_audio_format.
*/
status_t
AVFormatReader::Stream::Init(int32 virtualIndex)
{
	TRACE("AVFormatReader::Stream::Init(%ld)\n", virtualIndex);

	status_t ret = StreamBase::Init(virtualIndex);
	if (ret != B_OK)
		return ret;

	// Get a pointer to the AVCodecContext for the stream at streamIndex.
	AVCodecContext* codecContext = fStream->codec;

#if 0
	// stippi: Here I was experimenting with the question if some fields of the
	// AVCodecContext change (or get filled out at all), if the AVCodec is opened.
	class CodecOpener {
	public:
		CodecOpener(AVCodecContext* context)
		{
			fCodecContext = context;
			AVCodec* codec = avcodec_find_decoder(context->codec_id);
			fCodecOpen = avcodec_open(context, codec) >= 0;
			if (!fCodecOpen)
				TRACE(" failed to open the codec!\n");
		}
		~CodecOpener()
		{
			if (fCodecOpen)
				avcodec_close(fCodecContext);
		}
	private:
		AVCodecContext* fCodecContext;
		bool fCodecOpen;
	} codecOpener(codecContext);
#endif

	// initialize the media_format for this stream
	media_format* format = &fFormat;
	memset(format, 0, sizeof(media_format));

	media_format_description description;

	// Set format family and type depending on codec_type of the stream.
	switch (codecContext->codec_type) {
		case AVMEDIA_TYPE_AUDIO:
			if ((codecContext->codec_id >= CODEC_ID_PCM_S16LE)
				&& (codecContext->codec_id <= CODEC_ID_PCM_U8)) {
				TRACE(" raw audio\n");
				format->type = B_MEDIA_RAW_AUDIO;
				description.family = B_ANY_FORMAT_FAMILY;
				// This will then apparently be handled by the (built into
				// BMediaTrack) RawDecoder.
			} else {
				TRACE(" encoded audio\n");
				format->type = B_MEDIA_ENCODED_AUDIO;
				description.family = B_MISC_FORMAT_FAMILY;
				description.u.misc.file_format = 'ffmp';
			}
			break;
		case AVMEDIA_TYPE_VIDEO:
			TRACE(" encoded video\n");
			format->type = B_MEDIA_ENCODED_VIDEO;
			description.family = B_MISC_FORMAT_FAMILY;
			description.u.misc.file_format = 'ffmp';
			break;
		default:
			TRACE(" unknown type\n");
			format->type = B_MEDIA_UNKNOWN_TYPE;
			return B_ERROR;
			break;
	}

	if (format->type == B_MEDIA_RAW_AUDIO) {
		// We cannot describe all raw-audio formats, some are unsupported.
		switch (codecContext->codec_id) {
			case CODEC_ID_PCM_S16LE:
				format->u.raw_audio.format
					= media_raw_audio_format::B_AUDIO_SHORT;
				format->u.raw_audio.byte_order
					= B_MEDIA_LITTLE_ENDIAN;
				break;
			case CODEC_ID_PCM_S16BE:
				format->u.raw_audio.format
					= media_raw_audio_format::B_AUDIO_SHORT;
				format->u.raw_audio.byte_order
					= B_MEDIA_BIG_ENDIAN;
				break;
			case CODEC_ID_PCM_U16LE:
				// Unsigned 16-bit has no media_raw_audio_format
				// counterpart, so these stay unsupported.
//				format->u.raw_audio.format
//					= media_raw_audio_format::B_AUDIO_USHORT;
//				format->u.raw_audio.byte_order
//					= B_MEDIA_LITTLE_ENDIAN;
				return B_NOT_SUPPORTED;
				break;
			case CODEC_ID_PCM_U16BE:
//				format->u.raw_audio.format
//					= media_raw_audio_format::B_AUDIO_USHORT;
//				format->u.raw_audio.byte_order
//					= B_MEDIA_BIG_ENDIAN;
				return B_NOT_SUPPORTED;
				break;
			case CODEC_ID_PCM_S8:
				format->u.raw_audio.format
					= media_raw_audio_format::B_AUDIO_CHAR;
				break;
			case CODEC_ID_PCM_U8:
				format->u.raw_audio.format
					= media_raw_audio_format::B_AUDIO_UCHAR;
				break;
			default:
				return B_NOT_SUPPORTED;
				break;
		}
	} else {
		if (description.family == B_MISC_FORMAT_FAMILY)
			description.u.misc.codec = codecContext->codec_id;

		BMediaFormats formats;
		status_t status = formats.GetFormatFor(description, format);
		if (status < B_OK)
			TRACE(" formats.GetFormatFor() error: %s\n", strerror(status));

		// Store the FourCC codec tag in the first four bytes of
		// user_data, NUL-terminated.
		format->user_data_type = B_CODEC_TYPE_INFO;
		*(uint32*)format->user_data = codecContext->codec_tag;
		format->user_data[4] = 0;
	}

	format->require_flags = 0;
	format->deny_flags = B_MEDIA_MAUI_UNDEFINED_FLAGS;

	switch (format->type) {
		case B_MEDIA_RAW_AUDIO:
			format->u.raw_audio.frame_rate = (float)codecContext->sample_rate;
			format->u.raw_audio.channel_count = codecContext->channels;
			format->u.raw_audio.channel_mask = codecContext->channel_layout;
			format->u.raw_audio.byte_order
				= avformat_to_beos_byte_order(codecContext->sample_fmt);
			format->u.raw_audio.format
				= avformat_to_beos_format(codecContext->sample_fmt);
			format->u.raw_audio.buffer_size = 0;

			// Read one packet and mark it for later re-use. (So our first
			// GetNextChunk() call does not read another packet.)
			if (_NextPacket(true) == B_OK) {
				TRACE(" successfully determined audio buffer size: %d\n",
					fPacket.size);
				format->u.raw_audio.buffer_size = fPacket.size;
			}
			break;

		case B_MEDIA_ENCODED_AUDIO:
			format->u.encoded_audio.bit_rate = codecContext->bit_rate;
			format->u.encoded_audio.frame_size = codecContext->frame_size;
			// Fill in some info about possible output format
			format->u.encoded_audio.output
				= media_multi_audio_format::wildcard;
			format->u.encoded_audio.output.frame_rate
				= (float)codecContext->sample_rate;
			// Channel layout bits match in Be API and FFmpeg.
			format->u.encoded_audio.output.channel_count
				= codecContext->channels;
			format->u.encoded_audio.multi_info.channel_mask
				= codecContext->channel_layout;
			format->u.encoded_audio.output.byte_order
				= avformat_to_beos_byte_order(codecContext->sample_fmt);
			format->u.encoded_audio.output.format
				= avformat_to_beos_format(codecContext->sample_fmt);
			// Prefer the container's block alignment for the buffer
			// size; otherwise derive it from frame size and sample size.
			if (codecContext->block_align > 0) {
				format->u.encoded_audio.output.buffer_size
					= codecContext->block_align;
			} else {
				format->u.encoded_audio.output.buffer_size
					= codecContext->frame_size * codecContext->channels
						* (format->u.encoded_audio.output.format
							& media_raw_audio_format::B_AUDIO_SIZE_MASK);
			}
			break;

		case B_MEDIA_ENCODED_VIDEO:
			// TODO: Specifying any of these seems to throw off the format
			// matching later on.
//			format->u.encoded_video.avg_bit_rate = codecContext->bit_rate;
//			format->u.encoded_video.max_bit_rate = codecContext->bit_rate
//				+ codecContext->bit_rate_tolerance;
//			format->u.encoded_video.encoding
//				= media_encoded_video_format::B_ANY;
//			format->u.encoded_video.frame_size = 1;
//			format->u.encoded_video.forward_history = 0;
//			format->u.encoded_video.backward_history = 0;

			format->u.encoded_video.output.field_rate = FrameRate();
			format->u.encoded_video.output.interlace = 1;
			format->u.encoded_video.output.first_active = 0;
			format->u.encoded_video.output.last_active
				= codecContext->height - 1;
			// TODO: Maybe libavformat actually provides that info
			// somewhere...
			format->u.encoded_video.output.orientation
				= B_VIDEO_TOP_LEFT_RIGHT;

			// Calculate the display aspect ratio
			AVRational displayAspectRatio;
			if (codecContext->sample_aspect_ratio.num != 0) {
				av_reduce(&displayAspectRatio.num, &displayAspectRatio.den,
					codecContext->width
						* codecContext->sample_aspect_ratio.num,
					codecContext->height
						* codecContext->sample_aspect_ratio.den,
					1024 * 1024);
				TRACE(" pixel aspect ratio: %d/%d, "
					"display aspect ratio: %d/%d\n",
					codecContext->sample_aspect_ratio.num,
					codecContext->sample_aspect_ratio.den,
					displayAspectRatio.num, displayAspectRatio.den);
			} else {
				// No pixel aspect ratio known; fall back to the reduced
				// frame dimensions.
				av_reduce(&displayAspectRatio.num, &displayAspectRatio.den,
					codecContext->width, codecContext->height, 1024 * 1024);
				TRACE(" no display aspect ratio (%d/%d)\n",
					displayAspectRatio.num, displayAspectRatio.den);
			}
			format->u.encoded_video.output.pixel_width_aspect
				= displayAspectRatio.num;
			format->u.encoded_video.output.pixel_height_aspect
				= displayAspectRatio.den;

			format->u.encoded_video.output.display.format
				= pixfmt_to_colorspace(codecContext->pix_fmt);
			format->u.encoded_video.output.display.line_width
				= codecContext->width;
			format->u.encoded_video.output.display.line_count
				= codecContext->height;
			TRACE(" width/height: %d/%d\n", codecContext->width,
				codecContext->height);
			format->u.encoded_video.output.display.bytes_per_row = 0;
			format->u.encoded_video.output.display.pixel_offset = 0;
			format->u.encoded_video.output.display.line_offset = 0;
			format->u.encoded_video.output.display.flags = 0; // TODO

			break;

		default:
			// This is an unknown format to us.
			break;
	}

	// Add the meta data, if any
	if (codecContext->extradata_size > 0) {
		format->SetMetaData(codecContext->extradata,
			codecContext->extradata_size);
		TRACE(" extradata: %p\n", format->MetaData());
	}

	TRACE(" extradata_size: %d\n", codecContext->extradata_size);
//	TRACE(" intra_matrix: %p\n", codecContext->intra_matrix);
//	TRACE(" inter_matrix: %p\n", codecContext->inter_matrix);
//	TRACE(" get_buffer(): %p\n", codecContext->get_buffer);
//	TRACE(" release_buffer(): %p\n", codecContext->release_buffer);

#ifdef TRACE_AVFORMAT_READER
	char formatString[512];
	if (string_for_format(*format, formatString, sizeof(formatString)))
		TRACE(" format: %s\n", formatString);

	uint32 encoding = format->Encoding();
	TRACE(" encoding '%.4s'\n", (char*)&encoding);
#endif

	return B_OK;
}
/*!	Sniffs the source for a Sun/NeXT ".snd"/".au" header and, if it is
	recognized, fills in the reader's stream state (data location, sample
	format, frame count, duration) and the media_format of the single
	audio stream.
	\param streamCount set to 1 on success (.au files carry one stream).
	\return B_OK if the source is a supported .au file, B_ERROR otherwise.
*/
status_t
auReader::Sniff(int32 *streamCount)
{
	TRACE("auReader::Sniff\n");

	fSource = dynamic_cast<BPositionIO *>(Reader::Source());
	if (!fSource) {
		TRACE("auReader::Sniff: not a BPositionIO\n");
		return B_ERROR;
	}

	// Determine the file size by seeking to the end.
	int64 filesize = Source()->Seek(0, SEEK_END);
	if (filesize < sizeof(struct snd_header)) {
		TRACE("auReader::Sniff: File too small\n");
		return B_ERROR;
	}

	struct snd_header header;
	if (sizeof(header) != Source()->ReadAt(0, &header, sizeof(header))) {
		TRACE("auReader::Sniff: header reading failed\n");
		return B_ERROR;
	}

	// NOTE(review): UINT32() presumably converts from the on-disk
	// (big-endian) byte order -- verify against its definition.
	if (UINT32(header.magic) != SND_MAGIC) {
		TRACE("auReader::Sniff: header not recognized\n");
		return B_ERROR;
	}

	TRACE("auReader::Sniff: we found something that looks like:\n");
	TRACE(" data_start %ld\n", UINT32(header.data_start));
	TRACE(" data_size %ld\n", UINT32(header.data_size));
	TRACE(" data_format %ld\n", UINT32(header.data_format));
	TRACE(" sampling_rate %ld\n", UINT32(header.sampling_rate));
	TRACE(" channel_count %ld\n", UINT32(header.channel_count));

	fDataStart = UINT32(header.data_start);
	fDataSize = UINT32(header.data_size);
	fChannelCount = UINT32(header.channel_count);
	fFrameRate = UINT32(header.sampling_rate);
	fFormatCode = UINT32(header.data_format);

	// Sanity-check the header values and clamp/default where possible.
	if (fDataStart > filesize) {
		TRACE("auReader::Sniff: data start too large\n");
		return B_ERROR;
	}
	if (fDataStart + fDataSize > filesize)
		fDataSize = filesize - fDataStart;
	if (fDataSize < 1) {
		TRACE("auReader::Sniff: data size too small\n");
		return B_ERROR;
	}
	if (fChannelCount < 1)
		fChannelCount = 1;
	if (fFrameRate < 1)
		fFrameRate = 44100;

	// Log the recognized format code (debugging only).
	switch (fFormatCode) {
		case SND_FORMAT_UNSPECIFIED: TRACE("SND_FORMAT_UNSPECIFIED\n"); break;
		case SND_FORMAT_MULAW_8: TRACE("SND_FORMAT_MULAW_8\n"); break;
		case SND_FORMAT_LINEAR_8: TRACE("SND_FORMAT_LINEAR_8\n"); break;
		case SND_FORMAT_LINEAR_16: TRACE("SND_FORMAT_LINEAR_16\n"); break;
		case SND_FORMAT_LINEAR_24: TRACE("SND_FORMAT_LINEAR_24\n"); break;
		case SND_FORMAT_LINEAR_32: TRACE("SND_FORMAT_LINEAR_32\n"); break;
		case SND_FORMAT_FLOAT: TRACE("SND_FORMAT_FLOAT\n"); break;
		case SND_FORMAT_DOUBLE: TRACE("SND_FORMAT_DOUBLE\n"); break;
		case SND_FORMAT_INDIRECT: TRACE("SND_FORMAT_INDIRECT\n"); break;
		case SND_FORMAT_NESTED: TRACE("SND_FORMAT_NESTED\n"); break;
		case SND_FORMAT_DSP_CORE: TRACE("SND_FORMAT_DSP_CORE\n"); break;
		case SND_FORMAT_DSP_DATA_8: TRACE("SND_FORMAT_DSP_DATA_8\n"); break;
		case SND_FORMAT_DSP_DATA_16: TRACE("SND_FORMAT_DSP_DATA_16\n"); break;
		case SND_FORMAT_DSP_DATA_24: TRACE("SND_FORMAT_DSP_DATA_24\n"); break;
		case SND_FORMAT_DSP_DATA_32: TRACE("SND_FORMAT_DSP_DATA_32\n"); break;
		case SND_FORMAT_DISPLAY: TRACE("SND_FORMAT_DISPLAY\n"); break;
		case SND_FORMAT_MULAW_SQUELCH: TRACE("SND_FORMAT_MULAW_SQUELCH\n"); break;
		case SND_FORMAT_EMPHASIZED: TRACE("SND_FORMAT_EMPHASIZED\n"); break;
		case SND_FORMAT_COMPRESSED: TRACE("SND_FORMAT_COMPRESSED\n"); break;
		case SND_FORMAT_COMPRESSED_EMPHASIZED: TRACE("SND_FORMAT_COMPRESSED_EMPHASIZED\n"); break;
		case SND_FORMAT_DSP_COMMANDS: TRACE("SND_FORMAT_DSP_COMMANDS\n"); break;
		case SND_FORMAT_DSP_COMMANDS_SAMPLES: TRACE("SND_FORMAT_DSP_COMMANDS_SAMPLES\n"); break;
		case SND_FORMAT_ADPCM_G721: TRACE("SND_FORMAT_ADPCM_G721\n"); break;
		case SND_FORMAT_ADPCM_G722: TRACE("SND_FORMAT_ADPCM_G722\n"); break;
		case SND_FORMAT_ADPCM_G723_3: TRACE("SND_FORMAT_ADPCM_G723_3\n"); break;
		case SND_FORMAT_ADPCM_G723_5: TRACE("SND_FORMAT_ADPCM_G723_5\n"); break;
		case SND_FORMAT_ALAW_8: TRACE("SND_FORMAT_ALAW_8\n"); break;
	}

	// Map the format code to bits per sample; fRaw marks formats that
	// are delivered as raw PCM (no decoder needed).
	switch (fFormatCode) {
		case SND_FORMAT_MULAW_8: fBitsPerSample = 8; fRaw = false; break;
		case SND_FORMAT_LINEAR_8: fBitsPerSample = 8; fRaw = true; break;
		case SND_FORMAT_LINEAR_16: fBitsPerSample = 16; fRaw = true; break;
		case SND_FORMAT_LINEAR_24: fBitsPerSample = 24; fRaw = true; break;
		case SND_FORMAT_LINEAR_32: fBitsPerSample = 32; fRaw = true; break;
		case SND_FORMAT_FLOAT: fBitsPerSample = 32; fRaw = true; break;
		case SND_FORMAT_DOUBLE: fBitsPerSample = 64; fRaw = true; break;
		case SND_FORMAT_ADPCM_G721: fBitsPerSample = 4; fRaw = false; break;
		case SND_FORMAT_ADPCM_G722: fBitsPerSample = 8; fRaw = false; break;
		case SND_FORMAT_ADPCM_G723_3: fBitsPerSample = 3; fRaw = false; break;
		case SND_FORMAT_ADPCM_G723_5: fBitsPerSample = 5; fRaw = false; break;
		case SND_FORMAT_ALAW_8: fBitsPerSample = 8; fRaw = false; break;
		default: fBitsPerSample = 0; break;
	}
	if (fBitsPerSample == 0) {
		TRACE("auReader::Sniff: sample format not recognized\n");
		return B_ERROR;
	}

	fFrameCount = (8 * fDataSize) / (fChannelCount * fBitsPerSample);
	fDuration = (1000000LL * fFrameCount) / fFrameRate;
	fBitsPerFrame = fChannelCount * fBitsPerSample;

	// Double the per-frame bit count until it is a whole number of
	// bytes; the 1000 cap prevents an endless loop for odd widths.
	fBlockAlign = fBitsPerFrame;
	while (fBlockAlign % 8 && fBlockAlign < 1000)
		fBlockAlign += fBlockAlign;
	if (fBlockAlign % 8) {
		TRACE("auReader::Sniff: can't find block alignment, fChannelCount %d, fBitsPerSample %d\n", fChannelCount, fBitsPerSample);
		return B_ERROR;
	}
	fBlockAlign /= 8;

	fPosition = 0;
	// Round the read buffer down to a whole number of blocks.
	fBufferSize = (BUFFER_SIZE / fBlockAlign) * fBlockAlign;
	// NOTE(review): the malloc() result is not checked -- confirm whether
	// allocation failure needs handling here.
	fBuffer = malloc(fBufferSize);

	TRACE(" fDataStart %Ld\n", fDataStart);
	TRACE(" fDataSize %Ld\n", fDataSize);
	TRACE(" fFrameCount %Ld\n", fFrameCount);
	TRACE(" fDuration %Ld\n", fDuration);
	TRACE(" fChannelCount %d\n", fChannelCount);
	TRACE(" fFrameRate %ld\n", fFrameRate);
	TRACE(" fBitsPerSample %d\n", fBitsPerSample);
	TRACE(" fBlockAlign %d\n", fBlockAlign);
	TRACE(" fFormatCode %ld\n", fFormatCode);
	TRACE(" fRaw %d\n", fRaw);

	BMediaFormats formats;
	if (fRaw) {
		// a raw PCM format
		media_format_description description;
		description.family = B_BEOS_FORMAT_FAMILY;
		description.u.beos.format = B_BEOS_FORMAT_RAW_AUDIO;
		formats.GetFormatFor(description, &fFormat);
		// substitute the exact SND_RATE_8012 constant for the nominal
		// 8012 Hz header value
		fFormat.u.raw_audio.frame_rate = (fFrameRate == 8012) ?
			SND_RATE_8012 : fFrameRate;
		fFormat.u.raw_audio.channel_count = fChannelCount;
		switch (fFormatCode) {
			case SND_FORMAT_LINEAR_8:
				// NOTE(review): SND_FORMAT_LINEAR_8 is signed linear PCM
				// but is mapped to the unsigned B_AUDIO_UCHAR -- verify
				// whether B_AUDIO_CHAR was intended.
				fFormat.u.raw_audio.format = media_raw_audio_format::B_AUDIO_UCHAR;
				break;
			case SND_FORMAT_LINEAR_16:
				fFormat.u.raw_audio.format = media_raw_audio_format::B_AUDIO_SHORT;
				break;
			case SND_FORMAT_LINEAR_24:
				fFormat.u.raw_audio.format = B_AUDIO_FORMAT_INT24;
				break;
			case SND_FORMAT_LINEAR_32:
				fFormat.u.raw_audio.format = media_raw_audio_format::B_AUDIO_INT;
				break;
			case SND_FORMAT_FLOAT:
				fFormat.u.raw_audio.format = media_raw_audio_format::B_AUDIO_FLOAT;
				break;
			case SND_FORMAT_DOUBLE:
				fFormat.u.raw_audio.format = B_AUDIO_FORMAT_FLOAT64;
				break;
			default:
				TRACE("auReader::Sniff: unhandled raw format\n");
				return B_ERROR;
		}
		fFormat.u.raw_audio.byte_order = B_MEDIA_BIG_ENDIAN;
		fFormat.u.raw_audio.buffer_size = fBufferSize;
	} else {
		// some encoded format
		media_format_description description;
		description.family = B_MISC_FORMAT_FAMILY;
		description.u.misc.file_format = 'au';
		description.u.misc.codec = fFormatCode;
		formats.GetFormatFor(description, &fFormat);
		fFormat.u.encoded_audio.output.frame_rate = fFrameRate;
		fFormat.u.encoded_audio.output.channel_count = fChannelCount;
	}

	*streamCount = 1;
	return B_OK;
}
/*!	Extracts stream information from the three Vorbis header packets and
	the first/last Ogg pages of a seekable source.
	Fills in \a format from the Vorbis info header, then derives
	\a frameCount and \a duration from the difference between the last
	and first page granule positions.
*/
status_t
OggVorbisSeekable::GetStreamInfo(int64 *frameCount, bigtime_t *duration,
	media_format *format)
{
	TRACE("OggVorbisSeekable::GetStreamInfo\n");
	status_t result = B_OK;
	ogg_packet packet;

	// get header packet
	if (GetHeaderPackets().size() < 1) {
		result = GetPacket(&packet);
		if (result != B_OK) {
			return result;
		}
		SaveHeaderPacket(packet);
	}
	packet = GetHeaderPackets()[0];
	if (!packet.b_o_s) {
		return B_ERROR; // first packet was not beginning of stream
	}

	// parse header packet
	// based on libvorbis/info.c vorbis_synthesis_headerin(...)
	oggpack_buffer opb;
	oggpack_readinit(&opb, packet.packet, packet.bytes);
	int packtype = oggpack_read(&opb, 8);
	if (packtype != 0x01) {
		return B_ERROR; // first packet was not an info packet
	}
	// discard vorbis string
	for (uint i = 0 ; i < sizeof("vorbis") - 1 ; i++) {
		oggpack_read(&opb, 8);
	}
	vorbis_info info;
	if (_vorbis_unpack_info(&info, &opb) != 0) {
		return B_ERROR; // couldn't unpack info
	}

	// get the format for the description
	media_format_description description = vorbis_description();
	BMediaFormats formats;
	result = formats.InitCheck();
	if (result == B_OK) {
		result = formats.GetFormatFor(description, format);
	}
	if (result != B_OK) {
		*format = vorbis_encoded_media_format();
		// ignore error, allow user to use ReadChunk interface
	}

	// fill out format from header packet
	// prefer the nominal bitrate, falling back to upper then lower bound
	if (info.bitrate_nominal > 0) {
		format->u.encoded_audio.bit_rate = info.bitrate_nominal;
	} else if (info.bitrate_upper > 0) {
		format->u.encoded_audio.bit_rate = info.bitrate_upper;
	} else if (info.bitrate_lower > 0) {
		format->u.encoded_audio.bit_rate = info.bitrate_lower;
	}
	if (info.channels == 1) {
		format->u.encoded_audio.multi_info.channel_mask = B_CHANNEL_LEFT;
	} else {
		format->u.encoded_audio.multi_info.channel_mask
			= B_CHANNEL_LEFT | B_CHANNEL_RIGHT;
	}
	fFrameRate = format->u.encoded_audio.output.frame_rate = (float)info.rate;
	format->u.encoded_audio.output.channel_count = info.channels;
	format->u.encoded_audio.output.buffer_size =
		AudioBufferSize(&format->u.encoded_audio.output);

	// get comment packet
	if (GetHeaderPackets().size() < 2) {
		result = GetPacket(&packet);
		if (result != B_OK) {
			return result;
		}
		SaveHeaderPacket(packet);
	}
	// get codebook packet
	if (GetHeaderPackets().size() < 3) {
		result = GetPacket(&packet);
		if (result != B_OK) {
			return result;
		}
		SaveHeaderPacket(packet);
	}
	// NOTE(review): this stores a pointer to the header-packet container
	// itself as meta data (not a copy of the packets) -- verify that the
	// consumer matches this lifetime/representation.
	format->SetMetaData((void*)&GetHeaderPackets(), sizeof(GetHeaderPackets()));

	// TODO: count the frames in the first page.. somehow.. :-/
	int64 frames = 0;

	ogg_page page;
	// read the first page
	result = ReadPage(&page);
	if (result != B_OK) {
		return result;
	}
	// NOTE(review): despite the "f" prefix this is a local variable, not
	// a member.
	int64 fFirstGranulepos = ogg_page_granulepos(&page);
	TRACE("OggVorbisSeekable::GetStreamInfo: first granulepos: %lld\n",
		fFirstGranulepos);

	// read our last page
	off_t last = inherited::Seek(GetLastPagePosition(), SEEK_SET);
	if (last < 0) {
		return last;
	}
	result = ReadPage(&page);
	if (result != B_OK) {
		return result;
	}
	int64 last_granulepos = ogg_page_granulepos(&page);

	// seek back to the start
	int64 frame = 0;
	bigtime_t time = 0;
	result = Seek(B_MEDIA_SEEK_TO_TIME, &frame, &time);
	if (result != B_OK) {
		return result;
	}

	// compute frame count and duration from sample count
	frames = last_granulepos - fFirstGranulepos;
	*frameCount = frames;
	*duration = (1000000LL * frames) / (long long)fFrameRate;

	return B_OK;
}