/*!
    Returns the number of bytes required to represent one frame
    (a sample in each channel) in this format.

    Returns 0 if this format is invalid.
*/
int QAudioFormat::bytesPerFrame() const
{
    if (!isValid())
        return 0;

    return (sampleSize() * channelCount()) / 8;
}
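// A minimal usage sketch (not from the original source; assumes the Qt 5
// QAudioFormat setters): all fields must be set for isValid() to hold, and a
// 16-bit stereo PCM format then yields (16 * 2) / 8 = 4 bytes per frame.
#include <QAudioFormat>

int exampleBytesPerFrame()
{
    QAudioFormat format;
    format.setCodec("audio/pcm");
    format.setSampleRate(44100);
    format.setChannelCount(2);                     // stereo
    format.setSampleSize(16);                      // bits per sample
    format.setSampleType(QAudioFormat::SignedInt); // required for isValid()
    return format.bytesPerFrame();                 // (16 * 2) / 8 == 4
}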
void GameObjectAudioSourceComponentWidget::on_m_AudioSourceTree_itemSelectionChanged()
{
    if (!m_IsReady)
    {
        return;
    }

    AudioSourceGOC* audioSourceGOC = GetAudioSourceGOC();
    if (audioSourceGOC == nullptr)
    {
        return;
    }

    m_GameObjectAudioSourceComponentWidgetQtUI.m_NameAudioSourceTextBox->setText(QString::fromStdWString(audioSourceGOC->GetName()));
    m_GameObjectAudioSourceComponentWidgetQtUI.m_AudioAssetTextBox->setText(QString::fromStdWString(audioSourceGOC->GetAudioName()));

    AudioPlayer* player = audioSourceGOC->GetAudioPlayer();

    m_GameObjectAudioSourceComponentWidgetQtUI.m_VolumeSB->setValue((double)player->GetVolume());
    m_GameObjectAudioSourceComponentWidgetQtUI.m_MinimumDistanceSB->setValue((double)player->GetMinDistance());
    m_GameObjectAudioSourceComponentWidgetQtUI.m_PitchSB->setValue((double)player->GetPitch());
    m_GameObjectAudioSourceComponentWidgetQtUI.m_AttenuationSB->setValue((double)player->GetAttenuation());
    m_GameObjectAudioSourceComponentWidgetQtUI.m_Is3DCheckBox->setChecked(player->Is3D());
    m_GameObjectAudioSourceComponentWidgetQtUI.m_LoopCheckBox->setChecked(player->IsLooped());

    QString channelCount(std::to_string(player->GetChannelCount()).c_str());
    m_GameObjectAudioSourceComponentWidgetQtUI.m_ChannelCountLabelText->setText(channelCount);

    QString sampleRate(std::to_string(player->GetSampleRate()).c_str());
    m_GameObjectAudioSourceComponentWidgetQtUI.m_SampleRateLabelText->setText(sampleRate);

    QString duration(std::to_string(player->GetDuration()).c_str());
    m_GameObjectAudioSourceComponentWidgetQtUI.m_DurationLabelText->setText(duration);
}
static bool parseHeader(QTextStream &textStream, unsigned int &channels)
{
    QRegExp header("^DynamicAudioNormalizer Logfile v(\\d).(\\d\\d)-(\\d)$");
    QRegExp channelCount("^CHANNEL_COUNT:(\\d+)$");

    QString headerLine = textStream.readLine();
    if (header.indexIn(headerLine) < 0)
    {
        return false;
    }
    if (header.cap(1).toUInt() != 2)
    {
        return false;
    }

    QString channelLine = textStream.readLine();
    if (channelCount.indexIn(channelLine) < 0)
    {
        return false;
    }

    bool ok = false;
    channels = channelCount.cap(1).toUInt(&ok);
    return ok && (channels > 0);
}
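// A minimal sketch (hypothetical input, not from the original source): the
// two regexes above expect a major-version-2 header line followed by a
// channel-count line. With the example log below, parseHeader() accepts the
// input and sets channels to 2.
#include <QString>
#include <QTextStream>

static bool exampleParseHeader()
{
    QString log = QStringLiteral(
        "DynamicAudioNormalizer Logfile v2.00-1\n"
        "CHANNEL_COUNT:2\n");
    QTextStream stream(&log);
    unsigned int channels = 0;
    return parseHeader(stream, channels) && (channels == 2);
}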
void DefaultAudioDestinationHandler::createDestination()
{
    float hardwareSampleRate = AudioDestination::hardwareSampleRate();
    VLOG(1) << ">>>> hardwareSampleRate = " << hardwareSampleRate;

    m_destination = AudioDestination::create(*this,
        m_inputDeviceId, m_numberOfInputChannels, channelCount(),
        hardwareSampleRate, context()->getSecurityOrigin());
}
int AudioTrack::frameSize() const
{
    if (audio_is_linear_pcm(mFormat)) {
        return channelCount() * audio_bytes_per_sample(mFormat);
    } else {
        return sizeof(uint8_t);
    }
}
int AudioTrack::frameSize() const
{
    if (AudioSystem::isLinearPCM(mFormat)) {
        return channelCount() * ((format() == AudioSystem::PCM_8_BIT) ? sizeof(uint8_t) : sizeof(int16_t));
    } else {
        return sizeof(uint8_t);
    }
}
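// Worked example (illustrative, not from the original sources): for linear
// PCM both frameSize() variants reduce to channels * bytes-per-sample, so a
// 16-bit stereo track occupies 2 * sizeof(int16_t) = 4 bytes per frame,
// while non-PCM formats fall back to a 1-byte "frame".
#include <cstdint>

constexpr int kExampleChannels = 2; // stereo
constexpr int kExamplePcm16FrameSize =
        kExampleChannels * static_cast<int>(sizeof(int16_t));
static_assert(kExamplePcm16FrameSize == 4,
        "16-bit stereo PCM is 4 bytes per frame");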
size_t AudioRecord::frameSize() const
{
    if (inputSource() == AUDIO_SOURCE_VOICE_COMMUNICATION) {
        if (audio_is_linear_pcm(mFormat)) {
            return channelCount() * audio_bytes_per_sample(mFormat);
        } else {
            return channelCount() * sizeof(int16_t);
        }
    } else {
        if (format() == AUDIO_FORMAT_AMR_NB) {
            return channelCount() * AMR_FRAMESIZE; // Full rate frame size
        } else if (format() == AUDIO_FORMAT_EVRC) {
            return channelCount() * EVRC_FRAMESIZE; // Full rate frame size
        } else if (format() == AUDIO_FORMAT_QCELP) {
            return channelCount() * QCELP_FRAMESIZE; // Full rate frame size
        } else if (format() == AUDIO_FORMAT_AAC) {
            // Not the actual frame size, but for variable frame rate AAC
            // encoding the buffer size is treated as a frame size.
            return AAC_FRAMESIZE;
        } else if (format() == AUDIO_FORMAT_AMR_WB) {
            return channelCount() * AMR_WB_FRAMESIZE;
        }
        if (audio_is_linear_pcm(mFormat)) {
            return channelCount() * audio_bytes_per_sample(mFormat);
        } else {
            return sizeof(uint8_t);
        }
    }
}
status_t A2dpAudioInterface::A2dpAudioStreamOut::set(
        int format, int channels, uint32_t rate)
{
    LOGD("A2dpAudioStreamOut::set %d, %d, %d\n", format, channels, rate);

    // fix up defaults
    if (format == 0) format = AudioSystem::PCM_16_BIT;
    if (channels == 0) channels = channelCount();
    if (rate == 0) rate = sampleRate();

    // check values
    if ((format != AudioSystem::PCM_16_BIT) ||
        (channels != channelCount()) ||
        (rate != sampleRate()))
        return BAD_VALUE;

    return NO_ERROR;
}
status_t AudioStreamOutGeneric::set(
        AudioHardwareGeneric *hw,
        int fd,
        int format,
        int channels,
        uint32_t rate)
{
    // fix up defaults
    if (format == 0) format = AudioSystem::PCM_16_BIT;
    if (channels == 0) channels = channelCount();
    if (rate == 0) rate = sampleRate();

    // check values
    if ((format != AudioSystem::PCM_16_BIT) ||
        (channels != channelCount()) ||
        (rate != sampleRate()))
        return BAD_VALUE;

    mAudioHardware = hw;
    mFd = fd;
    return NO_ERROR;
}
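// Usage note (illustrative, not from the original sources): passing zeros
// lets set() substitute the stream's own defaults, so set(hw, fd, 0, 0, 0)
// succeeds, while any value that disagrees with the fixed 16-bit PCM format,
// the stream's channelCount(), or its sampleRate() is rejected with
// BAD_VALUE.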
void SoundSourceFLAC::flacMetadata(const FLAC__StreamMetadata* metadata)
{
    // https://xiph.org/flac/api/group__flac__stream__decoder.html#ga43e2329c15731c002ac4182a47990f85
    // "...one STREAMINFO block, followed by zero or more other metadata blocks."
    // "...by default the decoder only calls the metadata callback for the STREAMINFO block..."
    // "...always before the first audio frame (i.e. write callback)."
    switch (metadata->type) {
    case FLAC__METADATA_TYPE_STREAMINFO: {
        setChannelCount(metadata->data.stream_info.channels);
        setSampleRate(metadata->data.stream_info.sample_rate);
        initFrameIndexRangeOnce(
                IndexRange::forward(
                        0,
                        metadata->data.stream_info.total_samples));

        const unsigned bitsPerSample = metadata->data.stream_info.bits_per_sample;
        DEBUG_ASSERT(kBitsPerSampleDefault != bitsPerSample);
        if (kBitsPerSampleDefault == m_bitsPerSample) {
            // not set before
            if ((bitsPerSample >= 4) && (bitsPerSample <= 32)) {
                m_bitsPerSample = bitsPerSample;
            } else {
                kLogger.warning()
                        << "Invalid bits per sample:" << bitsPerSample;
            }
        } else {
            // already set before -> check for consistency
            if (bitsPerSample != m_bitsPerSample) {
                kLogger.warning()
                        << "Unexpected bits per sample:" << bitsPerSample
                        << " <> " << m_bitsPerSample;
            }
        }

        m_maxBlocksize = metadata->data.stream_info.max_blocksize;
        if (0 >= m_maxBlocksize) {
            kLogger.warning()
                    << "Invalid max. blocksize" << m_maxBlocksize;
        }
        const SINT sampleBufferCapacity =
                m_maxBlocksize * channelCount();
        if (m_sampleBuffer.capacity() < sampleBufferCapacity) {
            m_sampleBuffer.adjustCapacity(sampleBufferCapacity);
        }
        break;
    }
    default:
        // Ignore all other metadata types
        break;
    }
}
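// Worked example (illustrative): with a STREAMINFO max_blocksize of 4096 and
// 2 channels, the required capacity is 4096 * 2 = 8192 samples, so a single
// decoded FLAC block always fits into m_sampleBuffer.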
TranscodeProcess *DlnaYouTubeVideo::getTranscodeProcess()
{
    FfmpegTranscoding* transcodeProcess = new FfmpegTranscoding(log());

    transcodeProcess->setUrl(m_streamUrl);
    transcodeProcess->setLengthInSeconds(getLengthInSeconds());
    transcodeProcess->setFormat(transcodeFormat);
    transcodeProcess->setBitrate(bitrate());
    transcodeProcess->setAudioLanguages(audioLanguages());
    transcodeProcess->setSubtitleLanguages(subtitleLanguages());
    transcodeProcess->setFrameRate(framerate());
    transcodeProcess->setAudioChannelCount(channelCount());
    transcodeProcess->setAudioSampleRate(samplerate());

    return transcodeProcess;
}
void AudioResampleImpl::splitAudioData(AudioData & data, boost::scoped_array<char*> & split) const
{
    auto dataFormat = data.format();
    if (dataFormat.isPlanar()) {
        /// For a planar format the data must be split into one array per channel
        const int numChannels = dataFormat.channelCount();
        split.reset(new char*[numChannels]);
        split_ref(data.begin(), data.end(), data.numBytes() / numChannels, split.get());
    } else {
        /// Interleaved data fits into a single array
        split.reset(new char*[1]);
        split[0] = data.data();
    }
}
QString LabU16ColorSpace::normalisedChannelValueText(const quint8 *pixel, quint32 channelIndex) const
{
    const KoLabU16Traits::channels_type *pix = reinterpret_cast<const KoLabU16Traits::channels_type *>(pixel);
    Q_ASSERT(channelIndex < channelCount());

    // These convert from lcms encoded format to standard ranges.
    switch (channelIndex) {
    case 0:
        return QString().setNum(100.0 * static_cast<float>(pix[0]) / MAX_CHANNEL_L);
    case 1:
        return QString().setNum(100.0 * ((static_cast<float>(pix[1]) - CHANNEL_AB_ZERO_OFFSET) / MAX_CHANNEL_AB));
    case 2:
        return QString().setNum(100.0 * ((static_cast<float>(pix[2]) - CHANNEL_AB_ZERO_OFFSET) / MAX_CHANNEL_AB));
    case 3:
        return QString().setNum(100.0 * static_cast<float>(pix[3]) / UINT16_MAX);
    default:
        return QString("Error");
    }
}
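// Worked example (illustrative): a raw L* value of MAX_CHANNEL_L / 2 prints
// as 50.0, and an a*/b* value equal to CHANNEL_AB_ZERO_OFFSET prints as 0.0,
// since the offset marks the neutral point of the lcms a/b encoding.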
// record functions
status_t AudioStreamInGeneric::set(
        AudioHardwareGeneric *hw,
        int fd,
        int format,
        int channels,
        uint32_t rate)
{
    // FIXME: remove logging
    LOGD("AudioStreamInGeneric::set(%p, %d, %d, %d, %u)", hw, fd, format, channels, rate);

    // check values
    if ((format != AudioSystem::PCM_16_BIT) ||
        (channels != channelCount()) ||
        (rate != sampleRate())) {
        LOGE("Error opening input channel");
        return BAD_VALUE;
    }

    mAudioHardware = hw;
    mFd = fd;
    return NO_ERROR;
}
status_t AudioStreamInGeneric::dump(int fd, const Vector<String16>& args)
{
    const size_t SIZE = 256;
    char buffer[SIZE];
    String8 result;
    snprintf(buffer, SIZE, "AudioStreamInGeneric::dump\n");
    result.append(buffer);
    snprintf(buffer, SIZE, "\tsample rate: %d\n", sampleRate());
    result.append(buffer);
    snprintf(buffer, SIZE, "\tbuffer size: %d\n", bufferSize());
    result.append(buffer);
    snprintf(buffer, SIZE, "\tchannel count: %d\n", channelCount());
    result.append(buffer);
    snprintf(buffer, SIZE, "\tformat: %d\n", format());
    result.append(buffer);
    snprintf(buffer, SIZE, "\tmAudioHardware: %p\n", mAudioHardware);
    result.append(buffer);
    snprintf(buffer, SIZE, "\tmFd: %d\n", mFd);
    result.append(buffer);
    ::write(fd, result.string(), result.size());
    return NO_ERROR;
}
/*
 * Returns the XML (DIDL) representation of the DLNA node. It gives a
 * complete representation of the item, with as many tags as available.
 *
 * Reference: http://www.upnp.org/specs/av/UPnP-av-ContentDirectory-v1-Service.pdf
 */
QDomElement DlnaVideoItem::getXmlContentDirectory(QDomDocument *xml, QStringList properties) const
{
    if (!xml)
        return QDomElement();

    QDomElement xml_obj = xml->createElement("item");

    updateXmlContentDirectory(xml, &xml_obj, properties);

    // optional properties of videoItem
    if (properties.contains("*") or properties.contains("upnp:genre")) {
        QDomElement upnpGenre = xml->createElement("upnp:genre");
        upnpGenre.appendChild(xml->createTextNode(metaDataGenre()));
        xml_obj.appendChild(upnpGenre);
    }

    if (properties.contains("*") or properties.contains("upnp:longDescription")) {
    }

    if (properties.contains("*") or properties.contains("upnp:producer")) {
    }

    if (properties.contains("*") or properties.contains("upnp:rating")) {
    }

    if (properties.contains("*") or properties.contains("upnp:actor")) {
    }

    if (properties.contains("*") or properties.contains("upnp:director")) {
    }

    if (properties.contains("*") or properties.contains("dc:description")) {
    }

    if (properties.contains("*") or properties.contains("dc:publisher")) {
    }

    if (properties.contains("*") or properties.contains("dc:language")) {
    }

    if (properties.contains("*") or properties.contains("dc:relation")) {
    }

    // add <res> element
    QTime duration(0, 0, 0);
    QDomElement res = xml->createElement("res");
    res.setAttribute("xmlns:dlna", "urn:schemas-dlna-org:metadata-1-0/");

    // mandatory properties: protocolInfo
    res.setAttribute("protocolInfo", getProtocolInfo());

    // optional properties
    if ((properties.contains("*") or properties.contains("res@bitrate")) and bitrate() != -1) {
        // bitrate in bytes/sec
        res.setAttribute("bitrate", QString("%1").arg(qRound(double(bitrate())/8.0)));
    }

    if (properties.contains("*") or properties.contains("res@resolution")) {
        res.setAttribute("resolution", resolution());
    }

    if (properties.contains("*") or properties.contains("res@duration")) {
        res.setAttribute("duration", QString("%1").arg(duration.addSecs(getLengthInSeconds()).toString("hh:mm:ss")));
    }

    if (properties.contains("*") or properties.contains("res@sampleFrequency")) {
        res.setAttribute("sampleFrequency", QString("%1").arg(samplerate()));
    }

    if (properties.contains("*") or properties.contains("res@nrAudioChannels")) {
        res.setAttribute("nrAudioChannels", QString("%1").arg(channelCount()));
    }

    if ((properties.contains("*") or properties.contains("res@size")) and size() != -1) {
        // size in bytes
        res.setAttribute("size", QString("%1").arg(size()));
    }

    res.appendChild(xml->createTextNode(QString("http://%2:%3/get/%1/%4").arg(getResourceId()).arg(host).arg(port).arg(getName().toUtf8().toPercentEncoding().constData())));

    xml_obj.appendChild(res);

    return xml_obj;
}
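// Worked example (illustrative): DLNA expects res@bitrate in bytes per
// second, so a 1411200 bit/s stream (CD-quality PCM) is advertised as
// qRound(1411200 / 8.0) == 176400.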
void DefaultAudioDestinationNode::createDestination()
{
    float hardwareSampleRate = AudioDestination::hardwareSampleRate();
    LOG("Hardware Samplerate: %f", hardwareSampleRate);

    m_destination = std::unique_ptr<AudioDestination>(
        AudioDestination::MakePlatformAudioDestination(*this, channelCount(), hardwareSampleRate));
}
ReadableSampleFrames SoundSourceOggVorbis::readSampleFramesClamped(
        WritableSampleFrames writableSampleFrames)
{
    const SINT firstFrameIndex = writableSampleFrames.frameIndexRange().start();

    if (m_curFrameIndex != firstFrameIndex) {
        const int seekResult = ov_pcm_seek(&m_vf, firstFrameIndex);
        if (seekResult == 0) {
            m_curFrameIndex = firstFrameIndex;
        } else {
            kLogger.warning() << "Failed to seek file:" << seekResult;
            const ogg_int64_t pcmOffset = ov_pcm_tell(&m_vf);
            if (0 <= pcmOffset) {
                m_curFrameIndex = pcmOffset;
            } else {
                // Reset to EOF
                m_curFrameIndex = frameIndexMax();
            }
            // Abort
            return ReadableSampleFrames(
                    IndexRange::between(
                            m_curFrameIndex,
                            m_curFrameIndex));
        }
    }
    DEBUG_ASSERT(m_curFrameIndex == firstFrameIndex);

    const SINT numberOfFramesTotal = writableSampleFrames.frameLength();

    CSAMPLE* pSampleBuffer = writableSampleFrames.writableData();
    SINT numberOfFramesRemaining = numberOfFramesTotal;
    while (0 < numberOfFramesRemaining) {
        float** pcmChannels;
        int currentSection;
        // Use 'long' here, because ov_read_float() returns this type.
        // This is an exception from the rule not to use any types with
        // differing sizes on different platforms.
        // https://bugs.launchpad.net/mixxx/+bug/1094143
        const long readResult = ov_read_float(
                &m_vf,
                &pcmChannels,
                numberOfFramesRemaining,
                &currentSection);
        if (0 < readResult) {
            m_curFrameIndex += readResult;
            if (pSampleBuffer) {
                switch (channelCount()) {
                case 1:
                    for (long i = 0; i < readResult; ++i) {
                        *pSampleBuffer++ = pcmChannels[0][i];
                    }
                    break;
                case 2:
                    for (long i = 0; i < readResult; ++i) {
                        *pSampleBuffer++ = pcmChannels[0][i];
                        *pSampleBuffer++ = pcmChannels[1][i];
                    }
                    break;
                default:
                    for (long i = 0; i < readResult; ++i) {
                        for (SINT j = 0; j < channelCount(); ++j) {
                            *pSampleBuffer++ = pcmChannels[j][i];
                        }
                    }
                }
            }
            numberOfFramesRemaining -= readResult;
        } else {
            kLogger.warning() << "Failed to read from file:" << readResult;
            break; // abort
        }
    }

    DEBUG_ASSERT(isValidFrameIndex(m_curFrameIndex));
    DEBUG_ASSERT(numberOfFramesTotal >= numberOfFramesRemaining);
    const SINT numberOfFrames = numberOfFramesTotal - numberOfFramesRemaining;
    return ReadableSampleFrames(
            IndexRange::forward(firstFrameIndex, numberOfFrames),
            SampleBuffer::ReadableSlice(
                    writableSampleFrames.writableData(),
                    std::min(writableSampleFrames.writableLength(), frames2samples(numberOfFrames))));
}
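// Note (illustrative): ov_read_float() hands back one float buffer per
// channel (planar layout). The switch above interleaves them, so a stereo
// read of N frames writes L0 R0 L1 R1 ... L(N-1) R(N-1) into pSampleBuffer.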
MsgPacket* ChannelController::processGetChannels(MsgPacket* request)
{
    ChannelCache& channelCache = ChannelCache::instance();

    isyslog("Fetching channels ...");

    int type = request->get_U32();
    isyslog("Type: %s",
            type == 0 ? "MPEG2/H.264 channels" :
            type == 1 ? "radio channels" :
            type == 2 ? "H.264 channels" : "UNDEFINED");

    //
    // PROTOCOL 7 - CLIENT MUST SEND THIS
    //

    const char* language = request->get_String();

    // do we want FTA channels?
    m_wantFta = request->get_U32();
    isyslog("Free To Air channels: %s", m_wantFta ? "Yes" : "No");

    // display only channels with native language audio?
    m_filterLanguage = request->get_U32();
    isyslog("Only native language: %s", m_filterLanguage ? "Yes" : "No");

    // read caids
    m_caids.clear();
    uint32_t count = request->get_U32();

    isyslog("Enabled CaIDs: ");

    // sanity check (maximum of 20 caids)
    if (count < 20) {
        for (uint32_t i = 0; i < count; i++) {
            int caid = request->get_U32();
            m_caids.push_back(caid);
            isyslog("%04X", caid);
        }
    }

    m_languageIndex = I18nLanguageIndex(language);
    m_channelCount = channelCount();

    MsgPacket* response = createResponse(request);

    std::string groupName;

    LOCK_CHANNELS_READ;

    for (const cChannel* channel = Channels->First(); channel; channel = Channels->Next(channel)) {
        if (channel->GroupSep()) {
            groupName = m_toUtf8.convert(channel->Name());
            continue;
        }

        // skip disabled channels if filtering is enabled
        if (!channelCache.isEnabled(channel)) {
            continue;
        }

        if (!isChannelWanted(channel, type)) {
            continue;
        }

        addChannelToPacket(channel, response, groupName.c_str());
    }

    isyslog("client got %i channels", m_channelCount);
    return response;
}
FLAC__StreamDecoderWriteStatus SoundSourceFLAC::flacWrite(
        const FLAC__Frame* frame, const FLAC__int32* const buffer[])
{
    const SINT numChannels = frame->header.channels;
    if (channelCount() > numChannels) {
        kLogger.warning()
                << "Corrupt or unsupported FLAC file:"
                << "Invalid number of channels in FLAC frame header"
                << frame->header.channels << "<>" << channelCount();
        return FLAC__STREAM_DECODER_WRITE_STATUS_ABORT;
    }
    if (sampleRate() != SINT(frame->header.sample_rate)) {
        kLogger.warning()
                << "Corrupt or unsupported FLAC file:"
                << "Invalid sample rate in FLAC frame header"
                << frame->header.sample_rate << "<>" << sampleRate();
        return FLAC__STREAM_DECODER_WRITE_STATUS_ABORT;
    }
    const SINT numReadableFrames = frame->header.blocksize;
    if (numReadableFrames > m_maxBlocksize) {
        kLogger.warning()
                << "Corrupt or unsupported FLAC file:"
                << "Block size in FLAC frame header exceeds the maximum block size"
                << frame->header.blocksize << ">" << m_maxBlocksize;
        return FLAC__STREAM_DECODER_WRITE_STATUS_ABORT;
    }

    // According to the API docs the decoder will always report the current
    // position in "FLAC samples" (= "Mixxx frames") for convenience
    DEBUG_ASSERT(frame->header.number_type == FLAC__FRAME_NUMBER_TYPE_SAMPLE_NUMBER);
    m_curFrameIndex = frame->header.number.sample_number;

    // Decode buffer should be empty before decoding the next frame
    DEBUG_ASSERT(m_sampleBuffer.empty());
    const SampleBuffer::WritableSlice writableSlice(
            m_sampleBuffer.growForWriting(frames2samples(numReadableFrames)));

    const SINT numWritableFrames = samples2frames(writableSlice.length());
    DEBUG_ASSERT(numWritableFrames <= numReadableFrames);
    if (numWritableFrames < numReadableFrames) {
        kLogger.warning()
                << "Sample buffer does not have enough free space for all decoded FLAC samples:"
                << numWritableFrames << "<" << numReadableFrames;
    }

    CSAMPLE* pSampleBuffer = writableSlice.data();
    DEBUG_ASSERT(channelCount() <= numChannels);
    switch (channelCount()) {
    case 1: {
        // optimized code for 1 channel (mono)
        for (SINT i = 0; i < numWritableFrames; ++i) {
            *pSampleBuffer++ = convertDecodedSample(buffer[0][i], m_bitsPerSample);
        }
        break;
    }
    case 2: {
        // optimized code for 2 channels (stereo)
        for (SINT i = 0; i < numWritableFrames; ++i) {
            *pSampleBuffer++ = convertDecodedSample(buffer[0][i], m_bitsPerSample);
            *pSampleBuffer++ = convertDecodedSample(buffer[1][i], m_bitsPerSample);
        }
        break;
    }
    default: {
        // generic code for multiple channels
        for (SINT i = 0; i < numWritableFrames; ++i) {
            for (SINT j = 0; j < channelCount(); ++j) {
                *pSampleBuffer++ = convertDecodedSample(buffer[j][i], m_bitsPerSample);
            }
        }
    }
    }

    return FLAC__STREAM_DECODER_WRITE_STATUS_CONTINUE;
}
/*!
    @return The size, in bytes, of a pixel in this pixel format.
*/
size_t size() const
{
    return channelSize() * channelCount();
}
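// Worked example (illustrative): an RGBA format with four 8-bit channels has
// channelSize() == 1 and channelCount() == 4, so size() == 4 bytes per pixel.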
int AudioRecord::frameSize() const
{
    return channelCount() * ((format() == AudioSystem::PCM_8_BIT) ? sizeof(uint8_t) : sizeof(int16_t));
}