// Creates a new audio input at position `before` (negative = from the right)
// and restores its state from `stream`. Returns NULL if unserialization
// fails; otherwise the fully initialized input.
AudioInput *AudioMixer::createInputSerialized(
	QDataStream *stream, int before)
{
	// Negative positions index from the right-hand end of the list
	if(before < 0)
		before += m_inputs.count() + 1;
	before = qBound(0, before, m_inputs.count());

	AudioInput *newInput =
		new AudioInput(this, 0, m_profile->getAudioMode());
	appLog(LOG_CAT) << "Unserializing audio input "
		<< newInput->getIdString(false) << "...";
	if(!newInput->unserialize(stream)) {
		// Failed to unserialize data
		appLog(LOG_CAT, Log::Warning)
			<< "Failed to fully unserialize audio input data";
		delete newInput;
		return NULL;
	}

	m_inputs.insert(before, newInput);
	appLog(LOG_CAT) << "Created audio input " << newInput->getIdString();
	newInput->setInitialized();
	calcMinInputDelay();
	emit inputAdded(newInput, before);
	return newInput;
}
// Handles disconnection of our input: validates that it really is a mono
// microphone input, then flushes pending samples and disables the mic.
void EngineMicrophone::onInputDisconnected(AudioInput input) {
    const bool micType = (input.getType() == AudioPath::MICROPHONE);
    const bool monoBuffer =
        (AudioInput::channelsNeededForType(input.getType()) == 1);
    if (!micType || !monoBuffer) {
        // This is an error!
        qWarning() << "EngineMicrophone connected to AudioInput for a non-Microphone type or a non-mono buffer!";
        return;
    }
    // Discard any queued samples and mark the microphone as disabled.
    m_sampleBuffer.clear();
    m_pEnabled->set(0.0f);
}
// Feeds incoming microphone samples into m_sampleBuffer, converting mono
// input to stereo on the way. Samples are ignored while talkover is off.
// pBuffer holds nFrames frames with the channel count reported by the
// input's channel group; only mono and stereo are handled.
void EngineMicrophone::receiveBuffer(AudioInput input, const CSAMPLE* pBuffer,
                                     unsigned int nFrames) {
    if (!isTalkover()) {
        return;
    }

    if (input.getType() != AudioPath::MICROPHONE) {
        // This is an error!
        qWarning() << "EngineMicrophone receieved an AudioInput for a non-Microphone type!";
        return;
    }

    const unsigned int iChannels = input.getChannelGroup().getChannelCount();

    // Both paths below produce nFrames*2 output samples, and the mono path
    // stages them in m_pConversionBuffer, which holds MAX_BUFFER_LEN
    // samples. Clamp to MAX_BUFFER_LEN/2 frames so the mono->stereo
    // expansion can never write past the end of the conversion buffer.
    // (The previous clamp of MAX_BUFFER_LEN / iChannels allowed mono input
    // of up to MAX_BUFFER_LEN frames, overflowing the conversion buffer;
    // for stereo input this limit is unchanged. This also avoids a
    // divide-by-zero for a zero-channel input.)
    if (nFrames > MAX_BUFFER_LEN / 2) {
        qWarning() << "Dropping microphone samples because the input buffer is too large.";
        nFrames = MAX_BUFFER_LEN / 2;
    }

    const CSAMPLE* pWriteBuffer = NULL;
    unsigned int samplesToWrite = 0;

    if (iChannels == 1) {
        // Do mono -> stereo conversion.
        for (unsigned int i = 0; i < nFrames; ++i) {
            m_pConversionBuffer[i*2 + 0] = pBuffer[i];
            m_pConversionBuffer[i*2 + 1] = pBuffer[i];
        }
        pWriteBuffer = m_pConversionBuffer;
        samplesToWrite = nFrames * 2;
    } else if (iChannels == 2) {
        // Already in stereo. Use pBuffer as-is.
        pWriteBuffer = pBuffer;
        samplesToWrite = nFrames * iChannels;
    } else {
        qWarning() << "EngineMicrophone got greater than stereo input. Not currently handled.";
    }

    if (pWriteBuffer != NULL) {
        // TODO(rryan) do we need to verify the input is the one we asked for?
        // Oh well.
        unsigned int samplesWritten = m_sampleBuffer.write(pWriteBuffer,
                                                           samplesToWrite);
        if (samplesWritten < samplesToWrite) {
            // Buffer overflow. We aren't processing samples fast enough. This
            // shouldn't happen since the mic spits out samples just as fast as
            // they come in, right?
            qWarning() << "ERROR: Buffer overflow in EngineMicrophone. Dropping samples on the floor.";
        }
    }
}
// Writes the mixer's persistent state to `stream`: a format version number,
// the master attenuation, and then every input in order.
void AudioMixer::serialize(QDataStream *stream) const
{
	// Serialization format version
	*stream << (quint32)0;

	// Mixer-level settings followed by the input list
	*stream << (qint32)m_masterAttenuation;
	const int numInputs = m_inputs.count();
	*stream << (quint32)numInputs;
	for(int idx = 0; idx < numInputs; idx++)
		m_inputs.at(idx)->serialize(stream);
}
// Clears the vinyl-control sample buffer when that input is unconfigured;
// logs a warning for any other input type.
void EngineDeck::onInputUnconfigured(AudioInput input) {
    if (input.getType() == AudioPath::VINYLCONTROL) {
        // Drop our reference to the vinyl control sample buffer.
        m_sampleBuffer = NULL;
        return;
    }
    // This is an error!
    qDebug() << "WARNING: EngineDeck connected to AudioInput for a non-vinylcontrol type!";
}
// Drops the sample-buffer reference and disables the microphone control
// when the microphone input is unconfigured; warns on any other type.
void EngineMicrophone::onInputUnconfigured(AudioInput input) {
    if (input.getType() == AudioPath::MICROPHONE) {
        m_sampleBuffer = NULL;
        m_pEnabled->set(0.0);
        return;
    }
    // This is an error!
    qWarning() << "EngineMicrophone connected to AudioInput for a non-Microphone type!";
}
// Drops the sample-buffer reference and disables the auxiliary control
// when the auxiliary input is unconfigured; logs a warning otherwise.
void EngineAux::onInputUnconfigured(AudioInput input) {
    if (input.getType() == AudioPath::AUXILIARY) {
        m_sampleBuffer = NULL;
        m_pEnabled->set(0.0);
        return;
    }
    // This is an error!
    qDebug() << "WARNING: EngineAux connected to AudioInput for a non-auxiliary type!";
}
// Creates a new audio input for `sourceId` at position `before` (negative
// = from the right), optionally naming it, and returns the initialized
// input.
AudioInput *AudioMixer::createInput(
	quint64 sourceId, const QString &name, int before)
{
	// Negative positions index from the right-hand end of the list
	if(before < 0)
		before += m_inputs.count() + 1;
	before = qBound(0, before, m_inputs.count());

	AudioInput *newInput =
		new AudioInput(this, sourceId, m_profile->getAudioMode());
	if(!name.isEmpty())
		newInput->setName(name);

	m_inputs.insert(before, newInput);
	appLog(LOG_CAT) << "Created audio input " << newInput->getIdString();
	newInput->setInitialized();
	calcMinInputDelay();
	emit inputAdded(newInput, before);
	return newInput;
}
// Accepts raw 16-bit mono microphone samples, converting them to stereo
// CSAMPLEs via the conversion buffer and queueing them on m_sampleBuffer.
void EngineMicrophone::receiveBuffer(AudioInput input, const short* pBuffer,
                                     unsigned int nFrames) {
    const bool validInput =
        input.getType() == AudioPath::MICROPHONE &&
        AudioInput::channelsNeededForType(input.getType()) == 1;
    if (!validInput) {
        // This is an error!
        qWarning() << "EngineMicrophone receieved an AudioInput for a non-Microphone type or a non-mono buffer!";
        return;
    }

    // The conversion buffer absorbs both the short->CSAMPLE and the
    // mono->stereo conversion, so cap the frame count at half its size
    // (MAX_BUFFER_LEN samples total).
    const unsigned int maxFrames = MAX_BUFFER_LEN / 2;
    if (nFrames > maxFrames) {
        qWarning() << "Dropping microphone samples because the input buffer is too large.";
        nFrames = maxFrames;
    }

    // There isn't a suitable SampleUtil method that can do mono->stereo and
    // short->float in one pass.
    // SampleUtil::convert(m_pConversionBuffer, pBuffer, iNumSamples);
    for (unsigned int frame = 0; frame < nFrames; ++frame) {
        const short sample = pBuffer[frame];
        m_pConversionBuffer[frame*2 + 0] = sample;
        m_pConversionBuffer[frame*2 + 1] = sample;
    }

    // The staged data is stereo, so there are twice as many samples as
    // input frames.
    const unsigned int totalSamples = nFrames * 2;

    // TODO(rryan) do we need to verify the input is the one we asked for? Oh well.
    const unsigned int written =
        m_sampleBuffer.write(m_pConversionBuffer, totalSamples);
    if (written < totalSamples) {
        // Buffer overflow. We aren't processing samples fast enough. This
        // shouldn't happen since the mic spits out samples just as fast as they
        // come in, right?
        qWarning() << "ERROR: Buffer overflow in EngineMicrophone. Dropping samples on the floor.";
    }
}
// One-time sketch initialization: starts microphone capture and builds the
// scene (ground plane, sphere, ambient light, and a point light).
void setup() {
    // start capturing from the microphone
    // (the original comment "start AudioInputphone mic" looks like a bad
    // search/replace of "microphone")
    mic.startCapturing();

    // setup plane and sphere
    plane.init( 100000 );
    plane.setPosition( width/2, height/2 + 500, 0 );
    sphere.init( 100 );
    sphere.setPosition( width/2, height/2, 0 );

    // setup light
    ambientLight( 20, 20, 20 );
    light.init( 0, 0, 0, width/2, height/2, 200 );
}
void draw() { // get the mic level and map it to the range 0..255 to use as color // the sensitivity variable allows you to adjust how sensitive it is (depends on the audio card input level) float soundLevel = mic.getCurrentLevel(); float sensitivity = 250; soundLevel = constrain( soundLevel * sensitivity, 0, 1 ); println( "Sound Level: %f", soundLevel ); float soundLevelRamapped = map( soundLevel, 0, 1, 0, 255 ); // calculate mic level average (to smooth out results) average.addValue( soundLevelRamapped ); float soundLevelAv = average.getValue(); // change color of the light println( "Sound Level AV: %f", soundLevelAv ); light.setDiffuseColor( soundLevelAv, soundLevelAv, soundLevelAv ); }
// Updates master-level bookkeeping when an input disconnects: decrements
// the configured-microphone count, or clears the record/broadcast flag.
void EngineMaster::onInputDisconnected(AudioInput input) {
    switch (input.getType()) {
    case AudioInput::MICROPHONE:
        // One fewer microphone is now configured.
        m_pNumMicsConfigured->set(m_pNumMicsConfigured->get() - 1);
        break;
    case AudioInput::RECORD_BROADCAST:
        m_bExternalRecordBroadcastInputConnected = false;
        break;
    case AudioInput::AUXILIARY:
    case AudioInput::VINYLCONTROL:
        // Enabled auxiliary and vinyl control inputs are not tracked.
        break;
    default:
        break;
    }
}
/**
 * Defined for QHash, so AudioInput can be used as a QHash key.
 * Simply delegates to the input's own hash value.
 */
unsigned int qHash(const AudioInput &input) {
    const unsigned int hashValue = input.getHash();
    return hashValue;
}
// Empties the undo list, deleting the track and marker objects owned by the
// undo operations. After deleting a track, later operations in the list that
// reference the same pointer are nulled out so the object is never deleted
// twice.
void UndoList::clearDelete()
{
    if (!empty()) {
        for (iUndo iu = begin(); iu != end(); ++iu) {
            Undo& u = *iu;
            // Walk the operations of this step in reverse order.
            for (riUndoOp i = u.rbegin(); i != u.rend(); ++i) {
                switch (i->type) {
                case UndoOp::DeleteTrack:
                    if (i->oTrack) {
                        delete i->oTrack;
                        // Null out any later DeleteTrack references to the
                        // same track so it is not deleted again.
                        iUndo iu2 = iu;
                        ++iu2;
                        for (; iu2 != end(); ++iu2) {
                            Undo& u2 = *iu2;
                            for (riUndoOp i2 = u2.rbegin(); i2 != u2.rend(); ++i2) {
                                if (i2->type == UndoOp::DeleteTrack) {
                                    if (i2->oTrack == i->oTrack)
                                        i2->oTrack = 0;
                                }
                            }
                        }
                    }
                    break;
                case UndoOp::ModifyTrack:
                    if (i->oTrack) {
                        // Prevent delete i->oTrack from crashing.
                        // (Detach JACK ports before the destructor runs.)
                        switch (i->oTrack->type()) {
                        case Track::AUDIO_OUTPUT:
                        {
                            AudioOutput* ao = (AudioOutput*) i->oTrack;
                            for (int ch = 0; ch < ao->channels(); ++ch)
                                ao->setJackPort(ch, 0);
                        }
                            break;
                        case Track::AUDIO_INPUT:
                        {
                            AudioInput* ai = (AudioInput*) i->oTrack;
                            for (int ch = 0; ch < ai->channels(); ++ch)
                                ai->setJackPort(ch, 0);
                        }
                            break;
                        default:
                            break;
                        }
                        // Audio tracks also need their effects chain cleared
                        // before deletion.
                        if (!i->oTrack->isMidiTrack())
                            ((AudioTrack*) i->oTrack)->clearEfxList();
                        delete i->oTrack;
                        // As above: null out later ModifyTrack references to
                        // the same track pointer.
                        iUndo iu2 = iu;
                        ++iu2;
                        for (; iu2 != end(); ++iu2) {
                            Undo& u2 = *iu2;
                            for (riUndoOp i2 = u2.rbegin(); i2 != u2.rend(); ++i2) {
                                if (i2->type == UndoOp::ModifyTrack) {
                                    if (i2->oTrack == i->oTrack)
                                        i2->oTrack = 0;
                                }
                            }
                        }
                    }
                    break;
                //case UndoOp::DeletePart:
                        //delete i->oPart;
                //      break;
                //case UndoOp::DeleteTempo:
                //      break;
                //case UndoOp::DeleteSig:
                //      break;
                case UndoOp::ModifyMarker:
                    if (i->copyMarker)
                        delete i->copyMarker;
                    // NOTE(review): no break here — control falls through to
                    // default, which is harmless since default only breaks.
                default:
                    break;
                }
            }
            u.clear();
        }
    }
    clear();
}
// Re-applies the most recent undone step (the back of redoList), dispatching
// on each operation's type and accumulating change notifications in
// updateFlags. Note: this function only applies the operations; moving the
// step between the redo and undo lists presumably happens in the caller —
// verify against the doRedo entry point.
void Song::doRedo2()
{
    Undo& u = redoList->back();
    for (iUndoOp i = u.begin(); i != u.end(); ++i) {
        switch (i->type) {
        case UndoOp::AddTrack:
            insertTrack2(i->oTrack, i->trackno);
            // Added by T356.
            chainTrackParts(i->oTrack, true);
            updateFlags |= SC_TRACK_INSERTED;
            break;
        case UndoOp::DeleteTrack:
            removeTrack2(i->oTrack);
            updateFlags |= SC_TRACK_REMOVED;
            break;
        case UndoOp::ModifyTrack:
        {
            // Unchain the track parts, but don't touch the ref counts.
            unchainTrackParts(i->nTrack, false);
            //Track* track = i->nTrack->clone();
            Track* track = i->nTrack->clone(false);
            // Copy the modified state into the live track; keep a clone of
            // the previous live state so the operation stays reversible.
            *(i->nTrack) = *(i->oTrack);
            // Prevent delete i->oTrack from crashing.
            // (Detach JACK ports before the destructor runs.)
            switch (i->oTrack->type()) {
            case Track::AUDIO_OUTPUT:
            {
                AudioOutput* ao = (AudioOutput*) i->oTrack;
                for (int ch = 0; ch < ao->channels(); ++ch)
                    ao->setJackPort(ch, 0);
            }
                break;
            case Track::AUDIO_INPUT:
            {
                AudioInput* ai = (AudioInput*) i->oTrack;
                for (int ch = 0; ch < ai->channels(); ++ch)
                    ai->setJackPort(ch, 0);
            }
                break;
            default:
                break;
            }
            if (!i->oTrack->isMidiTrack())
                ((AudioTrack*) i->oTrack)->clearEfxList();
            delete i->oTrack;
            i->oTrack = track;
            // Chain the track parts, but don't touch the ref counts.
            chainTrackParts(i->nTrack, false);
            // Connect and register ports.
            switch (i->nTrack->type()) {
            case Track::AUDIO_OUTPUT:
            {
                AudioOutput* ao = (AudioOutput*) i->nTrack;
                ao->setName(ao->name());
            }
                break;
            case Track::AUDIO_INPUT:
            {
                AudioInput* ai = (AudioInput*) i->nTrack;
                ai->setName(ai->name());
            }
                break;
            default:
                break;
            }
            // Update solo states, since the user may have changed soloing on other tracks.
            updateSoloStates();
            updateFlags |= SC_TRACK_MODIFIED;
        }
            break;
        /* // Prevent delete i->oTrack from crashing.
        switch(i->oTrack->type())
        {
            case Track::AUDIO_OUTPUT:
            {
                AudioOutput* ao = (AudioOutput*)i->oTrack;
                for(int ch = 0; ch < ao->channels(); ++ch)
                    ao->setJackPort(ch, 0);
            }
            break;
            case Track::AUDIO_INPUT:
            {
                AudioInput* ai = (AudioInput*)i->oTrack;
                for(int ch = 0; ch < ai->channels(); ++ch)
                    ai->setJackPort(ch, 0);
            }
            break;
            default:
            break;
        }
        if(!i->oTrack->isMidiTrack())
            ((AudioTrack*)i->oTrack)->clearEfxList();
        //delete i->oTrack;
        //i->oTrack = track;

        // Remove the track. removeTrack2 takes care of unchaining the old track.
        removeTrack2(i->oTrack);
        // Connect and register ports.
        switch(i->nTrack->type())
        {
            case Track::AUDIO_OUTPUT:
            {
                AudioOutput* ao = (AudioOutput*)i->nTrack;
                ao->setName(ao->name());
            }
            break;
            case Track::AUDIO_INPUT:
            {
                AudioInput* ai = (AudioInput*)i->nTrack;
                ai->setName(ai->name());
            }
            break;
            default:
            break;
        }
        // Insert the new track.
        insertTrack2(i->nTrack, i->trackno);
        // Chain the new track parts. (removeTrack2, above, takes care of unchaining the old track).
        chainTrackParts(i->nTrack, true);
        // Update solo states, since the user may have changed soloing on other tracks.
        updateSoloStates();
        updateFlags |= SC_TRACK_MODIFIED;
        }
        break;
        */
        case UndoOp::SwapTrack:
        {
            // Exchange the two track slots.
            Track* track = _tracks[i->a];
            _tracks[i->a] = _tracks[i->b];
            _tracks[i->b] = track;
            updateFlags |= SC_TRACK_MODIFIED;
        }
            break;
        case UndoOp::AddPart:
            addPart(i->oPart);
            updateFlags |= SC_PART_INSERTED;
            i->oPart->events()->incARef(1);
            //i->oPart->chainClone();
            chainClone(i->oPart);
            break;
        case UndoOp::DeletePart:
            removePart(i->oPart);
            updateFlags |= SC_PART_REMOVED;
            i->oPart->events()->incARef(-1);
            //i->oPart->unchainClone();
            unchainClone(i->oPart);
            break;
        case UndoOp::ModifyPart:
            if (i->doCtrls)
                removePortCtrlEvents(i->nPart, i->doClones);
            changePart(i->nPart, i->oPart);
            // Adjust event reference counts for the swapped parts.
            i->oPart->events()->incARef(1);
            i->nPart->events()->incARef(-1);
            //i->nPart->replaceClone(i->oPart);
            replaceClone(i->nPart, i->oPart);
            if (i->doCtrls)
                addPortCtrlEvents(i->oPart, i->doClones);
            updateFlags |= SC_PART_MODIFIED;
            break;
        case UndoOp::AddEvent:
            addEvent(i->nEvent, i->part);
            if (i->doCtrls)
                addPortCtrlEvents(i->nEvent, i->part, i->doClones);
            updateFlags |= SC_EVENT_INSERTED;
            break;
        case UndoOp::DeleteEvent:
            if (i->doCtrls)
                removePortCtrlEvents(i->nEvent, i->part, i->doClones);
            deleteEvent(i->nEvent, i->part);
            updateFlags |= SC_EVENT_REMOVED;
            break;
        case UndoOp::ModifyEvent:
            if (i->doCtrls)
                removePortCtrlEvents(i->nEvent, i->part, i->doClones);
            changeEvent(i->nEvent, i->oEvent, i->part);
            if (i->doCtrls)
                addPortCtrlEvents(i->oEvent, i->part, i->doClones);
            updateFlags |= SC_EVENT_MODIFIED;
            break;
        case UndoOp::AddTempo:
            //printf("doRedo2: UndoOp::AddTempo. adding tempo at: %d with tempo=%d\n", i->a, i->b);
            tempomap.addTempo(i->a, i->b);
            updateFlags |= SC_TEMPO;
            break;
        case UndoOp::DeleteTempo:
            //printf("doRedo2: UndoOp::DeleteTempo. deleting tempo at: %d with tempo=%d\n", i->a, i->b);
            tempomap.delTempo(i->a);
            updateFlags |= SC_TEMPO;
            break;
        case UndoOp::AddSig:
            ///sigmap.add(i->a, i->b, i->c);
            AL::sigmap.add(i->a, AL::TimeSignature(i->b, i->c));
            updateFlags |= SC_SIG;
            break;
        case UndoOp::DeleteSig:
            ///sigmap.del(i->a);
            AL::sigmap.del(i->a);
            updateFlags |= SC_SIG;
            break;
        case UndoOp::ModifyClip:
        case UndoOp::ModifyMarker:
            // No redo action required for these operation types here.
            break;
        }
    }
}