Example #1
bool Sampler::__render_note_resample(
	Sample *pSample,
	Note *pNote,
	SelectedLayerInfo *pSelectedLayerInfo,
	InstrumentComponent *pCompo,
	DrumkitComponent *pDrumCompo,
	int nBufferSize,
	int nInitialSilence,
	float cost_L,
	float cost_R,
	float cost_track_L,
	float cost_track_R,
	float fLayerPitch,
	Song* pSong
)
{
	AudioOutput* pAudioOutput = Hydrogen::get_instance()->getAudioOutput();

	int nNoteLength = -1;
	if ( pNote->get_length() != -1 ) {
		nNoteLength = ( int )( pNote->get_length() * pAudioOutput->m_transport.m_nTickSize );
	}
	float fNotePitch = pNote->get_total_pitch() + fLayerPitch;

	float fStep = pow( 1.0594630943593, ( double )fNotePitch ); // 1.0594630943593 ≈ 2^(1/12), one equal-tempered semitone per pitch unit
//	_ERRORLOG( QString("pitch: %1, step: %2" ).arg(fNotePitch).arg( fStep) );
	fStep *= ( float )pSample->get_sample_rate() / pAudioOutput->getSampleRate(); // Adjust for audio driver sample rate

	// check how many frames are still available to render
	int nAvail_bytes = ( int )( ( float )( pSample->get_frames() - pSelectedLayerInfo->SamplePosition ) / fStep );


	bool retValue = true; // the note is ended
	if ( nAvail_bytes > nBufferSize - nInitialSilence ) {	// the sample is larger than the buffer size
		// clamp the number of available frames to the buffer size
		nAvail_bytes = nBufferSize - nInitialSilence;
		retValue = false; // the note is not ended yet
	}

	//	ADSR *pADSR = pNote->m_pADSR;

	int nInitialBufferPos = nInitialSilence;
	//float fInitialSamplePos = pNote->get_sample_position( pCompo->get_drumkit_componentID() );
	double fSamplePos = pSelectedLayerInfo->SamplePosition;
	int nTimes = nInitialBufferPos + nAvail_bytes;

	float *pSample_data_L = pSample->get_data_l();
	float *pSample_data_R = pSample->get_data_r();

	float fInstrPeak_L = pNote->get_instrument()->get_peak_l(); // this value will be reset to 0 by the mixer..
	float fInstrPeak_R = pNote->get_instrument()->get_peak_r(); // this value will be reset to 0 by the mixer..

	float fADSRValue = 1.0;
	float fVal_L;
	float fVal_R;
	int nSampleFrames = pSample->get_frames();


#ifdef H2CORE_HAVE_JACK
	JackAudioDriver* pJackAudioDriver = 0;
	float *		pTrackOutL = 0;
	float *		pTrackOutR = 0;

	if( pAudioOutput->has_track_outs()
	&& (pJackAudioDriver = dynamic_cast<JackAudioDriver*>(pAudioOutput)) ) {
		pTrackOutL = pJackAudioDriver->getTrackOut_L( pNote->get_instrument(), pCompo );
		pTrackOutR = pJackAudioDriver->getTrackOut_R( pNote->get_instrument(), pCompo );
	}
#endif

	for ( int nBufferPos = nInitialBufferPos; nBufferPos < nTimes; ++nBufferPos ) {
		if ( ( nNoteLength != -1 ) && ( nNoteLength <= pSelectedLayerInfo->SamplePosition ) ) {
			if ( pNote->get_adsr()->release() == 0 ) {
				retValue = true;	// the note is ended
			}
		}

		int nSamplePos = ( int )fSamplePos;
		double fDiff = fSamplePos - nSamplePos;
		if ( ( nSamplePos + 1 ) >= nSampleFrames ) {
			// we reached the last audio frame:
			// set this last frame to zero so nothing goes wrong.
			fVal_L = 0.0;
			fVal_R = 0.0;
		} else {
			// some interpolation methods need 4 frames of data.
				float last_l;
				float last_r;
				if ( ( nSamplePos + 2 ) >= nSampleFrames ) {
					last_l = 0.0;
					last_r = 0.0;
				} else {
					last_l =  pSample_data_L[nSamplePos + 2];
					last_r =  pSample_data_R[nSamplePos + 2];
				}

				switch( __interpolateMode ){

						case LINEAR:
								fVal_L = pSample_data_L[nSamplePos] * (1 - fDiff ) + pSample_data_L[nSamplePos + 1] * fDiff;
								fVal_R = pSample_data_R[nSamplePos] * (1 - fDiff ) + pSample_data_R[nSamplePos + 1] * fDiff;
								//fVal_L = linear_Interpolate( pSample_data_L[nSamplePos], pSample_data_L[nSamplePos + 1], fDiff);
								//fVal_R = linear_Interpolate( pSample_data_R[nSamplePos], pSample_data_R[nSamplePos + 1], fDiff);
								break;
						case COSINE:
								fVal_L = cosine_Interpolate( pSample_data_L[nSamplePos], pSample_data_L[nSamplePos + 1], fDiff);
								fVal_R = cosine_Interpolate( pSample_data_R[nSamplePos], pSample_data_R[nSamplePos + 1], fDiff);
								break;
						case THIRD:
								fVal_L = third_Interpolate( pSample_data_L[ nSamplePos -1], pSample_data_L[nSamplePos], pSample_data_L[nSamplePos + 1], last_l, fDiff);
								fVal_R = third_Interpolate( pSample_data_R[ nSamplePos -1], pSample_data_R[nSamplePos], pSample_data_R[nSamplePos + 1], last_r, fDiff);
								break;
						case CUBIC:
								fVal_L = cubic_Interpolate( pSample_data_L[ nSamplePos -1], pSample_data_L[nSamplePos], pSample_data_L[nSamplePos + 1], last_l, fDiff);
								fVal_R = cubic_Interpolate( pSample_data_R[ nSamplePos -1], pSample_data_R[nSamplePos], pSample_data_R[nSamplePos + 1], last_r, fDiff);
								break;
						case HERMITE:
								fVal_L = hermite_Interpolate( pSample_data_L[ nSamplePos -1], pSample_data_L[nSamplePos], pSample_data_L[nSamplePos + 1], last_l, fDiff);
								fVal_R = hermite_Interpolate( pSample_data_R[ nSamplePos -1], pSample_data_R[nSamplePos], pSample_data_R[nSamplePos + 1], last_r, fDiff);
								break;
				}
		}

		// ADSR envelope: get_value() advances the envelope by the resample step so it stays in sync with the sample position
		fADSRValue = pNote->get_adsr()->get_value( fStep );
		fVal_L = fVal_L * fADSRValue;
		fVal_R = fVal_R * fADSRValue;
		// Low pass resonant filter
		if ( pNote->get_instrument()->is_filter_active() ) {
			pNote->compute_lr_values( &fVal_L, &fVal_R );
		}



#ifdef H2CORE_HAVE_JACK
		if ( pTrackOutL ) {
			pTrackOutL[nBufferPos] += fVal_L * cost_track_L;
		}
		if ( pTrackOutR ) {
			pTrackOutR[nBufferPos] += fVal_R * cost_track_R;
		}
#endif

		fVal_L = fVal_L * cost_L;
		fVal_R = fVal_R * cost_R;

		// update instr peak
		if ( fVal_L > fInstrPeak_L ) {
			fInstrPeak_L = fVal_L;
		}
		if ( fVal_R > fInstrPeak_R ) {
			fInstrPeak_R = fVal_R;
		}

		pDrumCompo->set_outs( nBufferPos, fVal_L, fVal_R );

		// to main mix
		__main_out_L[nBufferPos] += fVal_L;
		__main_out_R[nBufferPos] += fVal_R;

		fSamplePos += fStep;
	}
	pSelectedLayerInfo->SamplePosition += nAvail_bytes * fStep;
	pNote->get_instrument()->set_peak_l( fInstrPeak_L );
	pNote->get_instrument()->set_peak_r( fInstrPeak_R );



#ifdef H2CORE_HAVE_LADSPA
	// LADSPA
	// change the return logic below if you add code after this ifdef
	if (pNote->get_instrument()->is_muted() || pSong->__is_muted) return retValue;
	float masterVol = pSong->get_volume();
	for ( unsigned nFX = 0; nFX < MAX_FX; ++nFX ) {
		LadspaFX *pFX = Effects::get_instance()->getLadspaFX( nFX );
		float fLevel = pNote->get_instrument()->get_fx_level( nFX );
		if ( ( pFX ) && ( fLevel != 0.0 ) ) {
			fLevel = fLevel * pFX->getVolume();

			float *pBuf_L = pFX->m_pBuffer_L;
			float *pBuf_R = pFX->m_pBuffer_R;

//			float fFXCost_L = cost_L * fLevel;
//			float fFXCost_R = cost_R * fLevel;
			float fFXCost_L = fLevel * masterVol;
			float fFXCost_R = fLevel * masterVol;

			int nBufferPos = nInitialBufferPos;
			float fSamplePos = pSelectedLayerInfo->SamplePosition;
			for ( int i = 0; i < nAvail_bytes; ++i ) {
				int nSamplePos = ( int )fSamplePos;
				double fDiff = fSamplePos - nSamplePos;

				if ( ( nSamplePos + 1 ) >= nSampleFrames ) {
					// we reached the last audio frame:
					// set this last frame to zero so nothing goes wrong.
					fVal_L = 0.0;
					fVal_R = 0.0;
				} else {
					// some interpolation methods need 4 frames of data.
					float last_l;
					float last_r;
					if ( ( nSamplePos + 2 ) >= nSampleFrames ) {
						last_l = 0.0;
						last_r = 0.0;
					} else {
						last_l =  pSample_data_L[nSamplePos + 2];
						last_r =  pSample_data_R[nSamplePos + 2];
					}

					switch( __interpolateMode ){

					case LINEAR:
						fVal_L = pSample_data_L[nSamplePos] * (1 - fDiff ) + pSample_data_L[nSamplePos + 1] * fDiff;
						fVal_R = pSample_data_R[nSamplePos] * (1 - fDiff ) + pSample_data_R[nSamplePos + 1] * fDiff;
						break;
					case COSINE:
						fVal_L = cosine_Interpolate( pSample_data_L[nSamplePos], pSample_data_L[nSamplePos + 1], fDiff);
						fVal_R = cosine_Interpolate( pSample_data_R[nSamplePos], pSample_data_R[nSamplePos + 1], fDiff);
						break;
					case THIRD:
						fVal_L = third_Interpolate( pSample_data_L[ nSamplePos -1], pSample_data_L[nSamplePos], pSample_data_L[nSamplePos + 1], last_l, fDiff);
						fVal_R = third_Interpolate( pSample_data_R[ nSamplePos -1], pSample_data_R[nSamplePos], pSample_data_R[nSamplePos + 1], last_r, fDiff);
						break;
					case CUBIC:
						fVal_L = cubic_Interpolate( pSample_data_L[ nSamplePos -1], pSample_data_L[nSamplePos], pSample_data_L[nSamplePos + 1], last_l, fDiff);
						fVal_R = cubic_Interpolate( pSample_data_R[ nSamplePos -1], pSample_data_R[nSamplePos], pSample_data_R[nSamplePos + 1], last_r, fDiff);
						break;
					case HERMITE:
						fVal_L = hermite_Interpolate( pSample_data_L[ nSamplePos -1], pSample_data_L[nSamplePos], pSample_data_L[nSamplePos + 1], last_l, fDiff);
						fVal_R = hermite_Interpolate( pSample_data_R[ nSamplePos -1], pSample_data_R[nSamplePos], pSample_data_R[nSamplePos + 1], last_r, fDiff);
						break;
					}
				}

				pBuf_L[ nBufferPos ] += fVal_L * fFXCost_L;
				pBuf_R[ nBufferPos ] += fVal_R * fFXCost_R;
				fSamplePos += fStep;
				++nBufferPos;
			}
		}
	}
#endif

	return retValue;
}
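
The core of the render path above is a pitch-shifting resampler: the source sample is read at a fractional step of 2^(semitones/12), and each output frame is interpolated from its neighbouring source frames. Below is a minimal, self-contained sketch of the LINEAR case under that reading; the function and variable names are illustrative only and are not part of the Hydrogen API.

#include <cmath>
#include <cstddef>
#include <cstdio>
#include <vector>

// Resample `src` into `dst` with linear interpolation, shifting the pitch by
// `fSemitones`. The step base of 2^(1/12) matches the 1.0594630943593 constant
// used in the Sampler code above. Illustrative names, not Hydrogen API.
static void resample_linear( const std::vector<float>& src,
                             std::vector<float>& dst,
                             float fSemitones )
{
	double fStep = std::pow( 1.0594630943593, (double)fSemitones );
	double fSamplePos = 0.0;

	for ( std::size_t i = 0; i < dst.size(); ++i ) {
		int nSamplePos = (int)fSamplePos;
		double fDiff = fSamplePos - nSamplePos;

		if ( nSamplePos + 1 >= (int)src.size() ) {
			dst[i] = 0.0f;   // past the last source frame: output silence
		} else {
			// weighted average of the two neighbouring source frames
			dst[i] = (float)( src[nSamplePos] * (1 - fDiff) + src[nSamplePos + 1] * fDiff );
		}
		fSamplePos += fStep;   // advance the read head by the pitch step
	}
}

int main()
{
	std::vector<float> src( 64 );
	for ( std::size_t i = 0; i < src.size(); ++i ) {
		src[i] = (float)std::sin( 2.0 * 3.141592653589793 * i / 16.0 );   // toy input: a sine
	}
	std::vector<float> dst( 32 );
	resample_linear( src, dst, 12.0f );   // +12 semitones -> step of 2.0 (one octave up)
	std::printf( "%f %f %f\n", dst[0], dst[1], dst[2] );
	return 0;
}

The COSINE, THIRD, CUBIC and HERMITE modes in the examples only change how the neighbouring frames are combined; the fractional stepping and the end-of-sample handling stay the same.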
Example #2
int Sampler::__render_note_resample(
	Sample *pSample,
	Note *pNote,
	int nBufferSize,
	int nInitialSilence,
	float cost_L,
	float cost_R,
	float cost_track_L,
	float cost_track_R,
	float fLayerPitch,
	Song* pSong
)
{
	AudioOutput* audio_output = Hydrogen::get_instance()->getAudioOutput();
	int nNoteLength = -1;
	if ( pNote->get_length() != -1 ) {
		nNoteLength = ( int )( pNote->get_length() * audio_output->m_transport.m_nTickSize );
	}
	float fNotePitch = pNote->get_total_pitch() + fLayerPitch;

	float fStep = pow( 1.0594630943593, ( double )fNotePitch );
//	_ERRORLOG( QString("pitch: %1, step: %2" ).arg(fNotePitch).arg( fStep) );
	fStep *= ( float )pSample->get_sample_rate() / audio_output->getSampleRate(); // Adjust for audio driver sample rate

	// check how many frames are still available to render
	int nAvail_bytes = ( int )( ( float )( pSample->get_frames() - pNote->get_sample_position() ) / fStep );


	int retValue = 1; // the note is ended
	if ( nAvail_bytes > nBufferSize - nInitialSilence ) {	// the sample is larger than the buffer size
		// clamp the number of available frames to the buffer size
		nAvail_bytes = nBufferSize - nInitialSilence;
		retValue = 0; // the note is not ended yet
	}

	//	ADSR *pADSR = pNote->m_pADSR;

	int nInitialBufferPos = nInitialSilence;
	float fInitialSamplePos = pNote->get_sample_position();
	double fSamplePos = pNote->get_sample_position();
	int nTimes = nInitialBufferPos + nAvail_bytes;
	int nInstrument = pSong->get_instrument_list()->index( pNote->get_instrument() );

	float *pSample_data_L = pSample->get_data_l();
	float *pSample_data_R = pSample->get_data_r();

	float fInstrPeak_L = pNote->get_instrument()->get_peak_l(); // this value will be reset to 0 by the mixer..
	float fInstrPeak_R = pNote->get_instrument()->get_peak_r(); // this value will be reset to 0 by the mixer..

	float fADSRValue = 1.0;
	float fVal_L;
	float fVal_R;
	int nSampleFrames = pSample->get_frames();

	/*
	 * nInstrument could be -1 if the instrument is not found in the current drumset.
	 * This happens when someone is using the prelistening function of the soundlibrary.
	 */

	if( nInstrument < 0 ) {
		nInstrument = 0;
	}

#ifdef H2CORE_HAVE_JACK
	JackOutput* jao = 0;
	float *track_out_L = 0;
	float *track_out_R = 0;
	if( audio_output->has_track_outs()
	&& (jao = dynamic_cast<JackOutput*>(audio_output)) ) {
		track_out_L = jao->getTrackOut_L( nInstrument );
		track_out_R = jao->getTrackOut_R( nInstrument );
	}
#endif

	for ( int nBufferPos = nInitialBufferPos; nBufferPos < nTimes; ++nBufferPos ) {
		if ( ( nNoteLength != -1 ) && ( nNoteLength <= pNote->get_sample_position() ) ) {
			if ( pNote->get_adsr()->release() == 0 ) {
				retValue = 1;	// the note is ended
			}
		}

		int nSamplePos = ( int )fSamplePos;
		double fDiff = fSamplePos - nSamplePos;
		if ( ( nSamplePos + 1 ) >= nSampleFrames ) {
			// we reached the last audio frame:
			// set this last frame to zero so nothing goes wrong.
			fVal_L = 0.0;
			fVal_R = 0.0;
		} else {
			// some interpolation methods need 4 frames of data.
				float last_l;
				float last_r;
				if ( ( nSamplePos + 2 ) >= nSampleFrames ) {
					last_l = 0.0;
					last_r = 0.0;
				} else {
					last_l =  pSample_data_L[nSamplePos + 2];
					last_r =  pSample_data_R[nSamplePos + 2];
				}

				switch( __interpolateMode ){

						case LINEAR:
								fVal_L = pSample_data_L[nSamplePos] * (1 - fDiff ) + pSample_data_L[nSamplePos + 1] * fDiff;
								fVal_R = pSample_data_R[nSamplePos] * (1 - fDiff ) + pSample_data_R[nSamplePos + 1] * fDiff;
								//fVal_L = linear_Interpolate( pSample_data_L[nSamplePos], pSample_data_L[nSamplePos + 1], fDiff);
								//fVal_R = linear_Interpolate( pSample_data_R[nSamplePos], pSample_data_R[nSamplePos + 1], fDiff);
								break;
						case COSINE:
								fVal_L = cosine_Interpolate( pSample_data_L[nSamplePos], pSample_data_L[nSamplePos + 1], fDiff);
								fVal_R = cosine_Interpolate( pSample_data_R[nSamplePos], pSample_data_R[nSamplePos + 1], fDiff);
								break;
						case THIRD:
								fVal_L = third_Interpolate( pSample_data_L[ nSamplePos -1], pSample_data_L[nSamplePos], pSample_data_L[nSamplePos + 1], last_l, fDiff);
								fVal_R = third_Interpolate( pSample_data_R[ nSamplePos -1], pSample_data_R[nSamplePos], pSample_data_R[nSamplePos + 1], last_r, fDiff);
								break;
						case CUBIC:
								fVal_L = cubic_Interpolate( pSample_data_L[ nSamplePos -1], pSample_data_L[nSamplePos], pSample_data_L[nSamplePos + 1], last_l, fDiff);
								fVal_R = cubic_Interpolate( pSample_data_R[ nSamplePos -1], pSample_data_R[nSamplePos], pSample_data_R[nSamplePos + 1], last_r, fDiff);
								break;
						case HERMITE:
								fVal_L = hermite_Interpolate( pSample_data_L[ nSamplePos -1], pSample_data_L[nSamplePos], pSample_data_L[nSamplePos + 1], last_l, fDiff);
								fVal_R = hermite_Interpolate( pSample_data_R[ nSamplePos -1], pSample_data_R[nSamplePos], pSample_data_R[nSamplePos + 1], last_r, fDiff);
								break;
				}
		}

		// ADSR envelope
		fADSRValue = pNote->get_adsr()->get_value( fStep );
		fVal_L = fVal_L * fADSRValue;
		fVal_R = fVal_R * fADSRValue;
		// Low pass resonant filter
		if ( pNote->get_instrument()->is_filter_active() ) {
			pNote->compute_lr_values( &fVal_L, &fVal_R );
		}



#ifdef H2CORE_HAVE_JACK
		if( track_out_L ) {
			track_out_L[nBufferPos] += fVal_L * cost_track_L;
		}
		if( track_out_R ) {
			track_out_R[nBufferPos] += fVal_R * cost_track_R;
		}
#endif

		fVal_L = fVal_L * cost_L;
		fVal_R = fVal_R * cost_R;

		// update instr peak
		if ( fVal_L > fInstrPeak_L ) {
			fInstrPeak_L = fVal_L;
		}
		if ( fVal_R > fInstrPeak_R ) {
			fInstrPeak_R = fVal_R;
		}

		// to main mix
		__main_out_L[nBufferPos] += fVal_L;
		__main_out_R[nBufferPos] += fVal_R;

		fSamplePos += fStep;
	}
	pNote->update_sample_position( nAvail_bytes * fStep );
	pNote->get_instrument()->set_peak_l( fInstrPeak_L );
	pNote->get_instrument()->set_peak_r( fInstrPeak_R );



#ifdef H2CORE_HAVE_LADSPA
	// LADSPA
	float masterVol = pSong->get_volume();
	for ( unsigned nFX = 0; nFX < MAX_FX; ++nFX ) {
		LadspaFX *pFX = Effects::get_instance()->getLadspaFX( nFX );
		float fLevel = pNote->get_instrument()->get_fx_level( nFX );
		if ( ( pFX ) && ( fLevel != 0.0 ) ) {
			fLevel = fLevel * pFX->getVolume();

			float *pBuf_L = pFX->m_pBuffer_L;
			float *pBuf_R = pFX->m_pBuffer_R;

//			float fFXCost_L = cost_L * fLevel;
//			float fFXCost_R = cost_R * fLevel;
			float fFXCost_L = fLevel * masterVol;
			float fFXCost_R = fLevel * masterVol;

			int nBufferPos = nInitialBufferPos;
			float fSamplePos = fInitialSamplePos;
			for ( int i = 0; i < nAvail_bytes; ++i ) {
				int nSamplePos = ( int )fSamplePos;
				double fDiff = fSamplePos - nSamplePos;

				if ( ( nSamplePos + 1 ) >= nSampleFrames ) {
					// we reached the last audio frame:
					// set this last frame to zero so nothing goes wrong.
					fVal_L = 0.0;
					fVal_R = 0.0;
				} else {
					// some interpolation methods need 4 frames of data.
					float last_l;
					float last_r;
					if ( ( nSamplePos + 2 ) >= nSampleFrames ) {
						last_l = 0.0;
						last_r = 0.0;
					} else {
						last_l =  pSample_data_L[nSamplePos + 2];
						last_r =  pSample_data_R[nSamplePos + 2];
					}

					switch( __interpolateMode ){

					case LINEAR:
						fVal_L = pSample_data_L[nSamplePos] * (1 - fDiff ) + pSample_data_L[nSamplePos + 1] * fDiff;
						fVal_R = pSample_data_R[nSamplePos] * (1 - fDiff ) + pSample_data_R[nSamplePos + 1] * fDiff;
						//fVal_L = linear_Interpolate( pSample_data_L[nSamplePos], pSample_data_L[nSamplePos + 1], fDiff);
						//fVal_R = linear_Interpolate( pSample_data_R[nSamplePos], pSample_data_R[nSamplePos + 1], fDiff);
						break;
					case COSINE:
						fVal_L = cosine_Interpolate( pSample_data_L[nSamplePos], pSample_data_L[nSamplePos + 1], fDiff);
						fVal_R = cosine_Interpolate( pSample_data_R[nSamplePos], pSample_data_R[nSamplePos + 1], fDiff);
						break;
					case THIRD:
						fVal_L = third_Interpolate( pSample_data_L[ nSamplePos -1], pSample_data_L[nSamplePos], pSample_data_L[nSamplePos + 1], last_l, fDiff);
						fVal_R = third_Interpolate( pSample_data_R[ nSamplePos -1], pSample_data_R[nSamplePos], pSample_data_R[nSamplePos + 1], last_r, fDiff);
						break;
					case CUBIC:
						fVal_L = cubic_Interpolate( pSample_data_L[ nSamplePos -1], pSample_data_L[nSamplePos], pSample_data_L[nSamplePos + 1], last_l, fDiff);
						fVal_R = cubic_Interpolate( pSample_data_R[ nSamplePos -1], pSample_data_R[nSamplePos], pSample_data_R[nSamplePos + 1], last_r, fDiff);
						break;
					case HERMITE:
						fVal_L = hermite_Interpolate( pSample_data_L[ nSamplePos -1], pSample_data_L[nSamplePos], pSample_data_L[nSamplePos + 1], last_l, fDiff);
						fVal_R = hermite_Interpolate( pSample_data_R[ nSamplePos -1], pSample_data_R[nSamplePos], pSample_data_R[nSamplePos + 1], last_r, fDiff);
						break;
					}
					// the Interpolate() helper would add an extra function call and cost much more time here,
					// so the switch is coded directly in the resampler method
					//fVal_L = Interpolate( pSample_data_L[ nSamplePos -1], pSample_data_L[nSamplePos], pSample_data_L[nSamplePos + 1], pSample_data_L[nSamplePos + 2] ,fDiff);
					//fVal_L = Interpolate( pSample_data_R[ nSamplePos -1], pSample_data_R[nSamplePos], pSample_data_R[nSamplePos + 1], pSample_data_R[nSamplePos + 2] ,fDiff);
				}

				pBuf_L[ nBufferPos ] += fVal_L * fFXCost_L;
				pBuf_R[ nBufferPos ] += fVal_R * fFXCost_R;
				fSamplePos += fStep;
				++nBufferPos;
			}
		}
	}
#endif

	return retValue;
}
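
Both render functions also scale every interpolated frame by an ADSR value obtained with get_value( fStep ), so the envelope advances at the same rate as the resampled read position. The sketch below shows one plausible shape of such an envelope driven the same way; it is an illustration under that assumption, not Hydrogen's ADSR implementation.

#include <cstdio>

// Minimal linear ADSR sketch: get_value() advances the envelope by fStep
// frames and returns the current gain, mirroring how the render loops above
// scale each interpolated frame. Shape and frame counts are illustrative only.
struct SimpleADSR {
	double attack;    // attack length in frames
	double decay;     // decay length in frames
	double sustain;   // sustain level, 0..1
	double release;   // release length in frames
	double pos;       // envelope position in frames
	bool   released;  // release has been triggered
	double level;     // last value before release

	float get_value( double fStep ) {
		double v;
		if ( released ) {
			v = ( pos < release ) ? level * ( 1.0 - pos / release ) : 0.0;
		} else if ( pos < attack ) {
			v = pos / attack;                                        // ramp 0 -> 1
		} else if ( pos < attack + decay ) {
			v = 1.0 - ( 1.0 - sustain ) * ( pos - attack ) / decay;  // decay to sustain
		} else {
			v = sustain;                                             // hold sustain
		}
		if ( !released ) {
			level = v;
		}
		pos += fStep;   // advance by the same fractional step as the sample position
		return (float)v;
	}

	void start_release() { released = true; pos = 0.0; }
};

int main()
{
	SimpleADSR adsr = { 64.0, 64.0, 0.7, 128.0, 0.0, false, 0.0 };
	for ( int i = 0; i < 4; ++i ) {
		std::printf( "%f\n", adsr.get_value( 32.0 ) );   // envelope values at successive steps
	}
	adsr.start_release();
	std::printf( "%f\n", adsr.get_value( 64.0 ) );       // first value of the release ramp
	return 0;
}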
Example #3
bool Sampler::processPlaybackTrack(int nBufferSize)
{
	Hydrogen* pEngine = Hydrogen::get_instance();
	AudioOutput* pAudioOutput = Hydrogen::get_instance()->getAudioOutput();
	Song* pSong = pEngine->getSong();

	if(   !pSong->get_playback_track_enabled()
	   || pEngine->getState() != STATE_PLAYING
	   || pSong->get_mode() != Song::SONG_MODE)
	{
		return false;
	}

	InstrumentComponent *pCompo = __playback_instrument->get_components()->front();
	Sample *pSample = pCompo->get_layer(0)->get_sample();

	float fVal_L;
	float fVal_R;

	float *pSample_data_L = pSample->get_data_l();
	float *pSample_data_R = pSample->get_data_r();
	
	float fInstrPeak_L = __playback_instrument->get_peak_l(); // this value will be reset to 0 by the mixer..
	float fInstrPeak_R = __playback_instrument->get_peak_r(); // this value will be reset to 0 by the mixer..

	assert(pSample);

	int nAvail_bytes = 0;
	int	nInitialBufferPos = 0;

	if(pSample->get_sample_rate() == pAudioOutput->getSampleRate()){
		//No resampling	
		__playBackSamplePosition = pAudioOutput->m_transport.m_nFrames;
	
		nAvail_bytes = pSample->get_frames() - ( int )__playBackSamplePosition;
		
		if ( nAvail_bytes > nBufferSize ) {
			nAvail_bytes = nBufferSize;
		}

		int nInitialSamplePos = ( int ) __playBackSamplePosition;
		int nSamplePos = nInitialSamplePos;
	
		int nTimes = nInitialBufferPos + nAvail_bytes;
	
		if(__playBackSamplePosition > pSample->get_frames()){
			//playback track has ended..
			return true;
		}
	
		for ( int nBufferPos = nInitialBufferPos; nBufferPos < nTimes; ++nBufferPos ) {
			fVal_L = pSample_data_L[ nSamplePos ];
			fVal_R = pSample_data_R[ nSamplePos ];
	
			fVal_L = fVal_L * 1.0f * pSong->get_playback_track_volume(); // cost L
			fVal_R = fVal_R * 1.0f * pSong->get_playback_track_volume(); // cost R
	
			//pDrumCompo->set_outs( nBufferPos, fVal_L, fVal_R );
	
			// to main mix
			if ( fVal_L > fInstrPeak_L ) {
				fInstrPeak_L = fVal_L;
			}
			if ( fVal_R > fInstrPeak_R ) {
				fInstrPeak_R = fVal_R;
			}
			
			__main_out_L[nBufferPos] += fVal_L;
			__main_out_R[nBufferPos] += fVal_R;
			
			++nSamplePos;
		}
	} else {
		//Perform resampling
		double	fSamplePos = 0;
		int		nSampleFrames = pSample->get_frames();
		float	fStep = 1.0594630943593; // 2^(1/12), the equal-tempered semitone ratio
		fStep *= ( float )pSample->get_sample_rate() / pAudioOutput->getSampleRate(); // Adjust for audio driver sample rate
		
		
		// derive the resampled read position from the transport frame count,
		// advancing in whole-buffer steps
		if(pAudioOutput->m_transport.m_nFrames == 0){
			fSamplePos = 0;
		} else {
			fSamplePos = ( (pAudioOutput->m_transport.m_nFrames/nBufferSize) * (nBufferSize * fStep));
		}
		
		nAvail_bytes = ( int )( ( float )( pSample->get_frames() - fSamplePos ) / fStep );
	
		if ( nAvail_bytes > nBufferSize ) {
			nAvail_bytes = nBufferSize;
		}

		int nTimes = nInitialBufferPos + nAvail_bytes;
	
		for ( int nBufferPos = nInitialBufferPos; nBufferPos < nTimes; ++nBufferPos ) {
			int nSamplePos = ( int ) fSamplePos;
			double fDiff = fSamplePos - nSamplePos;
			if ( ( nSamplePos + 1 ) >= nSampleFrames ) {
				// we reached the last audio frame:
				// set this last frame to zero so nothing goes wrong.
				fVal_L = 0.0;
				fVal_R = 0.0;
			} else {
				// some interpolation methods need 4 frames of data.
					float last_l;
					float last_r;
					if ( ( nSamplePos + 2 ) >= nSampleFrames ) {
						last_l = 0.0;
						last_r = 0.0;
					} else {
						last_l =  pSample_data_L[nSamplePos + 2];
						last_r =  pSample_data_R[nSamplePos + 2];
					}
	
					switch( __interpolateMode ){
	
							case LINEAR:
									fVal_L = pSample_data_L[nSamplePos] * (1 - fDiff ) + pSample_data_L[nSamplePos + 1] * fDiff;
									fVal_R = pSample_data_R[nSamplePos] * (1 - fDiff ) + pSample_data_R[nSamplePos + 1] * fDiff;
									break;
							case COSINE:
									fVal_L = cosine_Interpolate( pSample_data_L[nSamplePos], pSample_data_L[nSamplePos + 1], fDiff);
									fVal_R = cosine_Interpolate( pSample_data_R[nSamplePos], pSample_data_R[nSamplePos + 1], fDiff);
									break;
							case THIRD:
									fVal_L = third_Interpolate( pSample_data_L[ nSamplePos -1], pSample_data_L[nSamplePos], pSample_data_L[nSamplePos + 1], last_l, fDiff);
									fVal_R = third_Interpolate( pSample_data_R[ nSamplePos -1], pSample_data_R[nSamplePos], pSample_data_R[nSamplePos + 1], last_r, fDiff);
									break;
							case CUBIC:
									fVal_L = cubic_Interpolate( pSample_data_L[ nSamplePos -1], pSample_data_L[nSamplePos], pSample_data_L[nSamplePos + 1], last_l, fDiff);
									fVal_R = cubic_Interpolate( pSample_data_R[ nSamplePos -1], pSample_data_R[nSamplePos], pSample_data_R[nSamplePos + 1], last_r, fDiff);
									break;
							case HERMITE:
									fVal_L = hermite_Interpolate( pSample_data_L[ nSamplePos -1], pSample_data_L[nSamplePos], pSample_data_L[nSamplePos + 1], last_l, fDiff);
									fVal_R = hermite_Interpolate( pSample_data_R[ nSamplePos -1], pSample_data_R[nSamplePos], pSample_data_R[nSamplePos + 1], last_r, fDiff);
									break;
					}
			}
			
			if ( fVal_L > fInstrPeak_L ) {
				fInstrPeak_L = fVal_L;
			}
			if ( fVal_R > fInstrPeak_R ) {
				fInstrPeak_R = fVal_R;
			}

			__main_out_L[nBufferPos] += fVal_L;
			__main_out_R[nBufferPos] += fVal_R;


			fSamplePos += fStep;
		} //for
	}
	
	__playback_instrument->set_peak_l( fInstrPeak_L );
	__playback_instrument->set_peak_r( fInstrPeak_R );

	return true;
}
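
Unlike the note renderers, processPlaybackTrack() does not carry a per-note sample position: in the resampling branch it re-derives the read position from the transport frame counter on every cycle. A small worked example of that arithmetic, with illustrative values only, is sketched here.

#include <cstdio>

// Worked example of how the resampling branch of processPlaybackTrack()
// derives its read position: the transport frame counter is quantized to
// whole buffers, then scaled by the resample step. Values are illustrative.
int main()
{
	const long  nTransportFrames = 4096;                   // m_transport.m_nFrames
	const int   nBufferSize      = 1024;
	const int   nSampleFrames    = 6000;                   // pSample->get_frames()
	const float fStep            = 48000.0f / 44100.0f;    // sample-rate ratio

	// whole buffers elapsed, times the input frames consumed per buffer
	double fSamplePos = ( nTransportFrames / nBufferSize ) * ( nBufferSize * fStep );

	// remaining source frames expressed as output frames, clamped to one buffer
	int nAvail_bytes = (int)( ( nSampleFrames - fSamplePos ) / fStep );
	if ( nAvail_bytes > nBufferSize ) {
		nAvail_bytes = nBufferSize;
	}

	std::printf( "read position: %.1f, renderable frames: %d\n",
	             fSamplePos, nAvail_bytes );
	return 0;
}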