Example #1
Ref<AudioEffectInstance> AudioEffectChorus::instance() {

	Ref<AudioEffectChorusInstance> ins;
	ins.instance();
	ins->base = Ref<AudioEffectChorus>(this);
	for (int i = 0; i < 4; i++) {
		ins->filter_h[i] = AudioFrame(0, 0);
		ins->cycles[i] = 0;
	}

	float ring_buffer_max_size = AudioEffectChorus::MAX_DELAY_MS + AudioEffectChorus::MAX_DEPTH_MS + AudioEffectChorus::MAX_WIDTH_MS;

	ring_buffer_max_size *= 2; //just to avoid complications
	ring_buffer_max_size /= 1000.0; //convert to seconds
	ring_buffer_max_size *= AudioServer::get_singleton()->get_mix_rate();

	int ringbuff_size = ring_buffer_max_size;

	int bits = 0;

	while (ringbuff_size > 0) {
		bits++;
		ringbuff_size /= 2;
	}

	ringbuff_size = 1 << bits;
	ins->buffer_mask = ringbuff_size - 1;
	ins->buffer_pos = 0;
	ins->audio_buffer.resize(ringbuff_size);
	for (int i = 0; i < ringbuff_size; i++) {
		ins->audio_buffer.write[i] = AudioFrame(0, 0);
	}

	return ins;
}
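Note: this chorus (and the delay in Example #3 below) rounds its ring buffer up to the next power of two so that wrap-around becomes a cheap bitwise AND instead of a modulo. A minimal standalone sketch of the trick (hypothetical names, not Godot API):

#include <cstdio>

// Count how many halvings it takes to exhaust n, then shift 1 back up;
// this rounds up to the next power of two (4096 for n = 3000).
static int next_pow2(int n) {
	int bits = 0;
	while (n > 0) {
		bits++;
		n /= 2;
	}
	return 1 << bits;
}

int main() {
	int required = 3000; // frames needed for max delay plus headroom
	int size = next_pow2(required); // 4096
	int mask = size - 1; // 0xFFF
	// Any position, however large, wraps into the buffer with one AND:
	printf("size=%d wrapped=%d\n", size, 5000 & mask); // wrapped = 904
	return 0;
}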
Example #2
AudioFrame AudioFrame::to(const AudioFormat &fmt) const
{
    if (!isValid() || !constBits(0))
        return AudioFrame();
    //if (fmt == format())
      //  return clone(); //FIXME: clone a frame from ffmpeg is not enough?
    Q_D(const AudioFrame);
    // TODO: use a pool
    AudioResampler *conv = d->conv;
    QScopedPointer<AudioResampler> c;
    if (!conv) {
        conv = AudioResampler::create(AudioResamplerId_FF);
        if (!conv)
            conv = AudioResampler::create(AudioResamplerId_Libav);
        if (!conv) {
            qWarning("no audio resampler is available");
            return AudioFrame();
        }
        c.reset(conv);
    }
    conv->setInAudioFormat(format());
    conv->setOutAudioFormat(fmt);
    //conv->prepare(); // already called in setIn/OutFormat
    conv->setInSampesPerChannel(samplesPerChannel()); //TODO
    if (!conv->convert((const quint8**)d->planes.constData())) {
        qWarning() << "AudioFrame::to error: " << format() << "=>" << fmt;
        return AudioFrame();
    }
    AudioFrame f(conv->outData(), fmt);
    f.setSamplesPerChannel(conv->outSamplesPerChannel());
    f.setTimestamp(timestamp());
    f.d_ptr->metadata = d->metadata; // need metadata?
    return f;
}
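Note: a typical use of to() is converting decoded samples to a device-friendly format; a hedged usage sketch, assuming the QtAV AudioFormat setters that also appear in Example #9 below:

AudioFormat out_fmt;
out_fmt.setSampleRate(44100);
out_fmt.setChannels(2);
out_fmt.setSampleFormat(AudioFormat::SampleFormat_Signed16);
AudioFrame converted = frame.to(out_fmt); // returns an invalid AudioFrame on failure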
Example #3
Ref<AudioEffectInstance> AudioEffectDelay::instance() {
	Ref<AudioEffectDelayInstance> ins;
	ins.instance();
	ins->base=Ref<AudioEffectDelay>(this);

	float ring_buffer_max_size=MAX_DELAY_MS+100; //add 100ms of extra room, just in case
	ring_buffer_max_size/=1000.0;//convert to seconds
	ring_buffer_max_size*=AudioServer::get_singleton()->get_mix_rate();

	int ringbuff_size=ring_buffer_max_size;

	int bits=0;

	while(ringbuff_size>0) {
		bits++;
		ringbuff_size/=2;
	}

	ringbuff_size=1<<bits;
	ins->ring_buffer_mask=ringbuff_size-1;
	ins->ring_buffer_pos=0;

	ins->ring_buffer.resize( ringbuff_size );
	ins->feedback_buffer.resize( ringbuff_size );

	ins->feedback_buffer_pos=0;

	ins->h=AudioFrame(0,0);

	return ins;
}
Example #4
void AudioEffectEQInstance::process(const AudioFrame *p_src_frames, AudioFrame *p_dst_frames, int p_frame_count) {

	int band_count = bands[0].size();
	EQ::BandProcess *proc_l = bands[0].ptrw();
	EQ::BandProcess *proc_r = bands[1].ptrw();
	float *bgain = gains.ptrw();
	for (int i = 0; i < band_count; i++) {
		bgain[i] = Math::db2linear(base->gain[i]);
	}

	for (int i = 0; i < p_frame_count; i++) {

		AudioFrame src = p_src_frames[i];
		AudioFrame dst = AudioFrame(0, 0);

		for (int j = 0; j < band_count; j++) {

			float l = src.l;
			float r = src.r;

			proc_l[j].process_one(l);
			proc_r[j].process_one(r);

			dst.l += l * bgain[j];
			dst.r += r * bgain[j];
		}

		p_dst_frames[i] = dst;
	}
}
Example #5
Ref<AudioEffectInstance> AudioEffectPhaser::instance() {
	Ref<AudioEffectPhaserInstance> ins;
	ins.instance();
	ins->base = Ref<AudioEffectPhaser>(this);
	ins->phase = 0;
	ins->h = AudioFrame(0, 0);

	return ins;
}
Example #6
AudioFrame AudioFrame::clone() const
{
    Q_D(const AudioFrame);
    if (d->format.sampleFormatFFmpeg() == AV_SAMPLE_FMT_NONE
            || d->format.channels() <= 0)
        return AudioFrame();
    if (d->samples_per_ch <= 0 || bytesPerLine(0) <= 0)
        return AudioFrame(format());
    QByteArray buf(bytesPerLine()*planeCount(), 0);
    AudioFrame f(buf, d->format);
    f.setSamplesPerChannel(samplesPerChannel());
    char *dst = buf.data(); //must before buf is shared, otherwise data will be detached.
    for (int i = 0; i < f.planeCount(); ++i) {
        const int plane_size = f.bytesPerLine(i);
        memcpy(dst, constBits(i), plane_size); // copy this frame's plane, not the zero-filled clone's
        dst += plane_size;
    }
    f.setTimestamp(timestamp());
    // meta data?
    return f;
}
Example #7
AudioFrame AudioFrame::clone() const
{
    Q_D(const AudioFrame);
    if (!d->format.isValid())
        return AudioFrame();
    AudioFrame f(QByteArray(), d->format);
    f.setSamplesPerChannel(samplesPerChannel());
    f.allocate();
    // TODO: Frame.planes(), bytesPerLines()
    int nb_planes = f.planeCount();
    QVector<uchar*> dst(nb_planes);
    for (int i = 0; i < nb_planes; ++i) {
        dst[i] = f.bits(i);
    }
    av_samples_copy(dst.data(), d->planes.data(), 0, 0, samplesPerChannel(), d->format.channels(), (AVSampleFormat)d->format.sampleFormatFFmpeg());
    return f;
}
Example #8
AudioFrame *AudioServer::thread_get_channel_mix_buffer(int p_bus, int p_buffer) {

	ERR_FAIL_INDEX_V(p_bus, buses.size(), NULL);
	ERR_FAIL_INDEX_V(p_buffer, buses[p_bus]->channels.size(), NULL);

	AudioFrame *data = buses[p_bus]->channels[p_buffer].buffer.ptr();

	if (!buses[p_bus]->channels[p_buffer].used) {
		buses[p_bus]->channels[p_buffer].used = true;
		buses[p_bus]->channels[p_buffer].active = true;
		buses[p_bus]->channels[p_buffer].last_mix_with_audio = mix_frames;
		for (uint32_t i = 0; i < buffer_size; i++) {
			data[i] = AudioFrame(0, 0);
		}
	}

	return data;
}
Example #9
AudioFrame AudioDecoderFFmpeg::frame()
{
    DPTR_D(AudioDecoderFFmpeg);
    AudioFormat fmt;
    fmt.setSampleFormatFFmpeg(d.frame->format);
    fmt.setChannelLayoutFFmpeg(d.frame->channel_layout);
    fmt.setSampleRate(d.frame->sample_rate);
    if (!fmt.isValid()) {// need more data to decode to get a frame
        return AudioFrame();
    }
    AudioFrame f(fmt);
    //av_frame_get_pkt_duration ffmpeg
    f.setBits(d.frame->extended_data); // TODO: ref
    f.setBytesPerLine(d.frame->linesize[0], 0); // for correct alignment
    f.setSamplesPerChannel(d.frame->nb_samples);
    // TODO: ffplay check AVFrame.pts, pkt_pts, last_pts+nb_samples. move to AudioFrame::from(AVFrame*)
    f.setTimestamp((double)d.frame->pkt_pts/1000.0);
    f.setAudioResampler(d.resampler); // TODO: remove. it's not safe if frame is shared. use a pool or detach if ref >1
    return f;
}
Example #10
bool AudioRBResampler::mix(AudioFrame *p_dest, int p_frames) {

	if (!rb)
		return false;

	int32_t increment = (src_mix_rate * MIX_FRAC_LEN) / target_mix_rate;
	int read_space = get_reader_space();
	int target_todo = MIN(get_num_of_ready_frames(), p_frames);

	{
		int src_read = 0;
		switch (channels) {
			case 1: src_read = _resample<1>(p_dest, target_todo, increment); break;
			case 2: src_read = _resample<2>(p_dest, target_todo, increment); break;
			case 4: src_read = _resample<4>(p_dest, target_todo, increment); break;
			case 6: src_read = _resample<6>(p_dest, target_todo, increment); break;
		}

		if (src_read > read_space)
			src_read = read_space;

		rb_read_pos = (rb_read_pos + src_read) & rb_mask;

		// Create fadeout effect for the end of stream (note that it can be because of slow writer)
		if (p_frames - target_todo > 0) {
			for (int i = 0; i < target_todo; i++) {
				p_dest[i] = p_dest[i] * float(target_todo - i) / float(target_todo);
			}
		}

		// Fill zeros (silence) for the rest of frames
		for (int i = target_todo; i < p_frames; i++) {
			p_dest[i] = AudioFrame(0, 0);
		}
	}

	return true;
}
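Note: when fewer frames are ready than requested, the mixer above fades what it did produce down to zero and pads the rest with silence, avoiding a click on underrun. A stripped-down sketch of that ramp (hypothetical Frame type, not Godot API):

struct Frame {
	float l, r;
};

// Fade the 'produced' frames linearly to zero, then fill the remainder of
// the request with silence; mirrors the end-of-stream handling above.
static void fade_and_pad(Frame *dest, int produced, int requested) {
	if (produced >= requested)
		return; // enough data, nothing to mask
	for (int i = 0; i < produced; i++) {
		float gain = float(produced - i) / float(produced);
		dest[i].l *= gain;
		dest[i].r *= gain;
	}
	for (int i = produced; i < requested; i++) {
		dest[i] = Frame{ 0, 0 };
	}
}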
Example #11
void AudioEffectDelayInstance::_process_chunk(const AudioFrame *p_src_frames,AudioFrame *p_dst_frames,int p_frame_count) {

	float main_level_f=base->dry;

	float mix_rate = AudioServer::get_singleton()->get_mix_rate();

	float tap_1_level_f=base->tap_1_active?Math::db2linear(base->tap_1_level):0.0;
	int tap_1_delay_frames=int((base->tap_1_delay_ms/1000.0)*mix_rate);

	float tap_2_level_f=base->tap_2_active?Math::db2linear(base->tap_2_level):0.0;
	int tap_2_delay_frames=int((base->tap_2_delay_ms/1000.0)*mix_rate);

	float feedback_level_f=base->feedback_active?Math::db2linear(base->feedback_level):0.0;
	unsigned int feedback_delay_frames=int((base->feedback_delay_ms/1000.0)*mix_rate);

	AudioFrame tap1_vol=AudioFrame(tap_1_level_f,tap_1_level_f);

	tap1_vol.l*=CLAMP( 1.0 - base->tap_1_pan, 0, 1);
	tap1_vol.r*=CLAMP( 1.0 + base->tap_1_pan, 0, 1);

	AudioFrame tap2_vol=AudioFrame(tap_2_level_f,tap_2_level_f);

	tap2_vol.l*=CLAMP( 1.0 - base->tap_2_pan, 0, 1);
	tap2_vol.r*=CLAMP( 1.0 + base->tap_2_pan, 0, 1);

	// feedback lowpass here
	float lpf_c=expf(-2.0*Math_PI*base->feedback_lowpass/mix_rate); // 0 .. 10khz
	float lpf_ic=1.0-lpf_c;

	const AudioFrame *src=p_src_frames;
	AudioFrame *dst=p_dst_frames;
	AudioFrame *rb_buf=ring_buffer.ptr();
	AudioFrame *fb_buf=feedback_buffer.ptr();

	for (int i=0;i<p_frame_count;i++) {

		rb_buf[ring_buffer_pos&ring_buffer_mask]=src[i];

		AudioFrame main_val=src[i]*main_level_f;
		AudioFrame tap_1_val=rb_buf[(ring_buffer_pos-tap_1_delay_frames)&ring_buffer_mask]*tap1_vol;
		AudioFrame tap_2_val=rb_buf[(ring_buffer_pos-tap_2_delay_frames)&ring_buffer_mask]*tap2_vol;

		AudioFrame out=main_val+tap_1_val+tap_2_val;

		out+=fb_buf[ feedback_buffer_pos ];

		//apply lowpass and feedback gain
		AudioFrame fb_in=out*feedback_level_f*lpf_ic+h*lpf_c;
		fb_in.undenormalise(); //avoid denormals

		h=fb_in;
		fb_buf[ feedback_buffer_pos ]=fb_in;

		dst[i]=out;

		ring_buffer_pos++;

		if ( (++feedback_buffer_pos) >= feedback_delay_frames )
			feedback_buffer_pos=0;
	}
}
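Note: the feedback path above is smoothed by a one-pole lowpass; the coefficient c = exp(-2*pi*fc/fs) weights the previous output and (1 - c) weights the new input, exactly the lpf_c / lpf_ic pair computed before the loop. A self-contained sketch of that filter (hypothetical names):

#include <cmath>

// One-pole lowpass as used in the delay's feedback path:
// y[n] = x[n] * (1 - c) + y[n - 1] * c, with c = exp(-2*pi*fc/fs).
struct OnePoleLowpass {
	float c = 0.0f; // feedback coefficient
	float y = 0.0f; // previous output (filter state)

	void set_cutoff(float cutoff_hz, float mix_rate_hz) {
		c = std::exp(-2.0f * 3.14159265f * cutoff_hz / mix_rate_hz);
	}
	float process(float x) {
		y = x * (1.0f - c) + y * c;
		return y;
	}
};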
Example #12
uint32_t AudioRBResampler::_resample(AudioFrame *p_dest, int p_todo, int32_t p_increment) {

	uint32_t read = offset & MIX_FRAC_MASK;

	for (int i = 0; i < p_todo; i++) {

		offset = (offset + p_increment) & (((1 << (rb_bits + MIX_FRAC_BITS)) - 1));
		read += p_increment;
		uint32_t pos = offset >> MIX_FRAC_BITS;
		float frac = float(offset & MIX_FRAC_MASK) / float(MIX_FRAC_LEN);
		ERR_FAIL_COND_V(pos >= rb_len, 0);
		uint32_t pos_next = (pos + 1) & rb_mask;

		// since this is a template with a known compile time value (C), conditionals go away when compiling.
		if (C == 1) {

			float v0 = rb[pos];
			float v0n = rb[pos_next];
			v0 += (v0n - v0) * frac;
			p_dest[i] = AudioFrame(v0, v0);
		}

		if (C == 2) {

			float v0 = rb[(pos << 1) + 0];
			float v1 = rb[(pos << 1) + 1];
			float v0n = rb[(pos_next << 1) + 0];
			float v1n = rb[(pos_next << 1) + 1];

			v0 += (v0n - v0) * frac;
			v1 += (v1n - v1) * frac;
			p_dest[i] = AudioFrame(v0, v1);
		}

		// For now, channels higher than stereo are almost ignored
		if (C == 4) {

			float v0 = rb[(pos << 2) + 0];
			float v1 = rb[(pos << 2) + 1];
			float v2 = rb[(pos << 2) + 2];
			float v3 = rb[(pos << 2) + 3];
			float v0n = rb[(pos_next << 2) + 0];
			float v1n = rb[(pos_next << 2) + 1];
			float v2n = rb[(pos_next << 2) + 2];
			float v3n = rb[(pos_next << 2) + 3];

			v0 += (v0n - v0) * frac;
			v1 += (v1n - v1) * frac;
			v2 += (v2n - v2) * frac;
			v3 += (v3n - v3) * frac;
			p_dest[i] = AudioFrame(v0, v1);
		}

		if (C == 6) {

			float v0 = rb[(pos * 6) + 0];
			float v1 = rb[(pos * 6) + 1];
			float v0n = rb[(pos_next * 6) + 0];
			float v1n = rb[(pos_next * 6) + 1];

			// Only the first two channels fit in the stereo AudioFrame; the
			// other four are skipped for now (see the comment above C == 4).
			v0 += (v0n - v0) * frac;
			v1 += (v1n - v1) * frac;
			p_dest[i] = AudioFrame(v0, v1);
		}
	}

	return read >> MIX_FRAC_BITS; //rb_read_pos = offset >> MIX_FRAC_BITS;
}
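Note: the resampler walks the source in fixed point: the per-frame increment is src_rate * 2^FRAC_BITS / target_rate, the high bits index the ring buffer, and the low bits become the linear interpolation weight. A tiny sketch of the stepping math (constants are illustrative, not Godot's):

#include <cstdint>
#include <cstdio>

enum { FRAC_BITS = 13, FRAC_LEN = 1 << FRAC_BITS, FRAC_MASK = FRAC_LEN - 1 };

int main() {
	uint32_t src_rate = 44100, target_rate = 48000;
	// One output frame advances the source read head by ~0.919 source frames:
	uint32_t increment = (src_rate * FRAC_LEN) / target_rate;
	uint32_t offset = 0;
	for (int i = 0; i < 3; i++) {
		offset += increment;
		uint32_t pos = offset >> FRAC_BITS; // integer sample index
		float frac = float(offset & FRAC_MASK) / float(FRAC_LEN); // interpolation weight
		printf("frame %d: pos=%u frac=%.3f\n", i, pos, frac);
	}
	return 0;
}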
Example #13
void AudioServer::_mix_step() {

	for (int i = 0; i < buses.size(); i++) {
		Bus *bus = buses[i];
		bus->index_cache = i; //might be moved around by editor, so..
		for (int k = 0; k < bus->channels.size(); k++) {

			bus->channels[k].used = false;
		}
	}

	//make callbacks for mixing the audio
	for (Set<CallbackItem>::Element *E = callbacks.front(); E; E = E->next()) {

		E->get().callback(E->get().userdata);
	}

	for (int i = buses.size() - 1; i >= 0; i--) {
		//go bus by bus
		Bus *bus = buses[i];

		for (int k = 0; k < bus->channels.size(); k++) {

			if (bus->channels[k].active && !bus->channels[k].used) {
				//buffer was not used, but it's still active, so it must be cleaned
				AudioFrame *buf = bus->channels[k].buffer.ptr();

				for (uint32_t j = 0; j < buffer_size; j++) {

					buf[j] = AudioFrame(0, 0);
				}
			}
		}

		//process effects
		for (int j = 0; j < bus->effects.size(); j++) {

			if (!bus->effects[j].enabled)
				continue;

			for (int k = 0; k < bus->channels.size(); k++) {

				if (!bus->channels[k].active)
					continue;
				bus->channels[k].effect_instances[j]->process(bus->channels[k].buffer.ptr(), temp_buffer[k].ptr(), buffer_size);
			}

			//swap buffers, so internal buffer always has the right data
			for (int k = 0; k < bus->channels.size(); k++) {

				if (!buses[i]->channels[k].active)
					continue;
				SWAP(bus->channels[k].buffer, temp_buffer[k]);
			}
		}

		//process send

		Bus *send = NULL;

		if (i > 0) {
			//everything has a send save for master bus
			if (!bus_map.has(bus->send)) {
				send = buses[0];
			} else {
				send = bus_map[bus->send];
				if (send->index_cache >= bus->index_cache) { //invalid, send to master
					send = buses[0];
				}
			}
		}

		for (int k = 0; k < bus->channels.size(); k++) {

			if (!bus->channels[k].active)
				continue;

			AudioFrame *buf = bus->channels[k].buffer.ptr();

			AudioFrame peak = AudioFrame(0, 0);
			for (uint32_t j = 0; j < buffer_size; j++) {
				float l = ABS(buf[j].l);
				if (l > peak.l) {
					peak.l = l;
				}
				float r = ABS(buf[j].r);
				if (r > peak.r) {
					peak.r = r;
				}
			}

			bus->channels[k].peak_volume = AudioFrame(Math::linear2db(peak.l + 0.0000000001), Math::linear2db(peak.r + 0.0000000001));

			if (!bus->channels[k].used) {
				//see if any audio is contained, because channel was not used

				if (MAX(peak.r, peak.l) > Math::db2linear(channel_disable_treshold_db)) {
					bus->channels[k].last_mix_with_audio = mix_frames;
				} else if (mix_frames - bus->channels[k].last_mix_with_audio > channel_disable_frames) {
					bus->channels[k].active = false;
					continue; //went inactive, don't mix.
				}
			}

			if (send) {
				//if not master bus, send
				AudioFrame *target_buf = thread_get_channel_mix_buffer(send->index_cache, k);

				for (uint32_t j = 0; j < buffer_size; j++) {
					target_buf[j] += buf[j];
				}
			}
		}
	}

	mix_frames += buffer_size;
	to_mix = buffer_size;
}
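Note: the peak meter above adds a tiny epsilon (0.0000000001) before the dB conversion so a silent buffer yields a large negative reading instead of log(0). A one-line sketch of that conversion using the standard 20*log10 definition (not Godot's Math helper):

#include <cmath>

static float linear_to_db(float lin) {
	return 20.0f * std::log10(lin + 1e-10f); // epsilon keeps the log finite for silence
}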
Example #14
void AudioEffectChorusInstance::_process_chunk(const AudioFrame *p_src_frames, AudioFrame *p_dst_frames, int p_frame_count) {

	//fill ringbuffer
	for (int i = 0; i < p_frame_count; i++) {
		audio_buffer.write[(buffer_pos + i) & buffer_mask] = p_src_frames[i];
		p_dst_frames[i] = p_src_frames[i] * base->dry;
	}

	float mix_rate = AudioServer::get_singleton()->get_mix_rate();

	/* process voices */
	for (int vc = 0; vc < base->voice_count; vc++) {

		AudioEffectChorus::Voice &v = base->voice[vc];

		double time_to_mix = (float)p_frame_count / mix_rate;
		double cycles_to_mix = time_to_mix * v.rate;

		unsigned int local_rb_pos = buffer_pos;
		AudioFrame *dst_buff = p_dst_frames;
		AudioFrame *rb_buff = audio_buffer.ptrw();

		double delay_msec = v.delay;
		unsigned int delay_frames = Math::fast_ftoi((delay_msec / 1000.0) * mix_rate);
		float max_depth_frames = (v.depth / 1000.0) * mix_rate;

		uint64_t local_cycles = cycles[vc];
		uint64_t increment = llrint(cycles_to_mix / (double)p_frame_count * (double)(1 << AudioEffectChorus::CYCLES_FRAC));

		//check the LFO doesn't read ahead of the write pos
		if ((((unsigned int)max_depth_frames) + 10) > delay_frames) { //10 as some threshold to avoid precision stuff
			delay_frames += (int)max_depth_frames - delay_frames;
			delay_frames += 10; //threshold to avoid precision stuff
		}

		//low pass filter
		if (v.cutoff == 0)
			continue;
		float auxlp = expf(-2.0 * Math_PI * v.cutoff / mix_rate);
		float c1 = 1.0 - auxlp;
		float c2 = auxlp;
		AudioFrame h = filter_h[vc];
		if (v.cutoff >= AudioEffectChorus::MS_CUTOFF_MAX) {
			c1 = 1.0;
			c2 = 0.0;
		}

		//vol modifier

		AudioFrame vol_modifier = AudioFrame(base->wet, base->wet) * Math::db2linear(v.level);
		vol_modifier.l *= CLAMP(1.0 - v.pan, 0, 1);
		vol_modifier.r *= CLAMP(1.0 + v.pan, 0, 1);

		for (int i = 0; i < p_frame_count; i++) {

			/** COMPUTE WAVEFORM **/

			float phase = (float)(local_cycles & AudioEffectChorus::CYCLES_MASK) / (float)(1 << AudioEffectChorus::CYCLES_FRAC);

			float wave_delay = sinf(phase * 2.0 * Math_PI) * max_depth_frames;

			int wave_delay_frames = lrint(floor(wave_delay));
			float wave_delay_frac = wave_delay - (float)wave_delay_frames;

			/** COMPUTE RINGBUFFER POS**/

			unsigned int rb_source = local_rb_pos;
			rb_source -= delay_frames;

			rb_source -= wave_delay_frames;

			/** READ FROM RINGBUFFER, LINEARLY INTERPOLATE */

			AudioFrame val = rb_buff[rb_source & buffer_mask];
			AudioFrame val_next = rb_buff[(rb_source - 1) & buffer_mask];

			val += (val_next - val) * wave_delay_frac;

			val = val * c1 + h * c2;
			h = val;

			/** MIX VALUE TO OUTPUT **/

			dst_buff[i] += val * vol_modifier;

			local_cycles += increment;
			local_rb_pos++;
		}

		filter_h[vc] = h;
		cycles[vc] += Math::fast_ftoi(cycles_to_mix * (double)(1 << AudioEffectChorus::CYCLES_FRAC));
	}

	buffer_pos += p_frame_count;
}
Example #15
void AudioStreamPlayer2D::_notification(int p_what) {

	if (p_what == NOTIFICATION_ENTER_TREE) {

		AudioServer::get_singleton()->add_callback(_mix_audios, this);
		if (autoplay && !Engine::get_singleton()->is_editor_hint()) {
			play();
		}
	}

	if (p_what == NOTIFICATION_EXIT_TREE) {

		AudioServer::get_singleton()->remove_callback(_mix_audios, this);
	}

	if (p_what == NOTIFICATION_INTERNAL_FIXED_PROCESS) {

		//update anything related to position first, if possible of course

		if (!output_ready) {
			List<Viewport *> viewports;
			Ref<World2D> world_2d = get_world_2d();
			ERR_FAIL_COND(world_2d.is_null());

			int new_output_count = 0;

			Vector2 global_pos = get_global_position();

			int bus_index = AudioServer::get_singleton()->thread_find_bus_index(bus);

			//check if any area is diverting sound into a bus

			Physics2DDirectSpaceState *space_state = Physics2DServer::get_singleton()->space_get_direct_state(world_2d->get_space());

			Physics2DDirectSpaceState::ShapeResult sr[MAX_INTERSECT_AREAS];

			int areas = space_state->intersect_point(global_pos, sr, MAX_INTERSECT_AREAS, Set<RID>(), area_mask, Physics2DDirectSpaceState::TYPE_MASK_AREA);

			for (int i = 0; i < areas; i++) {

				Area2D *area2d = Object::cast_to<Area2D>(sr[i].collider);
				if (!area2d)
					continue;

				if (!area2d->is_overriding_audio_bus())
					continue;

				StringName bus_name = area2d->get_audio_bus_name();
				bus_index = AudioServer::get_singleton()->thread_find_bus_index(bus_name);
				break;
			}

			world_2d->get_viewport_list(&viewports);
			for (List<Viewport *>::Element *E = viewports.front(); E; E = E->next()) {

				Viewport *vp = E->get();
				if (vp->is_audio_listener_2d()) {

					//compute matrix to convert to screen
					Transform2D to_screen = vp->get_global_canvas_transform() * vp->get_canvas_transform();
					Vector2 screen_size = vp->get_visible_rect().size;

					//screen in global is used for attenuation
					Vector2 screen_in_global = to_screen.affine_inverse().xform(screen_size * 0.5);

					float dist = global_pos.distance_to(screen_in_global); //distance to screen center

					if (dist > max_distance)
						continue; //can't hear this sound in this viewport

					float multiplier = Math::pow(1.0f - dist / max_distance, attenuation);
					multiplier *= Math::db2linear(volume_db); //also apply player volume!

					//point in screen is used for panning
					Vector2 point_in_screen = to_screen.xform(global_pos);

					float pan = CLAMP(point_in_screen.x / screen_size.width, 0.0, 1.0);

					float l = 1.0 - pan;
					float r = pan;

					outputs[new_output_count].vol = AudioFrame(l, r) * multiplier;
					outputs[new_output_count].bus_index = bus_index;
					outputs[new_output_count].viewport = vp; //keep pointer only for reference
					new_output_count++;
					if (new_output_count == MAX_OUTPUTS)
						break;
				}
			}

			output_count = new_output_count;
			output_ready = true;
		}

		//start playing if requested
		if (setplay >= 0.0) {
			setseek = setplay;
			active = true;
			setplay = -1;
			//do not update, this makes it easier to animate (will shut off otherwise)
			//_change_notify("playing"); //update property in editor
		}

		//stop playing if no longer active
		if (!active) {
			set_fixed_process_internal(false);
			//do not update, this makes it easier to animate (will shut off otherwise)
			//_change_notify("playing"); //update property in editor
			emit_signal("finished");
		}
	}
}
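Note: the 2D player reduces positioning to two scalars: a pan taken from the sound's horizontal position on screen, and a distance attenuation pow(1 - dist / max_distance, attenuation). A condensed sketch of that volume computation (hypothetical Frame type, not Godot API):

#include <cmath>

struct Frame {
	float l, r;
};

// Condensed form of the panning/attenuation math above; volume_linear is
// the player volume already converted from dB.
static Frame compute_vol_2d(float screen_x, float screen_w, float dist, float max_dist, float attenuation, float volume_linear) {
	float pan = screen_x / screen_w;
	if (pan < 0) pan = 0;
	if (pan > 1) pan = 1;
	float mult = std::pow(1.0f - dist / max_dist, attenuation) * volume_linear;
	return Frame{ (1.0f - pan) * mult, pan * mult };
}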
Example #16
void AudioStreamPlayer3D::_notification(int p_what) {

	if (p_what == NOTIFICATION_ENTER_TREE) {

		velocity_tracker->reset(get_global_transform().origin);
		AudioServer::get_singleton()->add_callback(_mix_audios, this);
		if (autoplay && !Engine::get_singleton()->is_editor_hint()) {
			play();
		}
	}

	if (p_what == NOTIFICATION_EXIT_TREE) {

		AudioServer::get_singleton()->remove_callback(_mix_audios, this);
	}
	if (p_what == NOTIFICATION_TRANSFORM_CHANGED) {

		if (doppler_tracking != DOPPLER_TRACKING_DISABLED) {
			velocity_tracker->update_position(get_global_transform().origin);
		}
	}

	if (p_what == NOTIFICATION_INTERNAL_PHYSICS_PROCESS) {

		//update anything related to position first, if possible of course

		if (!output_ready) {

			Vector3 linear_velocity;

			//compute linear velocity for doppler
			if (doppler_tracking != DOPPLER_TRACKING_DISABLED) {
				linear_velocity = velocity_tracker->get_tracked_linear_velocity();
			}

			Ref<World> world = get_world();
			ERR_FAIL_COND(world.is_null());

			int new_output_count = 0;

			Vector3 global_pos = get_global_transform().origin;

			int bus_index = AudioServer::get_singleton()->thread_find_bus_index(bus);

			//check if any area is diverting sound into a bus

			PhysicsDirectSpaceState *space_state = PhysicsServer::get_singleton()->space_get_direct_state(world->get_space());

			PhysicsDirectSpaceState::ShapeResult sr[MAX_INTERSECT_AREAS];

			int areas = space_state->intersect_point(global_pos, sr, MAX_INTERSECT_AREAS, Set<RID>(), area_mask);
			Area *area = NULL;

			for (int i = 0; i < areas; i++) {
				if (!sr[i].collider)
					continue;

				Area *tarea = Object::cast_to<Area>(sr[i].collider);
				if (!tarea)
					continue;

				if (!tarea->is_overriding_audio_bus() && !tarea->is_using_reverb_bus())
					continue;

				area = tarea;
				break;
			}

			List<Camera *> cameras;
			world->get_camera_list(&cameras);

			for (List<Camera *>::Element *E = cameras.front(); E; E = E->next()) {

				Camera *camera = E->get();
				Viewport *vp = camera->get_viewport();
				if (!vp->is_audio_listener())
					continue;

				Vector3 local_pos = camera->get_global_transform().orthonormalized().affine_inverse().xform(global_pos);

				float dist = local_pos.length();

				Vector3 area_sound_pos;
				Vector3 cam_area_pos;

				if (area && area->is_using_reverb_bus() && area->get_reverb_uniformity() > 0) {
					area_sound_pos = space_state->get_closest_point_to_object_volume(area->get_rid(), camera->get_global_transform().origin);
					cam_area_pos = camera->get_global_transform().affine_inverse().xform(area_sound_pos);
				}

				if (max_distance > 0) {

					float total_max = max_distance;

					if (area && area->is_using_reverb_bus() && area->get_reverb_uniformity() > 0) {
						total_max = MAX(total_max, cam_area_pos.length());
					}
					if (total_max > max_distance) {
						continue; //can't hear this sound in this camera
					}
				}

				float multiplier = Math::db2linear(_get_attenuation_db(dist));
				if (max_distance > 0) {
					multiplier *= MAX(0, 1.0 - (dist / max_distance));
				}

				Output output;
				output.bus_index = bus_index;
				output.reverb_bus_index = -1; //no reverb by default
				output.viewport = vp;

				float db_att = (1.0 - MIN(1.0, multiplier)) * attenuation_filter_db;

				if (emission_angle_enabled) {
					Vector3 camtopos = global_pos - camera->get_global_transform().origin;
					float c = camtopos.normalized().dot(get_global_transform().basis.get_axis(2).normalized()); //it's z negative
					float angle = Math::rad2deg(Math::acos(c));
					if (angle > emission_angle)
						db_att -= -emission_angle_filter_attenuation_db;
				}

				output.filter_gain = Math::db2linear(db_att);

				Vector3 flat_pos = local_pos;
				flat_pos.y = 0;
				flat_pos.normalize();

				unsigned int cc = AudioServer::get_singleton()->get_channel_count();
				if (cc == 1) {
					// Stereo pair
					float c = flat_pos.x * 0.5 + 0.5;

					output.vol[0].l = 1.0 - c;
					output.vol[0].r = c;
				} else {
					Vector3 camtopos = global_pos - camera->get_global_transform().origin;
					float c = camtopos.normalized().dot(get_global_transform().basis.get_axis(2).normalized()); //it's z negative
					float angle = Math::rad2deg(Math::acos(c));
					float av = angle * (flat_pos.x < 0 ? -1 : 1) / 180.0;

					if (cc >= 1) {
						// Stereo pair
						float fl = Math::abs(1.0 - Math::abs(-0.8 - av));
						float fr = Math::abs(1.0 - Math::abs(0.8 - av));

						output.vol[0].l = fl;
						output.vol[0].r = fr;
					}

					if (cc >= 2) {
						// Center pair
						float center = 1.0 - Math::sin(Math::acos(c));

						output.vol[1].l = center;
						output.vol[1].r = center;
					}

					if (cc >= 3) {
						// Side pair
						float sl = Math::abs(1.0 - Math::abs(-0.4 - av));
						float sr = Math::abs(1.0 - Math::abs(0.4 - av));

						output.vol[2].l = sl;
						output.vol[2].r = sr;
					}

					if (cc >= 4) {
						// Rear pair
						float rl = Math::abs(1.0 - Math::abs(-0.2 - av));
						float rr = Math::abs(1.0 - Math::abs(0.2 - av));

						output.vol[3].l = rl;
						output.vol[3].r = rr;
					}
				}

				for (unsigned int k = 0; k < cc; k++) {
					output.vol[k] *= multiplier;
				}

				bool filled_reverb = false;
				int vol_index_max = AudioServer::get_singleton()->get_speaker_mode() + 1;

				if (area) {

					if (area->is_overriding_audio_bus()) {
						//override audio bus
						StringName bus_name = area->get_audio_bus();
						output.bus_index = AudioServer::get_singleton()->thread_find_bus_index(bus_name);
					}

					if (area->is_using_reverb_bus()) {

						filled_reverb = true;
						StringName bus_name = area->get_reverb_bus();
						output.reverb_bus_index = AudioServer::get_singleton()->thread_find_bus_index(bus_name);

						float uniformity = area->get_reverb_uniformity();
						float area_send = area->get_reverb_amount();

						if (uniformity > 0.0) {

							float distance = cam_area_pos.length();
							float attenuation = Math::db2linear(_get_attenuation_db(distance));

							//float dist_att_db = -20 * Math::log(dist + 0.00001); //logarithmic attenuation, like in real life

							float center_val[3] = { 0.5, 0.25, 0.16666 };
							AudioFrame center_frame(center_val[vol_index_max - 1], center_val[vol_index_max - 1]);

							if (attenuation < 1.0) {
								//pan the uniform sound
								Vector3 rev_pos = cam_area_pos;
								rev_pos.y = 0;
								rev_pos.normalize();

								if (cc >= 1) {
									// Stereo pair
									float c = rev_pos.x * 0.5 + 0.5;
									output.reverb_vol[0].l = 1.0 - c;
									output.reverb_vol[0].r = c;
								}

								if (cc >= 3) {
									// Center pair + Side pair
									float xl = Vector3(-1, 0, -1).normalized().dot(rev_pos) * 0.5 + 0.5;
									float xr = Vector3(1, 0, -1).normalized().dot(rev_pos) * 0.5 + 0.5;

									output.reverb_vol[1].l = xl;
									output.reverb_vol[1].r = xr;
									output.reverb_vol[2].l = 1.0 - xr;
									output.reverb_vol[2].r = 1.0 - xl;
								}

								if (cc >= 4) {
									// Rear pair
									// FIXME: Not sure what math should be done here
									float c = rev_pos.x * 0.5 + 0.5;
									output.reverb_vol[3].l = 1.0 - c;
									output.reverb_vol[3].r = c;
								}

								for (int i = 0; i < vol_index_max; i++) {

									output.reverb_vol[i] = output.reverb_vol[i].linear_interpolate(center_frame, attenuation);
								}
							} else {
								for (int i = 0; i < vol_index_max; i++) {

									output.reverb_vol[i] = center_frame;
								}
							}

							for (int i = 0; i < vol_index_max; i++) {

								output.reverb_vol[i] = output.vol[i].linear_interpolate(output.reverb_vol[i] * attenuation, uniformity);
								output.reverb_vol[i] *= area_send;
							}

						} else {

							for (int i = 0; i < vol_index_max; i++) {

								output.reverb_vol[i] = output.vol[i] * area_send;
							}
						}
					}
				}

				if (doppler_tracking != DOPPLER_TRACKING_DISABLED) {

					Vector3 camera_velocity = camera->get_doppler_tracked_velocity();

					Vector3 local_velocity = camera->get_global_transform().orthonormalized().basis.xform_inv(linear_velocity - camera_velocity);

					if (local_velocity == Vector3()) {
						output.pitch_scale = 1.0;
					} else {
						float approaching = local_pos.normalized().dot(local_velocity.normalized());
						float velocity = local_velocity.length();
						float speed_of_sound = 343.0;

						output.pitch_scale = speed_of_sound / (speed_of_sound + velocity * approaching);
						output.pitch_scale = CLAMP(output.pitch_scale, (1 / 8.0), 8.0); //avoid crazy stuff
					}

				} else {
					output.pitch_scale = 1.0;
				}

				if (!filled_reverb) {

					for (int i = 0; i < vol_index_max; i++) {

						output.reverb_vol[i] = AudioFrame(0, 0);
					}
				}

				outputs[new_output_count] = output;
				new_output_count++;
				if (new_output_count == MAX_OUTPUTS)
					break;
			}

			output_count = new_output_count;
			output_ready = true;
		}

		//start playing if requested
		if (setplay >= 0.0) {
			setseek = setplay;
			active = true;
			setplay = -1;
			//do not update, this makes it easier to animate (will shut off otherwise)
			//_change_notify("playing"); //update property in editor
		}

		//stop playing if no longer active
		if (!active) {
			set_physics_process_internal(false);
			//do not update, this makes it easier to animate (will shut off otherwise)
			//_change_notify("playing"); //update property in editor
			emit_signal("finished");
		}
	}
}
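Note: the Doppler block in Example #16 scales pitch by speed_of_sound / (speed_of_sound + v * approaching), where 'approaching' is the dot product of the normalized listener-relative position and velocity; it is positive when the source recedes, so pitch drops. A standalone sketch of just that factor:

// Doppler pitch factor as computed above; 'approaching' is
// local_pos.normalized().dot(local_velocity.normalized()).
static float doppler_pitch_scale(float velocity, float approaching, float speed_of_sound = 343.0f) {
	float scale = speed_of_sound / (speed_of_sound + velocity * approaching);
	if (scale < 1.0f / 8.0f) scale = 1.0f / 8.0f; // clamp to avoid extreme pitch
	if (scale > 8.0f) scale = 8.0f;
	return scale;
}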