JNIEXPORT jshortArray JNICALL Java_com_jabber_audio_encoder_AudioWebrtcAecm_yy_1webrtc_1aecm_1process
  (JNIEnv *env, jobject obj, jint handle, jshortArray nearendNoisy, jshortArray nearendClean){
	//get handle (the jint carries the native AECM instance pointer; assumes 32-bit pointers, a jlong handle would be safer on 64-bit)
	void* AECM_instance = (void*) handle;
	//create native shorts from java shorts
	jshort *native_nearend_noisy = (*env)->GetShortArrayElements(env, nearendNoisy, NULL);
	jshort *native_nearend_clean = (*env)->GetShortArrayElements(env, nearendClean, NULL);
	//allocate memory for output data
	jint length = (*env)->GetArrayLength(env, nearendNoisy);
	jshortArray temp = (*env)->NewShortArray(env, length);
	jshort *native_output_frame = (*env)->GetShortArrayElements(env, temp, 0);
//	int32_t WebRtcAecm_BufferFarend(void* aecmInst,
//	                                const int16_t* farend,
//	                                int16_t nrOfSamples);
	/* NOTE: a zeroed buffer is passed as the far-end reference here, so the AECM has
	 * nothing real to cancel against; the actual far-end (playback) frame should be
	 * buffered for echo cancellation to be effective. */
	short farend[160] = {0};
	if( 0 != WebRtcAecm_BufferFarend(AECM_instance, farend, 160) ){
		__android_log_print(ANDROID_LOG_ERROR, TAG, "WebRtcAecm_BufferFarend failed");
	}
//	int32_t WebRtcAecm_Process(void* aecmInst,
//	                           const int16_t* nearendNoisy,
//	                           const int16_t* nearendClean,
//	                           int16_t* out,
//	                           int16_t nrOfSamples,
//	                           int16_t msInSndCardBuf);
	if( 0 != WebRtcAecm_Process(AECM_instance, native_nearend_noisy, native_nearend_clean,
	                            native_output_frame, 160, 0) ){  /* 160 samples = one 10 ms frame at 16 kHz */
		__android_log_print(ANDROID_LOG_ERROR, TAG, "WebRtcAecm_Process failed");
	}


	//convert native output to java layer output
	jshortArray output_shorts = (*env)->NewShortArray(env, length);
	(*env)->SetShortArrayRegion(env, output_shorts, 0, length, native_output_frame);

	//cleanup and return (mode 0 copies any changes back and releases the pinned arrays)
	(*env)->ReleaseShortArrayElements(env, nearendNoisy, native_nearend_noisy, 0);
	(*env)->ReleaseShortArrayElements(env, nearendClean, native_nearend_clean, 0);
	(*env)->ReleaseShortArrayElements(env, temp, native_output_frame, 0);

	return output_shorts;

}
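
For context, the JNI wrapper above assumes that an AECM instance was created and initialized elsewhere and that its pointer was handed to the Java layer as the integer handle. A minimal native-side sketch of that lifecycle follows; it uses the legacy WebRtcAecm_Create/WebRtcAecm_Init/WebRtcAecm_Free entry points, and the exact signatures and header path vary between WebRTC revisions, so treat the details as assumptions rather than the wrapper's actual companion code.

#include <stdint.h>
#include "echo_control_mobile.h"  /* WebRTC AECM public header; the include path depends on the tree */

/* Hypothetical helper: create and initialize an AECM instance for 16 kHz audio
 * (matching the 160-sample frames used by the wrapper above). */
static void* create_aecm_handle(void)
{
	void* aecm = NULL;
	if (WebRtcAecm_Create(&aecm) != 0)   /* legacy signature; newer trees return the pointer directly */
		return NULL;
	if (WebRtcAecm_Init(aecm, 16000) != 0) {
		WebRtcAecm_Free(aecm);
		return NULL;
	}
	return aecm;
}

/* Hypothetical per-frame helper: buffer the real far-end (playback) frame,
 * then cancel it from the near-end capture. Returns 0 on success. */
static int aecm_cancel_frame(void* aecm, const int16_t* farend,
                             const int16_t* nearend, int16_t* out)
{
	if (WebRtcAecm_BufferFarend(aecm, farend, 160) != 0)
		return -1;
	/* msInSndCardBuf is 0 here; a realistic soundcard delay estimate improves cancellation. */
	if (WebRtcAecm_Process(aecm, nearend, NULL, out, 160, 0) != 0)
		return -1;
	return 0;
}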
Example No. 2
/*	inputs[0]  = reference signal from the far end (sent to the soundcard)
 *	inputs[1]  = near-end speech & echo signal (read from the soundcard)
 *	outputs[0] = a copy of inputs[0], to be sent to the soundcard
 *	outputs[1] = near-end speech with the echo removed, sent towards the far end
 */
static void webrtc_aec_process(MSFilter *f)
{
	WebRTCAECState *s = (WebRTCAECState *) f->data;
	int nbytes = s->framesize * 2;	/* bytes per frame of 16-bit samples */
	mblk_t *refm;
	uint8_t *ref, *echo;

	if (s->bypass_mode) {
		while ((refm = ms_queue_get(f->inputs[0])) != NULL) {
			ms_queue_put(f->outputs[0], refm);
		}
		while ((refm = ms_queue_get(f->inputs[1])) != NULL) {
			ms_queue_put(f->outputs[1], refm);
		}
		return;
	}

	if (f->inputs[0] != NULL) {
		if (s->echostarted) {
			while ((refm = ms_queue_get(f->inputs[0])) != NULL) {
				refm = ms_audio_flow_controller_process(&s->afc, refm);
				if (refm) {
					mblk_t *cp = dupmsg(refm);
					ms_bufferizer_put(&s->delayed_ref, cp);
					ms_bufferizer_put(&s->ref, refm);
				}
			}
		} else {
			ms_warning("Getting reference signal but no echo to synchronize on.");
			ms_queue_flush(f->inputs[0]);
		}
	}

	ms_bufferizer_put_from_queue(&s->echo, f->inputs[1]);

	ref = (uint8_t *) alloca(nbytes);
	echo = (uint8_t *) alloca(nbytes);
	while (ms_bufferizer_read(&s->echo, echo, nbytes) >= nbytes) {
		mblk_t *oecho = allocb(nbytes, 0);
		int avail;
		int avail_samples;

		if (!s->echostarted) s->echostarted = TRUE;
		if ((avail = ms_bufferizer_get_avail(&s->delayed_ref)) < ((s->nominal_ref_samples * 2) + nbytes)) {
			/*we don't have enough to read in a reference signal buffer, inject silence instead*/
			refm = allocb(nbytes, 0);
			memset(refm->b_wptr, 0, nbytes);
			refm->b_wptr += nbytes;
			ms_bufferizer_put(&s->delayed_ref, refm);
			ms_queue_put(f->outputs[0], dupmsg(refm));
			if (!s->using_zeroes) {
				ms_warning("Not enough ref samples, using zeroes");
				s->using_zeroes = TRUE;
			}
		} else {
			if (s->using_zeroes) {
				ms_message("Samples are back.");
				s->using_zeroes = FALSE;
			}
			/* read from our no-delay buffer and output */
			refm = allocb(nbytes, 0);
			if (ms_bufferizer_read(&s->ref, refm->b_wptr, nbytes) == 0) {
				ms_fatal("Should never happen");
			}
			refm->b_wptr += nbytes;
			ms_queue_put(f->outputs[0], refm);
		}

		/*now read a valid buffer of delayed ref samples*/
		if (ms_bufferizer_read(&s->delayed_ref, ref, nbytes) == 0) {
			ms_fatal("Should never happen");
		}
		avail -= nbytes;
		avail_samples = avail / 2;
		if (avail_samples < s->min_ref_samples || s->min_ref_samples == -1) {
			s->min_ref_samples = avail_samples;
		}

#ifdef EC_DUMP
		if (s->reffile)
			fwrite(ref, nbytes, 1, s->reffile);
		if (s->echofile)
			fwrite(echo, nbytes, 1, s->echofile);
#endif
		if (WebRtcAecm_BufferFarend(s->aecmInst, (const WebRtc_Word16 *) ref, s->framesize)!=0)
			ms_error("WebRtcAecm_BufferFarend() failed.");
		if (WebRtcAecm_Process(s->aecmInst, (const WebRtc_Word16 *) echo, NULL, (WebRtc_Word16 *) oecho->b_wptr, s->framesize, 0)!=0)
			ms_error("WebRtcAecm_Process() failed.");
#ifdef EC_DUMP
		if (s->cleanfile)
			fwrite(oecho->b_wptr, nbytes, 1, s->cleanfile);
#endif
		oecho->b_wptr += nbytes;
		ms_queue_put(f->outputs[1], oecho);
	}

	/*verify our ref buffer does not become too big, meaning that we are receiving more samples than we are sending*/
	if ((((uint32_t) (f->ticker->time - s->flow_control_time)) >= flow_control_interval_ms) && (s->min_ref_samples != -1)) {
		int diff = s->min_ref_samples - s->nominal_ref_samples;
		if (diff > (nbytes / 2)) {
			int purge = diff - (nbytes / 2);
			ms_warning("echo canceller: we are accumulating too much reference signal, need to throw out %i samples", purge);
			ms_audio_flow_controller_set_target(&s->afc, purge, (flow_control_interval_ms * s->samplerate) / 1000);
		}
		s->min_ref_samples = -1;
		s->flow_control_time = f->ticker->time;
	}
}
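
The flow-control block at the end of webrtc_aec_process asks the audio flow controller to drop the excess reference samples spread over one control interval. A small standalone sketch of that arithmetic is below; the values for flow_control_interval_ms, the sample rate, the frame size, and the measured buffer levels are hypothetical, chosen only to illustrate the computation.

#include <stdio.h>

/* Standalone illustration of the purge computation above (all values are hypothetical). */
int main(void)
{
	const int flow_control_interval_ms = 5000;   /* assumed control window */
	const int samplerate = 8000;                 /* 8 kHz narrowband */
	const int framesize = 80;                    /* samples per 10 ms frame */
	const int nbytes = framesize * 2;            /* bytes per frame of 16-bit samples */

	int min_ref_samples = 400;      /* smallest delayed-ref backlog seen during the window */
	int nominal_ref_samples = 160;  /* backlog we want to keep as a safety margin */

	int diff = min_ref_samples - nominal_ref_samples;
	if (diff > nbytes / 2) {
		int purge = diff - nbytes / 2;
		int window_samples = (flow_control_interval_ms * samplerate) / 1000;
		printf("drop %d samples spread over the next %d reference samples\n",
		       purge, window_samples);
	}
	return 0;
}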
Example No. 3
int KotiAEC_process(const int16_t* farend, const int16_t* nearend, int16_t* out)
{
    int ret = -1, i = 0, frame_size = 0;
    switch(aec_core_used)
    {
#ifdef WEBRTC_AEC_CORE_ENABLED
    case WEBRTC_AEC:
        if(farend)
            WebRtcAec_BufferFarend(webrtc_aec_pty.webrtc_aec, farend, webrtc_aec_pty.frame_size);
        if(!WebRtcAec_Process(webrtc_aec_pty.webrtc_aec, nearend, NULL, out, NULL, webrtc_aec_pty.frame_size,
                                   webrtc_aec_pty.sndcard_delay_ms, 0))
        {
            ret = 0;
        }

        if(webrtc_aec_pty.webrtc_ns)
        {
            WebRtcNsx_Process((NsxHandle*)webrtc_aec_pty.webrtc_ns, out, NULL, out, NULL);
            if(webrtc_aec_pty.frame_size == 160)
                WebRtcNsx_Process((NsxHandle*)webrtc_aec_pty.webrtc_ns, out+80, NULL, out+80, NULL);
        }

        if(webrtc_aec_pty.webrtc_agc)
        {
            int32_t out_c; uint8_t warn_status;
            WebRtcAgc_Process(webrtc_aec_pty.webrtc_agc, out, NULL, webrtc_aec_pty.frame_size, out, NULL, 32,
                              &out_c, 1, &warn_status);
        }

        frame_size = webrtc_aec_pty.frame_size;
        break;
    case WEBRTC_AECM:
        memcpy(proc_tmp_buf, nearend, webrtc_aecm_pty.frame_size*2);  /* work on a copy of the near-end frame (2 bytes per sample) */
        if(webrtc_aecm_pty.webrtc_ns)
        {
            WebRtcNsx_Process((NsxHandle*)webrtc_aecm_pty.webrtc_ns, proc_tmp_buf, NULL, proc_tmp_buf, NULL);
            if(webrtc_aecm_pty.frame_size == 160)
                WebRtcNsx_Process((NsxHandle*)webrtc_aecm_pty.webrtc_ns, proc_tmp_buf+80, NULL, proc_tmp_buf+80, NULL);
        }

        if(webrtc_aecm_pty.webrtc_agc)
        {
            int32_t out_c; uint8_t warn_status;
            WebRtcAgc_Process(webrtc_aecm_pty.webrtc_agc, proc_tmp_buf, NULL, webrtc_aecm_pty.frame_size, proc_tmp_buf, NULL, 32,
                              &out_c, 1, &warn_status);
        }

        // echo cancellation on the NS/AGC pre-processed near end
        if(farend)
            WebRtcAecm_BufferFarend(webrtc_aecm_pty.webrtc_aec, farend, webrtc_aecm_pty.frame_size);
        if(!WebRtcAecm_Process(webrtc_aecm_pty.webrtc_aec, proc_tmp_buf, NULL, out, webrtc_aecm_pty.frame_size,
                              webrtc_aecm_pty.sndcard_delay_ms))
        {
            ret = 0;
        }

        frame_size = webrtc_aecm_pty.frame_size;
        break;
#endif
    case SPEEX_AEC:
    default:
#ifdef OLD_SPEEX_AEC
        speex_echo_cancel((SpeexEchoState*)speex_aec_pty.speex_echo_state, nearend, farend, out, speex_aec_pty.nosie);
        if(speex_preprocess((SpeexPreprocessState*)speex_aec_pty.speex_preprocess_state, out, speex_aec_pty.nosie) == 1)
            ret = 0;
#else
        if(farend)
            speex_echo_cancellation((SpeexEchoState*)speex_aec_pty.speex_echo_state, nearend, farend, out);
        else
            speex_echo_capture((SpeexEchoState*)speex_aec_pty.speex_echo_state, nearend, out);
//        speex_preprocess_estimate_update((SpeexPreprocessState*)speex_aec_pty.speex_preprocess_state, out);
        if(speex_preprocess_run((SpeexPreprocessState*)speex_aec_pty.speex_preprocess_state, out) == 1)
            ret = 0;
#endif

        frame_size = speex_aec_pty.frame_size;
        break;
    }

    // amplify the output if requested (clamp to the int16 range to avoid overflow)
    if(output_sound_amplification != 1.0f && output_sound_amplification > 0)
    {
        for(; i<frame_size; ++i)
        {
            float amplified = out[i] * output_sound_amplification;
            if(amplified > 32767.0f) amplified = 32767.0f;
            else if(amplified < -32768.0f) amplified = -32768.0f;
            out[i] = (int16_t)amplified;
        }
    }

    return ret;
}
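
A minimal sketch of how a capture path might drive KotiAEC_process is shown below; the frame size, the buffer names, and the fall-back-on-failure policy are assumptions for illustration, not part of the library.

#include <stdint.h>
#include <string.h>

#define FRAME_SIZE 160  /* assumed: one 10 ms frame at 16 kHz */

/* Hypothetical capture-loop step: feed the most recently played far-end frame together
 * with the captured near-end frame, and forward the echo-cancelled output. */
void capture_loop_iteration(const int16_t* played_frame, const int16_t* captured_frame,
                            int16_t* send_frame)
{
	if (KotiAEC_process(played_frame, captured_frame, send_frame) != 0) {
		/* On failure, fall back to the raw capture so audio keeps flowing. */
		memcpy(send_frame, captured_frame, FRAME_SIZE * sizeof(int16_t));
	}
}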