/*
 * Notify the echo canceller that a frame has been captured from the
 * microphone. Echo is cancelled in-place on rec_frm, using the oldest
 * reference frame stored in the latency buffer.
 */
PJ_DEF(pj_status_t) pjmedia_echo_capture( pjmedia_echo_state *echo,
                                          pj_int16_t *rec_frm,
                                          unsigned options )
{
    struct frame *ref_frm;
    pj_status_t ec_status, get_rc;

    /* Until the desired latency has been built up there is no reference
     * frame to cancel against yet, so just pass the frame through.
     */
    if (!echo->lat_ready) {
        PJ_LOG(5,(echo->obj_name, "Prefetching.."));
        return PJ_SUCCESS;
    }

    /* Take the oldest reference frame out of the latency buffer */
    ref_frm = echo->lat_buf.next;
    pj_list_erase(ref_frm);

    /* Cancel echo on the captured frame using this reference frame */
    ec_status = pjmedia_echo_cancel(echo, rec_frm, ref_frm->buf,
                                    options, NULL);

    /* Refill: move one frame from the delay buffer into the slot we
     * just consumed, then return the slot to the latency buffer.
     */
    get_rc = pjmedia_delay_buf_get(echo->delay_buf, ref_frm->buf);
    if (get_rc != PJ_SUCCESS) {
        /* Ooops.. no frame! */
        PJ_LOG(5,(echo->obj_name,
                  "No frame from delay buffer. This will upset EC later"));
        pjmedia_zero_samples(ref_frm->buf, echo->samples_per_frame);
    }
    pj_list_push_back(&echo->lat_buf, ref_frm);

    return ec_status;
}
/*
 * Let the Echo Canceller knows that a frame has been captured from
 * the microphone.
 *
 * The captured frame (rec_frm, 'size' samples) is high-pass filtered,
 * then echo-cancelled in place against the oldest reference frame from
 * the latency buffer using the Speex AEC. Returns PJ_SUCCESS, or -1 when
 * 'size' does not match the configured samples_per_frame.
 */
pj_status_t pjs_echo_canceller::capture(pj_int16_t *rec_frm, unsigned size)
{
    struct frame *oldest_frm;
    pj_status_t status, rc;

    /* Reject frames whose length disagrees with the configured frame size. */
    if(samples_per_frame!=size) {
        PJ_LOG(1, (THIS_FILE, "WRONG SIZE ON CAPTURE %d != %d",size,samples_per_frame));
        return -1;
    }

    /* Run the captured samples through two cascaded high-pass filters
     * before echo cancellation.
     * NOTE(review): presumably DC / low-frequency removal — semantics of
     * hp00/hp0 are defined elsewhere; confirm against their declaration.
     */
    for (unsigned i = 0; i < samples_per_frame; i++) {
        REAL f = hp00.highpass(rec_frm[i]);
        f = hp0.highpass(f);
        rec_frm[i] = round(f);
    }

    /* Scoped guard: the lock is held from here until return (with an
     * explicit release/acquire window around the Speex processing below).
     */
    PPJ_WaitAndLock wl(*lock);

    if (!lat_ready) {
        /* Prefetching to fill in the desired latency */
        PJ_LOG(4, (THIS_FILE, "Prefetching.."));
        return PJ_SUCCESS;
    }

    /* Retrieve oldest frame from the latency buffer */
    oldest_frm = lat_buf.next;
    pj_list_erase(oldest_frm);

    /* oldest_frm is now unlinked from the shared list, so the lock can be
     * dropped while the (potentially expensive) Speex processing runs.
     * NOTE(review): wl's destructor releases the lock again on return, so
     * the explicit acquire() below must stay paired with this release().
     */
    lock->release();

    /* Cancel echo: rec_frm is the near-end signal, oldest_frm->buf the
     * far-end reference; the cleaned signal is written into tmp_frame.
     */
    speex_echo_cancellation(state, (const spx_int16_t*)rec_frm,
                            (const spx_int16_t*)oldest_frm->buf,
                            (spx_int16_t*)tmp_frame);
    /* Preprocess output */
    speex_preprocess_run(preprocess, (spx_int16_t*)tmp_frame);
    /* Copy the processed samples back into the caller's buffer. */
    pjmedia_copy_samples(rec_frm, tmp_frame, samples_per_frame);
    status = PJ_SUCCESS;

    /* Cancel echo using this reference frame */
    lock->acquire();

    /* Move one frame from delay buffer to the latency buffer. */
    rc = pjmedia_delay_buf_get(delay_buf, oldest_frm->buf);
    if (rc != PJ_SUCCESS) {
        /* Ooops.. no frame! */
        PJ_LOG(4, (THIS_FILE, "No frame from delay buffer. This will upset EC later"));
        pjmedia_zero_samples(oldest_frm->buf, samples_per_frame);
    }
    pj_list_push_back(&lat_buf, oldest_frm);

    return status;
}
/* Play cb: feed the sound device from the file-level delay buffer and
 * count the playback callbacks.
 */
static pj_status_t play_cb(void *user_data, pj_uint32_t timestamp, void *output, unsigned size)
{
    PJ_UNUSED_ARG(size);
    PJ_UNUSED_ARG(timestamp);
    PJ_UNUSED_ARG(user_data);

    /* Pull one frame from the delay buffer straight into the
     * playback buffer.
     */
    pjmedia_delay_buf_get(delaybuf, (pj_int16_t*)output);

    play_cnt++;
    return PJ_SUCCESS;
}
/*
 * Notify the echo canceller that a frame has just been played to the
 * speaker. The frame is queued (through the delay buffer) so that it can
 * later serve as the reference signal in pjmedia_echo_capture().
 */
PJ_DEF(pj_status_t) pjmedia_echo_playback( pjmedia_echo_state *echo,
                                           pj_int16_t *play_frm )
{
    /* If EC algo has playback handler, just pass the frame. */
    if (echo->op->ec_playback) {
        return (*echo->op->ec_playback)(echo->state, play_frm);
    }

    /* The playing frame must be stored for later use as a reference frame
     * by echo_capture(); the delay buffer absorbs any clock drift between
     * mic & speaker.
     *
     * Ticket #830:
     * pjmedia_delay_buf_put() may modify the frame it is given, and such
     * modified frames may not be smooth (e.g. when two or more consecutive
     * pjmedia_delay_buf_get() happen before the next put). So feed the
     * delay buffer a copy of the playing frame instead of the original,
     * at the cost of the EC seeing slightly 'different' reference frames
     * than what the speaker actually played.
     */
    pjmedia_copy_samples(echo->frm_buf, play_frm, echo->samples_per_frame);
    pjmedia_delay_buf_put(echo->delay_buf, echo->frm_buf);

    if (echo->lat_ready)
        return PJ_SUCCESS;

    /* Latency is still being built up: claim a free slot and park this
     * frame in the latency buffer list.
     */
    if (pj_list_empty(&echo->lat_free)) {
        /* No free slot left: the desired latency has been reached. */
        echo->lat_ready = PJ_TRUE;
        PJ_LOG(5,(echo->obj_name, "Latency bufferring complete"));
        return PJ_SUCCESS;
    }

    {
        struct frame *slot = echo->lat_free.prev;
        pj_list_erase(slot);

        /* Move one frame from the delay buffer into the latency buffer. */
        pjmedia_delay_buf_get(echo->delay_buf, echo->frm_buf);
        pjmedia_copy_samples(slot->buf, echo->frm_buf,
                             echo->samples_per_frame);
        pj_list_push_back(&echo->lat_buf, slot);
    }

    return PJ_SUCCESS;
}
/*
 * Notify the echo canceller that a frame has been played to the speaker.
 * The frame is queued for later use as the reference signal in capture().
 */
pj_status_t pjs_echo_canceller::playback(pj_int16_t *play_frm, unsigned size)
{
    /* Reject frames whose length disagrees with the configured size. */
    if (samples_per_frame != size) {
        PJ_LOG(1, (THIS_FILE, "WRONG SIZE ON PLAYBACK %d != %d",size,samples_per_frame));
        return -1;
    }

    PPJ_WaitAndLock wl(*lock);

    /* The playing frame must be stored, as it will be used by capture()
     * as reference frame; the delay buffer absorbs clock drift between
     * mic & speaker.
     *
     * Ticket #830: pjmedia_delay_buf_put() may modify its input, and the
     * modified frames may not be smooth (e.g. after two or more
     * consecutive pjmedia_delay_buf_get() before the next put), so feed
     * the delay buffer with a copy of the playing frame rather than the
     * original. The EC will thus use slightly 'different' reference
     * frames than what was actually played by the speaker.
     */
    pjmedia_copy_samples(frm_buf, play_frm, samples_per_frame);
    pjmedia_delay_buf_put(delay_buf, frm_buf);

    if (lat_ready)
        return PJ_SUCCESS;

    /* Latency is still being built up: stash this frame in the latency
     * buffer list.
     */
    if (pj_list_empty(&lat_free)) {
        /* No free slot left: the desired latency has been reached. */
        lat_ready = PJ_TRUE;
        PJ_LOG(4, (THIS_FILE, "Latency bufferring complete"));
        return PJ_SUCCESS;
    }

    struct frame *slot = lat_free.prev;
    pj_list_erase(slot);

    /* Move one frame from the delay buffer into the latency buffer. */
    pjmedia_delay_buf_get(delay_buf, frm_buf);
    pjmedia_copy_samples(slot->buf, frm_buf, samples_per_frame);
    pj_list_push_back(&lat_buf, slot);

    return PJ_SUCCESS;
}
/* Play cb: supply either a PCM frame pulled from the delay buffer, or a
 * copy of the stored extended frame, depending on the configured format.
 */
static pj_status_t play_cb(void *user_data, pjmedia_frame *frame)
{
    PJ_UNUSED_ARG(user_data);

    if (param.ext_fmt.id != PJMEDIA_FORMAT_PCM) {
        /* Extended format: replay the stored extended frame. */
        pjmedia_frame_ext *src = (pjmedia_frame_ext*)frame_buf;
        pjmedia_frame_ext *dst = (pjmedia_frame_ext*)frame;
        copy_frame_ext(dst, src);
    } else {
        /* PCM: fetch one frame worth of samples from the delay buffer. */
        pjmedia_delay_buf_get(delaybuf, (pj_int16_t*)frame->buf);
        frame->type = PJMEDIA_FRAME_TYPE_AUDIO;
        frame->size = SAMPLES_PER_FRAME*2;
    }

    play_cnt++;
    return PJ_SUCCESS;
}
/* Get a mono frame from a reversed phase channel (downstream direction).
 * The frame was deposited by a put_frame() call on the splitcomb.
 */
static pj_status_t rport_get_frame(pjmedia_port *this_port,
                                   pjmedia_frame *frame)
{
    struct reverse_port *rport = (struct reverse_port*) this_port;

    /* Record the GET operation on the downstream direction. */
    op_update(rport, DIR_DOWNSTREAM, OP_GET);

    /* While media flow on this direction is paused, hand back an
     * empty frame.
     */
    if (rport->buf[DIR_DOWNSTREAM].paused) {
        frame->type = PJMEDIA_FRAME_TYPE_NONE;
        return PJ_SUCCESS;
    }

    /* Fill in the frame header, then pull the samples out of the
     * downstream delay buffer.
     */
    frame->type = PJMEDIA_FRAME_TYPE_AUDIO;
    frame->size = PJMEDIA_PIA_AVG_FSZ(&this_port->info);
    frame->timestamp.u64 = rport->buf[DIR_DOWNSTREAM].ts.u64;

    return pjmedia_delay_buf_get(rport->buf[DIR_DOWNSTREAM].dbuf,
                                 (short*)frame->buf);
}
/*
 * Get a multichannel frame upstream.
 * This will get mono channel frame from each port and put the
 * mono frame into the multichannel frame.
 *
 * Channels with no port, a failed get_frame(), or a paused reverse port
 * contribute silence; the per-channel samples are interleaved into
 * frame->buf by store_mono_frame().
 */
static pj_status_t get_frame(pjmedia_port *this_port,
                             pjmedia_frame *frame)
{
    struct splitcomb *sc = (struct splitcomb*) this_port;
    unsigned ch;
    pj_bool_t has_frame = PJ_FALSE;

    /* Read frame from each port */
    for (ch=0; ch < PJMEDIA_PIA_CCNT(&this_port->info); ++ch) {
        pjmedia_port *port = sc->port_desc[ch].port;
        pjmedia_frame mono_frame;
        pj_status_t status;

        if (!port) {
            /* No port attached on this channel: emit one channel's worth
             * of silence.
             */
            pjmedia_zero_samples(sc->get_buf,
                                 PJMEDIA_PIA_SPF(&this_port->info) /
                                 PJMEDIA_PIA_CCNT(&this_port->info));
        } else if (sc->port_desc[ch].reversed == PJ_FALSE) {
            /* Read from normal port */
            mono_frame.buf = sc->get_buf;
            mono_frame.size = PJMEDIA_PIA_AVG_FSZ(&port->info);
            mono_frame.timestamp.u64 = frame->timestamp.u64;

            status = pjmedia_port_get_frame(port, &mono_frame);
            /* On failure or non-audio frame, substitute silence. */
            if (status != PJ_SUCCESS ||
                mono_frame.type != PJMEDIA_FRAME_TYPE_AUDIO)
            {
                pjmedia_zero_samples(sc->get_buf,
                                     PJMEDIA_PIA_SPF(&port->info));
            }

            frame->timestamp.u64 = mono_frame.timestamp.u64;
        } else {
            /* Read from temporary buffer for reverse port */
            struct reverse_port *rport = (struct reverse_port*)port;

            /* Update rport state. */
            op_update(rport, DIR_UPSTREAM, OP_GET);

            if (!rport->buf[DIR_UPSTREAM].paused) {
                pjmedia_delay_buf_get(rport->buf[DIR_UPSTREAM].dbuf,
                                      sc->get_buf);
            } else {
                /* Paused: substitute silence. */
                pjmedia_zero_samples(sc->get_buf,
                                     PJMEDIA_PIA_SPF(&port->info));
            }

            frame->timestamp.u64 = rport->buf[DIR_UPSTREAM].ts.u64;
        }

        /* Combine the mono frame into multichannel frame */
        store_mono_frame(sc->get_buf,
                         (pj_int16_t*)frame->buf, ch,
                         PJMEDIA_PIA_CCNT(&this_port->info),
                         PJMEDIA_PIA_SPF(&this_port->info) /
                         PJMEDIA_PIA_CCNT(&this_port->info));

        /* NOTE(review): set unconditionally, even when this channel only
         * contributed silence — so the NO_FRAME branch below is reachable
         * only when the channel count is zero. Confirm this is intended.
         */
        has_frame = PJ_TRUE;
    }

    /* Return NO_FRAME if we didn't get any frames from downstream ports */
    if (has_frame) {
        frame->type = PJMEDIA_FRAME_TYPE_AUDIO;
        frame->size = PJMEDIA_PIA_AVG_FSZ(&this_port->info);
    } else
        frame->type = PJMEDIA_FRAME_TYPE_NONE;

    return PJ_SUCCESS;
}