/* Count how many complete codec frames are packed back-to-back in @data.
 * Each frame's length is derived from its first (codec-rate) byte via
 * get_frame_len(); a negative length (erasure) still advances by its
 * magnitude. Counting stops at the first invalid (length 0) or truncated
 * frame, and the remainder of the payload is ignored. */
static guint
count_packets (GstRtpQCELPDepay * depay, guint8 * data, guint size)
{
  guint total = 0;
  guint8 *pos = data;
  guint left = size;

  for (;;) {
    gint len;

    if (left == 0)
      break;

    len = get_frame_len (depay, pos[0]);
    /* 0 is invalid and we throw away the remainder of the frames */
    if (len == 0)
      break;
    if (len < 0)
      len = -len;
    /* truncated final frame: don't count it */
    if ((guint) len > left)
      break;

    left -= len;
    pos += len;
    total++;
  }

  return total;
}
void end_audio_frame() { if (frame_offset == 0) // No audio added; blip_end_frame() dislikes being called with an // offset of 0 return; assert(!(is_backwards_frame && frame_offset != get_frame_len())); // Bring the signal level at the end of the frame to zero as outlined in // set_audio_signal_level() set_audio_signal_level(0); blip_end_frame(blip, frame_offset); if (playback_started) { // Fudge playback rate by an amount proportional to the difference // between the desired and current buffer fill levels to try to steer // towards it double const fudge_factor = 1.0 + 2*max_adjust*(0.5 - fill_level()); blip_set_rates(blip, cpu_clock_rate, sample_rate*fudge_factor); } else { if (fill_level() >= 0.5) { start_audio_playback(); playback_started = true; } } int const n_samples = blip_read_samples(blip, blip_samples, ARRAY_LEN(blip_samples), 0); // We expect to read all samples from blip_buf. If something goes wrong and // we don't, clear the buffer to prevent data piling up in blip_buf's // buffer (which lacks bounds checking). int const avail = blip_samples_avail(blip); if (avail != 0) { printf("Warning: didn't read all samples from blip_buf (%d samples remain) - dropping samples\n", avail); blip_clear(blip); } #ifdef RECORD_MOVIE add_movie_audio_frame(blip_samples, n_samples); #endif // Save the samples to the audio ring buffer lock_audio(); write_samples(blip_samples, n_samples); unlock_audio(); }
void set_audio_signal_level(int16_t level) { // TODO: Do something to reduce the initial pop here? static int16_t previous_signal_level = 0; unsigned time = frame_offset; int delta = level - previous_signal_level; if (is_backwards_frame) { // Flip deltas and add them from the end of the frame to reverse audio. // Since the exact length of the frame can't be known in advance, the // length of each frame is recorded when it is saved to the rewind // buffer. // // This is easiest to visualize by thinking of deltas as fenceposts and // the signal level as spans between them. While rewinding, the signal // level that's being set should be considered the one to the left of // the fencepost. // // One complication is the boundary between frames while rewinding - // there the final sample added to one frame is not followed in time by // the first sample of the next frame. To solve this, we bring the // signal level down to zero at the end of each frame, and then adjust // it to the correct value in the next frame (when rewinding, "to zero" // becomes "from zero", and everything still works out). We also call // begin_frame() between frames to invalidate the cached signal level // in apu.cpp. Together this allows frames to be mixed-and-matched // arbitrarily in time. // // Thanks to Blargg for help on this. time = get_frame_len() - time; delta = -delta; } blip_add_delta(blip, time, delta); previous_signal_level = level; }
/* Depayload one QCELP RTP packet (RFC 2658 payload format).
 *
 * Parses the one-byte interleaving header (LLL = interleave length,
 * NNN = interleave index), then walks the back-to-back codec frames in the
 * payload. Non-interleaved frames (and the first frame of an interleaved
 * packet) are pushed immediately; the rest are parked in depay->packets via
 * add_packet() and flushed when the interleave group completes. Erasure
 * frames are synthesized where the frame code asks for one, and missing
 * trailing frames of a bundle are padded with erasures.
 *
 * Always returns NULL: buffers are pushed directly with
 * gst_rtp_base_depayload_push() rather than returned. */
static GstBuffer * gst_rtp_qcelp_depay_process (GstRTPBaseDepayload * depayload, GstRTPBuffer * rtp) { GstRtpQCELPDepay *depay; GstBuffer *outbuf; GstClockTime timestamp; guint payload_len, offset, index; guint8 *payload; guint LLL, NNN;

  depay = GST_RTP_QCELP_DEPAY (depayload);

  payload_len = gst_rtp_buffer_get_payload_len (rtp);

  /* need at least the interleave octet plus one frame-code byte */
  if (payload_len < 2)
    goto too_small;

  timestamp = GST_BUFFER_PTS (rtp->buffer);

  payload = gst_rtp_buffer_get_payload (rtp);

  /* 0 1 2 3 4 5 6 7
   * +-+-+-+-+-+-+-+-+
   * |RR | LLL | NNN |
   * +-+-+-+-+-+-+-+-+
   */
  /* RR = payload[0] >> 6; */
  LLL = (payload[0] & 0x38) >> 3;
  NNN = (payload[0] & 0x07);

  /* skip the interleave octet; 'offset' below starts at 1 to account for it */
  payload_len--;
  payload++;

  GST_DEBUG_OBJECT (depay, "LLL %u, NNN %u", LLL, NNN);

  if (LLL > 5)
    goto invalid_lll;

  if (NNN > LLL)
    goto invalid_nnn;

  if (LLL != 0) {
    /* we are interleaved */
    if (!depay->interleaved) {
      guint size;

      GST_DEBUG_OBJECT (depay, "starting interleaving group");
      /* bundling is not allowed to change in one interleave group */
      depay->bundling = count_packets (depay, payload, payload_len);
      GST_DEBUG_OBJECT (depay, "got bundling of %u", depay->bundling);

      /* we have one bundle where NNN goes from 0 to L, we don't store the index
       * 0 frames, so L+1 packets. Each packet has 'bundling - 1' packets */
      /* NOTE(review): if count_packets() returned 0 (first frame code
       * invalid), (bundling - 1) underflows as unsigned and 'size' becomes
       * huge — confirm whether upstream guards against this. */
      size = (depay->bundling - 1) * (LLL + 1);

      /* create the array to hold the packets */
      if (depay->packets == NULL)
        depay->packets = g_ptr_array_sized_new (size);

      GST_DEBUG_OBJECT (depay, "created packet array of size %u", size);
      g_ptr_array_set_size (depay->packets, size);
      /* we were previously not interleaved, figure out how much space we
       * need to deinterleave */
      depay->interleaved = TRUE;
    }
  } else {
    /* we are not interleaved */
    if (depay->interleaved) {
      GST_DEBUG_OBJECT (depay, "stopping interleaving");
      /* flush packets if we were previously interleaved */
      flush_packets (depay);
    }
    depay->bundling = 0;
  }

  index = 0;
  /* offset 1: the interleave octet precedes the frames in the RTP payload */
  offset = 1;
  while (payload_len > 0) {
    gint frame_len;
    gboolean do_erasure;

    frame_len = get_frame_len (depay, payload[0]);
    GST_DEBUG_OBJECT (depay, "got frame len %d", frame_len);

    if (frame_len == 0)
      goto invalid_frame;

    if (frame_len < 0) {
      /* need to add an erasure frame but we can recover */
      frame_len = -frame_len;
      do_erasure = TRUE;
    } else {
      do_erasure = FALSE;
    }

    if (frame_len > payload_len)
      goto invalid_frame;

    if (do_erasure) {
      /* create erasure frame */
      outbuf = create_erasure_buffer (depay);
    } else {
      /* each frame goes into its buffer */
      outbuf = gst_rtp_buffer_get_payload_subbuffer (rtp, offset, frame_len);
    }

    GST_BUFFER_PTS (outbuf) = timestamp;
    GST_BUFFER_DURATION (outbuf) = FRAME_DURATION;

    gst_rtp_drop_meta (GST_ELEMENT_CAST (depayload), outbuf,
        g_quark_from_static_string (GST_META_TAG_AUDIO_STR));

    if (!depay->interleaved || index == 0) {
      /* not interleaved or first frame in packet, just push */
      gst_rtp_base_depayload_push (depayload, outbuf);

      /* -1 here stands for GST_CLOCK_TIME_NONE (all-ones as guint64) */
      if (timestamp != -1)
        timestamp += FRAME_DURATION;
    } else {
      /* put in interleave buffer */
      add_packet (depay, LLL, NNN, index, outbuf);

      /* interleaved frames are LLL+1 frame-durations apart in output order */
      if (timestamp != -1)
        timestamp += (FRAME_DURATION * (LLL + 1));
    }

    payload_len -= frame_len;
    payload += frame_len;
    offset += frame_len;
    index++;

    /* discard excess packets */
    if (depay->bundling > 0 && depay->bundling <= index)
      break;
  }

  /* packet carried fewer frames than the bundle size: pad with erasures so
   * every interleave slot is filled */
  while (index < depay->bundling) {
    GST_DEBUG_OBJECT (depay, "filling with erasure buffer");
    /* fill remainder with erasure packets */
    outbuf = create_erasure_buffer (depay);
    add_packet (depay, LLL, NNN, index, outbuf);
    index++;
  }

  if (depay->interleaved && LLL == NNN) {
    GST_DEBUG_OBJECT (depay, "interleave group ended, flushing");
    /* we have the complete interleave group, flush */
    flush_packets (depay);
  }

  /* buffers were pushed above; nothing to hand back to the base class */
  return NULL;

  /* ERRORS */
too_small:
  {
    GST_ELEMENT_WARNING (depay, STREAM, DECODE,
        (NULL), ("QCELP RTP payload too small (%d)", payload_len));
    return NULL;
  }
invalid_lll:
  {
    GST_ELEMENT_WARNING (depay, STREAM, DECODE,
        (NULL), ("QCELP RTP invalid LLL received (%d)", LLL));
    return NULL;
  }
invalid_nnn:
  {
    GST_ELEMENT_WARNING (depay, STREAM, DECODE,
        (NULL), ("QCELP RTP invalid NNN received (%d)", NNN));
    return NULL;
  }
invalid_frame:
  {
    GST_ELEMENT_WARNING (depay, STREAM, DECODE,
        (NULL), ("QCELP RTP invalid frame received"));
    return NULL;
  }
}