/**
 * Packetize AAC frames into RTP packets (RFC 3640 AAC-hbr style):
 * each packet starts with a 16-bit AU-headers-length field, followed by one
 * 16-bit AU header (AU size in bits) per access unit, then the AU payloads.
 * Frames are accumulated in s->buf across calls and flushed when the frame
 * count, payload size, or buffering delay limit is reached.
 */
void ff_rtp_send_aac(AVFormatContext *s1, const uint8_t *buff, int size)
{
    RTPMuxContext *s = s1->priv_data;
    AVStream *st = s1->streams[0];
    /* 2 bytes AU-headers-length + one 2-byte AU header per frame */
    const int max_au_headers_size = 2 + 2 * s->max_frames_per_packet;
    int len, max_packet_size = s->max_payload_size - max_au_headers_size;
    uint8_t *p;

    /* skip ADTS header, if present */
    if ((s1->streams[0]->codecpar->extradata_size) == 0) {
        size -= 7;
        buff += 7;
    }

    /* test if the packet must be sent: frame-count limit reached, payload
     * would overflow, or the buffered data exceeds the muxer's max delay */
    len = (s->buf_ptr - s->buf);
    if (s->num_frames &&
        (s->num_frames == s->max_frames_per_packet ||
         (len + size) > s->max_payload_size ||
         av_compare_ts(s->cur_timestamp - s->timestamp, st->time_base,
                       s1->max_delay, AV_TIME_BASE_Q) >= 0)) {
        int au_size = s->num_frames * 2;

        /* Fewer frames than the maximum may have been buffered: slide the
         * AU headers forward so they sit immediately before the payload. */
        p = s->buf + max_au_headers_size - au_size - 2;
        if (p != s->buf) {
            memmove(p + 2, s->buf + 2, au_size);
        }
        /* Write the AU header size (AU-headers-length, in bits) */
        AV_WB16(p, au_size * 8);
        ff_rtp_send_data(s1, p, s->buf_ptr - p, 1);

        s->num_frames = 0;
    }
    if (s->num_frames == 0) {
        /* buffer now empty: payload starts after the header area, and the
         * packet takes the timestamp of its first frame */
        s->buf_ptr = s->buf + max_au_headers_size;
        s->timestamp = s->cur_timestamp;
    }

    if (size <= max_packet_size) {
        /* frame fits: append its AU header and payload to the buffer */
        p = s->buf + s->num_frames++ * 2 + 2;
        AV_WB16(p, size * 8);
        memcpy(s->buf_ptr, buff, size);
        s->buf_ptr += size;
    } else {
        /* frame larger than one packet: fragment it; every fragment carries
         * a single AU header holding the full AU size in bits */
        int au_size = size;

        max_packet_size = s->max_payload_size - 4;
        p = s->buf;
        /* AU-headers-length for exactly one AU header */
        AV_WB16(p, 2 * 8);
        while (size > 0) {
            len = FFMIN(size, max_packet_size);
            AV_WB16(&p[2], au_size * 8);
            memcpy(p + 4, buff, len);
            /* RTP marker bit set on the final fragment only */
            ff_rtp_send_data(s1, p, len + 4, len == size);

            size -= len;
            buff += len;
        }
    }
}
/* Send one H.264 NAL unit: as a single RTP packet when it fits, otherwise
 * split into FU-A fragments (2-byte FU indicator/header + payload). */
static void nal_send(AVFormatContext *s1, const uint8_t *buf, int size, int last)
{
    RTPMuxContext *s = s1->priv_data;
    const int chunk = s->max_payload_size - 2; /* payload bytes per fragment */
    uint8_t nal_type, nal_nri;

    av_log(s1, AV_LOG_DEBUG, "Sending NAL %x of len %d M=%d\n",
           buf[0] & 0x1F, size, last);

    if (size <= s->max_payload_size) {
        /* fits in a single packet: send the NAL unit as-is */
        ff_rtp_send_data(s1, buf, size, last);
        return;
    }

    nal_type = buf[0] & 0x1F;
    nal_nri  = buf[0] & 0x60;
    av_log(s1, AV_LOG_DEBUG, "NAL size %d > %d\n", size, s->max_payload_size);

    s->buf[0] = 28 | nal_nri;        /* FU indicator: Type = 28 -> FU-A */
    s->buf[1] = nal_type | (1 << 7); /* FU header: S(tart) bit on first fragment */
    /* the NAL header byte is reconstructed from the FU bytes at the receiver */
    buf++;
    size--;

    while (size + 2 > s->max_payload_size) {
        memcpy(&s->buf[2], buf, chunk);
        ff_rtp_send_data(s1, s->buf, s->max_payload_size, 0);
        buf  += chunk;
        size -= chunk;
        s->buf[1] &= ~(1 << 7);      /* only the first fragment carries S */
    }
    s->buf[1] |= 1 << 6;             /* E(nd) bit on the final fragment */
    memcpy(&s->buf[2], buf, size);
    ff_rtp_send_data(s1, s->buf, size + 2, last);
}
/* Prepend an RFC 2190 mode B header (8 bytes, MB-aligned fragments) to an
 * H.263 chunk and transmit it; m is the RTP marker bit. */
static void send_mode_b(AVFormatContext *s1, const struct H263Info *info,
                        const struct H263State *state, const uint8_t *buf,
                        int len, int sbits, int ebits, int m)
{
    RTPMuxContext *s = s1->priv_data;
    PutBitContext hdr;

    init_put_bits(&hdr, s->buf, 64);
    put_bits(&hdr, 2, 2);            /* F=1 (mode B), P=0 */
    put_bits(&hdr, 3, sbits);        /* SBIT - start bit offset */
    put_bits(&hdr, 3, ebits);        /* EBIT - end bit offset */
    put_bits(&hdr, 3, info->src);    /* SRC  - source format */
    put_bits(&hdr, 5, state->quant); /* QUANT - quantizer for the first MB */
    put_bits(&hdr, 5, state->gobn);  /* GOBN - GOB number */
    put_bits(&hdr, 9, state->mba);   /* MBA  - MB address */
    put_bits(&hdr, 2, 0);            /* R    - reserved */
    put_bits(&hdr, 1, info->i);      /* I    - inter/intra */
    put_bits(&hdr, 1, info->u);      /* U    - unrestricted motion vector */
    put_bits(&hdr, 1, info->s);      /* S    - syntax-based arithmetic coding */
    put_bits(&hdr, 1, info->a);      /* A    - advanced prediction */
    put_bits(&hdr, 7, state->hmv1);  /* HMV1 - horizontal motion vector 1 */
    put_bits(&hdr, 7, state->vmv1);  /* VMV1 - vertical motion vector 1 */
    put_bits(&hdr, 7, state->hmv2);  /* HMV2 - horizontal motion vector 2 */
    put_bits(&hdr, 7, state->vmv2);  /* VMV2 - vertical motion vector 2 */
    flush_put_bits(&hdr);

    memcpy(s->buf + 8, buf, len);
    ff_rtp_send_data(s1, s->buf, len + 8, m);
}
/* Prepend an RFC 2190 mode A header (4 bytes, GOB/picture-aligned data) to
 * an H.263 chunk and transmit it; m is the RTP marker bit. */
static void send_mode_a(AVFormatContext *s1, const struct H263Info *info,
                        const uint8_t *buf, int len, int ebits, int m)
{
    RTPMuxContext *s = s1->priv_data;
    PutBitContext hdr;

    init_put_bits(&hdr, s->buf, 32);
    put_bits(&hdr, 5, 0);            /* F=0 (mode A), P=0, SBIT=0 */
    put_bits(&hdr, 3, ebits);        /* EBIT - end bit offset */
    put_bits(&hdr, 3, info->src);    /* SRC  - source format */
    put_bits(&hdr, 1, info->i);      /* I    - inter/intra */
    put_bits(&hdr, 1, info->u);      /* U    - unrestricted motion vector */
    put_bits(&hdr, 1, info->s);      /* S    - syntax-based arithmetic coding */
    put_bits(&hdr, 1, info->a);      /* A    - advanced prediction */
    put_bits(&hdr, 9, 0);            /* R=0 (4), DBQ=0 (2), TRB=0 (3) */
    put_bits(&hdr, 8, info->tr);     /* TR   - temporal reference */
    flush_put_bits(&hdr);

    memcpy(s->buf + 4, buf, len);
    ff_rtp_send_data(s1, s->buf, len + 4, m);
}
/* Send an integer number of raw audio samples: split the input into RTP
 * packets whose payload is the largest whole-sample multiple that fits,
 * deriving each packet's timestamp from the samples already emitted. */
static void rtp_send_samples(AVFormatContext *s1, const uint8_t *buf1, int size, int sample_size)
{
    RTPMuxContext *s = s1->priv_data;
    /* largest multiple of sample_size that fits in one payload */
    const int packet_bytes = (s->max_payload_size / sample_size) * sample_size;
    int sent = 0;

    /* not needed, but who knows */
    if ((size % sample_size) != 0)
        av_abort();

    while (size > 0) {
        int chunk = FFMIN(packet_bytes, size);

        /* fill the packet buffer with the next chunk */
        memcpy(s->buf, buf1, chunk);
        s->buf_ptr = s->buf + chunk;
        buf1 += chunk;
        size -= chunk;

        /* timestamp advances by one per sample already sent */
        s->timestamp = s->cur_timestamp + sent / sample_size;
        ff_rtp_send_data(s1, s->buf, s->buf_ptr - s->buf, 0);
        sent += chunk;
    }
}
/* Send an integer number of audio samples whose size is given in bits:
 * packets are cut at byte boundaries that hold a whole number of samples,
 * and each packet's timestamp is derived from the samples already sent.
 * Returns 0 on success or AVERROR(EINVAL) on a misaligned byte count. */
static int rtp_send_samples(AVFormatContext *s1, const uint8_t *buf1, int size, int sample_size_bits)
{
    RTPMuxContext *s = s1->priv_data;
    /* smallest byte count containing a whole number of samples */
    const int sample_group = sample_size_bits / av_gcd(sample_size_bits, 8);
    /* largest multiple of that group fitting in one payload */
    const int packet_bytes = (s->max_payload_size / sample_group) * sample_group;
    int sent = 0;

    /* Not needed, but who knows. Don't check if samples aren't an even
     * number of bytes. */
    if ((sample_size_bits % 8) == 0 && ((8 * size) % sample_size_bits) != 0)
        return AVERROR(EINVAL);

    while (size > 0) {
        int chunk = FFMIN(packet_bytes, size);

        memcpy(s->buf, buf1, chunk);
        s->buf_ptr = s->buf + chunk;
        buf1 += chunk;
        size -= chunk;

        /* timestamp counts samples, so convert sent bytes to samples */
        s->timestamp = s->cur_timestamp + sent * 8 / sample_size_bits;
        ff_rtp_send_data(s1, s->buf, s->buf_ptr - s->buf, 0);
        sent += chunk;
    }
    return 0;
}
/* Packetize AAC frames with RFC 3640-style AU headers, buffering frames in
 * s->buf across calls and flushing when a limit is reached.
 * NOTE(review): this fragment is truncated — the code that appends the new
 * frame to the buffer (and fragments oversized frames) is not visible here. */
void ff_rtp_send_aac(AVFormatContext *s1, const uint8_t *buff, int size)
{
    RTPMuxContext *s = s1->priv_data;
    int len, max_packet_size;
    uint8_t *p;
    /* fall back to 5 frames per packet when no limit is configured */
    const int max_frames_per_packet = s->max_frames_per_packet ? s->max_frames_per_packet : 5;
    /* 2 bytes AU-headers-length + one 2-byte AU header per frame */
    const int max_au_headers_size = 2 + 2 * max_frames_per_packet;

    /* skip ADTS header, if present */
    if ((s1->streams[0]->codec->extradata_size) == 0) {
        size -= 7;
        buff += 7;
    }
    max_packet_size = s->max_payload_size - max_au_headers_size;

    /* test if the packet must be sent */
    len = (s->buf_ptr - s->buf);
    if ((s->num_frames == max_frames_per_packet) ||
        (len && (len + size) > s->max_payload_size)) {
        int au_size = s->num_frames * 2;

        /* fewer frames than the maximum may have been buffered: slide the
         * AU headers forward so they sit immediately before the payload */
        p = s->buf + max_au_headers_size - au_size - 2;
        if (p != s->buf) {
            memmove(p + 2, s->buf + 2, au_size);
        }
        /* Write the AU-headers-length field (in bits, big-endian).
         * Fixed: the high byte was computed as ((x & 0xFF) >> 8), which is
         * always zero; the mask must keep the high byte (0xFF00). */
        p[0] = ((au_size * 8) & 0xFF00) >> 8;
        p[1] = (au_size * 8) & 0xFF;
        ff_rtp_send_data(s1, p, s->buf_ptr - p, 1);

        s->num_frames = 0;
    }
/* Packetize MPEG audio frames (MPEG audio RTP payload — presumably RFC 2250;
 * the 4-byte payload header carries the fragment offset — confirm).
 * Frames accumulate in s->buf after the 4-byte header; the buffer is flushed
 * when a new frame would overflow it, and oversized frames are fragmented.
 * NOTE(review): this fragment is truncated — the non-fragmented append path
 * and the tail of the function are not visible here. */
static void rtp_send_mpegaudio(AVFormatContext *s1, const uint8_t *buf1, int size)
{
    RTPMuxContext *s = s1->priv_data;
    int len, count, max_packet_size;

    max_packet_size = s->max_payload_size;

    /* test if we must flush because not enough space */
    len = (s->buf_ptr - s->buf);
    if ((len + size) > max_packet_size) {
        /* flush only if more than the 4-byte header is buffered */
        if (len > 4) {
            ff_rtp_send_data(s1, s->buf, s->buf_ptr - s->buf, 0);
            s->buf_ptr = s->buf + 4;
        }
    }
    if (s->buf_ptr == s->buf + 4) {
        /* buffer empty: packet takes the timestamp of its first frame */
        s->timestamp = s->cur_timestamp;
    }

    /* add the packet */
    if (size > max_packet_size) {
        /* big packet: fragment */
        count = 0;
        while (size > 0) {
            len = max_packet_size - 4;
            if (len > size)
                len = size;
            /* build fragmented packet: bytes 2-3 hold the fragment's
             * byte offset within the frame, big-endian */
            s->buf[0] = 0;
            s->buf[1] = 0;
            s->buf[2] = count >> 8;
            s->buf[3] = count;
            memcpy(s->buf + 4, buf1, len);
            ff_rtp_send_data(s1, s->buf, len + 4, 0);
            size -= len;
            buf1 += len;
            count += len;
        }
    }
/* Send an integer number of samples, packetizing into fixed-size packets of
 * 160 samples (20 ms at the 8 kHz telephony default, per the comment in the
 * original).  Bytes that do not fill a whole packet are carried over in
 * s->stock_buf until the next call.
 * NOTE(review): assumes s->stock_buf holds at least max_packet_size bytes —
 * confirm against the RTPMuxContext definition. */
static void rtp_send_samples(AVFormatContext *s1, const uint8_t *buf1, int size, int sample_size)
{
    RTPMuxContext *s = s1->priv_data;
    int len, max_packet_size, n;

    /* fixed packet size: 160 samples (20 ms RTP default) instead of the
     * payload-derived size used by other variants of this function */
    max_packet_size = 160 * sample_size;

    /* not needed, but who knows */
    if ((size % sample_size) != 0)
        av_abort();

    n = 0;
    s->buf_ptr = s->buf;
    /* prepend any samples left over from the previous call */
    if (s->stock_len > 0) {
        memcpy(s->buf, s->stock_buf, s->stock_len);
        s->buf_ptr += s->stock_len;
        s->stock_len = 0;
    }
    /* flush whenever buffered + incoming bytes reach a full packet.
     * NOTE(review): when leftover bytes are pending and size is below
     * max_packet_size, the FFMIN below makes len smaller than a full
     * packet's worth, so a short packet can be sent — confirm intended. */
    while (size + s->buf_ptr - s->buf >= max_packet_size) {
        len = FFMIN(max_packet_size, size) - (s->buf_ptr - s->buf);
        /* copy data */
        memcpy(s->buf_ptr, buf1, len);
        s->buf_ptr += len;
        buf1 += len;
        size -= len;
        /* timestamp advances by one per sample sent so far in this call */
        s->timestamp = s->cur_timestamp + n / sample_size;
        ff_rtp_send_data(s1, s->buf, s->buf_ptr - s->buf, 0);
        n += (s->buf_ptr - s->buf);
        s->buf_ptr = s->buf;
    }
    /* stash the remainder for the next call */
    if (size > 0) {
        memcpy(s->stock_buf, buf1, size);
        s->stock_len = size;
    }
    /* advance the running timestamp by the samples actually sent */
    s->cur_timestamp += n / sample_size;
}
/* Based on a draft spec for VP8 RTP.
 * ( http://www.webmproject.org/code/specs/rtp/ )
 * Splits one VP8 frame into packets, each prefixed with a one-byte payload
 * descriptor: 1 on the first packet (start of frame), 0 on continuations. */
void ff_rtp_send_vp8(AVFormatContext *s1, const uint8_t *buf, int size)
{
    RTPMuxContext *s = s1->priv_data;
    const int payload_max = s->max_payload_size - 1; /* minus one for header byte */
    int chunk;

    s->buf_ptr   = s->buf;
    s->timestamp = s->cur_timestamp;

    /* first packet of the frame: 0b1 indicates start of frame */
    *s->buf_ptr++ = 1;

    while (size > 0) {
        chunk = FFMIN(size, payload_max);
        memcpy(s->buf_ptr, buf, chunk);
        /* RTP marker bit flags the last packet of the frame */
        ff_rtp_send_data(s1, s->buf, chunk + 1, chunk == size);

        buf  += chunk;
        size -= chunk;

        /* continuation packets carry a zero payload descriptor */
        s->buf_ptr    = s->buf;
        *s->buf_ptr++ = 0;
    }
}
/**
 * Packetize H.263 frames into RTP packets according to RFC 4629
 */
void ff_rtp_send_h263(AVFormatContext *s1, const uint8_t *buf1, int size)
{
    RTPMuxContext *s = s1->priv_data;
    const int payload_max = s->max_payload_size;
    uint8_t *out;
    int len;

    while (size > 0) {
        out = s->buf;

        /* If the chunk begins with a start code, its two zero bytes are
         * elided and signalled through the P bit instead. */
        if (size >= 2 && buf1[0] == 0 && buf1[1] == 0) {
            *out++ = 0x04;   /* P=1: packet starts with a start code */
            buf1 += 2;
            size -= 2;
        } else {
            *out++ = 0;      /* P=0: continuation packet */
        }
        *out++ = 0;          /* remaining header bits (PLEN/PEBIT) zero */

        len = FFMIN(payload_max - 2, size);

        /* Look for a better place to split the frame into packets. */
        if (len < size) {
            const uint8_t *end = find_resync_marker_reverse(buf1, buf1 + len);
            len = end - buf1;
        }

        memcpy(out, buf1, len);
        out += len;

        /* 90 KHz time stamp */
        s->timestamp = s->cur_timestamp;
        ff_rtp_send_data(s1, s->buf, out - s->buf, (len == size));
        buf1 += len;
        size -= len;
    }
}
/* Based on a draft spec for VP8 RTP.
 * ( http://tools.ietf.org/html/draft-ietf-payload-vp8-05 )
 * Splits one VP8 frame into packets, each prefixed with a one-byte payload
 * descriptor; only the first packet marks the start of a partition. */
void ff_rtp_send_vp8(AVFormatContext *s1, const uint8_t *buf, int size)
{
    RTPMuxContext *s = s1->priv_data;
    const int payload_max = s->max_payload_size - 1; /* minus one for header byte */
    int chunk;

    s->buf_ptr   = s->buf;
    s->timestamp = s->cur_timestamp;

    /* first packet: no extended control bits, reference frame,
     * start of partition, partition id 0 */
    *s->buf_ptr++ = 0x10;

    while (size > 0) {
        chunk = FFMIN(size, payload_max);
        memcpy(s->buf_ptr, buf, chunk);
        /* RTP marker bit flags the last packet of the frame */
        ff_rtp_send_data(s1, s->buf, chunk + 1, chunk == size);

        buf  += chunk;
        size -= chunk;

        /* continuation packets carry a zero payload descriptor */
        s->buf_ptr    = s->buf;
        *s->buf_ptr++ = 0;
    }
}
/**
 * Packetize a baseline MJPEG frame into RTP packets per RFC 2435: strip the
 * JPEG headers, verify the frame is expressible in the RFC's restart-free
 * profile (standard Huffman tables, 1x1 chroma blocks), and emit fragments
 * with an 8-byte main header; the first fragment additionally carries the
 * quantization tables.
 * Fixed: two av_log string literals were broken across raw newlines
 * ("Only 1x1 chroma blocks are supported. / Aborted!" and
 * "Insufficient data. / Aborted!"), which does not compile.
 */
void ff_rtp_send_jpeg(AVFormatContext *s1, const uint8_t *buf, int size)
{
    RTPMuxContext *s = s1->priv_data;
    const uint8_t *qtables[4] = { NULL };
    int nb_qtables = 0;
    uint8_t type;
    uint8_t w, h;
    uint8_t *p;
    int off = 0; /* fragment offset of the current JPEG frame */
    int len;
    int i;
    int default_huffman_tables = 0;

    s->buf_ptr   = s->buf;
    s->timestamp = s->cur_timestamp;

    /* convert video pixel dimensions from pixels to blocks */
    w = AV_CEIL_RSHIFT(s1->streams[0]->codecpar->width,  3);
    h = AV_CEIL_RSHIFT(s1->streams[0]->codecpar->height, 3);

    /* get the pixel format type or fail */
    if (s1->streams[0]->codecpar->format == AV_PIX_FMT_YUVJ422P ||
        (s1->streams[0]->codecpar->color_range == AVCOL_RANGE_JPEG &&
         s1->streams[0]->codecpar->format == AV_PIX_FMT_YUV422P)) {
        type = 0;
    } else if (s1->streams[0]->codecpar->format == AV_PIX_FMT_YUVJ420P ||
               (s1->streams[0]->codecpar->color_range == AVCOL_RANGE_JPEG &&
                s1->streams[0]->codecpar->format == AV_PIX_FMT_YUV420P)) {
        type = 1;
    } else {
        av_log(s1, AV_LOG_ERROR, "Unsupported pixel format\n");
        return;
    }

    /* preparse the header for getting some infos */
    for (i = 0; i < size; i++) {
        if (buf[i] != 0xff)
            continue;

        if (buf[i + 1] == DQT) {
            int tables, j;
            /* high nibble of the Pq/Tq byte is the sample precision */
            if (buf[i + 4] & 0xF0)
                av_log(s1, AV_LOG_WARNING,
                       "Only 8-bit precision is supported.\n");

            /* a quantization table is 64 bytes long */
            tables = AV_RB16(&buf[i + 2]) / 65;
            if (i + 5 + tables * 65 > size) {
                av_log(s1, AV_LOG_ERROR, "Too short JPEG header. Aborted!\n");
                return;
            }
            if (nb_qtables + tables > 4) {
                av_log(s1, AV_LOG_ERROR,
                       "Invalid number of quantisation tables\n");
                return;
            }

            for (j = 0; j < tables; j++)
                qtables[nb_qtables + j] = buf + i + 5 + j * 65;
            nb_qtables += tables;
        } else if (buf[i + 1] == SOF0) {
            /* sampling-factor bytes must describe 1x1 chroma blocks */
            if (buf[i + 14] != 17 || buf[i + 17] != 17) {
                av_log(s1, AV_LOG_ERROR,
                       "Only 1x1 chroma blocks are supported. Aborted!\n");
                return;
            }
        } else if (buf[i + 1] == DHT) {
            int dht_size = AV_RB16(&buf[i + 2]);
            /* remember that DHT markers were present at all */
            default_huffman_tables |= 1 << 4;
            i += 3;
            dht_size -= 2;
            if (i + dht_size >= size)
                continue;
            /* check each embedded table against the standard (Annex K)
             * tables; RFC 2435 does not transmit Huffman tables */
            while (dht_size > 0)
                switch (buf[i + 1]) {
                case 0x00:
                    if (   dht_size >= 29
                        && !memcmp(buf + i +  2, avpriv_mjpeg_bits_dc_luminance + 1, 16)
                        && !memcmp(buf + i + 18, avpriv_mjpeg_val_dc, 12)) {
                        default_huffman_tables |= 1;
                        i += 29;
                        dht_size -= 29;
                    } else {
                        i += dht_size;
                        dht_size = 0;
                    }
                    break;
                case 0x01:
                    if (   dht_size >= 29
                        && !memcmp(buf + i +  2, avpriv_mjpeg_bits_dc_chrominance + 1, 16)
                        && !memcmp(buf + i + 18, avpriv_mjpeg_val_dc, 12)) {
                        default_huffman_tables |= 1 << 1;
                        i += 29;
                        dht_size -= 29;
                    } else {
                        i += dht_size;
                        dht_size = 0;
                    }
                    break;
                case 0x10:
                    if (   dht_size >= 179
                        && !memcmp(buf + i +  2, avpriv_mjpeg_bits_ac_luminance + 1, 16)
                        && !memcmp(buf + i + 18, avpriv_mjpeg_val_ac_luminance, 162)) {
                        default_huffman_tables |= 1 << 2;
                        i += 179;
                        dht_size -= 179;
                    } else {
                        i += dht_size;
                        dht_size = 0;
                    }
                    break;
                case 0x11:
                    if (   dht_size >= 179
                        && !memcmp(buf + i +  2, avpriv_mjpeg_bits_ac_chrominance + 1, 16)
                        && !memcmp(buf + i + 18, avpriv_mjpeg_val_ac_chrominance, 162)) {
                        default_huffman_tables |= 1 << 3;
                        i += 179;
                        dht_size -= 179;
                    } else {
                        i += dht_size;
                        dht_size = 0;
                    }
                    break;
                default:
                    i += dht_size;
                    dht_size = 0;
                    continue;
                }
        } else if (buf[i + 1] == SOS) {
            /* SOS is last marker in the header */
            i += AV_RB16(&buf[i + 2]) + 2;
            if (i > size) {
                av_log(s1, AV_LOG_ERROR, "Insufficient data. Aborted!\n");
                return;
            }
            break;
        }
    }

    /* all four DHT tables must have matched the standard ones (bits 0-3
     * plus the "seen" bit 4 => 31) when any DHT marker was present */
    if (default_huffman_tables && default_huffman_tables != 31) {
        av_log(s1, AV_LOG_ERROR,
               "RFC 2435 requires standard Huffman tables for jpeg\n");
        return;
    }
    if (nb_qtables && nb_qtables != 2)
        av_log(s1, AV_LOG_WARNING,
               "RFC 2435 suggests two quantization tables, %d provided\n",
               nb_qtables);

    /* skip JPEG header */
    buf  += i;
    size -= i;

    for (i = size - 2; i >= 0; i--) {
        if (buf[i] == 0xff && buf[i + 1] == EOI) {
            /* Remove the EOI marker */
            size = i;
            break;
        }
    }

    p = s->buf_ptr;
    while (size > 0) {
        int hdr_size = 8;

        if (off == 0 && nb_qtables)
            hdr_size += 4 + 64 * nb_qtables;

        /* payload max in one packet */
        len = FFMIN(size, s->max_payload_size - hdr_size);

        /* set main header */
        bytestream_put_byte(&p, 0);
        bytestream_put_be24(&p, off);
        bytestream_put_byte(&p, type);
        bytestream_put_byte(&p, 255);
        bytestream_put_byte(&p, w);
        bytestream_put_byte(&p, h);

        if (off == 0 && nb_qtables) {
            /* set quantization tables header */
            bytestream_put_byte(&p, 0);
            bytestream_put_byte(&p, 0);
            bytestream_put_be16(&p, 64 * nb_qtables);

            for (i = 0; i < nb_qtables; i++)
                bytestream_put_buffer(&p, qtables[i], 64);
        }

        /* copy payload data */
        memcpy(p, buf, len);

        /* marker bit is last packet in frame */
        ff_rtp_send_data(s1, s->buf, len + hdr_size, size == len);

        buf  += len;
        size -= len;
        off  += len;
        p     = s->buf;
    }
}
/**
 * Packetize a baseline MJPEG frame into RTP packets per RFC 2435 (older
 * variant using the AVCodecContext API): strip the JPEG headers and emit
 * fragments with an 8-byte main header; the first fragment additionally
 * carries the quantization tables.
 * Fixes:
 *  - an av_log string literal was broken across a raw newline ("Only 1x1
 *    chroma blocks are supported. / Aborted!"), which does not compile;
 *  - the 8-bit precision check tested the whole Pq/Tq byte, so a nonzero
 *    table id triggered a false warning — only the high nibble is precision;
 *  - a bogus SOS length field could push i past the end of the buffer and
 *    make the subsequent header skip go negative; now rejected.
 */
void ff_rtp_send_jpeg(AVFormatContext *s1, const uint8_t *buf, int size)
{
    RTPMuxContext *s = s1->priv_data;
    const uint8_t *qtables = NULL;
    int nb_qtables = 0;
    uint8_t type;
    uint8_t w, h;
    uint8_t *p;
    int off = 0; /* fragment offset of the current JPEG frame */
    int len;
    int i;

    s->buf_ptr   = s->buf;
    s->timestamp = s->cur_timestamp;

    /* convert video pixel dimensions from pixels to blocks */
    w = (s1->streams[0]->codec->width  + 7) >> 3;
    h = (s1->streams[0]->codec->height + 7) >> 3;

    /* get the pixel format type or fail */
    if (s1->streams[0]->codec->pix_fmt == AV_PIX_FMT_YUVJ422P ||
        (s1->streams[0]->codec->color_range == AVCOL_RANGE_JPEG &&
         s1->streams[0]->codec->pix_fmt == AV_PIX_FMT_YUV422P)) {
        type = 0;
    } else if (s1->streams[0]->codec->pix_fmt == AV_PIX_FMT_YUVJ420P ||
               (s1->streams[0]->codec->color_range == AVCOL_RANGE_JPEG &&
                s1->streams[0]->codec->pix_fmt == AV_PIX_FMT_YUV420P)) {
        type = 1;
    } else {
        av_log(s1, AV_LOG_ERROR, "Unsupported pixel format\n");
        return;
    }

    /* preparse the header for getting some infos */
    for (i = 0; i < size; i++) {
        if (buf[i] != 0xff)
            continue;

        if (buf[i + 1] == DQT) {
            /* only the high nibble of the Pq/Tq byte holds the precision */
            if (buf[i + 4] & 0xF0)
                av_log(s1, AV_LOG_WARNING,
                       "Only 8-bit precision is supported.\n");

            /* a quantization table is 64 bytes long */
            nb_qtables = AV_RB16(&buf[i + 2]) / 65;
            if (i + 4 + nb_qtables * 65 > size) {
                av_log(s1, AV_LOG_ERROR, "Too short JPEG header. Aborted!\n");
                return;
            }
            qtables = &buf[i + 4];
        } else if (buf[i + 1] == SOF0) {
            /* sampling-factor bytes must describe 1x1 chroma blocks */
            if (buf[i + 14] != 17 || buf[i + 17] != 17) {
                av_log(s1, AV_LOG_ERROR,
                       "Only 1x1 chroma blocks are supported. Aborted!\n");
                return;
            }
        } else if (buf[i + 1] == SOS) {
            /* SOS is last marker in the header */
            i += AV_RB16(&buf[i + 2]) + 2;
            /* reject a length field pointing past the end of the buffer */
            if (i > size) {
                av_log(s1, AV_LOG_ERROR, "Insufficient data. Aborted!\n");
                return;
            }
            break;
        }
    }

    /* skip JPEG header */
    buf  += i;
    size -= i;

    for (i = size - 2; i >= 0; i--) {
        if (buf[i] == 0xff && buf[i + 1] == EOI) {
            /* Remove the EOI marker */
            size = i;
            break;
        }
    }

    p = s->buf_ptr;
    while (size > 0) {
        int hdr_size = 8;

        if (off == 0 && nb_qtables)
            hdr_size += 4 + 64 * nb_qtables;

        /* payload max in one packet */
        len = FFMIN(size, s->max_payload_size - hdr_size);

        /* set main header */
        bytestream_put_byte(&p, 0);
        bytestream_put_be24(&p, off);
        bytestream_put_byte(&p, type);
        bytestream_put_byte(&p, 255);
        bytestream_put_byte(&p, w);
        bytestream_put_byte(&p, h);

        if (off == 0 && nb_qtables) {
            /* set quantization tables header */
            bytestream_put_byte(&p, 0);
            bytestream_put_byte(&p, 0);
            bytestream_put_be16(&p, 64 * nb_qtables);

            /* skip each table's Pq/Tq byte and copy the 64 coefficients */
            for (i = 0; i < nb_qtables; i++)
                bytestream_put_buffer(&p, &qtables[65 * i + 1], 64);
        }

        /* copy payload data */
        memcpy(p, buf, len);

        /* marker bit is last packet in frame */
        ff_rtp_send_data(s1, s->buf, len + hdr_size, size == len);

        buf  += len;
        size -= len;
        off  += len;
        p     = s->buf;
    }
}