/**
 * Build the two Ogg-encapsulated FLAC header packets from the 34-byte
 * FLAC STREAMINFO block in extradata: the identification header and the
 * Vorbis-comment metadata block.
 *
 * Fix: both av_mallocz() results were previously used unchecked, causing a
 * NULL dereference on allocation failure; they are now checked, and the
 * first header is freed if the second allocation fails.
 *
 * @return 0 on success, a negative value on invalid extradata or OOM.
 */
static int ogg_build_flac_headers(const uint8_t *extradata, int extradata_size,
                                  OGGStreamContext *oggstream, int bitexact)
{
    const char *vendor = bitexact ? "ffmpeg" : LIBAVFORMAT_IDENT;
    uint8_t *p;

    /* a FLAC STREAMINFO block is exactly 34 bytes; anything else is invalid */
    if (extradata_size != 34)
        return -1;

    /* first packet: "fLaC" identification header, 51 bytes per ogg flac specs */
    oggstream->header_len[0] = 51;
    oggstream->header[0] = av_mallocz(51);
    if (!oggstream->header[0])
        return -1;
    p = oggstream->header[0];
    bytestream_put_byte(&p, 0x7F);
    bytestream_put_buffer(&p, "FLAC", 4);
    bytestream_put_byte(&p, 1); // major version
    bytestream_put_byte(&p, 0); // minor version
    bytestream_put_be16(&p, 1); // headers packets without this one
    bytestream_put_buffer(&p, "fLaC", 4);
    bytestream_put_byte(&p, 0x00); // streaminfo
    bytestream_put_be24(&p, 34);
    bytestream_put_buffer(&p, extradata, 34);

    /* second packet: Vorbis comment metadata block */
    oggstream->header_len[1] = 1+3+4+strlen(vendor)+4;
    oggstream->header[1] = av_mallocz(oggstream->header_len[1]);
    if (!oggstream->header[1]) {
        av_freep(&oggstream->header[0]); // do not leak the first header
        return -1;
    }
    p = oggstream->header[1];
    bytestream_put_byte(&p, 0x84); // last metadata block and vorbis comment
    bytestream_put_be24(&p, oggstream->header_len[1] - 4);
    bytestream_put_le32(&p, strlen(vendor));
    bytestream_put_buffer(&p, vendor, strlen(vendor));
    bytestream_put_le32(&p, 0); // user comment list length

    return 0;
}
/**
 * Ask the server to open the media file named by the URL path.
 * Builds a CS_PKT_MEDIA_FILE_REQUEST command and sends it.
 */
static int send_media_file_request(MMSContext *mms)
{
    start_command_packet(mms, CS_PKT_MEDIA_FILE_REQUEST);
    insert_command_prefixes(mms, 1, 0xffffffff);
    /* two zero dwords precede the path — presumably reserved; TODO confirm
     * against the MS-MMSP specification */
    bytestream_put_le32(&mms->write_out_ptr, 0);
    bytestream_put_le32(&mms->write_out_ptr, 0);
    mms_put_utf16(mms, mms->path + 1); // +1 for skip "/"

    return send_command_packet(mms);
}
/**
 * Read the next image of the ICO file as one packet.
 *
 * PNG-coded images are passed through verbatim; BMP-coded images are
 * prefixed with a synthesized 14-byte BITMAPFILEHEADER and have their
 * DIB header patched (offset to pixel data, palette count, and the
 * ICO-doubled height is halved).
 *
 * Fixes: reject BMP payloads smaller than the 40-byte BITMAPINFOHEADER
 * (the patching below would read/write out of bounds), and treat a short
 * avio_read() as an I/O error instead of patching uninitialized bytes.
 */
static int read_packet(AVFormatContext *s, AVPacket *pkt)
{
    IcoDemuxContext *ico = s->priv_data;
    IcoImage *image;
    AVIOContext *pb = s->pb;
    AVStream *st = s->streams[0];
    int ret;

    if (ico->current_image >= ico->nb_images)
        return AVERROR(EIO);

    image = &ico->images[ico->current_image];

    if ((ret = avio_seek(pb, image->offset, SEEK_SET)) < 0)
        return ret;

    if (s->streams[ico->current_image]->codec->codec_id == AV_CODEC_ID_PNG) {
        if ((ret = av_get_packet(pb, pkt, image->size)) < 0)
            return ret;
    } else {
        uint8_t *buf;
        /* the patching below assumes at least a full BITMAPINFOHEADER */
        if (image->size < 40)
            return AVERROR(EIO);
        if ((ret = av_new_packet(pkt, 14 + image->size)) < 0)
            return ret;
        buf = pkt->data;

        /* add BMP header */
        bytestream_put_byte(&buf, 'B');
        bytestream_put_byte(&buf, 'M');
        bytestream_put_le32(&buf, pkt->size);
        bytestream_put_le16(&buf, 0);
        bytestream_put_le16(&buf, 0);
        bytestream_put_le32(&buf, 0);   // bfOffBits, patched below

        ret = avio_read(pb, buf, image->size);
        if (ret < 0)
            return ret;
        if (ret != image->size)         // truncated input
            return AVERROR(EIO);

        st->codec->bits_per_coded_sample = AV_RL16(buf + 14);

        if (AV_RL32(buf + 32))
            image->nb_pal = AV_RL32(buf + 32);

        if (st->codec->bits_per_coded_sample <= 8 && !image->nb_pal) {
            image->nb_pal = 1 << st->codec->bits_per_coded_sample;
            AV_WL32(buf + 32, image->nb_pal);
        }

        /* bfOffBits: file header + info header + palette */
        AV_WL32(buf - 4, 14 + 40 + image->nb_pal * 4);
        /* ICO stores the doubled (image + mask) height */
        AV_WL32(buf + 8, AV_RL32(buf + 8) / 2);
    }

    pkt->stream_index = ico->current_image++;
    pkt->flags |= AV_PKT_FLAG_KEY;

    return 0;
}
/**
 * Ask the server to open the media file named by the URL path (MMST).
 * Builds a CS_PKT_MEDIA_FILE_REQUEST command and sends it.
 */
static int send_media_file_request(MMSTContext *mmst)
{
    int ret;
    MMSContext *mms = &mmst->mms;
    start_command_packet(mmst, CS_PKT_MEDIA_FILE_REQUEST);
    insert_command_prefixes(mms, 1, 0xffffffff);
    /* two zero dwords precede the path — presumably reserved; TODO confirm */
    bytestream_put_le32(&mms->write_out_ptr, 0);
    bytestream_put_le32(&mms->write_out_ptr, 0);
    if ((ret = mms_put_utf16(mms, mmst->path + 1)) < 0) // +1 for skip "/"
        return ret;

    return send_command_packet(mmst);
}
/**
 * RoQ DPCM audio encoder (old encode API): write the sound chunk header
 * and the initial sample bytes into the output buffer.
 * NOTE(review): SOURCE is truncated here — the function body continues
 * beyond this excerpt (after the dangling else below).
 */
static int roq_dpcm_encode_frame(AVCodecContext *avctx,
                                 unsigned char *frame, int buf_size, void *data)
{
    int i, samples, stereo, ch;
    short *in;
    unsigned char *out;

    ROQDPCMContext *context = avctx->priv_data;

    stereo = (avctx->channels == 2);

    if (stereo) {
        /* only the high bytes of the previous samples are carried over */
        context->lastSample[0] &= 0xFF00;
        context->lastSample[1] &= 0xFF00;
    }

    out = frame;
    in = data;

    /* chunk id: 0x21 = stereo DPCM, 0x20 = mono DPCM */
    bytestream_put_byte(&out, stereo ? 0x21 : 0x20);
    bytestream_put_byte(&out, 0x10);
    /* payload size: one DPCM byte per sample per channel */
    bytestream_put_le32(&out, avctx->frame_size*avctx->channels);

    if (stereo) {
        /* chunk argument: high bytes of the initial right/left samples */
        bytestream_put_byte(&out, (context->lastSample[1])>>8);
        bytestream_put_byte(&out, (context->lastSample[0])>>8);
    } else
/**
 * Ask the server to start sending media packets, beginning from the
 * current position, under a freshly incremented packet id.
 */
static int send_media_packet_request(MMSContext *mms)
{
    start_command_packet(mms, CS_PKT_START_FROM_PKT_ID);
    insert_command_prefixes(mms, 1, 0x0001FFFF);
    bytestream_put_le64(&mms->write_out_ptr, 0);          // seek timestamp
    bytestream_put_le32(&mms->write_out_ptr, 0xffffffff); // unknown
    bytestream_put_le32(&mms->write_out_ptr, 0xffffffff); // packet offset
    bytestream_put_byte(&mms->write_out_ptr, 0xff);       // max stream time limit
    bytestream_put_byte(&mms->write_out_ptr, 0xff);       // max stream time limit
    bytestream_put_byte(&mms->write_out_ptr, 0xff);       // max stream time limit
    bytestream_put_byte(&mms->write_out_ptr, 0x00);       // stream time limit flag

    mms->packet_id++;                                     // new packet_id
    bytestream_put_le32(&mms->write_out_ptr, mms->packet_id);
    return send_command_packet(mms);
}
/**
 * Send an RTMP packet over the connection: write a full (12-byte) chunk
 * header, then the payload split into chunks of at most chunk_size bytes,
 * each continuation preceded by a type-3 (0xC0) one-byte header.
 *
 * NOTE(review): mode is always RTMP_PS_TWELVEBYTES here (see the TODO),
 * so the inner mode checks never take their short-header branches, and
 * url_write() return values are not checked.
 *
 * @return 0 (always; prev_pkt is accepted but unused in this version)
 */
int ff_rtmp_packet_write(URLContext *h, RTMPPacket *pkt,
                         int chunk_size, RTMPPacket *prev_pkt)
{
    uint8_t pkt_hdr[16], *p = pkt_hdr;
    int mode = RTMP_PS_TWELVEBYTES;
    int off = 0;

    //TODO: header compression
    bytestream_put_byte(&p, pkt->channel_id | (mode << 6));
    if (mode != RTMP_PS_ONEBYTE) {
        bytestream_put_be24(&p, pkt->timestamp);
        if (mode != RTMP_PS_FOURBYTES) {
            bytestream_put_be24(&p, pkt->data_size);
            bytestream_put_byte(&p, pkt->type);
            if (mode == RTMP_PS_TWELVEBYTES)
                bytestream_put_le32(&p, pkt->extra);
        }
    }
    url_write(h, pkt_hdr, p-pkt_hdr);
    while (off < pkt->data_size) {
        int towrite = FFMIN(chunk_size, pkt->data_size - off);
        url_write(h, pkt->data + off, towrite);
        off += towrite;
        if (off < pkt->data_size) {
            /* continuation chunk: type-3 basic header, same channel */
            uint8_t marker = 0xC0 | pkt->channel_id;
            url_write(h, &marker, 1);
        }
    }
    return 0;
}
/**
 * Request the media header (the ASF header that precedes the media data).
 * Most of the dword values below are fixed magic from observed traffic;
 * their exact meaning is not documented here — TODO confirm against the
 * MS-MMSP specification.
 */
static int send_media_header_request(MMSContext *mms)
{
    start_command_packet(mms, CS_PKT_MEDIA_HEADER_REQUEST);
    insert_command_prefixes(mms, 1, 0);
    bytestream_put_le32(&mms->write_out_ptr, 0);
    bytestream_put_le32(&mms->write_out_ptr, 0x00800000);
    bytestream_put_le32(&mms->write_out_ptr, 0xffffffff);
    bytestream_put_le32(&mms->write_out_ptr, 0);
    bytestream_put_le32(&mms->write_out_ptr, 0);
    bytestream_put_le32(&mms->write_out_ptr, 0);

    // the media preroll value in milliseconds?
    bytestream_put_le32(&mms->write_out_ptr, 0);
    bytestream_put_le32(&mms->write_out_ptr, 0x40AC2000);
    bytestream_put_le32(&mms->write_out_ptr, 2);
    bytestream_put_le32(&mms->write_out_ptr, 0);

    return send_command_packet(mms);
}
/**
 * Tell the server which transport ("funnel") to use, identified by a
 * "\\ip\proto\port" UTF-16 string built from the local address and port.
 */
static int send_protocol_select(MMSContext *mms)
{
    char data_string[256];

    start_command_packet(mms, CS_PKT_PROTOCOL_SELECT);
    insert_command_prefixes(mms, 0, 0xffffffff);
    bytestream_put_le32(&mms->write_out_ptr, 0);          // maxFunnelBytes
    bytestream_put_le32(&mms->write_out_ptr, 0x00989680); // maxbitRate
    bytestream_put_le32(&mms->write_out_ptr, 2);          // funnelMode
    snprintf(data_string, sizeof(data_string), "\\\\%d.%d.%d.%d\\%s\\%d",
            (LOCAL_ADDRESS>>24)&0xff,
            (LOCAL_ADDRESS>>16)&0xff,
            (LOCAL_ADDRESS>>8)&0xff,
            LOCAL_ADDRESS&0xff,
            "TCP",                                        // or UDP
            LOCAL_PORT);

    mms_put_utf16(mms, data_string);
    return send_command_packet(mms);
}
/**
 * Serialize a VorbisComment block into *p, advancing *p past the data:
 * length-prefixed vendor string, then the comment count, then one
 * length-prefixed "key=value" entry per dictionary tag.
 *
 * @param p             in/out pointer to the destination buffer
 * @param m             metadata dictionary (may point to NULL)
 * @param vendor_string vendor identification string
 * @param count         number of comments to declare in the header
 * @return 0
 */
int ff_vorbiscomment_write(uint8_t **p, AVDictionary **m,
                           const char *vendor_string,
                           const unsigned count)
{
    size_t vendor_len = strlen(vendor_string);
    AVDictionaryEntry *entry = NULL;

    /* vendor string: 32-bit LE length followed by the raw bytes */
    bytestream_put_le32(p, vendor_len);
    bytestream_put_buffer(p, vendor_string, vendor_len);

    if (!*m) {
        /* no metadata at all: declare an empty user comment list */
        bytestream_put_le32(p, 0);
        return 0;
    }

    bytestream_put_le32(p, count);
    while ((entry = av_dict_get(*m, "", entry, AV_DICT_IGNORE_SUFFIX))) {
        unsigned int key_len   = strlen(entry->key);
        unsigned int value_len = strlen(entry->value);
        /* each user comment is length-prefixed "key=value" */
        bytestream_put_le32(p, key_len + 1 + value_len);
        bytestream_put_buffer(p, entry->key, key_len);
        bytestream_put_byte(p, '=');
        bytestream_put_buffer(p, entry->value, value_len);
    }
    return 0;
}
/**
 * Allocate and fill a minimal VorbisComment block, leaving 'offset'
 * zeroed bytes at the front for the caller's own packet header.
 *
 * @param offset     bytes to reserve before the comment data
 * @param bitexact   if set, use the fixed "ffmpeg" vendor string
 * @param header_len receives the total allocated size
 * @return newly allocated buffer, or NULL on allocation failure
 */
static uint8_t *ogg_write_vorbiscomment(int offset, int bitexact, int *header_len)
{
    const char *vendor_string = bitexact ? "ffmpeg" : LIBAVFORMAT_IDENT;
    int vendor_length = strlen(vendor_string);
    int total = offset + 4 + vendor_length + 4;
    uint8_t *buf, *cursor;

    buf = av_mallocz(total);
    if (!buf)
        return NULL;

    /* skip the reserved prefix, then write the comment block */
    cursor = buf + offset;
    bytestream_put_le32(&cursor, vendor_length);
    bytestream_put_buffer(&cursor, vendor_string, vendor_length);
    bytestream_put_le32(&cursor, 0); // empty user comment list

    *header_len = total;
    return buf;
}
/**
 * RoQ DPCM audio encoder (AVPacket API): buffer the first 8 input frames
 * and emit them as one packet, then encode subsequent frames directly.
 * NOTE(review): SOURCE is truncated here — the function body continues
 * beyond this excerpt (after the dangling else below).
 */
static int roq_dpcm_encode_frame(AVCodecContext *avctx, AVPacket *avpkt,
                                 const AVFrame *frame, int *got_packet_ptr)
{
    int i, stereo, data_size, ret;
    const int16_t *in = frame ? (const int16_t *)frame->data[0] : NULL;
    uint8_t *out;
    ROQDPCMContext *context = avctx->priv_data;

    stereo = (avctx->channels == 2);

    /* flush call with nothing left to flush */
    if (!in && context->input_frames >= 8)
        return 0;

    if (in && context->input_frames < 8) {
        /* accumulate the first 8 frames into the frame buffer */
        memcpy(&context->frame_buffer[context->buffered_samples * avctx->channels],
               in, avctx->frame_size * avctx->channels * sizeof(*in));
        context->buffered_samples += avctx->frame_size;
        if (context->input_frames == 0)
            context->first_pts = frame->pts;
        if (context->input_frames < 7) {
            /* not enough buffered yet: no packet this call */
            context->input_frames++;
            return 0;
        }
    }
    if (context->input_frames < 8)
        in = context->frame_buffer;

    if (stereo) {
        /* only the high bytes of the previous samples are carried over */
        context->lastSample[0] &= 0xFF00;
        context->lastSample[1] &= 0xFF00;
    }

    if (context->input_frames == 7)
        data_size = avctx->channels * context->buffered_samples;
    else
        data_size = avctx->channels * avctx->frame_size;

    if ((ret = ff_alloc_packet(avpkt, ROQ_HEADER_SIZE + data_size))) {
        av_log(avctx, AV_LOG_ERROR, "Error getting output packet\n");
        return ret;
    }
    out = avpkt->data;

    /* chunk id: 0x21 = stereo DPCM, 0x20 = mono DPCM */
    bytestream_put_byte(&out, stereo ? 0x21 : 0x20);
    bytestream_put_byte(&out, 0x10);
    bytestream_put_le32(&out, data_size);

    if (stereo) {
        /* chunk argument: high bytes of the initial right/left samples */
        bytestream_put_byte(&out, (context->lastSample[1])>>8);
        bytestream_put_byte(&out, (context->lastSample[0])>>8);
    } else
/** Send MMST stream selection command based on the AVStream->discard values. */
static int send_stream_selection_request(MMSTContext *mmst)
{
    int i;
    MMSContext *mms = &mmst->mms;

    //  send the streams we want back...
    start_command_packet(mmst, CS_PKT_STREAM_ID_REQUEST);
    bytestream_put_le32(&mms->write_out_ptr, mms->stream_num); // stream nums
    /* NOTE(review): despite the doc comment, selection value 0 is written
     * for every stream here; AVStream->discard is not consulted in this
     * version — confirm against callers. */
    for(i= 0; i<mms->stream_num; i++) {
        bytestream_put_le16(&mms->write_out_ptr, 0xffff);             // flags
        bytestream_put_le16(&mms->write_out_ptr, mms->streams[i].id); // stream id
        bytestream_put_le16(&mms->write_out_ptr, 0);                  // selection
    }
    return send_command_packet(mmst);
}
/** Send the initial handshake. */
static int send_startup_packet(MMSContext *mms)
{
    char data_string[256];

    // SubscriberName is defined in MS specification linked below.
    // The guid value can be any valid value.
    // http://download.microsoft.com/
    // download/9/5/E/95EF66AF-9026-4BB0-A41D-A4F81802D92C/%5BMS-WMSP%5D.pdf
    snprintf(data_string, sizeof(data_string),
            "NSPlayer/7.0.0.1956; {%s}; Host: %s",
            "7E667F5D-A661-495E-A512-F55686DDA178", mms->host);

    start_command_packet(mms, CS_PKT_INITIAL);
    insert_command_prefixes(mms, 0, 0x0004000b);
    /* client protocol/version dword preceding the player string */
    bytestream_put_le32(&mms->write_out_ptr, 0x0003001c);
    mms_put_utf16(mms, data_string);
    return send_command_packet(mms);
}
/** Create MMST command packet header */
static void start_command_packet(MMSContext *mms, MMSCSPacketType packet_type)
{
    /* rewind the output cursor and lay down the fixed command header;
     * the body is appended by the caller via mms->write_out_ptr */
    mms->write_out_ptr = mms->out_buffer;

    bytestream_put_le32(&mms->write_out_ptr, 1);          // start sequence
    bytestream_put_le32(&mms->write_out_ptr, 0xb00bface); // protocol signature
    // Length starts from after the protocol type bytes
    bytestream_put_le32(&mms->write_out_ptr, 0);          // length placeholder — presumably patched at send time; TODO confirm
    bytestream_put_le32(&mms->write_out_ptr, MKTAG('M','M','S',' ')); // protocol type
    bytestream_put_le32(&mms->write_out_ptr, 0);          // length remaining placeholder
    bytestream_put_le32(&mms->write_out_ptr, mms->outgoing_packet_seq++); // sequence number
    bytestream_put_le64(&mms->write_out_ptr, 0);          // timestamp
    bytestream_put_le32(&mms->write_out_ptr, 0);          // length remaining
    bytestream_put_le16(&mms->write_out_ptr, packet_type);
    bytestream_put_le16(&mms->write_out_ptr, 3);          // direction to server
}
/**
 * Pack one RGB48 frame into 10-bit-per-component words (r210/R10K big
 * endian, AVRP little endian) and emit it as a single keyframe packet.
 */
static int encode_frame(AVCodecContext *avctx, AVPacket *pkt,
                        const AVFrame *pic, int *got_packet)
{
    /* R10K rows are unpadded; r210/AVRP rows are padded to 64 pixels */
    int aligned_width = FFALIGN(avctx->width,
                                avctx->codec_id == AV_CODEC_ID_R10K ? 1 : 64);
    int row_pad = (aligned_width - avctx->width) * 4;
    uint8_t *src_row, *out;
    int x, y, ret;

    if ((ret = ff_alloc_packet2(avctx, pkt, 4 * aligned_width * avctx->height)) < 0)
        return ret;

    avctx->coded_frame->key_frame = 1;
    avctx->coded_frame->pict_type = AV_PICTURE_TYPE_I;

    src_row = pic->data[0];
    out     = pkt->data;

    for (y = 0; y < avctx->height; y++) {
        uint16_t *in = (uint16_t *)src_row;
        for (x = 0; x < avctx->width; x++) {
            uint32_t word;
            /* drop the low 6 bits: 16-bit samples -> 10-bit components */
            uint16_t red   = *in++ >> 6;
            uint16_t green = *in++ >> 6;
            uint16_t blue  = *in++ >> 6;
            if (avctx->codec_id == AV_CODEC_ID_R210)
                word = (red << 20) | (green << 10) | blue;        // components in bits 29..0
            else
                word = (red << 22) | (green << 12) | (blue << 2); // components in bits 31..2
            if (avctx->codec_id == AV_CODEC_ID_AVRP)
                bytestream_put_le32(&out, word);
            else
                bytestream_put_be32(&out, word);
        }
        /* zero the row padding */
        memset(out, 0, row_pad);
        out += row_pad;
        src_row += pic->linesize[0];
    }

    pkt->flags |= AV_PKT_FLAG_KEY;
    *got_packet = 1;
    return 0;
}
/**
 * Write codebook chunk
 *
 * Emits the RoQ_QUAD_CODEBOOK chunk: header, then the 2x2 codebook
 * entries (4 luma bytes + U + V each), then the 4x4 entries as four
 * remapped 2x2 indices each. Nothing is written when numCB2 is zero.
 */
static void write_codebooks(RoqContext *enc, RoqTempdata *tempData)
{
    int i, j;
    uint8_t **outp= &enc->out_buf;

    if (tempData->numCB2) {
        /* chunk header: id, payload size, then counts as the two argument bytes */
        bytestream_put_le16(outp, RoQ_QUAD_CODEBOOK);
        bytestream_put_le32(outp, tempData->numCB2*6 + tempData->numCB4*4);
        bytestream_put_byte(outp, tempData->numCB4);
        bytestream_put_byte(outp, tempData->numCB2);

        /* 2x2 cells: 4 Y samples plus one U and one V, in f2i2 order */
        for (i=0; i<tempData->numCB2; i++) {
            bytestream_put_buffer(outp, enc->cb2x2[tempData->f2i2[i]].y, 4);
            bytestream_put_byte(outp, enc->cb2x2[tempData->f2i2[i]].u);
            bytestream_put_byte(outp, enc->cb2x2[tempData->f2i2[i]].v);
        }

        /* 4x4 cells: four 2x2 indices each, remapped through i2f2 */
        for (i=0; i<tempData->numCB4; i++)
            for (j=0; j<4; j++)
                bytestream_put_byte(outp, tempData->i2f2[enc->cb4x4[tempData->f2i4[i]].idx[j]]);
    }
}
/**
 * Emit the RoQ video info chunk: chunk id, an 8-byte payload size, two
 * unused argument bytes, then width, height and four trailing bytes that
 * Quake 3 ignores but the original encoder always writes.
 */
static void roq_write_video_info_chunk(RoqContext *enc)
{
    /* chunk header: id, payload size (8), two unused argument bytes */
    bytestream_put_le16(&enc->out_buf, RoQ_INFO);
    bytestream_put_le32(&enc->out_buf, 8);
    bytestream_put_le16(&enc->out_buf, 0x0000);

    /* payload: dimensions... */
    bytestream_put_le16(&enc->out_buf, enc->width);
    bytestream_put_le16(&enc->out_buf, enc->height);

    /* ...plus four bytes unused in Quake 3, mimicking the real encoder */
    bytestream_put_byte(&enc->out_buf, 0x08);
    bytestream_put_byte(&enc->out_buf, 0x00);
    bytestream_put_byte(&enc->out_buf, 0x04);
    bytestream_put_byte(&enc->out_buf, 0x00);
}
/**
 * Write n bits of value into the bit buffer, inserting new bits above the
 * ones already accumulated (LSB-first packing); when the 32-bit buffer
 * fills, it is flushed to the output as a little-endian dword.
 */
static void gif_put_bits_rev(PutBitContext *s, int n, unsigned int value)
{
    unsigned int bit_buf;
    int bit_cnt;

    //    printf("put_bits=%d %x\n", n, value);
    assert(n == 32 || value < (1U << n));

    bit_buf = s->bit_buf;
    /* number of bits already in the buffer */
    bit_cnt = 32 - s->bit_left; /* XXX:lazyness... was = s->bit_cnt; */

    //    printf("n=%d value=%x cnt=%d buf=%x\n", n, value, bit_cnt, bit_buf);
    /* XXX: optimize */
    if (n < (32-bit_cnt)) {
        /* fits entirely: place the new bits above the existing ones */
        bit_buf |= value << (bit_cnt);
        bit_cnt+=n;
    } else {
        /* buffer full: flush 32 bits, keep the overflow bits of value */
        bit_buf |= value << (bit_cnt);
        bytestream_put_le32(&s->buf_ptr, bit_buf);
        //printf("bitbuf = %08x\n", bit_buf);
        if (s->buf_ptr >= s->buf_end)
            puts("bit buffer overflow !!"); // should never happen ! who got rid of the callback ???
        //            flush_buffer_rev(s);
        bit_cnt=bit_cnt + n - 32;
        if (bit_cnt == 0) {
            bit_buf = 0;
        } else {
            /* the bits of value that did not fit start the next dword */
            bit_buf = value >> (n - bit_cnt);
        }
    }

    s->bit_buf = bit_buf;
    s->bit_left = 32 - bit_cnt;
}
/**
 * Encode one frame as a complete BMP file in a single packet:
 * BITMAPFILEHEADER + BITMAPINFOHEADER + optional palette/bitfield masks +
 * bottom-to-top image rows padded to 4-byte multiples.
 */
static int bmp_encode_frame(AVCodecContext *avctx, AVPacket *pkt,
                            const AVFrame *pict, int *got_packet)
{
    BMPContext *s = avctx->priv_data;
    AVFrame * const p = &s->picture;
    int n_bytes_image, n_bytes_per_row, n_bytes, i, n, hsize, ret;
    const uint32_t *pal = NULL;
    uint32_t palette256[256];
    int pad_bytes_per_row, pal_entries = 0, compression = BMP_RGB;
    int bit_count = avctx->bits_per_coded_sample;
    uint8_t *ptr, *buf;

    *p = *pict;
    p->pict_type= AV_PICTURE_TYPE_I;
    p->key_frame= 1;
    /* pick the palette or bitfield masks according to the pixel format */
    switch (avctx->pix_fmt) {
    case AV_PIX_FMT_RGB444:
        compression = BMP_BITFIELDS;
        pal = rgb444_masks; // abuse pal to hold color masks
        pal_entries = 3;
        break;
    case AV_PIX_FMT_RGB565:
        compression = BMP_BITFIELDS;
        pal = rgb565_masks; // abuse pal to hold color masks
        pal_entries = 3;
        break;
    case AV_PIX_FMT_RGB8:
    case AV_PIX_FMT_BGR8:
    case AV_PIX_FMT_RGB4_BYTE:
    case AV_PIX_FMT_BGR4_BYTE:
    case AV_PIX_FMT_GRAY8:
        av_assert1(bit_count == 8);
        ff_set_systematic_pal2(palette256, avctx->pix_fmt);
        pal = palette256;
        break;
    case AV_PIX_FMT_PAL8:
        pal = (uint32_t *)p->data[1];
        break;
    case AV_PIX_FMT_MONOBLACK:
        pal = monoblack_pal;
        break;
    }
    if (pal && !pal_entries) pal_entries = 1 << bit_count;
    /* rows are padded to a multiple of 4 bytes */
    n_bytes_per_row = ((int64_t)avctx->width * (int64_t)bit_count + 7LL) >> 3LL;
    pad_bytes_per_row = (4 - n_bytes_per_row) & 3;
    n_bytes_image = avctx->height * (n_bytes_per_row + pad_bytes_per_row);
    // STRUCTURE.field refer to the MSVC documentation for BITMAPFILEHEADER
    // and related pages.
#define SIZE_BITMAPFILEHEADER 14
#define SIZE_BITMAPINFOHEADER 40
    hsize = SIZE_BITMAPFILEHEADER + SIZE_BITMAPINFOHEADER + (pal_entries << 2);
    n_bytes = n_bytes_image + hsize;
    if ((ret = ff_alloc_packet2(avctx, pkt, n_bytes)) < 0)
        return ret;
    buf = pkt->data;
    bytestream_put_byte(&buf, 'B');                   // BITMAPFILEHEADER.bfType
    bytestream_put_byte(&buf, 'M');                   // do.
    bytestream_put_le32(&buf, n_bytes);               // BITMAPFILEHEADER.bfSize
    bytestream_put_le16(&buf, 0);                     // BITMAPFILEHEADER.bfReserved1
    bytestream_put_le16(&buf, 0);                     // BITMAPFILEHEADER.bfReserved2
    bytestream_put_le32(&buf, hsize);                 // BITMAPFILEHEADER.bfOffBits
    bytestream_put_le32(&buf, SIZE_BITMAPINFOHEADER); // BITMAPINFOHEADER.biSize
    bytestream_put_le32(&buf, avctx->width);          // BITMAPINFOHEADER.biWidth
    bytestream_put_le32(&buf, avctx->height);         // BITMAPINFOHEADER.biHeight
    bytestream_put_le16(&buf, 1);                     // BITMAPINFOHEADER.biPlanes
    bytestream_put_le16(&buf, bit_count);             // BITMAPINFOHEADER.biBitCount
    bytestream_put_le32(&buf, compression);           // BITMAPINFOHEADER.biCompression
    bytestream_put_le32(&buf, n_bytes_image);         // BITMAPINFOHEADER.biSizeImage
    bytestream_put_le32(&buf, 0);                     // BITMAPINFOHEADER.biXPelsPerMeter
    bytestream_put_le32(&buf, 0);                     // BITMAPINFOHEADER.biYPelsPerMeter
    bytestream_put_le32(&buf, 0);                     // BITMAPINFOHEADER.biClrUsed
    bytestream_put_le32(&buf, 0);                     // BITMAPINFOHEADER.biClrImportant
    /* palette entries (or bitfield masks), alpha byte forced to zero */
    for (i = 0; i < pal_entries; i++)
        bytestream_put_le32(&buf, pal[i] & 0xFFFFFF);
    // BMP files are bottom-to-top so we start from the end...
    ptr = p->data[0] + (avctx->height - 1) * p->linesize[0];
    buf = pkt->data + hsize;
    for(i = 0; i < avctx->height; i++) {
        if (bit_count == 16) {
            /* copy 16-bit pixels as explicit little-endian words */
            const uint16_t *src = (const uint16_t *) ptr;
            uint16_t *dst = (uint16_t *) buf;
            for(n = 0; n < avctx->width; n++)
                AV_WL16(dst + n, src[n]);
        } else {
            memcpy(buf, ptr, n_bytes_per_row);
        }
        buf += n_bytes_per_row;
        memset(buf, 0, pad_bytes_per_row);
        buf += pad_bytes_per_row;
        ptr -= p->linesize[0]; // ... and go back
    }

    pkt->flags |= AV_PKT_FLAG_KEY;
    *got_packet = 1;
    return 0;
}
/* returns the size or -1 on error */
/**
 * Write a WAVEFORMAT(EXTENSIBLE) header for the given codec context to pb:
 * format tag, channels, sample rate, byte rate, block align, bits per
 * sample, and any codec-specific extradata. The header is padded to an
 * even number of bytes.
 */
int ff_put_wav_header(AVIOContext *pb, AVCodecContext *enc)
{
    int bps, blkalign, bytespersec, frame_size;
    int hdrsize = 18; /* base WAVEFORMATEX size, grown by extradata below */
    int waveformatextensible;
    uint8_t temp[256];
    uint8_t *riff_extradata = temp;
    uint8_t *riff_extradata_start = temp;

    if (!enc->codec_tag || enc->codec_tag > 0xffff)
        return -1;

    /* We use the known constant frame size for the codec if known, otherwise
     * fall back on using AVCodecContext.frame_size, which is not as reliable
     * for indicating packet duration. */
    frame_size = av_get_audio_frame_duration(enc, enc->block_align);

    /* WAVEFORMATEXTENSIBLE is needed for >2 channels, high sample rates
     * or samples wider than 16 bits */
    waveformatextensible = (enc->channels > 2 && enc->channel_layout) ||
                           enc->sample_rate > 48000 ||
                           av_get_bits_per_sample(enc->codec_id) > 16;

    if (waveformatextensible)
        avio_wl16(pb, 0xfffe); /* WAVE_FORMAT_EXTENSIBLE tag */
    else
        avio_wl16(pb, enc->codec_tag);

    avio_wl16(pb, enc->channels);
    avio_wl32(pb, enc->sample_rate);
    if (enc->codec_id == AV_CODEC_ID_MP2 ||
        enc->codec_id == AV_CODEC_ID_MP3 ||
        enc->codec_id == AV_CODEC_ID_GSM_MS) {
        bps = 0;
    } else {
        if (!(bps = av_get_bits_per_sample(enc->codec_id))) {
            if (enc->bits_per_coded_sample)
                bps = enc->bits_per_coded_sample;
            else
                bps = 16; // default to 16
        }
    }
    if (bps != enc->bits_per_coded_sample && enc->bits_per_coded_sample) {
        av_log(enc, AV_LOG_WARNING,
               "requested bits_per_coded_sample (%d) "
               "and actually stored (%d) differ\n",
               enc->bits_per_coded_sample, bps);
    }

    /* per-codec block alignment */
    if (enc->codec_id == AV_CODEC_ID_MP2) {
        blkalign = frame_size;
    } else if (enc->codec_id == AV_CODEC_ID_MP3) {
        blkalign = 576 * (enc->sample_rate <= 24000 ? 1 : 2);
    } else if (enc->codec_id == AV_CODEC_ID_AC3) {
        blkalign = 3840; /* maximum bytes per frame */
    } else if (enc->block_align != 0) { /* specified by the codec */
        blkalign = enc->block_align;
    } else
        blkalign = bps * enc->channels / av_gcd(8, bps);
    if (enc->codec_id == AV_CODEC_ID_PCM_U8 ||
        enc->codec_id == AV_CODEC_ID_PCM_S24LE ||
        enc->codec_id == AV_CODEC_ID_PCM_S32LE ||
        enc->codec_id == AV_CODEC_ID_PCM_F32LE ||
        enc->codec_id == AV_CODEC_ID_PCM_F64LE ||
        enc->codec_id == AV_CODEC_ID_PCM_S16LE) {
        bytespersec = enc->sample_rate * blkalign;
    } else {
        bytespersec = enc->bit_rate / 8;
    }
    avio_wl32(pb, bytespersec); /* bytes per second */
    avio_wl16(pb, blkalign);    /* block align */
    avio_wl16(pb, bps);         /* bits per sample */

    /* codec-specific extradata, staged in temp[] and written after cbSize */
    if (enc->codec_id == AV_CODEC_ID_MP3) {
        hdrsize += 12;
        bytestream_put_le16(&riff_extradata, 1);    /* wID */
        bytestream_put_le32(&riff_extradata, 2);    /* fdwFlags */
        bytestream_put_le16(&riff_extradata, 1152); /* nBlockSize */
        bytestream_put_le16(&riff_extradata, 1);    /* nFramesPerBlock */
        bytestream_put_le16(&riff_extradata, 1393); /* nCodecDelay */
    } else if (enc->codec_id == AV_CODEC_ID_MP2) {
        hdrsize += 22;
        /* fwHeadLayer */
        bytestream_put_le16(&riff_extradata, 2);
        /* dwHeadBitrate */
        bytestream_put_le32(&riff_extradata, enc->bit_rate);
        /* fwHeadMode */
        bytestream_put_le16(&riff_extradata, enc->channels == 2 ? 1 : 8);
        /* fwHeadModeExt */
        bytestream_put_le16(&riff_extradata, 0);
        /* wHeadEmphasis */
        bytestream_put_le16(&riff_extradata, 1);
        /* fwHeadFlags */
        bytestream_put_le16(&riff_extradata, 16);
        /* dwPTSLow */
        bytestream_put_le32(&riff_extradata, 0);
        /* dwPTSHigh */
        bytestream_put_le32(&riff_extradata, 0);
    } else if (enc->codec_id == AV_CODEC_ID_GSM_MS ||
               enc->codec_id == AV_CODEC_ID_ADPCM_IMA_WAV) {
        hdrsize += 2;
        /* wSamplesPerBlock */
        bytestream_put_le16(&riff_extradata, frame_size);
    } else if (enc->extradata_size) {
        riff_extradata_start = enc->extradata;
        riff_extradata = enc->extradata + enc->extradata_size;
        hdrsize += enc->extradata_size;
    }
    /* write WAVEFORMATEXTENSIBLE extensions */
    if (waveformatextensible) {
        hdrsize += 22;
        /* 22 is WAVEFORMATEXTENSIBLE size */
        avio_wl16(pb, riff_extradata - riff_extradata_start + 22);
        /* ValidBitsPerSample || SamplesPerBlock || Reserved */
        avio_wl16(pb, bps);
        /* dwChannelMask */
        avio_wl32(pb, enc->channel_layout);
        /* GUID + next 3 */
        avio_wl32(pb, enc->codec_tag);
        avio_wl32(pb, 0x00100000);
        avio_wl32(pb, 0xAA000080);
        avio_wl32(pb, 0x719B3800);
    } else {
        avio_wl16(pb, riff_extradata - riff_extradata_start); /* cbSize */
    }
    avio_write(pb, riff_extradata_start, riff_extradata - riff_extradata_start);
    /* pad the header to an even size */
    if (hdrsize & 1) {
        hdrsize++;
        avio_w8(pb, 0);
    }

    return hdrsize;
}
/**
 * Write the RoQ_QUAD_VQ video chunk for the current frame and, at the same
 * time, reconstruct the encoder's reference frame by applying the chosen
 * coding (skip / motion / codebook) for each 8x8 cel and its 4x4/2x2
 * sub-cels. Type codes and argument bytes are interleaved through the
 * CodingSpool so they land in stream order.
 */
static void reconstruct_and_encode_image(RoqContext *enc, RoqTempdata *tempData, int w, int h, int numBlocks)
{
    int i, j, k;
    int x, y;
    int subX, subY;
    int dist=0;

    roq_qcell *qcell;
    CelEvaluation *eval;

    CodingSpool spool;

    /* the spool batches 2-bit type codes and flushes them, together with
     * their pending argument bytes, into enc->out_buf */
    spool.typeSpool=0;
    spool.typeSpoolLength=0;
    spool.args = spool.argumentSpool;
    spool.pout = &enc->out_buf;

    if (tempData->used_option[RoQ_ID_CCC]%2)
        tempData->mainChunkSize+=8; //FIXME

    /* Write the video chunk header */
    bytestream_put_le16(&enc->out_buf, RoQ_QUAD_VQ);
    bytestream_put_le32(&enc->out_buf, tempData->mainChunkSize/8);
    bytestream_put_byte(&enc->out_buf, 0x0);
    bytestream_put_byte(&enc->out_buf, 0x0);

    for (i=0; i<numBlocks; i++) {
        eval = tempData->cel_evals + i;

        x = eval->sourceX;
        y = eval->sourceY;
        dist += eval->eval_dist[eval->best_coding];

        switch (eval->best_coding) {
        case RoQ_ID_MOT:
            /* skip: reuse the previous frame's 8x8 block unchanged */
            write_typecode(&spool, RoQ_ID_MOT);
            break;

        case RoQ_ID_FCC:
            /* motion-compensated 8x8 block */
            bytestream_put_byte(&spool.args, motion_arg(eval->motion));
            write_typecode(&spool, RoQ_ID_FCC);
            ff_apply_motion_8x8(enc, x, y, eval->motion.d[0], eval->motion.d[1]);
            break;

        case RoQ_ID_SLD:
            /* whole 8x8 block from one upsampled 4x4 codebook entry */
            bytestream_put_byte(&spool.args, tempData->i2f4[eval->cbEntry]);
            write_typecode(&spool, RoQ_ID_SLD);
            qcell = enc->cb4x4 + eval->cbEntry;
            ff_apply_vector_4x4(enc, x  , y  , enc->cb2x2 + qcell->idx[0]);
            ff_apply_vector_4x4(enc, x+4, y  , enc->cb2x2 + qcell->idx[1]);
            ff_apply_vector_4x4(enc, x  , y+4, enc->cb2x2 + qcell->idx[2]);
            ff_apply_vector_4x4(enc, x+4, y+4, enc->cb2x2 + qcell->idx[3]);
            break;

        case RoQ_ID_CCC:
            /* subdivide into four 4x4 sub-cels, each coded independently */
            write_typecode(&spool, RoQ_ID_CCC);

            for (j=0; j<4; j++) {
                subX = x + 4*(j&1);
                subY = y + 2*(j&2);

                switch(eval->subCels[j].best_coding) {
                case RoQ_ID_MOT:
                    break;

                case RoQ_ID_FCC:
                    bytestream_put_byte(&spool.args, motion_arg(eval->subCels[j].motion));
                    ff_apply_motion_4x4(enc, subX, subY,
                                        eval->subCels[j].motion.d[0],
                                        eval->subCels[j].motion.d[1]);
                    break;

                case RoQ_ID_SLD:
                    bytestream_put_byte(&spool.args, tempData->i2f4[eval->subCels[j].cbEntry]);
                    qcell = enc->cb4x4 + eval->subCels[j].cbEntry;
                    ff_apply_vector_2x2(enc, subX  , subY  , enc->cb2x2 + qcell->idx[0]);
                    ff_apply_vector_2x2(enc, subX+2, subY  , enc->cb2x2 + qcell->idx[1]);
                    ff_apply_vector_2x2(enc, subX  , subY+2, enc->cb2x2 + qcell->idx[2]);
                    ff_apply_vector_2x2(enc, subX+2, subY+2, enc->cb2x2 + qcell->idx[3]);
                    break;

                case RoQ_ID_CCC:
                    /* four explicit 2x2 codebook indices */
                    for (k=0; k<4; k++) {
                        int cb_idx = eval->subCels[j].subCels[k];
                        bytestream_put_byte(&spool.args, tempData->i2f2[cb_idx]);
                        ff_apply_vector_2x2(enc, subX + 2*(k&1), subY + (k&2),
                                            enc->cb2x2 + cb_idx);
                    }
                    break;
                }
                write_typecode(&spool, eval->subCels[j].best_coding);
            }
            break;
        }
    }

    /* Flush the remainder of the argument/type spool */
    while (spool.typeSpoolLength)
        write_typecode(&spool, 0x0);

#if 0
    uint8_t *fdata[3] = {enc->frame_to_enc->data[0],
                         enc->frame_to_enc->data[1],
                         enc->frame_to_enc->data[2]};
    uint8_t *cdata[3] = {enc->current_frame->data[0],
                         enc->current_frame->data[1],
                         enc->current_frame->data[2]};
    av_log(enc->avctx, AV_LOG_ERROR, "Expected distortion: %i Actual: %i\n",
           dist,
           block_sse(fdata, cdata, 0, 0, 0, 0,
                     enc->frame_to_enc->linesize,
                     enc->current_frame->linesize,
                     enc->width));  //WARNING: Square dimensions implied...
#endif
}
/** Add prefixes to MMST command packet. */
static void insert_command_prefixes(MMSContext *mms,
        uint32_t prefix1, uint32_t prefix2)
{
    /* the two prefix dwords are appended verbatim after the command header */
    bytestream_put_le32(&mms->write_out_ptr, prefix1); // first prefix
    bytestream_put_le32(&mms->write_out_ptr, prefix2); // second prefix
}
/**
 * Encode one 24-bit RGB frame as a complete BMP file into buf (old encode
 * API): BITMAPFILEHEADER + BITMAPINFOHEADER, then bottom-to-top rows
 * padded to 4-byte multiples.
 *
 * @return the number of bytes written, or -1 if buf_size is too small
 */
static int bmp_encode_frame(AVCodecContext *avctx, unsigned char *buf, int buf_size, void *data){
    BMPContext *s = avctx->priv_data;
    AVFrame *pict = data;
    AVFrame * const p= (AVFrame*)&s->picture;
    int n_bytes_image, n_bytes_per_row, n_bytes, i, n, hsize;
    uint8_t *ptr;
    unsigned char* buf0 = buf;
    *p = *pict;
    p->pict_type= FF_I_TYPE;
    p->key_frame= 1;
    /* 3 bytes per pixel, rows padded up to a multiple of 4 bytes */
    n_bytes_per_row = (avctx->width*3 + 3) & ~3;
    n_bytes_image = avctx->height*n_bytes_per_row;

    // STRUCTURE.field refer to the MSVC documentation for BITMAPFILEHEADER
    // and related pages.
#define SIZE_BITMAPFILEHEADER 14
#define SIZE_BITMAPINFOHEADER 40
    hsize = SIZE_BITMAPFILEHEADER + SIZE_BITMAPINFOHEADER;
    n_bytes = n_bytes_image + hsize;
    if(n_bytes>buf_size) {
        av_log(avctx, AV_LOG_ERROR, "buf size too small (need %d, got %d)\n", n_bytes, buf_size);
        return -1;
    }
    bytestream_put_byte(&buf, 'B');                   // BITMAPFILEHEADER.bfType
    bytestream_put_byte(&buf, 'M');                   // do.
    bytestream_put_le32(&buf, n_bytes);               // BITMAPFILEHEADER.bfSize
    bytestream_put_le16(&buf, 0);                     // BITMAPFILEHEADER.bfReserved1
    bytestream_put_le16(&buf, 0);                     // BITMAPFILEHEADER.bfReserved2
    bytestream_put_le32(&buf, hsize);                 // BITMAPFILEHEADER.bfOffBits
    bytestream_put_le32(&buf, SIZE_BITMAPINFOHEADER); // BITMAPINFOHEADER.biSize
    bytestream_put_le32(&buf, avctx->width);          // BITMAPINFOHEADER.biWidth
    bytestream_put_le32(&buf, avctx->height);         // BITMAPINFOHEADER.biHeight
    bytestream_put_le16(&buf, 1);                     // BITMAPINFOHEADER.biPlanes
    bytestream_put_le16(&buf, 24);                    // BITMAPINFOHEADER.biBitCount
    bytestream_put_le32(&buf, BMP_RGB);               // BITMAPINFOHEADER.biCompression
    bytestream_put_le32(&buf, n_bytes_image);         // BITMAPINFOHEADER.biSizeImage
    bytestream_put_le32(&buf, 0);                     // BITMAPINFOHEADER.biXPelsPerMeter
    bytestream_put_le32(&buf, 0);                     // BITMAPINFOHEADER.biYPelsPerMeter
    bytestream_put_le32(&buf, 0);                     // BITMAPINFOHEADER.biClrUsed
    bytestream_put_le32(&buf, 0);                     // BITMAPINFOHEADER.biClrImportant
    // BMP files are bottom-to-top so we start from the end...
    ptr = p->data[0] + (avctx->height - 1) * p->linesize[0];
    buf = buf0 + hsize;
    for(i = 0; i < avctx->height; i++) {
        n = 3*avctx->width;
        memcpy(buf, ptr, n);
        buf += n;
        /* zero the row padding */
        memset(buf, 0, n_bytes_per_row-n);
        buf += n_bytes_per_row-n;
        ptr -= p->linesize[0]; // ... and go back
    }
    return n_bytes;
}
/**
 * Encode one frame in the XKCD format: a 14-byte header ("XKCD" magic,
 * total file size, width, height, bits per pixel) followed by the image
 * rows, each zero-padded to a multiple of 4 bytes.
 */
static int xkcd_encode_frame(AVCodecContext *avctx, AVPacket *pkt,
                             const AVFrame *pict, int *got_packet)
{
    const AVFrame * const frame = pict;
    int bit_count = avctx->bits_per_coded_sample;
    int bytes_per_row, pad_bytes_per_row, image_bytes, file_bytes, row, ret;
    uint8_t *dst;
    const uint8_t *src;

    avctx->coded_frame->pict_type = AV_PICTURE_TYPE_I;
    avctx->coded_frame->key_frame = 1;

    /* bytes needed for one row of pixels (rounded up to whole bytes)... */
    bytes_per_row = ((int64_t)avctx->width * (int64_t)bit_count + 7LL) >> 3LL;
    /* ...plus padding so each stored row is a multiple of 4 bytes */
    pad_bytes_per_row = (4 - bytes_per_row) & 3;

    image_bytes = avctx->height * (bytes_per_row + pad_bytes_per_row);
    file_bytes  = image_bytes + 14; /* 14 = header size */

    if ((ret = ff_alloc_packet2(avctx, pkt, file_bytes)) < 0)
        return ret;
    dst = pkt->data;

    /* file header */
    bytestream_put_byte(&dst, 'X');               // magic
    bytestream_put_byte(&dst, 'K');
    bytestream_put_byte(&dst, 'C');
    bytestream_put_byte(&dst, 'D');
    bytestream_put_le32(&dst, file_bytes);        // size of entire file
    bytestream_put_le16(&dst, avctx->width);      // width in pixels
    bytestream_put_le16(&dst, avctx->height);     // height in pixels
    bytestream_put_le16(&dst, bit_count);         // bits per pixel

    /* image data, one zero-padded row at a time */
    src = frame->data[0];
    for (row = 0; row < avctx->height; row++) {
        memcpy(dst, src, bytes_per_row);
        dst += bytes_per_row;
        memset(dst, 0, pad_bytes_per_row);
        dst += pad_bytes_per_row;
        src += frame->linesize[0];
    }

    pkt->flags |= AV_PKT_FLAG_KEY;
    *got_packet = 1;
    return 0;
}
/* returns the size or -1 on error */
/**
 * Write a WAVEFORMAT / WAVEFORMATEX / WAVEFORMATEXTENSIBLE header for the
 * given codec context to pb, including codec-specific extradata. The
 * actual size is measured with avio_tell() and padded to an even number
 * of bytes.
 */
int ff_put_wav_header(AVIOContext *pb, AVCodecContext *enc, int flags)
{
    int bps, blkalign, bytespersec, frame_size;
    int hdrsize;
    int64_t hdrstart = avio_tell(pb);
    int waveformatextensible;
    uint8_t temp[256];
    uint8_t *riff_extradata = temp;
    uint8_t *riff_extradata_start = temp;

    if (!enc->codec_tag || enc->codec_tag > 0xffff)
        return -1;

    /* We use the known constant frame size for the codec if known, otherwise
     * fall back on using AVCodecContext.frame_size, which is not as reliable
     * for indicating packet duration. */
    frame_size = av_get_audio_frame_duration(enc, enc->block_align);

    /* WAVEFORMATEXTENSIBLE is needed for >2 channels, high sample rates,
     * E-AC-3, or samples wider than 16 bits */
    waveformatextensible = (enc->channels > 2 && enc->channel_layout) ||
                           enc->sample_rate > 48000 ||
                           enc->codec_id == AV_CODEC_ID_EAC3 ||
                           av_get_bits_per_sample(enc->codec_id) > 16;

    if (waveformatextensible)
        avio_wl16(pb, 0xfffe); /* WAVE_FORMAT_EXTENSIBLE tag */
    else
        avio_wl16(pb, enc->codec_tag);

    avio_wl16(pb, enc->channels);
    avio_wl32(pb, enc->sample_rate);
    if (enc->codec_id == AV_CODEC_ID_ATRAC3 ||
        enc->codec_id == AV_CODEC_ID_G723_1 ||
        enc->codec_id == AV_CODEC_ID_MP2    ||
        enc->codec_id == AV_CODEC_ID_MP3    ||
        enc->codec_id == AV_CODEC_ID_GSM_MS) {
        bps = 0;
    } else {
        if (!(bps = av_get_bits_per_sample(enc->codec_id))) {
            if (enc->bits_per_coded_sample)
                bps = enc->bits_per_coded_sample;
            else
                bps = 16; // default to 16
        }
    }
    if (bps != enc->bits_per_coded_sample && enc->bits_per_coded_sample) {
        av_log(enc, AV_LOG_WARNING,
               "requested bits_per_coded_sample (%d) "
               "and actually stored (%d) differ\n",
               enc->bits_per_coded_sample, bps);
    }

    /* per-codec block alignment */
    if (enc->codec_id == AV_CODEC_ID_MP2) {
        blkalign = (144 * enc->bit_rate - 1)/enc->sample_rate + 1;
    } else if (enc->codec_id == AV_CODEC_ID_MP3) {
        blkalign = 576 * (enc->sample_rate <= (24000 + 32000)/2 ? 1 : 2);
    } else if (enc->codec_id == AV_CODEC_ID_AC3) {
        blkalign = 3840; /* maximum bytes per frame */
    } else if (enc->codec_id == AV_CODEC_ID_AAC) {
        blkalign = 768 * enc->channels; /* maximum bytes per frame */
    } else if (enc->codec_id == AV_CODEC_ID_G723_1) {
        blkalign = 24;
    } else if (enc->block_align != 0) { /* specified by the codec */
        blkalign = enc->block_align;
    } else
        blkalign = bps * enc->channels / av_gcd(8, bps);
    if (enc->codec_id == AV_CODEC_ID_PCM_U8 ||
        enc->codec_id == AV_CODEC_ID_PCM_S24LE ||
        enc->codec_id == AV_CODEC_ID_PCM_S32LE ||
        enc->codec_id == AV_CODEC_ID_PCM_F32LE ||
        enc->codec_id == AV_CODEC_ID_PCM_F64LE ||
        enc->codec_id == AV_CODEC_ID_PCM_S16LE) {
        bytespersec = enc->sample_rate * blkalign;
    } else if (enc->codec_id == AV_CODEC_ID_G723_1) {
        bytespersec = 800;
    } else {
        bytespersec = enc->bit_rate / 8;
    }
    avio_wl32(pb, bytespersec); /* bytes per second */
    avio_wl16(pb, blkalign);    /* block align */
    avio_wl16(pb, bps);         /* bits per sample */

    /* codec-specific extradata, staged in temp[] and written after cbSize */
    if (enc->codec_id == AV_CODEC_ID_MP3) {
        bytestream_put_le16(&riff_extradata, 1);    /* wID */
        bytestream_put_le32(&riff_extradata, 2);    /* fdwFlags */
        bytestream_put_le16(&riff_extradata, 1152); /* nBlockSize */
        bytestream_put_le16(&riff_extradata, 1);    /* nFramesPerBlock */
        bytestream_put_le16(&riff_extradata, 1393); /* nCodecDelay */
    } else if (enc->codec_id == AV_CODEC_ID_MP2) {
        /* fwHeadLayer */
        bytestream_put_le16(&riff_extradata, 2);
        /* dwHeadBitrate */
        bytestream_put_le32(&riff_extradata, enc->bit_rate);
        /* fwHeadMode */
        bytestream_put_le16(&riff_extradata, enc->channels == 2 ? 1 : 8);
        /* fwHeadModeExt */
        bytestream_put_le16(&riff_extradata, 0);
        /* wHeadEmphasis */
        bytestream_put_le16(&riff_extradata, 1);
        /* fwHeadFlags */
        bytestream_put_le16(&riff_extradata, 16);
        /* dwPTSLow */
        bytestream_put_le32(&riff_extradata, 0);
        /* dwPTSHigh */
        bytestream_put_le32(&riff_extradata, 0);
    } else if (enc->codec_id == AV_CODEC_ID_G723_1) {
        bytestream_put_le32(&riff_extradata, 0x9ace0002); /* extradata needed for msacm g723.1 codec */
        bytestream_put_le32(&riff_extradata, 0xaea2f732);
        bytestream_put_le16(&riff_extradata, 0xacde);
    } else if (enc->codec_id == AV_CODEC_ID_GSM_MS ||
               enc->codec_id == AV_CODEC_ID_ADPCM_IMA_WAV) {
        /* wSamplesPerBlock */
        bytestream_put_le16(&riff_extradata, frame_size);
    } else if (enc->extradata_size) {
        riff_extradata_start = enc->extradata;
        riff_extradata = enc->extradata + enc->extradata_size;
    }
    /* write WAVEFORMATEXTENSIBLE extensions */
    if (waveformatextensible) {
        int write_channel_mask = enc->strict_std_compliance < FF_COMPLIANCE_NORMAL ||
                                 enc->channel_layout < 0x40000;
        /* 22 is WAVEFORMATEXTENSIBLE size */
        avio_wl16(pb, riff_extradata - riff_extradata_start + 22);
        /* ValidBitsPerSample || SamplesPerBlock || Reserved */
        avio_wl16(pb, bps);
        /* dwChannelMask */
        avio_wl32(pb, write_channel_mask ? enc->channel_layout : 0);
        /* GUID + next 3 */
        if (enc->codec_id == AV_CODEC_ID_EAC3) {
            ff_put_guid(pb, ff_get_codec_guid(enc->codec_id, ff_codec_wav_guids));
        } else {
            avio_wl32(pb, enc->codec_tag);
            avio_wl32(pb, 0x00100000);
            avio_wl32(pb, 0xAA000080);
            avio_wl32(pb, 0x719B3800);
        }
    } else if ((flags & FF_PUT_WAV_HEADER_FORCE_WAVEFORMATEX) ||
               enc->codec_tag != 0x0001 /* PCM */ ||
               riff_extradata - riff_extradata_start) {
        /* WAVEFORMATEX */
        avio_wl16(pb, riff_extradata - riff_extradata_start); /* cbSize */
    } /* else PCMWAVEFORMAT */
    avio_write(pb, riff_extradata_start, riff_extradata - riff_extradata_start);
    hdrsize = avio_tell(pb) - hdrstart;
    /* pad the header to an even size */
    if (hdrsize & 1) {
        hdrsize++;
        avio_w8(pb, 0);
    }

    return hdrsize;
}
/**
 * Serialize and send one RTMP packet over the given connection, splitting the
 * payload into chunks of at most chunk_size bytes.
 *
 * The chunk basic/message header is compressed against the previous packet
 * sent on the same channel (prev_pkt[] history): 12-, 8-, 4- or 1-byte forms.
 *
 * Fix over the previous revision: the return values of ffurl_write() were
 * ignored, so network failures were silently dropped and a bogus "success"
 * size was returned. Errors are now propagated to the caller.
 *
 * @param h          opened network connection to write to
 * @param pkt        packet to send (header fields + data/data_size payload)
 * @param chunk_size maximum payload bytes per chunk before a continuation marker
 * @param prev_pkt   per-channel history used for header compression; updated here
 * @return number of bytes written on success, a negative AVERROR code on failure
 */
int ff_rtmp_packet_write(URLContext *h, RTMPPacket *pkt, int chunk_size, RTMPPacket *prev_pkt)
{
    uint8_t pkt_hdr[16], *p = pkt_hdr;
    int mode = RTMP_PS_TWELVEBYTES;
    int off = 0;
    int size = 0;
    int ret;

    pkt->ts_delta = pkt->timestamp - prev_pkt[pkt->channel_id].timestamp;

    // if channel_id = 0, this is the first packet on this channel: send full header.
    // Header compression is only possible when the stream id (extra) matches.
    if (prev_pkt[pkt->channel_id].channel_id &&
        pkt->extra == prev_pkt[pkt->channel_id].extra) {
        if (pkt->type == prev_pkt[pkt->channel_id].type &&
            pkt->data_size == prev_pkt[pkt->channel_id].data_size) {
            mode = RTMP_PS_FOURBYTES;
            if (pkt->ts_delta == prev_pkt[pkt->channel_id].ts_delta)
                mode = RTMP_PS_ONEBYTE;
        } else {
            mode = RTMP_PS_EIGHTBYTES;
        }
    }

    // basic header: channel ids < 64 fit in the first byte, larger ids use
    // the escape values 0 (one extra byte) or 1 (two extra bytes, LE).
    if (pkt->channel_id < 64) {
        bytestream_put_byte(&p, pkt->channel_id | (mode << 6));
    } else if (pkt->channel_id < 64 + 256) {
        bytestream_put_byte(&p, 0 | (mode << 6));
        bytestream_put_byte(&p, pkt->channel_id - 64);
    } else {
        bytestream_put_byte(&p, 1 | (mode << 6));
        bytestream_put_le16(&p, pkt->channel_id - 64);
    }
    if (mode != RTMP_PS_ONEBYTE) {
        // full headers carry the absolute timestamp, compressed ones the delta
        uint32_t timestamp = pkt->timestamp;
        if (mode != RTMP_PS_TWELVEBYTES)
            timestamp = pkt->ts_delta;
        // the 24-bit field saturates at 0xFFFFFF; the real value then follows
        // as a 32-bit extended timestamp after the header
        bytestream_put_be24(&p, timestamp >= 0xFFFFFF ? 0xFFFFFF : timestamp);
        if (mode != RTMP_PS_FOURBYTES) {
            bytestream_put_be24(&p, pkt->data_size);
            bytestream_put_byte(&p, pkt->type);
            if (mode == RTMP_PS_TWELVEBYTES)
                bytestream_put_le32(&p, pkt->extra);
        }
        if (timestamp >= 0xFFFFFF)
            bytestream_put_be32(&p, timestamp);
    }

    // save history for header compression of the next packet on this channel
    prev_pkt[pkt->channel_id].channel_id = pkt->channel_id;
    prev_pkt[pkt->channel_id].type       = pkt->type;
    prev_pkt[pkt->channel_id].data_size  = pkt->data_size;
    prev_pkt[pkt->channel_id].timestamp  = pkt->timestamp;
    if (mode != RTMP_PS_TWELVEBYTES) {
        prev_pkt[pkt->channel_id].ts_delta = pkt->ts_delta;
    } else {
        prev_pkt[pkt->channel_id].ts_delta = pkt->timestamp;
    }
    prev_pkt[pkt->channel_id].extra = pkt->extra;

    if ((ret = ffurl_write(h, pkt_hdr, p - pkt_hdr)) < 0)
        return ret;
    size = p - pkt_hdr + pkt->data_size;

    while (off < pkt->data_size) {
        int towrite = FFMIN(chunk_size, pkt->data_size - off);

        if ((ret = ffurl_write(h, pkt->data + off, towrite)) < 0)
            return ret;
        off += towrite;
        if (off < pkt->data_size) {
            // a continuation chunk starts with a type-3 (one-byte) header
            uint8_t marker = 0xC0 | pkt->channel_id;
            if ((ret = ffurl_write(h, &marker, 1)) < 0)
                return ret;
            size++;
        }
    }
    return size;
}
/* returns the size or -1 on error */
/**
 * Write a WAVEFORMAT(EX|EXTENSIBLE) header describing 'enc' to 'pb'.
 *
 * The extradata / codec-specific tail is staged in a small local buffer
 * (riff_extradata) so its size is known when cbSize is written, then
 * flushed after the fixed fields. hdrsize is tracked manually and must be
 * kept in sync with every byte emitted.
 *
 * @param pb  output byte context
 * @param enc codec parameters to describe
 * @return total number of header bytes written (even, padded), or -1 if the
 *         codec tag is missing or does not fit in 16 bits
 */
int ff_put_wav_header(AVIOContext *pb, AVCodecContext *enc)
{
    int bps, blkalign, bytespersec;
    int hdrsize = 18;                     /* base WAVEFORMATEX size incl. cbSize */
    int waveformatextensible;
    uint8_t temp[256];                    /* staging area for codec-specific bytes */
    uint8_t *riff_extradata = temp;
    uint8_t *riff_extradata_start = temp;

    if (!enc->codec_tag || enc->codec_tag > 0xffff)
        return -1;
    /* EXTENSIBLE is needed for >2 channels with a layout, high sample rates,
     * or sample depths beyond 16 bits */
    waveformatextensible = (enc->channels > 2 && enc->channel_layout) ||
                           enc->sample_rate > 48000 ||
                           av_get_bits_per_sample(enc->codec_id) > 16;

    if (waveformatextensible) {
        avio_wl16(pb, 0xfffe);            /* wFormatTag = WAVE_FORMAT_EXTENSIBLE */
    } else {
        avio_wl16(pb, enc->codec_tag);
    }
    avio_wl16(pb, enc->channels);
    avio_wl32(pb, enc->sample_rate);
    if (enc->codec_id == CODEC_ID_MP2 ||
        enc->codec_id == CODEC_ID_MP3 ||
        enc->codec_id == CODEC_ID_GSM_MS) {
        bps = 0;                          /* compressed formats report 0 bits/sample */
    } else {
        if (!(bps = av_get_bits_per_sample(enc->codec_id))) {
            if (enc->bits_per_coded_sample)
                bps = enc->bits_per_coded_sample;
            else
                bps = 16; // default to 16
        }
    }
    if (bps != enc->bits_per_coded_sample && enc->bits_per_coded_sample) {
        av_log(enc, AV_LOG_WARNING,
               "requested bits_per_coded_sample (%d) and actually stored (%d) differ\n",
               enc->bits_per_coded_sample, bps);
    }

    if (enc->codec_id == CODEC_ID_MP2 || enc->codec_id == CODEC_ID_MP3) {
        blkalign = enc->frame_size; //this is wrong, but it seems many demuxers do not work if this is set correctly
        //blkalign = 144 * enc->bit_rate/enc->sample_rate;
    } else if (enc->codec_id == CODEC_ID_AC3) {
        blkalign = 3840; //maximum bytes per frame
    } else if (enc->block_align != 0) { /* specified by the codec */
        blkalign = enc->block_align;
    } else
        blkalign = bps * enc->channels / av_gcd(8, bps);
    /* PCM: bytes/sec follows directly from the sample rate; otherwise derive
     * it from the declared bitrate */
    if (enc->codec_id == CODEC_ID_PCM_U8 ||
        enc->codec_id == CODEC_ID_PCM_S24LE ||
        enc->codec_id == CODEC_ID_PCM_S32LE ||
        enc->codec_id == CODEC_ID_PCM_F32LE ||
        enc->codec_id == CODEC_ID_PCM_F64LE ||
        enc->codec_id == CODEC_ID_PCM_S16LE) {
        bytespersec = enc->sample_rate * blkalign;
    } else {
        bytespersec = enc->bit_rate / 8;
    }
    avio_wl32(pb, bytespersec);  /* bytes per second */
    avio_wl16(pb, blkalign);     /* block align */
    avio_wl16(pb, bps);          /* bits per sample */

    /* stage the codec-specific extradata; hdrsize must grow by exactly the
     * number of bytes staged in each branch */
    if (enc->codec_id == CODEC_ID_MP3) {
        hdrsize += 12;           /* MPEGLAYER3WAVEFORMAT extension */
        bytestream_put_le16(&riff_extradata, 1);    /* wID */
        bytestream_put_le32(&riff_extradata, 2);    /* fdwFlags */
        bytestream_put_le16(&riff_extradata, 1152); /* nBlockSize */
        bytestream_put_le16(&riff_extradata, 1);    /* nFramesPerBlock */
        bytestream_put_le16(&riff_extradata, 1393); /* nCodecDelay */
    } else if (enc->codec_id == CODEC_ID_MP2) {
        hdrsize += 22;           /* MPEG1WAVEFORMAT extension */
        bytestream_put_le16(&riff_extradata, 2);             /* fwHeadLayer */
        bytestream_put_le32(&riff_extradata, enc->bit_rate); /* dwHeadBitrate */
        bytestream_put_le16(&riff_extradata, enc->channels == 2 ? 1 : 8); /* fwHeadMode */
        bytestream_put_le16(&riff_extradata, 0);  /* fwHeadModeExt */
        bytestream_put_le16(&riff_extradata, 1);  /* wHeadEmphasis */
        bytestream_put_le16(&riff_extradata, 16); /* fwHeadFlags */
        bytestream_put_le32(&riff_extradata, 0);  /* dwPTSLow */
        bytestream_put_le32(&riff_extradata, 0);  /* dwPTSHigh */
    } else if (enc->codec_id == CODEC_ID_GSM_MS ||
               enc->codec_id == CODEC_ID_ADPCM_IMA_WAV) {
        hdrsize += 2;
        bytestream_put_le16(&riff_extradata, enc->frame_size); /* wSamplesPerBlock */
    } else if (enc->extradata_size) {
        /* generic case: point the staging span at the codec's own extradata */
        riff_extradata_start = enc->extradata;
        riff_extradata = enc->extradata + enc->extradata_size;
        hdrsize += enc->extradata_size;
    }
    if (waveformatextensible) {      /* write WAVEFORMATEXTENSIBLE extensions */
        hdrsize += 22;
        avio_wl16(pb, riff_extradata - riff_extradata_start + 22); /* 22 is WAVEFORMATEXTENSIBLE size */
        avio_wl16(pb, enc->bits_per_coded_sample); /* ValidBitsPerSample || SamplesPerBlock || Reserved */
        avio_wl32(pb, enc->channel_layout);        /* dwChannelMask */
        /* SubFormat GUID: codec tag + the fixed KSDATAFORMAT suffix */
        avio_wl32(pb, enc->codec_tag);             /* GUID + next 3 */
        avio_wl32(pb, 0x00100000);
        avio_wl32(pb, 0xAA000080);
        avio_wl32(pb, 0x719B3800);
    } else {
        avio_wl16(pb, riff_extradata - riff_extradata_start); /* cbSize */
    }
    /* flush the staged codec-specific bytes */
    avio_write(pb, riff_extradata_start, riff_extradata - riff_extradata_start);
    /* RIFF chunks must be padded to an even size */
    if (hdrsize & 1) {
        hdrsize++;
        avio_w8(pb, 0);
    }

    return hdrsize;
}
/**
 * Encode one frame as a Windows BMP file (BITMAPFILEHEADER + BITMAPINFOHEADER
 * + optional palette/bitmasks + bottom-up pixel rows).
 *
 * @param avctx    codec context (width/height/pix_fmt are read)
 * @param buf      output buffer for the complete BMP file
 * @param buf_size capacity of buf in bytes
 * @param data     input AVFrame
 * @return total number of bytes written, or -1 on unsupported pixel format
 *         or insufficient buffer space
 */
static int bmp_encode_frame(AVCodecContext *avctx, unsigned char *buf, int buf_size, void *data)
{
    BMPContext *s = avctx->priv_data;
    AVFrame *pict = data;
    AVFrame * const p = (AVFrame*)&s->picture;
    int n_bytes_image, n_bytes_per_row, n_bytes, i, n, hsize;
    const uint32_t *pal = NULL;
    int pad_bytes_per_row, bit_count, pal_entries = 0, compression = BMP_RGB;
    uint8_t *ptr;
    unsigned char* buf0 = buf;    /* start of output, kept to index the pixel area */

    *p = *pict;
    p->pict_type = FF_I_TYPE;     /* every BMP frame is intra */
    p->key_frame = 1;
    /* map the pixel format to a BMP bit depth and, where needed, a palette
     * or bitfield masks */
    switch (avctx->pix_fmt) {
    case PIX_FMT_BGR24:
        bit_count = 24;
        break;
    case PIX_FMT_RGB555:
        bit_count = 16;           /* 16 bpp with BMP_RGB defaults to 5-5-5 */
        break;
    case PIX_FMT_RGB565:
        bit_count = 16;
        compression = BMP_BITFIELDS;
        pal = rgb565_masks;       // abuse pal to hold color masks
        pal_entries = 3;
        break;
    case PIX_FMT_RGB8:
    case PIX_FMT_BGR8:
    case PIX_FMT_RGB4_BYTE:
    case PIX_FMT_BGR4_BYTE:
    case PIX_FMT_GRAY8:
    case PIX_FMT_PAL8:
        bit_count = 8;
        pal = (uint32_t *)p->data[1];  /* frame-supplied 256-entry palette */
        break;
    case PIX_FMT_MONOBLACK:
        bit_count = 1;
        pal = monoblack_pal;
        break;
    default:
        return -1;                /* unsupported pixel format */
    }
    /* a real palette (not bitmasks) has one entry per representable value */
    if (pal && !pal_entries) pal_entries = 1 << bit_count;
    /* 64-bit arithmetic to avoid overflow for large width * bit_count */
    n_bytes_per_row = ((int64_t)avctx->width * (int64_t)bit_count + 7LL) >> 3LL;
    pad_bytes_per_row = (4 - n_bytes_per_row) & 3;   /* rows are 4-byte aligned */
    n_bytes_image = avctx->height * (n_bytes_per_row + pad_bytes_per_row);

    // STRUCTURE.field refer to the MSVC documentation for BITMAPFILEHEADER
    // and related pages.
#define SIZE_BITMAPFILEHEADER 14
#define SIZE_BITMAPINFOHEADER 40
    hsize = SIZE_BITMAPFILEHEADER + SIZE_BITMAPINFOHEADER + (pal_entries << 2);
    n_bytes = n_bytes_image + hsize;
    if (n_bytes > buf_size) {
        av_log(avctx, AV_LOG_ERROR, "buf size too small (need %d, got %d)\n", n_bytes, buf_size);
        return -1;
    }
    bytestream_put_byte(&buf, 'B');                   // BITMAPFILEHEADER.bfType
    bytestream_put_byte(&buf, 'M');                   // do.
    bytestream_put_le32(&buf, n_bytes);               // BITMAPFILEHEADER.bfSize
    bytestream_put_le16(&buf, 0);                     // BITMAPFILEHEADER.bfReserved1
    bytestream_put_le16(&buf, 0);                     // BITMAPFILEHEADER.bfReserved2
    bytestream_put_le32(&buf, hsize);                 // BITMAPFILEHEADER.bfOffBits
    bytestream_put_le32(&buf, SIZE_BITMAPINFOHEADER); // BITMAPINFOHEADER.biSize
    bytestream_put_le32(&buf, avctx->width);          // BITMAPINFOHEADER.biWidth
    bytestream_put_le32(&buf, avctx->height);         // BITMAPINFOHEADER.biHeight
    bytestream_put_le16(&buf, 1);                     // BITMAPINFOHEADER.biPlanes
    bytestream_put_le16(&buf, bit_count);             // BITMAPINFOHEADER.biBitCount
    bytestream_put_le32(&buf, compression);           // BITMAPINFOHEADER.biCompression
    bytestream_put_le32(&buf, n_bytes_image);         // BITMAPINFOHEADER.biSizeImage
    bytestream_put_le32(&buf, 0);                     // BITMAPINFOHEADER.biXPelsPerMeter
    bytestream_put_le32(&buf, 0);                     // BITMAPINFOHEADER.biYPelsPerMeter
    bytestream_put_le32(&buf, 0);                     // BITMAPINFOHEADER.biClrUsed
    bytestream_put_le32(&buf, 0);                     // BITMAPINFOHEADER.biClrImportant
    /* palette entries (or bitfield masks) are stored as BGR0 — alpha dropped */
    for (i = 0; i < pal_entries; i++)
        bytestream_put_le32(&buf, pal[i] & 0xFFFFFF);
    // BMP files are bottom-to-top so we start from the end...
    ptr = p->data[0] + (avctx->height - 1) * p->linesize[0];
    buf = buf0 + hsize;   /* pixel area starts right after header + palette */
    for (i = 0; i < avctx->height; i++) {
        if (bit_count == 16) {
            /* 16 bpp rows are copied sample-by-sample to force little-endian */
            const uint16_t *src = (const uint16_t *) ptr;
            uint16_t *dst = (uint16_t *) buf;
            for (n = 0; n < avctx->width; n++)
                AV_WL16(dst + n, src[n]);
        } else {
            memcpy(buf, ptr, n_bytes_per_row);
        }
        buf += n_bytes_per_row;
        memset(buf, 0, pad_bytes_per_row);   /* zero the row-alignment padding */
        buf += pad_bytes_per_row;
        ptr -= p->linesize[0]; // ... and go back
    }
    return n_bytes;
}
/**
 * BRSTM demuxer: read the next audio block as one packet.
 *
 * For ADPCM_THP(_LE) streams the packet is rebuilt as
 * [size|samples][per-channel coef tables][per-block history][interleaved data]
 * so the decoder gets everything it needs; other codecs pass the raw block.
 *
 * Fixes over the previous revision:
 *  - guard against codec->channels == 0, which previously divided by zero in
 *    the overflow check and left 'ret' uninitialized (read loop never ran);
 *  - propagate real negative error codes from av_new_packet()/avio_read()/
 *    av_get_packet() instead of masking everything as AVERROR(EIO).
 *
 * @return 0 or a positive byte count on success, negative AVERROR on failure
 */
static int read_packet(AVFormatContext *s, AVPacket *pkt)
{
    AVCodecContext *codec = s->streams[0]->codec;
    BRSTMDemuxContext *b = s->priv_data;
    uint32_t samples, size, skip = 0;
    int ret = 0, i;

    if (avio_feof(s->pb))
        return AVERROR_EOF;
    b->current_block++;
    if (b->current_block == b->block_count) {
        /* the final block is usually short; trim to the bytes actually used */
        size    = b->last_block_used_bytes;
        samples = b->last_block_samples;
        skip    = b->last_block_size - b->last_block_used_bytes;

        if (samples < size * 14 / 8) {
            /* THP ADPCM packs 14 samples per 8 bytes; shrink 'size' to the
             * bytes the sample count really needs and skip the remainder */
            uint32_t adjusted_size = samples / 14 * 8;
            if (samples % 14)
                adjusted_size += (samples % 14 + 1) / 2 + 1;

            skip += size - adjusted_size;
            size  = adjusted_size;
        }
    } else if (b->current_block < b->block_count) {
        size    = b->block_size;
        samples = b->samples_per_block;
    } else {
        return AVERROR_EOF;
    }

    if (codec->codec_id == AV_CODEC_ID_ADPCM_THP ||
        codec->codec_id == AV_CODEC_ID_ADPCM_THP_LE) {
        uint8_t *dst;

        if (!b->adpc) {
            av_log(s, AV_LOG_ERROR, "adpcm_thp requires ADPC chunk, but none was found.\n");
            return AVERROR_INVALIDDATA;
        }
        /* a zero channel count would divide by zero below and leave the
         * packet empty; reject it explicitly */
        if (!codec->channels)
            return AVERROR_INVALIDDATA;
        if (!b->table) {
            b->table = av_mallocz(32 * codec->channels);
            if (!b->table)
                return AVERROR(ENOMEM);
        }

        /* reject sizes whose repacked packet would overflow int */
        if (size > (INT_MAX - 32 - 4) ||
            (32 + 4 + size) > (INT_MAX / codec->channels) ||
            (32 + 4 + size) * codec->channels > INT_MAX - 8)
            return AVERROR_INVALIDDATA;
        if ((ret = av_new_packet(pkt, 8 + (32 + 4 + size) * codec->channels)) < 0)
            return ret;
        dst = pkt->data;
        /* leading size/sample words match the codec's expected endianness */
        if (codec->codec_id == AV_CODEC_ID_ADPCM_THP_LE) {
            bytestream_put_le32(&dst, size * codec->channels);
            bytestream_put_le32(&dst, samples);
        } else {
            bytestream_put_be32(&dst, size * codec->channels);
            bytestream_put_be32(&dst, samples);
        }
        bytestream_put_buffer(&dst, b->table, 32 * codec->channels);
        /* per-block predictor history for every channel */
        bytestream_put_buffer(&dst, b->adpc + 4 * codec->channels *
                                    (b->current_block - 1), 4 * codec->channels);

        for (i = 0; i < codec->channels; i++) {
            ret = avio_read(s->pb, dst, size);
            dst += size;
            avio_skip(s->pb, skip);   /* skip the other channels' interleave */
            if (ret != size) {
                av_free_packet(pkt);
                break;
            }
        }
        pkt->duration = samples;
    } else {
        size *= codec->channels;
        ret = av_get_packet(s->pb, pkt, size);
    }

    pkt->stream_index = 0;

    if (ret < 0)                 /* keep the real I/O error code */
        return ret;
    if (ret != size)             /* short read with no specific error */
        ret = AVERROR(EIO);

    return ret;
}