/**
 * Transpose (and optionally flip) the completed input frame into the output
 * frame, plane by plane, then push the result downstream.
 *
 * Rows of the input become columns of the output: output pixel (x, y) is
 * taken from input pixel (y, x), walked via in[x*inlinesize + y*pixstep].
 * trans->dir selects the transpose variant: bit 0 mirrors the input
 * vertically (source rows walked bottom-up), bit 1 mirrors the output
 * vertically (destination rows written bottom-up) — both implemented by
 * starting at the last row and negating the corresponding linesize.
 */
static void end_frame(AVFilterLink *inlink)
{
    TransContext *trans = inlink->dst->priv;
    AVFilterBufferRef *inpic = inlink->cur_buf;
    AVFilterBufferRef *outpic = inlink->dst->outputs[0]->out_buf;
    AVFilterLink *outlink = inlink->dst->outputs[0];
    int plane;

    /* iterate every allocated plane of the output picture */
    for (plane = 0; outpic->data[plane]; plane++) {
        /* chroma planes (1 and 2) are subsampled; luma/alpha are not */
        int hsub = plane == 1 || plane == 2 ? trans->hsub : 0;
        int vsub = plane == 1 || plane == 2 ? trans->vsub : 0;
        int pixstep = trans->pixsteps[plane];   /* bytes per pixel in this plane */
        int inh  = inpic->video->h >> vsub;
        int outw = outpic->video->w >> hsub;
        int outh = outpic->video->h >> vsub;
        uint8_t *out, *in;
        int outlinesize, inlinesize;
        int x, y;

        out = outpic->data[plane]; outlinesize = outpic->linesize[plane];
        in  = inpic ->data[plane]; inlinesize  = inpic ->linesize[plane];

        if (trans->dir & 1) {
            /* mirror source vertically: start at last input row, step backwards */
            in += inpic->linesize[plane] * (inh - 1);
            inlinesize *= -1;
        }

        if (trans->dir & 2) {
            /* mirror destination vertically: start at last output row, step backwards */
            out += outpic->linesize[plane] * (outh - 1);
            outlinesize *= -1;
        }

        for (y = 0; y < outh; y++) {
            /* specialized inner loops per pixel size; x walks input rows */
            switch (pixstep) {
            case 1:
                for (x = 0; x < outw; x++)
                    out[x] = in[x * inlinesize + y];
                break;
            case 2:
                for (x = 0; x < outw; x++)
                    *((uint16_t *)(out + 2 * x)) =
                        *((uint16_t *)(in + x * inlinesize + y * 2));
                break;
            case 3:
                /* 24-bit pixels have no native type; move via a 32-bit temp */
                for (x = 0; x < outw; x++) {
                    int32_t v = AV_RB24(in + x * inlinesize + y * 3);
                    AV_WB24(out + 3 * x, v);
                }
                break;
            case 4:
                for (x = 0; x < outw; x++)
                    *((uint32_t *)(out + 4 * x)) =
                        *((uint32_t *)(in + x * inlinesize + y * 4));
                break;
            }
            out += outlinesize;
        }
    }

    avfilter_unref_buffer(inpic);
    /* whole frame was produced at once: emit a single full-height slice */
    avfilter_draw_slice(outlink, 0, outpic->video->h, 1);
    avfilter_end_frame(outlink);
    avfilter_unref_buffer(outpic);
}
/**
 * Read one complete RTMP packet (header plus all of its chunks) from the
 * network.
 *
 * The RTMP chunk header may be 1, 4, 8 or 12 bytes long (RTMP_PS_*); fields
 * absent from shorter headers are inherited from the previous packet seen on
 * the same channel, which is cached in prev_pkt[]. Payloads longer than
 * chunk_size are split into chunks separated by a one-byte 0xC3-style
 * continuation marker.
 *
 * @param h          opened network connection
 * @param p          packet to fill (allocated here via ff_rtmp_packet_create;
 *                   freed on payload read error)
 * @param chunk_size maximum chunk payload size negotiated on this connection
 * @param prev_pkt   per-channel packet history, updated on success
 * @return number of bytes consumed from the stream on success,
 *         AVERROR(EIO) on read failure, -1 on other errors
 */
int ff_rtmp_packet_read(URLContext *h, RTMPPacket *p, int chunk_size, RTMPPacket *prev_pkt)
{
    uint8_t hdr, t, buf[16];
    int channel_id, timestamp, size, offset = 0;
    uint32_t extra = 0;
    enum RTMPPacketType type;
    int written = 0;   /* running count of bytes consumed; the return value */

    if (ffurl_read(h, &hdr, 1) != 1)
        return AVERROR(EIO);
    written++;
    channel_id = hdr & 0x3F;

    if (channel_id < 2) { //special case for channel number >= 64
        buf[1] = 0;
        if (ffurl_read_complete(h, buf, channel_id + 1) != channel_id + 1)
            return AVERROR(EIO);
        written += channel_id + 1;
        channel_id = AV_RL16(buf) + 64;
    }
    /* start from the previous packet's fields; shorter headers inherit them */
    size  = prev_pkt[channel_id].size;
    type  = prev_pkt[channel_id].type;
    extra = prev_pkt[channel_id].extra;
    hdr >>= 6;   /* top two bits select the header format */
    if (hdr == RTMP_PS_ONEBYTE) {
        timestamp = prev_pkt[channel_id].ts_delta;
    } else {
        if (ffurl_read_complete(h, buf, 3) != 3)
            return AVERROR(EIO);
        written += 3;
        timestamp = AV_RB24(buf);
        if (hdr != RTMP_PS_FOURBYTES) {
            if (ffurl_read_complete(h, buf, 3) != 3)
                return AVERROR(EIO);
            written += 3;
            size = AV_RB24(buf);
            if (ffurl_read_complete(h, buf, 1) != 1)
                return AVERROR(EIO);
            written++;
            type = buf[0];
            if (hdr == RTMP_PS_TWELVEBYTES) {
                if (ffurl_read_complete(h, buf, 4) != 4)
                    return AVERROR(EIO);
                written += 4;
                extra = AV_RL32(buf);
            }
        }
        if (timestamp == 0xFFFFFF) {
            /* extended timestamp follows the header */
            if (ffurl_read_complete(h, buf, 4) != 4)
                return AVERROR(EIO);
            written += 4;   /* FIX: these 4 bytes were not counted before */
            timestamp = AV_RB32(buf);
        }
    }
    /* non-absolute headers carry a delta relative to the previous packet */
    if (hdr != RTMP_PS_TWELVEBYTES)
        timestamp += prev_pkt[channel_id].timestamp;

    if (ff_rtmp_packet_create(p, channel_id, type, timestamp, size))
        return -1;
    p->extra = extra;
    // save history
    prev_pkt[channel_id].channel_id = channel_id;
    prev_pkt[channel_id].type       = type;
    prev_pkt[channel_id].size       = size;
    prev_pkt[channel_id].ts_delta   = timestamp - prev_pkt[channel_id].timestamp;
    prev_pkt[channel_id].timestamp  = timestamp;
    prev_pkt[channel_id].extra      = extra;
    while (size > 0) {
        int toread = FFMIN(size, chunk_size);
        if (ffurl_read_complete(h, p->data + offset, toread) != toread) {
            ff_rtmp_packet_destroy(p);
            return AVERROR(EIO);
        }
        /* FIX: advance by the bytes actually read, not by chunk_size —
         * otherwise the final partial chunk overstates 'written' and steps
         * 'offset' past the end of the payload buffer */
        size    -= toread;
        offset  += toread;
        written += toread;
        if (size > 0) {
            ffurl_read_complete(h, &t, 1); //marker
            written++;
            if (t != (0xC0 + channel_id))
                return -1;
        }
    }
    return written;
}
/**
 * Read a single RTMP chunk and fold it into the packet being assembled on
 * its channel.
 *
 * Unlike the blocking whole-packet reader, this consumes at most one chunk
 * per call: if the packet is not complete afterwards, the partial state
 * (data pointer, bytes read, payload offset) is parked in prev_pkt[] and
 * AVERROR(EAGAIN) is returned so the caller can come back later.
 *
 * @param h            opened network connection
 * @param p            packet being filled; owns the data buffer once complete
 * @param chunk_size   maximum chunk payload size for this connection
 * @param prev_pkt_ptr per-channel history array, grown on demand
 * @param nb_prev_pkt  number of entries in *prev_pkt_ptr
 * @param hdr          first header byte, already consumed by the caller
 * @return total bytes consumed for this packet so far on completion,
 *         AVERROR(EAGAIN) if more chunks are needed, negative AVERROR on error
 */
static int rtmp_packet_read_one_chunk(URLContext *h, RTMPPacket *p, int chunk_size, RTMPPacket **prev_pkt_ptr, int *nb_prev_pkt, uint8_t hdr)
{
    uint8_t buf[16];
    int channel_id, timestamp, size;
    uint32_t ts_field; // non-extended timestamp or delta field
    uint32_t extra = 0;
    enum RTMPPacketType type;
    int written = 0;   /* header bytes consumed (the hdr byte counts as one) */
    int ret, toread;
    RTMPPacket *prev_pkt;

    written++;
    channel_id = hdr & 0x3F;

    if (channel_id < 2) { //special case for channel number >= 64
        buf[1] = 0;
        if (ffurl_read_complete(h, buf, channel_id + 1) != channel_id + 1)
            return AVERROR(EIO);
        written += channel_id + 1;
        channel_id = AV_RL16(buf) + 64;
    }
    /* make sure the history array can index this channel */
    if ((ret = ff_rtmp_check_alloc_array(prev_pkt_ptr, nb_prev_pkt, channel_id)) < 0)
        return ret;
    prev_pkt = *prev_pkt_ptr;
    /* shorter header formats inherit fields from the previous packet */
    size  = prev_pkt[channel_id].size;
    type  = prev_pkt[channel_id].type;
    extra = prev_pkt[channel_id].extra;

    hdr >>= 6; // header size indicator
    if (hdr == RTMP_PS_ONEBYTE) {
        ts_field = prev_pkt[channel_id].ts_field;
    } else {
        if (ffurl_read_complete(h, buf, 3) != 3)
            return AVERROR(EIO);
        written += 3;
        ts_field = AV_RB24(buf);
        if (hdr != RTMP_PS_FOURBYTES) {
            if (ffurl_read_complete(h, buf, 3) != 3)
                return AVERROR(EIO);
            written += 3;
            size = AV_RB24(buf);
            if (ffurl_read_complete(h, buf, 1) != 1)
                return AVERROR(EIO);
            written++;
            type = buf[0];
            if (hdr == RTMP_PS_TWELVEBYTES) {
                if (ffurl_read_complete(h, buf, 4) != 4)
                    return AVERROR(EIO);
                written += 4;
                extra = AV_RL32(buf);
            }
        }
    }
    if (ts_field == 0xFFFFFF) {
        /* 0xFFFFFF flags a 32-bit extended timestamp after the header */
        if (ffurl_read_complete(h, buf, 4) != 4)
            return AVERROR(EIO);
        timestamp = AV_RB32(buf);
    } else {
        timestamp = ts_field;
    }
    /* except for full 12-byte headers, the field is a delta, not absolute */
    if (hdr != RTMP_PS_TWELVEBYTES)
        timestamp += prev_pkt[channel_id].timestamp;

    if (!prev_pkt[channel_id].read) {
        /* first chunk of a new packet: allocate its buffer */
        if ((ret = ff_rtmp_packet_create(p, channel_id, type, timestamp, size)) < 0)
            return ret;
        p->read = written;
        p->offset = 0;
        prev_pkt[channel_id].ts_field  = ts_field;
        prev_pkt[channel_id].timestamp = timestamp;
    } else {
        // previous packet in this channel hasn't completed reading
        RTMPPacket *prev = &prev_pkt[channel_id];
        /* resume: adopt the parked buffer and progress counters */
        p->data       = prev->data;
        p->size       = prev->size;
        p->channel_id = prev->channel_id;
        p->type       = prev->type;
        p->ts_field   = prev->ts_field;
        p->extra      = prev->extra;
        p->offset     = prev->offset;
        p->read       = prev->read + written;
        p->timestamp  = prev->timestamp;
        prev->data    = NULL;   /* ownership moved to p */
    }
    p->extra = extra;
    // save history
    prev_pkt[channel_id].channel_id = channel_id;
    prev_pkt[channel_id].type       = type;
    prev_pkt[channel_id].size       = size;
    prev_pkt[channel_id].extra      = extra;
    size = size - p->offset;   /* payload bytes still missing */

    toread = FFMIN(size, chunk_size);
    if (ffurl_read_complete(h, p->data + p->offset, toread) != toread) {
        ff_rtmp_packet_destroy(p);
        return AVERROR(EIO);
    }
    size      -= toread;
    p->read   += toread;
    p->offset += toread;

    if (size > 0) {
        /* packet still incomplete: park the buffer back in the history slot
         * and ask the caller to try again for the next chunk */
        RTMPPacket *prev = &prev_pkt[channel_id];
        prev->data = p->data;
        prev->read = p->read;
        prev->offset = p->offset;
        return AVERROR(EAGAIN);
    }

    prev_pkt[channel_id].read = 0; // read complete; reset if needed
    return p->read;
}
/**
 * Read one complete RTMP packet (header plus all chunks) from the network.
 *
 * The chunk header may be 1, 4, 8 or 12 bytes (RTMP_PS_*); fields missing
 * from shorter headers are inherited from the previous packet seen on the
 * same channel, cached in prev_pkt[]. Payloads larger than chunk_size are
 * split into chunks separated by a one-byte continuation marker.
 *
 * @param h          opened network connection
 * @param p          packet to fill (allocated via ff_rtmp_packet_create;
 *                   freed on payload read error)
 * @param chunk_size maximum chunk payload size for this connection
 * @param prev_pkt   per-channel packet history, updated on success
 * @return 0 on success, AVERROR(EIO) on read failure, -1 on other errors
 */
int ff_rtmp_packet_read(URLContext *h, RTMPPacket *p, int chunk_size, RTMPPacket *prev_pkt)
{
    uint8_t hdr, t, buf[16];
    int channel_id, timestamp, data_size, offset = 0;
    uint32_t extra = 0;
    uint8_t type;

    if (url_read(h, &hdr, 1) != 1)
        return AVERROR(EIO);
    channel_id = hdr & 0x3F;

    if (channel_id < 2) { //special case for channel number >= 64
        buf[1] = 0;
        if (url_read_complete(h, buf, channel_id + 1) != channel_id + 1)
            return AVERROR(EIO);
        channel_id = AV_RL16(buf) + 64;
    }
    /* start from the previous packet's fields; shorter headers inherit them */
    data_size = prev_pkt[channel_id].data_size;
    type      = prev_pkt[channel_id].type;
    extra     = prev_pkt[channel_id].extra;
    hdr >>= 6;   /* top two bits select the header format */
    if (hdr == RTMP_PS_ONEBYTE) {
        timestamp = prev_pkt[channel_id].timestamp;
    } else {
        if (url_read_complete(h, buf, 3) != 3)
            return AVERROR(EIO);
        timestamp = AV_RB24(buf);
        if (hdr != RTMP_PS_FOURBYTES) {
            if (url_read_complete(h, buf, 3) != 3)
                return AVERROR(EIO);
            data_size = AV_RB24(buf);
            if (url_read_complete(h, &type, 1) != 1)
                return AVERROR(EIO);
            if (hdr == RTMP_PS_TWELVEBYTES) {
                if (url_read_complete(h, buf, 4) != 4)
                    return AVERROR(EIO);
                extra = AV_RL32(buf);
            }
        }
    }
    if (ff_rtmp_packet_create(p, channel_id, type, timestamp, data_size))
        return -1;
    p->extra = extra;
    // save history
    prev_pkt[channel_id].channel_id = channel_id;
    prev_pkt[channel_id].type       = type;
    prev_pkt[channel_id].data_size  = data_size;
    prev_pkt[channel_id].timestamp  = timestamp;
    prev_pkt[channel_id].extra      = extra;
    while (data_size > 0) {
        int toread = FFMIN(data_size, chunk_size);
        if (url_read_complete(h, p->data + offset, toread) != toread) {
            ff_rtmp_packet_destroy(p);
            return AVERROR(EIO);
        }
        /* FIX: advance by the bytes actually read, not by chunk_size —
         * on the final partial chunk the old code stepped 'offset' past
         * the end of the allocated payload buffer */
        data_size -= toread;
        offset    += toread;
        if (data_size > 0) {
            url_read_complete(h, &t, 1); //marker
            if (t != (0xC0 + channel_id))
                return -1;
        }
    }
    return 0;
}
/**
 * Parse one GIF image descriptor from the bytestream and LZW-decode its
 * pixel data into the picture at the descriptor's (left, top) position.
 *
 * Handles an optional local palette (falling back to the global one),
 * builds the 32-bit ARGB palette including the transparent index, and
 * supports both sequential and interlaced row ordering.
 *
 * @return 0 on success, AVERROR(EINVAL) if the image rectangle does not
 *         fit inside the screen dimensions
 */
static int gif_read_image(GifState *s)
{
    int left, top, width, height, bits_per_pixel, code_size, flags;
    int is_interleaved, has_local_palette, y, pass, y1, linesize, n, i;
    uint8_t *ptr, *spal, *palette, *ptr1;

    /* image descriptor: position, size and flags, all little-endian */
    left   = bytestream_get_le16(&s->bytestream);
    top    = bytestream_get_le16(&s->bytestream);
    width  = bytestream_get_le16(&s->bytestream);
    height = bytestream_get_le16(&s->bytestream);
    flags  = bytestream_get_byte(&s->bytestream);
    is_interleaved    = flags & 0x40;
    has_local_palette = flags & 0x80;
    bits_per_pixel    = (flags & 0x07) + 1;

    av_dlog(s->avctx, "gif: image x=%d y=%d w=%d h=%d\n", left, top, width, height);

    if (has_local_palette) {
        bytestream_get_buffer(&s->bytestream, s->local_palette, 3 * (1 << bits_per_pixel));
        palette = s->local_palette;
    } else {
        /* no local palette: use the global one and its depth */
        palette = s->global_palette;
        bits_per_pixel = s->bits_per_pixel;
    }

    /* verify that all the image is inside the screen dimensions */
    if (left + width > s->screen_width || top + height > s->screen_height)
        return AVERROR(EINVAL);

    /* build the palette */
    n = (1 << bits_per_pixel);
    spal = palette;
    for (i = 0; i < n; i++) {
        /* RGB triplet -> opaque 0xAARRGGBB */
        s->image_palette[i] = (0xff << 24) | AV_RB24(spal);
        spal += 3;
    }
    for (; i < 256; i++)
        s->image_palette[i] = (0xff << 24);
    /* handle transparency */
    if (s->transparent_color_index >= 0)
        s->image_palette[s->transparent_color_index] = 0;

    /* now get the image data */
    code_size = bytestream_get_byte(&s->bytestream);
    ff_lzw_decode_init(s->lzw, code_size, s->bytestream,
                       s->bytestream_end - s->bytestream, FF_LZW_GIF);

    /* read all the image */
    linesize = s->picture.linesize[0];
    ptr1 = s->picture.data[0] + top * linesize + left;
    ptr = ptr1;
    pass = 0;
    y1 = 0;
    for (y = 0; y < height; y++) {
        ff_lzw_decode(s->lzw, ptr, width);
        if (is_interleaved) {
            /* GIF interlacing: pass 0 writes rows 0,8,16,...; pass 1 rows
             * 4,12,...; pass 2 rows 2,6,10,...; pass 3 rows 1,3,5,... */
            switch (pass) {
            default:
            case 0:
            case 1:
                y1 += 8;
                ptr += linesize * 8;
                if (y1 >= height) {
                    /* after pass 0 restart at row 4, after pass 1 at row 2 */
                    y1 = pass ? 2 : 4;
                    ptr = ptr1 + linesize * y1;
                    pass++;
                }
                break;
            case 2:
                y1 += 4;
                ptr += linesize * 4;
                if (y1 >= height) {
                    y1 = 1;
                    ptr = ptr1 + linesize;
                    pass++;
                }
                break;
            case 3:
                y1 += 2;
                ptr += linesize * 2;
                break;
            }
        } else {
            ptr += linesize;
        }
    }

    /* read the garbage data until end marker is found */
    ff_lzw_decode_tail(s->lzw);
    s->bytestream = ff_lzw_cur_ptr(s->lzw);
    return 0;
}
/**
 * Decode one PICtor/PC Paint image into a PAL8 frame.
 *
 * Parses the file header (magic 0x1234, dimensions, planes/bit depth and an
 * optional extension block), builds the palette according to the extension
 * type (CGA mode index, CGA/EGA indices, or a raw 6-bit RGB table), then
 * RLE-decodes the plane data bottom-up via picmemset()/picmemset_8bpp().
 *
 * Fixes vs. previous revision:
 *  - ff_cga_palette has 16 entries, so indices must be clamped to 15,
 *    not 16 (out-of-bounds read).
 *  - av_log_ask_for_sample() must receive the AVClass context (avctx),
 *    not the private context.
 *  - dimensions are re-set when EITHER dimension changed (was '&&', which
 *    skipped the update when only one of width/height differed).
 *
 * @return number of consumed bytes on success, negative AVERROR on failure
 */
static int decode_frame(AVCodecContext *avctx, void *data, int *data_size, AVPacket *avpkt)
{
    PicContext *s = avctx->priv_data;
    int buf_size = avpkt->size;
    const uint8_t *buf = avpkt->data;
    const uint8_t *buf_end = avpkt->data + buf_size;
    uint32_t *palette;
    int bits_per_plane, bpp, etype, esize, npal;
    int i, x, y, plane;

    if (buf_size < 11)
        return AVERROR_INVALIDDATA;

    if (bytestream_get_le16(&buf) != 0x1234)
        return AVERROR_INVALIDDATA;
    s->width  = bytestream_get_le16(&buf);
    s->height = bytestream_get_le16(&buf);
    buf += 4;
    bits_per_plane = *buf & 0xF;
    s->nb_planes   = (*buf++ >> 4) + 1;
    bpp = s->nb_planes ? bits_per_plane * s->nb_planes : bits_per_plane;
    if (bits_per_plane > 8 || bpp < 1 || bpp > 32) {
        /* pass the AVClass-enabled context, not priv_data */
        av_log_ask_for_sample(avctx, "unsupported bit depth\n");
        return AVERROR_INVALIDDATA;
    }

    if (*buf == 0xFF) {
        /* optional extension block: type and payload size */
        buf += 2;
        etype = bytestream_get_le16(&buf);
        esize = bytestream_get_le16(&buf);
        if (buf_end - buf < esize)
            return AVERROR_INVALIDDATA;
    } else {
        etype = -1;
        esize = 0;
    }

    avctx->pix_fmt = PIX_FMT_PAL8;

    /* update dimensions when either one changed (was '&&': only when both did) */
    if (s->width != avctx->width || s->height != avctx->height) {
        if (av_image_check_size(s->width, s->height, 0, avctx) < 0)
            return -1;
        avcodec_set_dimensions(avctx, s->width, s->height);
        if (s->frame.data[0])
            avctx->release_buffer(avctx, &s->frame);
    }

    if (avctx->get_buffer(avctx, &s->frame) < 0) {
        av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
        return -1;
    }
    memset(s->frame.data[0], 0, s->height * s->frame.linesize[0]);
    s->frame.pict_type           = AV_PICTURE_TYPE_I;
    s->frame.palette_has_changed = 1;

    palette = (uint32_t *)s->frame.data[1];
    if (etype == 1 && esize > 1 && *buf < 6) {
        /* CGA mode 4/5 palette selected by index */
        int idx = *buf;
        npal = 4;
        for (i = 0; i < npal; i++)
            palette[i] = ff_cga_palette[cga_mode45_index[idx][i]];
    } else if (etype == 2) {
        /* explicit CGA indices; clamp to last valid entry (15, not 16) */
        npal = FFMIN(esize, 16);
        for (i = 0; i < npal; i++)
            palette[i] = ff_cga_palette[FFMIN(buf[i], 15)];
    } else if (etype == 3) {
        /* explicit EGA indices (64-entry table) */
        npal = FFMIN(esize, 16);
        for (i = 0; i < npal; i++)
            palette[i] = ff_ega_palette[FFMIN(buf[i], 63)];
    } else if (etype == 4 || etype == 5) {
        /* raw RGB triplets, 6 bits per component -> scale to 8 bits */
        npal = FFMIN(esize / 3, 256);
        for (i = 0; i < npal; i++)
            palette[i] = AV_RB24(buf + i * 3) << 2;
    } else {
        /* no usable extension: synthesize a default palette from bpp */
        if (bpp == 1) {
            npal = 2;
            palette[0] = 0x000000;
            palette[1] = 0xFFFFFF;
        } else if (bpp == 2) {
            npal = 4;
            for (i = 0; i < npal; i++)
                palette[i] = ff_cga_palette[cga_mode45_index[0][i]];
        } else {
            npal = 16;
            memcpy(palette, ff_cga_palette, npal * 4);
        }
    }
    // fill remaining palette entries
    memset(palette + npal, 0, AVPALETTE_SIZE - npal * 4);
    buf += esize;

    x = 0;
    y = s->height - 1;   /* image is stored bottom-up */
    plane = 0;
    if (bytestream_get_le16(&buf)) {
        while (buf_end - buf >= 6) {
            const uint8_t *buf_pend = buf + FFMIN(AV_RL16(buf), buf_end - buf);
            //ignore uncompressed block size reported at buf[2]
            int marker = buf[4];
            buf += 5;

            while (plane < s->nb_planes && buf_pend - buf >= 1) {
                int run = 1;
                int val = *buf++;
                if (val == marker) {
                    /* RLE escape: run byte (0 => 16-bit run), then value */
                    run = *buf++;
                    if (run == 0)
                        run = bytestream_get_le16(&buf);
                    val = *buf++;
                }
                if (buf > buf_end)
                    break;

                if (bits_per_plane == 8) {
                    picmemset_8bpp(s, val, run, &x, &y);
                    if (y < 0)
                        break;
                } else {
                    picmemset(s, val, run, &x, &y, &plane, bits_per_plane);
                }
            }
        }
    } else {
        av_log_ask_for_sample(avctx, "uncompressed image\n");
        return buf_size;
    }

    *data_size = sizeof(AVFrame);
    *(AVFrame *)data = s->frame;
    return buf_size;
}
/**
 * Rotate one horizontal slice of the output frame (rows [start, end) of one
 * plane), for parallel execution across jobs.
 *
 * Exact multiples of 90 degrees (with matching dimensions) are dispatched to
 * simple_rotate() row copies. The general case walks every output pixel and
 * maps it back into the input with 16.16 fixed-point rotated coordinates
 * (c = cos, s = sin scaled by FIXP), sampling either nearest-neighbour or
 * via the bilinear interpolator.
 *
 * @return 0 (required by the slice-threading API)
 */
static int filter_slice(RotContext *rot, ThreadData *td, int job, int nb_jobs)
{
    AVFrame *in = td->in;
    AVFrame *out = td->out;
    const int outw = td->outw, outh = td->outh;
    const int inw = td->inw, inh = td->inh;
    const int plane = td->plane;
    const int xi = td->xi, yi = td->yi;
    const int c = td->c, s = td->s;   /* fixed-point cos/sin of the angle */
    const int start = (outh *  job)      / nb_jobs;
    const int end   = (outh * (job + 1)) / nb_jobs;
    /* per-row rotated offsets, advanced to this job's first row */
    int xprime = td->xprime + start * s;
    int yprime = td->yprime + start * c;
    int i, j, x, y;

    for (j = start; j < end; j++) {
        /* fixed-point source coordinates for output pixel (0, j),
         * centered on the input image */
        x = xprime + xi + FIXP * (inw - 1) / 2;
        y = yprime + yi + FIXP * (inh - 1) / 2;

        if (fabs(rot->angle - 0) < FLT_EPSILON && outw == inw && outh == inh) {
            /* 0 degrees: straight row copy */
            simple_rotate(out->data[plane] + j * out->linesize[plane],
                           in->data[plane] + j * in->linesize[plane],
                          in->linesize[plane], 0, rot->draw.pixelstep[plane], outw);
        } else if (fabs(rot->angle - M_PI / 2) < FLT_EPSILON && outw == inh && outh == inw) {
            /* 90 degrees: output row j comes from input column j */
            simple_rotate(out->data[plane] + j * out->linesize[plane],
                           in->data[plane] + j * rot->draw.pixelstep[plane],
                          in->linesize[plane], 1, rot->draw.pixelstep[plane], outw);
        } else if (fabs(rot->angle - M_PI) < FLT_EPSILON && outw == inw && outh == inh) {
            /* 180 degrees: reversed copy of the mirrored row */
            simple_rotate(out->data[plane] + j * out->linesize[plane],
                           in->data[plane] + (outh - j - 1) * in->linesize[plane],
                          in->linesize[plane], 2, rot->draw.pixelstep[plane], outw);
        } else if (fabs(rot->angle - 3 * M_PI / 2) < FLT_EPSILON && outw == inh && outh == inw) {
            /* 270 degrees: output row j from input column (outh-j-1) */
            simple_rotate(out->data[plane] + j * out->linesize[plane],
                           in->data[plane] + (outh - j - 1) * rot->draw.pixelstep[plane],
                          in->linesize[plane], 3, rot->draw.pixelstep[plane], outw);
        } else {
            /* generic rotation: inverse-map each output pixel into the input */
            for (i = 0; i < outw; i++) {
                int32_t v;
                int x1, y1;
                uint8_t *pin, *pout;
                x1 = x >> 16;   /* integer part of the 16.16 coordinates */
                y1 = y >> 16;

                /* the out-of-range values avoid border artifacts */
                if (x1 >= -1 && x1 <= inw && y1 >= -1 && y1 <= inh) {
                    uint8_t inp_inv[4]; /* interpolated input value */
                    pout = out->data[plane] + j * out->linesize[plane] +
                           i * rot->draw.pixelstep[plane];
                    if (rot->use_bilinear) {
                        pin = rot->interpolate_bilinear(inp_inv,
                                                        in->data[plane], in->linesize[plane],
                                                        rot->draw.pixelstep[plane],
                                                        x, y, inw - 1, inh - 1);
                    } else {
                        /* nearest neighbour, clamped to the image */
                        int x2 = av_clip(x1, 0, inw - 1);
                        int y2 = av_clip(y1, 0, inh - 1);
                        pin = in->data[plane] + y2 * in->linesize[plane] +
                              x2 * rot->draw.pixelstep[plane];
                    }

                    switch (rot->draw.pixelstep[plane]) {
                    case 1:
                        *pout = *pin;
                        break;
                    case 2:
                        v = AV_RL16(pin);
                        AV_WL16(pout, v);
                        break;
                    case 3:
                        v = AV_RB24(pin);
                        AV_WB24(pout, v);
                        break;
                    case 4:
                        *((uint32_t *)pout) = *((uint32_t *)pin);
                        break;
                    default:
                        memcpy(pout, pin, rot->draw.pixelstep[plane]);
                        break;
                    }
                }
                /* step one output pixel to the right in source space */
                x += c;
                y -= s;
            }
        }
        /* step one output row down in source space */
        xprime += s;
        yprime += c;
    }

    return 0;
}
/**
 * Depacketize one RTP/JPEG (RFC 2435) payload and reassemble the frame.
 *
 * The first fragment (offset 0) creates a dynamic buffer, resolves the
 * quantization tables (inline tables for q >= 128 with caching, or tables
 * derived from the q factor otherwise) and prepends synthesized JPEG frame
 * and scan headers. Subsequent fragments are appended at their byte offset;
 * the RTP marker bit closes the frame with an EOI and hands the packet back.
 *
 * @return 0 when a complete frame was produced in pkt, AVERROR(EAGAIN) when
 *         more fragments are needed or the frame had to be dropped, other
 *         negative AVERROR on malformed input
 */
static int jpeg_parse_packet(AVFormatContext *ctx, PayloadContext *jpeg, AVStream *st, AVPacket *pkt, uint32_t *timestamp, const uint8_t *buf, int len, uint16_t seq, int flags)
{
    uint8_t type, q, width, height;
    const uint8_t *qtables = NULL;
    uint16_t qtable_len;
    uint32_t off;
    int ret, dri = 0;

    if (len < 8) {
        av_log(ctx, AV_LOG_ERROR, "Too short RTP/JPEG packet.\n");
        return AVERROR_INVALIDDATA;
    }

    /* Parse the main JPEG header. */
    off    = AV_RB24(buf + 1); /* fragment byte offset */
    type   = AV_RB8(buf + 4);  /* id of jpeg decoder params */
    q      = AV_RB8(buf + 5);  /* quantization factor (or table id) */
    width  = AV_RB8(buf + 6);  /* frame width in 8 pixel blocks */
    height = AV_RB8(buf + 7);  /* frame height in 8 pixel blocks */
    buf += 8;
    len -= 8;

    if (type & 0x40) {
        /* restart-marker header present: 4 bytes, first two are the interval */
        if (len < 4) {
            av_log(ctx, AV_LOG_ERROR, "Too short RTP/JPEG packet.\n");
            return AVERROR_INVALIDDATA;
        }
        dri = AV_RB16(buf);
        buf += 4;
        len -= 4;
        type &= ~0x40;
    }
    /* Parse the restart marker header. */
    if (type > 63) {
        av_log(ctx, AV_LOG_ERROR,
               "Unimplemented RTP/JPEG restart marker header.\n");
        return AVERROR_PATCHWELCOME;
    }
    if (type > 1) {
        av_log(ctx, AV_LOG_ERROR, "Unimplemented RTP/JPEG type %d\n", type);
        return AVERROR_PATCHWELCOME;
    }

    /* Parse the quantization table header. */
    if (off == 0) {
        /* Start of JPEG data packet. */
        uint8_t new_qtables[128];
        uint8_t hdr[1024];

        if (q > 127) {
            /* q >= 128: quantization tables are carried in-band */
            uint8_t precision;
            if (len < 4) {
                av_log(ctx, AV_LOG_ERROR, "Too short RTP/JPEG packet.\n");
                return AVERROR_INVALIDDATA;
            }

            /* The first byte is reserved for future use. */
            precision  = AV_RB8(buf + 1); /* size of coefficients */
            qtable_len = AV_RB16(buf + 2); /* length in bytes */
            buf += 4;
            len -= 4;

            if (precision)
                av_log(ctx, AV_LOG_WARNING, "Only 8-bit precision is supported.\n");

            if (qtable_len > 0) {
                if (len < qtable_len) {
                    av_log(ctx, AV_LOG_ERROR, "Too short RTP/JPEG packet.\n");
                    return AVERROR_INVALIDDATA;
                }
                qtables = buf;
                buf += qtable_len;
                len -= qtable_len;
                if (q < 255) {
                    /* q in 128..254 identifies a cacheable table set */
                    if (jpeg->qtables_len[q - 128] &&
                        (jpeg->qtables_len[q - 128] != qtable_len ||
                         memcmp(qtables, &jpeg->qtables[q - 128][0], qtable_len))) {
                        av_log(ctx, AV_LOG_WARNING,
                               "Quantization tables for q=%d changed\n", q);
                    } else if (!jpeg->qtables_len[q - 128] && qtable_len <= 128) {
                        memcpy(&jpeg->qtables[q - 128][0], qtables, qtable_len);
                        jpeg->qtables_len[q - 128] = qtable_len;
                    }
                }
            } else {
                /* no tables in this packet: fall back to the cache */
                if (q == 255) {
                    av_log(ctx, AV_LOG_ERROR,
                           "Invalid RTP/JPEG packet. "
                           "Quantization tables not found.\n");
                    return AVERROR_INVALIDDATA;
                }
                if (!jpeg->qtables_len[q - 128]) {
                    av_log(ctx, AV_LOG_ERROR,
                           "No quantization tables known for q=%d yet.\n", q);
                    return AVERROR_INVALIDDATA;
                }
                qtables    = &jpeg->qtables[q - 128][0];
                qtable_len = jpeg->qtables_len[q - 128];
            }
        } else { /* q <= 127 */
            if (q == 0 || q > 99) {
                av_log(ctx, AV_LOG_ERROR, "Reserved q value %d\n", q);
                return AVERROR_INVALIDDATA;
            }
            /* derive the tables from the scale factor q */
            create_default_qtables(new_qtables, q);
            qtables    = new_qtables;
            qtable_len = sizeof(new_qtables);
        }

        /* Skip the current frame in case of the end packet
         * has been lost somewhere. */
        free_frame_if_needed(jpeg);

        if ((ret = avio_open_dyn_buf(&jpeg->frame)) < 0)
            return ret;
        jpeg->timestamp = *timestamp;

        /* Generate a frame and scan headers that can be prepended to the
         * RTP/JPEG data payload to produce a JPEG compressed image in
         * interchange format. */
        jpeg->hdr_size = jpeg_create_header(hdr, sizeof(hdr), type, width,
                                            height, qtables,
                                            qtable_len / 64, dri);

        /* Copy JPEG header to frame buffer. */
        avio_write(jpeg->frame, hdr, jpeg->hdr_size);
    }

    if (!jpeg->frame) {
        av_log(ctx, AV_LOG_ERROR,
               "Received packet without a start chunk; dropping frame.\n");
        return AVERROR(EAGAIN);
    }

    if (jpeg->timestamp != *timestamp) {
        /* Skip the current frame if timestamp is incorrect.
         * A start packet has been lost somewhere. */
        free_frame_if_needed(jpeg);
        av_log(ctx, AV_LOG_ERROR, "RTP timestamps don't match.\n");
        return AVERROR_INVALIDDATA;
    }

    if (off != avio_tell(jpeg->frame) - jpeg->hdr_size) {
        av_log(ctx, AV_LOG_ERROR,
               "Missing packets; dropping frame.\n");
        return AVERROR(EAGAIN);
    }

    /* Copy data to frame buffer. */
    avio_write(jpeg->frame, buf, len);

    if (flags & RTP_FLAG_MARKER) {
        /* End of JPEG data packet. */
        uint8_t buf[2] = { 0xff, EOI };

        /* Put EOI marker. */
        avio_write(jpeg->frame, buf, sizeof(buf));

        /* Prepare the JPEG packet. */
        if ((ret = ff_rtp_finalize_packet(pkt, &jpeg->frame, st->index)) < 0) {
            av_log(ctx, AV_LOG_ERROR,
                   "Error occurred when getting frame buffer.\n");
            return ret;
        }
        return 0;
    }

    return AVERROR(EAGAIN);
}
/**
 * Decode one Bitmap Brothers JV chunk: a 5-byte header (LE32 video payload
 * size + type byte), optional video data, and an optional trailing palette.
 *
 * Type 0/1 carry 8x8 block data decoded by decode8x8(); type 2 is a solid
 * fill with a single value. Remaining bytes after the video payload are a
 * sequence of RGB triplets updating the palette.
 *
 * Fixes vs. previous revision: the packet size is validated before the
 * 5-byte header is read (out-of-bounds read on truncated packets), and
 * video_size is range-checked against the remaining payload.
 *
 * @return number of consumed bytes on success, negative AVERROR on failure
 */
static int decode_frame(AVCodecContext *avctx, void *data, int *got_frame, AVPacket *avpkt)
{
    JvContext *s = avctx->priv_data;
    int buf_size = avpkt->size;
    const uint8_t *buf = avpkt->data;
    const uint8_t *buf_end = buf + buf_size;
    int video_size, video_type, i, j, ret;

    /* need at least the 5-byte chunk header before dereferencing it */
    if (buf_size < 5)
        return AVERROR_INVALIDDATA;

    video_size = AV_RL32(buf);
    video_type = buf[4];
    buf += 5;

    if (video_size) {
        /* AV_RL32 can yield a negative int; reject it and oversized claims */
        if (video_size < 0 || video_size > buf_end - buf) {
            av_log(avctx, AV_LOG_ERROR, "video size %d invalid\n", video_size);
            return AVERROR_INVALIDDATA;
        }
        if ((ret = ff_reget_buffer(avctx, s->frame)) < 0) {
            av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
            return ret;
        }

        if (video_type == 0 || video_type == 1) {
            GetBitContext gb;
            init_get_bits(&gb, buf, 8 * FFMIN(video_size, buf_end - buf));

            /* image is coded as a grid of 8x8 blocks */
            for (j = 0; j < avctx->height; j += 8)
                for (i = 0; i < avctx->width; i += 8)
                    decode8x8(&gb,
                              s->frame->data[0] + j * s->frame->linesize[0] + i,
                              s->frame->linesize[0], &s->bdsp);

            buf += video_size;
        } else if (video_type == 2) {
            /* solid fill with one palette index */
            if (buf + 1 <= buf_end) {
                int v = *buf++;
                for (j = 0; j < avctx->height; j++)
                    memset(s->frame->data[0] + j * s->frame->linesize[0],
                           v, avctx->width);
            }
        } else {
            av_log(avctx, AV_LOG_WARNING,
                   "unsupported frame type %i\n", video_type);
            return AVERROR_INVALIDDATA;
        }
    }

    if (buf < buf_end) {
        /* trailing palette: RGB triplets, 6 bits per component */
        for (i = 0; i < AVPALETTE_COUNT && buf + 3 <= buf_end; i++) {
            s->palette[i] = AV_RB24(buf) << 2;
            buf += 3;
        }
        s->palette_has_changed = 1;
    }

    if (video_size) {
        s->frame->key_frame           = 1;
        s->frame->pict_type           = AV_PICTURE_TYPE_I;
        s->frame->palette_has_changed = s->palette_has_changed;
        s->palette_has_changed        = 0;
        memcpy(s->frame->data[1], s->palette, AVPALETTE_SIZE);

        if ((ret = av_frame_ref(data, s->frame)) < 0)
            return ret;
        *got_frame = 1;
    }

    return buf_size;
}
/**
 * Horizontally flip a band of rows of every plane, for slice-threaded
 * execution: job/nb_jobs select which rows of each plane this call handles.
 *
 * Each output row is the corresponding input row written right-to-left,
 * with a specialized copy loop per pixel size (1/2/3/4 bytes, plus a
 * memcpy fallback for anything larger).
 *
 * @return 0 (required by the slice-threading API)
 */
static int filter_slice(AVFilterContext *ctx, void *arg, int job, int nb_jobs)
{
    FlipContext *s = ctx->priv;
    ThreadData *td = arg;
    AVFrame *in  = td->in;
    AVFrame *out = td->out;
    int plane;

    for (plane = 0; plane < 4 && in->data[plane] && in->linesize[plane]; plane++) {
        const int width  = s->planewidth[plane];
        const int height = s->planeheight[plane];
        const int row0   = (height *  job)      / nb_jobs;  /* first row of this job */
        const int row1   = (height * (job + 1)) / nb_jobs;  /* one past the last row */
        const int step   = s->max_step[plane];              /* bytes per pixel */
        /* source starts at the rightmost pixel of the first row */
        uint8_t *src = in->data[plane]  + row0 * in->linesize[plane] + (width - 1) * step;
        uint8_t *dst = out->data[plane] + row0 * out->linesize[plane];
        int row, col;

        for (row = row0; row < row1; row++) {
            switch (step) {
            case 1:
                for (col = 0; col < width; col++)
                    dst[col] = src[-col];
                break;
            case 2: {
                const uint16_t *s16 = (const uint16_t *)src;
                uint16_t       *d16 = (uint16_t *)dst;
                for (col = 0; col < width; col++)
                    d16[col] = s16[-col];
                break;
            }
            case 3: {
                /* 24-bit pixels: move each triplet through a 32-bit temp */
                const uint8_t *sp = src;
                uint8_t       *dp = dst;
                for (col = 0; col < width; col++, dp += 3, sp -= 3) {
                    int32_t pixel = AV_RB24(sp);
                    AV_WB24(dp, pixel);
                }
                break;
            }
            case 4: {
                const uint32_t *s32 = (const uint32_t *)src;
                uint32_t       *d32 = (uint32_t *)dst;
                for (col = 0; col < width; col++)
                    d32[col] = s32[-col];
                break;
            }
            default:
                for (col = 0; col < width; col++)
                    memcpy(dst + col * step, src - col * step, step);
            }
            src += in->linesize[plane];
            dst += out->linesize[plane];
        }
    }

    return 0;
}
static int demux_ty_fill_buffer( demuxer_t *demux, demux_stream_t *dsds ) { int invalidType = 0; int errorHeader = 0; int recordsDecoded = 0; int readSize; int numberRecs; unsigned char *recPtr; int offset; int counter; int aid; TiVoInfo *tivo = demux->priv; unsigned char *chunk = tivo->chunk; if ( demux->stream->type == STREAMTYPE_DVD ) return 0; mp_msg( MSGT_DEMUX, MSGL_DBG3, "ty:ty processing\n" ); if( demux->stream->eof ) return 0; // ====================================================================== // If we haven't figured out the size of the stream, let's do so // ====================================================================== if ( demux->stream->type == STREAMTYPE_VSTREAM ) { // The vstream code figures out the exact size of the stream demux->movi_start = 0; demux->movi_end = demux->stream->end_pos; tivo->size = demux->stream->end_pos; } else { // If its a local file, try to find the Part Headers, so we can // calculate the ACTUAL stream size // If we can't find it, go off with the file size and hope the // extract program did the "right thing" if ( tivo->readHeader == 0 ) { loff_t filePos; tivo->readHeader = 1; filePos = demux->filepos; stream_seek( demux->stream, 0 ); readSize = stream_read( demux->stream, chunk, CHUNKSIZE ); if ( memcmp( chunk, TMF_SIG, sizeof( TMF_SIG ) ) == 0 ) { mp_msg( MSGT_DEMUX, MSGL_DBG3, "ty:Detected a tmf\n" ); tivo->tmf = 1; ty_tmf_filetoparts( demux, tivo ); readSize = tmf_load_chunk( demux, tivo, chunk, 0 ); } if ( readSize == CHUNKSIZE && AV_RB32(chunk) == TIVO_PES_FILEID ) { loff_t numberParts; readSize = 0; if ( tivo->tmf != 1 ) { loff_t offset; numberParts = demux->stream->end_pos / TIVO_PART_LENGTH; offset = numberParts * TIVO_PART_LENGTH; mp_msg( MSGT_DEMUX, MSGL_DBG3, "ty:ty/ty+Number Parts %"PRId64"\n", (int64_t)numberParts ); if ( offset + CHUNKSIZE < demux->stream->end_pos ) { stream_seek( demux->stream, offset ); readSize = stream_read( demux->stream, chunk, CHUNKSIZE ); } } else { numberParts = 
tivo->tmf_totalparts; offset = numberParts * TIVO_PART_LENGTH; readSize = tmf_load_chunk( demux, tivo, chunk, numberParts * ( TIVO_PART_LENGTH - CHUNKSIZE ) / CHUNKSIZE ); } if ( readSize == CHUNKSIZE && AV_RB32(chunk) == TIVO_PES_FILEID ) { int size = AV_RB24(chunk + 12); size -= 4; size *= CHUNKSIZE; tivo->size = numberParts * TIVO_PART_LENGTH; tivo->size += size; mp_msg( MSGT_DEMUX, MSGL_DBG3, "ty:Header Calc Stream Size %"PRId64"\n", tivo->size ); } } if ( demux->stream->start_pos > 0 ) filePos = demux->stream->start_pos; stream_seek( demux->stream, filePos ); demux->filepos = stream_tell( demux->stream ); tivo->whichChunk = filePos / CHUNKSIZE; } demux->movi_start = 0; demux->movi_end = tivo->size; } // ====================================================================== // Give a clue as to where we are in the stream // ====================================================================== mp_msg( MSGT_DEMUX, MSGL_DBG3, "ty:ty header size %"PRIx64"\n", (int64_t)tivo->size ); mp_msg( MSGT_DEMUX, MSGL_DBG3, "ty:ty which Chunk %d\n", tivo->whichChunk ); mp_msg( MSGT_DEMUX, MSGL_DBG3, "ty:file end_pos %"PRIx64"\n", (int64_t)demux->stream->end_pos ); mp_msg( MSGT_DEMUX, MSGL_DBG3, "\nty:wanted current offset %"PRIx64"\n", (int64_t)stream_tell( demux->stream ) ); if ( tivo->size > 0 && stream_tell( demux->stream ) > tivo->size ) { demux->stream->eof = 1; return 0; } do { if ( tivo->tmf != 1 ) { // Make sure we are on a 128k boundary if ( demux->filepos % CHUNKSIZE != 0 ) { int whichChunk = demux->filepos / CHUNKSIZE; if ( demux->filepos % CHUNKSIZE > CHUNKSIZE / 2 ) whichChunk++; stream_seek( demux->stream, whichChunk * CHUNKSIZE ); } demux->filepos = stream_tell( demux->stream ); tivo->whichChunk = demux->filepos / CHUNKSIZE; readSize = stream_read( demux->stream, chunk, CHUNKSIZE ); if ( readSize != CHUNKSIZE ) return 0; } else { readSize = tmf_load_chunk( demux, tivo, chunk, tivo->whichChunk ); if ( readSize != CHUNKSIZE ) return 0; tivo->whichChunk++; } if 
(AV_RB32(chunk) == TIVO_PES_FILEID) mp_msg( MSGT_DEMUX, MSGL_DBG3, "ty:Skipping PART Header\n" ); } while (AV_RB32(chunk) == TIVO_PES_FILEID); mp_msg( MSGT_DEMUX, MSGL_DBG3, "\nty:actual current offset %"PRIx64"\n", stream_tell( demux->stream ) - CHUNKSIZE ); // Let's make a Video Demux Stream for MPlayer aid = 0x0; if( !demux->v_streams[ aid ] ) new_sh_video( demux, aid ); if( demux->video->id == -1 ) demux->video->id = aid; if( demux->video->id == aid ) { demux_stream_t *ds = demux->video; if( !ds->sh ) ds->sh = demux->v_streams[ aid ]; } // ====================================================================== // Finally, we get to actually parse the chunk // ====================================================================== mp_msg( MSGT_DEMUX, MSGL_DBG3, "ty:ty parsing a chunk\n" ); numberRecs = chunk[ 0 ]; recPtr = &chunk[ 4 ]; offset = numberRecs * 16 + 4; for ( counter = 0 ; counter < numberRecs ; counter++ ) { int size = AV_RB24(recPtr) >> 4; int type = recPtr[ 3 ]; int nybbleType = recPtr[ 2 ] & 0x0f; recordsDecoded++; mp_msg( MSGT_DEMUX, MSGL_DBG3, "ty:Record Type %x/%x %d\n", nybbleType, type, size ); // ================================================================ // Video Parsing // ================================================================ if ( type == 0xe0 ) { if ( size > 0 && size + offset <= CHUNKSIZE ) { int esOffset1 = demux_ty_FindESHeader( VIDEO_NAL, &chunk[ offset ], size); if ( esOffset1 != -1 ) tivo->lastVideoPTS = get_ty_pts( &chunk[ offset + esOffset1 + 9 ] ); // Do NOT Pass the PES Header onto the MPEG2 Decode if( nybbleType != 0x06 ) demux_ty_CopyToDemuxPacket( demux->video, &chunk[ offset ], size, demux->filepos + offset, tivo->lastVideoPTS ); offset += size; } else errorHeader++; } // ================================================================ // Audio Parsing // ================================================================ else if ( type == 0xc0 ) { if ( size > 0 && size + offset <= CHUNKSIZE ) { if( 
demux->audio->id == -1 ) { if ( nybbleType == 0x02 ) continue; // DTiVo inconclusive, wait for more else if ( nybbleType == 0x09 ) { mp_msg( MSGT_DEMUX, MSGL_DBG3, "ty:Setting AC-3 Audio\n" ); aid = 0x80; // AC-3 } else { mp_msg( MSGT_DEMUX, MSGL_DBG3, "ty:Setting MPEG Audio\n" ); aid = 0x0; // MPEG Audio } demux->audio->id = aid; if( !demux->a_streams[ aid ] ) new_sh_audio( demux, aid, NULL ); if( demux->audio->id == aid ) { demux_stream_t *ds = demux->audio; if( !ds->sh ) { sh_audio_t* sh_a; ds->sh = demux->a_streams[ aid ]; sh_a = (sh_audio_t*)ds->sh; switch(aid & 0xE0){ // 1110 0000 b (high 3 bit: type low 5: id) case 0x00: sh_a->format=0x50;break; // mpeg case 0xA0: sh_a->format=0x10001;break; // dvd pcm case 0x80: if((aid & 0xF8) == 0x88) sh_a->format=0x2001;//dts else sh_a->format=0x2000;break; // ac3 } } } } aid = demux->audio->id; // SA DTiVo Audio Data, no PES // ================================================ if ( nybbleType == 0x02 || nybbleType == 0x04 ) { if ( nybbleType == 0x02 && tivo->tivoType == 2 ) demux_ty_AddToAudioBuffer( tivo, &chunk[ offset ], size ); else { mp_msg( MSGT_DEMUX, MSGL_DBG3, "ty:Adding Audio Packet Size %d\n", size ); demux_ty_CopyToDemuxPacket( demux->audio, &chunk[ offset ], size, ( demux->filepos + offset ), tivo->lastAudioPTS ); } } // 3 - MPEG Audio with PES Header, either SA or DTiVo // 9 - DTiVo AC3 Audio Data with PES Header // ================================================ if ( nybbleType == 0x03 || nybbleType == 0x09 ) { int esOffset1, esOffset2; if ( nybbleType == 0x03 ) esOffset1 = demux_ty_FindESHeader( AUDIO_NAL, &chunk[ offset ], size); // SA PES Header, No Audio Data // ================================================ if ( nybbleType == 0x03 && esOffset1 == 0 && size == 16 ) { tivo->tivoType = 1; tivo->lastAudioPTS = get_ty_pts( &chunk[ offset + SERIES2_PTS_OFFSET ] ); } else // DTiVo Audio with PES Header // ================================================ { tivo->tivoType = 2; demux_ty_AddToAudioBuffer( 
tivo, &chunk[ offset ], size ); demux_ty_FindESPacket( nybbleType == 9 ? AC3_NAL : AUDIO_NAL, tivo->lastAudio, tivo->lastAudioEnd, &esOffset1, &esOffset2 ); if ( esOffset1 != -1 && esOffset2 != -1 ) { int packetSize = esOffset2 - esOffset1; int headerSize; int ptsOffset; if ( IsValidAudioPacket( packetSize, &ptsOffset, &headerSize ) ) { mp_msg( MSGT_DEMUX, MSGL_DBG3, "ty:Adding DTiVo Audio Packet Size %d\n", packetSize ); tivo->lastAudioPTS = get_ty_pts( &tivo->lastAudio[ esOffset1 + ptsOffset ] ); if (nybbleType == 9) headerSize = 0; demux_ty_CopyToDemuxPacket ( demux->audio, &tivo->lastAudio[ esOffset1 + headerSize ], packetSize - headerSize, demux->filepos + offset, tivo->lastAudioPTS ); } // Collapse the Audio Buffer tivo->lastAudioEnd -= esOffset2; memmove( &tivo->lastAudio[ 0 ], &tivo->lastAudio[ esOffset2 ], tivo->lastAudioEnd ); } } } offset += size; } else errorHeader++; } // ================================================================ // 1 = Closed Caption // 2 = Extended Data Services // ================================================================ else if ( type == 0x01 || type == 0x02 ) { unsigned char lastXDS[ 16 ]; int b = AV_RB24(recPtr) >> 4; b &= 0x7f7f; mp_msg( MSGT_DEMUX, MSGL_DBG3, "ty:%s %04x\n", type == 1 ? "CC" : "XDS", b); lastXDS[ 0x00 ] = 0x00; lastXDS[ 0x01 ] = 0x00; lastXDS[ 0x02 ] = 0x01; lastXDS[ 0x03 ] = 0xb2; lastXDS[ 0x04 ] = 'T'; lastXDS[ 0x05 ] = 'Y'; lastXDS[ 0x06 ] = type; lastXDS[ 0x07 ] = b >> 8; lastXDS[ 0x08 ] = b; if ( subcc_enabled ) demux_ty_CopyToDemuxPacket( demux->video, lastXDS, 0x09, demux->filepos + offset, tivo->lastVideoPTS ); } // ================================================================ // Unknown // ================================================================ else { if ( size > 0 && size + offset <= CHUNKSIZE )
/**
 * Parse the tx3g sample description carried in the codec extradata and fill
 * the decoder's default style state.
 *
 * Reads, in order: display flags, alignment, background color, BoxRecord,
 * the default StyleRecord (font ID, face flags, size, primary color) and the
 * font table ('ftab'), allocating one entry per listed font.  Every read is
 * bounds-checked against avctx->extradata_size via the running box_size.
 *
 * @param avctx codec context; avctx->extradata holds the tx3g box payload
 * @param m     decoder context receiving the defaults (m->d) and font table
 *              (m->ftab / m->ftab_entries / m->count_f)
 * @return 0 on success, -1 on truncated extradata, AVERROR(ENOMEM) on
 *         allocation failure (partial state is released via
 *         mov_text_cleanup_ftab in all failure paths)
 */
static int mov_text_tx3g(AVCodecContext *avctx, MovTextContext *m)
{
    uint8_t *tx3g_ptr = avctx->extradata;
    int i, box_size, font_length;
    int8_t v_align, h_align;
    int style_fontID;
    StyleBox s_default;

    m->count_f = 0;
    m->ftab_entries = 0;
    box_size = BOX_SIZE_INITIAL; /* Size till ftab_entries */
    if (avctx->extradata_size < box_size)
        return -1;
    // Display Flags (4 bytes, unused here)
    tx3g_ptr += 4;
    // Alignment: signed horizontal/vertical justification bytes
    h_align = *tx3g_ptr++;
    v_align = *tx3g_ptr++;
    // Map the (h, v) pair onto the nine alignment constants; unknown
    // values leave m->d.alignment untouched.
    if (h_align == 0) {
        if (v_align == 0)  m->d.alignment = TOP_LEFT;
        if (v_align == 1)  m->d.alignment = MIDDLE_LEFT;
        if (v_align == -1) m->d.alignment = BOTTOM_LEFT;
    }
    if (h_align == 1) {
        if (v_align == 0)  m->d.alignment = TOP_CENTER;
        if (v_align == 1)  m->d.alignment = MIDDLE_CENTER;
        if (v_align == -1) m->d.alignment = BOTTOM_CENTER;
    }
    if (h_align == -1) {
        if (v_align == 0)  m->d.alignment = TOP_RIGHT;
        if (v_align == 1)  m->d.alignment = MIDDLE_RIGHT;
        if (v_align == -1) m->d.alignment = BOTTOM_RIGHT;
    }
    // Background Color: RGB kept, 4th byte skipped
    // (presumably the alpha component — TODO confirm against the tx3g spec)
    m->d.back_color = AV_RB24(tx3g_ptr);
    tx3g_ptr += 4;
    // BoxRecord: default text box, 8 bytes, unused here
    tx3g_ptr += 8;
    // StyleRecord: skip 4 bytes (presumably startChar/endChar — TODO confirm)
    tx3g_ptr += 4;
    // fontID of the default style, matched against the ftab below
    style_fontID = AV_RB16(tx3g_ptr);
    tx3g_ptr += 2;
    // face-style-flags: bold/italic/underline bits of the default style
    s_default.style_flag = *tx3g_ptr++;
    m->d.bold      = s_default.style_flag & STYLE_FLAG_BOLD;
    m->d.italic    = s_default.style_flag & STYLE_FLAG_ITALIC;
    m->d.underline = s_default.style_flag & STYLE_FLAG_UNDERLINE;
    // fontsize (single byte)
    m->d.fontsize = *tx3g_ptr++;
    // Primary color: RGB kept, 4th byte skipped (see back_color note)
    m->d.color = AV_RB24(tx3g_ptr);
    tx3g_ptr += 4;
    // FontRecord
    // FontRecord Size (4 bytes, unused)
    tx3g_ptr += 4;
    // ftab box header (4 bytes), then the entry count
    tx3g_ptr += 4;
    m->ftab_entries = AV_RB16(tx3g_ptr);
    tx3g_ptr += 2;
    for (i = 0; i < m->ftab_entries; i++) {
        // Fixed part of one entry: 2-byte fontID + 1-byte name length
        box_size += 3;
        if (avctx->extradata_size < box_size) {
            mov_text_cleanup_ftab(m);
            m->ftab_entries = 0;
            return -1;
        }
        m->ftab_temp = av_mallocz(sizeof(*m->ftab_temp));
        if (!m->ftab_temp) {
            mov_text_cleanup_ftab(m);
            return AVERROR(ENOMEM);
        }
        m->ftab_temp->fontID = AV_RB16(tx3g_ptr);
        tx3g_ptr += 2;
        font_length = *tx3g_ptr++;
        // Variable part: the font name itself
        box_size = box_size + font_length;
        if (avctx->extradata_size < box_size) {
            mov_text_cleanup_ftab(m);
            m->ftab_entries = 0;
            return -1;
        }
        // Copy the name and NUL-terminate it (the box stores it unterminated)
        m->ftab_temp->font = av_malloc(font_length + 1);
        if (!m->ftab_temp->font) {
            mov_text_cleanup_ftab(m);
            return AVERROR(ENOMEM);
        }
        memcpy(m->ftab_temp->font, tx3g_ptr, font_length);
        m->ftab_temp->font[font_length] = '\0';
        // Hand the entry over to the dynamic array; on success the array
        // owns it, so drop our temporary reference.
        av_dynarray_add(&m->ftab, &m->count_f, m->ftab_temp);
        if (!m->ftab) {
            mov_text_cleanup_ftab(m);
            return AVERROR(ENOMEM);
        }
        m->ftab_temp = NULL;
        tx3g_ptr = tx3g_ptr + font_length;
    }
    // Resolve the default style's fontID to its font name, if listed
    for (i = 0; i < m->ftab_entries; i++) {
        if (style_fontID == m->ftab[i]->fontID)
            m->d.font = m->ftab[i]->font;
    }
    return 0;
}
/**
 * Transpose (and, depending on trans->dir, flip) one input frame into a
 * newly allocated output buffer, plane by plane.
 *
 * Consumes the input reference in every path.  The sample aspect ratio is
 * swapped to match the rotated geometry, unless it is unset (num == 0).
 *
 * @param inlink input pad; its filter context holds the TransContext
 * @param in     input video buffer reference (ownership taken)
 * @return result of ff_filter_frame(), or AVERROR(ENOMEM) if no output
 *         buffer could be obtained
 */
static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *in)
{
    TransContext *trans = inlink->dst->priv;
    AVFilterLink *outlink = inlink->dst->outputs[0];
    AVFilterBufferRef *out;
    int plane;

    out = ff_get_video_buffer(outlink, AV_PERM_WRITE, outlink->w, outlink->h);
    if (!out) {
        avfilter_unref_bufferp(&in);
        return AVERROR(ENOMEM);
    }

    out->pts = in->pts;

    /* Transposing swaps the sample aspect ratio; an unset SAR stays unset. */
    if (in->video->pixel_aspect.num == 0) {
        out->video->pixel_aspect = in->video->pixel_aspect;
    } else {
        out->video->pixel_aspect.num = in->video->pixel_aspect.den;
        out->video->pixel_aspect.den = in->video->pixel_aspect.num;
    }

    for (plane = 0; out->data[plane]; plane++) {
        const int chroma  = plane == 1 || plane == 2;
        const int hsub    = chroma ? trans->hsub : 0;
        const int vsub    = chroma ? trans->vsub : 0;
        const int pixstep = trans->pixsteps[plane];
        const int inh     = in->video->h  >> vsub;
        const int outw    = out->video->w >> hsub;
        const int outh    = out->video->h >> vsub;
        uint8_t *wp = out->data[plane];
        uint8_t *rp = in->data[plane];
        int wstride = out->linesize[plane];
        int rstride = in->linesize[plane];
        int x, y;

        if (trans->dir & 1) {
            /* Read the source bottom-up. */
            rp += in->linesize[plane] * (inh - 1);
            rstride = -rstride;
        }
        if (trans->dir & 2) {
            /* Write the destination bottom-up. */
            wp += out->linesize[plane] * (outh - 1);
            wstride = -wstride;
        }

        /* Each output row y gathers input column y, pixstep bytes at a time. */
        for (y = 0; y < outh; y++, wp += wstride) {
            switch (pixstep) {
            case 1:
                for (x = 0; x < outw; x++)
                    wp[x] = rp[x * rstride + y];
                break;
            case 2:
                for (x = 0; x < outw; x++)
                    *((uint16_t *)(wp + 2 * x)) =
                        *((uint16_t *)(rp + x * rstride + y * 2));
                break;
            case 3:
                for (x = 0; x < outw; x++) {
                    int32_t v = AV_RB24(rp + x * rstride + y * 3);
                    AV_WB24(wp + 3 * x, v);
                }
                break;
            case 4:
                for (x = 0; x < outw; x++)
                    *((uint32_t *)(wp + 4 * x)) =
                        *((uint32_t *)(rp + x * rstride + y * 4));
                break;
            }
        }
    }

    avfilter_unref_bufferp(&in);
    return ff_filter_frame(outlink, out);
}