/*
 * Build a virtual playback timeline from Matroska ordered chapters.
 *
 * Collects the segment UIDs referenced by the chapters, resolves them to
 * source demuxers (the main file plus any external files located by
 * find_ordered_chapter_sources()), and fills in:
 *   - mpctx->timeline: one part per (time, source) discontinuity, plus a
 *     terminating entry whose start marks the end of the last real part
 *   - mpctx->chapters: the user-visible chapter list
 * Chapters whose source segment cannot be found are skipped; their total
 * duration is accumulated and reported as missing.
 */
void build_ordered_chapter_timeline(struct MPContext *mpctx)
{
    struct MPOpts *opts = mpctx->opts;

    if (!opts->ordered_chapters) {
        mp_msg(MSGT_CPLAYER, MSGL_INFO, "File uses ordered chapters, but "
               "you have disabled support for them. Ignoring.\n");
        return;
    }

    mp_msg(MSGT_CPLAYER, MSGL_INFO, "File uses ordered chapters, will build "
           "edit timeline.\n");

    struct demuxer *demuxer = mpctx->demuxer;
    struct matroska_data *m = &demuxer->matroska_data;

    // +1 because sources/uid_map[0] is original file even if all chapters
    // actually use other sources and need separate entries
    struct demuxer **sources = talloc_array_ptrtype(NULL, sources,
                                                    m->num_ordered_chapters+1);
    sources[0] = mpctx->demuxer;
    unsigned char (*uid_map)[16] = talloc_array_ptrtype(NULL, uid_map,
                                                 m->num_ordered_chapters + 1);
    int num_sources = 1;
    memcpy(uid_map[0], m->segment_uid, 16);

    // Deduplicate the segment UIDs used by the chapters; every distinct UID
    // gets a slot in sources[] (NULL until resolved below).
    for (int i = 0; i < m->num_ordered_chapters; i++) {
        struct matroska_chapter *c = m->ordered_chapters + i;
        if (!c->has_segment_uid)
            // A chapter without an explicit UID refers to the current file.
            memcpy(c->segment_uid, m->segment_uid, 16);

        for (int j = 0; j < num_sources; j++)
            if (!memcmp(c->segment_uid, uid_map[j], 16))
                goto found1;
        memcpy(uid_map[num_sources], c->segment_uid, 16);
        sources[num_sources] = NULL;
        num_sources++;
    found1:;
    }

    num_sources = find_ordered_chapter_sources(mpctx, sources, num_sources,
                                               uid_map);

    // +1 for terminating chapter with start time marking end of last real one
    struct timeline_part *timeline = talloc_array_ptrtype(NULL, timeline,
                                                  m->num_ordered_chapters + 1);
    struct chapter *chapters = talloc_array_ptrtype(NULL, chapters,
                                                    m->num_ordered_chapters);
    // Times below are in nanoseconds (Matroska resolution); they are
    // converted to seconds (double) only when stored into timeline/chapters.
    uint64_t starttime = 0;
    uint64_t missing_time = 0;
    int part_count = 0;
    int num_chapters = 0;
    uint64_t prev_part_offset = 0;
    for (int i = 0; i < m->num_ordered_chapters; i++) {
        struct matroska_chapter *c = m->ordered_chapters + i;

        int j;
        for (j = 0; j < num_sources; j++) {
            if (!memcmp(c->segment_uid, uid_map[j], 16))
                goto found2;
        }
        // Source for this chapter was not found: drop the chapter and
        // remember how much playback time is lost.
        missing_time += c->end - c->start;
        continue;
    found2:;
        /* Only add a separate part if the time or file actually changes.
         * Matroska files have chapter divisions that are redundant from
         * timeline point of view because the same chapter structure is used
         * both to specify the timeline and for normal chapter information.
         * Removing a missing inserted external chapter can also cause this.
         * We allow for a configurable fudge factor because of files which
         * specify chapter end times that are one frame too early;
         * we don't want to try seeking over a one frame gap. */
        int64_t join_diff = c->start - starttime - prev_part_offset;
        if (part_count == 0
            || FFABS(join_diff) > opts->chapter_merge_threshold * 1000000
            || sources[j] != timeline[part_count - 1].source) {
            timeline[part_count].source = sources[j];
            timeline[part_count].start = starttime / 1e9;
            timeline[part_count].source_start = c->start / 1e9;
            prev_part_offset = c->start - starttime;
            part_count++;
        } else if (part_count > 0 && join_diff) {
            /* Chapter was merged at an inexact boundary;
             * adjust timestamps to match. */
            mp_msg(MSGT_CPLAYER, MSGL_V, "Merging timeline part %d with "
                   "offset %g ms.\n", i, join_diff / 1e6);
            starttime += join_diff;
        }
        chapters[num_chapters].start = starttime / 1e9;
        chapters[num_chapters].name = talloc_strdup(chapters, c->name);
        starttime += c->end - c->start;
        num_chapters++;
    }
    // Terminating part: its start marks the end time of the last real part.
    timeline[part_count].start = starttime / 1e9;
    talloc_free(uid_map);

    if (!part_count) {
        // None of the parts come from the file itself???
        talloc_free(sources);
        talloc_free(timeline);
        talloc_free(chapters);
        return;
    }

    if (missing_time)
        mp_msg(MSGT_CPLAYER, MSGL_ERR, "There are %.3f seconds missing "
               "from the timeline!\n", missing_time / 1e9);
    // Ownership of the talloc'd arrays transfers to mpctx here.
    mpctx->sources = sources;
    mpctx->num_sources = num_sources;
    mpctx->timeline = timeline;
    mpctx->num_timeline_parts = part_count;
    mpctx->num_chapters = num_chapters;
    mpctx->chapters = chapters;
}
/* put sequence header if needed */
/*
 * Write the MPEG-1/2 sequence header on key frames, and for MPEG-2 also the
 * sequence extension. Chooses the aspect_ratio_info code closest to the
 * requested sample aspect ratio and derives the bitrate/VBV fields from the
 * rate-control settings.
 *
 * NOTE(review): this excerpt is truncated — the function body continues
 * past the end of the visible chunk (the enclosing key-frame block and the
 * function are closed there).
 */
static void mpeg1_encode_sequence_header(MpegEncContext *s)
{
        unsigned int vbv_buffer_size;
        unsigned int fps, v;
        int i;
        uint64_t time_code;
        float best_aspect_error= 1E10;
        float aspect_ratio= av_q2d(s->avctx->sample_aspect_ratio);
        int constraint_parameter_flag;

        if(aspect_ratio==0.0) aspect_ratio= 1.0; //pixel aspect 1:1 (VGA)

        if (s->current_picture.f.key_frame) {
            AVRational framerate = ff_mpeg12_frame_rate_tab[s->frame_rate_index];

            /* mpeg1 header repeated every gop */
            put_header(s, SEQ_START_CODE);

            put_sbits(&s->pb, 12, s->width  & 0xFFF);
            put_sbits(&s->pb, 12, s->height & 0xFFF);

            // Pick the table entry whose aspect value is closest to the
            // requested one (MPEG-1 table is pixel aspect, MPEG-2 display).
            for(i=1; i<15; i++){
                float error= aspect_ratio;
                if(s->codec_id == AV_CODEC_ID_MPEG1VIDEO || i <=1)
                    error-= 1.0/ff_mpeg1_aspect[i];
                else
                    error-= av_q2d(ff_mpeg2_aspect[i])*s->height/s->width;

                error= FFABS(error);

                if(error < best_aspect_error){
                    best_aspect_error= error;
                    s->aspect_ratio_info= i;
                }
            }

            put_bits(&s->pb, 4, s->aspect_ratio_info);
            put_bits(&s->pb, 4, s->frame_rate_index);

            // v is the bitrate field in units of 400 bit/s (rounded up);
            // MPEG-1 caps it at the 18-bit all-ones "variable bitrate" value.
            if(s->avctx->rc_max_rate){
                v = (s->avctx->rc_max_rate + 399) / 400;
                if (v > 0x3ffff && s->codec_id == AV_CODEC_ID_MPEG1VIDEO)
                    v = 0x3ffff;
            }else{
                v= 0x3FFFF;
            }

            if(s->avctx->rc_buffer_size)
                vbv_buffer_size = s->avctx->rc_buffer_size;
            else
                /* VBV calculation: Scaled so that a VCD has the proper
                   VBV size of 40 kilobytes */
                vbv_buffer_size = (( 20 * s->bit_rate) / (1151929 / 2)) * 8 * 1024;
            vbv_buffer_size= (vbv_buffer_size + 16383) / 16384; // to 16 kbit units

            put_sbits(&s->pb, 18, v);
            put_bits(&s->pb, 1, 1); /* marker */
            put_sbits(&s->pb, 10, vbv_buffer_size);

            // Constrained-parameters flag: MPEG-1 only, and only when the
            // stream fits the constrained bounds checked below.
            constraint_parameter_flag=
                s->width <= 768 && s->height <= 576 &&
                s->mb_width * s->mb_height <= 396 &&
                s->mb_width * s->mb_height * framerate.num <= framerate.den*396*25 &&
                framerate.num <= framerate.den*30 &&
                s->avctx->me_range && s->avctx->me_range < 128 &&
                vbv_buffer_size <= 20 &&
                v <= 1856000/400 &&
                s->codec_id == AV_CODEC_ID_MPEG1VIDEO;

            put_bits(&s->pb, 1, constraint_parameter_flag);

            ff_write_quant_matrix(&s->pb, s->avctx->intra_matrix);
            ff_write_quant_matrix(&s->pb, s->avctx->inter_matrix);

            if(s->codec_id == AV_CODEC_ID_MPEG2VIDEO){
                // Sequence extension: carries the bits that do not fit in
                // the MPEG-1-compatible header (size/bitrate/vbv high bits).
                put_header(s, EXT_START_CODE);
                put_bits(&s->pb, 4, 1); //seq ext

                put_bits(&s->pb, 1, s->avctx->profile == 0); //escx 1 for 4:2:2 profile */

                put_bits(&s->pb, 3, s->avctx->profile); //profile
                put_bits(&s->pb, 4, s->avctx->level); //level

                put_bits(&s->pb, 1, s->progressive_sequence);
                put_bits(&s->pb, 2, s->chroma_format);
                put_bits(&s->pb, 2, s->width >>12);  // horizontal size ext
                put_bits(&s->pb, 2, s->height>>12);  // vertical size ext
                put_bits(&s->pb, 12, v>>18); //bitrate ext
                put_bits(&s->pb, 1, 1); //marker
                put_bits(&s->pb, 8, vbv_buffer_size >>10); //vbv buffer ext
                put_bits(&s->pb, 1, s->low_delay);
                put_bits(&s->pb, 2, s->mpeg2_frame_rate_ext.num-1); // frame_rate_ext_n
                put_bits(&s->pb, 5, s->mpeg2_frame_rate_ext.den-1); // frame_rate_ext_d
            }
            /* NOTE(review): excerpt ends here; the full function continues
             * beyond this chunk. */
/*
 * FAAC-like scalefactor search for one single channel element.
 *
 * Pass 1 measures the average energy of the non-zero coefficients and the
 * index/band of the last non-zero coefficient. Pass 2 derives a distortion
 * upper limit (uplim) per scalefactor band from the band energy relative to
 * the average. Pass 3 searches, per band, for the scalefactor whose rate-
 * weighted distortion approaches that limit, bisecting via min_scf/max_scf.
 * Finally unset bands inherit neighbouring scalefactors and everything is
 * clamped to the allowed scalefactor range.
 */
static void search_for_quantizers_faac(AVCodecContext *avctx, AACEncContext *s,
                                       SingleChannelElement *sce,
                                       const float lambda)
{
    int start = 0, i, w, w2, g;
    float uplim[128], maxq[128];
    int minq, maxsf;
    // Empirical distortion scale constants; short windows get a lower one.
    float distfact = ((sce->ics.num_windows > 1) ? 85.80 : 147.84) / lambda;
    int last = 0, lastband = 0, curband = 0;
    float avg_energy = 0.0;

    if (sce->ics.num_windows == 1) {
        // Long window: scan all 1024 coefficients.
        start = 0;
        for (i = 0; i < 1024; i++) {
            if (i - start >= sce->ics.swb_sizes[curband]) {
                start += sce->ics.swb_sizes[curband];
                curband++;
            }
            if (sce->coeffs[i]) {
                avg_energy += sce->coeffs[i] * sce->coeffs[i];
                last = i;
                lastband = curband;
            }
        }
    } else {
        // Eight short windows of 128 coefficients each.
        for (w = 0; w < 8; w++) {
            const float *coeffs = sce->coeffs + w*128;
            start = 0;
            for (i = 0; i < 128; i++) {
                if (i - start >= sce->ics.swb_sizes[curband]) {
                    start += sce->ics.swb_sizes[curband];
                    curband++;
                }
                if (coeffs[i]) {
                    avg_energy += coeffs[i] * coeffs[i];
                    last = FFMAX(last, i);
                    lastband = FFMAX(lastband, curband);
                }
            }
        }
    }
    last++;
    avg_energy /= last;
    if (avg_energy == 0.0f) {
        // All-zero input: emit neutral scalefactors and stop.
        for (i = 0; i < FF_ARRAY_ELEMS(sce->sf_idx); i++)
            sce->sf_idx[i] = SCALE_ONE_POS;
        return;
    }
    // Pass 2: per-band distortion limits and peak magnitudes (maxq).
    for (w = 0; w < sce->ics.num_windows; w += sce->ics.group_len[w]) {
        start = w*128;
        for (g = 0; g < sce->ics.num_swb; g++) {
            float *coefs   = sce->coeffs + start;
            const int size = sce->ics.swb_sizes[g];
            int start2 = start, end2 = start + size, peakpos = start;
            float maxval = -1, thr = 0.0f, t;
            maxq[w*16+g] = 0.0f;
            if (g > lastband) {
                // Band beyond the last non-zero coefficient: zero it out.
                maxq[w*16+g] = 0.0f;
                start += size;
                for (w2 = 0; w2 < sce->ics.group_len[w]; w2++)
                    memset(coefs + w2*128, 0, sizeof(coefs[0])*size);
                continue;
            }
            for (w2 = 0; w2 < sce->ics.group_len[w]; w2++) {
                for (i = 0; i < size; i++) {
                    float t = coefs[w2*128+i]*coefs[w2*128+i];
                    maxq[w*16+g] = FFMAX(maxq[w*16+g], fabsf(coefs[w2*128 + i]));
                    thr += t;
                    if (sce->ics.num_windows == 1 && maxval < t) {
                        maxval  = t;
                        peakpos = start+i;
                    }
                }
            }
            if (sce->ics.num_windows == 1) {
                // Long windows: restrict the energy window around the peak.
                start2 = FFMAX(peakpos - 2, start2);
                end2   = FFMIN(peakpos + 3, end2);
            } else {
                start2 -= start;
                end2   -= start;
            }
            start += size;
            thr = pow(thr / (avg_energy * (end2 - start2)), 0.3 + 0.1*(lastband - g) / lastband);
            t   = 1.0 - (1.0 * start2 / last);
            uplim[w*16+g] = distfact / (1.4 * thr + t*t*t + 0.075);
        }
    }
    memset(sce->sf_idx, 0, sizeof(sce->sf_idx));
    abs_pow34_v(s->scoefs, sce->coeffs, 1024);
    // Pass 3: per-band scalefactor search against the uplim targets.
    for (w = 0; w < sce->ics.num_windows; w += sce->ics.group_len[w]) {
        start = w*128;
        for (g = 0; g < sce->ics.num_swb; g++) {
            const float *coefs  = sce->coeffs + start;
            const float *scaled = s->scoefs + start;
            const int size      = sce->ics.swb_sizes[g];
            int scf, prev_scf, step;
            int min_scf = -1, max_scf = 256;
            float curdiff;
            if (maxq[w*16+g] < 21.544) {
                // Peak below the quantization threshold: mark band as zeroed.
                sce->zeroes[w*16+g] = 1;
                start += size;
                continue;
            }
            sce->zeroes[w*16+g] = 0;
            // Initial guess derived from the band's peak magnitude.
            scf  = prev_scf = av_clip(SCALE_ONE_POS - SCALE_DIV_512 - log2f(1/maxq[w*16+g])*16/3, 60, 218);
            step = 16;
            for (;;) {
                float dist = 0.0f;
                int quant_max;

                for (w2 = 0; w2 < sce->ics.group_len[w]; w2++) {
                    int b;
                    dist += quantize_band_cost(s, coefs + w2*128,
                                               scaled + w2*128,
                                               sce->ics.swb_sizes[g],
                                               scf,
                                               ESC_BT,
                                               lambda,
                                               INFINITY,
                                               &b);
                    dist -= b; // subtract the bit cost component
                }
                dist *= 1.0f / 512.0f / lambda;
                quant_max = quant(maxq[w*16+g], ff_aac_pow2sf_tab[POW_SF2_ZERO - scf + SCALE_ONE_POS - SCALE_DIV_512]);
                if (quant_max >= 8191) { // too much, return to the previous quantizer
                    sce->sf_idx[w*16+g] = prev_scf;
                    break;
                }
                prev_scf = scf;
                curdiff = fabsf(dist - uplim[w*16+g]);
                if (curdiff <= 1.0f)
                    step = 0;
                else
                    step = log2f(curdiff); // step size grows with distance from target
                if (dist > uplim[w*16+g])
                    step = -step;
                scf += step;
                scf = av_clip_uint8(scf);
                step = scf - prev_scf;
                if (FFABS(step) <= 1 || (step > 0 && scf >= max_scf) || (step < 0 && scf <= min_scf)) {
                    // Converged or hit the current bisection bounds.
                    sce->sf_idx[w*16+g] = av_clip(scf, min_scf, max_scf);
                    break;
                }
                // Tighten the bisection interval around the previous value.
                if (step > 0)
                    min_scf = prev_scf;
                else
                    max_scf = prev_scf;
            }
            start += size;
        }
    }
    // Fill unset bands forward from the previous band, then backward, and
    // clamp everything into [minq, minq + SCALE_MAX_DIFF].
    minq = sce->sf_idx[0] ? sce->sf_idx[0] : INT_MAX;
    for (i = 1; i < 128; i++) {
        if (!sce->sf_idx[i])
            sce->sf_idx[i] = sce->sf_idx[i-1];
        else
            minq = FFMIN(minq, sce->sf_idx[i]);
    }
    if (minq == INT_MAX)
        minq = 0;
    minq = FFMIN(minq, SCALE_MAX_POS);
    maxsf = FFMIN(minq + SCALE_MAX_DIFF, SCALE_MAX_POS);
    for (i = 126; i >= 0; i--) {
        if (!sce->sf_idx[i])
            sce->sf_idx[i] = sce->sf_idx[i+1];
        sce->sf_idx[i] = av_clip(sce->sf_idx[i], minq, maxsf);
    }
}
/*
 * Write an empty XING header and initialize respective data.
 *
 * Builds a dummy MPEG audio frame containing a "Xing" VBR tag with
 * placeholder frame count / size / TOC fields, remembers its stream offset
 * in mp3->xing_frame_offset, and writes it to the output. The placeholders
 * are patched when the real totals are known (done elsewhere in this muxer).
 */
static void mp3_write_xing(AVFormatContext *s)
{
    MP3Context  *mp3 = s->priv_data;
    AVCodecContext *codec = s->streams[mp3->audio_stream_idx]->codec;
    AVDictionaryEntry *enc = av_dict_get(s->streams[mp3->audio_stream_idx]->metadata, "encoder", NULL, 0);
    AVIOContext *dyn_ctx;
    int32_t     header;
    MPADecodeHeader  mpah;
    int srate_idx, i, channels;
    int bitrate_idx;
    int best_bitrate_idx;
    int best_bitrate_error = INT_MAX;
    int ret;
    int ver = 0;
    int lsf, bytes_needed;

    // The header only makes sense if we can seek back later to patch it.
    if (!s->pb->seekable || !mp3->write_xing)
        return;

    // Map the sample rate to an MPEG version code and sample-rate index.
    for (i = 0; i < FF_ARRAY_ELEMS(avpriv_mpa_freq_tab); i++) {
        const uint16_t base_freq = avpriv_mpa_freq_tab[i];

        if      (codec->sample_rate == base_freq)     ver = 0x3; // MPEG 1
        else if (codec->sample_rate == base_freq / 2) ver = 0x2; // MPEG 2
        else if (codec->sample_rate == base_freq / 4) ver = 0x0; // MPEG 2.5
        else continue;

        srate_idx = i;
        break;
    }
    if (i == FF_ARRAY_ELEMS(avpriv_mpa_freq_tab)) {
        av_log(s, AV_LOG_WARNING, "Unsupported sample rate, not writing Xing "
               "header.\n");
        return;
    }

    switch (codec->channels) {
    case 1:  channels = MPA_MONO;                                          break;
    case 2:  channels = MPA_STEREO;                                        break;
    default: av_log(s, AV_LOG_WARNING, "Unsupported number of channels, "
                    "not writing Xing header.\n");
             return;
    }

    /* dummy MPEG audio header */
    header  = 0xff                                  << 24; // sync
    header |= (0x7 << 5 | ver << 3 | 0x1 << 1 | 0x1) << 16; // sync/audio-version/layer 3/no crc*/
    header |= (srate_idx << 2)                       <<  8;
    header |= channels                               <<  6;

    // lsf ("low sampling frequency") is set for anything but MPEG 1, i.e.
    // when the two version bits are not both ones.
    lsf = !((header & (1 << 20) && header & (1 << 19)));

    // Position of the Xing tag inside the frame depends on version/channels.
    mp3->xing_offset = xing_offtbl[ver != 3][channels == 1] + 4;
    bytes_needed     = mp3->xing_offset + XING_SIZE;

    // Choose the bitrate index closest to the nominal encoder bitrate.
    // NOTE(review): best_bitrate_idx is only assigned inside this loop; it
    // is set on the first iteration whenever error < INT_MAX — confirm
    // FFABS() here cannot overflow for extreme bit_rate values.
    for (bitrate_idx = 1; bitrate_idx < 15; bitrate_idx++) {
        int bit_rate = 1000 * avpriv_mpa_bitrate_tab[lsf][3 - 1][bitrate_idx];
        int error    = FFABS(bit_rate - codec->bit_rate);

        if (error < best_bitrate_error){
            best_bitrate_error = error;
            best_bitrate_idx   = bitrate_idx;
        }
    }

    // Raise the bitrate index until a single frame can hold the whole tag.
    for (bitrate_idx = best_bitrate_idx; bitrate_idx < 15; bitrate_idx++) {
        int32_t mask = bitrate_idx << (4 + 8);
        header |= mask;
        avpriv_mpegaudio_decode_header(&mpah, header);
        if (bytes_needed <= mpah.frame_size)
            break;
        header &= ~mask;
    }

    ret = avio_open_dyn_buf(&dyn_ctx);
    if (ret < 0)
        return;

    avio_wb32(dyn_ctx, header);

    avpriv_mpegaudio_decode_header(&mpah, header);

    av_assert0(mpah.frame_size >= bytes_needed);

    // Pad up to the Xing tag position inside the frame.
    ffio_fill(dyn_ctx, 0, mp3->xing_offset - 4);
    ffio_wfourcc(dyn_ctx, "Xing");
    avio_wb32(dyn_ctx, 0x01 | 0x02 | 0x04 | 0x08);    // frames / size / TOC / vbr scale

    mp3->size = mpah.frame_size;
    mp3->want = 1;

    avio_wb32(dyn_ctx, 0); // frames
    avio_wb32(dyn_ctx, 0); // size

    // TOC
    for (i = 0; i < XING_TOC_SIZE; i++)
        avio_w8(dyn_ctx, 255 * i / XING_TOC_SIZE);

    // vbr quality
    // we write it, because some (broken) tools always expect it to be present
    avio_wb32(dyn_ctx, 0);

    // encoder short version string
    if (enc) {
        uint8_t encoder_str[9] = { 0 };

        memcpy(encoder_str, enc->value, FFMIN(strlen(enc->value), sizeof(encoder_str)));
        avio_write(dyn_ctx, encoder_str, sizeof(encoder_str));
    } else
        ffio_fill(dyn_ctx, 0, 9);

    avio_w8(dyn_ctx, 0);      // tag revision 0 / unknown vbr method
    avio_w8(dyn_ctx, 0);      // unknown lowpass filter value
    ffio_fill(dyn_ctx, 0, 8); // empty replaygain fields
    avio_w8(dyn_ctx, 0);      // unknown encoding flags
    avio_w8(dyn_ctx, 0);      // unknown abr/minimal bitrate

    // encoder delay
    if (codec->initial_padding >= 1 << 12) {
        av_log(s, AV_LOG_WARNING, "Too many samples of initial padding.\n");
        avio_wb24(dyn_ctx, 0);
    } else {
        avio_wb24(dyn_ctx, codec->initial_padding << 12);
    }

    avio_w8(dyn_ctx,   0); // misc
    avio_w8(dyn_ctx,   0); // mp3gain
    avio_wb16(dyn_ctx, 0); // preset

    // audio length and CRCs (will be updated later)
    avio_wb32(dyn_ctx, 0); // music length
    avio_wb16(dyn_ctx, 0); // music crc
    avio_wb16(dyn_ctx, 0); // tag crc

    // Zero-fill the rest so the frame matches the advertised size.
    ffio_fill(dyn_ctx, 0, mpah.frame_size - bytes_needed);

    mp3->xing_frame_size   = avio_close_dyn_buf(dyn_ctx, &mp3->xing_frame);
    mp3->xing_frame_offset = avio_tell(s->pb);
    avio_write(s->pb, mp3->xing_frame, mp3->xing_frame_size);

    mp3->audio_size = mp3->xing_frame_size;
}
/*
 * Write the H.263 picture header (baseline H.263v1 or H.263+ / v2 with
 * PLUSPTYPE), including the custom-PCF clock search used for H.263+.
 *
 * NOTE(review): this excerpt is truncated — the function continues past
 * the end of the visible chunk (it ends here at the close of the v2 branch).
 */
void ff_h263_encode_picture_header(MpegEncContext * s, int picture_number)
{
    int format, coded_frame_rate, coded_frame_rate_base, i, temp_ref;
    int best_clock_code=1;
    int best_divisor=60;
    int best_error= INT_MAX;

    if(s->h263_plus){
        // Search for the custom picture clock frequency (clock code and
        // divisor) that best approximates the codec time base.
        for(i=0; i<2; i++){
            int div, error;
            div= (s->avctx->time_base.num*1800000LL + 500LL*s->avctx->time_base.den) / ((1000LL+i)*s->avctx->time_base.den);
            div= av_clip(div, 1, 127);
            error= FFABS(s->avctx->time_base.num*1800000LL - (1000LL+i)*s->avctx->time_base.den*div);
            if(error < best_error){
                best_error= error;
                best_divisor= div;
                best_clock_code= i;
            }
        }
    }
    // Custom PCF is needed whenever the best clock differs from the
    // standard CIF clock (code 1, divisor 60).
    s->custom_pcf= best_clock_code!=1 || best_divisor!=60;
    coded_frame_rate= 1800000;
    coded_frame_rate_base= (1000+best_clock_code)*best_divisor;

    avpriv_align_put_bits(&s->pb);

    /* Update the pointer to last GOB */
    s->ptr_lastgob = put_bits_ptr(&s->pb);
    put_bits(&s->pb, 22, 0x20); /* PSC */
    temp_ref= s->picture_number * (int64_t)coded_frame_rate * s->avctx->time_base.num / //FIXME use timestamp
                         (coded_frame_rate_base * (int64_t)s->avctx->time_base.den);
    put_sbits(&s->pb, 8, temp_ref); /* TemporalReference */

    put_bits(&s->pb, 1, 1);     /* marker */
    put_bits(&s->pb, 1, 0);     /* h263 id */
    put_bits(&s->pb, 1, 0);     /* split screen off */
    put_bits(&s->pb, 1, 0);     /* camera off */
    put_bits(&s->pb, 1, 0);     /* freeze picture release off */

    format = ff_match_2uint16(ff_h263_format, FF_ARRAY_ELEMS(ff_h263_format), s->width, s->height);
    if (!s->h263_plus) {
        /* H.263v1 */
        put_bits(&s->pb, 3, format);
        put_bits(&s->pb, 1, (s->pict_type == AV_PICTURE_TYPE_P));
        /* By now UMV IS DISABLED ON H.263v1, since the restrictions
        of H.263v1 UMV implies to check the predicted MV after
        calculation of the current MB to see if we're on the limits */
        put_bits(&s->pb, 1, 0);         /* Unrestricted Motion Vector: off */
        put_bits(&s->pb, 1, 0);         /* SAC: off */
        put_bits(&s->pb, 1, s->obmc);   /* Advanced Prediction */
        put_bits(&s->pb, 1, 0);         /* only I/P frames, no PB frame */
        put_bits(&s->pb, 5, s->qscale);
        put_bits(&s->pb, 1, 0);         /* Continuous Presence Multipoint mode: off */
    } else {
        int ufep=1;
        /* H.263v2 */
        /* H.263 Plus PTYPE */
        put_bits(&s->pb, 3, 7);
        put_bits(&s->pb,3,ufep); /* Update Full Extended PTYPE */
        if (format == 8)
            put_bits(&s->pb,3,6); /* Custom Source Format */
        else
            put_bits(&s->pb, 3, format);

        put_bits(&s->pb,1, s->custom_pcf);
        put_bits(&s->pb,1, s->umvplus); /* Unrestricted Motion Vector */
        put_bits(&s->pb,1,0); /* SAC: off */
        put_bits(&s->pb,1,s->obmc); /* Advanced Prediction Mode */
        put_bits(&s->pb,1,s->h263_aic); /* Advanced Intra Coding */
        put_bits(&s->pb,1,s->loop_filter); /* Deblocking Filter */
        put_bits(&s->pb,1,s->h263_slice_structured); /* Slice Structured */
        put_bits(&s->pb,1,0); /* Reference Picture Selection: off */
        put_bits(&s->pb,1,0); /* Independent Segment Decoding: off */
        put_bits(&s->pb,1,s->alt_inter_vlc); /* Alternative Inter VLC */
        put_bits(&s->pb,1,s->modified_quant); /* Modified Quantization: */
        put_bits(&s->pb,1,1); /* "1" to prevent start code emulation */
        put_bits(&s->pb,3,0); /* Reserved */

        put_bits(&s->pb, 3, s->pict_type == AV_PICTURE_TYPE_P);

        put_bits(&s->pb,1,0); /* Reference Picture Resampling: off */
        put_bits(&s->pb,1,0); /* Reduced-Resolution Update: off */
        put_bits(&s->pb,1,s->no_rounding); /* Rounding Type */
        put_bits(&s->pb,2,0); /* Reserved */
        put_bits(&s->pb,1,1); /* "1" to prevent start code emulation */

        /* This should be here if PLUSPTYPE */
        put_bits(&s->pb, 1, 0); /* Continuous Presence Multipoint mode: off */

        if (format == 8) {
            /* Custom Picture Format (CPFMT) */
            s->aspect_ratio_info= ff_h263_aspect_to_info(s->avctx->sample_aspect_ratio);

            put_bits(&s->pb,4,s->aspect_ratio_info);
            put_bits(&s->pb,9,(s->width >> 2) - 1);
            put_bits(&s->pb,1,1); /* "1" to prevent start code emulation */
            put_bits(&s->pb,9,(s->height >> 2));
            if (s->aspect_ratio_info == FF_ASPECT_EXTENDED){
                put_bits(&s->pb, 8, s->avctx->sample_aspect_ratio.num);
                put_bits(&s->pb, 8, s->avctx->sample_aspect_ratio.den);
            }
        }
        if(s->custom_pcf){
            if(ufep){
                put_bits(&s->pb, 1, best_clock_code);
                put_bits(&s->pb, 7, best_divisor);
            }
            // Extended temporal reference: two high bits of temp_ref.
            put_sbits(&s->pb, 2, temp_ref>>8);
        }

        /* Unlimited Unrestricted Motion Vectors Indicator (UUI) */
        if (s->umvplus)
//            put_bits(&s->pb,1,1); /* Limited according tables of Annex D */
//FIXME check actual requested range
            put_bits(&s->pb,2,1); /* unlimited */
        if(s->h263_slice_structured)
            put_bits(&s->pb,2,0); /* no weird submodes */

        put_bits(&s->pb, 5, s->qscale);
    }
    /* NOTE(review): excerpt ends here; the full function continues beyond
     * this chunk. */
/*
 * Decode one packet of ANSI/ASCII-art terminal data into the persistent
 * screen frame.
 *
 * Runs a small state machine over the input bytes: STATE_NORMAL handles
 * printable characters and C0 controls, ESC '[' switches to STATE_CODE
 * which accumulates numeric CSI arguments, and 'M' enters the (ignored)
 * ANSI-music preamble. Returns buf_size on success, negative on error.
 */
static int decode_frame(AVCodecContext *avctx,
                        void *data, int *got_frame,
                        AVPacket *avpkt)
{
    AnsiContext *s = avctx->priv_data;
    uint8_t *buf = avpkt->data;
    int buf_size = avpkt->size;
    const uint8_t *buf_end = buf+buf_size;
    int ret, i, count;

    ret = ff_reget_buffer(avctx, s->frame);
    if (ret < 0){
        av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
        return ret;
    }
    if (!avctx->frame_number) {
        // First frame: clear the whole screen and the palette.
        memset(s->frame->data[0], 0, avctx->height * FFABS(s->frame->linesize[0]));
        memset(s->frame->data[1], 0, AVPALETTE_SIZE);
    }

    s->frame->pict_type           = AV_PICTURE_TYPE_I;
    s->frame->palette_has_changed = 1;
    memcpy(s->frame->data[1], ff_cga_palette, 16 * 4);

    while(buf < buf_end) {
        switch(s->state) {
        case STATE_NORMAL:
            switch (buf[0]) {
            case 0x00: //NUL
            case 0x07: //BEL
            case 0x1A: //SUB
                /* ignore */
                break;
            case 0x08: //BS
                s->x = FFMAX(s->x - 1, 0);
                break;
            case 0x09: //HT
                // Advance to the next multiple-of-8 column with spaces.
                i = s->x / FONT_WIDTH;
                count = ((i + 8) & ~7) - i;
                for (i = 0; i < count; i++)
                    draw_char(avctx, ' ');
                break;
            case 0x0A: //LF
                hscroll(avctx);
                /* fallthrough: LF also resets the column like CR */
            case 0x0D: //CR
                s->x = 0;
                break;
            case 0x0C: //FF
                erase_screen(avctx);
                break;
            case 0x1B: //ESC
                s->state = STATE_ESCAPE;
                break;
            default:
                draw_char(avctx, buf[0]);
            }
            break;
        case STATE_ESCAPE:
            if (buf[0] == '[') {
                s->state   = STATE_CODE;
                s->nb_args = 0;
                s->args[0] = 0;
            } else {
                // Not a CSI: print the ESC literally and re-process this
                // byte in STATE_NORMAL (continue skips the buf++ below).
                s->state = STATE_NORMAL;
                draw_char(avctx, 0x1B);
                continue;
            }
            break;
        case STATE_CODE:
            switch(buf[0]) {
            case '0': case '1': case '2': case '3': case '4':
            case '5': case '6': case '7': case '8': case '9':
                // Accumulate the current numeric argument (bounded writes).
                if (s->nb_args < MAX_NB_ARGS)
                    s->args[s->nb_args] = s->args[s->nb_args] * 10 + buf[0] - '0';
                break;
            case ';':
                s->nb_args++;
                if (s->nb_args < MAX_NB_ARGS)
                    s->args[s->nb_args] = 0;
                break;
            case 'M':
                s->state = STATE_MUSIC_PREAMBLE;
                break;
            case '=': case '?':
                /* ignore */
                break;
            default:
                if (s->nb_args > MAX_NB_ARGS)
                    av_log(avctx, AV_LOG_WARNING, "args overflow (%i)\n", s->nb_args);
                if (s->nb_args < MAX_NB_ARGS && s->args[s->nb_args])
                    s->nb_args++;
                if ((ret = execute_code(avctx, buf[0])) < 0)
                    return ret;
                s->state = STATE_NORMAL;
            }
            break;
        case STATE_MUSIC_PREAMBLE:
            if (buf[0] == 0x0E || buf[0] == 0x1B)
                s->state = STATE_NORMAL;
            /* ignore music data */
            break;
        }
        buf++;
    }

    *got_frame = 1;
    if ((ret = av_frame_ref(data, s->frame)) < 0)
        return ret;
    return buf_size;
}
/*
 * Decode one C93 video frame.
 *
 * The frame is built from 8x8 blocks, two block-type nibbles per control
 * byte. Blocks either copy from the previous/current picture, fill with
 * 1-, 2- or 4-color patterns, or carry raw 8x8 intra data. An optional
 * 256-entry palette follows the block data; otherwise the previous
 * picture's palette is carried over. Returns buf_size on success.
 */
static int decode_frame(AVCodecContext *avctx, void *data,
                        int *got_frame, AVPacket *avpkt)
{
    const uint8_t *buf = avpkt->data;
    int buf_size = avpkt->size;
    C93DecoderContext * const c93 = avctx->priv_data;
    AVFrame * const newpic = c93->pictures[c93->currentpic];
    AVFrame * const oldpic = c93->pictures[c93->currentpic^1];
    GetByteContext gb;
    uint8_t *out;
    int stride, ret, i, x, y, b, bt = 0;

    if ((ret = ff_set_dimensions(avctx, WIDTH, HEIGHT)) < 0)
        return ret;

    // Double buffering: flip between the two persistent pictures.
    c93->currentpic ^= 1;

    if ((ret = ff_reget_buffer(avctx, newpic)) < 0)
        return ret;

    stride = newpic->linesize[0];

    bytestream2_init(&gb, buf, buf_size);
    b = bytestream2_get_byte(&gb);
    if (b & C93_FIRST_FRAME) {
        newpic->pict_type = AV_PICTURE_TYPE_I;
        newpic->key_frame = 1;
    } else {
        newpic->pict_type = AV_PICTURE_TYPE_P;
        newpic->key_frame = 0;
    }

    for (y = 0; y < HEIGHT; y += 8) {
        out = newpic->data[0] + y * stride;
        for (x = 0; x < WIDTH; x += 8) {
            uint8_t *copy_from = oldpic->data[0];
            unsigned int offset, j;
            uint8_t cols[4], grps[4];
            C93BlockType block_type;

            // Two block types per control byte; low nibble is used first.
            if (!bt)
                bt = bytestream2_get_byte(&gb);

            block_type= bt & 0x0F;
            switch (block_type) {
            case C93_8X8_FROM_PREV:
                offset = bytestream2_get_le16(&gb);
                if ((ret = copy_block(avctx, out, copy_from, offset, 8, stride)) < 0)
                    return ret;
                break;

            case C93_4X4_FROM_CURR:
                copy_from = newpic->data[0];
                /* fallthrough: same 4x4 copy loop, different source picture */
            case C93_4X4_FROM_PREV:
                for (j = 0; j < 8; j += 4) {
                    for (i = 0; i < 8; i += 4) {
                        int offset = bytestream2_get_le16(&gb);
                        int from_x = offset % WIDTH;
                        int from_y = offset / WIDTH;
                        // Copying from the still-incomplete current picture
                        // must not overlap the block being written.
                        if (block_type == C93_4X4_FROM_CURR && from_y == y+j &&
                            (FFABS(from_x - x-i) < 4 || FFABS(from_x - x-i) > WIDTH-4)) {
                            avpriv_request_sample(avctx, "block overlap %d %d %d %d\n",
                                                  from_x, x+i, from_y, y+j);
                            return AVERROR_INVALIDDATA;
                        }
                        if ((ret = copy_block(avctx, &out[j*stride+i], copy_from,
                                              offset, 4, stride)) < 0)
                            return ret;
                    }
                }
                break;

            case C93_8X8_2COLOR:
                bytestream2_get_buffer(&gb, cols, 2);
                for (i = 0; i < 8; i++) {
                    draw_n_color(out + i*stride, stride, 8, 1, 1, cols,
                                 NULL, bytestream2_get_byte(&gb));
                }
                break;

            case C93_4X4_2COLOR:
            case C93_4X4_4COLOR:
            case C93_4X4_4COLOR_GRP:
                for (j = 0; j < 8; j += 4) {
                    for (i = 0; i < 8; i += 4) {
                        if (block_type == C93_4X4_2COLOR) {
                            bytestream2_get_buffer(&gb, cols, 2);
                            draw_n_color(out + i + j*stride, stride, 4, 4,
                                         1, cols, NULL, bytestream2_get_le16(&gb));
                        } else if (block_type == C93_4X4_4COLOR) {
                            bytestream2_get_buffer(&gb, cols, 4);
                            draw_n_color(out + i + j*stride, stride, 4, 4,
                                         2, cols, NULL, bytestream2_get_le32(&gb));
                        } else {
                            bytestream2_get_buffer(&gb, grps, 4);
                            draw_n_color(out + i + j*stride, stride, 4, 4,
                                         1, cols, grps, bytestream2_get_le16(&gb));
                        }
                    }
                }
                break;

            case C93_NOOP:
                break;

            case C93_8X8_INTRA:
                for (j = 0; j < 8; j++)
                    bytestream2_get_buffer(&gb, out + j*stride, 8);
                break;

            default:
                av_log(avctx, AV_LOG_ERROR, "unexpected type %x at %dx%d\n",
                       block_type, x, y);
                return AVERROR_INVALIDDATA;
            }
            bt >>= 4;
            out += 8;
        }
    }

    if (b & C93_HAS_PALETTE) {
        uint32_t *palette = (uint32_t *) newpic->data[1];
        for (i = 0; i < 256; i++) {
            palette[i] = 0xFFU << 24 | bytestream2_get_be24(&gb);
        }
        newpic->palette_has_changed = 1;
    } else {
        // No palette in this frame: inherit the previous picture's.
        if (oldpic->data[1])
            memcpy(newpic->data[1], oldpic->data[1], 256 * 4);
    }

    if ((ret = av_frame_ref(data, newpic)) < 0)
        return ret;
    *got_frame = 1;

    return buf_size;
}
/*
 * Process one horizontal slice of the frame for the 16-bit pixel formats.
 *
 * For every pixel, frames stored in the history buffer are examined in
 * pairs (one older, one newer than the center frame), moving outwards from
 * the center. A side stops contributing as soon as its per-frame difference
 * exceeds thra or its accumulated difference exceeds thrb; the output pixel
 * is the average of all accepted samples. Planes not selected in s->planes
 * are copied through untouched.
 */
static int filter_slice16(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
{
    ATADenoiseContext *s = ctx->priv;
    ThreadData *td = arg;
    AVFrame *in = td->in;
    AVFrame *out = td->out;
    const int nframes = s->size;
    const int center  = s->mid;
    int plane, x, y, fwd, bwd;

    for (plane = 0; plane < s->nb_planes; plane++) {
        const int h = s->planeheight[plane];
        const int w = s->planewidth[plane];
        const int slice_start = (h * jobnr) / nb_jobs;
        const int slice_end   = (h * (jobnr + 1)) / nb_jobs;
        const uint16_t *src = (uint16_t *)(in->data[plane] + slice_start * in->linesize[plane]);
        uint16_t *dst = (uint16_t *)(out->data[plane] + slice_start * out->linesize[plane]);
        const int thra = s->thra[plane];
        const int thrb = s->thrb[plane];
        const uint8_t **data = (const uint8_t **)s->data[plane];
        const int *linesize = (const int *)s->linesize[plane];
        const uint16_t *srcf[SIZE];

        /* Plane not selected for filtering: plain passthrough copy. */
        if (!((1 << plane) & s->planes)) {
            av_image_copy_plane((uint8_t *)dst, out->linesize[plane],
                                (uint8_t *)src, in->linesize[plane],
                                w * 2, slice_end - slice_start);
            continue;
        }

        /* Point each history frame at the first row of this slice. */
        for (fwd = 0; fwd < nframes; fwd++)
            srcf[fwd] = (const uint16_t *)(data[fwd] + slice_start * linesize[fwd]);

        for (y = slice_start; y < slice_end; y++) {
            for (x = 0; x < w; x++) {
                const int cur = src[x];
                unsigned lsum = 0, rsum = 0;
                unsigned ldiff, rdiff;
                unsigned total = cur;
                int l = 0, r = 0;
                int prevpix, nextpix;

                /* Walk outwards: one older frame, then one newer frame per
                 * step; a threshold breach on either side ends the scan. */
                for (bwd = center - 1, fwd = center + 1;
                     bwd >= 0 && fwd < nframes; bwd--, fwd++) {
                    prevpix = srcf[bwd][x];
                    ldiff = FFABS(cur - prevpix);
                    lsum += ldiff;
                    if (ldiff > thra || lsum > thrb)
                        break;
                    l++;
                    total += prevpix;

                    nextpix = srcf[fwd][x];
                    rdiff = FFABS(cur - nextpix);
                    rsum += rdiff;
                    if (rdiff > thra || rsum > thrb)
                        break;
                    r++;
                    total += nextpix;
                }

                dst[x] = total / (r + l + 1);
            }

            /* Advance all row pointers (linesizes are in bytes; /2 for u16). */
            dst += out->linesize[plane] / 2;
            src += in->linesize[plane] / 2;
            for (fwd = 0; fwd < nframes; fwd++)
                srcf[fwd] += linesize[fwd] / 2;
        }
    }

    return 0;
}
/* put sequence header if needed */
/*
 * Patched ("MEANX") variant of the MPEG-1/2 sequence header writer: forces
 * the 4:3 / 16:9 display aspect codes directly, optionally signals 3:2
 * pulldown, and takes the max rate and VBV size from the *_header override
 * fields instead of the regular rate-control fields.
 *
 * NOTE(review): this excerpt is truncated — the function continues past the
 * end of the visible chunk. Newlines were reconstructed from a collapsed
 * paste; the "//MEANX ... put_bits(...)" lines are taken to be disabled
 * originals (keeping them live would write duplicate header fields) —
 * verify against the original patch.
 */
static void mpeg1_encode_sequence_header(MpegEncContext *s)
{
        unsigned int vbv_buffer_size;
        unsigned int fps, v;
        int i;
        uint64_t time_code;
        float best_aspect_error= 1E10;
        float aspect_ratio= av_q2d(s->avctx->sample_aspect_ratio);
        int constraint_parameter_flag;

        if(aspect_ratio==0.0) aspect_ratio= 1.0; //pixel aspect 1:1 (VGA)

        if (s->current_picture.key_frame) {
            AVRational framerate= ff_frame_rate_tab[s->frame_rate_index];

            /* mpeg1 header repeated every gop */
            put_header(s, SEQ_START_CODE);

            put_bits(&s->pb, 12, s->width);
            put_bits(&s->pb, 12, s->height);
#if 0 //MEANX
            // Disabled upstream aspect search; the patch writes fixed codes
            // below instead.
            for(i=1; i<15; i++){
                float error= aspect_ratio;
                if(s->codec_id == CODEC_ID_MPEG1VIDEO || i <=1)
                    error-= 1.0/ff_mpeg1_aspect[i];
                else
                    error-= av_q2d(ff_mpeg2_aspect[i])*s->height/s->width;

                error= FFABS(error);

                if(error < best_aspect_error){
                    best_aspect_error= error;
                    s->aspect_ratio_info= i;
                }
            }
#endif // MEANX
            //MEANX put_bits(&s->pb, 4, s->aspect_ratio_info);
            //MEANX put_bits(&s->pb, 4, s->frame_rate_index);
            // MEANX 4:3
            if(s->avctx->sample_aspect_ratio.num==16 && s->avctx->sample_aspect_ratio.den==9)
            {
                //printf("FFmpeg : Wide\n");
                put_bits(&s->pb,4,3); //16:9
            }
            else //4:3
            {
                if(s->codec_id == CODEC_ID_MPEG2VIDEO)
                    put_bits(&s->pb, 4, 2);
                else
                    put_bits(&s->pb, 4, 12); // MPEG1
            }
            // /MEANX
            //
            //MEANX PULLDOWN put_bits(&s->pb, 4, s->frame_rate_index);
            // Force frame-rate code 4 when 3:2 pulldown is requested on
            // MPEG-2; otherwise keep the configured frame rate index.
            if((s->flags2 & CODEC_FLAG2_32_PULLDOWN) && (s->codec_id == CODEC_ID_MPEG2VIDEO))
            {
                put_bits(&s->pb, 4,4);
            }
            else
            {
                put_bits(&s->pb, 4, s->frame_rate_index);
            }
            //MEANX pulldown

            if(s->avctx->rc_max_rate_header){ //MEANX we use header
                v = (s->avctx->rc_max_rate_header + 399) / 400;
                if (v > 0x3ffff && s->codec_id == CODEC_ID_MPEG1VIDEO)
                    v = 0x3ffff;
            }else{
                v= 0x3FFFF;
            }

            // MEANX we use rc_buffer_size_header here to force
            // a correct rc_buffer_size
            if(s->avctx->rc_buffer_size_header)
                vbv_buffer_size = s->avctx->rc_buffer_size_header;
            else
                /* VBV calculation: Scaled so that a VCD has the proper
                   VBV size of 40 kilobytes */
                vbv_buffer_size = (( 20 * s->bit_rate) / (1151929 / 2)) * 8 * 1024;
            vbv_buffer_size= (vbv_buffer_size + 16383) / 16384;

            put_bits(&s->pb, 18, v & 0x3FFFF);
            put_bits(&s->pb, 1, 1); /* marker */
            put_bits(&s->pb, 10, vbv_buffer_size & 0x3FF);

            constraint_parameter_flag=
                s->width <= 768 && s->height <= 576 &&
                s->mb_width * s->mb_height <= 396 &&
                s->mb_width * s->mb_height * framerate.num <= framerate.den*396*25 &&
                framerate.num <= framerate.den*30 &&
                s->avctx->me_range && s->avctx->me_range < 128 &&
                vbv_buffer_size <= 20 &&
                v <= 1856000/400 &&
                s->codec_id == CODEC_ID_MPEG1VIDEO;

            put_bits(&s->pb, 1, constraint_parameter_flag);

            ff_write_quant_matrix(&s->pb, s->avctx->intra_matrix);
            ff_write_quant_matrix(&s->pb, s->avctx->inter_matrix);

            if(s->codec_id == CODEC_ID_MPEG2VIDEO){
                put_header(s, EXT_START_CODE);
                put_bits(&s->pb, 4, 1); //seq ext

                put_bits(&s->pb, 1, s->avctx->profile == 0); //escx 1 for 4:2:2 profile */

                put_bits(&s->pb, 3, s->avctx->profile); //profile
                put_bits(&s->pb, 4, s->avctx->level); //level

                // MEANX pulldown put_bits(&s->pb, 1, s->progressive_sequence);
                // MEANX Pulldown
                // Pulldown streams are signalled as interlaced sequences.
                if(s->flags2 & CODEC_FLAG2_32_PULLDOWN) //MEANX
                    put_bits(&s->pb, 1, 0);
                else
                    put_bits(&s->pb, 1, s->progressive_sequence);
                // /MEANX
                put_bits(&s->pb, 2, s->chroma_format);
                put_bits(&s->pb, 2, 0); //horizontal size ext
                put_bits(&s->pb, 2, 0); //vertical size ext
                put_bits(&s->pb, 12, v>>18); //bitrate ext
                put_bits(&s->pb, 1, 1); //marker
                put_bits(&s->pb, 8, vbv_buffer_size >>10); //vbv buffer ext
                put_bits(&s->pb, 1, s->low_delay);
                put_bits(&s->pb, 2, 0); // frame_rate_ext_n
                put_bits(&s->pb, 5, 0); // frame_rate_ext_d
            }
            /* NOTE(review): excerpt ends here; the full function continues
             * beyond this chunk. */
/*
 * Write one DCA audio subframe to the encoder bitstream.
 *
 * Emits the subframe side information (subsubframe count, prediction and
 * bit-allocation fields, optional transition modes, per-channel/subband
 * scale factors), then the LFE samples when an LFE channel is present,
 * the quantized subband samples for every subsubframe, and finally the
 * 16-bit DSYNC marker.  Field order follows the DCA frame syntax and must
 * not be changed.
 *
 * @param c            encoder context (bit writer, channel layout, scale factors)
 * @param subband_data quantizer input: [sample][channel][subband]
 * @param subframe     index of the subframe inside the current frame,
 *                     used to locate the matching LFE sample window
 */
static void put_subframe(DCAContext *c,
                         int32_t subband_data[8 * SUBSUBFRAMES][MAX_CHANNELS][32],
                         int subframe)
{
    int i, sub, ss, ch, max_value;
    /* each subframe owns 4*SUBSUBFRAMES decimated LFE samples */
    int32_t *lfe_data = c->lfe_data + 4 * SUBSUBFRAMES * subframe;

    /* Subsubframes count */
    put_bits(&c->pb, 2, SUBSUBFRAMES -1);

    /* Partial subsubframe sample count: dummy */
    put_bits(&c->pb, 3, 0);

    /* Prediction mode: no ADPCM, in each channel and subband */
    for (ch = 0; ch < c->prim_channels; ch++)
        for (sub = 0; sub < DCA_SUBBANDS; sub++)
            put_bits(&c->pb, 1, 0);

    /* Prediction VQ address: not transmitted */

    /* Bit allocation index: fixed QUANTIZER_BITS-bit quantizer everywhere */
    for (ch = 0; ch < c->prim_channels; ch++)
        for (sub = 0; sub < DCA_SUBBANDS; sub++)
            put_bits(&c->pb, 5, QUANTIZER_BITS+3);

    if (SUBSUBFRAMES > 1) {
        /* Transition mode: none for each channel and subband */
        for (ch = 0; ch < c->prim_channels; ch++)
            for (sub = 0; sub < DCA_SUBBANDS; sub++)
                put_bits(&c->pb, 1, 0); /* codebook A4 */
    }

    /* Determine scale_factor: peak magnitude over the whole subframe,
     * mapped through the 7-bit scale-factor table */
    for (ch = 0; ch < c->prim_channels; ch++)
        for (sub = 0; sub < DCA_SUBBANDS; sub++) {
            max_value = 0;
            for (i = 0; i < 8 * SUBSUBFRAMES; i++)
                max_value = FFMAX(max_value, FFABS(subband_data[i][ch][sub]));
            c->scale_factor[ch][sub] = find_scale_factor7(max_value, QUANTIZER_BITS);
        }

    if (c->lfe_channel) {
        max_value = 0;
        for (i = 0; i < 4 * SUBSUBFRAMES; i++)
            max_value = FFMAX(max_value, FFABS(lfe_data[i]));
        c->lfe_scale_factor = find_scale_factor7(max_value, LFE_BITS);
    }

    /* Scale factors: the same for each channel and subband, encoded
     * according to Table D.1.2 */
    for (ch = 0; ch < c->prim_channels; ch++)
        for (sub = 0; sub < DCA_SUBBANDS; sub++)
            put_bits(&c->pb, 7, c->scale_factor[ch][sub]);

    /* Joint subband scale factor codebook select: not transmitted */
    /* Scale factors for joint subband coding: not transmitted */
    /* Stereo down-mix coefficients: not transmitted */
    /* Dynamic range coefficient: not transmitted */
    /* Side information CRC check word: not transmitted */
    /* VQ encoded high frequency subbands: not transmitted */

    /* LFE data: quantized samples followed by the 8-bit LFE scale factor */
    if (c->lfe_channel) {
        for (i = 0; i < 4 * SUBSUBFRAMES; i++)
            put_sample7(c, lfe_data[i], LFE_BITS, c->lfe_scale_factor);
        put_bits(&c->pb, 8, c->lfe_scale_factor);
    }

    /* Audio data (subsubframes): 8 samples per subsubframe per subband */
    for (ss = 0; ss < SUBSUBFRAMES ; ss++)
        for (ch = 0; ch < c->prim_channels; ch++)
            for (sub = 0; sub < DCA_SUBBANDS; sub++)
                for (i = 0; i < 8; i++)
                    put_sample7(c, subband_data[ss * 8 + i][ch][sub],
                                QUANTIZER_BITS, c->scale_factor[ch][sub]);

    /* DSYNC */
    put_bits(&c->pb, 16, 0xffff);
}
/**
 * Reorder the candidate channel layouts on every audio output of a filter
 * so that the layout most compatible with an already-settled input layout
 * comes first.  Does nothing unless some audio input has been narrowed
 * down to exactly one possible channel layout.
 */
static void swap_channel_layouts_on_filter(AVFilterContext *filter)
{
    AVFilterLink *settled_in = NULL;
    int n, cand, sub;

    /* Find the reference input: the first audio link whose layout list
     * contains a single entry. */
    for (n = 0; n < filter->nb_inputs; n++) {
        settled_in = filter->inputs[n];
        if (settled_in->type == AVMEDIA_TYPE_AUDIO &&
            settled_in->out_channel_layouts->nb_channel_layouts == 1)
            break;
    }
    if (n == filter->nb_inputs)
        return; /* nothing settled yet */

    for (n = 0; n < filter->nb_outputs; n++) {
        AVFilterLink *out = filter->outputs[n];
        int best_idx = -1, best_score = INT_MIN, best_count_diff = INT_MAX;

        /* only audio outputs that still have a real choice need sorting */
        if (out->type != AVMEDIA_TYPE_AUDIO ||
            out->in_channel_layouts->nb_channel_layouts < 2)
            continue;

        for (cand = 0; cand < out->in_channel_layouts->nb_channel_layouts; cand++) {
            uint64_t want_layout = settled_in->out_channel_layouts->channel_layouts[0];
            uint64_t cand_layout = out->in_channel_layouts->channel_layouts[cand];
            int want_channels    = av_get_channel_layout_nb_channels(want_layout);
            int cand_channels    = av_get_channel_layout_nb_channels(cand_layout);
            int count_diff       = cand_channels - want_channels;
            int matched_channels, extra_channels;
            int score = 100000;

            if (FF_LAYOUT2COUNT(want_layout) || FF_LAYOUT2COUNT(cand_layout)) {
                /* At least one side only encodes a channel count, not a
                 * real layout: score on the count difference alone, and
                 * zero both masks so the layout-based logic below still
                 * runs but cannot change the score. */
                if (FF_LAYOUT2COUNT(want_layout))
                    want_channels = FF_LAYOUT2COUNT(want_layout);
                if (FF_LAYOUT2COUNT(cand_layout))
                    cand_channels = FF_LAYOUT2COUNT(cand_layout);
                score -= 10000 + FFABS(cand_channels - want_channels) +
                         (want_channels > cand_channels ? 10000 : 0);
                want_layout = cand_layout = 0;
            }

            /* channel substitution: reward candidates that provide a
             * stand-in channel, minus a small deduction for the swap */
            for (sub = 0; sub < FF_ARRAY_ELEMS(ch_subst); sub++) {
                uint64_t from = ch_subst[sub][0];
                uint64_t to   = ch_subst[sub][1];
                if ((want_layout & from) && (!(cand_layout & from)) &&
                    (cand_layout & to)   && (!(want_layout & to))) {
                    want_layout &= ~from;
                    cand_layout &= ~to;
                    score += 10 * av_get_channel_layout_nb_channels(to) - 2;
                }
            }

            /* LFE: bonus when both sides have it, never a penalty */
            if ((want_layout & AV_CH_LOW_FREQUENCY) &&
                (cand_layout & AV_CH_LOW_FREQUENCY))
                score += 10;
            want_layout &= ~AV_CH_LOW_FREQUENCY;
            cand_layout &= ~AV_CH_LOW_FREQUENCY;

            matched_channels = av_get_channel_layout_nb_channels(want_layout &
                                                                 cand_layout);
            extra_channels   = av_get_channel_layout_nb_channels(cand_layout &
                                                                 (~want_layout));
            score += 10 * matched_channels - 5 * extra_channels;

            /* prefer the higher score; break ties on the smaller
             * channel-count difference */
            if (score > best_score ||
                (count_diff < best_count_diff && score == best_score)) {
                best_score      = score;
                best_idx        = cand;
                best_count_diff = count_diff;
            }
        }
        av_assert0(best_idx >= 0);
        FFSWAP(uint64_t, out->in_channel_layouts->channel_layouts[0],
               out->in_channel_layouts->channel_layouts[best_idx]);
    }
}
int main(void) { AVRational a,b,r; for (a.num = -2; a.num <= 2; a.num++) { for (a.den = -2; a.den <= 2; a.den++) { for (b.num = -2; b.num <= 2; b.num++) { for (b.den = -2; b.den <= 2; b.den++) { int c = av_cmp_q(a,b); double d = av_q2d(a) == av_q2d(b) ? 0 : (av_q2d(a) - av_q2d(b)); if (d > 0) d = 1; else if (d < 0) d = -1; else if (d != d) d = INT_MIN; if (c != d) av_log(NULL, AV_LOG_ERROR, "%d/%d %d/%d, %d %f\n", a.num, a.den, b.num, b.den, c,d); r = av_sub_q(av_add_q(b,a), b); if(b.den && (r.num*a.den != a.num*r.den || !r.num != !a.num || !r.den != !a.den)) av_log(NULL, AV_LOG_ERROR, "%d/%d ", r.num, r.den); } } } } for (a.num = 1; a.num <= 10; a.num++) { for (a.den = 1; a.den <= 10; a.den++) { if (av_gcd(a.num, a.den) > 1) continue; for (b.num = 1; b.num <= 10; b.num++) { for (b.den = 1; b.den <= 10; b.den++) { int start; if (av_gcd(b.num, b.den) > 1) continue; if (av_cmp_q(b, a) < 0) continue; for (start = 0; start < 10 ; start++) { int acc= start; int i; for (i = 0; i<100; i++) { int exact = start + av_rescale_q(i+1, b, a); acc = av_add_stable(a, acc, b, 1); if (FFABS(acc - exact) > 2) { av_log(NULL, AV_LOG_ERROR, "%d/%d %d/%d, %d %d\n", a.num, a.den, b.num, b.den, acc, exact); return 1; } } } } } } } for (a.den = 1; a.den < 0x100000000U/3; a.den*=3) { for (a.num = -1; a.num < (1<<27); a.num += 1 + a.num/100) { float f = av_int2float(av_q2intfloat(a)); float f2 = av_q2d(a); if (fabs(f - f2) > fabs(f)/5000000) { av_log(NULL, AV_LOG_ERROR, "%d/%d %f %f\n", a.num, a.den, f, f2); return 1; } } } return 0; }
/*
 * 8-bit vectorscope: for every pixel of the input frame, plot a point at
 * output position (x, y) = (value of plane s->x, value of plane s->y),
 * using plane `pd` as the accumulation/"depth" component.  The output is
 * first cleared to the background color, points are accumulated according
 * to s->mode, the envelope is drawn, and COLOR/COLOR5 modes finally
 * back-fill untouched positions with a coordinate-derived color map.
 * Pixels whose pd-plane value lies outside [s->tmin, s->tmax] are ignored.
 */
static void vectorscope8(VectorscopeContext *s, AVFrame *in, AVFrame *out, int pd)
{
    const uint8_t * const *src = (const uint8_t * const *)in->data;
    const int slinesizex = in->linesize[s->x];
    const int slinesizey = in->linesize[s->y];
    const int slinesized = in->linesize[pd];
    const int dlinesize = out->linesize[0];
    const int intensity = s->intensity;   /* per-hit increment for accumulating modes */
    const int px = s->x, py = s->y;
    const int h = s->planeheight[py];
    const int w = s->planewidth[px];
    const uint8_t *spx = src[px];
    const uint8_t *spy = src[py];
    const uint8_t *spd = src[pd];
    const int hsub = s->hsub;
    const int vsub = s->vsub;
    uint8_t **dst = out->data;
    uint8_t *dpx = dst[px];
    uint8_t *dpy = dst[py];
    uint8_t *dpd = dst[pd];
    const int tmin = s->tmin;
    const int tmax = s->tmax;
    int i, j, k;

    /* clear all present planes to the background color; in COLOR/COLOR5
     * the pd plane is cleared to 0 so "never hit" can be detected later */
    for (k = 0; k < 4 && dst[k]; k++)
        for (i = 0; i < out->height ; i++)
            memset(dst[k] + i * out->linesize[k],
                   (s->mode == COLOR || s->mode == COLOR5) && k == s->pd ? 0 : s->bg_color[k],
                   out->width);

    switch (s->mode) {
    case COLOR5:
    case COLOR:
    case GRAY:
        if (s->is_yuv) {
            /* YUV: accumulate the hit intensity in the pd plane only */
            for (i = 0; i < h; i++) {
                const int iwx = i * slinesizex;
                const int iwy = i * slinesizey;
                const int iwd = i * slinesized;
                for (j = 0; j < w; j++) {
                    const int x = spx[iwx + j];
                    const int y = spy[iwy + j];
                    const int z = spd[iwd + j];
                    const int pos = y * dlinesize + x;
                    if (z < tmin || z > tmax)
                        continue;
                    dpd[pos] = FFMIN(dpd[pos] + intensity, 255);
                    if (dst[3])
                        dst[3][pos] = 255;
                }
            }
        } else {
            /* non-YUV: accumulate in all three components */
            for (i = 0; i < h; i++) {
                const int iwx = i * slinesizex;
                const int iwy = i * slinesizey;
                const int iwd = i * slinesized;
                for (j = 0; j < w; j++) {
                    const int x = spx[iwx + j];
                    const int y = spy[iwy + j];
                    const int z = spd[iwd + j];
                    const int pos = y * dlinesize + x;
                    if (z < tmin || z > tmax)
                        continue;
                    dst[0][pos] = FFMIN(dst[0][pos] + intensity, 255);
                    dst[1][pos] = FFMIN(dst[1][pos] + intensity, 255);
                    dst[2][pos] = FFMIN(dst[2][pos] + intensity, 255);
                    if (dst[3])
                        dst[3][pos] = 255;
                }
            }
        }
        break;
    case COLOR2:
        /* stamp a fixed chroma-derived value on first hit, remember (x, y) */
        if (s->is_yuv) {
            for (i = 0; i < h; i++) {
                const int iw1 = i * slinesizex;
                const int iw2 = i * slinesizey;
                const int iwd = i * slinesized;
                for (j = 0; j < w; j++) {
                    const int x = spx[iw1 + j];
                    const int y = spy[iw2 + j];
                    const int z = spd[iwd + j];
                    const int pos = y * dlinesize + x;
                    if (z < tmin || z > tmax)
                        continue;
                    if (!dpd[pos])
                        dpd[pos] = FFABS(128 - x) + FFABS(128 - y);
                    dpx[pos] = x;
                    dpy[pos] = y;
                    if (dst[3])
                        dst[3][pos] = 255;
                }
            }
        } else {
            for (i = 0; i < h; i++) {
                const int iw1 = i * slinesizex;
                const int iw2 = i * slinesizey;
                const int iwd = i * slinesized;
                for (j = 0; j < w; j++) {
                    const int x = spx[iw1 + j];
                    const int y = spy[iw2 + j];
                    const int z = spd[iwd + j];
                    const int pos = y * dlinesize + x;
                    if (z < tmin || z > tmax)
                        continue;
                    if (!dpd[pos])
                        dpd[pos] = FFMIN(x + y, 255);
                    dpx[pos] = x;
                    dpy[pos] = y;
                    if (dst[3])
                        dst[3][pos] = 255;
                }
            }
        }
        break;
    case COLOR3:
        /* accumulate intensity while remembering the chroma pair */
        for (i = 0; i < h; i++) {
            const int iw1 = i * slinesizex;
            const int iw2 = i * slinesizey;
            const int iwd = i * slinesized;
            for (j = 0; j < w; j++) {
                const int x = spx[iw1 + j];
                const int y = spy[iw2 + j];
                const int z = spd[iwd + j];
                const int pos = y * dlinesize + x;
                if (z < tmin || z > tmax)
                    continue;
                dpd[pos] = FFMIN(255, dpd[pos] + intensity);
                dpx[pos] = x;
                dpy[pos] = y;
                if (dst[3])
                    dst[3][pos] = 255;
            }
        }
        break;
    case COLOR4:
        /* walk the full-resolution pd plane, sampling the (subsampled)
         * x/y planes; the brightest z value wins at each position */
        for (i = 0; i < in->height; i++) {
            const int iwx = (i >> vsub) * slinesizex;
            const int iwy = (i >> vsub) * slinesizey;
            const int iwd = i * slinesized;
            for (j = 0; j < in->width; j++) {
                const int x = spx[iwx + (j >> hsub)];
                const int y = spy[iwy + (j >> hsub)];
                const int z = spd[iwd + j];
                const int pos = y * dlinesize + x;
                if (z < tmin || z > tmax)
                    continue;
                dpd[pos] = FFMAX(z, dpd[pos]);
                dpx[pos] = x;
                dpy[pos] = y;
                if (dst[3])
                    dst[3][pos] = 255;
            }
        }
        break;
    default:
        av_assert0(0);
    }

    envelope(s, out);

    /* back-fill untouched positions with a coordinate-derived color map */
    if (s->mode == COLOR) {
        for (i = 0; i < out->height; i++) {
            for (j = 0; j < out->width; j++) {
                if (!dpd[i * out->linesize[pd] + j]) {
                    dpx[i * out->linesize[px] + j] = j;
                    dpy[i * out->linesize[py] + j] = i;
                    dpd[i * out->linesize[pd] + j] = 128;
                }
            }
        }
    } else if (s->mode == COLOR5) {
        for (i = 0; i < out->height; i++) {
            for (j = 0; j < out->width; j++) {
                if (!dpd[i * out->linesize[pd] + j]) {
                    dpx[i * out->linesize[px] + j] = j;
                    dpy[i * out->linesize[py] + j] = i;
                    /* brightness falls off with distance from the center */
                    dpd[i * out->linesize[pd] + j] = 128 * M_SQRT2 - hypot(i - 128, j - 128);
                }
            }
        }
    }
}
/*
 * Read the next FLV tag and locate/validate the stream it belongs to.
 * Loops over tags, skipping metadata headers, video info commands and
 * discarded frames, and maintains the seek-index validation state.
 *
 * NOTE(review): this chunk appears truncated -- the tail of the function
 * (codec parameter parsing, packet setup and the return path that would
 * use `ret`, `pts`, `channels`, `sample_rate`) is not visible here.
 */
static int flv_read_packet(AVFormatContext *s, AVPacket *pkt)
{
    FLVContext *flv = s->priv_data;
    int ret, i, type, size, flags;
    int stream_type=-1;
    int64_t next, pos;
    int64_t dts, pts = AV_NOPTS_VALUE;
    int av_uninit(channels);
    int av_uninit(sample_rate);
    AVStream *st = NULL;

    for(;; avio_skip(s->pb, 4)) { /* pkt size is repeated at end. skip it */
        pos = avio_tell(s->pb);
        type = avio_r8(s->pb);
        size = avio_rb24(s->pb);
        /* timestamp: 24-bit base plus 8-bit extension in the high byte */
        dts = avio_rb24(s->pb);
        dts |= avio_r8(s->pb) << 24;
        av_dlog(s, "type:%d, size:%d, dts:%"PRId64"\n", type, size, dts);
        if (url_feof(s->pb))
            return AVERROR_EOF;
        avio_skip(s->pb, 3); /* stream id, always 0 */
        flags = 0;

        /* seek-index validation: compare expected entry positions/timestamps
         * against what we actually read; drop the index if they disagree */
        if (flv->validate_next < flv->validate_count) {
            int64_t validate_pos = flv->validate_index[flv->validate_next].pos;
            if (pos == validate_pos) {
                if (FFABS(dts - flv->validate_index[flv->validate_next].dts) <=
                    VALIDATE_INDEX_TS_THRESH) {
                    flv->validate_next++;
                } else {
                    clear_index_entries(s, validate_pos);
                    flv->validate_count = 0;
                }
            } else if (pos > validate_pos) {
                clear_index_entries(s, validate_pos);
                flv->validate_count = 0;
            }
        }

        if(size == 0)
            continue;

        next= size + avio_tell(s->pb);

        if (type == FLV_TAG_TYPE_AUDIO) {
            stream_type=FLV_STREAM_TYPE_AUDIO;
            flags = avio_r8(s->pb);
            size--;
        } else if (type == FLV_TAG_TYPE_VIDEO) {
            stream_type=FLV_STREAM_TYPE_VIDEO;
            flags = avio_r8(s->pb);
            size--;
            if ((flags & FLV_VIDEO_FRAMETYPE_MASK) == FLV_FRAME_VIDEO_INFO_CMD)
                goto skip;
        } else if (type == FLV_TAG_TYPE_META) {
            if (size > 13+1+4 && dts == 0) { // Header-type metadata stuff
                flv_read_metabody(s, next);
                goto skip;
            } else if (dts != 0) { // Script-data "special" metadata frames - don't skip
                stream_type=FLV_STREAM_TYPE_DATA;
            } else {
                goto skip;
            }
        } else {
            av_log(s, AV_LOG_DEBUG, "skipping flv packet: type %d, size %d, flags %d\n", type, size, flags);
skip:
            avio_seek(s->pb, next, SEEK_SET);
            continue;
        }

        /* skip empty data packets */
        if (!size)
            continue;

        /* now find stream */
        for(i=0; i<s->nb_streams; i++) {
            st = s->streams[i];
            if (st->id == stream_type)
                break;
        }
        if(i == s->nb_streams) {
            /* tag for a stream that was not announced in the header:
             * create it on the fly with the matching media type */
            av_log(s, AV_LOG_WARNING, "Stream discovered after head already parsed\n");
            st = create_stream(s, stream_type,
                 (int[]) { AVMEDIA_TYPE_VIDEO, AVMEDIA_TYPE_AUDIO, AVMEDIA_TYPE_DATA }[stream_type]);
        }
        av_dlog(s, "%d %X %d \n", stream_type, flags, st->discard);

        /* honor the caller's discard settings: keyframes always pass for
         * NONKEY, disposable inter frames are dropped for BIDIR */
        if(  (st->discard >= AVDISCARD_NONKEY && !((flags & FLV_VIDEO_FRAMETYPE_MASK) == FLV_FRAME_KEY || (stream_type == FLV_STREAM_TYPE_AUDIO)))
           ||(st->discard >= AVDISCARD_BIDIR  &&  ((flags & FLV_VIDEO_FRAMETYPE_MASK) == FLV_FRAME_DISP_INTER && (stream_type == FLV_STREAM_TYPE_VIDEO)))
           || st->discard >= AVDISCARD_ALL
        ) {
            avio_seek(s->pb, next, SEEK_SET);
            continue;
        }
        if ((flags & FLV_VIDEO_FRAMETYPE_MASK) == FLV_FRAME_KEY)
            av_add_index_entry(st, pos, dts, size, 0, AVINDEX_KEYFRAME);
        break;
    }
/*
 * Parse a Parametric Stereo (PS) payload.
 *
 * Parsing happens on a local copy of the host bit reader, so a malformed
 * payload never leaves gb_host stopped mid-field: on success exactly the
 * consumed bits are skipped in gb_host and their count returned; on any
 * error (or overread) all `bits_left` bits are skipped, the PS parameter
 * arrays are zeroed, ps->start is cleared, and `bits_left` is returned.
 */
int AAC_RENAME(ff_ps_read_data)(AVCodecContext *avctx, GetBitContext *gb_host, PSContext *ps, int bits_left)
{
    int e;
    int bit_count_start = get_bits_count(gb_host);
    int header;
    int bits_consumed;
    /* parse on a copy; only commit to gb_host at the end */
    GetBitContext gbc = *gb_host, *gb = &gbc;

    header = get_bits1(gb);
    if (header) { //enable_ps_header
        ps->enable_iid = get_bits1(gb);
        if (ps->enable_iid) {
            int iid_mode = get_bits(gb, 3);
            if (iid_mode > 5) {
                av_log(avctx, AV_LOG_ERROR, "iid_mode %d is reserved.\n", iid_mode);
                goto err;
            }
            ps->nr_iid_par = nr_iidicc_par_tab[iid_mode];
            ps->iid_quant = iid_mode > 2;   /* modes 3..5 use the fine quantizer */
            ps->nr_ipdopd_par = nr_iidopd_par_tab[iid_mode];
        }
        ps->enable_icc = get_bits1(gb);
        if (ps->enable_icc) {
            ps->icc_mode = get_bits(gb, 3);
            if (ps->icc_mode > 5) {
                av_log(avctx, AV_LOG_ERROR, "icc_mode %d is reserved.\n", ps->icc_mode);
                goto err;
            }
            ps->nr_icc_par = nr_iidicc_par_tab[ps->icc_mode];
        }
        ps->enable_ext = get_bits1(gb);
    }

    ps->frame_class = get_bits1(gb);
    ps->num_env_old = ps->num_env;
    ps->num_env = num_env_tab[ps->frame_class][get_bits(gb, 2)];

    /* envelope borders: explicit (and must be monotone) for variable
     * frame class, evenly spread over the QMF slots otherwise */
    ps->border_position[0] = -1;
    if (ps->frame_class) {
        for (e = 1; e <= ps->num_env; e++) {
            ps->border_position[e] = get_bits(gb, 5);
            if (ps->border_position[e] < ps->border_position[e-1]) {
                av_log(avctx, AV_LOG_ERROR, "border_position non monotone.\n");
                goto err;
            }
        }
    } else
        for (e = 1; e <= ps->num_env; e++)
            ps->border_position[e] = (e * numQMFSlots >> ff_log2_tab[ps->num_env]) - 1;

    /* per-envelope IID data, Huffman coded in time or frequency direction */
    if (ps->enable_iid) {
        for (e = 0; e < ps->num_env; e++) {
            int dt = get_bits1(gb);
            if (read_iid_data(avctx, gb, ps, ps->iid_par, huff_iid[2*dt+ps->iid_quant], e, dt))
                goto err;
        }
    } else
        memset(ps->iid_par, 0, sizeof(ps->iid_par));

    /* per-envelope ICC data */
    if (ps->enable_icc)
        for (e = 0; e < ps->num_env; e++) {
            int dt = get_bits1(gb);
            if (read_icc_data(avctx, gb, ps, ps->icc_par, dt ? huff_icc_dt : huff_icc_df, e, dt))
                goto err;
        }
    else
        memset(ps->icc_par, 0, sizeof(ps->icc_par));

    /* PS extension container: byte-count prefix (with escape), then
     * extension blocks until fewer than 8 bits remain */
    if (ps->enable_ext) {
        int cnt = get_bits(gb, 4);
        if (cnt == 15) {
            cnt += get_bits(gb, 8);
        }
        cnt *= 8;
        while (cnt > 7) {
            int ps_extension_id = get_bits(gb, 2);
            cnt -= 2 + ps_read_extension_data(gb, ps, ps_extension_id);
        }
        if (cnt < 0) {
            av_log(avctx, AV_LOG_ERROR, "ps extension overflow %d\n", cnt);
            goto err;
        }
        skip_bits(gb, cnt);
    }

    ps->enable_ipdopd &= !PS_BASELINE;   /* IPD/OPD only outside baseline PS */

    //Fix up envelopes
    if (!ps->num_env || ps->border_position[ps->num_env] < numQMFSlots - 1) {
        //Create a fake envelope
        /* extend the last real envelope (or the previous frame's last
         * envelope) so the full QMF slot range is covered */
        int source = ps->num_env ? ps->num_env - 1 : ps->num_env_old - 1;
        int b;
        if (source >= 0 && source != ps->num_env) {
            if (ps->enable_iid) {
                memcpy(ps->iid_par+ps->num_env, ps->iid_par+source, sizeof(ps->iid_par[0]));
            }
            if (ps->enable_icc) {
                memcpy(ps->icc_par+ps->num_env, ps->icc_par+source, sizeof(ps->icc_par[0]));
            }
            if (ps->enable_ipdopd) {
                memcpy(ps->ipd_par+ps->num_env, ps->ipd_par+source, sizeof(ps->ipd_par[0]));
                memcpy(ps->opd_par+ps->num_env, ps->opd_par+source, sizeof(ps->opd_par[0]));
            }
        }
        /* validate the copied parameters of the fake envelope */
        if (ps->enable_iid){
            for (b = 0; b < ps->nr_iid_par; b++) {
                if (FFABS(ps->iid_par[ps->num_env][b]) > 7 + 8 * ps->iid_quant) {
                    av_log(avctx, AV_LOG_ERROR, "iid_par invalid\n");
                    goto err;
                }
            }
        }
        if (ps->enable_icc){
            /* NOTE(review): this loop bounds the icc_par check with
             * nr_iid_par, not nr_icc_par -- looks suspicious; verify
             * against the spec / upstream before changing */
            for (b = 0; b < ps->nr_iid_par; b++) {
                if (ps->icc_par[ps->num_env][b] > 7U) {
                    av_log(avctx, AV_LOG_ERROR, "icc_par invalid\n");
                    goto err;
                }
            }
        }
        ps->num_env++;
        ps->border_position[ps->num_env] = numQMFSlots - 1;
    }

    ps->is34bands_old = ps->is34bands;
    if (!PS_BASELINE && (ps->enable_iid || ps->enable_icc))
        ps->is34bands = (ps->enable_iid && ps->nr_iid_par == 34) ||
                        (ps->enable_icc && ps->nr_icc_par == 34);

    //Baseline
    if (!ps->enable_ipdopd) {
        memset(ps->ipd_par, 0, sizeof(ps->ipd_par));
        memset(ps->opd_par, 0, sizeof(ps->opd_par));
    }

    if (header)
        ps->start = 1;

    bits_consumed = get_bits_count(gb) - bit_count_start;
    if (bits_consumed <= bits_left) {
        /* commit: advance the host reader by exactly what we parsed */
        skip_bits_long(gb_host, bits_consumed);
        return bits_consumed;
    }
    av_log(avctx, AV_LOG_ERROR, "Expected to read %d PS bits actually read %d.\n", bits_left, bits_consumed);
err:
    /* error path: reset PS state, swallow the whole payload */
    ps->start = 0;
    skip_bits_long(gb_host, bits_left);
    memset(ps->iid_par, 0, sizeof(ps->iid_par));
    memset(ps->icc_par, 0, sizeof(ps->icc_par));
    memset(ps->ipd_par, 0, sizeof(ps->ipd_par));
    memset(ps->opd_par, 0, sizeof(ps->opd_par));
    return bits_left;
}
/*
 * MPEG-1/2 encoder init: derive/validate profile and level, pick a VBV
 * buffer size from the profile/level limits when none was given, map
 * color primaries to transfer/matrix, run the generic MPV encoder init,
 * choose an aspect_ratio_info and frame_rate_index, and parse the
 * optional timecode and 3:2 pulldown options.
 * Returns 0 on success, -1 on any invalid combination.
 */
static av_cold int encode_init(AVCodecContext *avctx)
{
    MpegEncContext *s = avctx->priv_data;

    if(avctx->profile == FF_PROFILE_UNKNOWN){
        if(avctx->level != FF_LEVEL_UNKNOWN){
            /* a level without a profile is ambiguous: refuse */
            av_log(avctx, AV_LOG_ERROR, "Set profile and level\n");
            return -1;
        }
        avctx->profile = avctx->pix_fmt == PIX_FMT_YUV420P ? 4 : 0; /* Main or 4:2:2 */
    }

    if(avctx->level == FF_LEVEL_UNKNOWN){
        if(avctx->profile == 0){ /* 4:2:2 */
            if(avctx->width <= 720 && avctx->height <= 608) avctx->level = 5; /* Main */
            else                                            avctx->level = 2; /* High */
        }else{
            if(avctx->profile != 1 && avctx->pix_fmt != PIX_FMT_YUV420P){
                av_log(avctx, AV_LOG_ERROR, "Only High(1) and 4:2:2(0) profiles support 4:2:2 color sampling\n");
                return -1;
            }
            if(avctx->width <= 720 && avctx->height <= 576) avctx->level = 8; /* Main */
            else if(avctx->width <= 1440)                   avctx->level = 6; /* High 1440 */
            else                                            avctx->level = 4; /* High */
        }
    }

    /* no explicit VBV buffer size: clamp to the profile/level maximum
     * (values in bits) or to what the max rate can fill in 0.73 s */
    if (avctx->rc_max_rate && !avctx->rc_buffer_size) {
        int max = 0;
        if (avctx->profile == 0) {          /* 4:2:2 profile */
            max = avctx->level == 5 ? 9437184 : 47185920;
        } else if (avctx->profile == 4) {   /* Main profile */
            switch (avctx->level) {
            case 8: max = 1835008; break;
            case 6: max = 7340032; break;
            case 4: max = 9781248; break;
            }
        } else {
            switch (avctx->level) {
            case 8: max = 2441216; break;
            case 6: max = 9781248; break;
            case 4: max = 12222464; break;
            }
        }
        avctx->rc_buffer_size = FFMIN(max, avctx->rc_max_rate*65535LL/90000);
    }

    /* fill in matching transfer/matrix for the given primaries */
    switch (avctx->color_primaries) {
    case AVCOL_PRI_BT709:
        avctx->color_transfer = AVCOL_TRC_BT709;
        avctx->color_matrix = AVCOL_MTX_BT709;
        break;
    case AVCOL_PRI_SMPTE170M:
    case AVCOL_PRI_BT470BG:
        avctx->color_transfer = AVCOL_TRC_BT709;
        avctx->color_matrix = AVCOL_MTX_SMPTE170M;
        break;
    }

    if(MPV_encode_init(avctx) < 0)
        return -1;

    /* pick the table entry whose display aspect best matches the
     * requested sample aspect ratio */
    if (avctx->sample_aspect_ratio.num > 0 && avctx->sample_aspect_ratio.den > 0) {
        float best_aspect_error = 1E10;
        float aspect_ratio = av_q2d(s->avctx->sample_aspect_ratio);
        int i;

        for (i = 1; i < 15; i++) {
            float error = aspect_ratio;
            if (s->codec_id == CODEC_ID_MPEG1VIDEO || i <= 1)
                error -= 1.0/ff_mpeg1_aspect[i];
            else
                error -= av_q2d(ff_mpeg2_aspect[i])*s->height/s->width;

            error = FFABS(error);
            // <= so square pixels can match 4:3 or 16:9
            if (error <= best_aspect_error) {
                best_aspect_error = error;
                s->aspect_ratio_info = i;
            }
        }
    }
    if (!s->aspect_ratio_info)
        s->aspect_ratio_info = 1;   /* fall back to square pixels */

    if(find_frame_rate_index(s) < 0){
        if(s->strict_std_compliance > FF_COMPLIANCE_EXPERIMENTAL){
            av_log(avctx, AV_LOG_ERROR, "MPEG1/2 does not support %d/%d fps\n", avctx->time_base.den, avctx->time_base.num);
            return -1;
        }else{
            av_log(avctx, AV_LOG_INFO, "MPEG1/2 does not support %d/%d fps, there may be AV sync issues\n", avctx->time_base.den, avctx->time_base.num);
        }
    }

    /* optional start timecode: parse into a frame number + drop flag */
    if (s->timecode) {
        int drop, framenum;
        AVRational fps = ff_frame_rate_tab[s->frame_rate_index];
        framenum = ff_timecode_to_framenum(s->timecode, (AVRational){fps.den, fps.num}, &drop);
        if (framenum < 0) {
            if (framenum == -1)
                av_log(s, AV_LOG_ERROR, "error parsing timecode, syntax: 00:00:00[;:]00\n");
            else if (framenum == -2)
                av_log(s, AV_LOG_ERROR, "error, unsupported fps for timecode\n");
            else if (framenum == -3)
                av_log(s, AV_LOG_ERROR, "error, drop frame is only allowed with "
                       "30000/1001 or 60000/1001 fps\n");
            return -1;
        }
        s->timecode_start = framenum;
        s->timecode_drop_frame = drop;
    }

    /* optional 3:2 pulldown: only valid from 24000/1001 fps material */
    if (s->pulldown) {
        if (strcmp(s->pulldown, "3:2")) {
            av_log(s, AV_LOG_ERROR, "error, unknown pulldown value\n");
            return -1;
        }
        if (s->frame_rate_index != 1) {
            av_log(s, AV_LOG_ERROR, "error, pulldown only works with 24000/1001 fps\n");
            return -1;
        }
        s->frame_rate_index = 4; // override frame rate to 30000/1001
    }
    return 0;
}
/**
 * 8-bit vectorscope plot without intensity control: each input pixel adds
 * one hit at output position (column, row) = (plane s->x value, plane s->y
 * value), with the plane index `pd` receiving the accumulated or derived
 * value.  COLOR/GRAY accumulate hit counts (COLOR then colors untouched
 * positions by their coordinates), COLOR2 stamps a chroma-derived value on
 * first hit, COLOR3 accumulates while remembering the chroma pair.
 */
static void vectorscope(VectorscopeContext *s, AVFrame *in, AVFrame *out, int pd)
{
    const uint8_t * const *sdata = (const uint8_t * const *)in->data;
    const int xplane = s->x, yplane = s->y;
    const int xstride = in->linesize[xplane];
    const int ystride = in->linesize[yplane];
    const int ostride = out->linesize[0];
    const int rows = s->planeheight[yplane];
    const int cols = s->planewidth[xplane];
    const uint8_t *sx = sdata[xplane];
    const uint8_t *sy = sdata[yplane];
    uint8_t **odata = out->data;
    uint8_t *ox = odata[xplane];
    uint8_t *oy = odata[yplane];
    uint8_t *od = odata[pd];
    int r, c;

    switch (s->mode) {
    case COLOR:
    case GRAY:
        if (s->is_yuv) {
            /* YUV: bump the saturating hit counter in the pd plane only */
            for (r = 0; r < rows; r++) {
                const uint8_t *rx = sx + r * xstride;
                const uint8_t *ry = sy + r * ystride;
                for (c = 0; c < cols; c++) {
                    const int pos = ry[c] * ostride + rx[c];
                    od[pos] = FFMIN(od[pos] + 1, 255);
                    if (odata[3])
                        odata[3][pos] = 255;
                }
            }
        } else {
            /* non-YUV: bump all three components */
            for (r = 0; r < rows; r++) {
                const uint8_t *rx = sx + r * xstride;
                const uint8_t *ry = sy + r * ystride;
                for (c = 0; c < cols; c++) {
                    const int pos = ry[c] * ostride + rx[c];
                    odata[0][pos] = FFMIN(odata[0][pos] + 1, 255);
                    odata[1][pos] = FFMIN(odata[1][pos] + 1, 255);
                    odata[2][pos] = FFMIN(odata[2][pos] + 1, 255);
                    if (odata[3])
                        odata[3][pos] = 255;
                }
            }
        }
        if (s->mode == COLOR) {
            /* color never-hit positions by their own coordinates */
            for (r = 0; r < out->height; r++) {
                for (c = 0; c < out->width; c++) {
                    if (!od[r * out->linesize[pd] + c]) {
                        ox[r * out->linesize[xplane] + c] = c;
                        oy[r * out->linesize[yplane] + c] = r;
                    }
                }
            }
        }
        break;
    case COLOR2:
        if (s->is_yuv) {
            /* first hit stamps a chroma-distance value; (x, y) always kept */
            for (r = 0; r < rows; r++) {
                const uint8_t *rx = sx + r * xstride;
                const uint8_t *ry = sy + r * ystride;
                for (c = 0; c < cols; c++) {
                    const int u = rx[c];
                    const int v = ry[c];
                    const int pos = v * ostride + u;
                    if (!od[pos])
                        od[pos] = FFABS(128 - u) + FFABS(128 - v);
                    ox[pos] = u;
                    oy[pos] = v;
                    if (odata[3])
                        odata[3][pos] = 255;
                }
            }
        } else {
            for (r = 0; r < rows; r++) {
                const uint8_t *rx = sx + r * xstride;
                const uint8_t *ry = sy + r * ystride;
                for (c = 0; c < cols; c++) {
                    const int u = rx[c];
                    const int v = ry[c];
                    const int pos = v * ostride + u;
                    if (!od[pos])
                        od[pos] = FFMIN(u + v, 255);
                    ox[pos] = u;
                    oy[pos] = v;
                    if (odata[3])
                        odata[3][pos] = 255;
                }
            }
        }
        break;
    case COLOR3:
        /* accumulate hits while remembering the chroma pair */
        for (r = 0; r < rows; r++) {
            const uint8_t *rx = sx + r * xstride;
            const uint8_t *ry = sy + r * ystride;
            for (c = 0; c < cols; c++) {
                const int u = rx[c];
                const int v = ry[c];
                const int pos = v * ostride + u;
                od[pos] = FFMIN(255, od[pos] + 1);
                ox[pos] = u;
                oy[pos] = v;
                if (odata[3])
                    odata[3][pos] = 255;
            }
        }
        break;
    default:
        av_assert0(0);
    }
}
/*
 * Decode a Microsoft RLE (4-bit palette) frame into pic->data[0].
 * Walks the image bottom-up (row_ptr starts at the last line and is
 * decremented by the line stride), interpreting escape codes 0/1/2 for
 * end-of-line / end-of-bitmap / position delta, literal-copy packets
 * (two 4-bit pixels per stream byte), and RLE runs.
 *
 * NOTE(review): this chunk is truncated -- it ends inside the "decode a
 * run of data" branch; the remainder of the while loop and the function
 * epilogue are not visible here.
 */
static int msrle_decode_pal4(AVCodecContext *avctx, AVPicture *pic,
                             GetByteContext *gb)
{
    unsigned char rle_code;
    unsigned char extra_byte, odd_pixel;
    unsigned char stream_byte;
    unsigned int pixel_ptr = 0;
    int row_dec = pic->linesize[0];
    int row_ptr = (avctx->height - 1) * row_dec;   /* start at the bottom row */
    int frame_size = FFABS(row_dec) * avctx->height;
    int i;

    while (row_ptr >= 0) {
        if (bytestream2_get_bytes_left(gb) <= 0) {
            av_log(avctx, AV_LOG_ERROR,
                   "MS RLE: bytestream overrun, %d rows left\n",
                   row_ptr);
            return AVERROR_INVALIDDATA;
        }
        rle_code = stream_byte = bytestream2_get_byteu(gb);
        if (rle_code == 0) {
            /* fetch the next byte to see how to handle escape code */
            stream_byte = bytestream2_get_byte(gb);
            if (stream_byte == 0) {
                /* line is done, goto the next one */
                row_ptr -= row_dec;
                pixel_ptr = 0;
            } else if (stream_byte == 1) {
                /* decode is done */
                return 0;
            } else if (stream_byte == 2) {
                /* reposition frame decode coordinates */
                stream_byte = bytestream2_get_byte(gb);
                pixel_ptr += stream_byte;
                stream_byte = bytestream2_get_byte(gb);
                row_ptr -= stream_byte * row_dec;
            } else {
                // copy pixels from encoded stream
                odd_pixel = stream_byte & 1;          /* odd count: last byte holds one pixel */
                rle_code = (stream_byte + 1) / 2;     /* bytes to read (2 pixels per byte) */
                extra_byte = rle_code & 0x01;         /* packets are padded to 16-bit alignment */
                if (row_ptr + pixel_ptr + stream_byte > frame_size ||
                    bytestream2_get_bytes_left(gb) < rle_code) {
                    av_log(avctx, AV_LOG_ERROR, "MS RLE: frame/stream ptr just went out of bounds (copy)\n");
                    return AVERROR_INVALIDDATA;
                }
                for (i = 0; i < rle_code; i++) {
                    if (pixel_ptr >= avctx->width)
                        break;
                    stream_byte = bytestream2_get_byteu(gb);
                    pic->data[0][row_ptr + pixel_ptr] = stream_byte >> 4;
                    pixel_ptr++;
                    if (i + 1 == rle_code && odd_pixel)
                        break;
                    if (pixel_ptr >= avctx->width)
                        break;
                    pic->data[0][row_ptr + pixel_ptr] = stream_byte & 0x0F;
                    pixel_ptr++;
                }
                // if the RLE code is odd, skip a byte in the stream
                if (extra_byte)
                    bytestream2_skip(gb, 1);
            }
        } else {
            // decode a run of data
            if (row_ptr + pixel_ptr + stream_byte > frame_size) {
GtkWidget * CreatePopUpMenu( void ) { GtkWidget * window1; GtkWidget * Menu = NULL; GtkWidget * SubMenu = NULL; GtkWidget * MenuItem = NULL; GtkWidget * H, * N, * D, * F; demuxer_t *demuxer = mpctx_get_demuxer(guiInfo.mpcontext); mixer_t *mixer = mpctx_get_mixer(guiInfo.mpcontext); int subs = 0, sub_pos; Menu=gtk_menu_new(); gtk_widget_realize (Menu); window1 = gtk_widget_get_toplevel(Menu); AddMenuItem( window1, (const char*)about_xpm, Menu,MSGTR_GUI_AboutMPlayer" ", evAbout ); AddSeparator( Menu ); SubMenu=AddSubMenu( window1, (const char*)open_xpm, Menu,MSGTR_GUI_Open ); AddMenuItem( window1, (const char*)file2_xpm, SubMenu,MSGTR_GUI_File"... ", evLoadPlay ); #ifdef CONFIG_CDDA AddMenuItem( window1, (const char*)playcd_xpm, SubMenu,MSGTR_GUI_CD, evPlayCD ); CDSubMenu=AddSubMenu( window1, (const char*)cd_xpm, Menu,MSGTR_GUI_CD ); AddMenuItem( window1, (const char*)playcd_xpm, CDSubMenu,MSGTR_GUI_Play,evPlayCD ); AddSeparator( CDSubMenu ); CDTitleMenu=AddSubMenu( window1, (const char*)title_xpm, CDSubMenu,MSGTR_GUI_Titles ); if ( guiInfo.Tracks && ( guiInfo.StreamType == STREAMTYPE_CDDA ) ) { char tmp[32]; int i; for ( i=1;i <= guiInfo.Tracks;i++ ) { snprintf( tmp,32,MSGTR_GUI_TitleNN,i ); //AddMenuItem( CDTitleMenu,tmp,( i << 16 ) + ivSetCDTrack ); AddMenuCheckItem(window1, (const char*)empty1px_xpm, CDTitleMenu,tmp, guiInfo.Track == i, ( i << 16 ) + ivSetCDTrack ); } } else { MenuItem=AddMenuItem( window1, (const char*)empty1px_xpm, CDTitleMenu,MSGTR_GUI__none_,evNone ); gtk_widget_set_sensitive( MenuItem,FALSE ); } #endif #ifdef CONFIG_VCD AddMenuItem( window1, (const char*)playvcd_xpm, SubMenu,MSGTR_GUI_VCD, evPlayVCD ); VCDSubMenu=AddSubMenu( window1, (const char*)vcd_xpm, Menu,MSGTR_GUI_VCD ); AddMenuItem( window1, (const char*)playvcd_xpm, VCDSubMenu,MSGTR_GUI_Play,evPlayVCD ); AddSeparator( VCDSubMenu ); VCDTitleMenu=AddSubMenu( window1, (const char*)title_xpm, VCDSubMenu,MSGTR_GUI_Titles ); if ( guiInfo.Tracks && ( guiInfo.StreamType == STREAMTYPE_VCD ) ) 
{ char tmp[32]; int i; for ( i=1;i < guiInfo.Tracks;i++ ) { snprintf( tmp,32,MSGTR_GUI_TitleNN,i ); //AddMenuItem( VCDTitleMenu,tmp,( i << 16 ) + ivSetVCDTrack ); AddMenuCheckItem(window1, (const char*)empty1px_xpm, VCDTitleMenu,tmp, guiInfo.Track == i + 1, ( ( i + 1 ) << 16 ) + ivSetVCDTrack ); } } else { MenuItem=AddMenuItem( window1, (const char*)empty1px_xpm, VCDTitleMenu,MSGTR_GUI__none_,evNone ); gtk_widget_set_sensitive( MenuItem,FALSE ); } #endif #ifdef CONFIG_DVDREAD AddMenuItem( window1, (const char*)playdvd_xpm, SubMenu,MSGTR_GUI_DVD, evPlayDVD ); DVDSubMenu=AddSubMenu( window1, (const char*)dvd_xpm, Menu,MSGTR_GUI_DVD ); AddMenuItem( window1, (const char*)playdvd_xpm, DVDSubMenu,MSGTR_GUI_Play" ", evPlayDVD ); // AddMenuItem( DVDSubMenu,MSGTR_MENU_ShowDVDMenu, evNone ); AddSeparator( DVDSubMenu ); DVDTitleMenu=AddSubMenu( window1, (const char*)title_xpm, DVDSubMenu,MSGTR_GUI_Titles ); if ( guiInfo.Tracks && ( guiInfo.StreamType == STREAMTYPE_DVD ) ) { char tmp[32]; int i; for ( i=1 ; i<= guiInfo.Tracks;i++ ) { snprintf( tmp,32,MSGTR_GUI_TitleNN,i); AddMenuCheckItem( window1, (const char*)empty1px_xpm, DVDTitleMenu,tmp, guiInfo.Track == i, (i << 16) + ivSetDVDTitle ); } } else { MenuItem=AddMenuItem( window1, (const char*)empty1px_xpm, DVDTitleMenu,MSGTR_GUI__none_,evNone ); gtk_widget_set_sensitive( MenuItem,FALSE ); } DVDChapterMenu=AddSubMenu( window1, (const char*)chapter_xpm, DVDSubMenu,MSGTR_GUI_Chapters ); if ( guiInfo.Chapters && ( guiInfo.StreamType == STREAMTYPE_DVD ) ) { char tmp[32]; int i; for ( i=1;i <= guiInfo.Chapters;i++ ) { snprintf( tmp,32,MSGTR_GUI_ChapterNN,i ); AddMenuCheckItem( window1, (const char*)empty1px_xpm, DVDChapterMenu,tmp,guiInfo.Chapter == i, ( i << 16 ) + ivSetDVDChapter ); } } else { MenuItem=AddMenuItem( window1, (const char*)empty1px_xpm, DVDChapterMenu,MSGTR_GUI__none_,evNone ); gtk_widget_set_sensitive( MenuItem,FALSE ); } DVDAudioLanguageMenu=AddSubMenu( window1, (const char*)audio_xpm, 
DVDSubMenu,MSGTR_GUI_AudioTracks ); if ( guiInfo.AudioStreams && demuxer && ( guiInfo.StreamType == STREAMTYPE_DVD ) ) { char tmp[64]; int i; for ( i=0;i < guiInfo.AudioStreams;i++ ) { snprintf( tmp,64,"%s - %s %s",GetLanguage( &guiInfo.AudioStream[i].language, GET_LANG_INT ), ChannelTypes[ guiInfo.AudioStream[i].type ], ChannelNumbers[ guiInfo.AudioStream[i].channels ] ); AddMenuCheckItem( window1, (const char*)dolby_xpm, DVDAudioLanguageMenu,tmp, demuxer->audio->id == guiInfo.AudioStream[i].id, ( guiInfo.AudioStream[i].id << 16 ) + ivSetDVDAudio ); } } else { MenuItem=AddMenuItem( window1, (const char*)empty1px_xpm, DVDAudioLanguageMenu,MSGTR_GUI__none_,evNone ); gtk_widget_set_sensitive( MenuItem,FALSE ); } DVDSubtitleLanguageMenu=AddSubMenu( window1, (const char*)subtitle_xpm, DVDSubMenu,MSGTR_GUI_Subtitles ); if ( guiInfo.Subtitles && ( guiInfo.StreamType == STREAMTYPE_DVD ) ) { char tmp[64]; int i; AddMenuItem( window1, (const char*)empty1px_xpm, DVDSubtitleLanguageMenu,MSGTR_GUI__none_,( (unsigned short)-1 << 16 ) + ivSetDVDSubtitle ); for ( i=0;i < guiInfo.Subtitles;i++ ) { av_strlcpy( tmp,GetLanguage( &guiInfo.Subtitle[i].language, GET_LANG_INT ),sizeof(tmp) ); AddMenuCheckItem( window1, (const char*)empty1px_xpm, DVDSubtitleLanguageMenu,tmp, dvdsub_id == guiInfo.Subtitle[i].id, ( guiInfo.Subtitle[i].id << 16 ) + ivSetDVDSubtitle ); } } else { MenuItem=AddMenuItem( window1, (const char*)empty1px_xpm, DVDSubtitleLanguageMenu,MSGTR_GUI__none_,evNone ); gtk_widget_set_sensitive( MenuItem,FALSE ); } #endif #if defined(CONFIG_LIBCDIO) || defined(CONFIG_DVDREAD) AddMenuItem( window1, (const char*)playimage_xpm, SubMenu,MSGTR_GUI_Image"... 
", evPlayImage ); #endif AddMenuItem( window1, (const char*)url_xpm, SubMenu,MSGTR_GUI_URL"...", evLoadURL ); #ifdef CONFIG_TV AddMenuItem( window1, (const char*)tv_xpm, SubMenu,MSGTR_GUI_TV, evPlayTV ); #endif AddSeparator( SubMenu ); AddMenuItem( window1, (const char*)loadeaf_xpm, SubMenu,MSGTR_GUI_AudioTrack"...", evLoadAudioFile ); AddMenuItem( window1, (const char*)sub_xpm, SubMenu,MSGTR_GUI_Subtitle"... ", evLoadSubtitle ); AddMenuItem( window1, (const char*)nosub_xpm, SubMenu,MSGTR_GUI_DropSubtitle,evDropSubtitle ); SubMenu=AddSubMenu(window1, (const char*)play_xpm, Menu,MSGTR_GUI_Playback ); AddMenuItem( window1, (const char*)play_xpm, SubMenu,MSGTR_GUI_Play" ", evPlay ); AddMenuItem( window1, (const char*)pause_xpm, SubMenu,MSGTR_GUI_Pause, evPause ); AddMenuItem( window1, (const char*)stop_xpm, SubMenu,MSGTR_GUI_Stop, evStop ); AddMenuItem( window1, (const char*)next_xpm, SubMenu,MSGTR_GUI_Next, evNext ); AddMenuItem( window1, (const char*)prev_xpm, SubMenu,MSGTR_GUI_Previous, evPrev ); // AddSeparator( SubMenu ); // AddMenuItem( SubMenu,"Back 10 sec", evBackward10sec ); // AddMenuItem( SubMenu,"Fwd 10 sec", evForward10sec ); // AddMenuItem( SubMenu,"Back 1 min", evBackward1min ); // AddMenuItem( SubMenu,"Fwd 1 min", evForward1min ); // SubMenu=AddSubMenu( Menu,MSGTR_MENU_Size ); // AddMenuItem( SubMenu,MSGTR_GUI_SizeNormal" ", evNormalSize ); // AddMenuItem( SubMenu,MSGTR_GUI_SizeDouble, evDoubleSize ); // AddMenuItem( SubMenu,MSGTR_GUI_SizeFullscreen, evFullScreen + ( True << 16 ) ); if ( guiInfo.VideoWindow ) { AddSeparator( Menu ); RotationMenu=AddSubMenu( window1, (const char*)rotate_xpm, Menu,MSGTR_GUI_Rotation ); N=AddMenuCheckItem( window1, (const char*)rotate0_xpm, RotationMenu,MSGTR_GUI__none_, guiInfo.Rotation == -1, evSetRotation ); D=AddMenuCheckItem( window1, (const char*)rotate90cw_xpm, RotationMenu,MSGTR_GUI_Rotation90CW, guiInfo.Rotation == 1, evSetRotation + ( 90 << 16 ) ); F=AddMenuCheckItem( window1, (const char*)rotate90ccw_xpm, 
RotationMenu,MSGTR_GUI_Rotation90CCW, guiInfo.Rotation == 2, evSetRotation + ( -90 << 16 ) ); H=AddMenuCheckItem( window1, (const char*)rotate180_xpm, RotationMenu,MSGTR_GUI_Rotation180, guiInfo.Rotation == 8, evSetRotation + ( 180 << 16 ) ); if ( !guiInfo.Playing ) { gtk_widget_set_sensitive( N,FALSE ); gtk_widget_set_sensitive( D,FALSE ); gtk_widget_set_sensitive( F,FALSE ); gtk_widget_set_sensitive( H,FALSE ); } } if ( guiInfo.VideoWindow ) { int a11 = False, a169 = False, a43 = False, a235 = False; if (movie_aspect == -1.0f) a11 = True; else { a169 = (FFABS(movie_aspect - 16.0f / 9.0f) <= 0.01f); a43 = (FFABS(movie_aspect - 4.0f / 3.0f) <= 0.01f); a235 = (FFABS(movie_aspect - 2.35f) <= 0.01f); } AspectMenu=AddSubMenu( window1, (const char*)aspect_xpm, Menu,MSGTR_GUI_AspectRatio ); H=AddMenuCheckItem( window1, (const char*)aspect11_xpm, AspectMenu,MSGTR_GUI_Original, a11, evSetAspect + ( 1 << 16 ) ); N=AddMenuCheckItem( window1, (const char*)aspect169_xpm, AspectMenu,"16:9", a169, evSetAspect + ( 2 << 16 ) ); D=AddMenuCheckItem( window1, (const char*)aspect43_xpm, AspectMenu,"4:3", a43, evSetAspect + ( 3 << 16 ) ); F=AddMenuCheckItem( window1, (const char*)aspect235_xpm, AspectMenu,MSGTR_GUI_235To1, a235, evSetAspect + ( 4 << 16 ) ); if ( !guiInfo.Playing ) { gtk_widget_set_sensitive( H,FALSE ); gtk_widget_set_sensitive( N,FALSE ); gtk_widget_set_sensitive( D,FALSE ); gtk_widget_set_sensitive( F,FALSE ); } } if ( guiInfo.VideoWindow ) { int b1 = False, b2 = False, b_half = False; if ( !guiApp.videoWindow.isFullScreen && guiInfo.Playing ) { if ( ( guiApp.videoWindow.Width == guiInfo.VideoWidth * 2 )&& ( guiApp.videoWindow.Height == guiInfo.VideoHeight * 2 ) ) b2=True; else if ( ( guiApp.videoWindow.Width == guiInfo.VideoWidth / 2 ) && ( guiApp.videoWindow.Height == guiInfo.VideoHeight / 2 ) ) b_half=True; else b1=( guiApp.videoWindow.Width == guiInfo.VideoWidth && guiApp.videoWindow.Height == guiInfo.VideoHeight ); } else b1=!guiApp.videoWindow.isFullScreen; 
F=AddMenuCheckItem( window1, (const char*)full_xpm, Menu,MSGTR_GUI_SizeFullscreen,guiApp.videoWindow.isFullScreen,evFullScreen + ( True << 16 ) ); D=AddMenuCheckItem( window1, (const char*)double_xpm, Menu,MSGTR_GUI_SizeDouble,b2,evDoubleSize ); N=AddMenuCheckItem( window1, (const char*)normal_xpm, Menu,MSGTR_GUI_SizeNormal" ",b1,evNormalSize ); H=AddMenuCheckItem( window1, (const char*)half_xpm, Menu,MSGTR_GUI_SizeHalf,b_half,evHalfSize ); if ( !guiInfo.Playing ) { gtk_widget_set_sensitive( H,FALSE ); gtk_widget_set_sensitive( N,FALSE ); gtk_widget_set_sensitive( D,FALSE ); gtk_widget_set_sensitive( F,FALSE ); } } AddSeparator( Menu ); MenuItem=AddMenuCheckItem( window1, (const char*)sound_xpm, Menu,MSGTR_GUI_Mute,mixer->muted,evMute ); if ( !guiInfo.AudioChannels ) gtk_widget_set_sensitive( MenuItem,FALSE ); if ( guiInfo.Playing && demuxer && guiInfo.StreamType != STREAMTYPE_DVD ) { int i,c = 0; for ( i=0;i < MAX_A_STREAMS;i++ ) if ( demuxer->a_streams[i] ) c++; if ( c > 1 ) { SubMenu=AddSubMenu( window1, (const char*)audio_xpm, Menu,MSGTR_GUI_AudioTracks ); for ( i=0;i < MAX_A_STREAMS;i++ ) if ( demuxer->a_streams[i] ) { int aid = ((sh_audio_t *)demuxer->a_streams[i])->aid; int selected_id = (audio_id == aid || (audio_id == -1 && aid == demuxer_default_audio_track(demuxer))); char lng[32], tmp[64]; if ( demuxer_audio_lang( demuxer, i, lng, sizeof(lng) ) == 0 ) snprintf( tmp,sizeof(tmp),MSGTR_GUI_TrackN" - %s",aid,GetLanguage( lng, GET_LANG_CHR ) ); else snprintf( tmp,sizeof(tmp),MSGTR_GUI_TrackN,aid ); AddMenuCheckItem( window1, (const char*)empty1px_xpm, SubMenu,tmp,selected_id,( aid << 16 ) + ivSetAudio ); } } for ( c=0,i=0;i < MAX_V_STREAMS;i++ ) if ( demuxer->v_streams[i] ) c++; if ( c > 1 ) { SubMenu=AddSubMenu( window1, (const char*)video_xpm, Menu,MSGTR_GUI_VideoTracks ); for ( i=0;i < MAX_V_STREAMS;i++ ) if ( demuxer->v_streams[i] ) { int vid = ((sh_video_t *)demuxer->v_streams[i])->vid; char tmp[32]; int selected_id = (video_id == vid || (video_id == -1 
&& vid == demuxer_default_video_track(demuxer))); snprintf( tmp,32,MSGTR_GUI_TrackN,vid ); AddMenuCheckItem( window1, (const char*)empty1px_xpm, SubMenu,tmp,selected_id,( vid << 16 ) + ivSetVideo ); } } } /* cheap subtitle switching for non-DVD streams */ mpctx_get_global_sub_info(guiInfo.mpcontext, &subs, &sub_pos); if ( subs && guiInfo.StreamType != STREAMTYPE_DVD ) { int i, j, subs0 = guiInfo.mpcontext->sub_counts[SUB_SOURCE_SUBS], subs1 = guiInfo.mpcontext->sub_counts[SUB_SOURCE_VOBSUB]; SubMenu=AddSubMenu( window1, (const char*)subtitle_xpm, Menu, MSGTR_GUI_Subtitles ); AddMenuCheckItem( window1, (const char*)empty1px_xpm, SubMenu, MSGTR_GUI__none_, sub_pos == -1, (-1 << 16) + ivSetSubtitle ); for ( i=0;i < subs;i++ ) { int ret = -1; char lng[32], tmp[64], *lang = NULL; /* file */ if ( i < subs0 ) { if ( guiInfo.mpcontext->set_of_subtitles[i] ) lang = guiInfo.mpcontext->set_of_subtitles[i]->filename; #ifdef CONFIG_ASS if ( ass_track && ass_track->name ) lang = ass_track->name; #endif if ( lang ) { av_strlcpy( lng, mp_basename(lang), sizeof(lng) ); ret = 0; } } /* VOBsub */ else if ( ( i >= subs0 && i < subs0 + subs1 ) && vo_vobsub ) { lang = vobsub_get_id( vo_vobsub, vobsub_get_id_by_index( vo_vobsub, i - subs0 ) ); if ( lang ) { av_strlcpy( lng, lang, sizeof(lng) ); ret = 0; } } /* embedded (demuxer) */ else if ( ( i >= subs0 + subs1 ) && demuxer ) { for ( j=0;j < MAX_S_STREAMS;j++ ) { if ( demuxer->s_streams[j] ) ret++; if ( ret == i - subs0 - subs1 ) { ret = demuxer_sub_lang( demuxer, j, lng, sizeof(lng) ); break; } } } if ( ret == 0 ) snprintf( tmp, sizeof(tmp), MSGTR_GUI_TrackN" - %s", i, GetLanguage( lng, GET_LANG_CHR ) ); else snprintf( tmp, sizeof(tmp), MSGTR_GUI_TrackN, i ); AddMenuCheckItem( window1,(const char*)empty1px_xpm,SubMenu,tmp,sub_pos == i,( i << 16 ) + ivSetSubtitle ); } } AddSeparator( Menu ); AddMenuItem( window1, (const char*)equalizer_xpm, Menu,MSGTR_GUI_Equalizer, evEqualizer ); AddMenuItem( window1, (const char*)playlist_xpm, 
Menu,MSGTR_GUI_Playlist, evPlaylist ); AddMenuItem( window1, (const char*)skin_xpm, Menu,MSGTR_GUI_SkinBrowser, evSkinBrowser ); AddMenuItem( window1, (const char*)prefs_xpm, Menu,MSGTR_GUI_Preferences, evPreferences ); AddSeparator( Menu ); AddMenuItem( window1, (const char*)exit_xpm, Menu,MSGTR_GUI_Quit, evExit ); return Menu; }
static int flashsv_encode_frame(AVCodecContext *avctx, AVPacket *pkt,
                                const AVFrame *pict, int *got_packet)
{
    FlashSVContext *const s = avctx->priv_data;
    const AVFrame *const frame = pict;
    const int block_opt_w = 4, block_opt_h = 4;
    const int max_packet_size = s->image_width * s->image_height * 3;
    uint8_t *prev_ptr;
    int ret;
    int keyframe = 0;

    /* The very first frame must be a keyframe; this is also where the
     * reference-frame buffer is allocated. */
    if (avctx->frame_number == 0) {
        s->previous_frame = av_mallocz(FFABS(frame->linesize[0]) * s->image_height);
        if (!s->previous_frame) {
            av_log(avctx, AV_LOG_ERROR, "Memory allocation failed.\n");
            return AVERROR(ENOMEM);
        }
        keyframe = 1;
    }

    /* For bottom-up pictures (negative stride) point at the storage of the
     * last row so row addressing matches the input frame's layout. */
    prev_ptr = s->previous_frame;
    if (frame->linesize[0] < 0)
        prev_ptr -= (s->image_height - 1) * frame->linesize[0];

    /* Force a keyframe whenever the configured GOP length has elapsed. */
    if (avctx->gop_size > 0 &&
        avctx->frame_number >= s->last_key_frame + avctx->gop_size)
        keyframe = 1;

    /* Conservative upper bound for the compressed payload. */
    ret = ff_alloc_packet(pkt, max_packet_size);
    if (ret < 0) {
        av_log(avctx, AV_LOG_ERROR, "Error getting output packet of size %d.\n",
               max_packet_size);
        return ret;
    }

    /* encode_bitstream() receives &keyframe and may update it. */
    pkt->size = encode_bitstream(s, frame, pkt->data, pkt->size,
                                 block_opt_w * 16, block_opt_h * 16,
                                 prev_ptr, &keyframe);

    /* Save the current frame as the reference for the next call. */
    if (frame->linesize[0] > 0)
        memcpy(s->previous_frame, frame->data[0],
               s->image_height * frame->linesize[0]);
    else
        memcpy(s->previous_frame,
               frame->data[0] + frame->linesize[0] * (s->image_height - 1),
               s->image_height * FFABS(frame->linesize[0]));

    /* Mark the frame type so the muxer can mux it correctly. */
    if (keyframe) {
        avctx->coded_frame->pict_type = AV_PICTURE_TYPE_I;
        avctx->coded_frame->key_frame = 1;
        s->last_key_frame = avctx->frame_number;
        av_dlog(avctx, "Inserting keyframe at frame %d\n", avctx->frame_number);
    } else {
        avctx->coded_frame->pict_type = AV_PICTURE_TYPE_P;
        avctx->coded_frame->key_frame = 0;
    }

    if (avctx->coded_frame->key_frame)
        pkt->flags |= AV_PKT_FLAG_KEY;
    *got_packet = 1;

    return 0;
}
/**
 * Probe callback for raw DTS (DCA) streams.
 *
 * Scans the probe buffer for DTS core sync words in all four framings
 * (16-bit BE/LE and 14-bit BE/LE), parses a minimal frame header at each
 * candidate position and counts plausible frames per (framing, sample-rate
 * code) bucket.  Returns a probe score slightly above the extension-based
 * score when enough consistent frames are found, 0 otherwise.
 */
static int dts_probe(AVProbeData *p)
{
    const uint8_t *buf, *bufp;
    uint32_t state = -1;                  /* rolling 32-bit window of the last two BE16 words */
    int markers[4*16] = {0};              /* hit counters: 4 framings x 16 sample-rate codes */
    int sum, max, i;
    int64_t diff = 0;                     /* accumulated sample-to-sample delta; low for silence/DC */
    uint8_t hdr[12 + FF_INPUT_BUFFER_PADDING_SIZE] = { 0 };

    /* Skip the first 4 KiB (or the whole buffer if smaller); presumably to
     * avoid container/ID3-style leading junk — TODO confirm rationale. */
    buf = p->buf + FFMIN(4096, p->buf_size);

    for(; buf < (p->buf+p->buf_size)-2; buf+=2) {
        int marker, sample_blocks, sample_rate, sr_code, framesize;
        int lfe;
        GetBitContext gb;

        bufp = buf;
        state = (state << 16) | bytestream_get_be16(&bufp);

        /* Measure signal activity: PCM-like data with real content produces
         * large differences between samples two words apart. */
        if (buf - p->buf >= 4)
            diff += FFABS(((int16_t)AV_RL16(buf)) - (int16_t)AV_RL16(buf-4));

        /* regular bitstream */
        if (state == DCA_SYNCWORD_CORE_BE)
            marker = 0;
        else if (state == DCA_SYNCWORD_CORE_LE)
            marker = 1;

        /* 14 bits big-endian bitstream */
        else if (state == DCA_SYNCWORD_CORE_14B_BE &&
                 (bytestream_get_be16(&bufp) & 0xFFF0) == 0x07F0)
            marker = 2;

        /* 14 bits little-endian bitstream */
        else if (state == DCA_SYNCWORD_CORE_14B_LE &&
                 (bytestream_get_be16(&bufp) & 0xF0FF) == 0xF007)
            marker = 3;
        else
            continue;

        /* Normalize the candidate frame header to plain 16-bit BE so the
         * same bit reader can parse all four framings. */
        if (avpriv_dca_convert_bitstream(buf-2, 12, hdr, 12) < 0)
            continue;

        init_get_bits(&gb, hdr, 96);
        skip_bits_long(&gb, 39);

        /* Sanity-check the core header fields; reject implausible values. */
        sample_blocks = get_bits(&gb, 7) + 1;
        if (sample_blocks < 8)
            continue;

        framesize = get_bits(&gb, 14) + 1;
        if (framesize < 95)
            continue;

        skip_bits(&gb, 6);
        sr_code = get_bits(&gb, 4);
        sample_rate = avpriv_dca_sample_rates[sr_code];
        if (sample_rate == 0)
            continue;

        get_bits(&gb, 5);
        if (get_bits(&gb, 1))
            continue;

        skip_bits_long(&gb, 9);
        lfe = get_bits(&gb, 2);
        if (lfe > 2)
            continue;

        /* Bucket hits by framing and sample-rate code; a genuine stream
         * concentrates its hits in a single bucket. */
        marker += 4* sr_code;
        markers[marker] ++;
    }
    sum = max = 0;
    for (i=0; i<FF_ARRAY_ELEMS(markers); i++) {
        sum += markers[i];
        if (markers[max] < markers[i])
            max = i;
    }

    /* Accept only if: enough frames, average frame spacing < 32 KiB, the
     * dominant bucket holds > 3/4 of all hits, and the buffer shows real
     * signal activity (guards against constant/near-silent false data). */
    if (markers[max] > 3 && p->buf_size / markers[max] < 32*1024 &&
        markers[max] * 4 > sum * 3 &&
        diff / p->buf_size > 200)
        return AVPROBE_SCORE_EXTENSION + 1;

    return 0;
}
/**
 * Encode one frame with the legacy (pre-AVPacket) encode API.
 *
 * Writes the compressed frame into @p buf and returns the number of bytes
 * produced, or -1 on allocation failure / undersized output buffer.
 * Keyframes are forced on frame 0 and at every gop_size boundary; the raw
 * input picture is stored in s->previous_frame to serve as the reference
 * for inter coding of the next frame.
 */
static int flashsv_encode_frame(AVCodecContext *avctx, uint8_t *buf, int buf_size, void *data)
{
    FlashSVContext * const s = avctx->priv_data;
    AVFrame *pict = data;
    AVFrame * const p = &s->frame;
    uint8_t *pfptr;
    int res;
    int I_frame = 0;        /* becomes nonzero when this frame is a keyframe */
    int opt_w, opt_h;

    *p = *pict;

    /* First frame needs to be a keyframe; allocate the reference buffer.
     * FFABS handles a negative (bottom-up) stride. */
    if (avctx->frame_number == 0) {
        s->previous_frame = av_mallocz(FFABS(p->linesize[0])*s->image_height);
        if (!s->previous_frame) {
            av_log(avctx, AV_LOG_ERROR, "Memory allocation failed.\n");
            return -1;
        }
        I_frame = 1;
    }

    /* With a negative stride, point at the storage of the last row so the
     * reference buffer is addressed the same way as the input frame. */
    if (p->linesize[0] < 0)
        pfptr = s->previous_frame - ((s->image_height-1) * p->linesize[0]);
    else
        pfptr = s->previous_frame;

    /* Check the placement of keyframes: force one every gop_size frames. */
    if (avctx->gop_size > 0) {
        if (avctx->frame_number >= s->last_key_frame + avctx->gop_size) {
            I_frame = 1;
        }
    }

    /* Fixed 64x64 (4*16) block size; not adapted per frame. */
    opt_w=4;
    opt_h=4;

    if (buf_size < s->image_width*s->image_height*3) {
        //Conservative upper bound check for compressed data
        av_log(avctx, AV_LOG_ERROR, "buf_size %d < %d\n", buf_size, s->image_width*s->image_height*3);
        return -1;
    }

    /* encode_bitstream() receives &I_frame and may update it. */
    res = encode_bitstream(s, p, buf, buf_size, opt_w*16, opt_h*16, pfptr, &I_frame);

    //save the current frame
    if(p->linesize[0] > 0)
        memcpy(s->previous_frame, p->data[0], s->image_height*p->linesize[0]);
    else
        memcpy(s->previous_frame, p->data[0] + p->linesize[0] * (s->image_height-1), s->image_height*FFABS(p->linesize[0]));

    //mark the frame type so the muxer can mux it correctly
    if (I_frame) {
        p->pict_type = FF_I_TYPE;
        p->key_frame = 1;
        s->last_key_frame = avctx->frame_number;
        av_log(avctx, AV_LOG_DEBUG, "Inserting key frame at frame %d\n",avctx->frame_number);
    } else {
        p->pict_type = FF_P_TYPE;
        p->key_frame = 0;
    }

    avctx->coded_frame = p;

    return res;
}
int val = 0; \ for (b = 0; b < num; b++) { \ val += get_vlc2(gb, vlc_table, 9, 3) - OFFSET; \ if (MASK) val &= MASK; \ PAR[e][b] = val; \ if (ERR_CONDITION) \ goto err; \ } \ } \ return 0; \ err: \ av_log(avctx, AV_LOG_ERROR, "illegal "#PAR"\n"); \ return -1; \ } READ_PAR_DATA(iid, huff_offset[table_idx], 0, FFABS(ps->iid_par[e][b]) > 7 + 8 * ps->iid_quant) READ_PAR_DATA(icc, huff_offset[table_idx], 0, ps->icc_par[e][b] > 7U) READ_PAR_DATA(ipdopd, 0, 0x07, 0) static int ps_read_extension_data(GetBitContext *gb, PSContext *ps, int ps_extension_id) { int e; int count = get_bits_count(gb); if (ps_extension_id) return 0; ps->enable_ipdopd = get_bits1(gb); if (ps->enable_ipdopd) { for (e = 0; e < ps->num_env; e++) { int dt = get_bits1(gb);
int main(int argc, char **argv){ int in_sample_rate, out_sample_rate, ch ,i, in_ch_layout_index, out_ch_layout_index, osr, flush_count; uint64_t in_ch_layout, out_ch_layout; enum AVSampleFormat in_sample_fmt, out_sample_fmt; int sample_rates[]={8000,11025,16000,22050,32000}; uint8_t array_in[SAMPLES*8*8]; uint8_t array_mid[SAMPLES*8*8*3]; uint8_t array_out[SAMPLES*8*8+100]; uint8_t *ain[SWR_CH_MAX]; uint8_t *aout[SWR_CH_MAX]; uint8_t *amid[SWR_CH_MAX]; struct SwrContext * forw_ctx= NULL; struct SwrContext *backw_ctx= NULL; in_sample_rate=16000; for(osr=0; osr<5; osr++){ out_sample_rate= sample_rates[osr]; for(in_sample_fmt= AV_SAMPLE_FMT_U8; in_sample_fmt<=AV_SAMPLE_FMT_DBL; in_sample_fmt++){ for(out_sample_fmt= AV_SAMPLE_FMT_U8; out_sample_fmt<=AV_SAMPLE_FMT_DBL; out_sample_fmt++){ for(in_ch_layout_index=0; layouts[in_ch_layout_index]; in_ch_layout_index++){ in_ch_layout= layouts[in_ch_layout_index]; int in_ch_count= av_get_channel_layout_nb_channels(in_ch_layout); for(out_ch_layout_index=0; layouts[out_ch_layout_index]; out_ch_layout_index++){ int out_count, mid_count; out_ch_layout= layouts[out_ch_layout_index]; int out_ch_count= av_get_channel_layout_nb_channels(out_ch_layout); fprintf(stderr, "ch %d->%d, rate:%5d->%5d, fmt:%s->%s", in_ch_count, out_ch_count, in_sample_rate, out_sample_rate, av_get_sample_fmt_name(in_sample_fmt), av_get_sample_fmt_name(out_sample_fmt)); forw_ctx = swr_alloc_set_opts(forw_ctx, out_ch_layout, av_get_alt_sample_fmt(out_sample_fmt, 1), out_sample_rate, in_ch_layout, av_get_alt_sample_fmt( in_sample_fmt, 1), in_sample_rate, 0, 0); backw_ctx = swr_alloc_set_opts(backw_ctx, in_ch_layout, in_sample_fmt, in_sample_rate, out_ch_layout, av_get_alt_sample_fmt(out_sample_fmt, 1), out_sample_rate, 0, 0); if(swr_init( forw_ctx) < 0) fprintf(stderr, "swr_init(->) failed\n"); if(swr_init(backw_ctx) < 0) fprintf(stderr, "swr_init(<-) failed\n"); if(!forw_ctx) fprintf(stderr, "Failed to init forw_cts\n"); if(!backw_ctx) fprintf(stderr, "Failed to 
init backw_ctx\n"); //FIXME test planar setup_array(ain , array_in , av_get_alt_sample_fmt( in_sample_fmt, 1), SAMPLES); setup_array(amid, array_mid, av_get_alt_sample_fmt(out_sample_fmt, 1), 3*SAMPLES); setup_array(aout, array_out, in_sample_fmt , SAMPLES); for(ch=0; ch<in_ch_count; ch++){ for(i=0; i<SAMPLES; i++) set(ain, ch, i, in_ch_count, av_get_alt_sample_fmt(in_sample_fmt, 1), sin(i*i*3/SAMPLES)); } mid_count= swr_convert(forw_ctx, amid, 3*SAMPLES, ain, SAMPLES); out_count= swr_convert(backw_ctx,aout, SAMPLES, amid, mid_count); for(ch=0; ch<in_ch_count; ch++){ double sse, x, maxdiff=0; double sum_a= 0; double sum_b= 0; double sum_aa= 0; double sum_bb= 0; double sum_ab= 0; for(i=0; i<out_count; i++){ double a= get(ain , ch, i, in_ch_count, av_get_alt_sample_fmt(in_sample_fmt, 1)); double b= get(aout, ch, i, in_ch_count, in_sample_fmt); sum_a += a; sum_b += b; sum_aa+= a*a; sum_bb+= b*b; sum_ab+= a*b; maxdiff= FFMAX(maxdiff, FFABS(a-b)); } x = sum_ab/sum_bb; sse= sum_aa + sum_bb*x*x - 2*x*sum_ab; fprintf(stderr, "[%f %f %f] len:%5d\n", sqrt(sse/out_count), x, maxdiff, out_count); } flush_count=swr_convert(backw_ctx,aout, SAMPLES, 0, 0); if(flush_count){ for(ch=0; ch<in_ch_count; ch++){ double sse, x, maxdiff=0; double sum_a= 0; double sum_b= 0; double sum_aa= 0; double sum_bb= 0; double sum_ab= 0; for(i=0; i<flush_count; i++){ double a= get(ain , ch, i+out_count, in_ch_count, av_get_alt_sample_fmt(in_sample_fmt, 1)); double b= get(aout, ch, i, in_ch_count, in_sample_fmt); sum_a += a; sum_b += b; sum_aa+= a*a; sum_bb+= b*b; sum_ab+= a*b; maxdiff= FFMAX(maxdiff, FFABS(a-b)); } x = sum_ab/sum_bb; sse= sum_aa + sum_bb*x*x - 2*x*sum_ab; fprintf(stderr, "[%f %f %f] len:%5d\n", sqrt(sse/flush_count), x, maxdiff, flush_count); } } fprintf(stderr, "\n"); } } } } } return 0; }
/**
 * Prepare the channel-rematrixing state for the intermediate sample format.
 *
 * Builds (unless a custom matrix was supplied) the mixing matrix, converts
 * it to the native representation matching s->midbuf.fmt (Q15 integers for
 * S16P/S32P, float/double otherwise), selects the 1->1 / 2->1 / any->any
 * mixing functions, and fills the generic matrix32 / matrix_ch tables.
 * Returns 0 on success or a negative AVERROR code.
 *
 * NOTE(review): the S32P branch frees native_one when native_matrix fails
 * to allocate, but the S16P/FLTP/DBLP branches return ENOMEM without
 * freeing the sibling allocation — presumably the caller's cleanup frees
 * the context fields; confirm, or align the branches.
 */
av_cold int swri_rematrix_init(SwrContext *s){
    int i, j;
    int nb_in = av_get_channel_layout_nb_channels(s->in_ch_layout);
    int nb_out = av_get_channel_layout_nb_channels(s->out_ch_layout);

    s->mix_any_f = NULL;

    /* Derive the matrix automatically unless the user supplied one. */
    if (!s->rematrix_custom) {
        int r = auto_matrix(s);
        if (r)
            return r;
    }
    if (s->midbuf.fmt == AV_SAMPLE_FMT_S16P){
        int maxsum = 0;
        s->native_matrix = av_calloc(nb_in * nb_out, sizeof(int));
        s->native_one = av_mallocz(sizeof(int));
        if (!s->native_matrix || !s->native_one)
            return AVERROR(ENOMEM);
        /* Quantize each row to Q15 with error feedback (rem carries the
         * rounding error to the next coefficient); track the largest
         * absolute row sum to decide whether clipping is needed. */
        for (i = 0; i < nb_out; i++) {
            double rem = 0;
            int sum = 0;

            for (j = 0; j < nb_in; j++) {
                double target = s->matrix[i][j] * 32768 + rem;
                ((int*)s->native_matrix)[i * nb_in + j] = lrintf(target);
                rem += target - ((int*)s->native_matrix)[i * nb_in + j];
                sum += FFABS(((int*)s->native_matrix)[i * nb_in + j]);
            }
            maxsum = FFMAX(maxsum, sum);
        }
        *((int*)s->native_one) = 32768;
        /* Row sums <= 1.0 (Q15) cannot overflow: use the fast paths;
         * otherwise use the clipping variants. */
        if (maxsum <= 32768) {
            s->mix_1_1_f = (mix_1_1_func_type*)copy_s16;
            s->mix_2_1_f = (mix_2_1_func_type*)sum2_s16;
            s->mix_any_f = (mix_any_func_type*)get_mix_any_func_s16(s);
        } else {
            s->mix_1_1_f = (mix_1_1_func_type*)copy_clip_s16;
            s->mix_2_1_f = (mix_2_1_func_type*)sum2_clip_s16;
            s->mix_any_f = (mix_any_func_type*)get_mix_any_func_clip_s16(s);
        }
    }else if(s->midbuf.fmt == AV_SAMPLE_FMT_FLTP){
        s->native_matrix = av_calloc(nb_in * nb_out, sizeof(float));
        s->native_one = av_mallocz(sizeof(float));
        if (!s->native_matrix || !s->native_one)
            return AVERROR(ENOMEM);
        for (i = 0; i < nb_out; i++)
            for (j = 0; j < nb_in; j++)
                ((float*)s->native_matrix)[i * nb_in + j] = s->matrix[i][j];
        *((float*)s->native_one) = 1.0;
        s->mix_1_1_f = (mix_1_1_func_type*)copy_float;
        s->mix_2_1_f = (mix_2_1_func_type*)sum2_float;
        s->mix_any_f = (mix_any_func_type*)get_mix_any_func_float(s);
    }else if(s->midbuf.fmt == AV_SAMPLE_FMT_DBLP){
        s->native_matrix = av_calloc(nb_in * nb_out, sizeof(double));
        s->native_one = av_mallocz(sizeof(double));
        if (!s->native_matrix || !s->native_one)
            return AVERROR(ENOMEM);
        for (i = 0; i < nb_out; i++)
            for (j = 0; j < nb_in; j++)
                ((double*)s->native_matrix)[i * nb_in + j] = s->matrix[i][j];
        *((double*)s->native_one) = 1.0;
        s->mix_1_1_f = (mix_1_1_func_type*)copy_double;
        s->mix_2_1_f = (mix_2_1_func_type*)sum2_double;
        s->mix_any_f = (mix_any_func_type*)get_mix_any_func_double(s);
    }else if(s->midbuf.fmt == AV_SAMPLE_FMT_S32P){
        s->native_one = av_mallocz(sizeof(int));
        if (!s->native_one)
            return AVERROR(ENOMEM);
        s->native_matrix = av_calloc(nb_in * nb_out, sizeof(int));
        if (!s->native_matrix) {
            av_freep(&s->native_one);
            return AVERROR(ENOMEM);
        }
        /* Same Q15 quantization with error feedback as the S16P branch,
         * but without overflow tracking (32-bit samples have head-room). */
        for (i = 0; i < nb_out; i++) {
            double rem = 0;

            for (j = 0; j < nb_in; j++) {
                double target = s->matrix[i][j] * 32768 + rem;
                ((int*)s->native_matrix)[i * nb_in + j] = lrintf(target);
                rem += target - ((int*)s->native_matrix)[i * nb_in + j];
            }
        }
        *((int*)s->native_one) = 32768;
        s->mix_1_1_f = (mix_1_1_func_type*)copy_s32;
        s->mix_2_1_f = (mix_2_1_func_type*)sum2_s32;
        s->mix_any_f = (mix_any_func_type*)get_mix_any_func_s32(s);
    }else
        av_assert0(0);
    //FIXME quantize for integers
    /* Generic Q15 matrix plus a per-output list of contributing input
     * channels (matrix_ch[i][0] holds the count). */
    for (i = 0; i < SWR_CH_MAX; i++) {
        int ch_in=0;
        for (j = 0; j < SWR_CH_MAX; j++) {
            s->matrix32[i][j]= lrintf(s->matrix[i][j] * 32768);
            if(s->matrix[i][j])
                s->matrix_ch[i][++ch_in]= j;
        }
        s->matrix_ch[i][0]= ch_in;
    }

    /* Let the x86 code override the function pointers with SIMD versions. */
    if(HAVE_X86ASM && HAVE_MMX)
        return swri_rematrix_init_x86(s);

    return 0;
}
/* put sequence header if needed */ static void mpeg1_encode_sequence_header(MpegEncContext *s) { unsigned int vbv_buffer_size, fps, v; int i, constraint_parameter_flag; uint64_t time_code; int64_t best_aspect_error = INT64_MAX; AVRational aspect_ratio = s->avctx->sample_aspect_ratio; if (aspect_ratio.num == 0 || aspect_ratio.den == 0) aspect_ratio = (AVRational){1,1}; // pixel aspect 1.1 (VGA) if (s->current_picture.f->key_frame) { AVRational framerate = ff_mpeg12_frame_rate_tab[s->frame_rate_index]; /* mpeg1 header repeated every gop */ put_header(s, SEQ_START_CODE); put_sbits(&s->pb, 12, s->width & 0xFFF); put_sbits(&s->pb, 12, s->height & 0xFFF); for (i = 1; i < 15; i++) { int64_t error = aspect_ratio.num * (1LL<<32) / aspect_ratio.den; if (s->codec_id == AV_CODEC_ID_MPEG1VIDEO || i <= 1) error -= (1LL<<32) / ff_mpeg1_aspect[i]; else error -= (1LL<<32)*ff_mpeg2_aspect[i].num * s->height / s->width / ff_mpeg2_aspect[i].den; error = FFABS(error); if (error - 2 <= best_aspect_error) { best_aspect_error = error; s->aspect_ratio_info = i; } } put_bits(&s->pb, 4, s->aspect_ratio_info); put_bits(&s->pb, 4, s->frame_rate_index); if (s->avctx->rc_max_rate) { v = (s->avctx->rc_max_rate + 399) / 400; if (v > 0x3ffff && s->codec_id == AV_CODEC_ID_MPEG1VIDEO) v = 0x3ffff; } else { v = 0x3FFFF; } if (s->avctx->rc_buffer_size) vbv_buffer_size = s->avctx->rc_buffer_size; else /* VBV calculation: Scaled so that a VCD has the proper * VBV size of 40 kilobytes */ vbv_buffer_size = ((20 * s->bit_rate) / (1151929 / 2)) * 8 * 1024; vbv_buffer_size = (vbv_buffer_size + 16383) / 16384; put_sbits(&s->pb, 18, v); put_bits(&s->pb, 1, 1); // marker put_sbits(&s->pb, 10, vbv_buffer_size); constraint_parameter_flag = s->width <= 768 && s->height <= 576 && s->mb_width * s->mb_height <= 396 && s->mb_width * s->mb_height * framerate.num <= 396 * 25 * framerate.den && framerate.num <= framerate.den * 30 && s->avctx->me_range && s->avctx->me_range < 128 && vbv_buffer_size <= 20 && v <= 1856000 / 
400 && s->codec_id == AV_CODEC_ID_MPEG1VIDEO; put_bits(&s->pb, 1, constraint_parameter_flag); ff_write_quant_matrix(&s->pb, s->avctx->intra_matrix); ff_write_quant_matrix(&s->pb, s->avctx->inter_matrix); if (s->codec_id == AV_CODEC_ID_MPEG2VIDEO) { AVFrameSideData *side_data; int width = s->width; int height = s->height; int use_seq_disp_ext; put_header(s, EXT_START_CODE); put_bits(&s->pb, 4, 1); // seq ext put_bits(&s->pb, 1, s->avctx->profile == 0); // escx 1 for 4:2:2 profile put_bits(&s->pb, 3, s->avctx->profile); // profile put_bits(&s->pb, 4, s->avctx->level); // level put_bits(&s->pb, 1, s->progressive_sequence); put_bits(&s->pb, 2, s->chroma_format); put_bits(&s->pb, 2, s->width >> 12); put_bits(&s->pb, 2, s->height >> 12); put_bits(&s->pb, 12, v >> 18); // bitrate ext put_bits(&s->pb, 1, 1); // marker put_bits(&s->pb, 8, vbv_buffer_size >> 10); // vbv buffer ext put_bits(&s->pb, 1, s->low_delay); put_bits(&s->pb, 2, s->mpeg2_frame_rate_ext.num-1); // frame_rate_ext_n put_bits(&s->pb, 5, s->mpeg2_frame_rate_ext.den-1); // frame_rate_ext_d side_data = av_frame_get_side_data(s->current_picture_ptr->f, AV_FRAME_DATA_PANSCAN); if (side_data) { AVPanScan *pan_scan = (AVPanScan *)side_data->data; if (pan_scan->width && pan_scan->height) { width = pan_scan->width >> 4; height = pan_scan->height >> 4; } }
static void pred_spatial_direct_motion(const H264Context *const h, H264SliceContext *sl, int *mb_type) { int b8_stride = 2; int b4_stride = h->b_stride; int mb_xy = sl->mb_xy, mb_y = sl->mb_y; int mb_type_col[2]; const int16_t (*l1mv0)[2], (*l1mv1)[2]; const int8_t *l1ref0, *l1ref1; const int is_b8x8 = IS_8X8(*mb_type); unsigned int sub_mb_type = MB_TYPE_L0L1; int i8, i4; int ref[2]; int mv[2]; int list; assert(sl->ref_list[1][0].reference & 3); await_reference_mb_row(h, &sl->ref_list[1][0], sl->mb_y + !!IS_INTERLACED(*mb_type)); #define MB_TYPE_16x16_OR_INTRA (MB_TYPE_16x16 | MB_TYPE_INTRA4x4 | \ MB_TYPE_INTRA16x16 | MB_TYPE_INTRA_PCM) /* ref = min(neighbors) */ for (list = 0; list < 2; list++) { int left_ref = sl->ref_cache[list][scan8[0] - 1]; int top_ref = sl->ref_cache[list][scan8[0] - 8]; int refc = sl->ref_cache[list][scan8[0] - 8 + 4]; const int16_t *C = sl->mv_cache[list][scan8[0] - 8 + 4]; if (refc == PART_NOT_AVAILABLE) { refc = sl->ref_cache[list][scan8[0] - 8 - 1]; C = sl->mv_cache[list][scan8[0] - 8 - 1]; } ref[list] = FFMIN3((unsigned)left_ref, (unsigned)top_ref, (unsigned)refc); if (ref[list] >= 0) { /* This is just pred_motion() but with the cases removed that * cannot happen for direct blocks. 
*/ const int16_t *const A = sl->mv_cache[list][scan8[0] - 1]; const int16_t *const B = sl->mv_cache[list][scan8[0] - 8]; int match_count = (left_ref == ref[list]) + (top_ref == ref[list]) + (refc == ref[list]); if (match_count > 1) { // most common mv[list] = pack16to32(mid_pred(A[0], B[0], C[0]), mid_pred(A[1], B[1], C[1])); } else { assert(match_count == 1); if (left_ref == ref[list]) mv[list] = AV_RN32A(A); else if (top_ref == ref[list]) mv[list] = AV_RN32A(B); else mv[list] = AV_RN32A(C); } av_assert2(ref[list] < (sl->ref_count[list] << !!FRAME_MBAFF(h))); } else { int mask = ~(MB_TYPE_L0 << (2 * list)); mv[list] = 0; ref[list] = -1; if (!is_b8x8) *mb_type &= mask; sub_mb_type &= mask; } } if (ref[0] < 0 && ref[1] < 0) { ref[0] = ref[1] = 0; if (!is_b8x8) *mb_type |= MB_TYPE_L0L1; sub_mb_type |= MB_TYPE_L0L1; } if (!(is_b8x8 | mv[0] | mv[1])) { fill_rectangle(&sl->ref_cache[0][scan8[0]], 4, 4, 8, (uint8_t)ref[0], 1); fill_rectangle(&sl->ref_cache[1][scan8[0]], 4, 4, 8, (uint8_t)ref[1], 1); fill_rectangle(&sl->mv_cache[0][scan8[0]], 4, 4, 8, 0, 4); fill_rectangle(&sl->mv_cache[1][scan8[0]], 4, 4, 8, 0, 4); *mb_type = (*mb_type & ~(MB_TYPE_8x8 | MB_TYPE_16x8 | MB_TYPE_8x16 | MB_TYPE_P1L0 | MB_TYPE_P1L1)) | MB_TYPE_16x16 | MB_TYPE_DIRECT2; return; } if (IS_INTERLACED(sl->ref_list[1][0].parent->mb_type[mb_xy])) { // AFL/AFR/FR/FL -> AFL/FL if (!IS_INTERLACED(*mb_type)) { // AFR/FR -> AFL/FL mb_y = (sl->mb_y & ~1) + sl->col_parity; mb_xy = sl->mb_x + ((sl->mb_y & ~1) + sl->col_parity) * h->mb_stride; b8_stride = 0; } else { mb_y += sl->col_fieldoff; mb_xy += h->mb_stride * sl->col_fieldoff; // non-zero for FL -> FL & differ parity } goto single_col; } else { // AFL/AFR/FR/FL -> AFR/FR if (IS_INTERLACED(*mb_type)) { // AFL /FL -> AFR/FR mb_y = sl->mb_y & ~1; mb_xy = (sl->mb_y & ~1) * h->mb_stride + sl->mb_x; mb_type_col[0] = sl->ref_list[1][0].parent->mb_type[mb_xy]; mb_type_col[1] = sl->ref_list[1][0].parent->mb_type[mb_xy + h->mb_stride]; b8_stride = 2 + 4 * 
h->mb_stride; b4_stride *= 6; if (IS_INTERLACED(mb_type_col[0]) != IS_INTERLACED(mb_type_col[1])) { mb_type_col[0] &= ~MB_TYPE_INTERLACED; mb_type_col[1] &= ~MB_TYPE_INTERLACED; } sub_mb_type |= MB_TYPE_16x16 | MB_TYPE_DIRECT2; /* B_SUB_8x8 */ if ((mb_type_col[0] & MB_TYPE_16x16_OR_INTRA) && (mb_type_col[1] & MB_TYPE_16x16_OR_INTRA) && !is_b8x8) { *mb_type |= MB_TYPE_16x8 | MB_TYPE_DIRECT2; /* B_16x8 */ } else { *mb_type |= MB_TYPE_8x8; } } else { // AFR/FR -> AFR/FR single_col: mb_type_col[0] = mb_type_col[1] = sl->ref_list[1][0].parent->mb_type[mb_xy]; sub_mb_type |= MB_TYPE_16x16 | MB_TYPE_DIRECT2; /* B_SUB_8x8 */ if (!is_b8x8 && (mb_type_col[0] & MB_TYPE_16x16_OR_INTRA)) { *mb_type |= MB_TYPE_16x16 | MB_TYPE_DIRECT2; /* B_16x16 */ } else if (!is_b8x8 && (mb_type_col[0] & (MB_TYPE_16x8 | MB_TYPE_8x16))) { *mb_type |= MB_TYPE_DIRECT2 | (mb_type_col[0] & (MB_TYPE_16x8 | MB_TYPE_8x16)); } else { if (!h->ps.sps->direct_8x8_inference_flag) { /* FIXME: Save sub mb types from previous frames (or derive * from MVs) so we know exactly what block size to use. 
*/ sub_mb_type += (MB_TYPE_8x8 - MB_TYPE_16x16); /* B_SUB_4x4 */ } *mb_type |= MB_TYPE_8x8; } } } await_reference_mb_row(h, &sl->ref_list[1][0], mb_y); l1mv0 = (void*)&sl->ref_list[1][0].parent->motion_val[0][h->mb2b_xy[mb_xy]]; l1mv1 = (void*)&sl->ref_list[1][0].parent->motion_val[1][h->mb2b_xy[mb_xy]]; l1ref0 = &sl->ref_list[1][0].parent->ref_index[0][4 * mb_xy]; l1ref1 = &sl->ref_list[1][0].parent->ref_index[1][4 * mb_xy]; if (!b8_stride) { if (sl->mb_y & 1) { l1ref0 += 2; l1ref1 += 2; l1mv0 += 2 * b4_stride; l1mv1 += 2 * b4_stride; } } if (IS_INTERLACED(*mb_type) != IS_INTERLACED(mb_type_col[0])) { int n = 0; for (i8 = 0; i8 < 4; i8++) { int x8 = i8 & 1; int y8 = i8 >> 1; int xy8 = x8 + y8 * b8_stride; int xy4 = x8 * 3 + y8 * b4_stride; int a, b; if (is_b8x8 && !IS_DIRECT(sl->sub_mb_type[i8])) continue; sl->sub_mb_type[i8] = sub_mb_type; fill_rectangle(&sl->ref_cache[0][scan8[i8 * 4]], 2, 2, 8, (uint8_t)ref[0], 1); fill_rectangle(&sl->ref_cache[1][scan8[i8 * 4]], 2, 2, 8, (uint8_t)ref[1], 1); if (!IS_INTRA(mb_type_col[y8]) && !sl->ref_list[1][0].parent->long_ref && ((l1ref0[xy8] == 0 && FFABS(l1mv0[xy4][0]) <= 1 && FFABS(l1mv0[xy4][1]) <= 1) || (l1ref0[xy8] < 0 && l1ref1[xy8] == 0 && FFABS(l1mv1[xy4][0]) <= 1 && FFABS(l1mv1[xy4][1]) <= 1))) { a = b = 0; if (ref[0] > 0) a = mv[0]; if (ref[1] > 0) b = mv[1]; n++; } else { a = mv[0]; b = mv[1]; } fill_rectangle(&sl->mv_cache[0][scan8[i8 * 4]], 2, 2, 8, a, 4); fill_rectangle(&sl->mv_cache[1][scan8[i8 * 4]], 2, 2, 8, b, 4); } if (!is_b8x8 && !(n & 3)) *mb_type = (*mb_type & ~(MB_TYPE_8x8 | MB_TYPE_16x8 | MB_TYPE_8x16 | MB_TYPE_P1L0 | MB_TYPE_P1L1)) | MB_TYPE_16x16 | MB_TYPE_DIRECT2; } else if (IS_16X16(*mb_type)) {
/** * accurate deblock filter */ static av_always_inline void do_a_deblock_C(uint8_t *src, int step, int stride, const PPContext *c, int mode) { int y; const int QP= c->QP; const int dcOffset= ((c->nonBQP*c->ppMode.baseDcDiff)>>8) + 1; const int dcThreshold= dcOffset*2 + 1; //START_TIMER src+= step*4; // src points to begin of the 8x8 Block for(y=0; y<8; y++){ int numEq= 0; numEq += ((unsigned)(src[-1*step] - src[0*step] + dcOffset)) < dcThreshold; numEq += ((unsigned)(src[ 0*step] - src[1*step] + dcOffset)) < dcThreshold; numEq += ((unsigned)(src[ 1*step] - src[2*step] + dcOffset)) < dcThreshold; numEq += ((unsigned)(src[ 2*step] - src[3*step] + dcOffset)) < dcThreshold; numEq += ((unsigned)(src[ 3*step] - src[4*step] + dcOffset)) < dcThreshold; numEq += ((unsigned)(src[ 4*step] - src[5*step] + dcOffset)) < dcThreshold; numEq += ((unsigned)(src[ 5*step] - src[6*step] + dcOffset)) < dcThreshold; numEq += ((unsigned)(src[ 6*step] - src[7*step] + dcOffset)) < dcThreshold; numEq += ((unsigned)(src[ 7*step] - src[8*step] + dcOffset)) < dcThreshold; if(numEq > c->ppMode.flatnessThreshold){ int min, max, x; if(src[0] > src[step]){ max= src[0]; min= src[step]; }else{ max= src[step]; min= src[0]; } for(x=2; x<8; x+=2){ if(src[x*step] > src[(x+1)*step]){ if(src[x *step] > max) max= src[ x *step]; if(src[(x+1)*step] < min) min= src[(x+1)*step]; }else{ if(src[(x+1)*step] > max) max= src[(x+1)*step]; if(src[ x *step] < min) min= src[ x *step]; } } if(max-min < 2*QP){ const int first= FFABS(src[-1*step] - src[0]) < QP ? src[-1*step] : src[0]; const int last= FFABS(src[8*step] - src[7*step]) < QP ? 
src[8*step] : src[7*step]; int sums[10]; sums[0] = 4*first + src[0*step] + src[1*step] + src[2*step] + 4; sums[1] = sums[0] - first + src[3*step]; sums[2] = sums[1] - first + src[4*step]; sums[3] = sums[2] - first + src[5*step]; sums[4] = sums[3] - first + src[6*step]; sums[5] = sums[4] - src[0*step] + src[7*step]; sums[6] = sums[5] - src[1*step] + last; sums[7] = sums[6] - src[2*step] + last; sums[8] = sums[7] - src[3*step] + last; sums[9] = sums[8] - src[4*step] + last; if (mode & VISUALIZE) { src[0*step] = src[1*step] = src[2*step] = src[3*step] = src[4*step] = src[5*step] = src[6*step] = src[7*step] = 128; } src[0*step]= (sums[0] + sums[2] + 2*src[0*step])>>4; src[1*step]= (sums[1] + sums[3] + 2*src[1*step])>>4; src[2*step]= (sums[2] + sums[4] + 2*src[2*step])>>4; src[3*step]= (sums[3] + sums[5] + 2*src[3*step])>>4; src[4*step]= (sums[4] + sums[6] + 2*src[4*step])>>4; src[5*step]= (sums[5] + sums[7] + 2*src[5*step])>>4; src[6*step]= (sums[6] + sums[8] + 2*src[6*step])>>4; src[7*step]= (sums[7] + sums[9] + 2*src[7*step])>>4; } }else{
/**
 * Encode one MDCT block of a WMA frame into the bitstream.
 *
 * Quantizes the per-channel MDCT coefficients against the (fixed) exponents,
 * then writes channel flags, total gain, exponents and run-level coded
 * coefficients with put_bits().  Only the fixed-block-length, fixed-exponent,
 * VLC-exponent path is implemented; the other paths assert.
 *
 * @param s          encoder context (bit writer, VLC tables, configuration)
 * @param src_coefs  per-channel MDCT coefficients for this block
 * @param total_gain global gain selected for this block
 * @return 0 on success, 1 if no channel was coded,
 *         -1 if a quantized value cannot be represented (caller should retry
 *         with different gain)
 */
static int encode_block(WMACodecContext *s, float (*src_coefs)[BLOCK_MAX_SIZE],
                        int total_gain)
{
    int v, bsize, ch, coef_nb_bits, parse_exponents;
    float mdct_norm;
    int nb_coefs[MAX_CHANNELS];
    /* constant exponent set (all 20): only fixed exponents are implemented */
    static const int fixed_exp[25] = { 20, 20, 20, 20, 20, 20, 20, 20, 20,
                                       20, 20, 20, 20, 20, 20, 20, 20, 20,
                                       20, 20, 20, 20, 20, 20, 20 };

    // FIXME remove duplication relative to decoder
    if (s->use_variable_block_len) {
        av_assert0(0); // FIXME not implemented
    } else {
        /* fixed block len */
        s->next_block_len_bits = s->frame_len_bits;
        s->prev_block_len_bits = s->frame_len_bits;
        s->block_len_bits      = s->frame_len_bits;
    }

    s->block_len = 1 << s->block_len_bits;
    // av_assert0((s->block_pos + s->block_len) <= s->frame_len);
    bsize = s->frame_len_bits - s->block_len_bits;

    // FIXME factor
    v = s->coefs_end[bsize] - s->coefs_start;
    for (ch = 0; ch < s->avctx->channels; ch++)
        nb_coefs[ch] = v;
    {
        /* undo the MDCT scaling; version 1 additionally scales by sqrt(n4) */
        int n4 = s->block_len / 2;
        mdct_norm = 1.0 / (float) n4;
        if (s->version == 1)
            mdct_norm *= sqrt(n4);
    }

    if (s->avctx->channels == 2)
        put_bits(&s->pb, 1, !!s->ms_stereo);

    for (ch = 0; ch < s->avctx->channels; ch++) {
        // FIXME only set channel_coded when needed, instead of always
        s->channel_coded[ch] = 1;
        if (s->channel_coded[ch])
            init_exp(s, ch, fixed_exp);
    }

    /* quantize every coded channel's coefficients into s->coefs1 */
    for (ch = 0; ch < s->avctx->channels; ch++) {
        if (s->channel_coded[ch]) {
            WMACoef *coefs1;
            float *coefs, *exponents, mult;
            int i, n;

            coefs1    = s->coefs1[ch];
            exponents = s->exponents[ch];
            /* linear scale factor derived from total_gain, normalized by the
             * channel's maximum exponent and the MDCT normalization */
            mult  = ff_exp10(total_gain * 0.05) / s->max_exponent[ch];
            mult *= mdct_norm;
            coefs = src_coefs[ch];
            if (s->use_noise_coding && 0) { /* noise-coding path disabled */
                av_assert0(0); // FIXME not implemented
            } else {
                coefs += s->coefs_start;
                n      = nb_coefs[ch];
                for (i = 0; i < n; i++) {
                    double t = *coefs++ / (exponents[i] * mult);
                    /* quantized value must fit in 16 bits, otherwise signal
                     * the caller to pick another gain */
                    if (t < -32768 || t > 32767)
                        return -1;
                    coefs1[i] = lrint(t);
                }
            }
        }
    }

    /* per-channel "coded" flags; bail out early if nothing is coded */
    v = 0;
    for (ch = 0; ch < s->avctx->channels; ch++) {
        int a = s->channel_coded[ch];
        put_bits(&s->pb, 1, a);
        v |= a;
    }

    if (!v)
        return 1;

    /* total gain, written as saturating 7-bit chunks */
    for (v = total_gain - 1; v >= 127; v -= 127)
        put_bits(&s->pb, 7, 127);
    put_bits(&s->pb, 7, v);

    coef_nb_bits = ff_wma_total_gain_to_bits(total_gain);

    if (s->use_noise_coding) {
        /* high bands are never coded by this encoder: flag forced to 0,
         * and the coefficient-count adjustment is dead code */
        for (ch = 0; ch < s->avctx->channels; ch++) {
            if (s->channel_coded[ch]) {
                int i, n;
                n = s->exponent_high_sizes[bsize];
                for (i = 0; i < n; i++) {
                    put_bits(&s->pb, 1, s->high_band_coded[ch][i] = 0);
                    if (0)
                        nb_coefs[ch] -= s->exponent_high_bands[bsize][i];
                }
            }
        }
    }

    /* the parse_exponents flag is only transmitted for variable block sizes;
     * with fixed blocks exponents are always present */
    parse_exponents = 1;
    if (s->block_len_bits != s->frame_len_bits)
        put_bits(&s->pb, 1, parse_exponents);

    if (parse_exponents) {
        for (ch = 0; ch < s->avctx->channels; ch++) {
            if (s->channel_coded[ch]) {
                if (s->use_exp_vlc) {
                    encode_exp_vlc(s, ch, fixed_exp);
                } else {
                    av_assert0(0); // FIXME not implemented
                    // encode_exp_lsp(s, ch);
                }
            }
        }
    } else
        av_assert0(0); // FIXME not implemented

    /* run-level code the quantized coefficients */
    for (ch = 0; ch < s->avctx->channels; ch++) {
        if (s->channel_coded[ch]) {
            int run, tindex;
            WMACoef *ptr, *eptr;
            /* second channel uses the alternate VLC table in M/S stereo */
            tindex = (ch == 1 && s->ms_stereo);
            ptr    = &s->coefs1[ch][0];
            eptr   = ptr + nb_coefs[ch];

            run = 0;
            for (; ptr < eptr; ptr++) {
                if (*ptr) {
                    int level     = *ptr;
                    int abs_level = FFABS(level);
                    int code      = 0;
                    /* look for a combined (run, level) VLC code; code 0 is
                     * the escape that transmits level and run verbatim */
                    if (abs_level <= s->coef_vlcs[tindex]->max_level)
                        if (run < s->coef_vlcs[tindex]->levels[abs_level - 1])
                            code = run + s->int_table[tindex][abs_level - 1];

                    av_assert2(code < s->coef_vlcs[tindex]->n);
                    put_bits(&s->pb, s->coef_vlcs[tindex]->huffbits[code],
                             s->coef_vlcs[tindex]->huffcodes[code]);

                    if (code == 0) {
                        /* escape: level must fit in coef_nb_bits */
                        if (1 << coef_nb_bits <= abs_level)
                            return -1;
                        put_bits(&s->pb, coef_nb_bits, abs_level);
                        put_bits(&s->pb, s->frame_len_bits, run);
                    }

                    // FIXME the sign is flipped somewhere
                    put_bits(&s->pb, 1, level < 0);
                    run = 0;
                } else
                    run++;
            }
            /* code 1 marks a trailing run of zeros */
            if (run)
                put_bits(&s->pb, s->coef_vlcs[tindex]->huffbits[1],
                         s->coef_vlcs[tindex]->huffcodes[1]);
        }
        if (s->version == 1 && s->avctx->channels >= 2)
            avpriv_align_put_bits(&s->pb);
    }
    return 0;
}
/**
 * Draw a histogram visualization of the input frame onto a new output frame.
 *
 * The rendering depends on h->mode:
 *   MODE_LEVELS   per-component value histogram drawn as columns, with a
 *                 gradient scale strip under each band
 *   MODE_WAVEFORM intensity plot: position on one axis, sample value on the
 *                 other, brightness accumulated in h->step increments
 *   MODE_COLOR    2D chroma scatter plot (U selects row, V selects column)
 *   MODE_COLOR2   chroma scatter colored by distance from neutral (128,128)
 *
 * Takes ownership of `in` (always freed) and sends the result downstream.
 *
 * @return 0 on success, or a negative AVERROR code
 */
static int filter_frame(AVFilterLink *inlink, AVFrame *in)
{
    HistogramContext *h = inlink->dst->priv;
    AVFilterContext *ctx = inlink->dst;
    AVFilterLink *outlink = ctx->outputs[0];
    AVFrame *out;
    const uint8_t *src;
    uint8_t *dst;
    int i, j, k, l, ret;

    out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
    if (!out) {
        av_frame_free(&in);
        return AVERROR(ENOMEM);
    }

    out->pts = in->pts;

    /* clear every plane of the output to the background color */
    for (k = 0; k < h->ncomp; k++)
        for (i = 0; i < outlink->h; i++)
            memset(out->data[k] + i * out->linesize[k],
                   h->bg_color[k], outlink->w);

    switch (h->mode) {
    case MODE_LEVELS:
        for (k = 0; k < h->ncomp; k++) {
            /* in parade display mode (display_mode != 0) each component
             * gets its own vertical band starting at `start` */
            int start = k * (h->level_height + h->scale_height) * h->display_mode;

            /* accumulate the value histogram of this plane */
            for (i = 0; i < in->height; i++) {
                src = in->data[k] + i * in->linesize[k];
                for (j = 0; j < in->width; j++)
                    h->histogram[src[j]]++;
            }

            for (i = 0; i < 256; i++)
                h->max_hval = FFMAX(h->max_hval, h->histogram[i]);

            /* one column per histogram bin.
             * NOTE(review): indexes h->histogram[i] for i < outlink->w, so
             * this assumes outlink->w == 256 — presumably guaranteed by the
             * output configuration; confirm against config_output. */
            for (i = 0; i < outlink->w; i++) {
                int col_height = h->level_height -
                    (float)h->histogram[i] / h->max_hval * h->level_height;

                for (j = h->level_height - 1; j >= col_height; j--) {
                    if (h->display_mode) {
                        for (l = 0; l < h->ncomp; l++)
                            out->data[l][(j + start) * out->linesize[l] + i] =
                                h->fg_color[l];
                    } else {
                        out->data[k][(j + start) * out->linesize[k] + i] = 255;
                    }
                }
                /* gradient scale strip below the histogram band */
                for (j = h->level_height + h->scale_height - 1;
                     j >= h->level_height; j--)
                    out->data[k][(j + start) * out->linesize[k] + i] = i;
            }

            /* reset accumulators for the next plane/frame */
            memset(h->histogram, 0, 256 * sizeof(unsigned));
            h->max_hval = 0;
        }
        break;
    case MODE_WAVEFORM:
        if (h->waveform_mode) {
            /* column waveform: x keeps its position, value selects the row */
            for (k = 0; k < h->ncomp; k++) {
                int offset = k * 256 * h->display_mode;
                for (i = 0; i < inlink->w; i++) {
                    for (j = 0; j < inlink->h; j++) {
                        int pos = (offset +
                                   in->data[k][j * in->linesize[k] + i]) *
                                  out->linesize[k] + i;
                        unsigned value = out->data[k][pos];
                        value = FFMIN(value + h->step, 255);
                        out->data[k][pos] = value;
                    }
                }
            }
        } else {
            /* row waveform: y keeps its position, value selects the column */
            for (k = 0; k < h->ncomp; k++) {
                int offset = k * 256 * h->display_mode;
                for (i = 0; i < inlink->h; i++) {
                    src = in->data[k] + i * in->linesize[k];
                    dst = out->data[k] + i * out->linesize[k];
                    for (j = 0; j < inlink->w; j++) {
                        int pos = src[j] + offset;
                        unsigned value = dst[pos];
                        value = FFMIN(value + h->step, 255);
                        dst[pos] = value;
                    }
                }
            }
        }
        break;
    case MODE_COLOR:
        /* plot (U, V) occurrences; plane 0 counts hits, saturating at 255 */
        for (i = 0; i < inlink->h; i++) {
            int iw1 = i * in->linesize[1];
            int iw2 = i * in->linesize[2];
            for (j = 0; j < inlink->w; j++) {
                int pos = in->data[1][iw1 + j] * out->linesize[0] +
                          in->data[2][iw2 + j];
                if (out->data[0][pos] < 255)
                    out->data[0][pos]++;
            }
        }
        /* tint unpopulated cells with their own (U, V) coordinate.
         * NOTE(review): out->linesize[0] is used to index the chroma planes
         * as well — presumably all plane linesizes are equal here (full-res
         * chroma output); verify. */
        for (i = 0; i < 256; i++) {
            dst = out->data[0] + i * out->linesize[0];
            for (j = 0; j < 256; j++) {
                if (!dst[j]) {
                    out->data[1][i * out->linesize[0] + j] = i;
                    out->data[2][i * out->linesize[0] + j] = j;
                }
            }
        }
        break;
    case MODE_COLOR2:
        /* scatter plot where luma encodes distance from neutral chroma */
        for (i = 0; i < inlink->h; i++) {
            int iw1 = i * in->linesize[1];
            int iw2 = i * in->linesize[2];
            for (j = 0; j < inlink->w; j++) {
                int u = in->data[1][iw1 + j];
                int v = in->data[2][iw2 + j];
                int pos = u * out->linesize[0] + v;
                if (!out->data[0][pos])
                    out->data[0][pos] = FFABS(128 - u) + FFABS(128 - v);
                out->data[1][pos] = u;
                out->data[2][pos] = v;
            }
        }
        break;
    default:
        av_assert0(0);
    }

    ret = ff_filter_frame(outlink, out);
    av_frame_free(&in);
    if (ret < 0)
        return ret;
    return 0;
}