void kbd_interrupt(void)
{
    /* Poll the keyboard controller; a non-zero scancode means a key event. */
    if ((cur_key = get_scan()))
        if ((cur_key = decode(cur_key))) {
            /* In CTRL mode, mask down to the ASCII control code. */
            vt_inproc(1, (mode & KEY_MODE_CTRL) ? (cur_key & 0x1f) : cur_key);
        }
    /* Data-ready bit set in the UART line status register: forward the byte. */
    if (UART_LSR & 1)
        vt_inproc(2, UART_DAT);
    //if(ch) vt_inproc(1,ch);
}
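The `cur_key & 0x1f` masking above is the usual trick for turning a printable key into its ASCII control code when a Ctrl-style mode is active. A minimal, self-contained illustration (the names here are hypothetical, not from the driver):

#include <stdio.h>

int main(void) {
    unsigned char key = 'C';          /* 0x43 */
    unsigned char ctrl = key & 0x1f;  /* 0x03 == ETX, i.e. Ctrl-C */
    printf("Ctrl-%c -> 0x%02x\n", key, ctrl);
    return 0;
}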
int vp9_decode_block_tokens(VP9_COMMON *cm, MACROBLOCKD *xd,
                            int plane, int block,
                            BLOCK_SIZE plane_bsize, int x, int y,
                            TX_SIZE tx_size, vp9_reader *r) {
  struct macroblockd_plane *const pd = &xd->plane[plane];
  const int ctx = get_entropy_context(tx_size, pd->above_context + x,
                                      pd->left_context + y);
  const scan_order *so = get_scan(xd, tx_size, pd->plane_type, block);
  const int eob = decode_coefs(cm, xd, pd->plane_type,
                               BLOCK_OFFSET(pd->dqcoeff, block), tx_size,
                               pd->dequant, ctx, so->scan, so->neighbors, r);
  vp9_set_contexts(xd, pd, plane_bsize, tx_size, eob > 0, x, y);
  return eob;
}
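Here get_scan() returns a scan_order whose scan and neighbors tables drive both the order in which coefficients are coded and the context used for each one. A small, self-contained sketch of the scan-table idea (the 4x4 table below is illustrative only, not the actual VP9 scan):

#include <stdio.h>

/* Hypothetical 4x4 coefficient scan: maps scan position -> raster index. */
static const int scan4x4[16] = {
    0, 1, 4, 8, 5, 2, 3, 6, 9, 12, 13, 10, 7, 11, 14, 15
};

int main(void) {
    int coeffs[16] = {0};            /* raster-ordered block */
    for (int c = 0; c < 16; ++c)     /* visit positions in scan order, as decode_coefs does */
        coeffs[scan4x4[c]] = c;
    for (int i = 0; i < 16; ++i)
        printf("%2d%c", coeffs[i], (i % 4 == 3) ? '\n' : ' ');
    return 0;
}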
RTC::ReturnCode_t RTCHokuyoAIST::onExecute(RTC::UniqueId ec_id)
{
    coil::Guard<coil::Mutex> guard(mutex_);
    try
    {
        if(!pull_mode_)
        {
            get_scan();
        }
    }
    catch(hokuyoaist::BaseError &e)
    {
        std::cerr << "Error getting laser scan: " << e.what() << '\n';
        time_t now = time(NULL);
        if (now - last_error_time_ <= error_time_)
        {
            last_error_time_ = now;
            return RTC::RTC_ERROR;
        }
        else
        {
            last_error_time_ = now;
            try
            {
                std::cerr << "Attempting to reset laser.\n";
                reset_laser();
            }
            catch(hokuyoaist::BaseError &e)
            {
                std::cerr << "Reset failed: " << e.what() << '\n';
                return RTC::RTC_ERROR;
            }
            catch(flexiport::PortException &e)
            {
                std::cerr << "Reset failed: " << e.what() << '\n';
                return RTC::RTC_ERROR;
            }
        }
    }
    return RTC::RTC_OK;
}
int vp9_optimize_b(MACROBLOCK *mb, int plane, int block, TX_SIZE tx_size,
                   int ctx) {
  MACROBLOCKD *const xd = &mb->e_mbd;
  struct macroblock_plane *const p = &mb->plane[plane];
  struct macroblockd_plane *const pd = &xd->plane[plane];
  const int ref = is_inter_block(xd->mi[0]);
  vp9_token_state tokens[1025][2];
  uint8_t token_cache[1024];
  const tran_low_t *const coeff = BLOCK_OFFSET(mb->plane[plane].coeff, block);
  tran_low_t *const qcoeff = BLOCK_OFFSET(p->qcoeff, block);
  tran_low_t *const dqcoeff = BLOCK_OFFSET(pd->dqcoeff, block);
  const int eob = p->eobs[block];
  const PLANE_TYPE type = get_plane_type(plane);
  const int default_eob = 16 << (tx_size << 1);
  const int shift = (tx_size == TX_32X32);
  const int16_t *const dequant_ptr = pd->dequant;
  const uint8_t *const band_translate = get_band_translate(tx_size);
  const scan_order *const so = get_scan(xd, tx_size, type, block);
  const int16_t *const scan = so->scan;
  const int16_t *const nb = so->neighbors;
  const int dq_step[2] = { dequant_ptr[0] >> shift, dequant_ptr[1] >> shift };
static void tokenize_b(int plane, int block, BLOCK_SIZE plane_bsize,
                       TX_SIZE tx_size, void *arg) {
  struct tokenize_b_args* const args = arg;
  VP9_COMP *cpi = args->cpi;
  ThreadData *const td = args->td;
  MACROBLOCK *const x = &td->mb;
  MACROBLOCKD *const xd = &x->e_mbd;
  TOKENEXTRA **tp = args->tp;
  uint8_t token_cache[32 * 32];
  struct macroblock_plane *p = &x->plane[plane];
  struct macroblockd_plane *pd = &xd->plane[plane];
  MB_MODE_INFO *mbmi = &xd->mi[0].src_mi->mbmi;
  int pt;  /* near block/prev token context index */
  int c;
  TOKENEXTRA *t = *tp;  /* store tokens starting here */
  int eob = p->eobs[block];
  const PLANE_TYPE type = pd->plane_type;
  const tran_low_t *qcoeff = BLOCK_OFFSET(p->qcoeff, block);
  const int segment_id = mbmi->segment_id;
  const int16_t *scan, *nb;
  const scan_order *so;
  const int ref = is_inter_block(mbmi);
  unsigned int (*const counts)[COEFF_CONTEXTS][ENTROPY_TOKENS] =
      td->rd_counts.coef_counts[tx_size][type][ref];
  vp9_prob (*const coef_probs)[COEFF_CONTEXTS][UNCONSTRAINED_NODES] =
      cpi->common.fc->coef_probs[tx_size][type][ref];
  unsigned int (*const eob_branch)[COEFF_CONTEXTS] =
      td->counts->eob_branch[tx_size][type][ref];
  const uint8_t *const band = get_band_translate(tx_size);
  const int seg_eob = get_tx_eob(&cpi->common.seg, segment_id, tx_size);
  int16_t token;
  EXTRABIT extra;
  int aoff, loff;
  txfrm_block_to_raster_xy(plane_bsize, tx_size, block, &aoff, &loff);

  pt = get_entropy_context(tx_size, pd->above_context + aoff,
                           pd->left_context + loff);
  so = get_scan(xd, tx_size, type, block);
  scan = so->scan;
  nb = so->neighbors;
  c = 0;

  while (c < eob) {
    int v = 0;
    int skip_eob = 0;
    v = qcoeff[scan[c]];

    while (!v) {
      add_token_no_extra(&t, coef_probs[band[c]][pt], ZERO_TOKEN, skip_eob,
                         counts[band[c]][pt]);
      eob_branch[band[c]][pt] += !skip_eob;

      skip_eob = 1;
      token_cache[scan[c]] = 0;
      ++c;
      pt = get_coef_context(nb, token_cache, c);
      v = qcoeff[scan[c]];
    }

    vp9_get_token_extra(v, &token, &extra);

    add_token(&t, coef_probs[band[c]][pt], extra, (uint8_t)token,
              (uint8_t)skip_eob, counts[band[c]][pt]);
    eob_branch[band[c]][pt] += !skip_eob;

    token_cache[scan[c]] = vp9_pt_energy_class[token];
    ++c;
    pt = get_coef_context(nb, token_cache, c);
  }
  if (c < seg_eob) {
    add_token_no_extra(&t, coef_probs[band[c]][pt], EOB_TOKEN, 0,
                       counts[band[c]][pt]);
    ++eob_branch[band[c]][pt];
  }

  *tp = t;

  vp9_set_contexts(xd, pd, plane_bsize, tx_size, c > 0, aoff, loff);
}
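add_token() and add_token_no_extra() are not part of this listing; in vp9_tokenize.c they are small inline helpers that append one TOKENEXTRA entry to the output stream and bump the matching count, roughly as sketched below (a recollection of that code, so field order and exact types may differ):

static INLINE void add_token(TOKENEXTRA **t, const vp9_prob *context_tree,
                             EXTRABIT extra, uint8_t token,
                             uint8_t skip_eob_node, unsigned int *counts) {
  (*t)->token = token;
  (*t)->extra = extra;
  (*t)->context_tree = context_tree;
  (*t)->skip_eob_node = skip_eob_node;
  (*t)++;
  ++counts[token];
}

static INLINE void add_token_no_extra(TOKENEXTRA **t,
                                      const vp9_prob *context_tree,
                                      uint8_t token, uint8_t skip_eob_node,
                                      unsigned int *counts) {
  (*t)->token = token;
  (*t)->context_tree = context_tree;
  (*t)->skip_eob_node = skip_eob_node;
  (*t)++;
  ++counts[token];
}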
static int optimize_b(MACROBLOCK *mb, int plane, int block,
                      TX_SIZE tx_size, int ctx) {
  MACROBLOCKD *const xd = &mb->e_mbd;
  struct macroblock_plane *const p = &mb->plane[plane];
  struct macroblockd_plane *const pd = &xd->plane[plane];
  const int ref = is_inter_block(xd->mi[0]);
  vp9_token_state tokens[1025][2];
  unsigned best_index[1025][2];
  uint8_t token_cache[1024];
  const tran_low_t *const coeff = BLOCK_OFFSET(mb->plane[plane].coeff, block);
  tran_low_t *const qcoeff = BLOCK_OFFSET(p->qcoeff, block);
  tran_low_t *const dqcoeff = BLOCK_OFFSET(pd->dqcoeff, block);
  const int eob = p->eobs[block];
  const PLANE_TYPE type = get_plane_type(plane);
  const int default_eob = 16 << (tx_size << 1);
  const int mul = 1 + (tx_size == TX_32X32);
  const int16_t *dequant_ptr = pd->dequant;
  const uint8_t *const band_translate = get_band_translate(tx_size);
  const scan_order *const so = get_scan(xd, tx_size, type, block);
  const int16_t *const scan = so->scan;
  const int16_t *const nb = so->neighbors;
  int next = eob, sz = 0;
  int64_t rdmult = mb->rdmult * plane_rd_mult[type], rddiv = mb->rddiv;
  int64_t rd_cost0, rd_cost1;
  int rate0, rate1, error0, error1;
  int16_t t0, t1;
  EXTRABIT e0;
  int best, band, pt, i, final_eob;
#if CONFIG_VP9_HIGHBITDEPTH
  const int *cat6_high_cost = vp9_get_high_cost_table(xd->bd);
#else
  const int *cat6_high_cost = vp9_get_high_cost_table(8);
#endif

  assert((!type && !plane) || (type && plane));
  assert(eob <= default_eob);

  /* Now set up a Viterbi trellis to evaluate alternative roundings. */
  if (!ref)
    rdmult = (rdmult * 9) >> 4;

  /* Initialize the sentinel node of the trellis. */
  tokens[eob][0].rate = 0;
  tokens[eob][0].error = 0;
  tokens[eob][0].next = default_eob;
  tokens[eob][0].token = EOB_TOKEN;
  tokens[eob][0].qc = 0;
  tokens[eob][1] = tokens[eob][0];

  for (i = 0; i < eob; i++)
    token_cache[scan[i]] =
        vp9_pt_energy_class[vp9_get_token(qcoeff[scan[i]])];

  for (i = eob; i-- > 0;) {
    int base_bits, d2, dx;
    const int rc = scan[i];
    int x = qcoeff[rc];
    /* Only add a trellis state for non-zero coefficients. */
    if (x) {
      int shortcut = 0;
      error0 = tokens[next][0].error;
      error1 = tokens[next][1].error;
      /* Evaluate the first possibility for this state. */
      rate0 = tokens[next][0].rate;
      rate1 = tokens[next][1].rate;
      vp9_get_token_extra(x, &t0, &e0);
      /* Consider both possible successor states. */
      if (next < default_eob) {
        band = band_translate[i + 1];
        pt = trellis_get_coeff_context(scan, nb, i, t0, token_cache);
        rate0 += mb->token_costs[tx_size][type][ref][band][0][pt]
                                [tokens[next][0].token];
        rate1 += mb->token_costs[tx_size][type][ref][band][0][pt]
                                [tokens[next][1].token];
      }
      UPDATE_RD_COST();
      /* And pick the best. */
      best = rd_cost1 < rd_cost0;
      base_bits = vp9_get_cost(t0, e0, cat6_high_cost);
      dx = mul * (dqcoeff[rc] - coeff[rc]);
#if CONFIG_VP9_HIGHBITDEPTH
      if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
        dx >>= xd->bd - 8;
      }
#endif  // CONFIG_VP9_HIGHBITDEPTH
      d2 = dx * dx;
      tokens[i][0].rate = base_bits + (best ? rate1 : rate0);
      tokens[i][0].error = d2 + (best ? error1 : error0);
      tokens[i][0].next = next;
      tokens[i][0].token = t0;
      tokens[i][0].qc = x;
      best_index[i][0] = best;
      /* Evaluate the second possibility for this state. */
      rate0 = tokens[next][0].rate;
      rate1 = tokens[next][1].rate;

      if ((abs(x) * dequant_ptr[rc != 0] > abs(coeff[rc]) * mul) &&
          (abs(x) * dequant_ptr[rc != 0] <
           abs(coeff[rc]) * mul + dequant_ptr[rc != 0]))
        shortcut = 1;
      else
        shortcut = 0;

      if (shortcut) {
        sz = -(x < 0);
        x -= 2 * sz + 1;
      }

      /* Consider both possible successor states. */
      if (!x) {
        /* If we reduced this coefficient to zero, check to see if
         * we need to move the EOB back here.
         */
        t0 = tokens[next][0].token == EOB_TOKEN ? EOB_TOKEN : ZERO_TOKEN;
        t1 = tokens[next][1].token == EOB_TOKEN ? EOB_TOKEN : ZERO_TOKEN;
        e0 = 0;
      } else {
        vp9_get_token_extra(x, &t0, &e0);
        t1 = t0;
      }
      if (next < default_eob) {
        band = band_translate[i + 1];
        if (t0 != EOB_TOKEN) {
          pt = trellis_get_coeff_context(scan, nb, i, t0, token_cache);
          rate0 += mb->token_costs[tx_size][type][ref][band][!x][pt]
                                  [tokens[next][0].token];
        }
        if (t1 != EOB_TOKEN) {
          pt = trellis_get_coeff_context(scan, nb, i, t1, token_cache);
          rate1 += mb->token_costs[tx_size][type][ref][band][!x][pt]
                                  [tokens[next][1].token];
        }
      }
      UPDATE_RD_COST();
      /* And pick the best. */
      best = rd_cost1 < rd_cost0;
      base_bits = vp9_get_cost(t0, e0, cat6_high_cost);

      if (shortcut) {
#if CONFIG_VP9_HIGHBITDEPTH
        if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
          dx -= ((dequant_ptr[rc != 0] >> (xd->bd - 8)) + sz) ^ sz;
        } else {
          dx -= (dequant_ptr[rc != 0] + sz) ^ sz;
        }
#else
        dx -= (dequant_ptr[rc != 0] + sz) ^ sz;
#endif  // CONFIG_VP9_HIGHBITDEPTH
        d2 = dx * dx;
      }
static void tokenize_b(int plane, int block, BLOCK_SIZE plane_bsize,
                       TX_SIZE tx_size, void *arg) {
  struct tokenize_b_args* const args = arg;
  VP9_COMP *cpi = args->cpi;
  MACROBLOCKD *xd = args->xd;
  TOKENEXTRA **tp = args->tp;
  uint8_t *token_cache = args->token_cache;
  struct macroblock_plane *p = &cpi->mb.plane[plane];
  struct macroblockd_plane *pd = &xd->plane[plane];
  MB_MODE_INFO *mbmi = &xd->mi_8x8[0]->mbmi;
  int pt;  /* near block/prev token context index */
  int c = 0;
  TOKENEXTRA *t = *tp;  /* store tokens starting here */
  int eob = p->eobs[block];
  const PLANE_TYPE type = pd->plane_type;
  const int16_t *qcoeff_ptr = BLOCK_OFFSET(p->qcoeff, block);
  const int segment_id = mbmi->segment_id;
  const int16_t *scan, *nb;
  const scan_order *so;
  vp9_coeff_count *const counts = cpi->coef_counts[tx_size];
  vp9_coeff_probs_model *const coef_probs = cpi->common.fc.coef_probs[tx_size];
  const int ref = is_inter_block(mbmi);
  const uint8_t *const band = get_band_translate(tx_size);
  const int seg_eob = get_tx_eob(&cpi->common.seg, segment_id, tx_size);

  int aoff, loff;
  txfrm_block_to_raster_xy(plane_bsize, tx_size, block, &aoff, &loff);

  pt = get_entropy_context(tx_size, pd->above_context + aoff,
                           pd->left_context + loff);
  so = get_scan(xd, tx_size, type, block);
  scan = so->scan;
  nb = so->neighbors;
  c = 0;
  while (c < eob) {
    int v = 0;
    int skip_eob = 0;
    v = qcoeff_ptr[scan[c]];

    while (!v) {
      add_token(&t, coef_probs[type][ref][band[c]][pt], 0, ZERO_TOKEN,
                skip_eob, counts[type][ref][band[c]][pt]);

      cpi->common.counts.eob_branch[tx_size][type][ref][band[c]][pt] +=
          !skip_eob;

      skip_eob = 1;
      token_cache[scan[c]] = 0;
      ++c;
      pt = get_coef_context(nb, token_cache, c);
      v = qcoeff_ptr[scan[c]];
    }

    add_token(&t, coef_probs[type][ref][band[c]][pt],
              vp9_dct_value_tokens_ptr[v].extra,
              vp9_dct_value_tokens_ptr[v].token,
              skip_eob,
              counts[type][ref][band[c]][pt]);

    cpi->common.counts.eob_branch[tx_size][type][ref][band[c]][pt] +=
        !skip_eob;

    token_cache[scan[c]] =
        vp9_pt_energy_class[vp9_dct_value_tokens_ptr[v].token];
    ++c;
    pt = get_coef_context(nb, token_cache, c);
  }
  if (c < seg_eob) {
    add_token(&t, coef_probs[type][ref][band[c]][pt], 0, EOB_TOKEN, 0,
              counts[type][ref][band[c]][pt]);
    ++cpi->common.counts.eob_branch[tx_size][type][ref][band[c]][pt];
  }

  *tp = t;

  set_contexts(xd, pd, plane_bsize, tx_size, c > 0, aoff, loff);
}
static void tokenize_b(int plane, int block, BLOCK_SIZE plane_bsize,
                       TX_SIZE tx_size, void *arg) {
  struct tokenize_b_args* const args = arg;
  VP9_COMP *cpi = args->cpi;
  MACROBLOCKD *xd = args->xd;
  TOKENEXTRA **tp = args->tp;
  uint8_t token_cache[32 * 32];
  struct macroblock_plane *p = &cpi->mb.plane[plane];
  struct macroblockd_plane *pd = &xd->plane[plane];
  MB_MODE_INFO *mbmi = &xd->mi[0].src_mi->mbmi;
  int pt;  /* near block/prev token context index */
  int c;
  TOKENEXTRA *t = *tp;  /* store tokens starting here */
  int eob = p->eobs[block];
  const PLANE_TYPE type = pd->plane_type;
  const tran_low_t *qcoeff = BLOCK_OFFSET(p->qcoeff, block);
  const int segment_id = mbmi->segment_id;
  const int16_t *scan, *nb;
  const scan_order *so;
  const int ref = is_inter_block(mbmi);
  unsigned int (*const counts)[COEFF_CONTEXTS][ENTROPY_TOKENS] =
      cpi->coef_counts[tx_size][type][ref];
  vp9_prob (*const coef_probs)[COEFF_CONTEXTS][UNCONSTRAINED_NODES] =
      cpi->common.fc.coef_probs[tx_size][type][ref];
  unsigned int (*const eob_branch)[COEFF_CONTEXTS] =
      cpi->common.counts.eob_branch[tx_size][type][ref];
  const uint8_t *const band = get_band_translate(tx_size);
  const int seg_eob = get_tx_eob(&cpi->common.seg, segment_id, tx_size);
  const TOKENVALUE *dct_value_tokens;

  int aoff, loff;
  txfrm_block_to_raster_xy(plane_bsize, tx_size, block, &aoff, &loff);

  pt = get_entropy_context(tx_size, pd->above_context + aoff,
                           pd->left_context + loff);
  so = get_scan(xd, tx_size, type, block);
  scan = so->scan;
  nb = so->neighbors;
  c = 0;
#if CONFIG_VP9_HIGHBITDEPTH
  if (cpi->common.profile >= PROFILE_2) {
    dct_value_tokens = (cpi->common.bit_depth == VPX_BITS_10
                            ? vp9_dct_value_tokens_high10_ptr
                            : vp9_dct_value_tokens_high12_ptr);
  } else {
    dct_value_tokens = vp9_dct_value_tokens_ptr;
  }
#else
  dct_value_tokens = vp9_dct_value_tokens_ptr;
#endif

  while (c < eob) {
    int v = 0;
    int skip_eob = 0;
    v = qcoeff[scan[c]];

    while (!v) {
      add_token_no_extra(&t, coef_probs[band[c]][pt], ZERO_TOKEN, skip_eob,
                         counts[band[c]][pt]);
      eob_branch[band[c]][pt] += !skip_eob;

      skip_eob = 1;
      token_cache[scan[c]] = 0;
      ++c;
      pt = get_coef_context(nb, token_cache, c);
      v = qcoeff[scan[c]];
    }

    add_token(&t, coef_probs[band[c]][pt],
              dct_value_tokens[v].extra,
              (uint8_t)dct_value_tokens[v].token,
              (uint8_t)skip_eob,
              counts[band[c]][pt]);
    eob_branch[band[c]][pt] += !skip_eob;

    token_cache[scan[c]] = vp9_pt_energy_class[dct_value_tokens[v].token];
    ++c;
    pt = get_coef_context(nb, token_cache, c);
  }
  if (c < seg_eob) {
    add_token_no_extra(&t, coef_probs[band[c]][pt], EOB_TOKEN, 0,
                       counts[band[c]][pt]);
    ++eob_branch[band[c]][pt];
  }

  *tp = t;

  vp9_set_contexts(xd, pd, plane_bsize, tx_size, c > 0, aoff, loff);
}
static int decode_coefs(VP9_COMMON *cm, const MACROBLOCKD *xd,
                        vp9_reader *r, int block_idx,
                        PLANE_TYPE type, int seg_eob, int16_t *qcoeff_ptr,
                        TX_SIZE tx_size, const int16_t *dq, int pt) {
  const FRAME_CONTEXT *const fc = &cm->fc;
  FRAME_COUNTS *const counts = &cm->counts;
  const int ref = is_inter_block(&xd->mi_8x8[0]->mbmi);
  int band, c = 0;
  const vp9_prob (*coef_probs)[PREV_COEF_CONTEXTS][UNCONSTRAINED_NODES] =
      fc->coef_probs[tx_size][type][ref];
  vp9_prob coef_probs_full[COEF_BANDS][PREV_COEF_CONTEXTS][ENTROPY_NODES];
  uint8_t load_map[COEF_BANDS][PREV_COEF_CONTEXTS] = { { 0 } };
  const vp9_prob *prob;
  vp9_coeff_count_model *coef_counts = counts->coef[tx_size];
  const int16_t *scan, *nb;
  const uint8_t *const band_translate = get_band_translate(tx_size);
  uint8_t token_cache[1024];
  get_scan(xd, tx_size, type, block_idx, &scan, &nb);

  while (1) {
    int val;
    const uint8_t *cat6 = cat6_prob;
    if (c >= seg_eob)
      break;
    if (c)
      pt = get_coef_context(nb, token_cache, c);
    band = get_coef_band(band_translate, c);
    prob = coef_probs[band][pt];
    if (!cm->frame_parallel_decoding_mode)
      ++counts->eob_branch[tx_size][type][ref][band][pt];
    if (!vp9_read(r, prob[EOB_CONTEXT_NODE]))
      break;

SKIP_START:
    if (c >= seg_eob)
      break;
    if (c)
      pt = get_coef_context(nb, token_cache, c);
    band = get_coef_band(band_translate, c);
    prob = coef_probs[band][pt];

    if (!vp9_read(r, prob[ZERO_CONTEXT_NODE])) {
      INCREMENT_COUNT(ZERO_TOKEN);
      ++c;
      goto SKIP_START;
    }

    // ONE_CONTEXT_NODE_0_
    if (!vp9_read(r, prob[ONE_CONTEXT_NODE])) {
      WRITE_COEF_CONTINUE(1, ONE_TOKEN);
    }
    // Load full probabilities if not already loaded
    if (!load_map[band][pt]) {
      vp9_model_to_full_probs(coef_probs[band][pt],
                              coef_probs_full[band][pt]);
      load_map[band][pt] = 1;
    }
    prob = coef_probs_full[band][pt];
    // LOW_VAL_CONTEXT_NODE_0_
    if (!vp9_read(r, prob[LOW_VAL_CONTEXT_NODE])) {
      if (!vp9_read(r, prob[TWO_CONTEXT_NODE])) {
        WRITE_COEF_CONTINUE(2, TWO_TOKEN);
      }
      if (!vp9_read(r, prob[THREE_CONTEXT_NODE])) {
        WRITE_COEF_CONTINUE(3, THREE_TOKEN);
      }
      WRITE_COEF_CONTINUE(4, FOUR_TOKEN);
    }
    // HIGH_LOW_CONTEXT_NODE_0_
    if (!vp9_read(r, prob[HIGH_LOW_CONTEXT_NODE])) {
      if (!vp9_read(r, prob[CAT_ONE_CONTEXT_NODE])) {
        val = CAT1_MIN_VAL;
        ADJUST_COEF(CAT1_PROB0, 0);
        WRITE_COEF_CONTINUE(val, DCT_VAL_CATEGORY1);
      }
      val = CAT2_MIN_VAL;
      ADJUST_COEF(CAT2_PROB1, 1);
      ADJUST_COEF(CAT2_PROB0, 0);
      WRITE_COEF_CONTINUE(val, DCT_VAL_CATEGORY2);
    }
    // CAT_THREEFOUR_CONTEXT_NODE_0_
    if (!vp9_read(r, prob[CAT_THREEFOUR_CONTEXT_NODE])) {
      if (!vp9_read(r, prob[CAT_THREE_CONTEXT_NODE])) {
        val = CAT3_MIN_VAL;
        ADJUST_COEF(CAT3_PROB2, 2);
        ADJUST_COEF(CAT3_PROB1, 1);
        ADJUST_COEF(CAT3_PROB0, 0);
        WRITE_COEF_CONTINUE(val, DCT_VAL_CATEGORY3);
      }
      val = CAT4_MIN_VAL;
      ADJUST_COEF(CAT4_PROB3, 3);
      ADJUST_COEF(CAT4_PROB2, 2);
      ADJUST_COEF(CAT4_PROB1, 1);
      ADJUST_COEF(CAT4_PROB0, 0);
      WRITE_COEF_CONTINUE(val, DCT_VAL_CATEGORY4);
    }
    // CAT_FIVE_CONTEXT_NODE_0_:
    if (!vp9_read(r, prob[CAT_FIVE_CONTEXT_NODE])) {
      val = CAT5_MIN_VAL;
      ADJUST_COEF(CAT5_PROB4, 4);
      ADJUST_COEF(CAT5_PROB3, 3);
      ADJUST_COEF(CAT5_PROB2, 2);
      ADJUST_COEF(CAT5_PROB1, 1);
      ADJUST_COEF(CAT5_PROB0, 0);
      WRITE_COEF_CONTINUE(val, DCT_VAL_CATEGORY5);
    }
    val = 0;
    while (*cat6) {
      val = (val << 1) | vp9_read(r, *cat6++);
    }
    val += CAT6_MIN_VAL;
    WRITE_COEF_CONTINUE(val, DCT_VAL_CATEGORY6);
  }

  if (c < seg_eob) {
    if (!cm->frame_parallel_decoding_mode)
      ++coef_counts[type][ref][band][pt][DCT_EOB_MODEL_TOKEN];
  }

  return c;
}
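The helper macros used above (INCREMENT_COUNT, ADJUST_COEF, WRITE_COEF_CONTINUE) are defined elsewhere in vp9_detokenize.c and are not part of this listing. Roughly: INCREMENT_COUNT bumps the per-token count unless frame-parallel decoding is enabled; WRITE_COEF_CONTINUE reads the sign, writes the dequantized coefficient to qcoeff_ptr[scan[c]], records the token in token_cache, increments c, and jumps back to SKIP_START. ADJUST_COEF reads one extra literal bit and folds it into val, along these lines (a sketch from memory, not the exact definition):

/* Sketch only; see vp9_detokenize.c for the real definition. */
#define ADJUST_COEF(prob, bits_count)  \
  do {                                 \
    if (vp9_read(r, prob))             \
      val += 1 << bits_count;          \
  } while (0)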
/*
** NUMBER
** scans numerical constants (both integer and floating). Each
** constant is converted from ascii to its numerical representation
** and is entered into the symbol table, indexed by 'yylval'.
** A token is returned for the number type.
**
** Due to the current atof in the utility library, floating overflow
** is not checked.
*/
int number(char chr)
{
    extern int      yylval;
    double          ftemp;
    long            ltemp;
    short           itemp;
    char            buf[256];
    register int    lsave;
    register char   *ptr;

    lsave = Lcase;
    Lcase = 0;
    ptr = buf;
    if ((*ptr = chr) != '.') {
        do {
            /* get integer portion */
            if ((ptr - buf) >= 256) {
                /* buffer overflow */
                par_error(NUMBUFOFLO, WARN, 0, 0, 0);
            }
            *++ptr = get_scan(NORMAL);
        } while (parser_cmap(*ptr) == NUMBR);
    }

    /* do rest of type determination */
    switch (*ptr) {
    case '.':
        /* floating point */
        do {
            /* fill into ptr with up to next non-digit */
            if ((ptr - buf) >= 256) {
                /* buf oflo */
                par_error(NUMBUFOFLO, WARN, 0, 0, 0);
            }
            *++ptr = get_scan(NORMAL);
        } while (parser_cmap(*ptr) == NUMBR);
        if (*ptr != 'e' && *ptr != 'E') {
            backup(*ptr);
            *ptr = 0;
            goto convr;
        }

    case 'e':
    case 'E':
        if ((ptr - buf) >= 256) {
            par_error(NUMBUFOFLO, WARN, 0, 0, 0);   /* buf oflo */
        }
        *++ptr = get_scan(NORMAL);
        if (parser_cmap(*ptr) == NUMBR || *ptr == '-' || *ptr == '+') {
            do {
                /* get exponent */
                if ((ptr - buf) >= 256) {
                    /* buf oflo */
                    par_error(NUMBUFOFLO, WARN, 0, 0, 0);
                }
                *++ptr = get_scan(NORMAL);
            } while (parser_cmap(*ptr) == NUMBR);
        }
        backup(*ptr);
        *ptr = 0;
        /* FALLTHROUGH */

convr:
        if (ingres_atof(buf, &ftemp)) {
            /* floating conversion error */
            par_error(FCONSTERR, WARN, buf, 0, 0);
        }
        yylval = (int) syment(&ftemp, 8);
        Lastok.toktyp = Tokens.f8const;
        break;

    default:
        /* integer */
        backup(*ptr);
        *ptr = 0;
        if (ingres_atol(buf, &ltemp)) {
            /* long conversion error */
            goto convr;
        }
        if (ltemp > 32767) {
            yylval = (int) syment(&ltemp, 4);
            Lastok.toktyp = Tokens.i4const;
            break;
        }
        itemp = ltemp;
        yylval = (int) syment(&itemp, 2);
        Lastok.toktyp = Tokens.i2const;
        break;
    }
    Lcase = lsave;
    Lastok.tok = (char *) yylval;
    Lastok.tokop = 0;
    return (Lastok.toktyp);
}
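number() classifies a constant as f8const when it sees a '.' or an exponent, as i4const when the integer value exceeds 32767, and as i2const otherwise. A tiny self-contained sketch of just that classification logic (the classify() helper is hypothetical, written only to mirror those decisions):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Hypothetical mini-classifier mirroring number()'s type decisions. */
static const char *classify(const char *s) {
    if (strpbrk(s, ".eE")) return "f8const";
    return (atol(s) > 32767) ? "i4const" : "i2const";
}

int main(void) {
    /* prints: i2const i4const f8const */
    printf("%s %s %s\n", classify("42"), classify("40000"), classify("3.14e2"));
    return 0;
}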
static int optimize_b(MACROBLOCK *mb, int plane, int block,
                      TX_SIZE tx_size, int ctx) {
  MACROBLOCKD *const xd = &mb->e_mbd;
  struct macroblock_plane *const p = &mb->plane[plane];
  struct macroblockd_plane *const pd = &xd->plane[plane];
  const int ref = is_inter_block(&xd->mi[0]->mbmi);
  vp9_token_state tokens[1025][2];
  unsigned best_index[1025][2];
  uint8_t token_cache[1024];
  const int16_t *const coeff = BLOCK_OFFSET(mb->plane[plane].coeff, block);
  int16_t *const qcoeff = BLOCK_OFFSET(p->qcoeff, block);
  int16_t *const dqcoeff = BLOCK_OFFSET(pd->dqcoeff, block);
  const int eob = p->eobs[block];
  const PLANE_TYPE type = pd->plane_type;
  const int default_eob = 16 << (tx_size << 1);
  const int mul = 1 + (tx_size == TX_32X32);
  const int16_t *dequant_ptr = pd->dequant;
  const uint8_t *const band_translate = get_band_translate(tx_size);
  const scan_order *const so = get_scan(xd, tx_size, type, block);
  const int16_t *const scan = so->scan;
  const int16_t *const nb = so->neighbors;
  int next = eob, sz = 0;
  int64_t rdmult = mb->rdmult * plane_rd_mult[type], rddiv = mb->rddiv;
  int64_t rd_cost0, rd_cost1;
  int rate0, rate1, error0, error1, t0, t1;
  int best, band, pt, i, final_eob;

  assert((!type && !plane) || (type && plane));
  assert(eob <= default_eob);

  /* Now set up a Viterbi trellis to evaluate alternative roundings. */
  if (!ref)
    rdmult = (rdmult * 9) >> 4;

  /* Initialize the sentinel node of the trellis. */
  tokens[eob][0].rate = 0;
  tokens[eob][0].error = 0;
  tokens[eob][0].next = default_eob;
  tokens[eob][0].token = EOB_TOKEN;
  tokens[eob][0].qc = 0;
  tokens[eob][1] = tokens[eob][0];

  for (i = 0; i < eob; i++)
    token_cache[scan[i]] =
        vp9_pt_energy_class[vp9_dct_value_tokens_ptr[qcoeff[scan[i]]].token];

  for (i = eob; i-- > 0;) {
    int base_bits, d2, dx;
    const int rc = scan[i];
    int x = qcoeff[rc];
    /* Only add a trellis state for non-zero coefficients. */
    if (x) {
      int shortcut = 0;
      error0 = tokens[next][0].error;
      error1 = tokens[next][1].error;
      /* Evaluate the first possibility for this state. */
      rate0 = tokens[next][0].rate;
      rate1 = tokens[next][1].rate;
      t0 = (vp9_dct_value_tokens_ptr + x)->token;
      /* Consider both possible successor states. */
      if (next < default_eob) {
        band = band_translate[i + 1];
        pt = trellis_get_coeff_context(scan, nb, i, t0, token_cache);
        rate0 += mb->token_costs[tx_size][type][ref][band][0][pt]
                                [tokens[next][0].token];
        rate1 += mb->token_costs[tx_size][type][ref][band][0][pt]
                                [tokens[next][1].token];
      }
      UPDATE_RD_COST();
      /* And pick the best. */
      best = rd_cost1 < rd_cost0;
      base_bits = vp9_dct_value_cost_ptr[x];
      dx = mul * (dqcoeff[rc] - coeff[rc]);
      d2 = dx * dx;
      tokens[i][0].rate = base_bits + (best ? rate1 : rate0);
      tokens[i][0].error = d2 + (best ? error1 : error0);
      tokens[i][0].next = next;
      tokens[i][0].token = t0;
      tokens[i][0].qc = x;
      best_index[i][0] = best;
      /* Evaluate the second possibility for this state. */
      rate0 = tokens[next][0].rate;
      rate1 = tokens[next][1].rate;

      if ((abs(x) * dequant_ptr[rc != 0] > abs(coeff[rc]) * mul) &&
          (abs(x) * dequant_ptr[rc != 0] <
           abs(coeff[rc]) * mul + dequant_ptr[rc != 0]))
        shortcut = 1;
      else
        shortcut = 0;

      if (shortcut) {
        sz = -(x < 0);
        x -= 2 * sz + 1;
      }

      /* Consider both possible successor states. */
      if (!x) {
        /* If we reduced this coefficient to zero, check to see if
         * we need to move the EOB back here.
         */
        t0 = tokens[next][0].token == EOB_TOKEN ? EOB_TOKEN : ZERO_TOKEN;
        t1 = tokens[next][1].token == EOB_TOKEN ? EOB_TOKEN : ZERO_TOKEN;
      } else {
        t0 = t1 = (vp9_dct_value_tokens_ptr + x)->token;
      }
      if (next < default_eob) {
        band = band_translate[i + 1];
        if (t0 != EOB_TOKEN) {
          pt = trellis_get_coeff_context(scan, nb, i, t0, token_cache);
          rate0 += mb->token_costs[tx_size][type][ref][band][!x][pt]
                                  [tokens[next][0].token];
        }
        if (t1 != EOB_TOKEN) {
          pt = trellis_get_coeff_context(scan, nb, i, t1, token_cache);
          rate1 += mb->token_costs[tx_size][type][ref][band][!x][pt]
                                  [tokens[next][1].token];
        }
      }
      UPDATE_RD_COST();
      /* And pick the best. */
      best = rd_cost1 < rd_cost0;
      base_bits = vp9_dct_value_cost_ptr[x];

      if (shortcut) {
        dx -= (dequant_ptr[rc != 0] + sz) ^ sz;
        d2 = dx * dx;
      }
      tokens[i][1].rate = base_bits + (best ? rate1 : rate0);
      tokens[i][1].error = d2 + (best ? error1 : error0);
      tokens[i][1].next = next;
      tokens[i][1].token = best ? t1 : t0;
      tokens[i][1].qc = x;
      best_index[i][1] = best;
      /* Finally, make this the new head of the trellis. */
      next = i;
    } else {
      /* There's no choice to make for a zero coefficient, so we don't
       * add a new trellis node, but we do need to update the costs.
       */
      band = band_translate[i + 1];
      t0 = tokens[next][0].token;
      t1 = tokens[next][1].token;
      /* Update the cost of each path if we're past the EOB token. */
      if (t0 != EOB_TOKEN) {
        tokens[next][0].rate +=
            mb->token_costs[tx_size][type][ref][band][1][0][t0];
        tokens[next][0].token = ZERO_TOKEN;
      }
      if (t1 != EOB_TOKEN) {
        tokens[next][1].rate +=
            mb->token_costs[tx_size][type][ref][band][1][0][t1];
        tokens[next][1].token = ZERO_TOKEN;
      }
      best_index[i][0] = best_index[i][1] = 0;
      /* Don't update next, because we didn't add a new node. */
    }
  }

  /* Now pick the best path through the whole trellis. */
  band = band_translate[i + 1];
  rate0 = tokens[next][0].rate;
  rate1 = tokens[next][1].rate;
  error0 = tokens[next][0].error;
  error1 = tokens[next][1].error;
  t0 = tokens[next][0].token;
  t1 = tokens[next][1].token;
  rate0 += mb->token_costs[tx_size][type][ref][band][0][ctx][t0];
  rate1 += mb->token_costs[tx_size][type][ref][band][0][ctx][t1];
  UPDATE_RD_COST();
  best = rd_cost1 < rd_cost0;
  final_eob = -1;
  vpx_memset(qcoeff, 0, sizeof(*qcoeff) * (16 << (tx_size * 2)));
  vpx_memset(dqcoeff, 0, sizeof(*dqcoeff) * (16 << (tx_size * 2)));
  for (i = next; i < eob; i = next) {
    const int x = tokens[i][best].qc;
    const int rc = scan[i];
    if (x) {
      final_eob = i;
    }
    qcoeff[rc] = x;
    dqcoeff[rc] = (x * dequant_ptr[rc != 0]) / mul;

    next = tokens[i][best].next;
    best = best_index[i][best];
  }
  final_eob++;

  mb->plane[plane].eobs[block] = final_eob;
  return final_eob;
}
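Both optimize_b() variants above rely on the UPDATE_RD_COST() macro, which this listing does not include. In libvpx it combines rate and squared-error distortion into the two candidate rate-distortion costs, roughly as follows (a sketch based on the RDCOST macro; the rounding constant and shift may differ between library versions):

/* Sketch of the rate-distortion cost macros used by optimize_b(). */
#define RDCOST(RM, DM, R, D) (((128 + (R) * (RM)) >> 8) + ((D) << (DM)))

#define UPDATE_RD_COST()                               \
  {                                                    \
    rd_cost0 = RDCOST(rdmult, rddiv, rate0, error0);   \
    rd_cost1 = RDCOST(rdmult, rddiv, rate1, error1);   \
  }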
void RTCHokuyoAIST::request_scan()
{
    coil::Guard<coil::Mutex> guard(mutex_);
    get_scan();
}