/**
 * Dequantize an H.263 inter-coded block — Alpha (AXP) SIMD-in-register version.
 *
 * Processes four packed 16-bit coefficients per 64-bit word.  Each nonzero
 * level is mapped to level * (2*qscale) + sign(level) * qadd, computed
 * branch-free with per-subword masks.
 *
 * NOTE(review): WORD_VEC / ldq / stq / zap / cmpbge / maxsw4 / minsw4 are
 * Alpha helpers defined elsewhere in this file; assumed to broadcast a
 * 16-bit value, load/store 64 bits, and do Alpha byte-zap / byte-compare —
 * confirm against their definitions.
 *
 * @param s      codec context (supplies scan table and block_last_index)
 * @param block  coefficient block, modified in place
 * @param n      block index within the macroblock
 * @param qscale quantizer scale
 */
static void dct_unquantize_h263_inter_axp(MpegEncContext *s, DCTELEM *block,
                                          int n, int qscale)
{
    int i, n_coeffs;
    uint64_t qmul, qadd;
    uint64_t correction;

    /* qadd = (qscale - 1) | 1, replicated into all four 16-bit subwords. */
    qadd = WORD_VEC((qscale - 1) | 1);
    qmul = qscale << 1;
    /* This mask kills spill from negative subwords to the next subword.  */
    correction = WORD_VEC((qmul - 1) + 1); /* multiplication / addition */

    /* Only coefficients up to the last nonzero one (raster order) matter. */
    n_coeffs = s->intra_scantable.raster_end[s->block_last_index[n]];

    for(i = 0; i <= n_coeffs; block += 4, i += 4) {
        uint64_t levels, negmask, zeros, add;

        /* Four coefficients at a time. */
        levels = ldq(block);
        if (levels == 0)
            continue;

#ifdef __alpha_max__
        /* I don't think the speed difference justifies runtime
           detection.  */
        negmask = maxsw4(levels, -1); /* negative -> ffff (-1) */
        negmask = minsw4(negmask, 0); /* positive -> 0000 (0) */
#else
        /* Same per-subword sign mask without the MAX extension:
           cmpbge flags the bytes of non-negative subwords; the &-step
           propagates each high byte's flag to its low byte; zap then
           clears those bytes from all-ones, leaving ffff exactly in
           the negative subwords. */
        negmask = cmpbge(WORD_VEC(0x7fff), levels);
        negmask &= (negmask >> 1) | (1 << 7);
        negmask = zap(-1, negmask);
#endif

        /* Byte mask flagging coefficients that are zero (so qadd can be
           suppressed for them below). */
        zeros = cmpbge(0, levels);
        zeros &= zeros >> 1;
        /* zeros |= zeros << 1 is not needed since qadd <= 255, so
           zapping the lower byte suffices.  */

        levels *= qmul;
        /* Cancel the multiply's spill from each negative subword into
           the subword above it. */
        levels -= correction & (negmask << 16);

        /* Negate qadd for negative levels.  */
        add = qadd ^ negmask;
        add += WORD_VEC(0x0001) & negmask;
        /* Set qadd to 0 for levels == 0.  */
        add = zap(add, zeros);

        levels += add;

        stq(levels, block);
    }
}
/*
 * Expand 40 bits (eight 5-bit groups, left-justified in d1) into eight
 * base32 characters and store them big-endian at dst.  Values 0-25 map
 * to 'a'-'z' and 26-31 to '2'-'7'.  Returns dst advanced past the
 * eight bytes written.
 */
static unsigned char *do_40bit(unsigned char *dst, uint64_t d1)
{
    uint64_t spread;

    /* Three shift-and-merge rounds pull the eight 5-bit groups apart:
       first into two 20-bit halves, then four 10-bit quarters, then
       one group per byte. */
    spread = (d1 & 0xFFFFFFFF00000000ULL) | ((d1 >> 12) & 0x00000000FFFFFFFFULL);
    spread = (spread & 0xFFFF0000FFFF0000ULL) | ((spread >> 6) & 0x0000FFFF0000FFFFULL);
    spread = (spread & 0xFF00FF00FF00FF00ULL) | ((spread >> 3) & 0x00FF00FF00FF00FFULL);

    /* Right-justify each 5-bit group within its byte. */
    spread = (spread >> 3) & 0x1F1F1F1F1F1F1F1FULL;

    /* Map 0-25 to 'a'-'z'; any byte that ran past 'z' (>= 0x7b) is
       pulled down by 0x49, turning 26-31 into '2'-'7'. */
    spread += 0x6161616161616161ULL;
    spread -= zapnot(0x4949494949494949ULL, cmpbge(spread, 0x7B7B7B7B7B7B7B7BULL));

    put_unaligned_be64(spread, dst);
    return dst + 8;
}
/**
 * Shared H.263 dequantization core — Alpha (AXP) SIMD-in-register version.
 *
 * Maps each nonzero 16-bit coefficient to level * (2*qscale) +
 * sign(level) * qadd, four coefficients per 64-bit word, branch-free.
 * Unlike the inter/intra wrappers' inline variant, this core applies
 * qadd with separate masked add/sub operands, paired with a correction
 * of qmul - 1 per subword.
 *
 * NOTE(review): WORD_VEC / ldq / stq / zap / cmpbge / maxsw4 / minsw4 are
 * Alpha helpers defined elsewhere in this file — confirm against their
 * definitions.
 *
 * @param block    coefficient block, modified in place
 * @param n_coeffs index of the last coefficient to process
 * @param qscale   quantizer scale (scalar)
 * @param qadd     scalar rounding offset; broadcast to all subwords here
 */
static void dct_unquantize_h263_axp(DCTELEM *block, int n_coeffs,
                                    uint64_t qscale, uint64_t qadd)
{
    uint64_t qmul = qscale << 1;
    /* Kills the multiply's spill from negative subwords into the subword
       above; qmul * 255 >> 8 equals qmul - 1 while qmul <= 256. */
    uint64_t correction = WORD_VEC(qmul * 255 >> 8);
    int i;

    /* Replicate the scalar qadd into all four 16-bit subwords. */
    qadd = WORD_VEC(qadd);

    for(i = 0; i <= n_coeffs; block += 4, i += 4) {
        uint64_t levels, negmask, zeros, add, sub;

        /* Four coefficients at a time. */
        levels = ldq(block);
        if (levels == 0)
            continue;

#ifdef __alpha_max__
        /* I don't think the speed difference justifies runtime
           detection.  */
        negmask = maxsw4(levels, -1); /* negative -> ffff (-1) */
        negmask = minsw4(negmask, 0); /* positive -> 0000 (0) */
#else
        /* Same per-subword sign mask without the MAX extension:
           cmpbge flags the bytes of non-negative subwords; zap then
           clears those from all-ones, leaving ffff exactly in the
           negative subwords. */
        negmask = cmpbge(WORD_VEC(0x7fff), levels);
        negmask &= (negmask >> 1) | (1 << 7);
        negmask = zap(-1, negmask);
#endif

        /* Byte mask flagging coefficients that are zero (so qadd can be
           suppressed for them below). */
        zeros = cmpbge(0, levels);
        zeros &= zeros >> 1;
        /* zeros |= zeros << 1 is not needed since qadd <= 255, so
           zapping the lower byte suffices.  */

        levels *= qmul;
        /* Cancel the multiply's spill from each negative subword into
           the subword above it. */
        levels -= correction & (negmask << 16);

        /* +qadd for positive, -qadd for negative coefficients, via two
           disjoint masked operands. */
        add = qadd & ~negmask;
        sub = qadd & negmask;
        /* Set qadd to 0 for levels == 0.  */
        add = zap(add, zeros);
        levels += add;
        levels -= sub;

        stq(levels, block);
    }
}
/**
 * Dequantize an H.263 intra-coded block — Alpha (AXP) SIMD-in-register version.
 *
 * Same per-coefficient mapping as the inter version (level * (2*qscale) +
 * sign(level) * qadd, four packed 16-bit coefficients per 64-bit word),
 * but the DC coefficient block[0] is scaled separately (by y_dc_scale or
 * c_dc_scale) and restored after the vector loop, and AIC mode forces
 * qadd = 0.
 *
 * Fix: block0 is now zero-initialized.  The old code left it
 * uninitialized on the s->h263_aic path and relied solely on the final
 * `s->mb_intra && !s->h263_aic` guard to avoid reading it, drawing
 * compiler warnings (the original comment even said "might not be used
 * uninitialized").  Behavior is unchanged: whenever the value is read,
 * it was assigned in the !h263_aic branch.
 *
 * @param s      codec context (DC scales, h263_aic / mb_intra flags)
 * @param block  coefficient block, modified in place
 * @param n      block index within the macroblock (< 4: luma, else chroma)
 * @param qscale quantizer scale
 */
static void dct_unquantize_h263_intra_axp(MpegEncContext *s, DCTELEM *block,
                                          int n, int qscale)
{
    int i, n_coeffs;
    uint64_t qmul, qadd;
    uint64_t correction;
    DCTELEM *orig_block = block;
    DCTELEM block0 = 0; /* assigned below whenever it is actually used */

    /* qadd = (qscale - 1) | 1, replicated into all four 16-bit subwords. */
    qadd = WORD_VEC((qscale - 1) | 1);
    qmul = qscale << 1;
    /* This mask kills spill from negative subwords to the next subword.  */
    correction = WORD_VEC((qmul - 1) + 1); /* multiplication / addition */

    if (!s->h263_aic) {
        /* DC is scaled separately; remember the scaled value and restore
           it after the vector loop below has overwritten block[0]. */
        if (n < 4)
            block0 = block[0] * s->y_dc_scale;
        else
            block0 = block[0] * s->c_dc_scale;
    } else {
        qadd = 0;
    }
    n_coeffs = 63; // does not always use zigzag table

    for(i = 0; i <= n_coeffs; block += 4, i += 4) {
        uint64_t levels, negmask, zeros, add;

        /* Four coefficients at a time. */
        levels = ldq(block);
        if (levels == 0)
            continue;

#ifdef __alpha_max__
        /* I don't think the speed difference justifies runtime
           detection.  */
        negmask = maxsw4(levels, -1); /* negative -> ffff (-1) */
        negmask = minsw4(negmask, 0); /* positive -> 0000 (0) */
#else
        /* Per-subword sign mask without the MAX extension: ffff in
           negative subwords, 0000 elsewhere. */
        negmask = cmpbge(WORD_VEC(0x7fff), levels);
        negmask &= (negmask >> 1) | (1 << 7);
        negmask = zap(-1, negmask);
#endif

        /* Byte mask flagging coefficients that are zero. */
        zeros = cmpbge(0, levels);
        zeros &= zeros >> 1;
        /* zeros |= zeros << 1 is not needed since qadd <= 255, so
           zapping the lower byte suffices.  */

        levels *= qmul;
        /* Cancel the multiply's spill from each negative subword into
           the subword above it. */
        levels -= correction & (negmask << 16);

        /* Negate qadd for negative levels.  */
        add = qadd ^ negmask;
        add += WORD_VEC(0x0001) & negmask;
        /* Set qadd to 0 for levels == 0.  */
        add = zap(add, zeros);

        levels += add;

        stq(levels, block);
    }

    /* Restore the separately scaled DC value computed above. */
    if (s->mb_intra && !s->h263_aic)
        orig_block[0] = block0;
}