Example #1
unsigned udns_jranval(struct udns_jranctx *x) {
  /* This routine can be made to work with either 32- or 64-bit words
   * if JRAN_32_64 is defined when compiling the file.
   * We use if() instead of #if since there's no good
   * portable way to check sizeof() in the preprocessor without
   * introducing ugly configure-time checks.
   * Most compilers will optimize the dead branches away anyway.
   * By default, 32-bit integers are assumed.
   */
#ifdef JRAN_32_64
  if (sizeof(unsigned) == 4) {
#endif
    unsigned e = tr32(x->a - rot32(x->b, 27));
    x->a = tr32(x->b ^ rot32(x->c, 17));
    x->b = tr32(x->c + x->d);
    x->c = tr32(x->d + e);
    x->d = tr32(e + x->a);
#ifdef JRAN_32_64
  }
  else if (sizeof(unsigned) == 8) { /* assume 64-bit words */
    unsigned e = x->a - rot64(x->b, 7);
    x->a = x->b ^ rot64(x->c, 13);
    x->b = x->c + rot64(x->d, 37);
    x->c = x->d + e;
    x->d = e + x->a;
  }
  else {
    unsigned e = 0;
    x->d = 1/e; /* deliberate divide-by-zero: unsupported word size, bail */
  }
#endif
  return x->d;
}
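The snippet above relies on helpers defined elsewhere in udns. A minimal sketch of plausible definitions, assuming left-rotations and a 32-bit truncation mask (the exact udns macros may differ):

/* Assumed helpers for the snippet above: left-rotate and truncate. */
#define rot32(x, k) (((x) << (k)) | ((x) >> (32 - (k))))
#define rot64(x, k) (((x) << (k)) | ((x) >> (64 - (k))))
#define tr32(x)     ((x) & 0xffffffffu)

/* PRNG state: four words, as read and written by udns_jranval(). */
struct udns_jranctx { unsigned a, b, c, d; };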
Example #2
static __always_inline uint64_t final128(uint64_t a, uint64_t b, uint64_t c,
                                         uint64_t d, uint64_t *h) {
  mixup64(&a, &b, rot64(c, 41) ^ d, prime_0);
  mixup64(&b, &c, rot64(d, 23) ^ a, prime_6);
  mixup64(&c, &d, rot64(a, 19) ^ b, prime_5);
  mixup64(&d, &a, rot64(b, 31) ^ c, prime_4);
  *h = c + d;
  return a ^ b;
}
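final128() depends on mixup64(), which is not shown. A hedged sketch, assuming it is a widening 64x64 multiply whose low half is XORed into one lane and whose high half is added into the other (the real t1ha helper may differ; this uses the GCC/Clang __uint128_t extension):

#include <stdint.h>

/* Assumed sketch of mixup64(): r = (*b + v) * prime as 128 bits,
 * then fold both halves back into the two lanes. */
static inline void mixup64(uint64_t *a, uint64_t *b, uint64_t v,
                           uint64_t prime) {
  __uint128_t r = (__uint128_t)(*b + v) * prime;
  *a ^= (uint64_t)r;         /* low 64 bits */
  *b += (uint64_t)(r >> 64); /* high 64 bits */
}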
Example #3
static __inline uint64_t final_weak_avalanche(uint64_t a, uint64_t b) {
  /* LY: for performance reasons on some lower-end CPUs
   * I replaced the second mux64() operation with mix64().
   * Unfortunately this approach fails the "strict avalanche criterion",
   * see test results at https://github.com/demerphq/smhasher. */
  return mux64(rot64(a + b, 17), prime_4) + mix64(a ^ b, prime_0);
}
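For contrast with mix64() in the next example, here is a hedged sketch of mux64(), assuming it XOR-folds the two halves of a 128-bit product (again via __uint128_t; details assumed):

#include <stdint.h>

/* Assumed sketch of mux64(): widening multiply, XOR-fold the halves. */
static inline uint64_t mux64(uint64_t v, uint64_t prime) {
  __uint128_t r = (__uint128_t)v * prime;
  return (uint64_t)r ^ (uint64_t)(r >> 64);
}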
Example #4
/* multiply-then-xor mixer: multiply by a prime, XOR with a rotated copy */
static __inline uint64_t mix64(uint64_t v, uint64_t p) {
  v *= p;
  return v ^ rot64(v, 41);
}
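mix64() needs only rot64(). A minimal sketch, assuming a plain 64-bit rotation (the direction is an assumption; either direction mixes):

#include <stdint.h>

/* Assumed sketch of rot64(): rotate right by s bits, 0 < s < 64. */
static inline uint64_t rot64(uint64_t v, unsigned s) {
  return (v >> s) | (v << (64 - s));
}

With that in place, mix64(x, p) costs one multiply plus a rotate and an XOR, which is why Example #3 trades the second mux64() for it on slower CPUs.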
Example #5
static __always_inline void squash(t1ha_state256_t *s) {
  s->n.a ^= prime_6 * (s->n.c + rot64(s->n.d, 23));
  s->n.b ^= prime_5 * (rot64(s->n.c, 19) + s->n.d);
}
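squash() and init_cd() below only touch s->n.a through s->n.d. A hedged sketch of the state layout that implies (the real t1ha definition may wrap the lanes in a union with byte or word views):

#include <stdint.h>

/* Assumed sketch of the 256-bit state: four named 64-bit lanes. */
typedef struct {
  struct {
    uint64_t a, b, c, d;
  } n;
} t1ha_state256_t;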
Example #6
static __always_inline void init_cd(t1ha_state256_t *s, uint64_t x,
                                    uint64_t y) {
  s->n.c = rot64(y, 23) + ~x;
  s->n.d = ~y + rot64(x, 19);
}
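Assuming the rot64() and t1ha_state256_t sketches above (and __always_inline defined, e.g. as plain inline), a minimal illustrative use of init_cd() with two arbitrary seed words:

#include <inttypes.h>
#include <stdio.h>

/* Illustrative only: the seeds are arbitrary, not t1ha test vectors. */
int main(void) {
  t1ha_state256_t s;
  init_cd(&s, UINT64_C(0x0123456789abcdef), UINT64_C(0xfedcba9876543210));
  printf("c=%016" PRIx64 " d=%016" PRIx64 "\n", s.n.c, s.n.d);
  return 0;
}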