static ucl_bool ptr_check(void)
{
    ucl_bool r = 1;
    int i;
    char _wrkmem[10 * sizeof(ucl_byte *) + sizeof(ucl_align_t)];
    ucl_byte *wrkmem;
    ucl_bytepp dict;
    unsigned char x[4 * sizeof(ucl_align_t)];
    long d;
    ucl_align_t a;

    for (i = 0; i < (int) sizeof(x); i++)
        x[i] = UCL_BYTE(i);

    wrkmem = (ucl_byte *) UCL_PTR_ALIGN_UP(_wrkmem, sizeof(ucl_align_t));

    /* Expect a compiler warning on architectures that
     * do not allow unaligned access. */
    dict = (ucl_bytepp) wrkmem;

    d = (long) ((const ucl_bytep) dict - (const ucl_bytep) _wrkmem);
    r &= __ucl_assert(d >= 0);
    r &= __ucl_assert(d < (long) sizeof(ucl_align_t));

    memset(&a, 0xff, sizeof(a));
    r &= __ucl_assert(a.a_ushort == USHRT_MAX);
    r &= __ucl_assert(a.a_uint == UINT_MAX);
    r &= __ucl_assert(a.a_ulong == ULONG_MAX);
    r &= __ucl_assert(a.a_ucl_uint == UCL_UINT_MAX);

    /* sanity check of the memory model */
    if (r == 1) {
        for (i = 0; i < 8; i++)
            r &= __ucl_assert((const ucl_voidp) (&dict[i]) ==
                              (const ucl_voidp) (&wrkmem[i * sizeof(ucl_byte *)]));
    }

    /* check BZERO8_PTR and that NULL == 0 */
    memset(&a, 0, sizeof(a));
    r &= __ucl_assert(a.a_charp == NULL);
    r &= __ucl_assert(a.a_ucl_bytep == NULL);
    r &= __ucl_assert(NULL == 0);
    if (r == 1) {
        for (i = 0; i < 10; i++)
            dict[i] = wrkmem;
        BZERO8_PTR(dict+1, sizeof(dict[0]), 8);
        r &= __ucl_assert(dict[0] == wrkmem);
        for (i = 1; i < 9; i++)
            r &= __ucl_assert(dict[i] == NULL);
        r &= __ucl_assert(dict[9] == wrkmem);
    }

    /* check that the pointer constructs work as expected */
    if (r == 1) {
        unsigned k = 1;
        const unsigned n = (unsigned) sizeof(ucl_uint32);
        ucl_byte *p0;
        ucl_byte *p1;

        k += __ucl_align_gap(&x[k], n);
        p0 = (ucl_bytep) &x[k];
#if defined(PTR_LINEAR)
        r &= __ucl_assert((PTR_LINEAR(p0) & (n-1)) == 0);
#else
        r &= __ucl_assert(n == 4);
        r &= __ucl_assert(PTR_ALIGNED_4(p0));
#endif

        r &= __ucl_assert(k >= 1);
        p1 = (ucl_bytep) &x[1];
        r &= __ucl_assert(PTR_GE(p0, p1));

        r &= __ucl_assert(k < 1 + n);
        p1 = (ucl_bytep) &x[1+n];
        r &= __ucl_assert(PTR_LT(p0, p1));

        /* now check that aligned memory access doesn't core dump */
        if (r == 1) {
            /* Expect 2 compiler warnings on architectures that
             * do not allow unaligned access. */
            ucl_uint32 v0 = * (ucl_uint32 *) &x[k];
            ucl_uint32 v1 = * (ucl_uint32 *) &x[k+n];
            r &= __ucl_assert(v0 > 0);
            r &= __ucl_assert(v1 > 0);
        }
    }

    return r;
}
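A check like ptr_check() is meant to run exactly once, at library start-up, with its verdict gating initialization. UCL's real entry point is ucl_init(), whose internals are not shown here, so the wrapper below is only a sketch of how the verdict would typically be consumed; my_init and its caching convention are hypothetical, not UCL's API.

/* Minimal sketch of consuming the self-test (assumption: my_init and its
 * return convention are illustrative, not part of UCL).  Run ptr_check()
 * once, cache the verdict, and refuse to initialize on failure. */
static int self_test = -1;          /* -1: not yet run */

int my_init(void)                   /* hypothetical wrapper, not ucl_init() */
{
    if (self_test < 0)
        self_test = (ptr_check() == 1);
    return self_test ? 0 : -1;      /* 0: OK, -1: platform assumptions violated */
}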
/* Walk `block` in `step`-sized strides and verify that LZO_PTR_ALIGN_UP
 * agrees with plain pointer arithmetic at every stride.  Returns the
 * number of verified strides, or 0 on the first mismatch. */
static unsigned long align_test(lzo_bytep block, lzo_uint len, lzo_uint step)
{
    lzo_bytep b1 = block;
    lzo_bytep b2 = block;
    lzo_bytep k1 = NULL;
    lzo_bytep k2 = NULL;
    lzo_bytep k;
    lzo_bytep x;
    lzo_uint offset = 0;
    unsigned long i = 0;

    assert(step > 0);
    assert(step <= 65536L);
    assert((step & (step - 1)) == 0);   /* step must be a power of two */

    for (offset = step; offset < len; offset += step) {
        /* aligning up from one past the previous boundary must land
         * exactly on the next stride */
        k1 = LZO_PTR_ALIGN_UP(b1 + 1, step);
        k2 = b2 + offset;
        if (k1 != k2) {
            printf("error 1: i %lu step %ld offset %ld: "
                   "%p (%ld) %p (%ld)\n",
                   i, (long) step, (long) offset,
                   k1, (long) (k1 - block),
                   k2, (long) (k2 - block));
            return 0;
        }
        if (k1 - step != b1) {
            printf("error 2: i %lu step %ld offset %ld: "
                   "%p (%ld) %p (%ld)\n",
                   i, (long) step, (long) offset,
                   b1, (long) (b1 - block),
                   k1, (long) (k1 - block));
            return 0;
        }
        assert(k1 > b1);
        assert(k2 > b2);
        assert((lzo_uint)(k2 - b2) == offset);
        assert(k1 - offset == b2);
#if defined(PTR_ALIGNED_4)
        if (step == 4) {
            assert(PTR_ALIGNED_4(k1));
            assert(PTR_ALIGNED_4(k2));
            assert(PTR_ALIGNED2_4(k1, k2));
        }
#endif
#if defined(PTR_ALIGNED_8)
        if (step == 8) {
            assert(PTR_ALIGNED_8(k1));
            assert(PTR_ALIGNED_8(k2));
            assert(PTR_ALIGNED2_8(k1, k2));
        }
#endif
#if defined(PTR_LINEAR)
        assert((PTR_LINEAR(k1) & (step - 1)) == 0);
        assert((PTR_LINEAR(k2) & (step - 1)) == 0);
#endif
        /* every pointer in (b1, k1] must align up to the same boundary k1 */
        for (k = b1 + 1; k <= k1; k++) {
            x = LZO_PTR_ALIGN_UP(k, step);
            if (x != k1) {
                printf("error 3: base: %p %p %p i %lu step %ld offset %ld: "
                       "%p (%ld) %p (%ld) %p (%ld)\n",
                       block, b1, b2, i, (long) step, (long) offset,
                       k1, (long) (k1 - block),
                       k, (long) (k - block),
                       x, (long) (x - block));
                return 0;
            }
        }
        b1 = k1;
        i++;
    }
    return i;
}
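Note that align_test() expects a region whose start is already aligned to step: the "error 1" invariant k1 == b2 + offset only holds when block itself sits on a step boundary. A driver therefore has to advance the buffer start to the next boundary before each call. The harness below is a sketch of that shape; the buffer size, the name run_align_tests, and the sweep over power-of-two steps are assumptions about how such a driver would be written, not LZO's own test program.

/* Sketch of a driver (hypothetical; not LZO's own harness).  For each
 * power-of-two step, advance the start of the buffer to the next step
 * boundary with LZO_PTR_ALIGN_UP (the same internal macro align_test
 * uses), then let align_test() walk the remainder. */
#include <stdlib.h>                 /* malloc, free */

#define BUF_LEN (256 * 1024L)       /* assumed size; must exceed 2 * 65536 */

static int run_align_tests(void)
{
    lzo_bytep buf = (lzo_bytep) malloc(BUF_LEN);
    lzo_uint step;

    if (buf == NULL)
        return 1;
    for (step = 1; step <= 65536L; step *= 2) {
        lzo_bytep base = LZO_PTR_ALIGN_UP(buf, step);
        lzo_uint len = BUF_LEN - (lzo_uint) (base - buf);
        /* len > 2*step for every step here, so a return value of 0
         * always means a genuine mismatch, never an empty loop */
        if (align_test(base, len, step) == 0) {
            free(buf);              /* align_test already printed the details */
            return 1;
        }
    }
    free(buf);
    return 0;
}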