/*
 * Incremental (possibly self-overlapping) copy of len bytes from src to op,
 * as used for LZ77-style back-references where the source and destination
 * regions may overlap (op - src can be smaller than len).
 *
 * NOTE(review): copies in 8-byte words, so it can store bytes past op + len;
 * callers presumably guarantee slack space at the end of the output buffer —
 * confirm against the call sites.
 */
static inline void IncrementalCopyFastPath(const char *src, char *op, int len) {
  /*
   * While the distance op - src is < 8, a full 8-byte copy would read bytes
   * that have not been written yet.  Instead, copy the existing (op - src)-byte
   * pattern and advance op only (src stays put), which doubles the distance
   * between src and op on every iteration until a whole-word copy is safe.
   */
  while (op - src < 8) {
    UNALIGNED_STORE64(op, UNALIGNED_LOAD64(src));
    len -= op - src;
    op += op - src;
  }
  /* Distance is now >= 8: plain word-at-a-time copy of the remainder. */
  while (len > 0) {
    UNALIGNED_STORE64(op, UNALIGNED_LOAD64(src));
    src += 8;
    op += 8;
    len -= 8;
  }
}
/*
 * Copy 8 bytes from src to dst, tolerating unaligned addresses.
 *
 * On 32-bit targets this is done as two 4-byte moves, which can be cheaper
 * than a single UNALIGNED_LOAD64 + UNALIGNED_STORE64 pair on some platforms
 * (notably ARM).  The pointer-width test is a compile-time constant, so the
 * untaken branch is eliminated by the compiler.
 */
static inline void unaligned_copy64(const void *src, void *dst) {
  if (sizeof(void *) != 8) {
    /* 32-bit build: split the copy into two 32-bit halves. */
    const char *s = (const char *)src;
    char *d = (char *)dst;
    UNALIGNED_STORE32(d, UNALIGNED_LOAD32(s));
    UNALIGNED_STORE32(d + 4, UNALIGNED_LOAD32(s + 4));
  } else {
    /* 64-bit build: one word-sized move suffices. */
    UNALIGNED_STORE64(dst, UNALIGNED_LOAD64(src));
  }
}