/* Compare N bytes of M1 and M2; returns 0 if equal, else the signed
   difference of the first mismatching bytes.  Optimized for Cavium:
   prefetches both buffers, then compares 64 bits at a time when N is
   large, using unaligned-capable loads when the pointers are not
   mutually aligned. */
int memcmp(const void *m1, const void *m2, __kernel_size_t n)
{
	const u8 *s1 = (u8 *)m1;
	const u8 *s2 = (u8 *)m2;
	const u64 *a1, *a2;

	/* If the size is too small or either pointer is unaligned then we punt
	 * to the byte compare loop. Hopefully this will not turn up in inner
	 * loops. */
	CVMX_PREFETCH0(m1);
	CVMX_PREFETCH0(m2);
	if (!TOO_SMALL(n) && !UNALIGNED(s1, s2)) {
		/* Otherwise, load and compare the blocks of memory to use
		 * one word at a time. */
		a1 = (u64 *)s1;
		a2 = (u64 *)s2;
		while (n >= LBLOCKSIZE) {
			if (*a1 != *a2)
				break;	/* mismatching word: locate it bytewise below */
			a1++;
			a2++;
			n -= LBLOCKSIZE;
		}
		/* check m mod LBLOCKSIZE remaining characters */
		s1 = (u8 *)a1;
		s2 = (u8 *)a2;
	} else if (!TOO_SMALL(n) && UNALIGNED(s1, s2)) {
		u64 t1, t2;
		/* Mutually unaligned pointers: use unaligned 64-bit loads.
		   NOTE(review): CVMX_LOADUNA_INT64 is presumably the
		   Octeon unaligned-load helper (dest, base, offset) --
		   confirm its semantics in the SDK docs. */
		while (n >= LBLOCKSIZE) {
			CVMX_LOADUNA_INT64(t1, s1, 0);
			CVMX_LOADUNA_INT64(t2, s2, 0);
			if (t1 != t2)
				break;
			s1 += LBLOCKSIZE;
			s2 += LBLOCKSIZE;
			n-= LBLOCKSIZE;
		}
	}
	/* Check m mod LBLOCKSIZE remaining characters */
	while (n--) {
		if (*s1 != *s2)
			return *s1 - *s2;
		s1++;
		s2++;
	}
	return 0;
}
/* Detect whether the character used to fill mask is in X */
#define DETECTCHAR(X,MASK) (DETECTNULL(X^MASK))
#endif /* OPTIMIZED_FOR_SIZE */

/* Return a pointer to the first occurrence of C (converted to char)
   in S, or NULL (0) if absent.  Passing c == '\0' locates the
   terminator itself. */
static char *
strchr (const char *s, int c)
{
  char pc = (char) c;
#ifndef OPTIMIZED_FOR_SIZE
  const unsigned long *ps;
  unsigned long mask = 0;
  size_t i;

  /* Special case for finding \0. */
  if (c == '\0')
    {
      /* Advance bytewise until word-aligned, checking for the
         terminator on the way. */
      while (UNALIGNED (s))
        {
          if (*s == '\0')
            return (char *) s;
          ++s;
        }
      /* Operate a word at a time. */
      ps = (const unsigned long *) s;
      while (!DETECTNULL (*ps))
        ++ps;
      /* Found the end of the string.  Pin down the exact byte. */
      s = (const char *) ps;
      while (*s != '\0')
        ++s;
      return (char *) s;
    }

  /* All other bytes.  Align the pointer, then search a long at a time. */
  while (UNALIGNED (s))
    {
      if (*s == '\0' || *s == pc)
        return *s ? (char *) s : 0;   /* NUL reached first -> not found */
      ++s;
    }

  ps = (const unsigned long *) s;

  /* Replicate the target byte into every byte position of MASK. */
  for (i = 0; i < sizeof (unsigned long); ++i)
    mask = ((mask << CHAR_BIT) + ((unsigned char) pc & ~(~0 << CHAR_BIT)));

  /* Move ps a block at a time; stop at the first word holding either
     a NUL or the target byte. */
  while (!DETECTNULL (*ps) && !DETECTCHAR (*ps, mask))
    ++ps;

  /* Pick up any residual with a byte searcher. */
  s = (const char *) ps;
#endif /* OPTIMIZED_FOR_SIZE */

  /* The normal byte-search loop. */
  while (*s && *s != pc)
    ++s;

  return *s == pc ? (char *) s : 0;
}
/* Detect whether the character used to fill mask is in X */
#define DETECTCHAR(X,MASK) (DETECTNULL(X^MASK))
#endif /* OPTIMIZED_FOR_SIZE */

/* Return a pointer to the first occurrence of C (converted to char)
   in S, or NULL (0) if absent.  c == '\0' finds the terminator. */
static char *
strchr (const char *s, int c)
{
  char pc = (char) c;
#ifndef OPTIMIZED_FOR_SIZE
  unsigned long *ps, mask = 0;
  size_t i;

  /* If s is unaligned, punt into the byte search loop.
     This should be rare. */
  if (!UNALIGNED (s))
    {
      ps = (unsigned long *) s;
      /* Replicate the target byte into every byte position of MASK. */
      for (i = 0; i < sizeof (unsigned long); i++)
        mask = ((mask << CHAR_BIT) + ((unsigned char) pc & ~(~0 << CHAR_BIT)));

      /* Move ps a block at a time; stop at the first word holding
         either a NUL or the target byte. */
      while (!DETECTNULL (*ps) && !DETECTCHAR (*ps, mask))
        ps++;

      /* Pick up any residual with a byte searcher. */
      s = (char *) ps;
    }
#endif

  /* The normal byte-search loop. */
  while (*s && *s != pc)
    s++;
  return *s == pc ? (char *) s : 0;
}
/* Lexicographically compare the NUL-terminated strings STR1 and STR2.
   Returns 0 if equal, otherwise the difference of the first pair of
   mismatching bytes (as unsigned chars).  Word-at-a-time fast path
   when both pointers are mutually aligned. */
int strcmp(const char* str1, const char* str2)
{
  if (!UNALIGNED (str1, str2))
    {
      unsigned long *w1 = (unsigned long *) str1;
      unsigned long *w2 = (unsigned long *) str2;

      /* Walk matching words; an equal word containing a NUL means
         the strings are identical. */
      for (; *w1 == *w2; ++w1, ++w2)
        {
          if (DETECTNULL (*w1))
            return 0;
        }

      /* A word differed; finish byte by byte from here. */
      str1 = (char *) w1;
      str2 = (char *) w2;
    }

  for (; *str1 != '\0' && *str1 == *str2; ++str1, ++str2)
    ;

  return (*(unsigned char *) str1) - (*(unsigned char *) str2);
}
/* Copy the NUL-terminated string FROM into TO (including the
   terminator) and return TO.  Copies word-at-a-time while both
   pointers are mutually aligned and the next source word contains
   no NUL byte. */
char* strcpy(char *to, const char *from)
{
  char *d = to;
  const char *s = from;

  if (!UNALIGNED (s, d))
    {
      long *dw = (long *) d;
      const long *sw = (const long *) s;

      /* Word copies until a word containing the terminator shows up. */
      for (; !DETECTNULL (*sw); )
        *dw++ = *sw++;

      d = (char *) dw;
      s = (const char *) sw;
    }

  /* Byte copy finishes the job and writes the terminator. */
  while ((*d++ = *s++) != '\0')
    ;

  return to;
}
/* Return the number of bytes in STR before the NUL terminator.
   When speed is preferred and STR is word-aligned, scans a word at a
   time for a word containing a NUL byte; otherwise scans bytewise. */
size_t strlen(const char *str)
{
#if defined(PREFER_SIZE_OVER_SPEED) || defined(__OPTIMIZE_SIZE__)
  const char *p = str;
  while (*p)
    ++p;
  return p - str;
#else
  const char *base = str;

  if (!UNALIGNED(str))
    {
      /* Word-aligned: skip ahead a word at a time until some word
         holds a NUL byte. */
      unsigned long *w = (unsigned long *) str;
      for (; !DETECTNULL (*w); ++w)
        ;
      str = (char *) w;
    }

  /* Pin down the exact NUL position bytewise. */
  while (*str)
    ++str;
  return str - base;
#endif /* not PREFER_SIZE_OVER_SPEED */
}
/** Compares two memory locations for a given amount of bytes.
 *
 * \param m1 First memory location
 * \param m2 Second memory location
 * \param length Length of the area in bytes
 * \return The difference between the values in the first different
 *         position, 0 if the areas are equal
 * \see \ref strcmp
 */
u32 memcmp(const void* m1, const void* m2, size_t length)
{
	u32* a = (u32*) m1;
	u32* b = (u32*) m2;
	u8* ab;
	u8* bb;

	/* Word-at-a-time compare while both pointers are mutually
	   aligned and at least one full word remains. */
	if(!UNALIGNED(m1, m2)) {
		while(length >= sizeof(u32)) {
			if(*a != *b) {
				break;
			}
			a++;
			b++;
			length -= sizeof(u32);
		}
	}

	ab = (u8*) a;
	bb = (u8*) b;

	/* Byte compare of the remainder (or of the whole area when the
	   pointers were unaligned).
	   BUG FIX: the increments of ab/bb used to sit inside the
	   mismatch branch *after* the return statement, so they never
	   executed; equal leading bytes were compared over and over
	   while LENGTH ran down and any later difference was missed. */
	while(length--) {
		if(*ab != *bb) {
			return *ab - *bb;
		}
		ab++;
		bb++;
	}
	return 0;
}
/* Copy pSrc into pDest and append pSrc2 immediately after it
   (a combined strcpy + strcat).  Returns pDest.  The destination
   buffer must be large enough for both strings plus a terminator. */
char* MFString_CopyCat(char *pDest, const char *pSrc, const char *pSrc2)
{
#if !defined(PREFER_SPEED_OVER_SIZE)
	char *s = pDest;

	/* Copy pSrc up to (excluding) its terminator... */
	while((*pDest = *pSrc++))
	{
		++pDest;
	}
	/* ...then append pSrc2 including its terminator. */
	while((*pDest++ = *pSrc2++))
	{
	}
	return s;
#else
	/* BUG FIX: this branch previously referenced undeclared
	   identifiers (dst0, src0, count, _CONST) -- it did not compile --
	   and implemented a plain strcpy that ignored pSrc2 entirely.
	   Rewritten as a correct copy-then-concatenate using the actual
	   parameters. */
	char *s = pDest;

	while ((*pDest = *pSrc++))
		++pDest;
	while ((*pDest++ = *pSrc2++))
		;
	return s;
#endif /* PREFER_SPEED_OVER_SIZE */
}
/* Return the number of bytes in STR before the NUL terminator.
   Fast path: align the pointer bytewise (returning early if the
   terminator is met), then scan a word at a time for a word that
   contains a NUL byte, and finish bytewise. */
size_t strlen(const char *str)
{
  const char *begin = str;
#if !defined(PREFER_SIZE_OVER_SPEED)
  unsigned long *word;

  /* Bytewise until word-aligned; the string may end here. */
  for (; UNALIGNED (str); ++str)
    if (*str == '\0')
      return str - begin;

  /* Word-at-a-time scan for a NUL-bearing word. */
  word = (unsigned long *) str;
  while (!DETECTNULL (*word))
    ++word;
  str = (char *) word;
#endif /* not PREFER_SIZE_OVER_SPEED */

  /* Locate the exact terminator byte. */
  while (*str != '\0')
    ++str;
  return str - begin;
}
/**
 * This function will copy memory content from source address to destination
 * address.
 *
 * @param dst the address of destination memory
 * @param src the address of source memory
 * @param count the copied length
 *
 * @return the address of destination memory
 *
 */
void *rt_memcpy(void *dst, const void *src, rt_ubase_t count)
{
#ifdef RT_TINY_SIZE
    char *tmp = (char *)dst, *s = (char *)src;

    while (count--)
        *tmp++ = *s++;

    return dst;
#else

/* Pointers are "aligned" only if both are multiples of the word size.
   NOTE(review): the cast to rt_int32_t truncates pointers on 64-bit
   targets; harmless for the low alignment bits but confirm. */
#define UNALIGNED(X, Y) \
    (((rt_int32_t)X & (sizeof(rt_int32_t) - 1)) | ((rt_int32_t)Y & (sizeof(rt_int32_t) - 1)))
#define BIGBLOCKSIZE (sizeof(rt_int32_t) << 2)
#define LITTLEBLOCKSIZE (sizeof(rt_int32_t))
#define TOO_SMALL(LEN) ((LEN) < BIGBLOCKSIZE)

    char *dst_ptr = (char *)dst;
    char *src_ptr = (char *)src;
    rt_int32_t *aligned_dst;
    rt_int32_t *aligned_src;
    /* NOTE(review): narrowing rt_ubase_t to int -- counts above
       INT_MAX would misbehave; confirm callers never pass such sizes. */
    int len = count;

    /* If the size is small, or either SRC or DST is unaligned,
       then punt into the byte copy loop.  This should be rare. */
    if (!TOO_SMALL(len) && !UNALIGNED(src_ptr, dst_ptr))
    {
        aligned_dst = (rt_int32_t *)dst_ptr;
        aligned_src = (rt_int32_t *)src_ptr;

        /* Copy 4X long words at a time if possible. */
        while (len >= BIGBLOCKSIZE)
        {
            *aligned_dst++ = *aligned_src++;
            *aligned_dst++ = *aligned_src++;
            *aligned_dst++ = *aligned_src++;
            *aligned_dst++ = *aligned_src++;
            len -= BIGBLOCKSIZE;
        }

        /* Copy one long word at a time if possible. */
        while (len >= LITTLEBLOCKSIZE)
        {
            *aligned_dst++ = *aligned_src++;
            len -= LITTLEBLOCKSIZE;
        }

        /* Pick up any residual with a byte copier. */
        dst_ptr = (char *)aligned_dst;
        src_ptr = (char *)aligned_src;
    }

    while (len--)
        *dst_ptr++ = *src_ptr++;

    return dst;

#undef UNALIGNED
#undef BIGBLOCKSIZE
#undef LITTLEBLOCKSIZE
#undef TOO_SMALL
#endif
}
/* Copy LENGTH bytes from SRC_VOID to DST_VOID, correct even when the
   regions overlap.  Returns DST_VOID.  A backward copy handles the
   destructive-overlap case; otherwise a memcpy-style word-optimized
   forward copy is used when size and alignment permit. */
void *
memmove(void *dst_void, const void *src_void, size_t length)
{
  unsigned char *d = (unsigned char *) dst_void;
  const unsigned char *s = (const unsigned char *) src_void;
  size_t remaining = length;

  if (s < d && d < s + remaining)
    {
      /* Destination starts inside the source: copy from the end down
         so source bytes are read before being overwritten. */
      s += remaining;
      d += remaining;
      while (remaining--)
        *--d = *--s;
    }
  else
    {
      /* Forward copy.  Use the word-wise fast path only for large,
         mutually aligned regions. */
      if (!TOO_SMALL (remaining) && !UNALIGNED (s, d))
        {
          long *dw = (long *) d;
          const long *sw = (const long *) s;

          /* Four words per iteration while possible. */
          for (; remaining >= BIGBLOCKSIZE; remaining -= BIGBLOCKSIZE)
            {
              *dw++ = *sw++;
              *dw++ = *sw++;
              *dw++ = *sw++;
              *dw++ = *sw++;
            }
          /* Then single words. */
          for (; remaining >= LITTLEBLOCKSIZE; remaining -= LITTLEBLOCKSIZE)
            *dw++ = *sw++;

          d = (unsigned char *) dw;
          s = (const unsigned char *) sw;
        }

      /* Residual bytes. */
      while (remaining--)
        *d++ = *s++;
    }

  return dst_void;
}
// ARC MOD BEGIN // Change newlib style function declaration to normal style. void *memset(void *m, int c, size_t n) // ARC MOD END { char *s = (char *) m; #if !defined(PREFER_SIZE_OVER_SPEED) && !defined(__OPTIMIZE_SIZE__) int i; unsigned long buffer; unsigned long *aligned_addr; unsigned int d = c & 0xff; /* To avoid sign extension, copy C to an unsigned variable. */ while (UNALIGNED (s)) { if (n--) *s++ = (char) c; else return m; } if (!TOO_SMALL (n)) { /* If we get this far, we know that n is large and s is word-aligned. */ aligned_addr = (unsigned long *) s; /* Store D into each char sized location in BUFFER so that we can set large blocks quickly. */ buffer = (d << 8) | d; buffer |= (buffer << 16); for (i = 32; i < LBLOCKSIZE * 8; i <<= 1) buffer = (buffer << i) | buffer; /* Unroll the loop. */ while (n >= LBLOCKSIZE*4) { *aligned_addr++ = buffer; *aligned_addr++ = buffer; *aligned_addr++ = buffer; *aligned_addr++ = buffer; n -= 4*LBLOCKSIZE; } while (n >= LBLOCKSIZE) { *aligned_addr++ = buffer; n -= LBLOCKSIZE; } /* Pick up the remainder with a bytewise loop. */ s = (char*)aligned_addr; } #endif /* not PREFER_SIZE_OVER_SPEED */ while (n--) *s++ = (char) c; return m; }
/* Fill the first N bytes of M with the byte value C; returns M.
   When optimizing for speed: align bytewise, then store a replicated
   word pattern in unrolled blocks, finishing with a bytewise tail. */
void *
memset(void * m, int c, size_t n)
{
	char * s = (char *)m;

	/* If optmizing for speed */
#ifndef __OPTIMIZE_SIZE__
	unsigned int d = c & 0xff;	/* To avoid sign extension, copy C to an
					 * unsigned variable.  */

	/* Bytewise until S is word-aligned; N may be exhausted here. */
	while (UNALIGNED (s)) {
		if (n--)
			*s++ = (char)c;
		else
			return m;
	}

	if (!TOO_SMALL(n)) {
		unsigned long * aligned_addr;
		unsigned long buffer;

		/* If we get this far, we know that n is large and
		   s is word-aligned. */
		aligned_addr = (unsigned long *)s;

		/* Store D into each char sized location in BUFFER so that
		 * we can set large blocks quickly. */
		buffer = (d << 8) | d;
		buffer |= (buffer << 16);
		/* Widen the 32-bit pattern to a full word; the loop body
		   only runs when long is wider than 32 bits. */
		for (size_t i = 32; i < LBLOCKSIZE * 8; i <<= 1)
			buffer = (buffer << i) | buffer;

		/* Unroll the loop. */
		while (n >= LBLOCKSIZE * 4) {
			*aligned_addr++ = buffer;
			*aligned_addr++ = buffer;
			*aligned_addr++ = buffer;
			*aligned_addr++ = buffer;
			n -= 4 * LBLOCKSIZE;
		}

		while (n >= LBLOCKSIZE) {
			*aligned_addr++ = buffer;
			n -= LBLOCKSIZE;
		}

		/* Pick up the remainder with a bytewise loop. */
		s = (char *)aligned_addr;
	}
#endif

	while (n--) {
		*s++ = (char)c;
	}

	return m;
}
/* Compare at most N bytes of the strings S1 and S2.  Returns 0 if the
   compared prefixes are equal (or N is 0), otherwise the difference of
   the first mismatching bytes as unsigned chars.  The speed build adds
   a word-at-a-time fast path for mutually aligned pointers. */
int strncmp(const char *s1, const char *s2, size_t n)
{
#if defined(PREFER_SIZE_OVER_SPEED) || defined(__OPTIMIZE_SIZE__)
  if (n == 0)
    return 0;

  for (; n-- != 0 && *s1 == *s2; ++s1, ++s2)
    if (n == 0 || *s1 == '\0')
      break;

  return (*(unsigned char *) s1) - (*(unsigned char *) s2);
#else
  if (n == 0)
    return 0;

  /* Word-wise compare while both pointers are mutually aligned. */
  if (!UNALIGNED (s1, s2))
    {
      unsigned long *w1 = (unsigned long *) s1;
      unsigned long *w2 = (unsigned long *) s2;

      while (n >= sizeof (long) && *w1 == *w2)
        {
          n -= sizeof (long);
          /* Bytes exhausted, or a NUL inside an equal word:
             the prefixes match. */
          if (n == 0 || DETECTNULL (*w1))
            return 0;
          ++w1;
          ++w2;
        }

      /* A word differed; finish bytewise from here. */
      s1 = (char *) w1;
      s2 = (char *) w2;
    }

  for (; n-- > 0 && *s1 == *s2; ++s1, ++s2)
    if (n == 0 || *s1 == '\0')
      return 0;

  return (*(unsigned char *) s1) - (*(unsigned char *) s2);
#endif /* not PREFER_SIZE_OVER_SPEED */
}
int memcmp(const void *m1, const void *m2, size_t n) { #if defined(PREFER_SIZE_OVER_SPEED) || defined(__OPTIMIZE_SIZE__) unsigned char *s1 = (unsigned char *) m1; unsigned char *s2 = (unsigned char *) m2; while (n--) { if (*s1 != *s2) { return *s1 - *s2; } s1++; s2++; } return 0; #else unsigned char *s1 = (unsigned char *) m1; unsigned char *s2 = (unsigned char *) m2; unsigned long *a1; unsigned long *a2; /* If the size is too small, or either pointer is unaligned, then we punt to the byte compare loop. Hopefully this will not turn up in inner loops. */ if (!TOO_SMALL(n) && !UNALIGNED(s1,s2)) { /* Otherwise, load and compare the blocks of memory one word at a time. */ a1 = (unsigned long*) s1; a2 = (unsigned long*) s2; while (n >= LBLOCKSIZE) { if (*a1 != *a2) break; a1++; a2++; n -= LBLOCKSIZE; } /* check m mod LBLOCKSIZE remaining characters */ s1 = (unsigned char*)a1; s2 = (unsigned char*)a2; } while (n--) { if (*s1 != *s2) return *s1 - *s2; s1++; s2++; } return 0; #endif /* not PREFER_SIZE_OVER_SPEED */ }
/**
 * memchr - Find a character in an area of memory.
 * @s: The memory area
 * @c: The byte to search for
 * @n: The size of the area.
 *
 * returns the address of the first occurrence of @c, or %NULL
 * if @c is not found
 */
void *memchr(const void *s, int c, size_t n)
{
	const unsigned char *src = (const unsigned char *) s;
	unsigned char d = c;

#if !defined(PREFER_SIZE_OVER_SPEED) && !defined(__OPTIMIZE_SIZE__)
	unsigned long *asrc;
	unsigned long  mask;
	int i;

	/* Bytewise until SRC is word-aligned; N may be exhausted here. */
	while (UNALIGNED(src)) {
		if (!n--)
			return NULL;
		if (*src == d)
			return (void *) src;
		src++;
	}

	if (!TOO_SMALL(n)) {
		/* If we get this far, we know that length is large and
		   src is word-aligned. */

		/* The fast code reads the source one word at a time
		   and only performs the bytewise search on word-sized
		   segments if they contain the search character, which
		   is detected by XORing the word-sized segment with a
		   word-sized block of the search character and then
		   detecting for the presence of NUL in the result.  */
		asrc = (unsigned long *) src;
		/* Replicate D into every byte of MASK. */
		mask = d << 8 | d;
		mask = mask << 16 | mask;
		/* Widen to the full word when long is > 32 bits. */
		for (i = 32; i < LBLOCKSIZE * 8; i <<= 1)
			mask = (mask << i) | mask;

		while (n >= LBLOCKSIZE) {
			if (DETECTCHAR(*asrc, mask))
				break;	/* word contains D: pin down below */
			n -= LBLOCKSIZE;
			asrc++;
		}

		/* If there are fewer than LBLOCKSIZE characters left,
		   then we resort to the bytewise loop.  */
		src = (unsigned char *) asrc;
	}
#endif /* not PREFER_SIZE_OVER_SPEED */

	while (n--) {
		if (*src == d)
			return (void *) src;
		src++;
	}

	return NULL;
}
/* Copy at most n characters of pSrc into pDest (strncpy semantics):
   copying stops after the terminator is copied, and any remaining
   space up to n is zero-filled.  Returns pDest.  Note: like strncpy,
   pDest is NOT terminated if pSrc is longer than n. */
char* MFString_CopyN(char *pDest, const char *pSrc, int n)
{
#if !defined(PREFER_SPEED_OVER_SIZE)
	char *dscan;
	const char *sscan;

	dscan = pDest;
	sscan = pSrc;
	/* Copy up to n chars, stopping once the terminator is copied. */
	while(n > 0)
	{
		--n;
		if((*dscan++ = *sscan++) == '\0')
			break;
	}
	/* Zero-fill the rest of the n-char destination area. */
	while(n-- > 0)
		*dscan++ = '\0';
	return pDest;
#else
	/* BUG FIX: this branch previously referenced undeclared
	   identifiers (dst0, src0, count, _CONST) and so did not
	   compile.  Rewritten to use the actual parameters with
	   identical strncpy-style copy-and-pad semantics. */
	char *dscan = pDest;
	const char *sscan = pSrc;

	while(n > 0)
	{
		--n;
		if((*dscan++ = *sscan++) == '\0')
			break;
	}
	while(n-- > 0)
		*dscan++ = '\0';
	return pDest;
#endif /* PREFER_SPEED_OVER_SIZE */
}
/* Copy N bytes from S2 to S1, correct even when the areas overlap;
   returns S1.  Overlap with the destination above the source is
   handled by a backward copy; otherwise a word-optimized forward
   copy is used when size and alignment permit. */
static void *
memmove (void *s1, const void *s2, size_t n)
{
  unsigned char *us1 = s1;
  const unsigned char *us2 = s2;
#ifndef _OPTIMIZED_FOR_SIZE
  unsigned long *pdst, *psrc;
#endif

  /* Destination begins inside the source: copy backwards so each
     source byte is read before it can be overwritten. */
  if (us2 < us1 && us1 < us2 + n)
    {
      /* Have to copy backwards. */
      us1 += n;
      us2 += n;
      while (n--)
        *--us1 = *--us2;
      return s1;
    }

#ifndef _OPTIMIZED_FOR_SIZE
  /* If the size is small, or either s1 or s2 is unaligned, punt
     into the byte copy loop.  This should be rare. */
  if (!TOO_SMALL (n) && !UNALIGNED (s2, s1))
    {
      pdst = (unsigned long *) s1;
      psrc = (unsigned long *) s2;

      /* Copy a big block at a time if possible. */
      while (n >= BIGBLOCKSIZE)
        {
          *pdst++ = *psrc++;
          *pdst++ = *psrc++;
          *pdst++ = *psrc++;
          *pdst++ = *psrc++;
          n -= BIGBLOCKSIZE;
        }

      /* Copy a little block at a time if possible. */
      while (n >= LITTLEBLOCKSIZE)
        {
          *pdst++ = *psrc++;
          n -= LITTLEBLOCKSIZE;
        }

      /* Pick up any residual with a byte copier. */
      us1 = (unsigned char *) pdst;
      us2 = (unsigned char *) psrc;
    }
#endif

  /* The normal byte-copy loop. */
  while (n--)
    *us1++ = *us2++;

  return s1;
}
void *memcpy(void *dst0, const void *src0, size_t len0) { #if defined(PREFER_SIZE_OVER_SPEED) || defined(__OPTIMIZE_SIZE__) char *dst = (char *) dst0; char *src = (char *) src0; _PTR save = dst0; while (len0--) { *dst++ = *src++; } return save; #else char *dst = dst0; const char *src = src0; long *aligned_dst; const long *aligned_src; /* If the size is small, or either SRC or DST is unaligned, then punt into the byte copy loop. This should be rare. */ if (!TOO_SMALL(len0) && !UNALIGNED (src, dst)) { aligned_dst = (long*)dst; aligned_src = (long*)src; /* Copy 4X long words at a time if possible. */ while (len0 >= BIGBLOCKSIZE) { *aligned_dst++ = *aligned_src++; *aligned_dst++ = *aligned_src++; *aligned_dst++ = *aligned_src++; *aligned_dst++ = *aligned_src++; len0 -= BIGBLOCKSIZE; } /* Copy one long word at a time if possible. */ while (len0 >= LITTLEBLOCKSIZE) { *aligned_dst++ = *aligned_src++; len0 -= LITTLEBLOCKSIZE; } /* Pick up any residual with a byte copier. */ dst = (char*)aligned_dst; src = (char*)aligned_src; } while (len0--) *dst++ = *src++; return dst0; #endif /* not PREFER_SIZE_OVER_SPEED */ }
/* strncpy: copy at most COUNT characters of SRC0 into DST0, stopping
   after the terminator is copied and zero-filling any remaining space
   up to COUNT.  Returns DST0.  As with standard strncpy, DST0 is not
   terminated when SRC0 is longer than COUNT. */
char *strncpy(char *dst0, const char *src0, size_t count)
{
#if defined(PREFER_SIZE_OVER_SPEED)
  char *d = dst0;
  const char *s = src0;

  while (count > 0)
    {
      --count;
      if ((*d++ = *s++) == '\0')
        break;
    }
  /* Zero-pad the remainder, per strncpy semantics. */
  while (count-- > 0)
    *d++ = '\0';
  return dst0;
#else
  char *d = dst0;
  const char *s = src0;

  /* Word copies while mutually aligned, enough bytes remain, and the
     next source word holds no terminator. */
  if (!UNALIGNED (s, d) && !TOO_SMALL (count))
    {
      long *dw = (long *) d;
      const long *sw = (const long *) s;

      for (; count >= sizeof (long int) && !DETECTNULL (*sw);
           count -= sizeof (long int))
        *dw++ = *sw++;

      d = (char *) dw;
      s = (const char *) sw;
    }

  /* Byte copy through the terminator (or until COUNT runs out). */
  while (count > 0)
    {
      --count;
      if ((*d++ = *s++) == '\0')
        break;
    }
  /* Zero-pad what is left. */
  while (count-- > 0)
    *d++ = '\0';
  return dst0;
#endif /* not PREFER_SIZE_OVER_SPEED */
}
/* Search the first N bytes of S for the byte value C; return a
   pointer to the first match, or 0 if it does not occur. */
static void *
memchr (const void *s, int c, size_t n)
{
  const unsigned char *us;
  unsigned char uc = (unsigned char) c;
#ifndef _OPTIMIZED_FOR_SIZE
  unsigned long *psrc;
  size_t i;
  unsigned long mask = 0, buffer = 0;
#endif

  us = s;
#ifndef _OPTIMIZED_FOR_SIZE
  /* If the size is small, or s is unaligned, punt into the bytewise
     loop.  This should be rare. */
  if (!TOO_SMALL (n) && !UNALIGNED (s))
    {
      psrc = (unsigned long *) s;

      /* The fast code reads the data one word at a time and only
         performs the bytewise search on word-sized segments if they
         contain the search character, which is detected by XORing
         the word-sized segment with a word-sized block of the search
         character and then detecting the presence of a null
         character in the result. */
      for (i = 0; i < LITTLEBLOCKSIZE; i++)
        mask = (mask << CHAR_BIT) + ((unsigned char) uc & ~(~0 << CHAR_BIT));

      /* Check a block at a time if possible. */
      while (n >= LITTLEBLOCKSIZE)
        {
          buffer = *psrc ^ mask;
          if (DETECTNULL (buffer))
            break;              /* found character, so go byte by byte from here */
          n -= LITTLEBLOCKSIZE;
          psrc++;
        }

      /* Pick up any residual with a bytewise iterator. */
      us = (unsigned char *) psrc;
    }
#endif

  /* The normal bytewise loop. */
  while (n--)
    {
      if (*us == uc)
        return (void *) us;
      us++;
    }
  return 0;
}
/* Fill the first N bytes of M with the byte C; returns M.
   Aligns bytewise, then stores a word-wide replicated pattern in
   unrolled blocks, and finishes any tail bytewise. */
void *
memset(void *m, unsigned char c, size_t n)
{
  unsigned char *p;
  unsigned long pattern;

  if (n == 0)
    return m;

  p = (unsigned char *) m;

  /* Bytewise until the pointer is word-aligned; N may run out. */
  while (UNALIGNED (p))
    {
      *p++ = c;
      if (--n == 0)
        return m;
    }

  if (! TOO_SMALL (n))
    {
      /* N is large and P is aligned: replicate C across every byte
         of a word. */
      pattern = c;
      pattern |= (pattern << 8);
      pattern |= (pattern << 16);
      if (LBLOCKSIZE > 4)
        pattern |= (pattern << 16) << 16;   /* 64-bit words */

      /* Four word stores per iteration while possible. */
      for (; n >= LBLOCKSIZE*4; n -= 4*LBLOCKSIZE)
        {
          *((unsigned long *) p) = pattern; p += LBLOCKSIZE;
          *((unsigned long *) p) = pattern; p += LBLOCKSIZE;
          *((unsigned long *) p) = pattern; p += LBLOCKSIZE;
          *((unsigned long *) p) = pattern; p += LBLOCKSIZE;
        }
      /* Then one word at a time. */
      for (; n >= LBLOCKSIZE; n -= LBLOCKSIZE)
        {
          *((unsigned long *) p) = pattern;
          p += LBLOCKSIZE;
        }
    }

  /* Trailing bytes. */
  while (n--)
    *p++ = c;

  return m;
}
/* Fill the first COUNT bytes of DEST with the byte value C; returns
   DEST.  Aligns bytewise, then stores a replicated word pattern in
   unrolled blocks, finishing with a bytewise tail. */
void *memset(void *dest, int c, size_t count)
{
  char *p = (char *) dest;
  unsigned int byte = c & 0xff;   /* avoid sign extension of C */

  /* Bytewise until P is word-aligned; COUNT may run out here. */
  while (UNALIGNED (p))
    {
      if (count == 0)
        return dest;
      count--;
      *p++ = (char) c;
    }

  if (!TOO_SMALL (count))
    {
      unsigned long *wp = (unsigned long *) p;
      unsigned long pattern;
      int shift;

      /* Replicate the byte across a full word.  The widening loop
         only runs when long is wider than 32 bits. */
      pattern = (byte << 8) | byte;
      pattern |= (pattern << 16);
      for (shift = 32; shift < LBLOCKSIZE * 8; shift <<= 1)
        pattern = (pattern << shift) | pattern;

      /* Four word stores per iteration while possible. */
      for (; count >= LBLOCKSIZE*4; count -= 4*LBLOCKSIZE)
        {
          *wp++ = pattern;
          *wp++ = pattern;
          *wp++ = pattern;
          *wp++ = pattern;
        }
      /* Then one word at a time. */
      for (; count >= LBLOCKSIZE; count -= LBLOCKSIZE)
        *wp++ = pattern;

      /* Remainder handled bytewise below. */
      p = (char *) wp;
    }

  while (count--)
    *p++ = (char) c;

  return dest;
}
/* Bounded compare with position reporting.
   NOTE(review): observed contract from the code -- 0 when LIMIT is 0;
   limit-n (the match length) when both strings terminate together or
   a NUL is hit with equal prefixes; LIMIT when all LIMIT bytes match
   with no terminator; n-limit (negative as signed) on a mismatch.
   Confirm against callers; the word loop below disagrees (see note). */
small_int_t strnzcmp(const unsigned char *s1, const unsigned char *s2, size_t limit)
{
  size_t n = limit;
  unsigned long *a1;
  unsigned long *a2;

  if (n == 0)
    return 0;

  /* If s1 or s2 are unaligned, then compare bytes. */
  if (!UNALIGNED (s1, s2))
    {
      /* If s1 and s2 are word-aligned, compare them a word at a time. */
      a1 = (unsigned long*)s1;
      a2 = (unsigned long*)s2;
      while (n >= sizeof (long) && *a1 == *a2)
        {
          /* NOTE(review): n is only decremented at the bottom of this
             loop, so "n == 0" can never be true here.  Also, returning
             0 when a NUL is found inside an equal word disagrees with
             the byte loop below, which returns limit-n for the same
             situation -- verify which behavior is intended. */
          if (n == 0 || DETECTNULL (*a1))
            return 0;
          a1++;
          a2++;
          n -= sizeof (long);
        }

      /* A difference was detected in last few bytes of s1, so search
         bytewise */
      s1 = (unsigned char*)a1;
      s2 = (unsigned char*)a2;
    }

  while (n > 0)
    {
      if (__glibc_unlikely(*s1 != *s2)){
        /* Mismatch: a NUL on either side reports the matched length;
           otherwise report the negated count (n - limit <= 0 as a
           signed value -- NOTE(review): size_t wraparound here relies
           on conversion to small_int_t; confirm the type is signed). */
        if ((*s1 == '\0') || (*s2 == '\0'))
          return limit-n;
        return n - limit;
      }
      /* Equal bytes and a terminator: report the matched length. */
      if (__glibc_unlikely(*s1 == '\0')) {
        return limit-n;
      }
      s1++;
      s2++;
      n--;
    }
  /* All LIMIT bytes matched without a terminator. */
  return limit;
}
/* Fill LEN 16-bit units starting at DST with the low 16 bits of VAL.
   NOTE(review): LEN counts shorts, not bytes; DST is assumed to be at
   least 2-byte aligned, and LBLOCKSIZE appears to be the number of
   shorts per long word (sizeof(long)/2, given the "== 2" fast case) --
   confirm the macro definitions before relying on this. */
void memset16(void *dst, int val, size_t len)
{
#if defined(PREFER_SIZE_OVER_SPEED) || defined(__OPTIMIZE_SIZE__)
    unsigned short *p = (unsigned short *)dst;

    while (len--)
        *p++ = val;
#else
    unsigned short *p = (unsigned short *)dst;
    unsigned int i;
    unsigned long buffer;
    unsigned long *aligned_addr;

    /* Word-wise fill only when DST is word-aligned and LEN is large. */
    if (!TOO_SMALL(len) && !UNALIGNED(dst))
    {
        aligned_addr = (unsigned long *)dst;
        val &= 0xffff;

        /* Replicate the 16-bit value across a long word. */
        if (LBLOCKSIZE == 2)
        {
            buffer = (val << 16) | val;
        }
        else
        {
            buffer = 0;
            for (i = 0; i < LBLOCKSIZE; i++)
                buffer = (buffer << 16) | val;
        }

        /* Unrolled word stores, then single-word stores. */
        while (len >= LBLOCKSIZE*4)
        {
            *aligned_addr++ = buffer;
            *aligned_addr++ = buffer;
            *aligned_addr++ = buffer;
            *aligned_addr++ = buffer;
            len -= 4*LBLOCKSIZE;
        }

        while (len >= LBLOCKSIZE)
        {
            *aligned_addr++ = buffer;
            len -= LBLOCKSIZE;
        }

        /* Remaining shorts are written below. */
        p = (unsigned short *)aligned_addr;
    }

    while (len--)
        *p++ = val;
#endif /* not PREFER_SIZE_OVER_SPEED */
}
static void * memset (void *s, int c, size_t n) { unsigned char *us = s; unsigned char uc = (unsigned char) c; #ifndef _OPTIMIZED_FOR_SIZE unsigned long *ps; unsigned long mask = 0; size_t i; /* If the size is small, or s is unaligned, punt into the byte copy loop. This should be rare. */ if (!TOO_SMALL (n) && !UNALIGNED (s)) { ps = (unsigned long *) s; /* Store uc into mask at each location. */ for (i = 0; i < LITTLEBLOCKSIZE; i++) mask = ((mask << CHAR_BIT) + ((unsigned char) uc & ~(~0 << CHAR_BIT))); /* Copy a 4X block at a time if possible. */ while (n >= LITTLEBLOCKSIZE * 4) { *ps++ = mask; *ps++ = mask; *ps++ = mask; *ps++ = mask; n -= LITTLEBLOCKSIZE * 4; } /* Copy a block at a time if possible. */ while (n >= LITTLEBLOCKSIZE) { *ps++ = mask; n -= LITTLEBLOCKSIZE; } /* Pick up any residual with a byte copier. */ us = (unsigned char *) ps; } #endif /* The normal byte-copy loop. */ while (n--) *us++ = uc; return s; }
/* Fill the first N bytes of M with the byte value C; returns M.
   Large aligned regions are filled via 64-bit stores of a replicated
   byte pattern; everything else (and any residue) is written bytewise. */
void *memset(void *m, int c, size_t n)
{
	char *s = (char *) m;
	int i;
	/* BUG FIX: the fill pattern is stored through an
	   `unsigned long long *`, so it must be built in an
	   `unsigned long long`.  It was previously `unsigned long`,
	   which on targets where long is narrower than long long
	   zero-extends on each store and leaves the upper bytes of
	   every written block as zero instead of the fill byte. */
	unsigned long long buffer;
	unsigned long long *aligned_addr;
	unsigned int d = c & 0xff;	/* To avoid sign extension,
					 * copy C to an unsigned variable. */

	if (!TOO_SMALL (n) && !UNALIGNED (m))
	{
		/* If we get this far, we know that n is large and m is
		   word-aligned. */
		aligned_addr = (unsigned long long *) m;

		/* Store D into each char sized location in BUFFER so that
		 * we can set large blocks quickly.
		 * NOTE(review): assumes LBLOCKSIZE matches the store width
		 * (sizeof(unsigned long long), or 4 for 4-byte blocks) --
		 * confirm the macro definition. */
		if (LBLOCKSIZE == 4)
		{
			buffer = (d << 8) | d;
			buffer |= (buffer << 16);
		}
		else
		{
			buffer = 0;
			for (i = 0; i < LBLOCKSIZE; i++)
				buffer = (buffer << 8) | d;
		}

		/* Unrolled block stores, then single blocks. */
		while (n >= LBLOCKSIZE*4)
		{
			*aligned_addr++ = buffer;
			*aligned_addr++ = buffer;
			*aligned_addr++ = buffer;
			*aligned_addr++ = buffer;
			n -= 4*LBLOCKSIZE;
		}

		while (n >= LBLOCKSIZE)
		{
			*aligned_addr++ = buffer;
			n -= LBLOCKSIZE;
		}

		/* Pick up the remainder with a bytewise loop. */
		s = (char*)aligned_addr;
	}

	while (n--)
	{
		*s++ = (char)d;
	}

	return m;
}
/* Compare at most N bytes of the strings S1 and S2.  Returns 0 when
   the compared prefixes match (or N is 0), otherwise the difference
   of the first mismatching bytes.  Word-at-a-time fast path when the
   pointers are mutually aligned. */
small_int_t strncmp(const unsigned char *s1, const unsigned char *s2, size_t n)
{
  if (n == 0)
    return 0;

  /* Word-wise compare while both pointers are aligned. */
  if (!UNALIGNED (s1, s2))
    {
      unsigned long *w1 = (unsigned long *) s1;
      unsigned long *w2 = (unsigned long *) s2;

      while (n >= sizeof (long) && *w1 == *w2)
        {
          n -= sizeof (long);
          /* Bytes exhausted, or a NUL inside an equal word:
             the prefixes match. */
          if (n == 0 || DETECTNULL (*w1))
            return 0;
          ++w1;
          ++w2;
        }

      /* A word differed; finish bytewise from here. */
      s1 = (unsigned char *) w1;
      s2 = (unsigned char *) w2;
    }

  for (; n-- > 0 && *s1 == *s2; ++s1, ++s2)
    if (n == 0 || *s1 == '\0')
      return 0;

  return *s1 - *s2;
}
/* Lexicographically compare the NUL-terminated strings S1 and S2.
   Returns 0 if equal, otherwise the difference of the first pair of
   mismatching bytes (as unsigned chars).  The speed build compares a
   word at a time while both pointers are mutually aligned. */
int strcmp(const char *s1, const char *s2)
{
#if defined(PREFER_SIZE_OVER_SPEED) || defined(__OPTIMIZE_SIZE__)
  for (; *s1 != '\0' && *s1 == *s2; ++s1, ++s2)
    ;
  return (*(unsigned char *) s1) - (*(unsigned char *) s2);
#else
  if (!UNALIGNED(s1, s2))
    {
      unsigned long *w1 = (unsigned long *) s1;
      unsigned long *w2 = (unsigned long *) s2;

      /* Walk matching words; an equal word containing a NUL means
         the strings are identical. */
      for (; *w1 == *w2; ++w1, ++w2)
        {
          if (DETECTNULL(*w1))
            return 0;
        }

      /* A word differed; pin down the byte below. */
      s1 = (char *) w1;
      s2 = (char *) w2;
    }

  for (; *s1 != '\0' && *s1 == *s2; ++s1, ++s2)
    ;
  return (*(unsigned char *) s1) - (*(unsigned char *) s2);
#endif /* not PREFER_SIZE_OVER_SPEED */
}
/* Compare the first _N bytes of _S1 and _S2.  Returns 0 if equal,
   otherwise the difference of the first mismatching bytes.  Large,
   mutually aligned areas are compared a word at a time. */
int memcmp (const void *_s1, const void *_s2, size_t _n)
{
  unsigned char *s1 = (unsigned char *) _s1;
  unsigned char *s2 = (unsigned char *) _s2;
  unsigned long *a1;
  unsigned long *a2;

  /* If the size is too small, or either pointer is unaligned,
     then we punt to the byte compare loop.  Hopefully this will
     not turn up in inner loops.  */
  if (!TOO_SMALL(_n) && !UNALIGNED(s1,s2))
    {
      /* Otherwise, load and compare the blocks of memory one
         word at a time.  */
      a1 = (unsigned long*) s1;
      a2 = (unsigned long*) s2;
      while (_n >= LBLOCKSIZE)
        {
          if (*a1 != *a2)
            break;      /* mismatching word: locate the byte below */
          a1++;
          a2++;
          _n -= LBLOCKSIZE;
        }

      /* check m mod LBLOCKSIZE remaining characters */
      s1 = (unsigned char*)a1;
      s2 = (unsigned char*)a2;
    }

  /* Bytewise compare of the residue (or the whole area). */
  while (_n--)
    {
      if (*s1 != *s2)
        return *s1 - *s2;
      s1++;
      s2++;
    }
  return 0;
}