int
main (void)
{
  TYPE a[N] = { 11, 12, 13, 14, 15, 16, 17, 18, 19, 20,
                1, 2, 3, 4, 5, 6, 7, 8, 9, 10,
                21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32 };

  /* Pad the remainder of the array.  */
  __builtin_memset (a + 32, 43, (N - 32) * sizeof (TYPE));

  TYPE ret = condition_reduction (a, 16);
  if (ret != 10)
    __builtin_abort ();
  return 0;
}
struct __go_open_array
__go_string_to_byte_array (String str)
{
  uintptr cap;
  unsigned char *data;
  struct __go_open_array ret;

  cap = runtime_roundupsize (str.len);
  data = (unsigned char *) runtime_malloc (cap);
  __builtin_memcpy (data, str.str, str.len);
  /* Zero the padding introduced by rounding the allocation up.  */
  if (cap != (uintptr) str.len)
    __builtin_memset (data + str.len, 0, cap - (uintptr) str.len);
  ret.__values = (void *) data;
  ret.__count = str.len;
  ret.__capacity = (intgo) cap;
  return ret;
}
static xChunk &UnshiftPooled(List<xChunk> &lstDst){
    typename List<xChunk>::Node *pNode;
    {
        const auto vLock = s_vPoolMutex.GetLock();
        if(!s_lstPool.IsEmpty()){
            pNode = s_lstPool.GetFirst();
            // Move the recycled chunk from the pool onto the front of lstDst.
            lstDst.Splice(lstDst.GetFirst(), s_lstPool, s_lstPool.GetFirst());
            goto jDone;
        }
    }
    pNode = lstDst.Unshift();
jDone:
#ifndef NDEBUG
    // Poison the chunk in debug builds to catch use of uninitialized data.
    __builtin_memset(&pNode->Get(), 0xCC, sizeof(xChunk));
#endif
    return pNode->Get();
}
struct __go_open_array
__go_string_to_int_array (String str)
{
  size_t c;
  const unsigned char *p;
  const unsigned char *pend;
  uintptr mem;
  uint32_t *data;
  uint32_t *pd;
  struct __go_open_array ret;

  /* First pass: count the runes in the string.  */
  c = 0;
  p = str.str;
  pend = p + str.len;
  while (p < pend)
    {
      int rune;

      ++c;
      p += __go_get_rune (p, pend - p, &rune);
    }

  if (c > MaxMem / sizeof (uint32_t))
    runtime_throw ("out of memory");
  mem = runtime_roundupsize (c * sizeof (uint32_t));
  data = (uint32_t *) runtime_mallocgc (mem, 0, FlagNoScan | FlagNoZero);

  /* Second pass: decode each rune into the array.  */
  p = str.str;
  pd = data;
  while (p < pend)
    {
      int rune;

      p += __go_get_rune (p, pend - p, &rune);
      *pd++ = rune;
    }

  /* Zero the padding introduced by rounding the allocation up.  */
  if (mem > (uintptr) c * sizeof (uint32_t))
    __builtin_memset (data + c, 0, mem - (uintptr) c * sizeof (uint32_t));

  ret.__values = (void *) data;
  ret.__count = c;
  ret.__capacity = (intgo) (mem / sizeof (uint32_t));
  return ret;
}
static inline HOT OPTIMIZE3 void stress_memthrash_random_chunk(
    const size_t chunk_size,
    size_t mem_size)
{
    uint32_t i;
    const uint32_t max = mwc16();
    size_t chunks = mem_size / chunk_size;

    if (chunks < 1)
        chunks = 1;

    for (i = 0; !thread_terminate && (i < max); i++) {
        const size_t chunk = mwc32() % chunks;
        const size_t offset = chunk * chunk_size;
#if defined(__GNUC__)
        (void)__builtin_memset((void *)mem + offset, mwc8(), chunk_size);
#else
        (void)memset((void *)mem + offset, mwc8(), chunk_size);
#endif
    }
}
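/* A minimal, self-contained sketch of the same random-chunk memset
   thrashing idea as above.  rand() stands in for stress-ng's mwc*()
   generators, and buf/CHUNK_SIZE are hypothetical stand-ins for the
   stressor's mapped region and chunk size.  */
#include <stdlib.h>
#include <string.h>

static char buf[1 << 20];       /* stand-in for the mapped region */
#define CHUNK_SIZE 4096

static void thrash_random_chunks(int iterations)
{
    size_t chunks = sizeof(buf) / CHUNK_SIZE;
    int i;

    for (i = 0; i < iterations; i++) {
        /* Pick a random chunk and fill it with a random byte.  */
        size_t chunk = (size_t)rand() % chunks;
        memset(buf + chunk * CHUNK_SIZE, rand() & 0xff, CHUNK_SIZE);
    }
}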
struct __go_map *
__go_new_map (const struct __go_map_descriptor *descriptor, uintptr_t entries)
{
  struct __go_map *ret;

  if ((uintptr_t) (int) entries != entries)
    __go_panic_msg ("map size out of range");

  if (entries == 0)
    entries = 5;
  else
    entries = __go_map_next_prime (entries);
  ret = (struct __go_map *) __go_alloc (sizeof (struct __go_map));
  ret->__descriptor = descriptor;
  ret->__element_count = 0;
  ret->__bucket_count = entries;
  ret->__buckets = (void **) __go_alloc (entries * sizeof (void *));
  /* Empty buckets must read as null pointers.  */
  __builtin_memset (ret->__buckets, 0, entries * sizeof (void *));

  return ret;
}
int
main (void)
{
  struct E e = { .row = 5, .col = 0,
                 .defaults = { 6, { -1, -1, -1, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0 } } };
  struct C c[4];
  struct A a = { c, 4 };
  struct B b = { &a, 1 };
  struct D d;

  __builtin_memset (&c, 0, sizeof c);
  foo (&e, 65, 2, &b);

  d = e.defaults.attr;
  d.columns = 2;
  if (__builtin_memcmp (&d, &c[0].attr, sizeof d))
    __builtin_abort ();
  d.fragment = 1;
  if (__builtin_memcmp (&d, &c[1].attr, sizeof d))
    __builtin_abort ();
  return 0;
}
__attribute__((noinline, noclone)) static int
test1 (int i)
{
  char foo[4] = {};

  /* __builtin___asan_report_store1 called 1 time here to instrument
     the initialization.  */
  foo[i] = 1;

  /* Instrument tab memory region.  */
  __builtin_memset (tab, 3, sizeof (tab));

  /* Instrument tab[1] with access size 3.  */
  __builtin_memcpy (&tab[1], foo + i, 3);

  /* This should not generate a __builtin___asan_report_load1 because
     the reference to tab[1] has been already instrumented above.  */
  return tab[1];

  /* So for these functions, there should be 3 calls to
     __builtin___asan_report_store1.  */
}
void
f (void *d, const void *s, __SIZE_TYPE__ n)
{
  void *t1 = __builtin_memcpy (d, s, n);
  if (t1 == 0)
    __builtin_abort ();
  void *t2 = __builtin_memmove (d, s, n);
  if (t2 == 0)
    __builtin_abort ();
  void *t3 = __builtin_memset (d, 0, n);
  if (t3 == 0)
    __builtin_abort ();
  void *t4 = __builtin_strcpy (d, s);
  if (t4 == 0)
    __builtin_abort ();
  void *t5 = __builtin_strncpy (d, s, n);
  if (t5 == 0)
    __builtin_abort ();
  void *t6 = __builtin_strcat (d, s);
  if (t6 == 0)
    __builtin_abort ();
  void *t7 = __builtin_strncat (d, s, n);
  if (t7 == 0)
    __builtin_abort ();
  void *t8 = __builtin_stpcpy (d, s);
  if (t8 == 0)
    __builtin_abort ();
  void *t9 = __builtin_stpncpy (d, s, n);
  if (t9 == 0)
    __builtin_abort ();
}
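/* A hedged driver sketch for f above: each of these builtins returns its
   destination argument (a non-null pointer here), so none of the aborts
   should fire.  The buffer sizes are illustrative only.  */
int
main (void)
{
  char d[32] = "";
  const char s[] = "abc";

  f (d, s, sizeof s);
  return 0;
}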
struct __go_map *
__go_new_map (const struct __go_map_descriptor *descriptor, uintptr_t entries)
{
  int32 ientries;
  struct __go_map *ret;

  /* The master library limits map entries to int32, so we do too.  */
  ientries = (int32) entries;
  if (ientries < 0 || (uintptr_t) ientries != entries)
    runtime_panicstring ("map size out of range");

  if (entries == 0)
    entries = 5;
  else
    entries = __go_map_next_prime (entries);
  ret = (struct __go_map *) __go_alloc (sizeof (struct __go_map));
  ret->__descriptor = descriptor;
  ret->__element_count = 0;
  ret->__bucket_count = entries;
  ret->__buckets = (void **) __go_alloc (entries * sizeof (void *));
  __builtin_memset (ret->__buckets, 0, entries * sizeof (void *));

  return ret;
}
//void _RSHashSHA2( RSCUBuffer input, RSUInteger ilen, RSUBlock output[64], BOOL is384 );
RSExport BOOL RSBaseHash(RSHashSelectorID selector, const void* hash, RSUInteger size, RSHashCode* hashCodeExt, RSUInteger codeSize)
{
    if (hashCodeExt == nil) return NO;
    __builtin_memset(hashCodeExt, 0, codeSize);
    RSHashCode hashCode = 0x0;
    RSUBlock output[64] = {0};
    switch (selector) {
        case RSDefaultHash:
        case RSBlizzardHash:
            if (codeSize < sizeof(RSHashCode)) return NO;
            hashCode = __RSBaseHashBlizzardHash(hash, size, 0);
            *hashCodeExt = hashCode;
            break;
        case RSSHA2Hash:
            if (codeSize < sizeof(RSUBlock) * 64) return NO;
            _RSHashSHA2(hash, size, output, YES);
            __builtin_memcpy(hashCodeExt, output, sizeof(RSUBlock) * 64);
            break;
        default:
            break;
    }
    return YES;
}
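/* A hedged usage sketch for RSBaseHash above, assuming the RS types and the
   RSDefaultHash selector come from the surrounding headers; the example
   function and its data/length parameters are hypothetical.  */
void example(const void *data, RSUInteger length)
{
    RSHashCode code = 0;
    if (RSBaseHash(RSDefaultHash, data, length, &code, sizeof(code))) {
        /* code now holds the Blizzard hash of the input (RSDefaultHash
           falls through to the Blizzard case in the switch above).  */
    }
}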
/* 2x mvc */
void *
foo (char *a, int c, long len)
{
  return __builtin_memset (a, c, len);
}
void
h ()
{
  char *p = __builtin_malloc (42);
  /* The memset result (p) escapes via the global g.  */
  g = __builtin_memset (p, 3, 10);
  __builtin_free (p);
}
void
f ()
{
  char *p = __builtin_malloc (42);
  /* The first store is fully overwritten by the second.  */
  __builtin_memset (p, 3, 10);
  __builtin_memset (p, 7, 33);
}
char *
i ()
{
  char *p = __builtin_malloc (42);
  /* As in f, but the buffer escapes via the return value.  */
  __builtin_memset (p, 3, 10);
  __builtin_memset (p, 7, 33);
  return p;
}
void c_cast_void (NonTrivial *p)
{
  __builtin_memset ((void*)p, 0, sizeof *p);   // { dg-bogus "\\\[-Wclass-memaccess]" }
}
void cast_void (NonTrivial *p)
{
  __builtin_memset (reinterpret_cast<char*>(p), 0, sizeof *p);
}
void *
f ()
{
  char *p = __builtin_malloc (42);
  __builtin_memset (p, 0, 42);
  __builtin_memset (p, 0, 42);
  return p;
}
void ulinux_memset(ulinux_u8 *d, ulinux_u8 c, ulinux_u64 len)
{
    __builtin_memset(d, c, (size_t)len);    /* use gcc builtin for this arch */
}
void
t (void)
{
  __builtin_memset (a, 1, 2048);
}
G*
runtime_netpoll(bool block)
{
    fd_set *prfds, *pwfds, *pefds, *ptfds;
    bool allocatedfds;
    struct timeval timeout;
    struct timeval *pt;
    int max, c, i;
    G *gp;
    int32 mode;
    byte b;
    struct stat st;

retry:
    runtime_lock(&selectlock);

    max = allocated;
    if(max == 0) {
        runtime_unlock(&selectlock);
        return nil;
    }

    if(inuse) {
        // The global fd sets are busy; work on freshly allocated copies.
        prfds = runtime_SysAlloc(4 * sizeof fds, &mstats.other_sys);
        pwfds = prfds + 1;
        pefds = pwfds + 1;
        ptfds = pefds + 1;
        allocatedfds = true;
    } else {
        prfds = &grfds;
        pwfds = &gwfds;
        pefds = &gefds;
        ptfds = &gtfds;
        inuse = true;
        allocatedfds = false;
    }

    __builtin_memcpy(prfds, &fds, sizeof fds);

    runtime_unlock(&selectlock);

    __builtin_memcpy(pwfds, prfds, sizeof fds);
    FD_CLR(rdwake, pwfds);
    __builtin_memcpy(pefds, pwfds, sizeof fds);
    __builtin_memcpy(ptfds, pwfds, sizeof fds);

    __builtin_memset(&timeout, 0, sizeof timeout);
    pt = &timeout;
    if(block)
        pt = nil;

    c = select(max, prfds, pwfds, pefds, pt);
    if(c < 0) {
        if(errno == EBADF) {
            // Some file descriptor has been closed.
            // Check each one, and treat each closed
            // descriptor as ready for read/write.
            c = 0;
            FD_ZERO(prfds);
            FD_ZERO(pwfds);
            FD_ZERO(pefds);
            for(i = 0; i < max; i++) {
                if(FD_ISSET(i, ptfds)
                   && fstat(i, &st) < 0
                   && errno == EBADF) {
                    FD_SET(i, prfds);
                    FD_SET(i, pwfds);
                    c += 2;
                }
            }
        } else {
            if(errno != EINTR)
                runtime_printf("runtime: select failed with %d\n", errno);
            goto retry;
        }
    }

    gp = nil;
    for(i = 0; i < max && c > 0; i++) {
        mode = 0;
        if(FD_ISSET(i, prfds)) {
            mode += 'r';
            --c;
        }
        if(FD_ISSET(i, pwfds)) {
            mode += 'w';
            --c;
        }
        if(FD_ISSET(i, pefds)) {
            mode = 'r' + 'w';
            --c;
        }
        if(i == rdwake) {
            // Drain the wakeup pipe.
            while(read(rdwake, &b, sizeof b) > 0)
                ;
            continue;
        }
        if(mode) {
            PollDesc *pd;

            runtime_lock(&selectlock);
            pd = data[i];
            runtime_unlock(&selectlock);
            if(pd != nil)
                runtime_netpollready(&gp, pd, mode);
        }
    }

    if(block && gp == nil)
        goto retry;

    if(allocatedfds) {
        runtime_SysFree(prfds, 4 * sizeof fds, &mstats.other_sys);
    } else {
        runtime_lock(&selectlock);
        inuse = false;
        runtime_unlock(&selectlock);
    }

    return gp;
}
void blkzero(struct blkbuf *buf)
{
    __builtin_memset(buf->__bf_data, 0, BLKSIZE);
}
void
p0 (char *b)
{
  __builtin_memset (b, 0, 400);
}
void cast_const_volatile (const volatile S *p)
{
  __builtin_memset (const_cast<S*>(p), 0, sizeof *p);
}
// A C cast to void* suppresses the warning because it casts away
// the qualifiers from the otherwise trivial pointed-to type.
void c_void_cast_const_volatile (const volatile S *p)
{
  __builtin_memset ((void*)p, 0, sizeof *p);
}
__attribute__((noinline, noclone)) void *
foo (void *p, unsigned int q)
{
  /* q is widened to unsigned long by the 4UL multiplication,
     so the size computation cannot wrap at 32 bits.  */
  return __builtin_memset (p, 0, q * 4UL);
}
// A C cast to a character (or any trivial) type suppresses the warning.
void c_cast_uchar (NonTrivial *p)
{
  __builtin_memset ((unsigned char*)p, 0, sizeof *p);
}
void
test (void *a)
{
  __builtin_memset (a, 0, n);
}
void cast_const (const S *p)
{
  __builtin_memset (const_cast<S*>(p), 0, sizeof *p);
}
void
memclr(void *p1, uintptr len)
{
    __builtin_memset(p1, 0, len);
}