/* Enumerate the memcpy IFUNC implementations available on this ARM
   system.  Fills ARRAY (capacity MAX) for the symbol NAME and returns
   the number of entries used.  */
size_t
__libc_ifunc_impl_list (const char *name, struct libc_ifunc_impl *array,
			size_t max)
{
  size_t i = 0;

  bool use_neon = true;
#ifdef __ARM_NEON__
  /* Built for NEON unconditionally: the generic memcpy already is the
     NEON version, so alias it.  */
# define __memcpy_neon	memcpy
#else
  /* Decide at run time from the kernel-reported hardware capabilities.  */
  use_neon = (GLRO(dl_hwcap) & HWCAP_ARM_NEON) != 0;
#endif

#ifndef __ARM_NEON__
  bool use_vfp = true;
# ifdef __SOFTFP__
  /* Soft-float build: only offer the VFP variant when the hardware has
     a VFP unit.  */
  use_vfp = (GLRO(dl_hwcap) & HWCAP_ARM_VFP) != 0;
# endif
#endif

  IFUNC_IMPL (i, name, memcpy,
	      IFUNC_IMPL_ADD (array, i, memcpy, use_neon, __memcpy_neon)
#ifndef __ARM_NEON__
	      IFUNC_IMPL_ADD (array, i, memcpy, use_vfp, __memcpy_vfp)
#endif
	      /* Plain ARM fallback, always usable.  */
	      IFUNC_IMPL_ADD (array, i, memcpy, 1, __memcpy_arm));

  return i;
}
/* Mark the page at the end of the stack (given through *STACK_ENDP)
   with the protection in __stack_prot.  Only the dynamic linker or
   libpthread may call this, and only with the real stack end; returns
   EPERM otherwise, 0 on success, or the mprotect errno on failure.  */
int
internal_function
_dl_make_stack_executable (void **stack_endp)
{
  /* Lowest address of the page that has to be changed.  */
  uintptr_t target_page = ((uintptr_t) *stack_endp
			   & -(intptr_t) GLRO(dl_pagesize));
  int result = 0;

  /* Challenge the caller: reject unexpected return addresses and a
     stack end that does not match the one libc recorded.  */
  if (__builtin_expect (__check_caller (RETURN_ADDRESS (0),
					allow_ldso|allow_libpthread) != 0, 0)
      || __builtin_expect (*stack_endp != __libc_stack_end, 0))
    return EPERM;

  if (__builtin_expect (__mprotect ((void *) target_page, GLRO(dl_pagesize),
				    __stack_prot) == 0, 1))
    {
      /* Clear the address so it cannot be reused.  */
      *stack_endp = NULL;
      /* Remember that we changed the permission.  */
      GL(dl_stack_flags) |= PF_X;
    }
  else
    result = errno;

#ifdef check_consistency
  check_consistency ();
#endif

  return result;
}
/* Make sure the kernel's idea of the program break does not overlap
   memory dl-minimal.c already allocated out of the final data page.  */
static inline void
frob_brk (void)
{
  __brk (0);			/* Initialize the break.  */

#if ! __ASSUME_BRK_PAGE_ROUNDED
  /* If the dynamic linker was executed as a program, then the break may
     start immediately after our data segment.  However, dl-minimal.c
     has already stolen the remainder of the page for internal
     allocations.  If we don't adjust the break location recorded by the
     kernel, the normal program startup will inquire, find the value at
     our &_end, and start allocating its own data there, clobbering
     dynamic linker data structures allocated there during startup.

     Later Linux kernels have changed this behavior so that the initial
     break value is rounded up to the page boundary before we start.  */

  extern char *__curbrk attribute_hidden;
  extern char _end[] attribute_hidden;
  /* Current break rounded up to the next page boundary; the
     "(void *) 0 + ..." form keeps the arithmetic in pointer space.  */
  char *const endpage = (void *) 0 + (((__curbrk - (char *) 0)
				       + GLRO(dl_pagesize) - 1)
				      & -GLRO(dl_pagesize));
  /* Only move the break when it sits inside the page holding _end,
     i.e. when the kernel did not already round it up.  */
  if (__builtin_expect (__curbrk >= _end && __curbrk < endpage, 0))
    __brk (endpage);
#endif
}
static void _dl_unprotect_relro (struct link_map *l) { ElfW(Addr) start = ((l->l_addr + l->l_relro_addr) & ~(GLRO(dl_pagesize) - 1)); ElfW(Addr) end = ((l->l_addr + l->l_relro_addr + l->l_relro_size) & ~(GLRO(dl_pagesize) - 1)); if (start != end) __mprotect ((void *) start, end - start, PROT_READ | PROT_WRITE); }
/* Copy values handed over by the dynamic linker into this libc's GLRO
   variables.  The slot order of ARRAY must match the "variables" list
   below.  */
void
_dl_var_init (void *array[])
{
  /* It has to match "variables" below.  */
  enum
    {
      DL_PAGESIZE = 0,
      DL_CLKTCK
    };

  GLRO(dl_pagesize) = *(size_t *) array[DL_PAGESIZE];
  GLRO(dl_clktck) = *(int *) array[DL_CLKTCK];
}
/* Common backend of the assert machinery: format a failure message
   using FMT, print it to stderr, keep an mmap'd copy for SIGABRT
   handlers, and abort.  */
void
__assert_fail_base (const char *fmt, const char *assertion, const char *file,
		    unsigned int line, const char *function)
{
  char *str;

#ifdef FATAL_PREPARE
  FATAL_PREPARE;
#endif

  int total;
  /* NOTE(review): this __asprintf variant also reports the formatted
     length via the trailing &TOTAL argument — glibc-internal extension;
     confirm against its local declaration.  */
  if (__asprintf (&str, fmt, __progname, __progname[0] ? ": " : "",
		  file, line, function ? function : "",
		  function ? ": " : "", assertion, &total) >= 0)
    {
      /* Print the message.  */
      (void) __fxprintf (NULL, "%s", str);
      (void) fflush (stderr);

      /* Round the length (plus NUL) up to whole pages for the mmap'd
	 copy kept around for abort handlers.  */
      total = (total + 1 + GLRO(dl_pagesize) - 1) & ~(GLRO(dl_pagesize) - 1);
      struct abort_msg_s *buf = __mmap (NULL, total, PROT_READ | PROT_WRITE,
					MAP_ANON | MAP_PRIVATE, -1, 0);
      if (__builtin_expect (buf != MAP_FAILED, 1))
	{
	  buf->size = total;
	  strcpy (buf->msg, str);

	  /* We have to free the old buffer since the application might
	     catch the SIGABRT signal.  */
	  struct abort_msg_s *old = atomic_exchange_acq (&__abort_msg, buf);
	  if (old != NULL)
	    __munmap (old, old->size);
	}

      free (str);
    }
  else
    {
      /* At least print a minimal message.  */
      static const char errstr[] = "Unexpected error.\n";
      __libc_write (STDERR_FILENO, errstr, sizeof (errstr) - 1);
    }

  abort ();
}
/* Grow heap H by DIFF bytes (rounded up to a whole page).  Returns 0
   on success, -1 when the new size would exceed HEAP_MAX_SIZE, and -2
   when extending the committed (read/write) region fails.  */
static int
grow_heap (heap_info *h, long diff)
{
  const size_t pgsz = GLRO (dl_pagesize);
  long grown = (long) h->size + ALIGN_UP (diff, pgsz);

  if ((unsigned long) grown > (unsigned long) HEAP_MAX_SIZE)
    return -1;

  if ((unsigned long) grown > h->mprotect_size)
    {
      /* Commit the pages between the current protected prefix and the
	 new end of the heap.  */
      char *uncommitted = (char *) h + h->mprotect_size;
      unsigned long extra = (unsigned long) grown - h->mprotect_size;

      if (__mprotect (uncommitted, extra, PROT_READ | PROT_WRITE) != 0)
	return -2;

      h->mprotect_size = grown;
    }

  h->size = grown;
  LIBC_PROBE (memory_heap_more, 2, h, h->size);
  return 0;
}
/* Install the floating-point environment *ENVP (or one of the
   FE_DFL_ENV / FE_NOMASK_ENV sentinels).  Returns 0 on success, 1 when
   there is no VFP unit to program.  */
int
__fesetenv (const fenv_t *envp)
{
  if ((GLRO (dl_hwcap) & HWCAP_VFP) == 0)
    /* Unsupported, so fail.  */
    return 1;

  unsigned int cw;

  _FPU_GETCW (cw);
  /* Keep only the reserved bits of the current control word.  */
  cw &= _FPU_RESERVED;

  if (envp == FE_DFL_ENV)
    cw |= _FPU_DEFAULT;
  else if (envp == FE_NOMASK_ENV)
    cw |= _FPU_IEEE;
  else
    cw |= envp->__cw & ~_FPU_RESERVED;

  _FPU_SETCW (cw);

  /* Success.  */
  return 0;
}
/* Set the rounding mode to ROUND.  Returns 0 on success and 1 for an
   unknown mode or when the hardware cannot honour it.  */
int
fesetround (int round)
{
  if (GLRO (dl_hwcap) & HWCAP_ARM_VFP)
    {
      fpu_control_t cw;

      /* Accept exactly the four defined rounding modes.  */
      if (round != FE_TONEAREST && round != FE_UPWARD
	  && round != FE_DOWNWARD && round != FE_TOWARDZERO)
	return 1;

      /* FE_TOWARDZERO is used here as the mask covering the
	 rounding-mode bits of the control word.  */
      _FPU_GETCW (cw);
      cw = (cw & ~FE_TOWARDZERO) | round;
      _FPU_SETCW (cw);
      return 0;
    }

  if (round == FE_TONEAREST)
    /* This is the only supported rounding mode for soft-fp.  */
    return 0;

  /* Unsupported, so fail.  */
  return 1;
}
/* Save the floating-point environment in *ENVP, then switch all
   exceptions to non-stop mode and clear the latched exception flags.
   Returns 0 on success, 1 when no VFP unit is present.  */
int
feholdexcept (fenv_t *envp)
{
  if ((GLRO (dl_hwcap) & HWCAP_VFP) == 0)
    /* Unsupported, so fail.  */
    return 1;

  unsigned long int fpscr;

  /* Store the environment.  */
  _FPU_GETCW (fpscr);
  envp->__cw = fpscr;

  /* Now set all exceptions to non-stop, and clear all exception
     flags.  */
  fpscr &= ~(FE_ALL_EXCEPT << FE_EXCEPT_SHIFT);
  fpscr &= ~FE_ALL_EXCEPT;
  _FPU_SETCW (fpscr);

  return 0;
}
/* Grow heap H by DIFF bytes, rounding the request up to a whole number
   of pages.  Returns 0 on success, -1 when HEAP_MAX_SIZE would be
   exceeded, -2 when committing the additional pages fails.  */
static int
grow_heap (heap_info *h, long diff)
{
  const size_t page_mask = GLRO (dl_pagesize) - 1;
  long target = (long) h->size + ((diff + page_mask) & ~page_mask);

  if ((unsigned long) target > (unsigned long) HEAP_MAX_SIZE)
    return -1;

  if ((unsigned long) target > h->mprotect_size)
    {
      /* Commit the not-yet-writable tail of the heap.  */
      if (mprotect ((char *) h + h->mprotect_size,
		    (unsigned long) target - h->mprotect_size,
		    PROT_READ | PROT_WRITE) != 0)
	return -2;

      h->mprotect_size = target;
    }

  h->size = target;
  /* LIBC_PROBE (memory_heap_more, 2, h, h->size); */
  return 0;
}
/* Determine the directory of the running executable for $ORIGIN
   expansion.  Tries /proc/self/exe first and falls back to the
   LD_ORIGIN_PATH environment variable; returns (char *) -1 when no
   origin can be determined or on allocation failure.  */
const char *
_dl_get_origin (void)
{
  char linkval[PATH_MAX];
  char *result;
  int len;
  INTERNAL_SYSCALL_DECL (err);

  len = INTERNAL_SYSCALL (readlinkat, err, 4, AT_FDCWD, "/proc/self/exe",
			  linkval, sizeof (linkval));
  /* A leading '[' marks kernel pseudo-entries that do not name a real
     file, so reject those.  */
  if (! INTERNAL_SYSCALL_ERROR_P (len, err) && len > 0 && linkval[0] != '[')
    {
      /* We can use this value.  */
      assert (linkval[0] == '/');
      /* Strip the executable's name, keeping only the directory part.  */
      while (len > 1 && linkval[len - 1] != '/')
	--len;
      result = (char *) malloc (len + 1);
      if (result == NULL)
	result = (char *) -1;
      else if (len == 1)
	/* The binary lives in the root directory.  */
	memcpy (result, "/", 2);
      else
	/* Copy the directory part without the trailing slash and
	   terminate it.  */
	*((char *) __mempcpy (result, linkval, len - 1)) = '\0';
    }
  else
    {
      result = (char *) -1;
      /* We use the environment variable LD_ORIGIN_PATH.  If it is set
	 make a copy and strip out trailing slashes.  */
      if (GLRO(dl_origin_path) != NULL)
	{
	  size_t len = strlen (GLRO(dl_origin_path));
	  result = (char *) malloc (len + 1);
	  if (result == NULL)
	    result = (char *) -1;
	  else
	    {
	      char *cp = __mempcpy (result, GLRO(dl_origin_path), len);
	      /* Drop trailing slashes but never shrink below one
		 character.  */
	      while (cp > result + 1 && cp[-1] == '/')
		--cp;
	      *cp = '\0';
	    }
	}
    }

  return result;
}
/* Return the minimum usable stack size for a thread created with
   attributes ATTR: one page plus the static TLS block, the
   architecture's PTHREAD_STACK_MIN, and the requested guard area.  */
size_t
__pthread_get_minstack (const pthread_attr_t *attr)
{
  struct pthread_attr *iattr = (struct pthread_attr *) attr;

  size_t min = GLRO(dl_pagesize);
  min += __static_tls_size;
  min += PTHREAD_STACK_MIN;
  min += iattr->guardsize;
  return min;
}
unsigned long int __getauxval (unsigned long int type) { ElfW(auxv_t) *p; if (type == AT_HWCAP) return GLRO(dl_hwcap); else if (type == AT_HWCAP2) return GLRO(dl_hwcap2); for (p = GLRO(dl_auxv); p->a_type != AT_NULL; p++) if (p->a_type == type) return p->a_un.a_val; __set_errno (ENOENT); return 0; }
/* Early libc startup initialization.  NOTE(review): the matching #if
   for the #endif just inside the braces opens before this chunk, where
   an alternate signature is presumably selected — confirm upstream.  */
void attribute_hidden
_init (int argc, char **argv, char **envp)
{
#endif
#ifdef USE_NONOPTION_FLAGS
  extern void __getopt_clean_environment (char **);
#endif

  /* Nonzero when another libc image has already performed startup
     (detected through the dynamic linker's _dl_starting_up flag).  */
  __libc_multiple_libcs = &_dl_starting_up && !_dl_starting_up;

  /* Make sure we don't initialize twice.  */
  if (!__libc_multiple_libcs)
    {
      /* Set the FPU control word to the proper default value if the
	 kernel would use a different value.  (In a static program we
	 don't have this information.)  */
#ifdef SHARED
      if (__fpu_control != GLRO(dl_fpu_control))
#endif
	__setfpucw (__fpu_control);
    }

  /* Save the command-line arguments.  */
  __libc_argc = argc;
  __libc_argv = argv;
  __environ = envp;

#ifndef SHARED
  /* First the initialization which normally would be done by the
     dynamic linker.  (Both calls are intentionally left commented
     out here.)  */
  // !BAM // __libc_init_secure ();
  // !BAM // _dl_non_dynamic_init ();
#endif

#ifdef VDSO_SETUP
  VDSO_SETUP ();
#endif

  __init_misc (argc, argv, envp);

#ifdef USE_NONOPTION_FLAGS
  /* This is a hack to make the special getopt in GNU libc working.  */
  __getopt_clean_environment (envp);
#endif

  /* Initialize ctype data.  */
  __ctype_init ();

#if defined SHARED && !defined NO_CTORS_DTORS_SECTIONS
  __libc_global_ctors ();
#endif
}
/* Release free memory at the top of HEAP back to the system while
   keeping at least PAD bytes (plus bookkeeping) available above the
   top chunk.  Heaps whose top chunk fills the whole heap are deleted
   entirely first.  Returns 1 when memory was returned, else 0.  */
static int
internal_function
heap_trim(heap_info *heap, size_t pad)
{
  mstate ar_ptr = heap->ar_ptr;
  unsigned long pagesz = GLRO(dl_pagesize);
  mchunkptr top_chunk = top(ar_ptr), p, bck, fwd;
  heap_info *prev_heap;
  long new_size, top_size, extra, prev_size, misalign;

  /* Can this heap go away completely?  */
  while(top_chunk == chunk_at_offset(heap, sizeof(*heap))) {
    prev_heap = heap->prev;
    prev_size = prev_heap->size - (MINSIZE-2*SIZE_SZ);
    p = chunk_at_offset(prev_heap, prev_size);
    /* fencepost must be properly aligned.  */
    misalign = ((long) p) & MALLOC_ALIGN_MASK;
    p = chunk_at_offset(prev_heap, prev_size - misalign);
    assert(p->size == (0|PREV_INUSE)); /* must be fencepost */
    p = prev_chunk(p);
    /* Size the previous heap's top chunk would have after the
       fencepost area is merged back in.  */
    new_size = chunksize(p) + (MINSIZE-2*SIZE_SZ) + misalign;
    assert(new_size>0 && new_size<(long)(2*MINSIZE));
    if(!prev_inuse(p))
      new_size += p->prev_size;
    assert(new_size>0 && new_size<HEAP_MAX_SIZE);
    /* Give up on deleting this heap if doing so would leave too little
       headroom for PAD plus bookkeeping.  */
    if(new_size + (HEAP_MAX_SIZE - prev_heap->size) < pad + MINSIZE + pagesz)
      break;
    ar_ptr->system_mem -= heap->size;
    arena_mem -= heap->size;
    delete_heap(heap);
    heap = prev_heap;
    if(!prev_inuse(p)) { /* consolidate backward */
      p = prev_chunk(p);
      unlink(p, bck, fwd);
    }
    assert(((unsigned long)((char*)p + new_size) & (pagesz-1)) == 0);
    assert( ((char*)p + new_size) == ((char*)heap + heap->size) );
    top(ar_ptr) = top_chunk = p;
    set_head(top_chunk, new_size | PREV_INUSE);
    /*check_chunk(ar_ptr, top_chunk);*/
  }

  /* Now try to shrink the remaining heap: compute how many whole pages
     above the top chunk can go while keeping PAD + MINSIZE spare.  */
  top_size = chunksize(top_chunk);
  extra = (top_size - pad - MINSIZE - 1) & ~(pagesz - 1);
  if(extra < (long)pagesz)
    return 0; /* Not worth a system call for less than a page.  */

  /* Try to shrink.  */
  if(shrink_heap(heap, extra) != 0)
    return 0;

  ar_ptr->system_mem -= extra;
  arena_mem -= extra;

  /* Success.  Adjust top accordingly.  */
  set_head(top_chunk, (top_size - extra) | PREV_INUSE);
  /*check_chunk(ar_ptr, top_chunk);*/
  return 1;
}
/* Tunable handler: enable lock elision when requested and available in
   hardware.  It's not necessary to check __libc_enable_secure here —
   in secure mode ELISION_ENABLE keeps its default, which is
   disabled.  */
static inline void
__always_inline
do_set_elision_enable (int32_t elision_enable)
{
  if (elision_enable == 1)
    /* Only force elision when transactional execution is present.  */
    __pthread_force_elision = (GLRO (dl_hwcap) & HWCAP_S390_TE) != 0;
}
/* Return the system page size.  */
int
__getpagesize ()
{
  /* Prefer the value the dynamic linker obtained from the kernel.  */
  if (GLRO(dl_pagesize) != 0)
    return GLRO(dl_pagesize);

  /* Fall back to compile-time constants from the system headers.  */
#ifdef EXEC_PAGESIZE
  return EXEC_PAGESIZE;
#else	/* No EXEC_PAGESIZE.  */
#ifdef NBPG
#ifndef CLSIZE
#define CLSIZE 1
#endif	/* No CLSIZE.  */
  return NBPG * CLSIZE;
#else	/* No NBPG.  */
  return NBPC;
#endif	/* NBPG.  */
#endif	/* EXEC_PAGESIZE.  */
}
/* This function parses the HWCAP/HWCAP2 fields, adding the previous supported ISA bits, as well as converting the AT_PLATFORM string to a number. This data is stored in two global variables that can be used later by the powerpc-specific code to store it into the TCB. */ void __tcb_parse_hwcap_and_convert_at_platform (void) { uint64_t h1, h2; /* Read AT_PLATFORM string from auxv and convert it to a number. */ __tcb_platform = _dl_string_platform (GLRO (dl_platform)); /* Read HWCAP and HWCAP2 from auxv. */ h1 = GLRO (dl_hwcap); h2 = GLRO (dl_hwcap2); /* hwcap contains only the latest supported ISA, the code checks which is and fills the previous supported ones. */ if (h2 & PPC_FEATURE2_ARCH_2_07) h1 |= PPC_FEATURE_ARCH_2_06 | PPC_FEATURE_ARCH_2_05 | PPC_FEATURE_POWER5_PLUS | PPC_FEATURE_POWER5 | PPC_FEATURE_POWER4; else if (h1 & PPC_FEATURE_ARCH_2_06) h1 |= PPC_FEATURE_ARCH_2_05 | PPC_FEATURE_POWER5_PLUS | PPC_FEATURE_POWER5 | PPC_FEATURE_POWER4; else if (h1 & PPC_FEATURE_ARCH_2_05) h1 |= PPC_FEATURE_POWER5_PLUS | PPC_FEATURE_POWER5 | PPC_FEATURE_POWER4; else if (h1 & PPC_FEATURE_POWER5_PLUS) h1 |= PPC_FEATURE_POWER5 | PPC_FEATURE_POWER4; else if (h1 & PPC_FEATURE_POWER5) h1 |= PPC_FEATURE_POWER4; /* Consolidate both HWCAP and HWCAP2 into a single doubleword so that we can read both in a single load later. */ __tcb_hwcap = h2; __tcb_hwcap = (h1 << 32) | __tcb_hwcap; }
/* Return the system page size.  */
int
__getpagesize (void)
{
#ifdef __NR_getpagesize
  int result;
#endif

  /* Use the value the dynamic linker recorded, when available.  */
  if (GLRO(dl_pagesize) != 0)
    return GLRO(dl_pagesize);

#ifdef __NR_getpagesize
  /* Ask the kernel directly.  */
  INTERNAL_SYSCALL_DECL (err);
  result = INTERNAL_SYSCALL (getpagesize, err, 0);
  /* The only possible error is ENOSYS.  */
  if (!INTERNAL_SYSCALL_ERROR_P (result, err))
    return result;
#endif

  /* Last resort default.  */
  return 4096;
}
/* Copy values handed over by the dynamic linker into this libc's GLRO
   variables.  The slot order of ARRAY must match the "variables" list
   below.  */
void
_dl_var_init (void *array[])
{
  /* It has to match "variables" below.  */
  enum
    {
      DL_PAGESIZE = 0
    };

  GLRO(dl_pagesize) = *(size_t *) array[DL_PAGESIZE];
}
unsigned long int __getauxval (unsigned long int type) { #ifdef HAVE_AUX_VECTOR ElfW(auxv_t) *p; #endif if (type == AT_HWCAP) return GLRO(dl_hwcap); else if (type == AT_HWCAP2) return GLRO(dl_hwcap2); #ifdef HAVE_AUX_VECTOR for (p = GLRO(dl_auxv); p->a_type != AT_NULL; p++) if (p->a_type == type) return p->a_un.a_val; #endif __set_errno (ENOENT); return 0; }
/* Convert user pointer MEM back to its malloc chunk while validating
   the surrounding bookkeeping (used by the malloc checking hooks).  On
   success returns the chunk and, if MAGIC_P is non-NULL, stores the
   address of the trailing magic byte there; returns NULL when any
   consistency check fails.  */
static mchunkptr
internal_function
mem2chunk_check(void* mem, unsigned char **magic_p)
{
  mchunkptr p;
  INTERNAL_SIZE_T sz, c;
  unsigned char magic;

  if(!aligned_OK(mem)) return NULL;

  p = mem2chunk(mem);
  if (!chunk_is_mmapped(p)) {
    /* Must be a chunk in conventional heap memory.  */
    int contig = contiguous(&main_arena);
    sz = chunksize(p);
    /* Reject chunks outside the sbrk'd region (for contiguous arenas),
       undersized or misaligned sizes, chunks not marked in use, and
       inconsistent links to the previous chunk.  */
    if((contig &&
	((char*)p<mp_.sbrk_base ||
	 ((char*)p + sz)>=(mp_.sbrk_base+main_arena.system_mem) )) ||
       sz<MINSIZE || sz&MALLOC_ALIGN_MASK || !inuse(p) ||
       ( !prev_inuse(p) && (p->prev_size&MALLOC_ALIGN_MASK ||
			    (contig && (char*)prev_chunk(p)<mp_.sbrk_base) ||
			    next_chunk(prev_chunk(p))!=p) ))
      return NULL;
    magic = MAGICBYTE(p);
    /* Follow the chain of count bytes back until the magic byte at the
       end of the user area is reached.  */
    for(sz += SIZE_SZ-1; (c = ((unsigned char*)p)[sz]) != magic; sz -= c) {
      if(c<=0 || sz<(c+2*SIZE_SZ)) return NULL;
    }
  } else {
    unsigned long offset, page_mask = GLRO(dl_pagesize)-1;

    /* mmap()ed chunks have MALLOC_ALIGNMENT or higher power-of-two
       alignment relative to the beginning of a page.  Check this
       first. */
    offset = (unsigned long)mem & page_mask;
    if((offset!=MALLOC_ALIGNMENT && offset!=0 && offset!=0x10 &&
	offset!=0x20 && offset!=0x40 && offset!=0x80 && offset!=0x100 &&
	offset!=0x200 && offset!=0x400 && offset!=0x800 && offset!=0x1000 &&
	offset<0x2000) ||
       !chunk_is_mmapped(p) || (p->size & PREV_INUSE) ||
       ( (((unsigned long)p - p->prev_size) & page_mask) != 0 ) ||
       ( (sz = chunksize(p)), ((p->prev_size + sz) & page_mask) != 0 ) )
      return NULL;
    magic = MAGICBYTE(p);
    for(sz -= 1; (c = ((unsigned char*)p)[sz]) != magic; sz -= c) {
      if(c<=0 || sz<(c+2*SIZE_SZ)) return NULL;
    }
  }
  /* Flip the magic byte — NOTE(review): presumably so a later re-check
     of the same pointer (e.g. a double free) fails; confirm with the
     callers.  */
  ((unsigned char*)p)[sz] ^= 0xFF;
  if (magic_p)
    *magic_p = (unsigned char *)p + sz;
  return p;
}
/* Determine the origin directory for $ORIGIN expansion.  We use the
   environment variable LD_ORIGIN_PATH.  If it is set make a copy and
   strip out trailing slashes; otherwise (or on allocation failure)
   return (char *) -1.  */
const char *
_dl_get_origin (void)
{
  const char *origin = GLRO(dl_origin_path);

  if (origin == NULL)
    return (char *) -1;

  size_t len = strlen (origin);
  char *copy = (char *) malloc (len + 1);
  if (copy == NULL)
    return (char *) -1;

  /* Copy the value and drop trailing slashes, but never shrink the
     result below one character.  */
  char *end = __mempcpy (copy, origin, len);
  while (end > copy + 1 && end[-1] == '/')
    --end;
  *end = '\0';

  return copy;
}
/* Run OPERATE (with argument ARGS) under the dynamic linker's error
   catching, recording any resulting error in the calling thread's
   dlerror state.  Returns nonzero when an error occurred.  */
int
internal_function
_dlerror_run (void (*operate) (void *), void *args)
{
  struct dl_action_result *result;

  /* If we have not yet initialized the buffer do it now.  */
  __libc_once (once, init);

  /* Get error string and number.  */
  if (static_buf != NULL)
    result = static_buf;
  else
    {
      /* We don't use the static buffer and so we have a key.  Use it
	 to get the thread-specific buffer.  */
      result = __libc_getspecific (key);
      if (result == NULL)
	{
	  result = (struct dl_action_result *) calloc (1, sizeof (*result));
	  if (result == NULL)
	    /* We are out of memory.  Since this is no really critical
	       situation we carry on by using the global variable.
	       This might lead to conflicts between the threads but
	       they soon all will have memory problems.  */
	    result = &last_result;
	  else
	    /* Set the tsd.  */
	    __libc_setspecific (key, result);
	}
    }

  if (result->errstring != NULL)
    {
      /* Free the error string from the last failed command.  This can
	 happen if `dlerror' was not run after an error was found.  */
      if (result->malloced)
	free ((char *) result->errstring);
      result->errstring = NULL;
    }

  /* Invoke the operation with the linker catching any ld.so errors.  */
  result->errcode = GLRO(dl_catch_error) (&result->objname,
					  &result->errstring,
					  &result->malloced, operate, args);

  /* If no error we mark that no error string is available.  */
  result->returned = result->errstring == NULL;

  return result->errstring != NULL;
}
/* Look up symbol NAME (with version VERS) in the vDSO link map.
   NOTE(review): this definition is truncated in this chunk — the return
   type precedes it and the body continues past the visible span.  */
internal_function
_dl_vdso_vsym (const char *name, const struct r_found_version *vers)
{
#ifndef __ZRT_SO
  struct link_map *map = GLRO (dl_sysinfo_map);
  void *value = NULL;

  if (map != NULL)
    {
      /* Use a WEAK REF so we don't error out if the symbol is not
	 found.  */
      ElfW (Sym) wsym;
      memset (&wsym, 0, sizeof (ElfW (Sym)));
      wsym.st_info = (unsigned char) ELFW (ST_INFO (STB_WEAK, STT_NOTYPE));

      /* Search the scope of the vdso map.  */
      const ElfW (Sym) *ref = &wsym;
      lookup_t result = GLRO (dl_lookup_symbol_x) (name, map, &ref,
						   map->l_local_scope,
						   vers, 0, 0, NULL);

      if (ref != NULL)
	value = DL_SYMBOL_ADDRESS (result, ref);
    }
/* Return the system page size.  */
int
__getpagesize ()
{
#if 0 && defined __ASSUME_AT_PAGESIZE
  /* NOTE(review): this branch is permanently disabled by the "0 &&" —
     presumably deliberate; confirm before re-enabling.  */
  assert (GLRO(dl_pagesize) != 0);
  return GLRO(dl_pagesize);
#else
  /* Prefer the value recorded from the aux vector.  */
  if (GLRO(dl_pagesize) != 0)
    return GLRO(dl_pagesize);

  /* Fall back to compile-time constants from the system headers.  */
# ifdef EXEC_PAGESIZE
  return EXEC_PAGESIZE;
# else	/* No EXEC_PAGESIZE.  */
#  ifdef NBPG
#   ifndef CLSIZE
#    define CLSIZE 1
#   endif	/* No CLSIZE.  */
  return NBPG * CLSIZE;
#  else	/* No NBPG.  */
  return NBPC;
#  endif	/* NBPG.  */
# endif	/* EXEC_PAGESIZE.  */
#endif
}
/* Store the current floating-point environment in *ENVP.  Returns 0 on
   success, 1 when no VFP unit is present.  */
int
__fegetenv (fenv_t *envp)
{
  if ((GLRO (dl_hwcap) & HWCAP_ARM_VFP) == 0)
    /* Unsupported, so fail.  */
    return 1;

  unsigned long int fpscr;

  _FPU_GETCW (fpscr);
  envp->__cw = fpscr;

  /* Success.  */
  return 0;
}
/* Return the currently selected rounding mode.  */
int
fegetround (void)
{
  unsigned int fpscr;

  if ((GLRO (dl_hwcap) & HWCAP_VFP) == 0)
    /* The current soft-float implementation only handles TONEAREST.  */
    return FE_TONEAREST;

  /* Read the control word; FE_TOWARDZERO is used here as the mask for
     the rounding-mode bits.  */
  _FPU_GETCW (fpscr);
  return fpscr & FE_TOWARDZERO;
}
/* Enumerate the sparc IFUNC implementations of memcpy, mempcpy, bzero
   and memset.  Fills ARRAY (capacity MAX) for the symbol NAME with the
   variants usable under the current hardware capabilities, most
   specific first, and returns the number of entries used.  */
size_t
__libc_ifunc_impl_list (const char *name, struct libc_ifunc_impl *array,
			size_t max)
{
  size_t i = 0;
  int hwcap;

  hwcap = GLRO(dl_hwcap);

  IFUNC_IMPL (i, name, memcpy,
	      IFUNC_IMPL_ADD (array, i, memcpy, hwcap & HWCAP_SPARC_CRYPTO,
			      __memcpy_niagara4)
	      IFUNC_IMPL_ADD (array, i, memcpy, hwcap & HWCAP_SPARC_N2,
			      __memcpy_niagara2)
	      IFUNC_IMPL_ADD (array, i, memcpy, hwcap & HWCAP_SPARC_BLKINIT,
			      __memcpy_niagara1)
	      IFUNC_IMPL_ADD (array, i, memcpy, hwcap & HWCAP_SPARC_ULTRA3,
			      __memcpy_ultra3)
	      /* Baseline variant, always usable.  */
	      IFUNC_IMPL_ADD (array, i, memcpy, 1, __memcpy_ultra1));

  IFUNC_IMPL (i, name, mempcpy,
	      IFUNC_IMPL_ADD (array, i, mempcpy, hwcap & HWCAP_SPARC_CRYPTO,
			      __mempcpy_niagara4)
	      IFUNC_IMPL_ADD (array, i, mempcpy, hwcap & HWCAP_SPARC_N2,
			      __mempcpy_niagara2)
	      IFUNC_IMPL_ADD (array, i, mempcpy, hwcap & HWCAP_SPARC_BLKINIT,
			      __mempcpy_niagara1)
	      IFUNC_IMPL_ADD (array, i, mempcpy, hwcap & HWCAP_SPARC_ULTRA3,
			      __mempcpy_ultra3)
	      IFUNC_IMPL_ADD (array, i, mempcpy, 1, __mempcpy_ultra1));

  IFUNC_IMPL (i, name, bzero,
	      IFUNC_IMPL_ADD (array, i, bzero, hwcap & HWCAP_SPARC_CRYPTO,
			      __bzero_niagara4)
	      IFUNC_IMPL_ADD (array, i, bzero, hwcap & HWCAP_SPARC_BLKINIT,
			      __bzero_niagara1)
	      IFUNC_IMPL_ADD (array, i, bzero, 1, __bzero_ultra1));

  IFUNC_IMPL (i, name, memset,
	      IFUNC_IMPL_ADD (array, i, memset, hwcap & HWCAP_SPARC_CRYPTO,
			      __memset_niagara4)
	      IFUNC_IMPL_ADD (array, i, memset, hwcap & HWCAP_SPARC_BLKINIT,
			      __memset_niagara1)
	      IFUNC_IMPL_ADD (array, i, memset, 1, __memset_ultra1));

  return i;
}