const char *
internal_function
_dl_load_cache_lookup (const char *name)
{
  int left, right, middle;
  int cmpres;
  const char *cache_data;
  uint32_t cache_data_size;
  const char *best;

  /* Print a message if the loading of libs is traced.  */
  if (__builtin_expect (GLRO_dl_debug_mask & DL_DEBUG_LIBS, 0))
    _dl_debug_printf (" search cache=%s\n", LD_SO_CACHE);

  if (cache == NULL)
    {
      /* Read the contents of the file.  */
      void *file = _dl_sysdep_read_whole_file (LD_SO_CACHE, &cachesize,
                                               PROT_READ);

      /* We can handle three different cache file formats here:
         - the old libc5/glibc2.0/2.1 format
         - the old format with the new format in it
         - only the new format
         The following checks if the cache contains any of these formats.  */
      if (file != MAP_FAILED && cachesize > sizeof *cache
          && memcmp (file, CACHEMAGIC, sizeof CACHEMAGIC - 1) == 0)
        {
          size_t offset;

          /* Looks ok.  */
          cache = file;

          /* Check for new version.  */
          offset = ALIGN_CACHE (sizeof (struct cache_file)
                                + cache->nlibs * sizeof (struct file_entry));

          cache_new = (struct cache_file_new *) ((void *) cache + offset);
          if (cachesize < (offset + sizeof (struct cache_file_new))
              || memcmp (cache_new->magic, CACHEMAGIC_VERSION_NEW,
                         sizeof CACHEMAGIC_VERSION_NEW - 1) != 0)
            cache_new = (void *) -1;
        }
      else if (file != MAP_FAILED && cachesize > sizeof *cache_new
               && memcmp (file, CACHEMAGIC_VERSION_NEW,
                          sizeof CACHEMAGIC_VERSION_NEW - 1) == 0)
        {
          cache_new = file;
          cache = file;
        }
      else
        {
          if (file != MAP_FAILED)
            __munmap (file, cachesize);
          cache = (void *) -1;
        }

      assert (cache != NULL);
    }

  if (cache == (void *) -1)
    /* Previously looked for the cache file and didn't find it.  */
    return NULL;

  best = NULL;

  if (cache_new != (void *) -1)
    {
      uint64_t platform;
      int disable_hwcap = 0;

      /* This is where the strings start.  */
      cache_data = (const char *) cache_new;

      /* Now we can compute how large the string table is.  */
      cache_data_size = (const char *) cache + cachesize - cache_data;

      platform = _dl_string_platform (GLRO(dl_platform));
      if (platform != (uint64_t) -1)
        platform = 1ULL << platform;

      if (__access ("/etc/ld.so.nohwcap", F_OK) == 0)
        disable_hwcap = 1;

#define _DL_HWCAP_TLS_MASK (1LL << 63)
      uint64_t hwcap_exclude = ~((GLRO(dl_hwcap) & GLRO(dl_hwcap_mask))
                                 | _DL_HWCAP_PLATFORM | _DL_HWCAP_TLS_MASK);

      /* Only accept hwcap if it's for the right platform.  */
#define HWCAP_CHECK \
      if (lib->hwcap & hwcap_exclude)                                   \
        continue;                                                       \
      if (GLRO(dl_osversion) && lib->osversion > GLRO(dl_osversion))    \
        continue;                                                       \
      if (disable_hwcap && lib->hwcap != 0)                             \
        continue;                                                       \
      if (_DL_PLATFORMS_COUNT                                           \
          && (lib->hwcap & _DL_HWCAP_PLATFORM) != 0                     \
          && (lib->hwcap & _DL_HWCAP_PLATFORM) != platform)             \
        continue
      SEARCH_CACHE (cache_new);
    }
  else
    {
      /* This is where the strings start.  */
      cache_data = (const char *) &cache->libs[cache->nlibs];

      /* Now we can compute how large the string table is.  */
      cache_data_size = (const char *) cache + cachesize - cache_data;

#undef HWCAP_CHECK
#define HWCAP_CHECK do {} while (0)
      SEARCH_CACHE (cache);
    }

  /* Print our result if wanted.  */
  if (__builtin_expect (GLRO_dl_debug_mask & DL_DEBUG_LIBS, 0)
      && best != NULL)
    _dl_debug_printf (" trying file=%s\n", best);

  return best;
}
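/* Illustrative sketch, not part of the original source: the filtering that
   the HWCAP_CHECK macro above applies to each cache entry, written out as a
   plain predicate so the intent is easier to follow.  The function name and
   parameters are hypothetical; the real macro expands inside SEARCH_CACHE
   and reads the surrounding locals (`lib', `hwcap_exclude', `platform',
   `disable_hwcap') directly.  */
static inline int
cache_entry_acceptable (uint64_t entry_hwcap, unsigned int entry_osversion,
                        uint64_t hwcap_exclude, uint64_t platform,
                        int disable_hwcap)
{
  /* Entry requires a hardware capability that is masked out or missing.  */
  if (entry_hwcap & hwcap_exclude)
    return 0;
  /* Entry was built for a newer kernel than the one we are running on.  */
  if (GLRO(dl_osversion) && entry_osversion > GLRO(dl_osversion))
    return 0;
  /* /etc/ld.so.nohwcap disables every hwcap-specific entry.  */
  if (disable_hwcap && entry_hwcap != 0)
    return 0;
  /* A platform-tagged entry must match the current platform bit.  */
  if (_DL_PLATFORMS_COUNT
      && (entry_hwcap & _DL_HWCAP_PLATFORM) != 0
      && (entry_hwcap & _DL_HWCAP_PLATFORM) != platform)
    return 0;
  return 1;
}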
void
_dl_map_object_deps (struct link_map *map,
                     struct link_map **preloads, unsigned int npreloads,
                     int trace_mode, int open_mode)
{
  struct list *known = __alloca (sizeof *known * (1 + npreloads + 1));
  struct list *runp, *tail;
  unsigned int nlist, i;
  /* Object name.  */
  const char *name;
  int errno_saved;
  int errno_reason;
  struct dl_exception exception;

  /* No loaded object so far.  */
  nlist = 0;

  /* First load MAP itself.  */
  preload (known, &nlist, map);

  /* Add the preloaded items after MAP but before any of its
     dependencies.  */
  for (i = 0; i < npreloads; ++i)
    preload (known, &nlist, preloads[i]);

  /* Terminate the lists.  */
  known[nlist - 1].next = NULL;

  /* Pointer to last unique object.  */
  tail = &known[nlist - 1];

  struct scratch_buffer needed_space;
  scratch_buffer_init (&needed_space);

  /* Process each element of the search list, loading each of its
     auxiliary objects and immediate dependencies.  Auxiliary objects
     will be added in the list before the object itself and
     dependencies will be appended to the list as we step through it.
     This produces a flat, ordered list that represents a
     breadth-first search of the dependency tree.

     The whole process is complicated by the fact that we had better
     use alloca for the temporary list elements.  But using alloca
     means we cannot use recursive function calls.  */
  errno_saved = errno;
  errno_reason = 0;
  errno = 0;
  name = NULL;
  for (runp = known; runp; )
    {
      struct link_map *l = runp->map;
      struct link_map **needed = NULL;
      unsigned int nneeded = 0;

      /* Unless otherwise stated, this object is handled.  */
      runp->done = 1;

      /* Allocate a temporary record to contain the references to the
         dependencies of this object.  */
      if (l->l_searchlist.r_list == NULL && l->l_initfini == NULL
          && l != map && l->l_ldnum > 0)
        {
          /* l->l_ldnum includes space for the terminating NULL.  */
          if (!scratch_buffer_set_array_size (&needed_space, l->l_ldnum,
                                              sizeof (struct link_map *)))
            _dl_signal_error (ENOMEM, map->l_name, NULL,
                              N_("cannot allocate dependency buffer"));
          needed = needed_space.data;
        }

      if (l->l_info[DT_NEEDED] || l->l_info[AUXTAG] || l->l_info[FILTERTAG])
        {
          const char *strtab = (const void *) D_PTR (l, l_info[DT_STRTAB]);
          struct openaux_args args;
          struct list *orig;
          const ElfW(Dyn) *d;

          args.strtab = strtab;
          args.map = l;
          args.trace_mode = trace_mode;
          args.open_mode = open_mode;
          orig = runp;

          for (d = l->l_ld; d->d_tag != DT_NULL; ++d)
            if (__builtin_expect (d->d_tag, DT_NEEDED) == DT_NEEDED)
              {
                /* Map in the needed object.  */
                struct link_map *dep;

                /* Recognize DSTs.  */
                name = expand_dst (l, strtab + d->d_un.d_val, 0);
                /* Store the tag in the argument structure.  */
                args.name = name;

                int err = _dl_catch_exception (&exception, openaux, &args);
                if (__glibc_unlikely (exception.errstring != NULL))
                  {
                    if (err)
                      errno_reason = err;
                    else
                      errno_reason = -1;
                    goto out;
                  }
                else
                  dep = args.aux;

                if (! dep->l_reserved)
                  {
                    /* Allocate new entry.  */
                    struct list *newp;

                    newp = alloca (sizeof (struct list));

                    /* Append DEP to the list.  */
                    newp->map = dep;
                    newp->done = 0;
                    newp->next = NULL;
                    tail->next = newp;
                    tail = newp;
                    ++nlist;
                    /* Set the mark bit that says it's already in the
                       list.  */
                    dep->l_reserved = 1;
                  }

                /* Remember this dependency.  */
                if (needed != NULL)
                  needed[nneeded++] = dep;
              }
            else if (d->d_tag == DT_AUXILIARY || d->d_tag == DT_FILTER)
              {
                struct list *newp;

                /* Recognize DSTs.  */
                name = expand_dst (l, strtab + d->d_un.d_val,
                                   d->d_tag == DT_AUXILIARY);
                /* Store the tag in the argument structure.  */
                args.name = name;

                /* Say that we are about to load an auxiliary library.  */
                if (__builtin_expect (GLRO(dl_debug_mask) & DL_DEBUG_LIBS, 0))
                  _dl_debug_printf ("load auxiliary object=%s"
                                    " requested by file=%s\n",
                                    name, DSO_FILENAME (l->l_name));

                /* We must be prepared that the addressed shared object
                   is not available.  For filter objects the dependency
                   must be available.  */
                int err = _dl_catch_exception (&exception, openaux, &args);
                if (__glibc_unlikely (exception.errstring != NULL))
                  {
                    if (d->d_tag == DT_AUXILIARY)
                      {
                        /* We are not interested in the error message.  */
                        _dl_exception_free (&exception);
                        /* Simply ignore this error and continue the
                           work.  */
                        continue;
                      }
                    else
                      {
                        if (err)
                          errno_reason = err;
                        else
                          errno_reason = -1;
                        goto out;
                      }
                  }

                /* The auxiliary object is actually available.
                   Incorporate the map in all the lists.  */

                /* Allocate new entry.  This always has to be done.  */
                newp = alloca (sizeof (struct list));

                /* We want to insert the new map before the current one,
                   but we have no back links.  So we copy the contents of
                   the current entry over.  Note that ORIG and NEWP now
                   have switched their meanings.  */
                memcpy (newp, orig, sizeof (*newp));

                /* Initialize new entry.  */
                orig->done = 0;
                orig->map = args.aux;

                /* Remember this dependency.  */
                if (needed != NULL)
                  needed[nneeded++] = args.aux;

                /* We must handle two situations here: the map is new,
                   so we must add it in all three lists.  If the map
                   is already known, we have two further possibilities:
                   - if the object is before the current map in the
                     search list, we do nothing.  It is already found
                     early
                   - if the object is after the current one, we must
                     move it just before the current map to make sure
                     the symbols are found early enough  */
                if (args.aux->l_reserved)
                  {
                    /* The object is already somewhere in the list.
                       Locate it first.  */
                    struct list *late;

                    /* This object is already in the search list we
                       are building.  Don't add a duplicate pointer.
                       Just added by _dl_map_object.  */
                    for (late = newp; late->next != NULL; late = late->next)
                      if (late->next->map == args.aux)
                        break;

                    if (late->next != NULL)
                      {
                        /* The object is somewhere behind the current
                           position in the search path.  We have to
                           move it to this earlier position.  */
                        orig->next = newp;

                        /* Now remove the later entry from the list
                           and adjust the tail pointer.  */
                        if (tail == late->next)
                          tail = late;
                        late->next = late->next->next;

                        /* We must move the object earlier in the chain.  */
                        if (args.aux->l_prev != NULL)
                          args.aux->l_prev->l_next = args.aux->l_next;
                        if (args.aux->l_next != NULL)
                          args.aux->l_next->l_prev = args.aux->l_prev;

                        args.aux->l_prev = newp->map->l_prev;
                        newp->map->l_prev = args.aux;
                        if (args.aux->l_prev != NULL)
                          args.aux->l_prev->l_next = args.aux;
                        args.aux->l_next = newp->map;
                      }
                    else
                      {
                        /* The object must be somewhere earlier in the
                           list.  Undo to the current list element what
                           we did above.  */
                        memcpy (orig, newp, sizeof (*newp));
                        continue;
                      }
                  }
                else
                  {
                    /* This is easy.  We just add the symbol right here.  */
                    orig->next = newp;
                    ++nlist;
                    /* Set the mark bit that says it's already in the
                       list.  */
                    args.aux->l_reserved = 1;

                    /* The only problem is that in the double linked
                       list of all objects we don't have this new
                       object at the correct place.  Correct this here.  */
                    if (args.aux->l_prev)
                      args.aux->l_prev->l_next = args.aux->l_next;
                    if (args.aux->l_next)
                      args.aux->l_next->l_prev = args.aux->l_prev;

                    args.aux->l_prev = newp->map->l_prev;
                    newp->map->l_prev = args.aux;
                    if (args.aux->l_prev != NULL)
                      args.aux->l_prev->l_next = args.aux;
                    args.aux->l_next = newp->map;
                  }

                /* Move the tail pointer if necessary.  */
                if (orig == tail)
                  tail = newp;

                /* Move on the insert point.  */
                orig = newp;
              }
        }

      /* Terminate the list of dependencies and store the array
         address.  */
      if (needed != NULL)
        {
          needed[nneeded++] = NULL;

          struct link_map **l_initfini = (struct link_map **)
            malloc ((2 * nneeded + 1) * sizeof needed[0]);
          if (l_initfini == NULL)
            {
              scratch_buffer_free (&needed_space);
              _dl_signal_error (ENOMEM, map->l_name, NULL,
                                N_("cannot allocate dependency list"));
            }
          l_initfini[0] = l;
          memcpy (&l_initfini[1], needed, nneeded * sizeof needed[0]);
          memcpy (&l_initfini[nneeded + 1], l_initfini,
                  nneeded * sizeof needed[0]);
          atomic_write_barrier ();
          l->l_initfini = l_initfini;
          l->l_free_initfini = 1;
        }

      /* If we have no auxiliary objects just go on to the next map.  */
      if (runp->done)
        do
          runp = runp->next;
        while (runp != NULL && runp->done);
    }

 out:
  scratch_buffer_free (&needed_space);

  if (errno == 0 && errno_saved != 0)
    __set_errno (errno_saved);

  struct link_map **old_l_initfini = NULL;
  if (map->l_initfini != NULL && map->l_type == lt_loaded)
    {
      /* This object was previously loaded as a dependency and we have
         a separate l_initfini list.  We don't need it anymore.  */
      assert (map->l_searchlist.r_list == NULL);
      old_l_initfini = map->l_initfini;
    }

  /* Store the search list we built in the object.  It will be used for
     searches in the scope of this object.  */
  struct link_map **l_initfini =
    (struct link_map **) malloc ((2 * nlist + 1)
                                 * sizeof (struct link_map *));
  if (l_initfini == NULL)
    _dl_signal_error (ENOMEM, map->l_name, NULL,
                      N_("cannot allocate symbol search list"));

  map->l_searchlist.r_list = &l_initfini[nlist + 1];
  map->l_searchlist.r_nlist = nlist;

  for (nlist = 0, runp = known; runp; runp = runp->next)
    {
      if (__builtin_expect (trace_mode, 0) && runp->map->l_faked)
        /* This can happen when we trace the loading.  */
        --map->l_searchlist.r_nlist;
      else
        map->l_searchlist.r_list[nlist++] = runp->map;

      /* Now clear all the mark bits we set in the objects on the
         search list to avoid duplicates, so the next call starts
         fresh.  */
      runp->map->l_reserved = 0;
    }

  if (__builtin_expect (GLRO(dl_debug_mask) & DL_DEBUG_PRELINK, 0) != 0
      && map == GL(dl_ns)[LM_ID_BASE]._ns_loaded)
    {
      /* If we are to compute conflicts, we have to build local scope
         for each library, not just the ultimate loader.  */
      for (i = 0; i < nlist; ++i)
        {
          struct link_map *l = map->l_searchlist.r_list[i];
          unsigned int j, cnt;

          /* The local scope has been already computed.  */
          if (l == map
              || (l->l_local_scope[0]
                  && l->l_local_scope[0]->r_nlist) != 0)
            continue;

          if (l->l_info[AUXTAG] || l->l_info[FILTERTAG])
            {
              /* As current DT_AUXILIARY/DT_FILTER implementation needs
                 to be rewritten, no need to bother with prelinking the
                 old implementation.  */
              _dl_signal_error (EINVAL, l->l_name, NULL, N_("\
Filters not supported with LD_TRACE_PRELINKING"));
            }

          cnt = _dl_build_local_scope (l_initfini, l);
          assert (cnt <= nlist);
          for (j = 0; j < cnt; j++)
            {
              l_initfini[j]->l_reserved = 0;
              if (j && __builtin_expect (l_initfini[j]->l_info[DT_SYMBOLIC]
                                         != NULL, 0))
                l->l_symbolic_in_local_scope = true;
            }

          l->l_local_scope[0] = (struct r_scope_elem *)
            malloc (sizeof (struct r_scope_elem)
                    + (cnt * sizeof (struct link_map *)));
          if (l->l_local_scope[0] == NULL)
            _dl_signal_error (ENOMEM, map->l_name, NULL,
                              N_("cannot allocate symbol search list"));
          l->l_local_scope[0]->r_nlist = cnt;
          l->l_local_scope[0]->r_list = (struct link_map **)
            (l->l_local_scope[0] + 1);
          memcpy (l->l_local_scope[0]->r_list, l_initfini,
                  cnt * sizeof (struct link_map *));
        }
    }
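  /* Editorial note, not part of the original source: the single
     (2 * nlist + 1)-pointer allocation made for `l_initfini' above is one
     block holding two views of the collected objects.  Slots
     [nlist + 1 .. 2 * nlist] are handed to `map->l_searchlist.r_list' and
     filled with the breadth-first search order built in the loop above,
     while slots [0 .. nlist] are reserved for the NULL-terminated
     init/fini ordering that the remainder of this function fills in.  */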