path: root/elf/dl-fini.c
author     Ulrich Drepper <drepper@redhat.com>    2004-10-14 02:08:23 +0000
committer  Ulrich Drepper <drepper@redhat.com>    2004-10-14 02:08:23 +0000
commit     c0f62c56788c48b9fb36dc609c0a9f9db3667306 (patch)
tree       79c7e9692bb59dc90e4466e238cd4578c2d5c8f3 /elf/dl-fini.c
parent     8e9185fb1c0294d2eb4bee0797d7e1c2850111ea (diff)
[BZ #77]
Update.  Add support for namespaces in the dynamic linker.
* dlfcn/Makefile (libdl-routines): Add dlmopen.
* dlfcn/Versions [libdl, GLIBC_2.3.4]: Add dlmopen.
* dlfcn/dlfcn.h: Define Lmid_t, LM_ID_BASE, and LM_ID_NEWLM.
  Declare dlmopen.  Document RTLD_DI_LMID.
* dlfcn/dlinfo.c: Handle RTLD_DI_LMID.
* dlfcn/dlmopen.c: New file.
* dlfcn/dlopen.c: Pass new parameter to _dl_open.
* dlfcn/dlopenold.c: Likewise.
* elf/dl-addr.c: Adjust for removal of GL(dl_loaded).
* elf/dl-caller.c: Likewise.
* elf/dl-close.c: Likewise.
* elf/dl-conflict.c: Likewise.
* elf/dl-debug.c: Likewise.
* elf/dl-lookup.c: Likewise.
* elf/dl-sym.c: Likewise.
* elf/dl-version.c: Likewise.
* elf/do-lookup.h: Likewise.
* elf/rtld.c: Likewise.
* sysdeps/unix/sysv/linux/i386/dl-librecon.h: Likewise.
* elf/dl-deps.c: Likewise.  Add new parameter to _dl_map_object.
* elf/dl-fini.c: Call destructors in all namespaces.
* elf/dl-iteratephdr.c: Compute total nloaded.  Adjust for removal
  of GL(dl_loaded).
* elf/dl-libc.c: Pass new parameter to _dl_open.  Adjust for removal
  of GL(dl_loaded).
* elf/dl-load.c (_dl_map_object_from_fd): Don't load ld.so a second
  time.  Reuse the one from the main namespace in all others.  Pass
  new parameter to _dl_new_object.  Adjust for removal of GL(dl_loaded).
* elf/dl-object.c: Take new parameter.  Use it to initialize l_ns.
  Adjust for removal of GL(dl_loaded).
* elf/dl-open.c (_dl_open): Take new parameter.  Adjust for removal
  of GL(dl_loaded).
* elf/dl-support.c: Replace global _dl_loaded etc variables with
  _dl_ns variable.
* include/dlfcn.h: Adjust prototype of _dl_open.  Define __LM_ID_CALLER.
* include/link.h: Add l_real, l_ns, and l_direct_opencount elements.
* sysdeps/generic/dl-tls.c: Bump TLS_STATIC_SURPLUS.  Since libc is
  using TLS we need memory appropriate to the number of namespaces.
* sysdeps/generic/ldsodefs.h (struct rtld_global): Replace _dl_loaded,
  _dl_nloaded, _dl_global_scope, _dl_main_searchlist, and
  _dl_global_scope_alloc with _dl_ns element.  Define DL_NNS.
  Adjust prototypes of _dl_map_object and member in rtld_global_ro.
* malloc/malloc.c: Include <dlfcn.h>.
* malloc/arena.c (ptmalloc_init): If libc is not in primary namespace,
  never use brk.
* elf/Makefile: Add rules to build and run tst-dlmopen1 and tst-dlmopen2.
* elf/tst-dlmopen1.c: New file.
* elf/tst-dlmopen1mod.c: New file.
* elf/tst-dlmopen2.c: New file.

* elf/dl-close.c: Improve reference counting by tracking direct loads.
* elf/dl-lookup.c (add_dependency): Likewise.
* elf/dl-open.c (dl_open_worker): Likewise.
* elf/rtld.c (dl_main): Likewise.

2004-09-09  GOTO Masanori  <gotom@debian.or.jp>

	[BZ #77]
	* elf/dl-close.c: Count down l_opencount to check not only for
	l_reldeps, but also l_initfini.

2004-10-13  Ulrich Drepper  <drepper@redhat.com>
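As a rough illustration of the dlmopen interface this commit introduces, the following hedged sketch loads a library into a fresh namespace and queries the namespace id back via dlinfo.  It is not one of the tst-dlmopen* tests from the commit; "libdemo.so" is a placeholder name, error handling is minimal, and the program would be built with -ldl on glibc of this era.

#define _GNU_SOURCE
#include <dlfcn.h>
#include <stdio.h>

int
main (void)
{
  /* Load a library into a brand-new namespace instead of the base one.  */
  void *h = dlmopen (LM_ID_NEWLM, "libdemo.so", RTLD_NOW);
  if (h == NULL)
    {
      fprintf (stderr, "dlmopen: %s\n", dlerror ());
      return 1;
    }

  /* RTLD_DI_LMID reports which namespace the handle lives in.  */
  Lmid_t lmid;
  if (dlinfo (h, RTLD_DI_LMID, &lmid) == 0)
    printf ("loaded into namespace %ld\n", (long) lmid);

  dlclose (h);
  return 0;
}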
Diffstat (limited to 'elf/dl-fini.c')
-rw-r--r--  elf/dl-fini.c  265
1 file changed, 152 insertions(+), 113 deletions(-)
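The heart of the hunk below is the pass that reorders each namespace's array of link maps so that every object comes before the objects it depends on; the destructors are then invoked from the front of the array.  The following standalone sketch mimics that reordering pass under the assumption of a made-up struct obj whose NULL-terminated deps array stands in for l_initfini; it is an illustration of the technique, not glibc code.

#include <stdio.h>
#include <string.h>

struct obj
{
  const char *name;
  struct obj **deps;           /* NULL-terminated, like l_initfini.  */
};

static void
sort_for_fini (struct obj **maps, struct obj **order, unsigned int nmaps)
{
  /* Walk the objects in their original load order ('order' mirrors the
     walk over the l_next list); slot 0, the main program, stays first.  */
  for (unsigned int n = 1; n < nmaps; ++n)
    {
      struct obj *l = order[n];
      unsigned int j, k;

      /* Find l's current slot in the work array.  */
      for (j = 1; maps[j] != l; ++j)
        ;

      /* Move every object that depends on l in front of it, so its
         destructor runs before l's.  */
      for (k = j + 1; k < nmaps; ++k)
        {
          struct obj **runp = maps[k]->deps;
          if (runp == NULL)
            continue;
          while (*runp != NULL)
            if (*runp++ == l)
              {
                struct obj *here = maps[k];
                memmove (&maps[j] + 1, &maps[j],
                         (k - j) * sizeof (struct obj *));
                maps[j++] = here;
                break;
              }
        }
    }
}

int
main (void)
{
  /* liba depends on libb, so liba's destructor must run before libb's.  */
  struct obj libb = { "libb", NULL };
  struct obj *liba_deps[] = { &libb, NULL };
  struct obj liba = { "liba", liba_deps };
  struct obj prog = { "main", NULL };

  struct obj *maps[] = { &prog, &libb, &liba };   /* arbitrary start order */
  struct obj *order[] = { &prog, &libb, &liba };  /* original load order */

  sort_for_fini (maps, order, 3);
  for (unsigned int i = 0; i < 3; ++i)
    printf ("fini %u: %s\n", i, maps[i]->name);
  return 0;
}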
diff --git a/elf/dl-fini.c b/elf/dl-fini.c
index dd405de415..33036b5cbd 100644
--- a/elf/dl-fini.c
+++ b/elf/dl-fini.c
@@ -32,10 +32,10 @@ internal_function
_dl_fini (void)
{
/* Lots of fun ahead. We have to call the destructors for all still
- loaded objects. The problem is that the ELF specification now
- demands that dependencies between the modules are taken into account.
- I.e., the destructor for a module is called before the ones for any
- of its dependencies.
+ loaded objects, in all namespaces. The problem is that the ELF
+ specification now demands that dependencies between the modules
+ are taken into account. I.e., the destructor for a module is
+ called before the ones for any of its dependencies.
To make things more complicated, we cannot simply use the reverse
order of the constructors. Since the user might have loaded objects
@@ -45,140 +45,179 @@ _dl_fini (void)
unsigned int i;
unsigned int nloaded;
struct link_map *l;
- struct link_map **maps;
-
- /* Protect against concurrent loads and unloads. */
- __rtld_lock_lock_recursive (GL(dl_load_lock));
-
- nloaded = GL(dl_nloaded);
-
- /* XXX Could it be (in static binaries) that there is no object loaded? */
- assert (nloaded > 0);
-
- /* Now we can allocate an array to hold all the pointers and copy
- the pointers in. */
- maps = (struct link_map **) alloca (nloaded * sizeof (struct link_map *));
- for (l = GL(dl_loaded), i = 0; l != NULL; l = l->l_next)
+ struct link_map **maps = NULL;
+ size_t maps_size = 0;
+
+ /* We first run the destructors of the main namespace, then the
+ other ones. The order should not matter since the namespace
+ content is supposed to be independent. But we can have auditing
+ code in an auxiliary namespace and we want it to monitor the
+ destructors. */
+ for (Lmid_t cnt = 0; cnt < DL_NNS; ++cnt)
{
- assert (i < nloaded);
+ /* Protect against concurrent loads and unloads. */
+ __rtld_lock_lock_recursive (GL(dl_load_lock));
- maps[i++] = l;
+ nloaded = GL(dl_ns)[cnt]._ns_nloaded;
- /* Bump l_opencount of all objects so that they are not dlclose()ed
- from underneath us. */
- ++l->l_opencount;
- }
- assert (i == nloaded);
-
- /* Now we have to do the sorting. */
- for (l = GL(dl_loaded)->l_next; l != NULL; l = l->l_next)
- {
- unsigned int j;
- unsigned int k;
+ /* XXX Could it be (in static binaries) that there is no object
+ loaded? */
+ assert (cnt != LM_ID_BASE || nloaded > 0);
- /* Find the place in the 'maps' array. */
- for (j = 1; maps[j] != l; ++j)
- ;
+ /* Now we can allocate an array to hold all the pointers and copy
+ the pointers in. */
+ if (maps_size < nloaded * sizeof (struct link_map *))
+ {
+ if (maps_size == 0)
+ {
+ maps_size = nloaded * sizeof (struct link_map *);
+ maps = (struct link_map **) alloca (maps_size);
+ }
+ else
+ maps = (struct link_map **)
+ extend_alloca (maps, maps_size,
+ nloaded * sizeof (struct link_map *));
+ }
- /* Find all object for which the current one is a dependency and
- move the found object (if necessary) in front. */
- for (k = j + 1; k < nloaded; ++k)
+ for (l = GL(dl_ns)[cnt]._ns_loaded, i = 0; l != NULL; l = l->l_next)
{
- struct link_map **runp = maps[k]->l_initfini;
- if (runp != NULL)
+ assert (i < nloaded);
+
+ /* Do not handle ld.so in secondary namespaces. */
+ if (l == l->l_real)
{
- while (*runp != NULL)
- if (*runp == l)
- {
- struct link_map *here = maps[k];
-
- /* Move it now. */
- memmove (&maps[j] + 1,
- &maps[j],
- (k - j) * sizeof (struct link_map *));
- maps[j++] = here;
-
- break;
- }
- else
- ++runp;
+ maps[i++] = l;
+
+ /* Bump l_opencount of all objects so that they are not
+ dlclose()ed from underneath us. */
+ ++l->l_opencount;
}
+ }
+ assert (cnt != LM_ID_BASE || i == nloaded);
+ assert (cnt == LM_ID_BASE || i == nloaded || i == nloaded - 1);
+ unsigned int nmaps = i;
- if (__builtin_expect (maps[k]->l_reldeps != NULL, 0))
+ if (nmaps != 0)
+ {
+ /* Now we have to do the sorting. */
+ l = GL(dl_ns)[cnt]._ns_loaded;
+ if (cnt == LM_ID_BASE)
+ /* The main executable always comes first. */
+ l = l->l_next;
+ for (; l != NULL; l = l->l_next)
{
- unsigned int m = maps[k]->l_reldepsact;
- struct link_map **relmaps = maps[k]->l_reldeps;
+ unsigned int j;
+ unsigned int k;
- while (m-- > 0)
+ /* Find the place in the 'maps' array. */
+ for (j = 1; maps[j] != l; ++j)
+ ;
+
+ /* Find all objects for which the current one is a dependency and
+ move the found object (if necessary) in front. */
+ for (k = j + 1; k < nmaps; ++k)
{
- if (relmaps[m] == l)
+ struct link_map **runp = maps[k]->l_initfini;
+ if (runp != NULL)
{
- struct link_map *here = maps[k];
-
- /* Move it now. */
- memmove (&maps[j] + 1,
- &maps[j],
- (k - j) * sizeof (struct link_map *));
- maps[j] = here;
+ while (*runp != NULL)
+ if (*runp == l)
+ {
+ struct link_map *here = maps[k];
+
+ /* Move it now. */
+ memmove (&maps[j] + 1,
+ &maps[j],
+ (k - j) * sizeof (struct link_map *));
+ maps[j++] = here;
+
+ break;
+ }
+ else
+ ++runp;
+ }
- break;
+ if (__builtin_expect (maps[k]->l_reldeps != NULL, 0))
+ {
+ unsigned int m = maps[k]->l_reldepsact;
+ struct link_map **relmaps = maps[k]->l_reldeps;
+
+ while (m-- > 0)
+ {
+ if (relmaps[m] == l)
+ {
+ struct link_map *here = maps[k];
+
+ /* Move it now. */
+ memmove (&maps[j] + 1,
+ &maps[j],
+ (k - j) * sizeof (struct link_map *));
+ maps[j] = here;
+
+ break;
+ }
+ }
}
}
}
}
- }
-
- /* We do not rely on the linked list of loaded object anymore from
- this point on. We have our own list here (maps). The various
- members of this list cannot vanish since the open count is too
- high and will be decremented in this loop. So we release the
- lock so that some code which might be called from a destructor
- can directly or indirectly access the lock. */
- __rtld_lock_unlock_recursive (GL(dl_load_lock));
-
- /* 'maps' now contains the objects in the right order. Now call the
- destructors. We have to process this array from the front. */
- for (i = 0; i < nloaded; ++i)
- {
- l = maps[i];
- if (l->l_init_called)
+ /* We do not rely on the linked list of loaded objects anymore from
+ this point on. We have our own list here (maps). The various
+ members of this list cannot vanish since the open count is too
+ high and will be decremented in this loop. So we release the
+ lock so that some code which might be called from a destructor
+ can directly or indirectly access the lock. */
+ __rtld_lock_unlock_recursive (GL(dl_load_lock));
+
+ /* 'maps' now contains the objects in the right order. Now call the
+ destructors. We have to process this array from the front. */
+ for (i = 0; i < nmaps; ++i)
{
- /* Make sure nothing happens if we are called twice. */
- l->l_init_called = 0;
+ l = maps[i];
- /* Don't call the destructors for objects we are not supposed to. */
- if (l->l_name[0] == '\0' && l->l_type == lt_executable)
- continue;
-
- /* Is there a destructor function? */
- if (l->l_info[DT_FINI_ARRAY] == NULL && l->l_info[DT_FINI] == NULL)
- continue;
-
- /* When debugging print a message first. */
- if (__builtin_expect (GLRO(dl_debug_mask) & DL_DEBUG_IMPCALLS, 0))
- _dl_debug_printf ("\ncalling fini: %s\n\n",
- l->l_name[0] ? l->l_name : rtld_progname);
-
- /* First see whether an array is given. */
- if (l->l_info[DT_FINI_ARRAY] != NULL)
+ if (l->l_init_called)
{
- ElfW(Addr) *array =
- (ElfW(Addr) *) (l->l_addr
- + l->l_info[DT_FINI_ARRAY]->d_un.d_ptr);
- unsigned int i = (l->l_info[DT_FINI_ARRAYSZ]->d_un.d_val
- / sizeof (ElfW(Addr)));
- while (i-- > 0)
- ((fini_t) array[i]) ();
+ /* Make sure nothing happens if we are called twice. */
+ l->l_init_called = 0;
+
+ /* Don't call the destructors for objects we are not
+ supposed to. */
+ if (l->l_name[0] == '\0' && l->l_type == lt_executable)
+ continue;
+
+ /* Is there a destructor function? */
+ if (l->l_info[DT_FINI_ARRAY] == NULL
+ && l->l_info[DT_FINI] == NULL)
+ continue;
+
+ /* When debugging print a message first. */
+ if (__builtin_expect (GLRO(dl_debug_mask) & DL_DEBUG_IMPCALLS,
+ 0))
+ _dl_debug_printf ("\ncalling fini: %s [%lu]\n\n",
+ l->l_name[0] ? l->l_name : rtld_progname,
+ cnt);
+
+ /* First see whether an array is given. */
+ if (l->l_info[DT_FINI_ARRAY] != NULL)
+ {
+ ElfW(Addr) *array =
+ (ElfW(Addr) *) (l->l_addr
+ + l->l_info[DT_FINI_ARRAY]->d_un.d_ptr);
+ unsigned int i = (l->l_info[DT_FINI_ARRAYSZ]->d_un.d_val
+ / sizeof (ElfW(Addr)));
+ while (i-- > 0)
+ ((fini_t) array[i]) ();
+ }
+
+ /* Next try the old-style destructor. */
+ if (l->l_info[DT_FINI] != NULL)
+ ((fini_t) DL_DT_FINI_ADDRESS (l, l->l_addr + l->l_info[DT_FINI]->d_un.d_ptr)) ();
}
- /* Next try the old-style destructor. */
- if (l->l_info[DT_FINI] != NULL)
- ((fini_t) DL_DT_FINI_ADDRESS (l, l->l_addr + l->l_info[DT_FINI]->d_un.d_ptr)) ();
+ /* Correct the previous increment. */
+ --l->l_opencount;
}
-
- /* Correct the previous increment. */
- --l->l_opencount;
}
if (__builtin_expect (GLRO(dl_debug_mask) & DL_DEBUG_STATISTICS, 0))