Bug Summary

File: ldso/dynlink.c
Location: line 615, column 12
Description: The left expression of the compound assignment is an uninitialized value. The computed value will also be garbage.
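
A minimal, self-contained sketch of the reported path follows. This is illustrative code written for this report, not musl source: the struct, field names, and the use of long in place of off_t are stand-ins, and the snippet deliberately reproduces the defect (reading an indeterminate value) so the same pattern the analyzer flags is visible in isolation. It shows that 'off_start' is assigned only inside the PT_LOAD branch of the header loop, so a program-header table containing PT_DYNAMIC but no PT_LOAD reaches the '&=' (line 615) with an indeterminate value. A possible mitigation, noted in the comments and not taken from upstream, is to initialize off_start to 0 at its declaration or to reject files whose header loop finds no PT_LOAD segment.

    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_SIZE 4096
    #define PT_LOAD 1

    /* Stand-in for the few Elf Phdr fields the reported path touches. */
    struct fake_phdr { int p_type; size_t p_vaddr, p_memsz; long p_offset; };

    int main(void)
    {
        /* No PT_LOAD entry: mirrors the analyzer path where every iteration
         * takes "p_type != PT_LOAD" and continues (7 = PT_TLS, 2 = PT_DYNAMIC). */
        struct fake_phdr ph[3] = { { .p_type = 7 }, { .p_type = 0x6474e552 }, { .p_type = 2 } };

        size_t addr_min = SIZE_MAX, addr_max = 0, map_len;
        long off_start;                 /* line 529: declared with no initial value */
                                        /* possible fix: long off_start = 0;        */

        for (size_t i = 0; i < sizeof ph / sizeof ph[0]; i++) {
            if (ph[i].p_type != PT_LOAD) continue;
            if (ph[i].p_vaddr < addr_min) {
                addr_min = ph[i].p_vaddr;
                off_start = ph[i].p_offset;   /* the only assignment */
            }
            if (ph[i].p_vaddr + ph[i].p_memsz > addr_max)
                addr_max = ph[i].p_vaddr + ph[i].p_memsz;
        }

        off_start &= -PAGE_SIZE;                    /* line 615: reads the garbage value */
        map_len = addr_max - addr_min + off_start;  /* line 616: garbage propagates      */
        printf("map_len = %zu\n", map_len);
        return 0;
    }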

Annotated Source Code

1#define _GNU_SOURCE
2#include <stdio.h>
3#include <stdlib.h>
4#include <stdarg.h>
5#include <stddef.h>
6#include <string.h>
7#include <unistd.h>
8#include <stdint.h>
9#include <elf.h>
10#include <sys/mman.h>
11#include <limits.h>
12#include <fcntl.h>
13#include <sys/stat.h>
14#include <errno.h>
15#include <link.h>
16#include <setjmp.h>
17#include <pthread.h>
18#include <ctype.h>
19#include <dlfcn.h>
20#include "pthread_impl.h"
21#include "libc.h"
22#include "dynlink.h"
23
24static void error(const char *, ...);
25
26#ifdef SHARED1
27
28#define MAXP2(a,b) (-(-(a)&-(b)))
29#define ALIGN(x,y) ((x)+(y)-1 & -(y))
30
31struct debug {
32 int ver;
33 void *head;
34 void (*bp)(void);
35 int state;
36 void *base;
37};
38
39struct td_index {
40 size_t args[2];
41 struct td_index *next;
42};
43
44struct dso {
45#if DL_FDPIC0
46 struct fdpic_loadmap *loadmap;
47#else
48 unsigned char *base;
49#endif
50 char *name;
51 size_t *dynv;
52 struct dso *next, *prev;
53
54 Phdr *phdr;
55 int phnum;
56 size_t phentsize;
57 int refcnt;
58 Sym *syms;
59 uint32_t *hashtab;
60 uint32_t *ghashtab;
61 int16_t *versym;
62 char *strings;
63 unsigned char *map;
64 size_t map_len;
65 dev_t dev;
66 ino_t ino;
67 signed char global;
68 char relocated;
69 char constructed;
70 char kernel_mapped;
71 struct dso **deps, *needed_by;
72 char *rpath_orig, *rpath;
73 void *tls_image;
74 size_t tls_len, tls_size, tls_align, tls_id, tls_offset;
75 size_t relro_start, relro_end;
76 void **new_dtv;
77 unsigned char *new_tls;
78 volatile int new_dtv_idx, new_tls_idx;
79 struct td_index *td_index;
80 struct dso *fini_next;
81 char *shortname;
82#if DL_FDPIC0
83 unsigned char *base;
84#else
85 struct fdpic_loadmap *loadmap;
86#endif
87 struct funcdesc {
88 void *addr;
89 size_t *got;
90 } *funcdescs;
91 size_t *got;
92 char buf[];
93};
94
95struct symdef {
96 Sym *sym;
97 struct dso *dso;
98};
99
100int __init_tp(void *);
101void __init_libc(char **, char *);
102
103const char *__libc_get_version(void);
104
105static struct builtin_tls {
106 char c;
107 struct pthread__pthread pt;
108 void *space[16];
109} builtin_tls[1];
110#define MIN_TLS_ALIGN offsetof(struct builtin_tls, pt)
111
112#define ADDEND_LIMIT 4096
113static size_t *saved_addends, *apply_addends_to;
114
115static struct dso ldso;
116static struct dso *head, *tail, *fini_head;
117static char *env_path, *sys_path;
118static unsigned long long gencnt;
119static int runtime;
120static int ldd_mode;
121static int ldso_fail;
122static int noload;
123static jmp_buf *rtld_fail;
124static pthread_rwlock_t lock;
125static struct debug debug;
126static size_t tls_cnt, tls_offset, tls_align = MIN_TLS_ALIGN__builtin_offsetof(struct builtin_tls, pt);
127static size_t static_tls_cnt;
128static pthread_mutex_t init_fini_lock = { ._m_type__u.__i[0] = PTHREAD_MUTEX_RECURSIVE1 };
129static struct fdpic_loadmap *app_loadmap;
130static struct fdpic_dummy_loadmap app_dummy_loadmap;
131
132struct debug *_dl_debug_addr = &debug;
133
134static int dl_strcmp(const char *l, const char *r)
135{
136 for (; *l==*r && *l; l++, r++);
137 return *(unsigned char *)l - *(unsigned char *)r;
138}
139#define strcmp(l,r) dl_strcmp(l,r)
140
141/* Compute load address for a virtual address in a given dso. */
142#if DL_FDPIC0
143static void *laddr(const struct dso *p, size_t v)
144{
145 size_t j=0;
146 if (!p->loadmap) return p->base + v;
147 for (j=0; v-p->loadmap->segs[j].p_vaddr >= p->loadmap->segs[j].p_memsz; j++);
148 return (void *)(v - p->loadmap->segs[j].p_vaddr + p->loadmap->segs[j].addr);
149}
150#define fpaddr(p, v) ((void (*)())&(struct funcdesc){ \
151 laddr(p, v), (p)->got })
152#else
153#define laddr(p, v) (void *)((p)->base + (v))
154#define fpaddr(p, v) ((void (*)())laddr(p, v))
155#endif
156
157static void decode_vec(size_t *v, size_t *a, size_t cnt)
158{
159 size_t i;
160 for (i=0; i<cnt; i++) a[i] = 0;
161 for (; v[0]; v+=2) if (v[0]-1<cnt-1) {
162 a[0] |= 1UL<<v[0];
163 a[v[0]] = v[1];
164 }
165}
166
167static int search_vec(size_t *v, size_t *r, size_t key)
168{
169 for (; v[0]!=key; v+=2)
170 if (!v[0]) return 0;
171 *r = v[1];
172 return 1;
173}
174
175static uint32_t sysv_hash(const char *s0)
176{
177 const unsigned char *s = (void *)s0;
178 uint_fast32_t h = 0;
179 while (*s) {
180 h = 16*h + *s++;
181 h ^= h>>24 & 0xf0;
182 }
183 return h & 0xfffffff;
184}
185
186static uint32_t gnu_hash(const char *s0)
187{
188 const unsigned char *s = (void *)s0;
189 uint_fast32_t h = 5381;
190 for (; *s; s++)
191 h += h*32 + *s;
192 return h;
193}
194
195static Sym *sysv_lookup(const char *s, uint32_t h, struct dso *dso)
196{
197 size_t i;
198 Sym *syms = dso->syms;
199 uint32_t *hashtab = dso->hashtab;
200 char *strings = dso->strings;
201 for (i=hashtab[2+h%hashtab[0]]; i; i=hashtab[2+hashtab[0]+i]) {
202 if ((!dso->versym || dso->versym[i] >= 0)
203 && (!strcmp(s, strings+syms[i].st_name)dl_strcmp(s,strings+syms[i].st_name)))
204 return syms+i;
205 }
206 return 0;
207}
208
209static Sym *gnu_lookup(uint32_t h1, uint32_t *hashtab, struct dso *dso, const char *s)
210{
211 uint32_t nbuckets = hashtab[0];
212 uint32_t *buckets = hashtab + 4 + hashtab[2]*(sizeof(size_t)/4);
213 uint32_t i = buckets[h1 % nbuckets];
214
215 if (!i) return 0;
216
217 uint32_t *hashval = buckets + nbuckets + (i - hashtab[1]);
218
219 for (h1 |= 1; ; i++) {
220 uint32_t h2 = *hashval++;
221 if ((h1 == (h2|1)) && (!dso->versym || dso->versym[i] >= 0)
222 && !strcmp(s, dso->strings + dso->syms[i].st_name)dl_strcmp(s,dso->strings + dso->syms[i].st_name))
223 return dso->syms+i;
224 if (h2 & 1) break;
225 }
226
227 return 0;
228}
229
230static Sym *gnu_lookup_filtered(uint32_t h1, uint32_t *hashtab, struct dso *dso, const char *s, uint32_t fofs, size_t fmask)
231{
232 const size_t *bloomwords = (const void *)(hashtab+4);
233 size_t f = bloomwords[fofs & (hashtab[2]-1)];
234 if (!(f & fmask)) return 0;
235
236 f >>= (h1 >> hashtab[3]) % (8 * sizeof f);
237 if (!(f & 1)) return 0;
238
239 return gnu_lookup(h1, hashtab, dso, s);
240}
241
242#define OK_TYPES (1<<STT_NOTYPE0 | 1<<STT_OBJECT1 | 1<<STT_FUNC2 | 1<<STT_COMMON5 | 1<<STT_TLS6)
243#define OK_BINDS (1<<STB_GLOBAL1 | 1<<STB_WEAK2 | 1<<STB_GNU_UNIQUE10)
244
245#ifndef ARCH_SYM_REJECT_UND
246#define ARCH_SYM_REJECT_UND(s) 0
247#endif
248
249static struct symdef find_sym(struct dso *dso, const char *s, int need_def)
250{
251 uint32_t h = 0, gh, gho, *ght;
252 size_t ghm = 0;
253 struct symdef def = {0};
254 for (; dso; dso=dso->next) {
255 Sym *sym;
256 if (!dso->global) continue;
257 if ((ght = dso->ghashtab)) {
258 if (!ghm) {
259 gh = gnu_hash(s);
260 int maskbits = 8 * sizeof ghm;
261 gho = gh / maskbits;
262 ghm = 1ul << gh % maskbits;
263 }
264 sym = gnu_lookup_filtered(gh, ght, dso, s, gho, ghm);
265 } else {
266 if (!h) h = sysv_hash(s);
267 sym = sysv_lookup(s, h, dso);
268 }
269 if (!sym) continue;
270 if (!sym->st_shndx)
271 if (need_def || (sym->st_info&0xf) == STT_TLS6
272 || ARCH_SYM_REJECT_UND(sym)0)
273 continue;
274 if (!sym->st_value)
275 if ((sym->st_info&0xf) != STT_TLS6)
276 continue;
277 if (!(1<<(sym->st_info&0xf) & OK_TYPES)) continue;
278 if (!(1<<(sym->st_info>>4) & OK_BINDS(1<<1 | 1<<2 | 1<<10))) continue;
279
280 if (def.sym && sym->st_info>>4 == STB_WEAK2) continue;
281 def.sym = sym;
282 def.dso = dso;
283 if (sym->st_info>>4 == STB_GLOBAL1) break;
284 }
285 return def;
286}
287
288__attribute__((__visibility__("hidden")))
289ptrdiff_t __tlsdesc_static(), __tlsdesc_dynamic();
290
291static void do_relocs(struct dso *dso, size_t *rel, size_t rel_size, size_t stride)
292{
293 unsigned char *base = dso->base;
294 Sym *syms = dso->syms;
295 char *strings = dso->strings;
296 Sym *sym;
297 const char *name;
298 void *ctx;
299 int type;
300 int sym_index;
301 struct symdef def;
302 size_t *reloc_addr;
303 size_t sym_val;
304 size_t tls_val;
305 size_t addend;
306 int skip_relative = 0, reuse_addends = 0, save_slot = 0;
307
308 if (dso == &ldso) {
309 /* Only ldso's REL table needs addend saving/reuse. */
310 if (rel == apply_addends_to)
311 reuse_addends = 1;
312 skip_relative = 1;
313 }
314
315 for (; rel_size; rel+=stride, rel_size-=stride*sizeof(size_t)) {
316 if (skip_relative && IS_RELATIVE(rel[1], dso->syms)) continue;
317 type = R_TYPE(rel[1])((rel[1])&0x7fffffff);
318 if (type == REL_NONE) continue;
319 sym_index = R_SYM(rel[1])((rel[1])>>32);
320 reloc_addr = laddr(dso, rel[0])(void *)((dso)->base + (rel[0]));
321 if (sym_index) {
322 sym = syms + sym_index;
323 name = strings + sym->st_name;
324 ctx = type==REL_COPY5 ? head->next : head;
325 def = (sym->st_info&0xf) == STT_SECTION3
326 ? (struct symdef){ .dso = dso, .sym = sym }
327 : find_sym(ctx, name, type==REL_PLT7);
328 if (!def.sym && (sym->st_shndx != SHN_UNDEF0
329 || sym->st_info>>4 != STB_WEAK2)) {
330 error("Error relocating %s: %s: symbol not found",
331 dso->name, name);
332 if (runtime) longjmp(*rtld_fail, 1);
333 continue;
334 }
335 } else {
336 sym = 0;
337 def.sym = 0;
338 def.dso = dso;
339 }
340
341 if (stride > 2) {
342 addend = rel[2];
343 } else if (type==REL_GOT6 || type==REL_PLT7|| type==REL_COPY5) {
344 addend = 0;
345 } else if (reuse_addends) {
346 /* Save original addend in stage 2 where the dso
347 * chain consists of just ldso; otherwise read back
348 * saved addend since the inline one was clobbered. */
349 if (head==&ldso)
350 saved_addends[save_slot] = *reloc_addr;
351 addend = saved_addends[save_slot++];
352 } else {
353 addend = *reloc_addr;
354 }
355
356 sym_val = def.sym ? (size_t)laddr(def.dso, def.sym->st_value)(void *)((def.dso)->base + (def.sym->st_value)) : 0;
357 tls_val = def.sym ? def.sym->st_value : 0;
358
359 switch(type) {
360 case REL_NONE:
361 break;
362 case REL_OFFSET:
363 addend -= (size_t)reloc_addr;
364 case REL_SYMBOLIC1:
365 case REL_GOT6:
366 case REL_PLT7:
367 *reloc_addr = sym_val + addend;
368 break;
369 case REL_RELATIVE8:
370 *reloc_addr = (size_t)base + addend;
371 break;
372 case REL_SYM_OR_REL:
373 if (sym) *reloc_addr = sym_val + addend;
374 else *reloc_addr = (size_t)base + addend;
375 break;
376 case REL_COPY5:
377 memcpy(reloc_addr, (void *)sym_val, sym->st_size);
378 break;
379 case REL_OFFSET322:
380 *(uint32_t *)reloc_addr = sym_val + addend
381 - (size_t)reloc_addr;
382 break;
383 case REL_FUNCDESC:
384 *reloc_addr = def.sym ? (size_t)(def.dso->funcdescs
385 + (def.sym - def.dso->syms)) : 0;
386 break;
387 case REL_FUNCDESC_VAL:
388 if ((sym->st_info&0xf) == STT_SECTION3) *reloc_addr += sym_val;
389 else *reloc_addr = sym_val;
390 reloc_addr[1] = def.sym ? (size_t)def.dso->got : 0;
391 break;
392 case REL_DTPMOD16:
393 *reloc_addr = def.dso->tls_id;
394 break;
395 case REL_DTPOFF17:
396 *reloc_addr = tls_val + addend - DTP_OFFSET0;
397 break;
398#ifdef TLS_ABOVE_TP
399 case REL_TPOFF18:
400 *reloc_addr = tls_val + def.dso->tls_offset + TPOFF_K + addend;
401 break;
402#else
403 case REL_TPOFF18:
404 *reloc_addr = tls_val - def.dso->tls_offset + addend;
405 break;
406 case REL_TPOFF_NEG:
407 *reloc_addr = def.dso->tls_offset - tls_val + addend;
408 break;
409#endif
410 case REL_TLSDESC36:
411 if (stride<3) addend = reloc_addr[1];
412 if (runtime && def.dso->tls_id >= static_tls_cnt) {
413 struct td_index *new = malloc(sizeof *new);
414 if (!new) {
415 error(
416 "Error relocating %s: cannot allocate TLSDESC for %s",
417 dso->name, sym ? name : "(local)" );
418 longjmp(*rtld_fail, 1);
419 }
420 new->next = dso->td_index;
421 dso->td_index = new;
422 new->args[0] = def.dso->tls_id;
423 new->args[1] = tls_val + addend;
424 reloc_addr[0] = (size_t)__tlsdesc_dynamic;
425 reloc_addr[1] = (size_t)new;
426 } else {
427 reloc_addr[0] = (size_t)__tlsdesc_static;
428#ifdef TLS_ABOVE_TP
429 reloc_addr[1] = tls_val + def.dso->tls_offset
430 + TPOFF_K + addend;
431#else
432 reloc_addr[1] = tls_val - def.dso->tls_offset
433 + addend;
434#endif
435 }
436 break;
437 default:
438 error("Error relocating %s: unsupported relocation type %d",
439 dso->name, type);
440 if (runtime) longjmp(*rtld_fail, 1);
441 continue;
442 }
443 }
444}
445
446/* A huge hack: to make up for the wastefulness of shared libraries
447 * needing at least a page of dirty memory even if they have no global
448 * data, we reclaim the gaps at the beginning and end of writable maps
449 * and "donate" them to the heap by setting up minimal malloc
450 * structures and then freeing them. */
451
452static void reclaim(struct dso *dso, size_t start, size_t end)
453{
454 size_t *a, *z;
455 if (start >= dso->relro_start && start < dso->relro_end) start = dso->relro_end;
456 if (end >= dso->relro_start && end < dso->relro_end) end = dso->relro_start;
457 start = start + 6*sizeof(size_t)-1 & -4*sizeof(size_t);
458 end = (end & -4*sizeof(size_t)) - 2*sizeof(size_t);
459 if (start>end || end-start < 4*sizeof(size_t)) return;
460 a = laddr(dso, start)(void *)((dso)->base + (start));
461 z = laddr(dso, end)(void *)((dso)->base + (end));
462 a[-2] = 1;
463 a[-1] = z[0] = end-start + 2*sizeof(size_t) | 1;
464 z[1] = 1;
465 free(a);
466}
467
468static void reclaim_gaps(struct dso *dso)
469{
470 Phdr *ph = dso->phdr;
471 size_t phcnt = dso->phnum;
472
473 if (DL_FDPIC0) return; // FIXME
474 for (; phcnt--; ph=(void *)((char *)ph+dso->phentsize)) {
475 if (ph->p_type!=PT_LOAD1) continue;
476 if ((ph->p_flags&(PF_R(1 << 2)|PF_W(1 << 1)))!=(PF_R(1 << 2)|PF_W(1 << 1))) continue;
477 reclaim(dso, ph->p_vaddr & -PAGE_SIZE4096, ph->p_vaddr);
478 reclaim(dso, ph->p_vaddr+ph->p_memsz,
479 ph->p_vaddr+ph->p_memsz+PAGE_SIZE4096-1 & -PAGE_SIZE4096);
480 }
481}
482
483static void *mmap_fixed(void *p, size_t n, int prot, int flags, int fd, off_t off)
484{
485 char *q = mmap(p, n, prot, flags, fd, off);
486 if (q != MAP_FAILED((void *) -1) || errno(*__errno_location()) != EINVAL22) return q;
487 /* Fallbacks for MAP_FIXED failure on NOMMU kernels. */
488 if (flags & MAP_ANONYMOUS0x20) {
489 memset(p, 0, n);
490 return p;
491 }
492 ssize_t r;
493 if (lseek(fd, off, SEEK_SET0) < 0) return MAP_FAILED((void *) -1);
494 for (q=p; n; q+=r, off+=r, n-=r) {
495 r = read(fd, q, n);
496 if (r < 0 && errno(*__errno_location()) != EINTR4) return MAP_FAILED((void *) -1);
497 if (!r) {
498 memset(q, 0, n);
499 break;
500 }
501 }
502 return p;
503}
504
505static void unmap_library(struct dso *dso)
506{
507 if (dso->loadmap) {
508 size_t i;
509 for (i=0; i<dso->loadmap->nsegs; i++) {
510 if (!dso->loadmap->segs[i].p_memsz)
511 continue;
512 munmap((void *)dso->loadmap->segs[i].addr,
513 dso->loadmap->segs[i].p_memsz);
514 }
515 free(dso->loadmap);
516 } else if (dso->map && dso->map_len) {
517 munmap(dso->map, dso->map_len);
518 }
519}
520
521static void *map_library(int fd, struct dso *dso)
522{
523 Ehdr buf[(896+sizeof(Ehdr))/sizeof(Ehdr)];
524 void *allocated_buf=0;
525 size_t phsize;
526 size_t addr_min=SIZE_MAX(0xffffffffffffffffu), addr_max=0, map_len;
527 size_t this_min, this_max;
528 size_t nsegs = 0;
529 off_t off_start;
(1) 'off_start' declared without an initial value
530 Ehdr *eh;
531 Phdr *ph, *ph0;
532 unsigned prot;
533 unsigned char *map=MAP_FAILED((void *) -1), *base;
534 size_t dyn=0;
535 size_t tls_image=0;
536 size_t i;
537
538 ssize_t l = read(fd, buf, sizeof buf);
539 eh = buf;
540 if (l<0) return 0;
(2) Assuming 'l' is >= 0
(3) Taking false branch
541 if (l<sizeof *eh || (eh->e_type != ET_DYN3 && eh->e_type != ET_EXEC2))
542 goto noexec;
543 phsize = eh->e_phentsize * eh->e_phnum;
544 if (phsize > sizeof buf - sizeof *eh) {
(4) Taking false branch
545 allocated_buf = malloc(phsize);
546 if (!allocated_buf) return 0;
547 l = pread(fd, allocated_buf, phsize, eh->e_phoff);
548 if (l < 0) goto error;
549 if (l != phsize) goto noexec;
550 ph = ph0 = allocated_buf;
551 } else if (eh->e_phoff + phsize > l) {
(5) Taking false branch
552 l = pread(fd, buf+1, phsize, eh->e_phoff);
553 if (l < 0) goto error;
554 if (l != phsize) goto noexec;
555 ph = ph0 = (void *)(buf + 1);
556 } else {
557 ph = ph0 = (void *)((char *)buf + eh->e_phoff);
558 }
559 for (i=eh->e_phnum; i; i--, ph=(void *)((char *)ph+eh->e_phentsize)) {
(6) Loop condition is true. Entering loop body
(13) Loop condition is true. Entering loop body
(20) Loop condition is true. Entering loop body
(24) Loop condition is false. Execution continues on line 584
560 if (ph->p_type == PT_DYNAMIC2) {
(7) Taking false branch
(14) Taking false branch
(21) Taking true branch
561 dyn = ph->p_vaddr;
562 } else if (ph->p_type == PT_TLS7) {
(8) Taking false branch
(15) Taking false branch
563 tls_image = ph->p_vaddr;
564 dso->tls_align = ph->p_align;
565 dso->tls_len = ph->p_filesz;
566 dso->tls_size = ph->p_memsz;
567 } else if (ph->p_type == PT_GNU_RELRO0x6474e552) {
(9) Taking false branch
(16) Taking false branch
568 dso->relro_start = ph->p_vaddr & -PAGE_SIZE4096;
569 dso->relro_end = (ph->p_vaddr + ph->p_memsz) & -PAGE_SIZE4096;
570 }
571 if (ph->p_type != PT_LOAD1) continue;
(10) Taking false branch
(17) Taking false branch
(22) Taking true branch
(23) Execution continues on line 559
572 nsegs++;
573 if (ph->p_vaddr < addr_min) {
(11) Taking false branch
(18) Taking false branch
574 addr_min = ph->p_vaddr;
575 off_start = ph->p_offset;
576 prot = (((ph->p_flags&PF_R(1 << 2)) ? PROT_READ1 : 0) |
577 ((ph->p_flags&PF_W(1 << 1)) ? PROT_WRITE2: 0) |
578 ((ph->p_flags&PF_X(1 << 0)) ? PROT_EXEC4 : 0));
579 }
580 if (ph->p_vaddr+ph->p_memsz > addr_max) {
(12) Taking false branch
(19) Taking false branch
581 addr_max = ph->p_vaddr+ph->p_memsz;
582 }
583 }
584 if (!dyn) goto noexec;
(25) Assuming 'dyn' is not equal to 0
(26) Taking false branch
585 if (DL_FDPIC0 && !(eh->e_flags & FDPIC_CONSTDISP_FLAG0)) {
586 dso->loadmap = calloc(1, sizeof *dso->loadmap
587 + nsegs * sizeof *dso->loadmap->segs);
588 if (!dso->loadmap) goto error;
589 dso->loadmap->nsegs = nsegs;
590 for (ph=ph0, i=0; i<nsegs; ph=(void *)((char *)ph+eh->e_phentsize)) {
591 if (ph->p_type != PT_LOAD1) continue;
592 prot = (((ph->p_flags&PF_R(1 << 2)) ? PROT_READ1 : 0) |
593 ((ph->p_flags&PF_W(1 << 1)) ? PROT_WRITE2: 0) |
594 ((ph->p_flags&PF_X(1 << 0)) ? PROT_EXEC4 : 0));
595 map = mmap(0, ph->p_memsz + (ph->p_vaddr & PAGE_SIZE4096-1),
596 prot, (prot&PROT_WRITE2) ? MAP_PRIVATE0x02 : MAP_SHARED0x01,
597 fd, ph->p_offset & -PAGE_SIZE4096);
598 if (map == MAP_FAILED((void *) -1)) {
599 unmap_library(dso);
600 goto error;
601 }
602 dso->loadmap->segs[i].addr = (size_t)map +
603 (ph->p_vaddr & PAGE_SIZE4096-1);
604 dso->loadmap->segs[i].p_vaddr = ph->p_vaddr;
605 dso->loadmap->segs[i].p_memsz = ph->p_memsz;
606 i++;
607 }
608 map = (void *)dso->loadmap->segs[0].addr;
609 map_len = 0;
610 goto done_mapping;
611 }
612 addr_max += PAGE_SIZE4096-1;
613 addr_max &= -PAGE_SIZE4096;
614 addr_min &= -PAGE_SIZE4096;
615 off_start &= -PAGE_SIZE4096;
(27) The left expression of the compound assignment is an uninitialized value. The computed value will also be garbage
616 map_len = addr_max - addr_min + off_start;
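/* Note (added for this report, not part of the original source): on the
 * reported path no PT_LOAD header is seen, so 'off_start' (declared at line
 * 529 with no initial value) is never assigned before the '&=' at line 615,
 * and the garbage value then feeds 'map_len' at line 616. A possible
 * mitigation, offered as a sketch rather than an upstream fix, is to
 * initialize 'off_start' to 0 at its declaration or to reject files whose
 * program-header table contains no PT_LOAD segment. */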
617 /* The first time, we map too much, possibly even more than
618 * the length of the file. This is okay because we will not
619 * use the invalid part; we just need to reserve the right
620 * amount of virtual address space to map over later. */
621 map = mmap((void *)addr_min, map_len, prot, MAP_PRIVATE0x02, fd, off_start);
622 if (map==MAP_FAILED((void *) -1)) goto error;
623 dso->map = map;
624 dso->map_len = map_len;
625 /* If the loaded file is not relocatable and the requested address is
626 * not available, then the load operation must fail. */
627 if (eh->e_type != ET_DYN3 && addr_min && map!=(void *)addr_min) {
628 errno(*__errno_location()) = EBUSY16;
629 goto error;
630 }
631 base = map - addr_min;
632 dso->phdr = 0;
633 dso->phnum = 0;
634 for (ph=ph0, i=eh->e_phnum; i; i--, ph=(void *)((char *)ph+eh->e_phentsize)) {
635 if (ph->p_type != PT_LOAD1) continue;
636 /* Check if the program headers are in this load segment, and
637 * if so, record the address for use by dl_iterate_phdr. */
638 if (!dso->phdr && eh->e_phoff >= ph->p_offset
639 && eh->e_phoff+phsize <= ph->p_offset+ph->p_filesz) {
640 dso->phdr = (void *)(base + ph->p_vaddr
641 + (eh->e_phoff-ph->p_offset));
642 dso->phnum = eh->e_phnum;
643 dso->phentsize = eh->e_phentsize;
644 }
645 /* Reuse the existing mapping for the lowest-address LOAD */
646 if ((ph->p_vaddr & -PAGE_SIZE4096) == addr_min) continue;
647 this_min = ph->p_vaddr & -PAGE_SIZE4096;
648 this_max = ph->p_vaddr+ph->p_memsz+PAGE_SIZE4096-1 & -PAGE_SIZE4096;
649 off_start = ph->p_offset & -PAGE_SIZE4096;
650 prot = (((ph->p_flags&PF_R(1 << 2)) ? PROT_READ1 : 0) |
651 ((ph->p_flags&PF_W(1 << 1)) ? PROT_WRITE2: 0) |
652 ((ph->p_flags&PF_X(1 << 0)) ? PROT_EXEC4 : 0));
653 if (mmap_fixed(base+this_min, this_max-this_min, prot, MAP_PRIVATE0x02|MAP_FIXED0x10, fd, off_start) == MAP_FAILED((void *) -1))
654 goto error;
655 if (ph->p_memsz > ph->p_filesz) {
656 size_t brk = (size_t)base+ph->p_vaddr+ph->p_filesz;
657 size_t pgbrk = brk+PAGE_SIZE4096-1 & -PAGE_SIZE4096;
658 memset((void *)brk, 0, pgbrk-brk & PAGE_SIZE4096-1);
659 if (pgbrk-(size_t)base < this_max && mmap_fixed((void *)pgbrk, (size_t)base+this_max-pgbrk, prot, MAP_PRIVATE0x02|MAP_FIXED0x10|MAP_ANONYMOUS0x20, -1, 0) == MAP_FAILED((void *) -1))
660 goto error;
661 }
662 }
663 for (i=0; ((size_t *)(base+dyn))[i]; i+=2)
664 if (((size_t *)(base+dyn))[i]==DT_TEXTREL22) {
665 if (mprotect(map, map_len, PROT_READ1|PROT_WRITE2|PROT_EXEC4)
666 && errno(*__errno_location()) != ENOSYS38)
667 goto error;
668 break;
669 }
670done_mapping:
671 dso->base = base;
672 dso->dynv = laddr(dso, dyn)(void *)((dso)->base + (dyn));
673 if (dso->tls_size) dso->tls_image = laddr(dso, tls_image)(void *)((dso)->base + (tls_image));
674 if (!runtime) reclaim_gaps(dso);
675 free(allocated_buf);
676 return map;
677noexec:
678 errno(*__errno_location()) = ENOEXEC8;
679error:
680 if (map!=MAP_FAILED((void *) -1)) unmap_library(dso);
681 free(allocated_buf);
682 return 0;
683}
684
685static int path_open(const char *name, const char *s, char *buf, size_t buf_size)
686{
687 size_t l;
688 int fd;
689 for (;;) {
690 s += strspn(s, ":\n");
691 l = strcspn(s, ":\n");
692 if (l-1 >= INT_MAX0x7fffffff) return -1;
693 if (snprintf(buf, buf_size, "%.*s/%s", (int)l, s, name) < buf_size) {
694 if ((fd = open(buf, O_RDONLY00|O_CLOEXEC02000000))>=0) return fd;
695 switch (errno(*__errno_location())) {
696 case ENOENT2:
697 case ENOTDIR20:
698 case EACCES13:
699 case ENAMETOOLONG36:
700 break;
701 default:
702 /* Any negative value but -1 will inhibit
703 * further path search. */
704 return -2;
705 }
706 }
707 s += l;
708 }
709}
710
711static int fixup_rpath(struct dso *p, char *buf, size_t buf_size)
712{
713 size_t n, l;
714 const char *s, *t, *origin;
715 char *d;
716 if (p->rpath || !p->rpath_orig) return 0;
717 if (!strchr(p->rpath_orig, '$')) {
718 p->rpath = p->rpath_orig;
719 return 0;
720 }
721 n = 0;
722 s = p->rpath_orig;
723 while ((t=strchr(s, '$'))) {
724 if (strncmp(t, "$ORIGIN", 7) && strncmp(t, "${ORIGIN}", 9))
725 return 0;
726 s = t+1;
727 n++;
728 }
729 if (n > SSIZE_MAX0x7fffffffffffffffL/PATH_MAX4096) return 0;
730
731 if (p->kernel_mapped) {
732 /* $ORIGIN searches cannot be performed for the main program
733 * when it is suid/sgid/AT_SECURE. This is because the
734 * pathname is under the control of the caller of execve.
735 * For libraries, however, $ORIGIN can be processed safely
736 * since the library's pathname came from a trusted source
737 * (either system paths or a call to dlopen). */
738 if (libc__libc.secure)
739 return 0;
740 l = readlink("/proc/self/exe", buf, buf_size);
741 if (l == -1) switch (errno(*__errno_location())) {
742 case ENOENT2:
743 case ENOTDIR20:
744 case EACCES13:
745 break;
746 default:
747 return -1;
748 }
749 if (l >= buf_size)
750 return 0;
751 buf[l] = 0;
752 origin = buf;
753 } else {
754 origin = p->name;
755 }
756 t = strrchr(origin, '/');
757 l = t ? t-origin : 0;
758 p->rpath = malloc(strlen(p->rpath_orig) + n*l + 1);
759 if (!p->rpath) return -1;
760
761 d = p->rpath;
762 s = p->rpath_orig;
763 while ((t=strchr(s, '$'))) {
764 memcpy(d, s, t-s);
765 d += t-s;
766 memcpy(d, origin, l);
767 d += l;
768 /* It was determined previously that the '$' is followed
769 * either by "ORIGIN" or "{ORIGIN}". */
770 s = t + 7 + 2*(t[1]=='{');
771 }
772 strcpy(d, s);
773 return 0;
774}
775
776static void decode_dyn(struct dso *p)
777{
778 size_t dyn[DYN_CNT32];
779 decode_vec(p->dynv, dyn, DYN_CNT32);
780 p->syms = laddr(p, dyn[DT_SYMTAB])(void *)((p)->base + (dyn[6]));
781 p->strings = laddr(p, dyn[DT_STRTAB])(void *)((p)->base + (dyn[5]));
782 if (dyn[0]&(1<<DT_HASH4))
783 p->hashtab = laddr(p, dyn[DT_HASH])(void *)((p)->base + (dyn[4]));
784 if (dyn[0]&(1<<DT_RPATH15))
785 p->rpath_orig = p->strings + dyn[DT_RPATH15];
786 if (dyn[0]&(1<<DT_RUNPATH29))
787 p->rpath_orig = p->strings + dyn[DT_RUNPATH29];
788 if (dyn[0]&(1<<DT_PLTGOT3))
789 p->got = laddr(p, dyn[DT_PLTGOT])(void *)((p)->base + (dyn[3]));
790 if (search_vec(p->dynv, dyn, DT_GNU_HASH0x6ffffef5))
791 p->ghashtab = laddr(p, *dyn)(void *)((p)->base + (*dyn));
792 if (search_vec(p->dynv, dyn, DT_VERSYM0x6ffffff0))
793 p->versym = laddr(p, *dyn)(void *)((p)->base + (*dyn));
794}
795
796static size_t count_syms(struct dso *p)
797{
798 if (p->hashtab) return p->hashtab[1];
799
800 size_t nsym, i;
801 uint32_t *buckets = p->ghashtab + 4 + (p->ghashtab[2]*sizeof(size_t)/4);
802 uint32_t *hashval;
803 for (i = nsym = 0; i < p->ghashtab[0]; i++) {
804 if (buckets[i] > nsym)
805 nsym = buckets[i];
806 }
807 if (nsym) {
808 hashval = buckets + p->ghashtab[0] + (nsym - p->ghashtab[1]);
809 do nsym++;
810 while (!(*hashval++ & 1));
811 }
812 return nsym;
813}
814
815static void *dl_mmap(size_t n)
816{
817 void *p;
818 int prot = PROT_READ1|PROT_WRITE2, flags = MAP_ANONYMOUS0x20|MAP_PRIVATE0x02;
819#ifdef SYS_mmap2
820 p = (void *)__syscall(SYS_mmap2, 0, n, prot, flags, -1, 0);
821#else
822 p = (void *)__syscall(SYS_mmap, 0, n, prot, flags, -1, 0);
823#endif
824 return p == MAP_FAILED((void *) -1) ? 0 : p;
825}
826
827static void makefuncdescs(struct dso *p)
828{
829 static int self_done;
830 size_t nsym = count_syms(p);
831 size_t i, size = nsym * sizeof(*p->funcdescs);
832
833 if (!self_done) {
834 p->funcdescs = dl_mmap(size);
835 self_done = 1;
836 } else {
837 p->funcdescs = malloc(size);
838 }
839 if (!p->funcdescs) {
840 if (!runtime) a_crash();
841 error("Error allocating function descriptors for %s", p->name);
842 longjmp(*rtld_fail, 1);
843 }
844 for (i=0; i<nsym; i++) {
845 if ((p->syms[i].st_info&0xf)==STT_FUNC2 && p->syms[i].st_shndx) {
846 p->funcdescs[i].addr = laddr(p, p->syms[i].st_value)(void *)((p)->base + (p->syms[i].st_value));
847 p->funcdescs[i].got = p->got;
848 } else {
849 p->funcdescs[i].addr = 0;
850 p->funcdescs[i].got = 0;
851 }
852 }
853}
854
855static struct dso *load_library(const char *name, struct dso *needed_by)
856{
857 char buf[2*NAME_MAX255+2];
858 const char *pathname;
859 unsigned char *map;
860 struct dso *p, temp_dso = {0};
861 int fd;
862 struct stat st;
863 size_t alloc_size;
864 int n_th = 0;
865 int is_self = 0;
866
867 if (!*name) {
868 errno(*__errno_location()) = EINVAL22;
869 return 0;
870 }
871
872 /* Catch and block attempts to reload the implementation itself */
873 if (name[0]=='l' && name[1]=='i' && name[2]=='b') {
874 static const char *rp, reserved[] =
875 "c\0pthread\0rt\0m\0dl\0util\0xnet\0";
876 char *z = strchr(name, '.');
877 if (z) {
878 size_t l = z-name;
879 for (rp=reserved; *rp && strncmp(name+3, rp, l-3); rp+=strlen(rp)+1);
880 if (*rp) {
881 if (ldd_mode) {
882 /* Track which names have been resolved
883 * and only report each one once. */
884 static unsigned reported;
885 unsigned mask = 1U<<(rp-reserved);
886 if (!(reported & mask)) {
887 reported |= mask;
888 dprintf(1, "\t%s => %s (%p)\n",
889 name, ldso.name,
890 ldso.base);
891 }
892 }
893 is_self = 1;
894 }
895 }
896 }
897 if (!strcmp(name, ldso.name)dl_strcmp(name,ldso.name)) is_self = 1;
898 if (is_self) {
899 if (!ldso.prev) {
900 tail->next = &ldso;
901 ldso.prev = tail;
902 tail = ldso.next ? ldso.next : &ldso;
903 }
904 return &ldso;
905 }
906 if (strchr(name, '/')) {
907 pathname = name;
908 fd = open(name, O_RDONLY00|O_CLOEXEC02000000);
909 } else {
910 /* Search for the name to see if it's already loaded */
911 for (p=head->next; p; p=p->next) {
912 if (p->shortname && !strcmp(p->shortname, name)dl_strcmp(p->shortname,name)) {
913 p->refcnt++;
914 return p;
915 }
916 }
917 if (strlen(name) > NAME_MAX255) return 0;
918 fd = -1;
919 if (env_path) fd = path_open(name, env_path, buf, sizeof buf);
920 for (p=needed_by; fd == -1 && p; p=p->needed_by) {
921 if (fixup_rpath(p, buf, sizeof buf) < 0)
922 fd = -2; /* Inhibit further search. */
923 if (p->rpath)
924 fd = path_open(name, p->rpath, buf, sizeof buf);
925 }
926 if (fd == -1) {
927 if (!sys_path) {
928 char *prefix = 0;
929 size_t prefix_len;
930 if (ldso.name[0]=='/') {
931 char *s, *t, *z;
932 for (s=t=z=ldso.name; *s; s++)
933 if (*s=='/') z=t, t=s;
934 prefix_len = z-ldso.name;
935 if (prefix_len < PATH_MAX4096)
936 prefix = ldso.name;
937 }
938 if (!prefix) {
939 prefix = "";
940 prefix_len = 0;
941 }
942 char etc_ldso_path[prefix_len + 1
943 + sizeof "/etc/ld-musl-" LDSO_ARCH"x86_64" ".path"];
944 snprintf(etc_ldso_path, sizeof etc_ldso_path,
945 "%.*s/etc/ld-musl-" LDSO_ARCH"x86_64" ".path",
946 (int)prefix_len, prefix);
947 FILE *f = fopen(etc_ldso_path, "rbe");
948 if (f) {
949 if (getdelim(&sys_path, (size_t[1]){0}, 0, f) <= 0) {
950 free(sys_path);
951 sys_path = "";
952 }
953 fclose(f);
954 } else if (errno(*__errno_location()) != ENOENT2) {
955 sys_path = "";
956 }
957 }
958 if (!sys_path) sys_path = "/lib:/usr/local/lib:/usr/lib";
959 fd = path_open(name, sys_path, buf, sizeof buf);
960 }
961 pathname = buf;
962 }
963 if (fd < 0) return 0;
964 if (fstat(fd, &st) < 0) {
965 close(fd);
966 return 0;
967 }
968 for (p=head->next; p; p=p->next) {
969 if (p->dev == st.st_dev && p->ino == st.st_ino) {
970 /* If this library was previously loaded with a
971 * pathname but a search found the same inode,
972 * setup its shortname so it can be found by name. */
973 if (!p->shortname && pathname != name)
974 p->shortname = strrchr(p->name, '/')+1;
975 close(fd);
976 p->refcnt++;
977 return p;
978 }
979 }
980 map = noload ? 0 : map_library(fd, &temp_dso);
981 close(fd);
982 if (!map) return 0;
983
984 /* Allocate storage for the new DSO. When there is TLS, this
985 * storage must include a reservation for all pre-existing
986 * threads to obtain copies of both the new TLS, and an
987 * extended DTV capable of storing an additional slot for
988 * the newly-loaded DSO. */
989 alloc_size = sizeof *p + strlen(pathname) + 1;
990 if (runtime && temp_dso.tls_image) {
991 size_t per_th = temp_dso.tls_size + temp_dso.tls_align
992 + sizeof(void *) * (tls_cnt+3);
993 n_th = libc__libc.threads_minus_1 + 1;
994 if (n_th > SSIZE_MAX0x7fffffffffffffffL / per_th) alloc_size = SIZE_MAX(0xffffffffffffffffu);
995 else alloc_size += n_th * per_th;
996 }
997 p = calloc(1, alloc_size);
998 if (!p) {
999 unmap_library(&temp_dso);
1000 return 0;
1001 }
1002 memcpy(p, &temp_dso, sizeof temp_dso);
1003 decode_dyn(p);
1004 p->dev = st.st_dev;
1005 p->ino = st.st_ino;
1006 p->refcnt = 1;
1007 p->needed_by = needed_by;
1008 p->name = p->buf;
1009 strcpy(p->name, pathname);
1010 /* Add a shortname only if name arg was not an explicit pathname. */
1011 if (pathname != name) p->shortname = strrchr(p->name, '/')+1;
1012 if (p->tls_image) {
1013 p->tls_id = ++tls_cnt;
1014 tls_align = MAXP2(tls_align, p->tls_align)(-(-(tls_align)&-(p->tls_align)));
1015#ifdef TLS_ABOVE_TP
1016 p->tls_offset = tls_offset + ( (tls_align-1) &
1017 -(tls_offset + (uintptr_t)p->tls_image) );
1018 tls_offset += p->tls_size;
1019#else
1020 tls_offset += p->tls_size + p->tls_align - 1;
1021 tls_offset -= (tls_offset + (uintptr_t)p->tls_image)
1022 & (p->tls_align-1);
1023 p->tls_offset = tls_offset;
1024#endif
1025 p->new_dtv = (void *)(-sizeof(size_t) &
1026 (uintptr_t)(p->name+strlen(p->name)+sizeof(size_t)));
1027 p->new_tls = (void *)(p->new_dtv + n_th*(tls_cnt+1));
1028 }
1029
1030 tail->next = p;
1031 p->prev = tail;
1032 tail = p;
1033
1034 if (DL_FDPIC0) makefuncdescs(p);
1035
1036 if (ldd_mode) dprintf(1, "\t%s => %s (%p)\n", name, pathname, p->base);
1037
1038 return p;
1039}
1040
1041static void load_deps(struct dso *p)
1042{
1043 size_t i, ndeps=0;
1044 struct dso ***deps = &p->deps, **tmp, *dep;
1045 for (; p; p=p->next) {
1046 for (i=0; p->dynv[i]; i+=2) {
1047 if (p->dynv[i] != DT_NEEDED1) continue;
1048 dep = load_library(p->strings + p->dynv[i+1], p);
1049 if (!dep) {
1050 error("Error loading shared library %s: %m (needed by %s)",
1051 p->strings + p->dynv[i+1], p->name);
1052 if (runtime) longjmp(*rtld_fail, 1);
1053 continue;
1054 }
1055 if (runtime) {
1056 tmp = realloc(*deps, sizeof(*tmp)*(ndeps+2));
1057 if (!tmp) longjmp(*rtld_fail, 1);
1058 tmp[ndeps++] = dep;
1059 tmp[ndeps] = 0;
1060 *deps = tmp;
1061 }
1062 }
1063 }
1064}
1065
1066static void load_preload(char *s)
1067{
1068 int tmp;
1069 char *z;
1070 for (z=s; *z; s=z) {
1071 for ( ; *s && (isspace(*s)__isspace(*s) || *s==':'); s++);
1072 for (z=s; *z && !isspace(*z)__isspace(*z) && *z!=':'; z++);
1073 tmp = *z;
1074 *z = 0;
1075 load_library(s, 0);
1076 *z = tmp;
1077 }
1078}
1079
1080static void make_global(struct dso *p)
1081{
1082 for (; p; p=p->next) p->global = 1;
1083}
1084
1085static void do_mips_relocs(struct dso *p, size_t *got)
1086{
1087 size_t i, j, rel[2];
1088 unsigned char *base = p->base;
1089 i=0; search_vec(p->dynv, &i, DT_MIPS_LOCAL_GOTNO0x7000000a);
1090 if (p==&ldso) {
1091 got += i;
1092 } else {
1093 while (i--) *got++ += (size_t)base;
1094 }
1095 j=0; search_vec(p->dynv, &j, DT_MIPS_GOTSYM0x70000013);
1096 i=0; search_vec(p->dynv, &i, DT_MIPS_SYMTABNO0x70000011);
1097 Sym *sym = p->syms + j;
1098 rel[0] = (unsigned char *)got - base;
1099 for (i-=j; i; i--, sym++, rel[0]+=sizeof(size_t)) {
1100 rel[1] = sym-p->syms << 8 | R_MIPS_JUMP_SLOT127;
1101 do_relocs(p, rel, sizeof rel, 2);
1102 }
1103}
1104
1105static void reloc_all(struct dso *p)
1106{
1107 size_t dyn[DYN_CNT32];
1108 for (; p; p=p->next) {
1109 if (p->relocated) continue;
1110 decode_vec(p->dynv, dyn, DYN_CNT32);
1111 if (NEED_MIPS_GOT_RELOCS0)
1112 do_mips_relocs(p, laddr(p, dyn[DT_PLTGOT])(void *)((p)->base + (dyn[3])));
1113 do_relocs(p, laddr(p, dyn[DT_JMPREL])(void *)((p)->base + (dyn[23])), dyn[DT_PLTRELSZ2],
1114 2+(dyn[DT_PLTREL20]==DT_RELA7));
1115 do_relocs(p, laddr(p, dyn[DT_REL])(void *)((p)->base + (dyn[17])), dyn[DT_RELSZ18], 2);
1116 do_relocs(p, laddr(p, dyn[DT_RELA])(void *)((p)->base + (dyn[7])), dyn[DT_RELASZ8], 3);
1117
1118 if (head != &ldso && p->relro_start != p->relro_end &&
1119 mprotect(laddr(p, p->relro_start)(void *)((p)->base + (p->relro_start)), p->relro_end-p->relro_start, PROT_READ1)
1120 && errno(*__errno_location()) != ENOSYS38) {
1121 error("Error relocating %s: RELRO protection failed: %m",
1122 p->name);
1123 if (runtime) longjmp(*rtld_fail, 1);
1124 }
1125
1126 p->relocated = 1;
1127 }
1128}
1129
1130static void kernel_mapped_dso(struct dso *p)
1131{
1132 size_t min_addr = -1, max_addr = 0, cnt;
1133 Phdr *ph = p->phdr;
1134 for (cnt = p->phnum; cnt--; ph = (void *)((char *)ph + p->phentsize)) {
1135 if (ph->p_type == PT_DYNAMIC2) {
1136 p->dynv = laddr(p, ph->p_vaddr)(void *)((p)->base + (ph->p_vaddr));
1137 } else if (ph->p_type == PT_GNU_RELRO0x6474e552) {
1138 p->relro_start = ph->p_vaddr & -PAGE_SIZE4096;
1139 p->relro_end = (ph->p_vaddr + ph->p_memsz) & -PAGE_SIZE4096;
1140 }
1141 if (ph->p_type != PT_LOAD1) continue;
1142 if (ph->p_vaddr < min_addr)
1143 min_addr = ph->p_vaddr;
1144 if (ph->p_vaddr+ph->p_memsz > max_addr)
1145 max_addr = ph->p_vaddr+ph->p_memsz;
1146 }
1147 min_addr &= -PAGE_SIZE4096;
1148 max_addr = (max_addr + PAGE_SIZE4096-1) & -PAGE_SIZE4096;
1149 p->map = p->base + min_addr;
1150 p->map_len = max_addr - min_addr;
1151 p->kernel_mapped = 1;
1152}
1153
1154static void do_fini()
1155{
1156 struct dso *p;
1157 size_t dyn[DYN_CNT32];
1158 for (p=fini_head; p; p=p->fini_next) {
1159 if (!p->constructed) continue;
1160 decode_vec(p->dynv, dyn, DYN_CNT32);
1161 if (dyn[0] & (1<<DT_FINI_ARRAY26)) {
1162 size_t n = dyn[DT_FINI_ARRAYSZ28]/sizeof(size_t);
1163 size_t *fn = (size_t *)laddr(p, dyn[DT_FINI_ARRAY])(void *)((p)->base + (dyn[26]))+n;
1164 while (n--) ((void (*)(void))*--fn)();
1165 }
1166#ifndef NO_LEGACY_INITFINI
1167 if ((dyn[0] & (1<<DT_FINI13)) && dyn[DT_FINI13])
1168 fpaddr(p, dyn[DT_FINI])((void (*)())(void *)((p)->base + (dyn[13])))();
1169#endif
1170 }
1171}
1172
1173static void do_init_fini(struct dso *p)
1174{
1175 size_t dyn[DYN_CNT32];
1176 int need_locking = libc__libc.threads_minus_1;
1177 /* Allow recursive calls that arise when a library calls
1178 * dlopen from one of its constructors, but block any
1179 * other threads until all ctors have finished. */
1180 if (need_locking) pthread_mutex_lock(&init_fini_lock);
1181 for (; p; p=p->prev) {
1182 if (p->constructed) continue;
1183 p->constructed = 1;
1184 decode_vec(p->dynv, dyn, DYN_CNT32);
1185 if (dyn[0] & ((1<<DT_FINI13) | (1<<DT_FINI_ARRAY26))) {
1186 p->fini_next = fini_head;
1187 fini_head = p;
1188 }
1189#ifndef NO_LEGACY_INITFINI
1190 if ((dyn[0] & (1<<DT_INIT12)) && dyn[DT_INIT12])
1191 fpaddr(p, dyn[DT_INIT])((void (*)())(void *)((p)->base + (dyn[12])))();
1192#endif
1193 if (dyn[0] & (1<<DT_INIT_ARRAY25)) {
1194 size_t n = dyn[DT_INIT_ARRAYSZ27]/sizeof(size_t);
1195 size_t *fn = laddr(p, dyn[DT_INIT_ARRAY])(void *)((p)->base + (dyn[25]));
1196 while (n--) ((void (*)(void))*fn++)();
1197 }
1198 if (!need_locking && libc__libc.threads_minus_1) {
1199 need_locking = 1;
1200 pthread_mutex_lock(&init_fini_lock);
1201 }
1202 }
1203 if (need_locking) pthread_mutex_unlock(&init_fini_lock);
1204}
1205
1206void __libc_start_init(void)
1207{
1208 do_init_fini(tail);
1209}
1210
1211static void dl_debug_state(void)
1212{
1213}
1214
1215weak_alias(dl_debug_state, _dl_debug_state);
1216
1217void __reset_tls()
1218{
1219 pthread_t self = __pthread_self();
1220 struct dso *p;
1221 for (p=head; p; p=p->next) {
1222 if (!p->tls_id || !self->dtv[p->tls_id]) continue;
1223 memcpy(self->dtv[p->tls_id], p->tls_image, p->tls_len);
1224 memset((char *)self->dtv[p->tls_id]+p->tls_len, 0,
1225 p->tls_size - p->tls_len);
1226 if (p->tls_id == (size_t)self->dtv[0]) break;
1227 }
1228}
1229
1230void *__copy_tls(unsigned char *mem)
1231{
1232 pthread_t td;
1233 struct dso *p;
1234 void **dtv;
1235
1236#ifdef TLS_ABOVE_TP
1237 dtv = (void **)(mem + libc__libc.tls_size) - (tls_cnt + 1);
1238
1239 mem += -((uintptr_t)mem + sizeof(struct pthread__pthread)) & (tls_align-1);
1240 td = (pthread_t)mem;
1241 mem += sizeof(struct pthread__pthread);
1242
1243 for (p=head; p; p=p->next) {
1244 if (!p->tls_id) continue;
1245 dtv[p->tls_id] = mem + p->tls_offset;
1246 memcpy(dtv[p->tls_id], p->tls_image, p->tls_len);
1247 }
1248#else
1249 dtv = (void **)mem;
1250
1251 mem += libc__libc.tls_size - sizeof(struct pthread__pthread);
1252 mem -= (uintptr_t)mem & (tls_align-1);
1253 td = (pthread_t)mem;
1254
1255 for (p=head; p; p=p->next) {
1256 if (!p->tls_id) continue;
1257 dtv[p->tls_id] = mem - p->tls_offset;
1258 memcpy(dtv[p->tls_id], p->tls_image, p->tls_len);
1259 }
1260#endif
1261 dtv[0] = (void *)tls_cnt;
1262 td->dtv = td->dtv_copy = dtv;
1263 return td;
1264}
1265
1266__attribute__((__visibility__("hidden")))
1267void *__tls_get_new(size_t *v)
1268{
1269 pthread_t self = __pthread_self();
1270
1271 /* Block signals to make accessing new TLS async-signal-safe */
1272 sigset_t set;
1273 __block_all_sigs(&set);
1274 if (v[0]<=(size_t)self->dtv[0]) {
1275 __restore_sigs(&set);
1276 return (char *)self->dtv[v[0]]+v[1]+DTP_OFFSET0;
1277 }
1278
1279 /* This is safe without any locks held because, if the caller
1280 * is able to request the Nth entry of the DTV, the DSO list
1281 * must be valid at least that far out and it was synchronized
1282 * at program startup or by an already-completed call to dlopen. */
1283 struct dso *p;
1284 for (p=head; p->tls_id != v[0]; p=p->next);
1285
1286 /* Get new DTV space from new DSO if needed */
1287 if (v[0] > (size_t)self->dtv[0]) {
1288 void **newdtv = p->new_dtv +
1289 (v[0]+1)*sizeof(void *)*a_fetch_add(&p->new_dtv_idx,1);
1290 memcpy(newdtv, self->dtv,
1291 ((size_t)self->dtv[0]+1) * sizeof(void *));
1292 newdtv[0] = (void *)v[0];
1293 self->dtv = self->dtv_copy = newdtv;
1294 }
1295
1296 /* Get new TLS memory from all new DSOs up to the requested one */
1297 unsigned char *mem;
1298 for (p=head; ; p=p->next) {
1299 if (!p->tls_id || self->dtv[p->tls_id]) continue;
1300 mem = p->new_tls + (p->tls_size + p->tls_align)
1301 * a_fetch_add(&p->new_tls_idx,1);
1302 mem += ((uintptr_t)p->tls_image - (uintptr_t)mem)
1303 & (p->tls_align-1);
1304 self->dtv[p->tls_id] = mem;
1305 memcpy(mem, p->tls_image, p->tls_len);
1306 if (p->tls_id == v[0]) break;
1307 }
1308 __restore_sigs(&set);
1309 return mem + v[1] + DTP_OFFSET0;
1310}
1311
1312static void update_tls_size()
1313{
1314 libc__libc.tls_size = ALIGN(
1315 (1+tls_cnt) * sizeof(void *) +
1316 tls_offset +
1317 sizeof(struct pthread) +
1318 tls_align * 2,
1319 tls_align);
1320}
1321
1322/* Stage 1 of the dynamic linker is defined in dlstart.c. It calls the
1323 * following stage 2 and stage 3 functions via primitive symbolic lookup
1324 * since it does not have access to their addresses to begin with. */
1325
1326/* Stage 2 of the dynamic linker is called after relative relocations
1327 * have been processed. It can make function calls to static functions
1328 * and access string literals and static data, but cannot use extern
1329 * symbols. Its job is to perform symbolic relocations on the dynamic
1330 * linker itself, but some of the relocations performed may need to be
1331 * replaced later due to copy relocations in the main program. */
1332
1333void __dls2(unsigned char *base, size_t *sp)
1334{
1335 if (DL_FDPIC0) {
1336 void *p1 = (void *)sp[-2];
1337 void *p2 = (void *)sp[-1];
1338 if (!p1) {
1339 size_t *auxv, aux[AUX_CNT32];
1340 for (auxv=sp+1+*sp+1; *auxv; auxv++); auxv++;
1341 decode_vec(auxv, aux, AUX_CNT32);
1342 if (aux[AT_BASE7]) ldso.base = (void *)aux[AT_BASE7];
1343 else ldso.base = (void *)(aux[AT_PHDR3] & -4096);
1344 }
1345 app_loadmap = p2 ? p1 : 0;
1346 ldso.loadmap = p2 ? p2 : p1;
1347 ldso.base = laddr(&ldso, 0)(void *)((&ldso)->base + (0));
1348 } else {
1349 ldso.base = base;
1350 }
1351 Ehdr *ehdr = (void *)ldso.base;
1352 ldso.name = ldso.shortname = "libc.so";
1353 ldso.global = 1;
1354 ldso.phnum = ehdr->e_phnum;
1355 ldso.phdr = laddr(&ldso, ehdr->e_phoff)(void *)((&ldso)->base + (ehdr->e_phoff));
1356 ldso.phentsize = ehdr->e_phentsize;
1357 kernel_mapped_dso(&ldso);
1358 decode_dyn(&ldso);
1359
1360 if (DL_FDPIC0) makefuncdescs(&ldso);
1361
1362 /* Prepare storage for to save clobbered REL addends so they
1363 * can be reused in stage 3. There should be very few. If
1364 * something goes wrong and there are a huge number, abort
1365 * instead of risking stack overflow. */
1366 size_t dyn[DYN_CNT32];
1367 decode_vec(ldso.dynv, dyn, DYN_CNT32);
1368 size_t *rel = laddr(&ldso, dyn[DT_REL])(void *)((&ldso)->base + (dyn[17]));
1369 size_t rel_size = dyn[DT_RELSZ18];
1370 size_t symbolic_rel_cnt = 0;
1371 apply_addends_to = rel;
1372 for (; rel_size; rel+=2, rel_size-=2*sizeof(size_t))
1373 if (!IS_RELATIVE(rel[1], ldso.syms)) symbolic_rel_cnt++;
1374 if (symbolic_rel_cnt >= ADDEND_LIMIT4096) a_crash();
1375 size_t addends[symbolic_rel_cnt+1];
1376 saved_addends = addends;
1377
1378 head = &ldso;
1379 reloc_all(&ldso);
1380
1381 ldso.relocated = 0;
1382
1383 /* Call dynamic linker stage-3, __dls3, looking it up
1384 * symbolically as a barrier against moving the address
1385 * load across the above relocation processing. */
1386 struct symdef dls3_def = find_sym(&ldso, "__dls3", 0);
1387 if (DL_FDPIC0) ((stage3_func)&ldso.funcdescs[dls3_def.sym-ldso.syms])(sp);
1388 else ((stage3_func)laddr(&ldso, dls3_def.sym->st_value)(void *)((&ldso)->base + (dls3_def.sym->st_value)))(sp);
1389}
1390
1391/* Stage 3 of the dynamic linker is called with the dynamic linker/libc
1392 * fully functional. Its job is to load (if not already loaded) and
1393 * process dependencies and relocations for the main application and
1394 * transfer control to its entry point. */
1395
1396_Noreturn__attribute__((__noreturn__)) void __dls3(size_t *sp)
1397{
1398 static struct dso app, vdso;
1399 size_t aux[AUX_CNT32], *auxv;
1400 size_t i;
1401 char *env_preload=0;
1402 size_t vdso_base;
1403 int argc = *sp;
1404 char **argv = (void *)(sp+1);
1405 char **argv_orig = argv;
1406 char **envp = argv+argc+1;
1407
1408 /* Find aux vector just past environ[] and use it to initialize
1409 * global data that may be needed before we can make syscalls. */
1410 __environ = envp;
1411 for (i=argc+1; argv[i]; i++);
1412 libc__libc.auxv = auxv = (void *)(argv+i+1);
1413 decode_vec(auxv, aux, AUX_CNT32);
1414 __hwcap = aux[AT_HWCAP16];
1415 libc__libc.page_size = aux[AT_PAGESZ6];
1416 libc__libc.secure = ((aux[0]&0x7800)!=0x7800 || aux[AT_UID11]!=aux[AT_EUID12]
1417 || aux[AT_GID13]!=aux[AT_EGID14] || aux[AT_SECURE23]);
1418
1419 /* Setup early thread pointer in builtin_tls for ldso/libc itself to
1420 * use during dynamic linking. If possible it will also serve as the
1421 * thread pointer at runtime. */
1422 libc__libc.tls_size = sizeof builtin_tls;
1423 if (__init_tp(__copy_tls((void *)builtin_tls)) < 0) {
1424 a_crash();
1425 }
1426
1427 /* Only trust user/env if kernel says we're not suid/sgid */
1428 if (!libc__libc.secure) {
1429 env_path = getenv("LD_LIBRARY_PATH");
1430 env_preload = getenv("LD_PRELOAD");
1431 }
1432
1433 /* If the main program was already loaded by the kernel,
1434 * AT_PHDR will point to some location other than the dynamic
1435 * linker's program headers. */
1436 if (aux[AT_PHDR3] != (size_t)ldso.phdr) {
1437 size_t interp_off = 0;
1438 size_t tls_image = 0;
1439 /* Find load address of the main program, via AT_PHDR vs PT_PHDR. */
1440 Phdr *phdr = app.phdr = (void *)aux[AT_PHDR3];
1441 app.phnum = aux[AT_PHNUM5];
1442 app.phentsize = aux[AT_PHENT4];
1443 for (i=aux[AT_PHNUM5]; i; i--, phdr=(void *)((char *)phdr + aux[AT_PHENT4])) {
1444 if (phdr->p_type == PT_PHDR6)
1445 app.base = (void *)(aux[AT_PHDR3] - phdr->p_vaddr);
1446 else if (phdr->p_type == PT_INTERP3)
1447 interp_off = (size_t)phdr->p_vaddr;
1448 else if (phdr->p_type == PT_TLS7) {
1449 tls_image = phdr->p_vaddr;
1450 app.tls_len = phdr->p_filesz;
1451 app.tls_size = phdr->p_memsz;
1452 app.tls_align = phdr->p_align;
1453 }
1454 }
1455 if (DL_FDPIC0) app.loadmap = app_loadmap;
1456 if (app.tls_size) app.tls_image = laddr(&app, tls_image)(void *)((&app)->base + (tls_image));
1457 if (interp_off) ldso.name = laddr(&app, interp_off)(void *)((&app)->base + (interp_off));
1458 if ((aux[0] & (1UL<<AT_EXECFN31))
1459 && strncmp((char *)aux[AT_EXECFN31], "/proc/", 6))
1460 app.name = (char *)aux[AT_EXECFN31];
1461 else
1462 app.name = argv[0];
1463 kernel_mapped_dso(&app);
1464 } else {
1465 int fd;
1466 char *ldname = argv[0];
1467 size_t l = strlen(ldname);
1468 if (l >= 3 && !strcmp(ldname+l-3, "ldd")dl_strcmp(ldname+l-3,"ldd")) ldd_mode = 1;
1469 argv++;
1470 while (argv[0] && argv[0][0]=='-' && argv[0][1]=='-') {
1471 char *opt = argv[0]+2;
1472 *argv++ = (void *)-1;
1473 if (!*opt) {
1474 break;
1475 } else if (!memcmp(opt, "list", 5)) {
1476 ldd_mode = 1;
1477 } else if (!memcmp(opt, "library-path", 12)) {
1478 if (opt[12]=='=') env_path = opt+13;
1479 else if (opt[12]) *argv = 0;
1480 else if (*argv) env_path = *argv++;
1481 } else if (!memcmp(opt, "preload", 7)) {
1482 if (opt[7]=='=') env_preload = opt+8;
1483 else if (opt[7]) *argv = 0;
1484 else if (*argv) env_preload = *argv++;
1485 } else {
1486 argv[0] = 0;
1487 }
1488 }
1489 argv[-1] = (void *)(argc - (argv-argv_orig));
1490 if (!argv[0]) {
1491 dprintf(2, "musl libc\n"
1492 "Version %s\n"
1493 "Dynamic Program Loader\n"
1494 "Usage: %s [options] [--] pathname%s\n",
1495 __libc_get_version(), ldname,
1496 ldd_mode ? "" : " [args]");
1497 _exit(1);
1498 }
1499 fd = open(argv[0], O_RDONLY00);
1500 if (fd < 0) {
1501 dprintf(2, "%s: cannot load %s: %s\n", ldname, argv[0], strerror(errno(*__errno_location())));
1502 _exit(1);
1503 }
1504 runtime = 1;
1505 Ehdr *ehdr = (void *)map_library(fd, &app);
1506 if (!ehdr) {
1507 dprintf(2, "%s: %s: Not a valid dynamic program\n", ldname, argv[0]);
1508 _exit(1);
1509 }
1510 runtime = 0;
1511 close(fd);
1512 ldso.name = ldname;
1513 app.name = argv[0];
1514 aux[AT_ENTRY9] = (size_t)laddr(&app, ehdr->e_entry)(void *)((&app)->base + (ehdr->e_entry));
1515 /* Find the name that would have been used for the dynamic
1516 * linker had ldd not taken its place. */
1517 if (ldd_mode) {
1518 for (i=0; i<app.phnum; i++) {
1519 if (app.phdr[i].p_type == PT_INTERP3)
1520 ldso.name = laddr(&app, app.phdr[i].p_vaddr)(void *)((&app)->base + (app.phdr[i].p_vaddr));
1521 }
1522 dprintf(1, "\t%s (%p)\n", ldso.name, ldso.base);
1523 }
1524 }
1525 if (app.tls_size) {
1526 app.tls_id = tls_cnt = 1;
1527#ifdef TLS_ABOVE_TP
1528 app.tls_offset = 0;
1529 tls_offset = app.tls_size
1530 + ( -((uintptr_t)app.tls_image + app.tls_size)
1531 & (app.tls_align-1) );
1532#else
1533 tls_offset = app.tls_offset = app.tls_size
1534 + ( -((uintptr_t)app.tls_image + app.tls_size)
1535 & (app.tls_align-1) );
1536#endif
1537 tls_align = MAXP2(tls_align, app.tls_align)(-(-(tls_align)&-(app.tls_align)));
1538 }
1539 app.global = 1;
1540 decode_dyn(&app);
1541 if (DL_FDPIC0) {
1542 makefuncdescs(&app);
1543 if (!app.loadmap) {
1544 app.loadmap = (void *)&app_dummy_loadmap;
1545 app.loadmap->nsegs = 1;
1546 app.loadmap->segs[0].addr = (size_t)app.map;
1547 app.loadmap->segs[0].p_vaddr = (size_t)app.map
1548 - (size_t)app.base;
1549 app.loadmap->segs[0].p_memsz = app.map_len;
1550 }
1551 argv[-3] = (void *)app.loadmap;
1552 }
1553
1554 /* Attach to vdso, if provided by the kernel */
1555 if (search_vec(auxv, &vdso_base, AT_SYSINFO_EHDR33)) {
1556 Ehdr *ehdr = (void *)vdso_base;
1557 Phdr *phdr = vdso.phdr = (void *)(vdso_base + ehdr->e_phoff);
1558 vdso.phnum = ehdr->e_phnum;
1559 vdso.phentsize = ehdr->e_phentsize;
1560 for (i=ehdr->e_phnum; i; i--, phdr=(void *)((char *)phdr + ehdr->e_phentsize)) {
1561 if (phdr->p_type == PT_DYNAMIC2)
1562 vdso.dynv = (void *)(vdso_base + phdr->p_offset);
1563 if (phdr->p_type == PT_LOAD1)
1564 vdso.base = (void *)(vdso_base - phdr->p_vaddr + phdr->p_offset);
1565 }
1566 vdso.name = "";
1567 vdso.shortname = "linux-gate.so.1";
1568 vdso.global = 1;
1569 vdso.relocated = 1;
1570 decode_dyn(&vdso);
1571 vdso.prev = &ldso;
1572 ldso.next = &vdso;
1573 }
1574
1575 /* Initial dso chain consists only of the app. */
1576 head = tail = &app;
1577
1578 /* Donate unused parts of app and library mapping to malloc */
1579 reclaim_gaps(&app);
1580 reclaim_gaps(&ldso);
1581
1582 /* Load preload/needed libraries, add their symbols to the global
1583 * namespace, and perform all remaining relocations. */
1584 if (env_preload) load_preload(env_preload);
1585 load_deps(&app);
1586 make_global(&app);
1587
1588#ifndef DYNAMIC_IS_RO
1589 for (i=0; app.dynv[i]; i+=2)
1590 if (app.dynv[i]==DT_DEBUG21)
1591 app.dynv[i+1] = (size_t)&debug;
1592#endif
1593
1594 /* The main program must be relocated LAST since it may contain
1595 * copy relocations which depend on libraries' relocations. */
1596 reloc_all(app.next);
1597 reloc_all(&app);
1598
1599 update_tls_size();
1600 if (libc__libc.tls_size > sizeof builtin_tls || tls_align > MIN_TLS_ALIGN__builtin_offsetof(struct builtin_tls, pt)) {
1601 void *initial_tls = calloc(libc__libc.tls_size, 1);
1602 if (!initial_tls) {
1603 dprintf(2, "%s: Error getting %zu bytes thread-local storage: %m\n",
1604 argv[0], libc__libc.tls_size);
1605 _exit(127);
1606 }
1607 if (__init_tp(__copy_tls(initial_tls)) < 0) {
1608 a_crash();
1609 }
1610 } else {
1611 size_t tmp_tls_size = libc__libc.tls_size;
1612 pthread_t self = __pthread_self();
1613 /* Temporarily set the tls size to the full size of
1614 * builtin_tls so that __copy_tls will use the same layout
1615 * as it did for before. Then check, just to be safe. */
1616 libc__libc.tls_size = sizeof builtin_tls;
1617 if (__copy_tls((void*)builtin_tls) != self) a_crash();
1618 libc__libc.tls_size = tmp_tls_size;
1619 }
1620 static_tls_cnt = tls_cnt;
1621
1622 if (ldso_fail) _exit(127);
1623 if (ldd_mode) _exit(0);
1624
1625 /* Switch to runtime mode: any further failures in the dynamic
1626 * linker are a reportable failure rather than a fatal startup
1627 * error. */
1628 runtime = 1;
1629
1630 debug.ver = 1;
1631 debug.bp = dl_debug_state;
1632 debug.head = head;
1633 debug.base = ldso.base;
1634 debug.state = 0;
1635 _dl_debug_state();
1636
1637 __init_libc(envp, argv[0]);
1638 atexit(do_fini);
1639 errno(*__errno_location()) = 0;
1640
1641 CRTJMP((void *)aux[AT_ENTRY], argv-1);
1642 for(;;);
1643}
1644
1645void *dlopen(const char *file, int mode)
1646{
1647 struct dso *volatile p, *orig_tail, *next;
1648 size_t orig_tls_cnt, orig_tls_offset, orig_tls_align;
1649 size_t i;
1650 int cs;
1651 jmp_buf jb;
1652
1653 if (!file) return head;
1654
1655 pthread_setcancelstate(PTHREAD_CANCEL_DISABLE1, &cs);
1656 pthread_rwlock_wrlock(&lock);
1657 __inhibit_ptc();
1658
1659 p = 0;
1660 orig_tls_cnt = tls_cnt;
1661 orig_tls_offset = tls_offset;
1662 orig_tls_align = tls_align;
1663 orig_tail = tail;
1664 noload = mode & RTLD_NOLOAD4;
1665
1666 rtld_fail = &jb;
1667 if (setjmpsetjmp(*rtld_fail)) {
1668 /* Clean up anything new that was (partially) loaded */
1669 if (p && p->deps) for (i=0; p->deps[i]; i++)
1670 if (p->deps[i]->global < 0)
1671 p->deps[i]->global = 0;
1672 for (p=orig_tail->next; p; p=next) {
1673 next = p->next;
1674 while (p->td_index) {
1675 void *tmp = p->td_index->next;
1676 free(p->td_index);
1677 p->td_index = tmp;
1678 }
1679 free(p->funcdescs);
1680 if (p->rpath != p->rpath_orig)
1681 free(p->rpath);
1682 free(p->deps);
1683 unmap_library(p);
1684 free(p);
1685 }
1686 tls_cnt = orig_tls_cnt;
1687 tls_offset = orig_tls_offset;
1688 tls_align = orig_tls_align;
1689 tail = orig_tail;
1690 tail->next = 0;
1691 p = 0;
1692 goto end;
1693 } else p = load_library(file, head);
1694
1695 if (!p) {
1696 error(noload ?
1697 "Library %s is not already loaded" :
1698 "Error loading shared library %s: %m",
1699 file);
1700 goto end;
1701 }
1702
1703 /* First load handling */
1704 if (!p->deps) {
1705 load_deps(p);
1706 if (p->deps) for (i=0; p->deps[i]; i++)
1707 if (!p->deps[i]->global)
1708 p->deps[i]->global = -1;
1709 if (!p->global) p->global = -1;
1710 reloc_all(p);
1711 if (p->deps) for (i=0; p->deps[i]; i++)
1712 if (p->deps[i]->global < 0)
1713 p->deps[i]->global = 0;
1714 if (p->global < 0) p->global = 0;
1715 }
1716
1717 if (mode & RTLD_GLOBAL256) {
1718 if (p->deps) for (i=0; p->deps[i]; i++)
1719 p->deps[i]->global = 1;
1720 p->global = 1;
1721 }
1722
1723 update_tls_size();
1724 _dl_debug_state();
1725 orig_tail = tail;
1726end:
1727 __release_ptc();
1728 if (p) gencnt++;
1729 pthread_rwlock_unlock(&lock);
1730 if (p) do_init_fini(orig_tail);
1731 pthread_setcancelstate(cs, 0);
1732 return p;
1733}
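
The dlopen() above takes the write lock, loads the requested library plus its dependencies, relocates them, and runs constructors via do_init_fini() only after the lock is released. From application code this path is reached through the standard dlfcn interface; an illustrative sketch follows (the library name "libexample.so" and symbol "example_fn" are placeholders, not part of dynlink.c):

    #include <dlfcn.h>
    #include <stdio.h>

    int main(void)
    {
        /* RTLD_NOW relocates the whole new dependency set up front;
         * RTLD_LOCAL keeps its symbols out of the global namespace. */
        void *h = dlopen("libexample.so", RTLD_NOW | RTLD_LOCAL);
        if (!h) {
            fprintf(stderr, "dlopen: %s\n", dlerror());
            return 1;
        }
        int (*fn)(void) = (int (*)(void))dlsym(h, "example_fn");
        if (fn) printf("example_fn() = %d\n", fn());
        dlclose(h); /* in this linker dlclose() only validates the handle; nothing is unloaded */
        return 0;
    }
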
1734
1735static int invalid_dso_handle(void *h)
1736{
1737 struct dso *p;
1738 for (p=head; p; p=p->next) if (h==p) return 0;
1739 error("Invalid library handle %p", (void *)h);
1740 return 1;
1741}
1742
1743static void *addr2dso(size_t a)
1744{
1745 struct dso *p;
1746 for (p=head; p; p=p->next) {
1747 if (DL_FDPIC && p->loadmap) {
1748 size_t i;
1749 for (i=0; i<p->loadmap->nsegs; i++) {
1750 if (a-p->loadmap->segs[i].p_vaddr
1751 < p->loadmap->segs[i].p_memsz)
1752 return p;
1753 }
1754 i = count_syms(p);
1755 if (a-(size_t)p->funcdescs < i*sizeof(*p->funcdescs))
1756 return p;
1757 } else {
1758 if (a-(size_t)p->map < p->map_len)
1759 return p;
1760 }
1761 }
1762 return 0;
1763}
1764
1765void *__tls_get_addr(size_t *);
1766
1767static void *do_dlsym(struct dso *p, const char *s, void *ra)
1768{
1769 size_t i;
1770 uint32_t h = 0, gh = 0, *ght;
1771 Sym *sym;
1772 if (p == head || p == RTLD_DEFAULT || p == RTLD_NEXT) {
1773 if (p == RTLD_DEFAULT) {
1774 p = head;
1775 } else if (p == RTLD_NEXT) {
1776 p = addr2dso((size_t)ra);
1777 if (!p) p=head;
1778 p = p->next;
1779 }
1780 struct symdef def = find_sym(p, s, 0);
1781 if (!def.sym) goto failed;
1782 if ((def.sym->st_info&0xf) == STT_TLS)
1783 return __tls_get_addr((size_t []){def.dso->tls_id, def.sym->st_value});
1784 if (DL_FDPIC && (def.sym->st_info&0xf) == STT_FUNC)
1785 return def.dso->funcdescs + (def.sym - def.dso->syms);
1786 return laddr(def.dso, def.sym->st_value);
1787 }
1788 if (invalid_dso_handle(p))
1789 return 0;
1790 if ((ght = p->ghashtab)) {
1791 gh = gnu_hash(s);
1792 sym = gnu_lookup(gh, ght, p, s);
1793 } else {
1794 h = sysv_hash(s);
1795 sym = sysv_lookup(s, h, p);
1796 }
1797 if (sym && (sym->st_info&0xf) == STT_TLS)
1798 return __tls_get_addr((size_t []){p->tls_id, sym->st_value});
1799 if (DL_FDPIC && sym && sym->st_shndx && (sym->st_info&0xf) == STT_FUNC)
1800 return p->funcdescs + (sym - p->syms);
1801 if (sym && sym->st_value && (1<<(sym->st_info&0xf) & OK_TYPES))
1802 return laddr(p, sym->st_value);
1803 if (p->deps) for (i=0; p->deps[i]; i++) {
1804 if ((ght = p->deps[i]->ghashtab)) {
1805 if (!gh) gh = gnu_hash(s);
1806 sym = gnu_lookup(gh, ght, p->deps[i], s);
1807 } else {
1808 if (!h) h = sysv_hash(s);
1809 sym = sysv_lookup(s, h, p->deps[i]);
1810 }
1811 if (sym && (sym->st_info&0xf) == STT_TLS)
1812 return __tls_get_addr((size_t []){p->deps[i]->tls_id, sym->st_value});
1813 if (DL_FDPIC && sym && sym->st_shndx && (sym->st_info&0xf) == STT_FUNC)
1814 return p->deps[i]->funcdescs + (sym - p->deps[i]->syms);
1815 if (sym && sym->st_value && (1<<(sym->st_info&0xf) & OK_TYPES))
1816 return laddr(p->deps[i], sym->st_value);
1817 }
1818failed:
1819 error("Symbol not found: %s", s);
1820 return 0;
1821}
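
In do_dlsym() above, RTLD_DEFAULT starts the search at head, while RTLD_NEXT locates the DSO containing the caller's return address and searches from the next object onward; that is the behavior symbol interposers rely on. A minimal sketch of such a wrapper, assuming it is injected with LD_PRELOAD (names are illustrative, and a production wrapper would also have to consider reentrancy during early startup):

    #define _GNU_SOURCE
    #include <dlfcn.h>
    #include <stddef.h>

    /* Forward calls to the "next" malloc, i.e. the definition that would
     * have been used had this object not been preloaded. */
    void *malloc(size_t n)
    {
        static void *(*real_malloc)(size_t);
        if (!real_malloc)
            real_malloc = (void *(*)(size_t))dlsym(RTLD_NEXT, "malloc");
        return real_malloc ? real_malloc(n) : NULL;
    }
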
1822
1823int __dladdr(const void *addr, Dl_info *info)
1824{
1825 struct dso *p;
1826 Sym *sym;
1827 uint32_t nsym;
1828 char *strings;
1829 void *best = 0;
1830 char *bestname;
1831
1832 pthread_rwlock_rdlock(&lock);
1833 p = addr2dso((size_t)addr);
1834 pthread_rwlock_unlock(&lock);
1835
1836 if (!p) return 0;
1837
1838 sym = p->syms;
1839 strings = p->strings;
1840 nsym = count_syms(p);
1841
1842 for (; nsym; nsym--, sym++) {
1843 if (sym->st_value
1844 && (1<<(sym->st_info&0xf) & OK_TYPES)
1845 && (1<<(sym->st_info>>4) & OK_BINDS)) {
1846 void *symaddr = laddr(p, sym->st_value);
1847 if (symaddr > addr || symaddr < best)
1848 continue;
1849 best = symaddr;
1850 bestname = strings + sym->st_name;
1851 if (addr == symaddr)
1852 break;
1853 }
1854 }
1855
1856 if (!best) return 0;
1857
1858 info->dli_fname = p->name;
1859 info->dli_fbase = p->base;
1860 info->dli_sname = bestname;
1861 info->dli_saddr = best;
1862
1863 return 1;
1864}
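
__dladdr() maps an address to its containing DSO via addr2dso() and then linearly scans that object's symbol table for the highest defined symbol whose address does not exceed the query. Through the public dladdr() interface it is used roughly as follows (illustrative sketch; on glibc the link step would also need -ldl):

    #define _GNU_SOURCE
    #include <dlfcn.h>
    #include <stdio.h>

    int main(void)
    {
        Dl_info info;
        /* Resolve the object and nearest symbol containing printf's address. */
        if (dladdr((void *)printf, &info))
            printf("%p is %s in %s (base %p)\n", (void *)printf,
                   info.dli_sname ? info.dli_sname : "?",
                   info.dli_fname, info.dli_fbase);
        return 0;
    }
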
1865
1866__attribute__((__visibility__("hidden")))
1867void *__dlsym(void *restrict p, const char *restrict s, void *restrict ra)
1868{
1869 void *res;
1870 pthread_rwlock_rdlock(&lock);
1871 res = do_dlsym(p, s, ra);
1872 pthread_rwlock_unlock(&lock);
1873 return res;
1874}
1875
1876int dl_iterate_phdr(int(*callback)(struct dl_phdr_info *info, size_t size, void *data), void *data)
1877{
1878 struct dso *current;
1879 struct dl_phdr_info info;
1880 int ret = 0;
1881 for(current = head; current;) {
1882 info.dlpi_addr = (uintptr_t)current->base;
1883 info.dlpi_name = current->name;
1884 info.dlpi_phdr = current->phdr;
1885 info.dlpi_phnum = current->phnum;
1886 info.dlpi_adds = gencnt;
1887 info.dlpi_subs = 0;
1888 info.dlpi_tls_modid = current->tls_id;
1889 info.dlpi_tls_data = current->tls_image;
1890
1891 ret = (callback)(&info, sizeof (info), data);
1892
1893 if (ret != 0) break;
1894
1895 pthread_rwlock_rdlock(&lock);
1896 current = current->next;
1897 pthread_rwlock_unlock(&lock);
1898 }
1899 return ret;
1900}
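
dl_iterate_phdr() visits each loaded object, fills a dl_phdr_info from the dso fields shown above, and stops as soon as the callback returns nonzero; the lock is held only while stepping to the next list entry. A typical consumer looks like this (illustrative sketch):

    #define _GNU_SOURCE
    #include <link.h>
    #include <stdio.h>

    /* Print each object's load address and program-header count.
     * Returning 0 tells the iterator to continue. */
    static int show_object(struct dl_phdr_info *info, size_t size, void *data)
    {
        (void)size; (void)data;
        printf("%s base=%p phnum=%d\n",
               info->dlpi_name[0] ? info->dlpi_name : "(main program)",
               (void *)info->dlpi_addr, (int)info->dlpi_phnum);
        return 0;
    }

    int main(void)
    {
        dl_iterate_phdr(show_object, NULL);
        return 0;
    }
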
1901#else
1902static int invalid_dso_handle(void *h)
1903{
1904 error("Invalid library handle %p", (void *)h);
1905 return 1;
1906}
1907void *dlopen(const char *file, int mode)
1908{
1909 error("Dynamic loading not supported");
1910 return 0;
1911}
1912void *__dlsym(void *restrict p, const char *restrict s, void *restrict ra)
1913{
1914 error("Symbol not found: %s", s);
1915 return 0;
1916}
1917int __dladdr (const void *addr, Dl_info *info)
1918{
1919 return 0;
1920}
1921#endif
1922
1923int __dlinfo(void *dso, int req, void *res)
1924{
1925 if (invalid_dso_handle(dso)) return -1;
1926 if (req != RTLD_DI_LINKMAP) {
1927 error("Unsupported request %d", req);
1928 return -1;
1929 }
1930 *(struct link_map **)res = dso;
1931 return 0;
1932}
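
The only request __dlinfo() accepts is RTLD_DI_LINKMAP, which simply returns the handle reinterpreted as a struct link_map pointer. Assuming the public dlinfo() wrapper is available and using a placeholder library name, a caller can recover the object's pathname like this (illustrative sketch):

    #define _GNU_SOURCE
    #include <dlfcn.h>
    #include <link.h>
    #include <stdio.h>

    int main(void)
    {
        void *h = dlopen("libexample.so", RTLD_NOW); /* placeholder name */
        if (!h) return 1;
        struct link_map *map;
        if (dlinfo(h, RTLD_DI_LINKMAP, &map) == 0)
            printf("loaded from: %s\n", map->l_name);
        return 0;
    }
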
1933
1934char *dlerror()
1935{
1936 pthread_t self = __pthread_self();
1937 if (!self->dlerror_flag) return 0;
1938 self->dlerror_flag = 0;
1939 char *s = self->dlerror_buf;
1940 if (s == (void *)-1)
1941 return "Dynamic linker failed to allocate memory for error message";
1942 else
1943 return s;
1944}
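
dlerror() is per-thread and one-shot: dlerror_flag is cleared on read, so a second call returns 0, and the (void *)-1 sentinel covers the case where error() could not allocate a message buffer. The conventional pattern for dlsym(), where a null result can be a legitimate symbol value, is to clear and then re-check the error state (illustrative sketch):

    #include <dlfcn.h>
    #include <stdio.h>

    int main(void)
    {
        /* dlopen(NULL, ...) returns a handle for the global scope (head above). */
        void *h = dlopen(NULL, RTLD_NOW);
        dlerror();                      /* clear any stale per-thread error */
        void *sym = dlsym(h, "no_such_symbol");
        const char *err = dlerror();    /* non-null only if the lookup failed */
        if (err) fprintf(stderr, "dlsym: %s\n", err);
        else printf("resolved to %p\n", sym);
        return 0;
    }
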
1945
1946int dlclose(void *p)
1947{
1948 return invalid_dso_handle(p);
1949}
1950
1951void __dl_thread_cleanup(void)
1952{
1953 pthread_t self = __pthread_self();
1954 if (self->dlerror_buf != (void *)-1)
1955 free(self->dlerror_buf);
1956}
1957
1958static void error(const char *fmt, ...)
1959{
1960 va_list ap;
1961 va_start(ap, fmt);
1962#ifdef SHARED
1963 if (!runtime) {
1964 vdprintf(2, fmt, ap);
1965 dprintf(2, "\n");
1966 ldso_fail = 1;
1967 va_end(ap);
1968 return;
1969 }
1970#endif
1971 pthread_t self = __pthread_self();
1972 if (self->dlerror_buf != (void *)-1)
1973 free(self->dlerror_buf);
1974 size_t len = vsnprintf(0, 0, fmt, ap);
1975 va_end(ap);
1976 char *buf = malloc(len+1);
1977 if (buf) {
1978 va_start(ap, fmt);
1979 vsnprintf(buf, len+1, fmt, ap);
1980 va_end(ap);
1981 } else {
1982 buf = (void *)-1;
1983 }
1984 self->dlerror_buf = buf;
1985 self->dlerror_flag = 1;
1986}
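
error() sizes its message with a first vsnprintf(0, 0, ...) pass and then formats into an exactly sized buffer, falling back to the (void *)-1 sentinel when malloc fails. The same measure-then-format idiom, isolated into a standalone helper purely for illustration (the helper name is hypothetical):

    #include <stdarg.h>
    #include <stdio.h>
    #include <stdlib.h>

    /* Return a heap-allocated formatted string, or 0 on failure. */
    static char *format_alloc(const char *fmt, ...)
    {
        va_list ap;
        va_start(ap, fmt);
        int len = vsnprintf(0, 0, fmt, ap);   /* first pass: measure only */
        va_end(ap);
        if (len < 0) return 0;
        char *buf = malloc(len+1);
        if (!buf) return 0;
        va_start(ap, fmt);
        vsnprintf(buf, len+1, fmt, ap);       /* second pass: write */
        va_end(ap);
        return buf;
    }

    int main(void)
    {
        char *s = format_alloc("pid=%d msg=%s", 123, "hello");
        if (s) { puts(s); free(s); }
        return 0;
    }
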