A Source-Code Walkthrough of the Linux Dynamic-Library Loading Function dlopen (Part 2)
Part one of this series gave a rough walkthrough of the overall flow, but left _dl_map_object_from_fd() and the link_map structure unexamined; this post analyzes those two parts.
Since _dl_map_object_from_fd() is fairly long, the complete function is placed at the end as an appendix, and the sections below walk through it piece by piece.
1. Walking through _dl_map_object_from_fd
First, _dl_map_object_from_fd declares a link_map pointer and several ELF-related variables:
struct link_map *l = NULL;
const ElfW(Ehdr) *header;
const ElfW(Phdr) *phdr;
const ElfW(Phdr) *ph;
size_t maplength;
The Ehdr above corresponds to the ELF file header, and Phdr to an entry of the ELF program header table (the ELF format itself is covered in plenty of other posts, so it is not introduced here for now). To make the two structures concrete, a small standalone sketch of reading them follows.
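This is a minimal sketch, not glibc code: it reads the ELF header of a shared object and then its program header table, assuming a 64-bit object; the path ./libfoo.so is a placeholder.

#include <elf.h>
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

int main(void)
{
    int fd = open("./libfoo.so", O_RDONLY);   /* hypothetical path */
    if (fd < 0) { perror("open"); return 1; }

    /* The ELF file header (what `header' points at in the loader). */
    Elf64_Ehdr ehdr;
    if (read(fd, &ehdr, sizeof ehdr) != (ssize_t) sizeof ehdr)
      { perror("read"); return 1; }
    printf("e_type=%u e_entry=%#lx e_phoff=%#lx e_phnum=%u\n",
           (unsigned) ehdr.e_type, (unsigned long) ehdr.e_entry,
           (unsigned long) ehdr.e_phoff, (unsigned) ehdr.e_phnum);

    /* The program header table: e_phnum entries at offset e_phoff. */
    Elf64_Phdr *phdr = malloc(ehdr.e_phnum * sizeof *phdr);
    if (phdr == NULL) return 1;
    lseek(fd, (off_t) ehdr.e_phoff, SEEK_SET);
    if (read(fd, phdr, ehdr.e_phnum * sizeof *phdr)
        != (ssize_t) (ehdr.e_phnum * sizeof *phdr))
      { perror("read"); return 1; }
    for (unsigned i = 0; i < ehdr.e_phnum; ++i)
        printf("phdr[%u]: p_type=%#x p_vaddr=%#lx\n",
               i, (unsigned) phdr[i].p_type, (unsigned long) phdr[i].p_vaddr);

    free(phdr);
    close(fd);
    return 0;
}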
The following code then checks whether some link_map already describes this .so file:
/* Look again to see if the real name matched another already loaded. */
for (l = GL(dl_ns)[nsid]._ns_loaded; l; l = l->l_next)
  if (l->l_removed == 0 && l->l_ino == st.st_ino && l->l_dev == st.st_dev)
    {
      /* The object is already loaded.
         Just bump its reference count and return it. */
      __close (fd);
      /* If the name is not in the list of names for this object add it. */
      free (realname);
      add_name_to_object (l, name);
      return l;
    }
If a match is found, the descriptor is closed and that link_map is returned. Note the if statement inside the loop:
if (l->l_removed == 0 && l->l_ino == st.st_ino && l->l_dev == st.st_dev)
Here st_ino is the file's inode number and st_dev is the device number of the filesystem holding it; together they identify the physical file itself, independent of any path. If both match an existing entry, a link_map has already been built for this file and can simply be returned. The sketch below shows the same test in isolation.
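A minimal sketch, assuming ordinary user code and placeholder paths (this uses the public stat(2) interface, not the loader's internal __fxstat64 call):

#include <stdio.h>
#include <sys/stat.h>

/* Two paths name the same file iff their (st_dev, st_ino) pair matches --
   the same test the loader applies against every loaded link_map. */
static int same_file(const char *a, const char *b)
{
    struct stat sa, sb;
    if (stat(a, &sa) < 0 || stat(b, &sb) < 0)
        return -1;                /* at least one path could not be stat'ed */
    return sa.st_dev == sb.st_dev && sa.st_ino == sb.st_ino;
}

int main(void)
{
    /* A symlink and its target compare equal; two separate copies do not.
       Both paths here are placeholders. */
    printf("%d\n", same_file("/usr/lib/libfoo.so", "/usr/lib/libfoo.so.1"));
    return 0;
}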
Reading on, header (the Ehdr pointer declared at the top) gets assigned; the comment tells us it points at fbp->buf, which open_verify already filled with the file's ELF header:
/* This is the ELF header. We read it in `open_verify'. */
header = (void *) fbp->buf;
Continuing:
/* Extract the remaining details we need from the ELF header
and then read in the program header table. */
l->l_entry = header->e_entry;
type = header->e_type;
l->l_phnum = header->e_phnum;
maplength = header->e_phnum * sizeof (ElfW(Phdr));
if (header->e_phoff + maplength <= (size_t) fbp->len)
phdr = (void *) (fbp->buf + header->e_phoff);
else
{
phdr = alloca (maplength);
__lseek (fd, header->e_phoff, SEEK_SET);
if ((size_t) __libc_read (fd, (void *) phdr, maplength) != maplength)
{
errstring = N_("cannot read file data");
goto call_lose_errno;
}
}
This assigns the relevant link_map fields and computes maplength and phdr, in preparation for mapping the ELF segments. Continuing the analysis, the following struct is defined; as its comment says, it collects the load commands:
/* Scan the program header table, collecting its load commands. */
struct loadcmd
{
ElfW(Addr) mapstart, mapend, dataend, allocend;
off_t mapoff;
int prot;
} loadcmds[l->l_phnum], *c;
size_t nloadcmds = 0;
Next, the program header table is scanned:
for (ph = phdr; ph < &phdr[l->l_phnum]; ++ph)
switch (ph->p_type)
{
/* These entries tell us where to find things once the file's
segments are mapped in. We record the addresses it says
verbatim, and later correct for the run-time load address. */
case PT_DYNAMIC:
l->l_ld = (void *) ph->p_vaddr;
l->l_ldnum = ph->p_memsz / sizeof (ElfW(Dyn));
break;
case PT_PHDR:
l->l_phdr = (void *) ph->p_vaddr;
break;
case PT_LOAD:
/* A load command tells us to map in part of the file.
We record the load commands and process them all later. */
c = &loadcmds[nloadcmds++];
c->mapstart = ph->p_vaddr & ~(GLRO(dl_pagesize) - 1);
c->mapend = ((ph->p_vaddr + ph->p_filesz + GLRO(dl_pagesize) - 1)
& ~(GLRO(dl_pagesize) - 1));
c->dataend = ph->p_vaddr + ph->p_filesz;
c->allocend = ph->p_vaddr + ph->p_memsz;
c->mapoff = ph->p_offset & ~(GLRO(dl_pagesize) - 1);
/* Determine whether there is a gap between the last segment
and this one. */
if (nloadcmds > 1 && c[-1].mapend != c->mapstart)
has_holes = true;
/* Optimize a common case. */
#if (PF_R | PF_W | PF_X) == 7 && (PROT_READ | PROT_WRITE | PROT_EXEC) == 7
c->prot = (PF_TO_PROT
>> ((ph->p_flags & (PF_R | PF_W | PF_X)) * 4)) & 0xf;
#else
c->prot = 0;
if (ph->p_flags & PF_R)
c->prot |= PROT_READ;
if (ph->p_flags & PF_W)
c->prot |= PROT_WRITE;
if (ph->p_flags & PF_X)
c->prot |= PROT_EXEC;
#endif
break;
case PT_TLS:
if (ph->p_memsz == 0)
/* Nothing to do for an empty segment. */
break;
l->l_tls_blocksize = ph->p_memsz;
l->l_tls_align = ph->p_align;
if (ph->p_align == 0)
l->l_tls_firstbyte_offset = 0;
else
l->l_tls_firstbyte_offset = ph->p_vaddr & (ph->p_align - 1);
l->l_tls_initimage_size = ph->p_filesz;
/* Since we don't know the load address yet only store the
offset. We will adjust it later. */
l->l_tls_initimage = (void *) ph->p_vaddr;
/* If not loading the initial set of shared libraries,
check whether we should permit loading a TLS segment. */
if (__builtin_expect (l->l_type == lt_library, 1)
/* If GL(dl_tls_dtv_slotinfo_list) == NULL, then rtld.c did
not set up TLS data structures, so don't use them now. */
|| __builtin_expect (GL(dl_tls_dtv_slotinfo_list) != NULL, 1))
{
/* Assign the next available module ID. */
l->l_tls_modid = _dl_next_tls_modid ();
break;
}
errval = 0;
errstring = N_("cannot handle TLS data");
goto call_lose;
break;
case PT_GNU_STACK:
stack_flags = ph->p_flags;
break;
case PT_GNU_RELRO:
l->l_relro_addr = ph->p_vaddr;
l->l_relro_size = ph->p_memsz;
break;
}
The ELF specification assigns each program header type its own role, so each case in the switch applies a different handling strategy. The PT_LOAD case records every loadable segment into the loadcmds array, rounding virtual addresses and file offsets to page boundaries; a numeric sketch of that arithmetic follows, after which we continue with the source.
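Here is a small sketch (plain arithmetic, not glibc code) of that page rounding, using invented program-header values and an assumed 4 KiB page size:

#include <stdio.h>

int main(void)
{
    const unsigned long pagesize = 0x1000;       /* assume 4 KiB pages   */
    unsigned long p_vaddr  = 0x1a2c;             /* hypothetical PT_LOAD */
    unsigned long p_offset = 0x0a2c;
    unsigned long p_filesz = 0x0340;
    unsigned long p_memsz  = 0x0900;             /* > filesz => BSS tail */

    /* Same computations as the loadcmd fields above. */
    unsigned long mapstart = p_vaddr & ~(pagesize - 1);
    unsigned long mapend   = (p_vaddr + p_filesz + pagesize - 1)
                             & ~(pagesize - 1);
    unsigned long dataend  = p_vaddr + p_filesz;
    unsigned long allocend = p_vaddr + p_memsz;
    unsigned long mapoff   = p_offset & ~(pagesize - 1);

    /* Prints: mapstart=0x1000 mapend=0x2000 dataend=0x1d6c
               allocend=0x232c mapoff=0 */
    printf("mapstart=%#lx mapend=%#lx dataend=%#lx allocend=%#lx mapoff=%#lx\n",
           mapstart, mapend, dataend, allocend, mapoff);
    return 0;
}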
/* Now process the load commands and map segments into memory. */
c = loadcmds;
/* Length of the sections to be loaded. */
maplength = loadcmds[nloadcmds - 1].allocend - c->mapstart;
With these two assignments in place, the code moves on to the actual mapping:
ElfW(Addr) mappref;
mappref = (ELF_PREFERRED_ADDRESS (loader, maplength,
c->mapstart & GLRO(dl_use_load_bias))
- MAP_BASE_ADDR (l));
/* Remember which part of the address space this object uses. */
l->l_map_start = (ElfW(Addr)) __mmap ((void *) mappref, maplength,
c->prot,
MAP_COPY|MAP_FILE,
fd, c->mapoff);
l->l_map_end = l->l_map_start + maplength;
l->l_addr = l->l_map_start - c->mapstart;
This maps the whole extent of the object in one call and fills in the corresponding link_map fields; l_addr is the load bias that later converts link-time addresses into runtime ones. A worked example with made-up numbers follows, after which the source continues.
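A worked example with invented numbers; only the subtraction matters here:

#include <stdio.h>

int main(void)
{
    unsigned long l_map_start = 0x7f0000000000UL; /* what __mmap returned */
    unsigned long mapstart    = 0x1000UL;         /* c->mapstart          */
    unsigned long l_addr      = l_map_start - mapstart; /* the load bias  */

    /* Any file-relative address, e.g. a PT_DYNAMIC p_vaddr of 0x3e00,
       becomes a runtime address by adding the bias, exactly as the
       later statement l->l_ld = ... + l->l_addr does. */
    printf("l_addr=%#lx, runtime .dynamic=%#lx\n", l_addr, l_addr + 0x3e00);
    return 0;
}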
if (has_holes)
/* Change protection on the excess portion to disallow all access;
the portions we do not remap later will be inaccessible as if
unallocated. Then jump into the normal segment-mapping loop to
handle the portion of the segment past the end of the file
mapping. */
__mprotect ((caddr_t) (l->l_addr + c->mapend),
loadcmds[nloadcmds - 1].mapstart - c->mapend,
PROT_NONE);
The code above revokes access (PROT_NONE) to the excess portion of the initial mapping; the parts not remapped later thus remain inaccessible, as if unallocated. Continuing:
/* Remember which part of the address space this object uses. */
l->l_map_start = c->mapstart + l->l_addr;
l->l_map_end = l->l_map_start + maplength;
l->l_contiguous = !has_holes;
while (c < &loadcmds[nloadcmds])
{
if (c->mapend > c->mapstart
/* Map the segment contents from the file. */
&& (__mmap ((void *) (l->l_addr + c->mapstart),
c->mapend - c->mapstart, c->prot,
MAP_FIXED|MAP_COPY|MAP_FILE,
fd, c->mapoff)
== MAP_FAILED))
goto map_error;
postmap:
if (c->prot & PROT_EXEC)
l->l_text_end = l->l_addr + c->mapend;
if (l->l_phdr == 0
&& (ElfW(Off)) c->mapoff <= header->e_phoff
&& ((size_t) (c->mapend - c->mapstart + c->mapoff)
>= header->e_phoff + header->e_phnum * sizeof (ElfW(Phdr))))
/* Found the program header in this segment. */
l->l_phdr = (void *) (c->mapstart + header->e_phoff - c->mapoff);
if (c->allocend > c->dataend)
{
/* Extra zero pages should appear at the end of this segment,
after the data mapped from the file. */
ElfW(Addr) zero, zeroend, zeropage;
zero = l->l_addr + c->dataend;
zeroend = l->l_addr + c->allocend;
zeropage = ((zero + GLRO(dl_pagesize) - 1)
& ~(GLRO(dl_pagesize) - 1));
if (zeroend < zeropage)
/* All the extra data is in the last page of the segment.
We can just zero it. */
zeropage = zeroend;
if (zeropage > zero)
{
/* Zero the final part of the last page of the segment. */
if (__builtin_expect ((c->prot & PROT_WRITE) == 0, 0))
{
/* Dag nab it. */
if (__mprotect ((caddr_t) (zero
& ~(GLRO(dl_pagesize) - 1)),
GLRO(dl_pagesize), c->prot|PROT_WRITE) < 0)
{
errstring = N_("cannot change memory protections");
goto call_lose_errno;
}
}
memset ((void *) zero, '\0', zeropage - zero);
if (__builtin_expect ((c->prot & PROT_WRITE) == 0, 0))
__mprotect ((caddr_t) (zero & ~(GLRO(dl_pagesize) - 1)),
GLRO(dl_pagesize), c->prot);
}
if (zeroend > zeropage)
{
/* Map the remaining zero pages in from the zero fill FD. */
caddr_t mapat;
mapat = __mmap ((caddr_t) zeropage, zeroend - zeropage,
c->prot, MAP_ANON|MAP_PRIVATE|MAP_FIXED,
-1, 0);
if (__builtin_expect (mapat == MAP_FAILED, 0))
{
errstring = N_("cannot map zero-fill pages");
goto call_lose_errno;
}
}
}
++c;
}
The loop above maps each segment's file-backed contents with the protection flags derived earlier from its PT_LOAD header. The case where allocend > dataend is special: zero is the runtime address where the file-backed data ends, zeroend is where the whole segment ends in memory, and zeropage is zero rounded up to the next page boundary. The tail of the last file-backed page, [zero, zeropage), is cleared with memset (temporarily making the page writable if needed), while any remaining whole pages, [zeropage, zeroend), are mapped as anonymous, process-private, zero-filled memory. This gap between file size and memory size is exactly where the uninitialized data (BSS) lives.
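A sketch of the zero/zeroend/zeropage arithmetic with invented values and an assumed 4 KiB page size, making the two sub-ranges explicit:

#include <stdio.h>

int main(void)
{
    const unsigned long pagesize = 0x1000;
    unsigned long l_addr   = 0x7f0000000000UL;  /* hypothetical load bias */
    unsigned long dataend  = 0x5d6c;            /* p_vaddr + p_filesz     */
    unsigned long allocend = 0x832c;            /* p_vaddr + p_memsz      */

    unsigned long zero     = l_addr + dataend;  /* first byte to clear    */
    unsigned long zeroend  = l_addr + allocend; /* end of the segment     */
    unsigned long zeropage = (zero + pagesize - 1) & ~(pagesize - 1);

    /* [zero, zeropage): tail of the last file-backed page, memset to 0.
       [zeropage, zeroend): whole pages, mapped MAP_ANON|MAP_PRIVATE.    */
    printf("memset range:   %#lx..%#lx\n", zero, zeropage);
    printf("anon-map range: %#lx..%#lx\n", zeropage, zeroend);
    return 0;
}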
if (l->l_ld == 0)
{
if (__builtin_expect (type == ET_DYN, 0))
{
errstring = N_("object file has no dynamic section");
goto call_lose;
}
}
else
l->l_ld = (ElfW(Dyn) *) ((ElfW(Addr)) l->l_ld + l->l_addr);
Continuing, the function then calls:
elf_get_dynamic_info (l, NULL);
elf_get_dynamic_info is one of the most important functions in the whole loading path: nearly all of the dynamic-linking bookkeeping that follows works through the l_info array it fills in here. Stepping into elf_get_dynamic_info:
inline void __attribute__ ((unused, always_inline))
elf_get_dynamic_info (struct link_map *l, ElfW(Dyn) *temp)
{
ElfW(Dyn) *dyn = l->l_ld;
ElfW(Dyn) **info;
#if __ELF_NATIVE_CLASS == 32
typedef Elf32_Word d_tag_utype;
#elif __ELF_NATIVE_CLASS == 64
typedef Elf64_Xword d_tag_utype;
#endif
#ifndef RTLD_BOOTSTRAP
if (dyn == NULL)
return;
#endif
info = l->l_info;
while (dyn->d_tag != DT_NULL)
{
if ((d_tag_utype) dyn->d_tag < DT_NUM)
info[dyn->d_tag] = dyn;
else if (dyn->d_tag >= DT_LOPROC &&
dyn->d_tag < DT_LOPROC + DT_THISPROCNUM)
info[dyn->d_tag - DT_LOPROC + DT_NUM] = dyn;
else if ((d_tag_utype) DT_VERSIONTAGIDX (dyn->d_tag) < DT_VERSIONTAGNUM)
info[VERSYMIDX (dyn->d_tag)] = dyn;
else if ((d_tag_utype) DT_EXTRATAGIDX (dyn->d_tag) < DT_EXTRANUM)
info[DT_EXTRATAGIDX (dyn->d_tag) + DT_NUM + DT_THISPROCNUM
+ DT_VERSIONTAGNUM] = dyn;
else if ((d_tag_utype) DT_VALTAGIDX (dyn->d_tag) < DT_VALNUM)
info[DT_VALTAGIDX (dyn->d_tag) + DT_NUM + DT_THISPROCNUM
+ DT_VERSIONTAGNUM + DT_EXTRANUM] = dyn;
else if ((d_tag_utype) DT_ADDRTAGIDX (dyn->d_tag) < DT_ADDRNUM)
info[DT_ADDRTAGIDX (dyn->d_tag) + DT_NUM + DT_THISPROCNUM
+ DT_VERSIONTAGNUM + DT_EXTRANUM + DT_VALNUM] = dyn;
++dyn;
}
#define DL_RO_DYN_TEMP_CNT 8
#ifndef DL_RO_DYN_SECTION
/* Don't adjust .dynamic unnecessarily. */
if (l->l_addr != 0)
{
ElfW(Addr) l_addr = l->l_addr;
int cnt = 0;
# define ADJUST_DYN_INFO(tag) \
do \
if (info[tag] != NULL) \
{ \
if (temp) \
{ \
temp[cnt].d_tag = info[tag]->d_tag; \
temp[cnt].d_un.d_ptr = info[tag]->d_un.d_ptr + l_addr; \
info[tag] = temp + cnt++; \
} \
else \
info[tag]->d_un.d_ptr += l_addr; \
} \
while (0)
ADJUST_DYN_INFO (DT_HASH);
ADJUST_DYN_INFO (DT_PLTGOT);
ADJUST_DYN_INFO (DT_STRTAB);
ADJUST_DYN_INFO (DT_SYMTAB);
# if ! ELF_MACHINE_NO_RELA
ADJUST_DYN_INFO (DT_RELA);
# endif
# if ! ELF_MACHINE_NO_REL
ADJUST_DYN_INFO (DT_REL);
# endif
ADJUST_DYN_INFO (DT_JMPREL);
ADJUST_DYN_INFO (VERSYMIDX (DT_VERSYM));
ADJUST_DYN_INFO (DT_ADDRTAGIDX (DT_GNU_HASH) + DT_NUM + DT_THISPROCNUM
+ DT_VERSIONTAGNUM + DT_EXTRANUM + DT_VALNUM);
# undef ADJUST_DYN_INFO
assert (cnt <= DL_RO_DYN_TEMP_CNT);
}
#endif
if (info[DT_PLTREL] != NULL)
{
#if ELF_MACHINE_NO_RELA
assert (info[DT_PLTREL]->d_un.d_val == DT_REL);
#elif ELF_MACHINE_NO_REL
assert (info[DT_PLTREL]->d_un.d_val == DT_RELA);
#else
assert (info[DT_PLTREL]->d_un.d_val == DT_REL
|| info[DT_PLTREL]->d_un.d_val == DT_RELA);
#endif
}
#if ! ELF_MACHINE_NO_RELA
if (info[DT_RELA] != NULL)
assert (info[DT_RELAENT]->d_un.d_val == sizeof (ElfW(Rela)));
# endif
# if ! ELF_MACHINE_NO_REL
if (info[DT_REL] != NULL)
assert (info[DT_RELENT]->d_un.d_val == sizeof (ElfW(Rel)));
#endif
#ifdef RTLD_BOOTSTRAP
/* Only the bind now flags are allowed. */
assert (info[VERSYMIDX (DT_FLAGS_1)] == NULL
|| (info[VERSYMIDX (DT_FLAGS_1)]->d_un.d_val & ~DF_1_NOW) == 0);
assert (info[DT_FLAGS] == NULL
|| (info[DT_FLAGS]->d_un.d_val & ~DF_BIND_NOW) == 0);
/* Flags must not be set for ld.so. */
assert (info[DT_RUNPATH] == NULL);
assert (info[DT_RPATH] == NULL);
#else
if (info[DT_FLAGS] != NULL)
{
/* Flags are used. Translate to the old form where available.
Since these l_info entries are only tested for NULL pointers it
is ok if they point to the DT_FLAGS entry. */
l->l_flags = info[DT_FLAGS]->d_un.d_val;
if (l->l_flags & DF_SYMBOLIC)
info[DT_SYMBOLIC] = info[DT_FLAGS];
if (l->l_flags & DF_TEXTREL)
info[DT_TEXTREL] = info[DT_FLAGS];
if (l->l_flags & DF_BIND_NOW)
info[DT_BIND_NOW] = info[DT_FLAGS];
}
if (info[VERSYMIDX (DT_FLAGS_1)] != NULL)
{
l->l_flags_1 = info[VERSYMIDX (DT_FLAGS_1)]->d_un.d_val;
if (l->l_flags_1 & DF_1_NOW)
info[DT_BIND_NOW] = info[VERSYMIDX (DT_FLAGS_1)];
}
if (info[DT_RUNPATH] != NULL)
/* If both RUNPATH and RPATH are given, the latter is ignored. */
info[DT_RPATH] = NULL;
#endif
}
In the __attribute__ list above, unused tells the compiler that the function itself may go unreferenced, suppressing the warning -Wall would otherwise emit, and always_inline is self-explanatory: it forces the function to be inlined.
Inside elf_get_dynamic_info, the l_info array is populated:
info = l->l_info;
while (dyn->d_tag != DT_NULL)
{
if ((d_tag_utype) dyn->d_tag < DT_NUM)
info[dyn->d_tag] = dyn;
else if (dyn->d_tag >= DT_LOPROC &&
dyn->d_tag < DT_LOPROC + DT_THISPROCNUM)
info[dyn->d_tag - DT_LOPROC + DT_NUM] = dyn;
else if ((d_tag_utype) DT_VERSIONTAGIDX (dyn->d_tag) < DT_VERSIONTAGNUM)
info[VERSYMIDX (dyn->d_tag)] = dyn;
else if ((d_tag_utype) DT_EXTRATAGIDX (dyn->d_tag) < DT_EXTRANUM)
info[DT_EXTRATAGIDX (dyn->d_tag) + DT_NUM + DT_THISPROCNUM
+ DT_VERSIONTAGNUM] = dyn;
else if ((d_tag_utype) DT_VALTAGIDX (dyn->d_tag) < DT_VALNUM)
info[DT_VALTAGIDX (dyn->d_tag) + DT_NUM + DT_THISPROCNUM
+ DT_VERSIONTAGNUM + DT_EXTRANUM] = dyn;
else if ((d_tag_utype) DT_ADDRTAGIDX (dyn->d_tag) < DT_ADDRNUM)
info[DT_ADDRTAGIDX (dyn->d_tag) + DT_NUM + DT_THISPROCNUM
+ DT_VERSIONTAGNUM + DT_EXTRANUM + DT_VALNUM] = dyn;
++dyn;
}
This array pays off later: through these entries the linker can reach things such as symbol names and relocation information. The elegant trick is that the array index is derived directly from d_tag, which keeps the code compact. Below is a minimal sketch of the scheme.
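A minimal sketch of the indexing trick for the simplest range, tags below DT_NUM; the dyn[] contents here are invented, while DT_NUM, DT_STRTAB and DT_SYMTAB come from <elf.h>:

#include <elf.h>
#include <stdio.h>

int main(void)
{
    /* A made-up .dynamic section, terminated by DT_NULL. */
    Elf64_Dyn dyn[] = {
        { .d_tag = DT_STRTAB, .d_un.d_ptr = 0x4000 },
        { .d_tag = DT_SYMTAB, .d_un.d_ptr = 0x3000 },
        { .d_tag = DT_NULL },
    };
    Elf64_Dyn *info[DT_NUM] = { 0 };

    /* Store each entry at the slot named by its tag... */
    for (Elf64_Dyn *d = dyn; d->d_tag != DT_NULL; ++d)
        if ((Elf64_Xword) d->d_tag < DT_NUM)
            info[d->d_tag] = d;

    /* ...so that any tag can later be looked up in O(1). */
    if (info[DT_STRTAB])
        printf("DT_STRTAB at %#lx\n",
               (unsigned long) info[DT_STRTAB]->d_un.d_ptr);
    return 0;
}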
That completes the walkthrough of _dl_map_object_from_fd. Plenty of questions remain; I hope to fill these gaps as I keep studying, and readers are welcome to discuss in the comments.
2. The link_map structure
First, the full definition of link_map:
struct link_map
{
/* These first few members are part of the protocol with the debugger.
This is the same format used in SVR4. */
ElfW(Addr) l_addr; /* Base address shared object is loaded at. */
char *l_name; /* Absolute file name object was found in. */
ElfW(Dyn) *l_ld; /* Dynamic section of the shared object. */
struct link_map *l_next, *l_prev; /* Chain of loaded objects. */
/* All following members are internal to the dynamic linker.
They may change without notice. */
/* This is an element which is only ever different from a pointer to
the very same copy of this type for ld.so when it is used in more
than one namespace. */
struct link_map *l_real;
/* Number of the namespace this link map belongs to. */
Lmid_t l_ns;
struct libname_list *l_libname;
/* Indexed pointers to dynamic section.
[0,DT_NUM) are indexed by the processor-independent tags.
[DT_NUM,DT_NUM+DT_THISPROCNUM) are indexed by the tag minus DT_LOPROC.
[DT_NUM+DT_THISPROCNUM,DT_NUM+DT_THISPROCNUM+DT_VERSIONTAGNUM) are
indexed by DT_VERSIONTAGIDX(tagvalue).
[DT_NUM+DT_THISPROCNUM+DT_VERSIONTAGNUM,
DT_NUM+DT_THISPROCNUM+DT_VERSIONTAGNUM+DT_EXTRANUM) are indexed by
DT_EXTRATAGIDX(tagvalue).
[DT_NUM+DT_THISPROCNUM+DT_VERSIONTAGNUM+DT_EXTRANUM,
DT_NUM+DT_THISPROCNUM+DT_VERSIONTAGNUM+DT_EXTRANUM+DT_VALNUM) are
indexed by DT_VALTAGIDX(tagvalue) and
[DT_NUM+DT_THISPROCNUM+DT_VERSIONTAGNUM+DT_EXTRANUM+DT_VALNUM,
DT_NUM+DT_THISPROCNUM+DT_VERSIONTAGNUM+DT_EXTRANUM+DT_VALNUM+DT_ADDRNUM)
are indexed by DT_ADDRTAGIDX(tagvalue), see <elf.h>. */
ElfW(Dyn) *l_info[DT_NUM + DT_THISPROCNUM + DT_VERSIONTAGNUM
+ DT_EXTRANUM + DT_VALNUM + DT_ADDRNUM];
const ElfW(Phdr) *l_phdr; /* Pointer to program header table in core. */
ElfW(Addr) l_entry; /* Entry point location. */
ElfW(Half) l_phnum; /* Number of program header entries. */
ElfW(Half) l_ldnum; /* Number of dynamic segment entries. */
/* Array of DT_NEEDED dependencies and their dependencies, in
dependency order for symbol lookup (with and without
duplicates). There is no entry before the dependencies have
been loaded. */
struct r_scope_elem l_searchlist;
/* We need a special searchlist to process objects marked with
DT_SYMBOLIC. */
struct r_scope_elem l_symbolic_searchlist;
/* Dependent object that first caused this object to be loaded. */
struct link_map *l_loader;
/* Array with version names. */
struct r_found_version *l_versions;
unsigned int l_nversions;
/* Symbol hash table. */
Elf_Symndx l_nbuckets;
Elf32_Word l_gnu_bitmask_idxbits;
Elf32_Word l_gnu_shift;
const ElfW(Addr) *l_gnu_bitmask;
union
{
const Elf32_Word *l_gnu_buckets;
const Elf_Symndx *l_chain;
};
union
{
const Elf32_Word *l_gnu_chain_zero;
const Elf_Symndx *l_buckets;
};
unsigned int l_direct_opencount; /* Reference count for dlopen/dlclose. */
enum /* Where this object came from. */
{
lt_executable, /* The main executable program. */
lt_library, /* Library needed by main executable. */
lt_loaded /* Extra run-time loaded shared object. */
} l_type:2;
unsigned int l_relocated:1; /* Nonzero if object's relocations done. */
unsigned int l_init_called:1; /* Nonzero if DT_INIT function called. */
unsigned int l_global:1; /* Nonzero if object in _dl_global_scope. */
unsigned int l_reserved:2; /* Reserved for internal use. */
unsigned int l_phdr_allocated:1; /* Nonzero if the data structure pointed
to by `l_phdr' is allocated. */
unsigned int l_soname_added:1; /* Nonzero if the SONAME is for sure in
the l_libname list. */
unsigned int l_faked:1; /* Nonzero if this is a faked descriptor
without associated file. */
unsigned int l_need_tls_init:1; /* Nonzero if GL(dl_init_static_tls)
should be called on this link map
when relocation finishes. */
unsigned int l_auditing:1; /* Nonzero if the DSO is used in auditing. */
unsigned int l_audit_any_plt:1; /* Nonzero if at least one audit module
is interested in the PLT interception.*/
unsigned int l_removed:1; /* Nozero if the object cannot be used anymore
since it is removed. */
unsigned int l_contiguous:1; /* Nonzero if inter-segment holes are
mprotected or if no holes are present at
all. */
unsigned int l_symbolic_in_local_scope:1; /* Nonzero if l_local_scope
during LD_TRACE_PRELINKING=1
contains any DT_SYMBOLIC
libraries. */
/* Collected information about own RPATH directories. */
struct r_search_path_struct l_rpath_dirs;
/* Collected results of relocation while profiling. */
struct reloc_result
{
DL_FIXUP_VALUE_TYPE addr;
struct link_map *bound;
unsigned int boundndx;
uint32_t enterexit;
unsigned int flags;
} *l_reloc_result;
/* Pointer to the version information if available. */
ElfW(Versym) *l_versyms;
/* String specifying the path where this object was found. */
const char *l_origin;
/* Start and finish of memory map for this object. l_map_start
need not be the same as l_addr. */
ElfW(Addr) l_map_start, l_map_end;
/* End of the executable part of the mapping. */
ElfW(Addr) l_text_end;
/* Default array for 'l_scope'. */
struct r_scope_elem *l_scope_mem[4];
/* Size of array allocated for 'l_scope'. */
size_t l_scope_max;
/* This is an array defining the lookup scope for this link map.
There are initially at most three different scope lists. */
struct r_scope_elem **l_scope;
/* A similar array, this time only with the local scope. This is
used occasionally. */
struct r_scope_elem *l_local_scope[2];
/* This information is kept to check for sure whether a shared
object is the same as one already loaded. */
dev_t l_dev;
ino64_t l_ino;
/* Collected information about own RUNPATH directories. */
struct r_search_path_struct l_runpath_dirs;
/* List of object in order of the init and fini calls. */
struct link_map **l_initfini;
/* The init and fini list generated at startup, saved when the
object is also loaded dynamically. */
struct link_map **l_orig_initfini;
/* List of the dependencies introduced through symbol binding. */
struct link_map_reldeps
{
unsigned int act;
struct link_map *list[];
} *l_reldeps;
unsigned int l_reldepsmax;
/* Nonzero if the DSO is used. */
unsigned int l_used;
/* Various flag words. */
ElfW(Word) l_feature_1;
ElfW(Word) l_flags_1;
ElfW(Word) l_flags;
/* Temporarily used in `dl_close'. */
int l_idx;
struct link_map_machine l_mach;
struct
{
const ElfW(Sym) *sym;
int type_class;
struct link_map *value;
const ElfW(Sym) *ret;
} l_lookup_cache;
/* Thread-local storage related info. */
/* Start of the initialization image. */
void *l_tls_initimage;
/* Size of the initialization image. */
size_t l_tls_initimage_size;
/* Size of the TLS block. */
size_t l_tls_blocksize;
/* Alignment requirement of the TLS block. */
size_t l_tls_align;
/* Offset of first byte module alignment. */
size_t l_tls_firstbyte_offset;
#ifndef NO_TLS_OFFSET
# define NO_TLS_OFFSET 0
#endif
#ifndef FORCED_DYNAMIC_TLS_OFFSET
# if NO_TLS_OFFSET == 0
# define FORCED_DYNAMIC_TLS_OFFSET 1
# elif NO_TLS_OFFSET == -1
# define FORCED_DYNAMIC_TLS_OFFSET -2
# else
# error "FORCED_DYNAMIC_TLS_OFFSET is not defined"
# endif
#endif
/* For objects present at startup time: offset in the static TLS block. */
ptrdiff_t l_tls_offset;
/* Index of the module in the dtv array. */
size_t l_tls_modid;
/* Information used to change permission after the relocations are
done. */
ElfW(Addr) l_relro_addr;
size_t l_relro_size;
unsigned long long int l_serial;
/* Audit information. This array apparent must be the last in the
structure. Never add something after it. */
struct auditstate
{
uintptr_t cookie;
unsigned int bindflags;
} l_audit[0];
};
struct link_map describes a loaded shared object. The l_next and l_prev members chain together all shared objects loaded at startup. These data structures live in the space used by the run-time dynamic linker; modifying them can have disastrous consequences, and the layout may change in future releases if necessary. User-level programs must avoid defining objects of this type themselves, though they may still inspect the chain, as the sketch below shows.
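A small sketch, assuming glibc: user code can walk this chain read-only through the public dlinfo(RTLD_DI_LINKMAP) interface (link with -ldl on older glibc; libm.so.6 is just an example library):

#define _GNU_SOURCE
#include <dlfcn.h>
#include <link.h>
#include <stdio.h>

int main(void)
{
    void *h = dlopen("libm.so.6", RTLD_NOW);
    if (h == NULL) { fprintf(stderr, "%s\n", dlerror()); return 1; }

    struct link_map *lm;
    if (dlinfo(h, RTLD_DI_LINKMAP, &lm) != 0)
      { fprintf(stderr, "%s\n", dlerror()); return 1; }

    /* Walk back to the head of the chain, then print every object:
       l_addr is the load bias, l_name the file name. */
    while (lm->l_prev != NULL)
        lm = lm->l_prev;
    for (; lm != NULL; lm = lm->l_next)
        printf("%#14lx %s\n", (unsigned long) lm->l_addr, lm->l_name);

    dlclose(h);
    return 0;
}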
Appendix
The full text of _dl_map_object_from_fd:
static
#endif
struct link_map *
_dl_map_object_from_fd (const char *name, int fd, struct filebuf *fbp,
char *realname, struct link_map *loader, int l_type,
int mode, void **stack_endp, Lmid_t nsid)
{
struct link_map *l = NULL;
const ElfW(Ehdr) *header;
const ElfW(Phdr) *phdr;
const ElfW(Phdr) *ph;
size_t maplength;
int type;
struct stat64 st;
/* Initialize to keep the compiler happy. */
const char *errstring = NULL;
int errval = 0;
struct r_debug *r = _dl_debug_initialize (0, nsid);
bool make_consistent = false;
/* Get file information. */
if (__builtin_expect (__fxstat64 (_STAT_VER, fd, &st) < 0, 0))
{
errstring = N_("cannot stat shared object");
call_lose_errno:
errval = errno;
call_lose:
lose (errval, fd, name, realname, l, errstring,
make_consistent ? r : NULL);
}
/* Look again to see if the real name matched another already loaded. */
for (l = GL(dl_ns)[nsid]._ns_loaded; l; l = l->l_next)
if (l->l_removed == 0 && l->l_ino == st.st_ino && l->l_dev == st.st_dev)
{
/* The object is already loaded.
Just bump its reference count and return it. */
__close (fd);
/* If the name is not in the list of names for this object add
it. */
free (realname);
add_name_to_object (l, name);
return l;
}
#ifdef SHARED
/* When loading into a namespace other than the base one we must
avoid loading ld.so since there can only be one copy. Ever. */
if (__builtin_expect (nsid != LM_ID_BASE, 0)
&& ((st.st_ino == GL(dl_rtld_map).l_ino
&& st.st_dev == GL(dl_rtld_map).l_dev)
|| _dl_name_match_p (name, &GL(dl_rtld_map))))
{
/* This is indeed ld.so. Create a new link_map which refers to
the real one for almost everything. */
l = _dl_new_object (realname, name, l_type, loader, mode, nsid);
if (l == NULL)
goto fail_new;
/* Refer to the real descriptor. */
l->l_real = &GL(dl_rtld_map);
/* No need to bump the refcount of the real object, ld.so will
never be unloaded. */
__close (fd);
/* Add the map for the mirrored object to the object list. */
_dl_add_to_namespace_list (l, nsid);
return l;
}
#endif
if (mode & RTLD_NOLOAD)
{
/* We are not supposed to load the object unless it is already
loaded. So return now. */
free (realname);
__close (fd);
return NULL;
}
/* Print debugging message. */
if (__builtin_expect (GLRO(dl_debug_mask) & DL_DEBUG_FILES, 0))
_dl_debug_printf ("file=%s [%lu]; generating link map\n", name, nsid);
/* This is the ELF header. We read it in `open_verify'. */
header = (void *) fbp->buf;
#ifndef MAP_ANON
# define MAP_ANON 0
if (_dl_zerofd == -1)
{
_dl_zerofd = _dl_sysdep_open_zero_fill ();
if (_dl_zerofd == -1)
{
free (realname);
__close (fd);
_dl_signal_error (errno, NULL, NULL,
N_("cannot open zero fill device"));
}
}
#endif
/* Signal that we are going to add new objects. */
if (r->r_state == RT_CONSISTENT)
{
#ifdef SHARED
/* Auditing checkpoint: we are going to add new objects. */
if ((mode & __RTLD_AUDIT) == 0
&& __builtin_expect (GLRO(dl_naudit) > 0, 0))
{
struct link_map *head = GL(dl_ns)[nsid]._ns_loaded;
/* Do not call the functions for any auditing object. */
if (head->l_auditing == 0)
{
struct audit_ifaces *afct = GLRO(dl_audit);
for (unsigned int cnt = 0; cnt < GLRO(dl_naudit); ++cnt)
{
if (afct->activity != NULL)
afct->activity (&head->l_audit[cnt].cookie, LA_ACT_ADD);
afct = afct->next;
}
}
}
#endif
/* Notify the debugger we have added some objects. We need to
call _dl_debug_initialize in a static program in case dynamic
linking has not been used before. */
r->r_state = RT_ADD;
_dl_debug_state ();
make_consistent = true;
}
else
assert (r->r_state == RT_ADD);
/* Enter the new object in the list of loaded objects. */
l = _dl_new_object (realname, name, l_type, loader, mode, nsid);
if (__builtin_expect (l == NULL, 0))
{
#ifdef SHARED
fail_new:
#endif
errstring = N_("cannot create shared object descriptor");
goto call_lose_errno;
}
/* Extract the remaining details we need from the ELF header
and then read in the program header table. */
l->l_entry = header->e_entry;
type = header->e_type;
l->l_phnum = header->e_phnum;
maplength = header->e_phnum * sizeof (ElfW(Phdr));
if (header->e_phoff + maplength <= (size_t) fbp->len)
phdr = (void *) (fbp->buf + header->e_phoff);
else
{
phdr = alloca (maplength);
__lseek (fd, header->e_phoff, SEEK_SET);
if ((size_t) __libc_read (fd, (void *) phdr, maplength) != maplength)
{
errstring = N_("cannot read file data");
goto call_lose_errno;
}
}
/* On most platforms presume that PT_GNU_STACK is absent and the stack is
* executable. Other platforms default to a nonexecutable stack and don't
* need PT_GNU_STACK to do so. */
uint_fast16_t stack_flags = DEFAULT_STACK_PERMS;
{
/* Scan the program header table, collecting its load commands. */
struct loadcmd
{
ElfW(Addr) mapstart, mapend, dataend, allocend;
off_t mapoff;
int prot;
} loadcmds[l->l_phnum], *c;
size_t nloadcmds = 0;
bool has_holes = false;
/* The struct is initialized to zero so this is not necessary:
l->l_ld = 0;
l->l_phdr = 0;
l->l_addr = 0; */
for (ph = phdr; ph < &phdr[l->l_phnum]; ++ph)
switch (ph->p_type)
{
/* These entries tell us where to find things once the file's
segments are mapped in. We record the addresses it says
verbatim, and later correct for the run-time load address. */
case PT_DYNAMIC:
l->l_ld = (void *) ph->p_vaddr;
l->l_ldnum = ph->p_memsz / sizeof (ElfW(Dyn));
break;
case PT_PHDR:
l->l_phdr = (void *) ph->p_vaddr;
break;
case PT_LOAD:
/* A load command tells us to map in part of the file.
We record the load commands and process them all later. */
if (__builtin_expect ((ph->p_align & (GLRO(dl_pagesize) - 1)) != 0,
0))
{
errstring = N_("ELF load command alignment not page-aligned");
goto call_lose;
}
if (__builtin_expect (((ph->p_vaddr - ph->p_offset)
& (ph->p_align - 1)) != 0, 0))
{
errstring
= N_("ELF load command address/offset not properly aligned");
goto call_lose;
}
c = &loadcmds[nloadcmds++];
c->mapstart = ph->p_vaddr & ~(GLRO(dl_pagesize) - 1);
c->mapend = ((ph->p_vaddr + ph->p_filesz + GLRO(dl_pagesize) - 1)
& ~(GLRO(dl_pagesize) - 1));
c->dataend = ph->p_vaddr + ph->p_filesz;
c->allocend = ph->p_vaddr + ph->p_memsz;
c->mapoff = ph->p_offset & ~(GLRO(dl_pagesize) - 1);
/* Determine whether there is a gap between the last segment
and this one. */
if (nloadcmds > 1 && c[-1].mapend != c->mapstart)
has_holes = true;
/* Optimize a common case. */
#if (PF_R | PF_W | PF_X) == 7 && (PROT_READ | PROT_WRITE | PROT_EXEC) == 7
c->prot = (PF_TO_PROT
>> ((ph->p_flags & (PF_R | PF_W | PF_X)) * 4)) & 0xf;
#else
c->prot = 0;
if (ph->p_flags & PF_R)
c->prot |= PROT_READ;
if (ph->p_flags & PF_W)
c->prot |= PROT_WRITE;
if (ph->p_flags & PF_X)
c->prot |= PROT_EXEC;
#endif
break;
case PT_TLS:
if (ph->p_memsz == 0)
/* Nothing to do for an empty segment. */
break;
l->l_tls_blocksize = ph->p_memsz;
l->l_tls_align = ph->p_align;
if (ph->p_align == 0)
l->l_tls_firstbyte_offset = 0;
else
l->l_tls_firstbyte_offset = ph->p_vaddr & (ph->p_align - 1);
l->l_tls_initimage_size = ph->p_filesz;
/* Since we don't know the load address yet only store the
offset. We will adjust it later. */
l->l_tls_initimage = (void *) ph->p_vaddr;
/* If not loading the initial set of shared libraries,
check whether we should permit loading a TLS segment. */
if (__builtin_expect (l->l_type == lt_library, 1)
/* If GL(dl_tls_dtv_slotinfo_list) == NULL, then rtld.c did
not set up TLS data structures, so don't use them now. */
|| __builtin_expect (GL(dl_tls_dtv_slotinfo_list) != NULL, 1))
{
/* Assign the next available module ID. */
l->l_tls_modid = _dl_next_tls_modid ();
break;
}
#ifdef SHARED
if (l->l_prev == NULL || (mode & __RTLD_AUDIT) != 0)
/* We are loading the executable itself when the dynamic linker
was executed directly. The setup will happen later. */
break;
/* In a static binary there is no way to tell if we dynamically
loaded libpthread. */
if (GL(dl_error_catch_tsd) == &_dl_initial_error_catch_tsd)
#endif
{
/* We have not yet loaded libpthread.
We can do the TLS setup right now! */
void *tcb;
/* The first call allocates TLS bookkeeping data structures.
Then we allocate the TCB for the initial thread. */
if (__builtin_expect (_dl_tls_setup (), 0)
|| __builtin_expect ((tcb = _dl_allocate_tls (NULL)) == NULL,
0))
{
errval = ENOMEM;
errstring = N_("\
cannot allocate TLS data structures for initial thread");
goto call_lose;
}
/* Now we install the TCB in the thread register. */
errstring = TLS_INIT_TP (tcb, 0);
if (__builtin_expect (errstring == NULL, 1))
{
/* Now we are all good. */
l->l_tls_modid = ++GL(dl_tls_max_dtv_idx);
break;
}
/* The kernel is too old or somesuch. */
errval = 0;
_dl_deallocate_tls (tcb, 1);
goto call_lose;
}
/* Uh-oh, the binary expects TLS support but we cannot
provide it. */
errval = 0;
errstring = N_("cannot handle TLS data");
goto call_lose;
break;
case PT_GNU_STACK:
stack_flags = ph->p_flags;
break;
case PT_GNU_RELRO:
l->l_relro_addr = ph->p_vaddr;
l->l_relro_size = ph->p_memsz;
break;
}
if (__builtin_expect (nloadcmds == 0, 0))
{
/* This only happens for a bogus object that will be caught with
another error below. But we don't want to go through the
calculations below using NLOADCMDS - 1. */
errstring = N_("object file has no loadable segments");
goto call_lose;
}
/* Now process the load commands and map segments into memory. */
c = loadcmds;
/* Length of the sections to be loaded. */
maplength = loadcmds[nloadcmds - 1].allocend - c->mapstart;
if (__builtin_expect (type, ET_DYN) == ET_DYN)
{
/* This is a position-independent shared object. We can let the
kernel map it anywhere it likes, but we must have space for all
the segments in their specified positions relative to the first.
So we map the first segment without MAP_FIXED, but with its
extent increased to cover all the segments. Then we remove
access from excess portion, and there is known sufficient space
there to remap from the later segments.
As a refinement, sometimes we have an address that we would
prefer to map such objects at; but this is only a preference,
the OS can do whatever it likes. */
ElfW(Addr) mappref;
mappref = (ELF_PREFERRED_ADDRESS (loader, maplength,
c->mapstart & GLRO(dl_use_load_bias))
- MAP_BASE_ADDR (l));
/* Remember which part of the address space this object uses. */
l->l_map_start = (ElfW(Addr)) __mmap ((void *) mappref, maplength,
c->prot,
MAP_COPY|MAP_FILE,
fd, c->mapoff);
if (__builtin_expect ((void *) l->l_map_start == MAP_FAILED, 0))
{
map_error:
errstring = N_("failed to map segment from shared object");
goto call_lose_errno;
}
l->l_map_end = l->l_map_start + maplength;
l->l_addr = l->l_map_start - c->mapstart;
if (has_holes)
/* Change protection on the excess portion to disallow all access;
the portions we do not remap later will be inaccessible as if
unallocated. Then jump into the normal segment-mapping loop to
handle the portion of the segment past the end of the file
mapping. */
__mprotect ((caddr_t) (l->l_addr + c->mapend),
loadcmds[nloadcmds - 1].mapstart - c->mapend,
PROT_NONE);
l->l_contiguous = 1;
goto postmap;
}
/* This object is loaded at a fixed address. This must never
happen for objects loaded with dlopen(). */
if (__builtin_expect ((mode & __RTLD_OPENEXEC) == 0, 0))
{
errstring = N_("cannot dynamically load executable");
goto call_lose;
}
/* Notify ELF_PREFERRED_ADDRESS that we have to load this one
fixed. */
ELF_FIXED_ADDRESS (loader, c->mapstart);
/* Remember which part of the address space this object uses. */
l->l_map_start = c->mapstart + l->l_addr;
l->l_map_end = l->l_map_start + maplength;
l->l_contiguous = !has_holes;
while (c < &loadcmds[nloadcmds])
{
if (c->mapend > c->mapstart
/* Map the segment contents from the file. */
&& (__mmap ((void *) (l->l_addr + c->mapstart),
c->mapend - c->mapstart, c->prot,
MAP_FIXED|MAP_COPY|MAP_FILE,
fd, c->mapoff)
== MAP_FAILED))
goto map_error;
postmap:
if (c->prot & PROT_EXEC)
l->l_text_end = l->l_addr + c->mapend;
if (l->l_phdr == 0
&& (ElfW(Off)) c->mapoff <= header->e_phoff
&& ((size_t) (c->mapend - c->mapstart + c->mapoff)
>= header->e_phoff + header->e_phnum * sizeof (ElfW(Phdr))))
/* Found the program header in this segment. */
l->l_phdr = (void *) (c->mapstart + header->e_phoff - c->mapoff);
if (c->allocend > c->dataend)
{
/* Extra zero pages should appear at the end of this segment,
after the data mapped from the file. */
ElfW(Addr) zero, zeroend, zeropage;
zero = l->l_addr + c->dataend;
zeroend = l->l_addr + c->allocend;
zeropage = ((zero + GLRO(dl_pagesize) - 1)
& ~(GLRO(dl_pagesize) - 1));
if (zeroend < zeropage)
/* All the extra data is in the last page of the segment.
We can just zero it. */
zeropage = zeroend;
if (zeropage > zero)
{
/* Zero the final part of the last page of the segment. */
if (__builtin_expect ((c->prot & PROT_WRITE) == 0, 0))
{
/* Dag nab it. */
if (__mprotect ((caddr_t) (zero
& ~(GLRO(dl_pagesize) - 1)),
GLRO(dl_pagesize), c->prot|PROT_WRITE) < 0)
{
errstring = N_("cannot change memory protections");
goto call_lose_errno;
}
}
memset ((void *) zero, '\0', zeropage - zero);
if (__builtin_expect ((c->prot & PROT_WRITE) == 0, 0))
__mprotect ((caddr_t) (zero & ~(GLRO(dl_pagesize) - 1)),
GLRO(dl_pagesize), c->prot);
}
if (zeroend > zeropage)
{
/* Map the remaining zero pages in from the zero fill FD. */
caddr_t mapat;
mapat = __mmap ((caddr_t) zeropage, zeroend - zeropage,
c->prot, MAP_ANON|MAP_PRIVATE|MAP_FIXED,
-1, 0);
if (__builtin_expect (mapat == MAP_FAILED, 0))
{
errstring = N_("cannot map zero-fill pages");
goto call_lose_errno;
}
}
}
++c;
}
}
if (l->l_ld == 0)
{
if (__builtin_expect (type == ET_DYN, 0))
{
errstring = N_("object file has no dynamic section");
goto call_lose;
}
}
else
l->l_ld = (ElfW(Dyn) *) ((ElfW(Addr)) l->l_ld + l->l_addr);
elf_get_dynamic_info (l, NULL);
/* Make sure we are not dlopen'ing an object that has the
DF_1_NOOPEN flag set. */
if (__builtin_expect (l->l_flags_1 & DF_1_NOOPEN, 0)
&& (mode & __RTLD_DLOPEN))
{
/* We are not supposed to load this object. Free all resources. */
__munmap ((void *) l->l_map_start, l->l_map_end - l->l_map_start);
if (!l->l_libname->dont_free)
free (l->l_libname);
if (l->l_phdr_allocated)
free ((void *) l->l_phdr);
errstring = N_("shared object cannot be dlopen()ed");
goto call_lose;
}
if (l->l_phdr == NULL)
{
/* The program header is not contained in any of the segments.
We have to allocate memory ourself and copy it over from out
temporary place. */
ElfW(Phdr) *newp = (ElfW(Phdr) *) malloc (header->e_phnum
* sizeof (ElfW(Phdr)));
if (newp == NULL)
{
errstring = N_("cannot allocate memory for program header");
goto call_lose_errno;
}
l->l_phdr = memcpy (newp, phdr,
(header->e_phnum * sizeof (ElfW(Phdr))));
l->l_phdr_allocated = 1;
}
else
/* Adjust the PT_PHDR value by the runtime load address. */
l->l_phdr = (ElfW(Phdr) *) ((ElfW(Addr)) l->l_phdr + l->l_addr);
if (__builtin_expect ((stack_flags &~ GL(dl_stack_flags)) & PF_X, 0))
{
if (__builtin_expect (__check_caller (RETURN_ADDRESS (0), allow_ldso),
0) != 0)
{
errstring = N_("invalid caller");
goto call_lose;
}
/* The stack is presently not executable, but this module
requires that it be executable. We must change the
protection of the variable which contains the flags used in
the mprotect calls. */
#ifdef SHARED
if ((mode & (__RTLD_DLOPEN | __RTLD_AUDIT)) == __RTLD_DLOPEN)
{
const uintptr_t p = (uintptr_t) &__stack_prot & -GLRO(dl_pagesize);
const size_t s = (uintptr_t) (&__stack_prot + 1) - p;
struct link_map *const m = &GL(dl_rtld_map);
const uintptr_t relro_end = ((m->l_addr + m->l_relro_addr
+ m->l_relro_size)
& -GLRO(dl_pagesize));
if (__builtin_expect (p + s <= relro_end, 1))
{
/* The variable lies in the region protected by RELRO. */
__mprotect ((void *) p, s, PROT_READ|PROT_WRITE);
__stack_prot |= PROT_READ|PROT_WRITE|PROT_EXEC;
__mprotect ((void *) p, s, PROT_READ);
}
else
__stack_prot |= PROT_READ|PROT_WRITE|PROT_EXEC;
}
else
#endif
__stack_prot |= PROT_READ|PROT_WRITE|PROT_EXEC;
#ifdef check_consistency
check_consistency ();
#endif
errval = (*GL(dl_make_stack_executable_hook)) (stack_endp);
if (errval)
{
errstring = N_("\
cannot enable executable stack as shared object requires");
goto call_lose;
}
}
/* Adjust the address of the TLS initialization image. */
if (l->l_tls_initimage != NULL)
l->l_tls_initimage = (char *) l->l_tls_initimage + l->l_addr;
/* We are done mapping in the file. We no longer need the descriptor. */
if (__builtin_expect (__close (fd) != 0, 0))
{
errstring = N_("cannot close file descriptor");
goto call_lose_errno;
}
/* Signal that we closed the file. */
fd = -1;
if (l->l_type == lt_library && type == ET_EXEC)
l->l_type = lt_executable;
l->l_entry += l->l_addr;
if (__builtin_expect (GLRO(dl_debug_mask) & DL_DEBUG_FILES, 0))
_dl_debug_printf ("\
dynamic: 0x%0*lx base: 0x%0*lx size: 0x%0*Zx\n\
entry: 0x%0*lx phdr: 0x%0*lx phnum: %*u\n\n",
(int) sizeof (void *) * 2,
(unsigned long int) l->l_ld,
(int) sizeof (void *) * 2,
(unsigned long int) l->l_addr,
(int) sizeof (void *) * 2, maplength,
(int) sizeof (void *) * 2,
(unsigned long int) l->l_entry,
(int) sizeof (void *) * 2,
(unsigned long int) l->l_phdr,
(int) sizeof (void *) * 2, l->l_phnum);
/* Set up the symbol hash table. */
_dl_setup_hash (l);
/* If this object has DT_SYMBOLIC set modify now its scope. We don't
have to do this for the main map. */
if ((mode & RTLD_DEEPBIND) == 0
&& __builtin_expect (l->l_info[DT_SYMBOLIC] != NULL, 0)
&& &l->l_searchlist != l->l_scope[0])
{
/* Create an appropriate searchlist. It contains only this map.
This is the definition of DT_SYMBOLIC in SysVr4. */
l->l_symbolic_searchlist.r_list[0] = l;
l->l_symbolic_searchlist.r_nlist = 1;
/* Now move the existing entries one back. */
memmove (&l->l_scope[1], &l->l_scope[0],
(l->l_scope_max - 1) * sizeof (l->l_scope[0]));
/* Now add the new entry. */
l->l_scope[0] = &l->l_symbolic_searchlist;
}
/* Remember whether this object must be initialized first. */
if (l->l_flags_1 & DF_1_INITFIRST)
GL(dl_initfirst) = l;
/* Finally the file information. */
l->l_dev = st.st_dev;
l->l_ino = st.st_ino;
/* When we profile the SONAME might be needed for something else but
loading. Add it right away. */
if (__builtin_expect (GLRO(dl_profile) != NULL, 0)
&& l->l_info[DT_SONAME] != NULL)
add_name_to_object (l, ((const char *) D_PTR (l, l_info[DT_STRTAB])
+ l->l_info[DT_SONAME]->d_un.d_val));
/* Now that the object is fully initialized add it to the object list. */
_dl_add_to_namespace_list (l, nsid);
#ifdef SHARED
/* Auditing checkpoint: we have a new object. */
if (__builtin_expect (GLRO(dl_naudit) > 0, 0)
&& !GL(dl_ns)[l->l_ns]._ns_loaded->l_auditing)
{
struct audit_ifaces *afct = GLRO(dl_audit);
for (unsigned int cnt = 0; cnt < GLRO(dl_naudit); ++cnt)
{
if (afct->objopen != NULL)
{
l->l_audit[cnt].bindflags
= afct->objopen (l, nsid, &l->l_audit[cnt].cookie);
l->l_audit_any_plt |= l->l_audit[cnt].bindflags != 0;
}
afct = afct->next;
}
}
#endif
return l;
}