// SPDX-License-Identifier: GPL-2.0-only
/*
 * linux/fs/binfmt_elf.c
 *
 * These are the functions used to load ELF format executables as used
 * on SVr4 machines. Information on the format may be found in the book
 * "UNIX SYSTEM V RELEASE 4 Programmers Guide: Ansi C and Programming Support
 * Tools".
 *
 * Copyright 1993, 1994: Eric Youngdale (ericy@cais.com).
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/errno.h>
#include <linux/signal.h>
#include <linux/binfmts.h>
#include <linux/string.h>
#include <linux/file.h>
#include <linux/slab.h>
#include <linux/personality.h>
#include <linux/elfcore.h>
#include <linux/init.h>
#include <linux/highuid.h>
#include <linux/compiler.h>
#include <linux/highmem.h>
#include <linux/hugetlb.h>
#include <linux/pagemap.h>
#include <linux/vmalloc.h>
#include <linux/security.h>
#include <linux/random.h>
#include <linux/elf.h>
#include <linux/elf-randomize.h>
#include <linux/utsname.h>
#include <linux/coredump.h>
#include <linux/sched.h>
#include <linux/sched/coredump.h>
#include <linux/sched/task_stack.h>
#include <linux/sched/cputime.h>
#include <linux/cred.h>
#include <linux/dax.h>
#include <linux/uaccess.h>
#include <asm/param.h>
#include <asm/page.h>

#ifndef user_long_t
#define user_long_t long
#endif
#ifndef user_siginfo_t
#define user_siginfo_t siginfo_t
#endif

/* That's for binfmt_elf_fdpic to deal with */
#ifndef elf_check_fdpic
#define elf_check_fdpic(ex) false
#endif

static int load_elf_binary(struct linux_binprm *bprm);

#ifdef CONFIG_USELIB
static int load_elf_library(struct file *);
#else
#define load_elf_library NULL
#endif

/*
 * If we don't support core dumping, then supply a NULL so we
 * don't even try.
 */
#ifdef CONFIG_ELF_CORE
static int elf_core_dump(struct coredump_params *cprm);
#else
#define elf_core_dump NULL
#endif

#if ELF_EXEC_PAGESIZE > PAGE_SIZE
#define ELF_MIN_ALIGN	ELF_EXEC_PAGESIZE
#else
#define ELF_MIN_ALIGN	PAGE_SIZE
#endif

#ifndef ELF_CORE_EFLAGS
#define ELF_CORE_EFLAGS	0
#endif

#define ELF_PAGESTART(_v) ((_v) & ~(unsigned long)(ELF_MIN_ALIGN-1))
#define ELF_PAGEOFFSET(_v) ((_v) & (ELF_MIN_ALIGN-1))
#define ELF_PAGEALIGN(_v) (((_v) + ELF_MIN_ALIGN - 1) & ~(ELF_MIN_ALIGN - 1))
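
/*
 * Illustrative example (not in the original source): assuming
 * ELF_MIN_ALIGN == 4096 (0x1000),
 *
 *	ELF_PAGESTART(0x12345)  == 0x12000	(round down to page start)
 *	ELF_PAGEOFFSET(0x12345) == 0x345	(offset within the page)
 *	ELF_PAGEALIGN(0x12345)  == 0x13000	(round up to next page)
 */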

static struct linux_binfmt elf_format = {
	.module		= THIS_MODULE,
	.load_binary	= load_elf_binary,
	.load_shlib	= load_elf_library,
	.core_dump	= elf_core_dump,
	.min_coredump	= ELF_EXEC_PAGESIZE,
};

#define BAD_ADDR(x) (unlikely((unsigned long)(x) >= TASK_SIZE))

static int set_brk(unsigned long start, unsigned long end, int prot)
{
	start = ELF_PAGEALIGN(start);
	end = ELF_PAGEALIGN(end);
	if (end > start) {
		/*
		 * Map the last of the bss segment.
		 * If the header is requesting these pages to be
		 * executable, honour that (ppc32 needs this).
		 */
		int error = vm_brk_flags(start, end - start,
				prot & PROT_EXEC ? VM_EXEC : 0);
		if (error)
			return error;
	}
	current->mm->start_brk = current->mm->brk = end;
	return 0;
}

/* We need to explicitly zero any fractional pages
   after the data section (i.e. bss).  This would
   contain the junk from the file that should not
   be in memory.
 */
static int padzero(unsigned long elf_bss)
{
	unsigned long nbyte;

	nbyte = ELF_PAGEOFFSET(elf_bss);
	if (nbyte) {
		nbyte = ELF_MIN_ALIGN - nbyte;
		if (clear_user((void __user *) elf_bss, nbyte))
			return -EFAULT;
	}
	return 0;
}
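
/*
 * Illustrative example (not in the original source): if a segment's file
 * data ends at elf_bss == 0x0804a123 and ELF_MIN_ALIGN == 4096, the final
 * partial page was mapped from the file, so padzero() clears the
 * 0xedd bytes from 0x0804a123 up to the page boundary at 0x0804b000.
 */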

/* Let's use some macros to make this stack manipulation a little clearer */
#ifdef CONFIG_STACK_GROWSUP
#define STACK_ADD(sp, items) ((elf_addr_t __user *)(sp) + (items))
#define STACK_ROUND(sp, items) \
	((15 + (unsigned long) ((sp) + (items))) &~ 15UL)
#define STACK_ALLOC(sp, len) ({ \
	elf_addr_t __user *old_sp = (elf_addr_t __user *)sp; sp += len; \
	old_sp; })
#else
#define STACK_ADD(sp, items) ((elf_addr_t __user *)(sp) - (items))
#define STACK_ROUND(sp, items) \
	(((unsigned long) (sp - items)) &~ 15UL)
#define STACK_ALLOC(sp, len) ({ sp -= len ; sp; })
#endif
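
/*
 * Illustrative note (not in the original source): on the common
 * grows-down stack, STACK_ALLOC(sp, 16) lowers sp by 16 bytes and
 * returns the new (lower) address, so the caller writes into the
 * freshly reserved region; on CONFIG_STACK_GROWSUP it returns the old
 * sp and bumps sp upward instead. STACK_ROUND() keeps the final stack
 * pointer 16-byte aligned, as most ABIs require.
 */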
154
Nathan Lynch483fad12008-07-22 04:48:46 +1000155#ifndef ELF_BASE_PLATFORM
156/*
157 * AT_BASE_PLATFORM indicates the "real" hardware/microarchitecture.
158 * If the arch defines ELF_BASE_PLATFORM (in asm/elf.h), the value
159 * will be copied to the user stack in the same manner as AT_PLATFORM.
160 */
161#define ELF_BASE_PLATFORM NULL
162#endif
163
Linus Torvalds1da177e2005-04-16 15:20:36 -0700164static int
Alexey Dobriyana62c5b12020-01-30 22:16:55 -0800165create_elf_tables(struct linux_binprm *bprm, const struct elfhdr *exec,
166 unsigned long load_addr, unsigned long interp_load_addr,
167 unsigned long e_entry)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700168{
Alexey Dobriyan03c6d722020-01-30 22:16:58 -0800169 struct mm_struct *mm = current->mm;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700170 unsigned long p = bprm->p;
171 int argc = bprm->argc;
172 int envc = bprm->envc;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700173 elf_addr_t __user *sp;
174 elf_addr_t __user *u_platform;
Nathan Lynch483fad12008-07-22 04:48:46 +1000175 elf_addr_t __user *u_base_platform;
Kees Cookf06295b2009-01-07 18:08:52 -0800176 elf_addr_t __user *u_rand_bytes;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700177 const char *k_platform = ELF_PLATFORM;
Nathan Lynch483fad12008-07-22 04:48:46 +1000178 const char *k_base_platform = ELF_BASE_PLATFORM;
Kees Cookf06295b2009-01-07 18:08:52 -0800179 unsigned char k_rand_bytes[16];
Linus Torvalds1da177e2005-04-16 15:20:36 -0700180 int items;
181 elf_addr_t *elf_info;
Alexey Dobriyan1f83d802020-01-30 22:16:50 -0800182 int ei_index;
David Howells86a264a2008-11-14 10:39:18 +1100183 const struct cred *cred = current_cred();
Ollie Wildb6a2fea2007-07-19 01:48:16 -0700184 struct vm_area_struct *vma;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700185
186 /*
Franck Bui-Huud68c9d62007-10-16 23:30:24 -0700187 * In some cases (e.g. Hyper-Threading), we want to avoid L1
188 * evictions by the processes running on the same package. One
189 * thing we can do is to shuffle the initial stack for them.
190 */
191
192 p = arch_align_stack(p);
193
194 /*
Linus Torvalds1da177e2005-04-16 15:20:36 -0700195 * If this architecture has a platform capability string, copy it
196 * to userspace. In some cases (Sparc), this info is impossible
197 * for userspace to get any other way, in others (i386) it is
198 * merely difficult.
199 */
Linus Torvalds1da177e2005-04-16 15:20:36 -0700200 u_platform = NULL;
201 if (k_platform) {
202 size_t len = strlen(k_platform) + 1;
203
Linus Torvalds1da177e2005-04-16 15:20:36 -0700204 u_platform = (elf_addr_t __user *)STACK_ALLOC(p, len);
205 if (__copy_to_user(u_platform, k_platform, len))
206 return -EFAULT;
207 }
208
Nathan Lynch483fad12008-07-22 04:48:46 +1000209 /*
210 * If this architecture has a "base" platform capability
211 * string, copy it to userspace.
212 */
213 u_base_platform = NULL;
214 if (k_base_platform) {
215 size_t len = strlen(k_base_platform) + 1;
216
217 u_base_platform = (elf_addr_t __user *)STACK_ALLOC(p, len);
218 if (__copy_to_user(u_base_platform, k_base_platform, len))
219 return -EFAULT;
220 }
221
Kees Cookf06295b2009-01-07 18:08:52 -0800222 /*
223 * Generate 16 random bytes for userspace PRNG seeding.
224 */
225 get_random_bytes(k_rand_bytes, sizeof(k_rand_bytes));
226 u_rand_bytes = (elf_addr_t __user *)
227 STACK_ALLOC(p, sizeof(k_rand_bytes));
228 if (__copy_to_user(u_rand_bytes, k_rand_bytes, sizeof(k_rand_bytes)))
229 return -EFAULT;
230
Linus Torvalds1da177e2005-04-16 15:20:36 -0700231 /* Create the ELF interpreter info */
Alexey Dobriyan03c6d722020-01-30 22:16:58 -0800232 elf_info = (elf_addr_t *)mm->saved_auxv;
Olaf Hering4f9a58d2007-10-16 23:30:12 -0700233 /* update AT_VECTOR_SIZE_BASE if the number of NEW_AUX_ENT() changes */
Linus Torvalds1da177e2005-04-16 15:20:36 -0700234#define NEW_AUX_ENT(id, val) \
Jesper Juhlf4e5cc22006-06-23 02:05:35 -0700235 do { \
Alexey Dobriyan1f83d802020-01-30 22:16:50 -0800236 *elf_info++ = id; \
237 *elf_info++ = val; \
Jesper Juhlf4e5cc22006-06-23 02:05:35 -0700238 } while (0)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700239
240#ifdef ARCH_DLINFO
241 /*
242 * ARCH_DLINFO must come first so PPC can do its special alignment of
243 * AUXV.
Olaf Hering4f9a58d2007-10-16 23:30:12 -0700244 * update AT_VECTOR_SIZE_ARCH if the number of NEW_AUX_ENT() in
245 * ARCH_DLINFO changes
Linus Torvalds1da177e2005-04-16 15:20:36 -0700246 */
247 ARCH_DLINFO;
248#endif
249 NEW_AUX_ENT(AT_HWCAP, ELF_HWCAP);
250 NEW_AUX_ENT(AT_PAGESZ, ELF_EXEC_PAGESIZE);
251 NEW_AUX_ENT(AT_CLKTCK, CLOCKS_PER_SEC);
252 NEW_AUX_ENT(AT_PHDR, load_addr + exec->e_phoff);
Jesper Juhlf4e5cc22006-06-23 02:05:35 -0700253 NEW_AUX_ENT(AT_PHENT, sizeof(struct elf_phdr));
Linus Torvalds1da177e2005-04-16 15:20:36 -0700254 NEW_AUX_ENT(AT_PHNUM, exec->e_phnum);
255 NEW_AUX_ENT(AT_BASE, interp_load_addr);
256 NEW_AUX_ENT(AT_FLAGS, 0);
Alexey Dobriyana62c5b12020-01-30 22:16:55 -0800257 NEW_AUX_ENT(AT_ENTRY, e_entry);
Eric W. Biedermanebc887b2012-02-07 18:36:10 -0800258 NEW_AUX_ENT(AT_UID, from_kuid_munged(cred->user_ns, cred->uid));
259 NEW_AUX_ENT(AT_EUID, from_kuid_munged(cred->user_ns, cred->euid));
260 NEW_AUX_ENT(AT_GID, from_kgid_munged(cred->user_ns, cred->gid));
261 NEW_AUX_ENT(AT_EGID, from_kgid_munged(cred->user_ns, cred->egid));
Kees Cookc425e182017-07-18 15:25:22 -0700262 NEW_AUX_ENT(AT_SECURE, bprm->secureexec);
Kees Cookf06295b2009-01-07 18:08:52 -0800263 NEW_AUX_ENT(AT_RANDOM, (elf_addr_t)(unsigned long)u_rand_bytes);
Michael Neuling21713642013-04-17 17:33:11 +0000264#ifdef ELF_HWCAP2
265 NEW_AUX_ENT(AT_HWCAP2, ELF_HWCAP2);
266#endif
John Reiser65191082008-07-21 14:21:32 -0700267 NEW_AUX_ENT(AT_EXECFN, bprm->exec);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700268 if (k_platform) {
Jesper Juhlf4e5cc22006-06-23 02:05:35 -0700269 NEW_AUX_ENT(AT_PLATFORM,
Jesper Juhl785d5572006-06-23 02:05:35 -0700270 (elf_addr_t)(unsigned long)u_platform);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700271 }
Nathan Lynch483fad12008-07-22 04:48:46 +1000272 if (k_base_platform) {
273 NEW_AUX_ENT(AT_BASE_PLATFORM,
274 (elf_addr_t)(unsigned long)u_base_platform);
275 }
Linus Torvalds1da177e2005-04-16 15:20:36 -0700276 if (bprm->interp_flags & BINPRM_FLAGS_EXECFD) {
Jesper Juhl785d5572006-06-23 02:05:35 -0700277 NEW_AUX_ENT(AT_EXECFD, bprm->interp_data);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700278 }
279#undef NEW_AUX_ENT
280 /* AT_NULL is zero; clear the rest too */
Alexey Dobriyan03c6d722020-01-30 22:16:58 -0800281 memset(elf_info, 0, (char *)mm->saved_auxv +
282 sizeof(mm->saved_auxv) - (char *)elf_info);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700283
284 /* And advance past the AT_NULL entry. */
Alexey Dobriyan1f83d802020-01-30 22:16:50 -0800285 elf_info += 2;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700286
Alexey Dobriyan03c6d722020-01-30 22:16:58 -0800287 ei_index = elf_info - (elf_addr_t *)mm->saved_auxv;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700288 sp = STACK_ADD(p, ei_index);
289
Andi Kleend20894a2008-02-08 04:21:54 -0800290 items = (argc + 1) + (envc + 1) + 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700291 bprm->p = STACK_ROUND(sp, items);
292
293 /* Point sp at the lowest address on the stack */
294#ifdef CONFIG_STACK_GROWSUP
295 sp = (elf_addr_t __user *)bprm->p - items - ei_index;
Jesper Juhlf4e5cc22006-06-23 02:05:35 -0700296 bprm->exec = (unsigned long)sp; /* XXX: PARISC HACK */
Linus Torvalds1da177e2005-04-16 15:20:36 -0700297#else
298 sp = (elf_addr_t __user *)bprm->p;
299#endif
300
Ollie Wildb6a2fea2007-07-19 01:48:16 -0700301
302 /*
303 * Grow the stack manually; some architectures have a limit on how
304 * far ahead a user-space access may be in order to grow the stack.
305 */
Alexey Dobriyan03c6d722020-01-30 22:16:58 -0800306 vma = find_extend_vma(mm, bprm->p);
Ollie Wildb6a2fea2007-07-19 01:48:16 -0700307 if (!vma)
308 return -EFAULT;
309
Linus Torvalds1da177e2005-04-16 15:20:36 -0700310 /* Now, let's put argc (and argv, envp if appropriate) on the stack */
311 if (__put_user(argc, sp++))
312 return -EFAULT;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700313
Kees Cook67c67772017-07-10 15:52:54 -0700314 /* Populate list of argv pointers back to argv strings. */
Alexey Dobriyan03c6d722020-01-30 22:16:58 -0800315 p = mm->arg_end = mm->arg_start;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700316 while (argc-- > 0) {
317 size_t len;
Kees Cook67c67772017-07-10 15:52:54 -0700318 if (__put_user((elf_addr_t)p, sp++))
Heiko Carstens841d5fb2006-12-06 20:36:35 -0800319 return -EFAULT;
Ollie Wildb6a2fea2007-07-19 01:48:16 -0700320 len = strnlen_user((void __user *)p, MAX_ARG_STRLEN);
321 if (!len || len > MAX_ARG_STRLEN)
WANG Cong23c49712008-05-08 21:52:33 +0800322 return -EINVAL;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700323 p += len;
324 }
Kees Cook67c67772017-07-10 15:52:54 -0700325 if (__put_user(0, sp++))
Linus Torvalds1da177e2005-04-16 15:20:36 -0700326 return -EFAULT;
Alexey Dobriyan03c6d722020-01-30 22:16:58 -0800327 mm->arg_end = p;
Kees Cook67c67772017-07-10 15:52:54 -0700328
329 /* Populate list of envp pointers back to envp strings. */
Alexey Dobriyan03c6d722020-01-30 22:16:58 -0800330 mm->env_end = mm->env_start = p;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700331 while (envc-- > 0) {
332 size_t len;
Kees Cook67c67772017-07-10 15:52:54 -0700333 if (__put_user((elf_addr_t)p, sp++))
Heiko Carstens841d5fb2006-12-06 20:36:35 -0800334 return -EFAULT;
Ollie Wildb6a2fea2007-07-19 01:48:16 -0700335 len = strnlen_user((void __user *)p, MAX_ARG_STRLEN);
336 if (!len || len > MAX_ARG_STRLEN)
WANG Cong23c49712008-05-08 21:52:33 +0800337 return -EINVAL;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700338 p += len;
339 }
Kees Cook67c67772017-07-10 15:52:54 -0700340 if (__put_user(0, sp++))
Linus Torvalds1da177e2005-04-16 15:20:36 -0700341 return -EFAULT;
Alexey Dobriyan03c6d722020-01-30 22:16:58 -0800342 mm->env_end = p;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700343
344 /* Put the elf_info on the stack in the right place. */
Alexey Dobriyan03c6d722020-01-30 22:16:58 -0800345 if (copy_to_user(sp, mm->saved_auxv, ei_index * sizeof(elf_addr_t)))
Linus Torvalds1da177e2005-04-16 15:20:36 -0700346 return -EFAULT;
347 return 0;
348}
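
/*
 * Illustrative sketch (not in the original source): on a grows-down
 * stack, create_elf_tables() leaves the new program's stack looking
 * roughly like this (lowest addresses first):
 *
 *	sp ->	argc
 *		argv[0] ... argv[argc - 1], NULL
 *		envp[0] ... envp[envc - 1], NULL
 *		auxv pairs (AT_HWCAP, AT_PAGESZ, ..., terminated by AT_NULL)
 *		random bytes / platform strings allocated above
 *		the argv and envp strings themselves (near the stack top)
 */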

#ifndef elf_map

static unsigned long elf_map(struct file *filep, unsigned long addr,
		const struct elf_phdr *eppnt, int prot, int type,
		unsigned long total_size)
{
	unsigned long map_addr;
	unsigned long size = eppnt->p_filesz + ELF_PAGEOFFSET(eppnt->p_vaddr);
	unsigned long off = eppnt->p_offset - ELF_PAGEOFFSET(eppnt->p_vaddr);
	addr = ELF_PAGESTART(addr);
	size = ELF_PAGEALIGN(size);

	/* mmap() will return -EINVAL if given a zero size, but a
	 * segment with zero filesize is perfectly valid */
	if (!size)
		return addr;

	/*
	 * total_size is the size of the ELF (interpreter) image.
	 * The _first_ mmap needs to know the full size, otherwise
	 * randomization might put this image into an overlapping
	 * position with the ELF binary image. (since size < total_size)
	 * So we first map the 'big' image - and unmap the remainder at
	 * the end. (which unmap is needed for ELF images with holes.)
	 */
	if (total_size) {
		total_size = ELF_PAGEALIGN(total_size);
		map_addr = vm_mmap(filep, addr, total_size, prot, type, off);
		if (!BAD_ADDR(map_addr))
			vm_munmap(map_addr+size, total_size-size);
	} else
		map_addr = vm_mmap(filep, addr, size, prot, type, off);

	if ((type & MAP_FIXED_NOREPLACE) &&
	    PTR_ERR((void *)map_addr) == -EEXIST)
		pr_info("%d (%s): Uhuuh, elf segment at %px requested but the memory is mapped already\n",
			task_pid_nr(current), current->comm, (void *)addr);

	return(map_addr);
}

#endif /* !elf_map */
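
/*
 * Illustrative example (not in the original source): for a PT_LOAD entry
 * with p_vaddr == 0x400123, p_offset == 0x123 and p_filesz == 0x2000
 * (4 KiB pages), elf_map() maps file offset 0x0 at address 0x400000 for
 * 0x3000 bytes, so the file data lands exactly at p_vaddr while the
 * mapping itself stays page aligned.
 */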

static unsigned long total_mapping_size(const struct elf_phdr *cmds, int nr)
{
	int i, first_idx = -1, last_idx = -1;

	for (i = 0; i < nr; i++) {
		if (cmds[i].p_type == PT_LOAD) {
			last_idx = i;
			if (first_idx == -1)
				first_idx = i;
		}
	}
	if (first_idx == -1)
		return 0;

	return cmds[last_idx].p_vaddr + cmds[last_idx].p_memsz -
				ELF_PAGESTART(cmds[first_idx].p_vaddr);
}
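
/*
 * Illustrative example (not in the original source): with two PT_LOAD
 * headers, say {p_vaddr 0x400000, p_memsz 0x1000} and {p_vaddr 0x601000,
 * p_memsz 0x2000}, total_mapping_size() returns
 * 0x601000 + 0x2000 - 0x400000 == 0x203000: the span the whole image
 * needs in the address space, holes included.
 */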
410
Alexey Dobriyan658c0332019-12-04 16:52:25 -0800411static int elf_read(struct file *file, void *buf, size_t len, loff_t pos)
412{
413 ssize_t rv;
414
415 rv = kernel_read(file, buf, len, &pos);
416 if (unlikely(rv != len)) {
417 return (rv < 0) ? rv : -EIO;
418 }
419 return 0;
420}
421
Paul Burton6a8d3892014-09-11 08:30:14 +0100422/**
423 * load_elf_phdrs() - load ELF program headers
424 * @elf_ex: ELF header of the binary whose program headers should be loaded
425 * @elf_file: the opened ELF binary file
426 *
427 * Loads ELF program headers from the binary file elf_file, which has the ELF
428 * header pointed to by elf_ex, into a newly allocated array. The caller is
429 * responsible for freeing the allocated data. Returns an ERR_PTR upon failure.
430 */
Alexey Dobriyan49ac9812019-03-07 16:29:03 -0800431static struct elf_phdr *load_elf_phdrs(const struct elfhdr *elf_ex,
Paul Burton6a8d3892014-09-11 08:30:14 +0100432 struct file *elf_file)
433{
434 struct elf_phdr *elf_phdata = NULL;
Alexey Dobriyanfaf1c312019-03-07 16:28:56 -0800435 int retval, err = -1;
Alexey Dobriyanfaf1c312019-03-07 16:28:56 -0800436 unsigned int size;
Paul Burton6a8d3892014-09-11 08:30:14 +0100437
438 /*
439 * If the size of this structure has changed, then punt, since
440 * we will be doing the wrong thing.
441 */
442 if (elf_ex->e_phentsize != sizeof(struct elf_phdr))
443 goto out;
444
445 /* Sanity check the number of program headers... */
Paul Burton6a8d3892014-09-11 08:30:14 +0100446 /* ...and their total size. */
447 size = sizeof(struct elf_phdr) * elf_ex->e_phnum;
Alexey Dobriyanfaf1c312019-03-07 16:28:56 -0800448 if (size == 0 || size > 65536 || size > ELF_MIN_ALIGN)
Paul Burton6a8d3892014-09-11 08:30:14 +0100449 goto out;
450
451 elf_phdata = kmalloc(size, GFP_KERNEL);
452 if (!elf_phdata)
453 goto out;
454
455 /* Read in the program headers */
Alexey Dobriyan658c0332019-12-04 16:52:25 -0800456 retval = elf_read(elf_file, elf_phdata, size, elf_ex->e_phoff);
457 if (retval < 0) {
458 err = retval;
Paul Burton6a8d3892014-09-11 08:30:14 +0100459 goto out;
460 }
461
462 /* Success! */
463 err = 0;
464out:
465 if (err) {
466 kfree(elf_phdata);
467 elf_phdata = NULL;
468 }
469 return elf_phdata;
470}
Jiri Kosinacc503c12008-01-30 13:31:07 +0100471
Paul Burton774c1052014-09-11 08:30:16 +0100472#ifndef CONFIG_ARCH_BINFMT_ELF_STATE
473
474/**
475 * struct arch_elf_state - arch-specific ELF loading state
476 *
477 * This structure is used to preserve architecture specific data during
478 * the loading of an ELF file, throughout the checking of architecture
479 * specific ELF headers & through to the point where the ELF load is
480 * known to be proceeding (ie. SET_PERSONALITY).
481 *
482 * This implementation is a dummy for architectures which require no
483 * specific state.
484 */
485struct arch_elf_state {
486};
487
488#define INIT_ARCH_ELF_STATE {}
489
490/**
491 * arch_elf_pt_proc() - check a PT_LOPROC..PT_HIPROC ELF program header
492 * @ehdr: The main ELF header
493 * @phdr: The program header to check
494 * @elf: The open ELF file
495 * @is_interp: True if the phdr is from the interpreter of the ELF being
496 * loaded, else false.
497 * @state: Architecture-specific state preserved throughout the process
498 * of loading the ELF.
499 *
500 * Inspects the program header phdr to validate its correctness and/or
501 * suitability for the system. Called once per ELF program header in the
502 * range PT_LOPROC to PT_HIPROC, for both the ELF being loaded and its
503 * interpreter.
504 *
505 * Return: Zero to proceed with the ELF load, non-zero to fail the ELF load
506 * with that return code.
507 */
508static inline int arch_elf_pt_proc(struct elfhdr *ehdr,
509 struct elf_phdr *phdr,
510 struct file *elf, bool is_interp,
511 struct arch_elf_state *state)
512{
513 /* Dummy implementation, always proceed */
514 return 0;
515}
516
517/**
Maciej W. Rozycki54d157142015-10-26 15:47:57 +0000518 * arch_check_elf() - check an ELF executable
Paul Burton774c1052014-09-11 08:30:16 +0100519 * @ehdr: The main ELF header
520 * @has_interp: True if the ELF has an interpreter, else false.
Maciej W. Rozyckieb4bc072015-11-13 00:47:48 +0000521 * @interp_ehdr: The interpreter's ELF header
Paul Burton774c1052014-09-11 08:30:16 +0100522 * @state: Architecture-specific state preserved throughout the process
523 * of loading the ELF.
524 *
525 * Provides a final opportunity for architecture code to reject the loading
526 * of the ELF & cause an exec syscall to return an error. This is called after
527 * all program headers to be checked by arch_elf_pt_proc have been.
528 *
529 * Return: Zero to proceed with the ELF load, non-zero to fail the ELF load
530 * with that return code.
531 */
532static inline int arch_check_elf(struct elfhdr *ehdr, bool has_interp,
Maciej W. Rozyckieb4bc072015-11-13 00:47:48 +0000533 struct elfhdr *interp_ehdr,
Paul Burton774c1052014-09-11 08:30:16 +0100534 struct arch_elf_state *state)
535{
536 /* Dummy implementation, always proceed */
537 return 0;
538}
539
540#endif /* !CONFIG_ARCH_BINFMT_ELF_STATE */
Linus Torvalds1da177e2005-04-16 15:20:36 -0700541
Alexey Dobriyand8e7cb32019-05-14 15:43:51 -0700542static inline int make_prot(u32 p_flags)
543{
544 int prot = 0;
545
546 if (p_flags & PF_R)
547 prot |= PROT_READ;
548 if (p_flags & PF_W)
549 prot |= PROT_WRITE;
550 if (p_flags & PF_X)
551 prot |= PROT_EXEC;
552 return prot;
553}
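
/*
 * Illustrative example (not in the original source): a typical text
 * segment carries p_flags == (PF_R | PF_X), so make_prot() yields
 * PROT_READ | PROT_EXEC; a data segment's PF_R | PF_W becomes
 * PROT_READ | PROT_WRITE.
 */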

/* This is much more generalized than the library routine read function,
   so we keep this separate.  Technically the library read function
   is only provided so that we can read a.out libraries that have
   an ELF header */

static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
		struct file *interpreter,
		unsigned long no_base, struct elf_phdr *interp_elf_phdata)
{
	struct elf_phdr *eppnt;
	unsigned long load_addr = 0;
	int load_addr_set = 0;
	unsigned long last_bss = 0, elf_bss = 0;
	int bss_prot = 0;
	unsigned long error = ~0UL;
	unsigned long total_size;
	int i;

	/* First of all, some simple consistency checks */
	if (interp_elf_ex->e_type != ET_EXEC &&
	    interp_elf_ex->e_type != ET_DYN)
		goto out;
	if (!elf_check_arch(interp_elf_ex) ||
	    elf_check_fdpic(interp_elf_ex))
		goto out;
	if (!interpreter->f_op->mmap)
		goto out;

	total_size = total_mapping_size(interp_elf_phdata,
					interp_elf_ex->e_phnum);
	if (!total_size) {
		error = -EINVAL;
		goto out;
	}

	eppnt = interp_elf_phdata;
	for (i = 0; i < interp_elf_ex->e_phnum; i++, eppnt++) {
		if (eppnt->p_type == PT_LOAD) {
			int elf_type = MAP_PRIVATE | MAP_DENYWRITE;
			int elf_prot = make_prot(eppnt->p_flags);
			unsigned long vaddr = 0;
			unsigned long k, map_addr;

			vaddr = eppnt->p_vaddr;
			if (interp_elf_ex->e_type == ET_EXEC || load_addr_set)
				elf_type |= MAP_FIXED_NOREPLACE;
			else if (no_base && interp_elf_ex->e_type == ET_DYN)
				load_addr = -vaddr;

			map_addr = elf_map(interpreter, load_addr + vaddr,
					eppnt, elf_prot, elf_type, total_size);
			total_size = 0;
			error = map_addr;
			if (BAD_ADDR(map_addr))
				goto out;

			if (!load_addr_set &&
			    interp_elf_ex->e_type == ET_DYN) {
				load_addr = map_addr - ELF_PAGESTART(vaddr);
				load_addr_set = 1;
			}

			/*
			 * Check to see if the section's size will overflow the
			 * allowed task size. Note that p_filesz must always be
			 * <= p_memsz so it's only necessary to check p_memsz.
			 */
			k = load_addr + eppnt->p_vaddr;
			if (BAD_ADDR(k) ||
			    eppnt->p_filesz > eppnt->p_memsz ||
			    eppnt->p_memsz > TASK_SIZE ||
			    TASK_SIZE - eppnt->p_memsz < k) {
				error = -ENOMEM;
				goto out;
			}

			/*
			 * Find the end of the file mapping for this phdr, and
			 * keep track of the largest address we see for this.
			 */
			k = load_addr + eppnt->p_vaddr + eppnt->p_filesz;
			if (k > elf_bss)
				elf_bss = k;

			/*
			 * Do the same thing for the memory mapping - between
			 * elf_bss and last_bss is the bss section.
			 */
			k = load_addr + eppnt->p_vaddr + eppnt->p_memsz;
			if (k > last_bss) {
				last_bss = k;
				bss_prot = elf_prot;
			}
		}
	}

	/*
	 * Now fill out the bss section: first pad the last page from
	 * the file up to the page boundary, and zero it from elf_bss
	 * up to the end of the page.
	 */
	if (padzero(elf_bss)) {
		error = -EFAULT;
		goto out;
	}
	/*
	 * Next, align both the file and mem bss up to the page size,
	 * since this is where elf_bss was just zeroed up to, and where
	 * last_bss will end after the vm_brk_flags() below.
	 */
	elf_bss = ELF_PAGEALIGN(elf_bss);
	last_bss = ELF_PAGEALIGN(last_bss);
	/* Finally, if there is still more bss to allocate, do it. */
	if (last_bss > elf_bss) {
		error = vm_brk_flags(elf_bss, last_bss - elf_bss,
				bss_prot & PROT_EXEC ? VM_EXEC : 0);
		if (error)
			goto out;
	}

	error = load_addr;
out:
	return error;
}

/*
 * These are the functions used to load ELF style executables and shared
 * libraries.  There is no binary dependent code anywhere else.
 */

static int load_elf_binary(struct linux_binprm *bprm)
{
	struct file *interpreter = NULL; /* to shut gcc up */
	unsigned long load_addr = 0, load_bias = 0;
	int load_addr_set = 0;
	unsigned long error;
	struct elf_phdr *elf_ppnt, *elf_phdata, *interp_elf_phdata = NULL;
	unsigned long elf_bss, elf_brk;
	int bss_prot = 0;
	int retval, i;
	unsigned long elf_entry;
	unsigned long e_entry;
	unsigned long interp_load_addr = 0;
	unsigned long start_code, end_code, start_data, end_data;
	unsigned long reloc_func_desc __maybe_unused = 0;
	int executable_stack = EXSTACK_DEFAULT;
	struct elfhdr *elf_ex = (struct elfhdr *)bprm->buf;
	struct elfhdr *interp_elf_ex = NULL;
	struct arch_elf_state arch_state = INIT_ARCH_ELF_STATE;
	struct mm_struct *mm;
	struct pt_regs *regs;

	retval = -ENOEXEC;
	/* First of all, some simple consistency checks */
	if (memcmp(elf_ex->e_ident, ELFMAG, SELFMAG) != 0)
		goto out;

	if (elf_ex->e_type != ET_EXEC && elf_ex->e_type != ET_DYN)
		goto out;
	if (!elf_check_arch(elf_ex))
		goto out;
	if (elf_check_fdpic(elf_ex))
		goto out;
	if (!bprm->file->f_op->mmap)
		goto out;

	elf_phdata = load_elf_phdrs(elf_ex, bprm->file);
	if (!elf_phdata)
		goto out;

	elf_ppnt = elf_phdata;
	for (i = 0; i < elf_ex->e_phnum; i++, elf_ppnt++) {
		char *elf_interpreter;

		if (elf_ppnt->p_type != PT_INTERP)
			continue;

		/*
		 * This is the program interpreter used for shared libraries -
		 * for now assume that this is an a.out format binary.
		 */
		retval = -ENOEXEC;
		if (elf_ppnt->p_filesz > PATH_MAX || elf_ppnt->p_filesz < 2)
			goto out_free_ph;

		retval = -ENOMEM;
		elf_interpreter = kmalloc(elf_ppnt->p_filesz, GFP_KERNEL);
		if (!elf_interpreter)
			goto out_free_ph;

		retval = elf_read(bprm->file, elf_interpreter, elf_ppnt->p_filesz,
				  elf_ppnt->p_offset);
		if (retval < 0)
			goto out_free_interp;
		/* make sure path is NULL terminated */
		retval = -ENOEXEC;
		if (elf_interpreter[elf_ppnt->p_filesz - 1] != '\0')
			goto out_free_interp;

		interpreter = open_exec(elf_interpreter);
		kfree(elf_interpreter);
		retval = PTR_ERR(interpreter);
		if (IS_ERR(interpreter))
			goto out_free_ph;

		/*
		 * If the binary is not readable then enforce mm->dumpable = 0
		 * regardless of the interpreter's permissions.
		 */
		would_dump(bprm, interpreter);

		interp_elf_ex = kmalloc(sizeof(*interp_elf_ex), GFP_KERNEL);
		if (!interp_elf_ex) {
			retval = -ENOMEM;
			goto out_free_ph;
		}

		/* Get the exec headers */
		retval = elf_read(interpreter, interp_elf_ex,
				  sizeof(*interp_elf_ex), 0);
		if (retval < 0)
			goto out_free_dentry;

		break;

out_free_interp:
		kfree(elf_interpreter);
		goto out_free_ph;
	}

	elf_ppnt = elf_phdata;
	for (i = 0; i < elf_ex->e_phnum; i++, elf_ppnt++)
		switch (elf_ppnt->p_type) {
		case PT_GNU_STACK:
			if (elf_ppnt->p_flags & PF_X)
				executable_stack = EXSTACK_ENABLE_X;
			else
				executable_stack = EXSTACK_DISABLE_X;
			break;

		case PT_LOPROC ... PT_HIPROC:
			retval = arch_elf_pt_proc(elf_ex, elf_ppnt,
						  bprm->file, false,
						  &arch_state);
			if (retval)
				goto out_free_dentry;
			break;
		}

	/* Some simple consistency checks for the interpreter */
	if (interpreter) {
		retval = -ELIBBAD;
		/* Not an ELF interpreter */
		if (memcmp(interp_elf_ex->e_ident, ELFMAG, SELFMAG) != 0)
			goto out_free_dentry;
		/* Verify the interpreter has a valid arch */
		if (!elf_check_arch(interp_elf_ex) ||
		    elf_check_fdpic(interp_elf_ex))
			goto out_free_dentry;

		/* Load the interpreter program headers */
		interp_elf_phdata = load_elf_phdrs(interp_elf_ex,
						   interpreter);
		if (!interp_elf_phdata)
			goto out_free_dentry;

		/* Pass PT_LOPROC..PT_HIPROC headers to arch code */
		elf_ppnt = interp_elf_phdata;
		for (i = 0; i < interp_elf_ex->e_phnum; i++, elf_ppnt++)
			switch (elf_ppnt->p_type) {
			case PT_LOPROC ... PT_HIPROC:
				retval = arch_elf_pt_proc(interp_elf_ex,
							  elf_ppnt, interpreter,
							  true, &arch_state);
				if (retval)
					goto out_free_dentry;
				break;
			}
	}

	/*
	 * Allow arch code to reject the ELF at this point, whilst it's
	 * still possible to return an error to the code that invoked
	 * the exec syscall.
	 */
	retval = arch_check_elf(elf_ex,
				!!interpreter, interp_elf_ex,
				&arch_state);
	if (retval)
		goto out_free_dentry;

	/* Flush all traces of the currently running executable */
	retval = begin_new_exec(bprm);
	if (retval)
		goto out_free_dentry;

	/* Do this immediately, since STACK_TOP as used in setup_arg_pages
	   may depend on the personality.  */
	SET_PERSONALITY2(*elf_ex, &arch_state);
	if (elf_read_implies_exec(*elf_ex, executable_stack))
		current->personality |= READ_IMPLIES_EXEC;

	if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
		current->flags |= PF_RANDOMIZE;

	setup_new_exec(bprm);

	/* Do this so that we can load the interpreter, if need be.  We will
	   change some of these later */
	retval = setup_arg_pages(bprm, randomize_stack_top(STACK_TOP),
				 executable_stack);
	if (retval < 0)
		goto out_free_dentry;

	elf_bss = 0;
	elf_brk = 0;

	start_code = ~0UL;
	end_code = 0;
	start_data = 0;
	end_data = 0;

	/* Now we do a little grungy work by mmapping the ELF image into
	   the correct location in memory. */
	for(i = 0, elf_ppnt = elf_phdata;
	    i < elf_ex->e_phnum; i++, elf_ppnt++) {
		int elf_prot, elf_flags;
		unsigned long k, vaddr;
		unsigned long total_size = 0;

		if (elf_ppnt->p_type != PT_LOAD)
			continue;

		if (unlikely (elf_brk > elf_bss)) {
			unsigned long nbyte;

			/* There was a PT_LOAD segment with p_memsz > p_filesz
			   before this one. Map anonymous pages, if needed,
			   and clear the area.  */
			retval = set_brk(elf_bss + load_bias,
					 elf_brk + load_bias,
					 bss_prot);
			if (retval)
				goto out_free_dentry;
			nbyte = ELF_PAGEOFFSET(elf_bss);
			if (nbyte) {
				nbyte = ELF_MIN_ALIGN - nbyte;
				if (nbyte > elf_brk - elf_bss)
					nbyte = elf_brk - elf_bss;
				if (clear_user((void __user *)elf_bss +
							load_bias, nbyte)) {
					/*
					 * This bss-zeroing can fail if the ELF
					 * file specifies odd protections. So
					 * we don't check the return value
					 */
				}
			}
		}

		elf_prot = make_prot(elf_ppnt->p_flags);

		elf_flags = MAP_PRIVATE | MAP_DENYWRITE | MAP_EXECUTABLE;

		vaddr = elf_ppnt->p_vaddr;
		/*
		 * If we are loading ET_EXEC or we have already performed
		 * the ET_DYN load_addr calculations, proceed normally.
		 */
		if (elf_ex->e_type == ET_EXEC || load_addr_set) {
			elf_flags |= MAP_FIXED;
		} else if (elf_ex->e_type == ET_DYN) {
			/*
			 * This logic is run once for the first LOAD Program
			 * Header for ET_DYN binaries to calculate the
			 * randomization (load_bias) for all the LOAD
			 * Program Headers, and to calculate the entire
			 * size of the ELF mapping (total_size). (Note that
			 * load_addr_set is set to true later once the
			 * initial mapping is performed.)
			 *
			 * There are effectively two types of ET_DYN
			 * binaries: programs (i.e. PIE: ET_DYN with INTERP)
			 * and loaders (ET_DYN without INTERP, since they
			 * _are_ the ELF interpreter). The loaders must
			 * be loaded away from programs since the program
			 * may otherwise collide with the loader (especially
			 * for ET_EXEC which does not have a randomized
			 * position). For example to handle invocations of
			 * "./ld.so someprog" to test out a new version of
			 * the loader, the subsequent program that the
			 * loader loads must avoid the loader itself, so
			 * they cannot share the same load range. Sufficient
			 * room for the brk must be allocated with the
			 * loader as well, since brk must be available with
			 * the loader.
			 *
			 * Therefore, programs are loaded offset from
			 * ELF_ET_DYN_BASE and loaders are loaded into the
			 * independently randomized mmap region (0 load_bias
			 * without MAP_FIXED).
			 */
			if (interpreter) {
				load_bias = ELF_ET_DYN_BASE;
				if (current->flags & PF_RANDOMIZE)
					load_bias += arch_mmap_rnd();
				elf_flags |= MAP_FIXED;
			} else
				load_bias = 0;

			/*
			 * Since load_bias is used for all subsequent loading
			 * calculations, we must lower it by the first vaddr
			 * so that the remaining calculations based on the
			 * ELF vaddrs will be correctly offset. The result
			 * is then page aligned.
			 */
			load_bias = ELF_PAGESTART(load_bias - vaddr);
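
			/*
			 * Illustrative example (not in the original
			 * source): if the randomized base came out as
			 * 0x555555000000 and the first PT_LOAD has
			 * p_vaddr == 0x400, load_bias becomes
			 * ELF_PAGESTART(0x555555000000 - 0x400) ==
			 * 0x555554fff000 (4 KiB pages), so each segment
			 * is mapped at load_bias + p_vaddr and the first
			 * mapping starts on that page boundary.
			 */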

			total_size = total_mapping_size(elf_phdata,
							elf_ex->e_phnum);
			if (!total_size) {
				retval = -EINVAL;
				goto out_free_dentry;
			}
		}

		error = elf_map(bprm->file, load_bias + vaddr, elf_ppnt,
				elf_prot, elf_flags, total_size);
		if (BAD_ADDR(error)) {
			retval = IS_ERR((void *)error) ?
				PTR_ERR((void*)error) : -EINVAL;
			goto out_free_dentry;
		}

		if (!load_addr_set) {
			load_addr_set = 1;
			load_addr = (elf_ppnt->p_vaddr - elf_ppnt->p_offset);
			if (elf_ex->e_type == ET_DYN) {
				load_bias += error -
					     ELF_PAGESTART(load_bias + vaddr);
				load_addr += load_bias;
				reloc_func_desc = load_bias;
			}
		}
		k = elf_ppnt->p_vaddr;
		if ((elf_ppnt->p_flags & PF_X) && k < start_code)
			start_code = k;
		if (start_data < k)
			start_data = k;

		/*
		 * Check to see if the section's size will overflow the
		 * allowed task size. Note that p_filesz must always be
		 * <= p_memsz so it is only necessary to check p_memsz.
		 */
		if (BAD_ADDR(k) || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
		    elf_ppnt->p_memsz > TASK_SIZE ||
		    TASK_SIZE - elf_ppnt->p_memsz < k) {
			/* set_brk can never work. Avoid overflows. */
			retval = -EINVAL;
			goto out_free_dentry;
		}

		k = elf_ppnt->p_vaddr + elf_ppnt->p_filesz;

		if (k > elf_bss)
			elf_bss = k;
		if ((elf_ppnt->p_flags & PF_X) && end_code < k)
			end_code = k;
		if (end_data < k)
			end_data = k;
		k = elf_ppnt->p_vaddr + elf_ppnt->p_memsz;
		if (k > elf_brk) {
			bss_prot = elf_prot;
			elf_brk = k;
		}
	}

	e_entry = elf_ex->e_entry + load_bias;
	elf_bss += load_bias;
	elf_brk += load_bias;
	start_code += load_bias;
	end_code += load_bias;
	start_data += load_bias;
	end_data += load_bias;

	/* Calling set_brk effectively mmaps the pages that we need
	 * for the bss and break sections.  We must do this before
	 * mapping in the interpreter, to make sure it doesn't wind
	 * up getting placed where the bss needs to go.
	 */
	retval = set_brk(elf_bss, elf_brk, bss_prot);
	if (retval)
		goto out_free_dentry;
	if (likely(elf_bss != elf_brk) && unlikely(padzero(elf_bss))) {
		retval = -EFAULT; /* Nobody gets to see this, but.. */
		goto out_free_dentry;
	}

	if (interpreter) {
		elf_entry = load_elf_interp(interp_elf_ex,
					    interpreter,
					    load_bias, interp_elf_phdata);
		if (!IS_ERR((void *)elf_entry)) {
			/*
			 * load_elf_interp() returns relocation
			 * adjustment
			 */
			interp_load_addr = elf_entry;
			elf_entry += interp_elf_ex->e_entry;
		}
		if (BAD_ADDR(elf_entry)) {
			retval = IS_ERR((void *)elf_entry) ?
					(int)elf_entry : -EINVAL;
			goto out_free_dentry;
		}
		reloc_func_desc = interp_load_addr;

		allow_write_access(interpreter);
		fput(interpreter);

		kfree(interp_elf_ex);
		kfree(interp_elf_phdata);
	} else {
		elf_entry = e_entry;
		if (BAD_ADDR(elf_entry)) {
			retval = -EINVAL;
			goto out_free_dentry;
		}
	}

	kfree(elf_phdata);

	set_binfmt(&elf_format);

#ifdef ARCH_HAS_SETUP_ADDITIONAL_PAGES
	retval = arch_setup_additional_pages(bprm, !!interpreter);
	if (retval < 0)
		goto out;
#endif /* ARCH_HAS_SETUP_ADDITIONAL_PAGES */

	retval = create_elf_tables(bprm, elf_ex,
			  load_addr, interp_load_addr, e_entry);
	if (retval < 0)
		goto out;

	mm = current->mm;
	mm->end_code = end_code;
	mm->start_code = start_code;
	mm->start_data = start_data;
	mm->end_data = end_data;
	mm->start_stack = bprm->p;

	if ((current->flags & PF_RANDOMIZE) && (randomize_va_space > 1)) {
		/*
		 * For architectures with ELF randomization, when executing
		 * a loader directly (i.e. no interpreter listed in ELF
		 * headers), move the brk area out of the mmap region
		 * (since it grows up, and may collide early with the stack
		 * growing down), and into the unused ELF_ET_DYN_BASE region.
		 */
		if (IS_ENABLED(CONFIG_ARCH_HAS_ELF_RANDOMIZE) &&
		    elf_ex->e_type == ET_DYN && !interpreter) {
			mm->brk = mm->start_brk = ELF_ET_DYN_BASE;
		}

		mm->brk = mm->start_brk = arch_randomize_brk(mm);
#ifdef compat_brk_randomized
		current->brk_randomized = 1;
#endif
	}

	if (current->personality & MMAP_PAGE_ZERO) {
		/* Why this, you ask???  Well SVr4 maps page 0 as read-only,
		   and some applications "depend" upon this behavior.
		   Since we do not have the power to recompile these, we
		   emulate the SVr4 behavior. Sigh. */
		error = vm_mmap(NULL, 0, PAGE_SIZE, PROT_READ | PROT_EXEC,
				MAP_FIXED | MAP_PRIVATE, 0);
	}

	regs = current_pt_regs();
#ifdef ELF_PLAT_INIT
	/*
	 * The ABI may specify that certain registers be set up in special
	 * ways (on i386 %edx is the address of a DT_FINI function, for
	 * example).  In addition, it may also specify (eg, PowerPC64 ELF)
	 * that the e_entry field is the address of the function descriptor
	 * for the startup routine, rather than the address of the startup
	 * routine itself.  This macro performs whatever initialization to
	 * the regs structure is required as well as any relocations to the
	 * function descriptor entries when executing dynamically linked apps.
	 */
	ELF_PLAT_INIT(regs, reloc_func_desc);
#endif

	finalize_exec(bprm);
	start_thread(regs, elf_entry, bprm->p);
	retval = 0;
out:
	return retval;

	/* error cleanup */
out_free_dentry:
	kfree(interp_elf_ex);
	kfree(interp_elf_phdata);
	allow_write_access(interpreter);
	if (interpreter)
		fput(interpreter);
out_free_ph:
	kfree(elf_phdata);
	goto out;
}
1169
Josh Triplett69369a72014-04-03 14:48:27 -07001170#ifdef CONFIG_USELIB
Linus Torvalds1da177e2005-04-16 15:20:36 -07001171/* This is really simpleminded and specialized - we are loading an
1172 a.out library that is given an ELF header. */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001173static int load_elf_library(struct file *file)
1174{
1175 struct elf_phdr *elf_phdata;
1176 struct elf_phdr *eppnt;
1177 unsigned long elf_bss, bss, len;
1178 int retval, error, i, j;
1179 struct elfhdr elf_ex;
1180
1181 error = -ENOEXEC;
Alexey Dobriyan658c0332019-12-04 16:52:25 -08001182 retval = elf_read(file, &elf_ex, sizeof(elf_ex), 0);
1183 if (retval < 0)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001184 goto out;
1185
1186 if (memcmp(elf_ex.e_ident, ELFMAG, SELFMAG) != 0)
1187 goto out;
1188
1189 /* First of all, some simple consistency checks */
1190 if (elf_ex.e_type != ET_EXEC || elf_ex.e_phnum > 2 ||
Al Viro72c2d532013-09-22 16:27:52 -04001191 !elf_check_arch(&elf_ex) || !file->f_op->mmap)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001192 goto out;
Nicolas Pitre47552002017-08-16 16:05:13 -04001193 if (elf_check_fdpic(&elf_ex))
1194 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001195
1196 /* Now read in all of the header information */
1197
1198 j = sizeof(struct elf_phdr) * elf_ex.e_phnum;
1199 /* j < ELF_MIN_ALIGN because elf_ex.e_phnum <= 2 */
1200
1201 error = -ENOMEM;
1202 elf_phdata = kmalloc(j, GFP_KERNEL);
1203 if (!elf_phdata)
1204 goto out;
1205
1206 eppnt = elf_phdata;
1207 error = -ENOEXEC;
Alexey Dobriyan658c0332019-12-04 16:52:25 -08001208 retval = elf_read(file, eppnt, j, elf_ex.e_phoff);
1209 if (retval < 0)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001210 goto out_free_ph;
1211
 1212 for (j = 0, i = 0; i < elf_ex.e_phnum; i++)
1213 if ((eppnt + i)->p_type == PT_LOAD)
1214 j++;
1215 if (j != 1)
1216 goto out_free_ph;
1217
1218 while (eppnt->p_type != PT_LOAD)
1219 eppnt++;
1220
1221 /* Now use mmap to map the library into memory. */
Linus Torvalds6be5ceb2012-04-20 17:13:58 -07001222 error = vm_mmap(file,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001223 ELF_PAGESTART(eppnt->p_vaddr),
1224 (eppnt->p_filesz +
1225 ELF_PAGEOFFSET(eppnt->p_vaddr)),
1226 PROT_READ | PROT_WRITE | PROT_EXEC,
Michal Hocko4ed28632018-04-10 16:36:01 -07001227 MAP_FIXED_NOREPLACE | MAP_PRIVATE | MAP_DENYWRITE,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001228 (eppnt->p_offset -
1229 ELF_PAGEOFFSET(eppnt->p_vaddr)));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001230 if (error != ELF_PAGESTART(eppnt->p_vaddr))
1231 goto out_free_ph;
1232
1233 elf_bss = eppnt->p_vaddr + eppnt->p_filesz;
1234 if (padzero(elf_bss)) {
1235 error = -EFAULT;
1236 goto out_free_ph;
1237 }
1238
Oscar Salvador24962af2018-07-13 16:59:13 -07001239 len = ELF_PAGEALIGN(eppnt->p_filesz + eppnt->p_vaddr);
1240 bss = ELF_PAGEALIGN(eppnt->p_memsz + eppnt->p_vaddr);
Michal Hockoecc2bc82016-05-23 16:25:39 -07001241 if (bss > len) {
1242 error = vm_brk(len, bss - len);
Linus Torvalds5d22fc22016-05-27 15:57:31 -07001243 if (error)
Michal Hockoecc2bc82016-05-23 16:25:39 -07001244 goto out_free_ph;
1245 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001246 error = 0;
1247
1248out_free_ph:
1249 kfree(elf_phdata);
1250out:
1251 return error;
1252}
Josh Triplett69369a72014-04-03 14:48:27 -07001253#endif /* #ifdef CONFIG_USELIB */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001254
Christoph Hellwig698ba7b2009-12-15 16:47:37 -08001255#ifdef CONFIG_ELF_CORE
Linus Torvalds1da177e2005-04-16 15:20:36 -07001256/*
1257 * ELF core dumper
1258 *
1259 * Modelled on fs/exec.c:aout_core_dump()
1260 * Jeremy Fitzhardinge <jeremy@sw.oz.au>
1261 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001262
1263/*
Jason Baron909af762012-03-23 15:02:51 -07001264 * The purpose of always_dump_vma() is to make sure that special kernel mappings
1265 * that are useful for post-mortem analysis are included in every core dump.
1266 * In that way we ensure that the core dump is fully interpretable later
1267 * without matching up the same kernel and hardware config to see what PC values
 1268 * meant. These special mappings include the vDSO, vsyscall, and other
 1269 * architecture-specific mappings.
1270 */
1271static bool always_dump_vma(struct vm_area_struct *vma)
1272{
1273 /* Any vsyscall mappings? */
1274 if (vma == get_gate_vma(vma->vm_mm))
1275 return true;
Andy Lutomirski78d683e2014-05-19 15:58:32 -07001276
1277 /*
1278 * Assume that all vmas with a .name op should always be dumped.
1279 * If this changes, a new vm_ops field can easily be added.
1280 */
1281 if (vma->vm_ops && vma->vm_ops->name && vma->vm_ops->name(vma))
1282 return true;
1283
Jason Baron909af762012-03-23 15:02:51 -07001284 /*
1285 * arch_vma_name() returns non-NULL for special architecture mappings,
1286 * such as vDSO sections.
1287 */
1288 if (arch_vma_name(vma))
1289 return true;
1290
1291 return false;
1292}
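/*
 * Illustration (editorial): on x86 the vDSO is installed as a special
 * mapping whose vm_ops->name hook reports "[vdso]", so it passes the
 * second test above and lands in every core dump regardless of the
 * coredump filter bits consulted by vma_dump_size() below.
 */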
1293
1294/*
Roland McGrath82df3972007-10-16 23:27:02 -07001295 * Decide what to dump of a segment: part, all, or none.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001296 */
Roland McGrath82df3972007-10-16 23:27:02 -07001297static unsigned long vma_dump_size(struct vm_area_struct *vma,
1298 unsigned long mm_flags)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001299{
KOSAKI Motohiroe575f112008-10-18 20:27:08 -07001300#define FILTER(type) (mm_flags & (1UL << MMF_DUMP_##type))
1301
Jason Baron909af762012-03-23 15:02:51 -07001302 /* always dump the vdso and vsyscall sections */
1303 if (always_dump_vma(vma))
Roland McGrath82df3972007-10-16 23:27:02 -07001304 goto whole;
Roland McGrathe5b97dd2007-01-26 00:56:48 -08001305
Konstantin Khlebnikov0103bd12012-10-08 16:28:59 -07001306 if (vma->vm_flags & VM_DONTDUMP)
Jason Baronaccb61f2012-03-23 15:02:51 -07001307 return 0;
1308
Ross Zwisler50378352015-10-05 16:33:36 -06001309 /* support for DAX */
1310 if (vma_is_dax(vma)) {
1311 if ((vma->vm_flags & VM_SHARED) && FILTER(DAX_SHARED))
1312 goto whole;
1313 if (!(vma->vm_flags & VM_SHARED) && FILTER(DAX_PRIVATE))
1314 goto whole;
1315 return 0;
1316 }
1317
KOSAKI Motohiroe575f112008-10-18 20:27:08 -07001318 /* Hugetlb memory check */
Anshuman Khandual03911132020-04-06 20:03:51 -07001319 if (is_vm_hugetlb_page(vma)) {
KOSAKI Motohiroe575f112008-10-18 20:27:08 -07001320 if ((vma->vm_flags & VM_SHARED) && FILTER(HUGETLB_SHARED))
1321 goto whole;
1322 if (!(vma->vm_flags & VM_SHARED) && FILTER(HUGETLB_PRIVATE))
1323 goto whole;
Naoya Horiguchi23d9e482013-04-17 15:58:28 -07001324 return 0;
KOSAKI Motohiroe575f112008-10-18 20:27:08 -07001325 }
1326
Linus Torvalds1da177e2005-04-16 15:20:36 -07001327 /* Do not dump I/O mapped devices or special mappings */
Konstantin Khlebnikov314e51b2012-10-08 16:29:02 -07001328 if (vma->vm_flags & VM_IO)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001329 return 0;
1330
Kawai, Hidehiroa1b59e82007-07-19 01:48:29 -07001331 /* By default, dump shared memory if mapped from an anonymous file. */
1332 if (vma->vm_flags & VM_SHARED) {
Al Viro496ad9a2013-01-23 17:07:38 -05001333 if (file_inode(vma->vm_file)->i_nlink == 0 ?
Roland McGrath82df3972007-10-16 23:27:02 -07001334 FILTER(ANON_SHARED) : FILTER(MAPPED_SHARED))
1335 goto whole;
1336 return 0;
Kawai, Hidehiroa1b59e82007-07-19 01:48:29 -07001337 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001338
Roland McGrath82df3972007-10-16 23:27:02 -07001339 /* Dump segments that have been written to. */
1340 if (vma->anon_vma && FILTER(ANON_PRIVATE))
1341 goto whole;
1342 if (vma->vm_file == NULL)
1343 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001344
Roland McGrath82df3972007-10-16 23:27:02 -07001345 if (FILTER(MAPPED_PRIVATE))
1346 goto whole;
1347
1348 /*
1349 * If this looks like the beginning of a DSO or executable mapping,
1350 * check for an ELF header. If we find one, dump the first page to
1351 * aid in determining what was mapped here.
1352 */
Roland McGrath92dc07b2009-02-06 17:34:07 -08001353 if (FILTER(ELF_HEADERS) &&
1354 vma->vm_pgoff == 0 && (vma->vm_flags & VM_READ)) {
Roland McGrath82df3972007-10-16 23:27:02 -07001355 u32 __user *header = (u32 __user *) vma->vm_start;
1356 u32 word;
Roland McGrath92dc07b2009-02-06 17:34:07 -08001357 mm_segment_t fs = get_fs();
Roland McGrath82df3972007-10-16 23:27:02 -07001358 /*
1359 * Doing it this way gets the constant folded by GCC.
1360 */
1361 union {
1362 u32 cmp;
1363 char elfmag[SELFMAG];
1364 } magic;
1365 BUILD_BUG_ON(SELFMAG != sizeof word);
1366 magic.elfmag[EI_MAG0] = ELFMAG0;
1367 magic.elfmag[EI_MAG1] = ELFMAG1;
1368 magic.elfmag[EI_MAG2] = ELFMAG2;
1369 magic.elfmag[EI_MAG3] = ELFMAG3;
Roland McGrath92dc07b2009-02-06 17:34:07 -08001370 /*
1371 * Switch to the user "segment" for get_user(),
1372 * then put back what elf_core_dump() had in place.
1373 */
1374 set_fs(USER_DS);
1375 if (unlikely(get_user(word, header)))
1376 word = 0;
1377 set_fs(fs);
1378 if (word == magic.cmp)
Roland McGrath82df3972007-10-16 23:27:02 -07001379 return PAGE_SIZE;
1380 }
1381
1382#undef FILTER
1383
1384 return 0;
1385
1386whole:
1387 return vma->vm_end - vma->vm_start;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001388}
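/*
 * Illustration (editorial): the MMF_DUMP_* bits tested by FILTER() are
 * the kernel-internal form of /proc/<pid>/coredump_filter. Under the
 * common default (anon private, anon shared, ELF headers, hugetlb
 * private; 0x33 in the /proc encoding when
 * CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is set), anonymous memory is
 * dumped whole, while an unwritten private file mapping is reduced to
 * its first page, and only when that page starts with the ELF magic.
 */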
1389
Linus Torvalds1da177e2005-04-16 15:20:36 -07001390/* An ELF note in memory */
1391struct memelfnote
1392{
1393 const char *name;
1394 int type;
1395 unsigned int datasz;
1396 void *data;
1397};
1398
1399static int notesize(struct memelfnote *en)
1400{
1401 int sz;
1402
1403 sz = sizeof(struct elf_note);
1404 sz += roundup(strlen(en->name) + 1, 4);
1405 sz += roundup(en->datasz, 4);
1406
1407 return sz;
1408}
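/*
 * Worked example (editorial): for a note named "CORE" (5 bytes with the
 * NUL) carrying a 148-byte descriptor, notesize() returns
 *
 *	sizeof(struct elf_note)	 12	n_namesz, n_descsz, n_type
 *	roundup(5, 4)		  8	name, padded to 4 bytes
 *	roundup(148, 4)		148	descriptor, already aligned
 *
 * i.e. 168 bytes, matching exactly what writenote() below emits.
 */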
1409
Al Viroecc8c772013-10-05 15:32:35 -04001410static int writenote(struct memelfnote *men, struct coredump_params *cprm)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001411{
1412 struct elf_note en;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001413 en.n_namesz = strlen(men->name) + 1;
1414 en.n_descsz = men->datasz;
1415 en.n_type = men->type;
1416
Al Viroecc8c772013-10-05 15:32:35 -04001417 return dump_emit(cprm, &en, sizeof(en)) &&
Al Viro22a8cb82013-10-08 11:05:01 -04001418 dump_emit(cprm, men->name, en.n_namesz) && dump_align(cprm, 4) &&
1419 dump_emit(cprm, men->data, men->datasz) && dump_align(cprm, 4);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001420}
Linus Torvalds1da177e2005-04-16 15:20:36 -07001421
Roland McGrath3aba4812008-01-30 13:31:44 +01001422static void fill_elf_header(struct elfhdr *elf, int segs,
Zhang Yanfeid3330cf2013-02-21 16:44:20 -08001423 u16 machine, u32 flags)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001424{
Cyrill Gorcunov6970c8e2008-04-29 01:01:18 -07001425 memset(elf, 0, sizeof(*elf));
1426
Linus Torvalds1da177e2005-04-16 15:20:36 -07001427 memcpy(elf->e_ident, ELFMAG, SELFMAG);
1428 elf->e_ident[EI_CLASS] = ELF_CLASS;
1429 elf->e_ident[EI_DATA] = ELF_DATA;
1430 elf->e_ident[EI_VERSION] = EV_CURRENT;
1431 elf->e_ident[EI_OSABI] = ELF_OSABI;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001432
1433 elf->e_type = ET_CORE;
Roland McGrath3aba4812008-01-30 13:31:44 +01001434 elf->e_machine = machine;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001435 elf->e_version = EV_CURRENT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001436 elf->e_phoff = sizeof(struct elfhdr);
Roland McGrath3aba4812008-01-30 13:31:44 +01001437 elf->e_flags = flags;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001438 elf->e_ehsize = sizeof(struct elfhdr);
1439 elf->e_phentsize = sizeof(struct elf_phdr);
1440 elf->e_phnum = segs;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001441}
1442
Andrew Morton8d6b5eee2006-09-25 23:32:04 -07001443static void fill_elf_note_phdr(struct elf_phdr *phdr, int sz, loff_t offset)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001444{
1445 phdr->p_type = PT_NOTE;
1446 phdr->p_offset = offset;
1447 phdr->p_vaddr = 0;
1448 phdr->p_paddr = 0;
1449 phdr->p_filesz = sz;
1450 phdr->p_memsz = 0;
1451 phdr->p_flags = 0;
1452 phdr->p_align = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001453}
1454
1455static void fill_note(struct memelfnote *note, const char *name, int type,
1456 unsigned int sz, void *data)
1457{
1458 note->name = name;
1459 note->type = type;
1460 note->datasz = sz;
1461 note->data = data;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001462}
1463
1464/*
Jesper Juhlf4e5cc22006-06-23 02:05:35 -07001465 * fill up all the fields in prstatus from the given task struct, except
 1466 * the registers, which need to be filled in separately.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001467 */
1468static void fill_prstatus(struct elf_prstatus *prstatus,
Jesper Juhlf4e5cc22006-06-23 02:05:35 -07001469 struct task_struct *p, long signr)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001470{
1471 prstatus->pr_info.si_signo = prstatus->pr_cursig = signr;
1472 prstatus->pr_sigpend = p->pending.signal.sig[0];
1473 prstatus->pr_sighold = p->blocked.sig[0];
Oleg Nesterov3b34fc52009-06-17 16:27:38 -07001474 rcu_read_lock();
1475 prstatus->pr_ppid = task_pid_vnr(rcu_dereference(p->real_parent));
1476 rcu_read_unlock();
Pavel Emelyanovb4888932007-10-18 23:40:14 -07001477 prstatus->pr_pid = task_pid_vnr(p);
Pavel Emelyanovb4888932007-10-18 23:40:14 -07001478 prstatus->pr_pgrp = task_pgrp_vnr(p);
1479 prstatus->pr_sid = task_session_vnr(p);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001480 if (thread_group_leader(p)) {
Frederic Weisbeckercd19c362017-01-31 04:09:27 +01001481 struct task_cputime cputime;
Frank Mayharf06febc2008-09-12 09:54:39 -07001482
Linus Torvalds1da177e2005-04-16 15:20:36 -07001483 /*
Frank Mayharf06febc2008-09-12 09:54:39 -07001484 * This is the record for the group leader. It shows the
1485 * group-wide total, not its individual thread total.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001486 */
Frederic Weisbeckercd19c362017-01-31 04:09:27 +01001487 thread_group_cputime(p, &cputime);
Arnd Bergmanne2bb80d2017-11-23 13:46:33 +01001488 prstatus->pr_utime = ns_to_kernel_old_timeval(cputime.utime);
1489 prstatus->pr_stime = ns_to_kernel_old_timeval(cputime.stime);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001490 } else {
Frederic Weisbeckercd19c362017-01-31 04:09:27 +01001491 u64 utime, stime;
Frederic Weisbecker6fac4822012-11-13 14:20:55 +01001492
Frederic Weisbeckercd19c362017-01-31 04:09:27 +01001493 task_cputime(p, &utime, &stime);
Arnd Bergmanne2bb80d2017-11-23 13:46:33 +01001494 prstatus->pr_utime = ns_to_kernel_old_timeval(utime);
1495 prstatus->pr_stime = ns_to_kernel_old_timeval(stime);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001496 }
Frederic Weisbecker5613fda2017-01-31 04:09:23 +01001497
Arnd Bergmanne2bb80d2017-11-23 13:46:33 +01001498 prstatus->pr_cutime = ns_to_kernel_old_timeval(p->signal->cutime);
1499 prstatus->pr_cstime = ns_to_kernel_old_timeval(p->signal->cstime);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001500}
1501
1502static int fill_psinfo(struct elf_prpsinfo *psinfo, struct task_struct *p,
1503 struct mm_struct *mm)
1504{
David Howellsc69e8d92008-11-14 10:39:19 +11001505 const struct cred *cred;
Greg Kroah-Hartmana84a5052005-05-11 00:10:44 -07001506 unsigned int i, len;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001507
1508 /* first copy the parameters from user space */
1509 memset(psinfo, 0, sizeof(struct elf_prpsinfo));
1510
1511 len = mm->arg_end - mm->arg_start;
1512 if (len >= ELF_PRARGSZ)
1513 len = ELF_PRARGSZ-1;
1514 if (copy_from_user(&psinfo->pr_psargs,
1515 (const char __user *)mm->arg_start, len))
1516 return -EFAULT;
1517 for(i = 0; i < len; i++)
1518 if (psinfo->pr_psargs[i] == 0)
1519 psinfo->pr_psargs[i] = ' ';
1520 psinfo->pr_psargs[len] = 0;
1521
Oleg Nesterov3b34fc52009-06-17 16:27:38 -07001522 rcu_read_lock();
1523 psinfo->pr_ppid = task_pid_vnr(rcu_dereference(p->real_parent));
1524 rcu_read_unlock();
Pavel Emelyanovb4888932007-10-18 23:40:14 -07001525 psinfo->pr_pid = task_pid_vnr(p);
Pavel Emelyanovb4888932007-10-18 23:40:14 -07001526 psinfo->pr_pgrp = task_pgrp_vnr(p);
1527 psinfo->pr_sid = task_session_vnr(p);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001528
1529 i = p->state ? ffz(~p->state) + 1 : 0;
1530 psinfo->pr_state = i;
Carsten Otte55148542006-03-25 03:08:22 -08001531 psinfo->pr_sname = (i > 5) ? '.' : "RSDTZW"[i];
Linus Torvalds1da177e2005-04-16 15:20:36 -07001532 psinfo->pr_zomb = psinfo->pr_sname == 'Z';
1533 psinfo->pr_nice = task_nice(p);
1534 psinfo->pr_flag = p->flags;
David Howellsc69e8d92008-11-14 10:39:19 +11001535 rcu_read_lock();
1536 cred = __task_cred(p);
Eric W. Biedermanebc887b2012-02-07 18:36:10 -08001537 SET_UID(psinfo->pr_uid, from_kuid_munged(cred->user_ns, cred->uid));
1538 SET_GID(psinfo->pr_gid, from_kgid_munged(cred->user_ns, cred->gid));
David Howellsc69e8d92008-11-14 10:39:19 +11001539 rcu_read_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07001540 strncpy(psinfo->pr_fname, p->comm, sizeof(psinfo->pr_fname));
1541
1542 return 0;
1543}
1544
Roland McGrath3aba4812008-01-30 13:31:44 +01001545static void fill_auxv_note(struct memelfnote *note, struct mm_struct *mm)
1546{
1547 elf_addr_t *auxv = (elf_addr_t *) mm->saved_auxv;
1548 int i = 0;
1549 do
1550 i += 2;
1551 while (auxv[i - 2] != AT_NULL);
1552 fill_note(note, "CORE", NT_AUXV, i * sizeof(elf_addr_t), auxv);
1553}
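/*
 * Sketch (editorial): saved_auxv is a flat array of (a_type, a_val)
 * pairs terminated by an AT_NULL entry, hence the stride of two above.
 * A consumer of the resulting NT_AUXV note can walk it the same way:
 *
 *	for (elf_addr_t *p = auxv; p[0] != AT_NULL; p += 2)
 *		handle_aux_entry(p[0], p[1]);	// hypothetical helper
 */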
1554
Denys Vlasenko49ae4d42012-10-04 17:15:35 -07001555static void fill_siginfo_note(struct memelfnote *note, user_siginfo_t *csigdata,
Eric W. Biedermanae7795b2018-09-25 11:27:20 +02001556 const kernel_siginfo_t *siginfo)
Denys Vlasenko49ae4d42012-10-04 17:15:35 -07001557{
1558 mm_segment_t old_fs = get_fs();
1559 set_fs(KERNEL_DS);
1560 copy_siginfo_to_user((user_siginfo_t __user *) csigdata, siginfo);
1561 set_fs(old_fs);
1562 fill_note(note, "CORE", NT_SIGINFO, sizeof(*csigdata), csigdata);
1563}
1564
Denys Vlasenko2aa362c2012-10-04 17:15:36 -07001565#define MAX_FILE_NOTE_SIZE (4*1024*1024)
1566/*
1567 * Format of NT_FILE note:
1568 *
1569 * long count -- how many files are mapped
1570 * long page_size -- units for file_ofs
1571 * array of [COUNT] elements of
1572 * long start
1573 * long end
1574 * long file_ofs
1575 * followed by COUNT filenames in ASCII: "FILE1" NUL "FILE2" NUL...
1576 */
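/*
 * Worked example (editorial, values invented): for two mapped files the
 * payload would contain, in user_long_t units,
 *
 *	2, 4096				count, page_size
 *	0x400000, 0x401000, 0		start, end, file_ofs of file one
 *	0x7f0000, 0x7f5000, 2		start, end, file_ofs of file two
 *	"/bin/app\0/lib/libfoo.so\0"	the two names (paths hypothetical)
 *
 * file_ofs is expressed in page_size units, so 2 means byte offset 8192.
 */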
Dan Aloni72023652013-09-30 13:45:02 -07001577static int fill_files_note(struct memelfnote *note)
Denys Vlasenko2aa362c2012-10-04 17:15:36 -07001578{
Alexey Dobriyan03c6d722020-01-30 22:16:58 -08001579 struct mm_struct *mm = current->mm;
Denys Vlasenko2aa362c2012-10-04 17:15:36 -07001580 struct vm_area_struct *vma;
1581 unsigned count, size, names_ofs, remaining, n;
1582 user_long_t *data;
1583 user_long_t *start_end_ofs;
1584 char *name_base, *name_curpos;
1585
1586 /* *Estimated* file count and total data size needed */
Alexey Dobriyan03c6d722020-01-30 22:16:58 -08001587 count = mm->map_count;
Alexey Dobriyan60c9d922018-02-06 15:39:13 -08001588 if (count > UINT_MAX / 64)
1589 return -EINVAL;
Denys Vlasenko2aa362c2012-10-04 17:15:36 -07001590 size = count * 64;
1591
1592 names_ofs = (2 + 3 * count) * sizeof(data[0]);
1593 alloc:
1594 if (size >= MAX_FILE_NOTE_SIZE) /* paranoia check */
Dan Aloni72023652013-09-30 13:45:02 -07001595 return -EINVAL;
Denys Vlasenko2aa362c2012-10-04 17:15:36 -07001596 size = round_up(size, PAGE_SIZE);
Alexey Dobriyan1fbede62020-01-30 22:17:10 -08001597 /*
1598 * "size" can be 0 here legitimately.
 1599 * Let it ENOMEM and omit the NT_FILE section, which would be empty anyway.
1600 */
Alexey Dobriyan86a2bb52018-06-14 15:27:24 -07001601 data = kvmalloc(size, GFP_KERNEL);
1602 if (ZERO_OR_NULL_PTR(data))
Dan Aloni72023652013-09-30 13:45:02 -07001603 return -ENOMEM;
Denys Vlasenko2aa362c2012-10-04 17:15:36 -07001604
1605 start_end_ofs = data + 2;
1606 name_base = name_curpos = ((char *)data) + names_ofs;
1607 remaining = size - names_ofs;
1608 count = 0;
Alexey Dobriyan03c6d722020-01-30 22:16:58 -08001609 for (vma = mm->mmap; vma != NULL; vma = vma->vm_next) {
Denys Vlasenko2aa362c2012-10-04 17:15:36 -07001610 struct file *file;
1611 const char *filename;
1612
1613 file = vma->vm_file;
1614 if (!file)
1615 continue;
Miklos Szeredi9bf39ab2015-06-19 10:29:13 +02001616 filename = file_path(file, name_curpos, remaining);
Denys Vlasenko2aa362c2012-10-04 17:15:36 -07001617 if (IS_ERR(filename)) {
1618 if (PTR_ERR(filename) == -ENAMETOOLONG) {
Alexey Dobriyan86a2bb52018-06-14 15:27:24 -07001619 kvfree(data);
Denys Vlasenko2aa362c2012-10-04 17:15:36 -07001620 size = size * 5 / 4;
1621 goto alloc;
1622 }
1623 continue;
1624 }
1625
Miklos Szeredi9bf39ab2015-06-19 10:29:13 +02001626 /* file_path() fills at the end, move name down */
Denys Vlasenko2aa362c2012-10-04 17:15:36 -07001627 /* n = strlen(filename) + 1: */
1628 n = (name_curpos + remaining) - filename;
1629 remaining = filename - name_curpos;
1630 memmove(name_curpos, filename, n);
1631 name_curpos += n;
1632
1633 *start_end_ofs++ = vma->vm_start;
1634 *start_end_ofs++ = vma->vm_end;
1635 *start_end_ofs++ = vma->vm_pgoff;
1636 count++;
1637 }
1638
 1639 /* Now we know the exact count of files, so we can store it */
1640 data[0] = count;
1641 data[1] = PAGE_SIZE;
1642 /*
Alexey Dobriyan03c6d722020-01-30 22:16:58 -08001643 * The count is usually less than mm->map_count,
Denys Vlasenko2aa362c2012-10-04 17:15:36 -07001644 * so we need to move the filenames down.
1645 */
Alexey Dobriyan03c6d722020-01-30 22:16:58 -08001646 n = mm->map_count - count;
Denys Vlasenko2aa362c2012-10-04 17:15:36 -07001647 if (n != 0) {
1648 unsigned shift_bytes = n * 3 * sizeof(data[0]);
1649 memmove(name_base - shift_bytes, name_base,
1650 name_curpos - name_base);
1651 name_curpos -= shift_bytes;
1652 }
1653
1654 size = name_curpos - (char *)data;
1655 fill_note(note, "CORE", NT_FILE, size, data);
Dan Aloni72023652013-09-30 13:45:02 -07001656 return 0;
Denys Vlasenko2aa362c2012-10-04 17:15:36 -07001657}
1658
Roland McGrath4206d3a2008-01-30 13:31:45 +01001659#ifdef CORE_DUMP_USE_REGSET
1660#include <linux/regset.h>
1661
1662struct elf_thread_core_info {
1663 struct elf_thread_core_info *next;
1664 struct task_struct *task;
1665 struct elf_prstatus prstatus;
1666 struct memelfnote notes[0];
1667};
1668
1669struct elf_note_info {
1670 struct elf_thread_core_info *thread;
1671 struct memelfnote psinfo;
Denys Vlasenko49ae4d42012-10-04 17:15:35 -07001672 struct memelfnote signote;
Roland McGrath4206d3a2008-01-30 13:31:45 +01001673 struct memelfnote auxv;
Denys Vlasenko2aa362c2012-10-04 17:15:36 -07001674 struct memelfnote files;
Denys Vlasenko49ae4d42012-10-04 17:15:35 -07001675 user_siginfo_t csigdata;
Roland McGrath4206d3a2008-01-30 13:31:45 +01001676 size_t size;
1677 int thread_notes;
1678};
1679
Roland McGrathd31472b2008-03-04 14:28:30 -08001680/*
1681 * When a regset has a writeback hook, we call it on each thread before
1682 * dumping user memory. On register window machines, this makes sure the
1683 * user memory backing the register data is up to date before we read it.
1684 */
1685static void do_thread_regset_writeback(struct task_struct *task,
1686 const struct user_regset *regset)
1687{
1688 if (regset->writeback)
1689 regset->writeback(task, regset, 1);
1690}
1691
H. J. Lu0953f65d2012-02-14 13:34:52 -08001692#ifndef PRSTATUS_SIZE
Dmitry Safonov90954e72016-09-05 16:33:06 +03001693#define PRSTATUS_SIZE(S, R) sizeof(S)
H. J. Lu0953f65d2012-02-14 13:34:52 -08001694#endif
1695
1696#ifndef SET_PR_FPVALID
Dmitry Safonov90954e72016-09-05 16:33:06 +03001697#define SET_PR_FPVALID(S, V, R) ((S)->pr_fpvalid = (V))
H. J. Lu0953f65d2012-02-14 13:34:52 -08001698#endif
1699
Roland McGrath4206d3a2008-01-30 13:31:45 +01001700static int fill_thread_core_info(struct elf_thread_core_info *t,
1701 const struct user_regset_view *view,
1702 long signr, size_t *total)
1703{
1704 unsigned int i;
Dave Martin27e64b42017-10-31 15:50:53 +00001705 unsigned int regset0_size = regset_size(t->task, &view->regsets[0]);
Roland McGrath4206d3a2008-01-30 13:31:45 +01001706
1707 /*
1708 * NT_PRSTATUS is the one special case, because the regset data
1709 * goes into the pr_reg field inside the note contents, rather
 1710 * than being the whole note contents. We fill the rest in here.
1711 * We assume that regset 0 is NT_PRSTATUS.
1712 */
1713 fill_prstatus(&t->prstatus, t->task, signr);
Dave Martin27e64b42017-10-31 15:50:53 +00001714 (void) view->regsets[0].get(t->task, &view->regsets[0], 0, regset0_size,
Dmitry Safonov90954e72016-09-05 16:33:06 +03001715 &t->prstatus.pr_reg, NULL);
Roland McGrath4206d3a2008-01-30 13:31:45 +01001716
1717 fill_note(&t->notes[0], "CORE", NT_PRSTATUS,
Dave Martin27e64b42017-10-31 15:50:53 +00001718 PRSTATUS_SIZE(t->prstatus, regset0_size), &t->prstatus);
Roland McGrath4206d3a2008-01-30 13:31:45 +01001719 *total += notesize(&t->notes[0]);
1720
Roland McGrathd31472b2008-03-04 14:28:30 -08001721 do_thread_regset_writeback(t->task, &view->regsets[0]);
1722
Roland McGrath4206d3a2008-01-30 13:31:45 +01001723 /*
 1724 * Every other regset might generate a note too. For each regset
1725 * that has no core_note_type or is inactive, we leave t->notes[i]
1726 * all zero and we'll know to skip writing it later.
1727 */
1728 for (i = 1; i < view->n; ++i) {
1729 const struct user_regset *regset = &view->regsets[i];
Roland McGrathd31472b2008-03-04 14:28:30 -08001730 do_thread_regset_writeback(t->task, regset);
H. Peter Anvinc8e25252012-03-02 10:43:48 -08001731 if (regset->core_note_type && regset->get &&
Maciej W. Rozycki2f819db2018-05-15 23:32:45 +01001732 (!regset->active || regset->active(t->task, regset) > 0)) {
Roland McGrath4206d3a2008-01-30 13:31:45 +01001733 int ret;
Dave Martin27e64b42017-10-31 15:50:53 +00001734 size_t size = regset_size(t->task, regset);
Roland McGrath4206d3a2008-01-30 13:31:45 +01001735 void *data = kmalloc(size, GFP_KERNEL);
1736 if (unlikely(!data))
1737 return 0;
1738 ret = regset->get(t->task, regset,
1739 0, size, data, NULL);
1740 if (unlikely(ret))
1741 kfree(data);
1742 else {
1743 if (regset->core_note_type != NT_PRFPREG)
1744 fill_note(&t->notes[i], "LINUX",
1745 regset->core_note_type,
1746 size, data);
1747 else {
Dmitry Safonov90954e72016-09-05 16:33:06 +03001748 SET_PR_FPVALID(&t->prstatus,
Dave Martin27e64b42017-10-31 15:50:53 +00001749 1, regset0_size);
Roland McGrath4206d3a2008-01-30 13:31:45 +01001750 fill_note(&t->notes[i], "CORE",
1751 NT_PRFPREG, size, data);
1752 }
1753 *total += notesize(&t->notes[i]);
1754 }
1755 }
1756 }
1757
1758 return 1;
1759}
1760
1761static int fill_note_info(struct elfhdr *elf, int phdrs,
1762 struct elf_note_info *info,
Eric W. Biedermanae7795b2018-09-25 11:27:20 +02001763 const kernel_siginfo_t *siginfo, struct pt_regs *regs)
Roland McGrath4206d3a2008-01-30 13:31:45 +01001764{
1765 struct task_struct *dump_task = current;
1766 const struct user_regset_view *view = task_user_regset_view(dump_task);
1767 struct elf_thread_core_info *t;
1768 struct elf_prpsinfo *psinfo;
Oleg Nesterov83914442008-07-25 01:47:45 -07001769 struct core_thread *ct;
Roland McGrath4206d3a2008-01-30 13:31:45 +01001770 unsigned int i;
1771
1772 info->size = 0;
1773 info->thread = NULL;
1774
1775 psinfo = kmalloc(sizeof(*psinfo), GFP_KERNEL);
Alan Cox6899e922012-12-17 16:02:09 -08001776 if (psinfo == NULL) {
1777 info->psinfo.data = NULL; /* So we don't free this wrongly */
Roland McGrath4206d3a2008-01-30 13:31:45 +01001778 return 0;
Alan Cox6899e922012-12-17 16:02:09 -08001779 }
Roland McGrath4206d3a2008-01-30 13:31:45 +01001780
Amerigo Wange2dbe122009-07-01 01:06:26 -04001781 fill_note(&info->psinfo, "CORE", NT_PRPSINFO, sizeof(*psinfo), psinfo);
1782
Roland McGrath4206d3a2008-01-30 13:31:45 +01001783 /*
1784 * Figure out how many notes we're going to need for each thread.
1785 */
1786 info->thread_notes = 0;
1787 for (i = 0; i < view->n; ++i)
1788 if (view->regsets[i].core_note_type != 0)
1789 ++info->thread_notes;
1790
1791 /*
 1792 * Sanity check. We rely on regset 0 being NT_PRSTATUS,
1793 * since it is our one special case.
1794 */
1795 if (unlikely(info->thread_notes == 0) ||
1796 unlikely(view->regsets[0].core_note_type != NT_PRSTATUS)) {
1797 WARN_ON(1);
1798 return 0;
1799 }
1800
1801 /*
1802 * Initialize the ELF file header.
1803 */
1804 fill_elf_header(elf, phdrs,
Zhang Yanfeid3330cf2013-02-21 16:44:20 -08001805 view->e_machine, view->e_flags);
Roland McGrath4206d3a2008-01-30 13:31:45 +01001806
1807 /*
1808 * Allocate a structure for each thread.
1809 */
Oleg Nesterov83914442008-07-25 01:47:45 -07001810 for (ct = &dump_task->mm->core_state->dumper; ct; ct = ct->next) {
1811 t = kzalloc(offsetof(struct elf_thread_core_info,
1812 notes[info->thread_notes]),
1813 GFP_KERNEL);
1814 if (unlikely(!t))
1815 return 0;
Oleg Nesterov24d52882008-07-25 01:47:40 -07001816
Oleg Nesterov83914442008-07-25 01:47:45 -07001817 t->task = ct->task;
1818 if (ct->task == dump_task || !info->thread) {
1819 t->next = info->thread;
1820 info->thread = t;
1821 } else {
1822 /*
1823 * Make sure to keep the original task at
1824 * the head of the list.
1825 */
1826 t->next = info->thread->next;
1827 info->thread->next = t;
Roland McGrath4206d3a2008-01-30 13:31:45 +01001828 }
Oleg Nesterov83914442008-07-25 01:47:45 -07001829 }
Roland McGrath4206d3a2008-01-30 13:31:45 +01001830
1831 /*
1832 * Now fill in each thread's information.
1833 */
1834 for (t = info->thread; t != NULL; t = t->next)
Denys Vlasenko5ab1c302012-10-04 17:15:29 -07001835 if (!fill_thread_core_info(t, view, siginfo->si_signo, &info->size))
Roland McGrath4206d3a2008-01-30 13:31:45 +01001836 return 0;
1837
1838 /*
1839 * Fill in the two process-wide notes.
1840 */
1841 fill_psinfo(psinfo, dump_task->group_leader, dump_task->mm);
1842 info->size += notesize(&info->psinfo);
1843
Denys Vlasenko49ae4d42012-10-04 17:15:35 -07001844 fill_siginfo_note(&info->signote, &info->csigdata, siginfo);
1845 info->size += notesize(&info->signote);
1846
Roland McGrath4206d3a2008-01-30 13:31:45 +01001847 fill_auxv_note(&info->auxv, current->mm);
1848 info->size += notesize(&info->auxv);
1849
Dan Aloni72023652013-09-30 13:45:02 -07001850 if (fill_files_note(&info->files) == 0)
1851 info->size += notesize(&info->files);
Denys Vlasenko2aa362c2012-10-04 17:15:36 -07001852
Roland McGrath4206d3a2008-01-30 13:31:45 +01001853 return 1;
1854}
1855
1856static size_t get_note_info_size(struct elf_note_info *info)
1857{
1858 return info->size;
1859}
1860
1861/*
1862 * Write all the notes for each thread. When writing the first thread, the
1863 * process-wide notes are interleaved after the first thread-specific note.
1864 */
1865static int write_note_info(struct elf_note_info *info,
Al Viroecc8c772013-10-05 15:32:35 -04001866 struct coredump_params *cprm)
Roland McGrath4206d3a2008-01-30 13:31:45 +01001867{
Fabian Frederickb219e252014-06-04 16:12:14 -07001868 bool first = true;
Roland McGrath4206d3a2008-01-30 13:31:45 +01001869 struct elf_thread_core_info *t = info->thread;
1870
1871 do {
1872 int i;
1873
Al Viroecc8c772013-10-05 15:32:35 -04001874 if (!writenote(&t->notes[0], cprm))
Roland McGrath4206d3a2008-01-30 13:31:45 +01001875 return 0;
1876
Al Viroecc8c772013-10-05 15:32:35 -04001877 if (first && !writenote(&info->psinfo, cprm))
Roland McGrath4206d3a2008-01-30 13:31:45 +01001878 return 0;
Al Viroecc8c772013-10-05 15:32:35 -04001879 if (first && !writenote(&info->signote, cprm))
Denys Vlasenko49ae4d42012-10-04 17:15:35 -07001880 return 0;
Al Viroecc8c772013-10-05 15:32:35 -04001881 if (first && !writenote(&info->auxv, cprm))
Roland McGrath4206d3a2008-01-30 13:31:45 +01001882 return 0;
Dan Aloni72023652013-09-30 13:45:02 -07001883 if (first && info->files.data &&
Al Viroecc8c772013-10-05 15:32:35 -04001884 !writenote(&info->files, cprm))
Denys Vlasenko2aa362c2012-10-04 17:15:36 -07001885 return 0;
Roland McGrath4206d3a2008-01-30 13:31:45 +01001886
1887 for (i = 1; i < info->thread_notes; ++i)
1888 if (t->notes[i].data &&
Al Viroecc8c772013-10-05 15:32:35 -04001889 !writenote(&t->notes[i], cprm))
Roland McGrath4206d3a2008-01-30 13:31:45 +01001890 return 0;
1891
Fabian Frederickb219e252014-06-04 16:12:14 -07001892 first = false;
Roland McGrath4206d3a2008-01-30 13:31:45 +01001893 t = t->next;
1894 } while (t);
1895
1896 return 1;
1897}
1898
1899static void free_note_info(struct elf_note_info *info)
1900{
1901 struct elf_thread_core_info *threads = info->thread;
1902 while (threads) {
1903 unsigned int i;
1904 struct elf_thread_core_info *t = threads;
1905 threads = t->next;
1906 WARN_ON(t->notes[0].data && t->notes[0].data != &t->prstatus);
1907 for (i = 1; i < info->thread_notes; ++i)
1908 kfree(t->notes[i].data);
1909 kfree(t);
1910 }
1911 kfree(info->psinfo.data);
Alexey Dobriyan86a2bb52018-06-14 15:27:24 -07001912 kvfree(info->files.data);
Roland McGrath4206d3a2008-01-30 13:31:45 +01001913}
1914
1915#else
1916
Linus Torvalds1da177e2005-04-16 15:20:36 -07001917/* Here is the structure in which status of each thread is captured. */
1918struct elf_thread_status
1919{
1920 struct list_head list;
1921 struct elf_prstatus prstatus; /* NT_PRSTATUS */
1922 elf_fpregset_t fpu; /* NT_PRFPREG */
1923 struct task_struct *thread;
1924#ifdef ELF_CORE_COPY_XFPREGS
Mark Nelson5b20cd82007-10-16 23:25:39 -07001925 elf_fpxregset_t xfpu; /* ELF_CORE_XFPREG_TYPE */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001926#endif
1927 struct memelfnote notes[3];
1928 int num_notes;
1929};
1930
1931/*
 1932 * In order to add the specific thread information for the ELF file format,
Jesper Juhlf4e5cc22006-06-23 02:05:35 -07001933 * we need to keep a linked list of every thread's pr_status and then create
1934 * a single section for them in the final core file.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001935 */
1936static int elf_dump_thread_status(long signr, struct elf_thread_status *t)
1937{
1938 int sz = 0;
1939 struct task_struct *p = t->thread;
1940 t->num_notes = 0;
1941
1942 fill_prstatus(&t->prstatus, p, signr);
1943 elf_core_copy_task_regs(p, &t->prstatus.pr_reg);
1944
Jesper Juhlf4e5cc22006-06-23 02:05:35 -07001945 fill_note(&t->notes[0], "CORE", NT_PRSTATUS, sizeof(t->prstatus),
1946 &(t->prstatus));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001947 t->num_notes++;
1948 sz += notesize(&t->notes[0]);
1949
Jesper Juhlf4e5cc22006-06-23 02:05:35 -07001950 if ((t->prstatus.pr_fpvalid = elf_core_copy_task_fpregs(p, NULL,
1951 &t->fpu))) {
1952 fill_note(&t->notes[1], "CORE", NT_PRFPREG, sizeof(t->fpu),
1953 &(t->fpu));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001954 t->num_notes++;
1955 sz += notesize(&t->notes[1]);
1956 }
1957
1958#ifdef ELF_CORE_COPY_XFPREGS
1959 if (elf_core_copy_task_xfpregs(p, &t->xfpu)) {
Mark Nelson5b20cd82007-10-16 23:25:39 -07001960 fill_note(&t->notes[2], "LINUX", ELF_CORE_XFPREG_TYPE,
1961 sizeof(t->xfpu), &t->xfpu);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001962 t->num_notes++;
1963 sz += notesize(&t->notes[2]);
1964 }
1965#endif
1966 return sz;
1967}
1968
Roland McGrath3aba4812008-01-30 13:31:44 +01001969struct elf_note_info {
1970 struct memelfnote *notes;
Dan Aloni72023652013-09-30 13:45:02 -07001971 struct memelfnote *notes_files;
Roland McGrath3aba4812008-01-30 13:31:44 +01001972 struct elf_prstatus *prstatus; /* NT_PRSTATUS */
1973 struct elf_prpsinfo *psinfo; /* NT_PRPSINFO */
1974 struct list_head thread_list;
1975 elf_fpregset_t *fpu;
1976#ifdef ELF_CORE_COPY_XFPREGS
1977 elf_fpxregset_t *xfpu;
1978#endif
Denys Vlasenko49ae4d42012-10-04 17:15:35 -07001979 user_siginfo_t csigdata;
Roland McGrath3aba4812008-01-30 13:31:44 +01001980 int thread_status_size;
1981 int numnote;
1982};
1983
Amerigo Wang0cf062d2009-09-23 15:57:05 -07001984static int elf_note_info_init(struct elf_note_info *info)
Roland McGrath3aba4812008-01-30 13:31:44 +01001985{
Amerigo Wang0cf062d2009-09-23 15:57:05 -07001986 memset(info, 0, sizeof(*info));
Roland McGrath3aba4812008-01-30 13:31:44 +01001987 INIT_LIST_HEAD(&info->thread_list);
1988
Denys Vlasenko49ae4d42012-10-04 17:15:35 -07001989 /* Allocate space for ELF notes */
Kees Cook6da2ec52018-06-12 13:55:00 -07001990 info->notes = kmalloc_array(8, sizeof(struct memelfnote), GFP_KERNEL);
Roland McGrath3aba4812008-01-30 13:31:44 +01001991 if (!info->notes)
1992 return 0;
1993 info->psinfo = kmalloc(sizeof(*info->psinfo), GFP_KERNEL);
1994 if (!info->psinfo)
Denys Vlasenkof34f9d12012-09-26 11:34:50 +10001995 return 0;
Roland McGrath3aba4812008-01-30 13:31:44 +01001996 info->prstatus = kmalloc(sizeof(*info->prstatus), GFP_KERNEL);
1997 if (!info->prstatus)
Denys Vlasenkof34f9d12012-09-26 11:34:50 +10001998 return 0;
Roland McGrath3aba4812008-01-30 13:31:44 +01001999 info->fpu = kmalloc(sizeof(*info->fpu), GFP_KERNEL);
2000 if (!info->fpu)
Denys Vlasenkof34f9d12012-09-26 11:34:50 +10002001 return 0;
Roland McGrath3aba4812008-01-30 13:31:44 +01002002#ifdef ELF_CORE_COPY_XFPREGS
2003 info->xfpu = kmalloc(sizeof(*info->xfpu), GFP_KERNEL);
2004 if (!info->xfpu)
Denys Vlasenkof34f9d12012-09-26 11:34:50 +10002005 return 0;
Roland McGrath3aba4812008-01-30 13:31:44 +01002006#endif
Amerigo Wang0cf062d2009-09-23 15:57:05 -07002007 return 1;
Amerigo Wang0cf062d2009-09-23 15:57:05 -07002008}
Roland McGrath3aba4812008-01-30 13:31:44 +01002009
Amerigo Wang0cf062d2009-09-23 15:57:05 -07002010static int fill_note_info(struct elfhdr *elf, int phdrs,
2011 struct elf_note_info *info,
Eric W. Biedermanae7795b2018-09-25 11:27:20 +02002012 const kernel_siginfo_t *siginfo, struct pt_regs *regs)
Amerigo Wang0cf062d2009-09-23 15:57:05 -07002013{
Al Viroafabada2013-10-14 07:39:56 -04002014 struct core_thread *ct;
2015 struct elf_thread_status *ets;
Amerigo Wang0cf062d2009-09-23 15:57:05 -07002016
2017 if (!elf_note_info_init(info))
2018 return 0;
2019
Al Viroafabada2013-10-14 07:39:56 -04002020 for (ct = current->mm->core_state->dumper.next;
2021 ct; ct = ct->next) {
2022 ets = kzalloc(sizeof(*ets), GFP_KERNEL);
2023 if (!ets)
2024 return 0;
Oleg Nesterov24d52882008-07-25 01:47:40 -07002025
Al Viroafabada2013-10-14 07:39:56 -04002026 ets->thread = ct->task;
2027 list_add(&ets->list, &info->thread_list);
2028 }
Oleg Nesterov83914442008-07-25 01:47:45 -07002029
Alexey Dobriyan93f044e2019-03-07 16:28:59 -08002030 list_for_each_entry(ets, &info->thread_list, list) {
Al Viroafabada2013-10-14 07:39:56 -04002031 int sz;
Oleg Nesterov83914442008-07-25 01:47:45 -07002032
Al Viroafabada2013-10-14 07:39:56 -04002033 sz = elf_dump_thread_status(siginfo->si_signo, ets);
2034 info->thread_status_size += sz;
Roland McGrath3aba4812008-01-30 13:31:44 +01002035 }
 2036 /* now collect the dump for the current task */
2037 memset(info->prstatus, 0, sizeof(*info->prstatus));
Denys Vlasenko5ab1c302012-10-04 17:15:29 -07002038 fill_prstatus(info->prstatus, current, siginfo->si_signo);
Roland McGrath3aba4812008-01-30 13:31:44 +01002039 elf_core_copy_regs(&info->prstatus->pr_reg, regs);
2040
2041 /* Set up header */
Zhang Yanfeid3330cf2013-02-21 16:44:20 -08002042 fill_elf_header(elf, phdrs, ELF_ARCH, ELF_CORE_EFLAGS);
Roland McGrath3aba4812008-01-30 13:31:44 +01002043
2044 /*
2045 * Set up the notes in similar form to SVR4 core dumps made
2046 * with info from their /proc.
2047 */
2048
2049 fill_note(info->notes + 0, "CORE", NT_PRSTATUS,
2050 sizeof(*info->prstatus), info->prstatus);
2051 fill_psinfo(info->psinfo, current->group_leader, current->mm);
2052 fill_note(info->notes + 1, "CORE", NT_PRPSINFO,
2053 sizeof(*info->psinfo), info->psinfo);
2054
Denys Vlasenko2aa362c2012-10-04 17:15:36 -07002055 fill_siginfo_note(info->notes + 2, &info->csigdata, siginfo);
2056 fill_auxv_note(info->notes + 3, current->mm);
Dan Aloni72023652013-09-30 13:45:02 -07002057 info->numnote = 4;
Roland McGrath3aba4812008-01-30 13:31:44 +01002058
Dan Aloni72023652013-09-30 13:45:02 -07002059 if (fill_files_note(info->notes + info->numnote) == 0) {
2060 info->notes_files = info->notes + info->numnote;
2061 info->numnote++;
2062 }
Roland McGrath3aba4812008-01-30 13:31:44 +01002063
2064 /* Try to dump the FPU. */
2065 info->prstatus->pr_fpvalid = elf_core_copy_task_fpregs(current, regs,
2066 info->fpu);
2067 if (info->prstatus->pr_fpvalid)
2068 fill_note(info->notes + info->numnote++,
2069 "CORE", NT_PRFPREG, sizeof(*info->fpu), info->fpu);
2070#ifdef ELF_CORE_COPY_XFPREGS
2071 if (elf_core_copy_task_xfpregs(current, info->xfpu))
2072 fill_note(info->notes + info->numnote++,
2073 "LINUX", ELF_CORE_XFPREG_TYPE,
2074 sizeof(*info->xfpu), info->xfpu);
2075#endif
2076
2077 return 1;
Roland McGrath3aba4812008-01-30 13:31:44 +01002078}
2079
2080static size_t get_note_info_size(struct elf_note_info *info)
2081{
2082 int sz = 0;
2083 int i;
2084
2085 for (i = 0; i < info->numnote; i++)
2086 sz += notesize(info->notes + i);
2087
2088 sz += info->thread_status_size;
2089
2090 return sz;
2091}
2092
2093static int write_note_info(struct elf_note_info *info,
Al Viroecc8c772013-10-05 15:32:35 -04002094 struct coredump_params *cprm)
Roland McGrath3aba4812008-01-30 13:31:44 +01002095{
Alexey Dobriyan93f044e2019-03-07 16:28:59 -08002096 struct elf_thread_status *ets;
Roland McGrath3aba4812008-01-30 13:31:44 +01002097 int i;
Roland McGrath3aba4812008-01-30 13:31:44 +01002098
2099 for (i = 0; i < info->numnote; i++)
Al Viroecc8c772013-10-05 15:32:35 -04002100 if (!writenote(info->notes + i, cprm))
Roland McGrath3aba4812008-01-30 13:31:44 +01002101 return 0;
2102
2103 /* write out the thread status notes section */
Alexey Dobriyan93f044e2019-03-07 16:28:59 -08002104 list_for_each_entry(ets, &info->thread_list, list) {
2105 for (i = 0; i < ets->num_notes; i++)
2106 if (!writenote(&ets->notes[i], cprm))
Roland McGrath3aba4812008-01-30 13:31:44 +01002107 return 0;
2108 }
2109
2110 return 1;
2111}
2112
2113static void free_note_info(struct elf_note_info *info)
2114{
2115 while (!list_empty(&info->thread_list)) {
2116 struct list_head *tmp = info->thread_list.next;
2117 list_del(tmp);
2118 kfree(list_entry(tmp, struct elf_thread_status, list));
2119 }
2120
Dan Aloni72023652013-09-30 13:45:02 -07002121 /* Free data possibly allocated by fill_files_note(): */
2122 if (info->notes_files)
Alexey Dobriyan86a2bb52018-06-14 15:27:24 -07002123 kvfree(info->notes_files->data);
Denys Vlasenko2aa362c2012-10-04 17:15:36 -07002124
Roland McGrath3aba4812008-01-30 13:31:44 +01002125 kfree(info->prstatus);
2126 kfree(info->psinfo);
2127 kfree(info->notes);
2128 kfree(info->fpu);
2129#ifdef ELF_CORE_COPY_XFPREGS
2130 kfree(info->xfpu);
2131#endif
2132}
2133
Roland McGrath4206d3a2008-01-30 13:31:45 +01002134#endif
2135
Roland McGrathf47aef52007-01-26 00:56:49 -08002136static struct vm_area_struct *first_vma(struct task_struct *tsk,
2137 struct vm_area_struct *gate_vma)
2138{
2139 struct vm_area_struct *ret = tsk->mm->mmap;
2140
2141 if (ret)
2142 return ret;
2143 return gate_vma;
2144}
2145/*
2146 * Helper function for iterating across a vma list. It ensures that the caller
2147 * will visit `gate_vma' prior to terminating the search.
2148 */
2149static struct vm_area_struct *next_vma(struct vm_area_struct *this_vma,
2150 struct vm_area_struct *gate_vma)
2151{
2152 struct vm_area_struct *ret;
2153
2154 ret = this_vma->vm_next;
2155 if (ret)
2156 return ret;
2157 if (this_vma == gate_vma)
2158 return NULL;
2159 return gate_vma;
2160}
2161
Daisuke HATAYAMA8d9032b2010-03-05 13:44:10 -08002162static void fill_extnum_info(struct elfhdr *elf, struct elf_shdr *shdr4extnum,
2163 elf_addr_t e_shoff, int segs)
2164{
2165 elf->e_shoff = e_shoff;
2166 elf->e_shentsize = sizeof(*shdr4extnum);
2167 elf->e_shnum = 1;
2168 elf->e_shstrndx = SHN_UNDEF;
2169
2170 memset(shdr4extnum, 0, sizeof(*shdr4extnum));
2171
2172 shdr4extnum->sh_type = SHT_NULL;
2173 shdr4extnum->sh_size = elf->e_shnum;
2174 shdr4extnum->sh_link = elf->e_shstrndx;
2175 shdr4extnum->sh_info = segs;
2176}
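/*
 * Note (editorial): PN_XNUM is 0xffff. When elf_core_dump() below clamps
 * e_phnum to it, the true segment count travels in the sh_info field set
 * here; a reader recovers it as, roughly,
 *
 *	segs = ehdr.e_phnum == PN_XNUM ? shdr0.sh_info : ehdr.e_phnum;
 */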
2177
Linus Torvalds1da177e2005-04-16 15:20:36 -07002178/*
2179 * Actual dumper
2180 *
2181 * This is a two-pass process; first we find the offsets of the bits,
2182 * and then they are actually written out. If we run out of core limit
2183 * we just truncate.
2184 */
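/*
 * Sketch of the resulting file layout (editorial; one PT_NOTE segment,
 * N PT_LOAD segments, bracketed parts optional):
 *
 *	ELF header
 *	program headers: 1 PT_NOTE + N PT_LOAD [+ arch extra phdrs]
 *	note data: prstatus, prpsinfo, siginfo, auxv, files, regsets
 *	padding up to dataoff (ELF_EXEC_PAGESIZE aligned)
 *	PT_LOAD contents in vma order, vma_filesz[i] bytes each
 *	[arch extra data]
 *	[one extra section header when e_phnum == PN_XNUM]
 */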
Masami Hiramatsuf6151df2009-12-17 15:27:16 -08002185static int elf_core_dump(struct coredump_params *cprm)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002186{
Linus Torvalds1da177e2005-04-16 15:20:36 -07002187 int has_dumped = 0;
2188 mm_segment_t fs;
Jungseung Lee52f5592e2014-12-10 15:52:16 -08002189 int segs, i;
2190 size_t vma_data_size = 0;
Roland McGrathf47aef52007-01-26 00:56:49 -08002191 struct vm_area_struct *vma, *gate_vma;
Alexey Dobriyan225a3f52020-01-30 22:17:04 -08002192 struct elfhdr elf;
Al Virocdc3d562013-10-05 22:24:29 -04002193 loff_t offset = 0, dataoff;
Dan Aloni72023652013-09-30 13:45:02 -07002194 struct elf_note_info info = { };
Daisuke HATAYAMA93eb2112010-03-05 13:44:09 -08002195 struct elf_phdr *phdr4note = NULL;
Daisuke HATAYAMA8d9032b2010-03-05 13:44:10 -08002196 struct elf_shdr *shdr4extnum = NULL;
2197 Elf_Half e_phnum;
2198 elf_addr_t e_shoff;
Jungseung Lee52f5592e2014-12-10 15:52:16 -08002199 elf_addr_t *vma_filesz = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002200
2201 /*
2202 * We no longer stop all VM operations.
2203 *
Jesper Juhlf4e5cc22006-06-23 02:05:35 -07002204 * This is because those processes that could possibly change map_count
2205 * or the mmap / vma pages are now blocked in do_exit on current
2206 * finishing this core dump.
Linus Torvalds1da177e2005-04-16 15:20:36 -07002207 *
2208 * Only ptrace can touch these memory addresses, but it doesn't change
Jesper Juhlf4e5cc22006-06-23 02:05:35 -07002209 * the map_count or the pages allocated. So no possibility of crashing
Linus Torvalds1da177e2005-04-16 15:20:36 -07002210 * exists while dumping the mm->vm_next areas to the core file.
2211 */
2212
KAMEZAWA Hiroyuki341c87b2009-06-30 11:41:23 -07002213 /*
 2214 * The number of segs is recorded in the ELF header as a 16-bit value.
 2215 * Please check the DEFAULT_MAX_MAP_COUNT definition when modifying this.
2216 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002217 segs = current->mm->map_count;
Daisuke HATAYAMA1fcccba2010-03-05 13:44:07 -08002218 segs += elf_core_extra_phdrs();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002219
Stephen Wilson31db58b2011-03-13 15:49:15 -04002220 gate_vma = get_gate_vma(current->mm);
Roland McGrathf47aef52007-01-26 00:56:49 -08002221 if (gate_vma != NULL)
2222 segs++;
2223
Daisuke HATAYAMA8d9032b2010-03-05 13:44:10 -08002224 /* for notes section */
2225 segs++;
2226
2227 /* If segs > PN_XNUM(0xffff), then e_phnum overflows. To avoid
 2228 * this, the kernel supports extended numbering. Have a look at
2229 * include/linux/elf.h for further information. */
2230 e_phnum = segs > PN_XNUM ? PN_XNUM : segs;
2231
Roland McGrath3aba4812008-01-30 13:31:44 +01002232 /*
2233 * Collect all the non-memory information about the process for the
2234 * notes. This also sets up the file header.
2235 */
Alexey Dobriyan225a3f52020-01-30 22:17:04 -08002236 if (!fill_note_info(&elf, e_phnum, &info, cprm->siginfo, cprm->regs))
Roland McGrath3aba4812008-01-30 13:31:44 +01002237 goto cleanup;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002238
2239 has_dumped = 1;
Oleg Nesterov079148b2013-04-30 15:28:16 -07002240
Linus Torvalds1da177e2005-04-16 15:20:36 -07002241 fs = get_fs();
2242 set_fs(KERNEL_DS);
2243
Alexey Dobriyan225a3f52020-01-30 22:17:04 -08002244 offset += sizeof(elf); /* Elf header */
Daisuke HATAYAMA8d9032b2010-03-05 13:44:10 -08002245 offset += segs * sizeof(struct elf_phdr); /* Program headers */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002246
2247 /* Write notes phdr entry */
2248 {
Roland McGrath3aba4812008-01-30 13:31:44 +01002249 size_t sz = get_note_info_size(&info);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002250
Michael Ellermane5501492007-09-19 14:38:12 +10002251 sz += elf_coredump_extra_notes_size();
Dwayne Grant McConnellbf1ab972006-11-23 00:46:37 +01002252
Daisuke HATAYAMA93eb2112010-03-05 13:44:09 -08002253 phdr4note = kmalloc(sizeof(*phdr4note), GFP_KERNEL);
2254 if (!phdr4note)
Daisuke HATAYAMA088e7af2010-03-05 13:44:06 -08002255 goto end_coredump;
Daisuke HATAYAMA93eb2112010-03-05 13:44:09 -08002256
2257 fill_elf_note_phdr(phdr4note, sz, offset);
2258 offset += sz;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002259 }
2260
Linus Torvalds1da177e2005-04-16 15:20:36 -07002261 dataoff = offset = roundup(offset, ELF_EXEC_PAGESIZE);
2262
Alexey Dobriyan1fbede62020-01-30 22:17:10 -08002263 /*
 2264 * A process with zero vmas will get ZERO_SIZE_PTR here.
 2265 * Let the coredump continue so that at least register state is captured.
2266 */
Alexey Dobriyan86a2bb52018-06-14 15:27:24 -07002267 vma_filesz = kvmalloc(array_size(sizeof(*vma_filesz), (segs - 1)),
2268 GFP_KERNEL);
Alexey Dobriyan1fbede62020-01-30 22:17:10 -08002269 if (!vma_filesz)
Jungseung Lee52f5592e2014-12-10 15:52:16 -08002270 goto end_coredump;
2271
2272 for (i = 0, vma = first_vma(current, gate_vma); vma != NULL;
2273 vma = next_vma(vma, gate_vma)) {
2274 unsigned long dump_size;
2275
2276 dump_size = vma_dump_size(vma, cprm->mm_flags);
2277 vma_filesz[i++] = dump_size;
2278 vma_data_size += dump_size;
2279 }
2280
2281 offset += vma_data_size;
Daisuke HATAYAMA8d9032b2010-03-05 13:44:10 -08002282 offset += elf_core_extra_data_size();
2283 e_shoff = offset;
2284
2285 if (e_phnum == PN_XNUM) {
2286 shdr4extnum = kmalloc(sizeof(*shdr4extnum), GFP_KERNEL);
2287 if (!shdr4extnum)
2288 goto end_coredump;
Alexey Dobriyan225a3f52020-01-30 22:17:04 -08002289 fill_extnum_info(&elf, shdr4extnum, e_shoff, segs);
Daisuke HATAYAMA8d9032b2010-03-05 13:44:10 -08002290 }
2291
2292 offset = dataoff;
2293
Alexey Dobriyan225a3f52020-01-30 22:17:04 -08002294 if (!dump_emit(cprm, &elf, sizeof(elf)))
Daisuke HATAYAMA93eb2112010-03-05 13:44:09 -08002295 goto end_coredump;
2296
Al Viroecc8c772013-10-05 15:32:35 -04002297 if (!dump_emit(cprm, phdr4note, sizeof(*phdr4note)))
Daisuke HATAYAMA93eb2112010-03-05 13:44:09 -08002298 goto end_coredump;
2299
Linus Torvalds1da177e2005-04-16 15:20:36 -07002300 /* Write program headers for segments dump */
Jungseung Lee52f5592e2014-12-10 15:52:16 -08002301 for (i = 0, vma = first_vma(current, gate_vma); vma != NULL;
Roland McGrathf47aef52007-01-26 00:56:49 -08002302 vma = next_vma(vma, gate_vma)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002303 struct elf_phdr phdr;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002304
2305 phdr.p_type = PT_LOAD;
2306 phdr.p_offset = offset;
2307 phdr.p_vaddr = vma->vm_start;
2308 phdr.p_paddr = 0;
Jungseung Lee52f5592e2014-12-10 15:52:16 -08002309 phdr.p_filesz = vma_filesz[i++];
Roland McGrath82df3972007-10-16 23:27:02 -07002310 phdr.p_memsz = vma->vm_end - vma->vm_start;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002311 offset += phdr.p_filesz;
2312 phdr.p_flags = vma->vm_flags & VM_READ ? PF_R : 0;
Jesper Juhlf4e5cc22006-06-23 02:05:35 -07002313 if (vma->vm_flags & VM_WRITE)
2314 phdr.p_flags |= PF_W;
2315 if (vma->vm_flags & VM_EXEC)
2316 phdr.p_flags |= PF_X;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002317 phdr.p_align = ELF_EXEC_PAGESIZE;
2318
Al Viroecc8c772013-10-05 15:32:35 -04002319 if (!dump_emit(cprm, &phdr, sizeof(phdr)))
Daisuke HATAYAMA088e7af2010-03-05 13:44:06 -08002320 goto end_coredump;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002321 }
2322
Al Viro506f21c2013-10-05 17:22:57 -04002323 if (!elf_core_write_extra_phdrs(cprm, offset))
Daisuke HATAYAMA1fcccba2010-03-05 13:44:07 -08002324 goto end_coredump;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002325
2326 /* write out the notes section */
Al Viroecc8c772013-10-05 15:32:35 -04002327 if (!write_note_info(&info, cprm))
Roland McGrath3aba4812008-01-30 13:31:44 +01002328 goto end_coredump;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002329
Al Virocdc3d562013-10-05 22:24:29 -04002330 if (elf_coredump_extra_notes_write(cprm))
Michael Ellermane5501492007-09-19 14:38:12 +10002331 goto end_coredump;
Dwayne Grant McConnellbf1ab972006-11-23 00:46:37 +01002332
Andi Kleend025c9d2006-09-30 23:29:28 -07002333 /* Align to page */
Mateusz Guzik1607f092016-06-05 23:14:14 +02002334 if (!dump_skip(cprm, dataoff - cprm->pos))
Hugh Dickinsf3e8fcc2009-09-21 17:03:25 -07002335 goto end_coredump;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002336
Jungseung Lee52f5592e2014-12-10 15:52:16 -08002337 for (i = 0, vma = first_vma(current, gate_vma); vma != NULL;
Roland McGrathf47aef52007-01-26 00:56:49 -08002338 vma = next_vma(vma, gate_vma)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002339 unsigned long addr;
Roland McGrath82df3972007-10-16 23:27:02 -07002340 unsigned long end;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002341
Jungseung Lee52f5592e2014-12-10 15:52:16 -08002342 end = vma->vm_start + vma_filesz[i++];
Linus Torvalds1da177e2005-04-16 15:20:36 -07002343
Roland McGrath82df3972007-10-16 23:27:02 -07002344 for (addr = vma->vm_start; addr < end; addr += PAGE_SIZE) {
Jesper Juhlf4e5cc22006-06-23 02:05:35 -07002345 struct page *page;
Hugh Dickinsf3e8fcc2009-09-21 17:03:25 -07002346 int stop;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002347
Hugh Dickinsf3e8fcc2009-09-21 17:03:25 -07002348 page = get_dump_page(addr);
2349 if (page) {
2350 void *kaddr = kmap(page);
Al Viro13046ec2013-10-05 18:08:47 -04002351 stop = !dump_emit(cprm, kaddr, PAGE_SIZE);
Hugh Dickinsf3e8fcc2009-09-21 17:03:25 -07002352 kunmap(page);
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03002353 put_page(page);
Hugh Dickinsf3e8fcc2009-09-21 17:03:25 -07002354 } else
Al Viro9b56d542013-10-08 09:26:08 -04002355 stop = !dump_skip(cprm, PAGE_SIZE);
Hugh Dickinsf3e8fcc2009-09-21 17:03:25 -07002356 if (stop)
2357 goto end_coredump;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002358 }
2359 }
Dave Kleikamp4d22c752017-01-11 13:25:00 -06002360 dump_truncate(cprm);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002361
Al Viroaa3e7ea2013-10-05 17:50:15 -04002362 if (!elf_core_write_extra_data(cprm))
Daisuke HATAYAMA1fcccba2010-03-05 13:44:07 -08002363 goto end_coredump;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002364
Daisuke HATAYAMA8d9032b2010-03-05 13:44:10 -08002365 if (e_phnum == PN_XNUM) {
Al Viro13046ec2013-10-05 18:08:47 -04002366 if (!dump_emit(cprm, shdr4extnum, sizeof(*shdr4extnum)))
Daisuke HATAYAMA8d9032b2010-03-05 13:44:10 -08002367 goto end_coredump;
2368 }
2369
Linus Torvalds1da177e2005-04-16 15:20:36 -07002370end_coredump:
2371 set_fs(fs);
2372
2373cleanup:
Roland McGrath3aba4812008-01-30 13:31:44 +01002374 free_note_info(&info);
Daisuke HATAYAMA8d9032b2010-03-05 13:44:10 -08002375 kfree(shdr4extnum);
Alexey Dobriyan86a2bb52018-06-14 15:27:24 -07002376 kvfree(vma_filesz);
Daisuke HATAYAMA93eb2112010-03-05 13:44:09 -08002377 kfree(phdr4note);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002378 return has_dumped;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002379}
2380
Christoph Hellwig698ba7b2009-12-15 16:47:37 -08002381#endif /* CONFIG_ELF_CORE */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002382
2383static int __init init_elf_binfmt(void)
2384{
Al Viro8fc3dc52012-03-17 03:05:16 -04002385 register_binfmt(&elf_format);
2386 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002387}
2388
2389static void __exit exit_elf_binfmt(void)
2390{
 2391 /* Remove the ELF loader. */
2392 unregister_binfmt(&elf_format);
2393}
2394
2395core_initcall(init_elf_binfmt);
2396module_exit(exit_elf_binfmt);
2397MODULE_LICENSE("GPL");