blob: 1fb67e506b68b90ec80d53c69af8381aed933689 [file] [log] [blame]
Thomas Gleixner09c434b2019-05-19 13:08:20 +01001// SPDX-License-Identifier: GPL-2.0-only
Linus Torvalds1da177e2005-04-16 15:20:36 -07002/*
3 * linux/fs/binfmt_elf.c
4 *
5 * These are the functions used to load ELF format executables as used
6 * on SVr4 machines. Information on the format may be found in the book
7 * "UNIX SYSTEM V RELEASE 4 Programmers Guide: Ansi C and Programming Support
8 * Tools".
9 *
10 * Copyright 1993, 1994: Eric Youngdale (ericy@cais.com).
11 */
12
13#include <linux/module.h>
14#include <linux/kernel.h>
15#include <linux/fs.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070016#include <linux/mm.h>
17#include <linux/mman.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070018#include <linux/errno.h>
19#include <linux/signal.h>
20#include <linux/binfmts.h>
21#include <linux/string.h>
22#include <linux/file.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070023#include <linux/slab.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070024#include <linux/personality.h>
25#include <linux/elfcore.h>
26#include <linux/init.h>
27#include <linux/highuid.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070028#include <linux/compiler.h>
29#include <linux/highmem.h>
30#include <linux/pagemap.h>
Denys Vlasenko2aa362c2012-10-04 17:15:36 -070031#include <linux/vmalloc.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070032#include <linux/security.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070033#include <linux/random.h>
Jesper Juhlf4e5cc22006-06-23 02:05:35 -070034#include <linux/elf.h>
Kees Cookd1fd8362015-04-14 15:48:07 -070035#include <linux/elf-randomize.h>
Alexey Dobriyan7e80d0d2007-05-08 00:28:59 -070036#include <linux/utsname.h>
Daisuke HATAYAMA088e7af2010-03-05 13:44:06 -080037#include <linux/coredump.h>
Frederic Weisbecker6fac4822012-11-13 14:20:55 +010038#include <linux/sched.h>
Ingo Molnarf7ccbae2017-02-08 18:51:30 +010039#include <linux/sched/coredump.h>
Ingo Molnar68db0cf2017-02-08 18:51:37 +010040#include <linux/sched/task_stack.h>
Ingo Molnar32ef5512017-02-05 11:48:36 +010041#include <linux/sched/cputime.h>
Dave Martin00e19ce2020-03-16 16:50:44 +000042#include <linux/sizes.h>
43#include <linux/types.h>
Ingo Molnar5b825c32017-02-02 17:54:15 +010044#include <linux/cred.h>
Ross Zwisler50378352015-10-05 16:33:36 -060045#include <linux/dax.h>
Linus Torvalds7c0f6ba2016-12-24 11:46:01 -080046#include <linux/uaccess.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070047#include <asm/param.h>
48#include <asm/page.h>
49
Dave Martin00e19ce2020-03-16 16:50:44 +000050#ifndef ELF_COMPAT
51#define ELF_COMPAT 0
52#endif
53
Denys Vlasenko2aa362c2012-10-04 17:15:36 -070054#ifndef user_long_t
55#define user_long_t long
56#endif
Denys Vlasenko49ae4d42012-10-04 17:15:35 -070057#ifndef user_siginfo_t
58#define user_siginfo_t siginfo_t
59#endif
60
Nicolas Pitre47552002017-08-16 16:05:13 -040061/* That's for binfmt_elf_fdpic to deal with */
62#ifndef elf_check_fdpic
63#define elf_check_fdpic(ex) false
64#endif
65
Al Viro71613c32012-10-20 22:00:48 -040066static int load_elf_binary(struct linux_binprm *bprm);
Linus Torvalds1da177e2005-04-16 15:20:36 -070067
Josh Triplett69369a72014-04-03 14:48:27 -070068#ifdef CONFIG_USELIB
69static int load_elf_library(struct file *);
70#else
71#define load_elf_library NULL
72#endif
73
Linus Torvalds1da177e2005-04-16 15:20:36 -070074/*
75 * If we don't support core dumping, then supply a NULL so we
76 * don't even try.
77 */
Christoph Hellwig698ba7b2009-12-15 16:47:37 -080078#ifdef CONFIG_ELF_CORE
Masami Hiramatsuf6151df2009-12-17 15:27:16 -080079static int elf_core_dump(struct coredump_params *cprm);
Linus Torvalds1da177e2005-04-16 15:20:36 -070080#else
81#define elf_core_dump NULL
82#endif
83
84#if ELF_EXEC_PAGESIZE > PAGE_SIZE
Jesper Juhlf4e5cc22006-06-23 02:05:35 -070085#define ELF_MIN_ALIGN ELF_EXEC_PAGESIZE
Linus Torvalds1da177e2005-04-16 15:20:36 -070086#else
Jesper Juhlf4e5cc22006-06-23 02:05:35 -070087#define ELF_MIN_ALIGN PAGE_SIZE
Linus Torvalds1da177e2005-04-16 15:20:36 -070088#endif
89
90#ifndef ELF_CORE_EFLAGS
91#define ELF_CORE_EFLAGS 0
92#endif
93
94#define ELF_PAGESTART(_v) ((_v) & ~(unsigned long)(ELF_MIN_ALIGN-1))
95#define ELF_PAGEOFFSET(_v) ((_v) & (ELF_MIN_ALIGN-1))
96#define ELF_PAGEALIGN(_v) (((_v) + ELF_MIN_ALIGN - 1) & ~(ELF_MIN_ALIGN - 1))
97
/*
 * Registration record handed to the binfmt core: wires the ELF loader,
 * shared-library loader and core dumper into the exec machinery.
 * load_shlib/core_dump may be NULL when USELIB/ELF_CORE are not configured.
 */
static struct linux_binfmt elf_format = {
	.module		= THIS_MODULE,
	.load_binary	= load_elf_binary,
	.load_shlib	= load_elf_library,
	.core_dump	= elf_core_dump,
	.min_coredump	= ELF_EXEC_PAGESIZE,
};
105
Alexey Dobriyan18676ff2020-01-30 22:17:01 -0800106#define BAD_ADDR(x) (unlikely((unsigned long)(x) >= TASK_SIZE))
Linus Torvalds1da177e2005-04-16 15:20:36 -0700107
Denys Vlasenko16e72e92017-02-22 15:45:16 -0800108static int set_brk(unsigned long start, unsigned long end, int prot)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700109{
110 start = ELF_PAGEALIGN(start);
111 end = ELF_PAGEALIGN(end);
112 if (end > start) {
Denys Vlasenko16e72e92017-02-22 15:45:16 -0800113 /*
114 * Map the last of the bss segment.
115 * If the header is requesting these pages to be
116 * executable, honour that (ppc32 needs this).
117 */
118 int error = vm_brk_flags(start, end - start,
119 prot & PROT_EXEC ? VM_EXEC : 0);
Linus Torvalds5d22fc22016-05-27 15:57:31 -0700120 if (error)
121 return error;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700122 }
123 current->mm->start_brk = current->mm->brk = end;
124 return 0;
125}
126
Linus Torvalds1da177e2005-04-16 15:20:36 -0700127/* We need to explicitly zero any fractional pages
128 after the data section (i.e. bss). This would
129 contain the junk from the file that should not
Jesper Juhlf4e5cc22006-06-23 02:05:35 -0700130 be in memory
131 */
Linus Torvalds1da177e2005-04-16 15:20:36 -0700132static int padzero(unsigned long elf_bss)
133{
134 unsigned long nbyte;
135
136 nbyte = ELF_PAGEOFFSET(elf_bss);
137 if (nbyte) {
138 nbyte = ELF_MIN_ALIGN - nbyte;
139 if (clear_user((void __user *) elf_bss, nbyte))
140 return -EFAULT;
141 }
142 return 0;
143}
144
Ohad Ben-Cohen09c6dd32008-02-03 18:05:15 +0200145/* Let's use some macros to make this stack manipulation a little clearer */
Linus Torvalds1da177e2005-04-16 15:20:36 -0700146#ifdef CONFIG_STACK_GROWSUP
147#define STACK_ADD(sp, items) ((elf_addr_t __user *)(sp) + (items))
148#define STACK_ROUND(sp, items) \
149 ((15 + (unsigned long) ((sp) + (items))) &~ 15UL)
Jesper Juhlf4e5cc22006-06-23 02:05:35 -0700150#define STACK_ALLOC(sp, len) ({ \
151 elf_addr_t __user *old_sp = (elf_addr_t __user *)sp; sp += len; \
152 old_sp; })
Linus Torvalds1da177e2005-04-16 15:20:36 -0700153#else
154#define STACK_ADD(sp, items) ((elf_addr_t __user *)(sp) - (items))
155#define STACK_ROUND(sp, items) \
156 (((unsigned long) (sp - items)) &~ 15UL)
157#define STACK_ALLOC(sp, len) ({ sp -= len ; sp; })
158#endif
159
Nathan Lynch483fad12008-07-22 04:48:46 +1000160#ifndef ELF_BASE_PLATFORM
161/*
162 * AT_BASE_PLATFORM indicates the "real" hardware/microarchitecture.
163 * If the arch defines ELF_BASE_PLATFORM (in asm/elf.h), the value
164 * will be copied to the user stack in the same manner as AT_PLATFORM.
165 */
166#define ELF_BASE_PLATFORM NULL
167#endif
168
/*
 * create_elf_tables() - build the initial userspace stack image
 * @bprm:             binprm with the copied-in argv/envp strings and stack top
 * @exec:             ELF header of the binary being executed
 * @load_addr:        load bias of the main binary (for AT_PHDR)
 * @interp_load_addr: load address of the interpreter (for AT_BASE)
 * @e_entry:          entry point to advertise via AT_ENTRY
 *
 * Lays out, from the current bprm->p downwards (upwards on STACK_GROWSUP):
 * platform strings, 16 random bytes for AT_RANDOM, then the auxiliary
 * vector (also kept in mm->saved_auxv for /proc/pid/auxv), argc, the argv
 * pointer array, the envp pointer array and finally copies the auxv onto
 * the stack.  Returns 0 on success, -EFAULT/-EINVAL on failure.
 */
static int
create_elf_tables(struct linux_binprm *bprm, const struct elfhdr *exec,
		unsigned long load_addr, unsigned long interp_load_addr,
		unsigned long e_entry)
{
	struct mm_struct *mm = current->mm;
	unsigned long p = bprm->p;
	int argc = bprm->argc;
	int envc = bprm->envc;
	elf_addr_t __user *sp;
	elf_addr_t __user *u_platform;
	elf_addr_t __user *u_base_platform;
	elf_addr_t __user *u_rand_bytes;
	const char *k_platform = ELF_PLATFORM;
	const char *k_base_platform = ELF_BASE_PLATFORM;
	unsigned char k_rand_bytes[16];
	int items;
	elf_addr_t *elf_info;
	int ei_index;
	const struct cred *cred = current_cred();
	struct vm_area_struct *vma;

	/*
	 * In some cases (e.g. Hyper-Threading), we want to avoid L1
	 * evictions by the processes running on the same package. One
	 * thing we can do is to shuffle the initial stack for them.
	 */

	p = arch_align_stack(p);

	/*
	 * If this architecture has a platform capability string, copy it
	 * to userspace.  In some cases (Sparc), this info is impossible
	 * for userspace to get any other way, in others (i386) it is
	 * merely difficult.
	 */
	u_platform = NULL;
	if (k_platform) {
		size_t len = strlen(k_platform) + 1;

		u_platform = (elf_addr_t __user *)STACK_ALLOC(p, len);
		if (__copy_to_user(u_platform, k_platform, len))
			return -EFAULT;
	}

	/*
	 * If this architecture has a "base" platform capability
	 * string, copy it to userspace.
	 */
	u_base_platform = NULL;
	if (k_base_platform) {
		size_t len = strlen(k_base_platform) + 1;

		u_base_platform = (elf_addr_t __user *)STACK_ALLOC(p, len);
		if (__copy_to_user(u_base_platform, k_base_platform, len))
			return -EFAULT;
	}

	/*
	 * Generate 16 random bytes for userspace PRNG seeding.
	 */
	get_random_bytes(k_rand_bytes, sizeof(k_rand_bytes));
	u_rand_bytes = (elf_addr_t __user *)
		       STACK_ALLOC(p, sizeof(k_rand_bytes));
	if (__copy_to_user(u_rand_bytes, k_rand_bytes, sizeof(k_rand_bytes)))
		return -EFAULT;

	/* Create the ELF interpreter info */
	elf_info = (elf_addr_t *)mm->saved_auxv;
	/* update AT_VECTOR_SIZE_BASE if the number of NEW_AUX_ENT() changes */
#define NEW_AUX_ENT(id, val) \
	do { \
		*elf_info++ = id; \
		*elf_info++ = val; \
	} while (0)

#ifdef ARCH_DLINFO
	/*
	 * ARCH_DLINFO must come first so PPC can do its special alignment of
	 * AUXV.
	 * update AT_VECTOR_SIZE_ARCH if the number of NEW_AUX_ENT() in
	 * ARCH_DLINFO changes
	 */
	ARCH_DLINFO;
#endif
	NEW_AUX_ENT(AT_HWCAP, ELF_HWCAP);
	NEW_AUX_ENT(AT_PAGESZ, ELF_EXEC_PAGESIZE);
	NEW_AUX_ENT(AT_CLKTCK, CLOCKS_PER_SEC);
	NEW_AUX_ENT(AT_PHDR, load_addr + exec->e_phoff);
	NEW_AUX_ENT(AT_PHENT, sizeof(struct elf_phdr));
	NEW_AUX_ENT(AT_PHNUM, exec->e_phnum);
	NEW_AUX_ENT(AT_BASE, interp_load_addr);
	NEW_AUX_ENT(AT_FLAGS, 0);
	NEW_AUX_ENT(AT_ENTRY, e_entry);
	/* UIDs/GIDs are translated into the user namespace of the task. */
	NEW_AUX_ENT(AT_UID, from_kuid_munged(cred->user_ns, cred->uid));
	NEW_AUX_ENT(AT_EUID, from_kuid_munged(cred->user_ns, cred->euid));
	NEW_AUX_ENT(AT_GID, from_kgid_munged(cred->user_ns, cred->gid));
	NEW_AUX_ENT(AT_EGID, from_kgid_munged(cred->user_ns, cred->egid));
	NEW_AUX_ENT(AT_SECURE, bprm->secureexec);
	NEW_AUX_ENT(AT_RANDOM, (elf_addr_t)(unsigned long)u_rand_bytes);
#ifdef ELF_HWCAP2
	NEW_AUX_ENT(AT_HWCAP2, ELF_HWCAP2);
#endif
	NEW_AUX_ENT(AT_EXECFN, bprm->exec);
	if (k_platform) {
		NEW_AUX_ENT(AT_PLATFORM,
			    (elf_addr_t)(unsigned long)u_platform);
	}
	if (k_base_platform) {
		NEW_AUX_ENT(AT_BASE_PLATFORM,
			    (elf_addr_t)(unsigned long)u_base_platform);
	}
	if (bprm->interp_flags & BINPRM_FLAGS_EXECFD) {
		NEW_AUX_ENT(AT_EXECFD, bprm->interp_data);
	}
#undef NEW_AUX_ENT
	/* AT_NULL is zero; clear the rest too */
	memset(elf_info, 0, (char *)mm->saved_auxv +
			sizeof(mm->saved_auxv) - (char *)elf_info);

	/* And advance past the AT_NULL entry. */
	elf_info += 2;

	ei_index = elf_info - (elf_addr_t *)mm->saved_auxv;
	sp = STACK_ADD(p, ei_index);

	/* argc slot + argv pointers + NULL + envp pointers + NULL */
	items = (argc + 1) + (envc + 1) + 1;
	bprm->p = STACK_ROUND(sp, items);

	/* Point sp at the lowest address on the stack */
#ifdef CONFIG_STACK_GROWSUP
	sp = (elf_addr_t __user *)bprm->p - items - ei_index;
	bprm->exec = (unsigned long)sp; /* XXX: PARISC HACK */
#else
	sp = (elf_addr_t __user *)bprm->p;
#endif


	/*
	 * Grow the stack manually; some architectures have a limit on how
	 * far ahead a user-space access may be in order to grow the stack.
	 */
	vma = find_extend_vma(mm, bprm->p);
	if (!vma)
		return -EFAULT;

	/* Now, let's put argc (and argv, envp if appropriate) on the stack */
	if (__put_user(argc, sp++))
		return -EFAULT;

	/* Populate list of argv pointers back to argv strings. */
	p = mm->arg_end = mm->arg_start;
	while (argc-- > 0) {
		size_t len;
		if (__put_user((elf_addr_t)p, sp++))
			return -EFAULT;
		len = strnlen_user((void __user *)p, MAX_ARG_STRLEN);
		if (!len || len > MAX_ARG_STRLEN)
			return -EINVAL;
		/* step over the NUL-terminated string just referenced */
		p += len;
	}
	if (__put_user(0, sp++))
		return -EFAULT;
	mm->arg_end = p;

	/* Populate list of envp pointers back to envp strings. */
	mm->env_end = mm->env_start = p;
	while (envc-- > 0) {
		size_t len;
		if (__put_user((elf_addr_t)p, sp++))
			return -EFAULT;
		len = strnlen_user((void __user *)p, MAX_ARG_STRLEN);
		if (!len || len > MAX_ARG_STRLEN)
			return -EINVAL;
		p += len;
	}
	if (__put_user(0, sp++))
		return -EFAULT;
	mm->env_end = p;

	/* Put the elf_info on the stack in the right place. */
	if (copy_to_user(sp, mm->saved_auxv, ei_index * sizeof(elf_addr_t)))
		return -EFAULT;
	return 0;
}
354
James Hoganc07380b2011-05-09 10:58:40 +0100355#ifndef elf_map
356
/*
 * elf_map() - mmap one PT_LOAD segment of an ELF image
 * @filep:      the ELF file being mapped
 * @addr:       requested virtual address (will be truncated to a page start)
 * @eppnt:      program header describing the segment
 * @prot:       PROT_* bits for the mapping
 * @type:       MAP_* flags for the mapping
 * @total_size: full span of the image for the first mapping, else 0
 *
 * Returns the address actually mapped, or an error value for which
 * BAD_ADDR() is true.
 */
static unsigned long elf_map(struct file *filep, unsigned long addr,
		const struct elf_phdr *eppnt, int prot, int type,
		unsigned long total_size)
{
	unsigned long map_addr;
	/* include the leading in-page offset so file offset stays aligned */
	unsigned long size = eppnt->p_filesz + ELF_PAGEOFFSET(eppnt->p_vaddr);
	unsigned long off = eppnt->p_offset - ELF_PAGEOFFSET(eppnt->p_vaddr);
	addr = ELF_PAGESTART(addr);
	size = ELF_PAGEALIGN(size);

	/* mmap() will return -EINVAL if given a zero size, but a
	 * segment with zero filesize is perfectly valid */
	if (!size)
		return addr;

	/*
	 * total_size is the size of the ELF (interpreter) image.
	 * The _first_ mmap needs to know the full size, otherwise
	 * randomization might put this image into an overlapping
	 * position with the ELF binary image. (since size < total_size)
	 * So we first map the 'big' image - and unmap the remainder at
	 * the end. (which unmap is needed for ELF images with holes.)
	 */
	if (total_size) {
		total_size = ELF_PAGEALIGN(total_size);
		map_addr = vm_mmap(filep, addr, total_size, prot, type, off);
		if (!BAD_ADDR(map_addr))
			vm_munmap(map_addr+size, total_size-size);
	} else
		map_addr = vm_mmap(filep, addr, size, prot, type, off);

	/* MAP_FIXED_NOREPLACE collisions are reported, not silently clobbered */
	if ((type & MAP_FIXED_NOREPLACE) &&
	    PTR_ERR((void *)map_addr) == -EEXIST)
		pr_info("%d (%s): Uhuuh, elf segment at %px requested but the memory is mapped already\n",
			task_pid_nr(current), current->comm, (void *)addr);

	return(map_addr);
}
395
James Hoganc07380b2011-05-09 10:58:40 +0100396#endif /* !elf_map */
397
Alexey Dobriyan49ac9812019-03-07 16:29:03 -0800398static unsigned long total_mapping_size(const struct elf_phdr *cmds, int nr)
Jiri Kosinacc503c12008-01-30 13:31:07 +0100399{
400 int i, first_idx = -1, last_idx = -1;
401
402 for (i = 0; i < nr; i++) {
403 if (cmds[i].p_type == PT_LOAD) {
404 last_idx = i;
405 if (first_idx == -1)
406 first_idx = i;
407 }
408 }
409 if (first_idx == -1)
410 return 0;
411
412 return cmds[last_idx].p_vaddr + cmds[last_idx].p_memsz -
413 ELF_PAGESTART(cmds[first_idx].p_vaddr);
414}
415
Alexey Dobriyan658c0332019-12-04 16:52:25 -0800416static int elf_read(struct file *file, void *buf, size_t len, loff_t pos)
417{
418 ssize_t rv;
419
420 rv = kernel_read(file, buf, len, &pos);
421 if (unlikely(rv != len)) {
422 return (rv < 0) ? rv : -EIO;
423 }
424 return 0;
425}
426
Paul Burton6a8d3892014-09-11 08:30:14 +0100427/**
428 * load_elf_phdrs() - load ELF program headers
429 * @elf_ex: ELF header of the binary whose program headers should be loaded
430 * @elf_file: the opened ELF binary file
431 *
432 * Loads ELF program headers from the binary file elf_file, which has the ELF
433 * header pointed to by elf_ex, into a newly allocated array. The caller is
434 * responsible for freeing the allocated data. Returns an ERR_PTR upon failure.
435 */
Alexey Dobriyan49ac9812019-03-07 16:29:03 -0800436static struct elf_phdr *load_elf_phdrs(const struct elfhdr *elf_ex,
Paul Burton6a8d3892014-09-11 08:30:14 +0100437 struct file *elf_file)
438{
439 struct elf_phdr *elf_phdata = NULL;
Alexey Dobriyanfaf1c312019-03-07 16:28:56 -0800440 int retval, err = -1;
Alexey Dobriyanfaf1c312019-03-07 16:28:56 -0800441 unsigned int size;
Paul Burton6a8d3892014-09-11 08:30:14 +0100442
443 /*
444 * If the size of this structure has changed, then punt, since
445 * we will be doing the wrong thing.
446 */
447 if (elf_ex->e_phentsize != sizeof(struct elf_phdr))
448 goto out;
449
450 /* Sanity check the number of program headers... */
Paul Burton6a8d3892014-09-11 08:30:14 +0100451 /* ...and their total size. */
452 size = sizeof(struct elf_phdr) * elf_ex->e_phnum;
Alexey Dobriyanfaf1c312019-03-07 16:28:56 -0800453 if (size == 0 || size > 65536 || size > ELF_MIN_ALIGN)
Paul Burton6a8d3892014-09-11 08:30:14 +0100454 goto out;
455
456 elf_phdata = kmalloc(size, GFP_KERNEL);
457 if (!elf_phdata)
458 goto out;
459
460 /* Read in the program headers */
Alexey Dobriyan658c0332019-12-04 16:52:25 -0800461 retval = elf_read(elf_file, elf_phdata, size, elf_ex->e_phoff);
462 if (retval < 0) {
463 err = retval;
Paul Burton6a8d3892014-09-11 08:30:14 +0100464 goto out;
465 }
466
467 /* Success! */
468 err = 0;
469out:
470 if (err) {
471 kfree(elf_phdata);
472 elf_phdata = NULL;
473 }
474 return elf_phdata;
475}
Jiri Kosinacc503c12008-01-30 13:31:07 +0100476
Paul Burton774c1052014-09-11 08:30:16 +0100477#ifndef CONFIG_ARCH_BINFMT_ELF_STATE
478
479/**
480 * struct arch_elf_state - arch-specific ELF loading state
481 *
482 * This structure is used to preserve architecture specific data during
483 * the loading of an ELF file, throughout the checking of architecture
484 * specific ELF headers & through to the point where the ELF load is
485 * known to be proceeding (ie. SET_PERSONALITY).
486 *
487 * This implementation is a dummy for architectures which require no
488 * specific state.
489 */
490struct arch_elf_state {
491};
492
493#define INIT_ARCH_ELF_STATE {}
494
495/**
496 * arch_elf_pt_proc() - check a PT_LOPROC..PT_HIPROC ELF program header
497 * @ehdr: The main ELF header
498 * @phdr: The program header to check
499 * @elf: The open ELF file
500 * @is_interp: True if the phdr is from the interpreter of the ELF being
501 * loaded, else false.
502 * @state: Architecture-specific state preserved throughout the process
503 * of loading the ELF.
504 *
505 * Inspects the program header phdr to validate its correctness and/or
506 * suitability for the system. Called once per ELF program header in the
507 * range PT_LOPROC to PT_HIPROC, for both the ELF being loaded and its
508 * interpreter.
509 *
510 * Return: Zero to proceed with the ELF load, non-zero to fail the ELF load
511 * with that return code.
512 */
static inline int arch_elf_pt_proc(struct elfhdr *ehdr,
				   struct elf_phdr *phdr,
				   struct file *elf, bool is_interp,
				   struct arch_elf_state *state)
{
	/* Dummy implementation, always proceed */
	return 0;
}
521
522/**
Maciej W. Rozycki54d157142015-10-26 15:47:57 +0000523 * arch_check_elf() - check an ELF executable
Paul Burton774c1052014-09-11 08:30:16 +0100524 * @ehdr: The main ELF header
525 * @has_interp: True if the ELF has an interpreter, else false.
Maciej W. Rozyckieb4bc072015-11-13 00:47:48 +0000526 * @interp_ehdr: The interpreter's ELF header
Paul Burton774c1052014-09-11 08:30:16 +0100527 * @state: Architecture-specific state preserved throughout the process
528 * of loading the ELF.
529 *
530 * Provides a final opportunity for architecture code to reject the loading
531 * of the ELF & cause an exec syscall to return an error. This is called after
532 * all program headers to be checked by arch_elf_pt_proc have been.
533 *
534 * Return: Zero to proceed with the ELF load, non-zero to fail the ELF load
535 * with that return code.
536 */
static inline int arch_check_elf(struct elfhdr *ehdr, bool has_interp,
				 struct elfhdr *interp_ehdr,
				 struct arch_elf_state *state)
{
	/* Dummy implementation, always proceed */
	return 0;
}
544
545#endif /* !CONFIG_ARCH_BINFMT_ELF_STATE */
Linus Torvalds1da177e2005-04-16 15:20:36 -0700546
Alexey Dobriyand8e7cb32019-05-14 15:43:51 -0700547static inline int make_prot(u32 p_flags)
548{
549 int prot = 0;
550
551 if (p_flags & PF_R)
552 prot |= PROT_READ;
553 if (p_flags & PF_W)
554 prot |= PROT_WRITE;
555 if (p_flags & PF_X)
556 prot |= PROT_EXEC;
557 return prot;
558}
559
/*
 * load_elf_interp() - map the ELF interpreter (dynamic linker) into memory
 * @interp_elf_ex:     the interpreter's ELF header
 * @interpreter:       the opened interpreter file
 * @no_base:           non-zero to bias an ET_DYN interpreter so its first
 *                     segment lands at address 0 + load_addr
 * @interp_elf_phdata: the interpreter's program headers (already loaded)
 *
 * Maps every PT_LOAD segment, zeroes the tail of the file-backed data and
 * allocates the remaining bss.  This is much more generalized than the
 * library routine read function, so we keep this separate.  Technically
 * the library read function is only provided so that we can read a.out
 * libraries that have an ELF header.
 *
 * Returns the interpreter's load bias, or an error value for which
 * BAD_ADDR() is true / a negative errno cast to unsigned long.
 */
static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
		struct file *interpreter,
		unsigned long no_base, struct elf_phdr *interp_elf_phdata)
{
	struct elf_phdr *eppnt;
	unsigned long load_addr = 0;
	int load_addr_set = 0;
	unsigned long last_bss = 0, elf_bss = 0;
	int bss_prot = 0;
	unsigned long error = ~0UL;
	unsigned long total_size;
	int i;

	/* First of all, some simple consistency checks */
	if (interp_elf_ex->e_type != ET_EXEC &&
	    interp_elf_ex->e_type != ET_DYN)
		goto out;
	/* FDPIC interpreters are handled by binfmt_elf_fdpic, not here */
	if (!elf_check_arch(interp_elf_ex) ||
	    elf_check_fdpic(interp_elf_ex))
		goto out;
	if (!interpreter->f_op->mmap)
		goto out;

	total_size = total_mapping_size(interp_elf_phdata,
					interp_elf_ex->e_phnum);
	if (!total_size) {
		error = -EINVAL;
		goto out;
	}

	eppnt = interp_elf_phdata;
	for (i = 0; i < interp_elf_ex->e_phnum; i++, eppnt++) {
		if (eppnt->p_type == PT_LOAD) {
			int elf_type = MAP_PRIVATE | MAP_DENYWRITE;
			int elf_prot = make_prot(eppnt->p_flags);
			unsigned long vaddr = 0;
			unsigned long k, map_addr;

			vaddr = eppnt->p_vaddr;
			if (interp_elf_ex->e_type == ET_EXEC || load_addr_set)
				elf_type |= MAP_FIXED_NOREPLACE;
			else if (no_base && interp_elf_ex->e_type == ET_DYN)
				load_addr = -vaddr;

			map_addr = elf_map(interpreter, load_addr + vaddr,
					eppnt, elf_prot, elf_type, total_size);
			/* only the first mapping needs the full image size */
			total_size = 0;
			error = map_addr;
			if (BAD_ADDR(map_addr))
				goto out;

			if (!load_addr_set &&
			    interp_elf_ex->e_type == ET_DYN) {
				load_addr = map_addr - ELF_PAGESTART(vaddr);
				load_addr_set = 1;
			}

			/*
			 * Check to see if the section's size will overflow the
			 * allowed task size. Note that p_filesz must always be
			 * <= p_memsize so it's only necessary to check p_memsz.
			 */
			k = load_addr + eppnt->p_vaddr;
			if (BAD_ADDR(k) ||
			    eppnt->p_filesz > eppnt->p_memsz ||
			    eppnt->p_memsz > TASK_SIZE ||
			    TASK_SIZE - eppnt->p_memsz < k) {
				error = -ENOMEM;
				goto out;
			}

			/*
			 * Find the end of the file mapping for this phdr, and
			 * keep track of the largest address we see for this.
			 */
			k = load_addr + eppnt->p_vaddr + eppnt->p_filesz;
			if (k > elf_bss)
				elf_bss = k;

			/*
			 * Do the same thing for the memory mapping - between
			 * elf_bss and last_bss is the bss section.
			 */
			k = load_addr + eppnt->p_vaddr + eppnt->p_memsz;
			if (k > last_bss) {
				last_bss = k;
				bss_prot = elf_prot;
			}
		}
	}

	/*
	 * Now fill out the bss section: first pad the last page from
	 * the file up to the page boundary, and zero it from elf_bss
	 * up to the end of the page.
	 */
	if (padzero(elf_bss)) {
		error = -EFAULT;
		goto out;
	}
	/*
	 * Next, align both the file and mem bss up to the page size,
	 * since this is where elf_bss was just zeroed up to, and where
	 * last_bss will end after the vm_brk_flags() below.
	 */
	elf_bss = ELF_PAGEALIGN(elf_bss);
	last_bss = ELF_PAGEALIGN(last_bss);
	/* Finally, if there is still more bss to allocate, do it. */
	if (last_bss > elf_bss) {
		error = vm_brk_flags(elf_bss, last_bss - elf_bss,
				bss_prot & PROT_EXEC ? VM_EXEC : 0);
		if (error)
			goto out;
	}

	error = load_addr;
out:
	return error;
}
684
Linus Torvalds1da177e2005-04-16 15:20:36 -0700685/*
686 * These are the functions used to load ELF style executables and shared
687 * libraries. There is no binary dependent code anywhere else.
688 */
689
/*
 * Parse one gnu_property entry from the PT_GNU_PROPERTY note data and
 * advance *off past it.
 *
 * @data:           start of the note descriptor data
 * @off:            in/out cursor into @data; advanced past the parsed entry
 * @datasz:         total size of @data
 * @arch:           arch-specific state updated by arch_parse_elf_property()
 * @have_prev_type: true once at least one property has been parsed
 * @prev_type:      pr_type of the previously parsed property (ordering check)
 *
 * Returns -ENOENT when *off has already reached the end of the data (the
 * normal loop-termination condition), -EIO on an internally inconsistent
 * cursor, -ENOEXEC on a malformed or mis-ordered entry, or whatever the
 * arch handler returns.
 */
static int parse_elf_property(const char *data, size_t *off, size_t datasz,
			      struct arch_elf_state *arch,
			      bool have_prev_type, u32 *prev_type)
{
	size_t o, step;
	const struct gnu_property *pr;
	int ret;

	/* Clean end of data: tells the caller's loop to stop. */
	if (*off == datasz)
		return -ENOENT;

	/* The cursor must stay in bounds and properly aligned. */
	if (WARN_ON_ONCE(*off > datasz || *off % ELF_GNU_PROPERTY_ALIGN))
		return -EIO;
	o = *off;
	datasz -= *off;

	/* Must have room for a full property header. */
	if (datasz < sizeof(*pr))
		return -ENOEXEC;
	pr = (const struct gnu_property *)(data + o);
	o += sizeof(*pr);
	datasz -= sizeof(*pr);

	/* The declared payload must fit in the remaining data. */
	if (pr->pr_datasz > datasz)
		return -ENOEXEC;

	WARN_ON_ONCE(o % ELF_GNU_PROPERTY_ALIGN);
	/* Entries are padded to ELF_GNU_PROPERTY_ALIGN boundaries. */
	step = round_up(pr->pr_datasz, ELF_GNU_PROPERTY_ALIGN);
	if (step > datasz)
		return -ENOEXEC;

	/* Properties are supposed to be unique and sorted on pr_type: */
	if (have_prev_type && pr->pr_type <= *prev_type)
		return -ENOEXEC;
	*prev_type = pr->pr_type;

	ret = arch_parse_elf_property(pr->pr_type, data + o,
				      pr->pr_datasz, ELF_COMPAT, arch);
	if (ret)
		return ret;

	*off = o + step;
	return 0;
}
733
/* Scratch buffer size for reading the whole PT_GNU_PROPERTY note. */
#define NOTE_DATA_SZ SZ_1K
#define GNU_PROPERTY_TYPE_0_NAME "GNU"
#define NOTE_NAME_SZ (sizeof(GNU_PROPERTY_TYPE_0_NAME))

/*
 * Read the PT_GNU_PROPERTY note described by @phdr from @f and feed each
 * property it contains to the architecture code via parse_elf_property().
 *
 * Returns 0 if there is nothing to do (no CONFIG_ARCH_USE_GNU_PROPERTY,
 * no phdr, or all properties consumed cleanly), -ENOEXEC on a malformed
 * note, -EIO on a short/failed read, or the arch handler's error.
 */
static int parse_elf_properties(struct file *f, const struct elf_phdr *phdr,
				struct arch_elf_state *arch)
{
	/* Overlay an elf_note header on a fixed-size read buffer. */
	union {
		struct elf_note nhdr;
		char data[NOTE_DATA_SZ];
	} note;
	loff_t pos;
	ssize_t n;
	size_t off, datasz;
	int ret;
	bool have_prev_type;
	u32 prev_type;

	if (!IS_ENABLED(CONFIG_ARCH_USE_GNU_PROPERTY) || !phdr)
		return 0;

	/* load_elf_binary() shouldn't call us unless this is true... */
	if (WARN_ON_ONCE(phdr->p_type != PT_GNU_PROPERTY))
		return -ENOEXEC;

	/* If the properties are crazy large, that's too bad (for now): */
	if (phdr->p_filesz > sizeof(note))
		return -ENOEXEC;

	pos = phdr->p_offset;
	n = kernel_read(f, &note, phdr->p_filesz, &pos);

	BUILD_BUG_ON(sizeof(note) < sizeof(note.nhdr) + NOTE_NAME_SZ);
	/* n < 0 is checked first so the unsigned comparison below is safe. */
	if (n < 0 || n < sizeof(note.nhdr) + NOTE_NAME_SZ)
		return -EIO;

	/* Must be a type-0 GNU property note named "GNU". */
	if (note.nhdr.n_type != NT_GNU_PROPERTY_TYPE_0 ||
	    note.nhdr.n_namesz != NOTE_NAME_SZ ||
	    strncmp(note.data + sizeof(note.nhdr),
		    GNU_PROPERTY_TYPE_0_NAME, n - sizeof(note.nhdr)))
		return -ENOEXEC;

	/* Descriptor data starts aligned after the header and name. */
	off = round_up(sizeof(note.nhdr) + NOTE_NAME_SZ,
		       ELF_GNU_PROPERTY_ALIGN);
	if (off > n)
		return -ENOEXEC;

	/* The declared descriptor size must fit in what was read. */
	if (note.nhdr.n_descsz > n - off)
		return -ENOEXEC;
	datasz = off + note.nhdr.n_descsz;

	/* Walk every property; parse_elf_property() advances off. */
	have_prev_type = false;
	do {
		ret = parse_elf_property(note.data, &off, datasz, arch,
					 have_prev_type, &prev_type);
		have_prev_type = true;
	} while (!ret);

	/* -ENOENT just means we ran off the end of the data: success. */
	return ret == -ENOENT ? 0 : ret;
}
794
/*
 * load_elf_binary() - the binfmt handler that loads an ELF executable.
 *
 * Validates the ELF header in bprm->buf, loads the program headers, maps
 * every PT_LOAD segment (applying ET_DYN load-bias randomization), sets up
 * the bss/brk, loads the PT_INTERP interpreter if one is present, builds
 * the auxv/argv tables, and finally starts the new thread at the entry
 * point.  Returns 0 on success or a negative errno.
 *
 * NOTE(review): after flush_old_exec() succeeds the old mm is gone, so any
 * later failure cannot return to the caller's address space; error paths
 * past that point still use the same labels but the process will be killed
 * by the exec core.
 */
static int load_elf_binary(struct linux_binprm *bprm)
{
	struct file *interpreter = NULL; /* to shut gcc up */
	unsigned long load_addr = 0, load_bias = 0;
	int load_addr_set = 0;
	unsigned long error;
	struct elf_phdr *elf_ppnt, *elf_phdata, *interp_elf_phdata = NULL;
	struct elf_phdr *elf_property_phdata = NULL;
	unsigned long elf_bss, elf_brk;	/* highest file-backed / in-memory end */
	int bss_prot = 0;		/* protection of the segment owning the brk */
	int retval, i;
	unsigned long elf_entry;
	unsigned long e_entry;
	unsigned long interp_load_addr = 0;
	unsigned long start_code, end_code, start_data, end_data;
	unsigned long reloc_func_desc __maybe_unused = 0;
	int executable_stack = EXSTACK_DEFAULT;
	struct elfhdr *elf_ex = (struct elfhdr *)bprm->buf;
	struct {
		struct elfhdr interp_elf_ex;
	} *loc;
	struct arch_elf_state arch_state = INIT_ARCH_ELF_STATE;
	struct mm_struct *mm;
	struct pt_regs *regs;

	loc = kmalloc(sizeof(*loc), GFP_KERNEL);
	if (!loc) {
		retval = -ENOMEM;
		goto out_ret;
	}

	retval = -ENOEXEC;
	/* First of all, some simple consistency checks */
	if (memcmp(elf_ex->e_ident, ELFMAG, SELFMAG) != 0)
		goto out;

	if (elf_ex->e_type != ET_EXEC && elf_ex->e_type != ET_DYN)
		goto out;
	if (!elf_check_arch(elf_ex))
		goto out;
	if (elf_check_fdpic(elf_ex))
		goto out;
	if (!bprm->file->f_op->mmap)
		goto out;

	elf_phdata = load_elf_phdrs(elf_ex, bprm->file);
	if (!elf_phdata)
		goto out;

	/*
	 * First pass over the program headers: remember the binary's
	 * PT_GNU_PROPERTY segment and open the PT_INTERP interpreter.
	 */
	elf_ppnt = elf_phdata;
	for (i = 0; i < elf_ex->e_phnum; i++, elf_ppnt++) {
		char *elf_interpreter;

		if (elf_ppnt->p_type == PT_GNU_PROPERTY) {
			elf_property_phdata = elf_ppnt;
			continue;
		}

		if (elf_ppnt->p_type != PT_INTERP)
			continue;

		/*
		 * This is the program interpreter used for shared libraries -
		 * for now assume that this is an a.out format binary.
		 */
		retval = -ENOEXEC;
		if (elf_ppnt->p_filesz > PATH_MAX || elf_ppnt->p_filesz < 2)
			goto out_free_ph;

		retval = -ENOMEM;
		elf_interpreter = kmalloc(elf_ppnt->p_filesz, GFP_KERNEL);
		if (!elf_interpreter)
			goto out_free_ph;

		retval = elf_read(bprm->file, elf_interpreter, elf_ppnt->p_filesz,
				  elf_ppnt->p_offset);
		if (retval < 0)
			goto out_free_interp;
		/* make sure path is NULL terminated */
		retval = -ENOEXEC;
		if (elf_interpreter[elf_ppnt->p_filesz - 1] != '\0')
			goto out_free_interp;

		interpreter = open_exec(elf_interpreter);
		kfree(elf_interpreter);
		retval = PTR_ERR(interpreter);
		if (IS_ERR(interpreter))
			goto out_free_ph;

		/*
		 * If the binary is not readable then enforce mm->dumpable = 0
		 * regardless of the interpreter's permissions.
		 */
		would_dump(bprm, interpreter);

		/* Get the exec headers */
		retval = elf_read(interpreter, &loc->interp_elf_ex,
				  sizeof(loc->interp_elf_ex), 0);
		if (retval < 0)
			goto out_free_dentry;

		/* Only the first PT_INTERP is honoured. */
		break;

/* In-loop cleanup label: frees the path buffer, then the phdrs. */
out_free_interp:
		kfree(elf_interpreter);
		goto out_free_ph;
	}

	/*
	 * Second pass: record the PT_GNU_STACK policy and hand any
	 * processor-specific headers to the architecture code.
	 */
	elf_ppnt = elf_phdata;
	for (i = 0; i < elf_ex->e_phnum; i++, elf_ppnt++)
		switch (elf_ppnt->p_type) {
		case PT_GNU_STACK:
			if (elf_ppnt->p_flags & PF_X)
				executable_stack = EXSTACK_ENABLE_X;
			else
				executable_stack = EXSTACK_DISABLE_X;
			break;

		case PT_LOPROC ... PT_HIPROC:
			retval = arch_elf_pt_proc(elf_ex, elf_ppnt,
						  bprm->file, false,
						  &arch_state);
			if (retval)
				goto out_free_dentry;
			break;
		}

	/* Some simple consistency checks for the interpreter */
	if (interpreter) {
		retval = -ELIBBAD;
		/* Not an ELF interpreter */
		if (memcmp(loc->interp_elf_ex.e_ident, ELFMAG, SELFMAG) != 0)
			goto out_free_dentry;
		/* Verify the interpreter has a valid arch */
		if (!elf_check_arch(&loc->interp_elf_ex) ||
		    elf_check_fdpic(&loc->interp_elf_ex))
			goto out_free_dentry;

		/* Load the interpreter program headers */
		interp_elf_phdata = load_elf_phdrs(&loc->interp_elf_ex,
						   interpreter);
		if (!interp_elf_phdata)
			goto out_free_dentry;

		/* Pass PT_LOPROC..PT_HIPROC headers to arch code */
		/* The interpreter's GNU property note overrides the binary's. */
		elf_property_phdata = NULL;
		elf_ppnt = interp_elf_phdata;
		for (i = 0; i < loc->interp_elf_ex.e_phnum; i++, elf_ppnt++)
			switch (elf_ppnt->p_type) {
			case PT_GNU_PROPERTY:
				elf_property_phdata = elf_ppnt;
				break;

			case PT_LOPROC ... PT_HIPROC:
				retval = arch_elf_pt_proc(&loc->interp_elf_ex,
							  elf_ppnt, interpreter,
							  true, &arch_state);
				if (retval)
					goto out_free_dentry;
				break;
			}
	}

	retval = parse_elf_properties(interpreter ?: bprm->file,
				      elf_property_phdata, &arch_state);
	if (retval)
		goto out_free_dentry;

	/*
	 * Allow arch code to reject the ELF at this point, whilst it's
	 * still possible to return an error to the code that invoked
	 * the exec syscall.
	 */
	retval = arch_check_elf(elf_ex,
				!!interpreter, &loc->interp_elf_ex,
				&arch_state);
	if (retval)
		goto out_free_dentry;

	/* Flush all traces of the currently running executable */
	retval = flush_old_exec(bprm);
	if (retval)
		goto out_free_dentry;

	/* Do this immediately, since STACK_TOP as used in setup_arg_pages
	   may depend on the personality.  */
	SET_PERSONALITY2(*elf_ex, &arch_state);
	if (elf_read_implies_exec(*elf_ex, executable_stack))
		current->personality |= READ_IMPLIES_EXEC;

	if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
		current->flags |= PF_RANDOMIZE;

	setup_new_exec(bprm);
	install_exec_creds(bprm);

	/* Do this so that we can load the interpreter, if need be.  We will
	   change some of these later */
	retval = setup_arg_pages(bprm, randomize_stack_top(STACK_TOP),
				 executable_stack);
	if (retval < 0)
		goto out_free_dentry;

	elf_bss = 0;
	elf_brk = 0;

	start_code = ~0UL;
	end_code = 0;
	start_data = 0;
	end_data = 0;

	/* Now we do a little grungy work by mmapping the ELF image into
	   the correct location in memory. */
	for(i = 0, elf_ppnt = elf_phdata;
	    i < elf_ex->e_phnum; i++, elf_ppnt++) {
		int elf_prot, elf_flags;
		unsigned long k, vaddr;
		unsigned long total_size = 0;

		if (elf_ppnt->p_type != PT_LOAD)
			continue;

		if (unlikely (elf_brk > elf_bss)) {
			unsigned long nbyte;

			/* There was a PT_LOAD segment with p_memsz > p_filesz
			   before this one. Map anonymous pages, if needed,
			   and clear the area.  */
			retval = set_brk(elf_bss + load_bias,
					 elf_brk + load_bias,
					 bss_prot);
			if (retval)
				goto out_free_dentry;
			nbyte = ELF_PAGEOFFSET(elf_bss);
			if (nbyte) {
				nbyte = ELF_MIN_ALIGN - nbyte;
				if (nbyte > elf_brk - elf_bss)
					nbyte = elf_brk - elf_bss;
				if (clear_user((void __user *)elf_bss +
							load_bias, nbyte)) {
					/*
					 * This bss-zeroing can fail if the ELF
					 * file specifies odd protections. So
					 * we don't check the return value
					 */
				}
			}
		}

		elf_prot = make_prot(elf_ppnt->p_flags);

		elf_flags = MAP_PRIVATE | MAP_DENYWRITE | MAP_EXECUTABLE;

		vaddr = elf_ppnt->p_vaddr;
		/*
		 * If we are loading ET_EXEC or we have already performed
		 * the ET_DYN load_addr calculations, proceed normally.
		 */
		if (elf_ex->e_type == ET_EXEC || load_addr_set) {
			elf_flags |= MAP_FIXED;
		} else if (elf_ex->e_type == ET_DYN) {
			/*
			 * This logic is run once for the first LOAD Program
			 * Header for ET_DYN binaries to calculate the
			 * randomization (load_bias) for all the LOAD
			 * Program Headers, and to calculate the entire
			 * size of the ELF mapping (total_size). (Note that
			 * load_addr_set is set to true later once the
			 * initial mapping is performed.)
			 *
			 * There are effectively two types of ET_DYN
			 * binaries: programs (i.e. PIE: ET_DYN with INTERP)
			 * and loaders (ET_DYN without INTERP, since they
			 * _are_ the ELF interpreter). The loaders must
			 * be loaded away from programs since the program
			 * may otherwise collide with the loader (especially
			 * for ET_EXEC which does not have a randomized
			 * position). For example to handle invocations of
			 * "./ld.so someprog" to test out a new version of
			 * the loader, the subsequent program that the
			 * loader loads must avoid the loader itself, so
			 * they cannot share the same load range. Sufficient
			 * room for the brk must be allocated with the
			 * loader as well, since brk must be available with
			 * the loader.
			 *
			 * Therefore, programs are loaded offset from
			 * ELF_ET_DYN_BASE and loaders are loaded into the
			 * independently randomized mmap region (0 load_bias
			 * without MAP_FIXED).
			 */
			if (interpreter) {
				load_bias = ELF_ET_DYN_BASE;
				if (current->flags & PF_RANDOMIZE)
					load_bias += arch_mmap_rnd();
				elf_flags |= MAP_FIXED;
			} else
				load_bias = 0;

			/*
			 * Since load_bias is used for all subsequent loading
			 * calculations, we must lower it by the first vaddr
			 * so that the remaining calculations based on the
			 * ELF vaddrs will be correctly offset. The result
			 * is then page aligned.
			 */
			load_bias = ELF_PAGESTART(load_bias - vaddr);

			total_size = total_mapping_size(elf_phdata,
							elf_ex->e_phnum);
			if (!total_size) {
				retval = -EINVAL;
				goto out_free_dentry;
			}
		}

		error = elf_map(bprm->file, load_bias + vaddr, elf_ppnt,
				elf_prot, elf_flags, total_size);
		if (BAD_ADDR(error)) {
			retval = IS_ERR((void *)error) ?
				PTR_ERR((void*)error) : -EINVAL;
			goto out_free_dentry;
		}

		if (!load_addr_set) {
			load_addr_set = 1;
			load_addr = (elf_ppnt->p_vaddr - elf_ppnt->p_offset);
			if (elf_ex->e_type == ET_DYN) {
				/* Fold the actual mmap placement into the bias. */
				load_bias += error -
				             ELF_PAGESTART(load_bias + vaddr);
				load_addr += load_bias;
				reloc_func_desc = load_bias;
			}
		}
		k = elf_ppnt->p_vaddr;
		if ((elf_ppnt->p_flags & PF_X) && k < start_code)
			start_code = k;
		if (start_data < k)
			start_data = k;

		/*
		 * Check to see if the section's size will overflow the
		 * allowed task size. Note that p_filesz must always be
		 * <= p_memsz so it is only necessary to check p_memsz.
		 */
		if (BAD_ADDR(k) || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
		    elf_ppnt->p_memsz > TASK_SIZE ||
		    TASK_SIZE - elf_ppnt->p_memsz < k) {
			/* set_brk can never work. Avoid overflows. */
			retval = -EINVAL;
			goto out_free_dentry;
		}

		k = elf_ppnt->p_vaddr + elf_ppnt->p_filesz;

		if (k > elf_bss)
			elf_bss = k;
		if ((elf_ppnt->p_flags & PF_X) && end_code < k)
			end_code = k;
		if (end_data < k)
			end_data = k;
		k = elf_ppnt->p_vaddr + elf_ppnt->p_memsz;
		if (k > elf_brk) {
			/* The highest segment's protection governs the brk. */
			bss_prot = elf_prot;
			elf_brk = k;
		}
	}

	/* Translate the link-time layout into the chosen load addresses. */
	e_entry = elf_ex->e_entry + load_bias;
	elf_bss += load_bias;
	elf_brk += load_bias;
	start_code += load_bias;
	end_code += load_bias;
	start_data += load_bias;
	end_data += load_bias;

	/* Calling set_brk effectively mmaps the pages that we need
	 * for the bss and break sections.  We must do this before
	 * mapping in the interpreter, to make sure it doesn't wind
	 * up getting placed where the bss needs to go.
	 */
	retval = set_brk(elf_bss, elf_brk, bss_prot);
	if (retval)
		goto out_free_dentry;
	if (likely(elf_bss != elf_brk) && unlikely(padzero(elf_bss))) {
		retval = -EFAULT; /* Nobody gets to see this, but.. */
		goto out_free_dentry;
	}

	if (interpreter) {
		elf_entry = load_elf_interp(&loc->interp_elf_ex,
					    interpreter,
					    load_bias, interp_elf_phdata);
		if (!IS_ERR((void *)elf_entry)) {
			/*
			 * load_elf_interp() returns relocation
			 * adjustment
			 */
			interp_load_addr = elf_entry;
			elf_entry += loc->interp_elf_ex.e_entry;
		}
		if (BAD_ADDR(elf_entry)) {
			retval = IS_ERR((void *)elf_entry) ?
					(int)elf_entry : -EINVAL;
			goto out_free_dentry;
		}
		reloc_func_desc = interp_load_addr;

		allow_write_access(interpreter);
		fput(interpreter);
	} else {
		/* No interpreter: enter the binary directly. */
		elf_entry = e_entry;
		if (BAD_ADDR(elf_entry)) {
			retval = -EINVAL;
			goto out_free_dentry;
		}
	}

	kfree(interp_elf_phdata);
	kfree(elf_phdata);

	set_binfmt(&elf_format);

#ifdef ARCH_HAS_SETUP_ADDITIONAL_PAGES
	retval = arch_setup_additional_pages(bprm, !!interpreter);
	if (retval < 0)
		goto out;
#endif /* ARCH_HAS_SETUP_ADDITIONAL_PAGES */

	retval = create_elf_tables(bprm, elf_ex,
			  load_addr, interp_load_addr, e_entry);
	if (retval < 0)
		goto out;

	mm = current->mm;
	mm->end_code = end_code;
	mm->start_code = start_code;
	mm->start_data = start_data;
	mm->end_data = end_data;
	mm->start_stack = bprm->p;

	if ((current->flags & PF_RANDOMIZE) && (randomize_va_space > 1)) {
		/*
		 * For architectures with ELF randomization, when executing
		 * a loader directly (i.e. no interpreter listed in ELF
		 * headers), move the brk area out of the mmap region
		 * (since it grows up, and may collide early with the stack
		 * growing down), and into the unused ELF_ET_DYN_BASE region.
		 */
		if (IS_ENABLED(CONFIG_ARCH_HAS_ELF_RANDOMIZE) &&
		    elf_ex->e_type == ET_DYN && !interpreter) {
			mm->brk = mm->start_brk = ELF_ET_DYN_BASE;
		}

		mm->brk = mm->start_brk = arch_randomize_brk(mm);
#ifdef compat_brk_randomized
		current->brk_randomized = 1;
#endif
	}

	if (current->personality & MMAP_PAGE_ZERO) {
		/* Why this, you ask???  Well SVr4 maps page 0 as read-only,
		   and some applications "depend" upon this behavior.
		   Since we do not have the power to recompile these, we
		   emulate the SVr4 behavior. Sigh. */
		error = vm_mmap(NULL, 0, PAGE_SIZE, PROT_READ | PROT_EXEC,
				MAP_FIXED | MAP_PRIVATE, 0);
	}

	regs = current_pt_regs();
#ifdef ELF_PLAT_INIT
	/*
	 * The ABI may specify that certain registers be set up in special
	 * ways (on i386 %edx is the address of a DT_FINI function, for
	 * example.  In addition, it may also specify (eg, PowerPC64 ELF)
	 * that the e_entry field is the address of the function descriptor
	 * for the startup routine, rather than the address of the startup
	 * routine itself.  This macro performs whatever initialization to
	 * the regs structure is required as well as any relocations to the
	 * function descriptor entries when executing dynamically links apps.
	 */
	ELF_PLAT_INIT(regs, reloc_func_desc);
#endif

	finalize_exec(bprm);
	start_thread(regs, elf_entry, bprm->p);
	retval = 0;
out:
	kfree(loc);
out_ret:
	return retval;

	/* error cleanup */
out_free_dentry:
	kfree(interp_elf_phdata);
	allow_write_access(interpreter);
	if (interpreter)
		fput(interpreter);
out_free_ph:
	kfree(elf_phdata);
	goto out;
}
1297
#ifdef CONFIG_USELIB
/*
 * Load an ELF shared library via the obsolete uselib(2) interface.
 * The image must be ET_EXEC with at most two program headers, exactly
 * one of which is PT_LOAD; that segment is mapped at its linked
 * address and any trailing bss is zeroed and allocated.
 */
static int load_elf_library(struct file *file)
{
	struct elf_phdr *phdata, *load_seg;
	unsigned long bss_start, bss_end, file_end;
	int ret, nread, idx, nr_load, phsize;
	struct elfhdr hdr;

	ret = -ENOEXEC;
	nread = elf_read(file, &hdr, sizeof(hdr), 0);
	if (nread < 0)
		goto out;

	if (memcmp(hdr.e_ident, ELFMAG, SELFMAG) != 0)
		goto out;

	/* Basic sanity: fixed-address image, supported arch, mmap-able file. */
	if (hdr.e_type != ET_EXEC || hdr.e_phnum > 2 ||
	    !elf_check_arch(&hdr) || !file->f_op->mmap)
		goto out;
	if (elf_check_fdpic(&hdr))
		goto out;

	/* Read the program header table. */
	phsize = sizeof(struct elf_phdr) * hdr.e_phnum;
	/* phsize < ELF_MIN_ALIGN because hdr.e_phnum <= 2 */

	ret = -ENOMEM;
	phdata = kmalloc(phsize, GFP_KERNEL);
	if (!phdata)
		goto out;

	ret = -ENOEXEC;
	nread = elf_read(file, phdata, phsize, hdr.e_phoff);
	if (nread < 0)
		goto out_free_ph;

	/* Exactly one PT_LOAD segment is allowed. */
	nr_load = 0;
	for (idx = 0; idx < hdr.e_phnum; idx++) {
		if (phdata[idx].p_type == PT_LOAD)
			nr_load++;
	}
	if (nr_load != 1)
		goto out_free_ph;

	load_seg = phdata;
	while (load_seg->p_type != PT_LOAD)
		load_seg++;

	/* Map the file-backed part of the segment at its linked address. */
	ret = vm_mmap(file,
		      ELF_PAGESTART(load_seg->p_vaddr),
		      (load_seg->p_filesz +
		       ELF_PAGEOFFSET(load_seg->p_vaddr)),
		      PROT_READ | PROT_WRITE | PROT_EXEC,
		      MAP_FIXED_NOREPLACE | MAP_PRIVATE | MAP_DENYWRITE,
		      (load_seg->p_offset -
		       ELF_PAGEOFFSET(load_seg->p_vaddr)));
	if (ret != ELF_PAGESTART(load_seg->p_vaddr))
		goto out_free_ph;

	/* Zero the tail of the last file-backed page. */
	bss_start = load_seg->p_vaddr + load_seg->p_filesz;
	if (padzero(bss_start)) {
		ret = -EFAULT;
		goto out_free_ph;
	}

	/* Allocate any whole bss pages beyond the file-backed mapping. */
	file_end = ELF_PAGEALIGN(load_seg->p_filesz + load_seg->p_vaddr);
	bss_end = ELF_PAGEALIGN(load_seg->p_memsz + load_seg->p_vaddr);
	if (bss_end > file_end) {
		ret = vm_brk(file_end, bss_end - file_end);
		if (ret)
			goto out_free_ph;
	}
	ret = 0;

out_free_ph:
	kfree(phdata);
out:
	return ret;
}
#endif /* #ifdef CONFIG_USELIB */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001382
Christoph Hellwig698ba7b2009-12-15 16:47:37 -08001383#ifdef CONFIG_ELF_CORE
Linus Torvalds1da177e2005-04-16 15:20:36 -07001384/*
1385 * ELF core dumper
1386 *
1387 * Modelled on fs/exec.c:aout_core_dump()
1388 * Jeremy Fitzhardinge <jeremy@sw.oz.au>
1389 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001390
1391/*
Jason Baron909af762012-03-23 15:02:51 -07001392 * The purpose of always_dump_vma() is to make sure that special kernel mappings
1393 * that are useful for post-mortem analysis are included in every core dump.
1394 * In that way we ensure that the core dump is fully interpretable later
1395 * without matching up the same kernel and hardware config to see what PC values
1396 * meant. These special mappings include - vDSO, vsyscall, and other
1397 * architecture specific mappings
1398 */
1399static bool always_dump_vma(struct vm_area_struct *vma)
1400{
1401 /* Any vsyscall mappings? */
1402 if (vma == get_gate_vma(vma->vm_mm))
1403 return true;
Andy Lutomirski78d683e2014-05-19 15:58:32 -07001404
1405 /*
1406 * Assume that all vmas with a .name op should always be dumped.
1407 * If this changes, a new vm_ops field can easily be added.
1408 */
1409 if (vma->vm_ops && vma->vm_ops->name && vma->vm_ops->name(vma))
1410 return true;
1411
Jason Baron909af762012-03-23 15:02:51 -07001412 /*
1413 * arch_vma_name() returns non-NULL for special architecture mappings,
1414 * such as vDSO sections.
1415 */
1416 if (arch_vma_name(vma))
1417 return true;
1418
1419 return false;
1420}
1421
1422/*
Roland McGrath82df3972007-10-16 23:27:02 -07001423 * Decide what to dump of a segment, part, all or none.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001424 */
static unsigned long vma_dump_size(struct vm_area_struct *vma,
				   unsigned long mm_flags)
{
	/* Test one MMF_DUMP_* coredump-filter bit carried in mm_flags. */
#define FILTER(type)	(mm_flags & (1UL << MMF_DUMP_##type))

	/* always dump the vdso and vsyscall sections */
	if (always_dump_vma(vma))
		goto whole;

	if (vma->vm_flags & VM_DONTDUMP)
		return 0;

	/* support for DAX */
	if (vma_is_dax(vma)) {
		if ((vma->vm_flags & VM_SHARED) && FILTER(DAX_SHARED))
			goto whole;
		if (!(vma->vm_flags & VM_SHARED) && FILTER(DAX_PRIVATE))
			goto whole;
		return 0;
	}

	/* Hugetlb memory check */
	if (vma->vm_flags & VM_HUGETLB) {
		if ((vma->vm_flags & VM_SHARED) && FILTER(HUGETLB_SHARED))
			goto whole;
		if (!(vma->vm_flags & VM_SHARED) && FILTER(HUGETLB_PRIVATE))
			goto whole;
		return 0;
	}

	/* Do not dump I/O mapped devices or special mappings */
	if (vma->vm_flags & VM_IO)
		return 0;

	/* By default, dump shared memory if mapped from an anonymous file. */
	if (vma->vm_flags & VM_SHARED) {
		/* i_nlink == 0 means the backing file was unlinked (anonymous-like). */
		if (file_inode(vma->vm_file)->i_nlink == 0 ?
		    FILTER(ANON_SHARED) : FILTER(MAPPED_SHARED))
			goto whole;
		return 0;
	}

	/* Dump segments that have been written to.  */
	/* A non-NULL anon_vma indicates private pages (possible writes). */
	if (vma->anon_vma && FILTER(ANON_PRIVATE))
		goto whole;
	if (vma->vm_file == NULL)
		return 0;

	if (FILTER(MAPPED_PRIVATE))
		goto whole;

	/*
	 * If this looks like the beginning of a DSO or executable mapping,
	 * check for an ELF header.  If we find one, dump the first page to
	 * aid in determining what was mapped here.
	 */
	if (FILTER(ELF_HEADERS) &&
	    vma->vm_pgoff == 0 && (vma->vm_flags & VM_READ)) {
		u32 __user *header = (u32 __user *) vma->vm_start;
		u32 word;
		mm_segment_t fs = get_fs();
		/*
		 * Doing it this way gets the constant folded by GCC.
		 */
		union {
			u32 cmp;
			char elfmag[SELFMAG];
		} magic;
		BUILD_BUG_ON(SELFMAG != sizeof word);
		magic.elfmag[EI_MAG0] = ELFMAG0;
		magic.elfmag[EI_MAG1] = ELFMAG1;
		magic.elfmag[EI_MAG2] = ELFMAG2;
		magic.elfmag[EI_MAG3] = ELFMAG3;
		/*
		 * Switch to the user "segment" for get_user(),
		 * then put back what elf_core_dump() had in place.
		 */
		set_fs(USER_DS);
		if (unlikely(get_user(word, header)))
			word = 0;
		set_fs(fs);
		/* First 4 bytes match "\177ELF" -> dump just the first page. */
		if (word == magic.cmp)
			return PAGE_SIZE;
	}

#undef FILTER

	return 0;

whole:
	return vma->vm_end - vma->vm_start;
}
1517
Linus Torvalds1da177e2005-04-16 15:20:36 -07001518/* An ELF note in memory */
struct memelfnote
{
	const char *name;	/* note name, e.g. "CORE" or "LINUX" */
	int type;		/* note type (an NT_* constant) */
	unsigned int datasz;	/* size of *data in bytes */
	void *data;		/* note descriptor payload */
};
1526
1527static int notesize(struct memelfnote *en)
1528{
1529 int sz;
1530
1531 sz = sizeof(struct elf_note);
1532 sz += roundup(strlen(en->name) + 1, 4);
1533 sz += roundup(en->datasz, 4);
1534
1535 return sz;
1536}
1537
Al Viroecc8c772013-10-05 15:32:35 -04001538static int writenote(struct memelfnote *men, struct coredump_params *cprm)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001539{
1540 struct elf_note en;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001541 en.n_namesz = strlen(men->name) + 1;
1542 en.n_descsz = men->datasz;
1543 en.n_type = men->type;
1544
Al Viroecc8c772013-10-05 15:32:35 -04001545 return dump_emit(cprm, &en, sizeof(en)) &&
Al Viro22a8cb82013-10-08 11:05:01 -04001546 dump_emit(cprm, men->name, en.n_namesz) && dump_align(cprm, 4) &&
1547 dump_emit(cprm, men->data, men->datasz) && dump_align(cprm, 4);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001548}
Linus Torvalds1da177e2005-04-16 15:20:36 -07001549
Roland McGrath3aba4812008-01-30 13:31:44 +01001550static void fill_elf_header(struct elfhdr *elf, int segs,
Zhang Yanfeid3330cf2013-02-21 16:44:20 -08001551 u16 machine, u32 flags)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001552{
Cyrill Gorcunov6970c8e2008-04-29 01:01:18 -07001553 memset(elf, 0, sizeof(*elf));
1554
Linus Torvalds1da177e2005-04-16 15:20:36 -07001555 memcpy(elf->e_ident, ELFMAG, SELFMAG);
1556 elf->e_ident[EI_CLASS] = ELF_CLASS;
1557 elf->e_ident[EI_DATA] = ELF_DATA;
1558 elf->e_ident[EI_VERSION] = EV_CURRENT;
1559 elf->e_ident[EI_OSABI] = ELF_OSABI;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001560
1561 elf->e_type = ET_CORE;
Roland McGrath3aba4812008-01-30 13:31:44 +01001562 elf->e_machine = machine;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001563 elf->e_version = EV_CURRENT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001564 elf->e_phoff = sizeof(struct elfhdr);
Roland McGrath3aba4812008-01-30 13:31:44 +01001565 elf->e_flags = flags;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001566 elf->e_ehsize = sizeof(struct elfhdr);
1567 elf->e_phentsize = sizeof(struct elf_phdr);
1568 elf->e_phnum = segs;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001569}
1570
Andrew Morton8d6b5eee2006-09-25 23:32:04 -07001571static void fill_elf_note_phdr(struct elf_phdr *phdr, int sz, loff_t offset)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001572{
1573 phdr->p_type = PT_NOTE;
1574 phdr->p_offset = offset;
1575 phdr->p_vaddr = 0;
1576 phdr->p_paddr = 0;
1577 phdr->p_filesz = sz;
1578 phdr->p_memsz = 0;
1579 phdr->p_flags = 0;
1580 phdr->p_align = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001581}
1582
1583static void fill_note(struct memelfnote *note, const char *name, int type,
1584 unsigned int sz, void *data)
1585{
1586 note->name = name;
1587 note->type = type;
1588 note->datasz = sz;
1589 note->data = data;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001590}
1591
1592/*
Jesper Juhlf4e5cc22006-06-23 02:05:35 -07001593 * fill up all the fields in prstatus from the given task struct, except
1594 * registers which need to be filled up separately.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001595 */
static void fill_prstatus(struct elf_prstatus *prstatus,
		struct task_struct *p, long signr)
{
	/* Signal that caused the dump, plus pending/blocked signal sets. */
	prstatus->pr_info.si_signo = prstatus->pr_cursig = signr;
	prstatus->pr_sigpend = p->pending.signal.sig[0];
	prstatus->pr_sighold = p->blocked.sig[0];
	/* real_parent is RCU-protected; take the lock only for the deref. */
	rcu_read_lock();
	prstatus->pr_ppid = task_pid_vnr(rcu_dereference(p->real_parent));
	rcu_read_unlock();
	/* All ids are namespace-relative (_vnr) as seen by the dumper. */
	prstatus->pr_pid = task_pid_vnr(p);
	prstatus->pr_pgrp = task_pgrp_vnr(p);
	prstatus->pr_sid = task_session_vnr(p);
	if (thread_group_leader(p)) {
		struct task_cputime cputime;

		/*
		 * This is the record for the group leader.  It shows the
		 * group-wide total, not its individual thread total.
		 */
		thread_group_cputime(p, &cputime);
		prstatus->pr_utime = ns_to_kernel_old_timeval(cputime.utime);
		prstatus->pr_stime = ns_to_kernel_old_timeval(cputime.stime);
	} else {
		u64 utime, stime;

		/* Non-leader threads report their own CPU time only. */
		task_cputime(p, &utime, &stime);
		prstatus->pr_utime = ns_to_kernel_old_timeval(utime);
		prstatus->pr_stime = ns_to_kernel_old_timeval(stime);
	}

	/* Cumulative times of reaped children of the whole group. */
	prstatus->pr_cutime = ns_to_kernel_old_timeval(p->signal->cutime);
	prstatus->pr_cstime = ns_to_kernel_old_timeval(p->signal->cstime);
}
1629
/*
 * Fill the NT_PRPSINFO note: command line, ids, scheduling state and
 * credentials of task p.  Returns 0 on success, -EFAULT if the
 * argument area could not be read from user memory.
 */
static int fill_psinfo(struct elf_prpsinfo *psinfo, struct task_struct *p,
		       struct mm_struct *mm)
{
	const struct cred *cred;
	unsigned int i, len;

	/* first copy the parameters from user space */
	memset(psinfo, 0, sizeof(struct elf_prpsinfo));

	/* Clamp to the fixed pr_psargs buffer, leaving room for the NUL. */
	len = mm->arg_end - mm->arg_start;
	if (len >= ELF_PRARGSZ)
		len = ELF_PRARGSZ-1;
	if (copy_from_user(&psinfo->pr_psargs,
		           (const char __user *)mm->arg_start, len))
		return -EFAULT;
	/* argv strings are NUL-separated; flatten to a space-separated line. */
	for(i = 0; i < len; i++)
		if (psinfo->pr_psargs[i] == 0)
			psinfo->pr_psargs[i] = ' ';
	psinfo->pr_psargs[len] = 0;

	/* real_parent is RCU-protected; namespace-relative ids throughout. */
	rcu_read_lock();
	psinfo->pr_ppid = task_pid_vnr(rcu_dereference(p->real_parent));
	rcu_read_unlock();
	psinfo->pr_pid = task_pid_vnr(p);
	psinfo->pr_pgrp = task_pgrp_vnr(p);
	psinfo->pr_sid = task_session_vnr(p);

	/* Map task state bitmask to an index, then to a "RSDTZW" letter. */
	i = p->state ? ffz(~p->state) + 1 : 0;
	psinfo->pr_state = i;
	psinfo->pr_sname = (i > 5) ? '.' : "RSDTZW"[i];
	psinfo->pr_zomb = psinfo->pr_sname == 'Z';
	psinfo->pr_nice = task_nice(p);
	psinfo->pr_flag = p->flags;
	/* Credentials are RCU-protected; translate into the user namespace. */
	rcu_read_lock();
	cred = __task_cred(p);
	SET_UID(psinfo->pr_uid, from_kuid_munged(cred->user_ns, cred->uid));
	SET_GID(psinfo->pr_gid, from_kgid_munged(cred->user_ns, cred->gid));
	rcu_read_unlock();
	strncpy(psinfo->pr_fname, p->comm, sizeof(psinfo->pr_fname));

	return 0;
}
1672
Roland McGrath3aba4812008-01-30 13:31:44 +01001673static void fill_auxv_note(struct memelfnote *note, struct mm_struct *mm)
1674{
1675 elf_addr_t *auxv = (elf_addr_t *) mm->saved_auxv;
1676 int i = 0;
1677 do
1678 i += 2;
1679 while (auxv[i - 2] != AT_NULL);
1680 fill_note(note, "CORE", NT_AUXV, i * sizeof(elf_addr_t), auxv);
1681}
1682
/*
 * Build the NT_SIGINFO note: convert the kernel siginfo for the fatal
 * signal into the userspace layout stored in *csigdata.
 */
static void fill_siginfo_note(struct memelfnote *note, user_siginfo_t *csigdata,
			      const kernel_siginfo_t *siginfo)
{
	/*
	 * copy_siginfo_to_user() expects a user pointer; temporarily
	 * switch to KERNEL_DS so it can write to this kernel buffer.
	 */
	mm_segment_t old_fs = get_fs();
	set_fs(KERNEL_DS);
	copy_siginfo_to_user((user_siginfo_t __user *) csigdata, siginfo);
	set_fs(old_fs);
	fill_note(note, "CORE", NT_SIGINFO, sizeof(*csigdata), csigdata);
}
1692
Denys Vlasenko2aa362c2012-10-04 17:15:36 -07001693#define MAX_FILE_NOTE_SIZE (4*1024*1024)
1694/*
1695 * Format of NT_FILE note:
1696 *
1697 * long count -- how many files are mapped
1698 * long page_size -- units for file_ofs
1699 * array of [COUNT] elements of
1700 * long start
1701 * long end
1702 * long file_ofs
1703 * followed by COUNT filenames in ASCII: "FILE1" NUL "FILE2" NUL...
1704 */
/*
 * Build the NT_FILE note describing every file-backed mapping (see the
 * format comment above).  Returns 0 on success, -EINVAL if the note
 * would exceed MAX_FILE_NOTE_SIZE, -ENOMEM on allocation failure.
 * On success the kvmalloc'd buffer is owned by *note (freed later via
 * kvfree of note->data).
 */
static int fill_files_note(struct memelfnote *note)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned count, size, names_ofs, remaining, n;
	user_long_t *data;
	user_long_t *start_end_ofs;
	char *name_base, *name_curpos;

	/* *Estimated* file count and total data size needed */
	count = mm->map_count;
	if (count > UINT_MAX / 64)
		return -EINVAL;
	size = count * 64;

	/* Filenames are packed after the count/page_size header and triples. */
	names_ofs = (2 + 3 * count) * sizeof(data[0]);
 alloc:
	if (size >= MAX_FILE_NOTE_SIZE) /* paranoia check */
		return -EINVAL;
	size = round_up(size, PAGE_SIZE);
	/*
	 * "size" can be 0 here legitimately.
	 * Let it ENOMEM and omit NT_FILE section which will be empty anyway.
	 */
	data = kvmalloc(size, GFP_KERNEL);
	if (ZERO_OR_NULL_PTR(data))
		return -ENOMEM;

	start_end_ofs = data + 2;
	name_base = name_curpos = ((char *)data) + names_ofs;
	remaining = size - names_ofs;
	count = 0;
	for (vma = mm->mmap; vma != NULL; vma = vma->vm_next) {
		struct file *file;
		const char *filename;

		file = vma->vm_file;
		if (!file)
			continue;
		filename = file_path(file, name_curpos, remaining);
		if (IS_ERR(filename)) {
			/* Name didn't fit: grow the buffer and start over. */
			if (PTR_ERR(filename) == -ENAMETOOLONG) {
				kvfree(data);
				size = size * 5 / 4;
				goto alloc;
			}
			continue;
		}

		/* file_path() fills at the end, move name down */
		/* n = strlen(filename) + 1: */
		n = (name_curpos + remaining) - filename;
		remaining = filename - name_curpos;
		memmove(name_curpos, filename, n);
		name_curpos += n;

		/* One (start, end, file_ofs) triple per mapped file. */
		*start_end_ofs++ = vma->vm_start;
		*start_end_ofs++ = vma->vm_end;
		*start_end_ofs++ = vma->vm_pgoff;
		count++;
	}

	/* Now we know exact count of files, can store it */
	data[0] = count;
	data[1] = PAGE_SIZE;
	/*
	 * Count usually is less than mm->map_count,
	 * we need to move filenames down.
	 */
	n = mm->map_count - count;
	if (n != 0) {
		unsigned shift_bytes = n * 3 * sizeof(data[0]);
		memmove(name_base - shift_bytes, name_base,
			name_curpos - name_base);
		name_curpos -= shift_bytes;
	}

	size = name_curpos - (char *)data;
	fill_note(note, "CORE", NT_FILE, size, data);
	return 0;
}
1786
Roland McGrath4206d3a2008-01-30 13:31:45 +01001787#ifdef CORE_DUMP_USE_REGSET
1788#include <linux/regset.h>
1789
1790struct elf_thread_core_info {
1791 struct elf_thread_core_info *next;
1792 struct task_struct *task;
1793 struct elf_prstatus prstatus;
1794 struct memelfnote notes[0];
1795};
1796
/* Everything fill_note_info() gathers for one core dump (regset path). */
struct elf_note_info {
	struct elf_thread_core_info *thread;	/* dumping task first, then others */
	struct memelfnote psinfo;	/* NT_PRPSINFO */
	struct memelfnote signote;	/* NT_SIGINFO */
	struct memelfnote auxv;		/* NT_AUXV */
	struct memelfnote files;	/* NT_FILE (data may be NULL) */
	user_siginfo_t csigdata;	/* backing storage for signote */
	size_t size;			/* running total of all note sizes */
	int thread_notes;		/* notes per thread (regsets with a note type) */
};
1807
Roland McGrathd31472b2008-03-04 14:28:30 -08001808/*
1809 * When a regset has a writeback hook, we call it on each thread before
1810 * dumping user memory. On register window machines, this makes sure the
1811 * user memory backing the register data is up to date before we read it.
1812 */
1813static void do_thread_regset_writeback(struct task_struct *task,
1814 const struct user_regset *regset)
1815{
1816 if (regset->writeback)
1817 regset->writeback(task, regset, 1);
1818}
1819
H. J. Lu0953f65d2012-02-14 13:34:52 -08001820#ifndef PRSTATUS_SIZE
Dmitry Safonov90954e72016-09-05 16:33:06 +03001821#define PRSTATUS_SIZE(S, R) sizeof(S)
H. J. Lu0953f65d2012-02-14 13:34:52 -08001822#endif
1823
1824#ifndef SET_PR_FPVALID
Dmitry Safonov90954e72016-09-05 16:33:06 +03001825#define SET_PR_FPVALID(S, V, R) ((S)->pr_fpvalid = (V))
H. J. Lu0953f65d2012-02-14 13:34:52 -08001826#endif
1827
/*
 * Collect all register-set notes for one thread: NT_PRSTATUS from
 * regset 0, then one note per other active regset.  Adds each note's
 * on-file size to *total.  Returns 1 on success, 0 on allocation
 * failure.
 */
static int fill_thread_core_info(struct elf_thread_core_info *t,
				 const struct user_regset_view *view,
				 long signr, size_t *total)
{
	unsigned int i;
	unsigned int regset0_size = regset_size(t->task, &view->regsets[0]);

	/*
	 * NT_PRSTATUS is the one special case, because the regset data
	 * goes into the pr_reg field inside the note contents, rather
	 * than being the whole note contents.  We fill the reset in here.
	 * We assume that regset 0 is NT_PRSTATUS.
	 */
	fill_prstatus(&t->prstatus, t->task, signr);
	(void) view->regsets[0].get(t->task, &view->regsets[0], 0, regset0_size,
				    &t->prstatus.pr_reg, NULL);

	/* PRSTATUS_SIZE may be arch-overridden (e.g. compat layouts). */
	fill_note(&t->notes[0], "CORE", NT_PRSTATUS,
		  PRSTATUS_SIZE(t->prstatus, regset0_size), &t->prstatus);
	*total += notesize(&t->notes[0]);

	do_thread_regset_writeback(t->task, &view->regsets[0]);

	/*
	 * Each other regset might generate a note too.  For each regset
	 * that has no core_note_type or is inactive, we leave t->notes[i]
	 * all zero and we'll know to skip writing it later.
	 */
	for (i = 1; i < view->n; ++i) {
		const struct user_regset *regset = &view->regsets[i];
		do_thread_regset_writeback(t->task, regset);
		if (regset->core_note_type && regset->get &&
		    (!regset->active || regset->active(t->task, regset) > 0)) {
			int ret;
			size_t size = regset_size(t->task, regset);
			void *data = kmalloc(size, GFP_KERNEL);
			if (unlikely(!data))
				return 0;
			ret = regset->get(t->task, regset,
					  0, size, data, NULL);
			if (unlikely(ret))
				kfree(data);
			else {
				/* FP registers go in "CORE"; everything else in "LINUX". */
				if (regset->core_note_type != NT_PRFPREG)
					fill_note(&t->notes[i], "LINUX",
						  regset->core_note_type,
						  size, data);
				else {
					SET_PR_FPVALID(&t->prstatus,
						       1, regset0_size);
					fill_note(&t->notes[i], "CORE",
						  NT_PRFPREG, size, data);
				}
				*total += notesize(&t->notes[i]);
			}
		}
	}

	return 1;
}
1888
/*
 * Gather everything needed for the note segment of a core dump
 * (regset-based path): the ELF header, per-thread register notes,
 * and the process-wide psinfo/siginfo/auxv/files notes.
 * Returns 1 on success, 0 on failure; partially-allocated state is
 * released by free_note_info() — presumably by the caller, verify
 * against elf_core_dump().
 */
static int fill_note_info(struct elfhdr *elf, int phdrs,
			  struct elf_note_info *info,
			  const kernel_siginfo_t *siginfo, struct pt_regs *regs)
{
	struct task_struct *dump_task = current;
	const struct user_regset_view *view = task_user_regset_view(dump_task);
	struct elf_thread_core_info *t;
	struct elf_prpsinfo *psinfo;
	struct core_thread *ct;
	unsigned int i;

	info->size = 0;
	info->thread = NULL;

	psinfo = kmalloc(sizeof(*psinfo), GFP_KERNEL);
	if (psinfo == NULL) {
		info->psinfo.data = NULL; /* So we don't free this wrongly */
		return 0;
	}

	fill_note(&info->psinfo, "CORE", NT_PRPSINFO, sizeof(*psinfo), psinfo);

	/*
	 * Figure out how many notes we're going to need for each thread.
	 */
	info->thread_notes = 0;
	for (i = 0; i < view->n; ++i)
		if (view->regsets[i].core_note_type != 0)
			++info->thread_notes;

	/*
	 * Sanity check.  We rely on regset 0 being in NT_PRSTATUS,
	 * since it is our one special case.
	 */
	if (unlikely(info->thread_notes == 0) ||
	    unlikely(view->regsets[0].core_note_type != NT_PRSTATUS)) {
		WARN_ON(1);
		return 0;
	}

	/*
	 * Initialize the ELF file header.
	 */
	fill_elf_header(elf, phdrs,
			view->e_machine, view->e_flags);

	/*
	 * Allocate a structure for each thread.
	 */
	for (ct = &dump_task->mm->core_state->dumper; ct; ct = ct->next) {
		/* Sized for the flexible notes[] array: one slot per note. */
		t = kzalloc(offsetof(struct elf_thread_core_info,
				     notes[info->thread_notes]),
			    GFP_KERNEL);
		if (unlikely(!t))
			return 0;

		t->task = ct->task;
		if (ct->task == dump_task || !info->thread) {
			t->next = info->thread;
			info->thread = t;
		} else {
			/*
			 * Make sure to keep the original task at
			 * the head of the list.
			 */
			t->next = info->thread->next;
			info->thread->next = t;
		}
	}

	/*
	 * Now fill in each thread's information.
	 */
	for (t = info->thread; t != NULL; t = t->next)
		if (!fill_thread_core_info(t, view, siginfo->si_signo, &info->size))
			return 0;

	/*
	 * Fill in the two process-wide notes.
	 */
	fill_psinfo(psinfo, dump_task->group_leader, dump_task->mm);
	info->size += notesize(&info->psinfo);

	fill_siginfo_note(&info->signote, &info->csigdata, siginfo);
	info->size += notesize(&info->signote);

	fill_auxv_note(&info->auxv, current->mm);
	info->size += notesize(&info->auxv);

	/* NT_FILE is best-effort: on failure the note is simply omitted. */
	if (fill_files_note(&info->files) == 0)
		info->size += notesize(&info->files);

	return 1;
}
1983
/* Total on-file size of all notes accumulated by fill_note_info(). */
static size_t get_note_info_size(struct elf_note_info *info)
{
	return info->size;
}
1988
1989/*
1990 * Write all the notes for each thread. When writing the first thread, the
1991 * process-wide notes are interleaved after the first thread-specific note.
1992 */
1993static int write_note_info(struct elf_note_info *info,
Al Viroecc8c772013-10-05 15:32:35 -04001994 struct coredump_params *cprm)
Roland McGrath4206d3a2008-01-30 13:31:45 +01001995{
Fabian Frederickb219e252014-06-04 16:12:14 -07001996 bool first = true;
Roland McGrath4206d3a2008-01-30 13:31:45 +01001997 struct elf_thread_core_info *t = info->thread;
1998
1999 do {
2000 int i;
2001
Al Viroecc8c772013-10-05 15:32:35 -04002002 if (!writenote(&t->notes[0], cprm))
Roland McGrath4206d3a2008-01-30 13:31:45 +01002003 return 0;
2004
Al Viroecc8c772013-10-05 15:32:35 -04002005 if (first && !writenote(&info->psinfo, cprm))
Roland McGrath4206d3a2008-01-30 13:31:45 +01002006 return 0;
Al Viroecc8c772013-10-05 15:32:35 -04002007 if (first && !writenote(&info->signote, cprm))
Denys Vlasenko49ae4d42012-10-04 17:15:35 -07002008 return 0;
Al Viroecc8c772013-10-05 15:32:35 -04002009 if (first && !writenote(&info->auxv, cprm))
Roland McGrath4206d3a2008-01-30 13:31:45 +01002010 return 0;
Dan Aloni72023652013-09-30 13:45:02 -07002011 if (first && info->files.data &&
Al Viroecc8c772013-10-05 15:32:35 -04002012 !writenote(&info->files, cprm))
Denys Vlasenko2aa362c2012-10-04 17:15:36 -07002013 return 0;
Roland McGrath4206d3a2008-01-30 13:31:45 +01002014
2015 for (i = 1; i < info->thread_notes; ++i)
2016 if (t->notes[i].data &&
Al Viroecc8c772013-10-05 15:32:35 -04002017 !writenote(&t->notes[i], cprm))
Roland McGrath4206d3a2008-01-30 13:31:45 +01002018 return 0;
2019
Fabian Frederickb219e252014-06-04 16:12:14 -07002020 first = false;
Roland McGrath4206d3a2008-01-30 13:31:45 +01002021 t = t->next;
2022 } while (t);
2023
2024 return 1;
2025}
2026
2027static void free_note_info(struct elf_note_info *info)
2028{
2029 struct elf_thread_core_info *threads = info->thread;
2030 while (threads) {
2031 unsigned int i;
2032 struct elf_thread_core_info *t = threads;
2033 threads = t->next;
2034 WARN_ON(t->notes[0].data && t->notes[0].data != &t->prstatus);
2035 for (i = 1; i < info->thread_notes; ++i)
2036 kfree(t->notes[i].data);
2037 kfree(t);
2038 }
2039 kfree(info->psinfo.data);
Alexey Dobriyan86a2bb52018-06-14 15:27:24 -07002040 kvfree(info->files.data);
Roland McGrath4206d3a2008-01-30 13:31:45 +01002041}
2042
2043#else
2044
Linus Torvalds1da177e2005-04-16 15:20:36 -07002045/* Here is the structure in which status of each thread is captured. */
struct elf_thread_status
{
	struct list_head list;		/* links into elf_note_info.thread_list */
	struct elf_prstatus prstatus;	/* NT_PRSTATUS */
	elf_fpregset_t fpu;		/* NT_PRFPREG */
	struct task_struct *thread;	/* thread this status describes */
#ifdef ELF_CORE_COPY_XFPREGS
	elf_fpxregset_t xfpu;		/* ELF_CORE_XFPREG_TYPE */
#endif
	struct memelfnote notes[3];	/* prstatus, fpu, optionally xfpu */
	int num_notes;			/* how many of notes[] are valid */
};
2058
2059/*
2060 * In order to add the specific thread information for the elf file format,
Jesper Juhlf4e5cc22006-06-23 02:05:35 -07002061 * we need to keep a linked list of every threads pr_status and then create
2062 * a single section for them in the final core file.
Linus Torvalds1da177e2005-04-16 15:20:36 -07002063 */
2064static int elf_dump_thread_status(long signr, struct elf_thread_status *t)
2065{
2066 int sz = 0;
2067 struct task_struct *p = t->thread;
2068 t->num_notes = 0;
2069
2070 fill_prstatus(&t->prstatus, p, signr);
2071 elf_core_copy_task_regs(p, &t->prstatus.pr_reg);
2072
Jesper Juhlf4e5cc22006-06-23 02:05:35 -07002073 fill_note(&t->notes[0], "CORE", NT_PRSTATUS, sizeof(t->prstatus),
2074 &(t->prstatus));
Linus Torvalds1da177e2005-04-16 15:20:36 -07002075 t->num_notes++;
2076 sz += notesize(&t->notes[0]);
2077
Jesper Juhlf4e5cc22006-06-23 02:05:35 -07002078 if ((t->prstatus.pr_fpvalid = elf_core_copy_task_fpregs(p, NULL,
2079 &t->fpu))) {
2080 fill_note(&t->notes[1], "CORE", NT_PRFPREG, sizeof(t->fpu),
2081 &(t->fpu));
Linus Torvalds1da177e2005-04-16 15:20:36 -07002082 t->num_notes++;
2083 sz += notesize(&t->notes[1]);
2084 }
2085
2086#ifdef ELF_CORE_COPY_XFPREGS
2087 if (elf_core_copy_task_xfpregs(p, &t->xfpu)) {
Mark Nelson5b20cd82007-10-16 23:25:39 -07002088 fill_note(&t->notes[2], "LINUX", ELF_CORE_XFPREG_TYPE,
2089 sizeof(t->xfpu), &t->xfpu);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002090 t->num_notes++;
2091 sz += notesize(&t->notes[2]);
2092 }
2093#endif
2094 return sz;
2095}
2096
/* Core-dump note bookkeeping for the non-regset (legacy) path. */
struct elf_note_info {
	struct memelfnote *notes;	/* array of up to 8 process-wide notes */
	struct memelfnote *notes_files;	/* points at the NT_FILE entry, if any */
	struct elf_prstatus *prstatus;	/* NT_PRSTATUS */
	struct elf_prpsinfo *psinfo;	/* NT_PRPSINFO */
	struct list_head thread_list;	/* elf_thread_status for other threads */
	elf_fpregset_t *fpu;		/* NT_PRFPREG buffer */
#ifdef ELF_CORE_COPY_XFPREGS
	elf_fpxregset_t *xfpu;		/* ELF_CORE_XFPREG_TYPE buffer */
#endif
	user_siginfo_t csigdata;	/* backing storage for NT_SIGINFO */
	int thread_status_size;		/* total size of other threads' notes */
	int numnote;			/* how many of notes[] are in use */
};
2111
/*
 * Zero *info and allocate its fixed-size note buffers.  Returns 1 on
 * success, 0 on any allocation failure.  On failure, already-allocated
 * members are left in *info — presumably released by the caller's
 * cleanup path; verify against free_note_info()/elf_core_dump().
 */
static int elf_note_info_init(struct elf_note_info *info)
{
	memset(info, 0, sizeof(*info));
	INIT_LIST_HEAD(&info->thread_list);

	/* Allocate space for ELF notes */
	/* 8 = maximum number of process-wide notes this path emits. */
	info->notes = kmalloc_array(8, sizeof(struct memelfnote), GFP_KERNEL);
	if (!info->notes)
		return 0;
	info->psinfo = kmalloc(sizeof(*info->psinfo), GFP_KERNEL);
	if (!info->psinfo)
		return 0;
	info->prstatus = kmalloc(sizeof(*info->prstatus), GFP_KERNEL);
	if (!info->prstatus)
		return 0;
	info->fpu = kmalloc(sizeof(*info->fpu), GFP_KERNEL);
	if (!info->fpu)
		return 0;
#ifdef ELF_CORE_COPY_XFPREGS
	info->xfpu = kmalloc(sizeof(*info->xfpu), GFP_KERNEL);
	if (!info->xfpu)
		return 0;
#endif
	return 1;
}
Roland McGrath3aba4812008-01-30 13:31:44 +01002137
/*
 * Collect all the non-memory information about the process into @info and
 * fill in the ELF file header.
 *
 * @elf:     ELF header to initialize
 * @phdrs:   program header count to record in the ELF header
 * @info:    note bookkeeping, zeroed and populated here
 * @siginfo: signal that caused the dump (si_signo is recorded in the notes)
 * @regs:    register state of the dumping thread
 *
 * Returns 1 on success, 0 on allocation failure.  On failure the caller is
 * expected to run free_note_info() to release partial allocations.
 */
static int fill_note_info(struct elfhdr *elf, int phdrs,
			  struct elf_note_info *info,
			  const kernel_siginfo_t *siginfo, struct pt_regs *regs)
{
	struct core_thread *ct;
	struct elf_thread_status *ets;

	if (!elf_note_info_init(info))
		return 0;

	/*
	 * Queue a status record for every other thread listed in the mm's
	 * core_state dumper list.
	 */
	for (ct = current->mm->core_state->dumper.next;
					ct; ct = ct->next) {
		ets = kzalloc(sizeof(*ets), GFP_KERNEL);
		if (!ets)
			return 0;

		ets->thread = ct->task;
		list_add(&ets->list, &info->thread_list);
	}

	/* Fill each thread's notes and accumulate their on-disk size. */
	list_for_each_entry(ets, &info->thread_list, list) {
		int sz;

		sz = elf_dump_thread_status(siginfo->si_signo, ets);
		info->thread_status_size += sz;
	}
	/* now collect the dump for the current */
	memset(info->prstatus, 0, sizeof(*info->prstatus));
	fill_prstatus(info->prstatus, current, siginfo->si_signo);
	elf_core_copy_regs(&info->prstatus->pr_reg, regs);

	/* Set up header */
	fill_elf_header(elf, phdrs, ELF_ARCH, ELF_CORE_EFLAGS);

	/*
	 * Set up the notes in similar form to SVR4 core dumps made
	 * with info from their /proc.
	 */

	fill_note(info->notes + 0, "CORE", NT_PRSTATUS,
		  sizeof(*info->prstatus), info->prstatus);
	fill_psinfo(info->psinfo, current->group_leader, current->mm);
	fill_note(info->notes + 1, "CORE", NT_PRPSINFO,
		  sizeof(*info->psinfo), info->psinfo);

	fill_siginfo_note(info->notes + 2, &info->csigdata, siginfo);
	fill_auxv_note(info->notes + 3, current->mm);
	info->numnote = 4;

	/* The files note is optional: on failure just carry on without it. */
	if (fill_files_note(info->notes + info->numnote) == 0) {
		info->notes_files = info->notes + info->numnote;
		info->numnote++;
	}

	/* Try to dump the FPU. */
	info->prstatus->pr_fpvalid = elf_core_copy_task_fpregs(current, regs,
							       info->fpu);
	if (info->prstatus->pr_fpvalid)
		fill_note(info->notes + info->numnote++,
			  "CORE", NT_PRFPREG, sizeof(*info->fpu), info->fpu);
#ifdef ELF_CORE_COPY_XFPREGS
	if (elf_core_copy_task_xfpregs(current, info->xfpu))
		fill_note(info->notes + info->numnote++,
			  "LINUX", ELF_CORE_XFPREG_TYPE,
			  sizeof(*info->xfpu), info->xfpu);
#endif

	return 1;
}
2207
2208static size_t get_note_info_size(struct elf_note_info *info)
2209{
2210 int sz = 0;
2211 int i;
2212
2213 for (i = 0; i < info->numnote; i++)
2214 sz += notesize(info->notes + i);
2215
2216 sz += info->thread_status_size;
2217
2218 return sz;
2219}
2220
2221static int write_note_info(struct elf_note_info *info,
Al Viroecc8c772013-10-05 15:32:35 -04002222 struct coredump_params *cprm)
Roland McGrath3aba4812008-01-30 13:31:44 +01002223{
Alexey Dobriyan93f044e2019-03-07 16:28:59 -08002224 struct elf_thread_status *ets;
Roland McGrath3aba4812008-01-30 13:31:44 +01002225 int i;
Roland McGrath3aba4812008-01-30 13:31:44 +01002226
2227 for (i = 0; i < info->numnote; i++)
Al Viroecc8c772013-10-05 15:32:35 -04002228 if (!writenote(info->notes + i, cprm))
Roland McGrath3aba4812008-01-30 13:31:44 +01002229 return 0;
2230
2231 /* write out the thread status notes section */
Alexey Dobriyan93f044e2019-03-07 16:28:59 -08002232 list_for_each_entry(ets, &info->thread_list, list) {
2233 for (i = 0; i < ets->num_notes; i++)
2234 if (!writenote(&ets->notes[i], cprm))
Roland McGrath3aba4812008-01-30 13:31:44 +01002235 return 0;
2236 }
2237
2238 return 1;
2239}
2240
2241static void free_note_info(struct elf_note_info *info)
2242{
2243 while (!list_empty(&info->thread_list)) {
2244 struct list_head *tmp = info->thread_list.next;
2245 list_del(tmp);
2246 kfree(list_entry(tmp, struct elf_thread_status, list));
2247 }
2248
Dan Aloni72023652013-09-30 13:45:02 -07002249 /* Free data possibly allocated by fill_files_note(): */
2250 if (info->notes_files)
Alexey Dobriyan86a2bb52018-06-14 15:27:24 -07002251 kvfree(info->notes_files->data);
Denys Vlasenko2aa362c2012-10-04 17:15:36 -07002252
Roland McGrath3aba4812008-01-30 13:31:44 +01002253 kfree(info->prstatus);
2254 kfree(info->psinfo);
2255 kfree(info->notes);
2256 kfree(info->fpu);
2257#ifdef ELF_CORE_COPY_XFPREGS
2258 kfree(info->xfpu);
2259#endif
2260}
2261
Roland McGrath4206d3a2008-01-30 13:31:45 +01002262#endif
2263
Roland McGrathf47aef52007-01-26 00:56:49 -08002264static struct vm_area_struct *first_vma(struct task_struct *tsk,
2265 struct vm_area_struct *gate_vma)
2266{
2267 struct vm_area_struct *ret = tsk->mm->mmap;
2268
2269 if (ret)
2270 return ret;
2271 return gate_vma;
2272}
2273/*
2274 * Helper function for iterating across a vma list. It ensures that the caller
2275 * will visit `gate_vma' prior to terminating the search.
2276 */
2277static struct vm_area_struct *next_vma(struct vm_area_struct *this_vma,
2278 struct vm_area_struct *gate_vma)
2279{
2280 struct vm_area_struct *ret;
2281
2282 ret = this_vma->vm_next;
2283 if (ret)
2284 return ret;
2285 if (this_vma == gate_vma)
2286 return NULL;
2287 return gate_vma;
2288}
2289
Daisuke HATAYAMA8d9032b2010-03-05 13:44:10 -08002290static void fill_extnum_info(struct elfhdr *elf, struct elf_shdr *shdr4extnum,
2291 elf_addr_t e_shoff, int segs)
2292{
2293 elf->e_shoff = e_shoff;
2294 elf->e_shentsize = sizeof(*shdr4extnum);
2295 elf->e_shnum = 1;
2296 elf->e_shstrndx = SHN_UNDEF;
2297
2298 memset(shdr4extnum, 0, sizeof(*shdr4extnum));
2299
2300 shdr4extnum->sh_type = SHT_NULL;
2301 shdr4extnum->sh_size = elf->e_shnum;
2302 shdr4extnum->sh_link = elf->e_shstrndx;
2303 shdr4extnum->sh_info = segs;
2304}
2305
Linus Torvalds1da177e2005-04-16 15:20:36 -07002306/*
2307 * Actual dumper
2308 *
2309 * This is a two-pass process; first we find the offsets of the bits,
2310 * and then they are actually written out. If we run out of core limit
2311 * we just truncate.
2312 */
static int elf_core_dump(struct coredump_params *cprm)
{
	int has_dumped = 0;
	mm_segment_t fs;
	int segs, i;
	size_t vma_data_size = 0;
	struct vm_area_struct *vma, *gate_vma;
	struct elfhdr elf;
	loff_t offset = 0, dataoff;
	struct elf_note_info info = { };
	struct elf_phdr *phdr4note = NULL;
	struct elf_shdr *shdr4extnum = NULL;
	Elf_Half e_phnum;
	elf_addr_t e_shoff;
	elf_addr_t *vma_filesz = NULL;	/* per-VMA dump sizes, indexed in walk order */

	/*
	 * We no longer stop all VM operations.
	 *
	 * This is because those processes that could possibly change map_count
	 * or the mmap / vma pages are now blocked in do_exit on current
	 * finishing this core dump.
	 *
	 * Only ptrace can touch these memory addresses, but it doesn't change
	 * the map_count or the pages allocated. So no possibility of crashing
	 * exists while dumping the mm->vm_next areas to the core file.
	 */

	/*
	 * The number of segs is recorded into the ELF header as a 16bit value.
	 * Please check DEFAULT_MAX_MAP_COUNT definition when you modify here.
	 */
	segs = current->mm->map_count;
	segs += elf_core_extra_phdrs();

	gate_vma = get_gate_vma(current->mm);
	if (gate_vma != NULL)
		segs++;

	/* for notes section */
	segs++;

	/* If segs > PN_XNUM(0xffff), then e_phnum overflows. To avoid
	 * this, kernel supports extended numbering. Have a look at
	 * include/linux/elf.h for further information. */
	e_phnum = segs > PN_XNUM ? PN_XNUM : segs;

	/*
	 * Collect all the non-memory information about the process for the
	 * notes.  This also sets up the file header.
	 */
	if (!fill_note_info(&elf, e_phnum, &info, cprm->siginfo, cprm->regs))
		goto cleanup;

	has_dumped = 1;

	fs = get_fs();
	set_fs(KERNEL_DS);

	/* First pass: compute file offsets of every piece of the dump. */
	offset += sizeof(elf);				/* Elf header */
	offset += segs * sizeof(struct elf_phdr);	/* Program headers */

	/* Write notes phdr entry */
	{
		size_t sz = get_note_info_size(&info);

		sz += elf_coredump_extra_notes_size();

		phdr4note = kmalloc(sizeof(*phdr4note), GFP_KERNEL);
		if (!phdr4note)
			goto end_coredump;

		fill_elf_note_phdr(phdr4note, sz, offset);
		offset += sz;
	}

	/* Segment data starts page-aligned after headers and notes. */
	dataoff = offset = roundup(offset, ELF_EXEC_PAGESIZE);

	/*
	 * Zero vma process will get ZERO_SIZE_PTR here.
	 * Let coredump continue for register state at least.
	 */
	vma_filesz = kvmalloc(array_size(sizeof(*vma_filesz), (segs - 1)),
			      GFP_KERNEL);
	if (!vma_filesz)
		goto end_coredump;

	/* Decide how much of each VMA to dump; remember per-VMA sizes. */
	for (i = 0, vma = first_vma(current, gate_vma); vma != NULL;
			vma = next_vma(vma, gate_vma)) {
		unsigned long dump_size;

		dump_size = vma_dump_size(vma, cprm->mm_flags);
		vma_filesz[i++] = dump_size;
		vma_data_size += dump_size;
	}

	offset += vma_data_size;
	offset += elf_core_extra_data_size();
	e_shoff = offset;

	if (e_phnum == PN_XNUM) {
		shdr4extnum = kmalloc(sizeof(*shdr4extnum), GFP_KERNEL);
		if (!shdr4extnum)
			goto end_coredump;
		fill_extnum_info(&elf, shdr4extnum, e_shoff, segs);
	}

	/* Second pass: actually emit the file, tracking offset as we go. */
	offset = dataoff;

	if (!dump_emit(cprm, &elf, sizeof(elf)))
		goto end_coredump;

	if (!dump_emit(cprm, phdr4note, sizeof(*phdr4note)))
		goto end_coredump;

	/* Write program headers for segments dump */
	for (i = 0, vma = first_vma(current, gate_vma); vma != NULL;
			vma = next_vma(vma, gate_vma)) {
		struct elf_phdr phdr;

		phdr.p_type = PT_LOAD;
		phdr.p_offset = offset;
		phdr.p_vaddr = vma->vm_start;
		phdr.p_paddr = 0;
		phdr.p_filesz = vma_filesz[i++];
		phdr.p_memsz = vma->vm_end - vma->vm_start;
		offset += phdr.p_filesz;
		phdr.p_flags = vma->vm_flags & VM_READ ? PF_R : 0;
		if (vma->vm_flags & VM_WRITE)
			phdr.p_flags |= PF_W;
		if (vma->vm_flags & VM_EXEC)
			phdr.p_flags |= PF_X;
		phdr.p_align = ELF_EXEC_PAGESIZE;

		if (!dump_emit(cprm, &phdr, sizeof(phdr)))
			goto end_coredump;
	}

	if (!elf_core_write_extra_phdrs(cprm, offset))
		goto end_coredump;

	/* write out the notes section */
	if (!write_note_info(&info, cprm))
		goto end_coredump;

	if (elf_coredump_extra_notes_write(cprm))
		goto end_coredump;

	/* Align to page */
	if (!dump_skip(cprm, dataoff - cprm->pos))
		goto end_coredump;

	/* Dump the memory contents, one page at a time per VMA. */
	for (i = 0, vma = first_vma(current, gate_vma); vma != NULL;
			vma = next_vma(vma, gate_vma)) {
		unsigned long addr;
		unsigned long end;

		end = vma->vm_start + vma_filesz[i++];

		for (addr = vma->vm_start; addr < end; addr += PAGE_SIZE) {
			struct page *page;
			int stop;

			/* Pages we cannot fetch are written as holes. */
			page = get_dump_page(addr);
			if (page) {
				void *kaddr = kmap(page);
				stop = !dump_emit(cprm, kaddr, PAGE_SIZE);
				kunmap(page);
				put_page(page);
			} else
				stop = !dump_skip(cprm, PAGE_SIZE);
			if (stop)
				goto end_coredump;
		}
	}
	dump_truncate(cprm);

	if (!elf_core_write_extra_data(cprm))
		goto end_coredump;

	if (e_phnum == PN_XNUM) {
		if (!dump_emit(cprm, shdr4extnum, sizeof(*shdr4extnum)))
			goto end_coredump;
	}

end_coredump:
	set_fs(fs);

cleanup:
	free_note_info(&info);
	kfree(shdr4extnum);
	kvfree(vma_filesz);
	kfree(phdr4note);
	return has_dumped;
}
2508
Christoph Hellwig698ba7b2009-12-15 16:47:37 -08002509#endif /* CONFIG_ELF_CORE */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002510
/* Register the ELF loader with the binfmt core. */
static int __init init_elf_binfmt(void)
{
	register_binfmt(&elf_format);
	return 0;
}
2516
static void __exit exit_elf_binfmt(void)
{
	/* Remove the ELF loader. */
	unregister_binfmt(&elf_format);
}
2522
/* Registered via core_initcall rather than module_init. */
core_initcall(init_elf_binfmt);
module_exit(exit_elf_binfmt);
MODULE_LICENSE("GPL");