// SPDX-License-Identifier: GPL-2.0-only
/*
 * linux/fs/binfmt_elf.c
 *
 * These are the functions used to load ELF format executables as used
 * on SVr4 machines.  Information on the format may be found in the book
 * "UNIX SYSTEM V RELEASE 4 Programmers Guide: Ansi C and Programming Support
 * Tools".
 *
 * Copyright 1993, 1994: Eric Youngdale (ericy@cais.com).
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/errno.h>
#include <linux/signal.h>
#include <linux/binfmts.h>
#include <linux/string.h>
#include <linux/file.h>
#include <linux/slab.h>
#include <linux/personality.h>
#include <linux/elfcore.h>
#include <linux/init.h>
#include <linux/highuid.h>
#include <linux/compiler.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/vmalloc.h>
#include <linux/security.h>
#include <linux/random.h>
#include <linux/elf.h>
#include <linux/elf-randomize.h>
#include <linux/utsname.h>
#include <linux/coredump.h>
#include <linux/sched.h>
#include <linux/sched/coredump.h>
#include <linux/sched/task_stack.h>
#include <linux/sched/cputime.h>
#include <linux/sizes.h>
#include <linux/types.h>
#include <linux/cred.h>
#include <linux/dax.h>
#include <linux/uaccess.h>
#include <asm/param.h>
#include <asm/page.h>

#ifndef ELF_COMPAT
#define ELF_COMPAT 0
#endif

#ifndef user_long_t
#define user_long_t long
#endif
#ifndef user_siginfo_t
#define user_siginfo_t siginfo_t
#endif

/* That's for binfmt_elf_fdpic to deal with */
#ifndef elf_check_fdpic
#define elf_check_fdpic(ex) false
#endif

static int load_elf_binary(struct linux_binprm *bprm);

#ifdef CONFIG_USELIB
static int load_elf_library(struct file *);
#else
#define load_elf_library NULL
#endif

/*
 * If we don't support core dumping, then supply a NULL so we
 * don't even try.
 */
#ifdef CONFIG_ELF_CORE
static int elf_core_dump(struct coredump_params *cprm);
#else
#define elf_core_dump	NULL
#endif

#if ELF_EXEC_PAGESIZE > PAGE_SIZE
#define ELF_MIN_ALIGN	ELF_EXEC_PAGESIZE
#else
#define ELF_MIN_ALIGN	PAGE_SIZE
#endif

#ifndef ELF_CORE_EFLAGS
#define ELF_CORE_EFLAGS	0
#endif

#define ELF_PAGESTART(_v) ((_v) & ~(unsigned long)(ELF_MIN_ALIGN-1))
#define ELF_PAGEOFFSET(_v) ((_v) & (ELF_MIN_ALIGN-1))
#define ELF_PAGEALIGN(_v) (((_v) + ELF_MIN_ALIGN - 1) & ~(ELF_MIN_ALIGN - 1))
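
/*
 * Example (assuming ELF_MIN_ALIGN == 4096, i.e. 0x1000): for _v == 0x12345,
 * ELF_PAGESTART() gives 0x12000, ELF_PAGEOFFSET() gives 0x345, and
 * ELF_PAGEALIGN() rounds up to 0x13000.
 */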

static struct linux_binfmt elf_format = {
	.module		= THIS_MODULE,
	.load_binary	= load_elf_binary,
	.load_shlib	= load_elf_library,
	.core_dump	= elf_core_dump,
	.min_coredump	= ELF_EXEC_PAGESIZE,
};

#define BAD_ADDR(x) (unlikely((unsigned long)(x) >= TASK_SIZE))

static int set_brk(unsigned long start, unsigned long end, int prot)
{
	start = ELF_PAGEALIGN(start);
	end = ELF_PAGEALIGN(end);
	if (end > start) {
		/*
		 * Map the last of the bss segment.
		 * If the header is requesting these pages to be
		 * executable, honour that (ppc32 needs this).
		 */
		int error = vm_brk_flags(start, end - start,
				prot & PROT_EXEC ? VM_EXEC : 0);
		if (error)
			return error;
	}
	current->mm->start_brk = current->mm->brk = end;
	return 0;
}

/* We need to explicitly zero any fractional pages
   after the data section (i.e. bss).  This would
   contain the junk from the file that should not
   be in memory
 */
static int padzero(unsigned long elf_bss)
{
	unsigned long nbyte;

	nbyte = ELF_PAGEOFFSET(elf_bss);
	if (nbyte) {
		nbyte = ELF_MIN_ALIGN - nbyte;
		if (clear_user((void __user *) elf_bss, nbyte))
			return -EFAULT;
	}
	return 0;
}

/* Let's use some macros to make this stack manipulation a little clearer */
#ifdef CONFIG_STACK_GROWSUP
#define STACK_ADD(sp, items) ((elf_addr_t __user *)(sp) + (items))
#define STACK_ROUND(sp, items) \
	((15 + (unsigned long) ((sp) + (items))) &~ 15UL)
#define STACK_ALLOC(sp, len) ({ \
	elf_addr_t __user *old_sp = (elf_addr_t __user *)sp; sp += len; \
	old_sp; })
#else
#define STACK_ADD(sp, items) ((elf_addr_t __user *)(sp) - (items))
#define STACK_ROUND(sp, items) \
	(((unsigned long) (sp - items)) &~ 15UL)
#define STACK_ALLOC(sp, len) ({ sp -= len ; sp; })
#endif
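
/*
 * Illustration (downward-growing case): STACK_ALLOC(sp, 5) moves sp down
 * by 5 bytes and evaluates to the new, lower address, while STACK_ROUND()
 * masks the final address down to a 16-byte boundary so the initial stack
 * pointer ends up suitably aligned for the ABI.
 */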

#ifndef ELF_BASE_PLATFORM
/*
 * AT_BASE_PLATFORM indicates the "real" hardware/microarchitecture.
 * If the arch defines ELF_BASE_PLATFORM (in asm/elf.h), the value
 * will be copied to the user stack in the same manner as AT_PLATFORM.
 */
#define ELF_BASE_PLATFORM NULL
#endif

static int
create_elf_tables(struct linux_binprm *bprm, const struct elfhdr *exec,
		unsigned long load_addr, unsigned long interp_load_addr,
		unsigned long e_entry)
{
	struct mm_struct *mm = current->mm;
	unsigned long p = bprm->p;
	int argc = bprm->argc;
	int envc = bprm->envc;
	elf_addr_t __user *sp;
	elf_addr_t __user *u_platform;
	elf_addr_t __user *u_base_platform;
	elf_addr_t __user *u_rand_bytes;
	const char *k_platform = ELF_PLATFORM;
	const char *k_base_platform = ELF_BASE_PLATFORM;
	unsigned char k_rand_bytes[16];
	int items;
	elf_addr_t *elf_info;
	int ei_index;
	const struct cred *cred = current_cred();
	struct vm_area_struct *vma;

	/*
	 * In some cases (e.g. Hyper-Threading), we want to avoid L1
	 * evictions by the processes running on the same package. One
	 * thing we can do is to shuffle the initial stack for them.
	 */

	p = arch_align_stack(p);

	/*
	 * If this architecture has a platform capability string, copy it
	 * to userspace.  In some cases (Sparc), this info is impossible
	 * for userspace to get any other way, in others (i386) it is
	 * merely difficult.
	 */
	u_platform = NULL;
	if (k_platform) {
		size_t len = strlen(k_platform) + 1;

		u_platform = (elf_addr_t __user *)STACK_ALLOC(p, len);
		if (__copy_to_user(u_platform, k_platform, len))
			return -EFAULT;
	}

	/*
	 * If this architecture has a "base" platform capability
	 * string, copy it to userspace.
	 */
	u_base_platform = NULL;
	if (k_base_platform) {
		size_t len = strlen(k_base_platform) + 1;

		u_base_platform = (elf_addr_t __user *)STACK_ALLOC(p, len);
		if (__copy_to_user(u_base_platform, k_base_platform, len))
			return -EFAULT;
	}

	/*
	 * Generate 16 random bytes for userspace PRNG seeding.
	 */
	get_random_bytes(k_rand_bytes, sizeof(k_rand_bytes));
	u_rand_bytes = (elf_addr_t __user *)
		       STACK_ALLOC(p, sizeof(k_rand_bytes));
	if (__copy_to_user(u_rand_bytes, k_rand_bytes, sizeof(k_rand_bytes)))
		return -EFAULT;

	/* Create the ELF interpreter info */
	elf_info = (elf_addr_t *)mm->saved_auxv;
	/* update AT_VECTOR_SIZE_BASE if the number of NEW_AUX_ENT() changes */
#define NEW_AUX_ENT(id, val) \
	do { \
		*elf_info++ = id; \
		*elf_info++ = val; \
	} while (0)

#ifdef ARCH_DLINFO
	/*
	 * ARCH_DLINFO must come first so PPC can do its special alignment of
	 * AUXV.
	 * update AT_VECTOR_SIZE_ARCH if the number of NEW_AUX_ENT() in
	 * ARCH_DLINFO changes
	 */
	ARCH_DLINFO;
#endif
	NEW_AUX_ENT(AT_HWCAP, ELF_HWCAP);
	NEW_AUX_ENT(AT_PAGESZ, ELF_EXEC_PAGESIZE);
	NEW_AUX_ENT(AT_CLKTCK, CLOCKS_PER_SEC);
	NEW_AUX_ENT(AT_PHDR, load_addr + exec->e_phoff);
	NEW_AUX_ENT(AT_PHENT, sizeof(struct elf_phdr));
	NEW_AUX_ENT(AT_PHNUM, exec->e_phnum);
	NEW_AUX_ENT(AT_BASE, interp_load_addr);
	NEW_AUX_ENT(AT_FLAGS, 0);
	NEW_AUX_ENT(AT_ENTRY, e_entry);
	NEW_AUX_ENT(AT_UID, from_kuid_munged(cred->user_ns, cred->uid));
	NEW_AUX_ENT(AT_EUID, from_kuid_munged(cred->user_ns, cred->euid));
	NEW_AUX_ENT(AT_GID, from_kgid_munged(cred->user_ns, cred->gid));
	NEW_AUX_ENT(AT_EGID, from_kgid_munged(cred->user_ns, cred->egid));
	NEW_AUX_ENT(AT_SECURE, bprm->secureexec);
	NEW_AUX_ENT(AT_RANDOM, (elf_addr_t)(unsigned long)u_rand_bytes);
#ifdef ELF_HWCAP2
	NEW_AUX_ENT(AT_HWCAP2, ELF_HWCAP2);
#endif
	NEW_AUX_ENT(AT_EXECFN, bprm->exec);
	if (k_platform) {
		NEW_AUX_ENT(AT_PLATFORM,
			    (elf_addr_t)(unsigned long)u_platform);
	}
	if (k_base_platform) {
		NEW_AUX_ENT(AT_BASE_PLATFORM,
			    (elf_addr_t)(unsigned long)u_base_platform);
	}
	if (bprm->interp_flags & BINPRM_FLAGS_EXECFD) {
		NEW_AUX_ENT(AT_EXECFD, bprm->interp_data);
	}
#undef NEW_AUX_ENT
	/* AT_NULL is zero; clear the rest too */
	memset(elf_info, 0, (char *)mm->saved_auxv +
			sizeof(mm->saved_auxv) - (char *)elf_info);

	/* And advance past the AT_NULL entry. */
	elf_info += 2;

	ei_index = elf_info - (elf_addr_t *)mm->saved_auxv;
	sp = STACK_ADD(p, ei_index);

	items = (argc + 1) + (envc + 1) + 1;
	bprm->p = STACK_ROUND(sp, items);

	/* Point sp at the lowest address on the stack */
#ifdef CONFIG_STACK_GROWSUP
	sp = (elf_addr_t __user *)bprm->p - items - ei_index;
	bprm->exec = (unsigned long)sp; /* XXX: PARISC HACK */
#else
	sp = (elf_addr_t __user *)bprm->p;
#endif


	/*
	 * Grow the stack manually; some architectures have a limit on how
	 * far ahead a user-space access may be in order to grow the stack.
	 */
	vma = find_extend_vma(mm, bprm->p);
	if (!vma)
		return -EFAULT;

	/* Now, let's put argc (and argv, envp if appropriate) on the stack */
	if (__put_user(argc, sp++))
		return -EFAULT;

	/* Populate list of argv pointers back to argv strings. */
	p = mm->arg_end = mm->arg_start;
	while (argc-- > 0) {
		size_t len;
		if (__put_user((elf_addr_t)p, sp++))
			return -EFAULT;
		len = strnlen_user((void __user *)p, MAX_ARG_STRLEN);
		if (!len || len > MAX_ARG_STRLEN)
			return -EINVAL;
		p += len;
	}
	if (__put_user(0, sp++))
		return -EFAULT;
	mm->arg_end = p;

	/* Populate list of envp pointers back to envp strings. */
	mm->env_end = mm->env_start = p;
	while (envc-- > 0) {
		size_t len;
		if (__put_user((elf_addr_t)p, sp++))
			return -EFAULT;
		len = strnlen_user((void __user *)p, MAX_ARG_STRLEN);
		if (!len || len > MAX_ARG_STRLEN)
			return -EINVAL;
		p += len;
	}
	if (__put_user(0, sp++))
		return -EFAULT;
	mm->env_end = p;

	/* Put the elf_info on the stack in the right place.  */
	if (copy_to_user(sp, mm->saved_auxv, ei_index * sizeof(elf_addr_t)))
		return -EFAULT;
	return 0;
}
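
/*
 * The initial stack image built above (downward-growing case) is, from
 * low to high addresses, roughly:
 *
 *	argc
 *	argv[0..argc-1] pointers, NULL
 *	envp[0..n-1] pointers, NULL
 *	auxv (a_type, a_val) pairs, terminated by AT_NULL
 *	...
 *	argument/environment strings, platform strings, AT_RANDOM bytes
 *
 * which is the System V ABI process-startup layout that the dynamic
 * linker and C library startup code consume.
 */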

#ifndef elf_map

static unsigned long elf_map(struct file *filep, unsigned long addr,
		const struct elf_phdr *eppnt, int prot, int type,
		unsigned long total_size)
{
	unsigned long map_addr;
	unsigned long size = eppnt->p_filesz + ELF_PAGEOFFSET(eppnt->p_vaddr);
	unsigned long off = eppnt->p_offset - ELF_PAGEOFFSET(eppnt->p_vaddr);
	addr = ELF_PAGESTART(addr);
	size = ELF_PAGEALIGN(size);

	/* mmap() will return -EINVAL if given a zero size, but a
	 * segment with zero filesize is perfectly valid */
	if (!size)
		return addr;

	/*
	 * total_size is the size of the ELF (interpreter) image.
	 * The _first_ mmap needs to know the full size, otherwise
	 * randomization might put this image into an overlapping
	 * position with the ELF binary image. (since size < total_size)
	 * So we first map the 'big' image - and unmap the remainder at
	 * the end. (which unmap is needed for ELF images with holes.)
	 */
	if (total_size) {
		total_size = ELF_PAGEALIGN(total_size);
		map_addr = vm_mmap(filep, addr, total_size, prot, type, off);
		if (!BAD_ADDR(map_addr))
			vm_munmap(map_addr+size, total_size-size);
	} else
		map_addr = vm_mmap(filep, addr, size, prot, type, off);

	if ((type & MAP_FIXED_NOREPLACE) &&
	    PTR_ERR((void *)map_addr) == -EEXIST)
		pr_info("%d (%s): Uhuuh, elf segment at %px requested but the memory is mapped already\n",
			task_pid_nr(current), current->comm, (void *)addr);

	return(map_addr);
}

#endif /* !elf_map */

static unsigned long total_mapping_size(const struct elf_phdr *cmds, int nr)
{
	int i, first_idx = -1, last_idx = -1;

	for (i = 0; i < nr; i++) {
		if (cmds[i].p_type == PT_LOAD) {
			last_idx = i;
			if (first_idx == -1)
				first_idx = i;
		}
	}
	if (first_idx == -1)
		return 0;

	return cmds[last_idx].p_vaddr + cmds[last_idx].p_memsz -
				ELF_PAGESTART(cmds[first_idx].p_vaddr);
}
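
/*
 * For example, with two PT_LOAD entries at p_vaddr 0x400000 (p_memsz
 * 0x1000) and 0x600000 (p_memsz 0x2000), total_mapping_size() returns
 * 0x202000: the span from the page start of the first segment to the
 * end of the last, which elf_map() reserves in one mapping before
 * carving it up.
 */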

static int elf_read(struct file *file, void *buf, size_t len, loff_t pos)
{
	ssize_t rv;

	rv = kernel_read(file, buf, len, &pos);
	if (unlikely(rv != len)) {
		return (rv < 0) ? rv : -EIO;
	}
	return 0;
}

/**
 * load_elf_phdrs() - load ELF program headers
 * @elf_ex:   ELF header of the binary whose program headers should be loaded
 * @elf_file: the opened ELF binary file
 *
 * Loads ELF program headers from the binary file elf_file, which has the ELF
 * header pointed to by elf_ex, into a newly allocated array. The caller is
 * responsible for freeing the allocated data. Returns NULL upon failure.
 */
static struct elf_phdr *load_elf_phdrs(const struct elfhdr *elf_ex,
				       struct file *elf_file)
{
	struct elf_phdr *elf_phdata = NULL;
	int retval, err = -1;
	unsigned int size;

	/*
	 * If the size of this structure has changed, then punt, since
	 * we will be doing the wrong thing.
	 */
	if (elf_ex->e_phentsize != sizeof(struct elf_phdr))
		goto out;

	/* Sanity check the number of program headers... */
	/* ...and their total size. */
	size = sizeof(struct elf_phdr) * elf_ex->e_phnum;
	if (size == 0 || size > 65536 || size > ELF_MIN_ALIGN)
		goto out;

	elf_phdata = kmalloc(size, GFP_KERNEL);
	if (!elf_phdata)
		goto out;

	/* Read in the program headers */
	retval = elf_read(elf_file, elf_phdata, size, elf_ex->e_phoff);
	if (retval < 0) {
		err = retval;
		goto out;
	}

	/* Success! */
	err = 0;
out:
	if (err) {
		kfree(elf_phdata);
		elf_phdata = NULL;
	}
	return elf_phdata;
}

#ifndef CONFIG_ARCH_BINFMT_ELF_STATE

/**
 * struct arch_elf_state - arch-specific ELF loading state
 *
 * This structure is used to preserve architecture specific data during
 * the loading of an ELF file, throughout the checking of architecture
 * specific ELF headers & through to the point where the ELF load is
 * known to be proceeding (ie. SET_PERSONALITY).
 *
 * This implementation is a dummy for architectures which require no
 * specific state.
 */
struct arch_elf_state {
};

#define INIT_ARCH_ELF_STATE {}

/**
 * arch_elf_pt_proc() - check a PT_LOPROC..PT_HIPROC ELF program header
 * @ehdr:	The main ELF header
 * @phdr:	The program header to check
 * @elf:	The open ELF file
 * @is_interp:	True if the phdr is from the interpreter of the ELF being
 *		loaded, else false.
 * @state:	Architecture-specific state preserved throughout the process
 *		of loading the ELF.
 *
 * Inspects the program header phdr to validate its correctness and/or
 * suitability for the system. Called once per ELF program header in the
 * range PT_LOPROC to PT_HIPROC, for both the ELF being loaded and its
 * interpreter.
 *
 * Return: Zero to proceed with the ELF load, non-zero to fail the ELF load
 *         with that return code.
 */
static inline int arch_elf_pt_proc(struct elfhdr *ehdr,
				   struct elf_phdr *phdr,
				   struct file *elf, bool is_interp,
				   struct arch_elf_state *state)
{
	/* Dummy implementation, always proceed */
	return 0;
}

/**
 * arch_check_elf() - check an ELF executable
 * @ehdr:	The main ELF header
 * @has_interp:	True if the ELF has an interpreter, else false.
 * @interp_ehdr: The interpreter's ELF header
 * @state:	Architecture-specific state preserved throughout the process
 *		of loading the ELF.
 *
 * Provides a final opportunity for architecture code to reject the loading
 * of the ELF & cause an exec syscall to return an error. This is called after
 * all program headers to be checked by arch_elf_pt_proc have been.
 *
 * Return: Zero to proceed with the ELF load, non-zero to fail the ELF load
 *         with that return code.
 */
static inline int arch_check_elf(struct elfhdr *ehdr, bool has_interp,
				 struct elfhdr *interp_ehdr,
				 struct arch_elf_state *state)
{
	/* Dummy implementation, always proceed */
	return 0;
}

#endif /* !CONFIG_ARCH_BINFMT_ELF_STATE */

static inline int make_prot(u32 p_flags, struct arch_elf_state *arch_state,
			    bool has_interp, bool is_interp)
{
	int prot = 0;

	if (p_flags & PF_R)
		prot |= PROT_READ;
	if (p_flags & PF_W)
		prot |= PROT_WRITE;
	if (p_flags & PF_X)
		prot |= PROT_EXEC;

	return arch_elf_adjust_prot(prot, arch_state, has_interp, is_interp);
}
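
/*
 * For illustration: a typical text segment with p_flags == (PF_R | PF_X)
 * yields PROT_READ | PROT_EXEC here, subject to whatever final adjustment
 * arch_elf_adjust_prot() applies for the architecture.
 */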

/* This is much more generalized than the library routine read function,
   so we keep this separate.  Technically the library read function
   is only provided so that we can read a.out libraries that have
   an ELF header */

static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
		struct file *interpreter,
		unsigned long no_base, struct elf_phdr *interp_elf_phdata,
		struct arch_elf_state *arch_state)
{
	struct elf_phdr *eppnt;
	unsigned long load_addr = 0;
	int load_addr_set = 0;
	unsigned long last_bss = 0, elf_bss = 0;
	int bss_prot = 0;
	unsigned long error = ~0UL;
	unsigned long total_size;
	int i;

	/* First of all, some simple consistency checks */
	if (interp_elf_ex->e_type != ET_EXEC &&
	    interp_elf_ex->e_type != ET_DYN)
		goto out;
	if (!elf_check_arch(interp_elf_ex) ||
	    elf_check_fdpic(interp_elf_ex))
		goto out;
	if (!interpreter->f_op->mmap)
		goto out;

	total_size = total_mapping_size(interp_elf_phdata,
					interp_elf_ex->e_phnum);
	if (!total_size) {
		error = -EINVAL;
		goto out;
	}

	eppnt = interp_elf_phdata;
	for (i = 0; i < interp_elf_ex->e_phnum; i++, eppnt++) {
		if (eppnt->p_type == PT_LOAD) {
			int elf_type = MAP_PRIVATE | MAP_DENYWRITE;
			int elf_prot = make_prot(eppnt->p_flags, arch_state,
						 true, true);
			unsigned long vaddr = 0;
			unsigned long k, map_addr;

			vaddr = eppnt->p_vaddr;
			if (interp_elf_ex->e_type == ET_EXEC || load_addr_set)
				elf_type |= MAP_FIXED_NOREPLACE;
			else if (no_base && interp_elf_ex->e_type == ET_DYN)
				load_addr = -vaddr;

			map_addr = elf_map(interpreter, load_addr + vaddr,
					eppnt, elf_prot, elf_type, total_size);
			total_size = 0;
			error = map_addr;
			if (BAD_ADDR(map_addr))
				goto out;

			if (!load_addr_set &&
			    interp_elf_ex->e_type == ET_DYN) {
				load_addr = map_addr - ELF_PAGESTART(vaddr);
				load_addr_set = 1;
			}

			/*
			 * Check to see if the section's size will overflow the
			 * allowed task size. Note that p_filesz must always be
			 * <= p_memsz so it's only necessary to check p_memsz.
			 */
			k = load_addr + eppnt->p_vaddr;
			if (BAD_ADDR(k) ||
			    eppnt->p_filesz > eppnt->p_memsz ||
			    eppnt->p_memsz > TASK_SIZE ||
			    TASK_SIZE - eppnt->p_memsz < k) {
				error = -ENOMEM;
				goto out;
			}

			/*
			 * Find the end of the file mapping for this phdr, and
			 * keep track of the largest address we see for this.
			 */
			k = load_addr + eppnt->p_vaddr + eppnt->p_filesz;
			if (k > elf_bss)
				elf_bss = k;

			/*
			 * Do the same thing for the memory mapping - between
			 * elf_bss and last_bss is the bss section.
			 */
			k = load_addr + eppnt->p_vaddr + eppnt->p_memsz;
			if (k > last_bss) {
				last_bss = k;
				bss_prot = elf_prot;
			}
		}
	}

	/*
	 * Now fill out the bss section: first pad the last page from
	 * the file up to the page boundary, and zero it from elf_bss
	 * up to the end of the page.
	 */
	if (padzero(elf_bss)) {
		error = -EFAULT;
		goto out;
	}
	/*
	 * Next, align both the file and mem bss up to the page size,
	 * since this is where elf_bss was just zeroed up to, and where
	 * last_bss will end after the vm_brk_flags() below.
	 */
	elf_bss = ELF_PAGEALIGN(elf_bss);
	last_bss = ELF_PAGEALIGN(last_bss);
	/* Finally, if there is still more bss to allocate, do it. */
	if (last_bss > elf_bss) {
		error = vm_brk_flags(elf_bss, last_bss - elf_bss,
				bss_prot & PROT_EXEC ? VM_EXEC : 0);
		if (error)
			goto out;
	}

	error = load_addr;
out:
	return error;
}
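
/*
 * Worked example for the bss handling above (illustrative numbers,
 * assuming 4K pages): if the interpreter's last PT_LOAD has file data
 * ending at 0x5000e10 and a memory image ending at 0x5003000, then
 * padzero() clears 0x5000e10 up to the 0x5001000 page boundary, and
 * vm_brk_flags() maps the remaining pages 0x5001000-0x5003000 as
 * anonymous zero-fill memory.
 */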

/*
 * These are the functions used to load ELF style executables and shared
 * libraries.  There is no binary dependent code anywhere else.
 */

static int parse_elf_property(const char *data, size_t *off, size_t datasz,
			      struct arch_elf_state *arch,
			      bool have_prev_type, u32 *prev_type)
{
	size_t o, step;
	const struct gnu_property *pr;
	int ret;

	if (*off == datasz)
		return -ENOENT;

	if (WARN_ON_ONCE(*off > datasz || *off % ELF_GNU_PROPERTY_ALIGN))
		return -EIO;
	o = *off;
	datasz -= *off;

	if (datasz < sizeof(*pr))
		return -ENOEXEC;
	pr = (const struct gnu_property *)(data + o);
	o += sizeof(*pr);
	datasz -= sizeof(*pr);

	if (pr->pr_datasz > datasz)
		return -ENOEXEC;

	WARN_ON_ONCE(o % ELF_GNU_PROPERTY_ALIGN);
	step = round_up(pr->pr_datasz, ELF_GNU_PROPERTY_ALIGN);
	if (step > datasz)
		return -ENOEXEC;

	/* Properties are supposed to be unique and sorted on pr_type: */
	if (have_prev_type && pr->pr_type <= *prev_type)
		return -ENOEXEC;
	*prev_type = pr->pr_type;

	ret = arch_parse_elf_property(pr->pr_type, data + o,
				      pr->pr_datasz, ELF_COMPAT, arch);
	if (ret)
		return ret;

	*off = o + step;
	return 0;
}
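
/*
 * Each record that parse_elf_property() walks inside the PT_GNU_PROPERTY
 * note body has the layout (see struct gnu_property in <uapi/linux/elf.h>):
 *
 *	u32 pr_type;
 *	u32 pr_datasz;
 *	u8  pr_data[pr_datasz];    (padded to ELF_GNU_PROPERTY_ALIGN)
 *
 * with records sorted by ascending pr_type, which is what the
 * uniqueness/ordering check above enforces.
 */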

#define NOTE_DATA_SZ SZ_1K
#define GNU_PROPERTY_TYPE_0_NAME "GNU"
#define NOTE_NAME_SZ (sizeof(GNU_PROPERTY_TYPE_0_NAME))

static int parse_elf_properties(struct file *f, const struct elf_phdr *phdr,
				struct arch_elf_state *arch)
{
	union {
		struct elf_note nhdr;
		char data[NOTE_DATA_SZ];
	} note;
	loff_t pos;
	ssize_t n;
	size_t off, datasz;
	int ret;
	bool have_prev_type;
	u32 prev_type;

	if (!IS_ENABLED(CONFIG_ARCH_USE_GNU_PROPERTY) || !phdr)
		return 0;

	/* load_elf_binary() shouldn't call us unless this is true... */
	if (WARN_ON_ONCE(phdr->p_type != PT_GNU_PROPERTY))
		return -ENOEXEC;

	/* If the properties are crazy large, that's too bad (for now): */
	if (phdr->p_filesz > sizeof(note))
		return -ENOEXEC;

	pos = phdr->p_offset;
	n = kernel_read(f, &note, phdr->p_filesz, &pos);

	BUILD_BUG_ON(sizeof(note) < sizeof(note.nhdr) + NOTE_NAME_SZ);
	if (n < 0 || n < sizeof(note.nhdr) + NOTE_NAME_SZ)
		return -EIO;

	if (note.nhdr.n_type != NT_GNU_PROPERTY_TYPE_0 ||
	    note.nhdr.n_namesz != NOTE_NAME_SZ ||
	    strncmp(note.data + sizeof(note.nhdr),
		    GNU_PROPERTY_TYPE_0_NAME, n - sizeof(note.nhdr)))
		return -ENOEXEC;

	off = round_up(sizeof(note.nhdr) + NOTE_NAME_SZ,
		       ELF_GNU_PROPERTY_ALIGN);
	if (off > n)
		return -ENOEXEC;

	if (note.nhdr.n_descsz > n - off)
		return -ENOEXEC;
	datasz = off + note.nhdr.n_descsz;

	have_prev_type = false;
	do {
		ret = parse_elf_property(note.data, &off, datasz, arch,
					 have_prev_type, &prev_type);
		have_prev_type = true;
	} while (!ret);

	return ret == -ENOENT ? 0 : ret;
}

static int load_elf_binary(struct linux_binprm *bprm)
{
	struct file *interpreter = NULL; /* to shut gcc up */
	unsigned long load_addr = 0, load_bias = 0;
	int load_addr_set = 0;
	unsigned long error;
	struct elf_phdr *elf_ppnt, *elf_phdata, *interp_elf_phdata = NULL;
	struct elf_phdr *elf_property_phdata = NULL;
	unsigned long elf_bss, elf_brk;
	int bss_prot = 0;
	int retval, i;
	unsigned long elf_entry;
	unsigned long e_entry;
	unsigned long interp_load_addr = 0;
	unsigned long start_code, end_code, start_data, end_data;
	unsigned long reloc_func_desc __maybe_unused = 0;
	int executable_stack = EXSTACK_DEFAULT;
	struct elfhdr *elf_ex = (struct elfhdr *)bprm->buf;
	struct {
		struct elfhdr interp_elf_ex;
	} *loc;
	struct arch_elf_state arch_state = INIT_ARCH_ELF_STATE;
	struct mm_struct *mm;
	struct pt_regs *regs;

	loc = kmalloc(sizeof(*loc), GFP_KERNEL);
	if (!loc) {
		retval = -ENOMEM;
		goto out_ret;
	}

	retval = -ENOEXEC;
	/* First of all, some simple consistency checks */
	if (memcmp(elf_ex->e_ident, ELFMAG, SELFMAG) != 0)
		goto out;

	if (elf_ex->e_type != ET_EXEC && elf_ex->e_type != ET_DYN)
		goto out;
	if (!elf_check_arch(elf_ex))
		goto out;
	if (elf_check_fdpic(elf_ex))
		goto out;
	if (!bprm->file->f_op->mmap)
		goto out;

	elf_phdata = load_elf_phdrs(elf_ex, bprm->file);
	if (!elf_phdata)
		goto out;

	elf_ppnt = elf_phdata;
	for (i = 0; i < elf_ex->e_phnum; i++, elf_ppnt++) {
		char *elf_interpreter;

		if (elf_ppnt->p_type == PT_GNU_PROPERTY) {
			elf_property_phdata = elf_ppnt;
			continue;
		}

		if (elf_ppnt->p_type != PT_INTERP)
			continue;

		/*
		 * This is the program interpreter used for shared libraries -
		 * for now assume that this is an a.out format binary.
		 */
		retval = -ENOEXEC;
		if (elf_ppnt->p_filesz > PATH_MAX || elf_ppnt->p_filesz < 2)
			goto out_free_ph;

		retval = -ENOMEM;
		elf_interpreter = kmalloc(elf_ppnt->p_filesz, GFP_KERNEL);
		if (!elf_interpreter)
			goto out_free_ph;

		retval = elf_read(bprm->file, elf_interpreter, elf_ppnt->p_filesz,
				  elf_ppnt->p_offset);
		if (retval < 0)
			goto out_free_interp;
		/* make sure path is NULL terminated */
		retval = -ENOEXEC;
		if (elf_interpreter[elf_ppnt->p_filesz - 1] != '\0')
			goto out_free_interp;

		interpreter = open_exec(elf_interpreter);
		kfree(elf_interpreter);
		retval = PTR_ERR(interpreter);
		if (IS_ERR(interpreter))
			goto out_free_ph;

		/*
		 * If the binary is not readable then enforce mm->dumpable = 0
		 * regardless of the interpreter's permissions.
		 */
		would_dump(bprm, interpreter);

		/* Get the exec headers */
		retval = elf_read(interpreter, &loc->interp_elf_ex,
				  sizeof(loc->interp_elf_ex), 0);
		if (retval < 0)
			goto out_free_dentry;

		break;

out_free_interp:
		kfree(elf_interpreter);
		goto out_free_ph;
	}

	elf_ppnt = elf_phdata;
	for (i = 0; i < elf_ex->e_phnum; i++, elf_ppnt++)
		switch (elf_ppnt->p_type) {
		case PT_GNU_STACK:
			if (elf_ppnt->p_flags & PF_X)
				executable_stack = EXSTACK_ENABLE_X;
			else
				executable_stack = EXSTACK_DISABLE_X;
			break;

		case PT_LOPROC ... PT_HIPROC:
			retval = arch_elf_pt_proc(elf_ex, elf_ppnt,
						  bprm->file, false,
						  &arch_state);
			if (retval)
				goto out_free_dentry;
			break;
		}

	/* Some simple consistency checks for the interpreter */
	if (interpreter) {
		retval = -ELIBBAD;
		/* Not an ELF interpreter */
		if (memcmp(loc->interp_elf_ex.e_ident, ELFMAG, SELFMAG) != 0)
			goto out_free_dentry;
		/* Verify the interpreter has a valid arch */
		if (!elf_check_arch(&loc->interp_elf_ex) ||
		    elf_check_fdpic(&loc->interp_elf_ex))
			goto out_free_dentry;

		/* Load the interpreter program headers */
		interp_elf_phdata = load_elf_phdrs(&loc->interp_elf_ex,
						   interpreter);
		if (!interp_elf_phdata)
			goto out_free_dentry;

		/* Pass PT_LOPROC..PT_HIPROC headers to arch code */
		elf_property_phdata = NULL;
		elf_ppnt = interp_elf_phdata;
		for (i = 0; i < loc->interp_elf_ex.e_phnum; i++, elf_ppnt++)
			switch (elf_ppnt->p_type) {
			case PT_GNU_PROPERTY:
				elf_property_phdata = elf_ppnt;
				break;

			case PT_LOPROC ... PT_HIPROC:
				retval = arch_elf_pt_proc(&loc->interp_elf_ex,
							  elf_ppnt, interpreter,
							  true, &arch_state);
				if (retval)
					goto out_free_dentry;
				break;
			}
	}

	retval = parse_elf_properties(interpreter ?: bprm->file,
				      elf_property_phdata, &arch_state);
	if (retval)
		goto out_free_dentry;

	/*
	 * Allow arch code to reject the ELF at this point, whilst it's
	 * still possible to return an error to the code that invoked
	 * the exec syscall.
	 */
	retval = arch_check_elf(elf_ex,
				!!interpreter, &loc->interp_elf_ex,
				&arch_state);
	if (retval)
		goto out_free_dentry;

	/* Flush all traces of the currently running executable */
	retval = flush_old_exec(bprm);
	if (retval)
		goto out_free_dentry;

	/* Do this immediately, since STACK_TOP as used in setup_arg_pages
	   may depend on the personality.  */
	SET_PERSONALITY2(*elf_ex, &arch_state);
	if (elf_read_implies_exec(*elf_ex, executable_stack))
		current->personality |= READ_IMPLIES_EXEC;

	if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
		current->flags |= PF_RANDOMIZE;

	setup_new_exec(bprm);
	install_exec_creds(bprm);

	/* Do this so that we can load the interpreter, if need be.  We will
	   change some of these later */
	retval = setup_arg_pages(bprm, randomize_stack_top(STACK_TOP),
				 executable_stack);
	if (retval < 0)
		goto out_free_dentry;

	elf_bss = 0;
	elf_brk = 0;

	start_code = ~0UL;
	end_code = 0;
	start_data = 0;
	end_data = 0;

	/* Now we do a little grungy work by mmapping the ELF image into
	   the correct location in memory. */
	for(i = 0, elf_ppnt = elf_phdata;
	    i < elf_ex->e_phnum; i++, elf_ppnt++) {
		int elf_prot, elf_flags;
		unsigned long k, vaddr;
		unsigned long total_size = 0;

		if (elf_ppnt->p_type != PT_LOAD)
			continue;

		if (unlikely (elf_brk > elf_bss)) {
			unsigned long nbyte;

			/* There was a PT_LOAD segment with p_memsz > p_filesz
			   before this one. Map anonymous pages, if needed,
			   and clear the area.  */
			retval = set_brk(elf_bss + load_bias,
					 elf_brk + load_bias,
					 bss_prot);
			if (retval)
				goto out_free_dentry;
			nbyte = ELF_PAGEOFFSET(elf_bss);
			if (nbyte) {
				nbyte = ELF_MIN_ALIGN - nbyte;
				if (nbyte > elf_brk - elf_bss)
					nbyte = elf_brk - elf_bss;
				if (clear_user((void __user *)elf_bss +
						load_bias, nbyte)) {
					/*
					 * This bss-zeroing can fail if the ELF
					 * file specifies odd protections. So
					 * we don't check the return value
					 */
				}
			}
		}

		elf_prot = make_prot(elf_ppnt->p_flags, &arch_state,
				     !!interpreter, false);

		elf_flags = MAP_PRIVATE | MAP_DENYWRITE | MAP_EXECUTABLE;

		vaddr = elf_ppnt->p_vaddr;
		/*
		 * If we are loading ET_EXEC or we have already performed
		 * the ET_DYN load_addr calculations, proceed normally.
		 */
		if (elf_ex->e_type == ET_EXEC || load_addr_set) {
			elf_flags |= MAP_FIXED;
		} else if (elf_ex->e_type == ET_DYN) {
			/*
			 * This logic is run once for the first LOAD Program
			 * Header for ET_DYN binaries to calculate the
			 * randomization (load_bias) for all the LOAD
			 * Program Headers, and to calculate the entire
			 * size of the ELF mapping (total_size). (Note that
			 * load_addr_set is set to true later once the
			 * initial mapping is performed.)
			 *
			 * There are effectively two types of ET_DYN
			 * binaries: programs (i.e. PIE: ET_DYN with INTERP)
			 * and loaders (ET_DYN without INTERP, since they
			 * _are_ the ELF interpreter). The loaders must
			 * be loaded away from programs since the program
			 * may otherwise collide with the loader (especially
			 * for ET_EXEC which does not have a randomized
			 * position). For example to handle invocations of
			 * "./ld.so someprog" to test out a new version of
			 * the loader, the subsequent program that the
			 * loader loads must avoid the loader itself, so
			 * they cannot share the same load range. Sufficient
			 * room for the brk must be allocated with the
			 * loader as well, since brk must be available with
			 * the loader.
			 *
			 * Therefore, programs are loaded offset from
			 * ELF_ET_DYN_BASE and loaders are loaded into the
			 * independently randomized mmap region (0 load_bias
			 * without MAP_FIXED).
			 */
			if (interpreter) {
				load_bias = ELF_ET_DYN_BASE;
				if (current->flags & PF_RANDOMIZE)
					load_bias += arch_mmap_rnd();
				elf_flags |= MAP_FIXED;
			} else
				load_bias = 0;

			/*
			 * Since load_bias is used for all subsequent loading
			 * calculations, we must lower it by the first vaddr
			 * so that the remaining calculations based on the
			 * ELF vaddrs will be correctly offset. The result
			 * is then page aligned.
			 */
			load_bias = ELF_PAGESTART(load_bias - vaddr);
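			/*
			 * Worked example (illustrative numbers): if the
			 * chosen base is 0x555555000000 and the first
			 * PT_LOAD has p_vaddr 0x1000, load_bias becomes
			 * 0x555554fff000, so that the load_bias + p_vaddr
			 * sums used below land on the intended base.
			 */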
1107
Michael Davidsona87938b2015-04-14 15:47:38 -07001108 total_size = total_mapping_size(elf_phdata,
Alexey Dobriyana62c5b12020-01-30 22:16:55 -08001109 elf_ex->e_phnum);
Michael Davidsona87938b2015-04-14 15:47:38 -07001110 if (!total_size) {
Andrew Morton2b1d3ae2015-05-28 15:44:24 -07001111 retval = -EINVAL;
Michael Davidsona87938b2015-04-14 15:47:38 -07001112 goto out_free_dentry;
1113 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001114 }
1115
Jesper Juhlf4e5cc22006-06-23 02:05:35 -07001116 error = elf_map(bprm->file, load_bias + vaddr, elf_ppnt,
Michael Davidsona87938b2015-04-14 15:47:38 -07001117 elf_prot, elf_flags, total_size);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001118 if (BAD_ADDR(error)) {
Alexey Kuznetsovb140f2512007-05-08 00:31:57 -07001119 retval = IS_ERR((void *)error) ?
1120 PTR_ERR((void*)error) : -EINVAL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001121 goto out_free_dentry;
1122 }
1123
1124 if (!load_addr_set) {
1125 load_addr_set = 1;
1126 load_addr = (elf_ppnt->p_vaddr - elf_ppnt->p_offset);
Alexey Dobriyana62c5b12020-01-30 22:16:55 -08001127 if (elf_ex->e_type == ET_DYN) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001128 load_bias += error -
1129 ELF_PAGESTART(load_bias + vaddr);
1130 load_addr += load_bias;
1131 reloc_func_desc = load_bias;
1132 }
1133 }
1134 k = elf_ppnt->p_vaddr;
Alexey Dobriyanf67ef442020-01-30 22:16:52 -08001135 if ((elf_ppnt->p_flags & PF_X) && k < start_code)
Jesper Juhlf4e5cc22006-06-23 02:05:35 -07001136 start_code = k;
1137 if (start_data < k)
1138 start_data = k;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001139
1140 /*
1141 * Check to see if the section's size will overflow the
1142 * allowed task size. Note that p_filesz must always be
1143 * <= p_memsz so it is only necessary to check p_memsz.
1144 */
Chuck Ebbertce510592006-07-03 00:24:14 -07001145 if (BAD_ADDR(k) || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
Linus Torvalds1da177e2005-04-16 15:20:36 -07001146 elf_ppnt->p_memsz > TASK_SIZE ||
1147 TASK_SIZE - elf_ppnt->p_memsz < k) {
Jesper Juhlf4e5cc22006-06-23 02:05:35 -07001148 /* set_brk can never work. Avoid overflows. */
Alexey Kuznetsovb140f2512007-05-08 00:31:57 -07001149 retval = -EINVAL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001150 goto out_free_dentry;
1151 }
1152
1153 k = elf_ppnt->p_vaddr + elf_ppnt->p_filesz;
1154
1155 if (k > elf_bss)
1156 elf_bss = k;
1157 if ((elf_ppnt->p_flags & PF_X) && end_code < k)
1158 end_code = k;
1159 if (end_data < k)
1160 end_data = k;
1161 k = elf_ppnt->p_vaddr + elf_ppnt->p_memsz;
Denys Vlasenko16e72e92017-02-22 15:45:16 -08001162 if (k > elf_brk) {
1163 bss_prot = elf_prot;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001164 elf_brk = k;
Denys Vlasenko16e72e92017-02-22 15:45:16 -08001165 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001166 }
1167
Alexey Dobriyana62c5b12020-01-30 22:16:55 -08001168 e_entry = elf_ex->e_entry + load_bias;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001169 elf_bss += load_bias;
1170 elf_brk += load_bias;
1171 start_code += load_bias;
1172 end_code += load_bias;
1173 start_data += load_bias;
1174 end_data += load_bias;
1175
1176 /* Calling set_brk effectively mmaps the pages that we need
1177 * for the bss and break sections. We must do this before
1178 * mapping in the interpreter, to make sure it doesn't wind
1179 * up getting placed where the bss needs to go.
1180 */
Denys Vlasenko16e72e92017-02-22 15:45:16 -08001181 retval = set_brk(elf_bss, elf_brk, bss_prot);
Al Viro19d860a2014-05-04 20:11:36 -04001182 if (retval)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001183 goto out_free_dentry;
akpm@osdl.org6de50512005-10-11 08:29:08 -07001184 if (likely(elf_bss != elf_brk) && unlikely(padzero(elf_bss))) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001185 retval = -EFAULT; /* Nobody gets to see this, but.. */
1186 goto out_free_dentry;
1187 }
1188
Alexey Dobriyancc338012019-05-14 15:43:39 -07001189 if (interpreter) {
Andi Kleend20894a2008-02-08 04:21:54 -08001190 elf_entry = load_elf_interp(&loc->interp_elf_ex,
1191 interpreter,
Dave Martinfe0f6762020-03-16 16:50:46 +00001192 load_bias, interp_elf_phdata,
1193 &arch_state);
Andi Kleend20894a2008-02-08 04:21:54 -08001194 if (!IS_ERR((void *)elf_entry)) {
1195 /*
1196 * load_elf_interp() returns relocation
1197 * adjustment
1198 */
1199 interp_load_addr = elf_entry;
1200 elf_entry += loc->interp_elf_ex.e_entry;
Jiri Kosinacc503c12008-01-30 13:31:07 +01001201 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001202 if (BAD_ADDR(elf_entry)) {
Chuck Ebbertce510592006-07-03 00:24:14 -07001203 retval = IS_ERR((void *)elf_entry) ?
1204 (int)elf_entry : -EINVAL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001205 goto out_free_dentry;
1206 }
1207 reloc_func_desc = interp_load_addr;
1208
1209 allow_write_access(interpreter);
1210 fput(interpreter);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001211 } else {
Alexey Dobriyana62c5b12020-01-30 22:16:55 -08001212 elf_entry = e_entry;
Suresh Siddha5342fba2006-02-26 04:18:28 +01001213 if (BAD_ADDR(elf_entry)) {
Chuck Ebbertce510592006-07-03 00:24:14 -07001214 retval = -EINVAL;
Suresh Siddha5342fba2006-02-26 04:18:28 +01001215 goto out_free_dentry;
1216 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001217 }
1218
Paul Burton774c1052014-09-11 08:30:16 +01001219 kfree(interp_elf_phdata);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001220 kfree(elf_phdata);
1221
Linus Torvalds1da177e2005-04-16 15:20:36 -07001222 set_binfmt(&elf_format);
1223
Benjamin Herrenschmidt547ee842005-04-16 15:24:35 -07001224#ifdef ARCH_HAS_SETUP_ADDITIONAL_PAGES
Alexey Dobriyancc338012019-05-14 15:43:39 -07001225 retval = arch_setup_additional_pages(bprm, !!interpreter);
Al Viro19d860a2014-05-04 20:11:36 -04001226 if (retval < 0)
Roland McGrath18c8baff2005-04-28 15:17:19 -07001227 goto out;
Benjamin Herrenschmidt547ee842005-04-16 15:24:35 -07001228#endif /* ARCH_HAS_SETUP_ADDITIONAL_PAGES */
1229
Alexey Dobriyana62c5b12020-01-30 22:16:55 -08001230 retval = create_elf_tables(bprm, elf_ex,
1231 load_addr, interp_load_addr, e_entry);
Al Viro19d860a2014-05-04 20:11:36 -04001232 if (retval < 0)
Ollie Wildb6a2fea2007-07-19 01:48:16 -07001233 goto out;
Alexey Dobriyan03c6d722020-01-30 22:16:58 -08001234
1235 mm = current->mm;
1236 mm->end_code = end_code;
1237 mm->start_code = start_code;
1238 mm->start_data = start_data;
1239 mm->end_data = end_data;
1240 mm->start_stack = bprm->p;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001241
Jiri Kosina4471a672011-04-14 15:22:09 -07001242 if ((current->flags & PF_RANDOMIZE) && (randomize_va_space > 1)) {
Kees Cookbbdc6072019-05-14 15:43:57 -07001243 /*
1244 * For architectures with ELF randomization, when executing
1245 * a loader directly (i.e. no interpreter listed in ELF
1246 * headers), move the brk area out of the mmap region
1247 * (since it grows up, and may collide early with the stack
1248 * growing down), and into the unused ELF_ET_DYN_BASE region.
1249 */
Kees Cook7be3cb02019-09-26 10:15:25 -07001250 if (IS_ENABLED(CONFIG_ARCH_HAS_ELF_RANDOMIZE) &&
Alexey Dobriyan03c6d722020-01-30 22:16:58 -08001251 elf_ex->e_type == ET_DYN && !interpreter) {
1252 mm->brk = mm->start_brk = ELF_ET_DYN_BASE;
1253 }
Kees Cookbbdc6072019-05-14 15:43:57 -07001254
Alexey Dobriyan03c6d722020-01-30 22:16:58 -08001255 mm->brk = mm->start_brk = arch_randomize_brk(mm);
Kees Cook204db6e2015-04-14 15:48:12 -07001256#ifdef compat_brk_randomized
Jiri Kosina4471a672011-04-14 15:22:09 -07001257 current->brk_randomized = 1;
1258#endif
1259 }
Jiri Kosinac1d171a2008-01-30 13:30:40 +01001260
Linus Torvalds1da177e2005-04-16 15:20:36 -07001261 if (current->personality & MMAP_PAGE_ZERO) {
1262 /* Why this, you ask??? Well SVr4 maps page 0 as read-only,
1263 and some applications "depend" upon this behavior.
1264 Since we do not have the power to recompile these, we
Jesper Juhlf4e5cc22006-06-23 02:05:35 -07001265 emulate the SVr4 behavior. Sigh. */
Linus Torvalds6be5ceb2012-04-20 17:13:58 -07001266 error = vm_mmap(NULL, 0, PAGE_SIZE, PROT_READ | PROT_EXEC,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001267 MAP_FIXED | MAP_PRIVATE, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001268 }
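/*
 * Added illustration (not part of the original file): MMAP_PAGE_ZERO is
 * normally inherited from an SVr4-style personality set before exec,
 * e.g. in userspace:
 *
 *	personality(PER_SVR4);
 *	execve("./svr4-app", argv, envp);
 *
 * PER_SVR4 includes MMAP_PAGE_ZERO, so the vm_mmap() call above gives
 * such a program a readable page at address 0.
 */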
1269
Alexey Dobriyan249b08e2019-05-14 15:43:54 -07001270 regs = current_pt_regs();
Linus Torvalds1da177e2005-04-16 15:20:36 -07001271#ifdef ELF_PLAT_INIT
1272 /*
1273 * The ABI may specify that certain registers be set up in special
1274 * ways (on i386 %edx is the address of a DT_FINI function, for
 1276	 * example). In addition, it may also specify (e.g., PowerPC64 ELF)
1276 * that the e_entry field is the address of the function descriptor
1277 * for the startup routine, rather than the address of the startup
1278 * routine itself. This macro performs whatever initialization to
1279 * the regs structure is required as well as any relocations to the
 1281	 * function descriptor entries when executing dynamically linked apps.
1281 */
1282 ELF_PLAT_INIT(regs, reloc_func_desc);
1283#endif
1284
Kees Cookb8383832018-04-10 16:34:57 -07001285 finalize_exec(bprm);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001286 start_thread(regs, elf_entry, bprm->p);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001287 retval = 0;
1288out:
1289 kfree(loc);
1290out_ret:
1291 return retval;
1292
1293 /* error cleanup */
1294out_free_dentry:
Paul Burtona9d9ef12014-09-11 08:30:15 +01001295 kfree(interp_elf_phdata);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001296 allow_write_access(interpreter);
1297 if (interpreter)
1298 fput(interpreter);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001299out_free_ph:
1300 kfree(elf_phdata);
1301 goto out;
1302}
1303
Josh Triplett69369a72014-04-03 14:48:27 -07001304#ifdef CONFIG_USELIB
Linus Torvalds1da177e2005-04-16 15:20:36 -07001305/* This is really simpleminded and specialized - we are loading a library
 1306   with an ELF header through the legacy a.out uselib() interface. */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001307static int load_elf_library(struct file *file)
1308{
1309 struct elf_phdr *elf_phdata;
1310 struct elf_phdr *eppnt;
1311 unsigned long elf_bss, bss, len;
1312 int retval, error, i, j;
1313 struct elfhdr elf_ex;
1314
1315 error = -ENOEXEC;
Alexey Dobriyan658c0332019-12-04 16:52:25 -08001316 retval = elf_read(file, &elf_ex, sizeof(elf_ex), 0);
1317 if (retval < 0)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001318 goto out;
1319
1320 if (memcmp(elf_ex.e_ident, ELFMAG, SELFMAG) != 0)
1321 goto out;
1322
1323 /* First of all, some simple consistency checks */
1324 if (elf_ex.e_type != ET_EXEC || elf_ex.e_phnum > 2 ||
Al Viro72c2d532013-09-22 16:27:52 -04001325 !elf_check_arch(&elf_ex) || !file->f_op->mmap)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001326 goto out;
Nicolas Pitre47552002017-08-16 16:05:13 -04001327 if (elf_check_fdpic(&elf_ex))
1328 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001329
1330 /* Now read in all of the header information */
1331
1332 j = sizeof(struct elf_phdr) * elf_ex.e_phnum;
1333 /* j < ELF_MIN_ALIGN because elf_ex.e_phnum <= 2 */
1334
1335 error = -ENOMEM;
1336 elf_phdata = kmalloc(j, GFP_KERNEL);
1337 if (!elf_phdata)
1338 goto out;
1339
1340 eppnt = elf_phdata;
1341 error = -ENOEXEC;
Alexey Dobriyan658c0332019-12-04 16:52:25 -08001342 retval = elf_read(file, eppnt, j, elf_ex.e_phoff);
1343 if (retval < 0)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001344 goto out_free_ph;
1345
 1347	for (j = 0, i = 0; i < elf_ex.e_phnum; i++)
1347 if ((eppnt + i)->p_type == PT_LOAD)
1348 j++;
1349 if (j != 1)
1350 goto out_free_ph;
1351
1352 while (eppnt->p_type != PT_LOAD)
1353 eppnt++;
1354
1355 /* Now use mmap to map the library into memory. */
Linus Torvalds6be5ceb2012-04-20 17:13:58 -07001356 error = vm_mmap(file,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001357 ELF_PAGESTART(eppnt->p_vaddr),
1358 (eppnt->p_filesz +
1359 ELF_PAGEOFFSET(eppnt->p_vaddr)),
1360 PROT_READ | PROT_WRITE | PROT_EXEC,
Michal Hocko4ed28632018-04-10 16:36:01 -07001361 MAP_FIXED_NOREPLACE | MAP_PRIVATE | MAP_DENYWRITE,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001362 (eppnt->p_offset -
1363 ELF_PAGEOFFSET(eppnt->p_vaddr)));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001364 if (error != ELF_PAGESTART(eppnt->p_vaddr))
1365 goto out_free_ph;
1366
1367 elf_bss = eppnt->p_vaddr + eppnt->p_filesz;
1368 if (padzero(elf_bss)) {
1369 error = -EFAULT;
1370 goto out_free_ph;
1371 }
1372
Oscar Salvador24962af2018-07-13 16:59:13 -07001373 len = ELF_PAGEALIGN(eppnt->p_filesz + eppnt->p_vaddr);
1374 bss = ELF_PAGEALIGN(eppnt->p_memsz + eppnt->p_vaddr);
Michal Hockoecc2bc82016-05-23 16:25:39 -07001375 if (bss > len) {
1376 error = vm_brk(len, bss - len);
Linus Torvalds5d22fc22016-05-27 15:57:31 -07001377 if (error)
Michal Hockoecc2bc82016-05-23 16:25:39 -07001378 goto out_free_ph;
1379 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001380 error = 0;
1381
1382out_free_ph:
1383 kfree(elf_phdata);
1384out:
1385 return error;
1386}
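/*
 * Added illustration (assumed userspace, not part of this file): this
 * loader is only reachable through the obsolete uselib(2) system call,
 * roughly:
 *
 *	syscall(SYS_uselib, "/lib/libfoo.so");
 *
 * Modern dynamic linkers map libraries with plain mmap() instead, which
 * is why the whole function sits behind CONFIG_USELIB.
 */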
Josh Triplett69369a72014-04-03 14:48:27 -07001387#endif /* #ifdef CONFIG_USELIB */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001388
Christoph Hellwig698ba7b2009-12-15 16:47:37 -08001389#ifdef CONFIG_ELF_CORE
Linus Torvalds1da177e2005-04-16 15:20:36 -07001390/*
1391 * ELF core dumper
1392 *
1393 * Modelled on fs/exec.c:aout_core_dump()
1394 * Jeremy Fitzhardinge <jeremy@sw.oz.au>
1395 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001396
1397/*
Jason Baron909af762012-03-23 15:02:51 -07001398 * The purpose of always_dump_vma() is to make sure that special kernel mappings
1399 * that are useful for post-mortem analysis are included in every core dump.
1400 * In that way we ensure that the core dump is fully interpretable later
1401 * without matching up the same kernel and hardware config to see what PC values
 1403 * meant. These special mappings include the vDSO, the vsyscall page,
 1404 * and other architecture-specific mappings.
1404 */
1405static bool always_dump_vma(struct vm_area_struct *vma)
1406{
1407 /* Any vsyscall mappings? */
1408 if (vma == get_gate_vma(vma->vm_mm))
1409 return true;
Andy Lutomirski78d683e2014-05-19 15:58:32 -07001410
1411 /*
1412 * Assume that all vmas with a .name op should always be dumped.
1413 * If this changes, a new vm_ops field can easily be added.
1414 */
1415 if (vma->vm_ops && vma->vm_ops->name && vma->vm_ops->name(vma))
1416 return true;
1417
Jason Baron909af762012-03-23 15:02:51 -07001418 /*
1419 * arch_vma_name() returns non-NULL for special architecture mappings,
1420 * such as vDSO sections.
1421 */
1422 if (arch_vma_name(vma))
1423 return true;
1424
1425 return false;
1426}
1427
1428/*
Roland McGrath82df3972007-10-16 23:27:02 -07001429 * Decide how much of a segment to dump: all, part, or none.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001430 */
Roland McGrath82df3972007-10-16 23:27:02 -07001431static unsigned long vma_dump_size(struct vm_area_struct *vma,
1432 unsigned long mm_flags)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001433{
KOSAKI Motohiroe575f112008-10-18 20:27:08 -07001434#define FILTER(type) (mm_flags & (1UL << MMF_DUMP_##type))
1435
Jason Baron909af762012-03-23 15:02:51 -07001436 /* always dump the vdso and vsyscall sections */
1437 if (always_dump_vma(vma))
Roland McGrath82df3972007-10-16 23:27:02 -07001438 goto whole;
Roland McGrathe5b97dd2007-01-26 00:56:48 -08001439
Konstantin Khlebnikov0103bd12012-10-08 16:28:59 -07001440 if (vma->vm_flags & VM_DONTDUMP)
Jason Baronaccb61f2012-03-23 15:02:51 -07001441 return 0;
1442
Ross Zwisler50378352015-10-05 16:33:36 -06001443 /* support for DAX */
1444 if (vma_is_dax(vma)) {
1445 if ((vma->vm_flags & VM_SHARED) && FILTER(DAX_SHARED))
1446 goto whole;
1447 if (!(vma->vm_flags & VM_SHARED) && FILTER(DAX_PRIVATE))
1448 goto whole;
1449 return 0;
1450 }
1451
KOSAKI Motohiroe575f112008-10-18 20:27:08 -07001452 /* Hugetlb memory check */
1453 if (vma->vm_flags & VM_HUGETLB) {
1454 if ((vma->vm_flags & VM_SHARED) && FILTER(HUGETLB_SHARED))
1455 goto whole;
1456 if (!(vma->vm_flags & VM_SHARED) && FILTER(HUGETLB_PRIVATE))
1457 goto whole;
Naoya Horiguchi23d9e482013-04-17 15:58:28 -07001458 return 0;
KOSAKI Motohiroe575f112008-10-18 20:27:08 -07001459 }
1460
Linus Torvalds1da177e2005-04-16 15:20:36 -07001461 /* Do not dump I/O mapped devices or special mappings */
Konstantin Khlebnikov314e51b2012-10-08 16:29:02 -07001462 if (vma->vm_flags & VM_IO)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001463 return 0;
1464
Kawai, Hidehiroa1b59e82007-07-19 01:48:29 -07001465 /* By default, dump shared memory if mapped from an anonymous file. */
1466 if (vma->vm_flags & VM_SHARED) {
Al Viro496ad9a2013-01-23 17:07:38 -05001467 if (file_inode(vma->vm_file)->i_nlink == 0 ?
Roland McGrath82df3972007-10-16 23:27:02 -07001468 FILTER(ANON_SHARED) : FILTER(MAPPED_SHARED))
1469 goto whole;
1470 return 0;
Kawai, Hidehiroa1b59e82007-07-19 01:48:29 -07001471 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001472
Roland McGrath82df3972007-10-16 23:27:02 -07001473 /* Dump segments that have been written to. */
1474 if (vma->anon_vma && FILTER(ANON_PRIVATE))
1475 goto whole;
1476 if (vma->vm_file == NULL)
1477 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001478
Roland McGrath82df3972007-10-16 23:27:02 -07001479 if (FILTER(MAPPED_PRIVATE))
1480 goto whole;
1481
1482 /*
1483 * If this looks like the beginning of a DSO or executable mapping,
1484 * check for an ELF header. If we find one, dump the first page to
1485 * aid in determining what was mapped here.
1486 */
Roland McGrath92dc07b2009-02-06 17:34:07 -08001487 if (FILTER(ELF_HEADERS) &&
1488 vma->vm_pgoff == 0 && (vma->vm_flags & VM_READ)) {
Roland McGrath82df3972007-10-16 23:27:02 -07001489 u32 __user *header = (u32 __user *) vma->vm_start;
1490 u32 word;
Roland McGrath92dc07b2009-02-06 17:34:07 -08001491 mm_segment_t fs = get_fs();
Roland McGrath82df3972007-10-16 23:27:02 -07001492 /*
1493 * Doing it this way gets the constant folded by GCC.
1494 */
1495 union {
1496 u32 cmp;
1497 char elfmag[SELFMAG];
1498 } magic;
1499 BUILD_BUG_ON(SELFMAG != sizeof word);
1500 magic.elfmag[EI_MAG0] = ELFMAG0;
1501 magic.elfmag[EI_MAG1] = ELFMAG1;
1502 magic.elfmag[EI_MAG2] = ELFMAG2;
1503 magic.elfmag[EI_MAG3] = ELFMAG3;
Roland McGrath92dc07b2009-02-06 17:34:07 -08001504 /*
1505 * Switch to the user "segment" for get_user(),
1506 * then put back what elf_core_dump() had in place.
1507 */
1508 set_fs(USER_DS);
1509 if (unlikely(get_user(word, header)))
1510 word = 0;
1511 set_fs(fs);
1512 if (word == magic.cmp)
Roland McGrath82df3972007-10-16 23:27:02 -07001513 return PAGE_SIZE;
1514 }
1515
1516#undef FILTER
1517
1518 return 0;
1519
1520whole:
1521 return vma->vm_end - vma->vm_start;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001522}
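/*
 * Added note (a sketch, not in the original source): each FILTER() bit
 * above is an MMF_DUMP_* flag in mm_flags, controlled from userspace
 * via /proc/<pid>/coredump_filter. For example, with ELF_HEADERS set
 * but MAPPED_PRIVATE clear, a private file mapping contributes only its
 * first page, and only when that page begins with the ELF magic - which
 * is exactly the get_user() check performed above.
 */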
1523
Linus Torvalds1da177e2005-04-16 15:20:36 -07001524/* An ELF note in memory */
1525struct memelfnote
1526{
1527 const char *name;
1528 int type;
1529 unsigned int datasz;
1530 void *data;
1531};
1532
1533static int notesize(struct memelfnote *en)
1534{
1535 int sz;
1536
1537 sz = sizeof(struct elf_note);
1538 sz += roundup(strlen(en->name) + 1, 4);
1539 sz += roundup(en->datasz, 4);
1540
1541 return sz;
1542}
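/*
 * Added worked example (illustrative values): for a note named "CORE"
 * and a hypothetical 12-byte payload, notesize() computes
 *
 *	sizeof(struct elf_note)			12  (three 4-byte words)
 *	roundup(strlen("CORE") + 1, 4)		 8
 *	roundup(12, 4)				12
 *	total					32 bytes
 *
 * which matches the record that writenote() below emits.
 */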
1543
Al Viroecc8c772013-10-05 15:32:35 -04001544static int writenote(struct memelfnote *men, struct coredump_params *cprm)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001545{
1546 struct elf_note en;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001547 en.n_namesz = strlen(men->name) + 1;
1548 en.n_descsz = men->datasz;
1549 en.n_type = men->type;
1550
Al Viroecc8c772013-10-05 15:32:35 -04001551 return dump_emit(cprm, &en, sizeof(en)) &&
Al Viro22a8cb82013-10-08 11:05:01 -04001552 dump_emit(cprm, men->name, en.n_namesz) && dump_align(cprm, 4) &&
1553 dump_emit(cprm, men->data, men->datasz) && dump_align(cprm, 4);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001554}
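/*
 * Added sketch of the byte stream writenote() produces for the example
 * above (assuming the standard 4-byte ELF note fields):
 *
 *	offset  0: n_namesz = 5
 *	offset  4: n_descsz = 12
 *	offset  8: n_type
 *	offset 12: "CORE\0" plus 3 bytes of zero padding
 *	offset 20: 12 bytes of descriptor data
 *
 * The dump_align(cprm, 4) calls supply the zero padding after the name
 * and, when needed, after the descriptor.
 */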
Linus Torvalds1da177e2005-04-16 15:20:36 -07001555
Roland McGrath3aba4812008-01-30 13:31:44 +01001556static void fill_elf_header(struct elfhdr *elf, int segs,
Zhang Yanfeid3330cf2013-02-21 16:44:20 -08001557 u16 machine, u32 flags)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001558{
Cyrill Gorcunov6970c8e2008-04-29 01:01:18 -07001559 memset(elf, 0, sizeof(*elf));
1560
Linus Torvalds1da177e2005-04-16 15:20:36 -07001561 memcpy(elf->e_ident, ELFMAG, SELFMAG);
1562 elf->e_ident[EI_CLASS] = ELF_CLASS;
1563 elf->e_ident[EI_DATA] = ELF_DATA;
1564 elf->e_ident[EI_VERSION] = EV_CURRENT;
1565 elf->e_ident[EI_OSABI] = ELF_OSABI;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001566
1567 elf->e_type = ET_CORE;
Roland McGrath3aba4812008-01-30 13:31:44 +01001568 elf->e_machine = machine;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001569 elf->e_version = EV_CURRENT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001570 elf->e_phoff = sizeof(struct elfhdr);
Roland McGrath3aba4812008-01-30 13:31:44 +01001571 elf->e_flags = flags;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001572 elf->e_ehsize = sizeof(struct elfhdr);
1573 elf->e_phentsize = sizeof(struct elf_phdr);
1574 elf->e_phnum = segs;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001575}
1576
Andrew Morton8d6b5eee2006-09-25 23:32:04 -07001577static void fill_elf_note_phdr(struct elf_phdr *phdr, int sz, loff_t offset)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001578{
1579 phdr->p_type = PT_NOTE;
1580 phdr->p_offset = offset;
1581 phdr->p_vaddr = 0;
1582 phdr->p_paddr = 0;
1583 phdr->p_filesz = sz;
1584 phdr->p_memsz = 0;
1585 phdr->p_flags = 0;
1586 phdr->p_align = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001587}
1588
1589static void fill_note(struct memelfnote *note, const char *name, int type,
1590 unsigned int sz, void *data)
1591{
1592 note->name = name;
1593 note->type = type;
1594 note->datasz = sz;
1595 note->data = data;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001596}
1597
1598/*
Jesper Juhlf4e5cc22006-06-23 02:05:35 -07001599 * fill in all the fields in prstatus from the given task struct, except
 1601 * the registers, which need to be filled in separately.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001601 */
1602static void fill_prstatus(struct elf_prstatus *prstatus,
Jesper Juhlf4e5cc22006-06-23 02:05:35 -07001603 struct task_struct *p, long signr)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001604{
1605 prstatus->pr_info.si_signo = prstatus->pr_cursig = signr;
1606 prstatus->pr_sigpend = p->pending.signal.sig[0];
1607 prstatus->pr_sighold = p->blocked.sig[0];
Oleg Nesterov3b34fc52009-06-17 16:27:38 -07001608 rcu_read_lock();
1609 prstatus->pr_ppid = task_pid_vnr(rcu_dereference(p->real_parent));
1610 rcu_read_unlock();
Pavel Emelyanovb4888932007-10-18 23:40:14 -07001611 prstatus->pr_pid = task_pid_vnr(p);
Pavel Emelyanovb4888932007-10-18 23:40:14 -07001612 prstatus->pr_pgrp = task_pgrp_vnr(p);
1613 prstatus->pr_sid = task_session_vnr(p);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001614 if (thread_group_leader(p)) {
Frederic Weisbeckercd19c362017-01-31 04:09:27 +01001615 struct task_cputime cputime;
Frank Mayharf06febc2008-09-12 09:54:39 -07001616
Linus Torvalds1da177e2005-04-16 15:20:36 -07001617 /*
Frank Mayharf06febc2008-09-12 09:54:39 -07001618 * This is the record for the group leader. It shows the
1619 * group-wide total, not its individual thread total.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001620 */
Frederic Weisbeckercd19c362017-01-31 04:09:27 +01001621 thread_group_cputime(p, &cputime);
Arnd Bergmanne2bb80d2017-11-23 13:46:33 +01001622 prstatus->pr_utime = ns_to_kernel_old_timeval(cputime.utime);
1623 prstatus->pr_stime = ns_to_kernel_old_timeval(cputime.stime);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001624 } else {
Frederic Weisbeckercd19c362017-01-31 04:09:27 +01001625 u64 utime, stime;
Frederic Weisbecker6fac4822012-11-13 14:20:55 +01001626
Frederic Weisbeckercd19c362017-01-31 04:09:27 +01001627 task_cputime(p, &utime, &stime);
Arnd Bergmanne2bb80d2017-11-23 13:46:33 +01001628 prstatus->pr_utime = ns_to_kernel_old_timeval(utime);
1629 prstatus->pr_stime = ns_to_kernel_old_timeval(stime);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001630 }
Frederic Weisbecker5613fda2017-01-31 04:09:23 +01001631
Arnd Bergmanne2bb80d2017-11-23 13:46:33 +01001632 prstatus->pr_cutime = ns_to_kernel_old_timeval(p->signal->cutime);
1633 prstatus->pr_cstime = ns_to_kernel_old_timeval(p->signal->cstime);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001634}
1635
1636static int fill_psinfo(struct elf_prpsinfo *psinfo, struct task_struct *p,
1637 struct mm_struct *mm)
1638{
David Howellsc69e8d92008-11-14 10:39:19 +11001639 const struct cred *cred;
Greg Kroah-Hartmana84a5052005-05-11 00:10:44 -07001640 unsigned int i, len;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001641
1642 /* first copy the parameters from user space */
1643 memset(psinfo, 0, sizeof(struct elf_prpsinfo));
1644
1645 len = mm->arg_end - mm->arg_start;
1646 if (len >= ELF_PRARGSZ)
1647 len = ELF_PRARGSZ-1;
1648 if (copy_from_user(&psinfo->pr_psargs,
1649 (const char __user *)mm->arg_start, len))
1650 return -EFAULT;
 1652	for (i = 0; i < len; i++)
1652 if (psinfo->pr_psargs[i] == 0)
1653 psinfo->pr_psargs[i] = ' ';
1654 psinfo->pr_psargs[len] = 0;
1655
Oleg Nesterov3b34fc52009-06-17 16:27:38 -07001656 rcu_read_lock();
1657 psinfo->pr_ppid = task_pid_vnr(rcu_dereference(p->real_parent));
1658 rcu_read_unlock();
Pavel Emelyanovb4888932007-10-18 23:40:14 -07001659 psinfo->pr_pid = task_pid_vnr(p);
Pavel Emelyanovb4888932007-10-18 23:40:14 -07001660 psinfo->pr_pgrp = task_pgrp_vnr(p);
1661 psinfo->pr_sid = task_session_vnr(p);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001662
1663 i = p->state ? ffz(~p->state) + 1 : 0;
1664 psinfo->pr_state = i;
Carsten Otte55148542006-03-25 03:08:22 -08001665 psinfo->pr_sname = (i > 5) ? '.' : "RSDTZW"[i];
Linus Torvalds1da177e2005-04-16 15:20:36 -07001666 psinfo->pr_zomb = psinfo->pr_sname == 'Z';
1667 psinfo->pr_nice = task_nice(p);
1668 psinfo->pr_flag = p->flags;
David Howellsc69e8d92008-11-14 10:39:19 +11001669 rcu_read_lock();
1670 cred = __task_cred(p);
Eric W. Biedermanebc887b2012-02-07 18:36:10 -08001671 SET_UID(psinfo->pr_uid, from_kuid_munged(cred->user_ns, cred->uid));
1672 SET_GID(psinfo->pr_gid, from_kgid_munged(cred->user_ns, cred->gid));
David Howellsc69e8d92008-11-14 10:39:19 +11001673 rcu_read_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07001674 strncpy(psinfo->pr_fname, p->comm, sizeof(psinfo->pr_fname));
1675
1676 return 0;
1677}
1678
Roland McGrath3aba4812008-01-30 13:31:44 +01001679static void fill_auxv_note(struct memelfnote *note, struct mm_struct *mm)
1680{
1681 elf_addr_t *auxv = (elf_addr_t *) mm->saved_auxv;
1682 int i = 0;
1683 do
1684 i += 2;
1685 while (auxv[i - 2] != AT_NULL);
1686 fill_note(note, "CORE", NT_AUXV, i * sizeof(elf_addr_t), auxv);
1687}
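/*
 * Added illustration: mm->saved_auxv holds AT_* key/value pairs
 * terminated by AT_NULL, for example
 *
 *	{ AT_PHDR, 0x400040, AT_ENTRY, 0x401000, ..., AT_NULL, 0 }
 *
 * so the loop above advances two elf_addr_t entries at a time until it
 * has stepped past the terminating pair, and the whole array (AT_NULL
 * included) becomes the NT_AUXV payload.
 */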
1688
Denys Vlasenko49ae4d42012-10-04 17:15:35 -07001689static void fill_siginfo_note(struct memelfnote *note, user_siginfo_t *csigdata,
Eric W. Biedermanae7795b2018-09-25 11:27:20 +02001690 const kernel_siginfo_t *siginfo)
Denys Vlasenko49ae4d42012-10-04 17:15:35 -07001691{
1692 mm_segment_t old_fs = get_fs();
1693 set_fs(KERNEL_DS);
1694 copy_siginfo_to_user((user_siginfo_t __user *) csigdata, siginfo);
1695 set_fs(old_fs);
1696 fill_note(note, "CORE", NT_SIGINFO, sizeof(*csigdata), csigdata);
1697}
1698
Denys Vlasenko2aa362c2012-10-04 17:15:36 -07001699#define MAX_FILE_NOTE_SIZE (4*1024*1024)
1700/*
1701 * Format of NT_FILE note:
1702 *
1703 * long count -- how many files are mapped
1704 * long page_size -- units for file_ofs
1705 * array of [COUNT] elements of
1706 * long start
1707 * long end
1708 * long file_ofs
1709 * followed by COUNT filenames in ASCII: "FILE1" NUL "FILE2" NUL...
1710 */
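/*
 * Added worked example (hypothetical values): a process with two file
 * mappings would yield a payload laid out as
 *
 *	count = 2, page_size = PAGE_SIZE
 *	[ start0, end0, file_ofs0 ]
 *	[ start1, end1, file_ofs1 ]
 *	"/bin/cat\0/lib/libc.so.6\0"
 *
 * where each file_ofs is the mapping's vm_pgoff, i.e. its file offset
 * expressed in page_size units.
 */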
Dan Aloni72023652013-09-30 13:45:02 -07001711static int fill_files_note(struct memelfnote *note)
Denys Vlasenko2aa362c2012-10-04 17:15:36 -07001712{
Alexey Dobriyan03c6d722020-01-30 22:16:58 -08001713 struct mm_struct *mm = current->mm;
Denys Vlasenko2aa362c2012-10-04 17:15:36 -07001714 struct vm_area_struct *vma;
1715 unsigned count, size, names_ofs, remaining, n;
1716 user_long_t *data;
1717 user_long_t *start_end_ofs;
1718 char *name_base, *name_curpos;
1719
1720 /* *Estimated* file count and total data size needed */
Alexey Dobriyan03c6d722020-01-30 22:16:58 -08001721 count = mm->map_count;
Alexey Dobriyan60c9d922018-02-06 15:39:13 -08001722 if (count > UINT_MAX / 64)
1723 return -EINVAL;
Denys Vlasenko2aa362c2012-10-04 17:15:36 -07001724 size = count * 64;
1725
1726 names_ofs = (2 + 3 * count) * sizeof(data[0]);
1727 alloc:
1728 if (size >= MAX_FILE_NOTE_SIZE) /* paranoia check */
Dan Aloni72023652013-09-30 13:45:02 -07001729 return -EINVAL;
Denys Vlasenko2aa362c2012-10-04 17:15:36 -07001730 size = round_up(size, PAGE_SIZE);
Alexey Dobriyan1fbede62020-01-30 22:17:10 -08001731 /*
1732 * "size" can be 0 here legitimately.
1733 * Let it ENOMEM and omit NT_FILE section which will be empty anyway.
1734 */
Alexey Dobriyan86a2bb52018-06-14 15:27:24 -07001735 data = kvmalloc(size, GFP_KERNEL);
1736 if (ZERO_OR_NULL_PTR(data))
Dan Aloni72023652013-09-30 13:45:02 -07001737 return -ENOMEM;
Denys Vlasenko2aa362c2012-10-04 17:15:36 -07001738
1739 start_end_ofs = data + 2;
1740 name_base = name_curpos = ((char *)data) + names_ofs;
1741 remaining = size - names_ofs;
1742 count = 0;
Alexey Dobriyan03c6d722020-01-30 22:16:58 -08001743 for (vma = mm->mmap; vma != NULL; vma = vma->vm_next) {
Denys Vlasenko2aa362c2012-10-04 17:15:36 -07001744 struct file *file;
1745 const char *filename;
1746
1747 file = vma->vm_file;
1748 if (!file)
1749 continue;
Miklos Szeredi9bf39ab2015-06-19 10:29:13 +02001750 filename = file_path(file, name_curpos, remaining);
Denys Vlasenko2aa362c2012-10-04 17:15:36 -07001751 if (IS_ERR(filename)) {
1752 if (PTR_ERR(filename) == -ENAMETOOLONG) {
Alexey Dobriyan86a2bb52018-06-14 15:27:24 -07001753 kvfree(data);
Denys Vlasenko2aa362c2012-10-04 17:15:36 -07001754 size = size * 5 / 4;
1755 goto alloc;
1756 }
1757 continue;
1758 }
1759
Miklos Szeredi9bf39ab2015-06-19 10:29:13 +02001760 /* file_path() fills at the end, move name down */
Denys Vlasenko2aa362c2012-10-04 17:15:36 -07001761 /* n = strlen(filename) + 1: */
1762 n = (name_curpos + remaining) - filename;
1763 remaining = filename - name_curpos;
1764 memmove(name_curpos, filename, n);
1765 name_curpos += n;
1766
1767 *start_end_ofs++ = vma->vm_start;
1768 *start_end_ofs++ = vma->vm_end;
1769 *start_end_ofs++ = vma->vm_pgoff;
1770 count++;
1771 }
1772
 1774	/* Now that we know the exact file count, we can store it */
1774 data[0] = count;
1775 data[1] = PAGE_SIZE;
1776 /*
Alexey Dobriyan03c6d722020-01-30 22:16:58 -08001777	 * The count is usually less than mm->map_count,
Denys Vlasenko2aa362c2012-10-04 17:15:36 -07001778	 * so we need to move the filenames down.
1779 */
Alexey Dobriyan03c6d722020-01-30 22:16:58 -08001780 n = mm->map_count - count;
Denys Vlasenko2aa362c2012-10-04 17:15:36 -07001781 if (n != 0) {
1782 unsigned shift_bytes = n * 3 * sizeof(data[0]);
1783 memmove(name_base - shift_bytes, name_base,
1784 name_curpos - name_base);
1785 name_curpos -= shift_bytes;
1786 }
1787
1788 size = name_curpos - (char *)data;
1789 fill_note(note, "CORE", NT_FILE, size, data);
Dan Aloni72023652013-09-30 13:45:02 -07001790 return 0;
Denys Vlasenko2aa362c2012-10-04 17:15:36 -07001791}
1792
Roland McGrath4206d3a2008-01-30 13:31:45 +01001793#ifdef CORE_DUMP_USE_REGSET
1794#include <linux/regset.h>
1795
1796struct elf_thread_core_info {
1797 struct elf_thread_core_info *next;
1798 struct task_struct *task;
1799 struct elf_prstatus prstatus;
1800 struct memelfnote notes[0];
1801};
1802
1803struct elf_note_info {
1804 struct elf_thread_core_info *thread;
1805 struct memelfnote psinfo;
Denys Vlasenko49ae4d42012-10-04 17:15:35 -07001806 struct memelfnote signote;
Roland McGrath4206d3a2008-01-30 13:31:45 +01001807 struct memelfnote auxv;
Denys Vlasenko2aa362c2012-10-04 17:15:36 -07001808 struct memelfnote files;
Denys Vlasenko49ae4d42012-10-04 17:15:35 -07001809 user_siginfo_t csigdata;
Roland McGrath4206d3a2008-01-30 13:31:45 +01001810 size_t size;
1811 int thread_notes;
1812};
1813
Roland McGrathd31472b2008-03-04 14:28:30 -08001814/*
1815 * When a regset has a writeback hook, we call it on each thread before
1816 * dumping user memory. On register window machines, this makes sure the
1817 * user memory backing the register data is up to date before we read it.
1818 */
1819static void do_thread_regset_writeback(struct task_struct *task,
1820 const struct user_regset *regset)
1821{
1822 if (regset->writeback)
1823 regset->writeback(task, regset, 1);
1824}
1825
H. J. Lu0953f65d2012-02-14 13:34:52 -08001826#ifndef PRSTATUS_SIZE
Dmitry Safonov90954e72016-09-05 16:33:06 +03001827#define PRSTATUS_SIZE(S, R) sizeof(S)
H. J. Lu0953f65d2012-02-14 13:34:52 -08001828#endif
1829
1830#ifndef SET_PR_FPVALID
Dmitry Safonov90954e72016-09-05 16:33:06 +03001831#define SET_PR_FPVALID(S, V, R) ((S)->pr_fpvalid = (V))
H. J. Lu0953f65d2012-02-14 13:34:52 -08001832#endif
1833
Roland McGrath4206d3a2008-01-30 13:31:45 +01001834static int fill_thread_core_info(struct elf_thread_core_info *t,
1835 const struct user_regset_view *view,
1836 long signr, size_t *total)
1837{
1838 unsigned int i;
Dave Martin27e64b42017-10-31 15:50:53 +00001839 unsigned int regset0_size = regset_size(t->task, &view->regsets[0]);
Roland McGrath4206d3a2008-01-30 13:31:45 +01001840
1841 /*
1842 * NT_PRSTATUS is the one special case, because the regset data
1843 * goes into the pr_reg field inside the note contents, rather
 1845	 * than being the whole note contents. We fill the rest in here.
1845 * We assume that regset 0 is NT_PRSTATUS.
1846 */
1847 fill_prstatus(&t->prstatus, t->task, signr);
Dave Martin27e64b42017-10-31 15:50:53 +00001848 (void) view->regsets[0].get(t->task, &view->regsets[0], 0, regset0_size,
Dmitry Safonov90954e72016-09-05 16:33:06 +03001849 &t->prstatus.pr_reg, NULL);
Roland McGrath4206d3a2008-01-30 13:31:45 +01001850
1851 fill_note(&t->notes[0], "CORE", NT_PRSTATUS,
Dave Martin27e64b42017-10-31 15:50:53 +00001852 PRSTATUS_SIZE(t->prstatus, regset0_size), &t->prstatus);
Roland McGrath4206d3a2008-01-30 13:31:45 +01001853 *total += notesize(&t->notes[0]);
1854
Roland McGrathd31472b2008-03-04 14:28:30 -08001855 do_thread_regset_writeback(t->task, &view->regsets[0]);
1856
Roland McGrath4206d3a2008-01-30 13:31:45 +01001857 /*
1858 * Each other regset might generate a note too. For each regset
1859 * that has no core_note_type or is inactive, we leave t->notes[i]
1860 * all zero and we'll know to skip writing it later.
1861 */
1862 for (i = 1; i < view->n; ++i) {
1863 const struct user_regset *regset = &view->regsets[i];
Roland McGrathd31472b2008-03-04 14:28:30 -08001864 do_thread_regset_writeback(t->task, regset);
H. Peter Anvinc8e25252012-03-02 10:43:48 -08001865 if (regset->core_note_type && regset->get &&
Maciej W. Rozycki2f819db2018-05-15 23:32:45 +01001866 (!regset->active || regset->active(t->task, regset) > 0)) {
Roland McGrath4206d3a2008-01-30 13:31:45 +01001867 int ret;
Dave Martin27e64b42017-10-31 15:50:53 +00001868 size_t size = regset_size(t->task, regset);
Roland McGrath4206d3a2008-01-30 13:31:45 +01001869 void *data = kmalloc(size, GFP_KERNEL);
1870 if (unlikely(!data))
1871 return 0;
1872 ret = regset->get(t->task, regset,
1873 0, size, data, NULL);
1874 if (unlikely(ret))
1875 kfree(data);
1876 else {
1877 if (regset->core_note_type != NT_PRFPREG)
1878 fill_note(&t->notes[i], "LINUX",
1879 regset->core_note_type,
1880 size, data);
1881 else {
Dmitry Safonov90954e72016-09-05 16:33:06 +03001882 SET_PR_FPVALID(&t->prstatus,
Dave Martin27e64b42017-10-31 15:50:53 +00001883 1, regset0_size);
Roland McGrath4206d3a2008-01-30 13:31:45 +01001884 fill_note(&t->notes[i], "CORE",
1885 NT_PRFPREG, size, data);
1886 }
1887 *total += notesize(&t->notes[i]);
1888 }
1889 }
1890 }
1891
1892 return 1;
1893}
1894
1895static int fill_note_info(struct elfhdr *elf, int phdrs,
1896 struct elf_note_info *info,
Eric W. Biedermanae7795b2018-09-25 11:27:20 +02001897 const kernel_siginfo_t *siginfo, struct pt_regs *regs)
Roland McGrath4206d3a2008-01-30 13:31:45 +01001898{
1899 struct task_struct *dump_task = current;
1900 const struct user_regset_view *view = task_user_regset_view(dump_task);
1901 struct elf_thread_core_info *t;
1902 struct elf_prpsinfo *psinfo;
Oleg Nesterov83914442008-07-25 01:47:45 -07001903 struct core_thread *ct;
Roland McGrath4206d3a2008-01-30 13:31:45 +01001904 unsigned int i;
1905
1906 info->size = 0;
1907 info->thread = NULL;
1908
1909 psinfo = kmalloc(sizeof(*psinfo), GFP_KERNEL);
Alan Cox6899e922012-12-17 16:02:09 -08001910 if (psinfo == NULL) {
1911 info->psinfo.data = NULL; /* So we don't free this wrongly */
Roland McGrath4206d3a2008-01-30 13:31:45 +01001912 return 0;
Alan Cox6899e922012-12-17 16:02:09 -08001913 }
Roland McGrath4206d3a2008-01-30 13:31:45 +01001914
Amerigo Wange2dbe122009-07-01 01:06:26 -04001915 fill_note(&info->psinfo, "CORE", NT_PRPSINFO, sizeof(*psinfo), psinfo);
1916
Roland McGrath4206d3a2008-01-30 13:31:45 +01001917 /*
1918 * Figure out how many notes we're going to need for each thread.
1919 */
1920 info->thread_notes = 0;
1921 for (i = 0; i < view->n; ++i)
1922 if (view->regsets[i].core_note_type != 0)
1923 ++info->thread_notes;
1924
1925 /*
 1927	 * Sanity check. We rely on regset 0 being NT_PRSTATUS,
1927 * since it is our one special case.
1928 */
1929 if (unlikely(info->thread_notes == 0) ||
1930 unlikely(view->regsets[0].core_note_type != NT_PRSTATUS)) {
1931 WARN_ON(1);
1932 return 0;
1933 }
1934
1935 /*
1936 * Initialize the ELF file header.
1937 */
1938 fill_elf_header(elf, phdrs,
Zhang Yanfeid3330cf2013-02-21 16:44:20 -08001939 view->e_machine, view->e_flags);
Roland McGrath4206d3a2008-01-30 13:31:45 +01001940
1941 /*
1942 * Allocate a structure for each thread.
1943 */
Oleg Nesterov83914442008-07-25 01:47:45 -07001944 for (ct = &dump_task->mm->core_state->dumper; ct; ct = ct->next) {
1945 t = kzalloc(offsetof(struct elf_thread_core_info,
1946 notes[info->thread_notes]),
1947 GFP_KERNEL);
1948 if (unlikely(!t))
1949 return 0;
Oleg Nesterov24d52882008-07-25 01:47:40 -07001950
Oleg Nesterov83914442008-07-25 01:47:45 -07001951 t->task = ct->task;
1952 if (ct->task == dump_task || !info->thread) {
1953 t->next = info->thread;
1954 info->thread = t;
1955 } else {
1956 /*
1957 * Make sure to keep the original task at
1958 * the head of the list.
1959 */
1960 t->next = info->thread->next;
1961 info->thread->next = t;
Roland McGrath4206d3a2008-01-30 13:31:45 +01001962 }
Oleg Nesterov83914442008-07-25 01:47:45 -07001963 }
Roland McGrath4206d3a2008-01-30 13:31:45 +01001964
1965 /*
1966 * Now fill in each thread's information.
1967 */
1968 for (t = info->thread; t != NULL; t = t->next)
Denys Vlasenko5ab1c302012-10-04 17:15:29 -07001969 if (!fill_thread_core_info(t, view, siginfo->si_signo, &info->size))
Roland McGrath4206d3a2008-01-30 13:31:45 +01001970 return 0;
1971
1972 /*
1973 * Fill in the two process-wide notes.
1974 */
1975 fill_psinfo(psinfo, dump_task->group_leader, dump_task->mm);
1976 info->size += notesize(&info->psinfo);
1977
Denys Vlasenko49ae4d42012-10-04 17:15:35 -07001978 fill_siginfo_note(&info->signote, &info->csigdata, siginfo);
1979 info->size += notesize(&info->signote);
1980
Roland McGrath4206d3a2008-01-30 13:31:45 +01001981 fill_auxv_note(&info->auxv, current->mm);
1982 info->size += notesize(&info->auxv);
1983
Dan Aloni72023652013-09-30 13:45:02 -07001984 if (fill_files_note(&info->files) == 0)
1985 info->size += notesize(&info->files);
Denys Vlasenko2aa362c2012-10-04 17:15:36 -07001986
Roland McGrath4206d3a2008-01-30 13:31:45 +01001987 return 1;
1988}
1989
1990static size_t get_note_info_size(struct elf_note_info *info)
1991{
1992 return info->size;
1993}
1994
1995/*
1996 * Write all the notes for each thread. When writing the first thread, the
1997 * process-wide notes are interleaved after the first thread-specific note.
1998 */
1999static int write_note_info(struct elf_note_info *info,
Al Viroecc8c772013-10-05 15:32:35 -04002000 struct coredump_params *cprm)
Roland McGrath4206d3a2008-01-30 13:31:45 +01002001{
Fabian Frederickb219e252014-06-04 16:12:14 -07002002 bool first = true;
Roland McGrath4206d3a2008-01-30 13:31:45 +01002003 struct elf_thread_core_info *t = info->thread;
2004
2005 do {
2006 int i;
2007
Al Viroecc8c772013-10-05 15:32:35 -04002008 if (!writenote(&t->notes[0], cprm))
Roland McGrath4206d3a2008-01-30 13:31:45 +01002009 return 0;
2010
Al Viroecc8c772013-10-05 15:32:35 -04002011 if (first && !writenote(&info->psinfo, cprm))
Roland McGrath4206d3a2008-01-30 13:31:45 +01002012 return 0;
Al Viroecc8c772013-10-05 15:32:35 -04002013 if (first && !writenote(&info->signote, cprm))
Denys Vlasenko49ae4d42012-10-04 17:15:35 -07002014 return 0;
Al Viroecc8c772013-10-05 15:32:35 -04002015 if (first && !writenote(&info->auxv, cprm))
Roland McGrath4206d3a2008-01-30 13:31:45 +01002016 return 0;
Dan Aloni72023652013-09-30 13:45:02 -07002017 if (first && info->files.data &&
Al Viroecc8c772013-10-05 15:32:35 -04002018 !writenote(&info->files, cprm))
Denys Vlasenko2aa362c2012-10-04 17:15:36 -07002019 return 0;
Roland McGrath4206d3a2008-01-30 13:31:45 +01002020
2021 for (i = 1; i < info->thread_notes; ++i)
2022 if (t->notes[i].data &&
Al Viroecc8c772013-10-05 15:32:35 -04002023 !writenote(&t->notes[i], cprm))
Roland McGrath4206d3a2008-01-30 13:31:45 +01002024 return 0;
2025
Fabian Frederickb219e252014-06-04 16:12:14 -07002026 first = false;
Roland McGrath4206d3a2008-01-30 13:31:45 +01002027 t = t->next;
2028 } while (t);
2029
2030 return 1;
2031}
2032
2033static void free_note_info(struct elf_note_info *info)
2034{
2035 struct elf_thread_core_info *threads = info->thread;
2036 while (threads) {
2037 unsigned int i;
2038 struct elf_thread_core_info *t = threads;
2039 threads = t->next;
2040 WARN_ON(t->notes[0].data && t->notes[0].data != &t->prstatus);
2041 for (i = 1; i < info->thread_notes; ++i)
2042 kfree(t->notes[i].data);
2043 kfree(t);
2044 }
2045 kfree(info->psinfo.data);
Alexey Dobriyan86a2bb52018-06-14 15:27:24 -07002046 kvfree(info->files.data);
Roland McGrath4206d3a2008-01-30 13:31:45 +01002047}
2048
2049#else
2050
Linus Torvalds1da177e2005-04-16 15:20:36 -07002051/* Here is the structure in which the status of each thread is captured. */
2052struct elf_thread_status
2053{
2054 struct list_head list;
2055 struct elf_prstatus prstatus; /* NT_PRSTATUS */
2056 elf_fpregset_t fpu; /* NT_PRFPREG */
2057 struct task_struct *thread;
2058#ifdef ELF_CORE_COPY_XFPREGS
Mark Nelson5b20cd82007-10-16 23:25:39 -07002059 elf_fpxregset_t xfpu; /* ELF_CORE_XFPREG_TYPE */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002060#endif
2061 struct memelfnote notes[3];
2062 int num_notes;
2063};
2064
2065/*
2066 * In order to add the specific thread information for the elf file format,
Jesper Juhlf4e5cc22006-06-23 02:05:35 -07002067 * we need to keep a linked list of every thread's pr_status and then create
2068 * a single section for them in the final core file.
Linus Torvalds1da177e2005-04-16 15:20:36 -07002069 */
2070static int elf_dump_thread_status(long signr, struct elf_thread_status *t)
2071{
2072 int sz = 0;
2073 struct task_struct *p = t->thread;
2074 t->num_notes = 0;
2075
2076 fill_prstatus(&t->prstatus, p, signr);
2077 elf_core_copy_task_regs(p, &t->prstatus.pr_reg);
2078
Jesper Juhlf4e5cc22006-06-23 02:05:35 -07002079 fill_note(&t->notes[0], "CORE", NT_PRSTATUS, sizeof(t->prstatus),
2080 &(t->prstatus));
Linus Torvalds1da177e2005-04-16 15:20:36 -07002081 t->num_notes++;
2082 sz += notesize(&t->notes[0]);
2083
Jesper Juhlf4e5cc22006-06-23 02:05:35 -07002084 if ((t->prstatus.pr_fpvalid = elf_core_copy_task_fpregs(p, NULL,
2085 &t->fpu))) {
2086 fill_note(&t->notes[1], "CORE", NT_PRFPREG, sizeof(t->fpu),
2087 &(t->fpu));
Linus Torvalds1da177e2005-04-16 15:20:36 -07002088 t->num_notes++;
2089 sz += notesize(&t->notes[1]);
2090 }
2091
2092#ifdef ELF_CORE_COPY_XFPREGS
2093 if (elf_core_copy_task_xfpregs(p, &t->xfpu)) {
Mark Nelson5b20cd82007-10-16 23:25:39 -07002094 fill_note(&t->notes[2], "LINUX", ELF_CORE_XFPREG_TYPE,
2095 sizeof(t->xfpu), &t->xfpu);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002096 t->num_notes++;
2097 sz += notesize(&t->notes[2]);
2098 }
2099#endif
2100 return sz;
2101}
2102
Roland McGrath3aba4812008-01-30 13:31:44 +01002103struct elf_note_info {
2104 struct memelfnote *notes;
Dan Aloni72023652013-09-30 13:45:02 -07002105 struct memelfnote *notes_files;
Roland McGrath3aba4812008-01-30 13:31:44 +01002106 struct elf_prstatus *prstatus; /* NT_PRSTATUS */
2107 struct elf_prpsinfo *psinfo; /* NT_PRPSINFO */
2108 struct list_head thread_list;
2109 elf_fpregset_t *fpu;
2110#ifdef ELF_CORE_COPY_XFPREGS
2111 elf_fpxregset_t *xfpu;
2112#endif
Denys Vlasenko49ae4d42012-10-04 17:15:35 -07002113 user_siginfo_t csigdata;
Roland McGrath3aba4812008-01-30 13:31:44 +01002114 int thread_status_size;
2115 int numnote;
2116};
2117
Amerigo Wang0cf062d2009-09-23 15:57:05 -07002118static int elf_note_info_init(struct elf_note_info *info)
Roland McGrath3aba4812008-01-30 13:31:44 +01002119{
Amerigo Wang0cf062d2009-09-23 15:57:05 -07002120 memset(info, 0, sizeof(*info));
Roland McGrath3aba4812008-01-30 13:31:44 +01002121 INIT_LIST_HEAD(&info->thread_list);
2122
Denys Vlasenko49ae4d42012-10-04 17:15:35 -07002123 /* Allocate space for ELF notes */
Kees Cook6da2ec52018-06-12 13:55:00 -07002124 info->notes = kmalloc_array(8, sizeof(struct memelfnote), GFP_KERNEL);
Roland McGrath3aba4812008-01-30 13:31:44 +01002125 if (!info->notes)
2126 return 0;
2127 info->psinfo = kmalloc(sizeof(*info->psinfo), GFP_KERNEL);
2128 if (!info->psinfo)
Denys Vlasenkof34f9d12012-09-26 11:34:50 +10002129 return 0;
Roland McGrath3aba4812008-01-30 13:31:44 +01002130 info->prstatus = kmalloc(sizeof(*info->prstatus), GFP_KERNEL);
2131 if (!info->prstatus)
Denys Vlasenkof34f9d12012-09-26 11:34:50 +10002132 return 0;
Roland McGrath3aba4812008-01-30 13:31:44 +01002133 info->fpu = kmalloc(sizeof(*info->fpu), GFP_KERNEL);
2134 if (!info->fpu)
Denys Vlasenkof34f9d12012-09-26 11:34:50 +10002135 return 0;
Roland McGrath3aba4812008-01-30 13:31:44 +01002136#ifdef ELF_CORE_COPY_XFPREGS
2137 info->xfpu = kmalloc(sizeof(*info->xfpu), GFP_KERNEL);
2138 if (!info->xfpu)
Denys Vlasenkof34f9d12012-09-26 11:34:50 +10002139 return 0;
Roland McGrath3aba4812008-01-30 13:31:44 +01002140#endif
Amerigo Wang0cf062d2009-09-23 15:57:05 -07002141 return 1;
Amerigo Wang0cf062d2009-09-23 15:57:05 -07002142}
Roland McGrath3aba4812008-01-30 13:31:44 +01002143
Amerigo Wang0cf062d2009-09-23 15:57:05 -07002144static int fill_note_info(struct elfhdr *elf, int phdrs,
2145 struct elf_note_info *info,
Eric W. Biedermanae7795b2018-09-25 11:27:20 +02002146 const kernel_siginfo_t *siginfo, struct pt_regs *regs)
Amerigo Wang0cf062d2009-09-23 15:57:05 -07002147{
Al Viroafabada2013-10-14 07:39:56 -04002148 struct core_thread *ct;
2149 struct elf_thread_status *ets;
Amerigo Wang0cf062d2009-09-23 15:57:05 -07002150
2151 if (!elf_note_info_init(info))
2152 return 0;
2153
Al Viroafabada2013-10-14 07:39:56 -04002154 for (ct = current->mm->core_state->dumper.next;
2155 ct; ct = ct->next) {
2156 ets = kzalloc(sizeof(*ets), GFP_KERNEL);
2157 if (!ets)
2158 return 0;
Oleg Nesterov24d52882008-07-25 01:47:40 -07002159
Al Viroafabada2013-10-14 07:39:56 -04002160 ets->thread = ct->task;
2161 list_add(&ets->list, &info->thread_list);
2162 }
Oleg Nesterov83914442008-07-25 01:47:45 -07002163
Alexey Dobriyan93f044e2019-03-07 16:28:59 -08002164 list_for_each_entry(ets, &info->thread_list, list) {
Al Viroafabada2013-10-14 07:39:56 -04002165 int sz;
Oleg Nesterov83914442008-07-25 01:47:45 -07002166
Al Viroafabada2013-10-14 07:39:56 -04002167 sz = elf_dump_thread_status(siginfo->si_signo, ets);
2168 info->thread_status_size += sz;
Roland McGrath3aba4812008-01-30 13:31:44 +01002169 }
2170 /* now collect the dump for the current */
2171 memset(info->prstatus, 0, sizeof(*info->prstatus));
Denys Vlasenko5ab1c302012-10-04 17:15:29 -07002172 fill_prstatus(info->prstatus, current, siginfo->si_signo);
Roland McGrath3aba4812008-01-30 13:31:44 +01002173 elf_core_copy_regs(&info->prstatus->pr_reg, regs);
2174
2175 /* Set up header */
Zhang Yanfeid3330cf2013-02-21 16:44:20 -08002176 fill_elf_header(elf, phdrs, ELF_ARCH, ELF_CORE_EFLAGS);
Roland McGrath3aba4812008-01-30 13:31:44 +01002177
2178 /*
2179 * Set up the notes in similar form to SVR4 core dumps made
2180 * with info from their /proc.
2181 */
2182
2183 fill_note(info->notes + 0, "CORE", NT_PRSTATUS,
2184 sizeof(*info->prstatus), info->prstatus);
2185 fill_psinfo(info->psinfo, current->group_leader, current->mm);
2186 fill_note(info->notes + 1, "CORE", NT_PRPSINFO,
2187 sizeof(*info->psinfo), info->psinfo);
2188
Denys Vlasenko2aa362c2012-10-04 17:15:36 -07002189 fill_siginfo_note(info->notes + 2, &info->csigdata, siginfo);
2190 fill_auxv_note(info->notes + 3, current->mm);
Dan Aloni72023652013-09-30 13:45:02 -07002191 info->numnote = 4;
Roland McGrath3aba4812008-01-30 13:31:44 +01002192
Dan Aloni72023652013-09-30 13:45:02 -07002193 if (fill_files_note(info->notes + info->numnote) == 0) {
2194 info->notes_files = info->notes + info->numnote;
2195 info->numnote++;
2196 }
Roland McGrath3aba4812008-01-30 13:31:44 +01002197
2198 /* Try to dump the FPU. */
2199 info->prstatus->pr_fpvalid = elf_core_copy_task_fpregs(current, regs,
2200 info->fpu);
2201 if (info->prstatus->pr_fpvalid)
2202 fill_note(info->notes + info->numnote++,
2203 "CORE", NT_PRFPREG, sizeof(*info->fpu), info->fpu);
2204#ifdef ELF_CORE_COPY_XFPREGS
2205 if (elf_core_copy_task_xfpregs(current, info->xfpu))
2206 fill_note(info->notes + info->numnote++,
2207 "LINUX", ELF_CORE_XFPREG_TYPE,
2208 sizeof(*info->xfpu), info->xfpu);
2209#endif
2210
2211 return 1;
Roland McGrath3aba4812008-01-30 13:31:44 +01002212}
2213
2214static size_t get_note_info_size(struct elf_note_info *info)
2215{
2216 int sz = 0;
2217 int i;
2218
2219 for (i = 0; i < info->numnote; i++)
2220 sz += notesize(info->notes + i);
2221
2222 sz += info->thread_status_size;
2223
2224 return sz;
2225}
2226
2227static int write_note_info(struct elf_note_info *info,
Al Viroecc8c772013-10-05 15:32:35 -04002228 struct coredump_params *cprm)
Roland McGrath3aba4812008-01-30 13:31:44 +01002229{
Alexey Dobriyan93f044e2019-03-07 16:28:59 -08002230 struct elf_thread_status *ets;
Roland McGrath3aba4812008-01-30 13:31:44 +01002231 int i;
Roland McGrath3aba4812008-01-30 13:31:44 +01002232
2233 for (i = 0; i < info->numnote; i++)
Al Viroecc8c772013-10-05 15:32:35 -04002234 if (!writenote(info->notes + i, cprm))
Roland McGrath3aba4812008-01-30 13:31:44 +01002235 return 0;
2236
2237 /* write out the thread status notes section */
Alexey Dobriyan93f044e2019-03-07 16:28:59 -08002238 list_for_each_entry(ets, &info->thread_list, list) {
2239 for (i = 0; i < ets->num_notes; i++)
2240 if (!writenote(&ets->notes[i], cprm))
Roland McGrath3aba4812008-01-30 13:31:44 +01002241 return 0;
2242 }
2243
2244 return 1;
2245}
2246
2247static void free_note_info(struct elf_note_info *info)
2248{
2249 while (!list_empty(&info->thread_list)) {
2250 struct list_head *tmp = info->thread_list.next;
2251 list_del(tmp);
2252 kfree(list_entry(tmp, struct elf_thread_status, list));
2253 }
2254
Dan Aloni72023652013-09-30 13:45:02 -07002255 /* Free data possibly allocated by fill_files_note(): */
2256 if (info->notes_files)
Alexey Dobriyan86a2bb52018-06-14 15:27:24 -07002257 kvfree(info->notes_files->data);
Denys Vlasenko2aa362c2012-10-04 17:15:36 -07002258
Roland McGrath3aba4812008-01-30 13:31:44 +01002259 kfree(info->prstatus);
2260 kfree(info->psinfo);
2261 kfree(info->notes);
2262 kfree(info->fpu);
2263#ifdef ELF_CORE_COPY_XFPREGS
2264 kfree(info->xfpu);
2265#endif
2266}
2267
Roland McGrath4206d3a2008-01-30 13:31:45 +01002268#endif
2269
Roland McGrathf47aef52007-01-26 00:56:49 -08002270static struct vm_area_struct *first_vma(struct task_struct *tsk,
2271 struct vm_area_struct *gate_vma)
2272{
2273 struct vm_area_struct *ret = tsk->mm->mmap;
2274
2275 if (ret)
2276 return ret;
2277 return gate_vma;
2278}
2279/*
2280 * Helper function for iterating across a vma list. It ensures that the caller
2281 * will visit `gate_vma' prior to terminating the search.
2282 */
2283static struct vm_area_struct *next_vma(struct vm_area_struct *this_vma,
2284 struct vm_area_struct *gate_vma)
2285{
2286 struct vm_area_struct *ret;
2287
2288 ret = this_vma->vm_next;
2289 if (ret)
2290 return ret;
2291 if (this_vma == gate_vma)
2292 return NULL;
2293 return gate_vma;
2294}
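/*
 * Added usage sketch (mirrors the loops in elf_core_dump() below):
 *
 *	for (vma = first_vma(current, gate_vma); vma != NULL;
 *	     vma = next_vma(vma, gate_vma)) {
 *		... visit vma ...
 *	}
 *
 * walks the mm's vma list and then visits the gate vma exactly once.
 */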
2295
Daisuke HATAYAMA8d9032b2010-03-05 13:44:10 -08002296static void fill_extnum_info(struct elfhdr *elf, struct elf_shdr *shdr4extnum,
2297 elf_addr_t e_shoff, int segs)
2298{
2299 elf->e_shoff = e_shoff;
2300 elf->e_shentsize = sizeof(*shdr4extnum);
2301 elf->e_shnum = 1;
2302 elf->e_shstrndx = SHN_UNDEF;
2303
2304 memset(shdr4extnum, 0, sizeof(*shdr4extnum));
2305
2306 shdr4extnum->sh_type = SHT_NULL;
2307 shdr4extnum->sh_size = elf->e_shnum;
2308 shdr4extnum->sh_link = elf->e_shstrndx;
2309 shdr4extnum->sh_info = segs;
2310}
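/*
 * Added example of extended numbering: if a process had 70000 segments,
 * e_phnum would be capped at PN_XNUM (0xffff) and the true count, 70000,
 * stored in the sh_info field of the single section header built here;
 * tools such as readelf recover the real value from there.
 */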
2311
Linus Torvalds1da177e2005-04-16 15:20:36 -07002312/*
2313 * Actual dumper
2314 *
2315 * This is a two-pass process; first we find the offsets of the bits,
 2317 * and then they are actually written out. If we hit the core file
 2318 * size limit, we just truncate.
2318 */
Masami Hiramatsuf6151df2009-12-17 15:27:16 -08002319static int elf_core_dump(struct coredump_params *cprm)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002320{
Linus Torvalds1da177e2005-04-16 15:20:36 -07002321 int has_dumped = 0;
2322 mm_segment_t fs;
Jungseung Lee52f5592e2014-12-10 15:52:16 -08002323 int segs, i;
2324 size_t vma_data_size = 0;
Roland McGrathf47aef52007-01-26 00:56:49 -08002325 struct vm_area_struct *vma, *gate_vma;
Alexey Dobriyan225a3f52020-01-30 22:17:04 -08002326 struct elfhdr elf;
Al Virocdc3d562013-10-05 22:24:29 -04002327 loff_t offset = 0, dataoff;
Dan Aloni72023652013-09-30 13:45:02 -07002328 struct elf_note_info info = { };
Daisuke HATAYAMA93eb2112010-03-05 13:44:09 -08002329 struct elf_phdr *phdr4note = NULL;
Daisuke HATAYAMA8d9032b2010-03-05 13:44:10 -08002330 struct elf_shdr *shdr4extnum = NULL;
2331 Elf_Half e_phnum;
2332 elf_addr_t e_shoff;
Jungseung Lee52f5592e2014-12-10 15:52:16 -08002333 elf_addr_t *vma_filesz = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002334
2335 /*
2336 * We no longer stop all VM operations.
2337 *
Jesper Juhlf4e5cc22006-06-23 02:05:35 -07002338	 * This is because those processes that could possibly change map_count
2339 * or the mmap / vma pages are now blocked in do_exit on current
2340 * finishing this core dump.
Linus Torvalds1da177e2005-04-16 15:20:36 -07002341 *
2342 * Only ptrace can touch these memory addresses, but it doesn't change
Jesper Juhlf4e5cc22006-06-23 02:05:35 -07002343 * the map_count or the pages allocated. So no possibility of crashing
Linus Torvalds1da177e2005-04-16 15:20:36 -07002344 * exists while dumping the mm->vm_next areas to the core file.
2345 */
2346
KAMEZAWA Hiroyuki341c87b2009-06-30 11:41:23 -07002347 /*
2348 * The number of segs are recored into ELF header as 16bit value.
2349 * Please check DEFAULT_MAX_MAP_COUNT definition when you modify here.
2350 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002351 segs = current->mm->map_count;
Daisuke HATAYAMA1fcccba2010-03-05 13:44:07 -08002352 segs += elf_core_extra_phdrs();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002353
Stephen Wilson31db58b2011-03-13 15:49:15 -04002354 gate_vma = get_gate_vma(current->mm);
Roland McGrathf47aef52007-01-26 00:56:49 -08002355 if (gate_vma != NULL)
2356 segs++;
2357
Daisuke HATAYAMA8d9032b2010-03-05 13:44:10 -08002358 /* for notes section */
2359 segs++;
2360
2361 /* If segs > PN_XNUM(0xffff), then e_phnum overflows. To avoid
 2363	 * this, the kernel supports extended numbering. Have a look at
2363 * include/linux/elf.h for further information. */
2364 e_phnum = segs > PN_XNUM ? PN_XNUM : segs;
2365
Roland McGrath3aba4812008-01-30 13:31:44 +01002366 /*
2367 * Collect all the non-memory information about the process for the
2368 * notes. This also sets up the file header.
2369 */
Alexey Dobriyan225a3f52020-01-30 22:17:04 -08002370 if (!fill_note_info(&elf, e_phnum, &info, cprm->siginfo, cprm->regs))
Roland McGrath3aba4812008-01-30 13:31:44 +01002371 goto cleanup;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002372
2373 has_dumped = 1;
Oleg Nesterov079148b2013-04-30 15:28:16 -07002374
Linus Torvalds1da177e2005-04-16 15:20:36 -07002375 fs = get_fs();
2376 set_fs(KERNEL_DS);
2377
Alexey Dobriyan225a3f52020-01-30 22:17:04 -08002378 offset += sizeof(elf); /* Elf header */
Daisuke HATAYAMA8d9032b2010-03-05 13:44:10 -08002379 offset += segs * sizeof(struct elf_phdr); /* Program headers */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002380
2381 /* Write notes phdr entry */
2382 {
Roland McGrath3aba4812008-01-30 13:31:44 +01002383 size_t sz = get_note_info_size(&info);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002384
Michael Ellermane5501492007-09-19 14:38:12 +10002385 sz += elf_coredump_extra_notes_size();
Dwayne Grant McConnellbf1ab972006-11-23 00:46:37 +01002386
Daisuke HATAYAMA93eb2112010-03-05 13:44:09 -08002387 phdr4note = kmalloc(sizeof(*phdr4note), GFP_KERNEL);
2388 if (!phdr4note)
Daisuke HATAYAMA088e7af2010-03-05 13:44:06 -08002389 goto end_coredump;
Daisuke HATAYAMA93eb2112010-03-05 13:44:09 -08002390
2391 fill_elf_note_phdr(phdr4note, sz, offset);
2392 offset += sz;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002393 }
2394
Linus Torvalds1da177e2005-04-16 15:20:36 -07002395 dataoff = offset = roundup(offset, ELF_EXEC_PAGESIZE);
2396
Alexey Dobriyan1fbede62020-01-30 22:17:10 -08002397 /*
 2399	 * A process with zero VMAs will get ZERO_SIZE_PTR here.
 2400	 * Let the coredump continue, for the register state at least.
2400 */
Alexey Dobriyan86a2bb52018-06-14 15:27:24 -07002401 vma_filesz = kvmalloc(array_size(sizeof(*vma_filesz), (segs - 1)),
2402 GFP_KERNEL);
Alexey Dobriyan1fbede62020-01-30 22:17:10 -08002403 if (!vma_filesz)
Jungseung Lee52f5592e2014-12-10 15:52:16 -08002404 goto end_coredump;
2405
2406 for (i = 0, vma = first_vma(current, gate_vma); vma != NULL;
2407 vma = next_vma(vma, gate_vma)) {
2408 unsigned long dump_size;
2409
2410 dump_size = vma_dump_size(vma, cprm->mm_flags);
2411 vma_filesz[i++] = dump_size;
2412 vma_data_size += dump_size;
2413 }
2414
2415 offset += vma_data_size;
Daisuke HATAYAMA8d9032b2010-03-05 13:44:10 -08002416 offset += elf_core_extra_data_size();
2417 e_shoff = offset;
2418
2419 if (e_phnum == PN_XNUM) {
2420 shdr4extnum = kmalloc(sizeof(*shdr4extnum), GFP_KERNEL);
2421 if (!shdr4extnum)
2422 goto end_coredump;
Alexey Dobriyan225a3f52020-01-30 22:17:04 -08002423 fill_extnum_info(&elf, shdr4extnum, e_shoff, segs);
Daisuke HATAYAMA8d9032b2010-03-05 13:44:10 -08002424 }
2425
2426 offset = dataoff;
2427
Alexey Dobriyan225a3f52020-01-30 22:17:04 -08002428 if (!dump_emit(cprm, &elf, sizeof(elf)))
Daisuke HATAYAMA93eb2112010-03-05 13:44:09 -08002429 goto end_coredump;
2430
Al Viroecc8c772013-10-05 15:32:35 -04002431 if (!dump_emit(cprm, phdr4note, sizeof(*phdr4note)))
Daisuke HATAYAMA93eb2112010-03-05 13:44:09 -08002432 goto end_coredump;
2433
Linus Torvalds1da177e2005-04-16 15:20:36 -07002434 /* Write program headers for segments dump */
Jungseung Lee52f5592e2014-12-10 15:52:16 -08002435 for (i = 0, vma = first_vma(current, gate_vma); vma != NULL;
Roland McGrathf47aef52007-01-26 00:56:49 -08002436 vma = next_vma(vma, gate_vma)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002437 struct elf_phdr phdr;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002438
2439 phdr.p_type = PT_LOAD;
2440 phdr.p_offset = offset;
2441 phdr.p_vaddr = vma->vm_start;
2442 phdr.p_paddr = 0;
Jungseung Lee52f5592e2014-12-10 15:52:16 -08002443 phdr.p_filesz = vma_filesz[i++];
Roland McGrath82df3972007-10-16 23:27:02 -07002444 phdr.p_memsz = vma->vm_end - vma->vm_start;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002445 offset += phdr.p_filesz;
2446 phdr.p_flags = vma->vm_flags & VM_READ ? PF_R : 0;
Jesper Juhlf4e5cc22006-06-23 02:05:35 -07002447 if (vma->vm_flags & VM_WRITE)
2448 phdr.p_flags |= PF_W;
2449 if (vma->vm_flags & VM_EXEC)
2450 phdr.p_flags |= PF_X;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002451 phdr.p_align = ELF_EXEC_PAGESIZE;
2452
Al Viroecc8c772013-10-05 15:32:35 -04002453 if (!dump_emit(cprm, &phdr, sizeof(phdr)))
Daisuke HATAYAMA088e7af2010-03-05 13:44:06 -08002454 goto end_coredump;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002455 }
2456
Al Viro506f21c2013-10-05 17:22:57 -04002457 if (!elf_core_write_extra_phdrs(cprm, offset))
Daisuke HATAYAMA1fcccba2010-03-05 13:44:07 -08002458 goto end_coredump;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002459
2460 /* write out the notes section */
Al Viroecc8c772013-10-05 15:32:35 -04002461 if (!write_note_info(&info, cprm))
Roland McGrath3aba4812008-01-30 13:31:44 +01002462 goto end_coredump;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002463
Al Virocdc3d562013-10-05 22:24:29 -04002464 if (elf_coredump_extra_notes_write(cprm))
Michael Ellermane5501492007-09-19 14:38:12 +10002465 goto end_coredump;
Dwayne Grant McConnellbf1ab972006-11-23 00:46:37 +01002466
Andi Kleend025c9d2006-09-30 23:29:28 -07002467 /* Align to page */
Mateusz Guzik1607f092016-06-05 23:14:14 +02002468 if (!dump_skip(cprm, dataoff - cprm->pos))
Hugh Dickinsf3e8fcc2009-09-21 17:03:25 -07002469 goto end_coredump;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002470
Jungseung Lee52f5592e2014-12-10 15:52:16 -08002471 for (i = 0, vma = first_vma(current, gate_vma); vma != NULL;
Roland McGrathf47aef52007-01-26 00:56:49 -08002472 vma = next_vma(vma, gate_vma)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002473 unsigned long addr;
Roland McGrath82df3972007-10-16 23:27:02 -07002474 unsigned long end;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002475
Jungseung Lee52f5592e2014-12-10 15:52:16 -08002476 end = vma->vm_start + vma_filesz[i++];
Linus Torvalds1da177e2005-04-16 15:20:36 -07002477
Roland McGrath82df3972007-10-16 23:27:02 -07002478 for (addr = vma->vm_start; addr < end; addr += PAGE_SIZE) {
Jesper Juhlf4e5cc22006-06-23 02:05:35 -07002479 struct page *page;
Hugh Dickinsf3e8fcc2009-09-21 17:03:25 -07002480 int stop;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002481
Hugh Dickinsf3e8fcc2009-09-21 17:03:25 -07002482 page = get_dump_page(addr);
2483 if (page) {
2484 void *kaddr = kmap(page);
Al Viro13046ec2013-10-05 18:08:47 -04002485 stop = !dump_emit(cprm, kaddr, PAGE_SIZE);
Hugh Dickinsf3e8fcc2009-09-21 17:03:25 -07002486 kunmap(page);
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03002487 put_page(page);
Hugh Dickinsf3e8fcc2009-09-21 17:03:25 -07002488 } else
Al Viro9b56d542013-10-08 09:26:08 -04002489 stop = !dump_skip(cprm, PAGE_SIZE);
Hugh Dickinsf3e8fcc2009-09-21 17:03:25 -07002490 if (stop)
2491 goto end_coredump;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002492 }
2493 }
Dave Kleikamp4d22c752017-01-11 13:25:00 -06002494 dump_truncate(cprm);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002495
Al Viroaa3e7ea2013-10-05 17:50:15 -04002496 if (!elf_core_write_extra_data(cprm))
Daisuke HATAYAMA1fcccba2010-03-05 13:44:07 -08002497 goto end_coredump;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002498
Daisuke HATAYAMA8d9032b2010-03-05 13:44:10 -08002499 if (e_phnum == PN_XNUM) {
Al Viro13046ec2013-10-05 18:08:47 -04002500 if (!dump_emit(cprm, shdr4extnum, sizeof(*shdr4extnum)))
Daisuke HATAYAMA8d9032b2010-03-05 13:44:10 -08002501 goto end_coredump;
2502 }
2503
Linus Torvalds1da177e2005-04-16 15:20:36 -07002504end_coredump:
2505 set_fs(fs);
2506
2507cleanup:
Roland McGrath3aba4812008-01-30 13:31:44 +01002508 free_note_info(&info);
Daisuke HATAYAMA8d9032b2010-03-05 13:44:10 -08002509 kfree(shdr4extnum);
Alexey Dobriyan86a2bb52018-06-14 15:27:24 -07002510 kvfree(vma_filesz);
Daisuke HATAYAMA93eb2112010-03-05 13:44:09 -08002511 kfree(phdr4note);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002512 return has_dumped;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002513}
2514
Christoph Hellwig698ba7b2009-12-15 16:47:37 -08002515#endif /* CONFIG_ELF_CORE */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002516
2517static int __init init_elf_binfmt(void)
2518{
Al Viro8fc3dc52012-03-17 03:05:16 -04002519 register_binfmt(&elf_format);
2520 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002521}
2522
2523static void __exit exit_elf_binfmt(void)
2524{
2525 /* Remove the COFF and ELF loaders. */
2526 unregister_binfmt(&elf_format);
2527}
2528
2529core_initcall(init_elf_binfmt);
2530module_exit(exit_elf_binfmt);
2531MODULE_LICENSE("GPL");