/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __SHMEM_FS_H
#define __SHMEM_FS_H

#include <linux/file.h>
#include <linux/swap.h>
#include <linux/mempolicy.h>
#include <linux/pagemap.h>
#include <linux/percpu_counter.h>
#include <linux/xattr.h>

/* inode in-kernel data */

struct shmem_inode_info {
	spinlock_t		lock;
	unsigned int		seals;		/* shmem seals */
	unsigned long		flags;
	unsigned long		alloced;	/* data pages alloced to file */
	unsigned long		swapped;	/* subtotal assigned to swap */
	struct list_head	shrinklist;	/* shrinkable hpage inodes */
	struct list_head	swaplist;	/* chain of maybes on swap */
	struct shared_policy	policy;		/* NUMA memory alloc policy */
	struct simple_xattrs	xattrs;		/* list of xattrs */
	atomic_t		stop_eviction;	/* hold when working on inode */
	struct inode		vfs_inode;
};

struct shmem_sb_info {
	unsigned long max_blocks;	/* How many blocks are allowed */
	struct percpu_counter used_blocks;  /* How many are allocated */
	unsigned long max_inodes;	/* How many inodes are allowed */
	unsigned long free_inodes;	/* How many are left for allocation */
	spinlock_t stat_lock;		/* Serialize shmem_sb_info changes */
	umode_t mode;			/* Mount mode for root directory */
	unsigned char huge;		/* Whether to try for hugepages */
	kuid_t uid;			/* Mount uid for root directory */
	kgid_t gid;			/* Mount gid for root directory */
	struct mempolicy *mpol;		/* default memory policy for mappings */
	spinlock_t shrinklist_lock;	/* Protects shrinklist */
	struct list_head shrinklist;	/* List of shrinkable inodes */
	unsigned long shrinklist_len;	/* Length of shrinklist */
};

static inline struct shmem_inode_info *SHMEM_I(struct inode *inode)
{
	return container_of(inode, struct shmem_inode_info, vfs_inode);
}
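
/*
 * Example (editor's illustrative sketch, not part of the original header):
 * code holding an inode known to be shmem-backed can recover the containing
 * shmem_inode_info with SHMEM_I():
 *
 *	struct shmem_inode_info *info = SHMEM_I(inode);
 *
 * Fields such as info->alloced and info->swapped are updated under
 * info->lock.
 */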

/*
 * Functions in mm/shmem.c called directly from elsewhere:
 */
extern int shmem_init(void);
extern int shmem_fill_super(struct super_block *sb, void *data, int silent);
extern struct file *shmem_file_setup(const char *name,
					loff_t size, unsigned long flags);
extern struct file *shmem_kernel_file_setup(const char *name, loff_t size,
					    unsigned long flags);
extern struct file *shmem_file_setup_with_mnt(struct vfsmount *mnt,
		const char *name, loff_t size, unsigned long flags);
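
/*
 * Example (illustrative sketch; the buffer name, size and flags below are
 * assumptions, not taken from this header): a typical in-kernel user
 * creates an unlinked tmpfs file to back an anonymous buffer.  The name is
 * cosmetic (it shows up in /proc/<pid>/maps) and shmem_file_setup()
 * returns an ERR_PTR on failure:
 *
 *	struct file *file;
 *
 *	file = shmem_file_setup("example-buffer", SZ_1M, VM_NORESERVE);
 *	if (IS_ERR(file))
 *		return PTR_ERR(file);
 *	...
 *	fput(file);
 *
 * fput() drops the file reference once the buffer is no longer needed.
 */
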
extern int shmem_zero_setup(struct vm_area_struct *);
extern unsigned long shmem_get_unmapped_area(struct file *, unsigned long addr,
		unsigned long len, unsigned long pgoff, unsigned long flags);
extern int shmem_lock(struct file *file, int lock, struct user_struct *user);
#ifdef CONFIG_SHMEM
extern bool shmem_mapping(struct address_space *mapping);
#else
static inline bool shmem_mapping(struct address_space *mapping)
{
	return false;
}
#endif /* CONFIG_SHMEM */
extern void shmem_unlock_mapping(struct address_space *mapping);
extern struct page *shmem_read_mapping_page_gfp(struct address_space *mapping,
					pgoff_t index, gfp_t gfp_mask);
extern void shmem_truncate_range(struct inode *inode, loff_t start, loff_t end);
extern int shmem_unuse(unsigned int type, bool frontswap,
		       unsigned long *fs_pages_to_unuse);

extern unsigned long shmem_swap_usage(struct vm_area_struct *vma);
extern unsigned long shmem_partial_swap_usage(struct address_space *mapping,
						pgoff_t start, pgoff_t end);

/* Flag allocation requirements to shmem_getpage */
enum sgp_type {
	SGP_READ,	/* don't exceed i_size, don't allocate page */
	SGP_CACHE,	/* don't exceed i_size, may allocate page */
	SGP_NOHUGE,	/* like SGP_CACHE, but no huge pages */
	SGP_HUGE,	/* like SGP_CACHE, huge pages preferred */
	SGP_WRITE,	/* may exceed i_size, may allocate !Uptodate page */
	SGP_FALLOC,	/* like SGP_WRITE, but make existing page Uptodate */
};

extern int shmem_getpage(struct inode *inode, pgoff_t index,
		struct page **pagep, enum sgp_type sgp);
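
/*
 * Example (editor's sketch): shmem_getpage() looks up, and depending on
 * the sgp_type may allocate, the page at @index.  On success the page is
 * returned locked with a reference held, so the caller unlocks and then
 * releases it, as shmem_read_mapping_page_gfp() does internally:
 *
 *	struct page *page;
 *	int err;
 *
 *	err = shmem_getpage(inode, index, &page, SGP_CACHE);
 *	if (err)
 *		return err;
 *	...
 *	unlock_page(page);
 *	put_page(page);
 */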

static inline struct page *shmem_read_mapping_page(
				struct address_space *mapping, pgoff_t index)
{
	return shmem_read_mapping_page_gfp(mapping, index,
					mapping_gfp_mask(mapping));
}
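
/*
 * Example (illustrative sketch): drivers that keep object data in a shmem
 * mapping commonly pin pages one index at a time.  Unlike shmem_getpage(),
 * the page is returned unlocked, so only put_page() is needed when done:
 *
 *	struct page *page;
 *
 *	page = shmem_read_mapping_page(mapping, index);
 *	if (IS_ERR(page))
 *		return PTR_ERR(page);
 *	...
 *	put_page(page);
 */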

static inline bool shmem_file(struct file *file)
{
	if (!IS_ENABLED(CONFIG_SHMEM))
		return false;
	if (!file || !file->f_mapping)
		return false;
	return shmem_mapping(file->f_mapping);
}

extern bool shmem_charge(struct inode *inode, long pages);
extern void shmem_uncharge(struct inode *inode, long pages);

#ifdef CONFIG_TRANSPARENT_HUGE_PAGECACHE
extern bool shmem_huge_enabled(struct vm_area_struct *vma);
#else
static inline bool shmem_huge_enabled(struct vm_area_struct *vma)
{
	return false;
}
#endif

#ifdef CONFIG_SHMEM
extern int shmem_mcopy_atomic_pte(struct mm_struct *dst_mm, pmd_t *dst_pmd,
				  struct vm_area_struct *dst_vma,
				  unsigned long dst_addr,
				  unsigned long src_addr,
				  struct page **pagep);
extern int shmem_mfill_zeropage_pte(struct mm_struct *dst_mm,
				    pmd_t *dst_pmd,
				    struct vm_area_struct *dst_vma,
				    unsigned long dst_addr);
#else
#define shmem_mcopy_atomic_pte(dst_mm, dst_pmd, dst_vma, dst_addr, \
			       src_addr, pagep)	({ BUG(); 0; })
#define shmem_mfill_zeropage_pte(dst_mm, dst_pmd, dst_vma, \
				 dst_addr)	({ BUG(); 0; })
#endif

#endif /* __SHMEM_FS_H */