#ifndef _LINUX_MM_TYPES_H
#define _LINUX_MM_TYPES_H

#include <linux/auxvec.h>
#include <linux/types.h>
#include <linux/threads.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/rbtree.h>
#include <linux/rwsem.h>
#include <linux/completion.h>
#include <linux/cpumask.h>
#include <linux/uprobes.h>
#include <linux/page-flags-layout.h>
#include <linux/workqueue.h>
#include <asm/page.h>
#include <asm/mmu.h>

#ifndef AT_VECTOR_SIZE_ARCH
#define AT_VECTOR_SIZE_ARCH 0
#endif
#define AT_VECTOR_SIZE (2*(AT_VECTOR_SIZE_ARCH + AT_VECTOR_SIZE_BASE + 1))

struct address_space;
struct mem_cgroup;

#define USE_SPLIT_PTE_PTLOCKS	(NR_CPUS >= CONFIG_SPLIT_PTLOCK_CPUS)
#define USE_SPLIT_PMD_PTLOCKS	(USE_SPLIT_PTE_PTLOCKS && \
		IS_ENABLED(CONFIG_ARCH_ENABLE_SPLIT_PMD_PTLOCK))
#define ALLOC_SPLIT_PTLOCKS	(SPINLOCK_SIZE > BITS_PER_LONG/8)

/*
 * Each physical page in the system has a struct page associated with
 * it to keep track of whatever it is we are using the page for at the
 * moment.  Note that we have no way to track which tasks are using
 * a page, though if it is a pagecache page, rmap structures can tell us
 * who is mapping it.
 *
 * The objects in struct page are organized in double word blocks in
 * order to allow us to use atomic double word operations on portions
 * of struct page.  That is currently only used by slub, but the
 * arrangement also allows the use of atomic double word operations on
 * the flags/mapping and lru list pointers.
 */
struct page {
	/* First double word block */
	unsigned long flags;		/* Atomic flags, some possibly
					 * updated asynchronously */
	union {
		struct address_space *mapping;	/* If low bit clear, points to
						 * inode address_space, or NULL.
						 * If page mapped as anonymous
						 * memory, low bit is set, and
						 * it points to anon_vma object:
						 * see PAGE_MAPPING_ANON below.
						 */
		void *s_mem;			/* slab first object */
		atomic_t compound_mapcount;	/* first tail page */
		/* page_deferred_list().next -- second tail page */
	};

	/* Second double word */
	union {
		pgoff_t index;		/* Our offset within mapping. */
		void *freelist;		/* sl[aou]b first free object */
		/* page_deferred_list().prev -- second tail page */
	};

	union {
#if defined(CONFIG_HAVE_CMPXCHG_DOUBLE) && \
	defined(CONFIG_HAVE_ALIGNED_STRUCT_PAGE)
		/* Used for cmpxchg_double in slub */
		unsigned long counters;
#else
		/*
		 * Keep _refcount separate from the slub cmpxchg_double data,
		 * as the rest of the double word is protected by slab_lock
		 * but _refcount is not.
		 */
		unsigned counters;
#endif
		struct {
			union {
				/*
				 * Count of ptes mapped in mms, to show when
				 * page is mapped & limit reverse map searches.
				 *
				 * Extra information about page type may be
				 * stored here for pages that are never mapped,
				 * in which case the value MUST BE <= -2.
				 * See page-flags.h for more details.
				 */
				atomic_t _mapcount;

				unsigned int active;		/* SLAB */
				struct {			/* SLUB */
					unsigned inuse:16;
					unsigned objects:15;
					unsigned frozen:1;
				};
				int units;			/* SLOB */
			};
			/*
			 * Usage count; *use the wrapper functions* for
			 * manual accounting.  See page_ref.h.
			 */
			atomic_t _refcount;
		};
	};
	/*
	 * Third double word block
	 *
	 * WARNING: bit 0 of the first word encodes PageTail().  That means
	 * the other users of the storage space MUST NOT use that bit, to
	 * avoid collisions and false-positive PageTail() results.
	 */
	union {
		struct list_head lru;	/* Pageout list, eg. active_list
					 * protected by zone_lru_lock !
					 * Can be used as a generic list
					 * by the page owner.
					 */
		struct dev_pagemap *pgmap; /* ZONE_DEVICE pages are never on an
					    * lru or handled by a slab
					    * allocator, this points to the
					    * hosting device page map.
					    */
		struct {		/* slub per cpu partial pages */
			struct page *next;	/* Next partial slab */
#ifdef CONFIG_64BIT
			int pages;	/* Nr of partial slabs left */
			int pobjects;	/* Approximate # of objects */
#else
			short int pages;
			short int pobjects;
#endif
		};

		struct rcu_head rcu_head;	/* Used by SLAB
						 * when destroying via RCU
						 */
		/* Tail pages of compound page */
		struct {
			unsigned long compound_head; /* If bit zero is set */

			/* First tail page only */
#ifdef CONFIG_64BIT
			/*
			 * On 64-bit systems we have enough space in struct
			 * page to encode compound_dtor and compound_order
			 * with unsigned int.  This can help the compiler
			 * generate better or smaller code on some
			 * architectures.
			 */
			unsigned int compound_dtor;
			unsigned int compound_order;
#else
			unsigned short int compound_dtor;
			unsigned short int compound_order;
#endif
		};

#if defined(CONFIG_TRANSPARENT_HUGEPAGE) && USE_SPLIT_PMD_PTLOCKS
		struct {
			unsigned long __pad;	/* do not overlay pmd_huge_pte
						 * with compound_head to avoid
						 * possible bit 0 collision.
						 */
			pgtable_t pmd_huge_pte; /* protected by page->ptl */
		};
#endif
	};
	/* Remainder is not double word aligned */
	union {
		unsigned long private;	/* Mapping-private opaque data:
					 * usually used for buffer_heads
					 * if PagePrivate set; used for
					 * swp_entry_t if PageSwapCache;
					 * indicates order in the buddy
					 * system if PG_buddy is set.
					 */
#if USE_SPLIT_PTE_PTLOCKS
#if ALLOC_SPLIT_PTLOCKS
		spinlock_t *ptl;
#else
		spinlock_t ptl;
#endif
#endif
		struct kmem_cache *slab_cache;	/* SL[AU]B: Pointer to slab */
	};

#ifdef CONFIG_MEMCG
	struct mem_cgroup *mem_cgroup;
#endif

	/*
	 * On machines where all RAM is mapped into kernel address space,
	 * we can simply calculate the virtual address. On machines with
	 * highmem some memory is mapped into kernel virtual memory
	 * dynamically, so we need a place to store that address.
	 * Note that this field could be 16 bits on x86 ... ;)
	 *
	 * Architectures with slow multiplication can define
	 * WANT_PAGE_VIRTUAL in asm/page.h
	 */
#if defined(WANT_PAGE_VIRTUAL)
	void *virtual;			/* Kernel virtual address (NULL if
					   not kmapped, ie. highmem) */
#endif /* WANT_PAGE_VIRTUAL */

#ifdef CONFIG_KMEMCHECK
	/*
	 * kmemcheck wants to track the status of each byte in a page; this
	 * is a pointer to such a status block. NULL if not tracked.
	 */
	void *shadow;
#endif

#ifdef LAST_CPUPID_NOT_IN_PAGE_FLAGS
	int _last_cpupid;
#endif
}
/*
 * The struct page can be forced to be double word aligned so that atomic ops
 * on double words work. The SLUB allocator can make use of such a feature.
 */
#ifdef CONFIG_HAVE_ALIGNED_STRUCT_PAGE
	__aligned(2 * sizeof(unsigned long))
#endif
;
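
/*
 * Example (illustrative sketch, not part of the original header): how
 * bit 0 of compound_head distinguishes tail pages.  This mirrors the
 * compound_head() helper in linux/page-flags.h; the function name here
 * is made up for illustration.
 */
static inline struct page *example_compound_head(struct page *page)
{
	unsigned long head = READ_ONCE(page->compound_head);

	if (head & 1)	/* bit 0 set: tail page; the rest is the head pointer */
		return (struct page *)(head - 1);
	return page;	/* head page, or not part of a compound page */
}
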
struct page_frag {
	struct page *page;
#if (BITS_PER_LONG > 32) || (PAGE_SIZE >= 65536)
	__u32 offset;
	__u32 size;
#else
	__u16 offset;
	__u16 size;
#endif
};

#define PAGE_FRAG_CACHE_MAX_SIZE	__ALIGN_MASK(32768, ~PAGE_MASK)
#define PAGE_FRAG_CACHE_MAX_ORDER	get_order(PAGE_FRAG_CACHE_MAX_SIZE)

struct page_frag_cache {
	void * va;
#if (PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE)
	__u16 offset;
	__u16 size;
#else
	__u32 offset;
#endif
	/* we maintain a pagecount bias, so that we don't dirty the cache
	 * line containing page->_refcount every time we allocate a
	 * fragment.
	 */
	unsigned int pagecnt_bias;
	bool pfmemalloc;
};
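
/*
 * Example (simplified sketch, not the real allocator): carving a
 * fragment out of a page_frag_cache.  The real code lives in
 * mm/page_alloc.c; this only illustrates how pagecnt_bias stands in
 * for per-fragment page->_refcount updates, and omits the refill and
 * bias-recharge paths entirely.
 */
static inline void *example_frag_alloc(struct page_frag_cache *nc,
				       unsigned int fragsz)
{
	if (nc->offset < fragsz)
		return NULL;		/* caller would refill the cache here */

	nc->offset -= fragsz;		/* fragments are carved from the top down */
	nc->pagecnt_bias--;		/* consume one unit of the prepaid refcount */
	return nc->va + nc->offset;
}
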
typedef unsigned long vm_flags_t;

/*
 * A region containing a mapping of a non-memory backed file under NOMMU
 * conditions.  These are held in a global tree and are pinned by the VMAs that
 * map parts of them.
 */
struct vm_region {
	struct rb_node	vm_rb;		/* link in global region tree */
	vm_flags_t	vm_flags;	/* VMA vm_flags */
	unsigned long	vm_start;	/* start address of region */
	unsigned long	vm_end;		/* region initialised to here */
	unsigned long	vm_top;		/* region allocated to here */
	unsigned long	vm_pgoff;	/* the offset in vm_file corresponding to vm_start */
	struct file	*vm_file;	/* the backing file or NULL */

	int		vm_usage;	/* region usage count (access under nommu_region_sem) */
	bool		vm_icache_flushed : 1; /* true if the icache has been flushed for
						* this region */
};

#ifdef CONFIG_USERFAULTFD
#define NULL_VM_UFFD_CTX ((struct vm_userfaultfd_ctx) { NULL, })
struct vm_userfaultfd_ctx {
	struct userfaultfd_ctx *ctx;
};
#else /* CONFIG_USERFAULTFD */
#define NULL_VM_UFFD_CTX ((struct vm_userfaultfd_ctx) {})
struct vm_userfaultfd_ctx {};
#endif /* CONFIG_USERFAULTFD */
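
/*
 * Example (sketch, name made up): a freshly created VMA starts with no
 * userfaultfd context.  NULL_VM_UFFD_CTX expands to an empty
 * initializer when CONFIG_USERFAULTFD is off, so this compiles either
 * way.
 */
static inline void example_init_uffd_ctx(struct vm_userfaultfd_ctx *ctx)
{
	*ctx = NULL_VM_UFFD_CTX;
}
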
/*
 * This struct describes a virtual memory area.  There is one of these
 * per VM-area/task.  A VM area is any part of the process virtual memory
 * space that has a special rule for the page-fault handlers (ie a shared
 * library, the executable area etc).
 */
struct vm_area_struct {
	/* The first cache line has the info for VMA tree walking. */

	unsigned long vm_start;		/* Our start address within vm_mm. */
	unsigned long vm_end;		/* The first byte after our end address
					   within vm_mm. */

	/* linked list of VM areas per task, sorted by address */
	struct vm_area_struct *vm_next, *vm_prev;

	struct rb_node vm_rb;

	/*
	 * Largest free memory gap in bytes to the left of this VMA.
	 * Either between this VMA and vma->vm_prev, or between one of the
	 * VMAs below us in the VMA rbtree and its ->vm_prev.  This helps
	 * get_unmapped_area find a free area of the right size.
	 */
	unsigned long rb_subtree_gap;

	/* Second cache line starts here. */

	struct mm_struct *vm_mm;	/* The address space we belong to. */
	pgprot_t vm_page_prot;		/* Access permissions of this VMA. */
	unsigned long vm_flags;		/* Flags, see mm.h. */

	/*
	 * For areas with an address space and backing store,
	 * linkage into the address_space->i_mmap interval tree.
	 */
	struct {
		struct rb_node rb;
		unsigned long rb_subtree_last;
	} shared;

	/*
	 * A file's MAP_PRIVATE vma can be in both i_mmap tree and anon_vma
	 * list, after a COW of one of the file pages.  A MAP_SHARED vma
	 * can only be in the i_mmap tree.  An anonymous MAP_PRIVATE, stack
	 * or brk vma (with NULL file) can only be in an anon_vma list.
	 */
	struct list_head anon_vma_chain; /* Serialized by mmap_sem &
					  * page_table_lock */
	struct anon_vma *anon_vma;	/* Serialized by page_table_lock */

	/* Function pointers to deal with this struct. */
	const struct vm_operations_struct *vm_ops;

	/* Information about our backing store: */
	unsigned long vm_pgoff;		/* Offset (within vm_file) in PAGE_SIZE
					   units */
	struct file * vm_file;		/* File we map to (can be NULL). */
	void * vm_private_data;		/* was vm_pte (shared mem) */

#ifndef CONFIG_MMU
	struct vm_region *vm_region;	/* NOMMU mapping region */
#endif
#ifdef CONFIG_NUMA
	struct mempolicy *vm_policy;	/* NUMA policy for the VMA */
#endif
	struct vm_userfaultfd_ctx vm_userfaultfd_ctx;
};
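
/*
 * Example (sketch, name made up): the [vm_start, vm_end) convention
 * above means a VMA covers vm_end - vm_start bytes, and an address
 * belongs to it only while strictly below vm_end.
 */
static inline bool example_vma_contains(const struct vm_area_struct *vma,
					unsigned long addr)
{
	return addr >= vma->vm_start && addr < vma->vm_end;
}
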
struct core_thread {
	struct task_struct *task;
	struct core_thread *next;
};

struct core_state {
	atomic_t nr_threads;
	struct core_thread dumper;
	struct completion startup;
};

enum {
	MM_FILEPAGES,	/* Resident file mapping pages */
	MM_ANONPAGES,	/* Resident anonymous pages */
	MM_SWAPENTS,	/* Anonymous swap entries */
	MM_SHMEMPAGES,	/* Resident shared memory pages */
	NR_MM_COUNTERS
};

#if USE_SPLIT_PTE_PTLOCKS && defined(CONFIG_MMU)
#define SPLIT_RSS_COUNTING
/* per-thread cached information */
struct task_rss_stat {
	int events;	/* for synchronization threshold */
	int count[NR_MM_COUNTERS];
};
#endif /* USE_SPLIT_PTE_PTLOCKS */

struct mm_rss_stat {
	atomic_long_t count[NR_MM_COUNTERS];
};
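
/*
 * Example (sketch, name made up): reading one of the RSS counters.
 * The real helpers (get_mm_counter() and friends) live in linux/mm.h;
 * this just shows why the counters are atomic_long_t, and clamps the
 * transient negative values that SPLIT_RSS_COUNTING batching can
 * expose.
 */
static inline unsigned long example_read_rss(struct mm_rss_stat *stat,
					     int member)
{
	long val = atomic_long_read(&stat->count[member]);

	return val > 0 ? (unsigned long)val : 0;
}
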
struct kioctx_table;
struct mm_struct {
	struct vm_area_struct *mmap;		/* list of VMAs */
	struct rb_root mm_rb;
	u32 vmacache_seqnum;			/* per-thread vmacache */
#ifdef CONFIG_MMU
	unsigned long (*get_unmapped_area) (struct file *filp,
				unsigned long addr, unsigned long len,
				unsigned long pgoff, unsigned long flags);
#endif
	unsigned long mmap_base;		/* base of mmap area */
	unsigned long mmap_legacy_base;		/* base of mmap area in bottom-up allocations */
	unsigned long task_size;		/* size of task vm space */
	unsigned long highest_vm_end;		/* highest vma end address */
	pgd_t * pgd;
	atomic_t mm_users;			/* How many users with user space? */
	atomic_t mm_count;			/* How many references to "struct mm_struct" (users count as 1) */
	atomic_long_t nr_ptes;			/* PTE page table pages */
#if CONFIG_PGTABLE_LEVELS > 2
	atomic_long_t nr_pmds;			/* PMD page table pages */
#endif
	int map_count;				/* number of VMAs */

	spinlock_t page_table_lock;		/* Protects page tables and some counters */
	struct rw_semaphore mmap_sem;

	struct list_head mmlist;		/* List of maybe swapped mm's.  These are globally strung
						 * together off init_mm.mmlist, and are protected
						 * by mmlist_lock
						 */

	unsigned long hiwater_rss;	/* High-watermark of RSS usage */
	unsigned long hiwater_vm;	/* High-water virtual memory usage */

	unsigned long total_vm;		/* Total pages mapped */
	unsigned long locked_vm;	/* Pages that have PG_mlocked set */
	unsigned long pinned_vm;	/* Refcount permanently increased */
	unsigned long data_vm;		/* VM_WRITE & ~VM_SHARED & ~VM_STACK */
	unsigned long exec_vm;		/* VM_EXEC & ~VM_WRITE & ~VM_STACK */
	unsigned long stack_vm;		/* VM_STACK */
	unsigned long def_flags;
	unsigned long start_code, end_code, start_data, end_data;
	unsigned long start_brk, brk, start_stack;
	unsigned long arg_start, arg_end, env_start, env_end;

	unsigned long saved_auxv[AT_VECTOR_SIZE]; /* for /proc/PID/auxv */

	/*
	 * Special counters, in some configurations protected by the
	 * page_table_lock, in other configurations by being atomic.
	 */
	struct mm_rss_stat rss_stat;

	struct linux_binfmt *binfmt;

	cpumask_var_t cpu_vm_mask_var;

	/* Architecture-specific MM context */
	mm_context_t context;

	unsigned long flags; /* Must use atomic bitops to access the bits */

	struct core_state *core_state; /* coredumping support */
#ifdef CONFIG_AIO
	spinlock_t			ioctx_lock;
	struct kioctx_table __rcu	*ioctx_table;
#endif
#ifdef CONFIG_MEMCG
	/*
	 * "owner" points to a task that is regarded as the canonical
	 * user/owner of this mm.  All of the following must be true in
	 * order for it to be changed:
	 *
	 * current == mm->owner
	 * current->mm != mm
	 * new_owner->mm == mm
	 * new_owner->alloc_lock is held
	 */
	struct task_struct __rcu *owner;
#endif
	struct user_namespace *user_ns;

	/* store ref to file /proc/<pid>/exe symlink points to */
	struct file __rcu *exe_file;
#ifdef CONFIG_MMU_NOTIFIER
	struct mmu_notifier_mm *mmu_notifier_mm;
#endif
#if defined(CONFIG_TRANSPARENT_HUGEPAGE) && !USE_SPLIT_PMD_PTLOCKS
	pgtable_t pmd_huge_pte; /* protected by page_table_lock */
#endif
#ifdef CONFIG_CPUMASK_OFFSTACK
	struct cpumask cpumask_allocation;
#endif
#ifdef CONFIG_NUMA_BALANCING
	/*
	 * numa_next_scan is the next time that the PTEs will be marked
	 * pte_numa.  NUMA hinting faults will gather statistics and migrate
	 * pages to new nodes if necessary.
	 */
	unsigned long numa_next_scan;

	/* Restart point for scanning and setting pte_numa */
	unsigned long numa_scan_offset;

	/* numa_scan_seq prevents two threads setting pte_numa */
	int numa_scan_seq;
#endif
#if defined(CONFIG_NUMA_BALANCING) || defined(CONFIG_COMPACTION)
	/*
	 * An operation with batched TLB flushing is going on.  Anything that
	 * can move process memory needs to flush the TLB when moving a
	 * PROT_NONE or PROT_NUMA mapped page.
	 */
	bool tlb_flush_pending;
#endif
#ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
	/* See flush_tlb_batched_pending() */
	bool tlb_flush_batched;
#endif
	struct uprobes_state uprobes_state;
#ifdef CONFIG_X86_INTEL_MPX
	/* address of the bounds directory */
	void __user *bd_addr;
#endif
#ifdef CONFIG_HUGETLB_PAGE
	atomic_long_t hugetlb_usage;
#endif
	struct work_struct async_put_work;
};
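
/*
 * Example (sketch, name made up): walking the per-mm VMA list.
 * mmap_sem must be held (here for read) so that mmap, the rbtree and
 * the vm_next chain cannot change underneath the walker.
 */
static inline int example_count_vmas(struct mm_struct *mm)
{
	struct vm_area_struct *vma;
	int n = 0;

	down_read(&mm->mmap_sem);
	for (vma = mm->mmap; vma; vma = vma->vm_next)
		n++;	/* on exit, n should equal mm->map_count */
	up_read(&mm->mmap_sem);
	return n;
}
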
static inline void mm_init_cpumask(struct mm_struct *mm)
{
#ifdef CONFIG_CPUMASK_OFFSTACK
	mm->cpu_vm_mask_var = &mm->cpumask_allocation;
#endif
	cpumask_clear(mm->cpu_vm_mask_var);
}

/* Future-safe accessor for struct mm_struct's cpu_vm_mask. */
static inline cpumask_t *mm_cpumask(struct mm_struct *mm)
{
	return mm->cpu_vm_mask_var;
}
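
/*
 * Example (sketch, name made up): how context-switch code typically
 * uses mm_cpumask().  The bit for a CPU is set before user space of
 * this mm runs on it, so remote TLB shootdowns know which CPUs to
 * target.
 */
static inline void example_note_mm_on_cpu(struct mm_struct *mm, int cpu)
{
	cpumask_set_cpu(cpu, mm_cpumask(mm));
}
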
#if defined(CONFIG_NUMA_BALANCING) || defined(CONFIG_COMPACTION)
/*
 * Memory barriers to keep this state in sync are graciously provided by
 * the page table locks, outside of which no page table modifications happen.
 * The barriers below prevent the compiler from re-ordering the instructions
 * around the memory barriers that are already present in the code.
 */
static inline bool mm_tlb_flush_pending(struct mm_struct *mm)
{
	barrier();
	return mm->tlb_flush_pending;
}
static inline void set_tlb_flush_pending(struct mm_struct *mm)
{
	mm->tlb_flush_pending = true;

	/*
	 * Guarantee that the tlb_flush_pending store does not leak into the
	 * critical section updating the page tables
	 */
	smp_mb__before_spinlock();
}
/* Clearing is done after a TLB flush, which also provides a barrier. */
static inline void clear_tlb_flush_pending(struct mm_struct *mm)
{
	barrier();
	mm->tlb_flush_pending = false;
}
#else
static inline bool mm_tlb_flush_pending(struct mm_struct *mm)
{
	return false;
}
static inline void set_tlb_flush_pending(struct mm_struct *mm)
{
}
static inline void clear_tlb_flush_pending(struct mm_struct *mm)
{
}
#endif
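
/*
 * Example (sketch, name made up): the intended pairing of the helpers
 * above.  The page-table updates and the TLB flush happen between set
 * and clear, so that mm_tlb_flush_pending() readers see the whole
 * window in which stale TLB entries may still exist.
 */
static inline void example_unmap_with_pending_flush(struct mm_struct *mm)
{
	set_tlb_flush_pending(mm);
	/* ... clear PTEs under the appropriate page table lock ... */
	/* ... flush the TLB, e.g. with the arch's flush_tlb_mm() ... */
	clear_tlb_flush_pending(mm);
}
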
struct vm_fault;

struct vm_special_mapping {
	const char *name;	/* The name, e.g. "[vdso]". */

	/*
	 * If .fault is not provided, this points to a
	 * NULL-terminated array of pages that back the special mapping.
	 *
	 * This must not be NULL unless .fault is provided.
	 */
	struct page **pages;

	/*
	 * If non-NULL, then this is called to resolve page faults
	 * on the special mapping.  If used, .pages is not checked.
	 */
	int (*fault)(const struct vm_special_mapping *sm,
		     struct vm_area_struct *vma,
		     struct vm_fault *vmf);

	int (*mremap)(const struct vm_special_mapping *sm,
		     struct vm_area_struct *new_vma);
};
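
/*
 * Example (sketch, names made up): a vdso-style special mapping backed
 * by a NULL-terminated pages array, with no .fault handler.  A real
 * user would fill example_pages[0] at init time and install the
 * mapping with _install_special_mapping() from mm/mmap.c.
 */
static struct page *example_pages[2];	/* [1] stays NULL as terminator */

static const struct vm_special_mapping example_mapping = {
	.name	= "[example]",
	.pages	= example_pages,
};
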
enum tlb_flush_reason {
	TLB_FLUSH_ON_TASK_SWITCH,
	TLB_REMOTE_SHOOTDOWN,
	TLB_LOCAL_SHOOTDOWN,
	TLB_LOCAL_MM_SHOOTDOWN,
	TLB_REMOTE_SEND_IPI,
	NR_TLB_FLUSH_REASONS,
};

/*
 * A swap entry has to fit into an "unsigned long", as the entry is hidden
 * in the "index" field of the swapper address space.
 */
typedef struct {
	unsigned long val;
} swp_entry_t;
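
/*
 * Example (sketch): one possible way to pack a swap type and offset
 * into the single word.  The kernel's actual encoding is defined by
 * swp_entry()/swp_type()/swp_offset() in linux/swapops.h and differs
 * across configurations; the 5-bit type field here is illustrative
 * only.
 */
#define EXAMPLE_SWP_TYPE_BITS	5

static inline swp_entry_t example_swp_entry(unsigned long type,
					    unsigned long offset)
{
	swp_entry_t entry;

	entry.val = (offset << EXAMPLE_SWP_TYPE_BITS) | type;
	return entry;
}

static inline unsigned long example_swp_type(swp_entry_t entry)
{
	return entry.val & ((1UL << EXAMPLE_SWP_TYPE_BITS) - 1);
}
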
#endif /* _LINUX_MM_TYPES_H */