android_kernel_motorola_sm6225/mm/vmstat.c
Ivaylo Georgiev c2ec4b3fbc Merge android-4.19.95 (5da1114) into msm-4.19
* refs/heads/tmp-5da1114:
  Revert crypto changes from android-4.19.79-95
  Revert "UPSTREAM: PM / wakeup updates"
  Revert "ANDROID: of: property: Enable of_devlink by default"
  Revert "UPSTREAM: dt-bindings: arm: coresight: Add support for coresight-loses-context-with-cpu"
  UPSTREAM: net: usbnet: Fix -Wcast-function-type
  UPSTREAM: USB: dummy-hcd: use usb_urb_dir_in instead of usb_pipein
  UPSTREAM: USB: dummy-hcd: increase max number of devices to 32
  ANDROID: tty: serdev: Fix broken serial console input
  ANDROID: update kernel ABI (perf_event changes)
  BACKPORT: perf_event: Add support for LSM and SELinux checks
  UPSTREAM: iommu: Allow io-pgtable to be used outside of drivers/iommu/
  ANDROID: update abi for 4.19.94 release
  ANDROID: update abi due to revert
  Revert "BACKPORT: perf_event: Add support for LSM and SELinux checks"
  UPSTREAM: selinux: sidtab reverse lookup hash table
  UPSTREAM: selinux: avoid atomic_t usage in sidtab
  UPSTREAM: selinux: check sidtab limit before adding a new entry
  UPSTREAM: selinux: fix context string corruption in convert_context()
  UPSTREAM: selinux: overhaul sidtab to fix bug and improve performance
  UPSTREAM: selinux: refactor mls_context_to_sid() and make it stricter
  UPSTREAM: selinux: use separate table for initial SID lookup
  UPSTREAM: selinux: make "selinux_policycap_names[]" const char *
  UPSTREAM: selinux: refactor sidtab conversion
  ANDROID: Update ABI representation
  ANDROID: GKI: clk: Don't disable unused clocks with sync state support
  ANDROID: GKI: clk: Add support for clock providers with sync state
  ANDROID: GKI: driver core: Add dev_has_sync_state()
  ANDROID: update kernel ABI representation
  BACKPORT: perf_event: Add support for LSM and SELinux checks
  ANDROID: update ABI representation
  UPSTREAM: exit: panic before exit_mm() on global init exit
  ANDROID: serdev: Fix platform device support
  ANDROID: Kconfig.gki: Add Hidden SPRD DRM configs
  ANDROID: gki_defconfig: Disable TRANSPARENT_HUGEPAGE
  ANDROID: gki_defconfig: Enable CONFIG_GNSS_CMDLINE_SERIAL
  ANDROID: gnss: Add command line test driver
  ANDROID: serdev: add platform device support
  ANDROID: gki_defconfig: enable ARM64_SW_TTBR0_PAN
  ANDROID: gki_defconfig: Set BINFMT_MISC as =m
  UPSTREAM: binder: fix incorrect calculation for num_valid
  ABI: Update ABI after f2fs merge
  ANDROID: add initial ABI whitelist for android-4.19
  ANDROID: staging: android: ion: Fix build when CONFIG_ION_SYSTEM_HEAP=n
  ANDROID: staging: android: ion: Expose total heap and pool sizes via sysfs
  ANDROID: Update ABI representation due to vmstat counter changes
  UPSTREAM: include/linux/slab.h: fix sparse warning in kmalloc_type()
  UPSTREAM: mm, slab: shorten kmalloc cache names for large sizes
  UPSTREAM: mm, proc: add KReclaimable to /proc/meminfo
  UPSTREAM: mm: rename and change semantics of nr_indirectly_reclaimable_bytes
  UPSTREAM: dcache: allocate external names from reclaimable kmalloc caches
  UPSTREAM: mm, slab/slub: introduce kmalloc-reclaimable caches
  UPSTREAM: mm, slab: combine kmalloc_caches and kmalloc_dma_caches
  ANDROID: abi update for 4.19.89
  ANDROID: update abi_gki_aarch64.xml for LTO, CFI, and SCS
  ANDROID: gki_defconfig: enable LTO, CFI, and SCS
  ANDROID: update abi_gki_aarch64.xml for CONFIG_GNSS
  ANDROID: cuttlefish_defconfig: Enable CONFIG_GNSS
  UPSTREAM: arm64: Validate tagged addresses in access_ok() called from kernel threads
  ANDROID: mm: Throttle rss_stat tracepoint
  UPSTREAM: mm: slub: really fix slab walking for init_on_free
  ANDROID: update abi_gki_aarch64.xml for nf change
  ANDROID: kbuild: limit LTO inlining
  ANDROID: kbuild: merge module sections with LTO
  ANDROID: netfilter: nf_nat: remove static from nf_nat_ipv4_fn
  UPSTREAM: drm/client: remove the exporting of drm_client_close
  ANDROID: f2fs: fix possible merge of unencrypted with encrypted I/O
  UPSTREAM: binder: Add binder_proc logging to binderfs
  UPSTREAM: binder: Make transaction_log available in binderfs
  UPSTREAM: binder: Add stats, state and transactions files
  UPSTREAM: binder: add a mount option to show global stats
  UPSTREAM: binder: Validate the default binderfs device names.
  UPSTREAM: binder: Add default binder devices through binderfs when configured
  UPSTREAM: binder: fix CONFIG_ANDROID_BINDER_DEVICES
  UPSTREAM: android: binder: use kstrdup instead of open-coding it
  UPSTREAM: binderfs: remove separate device_initcall()
  UPSTREAM: binderfs: respect limit on binder control creation
  UPSTREAM: binderfs: switch from d_add() to d_instantiate()
  UPSTREAM: binderfs: drop lock in binderfs_binder_ctl_create
  UPSTREAM: binderfs: kill_litter_super() before cleanup
  UPSTREAM: binderfs: rework binderfs_binder_device_create()
  UPSTREAM: binderfs: rework binderfs_fill_super()
  UPSTREAM: binderfs: prevent renaming the control dentry
  UPSTREAM: binderfs: remove outdated comment
  UPSTREAM: binderfs: fix error return code in binderfs_fill_super()
  UPSTREAM: binderfs: handle !CONFIG_IPC_NS builds
  UPSTREAM: binderfs: reserve devices for initial mount
  UPSTREAM: binderfs: rename header to binderfs.h
  UPSTREAM: binderfs: implement "max" mount option
  UPSTREAM: binderfs: make each binderfs mount a new instance
  UPSTREAM: binderfs: remove wrong kern_mount() call
  UPSTREAM: binder: implement binderfs
  UPSTREAM: binder: remove BINDER_DEBUG_ENTRY()
  ANDROID: Don't base allmodconfig on gki_defconfig
  ANDROID: Disable UNWINDER_ORC for allmodconfig
  ANDROID: update abi_gki_aarch64.xml for 4.19.87
  BACKPORT: ARM: 8905/1: Emit __gnu_mcount_nc when using Clang 10.0.0 or newer
  ANDROID: update abi_gki_aarch64.xml
  ANDROID: gki_defconfig: =m's applied for virtio configs in arm64
  UPSTREAM: of: property: Add device link support for interrupt-parent, dmas and -gpio(s)
  UPSTREAM: of: property: Add device link support for "iommu-map"
  UPSTREAM: of: property: Fix the semantics of of_is_ancestor_of()
  UPSTREAM: i2c: of: Populate fwnode in of_i2c_get_board_info()
  UPSTREAM: driver core: Clarify documentation for fwnode_operations.add_links()
  UPSTREAM: dt-bindings: arm: coresight: Add support for coresight-loses-context-with-cpu
  BACKPORT: coresight: etm4x: Save/restore state across CPU low power states
  ANDROID: Update ABI representation
  ANDROID: gki_defconfig: IIO=y
  f2fs: stop GC when the victim becomes fully valid
  f2fs: expose main_blkaddr in sysfs
  f2fs: choose hardlimit when softlimit is larger than hardlimit in f2fs_statfs_project()
  f2fs: Fix deadlock in f2fs_gc() context during atomic files handling
  f2fs: show f2fs instance in printk_ratelimited
  f2fs: fix potential overflow
  f2fs: fix to update dir's i_pino during cross_rename
  f2fs: support aligned pinned file
  f2fs: avoid kernel panic on corruption test
  f2fs: fix wrong description in document
  f2fs: cache global IPU bio
  f2fs: fix to avoid memory leakage in f2fs_listxattr
  f2fs: check total_segments from devices in raw_super
  f2fs: update multi-dev metadata in resize_fs
  f2fs: mark recovery flag correctly in read_raw_super_block()
  f2fs: fix to update time in lazytime mode
  vfs: don't allow writes to swap files
  mm: set S_SWAPFILE on blockdev swap devices
  BACKPORT: ARM: 8900/1: UNWINDER_FRAME_POINTER implementation for Clang
  ANDROID: update abi_gki_aarch64.xml for 4.19.87
  ANDROID: gki_defconfig: FW_CACHE to no
  FROMGIT: firmware_class: make firmware caching configurable
  FROMLIST: arm64: implement Shadow Call Stack
  FROMLIST: arm64: disable SCS for hypervisor code
  BACKPORT: FROMLIST: arm64: vdso: disable Shadow Call Stack
  FROMLIST: arm64: efi: restore x18 if it was corrupted
  FROMLIST: arm64: preserve x18 when CPU is suspended
  FROMLIST: arm64: reserve x18 from general allocation with SCS
  FROMLIST: arm64: disable function graph tracing with SCS
  FROMLIST: scs: add support for stack usage debugging
  FROMLIST: scs: add accounting
  FROMLIST: add support for Clang's Shadow Call Stack (SCS)
  FROMLIST: arm64: kernel: avoid x18 in __cpu_soft_restart
  FROMLIST: arm64: kvm: stop treating register x18 as caller save
  FROMLIST: arm64/lib: copy_page: avoid x18 register in assembler code
  FROMLIST: arm64: mm: avoid x18 in idmap_kpti_install_ng_mappings
  ANDROID: use non-canonical CFI jump tables
  ANDROID: arm64: add __nocfi to __apply_alternatives
  ANDROID: arm64: add __pa_function
  ANDROID: arm64: allow ThinLTO to be selected
  ANDROID: soc/tegra: disable ARCH_TEGRA_210_SOC with LTO
  FROMLIST: arm64: fix alternatives with LLVM's integrated assembler
  ANDROID: irqchip/gic-v3: rename gic_of_init to work around a ThinLTO+CFI bug
  ANDROID: init: ensure initcall ordering with LTO
  Revert "ANDROID: init: ensure initcall ordering with LTO"
  ANDROID: add support for ThinLTO
  ANDROID: clang: update to 10.0.1
  ANDROID: gki_defconfig: enable CONFIG_REGULATOR_FIXED_VOLTAGE
  ANDROID: gki_defconfig: removed CONFIG_PM_WAKELOCKS
  ANDROID: gki_defconfig: enable CONFIG_IKHEADERS as m
  FROMGIT: pinctrl: devicetree: Avoid taking direct reference to device name string
  ANDROID: update abi_gki_aarch64.xml for 4.19.86 update
  ANDROID: Update ABI representation
  ANDROID: gki_defconfig: disable FUNCTION_TRACER
  ANDROID: Update the ABI representation
  ANDROID: update ABI representation
  ANDROID: add unstripped modules to the distribution
  FROMLIST: vsprintf: Inline call to ptr_to_hashval
  UPSTREAM: rss_stat: Add support to detect RSS updates of external mm
  UPSTREAM: mm: emit tracepoint when RSS changes
  FROMGIT: driver core: Allow device link operations inside sync_state()
  ANDROID: uid_sys_stats: avoid double accounting of dying threads
  ANDROID: scsi: ufs-qcom: Enable BROKEN_CRYPTO quirk flag
  ANDROID: scsi: ufs-hisi: Enable BROKEN_CRYPTO quirk flag
  ANDROID: scsi: ufs: Add quirk bit for controllers that don't play well with inline crypto
  ANDROID: scsi: ufs: UFS init should not require inline crypto
  ANDROID: scsi: ufs: UFS crypto variant operations API
  ANDROID: gki_defconfig: enable inline encryption
  BACKPORT: FROMLIST: ext4: add inline encryption support
  BACKPORT: FROMLIST: f2fs: add inline encryption support
  BACKPORT: FROMLIST: fscrypt: add inline encryption support
  BACKPORT: FROMLIST: scsi: ufs: Add inline encryption support to UFS
  BACKPORT: FROMLIST: scsi: ufs: UFS crypto API
  BACKPORT: FROMLIST: scsi: ufs: UFS driver v2.1 spec crypto additions
  BACKPORT: FROMLIST: block: blk-crypto for Inline Encryption
  ANDROID: block: Fix bio_crypt_should_process WARN_ON
  BACKPORT: FROMLIST: block: Add encryption context to struct bio
  BACKPORT: FROMLIST: block: Keyslot Manager for Inline Encryption
  FROMLIST: f2fs: add support for IV_INO_LBLK_64 encryption policies
  FROMLIST: ext4: add support for IV_INO_LBLK_64 encryption policies
  BACKPORT: FROMLIST: fscrypt: add support for IV_INO_LBLK_64 policies
  FROMLIST: fscrypt: zeroize fscrypt_info before freeing
  FROMLIST: fscrypt: remove struct fscrypt_ctx
  BACKPORT: FROMLIST: fscrypt: invoke crypto API for ESSIV handling
  ANDROID: build kernels with llvm-nm and llvm-objcopy
  ANDROID: Fix allmodconfig build with CC=clang
  UPSTREAM: mm/page_poison: expose page_poisoning_enabled to kernel modules
  FROMGIT: of: property: Add device link support for iommus, mboxes and io-channels
  FROMGIT: of: property: Make it easy to add device links from DT properties
  FROMGIT: of: property: Minor style clean up of of_link_to_phandle()
  Revert "ANDROID: of/property: Add device link support for iommus"
  ANDROID: Add allmodconfig build.configs for x86_64 and aarch64
  ANDROID: fix allmodconfig build
  ANDROID: nf: IDLETIMER: Fix possible use before initialization in idletimer_resume
  BACKPORT: coresight: funnel: Support static funnel
  BACKPORT:FROMGIT: coresight: replicator: Fix missing spin_lock_init()
  BACKPORT:FROMGIT: coresight: funnel: Fix missing spin_lock_init()
  BACKPORT:FROMGIT: coresight: Serialize enabling/disabling a link device.
  UPSTREAM: coresight: tmc-etr: Add barrier packets when moving offset forward
  UPSTREAM: coresight: tmc-etr: Decouple buffer sync and barrier packet insertion
  UPSTREAM: coresight: tmc: Make memory width mask computation into a function
  UPSTREAM: coresight: tmc-etr: Fix perf_data check
  UPSTREAM: coresight: tmc-etr: Fix updating buffer in not-snapshot mode.
  UPSTREAM: coresight: tmc-etr: Check if non-secure access is enabled
  UPSTREAM: coresight: tmc-etr: Handle memory errors
  BACKPORT: coresight: etr_buf: Consolidate refcount initialization
  UPSTREAM: coresight: Fix DEBUG_LOCKS_WARN_ON for uninitialized attribute
  UPSTREAM: coresight: Use coresight device names for sinks in PMU attribute
  UPSTREAM: coresight: tmc-etr: alloc_perf_buf: Do not call smp_processor_id from preemptible
  UPSTREAM: coresight: tmc-etr: Do not call smp_processor_id() from preemptible
  UPSTREAM: coresight: perf: Don't set the truncated flag in snapshot mode
  UPSTREAM: coresight: tmc-etf: Fix snapshot mode update function
  UPSTREAM: coresight: tmc-etr: Properly set AUX buffer head in snapshot mode
  UPSTREAM: coresight: tmc-etr: Add support for CPU-wide trace scenarios
  UPSTREAM: coresight: tmc-etr: Allocate and free ETR memory buffers for CPU-wide scenarios
  UPSTREAM: coresight: tmc-etr: Introduce the notion of IDR to ETR devices
  UPSTREAM: coresight: tmc-etr: Introduce the notion of reference counting to ETR devices
  UPSTREAM: coresight: tmc-etr: Introduce the notion of process ID to ETR devices
  UPSTREAM: coresight: tmc-etr: Create per-thread buffer allocation function
  UPSTREAM: coresight: tmc-etr: Refactor function tmc_etr_setup_perf_buf()
  UPSTREAM: coresight: Communicate perf event to sink buffer allocation functions
  UPSTREAM: coresight: perf: Refactor function free_event_data()
  UPSTREAM: coresight: perf: Clean up function etm_setup_aux()
  UPSTREAM: coresight: Properly address concurrency in sink::update() functions
  UPSTREAM: coresight: Properly address errors in sink::disable() functions
  UPSTREAM: coresight: Move reference counting inside sink drivers
  UPSTREAM: coresight: Adding return code to sink::disable() operation
  UPSTREAM: coresight: etm4x: Configure tracers to emit timestamps
  UPSTREAM: coresight: etm4x: Skip selector pair 0
  UPSTREAM: coresight: etm4x: Add kernel configuration for CONTEXTID
  UPSTREAM: coresight: pmu: Adding ITRACE property to cs_etm PMU
  UPSTREAM: coresight: tmc: Cleanup power management
  UPSTREAM: coresight: Fix freeing up the coresight connections
  UPSTREAM: coresight: tmc: Report DMA setup failures
  UPSTREAM: coresight: catu: fix clang build warning
  UPSTREAM: perf/core: Fix the address filtering fix
  UPSTREAM: perf, pt, coresight: Fix address filters for vmas with non-zero offset
  UPSTREAM: perf: Copy parent's address filter offsets on clone
  UPSTREAM: coresight: Use event attributes for sink selection
  UPSTREAM: coresight: perf: Add "sinks" group to PMU directory
  UPSTREAM: coresight: etb10: Add support for CLAIM tag
  UPSTREAM: coreisght: tmc: Claim device before use
  UPSTREAM: coresight: dynamic-replicator: Claim device for use
  UPSTREAM: coresight: funnel: Claim devices before use
  UPSTREAM: coresight: etmx: Claim devices before use
  UPSTREAM: coresight: Add support for CLAIM tag protocol
  UPSTREAM: coresight: dynamic-replicator: Handle multiple connections
  UPSTREAM: coresight: etb10: Handle errors enabling the device
  UPSTREAM: coresight: etm3: Add support for handling errors
  UPSTREAM: coresight: etm4x: Add support for handling errors
  UPSTREAM: coresight: tmc-etb/etf: Prepare to handle errors enabling
  UPSTREAM: coresight: tmc-etr: Handle errors enabling CATU
  UPSTREAM: coresight: tmc-etr: Refactor for handling errors
  UPSTREAM: coresight: Handle failures in enabling a trace path
  UPSTREAM: coresight: tmc: Fix byte-address alignment for RRP
  UPSTREAM: coresight: etm4x: Configure EL2 exception level when kernel is running in HYP
  UPSTREAM: coresight: etb10: Splitting function etb_enable()
  UPSTREAM: coresight: etb10: Refactor etb_drvdata::mode handling
  UPSTREAM: coresight: etm-perf: Add support for ETR backend
  UPSTREAM: coresight: perf: Remove set_buffer call back
  UPSTREAM: coresight: perf: Add helper to retrieve sink configuration
  UPSTREAM: coresight: perf: Remove reset_buffer call back for sinks
  UPSTREAM: coresight: Convert driver messages to dev_dbg
  UPSTREAM: coresight: tmc-etr: Relax collection of trace from sysfs mode
  UPSTREAM: coresight: tmc-etr: Handle driver mode specific ETR buffers
  UPSTREAM: coresight: perf: Disable trace path upon source error
  UPSTREAM: coresight: perf: Allow tracing on hotplugged CPUs
  UPSTREAM: coresight: perf: Avoid unncessary CPU hotplug read lock
  UPSTREAM: coresight: perf: Fix per cpu path management
  UPSTREAM: coresight: Fix handling of sinks
  UPSTREAM: coresight: Use ERR_CAST instead of ERR_PTR
  UPSTREAM: coresight: Fix remote endpoint parsing
  UPSTREAM: coresight: platform: Fix leaking device reference
  UPSTREAM: coresight: platform: Fix refcounting for graph nodes
  UPSTREAM: coresight: platform: Refactor graph endpoint parsing
  UPSTREAM: coresight: Document error handling in coresight_register
  ANDROID: regression introduced override_creds=off
  ANDROID: overlayfs: internal getxattr operations without sepolicy checking
  ANDROID: overlayfs: add __get xattr method
  ANDROID: Add optional __get xattr method paired to __vfs_getxattr
  UPSTREAM: scsi: ufs: override auto suspend tunables for ufs
  UPSTREAM: scsi: core: allow auto suspend override by low-level driver
  FROMGIT: of: property: Skip adding device links to suppliers that aren't devices
  ANDROID: gki_defconfig: enable CONFIG_KEYBOARD_GPIO
  UPSTREAM: dm bufio: introduce a global cache replacement
  UPSTREAM: dm bufio: remove old-style buffer cleanup
  UPSTREAM: dm bufio: introduce a global queue
  UPSTREAM: dm bufio: refactor adjust_total_allocated
  UPSTREAM: dm bufio: call adjust_total_allocated from __link_buffer and __unlink_buffer
  ANDROID: dummy_cpufreq: Implement get()
  ANDROID: gki_defconfig: enable CONFIG_CPUSETS
  ANDROID: virtio: virtio_input: Set the amount of multitouch slots in virtio input
  rtlwifi: Fix potential overflow on P2P code
  ANDROID: cpufreq: create dummy cpufreq driver
  ANDROID: Allow DRM_IOCTL_MODE_*_DUMB for render clients.
  Cuttlefish Wifi: Add data ops in virt_wifi driver for scan data simulation
  ANDROID: of: property: Enable of_devlink by default
  ANDROID: of: property: Make sure child dependencies don't block probing of parent
  ANDROID: driver core: Allow fwnode_operations.add_links to differentiate errors
  ANDROID: driver core: Allow a device to wait on optional suppliers
  ANDROID: driver core: Add device link support for SYNC_STATE_ONLY flag
  FROMGIT: docs: driver-model: Add documentation for sync_state
  FROMGIT: driver: core: Improve documentation for fwnode_operations.add_links()
  FROMGIT: of: property: Minor code formatting/style clean ups
  ANDROID: of/property: Add device link support for iommus
  ANDROID: move up spin_unlock_bh() ahead of remove_proc_entry()
  BACKPORT: arm64: tags: Preserve tags for addresses translated via TTBR1
  UPSTREAM: arm64: memory: Implement __tag_set() as common function
  UPSTREAM: arm64/mm: fix variable 'tag' set but not used
  UPSTREAM: arm64: avoid clang warning about self-assignment
  ANDROID: sdcardfs: evict dentries on fscrypt key removal
  ANDROID: fscrypt: add key removal notifier chain
  ANDROID: refactor build.config files to remove duplication
  ANDROID: Move from clang r353983c to r365631c
  ANDROID: gki_defconfig: remove PWRSEQ_EMMC and PWRSEQ_SIMPLE
  ANDROID: unconditionally compile sig_ok in struct module
  ANDROID: gki_defconfig: enable fs-verity
  UPSTREAM: mm: vmalloc: show number of vmalloc pages in /proc/meminfo
  BACKPORT: PM/sleep: Expose suspend stats in sysfs
  UPSTREAM: power: supply: Init device wakeup after device_add()
  UPSTREAM: PM / wakeup: Unexport wakeup_source_sysfs_{add,remove}()
  UPSTREAM: PM / wakeup: Register wakeup class kobj after device is added
  UPSTREAM: PM / wakeup: Fix sysfs registration error path
  UPSTREAM: PM / wakeup: Show wakeup sources stats in sysfs
  UPSTREAM: PM / wakeup: Use wakeup_source_register() in wakelock.c
  UPSTREAM: PM / wakeup: Drop wakeup_source_init(), wakeup_source_prepare()
  UPSTREAM: PM / wakeup: Drop wakeup_source_drop()
  UPSTREAM: PM / core: Add support to skip power management in device/driver model
  gki_defconfig: Enable CONFIG_DM_SNAPSHOT
  ANDROID: gki_defconfig: enable accelerated AES and SHA-256
  ANDROID: fix overflow in /proc/uid_cputime/remove_uid_range
  ANDROID: kasan: fix has_attribute check on older GCC versions
  ANDROID: gki_defconfig: enable CONFIG_PARAVIRT and CONFIG_HYPERVISOR_GUEST
  ANDROID: gki_defconfig: enable CONFIG_NLS_*
  ANDROID: gki_defconfig: Enable BPF_JIT and BPF_JIT_ALWAYS_ON
  FROMGIT: of: property: Create device links for all child-supplier depencencies
  FROMGIT: of/platform: Pause/resume sync state during init and of_platform_populate()
  BACKPORT: FROMGIT: driver core: Add sync_state driver/bus callback
  BACKPORT: FROMGIT: of: property: Add functional dependency link from DT bindings
  FROMGIT: driver core: Add support for linking devices during device addition
  FROMGIT: driver core: Add fwnode_to_dev() to look up device from fwnode
  UPSTREAM: mm: untag user pointers in mmap/munmap/mremap/brk
  UPSTREAM: vfio/type1: untag user pointers in vaddr_get_pfn
  UPSTREAM: tee/shm: untag user pointers in tee_shm_register
  UPSTREAM: media/v4l2-core: untag user pointers in videobuf_dma_contig_user_get
  UPSTREAM: drm/radeon: untag user pointers in radeon_gem_userptr_ioctl
  BACKPORT: drm/amdgpu: untag user pointers
  UPSTREAM: userfaultfd: untag user pointers
  UPSTREAM: fs/namespace: untag user pointers in copy_mount_options
  UPSTREAM: mm: untag user pointers in get_vaddr_frames
  UPSTREAM: mm: untag user pointers in mm/gup.c
  UPSTREAM: mm: untag user pointers passed to memory syscalls
  BACKPORT: lib: untag user pointers in strn*_user
  UPSTREAM: arm64: Fix reference to docs for ARM64_TAGGED_ADDR_ABI
  UPSTREAM: selftests, arm64: add kernel headers path for tags_test
  BACKPORT: arm64: Relax Documentation/arm64/tagged-pointers.rst
  UPSTREAM: arm64: Define Documentation/arm64/tagged-address-abi.rst
  UPSTREAM: arm64: Change the tagged_addr sysctl control semantics to only prevent the opt-in
  UPSTREAM: arm64: Tighten the PR_{SET, GET}_TAGGED_ADDR_CTRL prctl() unused arguments
  UPSTREAM: selftests, arm64: fix uninitialized symbol in tags_test.c
  UPSTREAM: arm64: mm: Really fix sparse warning in untagged_addr()
  UPSTREAM: selftests, arm64: add a selftest for passing tagged pointers to kernel
  BACKPORT: arm64: Introduce prctl() options to control the tagged user addresses ABI
  UPSTREAM: arm64: untag user pointers in access_ok and __uaccess_mask_ptr
  UPSTREAM: uaccess: add noop untagged_addr definition
  BACKPORT: block: annotate refault stalls from IO submission
  f2fs: add a condition to detect overflow in f2fs_ioc_gc_range()
  f2fs: fix to add missing F2FS_IO_ALIGNED() condition
  f2fs: fix to fallback to buffered IO in IO aligned mode
  f2fs: fix to handle error path correctly in f2fs_map_blocks
  f2fs: fix extent corrupotion during directIO in LFS mode
  f2fs: check all the data segments against all node ones
  f2fs: Add a small clarification to CONFIG_FS_F2FS_FS_SECURITY
  f2fs: fix inode rwsem regression
  f2fs: fix to avoid accessing uninitialized field of inode page in is_alive()
  f2fs: avoid infinite GC loop due to stale atomic files
  f2fs: Fix indefinite loop in f2fs_gc()
  f2fs: convert inline_data in prior to i_size_write
  f2fs: fix error path of f2fs_convert_inline_page()
  f2fs: add missing documents of reserve_root/resuid/resgid
  f2fs: fix flushing node pages when checkpoint is disabled
  f2fs: enhance f2fs_is_checkpoint_ready()'s readability
  f2fs: clean up __bio_alloc()'s parameter
  f2fs: fix wrong error injection path in inc_valid_block_count()
  f2fs: fix to writeout dirty inode during node flush
  f2fs: optimize case-insensitive lookups
  f2fs: introduce f2fs_match_name() for cleanup
  f2fs: Fix indefinite loop in f2fs_gc()
  f2fs: allocate memory in batch in build_sit_info()
  f2fs: support FS_IOC_{GET,SET}FSLABEL
  f2fs: fix to avoid data corruption by forbidding SSR overwrite
  f2fs: Fix build error while CONFIG_NLS=m
  Revert "f2fs: avoid out-of-range memory access"
  f2fs: cleanup the code in build_sit_entries.
  f2fs: fix wrong available node count calculation
  f2fs: remove duplicate code in f2fs_file_write_iter
  f2fs: fix to migrate blocks correctly during defragment
  f2fs: use wrapped f2fs_cp_error()
  f2fs: fix to use more generic EOPNOTSUPP
  f2fs: use wrapped IS_SWAPFILE()
  f2fs: Support case-insensitive file name lookups
  f2fs: include charset encoding information in the superblock
  fs: Reserve flag for casefolding
  f2fs: fix to avoid call kvfree under spinlock
  fs: f2fs: Remove unnecessary checks of SM_I(sbi) in update_general_status()
  f2fs: disallow direct IO in atomic write
  f2fs: fix to handle quota_{on,off} correctly
  f2fs: fix to detect cp error in f2fs_setxattr()
  f2fs: fix to spread f2fs_is_checkpoint_ready()
  f2fs: support fiemap() for directory inode
  f2fs: fix to avoid discard command leak
  f2fs: fix to avoid tagging SBI_QUOTA_NEED_REPAIR incorrectly
  f2fs: fix to drop meta/node pages during umount
  f2fs: disallow switching io_bits option during remount
  f2fs: fix panic of IO alignment feature
  f2fs: introduce {page,io}_is_mergeable() for readability
  f2fs: fix livelock in swapfile writes
  f2fs: add fs-verity support
  ext4: update on-disk format documentation for fs-verity
  ext4: add fs-verity read support
  ext4: add basic fs-verity support
  fs-verity: support builtin file signatures
  fs-verity: add SHA-512 support
  fs-verity: implement FS_IOC_MEASURE_VERITY ioctl
  fs-verity: implement FS_IOC_ENABLE_VERITY ioctl
  fs-verity: add data verification hooks for ->readpages()
  fs-verity: add the hook for file ->setattr()
  fs-verity: add the hook for file ->open()
  fs-verity: add inode and superblock fields
  fs-verity: add Kconfig and the helper functions for hashing
  fs: uapi: define verity bit for FS_IOC_GETFLAGS
  fs-verity: add UAPI header
  fs-verity: add MAINTAINERS file entry
  fs-verity: add a documentation file
  ext4: fix kernel oops caused by spurious casefold flag
  ext4: fix coverity warning on error path of filename setup
  ext4: optimize case-insensitive lookups
  ext4: fix dcache lookup of !casefolded directories
  unicode: update to Unicode 12.1.0 final
  unicode: add missing check for an error return from utf8lookup()
  ext4: export /sys/fs/ext4/feature/casefold if Unicode support is present
  unicode: refactor the rule for regenerating utf8data.h
  ext4: Support case-insensitive file name lookups
  ext4: include charset encoding information in the superblock
  unicode: update unicode database unicode version 12.1.0
  unicode: introduce test module for normalized utf8 implementation
  unicode: implement higher level API for string handling
  unicode: reduce the size of utf8data[]
  unicode: introduce code for UTF-8 normalization
  unicode: introduce UTF-8 character database
  ext4 crypto: fix to check feature status before get policy
  fscrypt: document the new ioctls and policy version
  ubifs: wire up new fscrypt ioctls
  f2fs: wire up new fscrypt ioctls
  ext4: wire up new fscrypt ioctls
  fscrypt: require that key be added when setting a v2 encryption policy
  fscrypt: add FS_IOC_REMOVE_ENCRYPTION_KEY_ALL_USERS ioctl
  fscrypt: allow unprivileged users to add/remove keys for v2 policies
  fscrypt: v2 encryption policy support
  fscrypt: add an HKDF-SHA512 implementation
  fscrypt: add FS_IOC_GET_ENCRYPTION_KEY_STATUS ioctl
  fscrypt: add FS_IOC_REMOVE_ENCRYPTION_KEY ioctl
  fscrypt: add FS_IOC_ADD_ENCRYPTION_KEY ioctl
  fscrypt: rename keyinfo.c to keysetup.c
  fscrypt: move v1 policy key setup to keysetup_v1.c
  fscrypt: refactor key setup code in preparation for v2 policies
  fscrypt: rename fscrypt_master_key to fscrypt_direct_key
  fscrypt: add ->ci_inode to fscrypt_info
  fscrypt: use FSCRYPT_* definitions, not FS_*
  fscrypt: use FSCRYPT_ prefix for uapi constants
  fs, fscrypt: move uapi definitions to new header <linux/fscrypt.h>
  fscrypt: use ENOPKG when crypto API support missing
  fscrypt: improve warnings for missing crypto API support
  fscrypt: improve warning messages for unsupported encryption contexts
  fscrypt: make fscrypt_msg() take inode instead of super_block
  fscrypt: clean up base64 encoding/decoding
  fscrypt: remove loadable module related code

Updated the following files to fix build errors:
	drivers/gpu/msm/kgsl_pool.c
	drivers/hwtracing/coresight/coresight-dummy.c
	drivers/iommu/dma-mapping-fast.c
	drivers/iommu/io-pgtable-fast.c
	drivers/iommu/io-pgtable-msm-secure.c
	kernel/taskstats.c
	mm/vmalloc.c
	security/selinux/ss/sidtab.h

Conflicts:
	arch/arm/Makefile
	arch/arm64/Kconfig
	arch/x86/include/asm/syscall_wrapper.h
	build.config.common
	drivers/clk/clk.c
	drivers/hwtracing/coresight/coresight-etm-perf.c
	drivers/hwtracing/coresight/coresight-funnel.c
	drivers/hwtracing/coresight/coresight-tmc-etf.c
	drivers/hwtracing/coresight/coresight-tmc-etr.c
	drivers/hwtracing/coresight/coresight-tmc.c
	drivers/hwtracing/coresight/coresight-tmc.h
	drivers/hwtracing/coresight/coresight.c
	drivers/hwtracing/coresight/of_coresight.c
	drivers/iommu/arm-smmu.c
	drivers/iommu/io-pgtable-arm.c
	drivers/iommu/io-pgtable.c
	drivers/scsi/scsi_sysfs.c
	drivers/scsi/sd.c
	drivers/scsi/ufs/ufshcd.c
	drivers/scsi/ufs/ufshcd.h
	drivers/staging/android/ion/ion.c
	drivers/staging/android/ion/ion.h
	drivers/staging/android/ion/ion_page_pool.c
	fs/ext4/readpage.c
	fs/f2fs/data.c
	fs/f2fs/f2fs.h
	fs/f2fs/file.c
	fs/f2fs/segment.c
	fs/f2fs/super.c
	include/linux/clk-provider.h
	include/linux/compiler_types.h
	include/linux/coresight.h
	include/linux/mmzone.h
	include/scsi/scsi_device.h
	include/trace/events/kmem.h
	kernel/events/core.c
	kernel/sched/core.c
	mm/vmstat.c

Change-Id: I2eca52b08b484f2b5c30437671cab8cb0195b8d6
Signed-off-by: Ivaylo Georgiev <irgeorgiev@codeaurora.org>
2020-03-27 10:48:20 -07:00

/*
* linux/mm/vmstat.c
*
* Manages VM statistics
* Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
*
* zoned VM statistics
* Copyright (C) 2006 Silicon Graphics, Inc.,
* Christoph Lameter <christoph@lameter.com>
* Copyright (C) 2008-2014 Christoph Lameter
*/
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/vmstat.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/sched.h>
#include <linux/math64.h>
#include <linux/writeback.h>
#include <linux/compaction.h>
#include <linux/mm_inline.h>
#include <linux/page_ext.h>
#include <linux/page_owner.h>
#include "internal.h"
#define NUMA_STATS_THRESHOLD (U16_MAX - 2)
#ifdef CONFIG_NUMA
int sysctl_vm_numa_stat = ENABLE_NUMA_STAT;
/* zero numa counters within a zone */
static void zero_zone_numa_counters(struct zone *zone)
{
int item, cpu;
for (item = 0; item < NR_VM_NUMA_STAT_ITEMS; item++) {
atomic_long_set(&zone->vm_numa_stat[item], 0);
for_each_online_cpu(cpu)
per_cpu_ptr(zone->pageset, cpu)->vm_numa_stat_diff[item]
= 0;
}
}
/* zero numa counters of all the populated zones */
static void zero_zones_numa_counters(void)
{
struct zone *zone;
for_each_populated_zone(zone)
zero_zone_numa_counters(zone);
}
/* zero global numa counters */
static void zero_global_numa_counters(void)
{
int item;
for (item = 0; item < NR_VM_NUMA_STAT_ITEMS; item++)
atomic_long_set(&vm_numa_stat[item], 0);
}
static void invalid_numa_statistics(void)
{
zero_zones_numa_counters();
zero_global_numa_counters();
}
static DEFINE_MUTEX(vm_numa_stat_lock);
int sysctl_vm_numa_stat_handler(struct ctl_table *table, int write,
void __user *buffer, size_t *length, loff_t *ppos)
{
int ret, oldval;
mutex_lock(&vm_numa_stat_lock);
if (write)
oldval = sysctl_vm_numa_stat;
ret = proc_dointvec_minmax(table, write, buffer, length, ppos);
if (ret || !write)
goto out;
if (oldval == sysctl_vm_numa_stat)
goto out;
else if (sysctl_vm_numa_stat == ENABLE_NUMA_STAT) {
static_branch_enable(&vm_numa_stat_key);
pr_info("enable numa statistics\n");
} else {
static_branch_disable(&vm_numa_stat_key);
invalid_numa_statistics();
pr_info("disable numa statistics, and clear numa counters\n");
}
out:
mutex_unlock(&vm_numa_stat_lock);
return ret;
}
#endif
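/*
* Illustrative sketch only: the handler above is meant to be wired into a
* ctl_table entry (the real entry lives in kernel/sysctl.c; the exact field
* values below are assumptions for illustration), so that writing 0 or 1 to
* /proc/sys/vm/numa_stat disables or re-enables NUMA statistics collection:
*
*     {
*         .procname     = "numa_stat",
*         .data         = &sysctl_vm_numa_stat,
*         .maxlen       = sizeof(int),
*         .mode         = 0644,
*         .proc_handler = sysctl_vm_numa_stat_handler,
*     },
*
* Disabling trades NUMA hit/miss accounting accuracy for a cheaper
* allocation fast path.
*/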
#ifdef CONFIG_VM_EVENT_COUNTERS
DEFINE_PER_CPU(struct vm_event_state, vm_event_states) = {{0}};
EXPORT_PER_CPU_SYMBOL(vm_event_states);
static void sum_vm_events(unsigned long *ret)
{
int cpu;
int i;
memset(ret, 0, NR_VM_EVENT_ITEMS * sizeof(unsigned long));
for_each_online_cpu(cpu) {
struct vm_event_state *this = &per_cpu(vm_event_states, cpu);
for (i = 0; i < NR_VM_EVENT_ITEMS; i++)
ret[i] += this->event[i];
}
}
/*
* Accumulate the vm event counters across all CPUs.
* The result is unavoidably approximate - it can change
* during and after execution of this function.
*/
void all_vm_events(unsigned long *ret)
{
get_online_cpus();
sum_vm_events(ret);
put_online_cpus();
}
EXPORT_SYMBOL_GPL(all_vm_events);
/*
* Fold the foreign cpu events into our own.
*
* This is adding to the events on one processor
* but keeps the global counts constant.
*/
void vm_events_fold_cpu(int cpu)
{
struct vm_event_state *fold_state = &per_cpu(vm_event_states, cpu);
int i;
for (i = 0; i < NR_VM_EVENT_ITEMS; i++) {
count_vm_events(i, fold_state->event[i]);
fold_state->event[i] = 0;
}
}
#endif /* CONFIG_VM_EVENT_COUNTERS */
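/*
* Illustrative usage sketch (not part of upstream vmstat.c): how kernel code
* typically produces and consumes these per-CPU event counters.
* count_vm_event() and the PGFAULT item come from <linux/vmstat.h> and
* <linux/vm_event_item.h>; the surrounding context is hypothetical.
*
*     unsigned long events[NR_VM_EVENT_ITEMS];
*
*     count_vm_event(PGFAULT);   // bump this CPU's counter, lock-free
*     all_vm_events(events);     // fold every online CPU into events[]
*     pr_info("page faults so far: %lu\n", events[PGFAULT]);
*/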
/*
* Manage combined zone based / global counters
*
* vm_stat contains the global counters
*/
atomic_long_t vm_zone_stat[NR_VM_ZONE_STAT_ITEMS] __cacheline_aligned_in_smp;
atomic_long_t vm_numa_stat[NR_VM_NUMA_STAT_ITEMS] __cacheline_aligned_in_smp;
atomic_long_t vm_node_stat[NR_VM_NODE_STAT_ITEMS] __cacheline_aligned_in_smp;
EXPORT_SYMBOL(vm_zone_stat);
EXPORT_SYMBOL(vm_numa_stat);
EXPORT_SYMBOL(vm_node_stat);
#ifdef CONFIG_SMP
int calculate_pressure_threshold(struct zone *zone)
{
int threshold;
int watermark_distance;
/*
* As vmstats are not up to date, there is drift between the estimated
* and real values. For high thresholds and a high number of CPUs, it
* is possible for the min watermark to be breached while the estimated
* value looks fine. The pressure threshold is a reduced value such
* that even the maximum amount of drift will not accidentally breach
* the min watermark
*/
watermark_distance = low_wmark_pages(zone) - min_wmark_pages(zone);
threshold = max(1, (int)(watermark_distance / num_online_cpus()));
/*
* Maximum threshold is 125
*/
threshold = min(125, threshold);
return threshold;
}
int calculate_normal_threshold(struct zone *zone)
{
int threshold;
int mem; /* memory in 128 MB units */
/*
* The threshold scales with the number of processors and the amount
* of memory per zone. More memory means that we can defer updates for
* longer, more processors could lead to more contention.
* fls() is used to have a cheap way of logarithmic scaling.
*
* Some sample thresholds:
*
* Threshold Processors (fls) Zonesize fls(mem+1)
* ------------------------------------------------------------------
* 8 1 1 0.9-1 GB 4
* 16 2 2 0.9-1 GB 4
* 20 2 2 1-2 GB 5
* 24 2 2 2-4 GB 6
* 28 2 2 4-8 GB 7
* 32 2 2 8-16 GB 8
* 4 2 2 <128M 1
* 30 4 3 2-4 GB 5
* 48 4 3 8-16 GB 8
* 32 8 4 1-2 GB 4
* 32 8 4 0.9-1GB 4
* 10 16 5 <128M 1
* 40 16 5 900M 4
* 70 64 7 2-4 GB 5
* 84 64 7 4-8 GB 6
* 108 512 9 4-8 GB 6
* 125 1024 10 8-16 GB 8
* 125 1024 10 16-32 GB 9
*/
mem = zone->managed_pages >> (27 - PAGE_SHIFT);
threshold = 2 * fls(num_online_cpus()) * (1 + fls(mem));
/*
* Maximum threshold is 125
*/
threshold = min(125, threshold);
return threshold;
}
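/*
* Worked example of the computation above (illustrative): with 4 online CPUs
* (fls(4) == 3) and a zone managing 2 GB, mem = 2 GB / 128 MB = 16 and
* fls(16) == 5, so threshold = 2 * 3 * (1 + 5) = 36, comfortably under the
* 125 cap.
*/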
/*
* Refresh the thresholds for each zone.
*/
void refresh_zone_stat_thresholds(void)
{
struct pglist_data *pgdat;
struct zone *zone;
int cpu;
int threshold;
/* Zero current pgdat thresholds */
for_each_online_pgdat(pgdat) {
for_each_online_cpu(cpu) {
per_cpu_ptr(pgdat->per_cpu_nodestats, cpu)->stat_threshold = 0;
}
}
for_each_populated_zone(zone) {
struct pglist_data *pgdat = zone->zone_pgdat;
unsigned long max_drift, tolerate_drift;
threshold = calculate_normal_threshold(zone);
for_each_online_cpu(cpu) {
int pgdat_threshold;
per_cpu_ptr(zone->pageset, cpu)->stat_threshold
= threshold;
/* Base nodestat threshold on the largest populated zone. */
pgdat_threshold = per_cpu_ptr(pgdat->per_cpu_nodestats, cpu)->stat_threshold;
per_cpu_ptr(pgdat->per_cpu_nodestats, cpu)->stat_threshold
= max(threshold, pgdat_threshold);
}
/*
* Only set percpu_drift_mark if there is a danger that
* NR_FREE_PAGES reports the low watermark is ok when in fact
* the min watermark could be breached by an allocation
*/
tolerate_drift = low_wmark_pages(zone) - min_wmark_pages(zone);
max_drift = num_online_cpus() * threshold;
if (max_drift > tolerate_drift)
zone->percpu_drift_mark = high_wmark_pages(zone) +
max_drift;
}
}
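/*
* Illustrative numbers for the drift check above: with 16 online CPUs and a
* per-cpu threshold of 125, up to 16 * 125 = 2000 pages worth of
* NR_FREE_PAGES updates can be pending in per-cpu diffs. If the low-to-min
* watermark gap is smaller than that, percpu_drift_mark is set so that
* watermark checks near it can fall back to the exact (per-cpu-summed)
* snapshot reads done elsewhere in the page allocator.
*/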
void set_pgdat_percpu_threshold(pg_data_t *pgdat,
int (*calculate_pressure)(struct zone *))
{
struct zone *zone;
int cpu;
int threshold;
int i;
for (i = 0; i < pgdat->nr_zones; i++) {
zone = &pgdat->node_zones[i];
if (!zone->percpu_drift_mark)
continue;
threshold = (*calculate_pressure)(zone);
for_each_online_cpu(cpu)
per_cpu_ptr(zone->pageset, cpu)->stat_threshold
= threshold;
}
}
/*
* For use when we know that interrupts are disabled,
* or when we know that preemption is disabled and that
* particular counter cannot be updated from interrupt context.
*/
void __mod_zone_page_state(struct zone *zone, enum zone_stat_item item,
long delta)
{
struct per_cpu_pageset __percpu *pcp = zone->pageset;
s8 __percpu *p = pcp->vm_stat_diff + item;
long x;
long t;
x = delta + __this_cpu_read(*p);
t = __this_cpu_read(pcp->stat_threshold);
if (unlikely(x > t || x < -t)) {
zone_page_state_add(x, zone, item);
x = 0;
}
__this_cpu_write(*p, x);
}
EXPORT_SYMBOL(__mod_zone_page_state);
void __mod_node_page_state(struct pglist_data *pgdat, enum node_stat_item item,
long delta)
{
struct per_cpu_nodestat __percpu *pcp = pgdat->per_cpu_nodestats;
s8 __percpu *p = pcp->vm_node_stat_diff + item;
long x;
long t;
x = delta + __this_cpu_read(*p);
t = __this_cpu_read(pcp->stat_threshold);
if (unlikely(x > t || x < -t)) {
node_page_state_add(x, pgdat, item);
x = 0;
}
__this_cpu_write(*p, x);
}
EXPORT_SYMBOL(__mod_node_page_state);
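/*
* Illustrative usage (not part of this file): a caller that already runs with
* interrupts disabled, e.g. while manipulating a zone's free lists under
* zone->lock, can use the cheap __-prefixed variants directly; "order" below
* is a hypothetical local variable:
*
*     __mod_zone_page_state(zone, NR_FREE_PAGES, -(1L << order));
*
* Callers that may be entered with interrupts enabled should use the
* irq-safe mod_zone_page_state()/mod_node_page_state() wrappers further down.
*/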
/*
* Optimized increment and decrement functions.
*
* These are only for a single page and therefore can take a struct page *
* argument instead of struct zone *. This allows the inclusion of the code
* generated for page_zone(page) into the optimized functions.
*
* No overflow check is necessary and therefore the differential can be
* incremented or decremented in place which may allow the compilers to
* generate better code.
* The increment or decrement is known and therefore one boundary check can
* be omitted.
*
* NOTE: These functions are very performance sensitive. Change only
* with care.
*
* Some processors have inc/dec instructions that are atomic vs an interrupt.
* However, the code must first determine the differential location in a zone
* based on the processor number and then inc/dec the counter. There is no
* guarantee without disabling preemption that the processor will not change
* in between and therefore the atomicity vs. interrupt cannot be exploited
* in a useful way here.
*/
void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
{
struct per_cpu_pageset __percpu *pcp = zone->pageset;
s8 __percpu *p = pcp->vm_stat_diff + item;
s8 v, t;
v = __this_cpu_inc_return(*p);
t = __this_cpu_read(pcp->stat_threshold);
if (unlikely(v > t)) {
s8 overstep = t >> 1;
zone_page_state_add(v + overstep, zone, item);
__this_cpu_write(*p, -overstep);
}
}
void __inc_node_state(struct pglist_data *pgdat, enum node_stat_item item)
{
struct per_cpu_nodestat __percpu *pcp = pgdat->per_cpu_nodestats;
s8 __percpu *p = pcp->vm_node_stat_diff + item;
s8 v, t;
v = __this_cpu_inc_return(*p);
t = __this_cpu_read(pcp->stat_threshold);
if (unlikely(v > t)) {
s8 overstep = t >> 1;
node_page_state_add(v + overstep, pgdat, item);
__this_cpu_write(*p, -overstep);
}
}
void __inc_zone_page_state(struct page *page, enum zone_stat_item item)
{
__inc_zone_state(page_zone(page), item);
}
EXPORT_SYMBOL(__inc_zone_page_state);
void __inc_node_page_state(struct page *page, enum node_stat_item item)
{
__inc_node_state(page_pgdat(page), item);
}
EXPORT_SYMBOL(__inc_node_page_state);
void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
{
struct per_cpu_pageset __percpu *pcp = zone->pageset;
s8 __percpu *p = pcp->vm_stat_diff + item;
s8 v, t;
v = __this_cpu_dec_return(*p);
t = __this_cpu_read(pcp->stat_threshold);
if (unlikely(v < -t)) {
s8 overstep = t >> 1;
zone_page_state_add(v - overstep, zone, item);
__this_cpu_write(*p, overstep);
}
}
void __dec_node_state(struct pglist_data *pgdat, enum node_stat_item item)
{
struct per_cpu_nodestat __percpu *pcp = pgdat->per_cpu_nodestats;
s8 __percpu *p = pcp->vm_node_stat_diff + item;
s8 v, t;
v = __this_cpu_dec_return(*p);
t = __this_cpu_read(pcp->stat_threshold);
if (unlikely(v < -t)) {
s8 overstep = t >> 1;
node_page_state_add(v - overstep, pgdat, item);
__this_cpu_write(*p, overstep);
}
}
void __dec_zone_page_state(struct page *page, enum zone_stat_item item)
{
__dec_zone_state(page_zone(page), item);
}
EXPORT_SYMBOL(__dec_zone_page_state);
void __dec_node_page_state(struct page *page, enum node_stat_item item)
{
__dec_node_state(page_pgdat(page), item);
}
EXPORT_SYMBOL(__dec_node_page_state);
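/*
* Worked example of the overstep logic above (illustrative): with a
* stat_threshold of 32, the increment that takes a per-cpu diff to 33 folds
* 33 + 16 = 49 into the zone/node counter and leaves the per-cpu diff at -16,
* so the next 48 increments are again handled purely per-cpu.
*/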
#ifdef CONFIG_HAVE_CMPXCHG_LOCAL
/*
* If we have cmpxchg_local support then we do not need to incur the overhead
* that comes with local_irq_save/restore if we use this_cpu_cmpxchg.
*
* mod_state() modifies the zone counter state through atomic per cpu
* operations.
*
* Overstep mode specifies how overstep should be handled:
* 0 No overstepping
* 1 Overstepping half of threshold
* -1 Overstepping minus half of threshold
*/
static inline void mod_zone_state(struct zone *zone,
enum zone_stat_item item, long delta, int overstep_mode)
{
struct per_cpu_pageset __percpu *pcp = zone->pageset;
s8 __percpu *p = pcp->vm_stat_diff + item;
long o, n, t, z;
do {
z = 0; /* overflow to zone counters */
/*
* The fetching of the stat_threshold is racy. We may apply
* a counter threshold to the wrong cpu if we get
* rescheduled while executing here. However, the next
* counter update will apply the threshold again and
* therefore bring the counter under the threshold again.
*
* Most of the time the thresholds are the same anyways
* for all cpus in a zone.
*/
t = this_cpu_read(pcp->stat_threshold);
o = this_cpu_read(*p);
n = delta + o;
if (n > t || n < -t) {
int os = overstep_mode * (t >> 1);
/* Overflow must be added to zone counters */
z = n + os;
n = -os;
}
} while (this_cpu_cmpxchg(*p, o, n) != o);
if (z)
zone_page_state_add(z, zone, item);
}
void mod_zone_page_state(struct zone *zone, enum zone_stat_item item,
long delta)
{
mod_zone_state(zone, item, delta, 0);
}
EXPORT_SYMBOL(mod_zone_page_state);
void inc_zone_page_state(struct page *page, enum zone_stat_item item)
{
mod_zone_state(page_zone(page), item, 1, 1);
}
EXPORT_SYMBOL(inc_zone_page_state);
void dec_zone_page_state(struct page *page, enum zone_stat_item item)
{
mod_zone_state(page_zone(page), item, -1, -1);
}
EXPORT_SYMBOL(dec_zone_page_state);
static inline void mod_node_state(struct pglist_data *pgdat,
enum node_stat_item item, int delta, int overstep_mode)
{
struct per_cpu_nodestat __percpu *pcp = pgdat->per_cpu_nodestats;
s8 __percpu *p = pcp->vm_node_stat_diff + item;
long o, n, t, z;
do {
z = 0; /* overflow to node counters */
/*
* The fetching of the stat_threshold is racy. We may apply
* a counter threshold to the wrong cpu if we get
* rescheduled while executing here. However, the next
* counter update will apply the threshold again and
* therefore bring the counter under the threshold again.
*
* Most of the time the thresholds are the same anyways
* for all cpus in a node.
*/
t = this_cpu_read(pcp->stat_threshold);
o = this_cpu_read(*p);
n = delta + o;
if (n > t || n < -t) {
int os = overstep_mode * (t >> 1);
/* Overflow must be added to node counters */
z = n + os;
n = -os;
}
} while (this_cpu_cmpxchg(*p, o, n) != o);
if (z)
node_page_state_add(z, pgdat, item);
}
void mod_node_page_state(struct pglist_data *pgdat, enum node_stat_item item,
long delta)
{
mod_node_state(pgdat, item, delta, 0);
}
EXPORT_SYMBOL(mod_node_page_state);
void inc_node_state(struct pglist_data *pgdat, enum node_stat_item item)
{
mod_node_state(pgdat, item, 1, 1);
}
void inc_node_page_state(struct page *page, enum node_stat_item item)
{
mod_node_state(page_pgdat(page), item, 1, 1);
}
EXPORT_SYMBOL(inc_node_page_state);
void dec_node_page_state(struct page *page, enum node_stat_item item)
{
mod_node_state(page_pgdat(page), item, -1, -1);
}
EXPORT_SYMBOL(dec_node_page_state);
#else
/*
* Use interrupt disable to serialize counter updates
*/
void mod_zone_page_state(struct zone *zone, enum zone_stat_item item,
long delta)
{
unsigned long flags;
local_irq_save(flags);
__mod_zone_page_state(zone, item, delta);
local_irq_restore(flags);
}
EXPORT_SYMBOL(mod_zone_page_state);
void inc_zone_page_state(struct page *page, enum zone_stat_item item)
{
unsigned long flags;
struct zone *zone;
zone = page_zone(page);
local_irq_save(flags);
__inc_zone_state(zone, item);
local_irq_restore(flags);
}
EXPORT_SYMBOL(inc_zone_page_state);
void dec_zone_page_state(struct page *page, enum zone_stat_item item)
{
unsigned long flags;
local_irq_save(flags);
__dec_zone_page_state(page, item);
local_irq_restore(flags);
}
EXPORT_SYMBOL(dec_zone_page_state);
void inc_node_state(struct pglist_data *pgdat, enum node_stat_item item)
{
unsigned long flags;
local_irq_save(flags);
__inc_node_state(pgdat, item);
local_irq_restore(flags);
}
EXPORT_SYMBOL(inc_node_state);
void mod_node_page_state(struct pglist_data *pgdat, enum node_stat_item item,
long delta)
{
unsigned long flags;
local_irq_save(flags);
__mod_node_page_state(pgdat, item, delta);
local_irq_restore(flags);
}
EXPORT_SYMBOL(mod_node_page_state);
void inc_node_page_state(struct page *page, enum node_stat_item item)
{
unsigned long flags;
struct pglist_data *pgdat;
pgdat = page_pgdat(page);
local_irq_save(flags);
__inc_node_state(pgdat, item);
local_irq_restore(flags);
}
EXPORT_SYMBOL(inc_node_page_state);
void dec_node_page_state(struct page *page, enum node_stat_item item)
{
unsigned long flags;
local_irq_save(flags);
__dec_node_page_state(page, item);
local_irq_restore(flags);
}
EXPORT_SYMBOL(dec_node_page_state);
#endif
/*
* Fold a differential into the global counters.
* Returns the number of counters updated.
*/
#ifdef CONFIG_NUMA
static int fold_diff(int *zone_diff, int *numa_diff, int *node_diff)
{
int i;
int changes = 0;
for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
if (zone_diff[i]) {
atomic_long_add(zone_diff[i], &vm_zone_stat[i]);
changes++;
}
for (i = 0; i < NR_VM_NUMA_STAT_ITEMS; i++)
if (numa_diff[i]) {
atomic_long_add(numa_diff[i], &vm_numa_stat[i]);
changes++;
}
for (i = 0; i < NR_VM_NODE_STAT_ITEMS; i++)
if (node_diff[i]) {
atomic_long_add(node_diff[i], &vm_node_stat[i]);
changes++;
}
return changes;
}
#else
static int fold_diff(int *zone_diff, int *node_diff)
{
int i;
int changes = 0;
for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
if (zone_diff[i]) {
atomic_long_add(zone_diff[i], &vm_zone_stat[i]);
changes++;
}
for (i = 0; i < NR_VM_NODE_STAT_ITEMS; i++)
if (node_diff[i]) {
atomic_long_add(node_diff[i], &vm_node_stat[i]);
changes++;
}
return changes;
}
#endif /* CONFIG_NUMA */
/*
* Update the zone counters for the current cpu.
*
* Note that refresh_cpu_vm_stats strives to only access
* node local memory. The per cpu pagesets on remote zones are placed
* in the memory local to the processor using that pageset. So the
* loop over all zones will access a series of cachelines local to
* the processor.
*
* The call to zone_page_state_add updates the cachelines with the
* statistics in the remote zone struct as well as the global cachelines
* with the global counters. These could cause remote node cache line
* bouncing and will have to be only done when necessary.
*
* The function returns the number of global counters updated.
*/
static int refresh_cpu_vm_stats(bool do_pagesets)
{
struct pglist_data *pgdat;
struct zone *zone;
int i;
int global_zone_diff[NR_VM_ZONE_STAT_ITEMS] = { 0, };
#ifdef CONFIG_NUMA
int global_numa_diff[NR_VM_NUMA_STAT_ITEMS] = { 0, };
#endif
int global_node_diff[NR_VM_NODE_STAT_ITEMS] = { 0, };
int changes = 0;
for_each_populated_zone(zone) {
struct per_cpu_pageset __percpu *p = zone->pageset;
for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++) {
int v;
v = this_cpu_xchg(p->vm_stat_diff[i], 0);
if (v) {
atomic_long_add(v, &zone->vm_stat[i]);
global_zone_diff[i] += v;
#ifdef CONFIG_NUMA
/* 3 seconds idle till flush */
__this_cpu_write(p->expire, 3);
#endif
}
}
#ifdef CONFIG_NUMA
for (i = 0; i < NR_VM_NUMA_STAT_ITEMS; i++) {
int v;
v = this_cpu_xchg(p->vm_numa_stat_diff[i], 0);
if (v) {
atomic_long_add(v, &zone->vm_numa_stat[i]);
global_numa_diff[i] += v;
__this_cpu_write(p->expire, 3);
}
}
if (do_pagesets) {
cond_resched();
/*
* Deal with draining the remote pageset of this
* processor
*
* Check if there are pages remaining in this pageset
* if not then there is nothing to expire.
*/
if (!__this_cpu_read(p->expire) ||
!__this_cpu_read(p->pcp.count))
continue;
/*
* We never drain zones local to this processor.
*/
if (zone_to_nid(zone) == numa_node_id()) {
__this_cpu_write(p->expire, 0);
continue;
}
if (__this_cpu_dec_return(p->expire))
continue;
if (__this_cpu_read(p->pcp.count)) {
drain_zone_pages(zone, this_cpu_ptr(&p->pcp));
changes++;
}
}
#endif
}
for_each_online_pgdat(pgdat) {
struct per_cpu_nodestat __percpu *p = pgdat->per_cpu_nodestats;
for (i = 0; i < NR_VM_NODE_STAT_ITEMS; i++) {
int v;
v = this_cpu_xchg(p->vm_node_stat_diff[i], 0);
if (v) {
atomic_long_add(v, &pgdat->vm_stat[i]);
global_node_diff[i] += v;
}
}
}
#ifdef CONFIG_NUMA
changes += fold_diff(global_zone_diff, global_numa_diff,
global_node_diff);
#else
changes += fold_diff(global_zone_diff, global_node_diff);
#endif
return changes;
}
/*
* Fold the data for an offline cpu into the global array.
* There cannot be any access by the offline cpu and therefore
* synchronization is simplified.
*/
void cpu_vm_stats_fold(int cpu)
{
struct pglist_data *pgdat;
struct zone *zone;
int i;
int global_zone_diff[NR_VM_ZONE_STAT_ITEMS] = { 0, };
#ifdef CONFIG_NUMA
int global_numa_diff[NR_VM_NUMA_STAT_ITEMS] = { 0, };
#endif
int global_node_diff[NR_VM_NODE_STAT_ITEMS] = { 0, };
for_each_populated_zone(zone) {
struct per_cpu_pageset *p;
p = per_cpu_ptr(zone->pageset, cpu);
for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
if (p->vm_stat_diff[i]) {
int v;
v = p->vm_stat_diff[i];
p->vm_stat_diff[i] = 0;
atomic_long_add(v, &zone->vm_stat[i]);
global_zone_diff[i] += v;
}
#ifdef CONFIG_NUMA
for (i = 0; i < NR_VM_NUMA_STAT_ITEMS; i++)
if (p->vm_numa_stat_diff[i]) {
int v;
v = p->vm_numa_stat_diff[i];
p->vm_numa_stat_diff[i] = 0;
atomic_long_add(v, &zone->vm_numa_stat[i]);
global_numa_diff[i] += v;
}
#endif
}
for_each_online_pgdat(pgdat) {
struct per_cpu_nodestat *p;
p = per_cpu_ptr(pgdat->per_cpu_nodestats, cpu);
for (i = 0; i < NR_VM_NODE_STAT_ITEMS; i++)
if (p->vm_node_stat_diff[i]) {
int v;
v = p->vm_node_stat_diff[i];
p->vm_node_stat_diff[i] = 0;
atomic_long_add(v, &pgdat->vm_stat[i]);
global_node_diff[i] += v;
}
}
#ifdef CONFIG_NUMA
fold_diff(global_zone_diff, global_numa_diff, global_node_diff);
#else
fold_diff(global_zone_diff, global_node_diff);
#endif
}
/*
* This is only called if !populated_zone(zone), which implies no other users of
* pset->vm_stat_diff[] exist.
*/
void drain_zonestat(struct zone *zone, struct per_cpu_pageset *pset)
{
int i;
for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
if (pset->vm_stat_diff[i]) {
int v = pset->vm_stat_diff[i];
pset->vm_stat_diff[i] = 0;
atomic_long_add(v, &zone->vm_stat[i]);
atomic_long_add(v, &vm_zone_stat[i]);
}
#ifdef CONFIG_NUMA
for (i = 0; i < NR_VM_NUMA_STAT_ITEMS; i++)
if (pset->vm_numa_stat_diff[i]) {
int v = pset->vm_numa_stat_diff[i];
pset->vm_numa_stat_diff[i] = 0;
atomic_long_add(v, &zone->vm_numa_stat[i]);
atomic_long_add(v, &vm_numa_stat[i]);
}
#endif
}
#endif
#ifdef CONFIG_NUMA
void __inc_numa_state(struct zone *zone,
enum numa_stat_item item)
{
struct per_cpu_pageset __percpu *pcp = zone->pageset;
u16 __percpu *p = pcp->vm_numa_stat_diff + item;
u16 v;
v = __this_cpu_inc_return(*p);
if (unlikely(v > NUMA_STATS_THRESHOLD)) {
zone_numa_state_add(v, zone, item);
__this_cpu_write(*p, 0);
}
}
/*
* Determine the per node value of a stat item. This function
* is called frequently in a NUMA machine, so try to be as
* frugal as possible.
*/
unsigned long sum_zone_node_page_state(int node,
enum zone_stat_item item)
{
struct zone *zones = NODE_DATA(node)->node_zones;
int i;
unsigned long count = 0;
for (i = 0; i < MAX_NR_ZONES; i++)
count += zone_page_state(zones + i, item);
return count;
}
/*
* Determine the per node value of a numa stat item. To avoid deviation,
* the per cpu stat number in vm_numa_stat_diff[] is also included.
*/
unsigned long sum_zone_numa_state(int node,
enum numa_stat_item item)
{
struct zone *zones = NODE_DATA(node)->node_zones;
int i;
unsigned long count = 0;
for (i = 0; i < MAX_NR_ZONES; i++)
count += zone_numa_state_snapshot(zones + i, item);
return count;
}
/*
* Determine the per node value of a stat item.
*/
unsigned long node_page_state(struct pglist_data *pgdat,
enum node_stat_item item)
{
long x = atomic_long_read(&pgdat->vm_stat[item]);
#ifdef CONFIG_SMP
if (x < 0)
x = 0;
#endif
return x;
}
#endif
#ifdef CONFIG_COMPACTION
struct contig_page_info {
unsigned long free_pages;
unsigned long free_blocks_total;
unsigned long free_blocks_suitable;
};
/*
* Calculate the number of free pages in a zone, how many contiguous
* pages are free and how many are large enough to satisfy an allocation of
* the target size. Note that this function makes no attempt to estimate
* how many suitable free blocks there *might* be if MOVABLE pages were
* migrated. Calculating that is possible, but expensive and can be
* figured out from userspace
*/
static void fill_contig_page_info(struct zone *zone,
unsigned int suitable_order,
struct contig_page_info *info)
{
unsigned int order;
info->free_pages = 0;
info->free_blocks_total = 0;
info->free_blocks_suitable = 0;
for (order = 0; order < MAX_ORDER; order++) {
unsigned long blocks;
/* Count number of free blocks */
blocks = zone->free_area[order].nr_free;
info->free_blocks_total += blocks;
/* Count free base pages */
info->free_pages += blocks << order;
/* Count the suitable free blocks */
if (order >= suitable_order)
info->free_blocks_suitable += blocks <<
(order - suitable_order);
}
}
/*
* A fragmentation index only makes sense if an allocation of a requested
* size would fail. If that is true, the fragmentation index indicates
* whether external fragmentation or a lack of memory was the problem.
* The value can be used to determine if page reclaim or compaction
* should be used
*/
static int __fragmentation_index(unsigned int order, struct contig_page_info *info)
{
unsigned long requested = 1UL << order;
if (WARN_ON_ONCE(order >= MAX_ORDER))
return 0;
if (!info->free_blocks_total)
return 0;
/* Fragmentation index only makes sense when a request would fail */
if (info->free_blocks_suitable)
return -1000;
/*
* Index is between 0 and 1 so return within 3 decimal places
*
* 0 => allocation would fail due to lack of memory
* 1 => allocation would fail due to fragmentation
*/
return 1000 - div_u64((1000 + div_u64(info->free_pages * 1000ULL, requested)), info->free_blocks_total);
}
/* Same as __fragmentation index but allocs contig_page_info on stack */
int fragmentation_index(struct zone *zone, unsigned int order)
{
struct contig_page_info info;
fill_contig_page_info(zone, order, &info);
return __fragmentation_index(order, &info);
}
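/*
* Worked example (illustrative): an order-2 request (requested = 4) finds
* free_pages = 100 spread across free_blocks_total = 50 blocks, none of
* order >= 2, so free_blocks_suitable == 0. The index is then
* 1000 - (1000 + 100 * 1000 / 4) / 50 = 1000 - 520 = 480, i.e. 0.480: the
* failure is attributable roughly equally to lack of memory and to
* fragmentation.
*/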
#endif
#if defined(CONFIG_PROC_FS) || defined(CONFIG_SYSFS) || defined(CONFIG_NUMA)
#ifdef CONFIG_ZONE_DMA
#define TEXT_FOR_DMA(xx) xx "_dma",
#else
#define TEXT_FOR_DMA(xx)
#endif
#ifdef CONFIG_ZONE_DMA32
#define TEXT_FOR_DMA32(xx) xx "_dma32",
#else
#define TEXT_FOR_DMA32(xx)
#endif
#ifdef CONFIG_HIGHMEM
#define TEXT_FOR_HIGHMEM(xx) xx "_high",
#else
#define TEXT_FOR_HIGHMEM(xx)
#endif
#define TEXTS_FOR_ZONES(xx) TEXT_FOR_DMA(xx) TEXT_FOR_DMA32(xx) xx "_normal", \
TEXT_FOR_HIGHMEM(xx) xx "_movable",
const char * const vmstat_text[] = {
/* enum zone_stat_item counters */
"nr_free_pages",
"nr_zone_inactive_anon",
"nr_zone_active_anon",
"nr_zone_inactive_file",
"nr_zone_active_file",
"nr_zone_unevictable",
"nr_zone_write_pending",
"nr_mlock",
"nr_page_table_pages",
"nr_kernel_stack",
#if IS_ENABLED(CONFIG_SHADOW_CALL_STACK)
"nr_shadow_call_stack_bytes",
#endif
"nr_bounce",
#if IS_ENABLED(CONFIG_ZSMALLOC)
"nr_zspages",
#endif
"nr_free_cma",
/* enum numa_stat_item counters */
#ifdef CONFIG_NUMA
"numa_hit",
"numa_miss",
"numa_foreign",
"numa_interleave",
"numa_local",
"numa_other",
#endif
/* Node-based counters */
"nr_inactive_anon",
"nr_active_anon",
"nr_inactive_file",
"nr_active_file",
"nr_unevictable",
"nr_slab_reclaimable",
"nr_slab_unreclaimable",
"nr_isolated_anon",
"nr_isolated_file",
"workingset_refault",
"workingset_activate",
"workingset_restore",
"workingset_nodereclaim",
"nr_anon_pages",
"nr_mapped",
"nr_file_pages",
"nr_dirty",
"nr_writeback",
"nr_writeback_temp",
"nr_shmem",
"nr_shmem_hugepages",
"nr_shmem_pmdmapped",
"nr_anon_transparent_hugepages",
"nr_unstable",
"nr_vmscan_write",
"nr_vmscan_immediate_reclaim",
"nr_dirtied",
"nr_written",
"nr_kernel_misc_reclaimable",
"nr_unreclaimable_pages",
/* enum writeback_stat_item counters */
"nr_dirty_threshold",
"nr_dirty_background_threshold",
#ifdef CONFIG_VM_EVENT_COUNTERS
/* enum vm_event_item counters */
"pgpgin",
"pgpgout",
"pgpgoutclean",
"pswpin",
"pswpout",
TEXTS_FOR_ZONES("pgalloc")
TEXTS_FOR_ZONES("allocstall")
TEXTS_FOR_ZONES("pgskip")
"pgfree",
"pgactivate",
"pgdeactivate",
"pglazyfree",
"pgfault",
"pgmajfault",
"pglazyfreed",
"pgrefill",
"pgsteal_kswapd",
"pgsteal_direct",
"pgscan_kswapd",
"pgscan_direct",
"pgscan_direct_throttle",
#ifdef CONFIG_NUMA
"zone_reclaim_failed",
#endif
"pginodesteal",
"slabs_scanned",
"kswapd_inodesteal",
"kswapd_low_wmark_hit_quickly",
"kswapd_high_wmark_hit_quickly",
"pageoutrun",
"pgrotated",
"drop_pagecache",
"drop_slab",
"oom_kill",
#ifdef CONFIG_NUMA_BALANCING
"numa_pte_updates",
"numa_huge_pte_updates",
"numa_hint_faults",
"numa_hint_faults_local",
"numa_pages_migrated",
#endif
#ifdef CONFIG_MIGRATION
"pgmigrate_success",
"pgmigrate_fail",
#endif
#ifdef CONFIG_COMPACTION
"compact_migrate_scanned",
"compact_free_scanned",
"compact_isolated",
"compact_stall",
"compact_fail",
"compact_success",
"compact_daemon_wake",
"compact_daemon_migrate_scanned",
"compact_daemon_free_scanned",
#endif
#ifdef CONFIG_HUGETLB_PAGE
"htlb_buddy_alloc_success",
"htlb_buddy_alloc_fail",
#endif
"unevictable_pgs_culled",
"unevictable_pgs_scanned",
"unevictable_pgs_rescued",
"unevictable_pgs_mlocked",
"unevictable_pgs_munlocked",
"unevictable_pgs_cleared",
"unevictable_pgs_stranded",
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
"thp_fault_alloc",
"thp_fault_fallback",
"thp_collapse_alloc",
"thp_collapse_alloc_failed",
"thp_file_alloc",
"thp_file_mapped",
"thp_split_page",
"thp_split_page_failed",
"thp_deferred_split_page",
"thp_split_pmd",
#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
"thp_split_pud",
#endif
"thp_zero_page_alloc",
"thp_zero_page_alloc_failed",
"thp_swpout",
"thp_swpout_fallback",
#endif
#ifdef CONFIG_MEMORY_BALLOON
"balloon_inflate",
"balloon_deflate",
#ifdef CONFIG_BALLOON_COMPACTION
"balloon_migrate",
#endif
#endif /* CONFIG_MEMORY_BALLOON */
#ifdef CONFIG_DEBUG_TLBFLUSH
"nr_tlb_remote_flush",
"nr_tlb_remote_flush_received",
"nr_tlb_local_flush_all",
"nr_tlb_local_flush_one",
#endif /* CONFIG_DEBUG_TLBFLUSH */
#ifdef CONFIG_DEBUG_VM_VMACACHE
"vmacache_find_calls",
"vmacache_find_hits",
#endif
#ifdef CONFIG_SWAP
"swap_ra",
"swap_ra_hit",
#endif
#ifdef CONFIG_SPECULATIVE_PAGE_FAULT
"speculative_pgfault_anon",
"speculative_pgfault_file",
#endif
#endif /* CONFIG_VM_EVENT_COUNTERS */
};
#endif /* CONFIG_PROC_FS || CONFIG_SYSFS || CONFIG_NUMA */
#if (defined(CONFIG_DEBUG_FS) && defined(CONFIG_COMPACTION)) || \
defined(CONFIG_PROC_FS)
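/*
 * seq_file iteration helpers shared by the walkers below: frag_start()
 * maps *pos to the pos-th online node and returns its pg_data_t, or NULL
 * once all online nodes have been visited.
 */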
static void *frag_start(struct seq_file *m, loff_t *pos)
{
pg_data_t *pgdat;
loff_t node = *pos;
for (pgdat = first_online_pgdat();
pgdat && node;
pgdat = next_online_pgdat(pgdat))
--node;
return pgdat;
}
static void *frag_next(struct seq_file *m, void *arg, loff_t *pos)
{
pg_data_t *pgdat = (pg_data_t *)arg;
(*pos)++;
return next_online_pgdat(pgdat);
}
static void frag_stop(struct seq_file *m, void *arg)
{
}
/*
* Walk zones in a node and print using a callback.
* If @assert_populated is true, only use callback for zones that are populated.
*/
static void walk_zones_in_node(struct seq_file *m, pg_data_t *pgdat,
bool assert_populated, bool nolock,
void (*print)(struct seq_file *m, pg_data_t *, struct zone *))
{
struct zone *zone;
struct zone *node_zones = pgdat->node_zones;
unsigned long flags;
for (zone = node_zones; zone - node_zones < MAX_NR_ZONES; ++zone) {
if (assert_populated && !populated_zone(zone))
continue;
if (!nolock)
spin_lock_irqsave(&zone->lock, flags);
print(m, pgdat, zone);
if (!nolock)
spin_unlock_irqrestore(&zone->lock, flags);
}
}
#endif
#ifdef CONFIG_PROC_FS
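/* Emit one /proc/buddyinfo line for a zone: free block counts per order. */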
static void frag_show_print(struct seq_file *m, pg_data_t *pgdat,
struct zone *zone)
{
int order;
seq_printf(m, "Node %d, zone %8s ", pgdat->node_id, zone->name);
for (order = 0; order < MAX_ORDER; ++order)
seq_printf(m, "%6lu ", zone->free_area[order].nr_free);
seq_putc(m, '\n');
}
/*
* This walks the free areas for each zone.
*/
static int frag_show(struct seq_file *m, void *arg)
{
pg_data_t *pgdat = (pg_data_t *)arg;
walk_zones_in_node(m, pgdat, true, false, frag_show_print);
return 0;
}
static void pagetypeinfo_showfree_print(struct seq_file *m,
pg_data_t *pgdat, struct zone *zone)
{
int order, mtype;
for (mtype = 0; mtype < MIGRATE_TYPES; mtype++) {
seq_printf(m, "Node %4d, zone %8s, type %12s ",
pgdat->node_id,
zone->name,
migratetype_names[mtype]);
for (order = 0; order < MAX_ORDER; ++order) {
unsigned long freecount = 0;
struct free_area *area;
struct list_head *curr;
area = &(zone->free_area[order]);
list_for_each(curr, &area->free_list[mtype])
freecount++;
seq_printf(m, "%6lu ", freecount);
}
seq_putc(m, '\n');
}
}
/* Print out the free pages at each order for each migratetype */
static int pagetypeinfo_showfree(struct seq_file *m, void *arg)
{
int order;
pg_data_t *pgdat = (pg_data_t *)arg;
/* Print header */
seq_printf(m, "%-43s ", "Free pages count per migrate type at order");
for (order = 0; order < MAX_ORDER; ++order)
seq_printf(m, "%6d ", order);
seq_putc(m, '\n');
walk_zones_in_node(m, pgdat, true, false, pagetypeinfo_showfree_print);
return 0;
}
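/*
 * Count the pageblocks of each migratetype in a zone by sampling the first
 * online page of every pageblock.
 */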
static void pagetypeinfo_showblockcount_print(struct seq_file *m,
pg_data_t *pgdat, struct zone *zone)
{
int mtype;
unsigned long pfn;
unsigned long start_pfn = zone->zone_start_pfn;
unsigned long end_pfn = zone_end_pfn(zone);
unsigned long count[MIGRATE_TYPES] = { 0, };
for (pfn = start_pfn; pfn < end_pfn; pfn += pageblock_nr_pages) {
struct page *page;
page = pfn_to_online_page(pfn);
if (!page)
continue;
/* Watch for unexpected holes punched in the memmap */
if (!memmap_valid_within(pfn, page, zone))
continue;
if (page_zone(page) != zone)
continue;
mtype = get_pageblock_migratetype(page);
if (mtype < MIGRATE_TYPES)
count[mtype]++;
}
/* Print counts */
seq_printf(m, "Node %d, zone %8s ", pgdat->node_id, zone->name);
for (mtype = 0; mtype < MIGRATE_TYPES; mtype++)
seq_printf(m, "%12lu ", count[mtype]);
seq_putc(m, '\n');
}
/* Print out the number of pageblocks for each migratetype */
static int pagetypeinfo_showblockcount(struct seq_file *m, void *arg)
{
int mtype;
pg_data_t *pgdat = (pg_data_t *)arg;
seq_printf(m, "\n%-23s", "Number of blocks type ");
for (mtype = 0; mtype < MIGRATE_TYPES; mtype++)
seq_printf(m, "%12s ", migratetype_names[mtype]);
seq_putc(m, '\n');
walk_zones_in_node(m, pgdat, true, false,
pagetypeinfo_showblockcount_print);
return 0;
}
/*
* Print out the number of pageblocks for each migratetype that contain pages
* of other types. This gives an indication of how well fallbacks are being
* contained by rmqueue_fallback(). It requires information from PAGE_OWNER
* to determine what is going on.
*/
static void pagetypeinfo_showmixedcount(struct seq_file *m, pg_data_t *pgdat)
{
#ifdef CONFIG_PAGE_OWNER
int mtype;
if (!static_branch_unlikely(&page_owner_inited))
return;
drain_all_pages(NULL);
seq_printf(m, "\n%-23s", "Number of mixed blocks ");
for (mtype = 0; mtype < MIGRATE_TYPES; mtype++)
seq_printf(m, "%12s ", migratetype_names[mtype]);
seq_putc(m, '\n');
walk_zones_in_node(m, pgdat, true, true,
pagetypeinfo_showmixedcount_print);
#endif /* CONFIG_PAGE_OWNER */
}
/*
* This prints out statistics in relation to grouping pages by mobility.
* It is expensive to collect so do not constantly read the file.
*/
static int pagetypeinfo_show(struct seq_file *m, void *arg)
{
pg_data_t *pgdat = (pg_data_t *)arg;
/* check memoryless node */
if (!node_state(pgdat->node_id, N_MEMORY))
return 0;
seq_printf(m, "Page block order: %d\n", pageblock_order);
seq_printf(m, "Pages per block: %lu\n", pageblock_nr_pages);
seq_putc(m, '\n');
pagetypeinfo_showfree(m, pgdat);
pagetypeinfo_showblockcount(m, pgdat);
pagetypeinfo_showmixedcount(m, pgdat);
return 0;
}
static const struct seq_operations fragmentation_op = {
.start = frag_start,
.next = frag_next,
.stop = frag_stop,
.show = frag_show,
};
static const struct seq_operations pagetypeinfo_op = {
.start = frag_start,
.next = frag_next,
.stop = frag_stop,
.show = pagetypeinfo_show,
};
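/*
 * Return true if @zone is the first populated zone of its node; the per-node
 * stats in zoneinfo_show_print() are emitted only for that zone so they
 * appear once per node.
 */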
static bool is_zone_first_populated(pg_data_t *pgdat, struct zone *zone)
{
int zid;
for (zid = 0; zid < MAX_NR_ZONES; zid++) {
struct zone *compare = &pgdat->node_zones[zid];
if (populated_zone(compare))
return zone == compare;
}
return false;
}
static void zoneinfo_show_print(struct seq_file *m, pg_data_t *pgdat,
struct zone *zone)
{
int i;
seq_printf(m, "Node %d, zone %8s", pgdat->node_id, zone->name);
if (is_zone_first_populated(pgdat, zone)) {
seq_printf(m, "\n per-node stats");
for (i = 0; i < NR_VM_NODE_STAT_ITEMS; i++) {
/* Skip hidden vmstat items. */
if (*vmstat_text[i + NR_VM_ZONE_STAT_ITEMS +
NR_VM_NUMA_STAT_ITEMS] == '\0')
continue;
seq_printf(m, "\n %-12s %lu",
vmstat_text[i + NR_VM_ZONE_STAT_ITEMS +
NR_VM_NUMA_STAT_ITEMS],
node_page_state(pgdat, i));
}
}
seq_printf(m,
"\n pages free %lu"
"\n min %lu"
"\n low %lu"
"\n high %lu"
"\n spanned %lu"
"\n present %lu"
"\n managed %lu",
zone_page_state(zone, NR_FREE_PAGES),
min_wmark_pages(zone),
low_wmark_pages(zone),
high_wmark_pages(zone),
zone->spanned_pages,
zone->present_pages,
zone->managed_pages);
seq_printf(m,
"\n protection: (%ld",
zone->lowmem_reserve[0]);
for (i = 1; i < ARRAY_SIZE(zone->lowmem_reserve); i++)
seq_printf(m, ", %ld", zone->lowmem_reserve[i]);
seq_putc(m, ')');
/* If unpopulated, no other information is useful */
if (!populated_zone(zone)) {
seq_putc(m, '\n');
return;
}
for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
seq_printf(m, "\n %-12s %lu", vmstat_text[i],
zone_page_state(zone, i));
#ifdef CONFIG_NUMA
for (i = 0; i < NR_VM_NUMA_STAT_ITEMS; i++)
seq_printf(m, "\n %-12s %lu",
vmstat_text[i + NR_VM_ZONE_STAT_ITEMS],
zone_numa_state_snapshot(zone, i));
#endif
seq_printf(m, "\n pagesets");
for_each_online_cpu(i) {
struct per_cpu_pageset *pageset;
pageset = per_cpu_ptr(zone->pageset, i);
seq_printf(m,
"\n cpu: %i"
"\n count: %i"
"\n high: %i"
"\n batch: %i",
i,
pageset->pcp.count,
pageset->pcp.high,
pageset->pcp.batch);
#ifdef CONFIG_SMP
seq_printf(m, "\n vm stats threshold: %d",
pageset->stat_threshold);
#endif
}
seq_printf(m,
"\n node_unreclaimable: %u"
"\n start_pfn: %lu",
pgdat->kswapd_failures >= MAX_RECLAIM_RETRIES,
zone->zone_start_pfn);
seq_putc(m, '\n');
}
/*
* Output information about zones in @pgdat. All zones are printed regardless
* of whether they are populated or not: lowmem_reserve_ratio operates on the
* set of all zones and userspace would not be aware of such zones if they are
* suppressed here (zoneinfo displays the effect of lowmem_reserve_ratio).
*/
static int zoneinfo_show(struct seq_file *m, void *arg)
{
pg_data_t *pgdat = (pg_data_t *)arg;
walk_zones_in_node(m, pgdat, false, false, zoneinfo_show_print);
return 0;
}
static const struct seq_operations zoneinfo_op = {
.start = frag_start, /* iterate over all zones. The same as in
* fragmentation. */
.next = frag_next,
.stop = frag_stop,
.show = zoneinfo_show,
};
enum writeback_stat_item {
NR_DIRTY_THRESHOLD,
NR_DIRTY_BG_THRESHOLD,
NR_VM_WRITEBACK_STAT_ITEMS,
};
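/*
 * Build the flat snapshot that backs /proc/vmstat: values are stored in
 * vmstat_text[] order: zone stats, then NUMA stats (if configured), then
 * node stats, then the two writeback thresholds and, with
 * CONFIG_VM_EVENT_COUNTERS, the vm event counters.
 */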
static void *vmstat_start(struct seq_file *m, loff_t *pos)
{
unsigned long *v;
int i, stat_items_size;
if (*pos >= ARRAY_SIZE(vmstat_text))
return NULL;
stat_items_size = NR_VM_ZONE_STAT_ITEMS * sizeof(unsigned long) +
NR_VM_NUMA_STAT_ITEMS * sizeof(unsigned long) +
NR_VM_NODE_STAT_ITEMS * sizeof(unsigned long) +
NR_VM_WRITEBACK_STAT_ITEMS * sizeof(unsigned long);
#ifdef CONFIG_VM_EVENT_COUNTERS
stat_items_size += sizeof(struct vm_event_state);
#endif
v = kmalloc(stat_items_size, GFP_KERNEL);
m->private = v;
if (!v)
return ERR_PTR(-ENOMEM);
for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
v[i] = global_zone_page_state(i);
v += NR_VM_ZONE_STAT_ITEMS;
#ifdef CONFIG_NUMA
for (i = 0; i < NR_VM_NUMA_STAT_ITEMS; i++)
v[i] = global_numa_state(i);
v += NR_VM_NUMA_STAT_ITEMS;
#endif
for (i = 0; i < NR_VM_NODE_STAT_ITEMS; i++)
v[i] = global_node_page_state(i);
v += NR_VM_NODE_STAT_ITEMS;
global_dirty_limits(v + NR_DIRTY_BG_THRESHOLD,
v + NR_DIRTY_THRESHOLD);
v += NR_VM_WRITEBACK_STAT_ITEMS;
#ifdef CONFIG_VM_EVENT_COUNTERS
all_vm_events(v);
v[PGPGIN] /= 2; /* sectors -> kbytes */
v[PGPGOUT] /= 2;
#endif
return (unsigned long *)m->private + *pos;
}
static void *vmstat_next(struct seq_file *m, void *arg, loff_t *pos)
{
(*pos)++;
if (*pos >= ARRAY_SIZE(vmstat_text))
return NULL;
return (unsigned long *)m->private + *pos;
}
static int vmstat_show(struct seq_file *m, void *arg)
{
unsigned long *l = arg;
unsigned long off = l - (unsigned long *)m->private;
seq_puts(m, vmstat_text[off]);
seq_put_decimal_ull(m, " ", *l);
seq_putc(m, '\n');
return 0;
}
static void vmstat_stop(struct seq_file *m, void *arg)
{
kfree(m->private);
m->private = NULL;
}
static const struct seq_operations vmstat_op = {
.start = vmstat_start,
.next = vmstat_next,
.stop = vmstat_stop,
.show = vmstat_show,
};
#endif /* CONFIG_PROC_FS */
#ifdef CONFIG_SMP
static DEFINE_PER_CPU(struct delayed_work, vmstat_work);
int sysctl_stat_interval __read_mostly = HZ;
#ifdef CONFIG_PROC_FS
static void refresh_vm_stats(struct work_struct *work)
{
refresh_cpu_vm_stats(true);
}
int vmstat_refresh(struct ctl_table *table, int write,
void __user *buffer, size_t *lenp, loff_t *ppos)
{
long val;
int err;
int i;
/*
* The regular update, every sysctl_stat_interval, may come later
* than expected: leaving a significant amount in per_cpu buckets.
* This is particularly misleading when checking a quantity of HUGE
* pages, immediately after running a test. /proc/sys/vm/stat_refresh,
* which can equally be echo'ed to or cat'ted from (by root),
* can be used to update the stats just before reading them.
*
* Oh, and since global_zone_page_state() etc. are so careful to hide
* transiently negative values, report an error here if any of
* the stats is negative, so we know to go looking for imbalance.
*/
err = schedule_on_each_cpu(refresh_vm_stats);
if (err)
return err;
for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++) {
val = atomic_long_read(&vm_zone_stat[i]);
if (val < 0) {
pr_warn("%s: %s %ld\n",
__func__, vmstat_text[i], val);
err = -EINVAL;
}
}
#ifdef CONFIG_NUMA
for (i = 0; i < NR_VM_NUMA_STAT_ITEMS; i++) {
val = atomic_long_read(&vm_numa_stat[i]);
if (val < 0) {
pr_warn("%s: %s %ld\n",
__func__, vmstat_text[i + NR_VM_ZONE_STAT_ITEMS], val);
err = -EINVAL;
}
}
#endif
if (err)
return err;
if (write)
*ppos += *lenp;
else
*lenp = 0;
return 0;
}
#endif /* CONFIG_PROC_FS */
static void vmstat_update(struct work_struct *w)
{
if (refresh_cpu_vm_stats(true) && !cpu_isolated(smp_processor_id())) {
/*
* Counters were updated so we expect more updates
* to occur in the future. Keep on running the
* update worker thread.
*/
queue_delayed_work_on(smp_processor_id(), mm_percpu_wq,
this_cpu_ptr(&vmstat_work),
round_jiffies_relative(sysctl_stat_interval));
}
}
/*
* Check if the diffs for a certain cpu indicate that
* an update is needed.
*/
static bool need_update(int cpu)
{
struct zone *zone;
for_each_populated_zone(zone) {
struct per_cpu_pageset *p = per_cpu_ptr(zone->pageset, cpu);
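/*
 * The memchr_inv() scans below treat the per-cpu diff arrays as raw
 * bytes, so these assertions pin the element sizes they rely on.
 */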
BUILD_BUG_ON(sizeof(p->vm_stat_diff[0]) != 1);
#ifdef CONFIG_NUMA
BUILD_BUG_ON(sizeof(p->vm_numa_stat_diff[0]) != 2);
#endif
/*
* The fast way of checking if there are any vmstat diffs.
*/
if (memchr_inv(p->vm_stat_diff, 0, NR_VM_ZONE_STAT_ITEMS *
sizeof(p->vm_stat_diff[0])))
return true;
#ifdef CONFIG_NUMA
if (memchr_inv(p->vm_numa_stat_diff, 0, NR_VM_NUMA_STAT_ITEMS *
sizeof(p->vm_numa_stat_diff[0])))
return true;
#endif
}
return false;
}
/*
* Switch off vmstat processing and then fold all the remaining differentials
* until the diffs stay at zero. The function is used by NOHZ and can only be
* invoked when tick processing is not active.
*/
void quiet_vmstat(void)
{
if (system_state != SYSTEM_RUNNING)
return;
if (!delayed_work_pending(this_cpu_ptr(&vmstat_work)))
return;
if (!need_update(smp_processor_id()))
return;
/*
* Just refresh counters and do not care about the pending delayed
* vmstat_update. It doesn't fire that often to matter and canceling
* it would be too expensive from this path.
* vmstat_shepherd will take care about that for us.
*/
refresh_cpu_vm_stats(false);
}
/*
* Shepherd worker thread that checks the
* differentials of processors that have their worker
* threads for vm statistics updates disabled because of
* inactivity.
*/
static void vmstat_shepherd(struct work_struct *w);
static DECLARE_DEFERRABLE_WORK(shepherd, vmstat_shepherd);
static void vmstat_shepherd(struct work_struct *w)
{
int cpu;
get_online_cpus();
/* Check processors whose vmstat worker threads have been disabled */
for_each_online_cpu(cpu) {
struct delayed_work *dw = &per_cpu(vmstat_work, cpu);
if (!delayed_work_pending(dw) && need_update(cpu) &&
!cpu_isolated(cpu))
queue_delayed_work_on(cpu, mm_percpu_wq, dw, 0);
}
put_online_cpus();
schedule_delayed_work(&shepherd,
round_jiffies_relative(sysctl_stat_interval));
}
static void __init start_shepherd_timer(void)
{
int cpu;
for_each_possible_cpu(cpu)
INIT_DEFERRABLE_WORK(per_cpu_ptr(&vmstat_work, cpu),
vmstat_update);
schedule_delayed_work(&shepherd,
round_jiffies_relative(sysctl_stat_interval));
}
static void __init init_cpu_node_state(void)
{
int node;
for_each_online_node(node) {
if (cpumask_weight(cpumask_of_node(node)) > 0)
node_set_state(node, N_CPU);
}
}
static int vmstat_cpu_online(unsigned int cpu)
{
refresh_zone_stat_thresholds();
node_set_state(cpu_to_node(cpu), N_CPU);
return 0;
}
static int vmstat_cpu_down_prep(unsigned int cpu)
{
cancel_delayed_work_sync(&per_cpu(vmstat_work, cpu));
return 0;
}
static int vmstat_cpu_dead(unsigned int cpu)
{
const struct cpumask *node_cpus;
int node;
node = cpu_to_node(cpu);
refresh_zone_stat_thresholds();
node_cpus = cpumask_of_node(node);
if (cpumask_weight(node_cpus) > 0)
return 0;
node_clear_state(node, N_CPU);
return 0;
}
#endif
struct workqueue_struct *mm_percpu_wq;
void __init init_mm_internals(void)
{
int ret __maybe_unused;
mm_percpu_wq = alloc_workqueue("mm_percpu_wq", WQ_MEM_RECLAIM, 0);
#ifdef CONFIG_SMP
ret = cpuhp_setup_state_nocalls(CPUHP_MM_VMSTAT_DEAD, "mm/vmstat:dead",
NULL, vmstat_cpu_dead);
if (ret < 0)
pr_err("vmstat: failed to register 'dead' hotplug state\n");
ret = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN, "mm/vmstat:online",
vmstat_cpu_online,
vmstat_cpu_down_prep);
if (ret < 0)
pr_err("vmstat: failed to register 'online' hotplug state\n");
get_online_cpus();
init_cpu_node_state();
put_online_cpus();
start_shepherd_timer();
#endif
#ifdef CONFIG_PROC_FS
proc_create_seq("buddyinfo", 0444, NULL, &fragmentation_op);
proc_create_seq("pagetypeinfo", 0400, NULL, &pagetypeinfo_op);
proc_create_seq("vmstat", 0444, NULL, &vmstat_op);
proc_create_seq("zoneinfo", 0444, NULL, &zoneinfo_op);
#endif
}
#if defined(CONFIG_DEBUG_FS) && defined(CONFIG_COMPACTION)
/*
* Return an index indicating how much of the available free memory is
* unusable for an allocation of the requested size.
*/
static int unusable_free_index(unsigned int order,
struct contig_page_info *info)
{
/* No free memory is interpreted as all free memory is unusable */
if (info->free_pages == 0)
return 1000;
/*
* Index should be a value between 0 and 1. Return a value to 3
* decimal places.
*
* 0 => no fragmentation
* 1 => high fragmentation
*/
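/*
 * Worked example: 1000 free pages of which 100 blocks are suitable at
 * order 2 gives (1000 - 100*4) * 1000 / 1000 = 600, i.e. 0.600: 60% of
 * free memory is unusable for an order-2 request.
 */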
return div_u64((info->free_pages - (info->free_blocks_suitable << order)) * 1000ULL, info->free_pages);
}
static void unusable_show_print(struct seq_file *m,
pg_data_t *pgdat, struct zone *zone)
{
unsigned int order;
int index;
struct contig_page_info info;
seq_printf(m, "Node %d, zone %8s ",
pgdat->node_id,
zone->name);
for (order = 0; order < MAX_ORDER; ++order) {
fill_contig_page_info(zone, order, &info);
index = unusable_free_index(order, &info);
seq_printf(m, "%d.%03d ", index / 1000, index % 1000);
}
seq_putc(m, '\n');
}
/*
* Display unusable free space index
*
* The unusable free space index measures how much of the available free
* memory cannot be used to satisfy an allocation of a given size and is a
* value between 0 and 1. The higher the value, the more of free memory is
* unusable and by implication, the worse the external fragmentation is. This
* can be expressed as a percentage by multiplying by 100.
*/
static int unusable_show(struct seq_file *m, void *arg)
{
pg_data_t *pgdat = (pg_data_t *)arg;
/* check memoryless node */
if (!node_state(pgdat->node_id, N_MEMORY))
return 0;
walk_zones_in_node(m, pgdat, true, false, unusable_show_print);
return 0;
}
static const struct seq_operations unusable_op = {
.start = frag_start,
.next = frag_next,
.stop = frag_stop,
.show = unusable_show,
};
static int unusable_open(struct inode *inode, struct file *file)
{
return seq_open(file, &unusable_op);
}
static const struct file_operations unusable_file_ops = {
.open = unusable_open,
.read = seq_read,
.llseek = seq_lseek,
.release = seq_release,
};
static void extfrag_show_print(struct seq_file *m,
pg_data_t *pgdat, struct zone *zone)
{
unsigned int order;
int index;
/* Alloc on stack as interrupts are disabled for zone walk */
struct contig_page_info info;
seq_printf(m, "Node %d, zone %8s ",
pgdat->node_id,
zone->name);
for (order = 0; order < MAX_ORDER; ++order) {
fill_contig_page_info(zone, order, &info);
index = __fragmentation_index(order, &info);
seq_printf(m, "%d.%03d ", index / 1000, index % 1000);
}
seq_putc(m, '\n');
}
/*
* Display fragmentation index for orders that allocations would fail for
*/
static int extfrag_show(struct seq_file *m, void *arg)
{
pg_data_t *pgdat = (pg_data_t *)arg;
walk_zones_in_node(m, pgdat, true, false, extfrag_show_print);
return 0;
}
static const struct seq_operations extfrag_op = {
.start = frag_start,
.next = frag_next,
.stop = frag_stop,
.show = extfrag_show,
};
static int extfrag_open(struct inode *inode, struct file *file)
{
return seq_open(file, &extfrag_op);
}
static const struct file_operations extfrag_file_ops = {
.open = extfrag_open,
.read = seq_read,
.llseek = seq_lseek,
.release = seq_release,
};
static int __init extfrag_debug_init(void)
{
struct dentry *extfrag_debug_root;
extfrag_debug_root = debugfs_create_dir("extfrag", NULL);
if (!extfrag_debug_root)
return -ENOMEM;
if (!debugfs_create_file("unusable_index", 0444,
extfrag_debug_root, NULL, &unusable_file_ops))
goto fail;
if (!debugfs_create_file("extfrag_index", 0444,
extfrag_debug_root, NULL, &extfrag_file_ops))
goto fail;
return 0;
fail:
debugfs_remove_recursive(extfrag_debug_root);
return -ENOMEM;
}
module_init(extfrag_debug_init);
#endif