// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *
 * Swap reorganised 29.12.95, Stephen Tweedie.
 * kswapd added: 7.1.96  sct
 * Removed kswapd_ctl limits, and swap out as many pages as needed
 * to bring the system back to freepages.high: 2.4.97, Rik van Riel.
 * Zone aware kswapd started 02/00, Kanoj Sarcar (kanoj@sgi.com).
 * Multiqueue VM started 5.8.00, Rik van Riel.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/mm.h>
#include <linux/sched/mm.h>
#include <linux/module.h>
#include <linux/gfp.h>
#include <linux/kernel_stat.h>
#include <linux/swap.h>
#include <linux/pagemap.h>
#include <linux/init.h>
#include <linux/highmem.h>
#include <linux/vmpressure.h>
#include <linux/vmstat.h>
#include <linux/file.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/buffer_head.h>	/* for try_to_release_page(),
				   buffer_heads_over_limit */
#include <linux/mm_inline.h>
#include <linux/backing-dev.h>
#include <linux/rmap.h>
#include <linux/topology.h>
#include <linux/cpu.h>
#include <linux/cpuset.h>
#include <linux/compaction.h>
#include <linux/notifier.h>
#include <linux/rwsem.h>
#include <linux/delay.h>
#include <linux/kthread.h>
#include <linux/freezer.h>
#include <linux/memcontrol.h>
#include <linux/migrate.h>
#include <linux/delayacct.h>
#include <linux/sysctl.h>
#include <linux/oom.h>
#include <linux/pagevec.h>
#include <linux/prefetch.h>
#include <linux/printk.h>
#include <linux/dax.h>
#include <linux/psi.h>
#include <linux/pagewalk.h>
#include <linux/shmem_fs.h>
#include <linux/ctype.h>
#include <linux/debugfs.h>

#include <asm/tlbflush.h>
#include <asm/div64.h>

#include <linux/swapops.h>
#include <linux/balloon_compaction.h>

#include "internal.h"

#define CREATE_TRACE_POINTS
#include <trace/events/vmscan.h>

#undef CREATE_TRACE_POINTS
#include <trace/hooks/vmscan.h>
struct scan_control {
	/* How many pages shrink_list() should reclaim */
	unsigned long nr_to_reclaim;

	/*
	 * Nodemask of nodes allowed by the caller. If NULL, all nodes
	 * are scanned.
	 */
	nodemask_t *nodemask;

	/*
	 * The memory cgroup that hit its limit and as a result is the
	 * primary target of this reclaim invocation.
	 */
	struct mem_cgroup *target_mem_cgroup;

	/*
	 * Scan pressure balancing between anon and file LRUs
	 */
	unsigned long anon_cost;
	unsigned long file_cost;

	/* Can active pages be deactivated as part of reclaim? */
#define DEACTIVATE_ANON 1
#define DEACTIVATE_FILE 2
	unsigned int may_deactivate:2;
	unsigned int force_deactivate:1;
	unsigned int skipped_deactivate:1;

	/* Writepage batching in laptop mode; RECLAIM_WRITE */
	unsigned int may_writepage:1;

	/* Can mapped pages be reclaimed? */
	unsigned int may_unmap:1;

	/* Can pages be swapped as part of reclaim? */
	unsigned int may_swap:1;

	/*
	 * Cgroup memory below memory.low is protected as long as we
	 * don't threaten to OOM. If any cgroup is reclaimed at
	 * reduced force or passed over entirely due to its memory.low
	 * setting (memcg_low_skipped), and nothing is reclaimed as a
	 * result, then go back for one more cycle that reclaims the protected
	 * memory (memcg_low_reclaim) to avert OOM.
	 */
	unsigned int memcg_low_reclaim:1;
	unsigned int memcg_low_skipped:1;

	unsigned int hibernation_mode:1;

	/* One of the zones is ready for compaction */
	unsigned int compaction_ready:1;

	/* There is easily reclaimable cold cache in the current node */
	unsigned int cache_trim_mode:1;

	/* The file pages on the current node are dangerously low */
	unsigned int file_is_tiny:1;

	/* Always discard instead of demoting to lower tier memory */
	unsigned int no_demotion:1;

#ifdef CONFIG_LRU_GEN
	/* help make better choices when multiple memcgs are available */
	unsigned int memcgs_need_aging:1;
	unsigned int memcgs_need_swapping:1;
	unsigned int memcgs_avoid_swapping:1;
#endif

	/* Allocation order */
	s8 order;

	/* Scan (total_size >> priority) pages at once */
	s8 priority;

	/* The highest zone to isolate pages for reclaim from */
	s8 reclaim_idx;

	/* This context's GFP mask */
	gfp_t gfp_mask;

	/* Incremented by the number of inactive pages that were scanned */
	unsigned long nr_scanned;

	/* Number of pages freed so far during a call to shrink_zones() */
	unsigned long nr_reclaimed;

	struct {
		unsigned int dirty;
		unsigned int unqueued_dirty;
		unsigned int congested;
		unsigned int writeback;
		unsigned int immediate;
		unsigned int file_taken;
		unsigned int taken;
	} nr;

	/* for recording the reclaimed slab by now */
	struct reclaim_state reclaim_state;
};

#ifdef ARCH_HAS_PREFETCHW
#define prefetchw_prev_lru_page(_page, _base, _field)			\
	do {								\
		if ((_page)->lru.prev != _base) {			\
			struct page *prev;				\
									\
			prev = lru_to_page(&(_page->lru));		\
			prefetchw(&prev->_field);			\
		}							\
	} while (0)
#else
#define prefetchw_prev_lru_page(_page, _base, _field) do { } while (0)
#endif

/*
 * From 0 .. 200.  Higher means more swappy.
 */
int vm_swappiness = 60;

static void set_task_reclaim_state(struct task_struct *task,
				   struct reclaim_state *rs)
{
	/* Check for an overwrite */
	WARN_ON_ONCE(rs && task->reclaim_state);

	/* Check for the nulling of an already-nulled member */
	WARN_ON_ONCE(!rs && !task->reclaim_state);

	task->reclaim_state = rs;
}

static LIST_HEAD(shrinker_list);
static DECLARE_RWSEM(shrinker_rwsem);

#ifdef CONFIG_MEMCG
static int shrinker_nr_max;

/* The shrinker_info is expanded in a batch of BITS_PER_LONG */
static inline int shrinker_map_size(int nr_items)
{
	return (DIV_ROUND_UP(nr_items, BITS_PER_LONG) * sizeof(unsigned long));
}

static inline int shrinker_defer_size(int nr_items)
{
	return (round_up(nr_items, BITS_PER_LONG) * sizeof(atomic_long_t));
}
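
/*
 * Each per-memcg, per-node shrinker_info is one allocation laid out as
 * [struct shrinker_info][nr_deferred array][map bitmap]: nr_deferred starts
 * right after the struct (info + 1) and the bitmap starts defer_size bytes
 * after that, which is how expand_one_shrinker_info() and
 * alloc_shrinker_info() below carve up the buffer.
 */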

static struct shrinker_info *shrinker_info_protected(struct mem_cgroup *memcg,
						     int nid)
{
	return rcu_dereference_protected(memcg->nodeinfo[nid]->shrinker_info,
					 lockdep_is_held(&shrinker_rwsem));
}

static int expand_one_shrinker_info(struct mem_cgroup *memcg,
				    int map_size, int defer_size,
				    int old_map_size, int old_defer_size)
{
	struct shrinker_info *new, *old;
	struct mem_cgroup_per_node *pn;
	int nid;
	int size = map_size + defer_size;

	for_each_node(nid) {
		pn = memcg->nodeinfo[nid];
		old = shrinker_info_protected(memcg, nid);
		/* Not yet online memcg */
		if (!old)
			return 0;

		new = kvmalloc_node(sizeof(*new) + size, GFP_KERNEL, nid);
		if (!new)
			return -ENOMEM;

		new->nr_deferred = (atomic_long_t *)(new + 1);
		new->map = (void *)new->nr_deferred + defer_size;

		/* map: set all old bits, clear all new bits */
		memset(new->map, (int)0xff, old_map_size);
		memset((void *)new->map + old_map_size, 0, map_size - old_map_size);
		/* nr_deferred: copy old values, clear all new values */
		memcpy(new->nr_deferred, old->nr_deferred, old_defer_size);
		memset((void *)new->nr_deferred + old_defer_size, 0,
		       defer_size - old_defer_size);

		rcu_assign_pointer(pn->shrinker_info, new);
		kvfree_rcu(old, rcu);
	}

	return 0;
}

void free_shrinker_info(struct mem_cgroup *memcg)
{
	struct mem_cgroup_per_node *pn;
	struct shrinker_info *info;
	int nid;

	for_each_node(nid) {
		pn = memcg->nodeinfo[nid];
		info = rcu_dereference_protected(pn->shrinker_info, true);
		kvfree(info);
		rcu_assign_pointer(pn->shrinker_info, NULL);
	}
}

int alloc_shrinker_info(struct mem_cgroup *memcg)
{
	struct shrinker_info *info;
	int nid, size, ret = 0;
	int map_size, defer_size = 0;

	down_write(&shrinker_rwsem);
	map_size = shrinker_map_size(shrinker_nr_max);
	defer_size = shrinker_defer_size(shrinker_nr_max);
	size = map_size + defer_size;
	for_each_node(nid) {
		info = kvzalloc_node(sizeof(*info) + size, GFP_KERNEL, nid);
		if (!info) {
			free_shrinker_info(memcg);
			ret = -ENOMEM;
			break;
		}
		info->nr_deferred = (atomic_long_t *)(info + 1);
		info->map = (void *)info->nr_deferred + defer_size;
		rcu_assign_pointer(memcg->nodeinfo[nid]->shrinker_info, info);
	}
	up_write(&shrinker_rwsem);

	return ret;
}

static inline bool need_expand(int nr_max)
{
	return round_up(nr_max, BITS_PER_LONG) >
	       round_up(shrinker_nr_max, BITS_PER_LONG);
}

static int expand_shrinker_info(int new_id)
{
	int ret = 0;
	int new_nr_max = new_id + 1;
	int map_size, defer_size = 0;
	int old_map_size, old_defer_size = 0;
	struct mem_cgroup *memcg;

	if (!need_expand(new_nr_max))
		goto out;

	if (!root_mem_cgroup)
		goto out;

	lockdep_assert_held(&shrinker_rwsem);

	map_size = shrinker_map_size(new_nr_max);
	defer_size = shrinker_defer_size(new_nr_max);
	old_map_size = shrinker_map_size(shrinker_nr_max);
	old_defer_size = shrinker_defer_size(shrinker_nr_max);

	memcg = mem_cgroup_iter(NULL, NULL, NULL);
	do {
		ret = expand_one_shrinker_info(memcg, map_size, defer_size,
					       old_map_size, old_defer_size);
		if (ret) {
			mem_cgroup_iter_break(NULL, memcg);
			goto out;
		}
	} while ((memcg = mem_cgroup_iter(NULL, memcg, NULL)) != NULL);
out:
	if (!ret)
		shrinker_nr_max = new_nr_max;

	return ret;
}

void set_shrinker_bit(struct mem_cgroup *memcg, int nid, int shrinker_id)
{
	if (shrinker_id >= 0 && memcg && !mem_cgroup_is_root(memcg)) {
		struct shrinker_info *info;

		rcu_read_lock();
		info = rcu_dereference(memcg->nodeinfo[nid]->shrinker_info);
		/* Pairs with smp_mb__after_atomic() in shrink_slab_memcg() */
		smp_mb__before_atomic();
		set_bit(shrinker_id, info->map);
		rcu_read_unlock();
	}
}

static DEFINE_IDR(shrinker_idr);

static int prealloc_memcg_shrinker(struct shrinker *shrinker)
{
	int id, ret = -ENOMEM;

	if (mem_cgroup_disabled())
		return -ENOSYS;

	down_write(&shrinker_rwsem);
	/*
	 * The GFP_KERNEL allocation here may recurse into reclaim and reach
	 * shrink_slab(), which is why shrink_slab() must take shrinker_rwsem
	 * with down_read_trylock() rather than block against this writer.
	 */
	id = idr_alloc(&shrinker_idr, shrinker, 0, 0, GFP_KERNEL);
	if (id < 0)
		goto unlock;

	if (id >= shrinker_nr_max) {
		if (expand_shrinker_info(id)) {
			idr_remove(&shrinker_idr, id);
			goto unlock;
		}
	}
	shrinker->id = id;
	ret = 0;
unlock:
	up_write(&shrinker_rwsem);
	return ret;
}

static void unregister_memcg_shrinker(struct shrinker *shrinker)
{
	int id = shrinker->id;

	BUG_ON(id < 0);

	lockdep_assert_held(&shrinker_rwsem);

	idr_remove(&shrinker_idr, id);
}

static long xchg_nr_deferred_memcg(int nid, struct shrinker *shrinker,
				   struct mem_cgroup *memcg)
{
	struct shrinker_info *info;

	info = shrinker_info_protected(memcg, nid);
	return atomic_long_xchg(&info->nr_deferred[shrinker->id], 0);
}

static long add_nr_deferred_memcg(long nr, int nid, struct shrinker *shrinker,
				  struct mem_cgroup *memcg)
{
	struct shrinker_info *info;

	info = shrinker_info_protected(memcg, nid);
	return atomic_long_add_return(nr, &info->nr_deferred[shrinker->id]);
}
| void reparent_shrinker_deferred(struct mem_cgroup *memcg) |
| { |
| int i, nid; |
| long nr; |
| struct mem_cgroup *parent; |
| struct shrinker_info *child_info, *parent_info; |
| |
| parent = parent_mem_cgroup(memcg); |
| if (!parent) |
| parent = root_mem_cgroup; |
| |
| /* Prevent from concurrent shrinker_info expand */ |
| down_read(&shrinker_rwsem); |
| for_each_node(nid) { |
| child_info = shrinker_info_protected(memcg, nid); |
| parent_info = shrinker_info_protected(parent, nid); |
| for (i = 0; i < shrinker_nr_max; i++) { |
| nr = atomic_long_read(&child_info->nr_deferred[i]); |
| atomic_long_add(nr, &parent_info->nr_deferred[i]); |
| } |
| } |
| up_read(&shrinker_rwsem); |
| } |

static bool cgroup_reclaim(struct scan_control *sc)
{
	return sc->target_mem_cgroup;
}

/**
 * writeback_throttling_sane - is the usual dirty throttling mechanism available?
 * @sc: scan_control in question
 *
 * The normal page dirty throttling mechanism in balance_dirty_pages() is
 * completely broken with the legacy memcg and direct stalling in
 * shrink_page_list() is used for throttling instead, which lacks all the
 * niceties such as fairness, adaptive pausing, bandwidth proportional
 * allocation and configurability.
 *
 * This function tests whether the vmscan currently in progress can assume
 * that the normal dirty throttling mechanism is operational.
 */
static bool writeback_throttling_sane(struct scan_control *sc)
{
	if (!cgroup_reclaim(sc))
		return true;
#ifdef CONFIG_CGROUP_WRITEBACK
	if (cgroup_subsys_on_dfl(memory_cgrp_subsys))
		return true;
#endif
	return false;
}
#else
static int prealloc_memcg_shrinker(struct shrinker *shrinker)
{
	return -ENOSYS;
}

static void unregister_memcg_shrinker(struct shrinker *shrinker)
{
}

static long xchg_nr_deferred_memcg(int nid, struct shrinker *shrinker,
				   struct mem_cgroup *memcg)
{
	return 0;
}

static long add_nr_deferred_memcg(long nr, int nid, struct shrinker *shrinker,
				  struct mem_cgroup *memcg)
{
	return 0;
}

static bool cgroup_reclaim(struct scan_control *sc)
{
	return false;
}

static bool writeback_throttling_sane(struct scan_control *sc)
{
	return true;
}
#endif

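/*
 * Fetch-and-clear the deferred work counter for this shrinker.  NUMA-unaware
 * shrinkers only keep a single counter at node 0, and memcg-aware shrinkers
 * keep their counters in the per-memcg shrinker_info rather than in the
 * shrinker itself.
 */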
static long xchg_nr_deferred(struct shrinker *shrinker,
			     struct shrink_control *sc)
{
	int nid = sc->nid;

	if (!(shrinker->flags & SHRINKER_NUMA_AWARE))
		nid = 0;

	if (sc->memcg &&
	    (shrinker->flags & SHRINKER_MEMCG_AWARE))
		return xchg_nr_deferred_memcg(nid, shrinker,
					      sc->memcg);

	return atomic_long_xchg(&shrinker->nr_deferred[nid], 0);
}


static long add_nr_deferred(long nr, struct shrinker *shrinker,
			    struct shrink_control *sc)
{
	int nid = sc->nid;

	if (!(shrinker->flags & SHRINKER_NUMA_AWARE))
		nid = 0;

	if (sc->memcg &&
	    (shrinker->flags & SHRINKER_MEMCG_AWARE))
		return add_nr_deferred_memcg(nr, nid, shrinker,
					     sc->memcg);

	return atomic_long_add_return(nr, &shrinker->nr_deferred[nid]);
}

static bool can_demote(int nid, struct scan_control *sc)
{
	if (!numa_demotion_enabled)
		return false;
	if (sc) {
		if (sc->no_demotion)
			return false;
		/* It is pointless to do demotion in memcg reclaim */
		if (cgroup_reclaim(sc))
			return false;
	}
	if (next_demotion_node(nid) == NUMA_NO_NODE)
		return false;

	return true;
}

static inline bool can_reclaim_anon_pages(struct mem_cgroup *memcg,
					  int nid,
					  struct scan_control *sc)
{
	if (memcg == NULL) {
		/*
		 * For non-memcg reclaim, is there
		 * space in any swap device?
		 */
		if (get_nr_swap_pages() > 0)
			return true;
	} else {
		/* Is the memcg below its swap limit? */
		if (mem_cgroup_get_nr_swap_pages(memcg) > 0)
			return true;
	}

	/*
	 * The page can not be swapped.
	 *
	 * Can it be reclaimed from this node via demotion?
	 */
	return can_demote(nid, sc);
}

/*
 * This misses isolated pages, which are deliberately not accounted for to
 * avoid extra counters.  As the data only determines whether reclaim or
 * compaction continues, it is not expected that isolated pages will be a
 * dominating factor.
 */
unsigned long zone_reclaimable_pages(struct zone *zone)
{
	unsigned long nr;

	nr = zone_page_state_snapshot(zone, NR_ZONE_INACTIVE_FILE) +
		zone_page_state_snapshot(zone, NR_ZONE_ACTIVE_FILE);
	if (can_reclaim_anon_pages(NULL, zone_to_nid(zone), NULL))
		nr += zone_page_state_snapshot(zone, NR_ZONE_INACTIVE_ANON) +
			zone_page_state_snapshot(zone, NR_ZONE_ACTIVE_ANON);

	return nr;
}

/**
 * lruvec_lru_size - Returns the number of pages on the given LRU list.
 * @lruvec: lru vector
 * @lru: lru to use
 * @zone_idx: zones to consider (use MAX_NR_ZONES for the whole LRU list)
 */
static unsigned long lruvec_lru_size(struct lruvec *lruvec, enum lru_list lru,
				     int zone_idx)
{
	unsigned long size = 0;
	int zid;

	for (zid = 0; zid <= zone_idx && zid < MAX_NR_ZONES; zid++) {
		struct zone *zone = &lruvec_pgdat(lruvec)->node_zones[zid];

		if (!managed_zone(zone))
			continue;

		if (!mem_cgroup_disabled())
			size += mem_cgroup_get_zone_lru_size(lruvec, lru, zid);
		else
			size += zone_page_state(zone, NR_ZONE_LRU_BASE + lru);
	}
	return size;
}

/*
 * Add a shrinker callback to be called from the vm.
 */
int prealloc_shrinker(struct shrinker *shrinker)
{
	unsigned int size;
	int err;

	if (shrinker->flags & SHRINKER_MEMCG_AWARE) {
		err = prealloc_memcg_shrinker(shrinker);
		if (err != -ENOSYS)
			return err;

		shrinker->flags &= ~SHRINKER_MEMCG_AWARE;
	}

	size = sizeof(*shrinker->nr_deferred);
	if (shrinker->flags & SHRINKER_NUMA_AWARE)
		size *= nr_node_ids;

	shrinker->nr_deferred = kzalloc(size, GFP_KERNEL);
	if (!shrinker->nr_deferred)
		return -ENOMEM;

	return 0;
}

void free_prealloced_shrinker(struct shrinker *shrinker)
{
	if (shrinker->flags & SHRINKER_MEMCG_AWARE) {
		down_write(&shrinker_rwsem);
		unregister_memcg_shrinker(shrinker);
		up_write(&shrinker_rwsem);
		return;
	}

	kfree(shrinker->nr_deferred);
	shrinker->nr_deferred = NULL;
}

void register_shrinker_prepared(struct shrinker *shrinker)
{
	down_write(&shrinker_rwsem);
	list_add_tail(&shrinker->list, &shrinker_list);
	shrinker->flags |= SHRINKER_REGISTERED;
	up_write(&shrinker_rwsem);
}

int register_shrinker(struct shrinker *shrinker)
{
	int err = prealloc_shrinker(shrinker);

	if (err)
		return err;
	register_shrinker_prepared(shrinker);
	return 0;
}
EXPORT_SYMBOL(register_shrinker);
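
/*
 * Illustrative usage sketch (the demo_* names are hypothetical, not part of
 * this file): a caller supplies count_objects()/scan_objects() callbacks and
 * registers the shrinker once its cache exists:
 *
 *	static unsigned long demo_count(struct shrinker *s,
 *					struct shrink_control *sc)
 *	{
 *		return demo_nr_cached ? demo_nr_cached : SHRINK_EMPTY;
 *	}
 *
 *	static unsigned long demo_scan(struct shrinker *s,
 *				       struct shrink_control *sc)
 *	{
 *		return demo_evict(sc->nr_to_scan);	// objects freed
 *	}
 *
 *	static struct shrinker demo_shrinker = {
 *		.count_objects	= demo_count,
 *		.scan_objects	= demo_scan,
 *		.seeks		= DEFAULT_SEEKS,
 *	};
 *
 *	err = register_shrinker(&demo_shrinker);
 *
 * paired with unregister_shrinker(&demo_shrinker) on teardown.
 */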

/*
 * Remove one shrinker.
 */
void unregister_shrinker(struct shrinker *shrinker)
{
	if (!(shrinker->flags & SHRINKER_REGISTERED))
		return;

	down_write(&shrinker_rwsem);
	list_del(&shrinker->list);
	shrinker->flags &= ~SHRINKER_REGISTERED;
	if (shrinker->flags & SHRINKER_MEMCG_AWARE)
		unregister_memcg_shrinker(shrinker);
	up_write(&shrinker_rwsem);

	kfree(shrinker->nr_deferred);
	shrinker->nr_deferred = NULL;
}
EXPORT_SYMBOL(unregister_shrinker);

#define SHRINK_BATCH 128

static unsigned long do_shrink_slab(struct shrink_control *shrinkctl,
				    struct shrinker *shrinker, int priority)
{
	unsigned long freed = 0;
	unsigned long long delta;
	long total_scan;
	long freeable;
	long nr;
	long new_nr;
	long batch_size = shrinker->batch ? shrinker->batch
					  : SHRINK_BATCH;
	long scanned = 0, next_deferred;

	trace_android_vh_do_shrink_slab(shrinker, shrinkctl, priority);

	freeable = shrinker->count_objects(shrinker, shrinkctl);
	if (freeable == 0 || freeable == SHRINK_EMPTY)
		return freeable;

	/*
	 * copy the current shrinker scan count into a local variable
	 * and zero it so that other concurrent shrinker invocations
	 * don't also do this scanning work.
	 */
	nr = xchg_nr_deferred(shrinker, shrinkctl);

	if (shrinker->seeks) {
		delta = freeable >> priority;
		delta *= 4;
		do_div(delta, shrinker->seeks);
	} else {
		/*
		 * These objects don't require any IO to create. Trim
		 * them aggressively under memory pressure to keep
		 * them from causing refetches in the IO caches.
		 */
		delta = freeable / 2;
	}

	total_scan = nr >> priority;
	total_scan += delta;
	total_scan = min(total_scan, (2 * freeable));

	trace_mm_shrink_slab_start(shrinker, shrinkctl, nr,
				   freeable, delta, total_scan, priority);

	/*
	 * Normally, we should not scan less than batch_size objects in one
	 * pass to avoid too frequent shrinker calls, but if the slab has less
	 * than batch_size objects in total and we are really tight on memory,
	 * we will try to reclaim all available objects, otherwise we can end
	 * up failing allocations although there are plenty of reclaimable
	 * objects spread over several slabs with usage less than the
	 * batch_size.
	 *
	 * We detect the "tight on memory" situations by looking at the total
	 * number of objects we want to scan (total_scan). If it is greater
	 * than the total number of objects on slab (freeable), we must be
	 * scanning at high prio and therefore should try to reclaim as much as
	 * possible.
	 */
	while (total_scan >= batch_size ||
	       total_scan >= freeable) {
		unsigned long ret;
		unsigned long nr_to_scan = min(batch_size, total_scan);

		shrinkctl->nr_to_scan = nr_to_scan;
		shrinkctl->nr_scanned = nr_to_scan;
		ret = shrinker->scan_objects(shrinker, shrinkctl);
		if (ret == SHRINK_STOP)
			break;
		freed += ret;

		count_vm_events(SLABS_SCANNED, shrinkctl->nr_scanned);
		total_scan -= shrinkctl->nr_scanned;
		scanned += shrinkctl->nr_scanned;

		cond_resched();
	}

	/*
	 * The deferred work is increased by any new work (delta) that wasn't
	 * done, decreased by old deferred work that was done now.
	 *
	 * And it is capped at two times the freeable items.
	 */
	next_deferred = max_t(long, (nr + delta - scanned), 0);
	next_deferred = min(next_deferred, (2 * freeable));

	/*
	 * move the unused scan count back into the shrinker in a
	 * manner that handles concurrent updates.
	 */
	new_nr = add_nr_deferred(next_deferred, shrinker, shrinkctl);

	trace_mm_shrink_slab_end(shrinker, shrinkctl->nid, freed, nr, new_nr, total_scan);
	return freed;
}
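
/*
 * Worked example of the scan target above, assuming the defaults
 * (shrinker->seeks == DEFAULT_SEEKS == 2): with freeable == 12800 and
 * priority == 12, delta = (12800 >> 12) * 4 / 2 == 6, so each pass scans
 * roughly freeable >> priority objects, plus whatever deferred work
 * (nr >> priority) was left behind by earlier calls that could not scan,
 * with the total capped at 2 * freeable.
 */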

#ifdef CONFIG_MEMCG
static unsigned long shrink_slab_memcg(gfp_t gfp_mask, int nid,
				       struct mem_cgroup *memcg, int priority)
{
	struct shrinker_info *info;
	unsigned long ret, freed = 0;
	int i;

	if (!mem_cgroup_online(memcg))
		return 0;

	if (!down_read_trylock(&shrinker_rwsem))
		return 0;

	info = shrinker_info_protected(memcg, nid);
	if (unlikely(!info))
		goto unlock;

	for_each_set_bit(i, info->map, shrinker_nr_max) {
		struct shrink_control sc = {
			.gfp_mask = gfp_mask,
			.nid = nid,
			.memcg = memcg,
		};
		struct shrinker *shrinker;

		shrinker = idr_find(&shrinker_idr, i);
		if (unlikely(!shrinker || !(shrinker->flags & SHRINKER_REGISTERED))) {
			if (!shrinker)
				clear_bit(i, info->map);
			continue;
		}

		/* Call non-slab shrinkers even though kmem is disabled */
		if (!memcg_kmem_enabled() &&
		    !(shrinker->flags & SHRINKER_NONSLAB))
			continue;

		ret = do_shrink_slab(&sc, shrinker, priority);
		if (ret == SHRINK_EMPTY) {
			clear_bit(i, info->map);
			/*
			 * After the shrinker reported that it had no objects to
			 * free, but before we cleared the corresponding bit in
			 * the memcg shrinker map, a new object might have been
			 * added. To make sure we have the bit set in this
			 * case, we invoke the shrinker one more time and reset
			 * the bit if it reports that it is not empty anymore.
			 * The memory barrier here pairs with the barrier in
			 * set_shrinker_bit():
			 *
			 * list_lru_add()		shrink_slab_memcg()
			 *   list_add_tail()		  clear_bit()
			 *   <MB>			  <MB>
			 *   set_bit()			  do_shrink_slab()
			 */
			smp_mb__after_atomic();
			ret = do_shrink_slab(&sc, shrinker, priority);
			if (ret == SHRINK_EMPTY)
				ret = 0;
			else
				set_shrinker_bit(memcg, nid, i);
		}
		freed += ret;

		if (rwsem_is_contended(&shrinker_rwsem)) {
			freed = freed ? : 1;
			break;
		}
	}
unlock:
	up_read(&shrinker_rwsem);
	return freed;
}
#else /* CONFIG_MEMCG */
static unsigned long shrink_slab_memcg(gfp_t gfp_mask, int nid,
				       struct mem_cgroup *memcg, int priority)
{
	return 0;
}
#endif /* CONFIG_MEMCG */

/**
 * shrink_slab - shrink slab caches
 * @gfp_mask: allocation context
 * @nid: node whose slab caches to target
 * @memcg: memory cgroup whose slab caches to target
 * @priority: the reclaim priority
 *
 * Call the shrink functions to age shrinkable caches.
 *
 * @nid is passed along to shrinkers with SHRINKER_NUMA_AWARE set,
 * unaware shrinkers will receive a node id of 0 instead.
 *
 * @memcg specifies the memory cgroup to target. Unaware shrinkers
 * are called only if it is the root cgroup.
 *
 * @priority is sc->priority: the number of freeable objects is shifted
 * right by @priority to arrive at the scan target.
 *
 * Returns the number of reclaimed slab objects.
 */
unsigned long shrink_slab(gfp_t gfp_mask, int nid,
			  struct mem_cgroup *memcg,
			  int priority)
{
	unsigned long ret, freed = 0;
	struct shrinker *shrinker;
	bool bypass = false;

	trace_android_vh_shrink_slab_bypass(gfp_mask, nid, memcg, priority, &bypass);
	if (bypass)
		return 0;

	/*
	 * The root memcg might be allocated even though memcg is disabled
	 * via the "cgroup_disable=memory" boot parameter. This could make
	 * mem_cgroup_is_root() return false; only the memcg slab shrink
	 * would then run and the global shrink would be skipped, which may
	 * result in premature OOM.
	 */
	if (!mem_cgroup_disabled() && !mem_cgroup_is_root(memcg))
		return shrink_slab_memcg(gfp_mask, nid, memcg, priority);

	if (!down_read_trylock(&shrinker_rwsem))
		goto out;

	list_for_each_entry(shrinker, &shrinker_list, list) {
		struct shrink_control sc = {
			.gfp_mask = gfp_mask,
			.nid = nid,
			.memcg = memcg,
		};

		ret = do_shrink_slab(&sc, shrinker, priority);
		if (ret == SHRINK_EMPTY)
			ret = 0;
		freed += ret;
		/*
		 * Bail out if someone wants to register a new shrinker to
		 * prevent the registration from being stalled for long periods
		 * by parallel ongoing shrinking.
		 */
		if (rwsem_is_contended(&shrinker_rwsem)) {
			freed = freed ? : 1;
			break;
		}
	}

	up_read(&shrinker_rwsem);
out:
	cond_resched();
	return freed;
}
EXPORT_SYMBOL_GPL(shrink_slab);

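/*
 * The loop below exits once a full pass over all memcgs frees no more than
 * 2^pass objects (freed >> shift), so progressively larger results are
 * required to keep going and reclaim cannot spin forever on slow progress.
 */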
void drop_slab_node(int nid)
{
	unsigned long freed;
	int shift = 0;

	do {
		struct mem_cgroup *memcg = NULL;

		if (fatal_signal_pending(current))
			return;

		freed = 0;
		memcg = mem_cgroup_iter(NULL, NULL, NULL);
		do {
			freed += shrink_slab(GFP_KERNEL, nid, memcg, 0);
		} while ((memcg = mem_cgroup_iter(NULL, memcg, NULL)) != NULL);
	} while ((freed >> shift++) > 1);
}

void drop_slab(void)
{
	int nid;

	for_each_online_node(nid)
		drop_slab_node(nid);
}

static inline int is_page_cache_freeable(struct page *page)
{
	/*
	 * A freeable page cache page is referenced only by the caller
	 * that isolated the page, the page cache and optional buffer
	 * heads at page->private.
	 */
	int page_cache_pins = thp_nr_pages(page);

	return page_count(page) - page_has_private(page) == 1 + page_cache_pins;
}
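
/*
 * For example, an order-0 page cache page with buffer heads is freeable at
 * page_count() == 3: one reference from the isolating caller, one from the
 * page cache, and one accounted to the buffers via page_has_private().
 */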

static int may_write_to_inode(struct inode *inode)
{
	if (current->flags & PF_SWAPWRITE)
		return 1;
	if (!inode_write_congested(inode))
		return 1;
	if (inode_to_bdi(inode) == current->backing_dev_info)
		return 1;
	return 0;
}

/*
 * We detected a synchronous write error writing a page out.  Probably
 * -ENOSPC.  We need to propagate that into the address_space for a subsequent
 * fsync(), msync() or close().
 *
 * The tricky part is that after writepage we cannot touch the mapping: nothing
 * prevents it from being freed up.  But we have a ref on the page and once
 * that page is locked, the mapping is pinned.
 *
 * We're allowed to run sleeping lock_page() here because we know the caller has
 * __GFP_FS.
 */
static void handle_write_error(struct address_space *mapping,
			       struct page *page, int error)
{
	lock_page(page);
	if (page_mapping(page) == mapping)
		mapping_set_error(mapping, error);
	unlock_page(page);
}

/* possible outcome of pageout() */
typedef enum {
	/* failed to write page out, page is locked */
	PAGE_KEEP,
	/* move page to the active list, page is locked */
	PAGE_ACTIVATE,
	/* page has been sent to the disk successfully, page is unlocked */
	PAGE_SUCCESS,
	/* page is clean and locked */
	PAGE_CLEAN,
} pageout_t;

/*
 * pageout is called by shrink_page_list() for each dirty page.
 * Calls ->writepage().
 */
static pageout_t pageout(struct page *page, struct address_space *mapping)
{
	/*
	 * If the page is dirty, only perform writeback if that write
	 * will be non-blocking, to prevent this allocation from being
	 * stalled by pagecache activity.  But note that there may be
	 * stalls if we need to run get_block().  We could test
	 * PagePrivate for that.
	 *
	 * If this process is currently in __generic_file_write_iter() against
	 * this page's queue, we can perform writeback even if that
	 * will block.
	 *
	 * If the page is swapcache, write it back even if that would
	 * block, for some throttling.  This happens by accident, because
	 * swap_backing_dev_info is bust: it doesn't reflect the
	 * congestion state of the swapdevs.  Easy to fix, if needed.
	 */
	if (!is_page_cache_freeable(page))
		return PAGE_KEEP;
	if (!mapping) {
		/*
		 * Some data journaling orphaned pages can have
		 * page->mapping == NULL while being dirty with clean buffers.
		 */
		if (page_has_private(page)) {
			if (try_to_free_buffers(page)) {
				ClearPageDirty(page);
				pr_info("%s: orphaned page\n", __func__);
				return PAGE_CLEAN;
			}
		}
		return PAGE_KEEP;
	}
	if (mapping->a_ops->writepage == NULL)
		return PAGE_ACTIVATE;
	if (!may_write_to_inode(mapping->host))
		return PAGE_KEEP;

	if (clear_page_dirty_for_io(page)) {
		int res;
		struct writeback_control wbc = {
			.sync_mode = WB_SYNC_NONE,
			.nr_to_write = SWAP_CLUSTER_MAX,
			.range_start = 0,
			.range_end = LLONG_MAX,
			.for_reclaim = 1,
		};

		SetPageReclaim(page);
		res = mapping->a_ops->writepage(page, &wbc);
		if (res < 0)
			handle_write_error(mapping, page, res);
		if (res == AOP_WRITEPAGE_ACTIVATE) {
			ClearPageReclaim(page);
			return PAGE_ACTIVATE;
		}

		if (!PageWriteback(page)) {
			/* synchronous write or broken a_ops? */
			ClearPageReclaim(page);
		}
		trace_mm_vmscan_writepage(page);
		inc_node_page_state(page, NR_VMSCAN_WRITE);
		return PAGE_SUCCESS;
	}

	return PAGE_CLEAN;
}

/*
 * Same as remove_mapping, but if the page is removed from the mapping, it
 * gets returned with a refcount of 0.
 */
static int __remove_mapping(struct address_space *mapping, struct page *page,
			    bool reclaimed, struct mem_cgroup *target_memcg)
{
	int refcount;
	void *shadow = NULL;

	BUG_ON(!PageLocked(page));
	BUG_ON(mapping != page_mapping(page));

	xa_lock_irq(&mapping->i_pages);
	/*
	 * The non racy check for a busy page.
	 *
	 * Must be careful with the order of the tests. When someone has
	 * a ref to the page, it may be possible that they dirty it then
	 * drop the reference. So if PageDirty is tested before page_count
	 * here, then the following race may occur:
	 *
	 * get_user_pages(&page);
	 * [user mapping goes away]
	 * write_to(page);
	 *				!PageDirty(page)    [good]
	 * SetPageDirty(page);
	 * put_page(page);
	 *				!page_count(page)   [good, discard it]
	 *
	 * [oops, our write_to data is lost]
	 *
	 * Reversing the order of the tests ensures such a situation cannot
	 * escape unnoticed. The smp_rmb is needed to ensure the page->flags
	 * load is not satisfied before that of page->_refcount.
	 *
	 * Note that if SetPageDirty is always performed via set_page_dirty,
	 * and thus under the i_pages lock, then this ordering is not required.
	 */
	refcount = 1 + compound_nr(page);
	if (!page_ref_freeze(page, refcount))
		goto cannot_free;
	/* note: atomic_cmpxchg in page_ref_freeze provides the smp_rmb */
	if (unlikely(PageDirty(page))) {
		page_ref_unfreeze(page, refcount);
		goto cannot_free;
	}

	if (PageSwapCache(page)) {
		swp_entry_t swap = { .val = page_private(page) };

		/* get a shadow entry before mem_cgroup_swapout() clears page_memcg() */
		if (reclaimed && !mapping_exiting(mapping))
			shadow = workingset_eviction(page, target_memcg);
		mem_cgroup_swapout(page, swap);
		__delete_from_swap_cache(page, swap, shadow);
		xa_unlock_irq(&mapping->i_pages);
		put_swap_page(page, swap);
	} else {
		void (*freepage)(struct page *);

		freepage = mapping->a_ops->freepage;
		/*
		 * Remember a shadow entry for reclaimed file cache in
		 * order to detect refaults, thus thrashing, later on.
		 *
		 * But don't store shadows in an address space that is
		 * already exiting.  This is not just an optimization,
		 * inode reclaim needs to empty out the radix tree or
		 * the nodes are lost.  Don't plant shadows behind its
		 * back.
		 *
		 * We also don't store shadows for DAX mappings because the
		 * only page cache pages found in these are zero pages
		 * covering holes, and because we don't want to mix DAX
		 * exceptional entries and shadow exceptional entries in the
		 * same address_space.
		 */
		if (reclaimed && page_is_file_lru(page) &&
		    !mapping_exiting(mapping) && !dax_mapping(mapping))
			shadow = workingset_eviction(page, target_memcg);
		__delete_from_page_cache(page, shadow);
		xa_unlock_irq(&mapping->i_pages);

		if (freepage != NULL)
			freepage(page);
	}

	return 1;

cannot_free:
	xa_unlock_irq(&mapping->i_pages);
	return 0;
}

/*
 * Attempt to detach a locked page from its ->mapping.  If it is dirty or if
 * someone else has a ref on the page, abort and return 0.  If it was
 * successfully detached, return 1.  Assumes the caller has a single ref on
 * this page.
 */
int remove_mapping(struct address_space *mapping, struct page *page)
{
	if (__remove_mapping(mapping, page, false, NULL)) {
		/*
		 * Unfreezing the refcount with 1 rather than 2 effectively
		 * drops the pagecache ref for us without requiring another
		 * atomic operation.
		 */
		page_ref_unfreeze(page, 1);
		return 1;
	}
	return 0;
}

/**
 * putback_lru_page - put previously isolated page onto appropriate LRU list
 * @page: page to be put back to appropriate lru list
 *
 * Add previously isolated @page to appropriate LRU list.
 * Page may still be unevictable for other reasons.
 *
 * lru_lock must not be held, interrupts must be enabled.
 */
void putback_lru_page(struct page *page)
{
	lru_cache_add(page);
	put_page(page);		/* drop ref from isolate */
}

enum page_references {
	PAGEREF_RECLAIM,
	PAGEREF_RECLAIM_CLEAN,
	PAGEREF_KEEP,
	PAGEREF_ACTIVATE,
};

static enum page_references page_check_references(struct page *page,
						  struct scan_control *sc)
{
	int referenced_ptes, referenced_page;
	unsigned long vm_flags;

	referenced_ptes = page_referenced(page, 1, sc->target_mem_cgroup,
					  &vm_flags);
	referenced_page = TestClearPageReferenced(page);

	/*
	 * Mlock lost the isolation race with us.  Let try_to_unmap()
	 * move the page to the unevictable list.
	 */
	if (vm_flags & VM_LOCKED)
		return PAGEREF_RECLAIM;

	if (referenced_ptes) {
		/*
		 * All mapped pages start out with page table
		 * references from the instantiating fault, so we need
		 * to look twice if a mapped file page is used more
		 * than once.
		 *
		 * Mark it and spare it for another trip around the
		 * inactive list.  Another page table reference will
		 * lead to its activation.
		 *
		 * Note: the mark is set for activated pages as well
		 * so that recently deactivated but used pages are
		 * quickly recovered.
		 */
		SetPageReferenced(page);

		if (referenced_page || referenced_ptes > 1)
			return PAGEREF_ACTIVATE;

		/*
		 * Activate file-backed executable pages after first usage.
		 */
		if ((vm_flags & VM_EXEC) && !PageSwapBacked(page))
			return PAGEREF_ACTIVATE;

		return PAGEREF_KEEP;
	}

	/* Reclaim if clean, defer dirty pages to writeback */
	if (referenced_page && !PageSwapBacked(page))
		return PAGEREF_RECLAIM_CLEAN;

	return PAGEREF_RECLAIM;
}

/* Check if a page is dirty or under writeback */
static void page_check_dirty_writeback(struct page *page,
				       bool *dirty, bool *writeback)
{
	struct address_space *mapping;

	/*
	 * Anonymous pages are not handled by flushers and must be written
	 * from reclaim context. Do not stall reclaim based on them.
	 */
	if (!page_is_file_lru(page) ||
	    (PageAnon(page) && !PageSwapBacked(page))) {
		*dirty = false;
		*writeback = false;
		return;
	}

	/* By default assume that the page flags are accurate */
	*dirty = PageDirty(page);
	*writeback = PageWriteback(page);

	/* Verify dirty/writeback state if the filesystem supports it */
	if (!page_has_private(page))
		return;

	mapping = page_mapping(page);
	if (mapping && mapping->a_ops->is_dirty_writeback)
		mapping->a_ops->is_dirty_writeback(page, dirty, writeback);
}

static struct page *alloc_demote_page(struct page *page, unsigned long node)
{
	struct migration_target_control mtc = {
		/*
		 * Allocate from 'node', or fail quickly and quietly.
		 * When this happens, 'page' will likely just be discarded
		 * instead of migrated.
		 */
		.gfp_mask = (GFP_HIGHUSER_MOVABLE & ~__GFP_RECLAIM) |
			    __GFP_THISNODE | __GFP_NOWARN |
			    __GFP_NOMEMALLOC | GFP_NOWAIT,
		.nid = node
	};

	return alloc_migration_target(page, (unsigned long)&mtc);
}

/*
 * Take pages on @demote_pages and attempt to demote them to
 * another node.  Pages which are not demoted are left on
 * @demote_pages.
 */
static unsigned int demote_page_list(struct list_head *demote_pages,
				     struct pglist_data *pgdat)
{
	int target_nid = next_demotion_node(pgdat->node_id);
	unsigned int nr_succeeded;
	int err;

	if (list_empty(demote_pages))
		return 0;

	if (target_nid == NUMA_NO_NODE)
		return 0;

	/* Demotion ignores all cpuset and mempolicy settings */
	err = migrate_pages(demote_pages, alloc_demote_page, NULL,
			    target_nid, MIGRATE_ASYNC, MR_DEMOTION,
			    &nr_succeeded);

	if (current_is_kswapd())
		__count_vm_events(PGDEMOTE_KSWAPD, nr_succeeded);
	else
		__count_vm_events(PGDEMOTE_DIRECT, nr_succeeded);

	return nr_succeeded;
}

/*
 * shrink_page_list() returns the number of reclaimed pages
 */
static unsigned int shrink_page_list(struct list_head *page_list,
				     struct pglist_data *pgdat,
				     struct scan_control *sc,
				     struct reclaim_stat *stat,
				     bool ignore_references)
{
	LIST_HEAD(ret_pages);
	LIST_HEAD(free_pages);
	LIST_HEAD(demote_pages);
	unsigned int nr_reclaimed = 0;
	unsigned int pgactivate = 0;
	bool do_demote_pass;

	memset(stat, 0, sizeof(*stat));
	cond_resched();
	do_demote_pass = can_demote(pgdat->node_id, sc);

retry:
	while (!list_empty(page_list)) {
		struct address_space *mapping;
		struct page *page;
		enum page_references references = PAGEREF_RECLAIM;
		bool dirty, writeback, may_enter_fs;
		unsigned int nr_pages;

		cond_resched();

		page = lru_to_page(page_list);
		list_del(&page->lru);

		if (!trylock_page(page))
			goto keep;

		VM_BUG_ON_PAGE(PageActive(page), page);

		nr_pages = compound_nr(page);

		/* Account the number of base pages even though THP */
		sc->nr_scanned += nr_pages;

		if (unlikely(!page_evictable(page)))
			goto activate_locked;

		if (!sc->may_unmap && page_mapped(page))
			goto keep_locked;

		/* page_update_gen() tried to promote this page? */
		if (lru_gen_enabled() && !ignore_references &&
		    page_mapped(page) && PageReferenced(page))
			goto keep_locked;

		may_enter_fs = (sc->gfp_mask & __GFP_FS) ||
			(PageSwapCache(page) && (sc->gfp_mask & __GFP_IO));

		/*
		 * The number of dirty pages determines if a node is marked
		 * reclaim_congested which affects wait_iff_congested. kswapd
		 * will stall and start writing pages if the tail of the LRU
		 * is all dirty unqueued pages.
		 */
		page_check_dirty_writeback(page, &dirty, &writeback);
		if (dirty || writeback)
			stat->nr_dirty++;

		if (dirty && !writeback)
			stat->nr_unqueued_dirty++;

		/*
		 * Treat this page as congested if the underlying BDI is or if
		 * pages are cycling through the LRU so quickly that the
		 * pages marked for immediate reclaim are making it to the
		 * end of the LRU a second time.
		 */
		mapping = page_mapping(page);
		if (((dirty || writeback) && mapping &&
		     inode_write_congested(mapping->host)) ||
		    (writeback && PageReclaim(page)))
			stat->nr_congested++;

		/*
		 * If a page at the tail of the LRU is under writeback, there
		 * are three cases to consider.
		 *
		 * 1) If reclaim is encountering an excessive number of pages
		 *    under writeback and this page is both under writeback and
		 *    PageReclaim then it indicates that pages are being queued
		 *    for IO but are being recycled through the LRU before the
		 *    IO can complete. Waiting on the page itself risks an
		 *    indefinite stall if it is impossible to writeback the
		 *    page due to IO error or disconnected storage so instead
		 *    note that the LRU is being scanned too quickly and the
		 *    caller can stall after page list has been processed.
		 *
		 * 2) Global or new memcg reclaim encounters a page that is
		 *    not marked for immediate reclaim, or the caller does not
		 *    have __GFP_FS (or __GFP_IO if it's simply going to swap,
		 *    not to fs). In this case mark the page for immediate
		 *    reclaim and continue scanning.
		 *
		 *    Require may_enter_fs because we would wait on fs, which
		 *    may not have submitted IO yet. And the loop driver might
		 *    enter reclaim, and deadlock if it waits on a page for
		 *    which it is needed to do the write (loop masks off
		 *    __GFP_IO|__GFP_FS for this reason); but more thought
		 *    would probably show more reasons.
		 *
		 * 3) Legacy memcg encounters a page that is already marked
		 *    PageReclaim. memcg does not have any dirty pages
		 *    throttling so we could easily OOM just because too many
		 *    pages are in writeback and there is nothing else to
		 *    reclaim. Wait for the writeback to complete.
		 *
		 * In cases 1) and 2) we activate the pages to get them out of
		 * the way while we continue scanning for clean pages on the
		 * inactive list and refilling from the active list. The
		 * observation here is that waiting for disk writes is more
		 * expensive than potentially causing reloads down the line.
		 * Since they're marked for immediate reclaim, they won't put
		 * memory pressure on the cache working set any longer than it
		 * takes to write them to disk.
		 */
		if (PageWriteback(page)) {
			/* Case 1 above */
			if (current_is_kswapd() &&
			    PageReclaim(page) &&
			    test_bit(PGDAT_WRITEBACK, &pgdat->flags)) {
				stat->nr_immediate++;
				goto activate_locked;

			/* Case 2 above */
			} else if (writeback_throttling_sane(sc) ||
			    !PageReclaim(page) || !may_enter_fs) {
				/*
				 * This is slightly racy - end_page_writeback()
				 * might have just cleared PageReclaim, then
				 * setting PageReclaim here ends up interpreted
				 * as PageReadahead - but that does not matter
				 * enough to care.  What we do want is for this
				 * page to have PageReclaim set next time memcg
				 * reclaim reaches the tests above, so it will
				 * then wait_on_page_writeback() to avoid OOM;
				 * and it's also appropriate in global reclaim.
				 */
				SetPageReclaim(page);
				stat->nr_writeback++;
				goto activate_locked;

			/* Case 3 above */
			} else {
				unlock_page(page);
				wait_on_page_writeback(page);
				/* then go back and try same page again */
				list_add_tail(&page->lru, page_list);
				continue;
			}
		}

		if (!ignore_references)
			references = page_check_references(page, sc);

		switch (references) {
		case PAGEREF_ACTIVATE:
			goto activate_locked;
		case PAGEREF_KEEP:
			stat->nr_ref_keep += nr_pages;
			goto keep_locked;
		case PAGEREF_RECLAIM:
		case PAGEREF_RECLAIM_CLEAN:
			; /* try to reclaim the page below */
		}

		/*
		 * Before reclaiming the page, try to relocate
		 * its contents to another node.
		 */
		if (do_demote_pass &&
		    (thp_migration_supported() || !PageTransHuge(page))) {
			list_add(&page->lru, &demote_pages);
			unlock_page(page);
			continue;
		}

		/*
		 * Anonymous process memory has backing store?
		 * Try to allocate it some swap space here.
		 * Lazyfree page could be freed directly
		 */
		if (PageAnon(page) && PageSwapBacked(page)) {
			if (!PageSwapCache(page)) {
				if (!(sc->gfp_mask & __GFP_IO))
					goto keep_locked;
				if (page_maybe_dma_pinned(page))
					goto keep_locked;
				if (PageTransHuge(page)) {
					/* cannot split THP, skip it */
					if (!can_split_huge_page(page, NULL))
						goto activate_locked;
					/*
					 * Split pages without a PMD map right
					 * away. Chances are some or all of the
					 * tail pages can be freed without IO.
					 */
					if (!compound_mapcount(page) &&
					    split_huge_page_to_list(page,
								    page_list))
						goto activate_locked;
				}
				if (!add_to_swap(page)) {
					if (!PageTransHuge(page))
						goto activate_locked_split;
					/* Fallback to swap normal pages */
					if (split_huge_page_to_list(page,
								    page_list))
						goto activate_locked;
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
					count_vm_event(THP_SWPOUT_FALLBACK);
#endif
					if (!add_to_swap(page))
						goto activate_locked_split;
				}

				may_enter_fs = true;

				/* Adding to swap updated mapping */
				mapping = page_mapping(page);
			}
		} else if (unlikely(PageTransHuge(page))) {
			/* Split file THP */
			if (split_huge_page_to_list(page, page_list))
				goto keep_locked;
		}

		/*
		 * THP may get split above; if so, we need to subtract the
		 * tail pages and update nr_pages to avoid accounting the
		 * tail pages twice.
		 *
		 * The tail pages that are added into swap cache successfully
		 * reach here.
		 */
		if ((nr_pages > 1) && !PageTransHuge(page)) {
			sc->nr_scanned -= (nr_pages - 1);
			nr_pages = 1;
		}

		/*
		 * The page is mapped into the page tables of one or more
		 * processes. Try to unmap it here.
		 */
		if (page_mapped(page)) {
			enum ttu_flags flags = TTU_BATCH_FLUSH;
			bool was_swapbacked = PageSwapBacked(page);

			if (unlikely(PageTransHuge(page)))
				flags |= TTU_SPLIT_HUGE_PMD;

			try_to_unmap(page, flags);
			if (page_mapped(page)) {
				stat->nr_unmap_fail += nr_pages;
				if (!was_swapbacked && PageSwapBacked(page))
					stat->nr_lazyfree_fail += nr_pages;
				goto activate_locked;
			}
		}

		if (PageDirty(page)) {
			/*
			 * Only kswapd can writeback filesystem pages
			 * to avoid risk of stack overflow. But avoid
			 * injecting inefficient single-page IO into
			 * flusher writeback as much as possible: only
			 * write pages when we've encountered many
			 * dirty pages, and when we've already scanned
			 * the rest of the LRU for clean pages and see
			 * the same dirty pages again (PageReclaim).
			 */
			if (page_is_file_lru(page) &&
			    (!current_is_kswapd() || !PageReclaim(page) ||
			     !test_bit(PGDAT_DIRTY, &pgdat->flags))) {
				/*
				 * Immediately reclaim when written back.
				 * Similar in principle to deactivate_page()
				 * except we already have the page isolated
				 * and know it's dirty
				 */
				inc_node_page_state(page, NR_VMSCAN_IMMEDIATE);
				SetPageReclaim(page);

				goto activate_locked;
			}

			if (references == PAGEREF_RECLAIM_CLEAN)
				goto keep_locked;
			if (!may_enter_fs)
				goto keep_locked;
			if (!sc->may_writepage)
				goto keep_locked;

			/*
			 * Page is dirty. Flush the TLB if a writable entry
			 * potentially exists to avoid CPU writes after IO
			 * starts and then write it out here.
			 */
			try_to_unmap_flush_dirty();
			switch (pageout(page, mapping)) {
			case PAGE_KEEP:
				goto keep_locked;
			case PAGE_ACTIVATE:
				goto activate_locked;
			case PAGE_SUCCESS:
				stat->nr_pageout += thp_nr_pages(page);

				if (PageWriteback(page))
					goto keep;
				if (PageDirty(page))
					goto keep;

				/*
				 * A synchronous write - probably a ramdisk.  Go
				 * ahead and try to reclaim the page.
				 */
				if (!trylock_page(page))
					goto keep;
				if (PageDirty(page) || PageWriteback(page))
					goto keep_locked;
				mapping = page_mapping(page);
				fallthrough;
			case PAGE_CLEAN:
				; /* try to free the page below */
			}
		}

		/*
		 * If the page has buffers, try to free the buffer mappings
		 * associated with this page. If we succeed we try to free
		 * the page as well.
		 *
		 * We do this even if the page is PageDirty().
		 * try_to_release_page() does not perform I/O, but it is
		 * possible for a page to have PageDirty set, but it is actually
		 * clean (all its buffers are clean).  This happens if the
		 * buffers were written out directly, with submit_bh(). ext3
		 * will do this, as well as the blockdev mapping.
		 * try_to_release_page() will discover that cleanness and will
		 * drop the buffers and mark the page clean - it can be freed.
		 *
		 * Rarely, pages can have buffers and no ->mapping.  These are
		 * the pages which were not successfully invalidated in
		 * truncate_cleanup_page().  We try to drop those buffers here
		 * and if that worked, and the page is no longer mapped into
		 * process address space (page_count == 1) it can be freed.
		 * Otherwise, leave the page on the LRU so it is swappable.
		 */
		if (page_has_private(page)) {
			if (!try_to_release_page(page, sc->gfp_mask))
				goto activate_locked;
			if (!mapping && page_count(page) == 1) {
				unlock_page(page);
				if (put_page_testzero(page))
					goto free_it;
				else {
					/*
					 * rare race with speculative reference.
					 * the speculative reference will free
					 * this page shortly, so we may
					 * increment nr_reclaimed here (and
					 * leave it off the LRU).
					 */
					nr_reclaimed++;
					continue;
				}
			}
		}

		if (PageAnon(page) && !PageSwapBacked(page)) {
			/* follow __remove_mapping for reference */
			if (!page_ref_freeze(page, 1))
				goto keep_locked;
			/*
			 * The page has only one reference left, which is
			 * from the isolation. After the caller puts the
			 * page back on lru and drops the reference, the
			 * page will be freed anyway. It doesn't matter
			 * which lru it goes. So we don't bother checking
			 * PageDirty here.
			 */
			count_vm_event(PGLAZYFREED);
			count_memcg_page_event(page, PGLAZYFREED);
		} else if (!mapping || !__remove_mapping(mapping, page, true,
							 sc->target_mem_cgroup))
			goto keep_locked;

		unlock_page(page);
free_it:
		/*
		 * THP may get swapped out as a whole, so we need to account
		 * all of its base pages.
		 */
		nr_reclaimed += nr_pages;

		/*
		 * Is there need to periodically free_page_list? It would
		 * appear not as the counts should be low
		 */
		if (unlikely(PageTransHuge(page)))
			destroy_compound_page(page);
		else
			list_add(&page->lru, &free_pages);
		continue;

activate_locked_split:
		/*
		 * The tail pages that failed to be added to the swap cache
		 * reach here.  Fixup nr_scanned and nr_pages.
		 */
		if (nr_pages > 1) {
			sc->nr_scanned -= (nr_pages - 1);
			nr_pages = 1;
		}
activate_locked:
		/* Not a candidate for swapping, so reclaim swap space. */
		if (PageSwapCache(page) && (mem_cgroup_swap_full(page) ||
					    PageMlocked(page)))
			try_to_free_swap(page);
		VM_BUG_ON_PAGE(PageActive(page), page);
		if (!PageMlocked(page)) {
			int type = page_is_file_lru(page);

			SetPageActive(page);
			stat->nr_activate[type] += nr_pages;
			count_memcg_page_event(page, PGACTIVATE);
		}
keep_locked:
		unlock_page(page);
keep:
		list_add(&page->lru, &ret_pages);
		VM_BUG_ON_PAGE(PageLRU(page) || PageUnevictable(page), page);
	}
	/* 'page_list' is always empty here */

	/* Migrate pages selected for demotion */
	nr_reclaimed += demote_page_list(&demote_pages, pgdat);
	/* Pages that could not be demoted are still in @demote_pages */
	if (!list_empty(&demote_pages)) {
		/* Pages which failed demotion go back on @page_list for retry: */
		list_splice_init(&demote_pages, page_list);
		do_demote_pass = false;
		goto retry;
	}

	pgactivate = stat->nr_activate[0] + stat->nr_activate[1];

	mem_cgroup_uncharge_list(&free_pages);
	try_to_unmap_flush();
	free_unref_page_list(&free_pages);

	list_splice(&ret_pages, page_list);
	count_vm_events(PGACTIVATE, pgactivate);

	return nr_reclaimed;
}

unsigned int reclaim_clean_pages_from_list(struct zone *zone,
					   struct list_head *page_list)
{
	struct scan_control sc = {
		.gfp_mask = GFP_KERNEL,
		.may_unmap = 1,
	};
	struct reclaim_stat stat;
	unsigned int nr_reclaimed;
	struct page *page, *next;
	LIST_HEAD(clean_pages);
	unsigned int noreclaim_flag;

	list_for_each_entry_safe(page, next, page_list, lru) {
		if (!PageHuge(page) && page_is_file_lru(page) &&
		    !PageDirty(page) && !__PageMovable(page) &&
		    !PageUnevictable(page)) {
			ClearPageActive(page);
			list_move(&page->lru, &clean_pages);
		}
	}

	/*
	 * We should be safe here since we are only dealing with file pages and
	 * we are not kswapd and therefore cannot write dirty file pages. But
	 * call memalloc_noreclaim_save() anyway, just in case these conditions
	 * change in the future.
	 */
	noreclaim_flag = memalloc_noreclaim_save();
	nr_reclaimed = shrink_page_list(&clean_pages, zone->zone_pgdat, &sc,
					&stat, true);
	memalloc_noreclaim_restore(noreclaim_flag);

	list_splice(&clean_pages, page_list);
	mod_node_page_state(zone->zone_pgdat, NR_ISOLATED_FILE,
			    -(long)nr_reclaimed);
	/*
	 * Since lazyfree pages are isolated from the file LRU from the
	 * beginning, they will rotate back to the anonymous LRU in the end
	 * if discarding fails, so the isolated counts would be mismatched.
	 * Compensate the isolated count for both LRU lists.
	 */
	mod_node_page_state(zone->zone_pgdat, NR_ISOLATED_ANON,
			    stat.nr_lazyfree_fail);
	mod_node_page_state(zone->zone_pgdat, NR_ISOLATED_FILE,
			    -(long)stat.nr_lazyfree_fail);
	return nr_reclaimed;
}
| |
| /* |
| * Attempt to remove the specified page from its LRU. Only take this page |
| * if it is of the appropriate PageActive status. Pages which are being |
| * freed elsewhere are also ignored. |
| * |
| * page: page to consider |
| * mode: one of the LRU isolation modes defined above |
| * |
| * returns true on success, false on failure. |
| */ |
| bool __isolate_lru_page_prepare(struct page *page, isolate_mode_t mode) |
| { |
| /* Only take pages on the LRU. */ |
| if (!PageLRU(page)) |
| return false; |
| |
| /* Compaction should not handle unevictable pages but CMA can do so */ |
| if (PageUnevictable(page) && !(mode & ISOLATE_UNEVICTABLE)) |
| return false; |
| |
| /* |
| * To minimise LRU disruption, the caller can indicate that it only |
| * wants to isolate pages it will be able to operate on without |
| * blocking - clean pages for the most part. |
| * |
	 * ISOLATE_ASYNC_MIGRATE is used to indicate that it only wants
	 * pages that can be migrated without blocking.
| */ |
| if (mode & ISOLATE_ASYNC_MIGRATE) { |
| /* All the caller can do on PageWriteback is block */ |
| if (PageWriteback(page)) |
| return false; |
| |
| if (PageDirty(page)) { |
| struct address_space *mapping; |
| bool migrate_dirty; |
| |
| /* |
| * Only pages without mappings or that have a |
| * ->migratepage callback are possible to migrate |
| * without blocking. However, we can be racing with |
| * truncation so it's necessary to lock the page |
| * to stabilise the mapping as truncation holds |
| * the page lock until after the page is removed |
| * from the page cache. |
| */ |
| if (!trylock_page(page)) |
| return false; |
| |
| mapping = page_mapping(page); |
| migrate_dirty = !mapping || mapping->a_ops->migratepage; |
| unlock_page(page); |
| if (!migrate_dirty) |
| return false; |
| } |
| } |
| |
| if ((mode & ISOLATE_UNMAPPED) && page_mapped(page)) |
| return false; |
| |
| return true; |
| } |
| |
| /* |
| * Update LRU sizes after isolating pages. The LRU size updates must |
| * be complete before mem_cgroup_update_lru_size due to a sanity check. |
| */ |
| static __always_inline void update_lru_sizes(struct lruvec *lruvec, |
| enum lru_list lru, unsigned long *nr_zone_taken) |
| { |
| int zid; |
| |
| for (zid = 0; zid < MAX_NR_ZONES; zid++) { |
| if (!nr_zone_taken[zid]) |
| continue; |
| |
| update_lru_size(lruvec, lru, zid, -nr_zone_taken[zid]); |
| } |
| |
| } |
| |
| /* |
 * Isolate pages from @lruvec, filling @dst with up to nr_to_scan pages.
| * |
| * lruvec->lru_lock is heavily contended. Some of the functions that |
| * shrink the lists perform better by taking out a batch of pages |
| * and working on them outside the LRU lock. |
| * |
| * For pagecache intensive workloads, this function is the hottest |
| * spot in the kernel (apart from copy_*_user functions). |
| * |
 * lruvec->lru_lock must be held before calling this function.
| * |
| * @nr_to_scan: The number of eligible pages to look through on the list. |
| * @lruvec: The LRU vector to pull pages from. |
| * @dst: The temp list to put pages on to. |
| * @nr_scanned: The number of pages that were scanned. |
| * @sc: The scan_control struct for this reclaim session |
| * @lru: LRU list id for isolating |
| * |
| * returns how many pages were moved onto *@dst. |
| */ |
| static unsigned long isolate_lru_pages(unsigned long nr_to_scan, |
| struct lruvec *lruvec, struct list_head *dst, |
| unsigned long *nr_scanned, struct scan_control *sc, |
| enum lru_list lru) |
| { |
| struct list_head *src = &lruvec->lists[lru]; |
| unsigned long nr_taken = 0; |
| unsigned long nr_zone_taken[MAX_NR_ZONES] = { 0 }; |
| unsigned long nr_skipped[MAX_NR_ZONES] = { 0, }; |
| unsigned long skipped = 0; |
| unsigned long scan, total_scan, nr_pages; |
| LIST_HEAD(pages_skipped); |
| isolate_mode_t mode = (sc->may_unmap ? 0 : ISOLATE_UNMAPPED); |
| |
| total_scan = 0; |
| scan = 0; |
| while (scan < nr_to_scan && !list_empty(src)) { |
| struct page *page; |
| |
| page = lru_to_page(src); |
| prefetchw_prev_lru_page(page, src, flags); |
| |
| nr_pages = compound_nr(page); |
| total_scan += nr_pages; |
| |
| if (page_zonenum(page) > sc->reclaim_idx) { |
| list_move(&page->lru, &pages_skipped); |
| nr_skipped[page_zonenum(page)] += nr_pages; |
| continue; |
| } |
| |
| /* |
| * Do not count skipped pages because that makes the function |
| * return with no isolated pages if the LRU mostly contains |
| * ineligible pages. This causes the VM to not reclaim any |
| * pages, triggering a premature OOM. |
| * |
		 * Account all tail pages of THP. This would not cause
		 * premature OOM since __isolate_lru_page_prepare() fails
		 * only when the page is being freed somewhere else.
| */ |
| scan += nr_pages; |
| if (!__isolate_lru_page_prepare(page, mode)) { |
| /* It is being freed elsewhere */ |
| list_move(&page->lru, src); |
| continue; |
| } |
| /* |
| * Be careful not to clear PageLRU until after we're |
| * sure the page is not being freed elsewhere -- the |
| * page release code relies on it. |
| */ |
| if (unlikely(!get_page_unless_zero(page))) { |
| list_move(&page->lru, src); |
| continue; |
| } |
| |
| if (!TestClearPageLRU(page)) { |
| /* Another thread is already isolating this page */ |
| put_page(page); |
| list_move(&page->lru, src); |
| continue; |
| } |
| |
| nr_taken += nr_pages; |
| nr_zone_taken[page_zonenum(page)] += nr_pages; |
| list_move(&page->lru, dst); |
| } |
| |
| /* |
| * Splice any skipped pages to the start of the LRU list. Note that |
| * this disrupts the LRU order when reclaiming for lower zones but |
| * we cannot splice to the tail. If we did then the SWAP_CLUSTER_MAX |
| * scanning would soon rescan the same pages to skip and put the |
| * system at risk of premature OOM. |
| */ |
| if (!list_empty(&pages_skipped)) { |
| int zid; |
| |
| list_splice(&pages_skipped, src); |
| for (zid = 0; zid < MAX_NR_ZONES; zid++) { |
| if (!nr_skipped[zid]) |
| continue; |
| |
| __count_zid_vm_events(PGSCAN_SKIP, zid, nr_skipped[zid]); |
| skipped += nr_skipped[zid]; |
| } |
| } |
| *nr_scanned = total_scan; |
| trace_mm_vmscan_lru_isolate(sc->reclaim_idx, sc->order, nr_to_scan, |
| total_scan, skipped, nr_taken, mode, lru); |
| update_lru_sizes(lruvec, lru, nr_zone_taken); |
| return nr_taken; |
| } |
| |
| /** |
| * isolate_lru_page - tries to isolate a page from its LRU list |
| * @page: page to isolate from its LRU list |
| * |
 * Isolates @page from an LRU list, clears PageLRU and adjusts the
| * vmstat statistic corresponding to whatever LRU list the page was on. |
| * |
| * Returns 0 if the page was removed from an LRU list. |
| * Returns -EBUSY if the page was not on an LRU list. |
| * |
 * On return, the page will have PageLRU() cleared. If it was found on
| * the active list, it will have PageActive set. If it was found on |
| * the unevictable list, it will have the PageUnevictable bit set. That flag |
| * may need to be cleared by the caller before letting the page go. |
| * |
| * The vmstat statistic corresponding to the list on which the page was |
| * found will be decremented. |
| * |
| * Restrictions: |
| * |
| * (1) Must be called with an elevated refcount on the page. This is a |
| * fundamental difference from isolate_lru_pages (which is called |
| * without a stable reference). |
| * (2) the lru_lock must not be held. |
| * (3) interrupts must be enabled. |
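 *
 * A minimal caller sketch (assuming the caller already holds a reference
 * on the page):
 *
 *	if (!isolate_lru_page(page)) {
 *		... use the page off the LRU ...
 *		putback_lru_page(page);
 *	}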
| */ |
| int isolate_lru_page(struct page *page) |
| { |
| int ret = -EBUSY; |
| |
| VM_BUG_ON_PAGE(!page_count(page), page); |
| WARN_RATELIMIT(PageTail(page), "trying to isolate tail page"); |
| |
| if (TestClearPageLRU(page)) { |
| struct lruvec *lruvec; |
| |
| get_page(page); |
| lruvec = lock_page_lruvec_irq(page); |
| del_page_from_lru_list(page, lruvec); |
| unlock_page_lruvec_irq(lruvec); |
| ret = 0; |
| } |
| |
| return ret; |
| } |
| |
| /* |
 * A direct reclaimer may isolate SWAP_CLUSTER_MAX pages from the LRU list
 * and then get rescheduled. When there is a massive number of tasks doing
 * page allocation, such sleeping direct reclaimers may keep piling up on
 * each CPU; the LRU list will shrink and be scanned faster than necessary,
 * leading to unnecessary swapping, thrashing and OOM.
| */ |
| static int too_many_isolated(struct pglist_data *pgdat, int file, |
| struct scan_control *sc) |
| { |
| unsigned long inactive, isolated; |
| |
| if (current_is_kswapd()) |
| return 0; |
| |
| if (!writeback_throttling_sane(sc)) |
| return 0; |
| |
| if (file) { |
| inactive = node_page_state(pgdat, NR_INACTIVE_FILE); |
| isolated = node_page_state(pgdat, NR_ISOLATED_FILE); |
| } else { |
| inactive = node_page_state(pgdat, NR_INACTIVE_ANON); |
| isolated = node_page_state(pgdat, NR_ISOLATED_ANON); |
| } |
| |
| /* |
| * GFP_NOIO/GFP_NOFS callers are allowed to isolate more pages, so they |
| * won't get blocked by normal direct-reclaimers, forming a circular |
| * deadlock. |
| */ |
| if ((sc->gfp_mask & (__GFP_IO | __GFP_FS)) == (__GFP_IO | __GFP_FS)) |
| inactive >>= 3; |
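
	/*
	 * I.e. a __GFP_IO | __GFP_FS caller is throttled once the number
	 * of isolated pages exceeds an eighth of the inactive list.
	 */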
| |
| return isolated > inactive; |
| } |
| |
| /* |
| * move_pages_to_lru() moves pages from private @list to appropriate LRU list. |
| * On return, @list is reused as a list of pages to be freed by the caller. |
| * |
| * Returns the number of pages moved to the given lruvec. |
| */ |
| static unsigned int move_pages_to_lru(struct lruvec *lruvec, |
| struct list_head *list) |
| { |
| int nr_pages, nr_moved = 0; |
| LIST_HEAD(pages_to_free); |
| struct page *page; |
| |
| while (!list_empty(list)) { |
| page = lru_to_page(list); |
| VM_BUG_ON_PAGE(PageLRU(page), page); |
| list_del(&page->lru); |
| if (unlikely(!page_evictable(page))) { |
| spin_unlock_irq(&lruvec->lru_lock); |
| putback_lru_page(page); |
| spin_lock_irq(&lruvec->lru_lock); |
| continue; |
| } |
| |
| /* |
| * The SetPageLRU needs to be kept here for list integrity. |
| * Otherwise: |
| * #0 move_pages_to_lru #1 release_pages |
| * if !put_page_testzero |
| * if (put_page_testzero()) |
| * !PageLRU //skip lru_lock |
| * SetPageLRU() |
| * list_add(&page->lru,) |
| * list_add(&page->lru,) |
| */ |
| SetPageLRU(page); |
| |
| if (unlikely(put_page_testzero(page))) { |
| __clear_page_lru_flags(page); |
| |
| if (unlikely(PageCompound(page))) { |
| spin_unlock_irq(&lruvec->lru_lock); |
| destroy_compound_page(page); |
| spin_lock_irq(&lruvec->lru_lock); |
| } else |
| list_add(&page->lru, &pages_to_free); |
| |
| continue; |
| } |
| |
| /* |
| * All pages were isolated from the same lruvec (and isolation |
| * inhibits memcg migration). |
| */ |
| VM_BUG_ON_PAGE(!page_matches_lruvec(page, lruvec), page); |
| add_page_to_lru_list(page, lruvec); |
| nr_pages = thp_nr_pages(page); |
| nr_moved += nr_pages; |
| if (PageActive(page)) |
| workingset_age_nonresident(lruvec, nr_pages); |
| } |
| |
| /* |
| * To save our caller's stack, now use input list for pages to free. |
| */ |
| list_splice(&pages_to_free, list); |
| |
| return nr_moved; |
| } |
| |
| /* |
| * If a kernel thread (such as nfsd for loop-back mounts) services |
| * a backing device by writing to the page cache it sets PF_LOCAL_THROTTLE. |
| * In that case we should only throttle if the backing device it is |
| * writing to is congested. In other cases it is safe to throttle. |
| */ |
| static int current_may_throttle(void) |
| { |
| return !(current->flags & PF_LOCAL_THROTTLE) || |
| current->backing_dev_info == NULL || |
| bdi_write_congested(current->backing_dev_info); |
| } |
| |
| /* |
 * shrink_inactive_list() is a helper for shrink_node(). It returns the
 * number of reclaimed pages.
| */ |
| static unsigned long |
| shrink_inactive_list(unsigned long nr_to_scan, struct lruvec *lruvec, |
| struct scan_control *sc, enum lru_list lru) |
| { |
| LIST_HEAD(page_list); |
| unsigned long nr_scanned; |
| unsigned int nr_reclaimed = 0; |
| unsigned long nr_taken; |
| struct reclaim_stat stat; |
| bool file = is_file_lru(lru); |
| enum vm_event_item item; |
| struct pglist_data *pgdat = lruvec_pgdat(lruvec); |
| bool stalled = false; |
| |
| while (unlikely(too_many_isolated(pgdat, file, sc))) { |
| if (stalled) |
| return 0; |
| |
| /* wait a bit for the reclaimer. */ |
| msleep(100); |
| stalled = true; |
| |
| /* We are about to die and free our memory. Return now. */ |
| if (fatal_signal_pending(current)) |
| return SWAP_CLUSTER_MAX; |
| } |
| |
| lru_add_drain(); |
| |
| spin_lock_irq(&lruvec->lru_lock); |
| |
| nr_taken = isolate_lru_pages(nr_to_scan, lruvec, &page_list, |
| &nr_scanned, sc, lru); |
| |
| __mod_node_page_state(pgdat, NR_ISOLATED_ANON + file, nr_taken); |
| item = current_is_kswapd() ? PGSCAN_KSWAPD : PGSCAN_DIRECT; |
| if (!cgroup_reclaim(sc)) |
| __count_vm_events(item, nr_scanned); |
| __count_memcg_events(lruvec_memcg(lruvec), item, nr_scanned); |
| __count_vm_events(PGSCAN_ANON + file, nr_scanned); |
| |
| spin_unlock_irq(&lruvec->lru_lock); |
| |
| if (nr_taken == 0) |
| return 0; |
| |
| nr_reclaimed = shrink_page_list(&page_list, pgdat, sc, &stat, false); |
| |
| spin_lock_irq(&lruvec->lru_lock); |
| move_pages_to_lru(lruvec, &page_list); |
| |
| __mod_node_page_state(pgdat, NR_ISOLATED_ANON + file, -nr_taken); |
| item = current_is_kswapd() ? PGSTEAL_KSWAPD : PGSTEAL_DIRECT; |
| if (!cgroup_reclaim(sc)) |
| __count_vm_events(item, nr_reclaimed); |
| __count_memcg_events(lruvec_memcg(lruvec), item, nr_reclaimed); |
| __count_vm_events(PGSTEAL_ANON + file, nr_reclaimed); |
| spin_unlock_irq(&lruvec->lru_lock); |
| |
| lru_note_cost(lruvec, file, stat.nr_pageout); |
| mem_cgroup_uncharge_list(&page_list); |
| free_unref_page_list(&page_list); |
| |
| /* |
| * If dirty pages are scanned that are not queued for IO, it |
| * implies that flushers are not doing their job. This can |
| * happen when memory pressure pushes dirty pages to the end of |
| * the LRU before the dirty limits are breached and the dirty |
| * data has expired. It can also happen when the proportion of |
| * dirty pages grows not through writes but through memory |
| * pressure reclaiming all the clean cache. And in some cases, |
| * the flushers simply cannot keep up with the allocation |
| * rate. Nudge the flusher threads in case they are asleep. |
| */ |
| if (stat.nr_unqueued_dirty == nr_taken) |
| wakeup_flusher_threads(WB_REASON_VMSCAN); |
| |
| sc->nr.dirty += stat.nr_dirty; |
| sc->nr.congested += stat.nr_congested; |
| sc->nr.unqueued_dirty += stat.nr_unqueued_dirty; |
| sc->nr.writeback += stat.nr_writeback; |
| sc->nr.immediate += stat.nr_immediate; |
| sc->nr.taken += nr_taken; |
| if (file) |
| sc->nr.file_taken += nr_taken; |
| |
| trace_mm_vmscan_lru_shrink_inactive(pgdat->node_id, |
| nr_scanned, nr_reclaimed, &stat, sc->priority, file); |
| return nr_reclaimed; |
| } |
| |
| /* |
| * shrink_active_list() moves pages from the active LRU to the inactive LRU. |
| * |
| * We move them the other way if the page is referenced by one or more |
| * processes. |
| * |
| * If the pages are mostly unmapped, the processing is fast and it is |
| * appropriate to hold lru_lock across the whole operation. But if |
| * the pages are mapped, the processing is slow (page_referenced()), so |
| * we should drop lru_lock around each page. It's impossible to balance |
| * this, so instead we remove the pages from the LRU while processing them. |
| * It is safe to rely on PG_active against the non-LRU pages in here because |
| * nobody will play with that bit on a non-LRU page. |
| * |
| * The downside is that we have to touch page->_refcount against each page. |
| * But we had to alter page->flags anyway. |
| */ |
| static void shrink_active_list(unsigned long nr_to_scan, |
| struct lruvec *lruvec, |
| struct scan_control *sc, |
| enum lru_list lru) |
| { |
| unsigned long nr_taken; |
| unsigned long nr_scanned; |
| unsigned long vm_flags; |
| LIST_HEAD(l_hold); /* The pages which were snipped off */ |
| LIST_HEAD(l_active); |
| LIST_HEAD(l_inactive); |
| struct page *page; |
| unsigned nr_deactivate, nr_activate; |
| unsigned nr_rotated = 0; |
| int file = is_file_lru(lru); |
| struct pglist_data *pgdat = lruvec_pgdat(lruvec); |
| bool bypass = false; |
| |
| lru_add_drain(); |
| |
| spin_lock_irq(&lruvec->lru_lock); |
| |
| nr_taken = isolate_lru_pages(nr_to_scan, lruvec, &l_hold, |
| &nr_scanned, sc, lru); |
| |
| __mod_node_page_state(pgdat, NR_ISOLATED_ANON + file, nr_taken); |
| |
| if (!cgroup_reclaim(sc)) |
| __count_vm_events(PGREFILL, nr_scanned); |
| __count_memcg_events(lruvec_memcg(lruvec), PGREFILL, nr_scanned); |
| |
| spin_unlock_irq(&lruvec->lru_lock); |
| |
| while (!list_empty(&l_hold)) { |
| cond_resched(); |
| page = lru_to_page(&l_hold); |
| list_del(&page->lru); |
| |
| if (unlikely(!page_evictable(page))) { |
| putback_lru_page(page); |
| continue; |
| } |
| |
| if (unlikely(buffer_heads_over_limit)) { |
| if (page_has_private(page) && trylock_page(page)) { |
| if (page_has_private(page)) |
| try_to_release_page(page, 0); |
| unlock_page(page); |
| } |
| } |
| |
| trace_android_vh_page_referenced_check_bypass(page, nr_to_scan, lru, &bypass); |
| if (bypass) |
| goto skip_page_referenced; |
| |
| if (page_referenced(page, 0, sc->target_mem_cgroup, |
| &vm_flags)) { |
| /* |
| * Identify referenced, file-backed active pages and |
			 * give them one more trip around the active list, so
			 * that executable code gets a better chance to stay
			 * in memory under moderate memory pressure. Anon pages
| * are not likely to be evicted by use-once streaming |
| * IO, plus JVM can create lots of anon VM_EXEC pages, |
| * so we ignore them here. |
| */ |
| if ((vm_flags & VM_EXEC) && page_is_file_lru(page)) { |
| nr_rotated += thp_nr_pages(page); |
| list_add(&page->lru, &l_active); |
| continue; |
| } |
| } |
| skip_page_referenced: |
| ClearPageActive(page); /* we are de-activating */ |
| SetPageWorkingset(page); |
| list_add(&page->lru, &l_inactive); |
| } |
| |
| /* |
| * Move pages back to the lru list. |
| */ |
| spin_lock_irq(&lruvec->lru_lock); |
| |
| nr_activate = move_pages_to_lru(lruvec, &l_active); |
| nr_deactivate = move_pages_to_lru(lruvec, &l_inactive); |
| /* Keep all free pages in l_active list */ |
| list_splice(&l_inactive, &l_active); |
| |
| __count_vm_events(PGDEACTIVATE, nr_deactivate); |
| __count_memcg_events(lruvec_memcg(lruvec), PGDEACTIVATE, nr_deactivate); |
| |
| __mod_node_page_state(pgdat, NR_ISOLATED_ANON + file, -nr_taken); |
| spin_unlock_irq(&lruvec->lru_lock); |
| |
| mem_cgroup_uncharge_list(&l_active); |
| free_unref_page_list(&l_active); |
| trace_mm_vmscan_lru_shrink_active(pgdat->node_id, nr_taken, nr_activate, |
| nr_deactivate, nr_rotated, sc->priority, file); |
| } |
| |
| unsigned long reclaim_pages(struct list_head *page_list) |
| { |
| int nid = NUMA_NO_NODE; |
| unsigned int nr_reclaimed = 0; |
| LIST_HEAD(node_page_list); |
| struct reclaim_stat dummy_stat; |
| struct page *page; |
| unsigned int noreclaim_flag; |
| struct scan_control sc = { |
| .gfp_mask = GFP_KERNEL, |
| .may_writepage = 1, |
| .may_unmap = 1, |
| .may_swap = 1, |
| .no_demotion = 1, |
| }; |
| |
| noreclaim_flag = memalloc_noreclaim_save(); |
| |
| while (!list_empty(page_list)) { |
| page = lru_to_page(page_list); |
| if (nid == NUMA_NO_NODE) { |
| nid = page_to_nid(page); |
| INIT_LIST_HEAD(&node_page_list); |
| } |
| |
| if (nid == page_to_nid(page)) { |
| ClearPageActive(page); |
| list_move(&page->lru, &node_page_list); |
| continue; |
| } |
| |
| nr_reclaimed += shrink_page_list(&node_page_list, |
| NODE_DATA(nid), |
| &sc, &dummy_stat, false); |
| while (!list_empty(&node_page_list)) { |
| page = lru_to_page(&node_page_list); |
| list_del(&page->lru); |
| putback_lru_page(page); |
| } |
| |
| nid = NUMA_NO_NODE; |
| } |
| |
| if (!list_empty(&node_page_list)) { |
| nr_reclaimed += shrink_page_list(&node_page_list, |
| NODE_DATA(nid), |
| &sc, &dummy_stat, false); |
| while (!list_empty(&node_page_list)) { |
| page = lru_to_page(&node_page_list); |
| list_del(&page->lru); |
| putback_lru_page(page); |
| } |
| } |
| |
| memalloc_noreclaim_restore(noreclaim_flag); |
| |
| return nr_reclaimed; |
| } |
| |
| static unsigned long shrink_list(enum lru_list lru, unsigned long nr_to_scan, |
| struct lruvec *lruvec, struct scan_control *sc) |
| { |
| if (is_active_lru(lru)) { |
| if (sc->may_deactivate & (1 << is_file_lru(lru))) |
| shrink_active_list(nr_to_scan, lruvec, sc, lru); |
| else |
| sc->skipped_deactivate = 1; |
| return 0; |
| } |
| |
| return shrink_inactive_list(nr_to_scan, lruvec, sc, lru); |
| } |
| |
| /* |
| * The inactive anon list should be small enough that the VM never has |
| * to do too much work. |
| * |
| * The inactive file list should be small enough to leave most memory |
| * to the established workingset on the scan-resistant active list, |
| * but large enough to avoid thrashing the aggregate readahead window. |
| * |
| * Both inactive lists should also be large enough that each inactive |
| * page has a chance to be referenced again before it is reclaimed. |
| * |
| * If that fails and refaulting is observed, the inactive list grows. |
| * |
| * The inactive_ratio is the target ratio of ACTIVE to INACTIVE pages |
| * on this LRU, maintained by the pageout code. An inactive_ratio |
| * of 3 means 3:1 or 25% of the pages are kept on the inactive list. |
| * |
| * total target max |
| * memory ratio inactive |
| * ------------------------------------- |
| * 10MB 1 5MB |
| * 100MB 1 50MB |
| * 1GB 3 250MB |
| * 10GB 10 0.9GB |
| * 100GB 31 3GB |
| * 1TB 101 10GB |
| * 10TB 320 32GB |
| */ |
| static bool inactive_is_low(struct lruvec *lruvec, enum lru_list inactive_lru) |
| { |
| enum lru_list active_lru = inactive_lru + LRU_ACTIVE; |
| unsigned long inactive, active; |
| unsigned long inactive_ratio; |
| unsigned long gb; |
| |
| inactive = lruvec_page_state(lruvec, NR_LRU_BASE + inactive_lru); |
| active = lruvec_page_state(lruvec, NR_LRU_BASE + active_lru); |
| |
| gb = (inactive + active) >> (30 - PAGE_SHIFT); |
| if (gb) |
| inactive_ratio = int_sqrt(10 * gb); |
| else |
| inactive_ratio = 1; |
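
	/*
	 * Example: with 4KB pages the shift above divides the page count
	 * by 2^18, i.e. converts it to gigabytes. For 100GB of pages,
	 * gb = 100 and int_sqrt(10 * 100) = 31, matching the table above.
	 */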
| |
| return inactive * inactive_ratio < active; |
| } |
| |
| enum scan_balance { |
| SCAN_EQUAL, |
| SCAN_FRACT, |
| SCAN_ANON, |
| SCAN_FILE, |
| }; |
| |
| static void prepare_scan_count(pg_data_t *pgdat, struct scan_control *sc) |
| { |
| unsigned long file; |
| struct lruvec *target_lruvec; |
| |
| if (lru_gen_enabled()) |
| return; |
| |
| target_lruvec = mem_cgroup_lruvec(sc->target_mem_cgroup, pgdat); |
| |
| /* |
| * Flush the memory cgroup stats, so that we read accurate per-memcg |
| * lruvec stats for heuristics. |
| */ |
| mem_cgroup_flush_stats(); |
| |
| /* |
| * Determine the scan balance between anon and file LRUs. |
| */ |
| spin_lock_irq(&target_lruvec->lru_lock); |
| sc->anon_cost = target_lruvec->anon_cost; |
| sc->file_cost = target_lruvec->file_cost; |
| spin_unlock_irq(&target_lruvec->lru_lock); |
| |
| /* |
| * Target desirable inactive:active list ratios for the anon |
| * and file LRU lists. |
| */ |
| if (!sc->force_deactivate) { |
| unsigned long refaults; |
| |
| refaults = lruvec_page_state(target_lruvec, |
| WORKINGSET_ACTIVATE_ANON); |
| if (refaults != target_lruvec->refaults[0] || |
| inactive_is_low(target_lruvec, LRU_INACTIVE_ANON)) |
| sc->may_deactivate |= DEACTIVATE_ANON; |
| else |
| sc->may_deactivate &= ~DEACTIVATE_ANON; |
| |
| /* |
| * When refaults are being observed, it means a new |
| * workingset is being established. Deactivate to get |
| * rid of any stale active pages quickly. |
| */ |
| refaults = lruvec_page_state(target_lruvec, |
| WORKINGSET_ACTIVATE_FILE); |
| if (refaults != target_lruvec->refaults[1] || |
| inactive_is_low(target_lruvec, LRU_INACTIVE_FILE)) |
| sc->may_deactivate |= DEACTIVATE_FILE; |
| else |
| sc->may_deactivate &= ~DEACTIVATE_FILE; |
| } else |
| sc->may_deactivate = DEACTIVATE_ANON | DEACTIVATE_FILE; |
| |
| /* |
| * If we have plenty of inactive file pages that aren't |
| * thrashing, try to reclaim those first before touching |
| * anonymous pages. |
| */ |
| file = lruvec_page_state(target_lruvec, NR_INACTIVE_FILE); |
| if (file >> sc->priority && !(sc->may_deactivate & DEACTIVATE_FILE)) |
| sc->cache_trim_mode = 1; |
| else |
| sc->cache_trim_mode = 0; |
| |
| /* |
| * Prevent the reclaimer from falling into the cache trap: as |
| * cache pages start out inactive, every cache fault will tip |
| * the scan balance towards the file LRU. And as the file LRU |
| * shrinks, so does the window for rotation from references. |
| * This means we have a runaway feedback loop where a tiny |
| * thrashing file LRU becomes infinitely more attractive than |
| * anon pages. Try to detect this based on file LRU size. |
| */ |
| if (!cgroup_reclaim(sc)) { |
| unsigned long total_high_wmark = 0; |
| unsigned long free, anon; |
| int z; |
| |
| free = sum_zone_node_page_state(pgdat->node_id, NR_FREE_PAGES); |
| file = node_page_state(pgdat, NR_ACTIVE_FILE) + |
| node_page_state(pgdat, NR_INACTIVE_FILE); |
| |
| for (z = 0; z < MAX_NR_ZONES; z++) { |
| struct zone *zone = &pgdat->node_zones[z]; |
| |
| if (!managed_zone(zone)) |
| continue; |
| |
| total_high_wmark += high_wmark_pages(zone); |
| } |
| |
| /* |
| * Consider anon: if that's low too, this isn't a |
| * runaway file reclaim problem, but rather just |
| * extreme pressure. Reclaim as per usual then. |
| */ |
| anon = node_page_state(pgdat, NR_INACTIVE_ANON); |
| |
| sc->file_is_tiny = |
| file + free <= total_high_wmark && |
| !(sc->may_deactivate & DEACTIVATE_ANON) && |
| anon >> sc->priority; |
| } |
| } |
| |
| /* |
| * Determine how aggressively the anon and file LRU lists should be |
| * scanned. The relative value of each set of LRU lists is determined |
| * by looking at the fraction of the pages scanned we did rotate back |
| * onto the active list instead of evict. |
| * |
| * nr[0] = anon inactive pages to scan; nr[1] = anon active pages to scan |
| * nr[2] = file inactive pages to scan; nr[3] = file active pages to scan |
| */ |
| static void get_scan_count(struct lruvec *lruvec, struct scan_control *sc, |
| unsigned long *nr) |
| { |
| struct pglist_data *pgdat = lruvec_pgdat(lruvec); |
| struct mem_cgroup *memcg = lruvec_memcg(lruvec); |
| unsigned long anon_cost, file_cost, total_cost; |
| int swappiness = mem_cgroup_swappiness(memcg); |
| u64 fraction[ANON_AND_FILE]; |
| u64 denominator = 0; /* gcc */ |
| enum scan_balance scan_balance; |
| unsigned long ap, fp; |
| enum lru_list lru; |
| bool balance_anon_file_reclaim = false; |
| |
| /* If we have no swap space, do not bother scanning anon pages. */ |
| if (!sc->may_swap || !can_reclaim_anon_pages(memcg, pgdat->node_id, sc)) { |
| scan_balance = SCAN_FILE; |
| goto out; |
| } |
| |
| trace_android_vh_tune_swappiness(&swappiness); |
| /* |
| * Global reclaim will swap to prevent OOM even with no |
| * swappiness, but memcg users want to use this knob to |
| * disable swapping for individual groups completely when |
| * using the memory controller's swap limit feature would be |
| * too expensive. |
| */ |
| if (cgroup_reclaim(sc) && !swappiness) { |
| scan_balance = SCAN_FILE; |
| goto out; |
| } |
| |
| /* |
| * Do not apply any pressure balancing cleverness when the |
| * system is close to OOM, scan both anon and file equally |
| * (unless the swappiness setting disagrees with swapping). |
| */ |
| if (!sc->priority && swappiness) { |
| scan_balance = SCAN_EQUAL; |
| goto out; |
| } |
| |
| /* |
| * If the system is almost out of file pages, force-scan anon. |
| */ |
| if (sc->file_is_tiny) { |
| scan_balance = SCAN_ANON; |
| goto out; |
| } |
| |
| trace_android_rvh_set_balance_anon_file_reclaim(&balance_anon_file_reclaim); |
| |
| /* |
| * If there is enough inactive page cache, we do not reclaim |
	 * anything from the anonymous workingset right now. But when balancing
	 * anon and page cache files for reclaim, allow swapping of anon pages
	 * even if there are plenty of inactive file cache pages.
| */ |
| if (!balance_anon_file_reclaim && sc->cache_trim_mode) { |
| scan_balance = SCAN_FILE; |
| goto out; |
| } |
| |
| scan_balance = SCAN_FRACT; |
| /* |
| * Calculate the pressure balance between anon and file pages. |
| * |
| * The amount of pressure we put on each LRU is inversely |
| * proportional to the cost of reclaiming each list, as |
| * determined by the share of pages that are refaulting, times |
| * the relative IO cost of bringing back a swapped out |
| * anonymous page vs reloading a filesystem page (swappiness). |
| * |
| * Although we limit that influence to ensure no list gets |
| * left behind completely: at least a third of the pressure is |
| * applied, before swappiness. |
| * |
| * With swappiness at 100, anon and file have equal IO cost. |
| */ |
| total_cost = sc->anon_cost + sc->file_cost; |
| anon_cost = total_cost + sc->anon_cost; |
| file_cost = total_cost + sc->file_cost; |
| total_cost = anon_cost + file_cost; |
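
	/*
	 * Folding the combined cost into each side clamps either share to
	 * between one third and two thirds of the total: e.g. with
	 * sc->anon_cost == 100 and sc->file_cost == 0 at swappiness 100,
	 * ap:fp works out to roughly 1:2, so the refaulting anon list
	 * still takes a third of the pressure.
	 */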
| |
| ap = swappiness * (total_cost + 1); |
| ap /= anon_cost + 1; |
| |
| fp = (200 - swappiness) * (total_cost + 1); |
| fp /= file_cost + 1; |
| |
| fraction[0] = ap; |
| fraction[1] = fp; |
| denominator = ap + fp; |
| out: |
| trace_android_vh_tune_scan_type((char *)(&scan_balance)); |
| trace_android_vh_tune_memcg_scan_type(memcg, (char *)(&scan_balance)); |
| for_each_evictable_lru(lru) { |
| int file = is_file_lru(lru); |
| unsigned long lruvec_size; |
| unsigned long low, min; |
| unsigned long scan; |
| |
| lruvec_size = lruvec_lru_size(lruvec, lru, sc->reclaim_idx); |
| mem_cgroup_protection(sc->target_mem_cgroup, memcg, |
| &min, &low); |
| |
| if (min || low) { |
| /* |
| * Scale a cgroup's reclaim pressure by proportioning |
| * its current usage to its memory.low or memory.min |
| * setting. |
| * |
| * This is important, as otherwise scanning aggression |
| * becomes extremely binary -- from nothing as we |
| * approach the memory protection threshold, to totally |
| * nominal as we exceed it. This results in requiring |
| * setting extremely liberal protection thresholds. It |
| * also means we simply get no protection at all if we |
| * set it too low, which is not ideal. |
| * |
| * If there is any protection in place, we reduce scan |
| * pressure by how much of the total memory used is |
| * within protection thresholds. |
| * |
| * There is one special case: in the first reclaim pass, |
| * we skip over all groups that are within their low |
| * protection. If that fails to reclaim enough pages to |
| * satisfy the reclaim goal, we come back and override |
| * the best-effort low protection. However, we still |
| * ideally want to honor how well-behaved groups are in |
| * that case instead of simply punishing them all |
| * equally. As such, we reclaim them based on how much |
| * memory they are using, reducing the scan pressure |
| * again by how much of the total memory used is under |
| * hard protection. |
| */ |
| unsigned long cgroup_size = mem_cgroup_size(memcg); |
| unsigned long protection; |
| |
| /* memory.low scaling, make sure we retry before OOM */ |
| if (!sc->memcg_low_reclaim && low > min) { |
| protection = low; |
| sc->memcg_low_skipped = 1; |
| } else { |
| protection = min; |
| } |
| |
| /* Avoid TOCTOU with earlier protection check */ |
| cgroup_size = max(cgroup_size, protection); |
| |
| scan = lruvec_size - lruvec_size * protection / |
| (cgroup_size + 1); |
| |
| /* |
| * Minimally target SWAP_CLUSTER_MAX pages to keep |
| * reclaim moving forwards, avoiding decrementing |
| * sc->priority further than desirable. |
| */ |
| scan = max(scan, SWAP_CLUSTER_MAX); |
| } else { |
| scan = lruvec_size; |
| } |
| |
| scan >>= sc->priority; |
| |
| /* |
| * If the cgroup's already been deleted, make sure to |
| * scrape out the remaining cache. |
| */ |
| if (!scan && !mem_cgroup_online(memcg)) |
| scan = min(lruvec_size, SWAP_CLUSTER_MAX); |
| |
| switch (scan_balance) { |
| case SCAN_EQUAL: |
| /* Scan lists relative to size */ |
| break; |
| case SCAN_FRACT: |
| /* |
| * Scan types proportional to swappiness and |
| * their relative recent reclaim efficiency. |
| * Make sure we don't miss the last page on |
| * the offlined memory cgroups because of a |
| * round-off error. |
| */ |
| scan = mem_cgroup_online(memcg) ? |
| div64_u64(scan * fraction[file], denominator) : |
| DIV64_U64_ROUND_UP(scan * fraction[file], |
| denominator); |
| break; |
| case SCAN_FILE: |
| case SCAN_ANON: |
| /* Scan one type exclusively */ |
| if ((scan_balance == SCAN_FILE) != file) |
| scan = 0; |
| break; |
| default: |
| /* Look ma, no brain */ |
| BUG(); |
| } |
| |
| nr[lru] = scan; |
| } |
| } |
| |
| /* |
| * Anonymous LRU management is a waste if there is |
| * ultimately no way to reclaim the memory. |
| */ |
| static bool can_age_anon_pages(struct pglist_data *pgdat, |
| struct scan_control *sc) |
| { |
| /* Aging the anon LRU is valuable if swap is present: */ |
| if (total_swap_pages > 0) |
| return true; |
| |
| /* Also valuable if anon pages can be demoted: */ |
| return can_demote(pgdat->node_id, sc); |
| } |
| |
| #ifdef CONFIG_LRU_GEN |
| |
| #ifdef CONFIG_LRU_GEN_ENABLED |
| DEFINE_STATIC_KEY_ARRAY_TRUE(lru_gen_caps, NR_LRU_GEN_CAPS); |
| #else |
| DEFINE_STATIC_KEY_ARRAY_FALSE(lru_gen_caps, NR_LRU_GEN_CAPS); |
| #endif |
| |
| /****************************************************************************** |
| * shorthand helpers |
| ******************************************************************************/ |
| |
| #define DEFINE_MAX_SEQ(lruvec) \ |
| unsigned long max_seq = READ_ONCE((lruvec)->lrugen.max_seq) |
| |
| #define DEFINE_MIN_SEQ(lruvec) \ |
| unsigned long min_seq[ANON_AND_FILE] = { \ |
| READ_ONCE((lruvec)->lrugen.min_seq[LRU_GEN_ANON]), \ |
| READ_ONCE((lruvec)->lrugen.min_seq[LRU_GEN_FILE]), \ |
| } |
| |
| #define for_each_gen_type_zone(gen, type, zone) \ |
| for ((gen) = 0; (gen) < MAX_NR_GENS; (gen)++) \ |
| for ((type) = 0; (type) < ANON_AND_FILE; (type)++) \ |
| for ((zone) = 0; (zone) < MAX_NR_ZONES; (zone)++) |
| |
| static int page_lru_gen(struct page *page) |
| { |
| unsigned long flags = READ_ONCE(page->flags); |
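
	/* a gen counter of zero means the page is not on a lru_gen list */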
| |
| return ((flags & LRU_GEN_MASK) >> LRU_GEN_PGOFF) - 1; |
| } |
| |
| static int page_lru_tier(struct page *page) |
| { |
| int refs; |
| unsigned long flags = READ_ONCE(page->flags); |
| |
| refs = (flags & LRU_REFS_FLAGS) == LRU_REFS_FLAGS ? |
| ((flags & LRU_REFS_MASK) >> LRU_REFS_PGOFF) + 1 : 0; |
| |
| return lru_tier_from_refs(refs); |
| } |
| |
| static bool get_cap(int cap) |
| { |
| #ifdef CONFIG_LRU_GEN_ENABLED |
| return static_branch_likely(&lru_gen_caps[cap]); |
| #else |
| return static_branch_unlikely(&lru_gen_caps[cap]); |
| #endif |
| } |
| |
| static struct lruvec *get_lruvec(struct mem_cgroup *memcg, int nid) |
| { |
| struct pglist_data *pgdat = NODE_DATA(nid); |
| |
| #ifdef CONFIG_MEMCG |
| if (memcg) { |
| struct lruvec *lruvec = &memcg->nodeinfo[nid]->lruvec; |
| |
| /* for hotadd_new_pgdat() */ |
| if (!lruvec->pgdat) |
| lruvec->pgdat = pgdat; |
| |
| return lruvec; |
| } |
| #endif |
| VM_BUG_ON(!mem_cgroup_disabled()); |
| |
| return pgdat ? &pgdat->__lruvec : NULL; |
| } |
| |
| static int get_swappiness(struct lruvec *lruvec, struct scan_control *sc) |
| { |
| struct mem_cgroup *memcg = lruvec_memcg(lruvec); |
| struct pglist_data *pgdat = lruvec_pgdat(lruvec); |
| |
| if (!can_demote(pgdat->node_id, sc) && |
| mem_cgroup_get_nr_swap_pages(memcg) < MIN_LRU_BATCH) |
| return 0; |
| |
| return mem_cgroup_swappiness(memcg); |
| } |
| |
| static int get_nr_gens(struct lruvec *lruvec, int type) |
| { |
| return lruvec->lrugen.max_seq - lruvec->lrugen.min_seq[type] + 1; |
| } |
| |
| static bool __maybe_unused seq_is_valid(struct lruvec *lruvec) |
| { |
| /* see the comment on lru_gen_struct */ |
| return get_nr_gens(lruvec, LRU_GEN_FILE) >= MIN_NR_GENS && |
| get_nr_gens(lruvec, LRU_GEN_FILE) <= get_nr_gens(lruvec, LRU_GEN_ANON) && |
| get_nr_gens(lruvec, LRU_GEN_ANON) <= MAX_NR_GENS; |
| } |
| |
| /****************************************************************************** |
| * mm_struct list |
| ******************************************************************************/ |
| |
| static struct lru_gen_mm_list *get_mm_list(struct mem_cgroup *memcg) |
| { |
| static struct lru_gen_mm_list mm_list = { |
| .fifo = LIST_HEAD_INIT(mm_list.fifo), |
| .lock = __SPIN_LOCK_UNLOCKED(mm_list.lock), |
| }; |
| |
| #ifdef CONFIG_MEMCG |
| if (memcg) |
| return &memcg->mm_list; |
| #endif |
| VM_BUG_ON(!mem_cgroup_disabled()); |
| |
| return &mm_list; |
| } |
| |
| void lru_gen_add_mm(struct mm_struct *mm) |
| { |
| int nid; |
| struct mem_cgroup *memcg = get_mem_cgroup_from_mm(mm); |
| struct lru_gen_mm_list *mm_list = get_mm_list(memcg); |
| |
| VM_BUG_ON_MM(!list_empty(&mm->lru_gen.list), mm); |
| #ifdef CONFIG_MEMCG |
| VM_BUG_ON_MM(mm->lru_gen.memcg, mm); |
| mm->lru_gen.memcg = memcg; |
| #endif |
| spin_lock(&mm_list->lock); |
| |
| for_each_node_state(nid, N_MEMORY) { |
| struct lruvec *lruvec = get_lruvec(memcg, nid); |
| |
| if (!lruvec) |
| continue; |
| |
| if (lruvec->mm_state.tail == &mm_list->fifo) |
| lruvec->mm_state.tail = &mm->lru_gen.list; |
| } |
| |
| list_add_tail(&mm->lru_gen.list, &mm_list->fifo); |
| |
| spin_unlock(&mm_list->lock); |
| } |
| |
| void lru_gen_del_mm(struct mm_struct *mm) |
| { |
| int nid; |
| struct lru_gen_mm_list *mm_list; |
| struct mem_cgroup *memcg = NULL; |
| |
| if (list_empty(&mm->lru_gen.list)) |
| return; |
| |
| #ifdef CONFIG_MEMCG |
| memcg = mm->lru_gen.memcg; |
| #endif |
| mm_list = get_mm_list(memcg); |
| |
| spin_lock(&mm_list->lock); |
| |
| for_each_node(nid) { |
| struct lruvec *lruvec = get_lruvec(memcg, nid); |
| |
| if (!lruvec) |
| continue; |
| |
| if (lruvec->mm_state.tail == &mm->lru_gen.list) |
| lruvec->mm_state.tail = lruvec->mm_state.tail->next; |
| |
| if (lruvec->mm_state.head != &mm->lru_gen.list) |
| continue; |
| |
| lruvec->mm_state.head = lruvec->mm_state.head->next; |
| if (lruvec->mm_state.head == &mm_list->fifo) |
| WRITE_ONCE(lruvec->mm_state.seq, lruvec->mm_state.seq + 1); |
| } |
| |
| list_del_init(&mm->lru_gen.list); |
| |
| spin_unlock(&mm_list->lock); |
| |
| #ifdef CONFIG_MEMCG |
| mem_cgroup_put(mm->lru_gen.memcg); |
| mm->lru_gen.memcg = NULL; |
| #endif |
| } |
| |
| #ifdef CONFIG_MEMCG |
| void lru_gen_migrate_mm(struct mm_struct *mm) |
| { |
| struct mem_cgroup *memcg; |
| |
| lockdep_assert_held(&mm->owner->alloc_lock); |
| |
| /* for mm_update_next_owner() */ |
| if (mem_cgroup_disabled()) |
| return; |
| |
| rcu_read_lock(); |
| memcg = mem_cgroup_from_task(mm->owner); |
| rcu_read_unlock(); |
| if (memcg == mm->lru_gen.memcg) |
| return; |
| |
| VM_BUG_ON_MM(!mm->lru_gen.memcg, mm); |
| VM_BUG_ON_MM(list_empty(&mm->lru_gen.list), mm); |
| |
| lru_gen_del_mm(mm); |
| lru_gen_add_mm(mm); |
| } |
| #endif |
| |
| /* |
| * Bloom filters with m=1<<15, k=2 and the false positive rates of ~1/5 when |
| * n=10,000 and ~1/2 when n=20,000, where, conventionally, m is the number of |
| * bits in a bitmap, k is the number of hash functions and n is the number of |
| * inserted items. |
| * |
| * Page table walkers use one of the two filters to reduce their search space. |
| * To get rid of non-leaf entries that no longer have enough leaf entries, the |
| * aging uses the double-buffering technique to flip to the other filter each |
| * time it produces a new generation. For non-leaf entries that have enough |
| * leaf entries, the aging carries them over to the next generation in |
 * walk_pmd_range(); the eviction also reports them when walking the rmap
| * in lru_gen_look_around(). |
| * |
| * For future optimizations: |
| * 1. It's not necessary to keep both filters all the time. The spare one can be |
| * freed after the RCU grace period and reallocated if needed again. |
 * 2. When reallocating, it's worth scaling its size according to the number
| * of inserted entries in the other filter, to reduce the memory overhead on |
| * small systems and false positives on large systems. |
| * 3. Jenkins' hash function is an alternative to Knuth's. |
| */ |
| #define BLOOM_FILTER_SHIFT 15 |
| |
| static inline int filter_gen_from_seq(unsigned long seq) |
| { |
| return seq % NR_BLOOM_FILTERS; |
| } |
| |
| static void get_item_key(void *item, int *key) |
| { |
| u32 hash = hash_ptr(item, BLOOM_FILTER_SHIFT * 2); |
| |
| BUILD_BUG_ON(BLOOM_FILTER_SHIFT * 2 > BITS_PER_TYPE(u32)); |
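
	/* split the hash into two 15-bit keys, i.e. the k=2 hash functions */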
| |
| key[0] = hash & (BIT(BLOOM_FILTER_SHIFT) - 1); |
| key[1] = hash >> BLOOM_FILTER_SHIFT; |
| } |
| |
| static void reset_bloom_filter(struct lruvec *lruvec, unsigned long seq) |
| { |
| unsigned long *filter; |
| int gen = filter_gen_from_seq(seq); |
| |
| lockdep_assert_held(&get_mm_list(lruvec_memcg(lruvec))->lock); |
| |
| filter = lruvec->mm_state.filters[gen]; |
| if (filter) { |
| bitmap_clear(filter, 0, BIT(BLOOM_FILTER_SHIFT)); |
| return; |
| } |
| |
| filter = bitmap_zalloc(BIT(BLOOM_FILTER_SHIFT), GFP_ATOMIC); |
| WRITE_ONCE(lruvec->mm_state.filters[gen], filter); |
| } |
| |
| static void update_bloom_filter(struct lruvec *lruvec, unsigned long seq, void *item) |
| { |
| int key[2]; |
| unsigned long *filter; |
| int gen = filter_gen_from_seq(seq); |
| |
| filter = READ_ONCE(lruvec->mm_state.filters[gen]); |
| if (!filter) |
| return; |
| |
| get_item_key(item, key); |
| |
| if (!test_bit(key[0], filter)) |
| set_bit(key[0], filter); |
| if (!test_bit(key[1], filter)) |
| set_bit(key[1], filter); |
| } |
| |
| static bool test_bloom_filter(struct lruvec *lruvec, unsigned long seq, void *item) |
| { |
| int key[2]; |
| unsigned long *filter; |
| int gen = filter_gen_from_seq(seq); |
| |
| filter = READ_ONCE(lruvec->mm_state.filters[gen]); |
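	/* no filter allocated yet: report a hit so callers skip nothing */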
| if (!filter) |
| return true; |
| |
| get_item_key(item, key); |
| |
| return test_bit(key[0], filter) && test_bit(key[1], filter); |
| } |
| |
| static void reset_mm_stats(struct lruvec *lruvec, struct lru_gen_mm_walk *walk, bool last) |
| { |
| int i; |
| int hist; |
| |
| lockdep_assert_held(&get_mm_list(lruvec_memcg(lruvec))->lock); |
| |
| if (walk) { |
| hist = lru_hist_from_seq(walk->max_seq); |
| |
| for (i = 0; i < NR_MM_STATS; i++) { |
| WRITE_ONCE(lruvec->mm_state.stats[hist][i], |
| lruvec->mm_state.stats[hist][i] + walk->mm_stats[i]); |
| walk->mm_stats[i] = 0; |
| } |
| } |
| |
| if (NR_HIST_GENS > 1 && last) { |
| hist = lru_hist_from_seq(lruvec->mm_state.seq + 1); |
| |
| for (i = 0; i < NR_MM_STATS; i++) |
| WRITE_ONCE(lruvec->mm_state.stats[hist][i], 0); |
| } |
| } |
| |
| static bool should_skip_mm(struct mm_struct *mm, struct lru_gen_mm_walk *walk) |
| { |
| int type; |
| unsigned long size = 0; |
| struct pglist_data *pgdat = lruvec_pgdat(walk->lruvec); |
| |
| if (!walk->full_scan && cpumask_empty(mm_cpumask(mm)) && |
| !node_isset(pgdat->node_id, mm->lru_gen.nodes)) |
| return true; |
| |
| node_clear(pgdat->node_id, mm->lru_gen.nodes); |
| |
| for (type = !walk->can_swap; type < ANON_AND_FILE; type++) { |
| size += type ? get_mm_counter(mm, MM_FILEPAGES) : |
| get_mm_counter(mm, MM_ANONPAGES) + |
| get_mm_counter(mm, MM_SHMEMPAGES); |
| } |
| |
| if (size < MIN_LRU_BATCH) |
| return true; |
| |
| if (mm_is_oom_victim(mm)) |
| return true; |
| |
| return !mmget_not_zero(mm); |
| } |
|