diff --git a/mm/vmscan.c b/mm/vmscan.c
index 2447fe5fa37253e2e99293c967391bd68dc2fd8e..3fe6b3d1a89d66fc8b5214dfcad980e13b1b9a3f 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1976,7 +1976,7 @@ static void handle_reclaim_writeback(unsigned long nr_taken,
 	 * the flushers simply cannot keep up with the allocation
 	 * rate. Nudge the flusher threads in case they are asleep.
 	 */
-	if (stat->nr_unqueued_dirty == nr_taken && nr_taken) {
+	if (stat->nr_unqueued_dirty == nr_taken) {
 		wakeup_flusher_threads(WB_REASON_VMSCAN);
 		/*
 		 * For cgroupv1 dirty throttling is achieved by waking up
@@ -4832,7 +4832,13 @@ static int isolate_folios(unsigned long nr_to_scan, struct lruvec *lruvec,
 			break;
 		}
-		type = !type;
+		/*
+		 * If type_scan > 0 and isolated == 0, avoid falling back to the
+		 * other type, as this type remains sufficient. Falling back
+		 * too readily can disrupt the positive_ctrl_err() bias.
+		 */
+		if (!type_scan)
+			type = !type;
 	}
 
 	return scanned;
 }
@@ -4874,7 +4880,9 @@ static int evict_folios(unsigned long nr_to_scan, struct lruvec *lruvec,
 retry:
 	reclaimed = shrink_folio_list(&list, pgdat, sc, &stat, false);
 	sc->nr_reclaimed += reclaimed;
-	handle_reclaim_writeback(isolated, pgdat, sc, &stat);
+	/* Retry pass is only meant for clean folios without new isolation */
+	if (isolated)
+		handle_reclaim_writeback(isolated, pgdat, sc, &stat);
 
 	list_for_each_entry_safe_reverse(folio, next, &list, lru) {
 		DEFINE_MIN_SEQ(lruvec);
@@ -5006,9 +5014,6 @@ static bool try_to_shrink_lruvec(struct lruvec *lruvec, struct scan_control *sc)
 		struct mem_cgroup *memcg = lruvec_memcg(lruvec);
 
 		nr_to_scan = get_nr_to_scan(lruvec, sc, memcg, swappiness);
-		if (!nr_to_scan)
-			need_rotate = true;
-
 		while (nr_to_scan > 0) {
 			int delta;
 			DEFINE_MAX_SEQ(lruvec);