While it may be nice to keep a copy of pages in swap once they have been
written there, the more garbage we leave in the swap space, the slower any
further writes to and reads from it become. Just free the swapcache whenever
we can.

-ck

---
 include/linux/swap.h |    2 +-
 mm/memory.c          |    2 +-
 mm/swapfile.c        |    9 ++++-----
 mm/vmscan.c          |    2 +-
 4 files changed, 7 insertions(+), 8 deletions(-)

Index: linux-3.2-ck1/include/linux/swap.h
===================================================================
--- linux-3.2-ck1.orig/include/linux/swap.h	2012-01-16 10:07:31.893096990 +1100
+++ linux-3.2-ck1/include/linux/swap.h	2012-01-16 10:07:32.052096979 +1100
@@ -201,7 +201,7 @@ struct swap_list_t {
 	int next;	/* swapfile to be used next */
 };
 
-/* Swap 50% full? Release swapcache more aggressively.. */
+/* Swap 50% full? */
 #define vm_swap_full() (nr_swap_pages*2 < total_swap_pages)
 
 /* linux/mm/page_alloc.c */
Index: linux-3.2-ck1/mm/memory.c
===================================================================
--- linux-3.2-ck1.orig/mm/memory.c	2012-01-16 10:07:27.745097280 +1100
+++ linux-3.2-ck1/mm/memory.c	2012-01-16 10:07:32.052096979 +1100
@@ -2984,7 +2984,7 @@ static int do_swap_page(struct mm_struct
 	mem_cgroup_commit_charge_swapin(page, ptr);
 
 	swap_free(entry);
-	if (vm_swap_full() || (vma->vm_flags & VM_LOCKED) || PageMlocked(page))
+	if ((vma->vm_flags & VM_LOCKED) || PageMlocked(page))
 		try_to_free_swap(page);
 	unlock_page(page);
 	if (swapcache) {
Index: linux-3.2-ck1/mm/swapfile.c
===================================================================
--- linux-3.2-ck1.orig/mm/swapfile.c	2012-01-16 10:07:27.745097280 +1100
+++ linux-3.2-ck1/mm/swapfile.c	2012-01-16 10:07:32.053096979 +1100
@@ -288,7 +288,7 @@ checks:
 		scan_base = offset = si->lowest_bit;
 
 	/* reuse swap entry of cache-only swap if not busy. */
-	if (vm_swap_full() && si->swap_map[offset] == SWAP_HAS_CACHE) {
+	if (si->swap_map[offset] == SWAP_HAS_CACHE) {
 		int swap_was_freed;
 		spin_unlock(&swap_lock);
 		swap_was_freed = __try_to_reclaim_swap(si, offset);
@@ -377,7 +377,7 @@ scan:
 			spin_lock(&swap_lock);
 			goto checks;
 		}
-		if (vm_swap_full() && si->swap_map[offset] == SWAP_HAS_CACHE) {
+		if (si->swap_map[offset] == SWAP_HAS_CACHE) {
 			spin_lock(&swap_lock);
 			goto checks;
 		}
@@ -392,7 +392,7 @@ scan:
 			spin_lock(&swap_lock);
 			goto checks;
 		}
-		if (vm_swap_full() && si->swap_map[offset] == SWAP_HAS_CACHE) {
+		if (si->swap_map[offset] == SWAP_HAS_CACHE) {
 			spin_lock(&swap_lock);
 			goto checks;
 		}
@@ -706,8 +706,7 @@ int free_swap_and_cache(swp_entry_t entr
 		 * Not mapped elsewhere, or swap space full? Free it!
 		 * Also recheck PageSwapCache now page is locked (above).
 		 */
-		if (PageSwapCache(page) && !PageWriteback(page) &&
-				(!page_mapped(page) || vm_swap_full())) {
+		if (PageSwapCache(page) && !PageWriteback(page)) {
 			delete_from_swap_cache(page);
 			SetPageDirty(page);
 		}
Index: linux-3.2-ck1/mm/vmscan.c
===================================================================
--- linux-3.2-ck1.orig/mm/vmscan.c	2012-01-16 10:07:31.737097001 +1100
+++ linux-3.2-ck1/mm/vmscan.c	2012-01-16 10:07:32.054096979 +1100
@@ -986,7 +986,7 @@ cull_mlocked:
 
 activate_locked:
 		/* Not a candidate for swapping, so reclaim swap space. */
-		if (PageSwapCache(page) && vm_swap_full())
+		if (PageSwapCache(page))
 			try_to_free_swap(page);
 		VM_BUG_ON(PageActive(page));
 		SetPageActive(page);
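
For context, the heuristic these hunks stop relying on is the vm_swap_full()
test, which treats swap as "full" only once more than half of it is
allocated; until then the checks in mm/swapfile.c, mm/vmscan.c and
free_swap_and_cache() would hold on to swapcache. The macro itself (shown in
the include/linux/swap.h hunk) stays; only its callers change. Below is a
minimal userspace sketch of that half-full check; the two counters are
illustrative stand-ins for the kernel's free and total swap page counts, not
the real kernel symbols.

#include <stdbool.h>
#include <stdio.h>

/* Illustrative stand-ins for the kernel's free/total swap page counts. */
static long nr_swap_pages = 300;	/* swap pages still free */
static long total_swap_pages = 1000;	/* swap pages configured in total */

/* Same expression as the vm_swap_full() macro: "full" once over half used. */
static bool swap_more_than_half_full(void)
{
	return nr_swap_pages * 2 < total_swap_pages;
}

int main(void)
{
	/* 300 of 1000 pages free means 70% in use, so the check fires. */
	printf("swap more than half full: %s\n",
	       swap_more_than_half_full() ? "yes" : "no");
	return 0;
}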