@@ -67,6 +67,34 @@
 #include <asm/div64.h>
 #include "internal.h"
 
+#ifdef CONFIG_DEBUG_FS
+/* Last chance counters available via debugfs */
+struct last_chance_stat {
+	unsigned long flag_set;
+	unsigned long retry_exercised;
+	unsigned long reclaim_exercised;
+};
+
+static DEFINE_PER_CPU(struct last_chance_stat, last_chance_stats[MAX_ORDER]);
+
+static inline void last_chance_flag_set(int order)
+{
+	this_cpu_inc(last_chance_stats[order].flag_set);
+}
+static inline void last_chance_retry_exercised(int order)
+{
+	this_cpu_inc(last_chance_stats[order].retry_exercised);
+}
+static inline void last_chance_reclaim_exercised(int order)
+{
+	this_cpu_inc(last_chance_stats[order].reclaim_exercised);
+}
+#else
+static inline void last_chance_flag_set(int order) {}
+static inline void last_chance_retry_exercised(int order) {}
+static inline void last_chance_reclaim_exercised(int order) {}
+#endif
+
 /* prevent >1 _updater_ of zone percpu pageset ->high and ->batch fields */
 static DEFINE_MUTEX(pcp_batch_high_lock);
 #define MIN_PERCPU_PAGELIST_FRACTION	(8)
@@ -2298,6 +2326,7 @@ get_page_from_freelist(gfp_t gfp_mask, unsigned int order, int alloc_flags,
 		 */
 		skip_reclaim = false;
 		zonelist_rescan = true;
+		last_chance_reclaim_exercised(order);
 	}
 
 	if (zonelist_rescan)
@@ -2413,8 +2442,10 @@ should_alloc_retry(gfp_t gfp_mask, unsigned int order,
 	 * retry scenario.
 	 */
 	(*alloc_retries)++;
-	if (did_some_progress && *alloc_retries <= order)
+	if (did_some_progress && *alloc_retries <= order) {
+		last_chance_retry_exercised(order);
 		return 1;
+	}
 
 	return 0;
 }
@@ -2843,7 +2874,10 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
 		goto got_pg;
 
 	/* Make last chance efforts before failing or on retry */
-	alloc_flags |= ALLOC_LAST_CHANCE;
+	if (!(alloc_flags & ALLOC_LAST_CHANCE)) {
+		alloc_flags |= ALLOC_LAST_CHANCE;
+		last_chance_flag_set(order);
+	}
 
 	/* Check if we should retry the allocation */
 	pages_reclaimed += did_some_progress;
@@ -6767,3 +6801,96 @@ bool is_free_buddy_page(struct page *page)
 	return order < MAX_ORDER;
 }
 #endif
+
+#ifdef CONFIG_DEBUG_FS
+#include <linux/debugfs.h>
+/* Last chance debugfs support */
+static int last_chance_stats_show(struct seq_file *m, void *v)
+{
+	int i, cpu;
+	struct last_chance_stat total_stats[MAX_ORDER];
+
+	memset(total_stats, 0, sizeof(total_stats));
+
+	for_each_possible_cpu(cpu) {
+		for (i = 0; i < MAX_ORDER; i++) {
+			total_stats[i].flag_set +=
+				per_cpu(last_chance_stats[i].flag_set, cpu);
+			total_stats[i].retry_exercised +=
+				per_cpu(last_chance_stats[i].retry_exercised,
+					cpu);
+			total_stats[i].reclaim_exercised +=
+				per_cpu(last_chance_stats[i].reclaim_exercised,
+					cpu);
+		}
+	}
+
+	seq_printf(m, "flag_set:         ");
+	for (i = 0; i < MAX_ORDER; i++)
+		seq_printf(m, " %8lu", total_stats[i].flag_set);
+	seq_printf(m, "\n");
+	seq_printf(m, "retry_exercised:  ");
+	for (i = 0; i < MAX_ORDER; i++)
+		seq_printf(m, " %8lu", total_stats[i].retry_exercised);
+	seq_printf(m, "\n");
+	seq_printf(m, "reclaim_exercised:");
+	for (i = 0; i < MAX_ORDER; i++)
+		seq_printf(m, " %8lu", total_stats[i].reclaim_exercised);
+	seq_printf(m, "\n");
+
+	return 0;
+}
+
+static int last_chance_stats_open(struct inode *inode, struct file *filp)
+{
+	return single_open(filp, last_chance_stats_show, NULL);
+}
+
+static const struct file_operations last_chance_stats_fops = {
+	.open = last_chance_stats_open,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.release = single_release,
+};
+
+static struct dentry *last_chance_dir;
+int last_chance_create_debugfs(void)
+{
+	struct dentry *last_chance_stats_file;
+
+	last_chance_dir = debugfs_create_dir("alloc_last_chance", NULL);
+	if (!last_chance_dir) {
+		pr_warn("mm: error creating last_chance debugfs entry\n");
+		return -ENOMEM;
+	}
+
+	last_chance_stats_file = debugfs_create_file("stats", 0444,
+			last_chance_dir, NULL, &last_chance_stats_fops);
+	if (IS_ERR(last_chance_stats_file)) {
+		pr_warn("mm: error creating last_chance debugfs entry\n");
+		debugfs_remove_recursive(last_chance_dir);
+		return -ENOMEM;
+	}
+
+	return 0;
+}
+
+static int __init last_chance_init(void)
+{
+	int i, cpu;
+
+	for_each_possible_cpu(cpu) {
+		for (i = 0; i < MAX_ORDER; i++) {
+			per_cpu(last_chance_stats[i].flag_set, cpu) = 0;
+			per_cpu(last_chance_stats[i].retry_exercised, cpu) = 0;
+			per_cpu(last_chance_stats[i].reclaim_exercised,
+				cpu) = 0;
+		}
+	}
+
+	last_chance_create_debugfs();
+
+	return 0;
+}
+module_init(last_chance_init);
+#endif
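
With this applied and CONFIG_DEBUG_FS enabled, the counters can be read back from userspace through the file the patch creates. Below is a minimal sketch, not part of the patch: it assumes debugfs is mounted at its default location /sys/kernel/debug, and the alloc_last_chance/stats path is taken from the debugfs_create_dir()/debugfs_create_file() calls above.

/*
 * Hypothetical userspace reader for the stats file created by this patch.
 * Assumes debugfs is mounted at the default /sys/kernel/debug.
 */
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	FILE *f = fopen("/sys/kernel/debug/alloc_last_chance/stats", "r");
	char line[512];

	if (!f) {
		perror("fopen");
		return EXIT_FAILURE;
	}

	/*
	 * Three rows: flag_set, retry_exercised, reclaim_exercised,
	 * each with one column per allocation order (0 .. MAX_ORDER-1).
	 */
	while (fgets(line, sizeof(line), f))
		fputs(line, stdout);

	fclose(f);
	return EXIT_SUCCESS;
}

Since each row carries one column per order, a growing count in a high-order column shows which allocation size is repeatedly hitting the last-chance path.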