
Commit 1251704

Merge branch 'akpm' (patches from Andrew)
Merge misc fixes from Andrew Morton: "15 fixes"

* emailed patches from Andrew Morton <[email protected]>:
  mm, docs: update memory.stat description with workingset* entries
  mm: vmscan: scan until it finds eligible pages
  mm, thp: copying user pages must schedule on collapse
  dax: fix PMD data corruption when fault races with write
  dax: fix data corruption when fault races with write
  ext4: return to starting transaction in ext4_dax_huge_fault()
  mm: fix data corruption due to stale mmap reads
  dax: prevent invalidation of mapped DAX entries
  Tigran has moved
  mm, vmalloc: fix vmalloc users tracking properly
  mm/khugepaged: add missed tracepoint for collapse_huge_page_swapin
  gcov: support GCC 7.1
  mm, vmstat: Remove spurious WARN() during zoneinfo print
  time: delete current_fs_time()
  hwpoison, memcg: forcibly uncharge LRU pages
2 parents 0fcc3ab + b340959 commit 1251704

23 files changed: +145 additions, -126 deletions

Documentation/cgroup-v2.txt

Lines changed: 12 additions & 0 deletions
@@ -918,6 +918,18 @@ PAGE_SIZE multiple when read back.
 
 		Number of major page faults incurred
 
+	  workingset_refault
+
+		Number of refaults of previously evicted pages
+
+	  workingset_activate
+
+		Number of refaulted pages that were immediately activated
+
+	  workingset_nodereclaim
+
+		Number of times a shadow node has been reclaimed
+
   memory.swap.current
 
 	A read-only single value file which exists on non-root
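
The three workingset_* counters documented above appear as plain "key value" lines in memory.stat. As a rough illustration only (not part of this patch), a userspace reader could pull them out like this; the cgroup path is a placeholder:

/*
 * Minimal userspace sketch: print the workingset_* counters from a
 * cgroup v2 memory.stat file.  The path is an assumed example mount.
 */
#include <stdio.h>
#include <string.h>

int main(void)
{
	const char *path = "/sys/fs/cgroup/mygroup/memory.stat"; /* hypothetical cgroup */
	char line[256];
	FILE *f = fopen(path, "r");

	if (!f) {
		perror("fopen");
		return 1;
	}
	while (fgets(line, sizeof(line), f)) {
		/* memory.stat is "key value" per line; keep only workingset_* keys */
		if (strncmp(line, "workingset_", strlen("workingset_")) == 0)
			fputs(line, stdout);
	}
	fclose(f);
	return 0;
}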

Documentation/filesystems/bfs.txt

Lines changed: 1 addition & 1 deletion
@@ -54,4 +54,4 @@ The first 4 bytes should be 0x1badface.
 If you have any patches, questions or suggestions regarding this BFS
 implementation please contact the author:
 
-Tigran Aivazian <tigran@aivazian.fsnet.co.uk>
+Tigran Aivazian <aivazian.tigran@gmail.com>

MAINTAINERS

Lines changed: 1 addition & 1 deletion
@@ -2483,7 +2483,7 @@ S: Maintained
 F:	drivers/net/ethernet/ec_bhf.c
 
 BFS FILE SYSTEM
-M:	"Tigran A. Aivazian" <tigran@aivazian.fsnet.co.uk>
+M:	"Tigran A. Aivazian" <aivazian.tigran@gmail.com>
 S:	Maintained
 F:	Documentation/filesystems/bfs.txt
 F:	fs/bfs/

arch/x86/kernel/cpu/microcode/amd.c

Lines changed: 1 addition & 1 deletion
@@ -10,7 +10,7 @@
  *  Author: Peter Oruba <[email protected]>
  *
  *  Based on work by:
- *  Tigran Aivazian <tigran@aivazian.fsnet.co.uk>
+ *  Tigran Aivazian <aivazian.tigran@gmail.com>
  *
  *  early loader:
  *  Copyright (C) 2013 Advanced Micro Devices, Inc.

arch/x86/kernel/cpu/microcode/core.c

Lines changed: 1 addition & 1 deletion
@@ -1,7 +1,7 @@
 /*
  *	CPU Microcode Update Driver for Linux
  *
- *	Copyright (C) 2000-2006 Tigran Aivazian <tigran@aivazian.fsnet.co.uk>
+ *	Copyright (C) 2000-2006 Tigran Aivazian <aivazian.tigran@gmail.com>
  *		      2006	Shaohua Li <[email protected]>
  *		      2013-2016	Borislav Petkov <[email protected]>
  *

arch/x86/kernel/cpu/microcode/intel.c

Lines changed: 1 addition & 1 deletion
@@ -1,7 +1,7 @@
 /*
  *	Intel CPU Microcode Update Driver for Linux
  *
- *	Copyright (C) 2000-2006 Tigran Aivazian <tigran@aivazian.fsnet.co.uk>
+ *	Copyright (C) 2000-2006 Tigran Aivazian <aivazian.tigran@gmail.com>
  *		      2006	Shaohua Li <[email protected]>
  *
  *	Intel CPU microcode early update for Linux

fs/bfs/inode.c

Lines changed: 2 additions & 2 deletions
@@ -1,7 +1,7 @@
 /*
  *	fs/bfs/inode.c
  *	BFS superblock and inode operations.
- *	Copyright (C) 1999-2006 Tigran Aivazian <tigran@aivazian.fsnet.co.uk>
+ *	Copyright (C) 1999-2006 Tigran Aivazian <aivazian.tigran@gmail.com>
  *	From fs/minix, Copyright (C) 1991, 1992 Linus Torvalds.
  *
  *	Made endianness-clean by Andrew Stribblehill <[email protected]>, 2005.
@@ -19,7 +19,7 @@
 #include <linux/uaccess.h>
 #include "bfs.h"
 
-MODULE_AUTHOR("Tigran Aivazian <tigran@aivazian.fsnet.co.uk>");
+MODULE_AUTHOR("Tigran Aivazian <aivazian.tigran@gmail.com>");
 MODULE_DESCRIPTION("SCO UnixWare BFS filesystem for Linux");
 MODULE_LICENSE("GPL");
 

fs/dax.c

Lines changed: 31 additions & 60 deletions
@@ -460,35 +460,6 @@ int dax_delete_mapping_entry(struct address_space *mapping, pgoff_t index)
 	return ret;
 }
 
-/*
- * Invalidate exceptional DAX entry if easily possible. This handles DAX
- * entries for invalidate_inode_pages() so we evict the entry only if we can
- * do so without blocking.
- */
-int dax_invalidate_mapping_entry(struct address_space *mapping, pgoff_t index)
-{
-	int ret = 0;
-	void *entry, **slot;
-	struct radix_tree_root *page_tree = &mapping->page_tree;
-
-	spin_lock_irq(&mapping->tree_lock);
-	entry = __radix_tree_lookup(page_tree, index, NULL, &slot);
-	if (!entry || !radix_tree_exceptional_entry(entry) ||
-	    slot_locked(mapping, slot))
-		goto out;
-	if (radix_tree_tag_get(page_tree, index, PAGECACHE_TAG_DIRTY) ||
-	    radix_tree_tag_get(page_tree, index, PAGECACHE_TAG_TOWRITE))
-		goto out;
-	radix_tree_delete(page_tree, index);
-	mapping->nrexceptional--;
-	ret = 1;
-out:
-	spin_unlock_irq(&mapping->tree_lock);
-	if (ret)
-		dax_wake_mapping_entry_waiter(mapping, index, entry, true);
-	return ret;
-}
-
 /*
  * Invalidate exceptional DAX entry if it is clean.
  */
@@ -1044,7 +1015,7 @@ dax_iomap_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
 	 * into page tables. We have to tear down these mappings so that data
 	 * written by write(2) is visible in mmap.
 	 */
-	if ((iomap->flags & IOMAP_F_NEW) && inode->i_mapping->nrpages) {
+	if (iomap->flags & IOMAP_F_NEW) {
 		invalidate_inode_pages2_range(inode->i_mapping,
 					      pos >> PAGE_SHIFT,
 					      (end - 1) >> PAGE_SHIFT);
@@ -1177,6 +1148,12 @@ static int dax_iomap_pte_fault(struct vm_fault *vmf,
 	if ((vmf->flags & FAULT_FLAG_WRITE) && !vmf->cow_page)
 		flags |= IOMAP_WRITE;
 
+	entry = grab_mapping_entry(mapping, vmf->pgoff, 0);
+	if (IS_ERR(entry)) {
+		vmf_ret = dax_fault_return(PTR_ERR(entry));
+		goto out;
+	}
+
 	/*
 	 * Note that we don't bother to use iomap_apply here: DAX required
 	 * the file system block size to be equal the page size, which means
@@ -1185,17 +1162,11 @@ static int dax_iomap_pte_fault(struct vm_fault *vmf,
 	error = ops->iomap_begin(inode, pos, PAGE_SIZE, flags, &iomap);
 	if (error) {
 		vmf_ret = dax_fault_return(error);
-		goto out;
+		goto unlock_entry;
 	}
 	if (WARN_ON_ONCE(iomap.offset + iomap.length < pos + PAGE_SIZE)) {
-		vmf_ret = dax_fault_return(-EIO);	/* fs corruption? */
-		goto finish_iomap;
-	}
-
-	entry = grab_mapping_entry(mapping, vmf->pgoff, 0);
-	if (IS_ERR(entry)) {
-		vmf_ret = dax_fault_return(PTR_ERR(entry));
-		goto finish_iomap;
+		error = -EIO;	/* fs corruption? */
+		goto error_finish_iomap;
 	}
 
 	sector = dax_iomap_sector(&iomap, pos);
@@ -1217,13 +1188,13 @@ static int dax_iomap_pte_fault(struct vm_fault *vmf,
 		}
 
 		if (error)
-			goto error_unlock_entry;
+			goto error_finish_iomap;
 
 		__SetPageUptodate(vmf->cow_page);
 		vmf_ret = finish_fault(vmf);
 		if (!vmf_ret)
 			vmf_ret = VM_FAULT_DONE_COW;
-		goto unlock_entry;
+		goto finish_iomap;
 	}
 
 	switch (iomap.type) {
@@ -1243,7 +1214,7 @@ static int dax_iomap_pte_fault(struct vm_fault *vmf,
 	case IOMAP_HOLE:
 		if (!(vmf->flags & FAULT_FLAG_WRITE)) {
 			vmf_ret = dax_load_hole(mapping, &entry, vmf);
-			goto unlock_entry;
+			goto finish_iomap;
 		}
 		/*FALLTHRU*/
 	default:
@@ -1252,10 +1223,8 @@ static int dax_iomap_pte_fault(struct vm_fault *vmf,
 		break;
 	}
 
- error_unlock_entry:
+ error_finish_iomap:
 	vmf_ret = dax_fault_return(error) | major;
- unlock_entry:
-	put_locked_mapping_entry(mapping, vmf->pgoff, entry);
  finish_iomap:
 	if (ops->iomap_end) {
 		int copied = PAGE_SIZE;
@@ -1270,7 +1239,9 @@ static int dax_iomap_pte_fault(struct vm_fault *vmf,
 		 */
 		ops->iomap_end(inode, pos, PAGE_SIZE, copied, flags, &iomap);
 	}
- out:
+ unlock_entry:
+	put_locked_mapping_entry(mapping, vmf->pgoff, entry);
+ out:
 	trace_dax_pte_fault_done(inode, vmf, vmf_ret);
 	return vmf_ret;
 }
@@ -1416,6 +1387,16 @@ static int dax_iomap_pmd_fault(struct vm_fault *vmf,
 	if ((pgoff | PG_PMD_COLOUR) > max_pgoff)
 		goto fallback;
 
+	/*
+	 * grab_mapping_entry() will make sure we get a 2M empty entry, a DAX
+	 * PMD or a HZP entry.  If it can't (because a 4k page is already in
+	 * the tree, for instance), it will return -EEXIST and we just fall
+	 * back to 4k entries.
+	 */
+	entry = grab_mapping_entry(mapping, pgoff, RADIX_DAX_PMD);
+	if (IS_ERR(entry))
+		goto fallback;
+
 	/*
 	 * Note that we don't use iomap_apply here.  We aren't doing I/O, only
 	 * setting up a mapping, so really we're using iomap_begin() as a way
@@ -1424,38 +1405,26 @@ static int dax_iomap_pmd_fault(struct vm_fault *vmf,
 	pos = (loff_t)pgoff << PAGE_SHIFT;
 	error = ops->iomap_begin(inode, pos, PMD_SIZE, iomap_flags, &iomap);
 	if (error)
-		goto fallback;
+		goto unlock_entry;
 
 	if (iomap.offset + iomap.length < pos + PMD_SIZE)
 		goto finish_iomap;
 
-	/*
-	 * grab_mapping_entry() will make sure we get a 2M empty entry, a DAX
-	 * PMD or a HZP entry.  If it can't (because a 4k page is already in
-	 * the tree, for instance), it will return -EEXIST and we just fall
-	 * back to 4k entries.
-	 */
-	entry = grab_mapping_entry(mapping, pgoff, RADIX_DAX_PMD);
-	if (IS_ERR(entry))
-		goto finish_iomap;
-
 	switch (iomap.type) {
 	case IOMAP_MAPPED:
 		result = dax_pmd_insert_mapping(vmf, &iomap, pos, &entry);
 		break;
 	case IOMAP_UNWRITTEN:
 	case IOMAP_HOLE:
 		if (WARN_ON_ONCE(write))
-			goto unlock_entry;
+			break;
 		result = dax_pmd_load_hole(vmf, &iomap, &entry);
 		break;
 	default:
 		WARN_ON_ONCE(1);
 		break;
 	}
 
- unlock_entry:
-	put_locked_mapping_entry(mapping, pgoff, entry);
  finish_iomap:
 	if (ops->iomap_end) {
 		int copied = PMD_SIZE;
@@ -1471,6 +1440,8 @@ static int dax_iomap_pmd_fault(struct vm_fault *vmf,
 		ops->iomap_end(inode, pos, PMD_SIZE, copied, iomap_flags,
 				&iomap);
 	}
+ unlock_entry:
+	put_locked_mapping_entry(mapping, pgoff, entry);
  fallback:
 	if (result == VM_FAULT_FALLBACK) {
 		split_huge_pmd(vma, vmf->pmd, vmf->address);

fs/ext4/file.c

Lines changed: 17 additions & 4 deletions
@@ -257,19 +257,32 @@ static int ext4_dax_huge_fault(struct vm_fault *vmf,
 		enum page_entry_size pe_size)
 {
 	int result;
+	handle_t *handle = NULL;
 	struct inode *inode = file_inode(vmf->vma->vm_file);
 	struct super_block *sb = inode->i_sb;
 	bool write = vmf->flags & FAULT_FLAG_WRITE;
 
 	if (write) {
 		sb_start_pagefault(sb);
 		file_update_time(vmf->vma->vm_file);
+		down_read(&EXT4_I(inode)->i_mmap_sem);
+		handle = ext4_journal_start_sb(sb, EXT4_HT_WRITE_PAGE,
+					       EXT4_DATA_TRANS_BLOCKS(sb));
+	} else {
+		down_read(&EXT4_I(inode)->i_mmap_sem);
 	}
-	down_read(&EXT4_I(inode)->i_mmap_sem);
-	result = dax_iomap_fault(vmf, pe_size, &ext4_iomap_ops);
-	up_read(&EXT4_I(inode)->i_mmap_sem);
-	if (write)
+	if (!IS_ERR(handle))
+		result = dax_iomap_fault(vmf, pe_size, &ext4_iomap_ops);
+	else
+		result = VM_FAULT_SIGBUS;
+	if (write) {
+		if (!IS_ERR(handle))
+			ext4_journal_stop(handle);
+		up_read(&EXT4_I(inode)->i_mmap_sem);
 		sb_end_pagefault(sb);
+	} else {
+		up_read(&EXT4_I(inode)->i_mmap_sem);
+	}
 
 	return result;
 }

include/linux/dax.h

Lines changed: 0 additions & 1 deletion
@@ -89,7 +89,6 @@ ssize_t dax_iomap_rw(struct kiocb *iocb, struct iov_iter *iter,
 int dax_iomap_fault(struct vm_fault *vmf, enum page_entry_size pe_size,
 		    const struct iomap_ops *ops);
 int dax_delete_mapping_entry(struct address_space *mapping, pgoff_t index);
-int dax_invalidate_mapping_entry(struct address_space *mapping, pgoff_t index);
 int dax_invalidate_mapping_entry_sync(struct address_space *mapping,
 				      pgoff_t index);
 void dax_wake_mapping_entry_waiter(struct address_space *mapping,

include/linux/fs.h

Lines changed: 0 additions & 1 deletion
@@ -1431,7 +1431,6 @@ static inline void i_gid_write(struct inode *inode, gid_t gid)
 	inode->i_gid = make_kgid(inode->i_sb->s_user_ns, gid);
 }
 
-extern struct timespec current_fs_time(struct super_block *sb);
 extern struct timespec current_time(struct inode *inode);
 
 /*
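
For callers of the removed helper, the inode-based declaration that remains above is the replacement. A hedged migration sketch follows; the helper name is invented for illustration, and the pattern is an assumption rather than code from this series:

#include <linux/fs.h>

/* Hypothetical helper, shown only to illustrate the API swap. */
static inline void example_update_mtime(struct inode *inode)
{
	/* was: inode->i_mtime = current_fs_time(inode->i_sb); */
	inode->i_mtime = current_time(inode);
	mark_inode_dirty(inode);	/* typical follow-up in real callers */
}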

include/linux/vmalloc.h

Lines changed: 6 additions & 15 deletions
@@ -6,7 +6,6 @@
 #include <linux/list.h>
 #include <linux/llist.h>
 #include <asm/page.h>		/* pgprot_t */
-#include <asm/pgtable.h>	/* PAGE_KERNEL */
 #include <linux/rbtree.h>
 
 struct vm_area_struct;		/* vma defining user mapping in mm_types.h */
@@ -83,22 +82,14 @@ extern void *__vmalloc_node_range(unsigned long size, unsigned long align,
 			const void *caller);
 #ifndef CONFIG_MMU
 extern void *__vmalloc_node_flags(unsigned long size, int node, gfp_t flags);
-#else
-extern void *__vmalloc_node(unsigned long size, unsigned long align,
-			    gfp_t gfp_mask, pgprot_t prot,
-			    int node, const void *caller);
-
-/*
- * We really want to have this inlined due to caller tracking. This
- * function is used by the highlevel vmalloc apis and so we want to track
- * their callers and inlining will achieve that.
- */
-static inline void *__vmalloc_node_flags(unsigned long size,
-					 int node, gfp_t flags)
+static inline void *__vmalloc_node_flags_caller(unsigned long size, int node,
+						gfp_t flags, void *caller)
 {
-	return __vmalloc_node(size, 1, flags, PAGE_KERNEL,
-			      node, __builtin_return_address(0));
+	return __vmalloc_node_flags(size, node, flags);
 }
+#else
+extern void *__vmalloc_node_flags_caller(unsigned long size,
+					 int node, gfp_t flags, void *caller);
 #endif
 
 extern void vfree(const void *addr);
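
The removed inline carried the caller-tracking reasoning; the new *_caller variant moves that responsibility to the wrapper, which passes its own return address down. A simplified sketch of the intended use (assumed shape, not the actual mm/util.c change in this merge):

#include <linux/gfp.h>
#include <linux/vmalloc.h>

/* Hypothetical wrapper, not the real kvmalloc_node() implementation. */
static void *example_vmalloc_wrapper(unsigned long size, gfp_t flags, int node)
{
	/*
	 * Forward this wrapper's return address so allocation tracking
	 * (e.g. /proc/vmallocinfo) reports the wrapper's caller rather
	 * than the wrapper itself.
	 */
	return __vmalloc_node_flags_caller(size, node, flags,
					   __builtin_return_address(0));
}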

kernel/gcov/base.c

Lines changed: 6 additions & 0 deletions
@@ -98,6 +98,12 @@ void __gcov_merge_icall_topn(gcov_type *counters, unsigned int n_counters)
 }
 EXPORT_SYMBOL(__gcov_merge_icall_topn);
 
+void __gcov_exit(void)
+{
+	/* Unused. */
+}
+EXPORT_SYMBOL(__gcov_exit);
+
 /**
  * gcov_enable_events - enable event reporting through gcov_event()
  *

kernel/gcov/gcc_4_7.c

Lines changed: 3 additions & 1 deletion
@@ -18,7 +18,9 @@
 #include <linux/vmalloc.h>
 #include "gcov.h"
 
-#if (__GNUC__ > 5) || (__GNUC__ == 5 && __GNUC_MINOR__ >= 1)
+#if (__GNUC__ >= 7)
+#define GCOV_COUNTERS			9
+#elif (__GNUC__ > 5) || (__GNUC__ == 5 && __GNUC_MINOR__ >= 1)
 #define GCOV_COUNTERS			10
 #elif __GNUC__ == 4 && __GNUC_MINOR__ >= 9
 #define GCOV_COUNTERS			9
