Commit d2ffb01

Merge branch 'akpm' (patches from Andrew)
Merge fixes from Andrew Morton: "20 fixes"

* emailed patches from Andrew Morton <[email protected]>:
  rapidio/rio_cm: avoid GFP_KERNEL in atomic context
  Revert "ocfs2: bump up o2cb network protocol version"
  ocfs2: fix start offset to ocfs2_zero_range_for_truncate()
  cgroup: duplicate cgroup reference when cloning sockets
  mm: memcontrol: make per-cpu charge cache IRQ-safe for socket accounting
  ocfs2: fix double unlock in case retry after free truncate log
  fanotify: fix list corruption in fanotify_get_response()
  fsnotify: add a way to stop queueing events on group shutdown
  ocfs2: fix trans extend while free cached blocks
  ocfs2: fix trans extend while flush truncate log
  ipc/shm: fix crash if CONFIG_SHMEM is not set
  mm: fix the page_swap_info() BUG_ON check
  autofs: use dentry flags to block walks during expire
  MAINTAINERS: update email for VLYNQ bus entry
  mm: avoid endless recursion in dump_page()
  mm, thp: fix leaking mapped pte in __collapse_huge_page_swapin()
  khugepaged: fix use-after-free in collapse_huge_page()
  MAINTAINERS: Maik has moved
  ocfs2/dlm: fix race between convert and migration
  mem-hotplug: don't clear the only node in new_node_page()
2 parents: 7fadce0 + b92ae13

File tree: 22 files changed (+240, -146 lines)

MAINTAINERS
Lines changed: 2 additions & 2 deletions

@@ -6103,7 +6103,7 @@ S: Supported
 F:  drivers/cpufreq/intel_pstate.c
 
 INTEL FRAMEBUFFER DRIVER (excluding 810 and 815)
-M:  Maik Broemme <mbroemme@plusserver.de>
+M:  Maik Broemme <mbroemme@libmpq.org>
 S:  Maintained
 F:  Documentation/fb/intelfb.txt
@@ -12569,7 +12569,7 @@ F: include/linux/if_*vlan.h
 F:  net/8021q/
 
 VLYNQ BUS
-M:  Florian Fainelli <[email protected]>
+M:  Florian Fainelli <[email protected]>
 L:  [email protected] (subscribers-only)
 S:  Maintained
 F:  drivers/vlynq/vlynq.c

drivers/rapidio/rio_cm.c
Lines changed: 16 additions & 3 deletions

@@ -2247,17 +2247,30 @@ static int rio_cm_shutdown(struct notifier_block *nb, unsigned long code,
 {
     struct rio_channel *ch;
     unsigned int i;
+    LIST_HEAD(list);
 
     riocm_debug(EXIT, ".");
 
+    /*
+     * If there are any channels left in connected state send
+     * close notification to the connection partner.
+     * First build a list of channels that require a closing
+     * notification because function riocm_send_close() should
+     * be called outside of spinlock protected code.
+     */
     spin_lock_bh(&idr_lock);
     idr_for_each_entry(&ch_idr, ch, i) {
-        riocm_debug(EXIT, "close ch %d", ch->id);
-        if (ch->state == RIO_CM_CONNECTED)
-            riocm_send_close(ch);
+        if (ch->state == RIO_CM_CONNECTED) {
+            riocm_debug(EXIT, "close ch %d", ch->id);
+            idr_remove(&ch_idr, ch->id);
+            list_add(&ch->ch_node, &list);
+        }
     }
     spin_unlock_bh(&idr_lock);
 
+    list_for_each_entry(ch, &list, ch_node)
+        riocm_send_close(ch);
+
     return NOTIFY_DONE;
 }
 
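The hunk above follows a common locking pattern: the callout that may sleep (riocm_send_close()) is deferred until the spinlock is dropped, so the loop only unhooks connected channels onto a private list while the lock is held. A minimal userspace sketch of that pattern follows; the channel struct, registry list, and send_close() are illustrative stand-ins (with a pthread mutex in place of the spinlock), not the driver's actual types.

    /*
     * Pattern sketch: collect work items under a lock, run the blocking
     * callout only after the lock is dropped.
     */
    #include <pthread.h>
    #include <stdio.h>

    struct channel {
        int id;
        int connected;
        struct channel *next;       /* registry list link */
        struct channel *close_next; /* private "to close" list link */
    };

    static pthread_mutex_t registry_lock = PTHREAD_MUTEX_INITIALIZER;
    static struct channel *registry;

    /* May block, so it must never run with registry_lock held. */
    static void send_close(struct channel *ch)
    {
        printf("close ch %d\n", ch->id);
    }

    static void shutdown_all(void)
    {
        struct channel *ch, *to_close = NULL;

        pthread_mutex_lock(&registry_lock);
        for (ch = registry; ch; ch = ch->next) {
            if (ch->connected) {
                ch->close_next = to_close;  /* collect on private list */
                to_close = ch;
            }
        }
        pthread_mutex_unlock(&registry_lock);

        /* Lock dropped: now safe to do the potentially blocking work. */
        for (ch = to_close; ch; ch = ch->close_next)
            send_close(ch);
    }

    int main(void)
    {
        struct channel b = { .id = 2, .connected = 0 };
        struct channel a = { .id = 1, .connected = 1, .next = &b };

        registry = &a;
        shutdown_all();
        return 0;
    }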

fs/autofs4/expire.c
Lines changed: 42 additions & 13 deletions

@@ -417,6 +417,7 @@ static struct dentry *should_expire(struct dentry *dentry,
     }
     return NULL;
 }
+
 /*
  * Find an eligible tree to time-out
  * A tree is eligible if :-
@@ -432,6 +433,7 @@ struct dentry *autofs4_expire_indirect(struct super_block *sb,
     struct dentry *root = sb->s_root;
     struct dentry *dentry;
     struct dentry *expired;
+    struct dentry *found;
     struct autofs_info *ino;
 
     if (!root)
@@ -442,31 +444,46 @@ struct dentry *autofs4_expire_indirect(struct super_block *sb,
 
     dentry = NULL;
     while ((dentry = get_next_positive_subdir(dentry, root))) {
+        int flags = how;
+
         spin_lock(&sbi->fs_lock);
         ino = autofs4_dentry_ino(dentry);
-        if (ino->flags & AUTOFS_INF_WANT_EXPIRE)
-            expired = NULL;
-        else
-            expired = should_expire(dentry, mnt, timeout, how);
-        if (!expired) {
+        if (ino->flags & AUTOFS_INF_WANT_EXPIRE) {
             spin_unlock(&sbi->fs_lock);
             continue;
         }
+        spin_unlock(&sbi->fs_lock);
+
+        expired = should_expire(dentry, mnt, timeout, flags);
+        if (!expired)
+            continue;
+
+        spin_lock(&sbi->fs_lock);
         ino = autofs4_dentry_ino(expired);
         ino->flags |= AUTOFS_INF_WANT_EXPIRE;
         spin_unlock(&sbi->fs_lock);
         synchronize_rcu();
-        spin_lock(&sbi->fs_lock);
-        if (should_expire(expired, mnt, timeout, how)) {
-            if (expired != dentry)
-                dput(dentry);
-            goto found;
-        }
 
+        /* Make sure a reference is not taken on found if
+         * things have changed.
+         */
+        flags &= ~AUTOFS_EXP_LEAVES;
+        found = should_expire(expired, mnt, timeout, how);
+        if (!found || found != expired)
+            /* Something has changed, continue */
+            goto next;
+
+        if (expired != dentry)
+            dput(dentry);
+
+        spin_lock(&sbi->fs_lock);
+        goto found;
+next:
+        spin_lock(&sbi->fs_lock);
         ino->flags &= ~AUTOFS_INF_WANT_EXPIRE;
+        spin_unlock(&sbi->fs_lock);
         if (expired != dentry)
             dput(expired);
-        spin_unlock(&sbi->fs_lock);
     }
     return NULL;
 
@@ -483,15 +500,27 @@ int autofs4_expire_wait(struct dentry *dentry, int rcu_walk)
     struct autofs_sb_info *sbi = autofs4_sbi(dentry->d_sb);
     struct autofs_info *ino = autofs4_dentry_ino(dentry);
     int status;
+    int state;
 
     /* Block on any pending expire */
     if (!(ino->flags & AUTOFS_INF_WANT_EXPIRE))
         return 0;
     if (rcu_walk)
         return -ECHILD;
 
+retry:
     spin_lock(&sbi->fs_lock);
-    if (ino->flags & AUTOFS_INF_EXPIRING) {
+    state = ino->flags & (AUTOFS_INF_WANT_EXPIRE | AUTOFS_INF_EXPIRING);
+    if (state == AUTOFS_INF_WANT_EXPIRE) {
+        spin_unlock(&sbi->fs_lock);
+        /*
+         * Possibly being selected for expire, wait until
+         * it's selected or not.
+         */
+        schedule_timeout_uninterruptible(HZ/10);
+        goto retry;
+    }
+    if (state & AUTOFS_INF_EXPIRING) {
         spin_unlock(&sbi->fs_lock);
 
         pr_debug("waiting for expire %p name=%pd\n", dentry, dentry);

fs/notify/fanotify/fanotify.c
Lines changed: 1 addition & 12 deletions

@@ -67,18 +67,7 @@ static int fanotify_get_response(struct fsnotify_group *group,
 
     pr_debug("%s: group=%p event=%p\n", __func__, group, event);
 
-    wait_event(group->fanotify_data.access_waitq, event->response ||
-                atomic_read(&group->fanotify_data.bypass_perm));
-
-    if (!event->response) { /* bypass_perm set */
-        /*
-         * Event was canceled because group is being destroyed. Remove
-         * it from group's event list because we are responsible for
-         * freeing the permission event.
-         */
-        fsnotify_remove_event(group, &event->fae.fse);
-        return 0;
-    }
+    wait_event(group->fanotify_data.access_waitq, event->response);
 
     /* userspace responded, convert to something usable */
     switch (event->response) {

fs/notify/fanotify/fanotify_user.c
Lines changed: 24 additions & 12 deletions

@@ -358,16 +358,20 @@ static int fanotify_release(struct inode *ignored, struct file *file)
 
 #ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS
     struct fanotify_perm_event_info *event, *next;
+    struct fsnotify_event *fsn_event;
 
     /*
-     * There may be still new events arriving in the notification queue
-     * but since userspace cannot use fanotify fd anymore, no event can
-     * enter or leave access_list by now.
+     * Stop new events from arriving in the notification queue. since
+     * userspace cannot use fanotify fd anymore, no event can enter or
+     * leave access_list by now either.
      */
-    spin_lock(&group->fanotify_data.access_lock);
-
-    atomic_inc(&group->fanotify_data.bypass_perm);
+    fsnotify_group_stop_queueing(group);
 
+    /*
+     * Process all permission events on access_list and notification queue
+     * and simulate reply from userspace.
+     */
+    spin_lock(&group->fanotify_data.access_lock);
     list_for_each_entry_safe(event, next, &group->fanotify_data.access_list,
                  fae.fse.list) {
         pr_debug("%s: found group=%p event=%p\n", __func__, group,
@@ -379,12 +383,21 @@ static int fanotify_release(struct inode *ignored, struct file *file)
     spin_unlock(&group->fanotify_data.access_lock);
 
     /*
-     * Since bypass_perm is set, newly queued events will not wait for
-     * access response. Wake up the already sleeping ones now.
-     * synchronize_srcu() in fsnotify_destroy_group() will wait for all
-     * processes sleeping in fanotify_handle_event() waiting for access
-     * response and thus also for all permission events to be freed.
+     * Destroy all non-permission events. For permission events just
+     * dequeue them and set the response. They will be freed once the
+     * response is consumed and fanotify_get_response() returns.
      */
+    mutex_lock(&group->notification_mutex);
+    while (!fsnotify_notify_queue_is_empty(group)) {
+        fsn_event = fsnotify_remove_first_event(group);
+        if (!(fsn_event->mask & FAN_ALL_PERM_EVENTS))
+            fsnotify_destroy_event(group, fsn_event);
+        else
+            FANOTIFY_PE(fsn_event)->response = FAN_ALLOW;
+    }
+    mutex_unlock(&group->notification_mutex);
+
+    /* Response for all permission events it set, wakeup waiters */
     wake_up(&group->fanotify_data.access_waitq);
 #endif
 
@@ -755,7 +768,6 @@ SYSCALL_DEFINE2(fanotify_init, unsigned int, flags, unsigned int, event_f_flags)
     spin_lock_init(&group->fanotify_data.access_lock);
     init_waitqueue_head(&group->fanotify_data.access_waitq);
     INIT_LIST_HEAD(&group->fanotify_data.access_list);
-    atomic_set(&group->fanotify_data.bypass_perm, 0);
 #endif
     switch (flags & FAN_ALL_CLASS_BITS) {
     case FAN_CLASS_NOTIF:
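Taken together with the fanotify.c change, the release path now works in three steps: stop queueing, drain the queue while destroying ordinary events and recording a default FAN_ALLOW answer on permission events, then wake the waiters so fanotify_get_response() can return and free them. A simplified, single-threaded sketch of that drain step follows; the event and group structs and all names here are invented stand-ins, not the fsnotify ones.

    /*
     * Pattern sketch: drain the queue on release; plain events are destroyed,
     * permission events get a default "allow" answer so their waiters can
     * finish and free them.
     */
    #include <stdio.h>
    #include <stdlib.h>

    enum ev_type { EV_NOTIFY, EV_PERM };
    enum { RESP_NONE = 0, RESP_ALLOW = 1 };

    struct event {
        enum ev_type type;
        int response;       /* only meaningful for EV_PERM */
        struct event *next;
    };

    struct group {
        int shutdown;       /* producers check this before queueing */
        struct event *queue;
    };

    static struct event *dequeue(struct group *g)
    {
        struct event *e = g->queue;

        if (e)
            g->queue = e->next;
        return e;
    }

    static void release_group(struct group *g)
    {
        struct event *e;

        g->shutdown = 1;    /* step 1: stop queueing */

        /* step 2: drain, auto-answering permission events */
        while ((e = dequeue(g)) != NULL) {
            if (e->type != EV_PERM)
                free(e);                    /* plain event: destroy */
            else
                e->response = RESP_ALLOW;   /* waiter will consume and free */
        }
        /* step 3: the real code wakes everyone waiting for a response here */
    }

    int main(void)
    {
        struct group g = { 0, NULL };
        struct event *plain = calloc(1, sizeof(*plain));
        struct event *perm = calloc(1, sizeof(*perm));

        perm->type = EV_PERM;
        plain->next = perm;
        g.queue = plain;

        release_group(&g);
        printf("perm response = %d\n", perm->response);
        free(perm); /* in the kernel this happens after the waiter returns */
        return 0;
    }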

fs/notify/group.c
Lines changed: 19 additions & 0 deletions

@@ -39,6 +39,17 @@ static void fsnotify_final_destroy_group(struct fsnotify_group *group)
     kfree(group);
 }
 
+/*
+ * Stop queueing new events for this group. Once this function returns
+ * fsnotify_add_event() will not add any new events to the group's queue.
+ */
+void fsnotify_group_stop_queueing(struct fsnotify_group *group)
+{
+    mutex_lock(&group->notification_mutex);
+    group->shutdown = true;
+    mutex_unlock(&group->notification_mutex);
+}
+
 /*
  * Trying to get rid of a group. Remove all marks, flush all events and release
  * the group reference.
@@ -47,6 +58,14 @@ static void fsnotify_final_destroy_group(struct fsnotify_group *group)
  */
 void fsnotify_destroy_group(struct fsnotify_group *group)
 {
+    /*
+     * Stop queueing new events. The code below is careful enough to not
+     * require this but fanotify needs to stop queuing events even before
+     * fsnotify_destroy_group() is called and this makes the other callers
+     * of fsnotify_destroy_group() to see the same behavior.
+     */
+    fsnotify_group_stop_queueing(group);
+
     /* clear all inode marks for this group, attach them to destroy_list */
     fsnotify_detach_group_marks(group);
 

fs/notify/notification.c
Lines changed: 7 additions & 16 deletions

@@ -82,7 +82,8 @@ void fsnotify_destroy_event(struct fsnotify_group *group,
  * Add an event to the group notification queue. The group can later pull this
  * event off the queue to deal with. The function returns 0 if the event was
  * added to the queue, 1 if the event was merged with some other queued event,
- * 2 if the queue of events has overflown.
+ * 2 if the event was not queued - either the queue of events has overflown
+ * or the group is shutting down.
  */
 int fsnotify_add_event(struct fsnotify_group *group,
                struct fsnotify_event *event,
@@ -96,6 +97,11 @@ int fsnotify_add_event(struct fsnotify_group *group,
 
     mutex_lock(&group->notification_mutex);
 
+    if (group->shutdown) {
+        mutex_unlock(&group->notification_mutex);
+        return 2;
+    }
+
     if (group->q_len >= group->max_events) {
         ret = 2;
         /* Queue overflow event only if it isn't already queued */
@@ -125,21 +131,6 @@ int fsnotify_add_event(struct fsnotify_group *group,
     return ret;
 }
 
-/*
- * Remove @event from group's notification queue. It is the responsibility of
- * the caller to destroy the event.
- */
-void fsnotify_remove_event(struct fsnotify_group *group,
-               struct fsnotify_event *event)
-{
-    mutex_lock(&group->notification_mutex);
-    if (!list_empty(&event->list)) {
-        list_del_init(&event->list);
-        group->q_len--;
-    }
-    mutex_unlock(&group->notification_mutex);
-}
-
 /*
  * Remove and return the first event from the notification list. It is the
  * responsibility of the caller to destroy the obtained event
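On the producer side, the new group->shutdown flag is checked under the same notification_mutex that serializes the queue, so no event can slip in after fsnotify_group_stop_queueing() returns. A small sketch of that check follows, again with a pthread mutex and simplified structs standing in for the kernel objects; it mirrors the return convention documented in the hunk above (0 = queued, 2 = not queued).

    /*
     * Pattern sketch: producer refuses to queue once the group has been
     * shut down, using the same lock that guards the queue.
     */
    #include <pthread.h>
    #include <stdio.h>

    struct group {
        pthread_mutex_t lock;
        int shutdown;
        int q_len;
        int max_events;
    };

    static int add_event(struct group *g)
    {
        pthread_mutex_lock(&g->lock);

        if (g->shutdown) {
            pthread_mutex_unlock(&g->lock);
            return 2;   /* group is going away, drop the event */
        }
        if (g->q_len >= g->max_events) {
            pthread_mutex_unlock(&g->lock);
            return 2;   /* full (the real code queues one overflow event) */
        }
        g->q_len++;     /* the real code links the event into the list here */
        pthread_mutex_unlock(&g->lock);
        return 0;
    }

    static void stop_queueing(struct group *g)
    {
        pthread_mutex_lock(&g->lock);
        g->shutdown = 1;
        pthread_mutex_unlock(&g->lock);
    }

    int main(void)
    {
        struct group g = { PTHREAD_MUTEX_INITIALIZER, 0, 0, 8 };

        printf("%d\n", add_event(&g));  /* 0: queued */
        stop_queueing(&g);
        printf("%d\n", add_event(&g));  /* 2: rejected after shutdown */
        return 0;
    }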
