Skip to content

Commit 49bbd81

Browse files
herraa1 authored and torvalds committed
fb_defio: fix for non-dirty ptes
Fix a problem observed while using fb_defio with a short delay on a PowerPC platform. It is possible that page_mkclean() is invoked in the deferred io work function _before_ a PTE has been marked dirty. In this case, the page is removed from the defio pagelist but page_mkclean() does not write-protect the page again. The end result is that defio ignores all subsequent writes to the page and the corresponding portions of the framebuffer never get updated. The fix consists in keeping track of the pages with non-dirty PTEs, re-checking them again on the next deferred io work iteration. Note that those pages are not passed to the defio callback as they are not written by userspace yet. Signed-off-by: Albert Herranz <[email protected]> Acked-by: Jaya Kumar <[email protected]> Signed-off-by: Andrew Morton <[email protected]> Signed-off-by: Linus Torvalds <[email protected]>
1 parent 0d5b026 commit 49bbd81

File tree

1 file changed

+32
-8
lines changed

1 file changed

+32
-8
lines changed

drivers/video/fb_defio.c

Lines changed: 32 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -155,25 +155,41 @@ static void fb_deferred_io_work(struct work_struct *work)
155155
{
156156
struct fb_info *info = container_of(work, struct fb_info,
157157
deferred_work.work);
158-
struct list_head *node, *next;
159-
struct page *cur;
160158
struct fb_deferred_io *fbdefio = info->fbdefio;
159+
struct page *page, *tmp_page;
160+
struct list_head *node, *tmp_node;
161+
struct list_head non_dirty;
162+
163+
INIT_LIST_HEAD(&non_dirty);
161164

162165
/* here we mkclean the pages, then do all deferred IO */
163166
mutex_lock(&fbdefio->lock);
164-
list_for_each_entry(cur, &fbdefio->pagelist, lru) {
165-
lock_page(cur);
166-
page_mkclean(cur);
167-
unlock_page(cur);
167+
list_for_each_entry_safe(page, tmp_page, &fbdefio->pagelist, lru) {
168+
lock_page(page);
169+
/*
170+
* The workqueue callback can be triggered after a
171+
* ->page_mkwrite() call but before the PTE has been marked
172+
* dirty. In this case page_mkclean() won't "rearm" the page.
173+
*
174+
* To avoid this, remove those "non-dirty" pages from the
175+
* pagelist before calling the driver's callback, then add
176+
* them back to get processed on the next work iteration.
177+
* At that time, their PTEs will hopefully be dirty for real.
178+
*/
179+
if (!page_mkclean(page))
180+
list_move_tail(&page->lru, &non_dirty);
181+
unlock_page(page);
168182
}
169183

170184
/* driver's callback with pagelist */
171185
fbdefio->deferred_io(info, &fbdefio->pagelist);
172186

173-
/* clear the list */
174-
list_for_each_safe(node, next, &fbdefio->pagelist) {
187+
/* clear the list... */
188+
list_for_each_safe(node, tmp_node, &fbdefio->pagelist) {
175189
list_del(node);
176190
}
191+
/* ... and add back the "non-dirty" pages to the list */
192+
list_splice_tail(&non_dirty, &fbdefio->pagelist);
177193
mutex_unlock(&fbdefio->lock);
178194
}
179195

@@ -202,13 +218,21 @@ EXPORT_SYMBOL_GPL(fb_deferred_io_open);
202218
void fb_deferred_io_cleanup(struct fb_info *info)
203219
{
204220
struct fb_deferred_io *fbdefio = info->fbdefio;
221+
struct list_head *node, *tmp_node;
205222
struct page *page;
206223
int i;
207224

208225
BUG_ON(!fbdefio);
209226
cancel_delayed_work(&info->deferred_work);
210227
flush_scheduled_work();
211228

229+
/* the list may have still some non-dirty pages at this point */
230+
mutex_lock(&fbdefio->lock);
231+
list_for_each_safe(node, tmp_node, &fbdefio->pagelist) {
232+
list_del(node);
233+
}
234+
mutex_unlock(&fbdefio->lock);
235+
212236
/* clear out the mapping that we setup */
213237
for (i = 0 ; i < info->fix.smem_len; i += PAGE_SIZE) {
214238
page = fb_deferred_io_page(info, i);

0 commit comments

Comments
 (0)