@@ -116,7 +116,7 @@ __nfs_iocounter_wait(struct nfs_io_counter *c)
116
116
if (atomic_read (& c -> io_count ) == 0 )
117
117
break ;
118
118
ret = nfs_wait_bit_killable (& q .key );
119
- } while (atomic_read (& c -> io_count ) != 0 );
119
+ } while (atomic_read (& c -> io_count ) != 0 && ! ret );
120
120
finish_wait (wq , & q .wait );
121
121
return ret ;
122
122
}
@@ -139,26 +139,49 @@ nfs_iocounter_wait(struct nfs_io_counter *c)
139
139
/*
140
140
* nfs_page_group_lock - lock the head of the page group
141
141
* @req - request in group that is to be locked
142
+ * @nonblock - if true don't block waiting for lock
142
143
*
143
144
* this lock must be held if modifying the page group list
144
145
*
145
- * returns result from wait_on_bit_lock: 0 on success, < 0 on error
146
+ * return 0 on success, < 0 on error: -EAGAIN if nonblocking or the
147
+ * result from wait_on_bit_lock
148
+ *
149
+ * NOTE: calling with nonblock=false should always have set the
150
+ * lock bit (see fs/buffer.c and other uses of wait_on_bit_lock
151
+ * with TASK_UNINTERRUPTIBLE), so there is no need to check the result.
146
152
*/
147
153
int
148
- nfs_page_group_lock (struct nfs_page * req , bool wait )
154
+ nfs_page_group_lock (struct nfs_page * req , bool nonblock )
149
155
{
150
156
struct nfs_page * head = req -> wb_head ;
151
- int ret ;
152
157
153
158
WARN_ON_ONCE (head != head -> wb_head );
154
159
155
- do {
156
- ret = wait_on_bit_lock (& head -> wb_flags , PG_HEADLOCK ,
157
- TASK_UNINTERRUPTIBLE );
158
- } while (wait && ret != 0 );
160
+ if (!test_and_set_bit (PG_HEADLOCK , & head -> wb_flags ))
161
+ return 0 ;
159
162
160
- WARN_ON_ONCE (ret > 0 );
161
- return ret ;
163
+ if (!nonblock )
164
+ return wait_on_bit_lock (& head -> wb_flags , PG_HEADLOCK ,
165
+ TASK_UNINTERRUPTIBLE );
166
+
167
+ return - EAGAIN ;
168
+ }
169
+
170
+ /*
171
+ * nfs_page_group_lock_wait - wait for the lock to clear, but don't grab it
172
+ * @req - a request in the group
173
+ *
174
+ * This is a blocking call to wait for the group lock to be cleared.
175
+ */
176
+ void
177
+ nfs_page_group_lock_wait (struct nfs_page * req )
178
+ {
179
+ struct nfs_page * head = req -> wb_head ;
180
+
181
+ WARN_ON_ONCE (head != head -> wb_head );
182
+
183
+ wait_on_bit (& head -> wb_flags , PG_HEADLOCK ,
184
+ TASK_UNINTERRUPTIBLE );
162
185
}
163
186
164
187
/*
@@ -219,7 +242,7 @@ bool nfs_page_group_sync_on_bit(struct nfs_page *req, unsigned int bit)
219
242
{
220
243
bool ret ;
221
244
222
- nfs_page_group_lock (req , true );
245
+ nfs_page_group_lock (req , false );
223
246
ret = nfs_page_group_sync_on_bit_locked (req , bit );
224
247
nfs_page_group_unlock (req );
225
248
@@ -701,23 +724,35 @@ int nfs_generic_pgio(struct nfs_pageio_descriptor *desc,
701
724
struct nfs_pgio_header * hdr )
702
725
{
703
726
struct nfs_page * req ;
704
- struct page * * pages ;
727
+ struct page * * pages ,
728
+ * last_page ;
705
729
struct list_head * head = & desc -> pg_list ;
706
730
struct nfs_commit_info cinfo ;
707
- unsigned int pagecount ;
731
+ unsigned int pagecount , pageused ;
708
732
709
733
pagecount = nfs_page_array_len (desc -> pg_base , desc -> pg_count );
710
734
if (!nfs_pgarray_set (& hdr -> page_array , pagecount ))
711
735
return nfs_pgio_error (desc , hdr );
712
736
713
737
nfs_init_cinfo (& cinfo , desc -> pg_inode , desc -> pg_dreq );
714
738
pages = hdr -> page_array .pagevec ;
739
+ last_page = NULL ;
740
+ pageused = 0 ;
715
741
while (!list_empty (head )) {
716
742
req = nfs_list_entry (head -> next );
717
743
nfs_list_remove_request (req );
718
744
nfs_list_add_request (req , & hdr -> pages );
719
- * pages ++ = req -> wb_page ;
745
+
746
+ if (WARN_ON_ONCE (pageused >= pagecount ))
747
+ return nfs_pgio_error (desc , hdr );
748
+
749
+ if (!last_page || last_page != req -> wb_page ) {
750
+ * pages ++ = last_page = req -> wb_page ;
751
+ pageused ++ ;
752
+ }
720
753
}
754
+ if (WARN_ON_ONCE (pageused != pagecount ))
755
+ return nfs_pgio_error (desc , hdr );
721
756
722
757
if ((desc -> pg_ioflags & FLUSH_COND_STABLE ) &&
723
758
(desc -> pg_moreio || nfs_reqs_to_commit (& cinfo )))
@@ -788,6 +823,14 @@ static bool nfs_can_coalesce_requests(struct nfs_page *prev,
788
823
return false;
789
824
if (req_offset (req ) != req_offset (prev ) + prev -> wb_bytes )
790
825
return false;
826
+ if (req -> wb_page == prev -> wb_page ) {
827
+ if (req -> wb_pgbase != prev -> wb_pgbase + prev -> wb_bytes )
828
+ return false;
829
+ } else {
830
+ if (req -> wb_pgbase != 0 ||
831
+ prev -> wb_pgbase + prev -> wb_bytes != PAGE_CACHE_SIZE )
832
+ return false;
833
+ }
791
834
}
792
835
size = pgio -> pg_ops -> pg_test (pgio , prev , req );
793
836
WARN_ON_ONCE (size > req -> wb_bytes );
@@ -858,13 +901,8 @@ static int __nfs_pageio_add_request(struct nfs_pageio_descriptor *desc,
858
901
struct nfs_page * subreq ;
859
902
unsigned int bytes_left = 0 ;
860
903
unsigned int offset , pgbase ;
861
- int ret ;
862
904
863
- ret = nfs_page_group_lock (req , false);
864
- if (ret < 0 ) {
865
- desc -> pg_error = ret ;
866
- return 0 ;
867
- }
905
+ nfs_page_group_lock (req , false);
868
906
869
907
subreq = req ;
870
908
bytes_left = subreq -> wb_bytes ;
@@ -886,11 +924,7 @@ static int __nfs_pageio_add_request(struct nfs_pageio_descriptor *desc,
886
924
if (desc -> pg_recoalesce )
887
925
return 0 ;
888
926
/* retry add_request for this subreq */
889
- ret = nfs_page_group_lock (req , false);
890
- if (ret < 0 ) {
891
- desc -> pg_error = ret ;
892
- return 0 ;
893
- }
927
+ nfs_page_group_lock (req , false);
894
928
continue ;
895
929
}
896
930
0 commit comments