@@ -1146,22 +1146,13 @@ do_blockdev_direct_IO(struct kiocb *iocb, struct inode *inode,
 	 * the early prefetch in the caller enough time.
 	 */
 
-	if (align & blocksize_mask) {
-		if (bdev)
-			blkbits = blksize_bits(bdev_logical_block_size(bdev));
-		blocksize_mask = (1 << blkbits) - 1;
-		if (align & blocksize_mask)
-			goto out;
-	}
-
 	/* watch out for a 0 len io from a tricksy fs */
 	if (iov_iter_rw(iter) == READ && !count)
 		return 0;
 
 	dio = kmem_cache_alloc(dio_cache, GFP_KERNEL);
-	retval = -ENOMEM;
 	if (!dio)
-		goto out;
+		return -ENOMEM;
 	/*
 	 * Believe it or not, zeroing out the page array caused a .5%
 	 * performance regression in a database benchmark.  So, we take
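The comment closing this hunk explains why the zeroing at the top of the next hunk is `memset(dio, 0, offsetof(struct dio, pages))` rather than a full-struct clear: zeroing the large page array cost about .5% in a database benchmark, so only the bookkeeping fields before `pages` are cleared. A minimal standalone sketch of that `offsetof()` pattern, with a hypothetical `io_state` struct standing in for `struct dio`:

```c
#include <stddef.h>     /* offsetof */
#include <string.h>     /* memset */

/*
 * Hypothetical stand-in for struct dio: small bookkeeping fields
 * first, then a large page array that every user fully overwrites
 * before reading, so it is deliberately left un-zeroed.
 */
struct io_state {
	int flags;
	long long i_size;
	/* ... more small fields ... */
	void *pages[64];        /* large tail, skipped by the memset */
};

static void io_state_init(struct io_state *s)
{
	/* Zero only the bytes that precede 'pages'. */
	memset(s, 0, offsetof(struct io_state, pages));
}
```

For this layout trick to stay safe, every field after the cutoff member must be initialized explicitly before use, which is exactly the contract the kernel comment is documenting.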
@@ -1170,32 +1161,32 @@ do_blockdev_direct_IO(struct kiocb *iocb, struct inode *inode,
 	memset(dio, 0, offsetof(struct dio, pages));
 
 	dio->flags = flags;
-	if (dio->flags & DIO_LOCKING) {
-		if (iov_iter_rw(iter) == READ) {
-			struct address_space *mapping =
-					iocb->ki_filp->f_mapping;
-
-			/* will be released by direct_io_worker */
-			inode_lock(inode);
-
-			retval = filemap_write_and_wait_range(mapping, offset,
-							      end - 1);
-			if (retval) {
-				inode_unlock(inode);
-				kmem_cache_free(dio_cache, dio);
-				goto out;
-			}
-		}
+	if (dio->flags & DIO_LOCKING && iov_iter_rw(iter) == READ) {
+		/* will be released by direct_io_worker */
+		inode_lock(inode);
 	}
 
 	/* Once we sampled i_size check for reads beyond EOF */
 	dio->i_size = i_size_read(inode);
 	if (iov_iter_rw(iter) == READ && offset >= dio->i_size) {
-		if (dio->flags & DIO_LOCKING)
-			inode_unlock(inode);
-		kmem_cache_free(dio_cache, dio);
 		retval = 0;
-		goto out;
+		goto fail_dio;
+	}
+
+	if (align & blocksize_mask) {
+		if (bdev)
+			blkbits = blksize_bits(bdev_logical_block_size(bdev));
+		blocksize_mask = (1 << blkbits) - 1;
+		if (align & blocksize_mask)
+			goto fail_dio;
+	}
+
+	if (dio->flags & DIO_LOCKING && iov_iter_rw(iter) == READ) {
+		struct address_space *mapping = iocb->ki_filp->f_mapping;
+
+		retval = filemap_write_and_wait_range(mapping, offset, end - 1);
+		if (retval)
+			goto fail_dio;
 	}
 
 	/*
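The alignment test this hunk moves after the EOF check relies on block sizes being powers of two: `(1 << blkbits) - 1` is a mask of the low bits, and an aligned value ANDed with it must be zero. A self-contained sketch of the same mask test (the helper and its names are illustrative, not kernel API):

```c
#include <stdbool.h>
#include <stdint.h>

/*
 * Illustrative helper: for a power-of-two block size, size - 1 is
 * a mask of the low log2(size) bits, so an aligned value ANDed
 * with it must be zero. OR-ing address and length lets one test
 * cover both, the trick behind 'align & blocksize_mask'.
 */
static bool dio_aligned(uint64_t addr, uint64_t len, uint32_t block_size)
{
	uint32_t mask = block_size - 1; /* e.g. 4096 -> 0xfff */

	return ((addr | len) & mask) == 0;
}
```

For example, `dio_aligned(0x1000, 8192, 4096)` holds, while `dio_aligned(0x1200, 8192, 4096)` fails because `0x1200 & 0xfff` leaves the low bits set.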
@@ -1239,14 +1230,8 @@ do_blockdev_direct_IO(struct kiocb *iocb, struct inode *inode,
 			 */
 			retval = sb_init_dio_done_wq(dio->inode->i_sb);
 		}
-		if (retval) {
-			/*
-			 * We grab i_mutex only for reads so we don't have
-			 * to release it here
-			 */
-			kmem_cache_free(dio_cache, dio);
-			goto out;
-		}
+		if (retval)
+			goto fail_dio;
 	}
 
 	/*
@@ -1349,7 +1334,13 @@ do_blockdev_direct_IO(struct kiocb *iocb, struct inode *inode,
 	} else
 		BUG_ON(retval != -EIOCBQUEUED);
 
-out:
+	return retval;
+
+fail_dio:
+	if (dio->flags & DIO_LOCKING && iov_iter_rw(iter) == READ)
+		inode_unlock(inode);
+
+	kmem_cache_free(dio_cache, dio);
 	return retval;
 }
 
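Taken together, the hunks replace several open-coded unlock-and-free sequences with a single `fail_dio:` unwind: it releases the inode lock only when the DIO_LOCKING read path took it, then frees the dio. A minimal userspace sketch of that single-exit-label pattern, assuming hypothetical `ctx_lock`/`ctx_unlock` helpers and stand-in error checks:

```c
#include <errno.h>
#include <stdlib.h>

struct ctx { int locked; };

static void ctx_lock(struct ctx *c)   { c->locked = 1; }
static void ctx_unlock(struct ctx *c) { c->locked = 0; }

/*
 * Illustrative stand-in for the structure above: checks that can
 * fail after resources were acquired all jump to one label, which
 * releases things in reverse order of acquisition.
 */
static int do_io(struct ctx *c, int take_lock, long offset, long end)
{
	int retval = 0;
	char *buf = malloc(4096);

	if (!buf)
		return -ENOMEM;        /* nothing acquired yet: plain return */

	if (take_lock)
		ctx_lock(c);

	if (offset < 0 || offset >= end) {
		retval = -EINVAL;
		goto fail;             /* centralized unwind */
	}

	/* ... the actual I/O work would go here ... */

	free(buf);
	return 0;

fail:
	if (take_lock)
		ctx_unlock(c);         /* only undo what this path took */
	free(buf);
	return retval;
}
```

The conditional unlock mirrors why the kernel change takes `inode_lock()` before the EOF check: once the lock acquisition happens unconditionally on the locking read path, every later failure can fall through to one label that knows exactly what to undo.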