@@ -3091,29 +3091,20 @@ fuse_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
 	ssize_t ret = 0;
 	struct file *file = iocb->ki_filp;
 	struct fuse_file *ff = file->private_data;
-	bool async_dio = ff->fc->async_dio;
 	loff_t pos = 0;
 	struct inode *inode;
 	loff_t i_size;
-	size_t count = iov_iter_count(iter);
+	size_t count = iov_iter_count(iter), shortened = 0;
 	loff_t offset = iocb->ki_pos;
 	struct fuse_io_priv *io;
 
 	pos = offset;
 	inode = file->f_mapping->host;
 	i_size = i_size_read(inode);
 
-	if ((iov_iter_rw(iter) == READ) && (offset > i_size))
+	if ((iov_iter_rw(iter) == READ) && (offset >= i_size))
 		return 0;
 
-	/* optimization for short read */
-	if (async_dio && iov_iter_rw(iter) != WRITE && offset + count > i_size) {
-		if (offset >= i_size)
-			return 0;
-		iov_iter_truncate(iter, fuse_round_up(ff->fc, i_size - offset));
-		count = iov_iter_count(iter);
-	}
-
 	io = kmalloc(sizeof(struct fuse_io_priv), GFP_KERNEL);
 	if (!io)
 		return -ENOMEM;
@@ -3129,15 +3120,22 @@ fuse_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
 	 * By default, we want to optimize all I/Os with async request
 	 * submission to the client filesystem if supported.
 	 */
-	io->async = async_dio;
+	io->async = ff->fc->async_dio;
 	io->iocb = iocb;
 	io->blocking = is_sync_kiocb(iocb);
 
+	/* optimization for short read */
+	if (io->async && !io->write && offset + count > i_size) {
+		iov_iter_truncate(iter, fuse_round_up(ff->fc, i_size - offset));
+		shortened = count - iov_iter_count(iter);
+		count -= shortened;
+	}
+
 	/*
 	 * We cannot asynchronously extend the size of a file.
 	 * In such case the aio will behave exactly like sync io.
 	 */
-	if ((offset + count > i_size) && iov_iter_rw(iter) == WRITE)
+	if ((offset + count > i_size) && io->write)
 		io->blocking = true;
 
 	if (io->async && io->blocking) {
@@ -3155,6 +3153,7 @@ fuse_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
 	} else {
 		ret = __fuse_direct_read(io, iter, &pos);
 	}
+	iov_iter_reexpand(iter, iov_iter_count(iter) + shortened);
 
 	if (io->async) {
 		bool blocking = io->blocking;
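Aside (not part of the commit): the fix works by bracketing the temporarily shortened iterator: iov_iter_truncate() trims the read so it does not reach past i_size before the I/O is submitted, and iov_iter_reexpand() gives back the bytes that were shaved off before the iterator is returned to the caller. Below is a minimal, illustrative C sketch of that pairing; the helper name do_shortened_read() and the elided read submission are assumptions, not code from fs/fuse/file.c.

/*
 * Illustrative sketch only: pairing iov_iter_truncate() with
 * iov_iter_reexpand() so the caller's iterator comes back at the
 * length it was passed in with.  do_shortened_read() is hypothetical.
 */
#include <linux/uio.h>

static ssize_t do_shortened_read(struct iov_iter *iter, loff_t offset,
				 loff_t i_size)
{
	size_t count = iov_iter_count(iter);
	size_t shortened = 0;
	ssize_t ret;

	if (offset >= i_size)
		return 0;		/* read starting at or past EOF */

	if (offset + count > i_size) {
		/* don't ask the filesystem for bytes past EOF */
		iov_iter_truncate(iter, i_size - offset);
		shortened = count - iov_iter_count(iter);
	}

	/* ... submit the read against the (possibly shortened) iter ... */
	ret = iov_iter_count(iter);

	/* restore the iterator before handing it back to the caller */
	iov_iter_reexpand(iter, iov_iter_count(iter) + shortened);
	return ret;
}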