 */
 #include <linux/module.h>
 #include <linux/highmem.h>
+#include <linux/folio_queue.h>
 #include "smbdirect.h"
 #include "cifs_debug.h"
 #include "cifsproto.h"
@@ -2463,6 +2464,8 @@ static ssize_t smb_extract_bvec_to_rdma(struct iov_iter *iter,
 		start = 0;
 	}
 
+	if (ret > 0)
+		iov_iter_advance(iter, ret);
 	return ret;
 }
 
@@ -2519,6 +2522,65 @@ static ssize_t smb_extract_kvec_to_rdma(struct iov_iter *iter,
 		start = 0;
 	}
 
+	if (ret > 0)
+		iov_iter_advance(iter, ret);
+	return ret;
+}
+
+/*
+ * Extract folio fragments from a FOLIOQ-class iterator and add them to an RDMA
+ * list.  The folios are not pinned.
+ */
+static ssize_t smb_extract_folioq_to_rdma(struct iov_iter *iter,
+					  struct smb_extract_to_rdma *rdma,
+					  ssize_t maxsize)
+{
+	const struct folio_queue *folioq = iter->folioq;
+	unsigned int slot = iter->folioq_slot;
+	ssize_t ret = 0;
+	size_t offset = iter->iov_offset;
+
+	BUG_ON(!folioq);
+
+	if (slot >= folioq_nr_slots(folioq)) {
+		folioq = folioq->next;
+		if (WARN_ON_ONCE(!folioq))
+			return -EIO;
+		slot = 0;
+	}
+
+	do {
+		struct folio *folio = folioq_folio(folioq, slot);
+		size_t fsize = folioq_folio_size(folioq, slot);
+
+		if (offset < fsize) {
+			size_t part = umin(maxsize - ret, fsize - offset);
+
+			if (!smb_set_sge(rdma, folio_page(folio, 0), offset, part))
+				return -EIO;
+
+			offset += part;
+			ret += part;
+		}
+
+		if (offset >= fsize) {
+			offset = 0;
+			slot++;
+			if (slot >= folioq_nr_slots(folioq)) {
+				if (!folioq->next) {
+					WARN_ON_ONCE(ret < iter->count);
+					break;
+				}
+				folioq = folioq->next;
+				slot = 0;
+			}
+		}
+	} while (rdma->nr_sge < rdma->max_sge || maxsize > 0);
+
+	iter->folioq = folioq;
+	iter->folioq_slot = slot;
+	iter->iov_offset = offset;
+	iter->count -= ret;
 	return ret;
 }
 
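For context, here is a minimal sketch (not part of the patch) of the slot-walking pattern the new helper relies on, using the accessors from <linux/folio_queue.h>: a folio_queue is a chained array of folio slots, and a consumer visits the occupied slots of one segment before following ->next to the next segment. The example_folioq_total_bytes() name is hypothetical.

/*
 * Illustrative sketch only: walk every occupied slot of a folio_queue chain
 * and total the bytes it describes, using the same accessors the new
 * smb_extract_folioq_to_rdma() helper uses.
 */
static size_t example_folioq_total_bytes(struct folio_queue *folioq)
{
	size_t total = 0;
	unsigned int slot;

	for (; folioq; folioq = folioq->next)
		for (slot = 0; slot < folioq_count(folioq); slot++)
			total += folioq_folio_size(folioq, slot);

	return total;
}

The extraction helper above follows the same shape, but also tracks an offset within the current folio, caps each fragment with umin(), and stops once the SGE array is full or maxsize is consumed.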
@@ -2563,6 +2625,8 @@ static ssize_t smb_extract_xarray_to_rdma(struct iov_iter *iter,
 	}
 
 	rcu_read_unlock();
+	if (ret > 0)
+		iov_iter_advance(iter, ret);
 	return ret;
 }
 
@@ -2590,6 +2654,9 @@ static ssize_t smb_extract_iter_to_rdma(struct iov_iter *iter, size_t len,
 	case ITER_KVEC:
 		ret = smb_extract_kvec_to_rdma(iter, rdma, len);
 		break;
+	case ITER_FOLIOQ:
+		ret = smb_extract_folioq_to_rdma(iter, rdma, len);
+		break;
 	case ITER_XARRAY:
 		ret = smb_extract_xarray_to_rdma(iter, rdma, len);
 		break;
@@ -2598,9 +2665,7 @@
 		return -EIO;
 	}
 
-	if (ret > 0) {
-		iov_iter_advance(iter, ret);
-	} else if (ret < 0) {
+	if (ret < 0) {
 		while (rdma->nr_sge > before) {
 			struct ib_sge *sge = &rdma->sge[rdma->nr_sge--];
 
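As a usage note, the following is a hedged sketch (again not part of the patch) of how code in smbdirect.c could feed a folio_queue through the new ITER_FOLIOQ path. It assumes the smb_extract_to_rdma fields declared in smbdirect.h (sge/nr_sge/max_sge, device, local_dma_lkey, direction) and the iov_iter_folio_queue() constructor from <linux/uio.h>; the example_map_folioq() name is hypothetical.

/*
 * Illustrative only: build an ITER_FOLIOQ source iterator over a folio_queue
 * and let smb_extract_iter_to_rdma() DMA-map it into an SGE array.
 * (Assumes this sits in smbdirect.c, where that helper is static.)
 */
static ssize_t example_map_folioq(struct ib_device *dev, u32 lkey,
				  const struct folio_queue *fq, size_t len,
				  struct ib_sge *sge, unsigned int max_sge)
{
	struct smb_extract_to_rdma rdma = {
		.nr_sge		= 0,
		.max_sge	= max_sge,
		.sge		= sge,
		.device		= dev,
		.local_dma_lkey	= lkey,		/* assumed field name */
		.direction	= DMA_TO_DEVICE,
	};
	struct iov_iter iter;

	/* Describe the folio_queue contents as a data source (ITER_FOLIOQ). */
	iov_iter_folio_queue(&iter, ITER_SOURCE, fq, 0, 0, len);

	/*
	 * Map up to max_sge folio fragments into sge[]; on success the
	 * iterator has been advanced past the mapped bytes.
	 */
	return smb_extract_iter_to_rdma(&iter, len, &rdma);
}

Because each extraction helper now advances the iterator itself, the common tail of smb_extract_iter_to_rdma() only has to unwind the SGE array on error, as shown in the final hunk above.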