@@ -61,26 +61,79 @@ enum rpcrdma_chunktype {
61
61
rpcrdma_replych
62
62
};
63
63
64
/* Human-readable names for each rpcrdma_chunktype value.
 * Indexed by enum rpcrdma_chunktype (ends at rpcrdma_replych above);
 * entries must stay in enum order.
 *
 * No longer guarded by CONFIG_SUNRPC_DEBUG: the table is now also
 * referenced by an unconditional pr_err() in the marshaling path.
 */
static const char transfertypes[][12] = {
	"pure inline",	/* no chunks */
	" read chunk",	/* some argument via rdma read */
	"*read chunk",	/* entire request via rdma read */
	"write chunk",	/* some result via rdma write */
	"reply chunk"	/* entire reply via rdma write */
};
71
+
72
+ /* Returns size of largest RPC-over-RDMA header in a Call message
73
+ *
74
+ * The client marshals only one chunk list per Call message.
75
+ * The largest list is the Read list.
76
+ */
77
+ static unsigned int rpcrdma_max_call_header_size (unsigned int maxsegs )
78
+ {
79
+ unsigned int size ;
80
+
81
+ /* Fixed header fields and list discriminators */
82
+ size = RPCRDMA_HDRLEN_MIN ;
83
+
84
+ /* Maximum Read list size */
85
+ maxsegs += 2 ; /* segment for head and tail buffers */
86
+ size = maxsegs * sizeof (struct rpcrdma_read_chunk );
87
+
88
+ dprintk ("RPC: %s: max call header size = %u\n" ,
89
+ __func__ , size );
90
+ return size ;
91
+ }
92
+
93
+ /* Returns size of largest RPC-over-RDMA header in a Reply message
94
+ *
95
+ * There is only one Write list or one Reply chunk per Reply
96
+ * message. The larger list is the Write list.
97
+ */
98
+ static unsigned int rpcrdma_max_reply_header_size (unsigned int maxsegs )
99
+ {
100
+ unsigned int size ;
101
+
102
+ /* Fixed header fields and list discriminators */
103
+ size = RPCRDMA_HDRLEN_MIN ;
104
+
105
+ /* Maximum Write list size */
106
+ maxsegs += 2 ; /* segment for head and tail buffers */
107
+ size = sizeof (__be32 ); /* segment count */
108
+ size += maxsegs * sizeof (struct rpcrdma_segment );
109
+ size += sizeof (__be32 ); /* list discriminator */
110
+
111
+ dprintk ("RPC: %s: max reply header size = %u\n" ,
112
+ __func__ , size );
113
+ return size ;
114
+ }
115
/* Cache the largest inline payload sizes in the IA: the negotiated
 * inline write/read sizes minus the worst-case Call/Reply header,
 * leaving the space actually available for RPC message bytes.
 *
 * NOTE(review): assumes cdata->inline_wsize and cdata->inline_rsize
 * are larger than the computed maximum header sizes; otherwise these
 * unsigned subtractions wrap — confirm against the transport's
 * minimum inline-size limits.
 */
void rpcrdma_set_max_header_sizes(struct rpcrdma_ia *ia,
				  struct rpcrdma_create_data_internal *cdata,
				  unsigned int maxsegs)
{
	ia->ri_max_inline_write = cdata->inline_wsize -
				  rpcrdma_max_call_header_size(maxsegs);
	ia->ri_max_inline_read = cdata->inline_rsize -
				  rpcrdma_max_reply_header_size(maxsegs);
}
73
125
74
126
/* The client can send a request inline as long as the RPCRDMA header
75
127
* plus the RPC call fit under the transport's inline limit. If the
76
128
* combined call message size exceeds that limit, the client must use
77
129
* the read chunk list for this operation.
78
130
*/
79
- static bool rpcrdma_args_inline (struct rpc_rqst * rqst )
131
+ static bool rpcrdma_args_inline (struct rpcrdma_xprt * r_xprt ,
132
+ struct rpc_rqst * rqst )
80
133
{
81
- unsigned int callsize = RPCRDMA_HDRLEN_MIN + rqst -> rq_snd_buf . len ;
134
+ struct rpcrdma_ia * ia = & r_xprt -> rx_ia ;
82
135
83
- return callsize <= RPCRDMA_INLINE_WRITE_THRESHOLD ( rqst ) ;
136
+ return rqst -> rq_snd_buf . len <= ia -> ri_max_inline_write ;
84
137
}
85
138
86
139
/* The client can't know how large the actual reply will be. Thus it
@@ -89,11 +142,12 @@ static bool rpcrdma_args_inline(struct rpc_rqst *rqst)
89
142
* limit, the client must provide a write list or a reply chunk for
90
143
* this request.
91
144
*/
92
- static bool rpcrdma_results_inline (struct rpc_rqst * rqst )
145
+ static bool rpcrdma_results_inline (struct rpcrdma_xprt * r_xprt ,
146
+ struct rpc_rqst * rqst )
93
147
{
94
- unsigned int repsize = RPCRDMA_HDRLEN_MIN + rqst -> rq_rcv_buf . buflen ;
148
+ struct rpcrdma_ia * ia = & r_xprt -> rx_ia ;
95
149
96
- return repsize <= RPCRDMA_INLINE_READ_THRESHOLD ( rqst ) ;
150
+ return rqst -> rq_rcv_buf . buflen <= ia -> ri_max_inline_read ;
97
151
}
98
152
99
153
static int
@@ -492,7 +546,7 @@ rpcrdma_marshal_req(struct rpc_rqst *rqst)
492
546
*/
493
547
if (rqst -> rq_rcv_buf .flags & XDRBUF_READ )
494
548
wtype = rpcrdma_writech ;
495
- else if (rpcrdma_results_inline (rqst ))
549
+ else if (rpcrdma_results_inline (r_xprt , rqst ))
496
550
wtype = rpcrdma_noch ;
497
551
else
498
552
wtype = rpcrdma_replych ;
@@ -511,7 +565,7 @@ rpcrdma_marshal_req(struct rpc_rqst *rqst)
511
565
* that both has a data payload, and whose non-data arguments
512
566
* by themselves are larger than the inline threshold.
513
567
*/
514
- if (rpcrdma_args_inline (rqst )) {
568
+ if (rpcrdma_args_inline (r_xprt , rqst )) {
515
569
rtype = rpcrdma_noch ;
516
570
} else if (rqst -> rq_snd_buf .flags & XDRBUF_WRITE ) {
517
571
rtype = rpcrdma_readch ;
@@ -561,6 +615,9 @@ rpcrdma_marshal_req(struct rpc_rqst *rqst)
561
615
if (hdrlen < 0 )
562
616
return hdrlen ;
563
617
618
+ if (hdrlen + rpclen > RPCRDMA_INLINE_WRITE_THRESHOLD (rqst ))
619
+ goto out_overflow ;
620
+
564
621
dprintk ("RPC: %s: %s: hdrlen %zd rpclen %zd"
565
622
" headerp 0x%p base 0x%p lkey 0x%x\n" ,
566
623
__func__ , transfertypes [wtype ], hdrlen , rpclen ,
@@ -587,6 +644,14 @@ rpcrdma_marshal_req(struct rpc_rqst *rqst)
587
644
588
645
req -> rl_niovs = 2 ;
589
646
return 0 ;
647
+
648
+ out_overflow :
649
+ pr_err ("rpcrdma: send overflow: hdrlen %zd rpclen %zu %s\n" ,
650
+ hdrlen , rpclen , transfertypes [wtype ]);
651
+ /* Terminate this RPC. Chunks registered above will be
652
+ * released by xprt_release -> xprt_rmda_free .
653
+ */
654
+ return - EIO ;
590
655
}
591
656
592
657
/*
0 commit comments