@@ -3127,9 +3127,10 @@ int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
 	struct scatterlist *sg = NULL;
 	bool more_trbs_coming = true;
 	bool need_zero_pkt = false;
-	unsigned int num_trbs, last_trb_num, i;
+	bool first_trb = true;
+	unsigned int num_trbs;
 	unsigned int start_cycle, num_sgs = 0;
-	unsigned int running_total, block_len, trb_buff_len, full_len;
+	unsigned int enqd_len, block_len, trb_buff_len, full_len;
 	int ret;
 	u32 field, length_field, remainder;
 	u64 addr;
@@ -3138,14 +3139,19 @@ int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
 	if (!ring)
 		return -EINVAL;
 
+	full_len = urb->transfer_buffer_length;
 	/* If we have scatter/gather list, we use it. */
 	if (urb->num_sgs) {
 		num_sgs = urb->num_mapped_sgs;
 		sg = urb->sg;
+		addr = (u64) sg_dma_address(sg);
+		block_len = sg_dma_len(sg);
 		num_trbs = count_sg_trbs_needed(urb);
-	} else
+	} else {
 		num_trbs = count_trbs_needed(urb);
-
+		addr = (u64) urb->transfer_dma;
+		block_len = full_len;
+	}
 	ret = prepare_transfer(xhci, xhci->devs[slot_id],
 			ep_index, urb->stream_id,
 			num_trbs, urb, 0, mem_flags);
@@ -3154,8 +3160,6 @@ int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
 
 	urb_priv = urb->hcpriv;
 
-	last_trb_num = num_trbs - 1;
-
 	/* Deal with URB_ZERO_PACKET - need one more td/trb */
 	if (urb->transfer_flags & URB_ZERO_PACKET && urb_priv->length > 1)
 		need_zero_pkt = true;
@@ -3170,40 +3174,20 @@ int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
 	start_trb = &ring->enqueue->generic;
 	start_cycle = ring->cycle_state;
 
-	full_len = urb->transfer_buffer_length;
-	running_total = 0;
-	block_len = 0;
-
 	/* Queue the TRBs, even if they are zero-length */
-	for (i = 0; i < num_trbs; i++) {
+	for (enqd_len = 0; enqd_len < full_len; enqd_len += trb_buff_len) {
 		field = TRB_TYPE(TRB_NORMAL);
 
-		if (block_len == 0) {
-			/* A new contiguous block. */
-			if (sg) {
-				addr = (u64) sg_dma_address(sg);
-				block_len = sg_dma_len(sg);
-			} else {
-				addr = (u64) urb->transfer_dma;
-				block_len = full_len;
-			}
-			/* TRB buffer should not cross 64KB boundaries */
-			trb_buff_len = TRB_BUFF_LEN_UP_TO_BOUNDARY(addr);
-			trb_buff_len = min_t(unsigned int,
-					trb_buff_len,
-					block_len);
-		} else {
-			/* Further through the contiguous block. */
-			trb_buff_len = block_len;
-			if (trb_buff_len > TRB_MAX_BUFF_SIZE)
-				trb_buff_len = TRB_MAX_BUFF_SIZE;
-		}
+		/* TRB buffer should not cross 64KB boundaries */
+		trb_buff_len = TRB_BUFF_LEN_UP_TO_BOUNDARY(addr);
+		trb_buff_len = min_t(unsigned int, trb_buff_len, block_len);
 
-		if (running_total + trb_buff_len > full_len)
-			trb_buff_len = full_len - running_total;
+		if (enqd_len + trb_buff_len > full_len)
+			trb_buff_len = full_len - enqd_len;
 
 		/* Don't change the cycle bit of the first TRB until later */
-		if (i == 0) {
+		if (first_trb) {
+			first_trb = false;
 			if (start_cycle == 0)
 				field |= TRB_CYCLE;
 		} else
@@ -3212,7 +3196,7 @@ int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
 		/* Chain all the TRBs together; clear the chain bit in the last
 		 * TRB to indicate it's the last TRB in the chain.
 		 */
-		if (i < last_trb_num) {
+		if (enqd_len + trb_buff_len < full_len) {
 			field |= TRB_CHAIN;
 		} else {
 			field |= TRB_IOC;
@@ -3225,9 +3209,9 @@ int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
 			field |= TRB_ISP;
 
 		/* Set the TRB length, TD size, and interrupter fields. */
-		remainder = xhci_td_remainder(xhci, running_total,
-					      trb_buff_len, full_len,
-					      urb, more_trbs_coming);
+		remainder = xhci_td_remainder(xhci, enqd_len, trb_buff_len,
+					      full_len, urb, more_trbs_coming);
+
 		length_field = TRB_LEN(trb_buff_len) |
 				TRB_TD_SIZE(remainder) |
 				TRB_INTR_TARGET(0);
@@ -3238,17 +3222,16 @@ int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
 				length_field,
 				field);
 
-		running_total += trb_buff_len;
 		addr += trb_buff_len;
 		block_len -= trb_buff_len;
 
-		if (sg) {
-			if (block_len == 0) {
-				/* New sg entry */
-				--num_sgs;
-				if (num_sgs == 0)
-					break;
+		if (sg && block_len == 0) {
+			/* New sg entry */
+			--num_sgs;
+			if (num_sgs != 0) {
 				sg = sg_next(sg);
+				block_len = sg_dma_len(sg);
+				addr = (u64) sg_dma_address(sg);
 			}
 		}
 	}
@@ -3262,7 +3245,7 @@ int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
 		queue_trb(xhci, ring, 0, 0, 0, TRB_INTR_TARGET(0), field);
 	}
 
-	check_trb_math(urb, running_total);
+	check_trb_math(urb, enqd_len);
 	giveback_first_trb(xhci, slot_id, ep_index, urb->stream_id,
 			start_cycle, start_trb);
 	return 0;
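
To make the splitting arithmetic in the reworked loop concrete, here is a minimal userspace sketch of what it computes per TRB. The two macros mirror their xhci.h definitions (a TRB buffer must not cross a 64KB boundary); struct fake_sg and show_trb_split are stand-ins invented for illustration and are not part of the driver, and the chain/cycle-bit bookkeeping is omitted:

#include <stdio.h>
#include <stdint.h>

/* Mirrors the xhci.h macros: a TRB buffer must not cross a 64KB boundary. */
#define TRB_MAX_BUFF_SHIFT	16
#define TRB_MAX_BUFF_SIZE	(1 << TRB_MAX_BUFF_SHIFT)
#define TRB_BUFF_LEN_UP_TO_BOUNDARY(addr) \
	(TRB_MAX_BUFF_SIZE - ((addr) & (TRB_MAX_BUFF_SIZE - 1)))

/* Stand-in for one mapped scatterlist entry. */
struct fake_sg {
	uint64_t dma_addr;
	unsigned int dma_len;
};

/*
 * Walk the blocks the way the merged loop does: enqd_len counts bytes
 * queued so far, each TRB is clamped first to the 64KB boundary, then
 * to the remainder of the current block and of the whole transfer, and
 * a new block is loaded whenever block_len reaches zero.
 */
static void show_trb_split(const struct fake_sg *sg, unsigned int num_sgs,
			   unsigned int full_len)
{
	unsigned int enqd_len, trb_buff_len;
	uint64_t addr = sg->dma_addr;
	unsigned int block_len = sg->dma_len;

	for (enqd_len = 0; enqd_len < full_len; enqd_len += trb_buff_len) {
		trb_buff_len = TRB_BUFF_LEN_UP_TO_BOUNDARY(addr);
		if (trb_buff_len > block_len)
			trb_buff_len = block_len;
		if (enqd_len + trb_buff_len > full_len)
			trb_buff_len = full_len - enqd_len;

		printf("TRB: addr=0x%llx len=%u\n",
		       (unsigned long long)addr, trb_buff_len);

		addr += trb_buff_len;
		block_len -= trb_buff_len;
		if (block_len == 0 && --num_sgs != 0) {
			sg++;			/* sg_next() stand-in */
			addr = sg->dma_addr;
			block_len = sg->dma_len;
		}
	}
}

int main(void)
{
	/* Two segments; the first starts 4KB below a 64KB boundary. */
	struct fake_sg sgs[] = {
		{ 0xf000, 80 * 1024 },	/* splits into 4K + 64K + 12K */
		{ 0x30000, 20 * 1024 },	/* fits in one 20K TRB */
	};

	show_trb_split(sgs, 2, 100 * 1024);
	return 0;
}

Run on this 100KB transfer it prints four TRBs (4K, 64K, 12K, 20K): boundary-clamping happens on every iteration, which is why the patch can drop the old block_len == 0 special case and the precounted i < last_trb_num test in favor of the single enqd_len + trb_buff_len < full_len check.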