@@ -314,11 +314,11 @@ minstrel_get_ratestats(struct minstrel_ht_sta *mi, int index)
 }
 
 /*
- * Calculate throughput based on the average A-MPDU length, taking into account
- * the expected number of retransmissions and their expected length
+ * Return current throughput based on the average A-MPDU length, taking into
+ * account the expected number of retransmissions and their expected length
  */
-static void
-minstrel_ht_calc_tp(struct minstrel_ht_sta *mi, int group, int rate)
+int
+minstrel_ht_get_tp_avg(struct minstrel_ht_sta *mi, int group, int rate)
 {
         struct minstrel_rate_stats *mrs;
         unsigned int nsecs = 0;
@@ -328,10 +328,8 @@ minstrel_ht_calc_tp(struct minstrel_ht_sta *mi, int group, int rate)
         tmp_prob_ewma = mrs->prob_ewma;
 
         /* do not account throughput if sucess prob is below 10% */
-        if (mrs->prob_ewma < MINSTREL_FRAC(10, 100)) {
-                mrs->cur_tp = 0;
-                return;
-        }
+        if (mrs->prob_ewma < MINSTREL_FRAC(10, 100))
+                return 0;
 
         /*
          * For the throughput calculation, limit the probability value to 90% to
@@ -346,7 +344,7 @@ minstrel_ht_calc_tp(struct minstrel_ht_sta *mi, int group, int rate)
         nsecs += minstrel_mcs_groups[group].duration[rate];
 
         /* prob is scaled - see MINSTREL_FRAC above */
-        mrs->cur_tp = MINSTREL_TRUNC(1000000 * ((tmp_prob_ewma * 1000) / nsecs));
+        return MINSTREL_TRUNC(100000 * ((tmp_prob_ewma * 1000) / nsecs));
 }
 
 /*
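
The returned value is pure fixed-point arithmetic: prob_ewma is a MINSTREL_FRAC-scaled success probability, nsecs is the estimated airtime of one average A-MPDU at this rate, and MINSTREL_TRUNC drops the scaling again. A minimal userspace sketch of that math follows; the macro definitions mirror rc80211_minstrel.h, while the scale value and the sample airtime are assumptions made for illustration only.

    /*
     * Standalone sketch of the fixed-point math used by the new
     * minstrel_ht_get_tp_avg().  The MINSTREL_FRAC/MINSTREL_TRUNC
     * definitions mirror rc80211_minstrel.h; the scale value and the
     * sample A-MPDU airtime are assumptions for illustration only.
     */
    #include <stdio.h>

    #define MINSTREL_SCALE          12  /* assumed scale */
    #define MINSTREL_FRAC(val, div) (((val) << MINSTREL_SCALE) / (div))
    #define MINSTREL_TRUNC(val)     ((val) >> MINSTREL_SCALE)

    int main(void)
    {
            /* success probability of 85%, stored as a scaled fraction */
            unsigned int prob_ewma = MINSTREL_FRAC(85, 100);
            /* assumed airtime of one average A-MPDU at this rate, in ns */
            unsigned int nsecs = 40000;
            /* expected successful transmissions, scaled as in the patch */
            unsigned int tp_avg = MINSTREL_TRUNC(100000 *
                                                 ((prob_ewma * 1000) / nsecs));

            printf("tp_avg = %u\n", tp_avg);
            return 0;
    }
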
@@ -360,22 +358,22 @@ static void
 minstrel_ht_sort_best_tp_rates(struct minstrel_ht_sta *mi, u16 index,
                                u16 *tp_list)
 {
-        int cur_group, cur_idx, cur_thr, cur_prob;
-        int tmp_group, tmp_idx, tmp_thr, tmp_prob;
+        int cur_group, cur_idx, cur_tp_avg, cur_prob;
+        int tmp_group, tmp_idx, tmp_tp_avg, tmp_prob;
         int j = MAX_THR_RATES;
 
         cur_group = index / MCS_GROUP_RATES;
         cur_idx = index % MCS_GROUP_RATES;
-        cur_thr = mi->groups[cur_group].rates[cur_idx].cur_tp;
+        cur_tp_avg = minstrel_ht_get_tp_avg(mi, cur_group, cur_idx);
         cur_prob = mi->groups[cur_group].rates[cur_idx].prob_ewma;
 
         do {
                 tmp_group = tp_list[j - 1] / MCS_GROUP_RATES;
                 tmp_idx = tp_list[j - 1] % MCS_GROUP_RATES;
-                tmp_thr = mi->groups[tmp_group].rates[tmp_idx].cur_tp;
+                tmp_tp_avg = minstrel_ht_get_tp_avg(mi, tmp_group, tmp_idx);
                 tmp_prob = mi->groups[tmp_group].rates[tmp_idx].prob_ewma;
-                if (cur_thr < tmp_thr ||
-                    (cur_thr == tmp_thr && cur_prob <= tmp_prob))
+                if (cur_tp_avg < tmp_tp_avg ||
+                    (cur_tp_avg == tmp_tp_avg && cur_prob <= tmp_prob))
                         break;
                 j--;
         } while (j > 0);
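
This hunk only changes where the throughput metric comes from (the on-the-fly helper instead of the cached cur_tp); the comparison loop itself still finds the insertion point for the candidate rate, and the rest of the function (outside the hunk) shifts weaker entries down. A self-contained sketch of that top-list insertion, with made-up metric arrays standing in for the minstrel_ht_get_tp_avg() and prob_ewma lookups:

    /*
     * Userspace sketch of the insertion done around this loop: bubble a
     * candidate rate into a descending top-N list, ties broken by
     * probability.  The metric arrays stand in for the
     * minstrel_ht_get_tp_avg()/prob_ewma lookups; values are made up.
     */
    #include <stdio.h>
    #include <string.h>

    #define MAX_THR_RATES 4

    static void sort_best_tp(unsigned int index, const int *tp,
                             const int *prob, unsigned int *tp_list)
    {
            int j = MAX_THR_RATES;

            /* walk upwards until the candidate no longer wins */
            while (j > 0) {
                    unsigned int tmp = tp_list[j - 1];

                    if (tp[index] < tp[tmp] ||
                        (tp[index] == tp[tmp] && prob[index] <= prob[tmp]))
                            break;
                    j--;
            }

            /* shift weaker entries down and insert the candidate */
            if (j < MAX_THR_RATES - 1)
                    memmove(&tp_list[j + 1], &tp_list[j],
                            (MAX_THR_RATES - j - 1) * sizeof(*tp_list));
            if (j < MAX_THR_RATES)
                    tp_list[j] = index;
    }

    int main(void)
    {
            int tp[]   = { 10, 40, 25, 30, 5 };  /* fake throughput per rate */
            int prob[] = { 50, 90, 80, 70, 60 }; /* fake probability per rate */
            unsigned int tp_list[MAX_THR_RATES] = { 0, 0, 0, 0 };
            unsigned int i;

            for (i = 1; i < 5; i++)
                    sort_best_tp(i, tp, prob, tp_list);

            for (i = 0; i < MAX_THR_RATES; i++)
                    printf("%u ", tp_list[i]);  /* prints: 1 3 2 0 */
            printf("\n");
            return 0;
    }
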
@@ -396,14 +394,19 @@ minstrel_ht_set_best_prob_rate(struct minstrel_ht_sta *mi, u16 index)
 {
         struct minstrel_mcs_group_data *mg;
         struct minstrel_rate_stats *mrs;
-        int tmp_group, tmp_idx, tmp_tp, tmp_prob, max_tp_group;
+        int tmp_group, tmp_idx, tmp_tp_avg, tmp_prob;
+        int max_tp_group, cur_tp_avg, cur_group, cur_idx;
+        int max_group_prob_rate_group, max_group_prob_rate_idx;
+        int max_group_prob_rate_tp_avg;
 
+        cur_group = index / MCS_GROUP_RATES;
+        cur_idx = index % MCS_GROUP_RATES;
         mg = &mi->groups[index / MCS_GROUP_RATES];
         mrs = &mg->rates[index % MCS_GROUP_RATES];
 
         tmp_group = mi->max_prob_rate / MCS_GROUP_RATES;
         tmp_idx = mi->max_prob_rate % MCS_GROUP_RATES;
-        tmp_tp = mi->groups[tmp_group].rates[tmp_idx].cur_tp;
+        tmp_tp_avg = minstrel_ht_get_tp_avg(mi, tmp_group, tmp_idx);
         tmp_prob = mi->groups[tmp_group].rates[tmp_idx].prob_ewma;
 
         /* if max_tp_rate[0] is from MCS_GROUP max_prob_rate get selected from
@@ -414,9 +417,18 @@ minstrel_ht_set_best_prob_rate(struct minstrel_ht_sta *mi, u16 index)
                 return;
 
         if (mrs->prob_ewma > MINSTREL_FRAC(75, 100)) {
-                if (mrs->cur_tp > tmp_tp)
+                cur_tp_avg = minstrel_ht_get_tp_avg(mi, cur_group, cur_idx);
+                if (cur_tp_avg > tmp_tp_avg)
                         mi->max_prob_rate = index;
-                if (mrs->cur_tp > mg->rates[mg->max_group_prob_rate].cur_tp)
+
+                max_group_prob_rate_group = mg->max_group_prob_rate /
+                                            MCS_GROUP_RATES;
+                max_group_prob_rate_idx = mg->max_group_prob_rate %
+                                          MCS_GROUP_RATES;
+                max_group_prob_rate_tp_avg = minstrel_ht_get_tp_avg(mi,
+                                                max_group_prob_rate_group,
+                                                max_group_prob_rate_idx);
+                if (cur_tp_avg > max_group_prob_rate_tp_avg)
                         mg->max_group_prob_rate = index;
         } else {
                 if (mrs->prob_ewma > tmp_prob)
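
Throughout these hunks a rate is addressed by a flat index that packs the MCS group and the per-group rate together, which is why every new call site first splits it with / and % before handing it to minstrel_ht_get_tp_avg(). A trivial sketch of that encoding, assuming the usual eight rates per HT group:

    /* Trivial sketch of the flat rate-index encoding, assuming the usual
     * eight rates per HT group (MCS_GROUP_RATES). */
    #include <stdio.h>

    #define MCS_GROUP_RATES 8

    int main(void)
    {
            unsigned int index = 27;                      /* example index */
            unsigned int group = index / MCS_GROUP_RATES; /* -> group 3 */
            unsigned int idx   = index % MCS_GROUP_RATES; /* -> rate 3 */

            printf("index %u -> group %u, rate %u\n", index, group, idx);
            return 0;
    }
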
@@ -443,11 +455,11 @@ minstrel_ht_assign_best_tp_rates(struct minstrel_ht_sta *mi,
 
         tmp_group = tmp_cck_tp_rate[0] / MCS_GROUP_RATES;
         tmp_idx = tmp_cck_tp_rate[0] % MCS_GROUP_RATES;
-        tmp_cck_tp = mi->groups[tmp_group].rates[tmp_idx].cur_tp;
+        tmp_cck_tp = minstrel_ht_get_tp_avg(mi, tmp_group, tmp_idx);
 
         tmp_group = tmp_mcs_tp_rate[0] / MCS_GROUP_RATES;
         tmp_idx = tmp_mcs_tp_rate[0] % MCS_GROUP_RATES;
-        tmp_mcs_tp = mi->groups[tmp_group].rates[tmp_idx].cur_tp;
+        tmp_mcs_tp = minstrel_ht_get_tp_avg(mi, tmp_group, tmp_idx);
 
         if (tmp_cck_tp > tmp_mcs_tp) {
                 for (i = 0; i < MAX_THR_RATES; i++) {
@@ -466,8 +478,7 @@ static inline void
 minstrel_ht_prob_rate_reduce_streams(struct minstrel_ht_sta *mi)
 {
         struct minstrel_mcs_group_data *mg;
-        struct minstrel_rate_stats *mrs;
-        int tmp_max_streams, group;
+        int tmp_max_streams, group, tmp_idx;
         int tmp_tp = 0;
 
         tmp_max_streams = minstrel_mcs_groups[mi->max_tp_rate[0] /
@@ -476,11 +487,14 @@ minstrel_ht_prob_rate_reduce_streams(struct minstrel_ht_sta *mi)
                 mg = &mi->groups[group];
                 if (!mg->supported || group == MINSTREL_CCK_GROUP)
                         continue;
-                mrs = minstrel_get_ratestats(mi, mg->max_group_prob_rate);
-                if (tmp_tp < mrs->cur_tp &&
+
+                tmp_idx = mg->max_group_prob_rate % MCS_GROUP_RATES;
+
+                if (tmp_tp < minstrel_ht_get_tp_avg(mi, group, tmp_idx) &&
                     (minstrel_mcs_groups[group].streams < tmp_max_streams)) {
                         mi->max_prob_rate = mg->max_group_prob_rate;
-                        tmp_tp = mrs->cur_tp;
+                        tmp_tp = minstrel_ht_get_tp_avg(mi, group,
+                                                        tmp_idx);
                 }
         }
 }
@@ -541,9 +555,8 @@ minstrel_ht_update_stats(struct minstrel_priv *mp, struct minstrel_ht_sta *mi)
                         mrs = &mg->rates[i];
                         mrs->retry_updated = false;
                         minstrel_calc_rate_stats(mrs);
-                        minstrel_ht_calc_tp(mi, group, i);
 
-                        if (!mrs->cur_tp)
+                        if (minstrel_ht_get_tp_avg(mi, group, i) == 0)
                                 continue;
 
                         /* Find max throughput rate set */
@@ -1302,16 +1315,18 @@ static u32 minstrel_ht_get_expected_throughput(void *priv_sta)
 {
         struct minstrel_ht_sta_priv *msp = priv_sta;
         struct minstrel_ht_sta *mi = &msp->ht;
-        int i, j;
+        int i, j, tp_avg;
 
         if (!msp->is_ht)
                 return mac80211_minstrel.get_expected_throughput(priv_sta);
 
         i = mi->max_tp_rate[0] / MCS_GROUP_RATES;
         j = mi->max_tp_rate[0] % MCS_GROUP_RATES;
 
-        /* convert cur_tp from pkt per second in kbps */
-        return mi->groups[i].rates[j].cur_tp * AVG_PKT_SIZE * 8 / 1024;
+        /* convert tp_avg from pkt per second in kbps */
+        tp_avg = minstrel_ht_get_tp_avg(mi, i, j) * AVG_PKT_SIZE * 8 / 1024;
+
+        return tp_avg;
 }
 
 static const struct rate_control_ops mac80211_minstrel_ht = {
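
Per the comment kept in this hunk, the helper's result is treated as average-sized packets per second and converted to kbit/s for mac80211. A small sketch of just that arithmetic, assuming the 1200-byte AVG_PKT_SIZE from rc80211_minstrel_ht.c and a made-up packet rate:

    /*
     * Sketch of the unit conversion above: average-sized packets per
     * second -> kbit/s.  AVG_PKT_SIZE is assumed to be the 1200-byte
     * value from rc80211_minstrel_ht.c; the packet rate is made up.
     */
    #include <stdio.h>

    #define AVG_PKT_SIZE 1200

    int main(void)
    {
            unsigned int tp_avg_pps = 5000; /* assumed packets per second */
            unsigned int kbps = tp_avg_pps * AVG_PKT_SIZE * 8 / 1024;

            printf("%u pkt/s -> %u kbps\n", tp_avg_pps, kbps);
            return 0;
    }
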