@@ -1190,7 +1190,50 @@ static size_t efx_ef10_describe_stats(struct efx_nic *efx, u8 *names)
 				      mask, names);
 }
 
-static int efx_ef10_try_update_nic_stats(struct efx_nic *efx)
+static size_t efx_ef10_update_stats_common(struct efx_nic *efx, u64 *full_stats,
+					   struct rtnl_link_stats64 *core_stats)
+{
+	DECLARE_BITMAP(mask, EF10_STAT_COUNT);
+	struct efx_ef10_nic_data *nic_data = efx->nic_data;
+	u64 *stats = nic_data->stats;
+	size_t stats_count = 0, index;
+
+	efx_ef10_get_stat_mask(efx, mask);
+
+	if (full_stats) {
+		for_each_set_bit(index, mask, EF10_STAT_COUNT) {
+			if (efx_ef10_stat_desc[index].name) {
+				*full_stats++ = stats[index];
+				++stats_count;
+			}
+		}
+	}
+
+	if (core_stats) {
+		core_stats->rx_packets = stats[EF10_STAT_port_rx_packets];
+		core_stats->tx_packets = stats[EF10_STAT_port_tx_packets];
+		core_stats->rx_bytes = stats[EF10_STAT_port_rx_bytes];
+		core_stats->tx_bytes = stats[EF10_STAT_port_tx_bytes];
+		core_stats->rx_dropped = stats[EF10_STAT_port_rx_nodesc_drops] +
+					 stats[GENERIC_STAT_rx_nodesc_trunc] +
+					 stats[GENERIC_STAT_rx_noskb_drops];
+		core_stats->multicast = stats[EF10_STAT_port_rx_multicast];
+		core_stats->rx_length_errors =
+				stats[EF10_STAT_port_rx_gtjumbo] +
+				stats[EF10_STAT_port_rx_length_error];
+		core_stats->rx_crc_errors = stats[EF10_STAT_port_rx_bad];
+		core_stats->rx_frame_errors =
+				stats[EF10_STAT_port_rx_align_error];
+		core_stats->rx_fifo_errors = stats[EF10_STAT_port_rx_overflow];
+		core_stats->rx_errors = (core_stats->rx_length_errors +
+					 core_stats->rx_crc_errors +
+					 core_stats->rx_frame_errors);
+	}
+
+	return stats_count;
+}
+
+static int efx_ef10_try_update_nic_stats_pf(struct efx_nic *efx)
 {
 	struct efx_ef10_nic_data *nic_data = efx->nic_data;
 	DECLARE_BITMAP(mask, EF10_STAT_COUNT);
@@ -1227,57 +1270,83 @@ static int efx_ef10_try_update_nic_stats(struct efx_nic *efx)
 }
 
 
-static size_t efx_ef10_update_stats(struct efx_nic *efx, u64 *full_stats,
-				    struct rtnl_link_stats64 *core_stats)
+static size_t efx_ef10_update_stats_pf(struct efx_nic *efx, u64 *full_stats,
+				       struct rtnl_link_stats64 *core_stats)
 {
-	DECLARE_BITMAP(mask, EF10_STAT_COUNT);
-	struct efx_ef10_nic_data *nic_data = efx->nic_data;
-	u64 *stats = nic_data->stats;
-	size_t stats_count = 0, index;
 	int retry;
 
-	efx_ef10_get_stat_mask(efx, mask);
-
 	/* If we're unlucky enough to read statistics during the DMA, wait
 	 * up to 10ms for it to finish (typically takes <500us)
 	 */
 	for (retry = 0; retry < 100; ++retry) {
-		if (efx_ef10_try_update_nic_stats(efx) == 0)
+		if (efx_ef10_try_update_nic_stats_pf(efx) == 0)
 			break;
 		udelay(100);
 	}
 
-	if (full_stats) {
-		for_each_set_bit(index, mask, EF10_STAT_COUNT) {
-			if (efx_ef10_stat_desc[index].name) {
-				*full_stats++ = stats[index];
-				++stats_count;
-			}
-		}
-	}
+	return efx_ef10_update_stats_common(efx, full_stats, core_stats);
+}
 
-	if (core_stats) {
-		core_stats->rx_packets = stats[EF10_STAT_port_rx_packets];
-		core_stats->tx_packets = stats[EF10_STAT_port_tx_packets];
-		core_stats->rx_bytes = stats[EF10_STAT_port_rx_bytes];
-		core_stats->tx_bytes = stats[EF10_STAT_port_tx_bytes];
-		core_stats->rx_dropped = stats[EF10_STAT_port_rx_nodesc_drops] +
-					 stats[GENERIC_STAT_rx_nodesc_trunc] +
-					 stats[GENERIC_STAT_rx_noskb_drops];
-		core_stats->multicast = stats[EF10_STAT_port_rx_multicast];
-		core_stats->rx_length_errors =
-				stats[EF10_STAT_port_rx_gtjumbo] +
-				stats[EF10_STAT_port_rx_length_error];
-		core_stats->rx_crc_errors = stats[EF10_STAT_port_rx_bad];
-		core_stats->rx_frame_errors =
-				stats[EF10_STAT_port_rx_align_error];
-		core_stats->rx_fifo_errors = stats[EF10_STAT_port_rx_overflow];
-		core_stats->rx_errors = (core_stats->rx_length_errors +
-					 core_stats->rx_crc_errors +
-					 core_stats->rx_frame_errors);
+static int efx_ef10_try_update_nic_stats_vf(struct efx_nic *efx)
+{
+	MCDI_DECLARE_BUF(inbuf, MC_CMD_MAC_STATS_IN_LEN);
+	struct efx_ef10_nic_data *nic_data = efx->nic_data;
+	DECLARE_BITMAP(mask, EF10_STAT_COUNT);
+	__le64 generation_start, generation_end;
+	u64 *stats = nic_data->stats;
+	u32 dma_len = MC_CMD_MAC_NSTATS * sizeof(u64);
+	struct efx_buffer stats_buf;
+	__le64 *dma_stats;
+	int rc;
+
+	efx_ef10_get_stat_mask(efx, mask);
+
+	rc = efx_nic_alloc_buffer(efx, &stats_buf, dma_len, GFP_ATOMIC);
+	if (rc)
+		return rc;
+
+	dma_stats = stats_buf.addr;
+	dma_stats[MC_CMD_MAC_GENERATION_END] = EFX_MC_STATS_GENERATION_INVALID;
+
+	MCDI_SET_QWORD(inbuf, MAC_STATS_IN_DMA_ADDR, stats_buf.dma_addr);
+	MCDI_POPULATE_DWORD_1(inbuf, MAC_STATS_IN_CMD,
+			      MAC_STATS_IN_DMA, true);
+	MCDI_SET_DWORD(inbuf, MAC_STATS_IN_DMA_LEN, dma_len);
+	MCDI_SET_DWORD(inbuf, MAC_STATS_IN_PORT_ID, EVB_PORT_ID_ASSIGNED);
+
+	spin_unlock_bh(&efx->stats_lock);
+	rc = efx_mcdi_rpc(efx, MC_CMD_MAC_STATS, inbuf, sizeof(inbuf), NULL,
+			  0, NULL);
+	spin_lock_bh(&efx->stats_lock);
+	if (rc)
+		goto out;
+
+	generation_end = dma_stats[MC_CMD_MAC_GENERATION_END];
+	if (generation_end == EFX_MC_STATS_GENERATION_INVALID)
+		goto out;
+	rmb();
+	efx_nic_update_stats(efx_ef10_stat_desc, EF10_STAT_COUNT, mask,
+			     stats, stats_buf.addr, false);
+	rmb();
+	generation_start = dma_stats[MC_CMD_MAC_GENERATION_START];
+	if (generation_end != generation_start) {
+		rc = -EAGAIN;
+		goto out;
 	}
 
-	return stats_count;
+	efx_update_sw_stats(efx, stats);
+out:
+	efx_nic_free_buffer(efx, &stats_buf);
+	return rc;
+}
+
+static size_t efx_ef10_update_stats_vf(struct efx_nic *efx, u64 *full_stats,
+				       struct rtnl_link_stats64 *core_stats)
+{
+	if (efx_ef10_try_update_nic_stats_vf(efx))
+		return 0;
+
+	return efx_ef10_update_stats_common(efx, full_stats, core_stats);
 }
 
 static void efx_ef10_push_irq_moderation(struct efx_channel *channel)
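Aside (not part of the diff): the VF path above relies on a generation-count handshake around the stats DMA. The driver seeds the GENERATION_END qword with an invalid value, issues MC_CMD_MAC_STATS, and only trusts the buffer once GENERATION_END has been overwritten and matches GENERATION_START after the payload has been copied, returning -EAGAIN otherwise. The freestanding C sketch below restates that pattern; the demo_ names and the buffer layout are hypothetical stand-ins, not the driver's API.

/*
 * Minimal sketch of the generation-count check used by
 * efx_ef10_try_update_nic_stats_vf() above.  All demo_ names and the
 * index layout are hypothetical; the real offsets come from the MCDI
 * protocol headers.
 */
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

#define DEMO_NSTATS       64                  /* qwords in the DMA buffer */
#define DEMO_GEN_START    0                   /* hypothetical start-marker index */
#define DEMO_GEN_END      (DEMO_NSTATS - 1)   /* hypothetical end-marker index */
#define DEMO_GEN_INVALID  UINT64_MAX          /* value seeded before the DMA */

/* Stand-in for the kernel's rmb() read barrier. */
#define demo_rmb()        __atomic_thread_fence(__ATOMIC_ACQUIRE)

/* Copy a consistent snapshot out of a buffer the device updates by DMA.
 * Returns true on success, false if the device has not written yet or was
 * mid-update; the caller retries, much as the PF path does with udelay().
 */
static bool demo_read_stats(const volatile uint64_t *dma_buf, uint64_t *out)
{
	uint64_t gen_end, gen_start;
	size_t i;

	gen_end = dma_buf[DEMO_GEN_END];
	if (gen_end == DEMO_GEN_INVALID)
		return false;             /* device has not written anything yet */

	demo_rmb();                       /* read end marker before the payload */
	for (i = 0; i < DEMO_NSTATS; i++)
		out[i] = dma_buf[i];
	demo_rmb();                       /* read payload before the start marker */

	gen_start = dma_buf[DEMO_GEN_START];
	return gen_end == gen_start;      /* mismatch => DMA overlapped the copy */
}
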
@@ -4122,7 +4191,7 @@ const struct efx_nic_type efx_hunt_a0_vf_nic_type = {
 	.prepare_flr = efx_ef10_prepare_flr,
 	.finish_flr = efx_port_dummy_op_void,
 	.describe_stats = efx_ef10_describe_stats,
-	.update_stats = efx_ef10_update_stats,
+	.update_stats = efx_ef10_update_stats_vf,
 	.start_stats = efx_port_dummy_op_void,
 	.pull_stats = efx_port_dummy_op_void,
 	.stop_stats = efx_port_dummy_op_void,
@@ -4224,7 +4293,7 @@ const struct efx_nic_type efx_hunt_a0_nic_type = {
 	.prepare_flr = efx_ef10_prepare_flr,
 	.finish_flr = efx_port_dummy_op_void,
 	.describe_stats = efx_ef10_describe_stats,
-	.update_stats = efx_ef10_update_stats,
+	.update_stats = efx_ef10_update_stats_pf,
 	.start_stats = efx_mcdi_mac_start_stats,
 	.pull_stats = efx_mcdi_mac_pull_stats,
 	.stop_stats = efx_mcdi_mac_stop_stats,
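Aside (not part of the diff): the PF/VF split stays invisible outside this file because each efx_nic_type table simply carries a different .update_stats hook, as the two hunks above show. A rough freestanding sketch of that ops-table dispatch, using hypothetical demo_ names rather than the driver's real types:

/* Hypothetical demo_ types illustrating how a per-variant hook is selected
 * through an ops table and called from common code, mirroring the
 * .update_stats wiring above.
 */
#include <stddef.h>
#include <stdint.h>

struct demo_nic;

struct demo_core_stats {
	uint64_t rx_packets;
	uint64_t tx_packets;
};

struct demo_nic_type {
	/* Fill full_stats and/or core_stats; return the number of full stats. */
	size_t (*update_stats)(struct demo_nic *nic, uint64_t *full_stats,
			       struct demo_core_stats *core_stats);
};

struct demo_nic {
	const struct demo_nic_type *type;
};

static size_t demo_update_stats_pf(struct demo_nic *nic, uint64_t *full_stats,
				   struct demo_core_stats *core_stats)
{
	/* PF flavour: wait for the periodic stats DMA to settle, then decode. */
	(void)nic; (void)full_stats; (void)core_stats;
	return 0;
}

static size_t demo_update_stats_vf(struct demo_nic *nic, uint64_t *full_stats,
				   struct demo_core_stats *core_stats)
{
	/* VF flavour: request a one-shot stats DMA first, then decode. */
	(void)nic; (void)full_stats; (void)core_stats;
	return 0;
}

static const struct demo_nic_type demo_pf_type = { .update_stats = demo_update_stats_pf };
static const struct demo_nic_type demo_vf_type = { .update_stats = demo_update_stats_vf };

/* Common code only ever sees the hook, never the hardware variant. */
static size_t demo_get_stats(struct demo_nic *nic, struct demo_core_stats *core_stats)
{
	return nic->type->update_stats(nic, NULL, core_stats);
}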