@@ -99,11 +99,6 @@ static struct smca_bank_name smca_names[] = {
 	[SMCA_PCIE]	= { "pcie",		"PCI Express Unit" },
 };
 
-static u32 smca_bank_addrs[MAX_NR_BANKS][NR_BLOCKS] __ro_after_init =
-{
-	[0 ... MAX_NR_BANKS - 1] = { [0 ... NR_BLOCKS - 1] = -1 }
-};
-
 static const char *smca_get_name(enum smca_bank_types t)
 {
 	if (t >= N_SMCA_BANK_TYPES)
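
An aside on the deleted cache's initializer: [0 ... N - 1] is the GNU C designated range syntax, which pre-filled every slot with -1 to mean "address not looked up yet". A minimal standalone illustration of the idiom (ROWS/COLS are made-up stand-ins for MAX_NR_BANKS/NR_BLOCKS; builds with gcc or clang):

/* Standalone sketch of the range-initializer idiom (GNU C extension). */
#include <stdio.h>

#define ROWS 3
#define COLS 4

static int addrs[ROWS][COLS] = {
	[0 ... ROWS - 1] = { [0 ... COLS - 1] = -1 }	/* every slot = -1 */
};

int main(void)
{
	printf("%d %d\n", addrs[0][0], addrs[2][3]);	/* prints: -1 -1 */
	return 0;
}
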
@@ -197,6 +192,9 @@ static char buf_mcatype[MAX_MCATYPE_NAME_LEN];
 static DEFINE_PER_CPU(struct threshold_bank **, threshold_banks);
 static DEFINE_PER_CPU(unsigned int, bank_map);	/* see which banks are on */
 
+/* Map of banks that have more than MCA_MISC0 available. */
+static DEFINE_PER_CPU(u32, smca_misc_banks_map);
+
 static void amd_threshold_interrupt(void);
 static void amd_deferred_error_interrupt(void);
 
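
The new per-CPU variable is a plain u32 used as a bitmap: bit n set means bank n has the extra MISC1-4 registers. A user-space sketch of the same bookkeeping, with the kernel's per_cpu/BIT machinery stood in for by plain C (all names here are illustrative, not the kernel's):

#include <stdint.h>
#include <stdio.h>

#define BIT(n)	(1u << (n))

static uint32_t misc_banks_map;	/* stand-in for one CPU's smca_misc_banks_map */

static void mark_bank(unsigned int bank)
{
	misc_banks_map |= BIT(bank);	/* bank advertises MISC1-4 */
}

static int bank_has_extra_misc(unsigned int bank)
{
	return !!(misc_banks_map & BIT(bank));
}

int main(void)
{
	mark_bank(3);
	printf("bank 3: %d, bank 5: %d\n",
	       bank_has_extra_misc(3), bank_has_extra_misc(5));	/* 1, 0 */
	return 0;
}
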
@@ -206,6 +204,28 @@ static void default_deferred_error_interrupt(void)
 }
 void (*deferred_error_int_vector)(void) = default_deferred_error_interrupt;
 
+static void smca_set_misc_banks_map(unsigned int bank, unsigned int cpu)
+{
+	u32 low, high;
+
+	/*
+	 * For SMCA enabled processors, BLKPTR field of the first MISC register
+	 * (MCx_MISC0) indicates presence of additional MISC regs set (MISC1-4).
+	 */
+	if (rdmsr_safe(MSR_AMD64_SMCA_MCx_CONFIG(bank), &low, &high))
+		return;
+
+	if (!(low & MCI_CONFIG_MCAX))
+		return;
+
+	if (rdmsr_safe(MSR_AMD64_SMCA_MCx_MISC(bank), &low, &high))
+		return;
+
+	if (low & MASK_BLKPTR_LO)
+		per_cpu(smca_misc_banks_map, cpu) |= BIT(bank);
+
+}
+
 static void smca_configure(unsigned int bank, unsigned int cpu)
 {
 	unsigned int i, hwid_mcatype;
@@ -243,6 +263,8 @@ static void smca_configure(unsigned int bank, unsigned int cpu)
 		wrmsr(smca_config, low, high);
 	}
 
+	smca_set_misc_banks_map(bank, cpu);
+
 	/* Return early if this bank was already initialized. */
 	if (smca_banks[bank].hwid)
 		return;
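
Worth noting: smca_set_misc_banks_map() is called from smca_configure() during CPU init, so the two rdmsr_safe() probes run once per bank in a known-safe context, and every later block-address lookup reduces to a bit test on the map. For reference, the two masks tested in that function, as defined near the top of amd.c (values reproduced here on the assumption they are unchanged; verify against your tree):

/* Reference only; assumed from amd.c, not part of this patch. */
#define MCI_CONFIG_MCAX		0x00000001	/* MCx_CONFIG[0]: MCAX enable */
#define MASK_BLKPTR_LO		0xFF000000	/* MCx_MISC0[31:24]: BLKPTR   */

So a bank is marked only when MCAX is enabled and MISC0 advertises the extra MISC1-4 block via a non-zero BLKPTR, mirroring the two early returns above.
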
@@ -453,50 +475,29 @@ static void deferred_error_interrupt_enable(struct cpuinfo_x86 *c)
 		wrmsr(MSR_CU_DEF_ERR, low, high);
 }
 
-static u32 smca_get_block_address(unsigned int bank, unsigned int block)
+static u32 smca_get_block_address(unsigned int bank, unsigned int block,
+				  unsigned int cpu)
 {
-	u32 low, high;
-	u32 addr = 0;
-
-	if (smca_get_bank_type(bank) == SMCA_RESERVED)
-		return addr;
-
 	if (!block)
 		return MSR_AMD64_SMCA_MCx_MISC(bank);
 
-	/* Check our cache first: */
-	if (smca_bank_addrs[bank][block] != -1)
-		return smca_bank_addrs[bank][block];
-
-	/*
-	 * For SMCA enabled processors, BLKPTR field of the first MISC register
-	 * (MCx_MISC0) indicates presence of additional MISC regs set (MISC1-4).
-	 */
-	if (rdmsr_safe(MSR_AMD64_SMCA_MCx_CONFIG(bank), &low, &high))
-		goto out;
-
-	if (!(low & MCI_CONFIG_MCAX))
-		goto out;
-
-	if (!rdmsr_safe(MSR_AMD64_SMCA_MCx_MISC(bank), &low, &high) &&
-	    (low & MASK_BLKPTR_LO))
-		addr = MSR_AMD64_SMCA_MCx_MISCy(bank, block - 1);
+	if (!(per_cpu(smca_misc_banks_map, cpu) & BIT(bank)))
+		return 0;
 
-out:
-	smca_bank_addrs[bank][block] = addr;
-	return addr;
+	return MSR_AMD64_SMCA_MCx_MISCy(bank, block - 1);
 }
 
 static u32 get_block_address(u32 current_addr, u32 low, u32 high,
-			     unsigned int bank, unsigned int block)
+			     unsigned int bank, unsigned int block,
+			     unsigned int cpu)
 {
 	u32 addr = 0, offset = 0;
 
-	if ((bank >= mca_cfg.banks) || (block >= NR_BLOCKS))
+	if ((bank >= per_cpu(mce_num_banks, cpu)) || (block >= NR_BLOCKS))
 		return addr;
 
 	if (mce_flags.smca)
-		return smca_get_block_address(bank, block);
+		return smca_get_block_address(bank, block, cpu);
 
 	/* Fall back to method we used for older processors: */
 	switch (block) {
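
With the map in place, smca_get_block_address() becomes pure arithmetic over the SMCA MSR layout: block 0 is MCx_MISC0 and blocks 1-4 map to MISCy(bank, block - 1). A standalone sketch of that arithmetic (MSR base values assumed from arch/x86/include/asm/msr-index.h; verify against your tree):

#include <stdio.h>

/* Assumed values from msr-index.h, reproduced for illustration. */
#define MSR_AMD64_SMCA_MC0_MISC0	0xc0002003
#define MSR_AMD64_SMCA_MC0_MISC1	0xc000200a
#define MSR_AMD64_SMCA_MCx_MISC(x)	(MSR_AMD64_SMCA_MC0_MISC0 + 0x10 * (x))
#define MSR_AMD64_SMCA_MCx_MISCy(x, y)	(MSR_AMD64_SMCA_MC0_MISC1 + (y) + 0x10 * (x))

int main(void)
{
	/* Bank 2: MISC0 sits in the bank's 0x10-sized slot, MISC1-4 just after it. */
	printf("MC2_MISC0 = %#x\n", MSR_AMD64_SMCA_MCx_MISC(2));	/* 0xc0002023 */
	printf("MC2_MISC1 = %#x\n", MSR_AMD64_SMCA_MCx_MISCy(2, 0));	/* 0xc000202a */
	return 0;
}

The per-block MSR reads the old cache was amortizing are gone entirely; only a bit test remains on the lookup path.
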
@@ -624,18 +625,19 @@ void disable_err_thresholding(struct cpuinfo_x86 *c, unsigned int bank)
 /* cpu init entry point, called from mce.c with preempt off */
 void mce_amd_feature_init(struct cpuinfo_x86 *c)
 {
-	u32 low = 0, high = 0, address = 0;
 	unsigned int bank, block, cpu = smp_processor_id();
+	u32 low = 0, high = 0, address = 0;
 	int offset = -1;
 
-	for (bank = 0; bank < mca_cfg.banks; ++bank) {
+
+	for (bank = 0; bank < this_cpu_read(mce_num_banks); ++bank) {
 		if (mce_flags.smca)
 			smca_configure(bank, cpu);
 
 		disable_err_thresholding(c, bank);
 
 		for (block = 0; block < NR_BLOCKS; ++block) {
-			address = get_block_address(address, low, high, bank, block);
+			address = get_block_address(address, low, high, bank, block, cpu);
 			if (!address)
 				break;
 
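
From this hunk on, every mca_cfg.banks loop bound becomes a per-CPU read, and the accessor tracks the calling context: this_cpu_read(mce_num_banks) on paths that run on the CPU itself with preemption off (the init above, the interrupt handlers below), per_cpu(mce_num_banks, cpu) on paths that act on another CPU's state (threshold device create/remove, allocate_threshold_blocks). A toy model of that split, with per-CPU storage reduced to a plain array (all names and sizes here are made up):

#include <stdio.h>

#define NR_CPUS 4

static unsigned int mce_num_banks[NR_CPUS];	/* per-CPU variable, modelled as an array */
static int current_cpu;				/* stands in for "the CPU we run on" */

/* this_cpu_read(): the running CPU's copy; safe only with preemption off. */
static unsigned int this_cpu_read_banks(void)
{
	return mce_num_banks[current_cpu];
}

/* per_cpu(v, cpu): an explicitly named CPU's copy, usable from other CPUs. */
static unsigned int per_cpu_banks(int cpu)
{
	return mce_num_banks[cpu];
}

int main(void)
{
	mce_num_banks[0] = 23;
	mce_num_banks[1] = 7;	/* bank counts can differ per CPU */
	printf("local: %u, cpu1: %u\n", this_cpu_read_banks(), per_cpu_banks(1));
	return 0;
}
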
@@ -973,7 +975,7 @@ static void amd_deferred_error_interrupt(void)
 {
 	unsigned int bank;
 
-	for (bank = 0; bank < mca_cfg.banks; ++bank)
+	for (bank = 0; bank < this_cpu_read(mce_num_banks); ++bank)
 		log_error_deferred(bank);
 }
 
@@ -1014,7 +1016,7 @@ static void amd_threshold_interrupt(void)
 	struct threshold_block *first_block = NULL, *block = NULL, *tmp = NULL;
 	unsigned int bank, cpu = smp_processor_id();
 
-	for (bank = 0; bank < mca_cfg.banks; ++bank) {
+	for (bank = 0; bank < this_cpu_read(mce_num_banks); ++bank) {
 		if (!(per_cpu(bank_map, cpu) & (1 << bank)))
 			continue;
 
@@ -1201,7 +1203,7 @@ static int allocate_threshold_blocks(unsigned int cpu, unsigned int bank,
 	u32 low, high;
 	int err;
 
-	if ((bank >= mca_cfg.banks) || (block >= NR_BLOCKS))
+	if ((bank >= per_cpu(mce_num_banks, cpu)) || (block >= NR_BLOCKS))
 		return 0;
 
 	if (rdmsr_safe_on_cpu(cpu, address, &low, &high))
@@ -1252,7 +1254,7 @@ static int allocate_threshold_blocks(unsigned int cpu, unsigned int bank,
 	if (err)
 		goto out_free;
 recurse:
-	address = get_block_address(address, low, high, bank, ++block);
+	address = get_block_address(address, low, high, bank, ++block, cpu);
 	if (!address)
 		return 0;
 
@@ -1435,7 +1437,7 @@ int mce_threshold_remove_device(unsigned int cpu)
 {
 	unsigned int bank;
 
-	for (bank = 0; bank < mca_cfg.banks; ++bank) {
+	for (bank = 0; bank < per_cpu(mce_num_banks, cpu); ++bank) {
 		if (!(per_cpu(bank_map, cpu) & (1 << bank)))
 			continue;
 		threshold_remove_bank(cpu, bank);
@@ -1456,14 +1458,14 @@ int mce_threshold_create_device(unsigned int cpu)
 	if (bp)
 		return 0;
 
-	bp = kcalloc(mca_cfg.banks, sizeof(struct threshold_bank *),
+	bp = kcalloc(per_cpu(mce_num_banks, cpu), sizeof(struct threshold_bank *),
 		     GFP_KERNEL);
 	if (!bp)
 		return -ENOMEM;
 
 	per_cpu(threshold_banks, cpu) = bp;
 
-	for (bank = 0; bank < mca_cfg.banks; ++bank) {
+	for (bank = 0; bank < per_cpu(mce_num_banks, cpu); ++bank) {
 		if (!(per_cpu(bank_map, cpu) & (1 << bank)))
 			continue;
 		err = threshold_create_bank(cpu, bank);