@@ -1736,10 +1736,11 @@ EXPORT_SYMBOL(cxgb4_sync_txq_pidx);
 int cxgb4_read_tpte(struct net_device *dev, u32 stag, __be32 *tpte)
 {
-	struct adapter *adap;
-	u32 offset, memtype, memaddr;
 	u32 edc0_size, edc1_size, mc0_size, mc1_size, size;
 	u32 edc0_end, edc1_end, mc0_end, mc1_end;
+	u32 offset, memtype, memaddr;
+	struct adapter *adap;
+	u32 hma_size = 0;
 	int ret;
 
 	adap = netdev2adap(dev);
@@ -1759,6 +1760,10 @@ int cxgb4_read_tpte(struct net_device *dev, u32 stag, __be32 *tpte)
 	size = t4_read_reg(adap, MA_EXT_MEMORY0_BAR_A);
 	mc0_size = EXT_MEM0_SIZE_G(size) << 20;
 
+	if (t4_read_reg(adap, MA_TARGET_MEM_ENABLE_A) & HMA_MUX_F) {
+		size = t4_read_reg(adap, MA_EXT_MEMORY1_BAR_A);
+		hma_size = EXT_MEM1_SIZE_G(size) << 20;
+	}
 	edc0_end = edc0_size;
 	edc1_end = edc0_end + edc1_size;
 	mc0_end = edc1_end + mc0_size;
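
When the HMA mux bit is set in MA_TARGET_MEM_ENABLE_A, the EXT_MEMORY1 BAR describes the host-memory window rather than on-board MC1, and its size field is scaled from MB to bytes. As a rough standalone illustration of that decode (the 12-bit low-order field layout and the ext_mem1_size_bytes() helper are assumptions for the sketch; the real EXT_MEM1_SIZE_G() accessor lives in t4_regs.h):

#include <stdint.h>
#include <stdio.h>

/* Assumed field layout, for illustration only: size-in-MB in the low bits. */
#define EXT_MEM1_SIZE_SHIFT	0
#define EXT_MEM1_SIZE_MASK	0xfffu

static uint64_t ext_mem1_size_bytes(uint32_t bar)
{
	uint64_t size_mb = (bar >> EXT_MEM1_SIZE_SHIFT) & EXT_MEM1_SIZE_MASK;

	return size_mb << 20;	/* mirrors "EXT_MEM1_SIZE_G(size) << 20" */
}

int main(void)
{
	/* A register reporting a 92MB carve-out decodes to 96468992 bytes. */
	printf("%llu\n", (unsigned long long)ext_mem1_size_bytes(92));
	return 0;
}
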
@@ -1770,7 +1775,10 @@ int cxgb4_read_tpte(struct net_device *dev, u32 stag, __be32 *tpte)
 		memtype = MEM_EDC1;
 		memaddr = offset - edc0_end;
 	} else {
-		if (offset < mc0_end) {
+		if (hma_size && (offset < (edc1_end + hma_size))) {
+			memtype = MEM_HMA;
+			memaddr = offset - edc1_end;
+		} else if (offset < mc0_end) {
 			memtype = MEM_MC0;
 			memaddr = offset - edc1_end;
 		} else if (is_t5(adap->params.chip)) {
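
The lookup above treats the adapter's flat offset space as EDC0 | EDC1 | (HMA or MC0) | ..., with a non-zero hma_size slotting a host-memory window directly after EDC1; both the HMA and MC0 branches therefore rebase the offset against edc1_end. A minimal userspace model of the patched decode (decode() and the sizes in main() are made up for the example; the T5/MC1 tail of the real chain is omitted):

#include <stdint.h>
#include <stdio.h>

enum mem { MEM_EDC0, MEM_EDC1, MEM_HMA, MEM_MC0 };

static int decode(uint32_t off, uint32_t edc0_size, uint32_t edc1_size,
		  uint32_t hma_size, uint32_t mc0_size,
		  enum mem *type, uint32_t *addr)
{
	uint32_t edc0_end = edc0_size;
	uint32_t edc1_end = edc0_end + edc1_size;
	uint32_t mc0_end = edc1_end + mc0_size;

	if (off < edc0_end) {
		*type = MEM_EDC0;
		*addr = off;
	} else if (off < edc1_end) {
		*type = MEM_EDC1;
		*addr = off - edc0_end;
	} else if (hma_size && off < edc1_end + hma_size) {
		*type = MEM_HMA;	/* the case this patch adds */
		*addr = off - edc1_end;
	} else if (off < mc0_end) {
		*type = MEM_MC0;
		*addr = off - edc1_end;
	} else {
		return -1;	/* MC1/T5 handling omitted from the model */
	}
	return 0;
}

int main(void)
{
	enum mem type;
	uint32_t addr;

	/* 256KB EDC0 and EDC1, 1MB HMA: 0x90000 lands 0x10000 into HMA. */
	if (!decode(0x90000, 0x40000, 0x40000, 0x100000, 0, &type, &addr))
		printf("type=%d addr=0x%x\n", type, addr);
	return 0;
}
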
@@ -3301,6 +3309,206 @@ static void setup_memwin_rdma(struct adapter *adap)
 	}
 }
 
+/* HMA Definitions */
+
+/* The maximum number of addresses that can be sent in a single FW cmd */
+#define HMA_MAX_ADDR_IN_CMD	5
+
+#define HMA_PAGE_SIZE		PAGE_SIZE
+
+#define HMA_MAX_NO_FW_ADDRESS	(16 << 10)  /* FW supports 16K addresses */
+
+#define HMA_PAGE_ORDER					\
+	((HMA_PAGE_SIZE < HMA_MAX_NO_FW_ADDRESS) ?	\
+	ilog2(HMA_MAX_NO_FW_ADDRESS / HMA_PAGE_SIZE) : 0)
+
+/* The minimum and maximum possible HMA sizes that can be specified in the FW
+ * configuration (in units of MB).
+ */
+#define HMA_MIN_TOTAL_SIZE	1
+#define HMA_MAX_TOTAL_SIZE				\
+	(((HMA_PAGE_SIZE << HMA_PAGE_ORDER) *		\
+	  HMA_MAX_NO_FW_ADDRESS) >> 20)
+
+static void adap_free_hma_mem(struct adapter *adapter)
+{
+	struct scatterlist *iter;
+	struct page *page;
+	int i;
+
+	if (!adapter->hma.sgt)
+		return;
+
+	if (adapter->hma.flags & HMA_DMA_MAPPED_FLAG) {
+		dma_unmap_sg(adapter->pdev_dev, adapter->hma.sgt->sgl,
+			     adapter->hma.sgt->nents, PCI_DMA_BIDIRECTIONAL);
+		adapter->hma.flags &= ~HMA_DMA_MAPPED_FLAG;
+	}
+
+	for_each_sg(adapter->hma.sgt->sgl, iter,
+		    adapter->hma.sgt->orig_nents, i) {
+		page = sg_page(iter);
+		if (page)
+			__free_pages(page, HMA_PAGE_ORDER);
+	}
+
+	kfree(adapter->hma.phy_addr);
+	sg_free_table(adapter->hma.sgt);
+	kfree(adapter->hma.sgt);
+	adapter->hma.sgt = NULL;
+}
+
+static int adap_config_hma(struct adapter *adapter)
+{
+	struct scatterlist *sgl, *iter;
+	struct sg_table *sgt;
+	struct page *newpage;
+	unsigned int i, j, k;
+	u32 param, hma_size;
+	unsigned int ncmds;
+	size_t page_size;
+	u32 page_order;
+	int node, ret;
+
+	/* HMA is supported only for T6+ cards.
+	 * Avoid initializing HMA in kdump kernels.
+	 */
+	if (is_kdump_kernel() ||
+	    CHELSIO_CHIP_VERSION(adapter->params.chip) < CHELSIO_T6)
+		return 0;
+
+	/* Get the HMA region size required by fw */
+	param = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
+		 FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_HMA_SIZE));
+	ret = t4_query_params(adapter, adapter->mbox, adapter->pf, 0,
+			      1, &param, &hma_size);
+	/* An error means the card has its own memory or HMA is not supported
+	 * by the firmware. Return without any errors.
+	 */
+	if (ret || !hma_size)
+		return 0;
+
+	if (hma_size < HMA_MIN_TOTAL_SIZE ||
+	    hma_size > HMA_MAX_TOTAL_SIZE) {
+		dev_err(adapter->pdev_dev,
+			"HMA size %uMB beyond bounds(%u-%lu)MB\n",
+			hma_size, HMA_MIN_TOTAL_SIZE, HMA_MAX_TOTAL_SIZE);
+		return -EINVAL;
+	}
+
+	page_size = HMA_PAGE_SIZE;
+	page_order = HMA_PAGE_ORDER;
+	adapter->hma.sgt = kzalloc(sizeof(*adapter->hma.sgt), GFP_KERNEL);
+	if (unlikely(!adapter->hma.sgt)) {
+		dev_err(adapter->pdev_dev, "HMA SG table allocation failed\n");
+		return -ENOMEM;
+	}
+	sgt = adapter->hma.sgt;
+	/* FW returned value will be in MB */
+	sgt->orig_nents = (hma_size << 20) / (page_size << page_order);
+	if (sg_alloc_table(sgt, sgt->orig_nents, GFP_KERNEL)) {
+		dev_err(adapter->pdev_dev, "HMA SGL allocation failed\n");
+		kfree(adapter->hma.sgt);
+		adapter->hma.sgt = NULL;
+		return -ENOMEM;
+	}
+
+	sgl = adapter->hma.sgt->sgl;
+	node = dev_to_node(adapter->pdev_dev);
+	for_each_sg(sgl, iter, sgt->orig_nents, i) {
+		newpage = alloc_pages_node(node, __GFP_NOWARN | GFP_KERNEL,
+					   page_order);
+		if (!newpage) {
+			dev_err(adapter->pdev_dev,
+				"Not enough memory for HMA page allocation\n");
+			ret = -ENOMEM;
+			goto free_hma;
+		}
+		sg_set_page(iter, newpage, page_size << page_order, 0);
+	}
+
+	sgt->nents = dma_map_sg(adapter->pdev_dev, sgl, sgt->orig_nents,
+				DMA_BIDIRECTIONAL);
+	if (!sgt->nents) {
+		dev_err(adapter->pdev_dev,
+			"Not enough memory for HMA DMA mapping\n");
+		ret = -ENOMEM;
+		goto free_hma;
+	}
+	adapter->hma.flags |= HMA_DMA_MAPPED_FLAG;
+
+	adapter->hma.phy_addr = kcalloc(sgt->nents, sizeof(dma_addr_t),
+					GFP_KERNEL);
+	if (unlikely(!adapter->hma.phy_addr)) {
+		ret = -ENOMEM;
+		goto free_hma;
+	}
+
+	for_each_sg(sgl, iter, sgt->nents, i) {
+		newpage = sg_page(iter);
+		adapter->hma.phy_addr[i] = sg_dma_address(iter);
+	}
+
+	ncmds = DIV_ROUND_UP(sgt->nents, HMA_MAX_ADDR_IN_CMD);
+	/* Pass on the addresses to firmware */
+	for (i = 0, k = 0; i < ncmds; i++, k += HMA_MAX_ADDR_IN_CMD) {
+		struct fw_hma_cmd hma_cmd;
+		u8 naddr = HMA_MAX_ADDR_IN_CMD;
+		u8 soc = 0, eoc = 0;
+		u8 hma_mode = 1; /* Presently we support only page table mode */
+
+		soc = (i == 0) ? 1 : 0;
+		eoc = (i == ncmds - 1) ? 1 : 0;
+
+		/* For the last cmd, set naddr to the number of remaining
+		 * addresses
+		 */
+		if (i == ncmds - 1) {
+			naddr = sgt->nents % HMA_MAX_ADDR_IN_CMD;
+			naddr = naddr ? naddr : HMA_MAX_ADDR_IN_CMD;
+		}
+		memset(&hma_cmd, 0, sizeof(hma_cmd));
+		hma_cmd.op_pkd = htonl(FW_CMD_OP_V(FW_HMA_CMD) |
+				       FW_CMD_REQUEST_F | FW_CMD_WRITE_F);
+		hma_cmd.retval_len16 = htonl(FW_LEN16(hma_cmd));
+
+		hma_cmd.mode_to_pcie_params =
+			htonl(FW_HMA_CMD_MODE_V(hma_mode) |
+			      FW_HMA_CMD_SOC_V(soc) | FW_HMA_CMD_EOC_V(eoc));
+
+		/* HMA cmd size specified in MB */
+		hma_cmd.naddr_size =
+			htonl(FW_HMA_CMD_SIZE_V(hma_size) |
+			      FW_HMA_CMD_NADDR_V(naddr));
+
+		/* Total page size specified in units of 4K */
+		hma_cmd.addr_size_pkd =
+			htonl(FW_HMA_CMD_ADDR_SIZE_V
+				((page_size << page_order) >> 12));
+
+		/* Fill the 5 addresses */
+		for (j = 0; j < naddr; j++) {
+			hma_cmd.phy_address[j] =
+				cpu_to_be64(adapter->hma.phy_addr[j + k]);
+		}
+		ret = t4_wr_mbox(adapter, adapter->mbox, &hma_cmd,
+				 sizeof(hma_cmd), &hma_cmd);
+		if (ret) {
+			dev_err(adapter->pdev_dev,
+				"HMA FW command failed with err %d\n", ret);
+			goto free_hma;
+		}
+	}
+
+	if (!ret)
+		dev_info(adapter->pdev_dev,
+			 "Reserved %uMB host memory for HMA\n", hma_size);
+	return ret;
+
+free_hma:
+	adap_free_hma_mem(adapter);
+	return ret;
+}
+
 static int adap_init1(struct adapter *adap, struct fw_caps_config_cmd *c)
 {
 	u32 v;
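
adap_config_hma() asks the firmware how much host memory it wants (FW_PARAMS_PARAM_DEV_HMA_SIZE, in MB), backs that with page_size << HMA_PAGE_ORDER chunks, DMA-maps them, and ships the bus addresses to the firmware five per FW_HMA_CMD. With 4KB pages, HMA_PAGE_ORDER is ilog2(16384 / 4096) = 2, so each scatterlist entry covers 16KB and HMA_MAX_TOTAL_SIZE works out to ((4096 << 2) * 16384) >> 20 = 256MB. A standalone sketch of just the command-batching arithmetic (plan_hma_cmds() is a made-up name; the real loop builds a struct fw_hma_cmd and posts it via t4_wr_mbox()):

#include <stdio.h>

#define HMA_MAX_ADDR_IN_CMD 5

static void plan_hma_cmds(unsigned int nents)
{
	/* DIV_ROUND_UP(nents, HMA_MAX_ADDR_IN_CMD) */
	unsigned int ncmds = (nents + HMA_MAX_ADDR_IN_CMD - 1) /
			     HMA_MAX_ADDR_IN_CMD;
	unsigned int i;

	for (i = 0; i < ncmds; i++) {
		unsigned int naddr = HMA_MAX_ADDR_IN_CMD;

		/* The last command carries the remainder, or a full batch
		 * when nents is an exact multiple of 5. */
		if (i == ncmds - 1) {
			naddr = nents % HMA_MAX_ADDR_IN_CMD;
			if (!naddr)
				naddr = HMA_MAX_ADDR_IN_CMD;
		}
		printf("cmd %u: soc=%d eoc=%d naddr=%u\n",
		       i, i == 0, i == ncmds - 1, naddr);
	}
}

int main(void)
{
	/* 64MB of HMA in 16KB chunks -> 4096 addresses -> 820 commands,
	 * the last carrying a single address (4096 = 819 * 5 + 1). */
	plan_hma_cmds(4096);
	return 0;
}

The SOC/EOC flags bracket the sequence so the firmware knows when the page table is complete, and the remainder handling keeps the last command from advertising more addresses than were actually mapped.
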
@@ -3754,6 +3962,12 @@ static int adap_init0_config(struct adapter *adapter, int reset)
 	if (ret < 0)
 		goto bye;
 
+	/* We will proceed even if HMA init fails. */
+	ret = adap_config_hma(adapter);
+	if (ret)
+		dev_err(adapter->pdev_dev,
+			"HMA configuration failed with error %d\n", ret);
+
 	/*
 	 * And finally tell the firmware to initialize itself using the
 	 * parameters from the Configuration File.
@@ -3960,6 +4174,11 @@ static int adap_init0(struct adapter *adap)
 	 * effect. Otherwise, it's time to try initializing the adapter.
 	 */
 	if (state == DEV_STATE_INIT) {
+		ret = adap_config_hma(adap);
+		if (ret)
+			dev_err(adap->pdev_dev,
+				"HMA configuration failed with error %d\n",
+				ret);
 		dev_info(adap->pdev_dev, "Coming up as %s: "\
 			 "Adapter already initialized\n",
 			 adap->flags & MASTER_PF ? "MASTER" : "SLAVE");
@@ -4349,6 +4568,7 @@ static int adap_init0(struct adapter *adap)
 	 * happened to HW/FW, stop issuing commands.
 	 */
 bye:
+	adap_free_hma_mem(adap);
 	kfree(adap->sge.egr_map);
 	kfree(adap->sge.ingr_map);
 	kfree(adap->sge.starving_fl);
@@ -5576,6 +5796,8 @@ static void remove_one(struct pci_dev *pdev)
 			t4_uld_clean_up(adapter);
 		}
 
+		adap_free_hma_mem(adapter);
+
 		disable_interrupts(adapter);
 
 		for_each_port(adapter, i)
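
adap_free_hma_mem() ends up with three callers — the free_hma error path inside adap_config_hma(), the bye: cleanup label in adap_init0(), and remove_one() — so it is written to be safe to call at any point: it returns early when nothing was allocated and re-NULLs the sgt pointer after freeing. A minimal sketch of that idempotent-teardown idiom (names are illustrative, not driver code):

#include <stdlib.h>

struct hma_state {
	void *table;	/* stands in for adapter->hma.sgt */
};

static void hma_teardown(struct hma_state *s)
{
	if (!s->table)		/* never configured, or already torn down */
		return;

	free(s->table);
	s->table = NULL;	/* make any later call a no-op */
}

int main(void)
{
	struct hma_state s = { .table = malloc(16) };

	hma_teardown(&s);	/* frees */
	hma_teardown(&s);	/* harmless no-op, like bye: after free_hma */
	return 0;
}
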