
Commit c29efea

akiyano authored and davem330 committed
net: ena: move llq configuration from ena_probe to ena_device_init()
When the ENA device resets to recover from some error state, all LLQ configuration values are reset to their defaults, because LLQ is initialized only once, during ena_probe().

Changes in this commit:

1. Move the LLQ configuration process into ena_device_init(), which is called from both ena_probe() and ena_restore_device(). This way, LLQ setup configurations that differ from the default values survive resets.

2. Extract the LLQ bar mapping into ena_map_llq_mem_bar(), and call it once in the lifetime of the driver, from ena_probe(), since there is no need to unmap and remap the LLQ bar on every reset.

3. Map the LLQ bar if it exists, regardless of whether initialization of the LLQ placement policy (ENA_ADMIN_PLACEMENT_POLICY_DEV) succeeded. Initialization might fail the first time, falling back to the ENA_ADMIN_PLACEMENT_POLICY_HOST placement policy, but later succeed after a device reset, in which case the LLQ bar must already be mapped.

Signed-off-by: Sameeh Jubran <[email protected]>
Signed-off-by: Arthur Kiyanovski <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
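To make the intent of the reorganization concrete, here is a minimal, self-contained C model of the control flow this commit produces: the LLQ memory BAR is mapped once per driver lifetime (the ena_probe() step), while the LLQ placement policy is negotiated again on every device init, so it is re-applied by the restore path after a reset. The types and helpers below (model_dev, model_probe_map_bar, model_device_init) are illustrative stand-ins, not the actual ENA driver code.

#include <stdbool.h>
#include <stdio.h>

enum placement_policy { POLICY_HOST, POLICY_DEV };

struct model_dev {
	bool llq_supported;              /* device advertises the LLQ feature */
	bool has_mem_bar;                /* device exposes the LLQ memory BAR */
	bool mem_bar_mapped;             /* set once, in the "probe" step */
	enum placement_policy tx_policy;
};

/* Runs once per driver lifetime (like ena_probe): map the BAR if present. */
static void model_probe_map_bar(struct model_dev *dev)
{
	if (dev->has_mem_bar)
		dev->mem_bar_mapped = true;
}

/* Runs at probe time AND after every reset (like ena_device_init):
 * negotiate the placement policy again, falling back to host mode. */
static void model_device_init(struct model_dev *dev)
{
	if (dev->llq_supported && dev->mem_bar_mapped)
		dev->tx_policy = POLICY_DEV;
	else
		dev->tx_policy = POLICY_HOST;
}

static const char *policy_name(enum placement_policy p)
{
	return p == POLICY_DEV ? "DEV (LLQ)" : "HOST";
}

int main(void)
{
	struct model_dev dev = { .llq_supported = true, .has_mem_bar = true };

	model_probe_map_bar(&dev);       /* once, at probe */
	model_device_init(&dev);         /* probe path */
	printf("after probe: %s\n", policy_name(dev.tx_policy));

	dev.tx_policy = POLICY_HOST;     /* a reset wipes the negotiated policy */
	model_device_init(&dev);         /* restore path re-applies it */
	printf("after reset: %s\n", policy_name(dev.tx_policy));
	return 0;
}

Compiled and run, the model reports a DEV (LLQ) policy both after the initial probe and after the simulated reset, which is the behavior the real change restores.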
1 parent 0ee60ed commit c29efea

1 file changed, 73 insertions(+), 63 deletions(-)

drivers/net/ethernet/amazon/ena/ena_netdev.c

@@ -3280,10 +3280,71 @@ static int ena_device_validate_params(struct ena_adapter *adapter,
 	return 0;
 }
 
+static void set_default_llq_configurations(struct ena_llq_configurations *llq_config)
+{
+	llq_config->llq_header_location = ENA_ADMIN_INLINE_HEADER;
+	llq_config->llq_stride_ctrl = ENA_ADMIN_MULTIPLE_DESCS_PER_ENTRY;
+	llq_config->llq_num_decs_before_header = ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_2;
+	llq_config->llq_ring_entry_size = ENA_ADMIN_LIST_ENTRY_SIZE_128B;
+	llq_config->llq_ring_entry_size_value = 128;
+}
+
+static int ena_set_queues_placement_policy(struct pci_dev *pdev,
+					   struct ena_com_dev *ena_dev,
+					   struct ena_admin_feature_llq_desc *llq,
+					   struct ena_llq_configurations *llq_default_configurations)
+{
+	int rc;
+	u32 llq_feature_mask;
+
+	llq_feature_mask = 1 << ENA_ADMIN_LLQ;
+	if (!(ena_dev->supported_features & llq_feature_mask)) {
+		dev_err(&pdev->dev,
+			"LLQ is not supported Fallback to host mode policy.\n");
+		ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST;
+		return 0;
+	}
+
+	rc = ena_com_config_dev_mode(ena_dev, llq, llq_default_configurations);
+	if (unlikely(rc)) {
+		dev_err(&pdev->dev,
+			"Failed to configure the device mode. Fallback to host mode policy.\n");
+		ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST;
+	}
+
+	return 0;
+}
+
+static int ena_map_llq_mem_bar(struct pci_dev *pdev, struct ena_com_dev *ena_dev,
+			       int bars)
+{
+	bool has_mem_bar = !!(bars & BIT(ENA_MEM_BAR));
+
+	if (!has_mem_bar) {
+		if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) {
+			dev_err(&pdev->dev,
+				"ENA device does not expose LLQ bar. Fallback to host mode policy.\n");
+			ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST;
+		}
+
+		return 0;
+	}
+
+	ena_dev->mem_bar = devm_ioremap_wc(&pdev->dev,
+					   pci_resource_start(pdev, ENA_MEM_BAR),
+					   pci_resource_len(pdev, ENA_MEM_BAR));
+
+	if (!ena_dev->mem_bar)
+		return -EFAULT;
+
+	return 0;
+}
+
 static int ena_device_init(struct ena_com_dev *ena_dev, struct pci_dev *pdev,
 			   struct ena_com_dev_get_features_ctx *get_feat_ctx,
 			   bool *wd_state)
 {
+	struct ena_llq_configurations llq_config;
 	struct device *dev = &pdev->dev;
 	bool readless_supported;
 	u32 aenq_groups;
@@ -3374,6 +3435,15 @@ static int ena_device_init(struct ena_com_dev *ena_dev, struct pci_dev *pdev,
 
 	*wd_state = !!(aenq_groups & BIT(ENA_ADMIN_KEEP_ALIVE));
 
+	set_default_llq_configurations(&llq_config);
+
+	rc = ena_set_queues_placement_policy(pdev, ena_dev, &get_feat_ctx->llq,
+					     &llq_config);
+	if (rc) {
+		dev_err(&pdev->dev, "ena device init failed\n");
+		goto err_admin_init;
+	}
+
 	return 0;
 
 err_admin_init:
@@ -3880,54 +3950,6 @@ static u32 ena_calc_max_io_queue_num(struct pci_dev *pdev,
 	return max_num_io_queues;
 }
 
-static int ena_set_queues_placement_policy(struct pci_dev *pdev,
-					   struct ena_com_dev *ena_dev,
-					   struct ena_admin_feature_llq_desc *llq,
-					   struct ena_llq_configurations *llq_default_configurations)
-{
-	bool has_mem_bar;
-	int rc;
-	u32 llq_feature_mask;
-
-	llq_feature_mask = 1 << ENA_ADMIN_LLQ;
-	if (!(ena_dev->supported_features & llq_feature_mask)) {
-		dev_err(&pdev->dev,
-			"LLQ is not supported Fallback to host mode policy.\n");
-		ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST;
-		return 0;
-	}
-
-	has_mem_bar = pci_select_bars(pdev, IORESOURCE_MEM) & BIT(ENA_MEM_BAR);
-
-	rc = ena_com_config_dev_mode(ena_dev, llq, llq_default_configurations);
-	if (unlikely(rc)) {
-		dev_err(&pdev->dev,
-			"Failed to configure the device mode. Fallback to host mode policy.\n");
-		ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST;
-		return 0;
-	}
-
-	/* Nothing to config, exit */
-	if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST)
-		return 0;
-
-	if (!has_mem_bar) {
-		dev_err(&pdev->dev,
-			"ENA device does not expose LLQ bar. Fallback to host mode policy.\n");
-		ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST;
-		return 0;
-	}
-
-	ena_dev->mem_bar = devm_ioremap_wc(&pdev->dev,
-					   pci_resource_start(pdev, ENA_MEM_BAR),
-					   pci_resource_len(pdev, ENA_MEM_BAR));
-
-	if (!ena_dev->mem_bar)
-		return -EFAULT;
-
-	return 0;
-}
-
 static void ena_set_dev_offloads(struct ena_com_dev_get_features_ctx *feat,
 				 struct net_device *netdev)
 {
@@ -4043,14 +4065,6 @@ static void ena_release_bars(struct ena_com_dev *ena_dev, struct pci_dev *pdev)
 	pci_release_selected_regions(pdev, release_bars);
 }
 
-static void set_default_llq_configurations(struct ena_llq_configurations *llq_config)
-{
-	llq_config->llq_header_location = ENA_ADMIN_INLINE_HEADER;
-	llq_config->llq_ring_entry_size = ENA_ADMIN_LIST_ENTRY_SIZE_128B;
-	llq_config->llq_stride_ctrl = ENA_ADMIN_MULTIPLE_DESCS_PER_ENTRY;
-	llq_config->llq_num_decs_before_header = ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_2;
-	llq_config->llq_ring_entry_size_value = 128;
-}
 
 static int ena_calc_io_queue_size(struct ena_calc_queue_size_ctx *ctx)
 {
@@ -4132,7 +4146,6 @@ static int ena_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 {
 	struct ena_calc_queue_size_ctx calc_queue_ctx = { 0 };
 	struct ena_com_dev_get_features_ctx get_feat_ctx;
-	struct ena_llq_configurations llq_config;
 	struct ena_com_dev *ena_dev = NULL;
 	struct ena_adapter *adapter;
 	struct net_device *netdev;
@@ -4187,13 +4200,10 @@ static int ena_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 		goto err_free_region;
 	}
 
-	set_default_llq_configurations(&llq_config);
-
-	rc = ena_set_queues_placement_policy(pdev, ena_dev, &get_feat_ctx.llq,
-					     &llq_config);
+	rc = ena_map_llq_mem_bar(pdev, ena_dev, bars);
 	if (rc) {
-		dev_err(&pdev->dev, "ena device init failed\n");
-		goto err_device_destroy;
+		dev_err(&pdev->dev, "ena llq bar mapping failed\n");
+		goto err_free_ena_dev;
 	}
 
 	calc_queue_ctx.ena_dev = ena_dev;
