@@ -44,108 +44,132 @@ bool mlx5_sriov_is_enabled(struct mlx5_core_dev *dev)
 	return !!sriov->num_vfs;
 }
 
-static void enable_vfs(struct mlx5_core_dev *dev, int num_vfs)
+static int mlx5_device_enable_sriov(struct mlx5_core_dev *dev, int num_vfs)
 {
 	struct mlx5_core_sriov *sriov = &dev->priv.sriov;
 	int err;
 	int vf;
 
-	for (vf = 1; vf <= num_vfs; vf++) {
-		err = mlx5_core_enable_hca(dev, vf);
+	if (sriov->enabled_vfs) {
+		mlx5_core_warn(dev,
+			       "failed to enable SRIOV on device, already enabled with %d vfs\n",
+			       sriov->enabled_vfs);
+		return -EBUSY;
+	}
+
+#ifdef CONFIG_MLX5_CORE_EN
+	err = mlx5_eswitch_enable_sriov(dev->priv.eswitch, num_vfs, SRIOV_LEGACY);
+	if (err) {
+		mlx5_core_warn(dev,
+			       "failed to enable eswitch SRIOV (%d)\n", err);
+		return err;
+	}
+#endif
+
+	for (vf = 0; vf < num_vfs; vf++) {
+		err = mlx5_core_enable_hca(dev, vf + 1);
 		if (err) {
-			mlx5_core_warn(dev, "failed to enable VF %d\n", vf - 1);
-		} else {
-			sriov->vfs_ctx[vf - 1].enabled = 1;
-			mlx5_core_dbg(dev, "successfully enabled VF %d\n", vf - 1);
+			mlx5_core_warn(dev, "failed to enable VF %d (%d)\n", vf, err);
+			continue;
 		}
+		sriov->vfs_ctx[vf].enabled = 1;
+		sriov->enabled_vfs++;
+		mlx5_core_dbg(dev, "successfully enabled VF* %d\n", vf);
+
 	}
+
+	return 0;
 }
 
-static void disable_vfs(struct mlx5_core_dev *dev, int num_vfs)
+static void mlx5_device_disable_sriov(struct mlx5_core_dev *dev)
 {
 	struct mlx5_core_sriov *sriov = &dev->priv.sriov;
+	int err;
 	int vf;
 
-	for (vf = 1; vf <= num_vfs; vf++) {
-		if (sriov->vfs_ctx[vf - 1].enabled) {
-			if (mlx5_core_disable_hca(dev, vf))
-				mlx5_core_warn(dev, "failed to disable VF %d\n", vf - 1);
-			else
-				sriov->vfs_ctx[vf - 1].enabled = 0;
+	if (!sriov->enabled_vfs)
+		return;
+
+	for (vf = 0; vf < sriov->num_vfs; vf++) {
+		if (!sriov->vfs_ctx[vf].enabled)
+			continue;
+		err = mlx5_core_disable_hca(dev, vf + 1);
+		if (err) {
+			mlx5_core_warn(dev, "failed to disable VF %d\n", vf);
+			continue;
 		}
+		sriov->vfs_ctx[vf].enabled = 0;
+		sriov->enabled_vfs--;
 	}
+
+#ifdef CONFIG_MLX5_CORE_EN
+	mlx5_eswitch_disable_sriov(dev->priv.eswitch);
+#endif
+
+	if (mlx5_wait_for_vf_pages(dev))
+		mlx5_core_warn(dev, "timeout reclaiming VFs pages\n");
 }
 
-static int mlx5_core_create_vfs(struct pci_dev *pdev, int num_vfs)
+static int mlx5_pci_enable_sriov(struct pci_dev *pdev, int num_vfs)
 {
 	struct mlx5_core_dev *dev = pci_get_drvdata(pdev);
-	int err;
-
-	if (pci_num_vf(pdev))
-		pci_disable_sriov(pdev);
-
-	enable_vfs(dev, num_vfs);
+	int err = 0;
 
-	err = pci_enable_sriov(pdev, num_vfs);
-	if (err) {
-		dev_warn(&pdev->dev, "enable sriov failed %d\n", err);
-		goto ex;
+	if (pci_num_vf(pdev)) {
+		mlx5_core_warn(dev, "Unable to enable pci sriov, already enabled\n");
+		return -EBUSY;
 	}
 
-	return 0;
+	err = pci_enable_sriov(pdev, num_vfs);
+	if (err)
+		mlx5_core_warn(dev, "pci_enable_sriov failed : %d\n", err);
 
-ex:
-	disable_vfs(dev, num_vfs);
 	return err;
 }
 
-static int mlx5_core_sriov_enable(struct pci_dev *pdev, int num_vfs)
+static void mlx5_pci_disable_sriov(struct pci_dev *pdev)
+{
+	pci_disable_sriov(pdev);
+}
+
+static int mlx5_sriov_enable(struct pci_dev *pdev, int num_vfs)
 {
 	struct mlx5_core_dev *dev = pci_get_drvdata(pdev);
 	struct mlx5_core_sriov *sriov = &dev->priv.sriov;
-	int err;
+	int err = 0;
 
-	kfree(sriov->vfs_ctx);
-	sriov->vfs_ctx = kcalloc(num_vfs, sizeof(*sriov->vfs_ctx), GFP_ATOMIC);
-	if (!sriov->vfs_ctx)
-		return -ENOMEM;
+	err = mlx5_device_enable_sriov(dev, num_vfs);
+	if (err) {
+		mlx5_core_warn(dev, "mlx5_device_enable_sriov failed : %d\n", err);
+		return err;
+	}
 
-	sriov->enabled_vfs = num_vfs;
-	err = mlx5_core_create_vfs(pdev, num_vfs);
+	err = mlx5_pci_enable_sriov(pdev, num_vfs);
 	if (err) {
-		kfree(sriov->vfs_ctx);
-		sriov->vfs_ctx = NULL;
+		mlx5_core_warn(dev, "mlx5_pci_enable_sriov failed : %d\n", err);
+		mlx5_device_disable_sriov(dev);
 		return err;
 	}
 
+	sriov->num_vfs = num_vfs;
+
 	return 0;
 }
 
-static void mlx5_core_init_vfs(struct mlx5_core_dev *dev, int num_vfs)
+static void mlx5_sriov_disable(struct pci_dev *pdev)
 {
+	struct mlx5_core_dev *dev = pci_get_drvdata(pdev);
 	struct mlx5_core_sriov *sriov = &dev->priv.sriov;
 
-	sriov->num_vfs = num_vfs;
-}
-
-static void mlx5_core_cleanup_vfs(struct mlx5_core_dev *dev)
-{
-	struct mlx5_core_sriov *sriov;
-
-	sriov = &dev->priv.sriov;
-	disable_vfs(dev, sriov->num_vfs);
-
-	if (mlx5_wait_for_vf_pages(dev))
-		mlx5_core_warn(dev, "timeout claiming VFs pages\n");
-
+	mlx5_pci_disable_sriov(pdev);
+	mlx5_device_disable_sriov(dev);
 	sriov->num_vfs = 0;
 }
 
 int mlx5_core_sriov_configure(struct pci_dev *pdev, int num_vfs)
 {
 	struct mlx5_core_dev *dev = pci_get_drvdata(pdev);
-	struct mlx5_core_sriov *sriov = &dev->priv.sriov;
-	int err;
+	int err = 0;
 
 	mlx5_core_dbg(dev, "requested num_vfs %d\n", num_vfs);
 	if (!mlx5_core_is_pf(dev))
@@ -156,92 +180,44 @@ int mlx5_core_sriov_configure(struct pci_dev *pdev, int num_vfs)
 		return -EINVAL;
 	}
 
-	mlx5_core_cleanup_vfs(dev);
-
-	if (!num_vfs) {
-#ifdef CONFIG_MLX5_CORE_EN
-		mlx5_eswitch_disable_sriov(dev->priv.eswitch);
-#endif
-		kfree(sriov->vfs_ctx);
-		sriov->vfs_ctx = NULL;
-		if (!pci_vfs_assigned(pdev))
-			pci_disable_sriov(pdev);
-		else
-			mlx5_core_info(dev, "unloading PF driver while leaving orphan VFs\n");
-		return 0;
-	}
-
-	err = mlx5_core_sriov_enable(pdev, num_vfs);
-	if (err) {
-		mlx5_core_warn(dev, "mlx5_core_sriov_enable failed %d\n", err);
-		return err;
-	}
+	if (num_vfs)
+		err = mlx5_sriov_enable(pdev, num_vfs);
+	else
+		mlx5_sriov_disable(pdev);
 
-	mlx5_core_init_vfs(dev, num_vfs);
-#ifdef CONFIG_MLX5_CORE_EN
-	mlx5_eswitch_enable_sriov(dev->priv.eswitch, num_vfs, SRIOV_LEGACY);
-#endif
-
-	return num_vfs;
-}
-
-static int sync_required(struct pci_dev *pdev)
-{
-	struct mlx5_core_dev *dev = pci_get_drvdata(pdev);
-	struct mlx5_core_sriov *sriov = &dev->priv.sriov;
-	int cur_vfs = pci_num_vf(pdev);
-
-	if (cur_vfs != sriov->num_vfs) {
-		mlx5_core_warn(dev, "current VFs %d, registered %d - sync needed\n",
-			       cur_vfs, sriov->num_vfs);
-		return 1;
-	}
-
-	return 0;
+	return err ? err : num_vfs;
 }
 
 int mlx5_sriov_init(struct mlx5_core_dev *dev)
 {
 	struct mlx5_core_sriov *sriov = &dev->priv.sriov;
 	struct pci_dev *pdev = dev->pdev;
-	int cur_vfs;
+	int total_vfs;
 
 	if (!mlx5_core_is_pf(dev))
 		return 0;
 
-	if (!sync_required(dev->pdev))
-		return 0;
-
-	cur_vfs = pci_num_vf(pdev);
-	sriov->vfs_ctx = kcalloc(cur_vfs, sizeof(*sriov->vfs_ctx), GFP_KERNEL);
+	total_vfs = pci_sriov_get_totalvfs(pdev);
+	sriov->num_vfs = pci_num_vf(pdev);
+	sriov->vfs_ctx = kcalloc(total_vfs, sizeof(*sriov->vfs_ctx), GFP_KERNEL);
 	if (!sriov->vfs_ctx)
 		return -ENOMEM;
 
-	sriov->enabled_vfs = cur_vfs;
-
-	mlx5_core_init_vfs(dev, cur_vfs);
-#ifdef CONFIG_MLX5_CORE_EN
-	if (cur_vfs)
-		mlx5_eswitch_enable_sriov(dev->priv.eswitch, cur_vfs,
-					  SRIOV_LEGACY);
-#endif
-
-	enable_vfs(dev, cur_vfs);
+	/* If sriov VFs exist in PCI level, enable them in device level */
+	if (!sriov->num_vfs)
+		return 0;
 
+	mlx5_device_enable_sriov(dev, sriov->num_vfs);
 	return 0;
 }
 
-int mlx5_sriov_cleanup(struct mlx5_core_dev *dev)
+void mlx5_sriov_cleanup(struct mlx5_core_dev *dev)
 {
-	struct pci_dev *pdev = dev->pdev;
-	int err;
+	struct mlx5_core_sriov *sriov = &dev->priv.sriov;
 
 	if (!mlx5_core_is_pf(dev))
-		return 0;
+		return;
 
-	err = mlx5_core_sriov_configure(pdev, 0);
-	if (err)
-		return err;
-
-	return 0;
+	mlx5_device_disable_sriov(dev);
+	kfree(sriov->vfs_ctx);
 }
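
For context: mlx5_core_sriov_configure above is the driver's PCI sriov_configure callback, which the PCI core invokes when userspace writes to the PF's sriov_numvfs sysfs attribute. Below is a minimal sketch of how such a callback is handed to the PCI core; every name except the callback itself is illustrative (the real hookup lives in the driver's main.c), so treat it as an assumption, not the actual mlx5 registration code.

/* Sketch only: wiring an sriov_configure callback into a pci_driver.
 * Writing N to /sys/bus/pci/devices/<BDF>/sriov_numvfs makes the PCI
 * core call the callback with num_vfs = N; N == 0 requests teardown.
 * The callback returns the enabled VF count or a negative errno.
 */
#include <linux/module.h>
#include <linux/pci.h>

int mlx5_core_sriov_configure(struct pci_dev *pdev, int num_vfs);

static const struct pci_device_id example_id_table[] = {
	{ PCI_VDEVICE(MELLANOX, 0x1013) },	/* illustrative device ID */
	{ }
};

static struct pci_driver example_pf_driver = {
	.name		 = "example_pf",	/* hypothetical driver name */
	.id_table	 = example_id_table,
	.sriov_configure = mlx5_core_sriov_configure,
};
module_pci_driver(example_pf_driver);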