@@ -76,19 +76,21 @@ int ib_get_cached_gid(struct ib_device *device,
 {
 	struct ib_gid_cache *cache;
 	unsigned long flags;
-	int ret = 0;
+	int ret = -EINVAL;
 
 	if (port_num < start_port(device) || port_num > end_port(device))
 		return -EINVAL;
 
 	read_lock_irqsave(&device->cache.lock, flags);
 
-	cache = device->cache.gid_cache[port_num - start_port(device)];
+	if (device->cache.gid_cache) {
+		cache = device->cache.gid_cache[port_num - start_port(device)];
 
-	if (index < 0 || index >= cache->table_len)
-		ret = -EINVAL;
-	else
-		*gid = cache->table[index];
+		if (cache && index >= 0 && index < cache->table_len) {
+			*gid = cache->table[index];
+			ret = 0;
+		}
+	}
 
 	read_unlock_irqrestore(&device->cache.lock, flags);
@@ -138,19 +140,21 @@ int ib_get_cached_pkey(struct ib_device *device,
 {
 	struct ib_pkey_cache *cache;
 	unsigned long flags;
-	int ret = 0;
+	int ret = -EINVAL;
 
 	if (port_num < start_port(device) || port_num > end_port(device))
 		return -EINVAL;
 
 	read_lock_irqsave(&device->cache.lock, flags);
 
-	cache = device->cache.pkey_cache[port_num - start_port(device)];
+	if (device->cache.pkey_cache) {
+		cache = device->cache.pkey_cache[port_num - start_port(device)];
 
-	if (index < 0 || index >= cache->table_len)
-		ret = -EINVAL;
-	else
-		*pkey = cache->table[index];
+		if (cache && index >= 0 && index < cache->table_len) {
+			*pkey = cache->table[index];
+			ret = 0;
+		}
+	}
 
 	read_unlock_irqrestore(&device->cache.lock, flags);
@@ -236,13 +240,16 @@ int ib_get_cached_lmc(struct ib_device *device,
 			  u8 *lmc)
 {
 	unsigned long flags;
-	int ret = 0;
+	int ret = -EINVAL;
 
 	if (port_num < start_port(device) || port_num > end_port(device))
 		return -EINVAL;
 
 	read_lock_irqsave(&device->cache.lock, flags);
-	*lmc = device->cache.lmc_cache[port_num - start_port(device)];
+	if (device->cache.lmc_cache) {
+		*lmc = device->cache.lmc_cache[port_num - start_port(device)];
+		ret = 0;
+	}
 	read_unlock_irqrestore(&device->cache.lock, flags);
 
 	return ret;
@@ -403,12 +410,19 @@ static void ib_cache_setup_one(struct ib_device *device)
 	kfree(device->cache.pkey_cache);
 	kfree(device->cache.gid_cache);
 	kfree(device->cache.lmc_cache);
+	device->cache.pkey_cache = NULL;
+	device->cache.gid_cache = NULL;
+	device->cache.lmc_cache = NULL;
 }
 
 static void ib_cache_cleanup_one(struct ib_device *device)
 {
 	int p;
 
+	if (!(device->cache.pkey_cache && device->cache.gid_cache &&
+	      device->cache.lmc_cache))
+		return;
+
 	ib_unregister_event_handler(&device->cache.event_handler);
 	flush_workqueue(ib_wq);
 
0 commit comments