@@ -227,7 +227,7 @@ static int alloc_extra_elems(struct bpf_htab *htab)
 }
 
 /* Called from syscall */
-static struct bpf_map *htab_map_alloc(union bpf_attr *attr)
+static int htab_map_alloc_check(union bpf_attr *attr)
 {
 	bool percpu = (attr->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
 		       attr->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH);
@@ -241,9 +241,6 @@ static struct bpf_map *htab_map_alloc(union bpf_attr *attr)
 	bool percpu_lru = (attr->map_flags & BPF_F_NO_COMMON_LRU);
 	bool prealloc = !(attr->map_flags & BPF_F_NO_PREALLOC);
 	int numa_node = bpf_map_attr_numa_node(attr);
-	struct bpf_htab *htab;
-	int err, i;
-	u64 cost;
 
 	BUILD_BUG_ON(offsetof(struct htab_elem, htab) !=
 		     offsetof(struct htab_elem, hash_node.pprev));
@@ -254,33 +251,33 @@ static struct bpf_map *htab_map_alloc(union bpf_attr *attr)
 		/* LRU implementation is much complicated than other
 		 * maps.  Hence, limit to CAP_SYS_ADMIN for now.
 		 */
-		return ERR_PTR(-EPERM);
+		return -EPERM;
 
 	if (attr->map_flags & ~HTAB_CREATE_FLAG_MASK)
 		/* reserved bits should not be used */
-		return ERR_PTR(-EINVAL);
+		return -EINVAL;
 
 	if (!lru && percpu_lru)
-		return ERR_PTR(-EINVAL);
+		return -EINVAL;
 
 	if (lru && !prealloc)
-		return ERR_PTR(-ENOTSUPP);
+		return -ENOTSUPP;
 
 	if (numa_node != NUMA_NO_NODE && (percpu || percpu_lru))
-		return ERR_PTR(-EINVAL);
+		return -EINVAL;
 
 	/* check sanity of attributes.
 	 * value_size == 0 may be allowed in the future to use map as a set
 	 */
 	if (attr->max_entries == 0 || attr->key_size == 0 ||
 	    attr->value_size == 0)
-		return ERR_PTR(-EINVAL);
+		return -EINVAL;
 
 	if (attr->key_size > MAX_BPF_STACK)
 		/* eBPF programs initialize keys on stack, so they cannot be
 		 * larger than max stack size
 		 */
-		return ERR_PTR(-E2BIG);
+		return -E2BIG;
 
 	if (attr->value_size >= KMALLOC_MAX_SIZE -
 	    MAX_BPF_STACK - sizeof(struct htab_elem))
@@ -289,7 +286,28 @@ static struct bpf_map *htab_map_alloc(union bpf_attr *attr)
 		 * sure that the elem_size doesn't overflow and it's
 		 * kmalloc-able later in htab_map_update_elem()
 		 */
-		return ERR_PTR(-E2BIG);
+		return -E2BIG;
+
+	return 0;
+}
+
+static struct bpf_map *htab_map_alloc(union bpf_attr *attr)
+{
+	bool percpu = (attr->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
+		       attr->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH);
+	bool lru = (attr->map_type == BPF_MAP_TYPE_LRU_HASH ||
+		    attr->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH);
+	/* percpu_lru means each cpu has its own LRU list.
+	 * it is different from BPF_MAP_TYPE_PERCPU_HASH where
+	 * the map's value itself is percpu. percpu_lru has
+	 * nothing to do with the map's value.
+	 */
+	bool percpu_lru = (attr->map_flags & BPF_F_NO_COMMON_LRU);
+	bool prealloc = !(attr->map_flags & BPF_F_NO_PREALLOC);
+	int numa_node = bpf_map_attr_numa_node(attr);
+	struct bpf_htab *htab;
+	int err, i;
+	u64 cost;
 
 	htab = kzalloc(sizeof(*htab), GFP_USER);
 	if (!htab)
@@ -1142,6 +1160,7 @@ static void htab_map_free(struct bpf_map *map)
 }
 
 const struct bpf_map_ops htab_map_ops = {
+	.map_alloc_check = htab_map_alloc_check,
 	.map_alloc = htab_map_alloc,
 	.map_free = htab_map_free,
 	.map_get_next_key = htab_map_get_next_key,
@@ -1152,6 +1171,7 @@ const struct bpf_map_ops htab_map_ops = {
 };
 
 const struct bpf_map_ops htab_lru_map_ops = {
+	.map_alloc_check = htab_map_alloc_check,
 	.map_alloc = htab_map_alloc,
 	.map_free = htab_map_free,
 	.map_get_next_key = htab_map_get_next_key,
@@ -1235,6 +1255,7 @@ int bpf_percpu_hash_update(struct bpf_map *map, void *key, void *value,
 }
 
 const struct bpf_map_ops htab_percpu_map_ops = {
+	.map_alloc_check = htab_map_alloc_check,
 	.map_alloc = htab_map_alloc,
 	.map_free = htab_map_free,
 	.map_get_next_key = htab_map_get_next_key,
@@ -1244,6 +1265,7 @@ const struct bpf_map_ops htab_percpu_map_ops = {
 };
 
 const struct bpf_map_ops htab_lru_percpu_map_ops = {
+	.map_alloc_check = htab_map_alloc_check,
 	.map_alloc = htab_map_alloc,
 	.map_free = htab_map_free,
 	.map_get_next_key = htab_map_get_next_key,
@@ -1252,11 +1274,11 @@ const struct bpf_map_ops htab_lru_percpu_map_ops = {
 	.map_delete_elem = htab_lru_map_delete_elem,
 };
 
-static struct bpf_map *fd_htab_map_alloc(union bpf_attr *attr)
+static int fd_htab_map_alloc_check(union bpf_attr *attr)
 {
 	if (attr->value_size != sizeof(u32))
-		return ERR_PTR(-EINVAL);
-	return htab_map_alloc(attr);
+		return -EINVAL;
+	return htab_map_alloc_check(attr);
 }
 
 static void fd_htab_map_free(struct bpf_map *map)
@@ -1327,7 +1349,7 @@ static struct bpf_map *htab_of_map_alloc(union bpf_attr *attr)
 	if (IS_ERR(inner_map_meta))
 		return inner_map_meta;
 
-	map = fd_htab_map_alloc(attr);
+	map = htab_map_alloc(attr);
 	if (IS_ERR(map)) {
 		bpf_map_meta_free(inner_map_meta);
 		return map;
@@ -1371,6 +1393,7 @@ static void htab_of_map_free(struct bpf_map *map)
 }
 
 const struct bpf_map_ops htab_of_maps_map_ops = {
+	.map_alloc_check = fd_htab_map_alloc_check,
 	.map_alloc = htab_of_map_alloc,
 	.map_free = htab_of_map_free,
 	.map_get_next_key = htab_map_get_next_key,
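
Note on the split: after this change ->map_alloc_check() handles pure attribute validation and returns 0 or a negative errno, while ->map_alloc() keeps returning a map pointer or ERR_PTR(). The generic map-creation path that chains the two callbacks is not part of this diff; the sketch below is only an illustration of how such a caller could be expected to use the new hook, and the function name find_and_alloc_map_sketch plus its surrounding details are assumptions, not code from this patch.

/* Illustrative sketch only -- not taken from this patch.  It assumes the
 * generic map-creation code has already resolved the bpf_map_ops for the
 * requested map type and runs the optional ->map_alloc_check() before
 * ->map_alloc().
 */
static struct bpf_map *find_and_alloc_map_sketch(union bpf_attr *attr,
						 const struct bpf_map_ops *ops)
{
	struct bpf_map *map;
	int err;

	if (ops->map_alloc_check) {
		/* attribute validation: 0 on success, -errno on failure */
		err = ops->map_alloc_check(attr);
		if (err)
			return ERR_PTR(err);
	}

	/* actual allocation still reports errors via ERR_PTR() */
	map = ops->map_alloc(attr);
	if (IS_ERR(map))
		return map;

	map->ops = ops;
	return map;
}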