struct flow_head {
	struct list_head	filters;
+	struct rcu_head		rcu;
};

struct flow_filter {
	struct list_head	list;
	struct tcf_exts		exts;
	struct tcf_ematch_tree	ematches;
+	struct tcf_proto	*tp;
	struct timer_list	perturb_timer;
	u32			perturb_period;
	u32			handle;
@@ -54,6 +56,7 @@ struct flow_filter {
	u32			divisor;
	u32			baseclass;
	u32			hashrnd;
+	struct rcu_head		rcu;
};

static inline u32 addr_fold(void *addr)
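Both structures gain an embedded struct rcu_head so that an unlinked object can be released after an RCU grace period via call_rcu()/kfree_rcu() rather than freed immediately. A minimal, self-contained sketch of that deferred-free pattern, using a hypothetical foo_filter type that is not part of the patch:

#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/rculist.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/types.h>

/* Hypothetical object mirroring flow_filter: the embedded rcu_head is the
 * storage the RCU core uses to queue the object for deferred freeing.
 */
struct foo_filter {
	struct list_head list;
	u32 handle;
	struct rcu_head rcu;
};

/* RCU callback: runs only after every pre-existing reader has finished. */
static void foo_filter_free(struct rcu_head *head)
{
	struct foo_filter *f = container_of(head, struct foo_filter, rcu);

	kfree(f);
}

/* Unpublish the object, then defer the free; readers that already found it
 * may keep using it until the grace period ends.
 */
static void foo_filter_delete(struct foo_filter *f)
{
	list_del_rcu(&f->list);
	call_rcu(&f->rcu, foo_filter_free);
}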
@@ -276,14 +279,14 @@ static u32 flow_key_get(struct sk_buff *skb, int key, struct flow_keys *flow)
static int flow_classify(struct sk_buff *skb, const struct tcf_proto *tp,
			 struct tcf_result *res)
{
-	struct flow_head *head = tp->root;
+	struct flow_head *head = rcu_dereference_bh(tp->root);
	struct flow_filter *f;
	u32 keymask;
	u32 classid;
	unsigned int n, key;
	int r;

-	list_for_each_entry(f, &head->filters, list) {
+	list_for_each_entry_rcu(f, &head->filters, list) {
		u32 keys[FLOW_KEY_MAX + 1];
		struct flow_keys flow_keys;
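The classify fast path relies on BH protection, hence the _bh flavour: rcu_dereference_bh() for the root pointer and list_for_each_entry_rcu() for the traversal, with no classifier lock taken. A sketch of the same reader pattern with hypothetical foo_* types (the explicit rcu_read_lock_bh() below stands in for the BH-disabled context the real fast path already runs in):

#include <linux/rculist.h>
#include <linux/rcupdate.h>
#include <linux/types.h>

/* Hypothetical container types mirroring flow_head/tcf_proto. */
struct foo_head {
	struct list_head filters;
	struct rcu_head rcu;
};

struct foo_proto {
	struct foo_head __rcu *root;	/* written under RTNL, read under RCU */
};

/* Lockless lookup: safe against concurrent add/replace/delete because
 * writers only use the _rcu list helpers and defer their frees.
 */
static bool foo_classify(struct foo_proto *tp, u32 handle)
{
	struct foo_head *head;
	struct foo_filter *f;		/* type from the previous sketch */
	bool hit = false;

	rcu_read_lock_bh();
	head = rcu_dereference_bh(tp->root);
	list_for_each_entry_rcu(f, &head->filters, list) {
		if (f->handle == handle) {
			hit = true;
			break;
		}
	}
	rcu_read_unlock_bh();
	return hit;
}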
@@ -346,13 +349,23 @@ static const struct nla_policy flow_policy[TCA_FLOW_MAX + 1] = {
	[TCA_FLOW_PERTURB]	= { .type = NLA_U32 },
};

+static void flow_destroy_filter(struct rcu_head *head)
+{
+	struct flow_filter *f = container_of(head, struct flow_filter, rcu);
+
+	del_timer_sync(&f->perturb_timer);
+	tcf_exts_destroy(f->tp, &f->exts);
+	tcf_em_tree_destroy(f->tp, &f->ematches);
+	kfree(f);
+}
+
static int flow_change(struct net *net, struct sk_buff *in_skb,
		       struct tcf_proto *tp, unsigned long base,
		       u32 handle, struct nlattr **tca,
		       unsigned long *arg, bool ovr)
{
-	struct flow_head *head = tp->root;
-	struct flow_filter *f;
+	struct flow_head *head = rtnl_dereference(tp->root);
+	struct flow_filter *fold, *fnew;
	struct nlattr *opt = tca[TCA_OPTIONS];
	struct nlattr *tb[TCA_FLOW_MAX + 1];
	struct tcf_exts e;
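On the control path, tp->root is now read with rtnl_dereference() rather than rcu_dereference_bh(): flow_change() runs with the RTNL mutex held, and that mutex, not an RCU read-side critical section, is what serializes writers. rtnl_dereference() is rcu_dereference_protected() keyed to lockdep_rtnl_is_held(), so lockdep can flag a caller that forgot the lock. A short sketch of the distinction, again with the hypothetical foo_* types:

#include <linux/rcupdate.h>
#include <linux/rtnetlink.h>

/* Update-side access: the RTNL mutex keeps tp->root and the filter list
 * stable for the writer, so no rcu_read_lock() is taken here.
 */
static void foo_change(struct foo_proto *tp)
{
	struct foo_head *head;

	ASSERT_RTNL();				/* writer contract: RTNL held */
	head = rtnl_dereference(tp->root);	/* lockdep-checked dereference */

	/* ... mutate head->filters with list_add_tail_rcu()/list_replace_rcu()
	 * so that concurrent readers always see a consistent list ...
	 */
}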
@@ -401,20 +414,42 @@ static int flow_change(struct net *net, struct sk_buff *in_skb,
	if (err < 0)
		goto err1;

-	f = (struct flow_filter *)*arg;
-	if (f != NULL) {
+	err = -ENOBUFS;
+	fnew = kzalloc(sizeof(*fnew), GFP_KERNEL);
+	if (!fnew)
+		goto err2;
+
+	fold = (struct flow_filter *)*arg;
+	if (fold) {
		err = -EINVAL;
-		if (f->handle != handle && handle)
+		if (fold->handle != handle && handle)
			goto err2;

-		mode = f->mode;
+		/* Copy fold into fnew */
+		fnew->handle = fold->handle;
+		fnew->keymask = fold->keymask;
+		fnew->tp = fold->tp;
+
+		fnew->handle = fold->handle;
+		fnew->nkeys = fold->nkeys;
+		fnew->keymask = fold->keymask;
+		fnew->mode = fold->mode;
+		fnew->mask = fold->mask;
+		fnew->xor = fold->xor;
+		fnew->rshift = fold->rshift;
+		fnew->addend = fold->addend;
+		fnew->divisor = fold->divisor;
+		fnew->baseclass = fold->baseclass;
+		fnew->hashrnd = fold->hashrnd;
+
+		mode = fold->mode;
		if (tb[TCA_FLOW_MODE])
			mode = nla_get_u32(tb[TCA_FLOW_MODE]);
		if (mode != FLOW_MODE_HASH && nkeys > 1)
			goto err2;

		if (mode == FLOW_MODE_HASH)
-			perturb_period = f->perturb_period;
+			perturb_period = fold->perturb_period;
		if (tb[TCA_FLOW_PERTURB]) {
			if (mode != FLOW_MODE_HASH)
				goto err2;
@@ -444,83 +479,70 @@ static int flow_change(struct net *net, struct sk_buff *in_skb,
		if (TC_H_MIN(baseclass) == 0)
			baseclass = TC_H_MAKE(baseclass, 1);

-		err = -ENOBUFS;
-		f = kzalloc(sizeof(*f), GFP_KERNEL);
-		if (f == NULL)
-			goto err2;
-
-		f->handle = handle;
-		f->mask = ~0U;
-		tcf_exts_init(&f->exts, TCA_FLOW_ACT, TCA_FLOW_POLICE);
-
-		get_random_bytes(&f->hashrnd, 4);
-		f->perturb_timer.function = flow_perturbation;
-		f->perturb_timer.data = (unsigned long)f;
-		init_timer_deferrable(&f->perturb_timer);
+		fnew->handle = handle;
+		fnew->mask = ~0U;
+		fnew->tp = tp;
+		get_random_bytes(&fnew->hashrnd, 4);
+		tcf_exts_init(&fnew->exts, TCA_FLOW_ACT, TCA_FLOW_POLICE);
	}

-	tcf_exts_change(tp, &f->exts, &e);
-	tcf_em_tree_change(tp, &f->ematches, &t);
+	fnew->perturb_timer.function = flow_perturbation;
+	fnew->perturb_timer.data = (unsigned long)fnew;
+	init_timer_deferrable(&fnew->perturb_timer);

-	tcf_tree_lock(tp);
+	tcf_exts_change(tp, &fnew->exts, &e);
+	tcf_em_tree_change(tp, &fnew->ematches, &t);

	if (tb[TCA_FLOW_KEYS]) {
-		f->keymask = keymask;
-		f->nkeys = nkeys;
+		fnew->keymask = keymask;
+		fnew->nkeys = nkeys;
	}

-	f->mode = mode;
+	fnew->mode = mode;

	if (tb[TCA_FLOW_MASK])
-		f->mask = nla_get_u32(tb[TCA_FLOW_MASK]);
+		fnew->mask = nla_get_u32(tb[TCA_FLOW_MASK]);
	if (tb[TCA_FLOW_XOR])
-		f->xor = nla_get_u32(tb[TCA_FLOW_XOR]);
+		fnew->xor = nla_get_u32(tb[TCA_FLOW_XOR]);
	if (tb[TCA_FLOW_RSHIFT])
-		f->rshift = nla_get_u32(tb[TCA_FLOW_RSHIFT]);
+		fnew->rshift = nla_get_u32(tb[TCA_FLOW_RSHIFT]);
	if (tb[TCA_FLOW_ADDEND])
-		f->addend = nla_get_u32(tb[TCA_FLOW_ADDEND]);
+		fnew->addend = nla_get_u32(tb[TCA_FLOW_ADDEND]);

	if (tb[TCA_FLOW_DIVISOR])
-		f->divisor = nla_get_u32(tb[TCA_FLOW_DIVISOR]);
+		fnew->divisor = nla_get_u32(tb[TCA_FLOW_DIVISOR]);
	if (baseclass)
-		f->baseclass = baseclass;
+		fnew->baseclass = baseclass;

-	f->perturb_period = perturb_period;
-	del_timer(&f->perturb_timer);
+	fnew->perturb_period = perturb_period;
	if (perturb_period)
-		mod_timer(&f->perturb_timer, jiffies + perturb_period);
+		mod_timer(&fnew->perturb_timer, jiffies + perturb_period);

	if (*arg == 0)
-		list_add_tail(&f->list, &head->filters);
+		list_add_tail_rcu(&fnew->list, &head->filters);
+	else
+		list_replace_rcu(&fnew->list, &fold->list);

-	tcf_tree_unlock(tp);
+	*arg = (unsigned long)fnew;

-	*arg = (unsigned long)f;
+	if (fold)
+		call_rcu(&fold->rcu, flow_destroy_filter);
	return 0;

err2:
	tcf_em_tree_destroy(tp, &t);
+	kfree(fnew);
err1:
	tcf_exts_destroy(tp, &e);
	return err;
}

-static void flow_destroy_filter(struct tcf_proto *tp, struct flow_filter *f)
-{
-	del_timer_sync(&f->perturb_timer);
-	tcf_exts_destroy(tp, &f->exts);
-	tcf_em_tree_destroy(tp, &f->ematches);
-	kfree(f);
-}
-
static int flow_delete(struct tcf_proto *tp, unsigned long arg)
{
	struct flow_filter *f = (struct flow_filter *)arg;

-	tcf_tree_lock(tp);
-	list_del(&f->list);
-	tcf_tree_unlock(tp);
-	flow_destroy_filter(tp, f);
+	list_del_rcu(&f->list);
+	call_rcu(&f->rcu, flow_destroy_filter);
	return 0;
}

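Replacement now follows the usual RCU copy-update scheme: allocate fnew, copy the old configuration over, link the new filter with the _rcu list helpers, and only then hand fold to call_rcu() so it is freed once existing readers have drained. One detail worth keeping in mind is that list_replace_rcu() in <linux/rculist.h> takes the entry being replaced as its first argument and the replacement as its second. A condensed sketch of the pattern, reusing the hypothetical foo_* types and foo_filter_free() callback from the sketches above:

#include <linux/rculist.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

/* Replace-by-copy: readers never see a half-updated filter; they see
 * either fold or the fully initialised fnew.
 */
static int foo_replace(struct foo_head *head, struct foo_filter *fold,
		       u32 handle)
{
	struct foo_filter *fnew;

	fnew = kzalloc(sizeof(*fnew), GFP_KERNEL);
	if (!fnew)
		return -ENOBUFS;

	if (fold)
		fnew->handle = fold->handle;	/* carry old state over, field by field */
	if (handle)
		fnew->handle = handle;

	if (!fold)
		list_add_tail_rcu(&fnew->list, &head->filters);
	else
		list_replace_rcu(&fold->list, &fnew->list);	/* old first, new second */

	if (fold)
		call_rcu(&fold->rcu, foo_filter_free);	/* free after readers drain */
	return 0;
}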
@@ -532,28 +554,29 @@ static int flow_init(struct tcf_proto *tp)
	if (head == NULL)
		return -ENOBUFS;
	INIT_LIST_HEAD(&head->filters);
-	tp->root = head;
+	rcu_assign_pointer(tp->root, head);
	return 0;
}

static void flow_destroy(struct tcf_proto *tp)
{
-	struct flow_head *head = tp->root;
+	struct flow_head *head = rtnl_dereference(tp->root);
	struct flow_filter *f, *next;

	list_for_each_entry_safe(f, next, &head->filters, list) {
-		list_del(&f->list);
-		flow_destroy_filter(tp, f);
+		list_del_rcu(&f->list);
+		call_rcu(&f->rcu, flow_destroy_filter);
	}
-	kfree(head);
+	RCU_INIT_POINTER(tp->root, NULL);
+	kfree_rcu(head, rcu);
}

static unsigned long flow_get(struct tcf_proto *tp, u32 handle)
{
-	struct flow_head *head = tp->root;
+	struct flow_head *head = rtnl_dereference(tp->root);
	struct flow_filter *f;

-	list_for_each_entry(f, &head->filters, list)
+	list_for_each_entry_rcu(f, &head->filters, list)
		if (f->handle == handle)
			return (unsigned long)f;
	return 0;
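The root pointer gets the matching treatment: it is published with rcu_assign_pointer() once the head is fully initialised, cleared with RCU_INIT_POINTER() on teardown (storing NULL needs no ordering), and the head itself is released with kfree_rcu() through the rcu_head added above. A sketch of that lifecycle with the hypothetical foo_* types:

#include <linux/list.h>
#include <linux/rcupdate.h>
#include <linux/rtnetlink.h>
#include <linux/slab.h>

static int foo_init(struct foo_proto *tp)
{
	struct foo_head *head = kzalloc(sizeof(*head), GFP_KERNEL);

	if (!head)
		return -ENOBUFS;
	INIT_LIST_HEAD(&head->filters);
	/* Publish only after initialisation; the barrier in
	 * rcu_assign_pointer() orders the stores above against the
	 * pointer becoming visible to readers.
	 */
	rcu_assign_pointer(tp->root, head);
	return 0;
}

static void foo_destroy(struct foo_proto *tp)
{
	struct foo_head *head = rtnl_dereference(tp->root);

	RCU_INIT_POINTER(tp->root, NULL);	/* unpublish; NULL carries no payload */
	kfree_rcu(head, rcu);			/* free via head->rcu after a grace period */
}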
@@ -626,10 +649,10 @@ static int flow_dump(struct net *net, struct tcf_proto *tp, unsigned long fh,

static void flow_walk(struct tcf_proto *tp, struct tcf_walker *arg)
{
-	struct flow_head *head = tp->root;
+	struct flow_head *head = rtnl_dereference(tp->root);
	struct flow_filter *f;

-	list_for_each_entry(f, &head->filters, list) {
+	list_for_each_entry_rcu(f, &head->filters, list) {
		if (arg->count < arg->skip)
			goto skip;
		if (arg->fn(tp, (unsigned long)f, arg) < 0) {