@@ -160,6 +160,16 @@ static struct sk_buff *br_mrp_alloc_test_skb(struct br_mrp *mrp,
160
160
return skb ;
161
161
}
162
162
163
+ /* This function is continuously called in the following cases:
164
+ * - when node role is MRM, in this case test_monitor is always set to false
165
+ * because it needs to notify the userspace that the ring is open and needs to
166
+ * send MRP_Test frames
167
+ * - when node role is MRA, there are 2 subcases:
168
+ * - when MRA behaves as MRM, in this case it is similar to the MRM role
169
+ * - when MRA behaves as MRC, in this case test_monitor is set to true,
170
+ * because it needs to detect when it stops seeing MRP_Test frames
171
+ * from MRM node but it doesn't need to send MRP_Test frames.
172
+ */
163
173
static void br_mrp_test_work_expired (struct work_struct * work )
164
174
{
165
175
struct delayed_work * del_work = to_delayed_work (work );
@@ -177,34 +187,46 @@ static void br_mrp_test_work_expired(struct work_struct *work)
177
187
/* Notify that the ring is open only if the ring state is
178
188
* closed, otherwise it would continue to notify at every
179
189
* interval.
190
+ * Also notify that the ring is open when the node has the
191
+ * role MRA and behaves as MRC. The reason is that the
192
+ * userspace needs to know when the MRM stopped sending
193
+ * MRP_Test frames so that the current node to try to take
194
+ * the role of a MRM.
180
195
*/
181
- if (mrp -> ring_state == BR_MRP_RING_STATE_CLOSED )
196
+ if (mrp -> ring_state == BR_MRP_RING_STATE_CLOSED ||
197
+ mrp -> test_monitor )
182
198
notify_open = true;
183
199
}
184
200
185
201
rcu_read_lock ();
186
202
187
203
p = rcu_dereference (mrp -> p_port );
188
204
if (p ) {
189
- skb = br_mrp_alloc_test_skb (mrp , p , BR_MRP_PORT_ROLE_PRIMARY );
190
- if (!skb )
191
- goto out ;
192
-
193
- skb_reset_network_header (skb );
194
- dev_queue_xmit (skb );
205
+ if (!mrp -> test_monitor ) {
206
+ skb = br_mrp_alloc_test_skb (mrp , p ,
207
+ BR_MRP_PORT_ROLE_PRIMARY );
208
+ if (!skb )
209
+ goto out ;
210
+
211
+ skb_reset_network_header (skb );
212
+ dev_queue_xmit (skb );
213
+ }
195
214
196
215
if (notify_open && !mrp -> ring_role_offloaded )
197
216
br_mrp_port_open (p -> dev , true);
198
217
}
199
218
200
219
p = rcu_dereference (mrp -> s_port );
201
220
if (p ) {
202
- skb = br_mrp_alloc_test_skb (mrp , p , BR_MRP_PORT_ROLE_SECONDARY );
203
- if (!skb )
204
- goto out ;
205
-
206
- skb_reset_network_header (skb );
207
- dev_queue_xmit (skb );
221
+ if (!mrp -> test_monitor ) {
222
+ skb = br_mrp_alloc_test_skb (mrp , p ,
223
+ BR_MRP_PORT_ROLE_SECONDARY );
224
+ if (!skb )
225
+ goto out ;
226
+
227
+ skb_reset_network_header (skb );
228
+ dev_queue_xmit (skb );
229
+ }
208
230
209
231
if (notify_open && !mrp -> ring_role_offloaded )
210
232
br_mrp_port_open (p -> dev , true);
@@ -227,7 +249,7 @@ static void br_mrp_del_impl(struct net_bridge *br, struct br_mrp *mrp)
227
249
228
250
/* Stop sending MRP_Test frames */
229
251
cancel_delayed_work_sync (& mrp -> test_work );
230
- br_mrp_switchdev_send_ring_test (br , mrp , 0 , 0 , 0 );
252
+ br_mrp_switchdev_send_ring_test (br , mrp , 0 , 0 , 0 , 0 );
231
253
232
254
br_mrp_switchdev_del (br , mrp );
233
255
@@ -452,8 +474,8 @@ int br_mrp_set_ring_role(struct net_bridge *br,
452
474
return 0 ;
453
475
}
454
476
455
- /* Start to generate MRP test frames, the frames are generated by HW and if it
456
- * fails, they are generated by the SW.
477
+ /* Start to generate or monitor MRP test frames, the frames are generated by
478
+ * HW and if it fails, they are generated by the SW.
457
479
* note: already called with rtnl_lock
458
480
*/
459
481
int br_mrp_start_test (struct net_bridge * br ,
@@ -464,16 +486,18 @@ int br_mrp_start_test(struct net_bridge *br,
464
486
if (!mrp )
465
487
return - EINVAL ;
466
488
467
- /* Try to push it to the HW and if it fails then continue to generate in
468
- * SW and if that also fails then return error
489
+ /* Try to push it to the HW and if it fails then continue with SW
490
+ * implementation and if that also fails then return error.
469
491
*/
470
492
if (!br_mrp_switchdev_send_ring_test (br , mrp , test -> interval ,
471
- test -> max_miss , test -> period ))
493
+ test -> max_miss , test -> period ,
494
+ test -> monitor ))
472
495
return 0 ;
473
496
474
497
mrp -> test_interval = test -> interval ;
475
498
mrp -> test_end = jiffies + usecs_to_jiffies (test -> period );
476
499
mrp -> test_max_miss = test -> max_miss ;
500
+ mrp -> test_monitor = test -> monitor ;
477
501
mrp -> test_count_miss = 0 ;
478
502
queue_delayed_work (system_wq , & mrp -> test_work ,
479
503
usecs_to_jiffies (test -> interval ));
@@ -510,6 +534,57 @@ static void br_mrp_mrm_process(struct br_mrp *mrp, struct net_bridge_port *port,
510
534
br_mrp_port_open (port -> dev , false);
511
535
}
512
536
537
+ /* Determin if the test hdr has a better priority than the node */
538
+ static bool br_mrp_test_better_than_own (struct br_mrp * mrp ,
539
+ struct net_bridge * br ,
540
+ const struct br_mrp_ring_test_hdr * hdr )
541
+ {
542
+ u16 prio = be16_to_cpu (hdr -> prio );
543
+
544
+ if (prio < mrp -> prio ||
545
+ (prio == mrp -> prio &&
546
+ ether_addr_to_u64 (hdr -> sa ) < ether_addr_to_u64 (br -> dev -> dev_addr )))
547
+ return true;
548
+
549
+ return false;
550
+ }
551
+
552
+ /* Process only MRP Test frame. All the other MRP frames are processed by
553
+ * userspace application
554
+ * note: already called with rcu_read_lock
555
+ */
556
+ static void br_mrp_mra_process (struct br_mrp * mrp , struct net_bridge * br ,
557
+ struct net_bridge_port * port ,
558
+ struct sk_buff * skb )
559
+ {
560
+ const struct br_mrp_ring_test_hdr * test_hdr ;
561
+ struct br_mrp_ring_test_hdr _test_hdr ;
562
+ const struct br_mrp_tlv_hdr * hdr ;
563
+ struct br_mrp_tlv_hdr _hdr ;
564
+
565
+ /* Each MRP header starts with a version field which is 16 bits.
566
+ * Therefore skip the version and get directly the TLV header.
567
+ */
568
+ hdr = skb_header_pointer (skb , sizeof (uint16_t ), sizeof (_hdr ), & _hdr );
569
+ if (!hdr )
570
+ return ;
571
+
572
+ if (hdr -> type != BR_MRP_TLV_HEADER_RING_TEST )
573
+ return ;
574
+
575
+ test_hdr = skb_header_pointer (skb , sizeof (uint16_t ) + sizeof (_hdr ),
576
+ sizeof (_test_hdr ), & _test_hdr );
577
+ if (!test_hdr )
578
+ return ;
579
+
580
+ /* Only frames that have a better priority than the node will
581
+ * clear the miss counter because otherwise the node will need to behave
582
+ * as MRM.
583
+ */
584
+ if (br_mrp_test_better_than_own (mrp , br , test_hdr ))
585
+ mrp -> test_count_miss = 0 ;
586
+ }
587
+
513
588
/* This will just forward the frame to the other mrp ring port(MRC role) or will
514
589
* not do anything.
515
590
* note: already called with rcu_read_lock
@@ -546,6 +621,18 @@ static int br_mrp_rcv(struct net_bridge_port *p,
546
621
return 1 ;
547
622
}
548
623
624
+ /* If the role is MRA then don't forward the frames if it behaves as
625
+ * MRM node
626
+ */
627
+ if (mrp -> ring_role == BR_MRP_RING_ROLE_MRA ) {
628
+ if (!mrp -> test_monitor ) {
629
+ br_mrp_mrm_process (mrp , p , skb );
630
+ return 1 ;
631
+ }
632
+
633
+ br_mrp_mra_process (mrp , br , p , skb );
634
+ }
635
+
549
636
/* Clone the frame and forward it on the other MRP port */
550
637
nskb = skb_clone (skb , GFP_ATOMIC );
551
638
if (!nskb )
0 commit comments