@@ -120,6 +120,22 @@ static void a5psw_port_mgmtfwd_set(struct a5psw *a5psw, int port, bool enable)
 	a5psw_port_pattern_set(a5psw, port, A5PSW_PATTERN_MGMTFWD, enable);
 }
 
+static void a5psw_port_tx_enable(struct a5psw *a5psw, int port, bool enable)
+{
+	u32 mask = A5PSW_PORT_ENA_TX(port);
+	u32 reg = enable ? mask : 0;
+
+	/* Even though the port TX is disabled through TXENA bit in the
+	 * PORT_ENA register, it can still send BPDUs. This depends on the tag
+	 * configuration added when sending packets from the CPU port to the
+	 * switch port. Indeed, when using forced forwarding without filtering,
+	 * even disabled ports will be able to send packets that are tagged.
+	 * This allows to implement STP support when ports are in a state where
+	 * forwarding traffic should be stopped but BPDUs should still be sent.
+	 */
+	a5psw_reg_rmw(a5psw, A5PSW_PORT_ENA, mask, reg);
+}
+
 static void a5psw_port_enable_set(struct a5psw *a5psw, int port, bool enable)
 {
 	u32 port_ena = 0;
@@ -292,6 +308,22 @@ static int a5psw_set_ageing_time(struct dsa_switch *ds, unsigned int msecs)
 	return 0;
 }
 
+static void a5psw_port_learning_set(struct a5psw *a5psw, int port, bool learn)
+{
+	u32 mask = A5PSW_INPUT_LEARN_DIS(port);
+	u32 reg = !learn ? mask : 0;
+
+	a5psw_reg_rmw(a5psw, A5PSW_INPUT_LEARN, mask, reg);
+}
+
+static void a5psw_port_rx_block_set(struct a5psw *a5psw, int port, bool block)
+{
+	u32 mask = A5PSW_INPUT_LEARN_BLOCK(port);
+	u32 reg = block ? mask : 0;
+
+	a5psw_reg_rmw(a5psw, A5PSW_INPUT_LEARN, mask, reg);
+}
+
 static void a5psw_flooding_set_resolution(struct a5psw *a5psw, int port,
 					  bool set)
 {
@@ -308,6 +340,14 @@ static void a5psw_flooding_set_resolution(struct a5psw *a5psw, int port,
 		a5psw_reg_writel(a5psw, offsets[i], a5psw->bridged_ports);
 }
 
+static void a5psw_port_set_standalone(struct a5psw *a5psw, int port,
+				      bool standalone)
+{
+	a5psw_port_learning_set(a5psw, port, !standalone);
+	a5psw_flooding_set_resolution(a5psw, port, !standalone);
+	a5psw_port_mgmtfwd_set(a5psw, port, standalone);
+}
+
 static int a5psw_port_bridge_join(struct dsa_switch *ds, int port,
 				  struct dsa_bridge bridge,
 				  bool *tx_fwd_offload,
@@ -323,8 +363,7 @@ static int a5psw_port_bridge_join(struct dsa_switch *ds, int port,
 	}
 
 	a5psw->br_dev = bridge.dev;
-	a5psw_flooding_set_resolution(a5psw, port, true);
-	a5psw_port_mgmtfwd_set(a5psw, port, false);
+	a5psw_port_set_standalone(a5psw, port, false);
 
 	return 0;
 }
@@ -334,8 +373,7 @@ static void a5psw_port_bridge_leave(struct dsa_switch *ds, int port,
 {
 	struct a5psw *a5psw = ds->priv;
 
-	a5psw_flooding_set_resolution(a5psw, port, false);
-	a5psw_port_mgmtfwd_set(a5psw, port, true);
+	a5psw_port_set_standalone(a5psw, port, true);
 
 	/* No more ports bridged */
 	if (a5psw->bridged_ports == BIT(A5PSW_CPU_PORT))
@@ -344,28 +382,35 @@ static void a5psw_port_bridge_leave(struct dsa_switch *ds, int port,
 
 static void a5psw_port_stp_state_set(struct dsa_switch *ds, int port, u8 state)
 {
-	u32 mask = A5PSW_INPUT_LEARN_DIS(port) | A5PSW_INPUT_LEARN_BLOCK(port);
+	bool learning_enabled, rx_enabled, tx_enabled;
 	struct a5psw *a5psw = ds->priv;
-	u32 reg = 0;
 
 	switch (state) {
 	case BR_STATE_DISABLED:
 	case BR_STATE_BLOCKING:
-		reg |= A5PSW_INPUT_LEARN_DIS(port);
-		reg |= A5PSW_INPUT_LEARN_BLOCK(port);
-		break;
 	case BR_STATE_LISTENING:
-		reg |= A5PSW_INPUT_LEARN_DIS(port);
+		rx_enabled = false;
+		tx_enabled = false;
+		learning_enabled = false;
 		break;
 	case BR_STATE_LEARNING:
-		reg |= A5PSW_INPUT_LEARN_BLOCK(port);
+		rx_enabled = false;
+		tx_enabled = false;
+		learning_enabled = true;
 		break;
 	case BR_STATE_FORWARDING:
-	default:
+		rx_enabled = true;
+		tx_enabled = true;
+		learning_enabled = true;
 		break;
+	default:
+		dev_err(ds->dev, "invalid STP state: %d\n", state);
+		return;
 	}
 
-	a5psw_reg_rmw(a5psw, A5PSW_INPUT_LEARN, mask, reg);
+	a5psw_port_learning_set(a5psw, port, learning_enabled);
+	a5psw_port_rx_block_set(a5psw, port, !rx_enabled);
+	a5psw_port_tx_enable(a5psw, port, tx_enabled);
 }
@@ -673,7 +718,7 @@ static int a5psw_setup(struct dsa_switch *ds)
 	}
 
 	/* Configure management port */
-	reg = A5PSW_CPU_PORT | A5PSW_MGMT_CFG_DISCARD;
+	reg = A5PSW_CPU_PORT | A5PSW_MGMT_CFG_ENABLE;
 	a5psw_reg_writel(a5psw, A5PSW_MGMT_CFG, reg);
 
 	/* Set pattern 0 to forward all frame to mgmt port */
@@ -722,13 +767,15 @@ static int a5psw_setup(struct dsa_switch *ds)
 		if (dsa_port_is_unused(dp))
 			continue;
 
-		/* Enable egress flooding for CPU port */
-		if (dsa_port_is_cpu(dp))
+		/* Enable egress flooding and learning for CPU port */
+		if (dsa_port_is_cpu(dp)) {
 			a5psw_flooding_set_resolution(a5psw, port, true);
+			a5psw_port_learning_set(a5psw, port, true);
+		}
 
-		/* Enable management forward only for user ports */
+		/* Enable standalone mode for user ports */
 		if (dsa_port_is_user(dp))
-			a5psw_port_mgmtfwd_set(a5psw, port, true);
+			a5psw_port_set_standalone(a5psw, port, true);
 	}
 
 	return 0;
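
Note on the register accesses: all of the helpers added above funnel into a5psw_reg_rmw(), whose body is not part of this diff. As a rough, illustrative sketch only (not code from this driver; the base/lock parameters and the example_ name are placeholders), such an MMIO read-modify-write helper typically has this shape:

#include <linux/io.h>
#include <linux/spinlock.h>
#include <linux/types.h>

/* Illustrative sketch of a generic read-modify-write helper; it updates
 * only the bits covered by 'mask' and leaves the rest of the register
 * untouched, which is what the per-port helpers above rely on.
 */
static void example_reg_rmw(void __iomem *base, spinlock_t *lock,
			    u32 offset, u32 mask, u32 val)
{
	u32 reg;

	spin_lock(lock);

	reg = readl(base + offset);
	reg &= ~mask;	/* clear the masked field */
	reg |= val;	/* write the new value within that field */
	writel(reg, base + offset);

	spin_unlock(lock);
}

With a helper of this kind, a call such as a5psw_port_learning_set(a5psw, port, false) only sets the A5PSW_INPUT_LEARN_DIS(port) bit and does not disturb the learn/block bits of the other ports sharing the A5PSW_INPUT_LEARN register.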