@@ -1411,3 +1411,161 @@ int br_mdb_del(struct net_device *dev, struct nlattr *tb[],
 	br_mdb_config_fini(&cfg);
 	return err;
 }
+
+static const struct nla_policy br_mdbe_attrs_get_pol[MDBE_ATTR_MAX + 1] = {
+	[MDBE_ATTR_SOURCE] = NLA_POLICY_RANGE(NLA_BINARY,
+					      sizeof(struct in_addr),
+					      sizeof(struct in6_addr)),
+};
+
+static int br_mdb_get_parse(struct net_device *dev, struct nlattr *tb[],
+			    struct br_ip *group, struct netlink_ext_ack *extack)
+{
+	struct br_mdb_entry *entry = nla_data(tb[MDBA_GET_ENTRY]);
+	struct nlattr *mdbe_attrs[MDBE_ATTR_MAX + 1];
+	int err;
+
+	if (!tb[MDBA_GET_ENTRY_ATTRS]) {
+		__mdb_entry_to_br_ip(entry, group, NULL);
+		return 0;
+	}
+
+	err = nla_parse_nested(mdbe_attrs, MDBE_ATTR_MAX,
+			       tb[MDBA_GET_ENTRY_ATTRS], br_mdbe_attrs_get_pol,
+			       extack);
+	if (err)
+		return err;
+
+	if (mdbe_attrs[MDBE_ATTR_SOURCE] &&
+	    !is_valid_mdb_source(mdbe_attrs[MDBE_ATTR_SOURCE],
+				 entry->addr.proto, extack))
+		return -EINVAL;
+
+	__mdb_entry_to_br_ip(entry, group, mdbe_attrs);
+
+	return 0;
+}
+
+static struct sk_buff *
+br_mdb_get_reply_alloc(const struct net_bridge_mdb_entry *mp)
+{
+	struct net_bridge_port_group *pg;
+	size_t nlmsg_size;
+
+	nlmsg_size = NLMSG_ALIGN(sizeof(struct br_port_msg)) +
+		     /* MDBA_MDB */
+		     nla_total_size(0) +
+		     /* MDBA_MDB_ENTRY */
+		     nla_total_size(0);
+
+	if (mp->host_joined)
+		nlmsg_size += rtnl_mdb_nlmsg_pg_size(NULL);
+
+	for (pg = mlock_dereference(mp->ports, mp->br); pg;
+	     pg = mlock_dereference(pg->next, mp->br))
+		nlmsg_size += rtnl_mdb_nlmsg_pg_size(pg);
+
+	return nlmsg_new(nlmsg_size, GFP_ATOMIC);
+}
+
+static int br_mdb_get_reply_fill(struct sk_buff *skb,
+				 struct net_bridge_mdb_entry *mp, u32 portid,
+				 u32 seq)
+{
+	struct nlattr *mdb_nest, *mdb_entry_nest;
+	struct net_bridge_port_group *pg;
+	struct br_port_msg *bpm;
+	struct nlmsghdr *nlh;
+	int err;
+
+	nlh = nlmsg_put(skb, portid, seq, RTM_NEWMDB, sizeof(*bpm), 0);
+	if (!nlh)
+		return -EMSGSIZE;
+
+	bpm = nlmsg_data(nlh);
+	memset(bpm, 0, sizeof(*bpm));
+	bpm->family = AF_BRIDGE;
+	bpm->ifindex = mp->br->dev->ifindex;
+	mdb_nest = nla_nest_start_noflag(skb, MDBA_MDB);
+	if (!mdb_nest) {
+		err = -EMSGSIZE;
+		goto cancel;
+	}
+	mdb_entry_nest = nla_nest_start_noflag(skb, MDBA_MDB_ENTRY);
+	if (!mdb_entry_nest) {
+		err = -EMSGSIZE;
+		goto cancel;
+	}
+
+	if (mp->host_joined) {
+		err = __mdb_fill_info(skb, mp, NULL);
+		if (err)
+			goto cancel;
+	}
+
+	for (pg = mlock_dereference(mp->ports, mp->br); pg;
+	     pg = mlock_dereference(pg->next, mp->br)) {
+		err = __mdb_fill_info(skb, mp, pg);
+		if (err)
+			goto cancel;
+	}
+
+	nla_nest_end(skb, mdb_entry_nest);
+	nla_nest_end(skb, mdb_nest);
+	nlmsg_end(skb, nlh);
+
+	return 0;
+
+cancel:
+	nlmsg_cancel(skb, nlh);
+	return err;
+}
+
+int br_mdb_get(struct net_device *dev, struct nlattr *tb[], u32 portid, u32 seq,
+	       struct netlink_ext_ack *extack)
+{
+	struct net_bridge *br = netdev_priv(dev);
+	struct net_bridge_mdb_entry *mp;
+	struct sk_buff *skb;
+	struct br_ip group;
+	int err;
+
+	err = br_mdb_get_parse(dev, tb, &group, extack);
+	if (err)
+		return err;
+
+	/* Hold the multicast lock to ensure that the MDB entry does not change
+	 * between the time the reply size is determined and when the reply is
+	 * filled in.
+	 */
+	spin_lock_bh(&br->multicast_lock);
+
+	mp = br_mdb_ip_get(br, &group);
+	if (!mp) {
+		NL_SET_ERR_MSG_MOD(extack, "MDB entry not found");
+		err = -ENOENT;
+		goto unlock;
+	}
+
+	skb = br_mdb_get_reply_alloc(mp);
+	if (!skb) {
+		err = -ENOMEM;
+		goto unlock;
+	}
+
+	err = br_mdb_get_reply_fill(skb, mp, portid, seq);
+	if (err) {
+		NL_SET_ERR_MSG_MOD(extack, "Failed to fill MDB get reply");
+		goto free;
+	}
+
+	spin_unlock_bh(&br->multicast_lock);
+
+	return rtnl_unicast(skb, dev_net(dev), portid);
+
+free:
+	kfree_skb(skb);
+unlock:
+	spin_unlock_bh(&br->multicast_lock);
+	return err;
+}
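
For reference, below is a minimal userspace sketch (not part of this commit) of how the new br_mdb_get() doit could be exercised over a raw rtnetlink socket. It assumes a bridge named br0, an example group of 239.1.1.1, and the MDBA_GET_ENTRY / MDBA_GET_ENTRY_ATTRS uAPI attributes defined elsewhere in this series; the unicast reply is the RTM_NEWMDB message built by br_mdb_get_reply_fill() and can be parsed with the usual RTA_* helpers.

/* Sketch only: send an RTM_GETMDB request for one group and read the reply.
 * "br0" and the group address are assumed example values.
 */
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <arpa/inet.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/netlink.h>
#include <linux/rtnetlink.h>
#include <linux/if_ether.h>
#include <linux/if_bridge.h>

int main(void)
{
	struct {
		struct nlmsghdr nlh;
		struct br_port_msg bpm;
		char attrbuf[256];
	} req;
	struct br_mdb_entry entry;
	struct rtattr *rta;
	int fd;

	memset(&req, 0, sizeof(req));
	memset(&entry, 0, sizeof(entry));

	/* Group to look up (assumed example): IPv4 239.1.1.1, VLAN 0. */
	entry.addr.proto = htons(ETH_P_IP);
	entry.addr.u.ip4 = inet_addr("239.1.1.1");

	req.nlh.nlmsg_len = NLMSG_LENGTH(sizeof(struct br_port_msg));
	req.nlh.nlmsg_type = RTM_GETMDB;
	req.nlh.nlmsg_flags = NLM_F_REQUEST;
	req.bpm.family = AF_BRIDGE;
	req.bpm.ifindex = if_nametoindex("br0");	/* assumed bridge name */

	/* MDBA_GET_ENTRY carries the br_mdb_entry describing the group. */
	rta = (struct rtattr *)((char *)&req + NLMSG_ALIGN(req.nlh.nlmsg_len));
	rta->rta_type = MDBA_GET_ENTRY;
	rta->rta_len = RTA_LENGTH(sizeof(entry));
	memcpy(RTA_DATA(rta), &entry, sizeof(entry));
	req.nlh.nlmsg_len = NLMSG_ALIGN(req.nlh.nlmsg_len) + RTA_ALIGN(rta->rta_len);

	fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_ROUTE);
	if (fd < 0)
		return 1;
	if (send(fd, &req, req.nlh.nlmsg_len, 0) < 0)
		perror("send");
	/* The RTM_NEWMDB reply unicast by the kernel can now be read back
	 * with recv() on this socket and walked with RTA_DATA()/RTA_NEXT().
	 */
	close(fd);
	return 0;
}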