 #include <linux/bpf_trace.h>
 #include <linux/filter.h>
 #include <linux/net_tstamp.h>
+#include <linux/workqueue.h>

 #include "nic_reg.h"
 #include "nic.h"
@@ -67,6 +68,9 @@ module_param(cpi_alg, int, 0444);
 MODULE_PARM_DESC(cpi_alg,
                  "PFC algorithm (0=none, 1=VLAN, 2=VLAN16, 3=IP Diffserv)");

+/* workqueue for handling kernel ndo_set_rx_mode() calls */
+static struct workqueue_struct *nicvf_rx_mode_wq;
+
 static inline u8 nicvf_netdev_qidx(struct nicvf *nic, u8 qidx)
 {
         if (nic->sqs_mode)
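
The handler added below leans on a few supporting types declared in nic.h, which this diff does not show. A minimal sketch of what the code appears to assume, reconstructed from the field accesses in this file (the exact upstream definitions may differ):

#include <linux/list.h>
#include <linux/workqueue.h>

/* one multicast MAC, widened to u64 so it fits a single mailbox message */
struct xcast_addr {
        struct list_head list;
        u64 addr;
};

/* snapshot of the kernel's multicast list taken in ndo_set_rx_mode */
struct xcast_addr_list {
        struct list_head list;
        int count;
};

/* deferred rx-mode request embedded in struct nicvf as rx_mode_work */
struct nicvf_work {
        struct delayed_work work;
        u8 mode;
        struct xcast_addr_list *mc;
};
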
@@ -1919,6 +1923,100 @@ static int nicvf_ioctl(struct net_device *netdev, struct ifreq *req, int cmd)
         }
 }

+static void nicvf_set_rx_mode_task(struct work_struct *work_arg)
+{
+        struct nicvf_work *vf_work = container_of(work_arg, struct nicvf_work,
+                                                  work.work);
+        struct nicvf *nic = container_of(vf_work, struct nicvf, rx_mode_work);
+        union nic_mbx mbx = {};
+        struct xcast_addr *xaddr, *next;
+
+        if (!vf_work)
+                return;
+
+        /* From inside the VF we have only 128 bits of mailbox memory
+         * available per message to the host's PF, so send all mc addrs
+         * one by one, starting with a flush command in case the kernel
+         * requests specific MAC filtering.
+         */
+
+        /* flush DMAC filters and reset RX mode */
+        mbx.xcast.msg = NIC_MBOX_MSG_RESET_XCAST;
+        nicvf_send_msg_to_pf(nic, &mbx);
+
+        if (vf_work->mode & BGX_XCAST_MCAST_FILTER) {
+                /* once filtering is enabled, signal the PF to add its own
+                 * LMAC to the filter so it keeps accepting packets for itself.
+                 */
+                mbx.xcast.msg = NIC_MBOX_MSG_ADD_MCAST;
+                mbx.xcast.data.mac = 0;
+                nicvf_send_msg_to_pf(nic, &mbx);
+        }
+
+        /* check if we have any specific MACs to be added to PF DMAC filter */
+        if (vf_work->mc) {
+                /* now go through kernel list of MACs and add them one by one */
+                list_for_each_entry_safe(xaddr, next,
+                                         &vf_work->mc->list, list) {
+                        mbx.xcast.msg = NIC_MBOX_MSG_ADD_MCAST;
+                        mbx.xcast.data.mac = xaddr->addr;
+                        nicvf_send_msg_to_pf(nic, &mbx);
+
+                        /* after receiving ACK from PF release memory */
+                        list_del(&xaddr->list);
+                        kfree(xaddr);
+                        vf_work->mc->count--;
+                }
+                kfree(vf_work->mc);
+        }
+
+        /* and finally set rx mode for PF accordingly */
+        mbx.xcast.msg = NIC_MBOX_MSG_SET_XCAST;
+        mbx.xcast.data.mode = vf_work->mode;
+
+        nicvf_send_msg_to_pf(nic, &mbx);
+}
+
+static void nicvf_set_rx_mode(struct net_device *netdev)
+{
+        struct nicvf *nic = netdev_priv(netdev);
+        struct netdev_hw_addr *ha;
+        struct xcast_addr_list *mc_list = NULL;
+        u8 mode = 0;
+
+        if (netdev->flags & IFF_PROMISC) {
+                mode = BGX_XCAST_BCAST_ACCEPT | BGX_XCAST_MCAST_ACCEPT;
+        } else {
+                if (netdev->flags & IFF_BROADCAST)
+                        mode |= BGX_XCAST_BCAST_ACCEPT;
+
+                if (netdev->flags & IFF_ALLMULTI) {
+                        mode |= BGX_XCAST_MCAST_ACCEPT;
+                } else if (netdev->flags & IFF_MULTICAST) {
+                        mode |= BGX_XCAST_MCAST_FILTER;
+                        /* copy mc addrs (kzalloc zeroes mc_list->count) */
+                        if (netdev_mc_count(netdev)) {
+                                struct xcast_addr *xaddr;
+
+                                mc_list = kzalloc(sizeof(*mc_list), GFP_ATOMIC);
+                                INIT_LIST_HEAD(&mc_list->list);
+                                netdev_hw_addr_list_for_each(ha, &netdev->mc) {
+                                        xaddr = kmalloc(sizeof(*xaddr),
+                                                        GFP_ATOMIC);
+                                        xaddr->addr =
+                                                ether_addr_to_u64(ha->addr);
+                                        list_add_tail(&xaddr->list,
+                                                      &mc_list->list);
+                                        mc_list->count++;
+                                }
+                        }
+                }
+        }
+        nic->rx_mode_work.mc = mc_list;
+        nic->rx_mode_work.mode = mode;
+        queue_delayed_work(nicvf_rx_mode_wq, &nic->rx_mode_work.work, 2 * HZ);
+}
+
 static const struct net_device_ops nicvf_netdev_ops = {
         .ndo_open               = nicvf_open,
         .ndo_stop               = nicvf_stop,
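
Two design points are worth spelling out. First, .ndo_set_rx_mode is invoked with netif_addr_lock_bh held, so nicvf_set_rx_mode() may not sleep: it copies the multicast list with GFP_ATOMIC and defers the PF mailbox handshake, which waits for ACKs, to the workqueue. Second, as the comment in nicvf_set_rx_mode_task() says, a VF-to-PF mailbox message carries only 128 bits, so each multicast MAC travels in its own NIC_MBOX_MSG_ADD_MCAST message. A sketch of the message shape this implies, guessed from the mbx.xcast.msg and mbx.xcast.data.{mac,mode} accesses above (the real union nic_mbx in nic.h has many more members):

/* hypothetical reconstruction, not the upstream definition */
struct xcast {
        u8 msg;          /* NIC_MBOX_MSG_{RESET_XCAST,ADD_MCAST,SET_XCAST} */
        union {
                u8 mode; /* BGX_XCAST_* accept/filter bits */
                u64 mac; /* one multicast MAC per message */
        } data;
};

union nic_mbx {
        struct { u8 msg; } msg;
        struct xcast xcast;
        /* ... other mailbox message types elided ... */
};
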
@@ -1931,6 +2029,7 @@ static const struct net_device_ops nicvf_netdev_ops = {
         .ndo_set_features       = nicvf_set_features,
         .ndo_bpf                = nicvf_xdp,
         .ndo_do_ioctl           = nicvf_ioctl,
+        .ndo_set_rx_mode        = nicvf_set_rx_mode,
 };

 static int nicvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
@@ -2071,6 +2170,8 @@ static int nicvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)

         INIT_WORK(&nic->reset_task, nicvf_reset_task);

+        INIT_DELAYED_WORK(&nic->rx_mode_work.work, nicvf_set_rx_mode_task);
+
         err = register_netdev(netdev);
         if (err) {
                 dev_err(dev, "Failed to register netdevice\n");
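
INIT_DELAYED_WORK() deliberately runs before register_netdev(): the stack may call .ndo_set_rx_mode as soon as the device is registered, and queueing an uninitialized delayed_work is undefined behavior. A self-contained sketch of the pattern, with hypothetical demo_* names:

#include <linux/workqueue.h>

struct demo_ctx {
        struct delayed_work dwork;
};

static void demo_fn(struct work_struct *w)
{
        struct demo_ctx *ctx = container_of(to_delayed_work(w),
                                            struct demo_ctx, dwork);
        /* runs in process context, so it may sleep (e.g. mailbox I/O) */
        (void)ctx;
}

static bool demo_setup(struct workqueue_struct *wq, struct demo_ctx *ctx)
{
        INIT_DELAYED_WORK(&ctx->dwork, demo_fn);  /* bind handler first */
        return queue_delayed_work(wq, &ctx->dwork, 2 * HZ); /* ~2 s delay */
}
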
@@ -2109,6 +2210,8 @@ static void nicvf_remove(struct pci_dev *pdev)
         nic = netdev_priv(netdev);
         pnetdev = nic->pnicvf->netdev;

+        cancel_delayed_work_sync(&nic->rx_mode_work.work);
+
         /* Check if this Qset is assigned to different VF.
          * If yes, clean primary and all secondary Qsets.
          */
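
cancel_delayed_work_sync() both removes a queued or timer-pending instance and waits for a running one to finish, so once it returns, nicvf_set_rx_mode_task() can no longer touch the device being torn down. One caveat worth noting: .ndo_set_rx_mode can still fire and requeue the work until unregister_netdev() completes, so a defensive teardown would cancel after the netdev is unregistered. A sketch of that ordering (an assumption, not what this hunk does; demo_teardown is hypothetical and assumes nic.h is in scope):

static void demo_teardown(struct net_device *netdev, struct nicvf *nic)
{
        unregister_netdev(netdev);      /* no further .ndo_set_rx_mode calls */
        cancel_delayed_work_sync(&nic->rx_mode_work.work); /* wait it out */
        free_netdev(netdev);            /* now safe to free */
}
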
@@ -2140,12 +2243,17 @@ static struct pci_driver nicvf_driver = {
 static int __init nicvf_init_module(void)
 {
         pr_info("%s, ver %s\n", DRV_NAME, DRV_VERSION);
-
+        nicvf_rx_mode_wq = alloc_ordered_workqueue("nicvf_generic",
+                                                   WQ_MEM_RECLAIM);
         return pci_register_driver(&nicvf_driver);
 }

 static void __exit nicvf_cleanup_module(void)
 {
+        if (nicvf_rx_mode_wq) {
+                destroy_workqueue(nicvf_rx_mode_wq);
+                nicvf_rx_mode_wq = NULL;
+        }
         pci_unregister_driver(&nicvf_driver);
 }
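
alloc_ordered_workqueue() creates a workqueue that executes at most one work item at a time, which conveniently serializes the VF's mailbox transactions, but it can also return NULL under memory pressure, and queue_delayed_work() would then dereference a NULL workqueue. The cleanup path above already guards against NULL; a defensive variant of module init (a sketch, not what the patch does) would fail the load instead:

static int __init nicvf_init_module(void)
{
        pr_info("%s, ver %s\n", DRV_NAME, DRV_VERSION);

        nicvf_rx_mode_wq = alloc_ordered_workqueue("nicvf_generic",
                                                   WQ_MEM_RECLAIM);
        if (!nicvf_rx_mode_wq)
                return -ENOMEM; /* avoid NULL-wq oops on first rx-mode change */

        return pci_register_driver(&nicvf_driver);
}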