@@ -2002,14 +2002,22 @@ static bool remove_xps_queue_cpu(struct net_device *dev,
                                  struct xps_dev_maps *dev_maps,
                                  int cpu, u16 offset, u16 count)
 {
-        int i, j;
+        int num_tc = dev->num_tc ? : 1;
+        bool active = false;
+        int tci;
 
-        for (i = count, j = offset; i--; j++) {
-                if (!remove_xps_queue(dev_maps, cpu, j))
-                        break;
+        for (tci = cpu * num_tc; num_tc--; tci++) {
+                int i, j;
+
+                for (i = count, j = offset; i--; j++) {
+                        if (!remove_xps_queue(dev_maps, cpu, j))
+                                break;
+                }
+
+                active |= i < 0;
         }
 
-        return i < 0;
+        return active;
 }
 
 static void netif_reset_xps_queues(struct net_device *dev, u16 offset,
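The hunk above turns the single removal pass into one pass per traffic class: with traffic classes enabled, each CPU owns num_tc consecutive slots in the flat map array, and the slot for class tc on a given CPU sits at cpu * num_tc + tc. A minimal illustrative helper, not part of the patch and with a hypothetical name:

/* Illustrative only, not in the patch: the flat index the patched XPS
 * code uses throughout. Each CPU owns num_tc consecutive cpu_map[]
 * entries, one per traffic class.
 */
static inline int xps_flat_index(int cpu, int num_tc, int tc)
{
        return cpu * num_tc + tc;
}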
@@ -2086,46 +2094,67 @@ int netif_set_xps_queue(struct net_device *dev, const struct cpumask *mask,
                         u16 index)
 {
         struct xps_dev_maps *dev_maps, *new_dev_maps = NULL;
+        int i, cpu, tci, numa_node_id = -2;
+        int maps_sz, num_tc = 1, tc = 0;
         struct xps_map *map, *new_map;
-        int maps_sz = max_t(unsigned int, XPS_DEV_MAPS_SIZE, L1_CACHE_BYTES);
-        int cpu, numa_node_id = -2;
         bool active = false;
 
+        if (dev->num_tc) {
+                num_tc = dev->num_tc;
+                tc = netdev_txq_to_tc(dev, index);
+                if (tc < 0)
+                        return -EINVAL;
+        }
+
+        maps_sz = XPS_DEV_MAPS_SIZE(num_tc);
+        if (maps_sz < L1_CACHE_BYTES)
+                maps_sz = L1_CACHE_BYTES;
+
         mutex_lock(&xps_map_mutex);
 
         dev_maps = xmap_dereference(dev->xps_maps);
 
         /* allocate memory for queue storage */
-        for_each_online_cpu(cpu) {
-                if (!cpumask_test_cpu(cpu, mask))
-                        continue;
-
+        for_each_cpu_and(cpu, cpu_online_mask, mask) {
                 if (!new_dev_maps)
                         new_dev_maps = kzalloc(maps_sz, GFP_KERNEL);
                 if (!new_dev_maps) {
                         mutex_unlock(&xps_map_mutex);
                         return -ENOMEM;
                 }
 
-                map = dev_maps ? xmap_dereference(dev_maps->cpu_map[cpu]) :
+                tci = cpu * num_tc + tc;
+                map = dev_maps ? xmap_dereference(dev_maps->cpu_map[tci]) :
                                  NULL;
 
                 map = expand_xps_map(map, cpu, index);
                 if (!map)
                         goto error;
 
-                RCU_INIT_POINTER(new_dev_maps->cpu_map[cpu], map);
+                RCU_INIT_POINTER(new_dev_maps->cpu_map[tci], map);
         }
 
         if (!new_dev_maps)
                 goto out_no_new_maps;
 
         for_each_possible_cpu(cpu) {
+                /* copy maps belonging to foreign traffic classes */
+                for (i = tc, tci = cpu * num_tc; dev_maps && i--; tci++) {
+                        /* fill in the new device map from the old device map */
+                        map = xmap_dereference(dev_maps->cpu_map[tci]);
+                        RCU_INIT_POINTER(new_dev_maps->cpu_map[tci], map);
+                }
+
+                /* We need to explicitly update tci as prevous loop
+                 * could break out early if dev_maps is NULL.
+                 */
+                tci = cpu * num_tc + tc;
+
                 if (cpumask_test_cpu(cpu, mask) && cpu_online(cpu)) {
                         /* add queue to CPU maps */
                         int pos = 0;
 
-                        map = xmap_dereference(new_dev_maps->cpu_map[cpu]);
+                        map = xmap_dereference(new_dev_maps->cpu_map[tci]);
                         while ((pos < map->len) && (map->queues[pos] != index))
                                 pos++;
 
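One detail worth spelling out: the allocation size is no longer a fixed XPS_DEV_MAPS_SIZE but scales with the number of traffic classes, since the flat array now needs one xps_map pointer per (CPU, class) pair, still rounded up to at least one cache line. The macro itself lives in include/linux/netdevice.h and is not shown in this diff, so the sizes below are made-up stand-ins; a rough, self-contained sketch of the arithmetic only:

/* Rough standalone sketch of the maps_sz computation above. The header
 * and cache-line sizes are stand-ins for sizeof(struct xps_dev_maps)
 * and L1_CACHE_BYTES; they are not the real kernel values.
 */
#include <stdio.h>

int main(void)
{
        const unsigned long header = 16;          /* stand-in struct header */
        const unsigned long l1_cache_bytes = 64;  /* stand-in cache line    */
        unsigned long nr_cpu_ids = 8, num_tc = 4;
        unsigned long maps_sz;

        maps_sz = header + nr_cpu_ids * num_tc * sizeof(void *);
        if (maps_sz < l1_cache_bytes)
                maps_sz = l1_cache_bytes;

        printf("maps_sz for %lu CPUs x %lu classes: %lu bytes\n",
               nr_cpu_ids, num_tc, maps_sz);
        return 0;
}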
@@ -2139,26 +2168,36 @@ int netif_set_xps_queue(struct net_device *dev, const struct cpumask *mask,
 #endif
                 } else if (dev_maps) {
                         /* fill in the new device map from the old device map */
-                        map = xmap_dereference(dev_maps->cpu_map[cpu]);
-                        RCU_INIT_POINTER(new_dev_maps->cpu_map[cpu], map);
+                        map = xmap_dereference(dev_maps->cpu_map[tci]);
+                        RCU_INIT_POINTER(new_dev_maps->cpu_map[tci], map);
                 }
 
+                /* copy maps belonging to foreign traffic classes */
+                for (i = num_tc - tc, tci++; dev_maps && --i; tci++) {
+                        /* fill in the new device map from the old device map */
+                        map = xmap_dereference(dev_maps->cpu_map[tci]);
+                        RCU_INIT_POINTER(new_dev_maps->cpu_map[tci], map);
+                }
         }
 
         rcu_assign_pointer(dev->xps_maps, new_dev_maps);
 
         /* Cleanup old maps */
-        if (dev_maps) {
-                for_each_possible_cpu(cpu) {
-                        new_map = xmap_dereference(new_dev_maps->cpu_map[cpu]);
-                        map = xmap_dereference(dev_maps->cpu_map[cpu]);
+        if (!dev_maps)
+                goto out_no_old_maps;
+
+        for_each_possible_cpu(cpu) {
+                for (i = num_tc, tci = cpu * num_tc; i--; tci++) {
+                        new_map = xmap_dereference(new_dev_maps->cpu_map[tci]);
+                        map = xmap_dereference(dev_maps->cpu_map[tci]);
                         if (map && map != new_map)
                                 kfree_rcu(map, rcu);
                 }
-
-                kfree_rcu(dev_maps, rcu);
         }
 
+        kfree_rcu(dev_maps, rcu);
+
+out_no_old_maps:
         dev_maps = new_dev_maps;
         active = true;
 
@@ -2173,11 +2212,12 @@ int netif_set_xps_queue(struct net_device *dev, const struct cpumask *mask,
 
         /* removes queue from unused CPUs */
         for_each_possible_cpu(cpu) {
-                if (cpumask_test_cpu(cpu, mask) && cpu_online(cpu))
-                        continue;
-
-                if (remove_xps_queue(dev_maps, cpu, index))
-                        active = true;
+                for (i = tc, tci = cpu * num_tc; i--; tci++)
+                        active |= remove_xps_queue(dev_maps, tci, index);
+                if (!cpumask_test_cpu(cpu, mask) || !cpu_online(cpu))
+                        active |= remove_xps_queue(dev_maps, tci, index);
+                for (i = num_tc - tc, tci++; --i; tci++)
+                        active |= remove_xps_queue(dev_maps, tci, index);
         }
 
         /* free map if not active */
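To make the rewritten removal loop easier to follow: for each possible CPU it now walks all of that CPU's slots, unconditionally pruning the classes below and above tc and pruning the slot for tc itself only when the CPU is no longer in the requested mask or has gone offline. A small self-contained sketch of which slots get visited (names and output are illustrative only; "keep" stands in for cpumask_test_cpu(cpu, mask) && cpu_online(cpu)):

/* Illustrative walk of the slots touched above for one CPU. */
#include <stdbool.h>
#include <stdio.h>

static void sketch_unused_slots(int cpu, int num_tc, int tc, bool keep)
{
        int i, tci;

        for (i = tc, tci = cpu * num_tc; i--; tci++)
                printf("prune tci %d (class below tc)\n", tci);
        if (!keep)
                printf("prune tci %d (the class being configured)\n", tci);
        for (i = num_tc - tc, tci++; --i; tci++)
                printf("prune tci %d (class above tc)\n", tci);
}

int main(void)
{
        sketch_unused_slots(2, 4, 1, false);    /* CPU 2, 4 classes, tc 1 */
        return 0;
}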
@@ -2193,11 +2233,14 @@ int netif_set_xps_queue(struct net_device *dev, const struct cpumask *mask,
 error:
         /* remove any maps that we added */
         for_each_possible_cpu(cpu) {
-                new_map = xmap_dereference(new_dev_maps->cpu_map[cpu]);
-                map = dev_maps ? xmap_dereference(dev_maps->cpu_map[cpu]) :
-                                 NULL;
-                if (new_map && new_map != map)
-                        kfree(new_map);
+                for (i = num_tc, tci = cpu * num_tc; i--; tci++) {
+                        new_map = xmap_dereference(new_dev_maps->cpu_map[tci]);
+                        map = dev_maps ?
+                              xmap_dereference(dev_maps->cpu_map[tci]) :
+                              NULL;
+                        if (new_map && new_map != map)
+                                kfree(new_map);
+                }
         }
 
         mutex_unlock(&xps_map_mutex);
@@ -3158,8 +3201,14 @@ static inline int get_xps_queue(struct net_device *dev, struct sk_buff *skb)
         rcu_read_lock();
         dev_maps = rcu_dereference(dev->xps_maps);
         if (dev_maps) {
-                map = rcu_dereference(
-                    dev_maps->cpu_map[skb->sender_cpu - 1]);
+                unsigned int tci = skb->sender_cpu - 1;
+
+                if (dev->num_tc) {
+                        tci *= dev->num_tc;
+                        tci += netdev_get_prio_tc_map(dev, skb->priority);
+                }
+
+                map = rcu_dereference(dev_maps->cpu_map[tci]);
                 if (map) {
                         if (map->len == 1)
                                 queue_index = map->queues[0];
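On the transmit side the lookup mirrors the same layout: the sending CPU selects its block of num_tc slots and the skb priority, translated through the device's prio-to-tc table, selects the class within that block; with no traffic classes configured the index remains sender_cpu - 1 as before. A hedged sketch of just the index arithmetic (the helper name is made up; prio_tc stands for what netdev_get_prio_tc_map() would return):

/* Illustrative only: the flat map index used by get_xps_queue() after
 * this change. prio_tc is a stand-in for the result of
 * netdev_get_prio_tc_map(dev, skb->priority).
 */
static unsigned int sketch_xmit_tci(unsigned int sender_cpu,
                                    unsigned int num_tc,
                                    unsigned int prio_tc)
{
        unsigned int tci = sender_cpu - 1;

        if (num_tc) {
                tci *= num_tc;
                tci += prio_tc;
        }
        return tci;
}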