@@ -138,8 +138,8 @@ struct cake_flow {
 struct cake_host {
 	u32 srchost_tag;
 	u32 dsthost_tag;
-	u16 srchost_refcnt;
-	u16 dsthost_refcnt;
+	u16 srchost_bulk_flow_count;
+	u16 dsthost_bulk_flow_count;
 };
 
 struct cake_heap_entry {
@@ -746,8 +746,10 @@ static u32 cake_hash(struct cake_tin_data *q, const struct sk_buff *skb,
 		 * queue, accept the collision, update the host tags.
 		 */
 		q->way_collisions++;
-		q->hosts[q->flows[reduced_hash].srchost].srchost_refcnt--;
-		q->hosts[q->flows[reduced_hash].dsthost].dsthost_refcnt--;
+		if (q->flows[outer_hash + k].set == CAKE_SET_BULK) {
+			q->hosts[q->flows[reduced_hash].srchost].srchost_bulk_flow_count--;
+			q->hosts[q->flows[reduced_hash].dsthost].dsthost_bulk_flow_count--;
+		}
 		allocate_src = cake_dsrc(flow_mode);
 		allocate_dst = cake_ddst(flow_mode);
 found:
@@ -767,13 +769,14 @@ static u32 cake_hash(struct cake_tin_data *q, const struct sk_buff *skb,
 			}
 			for (i = 0; i < CAKE_SET_WAYS;
 			     i++, k = (k + 1) % CAKE_SET_WAYS) {
-				if (!q->hosts[outer_hash + k].srchost_refcnt)
+				if (!q->hosts[outer_hash + k].srchost_bulk_flow_count)
 					break;
 			}
 			q->hosts[outer_hash + k].srchost_tag = srchost_hash;
 found_src:
 			srchost_idx = outer_hash + k;
-			q->hosts[srchost_idx].srchost_refcnt++;
+			if (q->flows[reduced_hash].set == CAKE_SET_BULK)
+				q->hosts[srchost_idx].srchost_bulk_flow_count++;
 			q->flows[reduced_hash].srchost = srchost_idx;
 		}
 
@@ -789,13 +792,14 @@ static u32 cake_hash(struct cake_tin_data *q, const struct sk_buff *skb,
 			}
 			for (i = 0; i < CAKE_SET_WAYS;
 			     i++, k = (k + 1) % CAKE_SET_WAYS) {
-				if (!q->hosts[outer_hash + k].dsthost_refcnt)
+				if (!q->hosts[outer_hash + k].dsthost_bulk_flow_count)
 					break;
 			}
 			q->hosts[outer_hash + k].dsthost_tag = dsthost_hash;
 found_dst:
 			dsthost_idx = outer_hash + k;
-			q->hosts[dsthost_idx].dsthost_refcnt++;
+			if (q->flows[reduced_hash].set == CAKE_SET_BULK)
+				q->hosts[dsthost_idx].dsthost_bulk_flow_count++;
 			q->flows[reduced_hash].dsthost = dsthost_idx;
 		}
 	}
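
Note on the cake_hash() hunks above: the per-host counters change meaning from "flows hashed to this host" to "bulk flows belonging to this host", so sparse or idle flows no longer dilute a host's fairness share. Below is a minimal standalone sketch of how such a counter drives the per-flow quantum in dual/triple isolation mode; the struct and function names here are illustrative stand-ins, not taken from sch_cake.c:

#include <stdio.h>

/* Illustrative sketch, not kernel code: a flow's quantum is divided
 * by the load of the busiest of its two hosts, where "load" now
 * counts bulk flows only.
 */
struct host_model {
	unsigned int srchost_bulk_flow_count;
	unsigned int dsthost_bulk_flow_count;
};

static unsigned int effective_quantum(unsigned int flow_quantum,
				      const struct host_model *src,
				      const struct host_model *dst,
				      int dual_src, int dual_dst)
{
	unsigned int host_load = 1;

	if (dual_src && src->srchost_bulk_flow_count > host_load)
		host_load = src->srchost_bulk_flow_count;
	if (dual_dst && dst->dsthost_bulk_flow_count > host_load)
		host_load = dst->dsthost_bulk_flow_count;

	/* each bulk flow of the busiest host gets an equal slice */
	return flow_quantum / host_load;
}

int main(void)
{
	struct host_model src = { .srchost_bulk_flow_count = 3 };
	struct host_model dst = { .dsthost_bulk_flow_count = 1 };

	/* a 1514-byte quantum split three ways: prints 504 */
	printf("%u\n", effective_quantum(1514, &src, &dst, 1, 1));
	return 0;
}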
@@ -1794,20 +1798,30 @@ static s32 cake_enqueue(struct sk_buff *skb, struct Qdisc *sch,
 		b->sparse_flow_count++;
 
 		if (cake_dsrc(q->flow_mode))
-			host_load = max(host_load, srchost->srchost_refcnt);
+			host_load = max(host_load, srchost->srchost_bulk_flow_count);
 
 		if (cake_ddst(q->flow_mode))
-			host_load = max(host_load, dsthost->dsthost_refcnt);
+			host_load = max(host_load, dsthost->dsthost_bulk_flow_count);
 
 		flow->deficit = (b->flow_quantum *
 				 quantum_div[host_load]) >> 16;
 	} else if (flow->set == CAKE_SET_SPARSE_WAIT) {
+		struct cake_host *srchost = &b->hosts[flow->srchost];
+		struct cake_host *dsthost = &b->hosts[flow->dsthost];
+
 		/* this flow was empty, accounted as a sparse flow, but actually
 		 * in the bulk rotation.
 		 */
 		flow->set = CAKE_SET_BULK;
 		b->sparse_flow_count--;
 		b->bulk_flow_count++;
+
+		if (cake_dsrc(q->flow_mode))
+			srchost->srchost_bulk_flow_count++;
+
+		if (cake_ddst(q->flow_mode))
+			dsthost->dsthost_bulk_flow_count++;
+
 	}
 
 	if (q->buffer_used > q->buffer_max_used)
@@ -1975,23 +1989,8 @@ static struct sk_buff *cake_dequeue(struct Qdisc *sch)
 	dsthost = &b->hosts[flow->dsthost];
 	host_load = 1;
 
-	if (cake_dsrc(q->flow_mode))
-		host_load = max(host_load, srchost->srchost_refcnt);
-
-	if (cake_ddst(q->flow_mode))
-		host_load = max(host_load, dsthost->dsthost_refcnt);
-
-	WARN_ON(host_load > CAKE_QUEUES);
-
 	/* flow isolation (DRR++) */
 	if (flow->deficit <= 0) {
-		/* The shifted prandom_u32() is a way to apply dithering to
-		 * avoid accumulating roundoff errors
-		 */
-		flow->deficit += (b->flow_quantum * quantum_div[host_load] +
-				  (prandom_u32() >> 16)) >> 16;
-		list_move_tail(&flow->flowchain, &b->old_flows);
-
 		/* Keep all flows with deficits out of the sparse and decaying
 		 * rotations. No non-empty flow can go into the decaying
 		 * rotation, so they can't get deficits
@@ -2000,6 +1999,13 @@ static struct sk_buff *cake_dequeue(struct Qdisc *sch)
 			if (flow->head) {
 				b->sparse_flow_count--;
 				b->bulk_flow_count++;
+
+				if (cake_dsrc(q->flow_mode))
+					srchost->srchost_bulk_flow_count++;
+
+				if (cake_ddst(q->flow_mode))
+					dsthost->dsthost_bulk_flow_count++;
+
 				flow->set = CAKE_SET_BULK;
 			} else {
 				/* we've moved it to the bulk rotation for
@@ -2009,6 +2015,22 @@ static struct sk_buff *cake_dequeue(struct Qdisc *sch)
 				flow->set = CAKE_SET_SPARSE_WAIT;
 			}
 		}
+
+		if (cake_dsrc(q->flow_mode))
+			host_load = max(host_load, srchost->srchost_bulk_flow_count);
+
+		if (cake_ddst(q->flow_mode))
+			host_load = max(host_load, dsthost->dsthost_bulk_flow_count);
+
+		WARN_ON(host_load > CAKE_QUEUES);
+
+		/* The shifted prandom_u32() is a way to apply dithering to
+		 * avoid accumulating roundoff errors
+		 */
+		flow->deficit += (b->flow_quantum * quantum_div[host_load] +
+				  (prandom_u32() >> 16)) >> 16;
+		list_move_tail(&flow->flowchain, &b->old_flows);
+
 		goto retry;
 	}
 
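
The deficit update that the hunk above moves to the end of the block computes quantum/host_load in 16.16 fixed point with random dithering. Assuming quantum_div[h] is the precomputed reciprocal table (roughly 65535/h, as sch_cake builds at init), the following sketch shows the arithmetic, with rand_u16() standing in for the kernel's prandom_u32() >> 16:

#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>

/* rand_u16() is a user-space stand-in for prandom_u32() >> 16 */
static uint32_t rand_u16(void)
{
	return (uint32_t)rand() & 0xffff;
}

static uint32_t dithered_quantum(uint32_t quantum, uint32_t host_load)
{
	uint32_t quantum_div = 65535 / host_load;	/* 16.16 reciprocal */

	/* Without the random term this would always round down; adding
	 * 0..65535 before the shift rounds up with probability equal to
	 * the discarded fraction, so the long-run average is the exact
	 * quotient and roundoff error does not accumulate.
	 */
	return (quantum * quantum_div + rand_u16()) >> 16;
}

int main(void)
{
	/* e.g. a 1514-byte quantum, three bulk flows on the busiest
	 * host: 1514 / 3 = 504.67, so ~2/3 of the results are 505 and
	 * the printed average converges on ~504.66.
	 */
	uint64_t sum = 0;
	int i;

	for (i = 0; i < 100000; i++)
		sum += dithered_quantum(1514, 3);
	printf("average deficit increment: %.2f\n", sum / 100000.0);
	return 0;
}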
@@ -2029,6 +2051,13 @@ static struct sk_buff *cake_dequeue(struct Qdisc *sch)
 					       &b->decaying_flows);
 				if (flow->set == CAKE_SET_BULK) {
 					b->bulk_flow_count--;
+
+					if (cake_dsrc(q->flow_mode))
+						srchost->srchost_bulk_flow_count--;
+
+					if (cake_ddst(q->flow_mode))
+						dsthost->dsthost_bulk_flow_count--;
+
 					b->decaying_flow_count++;
 				} else if (flow->set == CAKE_SET_SPARSE ||
 					   flow->set == CAKE_SET_SPARSE_WAIT) {
@@ -2042,14 +2071,19 @@ static struct sk_buff *cake_dequeue(struct Qdisc *sch)
 				if (flow->set == CAKE_SET_SPARSE ||
 				    flow->set == CAKE_SET_SPARSE_WAIT)
 					b->sparse_flow_count--;
-				else if (flow->set == CAKE_SET_BULK)
+				else if (flow->set == CAKE_SET_BULK) {
 					b->bulk_flow_count--;
-				else
+
+					if (cake_dsrc(q->flow_mode))
+						srchost->srchost_bulk_flow_count--;
+
+					if (cake_ddst(q->flow_mode))
+						dsthost->dsthost_bulk_flow_count--;
+
+				} else
 					b->decaying_flow_count--;
 
 				flow->set = CAKE_SET_NONE;
-				srchost->srchost_refcnt--;
-				dsthost->dsthost_refcnt--;
 			}
 			goto begin;
 		}
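
Taken together, the hunks maintain one invariant: a host's *_bulk_flow_count moves only on transitions into or out of CAKE_SET_BULK. That is why the collision and teardown paths now guard their decrements; an unconditional u16 decrement for a flow that was never counted would underflow. A small illustrative model of that transition rule (not kernel code, names hypothetical):

#include <assert.h>

/* Model: the per-host counter changes exactly when a flow's set
 * membership crosses the BULK boundary, so it returns to zero once
 * all of the host's flows have left the bulk rotation.
 */
enum set { NONE, SPARSE, SPARSE_WAIT, BULK };

struct host { unsigned int bulk; };

static void set_flow(enum set *cur, enum set next, struct host *h)
{
	if (*cur != BULK && next == BULK)
		h->bulk++;		/* entering the bulk rotation */
	else if (*cur == BULK && next != BULK)
		h->bulk--;		/* leaving it; guarded, no underflow */
	*cur = next;
}

int main(void)
{
	struct host h = { 0 };
	enum set f = NONE;

	set_flow(&f, SPARSE, &h);	/* sparse flows don't count */
	assert(h.bulk == 0);
	set_flow(&f, BULK, &h);
	assert(h.bulk == 1);
	set_flow(&f, NONE, &h);	/* empty queue leaves the flowchain */
	assert(h.bulk == 0);
	return 0;
}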