@@ -627,6 +627,63 @@ static bool cake_ddst(int flow_mode)
 	return (flow_mode & CAKE_FLOW_DUAL_DST) == CAKE_FLOW_DUAL_DST;
 }
 
+static void cake_dec_srchost_bulk_flow_count(struct cake_tin_data *q,
+					     struct cake_flow *flow,
+					     int flow_mode)
+{
+	if (likely(cake_dsrc(flow_mode) &&
+		   q->hosts[flow->srchost].srchost_bulk_flow_count))
+		q->hosts[flow->srchost].srchost_bulk_flow_count--;
+}
+
+static void cake_inc_srchost_bulk_flow_count(struct cake_tin_data *q,
+					     struct cake_flow *flow,
+					     int flow_mode)
+{
+	if (likely(cake_dsrc(flow_mode) &&
+		   q->hosts[flow->srchost].srchost_bulk_flow_count < CAKE_QUEUES))
+		q->hosts[flow->srchost].srchost_bulk_flow_count++;
+}
+
+static void cake_dec_dsthost_bulk_flow_count(struct cake_tin_data *q,
+					     struct cake_flow *flow,
+					     int flow_mode)
+{
+	if (likely(cake_ddst(flow_mode) &&
+		   q->hosts[flow->dsthost].dsthost_bulk_flow_count))
+		q->hosts[flow->dsthost].dsthost_bulk_flow_count--;
+}
+
+static void cake_inc_dsthost_bulk_flow_count(struct cake_tin_data *q,
+					     struct cake_flow *flow,
+					     int flow_mode)
+{
+	if (likely(cake_ddst(flow_mode) &&
+		   q->hosts[flow->dsthost].dsthost_bulk_flow_count < CAKE_QUEUES))
+		q->hosts[flow->dsthost].dsthost_bulk_flow_count++;
+}
+
+static u16 cake_get_flow_quantum(struct cake_tin_data *q,
+				 struct cake_flow *flow,
+				 int flow_mode)
+{
+	u16 host_load = 1;
+
+	if (cake_dsrc(flow_mode))
+		host_load = max(host_load,
+				q->hosts[flow->srchost].srchost_bulk_flow_count);
+
+	if (cake_ddst(flow_mode))
+		host_load = max(host_load,
+				q->hosts[flow->dsthost].dsthost_bulk_flow_count);
+
+	/* The get_random_u16() is a way to apply dithering to avoid
+	 * accumulating roundoff errors
+	 */
+	return (q->flow_quantum * quantum_div[host_load] +
+		get_random_u16()) >> 16;
+}
+
 static u32 cake_hash(struct cake_tin_data *q, const struct sk_buff *skb,
 		     int flow_mode, u16 flow_override, u16 host_override)
 {
@@ -773,10 +830,8 @@ static u32 cake_hash(struct cake_tin_data *q, const struct sk_buff *skb,
 		allocate_dst = cake_ddst(flow_mode);
 
 		if (q->flows[outer_hash + k].set == CAKE_SET_BULK) {
-			if (allocate_src)
-				q->hosts[q->flows[reduced_hash].srchost].srchost_bulk_flow_count--;
-			if (allocate_dst)
-				q->hosts[q->flows[reduced_hash].dsthost].dsthost_bulk_flow_count--;
+			cake_dec_srchost_bulk_flow_count(q, &q->flows[outer_hash + k], flow_mode);
+			cake_dec_dsthost_bulk_flow_count(q, &q->flows[outer_hash + k], flow_mode);
 		}
 found:
 		/* reserve queue for future packets in same flow */
@@ -801,9 +856,10 @@ static u32 cake_hash(struct cake_tin_data *q, const struct sk_buff *skb,
 			q->hosts[outer_hash + k].srchost_tag = srchost_hash;
 found_src:
 			srchost_idx = outer_hash + k;
-			if (q->flows[reduced_hash].set == CAKE_SET_BULK)
-				q->hosts[srchost_idx].srchost_bulk_flow_count++;
 			q->flows[reduced_hash].srchost = srchost_idx;
+
+			if (q->flows[reduced_hash].set == CAKE_SET_BULK)
+				cake_inc_srchost_bulk_flow_count(q, &q->flows[reduced_hash], flow_mode);
 		}
 
 		if (allocate_dst) {
@@ -824,9 +880,10 @@ static u32 cake_hash(struct cake_tin_data *q, const struct sk_buff *skb,
 			q->hosts[outer_hash + k].dsthost_tag = dsthost_hash;
 found_dst:
 			dsthost_idx = outer_hash + k;
-			if (q->flows[reduced_hash].set == CAKE_SET_BULK)
-				q->hosts[dsthost_idx].dsthost_bulk_flow_count++;
 			q->flows[reduced_hash].dsthost = dsthost_idx;
+
+			if (q->flows[reduced_hash].set == CAKE_SET_BULK)
+				cake_inc_dsthost_bulk_flow_count(q, &q->flows[reduced_hash], flow_mode);
 		}
 	}
 
@@ -1839,10 +1896,6 @@ static s32 cake_enqueue(struct sk_buff *skb, struct Qdisc *sch,
 
 	/* flowchain */
 	if (!flow->set || flow->set == CAKE_SET_DECAYING) {
-		struct cake_host *srchost = &b->hosts[flow->srchost];
-		struct cake_host *dsthost = &b->hosts[flow->dsthost];
-		u16 host_load = 1;
-
 		if (!flow->set) {
 			list_add_tail(&flow->flowchain, &b->new_flows);
 		} else {
@@ -1852,31 +1905,17 @@ static s32 cake_enqueue(struct sk_buff *skb, struct Qdisc *sch,
 		flow->set = CAKE_SET_SPARSE;
 		b->sparse_flow_count++;
 
-		if (cake_dsrc(q->flow_mode))
-			host_load = max(host_load, srchost->srchost_bulk_flow_count);
-
-		if (cake_ddst(q->flow_mode))
-			host_load = max(host_load, dsthost->dsthost_bulk_flow_count);
-
-		flow->deficit = (b->flow_quantum *
-				 quantum_div[host_load]) >> 16;
+		flow->deficit = cake_get_flow_quantum(b, flow, q->flow_mode);
 	} else if (flow->set == CAKE_SET_SPARSE_WAIT) {
-		struct cake_host *srchost = &b->hosts[flow->srchost];
-		struct cake_host *dsthost = &b->hosts[flow->dsthost];
-
 		/* this flow was empty, accounted as a sparse flow, but actually
 		 * in the bulk rotation.
 		 */
 		flow->set = CAKE_SET_BULK;
 		b->sparse_flow_count--;
 		b->bulk_flow_count++;
 
-		if (cake_dsrc(q->flow_mode))
-			srchost->srchost_bulk_flow_count++;
-
-		if (cake_ddst(q->flow_mode))
-			dsthost->dsthost_bulk_flow_count++;
-
+		cake_inc_srchost_bulk_flow_count(b, flow, q->flow_mode);
+		cake_inc_dsthost_bulk_flow_count(b, flow, q->flow_mode);
 	}
 
 	if (q->buffer_used > q->buffer_max_used)
@@ -1933,13 +1972,11 @@ static struct sk_buff *cake_dequeue(struct Qdisc *sch)
 {
 	struct cake_sched_data *q = qdisc_priv(sch);
 	struct cake_tin_data *b = &q->tins[q->cur_tin];
-	struct cake_host *srchost, *dsthost;
 	ktime_t now = ktime_get();
 	struct cake_flow *flow;
 	struct list_head *head;
 	bool first_flow = true;
 	struct sk_buff *skb;
-	u16 host_load;
 	u64 delay;
 	u32 len;
 
@@ -2039,11 +2076,6 @@ static struct sk_buff *cake_dequeue(struct Qdisc *sch)
 	q->cur_flow = flow - b->flows;
 	first_flow = false;
 
-	/* triple isolation (modified DRR++) */
-	srchost = &b->hosts[flow->srchost];
-	dsthost = &b->hosts[flow->dsthost];
-	host_load = 1;
-
 	/* flow isolation (DRR++) */
 	if (flow->deficit <= 0) {
 		/* Keep all flows with deficits out of the sparse and decaying
@@ -2055,11 +2087,8 @@ static struct sk_buff *cake_dequeue(struct Qdisc *sch)
 				b->sparse_flow_count--;
 				b->bulk_flow_count++;
 
-				if (cake_dsrc(q->flow_mode))
-					srchost->srchost_bulk_flow_count++;
-
-				if (cake_ddst(q->flow_mode))
-					dsthost->dsthost_bulk_flow_count++;
+				cake_inc_srchost_bulk_flow_count(b, flow, q->flow_mode);
+				cake_inc_dsthost_bulk_flow_count(b, flow, q->flow_mode);
 
 				flow->set = CAKE_SET_BULK;
 			} else {
@@ -2071,19 +2100,7 @@ static struct sk_buff *cake_dequeue(struct Qdisc *sch)
 			}
 		}
 
-		if (cake_dsrc(q->flow_mode))
-			host_load = max(host_load, srchost->srchost_bulk_flow_count);
-
-		if (cake_ddst(q->flow_mode))
-			host_load = max(host_load, dsthost->dsthost_bulk_flow_count);
-
-		WARN_ON(host_load > CAKE_QUEUES);
-
-		/* The get_random_u16() is a way to apply dithering to avoid
-		 * accumulating roundoff errors
-		 */
-		flow->deficit += (b->flow_quantum * quantum_div[host_load] +
-				  get_random_u16()) >> 16;
+		flow->deficit += cake_get_flow_quantum(b, flow, q->flow_mode);
 		list_move_tail(&flow->flowchain, &b->old_flows);
 
 		goto retry;
@@ -2107,11 +2124,8 @@ static struct sk_buff *cake_dequeue(struct Qdisc *sch)
 			if (flow->set == CAKE_SET_BULK) {
 				b->bulk_flow_count--;
 
-				if (cake_dsrc(q->flow_mode))
-					srchost->srchost_bulk_flow_count--;
-
-				if (cake_ddst(q->flow_mode))
-					dsthost->dsthost_bulk_flow_count--;
+				cake_dec_srchost_bulk_flow_count(b, flow, q->flow_mode);
+				cake_dec_dsthost_bulk_flow_count(b, flow, q->flow_mode);
 
 				b->decaying_flow_count++;
 			} else if (flow->set == CAKE_SET_SPARSE ||
@@ -2129,12 +2143,8 @@ static struct sk_buff *cake_dequeue(struct Qdisc *sch)
 		else if (flow->set == CAKE_SET_BULK) {
 			b->bulk_flow_count--;
 
-			if (cake_dsrc(q->flow_mode))
-				srchost->srchost_bulk_flow_count--;
-
-			if (cake_ddst(q->flow_mode))
-				dsthost->dsthost_bulk_flow_count--;
-
+			cake_dec_srchost_bulk_flow_count(b, flow, q->flow_mode);
+			cake_dec_dsthost_bulk_flow_count(b, flow, q->flow_mode);
 		} else
 			b->decaying_flow_count--;
 
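For reference, the new cake_get_flow_quantum() helper relies on a 16.16 fixed-point trick: quantum_div[host_load] is a precomputed reciprocal, so multiplying and shifting right by 16 approximates flow_quantum / host_load, and the random 16-bit value added before the shift dithers away systematic truncation bias. The following is a minimal user-space sketch of that arithmetic only, not kernel code; it assumes quantum_div[i] is filled with 65535 / i (as sch_cake does at init time) and uses rand() as a stand-in for get_random_u16().

/*
 * Illustration only: the dithered reciprocal division used for the
 * per-flow quantum. Assumed detail: quantum_div[i] = 65535 / i.
 */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define CAKE_QUEUES 1024

static uint16_t quantum_div[CAKE_QUEUES + 1];

static void init_quantum_div(void)
{
	quantum_div[0] = (uint16_t)~0;	/* index 0 is never used as a divisor */
	for (int i = 1; i <= CAKE_QUEUES; i++)
		quantum_div[i] = 65535 / i;
}

/* Stand-in for cake_get_flow_quantum(): rand() replaces get_random_u16() */
static uint16_t get_flow_quantum(uint16_t flow_quantum, uint16_t host_load)
{
	uint16_t dither = (uint16_t)(rand() & 0xffff);

	/* 16.16 fixed point: multiply by the reciprocal, dither, then shift */
	return ((uint32_t)flow_quantum * quantum_div[host_load] + dither) >> 16;
}

int main(void)
{
	init_quantum_div();

	/* e.g. a 1514-byte quantum shared by 3 bulk flows of one host:
	 * each call returns roughly 1514 / 3, i.e. 504 or 505 bytes,
	 * and the dither makes the long-run average come out right.
	 */
	for (int i = 0; i < 4; i++)
		printf("%u\n", get_flow_quantum(1514, 3));
	return 0;
}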