@@ -1678,17 +1678,6 @@ static void virtual_xfer_breadcrumbs(struct virtual_engine *ve,
 	spin_unlock(&old->breadcrumbs.irq_lock);
 }
 
-static struct i915_request *
-last_active(const struct intel_engine_execlists *execlists)
-{
-	struct i915_request * const *last = READ_ONCE(execlists->active);
-
-	while (*last && i915_request_completed(*last))
-		last++;
-
-	return *last;
-}
-
 #define for_each_waiter(p__, rq__) \
 	list_for_each_entry_lockless(p__, \
 				     &(rq__)->sched.waiters_list, \
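
Note: last_active() is deleted outright; the walk it performed over the
NULL-terminated execlists->active array is inlined into its one remaining
caller, execlists_dequeue(), in a later hunk. As a standalone illustration
of that walk, here is a small userspace sketch (mock_request and its
completed flag are stand-ins for struct i915_request and
i915_request_completed(); none of this is driver code):

#include <stdio.h>

struct mock_request {
	int seqno;
	int completed;	/* stand-in for i915_request_completed() */
};

int main(void)
{
	struct mock_request r0 = { .seqno = 1, .completed = 1 };
	struct mock_request r1 = { .seqno = 2, .completed = 0 };
	/* NULL-terminated port array, like execlists->active */
	struct mock_request *ports[] = { &r0, &r1, NULL };
	struct mock_request * const *active = ports;
	struct mock_request *last;

	/* Skip ports whose request has already completed; stop at the
	 * first in-flight request or at the NULL sentinel. */
	while ((last = *active) && last->completed)
		active++;

	if (last)
		printf("first in-flight request: seqno %d\n", last->seqno);
	else
		printf("no request in flight\n");

	return 0;
}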
@@ -1827,11 +1816,9 @@ static void record_preemption(struct intel_engine_execlists *execlists)
 	(void)I915_SELFTEST_ONLY(execlists->preempt_hang.count++);
 }
 
-static unsigned long active_preempt_timeout(struct intel_engine_cs *engine)
+static unsigned long active_preempt_timeout(struct intel_engine_cs *engine,
+					    const struct i915_request *rq)
 {
-	struct i915_request *rq;
-
-	rq = last_active(&engine->execlists);
 	if (!rq)
 		return 0;
 
@@ -1842,13 +1829,14 @@ static unsigned long active_preempt_timeout(struct intel_engine_cs *engine)
 	return READ_ONCE(engine->props.preempt_timeout_ms);
 }
 
-static void set_preempt_timeout(struct intel_engine_cs *engine)
+static void set_preempt_timeout(struct intel_engine_cs *engine,
+				const struct i915_request *rq)
 {
 	if (!intel_engine_has_preempt_reset(engine))
 		return;
 
 	set_timer_ms(&engine->execlists.preempt,
-		     active_preempt_timeout(engine));
+		     active_preempt_timeout(engine, rq));
 }
 
 static inline void clear_ports(struct i915_request **ports, int count)
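
These two hunks change the calling convention rather than the policy: the
active request becomes a parameter, resolved once by execlists_dequeue()
and threaded through set_preempt_timeout() into active_preempt_timeout(),
instead of each helper rescanning the ports via last_active(). A minimal
sketch of that shape, with mocked names and an invented timeout value:

#include <stdio.h>

struct mock_request {
	int preemptible;
};

/* Mirrors the new shape: the helper consumes a request handed down by
 * the caller rather than looking one up itself. The 640ms value is
 * invented for this sketch. */
static unsigned long active_preempt_timeout(const struct mock_request *rq)
{
	if (!rq)
		return 0;	/* nothing running, no timer to arm */

	return rq->preemptible ? 640 : 0;
}

static void set_preempt_timeout(const struct mock_request *rq)
{
	printf("arm preemption timer: %lu ms\n", active_preempt_timeout(rq));
}

int main(void)
{
	struct mock_request rq = { .preemptible = 1 };

	/* The caller resolves the active request once and threads the
	 * same pointer through every helper that needs it. */
	set_preempt_timeout(&rq);
	return 0;
}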
@@ -1861,6 +1849,7 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
 	struct intel_engine_execlists * const execlists = &engine->execlists;
 	struct i915_request **port = execlists->pending;
 	struct i915_request ** const last_port = port + execlists->port_mask;
+	struct i915_request * const *active;
 	struct i915_request *last;
 	struct rb_node *rb;
 	bool submit = false;
@@ -1915,7 +1904,10 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
 	 * i.e. we will retrigger preemption following the ack in case
 	 * of trouble.
 	 */
-	last = last_active(execlists);
+	active = READ_ONCE(execlists->active);
+	while ((last = *active) && i915_request_completed(last))
+		active++;
+
 	if (last) {
 		if (need_preempt(engine, last, rb)) {
 			ENGINE_TRACE(engine,
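
Two details of the inlined walk are worth noting: the assignment inside the
loop condition leaves last holding the first not-yet-completed request (or
NULL at the array's sentinel), so the if (last) test is equivalent to the
old helper's return *last; and the READ_ONCE() snapshot is kept in the
local active, which the hunks below reuse instead of re-reading
execlists->active.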
@@ -2201,7 +2193,7 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
 		 * Skip if we ended up with exactly the same set of requests,
 		 * e.g. trying to timeslice a pair of ordered contexts
 		 */
-		if (!memcmp(execlists->active, execlists->pending,
+		if (!memcmp(active, execlists->pending,
 			    (port - execlists->pending + 1) * sizeof(*port))) {
 			do
 				execlists_schedule_out(fetch_and_zero(port));
@@ -2212,7 +2204,7 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
 		clear_ports(port + 1, last_port - port);
 
 		execlists_submit_ports(engine);
-		set_preempt_timeout(engine);
+		set_preempt_timeout(engine, *active);
 	} else {
 skip_submit:
 		ring_set_paused(engine, 0);
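
Both remaining hunks consume that cached snapshot: the memcmp() now
compares the pending set against the first still-in-flight port rather
than the raw base of execlists->active, and set_preempt_timeout() receives
*active, the request identified by the walk above, completing the
replacement of last_active().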