Skip to content

Commit 5366b96

Browse files
committed
Merge tag 'drm-intel-fixes-2020-03-19' of git://anongit.freedesktop.org/drm/drm-intel into drm-fixes
drm/i915 fixes for v5.6-rc7:
- Track active elements during dequeue
- Fix failure to handle all MCR ranges
- Revert unnecessary workaround

Signed-off-by: Dave Airlie <[email protected]>
From: Jani Nikula <[email protected]>
Link: https://patchwork.freedesktop.org/patch/msgid/[email protected]
2 parents 362b86a + fe8b708 commit 5366b96

File tree

2 files changed

+34
-43
lines changed

2 files changed

+34
-43
lines changed

drivers/gpu/drm/i915/gt/intel_lrc.c

Lines changed: 12 additions & 40 deletions
@@ -1600,17 +1600,6 @@ static void virtual_xfer_breadcrumbs(struct virtual_engine *ve,
 	spin_unlock(&old->breadcrumbs.irq_lock);
 }
 
-static struct i915_request *
-last_active(const struct intel_engine_execlists *execlists)
-{
-	struct i915_request * const *last = READ_ONCE(execlists->active);
-
-	while (*last && i915_request_completed(*last))
-		last++;
-
-	return *last;
-}
-
 #define for_each_waiter(p__, rq__) \
 	list_for_each_entry_lockless(p__, \
 				     &(rq__)->sched.waiters_list, \
@@ -1740,11 +1729,9 @@ static void record_preemption(struct intel_engine_execlists *execlists)
 	(void)I915_SELFTEST_ONLY(execlists->preempt_hang.count++);
 }
 
-static unsigned long active_preempt_timeout(struct intel_engine_cs *engine)
+static unsigned long active_preempt_timeout(struct intel_engine_cs *engine,
+					    const struct i915_request *rq)
 {
-	struct i915_request *rq;
-
-	rq = last_active(&engine->execlists);
 	if (!rq)
 		return 0;
 
@@ -1755,13 +1742,14 @@ static unsigned long active_preempt_timeout(struct intel_engine_cs *engine)
 	return READ_ONCE(engine->props.preempt_timeout_ms);
 }
 
-static void set_preempt_timeout(struct intel_engine_cs *engine)
+static void set_preempt_timeout(struct intel_engine_cs *engine,
+				const struct i915_request *rq)
 {
 	if (!intel_engine_has_preempt_reset(engine))
 		return;
 
 	set_timer_ms(&engine->execlists.preempt,
-		     active_preempt_timeout(engine));
+		     active_preempt_timeout(engine, rq));
 }
 
 static inline void clear_ports(struct i915_request **ports, int count)
@@ -1774,6 +1762,7 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
 	struct intel_engine_execlists * const execlists = &engine->execlists;
 	struct i915_request **port = execlists->pending;
 	struct i915_request ** const last_port = port + execlists->port_mask;
+	struct i915_request * const *active;
 	struct i915_request *last;
 	struct rb_node *rb;
 	bool submit = false;
@@ -1828,7 +1817,10 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
 	 * i.e. we will retrigger preemption following the ack in case
 	 * of trouble.
 	 */
-	last = last_active(execlists);
+	active = READ_ONCE(execlists->active);
+	while ((last = *active) && i915_request_completed(last))
+		active++;
+
 	if (last) {
 		if (need_preempt(engine, last, rb)) {
 			ENGINE_TRACE(engine,
@@ -2110,7 +2102,7 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
 		 * Skip if we ended up with exactly the same set of requests,
 		 * e.g. trying to timeslice a pair of ordered contexts
 		 */
-		if (!memcmp(execlists->active, execlists->pending,
+		if (!memcmp(active, execlists->pending,
 			    (port - execlists->pending + 1) * sizeof(*port))) {
 			do
 				execlists_schedule_out(fetch_and_zero(port));
@@ -2121,7 +2113,7 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
 		clear_ports(port + 1, last_port - port);
 
 		execlists_submit_ports(engine);
-		set_preempt_timeout(engine);
+		set_preempt_timeout(engine, *active);
 	} else {
 skip_submit:
 		ring_set_paused(engine, 0);
@@ -4008,26 +4000,6 @@ static int gen12_emit_flush_render(struct i915_request *request,
 
 		*cs++ = preparser_disable(false);
 		intel_ring_advance(request, cs);
-
-		/*
-		 * Wa_1604544889:tgl
-		 */
-		if (IS_TGL_REVID(request->i915, TGL_REVID_A0, TGL_REVID_A0)) {
-			flags = 0;
-			flags |= PIPE_CONTROL_CS_STALL;
-			flags |= PIPE_CONTROL_HDC_PIPELINE_FLUSH;
-
-			flags |= PIPE_CONTROL_STORE_DATA_INDEX;
-			flags |= PIPE_CONTROL_QW_WRITE;
-
-			cs = intel_ring_begin(request, 6);
-			if (IS_ERR(cs))
-				return PTR_ERR(cs);
-
-			cs = gen8_emit_pipe_control(cs, flags,
-						    LRC_PPHWSP_SCRATCH_ADDR);
-			intel_ring_advance(request, cs);
-		}
 	}
 
 	return 0;

drivers/gpu/drm/i915/gt/intel_workarounds.c

Lines changed: 22 additions & 3 deletions
@@ -1529,15 +1529,34 @@ create_scratch(struct i915_address_space *vm, int count)
 	return ERR_PTR(err);
 }
 
+static const struct {
+	u32 start;
+	u32 end;
+} mcr_ranges_gen8[] = {
+	{ .start = 0x5500, .end = 0x55ff },
+	{ .start = 0x7000, .end = 0x7fff },
+	{ .start = 0x9400, .end = 0x97ff },
+	{ .start = 0xb000, .end = 0xb3ff },
+	{ .start = 0xe000, .end = 0xe7ff },
+	{},
+};
+
 static bool mcr_range(struct drm_i915_private *i915, u32 offset)
 {
+	int i;
+
+	if (INTEL_GEN(i915) < 8)
+		return false;
+
 	/*
-	 * Registers in this range are affected by the MCR selector
+	 * Registers in these ranges are affected by the MCR selector
 	 * which only controls CPU initiated MMIO. Routing does not
 	 * work for CS access so we cannot verify them on this path.
 	 */
-	if (INTEL_GEN(i915) >= 8 && (offset >= 0xb000 && offset <= 0xb4ff))
-		return true;
+	for (i = 0; mcr_ranges_gen8[i].start; i++)
+		if (offset >= mcr_ranges_gen8[i].start &&
+		    offset <= mcr_ranges_gen8[i].end)
+			return true;
 
 	return false;
 }

0 commit comments

Comments
 (0)