Skip to content

Commit 6c2f73e

Browse files
yanzhao56zhenyw
authored and committed
drm/i915/gvt: access shadow ctx via its virtual address directly
As the shadow context is pinned in intel_vgpu_setup_submission() and unpinned in intel_vgpu_clean_submission(), its base virtual address is safely obtained from lrc_reg_state. There is no need to call kmap()/kunmap() repeatedly. Signed-off-by: Yan Zhao <[email protected]> Reviewed-by: Zhenyu Wang <[email protected]> Signed-off-by: Zhenyu Wang <[email protected]> Link: http://patchwork.freedesktop.org/patch/msgid/[email protected]
1 parent 07f2fee commit 6c2f73e

File tree

1 file changed

+17
-18
lines changed

1 file changed

+17
-18
lines changed

drivers/gpu/drm/i915/gvt/scheduler.c

Lines changed: 17 additions & 18 deletions
Original file line numberDiff line numberDiff line change
@@ -128,16 +128,19 @@ static int populate_shadow_context(struct intel_vgpu_workload *workload)
128128
{
129129
struct intel_vgpu *vgpu = workload->vgpu;
130130
struct intel_gvt *gvt = vgpu->gvt;
131-
struct drm_i915_gem_object *ctx_obj =
132-
workload->req->context->state->obj;
131+
struct intel_context *ctx = workload->req->context;
133132
struct execlist_ring_context *shadow_ring_context;
134-
struct page *page;
135133
void *dst;
134+
void *context_base;
136135
unsigned long context_gpa, context_page_num;
137136
int i;
138137

139-
page = i915_gem_object_get_page(ctx_obj, LRC_STATE_PN);
140-
shadow_ring_context = kmap(page);
138+
GEM_BUG_ON(!intel_context_is_pinned(ctx));
139+
140+
context_base = (void *) ctx->lrc_reg_state -
141+
(LRC_STATE_PN << I915_GTT_PAGE_SHIFT);
142+
143+
shadow_ring_context = (void *) ctx->lrc_reg_state;
141144

142145
sr_oa_regs(workload, (u32 *)shadow_ring_context, true);
143146
#define COPY_REG(name) \
@@ -169,7 +172,6 @@ static int populate_shadow_context(struct intel_vgpu_workload *workload)
169172
I915_GTT_PAGE_SIZE - sizeof(*shadow_ring_context));
170173

171174
sr_oa_regs(workload, (u32 *)shadow_ring_context, false);
172-
kunmap(page);
173175

174176
if (IS_RESTORE_INHIBIT(shadow_ring_context->ctx_ctrl.val))
175177
return 0;
@@ -194,11 +196,9 @@ static int populate_shadow_context(struct intel_vgpu_workload *workload)
194196
return -EFAULT;
195197
}
196198

197-
page = i915_gem_object_get_page(ctx_obj, i);
198-
dst = kmap(page);
199+
dst = context_base + (i << I915_GTT_PAGE_SHIFT);
199200
intel_gvt_hypervisor_read_gpa(vgpu, context_gpa, dst,
200201
I915_GTT_PAGE_SIZE);
201-
kunmap(page);
202202
i++;
203203
}
204204
return 0;
@@ -784,9 +784,9 @@ static void update_guest_context(struct intel_vgpu_workload *workload)
784784
{
785785
struct i915_request *rq = workload->req;
786786
struct intel_vgpu *vgpu = workload->vgpu;
787-
struct drm_i915_gem_object *ctx_obj = rq->context->state->obj;
788787
struct execlist_ring_context *shadow_ring_context;
789-
struct page *page;
788+
struct intel_context *ctx = workload->req->context;
789+
void *context_base;
790790
void *src;
791791
unsigned long context_gpa, context_page_num;
792792
int i;
@@ -797,6 +797,8 @@ static void update_guest_context(struct intel_vgpu_workload *workload)
797797
gvt_dbg_sched("ring id %d workload lrca %x\n", rq->engine->id,
798798
workload->ctx_desc.lrca);
799799

800+
GEM_BUG_ON(!intel_context_is_pinned(ctx));
801+
800802
head = workload->rb_head;
801803
tail = workload->rb_tail;
802804
wrap_count = workload->guest_rb_head >> RB_HEAD_WRAP_CNT_OFF;
@@ -821,6 +823,8 @@ static void update_guest_context(struct intel_vgpu_workload *workload)
821823
context_page_num = 19;
822824

823825
i = 2;
826+
context_base = (void *) ctx->lrc_reg_state -
827+
(LRC_STATE_PN << I915_GTT_PAGE_SHIFT);
824828

825829
while (i < context_page_num) {
826830
context_gpa = intel_vgpu_gma_to_gpa(vgpu->gtt.ggtt_mm,
@@ -831,19 +835,16 @@ static void update_guest_context(struct intel_vgpu_workload *workload)
831835
return;
832836
}
833837

834-
page = i915_gem_object_get_page(ctx_obj, i);
835-
src = kmap(page);
838+
src = context_base + (i << I915_GTT_PAGE_SHIFT);
836839
intel_gvt_hypervisor_write_gpa(vgpu, context_gpa, src,
837840
I915_GTT_PAGE_SIZE);
838-
kunmap(page);
839841
i++;
840842
}
841843

842844
intel_gvt_hypervisor_write_gpa(vgpu, workload->ring_context_gpa +
843845
RING_CTX_OFF(ring_header.val), &workload->rb_tail, 4);
844846

845-
page = i915_gem_object_get_page(ctx_obj, LRC_STATE_PN);
846-
shadow_ring_context = kmap(page);
847+
shadow_ring_context = (void *) ctx->lrc_reg_state;
847848

848849
#define COPY_REG(name) \
849850
intel_gvt_hypervisor_write_gpa(vgpu, workload->ring_context_gpa + \
@@ -860,8 +861,6 @@ static void update_guest_context(struct intel_vgpu_workload *workload)
860861
(void *)shadow_ring_context +
861862
sizeof(*shadow_ring_context),
862863
I915_GTT_PAGE_SIZE - sizeof(*shadow_ring_context));
863-
864-
kunmap(page);
865864
}
866865

867866
void intel_vgpu_clean_workloads(struct intel_vgpu *vgpu,

0 commit comments

Comments
 (0)