@@ -128,16 +128,19 @@ static int populate_shadow_context(struct intel_vgpu_workload *workload)
 {
 	struct intel_vgpu *vgpu = workload->vgpu;
 	struct intel_gvt *gvt = vgpu->gvt;
-	struct drm_i915_gem_object *ctx_obj =
-		workload->req->context->state->obj;
+	struct intel_context *ctx = workload->req->context;
 	struct execlist_ring_context *shadow_ring_context;
-	struct page *page;
 	void *dst;
+	void *context_base;
 	unsigned long context_gpa, context_page_num;
 	int i;
 
-	page = i915_gem_object_get_page(ctx_obj, LRC_STATE_PN);
-	shadow_ring_context = kmap(page);
+	GEM_BUG_ON(!intel_context_is_pinned(ctx));
+
+	context_base = (void *) ctx->lrc_reg_state -
+			(LRC_STATE_PN << I915_GTT_PAGE_SHIFT);
+
+	shadow_ring_context = (void *) ctx->lrc_reg_state;
 
 	sr_oa_regs(workload, (u32 *)shadow_ring_context, true);
 #define COPY_REG(name) \
@@ -169,7 +172,6 @@ static int populate_shadow_context(struct intel_vgpu_workload *workload)
 			I915_GTT_PAGE_SIZE - sizeof(*shadow_ring_context));
 
 	sr_oa_regs(workload, (u32 *)shadow_ring_context, false);
-	kunmap(page);
 
 	if (IS_RESTORE_INHIBIT(shadow_ring_context->ctx_ctrl.val))
 		return 0;
@@ -194,11 +196,9 @@ static int populate_shadow_context(struct intel_vgpu_workload *workload)
 			return -EFAULT;
 		}
 
-		page = i915_gem_object_get_page(ctx_obj, i);
-		dst = kmap(page);
+		dst = context_base + (i << I915_GTT_PAGE_SHIFT);
 		intel_gvt_hypervisor_read_gpa(vgpu, context_gpa, dst,
 				I915_GTT_PAGE_SIZE);
-		kunmap(page);
 		i++;
 	}
 	return 0;
@@ -784,9 +784,9 @@ static void update_guest_context(struct intel_vgpu_workload *workload)
 {
 	struct i915_request *rq = workload->req;
 	struct intel_vgpu *vgpu = workload->vgpu;
-	struct drm_i915_gem_object *ctx_obj = rq->context->state->obj;
 	struct execlist_ring_context *shadow_ring_context;
-	struct page *page;
+	struct intel_context *ctx = workload->req->context;
+	void *context_base;
 	void *src;
 	unsigned long context_gpa, context_page_num;
 	int i;
@@ -797,6 +797,8 @@ static void update_guest_context(struct intel_vgpu_workload *workload)
 	gvt_dbg_sched("ring id %d workload lrca %x\n", rq->engine->id,
 		      workload->ctx_desc.lrca);
 
+	GEM_BUG_ON(!intel_context_is_pinned(ctx));
+
 	head = workload->rb_head;
 	tail = workload->rb_tail;
 	wrap_count = workload->guest_rb_head >> RB_HEAD_WRAP_CNT_OFF;
@@ -821,6 +823,8 @@ static void update_guest_context(struct intel_vgpu_workload *workload)
 		context_page_num = 19;
 
 	i = 2;
+	context_base = (void *) ctx->lrc_reg_state -
+			(LRC_STATE_PN << I915_GTT_PAGE_SHIFT);
 
 	while (i < context_page_num) {
 		context_gpa = intel_vgpu_gma_to_gpa(vgpu->gtt.ggtt_mm,
@@ -831,19 +835,16 @@ static void update_guest_context(struct intel_vgpu_workload *workload)
 			return;
 		}
 
-		page = i915_gem_object_get_page(ctx_obj, i);
-		src = kmap(page);
+		src = context_base + (i << I915_GTT_PAGE_SHIFT);
 		intel_gvt_hypervisor_write_gpa(vgpu, context_gpa, src,
 				I915_GTT_PAGE_SIZE);
-		kunmap(page);
 		i++;
 	}
 
 	intel_gvt_hypervisor_write_gpa(vgpu, workload->ring_context_gpa +
 		RING_CTX_OFF(ring_header.val), &workload->rb_tail, 4);
 
-	page = i915_gem_object_get_page(ctx_obj, LRC_STATE_PN);
-	shadow_ring_context = kmap(page);
+	shadow_ring_context = (void *) ctx->lrc_reg_state;
 
 #define COPY_REG(name) \
 	intel_gvt_hypervisor_write_gpa(vgpu, workload->ring_context_gpa + \
@@ -860,8 +861,6 @@ static void update_guest_context(struct intel_vgpu_workload *workload)
 			(void *)shadow_ring_context +
 			sizeof(*shadow_ring_context),
 			I915_GTT_PAGE_SIZE - sizeof(*shadow_ring_context));
-
-	kunmap(page);
 }
 
 void intel_vgpu_clean_workloads(struct intel_vgpu *vgpu,
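
The patch drops the per-page i915_gem_object_get_page()/kmap()/kunmap() sequence and instead indexes directly into the already-pinned context image: ctx->lrc_reg_state points at page LRC_STATE_PN of that mapping, so backing out by LRC_STATE_PN pages gives the base of the image, and page i then sits at context_base + (i << I915_GTT_PAGE_SHIFT). Below is a minimal user-space sketch of just that pointer arithmetic; the FAKE_* constants and the malloc'd buffer are stand-ins for illustration only, not the i915 definitions (the example also relies on GCC/Clang's void-pointer arithmetic, as the kernel code does).

#include <stdio.h>
#include <stdlib.h>

/* Stand-in values for illustration only; the real constants come from
 * the i915 headers. */
#define FAKE_GTT_PAGE_SHIFT	12			/* 4 KiB pages */
#define FAKE_GTT_PAGE_SIZE	(1UL << FAKE_GTT_PAGE_SHIFT)
#define FAKE_LRC_STATE_PN	1			/* ring-context page index */
#define FAKE_CTX_PAGES		20			/* size of the context image */

int main(void)
{
	/* Pretend this is the pinned, contiguously mapped context image. */
	void *ctx_image = malloc(FAKE_CTX_PAGES * FAKE_GTT_PAGE_SIZE);
	/* lrc_reg_state points at the ring-context page inside that image. */
	void *lrc_reg_state = ctx_image + (FAKE_LRC_STATE_PN << FAKE_GTT_PAGE_SHIFT);

	/* Back out to the base of the image, as the patch does. */
	void *context_base = lrc_reg_state - (FAKE_LRC_STATE_PN << FAKE_GTT_PAGE_SHIFT);

	/* Page i of the context image is plain pointer arithmetic,
	 * with no per-page kmap()/kunmap(). */
	for (int i = 2; i < FAKE_CTX_PAGES; i++) {
		void *dst = context_base + (i << FAKE_GTT_PAGE_SHIFT);
		printf("page %2d at offset %#lx\n", i,
		       (unsigned long)(dst - ctx_image));
	}

	free(ctx_image);
	return 0;
}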