@@ -133,6 +133,8 @@ static int populate_shadow_context(struct intel_vgpu_workload *workload)
 	void *dst;
 	void *context_base;
 	unsigned long context_gpa, context_page_num;
+	unsigned long gpa_base; /* first gpa of consecutive GPAs */
+	unsigned long gpa_size; /* size of consecutive GPAs */
 	int i;
 
 	GEM_BUG_ON(!intel_context_is_pinned(ctx));
@@ -186,8 +188,11 @@ static int populate_shadow_context(struct intel_vgpu_workload *workload)
 	if (IS_BROADWELL(gvt->gt->i915) && workload->engine->id == RCS0)
 		context_page_num = 19;
 
-	i = 2;
-	while (i < context_page_num) {
+	/* find consecutive GPAs from gma until the first non-consecutive GPA.
+	 * read from the consecutive GPAs into dst virtual address
+	 */
+	gpa_size = 0;
+	for (i = 2; i < context_page_num; i++) {
 		context_gpa = intel_vgpu_gma_to_gpa(vgpu->gtt.ggtt_mm,
 				(u32)((workload->ctx_desc.lrca + i) <<
 				I915_GTT_PAGE_SHIFT));
@@ -196,10 +201,24 @@ static int populate_shadow_context(struct intel_vgpu_workload *workload)
 			return -EFAULT;
 		}
 
+		if (gpa_size == 0) {
+			gpa_base = context_gpa;
+			dst = context_base + (i << I915_GTT_PAGE_SHIFT);
+		} else if (context_gpa != gpa_base + gpa_size)
+			goto read;
+
+		gpa_size += I915_GTT_PAGE_SIZE;
+
+		if (i == context_page_num - 1)
+			goto read;
+
+		continue;
+
+read:
+		intel_gvt_hypervisor_read_gpa(vgpu, gpa_base, dst, gpa_size);
+		gpa_base = context_gpa;
+		gpa_size = I915_GTT_PAGE_SIZE;
 		dst = context_base + (i << I915_GTT_PAGE_SHIFT);
-		intel_gvt_hypervisor_read_gpa(vgpu, context_gpa, dst,
-				I915_GTT_PAGE_SIZE);
-		i++;
 	}
 	return 0;
 }
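Taken out of the kernel context, the loop above is a run-coalescing pattern: accumulate page-sized GPAs while each new translation extends the current run, and issue one batched hypervisor copy per run. Below is a minimal standalone sketch of that pattern; page_gpas, read_gpa_batched(), and PAGE_SIZE_4K are hypothetical stand-ins for the per-page intel_vgpu_gma_to_gpa() results, intel_gvt_hypervisor_read_gpa(), and I915_GTT_PAGE_SIZE, and a single flush after the loop replaces the goto plumbing.

#include <stdio.h>

#define PAGE_SIZE_4K 0x1000UL

/* hypothetical stand-in for intel_gvt_hypervisor_read_gpa(): one batched copy */
static void read_gpa_batched(unsigned long gpa_base, unsigned long gpa_size)
{
	printf("read %lu page(s) starting at gpa 0x%lx\n",
	       gpa_size / PAGE_SIZE_4K, gpa_base);
}

int main(void)
{
	/* hypothetical per-page translations: pages 0-1 and 2-4 form two runs */
	unsigned long page_gpas[] = { 0x1000, 0x2000, 0x8000, 0x9000, 0xa000 };
	int n = sizeof(page_gpas) / sizeof(page_gpas[0]);
	unsigned long gpa_base = 0, gpa_size = 0;
	int i;

	for (i = 0; i < n; i++) {
		if (gpa_size == 0) {			/* open a new run */
			gpa_base = page_gpas[i];
		} else if (page_gpas[i] != gpa_base + gpa_size) {
			/* run broken: flush it, then restart at this page */
			read_gpa_batched(gpa_base, gpa_size);
			gpa_base = page_gpas[i];
			gpa_size = 0;
		}
		gpa_size += PAGE_SIZE_4K;
	}
	if (gpa_size)					/* flush the final run */
		read_gpa_batched(gpa_base, gpa_size);
	return 0;
}

With the sample translations above, the sketch issues two batched reads (two pages at 0x1000, three pages at 0x8000) instead of five single-page reads.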
@@ -789,6 +808,8 @@ static void update_guest_context(struct intel_vgpu_workload *workload)
 	void *context_base;
 	void *src;
 	unsigned long context_gpa, context_page_num;
+	unsigned long gpa_base; /* first gpa of consecutive GPAs */
+	unsigned long gpa_size; /* size of consecutive GPAs */
 	int i;
 	u32 ring_base;
 	u32 head, tail;
@@ -822,11 +843,14 @@ static void update_guest_context(struct intel_vgpu_workload *workload)
 	if (IS_BROADWELL(rq->i915) && rq->engine->id == RCS0)
 		context_page_num = 19;
 
-	i = 2;
 	context_base = (void *) ctx->lrc_reg_state -
 			(LRC_STATE_PN << I915_GTT_PAGE_SHIFT);
 
-	while (i < context_page_num) {
+	/* find consecutive GPAs from gma until the first non-consecutive GPA.
+	 * write to the consecutive GPAs from src virtual address
+	 */
+	gpa_size = 0;
+	for (i = 2; i < context_page_num; i++) {
 		context_gpa = intel_vgpu_gma_to_gpa(vgpu->gtt.ggtt_mm,
 				(u32)((workload->ctx_desc.lrca + i) <<
 				I915_GTT_PAGE_SHIFT));
@@ -835,10 +859,24 @@ static void update_guest_context(struct intel_vgpu_workload *workload)
 			return;
 		}
 
+		if (gpa_size == 0) {
+			gpa_base = context_gpa;
+			src = context_base + (i << I915_GTT_PAGE_SHIFT);
+		} else if (context_gpa != gpa_base + gpa_size)
+			goto write;
+
+		gpa_size += I915_GTT_PAGE_SIZE;
+
+		if (i == context_page_num - 1)
+			goto write;
+
+		continue;
+
+write:
+		intel_gvt_hypervisor_write_gpa(vgpu, gpa_base, src, gpa_size);
+		gpa_base = context_gpa;
+		gpa_size = I915_GTT_PAGE_SIZE;
 		src = context_base + (i << I915_GTT_PAGE_SHIFT);
-		intel_gvt_hypervisor_write_gpa(vgpu, context_gpa, src,
-				I915_GTT_PAGE_SIZE);
-		i++;
 	}
 
 	intel_gvt_hypervisor_write_gpa(vgpu, workload->ring_context_gpa +
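A note on the control flow shared by both hunks: the read:/write: labels act as flush points. The loop reaches the label either because the newly translated context_gpa no longer extends the current run (the else-if branch) or because the final page has just been folded into the run (the i == context_page_num - 1 check), so contiguous guest pages cost one hypervisor call per run rather than one call per 4 KB page, which is evidently the point of the patch. One edge case seems worth flagging: if the run breaks exactly at the last page, the else-if path flushes only the old run, and the freshly re-seeded single-page run is never issued before the loop exits; a trailing flush after the loop, as in the standalone sketch above, would cover that case as well.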