 static int alloc_gm(struct intel_vgpu *vgpu, bool high_gm)
 {
 	struct intel_gvt *gvt = vgpu->gvt;
-	struct drm_i915_private *dev_priv = gvt->dev_priv;
+	struct intel_gt *gt = gvt->gt;
 	unsigned int flags;
 	u64 start, end, size;
 	struct drm_mm_node *node;
@@ -61,14 +61,14 @@ static int alloc_gm(struct intel_vgpu *vgpu, bool high_gm)
 		flags = PIN_MAPPABLE;
 	}
 
-	mutex_lock(&dev_priv->ggtt.vm.mutex);
-	mmio_hw_access_pre(dev_priv);
-	ret = i915_gem_gtt_insert(&dev_priv->ggtt.vm, node,
+	mutex_lock(&gt->ggtt->vm.mutex);
+	mmio_hw_access_pre(gt);
+	ret = i915_gem_gtt_insert(&gt->ggtt->vm, node,
 				  size, I915_GTT_PAGE_SIZE,
 				  I915_COLOR_UNEVICTABLE,
 				  start, end, flags);
-	mmio_hw_access_post(dev_priv);
-	mutex_unlock(&dev_priv->ggtt.vm.mutex);
+	mmio_hw_access_post(gt);
+	mutex_unlock(&gt->ggtt->vm.mutex);
 	if (ret)
 		gvt_err("fail to alloc %s gm space from host\n",
 			high_gm ? "high" : "low");
@@ -79,7 +79,7 @@ static int alloc_gm(struct intel_vgpu *vgpu, bool high_gm)
 static int alloc_vgpu_gm(struct intel_vgpu *vgpu)
 {
 	struct intel_gvt *gvt = vgpu->gvt;
-	struct drm_i915_private *dev_priv = gvt->dev_priv;
+	struct intel_gt *gt = gvt->gt;
 	int ret;
 
 	ret = alloc_gm(vgpu, false);
@@ -98,20 +98,21 @@ static int alloc_vgpu_gm(struct intel_vgpu *vgpu)
 
 	return 0;
 out_free_aperture:
-	mutex_lock(&dev_priv->ggtt.vm.mutex);
+	mutex_lock(&gt->ggtt->vm.mutex);
 	drm_mm_remove_node(&vgpu->gm.low_gm_node);
-	mutex_unlock(&dev_priv->ggtt.vm.mutex);
+	mutex_unlock(&gt->ggtt->vm.mutex);
 	return ret;
 }
 
 static void free_vgpu_gm(struct intel_vgpu *vgpu)
 {
-	struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
+	struct intel_gvt *gvt = vgpu->gvt;
+	struct intel_gt *gt = gvt->gt;
 
-	mutex_lock(&dev_priv->ggtt.vm.mutex);
+	mutex_lock(&gt->ggtt->vm.mutex);
 	drm_mm_remove_node(&vgpu->gm.low_gm_node);
 	drm_mm_remove_node(&vgpu->gm.high_gm_node);
-	mutex_unlock(&dev_priv->ggtt.vm.mutex);
+	mutex_unlock(&gt->ggtt->vm.mutex);
 }
 
 /**
@@ -128,28 +129,29 @@ void intel_vgpu_write_fence(struct intel_vgpu *vgpu,
 		u32 fence, u64 value)
 {
 	struct intel_gvt *gvt = vgpu->gvt;
-	struct drm_i915_private *dev_priv = gvt->dev_priv;
+	struct drm_i915_private *i915 = gvt->gt->i915;
+	struct intel_uncore *uncore = gvt->gt->uncore;
 	struct i915_fence_reg *reg;
 	i915_reg_t fence_reg_lo, fence_reg_hi;
 
-	assert_rpm_wakelock_held(&dev_priv->runtime_pm);
+	assert_rpm_wakelock_held(uncore->rpm);
 
-	if (drm_WARN_ON(&dev_priv->drm, fence >= vgpu_fence_sz(vgpu)))
+	if (drm_WARN_ON(&i915->drm, fence >= vgpu_fence_sz(vgpu)))
 		return;
 
 	reg = vgpu->fence.regs[fence];
-	if (drm_WARN_ON(&dev_priv->drm, !reg))
+	if (drm_WARN_ON(&i915->drm, !reg))
 		return;
 
 	fence_reg_lo = FENCE_REG_GEN6_LO(reg->id);
 	fence_reg_hi = FENCE_REG_GEN6_HI(reg->id);
 
-	I915_WRITE(fence_reg_lo, 0);
-	POSTING_READ(fence_reg_lo);
+	intel_uncore_write(uncore, fence_reg_lo, 0);
+	intel_uncore_posting_read(uncore, fence_reg_lo);
 
-	I915_WRITE(fence_reg_hi, upper_32_bits(value));
-	I915_WRITE(fence_reg_lo, lower_32_bits(value));
-	POSTING_READ(fence_reg_lo);
+	intel_uncore_write(uncore, fence_reg_hi, upper_32_bits(value));
+	intel_uncore_write(uncore, fence_reg_lo, lower_32_bits(value));
+	intel_uncore_posting_read(uncore, fence_reg_lo);
 }
 
 static void _clear_vgpu_fence(struct intel_vgpu *vgpu)
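
The intel_vgpu_write_fence() hunk above trades the implicit-dev_priv I915_WRITE()/POSTING_READ() macros for the explicit intel_uncore_* helpers. A minimal sketch of the pattern in isolation (the helper name fence_write_sketch is illustrative only, not part of the patch):

/* Sketch only: the explicit-uncore MMIO pattern adopted above. */
static void fence_write_sketch(struct intel_uncore *uncore,
			       i915_reg_t lo, i915_reg_t hi, u64 value)
{
	/*
	 * Old style relied on a dev_priv in scope:
	 *   I915_WRITE(lo, 0); POSTING_READ(lo);
	 * New style names the target uncore explicitly:
	 */
	intel_uncore_write(uncore, lo, 0);
	intel_uncore_posting_read(uncore, lo);	/* flush before reprogramming */

	intel_uncore_write(uncore, hi, upper_32_bits(value));
	intel_uncore_write(uncore, lo, lower_32_bits(value));
	intel_uncore_posting_read(uncore, lo);
}
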
@@ -163,42 +165,43 @@ static void _clear_vgpu_fence(struct intel_vgpu *vgpu)
 static void free_vgpu_fence(struct intel_vgpu *vgpu)
 {
 	struct intel_gvt *gvt = vgpu->gvt;
-	struct drm_i915_private *dev_priv = gvt->dev_priv;
+	struct intel_uncore *uncore = gvt->gt->uncore;
 	struct i915_fence_reg *reg;
+	intel_wakeref_t wakeref;
 	u32 i;
 
-	if (drm_WARN_ON(&dev_priv->drm, !vgpu_fence_sz(vgpu)))
+	if (drm_WARN_ON(&gvt->gt->i915->drm, !vgpu_fence_sz(vgpu)))
 		return;
 
-	intel_runtime_pm_get(&dev_priv->runtime_pm);
+	wakeref = intel_runtime_pm_get(uncore->rpm);
 
-	mutex_lock(&dev_priv->ggtt.vm.mutex);
+	mutex_lock(&gvt->gt->ggtt->vm.mutex);
 	_clear_vgpu_fence(vgpu);
 	for (i = 0; i < vgpu_fence_sz(vgpu); i++) {
 		reg = vgpu->fence.regs[i];
 		i915_unreserve_fence(reg);
 		vgpu->fence.regs[i] = NULL;
 	}
-	mutex_unlock(&dev_priv->ggtt.vm.mutex);
+	mutex_unlock(&gvt->gt->ggtt->vm.mutex);
 
-	intel_runtime_pm_put_unchecked(&dev_priv->runtime_pm);
+	intel_runtime_pm_put(uncore->rpm, wakeref);
 }
 
 static int alloc_vgpu_fence(struct intel_vgpu *vgpu)
 {
 	struct intel_gvt *gvt = vgpu->gvt;
-	struct drm_i915_private *dev_priv = gvt->dev_priv;
-	struct intel_runtime_pm *rpm = &dev_priv->runtime_pm;
+	struct intel_uncore *uncore = gvt->gt->uncore;
 	struct i915_fence_reg *reg;
+	intel_wakeref_t wakeref;
 	int i;
 
-	intel_runtime_pm_get(rpm);
+	wakeref = intel_runtime_pm_get(uncore->rpm);
 
 	/* Request fences from host */
-	mutex_lock(&dev_priv->ggtt.vm.mutex);
+	mutex_lock(&gvt->gt->ggtt->vm.mutex);
 
 	for (i = 0; i < vgpu_fence_sz(vgpu); i++) {
-		reg = i915_reserve_fence(&dev_priv->ggtt);
+		reg = i915_reserve_fence(gvt->gt->ggtt);
 		if (IS_ERR(reg))
 			goto out_free_fence;
 
@@ -207,9 +210,10 @@ static int alloc_vgpu_fence(struct intel_vgpu *vgpu)
 
 	_clear_vgpu_fence(vgpu);
 
-	mutex_unlock(&dev_priv->ggtt.vm.mutex);
-	intel_runtime_pm_put_unchecked(rpm);
+	mutex_unlock(&gvt->gt->ggtt->vm.mutex);
+	intel_runtime_pm_put(uncore->rpm, wakeref);
 	return 0;
+
 out_free_fence:
 	gvt_vgpu_err("Failed to alloc fences\n");
 	/* Return fences to host, if fail */
@@ -220,8 +224,8 @@ static int alloc_vgpu_fence(struct intel_vgpu *vgpu)
 		i915_unreserve_fence(reg);
 		vgpu->fence.regs[i] = NULL;
 	}
-	mutex_unlock(&dev_priv->ggtt.vm.mutex);
-	intel_runtime_pm_put_unchecked(rpm);
+	mutex_unlock(&gvt->gt->ggtt->vm.mutex);
+	intel_runtime_pm_put_unchecked(uncore->rpm);
 	return -ENOSPC;
 }
 
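
Both fence paths above also switch to the wakeref-tracked runtime-PM API: intel_runtime_pm_get() returns an intel_wakeref_t cookie that is handed back to intel_runtime_pm_put(), so debug builds can match every get with its put. A minimal sketch of the pairing, assuming a gvt pointer is in scope (the helper name is illustrative, not part of the patch):

/* Sketch only: the tracked runtime-PM pairing used by the fence paths above. */
static void fence_pm_sketch(struct intel_gvt *gvt)
{
	struct intel_uncore *uncore = gvt->gt->uncore;
	intel_wakeref_t wakeref;

	wakeref = intel_runtime_pm_get(uncore->rpm);	/* keep the device awake */

	mutex_lock(&gvt->gt->ggtt->vm.mutex);
	/* ... reserve or release fence registers here ... */
	mutex_unlock(&gvt->gt->ggtt->vm.mutex);

	/*
	 * Returning the cookie lets the wakeref tracking verify the balance;
	 * intel_runtime_pm_put_unchecked() (still used on the error path
	 * above) drops the reference without a cookie.
	 */
	intel_runtime_pm_put(uncore->rpm, wakeref);
}
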
@@ -315,11 +319,11 @@ void intel_vgpu_free_resource(struct intel_vgpu *vgpu)
  */
 void intel_vgpu_reset_resource(struct intel_vgpu *vgpu)
 {
-	struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
+	struct intel_gvt *gvt = vgpu->gvt;
+	intel_wakeref_t wakeref;
 
-	intel_runtime_pm_get(&dev_priv->runtime_pm);
-	_clear_vgpu_fence(vgpu);
-	intel_runtime_pm_put_unchecked(&dev_priv->runtime_pm);
+	with_intel_runtime_pm(gvt->gt->uncore->rpm, wakeref)
+		_clear_vgpu_fence(vgpu);
 }
 
 /**
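
with_intel_runtime_pm() in the last hunk scopes the wakeref to the statement it guards, replacing the open-coded get/put pair it removes. A rough open-coded equivalent, for illustration only (the helper name is hypothetical):

/* Sketch only: approximate expansion of the with_intel_runtime_pm() block. */
static void reset_resource_sketch(struct intel_vgpu *vgpu)
{
	struct intel_gvt *gvt = vgpu->gvt;
	intel_wakeref_t wakeref;

	wakeref = intel_runtime_pm_get(gvt->gt->uncore->rpm);
	if (wakeref) {
		_clear_vgpu_fence(vgpu);	/* runs with the device held awake */
		intel_runtime_pm_put(gvt->gt->uncore->rpm, wakeref);
	}
}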