@@ -94,9 +94,10 @@ static void i965_write_fence_reg(struct drm_i915_fence_reg *fence,
 	}
 
 	if (!pipelined) {
-		struct drm_i915_private *dev_priv = fence->i915;
+		struct intel_uncore *uncore = &fence->i915->uncore;
 
-		/* To w/a incoherency with non-atomic 64-bit register updates,
+		/*
+		 * To w/a incoherency with non-atomic 64-bit register updates,
 		 * we split the 64-bit update into two 32-bit writes. In order
 		 * for a partial fence not to be evaluated between writes, we
 		 * precede the update with write to turn off the fence register,
@@ -105,12 +106,12 @@ static void i965_write_fence_reg(struct drm_i915_fence_reg *fence,
 		 * For extra levels of paranoia, we make sure each step lands
 		 * before applying the next step.
 		 */
-		I915_WRITE(fence_reg_lo, 0);
-		POSTING_READ(fence_reg_lo);
+		intel_uncore_write_fw(uncore, fence_reg_lo, 0);
+		intel_uncore_posting_read_fw(uncore, fence_reg_lo);
 
-		I915_WRITE(fence_reg_hi, upper_32_bits(val));
-		I915_WRITE(fence_reg_lo, lower_32_bits(val));
-		POSTING_READ(fence_reg_lo);
+		intel_uncore_write_fw(uncore, fence_reg_hi, upper_32_bits(val));
+		intel_uncore_write_fw(uncore, fence_reg_lo, lower_32_bits(val));
+		intel_uncore_posting_read_fw(uncore, fence_reg_lo);
 	}
 }
 
@@ -146,11 +147,11 @@ static void i915_write_fence_reg(struct drm_i915_fence_reg *fence,
 	}
 
 	if (!pipelined) {
-		struct drm_i915_private *dev_priv = fence->i915;
+		struct intel_uncore *uncore = &fence->i915->uncore;
 		i915_reg_t reg = FENCE_REG(fence->id);
 
-		I915_WRITE(reg, val);
-		POSTING_READ(reg);
+		intel_uncore_write_fw(uncore, reg, val);
+		intel_uncore_posting_read_fw(uncore, reg);
 	}
 }
 
@@ -178,18 +179,19 @@ static void i830_write_fence_reg(struct drm_i915_fence_reg *fence,
 	}
 
 	if (!pipelined) {
-		struct drm_i915_private *dev_priv = fence->i915;
+		struct intel_uncore *uncore = &fence->i915->uncore;
 		i915_reg_t reg = FENCE_REG(fence->id);
 
-		I915_WRITE(reg, val);
-		POSTING_READ(reg);
+		intel_uncore_write_fw(uncore, reg, val);
+		intel_uncore_posting_read_fw(uncore, reg);
 	}
 }
 
 static void fence_write(struct drm_i915_fence_reg *fence,
 			struct i915_vma *vma)
 {
-	/* Previous access through the fence register is marshalled by
+	/*
+	 * Previous access through the fence register is marshalled by
 	 * the mb() inside the fault handlers (i915_gem_release_mmaps)
 	 * and explicitly managed for internal users.
 	 */
@@ -201,7 +203,8 @@ static void fence_write(struct drm_i915_fence_reg *fence,
 	else
 		i965_write_fence_reg(fence, vma);
 
-	/* Access through the fenced region afterwards is
+	/*
+	 * Access through the fenced region afterwards is
 	 * ordered by the posting reads whilst writing the registers.
 	 */
 
@@ -308,11 +311,11 @@ int i915_vma_put_fence(struct i915_vma *vma)
 	return fence_update(fence, NULL);
 }
 
-static struct drm_i915_fence_reg *fence_find(struct drm_i915_private *dev_priv)
+static struct drm_i915_fence_reg *fence_find(struct drm_i915_private *i915)
 {
 	struct drm_i915_fence_reg *fence;
 
-	list_for_each_entry(fence, &dev_priv->mm.fence_list, link) {
+	list_for_each_entry(fence, &i915->mm.fence_list, link) {
 		GEM_BUG_ON(fence->vma && fence->vma->fence != fence);
 
 		if (fence->pin_count)
@@ -322,7 +325,7 @@ static struct drm_i915_fence_reg *fence_find(struct drm_i915_private *dev_priv)
 	}
 
 	/* Wait for completion of pending flips which consume fences */
-	if (intel_has_pending_fb_unpin(dev_priv))
+	if (intel_has_pending_fb_unpin(i915))
 		return ERR_PTR(-EAGAIN);
 
 	return ERR_PTR(-EDEADLK);
@@ -353,7 +356,8 @@ i915_vma_pin_fence(struct i915_vma *vma)
 	struct i915_vma *set = i915_gem_object_is_tiled(vma->obj) ? vma : NULL;
 	int err;
 
-	/* Note that we revoke fences on runtime suspend. Therefore the user
+	/*
+	 * Note that we revoke fences on runtime suspend. Therefore the user
 	 * must keep the device awake whilst using the fence.
 	 */
 	assert_rpm_wakelock_held(vma->vm->i915);
@@ -395,28 +399,28 @@ i915_vma_pin_fence(struct i915_vma *vma)
 
 /**
  * i915_reserve_fence - Reserve a fence for vGPU
- * @dev_priv: i915 device private
+ * @i915: i915 device private
  *
  * This function walks the fence regs looking for a free one and remove
  * it from the fence_list. It is used to reserve fence for vGPU to use.
  */
 struct drm_i915_fence_reg *
-i915_reserve_fence(struct drm_i915_private *dev_priv)
+i915_reserve_fence(struct drm_i915_private *i915)
 {
 	struct drm_i915_fence_reg *fence;
 	int count;
 	int ret;
 
-	lockdep_assert_held(&dev_priv->drm.struct_mutex);
+	lockdep_assert_held(&i915->drm.struct_mutex);
 
 	/* Keep at least one fence available for the display engine. */
 	count = 0;
-	list_for_each_entry(fence, &dev_priv->mm.fence_list, link)
+	list_for_each_entry(fence, &i915->mm.fence_list, link)
 		count += !fence->pin_count;
 	if (count <= 1)
 		return ERR_PTR(-ENOSPC);
 
-	fence = fence_find(dev_priv);
+	fence = fence_find(i915);
 	if (IS_ERR(fence))
 		return fence;
 
@@ -446,19 +450,19 @@ void i915_unreserve_fence(struct drm_i915_fence_reg *fence)
 
 /**
  * i915_gem_restore_fences - restore fence state
- * @dev_priv: i915 device private
+ * @i915: i915 device private
  *
  * Restore the hw fence state to match the software tracking again, to be called
 * after a gpu reset and on resume. Note that on runtime suspend we only cancel
 * the fences, to be reacquired by the user later.
 */
-void i915_gem_restore_fences(struct drm_i915_private *dev_priv)
+void i915_gem_restore_fences(struct drm_i915_private *i915)
 {
 	int i;
 
 	rcu_read_lock(); /* keep obj alive as we dereference */
-	for (i = 0; i < dev_priv->num_fence_regs; i++) {
-		struct drm_i915_fence_reg *reg = &dev_priv->fence_regs[i];
+	for (i = 0; i < i915->num_fence_regs; i++) {
+		struct drm_i915_fence_reg *reg = &i915->fence_regs[i];
 		struct i915_vma *vma = READ_ONCE(reg->vma);
 
 		GEM_BUG_ON(vma && vma->fence != reg);
@@ -525,18 +529,19 @@ void i915_gem_restore_fences(struct drm_i915_private *dev_priv)
 
 /**
  * i915_gem_detect_bit_6_swizzle - detect bit 6 swizzling pattern
- * @dev_priv: i915 device private
+ * @i915: i915 device private
  *
  * Detects bit 6 swizzling of address lookup between IGD access and CPU
  * access through main memory.
  */
 void
-i915_gem_detect_bit_6_swizzle(struct drm_i915_private *dev_priv)
+i915_gem_detect_bit_6_swizzle(struct drm_i915_private *i915)
 {
+	struct intel_uncore *uncore = &i915->uncore;
 	u32 swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN;
 	u32 swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN;
 
-	if (INTEL_GEN(dev_priv) >= 8 || IS_VALLEYVIEW(dev_priv)) {
+	if (INTEL_GEN(i915) >= 8 || IS_VALLEYVIEW(i915)) {
 		/*
 		 * On BDW+, swizzling is not used. We leave the CPU memory
 		 * controller in charge of optimizing memory accesses without
@@ -546,9 +551,9 @@ i915_gem_detect_bit_6_swizzle(struct drm_i915_private *dev_priv)
 		 */
 		swizzle_x = I915_BIT_6_SWIZZLE_NONE;
 		swizzle_y = I915_BIT_6_SWIZZLE_NONE;
-	} else if (INTEL_GEN(dev_priv) >= 6) {
-		if (dev_priv->preserve_bios_swizzle) {
-			if (I915_READ(DISP_ARB_CTL) &
+	} else if (INTEL_GEN(i915) >= 6) {
+		if (i915->preserve_bios_swizzle) {
+			if (intel_uncore_read(uncore, DISP_ARB_CTL) &
 			    DISP_TILE_SURFACE_SWIZZLING) {
 				swizzle_x = I915_BIT_6_SWIZZLE_9_10;
 				swizzle_y = I915_BIT_6_SWIZZLE_9;
@@ -558,15 +563,17 @@ i915_gem_detect_bit_6_swizzle(struct drm_i915_private *dev_priv)
 			}
 		} else {
 			u32 dimm_c0, dimm_c1;
-			dimm_c0 = I915_READ(MAD_DIMM_C0);
-			dimm_c1 = I915_READ(MAD_DIMM_C1);
+			dimm_c0 = intel_uncore_read(uncore, MAD_DIMM_C0);
+			dimm_c1 = intel_uncore_read(uncore, MAD_DIMM_C1);
 			dimm_c0 &= MAD_DIMM_A_SIZE_MASK | MAD_DIMM_B_SIZE_MASK;
 			dimm_c1 &= MAD_DIMM_A_SIZE_MASK | MAD_DIMM_B_SIZE_MASK;
-			/* Enable swizzling when the channels are populated
+			/*
+			 * Enable swizzling when the channels are populated
 			 * with identically sized dimms. We don't need to check
 			 * the 3rd channel because no cpu with gpu attached
 			 * ships in that configuration. Also, swizzling only
-			 * makes sense for 2 channels anyway. */
+			 * makes sense for 2 channels anyway.
+			 */
 			if (dimm_c0 == dimm_c1) {
 				swizzle_x = I915_BIT_6_SWIZZLE_9_10;
 				swizzle_y = I915_BIT_6_SWIZZLE_9;
@@ -575,20 +582,23 @@ i915_gem_detect_bit_6_swizzle(struct drm_i915_private *dev_priv)
 				swizzle_y = I915_BIT_6_SWIZZLE_NONE;
 			}
 		}
-	} else if (IS_GEN(dev_priv, 5)) {
-		/* On Ironlake whatever DRAM config, GPU always do
+	} else if (IS_GEN(i915, 5)) {
+		/*
+		 * On Ironlake whatever DRAM config, GPU always do
 		 * same swizzling setup.
 		 */
 		swizzle_x = I915_BIT_6_SWIZZLE_9_10;
 		swizzle_y = I915_BIT_6_SWIZZLE_9;
-	} else if (IS_GEN(dev_priv, 2)) {
-		/* As far as we know, the 865 doesn't have these bit 6
+	} else if (IS_GEN(i915, 2)) {
+		/*
+		 * As far as we know, the 865 doesn't have these bit 6
 		 * swizzling issues.
 		 */
 		swizzle_x = I915_BIT_6_SWIZZLE_NONE;
 		swizzle_y = I915_BIT_6_SWIZZLE_NONE;
-	} else if (IS_G45(dev_priv) || IS_I965G(dev_priv) || IS_G33(dev_priv)) {
-		/* The 965, G33, and newer, have a very flexible memory
+	} else if (IS_G45(i915) || IS_I965G(i915) || IS_G33(i915)) {
+		/*
+		 * The 965, G33, and newer, have a very flexible memory
 		 * configuration. It will enable dual-channel mode
 		 * (interleaving) on as much memory as it can, and the GPU
 		 * will additionally sometimes enable different bit 6
@@ -614,22 +624,23 @@ i915_gem_detect_bit_6_swizzle(struct drm_i915_private *dev_priv)
 		 * banks of memory are paired and unswizzled on the
 		 * uneven portion, so leave that as unknown.
 		 */
-		if (I915_READ16(C0DRB3) == I915_READ16(C1DRB3)) {
+		if (intel_uncore_read(uncore, C0DRB3) ==
+		    intel_uncore_read(uncore, C1DRB3)) {
 			swizzle_x = I915_BIT_6_SWIZZLE_9_10;
 			swizzle_y = I915_BIT_6_SWIZZLE_9;
 		}
 	} else {
-		u32 dcc;
+		u32 dcc = intel_uncore_read(uncore, DCC);
 
-		/* On 9xx chipsets, channel interleave by the CPU is
+		/*
+		 * On 9xx chipsets, channel interleave by the CPU is
 		 * determined by DCC. For single-channel, neither the CPU
 		 * nor the GPU do swizzling. For dual channel interleaved,
 		 * the GPU's interleave is bit 9 and 10 for X tiled, and bit
 		 * 9 for Y tiled. The CPU's interleave is independent, and
 		 * can be based on either bit 11 (haven't seen this yet) or
 		 * bit 17 (common).
 		 */
-		dcc = I915_READ(DCC);
 		switch (dcc & DCC_ADDRESSING_MODE_MASK) {
 		case DCC_ADDRESSING_MODE_SINGLE_CHANNEL:
 		case DCC_ADDRESSING_MODE_DUAL_CHANNEL_ASYMMETRIC:
@@ -638,7 +649,8 @@ i915_gem_detect_bit_6_swizzle(struct drm_i915_private *dev_priv)
 			break;
 		case DCC_ADDRESSING_MODE_DUAL_CHANNEL_INTERLEAVED:
 			if (dcc & DCC_CHANNEL_XOR_DISABLE) {
-				/* This is the base swizzling by the GPU for
+				/*
+				 * This is the base swizzling by the GPU for
 				 * tiled buffers.
 				 */
 				swizzle_x = I915_BIT_6_SWIZZLE_9_10;
@@ -656,8 +668,8 @@ i915_gem_detect_bit_6_swizzle(struct drm_i915_private *dev_priv)
 		}
 
 		/* check for L-shaped memory aka modified enhanced addressing */
-		if (IS_GEN(dev_priv, 4) &&
-		    !(I915_READ(DCC2) & DCC2_MODIFIED_ENHANCED_DISABLE)) {
+		if (IS_GEN(i915, 4) &&
+		    !(intel_uncore_read(uncore, DCC2) & DCC2_MODIFIED_ENHANCED_DISABLE)) {
 			swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN;
 			swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN;
 		}
@@ -672,7 +684,8 @@ i915_gem_detect_bit_6_swizzle(struct drm_i915_private *dev_priv)
 
 	if (swizzle_x == I915_BIT_6_SWIZZLE_UNKNOWN ||
 	    swizzle_y == I915_BIT_6_SWIZZLE_UNKNOWN) {
-		/* Userspace likes to explode if it sees unknown swizzling,
+		/*
+		 * Userspace likes to explode if it sees unknown swizzling,
 		 * so lie. We will finish the lie when reporting through
 		 * the get-tiling-ioctl by reporting the physical swizzle
 		 * mode as unknown instead.
@@ -681,13 +694,13 @@ i915_gem_detect_bit_6_swizzle(struct drm_i915_private *dev_priv)
 		 * bit17 dependent, and so we need to also prevent the pages
 		 * from being moved.
 		 */
-		dev_priv->quirks |= QUIRK_PIN_SWIZZLED_PAGES;
+		i915->quirks |= QUIRK_PIN_SWIZZLED_PAGES;
 		swizzle_x = I915_BIT_6_SWIZZLE_NONE;
 		swizzle_y = I915_BIT_6_SWIZZLE_NONE;
 	}
 
-	dev_priv->mm.bit_6_swizzle_x = swizzle_x;
-	dev_priv->mm.bit_6_swizzle_y = swizzle_y;
+	i915->mm.bit_6_swizzle_x = swizzle_x;
+	i915->mm.bit_6_swizzle_y = swizzle_y;
 }
 
 /*