|
60 | 60 | * and related files, but that will be described in separate chapters.
|
61 | 61 | */
|
62 | 62 |
|
/*
 * Interrupt statistic for PMU. Increments the counter only if the
 * interrupt originated from the GPU so interrupts from a device which
 * shares the interrupt line are not accounted.
 */
static inline void pmu_irq_stats(struct drm_i915_private *i915,
				 irqreturn_t res)
{
	/* Only count interrupts this driver actually handled. */
	if (unlikely(res != IRQ_HANDLED))
		return;

	/*
	 * A clever compiler translates that into INC. A not so clever one
	 * should at least prevent store tearing.
	 */
	WRITE_ONCE(i915->pmu.irq_count, i915->pmu.irq_count + 1);
}
| 80 | + |
63 | 81 | typedef bool (*long_pulse_detect_func)(enum hpd_pin pin, u32 val);
|
64 | 82 |
|
65 | 83 | static const u32 hpd_ilk[HPD_NUM_PINS] = {
|
@@ -1599,6 +1617,8 @@ static irqreturn_t valleyview_irq_handler(int irq, void *arg)
|
1599 | 1617 | valleyview_pipestat_irq_handler(dev_priv, pipe_stats);
|
1600 | 1618 | } while (0);
|
1601 | 1619 |
|
| 1620 | + pmu_irq_stats(dev_priv, ret); |
| 1621 | + |
1602 | 1622 | enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
|
1603 | 1623 |
|
1604 | 1624 | return ret;
|
@@ -1676,6 +1696,8 @@ static irqreturn_t cherryview_irq_handler(int irq, void *arg)
|
1676 | 1696 | valleyview_pipestat_irq_handler(dev_priv, pipe_stats);
|
1677 | 1697 | } while (0);
|
1678 | 1698 |
|
| 1699 | + pmu_irq_stats(dev_priv, ret); |
| 1700 | + |
1679 | 1701 | enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
|
1680 | 1702 |
|
1681 | 1703 | return ret;
|
@@ -2103,6 +2125,8 @@ static irqreturn_t ilk_irq_handler(int irq, void *arg)
|
2103 | 2125 | if (sde_ier)
|
2104 | 2126 | raw_reg_write(regs, SDEIER, sde_ier);
|
2105 | 2127 |
|
| 2128 | + pmu_irq_stats(i915, ret); |
| 2129 | + |
2106 | 2130 | /* IRQs are synced during runtime_suspend, we don't require a wakeref */
|
2107 | 2131 | enable_rpm_wakeref_asserts(&i915->runtime_pm);
|
2108 | 2132 |
|
@@ -2419,6 +2443,8 @@ static irqreturn_t gen8_irq_handler(int irq, void *arg)
|
2419 | 2443 |
|
2420 | 2444 | gen8_master_intr_enable(regs);
|
2421 | 2445 |
|
| 2446 | + pmu_irq_stats(dev_priv, IRQ_HANDLED); |
| 2447 | + |
2422 | 2448 | return IRQ_HANDLED;
|
2423 | 2449 | }
|
2424 | 2450 |
|
@@ -2514,6 +2540,8 @@ __gen11_irq_handler(struct drm_i915_private * const i915,
|
2514 | 2540 |
|
2515 | 2541 | gen11_gu_misc_irq_handler(gt, gu_misc_iir);
|
2516 | 2542 |
|
| 2543 | + pmu_irq_stats(i915, IRQ_HANDLED); |
| 2544 | + |
2517 | 2545 | return IRQ_HANDLED;
|
2518 | 2546 | }
|
2519 | 2547 |
|
@@ -3688,6 +3716,8 @@ static irqreturn_t i8xx_irq_handler(int irq, void *arg)
|
3688 | 3716 | i8xx_pipestat_irq_handler(dev_priv, iir, pipe_stats);
|
3689 | 3717 | } while (0);
|
3690 | 3718 |
|
| 3719 | + pmu_irq_stats(dev_priv, ret); |
| 3720 | + |
3691 | 3721 | enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
|
3692 | 3722 |
|
3693 | 3723 | return ret;
|
@@ -3796,6 +3826,8 @@ static irqreturn_t i915_irq_handler(int irq, void *arg)
|
3796 | 3826 | i915_pipestat_irq_handler(dev_priv, iir, pipe_stats);
|
3797 | 3827 | } while (0);
|
3798 | 3828 |
|
| 3829 | + pmu_irq_stats(dev_priv, ret); |
| 3830 | + |
3799 | 3831 | enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
|
3800 | 3832 |
|
3801 | 3833 | return ret;
|
@@ -3941,6 +3973,8 @@ static irqreturn_t i965_irq_handler(int irq, void *arg)
|
3941 | 3973 | i965_pipestat_irq_handler(dev_priv, iir, pipe_stats);
|
3942 | 3974 | } while (0);
|
3943 | 3975 |
|
| 3976 | + pmu_irq_stats(dev_priv, IRQ_HANDLED); |
| 3977 | + |
3944 | 3978 | enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
|
3945 | 3979 |
|
3946 | 3980 | return ret;
|
|
0 commit comments