@@ -1452,16 +1452,16 @@ static void ufshcd_clk_scaling_suspend_work(struct work_struct *work)
 {
 	struct ufs_hba *hba = container_of(work, struct ufs_hba,
 					   clk_scaling.suspend_work);
-	unsigned long irq_flags;
 
-	spin_lock_irqsave(hba->host->host_lock, irq_flags);
-	if (hba->clk_scaling.active_reqs || hba->clk_scaling.is_suspended) {
-		spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
-		return;
+	scoped_guard(spinlock_irqsave, &hba->clk_scaling.lock)
+	{
+		if (hba->clk_scaling.active_reqs ||
+		    hba->clk_scaling.is_suspended)
+			return;
+
+		hba->clk_scaling.is_suspended = true;
+		hba->clk_scaling.window_start_t = 0;
 	}
-	hba->clk_scaling.is_suspended = true;
-	hba->clk_scaling.window_start_t = 0;
-	spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
 
 	devfreq_suspend_device(hba->devfreq);
 }
@@ -1470,15 +1470,13 @@ static void ufshcd_clk_scaling_resume_work(struct work_struct *work)
 {
 	struct ufs_hba *hba = container_of(work, struct ufs_hba,
 					   clk_scaling.resume_work);
-	unsigned long irq_flags;
 
-	spin_lock_irqsave(hba->host->host_lock, irq_flags);
-	if (!hba->clk_scaling.is_suspended) {
-		spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
-		return;
+	scoped_guard(spinlock_irqsave, &hba->clk_scaling.lock)
+	{
+		if (!hba->clk_scaling.is_suspended)
+			return;
+		hba->clk_scaling.is_suspended = false;
 	}
-	hba->clk_scaling.is_suspended = false;
-	spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
 
 	devfreq_resume_device(hba->devfreq);
 }
@@ -1492,7 +1490,6 @@ static int ufshcd_devfreq_target(struct device *dev,
 	bool scale_up = false, sched_clk_scaling_suspend_work = false;
 	struct list_head *clk_list = &hba->clk_list_head;
 	struct ufs_clk_info *clki;
-	unsigned long irq_flags;
 
 	if (!ufshcd_is_clkscaling_supported(hba))
 		return -EINVAL;
@@ -1513,43 +1510,38 @@ static int ufshcd_devfreq_target(struct device *dev,
 		*freq = (unsigned long) clk_round_rate(clki->clk, *freq);
 	}
 
-	spin_lock_irqsave(hba->host->host_lock, irq_flags);
-	if (ufshcd_eh_in_progress(hba)) {
-		spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
-		return 0;
-	}
+	scoped_guard(spinlock_irqsave, &hba->clk_scaling.lock)
+	{
+		if (ufshcd_eh_in_progress(hba))
+			return 0;
 
-	/* Skip scaling clock when clock scaling is suspended */
-	if (hba->clk_scaling.is_suspended) {
-		spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
-		dev_warn(hba->dev, "clock scaling is suspended, skip");
-		return 0;
-	}
+		/* Skip scaling clock when clock scaling is suspended */
+		if (hba->clk_scaling.is_suspended) {
+			dev_warn(hba->dev, "clock scaling is suspended, skip");
+			return 0;
+		}
 
-	if (!hba->clk_scaling.active_reqs)
-		sched_clk_scaling_suspend_work = true;
+		if (!hba->clk_scaling.active_reqs)
+			sched_clk_scaling_suspend_work = true;
 
-	if (list_empty(clk_list)) {
-		spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
-		goto out;
-	}
+		if (list_empty(clk_list))
+			goto out;
 
-	/* Decide based on the target or rounded-off frequency and update */
-	if (hba->use_pm_opp)
-		scale_up = *freq > hba->clk_scaling.target_freq;
-	else
-		scale_up = *freq == clki->max_freq;
+		/* Decide based on the target or rounded-off frequency and update */
+		if (hba->use_pm_opp)
+			scale_up = *freq > hba->clk_scaling.target_freq;
+		else
+			scale_up = *freq == clki->max_freq;
 
-	if (!hba->use_pm_opp && !scale_up)
-		*freq = clki->min_freq;
+		if (!hba->use_pm_opp && !scale_up)
+			*freq = clki->min_freq;
 
-	/* Update the frequency */
-	if (!ufshcd_is_devfreq_scaling_required(hba, *freq, scale_up)) {
-		spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
-		ret = 0;
-		goto out; /* no state change required */
+		/* Update the frequency */
+		if (!ufshcd_is_devfreq_scaling_required(hba, *freq, scale_up)) {
+			ret = 0;
+			goto out; /* no state change required */
+		}
 	}
-	spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
 
 	start = ktime_get();
 	ret = ufshcd_devfreq_scale(hba, *freq, scale_up);
@@ -1574,15 +1566,15 @@ static int ufshcd_devfreq_get_dev_status(struct device *dev,
 {
 	struct ufs_hba *hba = dev_get_drvdata(dev);
 	struct ufs_clk_scaling *scaling = &hba->clk_scaling;
-	unsigned long flags;
 	ktime_t curr_t;
 
 	if (!ufshcd_is_clkscaling_supported(hba))
 		return -EINVAL;
 
 	memset(stat, 0, sizeof(*stat));
 
-	spin_lock_irqsave(hba->host->host_lock, flags);
+	guard(spinlock_irqsave)(&hba->clk_scaling.lock);
+
 	curr_t = ktime_get();
 	if (!scaling->window_start_t)
 		goto start_window;
@@ -1618,7 +1610,7 @@ static int ufshcd_devfreq_get_dev_status(struct device *dev,
 		scaling->busy_start_t = 0;
 		scaling->is_busy_started = false;
 	}
-	spin_unlock_irqrestore(hba->host->host_lock, flags);
+
 	return 0;
 }
 
@@ -1682,35 +1674,35 @@ static void ufshcd_devfreq_remove(struct ufs_hba *hba)
 
 static void ufshcd_suspend_clkscaling(struct ufs_hba *hba)
 {
-	unsigned long flags;
 	bool suspend = false;
 
 	cancel_work_sync(&hba->clk_scaling.suspend_work);
 	cancel_work_sync(&hba->clk_scaling.resume_work);
 
-	spin_lock_irqsave(hba->host->host_lock, flags);
-	if (!hba->clk_scaling.is_suspended) {
-		suspend = true;
-		hba->clk_scaling.is_suspended = true;
-		hba->clk_scaling.window_start_t = 0;
+	scoped_guard(spinlock_irqsave, &hba->clk_scaling.lock)
+	{
+		if (!hba->clk_scaling.is_suspended) {
+			suspend = true;
+			hba->clk_scaling.is_suspended = true;
+			hba->clk_scaling.window_start_t = 0;
+		}
 	}
-	spin_unlock_irqrestore(hba->host->host_lock, flags);
 
 	if (suspend)
 		devfreq_suspend_device(hba->devfreq);
 }
 
 static void ufshcd_resume_clkscaling(struct ufs_hba *hba)
 {
-	unsigned long flags;
 	bool resume = false;
 
-	spin_lock_irqsave(hba->host->host_lock, flags);
-	if (hba->clk_scaling.is_suspended) {
-		resume = true;
-		hba->clk_scaling.is_suspended = false;
+	scoped_guard(spinlock_irqsave, &hba->clk_scaling.lock)
+	{
+		if (hba->clk_scaling.is_suspended) {
+			resume = true;
+			hba->clk_scaling.is_suspended = false;
+		}
 	}
-	spin_unlock_irqrestore(hba->host->host_lock, flags);
 
 	if (resume)
 		devfreq_resume_device(hba->devfreq);
@@ -1796,6 +1788,8 @@ static void ufshcd_init_clk_scaling(struct ufs_hba *hba)
 	INIT_WORK(&hba->clk_scaling.resume_work,
 		  ufshcd_clk_scaling_resume_work);
 
+	spin_lock_init(&hba->clk_scaling.lock);
+
 	hba->clk_scaling.workq = alloc_ordered_workqueue(
 		"ufs_clkscaling_%d", WQ_MEM_RECLAIM, hba->host->host_no);
 
@@ -2157,19 +2151,17 @@ static void ufshcd_clk_scaling_start_busy(struct ufs_hba *hba)
 {
 	bool queue_resume_work = false;
 	ktime_t curr_t = ktime_get();
-	unsigned long flags;
 
 	if (!ufshcd_is_clkscaling_supported(hba))
 		return;
 
-	spin_lock_irqsave(hba->host->host_lock, flags);
+	guard(spinlock_irqsave)(&hba->clk_scaling.lock);
+
 	if (!hba->clk_scaling.active_reqs++)
 		queue_resume_work = true;
 
-	if (!hba->clk_scaling.is_enabled || hba->pm_op_in_progress) {
-		spin_unlock_irqrestore(hba->host->host_lock, flags);
+	if (!hba->clk_scaling.is_enabled || hba->pm_op_in_progress)
 		return;
-	}
 
 	if (queue_resume_work)
 		queue_work(hba->clk_scaling.workq,
@@ -2185,26 +2177,24 @@ static void ufshcd_clk_scaling_start_busy(struct ufs_hba *hba)
 		hba->clk_scaling.busy_start_t = curr_t;
 		hba->clk_scaling.is_busy_started = true;
 	}
-	spin_unlock_irqrestore(hba->host->host_lock, flags);
 }
 
 static void ufshcd_clk_scaling_update_busy(struct ufs_hba *hba)
 {
 	struct ufs_clk_scaling *scaling = &hba->clk_scaling;
-	unsigned long flags;
 
 	if (!ufshcd_is_clkscaling_supported(hba))
 		return;
 
-	spin_lock_irqsave(hba->host->host_lock, flags);
+	guard(spinlock_irqsave)(&hba->clk_scaling.lock);
+
 	hba->clk_scaling.active_reqs--;
 	if (!scaling->active_reqs && scaling->is_busy_started) {
 		scaling->tot_busy_t += ktime_to_us(ktime_sub(ktime_get(),
 					scaling->busy_start_t));
 		scaling->busy_start_t = 0;
 		scaling->is_busy_started = false;
 	}
-	spin_unlock_irqrestore(hba->host->host_lock, flags);
 }
 
 static inline int ufshcd_monitor_opcode2dir(u8 opcode)
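Note on the locking pattern used above: guard() and scoped_guard() are the kernel's scope-based cleanup helpers (include/linux/cleanup.h, with the spinlock_irqsave lock class provided by include/linux/spinlock.h). The guard acquires the lock with interrupts saved when it is constructed and releases it automatically when it goes out of scope, including on an early return, which is why the explicit spin_unlock_irqrestore() calls and the irq_flags/flags locals disappear throughout this diff. A minimal sketch of the behaviour, using hypothetical demo_lock/demo_count names that are not part of this patch:

#include <linux/spinlock.h>	/* brings in the cleanup-based lock guards */

static DEFINE_SPINLOCK(demo_lock);	/* hypothetical example lock */
static int demo_count;			/* hypothetical shared state */

static void demo_inc(void)
{
	/* Behaves like spin_lock_irqsave(); the matching unlock and IRQ
	 * restore run automatically when the guard goes out of scope. */
	guard(spinlock_irqsave)(&demo_lock);
	demo_count++;
}

static int demo_dec(void)
{
	scoped_guard(spinlock_irqsave, &demo_lock) {
		if (!demo_count)
			return -EAGAIN;	/* lock is released on this early return */
		demo_count--;
	}
	return 0;
}

Under this model, every return inside a scoped_guard block in the patch drops hba->clk_scaling.lock before leaving the function, matching what the removed unlock-then-return sequences did by hand.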