
Commit be769e5

avri-altman-sndk authored and martinkpetersen committed
scsi: ufs: core: Introduce a new clock_scaling lock
Introduce a new clock scaling lock to serialize access to some of the
clock scaling members instead of the host_lock. Here also, simplify the
code with the guard() macro and co.

Reviewed-by: Bart Van Assche <[email protected]>
Signed-off-by: Avri Altman <[email protected]>
Link: https://lore.kernel.org/r/[email protected]
Signed-off-by: Martin K. Petersen <[email protected]>
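For readers unfamiliar with the pattern: guard() and scoped_guard() are the kernel's scope-based cleanup helpers from include/linux/cleanup.h (the spinlock guard classes live in include/linux/spinlock.h). Below is a minimal sketch of the conversion this commit performs; struct scaling_state and the two functions are illustrative stand-ins, not the driver's own code.

#include <linux/cleanup.h>
#include <linux/spinlock.h>

struct scaling_state {		/* stand-in for struct ufs_clk_scaling */
	spinlock_t lock;	/* protects the fields below */
	bool is_suspended;
	int active_reqs;
};

/* Before: every exit path must pair the lock with an explicit unlock. */
static void bump_manual(struct scaling_state *s)
{
	unsigned long flags;

	spin_lock_irqsave(&s->lock, flags);
	if (s->is_suspended) {
		spin_unlock_irqrestore(&s->lock, flags);
		return;
	}
	s->active_reqs++;
	spin_unlock_irqrestore(&s->lock, flags);
}

/* After: guard() unlocks automatically when the function exits. */
static void bump_guarded(struct scaling_state *s)
{
	guard(spinlock_irqsave)(&s->lock);

	if (s->is_suspended)
		return;		/* the guard unlocks here, too */
	s->active_reqs++;
}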
1 parent 209f4e4 commit be769e5

2 files changed: 71 additions & 77 deletions


drivers/ufs/core/ufshcd.c

Lines changed: 61 additions & 71 deletions
@@ -1452,16 +1452,16 @@ static void ufshcd_clk_scaling_suspend_work(struct work_struct *work)
 {
 	struct ufs_hba *hba = container_of(work, struct ufs_hba,
 					   clk_scaling.suspend_work);
-	unsigned long irq_flags;
 
-	spin_lock_irqsave(hba->host->host_lock, irq_flags);
-	if (hba->clk_scaling.active_reqs || hba->clk_scaling.is_suspended) {
-		spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
-		return;
+	scoped_guard(spinlock_irqsave, &hba->clk_scaling.lock)
+	{
+		if (hba->clk_scaling.active_reqs ||
+		    hba->clk_scaling.is_suspended)
+			return;
+
+		hba->clk_scaling.is_suspended = true;
+		hba->clk_scaling.window_start_t = 0;
 	}
-	hba->clk_scaling.is_suspended = true;
-	hba->clk_scaling.window_start_t = 0;
-	spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
 
 	devfreq_suspend_device(hba->devfreq);
 }
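The hunk above leans on a property of scoped_guard() worth spelling out: a return from inside the guarded block destroys the guard variable, so the compiler-generated cleanup releases the lock on the early-return path as well as on normal fall-through. A sketch reusing the illustrative struct scaling_state from the example above (hypothetical helper, not from the driver):

static void suspend_if_idle(struct scaling_state *s)
{
	scoped_guard(spinlock_irqsave, &s->lock) {
		if (s->active_reqs)
			return;		/* lock dropped by the guard's cleanup */
		s->is_suspended = true;
	}				/* lock dropped on the normal path */

	/* anything here runs without the lock held */
}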
@@ -1470,15 +1470,13 @@ static void ufshcd_clk_scaling_resume_work(struct work_struct *work)
 {
 	struct ufs_hba *hba = container_of(work, struct ufs_hba,
 					   clk_scaling.resume_work);
-	unsigned long irq_flags;
 
-	spin_lock_irqsave(hba->host->host_lock, irq_flags);
-	if (!hba->clk_scaling.is_suspended) {
-		spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
-		return;
+	scoped_guard(spinlock_irqsave, &hba->clk_scaling.lock)
+	{
+		if (!hba->clk_scaling.is_suspended)
+			return;
+		hba->clk_scaling.is_suspended = false;
 	}
-	hba->clk_scaling.is_suspended = false;
-	spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
 
 	devfreq_resume_device(hba->devfreq);
 }
@@ -1492,7 +1490,6 @@ static int ufshcd_devfreq_target(struct device *dev,
 	bool scale_up = false, sched_clk_scaling_suspend_work = false;
 	struct list_head *clk_list = &hba->clk_list_head;
 	struct ufs_clk_info *clki;
-	unsigned long irq_flags;
 
 	if (!ufshcd_is_clkscaling_supported(hba))
 		return -EINVAL;
@@ -1513,43 +1510,38 @@ static int ufshcd_devfreq_target(struct device *dev,
 		*freq = (unsigned long) clk_round_rate(clki->clk, *freq);
 	}
 
-	spin_lock_irqsave(hba->host->host_lock, irq_flags);
-	if (ufshcd_eh_in_progress(hba)) {
-		spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
-		return 0;
-	}
+	scoped_guard(spinlock_irqsave, &hba->clk_scaling.lock)
+	{
+		if (ufshcd_eh_in_progress(hba))
+			return 0;
 
-	/* Skip scaling clock when clock scaling is suspended */
-	if (hba->clk_scaling.is_suspended) {
-		spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
-		dev_warn(hba->dev, "clock scaling is suspended, skip");
-		return 0;
-	}
+		/* Skip scaling clock when clock scaling is suspended */
+		if (hba->clk_scaling.is_suspended) {
+			dev_warn(hba->dev, "clock scaling is suspended, skip");
+			return 0;
+		}
 
-	if (!hba->clk_scaling.active_reqs)
-		sched_clk_scaling_suspend_work = true;
+		if (!hba->clk_scaling.active_reqs)
+			sched_clk_scaling_suspend_work = true;
 
-	if (list_empty(clk_list)) {
-		spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
-		goto out;
-	}
+		if (list_empty(clk_list))
+			goto out;
 
-	/* Decide based on the target or rounded-off frequency and update */
-	if (hba->use_pm_opp)
-		scale_up = *freq > hba->clk_scaling.target_freq;
-	else
-		scale_up = *freq == clki->max_freq;
+		/* Decide based on the target or rounded-off frequency and update */
+		if (hba->use_pm_opp)
+			scale_up = *freq > hba->clk_scaling.target_freq;
+		else
+			scale_up = *freq == clki->max_freq;
 
-	if (!hba->use_pm_opp && !scale_up)
-		*freq = clki->min_freq;
+		if (!hba->use_pm_opp && !scale_up)
+			*freq = clki->min_freq;
 
-	/* Update the frequency */
-	if (!ufshcd_is_devfreq_scaling_required(hba, *freq, scale_up)) {
-		spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
-		ret = 0;
-		goto out; /* no state change required */
+		/* Update the frequency */
+		if (!ufshcd_is_devfreq_scaling_required(hba, *freq, scale_up)) {
+			ret = 0;
+			goto out; /* no state change required */
+		}
 	}
-	spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
 
 	start = ktime_get();
 	ret = ufshcd_devfreq_scale(hba, *freq, scale_up);
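One more cleanup.h detail the rewritten ufshcd_devfreq_target() depends on: a goto that jumps out of the guarded block also leaves the guard's scope, so the lock is released before control reaches the out label, and the old unlock-before-goto calls can go. A minimal sketch under the same illustrative assumptions as above:

static int target_sketch(struct scaling_state *s)	/* hypothetical */
{
	int ret = -EBUSY;

	scoped_guard(spinlock_irqsave, &s->lock) {
		if (s->is_suspended)
			goto out;	/* leaving the scope drops the lock */
		s->active_reqs++;
	}

	ret = 0;	/* the lock is no longer held here */
out:
	return ret;
}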
@@ -1574,15 +1566,15 @@ static int ufshcd_devfreq_get_dev_status(struct device *dev,
 {
 	struct ufs_hba *hba = dev_get_drvdata(dev);
 	struct ufs_clk_scaling *scaling = &hba->clk_scaling;
-	unsigned long flags;
 	ktime_t curr_t;
 
 	if (!ufshcd_is_clkscaling_supported(hba))
 		return -EINVAL;
 
 	memset(stat, 0, sizeof(*stat));
 
-	spin_lock_irqsave(hba->host->host_lock, flags);
+	guard(spinlock_irqsave)(&hba->clk_scaling.lock);
+
 	curr_t = ktime_get();
 	if (!scaling->window_start_t)
 		goto start_window;
@@ -1618,7 +1610,7 @@ static int ufshcd_devfreq_get_dev_status(struct device *dev,
 		scaling->busy_start_t = 0;
 		scaling->is_busy_started = false;
 	}
-	spin_unlock_irqrestore(hba->host->host_lock, flags);
+
 	return 0;
 }
 
@@ -1682,35 +1674,35 @@ static void ufshcd_devfreq_remove(struct ufs_hba *hba)
 
 static void ufshcd_suspend_clkscaling(struct ufs_hba *hba)
 {
-	unsigned long flags;
 	bool suspend = false;
 
 	cancel_work_sync(&hba->clk_scaling.suspend_work);
 	cancel_work_sync(&hba->clk_scaling.resume_work);
 
-	spin_lock_irqsave(hba->host->host_lock, flags);
-	if (!hba->clk_scaling.is_suspended) {
-		suspend = true;
-		hba->clk_scaling.is_suspended = true;
-		hba->clk_scaling.window_start_t = 0;
+	scoped_guard(spinlock_irqsave, &hba->clk_scaling.lock)
+	{
+		if (!hba->clk_scaling.is_suspended) {
+			suspend = true;
+			hba->clk_scaling.is_suspended = true;
+			hba->clk_scaling.window_start_t = 0;
+		}
 	}
-	spin_unlock_irqrestore(hba->host->host_lock, flags);
 
 	if (suspend)
 		devfreq_suspend_device(hba->devfreq);
 }
 
 static void ufshcd_resume_clkscaling(struct ufs_hba *hba)
 {
-	unsigned long flags;
 	bool resume = false;
 
-	spin_lock_irqsave(hba->host->host_lock, flags);
-	if (hba->clk_scaling.is_suspended) {
-		resume = true;
-		hba->clk_scaling.is_suspended = false;
+	scoped_guard(spinlock_irqsave, &hba->clk_scaling.lock)
+	{
+		if (hba->clk_scaling.is_suspended) {
+			resume = true;
+			hba->clk_scaling.is_suspended = false;
+		}
 	}
-	spin_unlock_irqrestore(hba->host->host_lock, flags);
 
 	if (resume)
 		devfreq_resume_device(hba->devfreq);
@@ -1796,6 +1788,8 @@ static void ufshcd_init_clk_scaling(struct ufs_hba *hba)
 	INIT_WORK(&hba->clk_scaling.resume_work,
 		  ufshcd_clk_scaling_resume_work);
 
+	spin_lock_init(&hba->clk_scaling.lock);
+
 	hba->clk_scaling.workq = alloc_ordered_workqueue(
 		"ufs_clkscaling_%d", WQ_MEM_RECLAIM, hba->host->host_no);
 
@@ -2157,19 +2151,17 @@ static void ufshcd_clk_scaling_start_busy(struct ufs_hba *hba)
 {
 	bool queue_resume_work = false;
 	ktime_t curr_t = ktime_get();
-	unsigned long flags;
 
 	if (!ufshcd_is_clkscaling_supported(hba))
 		return;
 
-	spin_lock_irqsave(hba->host->host_lock, flags);
+	guard(spinlock_irqsave)(&hba->clk_scaling.lock);
+
 	if (!hba->clk_scaling.active_reqs++)
 		queue_resume_work = true;
 
-	if (!hba->clk_scaling.is_enabled || hba->pm_op_in_progress) {
-		spin_unlock_irqrestore(hba->host->host_lock, flags);
+	if (!hba->clk_scaling.is_enabled || hba->pm_op_in_progress)
 		return;
-	}
 
 	if (queue_resume_work)
 		queue_work(hba->clk_scaling.workq,
@@ -2185,26 +2177,24 @@ static void ufshcd_clk_scaling_start_busy(struct ufs_hba *hba)
 		hba->clk_scaling.busy_start_t = curr_t;
 		hba->clk_scaling.is_busy_started = true;
 	}
-	spin_unlock_irqrestore(hba->host->host_lock, flags);
 }
 
 static void ufshcd_clk_scaling_update_busy(struct ufs_hba *hba)
 {
 	struct ufs_clk_scaling *scaling = &hba->clk_scaling;
-	unsigned long flags;
 
 	if (!ufshcd_is_clkscaling_supported(hba))
 		return;
 
-	spin_lock_irqsave(hba->host->host_lock, flags);
+	guard(spinlock_irqsave)(&hba->clk_scaling.lock);
+
 	hba->clk_scaling.active_reqs--;
 	if (!scaling->active_reqs && scaling->is_busy_started) {
 		scaling->tot_busy_t += ktime_to_us(ktime_sub(ktime_get(),
 					scaling->busy_start_t));
 		scaling->busy_start_t = 0;
 		scaling->is_busy_started = false;
 	}
-	spin_unlock_irqrestore(hba->host->host_lock, flags);
 }
 
 static inline int ufshcd_monitor_opcode2dir(u8 opcode)

include/ufs/ufshcd.h

Lines changed: 10 additions & 6 deletions
@@ -436,6 +436,10 @@ struct ufs_clk_gating {
 
 /**
  * struct ufs_clk_scaling - UFS clock scaling related data
+ * @workq: workqueue to schedule devfreq suspend/resume work
+ * @suspend_work: worker to suspend devfreq
+ * @resume_work: worker to resume devfreq
+ * @lock: serialize access to some struct ufs_clk_scaling members
  * @active_reqs: number of requests that are pending. If this is zero when
  * devfreq ->target() function is called then schedule "suspend_work" to
  * suspend devfreq.
@@ -445,9 +449,6 @@ struct ufs_clk_gating {
  * @enable_attr: sysfs attribute to enable/disable clock scaling
  * @saved_pwr_info: UFS power mode may also be changed during scaling and this
  * one keeps track of previous power mode.
- * @workq: workqueue to schedule devfreq suspend/resume work
- * @suspend_work: worker to suspend devfreq
- * @resume_work: worker to resume devfreq
  * @target_freq: frequency requested by devfreq framework
  * @min_gear: lowest HS gear to scale down to
  * @is_enabled: tracks if scaling is currently enabled or not, controlled by
@@ -459,15 +460,18 @@ struct ufs_clk_gating {
  * @is_suspended: tracks if devfreq is suspended or not
  */
 struct ufs_clk_scaling {
+	struct workqueue_struct *workq;
+	struct work_struct suspend_work;
+	struct work_struct resume_work;
+
+	spinlock_t lock;
+
 	int active_reqs;
 	unsigned long tot_busy_t;
 	ktime_t window_start_t;
 	ktime_t busy_start_t;
 	struct device_attribute enable_attr;
 	struct ufs_pa_layer_attr saved_pwr_info;
-	struct workqueue_struct *workq;
-	struct work_struct suspend_work;
-	struct work_struct resume_work;
 	unsigned long target_freq;
 	u32 min_gear;
 	bool is_enabled;
