@@ -37,6 +37,10 @@ static HLIST_HEAD(clk_root_list);
 static HLIST_HEAD(clk_orphan_list);
 static LIST_HEAD(clk_notifier_list);
 
+/* List of registered clks that use runtime PM */
+static HLIST_HEAD(clk_rpm_list);
+static DEFINE_MUTEX(clk_rpm_list_lock);
+
 static const struct hlist_head *all_lists[] = {
         &clk_root_list,
         &clk_orphan_list,
@@ -59,6 +63,7 @@ struct clk_core {
         struct clk_hw           *hw;
         struct module           *owner;
         struct device           *dev;
+        struct hlist_node       rpm_node;
         struct device_node      *of_node;
         struct clk_core         *parent;
         struct clk_parent_map   *parents;
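For readers less familiar with the kernel's intrusive hlist API: the new clk_rpm_list/clk_rpm_list_lock pair, together with the rpm_node member embedded in struct clk_core, lets the framework walk every runtime-PM-capable clk without allocating anything at walk time. Below is a minimal, self-contained sketch of that same add/iterate/remove pattern; the struct and function names here are illustrative and not part of the patch.

```c
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/printk.h>

static HLIST_HEAD(tracked_list);                /* plays the role of clk_rpm_list */
static DEFINE_MUTEX(tracked_list_lock);         /* plays the role of clk_rpm_list_lock */

struct tracked {                                /* stands in for struct clk_core */
        const char *name;
        struct hlist_node node;                 /* like the new rpm_node member */
};

static void tracked_add(struct tracked *t)
{
        mutex_lock(&tracked_list_lock);
        hlist_add_head(&t->node, &tracked_list);
        mutex_unlock(&tracked_list_lock);
}

static void tracked_del(struct tracked *t)
{
        mutex_lock(&tracked_list_lock);
        hlist_del(&t->node);
        mutex_unlock(&tracked_list_lock);
}

static void tracked_walk(void)
{
        struct tracked *t;

        mutex_lock(&tracked_list_lock);
        hlist_for_each_entry(t, &tracked_list, node)
                pr_info("tracked: %s\n", t->name);
        mutex_unlock(&tracked_list_lock);
}
```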
@@ -122,6 +127,89 @@ static void clk_pm_runtime_put(struct clk_core *core)
         pm_runtime_put_sync(core->dev);
 }
 
+/**
+ * clk_pm_runtime_get_all() - Runtime "get" all clk provider devices
+ *
+ * Call clk_pm_runtime_get() on all runtime PM enabled clks in the clk tree so
+ * that disabling unused clks avoids a deadlock where a device is runtime PM
+ * resuming/suspending and the runtime PM callback is trying to grab the
+ * prepare_lock for something like clk_prepare_enable() while
+ * clk_disable_unused_subtree() holds the prepare_lock and is trying to runtime
+ * PM resume/suspend the device as well.
+ *
+ * Context: Acquires the 'clk_rpm_list_lock' and returns with the lock held on
+ * success. Otherwise the lock is released on failure.
+ *
+ * Return: 0 on success, negative errno otherwise.
+ */
+static int clk_pm_runtime_get_all(void)
+{
+        int ret;
+        struct clk_core *core, *failed;
+
+        /*
+         * Grab the list lock to prevent any new clks from being registered
+         * or unregistered until clk_pm_runtime_put_all().
+         */
+        mutex_lock(&clk_rpm_list_lock);
+
+        /*
+         * Runtime PM "get" all the devices that are needed for the clks
+         * currently registered. Do this without holding the prepare_lock, to
+         * avoid the deadlock.
+         */
+        hlist_for_each_entry(core, &clk_rpm_list, rpm_node) {
+                ret = clk_pm_runtime_get(core);
+                if (ret) {
+                        failed = core;
+                        pr_err("clk: Failed to runtime PM get '%s' for clk '%s'\n",
+                               dev_name(failed->dev), failed->name);
+                        goto err;
+                }
+        }
+
+        return 0;
+
+err:
+        hlist_for_each_entry(core, &clk_rpm_list, rpm_node) {
+                if (core == failed)
+                        break;
+
+                clk_pm_runtime_put(core);
+        }
+        mutex_unlock(&clk_rpm_list_lock);
+
+        return ret;
+}
+
+/**
+ * clk_pm_runtime_put_all() - Runtime "put" all clk provider devices
+ *
+ * Put the runtime PM references taken in clk_pm_runtime_get_all() and release
+ * the 'clk_rpm_list_lock'.
+ */
+static void clk_pm_runtime_put_all(void)
+{
+        struct clk_core *core;
+
+        hlist_for_each_entry(core, &clk_rpm_list, rpm_node)
+                clk_pm_runtime_put(core);
+        mutex_unlock(&clk_rpm_list_lock);
+}
+
+static void clk_pm_runtime_init(struct clk_core *core)
+{
+        struct device *dev = core->dev;
+
+        if (dev && pm_runtime_enabled(dev)) {
+                core->rpm_enabled = true;
+
+                mutex_lock(&clk_rpm_list_lock);
+                hlist_add_head(&core->rpm_node, &clk_rpm_list);
+                mutex_unlock(&clk_rpm_list_lock);
+        }
+}
+
 /*** locking ***/
 static void clk_prepare_lock(void)
 {
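The kerneldoc above describes the deadlock this change is breaking. The problematic ordering arises when a clk provider's runtime PM callback itself uses the clk framework. A hedged sketch of such a provider (a hypothetical driver, not code from this patch) shows why: the runtime-resume path wants prepare_lock, while clk_disable_unused_subtree() used to hold prepare_lock and then wait for that same resume to complete.

```c
#include <linux/clk.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>

struct foo_clk_provider {                       /* hypothetical provider driver data */
        struct clk *iface;                      /* bus clock needed to touch registers */
};

static int foo_runtime_resume(struct device *dev)
{
        struct foo_clk_provider *p = dev_get_drvdata(dev);

        /*
         * clk_prepare_enable() takes prepare_lock. If clk_disable_unused_subtree()
         * already holds prepare_lock and is waiting for this resume through
         * clk_pm_runtime_get(), both sides block forever. Resuming every provider
         * *before* taking prepare_lock, as clk_pm_runtime_get_all() now does,
         * avoids that inversion.
         */
        return clk_prepare_enable(p->iface);
}

static int foo_runtime_suspend(struct device *dev)
{
        struct foo_clk_provider *p = dev_get_drvdata(dev);

        clk_disable_unprepare(p->iface);
        return 0;
}

static const struct dev_pm_ops foo_pm_ops = {
        SET_RUNTIME_PM_OPS(foo_runtime_suspend, foo_runtime_resume, NULL)
};
```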
@@ -1381,9 +1469,6 @@ static void __init clk_unprepare_unused_subtree(struct clk_core *core)
         if (core->flags & CLK_IGNORE_UNUSED)
                 return;
 
-        if (clk_pm_runtime_get(core))
-                return;
-
         if (clk_core_is_prepared(core)) {
                 trace_clk_unprepare(core);
                 if (core->ops->unprepare_unused)
@@ -1392,8 +1477,6 @@ static void __init clk_unprepare_unused_subtree(struct clk_core *core)
                         core->ops->unprepare(core->hw);
                 trace_clk_unprepare_complete(core);
         }
-
-        clk_pm_runtime_put(core);
 }
 
 static void __init clk_disable_unused_subtree(struct clk_core *core)
@@ -1409,9 +1492,6 @@ static void __init clk_disable_unused_subtree(struct clk_core *core)
         if (core->flags & CLK_OPS_PARENT_ENABLE)
                 clk_core_prepare_enable(core->parent);
 
-        if (clk_pm_runtime_get(core))
-                goto unprepare_out;
-
         flags = clk_enable_lock();
 
         if (core->enable_count)
@@ -1436,8 +1516,6 @@ static void __init clk_disable_unused_subtree(struct clk_core *core)
 
 unlock_out:
         clk_enable_unlock(flags);
-        clk_pm_runtime_put(core);
-unprepare_out:
         if (core->flags & CLK_OPS_PARENT_ENABLE)
                 clk_core_disable_unprepare(core->parent);
 }
@@ -1453,6 +1531,7 @@ __setup("clk_ignore_unused", clk_ignore_unused_setup);
 static int __init clk_disable_unused(void)
 {
         struct clk_core *core;
+        int ret;
 
         if (clk_ignore_unused) {
                 pr_warn("clk: Not disabling unused clocks\n");
@@ -1461,6 +1540,13 @@ static int __init clk_disable_unused(void)
 
         pr_info("clk: Disabling unused clocks\n");
 
+        ret = clk_pm_runtime_get_all();
+        if (ret)
+                return ret;
+        /*
+         * Grab the prepare lock to keep the clk topology stable while iterating
+         * over clks.
+         */
         clk_prepare_lock();
 
         hlist_for_each_entry(core, &clk_root_list, child_node)
@@ -1477,6 +1563,8 @@ static int __init clk_disable_unused(void)
 
         clk_prepare_unlock();
 
+        clk_pm_runtime_put_all();
+
         return 0;
 }
 late_initcall_sync(clk_disable_unused);
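All three call sites touched by this diff (clk_disable_unused() here, plus clk_summary_show() and clk_dump_show() below) follow the same bracketing contract spelled out in the kerneldoc: clk_pm_runtime_get_all() returns with clk_rpm_list_lock held on success, the prepare_lock critical section nests inside it, and clk_pm_runtime_put_all() both drops the runtime PM references and releases the list lock. A condensed sketch of that shape, with an illustrative function name and the walk itself elided:

```c
static int walk_clk_tree_example(void)          /* illustrative only */
{
        int ret;

        /* Resume every provider; holds clk_rpm_list_lock on success. */
        ret = clk_pm_runtime_get_all();
        if (ret)
                return ret;

        /*
         * Topology is stable under prepare_lock. The subtree walkers no longer
         * need per-clk clk_pm_runtime_get()/put() calls in here.
         */
        clk_prepare_lock();
        /* ... walk clk_root_list / clk_orphan_list ... */
        clk_prepare_unlock();

        /* Drop the runtime PM references and clk_rpm_list_lock. */
        clk_pm_runtime_put_all();

        return 0;
}
```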
@@ -3252,9 +3340,7 @@ static void clk_summary_show_subtree(struct seq_file *s, struct clk_core *c,
 {
         struct clk_core *child;
 
-        clk_pm_runtime_get(c);
         clk_summary_show_one(s, c, level);
-        clk_pm_runtime_put(c);
 
         hlist_for_each_entry(child, &c->children, child_node)
                 clk_summary_show_subtree(s, child, level + 1);
@@ -3264,11 +3350,15 @@ static int clk_summary_show(struct seq_file *s, void *data)
 {
         struct clk_core *c;
         struct hlist_head **lists = s->private;
+        int ret;
 
         seq_puts(s, " enable prepare protect duty hardware connection\n");
         seq_puts(s, " clock count count count rate accuracy phase cycle enable consumer id\n");
         seq_puts(s, "---------------------------------------------------------------------------------------------------------------------------------------------\n");
 
+        ret = clk_pm_runtime_get_all();
+        if (ret)
+                return ret;
 
         clk_prepare_lock();
 
@@ -3277,6 +3367,7 @@ static int clk_summary_show(struct seq_file *s, void *data)
                 clk_summary_show_subtree(s, c, 0);
 
         clk_prepare_unlock();
+        clk_pm_runtime_put_all();
 
         return 0;
 }
@@ -3324,8 +3415,14 @@ static int clk_dump_show(struct seq_file *s, void *data)
         struct clk_core *c;
         bool first_node = true;
         struct hlist_head **lists = s->private;
+        int ret;
+
+        ret = clk_pm_runtime_get_all();
+        if (ret)
+                return ret;
 
         seq_putc(s, '{');
+
         clk_prepare_lock();
 
         for (; *lists; lists++) {
@@ -3338,6 +3435,7 @@ static int clk_dump_show(struct seq_file *s, void *data)
         }
 
         clk_prepare_unlock();
+        clk_pm_runtime_put_all();
 
         seq_puts(s, "}\n");
         return 0;
@@ -3981,8 +4079,6 @@ static int __clk_core_init(struct clk_core *core)
         }
 
         clk_core_reparent_orphans_nolock();
-
-        kref_init(&core->ref);
 out:
         clk_pm_runtime_put(core);
 unlock:
@@ -4211,6 +4307,22 @@ static void clk_core_free_parent_map(struct clk_core *core)
         kfree(core->parents);
 }
 
+/* Free memory allocated for a struct clk_core */
+static void __clk_release(struct kref *ref)
+{
+        struct clk_core *core = container_of(ref, struct clk_core, ref);
+
+        if (core->rpm_enabled) {
+                mutex_lock(&clk_rpm_list_lock);
+                hlist_del(&core->rpm_node);
+                mutex_unlock(&clk_rpm_list_lock);
+        }
+
+        clk_core_free_parent_map(core);
+        kfree_const(core->name);
+        kfree(core);
+}
+
 static struct clk *
 __clk_register(struct device *dev, struct device_node *np, struct clk_hw *hw)
 {
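Moving __clk_release() ahead of __clk_register() and (in the next hunk) initializing the kref before the first allocation that can fail lets every error label simply drop the reference and let the release callback free whatever was set up so far; kfree() and kfree_const() of NULL are no-ops, so partially initialized cores are handled for free. For reference, the generic kref lifecycle being relied on, as a small stand-alone sketch with illustrative names:

```c
#include <linux/kref.h>
#include <linux/slab.h>
#include <linux/string.h>

struct widget {                                 /* stands in for struct clk_core */
        struct kref ref;
        char *name;
};

static void widget_release(struct kref *ref)    /* plays the role of __clk_release() */
{
        struct widget *w = container_of(ref, struct widget, ref);

        kfree(w->name);                         /* kfree(NULL) is safe, partial init is fine */
        kfree(w);
}

static struct widget *widget_create(const char *name)
{
        struct widget *w = kzalloc(sizeof(*w), GFP_KERNEL);

        if (!w)
                return NULL;

        kref_init(&w->ref);                     /* refcount is 1 before anything can fail */

        w->name = kstrdup(name, GFP_KERNEL);
        if (!w->name) {
                /* Error path: a single put frees everything allocated so far. */
                kref_put(&w->ref, widget_release);
                return NULL;
        }

        return w;
}

static void widget_put(struct widget *w)
{
        kref_put(&w->ref, widget_release);
}
```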
@@ -4231,6 +4343,8 @@ __clk_register(struct device *dev, struct device_node *np, struct clk_hw *hw)
                 goto fail_out;
         }
 
+        kref_init(&core->ref);
+
         core->name = kstrdup_const(init->name, GFP_KERNEL);
         if (!core->name) {
                 ret = -ENOMEM;
@@ -4243,9 +4357,8 @@ __clk_register(struct device *dev, struct device_node *np, struct clk_hw *hw)
         }
         core->ops = init->ops;
 
-        if (dev && pm_runtime_enabled(dev))
-                core->rpm_enabled = true;
         core->dev = dev;
+        clk_pm_runtime_init(core);
         core->of_node = np;
         if (dev && dev->driver)
                 core->owner = dev->driver->owner;
@@ -4285,12 +4398,10 @@ __clk_register(struct device *dev, struct device_node *np, struct clk_hw *hw)
         hw->clk = NULL;
 
 fail_create_clk:
-        clk_core_free_parent_map(core);
 fail_parents:
 fail_ops:
-        kfree_const(core->name);
 fail_name:
-        kfree(core);
+        kref_put(&core->ref, __clk_release);
 fail_out:
         return ERR_PTR(ret);
 }
@@ -4370,18 +4481,6 @@ int of_clk_hw_register(struct device_node *node, struct clk_hw *hw)
 }
 EXPORT_SYMBOL_GPL(of_clk_hw_register);
 
-/* Free memory allocated for a clock. */
-static void __clk_release(struct kref *ref)
-{
-        struct clk_core *core = container_of(ref, struct clk_core, ref);
-
-        lockdep_assert_held(&prepare_lock);
-
-        clk_core_free_parent_map(core);
-        kfree_const(core->name);
-        kfree(core);
-}
-
 /*
  * Empty clk_ops for unregistered clocks. These are used temporarily
  * after clk_unregister() was called on a clock and until last clock
@@ -4472,7 +4571,8 @@ void clk_unregister(struct clk *clk)
         if (ops == &clk_nodrv_ops) {
                 pr_err("%s: unregistered clock: %s\n", __func__,
                        clk->core->name);
-                goto unlock;
+                clk_prepare_unlock();
+                return;
         }
         /*
          * Assign empty clock ops for consumers that might still hold
@@ -4506,11 +4606,10 @@ void clk_unregister(struct clk *clk)
         if (clk->core->protect_count)
                 pr_warn("%s: unregistering protected clock: %s\n",
                         __func__, clk->core->name);
+        clk_prepare_unlock();
 
         kref_put(&clk->core->ref, __clk_release);
         free_clk(clk);
-unlock:
-        clk_prepare_unlock();
 }
 EXPORT_SYMBOL_GPL(clk_unregister);
 
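The lockdep_assert_held(&prepare_lock) in the old __clk_release() is dropped for a reason: the release callback now takes clk_rpm_list_lock to unlink the clk from clk_rpm_list, and clk_pm_runtime_get_all() establishes the order clk_rpm_list_lock then prepare_lock. Dropping the last reference while still holding prepare_lock would create the reverse nesting, so clk_unregister() above and __clk_put() below both release prepare_lock first and only then call kref_put(). A commented sketch of the two orderings follows; teardown_example() is illustrative, not code from the patch.

```c
/*
 * Established order (clk_disable_unused(), clk_summary_show(), clk_dump_show()):
 *
 *   mutex_lock(&clk_rpm_list_lock);      <- clk_pm_runtime_get_all()
 *     clk_prepare_lock();                <- walk the tree
 *     clk_prepare_unlock();
 *   mutex_unlock(&clk_rpm_list_lock);    <- clk_pm_runtime_put_all()
 *
 * Inverted order (what a kref_put() under prepare_lock could trigger):
 *
 *   clk_prepare_lock();
 *     mutex_lock(&clk_rpm_list_lock);    <- __clk_release() unlinking rpm_node
 *
 * Hence the pattern now used at the teardown sites:
 */
static void teardown_example(struct clk_core *core)     /* illustrative only */
{
        clk_prepare_lock();
        /* ... detach the clk from the tree ... */
        clk_prepare_unlock();

        /* Safe: __clk_release() may take clk_rpm_list_lock here. */
        kref_put(&core->ref, __clk_release);
}
```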
@@ -4669,13 +4768,11 @@ void __clk_put(struct clk *clk)
         if (clk->min_rate > 0 || clk->max_rate < ULONG_MAX)
                 clk_set_rate_range_nolock(clk, 0, ULONG_MAX);
 
-        owner = clk->core->owner;
-        kref_put(&clk->core->ref, __clk_release);
-
         clk_prepare_unlock();
 
+        owner = clk->core->owner;
+        kref_put(&clk->core->ref, __clk_release);
         module_put(owner);
-
         free_clk(clk);
 }
 