@@ -198,6 +198,17 @@ static const struct block_device_operations pmem_fops = {
 	.revalidate_disk = nvdimm_revalidate_disk,
 };
 
+static void pmem_release_queue(void *q)
+{
+	blk_cleanup_queue(q);
+}
+
+static void pmem_release_disk(void *disk)
+{
+	del_gendisk(disk);
+	put_disk(disk);
+}
+
 static struct pmem_device *pmem_alloc(struct device *dev,
 		struct resource *res, int id)
 {
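The two helpers above follow the devm action pattern: rather than open-coding teardown in every error path and again in the driver's remove path, the driver registers a callback that the driver core runs automatically when the device is unbound. A minimal caller-side sketch of the idiom used throughout this patch (example_bind_queue is a hypothetical wrapper, not part of the patch; devm_add_action() and blk_cleanup_queue() are the real APIs):

#include <linux/blkdev.h>
#include <linux/device.h>

/* Hypothetical wrapper illustrating the add-or-cleanup idiom from the
 * patch: if the action cannot be registered (allocation failure), the
 * caller must release the resource itself, exactly once. */
static int example_bind_queue(struct device *dev, struct request_queue *q)
{
	if (devm_add_action(dev, pmem_release_queue, q)) {
		/* action not registered: tear the queue down now */
		blk_cleanup_queue(q);
		return -ENOMEM;
	}
	/* on success, the driver core calls pmem_release_queue(q) at unbind */
	return 0;
}

Later kernels grew devm_add_action_or_reset(), which folds this add-or-cleanup idiom into a single call.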
@@ -234,25 +245,22 @@ static struct pmem_device *pmem_alloc(struct device *dev,
 			pmem->phys_addr, pmem->size,
 			ARCH_MEMREMAP_PMEM);
 
-	if (IS_ERR(pmem->virt_addr)) {
+	/*
+	 * At release time the queue must be dead before
+	 * devm_memremap_pages is unwound
+	 */
+	if (devm_add_action(dev, pmem_release_queue, q)) {
 		blk_cleanup_queue(q);
-		return (void __force *) pmem->virt_addr;
+		return ERR_PTR(-ENOMEM);
 	}
 
+	if (IS_ERR(pmem->virt_addr))
+		return (void __force *) pmem->virt_addr;
+
 	pmem->pmem_queue = q;
 	return pmem;
 }
 
-static void pmem_detach_disk(struct pmem_device *pmem)
-{
-	if (!pmem->pmem_disk)
-		return;
-
-	del_gendisk(pmem->pmem_disk);
-	put_disk(pmem->pmem_disk);
-	blk_cleanup_queue(pmem->pmem_queue);
-}
-
 static int pmem_attach_disk(struct device *dev,
 		struct nd_namespace_common *ndns, struct pmem_device *pmem)
 {
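Ordering is what makes the comment in the hunk above ("the queue must be dead before devm_memremap_pages is unwound") hold: devm resources are released in reverse order of registration. Because pmem_release_queue is registered after the mapping call, it runs first at unbind, killing the queue before the memory it may still be servicing I/O against disappears. Roughly:

/*
 * devm teardown is LIFO (last registered, first released):
 *
 *   registration order                  unbind/teardown order
 *   ------------------                  ---------------------
 *   1. devm_memremap(dev, ...)          2. mapping unwound
 *   2. devm_add_action(dev,             1. pmem_release_queue(q)
 *        pmem_release_queue, q)            -> queue dead, no I/O in flight
 */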
@@ -269,8 +277,10 @@ static int pmem_attach_disk(struct device *dev,
 	pmem->pmem_queue->queuedata = pmem;
 
 	disk = alloc_disk_node(0, nid);
-	if (!disk) {
-		blk_cleanup_queue(pmem->pmem_queue);
+	if (!disk)
+		return -ENOMEM;
+	if (devm_add_action(dev, pmem_release_disk, disk)) {
+		put_disk(disk);
 		return -ENOMEM;
 	}
 
@@ -427,15 +437,6 @@ static int nd_pfn_init(struct nd_pfn *nd_pfn)
 	return nvdimm_write_bytes(ndns, SZ_4K, pfn_sb, sizeof(*pfn_sb));
 }
 
-static void nvdimm_namespace_detach_pfn(struct nd_pfn *nd_pfn)
-{
-	struct pmem_device *pmem;
-
-	/* free pmem disk */
-	pmem = dev_get_drvdata(&nd_pfn->dev);
-	pmem_detach_disk(pmem);
-}
-
 /*
  * We hotplug memory at section granularity, pad the reserved area from
  * the previous section base to the namespace base address.
@@ -458,7 +459,6 @@ static unsigned long init_altmap_reserve(resource_size_t base)
 
 static int __nvdimm_namespace_attach_pfn(struct nd_pfn *nd_pfn)
 {
-	int rc;
 	struct resource res;
 	struct request_queue *q;
 	struct pmem_device *pmem;
@@ -495,35 +495,33 @@ static int __nvdimm_namespace_attach_pfn(struct nd_pfn *nd_pfn)
 		altmap = &__altmap;
 		altmap->free = PHYS_PFN(pmem->data_offset - SZ_8K);
 		altmap->alloc = 0;
-	} else {
-		rc = -ENXIO;
-		goto err;
-	}
+	} else
+		return -ENXIO;
 
 	/* establish pfn range for lookup, and switch to direct map */
 	q = pmem->pmem_queue;
 	memcpy(&res, &nsio->res, sizeof(res));
 	res.start += start_pad;
 	res.end -= end_trunc;
+	devm_remove_action(dev, pmem_release_queue, q);
 	devm_memunmap(dev, (void __force *) pmem->virt_addr);
 	pmem->virt_addr = (void __pmem *) devm_memremap_pages(dev, &res,
 			&q->q_usage_counter, altmap);
 	pmem->pfn_flags |= PFN_MAP;
-	if (IS_ERR(pmem->virt_addr)) {
-		rc = PTR_ERR(pmem->virt_addr);
-		goto err;
+
+	/*
+	 * At release time the queue must be dead before
+	 * devm_memremap_pages is unwound
+	 */
+	if (devm_add_action(dev, pmem_release_queue, q)) {
+		blk_cleanup_queue(q);
+		return -ENOMEM;
 	}
+	if (IS_ERR(pmem->virt_addr))
+		return PTR_ERR(pmem->virt_addr);
 
 	/* attach pmem disk in "pfn-mode" */
-	rc = pmem_attach_disk(dev, ndns, pmem);
-	if (rc)
-		goto err;
-
-	return rc;
- err:
-	nvdimm_namespace_detach_pfn(nd_pfn);
-	return rc;
-
+	return pmem_attach_disk(dev, ndns, pmem);
 }
 
 static int nvdimm_namespace_attach_pfn(struct nd_namespace_common *ndns)
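The devm_remove_action()/devm_add_action() pair in this hunk preserves the LIFO ordering across the re-mapping: the stale action registered back in pmem_alloc() sits below the new devm_memremap_pages() registration on the release stack, so it is popped and re-registered on top. Left in place, it would fire after the pages were unmapped, leaving a window where in-flight I/O could touch an unmapped range. A sketch of the re-ordering (this mirrors the hunk above, with the reasoning spelled out in comments):

/* Pop the stale release action registered back in pmem_alloc() ... */
devm_remove_action(dev, pmem_release_queue, q);
/* ... establish the new page mapping ... */
pmem->virt_addr = (void __pmem *) devm_memremap_pages(dev, &res,
		&q->q_usage_counter, altmap);
/* ... and push the queue-kill action back on top of it, so the queue
 * is dead before devm_memremap_pages() is unwound at unbind time. */
if (devm_add_action(dev, pmem_release_queue, q)) {
	blk_cleanup_queue(q);
	return -ENOMEM;
}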
@@ -565,8 +563,8 @@ static int nd_pmem_probe(struct device *dev)
 
 	if (is_nd_btt(dev)) {
 		/* btt allocates its own request_queue */
+		devm_remove_action(dev, pmem_release_queue, pmem->pmem_queue);
 		blk_cleanup_queue(pmem->pmem_queue);
-		pmem->pmem_queue = NULL;
 		return nvdimm_namespace_attach_btt(ndns);
 	}
 
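The converse rule applies when a queue must be torn down early, as in the btt path above: once a devm action is registered, any manual teardown has to unregister it first, or the callback would run a second time at unbind on an already-destroyed queue. The pairing, in isolation:

/* Unregister the action before manual cleanup, otherwise
 * pmem_release_queue() would call blk_cleanup_queue() again at
 * device unbind on a dead queue (use-after-free). */
devm_remove_action(dev, pmem_release_queue, pmem->pmem_queue);
blk_cleanup_queue(pmem->pmem_queue);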
@@ -579,7 +577,6 @@ static int nd_pmem_probe(struct device *dev)
 	 * We'll come back as either btt-pmem, or pfn-pmem, so
 	 * drop the queue allocation for now.
 	 */
-	blk_cleanup_queue(pmem->pmem_queue);
 	return -ENXIO;
 }
 
@@ -588,15 +585,8 @@ static int nd_pmem_probe(struct device *dev)
 
 static int nd_pmem_remove(struct device *dev)
 {
-	struct pmem_device *pmem = dev_get_drvdata(dev);
-
 	if (is_nd_btt(dev))
 		nvdimm_namespace_detach_btt(to_nd_btt(dev));
-	else if (is_nd_pfn(dev))
-		nvdimm_namespace_detach_pfn(to_nd_pfn(dev));
-	else
-		pmem_detach_disk(pmem);
-
 	return 0;
 }
 