 #include <linux/module.h>
 #include <linux/mutex.h>
 #include <linux/ndctl.h>
+#include <linux/sysfs.h>
 #include <linux/delay.h>
 #include <linux/list.h>
 #include <linux/acpi.h>
@@ -874,14 +875,87 @@ static ssize_t revision_show(struct device *dev,
 }
 static DEVICE_ATTR_RO(revision);
 
+/*
+ * This shows the number of full Address Range Scrubs that have been
+ * completed since driver load time. Userspace can wait on this using
+ * select/poll etc. A '+' at the end indicates an ARS is in progress
+ */
+static ssize_t scrub_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct nvdimm_bus_descriptor *nd_desc;
+	ssize_t rc = -ENXIO;
+
+	device_lock(dev);
+	nd_desc = dev_get_drvdata(dev);
+	if (nd_desc) {
+		struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc);
+
+		rc = sprintf(buf, "%d%s", acpi_desc->scrub_count,
+				(work_busy(&acpi_desc->work)) ? "+\n" : "\n");
+	}
+	device_unlock(dev);
+	return rc;
+}
+
+static int acpi_nfit_ars_rescan(struct acpi_nfit_desc *acpi_desc);
+
+static ssize_t scrub_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t size)
+{
+	struct nvdimm_bus_descriptor *nd_desc;
+	ssize_t rc;
+	long val;
+
+	rc = kstrtol(buf, 0, &val);
+	if (rc)
+		return rc;
+	if (val != 1)
+		return -EINVAL;
+
+	device_lock(dev);
+	nd_desc = dev_get_drvdata(dev);
+	if (nd_desc) {
+		struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc);
+
+		rc = acpi_nfit_ars_rescan(acpi_desc);
+	}
+	device_unlock(dev);
+	if (rc)
+		return rc;
+	return size;
+}
+static DEVICE_ATTR_RW(scrub);
+
+static bool ars_supported(struct nvdimm_bus *nvdimm_bus)
+{
+	struct nvdimm_bus_descriptor *nd_desc = to_nd_desc(nvdimm_bus);
+	const unsigned long mask = 1 << ND_CMD_ARS_CAP | 1 << ND_CMD_ARS_START
+		| 1 << ND_CMD_ARS_STATUS;
+
+	return (nd_desc->cmd_mask & mask) == mask;
+}
+
+static umode_t nfit_visible(struct kobject *kobj, struct attribute *a, int n)
+{
+	struct device *dev = container_of(kobj, struct device, kobj);
+	struct nvdimm_bus *nvdimm_bus = to_nvdimm_bus(dev);
+
+	if (a == &dev_attr_scrub.attr && !ars_supported(nvdimm_bus))
+		return 0;
+	return a->mode;
+}
+
 static struct attribute *acpi_nfit_attributes[] = {
 	&dev_attr_revision.attr,
+	&dev_attr_scrub.attr,
 	NULL,
 };
 
 static struct attribute_group acpi_nfit_attribute_group = {
 	.name = "nfit",
 	.attrs = acpi_nfit_attributes,
+	.is_visible = nfit_visible,
 };
 
 static const struct attribute_group *acpi_nfit_attribute_groups[] = {
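The comment above scrub_show() notes that userspace can wait on this attribute with select/poll. The snippet below is an illustration of that interface and is not part of the patch: it opens the scrub file, reads the current count, then waits in poll() for POLLPRI | POLLERR (what sysfs raises when the driver calls sysfs_notify_dirent()), and re-reads after each wakeup until the trailing '+' disappears. The exact sysfs path varies by platform (the attribute lives in the "nfit" group of the nvdimm bus device), so the program takes the path as its first argument.

/*
 * Illustrative userspace consumer of the "scrub" attribute; not part of
 * the patch. Usage: ./wait_scrub /sys/.../ndbusN/nfit/scrub
 */
#include <fcntl.h>
#include <poll.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(int argc, char **argv)
{
	char buf[16];
	ssize_t n;
	int fd;

	if (argc < 2) {
		fprintf(stderr, "usage: %s <path-to-nfit/scrub>\n", argv[0]);
		return 1;
	}
	fd = open(argv[1], O_RDONLY);
	if (fd < 0) {
		perror("open");
		return 1;
	}

	for (;;) {
		struct pollfd pfd = { .fd = fd, .events = POLLPRI | POLLERR };

		/* rewind and read; this also arms the sysfs notification */
		lseek(fd, 0, SEEK_SET);
		n = read(fd, buf, sizeof(buf) - 1);
		if (n <= 0)
			break;
		buf[n] = '\0';
		printf("scrub: %s", buf);

		/* a trailing '+' means an ARS is still in progress */
		if (!strchr(buf, '+'))
			break;

		/* sysfs_notify_dirent() in acpi_nfit_scrub() wakes this up */
		if (poll(&pfd, 1, -1) < 0)
			break;
	}
	close(fd);
	return 0;
}

Writing "1" to the same file (scrub_store() above) invokes acpi_nfit_ars_rescan(), defined later in this patch, so a tool can trigger a scrub and then wait on the attribute in exactly this way; if a scrub worker is already running, the write fails with -EBUSY.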
@@ -2054,7 +2128,7 @@ static void acpi_nfit_async_scrub(struct acpi_nfit_desc *acpi_desc,
 	unsigned int tmo = scrub_timeout;
 	int rc;
 
-	if (nfit_spa->ars_done || !nfit_spa->nd_region)
+	if (!nfit_spa->ars_required || !nfit_spa->nd_region)
 		return;
 
 	rc = ars_start(acpi_desc, nfit_spa);
@@ -2143,7 +2217,9 @@ static void acpi_nfit_scrub(struct work_struct *work)
 	 * firmware initiated scrubs to complete and then we go search for the
 	 * affected spa regions to mark them scanned. In the second phase we
 	 * initiate a directed scrub for every range that was not scrubbed in
-	 * phase 1.
+	 * phase 1. If we're called for a 'rescan', we harmlessly pass through
+	 * the first phase, but really only care about running phase 2, where
+	 * regions can be notified of new poison.
 	 */
 
 	/* process platform firmware initiated scrubs */
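To make the two-phase description in the comment above concrete, here is a toy userspace model; it is not the driver code, and the real worker also has to coordinate with platform-firmware-initiated scrubs and report poisoned ranges. The model keeps only the gating that matters for a rescan: ranges seen for the first time are registered and flagged, the directed-scrub pass runs only where ars_required is set (mirroring the early return added to acpi_nfit_async_scrub() above), and acpi_nfit_ars_rescan() simply re-arms the flag before the worker runs again, so the firmware-scrub phase is a harmless pass-through on a rescan.

/*
 * Toy model of the rescan flow; not the driver code. Field names mirror
 * the patch, but the types and the flag clearing are stand-ins.
 */
#include <stdbool.h>
#include <stdio.h>

struct spa_range {
	const char *name;
	bool registered;	/* stands in for nfit_spa->nd_region */
	bool ars_required;	/* stands in for nfit_spa->ars_required */
};

static int scrub_count;

static void scrub_worker(struct spa_range *spas, int n)
{
	/* ranges seen for the first time are registered and flagged */
	for (int i = 0; i < n; i++) {
		if (!spas[i].registered) {
			spas[i].ars_required = true;
			spas[i].registered = true;	/* acpi_nfit_register_region() */
		}
	}

	/* directed scrubs (phase 2): only where ars_required is set */
	for (int i = 0; i < n; i++) {
		if (!spas[i].ars_required)
			continue;		/* acpi_nfit_async_scrub() returns early */
		printf("ARS on %s\n", spas[i].name);
		spas[i].ars_required = false;	/* model-only: mark it handled */
	}

	scrub_count++;		/* the driver also pokes sysfs_notify_dirent() here */
}

/* models acpi_nfit_ars_rescan(): re-arm the flag before the worker re-runs */
static void ars_rescan(struct spa_range *spas, int n)
{
	for (int i = 0; i < n; i++)
		spas[i].ars_required = true;	/* only NFIT_SPA_PM ranges in the patch */
}

int main(void)
{
	struct spa_range spas[] = { { .name = "pm0" }, { .name = "pm1" } };

	scrub_worker(spas, 2);	/* initial pass: register + scrub everything */
	ars_rescan(spas, 2);	/* user wrote 1 to the scrub attribute */
	scrub_worker(spas, 2);	/* nothing new to register, directed scrubs re-run */
	printf("scrub_count=%d\n", scrub_count);
	return 0;
}

Reusing the same work item for both the initial scan and rescans is what lets the scrub_show() count double as a completion signal: every pass, initial or on-demand, ends by bumping scrub_count and notifying pollers, as the next hunk shows.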
@@ -2246,14 +2322,17 @@ static void acpi_nfit_scrub(struct work_struct *work)
 		 * Flag all the ranges that still need scrubbing, but
 		 * register them now to make data available.
 		 */
-		if (nfit_spa->nd_region)
-			nfit_spa->ars_done = 1;
-		else
+		if (!nfit_spa->nd_region) {
+			nfit_spa->ars_required = 1;
 			acpi_nfit_register_region(acpi_desc, nfit_spa);
+		}
 	}
 
 	list_for_each_entry(nfit_spa, &acpi_desc->spas, list)
 		acpi_nfit_async_scrub(acpi_desc, nfit_spa);
+	acpi_desc->scrub_count++;
+	if (acpi_desc->scrub_count_state)
+		sysfs_notify_dirent(acpi_desc->scrub_count_state);
 	mutex_unlock(&acpi_desc->init_mutex);
 }
 
@@ -2291,12 +2370,48 @@ static int acpi_nfit_check_deletions(struct acpi_nfit_desc *acpi_desc,
 	return 0;
 }
 
+static int acpi_nfit_desc_init_scrub_attr(struct acpi_nfit_desc *acpi_desc)
+{
+	struct device *dev = acpi_desc->dev;
+	struct kernfs_node *nfit;
+	struct device *bus_dev;
+
+	if (!ars_supported(acpi_desc->nvdimm_bus))
+		return 0;
+
+	bus_dev = to_nvdimm_bus_dev(acpi_desc->nvdimm_bus);
+	nfit = sysfs_get_dirent(bus_dev->kobj.sd, "nfit");
+	if (!nfit) {
+		dev_err(dev, "sysfs_get_dirent 'nfit' failed\n");
+		return -ENODEV;
+	}
+	acpi_desc->scrub_count_state = sysfs_get_dirent(nfit, "scrub");
+	sysfs_put(nfit);
+	if (!acpi_desc->scrub_count_state) {
+		dev_err(dev, "sysfs_get_dirent 'scrub' failed\n");
+		return -ENODEV;
+	}
+
+	return 0;
+}
+
 static void acpi_nfit_destruct(void *data)
 {
 	struct acpi_nfit_desc *acpi_desc = data;
+	struct device *bus_dev = to_nvdimm_bus_dev(acpi_desc->nvdimm_bus);
 
 	acpi_desc->cancel = 1;
+	/*
+	 * Bounce the nvdimm bus lock to make sure any in-flight
+	 * acpi_nfit_ars_rescan() submissions have had a chance to
+	 * either submit or see ->cancel set.
+	 */
+	device_lock(bus_dev);
+	device_unlock(bus_dev);
+
 	flush_workqueue(nfit_wq);
+	if (acpi_desc->scrub_count_state)
+		sysfs_put(acpi_desc->scrub_count_state);
 	nvdimm_bus_unregister(acpi_desc->nvdimm_bus);
 	acpi_desc->nvdimm_bus = NULL;
 }
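The new comment in acpi_nfit_destruct() documents a lock-bounce handshake: scrub_store() calls acpi_nfit_ars_rescan() while holding the bus device lock, and the rescan checks ->cancel before queuing work, so setting ->cancel and then taking and dropping that same lock guarantees that any racing submission has either already queued (and will be drained by flush_workqueue()) or will observe the flag and bail. Below is a userspace analogy, not driver code, with a pthread mutex standing in for device_lock() and a C11 atomic standing in for acpi_desc->cancel.

/*
 * Userspace analogy for the "bounce the lock" teardown idiom; not driver
 * code. The mutex stands in for device_lock(bus_dev), the atomic flag for
 * acpi_desc->cancel, and the counter for work queued on nfit_wq.
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

static pthread_mutex_t bus_lock = PTHREAD_MUTEX_INITIALIZER;
static atomic_bool cancel;
static int queued;

/* models scrub_store() -> acpi_nfit_ars_rescan() */
static void *submit_rescan(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&bus_lock);		/* device_lock(dev) in scrub_store() */
	if (!atomic_load(&cancel))
		queued++;			/* queue_work(nfit_wq, &acpi_desc->work) */
	pthread_mutex_unlock(&bus_lock);
	return NULL;
}

/* models acpi_nfit_destruct() */
static void destruct(void)
{
	atomic_store(&cancel, true);
	/* bounce the lock: any in-flight submitter has now queued or bailed */
	pthread_mutex_lock(&bus_lock);
	pthread_mutex_unlock(&bus_lock);
	/* from here on no new work can appear, so flushing is race-free */
	printf("flush_workqueue(): %d item(s) to drain\n", queued);
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, submit_rescan, NULL);
	destruct();
	pthread_join(&t, NULL);
	return 0;
}

The submitter side of this handshake in the driver is the acpi_desc->cancel check at the top of acpi_nfit_ars_rescan() further down in the patch.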
@@ -2309,14 +2424,21 @@ int acpi_nfit_init(struct acpi_nfit_desc *acpi_desc, void *data, acpi_size sz)
 	int rc;
 
 	if (!acpi_desc->nvdimm_bus) {
+		acpi_nfit_init_dsms(acpi_desc);
+
 		acpi_desc->nvdimm_bus = nvdimm_bus_register(dev,
 				&acpi_desc->nd_desc);
 		if (!acpi_desc->nvdimm_bus)
 			return -ENOMEM;
+
 		rc = devm_add_action_or_reset(dev, acpi_nfit_destruct,
 				acpi_desc);
 		if (rc)
 			return rc;
+
+		rc = acpi_nfit_desc_init_scrub_attr(acpi_desc);
+		if (rc)
+			return rc;
 	}
 
 	mutex_lock(&acpi_desc->init_mutex);
@@ -2360,8 +2482,6 @@ int acpi_nfit_init(struct acpi_nfit_desc *acpi_desc, void *data, acpi_size sz)
 	if (rc)
 		goto out_unlock;
 
-	acpi_nfit_init_dsms(acpi_desc);
-
 	rc = acpi_nfit_register_dimms(acpi_desc);
 	if (rc)
 		goto out_unlock;
@@ -2429,6 +2549,33 @@ static int acpi_nfit_clear_to_send(struct nvdimm_bus_descriptor *nd_desc,
 	return 0;
 }
 
+static int acpi_nfit_ars_rescan(struct acpi_nfit_desc *acpi_desc)
+{
+	struct device *dev = acpi_desc->dev;
+	struct nfit_spa *nfit_spa;
+
+	if (work_busy(&acpi_desc->work))
+		return -EBUSY;
+
+	if (acpi_desc->cancel)
+		return 0;
+
+	mutex_lock(&acpi_desc->init_mutex);
+	list_for_each_entry(nfit_spa, &acpi_desc->spas, list) {
+		struct acpi_nfit_system_address *spa = nfit_spa->spa;
+
+		if (nfit_spa_type(spa) != NFIT_SPA_PM)
+			continue;
+
+		nfit_spa->ars_required = 1;
+	}
+	queue_work(nfit_wq, &acpi_desc->work);
+	dev_dbg(dev, "%s: ars_scan triggered\n", __func__);
+	mutex_unlock(&acpi_desc->init_mutex);
+
+	return 0;
+}
+
 void acpi_nfit_desc_init(struct acpi_nfit_desc *acpi_desc, struct device *dev)
 {
 	struct nvdimm_bus_descriptor *nd_desc;