
Commit b177fe8

stellarhopper authored and djbw committed
libnvdimm, btt: ensure that initializing metadata clears poison
If we had badblocks/poison in the metadata area of a BTT, recreating the
BTT would not clear the poison in all cases, notably the flog area. This
is because rw_bytes will only clear errors if the request being sent down
is 512B aligned and sized.

Make sure that when writing the map and info blocks, the rw_bytes being
sent are of the correct size/alignment. For the flog, instead of doing the
smaller log_entry writes only, first do a 'wipe' of the entire area by
writing zeroes in large enough chunks so that errors get cleared.

Cc: Andy Rudoff <[email protected]>
Cc: Dan Williams <[email protected]>
Signed-off-by: Vishal Verma <[email protected]>
Signed-off-by: Dan Williams <[email protected]>
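As context for the change below, the flog fix boils down to a chunked zero-wipe: walk the whole region in large, 512B-multiple writes so every poisoned block gets overwritten rather than skipped. The following userspace sketch illustrates only that pattern; region_write(), CHUNK_SIZE, wipe_region(), and the offsets in main() are hypothetical stand-ins for arena_write_bytes(), SZ_4K, and the real BTT layout, not code from the kernel tree.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define CHUNK_SIZE 4096	/* stand-in for SZ_4K used by the patch */

/*
 * Stand-in for arena_write_bytes(): pretend to write 'len' bytes of 'buf'
 * at 'off'.  In the kernel, a 512B-aligned, 512B-multiple write is what
 * allows the underlying rw_bytes path to clear known poison.
 */
static int region_write(uint64_t off, const void *buf, size_t len)
{
	(void)buf;
	printf("write %zu bytes at offset 0x%llx\n", len,
			(unsigned long long)off);
	return 0;
}

/*
 * Zero an entire region in large chunks so every badblock inside it is
 * overwritten (and therefore cleared), mirroring the flog wipe added by
 * this commit.
 */
static int wipe_region(uint64_t start, size_t size)
{
	void *zerobuf = calloc(1, CHUNK_SIZE);
	size_t offset = 0;
	int ret = 0;

	if (!zerobuf)
		return -1;
	if (start % 512 || size % 512)
		fprintf(stderr, "warning: region not 512B aligned/sized\n");

	while (size) {
		size_t chunk = size < CHUNK_SIZE ? size : CHUNK_SIZE;

		ret = region_write(start + offset, zerobuf, chunk);
		if (ret)
			break;
		offset += chunk;
		size -= chunk;
	}
	free(zerobuf);
	return ret;
}

int main(void)
{
	/* Hypothetical flog region: offset and size are made-up numbers. */
	return wipe_region(0x200000, 24576) ? 1 : 0;
}

In the kernel patch itself, the same loop is driven by arena_write_bytes() with a kzalloc'd 4K zero buffer and a cond_resched() between chunks, as the diff below shows.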
1 parent 3ae3d67 commit b177fe8

File tree: 1 file changed, +47 -7 lines changed

drivers/nvdimm/btt.c

Lines changed: 47 additions & 7 deletions
@@ -57,6 +57,14 @@ static int btt_info_write(struct arena_info *arena, struct btt_sb *super)
 {
 	int ret;
 
+	/*
+	 * infooff and info2off should always be at least 512B aligned.
+	 * We rely on that to make sure rw_bytes does error clearing
+	 * correctly, so make sure that is the case.
+	 */
+	WARN_ON_ONCE(!IS_ALIGNED(arena->infooff, 512));
+	WARN_ON_ONCE(!IS_ALIGNED(arena->info2off, 512));
+
 	ret = arena_write_bytes(arena, arena->info2off, super,
 			sizeof(struct btt_sb), 0);
 	if (ret)
@@ -394,9 +402,17 @@ static int btt_map_init(struct arena_info *arena)
 	if (!zerobuf)
 		return -ENOMEM;
 
+	/*
+	 * mapoff should always be at least 512B aligned. We rely on that to
+	 * make sure rw_bytes does error clearing correctly, so make sure that
+	 * is the case.
+	 */
+	WARN_ON_ONCE(!IS_ALIGNED(arena->mapoff, 512));
+
 	while (mapsize) {
 		size_t size = min(mapsize, chunk_size);
 
+		WARN_ON_ONCE(size < 512);
 		ret = arena_write_bytes(arena, arena->mapoff + offset, zerobuf,
 				size, 0);
 		if (ret)
@@ -418,11 +434,36 @@ static int btt_map_init(struct arena_info *arena)
  */
 static int btt_log_init(struct arena_info *arena)
 {
+	size_t logsize = arena->info2off - arena->logoff;
+	size_t chunk_size = SZ_4K, offset = 0;
+	struct log_entry log;
+	void *zerobuf;
 	int ret;
 	u32 i;
-	struct log_entry log, zerolog;
 
-	memset(&zerolog, 0, sizeof(zerolog));
+	zerobuf = kzalloc(chunk_size, GFP_KERNEL);
+	if (!zerobuf)
+		return -ENOMEM;
+	/*
+	 * logoff should always be at least 512B aligned. We rely on that to
+	 * make sure rw_bytes does error clearing correctly, so make sure that
+	 * is the case.
+	 */
+	WARN_ON_ONCE(!IS_ALIGNED(arena->logoff, 512));
+
+	while (logsize) {
+		size_t size = min(logsize, chunk_size);
+
+		WARN_ON_ONCE(size < 512);
+		ret = arena_write_bytes(arena, arena->logoff + offset, zerobuf,
+				size, 0);
+		if (ret)
+			goto free;
+
+		offset += size;
+		logsize -= size;
+		cond_resched();
+	}
 
 	for (i = 0; i < arena->nfree; i++) {
 		log.lba = cpu_to_le32(i);
@@ -431,13 +472,12 @@ static int btt_log_init(struct arena_info *arena)
 		log.seq = cpu_to_le32(LOG_SEQ_INIT);
 		ret = __btt_log_write(arena, i, 0, &log, 0);
 		if (ret)
-			return ret;
-		ret = __btt_log_write(arena, i, 1, &zerolog, 0);
-		if (ret)
-			return ret;
+			goto free;
 	}
 
-	return 0;
+ free:
+	kfree(zerobuf);
+	return ret;
 }
 
 static int btt_freelist_init(struct arena_info *arena)
