Skip to content

Commit dd4f699

Browse files
lge authored and axboe committed
drbd: when receiving P_TRIM, zero-out partial unaligned chunks
We can avoid spurious data divergence caused by partially-ignored discards on certain backends with discard_zeroes_data=0, if we translate partial unaligned discard requests into explicit zero-out. The relevant use case is LVM/DM thin. If on different nodes, DRBD is backed by devices with differing discard characteristics, discards may lead to data divergence (old data or garbage left over on one backend, zeroes due to unmapped areas on the other backend). Online verify would now potentially report tons of spurious differences. While probably harmless for most use cases (fstrim on a file system), DRBD cannot have that, it would violate our promise to upper layers that our data instances on the nodes are identical. To be correct and play safe (make sure data is identical on both copies), we would have to disable discard support, if our local backend (on a Primary) does not support "discard_zeroes_data=true". We'd also have to translate discards to explicit zero-out on the receiving (typically: Secondary) side, unless the receiving side supports "discard_zeroes_data=true". Which both would allocate those blocks, instead of unmapping them, in contrast with expectations. LVM/DM thin does set discard_zeroes_data=0, because it silently ignores discards to partial chunks. We can work around this by checking the alignment first. For unaligned (wrt. alignment and granularity) or too small discards, we zero-out the initial (and/or) trailing unaligned partial chunks, but discard all the aligned full chunks. At least for LVM/DM thin, the result is effectively "discard_zeroes_data=1". Arguably it should behave this way internally, by default, and we'll try to make that happen. But our workaround is still valid for already deployed setups, and for other devices that may behave this way. Setting discard-zeroes-if-aligned=yes will allow DRBD to use discards, and to announce discard_zeroes_data=true, even on backends that announce discard_zeroes_data=false. 
Setting discard-zeroes-if-aligned=no will cause DRBD to always fall-back to zero-out on the receiving side, and to not even announce discard capabilities on the Primary, if the respective backend announces discard_zeroes_data=false. We used to ignore the discard_zeroes_data setting completely. To not break established and expected behaviour, and suddenly cause fstrim on thin-provisioned LVs to run out-of-space, instead of freeing up space, the default value is "yes". Signed-off-by: Philipp Reisner <[email protected]> Signed-off-by: Lars Ellenberg <[email protected]> Signed-off-by: Jens Axboe <[email protected]>
1 parent f9ff0da commit dd4f699

File tree

5 files changed

+134
-30
lines changed

5 files changed

+134
-30
lines changed

drivers/block/drbd/drbd_int.h

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1488,7 +1488,7 @@ enum determine_dev_size {
14881488
extern enum determine_dev_size
14891489
drbd_determine_dev_size(struct drbd_device *, enum dds_flags, struct resize_parms *) __must_hold(local);
14901490
extern void resync_after_online_grow(struct drbd_device *);
1491-
extern void drbd_reconsider_max_bio_size(struct drbd_device *device, struct drbd_backing_dev *bdev);
1491+
extern void drbd_reconsider_queue_parameters(struct drbd_device *device, struct drbd_backing_dev *bdev);
14921492
extern enum drbd_state_rv drbd_set_role(struct drbd_device *device,
14931493
enum drbd_role new_role,
14941494
int force);

drivers/block/drbd/drbd_nl.c

Lines changed: 11 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -1161,13 +1161,17 @@ static void drbd_setup_queue_param(struct drbd_device *device, struct drbd_backi
11611161
unsigned int max_hw_sectors = max_bio_size >> 9;
11621162
unsigned int max_segments = 0;
11631163
struct request_queue *b = NULL;
1164+
struct disk_conf *dc;
1165+
bool discard_zeroes_if_aligned = true;
11641166

11651167
if (bdev) {
11661168
b = bdev->backing_bdev->bd_disk->queue;
11671169

11681170
max_hw_sectors = min(queue_max_hw_sectors(b), max_bio_size >> 9);
11691171
rcu_read_lock();
1170-
max_segments = rcu_dereference(device->ldev->disk_conf)->max_bio_bvecs;
1172+
dc = rcu_dereference(device->ldev->disk_conf);
1173+
max_segments = dc->max_bio_bvecs;
1174+
discard_zeroes_if_aligned = dc->discard_zeroes_if_aligned;
11711175
rcu_read_unlock();
11721176

11731177
blk_set_stacking_limits(&q->limits);
@@ -1185,7 +1189,7 @@ static void drbd_setup_queue_param(struct drbd_device *device, struct drbd_backi
11851189

11861190
blk_queue_max_discard_sectors(q, DRBD_MAX_DISCARD_SECTORS);
11871191

1188-
if (blk_queue_discard(b) &&
1192+
if (blk_queue_discard(b) && (b->limits.discard_zeroes_data || discard_zeroes_if_aligned) &&
11891193
(connection->cstate < C_CONNECTED || connection->agreed_features & FF_TRIM)) {
11901194
/* We don't care, stacking below should fix it for the local device.
11911195
* Whether or not it is a suitable granularity on the remote device
@@ -1216,7 +1220,7 @@ static void drbd_setup_queue_param(struct drbd_device *device, struct drbd_backi
12161220
}
12171221
}
12181222

1219-
void drbd_reconsider_max_bio_size(struct drbd_device *device, struct drbd_backing_dev *bdev)
1223+
void drbd_reconsider_queue_parameters(struct drbd_device *device, struct drbd_backing_dev *bdev)
12201224
{
12211225
unsigned int now, new, local, peer;
12221226

@@ -1488,6 +1492,9 @@ int drbd_adm_disk_opts(struct sk_buff *skb, struct genl_info *info)
14881492
if (write_ordering_changed(old_disk_conf, new_disk_conf))
14891493
drbd_bump_write_ordering(device->resource, NULL, WO_BDEV_FLUSH);
14901494

1495+
if (old_disk_conf->discard_zeroes_if_aligned != new_disk_conf->discard_zeroes_if_aligned)
1496+
drbd_reconsider_queue_parameters(device, device->ldev);
1497+
14911498
drbd_md_sync(device);
14921499

14931500
if (device->state.conn >= C_CONNECTED) {
@@ -1866,7 +1873,7 @@ int drbd_adm_attach(struct sk_buff *skb, struct genl_info *info)
18661873
device->read_cnt = 0;
18671874
device->writ_cnt = 0;
18681875

1869-
drbd_reconsider_max_bio_size(device, device->ldev);
1876+
drbd_reconsider_queue_parameters(device, device->ldev);
18701877

18711878
/* If I am currently not R_PRIMARY,
18721879
* but meta data primary indicator is set,

drivers/block/drbd/drbd_receiver.c

Lines changed: 115 additions & 25 deletions
Original file line numberDiff line numberDiff line change
@@ -1443,6 +1443,108 @@ void drbd_bump_write_ordering(struct drbd_resource *resource, struct drbd_backin
14431443
drbd_info(resource, "Method to ensure write ordering: %s\n", write_ordering_str[resource->write_ordering]);
14441444
}
14451445

1446+
/*
1447+
* We *may* ignore the discard-zeroes-data setting, if so configured.
1448+
*
1449+
* Assumption is that it "discard_zeroes_data=0" is only because the backend
1450+
* may ignore partial unaligned discards.
1451+
*
1452+
* LVM/DM thin as of at least
1453+
* LVM version: 2.02.115(2)-RHEL7 (2015-01-28)
1454+
* Library version: 1.02.93-RHEL7 (2015-01-28)
1455+
* Driver version: 4.29.0
1456+
* still behaves this way.
1457+
*
1458+
* For unaligned (wrt. alignment and granularity) or too small discards,
1459+
* we zero-out the initial (and/or) trailing unaligned partial chunks,
1460+
* but discard all the aligned full chunks.
1461+
*
1462+
* At least for LVM/DM thin, the result is effectively "discard_zeroes_data=1".
1463+
*/
1464+
int drbd_issue_discard_or_zero_out(struct drbd_device *device, sector_t start, unsigned int nr_sectors, bool discard)
1465+
{
1466+
struct block_device *bdev = device->ldev->backing_bdev;
1467+
struct request_queue *q = bdev_get_queue(bdev);
1468+
sector_t tmp, nr;
1469+
unsigned int max_discard_sectors, granularity;
1470+
int alignment;
1471+
int err = 0;
1472+
1473+
if (!discard)
1474+
goto zero_out;
1475+
1476+
/* Zero-sector (unknown) and one-sector granularities are the same. */
1477+
granularity = max(q->limits.discard_granularity >> 9, 1U);
1478+
alignment = (bdev_discard_alignment(bdev) >> 9) % granularity;
1479+
1480+
max_discard_sectors = min(q->limits.max_discard_sectors, (1U << 22));
1481+
max_discard_sectors -= max_discard_sectors % granularity;
1482+
if (unlikely(!max_discard_sectors))
1483+
goto zero_out;
1484+
1485+
if (nr_sectors < granularity)
1486+
goto zero_out;
1487+
1488+
tmp = start;
1489+
if (sector_div(tmp, granularity) != alignment) {
1490+
if (nr_sectors < 2*granularity)
1491+
goto zero_out;
1492+
/* start + gran - (start + gran - align) % gran */
1493+
tmp = start + granularity - alignment;
1494+
tmp = start + granularity - sector_div(tmp, granularity);
1495+
1496+
nr = tmp - start;
1497+
err |= blkdev_issue_zeroout(bdev, start, nr, GFP_NOIO, 0);
1498+
nr_sectors -= nr;
1499+
start = tmp;
1500+
}
1501+
while (nr_sectors >= granularity) {
1502+
nr = min_t(sector_t, nr_sectors, max_discard_sectors);
1503+
err |= blkdev_issue_discard(bdev, start, nr, GFP_NOIO, 0);
1504+
nr_sectors -= nr;
1505+
start += nr;
1506+
}
1507+
zero_out:
1508+
if (nr_sectors) {
1509+
err |= blkdev_issue_zeroout(bdev, start, nr_sectors, GFP_NOIO, 0);
1510+
}
1511+
return err != 0;
1512+
}
1513+
1514+
static bool can_do_reliable_discards(struct drbd_device *device)
1515+
{
1516+
struct request_queue *q = bdev_get_queue(device->ldev->backing_bdev);
1517+
struct disk_conf *dc;
1518+
bool can_do;
1519+
1520+
if (!blk_queue_discard(q))
1521+
return false;
1522+
1523+
if (q->limits.discard_zeroes_data)
1524+
return true;
1525+
1526+
rcu_read_lock();
1527+
dc = rcu_dereference(device->ldev->disk_conf);
1528+
can_do = dc->discard_zeroes_if_aligned;
1529+
rcu_read_unlock();
1530+
return can_do;
1531+
}
1532+
1533+
void drbd_issue_peer_discard(struct drbd_device *device, struct drbd_peer_request *peer_req)
1534+
{
1535+
/* If the backend cannot discard, or does not guarantee
1536+
* read-back zeroes in discarded ranges, we fall back to
1537+
* zero-out. Unless configuration specifically requested
1538+
* otherwise. */
1539+
if (!can_do_reliable_discards(device))
1540+
peer_req->flags |= EE_IS_TRIM_USE_ZEROOUT;
1541+
1542+
if (drbd_issue_discard_or_zero_out(device, peer_req->i.sector,
1543+
peer_req->i.size >> 9, !(peer_req->flags & EE_IS_TRIM_USE_ZEROOUT)))
1544+
peer_req->flags |= EE_WAS_ERROR;
1545+
drbd_endio_write_sec_final(peer_req);
1546+
}
1547+
14461548
/**
14471549
* drbd_submit_peer_request()
14481550
* @device: DRBD device.
@@ -1474,7 +1576,13 @@ int drbd_submit_peer_request(struct drbd_device *device,
14741576
unsigned nr_pages = (data_size + PAGE_SIZE -1) >> PAGE_SHIFT;
14751577
int err = -ENOMEM;
14761578

1477-
if (peer_req->flags & EE_IS_TRIM_USE_ZEROOUT) {
1579+
/* TRIM/DISCARD: for now, always use the helper function
1580+
* blkdev_issue_zeroout(..., discard=true).
1581+
* It's synchronous, but it does the right thing wrt. bio splitting.
1582+
* Correctness first, performance later. Next step is to code an
1583+
* asynchronous variant of the same.
1584+
*/
1585+
if (peer_req->flags & EE_IS_TRIM) {
14781586
/* wait for all pending IO completions, before we start
14791587
* zeroing things out. */
14801588
conn_wait_active_ee_empty(peer_req->peer_device->connection);
@@ -1491,19 +1599,10 @@ int drbd_submit_peer_request(struct drbd_device *device,
14911599
spin_unlock_irq(&device->resource->req_lock);
14921600
}
14931601

1494-
if (blkdev_issue_zeroout(device->ldev->backing_bdev,
1495-
sector, data_size >> 9, GFP_NOIO, false))
1496-
peer_req->flags |= EE_WAS_ERROR;
1497-
drbd_endio_write_sec_final(peer_req);
1602+
drbd_issue_peer_discard(device, peer_req);
14981603
return 0;
14991604
}
15001605

1501-
/* Discards don't have any payload.
1502-
* But the scsi layer still expects a bio_vec it can use internally,
1503-
* see sd_setup_discard_cmnd() and blk_add_request_payload(). */
1504-
if (peer_req->flags & EE_IS_TRIM)
1505-
nr_pages = 1;
1506-
15071606
/* In most cases, we will only need one bio. But in case the lower
15081607
* level restrictions happen to be different at this offset on this
15091608
* side than those of the sending peer, we may need to submit the
@@ -1529,11 +1628,6 @@ int drbd_submit_peer_request(struct drbd_device *device,
15291628
bios = bio;
15301629
++n_bios;
15311630

1532-
if (op == REQ_OP_DISCARD) {
1533-
bio->bi_iter.bi_size = data_size;
1534-
goto submit;
1535-
}
1536-
15371631
page_chain_for_each(page) {
15381632
unsigned len = min_t(unsigned, data_size, PAGE_SIZE);
15391633
if (!bio_add_page(bio, page, len, 0)) {
@@ -1555,7 +1649,6 @@ int drbd_submit_peer_request(struct drbd_device *device,
15551649
--nr_pages;
15561650
}
15571651
D_ASSERT(device, data_size == 0);
1558-
submit:
15591652
D_ASSERT(device, page == NULL);
15601653

15611654
atomic_set(&peer_req->pending_bios, n_bios);
@@ -2424,10 +2517,7 @@ static int receive_Data(struct drbd_connection *connection, struct packet_info *
24242517
op = wire_flags_to_bio_op(dp_flags);
24252518
op_flags = wire_flags_to_bio_flags(dp_flags);
24262519
if (pi->cmd == P_TRIM) {
2427-
struct request_queue *q = bdev_get_queue(device->ldev->backing_bdev);
24282520
peer_req->flags |= EE_IS_TRIM;
2429-
if (!blk_queue_discard(q))
2430-
peer_req->flags |= EE_IS_TRIM_USE_ZEROOUT;
24312521
D_ASSERT(peer_device, peer_req->i.size > 0);
24322522
D_ASSERT(peer_device, op == REQ_OP_DISCARD);
24332523
D_ASSERT(peer_device, peer_req->pages == NULL);
@@ -2498,7 +2588,7 @@ static int receive_Data(struct drbd_connection *connection, struct packet_info *
24982588
* and we wait for all pending requests, respectively wait for
24992589
* active_ee to become empty in drbd_submit_peer_request();
25002590
* better not add ourselves here. */
2501-
if ((peer_req->flags & EE_IS_TRIM_USE_ZEROOUT) == 0)
2591+
if ((peer_req->flags & EE_IS_TRIM) == 0)
25022592
list_add_tail(&peer_req->w.list, &device->active_ee);
25032593
spin_unlock_irq(&device->resource->req_lock);
25042594

@@ -3916,14 +4006,14 @@ static int receive_sizes(struct drbd_connection *connection, struct packet_info
39164006
}
39174007

39184008
device->peer_max_bio_size = be32_to_cpu(p->max_bio_size);
3919-
/* Leave drbd_reconsider_max_bio_size() before drbd_determine_dev_size().
4009+
/* Leave drbd_reconsider_queue_parameters() before drbd_determine_dev_size().
39204010
In case we cleared the QUEUE_FLAG_DISCARD from our queue in
3921-
drbd_reconsider_max_bio_size(), we can be sure that after
4011+
drbd_reconsider_queue_parameters(), we can be sure that after
39224012
drbd_determine_dev_size() no REQ_DISCARDs are in the queue. */
39234013

39244014
ddsf = be16_to_cpu(p->dds_flags);
39254015
if (get_ldev(device)) {
3926-
drbd_reconsider_max_bio_size(device, device->ldev);
4016+
drbd_reconsider_queue_parameters(device, device->ldev);
39274017
dd = drbd_determine_dev_size(device, ddsf, NULL);
39284018
put_ldev(device);
39294019
if (dd == DS_ERROR)
@@ -3943,7 +4033,7 @@ static int receive_sizes(struct drbd_connection *connection, struct packet_info
39434033
* However, if he sends a zero current size,
39444034
* take his (user-capped or) backing disk size anyways.
39454035
*/
3946-
drbd_reconsider_max_bio_size(device, NULL);
4036+
drbd_reconsider_queue_parameters(device, NULL);
39474037
drbd_set_my_capacity(device, p_csize ?: p_usize ?: p_size);
39484038
}
39494039

include/linux/drbd_genl.h

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -132,6 +132,7 @@ GENL_struct(DRBD_NLA_DISK_CONF, 3, disk_conf,
132132
__flg_field_def(18, DRBD_GENLA_F_MANDATORY, disk_drain, DRBD_DISK_DRAIN_DEF)
133133
__flg_field_def(19, DRBD_GENLA_F_MANDATORY, md_flushes, DRBD_MD_FLUSHES_DEF)
134134
__flg_field_def(23, 0 /* OPTIONAL */, al_updates, DRBD_AL_UPDATES_DEF)
135+
__flg_field_def(24, 0 /* OPTIONAL */, discard_zeroes_if_aligned, DRBD_DISCARD_ZEROES_IF_ALIGNED)
135136
)
136137

137138
GENL_struct(DRBD_NLA_RESOURCE_OPTS, 4, res_opts,

include/linux/drbd_limits.h

Lines changed: 6 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -210,6 +210,12 @@
210210
#define DRBD_MD_FLUSHES_DEF 1
211211
#define DRBD_TCP_CORK_DEF 1
212212
#define DRBD_AL_UPDATES_DEF 1
213+
/* We used to ignore the discard_zeroes_data setting.
214+
* To not change established (and expected) behaviour,
215+
* by default assume that, for discard_zeroes_data=0,
216+
* we can make that an effective discard_zeroes_data=1,
217+
* if we only explicitly zero-out unaligned partial chunks. */
218+
#define DRBD_DISCARD_ZEROES_IF_ALIGNED 1
213219

214220
#define DRBD_ALLOW_TWO_PRIMARIES_DEF 0
215221
#define DRBD_ALWAYS_ASBP_DEF 0

0 commit comments

Comments
 (0)