
Commit 0372dd4

igaw authored and keithbusch committed
nvme: refactor ns info helpers
Pass in the nvme_ns_head pointer directly. This reduces the need on the caller side to have the nvme_ns data structure present, so the caller side can be refactored in the next step as well.

Reviewed-by: Christoph Hellwig <[email protected]>
Reviewed-by: Sagi Grimberg <[email protected]>
Reviewed-by: Hannes Reinecke <[email protected]>
Signed-off-by: Daniel Wagner <[email protected]>
Signed-off-by: Keith Busch <[email protected]>
1 parent 9419e71 commit 0372dd4
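
For reference, the conversion helpers at the heart of this change now take the nvme_ns_head pointer rather than a struct nvme_ns. Below is a minimal, standalone userspace sketch of the new calling convention; it is not kernel code. The struct is trimmed to the single field the helpers read, and the demo values are illustrative, while the shift formulas match the updated drivers/nvme/host/nvme.h shown further down.

#include <stdint.h>
#include <stdio.h>

#define SECTOR_SHIFT 9                  /* block layer works in 512-byte sectors */

/* Trimmed stand-in for struct nvme_ns_head: only the field the helpers use. */
struct nvme_ns_head {
        uint32_t lba_shift;             /* log2 of the namespace LBA size */
};

/* Convert a 512B sector number to a device logical block number. */
static inline uint64_t nvme_sect_to_lba(struct nvme_ns_head *head, uint64_t sector)
{
        return sector >> (head->lba_shift - SECTOR_SHIFT);
}

/* Convert a device logical block number to a 512B sector number. */
static inline uint64_t nvme_lba_to_sect(struct nvme_ns_head *head, uint64_t lba)
{
        return lba << (head->lba_shift - SECTOR_SHIFT);
}

int main(void)
{
        struct nvme_ns_head head = { .lba_shift = 12 };  /* 4096-byte LBA format (example) */

        /* Callers now pass the head directly, e.g. ns->head in the kernel. */
        printf("sector 4096 -> LBA %llu\n",
               (unsigned long long)nvme_sect_to_lba(&head, 4096));
        printf("LBA 512 -> sector %llu\n",
               (unsigned long long)nvme_lba_to_sect(&head, 512));
        return 0;
}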

File tree

4 files changed: +34 −28 lines changed

drivers/nvme/host/core.c

Lines changed: 21 additions & 15 deletions
@@ -316,7 +316,7 @@ static void nvme_log_error(struct request *req)
         ns->disk ? ns->disk->disk_name : "?",
         nvme_get_opcode_str(nr->cmd->common.opcode),
         nr->cmd->common.opcode,
-        nvme_sect_to_lba(ns, blk_rq_pos(req)),
+        nvme_sect_to_lba(ns->head, blk_rq_pos(req)),
         blk_rq_bytes(req) >> ns->head->lba_shift,
         nvme_get_error_status_str(nr->status),
         nr->status >> 8 & 7, /* Status Code Type */
@@ -372,9 +372,12 @@ static inline enum nvme_disposition nvme_decide_disposition(struct request *req)
 static inline void nvme_end_req_zoned(struct request *req)
 {
     if (IS_ENABLED(CONFIG_BLK_DEV_ZONED) &&
-        req_op(req) == REQ_OP_ZONE_APPEND)
-        req->__sector = nvme_lba_to_sect(req->q->queuedata,
+        req_op(req) == REQ_OP_ZONE_APPEND) {
+        struct nvme_ns *ns = req->q->queuedata;
+
+        req->__sector = nvme_lba_to_sect(ns->head,
             le64_to_cpu(nvme_req(req)->result.u64));
+    }
 }
 
 static inline void nvme_end_req(struct request *req)
@@ -791,7 +794,7 @@ static blk_status_t nvme_setup_discard(struct nvme_ns *ns, struct request *req,
     }
 
     if (queue_max_discard_segments(req->q) == 1) {
-        u64 slba = nvme_sect_to_lba(ns, blk_rq_pos(req));
+        u64 slba = nvme_sect_to_lba(ns->head, blk_rq_pos(req));
         u32 nlb = blk_rq_sectors(req) >> (ns->head->lba_shift - 9);
 
         range[0].cattr = cpu_to_le32(0);
@@ -800,7 +803,8 @@ static blk_status_t nvme_setup_discard(struct nvme_ns *ns, struct request *req,
         n = 1;
     } else {
         __rq_for_each_bio(bio, req) {
-            u64 slba = nvme_sect_to_lba(ns, bio->bi_iter.bi_sector);
+            u64 slba = nvme_sect_to_lba(ns->head,
+                    bio->bi_iter.bi_sector);
             u32 nlb = bio->bi_iter.bi_size >> ns->head->lba_shift;
 
             if (n < segments) {
@@ -867,15 +871,15 @@ static inline blk_status_t nvme_setup_write_zeroes(struct nvme_ns *ns,
     cmnd->write_zeroes.opcode = nvme_cmd_write_zeroes;
     cmnd->write_zeroes.nsid = cpu_to_le32(ns->head->ns_id);
     cmnd->write_zeroes.slba =
-        cpu_to_le64(nvme_sect_to_lba(ns, blk_rq_pos(req)));
+        cpu_to_le64(nvme_sect_to_lba(ns->head, blk_rq_pos(req)));
     cmnd->write_zeroes.length =
         cpu_to_le16((blk_rq_bytes(req) >> ns->head->lba_shift) - 1);
 
     if (!(req->cmd_flags & REQ_NOUNMAP) &&
         (ns->head->features & NVME_NS_DEAC))
         cmnd->write_zeroes.control |= cpu_to_le16(NVME_WZ_DEAC);
 
-    if (nvme_ns_has_pi(ns)) {
+    if (nvme_ns_has_pi(ns->head)) {
         cmnd->write_zeroes.control |= cpu_to_le16(NVME_RW_PRINFO_PRACT);
 
         switch (ns->head->pi_type) {
@@ -910,7 +914,8 @@ static inline blk_status_t nvme_setup_rw(struct nvme_ns *ns,
     cmnd->rw.cdw2 = 0;
     cmnd->rw.cdw3 = 0;
     cmnd->rw.metadata = 0;
-    cmnd->rw.slba = cpu_to_le64(nvme_sect_to_lba(ns, blk_rq_pos(req)));
+    cmnd->rw.slba =
+        cpu_to_le64(nvme_sect_to_lba(ns->head, blk_rq_pos(req)));
     cmnd->rw.length =
         cpu_to_le16((blk_rq_bytes(req) >> ns->head->lba_shift) - 1);
     cmnd->rw.reftag = 0;
@@ -925,7 +930,7 @@ static inline blk_status_t nvme_setup_rw(struct nvme_ns *ns,
          * namespace capacity to zero to prevent any I/O.
          */
         if (!blk_integrity_rq(req)) {
-            if (WARN_ON_ONCE(!nvme_ns_has_pi(ns)))
+            if (WARN_ON_ONCE(!nvme_ns_has_pi(ns->head)))
                 return BLK_STS_NOTSUPP;
             control |= NVME_RW_PRINFO_PRACT;
         }
@@ -1723,8 +1728,9 @@ static void nvme_config_discard(struct gendisk *disk, struct nvme_ns *ns)
     struct request_queue *queue = disk->queue;
     u32 size = queue_logical_block_size(queue);
 
-    if (ctrl->dmrsl && ctrl->dmrsl <= nvme_sect_to_lba(ns, UINT_MAX))
-        ctrl->max_discard_sectors = nvme_lba_to_sect(ns, ctrl->dmrsl);
+    if (ctrl->dmrsl && ctrl->dmrsl <= nvme_sect_to_lba(ns->head, UINT_MAX))
+        ctrl->max_discard_sectors =
+            nvme_lba_to_sect(ns->head, ctrl->dmrsl);
 
     if (ctrl->max_discard_sectors == 0) {
         blk_queue_max_discard_sectors(queue, 0);
@@ -1848,7 +1854,7 @@ static int nvme_configure_metadata(struct nvme_ns *ns, struct nvme_id_ns *id)
          * Note, this check will need to be modified if any drivers
          * gain the ability to use other metadata formats.
          */
-        if (ctrl->max_integrity_segments && nvme_ns_has_pi(ns))
+        if (ctrl->max_integrity_segments && nvme_ns_has_pi(ns->head))
            ns->head->features |= NVME_NS_METADATA_SUPPORTED;
    } else {
        /*
@@ -1886,7 +1892,7 @@ static void nvme_set_queue_limits(struct nvme_ctrl *ctrl,
 static void nvme_update_disk_info(struct gendisk *disk,
         struct nvme_ns *ns, struct nvme_id_ns *id)
 {
-    sector_t capacity = nvme_lba_to_sect(ns, le64_to_cpu(id->nsze));
+    sector_t capacity = nvme_lba_to_sect(ns->head, le64_to_cpu(id->nsze));
     u32 bs = 1U << ns->head->lba_shift;
     u32 atomic_bs, phys_bs, io_opt = 0;
 
@@ -1942,7 +1948,7 @@ static void nvme_update_disk_info(struct gendisk *disk,
             (ns->head->features & NVME_NS_METADATA_SUPPORTED))
             nvme_init_integrity(disk, ns,
                     ns->ctrl->max_integrity_segments);
-        else if (!nvme_ns_has_pi(ns))
+        else if (!nvme_ns_has_pi(ns->head))
             capacity = 0;
     }
 
@@ -1973,7 +1979,7 @@ static void nvme_set_chunk_sectors(struct nvme_ns *ns, struct nvme_id_ns *id)
         is_power_of_2(ctrl->max_hw_sectors))
         iob = ctrl->max_hw_sectors;
     else
-        iob = nvme_lba_to_sect(ns, le16_to_cpu(id->noiob));
+        iob = nvme_lba_to_sect(ns->head, le16_to_cpu(id->noiob));
 
     if (!iob)
         return;

drivers/nvme/host/nvme.h

Lines changed: 6 additions & 6 deletions
@@ -512,9 +512,9 @@ struct nvme_ns {
 };
 
 /* NVMe ns supports metadata actions by the controller (generate/strip) */
-static inline bool nvme_ns_has_pi(struct nvme_ns *ns)
+static inline bool nvme_ns_has_pi(struct nvme_ns_head *head)
 {
-    return ns->head->pi_type && ns->head->ms == ns->head->pi_size;
+    return head->pi_type && head->ms == head->pi_size;
 }
 
 struct nvme_ctrl_ops {
@@ -646,17 +646,17 @@ static inline int nvme_reset_subsystem(struct nvme_ctrl *ctrl)
 /*
  * Convert a 512B sector number to a device logical block number.
  */
-static inline u64 nvme_sect_to_lba(struct nvme_ns *ns, sector_t sector)
+static inline u64 nvme_sect_to_lba(struct nvme_ns_head *head, sector_t sector)
 {
-    return sector >> (ns->head->lba_shift - SECTOR_SHIFT);
+    return sector >> (head->lba_shift - SECTOR_SHIFT);
 }
 
 /*
  * Convert a device logical block number to a 512B sector number.
  */
-static inline sector_t nvme_lba_to_sect(struct nvme_ns *ns, u64 lba)
+static inline sector_t nvme_lba_to_sect(struct nvme_ns_head *head, u64 lba)
 {
-    return lba << (ns->head->lba_shift - SECTOR_SHIFT);
+    return lba << (head->lba_shift - SECTOR_SHIFT);
 }
 
 /*
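
As a quick aside on the nvme_ns_has_pi() change above: per the commit message, callers no longer need the full nvme_ns to evaluate the protection-information check. The following is a standalone userspace model of calling the reworked predicate with just a head pointer; the stand-in struct, its field types, and the sample values are illustrative, while the check itself mirrors the updated helper.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Trimmed stand-in for struct nvme_ns_head: only the fields the check reads. */
struct nvme_ns_head {
        uint8_t  pi_type;       /* NVMe protection information type (0 = none) */
        uint16_t ms;            /* metadata size per LBA, in bytes */
        uint8_t  pi_size;       /* size of the protection information field */
};

/* NVMe ns supports metadata actions by the controller (generate/strip) */
static inline bool nvme_ns_has_pi(struct nvme_ns_head *head)
{
        return head->pi_type && head->ms == head->pi_size;
}

int main(void)
{
        struct nvme_ns_head head = { .pi_type = 1, .ms = 8, .pi_size = 8 };

        printf("has PI: %s\n", nvme_ns_has_pi(&head) ? "yes" : "no");
        return 0;
}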

drivers/nvme/host/rdma.c

Lines changed: 1 addition & 1 deletion
@@ -2012,7 +2012,7 @@ static blk_status_t nvme_rdma_queue_rq(struct blk_mq_hw_ctx *hctx,
         queue->pi_support &&
         (c->common.opcode == nvme_cmd_write ||
          c->common.opcode == nvme_cmd_read) &&
-        nvme_ns_has_pi(ns))
+        nvme_ns_has_pi(ns->head))
         req->use_sig_mr = true;
     else
         req->use_sig_mr = false;

drivers/nvme/host/zns.c

Lines changed: 6 additions & 6 deletions
@@ -100,7 +100,7 @@ int nvme_update_zone_info(struct nvme_ns *ns, unsigned lbaf)
     }
 
     ns->head->zsze =
-        nvme_lba_to_sect(ns, le64_to_cpu(id->lbafe[lbaf].zsze));
+        nvme_lba_to_sect(ns->head, le64_to_cpu(id->lbafe[lbaf].zsze));
     if (!is_power_of_2(ns->head->zsze)) {
         dev_warn(ns->ctrl->device,
             "invalid zone size:%llu for namespace:%u\n",
@@ -164,12 +164,12 @@ static int nvme_zone_parse_entry(struct nvme_ns *ns,
     zone.type = BLK_ZONE_TYPE_SEQWRITE_REQ;
     zone.cond = entry->zs >> 4;
     zone.len = ns->head->zsze;
-    zone.capacity = nvme_lba_to_sect(ns, le64_to_cpu(entry->zcap));
-    zone.start = nvme_lba_to_sect(ns, le64_to_cpu(entry->zslba));
+    zone.capacity = nvme_lba_to_sect(ns->head, le64_to_cpu(entry->zcap));
+    zone.start = nvme_lba_to_sect(ns->head, le64_to_cpu(entry->zslba));
     if (zone.cond == BLK_ZONE_COND_FULL)
         zone.wp = zone.start + zone.len;
     else
-        zone.wp = nvme_lba_to_sect(ns, le64_to_cpu(entry->wp));
+        zone.wp = nvme_lba_to_sect(ns->head, le64_to_cpu(entry->wp));
 
     return cb(&zone, idx, data);
 }
@@ -201,7 +201,7 @@ int nvme_ns_report_zones(struct nvme_ns *ns, sector_t sector,
     while (zone_idx < nr_zones && sector < get_capacity(ns->disk)) {
         memset(report, 0, buflen);
 
-        c.zmr.slba = cpu_to_le64(nvme_sect_to_lba(ns, sector));
+        c.zmr.slba = cpu_to_le64(nvme_sect_to_lba(ns->head, sector));
         ret = nvme_submit_sync_cmd(ns->queue, &c, report, buflen);
         if (ret) {
             if (ret > 0)
@@ -240,7 +240,7 @@ blk_status_t nvme_setup_zone_mgmt_send(struct nvme_ns *ns, struct request *req,
 
     c->zms.opcode = nvme_cmd_zone_mgmt_send;
     c->zms.nsid = cpu_to_le32(ns->head->ns_id);
-    c->zms.slba = cpu_to_le64(nvme_sect_to_lba(ns, blk_rq_pos(req)));
+    c->zms.slba = cpu_to_le64(nvme_sect_to_lba(ns->head, blk_rq_pos(req)));
     c->zms.zsa = action;
 
     if (req_op(req) == REQ_OP_ZONE_RESET_ALL)
