Commit d386aed
igaw authored and keithbusch committed
nvme: refactor ns info setup function
Use nvme_ns_head instead of nvme_ns where possible. This reduces the coupling between the different data structures.

Reviewed-by: Christoph Hellwig <[email protected]>
Reviewed-by: Sagi Grimberg <[email protected]>
Reviewed-by: Hannes Reinecke <[email protected]>
Signed-off-by: Daniel Wagner <[email protected]>
Signed-off-by: Keith Busch <[email protected]>
1 parent 0372dd4 commit d386aed

File tree (2 files changed: +62 -60)

  drivers/nvme/host/core.c
  drivers/nvme/host/zns.c
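The whole change applies one pattern: helpers that only operate on per-head namespace state take a struct nvme_ns_head (plus a struct nvme_ctrl where controller limits are consulted) instead of a struct nvme_ns, and callers pass ns->ctrl and ns->head explicitly. A minimal sketch of the before/after shape, using a hypothetical nvme_example_setup() helper and example_limit field that are not part of this commit:

/* Before: the helper reaches through ns for both controller and head state. */
static void nvme_example_setup(struct nvme_ns *ns)
{
        if (ns->ctrl->example_limit)                    /* controller-wide limit (hypothetical) */
                ns->head->features |= NVME_NS_EXT_LBAS; /* per-head state */
}

/* After: the dependencies are explicit, so the helper no longer needs a
 * specific nvme_ns and can be called wherever a ctrl and a head are at hand.
 */
static void nvme_example_setup(struct nvme_ctrl *ctrl, struct nvme_ns_head *head)
{
        if (ctrl->example_limit)
                head->features |= NVME_NS_EXT_LBAS;
}

Call sites then change from nvme_example_setup(ns) to nvme_example_setup(ns->ctrl, ns->head), which is exactly the shape of the call-site updates in the hunks below.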

drivers/nvme/host/core.c (53 additions, 53 deletions)

@@ -1665,14 +1665,14 @@ int nvme_getgeo(struct block_device *bdev, struct hd_geometry *geo)
 }
 
 #ifdef CONFIG_BLK_DEV_INTEGRITY
-static void nvme_init_integrity(struct gendisk *disk, struct nvme_ns *ns,
-                u32 max_integrity_segments)
+static void nvme_init_integrity(struct gendisk *disk,
+                struct nvme_ns_head *head, u32 max_integrity_segments)
 {
         struct blk_integrity integrity = { };
 
-        switch (ns->head->pi_type) {
+        switch (head->pi_type) {
         case NVME_NS_DPS_PI_TYPE3:
-                switch (ns->head->guard_type) {
+                switch (head->guard_type) {
                 case NVME_NVM_NS_16B_GUARD:
                         integrity.profile = &t10_pi_type3_crc;
                         integrity.tag_size = sizeof(u16) + sizeof(u32);
@@ -1690,7 +1690,7 @@ static void nvme_init_integrity(struct gendisk *disk, struct nvme_ns *ns,
                 break;
         case NVME_NS_DPS_PI_TYPE1:
         case NVME_NS_DPS_PI_TYPE2:
-                switch (ns->head->guard_type) {
+                switch (head->guard_type) {
                 case NVME_NVM_NS_16B_GUARD:
                         integrity.profile = &t10_pi_type1_crc;
                         integrity.tag_size = sizeof(u16);
@@ -1711,26 +1711,26 @@ static void nvme_init_integrity(struct gendisk *disk, struct nvme_ns *ns,
                 break;
         }
 
-        integrity.tuple_size = ns->head->ms;
+        integrity.tuple_size = head->ms;
         blk_integrity_register(disk, &integrity);
         blk_queue_max_integrity_segments(disk->queue, max_integrity_segments);
 }
 #else
-static void nvme_init_integrity(struct gendisk *disk, struct nvme_ns *ns,
-                u32 max_integrity_segments)
+static void nvme_init_integrity(struct gendisk *disk,
+                struct nvme_ns_head *head, u32 max_integrity_segments)
 {
 }
 #endif /* CONFIG_BLK_DEV_INTEGRITY */
 
-static void nvme_config_discard(struct gendisk *disk, struct nvme_ns *ns)
+static void nvme_config_discard(struct nvme_ctrl *ctrl, struct gendisk *disk,
+                struct nvme_ns_head *head)
 {
-        struct nvme_ctrl *ctrl = ns->ctrl;
         struct request_queue *queue = disk->queue;
         u32 size = queue_logical_block_size(queue);
 
-        if (ctrl->dmrsl && ctrl->dmrsl <= nvme_sect_to_lba(ns->head, UINT_MAX))
+        if (ctrl->dmrsl && ctrl->dmrsl <= nvme_sect_to_lba(head, UINT_MAX))
                 ctrl->max_discard_sectors =
-                        nvme_lba_to_sect(ns->head, ctrl->dmrsl);
+                        nvme_lba_to_sect(head, ctrl->dmrsl);
 
         if (ctrl->max_discard_sectors == 0) {
                 blk_queue_max_discard_sectors(queue, 0);
@@ -1761,21 +1761,21 @@ static bool nvme_ns_ids_equal(struct nvme_ns_ids *a, struct nvme_ns_ids *b)
                 a->csi == b->csi;
 }
 
-static int nvme_init_ms(struct nvme_ns *ns, struct nvme_id_ns *id)
+static int nvme_init_ms(struct nvme_ctrl *ctrl, struct nvme_ns_head *head,
+                struct nvme_id_ns *id)
 {
         bool first = id->dps & NVME_NS_DPS_PI_FIRST;
         unsigned lbaf = nvme_lbaf_index(id->flbas);
-        struct nvme_ctrl *ctrl = ns->ctrl;
         struct nvme_command c = { };
         struct nvme_id_ns_nvm *nvm;
         int ret = 0;
         u32 elbaf;
 
-        ns->head->pi_size = 0;
-        ns->head->ms = le16_to_cpu(id->lbaf[lbaf].ms);
+        head->pi_size = 0;
+        head->ms = le16_to_cpu(id->lbaf[lbaf].ms);
         if (!(ctrl->ctratt & NVME_CTRL_ATTR_ELBAS)) {
-                ns->head->pi_size = sizeof(struct t10_pi_tuple);
-                ns->head->guard_type = NVME_NVM_NS_16B_GUARD;
+                head->pi_size = sizeof(struct t10_pi_tuple);
+                head->guard_type = NVME_NVM_NS_16B_GUARD;
                 goto set_pi;
         }
 
@@ -1784,11 +1784,11 @@ static int nvme_init_ms(struct nvme_ns *ns, struct nvme_id_ns *id)
                 return -ENOMEM;
 
         c.identify.opcode = nvme_admin_identify;
-        c.identify.nsid = cpu_to_le32(ns->head->ns_id);
+        c.identify.nsid = cpu_to_le32(head->ns_id);
         c.identify.cns = NVME_ID_CNS_CS_NS;
         c.identify.csi = NVME_CSI_NVM;
 
-        ret = nvme_submit_sync_cmd(ns->ctrl->admin_q, &c, nvm, sizeof(*nvm));
+        ret = nvme_submit_sync_cmd(ctrl->admin_q, &c, nvm, sizeof(*nvm));
         if (ret)
                 goto free_data;
 
@@ -1798,13 +1798,13 @@ static int nvme_init_ms(struct nvme_ns *ns, struct nvme_id_ns *id)
         if (nvme_elbaf_sts(elbaf))
                 goto free_data;
 
-        ns->head->guard_type = nvme_elbaf_guard_type(elbaf);
-        switch (ns->head->guard_type) {
+        head->guard_type = nvme_elbaf_guard_type(elbaf);
+        switch (head->guard_type) {
         case NVME_NVM_NS_64B_GUARD:
-                ns->head->pi_size = sizeof(struct crc64_pi_tuple);
+                head->pi_size = sizeof(struct crc64_pi_tuple);
                 break;
         case NVME_NVM_NS_16B_GUARD:
-                ns->head->pi_size = sizeof(struct t10_pi_tuple);
+                head->pi_size = sizeof(struct t10_pi_tuple);
                 break;
         default:
                 break;
@@ -1813,25 +1813,25 @@ static int nvme_init_ms(struct nvme_ns *ns, struct nvme_id_ns *id)
 free_data:
         kfree(nvm);
 set_pi:
-        if (ns->head->pi_size && (first || ns->head->ms == ns->head->pi_size))
-                ns->head->pi_type = id->dps & NVME_NS_DPS_PI_MASK;
+        if (head->pi_size && (first || head->ms == head->pi_size))
+                head->pi_type = id->dps & NVME_NS_DPS_PI_MASK;
         else
-                ns->head->pi_type = 0;
+                head->pi_type = 0;
 
         return ret;
 }
 
-static int nvme_configure_metadata(struct nvme_ns *ns, struct nvme_id_ns *id)
+static int nvme_configure_metadata(struct nvme_ctrl *ctrl,
+                struct nvme_ns_head *head, struct nvme_id_ns *id)
 {
-        struct nvme_ctrl *ctrl = ns->ctrl;
         int ret;
 
-        ret = nvme_init_ms(ns, id);
+        ret = nvme_init_ms(ctrl, head, id);
         if (ret)
                 return ret;
 
-        ns->head->features &= ~(NVME_NS_METADATA_SUPPORTED | NVME_NS_EXT_LBAS);
-        if (!ns->head->ms || !(ctrl->ops->flags & NVME_F_METADATA_SUPPORTED))
+        head->features &= ~(NVME_NS_METADATA_SUPPORTED | NVME_NS_EXT_LBAS);
+        if (!head->ms || !(ctrl->ops->flags & NVME_F_METADATA_SUPPORTED))
                 return 0;
 
         if (ctrl->ops->flags & NVME_F_FABRICS) {
@@ -1843,7 +1843,7 @@ static int nvme_configure_metadata(struct nvme_ns *ns, struct nvme_id_ns *id)
                 if (WARN_ON_ONCE(!(id->flbas & NVME_NS_FLBAS_META_EXT)))
                         return 0;
 
-                ns->head->features |= NVME_NS_EXT_LBAS;
+                head->features |= NVME_NS_EXT_LBAS;
 
                 /*
                  * The current fabrics transport drivers support namespace
@@ -1854,8 +1854,8 @@ static int nvme_configure_metadata(struct nvme_ns *ns, struct nvme_id_ns *id)
                  * Note, this check will need to be modified if any drivers
                  * gain the ability to use other metadata formats.
                  */
-                if (ctrl->max_integrity_segments && nvme_ns_has_pi(ns->head))
-                        ns->head->features |= NVME_NS_METADATA_SUPPORTED;
+                if (ctrl->max_integrity_segments && nvme_ns_has_pi(head))
+                        head->features |= NVME_NS_METADATA_SUPPORTED;
         } else {
                 /*
                  * For PCIe controllers, we can't easily remap the separate
@@ -1864,9 +1864,9 @@ static int nvme_configure_metadata(struct nvme_ns *ns, struct nvme_id_ns *id)
                  * We allow extended LBAs for the passthrough interface, though.
                  */
                 if (id->flbas & NVME_NS_FLBAS_META_EXT)
-                        ns->head->features |= NVME_NS_EXT_LBAS;
+                        head->features |= NVME_NS_EXT_LBAS;
                 else
-                        ns->head->features |= NVME_NS_METADATA_SUPPORTED;
+                        head->features |= NVME_NS_METADATA_SUPPORTED;
         }
         return 0;
 }
@@ -1889,18 +1889,18 @@ static void nvme_set_queue_limits(struct nvme_ctrl *ctrl,
         blk_queue_write_cache(q, vwc, vwc);
 }
 
-static void nvme_update_disk_info(struct gendisk *disk,
-                struct nvme_ns *ns, struct nvme_id_ns *id)
+static void nvme_update_disk_info(struct nvme_ctrl *ctrl, struct gendisk *disk,
+                struct nvme_ns_head *head, struct nvme_id_ns *id)
 {
-        sector_t capacity = nvme_lba_to_sect(ns->head, le64_to_cpu(id->nsze));
-        u32 bs = 1U << ns->head->lba_shift;
+        sector_t capacity = nvme_lba_to_sect(head, le64_to_cpu(id->nsze));
+        u32 bs = 1U << head->lba_shift;
         u32 atomic_bs, phys_bs, io_opt = 0;
 
         /*
          * The block layer can't support LBA sizes larger than the page size
          * yet, so catch this early and don't allow block I/O.
          */
-        if (ns->head->lba_shift > PAGE_SHIFT) {
+        if (head->lba_shift > PAGE_SHIFT) {
                 capacity = 0;
                 bs = (1 << 9);
         }
@@ -1917,7 +1917,7 @@ static void nvme_update_disk_info(struct gendisk *disk,
                 if (id->nsfeat & NVME_NS_FEAT_ATOMICS && id->nawupf)
                         atomic_bs = (1 + le16_to_cpu(id->nawupf)) * bs;
                 else
-                        atomic_bs = (1 + ns->ctrl->subsys->awupf) * bs;
+                        atomic_bs = (1 + ctrl->subsys->awupf) * bs;
         }
 
         if (id->nsfeat & NVME_NS_FEAT_IO_OPT) {
@@ -1943,20 +1943,20 @@ static void nvme_update_disk_info(struct gendisk *disk,
          * I/O to namespaces with metadata except when the namespace supports
          * PI, as it can strip/insert in that case.
          */
-        if (ns->head->ms) {
+        if (head->ms) {
                 if (IS_ENABLED(CONFIG_BLK_DEV_INTEGRITY) &&
-                    (ns->head->features & NVME_NS_METADATA_SUPPORTED))
-                        nvme_init_integrity(disk, ns,
-                                        ns->ctrl->max_integrity_segments);
-                else if (!nvme_ns_has_pi(ns->head))
+                    (head->features & NVME_NS_METADATA_SUPPORTED))
+                        nvme_init_integrity(disk, head,
+                                        ctrl->max_integrity_segments);
+                else if (!nvme_ns_has_pi(head))
                         capacity = 0;
         }
 
         set_capacity_and_notify(disk, capacity);
 
-        nvme_config_discard(disk, ns);
+        nvme_config_discard(ctrl, disk, head);
         blk_queue_max_write_zeroes_sectors(disk->queue,
-                        ns->ctrl->max_zeroes_sectors);
+                        ctrl->max_zeroes_sectors);
 }
 
 static bool nvme_ns_is_readonly(struct nvme_ns *ns, struct nvme_ns_info *info)
@@ -2042,13 +2042,13 @@ static int nvme_update_ns_info_block(struct nvme_ns *ns,
         ns->head->lba_shift = id->lbaf[lbaf].ds;
         nvme_set_queue_limits(ns->ctrl, ns->queue);
 
-        ret = nvme_configure_metadata(ns, id);
+        ret = nvme_configure_metadata(ns->ctrl, ns->head, id);
         if (ret < 0) {
                 blk_mq_unfreeze_queue(ns->disk->queue);
                 goto out;
         }
         nvme_set_chunk_sectors(ns, id);
-        nvme_update_disk_info(ns->disk, ns, id);
+        nvme_update_disk_info(ns->ctrl, ns->disk, ns->head, id);
 
         if (ns->head->ids.csi == NVME_CSI_ZNS) {
                 ret = nvme_update_zone_info(ns, lbaf);
@@ -2078,7 +2078,7 @@ static int nvme_update_ns_info_block(struct nvme_ns *ns,
 
         if (nvme_ns_head_multipath(ns->head)) {
                 blk_mq_freeze_queue(ns->head->disk->queue);
-                nvme_update_disk_info(ns->head->disk, ns, id);
+                nvme_update_disk_info(ns->ctrl, ns->head->disk, ns->head, id);
                 set_disk_ro(ns->head->disk, nvme_ns_is_readonly(ns, info));
                 nvme_mpath_revalidate_paths(ns);
                 blk_stack_limits(&ns->head->disk->queue->limits,
drivers/nvme/host/zns.c (9 additions, 7 deletions)

@@ -148,28 +148,29 @@ static void *nvme_zns_alloc_report_buffer(struct nvme_ns *ns,
         return NULL;
 }
 
-static int nvme_zone_parse_entry(struct nvme_ns *ns,
+static int nvme_zone_parse_entry(struct nvme_ctrl *ctrl,
+                                 struct nvme_ns_head *head,
                                  struct nvme_zone_descriptor *entry,
                                  unsigned int idx, report_zones_cb cb,
                                  void *data)
 {
         struct blk_zone zone = { };
 
         if ((entry->zt & 0xf) != NVME_ZONE_TYPE_SEQWRITE_REQ) {
-                dev_err(ns->ctrl->device, "invalid zone type %#x\n",
+                dev_err(ctrl->device, "invalid zone type %#x\n",
                         entry->zt);
                 return -EINVAL;
         }
 
         zone.type = BLK_ZONE_TYPE_SEQWRITE_REQ;
         zone.cond = entry->zs >> 4;
-        zone.len = ns->head->zsze;
-        zone.capacity = nvme_lba_to_sect(ns->head, le64_to_cpu(entry->zcap));
-        zone.start = nvme_lba_to_sect(ns->head, le64_to_cpu(entry->zslba));
+        zone.len = head->zsze;
+        zone.capacity = nvme_lba_to_sect(head, le64_to_cpu(entry->zcap));
+        zone.start = nvme_lba_to_sect(head, le64_to_cpu(entry->zslba));
         if (zone.cond == BLK_ZONE_COND_FULL)
                 zone.wp = zone.start + zone.len;
         else
-                zone.wp = nvme_lba_to_sect(ns->head, le64_to_cpu(entry->wp));
+                zone.wp = nvme_lba_to_sect(head, le64_to_cpu(entry->wp));
 
         return cb(&zone, idx, data);
 }
@@ -214,7 +215,8 @@ int nvme_ns_report_zones(struct nvme_ns *ns, sector_t sector,
                         break;
 
                 for (i = 0; i < nz && zone_idx < nr_zones; i++) {
-                        ret = nvme_zone_parse_entry(ns, &report->entries[i],
+                        ret = nvme_zone_parse_entry(ns->ctrl, ns->head,
+                                        &report->entries[i],
                                         zone_idx, cb, data);
                         if (ret)
                                 goto out_free;
