
Commit 0f7223a

Merge tag 'nvme-6.9-2024-03-07' of git://git.infradead.org/nvme into for-6.9/block

Pull NVMe updates from Keith:

"nvme updates for Linux 6.9

 - RDMA target enhancements (Max)
 - Fabrics fixes (Max, Guixin, Hannes)
 - Atomic queue_limits usage (Christoph)
 - Const use for class_register (Ricardo)
 - Identification error handling fixes (Shin'ichiro, Keith)"

* tag 'nvme-6.9-2024-03-07' of git://git.infradead.org/nvme: (31 commits)
  nvme: clear caller pointer on identify failure
  nvme: host: fix double-free of struct nvme_id_ns in ns_update_nuse()
  nvme: fcloop: make fcloop_class constant
  nvme: fabrics: make nvmf_class constant
  nvme: core: constify struct class usage
  nvme-fabrics: typo in nvmf_parse_key()
  nvme-multipath: use atomic queue limits API for stacking limits
  nvme-multipath: pass queue_limits to blk_alloc_disk
  nvme: use the atomic queue limits update API
  nvme: cleanup nvme_configure_metadata
  nvme: don't query identify data in configure_metadata
  nvme: split out a nvme_identify_ns_nvm helper
  nvme: move common logic into nvme_update_ns_info
  nvme: move setting the write cache flags out of nvme_set_queue_limits
  nvme: move a few things out of nvme_update_disk_info
  nvme: don't use nvme_update_disk_info for the multipath disk
  nvme: move blk_integrity_unregister into nvme_init_integrity
  nvme: cleanup the nvme_init_integrity calling conventions
  nvme: move max_integrity_segments handling out of nvme_init_integrity
  nvme: remove nvme_revalidate_zones
  ...

2 parents d37977f + 7e80eb7 commit 0f7223a

File tree

18 files changed: +356 -289 lines changed


drivers/nvme/host/core.c

Lines changed: 238 additions & 218 deletions
Large diffs are not rendered by default.

drivers/nvme/host/fabrics.c

Lines changed: 12 additions & 10 deletions
@@ -637,7 +637,7 @@ static struct key *nvmf_parse_key(int key_id)
 	}
 
 	key = key_lookup(key_id);
-	if (!IS_ERR(key))
+	if (IS_ERR(key))
 		pr_err("key id %08x not found\n", key_id);
 	else
 		pr_debug("Using key id %08x\n", key_id);
@@ -1318,7 +1318,10 @@ nvmf_create_ctrl(struct device *dev, const char *buf)
 		return ERR_PTR(ret);
 	}
 
-static struct class *nvmf_class;
+static const struct class nvmf_class = {
+	.name = "nvme-fabrics",
+};
+
 static struct device *nvmf_device;
 static DEFINE_MUTEX(nvmf_dev_mutex);
 
@@ -1438,15 +1441,14 @@ static int __init nvmf_init(void)
 	if (!nvmf_default_host)
 		return -ENOMEM;
 
-	nvmf_class = class_create("nvme-fabrics");
-	if (IS_ERR(nvmf_class)) {
+	ret = class_register(&nvmf_class);
+	if (ret) {
 		pr_err("couldn't register class nvme-fabrics\n");
-		ret = PTR_ERR(nvmf_class);
 		goto out_free_host;
 	}
 
 	nvmf_device =
-		device_create(nvmf_class, NULL, MKDEV(0, 0), NULL, "ctl");
+		device_create(&nvmf_class, NULL, MKDEV(0, 0), NULL, "ctl");
 	if (IS_ERR(nvmf_device)) {
 		pr_err("couldn't create nvme-fabrics device!\n");
 		ret = PTR_ERR(nvmf_device);
@@ -1462,9 +1464,9 @@ static int __init nvmf_init(void)
 	return 0;
 
 out_destroy_device:
-	device_destroy(nvmf_class, MKDEV(0, 0));
+	device_destroy(&nvmf_class, MKDEV(0, 0));
 out_destroy_class:
-	class_destroy(nvmf_class);
+	class_unregister(&nvmf_class);
 out_free_host:
 	nvmf_host_put(nvmf_default_host);
 	return ret;
@@ -1473,8 +1475,8 @@ static int __init nvmf_init(void)
 static void __exit nvmf_exit(void)
 {
 	misc_deregister(&nvmf_misc);
-	device_destroy(nvmf_class, MKDEV(0, 0));
-	class_destroy(nvmf_class);
+	device_destroy(&nvmf_class, MKDEV(0, 0));
+	class_unregister(&nvmf_class);
 	nvmf_host_put(nvmf_default_host);
 
 	BUILD_BUG_ON(sizeof(struct nvmf_common_command) != 64);
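Editor's note: this file and fcloop.c below follow the same constification pattern: a heap-allocated class from class_create() becomes a statically defined const struct class registered with class_register(). A minimal sketch of the pattern, using a hypothetical "demo" class name (the driver-core calls themselves are the real API):

#include <linux/module.h>
#include <linux/device.h>

static const struct class demo_class = {
	.name = "demo",		/* appears as /sys/class/demo */
};

static int __init demo_init(void)
{
	/* class_register() returns 0 or a negative errno; no ERR_PTR dance */
	return class_register(&demo_class);
}

static void __exit demo_exit(void)
{
	class_unregister(&demo_class);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");

Because the class object now lives in read-only data, the error path changes shape too: the caller tests an errno from class_register() instead of running IS_ERR()/PTR_ERR() on a returned pointer, which is exactly the diff above.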

drivers/nvme/host/multipath.c

Lines changed: 7 additions & 6 deletions
@@ -516,6 +516,7 @@ static void nvme_requeue_work(struct work_struct *work)
 
 int nvme_mpath_alloc_disk(struct nvme_ctrl *ctrl, struct nvme_ns_head *head)
 {
+	struct queue_limits lim;
 	bool vwc = false;
 
 	mutex_init(&head->lock);
@@ -532,7 +533,12 @@ int nvme_mpath_alloc_disk(struct nvme_ctrl *ctrl, struct nvme_ns_head *head)
 	    !nvme_is_unique_nsid(ctrl, head) || !multipath)
 		return 0;
 
-	head->disk = blk_alloc_disk(NULL, ctrl->numa_node);
+	blk_set_stacking_limits(&lim);
+	lim.dma_alignment = 3;
+	if (head->ids.csi != NVME_CSI_ZNS)
+		lim.max_zone_append_sectors = 0;
+
+	head->disk = blk_alloc_disk(&lim, ctrl->numa_node);
 	if (IS_ERR(head->disk))
 		return PTR_ERR(head->disk);
 	head->disk->fops = &nvme_ns_head_ops;
@@ -553,11 +559,6 @@ int nvme_mpath_alloc_disk(struct nvme_ctrl *ctrl, struct nvme_ns_head *head)
 	    ctrl->tagset->map[HCTX_TYPE_POLL].nr_queues)
 		blk_queue_flag_set(QUEUE_FLAG_POLL, head->disk->queue);
 
-	/* set to a default value of 512 until the disk is validated */
-	blk_queue_logical_block_size(head->disk->queue, 512);
-	blk_set_stacking_limits(&head->disk->queue->limits);
-	blk_queue_dma_alignment(head->disk->queue, 3);
-
 	/* we need to propagate up the VMC settings */
 	if (ctrl->vwc & NVME_CTRL_VWC_PRESENT)
 		vwc = true;
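Editor's note: the multipath disk now receives its limits at allocation time rather than having them poked into the live queue afterwards. A minimal sketch of the pattern in a hypothetical driver; blk_set_stacking_limits() and the limits-taking blk_alloc_disk() are the block-layer APIs this series switches to:

#include <linux/blkdev.h>

static struct gendisk *demo_alloc_stacked_disk(int node)
{
	struct queue_limits lim;

	/* start from the permissive defaults used by stacking drivers */
	blk_set_stacking_limits(&lim);

	/* tighten whatever is known up front, e.g. DMA alignment */
	lim.dma_alignment = 3;		/* buffers must be 4-byte aligned */

	/* the limits are validated and applied atomically at allocation */
	return blk_alloc_disk(&lim, node);
}

This removes the window in which the disk was briefly visible with the default 512-byte logical block size, which is what the deleted "until the disk is validated" lines above used to paper over.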

drivers/nvme/host/nvme.h

Lines changed: 2 additions & 9 deletions
@@ -1036,11 +1036,11 @@ static inline bool nvme_disk_is_ns_head(struct gendisk *disk)
 }
 #endif /* CONFIG_NVME_MULTIPATH */
 
-int nvme_revalidate_zones(struct nvme_ns *ns);
 int nvme_ns_report_zones(struct nvme_ns *ns, sector_t sector,
 		unsigned int nr_zones, report_zones_cb cb, void *data);
+int nvme_update_zone_info(struct nvme_ns *ns, unsigned lbaf,
+		struct queue_limits *lim);
 #ifdef CONFIG_BLK_DEV_ZONED
-int nvme_update_zone_info(struct nvme_ns *ns, unsigned lbaf);
 blk_status_t nvme_setup_zone_mgmt_send(struct nvme_ns *ns, struct request *req,
 		struct nvme_command *cmnd,
 		enum nvme_zone_mgmt_action action);
@@ -1051,13 +1051,6 @@ static inline blk_status_t nvme_setup_zone_mgmt_send(struct nvme_ns *ns,
 {
 	return BLK_STS_NOTSUPP;
 }
-
-static inline int nvme_update_zone_info(struct nvme_ns *ns, unsigned lbaf)
-{
-	dev_warn(ns->ctrl->device,
-		"Please enable CONFIG_BLK_DEV_ZONED to support ZNS devices\n");
-	return -EPROTONOSUPPORT;
-}
#endif
 
 static inline struct nvme_ns *nvme_get_ns_from_dev(struct device *dev)

drivers/nvme/host/rdma.c

Lines changed: 10 additions & 4 deletions
@@ -1006,6 +1006,7 @@ static int nvme_rdma_setup_ctrl(struct nvme_rdma_ctrl *ctrl, bool new)
 {
 	int ret;
 	bool changed;
+	u16 max_queue_size;
 
 	ret = nvme_rdma_configure_admin_queue(ctrl, new);
 	if (ret)
@@ -1030,11 +1031,16 @@ static int nvme_rdma_setup_ctrl(struct nvme_rdma_ctrl *ctrl, bool new)
 			ctrl->ctrl.opts->queue_size, ctrl->ctrl.sqsize + 1);
 	}
 
-	if (ctrl->ctrl.sqsize + 1 > NVME_RDMA_MAX_QUEUE_SIZE) {
+	if (ctrl->ctrl.max_integrity_segments)
+		max_queue_size = NVME_RDMA_MAX_METADATA_QUEUE_SIZE;
+	else
+		max_queue_size = NVME_RDMA_MAX_QUEUE_SIZE;
+
+	if (ctrl->ctrl.sqsize + 1 > max_queue_size) {
 		dev_warn(ctrl->ctrl.device,
-			"ctrl sqsize %u > max queue size %u, clamping down\n",
-			ctrl->ctrl.sqsize + 1, NVME_RDMA_MAX_QUEUE_SIZE);
-		ctrl->ctrl.sqsize = NVME_RDMA_MAX_QUEUE_SIZE - 1;
+			"ctrl sqsize %u > max queue size %u, clamping down\n",
+			ctrl->ctrl.sqsize + 1, max_queue_size);
+		ctrl->ctrl.sqsize = max_queue_size - 1;
 	}
 
 	if (ctrl->ctrl.sqsize + 1 > ctrl->ctrl.maxcmd) {
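Editor's note: the clamping above relies on NVMe's zero's-based queue-size convention, where sqsize holds one less than the number of entries. A small standalone sketch of the arithmetic, with made-up limit values (the real ones are NVME_RDMA_MAX_QUEUE_SIZE and the new NVME_RDMA_MAX_METADATA_QUEUE_SIZE):

#include <stdio.h>

int main(void)
{
	unsigned int sqsize = 255;		/* 0's based: 256 entries */
	unsigned int max_queue_size = 128;	/* limit in entries, assumed */

	if (sqsize + 1 > max_queue_size) {
		printf("ctrl sqsize %u > max queue size %u, clamping down\n",
		       sqsize + 1, max_queue_size);
		sqsize = max_queue_size - 1;	/* store it 0's based again */
	}
	printf("effective sqsize: %u (%u entries)\n", sqsize, sqsize + 1);
	return 0;
}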

drivers/nvme/host/sysfs.c

Lines changed: 2 additions & 5 deletions
@@ -221,14 +221,11 @@ static int ns_update_nuse(struct nvme_ns *ns)
 
 	ret = nvme_identify_ns(ns->ctrl, ns->head->ns_id, &id);
 	if (ret)
-		goto out_free_id;
+		return ret;
 
 	ns->head->nuse = le64_to_cpu(id->nuse);
-
-out_free_id:
 	kfree(id);
-
-	return ret;
+	return 0;
 }
 
 static ssize_t nuse_show(struct device *dev, struct device_attribute *attr,
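Editor's note: this early return pairs with the "clear caller pointer on identify failure" commit in the same pull: once the identify helper frees its output buffer on error, a caller that still jumps to a common kfree() label frees it a second time. A userspace sketch of the ownership rule, with hypothetical names standing in for nvme_identify_ns() and ns_update_nuse():

#include <stdio.h>
#include <stdlib.h>

struct demo_id { unsigned long long nuse; };

/* on failure, frees *out and clears it -- the caller then owns nothing */
static int demo_identify(struct demo_id **out, int fail)
{
	*out = calloc(1, sizeof(**out));
	if (!*out)
		return -1;
	if (fail) {
		free(*out);
		*out = NULL;
		return -1;
	}
	(*out)->nuse = 42;
	return 0;
}

static int demo_update_nuse(int fail)
{
	struct demo_id *id;

	if (demo_identify(&id, fail))
		return -1;	/* not "goto out_free": id is already gone */

	printf("nuse %llu\n", id->nuse);
	free(id);
	return 0;
}

int main(void)
{
	demo_update_nuse(0);	/* prints nuse 42 */
	demo_update_nuse(1);	/* fails cleanly, no double free */
	return 0;
}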

drivers/nvme/host/zns.c

Lines changed: 8 additions & 16 deletions
@@ -7,16 +7,6 @@
 #include <linux/vmalloc.h>
 #include "nvme.h"
 
-int nvme_revalidate_zones(struct nvme_ns *ns)
-{
-	struct request_queue *q = ns->queue;
-
-	blk_queue_chunk_sectors(q, ns->head->zsze);
-	blk_queue_max_zone_append_sectors(q, ns->ctrl->max_zone_append);
-
-	return blk_revalidate_disk_zones(ns->disk, NULL);
-}
-
 static int nvme_set_max_append(struct nvme_ctrl *ctrl)
 {
 	struct nvme_command c = { };
@@ -45,10 +35,10 @@ static int nvme_set_max_append(struct nvme_ctrl *ctrl)
 	return 0;
 }
 
-int nvme_update_zone_info(struct nvme_ns *ns, unsigned lbaf)
+int nvme_update_zone_info(struct nvme_ns *ns, unsigned lbaf,
+		struct queue_limits *lim)
 {
 	struct nvme_effects_log *log = ns->head->effects;
-	struct request_queue *q = ns->queue;
 	struct nvme_command c = { };
 	struct nvme_id_ns_zns *id;
 	int status;
@@ -109,10 +99,12 @@ int nvme_update_zone_info(struct nvme_ns *ns, unsigned lbaf)
 		goto free_data;
 	}
 
-	disk_set_zoned(ns->disk);
-	blk_queue_flag_set(QUEUE_FLAG_ZONE_RESETALL, q);
-	disk_set_max_open_zones(ns->disk, le32_to_cpu(id->mor) + 1);
-	disk_set_max_active_zones(ns->disk, le32_to_cpu(id->mar) + 1);
+	blk_queue_flag_set(QUEUE_FLAG_ZONE_RESETALL, ns->queue);
+	lim->zoned = 1;
+	lim->max_open_zones = le32_to_cpu(id->mor) + 1;
+	lim->max_active_zones = le32_to_cpu(id->mar) + 1;
+	lim->chunk_sectors = ns->head->zsze;
+	lim->max_zone_append_sectors = ns->ctrl->max_zone_append;
 free_data:
 	kfree(id);
 	return status;
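Editor's note: nvme_update_zone_info() now only fills in a caller-provided queue_limits; committing them is left to the caller. A sketch of how such collected limits get applied, assuming the 6.9 atomic update API (queue_limits_start_update()/queue_limits_commit_update()) that the "use the atomic queue limits update API" commit switches the core to; the helper and its parameters are hypothetical:

#include <linux/blkdev.h>

static int demo_apply_zone_limits(struct request_queue *q,
				  u32 mor, u32 mar, sector_t zsze,
				  unsigned int max_append)
{
	/* snapshots the current limits and takes the queue's limits lock */
	struct queue_limits lim = queue_limits_start_update(q);

	lim.zoned = true;
	lim.max_open_zones = mor + 1;		/* MOR is 0's based */
	lim.max_active_zones = mar + 1;		/* MAR is 0's based */
	lim.chunk_sectors = zsze;
	lim.max_zone_append_sectors = max_append;

	/* validates everything and publishes it in one go */
	return queue_limits_commit_update(q, &lim);
}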

drivers/nvme/target/admin-cmd.c

Lines changed: 1 addition & 1 deletion
@@ -428,7 +428,7 @@ static void nvmet_execute_identify_ctrl(struct nvmet_req *req)
 	id->cqes = (0x4 << 4) | 0x4;
 
 	/* no enforcement soft-limit for maxcmd - pick arbitrary high value */
-	id->maxcmd = cpu_to_le16(NVMET_MAX_CMD);
+	id->maxcmd = cpu_to_le16(NVMET_MAX_CMD(ctrl));
 
 	id->nn = cpu_to_le32(NVMET_MAX_NAMESPACES);
 	id->mnan = cpu_to_le32(NVMET_MAX_NAMESPACES);
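Editor's note: NVMET_MAX_CMD is now a per-controller macro rather than a global constant. Its real definition lives in drivers/nvme/target/nvmet.h and is not shown in this diff; the sketch below is only a guess at its shape, deriving maxcmd from the controller's advertised CAP.MQES (bits 15:0 of the CAP register, zero's based per the NVMe spec):

/* hypothetical stand-ins; the real macro is in drivers/nvme/target/nvmet.h */
#define DEMO_CAP_MQES(cap)	((u16)((cap) & 0xffff))		/* 0's based */
#define DEMO_MAX_CMD(cap)	(DEMO_CAP_MQES(cap) + 1)	/* in entries */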

drivers/nvme/target/configfs.c

Lines changed: 28 additions & 0 deletions
@@ -273,6 +273,32 @@ static ssize_t nvmet_param_inline_data_size_store(struct config_item *item,
 
 CONFIGFS_ATTR(nvmet_, param_inline_data_size);
 
+static ssize_t nvmet_param_max_queue_size_show(struct config_item *item,
+		char *page)
+{
+	struct nvmet_port *port = to_nvmet_port(item);
+
+	return snprintf(page, PAGE_SIZE, "%d\n", port->max_queue_size);
+}
+
+static ssize_t nvmet_param_max_queue_size_store(struct config_item *item,
+		const char *page, size_t count)
+{
+	struct nvmet_port *port = to_nvmet_port(item);
+	int ret;
+
+	if (nvmet_is_port_enabled(port, __func__))
+		return -EACCES;
+	ret = kstrtoint(page, 0, &port->max_queue_size);
+	if (ret) {
+		pr_err("Invalid value '%s' for max_queue_size\n", page);
+		return -EINVAL;
+	}
+	return count;
+}
+
+CONFIGFS_ATTR(nvmet_, param_max_queue_size);
+
 #ifdef CONFIG_BLK_DEV_INTEGRITY
 static ssize_t nvmet_param_pi_enable_show(struct config_item *item,
 		char *page)
@@ -1859,6 +1885,7 @@ static struct configfs_attribute *nvmet_port_attrs[] = {
 	&nvmet_attr_addr_trtype,
 	&nvmet_attr_addr_tsas,
 	&nvmet_attr_param_inline_data_size,
+	&nvmet_attr_param_max_queue_size,
 #ifdef CONFIG_BLK_DEV_INTEGRITY
 	&nvmet_attr_param_pi_enable,
 #endif
@@ -1917,6 +1944,7 @@ static struct config_group *nvmet_ports_make(struct config_group *group,
 	INIT_LIST_HEAD(&port->subsystems);
 	INIT_LIST_HEAD(&port->referrals);
 	port->inline_data_size = -1; /* < 0 == let the transport choose */
+	port->max_queue_size = -1;   /* < 0 == let the transport choose */
 
 	port->disc_addr.portid = cpu_to_le16(portid);
 	port->disc_addr.adrfam = NVMF_ADDR_FAMILY_MAX;
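Editor's note: from userspace the new attribute shows up as a configfs file next to param_inline_data_size. A minimal sketch of setting it, assuming an nvmet port named "1" already exists and is still disabled (writes to an enabled port return -EACCES, per the store handler above):

#include <stdio.h>

int main(void)
{
	const char *path =
		"/sys/kernel/config/nvmet/ports/1/param_max_queue_size";
	FILE *f = fopen(path, "w");

	if (!f) {
		perror("fopen");
		return 1;
	}
	fprintf(f, "%d\n", 512);	/* clamped into the target's range */
	return fclose(f) ? 1 : 0;
}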

drivers/nvme/target/core.c

Lines changed: 16 additions & 2 deletions
@@ -358,6 +358,18 @@ int nvmet_enable_port(struct nvmet_port *port)
 	if (port->inline_data_size < 0)
 		port->inline_data_size = 0;
 
+	/*
+	 * If the transport didn't set the max_queue_size properly, then clamp
+	 * it to the target limits. Also set default values in case the
+	 * transport didn't set it at all.
+	 */
+	if (port->max_queue_size < 0)
+		port->max_queue_size = NVMET_MAX_QUEUE_SIZE;
+	else
+		port->max_queue_size = clamp_t(int, port->max_queue_size,
+					       NVMET_MIN_QUEUE_SIZE,
+					       NVMET_MAX_QUEUE_SIZE);
+
 	port->enabled = true;
 	port->tr_ops = ops;
 	return 0;
@@ -1223,9 +1235,10 @@ static void nvmet_init_cap(struct nvmet_ctrl *ctrl)
 	ctrl->cap |= (15ULL << 24);
 	/* maximum queue entries supported: */
 	if (ctrl->ops->get_max_queue_size)
-		ctrl->cap |= ctrl->ops->get_max_queue_size(ctrl) - 1;
+		ctrl->cap |= min_t(u16, ctrl->ops->get_max_queue_size(ctrl),
+				   ctrl->port->max_queue_size) - 1;
 	else
-		ctrl->cap |= NVMET_QUEUE_SIZE - 1;
+		ctrl->cap |= ctrl->port->max_queue_size - 1;
 
 	if (nvmet_is_passthru_subsys(ctrl->subsys))
 		nvmet_passthrough_override_cap(ctrl);
@@ -1411,6 +1424,7 @@ u16 nvmet_alloc_ctrl(const char *subsysnqn, const char *hostnqn,
 
 	kref_init(&ctrl->ref);
 	ctrl->subsys = subsys;
+	ctrl->pi_support = ctrl->port->pi_enable && ctrl->subsys->pi_support;
 	nvmet_init_cap(ctrl);
 	WRITE_ONCE(ctrl->aen_enabled, NVMET_AEN_CFG_OPTIONAL);

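Editor's note: nvmet_init_cap() now folds the configured port limit into CAP.MQES, which is zero's based: the value programmed is one less than the queue size. A standalone sketch of the arithmetic with made-up numbers; min_t() is the kernel helper being mimicked:

#include <stdio.h>

int main(void)
{
	int transport_max = 256;   /* ops->get_max_queue_size(ctrl), assumed */
	int port_max = 1024;       /* port->max_queue_size after clamping */
	int qsize = transport_max < port_max ? transport_max : port_max;

	printf("CAP.MQES = %d (queues of up to %d entries)\n",
	       qsize - 1, qsize);
	return 0;
}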
drivers/nvme/target/discovery.c

Lines changed: 1 addition & 1 deletion
@@ -282,7 +282,7 @@ static void nvmet_execute_disc_identify(struct nvmet_req *req)
 	id->lpa = (1 << 2);
 
 	/* no enforcement soft-limit for maxcmd - pick arbitrary high value */
-	id->maxcmd = cpu_to_le16(NVMET_MAX_CMD);
+	id->maxcmd = cpu_to_le16(NVMET_MAX_CMD(ctrl));
 
 	id->sgls = cpu_to_le32(1 << 0); /* we always support SGLs */
 	if (ctrl->ops->flags & NVMF_KEYED_SGLS)

drivers/nvme/target/fabrics-cmd.c

Lines changed: 2 additions & 3 deletions
@@ -157,7 +157,8 @@ static u16 nvmet_install_queue(struct nvmet_ctrl *ctrl, struct nvmet_req *req)
 		return NVME_SC_CMD_SEQ_ERROR | NVME_SC_DNR;
 	}
 
-	if (sqsize > mqes) {
+	/* for fabrics, this value applies to only the I/O Submission Queues */
+	if (qid && sqsize > mqes) {
 		pr_warn("sqsize %u is larger than MQES supported %u cntlid %d\n",
 			sqsize, mqes, ctrl->cntlid);
 		req->error_loc = offsetof(struct nvmf_connect_command, sqsize);
@@ -251,8 +252,6 @@ static void nvmet_execute_admin_connect(struct nvmet_req *req)
 	if (status)
 		goto out;
 
-	ctrl->pi_support = ctrl->port->pi_enable && ctrl->subsys->pi_support;
-
 	uuid_copy(&ctrl->hostid, &d->hostid);
 
 	ret = nvmet_setup_auth(ctrl);

drivers/nvme/target/fcloop.c

Lines changed: 9 additions & 8 deletions
@@ -1556,23 +1556,24 @@ static const struct attribute_group *fcloop_dev_attr_groups[] = {
 	NULL,
 };
 
-static struct class *fcloop_class;
+static const struct class fcloop_class = {
+	.name = "fcloop",
+};
 static struct device *fcloop_device;
 
 
 static int __init fcloop_init(void)
 {
 	int ret;
 
-	fcloop_class = class_create("fcloop");
-	if (IS_ERR(fcloop_class)) {
+	ret = class_register(&fcloop_class);
+	if (ret) {
 		pr_err("couldn't register class fcloop\n");
-		ret = PTR_ERR(fcloop_class);
 		return ret;
 	}
 
 	fcloop_device = device_create_with_groups(
-				fcloop_class, NULL, MKDEV(0, 0), NULL,
+				&fcloop_class, NULL, MKDEV(0, 0), NULL,
 				fcloop_dev_attr_groups, "ctl");
 	if (IS_ERR(fcloop_device)) {
 		pr_err("couldn't create ctl device!\n");
@@ -1585,7 +1586,7 @@ static int __init fcloop_init(void)
 	return 0;
 
 out_destroy_class:
-	class_destroy(fcloop_class);
+	class_unregister(&fcloop_class);
 	return ret;
 }
 
@@ -1643,8 +1644,8 @@ static void __exit fcloop_exit(void)
 
 	put_device(fcloop_device);
 
-	device_destroy(fcloop_class, MKDEV(0, 0));
-	class_destroy(fcloop_class);
+	device_destroy(&fcloop_class, MKDEV(0, 0));
+	class_unregister(&fcloop_class);
 }
 
 module_init(fcloop_init);
