 #include <linux/percpu-refcount.h>
 #include <linux/part_stat.h>
 
-#include <trace/events/block.h>
 #include "md.h"
 #include "md-bitmap.h"
 #include "md-cluster.h"
@@ -2411,7 +2410,7 @@ int md_integrity_register(struct mddev *mddev)
 
 	if (list_empty(&mddev->disks))
 		return 0; /* nothing to do */
-	if (!mddev->gendisk || blk_get_integrity(mddev->gendisk))
+	if (mddev_is_dm(mddev) || blk_get_integrity(mddev->gendisk))
 		return 0; /* shouldn't register, or already is */
 	rdev_for_each(rdev, mddev) {
 		/* skip spares and non-functional disks */
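
Note: mddev_is_dm() is a helper introduced earlier in this series in md.h; the
definition below is a sketch, assuming it simply gives the old !mddev->gendisk
test a descriptive name (dm-raid arrays are exactly the ones without a gendisk):

	/* sketch: an md array driven by dm-raid has no gendisk of its own */
	static inline bool mddev_is_dm(struct mddev *mddev)
	{
		return !mddev->gendisk;
	}
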
@@ -2464,7 +2463,7 @@ int md_integrity_add_rdev(struct md_rdev *rdev, struct mddev *mddev)
 {
 	struct blk_integrity *bi_mddev;
 
-	if (!mddev->gendisk)
+	if (mddev_is_dm(mddev))
 		return 0;
 
 	bi_mddev = blk_get_integrity(mddev->gendisk);
@@ -2857,8 +2856,7 @@ void md_update_sb(struct mddev *mddev, int force_change)
 	pr_debug("md: updating %s RAID superblock on device (in sync %d)\n",
 		 mdname(mddev), mddev->in_sync);
 
-	if (mddev->queue)
-		blk_add_trace_msg(mddev->queue, "md md_update_sb");
+	mddev_add_trace_msg(mddev, "md md_update_sb");
 rewrite:
 	md_bitmap_update_sb(mddev->bitmap);
 	rdev_for_each(rdev, mddev) {
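
Note: mddev_add_trace_msg() folds the old "if (mddev->queue)" guard into one
helper. The md.h definition below is a sketch, assuming it keeps the same
semantics as the two lines it replaces:

	/* sketch: skip blktrace messages for dm-raid, which owns no queue here */
	#define mddev_add_trace_msg(mddev, fmt, args...)			\
	do {									\
		if (!mddev_is_dm(mddev))					\
			blk_add_trace_msg((mddev)->gendisk->queue, fmt, ##args); \
	} while (0)
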
@@ -4166,7 +4164,6 @@ level_store(struct mddev *mddev, const char *buf, size_t len)
 		mddev->in_sync = 1;
 		del_timer_sync(&mddev->safemode_timer);
 	}
-	blk_set_stacking_limits(&mddev->queue->limits);
 	pers->run(mddev);
 	set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
 	if (!mddev->thread)
@@ -5753,6 +5750,51 @@ static const struct kobj_type md_ktype = {
 
 int mdp_major = 0;
 
+/* stack the limit for all rdevs into lim */
+void mddev_stack_rdev_limits(struct mddev *mddev, struct queue_limits *lim)
+{
+	struct md_rdev *rdev;
+
+	rdev_for_each(rdev, mddev) {
+		queue_limits_stack_bdev(lim, rdev->bdev, rdev->data_offset,
+					mddev->gendisk->disk_name);
+	}
+}
+EXPORT_SYMBOL_GPL(mddev_stack_rdev_limits);
+
+/* apply the extra stacking limits from a new rdev into mddev */
+int mddev_stack_new_rdev(struct mddev *mddev, struct md_rdev *rdev)
+{
+	struct queue_limits lim;
+
+	if (mddev_is_dm(mddev))
+		return 0;
+
+	lim = queue_limits_start_update(mddev->gendisk->queue);
+	queue_limits_stack_bdev(&lim, rdev->bdev, rdev->data_offset,
+				mddev->gendisk->disk_name);
+	return queue_limits_commit_update(mddev->gendisk->queue, &lim);
+}
+EXPORT_SYMBOL_GPL(mddev_stack_new_rdev);
+
+/* update the optimal I/O size after a reshape */
+void mddev_update_io_opt(struct mddev *mddev, unsigned int nr_stripes)
+{
+	struct queue_limits lim;
+
+	if (mddev_is_dm(mddev))
+		return;
+
+	/* don't bother updating io_opt if we can't suspend the array */
+	if (mddev_suspend(mddev, false) < 0)
+		return;
+	lim = queue_limits_start_update(mddev->gendisk->queue);
+	lim.io_opt = lim.io_min * nr_stripes;
+	queue_limits_commit_update(mddev->gendisk->queue, &lim);
+	mddev_resume(mddev);
+}
+EXPORT_SYMBOL_GPL(mddev_update_io_opt);
+
 static void mddev_delayed_delete(struct work_struct *ws)
 {
 	struct mddev *mddev = container_of(ws, struct mddev, del_work);
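
Note: with mddev->queue gone, a personality is expected to build a
queue_limits itself and stack the member devices into it via the helper
above. The caller below is an illustrative sketch only (example_set_limits
is a made-up name; a simple striped array is assumed for the io_min/io_opt
math):

	/* sketch: build limits for a striped array, then stack in all rdevs */
	static int example_set_limits(struct mddev *mddev)
	{
		struct queue_limits lim;

		blk_set_stacking_limits(&lim);
		lim.io_min = mddev->chunk_sectors << 9;
		lim.io_opt = lim.io_min * mddev->raid_disks;
		mddev_stack_rdev_limits(mddev, &lim);
		return queue_limits_set(mddev->gendisk->queue, &lim);
	}
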
@@ -5835,9 +5877,7 @@ struct mddev *md_alloc(dev_t dev, char *name)
 	disk->fops = &md_fops;
 	disk->private_data = mddev;
 
-	mddev->queue = disk->queue;
-	blk_set_stacking_limits(&mddev->queue->limits);
-	blk_queue_write_cache(mddev->queue, true, true);
+	blk_queue_write_cache(disk->queue, true, true);
 	disk->events |= DISK_EVENT_MEDIA_CHANGE;
 	mddev->gendisk = disk;
 	error = add_disk(disk);
@@ -5979,7 +6019,7 @@ int md_run(struct mddev *mddev)
 		invalidate_bdev(rdev->bdev);
 		if (mddev->ro != MD_RDONLY && rdev_read_only(rdev)) {
 			mddev->ro = MD_RDONLY;
-			if (mddev->gendisk)
+			if (!mddev_is_dm(mddev))
 				set_disk_ro(mddev->gendisk, 1);
 		}
 
@@ -6141,7 +6181,8 @@ int md_run(struct mddev *mddev)
 		}
 	}
 
-	if (mddev->queue) {
+	if (!mddev_is_dm(mddev)) {
+		struct request_queue *q = mddev->gendisk->queue;
 		bool nonrot = true;
 
 		rdev_for_each(rdev, mddev) {
@@ -6153,14 +6194,14 @@ int md_run(struct mddev *mddev)
 		if (mddev->degraded)
 			nonrot = false;
 		if (nonrot)
-			blk_queue_flag_set(QUEUE_FLAG_NONROT, mddev->queue);
+			blk_queue_flag_set(QUEUE_FLAG_NONROT, q);
 		else
-			blk_queue_flag_clear(QUEUE_FLAG_NONROT, mddev->queue);
-		blk_queue_flag_set(QUEUE_FLAG_IO_STAT, mddev->queue);
+			blk_queue_flag_clear(QUEUE_FLAG_NONROT, q);
+		blk_queue_flag_set(QUEUE_FLAG_IO_STAT, q);
 
 		/* Set the NOWAIT flags if all underlying devices support it */
 		if (nowait)
-			blk_queue_flag_set(QUEUE_FLAG_NOWAIT, mddev->queue);
+			blk_queue_flag_set(QUEUE_FLAG_NOWAIT, q);
 	}
 	if (pers->sync_request) {
 		if (mddev->kobj.sd &&
@@ -6406,8 +6447,10 @@ static void mddev_detach(struct mddev *mddev)
 			mddev->pers->quiesce(mddev, 0);
 	}
 	md_unregister_thread(mddev, &mddev->thread);
-	if (mddev->queue)
-		blk_sync_queue(mddev->queue); /* the unplug fn references 'conf'*/
+
+	/* the unplug fn references 'conf' */
+	if (!mddev_is_dm(mddev))
+		blk_sync_queue(mddev->gendisk->queue);
 }
 
 static void __md_stop(struct mddev *mddev)
@@ -7125,7 +7168,7 @@ static int hot_add_disk(struct mddev *mddev, dev_t dev)
 	if (!bdev_nowait(rdev->bdev)) {
 		pr_info("%s: Disabling nowait because %pg does not support nowait\n",
 			mdname(mddev), rdev->bdev);
-		blk_queue_flag_clear(QUEUE_FLAG_NOWAIT, mddev->queue);
+		blk_queue_flag_clear(QUEUE_FLAG_NOWAIT, mddev->gendisk->queue);
 	}
 	/*
 	 * Kick recovery, maybe this spare has to be added to the
@@ -7362,10 +7405,9 @@ static int update_size(struct mddev *mddev, sector_t num_sectors)
 	if (!rv) {
 		if (mddev_is_clustered(mddev))
 			md_cluster_ops->update_size(mddev, old_dev_sectors);
-		else if (mddev->queue) {
+		else if (!mddev_is_dm(mddev))
 			set_capacity_and_notify(mddev->gendisk,
 						mddev->array_sectors);
-		}
 	}
 	return rv;
 }
@@ -8686,10 +8728,7 @@ void md_submit_discard_bio(struct mddev *mddev, struct md_rdev *rdev,
 
 	bio_chain(discard_bio, bio);
 	bio_clone_blkg_association(discard_bio, bio);
-	if (mddev->gendisk)
-		trace_block_bio_remap(discard_bio,
-				      disk_devt(mddev->gendisk),
-				      bio->bi_iter.bi_sector);
+	mddev_trace_remap(mddev, discard_bio, bio->bi_iter.bi_sector);
 	submit_bio_noacct(discard_bio);
 }
 EXPORT_SYMBOL_GPL(md_submit_discard_bio);
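
Note: mddev_trace_remap() hides the same gendisk test as the code it
replaces. The definition below is a sketch, assuming it mirrors the removed
open-coded sequence:

	/* sketch: remap tracing only makes sense when md owns the gendisk */
	static inline void mddev_trace_remap(struct mddev *mddev, struct bio *bio,
			sector_t sector)
	{
		if (!mddev_is_dm(mddev))
			trace_block_bio_remap(bio, disk_devt(mddev->gendisk), sector);
	}
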
@@ -9182,7 +9221,7 @@ void md_do_sync(struct md_thread *thread)
 	    mddev->delta_disks > 0 &&
 	    mddev->pers->finish_reshape &&
 	    mddev->pers->size &&
-	    mddev->queue) {
+	    !mddev_is_dm(mddev)) {
 		mddev_lock_nointr(mddev);
 		md_set_array_sectors(mddev, mddev->pers->size(mddev, 0, 0));
 		mddev_unlock(mddev);