@@ -551,13 +551,14 @@ static int sb_mac(struct dm_integrity_c *ic, bool wr)
 	return 0;
 }
 
-static int sync_rw_sb(struct dm_integrity_c *ic, int op, int op_flags)
+static int sync_rw_sb(struct dm_integrity_c *ic, blk_opf_t opf)
 {
 	struct dm_io_request io_req;
 	struct dm_io_region io_loc;
+	const enum req_op op = opf & REQ_OP_MASK;
 	int r;
 
-	io_req.bi_opf = op | op_flags;
+	io_req.bi_opf = opf;
 	io_req.mem.type = DM_IO_KMEM;
 	io_req.mem.ptr.addr = ic->sb;
 	io_req.notify.fn = NULL;
@@ -1049,8 +1050,9 @@ static void complete_journal_io(unsigned long error, void *context)
 	complete_journal_op(comp);
 }
 
-static void rw_journal_sectors(struct dm_integrity_c *ic, int op, int op_flags,
-			       unsigned sector, unsigned n_sectors, struct journal_completion *comp)
+static void rw_journal_sectors(struct dm_integrity_c *ic, blk_opf_t opf,
+			       unsigned sector, unsigned n_sectors,
+			       struct journal_completion *comp)
 {
 	struct dm_io_request io_req;
 	struct dm_io_region io_loc;
@@ -1066,7 +1068,7 @@ static void rw_journal_sectors(struct dm_integrity_c *ic, int op, int op_flags,
 	pl_index = sector >> (PAGE_SHIFT - SECTOR_SHIFT);
 	pl_offset = (sector << SECTOR_SHIFT) & (PAGE_SIZE - 1);
 
-	io_req.bi_opf = op | op_flags;
+	io_req.bi_opf = opf;
 	io_req.mem.type = DM_IO_PAGE_LIST;
 	if (ic->journal_io)
 		io_req.mem.ptr.pl = &ic->journal_io[pl_index];
@@ -1086,23 +1088,25 @@ static void rw_journal_sectors(struct dm_integrity_c *ic, int op, int op_flags,
 
 	r = dm_io(&io_req, 1, &io_loc, NULL);
 	if (unlikely(r)) {
-		dm_integrity_io_error(ic, op == REQ_OP_READ ? "reading journal" : "writing journal", r);
+		dm_integrity_io_error(ic, (opf & REQ_OP_MASK) == REQ_OP_READ ?
+				      "reading journal" : "writing journal", r);
 		if (comp) {
 			WARN_ONCE(1, "asynchronous dm_io failed: %d", r);
 			complete_journal_io(-1UL, comp);
 		}
 	}
 }
 
-static void rw_journal(struct dm_integrity_c *ic, int op, int op_flags, unsigned section,
-		       unsigned n_sections, struct journal_completion *comp)
+static void rw_journal(struct dm_integrity_c *ic, blk_opf_t opf,
+		       unsigned section, unsigned n_sections,
+		       struct journal_completion *comp)
 {
 	unsigned sector, n_sectors;
 
 	sector = section * ic->journal_section_sectors;
 	n_sectors = n_sections * ic->journal_section_sectors;
 
-	rw_journal_sectors(ic, op, op_flags, sector, n_sectors, comp);
+	rw_journal_sectors(ic, opf, sector, n_sectors, comp);
 }
 
 static void write_journal(struct dm_integrity_c *ic, unsigned commit_start, unsigned commit_sections)
@@ -1127,7 +1131,7 @@ static void write_journal(struct dm_integrity_c *ic, unsigned commit_start, unsi
 			for (i = 0; i < commit_sections; i++)
 				rw_section_mac(ic, commit_start + i, true);
 		}
-		rw_journal(ic, REQ_OP_WRITE, REQ_FUA | REQ_SYNC, commit_start,
+		rw_journal(ic, REQ_OP_WRITE | REQ_FUA | REQ_SYNC, commit_start,
 			   commit_sections, &io_comp);
 	} else {
 		unsigned to_end;
@@ -1139,7 +1143,8 @@ static void write_journal(struct dm_integrity_c *ic, unsigned commit_start, unsi
 			crypt_comp_1.in_flight = (atomic_t)ATOMIC_INIT(0);
 			encrypt_journal(ic, true, commit_start, to_end, &crypt_comp_1);
 			if (try_wait_for_completion(&crypt_comp_1.comp)) {
-				rw_journal(ic, REQ_OP_WRITE, REQ_FUA, commit_start, to_end, &io_comp);
+				rw_journal(ic, REQ_OP_WRITE | REQ_FUA,
+					   commit_start, to_end, &io_comp);
 				reinit_completion(&crypt_comp_1.comp);
 				crypt_comp_1.in_flight = (atomic_t)ATOMIC_INIT(0);
 				encrypt_journal(ic, true, 0, commit_sections - to_end, &crypt_comp_1);
@@ -1150,17 +1155,17 @@ static void write_journal(struct dm_integrity_c *ic, unsigned commit_start, unsi
 				crypt_comp_2.in_flight = (atomic_t)ATOMIC_INIT(0);
 				encrypt_journal(ic, true, 0, commit_sections - to_end, &crypt_comp_2);
 				wait_for_completion_io(&crypt_comp_1.comp);
-				rw_journal(ic, REQ_OP_WRITE, REQ_FUA, commit_start, to_end, &io_comp);
+				rw_journal(ic, REQ_OP_WRITE | REQ_FUA, commit_start, to_end, &io_comp);
 				wait_for_completion_io(&crypt_comp_2.comp);
 			}
 		} else {
 			for (i = 0; i < to_end; i++)
 				rw_section_mac(ic, commit_start + i, true);
-			rw_journal(ic, REQ_OP_WRITE, REQ_FUA, commit_start, to_end, &io_comp);
+			rw_journal(ic, REQ_OP_WRITE | REQ_FUA, commit_start, to_end, &io_comp);
 			for (i = 0; i < commit_sections - to_end; i++)
 				rw_section_mac(ic, i, true);
 		}
-		rw_journal(ic, REQ_OP_WRITE, REQ_FUA, 0, commit_sections - to_end, &io_comp);
+		rw_journal(ic, REQ_OP_WRITE | REQ_FUA, 0, commit_sections - to_end, &io_comp);
 	}
 
 	wait_for_completion_io(&io_comp.comp);
@@ -2622,7 +2627,7 @@ static void recalc_write_super(struct dm_integrity_c *ic)
 	if (dm_integrity_failed(ic))
 		return;
 
-	r = sync_rw_sb(ic, REQ_OP_WRITE, 0);
+	r = sync_rw_sb(ic, REQ_OP_WRITE);
 	if (unlikely(r))
 		dm_integrity_io_error(ic, "writing superblock", r);
 }
@@ -2795,7 +2800,7 @@ static void bitmap_block_work(struct work_struct *w)
 	if (bio_list_empty(&waiting))
 		return;
 
-	rw_journal_sectors(ic, REQ_OP_WRITE, REQ_FUA | REQ_SYNC,
+	rw_journal_sectors(ic, REQ_OP_WRITE | REQ_FUA | REQ_SYNC,
 			   bbs->idx * (BITMAP_BLOCK_SIZE >> SECTOR_SHIFT),
 			   BITMAP_BLOCK_SIZE >> SECTOR_SHIFT, NULL);
 
@@ -2841,7 +2846,7 @@ static void bitmap_flush_work(struct work_struct *work)
 	block_bitmap_op(ic, ic->journal, 0, limit, BITMAP_OP_CLEAR);
 	block_bitmap_op(ic, ic->may_write_bitmap, 0, limit, BITMAP_OP_CLEAR);
 
-	rw_journal_sectors(ic, REQ_OP_WRITE, REQ_FUA | REQ_SYNC, 0,
+	rw_journal_sectors(ic, REQ_OP_WRITE | REQ_FUA | REQ_SYNC, 0,
 			   ic->n_bitmap_blocks * (BITMAP_BLOCK_SIZE >> SECTOR_SHIFT), NULL);
 
 	spin_lock_irq(&ic->endio_wait.lock);
@@ -2913,7 +2918,7 @@ static void replay_journal(struct dm_integrity_c *ic)
 
 	if (!ic->just_formatted) {
 		DEBUG_print("reading journal\n");
-		rw_journal(ic, REQ_OP_READ, 0, 0, ic->journal_sections, NULL);
+		rw_journal(ic, REQ_OP_READ, 0, ic->journal_sections, NULL);
 		if (ic->journal_io)
 			DEBUG_bytes(lowmem_page_address(ic->journal_io[0].page), 64, "read journal");
 		if (ic->journal_io) {
@@ -3108,7 +3113,7 @@ static void dm_integrity_postsuspend(struct dm_target *ti)
 		/* set to 0 to test bitmap replay code */
 		init_journal(ic, 0, ic->journal_sections, 0);
 		ic->sb->flags &= ~cpu_to_le32(SB_FLAG_DIRTY_BITMAP);
-		r = sync_rw_sb(ic, REQ_OP_WRITE, REQ_FUA);
+		r = sync_rw_sb(ic, REQ_OP_WRITE | REQ_FUA);
 		if (unlikely(r))
 			dm_integrity_io_error(ic, "writing superblock", r);
 #endif
@@ -3131,23 +3136,23 @@ static void dm_integrity_resume(struct dm_target *ti)
 		if (ic->provided_data_sectors > old_provided_data_sectors &&
 		    ic->mode == 'B' &&
 		    ic->sb->log2_blocks_per_bitmap_bit == ic->log2_blocks_per_bitmap_bit) {
-			rw_journal_sectors(ic, REQ_OP_READ, 0, 0,
+			rw_journal_sectors(ic, REQ_OP_READ, 0,
 					   ic->n_bitmap_blocks * (BITMAP_BLOCK_SIZE >> SECTOR_SHIFT), NULL);
 			block_bitmap_op(ic, ic->journal, old_provided_data_sectors,
 					ic->provided_data_sectors - old_provided_data_sectors, BITMAP_OP_SET);
-			rw_journal_sectors(ic, REQ_OP_WRITE, REQ_FUA | REQ_SYNC, 0,
+			rw_journal_sectors(ic, REQ_OP_WRITE | REQ_FUA | REQ_SYNC, 0,
 					   ic->n_bitmap_blocks * (BITMAP_BLOCK_SIZE >> SECTOR_SHIFT), NULL);
 		}
 
 		ic->sb->provided_data_sectors = cpu_to_le64(ic->provided_data_sectors);
-		r = sync_rw_sb(ic, REQ_OP_WRITE, REQ_FUA);
+		r = sync_rw_sb(ic, REQ_OP_WRITE | REQ_FUA);
 		if (unlikely(r))
 			dm_integrity_io_error(ic, "writing superblock", r);
 	}
 
 	if (ic->sb->flags & cpu_to_le32(SB_FLAG_DIRTY_BITMAP)) {
 		DEBUG_print("resume dirty_bitmap\n");
-		rw_journal_sectors(ic, REQ_OP_READ, 0, 0,
+		rw_journal_sectors(ic, REQ_OP_READ, 0,
 				   ic->n_bitmap_blocks * (BITMAP_BLOCK_SIZE >> SECTOR_SHIFT), NULL);
 		if (ic->mode == 'B') {
 			if (ic->sb->log2_blocks_per_bitmap_bit == ic->log2_blocks_per_bitmap_bit &&
@@ -3166,7 +3171,7 @@ static void dm_integrity_resume(struct dm_target *ti)
 			block_bitmap_op(ic, ic->recalc_bitmap, 0, ic->provided_data_sectors, BITMAP_OP_SET);
 			block_bitmap_op(ic, ic->may_write_bitmap, 0, ic->provided_data_sectors, BITMAP_OP_SET);
 			block_bitmap_op(ic, ic->journal, 0, ic->provided_data_sectors, BITMAP_OP_SET);
-			rw_journal_sectors(ic, REQ_OP_WRITE, REQ_FUA | REQ_SYNC, 0,
+			rw_journal_sectors(ic, REQ_OP_WRITE | REQ_FUA | REQ_SYNC, 0,
 					   ic->n_bitmap_blocks * (BITMAP_BLOCK_SIZE >> SECTOR_SHIFT), NULL);
 			ic->sb->flags |= cpu_to_le32(SB_FLAG_RECALCULATING);
 			ic->sb->recalc_sector = cpu_to_le64(0);
@@ -3182,7 +3187,7 @@ static void dm_integrity_resume(struct dm_target *ti)
 			replay_journal(ic);
 			ic->sb->flags &= ~cpu_to_le32(SB_FLAG_DIRTY_BITMAP);
 		}
-		r = sync_rw_sb(ic, REQ_OP_WRITE, REQ_FUA);
+		r = sync_rw_sb(ic, REQ_OP_WRITE | REQ_FUA);
 		if (unlikely(r))
 			dm_integrity_io_error(ic, "writing superblock", r);
 	} else {
@@ -3194,7 +3199,7 @@ static void dm_integrity_resume(struct dm_target *ti)
 		if (ic->mode == 'B') {
 			ic->sb->flags |= cpu_to_le32(SB_FLAG_DIRTY_BITMAP);
 			ic->sb->log2_blocks_per_bitmap_bit = ic->log2_blocks_per_bitmap_bit;
-			r = sync_rw_sb(ic, REQ_OP_WRITE, REQ_FUA);
+			r = sync_rw_sb(ic, REQ_OP_WRITE | REQ_FUA);
 			if (unlikely(r))
 				dm_integrity_io_error(ic, "writing superblock", r);
 
@@ -3210,7 +3215,7 @@ static void dm_integrity_resume(struct dm_target *ti)
 			block_bitmap_op(ic, ic->may_write_bitmap, le64_to_cpu(ic->sb->recalc_sector),
 					ic->provided_data_sectors - le64_to_cpu(ic->sb->recalc_sector), BITMAP_OP_SET);
 		}
-		rw_journal_sectors(ic, REQ_OP_WRITE, REQ_FUA | REQ_SYNC, 0,
+		rw_journal_sectors(ic, REQ_OP_WRITE | REQ_FUA | REQ_SYNC, 0,
 				   ic->n_bitmap_blocks * (BITMAP_BLOCK_SIZE >> SECTOR_SHIFT), NULL);
 	}
 }
@@ -4251,7 +4256,7 @@ static int dm_integrity_ctr(struct dm_target *ti, unsigned argc, char **argv)
 		goto bad;
 	}
 
-	r = sync_rw_sb(ic, REQ_OP_READ, 0);
+	r = sync_rw_sb(ic, REQ_OP_READ);
 	if (r) {
 		ti->error = "Error reading superblock";
 		goto bad;
@@ -4495,7 +4500,7 @@ static int dm_integrity_ctr(struct dm_target *ti, unsigned argc, char **argv)
 			ti->error = "Error initializing journal";
 			goto bad;
 		}
-		r = sync_rw_sb(ic, REQ_OP_WRITE, REQ_FUA);
+		r = sync_rw_sb(ic, REQ_OP_WRITE | REQ_FUA);
 		if (r) {
 			ti->error = "Error initializing superblock";
 			goto bad;
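
Note on the calling convention (not part of the patch): the diff collapses the separate "int op, int op_flags" parameters into a single blk_opf_t, so call sites OR the operation and its flags together (e.g. REQ_OP_WRITE | REQ_FUA | REQ_SYNC) and the callee recovers the bare operation with REQ_OP_MASK, exactly as the new sync_rw_sb() does. The sketch below illustrates that pattern; demo_rw() and demo_callers() are hypothetical names, while blk_opf_t, enum req_op, REQ_OP_MASK and the REQ_* constants are the real definitions from include/linux/blk_types.h.

#include <linux/blk_types.h>
#include <linux/printk.h>

/* Hypothetical helper mirroring the new sync_rw_sb()/rw_journal() signatures. */
static void demo_rw(blk_opf_t opf)
{
	/* Strip REQ_FUA, REQ_SYNC, etc. to get just the operation. */
	const enum req_op op = opf & REQ_OP_MASK;

	pr_debug("%s request%s\n",
		 op == REQ_OP_READ ? "read" : "write",
		 (opf & REQ_FUA) ? " (FUA)" : "");
}

/* Call sites combine the operation and its flags in one argument. */
static void demo_callers(void)
{
	demo_rw(REQ_OP_READ);                       /* old style: (REQ_OP_READ, 0) */
	demo_rw(REQ_OP_WRITE | REQ_FUA | REQ_SYNC); /* old style: (REQ_OP_WRITE, REQ_FUA | REQ_SYNC) */
}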