@@ -136,9 +136,25 @@ static blk_status_t read_pmem(struct page *page, unsigned int off,
 	return BLK_STS_OK;
 }
 
-static blk_status_t pmem_do_bvec(struct pmem_device *pmem, struct page *page,
-			unsigned int len, unsigned int off, unsigned int op,
-			sector_t sector)
+static blk_status_t pmem_do_read(struct pmem_device *pmem,
+			struct page *page, unsigned int page_off,
+			sector_t sector, unsigned int len)
+{
+	blk_status_t rc;
+	phys_addr_t pmem_off = sector * 512 + pmem->data_offset;
+	void *pmem_addr = pmem->virt_addr + pmem_off;
+
+	if (unlikely(is_bad_pmem(&pmem->bb, sector, len)))
+		return BLK_STS_IOERR;
+
+	rc = read_pmem(page, page_off, pmem_addr, len);
+	flush_dcache_page(page);
+	return rc;
+}
+
+static blk_status_t pmem_do_write(struct pmem_device *pmem,
+			struct page *page, unsigned int page_off,
+			sector_t sector, unsigned int len)
 {
 	blk_status_t rc = BLK_STS_OK;
 	bool bad_pmem = false;
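For context, a stand-alone sketch of the sector-to-address translation both new helpers rely on: a 512-byte sector index is scaled to a byte offset, shifted past data_offset (the label/metadata area preceding user data), and applied to the kernel virtual mapping of the device. Struct and function names below are hypothetical, not the driver's.

#include <stdint.h>

/* Hypothetical mirror of the two pmem_device fields used above. */
struct pmem_sketch {
	char *virt_addr;	/* mapping of the whole pmem range */
	uint64_t data_offset;	/* bytes reserved before user data */
};

static void *pmem_sector_to_addr(struct pmem_sketch *pmem, uint64_t sector)
{
	uint64_t pmem_off = sector * 512 + pmem->data_offset;

	return pmem->virt_addr + pmem_off;
}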
@@ -148,34 +164,25 @@ static blk_status_t pmem_do_bvec(struct pmem_device *pmem, struct page *page,
 	if (unlikely(is_bad_pmem(&pmem->bb, sector, len)))
 		bad_pmem = true;
 
-	if (!op_is_write(op)) {
-		if (unlikely(bad_pmem))
-			rc = BLK_STS_IOERR;
-		else {
-			rc = read_pmem(page, off, pmem_addr, len);
-			flush_dcache_page(page);
-		}
-	} else {
-		/*
-		 * Note that we write the data both before and after
-		 * clearing poison. The write before clear poison
-		 * handles situations where the latest written data is
-		 * preserved and the clear poison operation simply marks
-		 * the address range as valid without changing the data.
-		 * In this case application software can assume that an
-		 * interrupted write will either return the new good
-		 * data or an error.
-		 *
-		 * However, if pmem_clear_poison() leaves the data in an
-		 * indeterminate state we need to perform the write
-		 * after clear poison.
-		 */
-		flush_dcache_page(page);
-		write_pmem(pmem_addr, page, off, len);
-		if (unlikely(bad_pmem)) {
-			rc = pmem_clear_poison(pmem, pmem_off, len);
-			write_pmem(pmem_addr, page, off, len);
-		}
+	/*
+	 * Note that we write the data both before and after
+	 * clearing poison. The write before clear poison
+	 * handles situations where the latest written data is
+	 * preserved and the clear poison operation simply marks
+	 * the address range as valid without changing the data.
+	 * In this case application software can assume that an
+	 * interrupted write will either return the new good
+	 * data or an error.
+	 *
+	 * However, if pmem_clear_poison() leaves the data in an
+	 * indeterminate state we need to perform the write
+	 * after clear poison.
+	 */
+	flush_dcache_page(page);
+	write_pmem(pmem_addr, page, page_off, len);
+	if (unlikely(bad_pmem)) {
+		rc = pmem_clear_poison(pmem, pmem_off, len);
+		write_pmem(pmem_addr, page, page_off, len);
 	}
 
 	return rc;
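The comment above carries the key invariant; a condensed user-space restatement of the write/clear/write ordering, with hypothetical stand-ins for write_pmem() and pmem_clear_poison(), may make it easier to see:

#include <stddef.h>
#include <string.h>

/* Hypothetical stand-ins; the driver uses cache-bypassing copies
 * and calls into the NVDIMM bus to clear poison. */
static void write_sketch(void *dst, const void *src, size_t len)
{
	memcpy(dst, src, len);
}

static int clear_poison_sketch(void *dst, size_t len)
{
	(void)dst;
	(void)len;
	return 0;
}

/*
 * Same ordering as pmem_do_write() above: write, clear poison, write
 * again. Invariant: an interrupted write leaves the range returning
 * either the new data or an error, never silently stale data.
 */
static int write_with_poison_clear(void *dst, const void *src,
				   size_t len, int bad_pmem)
{
	int rc = 0;

	/* First write: suffices if clearing poison only revalidates
	 * the range without touching its contents. */
	write_sketch(dst, src, len);
	if (bad_pmem) {
		rc = clear_poison_sketch(dst, len);
		/* Second write: covers the case where clearing poison
		 * left the contents indeterminate. */
		write_sketch(dst, src, len);
	}
	return rc;
}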
@@ -197,8 +204,12 @@ static blk_qc_t pmem_make_request(struct request_queue *q, struct bio *bio)
 
 	do_acct = nd_iostat_start(bio, &start);
 	bio_for_each_segment(bvec, bio, iter) {
-		rc = pmem_do_bvec(pmem, bvec.bv_page, bvec.bv_len,
-				bvec.bv_offset, bio_op(bio), iter.bi_sector);
+		if (op_is_write(bio_op(bio)))
+			rc = pmem_do_write(pmem, bvec.bv_page, bvec.bv_offset,
+				iter.bi_sector, bvec.bv_len);
+		else
+			rc = pmem_do_read(pmem, bvec.bv_page, bvec.bv_offset,
+				iter.bi_sector, bvec.bv_len);
 		if (rc) {
 			bio->bi_status = rc;
 			break;
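The same read/write dispatch appears again in pmem_rw_page() below. Keeping it open-coded means the read path carries none of the write-only poison bookkeeping; if more call sites appeared, a thin wrapper along these lines could restore a single entry point (a sketch only, not part of the patch, using just the two helpers introduced above):

static blk_status_t pmem_do_bvec_sketch(struct pmem_device *pmem,
		struct page *page, unsigned int page_off,
		sector_t sector, unsigned int len, unsigned int op)
{
	/* Pick the direction once, then defer to the split helpers. */
	if (op_is_write(op))
		return pmem_do_write(pmem, page, page_off, sector, len);
	return pmem_do_read(pmem, page, page_off, sector, len);
}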
@@ -223,9 +234,12 @@ static int pmem_rw_page(struct block_device *bdev, sector_t sector,
 	struct pmem_device *pmem = bdev->bd_queue->queuedata;
 	blk_status_t rc;
 
-	rc = pmem_do_bvec(pmem, page, hpage_nr_pages(page) * PAGE_SIZE,
-			0, op, sector);
-
+	if (op_is_write(op))
+		rc = pmem_do_write(pmem, page, 0, sector,
+			      hpage_nr_pages(page) * PAGE_SIZE);
+	else
+		rc = pmem_do_read(pmem, page, 0, sector,
+			      hpage_nr_pages(page) * PAGE_SIZE);
 	/*
 	 * The ->rw_page interface is subtle and tricky. The core
 	 * retries on any error, so we can only invoke page_endio() in