@@ -129,12 +129,11 @@ void dma_update_cpu_trans(unsigned long *entry, void *page_addr, int flags)
 		entry_clr_protected(entry);
 }
 
-static int dma_update_trans(struct zpci_dev *zdev, unsigned long pa,
-			    dma_addr_t dma_addr, size_t size, int flags)
+static int __dma_update_trans(struct zpci_dev *zdev, unsigned long pa,
+			      dma_addr_t dma_addr, size_t size, int flags)
 {
 	unsigned int nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
 	u8 *page_addr = (u8 *) (pa & PAGE_MASK);
-	dma_addr_t start_dma_addr = dma_addr;
 	unsigned long irq_flags;
 	unsigned long *entry;
 	int i, rc = 0;
@@ -145,7 +144,7 @@ static int dma_update_trans(struct zpci_dev *zdev, unsigned long pa,
 	spin_lock_irqsave(&zdev->dma_table_lock, irq_flags);
 	if (!zdev->dma_table) {
 		rc = -EINVAL;
-		goto no_refresh;
+		goto out_unlock;
 	}
 
 	for (i = 0; i < nr_pages; i++) {
@@ -159,20 +158,6 @@ static int dma_update_trans(struct zpci_dev *zdev, unsigned long pa,
 		dma_addr += PAGE_SIZE;
 	}
 
-	/*
-	 * With zdev->tlb_refresh == 0, rpcit is not required to establish new
-	 * translations when previously invalid translation-table entries are
-	 * validated. With lazy unmap, it also is skipped for previously valid
-	 * entries, but a global rpcit is then required before any address can
-	 * be re-used, i.e. after each iommu bitmap wrap-around.
-	 */
-	if (!zdev->tlb_refresh &&
-	    (!s390_iommu_strict ||
-	     ((flags & ZPCI_PTE_VALID_MASK) == ZPCI_PTE_VALID)))
-		goto no_refresh;
-
-	rc = zpci_refresh_trans((u64) zdev->fh << 32, start_dma_addr,
-				nr_pages * PAGE_SIZE);
 undo_cpu_trans:
 	if (rc && ((flags & ZPCI_PTE_VALID_MASK) == ZPCI_PTE_VALID)) {
 		flags = ZPCI_PTE_INVALID;
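The refresh logic removed here is not gone: it reappears, essentially unchanged, in the new __dma_purge_tlb() helper in the next hunk. Moving it out of the table-update path is also why start_dma_addr could be dropped from __dma_update_trans() above.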
@@ -185,12 +170,46 @@ static int dma_update_trans(struct zpci_dev *zdev, unsigned long pa,
 			dma_update_cpu_trans(entry, page_addr, flags);
 		}
 	}
-
-no_refresh:
+out_unlock:
 	spin_unlock_irqrestore(&zdev->dma_table_lock, irq_flags);
 	return rc;
 }
 
+static int __dma_purge_tlb(struct zpci_dev *zdev, dma_addr_t dma_addr,
+			   size_t size, int flags)
+{
+	/*
+	 * With zdev->tlb_refresh == 0, rpcit is not required to establish new
+	 * translations when previously invalid translation-table entries are
+	 * validated. With lazy unmap, it also is skipped for previously valid
+	 * entries, but a global rpcit is then required before any address can
+	 * be re-used, i.e. after each iommu bitmap wrap-around.
+	 */
+	if (!zdev->tlb_refresh &&
+	    (!s390_iommu_strict ||
+	     ((flags & ZPCI_PTE_VALID_MASK) == ZPCI_PTE_VALID)))
+		return 0;
+
+	return zpci_refresh_trans((u64) zdev->fh << 32, dma_addr,
+				  PAGE_ALIGN(size));
+}
+
+static int dma_update_trans(struct zpci_dev *zdev, unsigned long pa,
+			    dma_addr_t dma_addr, size_t size, int flags)
+{
+	int rc;
+
+	rc = __dma_update_trans(zdev, pa, dma_addr, size, flags);
+	if (rc)
+		return rc;
+
+	rc = __dma_purge_tlb(zdev, dma_addr, size, flags);
+	if (rc && ((flags & ZPCI_PTE_VALID_MASK) == ZPCI_PTE_VALID))
+		__dma_update_trans(zdev, pa, dma_addr, size, ZPCI_PTE_INVALID);
+
+	return rc;
+}
+
 void dma_free_seg_table(unsigned long entry)
 {
 	unsigned long *sto = get_rt_sto(entry);
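With this split, __dma_update_trans() only touches the CPU-side translation-table entries under dma_table_lock, __dma_purge_tlb() decides whether an RPCIT refresh is needed and issues it, and dma_update_trans() becomes a thin wrapper that chains the two, rolling the entries back to ZPCI_PTE_INVALID if the purge fails during a map. The scatter-gather path below exploits the split to batch the refresh: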
@@ -411,12 +430,16 @@ static int __s390_dma_map_sg(struct device *dev, struct scatterlist *sg,
 
 	for (s = sg; dma_addr < dma_addr_base + size; s = sg_next(s)) {
 		pa = page_to_phys(sg_page(s)) + s->offset;
-		ret = dma_update_trans(zdev, pa, dma_addr, s->length, flags);
+		ret = __dma_update_trans(zdev, pa, dma_addr, s->length, flags);
 		if (ret)
 			goto unmap;
 
 		dma_addr += s->length;
 	}
+	ret = __dma_purge_tlb(zdev, dma_addr_base, size, flags);
+	if (ret)
+		goto unmap;
+
 	*handle = dma_addr_base;
 	atomic64_add(size >> PAGE_SHIFT, &zdev->mapped_pages);
 
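Taken together, __s390_dma_map_sg() now issues at most one RPCIT for the whole scatterlist instead of one per segment (when a refresh is required at all). A minimal userspace sketch makes the saving concrete; this is not kernel code, zpci_refresh_trans() is stubbed out as a counter and the three segment sizes are hypothetical:

/* Toy model: the real zpci_refresh_trans() issues an RPCIT instruction;
 * here it only counts invocations. */
#include <stdio.h>

#define PAGE_SIZE 4096UL

static unsigned long rpcit_count;

static void refresh_trans(unsigned long dma_addr, unsigned long size)
{
	(void)dma_addr;
	(void)size;
	rpcit_count++;		/* one RPCIT in the real code */
}

int main(void)
{
	/* hypothetical scatterlist: segments of 3, 1 and 2 pages */
	unsigned long seg_len[] = { 3 * PAGE_SIZE, PAGE_SIZE, 2 * PAGE_SIZE };
	unsigned long dma_base = 0x1000000UL, dma = dma_base, total = 0;
	unsigned int i;

	/* old path: dma_update_trans() refreshed after every segment */
	for (i = 0; i < 3; i++) {
		refresh_trans(dma, seg_len[i]);
		dma += seg_len[i];
		total += seg_len[i];
	}
	printf("per-segment refreshes: %lu\n", rpcit_count);

	/* new path: one __dma_purge_tlb() over the whole mapped range */
	rpcit_count = 0;
	refresh_trans(dma_base, total);
	printf("batched refreshes:     %lu\n", rpcit_count);
	return 0;
}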