@@ -98,8 +98,6 @@ static long mm_iommu_do_alloc(struct mm_struct *mm, unsigned long ua,
 	struct mm_iommu_table_group_mem_t *mem;
 	long i, ret, locked_entries = 0;
 	unsigned int pageshift;
-	unsigned long flags;
-	unsigned long cur_ua;
 
 	mutex_lock(&mem_list_mutex);
 
@@ -167,22 +165,14 @@ static long mm_iommu_do_alloc(struct mm_struct *mm, unsigned long ua,
 	for (i = 0; i < entries; ++i) {
 		struct page *page = mem->hpages[i];
 
-		cur_ua = ua + (i << PAGE_SHIFT);
-		if (mem->pageshift > PAGE_SHIFT && PageCompound(page)) {
-			pte_t *pte;
+		/*
+		 * Allow to use larger than 64k IOMMU pages. Only do that
+		 * if we are backed by hugetlb.
+		 */
+		if ((mem->pageshift > PAGE_SHIFT) && PageHuge(page)) {
 			struct page *head = compound_head(page);
-			unsigned int compshift = compound_order(head);
-			unsigned int pteshift;
-
-			local_irq_save(flags); /* disables as well */
-			pte = find_linux_pte(mm->pgd, cur_ua, NULL, &pteshift);
-
-			/* Double check it is still the same pinned page */
-			if (pte && pte_page(*pte) == head &&
-			    pteshift == compshift + PAGE_SHIFT)
-				pageshift = max_t(unsigned int, pteshift,
-						PAGE_SHIFT);
-			local_irq_restore(flags);
+
+			pageshift = compound_order(head) + PAGE_SHIFT;
 		}
 		mem->pageshift = min(mem->pageshift, pageshift);
 		/*
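For reference, the patched loop shrinks mem->pageshift to the smallest page size backing any pinned page: a hugetlb-backed compound page of order N contributes a shift of N + PAGE_SHIFT, anything else contributes PAGE_SHIFT. Below is a minimal userspace sketch of that reduction; struct pinned_page, mem_pageshift() and the is_huge flag are hypothetical stand-ins for the kernel's page structures, compound_order() and PageHuge().

#include <stdio.h>

#define PAGE_SHIFT 16 /* 64k base pages, as on the affected powerpc configs */

struct pinned_page {
	unsigned int compound_order; /* log2 of pages in the compound page */
	int is_huge;                 /* stand-in for PageHuge() */
};

/*
 * Mirror of the patched loop: start from the largest shift the caller
 * allows and shrink it to what every pinned page can actually back.
 */
static unsigned int mem_pageshift(const struct pinned_page *pages,
				  unsigned long entries,
				  unsigned int max_shift)
{
	unsigned int mem_shift = max_shift;
	unsigned long i;

	for (i = 0; i < entries; ++i) {
		unsigned int pageshift = PAGE_SHIFT;

		/* Only hugetlb pages may back IOMMU pages larger than 64k. */
		if (mem_shift > PAGE_SHIFT && pages[i].is_huge)
			pageshift = pages[i].compound_order + PAGE_SHIFT;

		if (pageshift < mem_shift)
			mem_shift = pageshift;
	}
	return mem_shift;
}

int main(void)
{
	struct pinned_page pages[] = {
		{ .compound_order = 8, .is_huge = 1 }, /* 16M hugetlb page */
		{ .compound_order = 0, .is_huge = 0 }, /* plain 64k page  */
	};

	/* The plain 64k page caps the result at PAGE_SHIFT (16). */
	printf("pageshift = %u\n", mem_pageshift(pages, 2, 24));
	return 0;
}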