@@ -27,6 +27,11 @@ static void fq_destroy_all_entries(struct iova_domain *iovad);
 static void fq_flush_timeout(struct timer_list *t);
 static void free_global_cached_iovas(struct iova_domain *iovad);
 
+static struct iova *to_iova(struct rb_node *node)
+{
+	return rb_entry(node, struct iova, node);
+}
+
 void
 init_iova_domain(struct iova_domain *iovad, unsigned long granule,
 	unsigned long start_pfn)
@@ -136,15 +141,15 @@ __cached_rbnode_delete_update(struct iova_domain *iovad, struct iova *free)
 {
 	struct iova *cached_iova;
 
-	cached_iova = rb_entry(iovad->cached32_node, struct iova, node);
+	cached_iova = to_iova(iovad->cached32_node);
 	if (free == cached_iova ||
 	    (free->pfn_hi < iovad->dma_32bit_pfn &&
 	     free->pfn_lo >= cached_iova->pfn_lo)) {
 		iovad->cached32_node = rb_next(&free->node);
 		iovad->max32_alloc_size = iovad->dma_32bit_pfn;
 	}
 
-	cached_iova = rb_entry(iovad->cached_node, struct iova, node);
+	cached_iova = to_iova(iovad->cached_node);
 	if (free->pfn_lo >= cached_iova->pfn_lo)
 		iovad->cached_node = rb_next(&free->node);
 }
@@ -159,7 +164,7 @@ iova_insert_rbtree(struct rb_root *root, struct iova *iova,
 	new = (start) ? &start : &(root->rb_node);
 	/* Figure out where to put new node */
 	while (*new) {
-		struct iova *this = rb_entry(*new, struct iova, node);
+		struct iova *this = to_iova(*new);
 
 		parent = *new;
 
@@ -198,7 +203,7 @@ static int __alloc_and_insert_iova_range(struct iova_domain *iovad,
 		goto iova32_full;
 
 	curr = __get_cached_rbnode(iovad, limit_pfn);
-	curr_iova = rb_entry(curr, struct iova, node);
+	curr_iova = to_iova(curr);
 	retry_pfn = curr_iova->pfn_hi + 1;
 
 retry:
@@ -207,15 +212,15 @@ static int __alloc_and_insert_iova_range(struct iova_domain *iovad,
 		new_pfn = (high_pfn - size) & align_mask;
 		prev = curr;
 		curr = rb_prev(curr);
-		curr_iova = rb_entry(curr, struct iova, node);
+		curr_iova = to_iova(curr);
 	} while (curr && new_pfn <= curr_iova->pfn_hi && new_pfn >= low_pfn);
 
 	if (high_pfn < size || new_pfn < low_pfn) {
 		if (low_pfn == iovad->start_pfn && retry_pfn < limit_pfn) {
 			high_pfn = limit_pfn;
 			low_pfn = retry_pfn;
 			curr = &iovad->anchor.node;
-			curr_iova = rb_entry(curr, struct iova, node);
+			curr_iova = to_iova(curr);
 			goto retry;
 		}
 		iovad->max32_alloc_size = size;
@@ -331,7 +336,7 @@ private_find_iova(struct iova_domain *iovad, unsigned long pfn)
 	assert_spin_locked(&iovad->iova_rbtree_lock);
 
 	while (node) {
-		struct iova *iova = rb_entry(node, struct iova, node);
+		struct iova *iova = to_iova(node);
 
 		if (pfn < iova->pfn_lo)
 			node = node->rb_left;
@@ -617,7 +622,7 @@ static int
 __is_range_overlap(struct rb_node *node,
 	unsigned long pfn_lo, unsigned long pfn_hi)
 {
-	struct iova *iova = rb_entry(node, struct iova, node);
+	struct iova *iova = to_iova(node);
 
 	if ((pfn_lo <= iova->pfn_hi) && (pfn_hi >= iova->pfn_lo))
 		return 1;
@@ -685,7 +690,7 @@ reserve_iova(struct iova_domain *iovad,
 	spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
 	for (node = rb_first(&iovad->rbroot); node; node = rb_next(node)) {
 		if (__is_range_overlap(node, pfn_lo, pfn_hi)) {
-			iova = rb_entry(node, struct iova, node);
+			iova = to_iova(node);
 			__adjust_overlap_range(iova, &pfn_lo, &pfn_hi);
 			if ((pfn_lo >= iova->pfn_lo) &&
 			    (pfn_hi <= iova->pfn_hi))
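For reference, the new to_iova() helper introduced above is just a typed wrapper around rb_entry(), which in the kernel expands to container_of(): given a pointer to the rb_node embedded in a struct iova, it recovers the containing struct iova. The standalone sketch below illustrates the same pattern in plain userspace C; the struct item / to_item() names and the simplified container_of macro are illustrative stand-ins, not kernel code.

#include <stddef.h>
#include <stdio.h>

/* Simplified stand-in for the kernel's container_of()/rb_entry(). */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

/* Illustrative intrusive node, playing the role of struct rb_node. */
struct node {
	struct node *left, *right;
};

/* Illustrative container, playing the role of struct iova. */
struct item {
	unsigned long lo, hi;
	struct node link;	/* embedded node, like iova.node */
};

/* Typed helper with the same shape as the new to_iova(). */
static struct item *to_item(struct node *n)
{
	return container_of(n, struct item, link);
}

int main(void)
{
	struct item it = { .lo = 0x1000, .hi = 0x1fff };
	struct node *n = &it.link;	/* what a tree walk would hand back */

	/* Recover the containing struct from the embedded node pointer. */
	printf("lo=%#lx hi=%#lx\n", to_item(n)->lo, to_item(n)->hi);
	return 0;
}

As the hunks above show, every rb_entry(..., struct iova, node) call site is replaced mechanically, so the patch appears to change readability only, not behaviour.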