@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: MIT
 /*
  * Copyright (C) 2012-2014 Canonical Ltd (Maarten Lankhorst)
  *
@@ -92,49 +93,6 @@ static void dma_resv_list_free(struct dma_resv_list *list)
         kfree_rcu(list, rcu);
 }
 
-#if IS_ENABLED(CONFIG_LOCKDEP)
-static int __init dma_resv_lockdep(void)
-{
-        struct mm_struct *mm = mm_alloc();
-        struct ww_acquire_ctx ctx;
-        struct dma_resv obj;
-        struct address_space mapping;
-        int ret;
-
-        if (!mm)
-                return -ENOMEM;
-
-        dma_resv_init(&obj);
-        address_space_init_once(&mapping);
-
-        mmap_read_lock(mm);
-        ww_acquire_init(&ctx, &reservation_ww_class);
-        ret = dma_resv_lock(&obj, &ctx);
-        if (ret == -EDEADLK)
-                dma_resv_lock_slow(&obj, &ctx);
-        fs_reclaim_acquire(GFP_KERNEL);
-        /* for unmap_mapping_range on trylocked buffer objects in shrinkers */
-        i_mmap_lock_write(&mapping);
-        i_mmap_unlock_write(&mapping);
-#ifdef CONFIG_MMU_NOTIFIER
-        lock_map_acquire(&__mmu_notifier_invalidate_range_start_map);
-        __dma_fence_might_wait();
-        lock_map_release(&__mmu_notifier_invalidate_range_start_map);
-#else
-        __dma_fence_might_wait();
-#endif
-        fs_reclaim_release(GFP_KERNEL);
-        ww_mutex_unlock(&obj.lock);
-        ww_acquire_fini(&ctx);
-        mmap_read_unlock(mm);
-
-        mmput(mm);
-
-        return 0;
-}
-subsys_initcall(dma_resv_lockdep);
-#endif
-
 /**
  * dma_resv_init - initialize a reservation object
  * @obj: the reservation object
@@ -196,9 +154,7 @@ int dma_resv_reserve_shared(struct dma_resv *obj, unsigned int num_fences)
         if (old && old->shared_max) {
                 if ((old->shared_count + num_fences) <= old->shared_max)
                         return 0;
-                else
-                        max = max(old->shared_count + num_fences,
-                                  old->shared_max * 2);
+                max = max(old->shared_count + num_fences, old->shared_max * 2);
         } else {
                 max = max(4ul, roundup_pow_of_two(num_fences));
         }
@@ -337,17 +293,17 @@ void dma_resv_add_excl_fence(struct dma_resv *obj, struct dma_fence *fence)
 EXPORT_SYMBOL(dma_resv_add_excl_fence);
 
 /**
- * dma_resv_copy_fences - Copy all fences from src to dst.
- * @dst: the destination reservation object
- * @src: the source reservation object
- *
- * Copy all fences from src to dst. dst-lock must be held.
- */
+ * dma_resv_copy_fences - Copy all fences from src to dst.
+ * @dst: the destination reservation object
+ * @src: the source reservation object
+ *
+ * Copy all fences from src to dst. dst-lock must be held.
+ */
 int dma_resv_copy_fences(struct dma_resv *dst, struct dma_resv *src)
 {
         struct dma_resv_list *src_list, *dst_list;
         struct dma_fence *old, *new;
-        unsigned i;
+        unsigned int i;
 
         dma_resv_assert_held(dst);
 
@@ -356,7 +312,7 @@ int dma_resv_copy_fences(struct dma_resv *dst, struct dma_resv *src)
 
 retry:
         if (src_list) {
-                unsigned shared_count = src_list->shared_count;
+                unsigned int shared_count = src_list->shared_count;
 
                 rcu_read_unlock();
 
@@ -373,6 +329,7 @@ int dma_resv_copy_fences(struct dma_resv *dst, struct dma_resv *src)
 
                 dst_list->shared_count = 0;
                 for (i = 0; i < src_list->shared_count; ++i) {
+                        struct dma_fence __rcu **dst;
                         struct dma_fence *fence;
 
                         fence = rcu_dereference(src_list->shared[i]);
@@ -391,7 +348,8 @@ int dma_resv_copy_fences(struct dma_resv *dst, struct dma_resv *src)
                                 continue;
                         }
 
-                        rcu_assign_pointer(dst_list->shared[dst_list->shared_count++], fence);
+                        dst = &dst_list->shared[dst_list->shared_count++];
+                        rcu_assign_pointer(*dst, fence);
                 }
         } else {
                 dst_list = NULL;
@@ -431,7 +389,7 @@ EXPORT_SYMBOL(dma_resv_copy_fences);
  */
 int dma_resv_get_fences_rcu(struct dma_resv *obj,
                             struct dma_fence **pfence_excl,
-                            unsigned *pshared_count,
+                            unsigned int *pshared_count,
                             struct dma_fence ***pshared)
 {
         struct dma_fence **shared = NULL;
@@ -533,9 +491,9 @@ long dma_resv_wait_timeout_rcu(struct dma_resv *obj,
                                bool wait_all, bool intr,
                                unsigned long timeout)
 {
-        struct dma_fence *fence;
-        unsigned seq, shared_count;
         long ret = timeout ? timeout : 1;
+        unsigned int seq, shared_count;
+        struct dma_fence *fence;
         int i;
 
 retry:
@@ -565,8 +523,9 @@ long dma_resv_wait_timeout_rcu(struct dma_resv *obj,
                 shared_count = fobj->shared_count;
 
                 for (i = 0; !fence && i < shared_count; ++i) {
-                        struct dma_fence *lfence = rcu_dereference(fobj->shared[i]);
+                        struct dma_fence *lfence;
 
+                        lfence = rcu_dereference(fobj->shared[i]);
                         if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT,
                                      &lfence->flags))
                                 continue;
@@ -633,7 +592,7 @@ static inline int dma_resv_test_signaled_single(struct dma_fence *passed_fence)
  */
 bool dma_resv_test_signaled_rcu(struct dma_resv *obj, bool test_all)
 {
-        unsigned seq, shared_count;
+        unsigned int seq, shared_count;
         int ret;
 
         rcu_read_lock();
@@ -643,16 +602,16 @@ bool dma_resv_test_signaled_rcu(struct dma_resv *obj, bool test_all)
         seq = read_seqcount_begin(&obj->seq);
 
         if (test_all) {
-                unsigned i;
-
                 struct dma_resv_list *fobj = rcu_dereference(obj->fence);
+                unsigned int i;
 
                 if (fobj)
                         shared_count = fobj->shared_count;
 
                 for (i = 0; i < shared_count; ++i) {
-                        struct dma_fence *fence = rcu_dereference(fobj->shared[i]);
+                        struct dma_fence *fence;
 
+                        fence = rcu_dereference(fobj->shared[i]);
                         ret = dma_resv_test_signaled_single(fence);
                         if (ret < 0)
                                 goto retry;
@@ -681,3 +640,46 @@ bool dma_resv_test_signaled_rcu(struct dma_resv *obj, bool test_all)
         return ret;
 }
 EXPORT_SYMBOL_GPL(dma_resv_test_signaled_rcu);
+
+#if IS_ENABLED(CONFIG_LOCKDEP)
+static int __init dma_resv_lockdep(void)
+{
+        struct mm_struct *mm = mm_alloc();
+        struct ww_acquire_ctx ctx;
+        struct dma_resv obj;
+        struct address_space mapping;
+        int ret;
+
+        if (!mm)
+                return -ENOMEM;
+
+        dma_resv_init(&obj);
+        address_space_init_once(&mapping);
+
+        mmap_read_lock(mm);
+        ww_acquire_init(&ctx, &reservation_ww_class);
+        ret = dma_resv_lock(&obj, &ctx);
+        if (ret == -EDEADLK)
+                dma_resv_lock_slow(&obj, &ctx);
+        fs_reclaim_acquire(GFP_KERNEL);
+        /* for unmap_mapping_range on trylocked buffer objects in shrinkers */
+        i_mmap_lock_write(&mapping);
+        i_mmap_unlock_write(&mapping);
+#ifdef CONFIG_MMU_NOTIFIER
+        lock_map_acquire(&__mmu_notifier_invalidate_range_start_map);
+        __dma_fence_might_wait();
+        lock_map_release(&__mmu_notifier_invalidate_range_start_map);
+#else
+        __dma_fence_might_wait();
+#endif
+        fs_reclaim_release(GFP_KERNEL);
+        ww_mutex_unlock(&obj.lock);
+        ww_acquire_fini(&ctx);
+        mmap_read_unlock(mm);
+
+        mmput(mm);
+
+        return 0;
+}
+subsys_initcall(dma_resv_lockdep);
+#endif
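
For readers unfamiliar with the relocated function: dma_resv_lockdep() exists only to teach lockdep the nesting rules between the reservation lock, mmap_lock, fs_reclaim and the i_mmap lock. A minimal sketch of the driver-side locking pattern it primes might look like the following. This is illustrative only and not part of this patch; example_resv_lock() and obj are hypothetical, while the calls themselves (ww_acquire_init, dma_resv_lock, dma_resv_lock_slow, dma_resv_unlock, ww_acquire_fini) are the same ones used in the function above.

```c
/*
 * Illustrative sketch only (not part of this patch): the basic
 * dma_resv locking sequence that dma_resv_lockdep() primes lockdep with.
 * "obj" is a hypothetical reservation object owned by a driver.
 */
static void example_resv_lock(struct dma_resv *obj)
{
        struct ww_acquire_ctx ctx;
        int ret;

        ww_acquire_init(&ctx, &reservation_ww_class);

        ret = dma_resv_lock(obj, &ctx);
        if (ret == -EDEADLK) {
                /*
                 * Another context won the lock ordering.  When several
                 * objects are involved, every lock already held in this
                 * ctx must be dropped before sleeping on the contended
                 * one with the slow-path variant.
                 */
                dma_resv_lock_slow(obj, &ctx);
        }

        /* ... add shared/exclusive fences while the lock is held ... */

        dma_resv_unlock(obj);
        ww_acquire_fini(&ctx);
}
```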