
Commit 068d9d7

dma-buf: add SPDX header and fix style in dma-resv.c
dma_resv_lockdep() seems to have some space/tab mixups. Fix that and move the function to the end of the file.

Also fix some minor things checkpatch.pl pointed out while at it.

No functional change.

Signed-off-by: Christian König <[email protected]>
Acked-by: Daniel Vetter <[email protected]>
Link: https://patchwork.freedesktop.org/patch/msgid/[email protected]
1 parent 680753d commit 068d9d7


drivers/dma-buf/dma-resv.c

Lines changed: 65 additions & 63 deletions
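
Before the diff, a note on what "minor things checkpatch.pl pointed out" means in practice here. The cleanups below boil down to a few recurring coding-style rules: spell out "unsigned int" instead of bare "unsigned", align every '*' of a kernel-doc block under the opening '/**', and split over-long statements with a helper variable rather than exceed the line limit. A minimal sketch of the preferred form (the function and parameter names are invented for illustration, not taken from the patch):

	/**
	 * count_signaled - count entries whose low bit is set
	 * @flags: array of per-entry flag words
	 * @n: number of entries
	 *
	 * Each '*' lines up under the opening '/**' of the block.
	 */
	static unsigned int count_signaled(const unsigned long *flags,
					   unsigned int n)
	{
		unsigned int i, count = 0;	/* "unsigned int", not bare "unsigned" */

		for (i = 0; i < n; i++)
			if (flags[i] & 1UL)
				count++;
		return count;
	}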
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: MIT
 /*
  * Copyright (C) 2012-2014 Canonical Ltd (Maarten Lankhorst)
  *
@@ -92,49 +93,6 @@ static void dma_resv_list_free(struct dma_resv_list *list)
 	kfree_rcu(list, rcu);
 }
 
-#if IS_ENABLED(CONFIG_LOCKDEP)
-static int __init dma_resv_lockdep(void)
-{
-	struct mm_struct *mm = mm_alloc();
-	struct ww_acquire_ctx ctx;
-	struct dma_resv obj;
-	struct address_space mapping;
-	int ret;
-
-	if (!mm)
-		return -ENOMEM;
-
-	dma_resv_init(&obj);
-	address_space_init_once(&mapping);
-
-	mmap_read_lock(mm);
-	ww_acquire_init(&ctx, &reservation_ww_class);
-	ret = dma_resv_lock(&obj, &ctx);
-	if (ret == -EDEADLK)
-		dma_resv_lock_slow(&obj, &ctx);
-	fs_reclaim_acquire(GFP_KERNEL);
-	/* for unmap_mapping_range on trylocked buffer objects in shrinkers */
-	i_mmap_lock_write(&mapping);
-	i_mmap_unlock_write(&mapping);
-#ifdef CONFIG_MMU_NOTIFIER
-	lock_map_acquire(&__mmu_notifier_invalidate_range_start_map);
-	__dma_fence_might_wait();
-	lock_map_release(&__mmu_notifier_invalidate_range_start_map);
-#else
-	__dma_fence_might_wait();
-#endif
-	fs_reclaim_release(GFP_KERNEL);
-	ww_mutex_unlock(&obj.lock);
-	ww_acquire_fini(&ctx);
-	mmap_read_unlock(mm);
-
-	mmput(mm);
-
-	return 0;
-}
-subsys_initcall(dma_resv_lockdep);
-#endif
-
 /**
  * dma_resv_init - initialize a reservation object
  * @obj: the reservation object
@@ -196,9 +154,7 @@ int dma_resv_reserve_shared(struct dma_resv *obj, unsigned int num_fences)
 	if (old && old->shared_max) {
 		if ((old->shared_count + num_fences) <= old->shared_max)
 			return 0;
-		else
-			max = max(old->shared_count + num_fences,
-				  old->shared_max * 2);
+		max = max(old->shared_count + num_fences, old->shared_max * 2);
 	} else {
 		max = max(4ul, roundup_pow_of_two(num_fences));
 	}
@@ -337,17 +293,17 @@ void dma_resv_add_excl_fence(struct dma_resv *obj, struct dma_fence *fence)
 EXPORT_SYMBOL(dma_resv_add_excl_fence);
 
 /**
-* dma_resv_copy_fences - Copy all fences from src to dst.
-* @dst: the destination reservation object
-* @src: the source reservation object
-*
-* Copy all fences from src to dst. dst-lock must be held.
-*/
+ * dma_resv_copy_fences - Copy all fences from src to dst.
+ * @dst: the destination reservation object
+ * @src: the source reservation object
+ *
+ * Copy all fences from src to dst. dst-lock must be held.
+ */
 int dma_resv_copy_fences(struct dma_resv *dst, struct dma_resv *src)
 {
 	struct dma_resv_list *src_list, *dst_list;
 	struct dma_fence *old, *new;
-	unsigned i;
+	unsigned int i;
 
 	dma_resv_assert_held(dst);
 
@@ -356,7 +312,7 @@ int dma_resv_copy_fences(struct dma_resv *dst, struct dma_resv *src)
 
 retry:
 	if (src_list) {
-		unsigned shared_count = src_list->shared_count;
+		unsigned int shared_count = src_list->shared_count;
 
 		rcu_read_unlock();
 
@@ -373,6 +329,7 @@ int dma_resv_copy_fences(struct dma_resv *dst, struct dma_resv *src)
 
 		dst_list->shared_count = 0;
 		for (i = 0; i < src_list->shared_count; ++i) {
+			struct dma_fence __rcu **dst;
 			struct dma_fence *fence;
 
 			fence = rcu_dereference(src_list->shared[i]);
@@ -391,7 +348,8 @@ int dma_resv_copy_fences(struct dma_resv *dst, struct dma_resv *src)
 				continue;
 			}
 
-			rcu_assign_pointer(dst_list->shared[dst_list->shared_count++], fence);
+			dst = &dst_list->shared[dst_list->shared_count++];
+			rcu_assign_pointer(*dst, fence);
 		}
 	} else {
 		dst_list = NULL;
@@ -431,7 +389,7 @@ EXPORT_SYMBOL(dma_resv_copy_fences);
  */
 int dma_resv_get_fences_rcu(struct dma_resv *obj,
			    struct dma_fence **pfence_excl,
-			    unsigned *pshared_count,
+			    unsigned int *pshared_count,
			    struct dma_fence ***pshared)
 {
 	struct dma_fence **shared = NULL;
@@ -533,9 +491,9 @@ long dma_resv_wait_timeout_rcu(struct dma_resv *obj,
			       bool wait_all, bool intr,
			       unsigned long timeout)
 {
-	struct dma_fence *fence;
-	unsigned seq, shared_count;
 	long ret = timeout ? timeout : 1;
+	unsigned int seq, shared_count;
+	struct dma_fence *fence;
 	int i;
 
 retry:
@@ -565,8 +523,9 @@ long dma_resv_wait_timeout_rcu(struct dma_resv *obj,
 		shared_count = fobj->shared_count;
 
 		for (i = 0; !fence && i < shared_count; ++i) {
-			struct dma_fence *lfence = rcu_dereference(fobj->shared[i]);
+			struct dma_fence *lfence;
 
+			lfence = rcu_dereference(fobj->shared[i]);
 			if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT,
				     &lfence->flags))
 				continue;
@@ -633,7 +592,7 @@ static inline int dma_resv_test_signaled_single(struct dma_fence *passed_fence)
  */
 bool dma_resv_test_signaled_rcu(struct dma_resv *obj, bool test_all)
 {
-	unsigned seq, shared_count;
+	unsigned int seq, shared_count;
 	int ret;
 
 	rcu_read_lock();
@@ -643,16 +602,16 @@ bool dma_resv_test_signaled_rcu(struct dma_resv *obj, bool test_all)
 	seq = read_seqcount_begin(&obj->seq);
 
 	if (test_all) {
-		unsigned i;
-
 		struct dma_resv_list *fobj = rcu_dereference(obj->fence);
+		unsigned int i;
 
 		if (fobj)
 			shared_count = fobj->shared_count;
 
 		for (i = 0; i < shared_count; ++i) {
-			struct dma_fence *fence = rcu_dereference(fobj->shared[i]);
+			struct dma_fence *fence;
 
+			fence = rcu_dereference(fobj->shared[i]);
 			ret = dma_resv_test_signaled_single(fence);
 			if (ret < 0)
 				goto retry;
@@ -681,3 +640,46 @@ bool dma_resv_test_signaled_rcu(struct dma_resv *obj, bool test_all)
 	return ret;
 }
 EXPORT_SYMBOL_GPL(dma_resv_test_signaled_rcu);
+
+#if IS_ENABLED(CONFIG_LOCKDEP)
+static int __init dma_resv_lockdep(void)
+{
+	struct mm_struct *mm = mm_alloc();
+	struct ww_acquire_ctx ctx;
+	struct dma_resv obj;
+	struct address_space mapping;
+	int ret;
+
+	if (!mm)
+		return -ENOMEM;
+
+	dma_resv_init(&obj);
+	address_space_init_once(&mapping);
+
+	mmap_read_lock(mm);
+	ww_acquire_init(&ctx, &reservation_ww_class);
+	ret = dma_resv_lock(&obj, &ctx);
+	if (ret == -EDEADLK)
+		dma_resv_lock_slow(&obj, &ctx);
+	fs_reclaim_acquire(GFP_KERNEL);
+	/* for unmap_mapping_range on trylocked buffer objects in shrinkers */
+	i_mmap_lock_write(&mapping);
+	i_mmap_unlock_write(&mapping);
+#ifdef CONFIG_MMU_NOTIFIER
+	lock_map_acquire(&__mmu_notifier_invalidate_range_start_map);
+	__dma_fence_might_wait();
+	lock_map_release(&__mmu_notifier_invalidate_range_start_map);
+#else
+	__dma_fence_might_wait();
+#endif
+	fs_reclaim_release(GFP_KERNEL);
+	ww_mutex_unlock(&obj.lock);
+	ww_acquire_fini(&ctx);
+	mmap_read_unlock(mm);
+
+	mmput(mm);
+
+	return 0;
+}
+subsys_initcall(dma_resv_lockdep);
+#endif
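
A cleanup like this is typically verified by re-running checkpatch over the whole file; in a kernel tree that would be something along the lines of

	./scripts/checkpatch.pl -f drivers/dma-buf/dma-resv.c

which, after this commit, should no longer warn about the bare "unsigned" declarations, the misaligned block comment, or the space/tab mixups in dma_resv_lockdep().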
