Commit 5a27aa8

vwool authored and torvalds committed
z3fold: add kref refcounting
With both coming and already present locking optimizations, introducing kref to reference-count z3fold objects is the right thing to do. Moreover, it makes the buddied list no longer necessary, and allows for simpler handling of headless pages.

[[email protected]: coding-style fixes]
Link: http://lkml.kernel.org/r/[email protected]
Signed-off-by: Vitaly Wool <[email protected]>
Reviewed-by: Dan Streetman <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
1 parent 2f1e5e4 commit 5a27aa8

File tree: 1 file changed (+69, -86 lines)

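Before reading the diff, it may help to see the bare refcounting pattern the patch applies: each z3fold page's header gains a struct kref initialized to 1, z3fold_alloc() and the reclaim path take extra references while they work on a page, and the final kref_put() invokes a release callback that unlinks and frees the page. Below is a minimal, hypothetical userspace sketch of that pattern; the fake_kref type and helpers merely model the real <linux/kref.h> API with C11 atomics, and container_of() is modeled with offsetof(). It is illustrative only, not the kernel implementation.

/*
 * Userspace sketch of the kref pattern used by this patch (hypothetical
 * names; the kernel uses struct kref from <linux/kref.h>).  The object
 * is freed from a release callback the moment the last reference is
 * dropped, which is how release_z3fold_page() below is wired up.
 */
#include <stdatomic.h>
#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>

struct fake_kref {
	atomic_int count;
};

struct object {
	int payload;
	struct fake_kref refcount;
};

static void kref_init(struct fake_kref *r) { atomic_init(&r->count, 1); }
static void kref_get(struct fake_kref *r)  { atomic_fetch_add(&r->count, 1); }

/* Returns 1 if this put dropped the last reference and ran release(). */
static int kref_put(struct fake_kref *r, void (*release)(struct fake_kref *))
{
	if (atomic_fetch_sub(&r->count, 1) == 1) {
		release(r);
		return 1;
	}
	return 0;
}

static void object_release(struct fake_kref *ref)
{
	/* Recover the enclosing object, kernel container_of() style. */
	struct object *obj = (struct object *)
		((char *)ref - offsetof(struct object, refcount));
	printf("releasing object with payload %d\n", obj->payload);
	free(obj);
}

int main(void)
{
	struct object *obj = malloc(sizeof(*obj));
	obj->payload = 42;
	kref_init(&obj->refcount);  /* refcount = 1: creator's reference  */
	kref_get(&obj->refcount);   /* a second user takes a reference    */
	kref_put(&obj->refcount, object_release); /* still alive (count 1) */
	kref_put(&obj->refcount, object_release); /* last put: freed here  */
	return 0;
}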

mm/z3fold.c

@@ -52,6 +52,7 @@ enum buddy {
  * z3fold page, except for HEADLESS pages
  * @buddy: links the z3fold page into the relevant list in the pool
  * @page_lock: per-page lock
+ * @refcount: reference count for the z3fold page
  * @first_chunks: the size of the first buddy in chunks, 0 if free
  * @middle_chunks: the size of the middle buddy in chunks, 0 if free
  * @last_chunks: the size of the last buddy in chunks, 0 if free
@@ -60,6 +61,7 @@ enum buddy {
 struct z3fold_header {
 	struct list_head buddy;
 	spinlock_t page_lock;
+	struct kref refcount;
 	unsigned short first_chunks;
 	unsigned short middle_chunks;
 	unsigned short last_chunks;
@@ -95,8 +97,6 @@ struct z3fold_header {
  * @unbuddied: array of lists tracking z3fold pages that contain 2- buddies;
  *	the lists each z3fold page is added to depends on the size of
  *	its free region.
- * @buddied: list tracking the z3fold pages that contain 3 buddies;
- *	these z3fold pages are full
  * @lru: list tracking the z3fold pages in LRU order by most recently
  *	added buddy.
  * @pages_nr: number of z3fold pages in the pool.
@@ -109,7 +109,6 @@ struct z3fold_header {
 struct z3fold_pool {
 	spinlock_t lock;
 	struct list_head unbuddied[NCHUNKS];
-	struct list_head buddied;
 	struct list_head lru;
 	atomic64_t pages_nr;
 	const struct z3fold_ops *ops;
@@ -121,8 +120,7 @@ struct z3fold_pool {
  * Internal z3fold page flags
  */
 enum z3fold_page_flags {
-	UNDER_RECLAIM = 0,
-	PAGE_HEADLESS,
+	PAGE_HEADLESS = 0,
 	MIDDLE_CHUNK_MAPPED,
 };
 
@@ -146,11 +144,11 @@ static struct z3fold_header *init_z3fold_page(struct page *page)
 	struct z3fold_header *zhdr = page_address(page);
 
 	INIT_LIST_HEAD(&page->lru);
-	clear_bit(UNDER_RECLAIM, &page->private);
 	clear_bit(PAGE_HEADLESS, &page->private);
 	clear_bit(MIDDLE_CHUNK_MAPPED, &page->private);
 
 	spin_lock_init(&zhdr->page_lock);
+	kref_init(&zhdr->refcount);
 	zhdr->first_chunks = 0;
 	zhdr->middle_chunks = 0;
 	zhdr->last_chunks = 0;
@@ -161,9 +159,24 @@ static struct z3fold_header *init_z3fold_page(struct page *page)
 }
 
 /* Resets the struct page fields and frees the page */
-static void free_z3fold_page(struct z3fold_header *zhdr)
+static void free_z3fold_page(struct page *page)
 {
-	__free_page(virt_to_page(zhdr));
+	__free_page(page);
+}
+
+static void release_z3fold_page(struct kref *ref)
+{
+	struct z3fold_header *zhdr;
+	struct page *page;
+
+	zhdr = container_of(ref, struct z3fold_header, refcount);
+	page = virt_to_page(zhdr);
+
+	if (!list_empty(&zhdr->buddy))
+		list_del(&zhdr->buddy);
+	if (!list_empty(&page->lru))
+		list_del(&page->lru);
+	free_z3fold_page(page);
 }
 
 /* Lock a z3fold page */
@@ -178,7 +191,6 @@ static inline void z3fold_page_unlock(struct z3fold_header *zhdr)
 	spin_unlock(&zhdr->page_lock);
 }
 
-
 /*
  * Encodes the handle of a particular buddy within a z3fold page
  * Pool lock should be held as this function accesses first_num
@@ -257,7 +269,6 @@ static struct z3fold_pool *z3fold_create_pool(gfp_t gfp,
 	spin_lock_init(&pool->lock);
 	for_each_unbuddied_list(i, 0)
 		INIT_LIST_HEAD(&pool->unbuddied[i]);
-	INIT_LIST_HEAD(&pool->buddied);
 	INIT_LIST_HEAD(&pool->lru);
 	atomic64_set(&pool->pages_nr, 0);
 	pool->ops = ops;
@@ -378,6 +389,7 @@ static int z3fold_alloc(struct z3fold_pool *pool, size_t size, gfp_t gfp,
 				spin_unlock(&pool->lock);
 				continue;
 			}
+			kref_get(&zhdr->refcount);
 			list_del_init(&zhdr->buddy);
 			spin_unlock(&pool->lock);
 
@@ -394,10 +406,12 @@ static int z3fold_alloc(struct z3fold_pool *pool, size_t size, gfp_t gfp,
 		else if (zhdr->middle_chunks == 0)
 			bud = MIDDLE;
 		else {
+			z3fold_page_unlock(zhdr);
 			spin_lock(&pool->lock);
-			list_add(&zhdr->buddy, &pool->buddied);
+			if (kref_put(&zhdr->refcount,
+				     release_z3fold_page))
+				atomic64_dec(&pool->pages_nr);
 			spin_unlock(&pool->lock);
-			z3fold_page_unlock(zhdr);
 			pr_err("No free chunks in unbuddied\n");
 			WARN_ON(1);
 			continue;
@@ -438,9 +452,6 @@ static int z3fold_alloc(struct z3fold_pool *pool, size_t size, gfp_t gfp,
 		/* Add to unbuddied list */
 		freechunks = num_free_chunks(zhdr);
 		list_add(&zhdr->buddy, &pool->unbuddied[freechunks]);
-	} else {
-		/* Add to buddied list */
-		list_add(&zhdr->buddy, &pool->buddied);
 	}
 
 headless:
@@ -504,52 +515,29 @@ static void z3fold_free(struct z3fold_pool *pool, unsigned long handle)
 		}
 	}
 
-	if (test_bit(UNDER_RECLAIM, &page->private)) {
-		/* z3fold page is under reclaim, reclaim will free */
-		if (bud != HEADLESS)
-			z3fold_page_unlock(zhdr);
-		return;
-	}
-
-	/* Remove from existing buddy list */
-	if (bud != HEADLESS) {
-		spin_lock(&pool->lock);
-		/*
-		 * this object may have been removed from its list by
-		 * z3fold_alloc(). In that case we just do nothing,
-		 * z3fold_alloc() will allocate an object and add the page
-		 * to the relevant list.
-		 */
-		if (!list_empty(&zhdr->buddy)) {
-			list_del(&zhdr->buddy);
-		} else {
-			spin_unlock(&pool->lock);
-			z3fold_page_unlock(zhdr);
-			return;
-		}
-		spin_unlock(&pool->lock);
-	}
-
-	if (bud == HEADLESS ||
-	    (zhdr->first_chunks == 0 && zhdr->middle_chunks == 0 &&
-	     zhdr->last_chunks == 0)) {
-		/* z3fold page is empty, free */
+	if (bud == HEADLESS) {
 		spin_lock(&pool->lock);
 		list_del(&page->lru);
 		spin_unlock(&pool->lock);
-		clear_bit(PAGE_HEADLESS, &page->private);
-		if (bud != HEADLESS)
-			z3fold_page_unlock(zhdr);
-		free_z3fold_page(zhdr);
+		free_z3fold_page(page);
 		atomic64_dec(&pool->pages_nr);
 	} else {
-		z3fold_compact_page(zhdr);
-		/* Add to the unbuddied list */
+		if (zhdr->first_chunks != 0 || zhdr->middle_chunks != 0 ||
+		    zhdr->last_chunks != 0) {
+			z3fold_compact_page(zhdr);
+			/* Add to the unbuddied list */
+			spin_lock(&pool->lock);
+			if (!list_empty(&zhdr->buddy))
+				list_del(&zhdr->buddy);
+			freechunks = num_free_chunks(zhdr);
+			list_add(&zhdr->buddy, &pool->unbuddied[freechunks]);
+			spin_unlock(&pool->lock);
+		}
+		z3fold_page_unlock(zhdr);
 		spin_lock(&pool->lock);
-		freechunks = num_free_chunks(zhdr);
-		list_add(&zhdr->buddy, &pool->unbuddied[freechunks]);
+		if (kref_put(&zhdr->refcount, release_z3fold_page))
+			atomic64_dec(&pool->pages_nr);
 		spin_unlock(&pool->lock);
-		z3fold_page_unlock(zhdr);
 	}
 
 }
@@ -608,13 +596,13 @@ static int z3fold_reclaim_page(struct z3fold_pool *pool, unsigned int retries)
 			return -EINVAL;
 		}
 		page = list_last_entry(&pool->lru, struct page, lru);
-		list_del(&page->lru);
+		list_del_init(&page->lru);
 
-		/* Protect z3fold page against free */
-		set_bit(UNDER_RECLAIM, &page->private);
 		zhdr = page_address(page);
 		if (!test_bit(PAGE_HEADLESS, &page->private)) {
-			list_del(&zhdr->buddy);
+			if (!list_empty(&zhdr->buddy))
+				list_del_init(&zhdr->buddy);
+			kref_get(&zhdr->refcount);
 			spin_unlock(&pool->lock);
 			z3fold_page_lock(zhdr);
 			/*
@@ -655,30 +643,19 @@ static int z3fold_reclaim_page(struct z3fold_pool *pool, unsigned int retries)
 				goto next;
 		}
 next:
-		if (!test_bit(PAGE_HEADLESS, &page->private))
-			z3fold_page_lock(zhdr);
-		clear_bit(UNDER_RECLAIM, &page->private);
-		if ((test_bit(PAGE_HEADLESS, &page->private) && ret == 0) ||
-		    (zhdr->first_chunks == 0 && zhdr->last_chunks == 0 &&
-		     zhdr->middle_chunks == 0)) {
-			/*
-			 * All buddies are now free, free the z3fold page and
-			 * return success.
-			 */
-			if (!test_and_clear_bit(PAGE_HEADLESS, &page->private))
-				z3fold_page_unlock(zhdr);
-			free_z3fold_page(zhdr);
-			atomic64_dec(&pool->pages_nr);
-			return 0;
-		} else if (!test_bit(PAGE_HEADLESS, &page->private)) {
-			if (zhdr->first_chunks != 0 &&
-			    zhdr->last_chunks != 0 &&
-			    zhdr->middle_chunks != 0) {
-				/* Full, add to buddied list */
-				spin_lock(&pool->lock);
-				list_add(&zhdr->buddy, &pool->buddied);
-				spin_unlock(&pool->lock);
+		if (test_bit(PAGE_HEADLESS, &page->private)) {
+			if (ret == 0) {
+				free_z3fold_page(page);
+				return 0;
 			} else {
+				spin_lock(&pool->lock);
+			}
+		} else {
+			z3fold_page_lock(zhdr);
+			if ((zhdr->first_chunks || zhdr->last_chunks ||
+			     zhdr->middle_chunks) &&
+			    !(zhdr->first_chunks && zhdr->last_chunks &&
+			      zhdr->middle_chunks)) {
 				z3fold_compact_page(zhdr);
 				/* add to unbuddied list */
 				spin_lock(&pool->lock);
@@ -687,13 +664,19 @@ static int z3fold_reclaim_page(struct z3fold_pool *pool, unsigned int retries)
 					 &pool->unbuddied[freechunks]);
 				spin_unlock(&pool->lock);
 			}
-		}
-
-		if (!test_bit(PAGE_HEADLESS, &page->private))
 			z3fold_page_unlock(zhdr);
+			spin_lock(&pool->lock);
+			if (kref_put(&zhdr->refcount, release_z3fold_page)) {
+				atomic64_dec(&pool->pages_nr);
+				return 0;
+			}
+		}
 
-		spin_lock(&pool->lock);
-		/* add to beginning of LRU */
+		/*
+		 * Add to the beginning of LRU.
+		 * Pool lock has to be kept here to ensure the page has
+		 * not already been released
+		 */
 		list_add(&page->lru, &pool->lru);
 	}
 	spin_unlock(&pool->lock);
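One detail worth noticing in the free and reclaim hunks above: the final kref_put() is issued with pool->lock held, because release_z3fold_page() deletes the page from pool-protected lists (zhdr->buddy, page->lru) and must not race with a concurrent lookup; the new comment about re-adding to the LRU spells out the same reasoning. Below is a hedged sketch of that put-under-lock idiom, reusing the hypothetical struct object, kref_put() and object_release() from the earlier example; the pool type and all names here are invented for illustration, not the kernel's.

#include <pthread.h>

struct pool {
	pthread_mutex_t lock;	/* stands in for pool->lock     */
	long pages_nr;		/* stands in for pool->pages_nr */
};

/*
 * Holding the lock across the final put guarantees that no other
 * thread can reach the object through the pool's lists while the
 * release callback unlinks and frees it.
 */
static void drop_reference(struct pool *pool, struct object *obj)
{
	pthread_mutex_lock(&pool->lock);
	if (kref_put(&obj->refcount, object_release))
		pool->pages_nr--;  /* mirrors atomic64_dec(&pool->pages_nr) */
	pthread_mutex_unlock(&pool->lock);
}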
