
Commit ae4c51a

Oleksandr Andrushchenko authored and Boris Ostrovsky committed
xen/balloon: Share common memory reservation routines
Memory {increase|decrease}_reservation and VA mappings update/reset code used in balloon driver can be made common, so other drivers can also re-use the same functionality without open-coding. Create a dedicated file for the shared code and export corresponding symbols for other kernel modules.

Signed-off-by: Oleksandr Andrushchenko <[email protected]>
Reviewed-by: Boris Ostrovsky <[email protected]>
Signed-off-by: Boris Ostrovsky <[email protected]>
1 parent 8c3799e commit ae4c51a

File tree

4 files changed: 184 additions, 69 deletions


drivers/xen/Makefile

Lines changed: 1 addition & 0 deletions
@@ -2,6 +2,7 @@
 obj-$(CONFIG_HOTPLUG_CPU) += cpu_hotplug.o
 obj-$(CONFIG_X86) += fallback.o
 obj-y += grant-table.o features.o balloon.o manage.o preempt.o time.o
+obj-y += mem-reservation.o
 obj-y += events/
 obj-y += xenbus/

drivers/xen/balloon.c

Lines changed: 6 additions & 69 deletions
@@ -71,6 +71,7 @@
 #include <xen/balloon.h>
 #include <xen/features.h>
 #include <xen/page.h>
+#include <xen/mem-reservation.h>

 static int xen_hotplug_unpopulated;

@@ -157,13 +158,6 @@ static DECLARE_DELAYED_WORK(balloon_worker, balloon_process);
 #define GFP_BALLOON \
 	(GFP_HIGHUSER | __GFP_NOWARN | __GFP_NORETRY | __GFP_NOMEMALLOC)

-static void scrub_page(struct page *page)
-{
-#ifdef CONFIG_XEN_SCRUB_PAGES
-	clear_highpage(page);
-#endif
-}
-
 /* balloon_append: add the given page to the balloon. */
 static void __balloon_append(struct page *page)
 {
@@ -463,11 +457,6 @@ static enum bp_state increase_reservation(unsigned long nr_pages)
 	int rc;
 	unsigned long i;
 	struct page *page;
-	struct xen_memory_reservation reservation = {
-		.address_bits = 0,
-		.extent_order = EXTENT_ORDER,
-		.domid        = DOMID_SELF
-	};

 	if (nr_pages > ARRAY_SIZE(frame_list))
 		nr_pages = ARRAY_SIZE(frame_list);
@@ -479,46 +468,19 @@ static enum bp_state increase_reservation(unsigned long nr_pages)
 			break;
 		}

-		/* XENMEM_populate_physmap requires a PFN based on Xen
-		 * granularity.
-		 */
 		frame_list[i] = page_to_xen_pfn(page);
 		page = balloon_next_page(page);
 	}

-	set_xen_guest_handle(reservation.extent_start, frame_list);
-	reservation.nr_extents = nr_pages;
-	rc = HYPERVISOR_memory_op(XENMEM_populate_physmap, &reservation);
+	rc = xenmem_reservation_increase(nr_pages, frame_list);
 	if (rc <= 0)
 		return BP_EAGAIN;

 	for (i = 0; i < rc; i++) {
 		page = balloon_retrieve(false);
 		BUG_ON(page == NULL);

-#ifdef CONFIG_XEN_HAVE_PVMMU
-		/*
-		 * We don't support PV MMU when Linux and Xen is using
-		 * different page granularity.
-		 */
-		BUILD_BUG_ON(XEN_PAGE_SIZE != PAGE_SIZE);
-
-		if (!xen_feature(XENFEAT_auto_translated_physmap)) {
-			unsigned long pfn = page_to_pfn(page);
-
-			set_phys_to_machine(pfn, frame_list[i]);
-
-			/* Link back into the page tables if not highmem. */
-			if (!PageHighMem(page)) {
-				int ret;
-				ret = HYPERVISOR_update_va_mapping(
-						(unsigned long)__va(pfn << PAGE_SHIFT),
-						mfn_pte(frame_list[i], PAGE_KERNEL),
-						0);
-				BUG_ON(ret);
-			}
-		}
-#endif
+		xenmem_reservation_va_mapping_update(1, &page, &frame_list[i]);

 		/* Relinquish the page back to the allocator. */
 		free_reserved_page(page);
@@ -535,11 +497,6 @@ static enum bp_state decrease_reservation(unsigned long nr_pages, gfp_t gfp)
 	unsigned long i;
 	struct page *page, *tmp;
 	int ret;
-	struct xen_memory_reservation reservation = {
-		.address_bits = 0,
-		.extent_order = EXTENT_ORDER,
-		.domid        = DOMID_SELF
-	};
 	LIST_HEAD(pages);

 	if (nr_pages > ARRAY_SIZE(frame_list))
@@ -553,7 +510,7 @@ static enum bp_state decrease_reservation(unsigned long nr_pages, gfp_t gfp)
 			break;
 		}
 		adjust_managed_page_count(page, -1);
-		scrub_page(page);
+		xenmem_reservation_scrub_page(page);
 		list_add(&page->lru, &pages);
 	}

@@ -572,38 +529,18 @@ static enum bp_state decrease_reservation(unsigned long nr_pages, gfp_t gfp)
 	 */
 	i = 0;
 	list_for_each_entry_safe(page, tmp, &pages, lru) {
-		/* XENMEM_decrease_reservation requires a GFN */
 		frame_list[i++] = xen_page_to_gfn(page);

-#ifdef CONFIG_XEN_HAVE_PVMMU
-		/*
-		 * We don't support PV MMU when Linux and Xen is using
-		 * different page granularity.
-		 */
-		BUILD_BUG_ON(XEN_PAGE_SIZE != PAGE_SIZE);
-
-		if (!xen_feature(XENFEAT_auto_translated_physmap)) {
-			unsigned long pfn = page_to_pfn(page);
+		xenmem_reservation_va_mapping_reset(1, &page);

-			if (!PageHighMem(page)) {
-				ret = HYPERVISOR_update_va_mapping(
-						(unsigned long)__va(pfn << PAGE_SHIFT),
-						__pte_ma(0), 0);
-				BUG_ON(ret);
-			}
-			__set_phys_to_machine(pfn, INVALID_P2M_ENTRY);
-		}
-#endif
 		list_del(&page->lru);

 		balloon_append(page);
 	}

 	flush_tlb_all();

-	set_xen_guest_handle(reservation.extent_start, frame_list);
-	reservation.nr_extents = nr_pages;
-	ret = HYPERVISOR_memory_op(XENMEM_decrease_reservation, &reservation);
+	ret = xenmem_reservation_decrease(nr_pages, frame_list);
 	BUG_ON(ret != nr_pages);

 	balloon_stats.current_pages -= nr_pages;
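
Both call sites above pass count == 1 because frame_list is filled while pages are retrieved one at a time. The helpers themselves take a count plus parallel pages/frames arrays, so a caller that already holds its pages in an array could do the whole mapping step in one call. A minimal sketch of such a batched increase path, assuming the caller manages its own arrays (populate_and_map is a hypothetical name, not part of this commit):

/* Hypothetical batched caller of the new API; illustrative only. */
static int populate_and_map(unsigned long nr, struct page **pages,
			    xen_pfn_t *frames)
{
	unsigned long i;
	int rc;

	/* XENMEM_populate_physmap wants PFNs at Xen granularity. */
	for (i = 0; i < nr; i++)
		frames[i] = page_to_xen_pfn(pages[i]);

	rc = xenmem_reservation_increase(nr, frames);
	if (rc <= 0)
		return -ENOMEM;

	/*
	 * One call wires the rc populated frames into the p2m and kernel
	 * page tables; it is a no-op on auto-translated guests.
	 */
	xenmem_reservation_va_mapping_update(rc, pages, frames);
	return rc;
}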

drivers/xen/mem-reservation.c

Lines changed: 118 additions & 0 deletions
@@ -0,0 +1,118 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/******************************************************************************
+ * Xen memory reservation utilities.
+ *
+ * Copyright (c) 2003, B Dragovic
+ * Copyright (c) 2003-2004, M Williamson, K Fraser
+ * Copyright (c) 2005 Dan M. Smith, IBM Corporation
+ * Copyright (c) 2010 Daniel Kiper
+ * Copyright (c) 2018 Oleksandr Andrushchenko, EPAM Systems Inc.
+ */
+
+#include <asm/xen/hypercall.h>
+
+#include <xen/interface/memory.h>
+#include <xen/mem-reservation.h>
+
+/*
+ * Use one extent per PAGE_SIZE to avoid to break down the page into
+ * multiple frame.
+ */
+#define EXTENT_ORDER (fls(XEN_PFN_PER_PAGE) - 1)
+
+#ifdef CONFIG_XEN_HAVE_PVMMU
+void __xenmem_reservation_va_mapping_update(unsigned long count,
+					    struct page **pages,
+					    xen_pfn_t *frames)
+{
+	int i;
+
+	for (i = 0; i < count; i++) {
+		struct page *page = pages[i];
+		unsigned long pfn = page_to_pfn(page);
+
+		BUG_ON(!page);
+
+		/*
+		 * We don't support PV MMU when Linux and Xen is using
+		 * different page granularity.
+		 */
+		BUILD_BUG_ON(XEN_PAGE_SIZE != PAGE_SIZE);
+
+		set_phys_to_machine(pfn, frames[i]);
+
+		/* Link back into the page tables if not highmem. */
+		if (!PageHighMem(page)) {
+			int ret;
+
+			ret = HYPERVISOR_update_va_mapping(
+					(unsigned long)__va(pfn << PAGE_SHIFT),
+					mfn_pte(frames[i], PAGE_KERNEL),
+					0);
+			BUG_ON(ret);
+		}
+	}
+}
+EXPORT_SYMBOL_GPL(__xenmem_reservation_va_mapping_update);
+
+void __xenmem_reservation_va_mapping_reset(unsigned long count,
+					   struct page **pages)
+{
+	int i;
+
+	for (i = 0; i < count; i++) {
+		struct page *page = pages[i];
+		unsigned long pfn = page_to_pfn(page);
+
+		/*
+		 * We don't support PV MMU when Linux and Xen are using
+		 * different page granularity.
+		 */
+		BUILD_BUG_ON(XEN_PAGE_SIZE != PAGE_SIZE);
+
+		if (!PageHighMem(page)) {
+			int ret;
+
+			ret = HYPERVISOR_update_va_mapping(
+					(unsigned long)__va(pfn << PAGE_SHIFT),
+					__pte_ma(0), 0);
+			BUG_ON(ret);
+		}
+		__set_phys_to_machine(pfn, INVALID_P2M_ENTRY);
+	}
+}
+EXPORT_SYMBOL_GPL(__xenmem_reservation_va_mapping_reset);
+#endif /* CONFIG_XEN_HAVE_PVMMU */
+
+/* @frames is an array of PFNs */
+int xenmem_reservation_increase(int count, xen_pfn_t *frames)
+{
+	struct xen_memory_reservation reservation = {
+		.address_bits = 0,
+		.extent_order = EXTENT_ORDER,
+		.domid        = DOMID_SELF
+	};
+
+	/* XENMEM_populate_physmap requires a PFN based on Xen granularity. */
+	set_xen_guest_handle(reservation.extent_start, frames);
+	reservation.nr_extents = count;
+	return HYPERVISOR_memory_op(XENMEM_populate_physmap, &reservation);
+}
+EXPORT_SYMBOL_GPL(xenmem_reservation_increase);
+
+/* @frames is an array of GFNs */
+int xenmem_reservation_decrease(int count, xen_pfn_t *frames)
+{
+	struct xen_memory_reservation reservation = {
+		.address_bits = 0,
+		.extent_order = EXTENT_ORDER,
+		.domid        = DOMID_SELF
+	};
+
+	/* XENMEM_decrease_reservation requires a GFN */
+	set_xen_guest_handle(reservation.extent_start, frames);
+	reservation.nr_extents = count;
+	return HYPERVISOR_memory_op(XENMEM_decrease_reservation, &reservation);
+}
+EXPORT_SYMBOL_GPL(xenmem_reservation_decrease);
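
The EXTENT_ORDER definition above is worth a worked example. XEN_PAGE_SIZE is fixed at 4 KiB and XEN_PFN_PER_PAGE is PAGE_SIZE / XEN_PAGE_SIZE, so the order always describes exactly one Linux page worth of Xen frames; the numbers below assume an arm64 kernel configured for 64 KiB pages, with the x86 case for contrast:

/*
 * Worked example for EXTENT_ORDER = fls(XEN_PFN_PER_PAGE) - 1.
 *
 * arm64 with 64 KiB kernel pages (assumed configuration):
 *   XEN_PFN_PER_PAGE = PAGE_SIZE / XEN_PAGE_SIZE = 65536 / 4096 = 16
 *   EXTENT_ORDER     = fls(16) - 1 = 5 - 1 = 4
 *   => each extent is 2^4 = 16 Xen frames = one 64 KiB Linux page.
 *
 * x86 with 4 KiB pages (PAGE_SIZE == XEN_PAGE_SIZE):
 *   XEN_PFN_PER_PAGE = 1, so EXTENT_ORDER = fls(1) - 1 = 0
 *   => one 4 KiB frame per extent.
 */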

include/xen/mem-reservation.h

Lines changed: 59 additions & 0 deletions
@@ -0,0 +1,59 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+/*
+ * Xen memory reservation utilities.
+ *
+ * Copyright (c) 2003, B Dragovic
+ * Copyright (c) 2003-2004, M Williamson, K Fraser
+ * Copyright (c) 2005 Dan M. Smith, IBM Corporation
+ * Copyright (c) 2010 Daniel Kiper
+ * Copyright (c) 2018 Oleksandr Andrushchenko, EPAM Systems Inc.
+ */
+
+#ifndef _XENMEM_RESERVATION_H
+#define _XENMEM_RESERVATION_H
+
+#include <linux/highmem.h>
+
+#include <xen/page.h>
+
+static inline void xenmem_reservation_scrub_page(struct page *page)
+{
+#ifdef CONFIG_XEN_SCRUB_PAGES
+	clear_highpage(page);
+#endif
+}
+
+#ifdef CONFIG_XEN_HAVE_PVMMU
+void __xenmem_reservation_va_mapping_update(unsigned long count,
+					    struct page **pages,
+					    xen_pfn_t *frames);
+
+void __xenmem_reservation_va_mapping_reset(unsigned long count,
+					   struct page **pages);
+#endif
+
+static inline void xenmem_reservation_va_mapping_update(unsigned long count,
+							struct page **pages,
+							xen_pfn_t *frames)
+{
+#ifdef CONFIG_XEN_HAVE_PVMMU
+	if (!xen_feature(XENFEAT_auto_translated_physmap))
+		__xenmem_reservation_va_mapping_update(count, pages, frames);
+#endif
+}
+
+static inline void xenmem_reservation_va_mapping_reset(unsigned long count,
+						       struct page **pages)
+{
+#ifdef CONFIG_XEN_HAVE_PVMMU
+	if (!xen_feature(XENFEAT_auto_translated_physmap))
+		__xenmem_reservation_va_mapping_reset(count, pages);
+#endif
+}
+
+int xenmem_reservation_increase(int count, xen_pfn_t *frames);
+
+int xenmem_reservation_decrease(int count, xen_pfn_t *frames);
+
+#endif
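
Because the out-of-line entry points are exported with EXPORT_SYMBOL_GPL, a GPL module can now drive the same sequence the balloon driver uses without open-coding the hypercalls. A minimal sketch of the decrease side, mirroring the balloon driver's order of operations (the function name and fixed-size arrays are hypothetical, and error unwinding is elided):

/* Hypothetical client-module code; illustrative use of the exported API. */
#include <linux/gfp.h>
#include <xen/mem-reservation.h>

#define NR_FRAMES 16

static struct page *pages[NR_FRAMES];
static xen_pfn_t frames[NR_FRAMES];

static int give_frames_back_to_xen(void)
{
	int i, ret;

	for (i = 0; i < NR_FRAMES; i++) {
		pages[i] = alloc_page(GFP_KERNEL);
		if (!pages[i])
			return -ENOMEM;
		/* Honors CONFIG_XEN_SCRUB_PAGES, like the balloon driver. */
		xenmem_reservation_scrub_page(pages[i]);
		/* XENMEM_decrease_reservation takes GFNs. */
		frames[i] = xen_page_to_gfn(pages[i]);
	}

	/* Drop PV p2m entries and VA mappings before the frames go away. */
	xenmem_reservation_va_mapping_reset(NR_FRAMES, pages);

	ret = xenmem_reservation_decrease(NR_FRAMES, frames);
	return ret == NR_FRAMES ? 0 : -EFAULT;
}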
