Skip to content
This repository was archived by the owner on Nov 8, 2023. It is now read-only.

Commit 162a447

Browse files
eberman-quic authored and Treehugger Robot committed
FROMLIST: virt: gunyah: Add interfaces to map memory into guest address space
Gunyah virtual machines are created with either all memory provided at VM creation using the Resource Manager memory parcel construct, or incrementally by enabling VM demand paging. The Gunyah demand paging support is provided directly by the hypervisor and does not require the creation of resource manager memory parcels. Demand paging allows the host to map/unmap contiguous pages (folios) to a Gunyah memory extent object with the correct rights allowing its contained pages to be mapped into the Guest VM's address space. Memory extents are Gunyah's mechanism for handling system memory, abstracting from the direct use of physical page numbers. Memory extents are hypervisor objects and are therefore referenced and access controlled with capabilities. When a virtual machine is configured for demand paging, 3 memory extent and 1 address space capabilities are provided to the host. The resource manager defined policy is such that memory in the "host-only" extent (the default) is private to the host. Memory in the "guest-only" extent can be used for guest private mappings, and is unmapped from the host. Memory in the "host-and-guest-shared" extent can be mapped concurrently and shared between the host and guest VMs. Implement two functions which Linux can use to move memory between the virtual machines: gunyah_provide_folio and gunyah_reclaim_folio. Memory that has been provided to the guest is tracked in a maple tree to be reclaimed later. Folios provided to the virtual machine are assumed to be owned by the Gunyah stack: the folio's ->private field is used for bookkeeping about whether the page is mapped into the virtual machine. Bug: 338347082 Link: https://lore.kernel.org/all/[email protected]/ Change-Id: I303fced6f7baef3c69540ef9fa9a7a04db957365 Signed-off-by: Elliot Berman <[email protected]>
1 parent b2a82da commit 162a447

File tree

4 files changed

+499
-1
lines changed

4 files changed

+499
-1
lines changed

drivers/virt/gunyah/Makefile

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,5 +1,5 @@
11
# SPDX-License-Identifier: GPL-2.0
22

3-
gunyah_rsc_mgr-y += rsc_mgr.o rsc_mgr_rpc.o vm_mgr.o
3+
gunyah_rsc_mgr-y += rsc_mgr.o rsc_mgr_rpc.o vm_mgr.o vm_mgr_mem.o
44

55
obj-$(CONFIG_GUNYAH) += gunyah.o gunyah_rsc_mgr.o gunyah_vcpu.o

drivers/virt/gunyah/vm_mgr.c

Lines changed: 69 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -17,6 +17,16 @@
1717
#include "rsc_mgr.h"
1818
#include "vm_mgr.h"
1919

20+
#define GUNYAH_VM_ADDRSPACE_LABEL 0
21+
// "To" extent for memory private to guest
22+
#define GUNYAH_VM_MEM_EXTENT_GUEST_PRIVATE_LABEL 0
23+
// "From" extent for memory shared with guest
24+
#define GUNYAH_VM_MEM_EXTENT_HOST_SHARED_LABEL 1
25+
// "To" extent for memory shared with the guest
26+
#define GUNYAH_VM_MEM_EXTENT_GUEST_SHARED_LABEL 3
27+
// "From" extent for memory private to guest
28+
#define GUNYAH_VM_MEM_EXTENT_HOST_PRIVATE_LABEL 2
29+
2030
static DEFINE_XARRAY(gunyah_vm_functions);
2131

2232
static void gunyah_vm_put_function(struct gunyah_vm_function *fn)
@@ -175,6 +185,16 @@ void gunyah_vm_function_unregister(struct gunyah_vm_function *fn)
175185
}
176186
EXPORT_SYMBOL_GPL(gunyah_vm_function_unregister);
177187

188+
static bool gunyah_vm_resource_ticket_populate_noop(
189+
struct gunyah_vm_resource_ticket *ticket, struct gunyah_resource *ghrsc)
190+
{
191+
return true;
192+
}
193+
static void gunyah_vm_resource_ticket_unpopulate_noop(
194+
struct gunyah_vm_resource_ticket *ticket, struct gunyah_resource *ghrsc)
195+
{
196+
}
197+
178198
int gunyah_vm_add_resource_ticket(struct gunyah_vm *ghvm,
179199
struct gunyah_vm_resource_ticket *ticket)
180200
{
@@ -349,6 +369,17 @@ static void gunyah_vm_stop(struct gunyah_vm *ghvm)
349369
ghvm->vm_status != GUNYAH_RM_VM_STATUS_RUNNING);
350370
}
351371

372+
static inline void setup_extent_ticket(struct gunyah_vm *ghvm,
373+
struct gunyah_vm_resource_ticket *ticket,
374+
u32 label)
375+
{
376+
ticket->resource_type = GUNYAH_RESOURCE_TYPE_MEM_EXTENT;
377+
ticket->label = label;
378+
ticket->populate = gunyah_vm_resource_ticket_populate_noop;
379+
ticket->unpopulate = gunyah_vm_resource_ticket_unpopulate_noop;
380+
gunyah_vm_add_resource_ticket(ghvm, ticket);
381+
}
382+
352383
static __must_check struct gunyah_vm *gunyah_vm_alloc(struct gunyah_rm *rm)
353384
{
354385
struct gunyah_vm *ghvm;
@@ -372,6 +403,25 @@ static __must_check struct gunyah_vm *gunyah_vm_alloc(struct gunyah_rm *rm)
372403
INIT_LIST_HEAD(&ghvm->resources);
373404
INIT_LIST_HEAD(&ghvm->resource_tickets);
374405

406+
mt_init(&ghvm->mm);
407+
408+
ghvm->addrspace_ticket.resource_type = GUNYAH_RESOURCE_TYPE_ADDR_SPACE;
409+
ghvm->addrspace_ticket.label = GUNYAH_VM_ADDRSPACE_LABEL;
410+
ghvm->addrspace_ticket.populate =
411+
gunyah_vm_resource_ticket_populate_noop;
412+
ghvm->addrspace_ticket.unpopulate =
413+
gunyah_vm_resource_ticket_unpopulate_noop;
414+
gunyah_vm_add_resource_ticket(ghvm, &ghvm->addrspace_ticket);
415+
416+
setup_extent_ticket(ghvm, &ghvm->host_private_extent_ticket,
417+
GUNYAH_VM_MEM_EXTENT_HOST_PRIVATE_LABEL);
418+
setup_extent_ticket(ghvm, &ghvm->host_shared_extent_ticket,
419+
GUNYAH_VM_MEM_EXTENT_HOST_SHARED_LABEL);
420+
setup_extent_ticket(ghvm, &ghvm->guest_private_extent_ticket,
421+
GUNYAH_VM_MEM_EXTENT_GUEST_PRIVATE_LABEL);
422+
setup_extent_ticket(ghvm, &ghvm->guest_shared_extent_ticket,
423+
GUNYAH_VM_MEM_EXTENT_GUEST_SHARED_LABEL);
424+
375425
return ghvm;
376426
}
377427

@@ -533,6 +583,23 @@ static void _gunyah_vm_put(struct kref *kref)
533583
gunyah_vm_stop(ghvm);
534584

535585
gunyah_vm_remove_functions(ghvm);
586+
587+
/**
588+
* If this fails, we're going to lose the memory for good, which is
589+
* BUG_ON-worthy, but not unrecoverable (we just lose memory).
590+
* This call should always succeed though because the VM is not
591+
* running and RM will let us reclaim all the memory.
592+
*/
593+
WARN_ON(gunyah_vm_reclaim_range(ghvm, 0, U64_MAX));
594+
595+
/* clang-format off */
596+
gunyah_vm_remove_resource_ticket(ghvm, &ghvm->addrspace_ticket);
597+
gunyah_vm_remove_resource_ticket(ghvm, &ghvm->host_shared_extent_ticket);
598+
gunyah_vm_remove_resource_ticket(ghvm, &ghvm->host_private_extent_ticket);
599+
gunyah_vm_remove_resource_ticket(ghvm, &ghvm->guest_shared_extent_ticket);
600+
gunyah_vm_remove_resource_ticket(ghvm, &ghvm->guest_private_extent_ticket);
601+
/* clang-format on */
602+
536603
gunyah_vm_clean_resources(ghvm);
537604

538605
if (ghvm->vm_status == GUNYAH_RM_VM_STATUS_EXITED ||
@@ -548,6 +615,8 @@ static void _gunyah_vm_put(struct kref *kref)
548615
/* clang-format on */
549616
}
550617

618+
mtree_destroy(&ghvm->mm);
619+
551620
if (ghvm->vm_status > GUNYAH_RM_VM_STATUS_NO_STATE) {
552621
gunyah_rm_notifier_unregister(ghvm->rm, &ghvm->nb);
553622

drivers/virt/gunyah/vm_mgr.h

Lines changed: 108 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -8,20 +8,53 @@
88

99
#include <linux/device.h>
1010
#include <linux/kref.h>
11+
#include <linux/maple_tree.h>
1112
#include <linux/mutex.h>
13+
#include <linux/pagemap.h>
1214
#include <linux/rwsem.h>
15+
#include <linux/set_memory.h>
1316
#include <linux/wait.h>
1417

1518
#include <uapi/linux/gunyah.h>
1619

1720
#include "rsc_mgr.h"
1821

22+
static inline u64 gunyah_gpa_to_gfn(u64 gpa)
23+
{
24+
return gpa >> PAGE_SHIFT;
25+
}
26+
27+
static inline u64 gunyah_gfn_to_gpa(u64 gfn)
28+
{
29+
return gfn << PAGE_SHIFT;
30+
}
31+
1932
long gunyah_dev_vm_mgr_ioctl(struct gunyah_rm *rm, unsigned int cmd,
2033
unsigned long arg);
2134

2235
/**
2336
* struct gunyah_vm - Main representation of a Gunyah Virtual machine
2437
* @vmid: Gunyah's VMID for this virtual machine
38+
* @mm: A maple tree of all memory that has been mapped to a VM.
39+
* Indices are guest frame numbers; entries are either folios or
40+
* RM mem parcels
41+
* @addrspace_ticket: Resource ticket to the capability for guest VM's
42+
* address space
43+
* @host_private_extent_ticket: Resource ticket to the capability for our
44+
* memory extent from which to lend private
45+
* memory to the guest
46+
* @host_shared_extent_ticket: Resource ticket to the capability for our
47+
* memory extent from which to share memory
48+
* with the guest. Distinction with
49+
* @host_private_extent_ticket needed for
50+
* current Qualcomm platforms; on non-Qualcomm
51+
* platforms, this is the same capability ID
52+
* @guest_private_extent_ticket: Resource ticket to the capability for
53+
* the guest's memory extent to lend private
54+
* memory to
55+
* @guest_shared_extent_ticket: Resource ticket to the capability for
56+
* the memory extent that represents
57+
* memory shared with the guest.
2558
* @rm: Pointer to the resource manager struct to make RM calls
2659
* @parent: For logging
2760
* @nb: Notifier block for RM notifications
@@ -43,6 +76,11 @@ long gunyah_dev_vm_mgr_ioctl(struct gunyah_rm *rm, unsigned int cmd,
4376
*/
4477
struct gunyah_vm {
4578
u16 vmid;
79+
struct maple_tree mm;
80+
struct gunyah_vm_resource_ticket addrspace_ticket,
81+
host_private_extent_ticket, host_shared_extent_ticket,
82+
guest_private_extent_ticket, guest_shared_extent_ticket;
83+
4684
struct gunyah_rm *rm;
4785

4886
struct notifier_block nb;
@@ -63,4 +101,74 @@ struct gunyah_vm {
63101

64102
};
65103

104+
/**
105+
* folio_mmapped() - Returns true if the folio is mapped into any vma
106+
* @folio: Folio to test
107+
*/
108+
static bool folio_mmapped(struct folio *folio)
109+
{
110+
struct address_space *mapping = folio->mapping;
111+
struct vm_area_struct *vma;
112+
bool ret = false;
113+
114+
i_mmap_lock_read(mapping);
115+
vma_interval_tree_foreach(vma, &mapping->i_mmap, folio_index(folio),
116+
folio_index(folio) + folio_nr_pages(folio)) {
117+
ret = true;
118+
break;
119+
}
120+
i_mmap_unlock_read(mapping);
121+
return ret;
122+
}
123+
124+
/**
125+
* gunyah_folio_lend_safe() - Returns true if folio is ready to be lent to guest
126+
* @folio: Folio to prepare
127+
*
128+
* Tests if the folio is mapped anywhere outside the kernel logical map
129+
* and whether any userspace has a vma containing the folio, even if it hasn't
130+
* paged it in. We want to avoid causing fault to userspace.
131+
* If userspace doesn't have it mapped anywhere, then unmap from kernel
132+
* logical map to prevent accidental access (e.g. by load_unaligned_zeropad)
133+
*/
134+
static inline bool gunyah_folio_lend_safe(struct folio *folio)
135+
{
136+
long i;
137+
138+
if (folio_mapped(folio) || folio_mmapped(folio))
139+
return false;
140+
141+
for (i = 0; i < folio_nr_pages(folio); i++)
142+
set_direct_map_invalid_noflush(folio_page(folio, i));
143+
/**
144+
* No need to flush tlb on armv8/9: hypervisor will flush when it
145+
* removes from our stage 2
146+
*/
147+
return true;
148+
}
149+
150+
/**
151+
* gunyah_folio_host_reclaim() - Restores kernel logical map to folio
152+
* @folio: folio to reclaim by host
153+
*
154+
* See also gunyah_folio_lend_safe().
155+
*/
156+
static inline void gunyah_folio_host_reclaim(struct folio *folio)
157+
{
158+
long i;
159+
for (i = 0; i < folio_nr_pages(folio); i++)
160+
set_direct_map_default_noflush(folio_page(folio, i));
161+
}
162+
163+
int gunyah_vm_parcel_to_paged(struct gunyah_vm *ghvm,
164+
struct gunyah_rm_mem_parcel *parcel, u64 gfn,
165+
u64 nr);
166+
void gunyah_vm_mm_erase_range(struct gunyah_vm *ghvm, u64 gfn, u64 nr);
167+
int gunyah_vm_reclaim_parcel(struct gunyah_vm *ghvm,
168+
struct gunyah_rm_mem_parcel *parcel, u64 gfn);
169+
int gunyah_vm_provide_folio(struct gunyah_vm *ghvm, struct folio *folio,
170+
u64 gfn, bool share, bool write);
171+
int gunyah_vm_reclaim_folio(struct gunyah_vm *ghvm, u64 gfn, struct folio *folio);
172+
int gunyah_vm_reclaim_range(struct gunyah_vm *ghvm, u64 gfn, u64 nr);
173+
66174
#endif

0 commit comments

Comments
 (0)