Skip to content

Update uVisor with new page allocator #2558

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Merged
merged 3 commits into from
Aug 27, 2016
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
6 changes: 3 additions & 3 deletions features/FEATURE_UVISOR/AUTHORS.txt
Original file line number Diff line number Diff line change
@@ -1,7 +1,7 @@
552 Milosch Meriac
457 Alessandro Angelino
553 Milosch Meriac
458 Alessandro Angelino
42 Niklas Hauser
40 Jaeden Amero
31 Niklas Hauser
3 Hugo Vincent
3 JaredCJR
3 Jim Huang
Expand Down
2 changes: 1 addition & 1 deletion features/FEATURE_UVISOR/VERSION.txt
Original file line number Diff line number Diff line change
@@ -1 +1 @@
v0.20.1-alpha
v0.21.0-alpha
9 changes: 9 additions & 0 deletions features/FEATURE_UVISOR/includes/uvisor/api/inc/box_config.h
Original file line number Diff line number Diff line change
Expand Up @@ -18,6 +18,7 @@
#define __UVISOR_API_BOX_CONFIG_H__

#include "api/inc/uvisor_exports.h"
#include "api/inc/page_allocator_exports.h"
#include <stddef.h>
#include <stdint.h>

Expand Down Expand Up @@ -53,6 +54,14 @@ UVISOR_EXTERN const uint32_t __uvisor_mode;
\
extern const __attribute__((section(".keep.uvisor.cfgtbl_ptr_first"), aligned(4))) void * const main_cfg_ptr = &main_cfg;

/* Creates a global page heap containing at least `minimum_number_of_pages` pages, each of
 * `page_size` bytes. The total page heap size is therefore at least `minimum_number_of_pages * page_size`. */
#define UVISOR_SET_PAGE_HEAP(page_size, minimum_number_of_pages) \
const uint32_t __uvisor_page_size = (page_size); \
uint8_t __attribute__((section(".keep.uvisor.page_heap"))) \
main_page_heap_reserved[ (page_size) * (minimum_number_of_pages) ]


/* this macro selects an overloaded macro (variable number of arguments) */
#define __UVISOR_BOX_MACRO(_1, _2, _3, _4, NAME, ...) NAME

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -34,4 +34,7 @@ UVISOR_EXTERN int uvisor_page_malloc(UvisorPageTable * const table);
*/
UVISOR_EXTERN int uvisor_page_free(const UvisorPageTable * const table);

/* @returns the active page size for one page. */
UVISOR_EXTERN uint32_t uvisor_get_page_size(void);

#endif /* __UVISOR_API_PAGE_ALLOCATOR_H__ */
Original file line number Diff line number Diff line change
Expand Up @@ -30,27 +30,9 @@
#define UVISOR_ERROR_PAGE_INVALID_PAGE_OWNER (UVISOR_ERROR_CLASS_PAGE + 5)
#define UVISOR_ERROR_PAGE_INVALID_PAGE_COUNT (UVISOR_ERROR_CLASS_PAGE + 6)


/* Must be a power of 2 for MPU alignment in ARMv7-M with ARM MPU.
* Must be multiple of 32 for K64F MPU. */
#ifndef UVISOR_PAGE_SIZE
#define UVISOR_PAGE_SIZE ((uint32_t) 16 * 1024)
#endif

/* Return the rounded up number of pages required to hold `size`. */
#define UVISOR_PAGES_FOR_SIZE(size) ((size + UVISOR_PAGE_SIZE - 1) / UVISOR_PAGE_SIZE)

/* Create a page table with `count` many entries. */
#define UVISOR_PAGE_TABLE(count) \
struct { \
uint32_t page_size; \
uint32_t page_count; \
void * page_origins[count]; \
}

/* Create a page table with enough pages to hold `size`. */
#define UVISOR_PAGE_TABLE_FOR_SIZE(size) UVISOR_PAGE_TABLE(UVISOR_PAGES_FOR_SIZE(size))

/* Contains the uVisor page size.
* @warning Do not read directly, instead use `uvisor_get_page_size()` accessor! */
UVISOR_EXTERN const uint32_t __uvisor_page_size;

typedef struct {
uint32_t page_size; /* The page size in bytes. Must be multiple of `UVISOR_PAGE_SIZE`! */
Expand Down
59 changes: 42 additions & 17 deletions features/FEATURE_UVISOR/source/page_allocator.c_inc
Original file line number Diff line number Diff line change
Expand Up @@ -41,8 +41,10 @@

#include "page_allocator_config.h"

/* Maps the page to the owning box handle. */
page_owner_t g_page_owner_table[UVISOR_PAGE_TABLE_MAX_COUNT];
/* Contains the page usage mapped by owner. */
uint32_t g_page_owner_map[UVISOR_MAX_BOXES][UVISOR_PAGE_MAP_COUNT];
/* Contains total page usage. */
uint32_t g_page_usage_map[UVISOR_PAGE_MAP_COUNT];
/* Contains the configured page size. */
uint32_t g_page_size;
/* Points to the beginning of the page heap. */
Expand Down Expand Up @@ -119,12 +121,16 @@ void page_allocator_init(void * const heap_start, void * const heap_end, const u
g_page_count_total = ((uint32_t) heap_end - start) / g_page_size;
}
/* Clamp page count to table size. */
if (g_page_count_total > UVISOR_PAGE_TABLE_MAX_COUNT) {
g_page_count_total = UVISOR_PAGE_TABLE_MAX_COUNT;
if (g_page_count_total > UVISOR_PAGE_MAX_COUNT) {
DPRINTF("uvisor_page_init: Clamping available page count from %u to %u!\n", g_page_count_total, UVISOR_PAGE_MAX_COUNT);
/* Move the heap start address forward so that the last clamped page is located nearest to the heap end. */
g_page_heap_start += (g_page_count_total - UVISOR_PAGE_MAX_COUNT) * g_page_size;
/* Clamp the page count. */
g_page_count_total = UVISOR_PAGE_MAX_COUNT;
}
g_page_count_free = g_page_count_total;
/* Remember the end of the heap. */
g_page_heap_end = g_page_heap_start + g_page_count_free * g_page_size;
g_page_heap_end = g_page_heap_start + g_page_count_total * g_page_size;

DPRINTF("uvisor_page_init:\n.page_heap start 0x%08x\n.page_heap end 0x%08x\n.page_heap available %ukB split into %u pages of %ukB\n\n",
(unsigned int) g_page_heap_start,
Expand All @@ -133,11 +139,9 @@ void page_allocator_init(void * const heap_start, void * const heap_end, const u
(unsigned int) g_page_count_total,
(unsigned int) (g_page_size / 1024));

uint32_t page = 0;
for (; page < UVISOR_PAGE_TABLE_MAX_COUNT; page++) {
g_page_owner_table[page] = UVISOR_PAGE_UNUSED;
page_allocator_reset_faults(page);
}
/* Force a reset of owner and usage page maps. */
memset(g_page_owner_map, 0, sizeof(g_page_owner_map));
memset(g_page_usage_map, 0, sizeof(g_page_usage_map));
}

int page_allocator_malloc(UvisorPageTable * const table)
Expand Down Expand Up @@ -176,10 +180,20 @@ int page_allocator_malloc(UvisorPageTable * const table)
/* Iterate through the page table and find the empty pages. */
uint32_t page = 0;
for (; (page < g_page_count_total) && pages_required; page++) {
/* If the page is unused, it's entry is UVISOR_PAGE_UNUSED (not NULL!). */
if (g_page_owner_table[page] == UVISOR_PAGE_UNUSED) {
/* Marry this page to the box id. */
g_page_owner_table[page] = box_id;
/* If the page is unused, map_get returns zero. */
if (!page_allocator_map_get(g_page_usage_map, page)) {
/* Remember this page as used. */
page_allocator_map_set(g_page_usage_map, page);
/* Pages of box 0 are accessible to all other boxes! */
if (box_id == 0) {
uint32_t ii = 0;
for (; ii < UVISOR_MAX_BOXES; ii++) {
page_allocator_map_set(g_page_owner_map[ii], page);
}
} else {
/* Otherwise, remember ownership only for active box. */
page_allocator_map_set(g_page_owner_map[box_id], page);
}
/* Reset the fault count for this page. */
page_allocator_reset_faults(page);
/* Get the pointer to the page. */
Expand Down Expand Up @@ -243,14 +257,25 @@ int page_allocator_free(const UvisorPageTable * const table)
return UVISOR_ERROR_PAGE_INVALID_PAGE_ORIGIN;
}
/* Check if the page belongs to the caller. */
if (g_page_owner_table[page_index] == box_id) {
g_page_owner_table[page_index] = UVISOR_PAGE_UNUSED;
if (page_allocator_map_get(g_page_owner_map[box_id], page_index)) {
/* Clear the owner and usage page maps for this page. */
page_allocator_map_clear(g_page_usage_map, page_index);
/* If the page was owned by box 0, we need to remove it from all other boxes! */
if (box_id == 0) {
uint32_t ii = 0;
for (; ii < UVISOR_MAX_BOXES; ii++) {
page_allocator_map_clear(g_page_owner_map[ii], page_index);
}
} else {
/* Otherwise, only remove for the active box. */
page_allocator_map_clear(g_page_owner_map[box_id], page_index);
}
g_page_count_free++;
DPRINTF("uvisor_page_free: Freeing page at index %u\n", page_index);
}
else {
/* Abort if the page doesn't belong to the caller. */
if (g_page_owner_table[page_index] == UVISOR_PAGE_UNUSED) {
if (!page_allocator_map_get(g_page_usage_map, page_index)) {
DPRINTF("uvisor_page_free: FAIL: Page %u is not allocated!\n\n", page_index);
} else {
DPRINTF("uvisor_page_free: FAIL: Page %u is not owned by box %u!\n\n", page_index, box_id);
Expand Down
43 changes: 40 additions & 3 deletions features/FEATURE_UVISOR/source/page_allocator_config.h
Original file line number Diff line number Diff line change
Expand Up @@ -25,21 +25,58 @@
* a relatively low limit to the number of pages.
* By default a maximum of 16 pages are allowed. This can only be overwritten
* by the porting engineer for the current platform. */
#ifndef UVISOR_PAGE_TABLE_MAX_COUNT
#define UVISOR_PAGE_TABLE_MAX_COUNT ((uint32_t) 16)
#ifndef UVISOR_PAGE_MAX_COUNT
#define UVISOR_PAGE_MAX_COUNT (16UL)
#endif
/* The number of pages is decided by the page size. A small page size leads to
 * a lot of pages; however, the number of pages is capped for efficiency.
 * Furthermore, when allocating large continuous memory, a page size that is
 * too small will lead to allocation failures. This can only be overwritten
 * by the porting engineer for the current platform. */
#ifndef UVISOR_PAGE_SIZE_MINIMUM
#define UVISOR_PAGE_SIZE_MINIMUM ((uint32_t) 1024)
#define UVISOR_PAGE_SIZE_MINIMUM (1024UL)
#endif

/* Defines the number of uint32_t page owner masks in the owner map. */
#define UVISOR_PAGE_MAP_COUNT ((UVISOR_PAGE_MAX_COUNT + 31) / 32)

/* The page box_id is the box id which is 8-bit large. */
typedef uint8_t page_owner_t;
/* Define a unused value for the page table. */
#define UVISOR_PAGE_UNUSED ((page_owner_t) (-1))
/* Contains the total number of available pages. */
extern uint8_t g_page_count_total;

/** Marks a page as used in a page map.
 * @param map an array of `uint32_t` words holding one bit per page
 * @param page the index of the page whose bit is set
 */
static inline void page_allocator_map_set(uint32_t * const map, uint8_t page)
{
    /* Page bits are right-aligned within the map: the unused leading bits
     * absorb the difference between the map capacity and the page count. */
    const uint8_t bit = (uint8_t) (page + (UVISOR_PAGE_MAP_COUNT * 32 - g_page_count_total));
    map[bit >> 5] |= (1UL << (bit & 31));
}

/** Marks a page as unused in a page map.
 * @param map an array of `uint32_t` words holding one bit per page
 * @param page the index of the page whose bit is cleared
 */
static inline void page_allocator_map_clear(uint32_t * const map, uint8_t page)
{
    /* Same right-alignment offset as used when setting the bit. */
    const uint8_t bit = (uint8_t) (page + (UVISOR_PAGE_MAP_COUNT * 32 - g_page_count_total));
    map[bit >> 5] &= ~(1UL << (bit & 31));
}

/** Queries whether a page bit is set in a page map.
 * @param map an array of `uint32_t` words holding one bit per page
 * @param page the index of the page to query
 * @retval 0 if the page bit is not set
 * @retval 1 if the page bit is set
 */
static inline int page_allocator_map_get(const uint32_t * const map, uint8_t page)
{
    /* Same right-alignment offset as used when setting the bit. */
    const uint8_t bit = (uint8_t) (page + (UVISOR_PAGE_MAP_COUNT * 32 - g_page_count_total));
    return (map[bit >> 5] & (1UL << (bit & 31))) ? 1 : 0;
}

#endif /* __PAGE_ALLOCATOR_CONFIG_H__ */
13 changes: 5 additions & 8 deletions features/FEATURE_UVISOR/source/rtx/secure_allocator.c
Original file line number Diff line number Diff line change
Expand Up @@ -32,10 +32,6 @@
/* offsetof is a gcc built-in function, this is the manual implementation */
#define OFFSETOF(type, member) ((uint32_t) (&(((type *)(0))->member)))

/* Declare this variable here, so the tier-2 allocator _always_ uses the
* page size that the tier-1 allocator expects! */
const uint32_t __uvisor_page_size = UVISOR_PAGE_SIZE;

/* Internal structure currently only contains the page table. */
typedef struct {
UvisorPageTable table;
Expand Down Expand Up @@ -71,10 +67,11 @@ SecureAllocator secure_allocator_create_with_pages(
size_t size,
size_t maximum_malloc_size)
{
const uint32_t page_size = uvisor_get_page_size();
/* The rt_Memory allocator puts one MEMP structure at both the
* beginning and end of the memory pool. */
const size_t block_overhead = 2 * sizeof(MEMP);
const size_t page_size_with_overhead = UVISOR_PAGE_SIZE + block_overhead;
const size_t page_size_with_overhead = page_size + block_overhead;
/* Calculate the integer part of required the page count. */
size_t page_count = size / page_size_with_overhead;
/* Add another page if the remainder is not zero. */
Expand All @@ -84,7 +81,7 @@ SecureAllocator secure_allocator_create_with_pages(
DPRINTF("secure_allocator_create_with_pages: Requesting %u pages for at least %uB\n", page_count, size);

/* Compute the maximum allocation within our blocks. */
size_t maximum_allocation_size = UVISOR_PAGE_SIZE - block_overhead;
size_t maximum_allocation_size = page_size - block_overhead;
/* If the required maximum allocation is larger than we can provide, abort. */
if (maximum_malloc_size > maximum_allocation_size) {
DPRINTF("secure_allocator_create_with_pages: Maximum allocation request %uB is larger then available %uB\n\n", maximum_malloc_size, maximum_allocation_size);
Expand All @@ -104,7 +101,7 @@ SecureAllocator secure_allocator_create_with_pages(
}

/* Prepare the page table. */
allocator->table.page_size = UVISOR_PAGE_SIZE;
allocator->table.page_size = page_size;
allocator->table.page_count = page_count;
/* Get me some pages. */
if (uvisor_page_malloc((UvisorPageTable *) &(allocator->table))) {
Expand All @@ -116,7 +113,7 @@ SecureAllocator secure_allocator_create_with_pages(
/* Initialize a MEMP structure in all pages. */
for(size_t ii = 0; ii < page_count; ii++) {
/* Add each page as a pool. */
rt_init_mem(allocator->table.page_origins[ii], UVISOR_PAGE_SIZE);
rt_init_mem(allocator->table.page_origins[ii], page_size);
DPRINTF("secure_allocator_create_with_pages: Created MEMP allocator %p with offset %d\n", allocator->table.page_origins[ii], 0);
}
DPRINTF("\n");
Expand Down
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Original file line number Diff line number Diff line change
Expand Up @@ -227,6 +227,16 @@ SECTIONS
__uvisor_bss_end = .;
} > m_data

/* Heap space for the page allocator */
.page_heap (NOLOAD) :
{
. = ALIGN(32);
__uvisor_page_start = .;
KEEP(*(.keep.uvisor.page_heap))
. = ALIGN(32);
__uvisor_page_end = .;
} > m_data_2

__VECTOR_RAM = DEFINED(__ram_vector_table__) ? __VECTOR_RAM__ : ORIGIN(m_interrupts);
__RAM_VECTOR_TABLE_SIZE_BYTES = DEFINED(__ram_vector_table__) ? (__interrupts_ram_end__ - __interrupts_ram_start__) : 0x0;

Expand Down Expand Up @@ -325,21 +335,6 @@ SECTIONS
__uvisor_heap_end = .;
} > m_data_2

.stack :
{
. = ALIGN(8);
. += STACK_SIZE;
__StackTop = .;
} > m_data_2

/* Heap space for the page allocator */
.page_heap (NOLOAD) :
{
__uvisor_page_start = .;
. = ORIGIN(m_data_2) + LENGTH(m_data_2) - 4;
__uvisor_page_end = .;
} > m_data_2

m_usb_bdt USB_RAM_START (NOLOAD) :
{
*(m_usb_bdt)
Expand All @@ -352,6 +347,7 @@ SECTIONS
}

/* Initializes stack on the end of block */
__StackTop = ORIGIN(m_data_2) + LENGTH(m_data_2);
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Looks like the stack was moved to the end of RAM. This should increase the heap size so +1.

Also, the GCC specific tweaks can now be removed in RTX_CM_Lib.h:

#if defined(__GNUC__) && !defined(__CC_ARM)     /* GCC */
extern uint32_t __StackTop[];
#define INITIAL_SP            (__StackTop)
#else
#define INITIAL_SP            (0x20030000UL)
#endif

__StackLimit = __StackTop - STACK_SIZE;
PROVIDE(__stack = __StackTop);

Expand All @@ -365,4 +361,3 @@ SECTIONS
__uvisor_sram_start = ORIGIN(m_data);
__uvisor_sram_end = ORIGIN(m_data_2) + LENGTH(m_data_2);
}

6 changes: 1 addition & 5 deletions rtos/rtx/TARGET_CORTEX_M/RTX_CM_lib.h
Original file line number Diff line number Diff line change
Expand Up @@ -421,12 +421,8 @@ osThreadDef_t os_thread_def_main = {(os_pthread)pre_main, osPriorityNormal, 1U,
#define INITIAL_SP (0x20003000UL)

#elif defined(TARGET_K64F)
#if defined(__GNUC__) && !defined(__CC_ARM) /* GCC */
extern uint32_t __StackTop[];
#define INITIAL_SP (__StackTop)
#else
#define INITIAL_SP (0x20030000UL)
#endif

#if defined(__CC_ARM) || defined(__GNUC__)
#define ISR_STACK_SIZE (0x1000)
#endif
Expand Down