@@ -7,6 +7,7 @@
 #include "vmwgfx_drv.h"
 #include <drm/ttm/ttm_bo_driver.h>
 #include <drm/ttm/ttm_placement.h>
+#include <drm/ttm/ttm_range_manager.h>
 
 /**
  * struct vmw_thp_manager - Range manager implementing huge page alignment
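For reference, the node type used throughout this patch comes from the newly included header. A minimal sketch of what <drm/ttm/ttm_range_manager.h> provides at this point in the series (paraphrased, not verbatim):

    /* A TTM resource backed by one or more drm_mm range allocations. */
    struct ttm_range_mgr_node {
            struct ttm_resource base;       /* embedded generic resource */
            struct drm_mm_node mm_nodes[];  /* flexible array of range allocations */
    };

Embedding the drm_mm_node(s) behind the resource lets the backend allocate both in a single kzalloc(), which is the point of the "self alloc" conversion.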
@@ -54,16 +55,18 @@ static int vmw_thp_get_node(struct ttm_resource_manager *man,
 {
         struct vmw_thp_manager *rman = to_thp_manager(man);
         struct drm_mm *mm = &rman->mm;
-        struct drm_mm_node *node;
+        struct ttm_range_mgr_node *node;
         unsigned long align_pages;
         unsigned long lpfn;
         enum drm_mm_insert_mode mode = DRM_MM_INSERT_BEST;
         int ret;
 
-        node = kzalloc(sizeof(*node), GFP_KERNEL);
+        node = kzalloc(struct_size(node, mm_nodes, 1), GFP_KERNEL);
         if (!node)
                 return -ENOMEM;
 
+        ttm_resource_init(bo, place, &node->base);
+
         lpfn = place->lpfn;
         if (!lpfn)
                 lpfn = man->size;
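Two things are worth noting in this hunk. struct_size(node, mm_nodes, 1) from <linux/overflow.h> sizes a struct with a flexible array member, with overflow checking; ignoring the overflow checks, it expands to roughly:

    /* What struct_size(node, mm_nodes, 1) computes, minus the overflow checks. */
    node = kzalloc(sizeof(*node) + 1 * sizeof(node->mm_nodes[0]), GFP_KERNEL);

The new ttm_resource_init() call then fills in the embedded node->base from the buffer object and placement; in this transitional state the TTM core still works with the mem argument directly, so the embedded copy mainly prepares for the later switch to fully driver-allocated resources.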
@@ -76,32 +79,34 @@ static int vmw_thp_get_node(struct ttm_resource_manager *man,
         if (IS_ENABLED(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD)) {
                 align_pages = (HPAGE_PUD_SIZE >> PAGE_SHIFT);
                 if (mem->num_pages >= align_pages) {
-                        ret = vmw_thp_insert_aligned(bo, mm, node, align_pages,
-                                                     place, mem, lpfn, mode);
+                        ret = vmw_thp_insert_aligned(bo, mm, &node->mm_nodes[0],
+                                                     align_pages, place, mem,
+                                                     lpfn, mode);
                         if (!ret)
                                 goto found_unlock;
                 }
         }
 
         align_pages = (HPAGE_PMD_SIZE >> PAGE_SHIFT);
         if (mem->num_pages >= align_pages) {
-                ret = vmw_thp_insert_aligned(bo, mm, node, align_pages, place,
-                                             mem, lpfn, mode);
+                ret = vmw_thp_insert_aligned(bo, mm, &node->mm_nodes[0],
+                                             align_pages, place, mem, lpfn,
+                                             mode);
                 if (!ret)
                         goto found_unlock;
         }
 
-        ret = drm_mm_insert_node_in_range(mm, node, mem->num_pages,
-                                          bo->page_alignment, 0,
+        ret = drm_mm_insert_node_in_range(mm, &node->mm_nodes[0],
+                                          mem->num_pages, bo->page_alignment, 0,
                                           place->fpfn, lpfn, mode);
 found_unlock:
         spin_unlock(&rman->lock);
 
         if (unlikely(ret)) {
                 kfree(node);
         } else {
-                mem->mm_node = node;
-                mem->start = node->start;
+                mem->mm_node = &node->mm_nodes[0];
+                mem->start = node->mm_nodes[0].start;
         }
 
         return ret;
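The allocation logic itself is unchanged; every drm_mm call now simply operates on &node->mm_nodes[0], the range node embedded in the wrapper, while mem->mm_node and mem->start are still populated for core code that predates the conversion. Purely as an illustration (this helper is hypothetical, not part of the patch), going from the embedded node back to its wrapper is the usual container_of() pattern:

    /* Hypothetical helper: recover the wrapper from its embedded drm_mm_node. */
    static inline struct ttm_range_mgr_node *
    thp_node_to_mgr_node(struct drm_mm_node *node)
    {
            return container_of(node, struct ttm_range_mgr_node, mm_nodes[0]);
    }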
@@ -113,15 +118,13 @@ static void vmw_thp_put_node(struct ttm_resource_manager *man,
                              struct ttm_resource *mem)
 {
         struct vmw_thp_manager *rman = to_thp_manager(man);
+        struct ttm_range_mgr_node *node = mem->mm_node;
 
-        if (mem->mm_node) {
-                spin_lock(&rman->lock);
-                drm_mm_remove_node(mem->mm_node);
-                spin_unlock(&rman->lock);
+        spin_lock(&rman->lock);
+        drm_mm_remove_node(&node->mm_nodes[0]);
+        spin_unlock(&rman->lock);
 
-                kfree(mem->mm_node);
-                mem->mm_node = NULL;
-        }
+        kfree(node);
 }
 
 int vmw_thp_init(struct vmw_private *dev_priv)
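vmw_thp_put_node() loses its if (mem->mm_node) guard and the mem->mm_node = NULL reset: with the backend owning the allocation, a single kfree(node) releases the resource and the embedded range node together, and the unconditional path presumably relies on the core only calling put_node for resources that actually hold an allocation at this point in the series.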