@@ -103,19 +103,39 @@ void i915_ttm_adjust_gem_after_move(struct drm_i915_gem_object *obj)
 {
	struct ttm_buffer_object *bo = i915_gem_to_ttm(obj);
	unsigned int cache_level;
+	unsigned int mem_flags;
	unsigned int i;
+	int mem_type;
+
+	/*
+	 * We might have been purged (or swapped out) if the resource is NULL,
+	 * in which case the SYSTEM placement is the closest match to describe
+	 * the current domain. If the object is ever used in this state then we
+	 * will require moving it again.
+	 */
+	if (!bo->resource) {
+		mem_flags = I915_BO_FLAG_STRUCT_PAGE;
+		mem_type = I915_PL_SYSTEM;
+		cache_level = I915_CACHE_NONE;
+	} else {
+		mem_flags = i915_ttm_cpu_maps_iomem(bo->resource) ? I915_BO_FLAG_IOMEM :
+			I915_BO_FLAG_STRUCT_PAGE;
+		mem_type = bo->resource->mem_type;
+		cache_level = i915_ttm_cache_level(to_i915(bo->base.dev), bo->resource,
+						   bo->ttm);
+	}
 
	/*
	 * If object was moved to an allowable region, update the object
	 * region to consider it migrated. Note that if it's currently not
	 * in an allowable region, it's evicted and we don't update the
	 * object region.
	 */
-	if (intel_region_to_ttm_type(obj->mm.region) != bo->resource->mem_type) {
+	if (intel_region_to_ttm_type(obj->mm.region) != mem_type) {
		for (i = 0; i < obj->mm.n_placements; ++i) {
			struct intel_memory_region *mr = obj->mm.placements[i];
 
-			if (intel_region_to_ttm_type(mr) == bo->resource->mem_type &&
+			if (intel_region_to_ttm_type(mr) == mem_type &&
			    mr != obj->mm.region) {
				i915_gem_object_release_memory_region(obj);
				i915_gem_object_init_memory_region(obj, mr);
@@ -125,12 +145,8 @@ void i915_ttm_adjust_gem_after_move(struct drm_i915_gem_object *obj)
	}
 
	obj->mem_flags &= ~(I915_BO_FLAG_STRUCT_PAGE | I915_BO_FLAG_IOMEM);
+	obj->mem_flags |= mem_flags;
 
-	obj->mem_flags |= i915_ttm_cpu_maps_iomem(bo->resource) ? I915_BO_FLAG_IOMEM :
-		I915_BO_FLAG_STRUCT_PAGE;
-
-	cache_level = i915_ttm_cache_level(to_i915(bo->base.dev), bo->resource,
-					   bo->ttm);
	i915_gem_object_set_cache_coherency(obj, cache_level);
 }
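The NULL bo->resource state handled above comes from TTM's "pipeline gutting": validating a buffer against an empty placement list tells the core to drop the backing store entirely, which is how purge and swap-out leave the object. Below is a minimal sketch of that trigger, assuming the usual ttm_bo_validate() behaviour for zero placements; the example_purge() helper itself is hypothetical:

#include <drm/ttm/ttm_bo_api.h>
#include <drm/ttm/ttm_placement.h>

static int example_purge(struct ttm_buffer_object *bo)
{
	struct ttm_operation_ctx ctx = { .interruptible = true };
	struct ttm_placement place = {};	/* zero placements */

	/*
	 * With no placements given, TTM guts the pipeline: the backing
	 * resource is freed and bo->resource is left NULL, which is the
	 * state i915_ttm_adjust_gem_after_move() now has to describe.
	 */
	return ttm_bo_validate(bo, &place, &ctx);
}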
@@ -565,6 +581,32 @@ int i915_ttm_move(struct ttm_buffer_object *bo, bool evict,
		return 0;
	}
 
+	if (!bo->resource) {
+		if (dst_mem->mem_type != TTM_PL_SYSTEM) {
+			hop->mem_type = TTM_PL_SYSTEM;
+			hop->flags = TTM_PL_FLAG_TEMPORARY;
+			return -EMULTIHOP;
+		}
+
+		/*
+		 * This is only reached when first creating the object, or if
+		 * the object was purged or swapped out (pipeline-gutting). For
+		 * the former we can safely skip all of the below since we are
+		 * only using a dummy SYSTEM placement here. And with the latter
+		 * we will always re-enter here with bo->resource set correctly
+		 * (as per the above), since this is part of a multi-hop
+		 * sequence, where at the end we can do the move for real.
+		 *
+		 * The special case here is when the dst_mem is TTM_PL_SYSTEM,
+		 * which doesn't require any kind of move, so it should be safe
+		 * to skip all the below and call ttm_bo_move_null() here, where
+		 * the caller in __i915_ttm_get_pages() will take care of the
+		 * rest, since we should have a valid ttm_tt.
+		 */
+		ttm_bo_move_null(bo, dst_mem);
+		return 0;
+	}
+
	ret = i915_ttm_move_notify(bo);
	if (ret)
		return ret;