
Commit bfd616f

Author: Thomas Zimmermann (committed)
Merge tag 'tags/topic/i915-ttm-2021-06-11' into drm-misc-next
drm-misc and drm-intel pull request for topic/i915-ttm:
- Convert i915 lmem handling to ttm.
- Add a patch to temporarily add a driver_private member to vma_node.
- Use this to allow mixed object mmap handling for i915.
2 parents (00f4471 + cf3e3e8), commit bfd616f

24 files changed: +1039, -250 lines
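Before the per-file diffs, a brief orientation on the mmap piece, since it is the subtlest part of the merge. The i915_gem_mman.c hunks below make i915_gem_mmap() branch on whether the looked-up vma_node carries a driver_private pointer: if it does, the object is TTM-backed and its obj->ops->mmap_ops drive the mapping; if not, the existing i915_mmap_offset path is used. What follows is a minimal, self-contained sketch of that dispatch rule; none of the types or names are real i915/TTM definitions, they are illustrative stand-ins only.

/*
 * Illustrative, stand-alone model of the dispatch added to i915_gem_mmap().
 * None of these types are the real i915/TTM structures; they exist only to
 * show the control flow of the "driver_private selects the TTM path" idea.
 */
#include <stddef.h>
#include <stdio.h>

struct fake_vm_ops { const char *name; };

struct fake_vma_node {
	void *driver_private;	/* set only for TTM-backed objects */
};

struct fake_gem_object {
	struct fake_vma_node vma_node;
	const struct fake_vm_ops *mmap_ops;	/* non-NULL => TTM-backed */
};

/*
 * Mirrors the split between "vma->vm_ops = obj->ops->mmap_ops" and the
 * legacy "vma->vm_private_data = mmo" assignment in the hunks below.
 */
static const char *pick_mmap_path(const struct fake_gem_object *obj)
{
	if (obj->vma_node.driver_private && obj->mmap_ops)
		return obj->mmap_ops->name;	/* TTM vm_ops handle faults */
	return "legacy i915_mmap_offset path";
}

int main(void)
{
	static const struct fake_vm_ops ttm_ops = { .name = "TTM mmap_ops" };
	struct fake_gem_object legacy = { 0 };
	struct fake_gem_object ttm_backed = { .mmap_ops = &ttm_ops };

	ttm_backed.vma_node.driver_private = &ttm_backed;

	printf("legacy object:     %s\n", pick_mmap_path(&legacy));
	printf("TTM-backed object: %s\n", pick_mmap_path(&ttm_backed));
	return 0;
}

In the actual patches the two cases are kept strictly apart: mmap_offset_attach() gains a GEM_BUG_ON() against objects that provide their own mmap_offset/mmap_ops, and the RCU lookup in i915_gem_mmap() asserts that driver_private and obj->ops->mmap_ops always agree.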

drivers/gpu/drm/drm_gem.c

Lines changed: 0 additions & 9 deletions
@@ -1148,15 +1148,6 @@ int drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
 		return -EACCES;
 	}
 
-	if (node->readonly) {
-		if (vma->vm_flags & VM_WRITE) {
-			drm_gem_object_put(obj);
-			return -EINVAL;
-		}
-
-		vma->vm_flags &= ~VM_MAYWRITE;
-	}
-
 	ret = drm_gem_mmap_obj(obj, drm_vma_node_size(node) << PAGE_SHIFT,
 			       vma);
drivers/gpu/drm/i915/Makefile

Lines changed: 1 addition & 0 deletions
@@ -155,6 +155,7 @@ gem-y += \
 	gem/i915_gem_stolen.o \
 	gem/i915_gem_throttle.o \
 	gem/i915_gem_tiling.o \
+	gem/i915_gem_ttm.o \
 	gem/i915_gem_userptr.o \
 	gem/i915_gem_wait.o \
 	gem/i915_gemfs.o

drivers/gpu/drm/i915/display/intel_display.c

Lines changed: 1 addition & 1 deletion
@@ -11771,7 +11771,7 @@ intel_user_framebuffer_create(struct drm_device *dev,
 
 	/* object is backed with LMEM for discrete */
 	i915 = to_i915(obj->base.dev);
-	if (HAS_LMEM(i915) && !i915_gem_object_is_lmem(obj)) {
+	if (HAS_LMEM(i915) && !i915_gem_object_validates_to_lmem(obj)) {
 		/* object is "remote", not in local memory */
 		i915_gem_object_put(obj);
 		return ERR_PTR(-EREMOTE);

drivers/gpu/drm/i915/gem/i915_gem_create.c

Lines changed: 3 additions & 6 deletions
@@ -85,13 +85,10 @@ i915_gem_setup(struct drm_i915_gem_object *obj, u64 size)
 		return -E2BIG;
 
 	/*
-	 * For now resort to CPU based clearing for device local-memory, in the
-	 * near future this will use the blitter engine for accelerated, GPU
-	 * based clearing.
+	 * I915_BO_ALLOC_USER will make sure the object is cleared before
+	 * any user access.
 	 */
-	flags = 0;
-	if (mr->type == INTEL_MEMORY_LOCAL)
-		flags = I915_BO_ALLOC_CPU_CLEAR;
+	flags = I915_BO_ALLOC_USER;
 
 	ret = mr->ops->init_object(mr, obj, size, flags);
 	if (ret)

drivers/gpu/drm/i915/gem/i915_gem_lmem.c

Lines changed: 41 additions & 85 deletions
@@ -4,74 +4,10 @@
  */
 
 #include "intel_memory_region.h"
-#include "intel_region_ttm.h"
 #include "gem/i915_gem_region.h"
 #include "gem/i915_gem_lmem.h"
 #include "i915_drv.h"
 
-static void lmem_put_pages(struct drm_i915_gem_object *obj,
-			   struct sg_table *pages)
-{
-	intel_region_ttm_node_free(obj->mm.region, obj->mm.st_mm_node);
-	obj->mm.dirty = false;
-	sg_free_table(pages);
-	kfree(pages);
-}
-
-static int lmem_get_pages(struct drm_i915_gem_object *obj)
-{
-	unsigned int flags;
-	struct sg_table *pages;
-
-	flags = I915_ALLOC_MIN_PAGE_SIZE;
-	if (obj->flags & I915_BO_ALLOC_CONTIGUOUS)
-		flags |= I915_ALLOC_CONTIGUOUS;
-
-	obj->mm.st_mm_node = intel_region_ttm_node_alloc(obj->mm.region,
-							 obj->base.size,
-							 flags);
-	if (IS_ERR(obj->mm.st_mm_node))
-		return PTR_ERR(obj->mm.st_mm_node);
-
-	/* Range manager is always contigous */
-	if (obj->mm.region->is_range_manager)
-		obj->flags |= I915_BO_ALLOC_CONTIGUOUS;
-	pages = intel_region_ttm_node_to_st(obj->mm.region, obj->mm.st_mm_node);
-	if (IS_ERR(pages)) {
-		intel_region_ttm_node_free(obj->mm.region, obj->mm.st_mm_node);
-		return PTR_ERR(pages);
-	}
-
-	__i915_gem_object_set_pages(obj, pages, i915_sg_dma_sizes(pages->sgl));
-
-	if (obj->flags & I915_BO_ALLOC_CPU_CLEAR) {
-		void __iomem *vaddr =
-			i915_gem_object_lmem_io_map(obj, 0, obj->base.size);
-
-		if (!vaddr) {
-			struct sg_table *pages =
-				__i915_gem_object_unset_pages(obj);
-
-			if (!IS_ERR_OR_NULL(pages))
-				lmem_put_pages(obj, pages);
-		}
-
-		memset_io(vaddr, 0, obj->base.size);
-		io_mapping_unmap(vaddr);
-	}
-
-	return 0;
-}
-
-const struct drm_i915_gem_object_ops i915_gem_lmem_obj_ops = {
-	.name = "i915_gem_object_lmem",
-	.flags = I915_GEM_OBJECT_HAS_IOMEM,
-
-	.get_pages = lmem_get_pages,
-	.put_pages = lmem_put_pages,
-	.release = i915_gem_object_release_memory_region,
-};
-
 void __iomem *
 i915_gem_object_lmem_io_map(struct drm_i915_gem_object *obj,
 			    unsigned long n,
@@ -87,10 +23,50 @@ i915_gem_object_lmem_io_map(struct drm_i915_gem_object *obj,
 	return io_mapping_map_wc(&obj->mm.region->iomap, offset, size);
 }
 
+/**
+ * i915_gem_object_validates_to_lmem - Whether the object is resident in
+ * lmem when pages are present.
+ * @obj: The object to check.
+ *
+ * Migratable objects residency may change from under us if the object is
+ * not pinned or locked. This function is intended to be used to check whether
+ * the object can only reside in lmem when pages are present.
+ *
+ * Return: Whether the object is always resident in lmem when pages are
+ * present.
+ */
+bool i915_gem_object_validates_to_lmem(struct drm_i915_gem_object *obj)
+{
+	struct intel_memory_region *mr = READ_ONCE(obj->mm.region);
+
+	return !i915_gem_object_migratable(obj) &&
+		mr && (mr->type == INTEL_MEMORY_LOCAL ||
+		       mr->type == INTEL_MEMORY_STOLEN_LOCAL);
+}
+
+/**
+ * i915_gem_object_is_lmem - Whether the object is resident in
+ * lmem
+ * @obj: The object to check.
+ *
+ * Even if an object is allowed to migrate and change memory region,
+ * this function checks whether it will always be present in lmem when
+ * valid *or* if that's not the case, whether it's currently resident in lmem.
+ * For migratable and evictable objects, the latter only makes sense when
+ * the object is locked.
+ *
+ * Return: Whether the object migratable but resident in lmem, or not
+ * migratable and will be present in lmem when valid.
+ */
 bool i915_gem_object_is_lmem(struct drm_i915_gem_object *obj)
 {
-	struct intel_memory_region *mr = obj->mm.region;
+	struct intel_memory_region *mr = READ_ONCE(obj->mm.region);
 
+#ifdef CONFIG_LOCKDEP
+	if (i915_gem_object_migratable(obj) &&
+	    i915_gem_object_evictable(obj))
+		assert_object_held(obj);
+#endif
 	return mr && (mr->type == INTEL_MEMORY_LOCAL ||
 		      mr->type == INTEL_MEMORY_STOLEN_LOCAL);
 }
@@ -103,23 +79,3 @@ i915_gem_object_create_lmem(struct drm_i915_private *i915,
 	return i915_gem_object_create_region(i915->mm.regions[INTEL_REGION_LMEM],
 					     size, flags);
 }
-
-int __i915_gem_lmem_object_init(struct intel_memory_region *mem,
-				struct drm_i915_gem_object *obj,
-				resource_size_t size,
-				unsigned int flags)
-{
-	static struct lock_class_key lock_class;
-	struct drm_i915_private *i915 = mem->i915;
-
-	drm_gem_private_object_init(&i915->drm, &obj->base, size);
-	i915_gem_object_init(obj, &i915_gem_lmem_obj_ops, &lock_class, flags);
-
-	obj->read_domains = I915_GEM_DOMAIN_WC | I915_GEM_DOMAIN_GTT;
-
-	i915_gem_object_set_cache_coherency(obj, I915_CACHE_NONE);
-
-	i915_gem_object_init_memory_region(obj, mem);
-
-	return 0;
-}

drivers/gpu/drm/i915/gem/i915_gem_lmem.h

Lines changed: 0 additions & 5 deletions
@@ -26,9 +26,4 @@ i915_gem_object_create_lmem(struct drm_i915_private *i915,
 			    resource_size_t size,
 			    unsigned int flags);
 
-int __i915_gem_lmem_object_init(struct intel_memory_region *mem,
-				struct drm_i915_gem_object *obj,
-				resource_size_t size,
-				unsigned int flags);
-
 #endif /* !__I915_GEM_LMEM_H */

drivers/gpu/drm/i915/gem/i915_gem_mman.c

Lines changed: 57 additions & 26 deletions
@@ -19,6 +19,7 @@
 #include "i915_gem_mman.h"
 #include "i915_trace.h"
 #include "i915_user_extensions.h"
+#include "i915_gem_ttm.h"
 #include "i915_vma.h"
 
 static inline bool
@@ -623,6 +624,8 @@ mmap_offset_attach(struct drm_i915_gem_object *obj,
 	struct i915_mmap_offset *mmo;
 	int err;
 
+	GEM_BUG_ON(obj->ops->mmap_offset || obj->ops->mmap_ops);
+
 	mmo = lookup_mmo(obj, mmap_type);
 	if (mmo)
 		goto out;
@@ -665,40 +668,47 @@ mmap_offset_attach(struct drm_i915_gem_object *obj,
 }
 
 static int
-__assign_mmap_offset(struct drm_file *file,
-		     u32 handle,
+__assign_mmap_offset(struct drm_i915_gem_object *obj,
 		     enum i915_mmap_type mmap_type,
-		     u64 *offset)
+		     u64 *offset, struct drm_file *file)
 {
-	struct drm_i915_gem_object *obj;
 	struct i915_mmap_offset *mmo;
-	int err;
 
-	obj = i915_gem_object_lookup(file, handle);
-	if (!obj)
-		return -ENOENT;
+	if (i915_gem_object_never_mmap(obj))
+		return -ENODEV;
 
-	if (i915_gem_object_never_mmap(obj)) {
-		err = -ENODEV;
-		goto out;
+	if (obj->ops->mmap_offset) {
+		*offset = obj->ops->mmap_offset(obj);
+		return 0;
 	}
 
 	if (mmap_type != I915_MMAP_TYPE_GTT &&
 	    !i915_gem_object_has_struct_page(obj) &&
-	    !i915_gem_object_type_has(obj, I915_GEM_OBJECT_HAS_IOMEM)) {
-		err = -ENODEV;
-		goto out;
-	}
+	    !i915_gem_object_type_has(obj, I915_GEM_OBJECT_HAS_IOMEM))
+		return -ENODEV;
 
 	mmo = mmap_offset_attach(obj, mmap_type, file);
-	if (IS_ERR(mmo)) {
-		err = PTR_ERR(mmo);
-		goto out;
-	}
+	if (IS_ERR(mmo))
+		return PTR_ERR(mmo);
 
 	*offset = drm_vma_node_offset_addr(&mmo->vma_node);
-	err = 0;
-out:
+	return 0;
+}
+
+static int
+__assign_mmap_offset_handle(struct drm_file *file,
+			    u32 handle,
+			    enum i915_mmap_type mmap_type,
+			    u64 *offset)
+{
+	struct drm_i915_gem_object *obj;
+	int err;
+
+	obj = i915_gem_object_lookup(file, handle);
+	if (!obj)
+		return -ENOENT;
+
+	err = __assign_mmap_offset(obj, mmap_type, offset, file);
 	i915_gem_object_put(obj);
 	return err;
 }
@@ -718,7 +728,7 @@ i915_gem_dumb_mmap_offset(struct drm_file *file,
 	else
 		mmap_type = I915_MMAP_TYPE_GTT;
 
-	return __assign_mmap_offset(file, handle, mmap_type, offset);
+	return __assign_mmap_offset_handle(file, handle, mmap_type, offset);
 }
 
 /**
@@ -786,7 +796,7 @@ i915_gem_mmap_offset_ioctl(struct drm_device *dev, void *data,
 		return -EINVAL;
 	}
 
-	return __assign_mmap_offset(file, args->handle, type, &args->offset);
+	return __assign_mmap_offset_handle(file, args->handle, type, &args->offset);
 }
 
 static void vm_open(struct vm_area_struct *vma)
@@ -890,8 +900,18 @@ int i915_gem_mmap(struct file *filp, struct vm_area_struct *vma)
 		 * destroyed and will be invalid when the vma manager lock
 		 * is released.
 		 */
-		mmo = container_of(node, struct i915_mmap_offset, vma_node);
-		obj = i915_gem_object_get_rcu(mmo->obj);
+		if (!node->driver_private) {
+			mmo = container_of(node, struct i915_mmap_offset, vma_node);
+			obj = i915_gem_object_get_rcu(mmo->obj);
+
+			GEM_BUG_ON(obj && obj->ops->mmap_ops);
+		} else {
+			obj = i915_gem_object_get_rcu
+				(container_of(node, struct drm_i915_gem_object,
+					      base.vma_node));
+
+			GEM_BUG_ON(obj && !obj->ops->mmap_ops);
+		}
 	}
 	drm_vma_offset_unlock_lookup(dev->vma_offset_manager);
 	rcu_read_unlock();
@@ -913,7 +933,9 @@ int i915_gem_mmap(struct file *filp, struct vm_area_struct *vma)
 	}
 
 	vma->vm_flags |= VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;
-	vma->vm_private_data = mmo;
+
+	if (i915_gem_object_has_iomem(obj))
+		vma->vm_flags |= VM_IO;
 
 	/*
 	 * We keep the ref on mmo->obj, not vm_file, but we require
@@ -927,6 +949,15 @@ int i915_gem_mmap(struct file *filp, struct vm_area_struct *vma)
 	/* Drop the initial creation reference, the vma is now holding one. */
 	fput(anon);
 
+	if (obj->ops->mmap_ops) {
+		vma->vm_page_prot = pgprot_decrypted(vm_get_page_prot(vma->vm_flags));
+		vma->vm_ops = obj->ops->mmap_ops;
+		vma->vm_private_data = node->driver_private;
+		return 0;
+	}
+
+	vma->vm_private_data = mmo;
+
 	switch (mmo->mmap_type) {
 	case I915_MMAP_TYPE_WC:
 		vma->vm_page_prot =
