Skip to content

Commit d8524ae

Browse files
committed
Merge branch 'drm-fixes' of git://people.freedesktop.org/~airlied/linux
Pull drm fixes from Dave Airlie:
 - some small fixes for msm and exynos
 - a regression revert affecting nouveau users with old userspace
 - intel pageflip deadlock and gpu hang fixes, hsw modesetting hangs

* 'drm-fixes' of git://people.freedesktop.org/~airlied/linux: (22 commits)
  Revert "drm: mark context support as a legacy subsystem"
  drm/i915: Don't enable the cursor on a disable pipe
  drm/i915: do not update cursor in crtc mode set
  drm/exynos: fix return value check in lowlevel_buffer_allocate()
  drm/exynos: Fix address space warnings in exynos_drm_fbdev.c
  drm/exynos: Fix address space warning in exynos_drm_buf.c
  drm/exynos: Remove redundant OF dependency
  drm/msm: drop unnecessary set_need_resched()
  drm/i915: kill set_need_resched
  drm/msm: fix potential NULL pointer dereference
  drm/i915/dvo: set crtc timings again for panel fixed modes
  drm/i915/sdvo: Robustify the dtd<->drm_mode conversions
  drm/msm: workaround for missing irq
  drm/msm: return -EBUSY if bo still active
  drm/msm: fix return value check in ERR_PTR()
  drm/msm: fix cmdstream size check
  drm/msm: hangcheck harder
  drm/msm: handle read vs write fences
  drm/i915/sdvo: Fully translate sync flags in the dtd->mode conversion
  drm/i915: Use proper print format for debug prints
  ...
2 parents 68cf8d0 + 6ddf2ed commit d8524ae

23 files changed

+276
-197
lines changed

drivers/gpu/drm/drm_context.c

Lines changed: 8 additions & 65 deletions
Original file line number · Diff line number · Diff line change
@@ -42,6 +42,10 @@
4242

4343
#include <drm/drmP.h>
4444

45+
/******************************************************************/
46+
/** \name Context bitmap support */
47+
/*@{*/
48+
4549
/**
4650
* Free a handle from the context bitmap.
4751
*
@@ -52,48 +56,13 @@
5256
* in drm_device::ctx_idr, while holding the drm_device::struct_mutex
5357
* lock.
5458
*/
55-
static void drm_ctxbitmap_free(struct drm_device * dev, int ctx_handle)
59+
void drm_ctxbitmap_free(struct drm_device * dev, int ctx_handle)
5660
{
57-
if (drm_core_check_feature(dev, DRIVER_MODESET))
58-
return;
59-
6061
mutex_lock(&dev->struct_mutex);
6162
idr_remove(&dev->ctx_idr, ctx_handle);
6263
mutex_unlock(&dev->struct_mutex);
6364
}
6465

65-
/******************************************************************/
66-
/** \name Context bitmap support */
67-
/*@{*/
68-
69-
void drm_legacy_ctxbitmap_release(struct drm_device *dev,
70-
struct drm_file *file_priv)
71-
{
72-
if (drm_core_check_feature(dev, DRIVER_MODESET))
73-
return;
74-
75-
mutex_lock(&dev->ctxlist_mutex);
76-
if (!list_empty(&dev->ctxlist)) {
77-
struct drm_ctx_list *pos, *n;
78-
79-
list_for_each_entry_safe(pos, n, &dev->ctxlist, head) {
80-
if (pos->tag == file_priv &&
81-
pos->handle != DRM_KERNEL_CONTEXT) {
82-
if (dev->driver->context_dtor)
83-
dev->driver->context_dtor(dev,
84-
pos->handle);
85-
86-
drm_ctxbitmap_free(dev, pos->handle);
87-
88-
list_del(&pos->head);
89-
kfree(pos);
90-
--dev->ctx_count;
91-
}
92-
}
93-
}
94-
mutex_unlock(&dev->ctxlist_mutex);
95-
}
96-
9766
/**
9867
* Context bitmap allocation.
9968
*
@@ -121,12 +90,10 @@ static int drm_ctxbitmap_next(struct drm_device * dev)
12190
*
12291
* Initialise the drm_device::ctx_idr
12392
*/
124-
void drm_legacy_ctxbitmap_init(struct drm_device * dev)
93+
int drm_ctxbitmap_init(struct drm_device * dev)
12594
{
126-
if (drm_core_check_feature(dev, DRIVER_MODESET))
127-
return;
128-
12995
idr_init(&dev->ctx_idr);
96+
return 0;
13097
}
13198

13299
/**
@@ -137,7 +104,7 @@ void drm_legacy_ctxbitmap_init(struct drm_device * dev)
137104
* Free all idr members using drm_ctx_sarea_free helper function
138105
* while holding the drm_device::struct_mutex lock.
139106
*/
140-
void drm_legacy_ctxbitmap_cleanup(struct drm_device * dev)
107+
void drm_ctxbitmap_cleanup(struct drm_device * dev)
141108
{
142109
mutex_lock(&dev->struct_mutex);
143110
idr_destroy(&dev->ctx_idr);
@@ -169,9 +136,6 @@ int drm_getsareactx(struct drm_device *dev, void *data,
169136
struct drm_local_map *map;
170137
struct drm_map_list *_entry;
171138

172-
if (drm_core_check_feature(dev, DRIVER_MODESET))
173-
return -EINVAL;
174-
175139
mutex_lock(&dev->struct_mutex);
176140

177141
map = idr_find(&dev->ctx_idr, request->ctx_id);
@@ -216,9 +180,6 @@ int drm_setsareactx(struct drm_device *dev, void *data,
216180
struct drm_local_map *map = NULL;
217181
struct drm_map_list *r_list = NULL;
218182

219-
if (drm_core_check_feature(dev, DRIVER_MODESET))
220-
return -EINVAL;
221-
222183
mutex_lock(&dev->struct_mutex);
223184
list_for_each_entry(r_list, &dev->maplist, head) {
224185
if (r_list->map
@@ -319,9 +280,6 @@ int drm_resctx(struct drm_device *dev, void *data,
319280
struct drm_ctx ctx;
320281
int i;
321282

322-
if (drm_core_check_feature(dev, DRIVER_MODESET))
323-
return -EINVAL;
324-
325283
if (res->count >= DRM_RESERVED_CONTEXTS) {
326284
memset(&ctx, 0, sizeof(ctx));
327285
for (i = 0; i < DRM_RESERVED_CONTEXTS; i++) {
@@ -352,9 +310,6 @@ int drm_addctx(struct drm_device *dev, void *data,
352310
struct drm_ctx_list *ctx_entry;
353311
struct drm_ctx *ctx = data;
354312

355-
if (drm_core_check_feature(dev, DRIVER_MODESET))
356-
return -EINVAL;
357-
358313
ctx->handle = drm_ctxbitmap_next(dev);
359314
if (ctx->handle == DRM_KERNEL_CONTEXT) {
360315
/* Skip kernel's context and get a new one. */
@@ -398,9 +353,6 @@ int drm_getctx(struct drm_device *dev, void *data, struct drm_file *file_priv)
398353
{
399354
struct drm_ctx *ctx = data;
400355

401-
if (drm_core_check_feature(dev, DRIVER_MODESET))
402-
return -EINVAL;
403-
404356
/* This is 0, because we don't handle any context flags */
405357
ctx->flags = 0;
406358

@@ -423,9 +375,6 @@ int drm_switchctx(struct drm_device *dev, void *data,
423375
{
424376
struct drm_ctx *ctx = data;
425377

426-
if (drm_core_check_feature(dev, DRIVER_MODESET))
427-
return -EINVAL;
428-
429378
DRM_DEBUG("%d\n", ctx->handle);
430379
return drm_context_switch(dev, dev->last_context, ctx->handle);
431380
}
@@ -446,9 +395,6 @@ int drm_newctx(struct drm_device *dev, void *data,
446395
{
447396
struct drm_ctx *ctx = data;
448397

449-
if (drm_core_check_feature(dev, DRIVER_MODESET))
450-
return -EINVAL;
451-
452398
DRM_DEBUG("%d\n", ctx->handle);
453399
drm_context_switch_complete(dev, file_priv, ctx->handle);
454400

@@ -471,9 +417,6 @@ int drm_rmctx(struct drm_device *dev, void *data,
471417
{
472418
struct drm_ctx *ctx = data;
473419

474-
if (drm_core_check_feature(dev, DRIVER_MODESET))
475-
return -EINVAL;
476-
477420
DRM_DEBUG("%d\n", ctx->handle);
478421
if (ctx->handle != DRM_KERNEL_CONTEXT) {
479422
if (dev->driver->context_dtor)

drivers/gpu/drm/drm_fops.c

Lines changed: 20 additions & 1 deletion
Original file line number · Diff line number · Diff line change
@@ -439,7 +439,26 @@ int drm_release(struct inode *inode, struct file *filp)
439439
if (dev->driver->driver_features & DRIVER_GEM)
440440
drm_gem_release(dev, file_priv);
441441

442-
drm_legacy_ctxbitmap_release(dev, file_priv);
442+
mutex_lock(&dev->ctxlist_mutex);
443+
if (!list_empty(&dev->ctxlist)) {
444+
struct drm_ctx_list *pos, *n;
445+
446+
list_for_each_entry_safe(pos, n, &dev->ctxlist, head) {
447+
if (pos->tag == file_priv &&
448+
pos->handle != DRM_KERNEL_CONTEXT) {
449+
if (dev->driver->context_dtor)
450+
dev->driver->context_dtor(dev,
451+
pos->handle);
452+
453+
drm_ctxbitmap_free(dev, pos->handle);
454+
455+
list_del(&pos->head);
456+
kfree(pos);
457+
--dev->ctx_count;
458+
}
459+
}
460+
}
461+
mutex_unlock(&dev->ctxlist_mutex);
443462

444463
mutex_lock(&dev->struct_mutex);
445464

drivers/gpu/drm/drm_stub.c

Lines changed: 8 additions & 2 deletions
Original file line number · Diff line number · Diff line change
@@ -292,7 +292,13 @@ int drm_fill_in_dev(struct drm_device *dev,
292292
goto error_out_unreg;
293293
}
294294

295-
drm_legacy_ctxbitmap_init(dev);
295+
296+
297+
retcode = drm_ctxbitmap_init(dev);
298+
if (retcode) {
299+
DRM_ERROR("Cannot allocate memory for context bitmap.\n");
300+
goto error_out_unreg;
301+
}
296302

297303
if (driver->driver_features & DRIVER_GEM) {
298304
retcode = drm_gem_init(dev);
@@ -446,7 +452,7 @@ void drm_put_dev(struct drm_device *dev)
446452
drm_rmmap(dev, r_list->map);
447453
drm_ht_remove(&dev->map_hash);
448454

449-
drm_legacy_ctxbitmap_cleanup(dev);
455+
drm_ctxbitmap_cleanup(dev);
450456

451457
if (drm_core_check_feature(dev, DRIVER_MODESET))
452458
drm_put_minor(&dev->control);

drivers/gpu/drm/exynos/Kconfig

Lines changed: 1 addition & 1 deletion
Original file line number · Diff line number · Diff line change
@@ -56,7 +56,7 @@ config DRM_EXYNOS_IPP
5656

5757
config DRM_EXYNOS_FIMC
5858
bool "Exynos DRM FIMC"
59-
depends on DRM_EXYNOS_IPP && MFD_SYSCON && OF
59+
depends on DRM_EXYNOS_IPP && MFD_SYSCON
6060
help
6161
Choose this option if you want to use Exynos FIMC for DRM.
6262

drivers/gpu/drm/exynos/exynos_drm_buf.c

Lines changed: 4 additions & 3 deletions
Original file line number · Diff line number · Diff line change
@@ -63,7 +63,8 @@ static int lowlevel_buffer_allocate(struct drm_device *dev,
6363
return -ENOMEM;
6464
}
6565

66-
buf->kvaddr = dma_alloc_attrs(dev->dev, buf->size,
66+
buf->kvaddr = (void __iomem *)dma_alloc_attrs(dev->dev,
67+
buf->size,
6768
&buf->dma_addr, GFP_KERNEL,
6869
&buf->dma_attrs);
6970
if (!buf->kvaddr) {
@@ -90,9 +91,9 @@ static int lowlevel_buffer_allocate(struct drm_device *dev,
9091
}
9192

9293
buf->sgt = drm_prime_pages_to_sg(buf->pages, nr_pages);
93-
if (!buf->sgt) {
94+
if (IS_ERR(buf->sgt)) {
9495
DRM_ERROR("failed to get sg table.\n");
95-
ret = -ENOMEM;
96+
ret = PTR_ERR(buf->sgt);
9697
goto err_free_attrs;
9798
}
9899

drivers/gpu/drm/exynos/exynos_drm_fbdev.c

Lines changed: 3 additions & 2 deletions
Original file line number · Diff line number · Diff line change
@@ -99,12 +99,13 @@ static int exynos_drm_fbdev_update(struct drm_fb_helper *helper,
9999
if (is_drm_iommu_supported(dev)) {
100100
unsigned int nr_pages = buffer->size >> PAGE_SHIFT;
101101

102-
buffer->kvaddr = vmap(buffer->pages, nr_pages, VM_MAP,
102+
buffer->kvaddr = (void __iomem *) vmap(buffer->pages,
103+
nr_pages, VM_MAP,
103104
pgprot_writecombine(PAGE_KERNEL));
104105
} else {
105106
phys_addr_t dma_addr = buffer->dma_addr;
106107
if (dma_addr)
107-
buffer->kvaddr = phys_to_virt(dma_addr);
108+
buffer->kvaddr = (void __iomem *)phys_to_virt(dma_addr);
108109
else
109110
buffer->kvaddr = (void __iomem *)NULL;
110111
}

drivers/gpu/drm/i915/i915_gem.c

Lines changed: 4 additions & 7 deletions
Original file line number · Diff line number · Diff line change
@@ -1392,14 +1392,11 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
13921392
if (i915_terminally_wedged(&dev_priv->gpu_error))
13931393
return VM_FAULT_SIGBUS;
13941394
case -EAGAIN:
1395-
/* Give the error handler a chance to run and move the
1396-
* objects off the GPU active list. Next time we service the
1397-
* fault, we should be able to transition the page into the
1398-
* GTT without touching the GPU (and so avoid further
1399-
* EIO/EGAIN). If the GPU is wedged, then there is no issue
1400-
* with coherency, just lost writes.
1395+
/*
1396+
* EAGAIN means the gpu is hung and we'll wait for the error
1397+
* handler to reset everything when re-faulting in
1398+
* i915_mutex_lock_interruptible.
14011399
*/
1402-
set_need_resched();
14031400
case 0:
14041401
case -ERESTARTSYS:
14051402
case -EINTR:

drivers/gpu/drm/i915/i915_irq.c

Lines changed: 54 additions & 14 deletions
Original file line number · Diff line number · Diff line change
@@ -1469,6 +1469,34 @@ static irqreturn_t ironlake_irq_handler(int irq, void *arg)
14691469
return ret;
14701470
}
14711471

1472+
static void i915_error_wake_up(struct drm_i915_private *dev_priv,
1473+
bool reset_completed)
1474+
{
1475+
struct intel_ring_buffer *ring;
1476+
int i;
1477+
1478+
/*
1479+
* Notify all waiters for GPU completion events that reset state has
1480+
* been changed, and that they need to restart their wait after
1481+
* checking for potential errors (and bail out to drop locks if there is
1482+
* a gpu reset pending so that i915_error_work_func can acquire them).
1483+
*/
1484+
1485+
/* Wake up __wait_seqno, potentially holding dev->struct_mutex. */
1486+
for_each_ring(ring, dev_priv, i)
1487+
wake_up_all(&ring->irq_queue);
1488+
1489+
/* Wake up intel_crtc_wait_for_pending_flips, holding crtc->mutex. */
1490+
wake_up_all(&dev_priv->pending_flip_queue);
1491+
1492+
/*
1493+
* Signal tasks blocked in i915_gem_wait_for_error that the pending
1494+
* reset state is cleared.
1495+
*/
1496+
if (reset_completed)
1497+
wake_up_all(&dev_priv->gpu_error.reset_queue);
1498+
}
1499+
14721500
/**
14731501
* i915_error_work_func - do process context error handling work
14741502
* @work: work struct
@@ -1483,11 +1511,10 @@ static void i915_error_work_func(struct work_struct *work)
14831511
drm_i915_private_t *dev_priv = container_of(error, drm_i915_private_t,
14841512
gpu_error);
14851513
struct drm_device *dev = dev_priv->dev;
1486-
struct intel_ring_buffer *ring;
14871514
char *error_event[] = { I915_ERROR_UEVENT "=1", NULL };
14881515
char *reset_event[] = { I915_RESET_UEVENT "=1", NULL };
14891516
char *reset_done_event[] = { I915_ERROR_UEVENT "=0", NULL };
1490-
int i, ret;
1517+
int ret;
14911518

14921519
kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, error_event);
14931520

@@ -1506,8 +1533,16 @@ static void i915_error_work_func(struct work_struct *work)
15061533
kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE,
15071534
reset_event);
15081535

1536+
/*
1537+
* All state reset _must_ be completed before we update the
1538+
* reset counter, for otherwise waiters might miss the reset
1539+
* pending state and not properly drop locks, resulting in
1540+
* deadlocks with the reset work.
1541+
*/
15091542
ret = i915_reset(dev);
15101543

1544+
intel_display_handle_reset(dev);
1545+
15111546
if (ret == 0) {
15121547
/*
15131548
* After all the gem state is reset, increment the reset
@@ -1528,12 +1563,11 @@ static void i915_error_work_func(struct work_struct *work)
15281563
atomic_set(&error->reset_counter, I915_WEDGED);
15291564
}
15301565

1531-
for_each_ring(ring, dev_priv, i)
1532-
wake_up_all(&ring->irq_queue);
1533-
1534-
intel_display_handle_reset(dev);
1535-
1536-
wake_up_all(&dev_priv->gpu_error.reset_queue);
1566+
/*
1567+
* Note: The wake_up also serves as a memory barrier so that
1568+
* waiters see the update value of the reset counter atomic_t.
1569+
*/
1570+
i915_error_wake_up(dev_priv, true);
15371571
}
15381572
}
15391573

@@ -1642,8 +1676,6 @@ static void i915_report_and_clear_eir(struct drm_device *dev)
16421676
void i915_handle_error(struct drm_device *dev, bool wedged)
16431677
{
16441678
struct drm_i915_private *dev_priv = dev->dev_private;
1645-
struct intel_ring_buffer *ring;
1646-
int i;
16471679

16481680
i915_capture_error_state(dev);
16491681
i915_report_and_clear_eir(dev);
@@ -1653,11 +1685,19 @@ void i915_handle_error(struct drm_device *dev, bool wedged)
16531685
&dev_priv->gpu_error.reset_counter);
16541686

16551687
/*
1656-
* Wakeup waiting processes so that the reset work item
1657-
* doesn't deadlock trying to grab various locks.
1688+
* Wakeup waiting processes so that the reset work function
1689+
* i915_error_work_func doesn't deadlock trying to grab various
1690+
* locks. By bumping the reset counter first, the woken
1691+
* processes will see a reset in progress and back off,
1692+
* releasing their locks and then wait for the reset completion.
1693+
* We must do this for _all_ gpu waiters that might hold locks
1694+
* that the reset work needs to acquire.
1695+
*
1696+
* Note: The wake_up serves as the required memory barrier to
1697+
* ensure that the waiters see the updated value of the reset
1698+
* counter atomic_t.
16581699
*/
1659-
for_each_ring(ring, dev_priv, i)
1660-
wake_up_all(&ring->irq_queue);
1700+
i915_error_wake_up(dev_priv, false);
16611701
}
16621702

16631703
/*

0 commit comments

Comments (0)