 #include <drm/ttm/ttm_execbuf_util.h>
 #include <drm/ttm/ttm_module.h>
 #include "vmwgfx_fence.h"
+#include <linux/sync_file.h>

-#define VMWGFX_DRIVER_DATE "20170607"
+#define VMWGFX_DRIVER_NAME "vmwgfx"
+#define VMWGFX_DRIVER_DATE "20170612"
 #define VMWGFX_DRIVER_MAJOR 2
-#define VMWGFX_DRIVER_MINOR 13
+#define VMWGFX_DRIVER_MINOR 14
 #define VMWGFX_DRIVER_PATCHLEVEL 0
 #define VMWGFX_FILE_PAGE_OFFSET 0x00100000
 #define VMWGFX_FIFO_STATIC_SIZE (1024*1024)
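For reference, these name and version macros are typically the ones plugged into the driver's struct drm_driver descriptor in vmwgfx_drv.c; that initializer is not part of this diff, so the following is only a sketch using the standard drm_driver fields of this era:

/* Sketch: how the macros above are usually consumed (not shown in this diff). */
static struct drm_driver driver = {
	.name		= VMWGFX_DRIVER_NAME,
	.date		= VMWGFX_DRIVER_DATE,
	.major		= VMWGFX_DRIVER_MAJOR,
	.minor		= VMWGFX_DRIVER_MINOR,
	.patchlevel	= VMWGFX_DRIVER_PATCHLEVEL,
	/* fops, ioctl table and feature flags omitted */
};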
@@ -351,6 +353,12 @@ struct vmw_otable_batch {
 	struct ttm_buffer_object *otable_bo;
 };

+enum {
+	VMW_IRQTHREAD_FENCE,
+	VMW_IRQTHREAD_CMDBUF,
+	VMW_IRQTHREAD_MAX
+};
+
 struct vmw_private {
 	struct ttm_bo_device bdev;
 	struct ttm_bo_global_ref bo_global_ref;
@@ -529,6 +537,7 @@ struct vmw_private {
 	struct vmw_otable_batch otable_batch;

 	struct vmw_cmdbuf_man *cman;
+	DECLARE_BITMAP(irqthread_pending, VMW_IRQTHREAD_MAX);
 };

 static inline struct vmw_surface *vmw_res_to_srf(struct vmw_resource *res)
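The VMW_IRQTHREAD_* enum and the irqthread_pending bitmap added to struct vmw_private suggest the interrupt path is being split into a hard handler that merely latches which work is pending and a threaded handler that does the actual processing. The real handlers live in vmwgfx_irq.c/vmwgfx_cmdbuf.c and are not part of this header diff; the following is a hypothetical sketch of that pattern, with the status-register decoding stubbed out:

/* Hypothetical sketch only; vmw_hard_irq()/vmw_thread_fn() are made-up names. */
static irqreturn_t vmw_hard_irq(int irq, void *arg)
{
	struct vmw_private *dev_priv = arg;
	bool fence_pending = false;	/* would be decoded from the IRQ status register */
	bool cmdbuf_pending = false;	/* likewise */
	irqreturn_t ret = IRQ_HANDLED;

	if (fence_pending) {
		set_bit(VMW_IRQTHREAD_FENCE, dev_priv->irqthread_pending);
		ret = IRQ_WAKE_THREAD;	/* defer the work to the threaded half */
	}
	if (cmdbuf_pending) {
		set_bit(VMW_IRQTHREAD_CMDBUF, dev_priv->irqthread_pending);
		ret = IRQ_WAKE_THREAD;
	}
	return ret;
}

static irqreturn_t vmw_thread_fn(int irq, void *arg)
{
	struct vmw_private *dev_priv = arg;

	if (test_and_clear_bit(VMW_IRQTHREAD_FENCE, dev_priv->irqthread_pending))
		;	/* update fences / wake seqno waiters here */
	if (test_and_clear_bit(VMW_IRQTHREAD_CMDBUF, dev_priv->irqthread_pending))
		vmw_cmdbuf_irqthread(dev_priv->cman);	/* new helper, declared near the end of this diff */
	return IRQ_HANDLED;
}

vmw_cmdbuf_irqthread() is the new thread-context entry point that replaces vmw_cmdbuf_tasklet_schedule() in the last hunk below.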
@@ -561,24 +570,21 @@ static inline struct vmw_master *vmw_master(struct drm_master *master)
 static inline void vmw_write(struct vmw_private *dev_priv,
			      unsigned int offset, uint32_t value)
 {
-	unsigned long irq_flags;
-
-	spin_lock_irqsave(&dev_priv->hw_lock, irq_flags);
+	spin_lock(&dev_priv->hw_lock);
 	outl(offset, dev_priv->io_start + VMWGFX_INDEX_PORT);
 	outl(value, dev_priv->io_start + VMWGFX_VALUE_PORT);
-	spin_unlock_irqrestore(&dev_priv->hw_lock, irq_flags);
+	spin_unlock(&dev_priv->hw_lock);
 }

 static inline uint32_t vmw_read(struct vmw_private *dev_priv,
				 unsigned int offset)
 {
-	unsigned long irq_flags;
 	u32 val;

-	spin_lock_irqsave(&dev_priv->hw_lock, irq_flags);
+	spin_lock(&dev_priv->hw_lock);
 	outl(offset, dev_priv->io_start + VMWGFX_INDEX_PORT);
 	val = inl(dev_priv->io_start + VMWGFX_VALUE_PORT);
-	spin_unlock_irqrestore(&dev_priv->hw_lock, irq_flags);
+	spin_unlock(&dev_priv->hw_lock);

 	return val;
 }
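Dropping the _irqsave variants is only correct if hw_lock is never taken from hard interrupt context. The likely rationale here is the switch to a threaded interrupt handler elsewhere in this series: once the interrupt work runs in thread or process context, a plain spin_lock() is sufficient and cheaper. As a hedged sketch (handler names reused from the hypothetical split above, IRQF_SHARED assumed), such a handler pair would be installed with:

/* Hypothetical installation of the hard/threaded handler pair. */
ret = request_threaded_irq(irq, vmw_hard_irq, vmw_thread_fn,
			   IRQF_SHARED, VMWGFX_DRIVER_NAME, dev_priv);
if (ret)
	DRM_ERROR("Failed to install IRQ handler.\n");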
@@ -821,7 +827,8 @@ extern int vmw_execbuf_process(struct drm_file *file_priv,
			       uint32_t dx_context_handle,
			       struct drm_vmw_fence_rep __user
			       *user_fence_rep,
-			       struct vmw_fence_obj **out_fence);
+			       struct vmw_fence_obj **out_fence,
+			       uint32_t flags);
 extern void __vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv,
					     struct vmw_fence_obj *fence);
 extern void vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv);
@@ -836,23 +843,23 @@ extern void vmw_execbuf_copy_fence_user(struct vmw_private *dev_priv,
					struct drm_vmw_fence_rep __user
					*user_fence_rep,
					struct vmw_fence_obj *fence,
-					uint32_t fence_handle);
+					uint32_t fence_handle,
+					int32_t out_fence_fd,
+					struct sync_file *sync_file);
 extern int vmw_validate_single_buffer(struct vmw_private *dev_priv,
				       struct ttm_buffer_object *bo,
				       bool interruptible,
				       bool validate_as_mob);
-
+bool vmw_cmd_describe(const void *buf, u32 *size, char const **cmd);

 /**
  * IRQs and wating - vmwgfx_irq.c
  */

-extern irqreturn_t vmw_irq_handler(int irq, void *arg);
 extern int vmw_wait_seqno(struct vmw_private *dev_priv, bool lazy,
			   uint32_t seqno, bool interruptible,
			   unsigned long timeout);
-extern void vmw_irq_preinstall(struct drm_device *dev);
-extern int vmw_irq_postinstall(struct drm_device *dev);
+extern int vmw_irq_install(struct drm_device *dev, int irq);
 extern void vmw_irq_uninstall(struct drm_device *dev);
 extern bool vmw_seqno_passed(struct vmw_private *dev_priv,
			      uint32_t seqno);
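The new out_fence_fd and sync_file arguments connect the execbuf fence to the sync_file framework pulled in by the <linux/sync_file.h> include at the top of this diff. A hedged sketch of the usual export pattern (the real plumbing sits in vmwgfx_execbuf.c and is not shown here; 'fence' stands for a struct vmw_fence_obj whose first member 'base' is its struct dma_fence):

/* Sketch: reserve an fd, wrap the fence in a sync_file, and only
 * publish the fd once nothing else can fail.
 */
int32_t out_fence_fd = get_unused_fd_flags(O_CLOEXEC);
struct sync_file *sync_file = NULL;

if (out_fence_fd >= 0) {
	sync_file = sync_file_create(&fence->base);
	if (!sync_file) {
		put_unused_fd(out_fence_fd);
		out_fence_fd = -1;	/* tell userspace no fd was created */
	}
}

/* ... pass out_fence_fd and sync_file as the new last arguments of
 * vmw_execbuf_copy_fence_user() ... */

if (sync_file)
	fd_install(out_fence_fd, sync_file->file);

Separately, collapsing vmw_irq_preinstall()/vmw_irq_postinstall() into vmw_irq_install(dev, irq) and dropping vmw_irq_handler() from the header is consistent with the driver requesting its own (threaded) interrupt rather than going through drm_irq_install().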
@@ -1150,13 +1157,13 @@ extern void *vmw_cmdbuf_reserve(struct vmw_cmdbuf_man *man, size_t size,
 extern void vmw_cmdbuf_commit(struct vmw_cmdbuf_man *man, size_t size,
			       struct vmw_cmdbuf_header *header,
			       bool flush);
-extern void vmw_cmdbuf_tasklet_schedule(struct vmw_cmdbuf_man *man);
 extern void *vmw_cmdbuf_alloc(struct vmw_cmdbuf_man *man,
			       size_t size, bool interruptible,
			       struct vmw_cmdbuf_header **p_header);
 extern void vmw_cmdbuf_header_free(struct vmw_cmdbuf_header *header);
 extern int vmw_cmdbuf_cur_flush(struct vmw_cmdbuf_man *man,
				bool interruptible);
+extern void vmw_cmdbuf_irqthread(struct vmw_cmdbuf_man *man);


 /**