 
 #include "virtgpu_drv.h"
 
+MODULE_IMPORT_NS(DMA_BUF);
+
 static int virtgpu_virtio_get_uuid(struct dma_buf *buf,
 				   uuid_t *uuid)
 {
@@ -142,6 +144,46 @@ struct dma_buf *virtgpu_gem_prime_export(struct drm_gem_object *obj,
 	return buf;
 }
 
+int virtgpu_dma_buf_import_sgt(struct virtio_gpu_mem_entry **ents,
+			       unsigned int *nents,
+			       struct virtio_gpu_object *bo,
+			       struct dma_buf_attachment *attach)
+{
+	struct scatterlist *sl;
+	struct sg_table *sgt;
+	long i, ret;
+
+	dma_resv_assert_held(attach->dmabuf->resv);
+
+	ret = dma_resv_wait_timeout(attach->dmabuf->resv,
+				    DMA_RESV_USAGE_KERNEL,
+				    false, MAX_SCHEDULE_TIMEOUT);
+	if (ret <= 0)
+		return ret < 0 ? ret : -ETIMEDOUT;
+
+	sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
+	if (IS_ERR(sgt))
+		return PTR_ERR(sgt);
+
+	*ents = kvmalloc_array(sgt->nents,
+			       sizeof(struct virtio_gpu_mem_entry),
+			       GFP_KERNEL);
+	if (!(*ents)) {
+		dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL);
+		return -ENOMEM;
+	}
+
+	*nents = sgt->nents;
+	for_each_sgtable_dma_sg(sgt, sl, i) {
+		(*ents)[i].addr = cpu_to_le64(sg_dma_address(sl));
+		(*ents)[i].length = cpu_to_le32(sg_dma_len(sl));
+		(*ents)[i].padding = 0;
+	}
+
+	bo->sgt = sgt;
+	return 0;
+}
+
 struct drm_gem_object *virtgpu_gem_prime_import(struct drm_device *dev,
						 struct dma_buf *buf)
 {
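For context: the new MODULE_IMPORT_NS(DMA_BUF) line is required because dma_buf_map_attachment() and the other dma-buf exports live in the DMA_BUF symbol namespace, and virtgpu_dma_buf_import_sgt() converts an already-attached, DMA-mapped dma-buf into the virtio_gpu_mem_entry array the host side expects. A caller in the import path might use the helper roughly as sketched below; virtgpu_dma_buf_init_obj(), its locking, and the blob-creation step are illustrative assumptions, not part of this diff.

/*
 * Hypothetical caller sketch (not part of this change): the function name,
 * its arguments and the blob-creation step are assumptions for illustration.
 * Only virtgpu_dma_buf_import_sgt() is taken from the diff above.
 */
static int virtgpu_dma_buf_init_obj(struct virtio_gpu_object *bo,
				    struct dma_buf_attachment *attach)
{
	struct virtio_gpu_mem_entry *ents = NULL;
	unsigned int nents = 0;
	int ret;

	/* The helper asserts the reservation lock, so take it here. */
	ret = dma_resv_lock_interruptible(attach->dmabuf->resv, NULL);
	if (ret)
		return ret;

	ret = virtgpu_dma_buf_import_sgt(&ents, &nents, bo, attach);
	if (ret)
		goto out_unlock;

	/*
	 * At this point ents/nents describe the guest DMA addresses of the
	 * imported buffer; a real caller would hand them to the host, e.g.
	 * via a RESOURCE_CREATE_BLOB command, which then owns the array.
	 */

out_unlock:
	dma_resv_unlock(attach->dmabuf->resv);
	return ret;
}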