@@ -3175,4 +3175,80 @@ int virtqueue_dma_mapping_error(struct virtqueue *_vq, dma_addr_t addr)
 }
 EXPORT_SYMBOL_GPL(virtqueue_dma_mapping_error);
 
+/**
+ * virtqueue_dma_need_sync - check whether a DMA address needs to be synced
+ * @_vq: the struct virtqueue we're talking about.
+ * @addr: DMA address
+ *
+ * Check whether the DMA address mapped by the virtqueue_dma_map_* APIs needs
+ * to be synchronized.
+ *
+ * Return: true if the DMA address needs to be synchronized, false otherwise.
+ */
+bool virtqueue_dma_need_sync(struct virtqueue *_vq, dma_addr_t addr)
+{
+	struct vring_virtqueue *vq = to_vvq(_vq);
+
+	if (!vq->use_dma_api)
+		return false;
+
+	return dma_need_sync(vring_dma_dev(vq), addr);
+}
+EXPORT_SYMBOL_GPL(virtqueue_dma_need_sync);
+
+/**
+ * virtqueue_dma_sync_single_range_for_cpu - dma sync for cpu
+ * @_vq: the struct virtqueue we're talking about.
+ * @addr: DMA address
+ * @offset: DMA address offset
+ * @size: buffer size for the sync
+ * @dir: DMA direction
+ *
+ * Before calling this function, use virtqueue_dma_need_sync() to confirm that
+ * the DMA address really needs to be synchronized.
+ */
+void virtqueue_dma_sync_single_range_for_cpu(struct virtqueue *_vq,
+					     dma_addr_t addr,
+					     unsigned long offset, size_t size,
+					     enum dma_data_direction dir)
+{
+	struct vring_virtqueue *vq = to_vvq(_vq);
+	struct device *dev = vring_dma_dev(vq);
+
+	if (!vq->use_dma_api)
+		return;
+
+	dma_sync_single_range_for_cpu(dev, addr, offset, size, dir);
+}
+EXPORT_SYMBOL_GPL(virtqueue_dma_sync_single_range_for_cpu);
+
+/**
+ * virtqueue_dma_sync_single_range_for_device - dma sync for device
+ * @_vq: the struct virtqueue we're talking about.
+ * @addr: DMA address
+ * @offset: DMA address offset
+ * @size: buffer size for the sync
+ * @dir: DMA direction
+ *
+ * Before calling this function, use virtqueue_dma_need_sync() to confirm that
+ * the DMA address really needs to be synchronized.
+ */
+void virtqueue_dma_sync_single_range_for_device(struct virtqueue *_vq,
+						dma_addr_t addr,
+						unsigned long offset, size_t size,
+						enum dma_data_direction dir)
+{
+	struct vring_virtqueue *vq = to_vvq(_vq);
+	struct device *dev = vring_dma_dev(vq);
+
+	if (!vq->use_dma_api)
+		return;
+
+	dma_sync_single_range_for_device(dev, addr, offset, size, dir);
+}
+EXPORT_SYMBOL_GPL(virtqueue_dma_sync_single_range_for_device);
+
 MODULE_LICENSE("GPL");
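
For reference, a minimal usage sketch (not part of this commit) of how a driver that premaps its own buffers with the virtqueue_dma_map_* APIs might pair these helpers; the function name example_complete_rx and the buf/addr/len parameters are hypothetical placeholders, not code from the patch.

#include <linux/virtio.h>
#include <linux/dma-mapping.h>

/* Hand a premapped receive buffer back to the CPU before reading it. */
static void example_complete_rx(struct virtqueue *vq, void *buf,
				dma_addr_t addr, unsigned int len)
{
	/* Only pay for the sync when the mapping actually needs one. */
	if (virtqueue_dma_need_sync(vq, addr))
		virtqueue_dma_sync_single_range_for_cpu(vq, addr, 0, len,
							DMA_FROM_DEVICE);

	/* The CPU may now safely read buf[0..len). */
}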