@@ -69,6 +69,7 @@ struct vfio_iommu {
 	struct rb_root		dma_list;
 	struct blocking_notifier_head notifier;
 	unsigned int		dma_avail;
+	uint64_t		pgsize_bitmap;
 	bool			v2;
 	bool			nesting;
 };
@@ -835,15 +836,14 @@ static void vfio_remove_dma(struct vfio_iommu *iommu, struct vfio_dma *dma)
 	iommu->dma_avail++;
 }
 
-static unsigned long vfio_pgsize_bitmap(struct vfio_iommu *iommu)
+static void vfio_update_pgsize_bitmap(struct vfio_iommu *iommu)
 {
 	struct vfio_domain *domain;
-	unsigned long bitmap = ULONG_MAX;
 
-	mutex_lock(&iommu->lock);
+	iommu->pgsize_bitmap = ULONG_MAX;
+
 	list_for_each_entry(domain, &iommu->domain_list, next)
-		bitmap &= domain->domain->pgsize_bitmap;
-	mutex_unlock(&iommu->lock);
+		iommu->pgsize_bitmap &= domain->domain->pgsize_bitmap;
 
 	/*
 	 * In case the IOMMU supports page sizes smaller than PAGE_SIZE
@@ -853,12 +853,10 @@ static unsigned long vfio_pgsize_bitmap(struct vfio_iommu *iommu)
 	 * granularity while iommu driver can use the sub-PAGE_SIZE size
 	 * to map the buffer.
 	 */
-	if (bitmap & ~PAGE_MASK) {
-		bitmap &= PAGE_MASK;
-		bitmap |= PAGE_SIZE;
+	if (iommu->pgsize_bitmap & ~PAGE_MASK) {
+		iommu->pgsize_bitmap &= PAGE_MASK;
+		iommu->pgsize_bitmap |= PAGE_SIZE;
 	}
-
-	return bitmap;
 }
 
 static int vfio_dma_do_unmap(struct vfio_iommu *iommu,
@@ -869,19 +867,28 @@ static int vfio_dma_do_unmap(struct vfio_iommu *iommu,
 	size_t unmapped = 0;
 	int ret = 0, retries = 0;
 
-	mask = ((uint64_t)1 << __ffs(vfio_pgsize_bitmap(iommu))) - 1;
+	mutex_lock(&iommu->lock);
+
+	mask = ((uint64_t)1 << __ffs(iommu->pgsize_bitmap)) - 1;
+
+	if (unmap->iova & mask) {
+		ret = -EINVAL;
+		goto unlock;
+	}
+
+	if (!unmap->size || unmap->size & mask) {
+		ret = -EINVAL;
+		goto unlock;
+	}
 
-	if (unmap->iova & mask)
-		return -EINVAL;
-	if (!unmap->size || unmap->size & mask)
-		return -EINVAL;
 	if (unmap->iova + unmap->size - 1 < unmap->iova ||
-	    unmap->size > SIZE_MAX)
-		return -EINVAL;
+	    unmap->size > SIZE_MAX) {
+		ret = -EINVAL;
+		goto unlock;
+	}
 
 	WARN_ON(mask & PAGE_MASK);
 again:
-	mutex_lock(&iommu->lock);
 
 	/*
 	 * vfio-iommu-type1 (v1) - User mappings were coalesced together to
@@ -960,6 +967,7 @@ static int vfio_dma_do_unmap(struct vfio_iommu *iommu,
 			blocking_notifier_call_chain(&iommu->notifier,
 						    VFIO_IOMMU_NOTIFY_DMA_UNMAP,
 						    &nb_unmap);
+			mutex_lock(&iommu->lock);
 			goto again;
 		}
 		unmapped += dma->size;
@@ -1075,24 +1083,28 @@ static int vfio_dma_do_map(struct vfio_iommu *iommu,
 	if (map->size != size || map->vaddr != vaddr || map->iova != iova)
 		return -EINVAL;
 
-	mask = ((uint64_t)1 << __ffs(vfio_pgsize_bitmap(iommu))) - 1;
-
-	WARN_ON(mask & PAGE_MASK);
-
 	/* READ/WRITE from device perspective */
 	if (map->flags & VFIO_DMA_MAP_FLAG_WRITE)
 		prot |= IOMMU_WRITE;
 	if (map->flags & VFIO_DMA_MAP_FLAG_READ)
 		prot |= IOMMU_READ;
 
-	if (!prot || !size || (size | iova | vaddr) & mask)
-		return -EINVAL;
+	mutex_lock(&iommu->lock);
 
-	/* Don't allow IOVA or virtual address wrap */
-	if (iova + size - 1 < iova || vaddr + size - 1 < vaddr)
-		return -EINVAL;
+	mask = ((uint64_t)1 << __ffs(iommu->pgsize_bitmap)) - 1;
 
-	mutex_lock(&iommu->lock);
+	WARN_ON(mask & PAGE_MASK);
+
+	if (!prot || !size || (size | iova | vaddr) & mask) {
+		ret = -EINVAL;
+		goto out_unlock;
+	}
+
+	/* Don't allow IOVA or virtual address wrap */
+	if (iova + size - 1 < iova || vaddr + size - 1 < vaddr) {
+		ret = -EINVAL;
+		goto out_unlock;
+	}
 
 	if (vfio_find_dma(iommu, iova, size)) {
 		ret = -EEXIST;
@@ -1698,6 +1710,7 @@ static int vfio_iommu_type1_attach_group(void *iommu_data,
 		if (!iommu->external_domain) {
 			INIT_LIST_HEAD(&domain->group_list);
 			iommu->external_domain = domain;
+			vfio_update_pgsize_bitmap(iommu);
 		} else {
 			kfree(domain);
 		}
@@ -1823,6 +1836,7 @@ static int vfio_iommu_type1_attach_group(void *iommu_data,
 	}
 
 	list_add(&domain->next, &iommu->domain_list);
+	vfio_update_pgsize_bitmap(iommu);
 done:
 	/* Delete the old one and insert new iova list */
 	vfio_iommu_iova_insert_copy(iommu, &iova_copy);
@@ -2034,6 +2048,7 @@ static void vfio_iommu_type1_detach_group(void *iommu_data,
 			list_del(&domain->next);
 			kfree(domain);
 			vfio_iommu_aper_expand(iommu, &iova_copy);
+			vfio_update_pgsize_bitmap(iommu);
 		}
 		break;
 	}
@@ -2166,8 +2181,6 @@ static int vfio_iommu_iova_build_caps(struct vfio_iommu *iommu,
 	size_t size;
 	int iovas = 0, i = 0, ret;
 
-	mutex_lock(&iommu->lock);
-
 	list_for_each_entry(iova, &iommu->iova_list, list)
 		iovas++;
@@ -2176,17 +2189,14 @@ static int vfio_iommu_iova_build_caps(struct vfio_iommu *iommu,
 		 * Return 0 as a container with a single mdev device
 		 * will have an empty list
 		 */
-		ret = 0;
-		goto out_unlock;
+		return 0;
 	}
 
 	size = sizeof(*cap_iovas) + (iovas * sizeof(*cap_iovas->iova_ranges));
 
 	cap_iovas = kzalloc(size, GFP_KERNEL);
-	if (!cap_iovas) {
-		ret = -ENOMEM;
-		goto out_unlock;
-	}
+	if (!cap_iovas)
+		return -ENOMEM;
 
 	cap_iovas->nr_iovas = iovas;
 
@@ -2199,8 +2209,6 @@ static int vfio_iommu_iova_build_caps(struct vfio_iommu *iommu,
 	ret = vfio_iommu_iova_add_cap(caps, cap_iovas, size);
 
 	kfree(cap_iovas);
-out_unlock:
-	mutex_unlock(&iommu->lock);
 	return ret;
 }
 
@@ -2245,11 +2253,13 @@ static long vfio_iommu_type1_ioctl(void *iommu_data,
 			info.cap_offset = 0; /* output, no-recopy necessary */
 		}
 
+		mutex_lock(&iommu->lock);
 		info.flags = VFIO_IOMMU_INFO_PGSIZES;
 
-		info.iova_pgsizes = vfio_pgsize_bitmap(iommu);
+		info.iova_pgsizes = iommu->pgsize_bitmap;
 
 		ret = vfio_iommu_iova_build_caps(iommu, &caps);
+		mutex_unlock(&iommu->lock);
 		if (ret)
 			return ret;
 
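
A minimal userspace sketch (not part of this patch) of how the cached page-size bitmap surfaces through VFIO_IOMMU_GET_INFO, assuming "container" is a /dev/vfio/vfio fd that already has a group attached and a type1 IOMMU set:

/* Hypothetical illustration only: query the IOMMU page-size bitmap that the
 * kernel now reports from the cached iommu->pgsize_bitmap under iommu->lock.
 */
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/vfio.h>

static int print_pgsizes(int container)
{
	struct vfio_iommu_type1_info info;

	memset(&info, 0, sizeof(info));
	info.argsz = sizeof(info);

	if (ioctl(container, VFIO_IOMMU_GET_INFO, &info))
		return -1;

	if (info.flags & VFIO_IOMMU_INFO_PGSIZES)
		printf("IOMMU page sizes: 0x%llx\n",
		       (unsigned long long)info.iova_pgsizes);

	return 0;
}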