//! Comparison traits for `[T]`.

use crate::cmp;
use crate::cmp::Ordering::{self, Greater, Less};
use crate::mem;

use super::from_raw_parts;
use super::memchr;

extern "C" {
    /// Calls the implementation-provided `memcmp`.
    ///
    /// Interprets the data as `u8`.
    ///
    /// Returns 0 for equal, < 0 for less than, and > 0 for greater than.
    // FIXME(#32610): Return type should be c_int
    fn memcmp(s1: *const u8, s2: *const u8, n: usize) -> i32;
}
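
// Illustrative sketch (not in the original module; compiled only under
// `cfg(test)`): exercises the sign convention documented on the binding above,
// assuming the platform `memcmp` behaves as described.
#[cfg(test)]
#[test]
fn memcmp_sign_convention_sketch() {
    // SAFETY: each pointer comes from a live 3-byte array, so reading
    // 3 bytes through it is valid.
    unsafe {
        assert_eq!(memcmp(b"abc".as_ptr(), b"abc".as_ptr(), 3), 0);
        assert!(memcmp(b"abc".as_ptr(), b"abd".as_ptr(), 3) < 0);
        assert!(memcmp(b"abd".as_ptr(), b"abc".as_ptr(), 3) > 0);
    }
}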

#[stable(feature = "rust1", since = "1.0.0")]
impl<A, B> PartialEq<[B]> for [A]
where
    A: PartialEq<B>,
{
    fn eq(&self, other: &[B]) -> bool {
        SlicePartialEq::equal(self, other)
    }

    fn ne(&self, other: &[B]) -> bool {
        SlicePartialEq::not_equal(self, other)
    }
}
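
// Illustrative sketch (not in the original module; compiled only under
// `cfg(test)`): the public `PartialEq` impl above compares lengths first and
// then elements pairwise, as observed through `==`.
#[cfg(test)]
#[test]
fn slice_eq_sketch() {
    let a: &[i32] = &[1, 2, 3];
    assert!(a == &[1, 2, 3][..]); // same length, equal elements
    assert!(a != &[1, 2][..]); // differing lengths are never equal
    assert!(a != &[1, 2, 4][..]); // one differing element suffices
}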

#[stable(feature = "rust1", since = "1.0.0")]
impl<T: Eq> Eq for [T] {}

/// Implements comparison of slices lexicographically.
#[stable(feature = "rust1", since = "1.0.0")]
impl<T: Ord> Ord for [T] {
    fn cmp(&self, other: &[T]) -> Ordering {
        SliceOrd::compare(self, other)
    }
}
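
// Illustrative sketch (not in the original module; compiled only under
// `cfg(test)`): lexicographic order with a length tiebreak, i.e. a strict
// prefix always compares as less.
#[cfg(test)]
#[test]
fn slice_ord_sketch() {
    let a: &[i32] = &[1, 2];
    assert_eq!(a.cmp(&[1, 2, 3][..]), Less); // prefix is less
    assert_eq!(a.cmp(&[1, 1, 9][..]), Greater); // first difference decides
}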

/// Implements comparison of slices lexicographically.
#[stable(feature = "rust1", since = "1.0.0")]
impl<T: PartialOrd> PartialOrd for [T] {
    fn partial_cmp(&self, other: &[T]) -> Option<Ordering> {
        SlicePartialOrd::partial_compare(self, other)
    }
}
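
// Illustrative sketch (not in the original module; compiled only under
// `cfg(test)`): with a partial order, a single incomparable pair
// (e.g. `f64::NAN`) makes the whole comparison `None`.
#[cfg(test)]
#[test]
fn slice_partial_ord_sketch() {
    let a: &[f64] = &[1.0, f64::NAN];
    let b: &[f64] = &[1.0, 2.0];
    assert_eq!(a.partial_cmp(b), None); // NAN vs 2.0 is incomparable
}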

#[doc(hidden)]
// intermediate trait for specialization of slice's PartialEq
trait SlicePartialEq<B> {
    fn equal(&self, other: &[B]) -> bool;

    fn not_equal(&self, other: &[B]) -> bool {
        !self.equal(other)
    }
}

// Generic slice equality
impl<A, B> SlicePartialEq<B> for [A]
where
    A: PartialEq<B>,
{
    default fn equal(&self, other: &[B]) -> bool {
        if self.len() != other.len() {
            return false;
        }

        self.iter().zip(other.iter()).all(|(x, y)| x == y)
    }
}

// Use an equal-pointer optimization when types are `Eq`.
// We can't make `A` and `B` the same type because `min_specialization` won't
// allow it.
impl<A, B> SlicePartialEq<B> for [A]
where
    A: MarkerEq<B>,
{
    default fn equal(&self, other: &[B]) -> bool {
        if self.len() != other.len() {
            return false;
        }

        // While performance would suffer if `guaranteed_eq` just returned `false`
        // for all arguments, the correctness and the return value of this function
        // would not be affected.
        if self.as_ptr().guaranteed_eq(other.as_ptr() as *const A) {
            return true;
        }

        self.iter().zip(other.iter()).all(|(x, y)| x == y)
    }
}
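
// Illustrative sketch (not in the original module; compiled only under
// `cfg(test)`): why the pointer fast path is gated on `Eq`. For a non-`Eq`
// type such as `f32`, a slice need not equal itself, so returning `true` for
// identical pointers would be wrong there.
#[cfg(test)]
#[test]
fn non_eq_slice_may_not_equal_itself_sketch() {
    let a: &[f32] = &[1.0, f32::NAN];
    assert!(a != a); // NaN != NaN, so the generic element-wise path reports inequality
}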

// Use memcmp for bytewise equality when the types allow.
impl<A, B> SlicePartialEq<B> for [A]
where
    A: BytewiseEquality<B>,
{
    fn equal(&self, other: &[B]) -> bool {
        if self.len() != other.len() {
            return false;
        }

        // While performance would suffer if `guaranteed_eq` just returned `false`
        // for all arguments, the correctness and the return value of this function
        // would not be affected.
        if self.as_ptr().guaranteed_eq(other.as_ptr() as *const A) {
            return true;
        }
        // SAFETY: `self` and `other` are references and are thus guaranteed to be valid.
        // The two slices have been checked to have the same size above.
        unsafe {
            let size = mem::size_of_val(self);
            memcmp(self.as_ptr() as *const u8, other.as_ptr() as *const u8, size) == 0
        }
    }
}
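
// Illustrative sketch (not in the original module; compiled only under
// `cfg(test)`): `size_of_val` on a slice is `len * size_of::<T>()`, so the
// `memcmp` above covers every element's bytes.
#[cfg(test)]
#[test]
fn size_of_val_covers_all_elements_sketch() {
    let a: &[u16] = &[1, 2, 3];
    assert_eq!(mem::size_of_val(a), 3 * mem::size_of::<u16>());
}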

#[doc(hidden)]
// intermediate trait for specialization of slice's PartialOrd
trait SlicePartialOrd: Sized {
    fn partial_compare(left: &[Self], right: &[Self]) -> Option<Ordering>;
}

impl<A: PartialOrd> SlicePartialOrd for A {
    default fn partial_compare(left: &[A], right: &[A]) -> Option<Ordering> {
        let l = cmp::min(left.len(), right.len());

        // Slice to the loop iteration range to enable bounds-check
        // elimination in the compiler.
        let lhs = &left[..l];
        let rhs = &right[..l];

        for i in 0..l {
            match lhs[i].partial_cmp(&rhs[i]) {
                Some(Ordering::Equal) => (),
                non_eq => return non_eq,
            }
        }

        left.len().partial_cmp(&right.len())
    }
}
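
// Illustrative sketch (not in the original module; compiled only under
// `cfg(test)`): `f64` is `PartialOrd` but not `AlwaysApplicableOrd`, so it
// takes the default path above; the first non-equal pair decides the result.
#[cfg(test)]
#[test]
fn default_partial_compare_first_difference_sketch() {
    let a: &[f64] = &[1.0, 9.0];
    let b: &[f64] = &[2.0, 0.0];
    assert_eq!(a.partial_cmp(b), Some(Less)); // 1.0 < 2.0 decides immediately
}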

// This is the impl that we would like to have. Unfortunately it's not sound.
// See `partial_ord_slice.rs`.
/*
impl<A> SlicePartialOrd for A
where
    A: Ord,
{
    default fn partial_compare(left: &[A], right: &[A]) -> Option<Ordering> {
        Some(SliceOrd::compare(left, right))
    }
}
*/

impl<A: AlwaysApplicableOrd> SlicePartialOrd for A {
    fn partial_compare(left: &[A], right: &[A]) -> Option<Ordering> {
        Some(SliceOrd::compare(left, right))
    }
}

#[rustc_specialization_trait]
trait AlwaysApplicableOrd: SliceOrd + Ord {}

macro_rules! always_applicable_ord {
    ($([$($p:tt)*] $t:ty,)*) => {
        $(impl<$($p)*> AlwaysApplicableOrd for $t {})*
    }
}

always_applicable_ord! {
    [] u8, [] u16, [] u32, [] u64, [] u128, [] usize,
    [] i8, [] i16, [] i32, [] i64, [] i128, [] isize,
    [] bool, [] char,
    [T: ?Sized] *const T, [T: ?Sized] *mut T,
    [T: AlwaysApplicableOrd] &T,
    [T: AlwaysApplicableOrd] &mut T,
    [T: AlwaysApplicableOrd] Option<T>,
}
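
// Illustrative sketch (not in the original module; compiled only under
// `cfg(test)`): for the types listed above, `partial_cmp` on slices is simply
// `Some` of the total order.
#[cfg(test)]
#[test]
fn always_applicable_ord_agrees_with_cmp_sketch() {
    let (a, b): (&[u8], &[u8]) = (b"ab", b"b");
    assert_eq!(a.partial_cmp(b), Some(a.cmp(b)));
    assert_eq!(a.cmp(b), Less); // b'a' < b'b' at the first byte
}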

#[doc(hidden)]
// intermediate trait for specialization of slice's Ord
trait SliceOrd: Sized {
    fn compare(left: &[Self], right: &[Self]) -> Ordering;
}

impl<A: Ord> SliceOrd for A {
    default fn compare(left: &[Self], right: &[Self]) -> Ordering {
        let l = cmp::min(left.len(), right.len());

        // Slice to the loop iteration range to enable bounds-check
        // elimination in the compiler.
        let lhs = &left[..l];
        let rhs = &right[..l];

        for i in 0..l {
            match lhs[i].cmp(&rhs[i]) {
                Ordering::Equal => (),
                non_eq => return non_eq,
            }
        }

        left.len().cmp(&right.len())
    }
}

// `memcmp` compares a sequence of unsigned bytes lexicographically.
// This matches the order we want for `[u8]`, but no others (not even `[i8]`).
impl SliceOrd for u8 {
    #[inline]
    fn compare(left: &[Self], right: &[Self]) -> Ordering {
        let order =
            // SAFETY: `left` and `right` are references and are thus guaranteed to be valid.
            // We use the minimum of both lengths, which guarantees that both regions are
            // valid for reads in that interval.
            unsafe { memcmp(left.as_ptr(), right.as_ptr(), cmp::min(left.len(), right.len())) };
        if order == 0 {
            left.len().cmp(&right.len())
        } else if order < 0 {
            Less
        } else {
            Greater
        }
    }
}
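
// Illustrative sketch (not in the original module; compiled only under
// `cfg(test)`): why `[i8]` must not use the bytewise path above. As a raw
// byte, `-1i8` is `0xFF` and would compare greater than `1i8` (`0x01`), but
// the generic `Ord` path correctly reports it as less.
#[cfg(test)]
#[test]
fn i8_is_not_bytewise_ordered_sketch() {
    let a: &[i8] = &[-1];
    let b: &[i8] = &[1];
    assert_eq!(a.cmp(b), Less); // signed order, not the 0xFF > 0x01 byte order
}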

// Hack to allow specializing on `Eq` even though `Eq` has a method.
#[rustc_unsafe_specialization_marker]
trait MarkerEq<T>: PartialEq<T> {}

impl<T: Eq> MarkerEq<T> for T {}

#[doc(hidden)]
/// Trait implemented for types that can be compared for equality using
/// their bytewise representation.
#[rustc_specialization_trait]
trait BytewiseEquality<T>: MarkerEq<T> + Copy {}

macro_rules! impl_marker_for {
    ($traitname:ident, $($ty:ty)*) => {
        $(
            impl $traitname<$ty> for $ty { }
        )*
    }
}

impl_marker_for!(BytewiseEquality,
    u8 i8 u16 i16 u32 i32 u64 i64 u128 i128 usize isize char bool);

pub(super) trait SliceContains: Sized {
    fn slice_contains(&self, x: &[Self]) -> bool;
}

impl<T> SliceContains for T
where
    T: PartialEq,
{
    default fn slice_contains(&self, x: &[Self]) -> bool {
        x.iter().any(|y| *y == *self)
    }
}

impl SliceContains for u8 {
    fn slice_contains(&self, x: &[Self]) -> bool {
        memchr::memchr(*self, x).is_some()
    }
}

impl SliceContains for i8 {
    fn slice_contains(&self, x: &[Self]) -> bool {
        let byte = *self as u8;
        // SAFETY: `i8` and `u8` have the same memory layout, thus casting `x.as_ptr()`
        // as `*const u8` is safe. The `x.as_ptr()` comes from a reference and is thus
        // guaranteed to be valid for reads for the length of the slice `x.len()`, which
        // cannot be larger than `isize::MAX`. The returned slice is never mutated.
        let bytes: &[u8] = unsafe { from_raw_parts(x.as_ptr() as *const u8, x.len()) };
        memchr::memchr(byte, bytes).is_some()
    }
}
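
// Illustrative sketch (not in the original module; compiled only under
// `cfg(test)`): `slice_contains` backs the public `[T]::contains` in the
// parent module, so the specializations above are observable through it.
#[cfg(test)]
#[test]
fn slice_contains_sketch() {
    let hay: &[u8] = b"hello";
    assert!(hay.contains(&b'l')); // u8 path, via memchr
    let vals: &[i8] = &[1, -2, 3];
    assert!(vals.contains(&-2)); // i8 path, via memchr on the reinterpreted bytes
}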