@@ -1775,7 +1775,31 @@ impl<T, A: Allocator> Vec<T, A> {
             return;
         }

-        /* INVARIANT: vec.len() > read >= write > write-1 >= 0 */
+        // Check if we ever want to remove anything.
+        // This allows us to use copy_nonoverlapping in the next cycle
+        // and avoids any memory writes if we don't need to remove anything.
+        let mut possible_remove_idx = 1;
+        let start = self.as_mut_ptr();
+        while possible_remove_idx < len {
+            let found_duplicate = unsafe {
+                // SAFETY: possible_remove_idx is always in range [1..len).
+                // Note that we start iteration from 1, so we never overflow.
+                let prev = start.add(possible_remove_idx.wrapping_sub(1));
+                let current = start.add(possible_remove_idx);
+                // We explicitly say in the docs that the references are reversed.
+                same_bucket(&mut *current, &mut *prev)
+            };
+            if found_duplicate {
+                break;
+            }
+            possible_remove_idx += 1;
+        }
+        // We don't need to remove anything.
+        if possible_remove_idx >= len {
+            return;
+        }
+
+        /* INVARIANT: vec.len() > read > write > write-1 >= 0 */
         struct FillGapOnDrop<'a, T, A: core::alloc::Allocator> {
             /* Offset of the element we want to check if it is a duplicate */
             read: usize,
@@ -1821,31 +1845,39 @@ impl<T, A: Allocator> Vec<T, A> {
             }
         }

-        let mut gap = FillGapOnDrop { read: 1, write: 1, vec: self };
-        let ptr = gap.vec.as_mut_ptr();
-
         /* Drop items while going through Vec; it should be more efficient than
          * doing slice partition_dedup + truncate */

+        // Construct `gap` first and then drop the item to avoid memory corruption if `T::drop` panics.
+        let mut gap =
+            FillGapOnDrop { read: possible_remove_idx + 1, write: possible_remove_idx, vec: self };
+        unsafe {
+            // SAFETY: we checked that possible_remove_idx < len before.
+            // If the drop panics, `gap` will remove this item without dropping it again.
+            ptr::drop_in_place(start.add(possible_remove_idx));
+        }
+
         /* SAFETY: Because of the invariant, read_ptr, prev_ptr and write_ptr
          * are always in-bounds and read_ptr never aliases prev_ptr */
         unsafe {
             while gap.read < len {
-                let read_ptr = ptr.add(gap.read);
-                let prev_ptr = ptr.add(gap.write.wrapping_sub(1));
+                let read_ptr = start.add(gap.read);
+                let prev_ptr = start.add(gap.write.wrapping_sub(1));

-                if same_bucket(&mut *read_ptr, &mut *prev_ptr) {
+                // We explicitly say in the docs that the references are reversed.
+                let found_duplicate = same_bucket(&mut *read_ptr, &mut *prev_ptr);
+                if found_duplicate {
                     // Increase `gap.read` now since the drop may panic.
                     gap.read += 1;
                     /* We have found a duplicate, drop it in-place */
                     ptr::drop_in_place(read_ptr);
                 } else {
-                    let write_ptr = ptr.add(gap.write);
+                    let write_ptr = start.add(gap.write);

-                    /* Because `read_ptr` can be equal to `write_ptr`, we either
-                     * have to use `copy` or conditional `copy_nonoverlapping`.
-                     * Looks like the first option is faster. */
-                    ptr::copy(read_ptr, write_ptr, 1);
+                    /* read_ptr cannot be equal to write_ptr because at this point
+                     * we are guaranteed to have skipped at least one element
+                     * (before the loop starts). */
+                    ptr::copy_nonoverlapping(read_ptr, write_ptr, 1);

                     /* We have filled that place, so go further */
                     gap.write += 1;
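
For context on the `same_bucket(current, prev)` argument order used above: the `Vec::dedup_by` docs state that the two references are passed in the opposite order from their order in the slice, and that the first argument is the element removed on a match. A minimal usage sketch (the values are made up for illustration):

    fn main() {
        let mut v = vec!["foo", "Foo", "bar", "baz", "BAZ", "Baz"];

        // `a` is the current element, `b` is the one before it in the slice;
        // `a` is removed whenever the closure returns true.
        v.dedup_by(|a, b| a.eq_ignore_ascii_case(b));

        assert_eq!(v, ["foo", "bar", "baz"]);
    }

Note that only consecutive duplicates are removed, which is why the scan-ahead loop in the first hunk only ever compares neighboring elements.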
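The `FillGapOnDrop` guard that both hunks rely on exists for panic safety: if `same_bucket` or an element's destructor panics mid-loop, the guard's `Drop` impl runs during unwinding, backfills the gap of already-dropped slots, and shrinks the length so the Vec never exposes logically dead elements. A simplified, self-contained sketch of the same pattern, assuming hypothetical names (`GapGuard` is not the type from the diff, and its `Drop` body is a condensed approximation of the real one):

    use std::ptr;

    /// Closes a gap of already-dropped elements when dropped, including
    /// during unwinding. `write` is the first dead slot; `read` is the
    /// first still-valid element after the gap.
    struct GapGuard<'a, T> {
        read: usize,
        write: usize,
        vec: &'a mut Vec<T>,
    }

    impl<T> Drop for GapGuard<'_, T> {
        fn drop(&mut self) {
            unsafe {
                let base = self.vec.as_mut_ptr();
                let len = self.vec.len();
                // Move the valid tail vec[read..len] down over the gap. The
                // source and destination may overlap, so this must be `copy`
                // (a memmove), not `copy_nonoverlapping`.
                let items_left = len - self.read;
                ptr::copy(base.add(self.read), base.add(self.write), items_left);
                // Everything past the moved tail was dropped or moved away,
                // so shrink the length to cover only initialized elements.
                self.vec.set_len(self.write + items_left);
            }
        }
    }

    fn main() {
        let mut v = vec![1, 1, 2, 3, 3];
        // Pretend the loop already dropped the duplicate at index 1
        // (a no-op for i32, but it marks the slot as logically dead).
        unsafe { ptr::drop_in_place(v.as_mut_ptr().add(1)) };
        let guard = GapGuard { read: 2, write: 1, vec: &mut v };
        drop(guard); // the guard closes the gap
        assert_eq!(v, [1, 2, 3, 3]);
    }

Constructing the guard before calling `ptr::drop_in_place` (as the second hunk does) is what makes the pattern sound: the gap only ever exists while a guard that knows how to close it is alive.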