@@ -1646,7 +1646,7 @@ HashTable<Key, Value, Extractor, Traits, KeyTraits, Allocator>::ExpandBuffer(
     }
   }
   table_ = temporary_table;
-  Allocator::template BackingWriteBarrier(&table_);
+  Allocator::BackingWriteBarrier(&table_);
 
   HashTableBucketInitializer<Traits, Allocator, Value>::InitializeTable(
       original_table, new_table_size);
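The change in each hunk is the same: the `template` disambiguator is dropped from the call to `BackingWriteBarrier`. A minimal sketch of the language rule involved, using illustrative names (`AllocatorLike`, `Barrier`, `PublishTable` are not part of WTF): the keyword is only required when a member template of a dependent type is called with an explicit template argument list, so a plain qualified call does not need it.

```cpp
// Minimal sketch; all names here are illustrative, not the real WTF API.
struct AllocatorLike {
  // A plain static member function: no disambiguation needed at call sites.
  static void BackingWriteBarrier(void** slot) {}

  // A member function template: callers that pass explicit template
  // arguments through a dependent type must use the 'template' keyword.
  template <typename T>
  static void Barrier(T* slot) {}
};

template <typename Allocator>
void PublishTable(void** slot) {
  // No explicit template argument list, so no 'template' keyword is needed
  // (and some compilers reject it when the member is not a template):
  Allocator::BackingWriteBarrier(slot);

  // Here 'Allocator' is dependent and '<' would otherwise parse as
  // less-than, so the disambiguator is mandatory:
  Allocator::template Barrier<void>(slot);
}

int main() {
  void* table = nullptr;
  PublishTable<AllocatorLike>(&table);
}
```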
@@ -1700,7 +1700,7 @@ Value* HashTable<Key, Value, Extractor, Traits, KeyTraits, Allocator>::RehashTo(
   // This swaps the newly allocated buffer with the current one. The store to
   // the current table has to be atomic to prevent races with concurrent marker.
   AsAtomicPtr(&table_)->store(new_hash_table.table_, std::memory_order_relaxed);
-  Allocator::template BackingWriteBarrier(&table_);
+  Allocator::BackingWriteBarrier(&table_);
   table_size_ = new_table_size;
 
   new_hash_table.table_ = old_table;
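The comment in this hunk captures the invariant: a concurrent marker thread may read `table_` at any time, so the mutator must publish the new backing store with an atomic store (relaxed suffices, since the marker only needs an untorn pointer) followed by a write barrier. A self-contained sketch of that publish step, using C++20 `std::atomic_ref` in place of WTF's `AsAtomicPtr` and a hypothetical barrier hook:

```cpp
#include <atomic>

struct Bucket;  // opaque bucket type, stands in for HashTable's Value

// Hypothetical stand-in for Allocator::BackingWriteBarrier.
void BackingWriteBarrierSketch(Bucket** slot) { /* notify GC of *slot */ }

struct TableSketch {
  Bucket* table_ = nullptr;

  void PublishNewBacking(Bucket* new_table) {
    // Atomic store so a concurrent marker never observes a torn pointer;
    // relaxed ordering, mirroring the store in RehashTo above.
    std::atomic_ref<Bucket*>(table_).store(new_table,
                                           std::memory_order_relaxed);
    BackingWriteBarrierSketch(&table_);
  }
};
```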
@@ -1852,8 +1852,8 @@ void HashTable<Key, Value, Extractor, Traits, KeyTraits, Allocator>::swap(
   // on the mutator thread, which is also the only one that writes to them, so
   // there is *no* risk of data races when reading.
   AtomicWriteSwap(table_, other.table_);
-  Allocator::template BackingWriteBarrier(&table_);
-  Allocator::template BackingWriteBarrier(&other.table_);
+  Allocator::BackingWriteBarrier(&table_);
+  Allocator::BackingWriteBarrier(&other.table_);
   if (IsWeak<ValueType>::value) {
     // Weak processing is omitted when no backing store is present. In case such
     // an empty table is later on used it needs to be strongified.
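`swap` relies on the same reasoning: only the mutator thread reads these fields, so plain reads are safe, but each write must be atomic so the concurrent marker always sees a valid pointer, and each slot gets a write barrier afterwards. A hedged sketch of what an `AtomicWriteSwap`-style helper might look like (the real WTF helper may differ):

```cpp
#include <atomic>

// Illustrative only; not the actual WTF implementation.
template <typename T>
void AtomicWriteSwapSketch(T& a, T& b) {
  T tmp = a;  // plain read: the mutator is the only reader of these fields
  // Writes are atomic so a concurrent marker never sees a torn value.
  std::atomic_ref<T>(a).store(b, std::memory_order_relaxed);
  std::atomic_ref<T>(b).store(tmp, std::memory_order_relaxed);
}
```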