@@ -2023,6 +2023,47 @@ void ThinLTOCodeGenerator::run() {
2023
2023
}
2024
2024
});
2025
2025
}
2026
+
2027
+ WrittenObjects.wait (AllModules);
2028
+
2029
+ {
2030
+ if (CacheLogging)
2031
+ CacheLogOS.applyLocked ([&](raw_ostream &OS) {
2032
+ OS << " Waiting for outstanding cache requests...\n " ;
2033
+ });
2034
+ ScopedDurationTimer T ([&](double Seconds) {
2035
+ if (CacheLogging)
2036
+ CacheLogOS.applyLocked ([&](raw_ostream &OS) {
2037
+ OS << " Handled outstanding cache requests in "
2038
+ << llvm::format (" %.6fs" , Seconds) << " \n " ;
2039
+ });
2040
+ });
2041
+ auto Start = std::chrono::steady_clock::now ();
2042
+ auto CacheTimeout = DeterministicCheck
2043
+ ? std::chrono::milliseconds::max ()
2044
+ : std::chrono::milliseconds (5000 );
2045
+
2046
+ if (!HandledCacheReads.waitFor (CacheTimeout, AllModules)) {
2047
+ // If we were unable to finish all cache reads in time, just request
2048
+ // their cancellation (we already have all objects written) and don't
2049
+ // bother writing to the cache (that would probably be even slower
2050
+ // than reading from it).
2051
+ GetCancelTok->requestCancellation ();
2052
+ } else {
2053
+ auto Now = std::chrono::steady_clock::now ();
2054
+ auto RemainingCacheTimeout = CacheTimeout - (Now - Start);
2055
+ // If we finished all cache reads in time, request writes.
2056
+ if (!HandledCacheWrites.waitFor (RemainingCacheTimeout, AllModules)) {
2057
+ // If we were unable to finish all cache writes in time, request
2058
+ // their cancellation. We don't want to hold up the link any longer.
2059
+ PutCancelTok->requestCancellation ();
2060
+ }
2061
+ }
2062
+
2063
+ if (DeterministicCheck)
2064
+ for (int count : ModulesOrdering)
2065
+ (void )Infos[count].Entry ->areLoadedAndWrittenResultsIdentical ();
2066
+ }
2026
2067
}
2027
2068
2028
2069
pruneCache (CacheOptions.Path , CacheOptions.Policy , ProducedBinaries);
0 commit comments