@@ -2023,6 +2023,46 @@ void ThinLTOCodeGenerator::run() {
        }
      });
    }
+
+    WrittenObjects.wait(AllModules);
+
+    {
+      CacheLogOS.applyLocked([&](raw_ostream &OS) {
+        OS << "Waiting for outstanding cache requests...\n";
+      });
+      ScopedDurationTimer T([&](double Seconds) {
+        if (CacheLogging)
+          CacheLogOS.applyLocked([&](raw_ostream &OS) {
+            OS << "Handled outstanding cache requests in "
+               << llvm::format("%.6fs", Seconds) << "\n";
+          });
+      });
+      auto Start = std::chrono::steady_clock::now();
+      auto CacheTimeout = DeterministicCheck
+                              ? std::chrono::milliseconds::max()
+                              : std::chrono::milliseconds(5000);
+
+      if (!HandledCacheReads.waitFor(CacheTimeout, AllModules)) {
+        // If we were unable to finish all cache reads in time, just request
+        // their cancellation (we already have all objects written) and don't
+        // bother writing to the cache (that would probably be even slower
+        // than reading from it).
+        GetCancelTok->requestCancellation();
+      } else {
+        auto Now = std::chrono::steady_clock::now();
+        auto RemainingCacheTimeout = CacheTimeout - (Now - Start);
+        // If we finished all cache reads in time, request writes.
+        if (!HandledCacheWrites.waitFor(RemainingCacheTimeout, AllModules)) {
+          // If we were unable to finish all cache writes in time, request
+          // their cancellation. We don't want to hold up the link any longer.
+          PutCancelTok->requestCancellation();
+        }
+      }
+
+      if (DeterministicCheck)
+        for (int count : ModulesOrdering)
+          (void)Infos[count].Entry->areLoadedAndWrittenResultsIdentical();
+    }
  }

  pruneCache(CacheOptions.Path, CacheOptions.Policy, ProducedBinaries);
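The heart of the change is a time-budgeted wait: block on outstanding cache reads up to CacheTimeout, spend whatever budget remains on the cache writes, and request cancellation of anything that does not finish so the link is not held up. Below is a minimal standalone sketch (not part of the patch) of that pattern using only the C++ standard library. CancellationToken and runStage are illustrative stand-ins, not LLVM or libLTO APIs, and the durations are made up for the example.

#include <atomic>
#include <chrono>
#include <future>
#include <iostream>
#include <thread>

// Illustrative stand-in for the patch's cancellation tokens (GetCancelTok /
// PutCancelTok); cooperative: the worker polls isCancelled().
struct CancellationToken {
  std::atomic<bool> Cancelled{false};
  void requestCancellation() { Cancelled = true; }
  bool isCancelled() const { return Cancelled; }
};

// Simulates a stage (cache reads or writes) that runs for Duration unless
// cancelled, then signals completion through the promise.
static void runStage(std::chrono::milliseconds Duration,
                     CancellationToken &Tok, std::promise<void> Done) {
  auto Deadline = std::chrono::steady_clock::now() + Duration;
  while (std::chrono::steady_clock::now() < Deadline && !Tok.isCancelled())
    std::this_thread::sleep_for(std::chrono::milliseconds(10));
  Done.set_value();
}

int main() {
  using namespace std::chrono;
  CancellationToken GetTok, PutTok;
  std::promise<void> ReadsPromise, WritesPromise;
  auto ReadsDone = ReadsPromise.get_future();
  auto WritesDone = WritesPromise.get_future();
  std::thread Reads(runStage, milliseconds(200), std::ref(GetTok),
                    std::move(ReadsPromise));
  std::thread Writes(runStage, milliseconds(200), std::ref(PutTok),
                     std::move(WritesPromise));

  const auto Budget = milliseconds(500); // analogous to the patch's 5000 ms
  auto Start = steady_clock::now();

  if (ReadsDone.wait_for(Budget) != std::future_status::ready) {
    // Reads blew the budget: cancel them and skip the writes entirely,
    // mirroring the "don't bother writing to the cache" branch above.
    GetTok.requestCancellation();
    PutTok.requestCancellation();
  } else {
    // Spend whatever budget is left waiting for the writes.
    auto Remaining =
        Budget - duration_cast<milliseconds>(steady_clock::now() - Start);
    if (WritesDone.wait_for(Remaining) != std::future_status::ready)
      PutTok.requestCancellation();
  }

  Reads.join();
  Writes.join();
  std::cout << "reads cancelled: " << GetTok.isCancelled()
            << ", writes cancelled: " << PutTok.isCancelled() << "\n";
  return 0;
}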