@@ -234,8 +234,17 @@ func hashExecutable() (hash [32]byte, err error) {
// process, possibly running a different version of gopls, possibly
// running concurrently.
func gc(goplsDir string) {
-	const period = 1 * time.Minute           // period between collections
-	const statDelay = 100 * time.Microsecond // delay between stats to smooth out I/O
+	const period = 1 * time.Minute // period between collections
+	// Sleep statDelay*batchSize between stats to smooth out I/O.
+	//
+	// The constants below were chosen using the following heuristics:
+	//  - 1GB of filecache is on the order of ~100-200k files, in which case
+	//    100μs delay per file introduces 10-20s of additional walk time, less
+	//    than the 1m gc period.
+	//  - Processing batches of stats at once is much more efficient than
+	//    sleeping after every stat (due to OS optimizations).
+	const statDelay = 100 * time.Microsecond // average delay between stats, to smooth out I/O
+	const batchSize = 1000                   // # of stats to process before sleeping
	const maxAge = 5 * 24 * time.Hour // max time since last access before file is deleted

	// The macOS filesystem is strikingly slow, at least on some machines.
@@ -261,6 +270,7 @@ func gc(goplsDir string) {
		stat os.FileInfo
	}
	var files []item
+	start := time.Now()
	var total int64 // bytes
	_ = filepath.Walk(goplsDir, func(path string, stat os.FileInfo, err error) error {
		if err != nil {
@@ -285,7 +295,12 @@ func gc(goplsDir string) {
			} else {
				files = append(files, item{path, stat})
				total += stat.Size()
-				time.Sleep(statDelay)
+				if debug && len(files)%1000 == 0 {
+					log.Printf("filecache: checked %d files in %v", len(files), time.Since(start))
+				}
+				if len(files)%batchSize == 0 {
+					time.Sleep(batchSize * statDelay)
+				}
			}
		}
		return nil
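
For reference, here is a minimal standalone sketch (not part of the change above) of the batched-sleep pattern the comment describes: rather than sleeping statDelay after every stat, it sleeps batchSize*statDelay once per batch, so the total added walk time is the same but far fewer sleep calls are made. The root directory and the constants are illustrative only and are not taken from gopls.

package main

import (
	"log"
	"os"
	"path/filepath"
	"time"
)

func main() {
	const (
		statDelay = 100 * time.Microsecond // average delay per stat
		batchSize = 1000                   // stats processed between sleeps
	)
	root := os.TempDir() // illustrative directory to walk

	start := time.Now()
	n := 0
	_ = filepath.Walk(root, func(path string, stat os.FileInfo, err error) error {
		if err != nil || stat.IsDir() {
			return nil // ignore errors and directories, as the gc walk does
		}
		n++
		// One longer sleep per batch costs the same total delay as
		// statDelay per file, but avoids a sleep call per stat.
		if n%batchSize == 0 {
			time.Sleep(batchSize * statDelay)
		}
		return nil
	})
	log.Printf("walked %d files in %v", n, time.Since(start))
}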