diff --git a/SConstruct b/SConstruct
index 178f2d86a20..b1819a1dab5 100644
--- a/SConstruct
+++ b/SConstruct
@@ -1092,7 +1092,6 @@ if "check_c_headers" in env:
             env.AppendUnique(CPPDEFINES=[headers[header]])
 
 
-# FIXME: This method mixes both cosmetic progress stuff and cache handling...
 methods.show_progress(env)
 # TODO: replace this with `env.Dump(format="json")`
 # once we start requiring SCons 4.0 as min version.
@@ -1124,3 +1123,5 @@ def purge_flaky_files():
 
 
 atexit.register(purge_flaky_files)
+
+methods.clean_cache(env)
diff --git a/methods.py b/methods.py
index bfd08cfc7b5..36462fd30be 100644
--- a/methods.py
+++ b/methods.py
@@ -906,21 +906,18 @@ def show_progress(env):
     node_count_fname = str(env.Dir("#")) + "/.scons_node_count"
 
     import math
-    import time
 
     class cache_progress:
-        # The default is 1 GB cache and 12 hours half life
-        def __init__(self, path=None, limit=1073741824, half_life=43200):
+        # The default is 1 GB cache
+        def __init__(self, path=None, limit=pow(1024, 3)):
             self.path = path
             self.limit = limit
-            self.exponent_scale = math.log(2) / half_life
             if env["verbose"] and path is not None:
                 screen.write(
                     "Current cache limit is {} (used: {})\n".format(
                         self.convert_size(limit), self.convert_size(self.get_size(path))
                     )
                 )
-            self.delete(self.file_list())
 
         def __call__(self, node, *args, **kw):
             nonlocal node_count, node_count_max, node_count_interval, node_count_fname, show_progress
@@ -937,12 +934,66 @@ def show_progress(env):
                     screen.write("\r[Initial build] ")
                     screen.flush()
 
+        def convert_size(self, size_bytes):
+            if size_bytes == 0:
+                return "0 bytes"
+            size_name = ("bytes", "KB", "MB", "GB", "TB", "PB", "EB", "ZB", "YB")
+            i = int(math.floor(math.log(size_bytes, 1024)))
+            p = math.pow(1024, i)
+            s = round(size_bytes / p, 2)
+            return "%s %s" % (int(s) if i == 0 else s, size_name[i])
+
+        def get_size(self, start_path="."):
+            total_size = 0
+            for dirpath, dirnames, filenames in os.walk(start_path):
+                for f in filenames:
+                    fp = os.path.join(dirpath, f)
+                    total_size += os.path.getsize(fp)
+            return total_size
+
+    def progress_finish(target, source, env):
+        nonlocal node_count, progressor
+        try:
+            with open(node_count_fname, "w", encoding="utf-8", newline="\n") as f:
+                f.write("%d\n" % node_count)
+        except Exception:
+            pass
+
+    try:
+        with open(node_count_fname, "r", encoding="utf-8") as f:
+            node_count_max = int(f.readline())
+    except Exception:
+        pass
+
+    cache_directory = os.environ.get("SCONS_CACHE")
+    # Simple cache pruning, attached to SCons' progress callback. Trim the
+    # cache directory to a size not larger than cache_limit.
+    cache_limit = float(os.getenv("SCONS_CACHE_LIMIT", 1024)) * 1024 * 1024
+    progressor = cache_progress(cache_directory, cache_limit)
+    Progress(progressor, interval=node_count_interval)
+
+    progress_finish_command = Command("progress_finish", [], progress_finish)
+    AlwaysBuild(progress_finish_command)
+
+
+def clean_cache(env):
+    import atexit
+    import time
+
+    class cache_clean:
+        def __init__(self, path=None, limit=pow(1024, 3)):
+            self.path = path
+            self.limit = limit
+
+        def clean(self):
+            self.delete(self.file_list())
+
         def delete(self, files):
             if len(files) == 0:
                 return
             if env["verbose"]:
                 # Utter something
-                screen.write("\rPurging %d %s from cache...\n" % (len(files), len(files) > 1 and "files" or "file"))
+                print("Purging %d %s from cache..." % (len(files), "files" if len(files) > 1 else "file"))
             [os.remove(f) for f in files]
 
         def file_list(self):
@@ -976,47 +1027,20 @@ def show_progress(env):
             else:
                 return [x[0] for x in file_stat[mark:]]
 
-        def convert_size(self, size_bytes):
-            if size_bytes == 0:
-                return "0 bytes"
-            size_name = ("bytes", "KB", "MB", "GB", "TB", "PB", "EB", "ZB", "YB")
-            i = int(math.floor(math.log(size_bytes, 1024)))
-            p = math.pow(1024, i)
-            s = round(size_bytes / p, 2)
-            return "%s %s" % (int(s) if i == 0 else s, size_name[i])
-
-        def get_size(self, start_path="."):
-            total_size = 0
-            for dirpath, dirnames, filenames in os.walk(start_path):
-                for f in filenames:
-                    fp = os.path.join(dirpath, f)
-                    total_size += os.path.getsize(fp)
-            return total_size
-
-    def progress_finish(target, source, env):
-        nonlocal node_count, progressor
+    def cache_finally():
+        nonlocal cleaner
         try:
-            with open(node_count_fname, "w", encoding="utf-8", newline="\n") as f:
-                f.write("%d\n" % node_count)
-            progressor.delete(progressor.file_list())
+            cleaner.clean()
         except Exception:
             pass
 
-    try:
-        with open(node_count_fname, "r", encoding="utf-8") as f:
-            node_count_max = int(f.readline())
-    except Exception:
-        pass
-
     cache_directory = os.environ.get("SCONS_CACHE")
     # Simple cache pruning, attached to SCons' progress callback. Trim the
     # cache directory to a size not larger than cache_limit.
     cache_limit = float(os.getenv("SCONS_CACHE_LIMIT", 1024)) * 1024 * 1024
-    progressor = cache_progress(cache_directory, cache_limit)
-    Progress(progressor, interval=node_count_interval)
+    cleaner = cache_clean(cache_directory, cache_limit)
 
-    progress_finish_command = Command("progress_finish", [], progress_finish)
-    AlwaysBuild(progress_finish_command)
+    atexit.register(cache_finally)
 
 
 def dump(env):