SCons: Fix running 'scons' without platform argument
The cache and progress logic assumed 'env' to be defined, but it is only defined when the selected platform is in the supported list. Fixes #17497.
parent ea204628ad
commit a44f9ca545

SConstruct — 210 changed lines
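The failure mode is easy to reproduce outside of SCons: referencing a name that was never bound raises a NameError, and the commit's fix is to guard the whole cache/progress block on the name being present. A minimal sketch of the pattern follows; the platform list and the dict standing in for the SConstruct Environment are hypothetical illustrations, not part of the commit:

import sys

# 'env' is only bound when a supported platform was selected
# (hypothetical stand-in for the SConstruct Environment setup).
if len(sys.argv) > 1 and sys.argv[1] in ("x11", "windows", "osx"):
    env = {"progress": True}

# Without the guard, the next line raises NameError when no platform
# argument was given, which is what #17497 reported:
#     show_progress = env["progress"] and sys.stdout.isatty()

# The fix: only touch 'env' when it is actually defined.
if 'env' in locals():
    show_progress = env["progress"] and sys.stdout.isatty()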
@@ -491,122 +491,120 @@ else:
     for x in platform_list:
         print("\t" + x)
     print("\nPlease run scons again with argument: platform=<string>")
     sys.exit(255)
 
 
-screen = sys.stdout
-node_count = 0
-node_count_max = 0
-node_count_interval = 1
-if ('env' in locals()):
-    node_count_fname = str(env.Dir('#')) + '/.scons_node_count'
-# Progress reporting is not available in non-TTY environments since it
-# messes with the output (for example, when writing to a file)
-if sys.stdout.isatty():
-    show_progress = env['progress']
-else:
-    show_progress = False
+# The following only makes sense when the env is defined, and assumes it is
+if 'env' in locals():
+    screen = sys.stdout
+    # Progress reporting is not available in non-TTY environments since it
+    # messes with the output (for example, when writing to a file)
+    show_progress = (env['progress'] and sys.stdout.isatty())
+    node_count = 0
+    node_count_max = 0
+    node_count_interval = 1
+    node_count_fname = str(env.Dir('#')) + '/.scons_node_count'
 
-import time, math
+    import time, math
 
-class cache_progress:
-    # The default is 1 GB cache and 12 hours half life
-    def __init__(self, path = None, limit = 1073741824, half_life = 43200):
-        self.path = path
-        self.limit = limit
-        self.exponent_scale = math.log(2) / half_life
-        if env['verbose'] and path != None:
-            screen.write('Current cache limit is ' + self.convert_size(limit) + ' (used: ' + self.convert_size(self.get_size(path)) + ')\n')
-        self.delete(self.file_list())
+    class cache_progress:
+        # The default is 1 GB cache and 12 hours half life
+        def __init__(self, path = None, limit = 1073741824, half_life = 43200):
+            self.path = path
+            self.limit = limit
+            self.exponent_scale = math.log(2) / half_life
+            if env['verbose'] and path != None:
+                screen.write('Current cache limit is ' + self.convert_size(limit) + ' (used: ' + self.convert_size(self.get_size(path)) + ')\n')
+            self.delete(self.file_list())
 
-    def __call__(self, node, *args, **kw):
-        global node_count, node_count_max, node_count_interval, node_count_fname, show_progress
-        if show_progress:
-            # Print the progress percentage
-            node_count += node_count_interval
-            if (node_count_max > 0 and node_count <= node_count_max):
-                screen.write('\r[%3d%%] ' % (node_count * 100 / node_count_max))
-                screen.flush()
-            elif (node_count_max > 0 and node_count > node_count_max):
-                screen.write('\r[100%] ')
-                screen.flush()
-            else:
-                screen.write('\r[Initial build] ')
-                screen.flush()
+        def __call__(self, node, *args, **kw):
+            global node_count, node_count_max, node_count_interval, node_count_fname, show_progress
+            if show_progress:
+                # Print the progress percentage
+                node_count += node_count_interval
+                if (node_count_max > 0 and node_count <= node_count_max):
+                    screen.write('\r[%3d%%] ' % (node_count * 100 / node_count_max))
+                    screen.flush()
+                elif (node_count_max > 0 and node_count > node_count_max):
+                    screen.write('\r[100%] ')
+                    screen.flush()
+                else:
+                    screen.write('\r[Initial build] ')
+                    screen.flush()
 
-    def delete(self, files):
-        if len(files) == 0:
-            return
-        if env['verbose']:
-            # Utter something
-            screen.write('\rPurging %d %s from cache...\n' % (len(files), len(files) > 1 and 'files' or 'file'))
-        [os.remove(f) for f in files]
+        def delete(self, files):
+            if len(files) == 0:
+                return
+            if env['verbose']:
+                # Utter something
+                screen.write('\rPurging %d %s from cache...\n' % (len(files), len(files) > 1 and 'files' or 'file'))
+            [os.remove(f) for f in files]
 
-    def file_list(self):
-        if self.path == None:
-            # Nothing to do
-            return []
-        # Gather a list of (filename, (size, atime)) within the
-        # cache directory
-        file_stat = [(x, os.stat(x)[6:8]) for x in glob.glob(os.path.join(self.path, '*', '*'))]
-        if file_stat == []:
-            # Nothing to do
-            return []
-        # Weight the cache files by size (assumed to be roughly
-        # proportional to the recompilation time) times an exponential
-        # decay since the ctime, and return a list with the entries
-        # (filename, size, weight).
-        current_time = time.time()
-        file_stat = [(x[0], x[1][0], (current_time - x[1][1])) for x in file_stat]
-        # Sort by the most resently accessed files (most sensible to keep) first
-        file_stat.sort(key=lambda x: x[2])
-        # Search for the first entry where the storage limit is
-        # reached
-        sum, mark = 0, None
-        for i,x in enumerate(file_stat):
-            sum += x[1]
-            if sum > self.limit:
-                mark = i
-                break
-        if mark == None:
-            return []
-        else:
-            return [x[0] for x in file_stat[mark:]]
+        def file_list(self):
+            if self.path == None:
+                # Nothing to do
+                return []
+            # Gather a list of (filename, (size, atime)) within the
+            # cache directory
+            file_stat = [(x, os.stat(x)[6:8]) for x in glob.glob(os.path.join(self.path, '*', '*'))]
+            if file_stat == []:
+                # Nothing to do
+                return []
+            # Weight the cache files by size (assumed to be roughly
+            # proportional to the recompilation time) times an exponential
+            # decay since the ctime, and return a list with the entries
+            # (filename, size, weight).
+            current_time = time.time()
+            file_stat = [(x[0], x[1][0], (current_time - x[1][1])) for x in file_stat]
+            # Sort by the most resently accessed files (most sensible to keep) first
+            file_stat.sort(key=lambda x: x[2])
+            # Search for the first entry where the storage limit is
+            # reached
+            sum, mark = 0, None
+            for i,x in enumerate(file_stat):
+                sum += x[1]
+                if sum > self.limit:
+                    mark = i
+                    break
+            if mark == None:
+                return []
+            else:
+                return [x[0] for x in file_stat[mark:]]
 
-    def convert_size(self, size_bytes):
-        if size_bytes == 0:
-            return "0 bytes"
-        size_name = ("bytes", "KB", "MB", "GB", "TB", "PB", "EB", "ZB", "YB")
-        i = int(math.floor(math.log(size_bytes, 1024)))
-        p = math.pow(1024, i)
-        s = round(size_bytes / p, 2)
-        return "%s %s" % (int(s) if i == 0 else s, size_name[i])
+        def convert_size(self, size_bytes):
+            if size_bytes == 0:
+                return "0 bytes"
+            size_name = ("bytes", "KB", "MB", "GB", "TB", "PB", "EB", "ZB", "YB")
+            i = int(math.floor(math.log(size_bytes, 1024)))
+            p = math.pow(1024, i)
+            s = round(size_bytes / p, 2)
+            return "%s %s" % (int(s) if i == 0 else s, size_name[i])
 
-    def get_size(self, start_path = '.'):
-        total_size = 0
-        for dirpath, dirnames, filenames in os.walk(start_path):
-            for f in filenames:
-                fp = os.path.join(dirpath, f)
-                total_size += os.path.getsize(fp)
-        return total_size
+        def get_size(self, start_path = '.'):
+            total_size = 0
+            for dirpath, dirnames, filenames in os.walk(start_path):
+                for f in filenames:
+                    fp = os.path.join(dirpath, f)
+                    total_size += os.path.getsize(fp)
+            return total_size
 
-def progress_finish(target, source, env):
-    global node_count, progressor
-    with open(node_count_fname, 'w') as f:
-        f.write('%d\n' % node_count)
-    progressor.delete(progressor.file_list())
+    def progress_finish(target, source, env):
+        global node_count, progressor
+        with open(node_count_fname, 'w') as f:
+            f.write('%d\n' % node_count)
+        progressor.delete(progressor.file_list())
 
-try:
-    with open(node_count_fname) as f:
-        node_count_max = int(f.readline())
-except:
-    pass
-cache_directory = os.environ.get("SCONS_CACHE")
-# Simple cache pruning, attached to SCons' progress callback. Trim the
-# cache directory to a size not larger than cache_limit.
-cache_limit = float(os.getenv("SCONS_CACHE_LIMIT", 1024)) * 1024 * 1024
-progressor = cache_progress(cache_directory, cache_limit)
-Progress(progressor, interval = node_count_interval)
+    try:
+        with open(node_count_fname) as f:
+            node_count_max = int(f.readline())
+    except:
+        pass
+    cache_directory = os.environ.get("SCONS_CACHE")
+    # Simple cache pruning, attached to SCons' progress callback. Trim the
+    # cache directory to a size not larger than cache_limit.
+    cache_limit = float(os.getenv("SCONS_CACHE_LIMIT", 1024)) * 1024 * 1024
+    progressor = cache_progress(cache_directory, cache_limit)
+    Progress(progressor, interval = node_count_interval)
 
-progress_finish_command = Command('progress_finish', [], progress_finish)
-AlwaysBuild(progress_finish_command)
+    progress_finish_command = Command('progress_finish', [], progress_finish)
+    AlwaysBuild(progress_finish_command)
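For context, the block being moved implements the cache pruning that the comments in file_list() describe: stat every cached object, keep the most recently accessed files, and return everything past the size limit for deletion. A self-contained sketch of that core idea, under the assumption that cached objects live at <cache>/<prefix>/<hash> as the glob in the real code implies; the function name is hypothetical, and the real logic lives in the cache_progress class above:

import glob
import os
import time

def files_to_purge(cache_path, limit_bytes):
    # Gather (filename, size, seconds since last access) for every
    # cached object matched by <cache>/<prefix>/<hash>.
    entries = []
    for path in glob.glob(os.path.join(cache_path, '*', '*')):
        st = os.stat(path)
        entries.append((path, st.st_size, time.time() - st.st_atime))

    # Most recently accessed first: those are the most sensible to keep.
    entries.sort(key=lambda e: e[2])

    # Accumulate sizes until the limit is exceeded; everything from
    # that point on is returned for deletion.
    total = 0
    for i, (path, size, age) in enumerate(entries):
        total += size
        if total > limit_bytes:
            return [e[0] for e in entries[i:]]
    return []

# Hypothetical usage, mirroring the SCONS_CACHE and SCONS_CACHE_LIMIT
# environment variables read at the bottom of the diff:
#   for f in files_to_purge(os.environ["SCONS_CACHE"], 1024 * 1024 * 1024):
#       os.remove(f)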