Merge pull request #83031 from lawnjelly/fix_64bit_glgets

[3.x] Fix glGet overflows by using 64 bit versions
Rémi Verschelde 2023-11-14 11:37:16 +01:00
commit 00b99bdab3
7 changed files with 52 additions and 10 deletions

View File

@@ -134,6 +134,15 @@ void RasterizerStorageGLES2::GLWrapper::reset() {
texture_unit_table.blank();
}
+int32_t RasterizerStorageGLES2::safe_gl_get_integer(unsigned int p_gl_param_name, int32_t p_max_accepted) {
+// There is no glGetInteger64v in the base GLES2 spec as far as I can see.
+// So we will just have a capped 32 bit version for GLES2.
+int32_t temp;
+glGetIntegerv(p_gl_param_name, &temp);
+temp = MIN(temp, p_max_accepted);
+return temp;
+}
void RasterizerStorageGLES2::bind_quad_array() const {
glBindBuffer(GL_ARRAY_BUFFER, resources.quadie);
glVertexAttribPointer(VS::ARRAY_VERTEX, 2, GL_FLOAT, GL_FALSE, sizeof(float) * 4, nullptr);
@@ -6316,7 +6325,7 @@ void RasterizerStorageGLES2::initialize() {
config.depth_type = GL_UNSIGNED_INT;
// Initialize GLWrapper early on, as required for any calls to glActiveTexture.
-glGetIntegerv(GL_MAX_COMBINED_TEXTURE_IMAGE_UNITS, &config.max_texture_image_units);
+config.max_texture_image_units = safe_gl_get_integer(GL_MAX_COMBINED_TEXTURE_IMAGE_UNITS, Config::max_desired_texture_image_units);
gl_wrapper.initialize(config.max_texture_image_units);
#ifdef GLES_OVER_GL
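
As a usage sketch of the new GLES2 helper above (not part of the commit; it assumes a current GL context and uses only identifiers introduced in this diff):

// With the default cap (INT32_MAX) the helper behaves like a plain glGetIntegerv read.
int32_t tex_size = RasterizerStorageGLES2::safe_gl_get_integer(GL_MAX_TEXTURE_SIZE);
// With an explicit cap the result is clamped, so it can safely size the small
// fixed texture unit table kept by GLWrapper.
int32_t units = RasterizerStorageGLES2::safe_gl_get_integer(
		GL_MAX_COMBINED_TEXTURE_IMAGE_UNITS,
		RasterizerStorageGLES2::Config::max_desired_texture_image_units); // never exceeds 64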

View File

@@ -65,6 +65,7 @@ public:
int max_vertex_texture_image_units;
int max_texture_image_units;
+static const int32_t max_desired_texture_image_units = 64;
int max_texture_size;
int max_cubemap_texture_size;
int max_viewport_dimensions[2];
@@ -1390,6 +1391,8 @@ public:
virtual String get_video_adapter_name() const;
virtual String get_video_adapter_vendor() const;
+static int32_t safe_gl_get_integer(unsigned int p_gl_param_name, int32_t p_max_accepted = INT32_MAX);
// NOTE : THESE SIZES ARE IN BYTES. BUFFER SIZES MAY NOT BE SPECIFIED IN BYTES SO REMEMBER TO CONVERT THEM WHEN CALLING.
void buffer_orphan_and_upload(unsigned int p_buffer_size_bytes, unsigned int p_offset_bytes, unsigned int p_data_size_bytes, const void *p_data, GLenum p_target = GL_ARRAY_BUFFER, GLenum p_usage = GL_DYNAMIC_DRAW, bool p_optional_orphan = false) const;
bool safe_buffer_sub_data(unsigned int p_total_buffer_size, GLenum p_target, unsigned int p_offset, unsigned int p_data_size, const void *p_data, unsigned int &r_offset_after) const;

View File

@@ -558,7 +558,8 @@ void ShaderGLES2::setup(
}
}
-glGetIntegerv(GL_MAX_COMBINED_TEXTURE_IMAGE_UNITS, &max_image_units);
+// The upper limit must match the version used in storage.
+max_image_units = RasterizerStorageGLES2::safe_gl_get_integer(GL_MAX_COMBINED_TEXTURE_IMAGE_UNITS, RasterizerStorageGLES2::Config::max_desired_texture_image_units);
}
void ShaderGLES2::finish() {

View File

@@ -5152,8 +5152,22 @@ void RasterizerSceneGLES3::initialize() {
{
//spot and omni ubos
-int max_ubo_size;
-glGetIntegerv(GL_MAX_UNIFORM_BLOCK_SIZE, &max_ubo_size);
+// SPECIAL CASE for GL_MAX_UNIFORM_BLOCK_SIZE.
+// Under ANGLE, in some situations this will return INT32_MAX + 1
+// (or very high values).
+// This seems to be because ANGLE supports system memory backing,
+// but the true hardware GPU max may be lower.
+// Afaik we have no way of querying this hardware supported value.
+// We also ideally want to take advantage of GPUs that *do* support large uniform
+// blocks in hardware (although this is probably not taken advantage of in Godot 3.x currently).
+// This logic is thus a compromise.
+int max_ubo_size = RasterizerStorageGLES3::safe_gl_get_integer(GL_MAX_UNIFORM_BLOCK_SIZE);
+// Some maximum we are likely to currently need, currently 1 meg.
+// Not really necessary but provides a small guard against excessive sizes.
+max_ubo_size = MIN(max_ubo_size, 1024 * 1024);
const int ubo_light_size = 160;
state.ubo_light_size = ubo_light_size;
state.max_ubo_lights = MIN(render_list.max_lights, max_ubo_size / ubo_light_size);
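
For a worked example of the sizing above, here is a small standalone sketch; the 2 GiB driver report (the ANGLE case described in the comments) and the 4096-light render list limit are illustrative assumptions, not values taken from the engine:

#include <algorithm>
#include <cstdint>
#include <cstdio>

int main() {
	// Assumed driver report: INT32_MAX + 1 bytes, which a 32-bit read cannot hold.
	const int64_t reported = 2147483648LL;
	// safe_gl_get_integer(GL_MAX_UNIFORM_BLOCK_SIZE): 64-bit read, clamped to int32 range.
	int32_t max_ubo_size = (int32_t)std::min<int64_t>(reported, INT32_MAX); // 2147483647
	// Extra 1 MiB guard applied by the code above.
	max_ubo_size = std::min<int32_t>(max_ubo_size, 1024 * 1024); // 1048576
	const int32_t ubo_light_size = 160;
	const int32_t max_lights = 4096; // assumed stand-in for render_list.max_lights
	const int32_t max_ubo_lights = std::min<int32_t>(max_lights, max_ubo_size / ubo_light_size);
	printf("max_ubo_lights = %d (UBO could hold %d)\n", max_ubo_lights, max_ubo_size / ubo_light_size);
	// Prints: max_ubo_lights = 4096 (UBO could hold 6553)
	return 0;
}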

View File

@@ -152,6 +152,13 @@ void RasterizerStorageGLES3::GLWrapper::reset() {
texture_unit_table.blank();
}
+int32_t RasterizerStorageGLES3::safe_gl_get_integer(unsigned int p_gl_param_name, int32_t p_max_accepted) {
+int64_t temp;
+glGetInteger64v(p_gl_param_name, &temp);
+temp = MIN(temp, (int64_t)p_max_accepted);
+return temp;
+}
Ref<Image> RasterizerStorageGLES3::_get_gl_image_and_format(const Ref<Image> &p_image, Image::Format p_format, uint32_t p_flags, Image::Format &r_real_format, GLenum &r_gl_format, GLenum &r_gl_internal_format, GLenum &r_gl_type, bool &r_compressed, bool &r_srgb, bool p_force_decompress) const {
r_compressed = false;
r_gl_format = 0;
@@ -8143,8 +8150,7 @@ void RasterizerStorageGLES3::initialize() {
///
{
-int max_extensions = 0;
-glGetIntegerv(GL_NUM_EXTENSIONS, &max_extensions);
+int max_extensions = safe_gl_get_integer(GL_NUM_EXTENSIONS);
for (int i = 0; i < max_extensions; i++) {
const GLubyte *s = glGetStringi(GL_EXTENSIONS, i);
if (!s) {
@@ -8157,8 +8163,12 @@
config.shrink_textures_x2 = false;
config.use_fast_texture_filter = int(ProjectSettings::get_singleton()->get("rendering/quality/filters/use_nearest_mipmap_filter"));
+// Cap max_texture_image_units as we don't need large numbers of units,
+// just in case an implementation provides a large number, as we want to keep
+// the table in gl_wrapper small.
+config.max_texture_image_units = safe_gl_get_integer(GL_MAX_TEXTURE_IMAGE_UNITS, Config::max_desired_texture_image_units);
// Initialize GLWrapper early on, as required for any calls to glActiveTexture.
-glGetIntegerv(GL_MAX_TEXTURE_IMAGE_UNITS, &config.max_texture_image_units);
gl_wrapper.initialize(config.max_texture_image_units);
config.etc_supported = config.extensions.has("GL_OES_compressed_ETC1_RGB8_texture");
@@ -8392,8 +8402,8 @@ void RasterizerStorageGLES3::initialize() {
glBindTexture(GL_TEXTURE_2D, 0);
}
-glGetIntegerv(GL_MAX_TEXTURE_SIZE, &config.max_texture_size);
-glGetIntegerv(GL_MAX_CUBE_MAP_TEXTURE_SIZE, &config.max_cubemap_texture_size);
+config.max_texture_size = safe_gl_get_integer(GL_MAX_TEXTURE_SIZE);
+config.max_cubemap_texture_size = safe_gl_get_integer(GL_MAX_CUBE_MAP_TEXTURE_SIZE);
config.use_rgba_2d_shadows = !config.framebuffer_float_supported;
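
For context on the overflow this file guards against, a brief sketch of the old read versus the new one (illustrative only, not part of the commit; it assumes a current GL 3.x context, and MIN here is the engine macro already used in this diff):

// Old pattern (removed in this PR): a 32-bit GLint cannot hold values of
// INT32_MAX + 1 or above, which ANGLE can report, so the result overflows.
GLint narrow = 0;
glGetIntegerv(GL_MAX_UNIFORM_BLOCK_SIZE, &narrow);

// New pattern: read the full 64-bit value, then clamp it deliberately.
GLint64 wide = 0;
glGetInteger64v(GL_MAX_UNIFORM_BLOCK_SIZE, &wide);
int32_t usable = (int32_t)MIN(wide, (GLint64)INT32_MAX); // what safe_gl_get_integer returns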

View File

@@ -97,6 +97,7 @@ public:
float anisotropic_level;
int max_texture_image_units;
+static const int32_t max_desired_texture_image_units = 64;
int max_texture_size;
int max_cubemap_texture_size;
@@ -1530,6 +1531,8 @@ public:
void initialize();
void finalize();
+static int32_t safe_gl_get_integer(unsigned int p_gl_param_name, int32_t p_max_accepted = INT32_MAX);
virtual bool has_os_feature(const String &p_feature) const;
virtual void update_dirty_resources();

View File

@@ -34,6 +34,7 @@
#include "core/os/os.h"
#include "core/print_string.h"
#include "core/threaded_callable_queue.h"
#include "drivers/gles3/rasterizer_storage_gles3.h"
#include "drivers/gles3/shader_cache_gles3.h"
#include "servers/visual_server.h"
@@ -1216,7 +1217,8 @@ void ShaderGLES3::setup(const char **p_conditional_defines, int p_conditional_co
}
}
-glGetIntegerv(GL_MAX_TEXTURE_IMAGE_UNITS, &max_image_units);
+// The upper limit must match the version used in storage.
+max_image_units = RasterizerStorageGLES3::safe_gl_get_integer(GL_MAX_TEXTURE_IMAGE_UNITS, RasterizerStorageGLES3::Config::max_desired_texture_image_units);
}
void ShaderGLES3::init_async_compilation() {