Fix AABB computation for position compression to not depend on vertex order

The previous computation depended on the vertex order in two ways:

- If the first vertex was on the AABB boundary, the AABB would be
  inflated by the epsilon used for size clamping, because the box was
  seeded with an epsilon-sized extent at that vertex (see the sketch
  after this list)
- Every time the AABB was expanded, the end point was recomputed from
  the position and size, and the size was then reconstructed from the
  new end point, which resulted in slow floating-point drift.
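To make the first failure mode concrete, here is a minimal one-dimensional
sketch in plain C++ (not the Godot types; SMALL is a stand-in for the
SMALL_VEC3 epsilon): seeding the box with an epsilon-sized extent at the
first vertex inflates the upper bound only when that vertex happens to lie
on the final boundary, so the result depends on vertex order.

    #include <algorithm>
    #include <cstdio>

    const float SMALL = 0.0001f; // stand-in for the SMALL_VEC3 epsilon

    float upper_bound(const float *pts, int n) {
        // Old-style seeding: the box starts as [pts[0], pts[0] + SMALL],
        // then expands over the remaining points.
        float end = pts[0] + SMALL;
        for (int i = 1; i < n; i++) {
            end = std::max(end, pts[i]);
        }
        return end;
    }

    int main() {
        float order_a[] = { 1.0f, 0.0f }; // boundary vertex first
        float order_b[] = { 0.0f, 1.0f }; // boundary vertex last
        // Prints 1.000100 vs 1.000000: same points, different bound.
        printf("%f vs %f\n", upper_bound(order_a, 2), upper_bound(order_b, 2));
        return 0;
    }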

In isolation this may not seem to matter, but it means that the same
mesh with a different vertex order may produce a slightly different
AABB. This can be a significant issue for shadow meshes and their use
in the depth prepass, because compressed vertex positions are encoded
relative to the AABB. Shadow meshes reorder vertex data as part of the
deduplication process, appending one unique position at a time and
dropping the duplicates; this can result in a different AABB, which in
turn yields a different reconstructed vertex position during the depth
prepass, causing mesh self-occlusion.
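The fix sidesteps both issues by tracking min/max directly: floating-point
min and max introduce no rounding, so the result is bit-identical for any
permutation of the same vertices. A minimal, self-contained sketch of that
property (plain C++, one axis, not the Godot API):

    #include <algorithm>
    #include <cassert>
    #include <vector>

    // Accumulate a [min, max] interval over one axis; min/max never
    // round, so the order of the points cannot change the result.
    void compute_bounds(const std::vector<float> &points, float &lo, float &hi) {
        lo = hi = points[0];
        for (float p : points) {
            lo = std::min(lo, p);
            hi = std::max(hi, p);
        }
    }

    int main() {
        std::vector<float> a = { 0.1f, 0.7f, 0.3f, 0.5f };
        std::vector<float> b = { 0.5f, 0.3f, 0.7f, 0.1f }; // same points, reordered
        float lo_a, hi_a, lo_b, hi_b;
        compute_bounds(a, lo_a, hi_a);
        compute_bounds(b, lo_b, hi_b);
        assert(lo_a == lo_b && hi_a == hi_b); // bit-identical bounds
        return 0;
    }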
Arseny Kapoulkine 2024-07-03 18:31:07 -07:00
parent cae2f853dc
commit 4e9e35b58a
1 changed file with 18 additions and 16 deletions


@@ -347,6 +347,22 @@ void _get_tbn_from_axis_angle(const Vector3 &p_axis, float p_angle, Vector3 &r_n
 	r_normal = tbn.rows[2];
 }
 
+AABB _compute_aabb_from_points(const Vector3 *p_data, int p_length) {
+	if (p_length == 0) {
+		return AABB();
+	}
+
+	Vector3 min = p_data[0];
+	Vector3 max = p_data[0];
+
+	for (int i = 1; i < p_length; ++i) {
+		min = min.min(p_data[i]);
+		max = max.max(p_data[i]);
+	}
+
+	return AABB(min, max - min);
+}
+
 Error RenderingServer::_surface_set_data(Array p_arrays, uint64_t p_format, uint32_t *p_offsets, uint32_t p_vertex_stride, uint32_t p_normal_stride, uint32_t p_attrib_stride, uint32_t p_skin_stride, Vector<uint8_t> &r_vertex_array, Vector<uint8_t> &r_attrib_array, Vector<uint8_t> &r_skin_array, int p_vertex_array_len, Vector<uint8_t> &r_index_array, int p_index_array_len, AABB &r_aabb, Vector<AABB> &r_bone_aabb, Vector4 &r_uv_scale) {
 	uint8_t *vw = r_vertex_array.ptrw();
 	uint8_t *aw = r_attrib_array.ptrw();
@@ -440,18 +456,10 @@ Error RenderingServer::_surface_set_data(Array p_arrays, uint64_t p_format, uint
 				const Vector3 *src = array.ptr();
 
-				r_aabb = AABB();
+				r_aabb = _compute_aabb_from_points(src, p_vertex_array_len);
+				r_aabb.size = r_aabb.size.max(SMALL_VEC3);
 
 				if (p_format & ARRAY_FLAG_COMPRESS_ATTRIBUTES) {
-					// First we need to generate the AABB for the entire surface.
-					for (int i = 0; i < p_vertex_array_len; i++) {
-						if (i == 0) {
-							r_aabb = AABB(src[i], SMALL_VEC3);
-						} else {
-							r_aabb.expand_to(src[i]);
-						}
-					}
-
 					if (!(p_format & RS::ARRAY_FORMAT_NORMAL)) {
 						// Early out if we are only setting vertex positions.
 						for (int i = 0; i < p_vertex_array_len; i++) {
@@ -592,12 +600,6 @@ Error RenderingServer::_surface_set_data(Array p_arrays, uint64_t p_format, uint
 					float vector[3] = { (float)src[i].x, (float)src[i].y, (float)src[i].z };
 					memcpy(&vw[p_offsets[ai] + i * p_vertex_stride], vector, sizeof(float) * 3);
-
-					if (i == 0) {
-						r_aabb = AABB(src[i], SMALL_VEC3);
-					} else {
-						r_aabb.expand_to(src[i]);
-					}
 				}
 			}
 		}
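
For reference, a self-contained sketch of the flow the diff arrives at,
using stand-in types rather than Godot's Vector3/AABB (the SMALL_VEC3
value here is illustrative): one order-independent min/max pass over the
positions, followed by a single size clamp on the result, so the epsilon
no longer depends on which vertex came first.

    #include <algorithm>

    // Stand-ins for Godot's Vector3/AABB, just enough for the sketch.
    struct Vec3 {
        float x, y, z;
        Vec3 min(const Vec3 &o) const { return { std::min(x, o.x), std::min(y, o.y), std::min(z, o.z) }; }
        Vec3 max(const Vec3 &o) const { return { std::max(x, o.x), std::max(y, o.y), std::max(z, o.z) }; }
        Vec3 operator-(const Vec3 &o) const { return { x - o.x, y - o.y, z - o.z }; }
    };
    struct Box {
        Vec3 position, size;
    };

    const Vec3 SMALL_VEC3 = { 0.00001f, 0.00001f, 0.00001f }; // illustrative epsilon

    Box compute_aabb_from_points(const Vec3 *data, int length) {
        if (length == 0) {
            return Box{ { 0, 0, 0 }, { 0, 0, 0 } };
        }
        Vec3 lo = data[0], hi = data[0];
        for (int i = 1; i < length; ++i) {
            lo = lo.min(data[i]);
            hi = hi.max(data[i]);
        }
        return Box{ lo, hi - lo };
    }

    Box compute_clamped_aabb(const Vec3 *data, int length) {
        Box box = compute_aabb_from_points(data, length);
        box.size = box.size.max(SMALL_VEC3); // clamp once, after the pass
        return box;
    }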