libktx: Update to 4.3.2

This commit is contained in:
Rémi Verschelde 2024-04-05 10:44:32 +02:00
parent f6a78f83aa
commit d402f5ecf2
No known key found for this signature in database
GPG Key ID: C3336907360768E1
14 changed files with 997 additions and 488 deletions

View File

@ -22,6 +22,7 @@ thirdparty_sources = [
"lib/texture1.c",
"lib/texture2.c",
"lib/vkformat_check.c",
"lib/vkformat_typesize.c",
"lib/dfdutils/createdfd.c",
"lib/dfdutils/colourspaces.c",
"lib/dfdutils/interpretdfd.c",

10
thirdparty/README.md vendored
View File

@ -432,18 +432,18 @@ Files extracted from upstream source:
## libktx
- Upstream: https://github.com/KhronosGroup/KTX-Software
- Version: 4.3.1 (c0214158d551cfc779624b0f84130bcbbefef59a, 2024)
- Version: 4.3.2 (91ace88675ac59a97e55d0378a6602a9ae6b98bd, 2024)
- License: Apache-2.0
Files extracted from upstream source:
- `LICENSE.md`
- `include/*`
- `include/`
- `lib/dfdutils/LICENSE.adoc` as `LICENSE.dfdutils.adoc` (in root)
- `lib/dfdutils/LICENSES/Apache-2.0.txt` as `Apache-2.0.txt` (in root)
- `lib/dfdutils/{KHR/*,dfd.h,colourspaces.c,createdfd.c,interpretdfd.c,printdfd.c,queries.c,dfd2vk.inl,vk2dfd.*}`
- `lib/{basis_sgd.h,formatsize.h,gl_format.h,ktxint.h,uthash.h,vk_format.h,vkformat_enum.h,checkheader.c,swap.c,hashlist.c,vkformat_check.c,basis_transcode.cpp,miniz_wrapper.cpp,filestream.*,memstream.*,texture*}`
- `other_include/KHR/*`
- `lib/dfdutils/{KHR/,dfd.h,colourspaces.c,createdfd.c,interpretdfd.c,printdfd.c,queries.c,dfd2vk.inl,vk2dfd.*}`
- `lib/{basis_sgd.h,formatsize.h,gl_format.h,ktxint.h,uthash.h,vk_format.h,vkformat_enum.h,checkheader.c,swap.c,hashlist.c,vkformat_check.c,vkformat_typesize.c,basis_transcode.cpp,miniz_wrapper.cpp,filestream.*,memstream.*,texture*}`
- `other_include/KHR/`
- `utils/unused.h`
Some Godot-specific changes are applied via patches included in the `patches` folder.

View File

@ -94,6 +94,10 @@ static uint32_t setChannelFlags(uint32_t channel, enum VkSuffix suffix)
channel |= KHR_DF_SAMPLE_DATATYPE_LINEAR;
}
break;
case s_S10_5:
channel |=
KHR_DF_SAMPLE_DATATYPE_SIGNED;
break;
}
return channel;
}
@ -109,7 +113,6 @@ static void writeSample(uint32_t *DFD, int sampleNo, int channel,
float f;
} lower, upper;
uint32_t *sample = DFD + 1 + KHR_DF_WORD_SAMPLESTART + sampleNo * KHR_DF_WORD_SAMPLEWORDS;
if (channel == 3) channel = KHR_DF_CHANNEL_RGBSDA_ALPHA;
if (channel == 3) channel = KHR_DF_CHANNEL_RGBSDA_ALPHA;
channel = setChannelFlags(channel, suffix);
@ -159,6 +162,10 @@ static void writeSample(uint32_t *DFD, int sampleNo, int channel,
upper.f = 1.0f;
lower.f = 0.0f;
break;
case s_S10_5:
assert(bits == 16 && "Format with this suffix must be 16 bits per channel.");
upper.i = 32;
lower.i = ~upper.i + 1; // -32
}
sample[KHR_DF_SAMPLEWORD_SAMPLELOWER] = lower.i;
sample[KHR_DF_SAMPLEWORD_SAMPLEUPPER] = upper.i;
@ -230,8 +237,9 @@ uint32_t *createDFDUnpacked(int bigEndian, int numChannels, int bytes,
* @param bits[] An array of length numChannels.
* Each entry is the number of bits composing the channel, in
* order starting at bit 0 of the packed type.
* @param paddings[] An array of length numChannels.
* Each entry is the number of padding bits after each channel.
* @param shiftBits[] An array of length numChannels.
* Each entry is the number of bits each channel is shifted
* and thus padded with insignificant bits.
* @param channels[] An array of length numChannels.
* Each entry enumerates the channel type: 0 = red, 1 = green,
* 2 = blue, 15 = alpha, in order starting at bit 0 of the
@ -243,8 +251,8 @@ uint32_t *createDFDUnpacked(int bigEndian, int numChannels, int bytes,
* @return A data format descriptor in malloc'd data. The caller is responsible
* for freeing the descriptor.
**/
uint32_t *createDFDPackedPadded(int bigEndian, int numChannels,
int bits[], int paddings[], int channels[],
uint32_t *createDFDPackedShifted(int bigEndian, int numChannels,
int bits[], int shiftBits[], int channels[],
enum VkSuffix suffix)
{
uint32_t *DFD = 0;
@ -291,17 +299,18 @@ uint32_t *createDFDPackedPadded(int bigEndian, int numChannels,
int sampleCounter;
for (channelCounter = 0; channelCounter < numChannels; ++channelCounter) {
beChannelStart[channelCounter] = totalBits;
totalBits += bits[channelCounter] + paddings[channelCounter];
totalBits += shiftBits[channelCounter] + bits[channelCounter];
}
BEMask = (totalBits - 1) & 0x18;
for (channelCounter = 0; channelCounter < numChannels; ++channelCounter) {
bitOffset += shiftBits[channelCounter];
bitChannel[bitOffset ^ BEMask] = channelCounter;
if (((bitOffset + bits[channelCounter] - 1) & ~7) != (bitOffset & ~7)) {
/* Continuation sample */
bitChannel[((bitOffset + bits[channelCounter] - 1) & ~7) ^ BEMask] = channelCounter;
numSamples++;
}
bitOffset += bits[channelCounter] + paddings[channelCounter];
bitOffset += bits[channelCounter];
}
DFD = writeHeader(numSamples, totalBits >> 3, suffix, i_COLOR);
@ -343,16 +352,17 @@ uint32_t *createDFDPackedPadded(int bigEndian, int numChannels,
int totalBits = 0;
int bitOffset = 0;
for (sampleCounter = 0; sampleCounter < numChannels; ++sampleCounter) {
totalBits += bits[sampleCounter] + paddings[sampleCounter];
totalBits += shiftBits[sampleCounter] + bits[sampleCounter];
}
/* One sample per channel */
DFD = writeHeader(numChannels, totalBits >> 3, suffix, i_COLOR);
for (sampleCounter = 0; sampleCounter < numChannels; ++sampleCounter) {
bitOffset += shiftBits[sampleCounter];
writeSample(DFD, sampleCounter, channels[sampleCounter],
bits[sampleCounter], bitOffset,
1, 1, suffix);
bitOffset += bits[sampleCounter] + paddings[sampleCounter];
bitOffset += bits[sampleCounter];
}
}
return DFD;
@ -383,12 +393,12 @@ uint32_t *createDFDPacked(int bigEndian, int numChannels,
int bits[], int channels[],
enum VkSuffix suffix) {
assert(numChannels <= 6);
int paddings[] = {0, 0, 0, 0, 0, 0};
return createDFDPackedPadded(bigEndian, numChannels, bits, paddings, channels, suffix);
int shiftBits[] = {0, 0, 0, 0, 0, 0};
return createDFDPackedShifted(bigEndian, numChannels, bits, shiftBits, channels, suffix);
}
uint32_t *createDFD422(int bigEndian, int numSamples,
int bits[], int paddings[], int channels[],
int bits[], int shiftBits[], int channels[],
int position_xs[], int position_ys[],
enum VkSuffix suffix) {
assert(!bigEndian); (void) bigEndian;
@ -396,7 +406,7 @@ uint32_t *createDFD422(int bigEndian, int numSamples,
int totalBits = 0;
for (int i = 0; i < numSamples; ++i)
totalBits += bits[i] + paddings[i];
totalBits += shiftBits[i] + bits[i];
assert(totalBits % 8 == 0);
uint32_t BDFDSize = sizeof(uint32_t) * (KHR_DF_WORD_SAMPLESTART + numSamples * KHR_DF_WORD_SAMPLEWORDS);
@ -428,6 +438,7 @@ uint32_t *createDFD422(int bigEndian, int numSamples,
int bitOffset = 0;
for (int i = 0; i < numSamples; ++i) {
bitOffset += shiftBits[i];
KHR_DFDSETSVAL(BDFD, i, BITOFFSET, bitOffset);
KHR_DFDSETSVAL(BDFD, i, BITLENGTH, bits[i] - 1);
KHR_DFDSETSVAL(BDFD, i, CHANNELID, channels[i]);
@ -438,7 +449,7 @@ uint32_t *createDFD422(int bigEndian, int numSamples,
KHR_DFDSETSVAL(BDFD, i, SAMPLEPOSITION3, 0);
KHR_DFDSETSVAL(BDFD, i, SAMPLELOWER, 0);
KHR_DFDSETSVAL(BDFD, i, SAMPLEUPPER, (1u << bits[i]) - 1u);
bitOffset += bits[i] + paddings[i];
bitOffset += bits[i];
}
return DFD;

View File

@ -35,7 +35,8 @@ enum VkSuffix {
s_SINT, /*!< Signed integer format. */
s_SFLOAT, /*!< Signed float format. */
s_UFLOAT, /*!< Unsigned float format. */
s_SRGB /*!< sRGB normalized format. */
s_SRGB, /*!< sRGB normalized format. */
s_S10_5 /*!< 2's complement fixed-point; 5 fractional bits. */
};
/** Compression scheme, in Vulkan terms. */
@ -68,15 +69,16 @@ typedef unsigned int uint32_t;
#endif
uint32_t* vk2dfd(enum VkFormat format);
enum VkFormat dfd2vk(uint32_t* dfd);
/* Create a Data Format Descriptor for an unpacked format. */
uint32_t *createDFDUnpacked(int bigEndian, int numChannels, int bytes,
int redBlueSwap, enum VkSuffix suffix);
/* Create a Data Format Descriptor for a packed padded format. */
uint32_t *createDFDPackedPadded(int bigEndian, int numChannels,
int bits[], int paddings[], int channels[],
enum VkSuffix suffix);
uint32_t *createDFDPackedShifted(int bigEndian, int numChannels,
int bits[], int shiftBits[],
int channels[], enum VkSuffix suffix);
/* Create a Data Format Descriptor for a packed format. */
uint32_t *createDFDPacked(int bigEndian, int numChannels,
@ -85,7 +87,7 @@ uint32_t *createDFDPacked(int bigEndian, int numChannels,
/* Create a Data Format Descriptor for a 4:2:2 format. */
uint32_t *createDFD422(int bigEndian, int numChannels,
int bits[], int paddings[], int channels[],
int bits[], int shiftBits[], int channels[],
int position_xs[], int position_ys[],
enum VkSuffix suffix);
@ -111,10 +113,11 @@ enum InterpretDFDResult {
i_SRGB_FORMAT_BIT = 1u << 2u, /*!< sRGB transfer function. */
i_NORMALIZED_FORMAT_BIT = 1u << 3u, /*!< Normalized (UNORM or SNORM). */
i_SIGNED_FORMAT_BIT = 1u << 4u, /*!< Format is signed. */
i_FLOAT_FORMAT_BIT = 1u << 5u, /*!< Format is floating point. */
i_COMPRESSED_FORMAT_BIT = 1u << 6u, /*!< Format is block compressed (422). */
i_YUVSDA_FORMAT_BIT = 1u << 7u, /*!< Color model is YUVSDA. */
i_UNSUPPORTED_ERROR_BIT = 1u << 8u, /*!< Format not successfully interpreted. */
i_FIXED_FORMAT_BIT = 1u << 5u, /*!< Format is a fixed-point representation. */
i_FLOAT_FORMAT_BIT = 1u << 6u, /*!< Format is floating point. */
i_COMPRESSED_FORMAT_BIT = 1u << 7u, /*!< Format is block compressed (422). */
i_YUVSDA_FORMAT_BIT = 1u << 8u, /*!< Color model is YUVSDA. */
i_UNSUPPORTED_ERROR_BIT = 1u << 9u, /*!< Format not successfully interpreted. */
/** "NONTRIVIAL_ENDIANNESS" means not big-endian, not little-endian
* (a channel has bits that are not consecutive in either order). **/
i_UNSUPPORTED_NONTRIVIAL_ENDIANNESS = i_UNSUPPORTED_ERROR_BIT,
@ -198,9 +201,12 @@ getDFDComponentInfoUnpacked(const uint32_t* DFD, uint32_t* numComponents,
/* Return the number of components described by a DFD. */
uint32_t getDFDNumComponents(const uint32_t* DFD);
/* Recreate and return the value of bytesPlane0 as it should be for the data
/* Reconstruct and return the value of bytesPlane0 as it should be for the data
* post-inflation from variable-rate compression.
*/
uint32_t
reconstructDFDBytesPlane0FromSamples(const uint32_t* DFD);
/* Deprecated. For backward compatibility. */
void
recreateBytesPlane0FromSampleInfo(const uint32_t* DFD, uint32_t* bytesPlane0);

View File

@ -4,12 +4,20 @@
/***************************** Do not edit. *****************************
Automatically generated by makedfd2vk.pl.
*************************************************************************/
if (KHR_DFDVAL(dfd + 1, MODEL) == KHR_DF_MODEL_RGBSDA) {
if (KHR_DFDVAL(dfd + 1, MODEL) == KHR_DF_MODEL_RGBSDA || KHR_DFDVAL(dfd + 1, MODEL) == KHR_DF_MODEL_YUVSDA) {
enum InterpretDFDResult r;
InterpretedDFDChannel R = {0,0};
InterpretedDFDChannel G = {0,0};
InterpretedDFDChannel B = {0,0};
InterpretedDFDChannel A = {0,0};
/* interpretDFD channel overloadings for YUVSDA formats. These are
* different from the mapping used by Vulkan. */
#define Y1 R
#define Y2 A
#define CB G
#define U G
#define CR B
#define V B
uint32_t wordBytes;
/* Special case exponent format */
@ -44,7 +52,16 @@ if (KHR_DFDVAL(dfd + 1, MODEL) == KHR_DF_MODEL_RGBSDA) {
else if (wordBytes == 2) { /* PACK16 */
if (A.size == 4) {
if (R.offset == 12) return VK_FORMAT_R4G4B4A4_UNORM_PACK16;
else return VK_FORMAT_B4G4R4A4_UNORM_PACK16;
else if (B.offset == 12) return VK_FORMAT_B4G4R4A4_UNORM_PACK16;
else if (A.offset == 12) {
if (R.offset == 8) return VK_FORMAT_A4R4G4B4_UNORM_PACK16;
else return VK_FORMAT_A4B4G4R4_UNORM_PACK16;
}
} else if (G.size == 0 && B.size == 0 && A.size == 0) { /* One channel */
if (R.size == 10)
return VK_FORMAT_R10X6_UNORM_PACK16;
else if (R.size ==12)
return VK_FORMAT_R12X4_UNORM_PACK16;
} else if (A.size == 0) { /* Three channels */
if (B.offset == 0) return VK_FORMAT_R5G6B5_UNORM_PACK16;
else return VK_FORMAT_B5G6R5_UNORM_PACK16;
@ -54,7 +71,7 @@ if (KHR_DFDVAL(dfd + 1, MODEL) == KHR_DF_MODEL_RGBSDA) {
if (B.offset == 10) return VK_FORMAT_A1B5G5R5_UNORM_PACK16_KHR;
return VK_FORMAT_B5G5R5A1_UNORM_PACK16;
}
} else if (wordBytes == 4) { /* PACK32 */
} else if (wordBytes == 4) { /* PACK32 or 2PACK16 */
if (A.size == 8) {
if ((r & i_SRGB_FORMAT_BIT)) return VK_FORMAT_A8B8G8R8_SRGB_PACK32;
if ((r & i_NORMALIZED_FORMAT_BIT) && !(r & i_SIGNED_FORMAT_BIT)) return VK_FORMAT_A8B8G8R8_UNORM_PACK32;
@ -71,11 +88,47 @@ if (KHR_DFDVAL(dfd + 1, MODEL) == KHR_DF_MODEL_RGBSDA) {
if ((r & i_NORMALIZED_FORMAT_BIT) && (r & i_SIGNED_FORMAT_BIT)) return VK_FORMAT_A2B10G10R10_SNORM_PACK32;
if (!(r & i_NORMALIZED_FORMAT_BIT) && !(r & i_SIGNED_FORMAT_BIT)) return VK_FORMAT_A2B10G10R10_UINT_PACK32;
if (!(r & i_NORMALIZED_FORMAT_BIT) && (r & i_SIGNED_FORMAT_BIT)) return VK_FORMAT_A2B10G10R10_SINT_PACK32;
} else if (R.size == 11) return VK_FORMAT_B10G11R11_UFLOAT_PACK32;
} else if (R.size == 11) {
return VK_FORMAT_B10G11R11_UFLOAT_PACK32;
} else if (R.size == 10 && G.size == 10 && B.size == 0) {
return VK_FORMAT_R10X6G10X6_UNORM_2PACK16;
} else if (R.size == 12 && G.size == 12 && B.size == 0) {
return VK_FORMAT_R12X4G12X4_UNORM_2PACK16;
}
} else if (wordBytes == 8) { /* 4PACK16 */
if (r & i_YUVSDA_FORMAT_BIT) {
/* In Vulkan G = Y, R = Cr, B = Cb. */
if (Y1.size == 10 && Y1.offset == 6 && Y2.size == 10 && Y2.offset == 38)
return VK_FORMAT_G10X6B10X6G10X6R10X6_422_UNORM_4PACK16;
if (Y1.size == 10 && Y1.offset == 22 && Y2.size == 10 && Y2.offset == 54)
return VK_FORMAT_B10X6G10X6R10X6G10X6_422_UNORM_4PACK16;
if (Y1.size == 12 && Y1.offset == 4 && Y2.size == 12 && Y2.offset == 36)
return VK_FORMAT_G12X4B12X4G12X4R12X4_422_UNORM_4PACK16;
if (Y1.size == 12 && Y1.offset == 20 && Y2.size == 12 && Y2.offset == 52)
return VK_FORMAT_B12X4G12X4R12X4G12X4_422_UNORM_4PACK16;
} else {
if (R.size == 10)
return VK_FORMAT_R10X6G10X6B10X6A10X6_UNORM_4PACK16;
else if (R.size == 12)
return VK_FORMAT_R12X4G12X4B12X4A12X4_UNORM_4PACK16;
}
}
} else { /* Not a packed format */
if (r & i_YUVSDA_FORMAT_BIT) {
/* In Vulkan G = Y, R = Cr, B = Cb. */
if (Y1.size == 1 && Y1.offset == 0 && Y2.size == 1 && Y2.offset == 2)
return VK_FORMAT_G8B8G8R8_422_UNORM;
else if (Y1.size == 1 && Y1.offset == 1 && Y2.size == 1 && Y2.offset == 3)
return VK_FORMAT_B8G8R8G8_422_UNORM;
else if (Y1.size == 2 && Y1.offset == 0 && Y2.size == 2 && Y2.offset == 4)
return VK_FORMAT_G16B16G16R16_422_UNORM;
else if (Y1.size == 2 && Y1.offset == 2 && Y2.size == 2 && Y2.offset == 6)
return VK_FORMAT_B16G16R16G16_422_UNORM;
else
return VK_FORMAT_UNDEFINED; // Until support added.
} else { /* Not YUV */
if (wordBytes == 1) {
if (A.size > 8 && R.size == 0 && G.size == 0 && B.size == 0 && (r & i_NORMALIZED_FORMAT_BIT) && !(r & i_SIGNED_FORMAT_BIT)) {
if (A.size == 1 && R.size == 0 && G.size == 0 && B.size == 0 && (r & i_NORMALIZED_FORMAT_BIT) && !(r & i_SIGNED_FORMAT_BIT)) {
return VK_FORMAT_A8_UNORM_KHR;
}
if (A.size > 0) { /* 4 channels */
@ -120,6 +173,7 @@ if (KHR_DFDVAL(dfd + 1, MODEL) == KHR_DF_MODEL_RGBSDA) {
if (!(r & i_NORMALIZED_FORMAT_BIT) && (r & i_SIGNED_FORMAT_BIT)) return VK_FORMAT_R8_SINT;
}
} else if (wordBytes == 2) {
if ((r & i_FIXED_FORMAT_BIT) && R.size == 2 && G.size == 2) return VK_FORMAT_R16G16_S10_5_NV;
if (A.size > 0) { /* 4 channels */
if (R.offset == 0) { /* RGBA */
if ((r & i_FLOAT_FORMAT_BIT)) return VK_FORMAT_R16G16B16A16_SFLOAT;
@ -201,6 +255,7 @@ if (KHR_DFDVAL(dfd + 1, MODEL) == KHR_DF_MODEL_RGBSDA) {
}
}
}
}
} else if (KHR_DFDVAL((dfd + 1), MODEL) >= 128) {
const uint32_t *bdb = dfd + 1;
switch (KHR_DFDVAL(bdb, MODEL)) {

View File

@ -29,18 +29,37 @@ static uint32_t bit_ceil(uint32_t x) {
* @~English
* @brief Interpret a Data Format Descriptor for a simple format.
*
* @param DFD Pointer to a Data Format Descriptor to interpret,
described as 32-bit words in native endianness.
Note that this is the whole descriptor, not just
the basic descriptor block.
* @param R Information about the decoded red channel or the depth channel, if any.
* @param G Information about the decoded green channel or the stencil channel, if any.
* @param B Information about the decoded blue channel, if any.
* @param A Information about the decoded alpha channel, if any.
* @param wordBytes Byte size of the channels (unpacked) or total size (packed).
* Handles "simple" cases that can be translated to things a GPU can access.
* For simplicity, it ignores the compressed formats, which are generally a
* single sample (and I believe are all defined to be little-endian in their
* in-memory layout, even if some documentation confuses this). Focuses on
* the layout and ignores sRGB except for reporting if that is the transfer
* function by way of a bit in the returned value.
*
* @param[in] DFD Pointer to a Data Format Descriptor to interpret,
* described as 32-bit words in native endianness.
* Note that this is the whole descriptor, not just
* the basic descriptor block.
* @param R[in,out] Pointer to struct to receive information about the decoded
* red channel, the Y channel, if YUV, or the depth channel,
* if any.
* @param G[in,out] Pointer to struct to receive information about the decoded
* green channel, the U (Cb) channel, if YUV, or the stencil
* channel, if any.
* @param B[in,out] Pointer to struct to receive information about the decoded
* blue channel, if any or the V (Cr) channel, if YUV.
* @param A[in,out] Pointer to struct to receive information about the decoded
* alpha channel, if any or the second Y channel, if YUV and
* any.
* @param wordBytes[in,out] Pointer to a uint32_t to receive the byte size of
* the channels (unpacked) or total size (packed).
*
* @return An enumerant describing the decoded value,
* or an error code in case of failure.
*
* The mapping of YUV channels to the parameter names used here is based on
* the channel ids in @c khr_df.h and is different from the convention used
* in format names in the Vulkan specification where G == Y, R = Cr and B = Cb.
**/
enum InterpretDFDResult interpretDFD(const uint32_t *DFD,
InterpretedDFDChannel *R,
@ -49,14 +68,6 @@ enum InterpretDFDResult interpretDFD(const uint32_t *DFD,
InterpretedDFDChannel *A,
uint32_t *wordBytes)
{
/* We specifically handle "simple" cases that can be translated */
/* to things a GPU can access. For simplicity, we also ignore */
/* the compressed formats, which are generally a single sample */
/* (and I believe are all defined to be little-endian in their */
/* in-memory layout, even if some documentation confuses this). */
/* We also just worry about layout and ignore sRGB, since that's */
/* trivial to extract anyway. */
/* DFD points to the whole descriptor, not the basic descriptor block. */
/* Make everything else relative to the basic descriptor block. */
const uint32_t *BDFDB = DFD+1;
@ -78,7 +89,7 @@ enum InterpretDFDResult interpretDFD(const uint32_t *DFD,
/* First rule out the multiple planes case (trivially) */
/* - that is, we check that only bytesPlane0 is non-zero. */
/* This means we don't handle YUV even if the API could. */
/* This means we don't handle multi-plane YUV, even if the API could. */
/* (We rely on KHR_DF_WORD_BYTESPLANE0..3 being the same and */
/* KHR_DF_WORD_BYTESPLANE4..7 being the same as a short cut.) */
if ((BDFDB[KHR_DF_WORD_BYTESPLANE0] & ~KHR_DF_MASK_BYTESPLANE0)
@ -104,6 +115,8 @@ enum InterpretDFDResult interpretDFD(const uint32_t *DFD,
bool hasSigned = false;
bool hasFloat = false;
bool hasNormalized = false;
bool hasFixed = false;
khr_df_model_e model = KHR_DFDVAL(BDFDB, MODEL);
// Note: We're ignoring 9995, which is weird and worth special-casing
// rather than trying to generalise to all float formats.
@ -116,13 +129,23 @@ enum InterpretDFDResult interpretDFD(const uint32_t *DFD,
// (i.e. set to the maximum bit value, and check min value) on
// the assumption that we're looking at a format which *came* from
// an API we can support.
const bool isNormalized = isFloat ?
*(float*) (void*) &BDFDB[KHR_DF_WORD_SAMPLESTART +
bool isFixed;
bool isNormalized;
if (isFloat) {
isNormalized = *(float*) (void*) &BDFDB[KHR_DF_WORD_SAMPLESTART +
KHR_DF_WORD_SAMPLEWORDS * i +
KHR_DF_SAMPLEWORD_SAMPLEUPPER] != 1.0f :
KHR_DFDSVAL(BDFDB, i, SAMPLEUPPER) != 1U;
KHR_DF_SAMPLEWORD_SAMPLEUPPER] != 1.0f;
isFixed = false;
} else {
uint32_t sampleUpper = KHR_DFDSVAL(BDFDB, i, SAMPLEUPPER);
uint32_t maxVal = 1U << KHR_DFDSVAL(BDFDB, i, BITLENGTH);
if (!isSigned) maxVal <<= 1;
maxVal--;
isFixed = 1U < sampleUpper && sampleUpper < maxVal;
isNormalized = !isFixed && sampleUpper != 1U;
}
hasSigned |= isSigned;
hasFixed |= isFixed;
hasFloat |= isFloat;
// By our definition the normalizedness of a single bit channel (like in RGBA 5:5:5:1)
// is ambiguous. Ignore these during normalized checks.
@ -132,9 +155,10 @@ enum InterpretDFDResult interpretDFD(const uint32_t *DFD,
result |= hasSigned ? i_SIGNED_FORMAT_BIT : 0;
result |= hasFloat ? i_FLOAT_FORMAT_BIT : 0;
result |= hasNormalized ? i_NORMALIZED_FORMAT_BIT : 0;
result |= hasFixed ? i_FIXED_FORMAT_BIT : 0;
// Checks based on color model
if (KHR_DFDVAL(BDFDB, MODEL) == KHR_DF_MODEL_YUVSDA) {
if (model == KHR_DF_MODEL_YUVSDA) {
result |= i_NORMALIZED_FORMAT_BIT;
result |= i_COMPRESSED_FORMAT_BIT;
result |= i_YUVSDA_FORMAT_BIT;
@ -165,7 +189,7 @@ enum InterpretDFDResult interpretDFD(const uint32_t *DFD,
*wordBytes = ((result & i_PACKED_FORMAT_BIT) ? 4 : 1) * bit_ceil(largestSampleSize) / 8;
} else if (KHR_DFDVAL(BDFDB, MODEL) == KHR_DF_MODEL_RGBSDA) {
/* We only pay attention to sRGB. */
/* Check if transfer is sRGB. */
if (KHR_DFDVAL(BDFDB, TRANSFER) == KHR_DF_TRANSFER_SRGB) result |= i_SRGB_FORMAT_BIT;
/* We only support samples at coordinate 0,0,0,0. */
@ -175,7 +199,11 @@ enum InterpretDFDResult interpretDFD(const uint32_t *DFD,
if (KHR_DFDSVAL(BDFDB, sampleCounter, SAMPLEPOSITION_ALL))
return i_UNSUPPORTED_MULTIPLE_SAMPLE_LOCATIONS;
}
}
if (model == KHR_DF_MODEL_RGBSDA || model == KHR_DF_MODEL_YUVSDA) {
/* The values of the DEPTH and STENCIL tokens are the same for */
/* RGBSDA and YUVSDA. */
/* For Depth/Stencil formats mixed channels are allowed */
for (uint32_t sampleCounter = 0; sampleCounter < numSamples; ++sampleCounter) {
switch (KHR_DFDSVAL(BDFDB, sampleCounter, CHANNELID)) {
@ -206,6 +234,9 @@ enum InterpretDFDResult interpretDFD(const uint32_t *DFD,
}
}
/* This all relies on the channel id values for RGB being equal to */
/* those for YUV. */
/* Remember: the canonical ordering of samples is to start with */
/* the lowest bit of the channel/location which touches bit 0 of */
/* the data, when the latter is concatenated in little-endian order, */
@ -288,9 +319,21 @@ enum InterpretDFDResult interpretDFD(const uint32_t *DFD,
currentByteOffset = sampleByteOffset;
currentBitLength = sampleBitLength;
if (sampleChannelPtr->size) {
if (model == KHR_DF_MODEL_YUVSDA && sampleChannel == KHR_DF_CHANNEL_YUVSDA_Y) {
if (sampleChannelPtr == R) {
/* We've got another Y channel. Record details in A. */
if (A->size == 0) {
sampleChannelPtr = A;
} else {
/* Uh-oh, we've already got a second Y or an alpha channel. */
return i_UNSUPPORTED_CHANNEL_TYPES;
}
}
} else {
/* Uh-oh, we've seen this channel before. */
return i_UNSUPPORTED_NONTRIVIAL_ENDIANNESS;
}
}
/* For now, record the bit offset in little-endian terms, */
/* because we may not know to reverse it yet. */
sampleChannelPtr->offset = sampleBitOffset;
@ -378,9 +421,21 @@ enum InterpretDFDResult interpretDFD(const uint32_t *DFD,
currentByteOffset = sampleByteOffset;
currentByteLength = sampleByteLength;
if (sampleChannelPtr->size) {
if (model == KHR_DF_MODEL_YUVSDA && sampleChannel == KHR_DF_CHANNEL_YUVSDA_Y) {
if (sampleChannelPtr == R) {
/* We've got another Y channel. Record details in A. */
if (A->size == 0) {
sampleChannelPtr = A;
} else {
/* Uh-oh, we've already got a second Y or an alpha channel. */
return i_UNSUPPORTED_CHANNEL_TYPES;
}
}
} else {
/* Uh-oh, we've seen this channel before. */
return i_UNSUPPORTED_NONTRIVIAL_ENDIANNESS;
}
}
/* For now, record the byte offset in little-endian terms, */
/* because we may not know to reverse it yet. */
sampleChannelPtr->offset = sampleByteOffset;

View File

@ -41,15 +41,15 @@ getDFDComponentInfoUnpacked(const uint32_t* DFD, uint32_t* numComponents,
{
const uint32_t *BDFDB = DFD+1;
uint32_t numSamples = KHR_DFDSAMPLECOUNT(BDFDB);
uint32_t sampleCounter;
uint32_t sampleNumber;
uint32_t currentChannel = ~0U; /* Don't start matched. */
/* This is specifically for unpacked formats which means the size of */
/* each component is the same. */
*numComponents = 0;
for (sampleCounter = 0; sampleCounter < numSamples; ++sampleCounter) {
uint32_t sampleByteLength = (KHR_DFDSVAL(BDFDB, sampleCounter, BITLENGTH) + 1) >> 3U;
uint32_t sampleChannel = KHR_DFDSVAL(BDFDB, sampleCounter, CHANNELID);
for (sampleNumber = 0; sampleNumber < numSamples; ++sampleNumber) {
uint32_t sampleByteLength = (KHR_DFDSVAL(BDFDB, sampleNumber, BITLENGTH) + 1) >> 3U;
uint32_t sampleChannel = KHR_DFDSVAL(BDFDB, sampleNumber, CHANNELID);
if (sampleChannel == currentChannel) {
/* Continuation of the same channel. */
@ -85,10 +85,10 @@ uint32_t getDFDNumComponents(const uint32_t* DFD)
uint32_t currentChannel = ~0U; /* Don't start matched. */
uint32_t numComponents = 0;
uint32_t numSamples = KHR_DFDSAMPLECOUNT(BDFDB);
uint32_t sampleCounter;
uint32_t sampleNumber;
for (sampleCounter = 0; sampleCounter < numSamples; ++sampleCounter) {
uint32_t sampleChannel = KHR_DFDSVAL(BDFDB, sampleCounter, CHANNELID);
for (sampleNumber = 0; sampleNumber < numSamples; ++sampleNumber) {
uint32_t sampleChannel = KHR_DFDSVAL(BDFDB, sampleNumber, CHANNELID);
if (sampleChannel != currentChannel) {
numComponents++;
currentChannel = sampleChannel;
@ -97,14 +97,73 @@ uint32_t getDFDNumComponents(const uint32_t* DFD)
return numComponents;
}
/**
* @~English
* @brief Recreate the value of bytesPlane0 from sample info.
* @brief Reconstruct the value of bytesPlane0 from sample info.
*
* This can be used to recreate the value of bytesPlane0 for data that
* has been variable-rate compressed so has bytesPlane0 = 0. For DFDs
* that are valid for KTX files. Little-endian data only and no multi-plane
* formats.
* Reconstruct the value for data that has been variable-rate compressed so
* has bytesPlane0 = 0. For DFDs that are valid for KTX files. Little-endian
* data only and no multi-plane formats.
*
* @param DFD Pointer to a Data Format Descriptor,
* described as 32-bit words in native endianness.
* Note that this is the whole descriptor, not just
* the basic descriptor block.
*/
uint32_t
reconstructDFDBytesPlane0FromSamples(const uint32_t* DFD)
{
/* DFD[0] is the descriptor's totalSize word; the basic descriptor
 * block (BDFDB) starts immediately after it. */
const uint32_t *BDFDB = DFD+1;
uint32_t numSamples = KHR_DFDSAMPLECOUNT(BDFDB);
uint32_t sampleNumber;
uint32_t bitsPlane0 = 0;
int32_t largestOffset = 0;
uint32_t sampleNumberWithLargestOffset = 0;
// Special case these depth{,-stencil} formats. The unused bits are
// in the MSBs so have no visibility in the DFD therefore the max offset
// algorithm below returns a value that is too small.
if (KHR_DFDSVAL(BDFDB, 0, CHANNELID) == KHR_DF_CHANNEL_COMMON_DEPTH) {
if (numSamples == 1) {
if (KHR_DFDSVAL(BDFDB, 0, BITLENGTH) + 1 == 24) {
// X8_D24_UNORM_PACK32,
return 4;
}
} else if (numSamples == 2) {
if (KHR_DFDSVAL(BDFDB, 0, BITLENGTH) + 1 == 16) {
// D16_UNORM_S8_UINT
return 4;
}
if (KHR_DFDSVAL(BDFDB, 0, BITLENGTH) + 1 == 32
&& KHR_DFDSVAL(BDFDB, 1, CHANNELID) == KHR_DF_CHANNEL_COMMON_STENCIL) {
// D32_SFLOAT_S8_UINT
return 8;
}
}
}
/* Locate the sample with the largest bit offset; for the little-endian,
 * single-plane DFDs handled here that sample ends the plane.
 * NOTE(review): assumes samples do not extend past the largest-offset
 * sample — holds for DFDs valid for KTX files, per the doc above. */
for (sampleNumber = 0; sampleNumber < numSamples; ++sampleNumber) {
int32_t sampleBitOffset = KHR_DFDSVAL(BDFDB, sampleNumber, BITOFFSET);
if (sampleBitOffset > largestOffset) {
largestOffset = sampleBitOffset;
sampleNumberWithLargestOffset = sampleNumber;
}
}
/* The sample bitLength field stores the bit length - 1. */
uint32_t sampleBitLength = KHR_DFDSVAL(BDFDB, sampleNumberWithLargestOffset, BITLENGTH) + 1;
bitsPlane0 = largestOffset + sampleBitLength;
/* Convert bits to bytes for the bytesPlane0 field. */
return bitsPlane0 >> 3U;
}
/**
* @~English
* @brief Reconstruct the value of bytesPlane0 from sample info.
*
* @see reconstructDFDBytesPlane0FromSamples for details.
* @deprecated For backward compatibility only. Use
* reconstructDFDBytesPlane0FromSamples.
*
* @param DFD Pointer to a Data Format Descriptor,
* described as 32-bit words in native endianness.
@ -116,31 +175,5 @@ uint32_t getDFDNumComponents(const uint32_t* DFD)
void
recreateBytesPlane0FromSampleInfo(const uint32_t* DFD, uint32_t* bytesPlane0)
{
const uint32_t *BDFDB = DFD+1;
uint32_t numSamples = KHR_DFDSAMPLECOUNT(BDFDB);
uint32_t sampleCounter;
uint32_t bitsPlane0 = 0;
uint32_t* bitOffsets = malloc(sizeof(uint32_t) * numSamples);
memset(bitOffsets, -1, sizeof(uint32_t) * numSamples);
for (sampleCounter = 0; sampleCounter < numSamples; ++sampleCounter) {
uint32_t sampleBitOffset = KHR_DFDSVAL(BDFDB, sampleCounter, BITOFFSET);
/* The sample bitLength field stores the bit length - 1. */
uint32_t sampleBitLength = KHR_DFDSVAL(BDFDB, sampleCounter, BITLENGTH) + 1;
uint32_t i;
for (i = 0; i < numSamples; i++) {
if (sampleBitOffset == bitOffsets[i]) {
// This sample is being repeated as in e.g. RGB9E5.
break;
*bytesPlane0 = reconstructDFDBytesPlane0FromSamples(DFD);
}
}
if (i == numSamples) {
// Previously unseen bitOffset. Bump size.
bitsPlane0 += sampleBitLength;
bitOffsets[sampleCounter] = sampleBitOffset;
}
}
free(bitOffsets);
*bytesPlane0 = bitsPlane0 >> 3U;
}

View File

@ -277,68 +277,68 @@ case VK_FORMAT_ASTC_12x10_SRGB_BLOCK: return createDFDCompressed(c_ASTC, 12, 10,
case VK_FORMAT_ASTC_12x12_UNORM_BLOCK: return createDFDCompressed(c_ASTC, 12, 12, 1, s_UNORM);
case VK_FORMAT_ASTC_12x12_SRGB_BLOCK: return createDFDCompressed(c_ASTC, 12, 12, 1, s_SRGB);
case VK_FORMAT_G8B8G8R8_422_UNORM: {
int channels[] = {0, 1, 0, 2}; int bits[] = {8, 8, 8, 8}; int paddings[] = {0, 0, 0, 0};
int channels[] = {0, 1, 0, 2}; int bits[] = {8, 8, 8, 8}; int shiftBits[] = {0, 0, 0, 0};
int position_xs[] = {64, 64, 192, 64}; int position_ys[] = {128, 128, 128, 128};
return createDFD422(0, 4, bits, paddings, channels, position_xs, position_ys, s_UNORM);
return createDFD422(0, 4, bits, shiftBits, channels, position_xs, position_ys, s_UNORM);
}
case VK_FORMAT_B8G8R8G8_422_UNORM: {
int channels[] = {1, 0, 2, 0}; int bits[] = {8, 8, 8, 8}; int paddings[] = {0, 0, 0, 0};
int channels[] = {1, 0, 2, 0}; int bits[] = {8, 8, 8, 8}; int shiftBits[] = {0, 0, 0, 0};
int position_xs[] = {64, 64, 64, 192}; int position_ys[] = {128, 128, 128, 128};
return createDFD422(0, 4, bits, paddings, channels, position_xs, position_ys, s_UNORM);
return createDFD422(0, 4, bits, shiftBits, channels, position_xs, position_ys, s_UNORM);
}
case VK_FORMAT_R10X6_UNORM_PACK16: {
int channels[] = {0}; int bits[] = {10}; int paddings[] = {6};
return createDFDPackedPadded(0, 1, bits, paddings, channels, s_UNORM);
int channels[] = {0}; int bits[] = {10}; int shiftBits[] = {6};
return createDFDPackedShifted(0, 1, bits, shiftBits, channels, s_UNORM);
}
case VK_FORMAT_R10X6G10X6_UNORM_2PACK16: {
int channels[] = {0, 1}; int bits[] = {10, 10}; int paddings[] = {6, 6};
return createDFDPackedPadded(0, 2, bits, paddings, channels, s_UNORM);
int channels[] = {0, 1}; int bits[] = {10, 10}; int shiftBits[] = {6, 6};
return createDFDPackedShifted(0, 2, bits, shiftBits, channels, s_UNORM);
}
case VK_FORMAT_R10X6G10X6B10X6A10X6_UNORM_4PACK16: {
int channels[] = {0, 1, 2, 3}; int bits[] = {10, 10, 10, 10}; int paddings[] = {6, 6, 6, 6};
return createDFDPackedPadded(0, 4, bits, paddings, channels, s_UNORM);
int channels[] = {0, 1, 2, 3}; int bits[] = {10, 10, 10, 10}; int shiftBits[] = {6, 6, 6, 6};
return createDFDPackedShifted(0, 4, bits, shiftBits, channels, s_UNORM);
}
case VK_FORMAT_G10X6B10X6G10X6R10X6_422_UNORM_4PACK16: {
int channels[] = {0, 1, 0, 2}; int bits[] = {10, 10, 10, 10}; int paddings[] = {6, 6, 6, 6};
int channels[] = {0, 1, 0, 2}; int bits[] = {10, 10, 10, 10}; int shiftBits[] = {6, 6, 6, 6};
int position_xs[] = {64, 64, 192, 64}; int position_ys[] = {128, 128, 128, 128};
return createDFD422(0, 4, bits, paddings, channels, position_xs, position_ys, s_UNORM);
return createDFD422(0, 4, bits, shiftBits, channels, position_xs, position_ys, s_UNORM);
}
case VK_FORMAT_B10X6G10X6R10X6G10X6_422_UNORM_4PACK16: {
int channels[] = {1, 0, 2, 0}; int bits[] = {10, 10, 10, 10}; int paddings[] = {6, 6, 6, 6};
int channels[] = {1, 0, 2, 0}; int bits[] = {10, 10, 10, 10}; int shiftBits[] = {6, 6, 6, 6};
int position_xs[] = {64, 64, 64, 192}; int position_ys[] = {128, 128, 128, 128};
return createDFD422(0, 4, bits, paddings, channels, position_xs, position_ys, s_UNORM);
return createDFD422(0, 4, bits, shiftBits, channels, position_xs, position_ys, s_UNORM);
}
case VK_FORMAT_R12X4_UNORM_PACK16: {
int channels[] = {0}; int bits[] = {12}; int paddings[] = {4};
return createDFDPackedPadded(0, 1, bits, paddings, channels, s_UNORM);
int channels[] = {0}; int bits[] = {12}; int shiftBits[] = {4};
return createDFDPackedShifted(0, 1, bits, shiftBits, channels, s_UNORM);
}
case VK_FORMAT_R12X4G12X4_UNORM_2PACK16: {
int channels[] = {0, 1}; int bits[] = {12, 12}; int paddings[] = {4, 4};
return createDFDPackedPadded(0, 2, bits, paddings, channels, s_UNORM);
int channels[] = {0, 1}; int bits[] = {12, 12}; int shiftBits[] = {4, 4};
return createDFDPackedShifted(0, 2, bits, shiftBits, channels, s_UNORM);
}
case VK_FORMAT_R12X4G12X4B12X4A12X4_UNORM_4PACK16: {
int channels[] = {0, 1, 2, 3}; int bits[] = {12, 12, 12, 12}; int paddings[] = {4, 4, 4, 4};
return createDFDPackedPadded(0, 4, bits, paddings, channels, s_UNORM);
int channels[] = {0, 1, 2, 3}; int bits[] = {12, 12, 12, 12}; int shiftBits[] = {4, 4, 4, 4};
return createDFDPackedShifted(0, 4, bits, shiftBits, channels, s_UNORM);
}
case VK_FORMAT_G12X4B12X4G12X4R12X4_422_UNORM_4PACK16: {
int channels[] = {0, 1, 0, 2}; int bits[] = {12, 12, 12, 12}; int paddings[] = {4, 4, 4, 4};
int channels[] = {0, 1, 0, 2}; int bits[] = {12, 12, 12, 12}; int shiftBits[] = {4, 4, 4, 4};
int position_xs[] = {64, 64, 192, 64}; int position_ys[] = {128, 128, 128, 128};
return createDFD422(0, 4, bits, paddings, channels, position_xs, position_ys, s_UNORM);
return createDFD422(0, 4, bits, shiftBits, channels, position_xs, position_ys, s_UNORM);
}
case VK_FORMAT_B12X4G12X4R12X4G12X4_422_UNORM_4PACK16: {
int channels[] = {1, 0, 2, 0}; int bits[] = {12, 12, 12, 12}; int paddings[] = {4, 4, 4, 4};
int channels[] = {1, 0, 2, 0}; int bits[] = {12, 12, 12, 12}; int shiftBits[] = {4, 4, 4, 4};
int position_xs[] = {64, 64, 64, 192}; int position_ys[] = {128, 128, 128, 128};
return createDFD422(0, 4, bits, paddings, channels, position_xs, position_ys, s_UNORM);
return createDFD422(0, 4, bits, shiftBits, channels, position_xs, position_ys, s_UNORM);
}
case VK_FORMAT_G16B16G16R16_422_UNORM: {
int channels[] = {0, 1, 0, 2}; int bits[] = {16, 16, 16, 16}; int paddings[] = {0, 0, 0, 0};
int channels[] = {0, 1, 0, 2}; int bits[] = {16, 16, 16, 16}; int shiftBits[] = {0, 0, 0, 0};
int position_xs[] = {64, 64, 192, 64}; int position_ys[] = {128, 128, 128, 128};
return createDFD422(0, 4, bits, paddings, channels, position_xs, position_ys, s_UNORM);
return createDFD422(0, 4, bits, shiftBits, channels, position_xs, position_ys, s_UNORM);
}
case VK_FORMAT_B16G16R16G16_422_UNORM: {
int channels[] = {1, 0, 2, 0}; int bits[] = {16, 16, 16, 16}; int paddings[] = {0, 0, 0, 0};
int channels[] = {1, 0, 2, 0}; int bits[] = {16, 16, 16, 16}; int shiftBits[] = {0, 0, 0, 0};
int position_xs[] = {64, 64, 64, 192}; int position_ys[] = {128, 128, 128, 128};
return createDFD422(0, 4, bits, paddings, channels, position_xs, position_ys, s_UNORM);
return createDFD422(0, 4, bits, shiftBits, channels, position_xs, position_ys, s_UNORM);
}
case VK_FORMAT_A4R4G4B4_UNORM_PACK16: {
int channels[] = {2,1,0,3}; int bits[] = {4,4,4,4};
@ -402,6 +402,7 @@ case VK_FORMAT_ASTC_6x6x6_UNORM_BLOCK_EXT: return createDFDCompressed(c_ASTC, 6,
case VK_FORMAT_ASTC_6x6x6_SRGB_BLOCK_EXT: return createDFDCompressed(c_ASTC, 6, 6, 6, s_SRGB);
case VK_FORMAT_ASTC_6x6x6_SFLOAT_BLOCK_EXT: return createDFDCompressed(c_ASTC, 6, 6, 6, s_SFLOAT);
#endif
case VK_FORMAT_R16G16_S10_5_NV: return createDFDUnpacked(0, 2, 2, 0, s_S10_5);
case VK_FORMAT_A1B5G5R5_UNORM_PACK16_KHR: {
int channels[] = {0,1,2,3}; int bits[] = {5,5,5,1};
return createDFDPacked(0, 4, bits, channels, s_UNORM);

View File

@ -1,4 +1,4 @@
/*
/*
================================================================================================
Description : OpenGL formats/types and properties.
@ -70,7 +70,6 @@ static inline GLenum glGetFormatFromInternalFormat( const GLenum internalFormat
static inline GLenum glGetTypeFromInternalFormat( const GLenum internalFormat );
static inline void glGetFormatSize( const GLenum internalFormat, GlFormatSize * pFormatSize );
static inline unsigned int glGetTypeSizeFromType( const GLenum type );
static inline GLenum glGetInternalFormatFromVkFormat ( VkFormat format );
MODIFICATIONS for use in libktx
===============================
@ -79,7 +78,6 @@ MODIFICATIONS for use in libktx
2019.3.09 #if 0 around GL type declarations.
2019.5.30 Use common ktxFormatSize to return results.
2019.5.30 Return blockSizeInBits 0 for default case of glGetFormatSize.
2019.5.30 Added glGetInternalFormatFromVkFormat.
================================================================================================
*/
@ -101,6 +99,7 @@ MODIFICATIONS for use in libktx
#endif // __cplusplus
#endif
/*
===========================================================================
Avoid warnings or even errors when using strict C99. "Redefinition of
@ -2436,219 +2435,4 @@ static inline void glGetFormatSize( const GLenum internalFormat, ktxFormatSize *
}
}
static inline GLint glGetInternalFormatFromVkFormat( VkFormat vkFormat )
{
switch ( vkFormat )
{
//
// 8 bits per component
//
case VK_FORMAT_R8_UNORM: return GL_R8; // 1-component, 8-bit unsigned normalized
case VK_FORMAT_R8G8_UNORM: return GL_RG8; // 2-component, 8-bit unsigned normalized
case VK_FORMAT_R8G8B8_UNORM: return GL_RGB8; // 3-component, 8-bit unsigned normalized
case VK_FORMAT_R8G8B8A8_UNORM: return GL_RGBA8; // 4-component, 8-bit unsigned normalized
case VK_FORMAT_R8_SNORM: return GL_R8_SNORM; // 1-component, 8-bit signed normalized
case VK_FORMAT_R8G8_SNORM: return GL_RG8_SNORM; // 2-component, 8-bit signed normalized
case VK_FORMAT_R8G8B8_SNORM: return GL_RGB8_SNORM; // 3-component, 8-bit signed normalized
case VK_FORMAT_R8G8B8A8_SNORM: return GL_RGBA8_SNORM; // 4-component, 8-bit signed normalized
case VK_FORMAT_R8_UINT: return GL_R8UI; // 1-component, 8-bit unsigned integer
case VK_FORMAT_R8G8_UINT: return GL_RG8UI; // 2-component, 8-bit unsigned integer
case VK_FORMAT_R8G8B8_UINT: return GL_RGB8UI; // 3-component, 8-bit unsigned integer
case VK_FORMAT_R8G8B8A8_UINT: return GL_RGBA8UI; // 4-component, 8-bit unsigned integer
case VK_FORMAT_R8_SINT: return GL_R8I; // 1-component, 8-bit signed integer
case VK_FORMAT_R8G8_SINT: return GL_RG8I; // 2-component, 8-bit signed integer
case VK_FORMAT_R8G8B8_SINT: return GL_RGB8I; // 3-component, 8-bit signed integer
case VK_FORMAT_R8G8B8A8_SINT: return GL_RGBA8I; // 4-component, 8-bit signed integer
case VK_FORMAT_R8_SRGB: return GL_SR8; // 1-component, 8-bit sRGB
case VK_FORMAT_R8G8_SRGB: return GL_SRG8; // 2-component, 8-bit sRGB
case VK_FORMAT_R8G8B8_SRGB: return GL_SRGB8; // 3-component, 8-bit sRGB
case VK_FORMAT_R8G8B8A8_SRGB: return GL_SRGB8_ALPHA8; // 4-component, 8-bit sRGB
//
// 16 bits per component
//
case VK_FORMAT_R16_UNORM: return GL_R16; // 1-component, 16-bit unsigned normalized
case VK_FORMAT_R16G16_UNORM: return GL_RG16; // 2-component, 16-bit unsigned normalized
case VK_FORMAT_R16G16B16_UNORM: return GL_RGB16; // 3-component, 16-bit unsigned normalized
case VK_FORMAT_R16G16B16A16_UNORM: return GL_RGBA16; // 4-component, 16-bit unsigned normalized
case VK_FORMAT_R16_SNORM: return GL_R16_SNORM; // 1-component, 16-bit signed normalized
case VK_FORMAT_R16G16_SNORM: return GL_RG16_SNORM; // 2-component, 16-bit signed normalized
case VK_FORMAT_R16G16B16_SNORM: return GL_RGB16_SNORM; // 3-component, 16-bit signed normalized
case VK_FORMAT_R16G16B16A16_SNORM: return GL_RGBA16_SNORM; // 4-component, 16-bit signed normalized
case VK_FORMAT_R16_UINT: return GL_R16UI; // 1-component, 16-bit unsigned integer
case VK_FORMAT_R16G16_UINT: return GL_RG16UI; // 2-component, 16-bit unsigned integer
case VK_FORMAT_R16G16B16_UINT: return GL_RGB16UI; // 3-component, 16-bit unsigned integer
case VK_FORMAT_R16G16B16A16_UINT: return GL_RGBA16UI; // 4-component, 16-bit unsigned integer
case VK_FORMAT_R16_SINT: return GL_R16I; // 1-component, 16-bit signed integer
case VK_FORMAT_R16G16_SINT: return GL_RG16I; // 2-component, 16-bit signed integer
case VK_FORMAT_R16G16B16_SINT: return GL_RGB16I; // 3-component, 16-bit signed integer
case VK_FORMAT_R16G16B16A16_SINT: return GL_RGBA16I; // 4-component, 16-bit signed integer
case VK_FORMAT_R16_SFLOAT: return GL_R16F; // 1-component, 16-bit floating-point
case VK_FORMAT_R16G16_SFLOAT: return GL_RG16F; // 2-component, 16-bit floating-point
case VK_FORMAT_R16G16B16_SFLOAT: return GL_RGB16F; // 3-component, 16-bit floating-point
case VK_FORMAT_R16G16B16A16_SFLOAT: return GL_RGBA16F; // 4-component, 16-bit floating-point
//
// 32 bits per component
//
case VK_FORMAT_R32_UINT: return GL_R32UI; // 1-component, 32-bit unsigned integer
case VK_FORMAT_R32G32_UINT: return GL_RG32UI; // 2-component, 32-bit unsigned integer
case VK_FORMAT_R32G32B32_UINT: return GL_RGB32UI; // 3-component, 32-bit unsigned integer
case VK_FORMAT_R32G32B32A32_UINT: return GL_RGBA32UI; // 4-component, 32-bit unsigned integer
case VK_FORMAT_R32_SINT: return GL_R32I; // 1-component, 32-bit signed integer
case VK_FORMAT_R32G32_SINT: return GL_RG32I; // 2-component, 32-bit signed integer
case VK_FORMAT_R32G32B32_SINT: return GL_RGB32I; // 3-component, 32-bit signed integer
case VK_FORMAT_R32G32B32A32_SINT: return GL_RGBA32I; // 4-component, 32-bit signed integer
case VK_FORMAT_R32_SFLOAT: return GL_R32F; // 1-component, 32-bit floating-point
case VK_FORMAT_R32G32_SFLOAT: return GL_RG32F; // 2-component, 32-bit floating-point
case VK_FORMAT_R32G32B32_SFLOAT: return GL_RGB32F; // 3-component, 32-bit floating-point
case VK_FORMAT_R32G32B32A32_SFLOAT: return GL_RGBA32F; // 4-component, 32-bit floating-point
//
// Packed
//
case VK_FORMAT_R5G5B5A1_UNORM_PACK16: return GL_RGB5; // 3-component 5:5:5, unsigned normalized
case VK_FORMAT_R5G6B5_UNORM_PACK16: return GL_RGB565; // 3-component 5:6:5, unsigned normalized
case VK_FORMAT_R4G4B4A4_UNORM_PACK16: return GL_RGBA4; // 4-component 4:4:4:4, unsigned normalized
case VK_FORMAT_A1R5G5B5_UNORM_PACK16: return GL_RGB5_A1; // 4-component 5:5:5:1, unsigned normalized
case VK_FORMAT_A2R10G10B10_UNORM_PACK32: return GL_RGB10_A2; // 4-component 10:10:10:2, unsigned normalized
case VK_FORMAT_A2R10G10B10_UINT_PACK32: return GL_RGB10_A2UI; // 4-component 10:10:10:2, unsigned integer
case VK_FORMAT_B10G11R11_UFLOAT_PACK32: return GL_R11F_G11F_B10F; // 3-component 11:11:10, floating-point
case VK_FORMAT_E5B9G9R9_UFLOAT_PACK32: return GL_RGB9_E5; // 3-component/exp 9:9:9/5, floating-point
//
// S3TC/DXT/BC
//
case VK_FORMAT_BC1_RGB_UNORM_BLOCK: return GL_COMPRESSED_RGB_S3TC_DXT1_EXT; // line through 3D space, 4x4 blocks, unsigned normalized
case VK_FORMAT_BC1_RGBA_UNORM_BLOCK: return GL_COMPRESSED_RGBA_S3TC_DXT1_EXT; // line through 3D space plus 1-bit alpha, 4x4 blocks, unsigned normalized
case VK_FORMAT_BC2_UNORM_BLOCK: return GL_COMPRESSED_RGBA_S3TC_DXT3_EXT; // line through 3D space plus line through 1D space, 4x4 blocks, unsigned normalized
case VK_FORMAT_BC3_UNORM_BLOCK: return GL_COMPRESSED_RGBA_S3TC_DXT5_EXT; // line through 3D space plus 4-bit alpha, 4x4 blocks, unsigned normalized
case VK_FORMAT_BC1_RGB_SRGB_BLOCK: return GL_COMPRESSED_SRGB_S3TC_DXT1_EXT; // line through 3D space, 4x4 blocks, sRGB
case VK_FORMAT_BC1_RGBA_SRGB_BLOCK: return GL_COMPRESSED_SRGB_ALPHA_S3TC_DXT1_EXT; // line through 3D space plus 1-bit alpha, 4x4 blocks, sRGB
case VK_FORMAT_BC2_SRGB_BLOCK: return GL_COMPRESSED_SRGB_ALPHA_S3TC_DXT3_EXT; // line through 3D space plus line through 1D space, 4x4 blocks, sRGB
case VK_FORMAT_BC3_SRGB_BLOCK: return GL_COMPRESSED_SRGB_ALPHA_S3TC_DXT5_EXT; // line through 3D space plus 4-bit alpha, 4x4 blocks, sRGB
case VK_FORMAT_BC4_UNORM_BLOCK: return GL_COMPRESSED_RED_RGTC1; // line through 1D space, 4x4 blocks, unsigned normalized
case VK_FORMAT_BC5_UNORM_BLOCK: return GL_COMPRESSED_RG_RGTC2; // two lines through 1D space, 4x4 blocks, unsigned normalized
case VK_FORMAT_BC4_SNORM_BLOCK: return GL_COMPRESSED_SIGNED_RED_RGTC1; // line through 1D space, 4x4 blocks, signed normalized
case VK_FORMAT_BC5_SNORM_BLOCK: return GL_COMPRESSED_SIGNED_RG_RGTC2; // two lines through 1D space, 4x4 blocks, signed normalized
case VK_FORMAT_BC6H_UFLOAT_BLOCK: return GL_COMPRESSED_RGB_BPTC_UNSIGNED_FLOAT; // 3-component, 4x4 blocks, unsigned floating-point
case VK_FORMAT_BC6H_SFLOAT_BLOCK: return GL_COMPRESSED_RGB_BPTC_SIGNED_FLOAT; // 3-component, 4x4 blocks, signed floating-point
case VK_FORMAT_BC7_UNORM_BLOCK: return GL_COMPRESSED_RGBA_BPTC_UNORM; // 4-component, 4x4 blocks, unsigned normalized
case VK_FORMAT_BC7_SRGB_BLOCK: return GL_COMPRESSED_SRGB_ALPHA_BPTC_UNORM; // 4-component, 4x4 blocks, sRGB
//
// ETC
//
case VK_FORMAT_ETC2_R8G8B8_UNORM_BLOCK: return GL_COMPRESSED_RGB8_ETC2; // 3-component ETC2, 4x4 blocks, unsigned normalized
case VK_FORMAT_ETC2_R8G8B8A1_UNORM_BLOCK: return GL_COMPRESSED_RGB8_PUNCHTHROUGH_ALPHA1_ETC2; // 4-component ETC2 with 1-bit alpha, 4x4 blocks, unsigned normalized
case VK_FORMAT_ETC2_R8G8B8A8_UNORM_BLOCK: return GL_COMPRESSED_RGBA8_ETC2_EAC; // 4-component ETC2, 4x4 blocks, unsigned normalized
case VK_FORMAT_ETC2_R8G8B8_SRGB_BLOCK: return GL_COMPRESSED_SRGB8_ETC2; // 3-component ETC2, 4x4 blocks, sRGB
case VK_FORMAT_ETC2_R8G8B8A1_SRGB_BLOCK: return GL_COMPRESSED_SRGB8_PUNCHTHROUGH_ALPHA1_ETC2; // 4-component ETC2 with 1-bit alpha, 4x4 blocks, sRGB
case VK_FORMAT_ETC2_R8G8B8A8_SRGB_BLOCK: return GL_COMPRESSED_SRGB8_ALPHA8_ETC2_EAC; // 4-component ETC2, 4x4 blocks, sRGB
case VK_FORMAT_EAC_R11_UNORM_BLOCK: return GL_COMPRESSED_R11_EAC; // 1-component ETC, 4x4 blocks, unsigned normalized
case VK_FORMAT_EAC_R11G11_UNORM_BLOCK: return GL_COMPRESSED_RG11_EAC; // 2-component ETC, 4x4 blocks, unsigned normalized
case VK_FORMAT_EAC_R11_SNORM_BLOCK: return GL_COMPRESSED_SIGNED_R11_EAC; // 1-component ETC, 4x4 blocks, signed normalized
case VK_FORMAT_EAC_R11G11_SNORM_BLOCK: return GL_COMPRESSED_SIGNED_RG11_EAC; // 2-component ETC, 4x4 blocks, signed normalized
//
// PVRTC
//
case VK_FORMAT_PVRTC1_2BPP_UNORM_BLOCK_IMG: return GL_COMPRESSED_RGBA_PVRTC_2BPPV1_IMG; // 3- or 4-component PVRTC, 16x8 blocks, unsigned normalized
case VK_FORMAT_PVRTC1_4BPP_UNORM_BLOCK_IMG: return GL_COMPRESSED_RGBA_PVRTC_4BPPV1_IMG; // 3- or 4-component PVRTC, 8x8 blocks, unsigned normalized
case VK_FORMAT_PVRTC2_2BPP_UNORM_BLOCK_IMG: return GL_COMPRESSED_RGBA_PVRTC_2BPPV2_IMG; // 3- or 4-component PVRTC, 16x8 blocks, unsigned normalized
case VK_FORMAT_PVRTC2_4BPP_UNORM_BLOCK_IMG: return GL_COMPRESSED_RGBA_PVRTC_4BPPV2_IMG; // 3- or 4-component PVRTC, 4x4 blocks, unsigned normalized
case VK_FORMAT_PVRTC1_2BPP_SRGB_BLOCK_IMG: return GL_COMPRESSED_SRGB_ALPHA_PVRTC_2BPPV1_EXT; // 4-component PVRTC, 16x8 blocks, sRGB
case VK_FORMAT_PVRTC1_4BPP_SRGB_BLOCK_IMG: return GL_COMPRESSED_SRGB_ALPHA_PVRTC_4BPPV1_EXT; // 4-component PVRTC, 8x8 blocks, sRGB
case VK_FORMAT_PVRTC2_2BPP_SRGB_BLOCK_IMG: return GL_COMPRESSED_SRGB_ALPHA_PVRTC_2BPPV2_IMG; // 4-component PVRTC, 8x4 blocks, sRGB
case VK_FORMAT_PVRTC2_4BPP_SRGB_BLOCK_IMG: return GL_COMPRESSED_SRGB_ALPHA_PVRTC_4BPPV2_IMG; // 4-component PVRTC, 4x4 blocks, sRGB
//
// ASTC
//
case VK_FORMAT_ASTC_4x4_UNORM_BLOCK: return GL_COMPRESSED_RGBA_ASTC_4x4_KHR; // 4-component ASTC, 4x4 blocks, unsigned normalized
case VK_FORMAT_ASTC_5x4_UNORM_BLOCK: return GL_COMPRESSED_RGBA_ASTC_5x4_KHR; // 4-component ASTC, 5x4 blocks, unsigned normalized
case VK_FORMAT_ASTC_5x5_UNORM_BLOCK: return GL_COMPRESSED_RGBA_ASTC_5x5_KHR; // 4-component ASTC, 5x5 blocks, unsigned normalized
case VK_FORMAT_ASTC_6x5_UNORM_BLOCK: return GL_COMPRESSED_RGBA_ASTC_6x5_KHR; // 4-component ASTC, 6x5 blocks, unsigned normalized
case VK_FORMAT_ASTC_6x6_UNORM_BLOCK: return GL_COMPRESSED_RGBA_ASTC_6x6_KHR; // 4-component ASTC, 6x6 blocks, unsigned normalized
case VK_FORMAT_ASTC_8x5_UNORM_BLOCK: return GL_COMPRESSED_RGBA_ASTC_8x5_KHR; // 4-component ASTC, 8x5 blocks, unsigned normalized
case VK_FORMAT_ASTC_8x6_UNORM_BLOCK: return GL_COMPRESSED_RGBA_ASTC_8x6_KHR; // 4-component ASTC, 8x6 blocks, unsigned normalized
case VK_FORMAT_ASTC_8x8_UNORM_BLOCK: return GL_COMPRESSED_RGBA_ASTC_8x8_KHR; // 4-component ASTC, 8x8 blocks, unsigned normalized
case VK_FORMAT_ASTC_10x5_UNORM_BLOCK: return GL_COMPRESSED_RGBA_ASTC_10x5_KHR; // 4-component ASTC, 10x5 blocks, unsigned normalized
case VK_FORMAT_ASTC_10x6_UNORM_BLOCK: return GL_COMPRESSED_RGBA_ASTC_10x6_KHR; // 4-component ASTC, 10x6 blocks, unsigned normalized
case VK_FORMAT_ASTC_10x8_UNORM_BLOCK: return GL_COMPRESSED_RGBA_ASTC_10x8_KHR; // 4-component ASTC, 10x8 blocks, unsigned normalized
case VK_FORMAT_ASTC_10x10_UNORM_BLOCK: return GL_COMPRESSED_RGBA_ASTC_10x10_KHR; // 4-component ASTC, 10x10 blocks, unsigned normalized
case VK_FORMAT_ASTC_12x10_UNORM_BLOCK: return GL_COMPRESSED_RGBA_ASTC_12x10_KHR; // 4-component ASTC, 12x10 blocks, unsigned normalized
case VK_FORMAT_ASTC_12x12_UNORM_BLOCK: return GL_COMPRESSED_RGBA_ASTC_12x12_KHR; // 4-component ASTC, 12x12 blocks, unsigned normalized
case VK_FORMAT_ASTC_4x4_SRGB_BLOCK: return GL_COMPRESSED_SRGB8_ALPHA8_ASTC_4x4_KHR; // 4-component ASTC, 4x4 blocks, sRGB
case VK_FORMAT_ASTC_5x4_SRGB_BLOCK: return GL_COMPRESSED_SRGB8_ALPHA8_ASTC_5x4_KHR; // 4-component ASTC, 5x4 blocks, sRGB
case VK_FORMAT_ASTC_5x5_SRGB_BLOCK: return GL_COMPRESSED_SRGB8_ALPHA8_ASTC_5x5_KHR; // 4-component ASTC, 5x5 blocks, sRGB
case VK_FORMAT_ASTC_6x5_SRGB_BLOCK: return GL_COMPRESSED_SRGB8_ALPHA8_ASTC_6x5_KHR; // 4-component ASTC, 6x5 blocks, sRGB
case VK_FORMAT_ASTC_6x6_SRGB_BLOCK: return GL_COMPRESSED_SRGB8_ALPHA8_ASTC_6x6_KHR; // 4-component ASTC, 6x6 blocks, sRGB
case VK_FORMAT_ASTC_8x5_SRGB_BLOCK: return GL_COMPRESSED_SRGB8_ALPHA8_ASTC_8x5_KHR; // 4-component ASTC, 8x5 blocks, sRGB
case VK_FORMAT_ASTC_8x6_SRGB_BLOCK: return GL_COMPRESSED_SRGB8_ALPHA8_ASTC_8x6_KHR; // 4-component ASTC, 8x6 blocks, sRGB
case VK_FORMAT_ASTC_8x8_SRGB_BLOCK: return GL_COMPRESSED_SRGB8_ALPHA8_ASTC_8x8_KHR; // 4-component ASTC, 8x8 blocks, sRGB
case VK_FORMAT_ASTC_10x5_SRGB_BLOCK: return GL_COMPRESSED_SRGB8_ALPHA8_ASTC_10x5_KHR; // 4-component ASTC, 10x5 blocks, sRGB
case VK_FORMAT_ASTC_10x6_SRGB_BLOCK: return GL_COMPRESSED_SRGB8_ALPHA8_ASTC_10x6_KHR; // 4-component ASTC, 10x6 blocks, sRGB
case VK_FORMAT_ASTC_10x8_SRGB_BLOCK: return GL_COMPRESSED_SRGB8_ALPHA8_ASTC_10x8_KHR; // 4-component ASTC, 10x8 blocks, sRGB
case VK_FORMAT_ASTC_10x10_SRGB_BLOCK: return GL_COMPRESSED_SRGB8_ALPHA8_ASTC_10x10_KHR; // 4-component ASTC, 10x10 blocks, sRGB
case VK_FORMAT_ASTC_12x10_SRGB_BLOCK: return GL_COMPRESSED_SRGB8_ALPHA8_ASTC_12x10_KHR; // 4-component ASTC, 12x10 blocks, sRGB
case VK_FORMAT_ASTC_12x12_SRGB_BLOCK: return GL_COMPRESSED_SRGB8_ALPHA8_ASTC_12x12_KHR; // 4-component ASTC, 12x12 blocks, sRGB
// XXX FIXME Update once Vulkan ASTC HDR & 3D extensions are released.
#if 0
case VK_FORMAT_UNDEFINED: return GL_COMPRESSED_RGBA_ASTC_3x3x3_OES; // 4-component ASTC, 3x3x3 blocks, unsigned normalized
case VK_FORMAT_UNDEFINED: return GL_COMPRESSED_RGBA_ASTC_4x3x3_OES; // 4-component ASTC, 4x3x3 blocks, unsigned normalized
case VK_FORMAT_UNDEFINED: return GL_COMPRESSED_RGBA_ASTC_4x4x3_OES; // 4-component ASTC, 4x4x3 blocks, unsigned normalized
case VK_FORMAT_UNDEFINED: return GL_COMPRESSED_RGBA_ASTC_4x4x4_OES; // 4-component ASTC, 4x4x4 blocks, unsigned normalized
case VK_FORMAT_UNDEFINED: return GL_COMPRESSED_RGBA_ASTC_5x4x4_OES; // 4-component ASTC, 5x4x4 blocks, unsigned normalized
case VK_FORMAT_UNDEFINED: return GL_COMPRESSED_RGBA_ASTC_5x5x4_OES; // 4-component ASTC, 5x5x4 blocks, unsigned normalized
case VK_FORMAT_UNDEFINED: return GL_COMPRESSED_RGBA_ASTC_5x5x5_OES; // 4-component ASTC, 5x5x5 blocks, unsigned normalized
case VK_FORMAT_UNDEFINED: return GL_COMPRESSED_RGBA_ASTC_6x5x5_OES; // 4-component ASTC, 6x5x5 blocks, unsigned normalized
case VK_FORMAT_UNDEFINED: return GL_COMPRESSED_RGBA_ASTC_6x6x5_OES; // 4-component ASTC, 6x6x5 blocks, unsigned normalized
case VK_FORMAT_UNDEFINED: return GL_COMPRESSED_RGBA_ASTC_6x6x6_OES; // 4-component ASTC, 6x6x6 blocks, unsigned normalized
case VK_FORMAT_UNDEFINED: return GL_COMPRESSED_SRGB8_ALPHA8_ASTC_3x3x3_OES; // 4-component ASTC, 3x3x3 blocks, sRGB
case VK_FORMAT_UNDEFINED: return GL_COMPRESSED_SRGB8_ALPHA8_ASTC_4x3x3_OES; // 4-component ASTC, 4x3x3 blocks, sRGB
case VK_FORMAT_UNDEFINED: return GL_COMPRESSED_SRGB8_ALPHA8_ASTC_4x4x3_OES; // 4-component ASTC, 4x4x3 blocks, sRGB
case VK_FORMAT_UNDEFINED: return GL_COMPRESSED_SRGB8_ALPHA8_ASTC_4x4x4_OES; // 4-component ASTC, 4x4x4 blocks, sRGB
case VK_FORMAT_UNDEFINED: return GL_COMPRESSED_SRGB8_ALPHA8_ASTC_5x4x4_OES; // 4-component ASTC, 5x4x4 blocks, sRGB
case VK_FORMAT_UNDEFINED: return GL_COMPRESSED_SRGB8_ALPHA8_ASTC_5x5x4_OES; // 4-component ASTC, 5x5x4 blocks, sRGB
case VK_FORMAT_UNDEFINED: return GL_COMPRESSED_SRGB8_ALPHA8_ASTC_5x5x5_OES; // 4-component ASTC, 5x5x5 blocks, sRGB
case VK_FORMAT_UNDEFINED: return GL_COMPRESSED_SRGB8_ALPHA8_ASTC_6x5x5_OES; // 4-component ASTC, 6x5x5 blocks, sRGB
case VK_FORMAT_UNDEFINED: return GL_COMPRESSED_SRGB8_ALPHA8_ASTC_6x6x5_OES; // 4-component ASTC, 6x6x5 blocks, sRGB
case VK_FORMAT_UNDEFINED: return GL_COMPRESSED_SRGB8_ALPHA8_ASTC_6x6x6_OES; // 4-component ASTC, 6x6x6 blocks, sRGB
#endif
//
// Depth/stencil
//
case VK_FORMAT_D16_UNORM: return GL_DEPTH_COMPONENT16;
case VK_FORMAT_X8_D24_UNORM_PACK32: return GL_DEPTH_COMPONENT24;
case VK_FORMAT_D32_SFLOAT: return GL_DEPTH_COMPONENT32F;
case VK_FORMAT_S8_UINT: return GL_STENCIL_INDEX8;
case VK_FORMAT_D24_UNORM_S8_UINT: return GL_DEPTH24_STENCIL8;
case VK_FORMAT_D32_SFLOAT_S8_UINT: return GL_DEPTH32F_STENCIL8;
default: return GL_INVALID_VALUE;
}
}
#endif // !GL_FORMAT_H

View File

@ -20,6 +20,7 @@
#define _CRT_SECURE_NO_WARNINGS
#endif
#include <assert.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>
@ -34,12 +35,13 @@
#include "memstream.h"
#include "texture2.h"
#include "unused.h"
#include "vk_format.h"
// FIXME: Test this #define and put it in a header somewhere.
//#define IS_BIG_ENDIAN (1 == *(unsigned char *)&(const int){0x01000000ul})
#define IS_BIG_ENDIAN 0
extern uint32_t vkFormatTypeSize(VkFormat format);
struct ktxTexture_vtbl ktxTexture2_vtbl;
struct ktxTexture_vtblInt ktxTexture2_vtblInt;
@ -224,8 +226,8 @@ ktx_uint32_t e5b9g9r9_ufloat_comparator[e5b9g9r9_bdbwordcount] = {
* This is used instead of referring to the DFD directly so code dealing
* with format info can be common to KTX 1 & 2.
*
* @param[in] This pointer the ktxTexture2 whose DFD to use.
* @param[in] fi pointer to the ktxFormatSize object to initialize.
* @param[in] This pointer the ktxFormatSize to initialize.
* @param[in] pDFD pointer to the DFD whose data to use.
*
* @return KTX_TRUE on success, otherwise KTX_FALSE.
*/
@ -308,9 +310,10 @@ ktxFormatSize_initFromDfd(ktxFormatSize* This, ktx_uint32_t* pDfd)
// the following reasons. (1) in v2 files levelIndex is always used to
// calculate data size and, of course, for the level offsets. (2) Finer
// grain access to supercompressed data than levels is not possible.
uint32_t blockByteLength;
recreateBytesPlane0FromSampleInfo(pDfd, &blockByteLength);
This->blockSizeInBits = blockByteLength * 8;
//
// The value set here is applied to the DFD after the data has been
// inflated during loading.
This->blockSizeInBits = reconstructDFDBytesPlane0FromSamples(pDfd) * 8;
}
return true;
}
@ -427,34 +430,11 @@ ktxTexture2_construct(ktxTexture2* This, ktxTextureCreateInfo* createInfo,
This->vkFormat = createInfo->vkFormat;
// Ideally we'd set all these things in ktxFormatSize_initFromDfd
// but This->_protected is not allocated until ktxTexture_construct;
if (This->isCompressed && (formatSize.flags & KTX_FORMAT_SIZE_YUVSDA_BIT) == 0) {
This->_protected->_typeSize = 1;
} else if (formatSize.flags & (KTX_FORMAT_SIZE_DEPTH_BIT | KTX_FORMAT_SIZE_STENCIL_BIT)) {
switch (createInfo->vkFormat) {
case VK_FORMAT_S8_UINT:
This->_protected->_typeSize = 1;
break;
case VK_FORMAT_D16_UNORM: // [[fallthrough]];
case VK_FORMAT_D16_UNORM_S8_UINT:
This->_protected->_typeSize = 2;
break;
case VK_FORMAT_X8_D24_UNORM_PACK32: // [[fallthrough]];
case VK_FORMAT_D24_UNORM_S8_UINT: // [[fallthrough]];
case VK_FORMAT_D32_SFLOAT: // [[fallthrough]];
case VK_FORMAT_D32_SFLOAT_S8_UINT:
This->_protected->_typeSize = 4;
break;
}
} else if (formatSize.flags & KTX_FORMAT_SIZE_PACKED_BIT) {
This->_protected->_typeSize = formatSize.blockSizeInBits / 8;
} else {
// Unpacked and uncompressed
uint32_t numComponents;
getDFDComponentInfoUnpacked(This->pDfd, &numComponents,
&This->_protected->_typeSize);
}
// The typeSize cannot be reconstructed just from the DFD as the BDFD
// does not capture the packing expressed by the [m]PACK[n] layout
// information in the VkFormat, so we calculate the typeSize directly
// from the vkFormat
This->_protected->_typeSize = vkFormatTypeSize(createInfo->vkFormat);
This->supercompressionScheme = KTX_SS_NONE;

View File

@ -401,6 +401,7 @@ static inline VkFormat vkGetFormatFromOpenGLFormat( const GLenum format, const G
return VK_FORMAT_UNDEFINED;
}
#if defined(NEED_VK_GET_FORMAT_FROM_OPENGL_TYPE)
static inline VkFormat vkGetFormatFromOpenGLType( const GLenum type, const GLuint numComponents, const GLboolean normalized )
{
switch ( type )
@ -566,6 +567,7 @@ static inline VkFormat vkGetFormatFromOpenGLType( const GLenum type, const GLuin
return VK_FORMAT_UNDEFINED;
}
#endif
static inline VkFormat vkGetFormatFromOpenGLInternalFormat( const GLenum internalFormat )
{
@ -823,6 +825,7 @@ static inline VkFormat vkGetFormatFromOpenGLInternalFormat( const GLenum interna
}
}
#if defined(NEED_VK_GET_FORMAT_SIZE)
static inline void vkGetFormatSize( const VkFormat format, ktxFormatSize * pFormatSize )
{
pFormatSize->minBlocksX = pFormatSize->minBlocksY = 1;
@ -1384,5 +1387,6 @@ static inline void vkGetFormatSize( const VkFormat format, ktxFormatSize * pForm
break;
}
}
#endif
#endif // !VK_FORMAT_H

View File

@ -30,13 +30,8 @@ isProhibitedFormat(VkFormat format)
case VK_FORMAT_R8G8B8A8_SSCALED:
case VK_FORMAT_B8G8R8A8_USCALED:
case VK_FORMAT_B8G8R8A8_SSCALED:
case VK_FORMAT_A8B8G8R8_UNORM_PACK32:
case VK_FORMAT_A8B8G8R8_SNORM_PACK32:
case VK_FORMAT_A8B8G8R8_USCALED_PACK32:
case VK_FORMAT_A8B8G8R8_SSCALED_PACK32:
case VK_FORMAT_A8B8G8R8_UINT_PACK32:
case VK_FORMAT_A8B8G8R8_SINT_PACK32:
case VK_FORMAT_A8B8G8R8_SRGB_PACK32:
case VK_FORMAT_A2R10G10B10_USCALED_PACK32:
case VK_FORMAT_A2R10G10B10_SSCALED_PACK32:
case VK_FORMAT_A2B10G10R10_USCALED_PACK32:

View File

@ -0,0 +1,584 @@
/***************************** Do not edit. *****************************
Automatically generated from vulkan_core.h version 267 by mkvkformatfiles.
*************************************************************************/
/*
** Copyright 2015-2023 The Khronos Group Inc.
**
** SPDX-License-Identifier: Apache-2.0
*/
#include <stdint.h>
#include "vkformat_enum.h"
uint32_t
vkFormatTypeSize(VkFormat format)
{
switch (format) {
case VK_FORMAT_UNDEFINED:
return 1;
case VK_FORMAT_R4G4_UNORM_PACK8:
return 1;
case VK_FORMAT_R4G4B4A4_UNORM_PACK16:
return 2;
case VK_FORMAT_B4G4R4A4_UNORM_PACK16:
return 2;
case VK_FORMAT_R5G6B5_UNORM_PACK16:
return 2;
case VK_FORMAT_B5G6R5_UNORM_PACK16:
return 2;
case VK_FORMAT_R5G5B5A1_UNORM_PACK16:
return 2;
case VK_FORMAT_B5G5R5A1_UNORM_PACK16:
return 2;
case VK_FORMAT_A1R5G5B5_UNORM_PACK16:
return 2;
case VK_FORMAT_R8_UNORM:
return 1;
case VK_FORMAT_R8_SNORM:
return 1;
case VK_FORMAT_R8_USCALED:
return 1;
case VK_FORMAT_R8_SSCALED:
return 1;
case VK_FORMAT_R8_UINT:
return 1;
case VK_FORMAT_R8_SINT:
return 1;
case VK_FORMAT_R8_SRGB:
return 1;
case VK_FORMAT_R8G8_UNORM:
return 1;
case VK_FORMAT_R8G8_SNORM:
return 1;
case VK_FORMAT_R8G8_USCALED:
return 1;
case VK_FORMAT_R8G8_SSCALED:
return 1;
case VK_FORMAT_R8G8_UINT:
return 1;
case VK_FORMAT_R8G8_SINT:
return 1;
case VK_FORMAT_R8G8_SRGB:
return 1;
case VK_FORMAT_R8G8B8_UNORM:
return 1;
case VK_FORMAT_R8G8B8_SNORM:
return 1;
case VK_FORMAT_R8G8B8_USCALED:
return 1;
case VK_FORMAT_R8G8B8_SSCALED:
return 1;
case VK_FORMAT_R8G8B8_UINT:
return 1;
case VK_FORMAT_R8G8B8_SINT:
return 1;
case VK_FORMAT_R8G8B8_SRGB:
return 1;
case VK_FORMAT_B8G8R8_UNORM:
return 1;
case VK_FORMAT_B8G8R8_SNORM:
return 1;
case VK_FORMAT_B8G8R8_USCALED:
return 1;
case VK_FORMAT_B8G8R8_SSCALED:
return 1;
case VK_FORMAT_B8G8R8_UINT:
return 1;
case VK_FORMAT_B8G8R8_SINT:
return 1;
case VK_FORMAT_B8G8R8_SRGB:
return 1;
case VK_FORMAT_R8G8B8A8_UNORM:
return 1;
case VK_FORMAT_R8G8B8A8_SNORM:
return 1;
case VK_FORMAT_R8G8B8A8_USCALED:
return 1;
case VK_FORMAT_R8G8B8A8_SSCALED:
return 1;
case VK_FORMAT_R8G8B8A8_UINT:
return 1;
case VK_FORMAT_R8G8B8A8_SINT:
return 1;
case VK_FORMAT_R8G8B8A8_SRGB:
return 1;
case VK_FORMAT_B8G8R8A8_UNORM:
return 1;
case VK_FORMAT_B8G8R8A8_SNORM:
return 1;
case VK_FORMAT_B8G8R8A8_USCALED:
return 1;
case VK_FORMAT_B8G8R8A8_SSCALED:
return 1;
case VK_FORMAT_B8G8R8A8_UINT:
return 1;
case VK_FORMAT_B8G8R8A8_SINT:
return 1;
case VK_FORMAT_B8G8R8A8_SRGB:
return 1;
case VK_FORMAT_A8B8G8R8_UNORM_PACK32:
return 4;
case VK_FORMAT_A8B8G8R8_SNORM_PACK32:
return 4;
case VK_FORMAT_A8B8G8R8_USCALED_PACK32:
return 4;
case VK_FORMAT_A8B8G8R8_SSCALED_PACK32:
return 4;
case VK_FORMAT_A8B8G8R8_UINT_PACK32:
return 4;
case VK_FORMAT_A8B8G8R8_SINT_PACK32:
return 4;
case VK_FORMAT_A8B8G8R8_SRGB_PACK32:
return 4;
case VK_FORMAT_A2R10G10B10_UNORM_PACK32:
return 4;
case VK_FORMAT_A2R10G10B10_SNORM_PACK32:
return 4;
case VK_FORMAT_A2R10G10B10_USCALED_PACK32:
return 4;
case VK_FORMAT_A2R10G10B10_SSCALED_PACK32:
return 4;
case VK_FORMAT_A2R10G10B10_UINT_PACK32:
return 4;
case VK_FORMAT_A2R10G10B10_SINT_PACK32:
return 4;
case VK_FORMAT_A2B10G10R10_UNORM_PACK32:
return 4;
case VK_FORMAT_A2B10G10R10_SNORM_PACK32:
return 4;
case VK_FORMAT_A2B10G10R10_USCALED_PACK32:
return 4;
case VK_FORMAT_A2B10G10R10_SSCALED_PACK32:
return 4;
case VK_FORMAT_A2B10G10R10_UINT_PACK32:
return 4;
case VK_FORMAT_A2B10G10R10_SINT_PACK32:
return 4;
case VK_FORMAT_R16_UNORM:
return 2;
case VK_FORMAT_R16_SNORM:
return 2;
case VK_FORMAT_R16_USCALED:
return 2;
case VK_FORMAT_R16_SSCALED:
return 2;
case VK_FORMAT_R16_UINT:
return 2;
case VK_FORMAT_R16_SINT:
return 2;
case VK_FORMAT_R16_SFLOAT:
return 2;
case VK_FORMAT_R16G16_UNORM:
return 2;
case VK_FORMAT_R16G16_SNORM:
return 2;
case VK_FORMAT_R16G16_USCALED:
return 2;
case VK_FORMAT_R16G16_SSCALED:
return 2;
case VK_FORMAT_R16G16_UINT:
return 2;
case VK_FORMAT_R16G16_SINT:
return 2;
case VK_FORMAT_R16G16_SFLOAT:
return 2;
case VK_FORMAT_R16G16B16_UNORM:
return 2;
case VK_FORMAT_R16G16B16_SNORM:
return 2;
case VK_FORMAT_R16G16B16_USCALED:
return 2;
case VK_FORMAT_R16G16B16_SSCALED:
return 2;
case VK_FORMAT_R16G16B16_UINT:
return 2;
case VK_FORMAT_R16G16B16_SINT:
return 2;
case VK_FORMAT_R16G16B16_SFLOAT:
return 2;
case VK_FORMAT_R16G16B16A16_UNORM:
return 2;
case VK_FORMAT_R16G16B16A16_SNORM:
return 2;
case VK_FORMAT_R16G16B16A16_USCALED:
return 2;
case VK_FORMAT_R16G16B16A16_SSCALED:
return 2;
case VK_FORMAT_R16G16B16A16_UINT:
return 2;
case VK_FORMAT_R16G16B16A16_SINT:
return 2;
case VK_FORMAT_R16G16B16A16_SFLOAT:
return 2;
case VK_FORMAT_R32_UINT:
return 4;
case VK_FORMAT_R32_SINT:
return 4;
case VK_FORMAT_R32_SFLOAT:
return 4;
case VK_FORMAT_R32G32_UINT:
return 4;
case VK_FORMAT_R32G32_SINT:
return 4;
case VK_FORMAT_R32G32_SFLOAT:
return 4;
case VK_FORMAT_R32G32B32_UINT:
return 4;
case VK_FORMAT_R32G32B32_SINT:
return 4;
case VK_FORMAT_R32G32B32_SFLOAT:
return 4;
case VK_FORMAT_R32G32B32A32_UINT:
return 4;
case VK_FORMAT_R32G32B32A32_SINT:
return 4;
case VK_FORMAT_R32G32B32A32_SFLOAT:
return 4;
case VK_FORMAT_R64_UINT:
return 8;
case VK_FORMAT_R64_SINT:
return 8;
case VK_FORMAT_R64_SFLOAT:
return 8;
case VK_FORMAT_R64G64_UINT:
return 8;
case VK_FORMAT_R64G64_SINT:
return 8;
case VK_FORMAT_R64G64_SFLOAT:
return 8;
case VK_FORMAT_R64G64B64_UINT:
return 8;
case VK_FORMAT_R64G64B64_SINT:
return 8;
case VK_FORMAT_R64G64B64_SFLOAT:
return 8;
case VK_FORMAT_R64G64B64A64_UINT:
return 8;
case VK_FORMAT_R64G64B64A64_SINT:
return 8;
case VK_FORMAT_R64G64B64A64_SFLOAT:
return 8;
case VK_FORMAT_B10G11R11_UFLOAT_PACK32:
return 4;
case VK_FORMAT_E5B9G9R9_UFLOAT_PACK32:
return 4;
case VK_FORMAT_D16_UNORM:
return 2;
case VK_FORMAT_X8_D24_UNORM_PACK32:
return 4;
case VK_FORMAT_D32_SFLOAT:
return 4;
case VK_FORMAT_S8_UINT:
return 1;
case VK_FORMAT_D16_UNORM_S8_UINT:
return 2;
case VK_FORMAT_D24_UNORM_S8_UINT:
return 4;
case VK_FORMAT_D32_SFLOAT_S8_UINT:
return 4;
case VK_FORMAT_BC1_RGB_UNORM_BLOCK:
return 1;
case VK_FORMAT_BC1_RGB_SRGB_BLOCK:
return 1;
case VK_FORMAT_BC1_RGBA_UNORM_BLOCK:
return 1;
case VK_FORMAT_BC1_RGBA_SRGB_BLOCK:
return 1;
case VK_FORMAT_BC2_UNORM_BLOCK:
return 1;
case VK_FORMAT_BC2_SRGB_BLOCK:
return 1;
case VK_FORMAT_BC3_UNORM_BLOCK:
return 1;
case VK_FORMAT_BC3_SRGB_BLOCK:
return 1;
case VK_FORMAT_BC4_UNORM_BLOCK:
return 1;
case VK_FORMAT_BC4_SNORM_BLOCK:
return 1;
case VK_FORMAT_BC5_UNORM_BLOCK:
return 1;
case VK_FORMAT_BC5_SNORM_BLOCK:
return 1;
case VK_FORMAT_BC6H_UFLOAT_BLOCK:
return 1;
case VK_FORMAT_BC6H_SFLOAT_BLOCK:
return 1;
case VK_FORMAT_BC7_UNORM_BLOCK:
return 1;
case VK_FORMAT_BC7_SRGB_BLOCK:
return 1;
case VK_FORMAT_ETC2_R8G8B8_UNORM_BLOCK:
return 1;
case VK_FORMAT_ETC2_R8G8B8_SRGB_BLOCK:
return 1;
case VK_FORMAT_ETC2_R8G8B8A1_UNORM_BLOCK:
return 1;
case VK_FORMAT_ETC2_R8G8B8A1_SRGB_BLOCK:
return 1;
case VK_FORMAT_ETC2_R8G8B8A8_UNORM_BLOCK:
return 1;
case VK_FORMAT_ETC2_R8G8B8A8_SRGB_BLOCK:
return 1;
case VK_FORMAT_EAC_R11_UNORM_BLOCK:
return 1;
case VK_FORMAT_EAC_R11_SNORM_BLOCK:
return 1;
case VK_FORMAT_EAC_R11G11_UNORM_BLOCK:
return 1;
case VK_FORMAT_EAC_R11G11_SNORM_BLOCK:
return 1;
case VK_FORMAT_ASTC_4x4_UNORM_BLOCK:
return 1;
case VK_FORMAT_ASTC_4x4_SRGB_BLOCK:
return 1;
case VK_FORMAT_ASTC_5x4_UNORM_BLOCK:
return 1;
case VK_FORMAT_ASTC_5x4_SRGB_BLOCK:
return 1;
case VK_FORMAT_ASTC_5x5_UNORM_BLOCK:
return 1;
case VK_FORMAT_ASTC_5x5_SRGB_BLOCK:
return 1;
case VK_FORMAT_ASTC_6x5_UNORM_BLOCK:
return 1;
case VK_FORMAT_ASTC_6x5_SRGB_BLOCK:
return 1;
case VK_FORMAT_ASTC_6x6_UNORM_BLOCK:
return 1;
case VK_FORMAT_ASTC_6x6_SRGB_BLOCK:
return 1;
case VK_FORMAT_ASTC_8x5_UNORM_BLOCK:
return 1;
case VK_FORMAT_ASTC_8x5_SRGB_BLOCK:
return 1;
case VK_FORMAT_ASTC_8x6_UNORM_BLOCK:
return 1;
case VK_FORMAT_ASTC_8x6_SRGB_BLOCK:
return 1;
case VK_FORMAT_ASTC_8x8_UNORM_BLOCK:
return 1;
case VK_FORMAT_ASTC_8x8_SRGB_BLOCK:
return 1;
case VK_FORMAT_ASTC_10x5_UNORM_BLOCK:
return 1;
case VK_FORMAT_ASTC_10x5_SRGB_BLOCK:
return 1;
case VK_FORMAT_ASTC_10x6_UNORM_BLOCK:
return 1;
case VK_FORMAT_ASTC_10x6_SRGB_BLOCK:
return 1;
case VK_FORMAT_ASTC_10x8_UNORM_BLOCK:
return 1;
case VK_FORMAT_ASTC_10x8_SRGB_BLOCK:
return 1;
case VK_FORMAT_ASTC_10x10_UNORM_BLOCK:
return 1;
case VK_FORMAT_ASTC_10x10_SRGB_BLOCK:
return 1;
case VK_FORMAT_ASTC_12x10_UNORM_BLOCK:
return 1;
case VK_FORMAT_ASTC_12x10_SRGB_BLOCK:
return 1;
case VK_FORMAT_ASTC_12x12_UNORM_BLOCK:
return 1;
case VK_FORMAT_ASTC_12x12_SRGB_BLOCK:
return 1;
case VK_FORMAT_G8B8G8R8_422_UNORM:
return 1;
case VK_FORMAT_B8G8R8G8_422_UNORM:
return 1;
case VK_FORMAT_G8_B8_R8_3PLANE_420_UNORM:
return 1;
case VK_FORMAT_G8_B8R8_2PLANE_420_UNORM:
return 1;
case VK_FORMAT_G8_B8_R8_3PLANE_422_UNORM:
return 1;
case VK_FORMAT_G8_B8R8_2PLANE_422_UNORM:
return 1;
case VK_FORMAT_G8_B8_R8_3PLANE_444_UNORM:
return 1;
case VK_FORMAT_R10X6_UNORM_PACK16:
return 2;
case VK_FORMAT_R10X6G10X6_UNORM_2PACK16:
return 2;
case VK_FORMAT_R10X6G10X6B10X6A10X6_UNORM_4PACK16:
return 2;
case VK_FORMAT_G10X6B10X6G10X6R10X6_422_UNORM_4PACK16:
return 2;
case VK_FORMAT_B10X6G10X6R10X6G10X6_422_UNORM_4PACK16:
return 2;
case VK_FORMAT_G10X6_B10X6_R10X6_3PLANE_420_UNORM_3PACK16:
return 2;
case VK_FORMAT_G10X6_B10X6R10X6_2PLANE_420_UNORM_3PACK16:
return 2;
case VK_FORMAT_G10X6_B10X6_R10X6_3PLANE_422_UNORM_3PACK16:
return 2;
case VK_FORMAT_G10X6_B10X6R10X6_2PLANE_422_UNORM_3PACK16:
return 2;
case VK_FORMAT_G10X6_B10X6_R10X6_3PLANE_444_UNORM_3PACK16:
return 2;
case VK_FORMAT_R12X4_UNORM_PACK16:
return 2;
case VK_FORMAT_R12X4G12X4_UNORM_2PACK16:
return 2;
case VK_FORMAT_R12X4G12X4B12X4A12X4_UNORM_4PACK16:
return 2;
case VK_FORMAT_G12X4B12X4G12X4R12X4_422_UNORM_4PACK16:
return 2;
case VK_FORMAT_B12X4G12X4R12X4G12X4_422_UNORM_4PACK16:
return 2;
case VK_FORMAT_G12X4_B12X4_R12X4_3PLANE_420_UNORM_3PACK16:
return 2;
case VK_FORMAT_G12X4_B12X4R12X4_2PLANE_420_UNORM_3PACK16:
return 2;
case VK_FORMAT_G12X4_B12X4_R12X4_3PLANE_422_UNORM_3PACK16:
return 2;
case VK_FORMAT_G12X4_B12X4R12X4_2PLANE_422_UNORM_3PACK16:
return 2;
case VK_FORMAT_G12X4_B12X4_R12X4_3PLANE_444_UNORM_3PACK16:
return 2;
case VK_FORMAT_G16B16G16R16_422_UNORM:
return 2;
case VK_FORMAT_B16G16R16G16_422_UNORM:
return 2;
case VK_FORMAT_G16_B16_R16_3PLANE_420_UNORM:
return 2;
case VK_FORMAT_G16_B16R16_2PLANE_420_UNORM:
return 2;
case VK_FORMAT_G16_B16_R16_3PLANE_422_UNORM:
return 2;
case VK_FORMAT_G16_B16R16_2PLANE_422_UNORM:
return 2;
case VK_FORMAT_G16_B16_R16_3PLANE_444_UNORM:
return 2;
case VK_FORMAT_G8_B8R8_2PLANE_444_UNORM:
return 1;
case VK_FORMAT_G10X6_B10X6R10X6_2PLANE_444_UNORM_3PACK16:
return 2;
case VK_FORMAT_G12X4_B12X4R12X4_2PLANE_444_UNORM_3PACK16:
return 2;
case VK_FORMAT_G16_B16R16_2PLANE_444_UNORM:
return 2;
case VK_FORMAT_A4R4G4B4_UNORM_PACK16:
return 2;
case VK_FORMAT_A4B4G4R4_UNORM_PACK16:
return 2;
case VK_FORMAT_ASTC_4x4_SFLOAT_BLOCK:
return 1;
case VK_FORMAT_ASTC_5x4_SFLOAT_BLOCK:
return 1;
case VK_FORMAT_ASTC_5x5_SFLOAT_BLOCK:
return 1;
case VK_FORMAT_ASTC_6x5_SFLOAT_BLOCK:
return 1;
case VK_FORMAT_ASTC_6x6_SFLOAT_BLOCK:
return 1;
case VK_FORMAT_ASTC_8x5_SFLOAT_BLOCK:
return 1;
case VK_FORMAT_ASTC_8x6_SFLOAT_BLOCK:
return 1;
case VK_FORMAT_ASTC_8x8_SFLOAT_BLOCK:
return 1;
case VK_FORMAT_ASTC_10x5_SFLOAT_BLOCK:
return 1;
case VK_FORMAT_ASTC_10x6_SFLOAT_BLOCK:
return 1;
case VK_FORMAT_ASTC_10x8_SFLOAT_BLOCK:
return 1;
case VK_FORMAT_ASTC_10x10_SFLOAT_BLOCK:
return 1;
case VK_FORMAT_ASTC_12x10_SFLOAT_BLOCK:
return 1;
case VK_FORMAT_ASTC_12x12_SFLOAT_BLOCK:
return 1;
case VK_FORMAT_PVRTC1_2BPP_UNORM_BLOCK_IMG:
return 1;
case VK_FORMAT_PVRTC1_4BPP_UNORM_BLOCK_IMG:
return 1;
case VK_FORMAT_PVRTC2_2BPP_UNORM_BLOCK_IMG:
return 1;
case VK_FORMAT_PVRTC2_4BPP_UNORM_BLOCK_IMG:
return 1;
case VK_FORMAT_PVRTC1_2BPP_SRGB_BLOCK_IMG:
return 1;
case VK_FORMAT_PVRTC1_4BPP_SRGB_BLOCK_IMG:
return 1;
case VK_FORMAT_PVRTC2_2BPP_SRGB_BLOCK_IMG:
return 1;
case VK_FORMAT_PVRTC2_4BPP_SRGB_BLOCK_IMG:
return 1;
case VK_FORMAT_ASTC_3x3x3_UNORM_BLOCK_EXT:
return 1;
case VK_FORMAT_ASTC_3x3x3_SRGB_BLOCK_EXT:
return 1;
case VK_FORMAT_ASTC_3x3x3_SFLOAT_BLOCK_EXT:
return 1;
case VK_FORMAT_ASTC_4x3x3_UNORM_BLOCK_EXT:
return 1;
case VK_FORMAT_ASTC_4x3x3_SRGB_BLOCK_EXT:
return 1;
case VK_FORMAT_ASTC_4x3x3_SFLOAT_BLOCK_EXT:
return 1;
case VK_FORMAT_ASTC_4x4x3_UNORM_BLOCK_EXT:
return 1;
case VK_FORMAT_ASTC_4x4x3_SRGB_BLOCK_EXT:
return 1;
case VK_FORMAT_ASTC_4x4x3_SFLOAT_BLOCK_EXT:
return 1;
case VK_FORMAT_ASTC_4x4x4_UNORM_BLOCK_EXT:
return 1;
case VK_FORMAT_ASTC_4x4x4_SRGB_BLOCK_EXT:
return 1;
case VK_FORMAT_ASTC_4x4x4_SFLOAT_BLOCK_EXT:
return 1;
case VK_FORMAT_ASTC_5x4x4_UNORM_BLOCK_EXT:
return 1;
case VK_FORMAT_ASTC_5x4x4_SRGB_BLOCK_EXT:
return 1;
case VK_FORMAT_ASTC_5x4x4_SFLOAT_BLOCK_EXT:
return 1;
case VK_FORMAT_ASTC_5x5x4_UNORM_BLOCK_EXT:
return 1;
case VK_FORMAT_ASTC_5x5x4_SRGB_BLOCK_EXT:
return 1;
case VK_FORMAT_ASTC_5x5x4_SFLOAT_BLOCK_EXT:
return 1;
case VK_FORMAT_ASTC_5x5x5_UNORM_BLOCK_EXT:
return 1;
case VK_FORMAT_ASTC_5x5x5_SRGB_BLOCK_EXT:
return 1;
case VK_FORMAT_ASTC_5x5x5_SFLOAT_BLOCK_EXT:
return 1;
case VK_FORMAT_ASTC_6x5x5_UNORM_BLOCK_EXT:
return 1;
case VK_FORMAT_ASTC_6x5x5_SRGB_BLOCK_EXT:
return 1;
case VK_FORMAT_ASTC_6x5x5_SFLOAT_BLOCK_EXT:
return 1;
case VK_FORMAT_ASTC_6x6x5_UNORM_BLOCK_EXT:
return 1;
case VK_FORMAT_ASTC_6x6x5_SRGB_BLOCK_EXT:
return 1;
case VK_FORMAT_ASTC_6x6x5_SFLOAT_BLOCK_EXT:
return 1;
case VK_FORMAT_ASTC_6x6x6_UNORM_BLOCK_EXT:
return 1;
case VK_FORMAT_ASTC_6x6x6_SRGB_BLOCK_EXT:
return 1;
case VK_FORMAT_ASTC_6x6x6_SFLOAT_BLOCK_EXT:
return 1;
case VK_FORMAT_R16G16_S10_5_NV:
return 2;
case VK_FORMAT_A1B5G5R5_UNORM_PACK16_KHR:
return 2;
case VK_FORMAT_A8_UNORM_KHR:
return 1;
default:
return 0;
}
}

View File

@ -16,7 +16,7 @@ index ca68545e4a..d7ecb7a0fd 100644
#undef DECLARE_PRIVATE
#undef DECLARE_PROTECTED
diff --git a/thirdparty/libktx/lib/dfdutils/vk2dfd.inl b/thirdparty/libktx/lib/dfdutils/vk2dfd.inl
index 85d53202a5..25c7a2c238 100644
index 5104c8fcb4..3398441e8c 100644
--- a/thirdparty/libktx/lib/dfdutils/vk2dfd.inl
+++ b/thirdparty/libktx/lib/dfdutils/vk2dfd.inl
@@ -370,6 +370,7 @@ case VK_FORMAT_PVRTC1_2BPP_SRGB_BLOCK_IMG: return createDFDCompressed(c_PVRTC, 8
@ -32,9 +32,9 @@ index 85d53202a5..25c7a2c238 100644
case VK_FORMAT_ASTC_6x6x6_SRGB_BLOCK_EXT: return createDFDCompressed(c_ASTC, 6, 6, 6, s_SRGB);
case VK_FORMAT_ASTC_6x6x6_SFLOAT_BLOCK_EXT: return createDFDCompressed(c_ASTC, 6, 6, 6, s_SFLOAT);
+#endif
case VK_FORMAT_R16G16_S10_5_NV: return createDFDUnpacked(0, 2, 2, 0, s_S10_5);
case VK_FORMAT_A1B5G5R5_UNORM_PACK16_KHR: {
int channels[] = {0,1,2,3}; int bits[] = {5,5,5,1};
return createDFDPacked(0, 4, bits, channels, s_UNORM);
diff --git a/thirdparty/libktx/lib/miniz_wrapper.cpp b/thirdparty/libktx/lib/miniz_wrapper.cpp
index 07920c4809..cbd7da540a 100644
--- a/thirdparty/libktx/lib/miniz_wrapper.cpp