// Per-channel exponent used with XMVectorPow to apply a gamma-2.2 transfer
// before MSE accumulation; the .w component is 1.f so alpha is left linear.
20 static const XMVECTORF32
g_Gamma22 = { 2.2f, 2.2f, 2.2f, 1.f };
// _ComputeMSE (fragment): computes the mean-squared error between two images
// of identical dimensions. Writes the per-channel MSE to the optional mseV[4]
// and the summed scalar MSE to mse.
// NOTE(review): this chunk is an extraction with missing lines (original line
// numbers jump); only the visible tokens are preserved/fixed here.
24 _Out_
float& mse, _Out_writes_opt_(4)
float* mseV,
// Guard: both images must have pixel data.
27 if ( !image1.pixels || !image2.pixels )
30 assert( image1.width == image2.width && image1.height == image2.height );
33 const size_t width = image1.width;
// Reject/adjust formats that the scanline loader cannot treat uniformly
// (X8 variants and sRGB/BC-compressed sRGB formats) — for image1...
40 switch( image1.format )
42 case DXGI_FORMAT_B8G8R8X8_UNORM:
46 case DXGI_FORMAT_B8G8R8X8_UNORM_SRGB:
50 case DXGI_FORMAT_R8G8B8A8_UNORM_SRGB:
51 case DXGI_FORMAT_BC1_UNORM_SRGB:
52 case DXGI_FORMAT_BC2_UNORM_SRGB:
53 case DXGI_FORMAT_BC3_UNORM_SRGB:
54 case DXGI_FORMAT_B8G8R8A8_UNORM_SRGB:
55 case DXGI_FORMAT_BC7_UNORM_SRGB:
// ...and the same screening for image2.
60 switch( image2.format )
62 case DXGI_FORMAT_B8G8R8X8_UNORM:
66 case DXGI_FORMAT_B8G8R8X8_UNORM_SRGB:
70 case DXGI_FORMAT_R8G8B8A8_UNORM_SRGB:
71 case DXGI_FORMAT_BC1_UNORM_SRGB:
72 case DXGI_FORMAT_BC2_UNORM_SRGB:
73 case DXGI_FORMAT_BC3_UNORM_SRGB:
74 case DXGI_FORMAT_B8G8R8A8_UNORM_SRGB:
75 case DXGI_FORMAT_BC7_UNORM_SRGB:
80 const uint8_t *pSrc1 = image1.pixels;
81 const size_t rowPitch1 = image1.rowPitch;
83 const uint8_t *pSrc2 = image2.pixels;
84 const size_t rowPitch2 = image2.rowPitch;
// Accumulator for the per-channel sum of squared differences.
86 XMVECTOR acc = g_XMZero;
// FIX(review): made 'two' const — a mutable function-local static is a
// thread-safety hazard and the value is never modified.
87 static const XMVECTORF32 two = { 2.0f, 2.0f, 2.0f, 2.0f };
89 for(
size_t h = 0; h < image1.height; ++h )
// Decode one scanline of each image to float4 into a shared scratch buffer:
// image1 at scanline[0..width), image2 at scanline[width..2*width).
91 XMVECTOR* ptr1 = scanline.get();
92 if ( !
_LoadScanline( ptr1, width, pSrc1, rowPitch1, image1.format ) )
95 XMVECTOR* ptr2 = scanline.get() + width;
96 if ( !
_LoadScanline( ptr2, width, pSrc2, rowPitch2, image2.format ) )
99 for(
size_t i = 0; i < width; ++i )
101 XMVECTOR v1 = *(ptr1++);
// Optional x2-bias: remap [0,1] -> [-1,1] for image1 (v1 = v1*2 - 1).
108 v1 = XMVectorMultiplyAdd( v1, two, g_XMNegativeOne );
111 XMVECTOR v2 = *(ptr2++);
// BUGFIX: original assigned the biased image-2 value to v1, clobbering
// image-1's pixel and zeroing the difference below. By symmetry with the
// image-1 path above, the x2-bias of v2 must be stored back into v2.
118 v2 = XMVectorMultiplyAdd( v2, two, g_XMNegativeOne );
// Per-channel difference, then optionally mask out ignored channels
// (select 0 into X/Y/Z/W when the corresponding ignore flag is set).
122 XMVECTOR v = XMVectorSubtract( v1, v2 );
125 v = XMVectorSelect( v, g_XMZero, g_XMMaskX );
129 v = XMVectorSelect( v, g_XMZero, g_XMMaskY );
133 v = XMVectorSelect( v, g_XMZero, g_XMMaskZ );
137 v = XMVectorSelect( v, g_XMZero, g_XMMaskW );
// acc += v*v  (sum of squared differences, per channel).
140 acc = XMVectorMultiplyAdd( v, v, acc );
// Divide the accumulated squares by the pixel count to get the mean.
148 XMVECTOR d = XMVectorReplicate(
float(image1.width * image1.height) );
149 XMVECTOR v = XMVectorDivide( acc, d );
// If the caller supplied mseV, store per-channel MSE there and sum it...
152 XMStoreFloat4( reinterpret_cast<XMFLOAT4*>( mseV ), v );
153 mse = mseV[0] + mseV[1] + mseV[2] + mseV[3];
// ...otherwise use a local XMFLOAT4 and sum its components.
158 XMStoreFloat4( &_mseV, v );
159 mse = _mseV.x + _mseV.y + _mseV.z + _mseV.w;
// CopyRectangle (fragment): copies a source sub-rectangle into a destination
// image at (xOffset, yOffset). Two paths are visible: a same-format raw
// memcpy path and a format-converting path (copyS/copyD below).
// NOTE(review): extraction gaps — early-return bodies and several statements
// between the visible lines are missing from this chunk.
173 _Use_decl_annotations_
182 return HRESULT_FROM_WIN32( ERROR_NOT_SUPPORTED );
// Validate the source rectangle: non-empty and fully inside srcImage.
185 if ( !srcRect.
w || !srcRect.
h || ( (srcRect.
x + srcRect.
w) > srcImage.
width ) || ( (srcRect.
y + srcRect.
h) > srcImage.
height ) )
// Validate that the offset rectangle fits inside dstImage.
190 if ( ( (xOffset + srcRect.
w) > dstImage.
width ) || ( (yOffset + srcRect.
h) > dstImage.
height ) )
203 return HRESULT_FROM_WIN32( ERROR_NOT_SUPPORTED );
// Convert source bits-per-pixel to bytes-per-pixel, rounding up.
210 sbpp = ( sbpp + 7 ) / 8;
// Same-format fast path: copy copyW bytes per row.
218 const size_t copyW = srcRect.
w * sbpp;
219 for(
size_t h=0; h < srcRect.
h; ++h )
// Bounds check before each row copy to avoid over-reads/over-writes.
221 if ( ( (pSrc+copyW) > pEndSrc ) || (pDest > pEndDest) )
224 memcpy_s( pDest, pEndDest - pDest, pSrc, copyW );
241 return HRESULT_FROM_WIN32( ERROR_NOT_SUPPORTED );
// Destination bits-per-pixel to bytes-per-pixel, rounding up.
245 dbpp = ( dbpp + 7 ) / 8;
251 return E_OUTOFMEMORY;
// Converting path: per-row byte counts for source and destination formats.
253 const size_t copyS = srcRect.
w * sbpp;
254 const size_t copyD = srcRect.
w * dbpp;
256 for(
size_t h=0; h < srcRect.
h; ++h )
258 if ( ( (pSrc+copyS) > pEndSrc) || ((pDest+copyD) > pEndDest) )
// ComputeMSE (fragment): public entry point. Dispatches on whether each image
// is block-compressed — BC inputs are first decompressed to
// R32G32B32A32_FLOAT scratch images, then _ComputeMSE is run on the
// uncompressed pair. Four cases are visible: both compressed, only image1,
// only image2, neither.
// NOTE(review): extraction gaps — error-handling lines between these
// statements are missing from this chunk.
280 _Use_decl_annotations_
291 return HRESULT_FROM_WIN32( ERROR_NOT_SUPPORTED );
// Case: both images compressed — decompress both before comparing.
299 HRESULT hr =
Decompress( image1, DXGI_FORMAT_R32G32B32A32_FLOAT, temp1 );
304 hr =
Decompress( image2, DXGI_FORMAT_R32G32B32A32_FLOAT, temp2 );
310 if ( !img1 || !img2 )
313 return _ComputeMSE( *img1, *img2, mse, mseV, flags );
// Case: only image1 compressed.
319 HRESULT hr =
Decompress( image1, DXGI_FORMAT_R32G32B32A32_FLOAT, temp );
327 return _ComputeMSE( *img, image2, mse, mseV, flags );
// Case: only image2 compressed.
336 HRESULT hr =
Decompress( image2, DXGI_FORMAT_R32G32B32A32_FLOAT, temp );
344 return _ComputeMSE( image1, *img, mse, mseV, flags );
// Case: neither compressed — compare directly.
349 return _ComputeMSE( image1, image2, mse, mseV, flags );
// NOTE(review): the lines below are not part of the function bodies above —
// they appear to be a symbol/declaration index emitted by the extraction tool
// (signatures of helpers referenced by this file). Preserved verbatim.
std::unique_ptr< DirectX::XMVECTOR, aligned_deleter > ScopedAlignedArrayXMVECTOR
const Image * GetImage(_In_ size_t mip, _In_ size_t item, _In_ size_t slice) const
bool IsPlanar(_In_ DXGI_FORMAT fmt)
_In_ size_t _In_ DXGI_FORMAT _In_ size_t _In_ DXGI_FORMAT _In_ DWORD flags
size_t _In_ DXGI_FORMAT size_t _In_ TEXP_LEGACY_FORMAT _In_ DWORD flags assert(pDestination &&outSize > 0)
_Use_decl_annotations_ bool _LoadScanline(XMVECTOR *pDestination, size_t count, LPCVOID pSource, size_t size, DXGI_FORMAT format)
bool IsCompressed(_In_ DXGI_FORMAT fmt)
static const XMVECTORF32 g_Gamma22
_Use_decl_annotations_ void _ConvertScanline(XMVECTOR *pBuffer, size_t count, DXGI_FORMAT outFormat, DXGI_FORMAT inFormat, DWORD flags)
bool IsPalettized(_In_ DXGI_FORMAT fmt)
HRESULT ComputeMSE(_In_ const Image &image1, _In_ const Image &image2, _Out_ float &mse, _Out_writes_opt_(4) float *mseV, _In_ DWORD flags=0)
HRESULT Decompress(_In_ const Image &cImage, _In_ DXGI_FORMAT format, _Out_ ScratchImage &image)
static HRESULT _ComputeMSE(_In_ const Image &image1, _In_ const Image &image2, _Out_ float &mse, _Out_writes_opt_(4) float *mseV, _In_ DWORD flags)
HRESULT CopyRectangle(_In_ const Image &srcImage, _In_ const Rect &srcRect, _In_ const Image &dstImage, _In_ DWORD filter, _In_ size_t xOffset, _In_ size_t yOffset)
size_t BitsPerPixel(_In_ DXGI_FORMAT fmt)
_Use_decl_annotations_ bool _StoreScanline(LPVOID pDestination, size_t size, DXGI_FORMAT format, const XMVECTOR *pSource, size_t count, float threshold)