/*
 *  Copyright 2013 The LibYuv Project Authors. All rights reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS. All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */

#include "../util/ssim.h"  // NOLINT

#include <string.h>

#ifdef __cplusplus
extern "C" {
#endif

typedef unsigned int uint32_t;    // NOLINT
typedef unsigned short uint16_t;  // NOLINT

#if !defined(LIBYUV_DISABLE_X86) && !defined(__SSE2__) && \
    (defined(_M_X64) || (defined(_M_IX86_FP) && (_M_IX86_FP >= 2)))
#define __SSE2__
#endif
#if !defined(LIBYUV_DISABLE_X86) && defined(__SSE2__)
#include <emmintrin.h>
#endif

#ifdef _OPENMP
#include <omp.h>
#endif

// SSIM
enum { KERNEL = 3, KERNEL_SIZE = 2 * KERNEL + 1 };

// Symmetric Gaussian kernel:  K[i] = ~11 * exp(-0.3 * i * i)
// The maximum value (11 x 11) must be less than 128 to avoid sign
// problems during the calls to _mm_mullo_epi16().
static const int K[KERNEL_SIZE] = {
    1, 3, 7, 11, 7, 3, 1  // ~11 * exp(-0.3 * i * i)
};
static const double kiW[KERNEL + 1 + 1] = {
    1. / 1089.,  // 1 / sum(i:0..6, j..6) K[i]*K[j]
    1. / 1089.,  // 1 / sum(i:0..6, j..6) K[i]*K[j]
    1. / 1056.,  // 1 / sum(i:0..5, j..6) K[i]*K[j]
    1. / 957.,   // 1 / sum(i:0..4, j..6) K[i]*K[j]
    1. / 726.,   // 1 / sum(i:0..3, j..6) K[i]*K[j]
};
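
// Quick derivation of the normalizers above (added note, not part of the
// upstream comments): one row of K sums to 1 + 3 + 7 + 11 + 7 + 3 + 1 = 33,
// so the full 7x7 kernel has total weight 33 * 33 = 1089. Clipping 1, 2 or 3
// columns at an image border leaves row sums of 32, 29 and 22, hence
// 33 * 32 = 1056, 33 * 29 = 957 and 33 * 22 = 726. CalcSSIM() picks the
// matching kiW[] entry for the rightmost pixels in its scratch-buffer pass.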

#if !defined(LIBYUV_DISABLE_X86) && defined(__SSE2__)

#define PWEIGHT(A, B) static_cast<uint16_t>(K[(A)] * K[(B)])  // weight product
#define MAKE_WEIGHT(L)                                                \
  {                                                                   \
    {                                                                 \
      {                                                               \
        PWEIGHT(L, 0)                                                 \
        , PWEIGHT(L, 1), PWEIGHT(L, 2), PWEIGHT(L, 3), PWEIGHT(L, 4), \
            PWEIGHT(L, 5), PWEIGHT(L, 6), 0                           \
      }                                                               \
    }                                                                 \
  }

// We need this union trick to be able to initialize constant static __m128i
// values. We can't call _mm_set_epi16() for static compile-time initialization.
static const struct {
  union {
    uint16_t i16_[8];
    __m128i m_;
  } values_;
} W0 = MAKE_WEIGHT(0), W1 = MAKE_WEIGHT(1), W2 = MAKE_WEIGHT(2),
  W3 = MAKE_WEIGHT(3);
// ... the rest is symmetric.
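// For reference (derived from PWEIGHT, not an upstream comment), the lanes
// hold the per-column weight products for one kernel row, e.g.:
//   W0.values_.i16_ = { 1,  3,  7,  11,  7,  3,  1, 0}   // K[0] * K[j]
//   W3.values_.i16_ = {11, 33, 77, 121, 77, 33, 11, 0}   // K[3] * K[j]
// The largest product, 11 * 11 = 121, stays below 128 as required by the
// _mm_mullo_epi16() sign note above.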
#undef MAKE_WEIGHT
#undef PWEIGHT

#endif

// Common final expression for SSIM, once the weighted sums are known.
static double FinalizeSSIM(double iw,
                           double xm,
                           double ym,
                           double xxm,
                           double xym,
                           double yym) {
  const double iwx = xm * iw;
  const double iwy = ym * iw;
  double sxx = xxm * iw - iwx * iwx;
  double syy = yym * iw - iwy * iwy;
  // small errors are possible, due to rounding. Clamp to zero.
  if (sxx < 0.) {
    sxx = 0.;
  }
  if (syy < 0.) {
    syy = 0.;
  }
  const double sxsy = sqrt(sxx * syy);
  const double sxy = xym * iw - iwx * iwy;
  static const double C11 = (0.01 * 0.01) * (255 * 255);
  static const double C22 = (0.03 * 0.03) * (255 * 255);
  static const double C33 = (0.015 * 0.015) * (255 * 255);
  const double l = (2. * iwx * iwy + C11) / (iwx * iwx + iwy * iwy + C11);
  const double c = (2. * sxsy + C22) / (sxx + syy + C22);
  const double s = (sxy + C33) / (sxsy + C33);
  return l * c * s;
}
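
// What FinalizeSSIM() computes, written in the usual luminance/contrast/
// structure form (iw is 1 / sum of kernel weights, so iwx and iwy are the
// weighted means mu_x and mu_y; sxx, syy, sxy and sxsy are the variances,
// covariance and sigma_x * sigma_y):
//   l = (2 * mu_x * mu_y + C11) / (mu_x^2 + mu_y^2 + C11)
//   c = (2 * sigma_x * sigma_y + C22) / (sigma_x^2 + sigma_y^2 + C22)
//   s = (sigma_xy + C33) / (sigma_x * sigma_y + C33)
//   SSIM = l * c * s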

// GetSSIM() does clipping.  GetSSIMFullKernel() does not.

// TODO(skal): use summed tables?
// Note: worst case of accumulation is a weight of 33 = 11 + 2 * (7 + 3 + 1)
// with a diff of 255, squared. The maximum error is thus 0x4388241,
// which fits into a 32-bit integer.
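// Spelled out (added arithmetic check): each row of K sums to 33, the full
// kernel to 33 * 33 = 1089, and the largest accumulated term is
// 1089 * 255 * 255 = 70812225 = 0x4388241, comfortably below 2^31.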
double GetSSIM(const uint8_t* org,
               const uint8_t* rec,
               int xo,
               int yo,
               int W,
               int H,
               int stride) {
  uint32_t ws = 0, xm = 0, ym = 0, xxm = 0, xym = 0, yym = 0;
  org += (yo - KERNEL) * stride;
  org += (xo - KERNEL);
  rec += (yo - KERNEL) * stride;
  rec += (xo - KERNEL);

  for (int y_ = 0; y_ < KERNEL_SIZE; ++y_, org += stride, rec += stride) {
    if (((yo - KERNEL + y_) < 0) || ((yo - KERNEL + y_) >= H)) {
      continue;
    }
    const int Wy = K[y_];
    for (int x_ = 0; x_ < KERNEL_SIZE; ++x_) {
      const int Wxy = Wy * K[x_];
      if (((xo - KERNEL + x_) >= 0) && ((xo - KERNEL + x_) < W)) {
        const int org_x = org[x_];
        const int rec_x = rec[x_];
        ws += Wxy;
        xm += Wxy * org_x;
        ym += Wxy * rec_x;
        xxm += Wxy * org_x * org_x;
        xym += Wxy * org_x * rec_x;
        yym += Wxy * rec_x * rec_x;
      }
    }
  }
  return FinalizeSSIM(1. / ws, xm, ym, xxm, xym, yym);
}
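
// GetSSIMFullKernel() assumes the whole 7x7 neighborhood of (xo, yo) is
// readable in both planes, so CalcSSIM() only calls it for interior pixels
// (plus the zero-padded scratch copies near the right edge) and passes the
// matching kiW[] normalizer as area_weight.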
double GetSSIMFullKernel(const uint8_t* org,
                         const uint8_t* rec,
                         int xo,
                         int yo,
                         int stride,
                         double area_weight) {
  uint32_t xm = 0, ym = 0, xxm = 0, xym = 0, yym = 0;

#if defined(LIBYUV_DISABLE_X86) || !defined(__SSE2__)

  org += yo * stride + xo;
  rec += yo * stride + xo;
  for (int y = 1; y <= KERNEL; y++) {
    const int dy1 = y * stride;
    const int dy2 = y * stride;
    const int Wy = K[KERNEL + y];

    for (int x = 1; x <= KERNEL; x++) {
      // Compute the contributions of upper-left (ul), upper-right (ur),
      // lower-left (ll) and lower-right (lr) points (see the diagram below).
      // The symmetric kernel has the same weight at those points.
      //       -  -  -  -  -  -  -
      //       -  ul -  -  -  ur -
      //       -  -  -  -  -  -  -
      //       -  -  -  0  -  -  -
      //       -  -  -  -  -  -  -
      //       -  ll -  -  -  lr -
      //       -  -  -  -  -  -  -
      const int Wxy = Wy * K[KERNEL + x];
      const int ul1 = org[-dy1 - x];
      const int ur1 = org[-dy1 + x];
      const int ll1 = org[dy1 - x];
      const int lr1 = org[dy1 + x];
      const int ul2 = rec[-dy2 - x];
      const int ur2 = rec[-dy2 + x];
      const int ll2 = rec[dy2 - x];
      const int lr2 = rec[dy2 + x];

      xm += Wxy * (ul1 + ur1 + ll1 + lr1);
      ym += Wxy * (ul2 + ur2 + ll2 + lr2);
      xxm += Wxy * (ul1 * ul1 + ur1 * ur1 + ll1 * ll1 + lr1 * lr1);
      xym += Wxy * (ul1 * ul2 + ur1 * ur2 + ll1 * ll2 + lr1 * lr2);
      yym += Wxy * (ul2 * ul2 + ur2 * ur2 + ll2 * ll2 + lr2 * lr2);
    }

    // Compute the contributions of up (u), down (d), left (l) and right (r)
    // points across the main axes (see the diagram below).
    // The symmetric kernel has the same weight at those points.
    //       -  -  -  -  -  -  -
    //       -  -  -  u  -  -  -
    //       -  -  -  -  -  -  -
    //       -  l  -  0  -  r  -
    //       -  -  -  -  -  -  -
    //       -  -  -  d  -  -  -
    //       -  -  -  -  -  -  -
    const int Wxy = Wy * K[KERNEL];
    const int u1 = org[-dy1];
    const int d1 = org[dy1];
    const int l1 = org[-y];
    const int r1 = org[y];
    const int u2 = rec[-dy2];
    const int d2 = rec[dy2];
    const int l2 = rec[-y];
    const int r2 = rec[y];

    xm += Wxy * (u1 + d1 + l1 + r1);
    ym += Wxy * (u2 + d2 + l2 + r2);
    xxm += Wxy * (u1 * u1 + d1 * d1 + l1 * l1 + r1 * r1);
    xym += Wxy * (u1 * u2 + d1 * d2 + l1 * l2 + r1 * r2);
    yym += Wxy * (u2 * u2 + d2 * d2 + l2 * l2 + r2 * r2);
  }

  // Lastly, the contribution of the center point (xo, yo).
  const int Wxy = K[KERNEL] * K[KERNEL];
  const int s1 = org[0];
  const int s2 = rec[0];
  xm += Wxy * s1;
  ym += Wxy * s2;
  xxm += Wxy * s1 * s1;
  xym += Wxy * s1 * s2;
  yym += Wxy * s2 * s2;
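
  // Tap count check (added note): the nested loops visit
  // 4 * KERNEL * KERNEL = 36 corner points, the axis pass adds 4 * KERNEL = 12
  // and the center adds 1, i.e. all 7 * 7 = 49 kernel taps exactly once.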

#else  // __SSE2__

  org += (yo - KERNEL) * stride + (xo - KERNEL);
  rec += (yo - KERNEL) * stride + (xo - KERNEL);

  const __m128i zero = _mm_setzero_si128();
  __m128i x = zero;
  __m128i y = zero;
  __m128i xx = zero;
  __m128i xy = zero;
  __m128i yy = zero;

// Read 8 pixels at line #L, convert to 16 bit, perform weighting
// and accumulate.
#define LOAD_LINE_PAIR(L, WEIGHT)                                            \
  do {                                                                       \
    const __m128i v0 =                                                       \
        _mm_loadl_epi64(reinterpret_cast<const __m128i*>(org + (L)*stride)); \
    const __m128i v1 =                                                       \
        _mm_loadl_epi64(reinterpret_cast<const __m128i*>(rec + (L)*stride)); \
    const __m128i w0 = _mm_unpacklo_epi8(v0, zero);                          \
    const __m128i w1 = _mm_unpacklo_epi8(v1, zero);                          \
    const __m128i ww0 = _mm_mullo_epi16(w0, (WEIGHT).values_.m_);            \
    const __m128i ww1 = _mm_mullo_epi16(w1, (WEIGHT).values_.m_);            \
    x = _mm_add_epi32(x, _mm_unpacklo_epi16(ww0, zero));                     \
    y = _mm_add_epi32(y, _mm_unpacklo_epi16(ww1, zero));                     \
    x = _mm_add_epi32(x, _mm_unpackhi_epi16(ww0, zero));                     \
    y = _mm_add_epi32(y, _mm_unpackhi_epi16(ww1, zero));                     \
    xx = _mm_add_epi32(xx, _mm_madd_epi16(ww0, w0));                         \
    xy = _mm_add_epi32(xy, _mm_madd_epi16(ww0, w1));                         \
    yy = _mm_add_epi32(yy, _mm_madd_epi16(ww1, w1));                         \
  } while (0)
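
// Why 16-bit math is safe here (added note): the largest weight product is
// 11 * 11 = 121 and the largest pixel value is 255, so the _mm_mullo_epi16()
// results stay at or below 121 * 255 = 30855 < 32768 and never go negative;
// each _mm_madd_epi16() lane then sums two products of at most 30855 * 255,
// which easily fits a signed 32-bit accumulator across the seven
// LOAD_LINE_PAIR() calls below.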

#define ADD_AND_STORE_FOUR_EPI32(M, OUT)                     \
  do {                                                       \
    uint32_t tmp[4];                                         \
    _mm_storeu_si128(reinterpret_cast<__m128i*>(tmp), (M));  \
    (OUT) = tmp[3] + tmp[2] + tmp[1] + tmp[0];               \
  } while (0)

  LOAD_LINE_PAIR(0, W0);
  LOAD_LINE_PAIR(1, W1);
  LOAD_LINE_PAIR(2, W2);
  LOAD_LINE_PAIR(3, W3);
  LOAD_LINE_PAIR(4, W2);
  LOAD_LINE_PAIR(5, W1);
  LOAD_LINE_PAIR(6, W0);

  ADD_AND_STORE_FOUR_EPI32(x, xm);
  ADD_AND_STORE_FOUR_EPI32(y, ym);
  ADD_AND_STORE_FOUR_EPI32(xx, xxm);
  ADD_AND_STORE_FOUR_EPI32(xy, xym);
  ADD_AND_STORE_FOUR_EPI32(yy, yym);

#undef LOAD_LINE_PAIR
#undef ADD_AND_STORE_FOUR_EPI32
#endif

  return FinalizeSSIM(area_weight, xm, ym, xxm, xym, yym);
}

static int start_max(int x, int y) {
  return (x > y) ? x : y;
}

double CalcSSIM(const uint8_t* org,
                const uint8_t* rec,
                const int image_width,
                const int image_height) {
  double SSIM = 0.;
  const int KERNEL_Y = (image_height < KERNEL) ? image_height : KERNEL;
  const int KERNEL_X = (image_width < KERNEL) ? image_width : KERNEL;
  const int start_x = start_max(image_width - 8 + KERNEL_X, KERNEL_X);
  const int start_y = start_max(image_height - KERNEL_Y, KERNEL_Y);
  const int stride = image_width;

  for (int j = 0; j < KERNEL_Y; ++j) {
    for (int i = 0; i < image_width; ++i) {
      SSIM += GetSSIM(org, rec, i, j, image_width, image_height, stride);
    }
  }

#ifdef _OPENMP
#pragma omp parallel for reduction(+ : SSIM)
#endif
  for (int j = KERNEL_Y; j < image_height - KERNEL_Y; ++j) {
    for (int i = 0; i < KERNEL_X; ++i) {
      SSIM += GetSSIM(org, rec, i, j, image_width, image_height, stride);
    }
    for (int i = KERNEL_X; i < start_x; ++i) {
      SSIM += GetSSIMFullKernel(org, rec, i, j, stride, kiW[0]);
    }
    if (start_x < image_width) {
      // GetSSIMFullKernel() needs to be able to read 8 pixels (in SSE2). So we
      // copy the 8 rightmost pixels on a cache area, and pad this area with
      // zeros which won't contribute to the overall SSIM value (but we need
      // to pass the correct normalizing constant!). By using this cache, we can
      // still call GetSSIMFullKernel() instead of the slower GetSSIM().
      // NOTE: we could use a similar method for the left-most pixels too.
      const int kScratchWidth = 8;
      const int kScratchStride = kScratchWidth + KERNEL + 1;
      uint8_t scratch_org[KERNEL_SIZE * kScratchStride] = {0};
      uint8_t scratch_rec[KERNEL_SIZE * kScratchStride] = {0};

      for (int k = 0; k < KERNEL_SIZE; ++k) {
        const int offset =
            (j - KERNEL + k) * stride + image_width - kScratchWidth;
        memcpy(scratch_org + k * kScratchStride, org + offset, kScratchWidth);
        memcpy(scratch_rec + k * kScratchStride, rec + offset, kScratchWidth);
      }
      for (int k = 0; k <= KERNEL_X + 1; ++k) {
        SSIM += GetSSIMFullKernel(scratch_org, scratch_rec, KERNEL + k, KERNEL,
                                  kScratchStride, kiW[k]);
      }
    }
  }

  for (int j = start_y; j < image_height; ++j) {
    for (int i = 0; i < image_width; ++i) {
      SSIM += GetSSIM(org, rec, i, j, image_width, image_height, stride);
    }
  }
  return SSIM;
}
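
// Note on the return value (added, based on the loops above): CalcSSIM()
// returns the sum of the per-pixel SSIM scores over the whole plane; a caller
// that wants the conventional mean SSIM would divide the result by
// image_width * image_height before reporting it or passing it to CalcLSSIM().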

double CalcLSSIM(double ssim) {
  return -10.0 * log10(1.0 - ssim);
}
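
// For intuition (added example): a mean SSIM of 0.99 gives
// -10 * log10(0.01) = 20 dB and 0.999 gives 30 dB, so a higher LSSIM means the
// reconstruction is closer to the original.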

#ifdef __cplusplus
}  // extern "C"
#endif