/* FreeRDP: A Remote Desktop Protocol Client
 * Optimized alpha blending routines.
 * vi:ts=4 sw=4:
 *
 * (c) Copyright 2012 Hewlett-Packard Development Company, L.P.
 * Licensed under the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License. You may obtain
 * a copy of the License at http://www.apache.org/licenses/LICENSE-2.0.
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
 * or implied. See the License for the specific language governing
 * permissions and limitations under the License.
 *
 * Note: this code assumes the second operand is fully opaque,
 * i.e.
 * newval = alpha1*val1 + (1-alpha1)*val2
 * rather than
 * newval = alpha1*val1 + (1-alpha1)*alpha2*val2
 * The IPP gives other options.
 */
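
/* For reference: per channel, the SIMD loop below in effect computes
 *
 *   out = src2 + (((alpha + 1) * (src1 - src2)) >> 8)
 *
 * where alpha comes from the first operand's pixel. This approximates the
 * newval = alpha1*val1 + (1-alpha1)*val2 formula above while replacing the
 * division by 255 with a shift.
 */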

#include <freerdp/config.h>

#include <freerdp/types.h>
#include <freerdp/primitives.h>
#include <winpr/sysinfo.h>

#include "prim_alphaComp.h"

#include "prim_internal.h"

/* ------------------------------------------------------------------------- */
#if defined(SSE_AVX_INTRINSICS_ENABLED)
#include <emmintrin.h> /* SSE2 */
#include <pmmintrin.h> /* SSE3 (LDDQU) */

/* Plain-C fallbacks, used for narrow images and unaligned row edges. */
static primitives_t* generic = NULL;

static pstatus_t sse2_alphaComp_argb(const BYTE* WINPR_RESTRICT pSrc1, UINT32 src1Step,
                                     const BYTE* WINPR_RESTRICT pSrc2, UINT32 src2Step,
                                     BYTE* WINPR_RESTRICT pDst, UINT32 dstStep, UINT32 width,
                                     UINT32 height)
{
	const UINT32* sptr1 = (const UINT32*)pSrc1;
	const UINT32* sptr2 = (const UINT32*)pSrc2;

	if ((width == 0) || (height == 0))
		return PRIMITIVES_SUCCESS;

	if (width < 4) /* pointless if too small */
	{
		return generic->alphaComp_argb(pSrc1, src1Step, pSrc2, src2Step, pDst, dstStep, width,
		                               height);
	}

	UINT32* dptr = (UINT32*)pDst;
	const size_t linebytes = width * sizeof(UINT32);
	const size_t src1Jump = (src1Step - linebytes) / sizeof(UINT32);
	const size_t src2Jump = (src2Step - linebytes) / sizeof(UINT32);
	const size_t dstJump = (dstStep - linebytes) / sizeof(UINT32);
	__m128i xmm0 = mm_set1_epu32(0);  /* zeros, for widening bytes to words */
	__m128i xmm1 = _mm_set1_epi16(1); /* added to the alphas before the multiply */

	for (UINT32 y = 0; y < height; ++y)
	{
		uint32_t pixels = width;
		uint32_t count = 0;
		/* Get to the 16-byte boundary now. */
		uint32_t leadIn = 0;

		switch ((ULONG_PTR)dptr & 0x0f)
		{
			case 0:
				leadIn = 0;
				break;

			case 4:
				leadIn = 3;
				break;

			case 8:
				leadIn = 2;
				break;

			case 12:
				leadIn = 1;
				break;

			default:
				/* We'll never hit a 16-byte boundary, so do the whole
				 * thing the slow way.
				 */
				leadIn = width;
				break;
		}
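
		/* A pixel is 4 bytes, so a destination 4, 8 or 12 bytes past a
		 * 16-byte boundary needs 3, 2 or 1 scalar pixels before the aligned
		 * stores in the main loop can be used; any other misalignment can
		 * never reach a 16-byte boundary.
		 */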
		if (leadIn)
		{
			const pstatus_t status =
			    generic->alphaComp_argb((const BYTE*)sptr1, src1Step, (const BYTE*)sptr2,
			                            src2Step, (BYTE*)dptr, dstStep, leadIn, 1);
			if (status != PRIMITIVES_SUCCESS)
				return status;

			sptr1 += leadIn;
			sptr2 += leadIn;
			dptr += leadIn;
			pixels -= leadIn;
		}

		/* Use SSE registers to do 4 pixels at a time. */
		count = pixels >> 2;
		pixels -= count << 2;

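		/* Each iteration loads four ARGB pixels from each source, widens the
		 * 8-bit channels to 16 bits (high and low pixel pairs separately),
		 * blends them, and packs the results back to bytes.
		 */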
		while (count--)
		{
			__m128i xmm2;
			__m128i xmm3;
			__m128i xmm4;
			__m128i xmm5;
			__m128i xmm6;
			__m128i xmm7;
			/* BdGdRdAdBcGcRcAcBbGbRbAbBaGaRaAa */
			xmm2 = LOAD_SI128(sptr1);
			sptr1 += 4;
			/* BhGhRhAhBgGgRgAgBfGfRfAfBeGeReAe */
			xmm3 = LOAD_SI128(sptr2);
			sptr2 += 4;
			/* 00Bd00Gd00Rd00Ad00Bc00Gc00Rc00Ac */
			xmm4 = _mm_unpackhi_epi8(xmm2, xmm0);
			/* 00Bh00Gh00Rh00Ah00Bg00Gg00Rg00Ag */
			xmm5 = _mm_unpackhi_epi8(xmm3, xmm0);
			/* subtract */
			xmm6 = _mm_subs_epi16(xmm4, xmm5);
			/* 00Bd00Gd00Rd00Ad00Ac00Ac00Ac00Ac */
			xmm4 = _mm_shufflelo_epi16(xmm4, 0xff);
			/* 00Ad00Ad00Ad00Ad00Ac00Ac00Ac00Ac */
			xmm4 = _mm_shufflehi_epi16(xmm4, 0xff);
			/* Add one to alphas */
			xmm4 = _mm_adds_epi16(xmm4, xmm1);
			/* Multiply and take low word */
			xmm4 = _mm_mullo_epi16(xmm4, xmm6);
			/* Shift 8 right */
			xmm4 = _mm_srai_epi16(xmm4, 8);
			/* Add xmm5 */
			xmm4 = _mm_adds_epi16(xmm4, xmm5);
			/* 00Bl00Gl00Rl00Al00Bk00Gk00Rk00Ak */
			/* 00Bb00Gb00Rb00Ab00Ba00Ga00Ra00Aa */
			xmm5 = _mm_unpacklo_epi8(xmm2, xmm0);
			/* 00Bf00Gf00Rf00Af00Be00Ge00Re00Ae */
			xmm6 = _mm_unpacklo_epi8(xmm3, xmm0);
			/* subtract */
			xmm7 = _mm_subs_epi16(xmm5, xmm6);
			/* 00Bb00Gb00Rb00Ab00Aa00Aa00Aa00Aa */
			xmm5 = _mm_shufflelo_epi16(xmm5, 0xff);
			/* 00Ab00Ab00Ab00Ab00Aa00Aa00Aa00Aa */
			xmm5 = _mm_shufflehi_epi16(xmm5, 0xff);
			/* Add one to alphas */
			xmm5 = _mm_adds_epi16(xmm5, xmm1);
			/* Multiply and take low word */
			xmm5 = _mm_mullo_epi16(xmm5, xmm7);
			/* Shift 8 right */
			xmm5 = _mm_srai_epi16(xmm5, 8);
			/* Add xmm6 */
			xmm5 = _mm_adds_epi16(xmm5, xmm6);
			/* 00Bj00Gj00Rj00Aj00Bi00Gi00Ri00Ai */
			/* Must mask off remainders or pack gets confused */
			xmm3 = _mm_set1_epi16(0x00ffU);
			xmm4 = _mm_and_si128(xmm4, xmm3);
			xmm5 = _mm_and_si128(xmm5, xmm3);
			/* BlGlRlAlBkGkRkAkBjGjRjAjBiGiRiAi */
			xmm5 = _mm_packus_epi16(xmm5, xmm4);
			/* Aligned store; alignment is guaranteed by the lead-in above. */
			_mm_store_si128((__m128i*)dptr, xmm5);
			dptr += 4;
		}

		/* Finish off the remainder. */
		if (pixels)
		{
			const pstatus_t status =
			    generic->alphaComp_argb((const BYTE*)sptr1, src1Step, (const BYTE*)sptr2,
			                            src2Step, (BYTE*)dptr, dstStep, pixels, 1);
			if (status != PRIMITIVES_SUCCESS)
				return status;

			sptr1 += pixels;
			sptr2 += pixels;
			dptr += pixels;
		}

		/* Jump to next row. */
		sptr1 += src1Jump;
		sptr2 += src2Jump;
		dptr += dstJump;
	}

	return PRIMITIVES_SUCCESS;
}
#endif

/* ------------------------------------------------------------------------- */
void primitives_init_alphaComp_sse3(primitives_t* WINPR_RESTRICT prims)
{
#if defined(SSE_AVX_INTRINSICS_ENABLED)
	generic = primitives_get_generic();
	primitives_init_alphaComp(prims);

	if (IsProcessorFeaturePresent(PF_SSE2_INSTRUCTIONS_AVAILABLE) &&
	    IsProcessorFeaturePresent(PF_SSE3_INSTRUCTIONS_AVAILABLE)) /* for LDDQU */
	{
		WLog_VRB(PRIM_TAG, "SSE2/SSE3 optimizations");
		prims->alphaComp_argb = sse2_alphaComp_argb;
	}

#else
	WLog_VRB(PRIM_TAG, "undefined WITH_SIMD or SSE3 intrinsics not available");
	WINPR_UNUSED(prims);
#endif
}
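
/* Usage sketch (hypothetical caller, not part of this file): once the
 * primitives library is initialized, the blend is reached through the
 * function table, e.g.
 *
 *   primitives_t* prims = primitives_get();
 *   prims->alphaComp_argb(src1, stride1, src2, stride2, dst, dstStride, w, h);
 *
 * where all three buffers hold 32-bit ARGB pixels and the strides are in
 * bytes.
 */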