/**
*** Copyright (C) 1985-1999 Intel Corporation.  All rights reserved.
***
*** The information and source code contained herein is the exclusive
*** property of Intel Corporation and may not be disclosed, examined
*** or reproduced in whole or in part without explicit written authorization
*** from the company.
***
**/

/*
 * xmmintrin.h
 *
 * Principal header file for Streaming SIMD Extensions intrinsics.
 *
 * The intrinsics package can be used in two ways, based on whether or not
 * _MM_FUNCTIONALITY is defined; if it is, the C/x87 implementation
 * will be used (the "faux intrinsics").
 *
 * Note that the __m128 datatype provided in _MM_FUNCTIONALITY mode is
 * implemented as a struct: it will not be 128-bit aligned, will be
 * passed via the stack, and so on. _MM_FUNCTIONALITY mode is not
 * intended for performance, just semantics.
 */
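
/*
 * A minimal sketch (illustrative, not part of the original header) of how
 * a translation unit would opt into the faux intrinsics; this path is only
 * taken under the Intel compiler (__ICL), which supplies xmm_func.h:
 *
 *   #define _MM_FUNCTIONALITY
 *   #include <xmmintrin.h>
 */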

#pragma once
#ifndef __midl
#ifndef _INCLUDED_MM2
#define _INCLUDED_MM2

#include <crtdefs.h>

#if defined(_M_CEE_PURE)
#error ERROR: XMM intrinsics not supported in pure mode!
#else

/*
 * The __m64 type is required for the integer Streaming SIMD Extensions
 * intrinsics.
 */
#ifndef _MMINTRIN_H_INCLUDED
#include <mmintrin.h>
#endif

#ifdef _MM2_FUNCTIONALITY
/* support old notation */
#ifndef _MM_FUNCTIONALITY
#define _MM_FUNCTIONALITY
#endif
#endif

#ifdef __ICL
#ifdef _MM_FUNCTIONALITY
#include "xmm_func.h"
#else
/* using real intrinsics */
typedef long long __m128;
#endif
#else

typedef union __declspec(intrin_type) _CRT_ALIGN(16) __m128 {
    float               m128_f32[4];
    unsigned __int64    m128_u64[2];
    __int8              m128_i8[16];
    __int16             m128_i16[8];
    __int32             m128_i32[4];
    __int64             m128_i64[2];
    unsigned __int8     m128_u8[16];
    unsigned __int16    m128_u16[8];
    unsigned __int32    m128_u32[4];
} __m128;

#ifndef _INC_MALLOC
/* pick up _mm_malloc() and _mm_free() */
#include <malloc.h>
#endif
#endif
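
/*
 * A brief usage sketch (illustrative): the union members allow element
 * access from C code. This is MSVC-specific and not portable, and going
 * through the union bypasses the load/store intrinsics:
 *
 *   __m128 v = _mm_setr_ps(1.0f, 2.0f, 3.0f, 4.0f);
 *   float second = v.m128_f32[1];    second == 2.0f
 */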

/*******************************************************/
/* MACRO for the shuffle parameter of _mm_shuffle_ps().*/
/* Argument fp3 is a digit [0-3] that selects the      */
/* element of argument "b" of _mm_shuffle_ps that will */
/* be placed in fp3 of the result; fp2 is the same for */
/* fp2 of the result. fp1 is a digit [0-3] that        */
/* selects the element of argument "a" that will be    */
/* placed in fp1 of the result; fp0 is the same for    */
/* fp0 of the result.                                  */
/*******************************************************/
#define _MM_SHUFFLE(fp3,fp2,fp1,fp0) (((fp3) << 6) | ((fp2) << 4) | \
                                      ((fp1) << 2) | ((fp0)))
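
/*
 * A brief usage sketch (illustrative): with fp0/fp1 selecting from
 * argument "a" and fp2/fp3 selecting from argument "b", the call below
 * produces r = { a[0], a[2], b[1], b[3] }:
 *
 *   __m128 r = _mm_shuffle_ps(a, b, _MM_SHUFFLE(3, 1, 2, 0));
 */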

/*******************************************************/
/* MACRO for performing the transpose of a 4x4 matrix  */
/* of single-precision floating point values.          */
/* Arguments row0, row1, row2, and row3 are __m128     */
/* values whose elements form the corresponding rows   */
/* of a 4x4 matrix. The matrix transpose is returned   */
/* in arguments row0, row1, row2, and row3, where row0 */
/* now holds column 0 of the original matrix, row1 now */
/* holds column 1 of the original matrix, etc.         */
/*******************************************************/
#define _MM_TRANSPOSE4_PS(row0, row1, row2, row3) {             \
            __m128 tmp3, tmp2, tmp1, tmp0;                      \
                                                                \
            tmp0   = _mm_shuffle_ps((row0), (row1), 0x44);      \
            tmp2   = _mm_shuffle_ps((row0), (row1), 0xEE);      \
            tmp1   = _mm_shuffle_ps((row2), (row3), 0x44);      \
            tmp3   = _mm_shuffle_ps((row2), (row3), 0xEE);      \
                                                                \
            (row0) = _mm_shuffle_ps(tmp0, tmp1, 0x88);          \
            (row1) = _mm_shuffle_ps(tmp0, tmp1, 0xDD);          \
            (row2) = _mm_shuffle_ps(tmp2, tmp3, 0x88);          \
            (row3) = _mm_shuffle_ps(tmp2, tmp3, 0xDD);          \
        }
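
/*
 * A brief usage sketch (illustrative): transpose a 4x4 matrix held in
 * four row registers in place; afterwards r0 holds column 0 of the
 * original matrix ({ 0, 4, 8, 12 } below), r1 holds column 1, etc.
 *
 *   __m128 r0 = _mm_setr_ps( 0.0f,  1.0f,  2.0f,  3.0f);
 *   __m128 r1 = _mm_setr_ps( 4.0f,  5.0f,  6.0f,  7.0f);
 *   __m128 r2 = _mm_setr_ps( 8.0f,  9.0f, 10.0f, 11.0f);
 *   __m128 r3 = _mm_setr_ps(12.0f, 13.0f, 14.0f, 15.0f);
 *   _MM_TRANSPOSE4_PS(r0, r1, r2, r3);
 */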

/* constants for use with _mm_prefetch */
#define _MM_HINT_T0     1
#define _MM_HINT_T1     2
#define _MM_HINT_T2     3
#define _MM_HINT_NTA    0
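
/*
 * A brief usage sketch (illustrative; "p" is an arbitrary pointer):
 * hint that the cache line containing p will be needed soon, fetching
 * it into all cache levels (T0); _MM_HINT_NTA instead fetches with
 * minimal cache pollution.
 *
 *   _mm_prefetch((char const *)p, _MM_HINT_T0);
 */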

/* (this declspec not supported with 0.A or 0.B) */
#define _MM_ALIGN16 _CRT_ALIGN(16)
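
/*
 * A brief usage sketch (illustrative): align data for use with the
 * aligned load/store intrinsics declared below.
 *
 *   _MM_ALIGN16 float buf[4] = { 1.0f, 2.0f, 3.0f, 4.0f };
 *   __m128 v = _mm_load_ps(buf);    requires 16-byte alignment
 */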

/* MACRO functions for setting and reading the MXCSR */
#define _MM_EXCEPT_MASK       0x003f
#define _MM_EXCEPT_INVALID    0x0001
#define _MM_EXCEPT_DENORM     0x0002
#define _MM_EXCEPT_DIV_ZERO   0x0004
#define _MM_EXCEPT_OVERFLOW   0x0008
#define _MM_EXCEPT_UNDERFLOW  0x0010
#define _MM_EXCEPT_INEXACT    0x0020

#define _MM_MASK_MASK         0x1f80
#define _MM_MASK_INVALID      0x0080
#define _MM_MASK_DENORM       0x0100
#define _MM_MASK_DIV_ZERO     0x0200
#define _MM_MASK_OVERFLOW     0x0400
#define _MM_MASK_UNDERFLOW    0x0800
#define _MM_MASK_INEXACT      0x1000

#define _MM_ROUND_MASK        0x6000
#define _MM_ROUND_NEAREST     0x0000
#define _MM_ROUND_DOWN        0x2000
#define _MM_ROUND_UP          0x4000
#define _MM_ROUND_TOWARD_ZERO 0x6000

#define _MM_FLUSH_ZERO_MASK   0x8000
#define _MM_FLUSH_ZERO_ON     0x8000
#define _MM_FLUSH_ZERO_OFF    0x0000

#define _MM_SET_EXCEPTION_STATE(mask) \
            _mm_setcsr((_mm_getcsr() & ~_MM_EXCEPT_MASK) | (mask))
#define _MM_GET_EXCEPTION_STATE() \
            (_mm_getcsr() & _MM_EXCEPT_MASK)

#define _MM_SET_EXCEPTION_MASK(mask) \
            _mm_setcsr((_mm_getcsr() & ~_MM_MASK_MASK) | (mask))
#define _MM_GET_EXCEPTION_MASK() \
            (_mm_getcsr() & _MM_MASK_MASK)

#define _MM_SET_ROUNDING_MODE(mode) \
            _mm_setcsr((_mm_getcsr() & ~_MM_ROUND_MASK) | (mode))
#define _MM_GET_ROUNDING_MODE() \
            (_mm_getcsr() & _MM_ROUND_MASK)

#define _MM_SET_FLUSH_ZERO_MODE(mode) \
            _mm_setcsr((_mm_getcsr() & ~_MM_FLUSH_ZERO_MASK) | (mode))
#define _MM_GET_FLUSH_ZERO_MODE() \
            (_mm_getcsr() & _MM_FLUSH_ZERO_MASK)
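
/*
 * A brief usage sketch (illustrative): switch rounding to truncation,
 * do some work, then restore the previous mode:
 *
 *   unsigned int old_mode = _MM_GET_ROUNDING_MODE();
 *   _MM_SET_ROUNDING_MODE(_MM_ROUND_TOWARD_ZERO);
 *   ... conversions here round toward zero ...
 *   _MM_SET_ROUNDING_MODE(old_mode);
 */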

/*****************************************************/
/* INTRINSICS FUNCTION PROTOTYPES START HERE         */
/*****************************************************/

#if defined __cplusplus
extern "C" { /* Begin "C" */
/* Intrinsics use C name-mangling. */
#endif /* __cplusplus */

/*
 * FP, arithmetic
 */

extern __m128 _mm_add_ss(__m128 _A, __m128 _B);
extern __m128 _mm_add_ps(__m128 _A, __m128 _B);
extern __m128 _mm_sub_ss(__m128 _A, __m128 _B);
extern __m128 _mm_sub_ps(__m128 _A, __m128 _B);
extern __m128 _mm_mul_ss(__m128 _A, __m128 _B);
extern __m128 _mm_mul_ps(__m128 _A, __m128 _B);
extern __m128 _mm_div_ss(__m128 _A, __m128 _B);
extern __m128 _mm_div_ps(__m128 _A, __m128 _B);
extern __m128 _mm_sqrt_ss(__m128 _A);
extern __m128 _mm_sqrt_ps(__m128 _A);
extern __m128 _mm_rcp_ss(__m128 _A);
extern __m128 _mm_rcp_ps(__m128 _A);
extern __m128 _mm_rsqrt_ss(__m128 _A);
extern __m128 _mm_rsqrt_ps(__m128 _A);
extern __m128 _mm_min_ss(__m128 _A, __m128 _B);
extern __m128 _mm_min_ps(__m128 _A, __m128 _B);
extern __m128 _mm_max_ss(__m128 _A, __m128 _B);
extern __m128 _mm_max_ps(__m128 _A, __m128 _B);

/*
 * FP, logical
 */

extern __m128 _mm_and_ps(__m128 _A, __m128 _B);
extern __m128 _mm_andnot_ps(__m128 _A, __m128 _B);
extern __m128 _mm_or_ps(__m128 _A, __m128 _B);
extern __m128 _mm_xor_ps(__m128 _A, __m128 _B);

/*
 * FP, comparison
 */

extern __m128 _mm_cmpeq_ss(__m128 _A, __m128 _B);
extern __m128 _mm_cmpeq_ps(__m128 _A, __m128 _B);
extern __m128 _mm_cmplt_ss(__m128 _A, __m128 _B);
extern __m128 _mm_cmplt_ps(__m128 _A, __m128 _B);
extern __m128 _mm_cmple_ss(__m128 _A, __m128 _B);
extern __m128 _mm_cmple_ps(__m128 _A, __m128 _B);
extern __m128 _mm_cmpgt_ss(__m128 _A, __m128 _B);
extern __m128 _mm_cmpgt_ps(__m128 _A, __m128 _B);
extern __m128 _mm_cmpge_ss(__m128 _A, __m128 _B);
extern __m128 _mm_cmpge_ps(__m128 _A, __m128 _B);
extern __m128 _mm_cmpneq_ss(__m128 _A, __m128 _B);
extern __m128 _mm_cmpneq_ps(__m128 _A, __m128 _B);
extern __m128 _mm_cmpnlt_ss(__m128 _A, __m128 _B);
extern __m128 _mm_cmpnlt_ps(__m128 _A, __m128 _B);
extern __m128 _mm_cmpnle_ss(__m128 _A, __m128 _B);
extern __m128 _mm_cmpnle_ps(__m128 _A, __m128 _B);
extern __m128 _mm_cmpngt_ss(__m128 _A, __m128 _B);
extern __m128 _mm_cmpngt_ps(__m128 _A, __m128 _B);
extern __m128 _mm_cmpnge_ss(__m128 _A, __m128 _B);
extern __m128 _mm_cmpnge_ps(__m128 _A, __m128 _B);
extern __m128 _mm_cmpord_ss(__m128 _A, __m128 _B);
extern __m128 _mm_cmpord_ps(__m128 _A, __m128 _B);
extern __m128 _mm_cmpunord_ss(__m128 _A, __m128 _B);
extern __m128 _mm_cmpunord_ps(__m128 _A, __m128 _B);
extern int _mm_comieq_ss(__m128 _A, __m128 _B);
extern int _mm_comilt_ss(__m128 _A, __m128 _B);
extern int _mm_comile_ss(__m128 _A, __m128 _B);
extern int _mm_comigt_ss(__m128 _A, __m128 _B);
extern int _mm_comige_ss(__m128 _A, __m128 _B);
extern int _mm_comineq_ss(__m128 _A, __m128 _B);
extern int _mm_ucomieq_ss(__m128 _A, __m128 _B);
extern int _mm_ucomilt_ss(__m128 _A, __m128 _B);
extern int _mm_ucomile_ss(__m128 _A, __m128 _B);
extern int _mm_ucomigt_ss(__m128 _A, __m128 _B);
extern int _mm_ucomige_ss(__m128 _A, __m128 _B);
extern int _mm_ucomineq_ss(__m128 _A, __m128 _B);

/*
 * FP, conversions
 */

extern int _mm_cvt_ss2si(__m128 _A);
extern __m64 _mm_cvt_ps2pi(__m128 _A);
extern int _mm_cvtt_ss2si(__m128 _A);
extern __m64 _mm_cvtt_ps2pi(__m128 _A);
extern __m128 _mm_cvt_si2ss(__m128 _A, int _B);
extern __m128 _mm_cvt_pi2ps(__m128 _A, __m64 _B);
extern float _mm_cvtss_f32(__m128 _A);

/*
 * Support for 64-bit extension intrinsics
 */
#if defined(_M_AMD64)
extern __int64 _mm_cvtss_si64(__m128 _A);
extern __int64 _mm_cvttss_si64(__m128 _A);
extern __m128 _mm_cvtsi64_ss(__m128 _A, __int64 _B);
#endif

/*
 * FP, misc
 */

extern __m128 _mm_shuffle_ps(__m128 _A, __m128 _B, unsigned int _Imm8);
extern __m128 _mm_unpackhi_ps(__m128 _A, __m128 _B);
extern __m128 _mm_unpacklo_ps(__m128 _A, __m128 _B);
extern __m128 _mm_loadh_pi(__m128 _A, __m64 const* _B);
extern __m128 _mm_movehl_ps(__m128 _A, __m128 _B);
extern __m128 _mm_movelh_ps(__m128 _A, __m128 _B);
extern void _mm_storeh_pi(__m64 *_V, __m128 _A);
extern __m128 _mm_loadl_pi(__m128 _A, __m64 const* _B);
extern void _mm_storel_pi(__m64 *_V, __m128 _A);
extern int _mm_movemask_ps(__m128 _A);


/*
 * Integer extensions
 */
extern int _m_pextrw(__m64 _A, int _Imm);
extern __m64 _m_pinsrw(__m64 _A, int _B, int _Imm);
extern __m64 _m_pmaxsw(__m64 _A, __m64 _B);
extern __m64 _m_pmaxub(__m64 _A, __m64 _B);
extern __m64 _m_pminsw(__m64 _A, __m64 _B);
extern __m64 _m_pminub(__m64 _A, __m64 _B);
extern int _m_pmovmskb(__m64 _A);
extern __m64 _m_pmulhuw(__m64 _A, __m64 _B);
extern __m64 _m_pshufw(__m64 _A, int _Imm);
extern void _m_maskmovq(__m64 _A, __m64 _Mask, char *_P);
extern __m64 _m_pavgb(__m64 _A, __m64 _B);
extern __m64 _m_pavgw(__m64 _A, __m64 _B);
extern __m64 _m_psadbw(__m64 _A, __m64 _B);

/*
 * memory & initialization
 */

extern __m128 _mm_set_ss(float _A);
extern __m128 _mm_set_ps1(float _A);
extern __m128 _mm_set_ps(float _A, float _B, float _C, float _D);
extern __m128 _mm_setr_ps(float _A, float _B, float _C, float _D);
extern __m128 _mm_setzero_ps(void);
extern __m128 _mm_load_ss(float const* _A);
extern __m128 _mm_load_ps1(float const* _A);
extern __m128 _mm_load_ps(float const* _A);
extern __m128 _mm_loadr_ps(float const* _A);
extern __m128 _mm_loadu_ps(float const* _A);
extern void _mm_store_ss(float *_V, __m128 _A);
extern void _mm_store_ps1(float *_V, __m128 _A);
extern void _mm_store_ps(float *_V, __m128 _A);
extern void _mm_storer_ps(float *_V, __m128 _A);
extern void _mm_storeu_ps(float *_V, __m128 _A);
extern void _mm_prefetch(char const* _A, int _Sel);
extern void _mm_stream_pi(__m64 *_V, __m64 _A);
extern void _mm_stream_ps(float *_V, __m128 _A);
extern __m128 _mm_move_ss(__m128 _A, __m128 _B);

extern void _mm_sfence(void);
extern unsigned int _mm_getcsr(void);
extern void _mm_setcsr(unsigned int _NewVal);

#ifdef __ICL
extern void* __cdecl _mm_malloc(size_t _Siz, size_t _Al);
extern void __cdecl _mm_free(void *_P);
#endif

/* Alternate intrinsic names definition */
#define _mm_cvtss_si32    _mm_cvt_ss2si
#define _mm_cvtps_pi32    _mm_cvt_ps2pi
#define _mm_cvttss_si32   _mm_cvtt_ss2si
#define _mm_cvttps_pi32   _mm_cvtt_ps2pi
#define _mm_cvtsi32_ss    _mm_cvt_si2ss
#define _mm_cvtpi32_ps    _mm_cvt_pi2ps
#define _mm_extract_pi16  _m_pextrw
#define _mm_insert_pi16   _m_pinsrw
#define _mm_max_pi16      _m_pmaxsw
#define _mm_max_pu8       _m_pmaxub
#define _mm_min_pi16      _m_pminsw
#define _mm_min_pu8       _m_pminub
#define _mm_movemask_pi8  _m_pmovmskb
#define _mm_mulhi_pu16    _m_pmulhuw
#define _mm_shuffle_pi16  _m_pshufw
#define _mm_maskmove_si64 _m_maskmovq
#define _mm_avg_pu8       _m_pavgb
#define _mm_avg_pu16      _m_pavgw
#define _mm_sad_pu8       _m_psadbw
#define _mm_set1_ps       _mm_set_ps1
#define _mm_load1_ps      _mm_load_ps1
#define _mm_store1_ps     _mm_store_ps1

/******************************************************/
/* UTILITY INTRINSICS FUNCTION DEFINITIONS START HERE */
/******************************************************/

/*********************************************************/
/* NAME : _mm_cvtpi16_ps                                 */
/* DESCRIPTION : Convert 4 16-bit signed integer values  */
/*               to 4 single-precision float values      */
/* IN : __m64 a                                          */
/* OUT : none                                            */
/* RETURN : __m128 : (float)a                            */
/*********************************************************/
__inline __m128 _mm_cvtpi16_ps(__m64 a)
{
    __m128 tmp;
    /* 0xffff in each word where a is negative, 0x0000 elsewhere:
       the extension words for the sign-extending unpacks below */
    __m64 ext_val = _mm_cmpgt_pi16(_mm_setzero_si64(), a);

    /* sign-extend and convert the high two words */
    tmp = _mm_cvtpi32_ps(_mm_setzero_ps(), _mm_unpackhi_pi16(a, ext_val));
    /* move those to the upper half of the result, then sign-extend
       and convert the low two words into the lower half */
    return(_mm_cvtpi32_ps(_mm_movelh_ps(tmp, tmp),
                          _mm_unpacklo_pi16(a, ext_val)));
}
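
/*
 * A brief usage sketch (illustrative): widen four signed 16-bit values
 * to single-precision floats. Element 0 of the result comes from the
 * low word of the __m64 argument. Because the MMX and x87 register
 * files overlap, call _mm_empty() before returning to x87 code.
 *
 *   __m64  s = _mm_set_pi16(-1, 2, -3, 4);     low word is 4
 *   __m128 f = _mm_cvtpi16_ps(s);              f = { 4, -3, 2, -1 }
 *   _mm_empty();
 */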

/***********************************************************/
/* NAME : _mm_cvtpu16_ps                                   */
/* DESCRIPTION : Convert 4 16-bit unsigned integer values  */
/*               to 4 single-precision float values        */
/* IN : __m64 a                                            */
/* OUT : none                                              */
/* RETURN : __m128 : (float)a                              */
/***********************************************************/
__inline __m128 _mm_cvtpu16_ps(__m64 a)
{
    __m128 tmp;
    /* all-zero extension words for the zero-extending unpacks below */
    __m64 ext_val = _mm_setzero_si64();

    /* zero-extend and convert the high two words */
    tmp = _mm_cvtpi32_ps(_mm_setzero_ps(), _mm_unpackhi_pi16(a, ext_val));
    /* move those to the upper half of the result, then zero-extend
       and convert the low two words into the lower half */
    return(_mm_cvtpi32_ps(_mm_movelh_ps(tmp, tmp),
                          _mm_unpacklo_pi16(a, ext_val)));
}