include/fpu/softfloat: implement float16_abs helper

This will be required when expanding the MINMAX() macro for
16-bit/half-precision operations.

Backports commit 28136775cd99c628f7d7c642b04eb87f062efef8 from qemu
Alex Bennée 2018-03-08 10:00:23 -05:00 committed by Lioncash
parent 0eee5afd0e
commit facbc9ef66

@@ -252,6 +252,14 @@ static inline int float16_is_any_nan(float16 a)
     return ((float16_val(a) & ~0x8000) > 0x7c00);
 }
 
+static inline float16 float16_abs(float16 a)
+{
+    /* Note that abs does *not* handle NaN specially, nor does
+     * it flush denormal inputs to zero.
+     */
+    return make_float16(float16_val(a) & 0x7fff);
+}
+
 /*----------------------------------------------------------------------------
 | The pattern for a default generated half-precision NaN.
 *----------------------------------------------------------------------------*/
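
For illustration only (not part of the backported patch): a minimal, standalone sketch of the same bit trick, using a plain uint16_t in place of QEMU's float16 wrapper type. The sign of an IEEE 754 binary16 value sits in bit 15, so masking with 0x7fff clears it without inspecting the exponent or mantissa, which is why the helper needs no NaN or denormal handling. The half_abs name and the test encodings below are assumptions made for this sketch, not QEMU code.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-in for float16_abs(): clear the sign bit (bit 15) of a
 * raw IEEE 754 binary16 encoding. No NaN handling, no denormal flushing. */
static inline uint16_t half_abs(uint16_t a)
{
    return a & 0x7fff;
}

int main(void)
{
    assert(half_abs(0xc000) == 0x4000); /* -2.0 -> 2.0 */
    assert(half_abs(0x8000) == 0x0000); /* -0.0 -> +0.0 */
    assert(half_abs(0xfe00) == 0x7e00); /* negative quiet NaN -> NaN, payload preserved */
    assert(half_abs(0x0001) == 0x0001); /* smallest denormal passes through unchanged */
    printf("half_abs sketch: all checks passed\n");
    return 0;
}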