softfloat.c

/*
 * Floating point emulation support for subnormalised numbers on SH4
 * architecture. This file is derived from the SoftFloat IEC/IEEE
 * Floating-point Arithmetic Package, Release 2, the original license of
 * which is reproduced below.
 *
 * ========================================================================
 *
 * This C source file is part of the SoftFloat IEC/IEEE Floating-point
 * Arithmetic Package, Release 2.
 *
 * Written by John R. Hauser. This work was made possible in part by the
 * International Computer Science Institute, located at Suite 600, 1947 Center
 * Street, Berkeley, California 94704. Funding was partially provided by the
 * National Science Foundation under grant MIP-9311980. The original version
 * of this code was written as part of a project to build a fixed-point vector
 * processor in collaboration with the University of California at Berkeley,
 * overseen by Profs. Nelson Morgan and John Wawrzynek. More information
 * is available through the web page `http://HTTP.CS.Berkeley.EDU/~jhauser/
 * arithmetic/softfloat.html'.
 *
 * THIS SOFTWARE IS DISTRIBUTED AS IS, FOR FREE. Although reasonable effort
 * has been made to avoid it, THIS SOFTWARE MAY CONTAIN FAULTS THAT WILL AT
 * TIMES RESULT IN INCORRECT BEHAVIOR. USE OF THIS SOFTWARE IS RESTRICTED TO
 * PERSONS AND ORGANIZATIONS WHO CAN AND WILL TAKE FULL RESPONSIBILITY FOR ANY
 * AND ALL LOSSES, COSTS, OR OTHER PROBLEMS ARISING FROM ITS USE.
 *
 * Derivative works are acceptable, even for commercial purposes, so long as
 * (1) they include prominent notice that the work is derivative, and (2) they
 * include prominent notice akin to these three paragraphs for those parts of
 * this code that are retained.
 *
 * ========================================================================
 *
 * SH4 modifications by Ismail Dhaoui <ismail.dhaoui@st.com>
 * and Kamel Khelifi <kamel.khelifi@st.com>
 */
#include <linux/kernel.h>
#include <cpu/fpu.h>

#define LIT64( a ) a##LL

typedef char flag;
typedef unsigned char uint8;
typedef signed char int8;
typedef int uint16;
typedef int int16;
typedef unsigned int uint32;
typedef signed int int32;
typedef unsigned long long int bits64;
typedef signed long long int sbits64;
typedef unsigned char bits8;
typedef signed char sbits8;
typedef unsigned short int bits16;
typedef signed short int sbits16;
typedef unsigned int bits32;
typedef signed int sbits32;
typedef unsigned long long int uint64;
typedef signed long long int int64;

typedef unsigned long int float32;
typedef unsigned long long float64;

extern void float_raise(unsigned int flags);	/* in fpu.c */
extern int float_rounding_mode(void);	/* in fpu.c */
inline bits64 extractFloat64Frac(float64 a);
inline flag extractFloat64Sign(float64 a);
inline int16 extractFloat64Exp(float64 a);
inline int16 extractFloat32Exp(float32 a);
inline flag extractFloat32Sign(float32 a);
inline bits32 extractFloat32Frac(float32 a);
inline float64 packFloat64(flag zSign, int16 zExp, bits64 zSig);
inline void shift64RightJamming(bits64 a, int16 count, bits64 *zPtr);
inline float32 packFloat32(flag zSign, int16 zExp, bits32 zSig);
inline void shift32RightJamming(bits32 a, int16 count, bits32 *zPtr);
float64 float64_sub(float64 a, float64 b);
float32 float32_sub(float32 a, float32 b);
float32 float32_add(float32 a, float32 b);
float64 float64_add(float64 a, float64 b);
float64 float64_div(float64 a, float64 b);
float32 float32_div(float32 a, float32 b);
float32 float32_mul(float32 a, float32 b);
float64 float64_mul(float64 a, float64 b);
inline void add128(bits64 a0, bits64 a1, bits64 b0, bits64 b1,
		   bits64 *z0Ptr, bits64 *z1Ptr);
inline void sub128(bits64 a0, bits64 a1, bits64 b0, bits64 b1,
		   bits64 *z0Ptr, bits64 *z1Ptr);
inline void mul64To128(bits64 a, bits64 b, bits64 *z0Ptr, bits64 *z1Ptr);

static int8 countLeadingZeros32(bits32 a);
static int8 countLeadingZeros64(bits64 a);
static float64 normalizeRoundAndPackFloat64(flag zSign, int16 zExp,
					    bits64 zSig);
static float64 subFloat64Sigs(float64 a, float64 b, flag zSign);
static float64 addFloat64Sigs(float64 a, float64 b, flag zSign);
static float32 roundAndPackFloat32(flag zSign, int16 zExp, bits32 zSig);
static float32 normalizeRoundAndPackFloat32(flag zSign, int16 zExp,
					    bits32 zSig);
static float64 roundAndPackFloat64(flag zSign, int16 zExp, bits64 zSig);
static float32 subFloat32Sigs(float32 a, float32 b, flag zSign);
static float32 addFloat32Sigs(float32 a, float32 b, flag zSign);
static void normalizeFloat64Subnormal(bits64 aSig, int16 *zExpPtr,
				      bits64 *zSigPtr);
static bits64 estimateDiv128To64(bits64 a0, bits64 a1, bits64 b);
static void normalizeFloat32Subnormal(bits32 aSig, int16 *zExpPtr,
				      bits32 *zSigPtr);
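
/*
 * The helpers below pick apart the raw IEEE 754 bit patterns.  As a
 * worked example (for illustration only): 1.0f is encoded as 0x3F800000,
 * so extractFloat32Sign() yields 0, extractFloat32Exp() yields 0x7F (the
 * biased exponent) and extractFloat32Frac() yields 0.  packFloat64() and
 * packFloat32() perform the inverse: packFloat32(0, 0x7F, 0) rebuilds
 * the 0x3F800000 pattern.
 */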
inline bits64 extractFloat64Frac(float64 a)
{
	return a & LIT64(0x000FFFFFFFFFFFFF);
}

inline flag extractFloat64Sign(float64 a)
{
	return a >> 63;
}

inline int16 extractFloat64Exp(float64 a)
{
	return (a >> 52) & 0x7FF;
}

inline int16 extractFloat32Exp(float32 a)
{
	return (a >> 23) & 0xFF;
}

inline flag extractFloat32Sign(float32 a)
{
	return a >> 31;
}

inline bits32 extractFloat32Frac(float32 a)
{
	return a & 0x007FFFFF;
}

inline float64 packFloat64(flag zSign, int16 zExp, bits64 zSig)
{
	return (((bits64) zSign) << 63) + (((bits64) zExp) << 52) + zSig;
}
inline void shift64RightJamming(bits64 a, int16 count, bits64 *zPtr)
{
	bits64 z;

	if (count == 0) {
		z = a;
	} else if (count < 64) {
		z = (a >> count) | ((a << ((-count) & 63)) != 0);
	} else {
		z = (a != 0);
	}
	*zPtr = z;
}
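
/*
 * Worked example (for illustration only): shifting 0x123 right by 4
 * discards the low bits 0x3; because they are non-zero, the result is
 * "jammed" to 0x12 | 1 == 0x13, so later rounding can still tell that
 * the value was inexact.  The 32-bit variant further down behaves the
 * same way.
 */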
static int8 countLeadingZeros32(bits32 a)
{
	static const int8 countLeadingZerosHigh[] = {
		8, 7, 6, 6, 5, 5, 5, 5, 4, 4, 4, 4, 4, 4, 4, 4,
		3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
		2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
		2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
		1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
		1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
		1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
		1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
	};
	int8 shiftCount;

	shiftCount = 0;
	if (a < 0x10000) {
		shiftCount += 16;
		a <<= 16;
	}
	if (a < 0x1000000) {
		shiftCount += 8;
		a <<= 8;
	}
	shiftCount += countLeadingZerosHigh[a >> 24];
	return shiftCount;
}
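
/*
 * Worked example (for illustration only): for a == 0x00800000 the first
 * shortcut does not fire (0x00800000 >= 0x10000), the second adds 8 and
 * shifts a to 0x80000000, and the table lookup on the top byte (0x80)
 * adds 0, giving 8 leading zeros.  a == 0 takes both shortcuts plus the
 * table's 8 for index 0, giving 32.
 */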
static int8 countLeadingZeros64(bits64 a)
{
	int8 shiftCount;

	shiftCount = 0;
	if (a < ((bits64) 1) << 32) {
		shiftCount += 32;
	} else {
		a >>= 32;
	}
	shiftCount += countLeadingZeros32(a);
	return shiftCount;
}

static float64 normalizeRoundAndPackFloat64(flag zSign, int16 zExp, bits64 zSig)
{
	int8 shiftCount;

	shiftCount = countLeadingZeros64(zSig) - 1;
	return roundAndPackFloat64(zSign, zExp - shiftCount,
				   zSig << shiftCount);
}
static float64 subFloat64Sigs(float64 a, float64 b, flag zSign)
{
	int16 aExp, bExp, zExp;
	bits64 aSig, bSig, zSig;
	int16 expDiff;

	aSig = extractFloat64Frac(a);
	aExp = extractFloat64Exp(a);
	bSig = extractFloat64Frac(b);
	bExp = extractFloat64Exp(b);
	expDiff = aExp - bExp;
	/* Align both significands with 10 guard bits below the LSB. */
	aSig <<= 10;
	bSig <<= 10;
	if (0 < expDiff)
		goto aExpBigger;
	if (expDiff < 0)
		goto bExpBigger;
	if (aExp == 0) {
		aExp = 1;
		bExp = 1;
	}
	if (bSig < aSig)
		goto aBigger;
	if (aSig < bSig)
		goto bBigger;
	/* Exact cancellation: a zero whose sign bit is set in
	   round-to-zero mode. */
	return packFloat64(float_rounding_mode() == FPSCR_RM_ZERO, 0, 0);
      bExpBigger:
	if (bExp == 0x7FF) {
		return packFloat64(zSign ^ 1, 0x7FF, 0);
	}
	if (aExp == 0) {
		++expDiff;
	} else {
		aSig |= LIT64(0x4000000000000000);
	}
	shift64RightJamming(aSig, -expDiff, &aSig);
	bSig |= LIT64(0x4000000000000000);
      bBigger:
	zSig = bSig - aSig;
	zExp = bExp;
	zSign ^= 1;
	goto normalizeRoundAndPack;
      aExpBigger:
	if (aExp == 0x7FF) {
		return a;
	}
	if (bExp == 0) {
		--expDiff;
	} else {
		bSig |= LIT64(0x4000000000000000);
	}
	shift64RightJamming(bSig, expDiff, &bSig);
	aSig |= LIT64(0x4000000000000000);
      aBigger:
	zSig = aSig - bSig;
	zExp = aExp;
      normalizeRoundAndPack:
	--zExp;
	return normalizeRoundAndPackFloat64(zSign, zExp, zSig);
}
static float64 addFloat64Sigs(float64 a, float64 b, flag zSign)
{
	int16 aExp, bExp, zExp;
	bits64 aSig, bSig, zSig;
	int16 expDiff;

	aSig = extractFloat64Frac(a);
	aExp = extractFloat64Exp(a);
	bSig = extractFloat64Frac(b);
	bExp = extractFloat64Exp(b);
	expDiff = aExp - bExp;
	aSig <<= 9;
	bSig <<= 9;
	if (0 < expDiff) {
		if (aExp == 0x7FF) {
			return a;
		}
		if (bExp == 0) {
			--expDiff;
		} else {
			bSig |= LIT64(0x2000000000000000);
		}
		shift64RightJamming(bSig, expDiff, &bSig);
		zExp = aExp;
	} else if (expDiff < 0) {
		if (bExp == 0x7FF) {
			return packFloat64(zSign, 0x7FF, 0);
		}
		if (aExp == 0) {
			++expDiff;
		} else {
			aSig |= LIT64(0x2000000000000000);
		}
		shift64RightJamming(aSig, -expDiff, &aSig);
		zExp = bExp;
	} else {
		if (aExp == 0x7FF) {
			return a;
		}
		if (aExp == 0)
			return packFloat64(zSign, 0, (aSig + bSig) >> 9);
		zSig = LIT64(0x4000000000000000) + aSig + bSig;
		zExp = aExp;
		goto roundAndPack;
	}
	aSig |= LIT64(0x2000000000000000);
	zSig = (aSig + bSig) << 1;
	--zExp;
	if ((sbits64) zSig < 0) {
		zSig = aSig + bSig;
		++zExp;
	}
      roundAndPack:
	return roundAndPackFloat64(zSign, zExp, zSig);
}
inline float32 packFloat32(flag zSign, int16 zExp, bits32 zSig)
{
	return (((bits32) zSign) << 31) + (((bits32) zExp) << 23) + zSig;
}

inline void shift32RightJamming(bits32 a, int16 count, bits32 *zPtr)
{
	bits32 z;

	if (count == 0) {
		z = a;
	} else if (count < 32) {
		z = (a >> count) | ((a << ((-count) & 31)) != 0);
	} else {
		z = (a != 0);
	}
	*zPtr = z;
}
static float32 roundAndPackFloat32(flag zSign, int16 zExp, bits32 zSig)
{
	flag roundNearestEven;
	int8 roundIncrement, roundBits;
	flag isTiny;

	/* SH4 has only 2 rounding modes - round to nearest and round to zero */
	roundNearestEven = (float_rounding_mode() == FPSCR_RM_NEAREST);
	roundIncrement = 0x40;
	if (!roundNearestEven) {
		roundIncrement = 0;
	}
	/* The 7 low bits of zSig are guard bits below the final LSB. */
	roundBits = zSig & 0x7F;
	if (0xFD <= (bits16) zExp) {
		if ((0xFD < zExp)
		    || ((zExp == 0xFD)
			&& ((sbits32) (zSig + roundIncrement) < 0))) {
			/* Overflow: infinity, or the largest finite value
			   in round-to-zero mode. */
			float_raise(FPSCR_CAUSE_OVERFLOW | FPSCR_CAUSE_INEXACT);
			return packFloat32(zSign, 0xFF, 0) -
			    (roundIncrement == 0);
		}
		if (zExp < 0) {
			isTiny = (zExp < -1)
			    || (zSig + roundIncrement < 0x80000000);
			shift32RightJamming(zSig, -zExp, &zSig);
			zExp = 0;
			roundBits = zSig & 0x7F;
			if (isTiny && roundBits)
				float_raise(FPSCR_CAUSE_UNDERFLOW);
		}
	}
	if (roundBits)
		float_raise(FPSCR_CAUSE_INEXACT);
	zSig = (zSig + roundIncrement) >> 7;
	zSig &= ~(((roundBits ^ 0x40) == 0) & roundNearestEven);
	if (zSig == 0)
		zExp = 0;
	return packFloat32(zSign, zExp, zSig);
}
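
/*
 * Worked example (for illustration only): with round-to-nearest and
 * roundBits exactly 0x40 (a halfway case), adding the 0x40 increment
 * rounds the value up, and the mask applied afterwards forces the
 * result LSB to zero, so a tie always lands on the even neighbour
 * (ties-to-even).  Any other non-zero roundBits just raises
 * FPSCR_CAUSE_INEXACT and rounds to the nearer value.
 */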
static float32 normalizeRoundAndPackFloat32(flag zSign, int16 zExp, bits32 zSig)
{
	int8 shiftCount;

	shiftCount = countLeadingZeros32(zSig) - 1;
	return roundAndPackFloat32(zSign, zExp - shiftCount,
				   zSig << shiftCount);
}

static float64 roundAndPackFloat64(flag zSign, int16 zExp, bits64 zSig)
{
	flag roundNearestEven;
	int16 roundIncrement, roundBits;
	flag isTiny;

	/* SH4 has only 2 rounding modes - round to nearest and round to zero */
	roundNearestEven = (float_rounding_mode() == FPSCR_RM_NEAREST);
	roundIncrement = 0x200;
	if (!roundNearestEven) {
		roundIncrement = 0;
	}
	roundBits = zSig & 0x3FF;
	if (0x7FD <= (bits16) zExp) {
		if ((0x7FD < zExp)
		    || ((zExp == 0x7FD)
			&& ((sbits64) (zSig + roundIncrement) < 0))) {
			float_raise(FPSCR_CAUSE_OVERFLOW | FPSCR_CAUSE_INEXACT);
			return packFloat64(zSign, 0x7FF, 0) -
			    (roundIncrement == 0);
		}
		if (zExp < 0) {
			isTiny = (zExp < -1)
			    || (zSig + roundIncrement <
				LIT64(0x8000000000000000));
			shift64RightJamming(zSig, -zExp, &zSig);
			zExp = 0;
			roundBits = zSig & 0x3FF;
			if (isTiny && roundBits)
				float_raise(FPSCR_CAUSE_UNDERFLOW);
		}
	}
	if (roundBits)
		float_raise(FPSCR_CAUSE_INEXACT);
	zSig = (zSig + roundIncrement) >> 10;
	zSig &= ~(((roundBits ^ 0x200) == 0) & roundNearestEven);
	if (zSig == 0)
		zExp = 0;
	return packFloat64(zSign, zExp, zSig);
}
static float32 subFloat32Sigs(float32 a, float32 b, flag zSign)
{
	int16 aExp, bExp, zExp;
	bits32 aSig, bSig, zSig;
	int16 expDiff;

	aSig = extractFloat32Frac(a);
	aExp = extractFloat32Exp(a);
	bSig = extractFloat32Frac(b);
	bExp = extractFloat32Exp(b);
	expDiff = aExp - bExp;
	aSig <<= 7;
	bSig <<= 7;
	if (0 < expDiff)
		goto aExpBigger;
	if (expDiff < 0)
		goto bExpBigger;
	if (aExp == 0) {
		aExp = 1;
		bExp = 1;
	}
	if (bSig < aSig)
		goto aBigger;
	if (aSig < bSig)
		goto bBigger;
	return packFloat32(float_rounding_mode() == FPSCR_RM_ZERO, 0, 0);
      bExpBigger:
	if (bExp == 0xFF) {
		return packFloat32(zSign ^ 1, 0xFF, 0);
	}
	if (aExp == 0) {
		++expDiff;
	} else {
		aSig |= 0x40000000;
	}
	shift32RightJamming(aSig, -expDiff, &aSig);
	bSig |= 0x40000000;
      bBigger:
	zSig = bSig - aSig;
	zExp = bExp;
	zSign ^= 1;
	goto normalizeRoundAndPack;
      aExpBigger:
	if (aExp == 0xFF) {
		return a;
	}
	if (bExp == 0) {
		--expDiff;
	} else {
		bSig |= 0x40000000;
	}
	shift32RightJamming(bSig, expDiff, &bSig);
	aSig |= 0x40000000;
      aBigger:
	zSig = aSig - bSig;
	zExp = aExp;
      normalizeRoundAndPack:
	--zExp;
	return normalizeRoundAndPackFloat32(zSign, zExp, zSig);
}
static float32 addFloat32Sigs(float32 a, float32 b, flag zSign)
{
	int16 aExp, bExp, zExp;
	bits32 aSig, bSig, zSig;
	int16 expDiff;

	aSig = extractFloat32Frac(a);
	aExp = extractFloat32Exp(a);
	bSig = extractFloat32Frac(b);
	bExp = extractFloat32Exp(b);
	expDiff = aExp - bExp;
	aSig <<= 6;
	bSig <<= 6;
	if (0 < expDiff) {
		if (aExp == 0xFF) {
			return a;
		}
		if (bExp == 0) {
			--expDiff;
		} else {
			bSig |= 0x20000000;
		}
		shift32RightJamming(bSig, expDiff, &bSig);
		zExp = aExp;
	} else if (expDiff < 0) {
		if (bExp == 0xFF) {
			return packFloat32(zSign, 0xFF, 0);
		}
		if (aExp == 0) {
			++expDiff;
		} else {
			aSig |= 0x20000000;
		}
		shift32RightJamming(aSig, -expDiff, &aSig);
		zExp = bExp;
	} else {
		if (aExp == 0xFF) {
			return a;
		}
		if (aExp == 0)
			return packFloat32(zSign, 0, (aSig + bSig) >> 6);
		zSig = 0x40000000 + aSig + bSig;
		zExp = aExp;
		goto roundAndPack;
	}
	aSig |= 0x20000000;
	zSig = (aSig + bSig) << 1;
	--zExp;
	if ((sbits32) zSig < 0) {
		zSig = aSig + bSig;
		++zExp;
	}
      roundAndPack:
	return roundAndPackFloat32(zSign, zExp, zSig);
}
float64 float64_sub(float64 a, float64 b)
{
	flag aSign, bSign;

	aSign = extractFloat64Sign(a);
	bSign = extractFloat64Sign(b);
	if (aSign == bSign) {
		return subFloat64Sigs(a, b, aSign);
	} else {
		return addFloat64Sigs(a, b, aSign);
	}
}

float32 float32_sub(float32 a, float32 b)
{
	flag aSign, bSign;

	aSign = extractFloat32Sign(a);
	bSign = extractFloat32Sign(b);
	if (aSign == bSign) {
		return subFloat32Sigs(a, b, aSign);
	} else {
		return addFloat32Sigs(a, b, aSign);
	}
}

float32 float32_add(float32 a, float32 b)
{
	flag aSign, bSign;

	aSign = extractFloat32Sign(a);
	bSign = extractFloat32Sign(b);
	if (aSign == bSign) {
		return addFloat32Sigs(a, b, aSign);
	} else {
		return subFloat32Sigs(a, b, aSign);
	}
}

float64 float64_add(float64 a, float64 b)
{
	flag aSign, bSign;

	aSign = extractFloat64Sign(a);
	bSign = extractFloat64Sign(b);
	if (aSign == bSign) {
		return addFloat64Sigs(a, b, aSign);
	} else {
		return subFloat64Sigs(a, b, aSign);
	}
}
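
/*
 * Worked example (for illustration only): 0x3F800000 and 0x40000000 are
 * the IEEE 754 single-precision patterns for 1.0f and 2.0f, and
 * float32_add(0x3F800000, 0x40000000) aligns the significands, adds
 * them and rounds, returning 0x40400000, i.e. 3.0f.  When the operand
 * signs differ, the wrappers above dispatch to the "sub" path instead.
 */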
static void
normalizeFloat64Subnormal(bits64 aSig, int16 *zExpPtr, bits64 *zSigPtr)
{
	int8 shiftCount;

	shiftCount = countLeadingZeros64(aSig) - 11;
	*zSigPtr = aSig << shiftCount;
	*zExpPtr = 1 - shiftCount;
}

inline void add128(bits64 a0, bits64 a1, bits64 b0, bits64 b1,
		   bits64 *z0Ptr, bits64 *z1Ptr)
{
	bits64 z1;

	z1 = a1 + b1;
	*z1Ptr = z1;
	*z0Ptr = a0 + b0 + (z1 < a1);
}

inline void
sub128(bits64 a0, bits64 a1, bits64 b0, bits64 b1,
       bits64 *z0Ptr, bits64 *z1Ptr)
{
	*z1Ptr = a1 - b1;
	*z0Ptr = a0 - b0 - (a1 < b1);
}
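
/*
 * Worked example (for illustration only): add128(0, 0xFFFFFFFFFFFFFFFF,
 * 0, 1, &z0, &z1) wraps the low word to 0; the comparison (z1 < a1)
 * detects the wrap-around and carries 1 into the high word, giving the
 * pair z0 == 1, z1 == 0, i.e. the 128-bit value 2^64.  sub128() borrows
 * symmetrically via (a1 < b1).
 */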
/*
 * Returns an approximation of the 64-bit quotient of the 128-bit value
 * a0:a1 divided by b.  The divisor b must have its top bit set; per the
 * original SoftFloat documentation, the estimate is never below the true
 * quotient and exceeds it by at most 2.
 */
static bits64 estimateDiv128To64(bits64 a0, bits64 a1, bits64 b)
{
	bits64 b0, b1;
	bits64 rem0, rem1, term0, term1;
	bits64 z;

	if (b <= a0)
		return LIT64(0xFFFFFFFFFFFFFFFF);
	b0 = b >> 32;
	z = (b0 << 32 <= a0) ? LIT64(0xFFFFFFFF00000000) : (a0 / b0) << 32;
	mul64To128(b, z, &term0, &term1);
	sub128(a0, a1, term0, term1, &rem0, &rem1);
	while (((sbits64) rem0) < 0) {
		z -= LIT64(0x100000000);
		b1 = b << 32;
		add128(rem0, rem1, b0, b1, &rem0, &rem1);
	}
	rem0 = (rem0 << 32) | (rem1 >> 32);
	z |= (b0 << 32 <= rem0) ? 0xFFFFFFFF : rem0 / b0;
	return z;
}
inline void mul64To128(bits64 a, bits64 b, bits64 *z0Ptr, bits64 *z1Ptr)
{
	bits32 aHigh, aLow, bHigh, bLow;
	bits64 z0, zMiddleA, zMiddleB, z1;

	aLow = a;
	aHigh = a >> 32;
	bLow = b;
	bHigh = b >> 32;
	z1 = ((bits64) aLow) * bLow;
	zMiddleA = ((bits64) aLow) * bHigh;
	zMiddleB = ((bits64) aHigh) * bLow;
	z0 = ((bits64) aHigh) * bHigh;
	zMiddleA += zMiddleB;
	z0 += (((bits64) (zMiddleA < zMiddleB)) << 32) + (zMiddleA >> 32);
	zMiddleA <<= 32;
	z1 += zMiddleA;
	z0 += (z1 < zMiddleA);
	*z1Ptr = z1;
	*z0Ptr = z0;
}
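
/*
 * Worked example (for illustration only): mul64To128() computes the
 * full 128-bit product from four 32x32->64 partial products, schoolbook
 * style.  For a == b == 0x100000000 (2^32) only the high*high partial
 * product is non-zero, giving z0 == 1, z1 == 0, i.e. 2^64.
 */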
static void normalizeFloat32Subnormal(bits32 aSig, int16 *zExpPtr,
				      bits32 *zSigPtr)
{
	int8 shiftCount;

	shiftCount = countLeadingZeros32(aSig) - 8;
	*zSigPtr = aSig << shiftCount;
	*zExpPtr = 1 - shiftCount;
}
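
/*
 * Worked example (for illustration only): for the smallest single
 * subnormal, aSig == 1, countLeadingZeros32() returns 31, so the
 * significand is shifted left by 23 into the hidden-bit position
 * (0x00800000) and the effective exponent becomes 1 - 23 == -22,
 * ready for use by the multiply and divide paths below.
 */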
float64 float64_div(float64 a, float64 b)
{
	flag aSign, bSign, zSign;
	int16 aExp, bExp, zExp;
	bits64 aSig, bSig, zSig;
	bits64 rem0, rem1;
	bits64 term0, term1;

	aSig = extractFloat64Frac(a);
	aExp = extractFloat64Exp(a);
	aSign = extractFloat64Sign(a);
	bSig = extractFloat64Frac(b);
	bExp = extractFloat64Exp(b);
	bSign = extractFloat64Sign(b);
	zSign = aSign ^ bSign;
	if (aExp == 0x7FF) {
		/* Inf / Inf is not special-cased: an infinite dividend
		   always yields an infinity here. */
		if (bExp == 0x7FF) {
		}
		return packFloat64(zSign, 0x7FF, 0);
	}
	if (bExp == 0x7FF) {
		/* Finite / Inf yields a (signed) zero. */
		return packFloat64(zSign, 0, 0);
	}
	if (bExp == 0) {
		if (bSig == 0) {
			/* 0 / 0 raises the invalid exception; any other
			   dividend over zero yields an infinity. */
			if ((aExp | aSig) == 0) {
				float_raise(FPSCR_CAUSE_INVALID);
			}
			return packFloat64(zSign, 0x7FF, 0);
		}
		normalizeFloat64Subnormal(bSig, &bExp, &bSig);
	}
	if (aExp == 0) {
		if (aSig == 0)
			return packFloat64(zSign, 0, 0);
		normalizeFloat64Subnormal(aSig, &aExp, &aSig);
	}
	zExp = aExp - bExp + 0x3FD;
	aSig = (aSig | LIT64(0x0010000000000000)) << 10;
	bSig = (bSig | LIT64(0x0010000000000000)) << 11;
	if (bSig <= (aSig + aSig)) {
		aSig >>= 1;
		++zExp;
	}
	zSig = estimateDiv128To64(aSig, 0, bSig);
	if ((zSig & 0x1FF) <= 2) {
		/* The quotient estimate may be high by up to 2: correct
		   it and fold any remainder into the sticky bit. */
		mul64To128(bSig, zSig, &term0, &term1);
		sub128(aSig, 0, term0, term1, &rem0, &rem1);
		while ((sbits64) rem0 < 0) {
			--zSig;
			add128(rem0, rem1, 0, bSig, &rem0, &rem1);
		}
		zSig |= (rem1 != 0);
	}
	return roundAndPackFloat64(zSign, zExp, zSig);
}
float32 float32_div(float32 a, float32 b)
{
	flag aSign, bSign, zSign;
	int16 aExp, bExp, zExp;
	bits32 aSig, bSig, zSig;

	aSig = extractFloat32Frac(a);
	aExp = extractFloat32Exp(a);
	aSign = extractFloat32Sign(a);
	bSig = extractFloat32Frac(b);
	bExp = extractFloat32Exp(b);
	bSign = extractFloat32Sign(b);
	zSign = aSign ^ bSign;
	if (aExp == 0xFF) {
		/* As in float64_div, Inf / Inf is not special-cased. */
		if (bExp == 0xFF) {
		}
		return packFloat32(zSign, 0xFF, 0);
	}
	if (bExp == 0xFF) {
		return packFloat32(zSign, 0, 0);
	}
	if (bExp == 0) {
		if (bSig == 0) {
			/* Unlike float64_div, no invalid exception is
			   raised here for 0 / 0. */
			return packFloat32(zSign, 0xFF, 0);
		}
		normalizeFloat32Subnormal(bSig, &bExp, &bSig);
	}
	if (aExp == 0) {
		if (aSig == 0)
			return packFloat32(zSign, 0, 0);
		normalizeFloat32Subnormal(aSig, &aExp, &aSig);
	}
	zExp = aExp - bExp + 0x7D;
	aSig = (aSig | 0x00800000) << 7;
	bSig = (bSig | 0x00800000) << 8;
	if (bSig <= (aSig + aSig)) {
		aSig >>= 1;
		++zExp;
	}
	zSig = (((bits64) aSig) << 32) / bSig;
	if ((zSig & 0x3F) == 0) {
		zSig |= (((bits64) bSig) * zSig != ((bits64) aSig) << 32);
	}
	return roundAndPackFloat32(zSign, zExp, zSig);
}
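
/*
 * Worked example (for illustration only): dividing 3.0f by 2.0f, i.e.
 * float32_div(0x40400000, 0x40000000), computes zSig == 0x60000000 via
 * the 64-by-32-bit division above; the division is exact, so no sticky
 * bit is set and rounding packs the result as 0x3FC00000, i.e. 1.5f.
 */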
float32 float32_mul(float32 a, float32 b)
{
	char aSign, bSign, zSign;
	int aExp, bExp, zExp;
	unsigned int aSig, bSig;
	unsigned long long zSig64;
	unsigned int zSig;

	aSig = extractFloat32Frac(a);
	aExp = extractFloat32Exp(a);
	aSign = extractFloat32Sign(a);
	bSig = extractFloat32Frac(b);
	bExp = extractFloat32Exp(b);
	bSign = extractFloat32Sign(b);
	zSign = aSign ^ bSign;
	if (aExp == 0) {
		if (aSig == 0)
			return packFloat32(zSign, 0, 0);
		normalizeFloat32Subnormal(aSig, &aExp, &aSig);
	}
	if (bExp == 0) {
		if (bSig == 0)
			return packFloat32(zSign, 0, 0);
		normalizeFloat32Subnormal(bSig, &bExp, &bSig);
	}
	if ((bExp == 0xff && bSig == 0) || (aExp == 0xff && aSig == 0))
		return roundAndPackFloat32(zSign, 0xff, 0);
	zExp = aExp + bExp - 0x7F;
	aSig = (aSig | 0x00800000) << 7;
	bSig = (bSig | 0x00800000) << 8;
	shift64RightJamming(((unsigned long long)aSig) * bSig, 32, &zSig64);
	zSig = zSig64;
	if (0 <= (signed int)(zSig << 1)) {
		zSig <<= 1;
		--zExp;
	}
	return roundAndPackFloat32(zSign, zExp, zSig);
}
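
/*
 * Worked example (for illustration only): float32_mul(0x40000000,
 * 0x40400000), i.e. 2.0f * 3.0f, multiplies the widened significands
 * into 64 bits, jams the low half into a sticky bit, renormalizes, and
 * rounds to 0x40C00000, i.e. 6.0f.
 */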
float64 float64_mul(float64 a, float64 b)
{
	char aSign, bSign, zSign;
	int aExp, bExp, zExp;
	unsigned long long int aSig, bSig, zSig0, zSig1;

	aSig = extractFloat64Frac(a);
	aExp = extractFloat64Exp(a);
	aSign = extractFloat64Sign(a);
	bSig = extractFloat64Frac(b);
	bExp = extractFloat64Exp(b);
	bSign = extractFloat64Sign(b);
	zSign = aSign ^ bSign;
	if (aExp == 0) {
		if (aSig == 0)
			return packFloat64(zSign, 0, 0);
		normalizeFloat64Subnormal(aSig, &aExp, &aSig);
	}
	if (bExp == 0) {
		if (bSig == 0)
			return packFloat64(zSign, 0, 0);
		normalizeFloat64Subnormal(bSig, &bExp, &bSig);
	}
	if ((aExp == 0x7ff && aSig == 0) || (bExp == 0x7ff && bSig == 0))
		return roundAndPackFloat64(zSign, 0x7ff, 0);
	zExp = aExp + bExp - 0x3FF;
	aSig = (aSig | 0x0010000000000000LL) << 10;
	bSig = (bSig | 0x0010000000000000LL) << 11;
	mul64To128(aSig, bSig, &zSig0, &zSig1);
	zSig0 |= (zSig1 != 0);
	if (0 <= (signed long long int)(zSig0 << 1)) {
		zSig0 <<= 1;
		--zExp;
	}
	return roundAndPackFloat64(zSign, zExp, zSig0);
}
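
/*
 * Worked example (for illustration only): float64_mul of 2.0
 * (0x4000000000000000) and 3.0 (0x4008000000000000) forms the 128-bit
 * significand product via mul64To128(), folds the low half into a
 * sticky bit, renormalizes, and rounds to 0x4018000000000000, i.e. 6.0.
 */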