/*
 * IBM Accurate Mathematical Library
 * written by International Business Machines Corp.
 * Copyright (C) 2001-2018 Free Software Foundation, Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU Lesser General Public License as published by
 * the Free Software Foundation; either version 2.1 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public License
 * along with this program; if not, see <http://www.gnu.org/licenses/>.
 */
/*********************************************************************/
/* MODULE_NAME: utan.c                                               */
/*                                                                   */
/* FUNCTIONS: utan                                                   */
/*            tanMp                                                  */
/*                                                                   */
/* FILES NEEDED: dla.h endian.h mpa.h mydefs.h utan.h                */
/*               branred.c sincos32.c mptan.c                        */
/*               utan.tbl                                            */
/*                                                                   */
/* An ultimate tan routine. Given an IEEE double machine number x    */
/* it computes the correctly rounded (to nearest) value of tan(x).   */
/* Assumption: Machine arithmetic operations are performed in        */
/* round to nearest mode of IEEE 754 standard.                       */
/*                                                                   */
/*********************************************************************/
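/* Strategy, as implemented below: the argument is classified by
   magnitude, and each range first tries a fast double precision
   estimate (polynomial or table driven), then a slower double-double
   (dla.h) evaluation, and finally falls back to the multiple
   precision routine tanMp () when neither error bound certifies a
   correctly rounded result.  */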

#include <errno.h>
#include <float.h>
#include "endian.h"
#include <dla.h>
#include "mpa.h"
#include "MathLib.h"
#include <math.h>
#include <math_private.h>
#include <math-underflow.h>
#include <libm-alias-double.h>
#include <fenv.h>
#include <stap-probe.h>

#ifndef SECTION
# define SECTION
#endif

static double tanMp (double);
void __mptan (double, mp_no *, int);

double
SECTION
__tan (double x)
{
#include "utan.h"
#include "utan.tbl"

  int ux, i, n;
  double a, da, a2, b, db, c, dc, c1, cc1, c2, cc2, c3, cc3, fi, ffi, gi, pz,
         s, sy, t, t1, t2, t3, t4, t7, t8, t9, t10, w, x2, xn, xx2, y, ya,
         yya, z0, z, zz, z2, zz2;
#ifndef DLA_FMS
  double t5, t6;
#endif
  int p;
  number num, v;
  mp_no mpa, mpt1, mpt2;

  double retval;

  int __branred (double, double *, double *);
  int __mpranred (double, mp_no *, int);

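  /* Evaluate everything below in round-to-nearest with 53-bit
     precision; the previous floating-point state is restored on
     return (this matters on targets such as x86 whose FPU may
     otherwise compute in extended precision).  */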
  SET_RESTORE_ROUND_53BIT (FE_TONEAREST);

  /* x=+-INF, x=NaN */
  num.d = x;
  ux = num.i[HIGH_HALF];
  if ((ux & 0x7ff00000) == 0x7ff00000)
    {
      if ((ux & 0x7fffffff) == 0x7ff00000)
        __set_errno (EDOM);
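      /* For +-Inf, x - x raises the invalid exception and yields NaN;
         a NaN argument propagates through x - x.  */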
      retval = x - x;
      goto ret;
    }

  w = (x < 0.0) ? -x : x;

  /* (I) The case abs(x) <= 1.259e-8 */
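  /* Here x itself is the correctly rounded result: the first omitted
     Taylor term, x*x*x/3, is below 2^-54 relative to x at this
     magnitude.  math_check_force_underflow_nonneg raises the
     underflow exception when the result is subnormal.  */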
  if (w <= g1.d)
    {
      math_check_force_underflow_nonneg (w);
      retval = x;
      goto ret;
    }

  /* (II) The case 1.259e-8 < abs(x) <= 0.0608 */
  if (w <= g2.d)
    {
      /* First stage */
      x2 = x * x;

      t2 = d9.d + x2 * d11.d;
      t2 = d7.d + x2 * t2;
      t2 = d5.d + x2 * t2;
      t2 = d3.d + x2 * t2;
      t2 *= x * x2;

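      /* Rounding test used throughout this file: u1 bounds the error
         of the correction t2.  If perturbing t2 by +-u1*t2 leaves the
         rounded sum unchanged, y is provably the correctly rounded
         result; otherwise fall through to a more accurate stage.  */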
      if ((y = x + (t2 - u1.d * t2)) == x + (t2 + u1.d * t2))
        {
          retval = y;
          goto ret;
        }

      /* Second stage */
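      /* The polynomial is re-evaluated in double-double arithmetic:
         EMULV forms an exact product and ADD2/MUL2 (see dla.h)
         propagate a head/tail pair (c1, cc1), giving roughly twice
         the working precision through the Horner recurrence.  */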
      c1 = a25.d + x2 * a27.d;
      c1 = a23.d + x2 * c1;
      c1 = a21.d + x2 * c1;
      c1 = a19.d + x2 * c1;
      c1 = a17.d + x2 * c1;
      c1 = a15.d + x2 * c1;
      c1 *= x2;

      EMULV (x, x, x2, xx2, t1, t2, t3, t4, t5);
      ADD2 (a13.d, aa13.d, c1, 0.0, c2, cc2, t1, t2);
      MUL2 (x2, xx2, c2, cc2, c1, cc1, t1, t2, t3, t4, t5, t6, t7, t8);
      ADD2 (a11.d, aa11.d, c1, cc1, c2, cc2, t1, t2);
      MUL2 (x2, xx2, c2, cc2, c1, cc1, t1, t2, t3, t4, t5, t6, t7, t8);
      ADD2 (a9.d, aa9.d, c1, cc1, c2, cc2, t1, t2);
      MUL2 (x2, xx2, c2, cc2, c1, cc1, t1, t2, t3, t4, t5, t6, t7, t8);
      ADD2 (a7.d, aa7.d, c1, cc1, c2, cc2, t1, t2);
      MUL2 (x2, xx2, c2, cc2, c1, cc1, t1, t2, t3, t4, t5, t6, t7, t8);
      ADD2 (a5.d, aa5.d, c1, cc1, c2, cc2, t1, t2);
      MUL2 (x2, xx2, c2, cc2, c1, cc1, t1, t2, t3, t4, t5, t6, t7, t8);
      ADD2 (a3.d, aa3.d, c1, cc1, c2, cc2, t1, t2);
      MUL2 (x2, xx2, c2, cc2, c1, cc1, t1, t2, t3, t4, t5, t6, t7, t8);
      MUL2 (x, 0.0, c1, cc1, c2, cc2, t1, t2, t3, t4, t5, t6, t7, t8);
      ADD2 (x, 0.0, c2, cc2, c1, cc1, t1, t2);
      if ((y = c1 + (cc1 - u2.d * c1)) == c1 + (cc1 + u2.d * c1))
        {
          retval = y;
          goto ret;
        }
      retval = tanMp (x);
      goto ret;
    }

  /* (III) The case 0.0608 < abs(x) <= 0.787 */
  if (w <= g3.d)
    {
      /* First stage */
      i = ((int) (mfftnhf.d + TWO8 * w));
      z = w - xfg[i][0].d;
      z2 = z * z;
      s = (x < 0.0) ? -1 : 1;
      pz = z + z * z2 * (e0.d + z2 * e1.d);
      fi = xfg[i][1].d;
      gi = xfg[i][2].d;
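      /* With fi = tan (xi) and gi = cot (xi) at the table point xi
         nearest w, and pz ~ tan (z), the addition formula
         tan (xi + z) = (fi + pz) / (1 - fi*pz) rearranges to
         fi + pz*(gi + fi)/(gi - pz), which is better conditioned for
         small z.  */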
      t2 = pz * (gi + fi) / (gi - pz);
      if ((y = fi + (t2 - fi * u3.d)) == fi + (t2 + fi * u3.d))
        {
          retval = (s * y);
          goto ret;
        }
      t3 = (t2 < 0.0) ? -t2 : t2;
      t4 = fi * ua3.d + t3 * ub3.d;
      if ((y = fi + (t2 - t4)) == fi + (t2 + t4))
        {
          retval = (s * y);
          goto ret;
        }

      /* Second stage */
      ffi = xfg[i][3].d;
      c1 = z2 * (a7.d + z2 * (a9.d + z2 * a11.d));
      EMULV (z, z, z2, zz2, t1, t2, t3, t4, t5);
      ADD2 (a5.d, aa5.d, c1, 0.0, c2, cc2, t1, t2);
      MUL2 (z2, zz2, c2, cc2, c1, cc1, t1, t2, t3, t4, t5, t6, t7, t8);
      ADD2 (a3.d, aa3.d, c1, cc1, c2, cc2, t1, t2);
      MUL2 (z2, zz2, c2, cc2, c1, cc1, t1, t2, t3, t4, t5, t6, t7, t8);
      MUL2 (z, 0.0, c1, cc1, c2, cc2, t1, t2, t3, t4, t5, t6, t7, t8);
      ADD2 (z, 0.0, c2, cc2, c1, cc1, t1, t2);

      ADD2 (fi, ffi, c1, cc1, c2, cc2, t1, t2);
      MUL2 (fi, ffi, c1, cc1, c3, cc3, t1, t2, t3, t4, t5, t6, t7, t8);
      SUB2 (1.0, 0.0, c3, cc3, c1, cc1, t1, t2);
      DIV2 (c2, cc2, c1, cc1, c3, cc3, t1, t2, t3, t4, t5, t6, t7, t8, t9,
            t10);

      if ((y = c3 + (cc3 - u4.d * c3)) == c3 + (cc3 + u4.d * c3))
        {
          retval = (s * y);
          goto ret;
        }
      retval = tanMp (x);
      goto ret;
    }

  /* (---) The case 0.787 < abs(x) <= 25 */
  if (w <= g4.d)
    {
      /* Range reduction by algorithm i */
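      /* xn becomes the integer nearest x*2/pi via the add-and-subtract
         trick with the rounding constant toint; pi/2 is split into
         mp1 + mp2 + mp3 so that a + da approximates x - xn*pi/2 in
         double-double form.  The parity n of xn selects between tan
         (even) and -cot (odd) of the reduced argument.  */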
      t = (x * hpinv.d + toint.d);
      xn = t - toint.d;
      v.d = t;
      t1 = (x - xn * mp1.d) - xn * mp2.d;
      n = v.i[LOW_HALF] & 0x00000001;
      da = xn * mp3.d;
      a = t1 - da;
      da = (t1 - a) - da;
      if (a < 0.0)
        {
          ya = -a;
          yya = -da;
          sy = -1;
        }
      else
        {
          ya = a;
          yya = da;
          sy = 1;
        }

      /* (IV),(V) The case 0.787 < abs(x) <= 25, abs(y) <= 1e-7 */
      if (ya <= gy1.d)
        {
          retval = tanMp (x);
          goto ret;
        }

      /* (VI) The case 0.787 < abs(x) <= 25, 1e-7 < abs(y) <= 0.0608 */
      if (ya <= gy2.d)
        {
          a2 = a * a;
          t2 = d9.d + a2 * d11.d;
          t2 = d7.d + a2 * t2;
          t2 = d5.d + a2 * t2;
          t2 = d3.d + a2 * t2;
          t2 = da + a * a2 * t2;

          if (n)
            {
              /* First stage -cot */
              EADD (a, t2, b, db);
              DIV2 (1.0, 0.0, b, db, c, dc, t1, t2, t3, t4, t5, t6, t7, t8,
                    t9, t10);
              if ((y = c + (dc - u6.d * c)) == c + (dc + u6.d * c))
                {
                  retval = (-y);
                  goto ret;
                }
            }
          else
            {
              /* First stage tan */
              if ((y = a + (t2 - u5.d * a)) == a + (t2 + u5.d * a))
                {
                  retval = y;
                  goto ret;
                }
            }
          /* Second stage */
          /* Range reduction by algorithm ii */
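          /* Same scheme as algorithm i, but pi/2 is split into four
             parts (mp1, mp2, pp3, pp4) for a more accurate reduced
             argument a + da.  */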
          t = (x * hpinv.d + toint.d);
          xn = t - toint.d;
          v.d = t;
          t1 = (x - xn * mp1.d) - xn * mp2.d;
          n = v.i[LOW_HALF] & 0x00000001;
          da = xn * pp3.d;
          t = t1 - da;
          da = (t1 - t) - da;
          t1 = xn * pp4.d;
          a = t - t1;
          da = ((t - a) - t1) + da;

          /* Second stage */
          EADD (a, da, t1, t2);
          a = t1;
          da = t2;
          MUL2 (a, da, a, da, x2, xx2, t1, t2, t3, t4, t5, t6, t7, t8);

          c1 = a25.d + x2 * a27.d;
          c1 = a23.d + x2 * c1;
          c1 = a21.d + x2 * c1;
          c1 = a19.d + x2 * c1;
          c1 = a17.d + x2 * c1;
          c1 = a15.d + x2 * c1;
          c1 *= x2;

          ADD2 (a13.d, aa13.d, c1, 0.0, c2, cc2, t1, t2);
          MUL2 (x2, xx2, c2, cc2, c1, cc1, t1, t2, t3, t4, t5, t6, t7, t8);
          ADD2 (a11.d, aa11.d, c1, cc1, c2, cc2, t1, t2);
          MUL2 (x2, xx2, c2, cc2, c1, cc1, t1, t2, t3, t4, t5, t6, t7, t8);
          ADD2 (a9.d, aa9.d, c1, cc1, c2, cc2, t1, t2);
          MUL2 (x2, xx2, c2, cc2, c1, cc1, t1, t2, t3, t4, t5, t6, t7, t8);
          ADD2 (a7.d, aa7.d, c1, cc1, c2, cc2, t1, t2);
          MUL2 (x2, xx2, c2, cc2, c1, cc1, t1, t2, t3, t4, t5, t6, t7, t8);
          ADD2 (a5.d, aa5.d, c1, cc1, c2, cc2, t1, t2);
          MUL2 (x2, xx2, c2, cc2, c1, cc1, t1, t2, t3, t4, t5, t6, t7, t8);
          ADD2 (a3.d, aa3.d, c1, cc1, c2, cc2, t1, t2);
          MUL2 (x2, xx2, c2, cc2, c1, cc1, t1, t2, t3, t4, t5, t6, t7, t8);
          MUL2 (a, da, c1, cc1, c2, cc2, t1, t2, t3, t4, t5, t6, t7, t8);
          ADD2 (a, da, c2, cc2, c1, cc1, t1, t2);

          if (n)
            {
              /* Second stage -cot */
              DIV2 (1.0, 0.0, c1, cc1, c2, cc2, t1, t2, t3, t4, t5, t6, t7,
                    t8, t9, t10);
              if ((y = c2 + (cc2 - u8.d * c2)) == c2 + (cc2 + u8.d * c2))
                {
                  retval = (-y);
                  goto ret;
                }
            }
          else
            {
              /* Second stage tan */
              if ((y = c1 + (cc1 - u7.d * c1)) == c1 + (cc1 + u7.d * c1))
                {
                  retval = y;
                  goto ret;
                }
            }
          retval = tanMp (x);
          goto ret;
        }

      /* (VII) The case 0.787 < abs(x) <= 25, 0.0608 < abs(y) <= 0.787 */

      /* First stage */
      i = ((int) (mfftnhf.d + TWO8 * ya));
      z = (z0 = (ya - xfg[i][0].d)) + yya;
      z2 = z * z;
      pz = z + z * z2 * (e0.d + z2 * e1.d);
      fi = xfg[i][1].d;
      gi = xfg[i][2].d;

      if (n)
        {
          /* -cot */
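          /* From gi = cot (xi) the analogous rearrangement is
             cot (xi + z) = gi - pz*(fi + gi)/(fi + pz); the result is
             negated since tan (x) = -cot (x - (2k+1)*pi/2).  */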
          t2 = pz * (fi + gi) / (fi + pz);
          if ((y = gi - (t2 - gi * u10.d)) == gi - (t2 + gi * u10.d))
            {
              retval = (-sy * y);
              goto ret;
            }
          t3 = (t2 < 0.0) ? -t2 : t2;
          t4 = gi * ua10.d + t3 * ub10.d;
          if ((y = gi - (t2 - t4)) == gi - (t2 + t4))
            {
              retval = (-sy * y);
              goto ret;
            }
        }
      else
        {
          /* tan */
          t2 = pz * (gi + fi) / (gi - pz);
          if ((y = fi + (t2 - fi * u9.d)) == fi + (t2 + fi * u9.d))
            {
              retval = (sy * y);
              goto ret;
            }
          t3 = (t2 < 0.0) ? -t2 : t2;
          t4 = fi * ua9.d + t3 * ub9.d;
          if ((y = fi + (t2 - t4)) == fi + (t2 + t4))
            {
              retval = (sy * y);
              goto ret;
            }
        }

      /* Second stage */
      ffi = xfg[i][3].d;
      EADD (z0, yya, z, zz);
      MUL2 (z, zz, z, zz, z2, zz2, t1, t2, t3, t4, t5, t6, t7, t8);
      c1 = z2 * (a7.d + z2 * (a9.d + z2 * a11.d));
      ADD2 (a5.d, aa5.d, c1, 0.0, c2, cc2, t1, t2);
      MUL2 (z2, zz2, c2, cc2, c1, cc1, t1, t2, t3, t4, t5, t6, t7, t8);
      ADD2 (a3.d, aa3.d, c1, cc1, c2, cc2, t1, t2);
      MUL2 (z2, zz2, c2, cc2, c1, cc1, t1, t2, t3, t4, t5, t6, t7, t8);
      MUL2 (z, zz, c1, cc1, c2, cc2, t1, t2, t3, t4, t5, t6, t7, t8);
      ADD2 (z, zz, c2, cc2, c1, cc1, t1, t2);

      ADD2 (fi, ffi, c1, cc1, c2, cc2, t1, t2);
      MUL2 (fi, ffi, c1, cc1, c3, cc3, t1, t2, t3, t4, t5, t6, t7, t8);
      SUB2 (1.0, 0.0, c3, cc3, c1, cc1, t1, t2);

      if (n)
        {
          /* -cot */
          DIV2 (c1, cc1, c2, cc2, c3, cc3, t1, t2, t3, t4, t5, t6, t7, t8, t9,
                t10);
          if ((y = c3 + (cc3 - u12.d * c3)) == c3 + (cc3 + u12.d * c3))
            {
              retval = (-sy * y);
              goto ret;
            }
        }
      else
        {
          /* tan */
          DIV2 (c2, cc2, c1, cc1, c3, cc3, t1, t2, t3, t4, t5, t6, t7, t8, t9,
                t10);
          if ((y = c3 + (cc3 - u11.d * c3)) == c3 + (cc3 + u11.d * c3))
            {
              retval = (sy * y);
              goto ret;
            }
        }

      retval = tanMp (x);
      goto ret;
    }

  /* (---) The case 25 < abs(x) <= 1e8 */
  if (w <= g5.d)
    {
      /* Range reduction by algorithm ii */
      t = (x * hpinv.d + toint.d);
      xn = t - toint.d;
      v.d = t;
      t1 = (x - xn * mp1.d) - xn * mp2.d;
      n = v.i[LOW_HALF] & 0x00000001;
      da = xn * pp3.d;
      t = t1 - da;
      da = (t1 - t) - da;
      t1 = xn * pp4.d;
      a = t - t1;
      da = ((t - a) - t1) + da;
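      /* Renormalize so that a holds the rounded sum and da the exact
         remainder of the double-double reduced argument.  */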
      EADD (a, da, t1, t2);
      a = t1;
      da = t2;
      if (a < 0.0)
        {
          ya = -a;
          yya = -da;
          sy = -1;
        }
      else
        {
          ya = a;
          yya = da;
          sy = 1;
        }

      /* (+++) The case 25 < abs(x) <= 1e8, abs(y) <= 1e-7 */
      if (ya <= gy1.d)
        {
          retval = tanMp (x);
          goto ret;
        }

      /* (VIII) The case 25 < abs(x) <= 1e8, 1e-7 < abs(y) <= 0.0608 */
      if (ya <= gy2.d)
        {
          a2 = a * a;
          t2 = d9.d + a2 * d11.d;
          t2 = d7.d + a2 * t2;
          t2 = d5.d + a2 * t2;
          t2 = d3.d + a2 * t2;
          t2 = da + a * a2 * t2;

          if (n)
            {
              /* First stage -cot */
              EADD (a, t2, b, db);
              DIV2 (1.0, 0.0, b, db, c, dc, t1, t2, t3, t4, t5, t6, t7, t8,
                    t9, t10);
              if ((y = c + (dc - u14.d * c)) == c + (dc + u14.d * c))
                {
                  retval = (-y);
                  goto ret;
                }
            }
          else
            {
              /* First stage tan */
              if ((y = a + (t2 - u13.d * a)) == a + (t2 + u13.d * a))
                {
                  retval = y;
                  goto ret;
                }
            }

          /* Second stage */
          MUL2 (a, da, a, da, x2, xx2, t1, t2, t3, t4, t5, t6, t7, t8);
          c1 = a25.d + x2 * a27.d;
          c1 = a23.d + x2 * c1;
          c1 = a21.d + x2 * c1;
          c1 = a19.d + x2 * c1;
          c1 = a17.d + x2 * c1;
          c1 = a15.d + x2 * c1;
          c1 *= x2;

          ADD2 (a13.d, aa13.d, c1, 0.0, c2, cc2, t1, t2);
          MUL2 (x2, xx2, c2, cc2, c1, cc1, t1, t2, t3, t4, t5, t6, t7, t8);
          ADD2 (a11.d, aa11.d, c1, cc1, c2, cc2, t1, t2);
          MUL2 (x2, xx2, c2, cc2, c1, cc1, t1, t2, t3, t4, t5, t6, t7, t8);
          ADD2 (a9.d, aa9.d, c1, cc1, c2, cc2, t1, t2);
          MUL2 (x2, xx2, c2, cc2, c1, cc1, t1, t2, t3, t4, t5, t6, t7, t8);
          ADD2 (a7.d, aa7.d, c1, cc1, c2, cc2, t1, t2);
          MUL2 (x2, xx2, c2, cc2, c1, cc1, t1, t2, t3, t4, t5, t6, t7, t8);
          ADD2 (a5.d, aa5.d, c1, cc1, c2, cc2, t1, t2);
          MUL2 (x2, xx2, c2, cc2, c1, cc1, t1, t2, t3, t4, t5, t6, t7, t8);
          ADD2 (a3.d, aa3.d, c1, cc1, c2, cc2, t1, t2);
          MUL2 (x2, xx2, c2, cc2, c1, cc1, t1, t2, t3, t4, t5, t6, t7, t8);
          MUL2 (a, da, c1, cc1, c2, cc2, t1, t2, t3, t4, t5, t6, t7, t8);
          ADD2 (a, da, c2, cc2, c1, cc1, t1, t2);

          if (n)
            {
              /* Second stage -cot */
              DIV2 (1.0, 0.0, c1, cc1, c2, cc2, t1, t2, t3, t4, t5, t6, t7,
                    t8, t9, t10);
              if ((y = c2 + (cc2 - u16.d * c2)) == c2 + (cc2 + u16.d * c2))
                {
                  retval = (-y);
                  goto ret;
                }
            }
          else
            {
              /* Second stage tan */
              if ((y = c1 + (cc1 - u15.d * c1)) == c1 + (cc1 + u15.d * c1))
                {
                  retval = (y);
                  goto ret;
                }
            }
          retval = tanMp (x);
          goto ret;
        }

      /* (IX) The case 25 < abs(x) <= 1e8, 0.0608 < abs(y) <= 0.787 */
      /* First stage */
      i = ((int) (mfftnhf.d + TWO8 * ya));
      z = (z0 = (ya - xfg[i][0].d)) + yya;
      z2 = z * z;
      pz = z + z * z2 * (e0.d + z2 * e1.d);
      fi = xfg[i][1].d;
      gi = xfg[i][2].d;

      if (n)
        {
          /* -cot */
          t2 = pz * (fi + gi) / (fi + pz);
          if ((y = gi - (t2 - gi * u18.d)) == gi - (t2 + gi * u18.d))
            {
              retval = (-sy * y);
              goto ret;
            }
          t3 = (t2 < 0.0) ? -t2 : t2;
          t4 = gi * ua18.d + t3 * ub18.d;
          if ((y = gi - (t2 - t4)) == gi - (t2 + t4))
            {
              retval = (-sy * y);
              goto ret;
            }
        }
      else
        {
          /* tan */
          t2 = pz * (gi + fi) / (gi - pz);
          if ((y = fi + (t2 - fi * u17.d)) == fi + (t2 + fi * u17.d))
            {
              retval = (sy * y);
              goto ret;
            }
          t3 = (t2 < 0.0) ? -t2 : t2;
          t4 = fi * ua17.d + t3 * ub17.d;
          if ((y = fi + (t2 - t4)) == fi + (t2 + t4))
            {
              retval = (sy * y);
              goto ret;
            }
        }

      /* Second stage */
      ffi = xfg[i][3].d;
      EADD (z0, yya, z, zz);
      MUL2 (z, zz, z, zz, z2, zz2, t1, t2, t3, t4, t5, t6, t7, t8);
      c1 = z2 * (a7.d + z2 * (a9.d + z2 * a11.d));
      ADD2 (a5.d, aa5.d, c1, 0.0, c2, cc2, t1, t2);
      MUL2 (z2, zz2, c2, cc2, c1, cc1, t1, t2, t3, t4, t5, t6, t7, t8);
      ADD2 (a3.d, aa3.d, c1, cc1, c2, cc2, t1, t2);
      MUL2 (z2, zz2, c2, cc2, c1, cc1, t1, t2, t3, t4, t5, t6, t7, t8);
      MUL2 (z, zz, c1, cc1, c2, cc2, t1, t2, t3, t4, t5, t6, t7, t8);
      ADD2 (z, zz, c2, cc2, c1, cc1, t1, t2);

      ADD2 (fi, ffi, c1, cc1, c2, cc2, t1, t2);
      MUL2 (fi, ffi, c1, cc1, c3, cc3, t1, t2, t3, t4, t5, t6, t7, t8);
      SUB2 (1.0, 0.0, c3, cc3, c1, cc1, t1, t2);

      if (n)
        {
          /* -cot */
          DIV2 (c1, cc1, c2, cc2, c3, cc3, t1, t2, t3, t4, t5, t6, t7, t8, t9,
                t10);
          if ((y = c3 + (cc3 - u20.d * c3)) == c3 + (cc3 + u20.d * c3))
            {
              retval = (-sy * y);
              goto ret;
            }
        }
      else
        {
          /* tan */
          DIV2 (c2, cc2, c1, cc1, c3, cc3, t1, t2, t3, t4, t5, t6, t7, t8, t9,
                t10);
          if ((y = c3 + (cc3 - u19.d * c3)) == c3 + (cc3 + u19.d * c3))
            {
              retval = (sy * y);
              goto ret;
            }
        }
      retval = tanMp (x);
      goto ret;
    }

  /* (---) The case 1e8 < abs(x) < 2**1024 */
  /* Range reduction by algorithm iii */
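  /* __branred (branred.c) performs the huge-argument reduction
     modulo pi/2, returning the quadrant count and leaving the
     reduced argument as the double-double pair a + da.  */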
  n = (__branred (x, &a, &da)) & 0x00000001;
  EADD (a, da, t1, t2);
  a = t1;
  da = t2;
  if (a < 0.0)
    {
      ya = -a;
      yya = -da;
      sy = -1;
    }
  else
    {
      ya = a;
      yya = da;
      sy = 1;
    }

  /* (+++) The case 1e8 < abs(x) < 2**1024, abs(y) <= 1e-7 */
  if (ya <= gy1.d)
    {
      retval = tanMp (x);
      goto ret;
    }

  /* (X) The case 1e8 < abs(x) < 2**1024, 1e-7 < abs(y) <= 0.0608 */
  if (ya <= gy2.d)
    {
      a2 = a * a;
      t2 = d9.d + a2 * d11.d;
      t2 = d7.d + a2 * t2;
      t2 = d5.d + a2 * t2;
      t2 = d3.d + a2 * t2;
      t2 = da + a * a2 * t2;
      if (n)
        {
          /* First stage -cot */
          EADD (a, t2, b, db);
          DIV2 (1.0, 0.0, b, db, c, dc, t1, t2, t3, t4, t5, t6, t7, t8, t9,
                t10);
          if ((y = c + (dc - u22.d * c)) == c + (dc + u22.d * c))
            {
              retval = (-y);
              goto ret;
            }
        }
      else
        {
          /* First stage tan */
          if ((y = a + (t2 - u21.d * a)) == a + (t2 + u21.d * a))
            {
              retval = y;
              goto ret;
            }
        }

      /* Second stage */
      /* Reduction by algorithm iv */
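      /* Multiple precision reduction: __mpranred reduces x at
         precision p, and the __mp_dbl/__dbl_mp/__sub round trip
         extracts a double-double approximation a + da of the
         reduced argument.  */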
      p = 10;
      n = (__mpranred (x, &mpa, p)) & 0x00000001;
      __mp_dbl (&mpa, &a, p);
      __dbl_mp (a, &mpt1, p);
      __sub (&mpa, &mpt1, &mpt2, p);
      __mp_dbl (&mpt2, &da, p);

      MUL2 (a, da, a, da, x2, xx2, t1, t2, t3, t4, t5, t6, t7, t8);

      c1 = a25.d + x2 * a27.d;
      c1 = a23.d + x2 * c1;
      c1 = a21.d + x2 * c1;
      c1 = a19.d + x2 * c1;
      c1 = a17.d + x2 * c1;
      c1 = a15.d + x2 * c1;
      c1 *= x2;

      ADD2 (a13.d, aa13.d, c1, 0.0, c2, cc2, t1, t2);
      MUL2 (x2, xx2, c2, cc2, c1, cc1, t1, t2, t3, t4, t5, t6, t7, t8);
      ADD2 (a11.d, aa11.d, c1, cc1, c2, cc2, t1, t2);
      MUL2 (x2, xx2, c2, cc2, c1, cc1, t1, t2, t3, t4, t5, t6, t7, t8);
      ADD2 (a9.d, aa9.d, c1, cc1, c2, cc2, t1, t2);
      MUL2 (x2, xx2, c2, cc2, c1, cc1, t1, t2, t3, t4, t5, t6, t7, t8);
      ADD2 (a7.d, aa7.d, c1, cc1, c2, cc2, t1, t2);
      MUL2 (x2, xx2, c2, cc2, c1, cc1, t1, t2, t3, t4, t5, t6, t7, t8);
      ADD2 (a5.d, aa5.d, c1, cc1, c2, cc2, t1, t2);
      MUL2 (x2, xx2, c2, cc2, c1, cc1, t1, t2, t3, t4, t5, t6, t7, t8);
      ADD2 (a3.d, aa3.d, c1, cc1, c2, cc2, t1, t2);
      MUL2 (x2, xx2, c2, cc2, c1, cc1, t1, t2, t3, t4, t5, t6, t7, t8);
      MUL2 (a, da, c1, cc1, c2, cc2, t1, t2, t3, t4, t5, t6, t7, t8);
      ADD2 (a, da, c2, cc2, c1, cc1, t1, t2);

      if (n)
        {
          /* Second stage -cot */
          DIV2 (1.0, 0.0, c1, cc1, c2, cc2, t1, t2, t3, t4, t5, t6, t7, t8,
                t9, t10);
          if ((y = c2 + (cc2 - u24.d * c2)) == c2 + (cc2 + u24.d * c2))
            {
              retval = (-y);
              goto ret;
            }
        }
      else
        {
          /* Second stage tan */
          if ((y = c1 + (cc1 - u23.d * c1)) == c1 + (cc1 + u23.d * c1))
            {
              retval = y;
              goto ret;
            }
        }
      retval = tanMp (x);
      goto ret;
    }

  /* (XI) The case 1e8 < abs(x) < 2**1024, 0.0608 < abs(y) <= 0.787 */
  /* First stage */
  i = ((int) (mfftnhf.d + TWO8 * ya));
  z = (z0 = (ya - xfg[i][0].d)) + yya;
  z2 = z * z;
  pz = z + z * z2 * (e0.d + z2 * e1.d);
  fi = xfg[i][1].d;
  gi = xfg[i][2].d;

  if (n)
    {
      /* -cot */
      t2 = pz * (fi + gi) / (fi + pz);
      if ((y = gi - (t2 - gi * u26.d)) == gi - (t2 + gi * u26.d))
        {
          retval = (-sy * y);
          goto ret;
        }
      t3 = (t2 < 0.0) ? -t2 : t2;
      t4 = gi * ua26.d + t3 * ub26.d;
      if ((y = gi - (t2 - t4)) == gi - (t2 + t4))
        {
          retval = (-sy * y);
          goto ret;
        }
    }
  else
    {
      /* tan */
      t2 = pz * (gi + fi) / (gi - pz);
      if ((y = fi + (t2 - fi * u25.d)) == fi + (t2 + fi * u25.d))
        {
          retval = (sy * y);
          goto ret;
        }
      t3 = (t2 < 0.0) ? -t2 : t2;
      t4 = fi * ua25.d + t3 * ub25.d;
      if ((y = fi + (t2 - t4)) == fi + (t2 + t4))
        {
          retval = (sy * y);
          goto ret;
        }
    }

  /* Second stage */
  ffi = xfg[i][3].d;
  EADD (z0, yya, z, zz);
  MUL2 (z, zz, z, zz, z2, zz2, t1, t2, t3, t4, t5, t6, t7, t8);
  c1 = z2 * (a7.d + z2 * (a9.d + z2 * a11.d));
  ADD2 (a5.d, aa5.d, c1, 0.0, c2, cc2, t1, t2);
  MUL2 (z2, zz2, c2, cc2, c1, cc1, t1, t2, t3, t4, t5, t6, t7, t8);
  ADD2 (a3.d, aa3.d, c1, cc1, c2, cc2, t1, t2);
  MUL2 (z2, zz2, c2, cc2, c1, cc1, t1, t2, t3, t4, t5, t6, t7, t8);
  MUL2 (z, zz, c1, cc1, c2, cc2, t1, t2, t3, t4, t5, t6, t7, t8);
  ADD2 (z, zz, c2, cc2, c1, cc1, t1, t2);

  ADD2 (fi, ffi, c1, cc1, c2, cc2, t1, t2);
  MUL2 (fi, ffi, c1, cc1, c3, cc3, t1, t2, t3, t4, t5, t6, t7, t8);
  SUB2 (1.0, 0.0, c3, cc3, c1, cc1, t1, t2);

  if (n)
    {
      /* -cot */
      DIV2 (c1, cc1, c2, cc2, c3, cc3, t1, t2, t3, t4, t5, t6, t7, t8, t9,
            t10);
      if ((y = c3 + (cc3 - u28.d * c3)) == c3 + (cc3 + u28.d * c3))
        {
          retval = (-sy * y);
          goto ret;
        }
    }
  else
    {
      /* tan */
      DIV2 (c2, cc2, c1, cc1, c3, cc3, t1, t2, t3, t4, t5, t6, t7, t8, t9,
            t10);
      if ((y = c3 + (cc3 - u27.d * c3)) == c3 + (cc3 + u27.d * c3))
        {
          retval = (sy * y);
          goto ret;
        }
    }
  retval = tanMp (x);
  goto ret;

ret:
  return retval;
}

/* Multiple precision stage.  Convert x to a multiple precision
   number, compute tan (x) by the __mptan () routine and convert the
   result back to double.  */
static double
SECTION
tanMp (double x)
{
  int p;
  double y;
  mp_no mpy;
  p = 32;
  __mptan (x, &mpy, p);
  __mp_dbl (&mpy, &y, p);
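  /* SystemTap probe (stap-probe.h): marks results that required the
     multiple precision slow path.  */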
  LIBC_PROBE (slowtan, 2, &x, &y);
  return y;
}

#ifndef __tan
libm_alias_double (__tan, tan)
#endif