// File indexing completed on 2025-05-11 08:23:49
0001 #include "fpsp-namespace.h"
0002 //
0003 //
0004 // stan.sa 3.3 7/29/91
0005 //
0006 // The entry point stan computes the tangent of
0007 // an input argument;
0008 // stand does the same except for denormalized input.
0009 //
0010 // Input: Double-extended number X in location pointed to
0011 // by address register a0.
0012 //
0013 // Output: The value tan(X) returned in floating-point register Fp0.
0014 //
0015 // Accuracy and Monotonicity: The returned result is within 3 ulp in
0016 // 64 significant bit, i.e. within 0.5001 ulp to 53 bits if the
0017 // result is subsequently rounded to double precision. The
0018 // result is provably monotonic in double precision.
0019 //
0020 // Speed: The program sTAN takes approximately 170 cycles for
// input argument X such that |X| < 15Pi, which is the usual
0022 // situation.
0023 //
0024 // Algorithm:
0025 //
0026 // 1. If |X| >= 15Pi or |X| < 2**(-40), go to 6.
0027 //
0028 // 2. Decompose X as X = N(Pi/2) + r where |r| <= Pi/4. Let
0029 // k = N mod 2, so in particular, k = 0 or 1.
0030 //
0031 // 3. If k is odd, go to 5.
0032 //
0033 // 4. (k is even) Tan(X) = tan(r) and tan(r) is approximated by a
0034 // rational function U/V where
0035 // U = r + r*s*(P1 + s*(P2 + s*P3)), and
0036 // V = 1 + s*(Q1 + s*(Q2 + s*(Q3 + s*Q4))), s = r*r.
0037 // Exit.
0038 //
// 5. (k is odd) Tan(X) = -cot(r). Since tan(r) is approximated by a
0040 // rational function U/V where
0041 // U = r + r*s*(P1 + s*(P2 + s*P3)), and
0042 // V = 1 + s*(Q1 + s*(Q2 + s*(Q3 + s*Q4))), s = r*r,
0043 // -Cot(r) = -V/U. Exit.
0044 //
0045 // 6. If |X| > 1, go to 8.
0046 //
0047 // 7. (|X|<2**(-40)) Tan(X) = X. Exit.
0048 //
0049 // 8. Overwrite X by X := X rem 2Pi. Now that |X| <= Pi, go back to 2.
0050 //
0051
0052 // Copyright (C) Motorola, Inc. 1990
0053 // All Rights Reserved
0054 //
0055 // THIS IS UNPUBLISHED PROPRIETARY SOURCE CODE OF MOTOROLA
0056 // The copyright notice above does not evidence any
0057 // actual or intended publication of such source code.
0058
0059 //STAN idnt 2,1 | Motorola 040 Floating Point Software Package
0060
0061 |section 8
0062
0063 #include "fpsp.defs"
0064
0065 BOUNDS1: .long 0x3FD78000,0x4004BC7E
0066 TWOBYPI: .long 0x3FE45F30,0x6DC9C883
0067
0068 TANQ4: .long 0x3EA0B759,0xF50F8688
0069 TANP3: .long 0xBEF2BAA5,0xA8924F04
0070
0071 TANQ3: .long 0xBF346F59,0xB39BA65F,0x00000000,0x00000000
0072
0073 TANP2: .long 0x3FF60000,0xE073D3FC,0x199C4A00,0x00000000
0074
0075 TANQ2: .long 0x3FF90000,0xD23CD684,0x15D95FA1,0x00000000
0076
0077 TANP1: .long 0xBFFC0000,0x8895A6C5,0xFB423BCA,0x00000000
0078
0079 TANQ1: .long 0xBFFD0000,0xEEF57E0D,0xA84BC8CE,0x00000000
0080
0081 INVTWOPI: .long 0x3FFC0000,0xA2F9836E,0x4E44152A,0x00000000
0082
0083 TWOPI1: .long 0x40010000,0xC90FDAA2,0x00000000,0x00000000
0084 TWOPI2: .long 0x3FDF0000,0x85A308D4,0x00000000,0x00000000
0085
0086 //--N*PI/2, -32 <= N <= 32, IN A LEADING TERM IN EXT. AND TRAILING
0087 //--TERM IN SGL. NOTE THAT PI IS 64-BIT LONG, THUS N*PI/2 IS AT
0088 //--MOST 69 BITS LONG.
0089 .global PITBL
0090 PITBL:
0091 .long 0xC0040000,0xC90FDAA2,0x2168C235,0x21800000
0092 .long 0xC0040000,0xC2C75BCD,0x105D7C23,0xA0D00000
0093 .long 0xC0040000,0xBC7EDCF7,0xFF523611,0xA1E80000
0094 .long 0xC0040000,0xB6365E22,0xEE46F000,0x21480000
0095 .long 0xC0040000,0xAFEDDF4D,0xDD3BA9EE,0xA1200000
0096 .long 0xC0040000,0xA9A56078,0xCC3063DD,0x21FC0000
0097 .long 0xC0040000,0xA35CE1A3,0xBB251DCB,0x21100000
0098 .long 0xC0040000,0x9D1462CE,0xAA19D7B9,0xA1580000
0099 .long 0xC0040000,0x96CBE3F9,0x990E91A8,0x21E00000
0100 .long 0xC0040000,0x90836524,0x88034B96,0x20B00000
0101 .long 0xC0040000,0x8A3AE64F,0x76F80584,0xA1880000
0102 .long 0xC0040000,0x83F2677A,0x65ECBF73,0x21C40000
0103 .long 0xC0030000,0xFB53D14A,0xA9C2F2C2,0x20000000
0104 .long 0xC0030000,0xEEC2D3A0,0x87AC669F,0x21380000
0105 .long 0xC0030000,0xE231D5F6,0x6595DA7B,0xA1300000
0106 .long 0xC0030000,0xD5A0D84C,0x437F4E58,0x9FC00000
0107 .long 0xC0030000,0xC90FDAA2,0x2168C235,0x21000000
0108 .long 0xC0030000,0xBC7EDCF7,0xFF523611,0xA1680000
0109 .long 0xC0030000,0xAFEDDF4D,0xDD3BA9EE,0xA0A00000
0110 .long 0xC0030000,0xA35CE1A3,0xBB251DCB,0x20900000
0111 .long 0xC0030000,0x96CBE3F9,0x990E91A8,0x21600000
0112 .long 0xC0030000,0x8A3AE64F,0x76F80584,0xA1080000
0113 .long 0xC0020000,0xFB53D14A,0xA9C2F2C2,0x1F800000
0114 .long 0xC0020000,0xE231D5F6,0x6595DA7B,0xA0B00000
0115 .long 0xC0020000,0xC90FDAA2,0x2168C235,0x20800000
0116 .long 0xC0020000,0xAFEDDF4D,0xDD3BA9EE,0xA0200000
0117 .long 0xC0020000,0x96CBE3F9,0x990E91A8,0x20E00000
0118 .long 0xC0010000,0xFB53D14A,0xA9C2F2C2,0x1F000000
0119 .long 0xC0010000,0xC90FDAA2,0x2168C235,0x20000000
0120 .long 0xC0010000,0x96CBE3F9,0x990E91A8,0x20600000
0121 .long 0xC0000000,0xC90FDAA2,0x2168C235,0x1F800000
0122 .long 0xBFFF0000,0xC90FDAA2,0x2168C235,0x1F000000
0123 .long 0x00000000,0x00000000,0x00000000,0x00000000
0124 .long 0x3FFF0000,0xC90FDAA2,0x2168C235,0x9F000000
0125 .long 0x40000000,0xC90FDAA2,0x2168C235,0x9F800000
0126 .long 0x40010000,0x96CBE3F9,0x990E91A8,0xA0600000
0127 .long 0x40010000,0xC90FDAA2,0x2168C235,0xA0000000
0128 .long 0x40010000,0xFB53D14A,0xA9C2F2C2,0x9F000000
0129 .long 0x40020000,0x96CBE3F9,0x990E91A8,0xA0E00000
0130 .long 0x40020000,0xAFEDDF4D,0xDD3BA9EE,0x20200000
0131 .long 0x40020000,0xC90FDAA2,0x2168C235,0xA0800000
0132 .long 0x40020000,0xE231D5F6,0x6595DA7B,0x20B00000
0133 .long 0x40020000,0xFB53D14A,0xA9C2F2C2,0x9F800000
0134 .long 0x40030000,0x8A3AE64F,0x76F80584,0x21080000
0135 .long 0x40030000,0x96CBE3F9,0x990E91A8,0xA1600000
0136 .long 0x40030000,0xA35CE1A3,0xBB251DCB,0xA0900000
0137 .long 0x40030000,0xAFEDDF4D,0xDD3BA9EE,0x20A00000
0138 .long 0x40030000,0xBC7EDCF7,0xFF523611,0x21680000
0139 .long 0x40030000,0xC90FDAA2,0x2168C235,0xA1000000
0140 .long 0x40030000,0xD5A0D84C,0x437F4E58,0x1FC00000
0141 .long 0x40030000,0xE231D5F6,0x6595DA7B,0x21300000
0142 .long 0x40030000,0xEEC2D3A0,0x87AC669F,0xA1380000
0143 .long 0x40030000,0xFB53D14A,0xA9C2F2C2,0xA0000000
0144 .long 0x40040000,0x83F2677A,0x65ECBF73,0xA1C40000
0145 .long 0x40040000,0x8A3AE64F,0x76F80584,0x21880000
0146 .long 0x40040000,0x90836524,0x88034B96,0xA0B00000
0147 .long 0x40040000,0x96CBE3F9,0x990E91A8,0xA1E00000
0148 .long 0x40040000,0x9D1462CE,0xAA19D7B9,0x21580000
0149 .long 0x40040000,0xA35CE1A3,0xBB251DCB,0xA1100000
0150 .long 0x40040000,0xA9A56078,0xCC3063DD,0xA1FC0000
0151 .long 0x40040000,0xAFEDDF4D,0xDD3BA9EE,0x21200000
0152 .long 0x40040000,0xB6365E22,0xEE46F000,0xA1480000
0153 .long 0x40040000,0xBC7EDCF7,0xFF523611,0x21E80000
0154 .long 0x40040000,0xC2C75BCD,0x105D7C23,0x20D00000
0155 .long 0x40040000,0xC90FDAA2,0x2168C235,0xA1800000
0156
0157 .set INARG,FP_SCR4
0158
0159 .set TWOTO63,L_SCR1
0160 .set ENDFLAG,L_SCR2
0161 .set N,L_SCR3
0162
0163 | xref t_frcinx
0164 |xref t_extdnrm
0165
0166 .global stand
0167 stand:
0168 //--TAN(X) = X FOR DENORMALIZED X
0169
0170 bra t_extdnrm
0171
0172 .global stan
0173 stan:
0174 fmovex (%a0),%fp0 // ...LOAD INPUT
0175
0176 movel (%a0),%d0
0177 movew 4(%a0),%d0
0178 andil #0x7FFFFFFF,%d0
0179
0180 cmpil #0x3FD78000,%d0 // ...|X| >= 2**(-40)?
0181 bges TANOK1
0182 bra TANSM
0183 TANOK1:
0184 cmpil #0x4004BC7E,%d0 // ...|X| < 15 PI?
0185 blts TANMAIN
0186 bra REDUCEX
0187
0188
0189 TANMAIN:
0190 //--THIS IS THE USUAL CASE, |X| <= 15 PI.
0191 //--THE ARGUMENT REDUCTION IS DONE BY TABLE LOOK UP.
0192 fmovex %fp0,%fp1
0193 fmuld TWOBYPI,%fp1 // ...X*2/PI
0194
0195 //--HIDE THE NEXT TWO INSTRUCTIONS
0196 leal PITBL+0x200,%a1 // ...TABLE OF N*PI/2, N = -32,...,32
0197
0198 //--FP1 IS NOW READY
0199 fmovel %fp1,%d0 // ...CONVERT TO INTEGER
0200
0201 asll #4,%d0
0202 addal %d0,%a1 // ...ADDRESS N*PIBY2 IN Y1, Y2
0203
0204 fsubx (%a1)+,%fp0 // ...X-Y1
0205 //--HIDE THE NEXT ONE
0206
0207 fsubs (%a1),%fp0 // ...FP0 IS R = (X-Y1)-Y2
0208
0209 rorl #5,%d0
0210 andil #0x80000000,%d0 // ...D0 WAS ODD IFF D0 < 0
0211
0212 TANCONT:
0213
0214 cmpil #0,%d0
0215 blt NODD
0216
0217 fmovex %fp0,%fp1
0218 fmulx %fp1,%fp1 // ...S = R*R
0219
0220 fmoved TANQ4,%fp3
0221 fmoved TANP3,%fp2
0222
0223 fmulx %fp1,%fp3 // ...SQ4
0224 fmulx %fp1,%fp2 // ...SP3
0225
0226 faddd TANQ3,%fp3 // ...Q3+SQ4
0227 faddx TANP2,%fp2 // ...P2+SP3
0228
0229 fmulx %fp1,%fp3 // ...S(Q3+SQ4)
0230 fmulx %fp1,%fp2 // ...S(P2+SP3)
0231
0232 faddx TANQ2,%fp3 // ...Q2+S(Q3+SQ4)
0233 faddx TANP1,%fp2 // ...P1+S(P2+SP3)
0234
0235 fmulx %fp1,%fp3 // ...S(Q2+S(Q3+SQ4))
0236 fmulx %fp1,%fp2 // ...S(P1+S(P2+SP3))
0237
0238 faddx TANQ1,%fp3 // ...Q1+S(Q2+S(Q3+SQ4))
0239 fmulx %fp0,%fp2 // ...RS(P1+S(P2+SP3))
0240
0241 fmulx %fp3,%fp1 // ...S(Q1+S(Q2+S(Q3+SQ4)))
0242
0243
0244 faddx %fp2,%fp0 // ...R+RS(P1+S(P2+SP3))
0245
0246
0247 fadds #0x3F800000,%fp1 // ...1+S(Q1+...)
0248
0249 fmovel %d1,%fpcr //restore users exceptions
0250 fdivx %fp1,%fp0 //last inst - possible exception set
0251
0252 bra t_frcinx
0253
0254 NODD:
0255 fmovex %fp0,%fp1
0256 fmulx %fp0,%fp0 // ...S = R*R
0257
0258 fmoved TANQ4,%fp3
0259 fmoved TANP3,%fp2
0260
0261 fmulx %fp0,%fp3 // ...SQ4
0262 fmulx %fp0,%fp2 // ...SP3
0263
0264 faddd TANQ3,%fp3 // ...Q3+SQ4
0265 faddx TANP2,%fp2 // ...P2+SP3
0266
0267 fmulx %fp0,%fp3 // ...S(Q3+SQ4)
0268 fmulx %fp0,%fp2 // ...S(P2+SP3)
0269
0270 faddx TANQ2,%fp3 // ...Q2+S(Q3+SQ4)
0271 faddx TANP1,%fp2 // ...P1+S(P2+SP3)
0272
0273 fmulx %fp0,%fp3 // ...S(Q2+S(Q3+SQ4))
0274 fmulx %fp0,%fp2 // ...S(P1+S(P2+SP3))
0275
0276 faddx TANQ1,%fp3 // ...Q1+S(Q2+S(Q3+SQ4))
0277 fmulx %fp1,%fp2 // ...RS(P1+S(P2+SP3))
0278
0279 fmulx %fp3,%fp0 // ...S(Q1+S(Q2+S(Q3+SQ4)))
0280
0281
0282 faddx %fp2,%fp1 // ...R+RS(P1+S(P2+SP3))
0283 fadds #0x3F800000,%fp0 // ...1+S(Q1+...)
0284
0285
0286 fmovex %fp1,-(%sp)
0287 eoril #0x80000000,(%sp)
0288
0289 fmovel %d1,%fpcr //restore users exceptions
0290 fdivx (%sp)+,%fp0 //last inst - possible exception set
0291
0292 bra t_frcinx
0293
0294 TANBORS:
0295 //--IF |X| > 15PI, WE USE THE GENERAL ARGUMENT REDUCTION.
0296 //--IF |X| < 2**(-40), RETURN X OR 1.
0297 cmpil #0x3FFF8000,%d0
0298 bgts REDUCEX
0299
0300 TANSM:
0301
0302 fmovex %fp0,-(%sp)
0303 fmovel %d1,%fpcr //restore users exceptions
0304 fmovex (%sp)+,%fp0 //last inst - possible exception set
0305
0306 bra t_frcinx
0307
0308
0309 REDUCEX:
0310 //--WHEN REDUCEX IS USED, THE CODE WILL INEVITABLY BE SLOW.
0311 //--THIS REDUCTION METHOD, HOWEVER, IS MUCH FASTER THAN USING
0312 //--THE REMAINDER INSTRUCTION WHICH IS NOW IN SOFTWARE.
0313
0314 fmovemx %fp2-%fp5,-(%a7) // ...save FP2 through FP5
0315 movel %d2,-(%a7)
0316 fmoves #0x00000000,%fp1
0317
0318 //--If compact form of abs(arg) in d0=$7ffeffff, argument is so large that
0319 //--there is a danger of unwanted overflow in first LOOP iteration. In this
0320 //--case, reduce argument by one remainder step to make subsequent reduction
0321 //--safe.
0322 cmpil #0x7ffeffff,%d0 //is argument dangerously large?
0323 bnes LOOP
0324 movel #0x7ffe0000,FP_SCR2(%a6) //yes
0325 // ;create 2**16383*PI/2
0326 movel #0xc90fdaa2,FP_SCR2+4(%a6)
0327 clrl FP_SCR2+8(%a6)
0328 ftstx %fp0 //test sign of argument
0329 movel #0x7fdc0000,FP_SCR3(%a6) //create low half of 2**16383*
0330 // ;PI/2 at FP_SCR3
0331 movel #0x85a308d3,FP_SCR3+4(%a6)
0332 clrl FP_SCR3+8(%a6)
0333 fblt red_neg
0334 orw #0x8000,FP_SCR2(%a6) //positive arg
0335 orw #0x8000,FP_SCR3(%a6)
0336 red_neg:
0337 faddx FP_SCR2(%a6),%fp0 //high part of reduction is exact
0338 fmovex %fp0,%fp1 //save high result in fp1
0339 faddx FP_SCR3(%a6),%fp0 //low part of reduction
0340 fsubx %fp0,%fp1 //determine low component of result
0341 faddx FP_SCR3(%a6),%fp1 //fp0/fp1 are reduced argument.
0342
0343 //--ON ENTRY, FP0 IS X, ON RETURN, FP0 IS X REM PI/2, |X| <= PI/4.
0344 //--integer quotient will be stored in N
0345 //--Intermediate remainder is 66-bit long; (R,r) in (FP0,FP1)
0346
0347 LOOP:
0348 fmovex %fp0,INARG(%a6) // ...+-2**K * F, 1 <= F < 2
0349 movew INARG(%a6),%d0
0350 movel %d0,%a1 // ...save a copy of D0
0351 andil #0x00007FFF,%d0
0352 subil #0x00003FFF,%d0 // ...D0 IS K
0353 cmpil #28,%d0
0354 bles LASTLOOP
0355 CONTLOOP:
0356 subil #27,%d0 // ...D0 IS L := K-27
0357 movel #0,ENDFLAG(%a6)
0358 bras WORK
0359 LASTLOOP:
0360 clrl %d0 // ...D0 IS L := 0
0361 movel #1,ENDFLAG(%a6)
0362
0363 WORK:
0364 //--FIND THE REMAINDER OF (R,r) W.R.T. 2**L * (PI/2). L IS SO CHOSEN
0365 //--THAT INT( X * (2/PI) / 2**(L) ) < 2**29.
0366
0367 //--CREATE 2**(-L) * (2/PI), SIGN(INARG)*2**(63),
0368 //--2**L * (PIby2_1), 2**L * (PIby2_2)
0369
0370 movel #0x00003FFE,%d2 // ...BIASED EXPO OF 2/PI
0371 subl %d0,%d2 // ...BIASED EXPO OF 2**(-L)*(2/PI)
0372
0373 movel #0xA2F9836E,FP_SCR1+4(%a6)
0374 movel #0x4E44152A,FP_SCR1+8(%a6)
0375 movew %d2,FP_SCR1(%a6) // ...FP_SCR1 is 2**(-L)*(2/PI)
0376
0377 fmovex %fp0,%fp2
0378 fmulx FP_SCR1(%a6),%fp2
0379 //--WE MUST NOW FIND INT(FP2). SINCE WE NEED THIS VALUE IN
0380 //--FLOATING POINT FORMAT, THE TWO FMOVE'S FMOVE.L FP <--> N
0381 //--WILL BE TOO INEFFICIENT. THE WAY AROUND IT IS THAT
0382 //--(SIGN(INARG)*2**63 + FP2) - SIGN(INARG)*2**63 WILL GIVE
0383 //--US THE DESIRED VALUE IN FLOATING POINT.
0384
0385 //--HIDE SIX CYCLES OF INSTRUCTION
0386 movel %a1,%d2
0387 swap %d2
0388 andil #0x80000000,%d2
0389 oril #0x5F000000,%d2 // ...D2 IS SIGN(INARG)*2**63 IN SGL
0390 movel %d2,TWOTO63(%a6)
0391
0392 movel %d0,%d2
0393 addil #0x00003FFF,%d2 // ...BIASED EXPO OF 2**L * (PI/2)
0394
0395 //--FP2 IS READY
0396 fadds TWOTO63(%a6),%fp2 // ...THE FRACTIONAL PART OF FP1 IS ROUNDED
0397
0398 //--HIDE 4 CYCLES OF INSTRUCTION; creating 2**(L)*Piby2_1 and 2**(L)*Piby2_2
0399 movew %d2,FP_SCR2(%a6)
0400 clrw FP_SCR2+2(%a6)
0401 movel #0xC90FDAA2,FP_SCR2+4(%a6)
0402 clrl FP_SCR2+8(%a6) // ...FP_SCR2 is 2**(L) * Piby2_1
0403
0404 //--FP2 IS READY
0405 fsubs TWOTO63(%a6),%fp2 // ...FP2 is N
0406
0407 addil #0x00003FDD,%d0
0408 movew %d0,FP_SCR3(%a6)
0409 clrw FP_SCR3+2(%a6)
0410 movel #0x85A308D3,FP_SCR3+4(%a6)
0411 clrl FP_SCR3+8(%a6) // ...FP_SCR3 is 2**(L) * Piby2_2
0412
0413 movel ENDFLAG(%a6),%d0
0414
0415 //--We are now ready to perform (R+r) - N*P1 - N*P2, P1 = 2**(L) * Piby2_1 and
0416 //--P2 = 2**(L) * Piby2_2
0417 fmovex %fp2,%fp4
0418 fmulx FP_SCR2(%a6),%fp4 // ...W = N*P1
0419 fmovex %fp2,%fp5
0420 fmulx FP_SCR3(%a6),%fp5 // ...w = N*P2
0421 fmovex %fp4,%fp3
0422 //--we want P+p = W+w but |p| <= half ulp of P
0423 //--Then, we need to compute A := R-P and a := r-p
0424 faddx %fp5,%fp3 // ...FP3 is P
0425 fsubx %fp3,%fp4 // ...W-P
0426
0427 fsubx %fp3,%fp0 // ...FP0 is A := R - P
0428 faddx %fp5,%fp4 // ...FP4 is p = (W-P)+w
0429
0430 fmovex %fp0,%fp3 // ...FP3 A
0431 fsubx %fp4,%fp1 // ...FP1 is a := r - p
0432
0433 //--Now we need to normalize (A,a) to "new (R,r)" where R+r = A+a but
0434 //--|r| <= half ulp of R.
0435 faddx %fp1,%fp0 // ...FP0 is R := A+a
0436 //--No need to calculate r if this is the last loop
0437 cmpil #0,%d0
0438 bgt RESTORE
0439
0440 //--Need to calculate r
0441 fsubx %fp0,%fp3 // ...A-R
0442 faddx %fp3,%fp1 // ...FP1 is r := (A-R)+a
0443 bra LOOP
0444
0445 RESTORE:
0446 fmovel %fp2,N(%a6)
0447 movel (%a7)+,%d2
0448 fmovemx (%a7)+,%fp2-%fp5
0449
0450
0451 movel N(%a6),%d0
0452 rorl #1,%d0
0453
0454
0455 bra TANCONT
0456
0457 |end