
(FPCore (x eps) :precision binary64 (- (tan (+ x eps)) (tan x)))
double code(double x, double eps) {
return tan((x + eps)) - tan(x);
}
! NaN-propagation-safe fmax/fmin, emulating C99 fmax/fmin semantics:
! when exactly one argument is NaN the other argument is returned
! (Fortran's intrinsic max/min leave NaN handling unspecified).
! Generic interfaces cover every real(4)/real(8) argument combination.
module fmin_fmax_functions
implicit none
private
public fmax
public fmin
interface fmax
module procedure fmax88
module procedure fmax44
module procedure fmax84
module procedure fmax48
end interface
interface fmin
module procedure fmin88
module procedure fmin44
module procedure fmin84
module procedure fmin48
end interface
contains
! double/double fmax.  `v /= v` is the classic self-inequality NaN test.
real(8) function fmax88(x, y) result (res)
real(8), intent (in) :: x
real(8), intent (in) :: y
! If x is NaN take y; else if y is NaN take x; otherwise max(x, y).
res = merge(y, merge(x, max(x, y), y /= y), x /= x)
end function
! single/single fmax.
real(4) function fmax44(x, y) result (res)
real(4), intent (in) :: x
real(4), intent (in) :: y
res = merge(y, merge(x, max(x, y), y /= y), x /= x)
end function
! double/single fmax; the real(4) argument is widened with dble().
real(8) function fmax84(x, y) result(res)
real(8), intent (in) :: x
real(4), intent (in) :: y
res = merge(dble(y), merge(x, max(x, dble(y)), y /= y), x /= x)
end function
! single/double fmax.
real(8) function fmax48(x, y) result(res)
real(4), intent (in) :: x
real(8), intent (in) :: y
res = merge(y, merge(dble(x), max(dble(x), y), y /= y), x /= x)
end function
! double/double fmin (same NaN policy as fmax88).
real(8) function fmin88(x, y) result (res)
real(8), intent (in) :: x
real(8), intent (in) :: y
res = merge(y, merge(x, min(x, y), y /= y), x /= x)
end function
! single/single fmin.
real(4) function fmin44(x, y) result (res)
real(4), intent (in) :: x
real(4), intent (in) :: y
res = merge(y, merge(x, min(x, y), y /= y), x /= x)
end function
! double/single fmin.
real(8) function fmin84(x, y) result(res)
real(8), intent (in) :: x
real(4), intent (in) :: y
res = merge(dble(y), merge(x, min(x, dble(y)), y /= y), x /= x)
end function
! single/double fmin.
real(8) function fmin48(x, y) result(res)
real(4), intent (in) :: x
real(8), intent (in) :: y
res = merge(y, merge(dble(x), min(dble(x), y), y /= y), x /= x)
end function
end module
! Baseline expression: tan(x + eps) - tan(x) in double precision.
! Cancellation-prone when |eps| is small.
! Fix: add `implicit none`, matching every other program unit in this file.
real(8) function code(x, eps)
use fmin_fmax_functions
implicit none
real(8), intent (in) :: x
real(8), intent (in) :: eps
code = tan((x + eps)) - tan(x)
end function
public static double code(double x, double eps) {
    // Baseline: difference of tangents; cancels badly for tiny eps.
    final double upper = Math.tan(x + eps);
    final double lower = Math.tan(x);
    return upper - lower;
}
def code(x, eps):
    """Baseline tan(x + eps) - tan(x); loses accuracy when eps is tiny."""
    shifted = math.tan(x + eps)
    return shifted - math.tan(x)
# Baseline: tan(x + eps) - tan(x) in Float64 (cancellation-prone for tiny eps).
function code(x, eps)
    hi = tan(Float64(x + eps))
    return Float64(hi - tan(x))
end
% Baseline: tan(x + eps) - tan(x); inaccurate for small eps (cancellation).
function tmp = code(x, eps) tmp = tan((x + eps)) - tan(x); end
(* Baseline: tan(x + eps) - tan(x), every subexpression rounded to machine precision. *)
code[x_, eps_] := N[(N[Tan[N[(x + eps), $MachinePrecision]], $MachinePrecision] - N[Tan[x], $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\tan \left(x + \varepsilon\right) - \tan x
\end{array}
Herbie found 16 alternatives:
| Alternative | Accuracy | Speedup |
|---|---|---|
(FPCore (x eps) :precision binary64 (- (tan (+ x eps)) (tan x)))
double code(double x, double eps) {
return tan((x + eps)) - tan(x);
}
module fmin_fmax_functions
implicit none
private
public fmax
public fmin
interface fmax
module procedure fmax88
module procedure fmax44
module procedure fmax84
module procedure fmax48
end interface
interface fmin
module procedure fmin88
module procedure fmin44
module procedure fmin84
module procedure fmin48
end interface
contains
real(8) function fmax88(x, y) result (res)
real(8), intent (in) :: x
real(8), intent (in) :: y
res = merge(y, merge(x, max(x, y), y /= y), x /= x)
end function
real(4) function fmax44(x, y) result (res)
real(4), intent (in) :: x
real(4), intent (in) :: y
res = merge(y, merge(x, max(x, y), y /= y), x /= x)
end function
real(8) function fmax84(x, y) result(res)
real(8), intent (in) :: x
real(4), intent (in) :: y
res = merge(dble(y), merge(x, max(x, dble(y)), y /= y), x /= x)
end function
real(8) function fmax48(x, y) result(res)
real(4), intent (in) :: x
real(8), intent (in) :: y
res = merge(y, merge(dble(x), max(dble(x), y), y /= y), x /= x)
end function
real(8) function fmin88(x, y) result (res)
real(8), intent (in) :: x
real(8), intent (in) :: y
res = merge(y, merge(x, min(x, y), y /= y), x /= x)
end function
real(4) function fmin44(x, y) result (res)
real(4), intent (in) :: x
real(4), intent (in) :: y
res = merge(y, merge(x, min(x, y), y /= y), x /= x)
end function
real(8) function fmin84(x, y) result(res)
real(8), intent (in) :: x
real(4), intent (in) :: y
res = merge(dble(y), merge(x, min(x, dble(y)), y /= y), x /= x)
end function
real(8) function fmin48(x, y) result(res)
real(4), intent (in) :: x
real(8), intent (in) :: y
res = merge(y, merge(dble(x), min(dble(x), y), y /= y), x /= x)
end function
end module
real(8) function code(x, eps)
use fmin_fmax_functions
real(8), intent (in) :: x
real(8), intent (in) :: eps
code = tan((x + eps)) - tan(x)
end function
public static double code(double x, double eps) {
return Math.tan((x + eps)) - Math.tan(x);
}
def code(x, eps): return math.tan((x + eps)) - math.tan(x)
function code(x, eps) return Float64(tan(Float64(x + eps)) - tan(x)) end
function tmp = code(x, eps) tmp = tan((x + eps)) - tan(x); end
code[x_, eps_] := N[(N[Tan[N[(x + eps), $MachinePrecision]], $MachinePrecision] - N[Tan[x], $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\tan \left(x + \varepsilon\right) - \tan x
\end{array}
(FPCore (x eps)
:precision binary64
(let* ((t_0 (pow (tan x) 2.0))
(t_1 (- t_0))
(t_2 (- 1.0 t_1))
(t_3
(+
(fma
(/ (* t_2 (pow (sin x) 2.0)) (pow (cos x) 2.0))
-1.0
(fma t_2 -0.5 (* t_0 0.16666666666666666)))
0.16666666666666666))
(t_4 (* t_2 (sin x)))
(t_5 (/ t_4 (cos x))))
(*
(-
(fma
(fma
(-
(*
(- eps)
(fma
t_5
-0.5
(/ (fma t_3 (sin x) (* 0.16666666666666666 t_4)) (cos x))))
t_3)
eps
(* 1.0 t_5))
eps
1.0)
t_1)
eps)))
/* Herbie alternative: Taylor expansion of tan(x+eps)-tan(x) in eps around 0,
 * restructured with fma.  Operation order is deliberate (accuracy ~99.6%);
 * do not reassociate. */
double code(double x, double eps) {
/* t_0 = tan(x)^2; t_2 = 1 + tan(x)^2 = sec(x)^2, the derivative of tan. */
double t_0 = pow(tan(x), 2.0);
double t_1 = -t_0;
double t_2 = 1.0 - t_1;
/* Third-order Taylor coefficient terms; 0.1666... = 1/6. */
double t_3 = fma(((t_2 * pow(sin(x), 2.0)) / pow(cos(x), 2.0)), -1.0, fma(t_2, -0.5, (t_0 * 0.16666666666666666))) + 0.16666666666666666;
/* t_5 = sec(x)^2 * tan(x): second-order (1/2 * d2/dx2 tan) building block. */
double t_4 = t_2 * sin(x);
double t_5 = t_4 / cos(x);
/* Horner-style evaluation in eps via nested fma, scaled by eps at the end. */
return (fma(fma(((-eps * fma(t_5, -0.5, (fma(t_3, sin(x), (0.16666666666666666 * t_4)) / cos(x)))) - t_3), eps, (1.0 * t_5)), eps, 1.0) - t_1) * eps;
}
function code(x, eps) t_0 = tan(x) ^ 2.0 t_1 = Float64(-t_0) t_2 = Float64(1.0 - t_1) t_3 = Float64(fma(Float64(Float64(t_2 * (sin(x) ^ 2.0)) / (cos(x) ^ 2.0)), -1.0, fma(t_2, -0.5, Float64(t_0 * 0.16666666666666666))) + 0.16666666666666666) t_4 = Float64(t_2 * sin(x)) t_5 = Float64(t_4 / cos(x)) return Float64(Float64(fma(fma(Float64(Float64(Float64(-eps) * fma(t_5, -0.5, Float64(fma(t_3, sin(x), Float64(0.16666666666666666 * t_4)) / cos(x)))) - t_3), eps, Float64(1.0 * t_5)), eps, 1.0) - t_1) * eps) end
code[x_, eps_] := Block[{t$95$0 = N[Power[N[Tan[x], $MachinePrecision], 2.0], $MachinePrecision]}, Block[{t$95$1 = (-t$95$0)}, Block[{t$95$2 = N[(1.0 - t$95$1), $MachinePrecision]}, Block[{t$95$3 = N[(N[(N[(N[(t$95$2 * N[Power[N[Sin[x], $MachinePrecision], 2.0], $MachinePrecision]), $MachinePrecision] / N[Power[N[Cos[x], $MachinePrecision], 2.0], $MachinePrecision]), $MachinePrecision] * -1.0 + N[(t$95$2 * -0.5 + N[(t$95$0 * 0.16666666666666666), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] + 0.16666666666666666), $MachinePrecision]}, Block[{t$95$4 = N[(t$95$2 * N[Sin[x], $MachinePrecision]), $MachinePrecision]}, Block[{t$95$5 = N[(t$95$4 / N[Cos[x], $MachinePrecision]), $MachinePrecision]}, N[(N[(N[(N[(N[(N[((-eps) * N[(t$95$5 * -0.5 + N[(N[(t$95$3 * N[Sin[x], $MachinePrecision] + N[(0.16666666666666666 * t$95$4), $MachinePrecision]), $MachinePrecision] / N[Cos[x], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] - t$95$3), $MachinePrecision] * eps + N[(1.0 * t$95$5), $MachinePrecision]), $MachinePrecision] * eps + 1.0), $MachinePrecision] - t$95$1), $MachinePrecision] * eps), $MachinePrecision]]]]]]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := {\tan x}^{2}\\
t_1 := -t\_0\\
t_2 := 1 - t\_1\\
t_3 := \mathsf{fma}\left(\frac{t\_2 \cdot {\sin x}^{2}}{{\cos x}^{2}}, -1, \mathsf{fma}\left(t\_2, -0.5, t\_0 \cdot 0.16666666666666666\right)\right) + 0.16666666666666666\\
t_4 := t\_2 \cdot \sin x\\
t_5 := \frac{t\_4}{\cos x}\\
\left(\mathsf{fma}\left(\mathsf{fma}\left(\left(-\varepsilon\right) \cdot \mathsf{fma}\left(t\_5, -0.5, \frac{\mathsf{fma}\left(t\_3, \sin x, 0.16666666666666666 \cdot t\_4\right)}{\cos x}\right) - t\_3, \varepsilon, 1 \cdot t\_5\right), \varepsilon, 1\right) - t\_1\right) \cdot \varepsilon
\end{array}
\end{array}
Initial program 62.5%
Taylor expanded in eps around 0
Applied rewrites 99.6%
(FPCore (x eps)
:precision binary64
(let* ((t_0 (pow (tan x) 2.0)) (t_1 (- 1.0 (- t_0))))
(*
(fma
(fma
(-
(-
(-
(* (* eps x) 0.6666666666666666)
(- (* (/ (pow (sin x) 2.0) (pow (cos x) 2.0)) t_1)))
(fma -0.5 t_1 (* 0.16666666666666666 t_0)))
0.16666666666666666)
eps
(* t_1 (tan x)))
eps
t_1)
eps)))
/* Herbie alternative: eps-Taylor expansion of tan(x+eps)-tan(x), with the
 * leading x-dependence also partially expanded (0.666... = 2/3 term).
 * Operation order is deliberate; do not reassociate. */
double code(double x, double eps) {
/* t_1 = 1 + tan(x)^2 = sec(x)^2 (written as 1.0 - -t_0). */
double t_0 = pow(tan(x), 2.0);
double t_1 = 1.0 - -t_0;
/* Horner in eps via nested fma: constant term t_1, linear t_1*tan(x). */
return fma(fma((((((eps * x) * 0.6666666666666666) - -((pow(sin(x), 2.0) / pow(cos(x), 2.0)) * t_1)) - fma(-0.5, t_1, (0.16666666666666666 * t_0))) - 0.16666666666666666), eps, (t_1 * tan(x))), eps, t_1) * eps;
}
function code(x, eps) t_0 = tan(x) ^ 2.0 t_1 = Float64(1.0 - Float64(-t_0)) return Float64(fma(fma(Float64(Float64(Float64(Float64(Float64(eps * x) * 0.6666666666666666) - Float64(-Float64(Float64((sin(x) ^ 2.0) / (cos(x) ^ 2.0)) * t_1))) - fma(-0.5, t_1, Float64(0.16666666666666666 * t_0))) - 0.16666666666666666), eps, Float64(t_1 * tan(x))), eps, t_1) * eps) end
code[x_, eps_] := Block[{t$95$0 = N[Power[N[Tan[x], $MachinePrecision], 2.0], $MachinePrecision]}, Block[{t$95$1 = N[(1.0 - (-t$95$0)), $MachinePrecision]}, N[(N[(N[(N[(N[(N[(N[(N[(eps * x), $MachinePrecision] * 0.6666666666666666), $MachinePrecision] - (-N[(N[(N[Power[N[Sin[x], $MachinePrecision], 2.0], $MachinePrecision] / N[Power[N[Cos[x], $MachinePrecision], 2.0], $MachinePrecision]), $MachinePrecision] * t$95$1), $MachinePrecision])), $MachinePrecision] - N[(-0.5 * t$95$1 + N[(0.16666666666666666 * t$95$0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] - 0.16666666666666666), $MachinePrecision] * eps + N[(t$95$1 * N[Tan[x], $MachinePrecision]), $MachinePrecision]), $MachinePrecision] * eps + t$95$1), $MachinePrecision] * eps), $MachinePrecision]]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := {\tan x}^{2}\\
t_1 := 1 - \left(-t\_0\right)\\
\mathsf{fma}\left(\mathsf{fma}\left(\left(\left(\left(\varepsilon \cdot x\right) \cdot 0.6666666666666666 - \left(-\frac{{\sin x}^{2}}{{\cos x}^{2}} \cdot t\_1\right)\right) - \mathsf{fma}\left(-0.5, t\_1, 0.16666666666666666 \cdot t\_0\right)\right) - 0.16666666666666666, \varepsilon, t\_1 \cdot \tan x\right), \varepsilon, t\_1\right) \cdot \varepsilon
\end{array}
\end{array}
Initial program 62.5%
Taylor expanded in eps around 0
Applied rewrites99.6%
Taylor expanded in x around 0
*-commutativeN/A
lower-*.f64N/A
lower-*.f64 99.5
Applied rewrites99.5%
Applied rewrites99.5%
Applied rewrites99.5%
(FPCore (x eps)
:precision binary64
(let* ((t_0 (- 1.0 (- (pow (tan x) 2.0)))))
(*
(fma
(fma
(fma
(fma 1.3333333333333333 x (* 0.6666666666666666 eps))
x
0.3333333333333333)
eps
(* t_0 (tan x)))
eps
t_0)
eps)))
/* Herbie alternative: eps-Taylor expansion with the quadratic coefficient
 * expanded in x (4/3, 2/3, 1/3 terms).  Keep operation order as generated. */
double code(double x, double eps) {
/* t_0 = 1 + tan(x)^2 = sec(x)^2 (written as 1.0 - -tan^2). */
double t_0 = 1.0 - -pow(tan(x), 2.0);
/* Nested fma: ((4/3 x + 2/3 eps) x + 1/3) eps^2 + sec^2 tan(x) eps + sec^2, all * eps. */
return fma(fma(fma(fma(1.3333333333333333, x, (0.6666666666666666 * eps)), x, 0.3333333333333333), eps, (t_0 * tan(x))), eps, t_0) * eps;
}
function code(x, eps) t_0 = Float64(1.0 - Float64(-(tan(x) ^ 2.0))) return Float64(fma(fma(fma(fma(1.3333333333333333, x, Float64(0.6666666666666666 * eps)), x, 0.3333333333333333), eps, Float64(t_0 * tan(x))), eps, t_0) * eps) end
code[x_, eps_] := Block[{t$95$0 = N[(1.0 - (-N[Power[N[Tan[x], $MachinePrecision], 2.0], $MachinePrecision])), $MachinePrecision]}, N[(N[(N[(N[(N[(1.3333333333333333 * x + N[(0.6666666666666666 * eps), $MachinePrecision]), $MachinePrecision] * x + 0.3333333333333333), $MachinePrecision] * eps + N[(t$95$0 * N[Tan[x], $MachinePrecision]), $MachinePrecision]), $MachinePrecision] * eps + t$95$0), $MachinePrecision] * eps), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := 1 - \left(-{\tan x}^{2}\right)\\
\mathsf{fma}\left(\mathsf{fma}\left(\mathsf{fma}\left(\mathsf{fma}\left(1.3333333333333333, x, 0.6666666666666666 \cdot \varepsilon\right), x, 0.3333333333333333\right), \varepsilon, t\_0 \cdot \tan x\right), \varepsilon, t\_0\right) \cdot \varepsilon
\end{array}
\end{array}
Initial program 62.5%
Taylor expanded in eps around 0
Applied rewrites99.6%
Taylor expanded in x around 0
*-commutativeN/A
lower-*.f64N/A
lower-*.f6499.5
Applied rewrites99.5%
Applied rewrites99.5%
Taylor expanded in x around 0
Applied rewrites99.3%
(FPCore (x eps) :precision binary64 (let* ((t_0 (- 1.0 (- (pow (tan x) 2.0))))) (* (fma (* t_0 (tan x)) eps t_0) eps)))
double code(double x, double eps) {
double t_0 = 1.0 - -pow(tan(x), 2.0);
return fma((t_0 * tan(x)), eps, t_0) * eps;
}
function code(x, eps) t_0 = Float64(1.0 - Float64(-(tan(x) ^ 2.0))) return Float64(fma(Float64(t_0 * tan(x)), eps, t_0) * eps) end
code[x_, eps_] := Block[{t$95$0 = N[(1.0 - (-N[Power[N[Tan[x], $MachinePrecision], 2.0], $MachinePrecision])), $MachinePrecision]}, N[(N[(N[(t$95$0 * N[Tan[x], $MachinePrecision]), $MachinePrecision] * eps + t$95$0), $MachinePrecision] * eps), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := 1 - \left(-{\tan x}^{2}\right)\\
\mathsf{fma}\left(t\_0 \cdot \tan x, \varepsilon, t\_0\right) \cdot \varepsilon
\end{array}
\end{array}
Initial program 62.5%
Taylor expanded in eps around 0
*-commutativeN/A
lower-*.f64N/A
Applied rewrites99.3%
Applied rewrites99.3%
(FPCore (x eps)
:precision binary64
(*
(-
(fma
(fma
(+
(fma
(fma
(fma 1.8888888888888888 (* eps eps) 1.3333333333333333)
x
(* 1.3333333333333333 eps))
x
(* 0.6666666666666666 (* eps eps)))
1.0)
x
(* 0.3333333333333333 eps))
eps
1.0)
(- (pow (tan x) 2.0)))
eps))
/* Herbie alternative: deep x-Taylor polynomial (coefficients 17/9, 4/3, 2/3,
 * 1/3) plus the exact tan(x)^2 term, all scaled by eps.  Generated operation
 * order is intentional; do not reassociate. */
double code(double x, double eps) {
/* Nested fma Horner evaluation in x and eps; `- -pow(...)` adds tan(x)^2. */
return (fma(fma((fma(fma(fma(1.8888888888888888, (eps * eps), 1.3333333333333333), x, (1.3333333333333333 * eps)), x, (0.6666666666666666 * (eps * eps))) + 1.0), x, (0.3333333333333333 * eps)), eps, 1.0) - -pow(tan(x), 2.0)) * eps;
}
function code(x, eps) return Float64(Float64(fma(fma(Float64(fma(fma(fma(1.8888888888888888, Float64(eps * eps), 1.3333333333333333), x, Float64(1.3333333333333333 * eps)), x, Float64(0.6666666666666666 * Float64(eps * eps))) + 1.0), x, Float64(0.3333333333333333 * eps)), eps, 1.0) - Float64(-(tan(x) ^ 2.0))) * eps) end
code[x_, eps_] := N[(N[(N[(N[(N[(N[(N[(N[(1.8888888888888888 * N[(eps * eps), $MachinePrecision] + 1.3333333333333333), $MachinePrecision] * x + N[(1.3333333333333333 * eps), $MachinePrecision]), $MachinePrecision] * x + N[(0.6666666666666666 * N[(eps * eps), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] + 1.0), $MachinePrecision] * x + N[(0.3333333333333333 * eps), $MachinePrecision]), $MachinePrecision] * eps + 1.0), $MachinePrecision] - (-N[Power[N[Tan[x], $MachinePrecision], 2.0], $MachinePrecision])), $MachinePrecision] * eps), $MachinePrecision]
\begin{array}{l}
\\
\left(\mathsf{fma}\left(\mathsf{fma}\left(\mathsf{fma}\left(\mathsf{fma}\left(\mathsf{fma}\left(1.8888888888888888, \varepsilon \cdot \varepsilon, 1.3333333333333333\right), x, 1.3333333333333333 \cdot \varepsilon\right), x, 0.6666666666666666 \cdot \left(\varepsilon \cdot \varepsilon\right)\right) + 1, x, 0.3333333333333333 \cdot \varepsilon\right), \varepsilon, 1\right) - \left(-{\tan x}^{2}\right)\right) \cdot \varepsilon
\end{array}
Initial program 62.5%
Taylor expanded in eps around 0
Applied rewrites99.6%
Taylor expanded in x around 0
Applied rewrites98.9%
(FPCore (x eps) :precision binary64 (* (- (fma eps x 1.0) (- (pow (tan x) 2.0))) eps))
double code(double x, double eps) {
return (fma(eps, x, 1.0) - -pow(tan(x), 2.0)) * eps;
}
function code(x, eps) return Float64(Float64(fma(eps, x, 1.0) - Float64(-(tan(x) ^ 2.0))) * eps) end
code[x_, eps_] := N[(N[(N[(eps * x + 1.0), $MachinePrecision] - (-N[Power[N[Tan[x], $MachinePrecision], 2.0], $MachinePrecision])), $MachinePrecision] * eps), $MachinePrecision]
\begin{array}{l}
\\
\left(\mathsf{fma}\left(\varepsilon, x, 1\right) - \left(-{\tan x}^{2}\right)\right) \cdot \varepsilon
\end{array}
Initial program 62.5%
Taylor expanded in eps around 0
*-commutativeN/A
lower-*.f64N/A
Applied rewrites99.3%
Taylor expanded in x around 0
*-lft-identity99.0
Applied rewrites99.0%
(FPCore (x eps) :precision binary64 (* (- 1.0 (- (pow (tan (+ x PI)) 2.0))) eps))
/* Herbie alternative: eps * (1 + tan(x + pi)^2).  tan has period pi, so the
 * pi shift is mathematically a no-op (kept to match the generated variant). */
double code(double x, double eps) {
    double shifted = tan(x + ((double) M_PI));
    double sec2 = 1.0 + pow(shifted, 2.0);  /* == 1.0 - -pow(...), bit-identical */
    return sec2 * eps;
}
public static double code(double x, double eps) {
return (1.0 - -Math.pow(Math.tan((x + Math.PI)), 2.0)) * eps;
}
def code(x, eps): return (1.0 - -math.pow(math.tan((x + math.pi)), 2.0)) * eps
function code(x, eps) return Float64(Float64(1.0 - Float64(-(tan(Float64(x + pi)) ^ 2.0))) * eps) end
function tmp = code(x, eps) tmp = (1.0 - -(tan((x + pi)) ^ 2.0)) * eps; end
code[x_, eps_] := N[(N[(1.0 - (-N[Power[N[Tan[N[(x + Pi), $MachinePrecision]], $MachinePrecision], 2.0], $MachinePrecision])), $MachinePrecision] * eps), $MachinePrecision]
\begin{array}{l}
\\
\left(1 - \left(-{\tan \left(x + \pi\right)}^{2}\right)\right) \cdot \varepsilon
\end{array}
Initial program 62.5%
Taylor expanded in eps around 0
*-commutativeN/A
lower-*.f64N/A
lower--.f64N/A
mul-1-negN/A
unpow2N/A
unpow2N/A
frac-timesN/A
tan-quotN/A
tan-quotN/A
lower-neg.f64N/A
pow2N/A
lower-pow.f64N/A
lift-tan.f6498.9
Applied rewrites98.9%
lift-tan.f64N/A
tan-+PI-revN/A
lower-tan.f64N/A
lower-+.f64N/A
lower-PI.f6498.9
Applied rewrites98.9%
(FPCore (x eps) :precision binary64 (* (- 1.0 (- (pow (tan x) 2.0))) eps))
double code(double x, double eps) {
return (1.0 - -pow(tan(x), 2.0)) * eps;
}
module fmin_fmax_functions
implicit none
private
public fmax
public fmin
interface fmax
module procedure fmax88
module procedure fmax44
module procedure fmax84
module procedure fmax48
end interface
interface fmin
module procedure fmin88
module procedure fmin44
module procedure fmin84
module procedure fmin48
end interface
contains
real(8) function fmax88(x, y) result (res)
real(8), intent (in) :: x
real(8), intent (in) :: y
res = merge(y, merge(x, max(x, y), y /= y), x /= x)
end function
real(4) function fmax44(x, y) result (res)
real(4), intent (in) :: x
real(4), intent (in) :: y
res = merge(y, merge(x, max(x, y), y /= y), x /= x)
end function
real(8) function fmax84(x, y) result(res)
real(8), intent (in) :: x
real(4), intent (in) :: y
res = merge(dble(y), merge(x, max(x, dble(y)), y /= y), x /= x)
end function
real(8) function fmax48(x, y) result(res)
real(4), intent (in) :: x
real(8), intent (in) :: y
res = merge(y, merge(dble(x), max(dble(x), y), y /= y), x /= x)
end function
real(8) function fmin88(x, y) result (res)
real(8), intent (in) :: x
real(8), intent (in) :: y
res = merge(y, merge(x, min(x, y), y /= y), x /= x)
end function
real(4) function fmin44(x, y) result (res)
real(4), intent (in) :: x
real(4), intent (in) :: y
res = merge(y, merge(x, min(x, y), y /= y), x /= x)
end function
real(8) function fmin84(x, y) result(res)
real(8), intent (in) :: x
real(4), intent (in) :: y
res = merge(dble(y), merge(x, min(x, dble(y)), y /= y), x /= x)
end function
real(8) function fmin48(x, y) result(res)
real(4), intent (in) :: x
real(8), intent (in) :: y
res = merge(y, merge(dble(x), min(dble(x), y), y /= y), x /= x)
end function
end module
! Herbie alternative: (1 + tan(x)**2) * eps, i.e. sec(x)**2 * eps — the
! first-order Taylor term of tan(x+eps) - tan(x).
! Fix: add `implicit none`, matching every other program unit in this file.
real(8) function code(x, eps)
use fmin_fmax_functions
implicit none
real(8), intent (in) :: x
real(8), intent (in) :: eps
code = (1.0d0 - -(tan(x) ** 2.0d0)) * eps
end function
public static double code(double x, double eps) {
return (1.0 - -Math.pow(Math.tan(x), 2.0)) * eps;
}
def code(x, eps): return (1.0 - -math.pow(math.tan(x), 2.0)) * eps
function code(x, eps) return Float64(Float64(1.0 - Float64(-(tan(x) ^ 2.0))) * eps) end
function tmp = code(x, eps) tmp = (1.0 - -(tan(x) ^ 2.0)) * eps; end
code[x_, eps_] := N[(N[(1.0 - (-N[Power[N[Tan[x], $MachinePrecision], 2.0], $MachinePrecision])), $MachinePrecision] * eps), $MachinePrecision]
\begin{array}{l}
\\
\left(1 - \left(-{\tan x}^{2}\right)\right) \cdot \varepsilon
\end{array}
Initial program 62.5%
Taylor expanded in eps around 0
*-commutativeN/A
lower-*.f64N/A
lower--.f64N/A
mul-1-negN/A
unpow2N/A
unpow2N/A
frac-timesN/A
tan-quotN/A
tan-quotN/A
lower-neg.f64N/A
pow2N/A
lower-pow.f64N/A
lift-tan.f6498.9
Applied rewrites98.9%
(FPCore (x eps)
:precision binary64
(*
(-
(fma
eps
(* (fma (fma 1.1333333333333333 (* x x) 1.3333333333333333) (* x x) 1.0) x)
1.0)
(-
(pow
(*
(fma
(fma
(fma 0.05396825396825397 (* x x) 0.13333333333333333)
(* x x)
0.3333333333333333)
(* x x)
1.0)
x)
2.0)))
eps))
/* Herbie alternative: both tan factors replaced by odd polynomial
 * approximations in x (coefficients 1/3, 2/15, 17/315 and 4/3, 17/15),
 * keeping the eps structure.  Generated operation order is intentional. */
double code(double x, double eps) {
/* fma(eps, P(x)*x, 1.0) models 1 + eps*tan-like term; pow(...,2.0) is tan(x)^2 approx. */
return (fma(eps, (fma(fma(1.1333333333333333, (x * x), 1.3333333333333333), (x * x), 1.0) * x), 1.0) - -pow((fma(fma(fma(0.05396825396825397, (x * x), 0.13333333333333333), (x * x), 0.3333333333333333), (x * x), 1.0) * x), 2.0)) * eps;
}
function code(x, eps) return Float64(Float64(fma(eps, Float64(fma(fma(1.1333333333333333, Float64(x * x), 1.3333333333333333), Float64(x * x), 1.0) * x), 1.0) - Float64(-(Float64(fma(fma(fma(0.05396825396825397, Float64(x * x), 0.13333333333333333), Float64(x * x), 0.3333333333333333), Float64(x * x), 1.0) * x) ^ 2.0))) * eps) end
code[x_, eps_] := N[(N[(N[(eps * N[(N[(N[(1.1333333333333333 * N[(x * x), $MachinePrecision] + 1.3333333333333333), $MachinePrecision] * N[(x * x), $MachinePrecision] + 1.0), $MachinePrecision] * x), $MachinePrecision] + 1.0), $MachinePrecision] - (-N[Power[N[(N[(N[(N[(0.05396825396825397 * N[(x * x), $MachinePrecision] + 0.13333333333333333), $MachinePrecision] * N[(x * x), $MachinePrecision] + 0.3333333333333333), $MachinePrecision] * N[(x * x), $MachinePrecision] + 1.0), $MachinePrecision] * x), $MachinePrecision], 2.0], $MachinePrecision])), $MachinePrecision] * eps), $MachinePrecision]
\begin{array}{l}
\\
\left(\mathsf{fma}\left(\varepsilon, \mathsf{fma}\left(\mathsf{fma}\left(1.1333333333333333, x \cdot x, 1.3333333333333333\right), x \cdot x, 1\right) \cdot x, 1\right) - \left(-{\left(\mathsf{fma}\left(\mathsf{fma}\left(\mathsf{fma}\left(0.05396825396825397, x \cdot x, 0.13333333333333333\right), x \cdot x, 0.3333333333333333\right), x \cdot x, 1\right) \cdot x\right)}^{2}\right)\right) \cdot \varepsilon
\end{array}
Initial program 62.5%
Taylor expanded in eps around 0
*-commutativeN/A
lower-*.f64N/A
Applied rewrites99.3%
Taylor expanded in x around 0
*-lft-identityN/A
*-commutativeN/A
lower-*.f64N/A
+-commutativeN/A
*-commutativeN/A
lower-fma.f64N/A
+-commutativeN/A
lower-fma.f64N/A
pow2N/A
lift-*.f64N/A
pow2N/A
lift-*.f6498.7
Applied rewrites98.7%
Taylor expanded in x around 0
*-commutativeN/A
lower-*.f64N/A
+-commutativeN/A
*-commutativeN/A
lower-fma.f64N/A
+-commutativeN/A
*-commutativeN/A
lower-fma.f64N/A
+-commutativeN/A
lower-fma.f64N/A
pow2N/A
lift-*.f64N/A
pow2N/A
lift-*.f64N/A
pow2N/A
lift-*.f6498.5
Applied rewrites98.5%
(FPCore (x eps)
:precision binary64
(*
(-
(fma
eps
(* (fma (fma 1.1333333333333333 (* x x) 1.3333333333333333) (* x x) 1.0) x)
1.0)
(*
(-
(*
(-
(* (- (* -0.19682539682539682 (* x x)) 0.37777777777777777) (* x x))
0.6666666666666666)
(* x x))
1.0)
(* x x)))
eps))
/* Herbie alternative: like the previous variant, but the squared-polynomial
 * term is expanded into a single even polynomial in x*x (coefficients
 * -62/315, -17/45, -2/3).  Generated operation order is intentional. */
double code(double x, double eps) {
/* Left fma: eps * odd-polynomial tan approximation; right: -tan(x)^2 approximation. */
return (fma(eps, (fma(fma(1.1333333333333333, (x * x), 1.3333333333333333), (x * x), 1.0) * x), 1.0) - (((((((-0.19682539682539682 * (x * x)) - 0.37777777777777777) * (x * x)) - 0.6666666666666666) * (x * x)) - 1.0) * (x * x))) * eps;
}
function code(x, eps) return Float64(Float64(fma(eps, Float64(fma(fma(1.1333333333333333, Float64(x * x), 1.3333333333333333), Float64(x * x), 1.0) * x), 1.0) - Float64(Float64(Float64(Float64(Float64(Float64(Float64(-0.19682539682539682 * Float64(x * x)) - 0.37777777777777777) * Float64(x * x)) - 0.6666666666666666) * Float64(x * x)) - 1.0) * Float64(x * x))) * eps) end
code[x_, eps_] := N[(N[(N[(eps * N[(N[(N[(1.1333333333333333 * N[(x * x), $MachinePrecision] + 1.3333333333333333), $MachinePrecision] * N[(x * x), $MachinePrecision] + 1.0), $MachinePrecision] * x), $MachinePrecision] + 1.0), $MachinePrecision] - N[(N[(N[(N[(N[(N[(N[(-0.19682539682539682 * N[(x * x), $MachinePrecision]), $MachinePrecision] - 0.37777777777777777), $MachinePrecision] * N[(x * x), $MachinePrecision]), $MachinePrecision] - 0.6666666666666666), $MachinePrecision] * N[(x * x), $MachinePrecision]), $MachinePrecision] - 1.0), $MachinePrecision] * N[(x * x), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] * eps), $MachinePrecision]
\begin{array}{l}
\\
\left(\mathsf{fma}\left(\varepsilon, \mathsf{fma}\left(\mathsf{fma}\left(1.1333333333333333, x \cdot x, 1.3333333333333333\right), x \cdot x, 1\right) \cdot x, 1\right) - \left(\left(\left(-0.19682539682539682 \cdot \left(x \cdot x\right) - 0.37777777777777777\right) \cdot \left(x \cdot x\right) - 0.6666666666666666\right) \cdot \left(x \cdot x\right) - 1\right) \cdot \left(x \cdot x\right)\right) \cdot \varepsilon
\end{array}
Initial program 62.5%
Taylor expanded in eps around 0
*-commutativeN/A
lower-*.f64N/A
Applied rewrites99.3%
Taylor expanded in x around 0
*-lft-identityN/A
*-commutativeN/A
lower-*.f64N/A
+-commutativeN/A
*-commutativeN/A
lower-fma.f64N/A
+-commutativeN/A
lower-fma.f64N/A
pow2N/A
lift-*.f64N/A
pow2N/A
lift-*.f6498.7
Applied rewrites98.7%
Taylor expanded in x around 0
*-commutativeN/A
lower-*.f64N/A
Applied rewrites98.5%
(FPCore (x eps)
:precision binary64
(*
(-
1.0
(-
(*
(fma
(fma
(fma 0.19682539682539682 (* x x) 0.37777777777777777)
(* x x)
0.6666666666666666)
(* x x)
1.0)
(* x x))))
eps))
/* Herbie alternative: eps * (1 + Q(x*x) * x*x) where Q is an even polynomial
 * approximating tan(x)^2 / x^2 (coefficients 62/315, 17/45, 2/3).
 * Generated operation order is intentional. */
double code(double x, double eps) {
/* Nested fma Horner in x*x; `- -(...)` adds the tan^2 approximation. */
return (1.0 - -(fma(fma(fma(0.19682539682539682, (x * x), 0.37777777777777777), (x * x), 0.6666666666666666), (x * x), 1.0) * (x * x))) * eps;
}
function code(x, eps) return Float64(Float64(1.0 - Float64(-Float64(fma(fma(fma(0.19682539682539682, Float64(x * x), 0.37777777777777777), Float64(x * x), 0.6666666666666666), Float64(x * x), 1.0) * Float64(x * x)))) * eps) end
code[x_, eps_] := N[(N[(1.0 - (-N[(N[(N[(N[(0.19682539682539682 * N[(x * x), $MachinePrecision] + 0.37777777777777777), $MachinePrecision] * N[(x * x), $MachinePrecision] + 0.6666666666666666), $MachinePrecision] * N[(x * x), $MachinePrecision] + 1.0), $MachinePrecision] * N[(x * x), $MachinePrecision]), $MachinePrecision])), $MachinePrecision] * eps), $MachinePrecision]
\begin{array}{l}
\\
\left(1 - \left(-\mathsf{fma}\left(\mathsf{fma}\left(\mathsf{fma}\left(0.19682539682539682, x \cdot x, 0.37777777777777777\right), x \cdot x, 0.6666666666666666\right), x \cdot x, 1\right) \cdot \left(x \cdot x\right)\right)\right) \cdot \varepsilon
\end{array}
Initial program 62.5%
Taylor expanded in eps around 0
*-commutativeN/A
lower-*.f64N/A
lower--.f64N/A
mul-1-negN/A
unpow2N/A
unpow2N/A
frac-timesN/A
tan-quotN/A
tan-quotN/A
lower-neg.f64N/A
pow2N/A
lower-pow.f64N/A
lift-tan.f6498.9
Applied rewrites98.9%
Taylor expanded in x around 0
*-commutativeN/A
lower-*.f64N/A
Applied rewrites98.4%
(FPCore (x eps) :precision binary64 (* (fma (fma (fma (* 1.3333333333333333 eps) x 1.0) x eps) x 1.0) eps))
double code(double x, double eps) {
return fma(fma(fma((1.3333333333333333 * eps), x, 1.0), x, eps), x, 1.0) * eps;
}
function code(x, eps) return Float64(fma(fma(fma(Float64(1.3333333333333333 * eps), x, 1.0), x, eps), x, 1.0) * eps) end
code[x_, eps_] := N[(N[(N[(N[(N[(1.3333333333333333 * eps), $MachinePrecision] * x + 1.0), $MachinePrecision] * x + eps), $MachinePrecision] * x + 1.0), $MachinePrecision] * eps), $MachinePrecision]
\begin{array}{l}
\\
\mathsf{fma}\left(\mathsf{fma}\left(\mathsf{fma}\left(1.3333333333333333 \cdot \varepsilon, x, 1\right), x, \varepsilon\right), x, 1\right) \cdot \varepsilon
\end{array}
Initial program 62.5%
Taylor expanded in eps around 0
*-commutativeN/A
lower-*.f64N/A
Applied rewrites99.3%
Taylor expanded in x around 0
+-commutativeN/A
*-commutativeN/A
lower-fma.f64N/A
Applied rewrites98.3%
(FPCore (x eps) :precision binary64 (fma (fma eps eps (* eps x)) x eps))
double code(double x, double eps) {
return fma(fma(eps, eps, (eps * x)), x, eps);
}
function code(x, eps) return fma(fma(eps, eps, Float64(eps * x)), x, eps) end
code[x_, eps_] := N[(N[(eps * eps + N[(eps * x), $MachinePrecision]), $MachinePrecision] * x + eps), $MachinePrecision]
\begin{array}{l}
\\
\mathsf{fma}\left(\mathsf{fma}\left(\varepsilon, \varepsilon, \varepsilon \cdot x\right), x, \varepsilon\right)
\end{array}
Initial program 62.5%
Taylor expanded in eps around 0
*-commutativeN/A
lower-*.f64N/A
Applied rewrites99.3%
Taylor expanded in x around 0
+-commutativeN/A
*-commutativeN/A
lower-fma.f64N/A
+-commutativeN/A
unpow2N/A
lower-fma.f64N/A
lower-*.f6498.4
Applied rewrites98.4%
(FPCore (x eps) :precision binary64 (* (fma (+ eps x) x 1.0) eps))
double code(double x, double eps) {
return fma((eps + x), x, 1.0) * eps;
}
function code(x, eps) return Float64(fma(Float64(eps + x), x, 1.0) * eps) end
code[x_, eps_] := N[(N[(N[(eps + x), $MachinePrecision] * x + 1.0), $MachinePrecision] * eps), $MachinePrecision]
\begin{array}{l}
\\
\mathsf{fma}\left(\varepsilon + x, x, 1\right) \cdot \varepsilon
\end{array}
Initial program 62.5%
Taylor expanded in eps around 0
*-commutativeN/A
lower-*.f64N/A
Applied rewrites99.3%
Taylor expanded in x around 0
+-commutativeN/A
+-commutativeN/A
*-commutativeN/A
lower-fma.f64N/A
+-commutativeN/A
lower-+.f6498.4
Applied rewrites98.4%
(FPCore (x eps) :precision binary64 (fma (* x x) eps eps))
double code(double x, double eps) {
return fma((x * x), eps, eps);
}
function code(x, eps) return fma(Float64(x * x), eps, eps) end
code[x_, eps_] := N[(N[(x * x), $MachinePrecision] * eps + eps), $MachinePrecision]
\begin{array}{l}
\\
\mathsf{fma}\left(x \cdot x, \varepsilon, \varepsilon\right)
\end{array}
Initial program 62.5%
Taylor expanded in eps around 0
*-commutativeN/A
lower-*.f64N/A
lower--.f64N/A
mul-1-negN/A
unpow2N/A
unpow2N/A
frac-timesN/A
tan-quotN/A
tan-quotN/A
lower-neg.f64N/A
pow2N/A
lower-pow.f64N/A
lift-tan.f6498.9
Applied rewrites98.9%
Taylor expanded in x around 0
+-commutativeN/A
*-commutativeN/A
lower-fma.f64N/A
pow2N/A
lift-*.f6498.3
Applied rewrites98.3%
(FPCore (x eps) :precision binary64 eps)
/* Herbie alternative: zeroth-order model — tan(x+eps)-tan(x) ~ eps. */
double code(double x, double eps) {
    (void) x;  /* x drops out entirely at this order */
    return eps;
}
module fmin_fmax_functions
implicit none
private
public fmax
public fmin
interface fmax
module procedure fmax88
module procedure fmax44
module procedure fmax84
module procedure fmax48
end interface
interface fmin
module procedure fmin88
module procedure fmin44
module procedure fmin84
module procedure fmin48
end interface
contains
real(8) function fmax88(x, y) result (res)
real(8), intent (in) :: x
real(8), intent (in) :: y
res = merge(y, merge(x, max(x, y), y /= y), x /= x)
end function
real(4) function fmax44(x, y) result (res)
real(4), intent (in) :: x
real(4), intent (in) :: y
res = merge(y, merge(x, max(x, y), y /= y), x /= x)
end function
real(8) function fmax84(x, y) result(res)
real(8), intent (in) :: x
real(4), intent (in) :: y
res = merge(dble(y), merge(x, max(x, dble(y)), y /= y), x /= x)
end function
real(8) function fmax48(x, y) result(res)
real(4), intent (in) :: x
real(8), intent (in) :: y
res = merge(y, merge(dble(x), max(dble(x), y), y /= y), x /= x)
end function
real(8) function fmin88(x, y) result (res)
real(8), intent (in) :: x
real(8), intent (in) :: y
res = merge(y, merge(x, min(x, y), y /= y), x /= x)
end function
real(4) function fmin44(x, y) result (res)
real(4), intent (in) :: x
real(4), intent (in) :: y
res = merge(y, merge(x, min(x, y), y /= y), x /= x)
end function
real(8) function fmin84(x, y) result(res)
real(8), intent (in) :: x
real(4), intent (in) :: y
res = merge(dble(y), merge(x, min(x, dble(y)), y /= y), x /= x)
end function
real(8) function fmin48(x, y) result(res)
real(4), intent (in) :: x
real(8), intent (in) :: y
res = merge(y, merge(dble(x), min(dble(x), y), y /= y), x /= x)
end function
end module
real(8) function code(x, eps)
use fmin_fmax_functions
real(8), intent (in) :: x
real(8), intent (in) :: eps
code = eps
end function
public static double code(double x, double eps) {
return eps;
}
def code(x, eps): return eps
function code(x, eps) return eps end
function tmp = code(x, eps) tmp = eps; end
code[x_, eps_] := eps
\begin{array}{l}
\\
\varepsilon
\end{array}
Initial program 62.5%
Taylor expanded in eps around 0
*-commutativeN/A
lower-*.f64N/A
Applied rewrites99.3%
Taylor expanded in x around 0
Applied rewrites97.9%
(FPCore (x eps) :precision binary64 (/ (sin eps) (* (cos x) (cos (+ x eps)))))
double code(double x, double eps) {
return sin(eps) / (cos(x) * cos((x + eps)));
}
module fmin_fmax_functions
implicit none
private
public fmax
public fmin
! Generic NaN-aware maximum over every real(4)/real(8) operand pairing.
interface fmax
module procedure fmax88
module procedure fmax44
module procedure fmax84
module procedure fmax48
end interface
! Generic NaN-aware minimum over every real(4)/real(8) operand pairing.
interface fmin
module procedure fmin88
module procedure fmin44
module procedure fmin84
module procedure fmin48
end interface
contains
! Each routine mirrors C fmax/fmin: if one operand is NaN, the other is returned.
real(8) function fmax88(x, y) result (res)
real(8), intent (in) :: x
real(8), intent (in) :: y
if (x /= x) then
res = y
else if (y /= y) then
res = x
else
res = max(x, y)
end if
end function
real(4) function fmax44(x, y) result (res)
real(4), intent (in) :: x
real(4), intent (in) :: y
if (x /= x) then
res = y
else if (y /= y) then
res = x
else
res = max(x, y)
end if
end function
real(8) function fmax84(x, y) result(res)
real(8), intent (in) :: x
real(4), intent (in) :: y
if (x /= x) then
res = dble(y)
else if (y /= y) then
res = x
else
res = max(x, dble(y))
end if
end function
real(8) function fmax48(x, y) result(res)
real(4), intent (in) :: x
real(8), intent (in) :: y
if (x /= x) then
res = y
else if (y /= y) then
res = dble(x)
else
res = max(dble(x), y)
end if
end function
real(8) function fmin88(x, y) result (res)
real(8), intent (in) :: x
real(8), intent (in) :: y
if (x /= x) then
res = y
else if (y /= y) then
res = x
else
res = min(x, y)
end if
end function
real(4) function fmin44(x, y) result (res)
real(4), intent (in) :: x
real(4), intent (in) :: y
if (x /= x) then
res = y
else if (y /= y) then
res = x
else
res = min(x, y)
end if
end function
real(8) function fmin84(x, y) result(res)
real(8), intent (in) :: x
real(4), intent (in) :: y
if (x /= x) then
res = dble(y)
else if (y /= y) then
res = x
else
res = min(x, dble(y))
end if
end function
real(8) function fmin48(x, y) result(res)
real(4), intent (in) :: x
real(8), intent (in) :: y
if (x /= x) then
res = y
else if (y /= y) then
res = dble(x)
else
res = min(dble(x), y)
end if
end function
end module
! tan(x + eps) - tan(x) rewritten as sin(eps) / (cos(x) * cos(x + eps)),
! which avoids the catastrophic cancellation of the direct subtraction.
real(8) function code(x, eps)
use fmin_fmax_functions
real(8), intent (in) :: x
real(8), intent (in) :: eps
real(8) :: denom
denom = cos(x) * cos(x + eps)
code = sin(eps) / denom
end function
public static double code(double x, double eps) {
    // Cancellation-free rewrite of tan(x + eps) - tan(x).
    double denom = Math.cos(x) * Math.cos(x + eps);
    return Math.sin(eps) / denom;
}
def code(x, eps):
    """tan(x + eps) - tan(x) in the cancellation-free form sin(eps) / (cos(x) * cos(x + eps))."""
    denom = math.cos(x) * math.cos(x + eps)
    return math.sin(eps) / denom
# Cancellation-free rewrite of tan(x + eps) - tan(x).
function code(x, eps)
    s = Float64(x + eps)
    d = Float64(cos(x) * cos(s))
    return Float64(sin(eps) / d)
end
function tmp = code(x, eps)
% Cancellation-free rewrite of tan(x + eps) - tan(x).
d = cos(x) * cos(x + eps);
tmp = sin(eps) / d;
end
(* Cancellation-free rewrite of tan(x + eps) - tan(x): sin(eps) / (cos(x) cos(x + eps)), evaluated at machine precision. *)
code[x_, eps_] := N[(N[Sin[eps], $MachinePrecision] / N[(N[Cos[x], $MachinePrecision] * N[Cos[N[(x + eps), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{\sin \varepsilon}{\cos x \cdot \cos \left(x + \varepsilon\right)}
\end{array}
(FPCore (x eps) :precision binary64 (- (/ (+ (tan x) (tan eps)) (- 1.0 (* (tan x) (tan eps)))) (tan x)))
double code(double x, double eps) {
return ((tan(x) + tan(eps)) / (1.0 - (tan(x) * tan(eps)))) - tan(x);
}
module fmin_fmax_functions
implicit none
private
public fmax
public fmin
! Generic NaN-aware maximum over every real(4)/real(8) operand pairing.
interface fmax
module procedure fmax88
module procedure fmax44
module procedure fmax84
module procedure fmax48
end interface
! Generic NaN-aware minimum over every real(4)/real(8) operand pairing.
interface fmin
module procedure fmin88
module procedure fmin44
module procedure fmin84
module procedure fmin48
end interface
contains
! Each routine mirrors C fmax/fmin: if one operand is NaN, the other is returned.
real(8) function fmax88(x, y) result (res)
real(8), intent (in) :: x
real(8), intent (in) :: y
if (x /= x) then
res = y
else if (y /= y) then
res = x
else
res = max(x, y)
end if
end function
real(4) function fmax44(x, y) result (res)
real(4), intent (in) :: x
real(4), intent (in) :: y
if (x /= x) then
res = y
else if (y /= y) then
res = x
else
res = max(x, y)
end if
end function
real(8) function fmax84(x, y) result(res)
real(8), intent (in) :: x
real(4), intent (in) :: y
if (x /= x) then
res = dble(y)
else if (y /= y) then
res = x
else
res = max(x, dble(y))
end if
end function
real(8) function fmax48(x, y) result(res)
real(4), intent (in) :: x
real(8), intent (in) :: y
if (x /= x) then
res = y
else if (y /= y) then
res = dble(x)
else
res = max(dble(x), y)
end if
end function
real(8) function fmin88(x, y) result (res)
real(8), intent (in) :: x
real(8), intent (in) :: y
if (x /= x) then
res = y
else if (y /= y) then
res = x
else
res = min(x, y)
end if
end function
real(4) function fmin44(x, y) result (res)
real(4), intent (in) :: x
real(4), intent (in) :: y
if (x /= x) then
res = y
else if (y /= y) then
res = x
else
res = min(x, y)
end if
end function
real(8) function fmin84(x, y) result(res)
real(8), intent (in) :: x
real(4), intent (in) :: y
if (x /= x) then
res = dble(y)
else if (y /= y) then
res = x
else
res = min(x, dble(y))
end if
end function
real(8) function fmin48(x, y) result(res)
real(4), intent (in) :: x
real(8), intent (in) :: y
if (x /= x) then
res = y
else if (y /= y) then
res = dble(x)
else
res = min(dble(x), y)
end if
end function
end module
! Angle-addition rewrite: tan(x + eps) expanded via the tan sum formula
! before subtracting tan(x).
real(8) function code(x, eps)
use fmin_fmax_functions
real(8), intent (in) :: x
real(8), intent (in) :: eps
real(8) :: tx, te
tx = tan(x)
te = tan(eps)
code = (tx + te) / (1.0d0 - tx * te) - tx
end function
public static double code(double x, double eps) {
return ((Math.tan(x) + Math.tan(eps)) / (1.0 - (Math.tan(x) * Math.tan(eps)))) - Math.tan(x);
}
def code(x, eps):
    """tan(x + eps) - tan(x) with tan(x + eps) expanded via the tangent sum formula."""
    tx = math.tan(x)
    te = math.tan(eps)
    return (tx + te) / (1.0 - tx * te) - tx
# tan(x + eps) - tan(x) with tan(x + eps) expanded via the tangent sum formula.
function code(x, eps)
    tx = tan(x)
    te = tan(eps)
    num = Float64(tx + te)
    den = Float64(1.0 - Float64(tx * te))
    return Float64(Float64(num / den) - tx)
end
function tmp = code(x, eps)
% tan(x + eps) - tan(x) with tan(x + eps) expanded via the tangent sum formula.
tx = tan(x);
te = tan(eps);
tmp = (tx + te) / (1.0 - tx * te) - tx;
end
(* tan(x + eps) - tan(x) with tan(x + eps) expanded via the tangent sum formula, evaluated at machine precision. *)
code[x_, eps_] := N[(N[(N[(N[Tan[x], $MachinePrecision] + N[Tan[eps], $MachinePrecision]), $MachinePrecision] / N[(1.0 - N[(N[Tan[x], $MachinePrecision] * N[Tan[eps], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] - N[Tan[x], $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{\tan x + \tan \varepsilon}{1 - \tan x \cdot \tan \varepsilon} - \tan x
\end{array}
(FPCore (x eps) :precision binary64 (+ eps (* (* eps (tan x)) (tan x))))
double code(double x, double eps) {
return eps + ((eps * tan(x)) * tan(x));
}
module fmin_fmax_functions
implicit none
private
public fmax
public fmin
! Generic NaN-aware maximum over every real(4)/real(8) operand pairing.
interface fmax
module procedure fmax88
module procedure fmax44
module procedure fmax84
module procedure fmax48
end interface
! Generic NaN-aware minimum over every real(4)/real(8) operand pairing.
interface fmin
module procedure fmin88
module procedure fmin44
module procedure fmin84
module procedure fmin48
end interface
contains
! Each routine mirrors C fmax/fmin: if one operand is NaN, the other is returned.
real(8) function fmax88(x, y) result (res)
real(8), intent (in) :: x
real(8), intent (in) :: y
if (x /= x) then
res = y
else if (y /= y) then
res = x
else
res = max(x, y)
end if
end function
real(4) function fmax44(x, y) result (res)
real(4), intent (in) :: x
real(4), intent (in) :: y
if (x /= x) then
res = y
else if (y /= y) then
res = x
else
res = max(x, y)
end if
end function
real(8) function fmax84(x, y) result(res)
real(8), intent (in) :: x
real(4), intent (in) :: y
if (x /= x) then
res = dble(y)
else if (y /= y) then
res = x
else
res = max(x, dble(y))
end if
end function
real(8) function fmax48(x, y) result(res)
real(4), intent (in) :: x
real(8), intent (in) :: y
if (x /= x) then
res = y
else if (y /= y) then
res = dble(x)
else
res = max(dble(x), y)
end if
end function
real(8) function fmin88(x, y) result (res)
real(8), intent (in) :: x
real(8), intent (in) :: y
if (x /= x) then
res = y
else if (y /= y) then
res = x
else
res = min(x, y)
end if
end function
real(4) function fmin44(x, y) result (res)
real(4), intent (in) :: x
real(4), intent (in) :: y
if (x /= x) then
res = y
else if (y /= y) then
res = x
else
res = min(x, y)
end if
end function
real(8) function fmin84(x, y) result(res)
real(8), intent (in) :: x
real(4), intent (in) :: y
if (x /= x) then
res = dble(y)
else if (y /= y) then
res = x
else
res = min(x, dble(y))
end if
end function
real(8) function fmin48(x, y) result(res)
real(4), intent (in) :: x
real(8), intent (in) :: y
if (x /= x) then
res = y
else if (y /= y) then
res = dble(x)
else
res = min(dble(x), y)
end if
end function
end module
! Series form: eps * (1 + tan(x)**2), i.e. the derivative of tan at x times eps.
real(8) function code(x, eps)
use fmin_fmax_functions
real(8), intent (in) :: x
real(8), intent (in) :: eps
real(8) :: t
t = tan(x)
code = eps + ((eps * t) * t)
end function
public static double code(double x, double eps) {
    // Series form: eps * (1 + tan(x)^2), the derivative of tan at x times eps.
    double t = Math.tan(x);
    return eps + ((eps * t) * t);
}
def code(x, eps):
    """Series form of tan(x + eps) - tan(x): eps * (1 + tan(x)**2)."""
    t = math.tan(x)
    return eps + ((eps * t) * t)
# Series form of tan(x + eps) - tan(x): eps * (1 + tan(x)^2).
function code(x, eps)
    t = tan(x)
    return Float64(eps + Float64(Float64(eps * t) * t))
end
function tmp = code(x, eps)
% Series form of tan(x + eps) - tan(x): eps * (1 + tan(x)^2).
t = tan(x);
tmp = eps + ((eps * t) * t);
end
(* Series form of tan(x + eps) - tan(x): eps * (1 + tan(x)^2), evaluated at machine precision. *)
code[x_, eps_] := N[(eps + N[(N[(eps * N[Tan[x], $MachinePrecision]), $MachinePrecision] * N[Tan[x], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\varepsilon + \left(\varepsilon \cdot \tan x\right) \cdot \tan x
\end{array}
herbie shell --seed 2025101
(FPCore (x eps)
:name "2tan (problem 3.3.2)"
:precision binary64
:pre (and (and (and (<= -10000.0 x) (<= x 10000.0)) (< (* 1e-16 (fabs x)) eps)) (< eps (fabs x)))
:alt
(! :herbie-platform default (/ (sin eps) (* (cos x) (cos (+ x eps)))))
:alt
(! :herbie-platform default (- (/ (+ (tan x) (tan eps)) (- 1 (* (tan x) (tan eps)))) (tan x)))
:alt
(! :herbie-platform default (+ eps (* eps (tan x) (tan x))))
(- (tan (+ x eps)) (tan x)))