
(FPCore (x) :precision binary64 (/ (- x (sin x)) (tan x)))
double code(double x) {
return (x - sin(x)) / tan(x);
}
! Initial program: (x - sin(x)) / tan(x) in double precision,
! identical operations to the reference expression.
real(8) function code(x)
    implicit none
    real(8), intent (in) :: x
    real(8) :: numerator
    numerator = x - sin(x)
    code = numerator / tan(x)
end function
/** Initial program: (x - Math.sin(x)) / Math.tan(x); identical FP operations. */
public static double code(double x) {
    final double numerator = x - Math.sin(x);
    return numerator / Math.tan(x);
}
def code(x): return (x - math.sin(x)) / math.tan(x)
function code(x) return Float64(Float64(x - sin(x)) / tan(x)) end
function tmp = code(x) tmp = (x - sin(x)) / tan(x); end
code[x_] := N[(N[(x - N[Sin[x], $MachinePrecision]), $MachinePrecision] / N[Tan[x], $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{x - \sin x}{\tan x}
\end{array}
Sampling outcomes in binary64 precision:
Herbie found 9 alternatives:
| Alternative | Accuracy | Speedup |
|---|---|---|
(FPCore (x) :precision binary64 (/ (- x (sin x)) (tan x)))
// Initial program (repeated listing): (x - sin(x)) / tan(x) in binary64.
double code(double x) {
return (x - sin(x)) / tan(x);
}
! Initial program (repeated listing): (x - sin(x)) / tan(x) in double precision.
real(8) function code(x)
real(8), intent (in) :: x
code = (x - sin(x)) / tan(x)
end function
// Initial program (repeated listing): (x - Math.sin(x)) / Math.tan(x).
public static double code(double x) {
return (x - Math.sin(x)) / Math.tan(x);
}
def code(x): return (x - math.sin(x)) / math.tan(x)
function code(x) return Float64(Float64(x - sin(x)) / tan(x)) end
function tmp = code(x) tmp = (x - sin(x)) / tan(x); end
code[x_] := N[(N[(x - N[Sin[x], $MachinePrecision]), $MachinePrecision] / N[Tan[x], $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{x - \sin x}{\tan x}
\end{array}
(FPCore (x)
:precision binary64
(let* ((t_0
(fma
(* x x)
(fma (* x x) -2.7557319223985893e-6 0.0001984126984126984)
-0.008333333333333333)))
(*
(*
(* x x)
(/
(fma t_0 (* (* x x) (* (* x x) t_0)) -0.027777777777777776)
(fma (* x x) t_0 -0.16666666666666666)))
(/ x (tan x)))))
// Herbie rewrite of (x - sin(x)) / tan(x): t_0 is a polynomial in x*x built
// with fma (coefficients from the Taylor expansion around 0, per the report
// trace); the quotient of two fma expressions times x / tan(x) reproduces the
// value with the report's sampled accuracy of 99.6% (vs 50.6% initially).
// NOTE(review): the exact fma grouping is what Herbie verified — do not
// reassociate or simplify these operations.
double code(double x) {
double t_0 = fma((x * x), fma((x * x), -2.7557319223985893e-6, 0.0001984126984126984), -0.008333333333333333);
return ((x * x) * (fma(t_0, ((x * x) * ((x * x) * t_0)), -0.027777777777777776) / fma((x * x), t_0, -0.16666666666666666))) * (x / tan(x));
}
function code(x) t_0 = fma(Float64(x * x), fma(Float64(x * x), -2.7557319223985893e-6, 0.0001984126984126984), -0.008333333333333333) return Float64(Float64(Float64(x * x) * Float64(fma(t_0, Float64(Float64(x * x) * Float64(Float64(x * x) * t_0)), -0.027777777777777776) / fma(Float64(x * x), t_0, -0.16666666666666666))) * Float64(x / tan(x))) end
code[x_] := Block[{t$95$0 = N[(N[(x * x), $MachinePrecision] * N[(N[(x * x), $MachinePrecision] * -2.7557319223985893e-6 + 0.0001984126984126984), $MachinePrecision] + -0.008333333333333333), $MachinePrecision]}, N[(N[(N[(x * x), $MachinePrecision] * N[(N[(t$95$0 * N[(N[(x * x), $MachinePrecision] * N[(N[(x * x), $MachinePrecision] * t$95$0), $MachinePrecision]), $MachinePrecision] + -0.027777777777777776), $MachinePrecision] / N[(N[(x * x), $MachinePrecision] * t$95$0 + -0.16666666666666666), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] * N[(x / N[Tan[x], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := \mathsf{fma}\left(x \cdot x, \mathsf{fma}\left(x \cdot x, -2.7557319223985893 \cdot 10^{-6}, 0.0001984126984126984\right), -0.008333333333333333\right)\\
\left(\left(x \cdot x\right) \cdot \frac{\mathsf{fma}\left(t\_0, \left(x \cdot x\right) \cdot \left(\left(x \cdot x\right) \cdot t\_0\right), -0.027777777777777776\right)}{\mathsf{fma}\left(x \cdot x, t\_0, -0.16666666666666666\right)}\right) \cdot \frac{x}{\tan x}
\end{array}
\end{array}
Initial program 50.6%
Taylor expanded in x around 0
cube-mult N/A
unpow2 N/A
associate-*l* N/A
*-commutative N/A
lower-*.f64 N/A
*-commutative N/A
lower-*.f64 N/A
unpow2 N/A
lower-*.f64 N/A
+-commutative N/A
lower-fma.f64 N/A
Applied rewrites 81.3%
lift-*.f64 N/A
lift-*.f64 N/A
lift-*.f64 N/A
lift-*.f64 N/A
lift-fma.f64 N/A
lift-fma.f64 N/A
lift-fma.f64 N/A
lift-*.f64 N/A
*-commutative N/A
lift-tan.f64 N/A
Applied rewrites 99.6%
lift-*.f64 N/A
lift-*.f64 N/A
lift-fma.f64 N/A
lift-*.f64 N/A
lift-fma.f64 N/A
flip-+ N/A
lower-/.f64 N/A
Applied rewrites 99.6%
(FPCore (x)
:precision binary64
(*
(/ x (tan x))
(*
(* x x)
(fma
(* x x)
(fma
x
(* x (fma (* x x) -2.7557319223985893e-6 0.0001984126984126984))
-0.008333333333333333)
0.16666666666666666))))
// Herbie alternative: (x / tan(x)) times x^2 times a nested-fma polynomial in
// x (coefficients from the Taylor expansion around 0, per the report trace).
// NOTE(review): the fma nesting is Herbie's verified evaluation order — do
// not reassociate.
double code(double x) {
return (x / tan(x)) * ((x * x) * fma((x * x), fma(x, (x * fma((x * x), -2.7557319223985893e-6, 0.0001984126984126984)), -0.008333333333333333), 0.16666666666666666));
}
function code(x) return Float64(Float64(x / tan(x)) * Float64(Float64(x * x) * fma(Float64(x * x), fma(x, Float64(x * fma(Float64(x * x), -2.7557319223985893e-6, 0.0001984126984126984)), -0.008333333333333333), 0.16666666666666666))) end
code[x_] := N[(N[(x / N[Tan[x], $MachinePrecision]), $MachinePrecision] * N[(N[(x * x), $MachinePrecision] * N[(N[(x * x), $MachinePrecision] * N[(x * N[(x * N[(N[(x * x), $MachinePrecision] * -2.7557319223985893e-6 + 0.0001984126984126984), $MachinePrecision]), $MachinePrecision] + -0.008333333333333333), $MachinePrecision] + 0.16666666666666666), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{x}{\tan x} \cdot \left(\left(x \cdot x\right) \cdot \mathsf{fma}\left(x \cdot x, \mathsf{fma}\left(x, x \cdot \mathsf{fma}\left(x \cdot x, -2.7557319223985893 \cdot 10^{-6}, 0.0001984126984126984\right), -0.008333333333333333\right), 0.16666666666666666\right)\right)
\end{array}
Initial program 50.6%
Taylor expanded in x around 0
cube-mult N/A
unpow2 N/A
associate-*l* N/A
*-commutative N/A
lower-*.f64 N/A
*-commutative N/A
lower-*.f64 N/A
unpow2 N/A
lower-*.f64 N/A
+-commutative N/A
lower-fma.f64 N/A
Applied rewrites 81.3%
lift-*.f64 N/A
lift-*.f64 N/A
lift-*.f64 N/A
lift-*.f64 N/A
lift-fma.f64 N/A
lift-fma.f64 N/A
lift-fma.f64 N/A
lift-*.f64 N/A
*-commutative N/A
lift-tan.f64 N/A
Applied rewrites 99.6%
Final simplification 99.6%
(FPCore (x)
:precision binary64
(let* ((t_0
(fma
x
(* x (fma (* x x) -0.00023644179894179894 -0.0007275132275132275))
-0.06388888888888888))
(t_1 (- 0.16666666666666666 (* x (* x t_0)))))
(* x (* (fma (* x x) t_0 0.16666666666666666) (/ (* x t_1) t_1)))))
// Herbie alternative: t_0 and t_1 are helper polynomials built with fma.
// NOTE(review): (x * t_1) / t_1 looks algebraically redundant (equals x when
// t_1 != 0) but is reproduced byte-for-byte from Herbie's output — presumably
// the rounding of that quotient is part of the verified rewrite; confirm
// before simplifying.
double code(double x) {
double t_0 = fma(x, (x * fma((x * x), -0.00023644179894179894, -0.0007275132275132275)), -0.06388888888888888);
double t_1 = 0.16666666666666666 - (x * (x * t_0));
return x * (fma((x * x), t_0, 0.16666666666666666) * ((x * t_1) / t_1));
}
function code(x) t_0 = fma(x, Float64(x * fma(Float64(x * x), -0.00023644179894179894, -0.0007275132275132275)), -0.06388888888888888) t_1 = Float64(0.16666666666666666 - Float64(x * Float64(x * t_0))) return Float64(x * Float64(fma(Float64(x * x), t_0, 0.16666666666666666) * Float64(Float64(x * t_1) / t_1))) end
code[x_] := Block[{t$95$0 = N[(x * N[(x * N[(N[(x * x), $MachinePrecision] * -0.00023644179894179894 + -0.0007275132275132275), $MachinePrecision]), $MachinePrecision] + -0.06388888888888888), $MachinePrecision]}, Block[{t$95$1 = N[(0.16666666666666666 - N[(x * N[(x * t$95$0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]}, N[(x * N[(N[(N[(x * x), $MachinePrecision] * t$95$0 + 0.16666666666666666), $MachinePrecision] * N[(N[(x * t$95$1), $MachinePrecision] / t$95$1), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := \mathsf{fma}\left(x, x \cdot \mathsf{fma}\left(x \cdot x, -0.00023644179894179894, -0.0007275132275132275\right), -0.06388888888888888\right)\\
t_1 := 0.16666666666666666 - x \cdot \left(x \cdot t\_0\right)\\
x \cdot \left(\mathsf{fma}\left(x \cdot x, t\_0, 0.16666666666666666\right) \cdot \frac{x \cdot t\_1}{t\_1}\right)
\end{array}
\end{array}
Initial program 50.6%
Taylor expanded in x around 0
unpow2 N/A
associate-*l* N/A
*-commutative N/A
lower-*.f64 N/A
*-commutative N/A
lower-*.f64 N/A
+-commutative N/A
unpow2 N/A
associate-*l* N/A
lower-fma.f64 N/A
Applied rewrites 99.6%
lift-*.f64 N/A
lift-fma.f64 N/A
lift-*.f64 N/A
lift-fma.f64 N/A
lift-*.f64 N/A
distribute-rgt-in N/A
+-commutative N/A
*-commutative N/A
lower-fma.f64 N/A
*-commutative N/A
associate-*r* N/A
Applied rewrites 99.5%
Applied rewrites 99.5%
Taylor expanded in x around 0
+-commutative N/A
lower-fma.f64 N/A
unpow2 N/A
lower-*.f64 N/A
sub-neg N/A
unpow2 N/A
associate-*l* N/A
metadata-eval N/A
lower-fma.f64 N/A
lower-*.f64 N/A
sub-neg N/A
*-commutative N/A
metadata-eval N/A
lower-fma.f64 N/A
unpow2 N/A
lower-*.f64 99.6
Applied rewrites 99.6%
(FPCore (x)
:precision binary64
(*
x
(*
x
(fma
x
(*
x
(fma
x
(* x (fma (* x x) -0.00023644179894179894 -0.0007275132275132275))
-0.06388888888888888))
0.16666666666666666))))
// Herbie alternative: pure odd polynomial in x via nested fma (Taylor form,
// per the report trace) — no trig calls at all.
// NOTE(review): keep the fma nesting exactly as emitted.
double code(double x) {
return x * (x * fma(x, (x * fma(x, (x * fma((x * x), -0.00023644179894179894, -0.0007275132275132275)), -0.06388888888888888)), 0.16666666666666666));
}
function code(x) return Float64(x * Float64(x * fma(x, Float64(x * fma(x, Float64(x * fma(Float64(x * x), -0.00023644179894179894, -0.0007275132275132275)), -0.06388888888888888)), 0.16666666666666666))) end
code[x_] := N[(x * N[(x * N[(x * N[(x * N[(x * N[(x * N[(N[(x * x), $MachinePrecision] * -0.00023644179894179894 + -0.0007275132275132275), $MachinePrecision]), $MachinePrecision] + -0.06388888888888888), $MachinePrecision]), $MachinePrecision] + 0.16666666666666666), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
x \cdot \left(x \cdot \mathsf{fma}\left(x, x \cdot \mathsf{fma}\left(x, x \cdot \mathsf{fma}\left(x \cdot x, -0.00023644179894179894, -0.0007275132275132275\right), -0.06388888888888888\right), 0.16666666666666666\right)\right)
\end{array}
Initial program 50.6%
Taylor expanded in x around 0
unpow2 N/A
associate-*l* N/A
*-commutative N/A
lower-*.f64 N/A
*-commutative N/A
lower-*.f64 N/A
+-commutative N/A
unpow2 N/A
associate-*l* N/A
lower-fma.f64 N/A
Applied rewrites 99.6%
(FPCore (x)
:precision binary64
(*
x
(*
x
(fma
(* x x)
(fma (* x x) -0.0007275132275132275 -0.06388888888888888)
0.16666666666666666))))
// Herbie alternative: degree-6 polynomial in x using fma on x*x — no trig
// calls. Keep the fma grouping exactly as emitted.
double code(double x) {
return x * (x * fma((x * x), fma((x * x), -0.0007275132275132275, -0.06388888888888888), 0.16666666666666666));
}
function code(x) return Float64(x * Float64(x * fma(Float64(x * x), fma(Float64(x * x), -0.0007275132275132275, -0.06388888888888888), 0.16666666666666666))) end
code[x_] := N[(x * N[(x * N[(N[(x * x), $MachinePrecision] * N[(N[(x * x), $MachinePrecision] * -0.0007275132275132275 + -0.06388888888888888), $MachinePrecision] + 0.16666666666666666), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
x \cdot \left(x \cdot \mathsf{fma}\left(x \cdot x, \mathsf{fma}\left(x \cdot x, -0.0007275132275132275, -0.06388888888888888\right), 0.16666666666666666\right)\right)
\end{array}
Initial program 50.6%
Taylor expanded in x around 0
unpow2 N/A
associate-*l* N/A
lower-*.f64 N/A
lower-*.f64 N/A
+-commutative N/A
lower-fma.f64 N/A
unpow2 N/A
lower-*.f64 N/A
sub-neg N/A
*-commutative N/A
metadata-eval N/A
lower-fma.f64 N/A
unpow2 N/A
lower-*.f64 99.4
Applied rewrites 99.4%
(FPCore (x) :precision binary64 (* x (* x (fma (* x x) -0.06388888888888888 0.16666666666666666))))
// Herbie alternative: low-order polynomial x^2 * fma(x^2, c1, c0) — cheap
// approximation with no trig calls. Keep the fma as emitted.
double code(double x) {
return x * (x * fma((x * x), -0.06388888888888888, 0.16666666666666666));
}
function code(x) return Float64(x * Float64(x * fma(Float64(x * x), -0.06388888888888888, 0.16666666666666666))) end
code[x_] := N[(x * N[(x * N[(N[(x * x), $MachinePrecision] * -0.06388888888888888 + 0.16666666666666666), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
x \cdot \left(x \cdot \mathsf{fma}\left(x \cdot x, -0.06388888888888888, 0.16666666666666666\right)\right)
\end{array}
Initial program 50.6%
Taylor expanded in x around 0
unpow2 N/A
associate-*l* N/A
lower-*.f64 N/A
lower-*.f64 N/A
+-commutative N/A
*-commutative N/A
lower-fma.f64 N/A
unpow2 N/A
lower-*.f64 99.3
Applied rewrites 99.3%
(FPCore (x) :precision binary64 (* x (/ x 6.0)))
/* Herbie alternative x * (x / 6.0): divide first, then multiply —
 * same rounding behavior as the emitted expression. */
double code(double x) {
    const double sixth_of_x = x / 6.0;
    return x * sixth_of_x;
}
! Herbie alternative: x * (x / 6), approximating the reference expression.
real(8) function code(x)
real(8), intent (in) :: x
code = x * (x / 6.0d0)
end function
// Herbie alternative: x * (x / 6.0), approximating the reference expression.
public static double code(double x) {
return x * (x / 6.0);
}
def code(x): return x * (x / 6.0)
function code(x) return Float64(x * Float64(x / 6.0)) end
function tmp = code(x) tmp = x * (x / 6.0); end
code[x_] := N[(x * N[(x / 6.0), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
x \cdot \frac{x}{6}
\end{array}
Initial program 50.6%
Taylor expanded in x around 0
unpow2 N/A
associate-*l* N/A
lower-*.f64 N/A
lower-*.f64 N/A
+-commutative N/A
*-commutative N/A
lower-fma.f64 N/A
unpow2 N/A
lower-*.f64 99.3
Applied rewrites 99.3%
lift-*.f64 N/A
flip3-+ N/A
clear-num N/A
un-div-inv N/A
lower-/.f64 N/A
clear-num N/A
flip3-+ N/A
lift-fma.f64 N/A
lower-/.f64 99.4
lift-fma.f64 N/A
lift-*.f64 N/A
associate-*l* N/A
Applied rewrites 99.4%
Taylor expanded in x around 0
Applied rewrites 99.1%
(FPCore (x) :precision binary64 (* x (* x 0.16666666666666666)))
/* Herbie alternative x * (x * 0.16666666666666666): right-associated
 * multiplication, exactly as emitted. */
double code(double x) {
    const double scaled = x * 0.16666666666666666;
    return x * scaled;
}
! Herbie alternative: x * (x * 0.16666666666666666), right-associated.
real(8) function code(x)
real(8), intent (in) :: x
code = x * (x * 0.16666666666666666d0)
end function
// Herbie alternative: x * (x * 0.16666666666666666), right-associated.
public static double code(double x) {
return x * (x * 0.16666666666666666);
}
def code(x): return x * (x * 0.16666666666666666)
function code(x) return Float64(x * Float64(x * 0.16666666666666666)) end
function tmp = code(x) tmp = x * (x * 0.16666666666666666); end
code[x_] := N[(x * N[(x * 0.16666666666666666), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
x \cdot \left(x \cdot 0.16666666666666666\right)
\end{array}
Initial program 50.6%
Taylor expanded in x around 0
lower-*.f64 N/A
unpow2 N/A
lower-*.f64 99.0
Applied rewrites 99.0%
associate-*r* N/A
lower-*.f64 N/A
*-commutative N/A
lower-*.f64 99.1
Applied rewrites 99.1%
Final simplification 99.1%
(FPCore (x) :precision binary64 (* (* x x) 0.16666666666666666))
/* Herbie alternative (x * x) * 0.16666666666666666: square first, then
 * scale — exactly the emitted association. */
double code(double x) {
    const double square = x * x;
    return square * 0.16666666666666666;
}
! Herbie alternative: (x * x) * 0.16666666666666666.
real(8) function code(x)
real(8), intent (in) :: x
code = (x * x) * 0.16666666666666666d0
end function
// Herbie alternative: (x * x) * 0.16666666666666666.
public static double code(double x) {
return (x * x) * 0.16666666666666666;
}
def code(x): return (x * x) * 0.16666666666666666
function code(x) return Float64(Float64(x * x) * 0.16666666666666666) end
function tmp = code(x) tmp = (x * x) * 0.16666666666666666; end
code[x_] := N[(N[(x * x), $MachinePrecision] * 0.16666666666666666), $MachinePrecision]
\begin{array}{l}
\\
\left(x \cdot x\right) \cdot 0.16666666666666666
\end{array}
Initial program 50.6%
Taylor expanded in x around 0
lower-*.f64 N/A
unpow2 N/A
lower-*.f64 99.0
Applied rewrites 99.0%
Final simplification 99.0%
(FPCore (x) :precision binary64 (* 0.16666666666666666 (* x x)))
/* Herbie alternative 0.16666666666666666 * (x * x): square first, then
 * scale by the constant on the left — exactly the emitted association. */
double code(double x) {
    const double square = x * x;
    return 0.16666666666666666 * square;
}
! Herbie alternative: 0.16666666666666666 * (x * x).
real(8) function code(x)
real(8), intent (in) :: x
code = 0.16666666666666666d0 * (x * x)
end function
// Herbie alternative: 0.16666666666666666 * (x * x).
public static double code(double x) {
return 0.16666666666666666 * (x * x);
}
def code(x): return 0.16666666666666666 * (x * x)
function code(x) return Float64(0.16666666666666666 * Float64(x * x)) end
function tmp = code(x) tmp = 0.16666666666666666 * (x * x); end
code[x_] := N[(0.16666666666666666 * N[(x * x), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
0.16666666666666666 \cdot \left(x \cdot x\right)
\end{array}
herbie shell --seed 2024219
(FPCore (x)
:name "ENA, Section 1.4, Exercise 4a"
:precision binary64
:pre (and (<= -1.0 x) (<= x 1.0))
:alt
(! :herbie-platform default (* 1/6 (* x x)))
(/ (- x (sin x)) (tan x)))