
(FPCore (x) :precision binary64 (* (* 3.0 (- 2.0 (* x 3.0))) x))
/* Herbie initial program: 3*(2 - 3x) * x, evaluated in binary64.
 * Only commutative reorderings of the original products: each operation
 * rounds identically, so results are bit-for-bit the same. */
double code(double x) {
    double bracket = 2.0 - 3.0 * x;  /* 3.0*x == x*3.0 exactly */
    return x * (3.0 * bracket);
}
! Herbie initial program: 3*(2 - 3*x)*x in binary64.
! Added implicit none so every name must be declared (modern-Fortran hygiene).
real(8) function code(x)
  implicit none
  real(8), intent (in) :: x
  code = (3.0d0 * (2.0d0 - (x * 3.0d0))) * x
end function
/** Herbie initial program: 3*(2 - 3x)*x in binary64 (commutative reordering only). */
public static double code(double x) {
    final double bracket = 2.0 - x * 3.0;
    return x * (3.0 * bracket);
}
def code(x):
    """Herbie initial program: 3*(2 - 3*x)*x in double precision."""
    bracket = 2.0 - x * 3.0
    return (3.0 * bracket) * x
# Herbie initial program: 3*(2 - 3x)*x with explicit Float64 rounding after each step.
function code(x) return Float64(Float64(3.0 * Float64(2.0 - Float64(x * 3.0))) * x) end
% Herbie initial program: 3*(2 - 3*x)*x in double precision.
function tmp = code(x) tmp = (3.0 * (2.0 - (x * 3.0))) * x; end
(* Herbie initial program: 3*(2 - 3 x) x, rounded to $MachinePrecision after each operation. *)
code[x_] := N[(N[(3.0 * N[(2.0 - N[(x * 3.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] * x), $MachinePrecision]
\begin{array}{l}
\\
\left(3 \cdot \left(2 - x \cdot 3\right)\right) \cdot x
\end{array}
Sampling outcomes in binary64 precision:
Herbie found 5 alternatives:
| Alternative | Accuracy | Speedup |
|---|---|---|
(FPCore (x) :precision binary64 (* (* 3.0 (- 2.0 (* x 3.0))) x))
/* Alternative 1 (identical to the initial program): 3*(2 - 3x)*x in binary64. */
double code(double x) {
return (3.0 * (2.0 - (x * 3.0))) * x;
}
! Alternative 1 (identical to the initial program): 3*(2 - 3*x)*x in binary64.
! Added implicit none so every name must be declared.
real(8) function code(x)
  implicit none
  real(8), intent (in) :: x
  code = (3.0d0 * (2.0d0 - (x * 3.0d0))) * x
end function
// Alternative 1 (identical to the initial program): 3*(2 - 3x)*x in binary64.
public static double code(double x) {
return (3.0 * (2.0 - (x * 3.0))) * x;
}
# Alternative 1 (identical to the initial program): 3*(2 - 3*x)*x.
def code(x): return (3.0 * (2.0 - (x * 3.0))) * x
# Alternative 1 (identical to the initial program), with explicit Float64 rounding.
function code(x) return Float64(Float64(3.0 * Float64(2.0 - Float64(x * 3.0))) * x) end
% Alternative 1 (identical to the initial program): 3*(2 - 3*x)*x.
function tmp = code(x) tmp = (3.0 * (2.0 - (x * 3.0))) * x; end
(* Alternative 1 (identical to the initial program), rounded to $MachinePrecision per step. *)
code[x_] := N[(N[(3.0 * N[(2.0 - N[(x * 3.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] * x), $MachinePrecision]
\begin{array}{l}
\\
\left(3 \cdot \left(2 - x \cdot 3\right)\right) \cdot x
\end{array}
(FPCore (x) :precision binary64 (fma x 6.0 (* x (* x -9.0))))
double code(double x) {
return fma(x, 6.0, (x * (x * -9.0)));
}
# fma-based alternative: fma(x, 6, -9x^2) = 6x - 9x^2, single rounding on the final sum.
function code(x) return fma(x, 6.0, Float64(x * Float64(x * -9.0))) end
(* fma-based alternative rendered without a fused operation: x*6 + (-9 x^2). *)
code[x_] := N[(x * 6.0 + N[(x * N[(x * -9.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\mathsf{fma}\left(x, 6, x \cdot \left(x \cdot -9\right)\right)
\end{array}
Initial program 99.7%
*-commutative 99.7%
sub-neg 99.7%
distribute-rgt-in 99.7%
metadata-eval 99.7%
distribute-rgt-neg-in 99.7%
associate-*l* 99.8%
metadata-eval 99.8%
metadata-eval 99.8%
Simplified 99.8%
distribute-lft-in 99.8%
fma-def 99.8%
Applied egg-rr 99.8%
Final simplification 99.8%
(FPCore (x) :precision binary64 (if (or (<= x -0.66) (not (<= x 0.65))) (* -9.0 (* x x)) (* x 6.0)))
/* Regime-split alternative: quadratic -9*x*x on the outer regimes
 * (x <= -0.66 or x > 0.65), linear 6*x on the inner regime.
 * Early returns replace the original tmp variable; branch logic unchanged. */
double code(double x) {
    if (x <= -0.66 || !(x <= 0.65)) {
        return -9.0 * (x * x);
    }
    return x * 6.0;
}
! Regime-split alternative:
!   quadratic -9*x*x when x <= -0.66 or x > 0.65, linear 6*x otherwise.
! Added implicit none so every name must be declared.
real(8) function code(x)
  implicit none
  real(8), intent (in) :: x
  real(8) :: tmp
  if ((x <= (-0.66d0)) .or. (.not. (x <= 0.65d0))) then
    tmp = (-9.0d0) * (x * x)
  else
    tmp = x * 6.0d0
  end if
  code = tmp
end function
/** Regime-split alternative: -9*x*x when x <= -0.66 or x > 0.65, else 6*x. */
public static double code(double x) {
    final boolean outer = (x <= -0.66) || !(x <= 0.65);
    return outer ? -9.0 * (x * x) : x * 6.0;
}
def code(x):
    """Regime-split alternative: -9*x*x when x <= -0.66 or x > 0.65, else 6*x.

    Bug fix: the generated one-line rendering (``def code(x): tmp = 0 if ...:``)
    was not valid Python syntax — an assignment, an ``if``/``else`` block and a
    ``return`` cannot share one line.  Rewritten as a proper multi-line function
    with the same branch conditions and arithmetic.
    """
    if x <= -0.66 or not (x <= 0.65):
        return -9.0 * (x * x)
    return x * 6.0
# Regime-split alternative: -9x^2 when x <= -0.66 or x > 0.65, 6x otherwise.
function code(x) tmp = 0.0 if ((x <= -0.66) || !(x <= 0.65)) tmp = Float64(-9.0 * Float64(x * x)); else tmp = Float64(x * 6.0); end return tmp end
% Regime-split alternative: -9*x*x when x <= -0.66 or x > 0.65, 6*x otherwise.
function tmp_2 = code(x) tmp = 0.0; if ((x <= -0.66) || ~((x <= 0.65))) tmp = -9.0 * (x * x); else tmp = x * 6.0; end tmp_2 = tmp; end
(* Regime-split alternative: -9 x^2 when x <= -0.66 or x > 0.65, 6 x otherwise. *)
code[x_] := If[Or[LessEqual[x, -0.66], N[Not[LessEqual[x, 0.65]], $MachinePrecision]], N[(-9.0 * N[(x * x), $MachinePrecision]), $MachinePrecision], N[(x * 6.0), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;x \leq -0.66 \lor \neg \left(x \leq 0.65\right):\\
\;\;\;\;-9 \cdot \left(x \cdot x\right)\\
\mathbf{else}:\\
\;\;\;\;x \cdot 6\\
\end{array}
\end{array}
if x < -0.660000000000000031 or 0.650000000000000022 < x
Initial program 99.6%
associate-*l* 99.7%
*-commutative 99.7%
Simplified 99.7%
Taylor expanded in x around inf 98.8%
unpow2 98.8%
Simplified 98.8%
if -0.660000000000000031 < x < 0.650000000000000022
Initial program 99.8%
*-commutative 99.8%
sub-neg 99.8%
distribute-rgt-in 99.8%
metadata-eval 99.8%
distribute-rgt-neg-in 99.8%
associate-*l* 99.8%
metadata-eval 99.8%
metadata-eval 99.8%
Simplified 99.8%
Taylor expanded in x around 0 97.0%
Final simplification 97.9%
(FPCore (x) :precision binary64 (* x (+ 6.0 (* x -9.0))))
/* Horner-form alternative: x*(6 - 9x), one fewer multiplication than the
 * expanded polynomial. */
double code(double x) {
    const double slope = 6.0 + x * -9.0;
    return x * slope;
}
! Horner-form alternative: x*(6 - 9*x) in binary64.
! Added implicit none so every name must be declared.
real(8) function code(x)
  implicit none
  real(8), intent (in) :: x
  code = x * (6.0d0 + (x * (-9.0d0)))
end function
// Horner-form alternative: x*(6 - 9x) in binary64.
public static double code(double x) {
return x * (6.0 + (x * -9.0));
}
def code(x):
    """Horner-form alternative: x * (6 - 9*x) in double precision."""
    slope = 6.0 + x * -9.0
    return x * slope
# Horner-form alternative: x * (6 - 9x), explicit Float64 rounding per step.
function code(x) return Float64(x * Float64(6.0 + Float64(x * -9.0))) end
% Horner-form alternative: x * (6 - 9*x).
function tmp = code(x) tmp = x * (6.0 + (x * -9.0)); end
(* Horner-form alternative: x (6 - 9 x), rounded to $MachinePrecision per step. *)
code[x_] := N[(x * N[(6.0 + N[(x * -9.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
x \cdot \left(6 + x \cdot -9\right)
\end{array}
Initial program 99.7%
*-commutative 99.7%
sub-neg 99.7%
distribute-rgt-in 99.7%
metadata-eval 99.7%
distribute-rgt-neg-in 99.7%
associate-*l* 99.8%
metadata-eval 99.8%
metadata-eval 99.8%
Simplified 99.8%
Final simplification 99.8%
(FPCore (x) :precision binary64 (* x 6.0))
/* Linear alternative (Taylor expansion about 0): 6*x. */
double code(double x) {
return x * 6.0;
}
! Linear alternative (Taylor expansion about 0): 6*x.
! Added implicit none so every name must be declared.
real(8) function code(x)
  implicit none
  real(8), intent (in) :: x
  code = x * 6.0d0
end function
// Linear alternative (Taylor expansion about 0): 6*x.
public static double code(double x) {
return x * 6.0;
}
def code(x):
    """Linear alternative (Taylor expansion about 0): 6*x."""
    return 6.0 * x
# Linear alternative (Taylor expansion about 0): 6x.
function code(x) return Float64(x * 6.0) end
% Linear alternative (Taylor expansion about 0): 6*x.
function tmp = code(x) tmp = x * 6.0; end
(* Linear alternative (Taylor expansion about 0): 6 x. *)
code[x_] := N[(x * 6.0), $MachinePrecision]
\begin{array}{l}
\\
x \cdot 6
\end{array}
Initial program 99.7%
*-commutative 99.7%
sub-neg 99.7%
distribute-rgt-in 99.7%
metadata-eval 99.7%
distribute-rgt-neg-in 99.7%
associate-*l* 99.8%
metadata-eval 99.8%
metadata-eval 99.8%
Simplified 99.8%
Taylor expanded in x around 0 49.4%
Final simplification 49.4%
(FPCore (x) :precision binary64 4.0)
/* Constant alternative: always 4.0 (accuracy collapses to 2.3% — see trace). */
double code(double x) {
return 4.0;
}
! Constant alternative: always 4.0 (accuracy collapses to 2.3% - see trace).
! Added implicit none so every name must be declared.
real(8) function code(x)
  implicit none
  real(8), intent (in) :: x
  code = 4.0d0
end function
// Constant alternative: always 4.0 (accuracy collapses to 2.3% — see trace).
public static double code(double x) {
return 4.0;
}
# Constant alternative: always 4.0 (accuracy collapses to 2.3% — see trace).
def code(x): return 4.0
# Constant alternative: always 4.0.
function code(x) return 4.0 end
% Constant alternative: always 4.0.
function tmp = code(x) tmp = 4.0; end
(* Constant alternative: always 4.0. *)
code[x_] := 4.0
\begin{array}{l}
\\
4
\end{array}
Initial program 99.7%
*-commutative 99.7%
sub-neg 99.7%
distribute-rgt-in 99.7%
metadata-eval 99.7%
distribute-rgt-neg-in 99.7%
associate-*l* 99.8%
metadata-eval 99.8%
metadata-eval 99.8%
Simplified 99.8%
*-commutative 99.8%
flip-+ 99.8%
associate-*l/ 92.5%
metadata-eval 92.5%
pow2 92.5%
Applied egg-rr 92.5%
Taylor expanded in x around 0 48.3%
*-commutative 48.3%
Simplified 48.3%
Taylor expanded in x around inf 2.3%
Final simplification 2.3%
(FPCore (x) :precision binary64 (- (* 6.0 x) (* 9.0 (* x x))))
/* Herbie target form: 6x - 9x^2 in expanded polynomial form. */
double code(double x) {
    double linear = 6.0 * x;
    double quadratic = 9.0 * (x * x);
    return linear - quadratic;
}
! Herbie target form: 6*x - 9*x**2 in expanded polynomial form.
! Added implicit none so every name must be declared.
real(8) function code(x)
  implicit none
  real(8), intent (in) :: x
  code = (6.0d0 * x) - (9.0d0 * (x * x))
end function
// Herbie target form: 6x - 9x^2 in expanded polynomial form.
public static double code(double x) {
return (6.0 * x) - (9.0 * (x * x));
}
def code(x):
    """Herbie target form: 6*x - 9*x*x in double precision."""
    linear = 6.0 * x
    quadratic = 9.0 * (x * x)
    return linear - quadratic
# Herbie target form: 6x - 9x^2, explicit Float64 rounding per step.
function code(x) return Float64(Float64(6.0 * x) - Float64(9.0 * Float64(x * x))) end
% Herbie target form: 6*x - 9*x^2.
function tmp = code(x) tmp = (6.0 * x) - (9.0 * (x * x)); end
(* Herbie target form: 6 x - 9 x^2, rounded to $MachinePrecision per step. *)
code[x_] := N[(N[(6.0 * x), $MachinePrecision] - N[(9.0 * N[(x * x), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
6 \cdot x - 9 \cdot \left(x \cdot x\right)
\end{array}
herbie shell --seed 2023258
(FPCore (x)
:name "Diagrams.Tangent:$catParam from diagrams-lib-1.3.0.3, E"
:precision binary64
:herbie-target
(- (* 6.0 x) (* 9.0 (* x x)))
(* (* 3.0 (- 2.0 (* x 3.0))) x))