
Rosa's Benchmark

Initial program: 99.9% accurate

(FPCore (x) :precision binary64 (- (* 0.954929658551372 x) (* 0.12900613773279798 (* (* x x) x))))

C:
double code(double x) {
    return (0.954929658551372 * x) - (0.12900613773279798 * ((x * x) * x));
}

Fortran:
real(8) function code(x)
    real(8), intent (in) :: x
    code = (0.954929658551372d0 * x) - (0.12900613773279798d0 * ((x * x) * x))
end function

Java:
public static double code(double x) {
    return (0.954929658551372 * x) - (0.12900613773279798 * ((x * x) * x));
}

Python:
def code(x): return (0.954929658551372 * x) - (0.12900613773279798 * ((x * x) * x))

Julia:
function code(x) return Float64(Float64(0.954929658551372 * x) - Float64(0.12900613773279798 * Float64(Float64(x * x) * x))) end

MATLAB:
function tmp = code(x) tmp = (0.954929658551372 * x) - (0.12900613773279798 * ((x * x) * x)); end

Wolfram:
code[x_] := N[(N[(0.954929658551372 * x), $MachinePrecision] - N[(0.12900613773279798 * N[(N[(x * x), $MachinePrecision] * x), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]

TeX:
\begin{array}{l}
0.954929658551372 \cdot x - 0.12900613773279798 \cdot \left(\left(x \cdot x\right) \cdot x\right)
\end{array}
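An aside that is not part of the Herbie output: the leading coefficient equals 3/π to every printed digit, which suggests the expression is a cubic sine approximation (the constants match the Rosa suite's sineOrder3 benchmark, the likely source of the name below, though that identification is my inference). A quick C check, assuming a libm that defines M_PI:

#include <math.h>   /* M_PI is POSIX, provided by most libms */
#include <stdio.h>

int main(void) {
    /* Compare 3/pi with the benchmark's leading coefficient. */
    printf("3/pi                 = %.17g\n", 3.0 / M_PI);
    printf("leading coefficient  = %.17g\n", 0.954929658551372);
    return 0;
}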
Sampling outcomes in binary64 precision:
Herbie found 3 alternatives:

| Alternative | Accuracy | Speedup |
|---|---|---|
| Alternative 1 (fma form) | 99.9% | |
| Alternative 2 (Horner form) | 99.9% | |
| Alternative 3 (linear truncation) | 48.1% | |
Alternative 1: 99.9% accurate

(FPCore (x) :precision binary64 (fma 0.954929658551372 x (* (pow x 3.0) -0.12900613773279798)))

C:
#include <math.h>

double code(double x) {
    return fma(0.954929658551372, x, (pow(x, 3.0) * -0.12900613773279798));
}

Julia:
function code(x) return fma(0.954929658551372, x, Float64((x ^ 3.0) * -0.12900613773279798)) end

Wolfram:
code[x_] := N[(0.954929658551372 * x + N[(N[Power[x, 3.0], $MachinePrecision] * -0.12900613773279798), $MachinePrecision]), $MachinePrecision]

TeX:
\begin{array}{l}
\mathsf{fma}\left(0.954929658551372, x, {x}^{3} \cdot -0.12900613773279798\right)
\end{array}
Derivation (accuracy after each rewrite):
Initial program: 99.9%
fma-neg: 99.8%
*-commutative: 99.8%
distribute-rgt-neg-in: 99.8%
unpow3: 99.9%
metadata-eval: 99.9%
Simplified: 99.9%
Final simplification: 99.9%
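For context on why the fma form helps, not part of the Herbie output: fma(a, b, c) computes a*b + c with a single rounding, while a separate multiply and add round twice. A minimal C99 sketch (file name and sample point are my own) showing the classic one-rounding-versus-two example and Alternative 1 at one input; compile with cc -std=c99 -ffp-contract=off fma_demo.c -lm:

#include <math.h>
#include <stdio.h>

int main(void) {
    /* 0.1 * 10.0 rounds to exactly 1.0 in binary64, so the two-step
       version loses the residual of 0.1's representation error;
       fma keeps it because the product is never rounded on its own. */
    printf("two roundings: %.17g\n", 0.1 * 10.0 - 1.0);
    printf("one rounding:  %.17g\n", fma(0.1, 10.0, -1.0));

    /* Alternative 1 applies the same idea to the benchmark. */
    double x = 1.5;
    printf("alt 1 at x = 1.5: %.17g\n",
           fma(0.954929658551372, x, pow(x, 3.0) * -0.12900613773279798));
    return 0;
}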
Alternative 2: 99.9% accurate

(FPCore (x) :precision binary64 (* x (- 0.954929658551372 (* x (* x 0.12900613773279798)))))

C:
double code(double x) {
    return x * (0.954929658551372 - (x * (x * 0.12900613773279798)));
}

Fortran:
real(8) function code(x)
    real(8), intent (in) :: x
    code = x * (0.954929658551372d0 - (x * (x * 0.12900613773279798d0)))
end function

Java:
public static double code(double x) {
    return x * (0.954929658551372 - (x * (x * 0.12900613773279798)));
}

Python:
def code(x): return x * (0.954929658551372 - (x * (x * 0.12900613773279798)))

Julia:
function code(x) return Float64(x * Float64(0.954929658551372 - Float64(x * Float64(x * 0.12900613773279798)))) end

MATLAB:
function tmp = code(x) tmp = x * (0.954929658551372 - (x * (x * 0.12900613773279798))); end

Wolfram:
code[x_] := N[(x * N[(0.954929658551372 - N[(x * N[(x * 0.12900613773279798), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]

TeX:
\begin{array}{l}
x \cdot \left(0.954929658551372 - x \cdot \left(x \cdot 0.12900613773279798\right)\right)
\end{array}
Derivation (accuracy after each rewrite):
Initial program: 99.9%
associate-*r*: 99.9%
distribute-rgt-out--: 99.8%
pow2: 99.8%
Applied egg-rr: 99.8%
add-sqr-sqrt: 99.8%
pow2: 99.8%
*-commutative: 99.8%
sqrt-prod: 99.8%
unpow2: 99.8%
sqrt-prod: 48.3%
add-sqr-sqrt: 99.8%
Applied egg-rr: 99.8%
unpow2: 99.8%
*-commutative: 99.8%
*-commutative: 99.8%
swap-sqr: 99.8%
rem-square-sqrt: 99.8%
associate-*r*: 99.9%
Applied egg-rr: 99.9%
Final simplification: 99.9%
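What Alternative 2 changes, in plain terms (my summary, not Herbie's): factoring x out of the polynomial yields a Horner-style nested form that needs three multiplications instead of four and never forms x^3 explicitly. A small C sketch contrasting the two forms at an arbitrary sample point:

#include <stdio.h>

static const double C1 = 0.954929658551372;
static const double C3 = 0.12900613773279798;

/* Expanded form from the initial program: 4 multiplications, 1 subtraction. */
static double p_expanded(double x) {
    return C1 * x - C3 * ((x * x) * x);
}

/* Alternative 2 (Horner-style nesting): 3 multiplications, 1 subtraction. */
static double p_nested(double x) {
    return x * (C1 - x * (x * C3));
}

int main(void) {
    double x = 0.75;  /* arbitrary sample point */
    printf("expanded: %.17g\nnested:   %.17g\n", p_expanded(x), p_nested(x));
    return 0;
}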
Alternative 3: 48.1% accurate

(FPCore (x) :precision binary64 (* 0.954929658551372 x))

C:
double code(double x) {
    return 0.954929658551372 * x;
}

Fortran:
real(8) function code(x)
    real(8), intent (in) :: x
    code = 0.954929658551372d0 * x
end function

Java:
public static double code(double x) {
    return 0.954929658551372 * x;
}

Python:
def code(x): return 0.954929658551372 * x

Julia:
function code(x) return Float64(0.954929658551372 * x) end

MATLAB:
function tmp = code(x) tmp = 0.954929658551372 * x; end

Wolfram:
code[x_] := N[(0.954929658551372 * x), $MachinePrecision]

TeX:
\begin{array}{l}
0.954929658551372 \cdot x
\end{array}
Derivation (accuracy after each rewrite):
Initial program: 99.9%
Taylor expanded in x around 0: 48.1%
*-commutative: 48.1%
Simplified: 48.1%
Final simplification: 48.1%
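The 48.1% figure is what you would expect from a truncation valid only near the expansion point (my reading, not stated by Herbie): dropping the cubic term leaves a relative error of roughly (0.12900613773279798 / 0.954929658551372) * x^2, about 0.135 * x^2, negligible near x = 0 but growing quadratically with |x|. A short C sketch of mine measuring Alternative 3 against the full cubic:

#include <math.h>
#include <stdio.h>

static const double C1 = 0.954929658551372;
static const double C3 = 0.12900613773279798;

int main(void) {
    const double xs[] = {0.01, 0.1, 1.0, 2.0};  /* sample points, chosen by hand */
    for (int i = 0; i < 4; i++) {
        double x = xs[i];
        double cubic  = x * (C1 - x * (x * C3));  /* full initial program */
        double linear = C1 * x;                   /* Alternative 3 */
        printf("x = %4.2f   relative error = %.3g\n",
               x, fabs(linear - cubic) / fabs(cubic));
    }
    return 0;
}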
Reproduce:

herbie shell --seed 2023331
(FPCore (x)
:name "Rosa's Benchmark"
:precision binary64
(- (* 0.954929658551372 x) (* 0.12900613773279798 (* (* x x) x))))
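herbie shell starts an interactive session that reads FPCore expressions from standard input; entering the FPCore above should regenerate these alternatives, since --seed fixes the random sample of inputs Herbie uses to measure accuracy.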