
(FPCore (x) :precision binary64 (- (* 0.954929658551372 x) (* 0.12900613773279798 (* (* x x) x))))
/* Evaluate 0.954929658551372*x - 0.12900613773279798*x^3 in binary64,
 * one rounding per operation, cubic built as (x*x)*x. */
double code(double x) {
    double linear_term = 0.954929658551372 * x;
    double cubic_term = 0.12900613773279798 * ((x * x) * x);
    return linear_term - cubic_term;
}
! p(x) = 0.954929658551372*x - 0.12900613773279798*x**3, every operation in real(8)
real(8) function code(x)
real(8), intent (in) :: x
real(8) :: linear_term, cubic_term
linear_term = 0.954929658551372d0 * x
cubic_term = 0.12900613773279798d0 * ((x * x) * x)
code = linear_term - cubic_term
end function
/** Evaluates the odd cubic 0.954929658551372*x - 0.12900613773279798*x^3 in double precision. */
public static double code(double x) {
    final double linearTerm = 0.954929658551372 * x;
    final double cubicTerm = 0.12900613773279798 * ((x * x) * x);
    return linearTerm - cubicTerm;
}
def code(x):
    """Evaluate 0.954929658551372*x - 0.12900613773279798*x**3 term by term."""
    linear_term = 0.954929658551372 * x
    cubic_term = 0.12900613773279798 * ((x * x) * x)
    return linear_term - cubic_term
# 0.954929658551372*x - 0.12900613773279798*x^3 with every intermediate forced to Float64.
function code(x)
    linear = Float64(0.954929658551372 * x)
    cubic = Float64(0.12900613773279798 * Float64(Float64(x * x) * x))
    return Float64(linear - cubic)
end
% code  Evaluate 0.954929658551372*x - 0.12900613773279798*x^3 in double precision.
% Fix: the original was flattened onto one line; MATLAB requires the
% function signature to end before the body starts.
function tmp = code(x)
    tmp = (0.954929658551372 * x) - (0.12900613773279798 * ((x * x) * x));
end
(* Evaluate 0.954929658551372 x - 0.12900613773279798 x^3, rounding every intermediate to $MachinePrecision to mimic machine arithmetic. *)
code[x_] := N[(N[(0.954929658551372 * x), $MachinePrecision] - N[(0.12900613773279798 * N[(N[(x * x), $MachinePrecision] * x), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
0.954929658551372 \cdot x - 0.12900613773279798 \cdot \left(\left(x \cdot x\right) \cdot x\right)
\end{array}
Sampling outcomes in binary64 precision:
Herbie found 5 alternatives:
| Alternative | Accuracy | Speedup |
|---|---|---|
(FPCore (x) :precision binary64 (- (* 0.954929658551372 x) (* 0.12900613773279798 (* (* x x) x))))
double code(double x) {
return (0.954929658551372 * x) - (0.12900613773279798 * ((x * x) * x));
}
real(8) function code(x)
real(8), intent (in) :: x
code = (0.954929658551372d0 * x) - (0.12900613773279798d0 * ((x * x) * x))
end function
public static double code(double x) {
return (0.954929658551372 * x) - (0.12900613773279798 * ((x * x) * x));
}
def code(x): return (0.954929658551372 * x) - (0.12900613773279798 * ((x * x) * x))
function code(x) return Float64(Float64(0.954929658551372 * x) - Float64(0.12900613773279798 * Float64(Float64(x * x) * x))) end
function tmp = code(x) tmp = (0.954929658551372 * x) - (0.12900613773279798 * ((x * x) * x)); end
code[x_] := N[(N[(0.954929658551372 * x), $MachinePrecision] - N[(0.12900613773279798 * N[(N[(x * x), $MachinePrecision] * x), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
0.954929658551372 \cdot x - 0.12900613773279798 \cdot \left(\left(x \cdot x\right) \cdot x\right)
\end{array}
(FPCore (x) :precision binary64 (fma (* (* x -0.12900613773279798) x) x (* 0.954929658551372 x)))
double code(double x) {
return fma(((x * -0.12900613773279798) * x), x, (0.954929658551372 * x));
}
# fma form: the outer multiply-add is rounded once; inner products stay Float64.
function code(x)
    quad = Float64(Float64(x * -0.12900613773279798) * x)
    linear = Float64(0.954929658551372 * x)
    return fma(quad, x, linear)
end
code[x_] := N[(N[(N[(x * -0.12900613773279798), $MachinePrecision] * x), $MachinePrecision] * x + N[(0.954929658551372 * x), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\mathsf{fma}\left(\left(x \cdot -0.12900613773279798\right) \cdot x, x, 0.954929658551372 \cdot x\right)
\end{array}
Initial program 99.8%
lift--.f64N/A
sub-negN/A
+-commutativeN/A
lift-*.f64N/A
lift-*.f64N/A
associate-*r*N/A
distribute-lft-neg-inN/A
lift-*.f64N/A
distribute-rgt-outN/A
lower-*.f64N/A
distribute-lft-neg-inN/A
lift-*.f64N/A
associate-*r*N/A
lower-fma.f64N/A
lower-*.f64N/A
metadata-eval99.8
Applied rewrites99.8%
Applied rewrites99.7%
lift-/.f64N/A
lift-pow.f64N/A
unpow-1N/A
remove-double-div99.8
lift-*.f64N/A
*-commutativeN/A
lift-fma.f64N/A
lift-*.f64N/A
*-commutativeN/A
distribute-rgt-inN/A
lift-*.f64N/A
lower-fma.f64N/A
*-commutativeN/A
lift-*.f64N/A
lower-*.f6499.8
lift-*.f64N/A
*-commutativeN/A
lower-*.f6499.8
Applied rewrites99.8%
Final simplification99.8%
(FPCore (x)
:precision binary64
(if (<=
(- (* 0.954929658551372 x) (* (* (* x x) x) 0.12900613773279798))
-5.0)
(* (* (* x -0.12900613773279798) x) x)
(* 0.954929658551372 x)))
/* Piecewise form: evaluate the full polynomial once to pick a branch,
 * then return only the dominant term of that regime. */
double code(double x) {
    double whole = (0.954929658551372 * x) - (((x * x) * x) * 0.12900613773279798);
    if (whole <= -5.0) {
        /* Cubic term dominates. */
        return ((x * -0.12900613773279798) * x) * x;
    }
    /* Linear term dominates. */
    return 0.954929658551372 * x;
}
! Piecewise evaluation of 0.954929658551372*x - 0.12900613773279798*x**3:
! the full polynomial is computed once to choose a branch, and each branch
! then returns only its dominant term.
real(8) function code(x)
real(8), intent (in) :: x
real(8) :: tmp
! Below -5 the cubic term dominates; otherwise the linear term does.
if (((0.954929658551372d0 * x) - (((x * x) * x) * 0.12900613773279798d0)) <= (-5.0d0)) then
tmp = ((x * (-0.12900613773279798d0)) * x) * x
else
tmp = 0.954929658551372d0 * x
end if
code = tmp
end function
/** Piecewise cubic: the full polynomial selects which single dominant term to return. */
public static double code(double x) {
    double whole = (0.954929658551372 * x) - (((x * x) * x) * 0.12900613773279798);
    return (whole <= -5.0)
            ? ((x * -0.12900613773279798) * x) * x   // cubic regime
            : 0.954929658551372 * x;                 // linear regime
}
def code(x):
    """Piecewise cubic: dominant term of 0.954929658551372*x - 0.12900613773279798*x**3.

    The full polynomial is evaluated once; when it falls to -5 or below the
    cubic term is returned alone, otherwise the linear term is.

    Fix: the original definition was flattened onto a single line, which is a
    SyntaxError in Python; restored to the intended multi-line form.
    """
    tmp = 0
    if ((0.954929658551372 * x) - (((x * x) * x) * 0.12900613773279798)) <= -5.0:
        tmp = ((x * -0.12900613773279798) * x) * x
    else:
        tmp = 0.954929658551372 * x
    return tmp
# Piecewise cubic (binary64 at every step): the full polynomial picks the branch.
# Fix: the original had its statements juxtaposed on one line with no
# separators, which Julia rejects; restored to multi-line form.
function code(x)
    tmp = 0.0
    if (Float64(Float64(0.954929658551372 * x) - Float64(Float64(Float64(x * x) * x) * 0.12900613773279798)) <= -5.0)
        tmp = Float64(Float64(Float64(x * -0.12900613773279798) * x) * x)
    else
        tmp = Float64(0.954929658551372 * x)
    end
    return tmp
end
% code  Piecewise cubic: the full polynomial picks which single term to return.
% Fix: the original was flattened onto one line; MATLAB requires the
% function signature and each statement on separate lines (or comma-separated).
function tmp_2 = code(x)
    tmp = 0.0;
    if (((0.954929658551372 * x) - (((x * x) * x) * 0.12900613773279798)) <= -5.0)
        tmp = ((x * -0.12900613773279798) * x) * x;
    else
        tmp = 0.954929658551372 * x;
    end
    tmp_2 = tmp;
end
code[x_] := If[LessEqual[N[(N[(0.954929658551372 * x), $MachinePrecision] - N[(N[(N[(x * x), $MachinePrecision] * x), $MachinePrecision] * 0.12900613773279798), $MachinePrecision]), $MachinePrecision], -5.0], N[(N[(N[(x * -0.12900613773279798), $MachinePrecision] * x), $MachinePrecision] * x), $MachinePrecision], N[(0.954929658551372 * x), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;0.954929658551372 \cdot x - \left(\left(x \cdot x\right) \cdot x\right) \cdot 0.12900613773279798 \leq -5:\\
\;\;\;\;\left(\left(x \cdot -0.12900613773279798\right) \cdot x\right) \cdot x\\
\mathbf{else}:\\
\;\;\;\;0.954929658551372 \cdot x\\
\end{array}
\end{array}
if (-.f64 (*.f64 #s(literal 238732414637843/250000000000000 binary64) x) (*.f64 #s(literal 6450306886639899/50000000000000000 binary64) (*.f64 (*.f64 x x) x))) < -5
Initial program 99.9%
lift--.f64N/A
sub-negN/A
+-commutativeN/A
lift-*.f64N/A
lift-*.f64N/A
associate-*r*N/A
distribute-lft-neg-inN/A
lift-*.f64N/A
distribute-rgt-outN/A
lower-*.f64N/A
distribute-lft-neg-inN/A
lift-*.f64N/A
associate-*r*N/A
lower-fma.f64N/A
lower-*.f64N/A
metadata-eval99.9
Applied rewrites99.9%
Taylor expanded in x around inf
*-commutativeN/A
lower-*.f64N/A
unpow2N/A
lower-*.f6498.3
Applied rewrites98.3%
lift-*.f64N/A
*-commutativeN/A
lower-*.f6498.3
Applied rewrites98.4%
if -5 < (-.f64 (*.f64 #s(literal 238732414637843/250000000000000 binary64) x) (*.f64 #s(literal 6450306886639899/50000000000000000 binary64) (*.f64 (*.f64 x x) x)))
Initial program 99.8%
Taylor expanded in x around 0
lower-*.f6461.8
Applied rewrites61.8%
Final simplification72.1%
(FPCore (x)
:precision binary64
(if (<=
(- (* 0.954929658551372 x) (* (* (* x x) x) 0.12900613773279798))
-5.0)
(* (* (* x x) -0.12900613773279798) x)
(* 0.954929658551372 x)))
/* Piecewise variant: same branch test, but the cubic branch multiplies by the
 * constant before the final x ((x*x)*-c)*x. */
double code(double x) {
    double whole = (0.954929658551372 * x) - (((x * x) * x) * 0.12900613773279798);
    if (whole <= -5.0) {
        return ((x * x) * -0.12900613773279798) * x;
    }
    return 0.954929658551372 * x;
}
real(8) function code(x)
real(8), intent (in) :: x
real(8) :: tmp
if (((0.954929658551372d0 * x) - (((x * x) * x) * 0.12900613773279798d0)) <= (-5.0d0)) then
tmp = ((x * x) * (-0.12900613773279798d0)) * x
else
tmp = 0.954929658551372d0 * x
end if
code = tmp
end function
public static double code(double x) {
double tmp;
if (((0.954929658551372 * x) - (((x * x) * x) * 0.12900613773279798)) <= -5.0) {
tmp = ((x * x) * -0.12900613773279798) * x;
} else {
tmp = 0.954929658551372 * x;
}
return tmp;
}
def code(x):
    """Piecewise cubic: below -5 return ((x*x)*-0.12900613773279798)*x, else 0.954929658551372*x.

    Fix: the original definition was flattened onto a single line, which is a
    SyntaxError in Python; restored to the intended multi-line form.
    """
    tmp = 0
    if ((0.954929658551372 * x) - (((x * x) * x) * 0.12900613773279798)) <= -5.0:
        tmp = ((x * x) * -0.12900613773279798) * x
    else:
        tmp = 0.954929658551372 * x
    return tmp
function code(x) tmp = 0.0 if (Float64(Float64(0.954929658551372 * x) - Float64(Float64(Float64(x * x) * x) * 0.12900613773279798)) <= -5.0) tmp = Float64(Float64(Float64(x * x) * -0.12900613773279798) * x); else tmp = Float64(0.954929658551372 * x); end return tmp end
function tmp_2 = code(x) tmp = 0.0; if (((0.954929658551372 * x) - (((x * x) * x) * 0.12900613773279798)) <= -5.0) tmp = ((x * x) * -0.12900613773279798) * x; else tmp = 0.954929658551372 * x; end tmp_2 = tmp; end
code[x_] := If[LessEqual[N[(N[(0.954929658551372 * x), $MachinePrecision] - N[(N[(N[(x * x), $MachinePrecision] * x), $MachinePrecision] * 0.12900613773279798), $MachinePrecision]), $MachinePrecision], -5.0], N[(N[(N[(x * x), $MachinePrecision] * -0.12900613773279798), $MachinePrecision] * x), $MachinePrecision], N[(0.954929658551372 * x), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;0.954929658551372 \cdot x - \left(\left(x \cdot x\right) \cdot x\right) \cdot 0.12900613773279798 \leq -5:\\
\;\;\;\;\left(\left(x \cdot x\right) \cdot -0.12900613773279798\right) \cdot x\\
\mathbf{else}:\\
\;\;\;\;0.954929658551372 \cdot x\\
\end{array}
\end{array}
if (-.f64 (*.f64 #s(literal 238732414637843/250000000000000 binary64) x) (*.f64 #s(literal 6450306886639899/50000000000000000 binary64) (*.f64 (*.f64 x x) x))) < -5
Initial program 99.9%
lift--.f64N/A
sub-negN/A
+-commutativeN/A
lift-*.f64N/A
lift-*.f64N/A
associate-*r*N/A
distribute-lft-neg-inN/A
lift-*.f64N/A
distribute-rgt-outN/A
lower-*.f64N/A
distribute-lft-neg-inN/A
lift-*.f64N/A
associate-*r*N/A
lower-fma.f64N/A
lower-*.f64N/A
metadata-eval99.9
Applied rewrites99.9%
Taylor expanded in x around inf
*-commutativeN/A
lower-*.f64N/A
unpow2N/A
lower-*.f6498.3
Applied rewrites98.3%
if -5 < (-.f64 (*.f64 #s(literal 238732414637843/250000000000000 binary64) x) (*.f64 #s(literal 6450306886639899/50000000000000000 binary64) (*.f64 (*.f64 x x) x)))
Initial program 99.8%
Taylor expanded in x around 0
lower-*.f6461.8
Applied rewrites61.8%
Final simplification72.1%
(FPCore (x) :precision binary64 (* (fma (* x -0.12900613773279798) x 0.954929658551372) x))
double code(double x) {
return fma((x * -0.12900613773279798), x, 0.954929658551372) * x;
}
function code(x) return Float64(fma(Float64(x * -0.12900613773279798), x, 0.954929658551372) * x) end
code[x_] := N[(N[(N[(x * -0.12900613773279798), $MachinePrecision] * x + 0.954929658551372), $MachinePrecision] * x), $MachinePrecision]
\begin{array}{l}
\\
\mathsf{fma}\left(x \cdot -0.12900613773279798, x, 0.954929658551372\right) \cdot x
\end{array}
Initial program 99.8%
lift--.f64N/A
sub-negN/A
+-commutativeN/A
lift-*.f64N/A
lift-*.f64N/A
associate-*r*N/A
distribute-lft-neg-inN/A
lift-*.f64N/A
distribute-rgt-outN/A
lower-*.f64N/A
distribute-lft-neg-inN/A
lift-*.f64N/A
associate-*r*N/A
lower-fma.f64N/A
lower-*.f64N/A
metadata-eval99.8
Applied rewrites99.8%
Final simplification99.8%
(FPCore (x) :precision binary64 (* 0.954929658551372 x))
/* Linear-only approximation: keeps just the first-order term. */
double code(double x) {
    const double slope = 0.954929658551372;
    return slope * x;
}
! Linear-only approximation: keeps just the 0.954929658551372*x term.
real(8) function code(x)
real(8), intent (in) :: x
code = 0.954929658551372d0 * x
end function
/** Linear-only approximation: just the first-order term of the cubic. */
public static double code(double x) {
    final double slope = 0.954929658551372;
    return slope * x;
}
def code(x):
    """Linear-only approximation: just the 0.954929658551372*x term."""
    slope = 0.954929658551372
    return slope * x
function code(x) return Float64(0.954929658551372 * x) end
function tmp = code(x) tmp = 0.954929658551372 * x; end
code[x_] := N[(0.954929658551372 * x), $MachinePrecision]
\begin{array}{l}
\\
0.954929658551372 \cdot x
\end{array}
Initial program 99.8%
Taylor expanded in x around 0
lower-*.f6444.5
Applied rewrites44.5%
herbie shell --seed 2024288
(FPCore (x)
:name "Rosa's Benchmark"
:precision binary64
(- (* 0.954929658551372 x) (* 0.12900613773279798 (* (* x x) x))))