
(FPCore (x) :precision binary64 (* (* x x) (- 3.0 (* x 2.0))))
double code(double x) {
    /* x^2 * (3 - 2x), evaluated as in the original Herbie source program. */
    double square = x * x;
    double tail = 3.0 - 2.0 * x;
    return square * tail;
}
! Evaluate x**2 * (3 - 2x) in double precision (Herbie original program).
real(8) function code(x)
    implicit none
    real(8), intent (in) :: x
    code = (x * x) * (3.0d0 - (x * 2.0d0))
end function
public static double code(double x) {
    // x^2 * (3 - 2x)
    final double square = x * x;
    return square * (3.0 - 2.0 * x);
}
def code(x):
    """Return x*x * (3 - 2x), matching the Herbie source expression."""
    square = x * x
    return square * (3.0 - 2.0 * x)
function code(x)
    # x^2 * (3 - 2x); every intermediate explicitly rounded to Float64.
    sq = Float64(x * x)
    tail = Float64(3.0 - Float64(x * 2.0))
    return Float64(sq * tail)
end
function tmp = code(x)
    % x^2 * (3 - 2x)
    sq = x * x;
    tmp = sq * (3.0 - (x * 2.0));
end
(* Original program: x^2 * (3 - 2 x); N[..., $MachinePrecision] mirrors per-operation binary64 rounding. *)
code[x_] := N[(N[(x * x), $MachinePrecision] * N[(3.0 - N[(x * 2.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\left(x \cdot x\right) \cdot \left(3 - x \cdot 2\right)
\end{array}
Sampling outcomes in binary64 precision:
Herbie found 7 alternatives:
| Alternative | Accuracy | Speedup |
|---|---|---|
(FPCore (x) :precision binary64 (* (* x x) (- 3.0 (* x 2.0))))
double code(double x) {
    /* Original program repeated: x^2 * (3 - 2x). */
    double sq = x * x;
    return sq * (3.0 - 2.0 * x);
}
! x**2 * (3 - 2x) in double precision (original program, repeated in report).
real(8) function code(x)
    implicit none
    real(8), intent (in) :: x
    code = (x * x) * (3.0d0 - (x * 2.0d0))
end function
public static double code(double x) {
    // Original program: x^2 * (3 - 2x).
    final double twoX = 2.0 * x;
    return (x * x) * (3.0 - twoX);
}
def code(x):
    """Evaluate x*x*(3 - 2x), same expression tree as the original."""
    two_x = x * 2.0
    return (x * x) * (3.0 - two_x)
# Original program: x^2 * (3 - 2x), with explicit Float64 rounding per operation.
function code(x) return Float64(Float64(x * x) * Float64(3.0 - Float64(x * 2.0))) end
% Original program: x^2 * (3 - 2x).
function tmp = code(x) tmp = (x * x) * (3.0 - (x * 2.0)); end
(* Original program: x^2 * (3 - 2 x), machine-precision rounding per operation. *)
code[x_] := N[(N[(x * x), $MachinePrecision] * N[(3.0 - N[(x * 2.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\left(x \cdot x\right) \cdot \left(3 - x \cdot 2\right)
\end{array}
(FPCore (x) :precision binary64 (* (fma x -2.0 3.0) (* x x)))
double code(double x) {
return fma(x, -2.0, 3.0) * (x * x);
}
# Alternative: fma(x, -2, 3) * x^2 — fused multiply-add avoids one rounding in 3 - 2x.
function code(x) return Float64(fma(x, -2.0, 3.0) * Float64(x * x)) end
(* Alternative: (x * -2 + 3) * x^2; Mathematica has no fma, so the linear part rounds once here. *)
code[x_] := N[(N[(x * -2.0 + 3.0), $MachinePrecision] * N[(x * x), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\mathsf{fma}\left(x, -2, 3\right) \cdot \left(x \cdot x\right)
\end{array}
Initial program 99.9%
*-commutative N/A
*-lowering-*.f64 N/A
sub-neg N/A
+-commutative N/A
distribute-rgt-neg-in N/A
accelerator-lowering-fma.f64 N/A
metadata-eval N/A
*-lowering-*.f64 99.9
Applied egg-rr 99.9%
(FPCore (x) :precision binary64 (let* ((t_0 (* (* x x) (* x -2.0)))) (if (<= x -1.5) t_0 (if (<= x 1.5) (* 3.0 (* x x)) t_0))))
double code(double x) {
    /* Piecewise approximation: 3x^2 near zero, cubic tail -2x^3 otherwise.
     * NaN falls through both comparisons into the tail branch, as in the
     * original if/else-if/else chain. */
    if (x > -1.5 && x <= 1.5) {
        return 3.0 * (x * x);
    }
    return (x * x) * (x * -2.0);
}
! Piecewise alternative: -2*x**3 in the tails (x <= -1.5 or x > 1.5),
! 3*x**2 on the middle interval.
real(8) function code(x)
    implicit none
    real(8), intent (in) :: x
    real(8) :: t_0
    real(8) :: tmp
    t_0 = (x * x) * (x * (-2.0d0))
    if (x <= (-1.5d0)) then
        tmp = t_0
    else if (x <= 1.5d0) then
        tmp = 3.0d0 * (x * x)
    else
        tmp = t_0
    end if
    code = tmp
end function
public static double code(double x) {
    // Piecewise: 3x^2 on (-1.5, 1.5], cubic tail -2x^3 otherwise.
    // NaN fails both comparisons and takes the tail branch, like the original.
    if (x > -1.5 && x <= 1.5) {
        return 3.0 * (x * x);
    }
    return (x * x) * (x * -2.0);
}
def code(x):
    """Piecewise: -2*x**3 for x <= -1.5 or x > 1.5, else 3*x**2.

    Reformatted: the report collapsed this function onto one line,
    which is not valid Python.
    """
    t_0 = (x * x) * (x * -2.0)
    if x <= -1.5:
        return t_0
    if x <= 1.5:
        return 3.0 * (x * x)
    return t_0
# Piecewise: -2x^3 in the tails, 3x^2 on (-1.5, 1.5].
# Reformatted: the report collapsed the body onto one line, which does not parse.
function code(x)
    t_0 = Float64(Float64(x * x) * Float64(x * -2.0))
    if x <= -1.5
        return t_0
    elseif x <= 1.5
        return Float64(3.0 * Float64(x * x))
    else
        return t_0
    end
end
function tmp_2 = code(x)
    % Piecewise: -2*x^3 in the tails, 3*x^2 on (-1.5, 1.5].
    % Reformatted: the report collapsed the body onto one line.
    t_0 = (x * x) * (x * -2.0);
    if (x <= -1.5)
        tmp = t_0;
    elseif (x <= 1.5)
        tmp = 3.0 * (x * x);
    else
        tmp = t_0;
    end
    tmp_2 = tmp;
end
(* Piecewise alternative: cubic tail -2 x^3 when x <= -1.5 or x > 1.5, else 3 x^2. *)
code[x_] := Block[{t$95$0 = N[(N[(x * x), $MachinePrecision] * N[(x * -2.0), $MachinePrecision]), $MachinePrecision]}, If[LessEqual[x, -1.5], t$95$0, If[LessEqual[x, 1.5], N[(3.0 * N[(x * x), $MachinePrecision]), $MachinePrecision], t$95$0]]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := \left(x \cdot x\right) \cdot \left(x \cdot -2\right)\\
\mathbf{if}\;x \leq -1.5:\\
\;\;\;\;t\_0\\
\mathbf{elif}\;x \leq 1.5:\\
\;\;\;\;3 \cdot \left(x \cdot x\right)\\
\mathbf{else}:\\
\;\;\;\;t\_0\\
\end{array}
\end{array}
if x < -1.5 or 1.5 < x: Initial program 99.9%
Taylor expanded in x around inf
*-lowering-*.f64 97.2
Simplified 97.2%
if -1.5 < x < 1.5: Initial program 99.8%
Taylor expanded in x around 0
Simplified 96.6%
Final simplification 96.9%
(FPCore (x) :precision binary64 (* x (* x (fma x -2.0 3.0))))
double code(double x) {
return x * (x * fma(x, -2.0, 3.0));
}
# Alternative: x * (x * fma(x, -2, 3)) — right-associated with a fused 3 - 2x.
function code(x) return Float64(x * Float64(x * fma(x, -2.0, 3.0))) end
(* Alternative: x * (x * (x * -2 + 3)), machine-precision rounding per operation. *)
code[x_] := N[(x * N[(x * N[(x * -2.0 + 3.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
x \cdot \left(x \cdot \mathsf{fma}\left(x, -2, 3\right)\right)
\end{array}
Initial program 99.9%
associate-*l* N/A
*-commutative N/A
*-lowering-*.f64 N/A
*-lowering-*.f64 N/A
sub-neg N/A
+-commutative N/A
distribute-rgt-neg-in N/A
accelerator-lowering-fma.f64 N/A
metadata-eval 99.8
Applied egg-rr 99.8%
Final simplification 99.8%
(FPCore (x) :precision binary64 (* 3.0 (* x x)))
double code(double x) {
    /* Leading Taylor term about x = 0: 3x^2. */
    double square = x * x;
    return 3.0 * square;
}
! Leading Taylor term about x = 0: 3*x**2.
real(8) function code(x)
    implicit none
    real(8), intent (in) :: x
    code = 3.0d0 * (x * x)
end function
public static double code(double x) {
    // Leading Taylor term about 0: 3x^2.
    final double square = x * x;
    return 3.0 * square;
}
def code(x):
    """Leading Taylor term about 0: 3 * x**2."""
    square = x * x
    return 3.0 * square
# Leading Taylor term about 0: 3x^2.
function code(x) return Float64(3.0 * Float64(x * x)) end
% Leading Taylor term about 0: 3*x^2.
function tmp = code(x) tmp = 3.0 * (x * x); end
(* Leading Taylor term about 0: 3 x^2. *)
code[x_] := N[(3.0 * N[(x * x), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
3 \cdot \left(x \cdot x\right)
\end{array}
Initial program 99.9%
Taylor expanded in x around 0
Simplified 64.5%
Final simplification 64.5%
(FPCore (x) :precision binary64 (* x (* x 3.0)))
double code(double x) {
    /* 3x^2 evaluated as x * (3x). */
    double scaled = x * 3.0;
    return x * scaled;
}
! 3*x**2 evaluated as x * (x * 3).
real(8) function code(x)
    implicit none
    real(8), intent (in) :: x
    code = x * (x * 3.0d0)
end function
public static double code(double x) {
    // 3x^2 evaluated as x * (3x).
    final double tripled = 3.0 * x;
    return x * tripled;
}
def code(x):
    """3 * x**2 evaluated as x * (x * 3)."""
    tripled = x * 3.0
    return x * tripled
# 3x^2 evaluated as x * (x * 3).
function code(x) return Float64(x * Float64(x * 3.0)) end
% 3*x^2 evaluated as x * (x * 3).
function tmp = code(x) tmp = x * (x * 3.0); end
(* 3 x^2 evaluated as x * (x * 3). *)
code[x_] := N[(x * N[(x * 3.0), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
x \cdot \left(x \cdot 3\right)
\end{array}
Initial program 99.9%
Taylor expanded in x around 0
metadata-eval N/A
lft-mult-inverse N/A
associate-*r* N/A
*-commutative N/A
associate-*r* N/A
associate-*l* N/A
*-lowering-*.f64 N/A
unpow2 N/A
associate-*r* N/A
remove-double-neg N/A
distribute-lft-neg-out N/A
*-commutative N/A
distribute-lft-neg-out N/A
mul-1-neg N/A
*-commutative N/A
*-lowering-*.f64 N/A
*-commutative N/A
distribute-lft-neg-in N/A
associate-*r* N/A
Simplified 64.5%
(FPCore (x) :precision binary64 (fma x -13.5 6.75))
double code(double x) {
return fma(x, -13.5, 6.75);
}
# Linear model 6.75 - 13.5x via a single fused multiply-add.
function code(x) return fma(x, -13.5, 6.75) end
(* Linear model 6.75 - 13.5 x. *)
code[x_] := N[(x * -13.5 + 6.75), $MachinePrecision]
\begin{array}{l}
\\
\mathsf{fma}\left(x, -13.5, 6.75\right)
\end{array}
Initial program 99.9%
sub-neg N/A
+-commutative N/A
neg-sub0 N/A
associate-+l- N/A
flip3-- N/A
/-lowering-/.f64 N/A
metadata-eval N/A
--lowering--.f64 N/A
pow-lowering-pow.f64 N/A
sub-neg N/A
accelerator-lowering-fma.f64 N/A
metadata-eval N/A
metadata-eval N/A
+-lowering-+.f64 N/A
accelerator-lowering-fma.f64 N/A
Applied egg-rr 70.2%
Taylor expanded in x around inf
*-commutative N/A
*-lowering-*.f64 N/A
unpow2 N/A
*-lowering-*.f64 24.2
Simplified 24.2%
Taylor expanded in x around 0
+-commutative N/A
*-commutative N/A
accelerator-lowering-fma.f64 5.6
Simplified 5.6%
(FPCore (x) :precision binary64 6.75)
double code(double x) {
    /* Constant fold produced by Herbie's Taylor simplification. */
    (void)x; /* parameter intentionally unused */
    return 6.75;
}
! Constant approximation produced by Herbie's Taylor simplification.
real(8) function code(x)
    implicit none
    real(8), intent (in) :: x
    code = 6.75d0
end function
public static double code(double x) {
    // Constant approximation produced by Herbie; x is intentionally unused.
    return 6.75;
}
def code(x):
    """Constant approximation chosen by Herbie; x is intentionally unused."""
    return 6.75
# Constant approximation chosen by Herbie; x is intentionally unused.
function code(x) return 6.75 end
% Constant approximation chosen by Herbie; x is intentionally unused.
function tmp = code(x) tmp = 6.75; end
(* Constant approximation chosen by Herbie. *)
code[x_] := 6.75
\begin{array}{l}
\\
6.75
\end{array}
Initial program 99.9%
sub-neg N/A
+-commutative N/A
neg-sub0 N/A
associate-+l- N/A
flip3-- N/A
/-lowering-/.f64 N/A
metadata-eval N/A
--lowering--.f64 N/A
pow-lowering-pow.f64 N/A
sub-neg N/A
accelerator-lowering-fma.f64 N/A
metadata-eval N/A
metadata-eval N/A
+-lowering-+.f64 N/A
accelerator-lowering-fma.f64 N/A
Applied egg-rr 70.2%
Taylor expanded in x around inf
*-commutative N/A
*-lowering-*.f64 N/A
unpow2 N/A
*-lowering-*.f64 24.2
Simplified 24.2%
Taylor expanded in x around 0
Simplified 3.5%
(FPCore (x) :precision binary64 (* x (* x (- 3.0 (* x 2.0)))))
double code(double x) {
    /* Fully right-associated product: x * (x * (3 - 2x)). */
    double tail = 3.0 - 2.0 * x;
    return x * (x * tail);
}
! Fully right-associated product: x * (x * (3 - 2x)).
real(8) function code(x)
    implicit none
    real(8), intent (in) :: x
    code = x * (x * (3.0d0 - (x * 2.0d0)))
end function
public static double code(double x) {
    // Fully right-associated product: x * (x * (3 - 2x)).
    final double tail = 3.0 - 2.0 * x;
    return x * (x * tail);
}
def code(x):
    """Fully right-associated product: x * (x * (3 - 2x))."""
    tail = 3.0 - x * 2.0
    return x * (x * tail)
# Fully right-associated product: x * (x * (3 - 2x)), Float64 rounding per operation.
function code(x) return Float64(x * Float64(x * Float64(3.0 - Float64(x * 2.0)))) end
% Fully right-associated product: x * (x * (3 - 2x)).
function tmp = code(x) tmp = x * (x * (3.0 - (x * 2.0))); end
(* Fully right-associated product: x * (x * (3 - 2 x)). *)
code[x_] := N[(x * N[(x * N[(3.0 - N[(x * 2.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
x \cdot \left(x \cdot \left(3 - x \cdot 2\right)\right)
\end{array}
herbie shell --seed 2024204
(FPCore (x)
:name "Data.Spline.Key:interpolateKeys from smoothie-0.4.0.2"
:precision binary64
:alt
(! :herbie-platform default (* x (* x (- 3 (* x 2)))))
(* (* x x) (- 3.0 (* x 2.0))))