
; Input program: one critical point of the cubic a*x^3 + b*x^2 + c*x + d,
; (-b + sqrt(b^2 - 3ac)) / (3a), evaluated in binary64.
(FPCore (a b c) :precision binary64 (/ (+ (- b) (sqrt (- (* b b) (* (* 3.0 a) c)))) (* 3.0 a)))
/* One critical point of the cubic a*x^3 + b*x^2 + c*x + d:
 * (-b + sqrt(b^2 - 3ac)) / (3a).  Direct translation of the FPCore input;
 * cancellation-prone when b*b >> 3*a*c (numerator subtracts near-equal values). */
double code(double a, double b, double c) {
return (-b + sqrt(((b * b) - ((3.0 * a) * c)))) / (3.0 * a);
}
! One critical point of the cubic a*x**3 + b*x**2 + c*x + d:
! (-b + sqrt(b**2 - 3*a*c)) / (3*a).  Direct translation of the FPCore input;
! cancellation-prone when b**2 >> 3*a*c.
real(8) function code(a, b, c)
real(8), intent (in) :: a
real(8), intent (in) :: b
real(8), intent (in) :: c
code = (-b + sqrt(((b * b) - ((3.0d0 * a) * c)))) / (3.0d0 * a)
end function
/** One critical point of the cubic a*x^3 + b*x^2 + c*x + d:
 *  (-b + sqrt(b*b - 3*a*c)) / (3*a).  Direct translation of the FPCore
 *  input; cancellation-prone when b*b >> 3*a*c. */
public static double code(double a, double b, double c) {
return (-b + Math.sqrt(((b * b) - ((3.0 * a) * c)))) / (3.0 * a);
}
def code(a, b, c):
    """Return one critical point of the cubic a*x**3 + b*x**2 + c*x + d.

    Computes (-b + sqrt(b*b - (3*a)*c)) / (3*a) with the exact operation
    order of the FPCore input, so results are bit-identical in binary64.
    """
    radicand = (b * b) - ((3.0 * a) * c)
    return (-b + math.sqrt(radicand)) / (3.0 * a)
# One critical point of the cubic: (-b + sqrt(b^2 - 3ac)) / (3a), with every
# intermediate explicitly rounded to Float64 to mirror the FPCore input exactly.
function code(a, b, c) return Float64(Float64(Float64(-b) + sqrt(Float64(Float64(b * b) - Float64(Float64(3.0 * a) * c)))) / Float64(3.0 * a)) end
% One critical point of the cubic: (-b + sqrt(b^2 - 3*a*c)) / (3*a).
function tmp = code(a, b, c) tmp = (-b + sqrt(((b * b) - ((3.0 * a) * c)))) / (3.0 * a); end
(* One critical point of the cubic: (-b + Sqrt[b^2 - 3 a c])/(3 a); each
   subexpression is rounded via N[..., $MachinePrecision] to mimic binary64. *)
code[a_, b_, c_] := N[(N[((-b) + N[Sqrt[N[(N[(b * b), $MachinePrecision] - N[(N[(3.0 * a), $MachinePrecision] * c), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision] / N[(3.0 * a), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{\left(-b\right) + \sqrt{b \cdot b - \left(3 \cdot a\right) \cdot c}}{3 \cdot a}
\end{array}
Sampling outcomes in binary64 precision:
Herbie found 7 alternatives:
| Alternative | Accuracy | Speedup |
|---|---|---|
; Alternative 1: identical to the input program,
; (-b + sqrt(b^2 - 3ac)) / (3a) in binary64.
(FPCore (a b c) :precision binary64 (/ (+ (- b) (sqrt (- (* b b) (* (* 3.0 a) c)))) (* 3.0 a)))
/* Alternative 1 (same as the input): one critical point of the cubic,
 * (-b + sqrt(b^2 - 3ac)) / (3a).  Cancellation-prone when b*b >> 3*a*c. */
double code(double a, double b, double c) {
return (-b + sqrt(((b * b) - ((3.0 * a) * c)))) / (3.0 * a);
}
! Alternative 1 (same as the input): one critical point of the cubic,
! (-b + sqrt(b**2 - 3*a*c)) / (3*a).
real(8) function code(a, b, c)
real(8), intent (in) :: a
real(8), intent (in) :: b
real(8), intent (in) :: c
code = (-b + sqrt(((b * b) - ((3.0d0 * a) * c)))) / (3.0d0 * a)
end function
/** Alternative 1 (same as the input): one critical point of the cubic,
 *  (-b + sqrt(b*b - 3*a*c)) / (3*a). */
public static double code(double a, double b, double c) {
return (-b + Math.sqrt(((b * b) - ((3.0 * a) * c)))) / (3.0 * a);
}
def code(a, b, c):
    """Alternative 1 (same as the input program).

    Computes (-b + sqrt(b*b - (3*a)*c)) / (3*a) with the exact operation
    order of the FPCore expression, bit-identical in binary64.
    """
    radicand = (b * b) - ((3.0 * a) * c)
    return (-b + math.sqrt(radicand)) / (3.0 * a)
# Alternative 1 (same as the input): (-b + sqrt(b^2 - 3ac)) / (3a), with every
# intermediate explicitly rounded to Float64 to mirror the FPCore exactly.
function code(a, b, c) return Float64(Float64(Float64(-b) + sqrt(Float64(Float64(b * b) - Float64(Float64(3.0 * a) * c)))) / Float64(3.0 * a)) end
% Alternative 1 (same as the input): (-b + sqrt(b^2 - 3*a*c)) / (3*a).
function tmp = code(a, b, c) tmp = (-b + sqrt(((b * b) - ((3.0 * a) * c)))) / (3.0 * a); end
(* Alternative 1 (same as the input): (-b + Sqrt[b^2 - 3 a c])/(3 a); each
   subexpression is rounded via N[..., $MachinePrecision] to mimic binary64. *)
code[a_, b_, c_] := N[(N[((-b) + N[Sqrt[N[(N[(b * b), $MachinePrecision] - N[(N[(3.0 * a), $MachinePrecision] * c), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision] / N[(3.0 * a), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{\left(-b\right) + \sqrt{b \cdot b - \left(3 \cdot a\right) \cdot c}}{3 \cdot a}
\end{array}
; Alternative: conjugate (rationalized) form; algebraically equals
; -c / (b + sqrt(b^2 - 3ac)).  fma computes b^2 - 3ac with one rounding.
(FPCore (a b c) :precision binary64 (/ (- (- (* b b) (* b b)) (* c (* a -3.0))) (* (* a -3.0) (+ b (sqrt (fma c (* a -3.0) (* b b)))))))
/* Conjugate (rationalized) form of (-b + sqrt(b^2-3ac))/(3a); equals
 * -c / (b + sqrt(b^2 - 3ac)) in exact arithmetic, avoiding the cancelling
 * -b + sqrt(...).  The (b*b) - (b*b) term is algebraically zero but is
 * kept verbatim from the Herbie-generated FPCore.  fma(c, a*-3.0, b*b)
 * computes b^2 - 3ac with a single rounding. */
double code(double a, double b, double c) {
return (((b * b) - (b * b)) - (c * (a * -3.0))) / ((a * -3.0) * (b + sqrt(fma(c, (a * -3.0), (b * b)))));
}
# Conjugate (rationalized) form, algebraically -c/(b + sqrt(b^2 - 3ac));
# every intermediate is explicitly rounded to Float64 to mirror the FPCore.
function code(a, b, c) return Float64(Float64(Float64(Float64(b * b) - Float64(b * b)) - Float64(c * Float64(a * -3.0))) / Float64(Float64(a * -3.0) * Float64(b + sqrt(fma(c, Float64(a * -3.0), Float64(b * b)))))) end
(* Conjugate (rationalized) form, algebraically -c/(b + Sqrt[b^2 - 3 a c]);
   rounded stepwise via N[..., $MachinePrecision] to mimic binary64. *)
code[a_, b_, c_] := N[(N[(N[(N[(b * b), $MachinePrecision] - N[(b * b), $MachinePrecision]), $MachinePrecision] - N[(c * N[(a * -3.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / N[(N[(a * -3.0), $MachinePrecision] * N[(b + N[Sqrt[N[(c * N[(a * -3.0), $MachinePrecision] + N[(b * b), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{\left(b \cdot b - b \cdot b\right) - c \cdot \left(a \cdot -3\right)}{\left(a \cdot -3\right) \cdot \left(b + \sqrt{\mathsf{fma}\left(c, a \cdot -3, b \cdot b\right)}\right)}
\end{array}
Initial program: 30.2%
Applied egg-rr: 30.1%
associate-/l/: N/A
flip--: N/A
associate-/l/: N/A
/-lowering-/.f64: N/A
Applied egg-rr: 31.2%
+-commutative: N/A
associate--r+: N/A
--lowering--.f64: N/A
--lowering--.f64: N/A
*-lowering-*.f64: N/A
*-lowering-*.f64: N/A
*-lowering-*.f64: N/A
*-lowering-*.f64: 99.4
Applied egg-rr: 99.4%
; Alternative: simplified conjugate form 3ac / (-3a * (b + sqrt(b^2 - 3ac))),
; i.e. -c / (b + sqrt(b^2 - 3ac)) in exact arithmetic.
(FPCore (a b c) :precision binary64 (/ (* 3.0 (* c a)) (* (* a -3.0) (+ b (sqrt (fma c (* a -3.0) (* b b)))))))
/* Simplified conjugate form: 3ac / (-3a * (b + sqrt(b^2 - 3ac))), i.e.
 * -c / (b + sqrt(b^2 - 3ac)) in exact arithmetic — avoids the cancelling
 * -b + sqrt(...).  fma(c, a*-3.0, b*b) computes b^2 - 3ac in one rounding. */
double code(double a, double b, double c) {
return (3.0 * (c * a)) / ((a * -3.0) * (b + sqrt(fma(c, (a * -3.0), (b * b)))));
}
# Simplified conjugate form 3ac / (-3a*(b + sqrt(b^2 - 3ac))); every
# intermediate explicitly rounded to Float64 to mirror the FPCore.
function code(a, b, c) return Float64(Float64(3.0 * Float64(c * a)) / Float64(Float64(a * -3.0) * Float64(b + sqrt(fma(c, Float64(a * -3.0), Float64(b * b)))))) end
(* Simplified conjugate form 3 a c / (-3 a (b + Sqrt[b^2 - 3 a c]));
   rounded stepwise via N[..., $MachinePrecision] to mimic binary64. *)
code[a_, b_, c_] := N[(N[(3.0 * N[(c * a), $MachinePrecision]), $MachinePrecision] / N[(N[(a * -3.0), $MachinePrecision] * N[(b + N[Sqrt[N[(c * N[(a * -3.0), $MachinePrecision] + N[(b * b), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{3 \cdot \left(c \cdot a\right)}{\left(a \cdot -3\right) \cdot \left(b + \sqrt{\mathsf{fma}\left(c, a \cdot -3, b \cdot b\right)}\right)}
\end{array}
Initial program: 30.2%
Applied egg-rr: 30.1%
associate-/l/: N/A
flip--: N/A
associate-/l/: N/A
/-lowering-/.f64: N/A
Applied egg-rr: 31.2%
Taylor expanded in b around 0
*-lowering-*.f64: N/A
*-lowering-*.f64: 99.1
Simplified: 99.1%
Final simplification: 99.1%
; Alternative: two-term series in 1/b (expansion around b = inf):
; -c/(2b) - 3*a*c^2/(8*b^3); note -0.375 = -3/8, -0.5 = -1/2.
(FPCore (a b c) :precision binary64 (fma a (/ (* c (* c -0.375)) (* b (* b b))) (/ (* c -0.5) b)))
/* Two-term series in 1/b (Taylor expansion around b = inf):
 * -c/(2b) - 3*a*c^2/(8*b^3).  Accurate when b*b >> 3*a*c. */
double code(double a, double b, double c) {
return fma(a, ((c * (c * -0.375)) / (b * (b * b))), ((c * -0.5) / b));
}
# Two-term series around b = inf: -c/(2b) - 3ac^2/(8b^3); intermediates
# explicitly rounded to Float64 to mirror the FPCore.
function code(a, b, c) return fma(a, Float64(Float64(c * Float64(c * -0.375)) / Float64(b * Float64(b * b))), Float64(Float64(c * -0.5) / b)) end
(* Two-term series around b = inf: -c/(2 b) - 3 a c^2/(8 b^3);
   rounded stepwise via N[..., $MachinePrecision] to mimic binary64. *)
code[a_, b_, c_] := N[(a * N[(N[(c * N[(c * -0.375), $MachinePrecision]), $MachinePrecision] / N[(b * N[(b * b), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] + N[(N[(c * -0.5), $MachinePrecision] / b), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\mathsf{fma}\left(a, \frac{c \cdot \left(c \cdot -0.375\right)}{b \cdot \left(b \cdot b\right)}, \frac{c \cdot -0.5}{b}\right)
\end{array}
Initial program: 30.2%
Taylor expanded in b around inf
Simplified: 96.5%
Taylor expanded in a around 0
+-commutative: N/A
*-commutative: N/A
associate-/l*: N/A
associate-*r*: N/A
*-commutative: N/A
accelerator-lowering-fma.f64: N/A
associate-*r/: N/A
/-lowering-/.f64: N/A
*-commutative: N/A
unpow2: N/A
associate-*l*: N/A
*-lowering-*.f64: N/A
*-lowering-*.f64: N/A
cube-mult: N/A
unpow2: N/A
*-lowering-*.f64: N/A
unpow2: N/A
*-lowering-*.f64: N/A
associate-*r/: N/A
/-lowering-/.f64: N/A
*-lowering-*.f64: 92.2
Simplified: 92.2%
Final simplification: 92.2%
; Alternative: same two-term series around b = inf, factored by 1/b:
; (-3*a*c^2/(8*b^2) - c/2) / b.
(FPCore (a b c) :precision binary64 (/ (fma a (/ (* -0.375 (* c c)) (* b b)) (* c -0.5)) b))
/* Two-term series around b = inf, factored by 1/b:
 * (-3*a*c^2/(8*b^2) - c/2) / b == -c/(2b) - 3*a*c^2/(8*b^3). */
double code(double a, double b, double c) {
return fma(a, ((-0.375 * (c * c)) / (b * b)), (c * -0.5)) / b;
}
# Two-term series around b = inf, factored by 1/b: (-3ac^2/(8b^2) - c/2)/b;
# intermediates explicitly rounded to Float64 to mirror the FPCore.
function code(a, b, c) return Float64(fma(a, Float64(Float64(-0.375 * Float64(c * c)) / Float64(b * b)), Float64(c * -0.5)) / b) end
(* Two-term series around b = inf, factored by 1/b: (-3 a c^2/(8 b^2) - c/2)/b;
   rounded stepwise via N[..., $MachinePrecision] to mimic binary64. *)
code[a_, b_, c_] := N[(N[(a * N[(N[(-0.375 * N[(c * c), $MachinePrecision]), $MachinePrecision] / N[(b * b), $MachinePrecision]), $MachinePrecision] + N[(c * -0.5), $MachinePrecision]), $MachinePrecision] / b), $MachinePrecision]
\begin{array}{l}
\\
\frac{\mathsf{fma}\left(a, \frac{-0.375 \cdot \left(c \cdot c\right)}{b \cdot b}, c \cdot -0.5\right)}{b}
\end{array}
Initial program: 30.2%
Taylor expanded in b around inf
/-lowering-/.f64: N/A
Simplified: 92.2%
Final simplification: 92.2%
; Alternative: same two-term series, factored as c * (...) / b:
; c * (-3*a*c/(8*b^2) - 1/2) / b.
(FPCore (a b c) :precision binary64 (/ (* c (fma -0.375 (* c (/ a (* b b))) -0.5)) b))
/* Two-term series around b = inf, factored as c*(...)/b:
 * c * (-3*a*c/(8*b^2) - 1/2) / b == -c/(2b) - 3*a*c^2/(8*b^3). */
double code(double a, double b, double c) {
return (c * fma(-0.375, (c * (a / (b * b))), -0.5)) / b;
}
# Two-term series around b = inf, factored as c*(-3ac/(8b^2) - 1/2)/b;
# intermediates explicitly rounded to Float64 to mirror the FPCore.
function code(a, b, c) return Float64(Float64(c * fma(-0.375, Float64(c * Float64(a / Float64(b * b))), -0.5)) / b) end
(* Two-term series around b = inf, factored as c (-3 a c/(8 b^2) - 1/2)/b;
   rounded stepwise via N[..., $MachinePrecision] to mimic binary64. *)
code[a_, b_, c_] := N[(N[(c * N[(-0.375 * N[(c * N[(a / N[(b * b), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] + -0.5), $MachinePrecision]), $MachinePrecision] / b), $MachinePrecision]
\begin{array}{l}
\\
\frac{c \cdot \mathsf{fma}\left(-0.375, c \cdot \frac{a}{b \cdot b}, -0.5\right)}{b}
\end{array}
Initial program: 30.2%
Taylor expanded in b around inf
Simplified: 96.5%
Taylor expanded in a around 0
+-commutative: N/A
associate-*r/: N/A
associate-*r*: N/A
associate-*l/: N/A
associate-*r/: N/A
unpow2: N/A
associate-*r*: N/A
associate-*r/: N/A
associate-*l/: N/A
associate-*r*: N/A
associate-*r/: N/A
distribute-rgt-in: N/A
distribute-lft-in: N/A
Simplified: 92.1%
; Alternative: same two-term series, factored as c * ((...)/b):
; c * ((-3*a*c/(8*b^2) - 1/2) / b).
(FPCore (a b c) :precision binary64 (* c (/ (fma -0.375 (* a (/ c (* b b))) -0.5) b)))
/* Two-term series around b = inf, factored as c*((...)/b):
 * c * ((-3*a*c/(8*b^2) - 1/2) / b) == -c/(2b) - 3*a*c^2/(8*b^3). */
double code(double a, double b, double c) {
return c * (fma(-0.375, (a * (c / (b * b))), -0.5) / b);
}
# Two-term series around b = inf, factored as c*((-3ac/(8b^2) - 1/2)/b);
# intermediates explicitly rounded to Float64 to mirror the FPCore.
function code(a, b, c) return Float64(c * Float64(fma(-0.375, Float64(a * Float64(c / Float64(b * b))), -0.5) / b)) end
(* Two-term series around b = inf, factored as c ((-3 a c/(8 b^2) - 1/2)/b);
   rounded stepwise via N[..., $MachinePrecision] to mimic binary64. *)
code[a_, b_, c_] := N[(c * N[(N[(-0.375 * N[(a * N[(c / N[(b * b), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] + -0.5), $MachinePrecision] / b), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
c \cdot \frac{\mathsf{fma}\left(-0.375, a \cdot \frac{c}{b \cdot b}, -0.5\right)}{b}
\end{array}
Initial program: 30.2%
Taylor expanded in c around 0
Simplified: 94.8%
Taylor expanded in b around inf
/-lowering-/.f64: N/A
sub-neg: N/A
metadata-eval: N/A
accelerator-lowering-fma.f64: N/A
associate-/l*: N/A
*-lowering-*.f64: N/A
/-lowering-/.f64: N/A
unpow2: N/A
*-lowering-*.f64: 91.9
Simplified: 91.9%
; Alternative: leading-order term only, -c/(2b) (series around b = inf);
; the parameter a is unused.
(FPCore (a b c) :precision binary64 (* -0.5 (/ c b)))
/* Leading-order approximation -c/(2b) (series around b = inf);
 * the parameter a is unused in this alternative. */
double code(double a, double b, double c) {
return -0.5 * (c / b);
}
! Leading-order approximation -c/(2*b) (series around b = infinity);
! the parameter a is unused in this alternative.
real(8) function code(a, b, c)
real(8), intent (in) :: a
real(8), intent (in) :: b
real(8), intent (in) :: c
code = (-0.5d0) * (c / b)
end function
/** Leading-order approximation -c/(2b) (series around b = inf);
 *  the parameter a is unused in this alternative. */
public static double code(double a, double b, double c) {
return -0.5 * (c / b);
}
def code(a, b, c):
    """Leading-order approximation -c/(2b) (series around b = inf).

    The parameter ``a`` is unused in this alternative; operation order
    matches the FPCore expression exactly (binary64 throughout).
    """
    ratio = c / b
    return -0.5 * ratio
# Leading-order approximation -c/(2b); the quotient is explicitly rounded
# to Float64 to mirror the FPCore.  Parameter a is unused.
function code(a, b, c) return Float64(-0.5 * Float64(c / b)) end
% Leading-order approximation -c/(2*b); the parameter a is unused.
function tmp = code(a, b, c) tmp = -0.5 * (c / b); end
(* Leading-order approximation -c/(2 b); rounded via N[..., $MachinePrecision]
   to mimic binary64.  The parameter a is unused. *)
code[a_, b_, c_] := N[(-0.5 * N[(c / b), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
-0.5 \cdot \frac{c}{b}
\end{array}
Initial program: 30.2%
Taylor expanded in b around inf
*-lowering-*.f64: N/A
/-lowering-/.f64: 82.5
Simplified: 82.5%
herbie shell --seed 2024204
; Full problem specification given to Herbie: one critical point of a cubic,
; (-b + sqrt(b^2 - 3ac)) / (3a), with each of a, b, c constrained to the
; open interval (2^-53, 2^53) — the "medium range" of the benchmark name.
(FPCore (a b c)
:name "Cubic critical, medium range"
:precision binary64
:pre (and (and (and (< 1.1102230246251565e-16 a) (< a 9007199254740992.0)) (and (< 1.1102230246251565e-16 b) (< b 9007199254740992.0))) (and (< 1.1102230246251565e-16 c) (< c 9007199254740992.0)))
(/ (+ (- b) (sqrt (- (* b b) (* (* 3.0 a) c)))) (* 3.0 a)))