
(FPCore (a b c) :precision binary64 (/ (+ (- b) (sqrt (- (* b b) (* (* 3.0 a) c)))) (* 3.0 a)))
double code(double a, double b, double c) {
return (-b + sqrt(((b * b) - ((3.0 * a) * c)))) / (3.0 * a);
}
! Critical point of the cubic a*x^3 + b*x^2 + c*x: '+' branch of the
! quadratic formula applied to the derivative 3a*x^2 + 2b*x + c.
real(8) function code(a, b, c)
  implicit none   ! added: forbid implicitly typed names
  real(8), intent (in) :: a
  real(8), intent (in) :: b
  real(8), intent (in) :: c
  code = (-b + sqrt(((b * b) - ((3.0d0 * a) * c)))) / (3.0d0 * a)
end function
/** Critical point of the cubic a*x^3 + b*x^2 + c*x ('+' quadratic branch). */
public static double code(double a, double b, double c) {
    final double radicand = (b * b) - ((3.0 * a) * c);
    final double root = Math.sqrt(radicand);
    return (root - b) / (3.0 * a);
}
def code(a, b, c):
    """Critical point of the cubic a*x**3 + b*x**2 + c*x ('+' quadratic branch)."""
    radicand = (b * b) - ((3.0 * a) * c)
    return (math.sqrt(radicand) - b) / (3.0 * a)
# Initial program in Julia; every intermediate is explicitly rounded to Float64.
function code(a, b, c) return Float64(Float64(Float64(-b) + sqrt(Float64(Float64(b * b) - Float64(Float64(3.0 * a) * c)))) / Float64(3.0 * a)) end
% Initial program in MATLAB: (-b + sqrt(b^2 - 3ac)) / (3a).
function tmp = code(a, b, c) tmp = (-b + sqrt(((b * b) - ((3.0 * a) * c)))) / (3.0 * a); end
(* Initial program in Mathematica; each intermediate rounded via N[..., $MachinePrecision] to mimic binary64. *)
code[a_, b_, c_] := N[(N[((-b) + N[Sqrt[N[(N[(b * b), $MachinePrecision] - N[(N[(3.0 * a), $MachinePrecision] * c), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision] / N[(3.0 * a), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{\left(-b\right) + \sqrt{b \cdot b - \left(3 \cdot a\right) \cdot c}}{3 \cdot a}
\end{array}
Sampling outcomes in binary64 precision:
Herbie found 6 alternatives:
| Alternative | Accuracy | Speedup |
|---|---|---|
(FPCore (a b c) :precision binary64 (/ (+ (- b) (sqrt (- (* b b) (* (* 3.0 a) c)))) (* 3.0 a)))
/* Duplicate listing of the initial program: (-b + sqrt(b^2 - 3ac)) / (3a). */
double code(double a, double b, double c) {
return (-b + sqrt(((b * b) - ((3.0 * a) * c)))) / (3.0 * a);
}
! Duplicate listing of the initial program: (-b + sqrt(b^2 - 3ac)) / (3a).
real(8) function code(a, b, c)
  implicit none   ! added: forbid implicitly typed names
  real(8), intent (in) :: a
  real(8), intent (in) :: b
  real(8), intent (in) :: c
  code = (-b + sqrt(((b * b) - ((3.0d0 * a) * c)))) / (3.0d0 * a)
end function
/** Duplicate listing of the initial program: (-b + sqrt(b^2 - 3ac)) / (3a). */
public static double code(double a, double b, double c) {
return (-b + Math.sqrt(((b * b) - ((3.0 * a) * c)))) / (3.0 * a);
}
# Duplicate listing of the initial program: (-b + sqrt(b^2 - 3ac)) / (3a).
def code(a, b, c): return (-b + math.sqrt(((b * b) - ((3.0 * a) * c)))) / (3.0 * a)
# Duplicate listing of the initial program; Float64 rounding made explicit at each step.
function code(a, b, c) return Float64(Float64(Float64(-b) + sqrt(Float64(Float64(b * b) - Float64(Float64(3.0 * a) * c)))) / Float64(3.0 * a)) end
% Duplicate listing of the initial program: (-b + sqrt(b^2 - 3ac)) / (3a).
function tmp = code(a, b, c) tmp = (-b + sqrt(((b * b) - ((3.0 * a) * c)))) / (3.0 * a); end
(* Duplicate listing of the initial program with machine-precision rounding at each step. *)
code[a_, b_, c_] := N[(N[((-b) + N[Sqrt[N[(N[(b * b), $MachinePrecision] - N[(N[(3.0 * a), $MachinePrecision] * c), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision] / N[(3.0 * a), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{\left(-b\right) + \sqrt{b \cdot b - \left(3 \cdot a\right) \cdot c}}{3 \cdot a}
\end{array}
(FPCore (a b c) :precision binary64 (/ (* -0.3333333333333333 (/ c a)) (* (+ (sqrt (fma (* c -3.0) a (* b b))) b) (/ 0.3333333333333333 a))))
double code(double a, double b, double c) {
return (-0.3333333333333333 * (c / a)) / ((sqrt(fma((c * -3.0), a, (b * b))) + b) * (0.3333333333333333 / a));
}
# Alternative 1 in Julia: numerator -c/(3a) over denominator (sqrt(fma(-3c,a,b*b)) + b)/(3a).
function code(a, b, c) return Float64(Float64(-0.3333333333333333 * Float64(c / a)) / Float64(Float64(sqrt(fma(Float64(c * -3.0), a, Float64(b * b))) + b) * Float64(0.3333333333333333 / a))) end
(* Alternative 1 in Mathematica; the fma is emulated as x*y + z inside one N[..., $MachinePrecision]. *)
code[a_, b_, c_] := N[(N[(-0.3333333333333333 * N[(c / a), $MachinePrecision]), $MachinePrecision] / N[(N[(N[Sqrt[N[(N[(c * -3.0), $MachinePrecision] * a + N[(b * b), $MachinePrecision]), $MachinePrecision]], $MachinePrecision] + b), $MachinePrecision] * N[(0.3333333333333333 / a), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{-0.3333333333333333 \cdot \frac{c}{a}}{\left(\sqrt{\mathsf{fma}\left(c \cdot -3, a, b \cdot b\right)} + b\right) \cdot \frac{0.3333333333333333}{a}}
\end{array}
Initial program 53.5%
lift-/.f64N/A
lift-+.f64N/A
+-commutativeN/A
lift-neg.f64N/A
unsub-negN/A
div-subN/A
lower--.f64N/A
Applied rewrites52.8%
lift--.f64N/A
flip--N/A
lower-/.f64N/A
Applied rewrites53.4%
Taylor expanded in c around 0
*-commutativeN/A
lower-*.f64N/A
lower-/.f6499.2
Applied rewrites99.2%
Final simplification99.2%
(FPCore (a b c) :precision binary64 (if (<= b 7.0) (* (/ (- (sqrt (fma (* c -3.0) a (* b b))) b) a) 0.3333333333333333) (/ 1.0 (fma (/ 1.5 b) a (* -2.0 (/ b c))))))
double code(double a, double b, double c) {
double tmp;
if (b <= 7.0) {
tmp = ((sqrt(fma((c * -3.0), a, (b * b))) - b) / a) * 0.3333333333333333;
} else {
tmp = 1.0 / fma((1.5 / b), a, (-2.0 * (b / c)));
}
return tmp;
}
# Branched alternative in Julia: b <= 7 uses the fma-stabilized quadratic form, otherwise a reciprocal form.
function code(a, b, c) tmp = 0.0 if (b <= 7.0) tmp = Float64(Float64(Float64(sqrt(fma(Float64(c * -3.0), a, Float64(b * b))) - b) / a) * 0.3333333333333333); else tmp = Float64(1.0 / fma(Float64(1.5 / b), a, Float64(-2.0 * Float64(b / c)))); end return tmp end
(* Branched alternative in Mathematica mirroring the b <= 7 split of the C version. *)
code[a_, b_, c_] := If[LessEqual[b, 7.0], N[(N[(N[(N[Sqrt[N[(N[(c * -3.0), $MachinePrecision] * a + N[(b * b), $MachinePrecision]), $MachinePrecision]], $MachinePrecision] - b), $MachinePrecision] / a), $MachinePrecision] * 0.3333333333333333), $MachinePrecision], N[(1.0 / N[(N[(1.5 / b), $MachinePrecision] * a + N[(-2.0 * N[(b / c), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;b \leq 7:\\
\;\;\;\;\frac{\sqrt{\mathsf{fma}\left(c \cdot -3, a, b \cdot b\right)} - b}{a} \cdot 0.3333333333333333\\
\mathbf{else}:\\
\;\;\;\;\frac{1}{\mathsf{fma}\left(\frac{1.5}{b}, a, -2 \cdot \frac{b}{c}\right)}\\
\end{array}
\end{array}
if b < 7: Initial program 79.3%
lift-/.f64N/A
lift-*.f64N/A
associate-/l/N/A
div-invN/A
lower-*.f64N/A
Applied rewrites79.3%
if 7 < b: Initial program 46.1%
lift-/.f64N/A
clear-numN/A
lower-/.f64N/A
lower-/.f6446.1
lift-*.f64N/A
*-commutativeN/A
lower-*.f6446.1
lift-+.f64N/A
+-commutativeN/A
lift-neg.f64N/A
unsub-negN/A
lower--.f6446.1
Applied rewrites46.1%
Taylor expanded in a around 0
+-commutativeN/A
associate-*r/N/A
associate-*l/N/A
metadata-evalN/A
associate-*r/N/A
lower-fma.f64N/A
associate-*r/N/A
metadata-evalN/A
lower-/.f64N/A
*-commutativeN/A
lower-*.f64N/A
lower-/.f6488.6
Applied rewrites88.6%
Final simplification86.6%
(FPCore (a b c) :precision binary64 (if (<= b 7.0) (* (- (sqrt (fma (* c -3.0) a (* b b))) b) (/ 0.3333333333333333 a)) (/ 1.0 (fma (/ 1.5 b) a (* -2.0 (/ b c))))))
/* Variant of the branched rewrite: multiplies (sqrt(...) - b) by (1/3)/a
   instead of dividing by a and then scaling by 1/3; else-branch unchanged. */
double code(double a, double b, double c) {
double tmp;
if (b <= 7.0) {
tmp = (sqrt(fma((c * -3.0), a, (b * b))) - b) * (0.3333333333333333 / a);
} else {
tmp = 1.0 / fma((1.5 / b), a, (-2.0 * (b / c)));
}
return tmp;
}
# Variant of the branched rewrite in Julia: scales by (1/3)/a instead of dividing by a then by 3.
function code(a, b, c) tmp = 0.0 if (b <= 7.0) tmp = Float64(Float64(sqrt(fma(Float64(c * -3.0), a, Float64(b * b))) - b) * Float64(0.3333333333333333 / a)); else tmp = Float64(1.0 / fma(Float64(1.5 / b), a, Float64(-2.0 * Float64(b / c)))); end return tmp end
(* Variant of the branched rewrite in Mathematica: multiplies by (1/3)/a in the b <= 7 branch. *)
code[a_, b_, c_] := If[LessEqual[b, 7.0], N[(N[(N[Sqrt[N[(N[(c * -3.0), $MachinePrecision] * a + N[(b * b), $MachinePrecision]), $MachinePrecision]], $MachinePrecision] - b), $MachinePrecision] * N[(0.3333333333333333 / a), $MachinePrecision]), $MachinePrecision], N[(1.0 / N[(N[(1.5 / b), $MachinePrecision] * a + N[(-2.0 * N[(b / c), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;b \leq 7:\\
\;\;\;\;\left(\sqrt{\mathsf{fma}\left(c \cdot -3, a, b \cdot b\right)} - b\right) \cdot \frac{0.3333333333333333}{a}\\
\mathbf{else}:\\
\;\;\;\;\frac{1}{\mathsf{fma}\left(\frac{1.5}{b}, a, -2 \cdot \frac{b}{c}\right)}\\
\end{array}
\end{array}
if b < 7: Initial program 79.3%
lift-/.f64N/A
clear-numN/A
associate-/r/N/A
lower-*.f64N/A
lift-*.f64N/A
associate-/r*N/A
lower-/.f64N/A
metadata-eval79.3
lift-+.f64N/A
+-commutativeN/A
lift-neg.f64N/A
unsub-negN/A
lower--.f6479.3
Applied rewrites79.3%
if 7 < b: Initial program 46.1%
lift-/.f64N/A
clear-numN/A
lower-/.f64N/A
lower-/.f6446.1
lift-*.f64N/A
*-commutativeN/A
lower-*.f6446.1
lift-+.f64N/A
+-commutativeN/A
lift-neg.f64N/A
unsub-negN/A
lower--.f6446.1
Applied rewrites46.1%
Taylor expanded in a around 0
+-commutativeN/A
associate-*r/N/A
associate-*l/N/A
metadata-evalN/A
associate-*r/N/A
lower-fma.f64N/A
associate-*r/N/A
metadata-evalN/A
lower-/.f64N/A
*-commutativeN/A
lower-*.f64N/A
lower-/.f6488.6
Applied rewrites88.6%
Final simplification86.6%
(FPCore (a b c) :precision binary64 (/ 1.0 (fma (/ 1.5 b) a (* -2.0 (/ b c)))))
double code(double a, double b, double c) {
return 1.0 / fma((1.5 / b), a, (-2.0 * (b / c)));
}
# Reciprocal alternative in Julia: 1 / fma(1.5/b, a, -2*(b/c)).
function code(a, b, c) return Float64(1.0 / fma(Float64(1.5 / b), a, Float64(-2.0 * Float64(b / c)))) end
(* Reciprocal alternative in Mathematica: 1 / (1.5a/b - 2b/c), fma emulated as x*y + z. *)
code[a_, b_, c_] := N[(1.0 / N[(N[(1.5 / b), $MachinePrecision] * a + N[(-2.0 * N[(b / c), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{1}{\mathsf{fma}\left(\frac{1.5}{b}, a, -2 \cdot \frac{b}{c}\right)}
\end{array}
Initial program 53.5%
lift-/.f64N/A
clear-numN/A
lower-/.f64N/A
lower-/.f6453.5
lift-*.f64N/A
*-commutativeN/A
lower-*.f6453.5
lift-+.f64N/A
+-commutativeN/A
lift-neg.f64N/A
unsub-negN/A
lower--.f6453.5
Applied rewrites53.5%
Taylor expanded in a around 0
+-commutativeN/A
associate-*r/N/A
associate-*l/N/A
metadata-evalN/A
associate-*r/N/A
lower-fma.f64N/A
associate-*r/N/A
metadata-evalN/A
lower-/.f64N/A
*-commutativeN/A
lower-*.f64N/A
lower-/.f6482.9
Applied rewrites82.9%
Final simplification82.9%
(FPCore (a b c) :precision binary64 (* -0.5 (/ c b)))
/* First-order Taylor approximation in c around 0: -c/(2b); a is unused. */
double code(double a, double b, double c) {
    double ratio = c / b;
    return -0.5 * ratio;
}
! First-order Taylor approximation in c around 0: -c/(2*b); a is unused.
real(8) function code(a, b, c)
  implicit none   ! added: forbid implicitly typed names
  real(8), intent (in) :: a
  real(8), intent (in) :: b
  real(8), intent (in) :: c
  code = (-0.5d0) * (c / b)
end function
/** First-order Taylor approximation in c around 0: -c/(2b); a is unused. */
public static double code(double a, double b, double c) {
    final double ratio = c / b;
    return -0.5 * ratio;
}
def code(a, b, c):
    """First-order Taylor approximation in c around 0: -c/(2*b); a is unused."""
    ratio = c / b
    return -0.5 * ratio
# Taylor-in-c alternative in Julia: -0.5 * (c / b); a is unused.
function code(a, b, c) return Float64(-0.5 * Float64(c / b)) end
% Taylor-in-c alternative in MATLAB: -0.5 * (c / b); a is unused.
function tmp = code(a, b, c) tmp = -0.5 * (c / b); end
(* Taylor-in-c alternative in Mathematica: -0.5 * (c / b); a is unused. *)
code[a_, b_, c_] := N[(-0.5 * N[(c / b), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
-0.5 \cdot \frac{c}{b}
\end{array}
Initial program 53.5%
Taylor expanded in c around 0
*-commutativeN/A
lower-*.f64N/A
lower-/.f6465.5
Applied rewrites65.5%
Final simplification65.5%
(FPCore (a b c) :precision binary64 (* (/ -0.5 b) c))
/* Same Taylor-in-c form with the constant folded into the divide: (-0.5/b) * c. */
double code(double a, double b, double c) {
    double scale = -0.5 / b;
    return scale * c;
}
! Taylor-in-c approximation with the constant folded into the divide: (-0.5/b)*c.
real(8) function code(a, b, c)
  implicit none   ! added: forbid implicitly typed names
  real(8), intent (in) :: a
  real(8), intent (in) :: b
  real(8), intent (in) :: c
  code = ((-0.5d0) / b) * c
end function
/** Taylor-in-c approximation with the constant folded into the divide: (-0.5/b) * c. */
public static double code(double a, double b, double c) {
    final double scale = -0.5 / b;
    return scale * c;
}
def code(a, b, c):
    """Taylor-in-c approximation with the constant folded into the divide: (-0.5/b) * c."""
    scale = -0.5 / b
    return scale * c
# Folded-constant Taylor alternative in Julia: (-0.5 / b) * c.
function code(a, b, c) return Float64(Float64(-0.5 / b) * c) end
% Folded-constant Taylor alternative in MATLAB: (-0.5 / b) * c.
function tmp = code(a, b, c) tmp = (-0.5 / b) * c; end
(* Folded-constant Taylor alternative in Mathematica: (-0.5 / b) * c. *)
code[a_, b_, c_] := N[(N[(-0.5 / b), $MachinePrecision] * c), $MachinePrecision]
\begin{array}{l}
\\
\frac{-0.5}{b} \cdot c
\end{array}
Initial program 53.5%
Taylor expanded in c around 0
*-commutativeN/A
lower-*.f64N/A
lower-/.f6465.5
Applied rewrites65.5%
Applied rewrites65.4%
Final simplification65.4%
herbie shell --seed 2024242
(FPCore (a b c)
:name "Cubic critical, narrow range"
:precision binary64
:pre (and (and (and (< 1.0536712127723509e-8 a) (< a 94906265.62425156)) (and (< 1.0536712127723509e-8 b) (< b 94906265.62425156))) (and (< 1.0536712127723509e-8 c) (< c 94906265.62425156)))
(/ (+ (- b) (sqrt (- (* b b) (* (* 3.0 a) c)))) (* 3.0 a)))