
; Input: larger root of the derivative of a cubic, (-b + sqrt(b*b - 3ac)) / (3a), in binary64.
(FPCore (a b c) :precision binary64 (/ (+ (- b) (sqrt (- (* b b) (* (* 3.0 a) c)))) (* 3.0 a)))
double code(double a, double b, double c) {
return (-b + sqrt(((b * b) - ((3.0 * a) * c)))) / (3.0 * a);
}
real(8) function code(a, b, c)
    ! Larger root of the derivative of a cubic: (-b + sqrt(b*b - 3ac)) / (3a).
    real(8), intent (in) :: a
    real(8), intent (in) :: b
    real(8), intent (in) :: c
    real(8) :: three_a, discriminant
    three_a = 3.0d0 * a
    discriminant = (b * b) - (three_a * c)
    code = ((-b) + sqrt(discriminant)) / three_a
end function
/** Larger root of the derivative of a cubic: (-b + sqrt(b*b - 3ac)) / (3a). */
public static double code(double a, double b, double c) {
    final double threeA = 3.0 * a;
    final double disc = (b * b) - (threeA * c);
    return (-b + Math.sqrt(disc)) / threeA;
}
def code(a, b, c):
    # Larger root of the derivative of a cubic: (-b + sqrt(b*b - 3ac)) / (3a).
    three_a = 3.0 * a
    discriminant = (b * b) - (three_a * c)
    return (-b + math.sqrt(discriminant)) / three_a
# Larger root of the derivative of a cubic, with every step rounded to Float64.
function code(a, b, c)
    three_a = Float64(3.0 * a)
    disc = Float64(Float64(b * b) - Float64(three_a * c))
    return Float64(Float64(Float64(-b) + sqrt(disc)) / three_a)
end
% Larger root of the derivative of a cubic: (-b + sqrt(b*b - 3ac)) / (3a).
function tmp = code(a, b, c)
    three_a = 3.0 * a;
    tmp = (-b + sqrt((b * b) - (three_a * c))) / three_a;
end
(* (-b + Sqrt[b*b - (3a)c]) / (3a); each intermediate is rounded via N[..., $MachinePrecision]. *)
code[a_, b_, c_] := N[(N[((-b) + N[Sqrt[N[(N[(b * b), $MachinePrecision] - N[(N[(3.0 * a), $MachinePrecision] * c), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision] / N[(3.0 * a), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{\left(-b\right) + \sqrt{b \cdot b - \left(3 \cdot a\right) \cdot c}}{3 \cdot a}
\end{array}
Sampling outcomes in binary64 precision:
Herbie found 5 alternatives:
| Alternative | Accuracy | Speedup |
|---|---|---|
; Original expression (repeated for the alternatives table): (-b + sqrt(b*b - 3ac)) / (3a).
(FPCore (a b c) :precision binary64 (/ (+ (- b) (sqrt (- (* b b) (* (* 3.0 a) c)))) (* 3.0 a)))
double code(double a, double b, double c) {
return (-b + sqrt(((b * b) - ((3.0 * a) * c)))) / (3.0 * a);
}
real(8) function code(a, b, c)
    ! (-b + sqrt(b*b - 3ac)) / (3a): larger root of the cubic's derivative.
    real(8), intent (in) :: a
    real(8), intent (in) :: b
    real(8), intent (in) :: c
    real(8) :: numerator
    numerator = (-b) + sqrt((b * b) - ((3.0d0 * a) * c))
    code = numerator / (3.0d0 * a)
end function
/** (-b + sqrt(b*b - 3ac)) / (3a): larger root of the cubic's derivative. */
public static double code(double a, double b, double c) {
    double numerator = -b + Math.sqrt((b * b) - ((3.0 * a) * c));
    return numerator / (3.0 * a);
}
def code(a, b, c):
    # (-b + sqrt(b*b - 3ac)) / (3a): larger root of the cubic's derivative.
    disc = (b * b) - ((3.0 * a) * c)
    return (-b + math.sqrt(disc)) / (3.0 * a)
# (-b + sqrt(b*b - 3ac)) / (3a), every step rounded to Float64.
function code(a, b, c)
    disc = Float64(Float64(b * b) - Float64(Float64(3.0 * a) * c))
    num = Float64(Float64(-b) + sqrt(disc))
    return Float64(num / Float64(3.0 * a))
end
% (-b + sqrt(b*b - 3ac)) / (3a): larger root of the cubic's derivative.
function tmp = code(a, b, c)
    disc = (b * b) - ((3.0 * a) * c);
    tmp = (-b + sqrt(disc)) / (3.0 * a);
end
(* (-b + Sqrt[b*b - (3a)c]) / (3a); each intermediate is rounded via N[..., $MachinePrecision]. *)
code[a_, b_, c_] := N[(N[((-b) + N[Sqrt[N[(N[(b * b), $MachinePrecision] - N[(N[(3.0 * a), $MachinePrecision] * c), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision] / N[(3.0 * a), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{\left(-b\right) + \sqrt{b \cdot b - \left(3 \cdot a\right) \cdot c}}{3 \cdot a}
\end{array}
; Alternative: conjugate form (3a)c / ((-b - sqrt(b*b - 3ac)) * (3a)), with fma computing b*b - 3ac.
(FPCore (a b c) :precision binary64 (/ (* (* 3.0 a) c) (* (- (- b) (sqrt (fma (* c -3.0) a (* b b)))) (* 3.0 a))))
double code(double a, double b, double c) {
return ((3.0 * a) * c) / ((-b - sqrt(fma((c * -3.0), a, (b * b)))) * (3.0 * a));
}
# Conjugate form of the root; fma fuses the discriminant b*b - 3ac.
function code(a, b, c)
    three_a = Float64(3.0 * a)
    root = sqrt(fma(Float64(c * -3.0), a, Float64(b * b)))
    return Float64(Float64(three_a * c) / Float64(Float64(Float64(-b) - root) * three_a))
end
(* Conjugate form: (3a)c / ((-b - Sqrt[c*-3*a + b*b]) * (3a)), each step rounded to $MachinePrecision. *)
code[a_, b_, c_] := N[(N[(N[(3.0 * a), $MachinePrecision] * c), $MachinePrecision] / N[(N[((-b) - N[Sqrt[N[(N[(c * -3.0), $MachinePrecision] * a + N[(b * b), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision] * N[(3.0 * a), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{\left(3 \cdot a\right) \cdot c}{\left(\left(-b\right) - \sqrt{\mathsf{fma}\left(c \cdot -3, a, b \cdot b\right)}\right) \cdot \left(3 \cdot a\right)}
\end{array}
Initial program 17.3%
lift--.f64 N/A
sub-neg N/A
lift-*.f64 N/A
lower-fma.f64 N/A
lift-*.f64 N/A
distribute-lft-neg-in N/A
lower-*.f64 N/A
lift-*.f64 N/A
distribute-lft-neg-in N/A
lower-*.f64 N/A
metadata-eval 17.4
Applied rewrites 17.4%
lift-/.f64 N/A
lift-+.f64 N/A
flip-+ N/A
associate-/l/ N/A
lower-/.f64 N/A
Applied rewrites 17.7%
Taylor expanded in c around 0
associate-*r* N/A
lower-*.f64 N/A
lower-*.f64 99.4
Applied rewrites 99.4%
Final simplification 99.4%
; Alternative: series form -0.5*c/b - 0.375*a*c^2/b^3, accumulated with fma and nested divisions.
(FPCore (a b c) :precision binary64 (/ (fma (/ (* -0.375 (* c c)) b) (/ a b) (* -0.5 c)) b))
double code(double a, double b, double c) {
return fma(((-0.375 * (c * c)) / b), (a / b), (-0.5 * c)) / b;
}
# Series form: fma((-0.375 c^2)/b, a/b, -0.5 c) / b, in Float64.
function code(a, b, c)
    scale = Float64(Float64(-0.375 * Float64(c * c)) / b)
    lead = Float64(-0.5 * c)
    return Float64(fma(scale, Float64(a / b), lead) / b)
end
(* Series form: ((-0.375 c^2)/b * (a/b) + (-0.5 c)) / b, each step rounded to $MachinePrecision. *)
code[a_, b_, c_] := N[(N[(N[(N[(-0.375 * N[(c * c), $MachinePrecision]), $MachinePrecision] / b), $MachinePrecision] * N[(a / b), $MachinePrecision] + N[(-0.5 * c), $MachinePrecision]), $MachinePrecision] / b), $MachinePrecision]
\begin{array}{l}
\\
\frac{\mathsf{fma}\left(\frac{-0.375 \cdot \left(c \cdot c\right)}{b}, \frac{a}{b}, -0.5 \cdot c\right)}{b}
\end{array}
Initial program 17.3%
Taylor expanded in b around inf
lower-/.f64 N/A
+-commutative N/A
associate-*r/ N/A
unpow2 N/A
*-commutative N/A
associate-*r* N/A
times-frac N/A
lower-fma.f64 N/A
lower-/.f64 N/A
*-commutative N/A
lower-*.f64 N/A
unpow2 N/A
lower-*.f64 N/A
lower-/.f64 N/A
lower-*.f64 96.7
Applied rewrites 96.7%
Final simplification 96.7%
; Alternative: same series -0.5*c/b - 0.375*a*c^2/b^3 with c factored out of the fma.
(FPCore (a b c) :precision binary64 (/ (* (fma (/ -0.375 b) (* (/ c b) a) -0.5) c) b))
double code(double a, double b, double c) {
return (fma((-0.375 / b), ((c / b) * a), -0.5) * c) / b;
}
# Series form with c factored out of the fma, in Float64.
function code(a, b, c)
    inner = fma(Float64(-0.375 / b), Float64(Float64(c / b) * a), -0.5)
    return Float64(Float64(inner * c) / b)
end
(* Series form with c factored out: ((-0.375/b) * ((c/b) a) + -0.5) * c / b, rounded stepwise. *)
code[a_, b_, c_] := N[(N[(N[(N[(-0.375 / b), $MachinePrecision] * N[(N[(c / b), $MachinePrecision] * a), $MachinePrecision] + -0.5), $MachinePrecision] * c), $MachinePrecision] / b), $MachinePrecision]
\begin{array}{l}
\\
\frac{\mathsf{fma}\left(\frac{-0.375}{b}, \frac{c}{b} \cdot a, -0.5\right) \cdot c}{b}
\end{array}
Initial program 17.3%
Taylor expanded in b around inf
lower-/.f64 N/A
Applied rewrites 98.4%
Taylor expanded in c around 0
Applied rewrites 96.6%
Final simplification 96.6%
; Alternative: leading term only, (c/b) * -0.5; a does not appear.
(FPCore (a b c) :precision binary64 (* (/ c b) -0.5))
/* Leading-order term -c/(2b); the parameter a does not appear. */
double code(double a, double b, double c) {
    double ratio = c / b;
    return ratio * -0.5;
}
real(8) function code(a, b, c)
    ! Leading-order term -c/(2b); the argument a does not appear.
    real(8), intent (in) :: a
    real(8), intent (in) :: b
    real(8), intent (in) :: c
    real(8) :: ratio
    ratio = c / b
    code = ratio * (-0.5d0)
end function
/** Leading-order term -c/(2b); the parameter a does not appear. */
public static double code(double a, double b, double c) {
    final double ratio = c / b;
    return ratio * -0.5;
}
def code(a, b, c):
    # Leading-order term -c/(2b); the parameter a does not appear.
    ratio = c / b
    return ratio * -0.5
# Leading-order term -c/(2b), in Float64; a is unused.
function code(a, b, c)
    ratio = Float64(c / b)
    return Float64(ratio * -0.5)
end
% Leading-order term -c/(2b); the argument a does not appear.
function tmp = code(a, b, c)
    ratio = c / b;
    tmp = ratio * -0.5;
end
(* Leading-order term: (c / b) * -0.5, rounded to $MachinePrecision; a is unused. *)
code[a_, b_, c_] := N[(N[(c / b), $MachinePrecision] * -0.5), $MachinePrecision]
\begin{array}{l}
\\
\frac{c}{b} \cdot -0.5
\end{array}
Initial program 17.3%
Taylor expanded in c around 0
*-commutative N/A
lower-*.f64 N/A
lower-/.f64 91.1
Applied rewrites 91.1%
; Alternative: leading term with the constant divided first, (-0.5 / b) * c.
(FPCore (a b c) :precision binary64 (* (/ -0.5 b) c))
/* Leading-order term -c/(2b), with the constant divided by b first. */
double code(double a, double b, double c) {
    double half_inv = -0.5 / b;
    return half_inv * c;
}
real(8) function code(a, b, c)
    ! Leading-order term -c/(2b), with the constant divided by b first.
    real(8), intent (in) :: a
    real(8), intent (in) :: b
    real(8), intent (in) :: c
    real(8) :: half_inv
    half_inv = (-0.5d0) / b
    code = half_inv * c
end function
/** Leading-order term -c/(2b), with the constant divided by b first. */
public static double code(double a, double b, double c) {
    final double halfInv = -0.5 / b;
    return halfInv * c;
}
def code(a, b, c):
    # Leading-order term -c/(2b); constant divided by b first, a unused.
    half_inv = -0.5 / b
    return half_inv * c
# Leading-order term -c/(2b) with the constant divided first, in Float64.
function code(a, b, c)
    half_inv = Float64(-0.5 / b)
    return Float64(half_inv * c)
end
% Leading-order term -c/(2b), with the constant divided by b first.
function tmp = code(a, b, c)
    half_inv = -0.5 / b;
    tmp = half_inv * c;
end
(* Leading-order term: (-0.5 / b) * c, rounded to $MachinePrecision; a is unused. *)
code[a_, b_, c_] := N[(N[(-0.5 / b), $MachinePrecision] * c), $MachinePrecision]
\begin{array}{l}
\\
\frac{-0.5}{b} \cdot c
\end{array}
Initial program 17.3%
Taylor expanded in c around 0
*-commutative N/A
lower-*.f64 N/A
lower-/.f64 91.1
Applied rewrites 91.1%
Applied rewrites 90.7%
Final simplification 90.7%
herbie shell --seed 2024251
; Herbie job specification: binary64, with a, b and c each constrained to the
; open interval (4.930380657631324e-32, 2.028240960365167e+31).
(FPCore (a b c)
:name "Cubic critical, wide range"
:precision binary64
:pre (and (and (and (< 4.930380657631324e-32 a) (< a 2.028240960365167e+31)) (and (< 4.930380657631324e-32 b) (< b 2.028240960365167e+31))) (and (< 4.930380657631324e-32 c) (< c 2.028240960365167e+31)))
(/ (+ (- b) (sqrt (- (* b b) (* (* 3.0 a) c)))) (* 3.0 a)))