
(FPCore (a b c) :precision binary64 (/ (+ (- b) (sqrt (- (* b b) (* (* 3.0 a) c)))) (* 3.0 a)))
double code(double a, double b, double c) {
return (-b + sqrt(((b * b) - ((3.0 * a) * c)))) / (3.0 * a);
}
! Critical point of a cubic: (-b + sqrt(b*b - 3*a*c)) / (3*a).
! NOTE(review): when the discriminant b*b - 3*a*c is negative, sqrt of a
! negative real is invalid (typically NaN or a runtime error, compiler-dependent).
real(8) function code(a, b, c)
implicit none
real(8), intent (in) :: a
real(8), intent (in) :: b
real(8), intent (in) :: c
code = (-b + sqrt(((b * b) - ((3.0d0 * a) * c)))) / (3.0d0 * a)
end function
/** Critical point of a cubic: (-b + sqrt(b*b - 3*a*c)) / (3*a); NaN if the discriminant is negative. */
public static double code(double a, double b, double c) {
    final double threeA = 3.0 * a;
    final double disc = (b * b) - (threeA * c);
    return (Math.sqrt(disc) - b) / threeA;
}
def code(a, b, c):
    """Critical point of a cubic: (-b + sqrt(b*b - 3*a*c)) / (3*a).

    Raises ValueError (from math.sqrt) when b*b - 3*a*c is negative.
    """
    scaled_a = 3.0 * a
    discriminant = (b * b) - (scaled_a * c)
    return (math.sqrt(discriminant) - b) / scaled_a
# Cubic critical point (-b + sqrt(b^2 - 3ac)) / (3a); the Float64() wrappers
# are generator-inserted and make each intermediate rounding explicit
# (they are identity conversions for Float64 inputs).
function code(a, b, c) return Float64(Float64(Float64(-b) + sqrt(Float64(Float64(b * b) - Float64(Float64(3.0 * a) * c)))) / Float64(3.0 * a)) end
% Cubic critical point: (-b + sqrt(b*b - 3*a*c)) / (3*a).
function tmp = code(a, b, c) tmp = (-b + sqrt(((b * b) - ((3.0 * a) * c)))) / (3.0 * a); end
(* Cubic critical point (-b + Sqrt[b^2 - 3 a c]) / (3 a); each intermediate is rounded to $MachinePrecision via N[...]. *)
code[a_, b_, c_] := N[(N[((-b) + N[Sqrt[N[(N[(b * b), $MachinePrecision] - N[(N[(3.0 * a), $MachinePrecision] * c), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision] / N[(3.0 * a), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{\left(-b\right) + \sqrt{b \cdot b - \left(3 \cdot a\right) \cdot c}}{3 \cdot a}
\end{array}
Sampling outcomes in binary64 precision:
Herbie found 7 alternatives:
| Alternative | Accuracy | Speedup |
|---|---|---|
(FPCore (a b c) :precision binary64 (/ (+ (- b) (sqrt (- (* b b) (* (* 3.0 a) c)))) (* 3.0 a)))
double code(double a, double b, double c) {
return (-b + sqrt(((b * b) - ((3.0 * a) * c)))) / (3.0 * a);
}
real(8) function code(a, b, c)
real(8), intent (in) :: a
real(8), intent (in) :: b
real(8), intent (in) :: c
code = (-b + sqrt(((b * b) - ((3.0d0 * a) * c)))) / (3.0d0 * a)
end function
public static double code(double a, double b, double c) {
return (-b + Math.sqrt(((b * b) - ((3.0 * a) * c)))) / (3.0 * a);
}
def code(a, b, c): return (-b + math.sqrt(((b * b) - ((3.0 * a) * c)))) / (3.0 * a)
function code(a, b, c) return Float64(Float64(Float64(-b) + sqrt(Float64(Float64(b * b) - Float64(Float64(3.0 * a) * c)))) / Float64(3.0 * a)) end
function tmp = code(a, b, c) tmp = (-b + sqrt(((b * b) - ((3.0 * a) * c)))) / (3.0 * a); end
code[a_, b_, c_] := N[(N[((-b) + N[Sqrt[N[(N[(b * b), $MachinePrecision] - N[(N[(3.0 * a), $MachinePrecision] * c), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision] / N[(3.0 * a), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{\left(-b\right) + \sqrt{b \cdot b - \left(3 \cdot a\right) \cdot c}}{3 \cdot a}
\end{array}
(FPCore (a b c) :precision binary64 (/ (/ (fma (* -3.0 a) c 0.0) (+ (sqrt (fma (* c a) -3.0 (* b b))) b)) (* 3.0 a)))
/* Rationalized form of (-b + sqrt(b*b - 3*a*c)) / (3*a): the numerator is
 * rewritten as -3*a*c / (sqrt(b*b - 3*a*c) + b), which is algebraically
 * identical but avoids cancelling -b against the square root when b > 0.
 * fma(x, y, 0.0) rounds x*y only once; fma((c*a), -3.0, (b*b)) keeps the
 * discriminant accurate. */
double code(double a, double b, double c) {
return (fma((-3.0 * a), c, 0.0) / (sqrt(fma((c * a), -3.0, (b * b))) + b)) / (3.0 * a);
}
function code(a, b, c) return Float64(Float64(fma(Float64(-3.0 * a), c, 0.0) / Float64(sqrt(fma(Float64(c * a), -3.0, Float64(b * b))) + b)) / Float64(3.0 * a)) end
code[a_, b_, c_] := N[(N[(N[(N[(-3.0 * a), $MachinePrecision] * c + 0.0), $MachinePrecision] / N[(N[Sqrt[N[(N[(c * a), $MachinePrecision] * -3.0 + N[(b * b), $MachinePrecision]), $MachinePrecision]], $MachinePrecision] + b), $MachinePrecision]), $MachinePrecision] / N[(3.0 * a), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{\frac{\mathsf{fma}\left(-3 \cdot a, c, 0\right)}{\sqrt{\mathsf{fma}\left(c \cdot a, -3, b \cdot b\right)} + b}}{3 \cdot a}
\end{array}
Initial program 20.3%
lift--.f64N/A
lift-*.f64N/A
fp-cancel-sub-sign-invN/A
+-commutativeN/A
*-commutativeN/A
lift-*.f64N/A
distribute-lft-neg-inN/A
associate-*r*N/A
lower-fma.f64N/A
lower-*.f64N/A
metadata-eval20.3
Applied rewrites20.3%
lift-+.f64N/A
+-commutativeN/A
lift-sqrt.f64N/A
lift-fma.f64N/A
+-commutativeN/A
lift-*.f64N/A
*-commutativeN/A
associate-*l*N/A
metadata-evalN/A
lift-*.f64N/A
fp-cancel-sub-sign-invN/A
lift-*.f64N/A
lift-*.f64N/A
*-commutativeN/A
associate-*l*N/A
Applied rewrites21.2%
lift-fma.f64N/A
lift-sqrt.f64N/A
lift-sqrt.f64N/A
rem-square-sqrtN/A
flip-+N/A
lower-/.f64N/A
Applied rewrites20.8%
lift--.f64N/A
lift-fma.f64N/A
associate--l+N/A
*-commutativeN/A
lift-*.f64N/A
*-commutativeN/A
associate-*r*N/A
metadata-evalN/A
distribute-lft-neg-inN/A
lift-*.f64N/A
lower-fma.f64N/A
lift-*.f64N/A
distribute-lft-neg-inN/A
metadata-evalN/A
lower-*.f64N/A
+-inverses99.4
Applied rewrites99.4%
Final simplification99.4%
(FPCore (a b c) :precision binary64 (/ (fma -3.0 (* c a) 0.0) (* (+ (sqrt (fma -3.0 (* c a) (* b b))) b) (* a 3.0))))
/* Rationalized cubic critical point: -3*a*c / ((sqrt(b*b - 3*a*c) + b) * 3*a).
 * Algebraically equal to (-b + sqrt(b*b - 3*a*c)) / (3*a) but avoids the
 * cancellation of -b against the square root when b > 0; the two divisions of
 * the previous variant are fused into one by multiplying the denominators. */
double code(double a, double b, double c) {
return fma(-3.0, (c * a), 0.0) / ((sqrt(fma(-3.0, (c * a), (b * b))) + b) * (a * 3.0));
}
function code(a, b, c) return Float64(fma(-3.0, Float64(c * a), 0.0) / Float64(Float64(sqrt(fma(-3.0, Float64(c * a), Float64(b * b))) + b) * Float64(a * 3.0))) end
code[a_, b_, c_] := N[(N[(-3.0 * N[(c * a), $MachinePrecision] + 0.0), $MachinePrecision] / N[(N[(N[Sqrt[N[(-3.0 * N[(c * a), $MachinePrecision] + N[(b * b), $MachinePrecision]), $MachinePrecision]], $MachinePrecision] + b), $MachinePrecision] * N[(a * 3.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{\mathsf{fma}\left(-3, c \cdot a, 0\right)}{\left(\sqrt{\mathsf{fma}\left(-3, c \cdot a, b \cdot b\right)} + b\right) \cdot \left(a \cdot 3\right)}
\end{array}
Initial program 20.3%
lift--.f64N/A
lift-*.f64N/A
fp-cancel-sub-sign-invN/A
+-commutativeN/A
*-commutativeN/A
lift-*.f64N/A
distribute-lft-neg-inN/A
associate-*r*N/A
lower-fma.f64N/A
lower-*.f64N/A
metadata-eval20.3
Applied rewrites20.3%
lift-+.f64N/A
+-commutativeN/A
lift-sqrt.f64N/A
lift-fma.f64N/A
+-commutativeN/A
lift-*.f64N/A
*-commutativeN/A
associate-*l*N/A
metadata-evalN/A
lift-*.f64N/A
fp-cancel-sub-sign-invN/A
lift-*.f64N/A
lift-*.f64N/A
*-commutativeN/A
associate-*l*N/A
Applied rewrites21.2%
lift-fma.f64N/A
lift-sqrt.f64N/A
lift-sqrt.f64N/A
rem-square-sqrtN/A
flip-+N/A
lower-/.f64N/A
Applied rewrites20.8%
lift-/.f64N/A
lift-/.f64N/A
associate-/l/N/A
lower-/.f64N/A
lift--.f64N/A
lift-fma.f64N/A
associate--l+N/A
*-commutativeN/A
lower-fma.f64N/A
+-inversesN/A
lower-*.f6499.1
Applied rewrites99.1%
Final simplification99.1%
(FPCore (a b c) :precision binary64 (/ (fma (/ (* -0.375 a) b) (/ (* c c) b) (* -0.5 c)) b))
/* Series expansion of the cubic critical point for small a (Taylor in a
 * about 0): -c/(2b) - 3*a*c^2/(8*b^3). The fma combines both terms with a
 * single rounding in the final addition (0.375 = 3/8).
 * Only accurate when |3*a*c| is small relative to b*b. */
double code(double a, double b, double c) {
return fma(((-0.375 * a) / b), ((c * c) / b), (-0.5 * c)) / b;
}
function code(a, b, c) return Float64(fma(Float64(Float64(-0.375 * a) / b), Float64(Float64(c * c) / b), Float64(-0.5 * c)) / b) end
code[a_, b_, c_] := N[(N[(N[(N[(-0.375 * a), $MachinePrecision] / b), $MachinePrecision] * N[(N[(c * c), $MachinePrecision] / b), $MachinePrecision] + N[(-0.5 * c), $MachinePrecision]), $MachinePrecision] / b), $MachinePrecision]
\begin{array}{l}
\\
\frac{\mathsf{fma}\left(\frac{-0.375 \cdot a}{b}, \frac{c \cdot c}{b}, -0.5 \cdot c\right)}{b}
\end{array}
Initial program 20.3%
Taylor expanded in a around 0
+-commutativeN/A
*-commutativeN/A
lower-fma.f64N/A
Applied rewrites96.9%
Taylor expanded in b around inf
lower-/.f64N/A
+-commutativeN/A
associate-*r/N/A
associate-*r*N/A
unpow2N/A
times-fracN/A
lower-fma.f64N/A
lower-/.f64N/A
lower-*.f64N/A
lower-/.f64N/A
unpow2N/A
lower-*.f64N/A
lower-*.f6494.8
Applied rewrites94.8%
(FPCore (a b c) :precision binary64 (fma (* a -0.375) (* c (/ c (* (* b b) b))) (* (/ c b) -0.5)))
/* Same truncated series as the previous alternative, -c/(2b) - 3*a*c^2/(8*b^3),
 * with a different association: c^2/b^3 is computed as c * (c / (b*b*b)) and
 * the -c/(2b) term as (c/b) * -0.5. Accurate only when |3*a*c| << b*b. */
double code(double a, double b, double c) {
return fma((a * -0.375), (c * (c / ((b * b) * b))), ((c / b) * -0.5));
}
function code(a, b, c) return fma(Float64(a * -0.375), Float64(c * Float64(c / Float64(Float64(b * b) * b))), Float64(Float64(c / b) * -0.5)) end
code[a_, b_, c_] := N[(N[(a * -0.375), $MachinePrecision] * N[(c * N[(c / N[(N[(b * b), $MachinePrecision] * b), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] + N[(N[(c / b), $MachinePrecision] * -0.5), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\mathsf{fma}\left(a \cdot -0.375, c \cdot \frac{c}{\left(b \cdot b\right) \cdot b}, \frac{c}{b} \cdot -0.5\right)
\end{array}
Initial program 20.3%
Taylor expanded in a around 0
+-commutativeN/A
*-commutativeN/A
associate-/l*N/A
associate-*r*N/A
*-commutativeN/A
associate-*r*N/A
lower-fma.f64N/A
lower-*.f64N/A
unpow2N/A
associate-/l*N/A
lower-*.f64N/A
lower-/.f64N/A
lower-pow.f64N/A
*-commutativeN/A
lower-*.f64N/A
lower-/.f6494.8
Applied rewrites94.8%
Applied rewrites94.8%
(FPCore (a b c) :precision binary64 (fma (* a -0.375) (* c (/ c (* (* b b) b))) (* (/ -0.5 b) c)))
/* Truncated series -c/(2b) - 3*a*c^2/(8*b^3) again; differs from the previous
 * variant only in forming the linear term as (-0.5 / b) * c instead of
 * (c / b) * -0.5. Accurate only when |3*a*c| << b*b. */
double code(double a, double b, double c) {
return fma((a * -0.375), (c * (c / ((b * b) * b))), ((-0.5 / b) * c));
}
function code(a, b, c) return fma(Float64(a * -0.375), Float64(c * Float64(c / Float64(Float64(b * b) * b))), Float64(Float64(-0.5 / b) * c)) end
code[a_, b_, c_] := N[(N[(a * -0.375), $MachinePrecision] * N[(c * N[(c / N[(N[(b * b), $MachinePrecision] * b), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] + N[(N[(-0.5 / b), $MachinePrecision] * c), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\mathsf{fma}\left(a \cdot -0.375, c \cdot \frac{c}{\left(b \cdot b\right) \cdot b}, \frac{-0.5}{b} \cdot c\right)
\end{array}
Initial program 20.3%
Taylor expanded in a around 0
+-commutativeN/A
*-commutativeN/A
associate-/l*N/A
associate-*r*N/A
*-commutativeN/A
associate-*r*N/A
lower-fma.f64N/A
lower-*.f64N/A
unpow2N/A
associate-/l*N/A
lower-*.f64N/A
lower-/.f64N/A
lower-pow.f64N/A
*-commutativeN/A
lower-*.f64N/A
lower-/.f6494.8
Applied rewrites94.8%
Applied rewrites94.8%
Applied rewrites94.4%
(FPCore (a b c) :precision binary64 (* (/ c b) -0.5))
/* Leading-order Taylor approximation (in a, about 0) of the cubic critical
 * point: -c / (2*b). Parameter a is unused in this truncated series. */
double code(double a, double b, double c) {
    (void) a; /* intentionally unused */
    double ratio = c / b;
    return ratio * -0.5;
}
! Leading-order Taylor approximation (in a, about 0) of the cubic critical
! point: -c / (2*b). Argument a is unused in this truncated series.
real(8) function code(a, b, c)
implicit none
real(8), intent (in) :: a
real(8), intent (in) :: b
real(8), intent (in) :: c
code = (c / b) * (-0.5d0)
end function
/** Leading-order Taylor approximation (a near 0) of the cubic critical point: -c / (2*b). */
public static double code(double a, double b, double c) {
    final double ratio = c / b;
    return ratio * -0.5;
}
def code(a, b, c):
    """Leading-order Taylor approximation (a near 0) of the cubic critical point: -c/(2*b).

    Parameter a is unused in this truncated series.
    """
    ratio = c / b
    return ratio * -0.5
function code(a, b, c) return Float64(Float64(c / b) * -0.5) end
function tmp = code(a, b, c) tmp = (c / b) * -0.5; end
code[a_, b_, c_] := N[(N[(c / b), $MachinePrecision] * -0.5), $MachinePrecision]
\begin{array}{l}
\\
\frac{c}{b} \cdot -0.5
\end{array}
Initial program 20.3%
Taylor expanded in a around 0
*-commutativeN/A
lower-*.f64N/A
lower-/.f6489.1
Applied rewrites89.1%
(FPCore (a b c) :precision binary64 (* c (/ -0.5 b)))
/* Leading-order approximation of the cubic critical point, computed as
 * c * (-0.5 / b). Parameter a is unused in this truncated series. */
double code(double a, double b, double c) {
    (void) a; /* intentionally unused */
    double half_inv_b = -0.5 / b;
    return c * half_inv_b;
}
! Leading-order approximation of the cubic critical point, computed as
! c * (-0.5 / b). Argument a is unused in this truncated series.
real(8) function code(a, b, c)
implicit none
real(8), intent (in) :: a
real(8), intent (in) :: b
real(8), intent (in) :: c
code = c * ((-0.5d0) / b)
end function
public static double code(double a, double b, double c) {
return c * (-0.5 / b);
}
def code(a, b, c):
    """Leading-order approximation of the cubic critical point: c * (-0.5/b).

    Parameter a is unused in this truncated series.
    """
    scale = -0.5 / b
    return c * scale
function code(a, b, c) return Float64(c * Float64(-0.5 / b)) end
function tmp = code(a, b, c) tmp = c * (-0.5 / b); end
code[a_, b_, c_] := N[(c * N[(-0.5 / b), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
c \cdot \frac{-0.5}{b}
\end{array}
Initial program 20.3%
Taylor expanded in a around 0
*-commutativeN/A
lower-*.f64N/A
lower-/.f6489.1
Applied rewrites89.1%
Applied rewrites88.7%
herbie shell --seed 2024339
(FPCore (a b c)
:name "Cubic critical, wide range"
:precision binary64
:pre (and (and (and (< 4.930380657631324e-32 a) (< a 2.028240960365167e+31)) (and (< 4.930380657631324e-32 b) (< b 2.028240960365167e+31))) (and (< 4.930380657631324e-32 c) (< c 2.028240960365167e+31)))
(/ (+ (- b) (sqrt (- (* b b) (* (* 3.0 a) c)))) (* 3.0 a)))