
(FPCore (a b c) :precision binary64 (/ (+ (- b) (sqrt (- (* b b) (* (* 4.0 a) c)))) (* 2.0 a)))
double code(double a, double b, double c) {
return (-b + sqrt(((b * b) - ((4.0 * a) * c)))) / (2.0 * a);
}
! Larger root of a*x**2 + b*x + c = 0 (the '+' branch of the quadratic formula).
! Fixed: added `implicit none` so undeclared names cannot slip in silently.
real(8) function code(a, b, c)
implicit none
real(8), intent (in) :: a
real(8), intent (in) :: b
real(8), intent (in) :: c
code = (-b + sqrt(((b * b) - ((4.0d0 * a) * c)))) / (2.0d0 * a)
end function
/** Larger root of a*x^2 + b*x + c = 0: the '+' branch of the quadratic formula. */
public static double code(double a, double b, double c) {
    final double discriminant = (b * b) - ((4.0 * a) * c);
    return (-b + Math.sqrt(discriminant)) / (2.0 * a);
}
def code(a, b, c):
    """Larger root of a*x**2 + b*x + c = 0 ('+' branch of the quadratic formula)."""
    discriminant = (b * b) - ((4.0 * a) * c)
    return (-b + math.sqrt(discriminant)) / (2.0 * a)
# Larger root of a*x^2 + b*x + c = 0; each step is explicitly rounded to Float64.
function code(a, b, c)
    discriminant = Float64(Float64(b * b) - Float64(Float64(4.0 * a) * c))
    numerator = Float64(Float64(-b) + sqrt(discriminant))
    return Float64(numerator / Float64(2.0 * a))
end
% Larger root of a*x^2 + b*x + c = 0 ('+' branch of the quadratic formula).
function tmp = code(a, b, c) tmp = (-b + sqrt(((b * b) - ((4.0 * a) * c)))) / (2.0 * a); end
(* Larger root of a x^2 + b x + c == 0; each step rounded via N[..., $MachinePrecision]. *)
code[a_, b_, c_] := N[(N[((-b) + N[Sqrt[N[(N[(b * b), $MachinePrecision] - N[(N[(4.0 * a), $MachinePrecision] * c), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision] / N[(2.0 * a), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{\left(-b\right) + \sqrt{b \cdot b - \left(4 \cdot a\right) \cdot c}}{2 \cdot a}
\end{array}
Sampling outcomes in binary64 precision:
Herbie found 5 alternatives:
| Alternative | Accuracy | Speedup |
|---|---|---|
(FPCore (a b c) :precision binary64 (/ (+ (- b) (sqrt (- (* b b) (* (* 4.0 a) c)))) (* 2.0 a)))
/* Larger root of a*x^2 + b*x + c = 0: the '+' branch of the quadratic formula. */
double code(double a, double b, double c) {
return (-b + sqrt(((b * b) - ((4.0 * a) * c)))) / (2.0 * a);
}
! Larger root of a*x**2 + b*x + c = 0 (the '+' branch of the quadratic formula).
! Fixed: added `implicit none` so undeclared names cannot slip in silently.
real(8) function code(a, b, c)
implicit none
real(8), intent (in) :: a
real(8), intent (in) :: b
real(8), intent (in) :: c
code = (-b + sqrt(((b * b) - ((4.0d0 * a) * c)))) / (2.0d0 * a)
end function
/** Larger root of a*x^2 + b*x + c = 0: the '+' branch of the quadratic formula. */
public static double code(double a, double b, double c) {
return (-b + Math.sqrt(((b * b) - ((4.0 * a) * c)))) / (2.0 * a);
}
# Larger root of a*x**2 + b*x + c = 0 ('+' branch of the quadratic formula).
def code(a, b, c): return (-b + math.sqrt(((b * b) - ((4.0 * a) * c)))) / (2.0 * a)
# Larger root of a*x^2 + b*x + c = 0; each step is explicitly rounded to Float64.
function code(a, b, c) return Float64(Float64(Float64(-b) + sqrt(Float64(Float64(b * b) - Float64(Float64(4.0 * a) * c)))) / Float64(2.0 * a)) end
% Larger root of a*x^2 + b*x + c = 0 ('+' branch of the quadratic formula).
function tmp = code(a, b, c) tmp = (-b + sqrt(((b * b) - ((4.0 * a) * c)))) / (2.0 * a); end
(* Larger root of a x^2 + b x + c == 0; each step rounded via N[..., $MachinePrecision]. *)
code[a_, b_, c_] := N[(N[((-b) + N[Sqrt[N[(N[(b * b), $MachinePrecision] - N[(N[(4.0 * a), $MachinePrecision] * c), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision] / N[(2.0 * a), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{\left(-b\right) + \sqrt{b \cdot b - \left(4 \cdot a\right) \cdot c}}{2 \cdot a}
\end{array}
(FPCore (a b c) :precision binary64 (/ (* c 4.0) (* -2.0 (+ b (sqrt (fma b b (* -4.0 (* c a))))))))
double code(double a, double b, double c) {
return (c * 4.0) / (-2.0 * (b + sqrt(fma(b, b, (-4.0 * (c * a))))));
}
# Rearranged form 4c / (-2 * (b + sqrt(fma(b, b, -4ca)))); explicit Float64 rounding at each step.
function code(a, b, c) return Float64(Float64(c * 4.0) / Float64(-2.0 * Float64(b + sqrt(fma(b, b, Float64(-4.0 * Float64(c * a))))))) end
(* Rearranged form 4c / (-2 (b + Sqrt[b^2 - 4 c a])); each step rounded to machine precision. *)
code[a_, b_, c_] := N[(N[(c * 4.0), $MachinePrecision] / N[(-2.0 * N[(b + N[Sqrt[N[(b * b + N[(-4.0 * N[(c * a), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{c \cdot 4}{-2 \cdot \left(b + \sqrt{\mathsf{fma}\left(b, b, -4 \cdot \left(c \cdot a\right)\right)}\right)}
\end{array}
Initial program 17.0%
Applied egg-rr 17.0%
associate-*r*N/A
*-commutativeN/A
times-fracN/A
*-lowering-*.f64N/A
Applied egg-rr 17.4%
Taylor expanded in b around 0
*-commutativeN/A
*-lowering-*.f64 99.5
Simplified 99.5%
clear-numN/A
un-div-invN/A
/-lowering-/.f64N/A
*-lowering-*.f64N/A
associate-/r/N/A
metadata-evalN/A
*-lowering-*.f64N/A
+-lowering-+.f64N/A
sqrt-lowering-sqrt.f64N/A
+-commutativeN/A
accelerator-lowering-fma.f64N/A
associate-*r*N/A
*-commutativeN/A
*-lowering-*.f64N/A
*-lowering-*.f64 99.9
Applied egg-rr 99.9%
(FPCore (a b c) :precision binary64 (* (* c 4.0) (/ -0.5 (+ b (sqrt (fma b b (* -4.0 (* c a))))))))
double code(double a, double b, double c) {
return (c * 4.0) * (-0.5 / (b + sqrt(fma(b, b, (-4.0 * (c * a))))));
}
# Product form (4c) * (-0.5 / (b + sqrt(fma(b, b, -4ca)))); explicit Float64 rounding at each step.
function code(a, b, c) return Float64(Float64(c * 4.0) * Float64(-0.5 / Float64(b + sqrt(fma(b, b, Float64(-4.0 * Float64(c * a))))))) end
(* Product form (4c) (-0.5 / (b + Sqrt[b^2 - 4 c a])); each step rounded to machine precision. *)
code[a_, b_, c_] := N[(N[(c * 4.0), $MachinePrecision] * N[(-0.5 / N[(b + N[Sqrt[N[(b * b + N[(-4.0 * N[(c * a), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\left(c \cdot 4\right) \cdot \frac{-0.5}{b + \sqrt{\mathsf{fma}\left(b, b, -4 \cdot \left(c \cdot a\right)\right)}}
\end{array}
Initial program 17.0%
Applied egg-rr 17.0%
associate-*r*N/A
*-commutativeN/A
times-fracN/A
*-lowering-*.f64N/A
Applied egg-rr 17.4%
Taylor expanded in b around 0
*-commutativeN/A
*-lowering-*.f64 99.5
Simplified 99.5%
*-commutativeN/A
*-lowering-*.f64N/A
associate-/l/N/A
associate-/r*N/A
metadata-evalN/A
/-lowering-/.f64N/A
+-lowering-+.f64N/A
sqrt-lowering-sqrt.f64N/A
+-commutativeN/A
accelerator-lowering-fma.f64N/A
associate-*r*N/A
*-commutativeN/A
*-lowering-*.f64N/A
*-lowering-*.f64N/A
*-lowering-*.f64 99.5
Applied egg-rr 99.5%
Final simplification 99.5%
(FPCore (a b c) :precision binary64 (/ c (- (/ (* c a) b) b)))
/* Series-derived alternative c / (c*a/b - b) (no sqrt; see derivation log below). */
double code(double a, double b, double c) {
    double denominator = ((c * a) / b) - b;
    return c / denominator;
}
! Series-derived alternative c / (c*a/b - b) (no sqrt; see derivation log below).
! Fixed: added `implicit none` so undeclared names cannot slip in silently.
real(8) function code(a, b, c)
implicit none
real(8), intent (in) :: a
real(8), intent (in) :: b
real(8), intent (in) :: c
code = c / (((c * a) / b) - b)
end function
/** Series-derived alternative c / (c*a/b - b) (no sqrt; see derivation log below). */
public static double code(double a, double b, double c) {
return c / (((c * a) / b) - b);
}
def code(a, b, c):
    """Series-derived alternative c / (c*a/b - b) (no sqrt; see derivation log below)."""
    denominator = ((c * a) / b) - b
    return c / denominator
# Series-derived alternative c / (c*a/b - b); explicit Float64 rounding at each step.
function code(a, b, c) return Float64(c / Float64(Float64(Float64(c * a) / b) - b)) end
% Series-derived alternative c / (c*a/b - b) (no sqrt; see derivation log below).
function tmp = code(a, b, c) tmp = c / (((c * a) / b) - b); end
(* Series-derived alternative c / (c a / b - b); each step rounded to machine precision. *)
code[a_, b_, c_] := N[(c / N[(N[(N[(c * a), $MachinePrecision] / b), $MachinePrecision] - b), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{c}{\frac{c \cdot a}{b} - b}
\end{array}
Initial program 17.0%
+-commutativeN/A
unsub-negN/A
--lowering--.f64N/A
sqrt-lowering-sqrt.f64N/A
sub-negN/A
accelerator-lowering-fma.f64N/A
*-commutativeN/A
distribute-rgt-neg-inN/A
*-lowering-*.f64N/A
*-commutativeN/A
distribute-rgt-neg-inN/A
*-lowering-*.f64N/A
metadata-eval 17.0
Applied egg-rr 17.0%
clear-numN/A
metadata-evalN/A
/-lowering-/.f64N/A
metadata-evalN/A
/-lowering-/.f64N/A
*-commutativeN/A
*-lowering-*.f64N/A
--lowering--.f64N/A
rem-square-sqrtN/A
sqrt-lowering-sqrt.f64N/A
rem-square-sqrtN/A
+-commutativeN/A
accelerator-lowering-fma.f64N/A
*-lowering-*.f64N/A
*-lowering-*.f64 16.9
Applied egg-rr 16.9%
Taylor expanded in c around 0
/-lowering-/.f64N/A
+-commutativeN/A
mul-1-negN/A
unsub-negN/A
--lowering--.f64N/A
/-lowering-/.f64N/A
*-commutativeN/A
*-lowering-*.f64 94.8
Simplified 94.8%
clear-numN/A
/-lowering-/.f64N/A
--lowering--.f64N/A
/-lowering-/.f64N/A
*-lowering-*.f64 95.1
Applied egg-rr 95.1%
(FPCore (a b c) :precision binary64 (- 0.0 (/ c b)))
/* Alternative 0.0 - c/b (a is unused; see "Taylor expanded in b around inf" log below).
 * Note: written as 0.0 - x rather than -x to preserve the original's signed-zero behavior. */
double code(double a, double b, double c) {
    double ratio = c / b;
    return 0.0 - ratio;
}
! Alternative 0.0 - c/b (a is unused; see "Taylor expanded in b around inf" log below).
! Fixed: added `implicit none` so undeclared names cannot slip in silently.
real(8) function code(a, b, c)
implicit none
real(8), intent (in) :: a
real(8), intent (in) :: b
real(8), intent (in) :: c
code = 0.0d0 - (c / b)
end function
/** Alternative 0.0 - c/b (a is unused; see "Taylor expanded in b around inf" log below). */
public static double code(double a, double b, double c) {
return 0.0 - (c / b);
}
def code(a, b, c):
    """Alternative 0.0 - c/b (a is unused; see derivation log below).

    Kept as ``0.0 - x`` rather than ``-x`` to preserve signed-zero behavior.
    """
    ratio = c / b
    return 0.0 - ratio
# Alternative 0.0 - c/b (a is unused); explicit Float64 rounding at each step.
function code(a, b, c) return Float64(0.0 - Float64(c / b)) end
% Alternative 0.0 - c/b (a is unused; see derivation log below).
function tmp = code(a, b, c) tmp = 0.0 - (c / b); end
(* Alternative 0.0 - c/b (a is unused); rounded to machine precision. *)
code[a_, b_, c_] := N[(0.0 - N[(c / b), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
0 - \frac{c}{b}
\end{array}
Initial program 17.0%
Taylor expanded in b around inf
mul-1-negN/A
neg-sub0N/A
--lowering--.f64N/A
/-lowering-/.f64 90.8
Simplified 90.8%
sub0-negN/A
neg-lowering-neg.f64N/A
/-lowering-/.f64 90.8
Applied egg-rr 90.8%
Final simplification 90.8%
(FPCore (a b c) :precision binary64 (/ b a))
/* Alternative b / a (c is unused; see "Taylor expanded in c around inf" log below). */
double code(double a, double b, double c) {
    double ratio = b / a;
    return ratio;
}
! Alternative b / a (c is unused; see "Taylor expanded in c around inf" log below).
! Fixed: added `implicit none` so undeclared names cannot slip in silently.
real(8) function code(a, b, c)
implicit none
real(8), intent (in) :: a
real(8), intent (in) :: b
real(8), intent (in) :: c
code = b / a
end function
/** Alternative b / a (c is unused; see "Taylor expanded in c around inf" log below). */
public static double code(double a, double b, double c) {
return b / a;
}
def code(a, b, c):
    """Alternative b / a (c is unused; see derivation log below)."""
    ratio = b / a
    return ratio
# Alternative b / a (c is unused); rounded to Float64.
function code(a, b, c) return Float64(b / a) end
% Alternative b / a (c is unused; see derivation log below).
function tmp = code(a, b, c) tmp = b / a; end
(* Alternative b / a (c is unused); rounded to machine precision. *)
code[a_, b_, c_] := N[(b / a), $MachinePrecision]
\begin{array}{l}
\\
\frac{b}{a}
\end{array}
Initial program 17.0%
+-commutativeN/A
unsub-negN/A
--lowering--.f64N/A
sqrt-lowering-sqrt.f64N/A
sub-negN/A
accelerator-lowering-fma.f64N/A
*-commutativeN/A
distribute-rgt-neg-inN/A
*-lowering-*.f64N/A
*-commutativeN/A
distribute-rgt-neg-inN/A
*-lowering-*.f64N/A
metadata-eval 17.0
Applied egg-rr 17.0%
clear-numN/A
metadata-evalN/A
/-lowering-/.f64N/A
metadata-evalN/A
/-lowering-/.f64N/A
*-commutativeN/A
*-lowering-*.f64N/A
--lowering--.f64N/A
rem-square-sqrtN/A
sqrt-lowering-sqrt.f64N/A
rem-square-sqrtN/A
+-commutativeN/A
accelerator-lowering-fma.f64N/A
*-lowering-*.f64N/A
*-lowering-*.f64 16.9
Applied egg-rr 16.9%
Taylor expanded in c around 0
/-lowering-/.f64N/A
+-commutativeN/A
mul-1-negN/A
unsub-negN/A
--lowering--.f64N/A
/-lowering-/.f64N/A
*-commutativeN/A
*-lowering-*.f64 94.8
Simplified 94.8%
Taylor expanded in c around inf
/-lowering-/.f64 1.6
Simplified 1.6%
herbie shell --seed 2024196
(FPCore (a b c)
:name "Quadratic roots, wide range"
:precision binary64
:pre (and (and (and (< 4.930380657631324e-32 a) (< a 2.028240960365167e+31)) (and (< 4.930380657631324e-32 b) (< b 2.028240960365167e+31))) (and (< 4.930380657631324e-32 c) (< c 2.028240960365167e+31)))
(/ (+ (- b) (sqrt (- (* b b) (* (* 4.0 a) c)))) (* 2.0 a)))