
; Quadratic formula, larger root: (-b + sqrt(b^2 - 4ac)) / (2a), in binary64.
(FPCore (a b c) :precision binary64 (/ (+ (- b) (sqrt (- (* b b) (* (* 4.0 a) c)))) (* 2.0 a)))
double code(double a, double b, double c) {
return (-b + sqrt(((b * b) - ((4.0 * a) * c)))) / (2.0 * a);
}
! Larger quadratic root, (-b + sqrt(b*b - 4ac)) / (2a), in double precision.
! NOTE(review): the direct formula cancels badly when b*b >> 4*a*c.
real(8) function code(a, b, c)
real(8), intent (in) :: a
real(8), intent (in) :: b
real(8), intent (in) :: c
code = (-b + sqrt(((b * b) - ((4.0d0 * a) * c)))) / (2.0d0 * a)
end function
// Larger quadratic root: (-b + sqrt(b*b - 4*a*c)) / (2*a).
public static double code(double a, double b, double c) {
return (-b + Math.sqrt(((b * b) - ((4.0 * a) * c)))) / (2.0 * a);
}
def code(a, b, c):
    # Larger quadratic root, evaluated in the same operation order as the
    # reference FPCore: (-b + sqrt(b*b - 4*a*c)) / (2*a).
    discriminant = (b * b) - ((4.0 * a) * c)
    return (-b + math.sqrt(discriminant)) / (2.0 * a)
# Larger quadratic root; explicit Float64() wrappers pin each intermediate to binary64 rounding.
function code(a, b, c) return Float64(Float64(Float64(-b) + sqrt(Float64(Float64(b * b) - Float64(Float64(4.0 * a) * c)))) / Float64(2.0 * a)) end
% Larger quadratic root: (-b + sqrt(b*b - 4*a*c)) / (2*a).
function tmp = code(a, b, c) tmp = (-b + sqrt(((b * b) - ((4.0 * a) * c)))) / (2.0 * a); end
(* Larger quadratic root; N[..., $MachinePrecision] forces machine-precision rounding at every step. *)
code[a_, b_, c_] := N[(N[((-b) + N[Sqrt[N[(N[(b * b), $MachinePrecision] - N[(N[(4.0 * a), $MachinePrecision] * c), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision] / N[(2.0 * a), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{\left(-b\right) + \sqrt{b \cdot b - \left(4 \cdot a\right) \cdot c}}{2 \cdot a}
\end{array}
Sampling outcomes in binary64 precision:
Herbie found 5 alternatives:
| Alternative | Accuracy | Speedup |
|---|---|---|
; Baseline expression (repeated): larger quadratic root in binary64.
(FPCore (a b c) :precision binary64 (/ (+ (- b) (sqrt (- (* b b) (* (* 4.0 a) c)))) (* 2.0 a)))
double code(double a, double b, double c) {
return (-b + sqrt(((b * b) - ((4.0 * a) * c)))) / (2.0 * a);
}
! Larger quadratic root, (-b + sqrt(b*b - 4ac)) / (2a), in double precision.
real(8) function code(a, b, c)
real(8), intent (in) :: a
real(8), intent (in) :: b
real(8), intent (in) :: c
code = (-b + sqrt(((b * b) - ((4.0d0 * a) * c)))) / (2.0d0 * a)
end function
// Larger quadratic root: (-b + sqrt(b*b - 4*a*c)) / (2*a).
public static double code(double a, double b, double c) {
return (-b + Math.sqrt(((b * b) - ((4.0 * a) * c)))) / (2.0 * a);
}
def code(a, b, c):
    # Larger quadratic root: (-b + sqrt(b*b - 4*a*c)) / (2*a), with the same
    # operation order as the one-line original.
    disc = (b * b) - ((4.0 * a) * c)
    numerator = -b + math.sqrt(disc)
    return numerator / (2.0 * a)
# Larger quadratic root; explicit Float64() wrappers pin each intermediate to binary64 rounding.
function code(a, b, c) return Float64(Float64(Float64(-b) + sqrt(Float64(Float64(b * b) - Float64(Float64(4.0 * a) * c)))) / Float64(2.0 * a)) end
% Larger quadratic root: (-b + sqrt(b*b - 4*a*c)) / (2*a).
function tmp = code(a, b, c) tmp = (-b + sqrt(((b * b) - ((4.0 * a) * c)))) / (2.0 * a); end
(* Larger quadratic root; N[..., $MachinePrecision] forces machine-precision rounding at every step. *)
code[a_, b_, c_] := N[(N[((-b) + N[Sqrt[N[(N[(b * b), $MachinePrecision] - N[(N[(4.0 * a), $MachinePrecision] * c), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision] / N[(2.0 * a), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{\left(-b\right) + \sqrt{b \cdot b - \left(4 \cdot a\right) \cdot c}}{2 \cdot a}
\end{array}
; Herbie alternative 1: sqrt-free series approximation of the root for large
; |b| (Taylor expansion in b around inf, per the derivation log below).
(FPCore (a b c)
:precision binary64
(let* ((t_0 (* a (* c a))))
(/
(-
(fma
(/ (* (* c t_0) (* t_0 -5.0)) (* b (* (* a (* b b)) (* b (* b b)))))
c
(/ (* (* c c) (- (/ (* c (* a (* a -2.0))) (* b b)) a)) (* b b)))
c)
b)))
// Herbie alternative 1: series approximation of the root for large |b|
// (Taylor expansion in b around inf); avoids sqrt entirely. The fma supplies
// the highest-order correction term; operation order is accuracy-critical,
// so the generated expression is kept verbatim.
double code(double a, double b, double c) {
double t_0 = a * (c * a);
return (fma((((c * t_0) * (t_0 * -5.0)) / (b * ((a * (b * b)) * (b * (b * b))))), c, (((c * c) * (((c * (a * (a * -2.0))) / (b * b)) - a)) / (b * b))) - c) / b;
}
# Herbie alternative 1 (large-|b| series); Float64() wrappers pin binary64 rounding at each step.
function code(a, b, c) t_0 = Float64(a * Float64(c * a)) return Float64(Float64(fma(Float64(Float64(Float64(c * t_0) * Float64(t_0 * -5.0)) / Float64(b * Float64(Float64(a * Float64(b * b)) * Float64(b * Float64(b * b))))), c, Float64(Float64(Float64(c * c) * Float64(Float64(Float64(c * Float64(a * Float64(a * -2.0))) / Float64(b * b)) - a)) / Float64(b * b))) - c) / b) end
(* Herbie alternative 1 (large-|b| series); t$95$0 is the generated name for the shared subterm a*(c*a). *)
code[a_, b_, c_] := Block[{t$95$0 = N[(a * N[(c * a), $MachinePrecision]), $MachinePrecision]}, N[(N[(N[(N[(N[(N[(c * t$95$0), $MachinePrecision] * N[(t$95$0 * -5.0), $MachinePrecision]), $MachinePrecision] / N[(b * N[(N[(a * N[(b * b), $MachinePrecision]), $MachinePrecision] * N[(b * N[(b * b), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] * c + N[(N[(N[(c * c), $MachinePrecision] * N[(N[(N[(c * N[(a * N[(a * -2.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / N[(b * b), $MachinePrecision]), $MachinePrecision] - a), $MachinePrecision]), $MachinePrecision] / N[(b * b), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] - c), $MachinePrecision] / b), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := a \cdot \left(c \cdot a\right)\\
\frac{\mathsf{fma}\left(\frac{\left(c \cdot t\_0\right) \cdot \left(t\_0 \cdot -5\right)}{b \cdot \left(\left(a \cdot \left(b \cdot b\right)\right) \cdot \left(b \cdot \left(b \cdot b\right)\right)\right)}, c, \frac{\left(c \cdot c\right) \cdot \left(\frac{c \cdot \left(a \cdot \left(a \cdot -2\right)\right)}{b \cdot b} - a\right)}{b \cdot b}\right) - c}{b}
\end{array}
\end{array}
Initial program 17.8%
Taylor expanded in b around inf
Simplified 98.0%
Applied egg-rr 98.0%
Applied egg-rr 98.0%
Applied egg-rr 98.0%
; Herbie alternative 2: shorter large-|b| series for the root (Taylor
; expansion in b around inf, per the derivation log below).
(FPCore (a b c)
:precision binary64
(/
(-
(/
(fma -2.0 (/ (* (* a a) (* c (* c c))) (* b b)) (- (* a (* c c))))
(* b b))
c)
b))
// Herbie alternative 2: shorter large-|b| series for the root; the fma fuses
// the leading correction term. Operation order is accuracy-critical, so the
// generated expression is kept verbatim.
double code(double a, double b, double c) {
return ((fma(-2.0, (((a * a) * (c * (c * c))) / (b * b)), -(a * (c * c))) / (b * b)) - c) / b;
}
# Herbie alternative 2 (large-|b| series); Float64() wrappers pin binary64 rounding at each step.
function code(a, b, c) return Float64(Float64(Float64(fma(-2.0, Float64(Float64(Float64(a * a) * Float64(c * Float64(c * c))) / Float64(b * b)), Float64(-Float64(a * Float64(c * c)))) / Float64(b * b)) - c) / b) end
(* Herbie alternative 2 (large-|b| series) with per-step machine-precision rounding. *)
code[a_, b_, c_] := N[(N[(N[(N[(-2.0 * N[(N[(N[(a * a), $MachinePrecision] * N[(c * N[(c * c), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / N[(b * b), $MachinePrecision]), $MachinePrecision] + (-N[(a * N[(c * c), $MachinePrecision]), $MachinePrecision])), $MachinePrecision] / N[(b * b), $MachinePrecision]), $MachinePrecision] - c), $MachinePrecision] / b), $MachinePrecision]
\begin{array}{l}
\\
\frac{\frac{\mathsf{fma}\left(-2, \frac{\left(a \cdot a\right) \cdot \left(c \cdot \left(c \cdot c\right)\right)}{b \cdot b}, -a \cdot \left(c \cdot c\right)\right)}{b \cdot b} - c}{b}
\end{array}
Initial program 17.8%
Taylor expanded in b around inf
Simplified 98.0%
Applied egg-rr 98.0%
Applied egg-rr 98.0%
Taylor expanded in b around inf
/-lowering-/.f64 N/A
accelerator-lowering-fma.f64 N/A
/-lowering-/.f64 N/A
*-lowering-*.f64 N/A
unpow2 N/A
*-lowering-*.f64 N/A
cube-mult N/A
unpow2 N/A
*-lowering-*.f64 N/A
unpow2 N/A
*-lowering-*.f64 N/A
unpow2 N/A
*-lowering-*.f64 N/A
mul-1-neg N/A
neg-lowering-neg.f64 N/A
*-lowering-*.f64 N/A
unpow2 N/A
*-lowering-*.f64 N/A
unpow2 N/A
*-lowering-*.f64 97.1
Simplified 97.1%
; Herbie alternative 3: root rewritten as c / (c*(a/b) - b), with the denominator fused by fma.
(FPCore (a b c) :precision binary64 (/ c (fma c (/ a b) (- b))))
double code(double a, double b, double c) {
return c / fma(c, (a / b), -b);
}
# Herbie alternative 3: c / (c*(a/b) - b), denominator fused by fma.
function code(a, b, c) return Float64(c / fma(c, Float64(a / b), Float64(-b))) end
(* Herbie alternative 3: c / (c*(a/b) - b) with machine-precision rounding. *)
code[a_, b_, c_] := N[(c / N[(c * N[(a / b), $MachinePrecision] + (-b)), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{c}{\mathsf{fma}\left(c, \frac{a}{b}, -b\right)}
\end{array}
Initial program 17.8%
Taylor expanded in b around inf
/-lowering-/.f64 N/A
Simplified 95.2%
clear-num N/A
/-lowering-/.f64 N/A
associate-/r/ N/A
*-lowering-*.f64 N/A
Applied egg-rr 95.0%
Taylor expanded in c around 0
/-lowering-/.f64 N/A
+-commutative N/A
associate-/l* N/A
accelerator-lowering-fma.f64 N/A
/-lowering-/.f64 N/A
mul-1-neg N/A
neg-lowering-neg.f64 95.5
Simplified 95.5%
clear-num N/A
/-lowering-/.f64 N/A
associate-*r/ N/A
*-commutative N/A
associate-/l* N/A
accelerator-lowering-fma.f64 N/A
/-lowering-/.f64 N/A
neg-lowering-neg.f64 95.7
Applied egg-rr 95.7%
; Herbie alternative 4: leading-order approximation c / (-b); a is unused.
(FPCore (a b c) :precision binary64 (/ c (- b)))
// Herbie alternative 4: leading-order approximation c / (-b); a is unused.
double code(double a, double b, double c) {
    (void) a; /* not used in this approximation */
    double negated_b = -b;
    return c / negated_b;
}
! Herbie alternative 4: leading-order approximation c / (-b); a is unused.
real(8) function code(a, b, c)
real(8), intent (in) :: a
real(8), intent (in) :: b
real(8), intent (in) :: c
code = c / -b
end function
// Herbie alternative 4: leading-order approximation c / (-b); a is unused.
public static double code(double a, double b, double c) {
return c / -b;
}
def code(a, b, c):
    # Herbie alternative 4: leading-order approximation c / (-b); a is unused.
    negated_b = -b
    return c / negated_b
# Herbie alternative 4: leading-order approximation c / (-b); a is unused.
function code(a, b, c) return Float64(c / Float64(-b)) end
% Herbie alternative 4: leading-order approximation c / (-b); a is unused.
function tmp = code(a, b, c) tmp = c / -b; end
(* Herbie alternative 4: leading-order approximation c / (-b); a is unused. *)
code[a_, b_, c_] := N[(c / (-b)), $MachinePrecision]
\begin{array}{l}
\\
\frac{c}{-b}
\end{array}
Initial program 17.8%
Taylor expanded in b around inf
mul-1-neg N/A
distribute-neg-frac2 N/A
/-lowering-/.f64 N/A
neg-lowering-neg.f64 90.4
Simplified 90.4%
; Herbie alternative 5: b / a (Taylor expansion in a around inf); c is unused.
(FPCore (a b c) :precision binary64 (/ b a))
// Herbie alternative 5: approximation b / a; c is unused.
double code(double a, double b, double c) {
    (void) c; /* not used in this approximation */
    double quotient = b / a;
    return quotient;
}
! Herbie alternative 5: approximation b / a; c is unused.
real(8) function code(a, b, c)
real(8), intent (in) :: a
real(8), intent (in) :: b
real(8), intent (in) :: c
code = b / a
end function
// Herbie alternative 5: approximation b / a; c is unused.
public static double code(double a, double b, double c) {
return b / a;
}
def code(a, b, c):
    # Herbie alternative 5: approximation b / a; c is unused.
    quotient = b / a
    return quotient
# Herbie alternative 5: approximation b / a; c is unused.
function code(a, b, c) return Float64(b / a) end
% Herbie alternative 5: approximation b / a; c is unused.
function tmp = code(a, b, c) tmp = b / a; end
(* Herbie alternative 5: approximation b / a; c is unused. *)
code[a_, b_, c_] := N[(b / a), $MachinePrecision]
\begin{array}{l}
\\
\frac{b}{a}
\end{array}
Initial program 17.8%
Taylor expanded in b around inf
/-lowering-/.f64 N/A
Simplified 95.2%
clear-num N/A
/-lowering-/.f64 N/A
associate-/r/ N/A
*-lowering-*.f64 N/A
Applied egg-rr 95.0%
Taylor expanded in c around 0
/-lowering-/.f64 N/A
+-commutative N/A
associate-/l* N/A
accelerator-lowering-fma.f64 N/A
/-lowering-/.f64 N/A
mul-1-neg N/A
neg-lowering-neg.f64 95.5
Simplified 95.5%
Taylor expanded in a around inf
/-lowering-/.f64 1.6
Simplified 1.6%
herbie shell --seed 2024205
; Input specification for this report: "Quadratic roots, wide range".
; Preconditions bound a, b, c to (4.93e-32, 2.03e+31).
(FPCore (a b c)
:name "Quadratic roots, wide range"
:precision binary64
:pre (and (and (and (< 4.930380657631324e-32 a) (< a 2.028240960365167e+31)) (and (< 4.930380657631324e-32 b) (< b 2.028240960365167e+31))) (and (< 4.930380657631324e-32 c) (< c 2.028240960365167e+31)))
(/ (+ (- b) (sqrt (- (* b b) (* (* 4.0 a) c)))) (* 2.0 a)))