
(FPCore (a b c) :precision binary64 (/ (+ (- b) (sqrt (- (* b b) (* (* 4.0 a) c)))) (* 2.0 a)))
double code(double a, double b, double c) {
return (-b + sqrt(((b * b) - ((4.0 * a) * c)))) / (2.0 * a);
}
! Naive quadratic-formula root: (-b + sqrt(b*b - 4*a*c)) / (2*a), in real(8).
real(8) function code(a, b, c)
real(8), intent (in) :: a
real(8), intent (in) :: b
real(8), intent (in) :: c
real(8) :: disc
disc = (b * b) - ((4.0d0 * a) * c)
! sqrt(disc) - b is bitwise-identical to -b + sqrt(disc) in IEEE arithmetic
code = (sqrt(disc) - b) / (2.0d0 * a)
end function
/** Naive quadratic-formula root: (-b + sqrt(b*b - 4*a*c)) / (2*a), in double. */
public static double code(double a, double b, double c) {
    final double disc = (b * b) - ((4.0 * a) * c);
    // sqrt(disc) - b is bitwise-identical to -b + sqrt(disc) in IEEE 754.
    return (Math.sqrt(disc) - b) / (2.0 * a);
}
def code(a, b, c): return (-b + math.sqrt(((b * b) - ((4.0 * a) * c)))) / (2.0 * a)
function code(a, b, c) return Float64(Float64(Float64(-b) + sqrt(Float64(Float64(b * b) - Float64(Float64(4.0 * a) * c)))) / Float64(2.0 * a)) end
function tmp = code(a, b, c) tmp = (-b + sqrt(((b * b) - ((4.0 * a) * c)))) / (2.0 * a); end
code[a_, b_, c_] := N[(N[((-b) + N[Sqrt[N[(N[(b * b), $MachinePrecision] - N[(N[(4.0 * a), $MachinePrecision] * c), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision] / N[(2.0 * a), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{\left(-b\right) + \sqrt{b \cdot b - \left(4 \cdot a\right) \cdot c}}{2 \cdot a}
\end{array}
Sampling outcomes in binary64 precision:
Herbie found 7 alternatives:
| Alternative | Accuracy | Speedup |
|---|---|---|
(FPCore (a b c) :precision binary64 (/ (+ (- b) (sqrt (- (* b b) (* (* 4.0 a) c)))) (* 2.0 a)))
/* Initial program (repeated in the alternatives table): naive quadratic root. */
double code(double a, double b, double c) {
return (-b + sqrt(((b * b) - ((4.0 * a) * c)))) / (2.0 * a);
}
! Initial program (repeated in the alternatives table): naive quadratic root.
real(8) function code(a, b, c)
real(8), intent (in) :: a
real(8), intent (in) :: b
real(8), intent (in) :: c
code = (-b + sqrt(((b * b) - ((4.0d0 * a) * c)))) / (2.0d0 * a)
end function
/** Initial program (repeated in the alternatives table): naive quadratic root. */
public static double code(double a, double b, double c) {
return (-b + Math.sqrt(((b * b) - ((4.0 * a) * c)))) / (2.0 * a);
}
def code(a, b, c): return (-b + math.sqrt(((b * b) - ((4.0 * a) * c)))) / (2.0 * a)
function code(a, b, c) return Float64(Float64(Float64(-b) + sqrt(Float64(Float64(b * b) - Float64(Float64(4.0 * a) * c)))) / Float64(2.0 * a)) end
function tmp = code(a, b, c) tmp = (-b + sqrt(((b * b) - ((4.0 * a) * c)))) / (2.0 * a); end
code[a_, b_, c_] := N[(N[((-b) + N[Sqrt[N[(N[(b * b), $MachinePrecision] - N[(N[(4.0 * a), $MachinePrecision] * c), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision] / N[(2.0 * a), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{\left(-b\right) + \sqrt{b \cdot b - \left(4 \cdot a\right) \cdot c}}{2 \cdot a}
\end{array}
(FPCore (a b c)
:precision binary64
(fma
(/
(fma
(* (* a a) -5.0)
(pow c 4.0)
(* (* (fma (* -2.0 a) c (* (- b) b)) (* c c)) (* b b)))
(pow b 7.0))
a
(/ (- c) b)))
/* Herbie alternative: fma-based series form of the quadratic root (the report's
   log says it came from Taylor expansion in a around 0); the trailing fma
   argument -c/b is the leading-order term. Operation order is significant —
   do not re-associate. */
double code(double a, double b, double c) {
return fma((fma(((a * a) * -5.0), pow(c, 4.0), ((fma((-2.0 * a), c, (-b * b)) * (c * c)) * (b * b))) / pow(b, 7.0)), a, (-c / b));
}
function code(a, b, c) return fma(Float64(fma(Float64(Float64(a * a) * -5.0), (c ^ 4.0), Float64(Float64(fma(Float64(-2.0 * a), c, Float64(Float64(-b) * b)) * Float64(c * c)) * Float64(b * b))) / (b ^ 7.0)), a, Float64(Float64(-c) / b)) end
code[a_, b_, c_] := N[(N[(N[(N[(N[(a * a), $MachinePrecision] * -5.0), $MachinePrecision] * N[Power[c, 4.0], $MachinePrecision] + N[(N[(N[(N[(-2.0 * a), $MachinePrecision] * c + N[((-b) * b), $MachinePrecision]), $MachinePrecision] * N[(c * c), $MachinePrecision]), $MachinePrecision] * N[(b * b), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / N[Power[b, 7.0], $MachinePrecision]), $MachinePrecision] * a + N[((-c) / b), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\mathsf{fma}\left(\frac{\mathsf{fma}\left(\left(a \cdot a\right) \cdot -5, {c}^{4}, \left(\mathsf{fma}\left(-2 \cdot a, c, \left(-b\right) \cdot b\right) \cdot \left(c \cdot c\right)\right) \cdot \left(b \cdot b\right)\right)}{{b}^{7}}, a, \frac{-c}{b}\right)
\end{array}
Initial program 33.8%
Taylor expanded in a around 0
+-commutative N/A
*-commutative N/A
lower-fma.f64 N/A
Applied rewrites 94.7%
Taylor expanded in b around 0
Applied rewrites 94.7%
Taylor expanded in b around 0
Applied rewrites 94.7%
Taylor expanded in c around 0
Applied rewrites 94.7%
Final simplification 94.7%
(FPCore (a b c)
:precision binary64
(/
0.5
(/
(fma
c
(fma (- c) (* -0.5 (/ (* a a) (pow b 3.0))) (* (/ a b) 0.5))
(* -0.5 b))
c)))
/* Herbie alternative: root expressed as 0.5 / (fma(...)/c) (report log: Taylor
   expansion in c around 0). Operation order is significant — do not re-associate. */
double code(double a, double b, double c) {
return 0.5 / (fma(c, fma(-c, (-0.5 * ((a * a) / pow(b, 3.0))), ((a / b) * 0.5)), (-0.5 * b)) / c);
}
function code(a, b, c) return Float64(0.5 / Float64(fma(c, fma(Float64(-c), Float64(-0.5 * Float64(Float64(a * a) / (b ^ 3.0))), Float64(Float64(a / b) * 0.5)), Float64(-0.5 * b)) / c)) end
code[a_, b_, c_] := N[(0.5 / N[(N[(c * N[((-c) * N[(-0.5 * N[(N[(a * a), $MachinePrecision] / N[Power[b, 3.0], $MachinePrecision]), $MachinePrecision]), $MachinePrecision] + N[(N[(a / b), $MachinePrecision] * 0.5), $MachinePrecision]), $MachinePrecision] + N[(-0.5 * b), $MachinePrecision]), $MachinePrecision] / c), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{0.5}{\frac{\mathsf{fma}\left(c, \mathsf{fma}\left(-c, -0.5 \cdot \frac{a \cdot a}{{b}^{3}}, \frac{a}{b} \cdot 0.5\right), -0.5 \cdot b\right)}{c}}
\end{array}
Initial program 33.8%
lift-/.f64 N/A
clear-num N/A
lift-*.f64 N/A
associate-/l* N/A
associate-/r* N/A
metadata-eval N/A
lower-/.f64 N/A
lower-/.f64 33.8
lift-+.f64 N/A
+-commutative N/A
lift-neg.f64 N/A
unsub-neg N/A
lower--.f64 33.8
Applied rewrites 33.8%
Taylor expanded in c around 0
lower-/.f64 N/A
Applied rewrites 93.0%
Final simplification 93.0%
(FPCore (a b c) :precision binary64 (/ 0.5 (fma -0.5 (/ b c) (* (fma -1.0 (* (* (/ c (pow b 3.0)) -0.5) a) (/ 0.5 b)) a))))
/* Herbie alternative: nested-fma reciprocal form (report log: Taylor expansion
   in a around 0). Operation order is significant — do not re-associate. */
double code(double a, double b, double c) {
return 0.5 / fma(-0.5, (b / c), (fma(-1.0, (((c / pow(b, 3.0)) * -0.5) * a), (0.5 / b)) * a));
}
function code(a, b, c) return Float64(0.5 / fma(-0.5, Float64(b / c), Float64(fma(-1.0, Float64(Float64(Float64(c / (b ^ 3.0)) * -0.5) * a), Float64(0.5 / b)) * a))) end
code[a_, b_, c_] := N[(0.5 / N[(-0.5 * N[(b / c), $MachinePrecision] + N[(N[(-1.0 * N[(N[(N[(c / N[Power[b, 3.0], $MachinePrecision]), $MachinePrecision] * -0.5), $MachinePrecision] * a), $MachinePrecision] + N[(0.5 / b), $MachinePrecision]), $MachinePrecision] * a), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{0.5}{\mathsf{fma}\left(-0.5, \frac{b}{c}, \mathsf{fma}\left(-1, \left(\frac{c}{{b}^{3}} \cdot -0.5\right) \cdot a, \frac{0.5}{b}\right) \cdot a\right)}
\end{array}
Initial program 33.8%
lift-/.f64 N/A
clear-num N/A
lift-*.f64 N/A
associate-/l* N/A
associate-/r* N/A
metadata-eval N/A
lower-/.f64 N/A
lower-/.f64 33.8
lift-+.f64 N/A
+-commutative N/A
lift-neg.f64 N/A
unsub-neg N/A
lower--.f64 33.8
Applied rewrites 33.8%
Taylor expanded in a around 0
lower-fma.f64 N/A
lower-/.f64 N/A
lower-*.f64 N/A
lower-fma.f64 N/A
lower-*.f64 N/A
distribute-rgt-out N/A
metadata-eval N/A
lower-*.f64 N/A
lower-/.f64 N/A
lower-pow.f64 N/A
associate-*r/ N/A
metadata-eval N/A
lower-/.f64 93.0
Applied rewrites 93.0%
Final simplification 93.0%
(FPCore (a b c) :precision binary64 (/ 0.5 (/ (fma 0.5 (* (/ c b) a) (* -0.5 b)) c)))
/* Herbie alternative: 0.5 / (fma(0.5, (c/b)*a, -0.5*b) / c) (report log: Taylor
   expansion in c around 0). Operation order is significant — do not re-associate. */
double code(double a, double b, double c) {
return 0.5 / (fma(0.5, ((c / b) * a), (-0.5 * b)) / c);
}
function code(a, b, c) return Float64(0.5 / Float64(fma(0.5, Float64(Float64(c / b) * a), Float64(-0.5 * b)) / c)) end
code[a_, b_, c_] := N[(0.5 / N[(N[(0.5 * N[(N[(c / b), $MachinePrecision] * a), $MachinePrecision] + N[(-0.5 * b), $MachinePrecision]), $MachinePrecision] / c), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{0.5}{\frac{\mathsf{fma}\left(0.5, \frac{c}{b} \cdot a, -0.5 \cdot b\right)}{c}}
\end{array}
Initial program 33.8%
lift-/.f64 N/A
clear-num N/A
lift-*.f64 N/A
associate-/l* N/A
associate-/r* N/A
metadata-eval N/A
lower-/.f64 N/A
lower-/.f64 33.8
lift-+.f64 N/A
+-commutative N/A
lift-neg.f64 N/A
unsub-neg N/A
lower--.f64 33.8
Applied rewrites 33.8%
Taylor expanded in c around 0
lower-/.f64 N/A
+-commutative N/A
lower-fma.f64 N/A
associate-/l* N/A
lower-*.f64 N/A
lower-/.f64 N/A
lower-*.f64 89.5
Applied rewrites 89.5%
Final simplification 89.5%
(FPCore (a b c) :precision binary64 (/ 0.5 (fma 0.5 (/ a b) (* (/ b c) -0.5))))
/* Herbie alternative: 0.5 / fma(0.5, a/b, (b/c)*-0.5) (report log: Taylor
   expansion in a around 0). Operation order is significant — do not re-associate. */
double code(double a, double b, double c) {
return 0.5 / fma(0.5, (a / b), ((b / c) * -0.5));
}
function code(a, b, c) return Float64(0.5 / fma(0.5, Float64(a / b), Float64(Float64(b / c) * -0.5))) end
code[a_, b_, c_] := N[(0.5 / N[(0.5 * N[(a / b), $MachinePrecision] + N[(N[(b / c), $MachinePrecision] * -0.5), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{0.5}{\mathsf{fma}\left(0.5, \frac{a}{b}, \frac{b}{c} \cdot -0.5\right)}
\end{array}
Initial program 33.8%
lift-/.f64 N/A
clear-num N/A
lift-*.f64 N/A
associate-/l* N/A
associate-/r* N/A
metadata-eval N/A
lower-/.f64 N/A
lower-/.f64 33.8
lift-+.f64 N/A
+-commutative N/A
lift-neg.f64 N/A
unsub-neg N/A
lower--.f64 33.8
Applied rewrites 33.8%
Taylor expanded in a around 0
+-commutative N/A
lower-fma.f64 N/A
lower-/.f64 N/A
lower-*.f64 N/A
lower-/.f64 89.5
Applied rewrites 89.5%
Final simplification 89.5%
(FPCore (a b c) :precision binary64 (/ (- c) b))
/* Leading-order root approximation: -c / b. Parameter a is unused in this form. */
double code(double a, double b, double c) {
    /* -(c/b) is bitwise-identical to (-c)/b: negation commutes with IEEE division. */
    const double ratio = c / b;
    return -ratio;
}
! Leading-order root approximation: -c / b. Argument a is unused in this form.
real(8) function code(a, b, c)
real(8), intent (in) :: a
real(8), intent (in) :: b
real(8), intent (in) :: c
! -(c/b) is bitwise-identical to (-c)/b: negation commutes with IEEE division
code = -(c / b)
end function
/** Leading-order root approximation: -c / b. Parameter a is unused in this form. */
public static double code(double a, double b, double c) {
    // -(c/b) is bitwise-identical to (-c)/b: negation commutes with IEEE division.
    final double ratio = c / b;
    return -ratio;
}
def code(a, b, c): return -c / b
function code(a, b, c) return Float64(Float64(-c) / b) end
function tmp = code(a, b, c) tmp = -c / b; end
code[a_, b_, c_] := N[((-c) / b), $MachinePrecision]
\begin{array}{l}
\\
\frac{-c}{b}
\end{array}
Initial program 33.8%
Taylor expanded in c around 0
associate-*r/ N/A
lower-/.f64 N/A
mul-1-neg N/A
lower-neg.f64 79.4
Applied rewrites 79.4%
(FPCore (a b c) :precision binary64 0.0)
/* Constant-zero alternative: ignores all three inputs. */
double code(double a, double b, double c) {
return 0.0;
}
! Constant-zero alternative: ignores all three inputs.
real(8) function code(a, b, c)
real(8), intent (in) :: a
real(8), intent (in) :: b
real(8), intent (in) :: c
code = 0.0d0
end function
/** Constant-zero alternative: ignores all three inputs. */
public static double code(double a, double b, double c) {
return 0.0;
}
def code(a, b, c): return 0.0
function code(a, b, c) return 0.0 end
function tmp = code(a, b, c) tmp = 0.0; end
code[a_, b_, c_] := 0.0
\begin{array}{l}
\\
0
\end{array}
Initial program 33.8%
lift-/.f64 N/A
clear-num N/A
lift-*.f64 N/A
associate-/l* N/A
associate-/r* N/A
metadata-eval N/A
lower-/.f64 N/A
lower-/.f64 33.8
lift-+.f64 N/A
+-commutative N/A
lift-neg.f64 N/A
unsub-neg N/A
lower--.f64 33.8
Applied rewrites 33.8%
lift-/.f64 N/A
lift-/.f64 N/A
associate-/r/ N/A
lift--.f64 N/A
sub-neg N/A
distribute-lft-in N/A
metadata-eval N/A
associate-/r* N/A
lift-*.f64 N/A
lower-fma.f64 N/A
lift-*.f64 N/A
associate-/r* N/A
metadata-eval N/A
lower-/.f64 N/A
metadata-eval N/A
associate-/r* N/A
lift-*.f64 N/A
lower-*.f64 N/A
Applied rewrites 35.4%
Taylor expanded in c around 0
distribute-rgt-out N/A
metadata-eval N/A
mul0-rgt 3.2
Applied rewrites 3.2%
herbie shell --seed 2024270
(FPCore (a b c)
:name "Quadratic roots, medium range"
:precision binary64
:pre (and (and (and (< 1.1102230246251565e-16 a) (< a 9007199254740992.0)) (and (< 1.1102230246251565e-16 b) (< b 9007199254740992.0))) (and (< 1.1102230246251565e-16 c) (< c 9007199254740992.0)))
(/ (+ (- b) (sqrt (- (* b b) (* (* 4.0 a) c)))) (* 2.0 a)))