
; Input program: quadratic root (-b + sqrt(b^2 - 4ac)) / (2a) in binary64.
(FPCore (a b c) :precision binary64 (/ (+ (- b) (sqrt (- (* b b) (* (* 4.0 a) c)))) (* 2.0 a)))
double code(double a, double b, double c) {
return (-b + sqrt(((b * b) - ((4.0 * a) * c)))) / (2.0 * a);
}
! Textbook quadratic-formula root (-b + sqrt(b**2 - 4ac)) / (2a), double precision.
! NOTE(review): the report measures 31.8% accuracy for this reference form.
real(8) function code(a, b, c)
real(8), intent (in) :: a
real(8), intent (in) :: b
real(8), intent (in) :: c
code = (-b + sqrt(((b * b) - ((4.0d0 * a) * c)))) / (2.0d0 * a)
end function
/** Textbook quadratic-formula root (-b + sqrt(b^2 - 4ac)) / (2a) in binary64.
 *  Reference "initial program" (report accuracy: 31.8%). */
public static double code(double a, double b, double c) {
    final double discriminant = (b * b) - ((4.0 * a) * c);
    final double numerator = -b + Math.sqrt(discriminant);
    return numerator / (2.0 * a);
}
def code(a, b, c):
    """Textbook quadratic-formula root (-b + sqrt(b^2 - 4ac)) / (2a).

    Reference "initial program" (report accuracy: 31.8%); operation order
    matches the FPCore spec exactly.
    """
    discriminant = (b * b) - ((4.0 * a) * c)
    return (-b + math.sqrt(discriminant)) / (2.0 * a)
# Textbook quadratic-formula root (-b + sqrt(b^2 - 4ac)) / (2a).
# The Float64() wrappers force binary64 rounding at every step, matching
# the original one-line form's evaluation order exactly.
function code(a, b, c)
    disc = Float64(Float64(b * b) - Float64(Float64(4.0 * a) * c))
    num = Float64(Float64(-b) + sqrt(disc))
    return Float64(num / Float64(2.0 * a))
end
% Textbook quadratic-formula root (-b + sqrt(b^2 - 4ac)) / (2a).
function tmp = code(a, b, c) tmp = (-b + sqrt(((b * b) - ((4.0 * a) * c)))) / (2.0 * a); end
(* Textbook quadratic-formula root, each step rounded via N[..., $MachinePrecision]. *)
code[a_, b_, c_] := N[(N[((-b) + N[Sqrt[N[(N[(b * b), $MachinePrecision] - N[(N[(4.0 * a), $MachinePrecision] * c), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision] / N[(2.0 * a), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{\left(-b\right) + \sqrt{b \cdot b - \left(4 \cdot a\right) \cdot c}}{2 \cdot a}
\end{array}
Sampling outcomes in binary64 precision:
Herbie found 8 alternatives:
| Alternative | Accuracy | Speedup |
|---|---|---|
; Baseline row of the alternatives table: the unmodified input program.
(FPCore (a b c) :precision binary64 (/ (+ (- b) (sqrt (- (* b b) (* (* 4.0 a) c)))) (* 2.0 a)))
/* Baseline (initial program, 31.8% accuracy): textbook quadratic formula. */
double code(double a, double b, double c) {
return (-b + sqrt(((b * b) - ((4.0 * a) * c)))) / (2.0 * a);
}
! Baseline (initial program, 31.8% accuracy): textbook quadratic formula.
real(8) function code(a, b, c)
real(8), intent (in) :: a
real(8), intent (in) :: b
real(8), intent (in) :: c
code = (-b + sqrt(((b * b) - ((4.0d0 * a) * c)))) / (2.0d0 * a)
end function
/** Baseline (initial program, 31.8% accuracy): textbook quadratic formula. */
public static double code(double a, double b, double c) {
return (-b + Math.sqrt(((b * b) - ((4.0 * a) * c)))) / (2.0 * a);
}
# Baseline (initial program, 31.8% accuracy): textbook quadratic formula.
def code(a, b, c): return (-b + math.sqrt(((b * b) - ((4.0 * a) * c)))) / (2.0 * a)
# Baseline (initial program, 31.8% accuracy): textbook quadratic formula in Float64.
function code(a, b, c) return Float64(Float64(Float64(-b) + sqrt(Float64(Float64(b * b) - Float64(Float64(4.0 * a) * c)))) / Float64(2.0 * a)) end
% Baseline (initial program, 31.8% accuracy): textbook quadratic formula.
function tmp = code(a, b, c) tmp = (-b + sqrt(((b * b) - ((4.0 * a) * c)))) / (2.0 * a); end
(* Baseline (initial program, 31.8% accuracy): textbook quadratic formula. *)
code[a_, b_, c_] := N[(N[((-b) + N[Sqrt[N[(N[(b * b), $MachinePrecision] - N[(N[(4.0 * a), $MachinePrecision] * c), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision] / N[(2.0 * a), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{\left(-b\right) + \sqrt{b \cdot b - \left(4 \cdot a\right) \cdot c}}{2 \cdot a}
\end{array}
; Herbie alternative (95.4% accuracy): Taylor expansion of the root in b
; around infinity; fma placement and association are deliberate.
(FPCore (a b c)
:precision binary64
(let* ((t_0 (* c (* a c))) (t_1 (* a t_0)) (t_2 (* (* b b) (* b b))))
(/
(-
(fma
(/ (* c t_1) t_2)
-2.0
(* -5.0 (/ (* t_0 (* a t_1)) (* a (* (* b b) t_2)))))
(fma (* c c) (/ a (* b b)) c))
b)))
/* Herbie alternative (95.4% accuracy): series expansion in b around infinity.
 * NOTE(review): rounding order and fma placement are deliberate; do not
 * reassociate or "simplify" these expressions. */
double code(double a, double b, double c) {
double t_0 = c * (a * c); /* a*c^2 */
double t_1 = a * t_0; /* a^2*c^2 */
double t_2 = (b * b) * (b * b); /* b^4 */
return (fma(((c * t_1) / t_2), -2.0, (-5.0 * ((t_0 * (a * t_1)) / (a * ((b * b) * t_2))))) - fma((c * c), (a / (b * b)), c)) / b;
}
# Herbie alternative (95.4% accuracy): series expansion in b around infinity.
function code(a, b, c) t_0 = Float64(c * Float64(a * c)) t_1 = Float64(a * t_0) t_2 = Float64(Float64(b * b) * Float64(b * b)) return Float64(Float64(fma(Float64(Float64(c * t_1) / t_2), -2.0, Float64(-5.0 * Float64(Float64(t_0 * Float64(a * t_1)) / Float64(a * Float64(Float64(b * b) * t_2))))) - fma(Float64(c * c), Float64(a / Float64(b * b)), c)) / b) end
(* Herbie alternative (95.4% accuracy): series expansion in b around infinity. *)
code[a_, b_, c_] := Block[{t$95$0 = N[(c * N[(a * c), $MachinePrecision]), $MachinePrecision]}, Block[{t$95$1 = N[(a * t$95$0), $MachinePrecision]}, Block[{t$95$2 = N[(N[(b * b), $MachinePrecision] * N[(b * b), $MachinePrecision]), $MachinePrecision]}, N[(N[(N[(N[(N[(c * t$95$1), $MachinePrecision] / t$95$2), $MachinePrecision] * -2.0 + N[(-5.0 * N[(N[(t$95$0 * N[(a * t$95$1), $MachinePrecision]), $MachinePrecision] / N[(a * N[(N[(b * b), $MachinePrecision] * t$95$2), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] - N[(N[(c * c), $MachinePrecision] * N[(a / N[(b * b), $MachinePrecision]), $MachinePrecision] + c), $MachinePrecision]), $MachinePrecision] / b), $MachinePrecision]]]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := c \cdot \left(a \cdot c\right)\\
t_1 := a \cdot t\_0\\
t_2 := \left(b \cdot b\right) \cdot \left(b \cdot b\right)\\
\frac{\mathsf{fma}\left(\frac{c \cdot t\_1}{t\_2}, -2, -5 \cdot \frac{t\_0 \cdot \left(a \cdot t\_1\right)}{a \cdot \left(\left(b \cdot b\right) \cdot t\_2\right)}\right) - \mathsf{fma}\left(c \cdot c, \frac{a}{b \cdot b}, c\right)}{b}
\end{array}
\end{array}
Initial program 31.8%
Taylor expanded in b around inf
Applied rewrites 95.4%
Applied rewrites 95.4%
Applied rewrites 95.4%
Final simplification 95.4%
; Herbie alternative (95.4% accuracy): series expansion in b around infinity.
(FPCore (a b c)
:precision binary64
(let* ((t_0 (* c (* a c))) (t_1 (* a t_0)))
(/
(-
(fma
-5.0
(/ (* t_0 (* a t_1)) (* a (* (* b b) (* (* b b) (* b b)))))
(/ (fma -2.0 (/ (* c t_1) (* b b)) (* c (* a (- c)))) (* b b)))
c)
b)))
/* Herbie alternative (95.4% accuracy): series expansion in b around infinity.
 * NOTE(review): fma placement and operation order are deliberate. */
double code(double a, double b, double c) {
double t_0 = c * (a * c); /* a*c^2 */
double t_1 = a * t_0; /* a^2*c^2 */
return (fma(-5.0, ((t_0 * (a * t_1)) / (a * ((b * b) * ((b * b) * (b * b))))), (fma(-2.0, ((c * t_1) / (b * b)), (c * (a * -c))) / (b * b))) - c) / b;
}
# Herbie alternative (95.4% accuracy): series expansion in b around infinity.
function code(a, b, c) t_0 = Float64(c * Float64(a * c)) t_1 = Float64(a * t_0) return Float64(Float64(fma(-5.0, Float64(Float64(t_0 * Float64(a * t_1)) / Float64(a * Float64(Float64(b * b) * Float64(Float64(b * b) * Float64(b * b))))), Float64(fma(-2.0, Float64(Float64(c * t_1) / Float64(b * b)), Float64(c * Float64(a * Float64(-c)))) / Float64(b * b))) - c) / b) end
(* Herbie alternative (95.4% accuracy): series expansion in b around infinity. *)
code[a_, b_, c_] := Block[{t$95$0 = N[(c * N[(a * c), $MachinePrecision]), $MachinePrecision]}, Block[{t$95$1 = N[(a * t$95$0), $MachinePrecision]}, N[(N[(N[(-5.0 * N[(N[(t$95$0 * N[(a * t$95$1), $MachinePrecision]), $MachinePrecision] / N[(a * N[(N[(b * b), $MachinePrecision] * N[(N[(b * b), $MachinePrecision] * N[(b * b), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] + N[(N[(-2.0 * N[(N[(c * t$95$1), $MachinePrecision] / N[(b * b), $MachinePrecision]), $MachinePrecision] + N[(c * N[(a * (-c)), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / N[(b * b), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] - c), $MachinePrecision] / b), $MachinePrecision]]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := c \cdot \left(a \cdot c\right)\\
t_1 := a \cdot t\_0\\
\frac{\mathsf{fma}\left(-5, \frac{t\_0 \cdot \left(a \cdot t\_1\right)}{a \cdot \left(\left(b \cdot b\right) \cdot \left(\left(b \cdot b\right) \cdot \left(b \cdot b\right)\right)\right)}, \frac{\mathsf{fma}\left(-2, \frac{c \cdot t\_1}{b \cdot b}, c \cdot \left(a \cdot \left(-c\right)\right)\right)}{b \cdot b}\right) - c}{b}
\end{array}
\end{array}
Initial program 31.8%
Taylor expanded in b around inf
Applied rewrites 95.4%
Applied rewrites 95.4%
Applied rewrites 95.4%
Final simplification 95.4%
; Herbie alternative (94.1% accuracy): series expansion in b around infinity.
(FPCore (a b c) :precision binary64 (- (/ (/ (- (* (* -2.0 (* a a)) (/ (* c (* c c)) (* b b))) (* a (* c c))) (* b b)) b) (/ c b)))
/* Herbie alternative (94.1% accuracy): series expansion in b around infinity.
 * NOTE(review): operation order is deliberate; do not reassociate. */
double code(double a, double b, double c) {
return (((((-2.0 * (a * a)) * ((c * (c * c)) / (b * b))) - (a * (c * c))) / (b * b)) / b) - (c / b);
}
! Herbie alternative (94.1% accuracy): series expansion in b around infinity.
real(8) function code(a, b, c)
real(8), intent (in) :: a
real(8), intent (in) :: b
real(8), intent (in) :: c
code = ((((((-2.0d0) * (a * a)) * ((c * (c * c)) / (b * b))) - (a * (c * c))) / (b * b)) / b) - (c / b)
end function
/** Herbie alternative (94.1% accuracy): series expansion in b around infinity. */
public static double code(double a, double b, double c) {
return (((((-2.0 * (a * a)) * ((c * (c * c)) / (b * b))) - (a * (c * c))) / (b * b)) / b) - (c / b);
}
# Herbie alternative (94.1% accuracy): series expansion in b around infinity.
def code(a, b, c): return (((((-2.0 * (a * a)) * ((c * (c * c)) / (b * b))) - (a * (c * c))) / (b * b)) / b) - (c / b)
# Herbie alternative (94.1% accuracy): series expansion in b around infinity.
function code(a, b, c) return Float64(Float64(Float64(Float64(Float64(Float64(-2.0 * Float64(a * a)) * Float64(Float64(c * Float64(c * c)) / Float64(b * b))) - Float64(a * Float64(c * c))) / Float64(b * b)) / b) - Float64(c / b)) end
% Herbie alternative (94.1% accuracy): series expansion in b around infinity.
function tmp = code(a, b, c) tmp = (((((-2.0 * (a * a)) * ((c * (c * c)) / (b * b))) - (a * (c * c))) / (b * b)) / b) - (c / b); end
(* Herbie alternative (94.1% accuracy): series expansion in b around infinity. *)
code[a_, b_, c_] := N[(N[(N[(N[(N[(N[(-2.0 * N[(a * a), $MachinePrecision]), $MachinePrecision] * N[(N[(c * N[(c * c), $MachinePrecision]), $MachinePrecision] / N[(b * b), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] - N[(a * N[(c * c), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / N[(b * b), $MachinePrecision]), $MachinePrecision] / b), $MachinePrecision] - N[(c / b), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{\frac{\left(-2 \cdot \left(a \cdot a\right)\right) \cdot \frac{c \cdot \left(c \cdot c\right)}{b \cdot b} - a \cdot \left(c \cdot c\right)}{b \cdot b}}{b} - \frac{c}{b}
\end{array}
Initial program 31.8%
Taylor expanded in b around inf
lower-/.f64 N/A
Applied rewrites 94.1%
Applied rewrites 94.1%
; Herbie alternative (94.1% accuracy): series expansion in b around infinity, fma form.
(FPCore (a b c) :precision binary64 (/ (- (* (* -2.0 (* a a)) (/ (* c (* c c)) (* (* b b) (* b b)))) (fma (* c c) (/ a (* b b)) c)) b))
/* Herbie alternative (94.1% accuracy): series expansion in b around infinity.
 * NOTE(review): the fma placement is deliberate; do not reassociate. */
double code(double a, double b, double c) {
return (((-2.0 * (a * a)) * ((c * (c * c)) / ((b * b) * (b * b)))) - fma((c * c), (a / (b * b)), c)) / b;
}
# Herbie alternative (94.1% accuracy): series expansion in b around infinity, fma form.
function code(a, b, c) return Float64(Float64(Float64(Float64(-2.0 * Float64(a * a)) * Float64(Float64(c * Float64(c * c)) / Float64(Float64(b * b) * Float64(b * b)))) - fma(Float64(c * c), Float64(a / Float64(b * b)), c)) / b) end
(* Herbie alternative (94.1% accuracy): series expansion in b around infinity. *)
code[a_, b_, c_] := N[(N[(N[(N[(-2.0 * N[(a * a), $MachinePrecision]), $MachinePrecision] * N[(N[(c * N[(c * c), $MachinePrecision]), $MachinePrecision] / N[(N[(b * b), $MachinePrecision] * N[(b * b), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] - N[(N[(c * c), $MachinePrecision] * N[(a / N[(b * b), $MachinePrecision]), $MachinePrecision] + c), $MachinePrecision]), $MachinePrecision] / b), $MachinePrecision]
\begin{array}{l}
\\
\frac{\left(-2 \cdot \left(a \cdot a\right)\right) \cdot \frac{c \cdot \left(c \cdot c\right)}{\left(b \cdot b\right) \cdot \left(b \cdot b\right)} - \mathsf{fma}\left(c \cdot c, \frac{a}{b \cdot b}, c\right)}{b}
\end{array}
Initial program 31.8%
Taylor expanded in b around inf
lower-/.f64 N/A
Applied rewrites 94.1%
; Herbie alternative (94.1% accuracy): series expansion in b around infinity.
(FPCore (a b c) :precision binary64 (/ (- (/ (- (* (* -2.0 (* a a)) (/ (* c (* c c)) (* b b))) (* a (* c c))) (* b b)) c) b))
/* Herbie alternative (94.1% accuracy): series expansion in b around infinity.
 * NOTE(review): operation order is deliberate; do not reassociate. */
double code(double a, double b, double c) {
return (((((-2.0 * (a * a)) * ((c * (c * c)) / (b * b))) - (a * (c * c))) / (b * b)) - c) / b;
}
! Herbie alternative (94.1% accuracy): series expansion in b around infinity.
real(8) function code(a, b, c)
real(8), intent (in) :: a
real(8), intent (in) :: b
real(8), intent (in) :: c
code = ((((((-2.0d0) * (a * a)) * ((c * (c * c)) / (b * b))) - (a * (c * c))) / (b * b)) - c) / b
end function
/** Herbie alternative (94.1% accuracy): series expansion in b around infinity. */
public static double code(double a, double b, double c) {
return (((((-2.0 * (a * a)) * ((c * (c * c)) / (b * b))) - (a * (c * c))) / (b * b)) - c) / b;
}
# Herbie alternative (94.1% accuracy): series expansion in b around infinity.
def code(a, b, c): return (((((-2.0 * (a * a)) * ((c * (c * c)) / (b * b))) - (a * (c * c))) / (b * b)) - c) / b
# Herbie alternative (94.1% accuracy): series expansion in b around infinity.
function code(a, b, c) return Float64(Float64(Float64(Float64(Float64(Float64(-2.0 * Float64(a * a)) * Float64(Float64(c * Float64(c * c)) / Float64(b * b))) - Float64(a * Float64(c * c))) / Float64(b * b)) - c) / b) end
% Herbie alternative (94.1% accuracy): series expansion in b around infinity.
function tmp = code(a, b, c) tmp = (((((-2.0 * (a * a)) * ((c * (c * c)) / (b * b))) - (a * (c * c))) / (b * b)) - c) / b; end
(* Herbie alternative (94.1% accuracy): series expansion in b around infinity. *)
code[a_, b_, c_] := N[(N[(N[(N[(N[(N[(-2.0 * N[(a * a), $MachinePrecision]), $MachinePrecision] * N[(N[(c * N[(c * c), $MachinePrecision]), $MachinePrecision] / N[(b * b), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] - N[(a * N[(c * c), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / N[(b * b), $MachinePrecision]), $MachinePrecision] - c), $MachinePrecision] / b), $MachinePrecision]
\begin{array}{l}
\\
\frac{\frac{\left(-2 \cdot \left(a \cdot a\right)\right) \cdot \frac{c \cdot \left(c \cdot c\right)}{b \cdot b} - a \cdot \left(c \cdot c\right)}{b \cdot b} - c}{b}
\end{array}
Initial program 31.8%
Taylor expanded in b around inf
lower-/.f64 N/A
Applied rewrites 94.1%
Applied rewrites 94.1%
; Herbie alternative (91.3% accuracy): expansion in b around inf, then in a around 0.
(FPCore (a b c) :precision binary64 (- (fma a (/ (* c c) (* b (* b b))) (/ c b))))
/* Herbie alternative (91.3% accuracy): expansion in b around inf, then in a
 * around 0: root ~= -(a*c^2/b^3 + c/b), computed with a single fma. */
double code(double a, double b, double c) {
return -fma(a, ((c * c) / (b * (b * b))), (c / b));
}
# Herbie alternative (91.3% accuracy): -(a*c^2/b^3 + c/b) via a single fma.
function code(a, b, c) return Float64(-fma(a, Float64(Float64(c * c) / Float64(b * Float64(b * b))), Float64(c / b))) end
(* Herbie alternative (91.3% accuracy): -(a*c^2/b^3 + c/b). *)
code[a_, b_, c_] := (-N[(a * N[(N[(c * c), $MachinePrecision] / N[(b * N[(b * b), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] + N[(c / b), $MachinePrecision]), $MachinePrecision])
\begin{array}{l}
\\
-\mathsf{fma}\left(a, \frac{c \cdot c}{b \cdot \left(b \cdot b\right)}, \frac{c}{b}\right)
\end{array}
Initial program 31.8%
Taylor expanded in b around inf
Applied rewrites 95.4%
Taylor expanded in a around 0
+-commutative N/A
mul-1-neg N/A
mul-1-neg N/A
distribute-neg-out N/A
lower-neg.f64 N/A
associate-/l* N/A
lower-fma.f64 N/A
lower-/.f64 N/A
unpow2 N/A
lower-*.f64 N/A
cube-mult N/A
unpow2 N/A
lower-*.f64 N/A
unpow2 N/A
lower-*.f64 N/A
lower-/.f64 91.3
Applied rewrites 91.3%
; Herbie alternative (91.3% accuracy): fma form of (a*c^2/b^2 + c) / (-b).
(FPCore (a b c) :precision binary64 (/ (fma (* c c) (/ a (* b b)) c) (- b)))
/* Herbie alternative (91.3% accuracy): (a*c^2/b^2 + c) / (-b), numerator
 * computed with a single fma. */
double code(double a, double b, double c) {
return fma((c * c), (a / (b * b)), c) / -b;
}
# Herbie alternative (91.3% accuracy): (a*c^2/b^2 + c) / (-b) via fma.
function code(a, b, c) return Float64(fma(Float64(c * c), Float64(a / Float64(b * b)), c) / Float64(-b)) end
(* Herbie alternative (91.3% accuracy): (a*c^2/b^2 + c) / (-b). *)
code[a_, b_, c_] := N[(N[(N[(c * c), $MachinePrecision] * N[(a / N[(b * b), $MachinePrecision]), $MachinePrecision] + c), $MachinePrecision] / (-b)), $MachinePrecision]
\begin{array}{l}
\\
\frac{\mathsf{fma}\left(c \cdot c, \frac{a}{b \cdot b}, c\right)}{-b}
\end{array}
Initial program 31.8%
Taylor expanded in b around inf
distribute-lft-out N/A
associate-/l* N/A
mul-1-neg N/A
lower-neg.f64 N/A
lower-/.f64 N/A
+-commutative N/A
*-commutative N/A
associate-/l* N/A
lower-fma.f64 N/A
unpow2 N/A
lower-*.f64 N/A
lower-/.f64 N/A
unpow2 N/A
lower-*.f64 91.3
Applied rewrites 91.3%
Final simplification 91.3%
; Herbie alternative (81.5% accuracy): leading-order root -c/b.
(FPCore (a b c) :precision binary64 (/ (- c) b))
/* Herbie alternative (81.5% accuracy): leading-order root approximation
 * -c/b; the quadratic term is dropped entirely, so `a` is unused. */
double code(double a, double b, double c) {
    double ratio = c / b;
    return -ratio;
}
! Herbie alternative (81.5% accuracy): leading-order root -c/b; a is unused.
real(8) function code(a, b, c)
real(8), intent (in) :: a
real(8), intent (in) :: b
real(8), intent (in) :: c
code = -c / b
end function
/** Herbie alternative (81.5% accuracy): leading-order root -c/b; a is unused. */
public static double code(double a, double b, double c) {
    final double ratio = c / b;
    return -ratio;
}
def code(a, b, c):
    """Herbie alternative (81.5% accuracy): leading-order root -c/b (a unused)."""
    ratio = c / b
    return -ratio
# Herbie alternative (81.5% accuracy): leading-order root -c/b; a is unused.
function code(a, b, c) return Float64(Float64(-c) / b) end
% Herbie alternative (81.5% accuracy): leading-order root -c/b; a is unused.
function tmp = code(a, b, c) tmp = -c / b; end
(* Herbie alternative (81.5% accuracy): leading-order root -c/b; a is unused. *)
code[a_, b_, c_] := N[((-c) / b), $MachinePrecision]
\begin{array}{l}
\\
\frac{-c}{b}
\end{array}
Initial program 31.8%
Taylor expanded in b around inf
mul-1-neg N/A
distribute-neg-frac2 N/A
lower-/.f64 N/A
lower-neg.f64 81.5
Applied rewrites 81.5%
Final simplification 81.5%
herbie shell --seed 2024233
; Full problem statement: named, with preconditions restricting each input
; to the open interval (1.1102230246251565e-16, 9007199254740992.0),
; i.e. roughly (2^-53, 2^53).
(FPCore (a b c)
:name "Quadratic roots, medium range"
:precision binary64
:pre (and (and (and (< 1.1102230246251565e-16 a) (< a 9007199254740992.0)) (and (< 1.1102230246251565e-16 b) (< b 9007199254740992.0))) (and (< 1.1102230246251565e-16 c) (< c 9007199254740992.0)))
(/ (+ (- b) (sqrt (- (* b b) (* (* 4.0 a) c)))) (* 2.0 a)))