
(FPCore (a b c) :precision binary64 (/ (+ (- b) (sqrt (- (* b b) (* (* 4.0 a) c)))) (* 2.0 a)))
double code(double a, double b, double c) {
return (-b + sqrt(((b * b) - ((4.0 * a) * c)))) / (2.0 * a);
}
! Larger root of a*x^2 + b*x + c = 0 via the textbook quadratic formula
! (binary64).  Reference implementation; loses accuracy to cancellation
! when b*b dominates 4*a*c.
! Fixes: added implicit none (was missing), named the closing end.
real(8) function code(a, b, c)
implicit none
real(8), intent (in) :: a
real(8), intent (in) :: b
real(8), intent (in) :: c
code = (-b + sqrt(((b * b) - ((4.0d0 * a) * c)))) / (2.0d0 * a)
end function code
/**
 * Larger root of a*x^2 + b*x + c = 0 using the textbook quadratic
 * formula in binary64.  Reference (pre-Herbie) program; subject to
 * cancellation when b*b dominates 4*a*c.
 */
public static double code(double a, double b, double c) {
    final double discriminant = (b * b) - ((4.0 * a) * c);
    // sqrt(d) - b is bit-identical to -b + sqrt(d) (IEEE addition commutes).
    return (Math.sqrt(discriminant) - b) / (2.0 * a);
}
def code(a, b, c):
    """Larger root of a*x**2 + b*x + c = 0 (textbook formula, binary64).

    Reference (pre-Herbie) program; suffers cancellation when b*b
    dominates 4*a*c.
    """
    discriminant = (b * b) - ((4.0 * a) * c)
    return (-b + math.sqrt(discriminant)) / (2.0 * a)
function code(a, b, c) return Float64(Float64(Float64(-b) + sqrt(Float64(Float64(b * b) - Float64(Float64(4.0 * a) * c)))) / Float64(2.0 * a)) end
% Larger root of a*x^2 + b*x + c = 0 via the textbook quadratic formula.
function tmp = code(a, b, c) tmp = (-b + sqrt(((b * b) - ((4.0 * a) * c)))) / (2.0 * a); end
code[a_, b_, c_] := N[(N[((-b) + N[Sqrt[N[(N[(b * b), $MachinePrecision] - N[(N[(4.0 * a), $MachinePrecision] * c), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision] / N[(2.0 * a), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{\left(-b\right) + \sqrt{b \cdot b - \left(4 \cdot a\right) \cdot c}}{2 \cdot a}
\end{array}
Sampling outcomes in binary64 precision:
Herbie found 11 alternatives:
| Alternative | Accuracy | Speedup |
|---|---|---|
(FPCore (a b c) :precision binary64 (/ (+ (- b) (sqrt (- (* b b) (* (* 4.0 a) c)))) (* 2.0 a)))
double code(double a, double b, double c) {
return (-b + sqrt(((b * b) - ((4.0 * a) * c)))) / (2.0 * a);
}
real(8) function code(a, b, c)
real(8), intent (in) :: a
real(8), intent (in) :: b
real(8), intent (in) :: c
code = (-b + sqrt(((b * b) - ((4.0d0 * a) * c)))) / (2.0d0 * a)
end function
public static double code(double a, double b, double c) {
return (-b + Math.sqrt(((b * b) - ((4.0 * a) * c)))) / (2.0 * a);
}
def code(a, b, c): return (-b + math.sqrt(((b * b) - ((4.0 * a) * c)))) / (2.0 * a)
function code(a, b, c) return Float64(Float64(Float64(-b) + sqrt(Float64(Float64(b * b) - Float64(Float64(4.0 * a) * c)))) / Float64(2.0 * a)) end
function tmp = code(a, b, c) tmp = (-b + sqrt(((b * b) - ((4.0 * a) * c)))) / (2.0 * a); end
code[a_, b_, c_] := N[(N[((-b) + N[Sqrt[N[(N[(b * b), $MachinePrecision] - N[(N[(4.0 * a), $MachinePrecision] * c), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision] / N[(2.0 * a), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{\left(-b\right) + \sqrt{b \cdot b - \left(4 \cdot a\right) \cdot c}}{2 \cdot a}
\end{array}
(FPCore (a b c)
:precision binary64
(+
(* -2.0 (/ (* (pow a 2.0) (pow c 3.0)) (pow b 5.0)))
(-
(-
(*
-0.25
(/
(+
(* 16.0 (* (pow a 4.0) (pow c 4.0)))
(pow (* -2.0 (* (pow a 2.0) (pow c 2.0))) 2.0))
(* a (pow b 7.0))))
(/ (* a (pow c 2.0)) (pow b 3.0)))
(/ c b))))
double code(double a, double b, double c) {
return (-2.0 * ((pow(a, 2.0) * pow(c, 3.0)) / pow(b, 5.0))) + (((-0.25 * (((16.0 * (pow(a, 4.0) * pow(c, 4.0))) + pow((-2.0 * (pow(a, 2.0) * pow(c, 2.0))), 2.0)) / (a * pow(b, 7.0)))) - ((a * pow(c, 2.0)) / pow(b, 3.0))) - (c / b));
}
real(8) function code(a, b, c)
real(8), intent (in) :: a
real(8), intent (in) :: b
real(8), intent (in) :: c
code = ((-2.0d0) * (((a ** 2.0d0) * (c ** 3.0d0)) / (b ** 5.0d0))) + ((((-0.25d0) * (((16.0d0 * ((a ** 4.0d0) * (c ** 4.0d0))) + (((-2.0d0) * ((a ** 2.0d0) * (c ** 2.0d0))) ** 2.0d0)) / (a * (b ** 7.0d0)))) - ((a * (c ** 2.0d0)) / (b ** 3.0d0))) - (c / b))
end function
public static double code(double a, double b, double c) {
return (-2.0 * ((Math.pow(a, 2.0) * Math.pow(c, 3.0)) / Math.pow(b, 5.0))) + (((-0.25 * (((16.0 * (Math.pow(a, 4.0) * Math.pow(c, 4.0))) + Math.pow((-2.0 * (Math.pow(a, 2.0) * Math.pow(c, 2.0))), 2.0)) / (a * Math.pow(b, 7.0)))) - ((a * Math.pow(c, 2.0)) / Math.pow(b, 3.0))) - (c / b));
}
def code(a, b, c): return (-2.0 * ((math.pow(a, 2.0) * math.pow(c, 3.0)) / math.pow(b, 5.0))) + (((-0.25 * (((16.0 * (math.pow(a, 4.0) * math.pow(c, 4.0))) + math.pow((-2.0 * (math.pow(a, 2.0) * math.pow(c, 2.0))), 2.0)) / (a * math.pow(b, 7.0)))) - ((a * math.pow(c, 2.0)) / math.pow(b, 3.0))) - (c / b))
function code(a, b, c) return Float64(Float64(-2.0 * Float64(Float64((a ^ 2.0) * (c ^ 3.0)) / (b ^ 5.0))) + Float64(Float64(Float64(-0.25 * Float64(Float64(Float64(16.0 * Float64((a ^ 4.0) * (c ^ 4.0))) + (Float64(-2.0 * Float64((a ^ 2.0) * (c ^ 2.0))) ^ 2.0)) / Float64(a * (b ^ 7.0)))) - Float64(Float64(a * (c ^ 2.0)) / (b ^ 3.0))) - Float64(c / b))) end
function tmp = code(a, b, c) tmp = (-2.0 * (((a ^ 2.0) * (c ^ 3.0)) / (b ^ 5.0))) + (((-0.25 * (((16.0 * ((a ^ 4.0) * (c ^ 4.0))) + ((-2.0 * ((a ^ 2.0) * (c ^ 2.0))) ^ 2.0)) / (a * (b ^ 7.0)))) - ((a * (c ^ 2.0)) / (b ^ 3.0))) - (c / b)); end
code[a_, b_, c_] := N[(N[(-2.0 * N[(N[(N[Power[a, 2.0], $MachinePrecision] * N[Power[c, 3.0], $MachinePrecision]), $MachinePrecision] / N[Power[b, 5.0], $MachinePrecision]), $MachinePrecision]), $MachinePrecision] + N[(N[(N[(-0.25 * N[(N[(N[(16.0 * N[(N[Power[a, 4.0], $MachinePrecision] * N[Power[c, 4.0], $MachinePrecision]), $MachinePrecision]), $MachinePrecision] + N[Power[N[(-2.0 * N[(N[Power[a, 2.0], $MachinePrecision] * N[Power[c, 2.0], $MachinePrecision]), $MachinePrecision]), $MachinePrecision], 2.0], $MachinePrecision]), $MachinePrecision] / N[(a * N[Power[b, 7.0], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] - N[(N[(a * N[Power[c, 2.0], $MachinePrecision]), $MachinePrecision] / N[Power[b, 3.0], $MachinePrecision]), $MachinePrecision]), $MachinePrecision] - N[(c / b), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
-2 \cdot \frac{{a}^{2} \cdot {c}^{3}}{{b}^{5}} + \left(\left(-0.25 \cdot \frac{16 \cdot \left({a}^{4} \cdot {c}^{4}\right) + {\left(-2 \cdot \left({a}^{2} \cdot {c}^{2}\right)\right)}^{2}}{a \cdot {b}^{7}} - \frac{a \cdot {c}^{2}}{{b}^{3}}\right) - \frac{c}{b}\right)
\end{array}
Initial program 20.3%
*-commutative 20.3%
Simplified 20.3%
Taylor expanded in b around inf 97.2%
Final simplification 97.2%
(FPCore (a b c)
:precision binary64
(let* ((t_0 (pow (* a (* c -4.0)) 4.0)))
(fma
-0.25
(/ (fma 0.0625 t_0 (* t_0 0.015625)) (* a (pow b 7.0)))
(fma
-0.0625
(/ (/ (* 16.0 (* (* a c) (* a c))) a) (pow b 3.0))
(fma
0.25
(* (/ (* c -4.0) b) (/ a a))
(* (/ 0.03125 (pow b 5.0)) (/ (* (pow (* a c) 3.0) -64.0) a)))))))
double code(double a, double b, double c) {
double t_0 = pow((a * (c * -4.0)), 4.0);
return fma(-0.25, (fma(0.0625, t_0, (t_0 * 0.015625)) / (a * pow(b, 7.0))), fma(-0.0625, (((16.0 * ((a * c) * (a * c))) / a) / pow(b, 3.0)), fma(0.25, (((c * -4.0) / b) * (a / a)), ((0.03125 / pow(b, 5.0)) * ((pow((a * c), 3.0) * -64.0) / a)))));
}
function code(a, b, c) t_0 = Float64(a * Float64(c * -4.0)) ^ 4.0 return fma(-0.25, Float64(fma(0.0625, t_0, Float64(t_0 * 0.015625)) / Float64(a * (b ^ 7.0))), fma(-0.0625, Float64(Float64(Float64(16.0 * Float64(Float64(a * c) * Float64(a * c))) / a) / (b ^ 3.0)), fma(0.25, Float64(Float64(Float64(c * -4.0) / b) * Float64(a / a)), Float64(Float64(0.03125 / (b ^ 5.0)) * Float64(Float64((Float64(a * c) ^ 3.0) * -64.0) / a))))) end
code[a_, b_, c_] := Block[{t$95$0 = N[Power[N[(a * N[(c * -4.0), $MachinePrecision]), $MachinePrecision], 4.0], $MachinePrecision]}, N[(-0.25 * N[(N[(0.0625 * t$95$0 + N[(t$95$0 * 0.015625), $MachinePrecision]), $MachinePrecision] / N[(a * N[Power[b, 7.0], $MachinePrecision]), $MachinePrecision]), $MachinePrecision] + N[(-0.0625 * N[(N[(N[(16.0 * N[(N[(a * c), $MachinePrecision] * N[(a * c), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / a), $MachinePrecision] / N[Power[b, 3.0], $MachinePrecision]), $MachinePrecision] + N[(0.25 * N[(N[(N[(c * -4.0), $MachinePrecision] / b), $MachinePrecision] * N[(a / a), $MachinePrecision]), $MachinePrecision] + N[(N[(0.03125 / N[Power[b, 5.0], $MachinePrecision]), $MachinePrecision] * N[(N[(N[Power[N[(a * c), $MachinePrecision], 3.0], $MachinePrecision] * -64.0), $MachinePrecision] / a), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := {\left(a \cdot \left(c \cdot -4\right)\right)}^{4}\\
\mathsf{fma}\left(-0.25, \frac{\mathsf{fma}\left(0.0625, t\_0, t\_0 \cdot 0.015625\right)}{a \cdot {b}^{7}}, \mathsf{fma}\left(-0.0625, \frac{\frac{16 \cdot \left(\left(a \cdot c\right) \cdot \left(a \cdot c\right)\right)}{a}}{{b}^{3}}, \mathsf{fma}\left(0.25, \frac{c \cdot -4}{b} \cdot \frac{a}{a}, \frac{0.03125}{{b}^{5}} \cdot \frac{{\left(a \cdot c\right)}^{3} \cdot -64}{a}\right)\right)\right)
\end{array}
\end{array}
Initial program 20.3%
*-commutative20.3%
Simplified20.3%
*-commutative20.3%
prod-diff20.4%
*-commutative20.4%
distribute-rgt-neg-in20.4%
distribute-lft-neg-in20.4%
metadata-eval20.4%
associate-*r*20.4%
distribute-lft-neg-in20.4%
metadata-eval20.4%
*-commutative20.4%
add-sqr-sqrt0.0%
sqrt-unprod2.9%
*-commutative2.9%
*-commutative2.9%
swap-sqr2.9%
metadata-eval2.9%
metadata-eval2.9%
swap-sqr2.9%
sqrt-unprod2.9%
add-sqr-sqrt2.9%
*-commutative2.9%
add-sqr-sqrt2.9%
sqrt-unprod2.9%
swap-sqr2.9%
Applied egg-rr20.4%
+-commutative20.4%
fma-undefine20.4%
associate-+l+20.4%
*-commutative20.4%
associate-*l*20.4%
+-commutative20.4%
fma-undefine20.3%
unpow220.3%
associate-+l+20.4%
distribute-rgt-out20.4%
distribute-lft-out20.4%
metadata-eval20.4%
Simplified20.4%
Taylor expanded in b around inf 96.8%
Simplified97.2%
unpow293.3%
Applied egg-rr97.2%
Final simplification97.2%
(FPCore (a b c)
:precision binary64
(let* ((t_0 (+ (* (* a c) -8.0) (* 4.0 (* a c)))) (t_1 (pow t_0 2.0)))
(/
(+
(*
-0.5
(/ (+ (* 0.0625 (pow t_0 4.0)) (pow (* -0.125 t_1) 2.0)) (pow b 7.0)))
(+
(* -0.125 (/ t_1 (pow b 3.0)))
(+ (* 0.0625 (/ (pow t_0 3.0) (pow b 5.0))) (* 0.5 (/ t_0 b)))))
(* a 2.0))))
double code(double a, double b, double c) {
double t_0 = ((a * c) * -8.0) + (4.0 * (a * c));
double t_1 = pow(t_0, 2.0);
return ((-0.5 * (((0.0625 * pow(t_0, 4.0)) + pow((-0.125 * t_1), 2.0)) / pow(b, 7.0))) + ((-0.125 * (t_1 / pow(b, 3.0))) + ((0.0625 * (pow(t_0, 3.0) / pow(b, 5.0))) + (0.5 * (t_0 / b))))) / (a * 2.0);
}
real(8) function code(a, b, c)
real(8), intent (in) :: a
real(8), intent (in) :: b
real(8), intent (in) :: c
real(8) :: t_0
real(8) :: t_1
t_0 = ((a * c) * (-8.0d0)) + (4.0d0 * (a * c))
t_1 = t_0 ** 2.0d0
code = (((-0.5d0) * (((0.0625d0 * (t_0 ** 4.0d0)) + (((-0.125d0) * t_1) ** 2.0d0)) / (b ** 7.0d0))) + (((-0.125d0) * (t_1 / (b ** 3.0d0))) + ((0.0625d0 * ((t_0 ** 3.0d0) / (b ** 5.0d0))) + (0.5d0 * (t_0 / b))))) / (a * 2.0d0)
end function
public static double code(double a, double b, double c) {
double t_0 = ((a * c) * -8.0) + (4.0 * (a * c));
double t_1 = Math.pow(t_0, 2.0);
return ((-0.5 * (((0.0625 * Math.pow(t_0, 4.0)) + Math.pow((-0.125 * t_1), 2.0)) / Math.pow(b, 7.0))) + ((-0.125 * (t_1 / Math.pow(b, 3.0))) + ((0.0625 * (Math.pow(t_0, 3.0) / Math.pow(b, 5.0))) + (0.5 * (t_0 / b))))) / (a * 2.0);
}
def code(a, b, c):
    """Herbie alternative: Taylor expansion of the quadratic root in b around inf.

    Fix: the generated rendering collapsed three statements and a return
    onto one physical line, which is a Python syntax error; reformatted
    onto separate lines with the expressions unchanged.
    """
    t_0 = ((a * c) * -8.0) + (4.0 * (a * c))
    t_1 = math.pow(t_0, 2.0)
    return ((-0.5 * (((0.0625 * math.pow(t_0, 4.0)) + math.pow((-0.125 * t_1), 2.0)) / math.pow(b, 7.0))) + ((-0.125 * (t_1 / math.pow(b, 3.0))) + ((0.0625 * (math.pow(t_0, 3.0) / math.pow(b, 5.0))) + (0.5 * (t_0 / b))))) / (a * 2.0)
function code(a, b, c) t_0 = Float64(Float64(Float64(a * c) * -8.0) + Float64(4.0 * Float64(a * c))) t_1 = t_0 ^ 2.0 return Float64(Float64(Float64(-0.5 * Float64(Float64(Float64(0.0625 * (t_0 ^ 4.0)) + (Float64(-0.125 * t_1) ^ 2.0)) / (b ^ 7.0))) + Float64(Float64(-0.125 * Float64(t_1 / (b ^ 3.0))) + Float64(Float64(0.0625 * Float64((t_0 ^ 3.0) / (b ^ 5.0))) + Float64(0.5 * Float64(t_0 / b))))) / Float64(a * 2.0)) end
function tmp = code(a, b, c) t_0 = ((a * c) * -8.0) + (4.0 * (a * c)); t_1 = t_0 ^ 2.0; tmp = ((-0.5 * (((0.0625 * (t_0 ^ 4.0)) + ((-0.125 * t_1) ^ 2.0)) / (b ^ 7.0))) + ((-0.125 * (t_1 / (b ^ 3.0))) + ((0.0625 * ((t_0 ^ 3.0) / (b ^ 5.0))) + (0.5 * (t_0 / b))))) / (a * 2.0); end
code[a_, b_, c_] := Block[{t$95$0 = N[(N[(N[(a * c), $MachinePrecision] * -8.0), $MachinePrecision] + N[(4.0 * N[(a * c), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]}, Block[{t$95$1 = N[Power[t$95$0, 2.0], $MachinePrecision]}, N[(N[(N[(-0.5 * N[(N[(N[(0.0625 * N[Power[t$95$0, 4.0], $MachinePrecision]), $MachinePrecision] + N[Power[N[(-0.125 * t$95$1), $MachinePrecision], 2.0], $MachinePrecision]), $MachinePrecision] / N[Power[b, 7.0], $MachinePrecision]), $MachinePrecision]), $MachinePrecision] + N[(N[(-0.125 * N[(t$95$1 / N[Power[b, 3.0], $MachinePrecision]), $MachinePrecision]), $MachinePrecision] + N[(N[(0.0625 * N[(N[Power[t$95$0, 3.0], $MachinePrecision] / N[Power[b, 5.0], $MachinePrecision]), $MachinePrecision]), $MachinePrecision] + N[(0.5 * N[(t$95$0 / b), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / N[(a * 2.0), $MachinePrecision]), $MachinePrecision]]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := \left(a \cdot c\right) \cdot -8 + 4 \cdot \left(a \cdot c\right)\\
t_1 := {t\_0}^{2}\\
\frac{-0.5 \cdot \frac{0.0625 \cdot {t\_0}^{4} + {\left(-0.125 \cdot t\_1\right)}^{2}}{{b}^{7}} + \left(-0.125 \cdot \frac{t\_1}{{b}^{3}} + \left(0.0625 \cdot \frac{{t\_0}^{3}}{{b}^{5}} + 0.5 \cdot \frac{t\_0}{b}\right)\right)}{a \cdot 2}
\end{array}
\end{array}
Initial program 20.3%
*-commutative20.3%
Simplified20.3%
*-commutative20.3%
prod-diff20.4%
*-commutative20.4%
distribute-rgt-neg-in20.4%
distribute-lft-neg-in20.4%
metadata-eval20.4%
associate-*r*20.4%
distribute-lft-neg-in20.4%
metadata-eval20.4%
*-commutative20.4%
add-sqr-sqrt0.0%
sqrt-unprod2.9%
*-commutative2.9%
*-commutative2.9%
swap-sqr2.9%
metadata-eval2.9%
metadata-eval2.9%
swap-sqr2.9%
sqrt-unprod2.9%
add-sqr-sqrt2.9%
*-commutative2.9%
add-sqr-sqrt2.9%
sqrt-unprod2.9%
swap-sqr2.9%
Applied egg-rr20.4%
+-commutative20.4%
fma-undefine20.4%
associate-+l+20.4%
*-commutative20.4%
associate-*l*20.4%
+-commutative20.4%
fma-undefine20.3%
unpow220.3%
associate-+l+20.4%
distribute-rgt-out20.4%
distribute-lft-out20.4%
metadata-eval20.4%
Simplified20.4%
Taylor expanded in b around inf 96.8%
Final simplification96.8%
(FPCore (a b c) :precision binary64 (- (- (/ (* -2.0 (pow a 2.0)) (/ (pow b 5.0) (pow c 3.0))) (/ c b)) (/ a (/ (pow b 3.0) (pow c 2.0)))))
double code(double a, double b, double c) {
return (((-2.0 * pow(a, 2.0)) / (pow(b, 5.0) / pow(c, 3.0))) - (c / b)) - (a / (pow(b, 3.0) / pow(c, 2.0)));
}
real(8) function code(a, b, c)
real(8), intent (in) :: a
real(8), intent (in) :: b
real(8), intent (in) :: c
code = ((((-2.0d0) * (a ** 2.0d0)) / ((b ** 5.0d0) / (c ** 3.0d0))) - (c / b)) - (a / ((b ** 3.0d0) / (c ** 2.0d0)))
end function
public static double code(double a, double b, double c) {
return (((-2.0 * Math.pow(a, 2.0)) / (Math.pow(b, 5.0) / Math.pow(c, 3.0))) - (c / b)) - (a / (Math.pow(b, 3.0) / Math.pow(c, 2.0)));
}
def code(a, b, c): return (((-2.0 * math.pow(a, 2.0)) / (math.pow(b, 5.0) / math.pow(c, 3.0))) - (c / b)) - (a / (math.pow(b, 3.0) / math.pow(c, 2.0)))
function code(a, b, c) return Float64(Float64(Float64(Float64(-2.0 * (a ^ 2.0)) / Float64((b ^ 5.0) / (c ^ 3.0))) - Float64(c / b)) - Float64(a / Float64((b ^ 3.0) / (c ^ 2.0)))) end
function tmp = code(a, b, c) tmp = (((-2.0 * (a ^ 2.0)) / ((b ^ 5.0) / (c ^ 3.0))) - (c / b)) - (a / ((b ^ 3.0) / (c ^ 2.0))); end
code[a_, b_, c_] := N[(N[(N[(N[(-2.0 * N[Power[a, 2.0], $MachinePrecision]), $MachinePrecision] / N[(N[Power[b, 5.0], $MachinePrecision] / N[Power[c, 3.0], $MachinePrecision]), $MachinePrecision]), $MachinePrecision] - N[(c / b), $MachinePrecision]), $MachinePrecision] - N[(a / N[(N[Power[b, 3.0], $MachinePrecision] / N[Power[c, 2.0], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\left(\frac{-2 \cdot {a}^{2}}{\frac{{b}^{5}}{{c}^{3}}} - \frac{c}{b}\right) - \frac{a}{\frac{{b}^{3}}{{c}^{2}}}
\end{array}
Initial program 20.3%
*-commutative20.3%
Simplified20.3%
Taylor expanded in b around inf 96.1%
associate-+r+96.1%
mul-1-neg96.1%
unsub-neg96.1%
mul-1-neg96.1%
unsub-neg96.1%
associate-/l*96.1%
associate-*r/96.1%
associate-/l*96.1%
Simplified96.1%
Final simplification96.1%
(FPCore (a b c)
:precision binary64
(let* ((t_0 (+ (* (* a c) -8.0) (* 4.0 (* a c)))) (t_1 (* a (* c -4.0))))
(/
(+
(+ (* 0.0625 (/ (pow t_0 3.0) (pow b 5.0))) (* 0.5 (/ t_0 b)))
(* -0.125 (/ (* t_1 t_1) (pow b 3.0))))
(* a 2.0))))
double code(double a, double b, double c) {
double t_0 = ((a * c) * -8.0) + (4.0 * (a * c));
double t_1 = a * (c * -4.0);
return (((0.0625 * (pow(t_0, 3.0) / pow(b, 5.0))) + (0.5 * (t_0 / b))) + (-0.125 * ((t_1 * t_1) / pow(b, 3.0)))) / (a * 2.0);
}
real(8) function code(a, b, c)
real(8), intent (in) :: a
real(8), intent (in) :: b
real(8), intent (in) :: c
real(8) :: t_0
real(8) :: t_1
t_0 = ((a * c) * (-8.0d0)) + (4.0d0 * (a * c))
t_1 = a * (c * (-4.0d0))
code = (((0.0625d0 * ((t_0 ** 3.0d0) / (b ** 5.0d0))) + (0.5d0 * (t_0 / b))) + ((-0.125d0) * ((t_1 * t_1) / (b ** 3.0d0)))) / (a * 2.0d0)
end function
public static double code(double a, double b, double c) {
double t_0 = ((a * c) * -8.0) + (4.0 * (a * c));
double t_1 = a * (c * -4.0);
return (((0.0625 * (Math.pow(t_0, 3.0) / Math.pow(b, 5.0))) + (0.5 * (t_0 / b))) + (-0.125 * ((t_1 * t_1) / Math.pow(b, 3.0)))) / (a * 2.0);
}
def code(a, b, c):
    """Herbie alternative: simplified Taylor expansion in b around inf.

    Fix: the generated rendering collapsed two assignments and the return
    onto one physical line, which is a Python syntax error; reformatted
    with the expressions unchanged.
    """
    t_0 = ((a * c) * -8.0) + (4.0 * (a * c))
    t_1 = a * (c * -4.0)
    return (((0.0625 * (math.pow(t_0, 3.0) / math.pow(b, 5.0))) + (0.5 * (t_0 / b))) + (-0.125 * ((t_1 * t_1) / math.pow(b, 3.0)))) / (a * 2.0)
function code(a, b, c) t_0 = Float64(Float64(Float64(a * c) * -8.0) + Float64(4.0 * Float64(a * c))) t_1 = Float64(a * Float64(c * -4.0)) return Float64(Float64(Float64(Float64(0.0625 * Float64((t_0 ^ 3.0) / (b ^ 5.0))) + Float64(0.5 * Float64(t_0 / b))) + Float64(-0.125 * Float64(Float64(t_1 * t_1) / (b ^ 3.0)))) / Float64(a * 2.0)) end
function tmp = code(a, b, c) t_0 = ((a * c) * -8.0) + (4.0 * (a * c)); t_1 = a * (c * -4.0); tmp = (((0.0625 * ((t_0 ^ 3.0) / (b ^ 5.0))) + (0.5 * (t_0 / b))) + (-0.125 * ((t_1 * t_1) / (b ^ 3.0)))) / (a * 2.0); end
code[a_, b_, c_] := Block[{t$95$0 = N[(N[(N[(a * c), $MachinePrecision] * -8.0), $MachinePrecision] + N[(4.0 * N[(a * c), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]}, Block[{t$95$1 = N[(a * N[(c * -4.0), $MachinePrecision]), $MachinePrecision]}, N[(N[(N[(N[(0.0625 * N[(N[Power[t$95$0, 3.0], $MachinePrecision] / N[Power[b, 5.0], $MachinePrecision]), $MachinePrecision]), $MachinePrecision] + N[(0.5 * N[(t$95$0 / b), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] + N[(-0.125 * N[(N[(t$95$1 * t$95$1), $MachinePrecision] / N[Power[b, 3.0], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / N[(a * 2.0), $MachinePrecision]), $MachinePrecision]]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := \left(a \cdot c\right) \cdot -8 + 4 \cdot \left(a \cdot c\right)\\
t_1 := a \cdot \left(c \cdot -4\right)\\
\frac{\left(0.0625 \cdot \frac{{t\_0}^{3}}{{b}^{5}} + 0.5 \cdot \frac{t\_0}{b}\right) + -0.125 \cdot \frac{t\_1 \cdot t\_1}{{b}^{3}}}{a \cdot 2}
\end{array}
\end{array}
Initial program 20.3%
*-commutative20.3%
Simplified20.3%
*-commutative20.3%
prod-diff20.4%
*-commutative20.4%
distribute-rgt-neg-in20.4%
distribute-lft-neg-in20.4%
metadata-eval20.4%
associate-*r*20.4%
distribute-lft-neg-in20.4%
metadata-eval20.4%
*-commutative20.4%
add-sqr-sqrt0.0%
sqrt-unprod2.9%
*-commutative2.9%
*-commutative2.9%
swap-sqr2.9%
metadata-eval2.9%
metadata-eval2.9%
swap-sqr2.9%
sqrt-unprod2.9%
add-sqr-sqrt2.9%
*-commutative2.9%
add-sqr-sqrt2.9%
sqrt-unprod2.9%
swap-sqr2.9%
Applied egg-rr20.4%
+-commutative20.4%
fma-undefine20.4%
associate-+l+20.4%
*-commutative20.4%
associate-*l*20.4%
+-commutative20.4%
fma-undefine20.3%
unpow220.3%
associate-+l+20.4%
distribute-rgt-out20.4%
distribute-lft-out20.4%
metadata-eval20.4%
Simplified20.4%
Taylor expanded in b around inf 95.8%
unpow295.8%
distribute-rgt-out95.8%
metadata-eval95.8%
associate-*r*95.8%
distribute-rgt-out95.8%
metadata-eval95.8%
associate-*r*95.8%
Applied egg-rr95.8%
Final simplification95.8%
(FPCore (a b c) :precision binary64 (let* ((t_0 (/ (- (sqrt (- (* b b) (* c (* a 4.0)))) b) (* a 2.0)))) (if (<= t_0 -5e-20) t_0 (/ (- c) b))))
double code(double a, double b, double c) {
double t_0 = (sqrt(((b * b) - (c * (a * 4.0)))) - b) / (a * 2.0);
double tmp;
if (t_0 <= -5e-20) {
tmp = t_0;
} else {
tmp = -c / b;
}
return tmp;
}
real(8) function code(a, b, c)
real(8), intent (in) :: a
real(8), intent (in) :: b
real(8), intent (in) :: c
real(8) :: t_0
real(8) :: tmp
t_0 = (sqrt(((b * b) - (c * (a * 4.0d0)))) - b) / (a * 2.0d0)
if (t_0 <= (-5d-20)) then
tmp = t_0
else
tmp = -c / b
end if
code = tmp
end function
public static double code(double a, double b, double c) {
double t_0 = (Math.sqrt(((b * b) - (c * (a * 4.0)))) - b) / (a * 2.0);
double tmp;
if (t_0 <= -5e-20) {
tmp = t_0;
} else {
tmp = -c / b;
}
return tmp;
}
def code(a, b, c):
    """Herbie alternative: branch between the exact root and -c/b.

    Uses the exact formula when the root t_0 is clearly negative, and the
    asymptotic approximation -c/b otherwise (where the exact formula
    cancels catastrophically).

    Fix: the generated rendering collapsed the assignments, the if/else,
    and the return onto one physical line, which is a Python syntax
    error; reformatted with the expressions unchanged.
    """
    t_0 = (math.sqrt(((b * b) - (c * (a * 4.0)))) - b) / (a * 2.0)
    tmp = 0
    if t_0 <= -5e-20:
        tmp = t_0
    else:
        tmp = -c / b
    return tmp
function code(a, b, c) t_0 = Float64(Float64(sqrt(Float64(Float64(b * b) - Float64(c * Float64(a * 4.0)))) - b) / Float64(a * 2.0)) tmp = 0.0 if (t_0 <= -5e-20) tmp = t_0; else tmp = Float64(Float64(-c) / b); end return tmp end
function tmp_2 = code(a, b, c) t_0 = (sqrt(((b * b) - (c * (a * 4.0)))) - b) / (a * 2.0); tmp = 0.0; if (t_0 <= -5e-20) tmp = t_0; else tmp = -c / b; end tmp_2 = tmp; end
code[a_, b_, c_] := Block[{t$95$0 = N[(N[(N[Sqrt[N[(N[(b * b), $MachinePrecision] - N[(c * N[(a * 4.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]], $MachinePrecision] - b), $MachinePrecision] / N[(a * 2.0), $MachinePrecision]), $MachinePrecision]}, If[LessEqual[t$95$0, -5e-20], t$95$0, N[((-c) / b), $MachinePrecision]]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := \frac{\sqrt{b \cdot b - c \cdot \left(a \cdot 4\right)} - b}{a \cdot 2}\\
\mathbf{if}\;t\_0 \leq -5 \cdot 10^{-20}:\\
\;\;\;\;t\_0\\
\mathbf{else}:\\
\;\;\;\;\frac{-c}{b}\\
\end{array}
\end{array}
if (/.f64 (+.f64 (neg.f64 b) (sqrt.f64 (-.f64 (*.f64 b b) (*.f64 (*.f64 4 a) c)))) (*.f64 2 a)) < -4.9999999999999999e-20Initial program 70.2%
if -4.9999999999999999e-20 < (/.f64 (+.f64 (neg.f64 b) (sqrt.f64 (-.f64 (*.f64 b b) (*.f64 (*.f64 4 a) c)))) (*.f64 2 a)) Initial program 6.1%
*-commutative6.1%
Simplified6.1%
Taylor expanded in b around inf 98.3%
mul-1-neg98.3%
distribute-neg-frac98.3%
Simplified98.3%
Final simplification92.0%
(FPCore (a b c) :precision binary64 (- (/ (- c) b) (/ a (/ (pow b 3.0) (pow c 2.0)))))
double code(double a, double b, double c) {
return (-c / b) - (a / (pow(b, 3.0) / pow(c, 2.0)));
}
real(8) function code(a, b, c)
real(8), intent (in) :: a
real(8), intent (in) :: b
real(8), intent (in) :: c
code = (-c / b) - (a / ((b ** 3.0d0) / (c ** 2.0d0)))
end function
public static double code(double a, double b, double c) {
return (-c / b) - (a / (Math.pow(b, 3.0) / Math.pow(c, 2.0)));
}
def code(a, b, c): return (-c / b) - (a / (math.pow(b, 3.0) / math.pow(c, 2.0)))
function code(a, b, c) return Float64(Float64(Float64(-c) / b) - Float64(a / Float64((b ^ 3.0) / (c ^ 2.0)))) end
function tmp = code(a, b, c) tmp = (-c / b) - (a / ((b ^ 3.0) / (c ^ 2.0))); end
code[a_, b_, c_] := N[(N[((-c) / b), $MachinePrecision] - N[(a / N[(N[Power[b, 3.0], $MachinePrecision] / N[Power[c, 2.0], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{-c}{b} - \frac{a}{\frac{{b}^{3}}{{c}^{2}}}
\end{array}
Initial program 20.3%
*-commutative20.3%
Simplified20.3%
Taylor expanded in b around inf 93.7%
mul-1-neg93.7%
unsub-neg93.7%
mul-1-neg93.7%
distribute-neg-frac93.7%
associate-/l*93.7%
Simplified93.7%
Final simplification93.7%
(FPCore (a b c) :precision binary64 (- (/ (* a (- (pow c 2.0))) (pow b 3.0)) (/ c b)))
double code(double a, double b, double c) {
return ((a * -pow(c, 2.0)) / pow(b, 3.0)) - (c / b);
}
real(8) function code(a, b, c)
real(8), intent (in) :: a
real(8), intent (in) :: b
real(8), intent (in) :: c
code = ((a * -(c ** 2.0d0)) / (b ** 3.0d0)) - (c / b)
end function
public static double code(double a, double b, double c) {
return ((a * -Math.pow(c, 2.0)) / Math.pow(b, 3.0)) - (c / b);
}
def code(a, b, c): return ((a * -math.pow(c, 2.0)) / math.pow(b, 3.0)) - (c / b)
function code(a, b, c) return Float64(Float64(Float64(a * Float64(-(c ^ 2.0))) / (b ^ 3.0)) - Float64(c / b)) end
function tmp = code(a, b, c) tmp = ((a * -(c ^ 2.0)) / (b ^ 3.0)) - (c / b); end
code[a_, b_, c_] := N[(N[(N[(a * (-N[Power[c, 2.0], $MachinePrecision])), $MachinePrecision] / N[Power[b, 3.0], $MachinePrecision]), $MachinePrecision] - N[(c / b), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{a \cdot \left(-{c}^{2}\right)}{{b}^{3}} - \frac{c}{b}
\end{array}
Initial program 20.3%
*-commutative20.3%
Simplified20.3%
Taylor expanded in b around inf 93.3%
distribute-lft-out93.3%
associate-/l*93.3%
associate-/l*93.3%
Simplified93.3%
Taylor expanded in a around 0 93.7%
mul-1-neg93.7%
unsub-neg93.7%
associate-*r/93.7%
mul-1-neg93.7%
Simplified93.7%
Final simplification93.7%
(FPCore (a b c) :precision binary64 (/ (* -2.0 (+ (* c (/ a b)) (/ (* (* a c) (* a c)) (pow b 3.0)))) (* a 2.0)))
double code(double a, double b, double c) {
return (-2.0 * ((c * (a / b)) + (((a * c) * (a * c)) / pow(b, 3.0)))) / (a * 2.0);
}
real(8) function code(a, b, c)
real(8), intent (in) :: a
real(8), intent (in) :: b
real(8), intent (in) :: c
code = ((-2.0d0) * ((c * (a / b)) + (((a * c) * (a * c)) / (b ** 3.0d0)))) / (a * 2.0d0)
end function
public static double code(double a, double b, double c) {
return (-2.0 * ((c * (a / b)) + (((a * c) * (a * c)) / Math.pow(b, 3.0)))) / (a * 2.0);
}
def code(a, b, c): return (-2.0 * ((c * (a / b)) + (((a * c) * (a * c)) / math.pow(b, 3.0)))) / (a * 2.0)
function code(a, b, c) return Float64(Float64(-2.0 * Float64(Float64(c * Float64(a / b)) + Float64(Float64(Float64(a * c) * Float64(a * c)) / (b ^ 3.0)))) / Float64(a * 2.0)) end
function tmp = code(a, b, c) tmp = (-2.0 * ((c * (a / b)) + (((a * c) * (a * c)) / (b ^ 3.0)))) / (a * 2.0); end
code[a_, b_, c_] := N[(N[(-2.0 * N[(N[(c * N[(a / b), $MachinePrecision]), $MachinePrecision] + N[(N[(N[(a * c), $MachinePrecision] * N[(a * c), $MachinePrecision]), $MachinePrecision] / N[Power[b, 3.0], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / N[(a * 2.0), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{-2 \cdot \left(c \cdot \frac{a}{b} + \frac{\left(a \cdot c\right) \cdot \left(a \cdot c\right)}{{b}^{3}}\right)}{a \cdot 2}
\end{array}
Initial program 20.3%
*-commutative20.3%
Simplified20.3%
Taylor expanded in b around inf 93.3%
distribute-lft-out93.3%
associate-/l*93.3%
associate-/l*93.3%
Simplified93.3%
Taylor expanded in a around 0 93.3%
unpow293.3%
unpow293.3%
swap-sqr93.3%
unpow293.3%
Simplified93.3%
associate-/r/93.3%
Applied egg-rr93.3%
unpow293.3%
Applied egg-rr93.3%
Final simplification93.3%
(FPCore (a b c) :precision binary64 (/ (- c) b))
/* Asymptotic approximation of the quadratic root: -c/b (first-order
 * Taylor expansion in b around infinity).  Parameter a is unused in
 * this alternative. */
double code(double a, double b, double c) {
    double numerator = -c;  /* IEEE negation is exact, so -c/b == (-c)/b */
    return numerator / b;
}
real(8) function code(a, b, c)
real(8), intent (in) :: a
real(8), intent (in) :: b
real(8), intent (in) :: c
code = -c / b
end function
public static double code(double a, double b, double c) {
return -c / b;
}
def code(a, b, c):
    """Asymptotic root approximation -c/b (a is unused in this alternative)."""
    numerator = -c
    return numerator / b
function code(a, b, c) return Float64(Float64(-c) / b) end
function tmp = code(a, b, c) tmp = -c / b; end
code[a_, b_, c_] := N[((-c) / b), $MachinePrecision]
\begin{array}{l}
\\
\frac{-c}{b}
\end{array}
Initial program 20.3%
*-commutative20.3%
Simplified20.3%
Taylor expanded in b around inf 88.1%
mul-1-neg88.1%
distribute-neg-frac88.1%
Simplified88.1%
Final simplification88.1%
(FPCore (a b c) :precision binary64 (/ 0.0 a))
double code(double a, double b, double c) {
return 0.0 / a;
}
real(8) function code(a, b, c)
real(8), intent (in) :: a
real(8), intent (in) :: b
real(8), intent (in) :: c
code = 0.0d0 / a
end function
public static double code(double a, double b, double c) {
return 0.0 / a;
}
def code(a, b, c): return 0.0 / a
function code(a, b, c) return Float64(0.0 / a) end
function tmp = code(a, b, c) tmp = 0.0 / a; end
code[a_, b_, c_] := N[(0.0 / a), $MachinePrecision]
\begin{array}{l}
\\
\frac{0}{a}
\end{array}
Initial program 20.3%
*-commutative20.3%
Simplified20.3%
*-commutative20.3%
prod-diff20.4%
*-commutative20.4%
distribute-rgt-neg-in20.4%
distribute-lft-neg-in20.4%
metadata-eval20.4%
associate-*r*20.4%
distribute-lft-neg-in20.4%
metadata-eval20.4%
*-commutative20.4%
add-sqr-sqrt0.0%
sqrt-unprod2.9%
*-commutative2.9%
*-commutative2.9%
swap-sqr2.9%
metadata-eval2.9%
metadata-eval2.9%
swap-sqr2.9%
sqrt-unprod2.9%
add-sqr-sqrt2.9%
*-commutative2.9%
add-sqr-sqrt2.9%
sqrt-unprod2.9%
swap-sqr2.9%
Applied egg-rr20.4%
+-commutative20.4%
fma-undefine20.4%
associate-+l+20.4%
*-commutative20.4%
associate-*l*20.4%
+-commutative20.4%
fma-undefine20.3%
unpow220.3%
associate-+l+20.4%
distribute-rgt-out20.4%
distribute-lft-out20.4%
metadata-eval20.4%
Simplified20.4%
add-cube-cbrt20.4%
pow320.4%
Applied egg-rr20.4%
Taylor expanded in c around 0 3.3%
pow-base-13.3%
associate-*r*3.3%
metadata-eval3.3%
associate-*r/3.3%
distribute-rgt1-in3.3%
metadata-eval3.3%
mul0-lft3.3%
metadata-eval3.3%
Simplified3.3%
Final simplification3.3%
herbie shell --seed 2024034
(FPCore (a b c)
:name "Quadratic roots, wide range"
:precision binary64
:pre (and (and (and (< 4.930380657631324e-32 a) (< a 2.028240960365167e+31)) (and (< 4.930380657631324e-32 b) (< b 2.028240960365167e+31))) (and (< 4.930380657631324e-32 c) (< c 2.028240960365167e+31)))
(/ (+ (- b) (sqrt (- (* b b) (* (* 4.0 a) c)))) (* 2.0 a)))