
; Initial program: cbrt(g / (2*a)) evaluated in IEEE binary64.
(FPCore (g a) :precision binary64 (cbrt (/ g (* 2.0 a))))
double code(double g, double a) {
return cbrt((g / (2.0 * a)));
}
/** Computes the cube root of g / (2*a) in double precision. */
public static double code(double g, double a) {
	final double denominator = 2.0 * a;
	return Math.cbrt(g / denominator);
}
# Cube root of g / (2*a); Float64 wrappers round each intermediate, as in the report.
function code(g, a)
    d = Float64(2.0 * a)
    return cbrt(Float64(g / d))
end
(* Cube root of g/(2 a); each intermediate is rounded via N[..., $MachinePrecision]. *)
code[g_, a_] := N[Power[N[(g / N[(2.0 * a), $MachinePrecision]), $MachinePrecision], 1/3], $MachinePrecision]
\begin{array}{l}
\\
\sqrt[3]{\frac{g}{2 \cdot a}}
\end{array}
Sampling outcomes in binary64 precision:
Herbie found 3 alternatives:
| Alternative | Accuracy | Speedup |
|---|---|---|
; cbrt(g / (2*a)) — same expression as the initial program, reprinted by the report.
(FPCore (g a) :precision binary64 (cbrt (/ g (* 2.0 a))))
double code(double g, double a) {
return cbrt((g / (2.0 * a)));
}
/** Returns cbrt(g / (2*a)) using double arithmetic. */
public static double code(double g, double a) {
	final double twoA = 2.0 * a;
	final double ratio = g / twoA;
	return Math.cbrt(ratio);
}
# cbrt of g / (2*a) with per-step Float64 rounding, as in the report.
function code(g, a)
    two_a = Float64(2.0 * a)
    ratio = Float64(g / two_a)
    return cbrt(ratio)
end
(* cbrt(g/(2 a)); intermediates rounded to $MachinePrecision at every step. *)
code[g_, a_] := N[Power[N[(g / N[(2.0 * a), $MachinePrecision]), $MachinePrecision], 1/3], $MachinePrecision]
\begin{array}{l}
\\
\sqrt[3]{\frac{g}{2 \cdot a}}
\end{array}
; Alternative: cbrt of a quotient rewritten as a quotient of cbrts.
(FPCore (g a) :precision binary64 (/ (cbrt g) (cbrt (* a 2.0))))
double code(double g, double a) {
return cbrt(g) / cbrt((a * 2.0));
}
/** Quotient-of-cbrts form: Math.cbrt(g) / Math.cbrt(a*2). */
public static double code(double g, double a) {
	final double numerator = Math.cbrt(g);
	final double denominator = Math.cbrt(a * 2.0);
	return numerator / denominator;
}
# Quotient-of-cbrts form: cbrt(g) / cbrt(a*2), final division rounded to Float64.
function code(g, a)
    num = cbrt(g)
    den = cbrt(Float64(a * 2.0))
    return Float64(num / den)
end
(* Quotient-of-cbrts form: numerator and denominator cube roots taken separately. *)
code[g_, a_] := N[(N[Power[g, 1/3], $MachinePrecision] / N[Power[N[(a * 2.0), $MachinePrecision], 1/3], $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{\sqrt[3]{g}}{\sqrt[3]{a \cdot 2}}
\end{array}
Initial program 74.9%
lift-cbrt.f64 N/A
lift-*.f64 N/A
lift-/.f64 N/A
cbrt-div N/A
lower-/.f64 N/A
lower-cbrt.f64 N/A
lower-cbrt.f64 N/A
*-commutative N/A
lower-*.f64 98.7
Applied rewrites98.7%
; Alternative with expanded denominator a * (a * ((a/a) * (2/a))), kept verbatim from Herbie.
(FPCore (g a) :precision binary64 (cbrt (/ g (* a (* a (* (/ a a) (/ 2.0 a)))))))
double code(double g, double a) {
return cbrt((g / (a * (a * ((a / a) * (2.0 / a))))));
}
/**
 * Herbie-emitted expanded denominator; the (a/a) and (2.0/a) factors and their
 * multiplication order are preserved exactly so rounding matches the report.
 */
public static double code(double g, double a) {
	final double ratioProduct = (a / a) * (2.0 / a);
	final double denom = a * (a * ratioProduct);
	return Math.cbrt(g / denom);
}
# Herbie-emitted expanded denominator; every Float64 rounding step and the
# multiplication order are preserved exactly so behavior matches the report.
function code(g, a)
    inner = Float64(Float64(a / a) * Float64(2.0 / a))
    denom = Float64(a * Float64(a * inner))
    return cbrt(Float64(g / denom))
end
(* Herbie-emitted expanded denominator a*(a*((a/a)*(2/a))); kept verbatim from the report. *)
code[g_, a_] := N[Power[N[(g / N[(a * N[(a * N[(N[(a / a), $MachinePrecision] * N[(2.0 / a), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], 1/3], $MachinePrecision]
\begin{array}{l}
\\
\sqrt[3]{\frac{g}{a \cdot \left(a \cdot \left(\frac{a}{a} \cdot \frac{2}{a}\right)\right)}}
\end{array}
Initial program 74.9%
lift-*.f64 N/A
count-2-rev N/A
flip3-+ N/A
lower-/.f64 N/A
lower-+.f64 N/A
lower-pow.f64 N/A
lower-pow.f64 N/A
lower-fma.f64 N/A
lower--.f64 N/A
lower-*.f64 N/A
lower-*.f64 27.1
Applied rewrites27.1%
lift-+.f64 N/A
lift-pow.f64 N/A
lift-pow.f64 N/A
sum-cubes N/A
lift-*.f64 N/A
lift-*.f64 N/A
lift--.f64 N/A
lift-fma.f64 N/A
lower-*.f64 N/A
lift--.f64 N/A
+-inverses N/A
count-2-rev N/A
lower-*.f64 27.1
Applied rewrites27.1%
lift-/.f64 N/A
lift-*.f64 N/A
lift-fma.f64 N/A
lift-*.f64 N/A
+-rgt-identity N/A
lift-fma.f64 N/A
lift--.f64 N/A
lift-*.f64 N/A
lift-*.f64 N/A
associate-/l* N/A
lower-*.f64 N/A
lift-*.f64 N/A
+-inverses N/A
+-rgt-identity N/A
lower-/.f64 N/A
*-commutative N/A
lower-*.f64 N/A
lift-*.f64 40.8
Applied rewrites40.8%
lift-*.f64 N/A
lift-*.f64 N/A
associate-*l* N/A
lower-*.f64 N/A
lower-*.f64 41.9
lift-*.f64 N/A
lift-/.f64 N/A
lift-*.f64 N/A
times-frac N/A
lower-*.f64 N/A
lower-/.f64 N/A
lower-/.f64 74.9
Applied rewrites74.9%
; Alternative: denominator 2*a rewritten as a + a.
(FPCore (g a) :precision binary64 (cbrt (/ g (+ a a))))
double code(double g, double a) {
return cbrt((g / (a + a)));
}
/** Denominator written as a + a instead of 2*a; returns Math.cbrt(g / (a+a)). */
public static double code(double g, double a) {
	final double doubledA = a + a;
	return Math.cbrt(g / doubledA);
}
# Denominator written as a + a instead of 2*a, with per-step Float64 rounding.
function code(g, a)
    doubled = Float64(a + a)
    return cbrt(Float64(g / doubled))
end
(* Denominator written as a + a instead of 2 a; result is cbrt(g/(a+a)). *)
code[g_, a_] := N[Power[N[(g / N[(a + a), $MachinePrecision]), $MachinePrecision], 1/3], $MachinePrecision]
\begin{array}{l}
\\
\sqrt[3]{\frac{g}{a + a}}
\end{array}
Initial program 74.9%
lift-*.f64 N/A
count-2-rev N/A
lower-+.f64 74.9
Applied rewrites74.9%
herbie shell --seed 2025057
; Original input program as given to Herbie.
(FPCore (g a)
:name "2-ancestry mixing, zero discriminant"
:precision binary64
(cbrt (/ g (* 2.0 a))))