
;; Initial program: cbrt(x + 1) - cbrt(x) in binary64.
(FPCore (x) :precision binary64 (- (cbrt (+ x 1.0)) (cbrt x)))
double code(double x) {
return cbrt((x + 1.0)) - cbrt(x);
}
/** Double-precision evaluation of Math.cbrt(x + 1) - Math.cbrt(x). */
public static double code(double x) {
    final double shifted = Math.cbrt(x + 1.0);
    final double base = Math.cbrt(x);
    return shifted - base;
}
# Float64 evaluation of cbrt(x + 1) - cbrt(x).
function code(x)
    shifted = cbrt(Float64(x + 1.0))
    return Float64(shifted - cbrt(x))
end
(* cbrt(x + 1) - cbrt(x) via Power[., 1/3], rounding each step with N[., $MachinePrecision]. *)
code[x_] := N[(N[Power[N[(x + 1.0), $MachinePrecision], 1/3], $MachinePrecision] - N[Power[x, 1/3], $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\sqrt[3]{x + 1} - \sqrt[3]{x}
\end{array}
Sampling outcomes in binary64 precision:
Herbie found 7 alternatives:
| Alternative | Accuracy | Speedup |
|---|---|---|
;; First listing repeats the initial program unchanged: cbrt(x + 1) - cbrt(x).
(FPCore (x) :precision binary64 (- (cbrt (+ x 1.0)) (cbrt x)))
/* Initial program repeated verbatim: cbrt(x + 1) - cbrt(x). */
double code(double x) {
return cbrt((x + 1.0)) - cbrt(x);
}
/** Initial program repeated verbatim: Math.cbrt(x + 1) - Math.cbrt(x). */
public static double code(double x) {
return Math.cbrt((x + 1.0)) - Math.cbrt(x);
}
# Initial program repeated verbatim: cbrt(x + 1) - cbrt(x) in Float64.
function code(x) return Float64(cbrt(Float64(x + 1.0)) - cbrt(x)) end
(* Initial program repeated verbatim: cbrt(x + 1) - cbrt(x) at $MachinePrecision. *)
code[x_] := N[(N[Power[N[(x + 1.0), $MachinePrecision], 1/3], $MachinePrecision] - N[Power[x, 1/3], $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\sqrt[3]{x + 1} - \sqrt[3]{x}
\end{array}
;; Alternative (97.9% accuracy): series rewrite of cbrt(x+1) - cbrt(x),
;; obtained via Taylor expansion in x around -inf (see derivation log below).
;; Coefficients: 0.0123... ~ 1/81, -0.111... ~ -1/9, 0.333... ~ 1/3.
(FPCore (x)
:precision binary64
(/
(-
(/
(/
(* (+ 0.012345679012345678 (/ 0.0038103947568968147 (* x x))) (cbrt x))
(+ -0.1111111111111111 (/ -0.06172839506172839 x)))
x)
(* -0.3333333333333333 (cbrt (/ -1.0 (/ -1.0 x)))))
x))
/* Herbie alternative (97.9%) for cbrt(x+1) - cbrt(x), from a Taylor expansion
   around -inf (per the derivation log below).
   NOTE(review): keep the exact floating-point operation order — the reported
   accuracy depends on this evaluation sequence. */
double code(double x) {
return (((((0.012345679012345678 + (0.0038103947568968147 / (x * x))) * cbrt(x)) / (-0.1111111111111111 + (-0.06172839506172839 / x))) / x) - (-0.3333333333333333 * cbrt((-1.0 / (-1.0 / x))))) / x;
}
/** Herbie alternative (97.9%) for Math.cbrt(x+1) - Math.cbrt(x); keep the exact
 *  floating-point operation order — the reported accuracy depends on it. */
public static double code(double x) {
return (((((0.012345679012345678 + (0.0038103947568968147 / (x * x))) * Math.cbrt(x)) / (-0.1111111111111111 + (-0.06172839506172839 / x))) / x) - (-0.3333333333333333 * Math.cbrt((-1.0 / (-1.0 / x))))) / x;
}
# Herbie alternative (97.9%); keep the exact Float64 operation order.
function code(x) return Float64(Float64(Float64(Float64(Float64(Float64(0.012345679012345678 + Float64(0.0038103947568968147 / Float64(x * x))) * cbrt(x)) / Float64(-0.1111111111111111 + Float64(-0.06172839506172839 / x))) / x) - Float64(-0.3333333333333333 * cbrt(Float64(-1.0 / Float64(-1.0 / x))))) / x) end
(* Herbie alternative (97.9%); each intermediate is rounded with N[., $MachinePrecision]. *)
code[x_] := N[(N[(N[(N[(N[(N[(0.012345679012345678 + N[(0.0038103947568968147 / N[(x * x), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] * N[Power[x, 1/3], $MachinePrecision]), $MachinePrecision] / N[(-0.1111111111111111 + N[(-0.06172839506172839 / x), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / x), $MachinePrecision] - N[(-0.3333333333333333 * N[Power[N[(-1.0 / N[(-1.0 / x), $MachinePrecision]), $MachinePrecision], 1/3], $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / x), $MachinePrecision]
\begin{array}{l}
\\
\frac{\frac{\frac{\left(0.012345679012345678 + \frac{0.0038103947568968147}{x \cdot x}\right) \cdot \sqrt[3]{x}}{-0.1111111111111111 + \frac{-0.06172839506172839}{x}}}{x} - -0.3333333333333333 \cdot \sqrt[3]{\frac{-1}{\frac{-1}{x}}}}{x}
\end{array}
Initial program 7.9%
pow1/3N/A
sqr-powN/A
pow1/3N/A
sqr-powN/A
difference-of-squaresN/A
*-lowering-*.f64N/A
+-lowering-+.f64N/A
pow-lowering-pow.f64N/A
+-lowering-+.f64N/A
metadata-evalN/A
pow-lowering-pow.f64N/A
metadata-evalN/A
--lowering--.f64N/A
pow-lowering-pow.f64N/A
+-lowering-+.f64N/A
metadata-evalN/A
pow-lowering-pow.f64N/A
metadata-eval7.9%
Applied egg-rr7.9%
Taylor expanded in x around -inf
Simplified97.9%
Applied egg-rr97.9%
Final simplification97.9%
;; Alternative (97.9% accuracy): factored variant of the series rewrite,
;; from a Taylor expansion in x around -inf (see derivation log below).
(FPCore (x)
:precision binary64
(/
(-
(/
(*
(cbrt (/ -1.0 (/ -1.0 x)))
(- -0.1111111111111111 (/ -0.06172839506172839 x)))
x)
(* -0.3333333333333333 (cbrt x)))
x))
/* Herbie alternative (97.9%), factored series form; keep the exact
   floating-point operation order — the reported accuracy depends on it. */
double code(double x) {
return (((cbrt((-1.0 / (-1.0 / x))) * (-0.1111111111111111 - (-0.06172839506172839 / x))) / x) - (-0.3333333333333333 * cbrt(x))) / x;
}
/** Herbie alternative (97.9%), factored series form; keep the exact
 *  floating-point operation order — the reported accuracy depends on it. */
public static double code(double x) {
return (((Math.cbrt((-1.0 / (-1.0 / x))) * (-0.1111111111111111 - (-0.06172839506172839 / x))) / x) - (-0.3333333333333333 * Math.cbrt(x))) / x;
}
# Herbie alternative (97.9%), factored series form; keep the exact Float64 operation order.
function code(x) return Float64(Float64(Float64(Float64(cbrt(Float64(-1.0 / Float64(-1.0 / x))) * Float64(-0.1111111111111111 - Float64(-0.06172839506172839 / x))) / x) - Float64(-0.3333333333333333 * cbrt(x))) / x) end
(* Herbie alternative (97.9%), factored series form at $MachinePrecision. *)
code[x_] := N[(N[(N[(N[(N[Power[N[(-1.0 / N[(-1.0 / x), $MachinePrecision]), $MachinePrecision], 1/3], $MachinePrecision] * N[(-0.1111111111111111 - N[(-0.06172839506172839 / x), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / x), $MachinePrecision] - N[(-0.3333333333333333 * N[Power[x, 1/3], $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / x), $MachinePrecision]
\begin{array}{l}
\\
\frac{\frac{\sqrt[3]{\frac{-1}{\frac{-1}{x}}} \cdot \left(-0.1111111111111111 - \frac{-0.06172839506172839}{x}\right)}{x} - -0.3333333333333333 \cdot \sqrt[3]{x}}{x}
\end{array}
Initial program 7.9%
pow1/3N/A
sqr-powN/A
pow1/3N/A
sqr-powN/A
difference-of-squaresN/A
*-lowering-*.f64N/A
+-lowering-+.f64N/A
pow-lowering-pow.f64N/A
+-lowering-+.f64N/A
metadata-evalN/A
pow-lowering-pow.f64N/A
metadata-evalN/A
--lowering--.f64N/A
pow-lowering-pow.f64N/A
+-lowering-+.f64N/A
metadata-evalN/A
pow-lowering-pow.f64N/A
metadata-eval7.9%
Applied egg-rr7.9%
Taylor expanded in x around -inf
Simplified97.9%
*-commutativeN/A
associate-/r/N/A
metadata-evalN/A
*-lft-identityN/A
*-lowering-*.f64N/A
cbrt-lowering-cbrt.f6497.9%
Applied egg-rr97.9%
Final simplification97.9%
;; Alternative (97.4% accuracy): (-1/9 * x^(-2/3) + cbrt(x)/3) / x, from Taylor expansion around inf.
(FPCore (x) :precision binary64 (/ (+ (/ -0.1111111111111111 (pow x 0.6666666666666666)) (* (cbrt x) 0.3333333333333333)) x))
double code(double x) {
return ((-0.1111111111111111 / pow(x, 0.6666666666666666)) + (cbrt(x) * 0.3333333333333333)) / x;
}
/** Herbie alternative (97.4%): (-1/9 * x^(-2/3) + cbrt(x)/3) / x.
 *  Intermediates preserve the exact floating-point operation order. */
public static double code(double x) {
    final double invTerm = -0.1111111111111111 / Math.pow(x, 0.6666666666666666);
    final double cbrtTerm = Math.cbrt(x) * 0.3333333333333333;
    return (invTerm + cbrtTerm) / x;
}
# Herbie alternative (97.4%): (-1/9 * x^(-2/3) + cbrt(x)/3) / x in Float64.
function code(x) return Float64(Float64(Float64(-0.1111111111111111 / (x ^ 0.6666666666666666)) + Float64(cbrt(x) * 0.3333333333333333)) / x) end
(* Herbie alternative (97.4%): (-1/9 * x^(-2/3) + cbrt(x)/3) / x at $MachinePrecision. *)
code[x_] := N[(N[(N[(-0.1111111111111111 / N[Power[x, 0.6666666666666666], $MachinePrecision]), $MachinePrecision] + N[(N[Power[x, 1/3], $MachinePrecision] * 0.3333333333333333), $MachinePrecision]), $MachinePrecision] / x), $MachinePrecision]
\begin{array}{l}
\\
\frac{\frac{-0.1111111111111111}{{x}^{0.6666666666666666}} + \sqrt[3]{x} \cdot 0.3333333333333333}{x}
\end{array}
Initial program 7.9%
pow1/3N/A
sqr-powN/A
pow1/3N/A
sqr-powN/A
difference-of-squaresN/A
*-lowering-*.f64N/A
+-lowering-+.f64N/A
pow-lowering-pow.f64N/A
+-lowering-+.f64N/A
metadata-evalN/A
pow-lowering-pow.f64N/A
metadata-evalN/A
--lowering--.f64N/A
pow-lowering-pow.f64N/A
+-lowering-+.f64N/A
metadata-evalN/A
pow-lowering-pow.f64N/A
metadata-eval7.9%
Applied egg-rr7.9%
Taylor expanded in x around inf
/-lowering-/.f64N/A
associate-+r+N/A
+-commutativeN/A
+-lowering-+.f64N/A
*-lowering-*.f64N/A
cbrt-lowering-cbrt.f64N/A
distribute-rgt-outN/A
metadata-evalN/A
*-lowering-*.f64N/A
cbrt-lowering-cbrt.f64N/A
/-lowering-/.f64N/A
unpow2N/A
*-lowering-*.f6497.4%
Simplified97.4%
+-commutativeN/A
+-lowering-+.f64N/A
pow1/3N/A
inv-powN/A
pow-powN/A
pow2N/A
metadata-evalN/A
pow-powN/A
metadata-evalN/A
metadata-evalN/A
pow-flipN/A
associate-*l/N/A
metadata-evalN/A
/-lowering-/.f64N/A
pow-lowering-pow.f64N/A
*-lowering-*.f64N/A
cbrt-lowering-cbrt.f6497.4%
Applied egg-rr97.4%
Final simplification97.4%
;; Alternative (97.4% accuracy): factored form from Taylor expansion around -inf.
(FPCore (x) :precision binary64 (/ (* (cbrt (/ -1.0 (/ -1.0 x))) (- (/ -0.1111111111111111 x) -0.3333333333333333)) x))
/* Herbie alternative (97.4%), factored form; keep the exact floating-point
   operation order — the reported accuracy depends on it. */
double code(double x) {
return (cbrt((-1.0 / (-1.0 / x))) * ((-0.1111111111111111 / x) - -0.3333333333333333)) / x;
}
/** Herbie alternative (97.4%), factored form; keep the exact floating-point
 *  operation order — the reported accuracy depends on it. */
public static double code(double x) {
return (Math.cbrt((-1.0 / (-1.0 / x))) * ((-0.1111111111111111 / x) - -0.3333333333333333)) / x;
}
# Herbie alternative (97.4%), factored form; keep the exact Float64 operation order.
function code(x) return Float64(Float64(cbrt(Float64(-1.0 / Float64(-1.0 / x))) * Float64(Float64(-0.1111111111111111 / x) - -0.3333333333333333)) / x) end
(* Herbie alternative (97.4%), factored form at $MachinePrecision. *)
code[x_] := N[(N[(N[Power[N[(-1.0 / N[(-1.0 / x), $MachinePrecision]), $MachinePrecision], 1/3], $MachinePrecision] * N[(N[(-0.1111111111111111 / x), $MachinePrecision] - -0.3333333333333333), $MachinePrecision]), $MachinePrecision] / x), $MachinePrecision]
\begin{array}{l}
\\
\frac{\sqrt[3]{\frac{-1}{\frac{-1}{x}}} \cdot \left(\frac{-0.1111111111111111}{x} - -0.3333333333333333\right)}{x}
\end{array}
Initial program 7.9%
pow1/3N/A
sqr-powN/A
pow1/3N/A
sqr-powN/A
difference-of-squaresN/A
*-lowering-*.f64N/A
+-lowering-+.f64N/A
pow-lowering-pow.f64N/A
+-lowering-+.f64N/A
metadata-evalN/A
pow-lowering-pow.f64N/A
metadata-evalN/A
--lowering--.f64N/A
pow-lowering-pow.f64N/A
+-lowering-+.f64N/A
metadata-evalN/A
pow-lowering-pow.f64N/A
metadata-eval7.9%
Applied egg-rr7.9%
Taylor expanded in x around -inf
mul-1-negN/A
distribute-neg-frac2N/A
mul-1-negN/A
/-lowering-/.f64N/A
Simplified97.4%
Final simplification97.4%
;; Alternative (96.4% accuracy): leading Taylor term, cbrt(x) / (3 x).
(FPCore (x) :precision binary64 (* 0.3333333333333333 (/ (cbrt x) x)))
double code(double x) {
return 0.3333333333333333 * (cbrt(x) / x);
}
/** Herbie alternative (96.4%): leading Taylor term Math.cbrt(x) / (3 x). */
public static double code(double x) {
    final double ratio = Math.cbrt(x) / x;
    return 0.3333333333333333 * ratio;
}
# Herbie alternative (96.4%): leading Taylor term cbrt(x) / (3 x) in Float64.
function code(x) return Float64(0.3333333333333333 * Float64(cbrt(x) / x)) end
(* Herbie alternative (96.4%): cbrt(x) / (3 x) at $MachinePrecision. *)
code[x_] := N[(0.3333333333333333 * N[(N[Power[x, 1/3], $MachinePrecision] / x), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
0.3333333333333333 \cdot \frac{\sqrt[3]{x}}{x}
\end{array}
Initial program 7.9%
Taylor expanded in x around inf
*-lowering-*.f64N/A
metadata-evalN/A
associate-*r/N/A
cbrt-lowering-cbrt.f64N/A
associate-*r/N/A
metadata-evalN/A
/-lowering-/.f64N/A
unpow2N/A
*-lowering-*.f6444.9%
Simplified44.9%
*-commutativeN/A
*-lowering-*.f64N/A
pow1/3N/A
inv-powN/A
pow-powN/A
pow2N/A
pow-powN/A
pow-lowering-pow.f64N/A
metadata-evalN/A
metadata-eval87.9%
Applied egg-rr87.9%
rem-cube-cbrtN/A
pow-powN/A
metadata-evalN/A
metadata-evalN/A
pow-lowering-pow.f64N/A
cbrt-lowering-cbrt.f64N/A
metadata-eval95.7%
Applied egg-rr95.7%
pow1/3N/A
pow-powN/A
metadata-evalN/A
metadata-evalN/A
pow-divN/A
pow1/3N/A
unpow1N/A
/-lowering-/.f64N/A
cbrt-lowering-cbrt.f6496.4%
Applied egg-rr96.4%
Final simplification96.4%
;; Alternative (87.9% accuracy): x^(-2/3) / 3.
(FPCore (x) :precision binary64 (* 0.3333333333333333 (pow x -0.6666666666666666)))
double code(double x) {
return 0.3333333333333333 * pow(x, -0.6666666666666666);
}
! Herbie alternative (87.9%): 0.3333333333333333 * x**(-2/3) in double precision (real(8)).
real(8) function code(x)
real(8), intent (in) :: x
code = 0.3333333333333333d0 * (x ** (-0.6666666666666666d0))
end function
/** Herbie alternative (87.9%): x^(-2/3) / 3. */
public static double code(double x) {
    final double scaled = Math.pow(x, -0.6666666666666666);
    return 0.3333333333333333 * scaled;
}
def code(x):
    """Herbie alternative (87.9%): 0.3333333333333333 * x**(-2/3) via math.pow."""
    scaled = math.pow(x, -0.6666666666666666)
    return 0.3333333333333333 * scaled
# Herbie alternative (87.9%): x^(-2/3) / 3 in Float64.
function code(x) return Float64(0.3333333333333333 * (x ^ -0.6666666666666666)) end
% Herbie alternative (87.9%): x^(-2/3) / 3.
function tmp = code(x) tmp = 0.3333333333333333 * (x ^ -0.6666666666666666); end
(* Herbie alternative (87.9%): x^(-2/3) / 3 at $MachinePrecision. *)
code[x_] := N[(0.3333333333333333 * N[Power[x, -0.6666666666666666], $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
0.3333333333333333 \cdot {x}^{-0.6666666666666666}
\end{array}
Initial program 7.9%
Taylor expanded in x around inf
*-lowering-*.f64N/A
metadata-evalN/A
associate-*r/N/A
cbrt-lowering-cbrt.f64N/A
associate-*r/N/A
metadata-evalN/A
/-lowering-/.f64N/A
unpow2N/A
*-lowering-*.f6444.9%
Simplified44.9%
*-commutativeN/A
*-lowering-*.f64N/A
pow1/3N/A
inv-powN/A
pow-powN/A
pow2N/A
pow-powN/A
pow-lowering-pow.f64N/A
metadata-evalN/A
metadata-eval87.9%
Applied egg-rr87.9%
Final simplification87.9%
;; Alternative (1.8% accuracy): Taylor expansion around 0, giving 1 - cbrt(x).
(FPCore (x) :precision binary64 (- 1.0 (cbrt x)))
double code(double x) {
return 1.0 - cbrt(x);
}
/** Herbie alternative (1.8%): 1 - Math.cbrt(x), from a Taylor expansion around 0. */
public static double code(double x) {
    final double root = Math.cbrt(x);
    return 1.0 - root;
}
# Herbie alternative (1.8%): 1 - cbrt(x) in Float64.
function code(x) return Float64(1.0 - cbrt(x)) end
(* Herbie alternative (1.8%): 1 - cbrt(x) at $MachinePrecision. *)
code[x_] := N[(1.0 - N[Power[x, 1/3], $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
1 - \sqrt[3]{x}
\end{array}
Initial program 7.9%
Taylor expanded in x around 0
--lowering--.f64N/A
cbrt-lowering-cbrt.f641.8%
Simplified1.8%
;; Rationalized form: 1 / (t0^2 + t0*cbrt(x) + cbrt(x)^2) with t0 = cbrt(x + 1)
;; (difference-of-cubes identity applied to cbrt(x+1) - cbrt(x)).
(FPCore (x) :precision binary64 (let* ((t_0 (cbrt (+ x 1.0)))) (/ 1.0 (+ (+ (* t_0 t_0) (* (cbrt x) t_0)) (* (cbrt x) (cbrt x))))))
double code(double x) {
double t_0 = cbrt((x + 1.0));
return 1.0 / (((t_0 * t_0) + (cbrt(x) * t_0)) + (cbrt(x) * cbrt(x)));
}
/** Rationalized form of Math.cbrt(x+1) - Math.cbrt(x): 1 / (a*a + b*a + b*b)
 *  with a = Math.cbrt(x + 1), b = Math.cbrt(x); operation order preserved. */
public static double code(double x) {
    final double t_0 = Math.cbrt(x + 1.0);
    final double denom = ((t_0 * t_0) + (Math.cbrt(x) * t_0)) + (Math.cbrt(x) * Math.cbrt(x));
    return 1.0 / denom;
}
# Rationalized form: 1 / (t0^2 + t0*cbrt(x) + cbrt(x)^2) with t0 = cbrt(x + 1).
function code(x) t_0 = cbrt(Float64(x + 1.0)) return Float64(1.0 / Float64(Float64(Float64(t_0 * t_0) + Float64(cbrt(x) * t_0)) + Float64(cbrt(x) * cbrt(x)))) end
(* Rationalized form: 1 / (t0^2 + t0*cbrt(x) + cbrt(x)^2) with t0 = cbrt(x + 1), at $MachinePrecision. *)
code[x_] := Block[{t$95$0 = N[Power[N[(x + 1.0), $MachinePrecision], 1/3], $MachinePrecision]}, N[(1.0 / N[(N[(N[(t$95$0 * t$95$0), $MachinePrecision] + N[(N[Power[x, 1/3], $MachinePrecision] * t$95$0), $MachinePrecision]), $MachinePrecision] + N[(N[Power[x, 1/3], $MachinePrecision] * N[Power[x, 1/3], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := \sqrt[3]{x + 1}\\
\frac{1}{\left(t_0 \cdot t_0 + \sqrt[3]{x} \cdot t_0\right) + \sqrt[3]{x} \cdot \sqrt[3]{x}}
\end{array}
\end{array}
herbie shell --seed 2024191
;; Full problem specification as given to Herbie:
;;   name "2cbrt (problem 3.3.4)", precondition 1 < x < 1e+308,
;;   :alt records the rationalized target form, body is cbrt(x+1) - cbrt(x).
(FPCore (x)
:name "2cbrt (problem 3.3.4)"
:precision binary64
:pre (and (> x 1.0) (< x 1e+308))
:alt
(! :herbie-platform default (/ 1 (+ (* (cbrt (+ x 1)) (cbrt (+ x 1))) (* (cbrt x) (cbrt (+ x 1))) (* (cbrt x) (cbrt x)))))
(- (cbrt (+ x 1.0)) (cbrt x)))