
(FPCore (x) :precision binary64 (- (cbrt (+ x 1.0)) (cbrt x)))
double code(double x) {
return cbrt((x + 1.0)) - cbrt(x);
}
/** Reference (naive) implementation of cbrt(x + 1) - cbrt(x);
 *  the subtraction cancels badly for large x. */
public static double code(double x) {
    final double left = Math.cbrt(x + 1.0);
    final double right = Math.cbrt(x);
    return left - right;
}
function code(x) return Float64(cbrt(Float64(x + 1.0)) - cbrt(x)) end
code[x_] := N[(N[Power[N[(x + 1.0), $MachinePrecision], 1/3], $MachinePrecision] - N[Power[x, 1/3], $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\sqrt[3]{x + 1} - \sqrt[3]{x}
\end{array}
Herbie found 14 alternatives:
| Alternative | Accuracy | Speedup |
|---|---|---|
(FPCore (x) :precision binary64 (- (cbrt (+ x 1.0)) (cbrt x)))
/* Baseline row of the alternatives table: the naive cbrt(x + 1) - cbrt(x). */
double code(double x) {
return cbrt((x + 1.0)) - cbrt(x);
}
/** Baseline row of the alternatives table: the naive cbrt(x + 1) - cbrt(x). */
public static double code(double x) {
return Math.cbrt((x + 1.0)) - Math.cbrt(x);
}
function code(x) return Float64(cbrt(Float64(x + 1.0)) - cbrt(x)) end
code[x_] := N[(N[Power[N[(x + 1.0), $MachinePrecision], 1/3], $MachinePrecision] - N[Power[x, 1/3], $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\sqrt[3]{x + 1} - \sqrt[3]{x}
\end{array}
(FPCore (x)
:precision binary64
(let* ((t_0 (cbrt (+ (/ 1.0 x) (/ 1.0 (* x x)))))
(t_1 (+ (/ 2.0 (* x x)) (/ 1.0 x))))
(/
(fma
(*
(pow t_1 -0.6666666666666666)
(*
(/ 1.0 (* (* x x) x))
(pow (+ (+ (cbrt t_1) t_0) (pow x -0.3333333333333333)) -2.0)))
-0.3333333333333333
(/
1.0
(+ (+ (/ (cbrt (+ (/ 2.0 x) 1.0)) (cbrt x)) t_0) (/ 1.0 (cbrt x)))))
x)))
/* Herbie alternative for cbrt(x + 1) - cbrt(x), derived from a Taylor
 * expansion around x = inf (derivation log below; report accuracy ~98.6%).
 * FP-sensitive: the exact operation order matters, do not re-associate. */
double code(double x) {
/* t_0 = cbrt(1/x + 1/x^2) */
double t_0 = cbrt(((1.0 / x) + (1.0 / (x * x))));
/* t_1 = 2/x^2 + 1/x */
double t_1 = (2.0 / (x * x)) + (1.0 / x);
return fma((pow(t_1, -0.6666666666666666) * ((1.0 / ((x * x) * x)) * pow(((cbrt(t_1) + t_0) + pow(x, -0.3333333333333333)), -2.0))), -0.3333333333333333, (1.0 / (((cbrt(((2.0 / x) + 1.0)) / cbrt(x)) + t_0) + (1.0 / cbrt(x))))) / x;
}
function code(x) t_0 = cbrt(Float64(Float64(1.0 / x) + Float64(1.0 / Float64(x * x)))) t_1 = Float64(Float64(2.0 / Float64(x * x)) + Float64(1.0 / x)) return Float64(fma(Float64((t_1 ^ -0.6666666666666666) * Float64(Float64(1.0 / Float64(Float64(x * x) * x)) * (Float64(Float64(cbrt(t_1) + t_0) + (x ^ -0.3333333333333333)) ^ -2.0))), -0.3333333333333333, Float64(1.0 / Float64(Float64(Float64(cbrt(Float64(Float64(2.0 / x) + 1.0)) / cbrt(x)) + t_0) + Float64(1.0 / cbrt(x))))) / x) end
code[x_] := Block[{t$95$0 = N[Power[N[(N[(1.0 / x), $MachinePrecision] + N[(1.0 / N[(x * x), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], 1/3], $MachinePrecision]}, Block[{t$95$1 = N[(N[(2.0 / N[(x * x), $MachinePrecision]), $MachinePrecision] + N[(1.0 / x), $MachinePrecision]), $MachinePrecision]}, N[(N[(N[(N[Power[t$95$1, -0.6666666666666666], $MachinePrecision] * N[(N[(1.0 / N[(N[(x * x), $MachinePrecision] * x), $MachinePrecision]), $MachinePrecision] * N[Power[N[(N[(N[Power[t$95$1, 1/3], $MachinePrecision] + t$95$0), $MachinePrecision] + N[Power[x, -0.3333333333333333], $MachinePrecision]), $MachinePrecision], -2.0], $MachinePrecision]), $MachinePrecision]), $MachinePrecision] * -0.3333333333333333 + N[(1.0 / N[(N[(N[(N[Power[N[(N[(2.0 / x), $MachinePrecision] + 1.0), $MachinePrecision], 1/3], $MachinePrecision] / N[Power[x, 1/3], $MachinePrecision]), $MachinePrecision] + t$95$0), $MachinePrecision] + N[(1.0 / N[Power[x, 1/3], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / x), $MachinePrecision]]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := \sqrt[3]{\frac{1}{x} + \frac{1}{x \cdot x}}\\
t_1 := \frac{2}{x \cdot x} + \frac{1}{x}\\
\frac{\mathsf{fma}\left({t\_1}^{-0.6666666666666666} \cdot \left(\frac{1}{\left(x \cdot x\right) \cdot x} \cdot {\left(\left(\sqrt[3]{t\_1} + t\_0\right) + {x}^{-0.3333333333333333}\right)}^{-2}\right), -0.3333333333333333, \frac{1}{\left(\frac{\sqrt[3]{\frac{2}{x} + 1}}{\sqrt[3]{x}} + t\_0\right) + \frac{1}{\sqrt[3]{x}}}\right)}{x}
\end{array}
\end{array}
Initial program 6.8%
lift--.f64N/A
lift-+.f64N/A
lift-cbrt.f64N/A
lift-cbrt.f64N/A
flip3--N/A
lower-/.f64N/A
rem-cube-cbrtN/A
rem-cube-cbrtN/A
lower--.f64N/A
metadata-evalN/A
fp-cancel-sign-sub-invN/A
metadata-evalN/A
metadata-evalN/A
lower--.f64N/A
lower-+.f64N/A
Applied rewrites8.8%
Taylor expanded in x around inf
Applied rewrites94.1%
lift-pow.f64N/A
metadata-evalN/A
pow-powN/A
inv-powN/A
pow1/3N/A
cbrt-divN/A
metadata-evalN/A
lower-/.f64N/A
lower-cbrt.f6498.6
Applied rewrites98.6%
lift-cbrt.f64N/A
lift-+.f64N/A
lift-/.f64N/A
lift-*.f64N/A
lift-/.f64N/A
associate-/r*N/A
div-addN/A
cbrt-divN/A
lower-/.f64N/A
lower-cbrt.f64N/A
lift-/.f64N/A
lift-+.f64N/A
lift-cbrt.f6498.6
Applied rewrites98.6%
(FPCore (x)
:precision binary64
(let* ((t_0 (+ (/ 2.0 (* x x)) (/ 1.0 x)))
(t_1 (+ (cbrt t_0) (cbrt (+ (/ 1.0 x) (/ 1.0 (* x x)))))))
(/
(fma
(*
(pow t_0 -0.6666666666666666)
(* (/ 1.0 (* (* x x) x)) (pow (+ t_1 (pow x -0.3333333333333333)) -2.0)))
-0.3333333333333333
(/ 1.0 (+ t_1 (/ 1.0 (cbrt x)))))
x)))
/* Herbie alternative for cbrt(x + 1) - cbrt(x); same series form as the
 * previous alternative but with the shared subexpression t_1 hoisted.
 * FP-sensitive: do not re-associate. */
double code(double x) {
/* t_0 = 2/x^2 + 1/x */
double t_0 = (2.0 / (x * x)) + (1.0 / x);
/* t_1 = cbrt(t_0) + cbrt(1/x + 1/x^2) */
double t_1 = cbrt(t_0) + cbrt(((1.0 / x) + (1.0 / (x * x))));
return fma((pow(t_0, -0.6666666666666666) * ((1.0 / ((x * x) * x)) * pow((t_1 + pow(x, -0.3333333333333333)), -2.0))), -0.3333333333333333, (1.0 / (t_1 + (1.0 / cbrt(x))))) / x;
}
function code(x) t_0 = Float64(Float64(2.0 / Float64(x * x)) + Float64(1.0 / x)) t_1 = Float64(cbrt(t_0) + cbrt(Float64(Float64(1.0 / x) + Float64(1.0 / Float64(x * x))))) return Float64(fma(Float64((t_0 ^ -0.6666666666666666) * Float64(Float64(1.0 / Float64(Float64(x * x) * x)) * (Float64(t_1 + (x ^ -0.3333333333333333)) ^ -2.0))), -0.3333333333333333, Float64(1.0 / Float64(t_1 + Float64(1.0 / cbrt(x))))) / x) end
code[x_] := Block[{t$95$0 = N[(N[(2.0 / N[(x * x), $MachinePrecision]), $MachinePrecision] + N[(1.0 / x), $MachinePrecision]), $MachinePrecision]}, Block[{t$95$1 = N[(N[Power[t$95$0, 1/3], $MachinePrecision] + N[Power[N[(N[(1.0 / x), $MachinePrecision] + N[(1.0 / N[(x * x), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], 1/3], $MachinePrecision]), $MachinePrecision]}, N[(N[(N[(N[Power[t$95$0, -0.6666666666666666], $MachinePrecision] * N[(N[(1.0 / N[(N[(x * x), $MachinePrecision] * x), $MachinePrecision]), $MachinePrecision] * N[Power[N[(t$95$1 + N[Power[x, -0.3333333333333333], $MachinePrecision]), $MachinePrecision], -2.0], $MachinePrecision]), $MachinePrecision]), $MachinePrecision] * -0.3333333333333333 + N[(1.0 / N[(t$95$1 + N[(1.0 / N[Power[x, 1/3], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / x), $MachinePrecision]]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := \frac{2}{x \cdot x} + \frac{1}{x}\\
t_1 := \sqrt[3]{t\_0} + \sqrt[3]{\frac{1}{x} + \frac{1}{x \cdot x}}\\
\frac{\mathsf{fma}\left({t\_0}^{-0.6666666666666666} \cdot \left(\frac{1}{\left(x \cdot x\right) \cdot x} \cdot {\left(t\_1 + {x}^{-0.3333333333333333}\right)}^{-2}\right), -0.3333333333333333, \frac{1}{t\_1 + \frac{1}{\sqrt[3]{x}}}\right)}{x}
\end{array}
\end{array}
Initial program 6.8%
lift--.f64N/A
lift-+.f64N/A
lift-cbrt.f64N/A
lift-cbrt.f64N/A
flip3--N/A
lower-/.f64N/A
rem-cube-cbrtN/A
rem-cube-cbrtN/A
lower--.f64N/A
metadata-evalN/A
fp-cancel-sign-sub-invN/A
metadata-evalN/A
metadata-evalN/A
lower--.f64N/A
lower-+.f64N/A
Applied rewrites8.8%
Taylor expanded in x around inf
Applied rewrites94.1%
lift-pow.f64N/A
metadata-evalN/A
pow-powN/A
inv-powN/A
pow1/3N/A
cbrt-divN/A
metadata-evalN/A
lower-/.f64N/A
lower-cbrt.f6498.6
Applied rewrites98.6%
(FPCore (x)
:precision binary64
(if (<= x 1e+15)
(/
(- (- x -1.0) x)
(+
(pow (- x -1.0) 0.6666666666666666)
(*
(+ (pow x -0.3333333333333333) (cbrt (+ (/ 1.0 x) (/ 1.0 (* x x)))))
x)))
(* (pow (cbrt x) -2.0) 0.3333333333333333)))
/* Herbie alternative for cbrt(x + 1) - cbrt(x): a rewritten quotient for
 * x <= 1e15, switching to the asymptotic term x^(-2/3)/3 beyond
 * (the derivation log shows a Taylor expansion around x = inf).
 * Note (x - -1.0) is the generated spelling of x + 1. */
double code(double x) {
double tmp;
if (x <= 1e+15) {
tmp = ((x - -1.0) - x) / (pow((x - -1.0), 0.6666666666666666) + ((pow(x, -0.3333333333333333) + cbrt(((1.0 / x) + (1.0 / (x * x))))) * x));
} else {
tmp = pow(cbrt(x), -2.0) * 0.3333333333333333;
}
return tmp;
}
/** Herbie alternative for cbrt(x + 1) - cbrt(x): rewritten quotient for
 *  x <= 1e15, asymptotic term x^(-2/3)/3 beyond (Taylor at x -> inf).
 *  (x - -1.0) is the generated spelling of x + 1. */
public static double code(double x) {
double tmp;
if (x <= 1e+15) {
tmp = ((x - -1.0) - x) / (Math.pow((x - -1.0), 0.6666666666666666) + ((Math.pow(x, -0.3333333333333333) + Math.cbrt(((1.0 / x) + (1.0 / (x * x))))) * x));
} else {
tmp = Math.pow(Math.cbrt(x), -2.0) * 0.3333333333333333;
}
return tmp;
}
function code(x) tmp = 0.0 if (x <= 1e+15) tmp = Float64(Float64(Float64(x - -1.0) - x) / Float64((Float64(x - -1.0) ^ 0.6666666666666666) + Float64(Float64((x ^ -0.3333333333333333) + cbrt(Float64(Float64(1.0 / x) + Float64(1.0 / Float64(x * x))))) * x))); else tmp = Float64((cbrt(x) ^ -2.0) * 0.3333333333333333); end return tmp end
code[x_] := If[LessEqual[x, 1e+15], N[(N[(N[(x - -1.0), $MachinePrecision] - x), $MachinePrecision] / N[(N[Power[N[(x - -1.0), $MachinePrecision], 0.6666666666666666], $MachinePrecision] + N[(N[(N[Power[x, -0.3333333333333333], $MachinePrecision] + N[Power[N[(N[(1.0 / x), $MachinePrecision] + N[(1.0 / N[(x * x), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], 1/3], $MachinePrecision]), $MachinePrecision] * x), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], N[(N[Power[N[Power[x, 1/3], $MachinePrecision], -2.0], $MachinePrecision] * 0.3333333333333333), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;x \leq 10^{+15}:\\
\;\;\;\;\frac{\left(x - -1\right) - x}{{\left(x - -1\right)}^{0.6666666666666666} + \left({x}^{-0.3333333333333333} + \sqrt[3]{\frac{1}{x} + \frac{1}{x \cdot x}}\right) \cdot x}\\
\mathbf{else}:\\
\;\;\;\;{\left(\sqrt[3]{x}\right)}^{-2} \cdot 0.3333333333333333\\
\end{array}
\end{array}
if x < 1e15Initial program 59.9%
lift--.f64N/A
lift-+.f64N/A
lift-cbrt.f64N/A
lift-cbrt.f64N/A
flip3--N/A
lower-/.f64N/A
rem-cube-cbrtN/A
rem-cube-cbrtN/A
lower--.f64N/A
metadata-evalN/A
fp-cancel-sign-sub-invN/A
metadata-evalN/A
metadata-evalN/A
lower--.f64N/A
lower-+.f64N/A
Applied rewrites97.4%
Taylor expanded in x around inf
*-commutativeN/A
lower-*.f64N/A
+-commutativeN/A
lower-+.f64N/A
pow1/3N/A
inv-powN/A
pow-powN/A
metadata-evalN/A
lower-pow.f64N/A
lower-cbrt.f64N/A
lower-+.f64N/A
lower-/.f64N/A
lower-/.f64N/A
pow2N/A
lift-*.f6498.7
Applied rewrites98.7%
if 1e15 < x Initial program 4.2%
Taylor expanded in x around inf
*-commutativeN/A
lower-*.f64N/A
pow1/3N/A
pow-flipN/A
pow-powN/A
metadata-evalN/A
metadata-evalN/A
metadata-evalN/A
metadata-evalN/A
lower-pow.f64N/A
metadata-evalN/A
metadata-eval90.3
Applied rewrites90.3%
lift-pow.f64N/A
metadata-evalN/A
pow-prod-upN/A
pow-prod-downN/A
pow2N/A
lower-pow.f64N/A
pow2N/A
lift-*.f6445.8
Applied rewrites45.8%
lift-pow.f64N/A
lift-*.f64N/A
unpow-prod-downN/A
metadata-evalN/A
pow-powN/A
pow1/3N/A
metadata-evalN/A
pow-powN/A
pow1/3N/A
pow-prod-upN/A
metadata-evalN/A
lower-pow.f64N/A
lift-cbrt.f6498.4
Applied rewrites98.4%
(FPCore (x)
:precision binary64
(if (<= (- (cbrt (+ x 1.0)) (cbrt x)) 2e-11)
(* (pow (cbrt x) -2.0) 0.3333333333333333)
(/
(- (- x -1.0) x)
(+
(pow (- x -1.0) 0.6666666666666666)
(+ (cbrt (* x x)) (cbrt (* (- x -1.0) x)))))))
/* Herbie alternative for cbrt(x + 1) - cbrt(x): branches on the naive
 * difference itself — when it is tiny (<= 2e-11, i.e. large x) use the
 * asymptotic term x^(-2/3)/3, otherwise a rewritten quotient whose
 * denominator is the conjugate-style sum of cube-root products. */
double code(double x) {
double tmp;
if ((cbrt((x + 1.0)) - cbrt(x)) <= 2e-11) {
tmp = pow(cbrt(x), -2.0) * 0.3333333333333333;
} else {
tmp = ((x - -1.0) - x) / (pow((x - -1.0), 0.6666666666666666) + (cbrt((x * x)) + cbrt(((x - -1.0) * x))));
}
return tmp;
}
/** Herbie alternative for cbrt(x + 1) - cbrt(x): branches on the naive
 *  difference itself — asymptotic x^(-2/3)/3 when it is <= 2e-11,
 *  otherwise a rewritten quotient with a conjugate-style denominator. */
public static double code(double x) {
double tmp;
if ((Math.cbrt((x + 1.0)) - Math.cbrt(x)) <= 2e-11) {
tmp = Math.pow(Math.cbrt(x), -2.0) * 0.3333333333333333;
} else {
tmp = ((x - -1.0) - x) / (Math.pow((x - -1.0), 0.6666666666666666) + (Math.cbrt((x * x)) + Math.cbrt(((x - -1.0) * x))));
}
return tmp;
}
function code(x) tmp = 0.0 if (Float64(cbrt(Float64(x + 1.0)) - cbrt(x)) <= 2e-11) tmp = Float64((cbrt(x) ^ -2.0) * 0.3333333333333333); else tmp = Float64(Float64(Float64(x - -1.0) - x) / Float64((Float64(x - -1.0) ^ 0.6666666666666666) + Float64(cbrt(Float64(x * x)) + cbrt(Float64(Float64(x - -1.0) * x))))); end return tmp end
code[x_] := If[LessEqual[N[(N[Power[N[(x + 1.0), $MachinePrecision], 1/3], $MachinePrecision] - N[Power[x, 1/3], $MachinePrecision]), $MachinePrecision], 2e-11], N[(N[Power[N[Power[x, 1/3], $MachinePrecision], -2.0], $MachinePrecision] * 0.3333333333333333), $MachinePrecision], N[(N[(N[(x - -1.0), $MachinePrecision] - x), $MachinePrecision] / N[(N[Power[N[(x - -1.0), $MachinePrecision], 0.6666666666666666], $MachinePrecision] + N[(N[Power[N[(x * x), $MachinePrecision], 1/3], $MachinePrecision] + N[Power[N[(N[(x - -1.0), $MachinePrecision] * x), $MachinePrecision], 1/3], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;\sqrt[3]{x + 1} - \sqrt[3]{x} \leq 2 \cdot 10^{-11}:\\
\;\;\;\;{\left(\sqrt[3]{x}\right)}^{-2} \cdot 0.3333333333333333\\
\mathbf{else}:\\
\;\;\;\;\frac{\left(x - -1\right) - x}{{\left(x - -1\right)}^{0.6666666666666666} + \left(\sqrt[3]{x \cdot x} + \sqrt[3]{\left(x - -1\right) \cdot x}\right)}\\
\end{array}
\end{array}
if (-.f64 (cbrt.f64 (+.f64 x #s(literal 1 binary64))) (cbrt.f64 x)) < 1.99999999999999988e-11Initial program 4.2%
Taylor expanded in x around inf
*-commutativeN/A
lower-*.f64N/A
pow1/3N/A
pow-flipN/A
pow-powN/A
metadata-evalN/A
metadata-evalN/A
metadata-evalN/A
metadata-evalN/A
lower-pow.f64N/A
metadata-evalN/A
metadata-eval90.3
Applied rewrites90.3%
lift-pow.f64N/A
metadata-evalN/A
pow-prod-upN/A
pow-prod-downN/A
pow2N/A
lower-pow.f64N/A
pow2N/A
lift-*.f6445.8
Applied rewrites45.8%
lift-pow.f64N/A
lift-*.f64N/A
unpow-prod-downN/A
metadata-evalN/A
pow-powN/A
pow1/3N/A
metadata-evalN/A
pow-powN/A
pow1/3N/A
pow-prod-upN/A
metadata-evalN/A
lower-pow.f64N/A
lift-cbrt.f6498.4
Applied rewrites98.4%
if 1.99999999999999988e-11 < (-.f64 (cbrt.f64 (+.f64 x #s(literal 1 binary64))) (cbrt.f64 x)) Initial program 59.0%
lift--.f64N/A
lift-+.f64N/A
lift-cbrt.f64N/A
lift-cbrt.f64N/A
flip3--N/A
lower-/.f64N/A
rem-cube-cbrtN/A
rem-cube-cbrtN/A
lower--.f64N/A
metadata-evalN/A
fp-cancel-sign-sub-invN/A
metadata-evalN/A
metadata-evalN/A
lower--.f64N/A
lower-+.f64N/A
Applied rewrites97.3%
lift-pow.f64N/A
metadata-evalN/A
pow-powN/A
pow1/3N/A
lower-cbrt.f64N/A
pow2N/A
lift-*.f6498.3
Applied rewrites98.3%
(FPCore (x)
:precision binary64
(let* ((t_0 (+ (/ 2.0 (* x x)) (/ 1.0 x)))
(t_1 (+ (cbrt t_0) (cbrt (+ (/ 1.0 x) (/ 1.0 (* x x)))))))
(/
(fma
(*
(pow t_0 -0.6666666666666666)
(* (/ 1.0 (* (* x x) x)) (pow (+ t_1 (pow x -0.3333333333333333)) -2.0)))
-0.3333333333333333
(/ 1.0 (+ t_1 (cbrt (/ 1.0 x)))))
x)))
/* Herbie alternative for cbrt(x + 1) - cbrt(x); identical to the earlier
 * t_0/t_1 form except the last term is cbrt(1/x) instead of 1/cbrt(x).
 * FP-sensitive: do not re-associate. */
double code(double x) {
/* t_0 = 2/x^2 + 1/x */
double t_0 = (2.0 / (x * x)) + (1.0 / x);
/* t_1 = cbrt(t_0) + cbrt(1/x + 1/x^2) */
double t_1 = cbrt(t_0) + cbrt(((1.0 / x) + (1.0 / (x * x))));
return fma((pow(t_0, -0.6666666666666666) * ((1.0 / ((x * x) * x)) * pow((t_1 + pow(x, -0.3333333333333333)), -2.0))), -0.3333333333333333, (1.0 / (t_1 + cbrt((1.0 / x))))) / x;
}
function code(x) t_0 = Float64(Float64(2.0 / Float64(x * x)) + Float64(1.0 / x)) t_1 = Float64(cbrt(t_0) + cbrt(Float64(Float64(1.0 / x) + Float64(1.0 / Float64(x * x))))) return Float64(fma(Float64((t_0 ^ -0.6666666666666666) * Float64(Float64(1.0 / Float64(Float64(x * x) * x)) * (Float64(t_1 + (x ^ -0.3333333333333333)) ^ -2.0))), -0.3333333333333333, Float64(1.0 / Float64(t_1 + cbrt(Float64(1.0 / x))))) / x) end
code[x_] := Block[{t$95$0 = N[(N[(2.0 / N[(x * x), $MachinePrecision]), $MachinePrecision] + N[(1.0 / x), $MachinePrecision]), $MachinePrecision]}, Block[{t$95$1 = N[(N[Power[t$95$0, 1/3], $MachinePrecision] + N[Power[N[(N[(1.0 / x), $MachinePrecision] + N[(1.0 / N[(x * x), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], 1/3], $MachinePrecision]), $MachinePrecision]}, N[(N[(N[(N[Power[t$95$0, -0.6666666666666666], $MachinePrecision] * N[(N[(1.0 / N[(N[(x * x), $MachinePrecision] * x), $MachinePrecision]), $MachinePrecision] * N[Power[N[(t$95$1 + N[Power[x, -0.3333333333333333], $MachinePrecision]), $MachinePrecision], -2.0], $MachinePrecision]), $MachinePrecision]), $MachinePrecision] * -0.3333333333333333 + N[(1.0 / N[(t$95$1 + N[Power[N[(1.0 / x), $MachinePrecision], 1/3], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / x), $MachinePrecision]]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := \frac{2}{x \cdot x} + \frac{1}{x}\\
t_1 := \sqrt[3]{t\_0} + \sqrt[3]{\frac{1}{x} + \frac{1}{x \cdot x}}\\
\frac{\mathsf{fma}\left({t\_0}^{-0.6666666666666666} \cdot \left(\frac{1}{\left(x \cdot x\right) \cdot x} \cdot {\left(t\_1 + {x}^{-0.3333333333333333}\right)}^{-2}\right), -0.3333333333333333, \frac{1}{t\_1 + \sqrt[3]{\frac{1}{x}}}\right)}{x}
\end{array}
\end{array}
Initial program 6.8%
lift--.f64N/A
lift-+.f64N/A
lift-cbrt.f64N/A
lift-cbrt.f64N/A
flip3--N/A
lower-/.f64N/A
rem-cube-cbrtN/A
rem-cube-cbrtN/A
lower--.f64N/A
metadata-evalN/A
fp-cancel-sign-sub-invN/A
metadata-evalN/A
metadata-evalN/A
lower--.f64N/A
lower-+.f64N/A
Applied rewrites8.8%
Taylor expanded in x around inf
Applied rewrites94.1%
lift-pow.f64N/A
metadata-evalN/A
pow-powN/A
inv-powN/A
pow1/3N/A
lower-cbrt.f64N/A
lift-/.f6498.5
Applied rewrites98.5%
(FPCore (x)
:precision binary64
(if (<= (- (cbrt (+ x 1.0)) (cbrt x)) 2e-11)
(* (pow (cbrt x) -2.0) 0.3333333333333333)
(/
1.0
(+
(exp (* (log (- x -1.0)) 0.6666666666666666))
(+ (pow x 0.6666666666666666) (cbrt (* (- x -1.0) x)))))))
/* Herbie alternative for cbrt(x + 1) - cbrt(x): asymptotic x^(-2/3)/3 when
 * the naive difference is <= 2e-11; otherwise a reciprocal whose first
 * denominator term evaluates (x+1)^(2/3) via exp(log(x+1) * 2/3)
 * (pow-to-exp rewrite, per the derivation log below). */
double code(double x) {
double tmp;
if ((cbrt((x + 1.0)) - cbrt(x)) <= 2e-11) {
tmp = pow(cbrt(x), -2.0) * 0.3333333333333333;
} else {
tmp = 1.0 / (exp((log((x - -1.0)) * 0.6666666666666666)) + (pow(x, 0.6666666666666666) + cbrt(((x - -1.0) * x))));
}
return tmp;
}
/** Herbie alternative for cbrt(x + 1) - cbrt(x): asymptotic x^(-2/3)/3 when
 *  the naive difference is <= 2e-11; otherwise a reciprocal that computes
 *  (x+1)^(2/3) via exp(log(x+1) * 2/3) (pow-to-exp rewrite). */
public static double code(double x) {
double tmp;
if ((Math.cbrt((x + 1.0)) - Math.cbrt(x)) <= 2e-11) {
tmp = Math.pow(Math.cbrt(x), -2.0) * 0.3333333333333333;
} else {
tmp = 1.0 / (Math.exp((Math.log((x - -1.0)) * 0.6666666666666666)) + (Math.pow(x, 0.6666666666666666) + Math.cbrt(((x - -1.0) * x))));
}
return tmp;
}
function code(x) tmp = 0.0 if (Float64(cbrt(Float64(x + 1.0)) - cbrt(x)) <= 2e-11) tmp = Float64((cbrt(x) ^ -2.0) * 0.3333333333333333); else tmp = Float64(1.0 / Float64(exp(Float64(log(Float64(x - -1.0)) * 0.6666666666666666)) + Float64((x ^ 0.6666666666666666) + cbrt(Float64(Float64(x - -1.0) * x))))); end return tmp end
code[x_] := If[LessEqual[N[(N[Power[N[(x + 1.0), $MachinePrecision], 1/3], $MachinePrecision] - N[Power[x, 1/3], $MachinePrecision]), $MachinePrecision], 2e-11], N[(N[Power[N[Power[x, 1/3], $MachinePrecision], -2.0], $MachinePrecision] * 0.3333333333333333), $MachinePrecision], N[(1.0 / N[(N[Exp[N[(N[Log[N[(x - -1.0), $MachinePrecision]], $MachinePrecision] * 0.6666666666666666), $MachinePrecision]], $MachinePrecision] + N[(N[Power[x, 0.6666666666666666], $MachinePrecision] + N[Power[N[(N[(x - -1.0), $MachinePrecision] * x), $MachinePrecision], 1/3], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;\sqrt[3]{x + 1} - \sqrt[3]{x} \leq 2 \cdot 10^{-11}:\\
\;\;\;\;{\left(\sqrt[3]{x}\right)}^{-2} \cdot 0.3333333333333333\\
\mathbf{else}:\\
\;\;\;\;\frac{1}{e^{\log \left(x - -1\right) \cdot 0.6666666666666666} + \left({x}^{0.6666666666666666} + \sqrt[3]{\left(x - -1\right) \cdot x}\right)}\\
\end{array}
\end{array}
if (-.f64 (cbrt.f64 (+.f64 x #s(literal 1 binary64))) (cbrt.f64 x)) < 1.99999999999999988e-11Initial program 4.2%
Taylor expanded in x around inf
*-commutativeN/A
lower-*.f64N/A
pow1/3N/A
pow-flipN/A
pow-powN/A
metadata-evalN/A
metadata-evalN/A
metadata-evalN/A
metadata-evalN/A
lower-pow.f64N/A
metadata-evalN/A
metadata-eval90.3
Applied rewrites90.3%
lift-pow.f64N/A
metadata-evalN/A
pow-prod-upN/A
pow-prod-downN/A
pow2N/A
lower-pow.f64N/A
pow2N/A
lift-*.f6445.8
Applied rewrites45.8%
lift-pow.f64N/A
lift-*.f64N/A
unpow-prod-downN/A
metadata-evalN/A
pow-powN/A
pow1/3N/A
metadata-evalN/A
pow-powN/A
pow1/3N/A
pow-prod-upN/A
metadata-evalN/A
lower-pow.f64N/A
lift-cbrt.f6498.4
Applied rewrites98.4%
if 1.99999999999999988e-11 < (-.f64 (cbrt.f64 (+.f64 x #s(literal 1 binary64))) (cbrt.f64 x)) Initial program 59.0%
lift--.f64N/A
lift-+.f64N/A
lift-cbrt.f64N/A
lift-cbrt.f64N/A
flip3--N/A
lower-/.f64N/A
rem-cube-cbrtN/A
rem-cube-cbrtN/A
lower--.f64N/A
metadata-evalN/A
fp-cancel-sign-sub-invN/A
metadata-evalN/A
metadata-evalN/A
lower--.f64N/A
lower-+.f64N/A
Applied rewrites97.3%
lift--.f64N/A
lift-pow.f64N/A
pow-to-expN/A
lower-exp.f64N/A
lower-*.f64N/A
lower-log.f64N/A
lift--.f6497.5
Applied rewrites97.5%
Taylor expanded in x around 0
Applied rewrites97.5%
(FPCore (x)
:precision binary64
(if (<= x 3.9e+14)
(/
1.0
(+
(pow (- x -1.0) 0.6666666666666666)
(+ (pow x 0.6666666666666666) (cbrt (* (- x -1.0) x)))))
(* (pow (cbrt x) -2.0) 0.3333333333333333)))
/* Herbie alternative for cbrt(x + 1) - cbrt(x): reciprocal of the
 * conjugate-style sum (x+1)^(2/3) + x^(2/3) + cbrt((x+1)*x) for
 * x <= 3.9e14, asymptotic term x^(-2/3)/3 beyond. */
double code(double x) {
double tmp;
if (x <= 3.9e+14) {
tmp = 1.0 / (pow((x - -1.0), 0.6666666666666666) + (pow(x, 0.6666666666666666) + cbrt(((x - -1.0) * x))));
} else {
tmp = pow(cbrt(x), -2.0) * 0.3333333333333333;
}
return tmp;
}
/** Herbie alternative for cbrt(x + 1) - cbrt(x): reciprocal of the
 *  conjugate-style sum (x+1)^(2/3) + x^(2/3) + cbrt((x+1)*x) for
 *  x <= 3.9e14, asymptotic term x^(-2/3)/3 beyond. */
public static double code(double x) {
double tmp;
if (x <= 3.9e+14) {
tmp = 1.0 / (Math.pow((x - -1.0), 0.6666666666666666) + (Math.pow(x, 0.6666666666666666) + Math.cbrt(((x - -1.0) * x))));
} else {
tmp = Math.pow(Math.cbrt(x), -2.0) * 0.3333333333333333;
}
return tmp;
}
function code(x) tmp = 0.0 if (x <= 3.9e+14) tmp = Float64(1.0 / Float64((Float64(x - -1.0) ^ 0.6666666666666666) + Float64((x ^ 0.6666666666666666) + cbrt(Float64(Float64(x - -1.0) * x))))); else tmp = Float64((cbrt(x) ^ -2.0) * 0.3333333333333333); end return tmp end
code[x_] := If[LessEqual[x, 3.9e+14], N[(1.0 / N[(N[Power[N[(x - -1.0), $MachinePrecision], 0.6666666666666666], $MachinePrecision] + N[(N[Power[x, 0.6666666666666666], $MachinePrecision] + N[Power[N[(N[(x - -1.0), $MachinePrecision] * x), $MachinePrecision], 1/3], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], N[(N[Power[N[Power[x, 1/3], $MachinePrecision], -2.0], $MachinePrecision] * 0.3333333333333333), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;x \leq 3.9 \cdot 10^{+14}:\\
\;\;\;\;\frac{1}{{\left(x - -1\right)}^{0.6666666666666666} + \left({x}^{0.6666666666666666} + \sqrt[3]{\left(x - -1\right) \cdot x}\right)}\\
\mathbf{else}:\\
\;\;\;\;{\left(\sqrt[3]{x}\right)}^{-2} \cdot 0.3333333333333333\\
\end{array}
\end{array}
if x < 3.9e14Initial program 60.7%
lift--.f64N/A
lift-+.f64N/A
lift-cbrt.f64N/A
lift-cbrt.f64N/A
flip3--N/A
lower-/.f64N/A
rem-cube-cbrtN/A
rem-cube-cbrtN/A
lower--.f64N/A
metadata-evalN/A
fp-cancel-sign-sub-invN/A
metadata-evalN/A
metadata-evalN/A
lower--.f64N/A
lower-+.f64N/A
Applied rewrites97.4%
Taylor expanded in x around 0
Applied rewrites97.4%
if 3.9e14 < x Initial program 4.2%
Taylor expanded in x around inf
*-commutativeN/A
lower-*.f64N/A
pow1/3N/A
pow-flipN/A
pow-powN/A
metadata-evalN/A
metadata-evalN/A
metadata-evalN/A
metadata-evalN/A
lower-pow.f64N/A
metadata-evalN/A
metadata-eval90.3
Applied rewrites90.3%
lift-pow.f64N/A
metadata-evalN/A
pow-prod-upN/A
pow-prod-downN/A
pow2N/A
lower-pow.f64N/A
pow2N/A
lift-*.f6445.9
Applied rewrites45.9%
lift-pow.f64N/A
lift-*.f64N/A
unpow-prod-downN/A
metadata-evalN/A
pow-powN/A
pow1/3N/A
metadata-evalN/A
pow-powN/A
pow1/3N/A
pow-prod-upN/A
metadata-evalN/A
lower-pow.f64N/A
lift-cbrt.f6498.4
Applied rewrites98.4%
(FPCore (x)
:precision binary64
(/
(/
1.0
(+
(+ (cbrt (/ (+ (/ 1.0 x) 1.0) x)) (/ 1.0 (cbrt x)))
(cbrt (/ (+ (/ 2.0 x) 1.0) x))))
x))
double code(double x) {
return (1.0 / ((cbrt((((1.0 / x) + 1.0) / x)) + (1.0 / cbrt(x))) + cbrt((((2.0 / x) + 1.0) / x)))) / x;
}
/** Herbie alternative for cbrt(x + 1) - cbrt(x): fully rationalized form
 *  1 / (x * (cbrt((1/x + 1)/x) + 1/cbrt(x) + cbrt((2/x + 1)/x))). */
public static double code(double x) {
return (1.0 / ((Math.cbrt((((1.0 / x) + 1.0) / x)) + (1.0 / Math.cbrt(x))) + Math.cbrt((((2.0 / x) + 1.0) / x)))) / x;
}
function code(x) return Float64(Float64(1.0 / Float64(Float64(cbrt(Float64(Float64(Float64(1.0 / x) + 1.0) / x)) + Float64(1.0 / cbrt(x))) + cbrt(Float64(Float64(Float64(2.0 / x) + 1.0) / x)))) / x) end
code[x_] := N[(N[(1.0 / N[(N[(N[Power[N[(N[(N[(1.0 / x), $MachinePrecision] + 1.0), $MachinePrecision] / x), $MachinePrecision], 1/3], $MachinePrecision] + N[(1.0 / N[Power[x, 1/3], $MachinePrecision]), $MachinePrecision]), $MachinePrecision] + N[Power[N[(N[(N[(2.0 / x), $MachinePrecision] + 1.0), $MachinePrecision] / x), $MachinePrecision], 1/3], $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / x), $MachinePrecision]
\begin{array}{l}
\\
\frac{\frac{1}{\left(\sqrt[3]{\frac{\frac{1}{x} + 1}{x}} + \frac{1}{\sqrt[3]{x}}\right) + \sqrt[3]{\frac{\frac{2}{x} + 1}{x}}}}{x}
\end{array}
Initial program 6.8%
lift--.f64N/A
lift-+.f64N/A
lift-cbrt.f64N/A
lift-cbrt.f64N/A
flip3--N/A
lower-/.f64N/A
rem-cube-cbrtN/A
rem-cube-cbrtN/A
lower--.f64N/A
metadata-evalN/A
fp-cancel-sign-sub-invN/A
metadata-evalN/A
metadata-evalN/A
lower--.f64N/A
lower-+.f64N/A
Applied rewrites8.8%
Taylor expanded in x around inf
Applied rewrites94.1%
Taylor expanded in x around inf
lower-/.f64N/A
+-commutativeN/A
lower-+.f64N/A
Applied rewrites93.6%
lift-pow.f64N/A
metadata-evalN/A
pow-flipN/A
pow1/3N/A
lift-cbrt.f64N/A
lift-/.f6498.1
Applied rewrites98.1%
(FPCore (x) :precision binary64 (* (pow (cbrt x) -2.0) 0.3333333333333333))
double code(double x) {
return pow(cbrt(x), -2.0) * 0.3333333333333333;
}
/** Leading Taylor term of cbrt(x + 1) - cbrt(x) at x -> inf: x^(-2/3) / 3. */
public static double code(double x) {
return Math.pow(Math.cbrt(x), -2.0) * 0.3333333333333333;
}
function code(x) return Float64((cbrt(x) ^ -2.0) * 0.3333333333333333) end
code[x_] := N[(N[Power[N[Power[x, 1/3], $MachinePrecision], -2.0], $MachinePrecision] * 0.3333333333333333), $MachinePrecision]
\begin{array}{l}
\\
{\left(\sqrt[3]{x}\right)}^{-2} \cdot 0.3333333333333333
\end{array}
Initial program 6.8%
Taylor expanded in x around inf
*-commutativeN/A
lower-*.f64N/A
pow1/3N/A
pow-flipN/A
pow-powN/A
metadata-evalN/A
metadata-evalN/A
metadata-evalN/A
metadata-evalN/A
lower-pow.f64N/A
metadata-evalN/A
metadata-eval88.9
Applied rewrites88.9%
lift-pow.f64N/A
metadata-evalN/A
pow-prod-upN/A
pow-prod-downN/A
pow2N/A
lower-pow.f64N/A
pow2N/A
lift-*.f6446.5
Applied rewrites46.5%
lift-pow.f64N/A
lift-*.f64N/A
unpow-prod-downN/A
metadata-evalN/A
pow-powN/A
pow1/3N/A
metadata-evalN/A
pow-powN/A
pow1/3N/A
pow-prod-upN/A
metadata-evalN/A
lower-pow.f64N/A
lift-cbrt.f6496.6
Applied rewrites96.6%
(FPCore (x) :precision binary64 (if (<= x 1.35e+154) (* (/ -1.0 (cbrt (- (* x x)))) 0.3333333333333333) (* (exp (* (log x) -0.6666666666666666)) 0.3333333333333333)))
/* Herbie alternative evaluating the asymptotic term x^(-2/3)/3 two ways:
 * -1/cbrt(-(x*x))/3 for x <= 1.35e154, exp(log(x) * -2/3)/3 beyond.
 * NOTE(review): 1.35e154 is close to sqrt(DBL_MAX), so the branch
 * presumably avoids overflow of x*x — confirm against the Herbie run. */
double code(double x) {
double tmp;
if (x <= 1.35e+154) {
tmp = (-1.0 / cbrt(-(x * x))) * 0.3333333333333333;
} else {
tmp = exp((log(x) * -0.6666666666666666)) * 0.3333333333333333;
}
return tmp;
}
/** Herbie alternative evaluating x^(-2/3)/3 two ways: -1/cbrt(-(x*x))/3 for
 *  x <= 1.35e154, exp(log(x) * -2/3)/3 beyond.
 *  NOTE(review): 1.35e154 ~ sqrt(Double.MAX_VALUE); branch presumably
 *  avoids overflow of x*x — confirm. */
public static double code(double x) {
double tmp;
if (x <= 1.35e+154) {
tmp = (-1.0 / Math.cbrt(-(x * x))) * 0.3333333333333333;
} else {
tmp = Math.exp((Math.log(x) * -0.6666666666666666)) * 0.3333333333333333;
}
return tmp;
}
function code(x) tmp = 0.0 if (x <= 1.35e+154) tmp = Float64(Float64(-1.0 / cbrt(Float64(-Float64(x * x)))) * 0.3333333333333333); else tmp = Float64(exp(Float64(log(x) * -0.6666666666666666)) * 0.3333333333333333); end return tmp end
code[x_] := If[LessEqual[x, 1.35e+154], N[(N[(-1.0 / N[Power[(-N[(x * x), $MachinePrecision]), 1/3], $MachinePrecision]), $MachinePrecision] * 0.3333333333333333), $MachinePrecision], N[(N[Exp[N[(N[Log[x], $MachinePrecision] * -0.6666666666666666), $MachinePrecision]], $MachinePrecision] * 0.3333333333333333), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;x \leq 1.35 \cdot 10^{+154}:\\
\;\;\;\;\frac{-1}{\sqrt[3]{-x \cdot x}} \cdot 0.3333333333333333\\
\mathbf{else}:\\
\;\;\;\;e^{\log x \cdot -0.6666666666666666} \cdot 0.3333333333333333\\
\end{array}
\end{array}
if x < 1.35000000000000003e154Initial program 8.8%
Taylor expanded in x around inf
*-commutativeN/A
lower-*.f64N/A
pow1/3N/A
pow-flipN/A
pow-powN/A
metadata-evalN/A
metadata-evalN/A
metadata-evalN/A
metadata-evalN/A
lower-pow.f64N/A
metadata-evalN/A
metadata-eval88.7
Applied rewrites88.7%
lift-pow.f64N/A
metadata-evalN/A
metadata-evalN/A
pow-powN/A
pow-flipN/A
pow1/3N/A
frac-2negN/A
metadata-evalN/A
cbrt-divN/A
metadata-evalN/A
rem-cbrt-cubeN/A
lower-/.f64N/A
lower-cbrt.f64N/A
lower-neg.f64N/A
pow2N/A
lift-*.f6495.3
Applied rewrites95.3%
if 1.35000000000000003e154 < x Initial program 4.7%
Taylor expanded in x around inf
*-commutativeN/A
lower-*.f64N/A
pow1/3N/A
pow-flipN/A
pow-powN/A
metadata-evalN/A
metadata-evalN/A
metadata-evalN/A
metadata-evalN/A
lower-pow.f64N/A
metadata-evalN/A
metadata-eval89.1
Applied rewrites89.1%
lift-pow.f64N/A
pow-to-expN/A
lower-exp.f64N/A
lower-*.f64N/A
lower-log.f6489.5
Applied rewrites89.5%
(FPCore (x) :precision binary64 (if (<= x 1.35e+154) (* (cbrt (/ 1.0 (* x x))) 0.3333333333333333) (* (exp (* (log x) -0.6666666666666666)) 0.3333333333333333)))
/* Herbie alternative evaluating the asymptotic term x^(-2/3)/3 as
 * cbrt(1/(x*x))/3 for x <= 1.35e154, exp(log(x) * -2/3)/3 beyond.
 * NOTE(review): 1.35e154 ~ sqrt(DBL_MAX); branch presumably avoids
 * overflow of x*x — confirm against the Herbie run. */
double code(double x) {
double tmp;
if (x <= 1.35e+154) {
tmp = cbrt((1.0 / (x * x))) * 0.3333333333333333;
} else {
tmp = exp((log(x) * -0.6666666666666666)) * 0.3333333333333333;
}
return tmp;
}
/** Herbie alternative evaluating x^(-2/3)/3 as cbrt(1/(x*x))/3 for
 *  x <= 1.35e154, exp(log(x) * -2/3)/3 beyond.
 *  NOTE(review): threshold ~ sqrt(Double.MAX_VALUE); presumably avoids
 *  overflow of x*x — confirm. */
public static double code(double x) {
double tmp;
if (x <= 1.35e+154) {
tmp = Math.cbrt((1.0 / (x * x))) * 0.3333333333333333;
} else {
tmp = Math.exp((Math.log(x) * -0.6666666666666666)) * 0.3333333333333333;
}
return tmp;
}
function code(x) tmp = 0.0 if (x <= 1.35e+154) tmp = Float64(cbrt(Float64(1.0 / Float64(x * x))) * 0.3333333333333333); else tmp = Float64(exp(Float64(log(x) * -0.6666666666666666)) * 0.3333333333333333); end return tmp end
code[x_] := If[LessEqual[x, 1.35e+154], N[(N[Power[N[(1.0 / N[(x * x), $MachinePrecision]), $MachinePrecision], 1/3], $MachinePrecision] * 0.3333333333333333), $MachinePrecision], N[(N[Exp[N[(N[Log[x], $MachinePrecision] * -0.6666666666666666), $MachinePrecision]], $MachinePrecision] * 0.3333333333333333), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;x \leq 1.35 \cdot 10^{+154}:\\
\;\;\;\;\sqrt[3]{\frac{1}{x \cdot x}} \cdot 0.3333333333333333\\
\mathbf{else}:\\
\;\;\;\;e^{\log x \cdot -0.6666666666666666} \cdot 0.3333333333333333\\
\end{array}
\end{array}
if x < 1.35000000000000003e154Initial program 8.8%
Taylor expanded in x around inf
*-commutativeN/A
lower-*.f64N/A
pow1/3N/A
pow-flipN/A
pow-powN/A
metadata-evalN/A
metadata-evalN/A
metadata-evalN/A
metadata-evalN/A
lower-pow.f64N/A
metadata-evalN/A
metadata-eval88.7
Applied rewrites88.7%
lift-pow.f64N/A
metadata-evalN/A
metadata-evalN/A
pow-powN/A
pow-flipN/A
pow1/3N/A
lower-cbrt.f64N/A
lower-/.f64N/A
pow2N/A
lift-*.f6495.1
Applied rewrites95.1%
if 1.35000000000000003e154 < x Initial program 4.7%
Taylor expanded in x around inf
*-commutativeN/A
lower-*.f64N/A
pow1/3N/A
pow-flipN/A
pow-powN/A
metadata-evalN/A
metadata-evalN/A
metadata-evalN/A
metadata-evalN/A
lower-pow.f64N/A
metadata-evalN/A
metadata-eval89.1
Applied rewrites89.1%
lift-pow.f64N/A
pow-to-expN/A
lower-exp.f64N/A
lower-*.f64N/A
lower-log.f6489.5
Applied rewrites89.5%
(FPCore (x) :precision binary64 (* (exp (* (log x) -0.6666666666666666)) 0.3333333333333333))
double code(double x) {
return exp((log(x) * -0.6666666666666666)) * 0.3333333333333333;
}
! NaN-propagating fmax/fmin helpers: if exactly one argument is NaN the
! other argument is returned (x /= x is the NaN test); mixed-kind
! overloads promote real(4) operands to real(8).
module fmin_fmax_functions
implicit none
private
public fmax
public fmin
! Generic names dispatch on argument kinds:
! 88 = both real(8), 44 = both real(4), 84/48 = mixed (result real(8)).
interface fmax
module procedure fmax88
module procedure fmax44
module procedure fmax84
module procedure fmax48
end interface
interface fmin
module procedure fmin88
module procedure fmin44
module procedure fmin84
module procedure fmin48
end interface
contains
real(8) function fmax88(x, y) result (res)
real(8), intent (in) :: x
real(8), intent (in) :: y
! If x is NaN take y; else if y is NaN take x; else intrinsic max.
res = merge(y, merge(x, max(x, y), y /= y), x /= x)
end function
real(4) function fmax44(x, y) result (res)
real(4), intent (in) :: x
real(4), intent (in) :: y
res = merge(y, merge(x, max(x, y), y /= y), x /= x)
end function
real(8) function fmax84(x, y) result(res)
real(8), intent (in) :: x
real(4), intent (in) :: y
! dble() promotes the real(4) operand so max compares like kinds.
res = merge(dble(y), merge(x, max(x, dble(y)), y /= y), x /= x)
end function
real(8) function fmax48(x, y) result(res)
real(4), intent (in) :: x
real(8), intent (in) :: y
res = merge(y, merge(dble(x), max(dble(x), y), y /= y), x /= x)
end function
real(8) function fmin88(x, y) result (res)
real(8), intent (in) :: x
real(8), intent (in) :: y
! Same NaN-propagation scheme as fmax88, with intrinsic min.
res = merge(y, merge(x, min(x, y), y /= y), x /= x)
end function
real(4) function fmin44(x, y) result (res)
real(4), intent (in) :: x
real(4), intent (in) :: y
res = merge(y, merge(x, min(x, y), y /= y), x /= x)
end function
real(8) function fmin84(x, y) result(res)
real(8), intent (in) :: x
real(4), intent (in) :: y
res = merge(dble(y), merge(x, min(x, dble(y)), y /= y), x /= x)
end function
real(8) function fmin48(x, y) result(res)
real(4), intent (in) :: x
real(8), intent (in) :: y
res = merge(y, merge(dble(x), min(dble(x), y), y /= y), x /= x)
end function
end module
! Asymptotic term x**(-2d0/3d0)/3 of cbrt(x + 1) - cbrt(x), via exp/log.
real(8) function code(x)
use fmin_fmax_functions
real(8), intent (in) :: x
code = exp((log(x) * (-0.6666666666666666d0))) * 0.3333333333333333d0
end function
/** Evaluates exp(-2/3 * log x) / 3, i.e. x^(-2/3) / 3 via exp/log; x must be positive. */
public static double code(double x) {
    final double scaledLog = Math.log(x) * -0.6666666666666666;
    return Math.exp(scaledLog) * 0.3333333333333333;
}
def code(x): return math.exp((math.log(x) * -0.6666666666666666)) * 0.3333333333333333
function code(x) return Float64(exp(Float64(log(x) * -0.6666666666666666)) * 0.3333333333333333) end
function tmp = code(x) tmp = exp((log(x) * -0.6666666666666666)) * 0.3333333333333333; end
code[x_] := N[(N[Exp[N[(N[Log[x], $MachinePrecision] * -0.6666666666666666), $MachinePrecision]], $MachinePrecision] * 0.3333333333333333), $MachinePrecision]
\begin{array}{l}
\\
e^{\log x \cdot -0.6666666666666666} \cdot 0.3333333333333333
\end{array}
Initial program 6.8%
Taylor expanded in x around inf
*-commutative N/A
lower-*.f64 N/A
pow1/3 N/A
pow-flip N/A
pow-pow N/A
metadata-eval N/A
metadata-eval N/A
metadata-eval N/A
metadata-eval N/A
lower-pow.f64 N/A
metadata-eval N/A
metadata-eval 88.9
Applied rewrites: 88.9%
lift-pow.f64 N/A
pow-to-exp N/A
lower-exp.f64 N/A
lower-*.f64 N/A
lower-log.f64 89.3
Applied rewrites: 89.3%
(FPCore (x) :precision binary64 (* (pow x -0.6666666666666666) 0.3333333333333333))
double code(double x) {
return pow(x, -0.6666666666666666) * 0.3333333333333333;
}
! NaN-aware fmax/fmin helpers: if exactly one argument is NaN, the other
! argument is returned (matching C's fmax/fmin rather than Fortran max/min).
! Generic interfaces dispatch on the kind (real(4)/real(8)) of each argument;
! mixed-kind variants promote with dble() and return real(8).
module fmin_fmax_functions
implicit none
private
public fmax
public fmin
interface fmax
module procedure fmax88
module procedure fmax44
module procedure fmax84
module procedure fmax48
end interface
interface fmin
module procedure fmin88
module procedure fmin44
module procedure fmin84
module procedure fmin48
end interface
contains
! double/double max; x /= x is the standard NaN test.
real(8) function fmax88(x, y) result (res)
real(8), intent (in) :: x
real(8), intent (in) :: y
res = merge(y, merge(x, max(x, y), y /= y), x /= x)
end function
! single/single max.
real(4) function fmax44(x, y) result (res)
real(4), intent (in) :: x
real(4), intent (in) :: y
res = merge(y, merge(x, max(x, y), y /= y), x /= x)
end function
! double/single max; real(4) argument promoted via dble().
real(8) function fmax84(x, y) result(res)
real(8), intent (in) :: x
real(4), intent (in) :: y
res = merge(dble(y), merge(x, max(x, dble(y)), y /= y), x /= x)
end function
! single/double max; real(4) argument promoted via dble().
real(8) function fmax48(x, y) result(res)
real(4), intent (in) :: x
real(8), intent (in) :: y
res = merge(y, merge(dble(x), max(dble(x), y), y /= y), x /= x)
end function
! double/double min; same NaN handling as fmax88.
real(8) function fmin88(x, y) result (res)
real(8), intent (in) :: x
real(8), intent (in) :: y
res = merge(y, merge(x, min(x, y), y /= y), x /= x)
end function
! single/single min.
real(4) function fmin44(x, y) result (res)
real(4), intent (in) :: x
real(4), intent (in) :: y
res = merge(y, merge(x, min(x, y), y /= y), x /= x)
end function
! double/single min; real(4) argument promoted via dble().
real(8) function fmin84(x, y) result(res)
real(8), intent (in) :: x
real(4), intent (in) :: y
res = merge(dble(y), merge(x, min(x, dble(y)), y /= y), x /= x)
end function
! single/double min; real(4) argument promoted via dble().
real(8) function fmin48(x, y) result(res)
real(4), intent (in) :: x
real(8), intent (in) :: y
res = merge(y, merge(dble(x), min(dble(x), y), y /= y), x /= x)
end function
end module
! Evaluates x**(-2/3) / 3, a large-x approximation of
! cbrt(x + 1) - cbrt(x) using the ** power operator directly.
real(8) function code(x)
use fmin_fmax_functions
real(8), intent (in) :: x
code = (x ** (-0.6666666666666666d0)) * 0.3333333333333333d0
end function
/** Evaluates x^(-2/3) / 3 using a single Math.pow call. */
public static double code(double x) {
    final double invCbrtSq = Math.pow(x, -0.6666666666666666);
    return invCbrtSq * 0.3333333333333333;
}
def code(x): return math.pow(x, -0.6666666666666666) * 0.3333333333333333
function code(x) return Float64((x ^ -0.6666666666666666) * 0.3333333333333333) end
function tmp = code(x) tmp = (x ^ -0.6666666666666666) * 0.3333333333333333; end
code[x_] := N[(N[Power[x, -0.6666666666666666], $MachinePrecision] * 0.3333333333333333), $MachinePrecision]
\begin{array}{l}
\\
{x}^{-0.6666666666666666} \cdot 0.3333333333333333
\end{array}
Initial program 6.8%
Taylor expanded in x around inf
*-commutative N/A
lower-*.f64 N/A
pow1/3 N/A
pow-flip N/A
pow-pow N/A
metadata-eval N/A
metadata-eval N/A
metadata-eval N/A
metadata-eval N/A
lower-pow.f64 N/A
metadata-eval N/A
metadata-eval 88.9
Applied rewrites: 88.9%
(FPCore (x) :precision binary64 (- 1.0 (cbrt x)))
double code(double x) {
return 1.0 - cbrt(x);
}
/** Degenerate small-x Taylor form of cbrt(x + 1) - cbrt(x): 1 - cbrt(x). */
public static double code(double x) {
    final double root = Math.cbrt(x);
    return 1.0 - root;
}
function code(x) return Float64(1.0 - cbrt(x)) end
code[x_] := N[(1.0 - N[Power[x, 1/3], $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
1 - \sqrt[3]{x}
\end{array}
Initial program 6.8%
Taylor expanded in x around 0
Applied rewrites: 1.8%
(FPCore (x) :precision binary64 (let* ((t_0 (cbrt (+ x 1.0)))) (/ 1.0 (+ (+ (* t_0 t_0) (* (cbrt x) t_0)) (* (cbrt x) (cbrt x))))))
double code(double x) {
double t_0 = cbrt((x + 1.0));
return 1.0 / (((t_0 * t_0) + (cbrt(x) * t_0)) + (cbrt(x) * cbrt(x)));
}
/** Rationalized form of cbrt(x + 1) - cbrt(x): 1 / (a^2 + a*b + b^2)
 *  with a = cbrt(x + 1), b = cbrt(x); avoids cancellation for large x. */
public static double code(double x) {
    final double a = Math.cbrt(x + 1.0);
    final double b = Math.cbrt(x);
    final double denom = ((a * a) + (b * a)) + (b * b);
    return 1.0 / denom;
}
function code(x) t_0 = cbrt(Float64(x + 1.0)) return Float64(1.0 / Float64(Float64(Float64(t_0 * t_0) + Float64(cbrt(x) * t_0)) + Float64(cbrt(x) * cbrt(x)))) end
code[x_] := Block[{t$95$0 = N[Power[N[(x + 1.0), $MachinePrecision], 1/3], $MachinePrecision]}, N[(1.0 / N[(N[(N[(t$95$0 * t$95$0), $MachinePrecision] + N[(N[Power[x, 1/3], $MachinePrecision] * t$95$0), $MachinePrecision]), $MachinePrecision] + N[(N[Power[x, 1/3], $MachinePrecision] * N[Power[x, 1/3], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := \sqrt[3]{x + 1}\\
\frac{1}{\left(t\_0 \cdot t\_0 + \sqrt[3]{x} \cdot t\_0\right) + \sqrt[3]{x} \cdot \sqrt[3]{x}}
\end{array}
\end{array}
herbie shell --seed 2025106
; Original Herbie input: cbrt(x + 1) - cbrt(x) over 1 < x < 1e308,
; with the rationalized conjugate form supplied as the :alt target.
(FPCore (x)
:name "2cbrt (problem 3.3.4)"
:precision binary64
:pre (and (> x 1.0) (< x 1e+308))
:alt
(! :herbie-platform c (/ 1 (+ (* (cbrt (+ x 1)) (cbrt (+ x 1))) (* (cbrt x) (cbrt (+ x 1))) (* (cbrt x) (cbrt x)))))
(- (cbrt (+ x 1.0)) (cbrt x)))