
(FPCore (x) :precision binary64 (/ 10.0 (- 1.0 (* x x))))
/* Evaluate 10 / (1 - x^2) in binary64.
 * The denominator vanishes at x = +/-1, where the result diverges. */
double code(double x) {
    double denom = 1.0 - (x * x);
    return 10.0 / denom;
}
! Evaluates 10 / (1 - x**2) in double precision (binary64).
! The denominator 1.0d0 - x*x vanishes at x = +/-1, and the
! subtraction loses accuracy to cancellation for |x| near 1.
real(8) function code(x)
real(8), intent (in) :: x
code = 10.0d0 / (1.0d0 - (x * x))
end function
// Computes 10 / (1 - x^2) in double precision.
// The denominator 1.0 - x*x is zero at x = +/-1 and suffers
// cancellation for |x| near 1.
public static double code(double x) {
return 10.0 / (1.0 - (x * x));
}
def code(x): return 10.0 / (1.0 - (x * x))
function code(x) return Float64(10.0 / Float64(1.0 - Float64(x * x))) end
function tmp = code(x) tmp = 10.0 / (1.0 - (x * x)); end
code[x_] := N[(10.0 / N[(1.0 - N[(x * x), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{10}{1 - x \cdot x}
\end{array}
Sampling outcomes in binary64 precision:
Herbie found 12 alternatives:
| Alternative | Accuracy | Speedup |
|---|---|---|
(FPCore (x) :precision binary64 (/ 10.0 (- 1.0 (* x x))))
/* Computes 10 / (1 - x^2) in binary64; the denominator vanishes at x = +/-1. */
double code(double x) {
return 10.0 / (1.0 - (x * x));
}
! Evaluates 10 / (1 - x**2) in double precision;
! the denominator 1.0d0 - x*x vanishes at x = +/-1.
real(8) function code(x)
real(8), intent (in) :: x
code = 10.0d0 / (1.0d0 - (x * x))
end function
public static double code(double x) {
return 10.0 / (1.0 - (x * x));
}
def code(x): return 10.0 / (1.0 - (x * x))
function code(x) return Float64(10.0 / Float64(1.0 - Float64(x * x))) end
function tmp = code(x) tmp = 10.0 / (1.0 - (x * x)); end
code[x_] := N[(10.0 / N[(1.0 - N[(x * x), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{10}{1 - x \cdot x}
\end{array}
(FPCore (x) :precision binary64 (/ 10.0 (fma (- 0.0 x) x 1.0)))
/* Computes 10 / (1 - x^2), forming the denominator as
 * fma(0.0 - x, x, 1.0) = (-x)*x + 1 with a single rounding,
 * avoiding the separate rounding of x*x in the naive 1.0 - x*x. */
double code(double x) {
return 10.0 / fma((0.0 - x), x, 1.0);
}
function code(x) return Float64(10.0 / fma(Float64(0.0 - x), x, 1.0)) end
code[x_] := N[(10.0 / N[(N[(0.0 - x), $MachinePrecision] * x + 1.0), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{10}{\mathsf{fma}\left(0 - x, x, 1\right)}
\end{array}
Initial program 87.9%
sub-neg N/A
+-commutative N/A
distribute-lft-neg-in N/A
accelerator-lowering-fma.f64 N/A
neg-sub0 N/A
--lowering--.f64 99.6%
Applied egg-rr 99.6%
sub0-neg N/A
neg-lowering-neg.f64 99.6%
Applied egg-rr 99.6%
Final simplification 99.6%
(FPCore (x)
:precision binary64
(let* ((t_0 (+ 1.0 (* x x)))
(t_1 (* x (* x x)))
(t_2 (* x t_1))
(t_3 (* t_0 (+ 1.0 t_2)))
(t_4 (/ t_3 (* x (* x (* t_1 t_1))))))
(/ 10.0 (/ (+ t_4 (* t_0 (- -1.0 t_2))) (* t_3 t_4)))))
/* Machine-generated rearrangement of 10 / (1 - x^2).
 * t_0 = 1 + x^2, t_1 = x^3, t_2 = x^4, t_3 = (1 + x^2)(1 + x^4),
 * t_4 = t_3 / x^8; the returned quotient simplifies algebraically
 * to 10 / (1 - x^2) -- only the rounding pattern differs.
 * Note: t_4 divides by x^8, so x must be nonzero. */
double code(double x) {
double t_0 = 1.0 + (x * x);
double t_1 = x * (x * x);
double t_2 = x * t_1;
double t_3 = t_0 * (1.0 + t_2);
double t_4 = t_3 / (x * (x * (t_1 * t_1)));
return 10.0 / ((t_4 + (t_0 * (-1.0 - t_2))) / (t_3 * t_4));
}
! Machine-generated rearrangement of 10 / (1 - x**2).
! t_0 = 1 + x**2, t_1 = x**3, t_2 = x**4,
! t_3 = (1 + x**2)*(1 + x**4), t_4 = t_3 / x**8; the final quotient
! simplifies algebraically to 10 / (1 - x**2) -- only the rounding
! pattern differs.  Note: t_4 divides by x**8, so x must be nonzero.
real(8) function code(x)
real(8), intent (in) :: x
real(8) :: t_0
real(8) :: t_1
real(8) :: t_2
real(8) :: t_3
real(8) :: t_4
t_0 = 1.0d0 + (x * x)
t_1 = x * (x * x)
t_2 = x * t_1
t_3 = t_0 * (1.0d0 + t_2)
t_4 = t_3 / (x * (x * (t_1 * t_1)))
code = 10.0d0 / ((t_4 + (t_0 * ((-1.0d0) - t_2))) / (t_3 * t_4))
end function
public static double code(double x) {
double t_0 = 1.0 + (x * x);
double t_1 = x * (x * x);
double t_2 = x * t_1;
double t_3 = t_0 * (1.0 + t_2);
double t_4 = t_3 / (x * (x * (t_1 * t_1)));
return 10.0 / ((t_4 + (t_0 * (-1.0 - t_2))) / (t_3 * t_4));
}
def code(x): t_0 = 1.0 + (x * x) t_1 = x * (x * x) t_2 = x * t_1 t_3 = t_0 * (1.0 + t_2) t_4 = t_3 / (x * (x * (t_1 * t_1))) return 10.0 / ((t_4 + (t_0 * (-1.0 - t_2))) / (t_3 * t_4))
function code(x) t_0 = Float64(1.0 + Float64(x * x)) t_1 = Float64(x * Float64(x * x)) t_2 = Float64(x * t_1) t_3 = Float64(t_0 * Float64(1.0 + t_2)) t_4 = Float64(t_3 / Float64(x * Float64(x * Float64(t_1 * t_1)))) return Float64(10.0 / Float64(Float64(t_4 + Float64(t_0 * Float64(-1.0 - t_2))) / Float64(t_3 * t_4))) end
function tmp = code(x) t_0 = 1.0 + (x * x); t_1 = x * (x * x); t_2 = x * t_1; t_3 = t_0 * (1.0 + t_2); t_4 = t_3 / (x * (x * (t_1 * t_1))); tmp = 10.0 / ((t_4 + (t_0 * (-1.0 - t_2))) / (t_3 * t_4)); end
code[x_] := Block[{t$95$0 = N[(1.0 + N[(x * x), $MachinePrecision]), $MachinePrecision]}, Block[{t$95$1 = N[(x * N[(x * x), $MachinePrecision]), $MachinePrecision]}, Block[{t$95$2 = N[(x * t$95$1), $MachinePrecision]}, Block[{t$95$3 = N[(t$95$0 * N[(1.0 + t$95$2), $MachinePrecision]), $MachinePrecision]}, Block[{t$95$4 = N[(t$95$3 / N[(x * N[(x * N[(t$95$1 * t$95$1), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]}, N[(10.0 / N[(N[(t$95$4 + N[(t$95$0 * N[(-1.0 - t$95$2), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / N[(t$95$3 * t$95$4), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]]]]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := 1 + x \cdot x\\
t_1 := x \cdot \left(x \cdot x\right)\\
t_2 := x \cdot t\_1\\
t_3 := t\_0 \cdot \left(1 + t\_2\right)\\
t_4 := \frac{t\_3}{x \cdot \left(x \cdot \left(t\_1 \cdot t\_1\right)\right)}\\
\frac{10}{\frac{t\_4 + t\_0 \cdot \left(-1 - t\_2\right)}{t\_3 \cdot t\_4}}
\end{array}
\end{array}
Initial program 87.9%
sub-negN/A
+-commutativeN/A
distribute-lft-neg-inN/A
accelerator-lowering-fma.f64N/A
neg-sub0N/A
--lowering--.f6499.6%
Applied egg-rr99.6%
sub0-negN/A
neg-lowering-neg.f6499.6%
Applied egg-rr99.6%
Applied egg-rr88.9%
Final simplification88.9%
(FPCore (x)
:precision binary64
(let* ((t_0 (+ 1.0 (* x x)))
(t_1 (* x (* x x)))
(t_2 (* x (* x (* t_1 t_1)))))
(/
10.0
(/
(* (/ (- 1.0 (* t_2 t_2)) (* t_0 t_0)) (/ 1.0 (/ (+ 1.0 t_2) t_0)))
(+ 1.0 (* x t_1))))))
/* Machine-generated rearrangement of 10 / (1 - x^2).
 * t_0 = 1 + x^2, t_1 = x^3, t_2 = x^8; using
 * 1 - t_2^2 = 1 - x^16 = (1 - x^8)(1 + x^8), the compound
 * quotient reduces algebraically to 10 / (1 - x^2). */
double code(double x) {
double t_0 = 1.0 + (x * x);
double t_1 = x * (x * x);
double t_2 = x * (x * (t_1 * t_1));
return 10.0 / ((((1.0 - (t_2 * t_2)) / (t_0 * t_0)) * (1.0 / ((1.0 + t_2) / t_0))) / (1.0 + (x * t_1)));
}
! Machine-generated rearrangement of 10 / (1 - x**2).
! t_0 = 1 + x**2, t_1 = x**3, t_2 = x**8; the compound quotient
! reduces algebraically to 10 / (1 - x**2), differing only in
! its floating-point rounding pattern.
real(8) function code(x)
real(8), intent (in) :: x
real(8) :: t_0
real(8) :: t_1
real(8) :: t_2
t_0 = 1.0d0 + (x * x)
t_1 = x * (x * x)
t_2 = x * (x * (t_1 * t_1))
code = 10.0d0 / ((((1.0d0 - (t_2 * t_2)) / (t_0 * t_0)) * (1.0d0 / ((1.0d0 + t_2) / t_0))) / (1.0d0 + (x * t_1)))
end function
public static double code(double x) {
double t_0 = 1.0 + (x * x);
double t_1 = x * (x * x);
double t_2 = x * (x * (t_1 * t_1));
return 10.0 / ((((1.0 - (t_2 * t_2)) / (t_0 * t_0)) * (1.0 / ((1.0 + t_2) / t_0))) / (1.0 + (x * t_1)));
}
def code(x): t_0 = 1.0 + (x * x) t_1 = x * (x * x) t_2 = x * (x * (t_1 * t_1)) return 10.0 / ((((1.0 - (t_2 * t_2)) / (t_0 * t_0)) * (1.0 / ((1.0 + t_2) / t_0))) / (1.0 + (x * t_1)))
function code(x) t_0 = Float64(1.0 + Float64(x * x)) t_1 = Float64(x * Float64(x * x)) t_2 = Float64(x * Float64(x * Float64(t_1 * t_1))) return Float64(10.0 / Float64(Float64(Float64(Float64(1.0 - Float64(t_2 * t_2)) / Float64(t_0 * t_0)) * Float64(1.0 / Float64(Float64(1.0 + t_2) / t_0))) / Float64(1.0 + Float64(x * t_1)))) end
function tmp = code(x) t_0 = 1.0 + (x * x); t_1 = x * (x * x); t_2 = x * (x * (t_1 * t_1)); tmp = 10.0 / ((((1.0 - (t_2 * t_2)) / (t_0 * t_0)) * (1.0 / ((1.0 + t_2) / t_0))) / (1.0 + (x * t_1))); end
code[x_] := Block[{t$95$0 = N[(1.0 + N[(x * x), $MachinePrecision]), $MachinePrecision]}, Block[{t$95$1 = N[(x * N[(x * x), $MachinePrecision]), $MachinePrecision]}, Block[{t$95$2 = N[(x * N[(x * N[(t$95$1 * t$95$1), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]}, N[(10.0 / N[(N[(N[(N[(1.0 - N[(t$95$2 * t$95$2), $MachinePrecision]), $MachinePrecision] / N[(t$95$0 * t$95$0), $MachinePrecision]), $MachinePrecision] * N[(1.0 / N[(N[(1.0 + t$95$2), $MachinePrecision] / t$95$0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / N[(1.0 + N[(x * t$95$1), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := 1 + x \cdot x\\
t_1 := x \cdot \left(x \cdot x\right)\\
t_2 := x \cdot \left(x \cdot \left(t\_1 \cdot t\_1\right)\right)\\
\frac{10}{\frac{\frac{1 - t\_2 \cdot t\_2}{t\_0 \cdot t\_0} \cdot \frac{1}{\frac{1 + t\_2}{t\_0}}}{1 + x \cdot t\_1}}
\end{array}
\end{array}
Initial program 87.9%
sub-negN/A
+-commutativeN/A
distribute-lft-neg-inN/A
accelerator-lowering-fma.f64N/A
neg-sub0N/A
--lowering--.f6499.6%
Applied egg-rr99.6%
+-commutativeN/A
sub0-negN/A
cancel-sign-sub-invN/A
flip--N/A
metadata-evalN/A
associate-*r*N/A
flip--N/A
metadata-evalN/A
associate-*r*N/A
associate-*r*N/A
associate-/r*N/A
*-commutativeN/A
Applied egg-rr88.1%
Applied egg-rr88.8%
(FPCore (x)
:precision binary64
(let* ((t_0 (* x (* x x)))
(t_1 (* x (* x (* t_0 t_0))))
(t_2 (+ 1.0 (* x x))))
(/
10.0
(/
(/ (- 1.0 (* t_1 t_1)) (* t_2 t_2))
(* (+ 1.0 (* x t_0)) (/ (+ 1.0 t_1) t_2))))))
/* Machine-generated rearrangement of 10 / (1 - x^2).
 * t_0 = x^3, t_1 = x^8, t_2 = 1 + x^2; the compound quotient
 * reduces algebraically to 10 / (1 - x^2), differing only in
 * its rounding pattern. */
double code(double x) {
double t_0 = x * (x * x);
double t_1 = x * (x * (t_0 * t_0));
double t_2 = 1.0 + (x * x);
return 10.0 / (((1.0 - (t_1 * t_1)) / (t_2 * t_2)) / ((1.0 + (x * t_0)) * ((1.0 + t_1) / t_2)));
}
! Machine-generated rearrangement of 10 / (1 - x**2).
! t_0 = x**3, t_1 = x**8, t_2 = 1 + x**2; the compound quotient
! reduces algebraically to 10 / (1 - x**2), differing only in
! its rounding pattern.
real(8) function code(x)
real(8), intent (in) :: x
real(8) :: t_0
real(8) :: t_1
real(8) :: t_2
t_0 = x * (x * x)
t_1 = x * (x * (t_0 * t_0))
t_2 = 1.0d0 + (x * x)
code = 10.0d0 / (((1.0d0 - (t_1 * t_1)) / (t_2 * t_2)) / ((1.0d0 + (x * t_0)) * ((1.0d0 + t_1) / t_2)))
end function
public static double code(double x) {
double t_0 = x * (x * x);
double t_1 = x * (x * (t_0 * t_0));
double t_2 = 1.0 + (x * x);
return 10.0 / (((1.0 - (t_1 * t_1)) / (t_2 * t_2)) / ((1.0 + (x * t_0)) * ((1.0 + t_1) / t_2)));
}
def code(x): t_0 = x * (x * x) t_1 = x * (x * (t_0 * t_0)) t_2 = 1.0 + (x * x) return 10.0 / (((1.0 - (t_1 * t_1)) / (t_2 * t_2)) / ((1.0 + (x * t_0)) * ((1.0 + t_1) / t_2)))
function code(x) t_0 = Float64(x * Float64(x * x)) t_1 = Float64(x * Float64(x * Float64(t_0 * t_0))) t_2 = Float64(1.0 + Float64(x * x)) return Float64(10.0 / Float64(Float64(Float64(1.0 - Float64(t_1 * t_1)) / Float64(t_2 * t_2)) / Float64(Float64(1.0 + Float64(x * t_0)) * Float64(Float64(1.0 + t_1) / t_2)))) end
function tmp = code(x) t_0 = x * (x * x); t_1 = x * (x * (t_0 * t_0)); t_2 = 1.0 + (x * x); tmp = 10.0 / (((1.0 - (t_1 * t_1)) / (t_2 * t_2)) / ((1.0 + (x * t_0)) * ((1.0 + t_1) / t_2))); end
code[x_] := Block[{t$95$0 = N[(x * N[(x * x), $MachinePrecision]), $MachinePrecision]}, Block[{t$95$1 = N[(x * N[(x * N[(t$95$0 * t$95$0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]}, Block[{t$95$2 = N[(1.0 + N[(x * x), $MachinePrecision]), $MachinePrecision]}, N[(10.0 / N[(N[(N[(1.0 - N[(t$95$1 * t$95$1), $MachinePrecision]), $MachinePrecision] / N[(t$95$2 * t$95$2), $MachinePrecision]), $MachinePrecision] / N[(N[(1.0 + N[(x * t$95$0), $MachinePrecision]), $MachinePrecision] * N[(N[(1.0 + t$95$1), $MachinePrecision] / t$95$2), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := x \cdot \left(x \cdot x\right)\\
t_1 := x \cdot \left(x \cdot \left(t\_0 \cdot t\_0\right)\right)\\
t_2 := 1 + x \cdot x\\
\frac{10}{\frac{\frac{1 - t\_1 \cdot t\_1}{t\_2 \cdot t\_2}}{\left(1 + x \cdot t\_0\right) \cdot \frac{1 + t\_1}{t\_2}}}
\end{array}
\end{array}
Initial program 87.9%
sub-negN/A
+-commutativeN/A
distribute-lft-neg-inN/A
accelerator-lowering-fma.f64N/A
neg-sub0N/A
--lowering--.f6499.6%
Applied egg-rr99.6%
sub0-negN/A
neg-lowering-neg.f6499.6%
Applied egg-rr99.6%
Applied egg-rr88.8%
(FPCore (x)
:precision binary64
(let* ((t_0 (* x (* x (* x x)))))
(/
10.0
(/
1.0
(/
(* (+ 1.0 (* x x)) (+ 1.0 t_0))
(- 1.0 (* (* x x) (* x (* x t_0)))))))))
/* Rearranged evaluation of 10 / (1 - x^2): with t_0 = x^4,
 * 10 / (1 / ((1 + x^2)(1 + x^4) / (1 - x^8))) reduces to
 * 10 / (1 - x^2), since (1 - x^8) = (1 - x^2)(1 + x^2)(1 + x^4). */
double code(double x) {
double t_0 = x * (x * (x * x));
return 10.0 / (1.0 / (((1.0 + (x * x)) * (1.0 + t_0)) / (1.0 - ((x * x) * (x * (x * t_0))))));
}
! Rearranged evaluation of 10 / (1 - x**2): with t_0 = x**4,
! 10 / (1 / ((1 + x**2)*(1 + x**4) / (1 - x**8))) reduces to
! 10 / (1 - x**2), since (1 - x**8) = (1 - x**2)(1 + x**2)(1 + x**4).
real(8) function code(x)
real(8), intent (in) :: x
real(8) :: t_0
t_0 = x * (x * (x * x))
code = 10.0d0 / (1.0d0 / (((1.0d0 + (x * x)) * (1.0d0 + t_0)) / (1.0d0 - ((x * x) * (x * (x * t_0))))))
end function
public static double code(double x) {
double t_0 = x * (x * (x * x));
return 10.0 / (1.0 / (((1.0 + (x * x)) * (1.0 + t_0)) / (1.0 - ((x * x) * (x * (x * t_0))))));
}
def code(x): t_0 = x * (x * (x * x)) return 10.0 / (1.0 / (((1.0 + (x * x)) * (1.0 + t_0)) / (1.0 - ((x * x) * (x * (x * t_0))))))
function code(x) t_0 = Float64(x * Float64(x * Float64(x * x))) return Float64(10.0 / Float64(1.0 / Float64(Float64(Float64(1.0 + Float64(x * x)) * Float64(1.0 + t_0)) / Float64(1.0 - Float64(Float64(x * x) * Float64(x * Float64(x * t_0))))))) end
function tmp = code(x) t_0 = x * (x * (x * x)); tmp = 10.0 / (1.0 / (((1.0 + (x * x)) * (1.0 + t_0)) / (1.0 - ((x * x) * (x * (x * t_0)))))); end
code[x_] := Block[{t$95$0 = N[(x * N[(x * N[(x * x), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]}, N[(10.0 / N[(1.0 / N[(N[(N[(1.0 + N[(x * x), $MachinePrecision]), $MachinePrecision] * N[(1.0 + t$95$0), $MachinePrecision]), $MachinePrecision] / N[(1.0 - N[(N[(x * x), $MachinePrecision] * N[(x * N[(x * t$95$0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := x \cdot \left(x \cdot \left(x \cdot x\right)\right)\\
\frac{10}{\frac{1}{\frac{\left(1 + x \cdot x\right) \cdot \left(1 + t\_0\right)}{1 - \left(x \cdot x\right) \cdot \left(x \cdot \left(x \cdot t\_0\right)\right)}}}
\end{array}
\end{array}
Initial program 87.9%
Applied egg-rr88.3%
associate-*l*N/A
*-commutativeN/A
*-lowering-*.f64N/A
*-lowering-*.f64N/A
*-lowering-*.f64N/A
*-lowering-*.f64N/A
*-lowering-*.f6488.8%
Applied egg-rr88.8%
Final simplification88.8%
(FPCore (x) :precision binary64 (/ 10.0 (/ (+ (* x (* x (* x (* x (* x x))))) -1.0) (+ -1.0 (* x (* x (- -1.0 (* x x))))))))
/* Rearranged evaluation of 10 / (1 - x^2): the inner ratio
 * (x^6 - 1) / (-(1 + x^2 + x^4)) equals 1 - x^2 algebraically,
 * since x^6 - 1 = (x^2 - 1)(x^4 + x^2 + 1). */
double code(double x) {
return 10.0 / (((x * (x * (x * (x * (x * x))))) + -1.0) / (-1.0 + (x * (x * (-1.0 - (x * x))))));
}
! Rearranged evaluation of 10 / (1 - x**2): the inner ratio
! (x**6 - 1) / (-(1 + x**2 + x**4)) equals 1 - x**2 algebraically,
! since x**6 - 1 = (x**2 - 1)(x**4 + x**2 + 1).
real(8) function code(x)
real(8), intent (in) :: x
code = 10.0d0 / (((x * (x * (x * (x * (x * x))))) + (-1.0d0)) / ((-1.0d0) + (x * (x * ((-1.0d0) - (x * x))))))
end function
public static double code(double x) {
return 10.0 / (((x * (x * (x * (x * (x * x))))) + -1.0) / (-1.0 + (x * (x * (-1.0 - (x * x))))));
}
def code(x): return 10.0 / (((x * (x * (x * (x * (x * x))))) + -1.0) / (-1.0 + (x * (x * (-1.0 - (x * x))))))
function code(x) return Float64(10.0 / Float64(Float64(Float64(x * Float64(x * Float64(x * Float64(x * Float64(x * x))))) + -1.0) / Float64(-1.0 + Float64(x * Float64(x * Float64(-1.0 - Float64(x * x))))))) end
function tmp = code(x) tmp = 10.0 / (((x * (x * (x * (x * (x * x))))) + -1.0) / (-1.0 + (x * (x * (-1.0 - (x * x)))))); end
code[x_] := N[(10.0 / N[(N[(N[(x * N[(x * N[(x * N[(x * N[(x * x), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] + -1.0), $MachinePrecision] / N[(-1.0 + N[(x * N[(x * N[(-1.0 - N[(x * x), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{10}{\frac{x \cdot \left(x \cdot \left(x \cdot \left(x \cdot \left(x \cdot x\right)\right)\right)\right) + -1}{-1 + x \cdot \left(x \cdot \left(-1 - x \cdot x\right)\right)}}
\end{array}
Initial program 87.9%
Applied egg-rr88.3%
associate-*l*N/A
*-commutativeN/A
*-lowering-*.f64N/A
*-lowering-*.f64N/A
*-lowering-*.f64N/A
*-lowering-*.f64N/A
*-lowering-*.f6488.8%
Applied egg-rr88.8%
Final simplification88.8%
(FPCore (x) :precision binary64 (/ (* 10.0 (- -1.0 (* x x))) (+ (* x (* x (* x x))) -1.0)))
/* Rearranged evaluation of 10 / (1 - x^2):
 * 10(-1 - x^2) / (x^4 - 1) = 10 / (1 - x^2) algebraically,
 * after cancelling the common factor (1 + x^2). */
double code(double x) {
return (10.0 * (-1.0 - (x * x))) / ((x * (x * (x * x))) + -1.0);
}
! Rearranged evaluation of 10 / (1 - x**2):
! 10*(-1 - x**2) / (x**4 - 1) = 10 / (1 - x**2) algebraically,
! after cancelling the common factor (1 + x**2).
real(8) function code(x)
real(8), intent (in) :: x
code = (10.0d0 * ((-1.0d0) - (x * x))) / ((x * (x * (x * x))) + (-1.0d0))
end function
public static double code(double x) {
return (10.0 * (-1.0 - (x * x))) / ((x * (x * (x * x))) + -1.0);
}
def code(x): return (10.0 * (-1.0 - (x * x))) / ((x * (x * (x * x))) + -1.0)
function code(x) return Float64(Float64(10.0 * Float64(-1.0 - Float64(x * x))) / Float64(Float64(x * Float64(x * Float64(x * x))) + -1.0)) end
function tmp = code(x) tmp = (10.0 * (-1.0 - (x * x))) / ((x * (x * (x * x))) + -1.0); end
code[x_] := N[(N[(10.0 * N[(-1.0 - N[(x * x), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / N[(N[(x * N[(x * N[(x * x), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] + -1.0), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{10 \cdot \left(-1 - x \cdot x\right)}{x \cdot \left(x \cdot \left(x \cdot x\right)\right) + -1}
\end{array}
Initial program 87.9%
flip--N/A
associate-/r/N/A
associate-*l/N/A
frac-2negN/A
/-lowering-/.f64N/A
neg-lowering-neg.f64N/A
*-lowering-*.f64N/A
+-lowering-+.f64N/A
*-lowering-*.f64N/A
metadata-evalN/A
sub-negN/A
distribute-neg-inN/A
metadata-evalN/A
distribute-rgt-neg-inN/A
distribute-lft-neg-outN/A
sqr-negN/A
+-lowering-+.f64N/A
associate-*l*N/A
Applied egg-rr88.6%
Final simplification88.6%
(FPCore (x) :precision binary64 (/ 10.0 (/ (- 1.0 (* x (* x (* x x)))) (+ 1.0 (* x x)))))
/* Rearranged evaluation of 10 / (1 - x^2):
 * 10 / ((1 - x^4) / (1 + x^2)) = 10 / (1 - x^2) algebraically,
 * since 1 - x^4 = (1 - x^2)(1 + x^2). */
double code(double x) {
return 10.0 / ((1.0 - (x * (x * (x * x)))) / (1.0 + (x * x)));
}
! Rearranged evaluation of 10 / (1 - x**2):
! 10 / ((1 - x**4) / (1 + x**2)) = 10 / (1 - x**2) algebraically,
! since 1 - x**4 = (1 - x**2)(1 + x**2).
real(8) function code(x)
real(8), intent (in) :: x
code = 10.0d0 / ((1.0d0 - (x * (x * (x * x)))) / (1.0d0 + (x * x)))
end function
public static double code(double x) {
return 10.0 / ((1.0 - (x * (x * (x * x)))) / (1.0 + (x * x)));
}
def code(x): return 10.0 / ((1.0 - (x * (x * (x * x)))) / (1.0 + (x * x)))
function code(x) return Float64(10.0 / Float64(Float64(1.0 - Float64(x * Float64(x * Float64(x * x)))) / Float64(1.0 + Float64(x * x)))) end
function tmp = code(x) tmp = 10.0 / ((1.0 - (x * (x * (x * x)))) / (1.0 + (x * x))); end
code[x_] := N[(10.0 / N[(N[(1.0 - N[(x * N[(x * N[(x * x), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / N[(1.0 + N[(x * x), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{10}{\frac{1 - x \cdot \left(x \cdot \left(x \cdot x\right)\right)}{1 + x \cdot x}}
\end{array}
Initial program 87.9%
sub-negN/A
+-commutativeN/A
distribute-lft-neg-inN/A
accelerator-lowering-fma.f64N/A
neg-sub0N/A
--lowering--.f6499.6%
Applied egg-rr99.6%
+-commutativeN/A
sub0-negN/A
cancel-sign-sub-invN/A
flip--N/A
metadata-evalN/A
associate-*r*N/A
/-lowering-/.f64N/A
--lowering--.f64N/A
*-lowering-*.f64N/A
*-lowering-*.f64N/A
*-lowering-*.f64N/A
+-lowering-+.f64N/A
*-lowering-*.f6488.6%
Applied egg-rr88.6%
(FPCore (x) :precision binary64 (if (<= (* x x) 1.0) 10.0 (/ -10.0 (* x x))))
/* Piecewise approximation of 10 / (1 - x^2):
 * returns the constant 10 when x^2 <= 1, and the asymptotic
 * form -10 / x^2 otherwise.  Cheap, but very coarse near
 * |x| = 1, where the true value diverges. */
double code(double x) {
double tmp;
if ((x * x) <= 1.0) {
tmp = 10.0;
} else {
tmp = -10.0 / (x * x);
}
return tmp;
}
! Piecewise approximation of 10 / (1 - x**2):
! returns the constant 10 when x**2 <= 1, and the asymptotic
! form -10/x**2 otherwise.  Cheap, but very coarse near
! |x| = 1, where the true value diverges.
real(8) function code(x)
real(8), intent (in) :: x
real(8) :: tmp
if ((x * x) <= 1.0d0) then
tmp = 10.0d0
else
tmp = (-10.0d0) / (x * x)
end if
code = tmp
end function
public static double code(double x) {
double tmp;
if ((x * x) <= 1.0) {
tmp = 10.0;
} else {
tmp = -10.0 / (x * x);
}
return tmp;
}
def code(x): tmp = 0 if (x * x) <= 1.0: tmp = 10.0 else: tmp = -10.0 / (x * x) return tmp
function code(x) tmp = 0.0 if (Float64(x * x) <= 1.0) tmp = 10.0; else tmp = Float64(-10.0 / Float64(x * x)); end return tmp end
function tmp_2 = code(x) tmp = 0.0; if ((x * x) <= 1.0) tmp = 10.0; else tmp = -10.0 / (x * x); end tmp_2 = tmp; end
code[x_] := If[LessEqual[N[(x * x), $MachinePrecision], 1.0], 10.0, N[(-10.0 / N[(x * x), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;x \cdot x \leq 1:\\
\;\;\;\;10\\
\mathbf{else}:\\
\;\;\;\;\frac{-10}{x \cdot x}\\
\end{array}
\end{array}
if (*.f64 x x) < 1Initial program 88.4%
Taylor expanded in x around 0
Simplified13.5%
if 1 < (*.f64 x x) Initial program 86.8%
Taylor expanded in x around inf
/-lowering-/.f64N/A
unpow2N/A
*-lowering-*.f6413.5%
Simplified13.5%
(FPCore (x) :precision binary64 (/ 1.0 (/ (- 1.0 (* x x)) 10.0)))
/* Computes 10 / (1 - x^2) as 1 / ((1 - x^2) / 10):
 * the denominator is pre-divided by 10, then inverted. */
double code(double x) {
return 1.0 / ((1.0 - (x * x)) / 10.0);
}
! Computes 10 / (1 - x**2) as 1 / ((1 - x**2) / 10):
! the denominator is pre-divided by 10, then inverted.
real(8) function code(x)
real(8), intent (in) :: x
code = 1.0d0 / ((1.0d0 - (x * x)) / 10.0d0)
end function
public static double code(double x) {
return 1.0 / ((1.0 - (x * x)) / 10.0);
}
def code(x): return 1.0 / ((1.0 - (x * x)) / 10.0)
function code(x) return Float64(1.0 / Float64(Float64(1.0 - Float64(x * x)) / 10.0)) end
function tmp = code(x) tmp = 1.0 / ((1.0 - (x * x)) / 10.0); end
code[x_] := N[(1.0 / N[(N[(1.0 - N[(x * x), $MachinePrecision]), $MachinePrecision] / 10.0), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{1}{\frac{1 - x \cdot x}{10}}
\end{array}
Initial program 87.9%
Applied egg-rr88.3%
Applied egg-rr88.3%
distribute-lft-neg-inN/A
un-div-invN/A
associate-*r/N/A
/-lowering-/.f64N/A
distribute-lft-neg-outN/A
neg-sub0N/A
--lowering--.f64N/A
*-lowering-*.f64N/A
/-lowering-/.f64N/A
+-lowering-+.f64N/A
*-lowering-*.f6488.3%
Applied egg-rr88.3%
Applied egg-rr88.0%
(FPCore (x) :precision binary64 (/ 10.0 (- 1.0 (* x x))))
/* Computes 10 / (1 - x^2) in binary64; the denominator vanishes at x = +/-1. */
double code(double x) {
return 10.0 / (1.0 - (x * x));
}
! Evaluates 10 / (1 - x**2) in double precision;
! the denominator 1.0d0 - x*x vanishes at x = +/-1.
real(8) function code(x)
real(8), intent (in) :: x
code = 10.0d0 / (1.0d0 - (x * x))
end function
public static double code(double x) {
return 10.0 / (1.0 - (x * x));
}
def code(x): return 10.0 / (1.0 - (x * x))
function code(x) return Float64(10.0 / Float64(1.0 - Float64(x * x))) end
function tmp = code(x) tmp = 10.0 / (1.0 - (x * x)); end
code[x_] := N[(10.0 / N[(1.0 - N[(x * x), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{10}{1 - x \cdot x}
\end{array}
Initial program 87.9%
(FPCore (x) :precision binary64 10.0)
/* Degenerate constant approximation: always returns 10
 * (the value of 10 / (1 - x^2) at x = 0); x is ignored. */
double code(double x) {
return 10.0;
}
! Degenerate constant approximation: always returns 10
! (the value of 10 / (1 - x**2) at x = 0); x is ignored.
real(8) function code(x)
real(8), intent (in) :: x
code = 10.0d0
end function
public static double code(double x) {
return 10.0;
}
def code(x): return 10.0
function code(x) return 10.0 end
function tmp = code(x) tmp = 10.0; end
code[x_] := 10.0
\begin{array}{l}
\\
10
\end{array}
Initial program 87.9%
Taylor expanded in x around 0
Simplified9.8%
herbie shell --seed 2024192
(FPCore (x)
:name "ENA, Section 1.4, Mentioned, B"
:precision binary64
:pre (and (<= 0.999 x) (<= x 1.001))
(/ 10.0 (- 1.0 (* x x))))