
(FPCore (x) :precision binary64 (/ 2.0 (+ (exp x) (exp (- x)))))
double code(double x) {
return 2.0 / (exp(x) + exp(-x));
}
! Hyperbolic secant: code(x) = 2 / (exp(x) + exp(-x)).  Herbie initial program.
! NOTE(review): exp overflows for large |x|, so the quotient flushes to 0 there.
real(8) function code(x)
real(8), intent (in) :: x
code = 2.0d0 / (exp(x) + exp(-x))
end function
public static double code(double x) {
return 2.0 / (Math.exp(x) + Math.exp(-x));
}
def code(x): return 2.0 / (math.exp(x) + math.exp(-x))
function code(x) return Float64(2.0 / Float64(exp(x) + exp(Float64(-x)))) end
function tmp = code(x) tmp = 2.0 / (exp(x) + exp(-x)); end
code[x_] := N[(2.0 / N[(N[Exp[x], $MachinePrecision] + N[Exp[(-x)], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{2}{e^{x} + e^{-x}}
\end{array}
Sampling outcomes in binary64 precision:
Herbie found 11 alternatives:
| Alternative | Accuracy | Speedup |
|---|---|---|
(FPCore (x) :precision binary64 (/ 2.0 (+ (exp x) (exp (- x)))))
double code(double x) {
return 2.0 / (exp(x) + exp(-x));
}
! Hyperbolic secant 2 / (exp(x) + exp(-x)); verbatim repeat of the initial
! program inside the report's alternatives listing.
real(8) function code(x)
real(8), intent (in) :: x
code = 2.0d0 / (exp(x) + exp(-x))
end function
public static double code(double x) {
return 2.0 / (Math.exp(x) + Math.exp(-x));
}
def code(x): return 2.0 / (math.exp(x) + math.exp(-x))
function code(x) return Float64(2.0 / Float64(exp(x) + exp(Float64(-x)))) end
function tmp = code(x) tmp = 2.0 / (exp(x) + exp(-x)); end
code[x_] := N[(2.0 / N[(N[Exp[x], $MachinePrecision] + N[Exp[(-x)], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{2}{e^{x} + e^{-x}}
\end{array}
(FPCore (x) :precision binary64 (/ 2.0 (+ (exp x) (exp (- x)))))
double code(double x) {
return 2.0 / (exp(x) + exp(-x));
}
! Hyperbolic secant 2 / (exp(x) + exp(-x)); another verbatim repeat of the
! initial program (Herbie lists it as a baseline alternative).
real(8) function code(x)
real(8), intent (in) :: x
code = 2.0d0 / (exp(x) + exp(-x))
end function
public static double code(double x) {
return 2.0 / (Math.exp(x) + Math.exp(-x));
}
def code(x): return 2.0 / (math.exp(x) + math.exp(-x))
function code(x) return Float64(2.0 / Float64(exp(x) + exp(Float64(-x)))) end
function tmp = code(x) tmp = 2.0 / (exp(x) + exp(-x)); end
code[x_] := N[(2.0 / N[(N[Exp[x], $MachinePrecision] + N[Exp[(-x)], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{2}{e^{x} + e^{-x}}
\end{array}
Initial program 100.0%
(FPCore (x) :precision binary64 (/ 2.0 (+ (exp x) (- 1.0 x))))
double code(double x) {
return 2.0 / (exp(x) + (1.0 - x));
}
! Alternative: exp(-x) replaced by its first-order Taylor series 1 - x
! (report trace: "Taylor expanded in x around 0", 76.0% accuracy).
real(8) function code(x)
real(8), intent (in) :: x
code = 2.0d0 / (exp(x) + (1.0d0 - x))
end function
public static double code(double x) {
return 2.0 / (Math.exp(x) + (1.0 - x));
}
def code(x): return 2.0 / (math.exp(x) + (1.0 - x))
function code(x) return Float64(2.0 / Float64(exp(x) + Float64(1.0 - x))) end
function tmp = code(x) tmp = 2.0 / (exp(x) + (1.0 - x)); end
code[x_] := N[(2.0 / N[(N[Exp[x], $MachinePrecision] + N[(1.0 - x), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{2}{e^{x} + \left(1 - x\right)}
\end{array}
Initial program 100.0%
Taylor expanded in x around 0 76.0%
mul-1-neg 76.0%
unsub-neg 76.0%
Simplified 76.0%
(FPCore (x) :precision binary64 (/ 2.0 (+ (exp x) 1.0)))
double code(double x) {
return 2.0 / (exp(x) + 1.0);
}
! Alternative: exp(-x) replaced by its zeroth-order Taylor term 1,
! giving the logistic-like form 2 / (exp(x) + 1).
real(8) function code(x)
real(8), intent (in) :: x
code = 2.0d0 / (exp(x) + 1.0d0)
end function
public static double code(double x) {
return 2.0 / (Math.exp(x) + 1.0);
}
def code(x): return 2.0 / (math.exp(x) + 1.0)
function code(x) return Float64(2.0 / Float64(exp(x) + 1.0)) end
function tmp = code(x) tmp = 2.0 / (exp(x) + 1.0); end
code[x_] := N[(2.0 / N[(N[Exp[x], $MachinePrecision] + 1.0), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{2}{e^{x} + 1}
\end{array}
Initial program 100.0%
Taylor expanded in x around 0 76.0%
mul-1-neg 76.0%
unsub-neg 76.0%
Simplified 76.0%
Taylor expanded in x around 0 75.2%
(FPCore (x) :precision binary64 (/ 2.0 (+ (- 1.0 x) (+ 1.0 (* x (+ 1.0 (* x (+ 0.5 (* x 0.16666666666666666)))))))))
double code(double x) {
return 2.0 / ((1.0 - x) + (1.0 + (x * (1.0 + (x * (0.5 + (x * 0.16666666666666666)))))));
}
! Alternative: exp(-x) ~ 1 - x and exp(x) ~ cubic Horner series
! 1 + x*(1 + x*(1/2 + x/6)); 0.16666666666666666 is 1/6 in binary64.
real(8) function code(x)
real(8), intent (in) :: x
code = 2.0d0 / ((1.0d0 - x) + (1.0d0 + (x * (1.0d0 + (x * (0.5d0 + (x * 0.16666666666666666d0)))))))
end function
public static double code(double x) {
return 2.0 / ((1.0 - x) + (1.0 + (x * (1.0 + (x * (0.5 + (x * 0.16666666666666666)))))));
}
def code(x): return 2.0 / ((1.0 - x) + (1.0 + (x * (1.0 + (x * (0.5 + (x * 0.16666666666666666)))))))
function code(x) return Float64(2.0 / Float64(Float64(1.0 - x) + Float64(1.0 + Float64(x * Float64(1.0 + Float64(x * Float64(0.5 + Float64(x * 0.16666666666666666)))))))) end
function tmp = code(x) tmp = 2.0 / ((1.0 - x) + (1.0 + (x * (1.0 + (x * (0.5 + (x * 0.16666666666666666))))))); end
code[x_] := N[(2.0 / N[(N[(1.0 - x), $MachinePrecision] + N[(1.0 + N[(x * N[(1.0 + N[(x * N[(0.5 + N[(x * 0.16666666666666666), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{2}{\left(1 - x\right) + \left(1 + x \cdot \left(1 + x \cdot \left(0.5 + x \cdot 0.16666666666666666\right)\right)\right)}
\end{array}
Initial program 100.0%
Taylor expanded in x around 0 76.0%
mul-1-neg 76.0%
unsub-neg 76.0%
Simplified 76.0%
Taylor expanded in x around 0 84.6%
*-lft-identity 84.6%
*-lft-identity 84.6%
*-commutative 84.6%
Simplified 84.6%
Final simplification 84.6%
(FPCore (x) :precision binary64 (/ 2.0 (+ 2.0 (* x (+ 1.0 (* x (+ 0.5 (* x 0.16666666666666666))))))))
double code(double x) {
return 2.0 / (2.0 + (x * (1.0 + (x * (0.5 + (x * 0.16666666666666666))))));
}
! Alternative: previous series with (1 - x) + 1 folded into the constant 2,
! leaving 2 / (2 + x*(1 + x*(1/2 + x/6))).
real(8) function code(x)
real(8), intent (in) :: x
code = 2.0d0 / (2.0d0 + (x * (1.0d0 + (x * (0.5d0 + (x * 0.16666666666666666d0))))))
end function
public static double code(double x) {
return 2.0 / (2.0 + (x * (1.0 + (x * (0.5 + (x * 0.16666666666666666))))));
}
def code(x): return 2.0 / (2.0 + (x * (1.0 + (x * (0.5 + (x * 0.16666666666666666))))))
function code(x) return Float64(2.0 / Float64(2.0 + Float64(x * Float64(1.0 + Float64(x * Float64(0.5 + Float64(x * 0.16666666666666666))))))) end
function tmp = code(x) tmp = 2.0 / (2.0 + (x * (1.0 + (x * (0.5 + (x * 0.16666666666666666)))))); end
code[x_] := N[(2.0 / N[(2.0 + N[(x * N[(1.0 + N[(x * N[(0.5 + N[(x * 0.16666666666666666), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{2}{2 + x \cdot \left(1 + x \cdot \left(0.5 + x \cdot 0.16666666666666666\right)\right)}
\end{array}
Initial program 100.0%
Taylor expanded in x around 0 76.0%
mul-1-neg 76.0%
unsub-neg 76.0%
Simplified 76.0%
Taylor expanded in x around 0 75.2%
Taylor expanded in x around 0 84.2%
Final simplification 84.2%
(FPCore (x) :precision binary64 (/ 2.0 (+ 2.0 (* x (+ 1.0 (* x (* x 0.16666666666666666)))))))
double code(double x) {
return 2.0 / (2.0 + (x * (1.0 + (x * (x * 0.16666666666666666)))));
}
! Alternative: like the cubic form above but with the 0.5*x**2 term dropped,
! i.e. 2 / (2 + x*(1 + x*x/6)).
real(8) function code(x)
real(8), intent (in) :: x
code = 2.0d0 / (2.0d0 + (x * (1.0d0 + (x * (x * 0.16666666666666666d0)))))
end function
public static double code(double x) {
return 2.0 / (2.0 + (x * (1.0 + (x * (x * 0.16666666666666666)))));
}
def code(x): return 2.0 / (2.0 + (x * (1.0 + (x * (x * 0.16666666666666666)))))
function code(x) return Float64(2.0 / Float64(2.0 + Float64(x * Float64(1.0 + Float64(x * Float64(x * 0.16666666666666666)))))) end
function tmp = code(x) tmp = 2.0 / (2.0 + (x * (1.0 + (x * (x * 0.16666666666666666))))); end
code[x_] := N[(2.0 / N[(2.0 + N[(x * N[(1.0 + N[(x * N[(x * 0.16666666666666666), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{2}{2 + x \cdot \left(1 + x \cdot \left(x \cdot 0.16666666666666666\right)\right)}
\end{array}
Initial program 100.0%
Taylor expanded in x around 0 76.0%
mul-1-neg 76.0%
unsub-neg 76.0%
Simplified 76.0%
Taylor expanded in x around 0 75.2%
Taylor expanded in x around 0 84.2%
Taylor expanded in x around inf 84.2%
*-commutative 84.2%
Simplified 84.2%
(FPCore (x) :precision binary64 (/ 2.0 (+ 2.0 (* x (+ 1.0 (* x 0.5))))))
double code(double x) {
return 2.0 / (2.0 + (x * (1.0 + (x * 0.5))));
}
! Alternative: quadratic truncation 2 / (2 + x*(1 + x/2)).
real(8) function code(x)
real(8), intent (in) :: x
code = 2.0d0 / (2.0d0 + (x * (1.0d0 + (x * 0.5d0))))
end function
public static double code(double x) {
return 2.0 / (2.0 + (x * (1.0 + (x * 0.5))));
}
def code(x): return 2.0 / (2.0 + (x * (1.0 + (x * 0.5))))
function code(x) return Float64(2.0 / Float64(2.0 + Float64(x * Float64(1.0 + Float64(x * 0.5))))) end
function tmp = code(x) tmp = 2.0 / (2.0 + (x * (1.0 + (x * 0.5)))); end
code[x_] := N[(2.0 / N[(2.0 + N[(x * N[(1.0 + N[(x * 0.5), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{2}{2 + x \cdot \left(1 + x \cdot 0.5\right)}
\end{array}
Initial program 100.0%
Taylor expanded in x around 0 76.0%
mul-1-neg 76.0%
unsub-neg 76.0%
Simplified 76.0%
Taylor expanded in x around 0 75.2%
Taylor expanded in x around 0 75.3%
Final simplification 75.3%
(FPCore (x) :precision binary64 (if (<= x 2.45) 1.0 (/ (/ 6.0 x) x)))
double code(double x) {
double tmp;
if (x <= 2.45) {
tmp = 1.0;
} else {
tmp = (6.0 / x) / x;
}
return tmp;
}
! Piecewise alternative split at the regime boundary x = 2.45:
!   x <= 2.45 -> constant 1; otherwise -> 6 / x**2 (computed as (6/x)/x).
real(8) function code(x)
real(8), intent (in) :: x
real(8) :: tmp
if (x <= 2.45d0) then
tmp = 1.0d0
else
tmp = (6.0d0 / x) / x
end if
code = tmp
end function
public static double code(double x) {
double tmp;
if (x <= 2.45) {
tmp = 1.0;
} else {
tmp = (6.0 / x) / x;
}
return tmp;
}
def code(x): tmp = 0 if x <= 2.45: tmp = 1.0 else: tmp = (6.0 / x) / x return tmp
function code(x) tmp = 0.0 if (x <= 2.45) tmp = 1.0; else tmp = Float64(Float64(6.0 / x) / x); end return tmp end
function tmp_2 = code(x) tmp = 0.0; if (x <= 2.45) tmp = 1.0; else tmp = (6.0 / x) / x; end tmp_2 = tmp; end
code[x_] := If[LessEqual[x, 2.45], 1.0, N[(N[(6.0 / x), $MachinePrecision] / x), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;x \leq 2.45:\\
\;\;\;\;1\\
\mathbf{else}:\\
\;\;\;\;\frac{\frac{6}{x}}{x}\\
\end{array}
\end{array}
if x < 2.4500000000000002: Initial program 100.0%
Taylor expanded in x around 0 68.5%
mul-1-neg 68.5%
unsub-neg 68.5%
Simplified 68.5%
Taylor expanded in x around 0 67.4%
Taylor expanded in x around 0 67.9%
if 2.4500000000000002 < x Initial program 100.0%
Taylor expanded in x around 0 98.9%
mul-1-neg 98.9%
unsub-neg 98.9%
Simplified 98.9%
Applied egg-rr 5.4%
Taylor expanded in x around inf 5.4%
sub-neg 5.4%
associate-*r/ 5.4%
metadata-eval 5.4%
metadata-eval 5.4%
Simplified 5.4%
Taylor expanded in x around 0 49.7%
(FPCore (x) :precision binary64 (/ 2.0 (+ 2.0 x)))
double code(double x) {
return 2.0 / (2.0 + x);
}
! Alternative: first-order truncation 2 / (2 + x).
real(8) function code(x)
real(8), intent (in) :: x
code = 2.0d0 / (2.0d0 + x)
end function
public static double code(double x) {
return 2.0 / (2.0 + x);
}
def code(x): return 2.0 / (2.0 + x)
function code(x) return Float64(2.0 / Float64(2.0 + x)) end
function tmp = code(x) tmp = 2.0 / (2.0 + x); end
code[x_] := N[(2.0 / N[(2.0 + x), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{2}{2 + x}
\end{array}
Initial program 100.0%
Taylor expanded in x around 0 76.0%
mul-1-neg 76.0%
unsub-neg 76.0%
Simplified 76.0%
Taylor expanded in x around 0 75.2%
Taylor expanded in x around 0 52.7%
+-commutative 52.7%
Simplified 52.7%
Final simplification 52.7%
(FPCore (x) :precision binary64 1.0)
double code(double x) {
return 1.0;
}
! Degenerate alternative: constant 1 (the value of sech at x = 0);
! the argument is intentionally unused.
real(8) function code(x)
real(8), intent (in) :: x
code = 1.0d0
end function
public static double code(double x) {
return 1.0;
}
def code(x): return 1.0
function code(x) return 1.0 end
function tmp = code(x) tmp = 1.0; end
code[x_] := 1.0
\begin{array}{l}
\\
1
\end{array}
Initial program 100.0%
Taylor expanded in x around 0 76.0%
mul-1-neg 76.0%
unsub-neg 76.0%
Simplified 76.0%
Taylor expanded in x around 0 75.2%
Taylor expanded in x around 0 52.0%
(FPCore (x) :precision binary64 -0.6666666666666666)
double code(double x) {
return -0.6666666666666666;
}
! Degenerate alternative: constant -2/3 (report trace: egg-rr rewrite, 3.4%
! accuracy); the argument is intentionally unused.
real(8) function code(x)
real(8), intent (in) :: x
code = -0.6666666666666666d0
end function
public static double code(double x) {
return -0.6666666666666666;
}
def code(x): return -0.6666666666666666
function code(x) return -0.6666666666666666 end
function tmp = code(x) tmp = -0.6666666666666666; end
code[x_] := -0.6666666666666666
\begin{array}{l}
\\
-0.6666666666666666
\end{array}
Initial program 100.0%
Taylor expanded in x around 0 76.0%
mul-1-neg 76.0%
unsub-neg 76.0%
Simplified 76.0%
Applied egg-rr 3.4%
Taylor expanded in x around 0 2.3%
herbie shell --seed 2024110
; Herbie input program: sech(x) = 2 / (exp(x) + exp(-x)) in binary64.
(FPCore (x)
:name "Hyperbolic secant"
:precision binary64
(/ 2.0 (+ (exp x) (exp (- x)))))