
Initial program:
(FPCore (x) :precision binary64 (let* ((t_0 (exp (- x)))) (/ (- (exp x) t_0) (+ (exp x) t_0))))
double code(double x) {
double t_0 = exp(-x);
return (exp(x) - t_0) / (exp(x) + t_0);
}
real(8) function code(x)
real(8), intent (in) :: x
real(8) :: t_0
t_0 = exp(-x)
code = (exp(x) - t_0) / (exp(x) + t_0)
end function
public static double code(double x) {
double t_0 = Math.exp(-x);
return (Math.exp(x) - t_0) / (Math.exp(x) + t_0);
}
def code(x):
    t_0 = math.exp(-x)
    return (math.exp(x) - t_0) / (math.exp(x) + t_0)
function code(x)
    t_0 = exp(Float64(-x))
    return Float64(Float64(exp(x) - t_0) / Float64(exp(x) + t_0))
end
function tmp = code(x)
    t_0 = exp(-x);
    tmp = (exp(x) - t_0) / (exp(x) + t_0);
end
code[x_] := Block[{t$95$0 = N[Exp[(-x)], $MachinePrecision]}, N[(N[(N[Exp[x], $MachinePrecision] - t$95$0), $MachinePrecision] / N[(N[Exp[x], $MachinePrecision] + t$95$0), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
t_0 := e^{-x}\\
\frac{e^{x} - t_0}{e^{x} + t_0}
\end{array}
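The initial program is numerically unstable for large |x|: exp(x) overflows binary64 long before tanh saturates, which is consistent with the 7.6% accuracy reported below. A minimal Python sketch (not part of the Herbie output; naive_tanh is my name for the initial program) illustrates the failure against the library tanh:

import math

def naive_tanh(x):
    # Direct translation of the initial program above
    t_0 = math.exp(-x)
    return (math.exp(x) - t_0) / (math.exp(x) + t_0)

for x in [1.0, 20.0, 400.0]:
    try:
        print(x, naive_tanh(x), math.tanh(x))
    except OverflowError:
        # CPython's math.exp raises OverflowError past the binary64 range
        print(x, "exp overflows", math.tanh(x))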
Sampling outcomes in binary64 precision
Herbie found 6 alternatives:
| Alternative | Accuracy | Speedup |
|---|---|---|
| 1: tanh(x) | 100.0% | |
| 2: x over a degree-6 fma polynomial | 98.8% | |
| 3: degree-5 fma polynomial | 98.8% | |
| 4: x / fma(x, x/3, 1) | 98.8% | |
| 5: degree-3 fma polynomial | 98.6% | |
| 6: x | 98.4% | |
Alternative 1 (100.0% accurate):
(FPCore (x) :precision binary64 (tanh x))
double code(double x) {
return tanh(x);
}
real(8) function code(x)
real(8), intent (in) :: x
code = tanh(x)
end function
public static double code(double x) {
return Math.tanh(x);
}
def code(x):
    return math.tanh(x)
function code(x)
    return tanh(x)
end
function tmp = code(x)
    tmp = tanh(x);
end
code[x_] := N[Tanh[x], $MachinePrecision]
\tanh x
Derivation:
Initial program: 7.6%
tanh-undef: N/A
tanh-lowering-tanh.f64: 100.0%
Applied egg-rr: 100.0%
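The tanh-undef step appears to apply the defining identity of the hyperbolic tangent, which recovers the library function exactly:

\tanh x = \frac{e^{x} - e^{-x}}{e^{x} + e^{-x}}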
Alternative 2 (98.8% accurate):
(FPCore (x)
:precision binary64
(/
x
(fma
(* x x)
(fma
x
(* x (fma x (* x 0.0021164021164021165) -0.022222222222222223))
0.3333333333333333)
1.0)))
double code(double x) {
return x / fma((x * x), fma(x, (x * fma(x, (x * 0.0021164021164021165), -0.022222222222222223)), 0.3333333333333333), 1.0);
}
function code(x)
    return Float64(x / fma(Float64(x * x), fma(x, Float64(x * fma(x, Float64(x * 0.0021164021164021165), -0.022222222222222223)), 0.3333333333333333), 1.0))
end
code[x_] := N[(x / N[(N[(x * x), $MachinePrecision] * N[(x * N[(x * N[(x * N[(x * 0.0021164021164021165), $MachinePrecision] + -0.022222222222222223), $MachinePrecision]), $MachinePrecision] + 0.3333333333333333), $MachinePrecision] + 1.0), $MachinePrecision]), $MachinePrecision]
\frac{x}{\mathsf{fma}\left(x \cdot x, \mathsf{fma}\left(x, x \cdot \mathsf{fma}\left(x, x \cdot 0.0021164021164021165, -0.022222222222222223\right), 0.3333333333333333\right), 1\right)}
Derivation:
Initial program: 7.6%
clear-num: N/A
/-lowering-/.f64: N/A
clear-num: N/A
/-lowering-/.f64: N/A
tanh-undef: N/A
tanh-lowering-tanh.f64: 99.7%
Applied egg-rr: 99.7%
Taylor expanded in x around 0
/-lowering-/.f64: N/A
+-commutative: N/A
accelerator-lowering-fma.f64: N/A
unpow2: N/A
*-lowering-*.f64: N/A
+-commutative: N/A
accelerator-lowering-fma.f64: N/A
unpow2: N/A
*-lowering-*.f64: N/A
sub-neg: N/A
*-commutative: N/A
metadata-eval: N/A
accelerator-lowering-fma.f64: N/A
unpow2: N/A
*-lowering-*.f64: 98.6%
Simplified: 98.6%
clear-num: N/A
/-lowering-/.f64: N/A
accelerator-lowering-fma.f64: N/A
*-lowering-*.f64: N/A
associate-*l*: N/A
accelerator-lowering-fma.f64: N/A
*-lowering-*.f64: N/A
associate-*l*: N/A
accelerator-lowering-fma.f64: N/A
*-lowering-*.f64: 98.8%
Applied egg-rr: 98.8%
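The constants in this alternative are recognizable: 0.3333333333333333, -0.022222222222222223, and 0.0021164021164021165 match 1/3, -1/45, and 2/945, the leading coefficients of the expansion of x / tanh x around 0 that the Taylor step produces:

\frac{x}{\tanh x} = 1 + \frac{x^{2}}{3} - \frac{x^{4}}{45} + \frac{2 x^{6}}{945} - \cdots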
Alternative 3 (98.8% accurate):
(FPCore (x) :precision binary64 (fma (fma x (* x 0.13333333333333333) -0.3333333333333333) (* x (* x x)) x))
double code(double x) {
return fma(fma(x, (x * 0.13333333333333333), -0.3333333333333333), (x * (x * x)), x);
}
function code(x)
    return fma(fma(x, Float64(x * 0.13333333333333333), -0.3333333333333333), Float64(x * Float64(x * x)), x)
end
code[x_] := N[(N[(x * N[(x * 0.13333333333333333), $MachinePrecision] + -0.3333333333333333), $MachinePrecision] * N[(x * N[(x * x), $MachinePrecision]), $MachinePrecision] + x), $MachinePrecision]
\mathsf{fma}\left(\mathsf{fma}\left(x, x \cdot 0.13333333333333333, -0.3333333333333333\right), x \cdot \left(x \cdot x\right), x\right)
Derivation:
Initial program: 7.6%
Taylor expanded in x around 0
+-commutative: N/A
distribute-rgt-in: N/A
*-lft-identity: N/A
*-commutative: N/A
associate-*r*: N/A
*-commutative: N/A
accelerator-lowering-fma.f64: N/A
sub-neg: N/A
unpow2: N/A
associate-*r*: N/A
*-commutative: N/A
metadata-eval: N/A
accelerator-lowering-fma.f64: N/A
*-commutative: N/A
*-lowering-*.f64: N/A
*-lowering-*.f64: N/A
unpow2: N/A
*-lowering-*.f64: 98.8%
Simplified: 98.8%
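This alternative is the degree-5 Taylor polynomial of tanh itself; 0.13333333333333333 is 2/15:

\tanh x = x - \frac{x^{3}}{3} + \frac{2 x^{5}}{15} - \cdots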
Alternative 4 (98.8% accurate):
(FPCore (x) :precision binary64 (/ x (fma x (* x 0.3333333333333333) 1.0)))
double code(double x) {
return x / fma(x, (x * 0.3333333333333333), 1.0);
}
function code(x)
    return Float64(x / fma(x, Float64(x * 0.3333333333333333), 1.0))
end
code[x_] := N[(x / N[(x * N[(x * 0.3333333333333333), $MachinePrecision] + 1.0), $MachinePrecision]), $MachinePrecision]
\frac{x}{\mathsf{fma}\left(x, x \cdot 0.3333333333333333, 1\right)}
Derivation:
Initial program: 7.6%
clear-num: N/A
/-lowering-/.f64: N/A
clear-num: N/A
/-lowering-/.f64: N/A
tanh-undef: N/A
tanh-lowering-tanh.f64: 99.7%
Applied egg-rr: 99.7%
Taylor expanded in x around 0
/-lowering-/.f64: N/A
+-commutative: N/A
*-commutative: N/A
accelerator-lowering-fma.f64: N/A
unpow2: N/A
*-lowering-*.f64: 98.5%
Simplified: 98.5%
clear-num: N/A
/-lowering-/.f64: N/A
associate-*l*: N/A
accelerator-lowering-fma.f64: N/A
*-lowering-*.f64: 98.8%
Applied egg-rr: 98.8%
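Truncating the x / tanh x series after the quadratic term gives this alternative, which, if I recall correctly, is also the [1/2] Padé approximant of tanh at 0:

\tanh x \approx \frac{x}{1 + x^{2}/3}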
Alternative 5 (98.6% accurate):
(FPCore (x) :precision binary64 (fma x (* (* x x) -0.3333333333333333) x))
double code(double x) {
return fma(x, ((x * x) * -0.3333333333333333), x);
}
function code(x)
    return fma(x, Float64(Float64(x * x) * -0.3333333333333333), x)
end
code[x_] := N[(x * N[(N[(x * x), $MachinePrecision] * -0.3333333333333333), $MachinePrecision] + x), $MachinePrecision]
\mathsf{fma}\left(x, \left(x \cdot x\right) \cdot -0.3333333333333333, x\right)
Derivation:
Initial program: 7.6%
Taylor expanded in x around 0
+-commutative: N/A
distribute-lft-in: N/A
*-rgt-identity: N/A
accelerator-lowering-fma.f64: N/A
*-lowering-*.f64: N/A
unpow2: N/A
*-lowering-*.f64: 98.6%
Simplified: 98.6%
Final simplification: 98.6%
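This is the same Taylor series truncated at the cubic term, computed with a single fma:

\tanh x \approx x - \frac{x^{3}}{3}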
Alternative 6 (98.4% accurate):
(FPCore (x) :precision binary64 x)
double code(double x) {
return x;
}
real(8) function code(x)
real(8), intent (in) :: x
code = x
end function
public static double code(double x) {
return x;
}
def code(x):
    return x
function code(x)
    return x
end
function tmp = code(x)
    tmp = x;
end
code[x_] := x
x
Derivation:
Initial program: 7.6%
Taylor expanded in x around 0
Simplified: 98.4%
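To see how the cheaper alternatives degrade away from 0, here is a quick Python comparison (my code, not Herbie's; plain multiply-add stands in for fma, so the last few bits can differ from the f64 fma versions):

import math

def alt4(x):
    # x / (1 + x^2/3), alternative 4
    return x / (x * (x * 0.3333333333333333) + 1.0)

def alt5(x):
    # x - x^3/3, alternative 5
    return x * ((x * x) * -0.3333333333333333) + x

def alt6(x):
    # the identity, alternative 6
    return x

for x in [1e-3, 0.1, 0.5]:
    exact = math.tanh(x)
    # Print the absolute error of each alternative at this point
    print(x, alt4(x) - exact, alt5(x) - exact, alt6(x) - exact)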
Reproduce with:
herbie shell --seed 2024205
(FPCore (x)
:name "Hyperbolic tangent"
:precision binary64
(/ (- (exp x) (exp (- x))) (+ (exp x) (exp (- x)))))
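Pasting this FPCore into the herbie shell session above should reproduce the run; the --seed argument fixes the points Herbie samples, so the accuracy figures are repeatable.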