
(FPCore (x) :precision binary64 (- (/ 2.0 (+ 1.0 (exp (* -2.0 x)))) 1.0))
double code(double x) {
return (2.0 / (1.0 + exp((-2.0 * x)))) - 1.0;
}
! NaN-aware fmax/fmin support module (generated): Fortran's intrinsic
! max/min have processor-dependent behaviour when an argument is NaN,
! so these wrappers implement C99 fmax/fmin semantics — if exactly one
! argument is NaN, the other argument is returned.
module fmin_fmax_functions
implicit none
private
public fmax
public fmin
! Generic interfaces cover every real(4)/real(8) kind combination;
! mixed-kind variants promote the real(4) argument to real(8) via dble().
interface fmax
module procedure fmax88
module procedure fmax44
module procedure fmax84
module procedure fmax48
end interface
interface fmin
module procedure fmin88
module procedure fmin44
module procedure fmin84
module procedure fmin48
end interface
contains
! Note: x /= x is the portable NaN test (NaN compares unequal to itself).
! merge(t, f, mask) picks t when mask is true, so the nested merges read:
! if x is NaN -> y; else if y is NaN -> x; else max(x, y).
real(8) function fmax88(x, y) result (res)
real(8), intent (in) :: x
real(8), intent (in) :: y
res = merge(y, merge(x, max(x, y), y /= y), x /= x)
end function
real(4) function fmax44(x, y) result (res)
real(4), intent (in) :: x
real(4), intent (in) :: y
res = merge(y, merge(x, max(x, y), y /= y), x /= x)
end function
real(8) function fmax84(x, y) result(res)
real(8), intent (in) :: x
real(4), intent (in) :: y
res = merge(dble(y), merge(x, max(x, dble(y)), y /= y), x /= x)
end function
real(8) function fmax48(x, y) result(res)
real(4), intent (in) :: x
real(8), intent (in) :: y
res = merge(y, merge(dble(x), max(dble(x), y), y /= y), x /= x)
end function
real(8) function fmin88(x, y) result (res)
real(8), intent (in) :: x
real(8), intent (in) :: y
res = merge(y, merge(x, min(x, y), y /= y), x /= x)
end function
real(4) function fmin44(x, y) result (res)
real(4), intent (in) :: x
real(4), intent (in) :: y
res = merge(y, merge(x, min(x, y), y /= y), x /= x)
end function
real(8) function fmin84(x, y) result(res)
real(8), intent (in) :: x
real(4), intent (in) :: y
res = merge(dble(y), merge(x, min(x, dble(y)), y /= y), x /= x)
end function
real(8) function fmin48(x, y) result(res)
real(4), intent (in) :: x
real(8), intent (in) :: y
res = merge(y, merge(dble(x), min(dble(x), y), y /= y), x /= x)
end function
end module
! Herbie initial program: logistic form of tanh(x), 2/(1 + exp(-2x)) - 1.
! The generated expression and its evaluation order are rounding-critical.
real(8) function code(x)
use fmin_fmax_functions
real(8), intent (in) :: x
code = (2.0d0 / (1.0d0 + exp(((-2.0d0) * x)))) - 1.0d0
end function
/** Logistic form of tanh(x): 2 / (1 + e^(-2x)) - 1 (Herbie initial program). */
public static double code(double x) {
    final double expTerm = Math.exp(-2.0 * x);
    final double ratio = 2.0 / (1.0 + expTerm);
    return ratio - 1.0;
}
def code(x):
    """Logistic form of tanh(x): 2 / (1 + e**(-2*x)) - 1 (Herbie initial program)."""
    denom = 1.0 + math.exp(-2.0 * x)
    return 2.0 / denom - 1.0
function code(x)
    # Logistic form of tanh(x): 2 / (1 + e^(-2x)) - 1 (Herbie initial program).
    denom = Float64(1.0 + exp(Float64(-2.0 * x)))
    return Float64(Float64(2.0 / denom) - 1.0)
end
function tmp = code(x)
% Logistic form of tanh(x): 2 / (1 + exp(-2*x)) - 1 (Herbie initial program).
    e = exp(-2.0 * x);
    tmp = (2.0 / (1.0 + e)) - 1.0;
end
code[x_] := N[(N[(2.0 / N[(1.0 + N[Exp[N[(-2.0 * x), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]), $MachinePrecision] - 1.0), $MachinePrecision]
\begin{array}{l}
\\
\frac{2}{1 + e^{-2 \cdot x}} - 1
\end{array}
Sampling outcomes in binary64 precision:
Herbie found 14 alternatives:
| Alternative | Accuracy | Speedup |
|---|---|---|
(FPCore (x) :precision binary64 (- (/ 2.0 (+ 1.0 (exp (* -2.0 x)))) 1.0))
double code(double x) {
return (2.0 / (1.0 + exp((-2.0 * x)))) - 1.0;
}
! NaN-aware fmax/fmin support module (generated): Fortran's intrinsic
! max/min have processor-dependent behaviour when an argument is NaN,
! so these wrappers implement C99 fmax/fmin semantics — if exactly one
! argument is NaN, the other argument is returned.
module fmin_fmax_functions
implicit none
private
public fmax
public fmin
! Generic interfaces cover every real(4)/real(8) kind combination;
! mixed-kind variants promote the real(4) argument to real(8) via dble().
interface fmax
module procedure fmax88
module procedure fmax44
module procedure fmax84
module procedure fmax48
end interface
interface fmin
module procedure fmin88
module procedure fmin44
module procedure fmin84
module procedure fmin48
end interface
contains
! Note: x /= x is the portable NaN test (NaN compares unequal to itself).
! merge(t, f, mask) picks t when mask is true, so the nested merges read:
! if x is NaN -> y; else if y is NaN -> x; else max(x, y).
real(8) function fmax88(x, y) result (res)
real(8), intent (in) :: x
real(8), intent (in) :: y
res = merge(y, merge(x, max(x, y), y /= y), x /= x)
end function
real(4) function fmax44(x, y) result (res)
real(4), intent (in) :: x
real(4), intent (in) :: y
res = merge(y, merge(x, max(x, y), y /= y), x /= x)
end function
real(8) function fmax84(x, y) result(res)
real(8), intent (in) :: x
real(4), intent (in) :: y
res = merge(dble(y), merge(x, max(x, dble(y)), y /= y), x /= x)
end function
real(8) function fmax48(x, y) result(res)
real(4), intent (in) :: x
real(8), intent (in) :: y
res = merge(y, merge(dble(x), max(dble(x), y), y /= y), x /= x)
end function
real(8) function fmin88(x, y) result (res)
real(8), intent (in) :: x
real(8), intent (in) :: y
res = merge(y, merge(x, min(x, y), y /= y), x /= x)
end function
real(4) function fmin44(x, y) result (res)
real(4), intent (in) :: x
real(4), intent (in) :: y
res = merge(y, merge(x, min(x, y), y /= y), x /= x)
end function
real(8) function fmin84(x, y) result(res)
real(8), intent (in) :: x
real(4), intent (in) :: y
res = merge(dble(y), merge(x, min(x, dble(y)), y /= y), x /= x)
end function
real(8) function fmin48(x, y) result(res)
real(4), intent (in) :: x
real(8), intent (in) :: y
res = merge(y, merge(dble(x), min(dble(x), y), y /= y), x /= x)
end function
end module
! Herbie initial program: logistic form of tanh(x), 2/(1 + exp(-2x)) - 1.
! The generated expression and its evaluation order are rounding-critical.
real(8) function code(x)
use fmin_fmax_functions
real(8), intent (in) :: x
code = (2.0d0 / (1.0d0 + exp(((-2.0d0) * x)))) - 1.0d0
end function
/** Logistic form of tanh(x): 2 / (1 + e^(-2x)) - 1 (Herbie initial program). */
public static double code(double x) {
    final double expTerm = Math.exp(-2.0 * x);
    final double ratio = 2.0 / (1.0 + expTerm);
    return ratio - 1.0;
}
def code(x):
    """Logistic form of tanh(x): 2 / (1 + e**(-2*x)) - 1 (Herbie initial program)."""
    denom = 1.0 + math.exp(-2.0 * x)
    return 2.0 / denom - 1.0
function code(x)
    # Logistic form of tanh(x): 2 / (1 + e^(-2x)) - 1 (Herbie initial program).
    denom = Float64(1.0 + exp(Float64(-2.0 * x)))
    return Float64(Float64(2.0 / denom) - 1.0)
end
function tmp = code(x)
% Logistic form of tanh(x): 2 / (1 + exp(-2*x)) - 1 (Herbie initial program).
    e = exp(-2.0 * x);
    tmp = (2.0 / (1.0 + e)) - 1.0;
end
code[x_] := N[(N[(2.0 / N[(1.0 + N[Exp[N[(-2.0 * x), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]), $MachinePrecision] - 1.0), $MachinePrecision]
\begin{array}{l}
\\
\frac{2}{1 + e^{-2 \cdot x}} - 1
\end{array}
(FPCore (x)
:precision binary64
(if (<= x -0.00095)
(- (/ 2.0 (+ 1.0 (exp (* -2.0 x)))) 1.0)
(if (<= x 0.00096)
(fma (* -0.3333333333333333 (* x x)) x x)
(expm1 (- (log 2.0) (log1p (pow (exp x) -2.0)))))))
double code(double x) {
double tmp;
if (x <= -0.00095) {
tmp = (2.0 / (1.0 + exp((-2.0 * x)))) - 1.0;
} else if (x <= 0.00096) {
tmp = fma((-0.3333333333333333 * (x * x)), x, x);
} else {
tmp = expm1((log(2.0) - log1p(pow(exp(x), -2.0))));
}
return tmp;
}
# Herbie alternative (regime-split); the Float64(...) wrappers pin the exact
# rounding of each intermediate — do not re-associate or simplify.
function code(x) tmp = 0.0 if (x <= -0.00095) tmp = Float64(Float64(2.0 / Float64(1.0 + exp(Float64(-2.0 * x)))) - 1.0); elseif (x <= 0.00096) tmp = fma(Float64(-0.3333333333333333 * Float64(x * x)), x, x); else tmp = expm1(Float64(log(2.0) - log1p((exp(x) ^ -2.0)))); end return tmp end
code[x_] := If[LessEqual[x, -0.00095], N[(N[(2.0 / N[(1.0 + N[Exp[N[(-2.0 * x), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]), $MachinePrecision] - 1.0), $MachinePrecision], If[LessEqual[x, 0.00096], N[(N[(-0.3333333333333333 * N[(x * x), $MachinePrecision]), $MachinePrecision] * x + x), $MachinePrecision], N[(Exp[N[(N[Log[2.0], $MachinePrecision] - N[Log[1 + N[Power[N[Exp[x], $MachinePrecision], -2.0], $MachinePrecision]], $MachinePrecision]), $MachinePrecision]] - 1), $MachinePrecision]]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;x \leq -0.00095:\\
\;\;\;\;\frac{2}{1 + e^{-2 \cdot x}} - 1\\
\mathbf{elif}\;x \leq 0.00096:\\
\;\;\;\;\mathsf{fma}\left(-0.3333333333333333 \cdot \left(x \cdot x\right), x, x\right)\\
\mathbf{else}:\\
\;\;\;\;\mathsf{expm1}\left(\log 2 - \mathsf{log1p}\left({\left(e^{x}\right)}^{-2}\right)\right)\\
\end{array}
\end{array}
if x < -9.49999999999999998e-4Initial program 100.0%
if -9.49999999999999998e-4 < x < 9.60000000000000024e-4Initial program 7.5%
Taylor expanded in x around 0
+-commutativeN/A
distribute-lft-inN/A
associate-*r*N/A
*-rgt-identityN/A
lower-fma.f64N/A
*-commutativeN/A
pow-plusN/A
lower-pow.f64N/A
metadata-evalN/A
lower--.f64N/A
lower-*.f64N/A
unpow2N/A
lower-*.f64100.0
Applied rewrites100.0%
Applied rewrites100.0%
Taylor expanded in x around 0
Applied rewrites100.0%
if 9.60000000000000024e-4 < x Initial program 99.9%
Applied rewrites100.0%
(FPCore (x)
:precision binary64
(if (<= x -0.00095)
(- (/ 2.0 (+ 1.0 (exp (* -2.0 x)))) 1.0)
(if (<= x 0.001)
(fma (* -0.3333333333333333 (* x x)) x x)
(- (/ 2.0 (+ (pow (exp (* (- x) -2.0)) -1.0) 1.0)) 1.0))))
double code(double x) {
double tmp;
if (x <= -0.00095) {
tmp = (2.0 / (1.0 + exp((-2.0 * x)))) - 1.0;
} else if (x <= 0.001) {
tmp = fma((-0.3333333333333333 * (x * x)), x, x);
} else {
tmp = (2.0 / (pow(exp((-x * -2.0)), -1.0) + 1.0)) - 1.0;
}
return tmp;
}
# Herbie alternative (regime-split); the Float64(...) wrappers pin the exact
# rounding of each intermediate — do not re-associate or simplify.
function code(x) tmp = 0.0 if (x <= -0.00095) tmp = Float64(Float64(2.0 / Float64(1.0 + exp(Float64(-2.0 * x)))) - 1.0); elseif (x <= 0.001) tmp = fma(Float64(-0.3333333333333333 * Float64(x * x)), x, x); else tmp = Float64(Float64(2.0 / Float64((exp(Float64(Float64(-x) * -2.0)) ^ -1.0) + 1.0)) - 1.0); end return tmp end
code[x_] := If[LessEqual[x, -0.00095], N[(N[(2.0 / N[(1.0 + N[Exp[N[(-2.0 * x), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]), $MachinePrecision] - 1.0), $MachinePrecision], If[LessEqual[x, 0.001], N[(N[(-0.3333333333333333 * N[(x * x), $MachinePrecision]), $MachinePrecision] * x + x), $MachinePrecision], N[(N[(2.0 / N[(N[Power[N[Exp[N[((-x) * -2.0), $MachinePrecision]], $MachinePrecision], -1.0], $MachinePrecision] + 1.0), $MachinePrecision]), $MachinePrecision] - 1.0), $MachinePrecision]]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;x \leq -0.00095:\\
\;\;\;\;\frac{2}{1 + e^{-2 \cdot x}} - 1\\
\mathbf{elif}\;x \leq 0.001:\\
\;\;\;\;\mathsf{fma}\left(-0.3333333333333333 \cdot \left(x \cdot x\right), x, x\right)\\
\mathbf{else}:\\
\;\;\;\;\frac{2}{{\left(e^{\left(-x\right) \cdot -2}\right)}^{-1} + 1} - 1\\
\end{array}
\end{array}
if x < -9.49999999999999998e-4Initial program 100.0%
if -9.49999999999999998e-4 < x < 1e-3Initial program 7.5%
Taylor expanded in x around 0
+-commutativeN/A
distribute-lft-inN/A
associate-*r*N/A
*-rgt-identityN/A
lower-fma.f64N/A
*-commutativeN/A
pow-plusN/A
lower-pow.f64N/A
metadata-evalN/A
lower--.f64N/A
lower-*.f64N/A
unpow2N/A
lower-*.f64100.0
Applied rewrites100.0%
Applied rewrites100.0%
Taylor expanded in x around 0
Applied rewrites100.0%
if 1e-3 < x Initial program 99.9%
lift-+.f64N/A
+-commutativeN/A
lower-+.f6499.9
lift-exp.f64N/A
lift-*.f64N/A
*-commutativeN/A
exp-prodN/A
lower-pow.f64N/A
lower-exp.f64100.0
Applied rewrites100.0%
lift-pow.f64N/A
lift-exp.f64N/A
pow-expN/A
*-commutativeN/A
sinh-+-cosh-revN/A
flip-+N/A
sinh-coshN/A
sinh---cosh-revN/A
lower-/.f64N/A
lower-exp.f64N/A
lower-neg.f64N/A
*-commutativeN/A
lower-*.f64100.0
Applied rewrites100.0%
Final simplification100.0%
(FPCore (x)
:precision binary64
(if (<= x -1.55)
(- (/ 2.0 (* (+ x x) x)) 1.0)
(if (<= x 1.16)
(fma
(* (fma 0.13333333333333333 (* x x) -0.3333333333333333) (* x x))
x
x)
(- (/ 2.0 (+ (pow (fma (fma x 2.0 2.0) x 1.0) -1.0) 1.0)) 1.0))))
double code(double x) {
double tmp;
if (x <= -1.55) {
tmp = (2.0 / ((x + x) * x)) - 1.0;
} else if (x <= 1.16) {
tmp = fma((fma(0.13333333333333333, (x * x), -0.3333333333333333) * (x * x)), x, x);
} else {
tmp = (2.0 / (pow(fma(fma(x, 2.0, 2.0), x, 1.0), -1.0) + 1.0)) - 1.0;
}
return tmp;
}
# Herbie alternative (regime-split); the Float64(...) wrappers pin the exact
# rounding of each intermediate — do not re-associate or simplify.
function code(x) tmp = 0.0 if (x <= -1.55) tmp = Float64(Float64(2.0 / Float64(Float64(x + x) * x)) - 1.0); elseif (x <= 1.16) tmp = fma(Float64(fma(0.13333333333333333, Float64(x * x), -0.3333333333333333) * Float64(x * x)), x, x); else tmp = Float64(Float64(2.0 / Float64((fma(fma(x, 2.0, 2.0), x, 1.0) ^ -1.0) + 1.0)) - 1.0); end return tmp end
code[x_] := If[LessEqual[x, -1.55], N[(N[(2.0 / N[(N[(x + x), $MachinePrecision] * x), $MachinePrecision]), $MachinePrecision] - 1.0), $MachinePrecision], If[LessEqual[x, 1.16], N[(N[(N[(0.13333333333333333 * N[(x * x), $MachinePrecision] + -0.3333333333333333), $MachinePrecision] * N[(x * x), $MachinePrecision]), $MachinePrecision] * x + x), $MachinePrecision], N[(N[(2.0 / N[(N[Power[N[(N[(x * 2.0 + 2.0), $MachinePrecision] * x + 1.0), $MachinePrecision], -1.0], $MachinePrecision] + 1.0), $MachinePrecision]), $MachinePrecision] - 1.0), $MachinePrecision]]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;x \leq -1.55:\\
\;\;\;\;\frac{2}{\left(x + x\right) \cdot x} - 1\\
\mathbf{elif}\;x \leq 1.16:\\
\;\;\;\;\mathsf{fma}\left(\mathsf{fma}\left(0.13333333333333333, x \cdot x, -0.3333333333333333\right) \cdot \left(x \cdot x\right), x, x\right)\\
\mathbf{else}:\\
\;\;\;\;\frac{2}{{\left(\mathsf{fma}\left(\mathsf{fma}\left(x, 2, 2\right), x, 1\right)\right)}^{-1} + 1} - 1\\
\end{array}
\end{array}
if x < -1.55000000000000004Initial program 100.0%
Taylor expanded in x around 0
+-commutativeN/A
lower-fma.f6498.5
Applied rewrites98.5%
Taylor expanded in x around 0
fp-cancel-sign-sub-invN/A
fp-cancel-sub-sign-invN/A
+-commutativeN/A
remove-double-negN/A
*-commutativeN/A
lower-fma.f64N/A
lower--.f64N/A
*-commutativeN/A
lower-*.f64100.0
Applied rewrites100.0%
Taylor expanded in x around inf
Applied rewrites100.0%
Applied rewrites100.0%
if -1.55000000000000004 < x < 1.15999999999999992Initial program 8.7%
Taylor expanded in x around 0
+-commutativeN/A
distribute-lft-inN/A
associate-*r*N/A
*-rgt-identityN/A
lower-fma.f64N/A
*-commutativeN/A
pow-plusN/A
lower-pow.f64N/A
metadata-evalN/A
lower--.f64N/A
lower-*.f64N/A
unpow2N/A
lower-*.f6499.1
Applied rewrites99.1%
Applied rewrites99.1%
Taylor expanded in x around 0
Applied rewrites99.1%
if 1.15999999999999992 < x Initial program 100.0%
lift-+.f64N/A
+-commutativeN/A
lower-+.f64100.0
lift-exp.f64N/A
lift-*.f64N/A
*-commutativeN/A
exp-prodN/A
lower-pow.f64N/A
lower-exp.f64100.0
Applied rewrites100.0%
lift-pow.f64N/A
lift-exp.f64N/A
pow-expN/A
*-commutativeN/A
sinh-+-cosh-revN/A
flip-+N/A
sinh-coshN/A
sinh---cosh-revN/A
lower-/.f64N/A
lower-exp.f64N/A
lower-neg.f64N/A
*-commutativeN/A
lower-*.f64100.0
Applied rewrites100.0%
Taylor expanded in x around 0
+-commutativeN/A
*-commutativeN/A
lower-fma.f64N/A
+-commutativeN/A
*-commutativeN/A
lower-fma.f64100.0
Applied rewrites100.0%
Final simplification99.5%
(FPCore (x) :precision binary64 (if (<= (+ 1.0 (exp (* -2.0 x))) 5.0) (fma (* (fma 0.13333333333333333 (* x x) -0.3333333333333333) (* x x)) x x) (- (/ 2.0 (* (+ x x) x)) 1.0)))
double code(double x) {
double tmp;
if ((1.0 + exp((-2.0 * x))) <= 5.0) {
tmp = fma((fma(0.13333333333333333, (x * x), -0.3333333333333333) * (x * x)), x, x);
} else {
tmp = (2.0 / ((x + x) * x)) - 1.0;
}
return tmp;
}
# Herbie alternative (regime-split); the Float64(...) wrappers pin the exact
# rounding of each intermediate — do not re-associate or simplify.
function code(x) tmp = 0.0 if (Float64(1.0 + exp(Float64(-2.0 * x))) <= 5.0) tmp = fma(Float64(fma(0.13333333333333333, Float64(x * x), -0.3333333333333333) * Float64(x * x)), x, x); else tmp = Float64(Float64(2.0 / Float64(Float64(x + x) * x)) - 1.0); end return tmp end
code[x_] := If[LessEqual[N[(1.0 + N[Exp[N[(-2.0 * x), $MachinePrecision]], $MachinePrecision]), $MachinePrecision], 5.0], N[(N[(N[(0.13333333333333333 * N[(x * x), $MachinePrecision] + -0.3333333333333333), $MachinePrecision] * N[(x * x), $MachinePrecision]), $MachinePrecision] * x + x), $MachinePrecision], N[(N[(2.0 / N[(N[(x + x), $MachinePrecision] * x), $MachinePrecision]), $MachinePrecision] - 1.0), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;1 + e^{-2 \cdot x} \leq 5:\\
\;\;\;\;\mathsf{fma}\left(\mathsf{fma}\left(0.13333333333333333, x \cdot x, -0.3333333333333333\right) \cdot \left(x \cdot x\right), x, x\right)\\
\mathbf{else}:\\
\;\;\;\;\frac{2}{\left(x + x\right) \cdot x} - 1\\
\end{array}
\end{array}
if (+.f64 #s(literal 1 binary64) (exp.f64 (*.f64 #s(literal -2 binary64) x))) < 5Initial program 33.4%
Taylor expanded in x around 0
+-commutativeN/A
distribute-lft-inN/A
associate-*r*N/A
*-rgt-identityN/A
lower-fma.f64N/A
*-commutativeN/A
pow-plusN/A
lower-pow.f64N/A
metadata-evalN/A
lower--.f64N/A
lower-*.f64N/A
unpow2N/A
lower-*.f6473.2
Applied rewrites73.2%
Applied rewrites73.2%
Taylor expanded in x around 0
Applied rewrites73.2%
if 5 < (+.f64 #s(literal 1 binary64) (exp.f64 (*.f64 #s(literal -2 binary64) x))) Initial program 100.0%
Taylor expanded in x around 0
+-commutativeN/A
lower-fma.f6498.5
Applied rewrites98.5%
Taylor expanded in x around 0
fp-cancel-sign-sub-invN/A
fp-cancel-sub-sign-invN/A
+-commutativeN/A
remove-double-negN/A
*-commutativeN/A
lower-fma.f64N/A
lower--.f64N/A
*-commutativeN/A
lower-*.f64100.0
Applied rewrites100.0%
Taylor expanded in x around inf
Applied rewrites100.0%
Applied rewrites100.0%
(FPCore (x)
:precision binary64
(if (<= x -1.55)
(- (/ 2.0 (* (+ x x) x)) 1.0)
(if (<= x 1.5)
(fma
(* (fma 0.13333333333333333 (* x x) -0.3333333333333333) (* x x))
x
x)
(- (/ 2.0 (+ (pow (fma x 2.0 1.0) -1.0) 1.0)) 1.0))))
double code(double x) {
double tmp;
if (x <= -1.55) {
tmp = (2.0 / ((x + x) * x)) - 1.0;
} else if (x <= 1.5) {
tmp = fma((fma(0.13333333333333333, (x * x), -0.3333333333333333) * (x * x)), x, x);
} else {
tmp = (2.0 / (pow(fma(x, 2.0, 1.0), -1.0) + 1.0)) - 1.0;
}
return tmp;
}
# Herbie alternative (regime-split); the Float64(...) wrappers pin the exact
# rounding of each intermediate — do not re-associate or simplify.
function code(x) tmp = 0.0 if (x <= -1.55) tmp = Float64(Float64(2.0 / Float64(Float64(x + x) * x)) - 1.0); elseif (x <= 1.5) tmp = fma(Float64(fma(0.13333333333333333, Float64(x * x), -0.3333333333333333) * Float64(x * x)), x, x); else tmp = Float64(Float64(2.0 / Float64((fma(x, 2.0, 1.0) ^ -1.0) + 1.0)) - 1.0); end return tmp end
code[x_] := If[LessEqual[x, -1.55], N[(N[(2.0 / N[(N[(x + x), $MachinePrecision] * x), $MachinePrecision]), $MachinePrecision] - 1.0), $MachinePrecision], If[LessEqual[x, 1.5], N[(N[(N[(0.13333333333333333 * N[(x * x), $MachinePrecision] + -0.3333333333333333), $MachinePrecision] * N[(x * x), $MachinePrecision]), $MachinePrecision] * x + x), $MachinePrecision], N[(N[(2.0 / N[(N[Power[N[(x * 2.0 + 1.0), $MachinePrecision], -1.0], $MachinePrecision] + 1.0), $MachinePrecision]), $MachinePrecision] - 1.0), $MachinePrecision]]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;x \leq -1.55:\\
\;\;\;\;\frac{2}{\left(x + x\right) \cdot x} - 1\\
\mathbf{elif}\;x \leq 1.5:\\
\;\;\;\;\mathsf{fma}\left(\mathsf{fma}\left(0.13333333333333333, x \cdot x, -0.3333333333333333\right) \cdot \left(x \cdot x\right), x, x\right)\\
\mathbf{else}:\\
\;\;\;\;\frac{2}{{\left(\mathsf{fma}\left(x, 2, 1\right)\right)}^{-1} + 1} - 1\\
\end{array}
\end{array}
if x < -1.55000000000000004Initial program 100.0%
Taylor expanded in x around 0
+-commutativeN/A
lower-fma.f6498.5
Applied rewrites98.5%
Taylor expanded in x around 0
fp-cancel-sign-sub-invN/A
fp-cancel-sub-sign-invN/A
+-commutativeN/A
remove-double-negN/A
*-commutativeN/A
lower-fma.f64N/A
lower--.f64N/A
*-commutativeN/A
lower-*.f64100.0
Applied rewrites100.0%
Taylor expanded in x around inf
Applied rewrites100.0%
Applied rewrites100.0%
if -1.55000000000000004 < x < 1.5Initial program 8.7%
Taylor expanded in x around 0
+-commutativeN/A
distribute-lft-inN/A
associate-*r*N/A
*-rgt-identityN/A
lower-fma.f64N/A
*-commutativeN/A
pow-plusN/A
lower-pow.f64N/A
metadata-evalN/A
lower--.f64N/A
lower-*.f64N/A
unpow2N/A
lower-*.f6499.1
Applied rewrites99.1%
Applied rewrites99.1%
Taylor expanded in x around 0
Applied rewrites99.1%
if 1.5 < x Initial program 100.0%
lift-+.f64N/A
+-commutativeN/A
lower-+.f64100.0
lift-exp.f64N/A
lift-*.f64N/A
*-commutativeN/A
exp-prodN/A
lower-pow.f64N/A
lower-exp.f64100.0
Applied rewrites100.0%
lift-pow.f64N/A
lift-exp.f64N/A
pow-expN/A
*-commutativeN/A
sinh-+-cosh-revN/A
flip-+N/A
sinh-coshN/A
sinh---cosh-revN/A
lower-/.f64N/A
lower-exp.f64N/A
lower-neg.f64N/A
*-commutativeN/A
lower-*.f64100.0
Applied rewrites100.0%
Taylor expanded in x around 0
+-commutativeN/A
*-commutativeN/A
lower-fma.f6499.5
Applied rewrites99.5%
Final simplification99.4%
(FPCore (x) :precision binary64 (if (or (<= x -0.00095) (not (<= x 0.001))) (- (/ 2.0 (+ 1.0 (exp (* -2.0 x)))) 1.0) (fma (* -0.3333333333333333 (* x x)) x x)))
double code(double x) {
double tmp;
if ((x <= -0.00095) || !(x <= 0.001)) {
tmp = (2.0 / (1.0 + exp((-2.0 * x)))) - 1.0;
} else {
tmp = fma((-0.3333333333333333 * (x * x)), x, x);
}
return tmp;
}
# Herbie alternative (regime-split); the Float64(...) wrappers pin the exact
# rounding of each intermediate — do not re-associate or simplify.
function code(x) tmp = 0.0 if ((x <= -0.00095) || !(x <= 0.001)) tmp = Float64(Float64(2.0 / Float64(1.0 + exp(Float64(-2.0 * x)))) - 1.0); else tmp = fma(Float64(-0.3333333333333333 * Float64(x * x)), x, x); end return tmp end
code[x_] := If[Or[LessEqual[x, -0.00095], N[Not[LessEqual[x, 0.001]], $MachinePrecision]], N[(N[(2.0 / N[(1.0 + N[Exp[N[(-2.0 * x), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]), $MachinePrecision] - 1.0), $MachinePrecision], N[(N[(-0.3333333333333333 * N[(x * x), $MachinePrecision]), $MachinePrecision] * x + x), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;x \leq -0.00095 \lor \neg \left(x \leq 0.001\right):\\
\;\;\;\;\frac{2}{1 + e^{-2 \cdot x}} - 1\\
\mathbf{else}:\\
\;\;\;\;\mathsf{fma}\left(-0.3333333333333333 \cdot \left(x \cdot x\right), x, x\right)\\
\end{array}
\end{array}
if x < -9.49999999999999998e-4 or 1e-3 < x Initial program 100.0%
if -9.49999999999999998e-4 < x < 1e-3Initial program 7.5%
Taylor expanded in x around 0
+-commutativeN/A
distribute-lft-inN/A
associate-*r*N/A
*-rgt-identityN/A
lower-fma.f64N/A
*-commutativeN/A
pow-plusN/A
lower-pow.f64N/A
metadata-evalN/A
lower--.f64N/A
lower-*.f64N/A
unpow2N/A
lower-*.f64100.0
Applied rewrites100.0%
Applied rewrites100.0%
Taylor expanded in x around 0
Applied rewrites100.0%
Final simplification100.0%
(FPCore (x) :precision binary64 (if (<= x -1.4) (- (/ 2.0 (* (+ x x) x)) 1.0) (fma (* -0.3333333333333333 (* x x)) x x)))
double code(double x) {
double tmp;
if (x <= -1.4) {
tmp = (2.0 / ((x + x) * x)) - 1.0;
} else {
tmp = fma((-0.3333333333333333 * (x * x)), x, x);
}
return tmp;
}
# Herbie alternative (regime-split); the Float64(...) wrappers pin the exact
# rounding of each intermediate — do not re-associate or simplify.
function code(x) tmp = 0.0 if (x <= -1.4) tmp = Float64(Float64(2.0 / Float64(Float64(x + x) * x)) - 1.0); else tmp = fma(Float64(-0.3333333333333333 * Float64(x * x)), x, x); end return tmp end
code[x_] := If[LessEqual[x, -1.4], N[(N[(2.0 / N[(N[(x + x), $MachinePrecision] * x), $MachinePrecision]), $MachinePrecision] - 1.0), $MachinePrecision], N[(N[(-0.3333333333333333 * N[(x * x), $MachinePrecision]), $MachinePrecision] * x + x), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;x \leq -1.4:\\
\;\;\;\;\frac{2}{\left(x + x\right) \cdot x} - 1\\
\mathbf{else}:\\
\;\;\;\;\mathsf{fma}\left(-0.3333333333333333 \cdot \left(x \cdot x\right), x, x\right)\\
\end{array}
\end{array}
if x < -1.3999999999999999Initial program 100.0%
Taylor expanded in x around 0
+-commutativeN/A
lower-fma.f6498.5
Applied rewrites98.5%
Taylor expanded in x around 0
fp-cancel-sign-sub-invN/A
fp-cancel-sub-sign-invN/A
+-commutativeN/A
remove-double-negN/A
*-commutativeN/A
lower-fma.f64N/A
lower--.f64N/A
*-commutativeN/A
lower-*.f64100.0
Applied rewrites100.0%
Taylor expanded in x around inf
Applied rewrites100.0%
Applied rewrites100.0%
if -1.3999999999999999 < x Initial program 33.4%
Taylor expanded in x around 0
+-commutativeN/A
distribute-lft-inN/A
associate-*r*N/A
*-rgt-identityN/A
lower-fma.f64N/A
*-commutativeN/A
pow-plusN/A
lower-pow.f64N/A
metadata-evalN/A
lower--.f64N/A
lower-*.f64N/A
unpow2N/A
lower-*.f6473.2
Applied rewrites73.2%
Applied rewrites73.2%
Taylor expanded in x around 0
Applied rewrites72.4%
(FPCore (x) :precision binary64 (if (<= x -1.4) (- (/ 2.0 (* (- x) -2.0)) 1.0) (fma (* -0.3333333333333333 (* x x)) x x)))
double code(double x) {
double tmp;
if (x <= -1.4) {
tmp = (2.0 / (-x * -2.0)) - 1.0;
} else {
tmp = fma((-0.3333333333333333 * (x * x)), x, x);
}
return tmp;
}
# Herbie alternative (regime-split); the Float64(...) wrappers pin the exact
# rounding of each intermediate — do not re-associate or simplify.
function code(x) tmp = 0.0 if (x <= -1.4) tmp = Float64(Float64(2.0 / Float64(Float64(-x) * -2.0)) - 1.0); else tmp = fma(Float64(-0.3333333333333333 * Float64(x * x)), x, x); end return tmp end
code[x_] := If[LessEqual[x, -1.4], N[(N[(2.0 / N[((-x) * -2.0), $MachinePrecision]), $MachinePrecision] - 1.0), $MachinePrecision], N[(N[(-0.3333333333333333 * N[(x * x), $MachinePrecision]), $MachinePrecision] * x + x), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;x \leq -1.4:\\
\;\;\;\;\frac{2}{\left(-x\right) \cdot -2} - 1\\
\mathbf{else}:\\
\;\;\;\;\mathsf{fma}\left(-0.3333333333333333 \cdot \left(x \cdot x\right), x, x\right)\\
\end{array}
\end{array}
if x < -1.3999999999999999Initial program 100.0%
Taylor expanded in x around 0
+-commutativeN/A
lower-fma.f6498.5
Applied rewrites98.5%
Taylor expanded in x around inf
Applied rewrites98.5%
Applied rewrites98.5%
if -1.3999999999999999 < x Initial program 33.4%
Taylor expanded in x around 0
+-commutativeN/A
distribute-lft-inN/A
associate-*r*N/A
*-rgt-identityN/A
lower-fma.f64N/A
*-commutativeN/A
pow-plusN/A
lower-pow.f64N/A
metadata-evalN/A
lower--.f64N/A
lower-*.f64N/A
unpow2N/A
lower-*.f6473.2
Applied rewrites73.2%
Applied rewrites73.2%
Taylor expanded in x around 0
Applied rewrites72.4%
(FPCore (x) :precision binary64 (if (<= x -1.3) (- (/ 2.0 (fma -2.0 x 2.0)) 1.0) (fma (* -0.3333333333333333 (* x x)) x x)))
double code(double x) {
double tmp;
if (x <= -1.3) {
tmp = (2.0 / fma(-2.0, x, 2.0)) - 1.0;
} else {
tmp = fma((-0.3333333333333333 * (x * x)), x, x);
}
return tmp;
}
# Herbie alternative (regime-split); the Float64(...) wrappers pin the exact
# rounding of each intermediate — do not re-associate or simplify.
function code(x) tmp = 0.0 if (x <= -1.3) tmp = Float64(Float64(2.0 / fma(-2.0, x, 2.0)) - 1.0); else tmp = fma(Float64(-0.3333333333333333 * Float64(x * x)), x, x); end return tmp end
code[x_] := If[LessEqual[x, -1.3], N[(N[(2.0 / N[(-2.0 * x + 2.0), $MachinePrecision]), $MachinePrecision] - 1.0), $MachinePrecision], N[(N[(-0.3333333333333333 * N[(x * x), $MachinePrecision]), $MachinePrecision] * x + x), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;x \leq -1.3:\\
\;\;\;\;\frac{2}{\mathsf{fma}\left(-2, x, 2\right)} - 1\\
\mathbf{else}:\\
\;\;\;\;\mathsf{fma}\left(-0.3333333333333333 \cdot \left(x \cdot x\right), x, x\right)\\
\end{array}
\end{array}
if x < -1.30000000000000004Initial program 100.0%
Taylor expanded in x around 0
+-commutativeN/A
lower-fma.f6498.5
Applied rewrites98.5%
if -1.30000000000000004 < x Initial program 33.4%
Taylor expanded in x around 0
+-commutativeN/A
distribute-lft-inN/A
associate-*r*N/A
*-rgt-identityN/A
lower-fma.f64N/A
*-commutativeN/A
pow-plusN/A
lower-pow.f64N/A
metadata-evalN/A
lower--.f64N/A
lower-*.f64N/A
unpow2N/A
lower-*.f6473.2
Applied rewrites73.2%
Applied rewrites73.2%
Taylor expanded in x around 0
Applied rewrites72.4%
(FPCore (x) :precision binary64 (if (<= x -1.55) (- (/ 2.0 (* x -2.0)) 1.0) (fma (* -0.3333333333333333 (* x x)) x x)))
double code(double x) {
double tmp;
if (x <= -1.55) {
tmp = (2.0 / (x * -2.0)) - 1.0;
} else {
tmp = fma((-0.3333333333333333 * (x * x)), x, x);
}
return tmp;
}
# Herbie alternative (regime-split); the Float64(...) wrappers pin the exact
# rounding of each intermediate — do not re-associate or simplify.
function code(x) tmp = 0.0 if (x <= -1.55) tmp = Float64(Float64(2.0 / Float64(x * -2.0)) - 1.0); else tmp = fma(Float64(-0.3333333333333333 * Float64(x * x)), x, x); end return tmp end
code[x_] := If[LessEqual[x, -1.55], N[(N[(2.0 / N[(x * -2.0), $MachinePrecision]), $MachinePrecision] - 1.0), $MachinePrecision], N[(N[(-0.3333333333333333 * N[(x * x), $MachinePrecision]), $MachinePrecision] * x + x), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;x \leq -1.55:\\
\;\;\;\;\frac{2}{x \cdot -2} - 1\\
\mathbf{else}:\\
\;\;\;\;\mathsf{fma}\left(-0.3333333333333333 \cdot \left(x \cdot x\right), x, x\right)\\
\end{array}
\end{array}
if x < -1.55000000000000004Initial program 100.0%
Taylor expanded in x around 0
+-commutativeN/A
lower-fma.f6498.5
Applied rewrites98.5%
Taylor expanded in x around inf
Applied rewrites98.5%
if -1.55000000000000004 < x Initial program 33.4%
Taylor expanded in x around 0
+-commutativeN/A
distribute-lft-inN/A
associate-*r*N/A
*-rgt-identityN/A
lower-fma.f64N/A
*-commutativeN/A
pow-plusN/A
lower-pow.f64N/A
metadata-evalN/A
lower--.f64N/A
lower-*.f64N/A
unpow2N/A
lower-*.f6473.2
Applied rewrites73.2%
Applied rewrites73.2%
Taylor expanded in x around 0
Applied rewrites72.4%
(FPCore (x) :precision binary64 (fma (* -0.3333333333333333 (* x x)) x x))
double code(double x) {
return fma((-0.3333333333333333 * (x * x)), x, x);
}
function code(x)
    # Herbie alternative: cubic Taylor polynomial x - x^3/3, fused via fma.
    neg_third_x2 = Float64(-0.3333333333333333 * Float64(x * x))
    return fma(neg_third_x2, x, x)
end
code[x_] := N[(N[(-0.3333333333333333 * N[(x * x), $MachinePrecision]), $MachinePrecision] * x + x), $MachinePrecision]
\begin{array}{l}
\\
\mathsf{fma}\left(-0.3333333333333333 \cdot \left(x \cdot x\right), x, x\right)
\end{array}
Initial program 47.2%
Taylor expanded in x around 0
+-commutativeN/A
distribute-lft-inN/A
associate-*r*N/A
*-rgt-identityN/A
lower-fma.f64N/A
*-commutativeN/A
pow-plusN/A
lower-pow.f64N/A
metadata-evalN/A
lower--.f64N/A
lower-*.f64N/A
unpow2N/A
lower-*.f6458.8
Applied rewrites58.8%
Applied rewrites58.8%
Taylor expanded in x around 0
Applied rewrites57.6%
(FPCore (x) :precision binary64 (- (+ 1.0 x) 1.0))
/* Herbie alternative: (1 + x) - 1 — deliberately NOT simplified to x;
 * the rounding of 1 + x is the point of this generated variant. */
double code(double x) {
    double shifted = 1.0 + x;
    return shifted - 1.0;
}
! NaN-aware fmax/fmin support module (generated): Fortran's intrinsic
! max/min have processor-dependent behaviour when an argument is NaN,
! so these wrappers implement C99 fmax/fmin semantics — if exactly one
! argument is NaN, the other argument is returned.
module fmin_fmax_functions
implicit none
private
public fmax
public fmin
! Generic interfaces cover every real(4)/real(8) kind combination;
! mixed-kind variants promote the real(4) argument to real(8) via dble().
interface fmax
module procedure fmax88
module procedure fmax44
module procedure fmax84
module procedure fmax48
end interface
interface fmin
module procedure fmin88
module procedure fmin44
module procedure fmin84
module procedure fmin48
end interface
contains
! Note: x /= x is the portable NaN test (NaN compares unequal to itself).
! merge(t, f, mask) picks t when mask is true, so the nested merges read:
! if x is NaN -> y; else if y is NaN -> x; else max(x, y).
real(8) function fmax88(x, y) result (res)
real(8), intent (in) :: x
real(8), intent (in) :: y
res = merge(y, merge(x, max(x, y), y /= y), x /= x)
end function
real(4) function fmax44(x, y) result (res)
real(4), intent (in) :: x
real(4), intent (in) :: y
res = merge(y, merge(x, max(x, y), y /= y), x /= x)
end function
real(8) function fmax84(x, y) result(res)
real(8), intent (in) :: x
real(4), intent (in) :: y
res = merge(dble(y), merge(x, max(x, dble(y)), y /= y), x /= x)
end function
real(8) function fmax48(x, y) result(res)
real(4), intent (in) :: x
real(8), intent (in) :: y
res = merge(y, merge(dble(x), max(dble(x), y), y /= y), x /= x)
end function
real(8) function fmin88(x, y) result (res)
real(8), intent (in) :: x
real(8), intent (in) :: y
res = merge(y, merge(x, min(x, y), y /= y), x /= x)
end function
real(4) function fmin44(x, y) result (res)
real(4), intent (in) :: x
real(4), intent (in) :: y
res = merge(y, merge(x, min(x, y), y /= y), x /= x)
end function
real(8) function fmin84(x, y) result(res)
real(8), intent (in) :: x
real(4), intent (in) :: y
res = merge(dble(y), merge(x, min(x, dble(y)), y /= y), x /= x)
end function
real(8) function fmin48(x, y) result(res)
real(4), intent (in) :: x
real(8), intent (in) :: y
res = merge(y, merge(dble(x), min(dble(x), y), y /= y), x /= x)
end function
end module
! Herbie alternative: (1 + x) - 1. Deliberately NOT simplified to x;
! the rounding of 1 + x is the point of this generated variant.
real(8) function code(x)
use fmin_fmax_functions
real(8), intent (in) :: x
code = (1.0d0 + x) - 1.0d0
end function
/** Herbie alternative: (1 + x) - 1; the rounding of 1 + x is intentional. */
public static double code(double x) {
    final double shifted = 1.0 + x;
    return shifted - 1.0;
}
def code(x):
    """Herbie alternative: (1 + x) - 1; the rounding of 1 + x is intentional."""
    shifted = 1.0 + x
    return shifted - 1.0
function code(x)
    # Herbie alternative: (1 + x) - 1; the rounding of 1 + x is intentional.
    shifted = Float64(1.0 + x)
    return Float64(shifted - 1.0)
end
function tmp = code(x)
% Herbie alternative: (1 + x) - 1; the rounding of 1 + x is intentional.
    shifted = 1.0 + x;
    tmp = shifted - 1.0;
end
code[x_] := N[(N[(1.0 + x), $MachinePrecision] - 1.0), $MachinePrecision]
\begin{array}{l}
\\
\left(1 + x\right) - 1
\end{array}
Initial program 47.2%
Taylor expanded in x around 0
lower-+.f646.5
Applied rewrites6.5%
(FPCore (x) :precision binary64 (- x 1.0))
/* Herbie alternative: the linear approximation x - 1. */
double code(double x) {
    double result = x - 1.0;
    return result;
}
! NaN-aware fmax/fmin support module (generated): Fortran's intrinsic
! max/min have processor-dependent behaviour when an argument is NaN,
! so these wrappers implement C99 fmax/fmin semantics — if exactly one
! argument is NaN, the other argument is returned.
module fmin_fmax_functions
implicit none
private
public fmax
public fmin
! Generic interfaces cover every real(4)/real(8) kind combination;
! mixed-kind variants promote the real(4) argument to real(8) via dble().
interface fmax
module procedure fmax88
module procedure fmax44
module procedure fmax84
module procedure fmax48
end interface
interface fmin
module procedure fmin88
module procedure fmin44
module procedure fmin84
module procedure fmin48
end interface
contains
! Note: x /= x is the portable NaN test (NaN compares unequal to itself).
! merge(t, f, mask) picks t when mask is true, so the nested merges read:
! if x is NaN -> y; else if y is NaN -> x; else max(x, y).
real(8) function fmax88(x, y) result (res)
real(8), intent (in) :: x
real(8), intent (in) :: y
res = merge(y, merge(x, max(x, y), y /= y), x /= x)
end function
real(4) function fmax44(x, y) result (res)
real(4), intent (in) :: x
real(4), intent (in) :: y
res = merge(y, merge(x, max(x, y), y /= y), x /= x)
end function
real(8) function fmax84(x, y) result(res)
real(8), intent (in) :: x
real(4), intent (in) :: y
res = merge(dble(y), merge(x, max(x, dble(y)), y /= y), x /= x)
end function
real(8) function fmax48(x, y) result(res)
real(4), intent (in) :: x
real(8), intent (in) :: y
res = merge(y, merge(dble(x), max(dble(x), y), y /= y), x /= x)
end function
real(8) function fmin88(x, y) result (res)
real(8), intent (in) :: x
real(8), intent (in) :: y
res = merge(y, merge(x, min(x, y), y /= y), x /= x)
end function
real(4) function fmin44(x, y) result (res)
real(4), intent (in) :: x
real(4), intent (in) :: y
res = merge(y, merge(x, min(x, y), y /= y), x /= x)
end function
real(8) function fmin84(x, y) result(res)
real(8), intent (in) :: x
real(4), intent (in) :: y
res = merge(dble(y), merge(x, min(x, dble(y)), y /= y), x /= x)
end function
real(8) function fmin48(x, y) result(res)
real(4), intent (in) :: x
real(8), intent (in) :: y
res = merge(y, merge(dble(x), min(dble(x), y), y /= y), x /= x)
end function
end module
! Herbie alternative: the linear approximation x - 1.
real(8) function code(x)
use fmin_fmax_functions
real(8), intent (in) :: x
code = x - 1.0d0
end function
/** Herbie alternative: the linear approximation x - 1. */
public static double code(double x) {
    final double result = x - 1.0;
    return result;
}
def code(x):
    """Herbie alternative: the linear approximation x - 1."""
    result = x - 1.0
    return result
function code(x)
    # Herbie alternative: the linear approximation x - 1.
    result = Float64(x - 1.0)
    return result
end
function tmp = code(x)
% Herbie alternative: the linear approximation x - 1.
    tmp = x - 1.0;
end
code[x_] := N[(x - 1.0), $MachinePrecision]
\begin{array}{l}
\\
x - 1
\end{array}
Initial program 47.2%
Taylor expanded in x around 0
Applied rewrites4.4%
Taylor expanded in x around 0
+-commutativeN/A
lower-+.f646.5
Applied rewrites6.5%
Applied rewrites3.4%
Taylor expanded in x around -inf
Applied rewrites4.4%
(FPCore (x) :precision binary64 (- 1.0 1.0))
/* Degenerate Herbie alternative: the constant expression 1 - 1 (always 0).
 * Kept as a subtraction to mirror the generated FPCore program. */
double code(double x) {
    double one = 1.0;
    return one - one;
}
! NaN-aware fmax/fmin support module (generated): Fortran's intrinsic
! max/min have processor-dependent behaviour when an argument is NaN,
! so these wrappers implement C99 fmax/fmin semantics — if exactly one
! argument is NaN, the other argument is returned.
module fmin_fmax_functions
implicit none
private
public fmax
public fmin
! Generic interfaces cover every real(4)/real(8) kind combination;
! mixed-kind variants promote the real(4) argument to real(8) via dble().
interface fmax
module procedure fmax88
module procedure fmax44
module procedure fmax84
module procedure fmax48
end interface
interface fmin
module procedure fmin88
module procedure fmin44
module procedure fmin84
module procedure fmin48
end interface
contains
! Note: x /= x is the portable NaN test (NaN compares unequal to itself).
! merge(t, f, mask) picks t when mask is true, so the nested merges read:
! if x is NaN -> y; else if y is NaN -> x; else max(x, y).
real(8) function fmax88(x, y) result (res)
real(8), intent (in) :: x
real(8), intent (in) :: y
res = merge(y, merge(x, max(x, y), y /= y), x /= x)
end function
real(4) function fmax44(x, y) result (res)
real(4), intent (in) :: x
real(4), intent (in) :: y
res = merge(y, merge(x, max(x, y), y /= y), x /= x)
end function
real(8) function fmax84(x, y) result(res)
real(8), intent (in) :: x
real(4), intent (in) :: y
res = merge(dble(y), merge(x, max(x, dble(y)), y /= y), x /= x)
end function
real(8) function fmax48(x, y) result(res)
real(4), intent (in) :: x
real(8), intent (in) :: y
res = merge(y, merge(dble(x), max(dble(x), y), y /= y), x /= x)
end function
real(8) function fmin88(x, y) result (res)
real(8), intent (in) :: x
real(8), intent (in) :: y
res = merge(y, merge(x, min(x, y), y /= y), x /= x)
end function
real(4) function fmin44(x, y) result (res)
real(4), intent (in) :: x
real(4), intent (in) :: y
res = merge(y, merge(x, min(x, y), y /= y), x /= x)
end function
real(8) function fmin84(x, y) result(res)
real(8), intent (in) :: x
real(4), intent (in) :: y
res = merge(dble(y), merge(x, min(x, dble(y)), y /= y), x /= x)
end function
real(8) function fmin48(x, y) result(res)
real(4), intent (in) :: x
real(8), intent (in) :: y
res = merge(y, merge(dble(x), min(dble(x), y), y /= y), x /= x)
end function
end module
! Degenerate Herbie alternative: the constant expression 1 - 1 (always 0).
real(8) function code(x)
use fmin_fmax_functions
real(8), intent (in) :: x
code = 1.0d0 - 1.0d0
end function
/** Degenerate Herbie alternative: the constant expression 1 - 1 (always 0). */
public static double code(double x) {
    final double one = 1.0;
    return one - one;
}
def code(x):
    """Degenerate Herbie alternative: the constant expression 1 - 1 (always 0.0)."""
    one = 1.0
    return one - one
function code(x)
    # Degenerate Herbie alternative: the constant expression 1 - 1 (always 0.0).
    one = 1.0
    return Float64(one - one)
end
function tmp = code(x)
% Degenerate Herbie alternative: the constant expression 1 - 1 (always 0).
    tmp = 1.0 - 1.0;
end
code[x_] := N[(1.0 - 1.0), $MachinePrecision]
\begin{array}{l}
\\
1 - 1
\end{array}
Initial program 47.2%
Taylor expanded in x around 0
Applied rewrites4.4%
herbie shell --seed 2024351
(FPCore (x)
:name "Logistic function from Lakshay Garg"
:precision binary64
(- (/ 2.0 (+ 1.0 (exp (* -2.0 x)))) 1.0))