
(FPCore (x) :precision binary64 (/ (exp x) (- (exp x) 1.0)))
double code(double x) {
return exp(x) / (exp(x) - 1.0);
}
! NaN-propagating fmax/fmin helpers emitted alongside the Herbie output.
! Semantics match C's fmax/fmin: when exactly one argument is NaN the other
! argument is returned; the plain max/min intrinsics give no such guarantee.
! The test  v /= v  is true only for a NaN (IEEE); NOTE(review):
! ieee_arithmetic's ieee_is_nan would be clearer, but the generated code is
! kept byte-for-byte here.
module fmin_fmax_functions
implicit none
private
public fmax
public fmin
! Generic names resolve on argument kinds: 8 = real(8), 4 = real(4);
! mixed-kind versions promote the real(4) argument with dble().
interface fmax
module procedure fmax88
module procedure fmax44
module procedure fmax84
module procedure fmax48
end interface
interface fmin
module procedure fmin88
module procedure fmin44
module procedure fmin84
module procedure fmin48
end interface
contains
! max of two real(8); returns the non-NaN operand when one is NaN
real(8) function fmax88(x, y) result (res)
real(8), intent (in) :: x
real(8), intent (in) :: y
! outer merge: if x is NaN take y; inner: if y is NaN take x, else max(x, y)
res = merge(y, merge(x, max(x, y), y /= y), x /= x)
end function
! max of two real(4); same NaN handling as fmax88
real(4) function fmax44(x, y) result (res)
real(4), intent (in) :: x
real(4), intent (in) :: y
res = merge(y, merge(x, max(x, y), y /= y), x /= x)
end function
! max of real(8) and real(4); result is real(8)
real(8) function fmax84(x, y) result(res)
real(8), intent (in) :: x
real(4), intent (in) :: y
res = merge(dble(y), merge(x, max(x, dble(y)), y /= y), x /= x)
end function
! max of real(4) and real(8); result is real(8)
real(8) function fmax48(x, y) result(res)
real(4), intent (in) :: x
real(8), intent (in) :: y
res = merge(y, merge(dble(x), max(dble(x), y), y /= y), x /= x)
end function
! min of two real(8); returns the non-NaN operand when one is NaN
real(8) function fmin88(x, y) result (res)
real(8), intent (in) :: x
real(8), intent (in) :: y
res = merge(y, merge(x, min(x, y), y /= y), x /= x)
end function
! min of two real(4)
real(4) function fmin44(x, y) result (res)
real(4), intent (in) :: x
real(4), intent (in) :: y
res = merge(y, merge(x, min(x, y), y /= y), x /= x)
end function
! min of real(8) and real(4); result is real(8)
real(8) function fmin84(x, y) result(res)
real(8), intent (in) :: x
real(4), intent (in) :: y
res = merge(dble(y), merge(x, min(x, dble(y)), y /= y), x /= x)
end function
! min of real(4) and real(8); result is real(8)
real(8) function fmin48(x, y) result(res)
real(4), intent (in) :: x
real(8), intent (in) :: y
res = merge(y, merge(dble(x), min(dble(x), y), y /= y), x /= x)
end function
end module
! Naive evaluation of exp(x) / (exp(x) - 1.0d0); cancellation-prone near x = 0.
! Fix: added the missing `implicit none` so no local can be implicitly typed.
real(8) function code(x)
use fmin_fmax_functions
implicit none
real(8), intent (in) :: x
code = exp(x) / (exp(x) - 1.0d0)
end function
/** Naive e^x / (e^x - 1); cancels badly when x is near zero. */
public static double code(double x) {
    final double ex = Math.exp(x);
    return ex / (ex - 1.0);
}
def code(x):
    """Naive exp(x) / (exp(x) - 1.0); loses accuracy near x = 0."""
    e = math.exp(x)
    return e / (e - 1.0)
function code(x) return Float64(exp(x) / Float64(exp(x) - 1.0)) end
function tmp = code(x) tmp = exp(x) / (exp(x) - 1.0); end
code[x_] := N[(N[Exp[x], $MachinePrecision] / N[(N[Exp[x], $MachinePrecision] - 1.0), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{e^{x}}{e^{x} - 1}
\end{array}
Sampling outcomes in binary64 precision:
Herbie found 13 alternatives:
| Alternative | Accuracy | Speedup |
|---|---|---|
(FPCore (x) :precision binary64 (/ (exp x) (- (exp x) 1.0)))
double code(double x) {
return exp(x) / (exp(x) - 1.0);
}
module fmin_fmax_functions
implicit none
private
public fmax
public fmin
interface fmax
module procedure fmax88
module procedure fmax44
module procedure fmax84
module procedure fmax48
end interface
interface fmin
module procedure fmin88
module procedure fmin44
module procedure fmin84
module procedure fmin48
end interface
contains
real(8) function fmax88(x, y) result (res)
real(8), intent (in) :: x
real(8), intent (in) :: y
res = merge(y, merge(x, max(x, y), y /= y), x /= x)
end function
real(4) function fmax44(x, y) result (res)
real(4), intent (in) :: x
real(4), intent (in) :: y
res = merge(y, merge(x, max(x, y), y /= y), x /= x)
end function
real(8) function fmax84(x, y) result(res)
real(8), intent (in) :: x
real(4), intent (in) :: y
res = merge(dble(y), merge(x, max(x, dble(y)), y /= y), x /= x)
end function
real(8) function fmax48(x, y) result(res)
real(4), intent (in) :: x
real(8), intent (in) :: y
res = merge(y, merge(dble(x), max(dble(x), y), y /= y), x /= x)
end function
real(8) function fmin88(x, y) result (res)
real(8), intent (in) :: x
real(8), intent (in) :: y
res = merge(y, merge(x, min(x, y), y /= y), x /= x)
end function
real(4) function fmin44(x, y) result (res)
real(4), intent (in) :: x
real(4), intent (in) :: y
res = merge(y, merge(x, min(x, y), y /= y), x /= x)
end function
real(8) function fmin84(x, y) result(res)
real(8), intent (in) :: x
real(4), intent (in) :: y
res = merge(dble(y), merge(x, min(x, dble(y)), y /= y), x /= x)
end function
real(8) function fmin48(x, y) result(res)
real(4), intent (in) :: x
real(8), intent (in) :: y
res = merge(y, merge(dble(x), min(dble(x), y), y /= y), x /= x)
end function
end module
real(8) function code(x)
use fmin_fmax_functions
real(8), intent (in) :: x
code = exp(x) / (exp(x) - 1.0d0)
end function
public static double code(double x) {
return Math.exp(x) / (Math.exp(x) - 1.0);
}
def code(x): return math.exp(x) / (math.exp(x) - 1.0)
function code(x) return Float64(exp(x) / Float64(exp(x) - 1.0)) end
function tmp = code(x) tmp = exp(x) / (exp(x) - 1.0); end
code[x_] := N[(N[Exp[x], $MachinePrecision] / N[(N[Exp[x], $MachinePrecision] - 1.0), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{e^{x}}{e^{x} - 1}
\end{array}
(FPCore (x) :precision binary64 (/ (exp x) (expm1 x)))
double code(double x) {
return exp(x) / expm1(x);
}
public static double code(double x) {
return Math.exp(x) / Math.expm1(x);
}
def code(x): return math.exp(x) / math.expm1(x)
function code(x) return Float64(exp(x) / expm1(x)) end
code[x_] := N[(N[Exp[x], $MachinePrecision] / N[(Exp[x] - 1), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{e^{x}}{\mathsf{expm1}\left(x\right)}
\end{array}
Initial program 35.9%
lift--.f64N/A
unpow1N/A
metadata-evalN/A
sqrt-pow1N/A
pow2N/A
rem-sqrt-square-revN/A
rem-sqrt-square-revN/A
pow2N/A
sqrt-pow1N/A
metadata-evalN/A
unpow1N/A
lift-exp.f64N/A
lower-expm1.f64100.0
Applied rewrites100.0%
(FPCore (x) :precision binary64 (if (<= (exp x) 1e-23) (pow (* (* (fma 0.16666666666666666 x -0.5) x) x) -1.0) (fma 0.08333333333333333 x (- (pow x -1.0) -0.5))))
double code(double x) {
double tmp;
if (exp(x) <= 1e-23) {
tmp = pow(((fma(0.16666666666666666, x, -0.5) * x) * x), -1.0);
} else {
tmp = fma(0.08333333333333333, x, (pow(x, -1.0) - -0.5));
}
return tmp;
}
function code(x) tmp = 0.0 if (exp(x) <= 1e-23) tmp = Float64(Float64(fma(0.16666666666666666, x, -0.5) * x) * x) ^ -1.0; else tmp = fma(0.08333333333333333, x, Float64((x ^ -1.0) - -0.5)); end return tmp end
code[x_] := If[LessEqual[N[Exp[x], $MachinePrecision], 1e-23], N[Power[N[(N[(N[(0.16666666666666666 * x + -0.5), $MachinePrecision] * x), $MachinePrecision] * x), $MachinePrecision], -1.0], $MachinePrecision], N[(0.08333333333333333 * x + N[(N[Power[x, -1.0], $MachinePrecision] - -0.5), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;e^{x} \leq 10^{-23}:\\
\;\;\;\;{\left(\left(\mathsf{fma}\left(0.16666666666666666, x, -0.5\right) \cdot x\right) \cdot x\right)}^{-1}\\
\mathbf{else}:\\
\;\;\;\;\mathsf{fma}\left(0.08333333333333333, x, {x}^{-1} - -0.5\right)\\
\end{array}
\end{array}
if (exp.f64 x) < 9.9999999999999996e-24
Initial program 100.0%
lift-/.f64N/A
lift-exp.f64N/A
sinh-+-cosh-revN/A
flip-+N/A
sinh-coshN/A
sinh---cosh-revN/A
associate-/l/N/A
lower-/.f64N/A
lower-*.f64N/A
lower-exp.f64N/A
lower-neg.f64100.0
lift--.f64N/A
unpow1N/A
metadata-evalN/A
sqrt-pow1N/A
pow2N/A
rem-sqrt-square-revN/A
rem-sqrt-square-revN/A
pow2N/A
sqrt-pow1N/A
metadata-evalN/A
unpow1N/A
lift-exp.f64N/A
lower-expm1.f64100.0
Applied rewrites100.0%
Taylor expanded in x around 0
*-commutativeN/A
lower-*.f64N/A
fp-cancel-sign-sub-invN/A
fp-cancel-sub-sign-invN/A
+-commutativeN/A
remove-double-negN/A
*-commutativeN/A
lower-fma.f64N/A
lower--.f64N/A
lower-*.f6474.9
Applied rewrites74.9%
Taylor expanded in x around inf
Applied rewrites74.9%
if 9.9999999999999996e-24 < (exp.f64 x) Initial program 5.7%
Taylor expanded in x around 0
distribute-lft-inN/A
*-commutativeN/A
associate-+r+N/A
div-addN/A
*-commutativeN/A
associate-/l*N/A
*-inversesN/A
*-rgt-identityN/A
+-commutativeN/A
lower-fma.f64N/A
*-commutativeN/A
fp-cancel-sign-sub-invN/A
div-subN/A
associate-*r/N/A
distribute-lft-neg-outN/A
associate-/l*N/A
*-commutativeN/A
*-commutativeN/A
*-rgt-identityN/A
times-fracN/A
*-inversesN/A
metadata-evalN/A
metadata-evalN/A
Applied rewrites99.4%
Final simplification91.6%
(FPCore (x) :precision binary64 (if (<= (exp x) 1e-23) (pow (* (* (* 0.16666666666666666 x) x) x) -1.0) (fma 0.08333333333333333 x (- (pow x -1.0) -0.5))))
double code(double x) {
double tmp;
if (exp(x) <= 1e-23) {
tmp = pow((((0.16666666666666666 * x) * x) * x), -1.0);
} else {
tmp = fma(0.08333333333333333, x, (pow(x, -1.0) - -0.5));
}
return tmp;
}
function code(x) tmp = 0.0 if (exp(x) <= 1e-23) tmp = Float64(Float64(Float64(0.16666666666666666 * x) * x) * x) ^ -1.0; else tmp = fma(0.08333333333333333, x, Float64((x ^ -1.0) - -0.5)); end return tmp end
code[x_] := If[LessEqual[N[Exp[x], $MachinePrecision], 1e-23], N[Power[N[(N[(N[(0.16666666666666666 * x), $MachinePrecision] * x), $MachinePrecision] * x), $MachinePrecision], -1.0], $MachinePrecision], N[(0.08333333333333333 * x + N[(N[Power[x, -1.0], $MachinePrecision] - -0.5), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;e^{x} \leq 10^{-23}:\\
\;\;\;\;{\left(\left(\left(0.16666666666666666 \cdot x\right) \cdot x\right) \cdot x\right)}^{-1}\\
\mathbf{else}:\\
\;\;\;\;\mathsf{fma}\left(0.08333333333333333, x, {x}^{-1} - -0.5\right)\\
\end{array}
\end{array}
if (exp.f64 x) < 9.9999999999999996e-24Initial program 100.0%
lift-/.f64N/A
lift-exp.f64N/A
sinh-+-cosh-revN/A
flip-+N/A
sinh-coshN/A
sinh---cosh-revN/A
associate-/l/N/A
lower-/.f64N/A
lower-*.f64N/A
lower-exp.f64N/A
lower-neg.f64100.0
lift--.f64N/A
unpow1N/A
metadata-evalN/A
sqrt-pow1N/A
pow2N/A
rem-sqrt-square-revN/A
rem-sqrt-square-revN/A
pow2N/A
sqrt-pow1N/A
metadata-evalN/A
unpow1N/A
lift-exp.f64N/A
lower-expm1.f64100.0
Applied rewrites100.0%
Taylor expanded in x around 0
*-commutativeN/A
lower-*.f64N/A
fp-cancel-sign-sub-invN/A
fp-cancel-sub-sign-invN/A
+-commutativeN/A
remove-double-negN/A
*-commutativeN/A
lower-fma.f64N/A
lower--.f64N/A
lower-*.f6474.9
Applied rewrites74.9%
Taylor expanded in x around inf
Applied rewrites74.9%
if 9.9999999999999996e-24 < (exp.f64 x) Initial program 5.7%
Taylor expanded in x around 0
distribute-lft-inN/A
*-commutativeN/A
associate-+r+N/A
div-addN/A
*-commutativeN/A
associate-/l*N/A
*-inversesN/A
*-rgt-identityN/A
+-commutativeN/A
lower-fma.f64N/A
*-commutativeN/A
fp-cancel-sign-sub-invN/A
div-subN/A
associate-*r/N/A
distribute-lft-neg-outN/A
associate-/l*N/A
*-commutativeN/A
*-commutativeN/A
*-rgt-identityN/A
times-fracN/A
*-inversesN/A
metadata-evalN/A
metadata-evalN/A
Applied rewrites99.4%
Final simplification91.6%
(FPCore (x)
:precision binary64
(if (<= x -3.7)
(/ (exp x) (- (+ 1.0 x) 1.0))
(fma
(fma (* x x) -0.001388888888888889 0.08333333333333333)
x
(- (pow x -1.0) -0.5))))
double code(double x) {
double tmp;
if (x <= -3.7) {
tmp = exp(x) / ((1.0 + x) - 1.0);
} else {
tmp = fma(fma((x * x), -0.001388888888888889, 0.08333333333333333), x, (pow(x, -1.0) - -0.5));
}
return tmp;
}
function code(x) tmp = 0.0 if (x <= -3.7) tmp = Float64(exp(x) / Float64(Float64(1.0 + x) - 1.0)); else tmp = fma(fma(Float64(x * x), -0.001388888888888889, 0.08333333333333333), x, Float64((x ^ -1.0) - -0.5)); end return tmp end
code[x_] := If[LessEqual[x, -3.7], N[(N[Exp[x], $MachinePrecision] / N[(N[(1.0 + x), $MachinePrecision] - 1.0), $MachinePrecision]), $MachinePrecision], N[(N[(N[(x * x), $MachinePrecision] * -0.001388888888888889 + 0.08333333333333333), $MachinePrecision] * x + N[(N[Power[x, -1.0], $MachinePrecision] - -0.5), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;x \leq -3.7:\\
\;\;\;\;\frac{e^{x}}{\left(1 + x\right) - 1}\\
\mathbf{else}:\\
\;\;\;\;\mathsf{fma}\left(\mathsf{fma}\left(x \cdot x, -0.001388888888888889, 0.08333333333333333\right), x, {x}^{-1} - -0.5\right)\\
\end{array}
\end{array}
if x < -3.7000000000000002Initial program 100.0%
Taylor expanded in x around 0
lower-+.f6497.9
Applied rewrites97.9%
if -3.7000000000000002 < x Initial program 5.7%
lift-/.f64N/A
lift-exp.f64N/A
sinh-+-cosh-revN/A
flip-+N/A
sinh-coshN/A
sinh---cosh-revN/A
associate-/l/N/A
lower-/.f64N/A
lower-*.f64N/A
lower-exp.f64N/A
lower-neg.f645.7
lift--.f64N/A
unpow1N/A
metadata-evalN/A
sqrt-pow1N/A
pow2N/A
rem-sqrt-square-revN/A
rem-sqrt-square-revN/A
pow2N/A
sqrt-pow1N/A
metadata-evalN/A
unpow1N/A
lift-exp.f64N/A
lower-expm1.f64100.0
Applied rewrites100.0%
Taylor expanded in x around 0
Applied rewrites99.8%
Final simplification99.2%
(FPCore (x) :precision binary64 (pow (* (fma (- (* (fma -0.041666666666666664 x 0.16666666666666666) x) 0.5) x 1.0) x) -1.0))
double code(double x) {
return pow((fma(((fma(-0.041666666666666664, x, 0.16666666666666666) * x) - 0.5), x, 1.0) * x), -1.0);
}
function code(x) return Float64(fma(Float64(Float64(fma(-0.041666666666666664, x, 0.16666666666666666) * x) - 0.5), x, 1.0) * x) ^ -1.0 end
code[x_] := N[Power[N[(N[(N[(N[(N[(-0.041666666666666664 * x + 0.16666666666666666), $MachinePrecision] * x), $MachinePrecision] - 0.5), $MachinePrecision] * x + 1.0), $MachinePrecision] * x), $MachinePrecision], -1.0], $MachinePrecision]
\begin{array}{l}
\\
{\left(\mathsf{fma}\left(\mathsf{fma}\left(-0.041666666666666664, x, 0.16666666666666666\right) \cdot x - 0.5, x, 1\right) \cdot x\right)}^{-1}
\end{array}
Initial program 35.9%
lift-/.f64N/A
lift-exp.f64N/A
sinh-+-cosh-revN/A
flip-+N/A
sinh-coshN/A
sinh---cosh-revN/A
associate-/l/N/A
lower-/.f64N/A
lower-*.f64N/A
lower-exp.f64N/A
lower-neg.f6435.9
lift--.f64N/A
unpow1N/A
metadata-evalN/A
sqrt-pow1N/A
pow2N/A
rem-sqrt-square-revN/A
rem-sqrt-square-revN/A
pow2N/A
sqrt-pow1N/A
metadata-evalN/A
unpow1N/A
lift-exp.f64N/A
lower-expm1.f64100.0
Applied rewrites100.0%
Taylor expanded in x around 0
*-commutativeN/A
lower-*.f64N/A
fp-cancel-sign-sub-invN/A
fp-cancel-sub-sign-invN/A
+-commutativeN/A
remove-double-negN/A
*-commutativeN/A
lower-fma.f64N/A
lower--.f64N/A
*-commutativeN/A
lower-*.f64N/A
+-commutativeN/A
lower-fma.f6493.7
Applied rewrites93.7%
Final simplification93.7%
(FPCore (x) :precision binary64 (pow (* (fma (- (* (* -0.041666666666666664 x) x) 0.5) x 1.0) x) -1.0))
double code(double x) {
return pow((fma((((-0.041666666666666664 * x) * x) - 0.5), x, 1.0) * x), -1.0);
}
function code(x) return Float64(fma(Float64(Float64(Float64(-0.041666666666666664 * x) * x) - 0.5), x, 1.0) * x) ^ -1.0 end
code[x_] := N[Power[N[(N[(N[(N[(N[(-0.041666666666666664 * x), $MachinePrecision] * x), $MachinePrecision] - 0.5), $MachinePrecision] * x + 1.0), $MachinePrecision] * x), $MachinePrecision], -1.0], $MachinePrecision]
\begin{array}{l}
\\
{\left(\mathsf{fma}\left(\left(-0.041666666666666664 \cdot x\right) \cdot x - 0.5, x, 1\right) \cdot x\right)}^{-1}
\end{array}
Initial program 35.9%
lift-/.f64N/A
lift-exp.f64N/A
sinh-+-cosh-revN/A
flip-+N/A
sinh-coshN/A
sinh---cosh-revN/A
associate-/l/N/A
lower-/.f64N/A
lower-*.f64N/A
lower-exp.f64N/A
lower-neg.f6435.9
lift--.f64N/A
unpow1N/A
metadata-evalN/A
sqrt-pow1N/A
pow2N/A
rem-sqrt-square-revN/A
rem-sqrt-square-revN/A
pow2N/A
sqrt-pow1N/A
metadata-evalN/A
unpow1N/A
lift-exp.f64N/A
lower-expm1.f64100.0
Applied rewrites100.0%
Taylor expanded in x around 0
*-commutativeN/A
lower-*.f64N/A
fp-cancel-sign-sub-invN/A
fp-cancel-sub-sign-invN/A
+-commutativeN/A
remove-double-negN/A
*-commutativeN/A
lower-fma.f64N/A
lower--.f64N/A
*-commutativeN/A
lower-*.f64N/A
+-commutativeN/A
lower-fma.f6493.7
Applied rewrites93.7%
Taylor expanded in x around inf
Applied rewrites93.3%
Final simplification93.3%
(FPCore (x) :precision binary64 (pow (* (fma (* (* -0.041666666666666664 x) x) x 1.0) x) -1.0))
double code(double x) {
return pow((fma(((-0.041666666666666664 * x) * x), x, 1.0) * x), -1.0);
}
function code(x) return Float64(fma(Float64(Float64(-0.041666666666666664 * x) * x), x, 1.0) * x) ^ -1.0 end
code[x_] := N[Power[N[(N[(N[(N[(-0.041666666666666664 * x), $MachinePrecision] * x), $MachinePrecision] * x + 1.0), $MachinePrecision] * x), $MachinePrecision], -1.0], $MachinePrecision]
\begin{array}{l}
\\
{\left(\mathsf{fma}\left(\left(-0.041666666666666664 \cdot x\right) \cdot x, x, 1\right) \cdot x\right)}^{-1}
\end{array}
Initial program 35.9%
lift-/.f64N/A
lift-exp.f64N/A
sinh-+-cosh-revN/A
flip-+N/A
sinh-coshN/A
sinh---cosh-revN/A
associate-/l/N/A
lower-/.f64N/A
lower-*.f64N/A
lower-exp.f64N/A
lower-neg.f6435.9
lift--.f64N/A
unpow1N/A
metadata-evalN/A
sqrt-pow1N/A
pow2N/A
rem-sqrt-square-revN/A
rem-sqrt-square-revN/A
pow2N/A
sqrt-pow1N/A
metadata-evalN/A
unpow1N/A
lift-exp.f64N/A
lower-expm1.f64100.0
Applied rewrites100.0%
Taylor expanded in x around 0
*-commutativeN/A
lower-*.f64N/A
fp-cancel-sign-sub-invN/A
fp-cancel-sub-sign-invN/A
+-commutativeN/A
remove-double-negN/A
*-commutativeN/A
lower-fma.f64N/A
lower--.f64N/A
*-commutativeN/A
lower-*.f64N/A
+-commutativeN/A
lower-fma.f6493.7
Applied rewrites93.7%
Taylor expanded in x around inf
Applied rewrites92.8%
Final simplification92.8%
(FPCore (x) :precision binary64 (pow (* (fma (fma 0.16666666666666666 x -0.5) x 1.0) x) -1.0))
double code(double x) {
return pow((fma(fma(0.16666666666666666, x, -0.5), x, 1.0) * x), -1.0);
}
function code(x) return Float64(fma(fma(0.16666666666666666, x, -0.5), x, 1.0) * x) ^ -1.0 end
code[x_] := N[Power[N[(N[(N[(0.16666666666666666 * x + -0.5), $MachinePrecision] * x + 1.0), $MachinePrecision] * x), $MachinePrecision], -1.0], $MachinePrecision]
\begin{array}{l}
\\
{\left(\mathsf{fma}\left(\mathsf{fma}\left(0.16666666666666666, x, -0.5\right), x, 1\right) \cdot x\right)}^{-1}
\end{array}
Initial program 35.9%
lift-/.f64N/A
lift-exp.f64N/A
sinh-+-cosh-revN/A
flip-+N/A
sinh-coshN/A
sinh---cosh-revN/A
associate-/l/N/A
lower-/.f64N/A
lower-*.f64N/A
lower-exp.f64N/A
lower-neg.f6435.9
lift--.f64N/A
unpow1N/A
metadata-evalN/A
sqrt-pow1N/A
pow2N/A
rem-sqrt-square-revN/A
rem-sqrt-square-revN/A
pow2N/A
sqrt-pow1N/A
metadata-evalN/A
unpow1N/A
lift-exp.f64N/A
lower-expm1.f64100.0
Applied rewrites100.0%
Taylor expanded in x around 0
*-commutativeN/A
lower-*.f64N/A
fp-cancel-sign-sub-invN/A
fp-cancel-sub-sign-invN/A
+-commutativeN/A
remove-double-negN/A
*-commutativeN/A
lower-fma.f64N/A
lower--.f64N/A
*-commutativeN/A
lower-*.f64N/A
+-commutativeN/A
lower-fma.f6493.7
Applied rewrites93.7%
Taylor expanded in x around 0
*-commutativeN/A
lower-*.f64N/A
Applied rewrites91.3%
Final simplification91.3%
(FPCore (x) :precision binary64 (if (<= x -4.6) (pow (* (* -0.5 x) x) -1.0) (fma 0.08333333333333333 x (- (pow x -1.0) -0.5))))
double code(double x) {
double tmp;
if (x <= -4.6) {
tmp = pow(((-0.5 * x) * x), -1.0);
} else {
tmp = fma(0.08333333333333333, x, (pow(x, -1.0) - -0.5));
}
return tmp;
}
function code(x) tmp = 0.0 if (x <= -4.6) tmp = Float64(Float64(-0.5 * x) * x) ^ -1.0; else tmp = fma(0.08333333333333333, x, Float64((x ^ -1.0) - -0.5)); end return tmp end
code[x_] := If[LessEqual[x, -4.6], N[Power[N[(N[(-0.5 * x), $MachinePrecision] * x), $MachinePrecision], -1.0], $MachinePrecision], N[(0.08333333333333333 * x + N[(N[Power[x, -1.0], $MachinePrecision] - -0.5), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;x \leq -4.6:\\
\;\;\;\;{\left(\left(-0.5 \cdot x\right) \cdot x\right)}^{-1}\\
\mathbf{else}:\\
\;\;\;\;\mathsf{fma}\left(0.08333333333333333, x, {x}^{-1} - -0.5\right)\\
\end{array}
\end{array}
if x < -4.5999999999999996Initial program 100.0%
lift-/.f64N/A
lift-exp.f64N/A
sinh-+-cosh-revN/A
flip-+N/A
sinh-coshN/A
sinh---cosh-revN/A
associate-/l/N/A
lower-/.f64N/A
lower-*.f64N/A
lower-exp.f64N/A
lower-neg.f64100.0
lift--.f64N/A
unpow1N/A
metadata-evalN/A
sqrt-pow1N/A
pow2N/A
rem-sqrt-square-revN/A
rem-sqrt-square-revN/A
pow2N/A
sqrt-pow1N/A
metadata-evalN/A
unpow1N/A
lift-exp.f64N/A
lower-expm1.f64100.0
Applied rewrites100.0%
Taylor expanded in x around 0
*-commutativeN/A
lower-*.f64N/A
+-commutativeN/A
lower-fma.f6454.1
Applied rewrites54.1%
Taylor expanded in x around inf
Applied rewrites54.1%
if -4.5999999999999996 < x Initial program 5.7%
Taylor expanded in x around 0
distribute-lft-inN/A
*-commutativeN/A
associate-+r+N/A
div-addN/A
*-commutativeN/A
associate-/l*N/A
*-inversesN/A
*-rgt-identityN/A
+-commutativeN/A
lower-fma.f64N/A
*-commutativeN/A
fp-cancel-sign-sub-invN/A
div-subN/A
associate-*r/N/A
distribute-lft-neg-outN/A
associate-/l*N/A
*-commutativeN/A
*-commutativeN/A
*-rgt-identityN/A
times-fracN/A
*-inversesN/A
metadata-evalN/A
metadata-evalN/A
Applied rewrites99.4%
Final simplification84.9%
(FPCore (x) :precision binary64 (pow (* (fma -0.5 x 1.0) x) -1.0))
double code(double x) {
return pow((fma(-0.5, x, 1.0) * x), -1.0);
}
function code(x) return Float64(fma(-0.5, x, 1.0) * x) ^ -1.0 end
code[x_] := N[Power[N[(N[(-0.5 * x + 1.0), $MachinePrecision] * x), $MachinePrecision], -1.0], $MachinePrecision]
\begin{array}{l}
\\
{\left(\mathsf{fma}\left(-0.5, x, 1\right) \cdot x\right)}^{-1}
\end{array}
Initial program 35.9%
lift-/.f64N/A
lift-exp.f64N/A
sinh-+-cosh-revN/A
flip-+N/A
sinh-coshN/A
sinh---cosh-revN/A
associate-/l/N/A
lower-/.f64N/A
lower-*.f64N/A
lower-exp.f64N/A
lower-neg.f6435.9
lift--.f64N/A
unpow1N/A
metadata-evalN/A
sqrt-pow1N/A
pow2N/A
rem-sqrt-square-revN/A
rem-sqrt-square-revN/A
pow2N/A
sqrt-pow1N/A
metadata-evalN/A
unpow1N/A
lift-exp.f64N/A
lower-expm1.f64100.0
Applied rewrites100.0%
Taylor expanded in x around 0
*-commutativeN/A
lower-*.f64N/A
+-commutativeN/A
lower-fma.f6484.4
Applied rewrites84.4%
Final simplification84.4%
(FPCore (x) :precision binary64 (pow x -1.0))
double code(double x) {
return pow(x, -1.0);
}
module fmin_fmax_functions
implicit none
private
public fmax
public fmin
interface fmax
module procedure fmax88
module procedure fmax44
module procedure fmax84
module procedure fmax48
end interface
interface fmin
module procedure fmin88
module procedure fmin44
module procedure fmin84
module procedure fmin48
end interface
contains
real(8) function fmax88(x, y) result (res)
real(8), intent (in) :: x
real(8), intent (in) :: y
res = merge(y, merge(x, max(x, y), y /= y), x /= x)
end function
real(4) function fmax44(x, y) result (res)
real(4), intent (in) :: x
real(4), intent (in) :: y
res = merge(y, merge(x, max(x, y), y /= y), x /= x)
end function
real(8) function fmax84(x, y) result(res)
real(8), intent (in) :: x
real(4), intent (in) :: y
res = merge(dble(y), merge(x, max(x, dble(y)), y /= y), x /= x)
end function
real(8) function fmax48(x, y) result(res)
real(4), intent (in) :: x
real(8), intent (in) :: y
res = merge(y, merge(dble(x), max(dble(x), y), y /= y), x /= x)
end function
real(8) function fmin88(x, y) result (res)
real(8), intent (in) :: x
real(8), intent (in) :: y
res = merge(y, merge(x, min(x, y), y /= y), x /= x)
end function
real(4) function fmin44(x, y) result (res)
real(4), intent (in) :: x
real(4), intent (in) :: y
res = merge(y, merge(x, min(x, y), y /= y), x /= x)
end function
real(8) function fmin84(x, y) result(res)
real(8), intent (in) :: x
real(4), intent (in) :: y
res = merge(dble(y), merge(x, min(x, dble(y)), y /= y), x /= x)
end function
real(8) function fmin48(x, y) result(res)
real(4), intent (in) :: x
real(8), intent (in) :: y
res = merge(y, merge(dble(x), min(dble(x), y), y /= y), x /= x)
end function
end module
real(8) function code(x)
use fmin_fmax_functions
real(8), intent (in) :: x
code = x ** (-1.0d0)
end function
public static double code(double x) {
return Math.pow(x, -1.0);
}
def code(x): return math.pow(x, -1.0)
function code(x) return x ^ -1.0 end
function tmp = code(x) tmp = x ^ -1.0; end
code[x_] := N[Power[x, -1.0], $MachinePrecision]
\begin{array}{l}
\\
{x}^{-1}
\end{array}
Initial program 35.9%
Taylor expanded in x around 0
lower-/.f6468.4
Applied rewrites68.4%
Final simplification68.4%
(FPCore (x) :precision binary64 (* 0.08333333333333333 x))
double code(double x) {
return 0.08333333333333333 * x;
}
module fmin_fmax_functions
implicit none
private
public fmax
public fmin
interface fmax
module procedure fmax88
module procedure fmax44
module procedure fmax84
module procedure fmax48
end interface
interface fmin
module procedure fmin88
module procedure fmin44
module procedure fmin84
module procedure fmin48
end interface
contains
real(8) function fmax88(x, y) result (res)
real(8), intent (in) :: x
real(8), intent (in) :: y
res = merge(y, merge(x, max(x, y), y /= y), x /= x)
end function
real(4) function fmax44(x, y) result (res)
real(4), intent (in) :: x
real(4), intent (in) :: y
res = merge(y, merge(x, max(x, y), y /= y), x /= x)
end function
real(8) function fmax84(x, y) result(res)
real(8), intent (in) :: x
real(4), intent (in) :: y
res = merge(dble(y), merge(x, max(x, dble(y)), y /= y), x /= x)
end function
real(8) function fmax48(x, y) result(res)
real(4), intent (in) :: x
real(8), intent (in) :: y
res = merge(y, merge(dble(x), max(dble(x), y), y /= y), x /= x)
end function
real(8) function fmin88(x, y) result (res)
real(8), intent (in) :: x
real(8), intent (in) :: y
res = merge(y, merge(x, min(x, y), y /= y), x /= x)
end function
real(4) function fmin44(x, y) result (res)
real(4), intent (in) :: x
real(4), intent (in) :: y
res = merge(y, merge(x, min(x, y), y /= y), x /= x)
end function
real(8) function fmin84(x, y) result(res)
real(8), intent (in) :: x
real(4), intent (in) :: y
res = merge(dble(y), merge(x, min(x, dble(y)), y /= y), x /= x)
end function
real(8) function fmin48(x, y) result(res)
real(4), intent (in) :: x
real(8), intent (in) :: y
res = merge(y, merge(dble(x), min(dble(x), y), y /= y), x /= x)
end function
end module
real(8) function code(x)
use fmin_fmax_functions
real(8), intent (in) :: x
code = 0.08333333333333333d0 * x
end function
public static double code(double x) {
return 0.08333333333333333 * x;
}
def code(x): return 0.08333333333333333 * x
function code(x) return Float64(0.08333333333333333 * x) end
function tmp = code(x) tmp = 0.08333333333333333 * x; end
code[x_] := N[(0.08333333333333333 * x), $MachinePrecision]
\begin{array}{l}
\\
0.08333333333333333 \cdot x
\end{array}
Initial program 35.9%
Taylor expanded in x around 0
distribute-lft-inN/A
*-commutativeN/A
associate-+r+N/A
div-addN/A
*-commutativeN/A
associate-/l*N/A
*-inversesN/A
*-rgt-identityN/A
+-commutativeN/A
lower-fma.f64N/A
*-commutativeN/A
fp-cancel-sign-sub-invN/A
div-subN/A
associate-*r/N/A
distribute-lft-neg-outN/A
associate-/l*N/A
*-commutativeN/A
*-commutativeN/A
*-rgt-identityN/A
times-fracN/A
*-inversesN/A
metadata-evalN/A
metadata-evalN/A
Applied rewrites68.3%
Taylor expanded in x around inf
Applied rewrites3.3%
(FPCore (x) :precision binary64 0.5)
double code(double x) {
return 0.5;
}
module fmin_fmax_functions
implicit none
private
public fmax
public fmin
interface fmax
module procedure fmax88
module procedure fmax44
module procedure fmax84
module procedure fmax48
end interface
interface fmin
module procedure fmin88
module procedure fmin44
module procedure fmin84
module procedure fmin48
end interface
contains
real(8) function fmax88(x, y) result (res)
real(8), intent (in) :: x
real(8), intent (in) :: y
res = merge(y, merge(x, max(x, y), y /= y), x /= x)
end function
real(4) function fmax44(x, y) result (res)
real(4), intent (in) :: x
real(4), intent (in) :: y
res = merge(y, merge(x, max(x, y), y /= y), x /= x)
end function
real(8) function fmax84(x, y) result(res)
real(8), intent (in) :: x
real(4), intent (in) :: y
res = merge(dble(y), merge(x, max(x, dble(y)), y /= y), x /= x)
end function
real(8) function fmax48(x, y) result(res)
real(4), intent (in) :: x
real(8), intent (in) :: y
res = merge(y, merge(dble(x), max(dble(x), y), y /= y), x /= x)
end function
real(8) function fmin88(x, y) result (res)
real(8), intent (in) :: x
real(8), intent (in) :: y
res = merge(y, merge(x, min(x, y), y /= y), x /= x)
end function
real(4) function fmin44(x, y) result (res)
real(4), intent (in) :: x
real(4), intent (in) :: y
res = merge(y, merge(x, min(x, y), y /= y), x /= x)
end function
real(8) function fmin84(x, y) result(res)
real(8), intent (in) :: x
real(4), intent (in) :: y
res = merge(dble(y), merge(x, min(x, dble(y)), y /= y), x /= x)
end function
real(8) function fmin48(x, y) result(res)
real(4), intent (in) :: x
real(8), intent (in) :: y
res = merge(y, merge(dble(x), min(dble(x), y), y /= y), x /= x)
end function
end module
real(8) function code(x)
use fmin_fmax_functions
real(8), intent (in) :: x
code = 0.5d0
end function
public static double code(double x) {
return 0.5;
}
def code(x): return 0.5
function code(x) return 0.5 end
function tmp = code(x) tmp = 0.5; end
code[x_] := 0.5
\begin{array}{l}
\\
0.5
\end{array}
Initial program 35.9%
Taylor expanded in x around 0
*-commutativeN/A
fp-cancel-sign-sub-invN/A
div-subN/A
associate-*r/N/A
distribute-lft-neg-outN/A
associate-/l*N/A
*-commutativeN/A
*-commutativeN/A
*-rgt-identityN/A
times-fracN/A
*-inversesN/A
metadata-evalN/A
metadata-evalN/A
lower--.f64N/A
lower-/.f64N/A
metadata-eval68.1
Applied rewrites68.1%
Taylor expanded in x around inf
Applied rewrites3.3%
(FPCore (x) :precision binary64 (/ (- 1.0) (expm1 (- x))))
double code(double x) {
return -1.0 / expm1(-x);
}
public static double code(double x) {
return -1.0 / Math.expm1(-x);
}
def code(x): return -1.0 / math.expm1(-x)
function code(x) return Float64(Float64(-1.0) / expm1(Float64(-x))) end
code[x_] := N[((-1.0) / N[(Exp[(-x)] - 1), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{-1}{\mathsf{expm1}\left(-x\right)}
\end{array}
herbie shell --seed 2024351
(FPCore (x)
:name "expq2 (section 3.11)"
:precision binary64
:pre (> 710.0 x)
:alt
(! :herbie-platform default (/ (- 1) (expm1 (- x))))
(/ (exp x) (- (exp x) 1.0)))