
(FPCore (wj x) :precision binary64 (let* ((t_0 (* wj (exp wj)))) (- wj (/ (- t_0 x) (+ (exp wj) t_0)))))
double code(double wj, double x) {
double t_0 = wj * exp(wj);
return wj - ((t_0 - x) / (exp(wj) + t_0));
}
!> NaN-aware fmax/fmin generics for mixed real(4)/real(8) arguments.
!> Unlike the bare intrinsics, these follow C99 fmax/fmin semantics:
!> if exactly one argument is NaN, the non-NaN argument is returned.
!> NaN is detected with the (v /= v) trick; NOTE(review): ieee_is_nan from
!> ieee_arithmetic would be clearer but needs an extra use statement.
module fmin_fmax_functions
implicit none
private
public fmax
public fmin
! Generic names dispatch on the kind pattern of the two arguments
! (88 = both real(8), 44 = both real(4), 84/48 = mixed; mixed returns real(8)).
interface fmax
module procedure fmax88
module procedure fmax44
module procedure fmax84
module procedure fmax48
end interface
interface fmin
module procedure fmin88
module procedure fmin44
module procedure fmin84
module procedure fmin48
end interface
contains
! fmax for two real(8): y if x is NaN, else x if y is NaN, else max(x, y).
real(8) function fmax88(x, y) result (res)
real(8), intent (in) :: x
real(8), intent (in) :: y
res = merge(y, merge(x, max(x, y), y /= y), x /= x)
end function
! fmax for two real(4); same NaN-handling as fmax88.
real(4) function fmax44(x, y) result (res)
real(4), intent (in) :: x
real(4), intent (in) :: y
res = merge(y, merge(x, max(x, y), y /= y), x /= x)
end function
! fmax for real(8)/real(4); y is widened with dble so max compares in real(8).
real(8) function fmax84(x, y) result(res)
real(8), intent (in) :: x
real(4), intent (in) :: y
res = merge(dble(y), merge(x, max(x, dble(y)), y /= y), x /= x)
end function
! fmax for real(4)/real(8); x is widened with dble so max compares in real(8).
real(8) function fmax48(x, y) result(res)
real(4), intent (in) :: x
real(8), intent (in) :: y
res = merge(y, merge(dble(x), max(dble(x), y), y /= y), x /= x)
end function
! fmin for two real(8): y if x is NaN, else x if y is NaN, else min(x, y).
real(8) function fmin88(x, y) result (res)
real(8), intent (in) :: x
real(8), intent (in) :: y
res = merge(y, merge(x, min(x, y), y /= y), x /= x)
end function
! fmin for two real(4); same NaN-handling as fmin88.
real(4) function fmin44(x, y) result (res)
real(4), intent (in) :: x
real(4), intent (in) :: y
res = merge(y, merge(x, min(x, y), y /= y), x /= x)
end function
! fmin for real(8)/real(4); y widened to real(8).
real(8) function fmin84(x, y) result(res)
real(8), intent (in) :: x
real(4), intent (in) :: y
res = merge(dble(y), merge(x, min(x, dble(y)), y /= y), x /= x)
end function
! fmin for real(4)/real(8); x widened to real(8).
real(8) function fmin48(x, y) result(res)
real(4), intent (in) :: x
real(8), intent (in) :: y
res = merge(y, merge(dble(x), min(dble(x), y), y /= y), x /= x)
end function
end module
! One Newton step for f(w) = w*exp(w) - x (Lambert-W iteration):
! result = wj - f(wj)/f'(wj), with f'(w) = exp(w) + w*exp(w).
! exp(wj) is evaluated twice; both calls yield the same value.
real(8) function code(wj, x)
use fmin_fmax_functions
real(8), intent (in) :: wj
real(8), intent (in) :: x
real(8) :: t_0
t_0 = wj * exp(wj)
code = wj - ((t_0 - x) / (exp(wj) + t_0))
end function
/**
 * One Newton step for f(w) = w*exp(w) - x (Lambert-W iteration):
 * returns wj - f(wj)/f'(wj) where f'(w) = exp(w) + w*exp(w).
 */
public static double code(double wj, double x) {
    final double expW = Math.exp(wj);
    final double prod = wj * expW;    // w * e^w, the first term of f
    return wj - ((prod - x) / (expW + prod));
}
def code(wj, x): t_0 = wj * math.exp(wj) return wj - ((t_0 - x) / (math.exp(wj) + t_0))
function code(wj, x) t_0 = Float64(wj * exp(wj)) return Float64(wj - Float64(Float64(t_0 - x) / Float64(exp(wj) + t_0))) end
function tmp = code(wj, x) t_0 = wj * exp(wj); tmp = wj - ((t_0 - x) / (exp(wj) + t_0)); end
code[wj_, x_] := Block[{t$95$0 = N[(wj * N[Exp[wj], $MachinePrecision]), $MachinePrecision]}, N[(wj - N[(N[(t$95$0 - x), $MachinePrecision] / N[(N[Exp[wj], $MachinePrecision] + t$95$0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := wj \cdot e^{wj}\\
wj - \frac{t\_0 - x}{e^{wj} + t\_0}
\end{array}
\end{array}
Sampling outcomes in binary64 precision:
Herbie found 15 alternatives:
| Alternative | Accuracy | Speedup |
|---|---|---|
(FPCore (wj x) :precision binary64 (let* ((t_0 (* wj (exp wj)))) (- wj (/ (- t_0 x) (+ (exp wj) t_0)))))
double code(double wj, double x) {
double t_0 = wj * exp(wj);
return wj - ((t_0 - x) / (exp(wj) + t_0));
}
module fmin_fmax_functions
implicit none
private
public fmax
public fmin
interface fmax
module procedure fmax88
module procedure fmax44
module procedure fmax84
module procedure fmax48
end interface
interface fmin
module procedure fmin88
module procedure fmin44
module procedure fmin84
module procedure fmin48
end interface
contains
real(8) function fmax88(x, y) result (res)
real(8), intent (in) :: x
real(8), intent (in) :: y
res = merge(y, merge(x, max(x, y), y /= y), x /= x)
end function
real(4) function fmax44(x, y) result (res)
real(4), intent (in) :: x
real(4), intent (in) :: y
res = merge(y, merge(x, max(x, y), y /= y), x /= x)
end function
real(8) function fmax84(x, y) result(res)
real(8), intent (in) :: x
real(4), intent (in) :: y
res = merge(dble(y), merge(x, max(x, dble(y)), y /= y), x /= x)
end function
real(8) function fmax48(x, y) result(res)
real(4), intent (in) :: x
real(8), intent (in) :: y
res = merge(y, merge(dble(x), max(dble(x), y), y /= y), x /= x)
end function
real(8) function fmin88(x, y) result (res)
real(8), intent (in) :: x
real(8), intent (in) :: y
res = merge(y, merge(x, min(x, y), y /= y), x /= x)
end function
real(4) function fmin44(x, y) result (res)
real(4), intent (in) :: x
real(4), intent (in) :: y
res = merge(y, merge(x, min(x, y), y /= y), x /= x)
end function
real(8) function fmin84(x, y) result(res)
real(8), intent (in) :: x
real(4), intent (in) :: y
res = merge(dble(y), merge(x, min(x, dble(y)), y /= y), x /= x)
end function
real(8) function fmin48(x, y) result(res)
real(4), intent (in) :: x
real(8), intent (in) :: y
res = merge(y, merge(dble(x), min(dble(x), y), y /= y), x /= x)
end function
end module
real(8) function code(wj, x)
use fmin_fmax_functions
real(8), intent (in) :: wj
real(8), intent (in) :: x
real(8) :: t_0
t_0 = wj * exp(wj)
code = wj - ((t_0 - x) / (exp(wj) + t_0))
end function
public static double code(double wj, double x) {
double t_0 = wj * Math.exp(wj);
return wj - ((t_0 - x) / (Math.exp(wj) + t_0));
}
def code(wj, x): t_0 = wj * math.exp(wj) return wj - ((t_0 - x) / (math.exp(wj) + t_0))
function code(wj, x) t_0 = Float64(wj * exp(wj)) return Float64(wj - Float64(Float64(t_0 - x) / Float64(exp(wj) + t_0))) end
function tmp = code(wj, x) t_0 = wj * exp(wj); tmp = wj - ((t_0 - x) / (exp(wj) + t_0)); end
code[wj_, x_] := Block[{t$95$0 = N[(wj * N[Exp[wj], $MachinePrecision]), $MachinePrecision]}, N[(wj - N[(N[(t$95$0 - x), $MachinePrecision] / N[(N[Exp[wj], $MachinePrecision] + t$95$0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := wj \cdot e^{wj}\\
wj - \frac{t\_0 - x}{e^{wj} + t\_0}
\end{array}
\end{array}
(FPCore (wj x)
:precision binary64
(if (<= wj -3.8e-6)
(- wj (/ (/ (- (* (exp wj) wj) x) (- wj -1.0)) (exp wj)))
(fma
(fma
(-
(- 1.0 (fma (fma -3.0 x (fma 0.6666666666666666 x (* x 5.0))) wj wj))
(* -2.5 x))
wj
(* -2.0 x))
wj
x)))
/* Herbie alternative: regime split on wj.
 * wj <= -3.8e-6: algebraically re-associated Newton step, dividing by
 *   (wj + 1) and exp(wj) separately instead of their product.
 * otherwise: fma-based polynomial in wj (Taylor expansion about wj = 0 per
 *   the trace below). Operation order is accuracy-tuned — do not refactor. */
double code(double wj, double x) {
double tmp;
if (wj <= -3.8e-6) {
tmp = wj - ((((exp(wj) * wj) - x) / (wj - -1.0)) / exp(wj));
} else {
tmp = fma(fma(((1.0 - fma(fma(-3.0, x, fma(0.6666666666666666, x, (x * 5.0))), wj, wj)) - (-2.5 * x)), wj, (-2.0 * x)), wj, x);
}
return tmp;
}
function code(wj, x) tmp = 0.0 if (wj <= -3.8e-6) tmp = Float64(wj - Float64(Float64(Float64(Float64(exp(wj) * wj) - x) / Float64(wj - -1.0)) / exp(wj))); else tmp = fma(fma(Float64(Float64(1.0 - fma(fma(-3.0, x, fma(0.6666666666666666, x, Float64(x * 5.0))), wj, wj)) - Float64(-2.5 * x)), wj, Float64(-2.0 * x)), wj, x); end return tmp end
code[wj_, x_] := If[LessEqual[wj, -3.8e-6], N[(wj - N[(N[(N[(N[(N[Exp[wj], $MachinePrecision] * wj), $MachinePrecision] - x), $MachinePrecision] / N[(wj - -1.0), $MachinePrecision]), $MachinePrecision] / N[Exp[wj], $MachinePrecision]), $MachinePrecision]), $MachinePrecision], N[(N[(N[(N[(1.0 - N[(N[(-3.0 * x + N[(0.6666666666666666 * x + N[(x * 5.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] * wj + wj), $MachinePrecision]), $MachinePrecision] - N[(-2.5 * x), $MachinePrecision]), $MachinePrecision] * wj + N[(-2.0 * x), $MachinePrecision]), $MachinePrecision] * wj + x), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;wj \leq -3.8 \cdot 10^{-6}:\\
\;\;\;\;wj - \frac{\frac{e^{wj} \cdot wj - x}{wj - -1}}{e^{wj}}\\
\mathbf{else}:\\
\;\;\;\;\mathsf{fma}\left(\mathsf{fma}\left(\left(1 - \mathsf{fma}\left(\mathsf{fma}\left(-3, x, \mathsf{fma}\left(0.6666666666666666, x, x \cdot 5\right)\right), wj, wj\right)\right) - -2.5 \cdot x, wj, -2 \cdot x\right), wj, x\right)\\
\end{array}
\end{array}
if wj < -3.8e-6: Initial program 55.4%
lift-/.f64N/A
lift-+.f64N/A
lift-*.f64N/A
distribute-rgt1-inN/A
associate-/r*N/A
lower-/.f64N/A
lower-/.f64N/A
lift-*.f64N/A
*-commutativeN/A
lower-*.f64N/A
metadata-evalN/A
fp-cancel-sign-sub-invN/A
metadata-evalN/A
metadata-evalN/A
metadata-evalN/A
lower--.f64N/A
metadata-eval100.0
Applied rewrites100.0%
if -3.8e-6 < wj Initial program 82.8%
Taylor expanded in wj around 0
+-commutativeN/A
*-commutativeN/A
lower-fma.f64N/A
Applied rewrites98.4%
(FPCore (wj x) :precision binary64 (let* ((t_0 (* wj (exp wj))) (t_1 (- wj (/ (- t_0 x) (+ (exp wj) t_0))))) (if (or (<= t_1 -1e-248) (not (<= t_1 0.0))) (- wj (- x)) (* wj wj))))
/* Herbie alternative: the full Newton step t_1 is computed only to pick a
 * regime; the returned value is a crude surrogate (wj + x or wj*wj).
 * Note !(t_1 <= 0.0) is also true when t_1 is NaN, so NaN falls into the
 * first branch. Accuracy-tuned — do not refactor the arithmetic. */
double code(double wj, double x) {
double t_0 = wj * exp(wj);
double t_1 = wj - ((t_0 - x) / (exp(wj) + t_0));
double tmp;
if ((t_1 <= -1e-248) || !(t_1 <= 0.0)) {
tmp = wj - -x;
} else {
tmp = wj * wj;
}
return tmp;
}
module fmin_fmax_functions
implicit none
private
public fmax
public fmin
interface fmax
module procedure fmax88
module procedure fmax44
module procedure fmax84
module procedure fmax48
end interface
interface fmin
module procedure fmin88
module procedure fmin44
module procedure fmin84
module procedure fmin48
end interface
contains
real(8) function fmax88(x, y) result (res)
real(8), intent (in) :: x
real(8), intent (in) :: y
res = merge(y, merge(x, max(x, y), y /= y), x /= x)
end function
real(4) function fmax44(x, y) result (res)
real(4), intent (in) :: x
real(4), intent (in) :: y
res = merge(y, merge(x, max(x, y), y /= y), x /= x)
end function
real(8) function fmax84(x, y) result(res)
real(8), intent (in) :: x
real(4), intent (in) :: y
res = merge(dble(y), merge(x, max(x, dble(y)), y /= y), x /= x)
end function
real(8) function fmax48(x, y) result(res)
real(4), intent (in) :: x
real(8), intent (in) :: y
res = merge(y, merge(dble(x), max(dble(x), y), y /= y), x /= x)
end function
real(8) function fmin88(x, y) result (res)
real(8), intent (in) :: x
real(8), intent (in) :: y
res = merge(y, merge(x, min(x, y), y /= y), x /= x)
end function
real(4) function fmin44(x, y) result (res)
real(4), intent (in) :: x
real(4), intent (in) :: y
res = merge(y, merge(x, min(x, y), y /= y), x /= x)
end function
real(8) function fmin84(x, y) result(res)
real(8), intent (in) :: x
real(4), intent (in) :: y
res = merge(dble(y), merge(x, min(x, dble(y)), y /= y), x /= x)
end function
real(8) function fmin48(x, y) result(res)
real(4), intent (in) :: x
real(8), intent (in) :: y
res = merge(y, merge(dble(x), min(dble(x), y), y /= y), x /= x)
end function
end module
! Herbie alternative: the full Newton step t_1 is computed only to select a
! regime; the result is a surrogate (wj + x or wj*wj). The .not. (t_1 <= 0)
! test is also true for NaN, routing NaN into the first branch.
real(8) function code(wj, x)
use fmin_fmax_functions
real(8), intent (in) :: wj
real(8), intent (in) :: x
real(8) :: t_0
real(8) :: t_1
real(8) :: tmp
t_0 = wj * exp(wj)
t_1 = wj - ((t_0 - x) / (exp(wj) + t_0))
if ((t_1 <= (-1d-248)) .or. (.not. (t_1 <= 0.0d0))) then
tmp = wj - -x
else
tmp = wj * wj
end if
code = tmp
end function
public static double code(double wj, double x) {
double t_0 = wj * Math.exp(wj);
double t_1 = wj - ((t_0 - x) / (Math.exp(wj) + t_0));
double tmp;
if ((t_1 <= -1e-248) || !(t_1 <= 0.0)) {
tmp = wj - -x;
} else {
tmp = wj * wj;
}
return tmp;
}
def code(wj, x): t_0 = wj * math.exp(wj) t_1 = wj - ((t_0 - x) / (math.exp(wj) + t_0)) tmp = 0 if (t_1 <= -1e-248) or not (t_1 <= 0.0): tmp = wj - -x else: tmp = wj * wj return tmp
function code(wj, x) t_0 = Float64(wj * exp(wj)) t_1 = Float64(wj - Float64(Float64(t_0 - x) / Float64(exp(wj) + t_0))) tmp = 0.0 if ((t_1 <= -1e-248) || !(t_1 <= 0.0)) tmp = Float64(wj - Float64(-x)); else tmp = Float64(wj * wj); end return tmp end
function tmp_2 = code(wj, x) t_0 = wj * exp(wj); t_1 = wj - ((t_0 - x) / (exp(wj) + t_0)); tmp = 0.0; if ((t_1 <= -1e-248) || ~((t_1 <= 0.0))) tmp = wj - -x; else tmp = wj * wj; end tmp_2 = tmp; end
code[wj_, x_] := Block[{t$95$0 = N[(wj * N[Exp[wj], $MachinePrecision]), $MachinePrecision]}, Block[{t$95$1 = N[(wj - N[(N[(t$95$0 - x), $MachinePrecision] / N[(N[Exp[wj], $MachinePrecision] + t$95$0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]}, If[Or[LessEqual[t$95$1, -1e-248], N[Not[LessEqual[t$95$1, 0.0]], $MachinePrecision]], N[(wj - (-x)), $MachinePrecision], N[(wj * wj), $MachinePrecision]]]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := wj \cdot e^{wj}\\
t_1 := wj - \frac{t\_0 - x}{e^{wj} + t\_0}\\
\mathbf{if}\;t\_1 \leq -1 \cdot 10^{-248} \lor \neg \left(t\_1 \leq 0\right):\\
\;\;\;\;wj - \left(-x\right)\\
\mathbf{else}:\\
\;\;\;\;wj \cdot wj\\
\end{array}
\end{array}
if (-.f64 wj (/.f64 (-.f64 (*.f64 wj (exp.f64 wj)) x) (+.f64 (exp.f64 wj) (*.f64 wj (exp.f64 wj))))) < -9.9999999999999998e-249 or 0.0 < (-.f64 wj (/.f64 (-.f64 (*.f64 wj (exp.f64 wj)) x) (+.f64 (exp.f64 wj) (*.f64 wj (exp.f64 wj))))) Initial program 96.0%
Taylor expanded in wj around 0
mul-1-negN/A
lower-neg.f6490.2
Applied rewrites90.2%
if -9.9999999999999998e-249 < (-.f64 wj (/.f64 (-.f64 (*.f64 wj (exp.f64 wj)) x) (+.f64 (exp.f64 wj) (*.f64 wj (exp.f64 wj))))) < 0.0Initial program 5.4%
Taylor expanded in wj around 0
+-commutativeN/A
*-commutativeN/A
lower-fma.f64N/A
fp-cancel-sub-sign-invN/A
metadata-evalN/A
*-commutativeN/A
lower-fma.f64N/A
lower--.f64N/A
distribute-rgt-outN/A
*-commutativeN/A
lower-*.f64N/A
metadata-evalN/A
lower-*.f64100.0
Applied rewrites100.0%
Taylor expanded in x around 0
Applied rewrites59.0%
Final simplification85.4%
(FPCore (wj x)
:precision binary64
(let* ((t_0 (* wj (exp wj))))
(if (<= (- wj (/ (- t_0 x) (+ (exp wj) t_0))) -2e-154)
(- wj 1.0)
(* wj wj))))
/* Herbie alternative: the Newton-step value is used only as a regime test;
 * the result is a surrogate (wj - 1 when the step is <= -2e-154, else wj*wj).
 * Accuracy-tuned — do not refactor the arithmetic. */
double code(double wj, double x) {
double t_0 = wj * exp(wj);
double tmp;
if ((wj - ((t_0 - x) / (exp(wj) + t_0))) <= -2e-154) {
tmp = wj - 1.0;
} else {
tmp = wj * wj;
}
return tmp;
}
module fmin_fmax_functions
implicit none
private
public fmax
public fmin
interface fmax
module procedure fmax88
module procedure fmax44
module procedure fmax84
module procedure fmax48
end interface
interface fmin
module procedure fmin88
module procedure fmin44
module procedure fmin84
module procedure fmin48
end interface
contains
real(8) function fmax88(x, y) result (res)
real(8), intent (in) :: x
real(8), intent (in) :: y
res = merge(y, merge(x, max(x, y), y /= y), x /= x)
end function
real(4) function fmax44(x, y) result (res)
real(4), intent (in) :: x
real(4), intent (in) :: y
res = merge(y, merge(x, max(x, y), y /= y), x /= x)
end function
real(8) function fmax84(x, y) result(res)
real(8), intent (in) :: x
real(4), intent (in) :: y
res = merge(dble(y), merge(x, max(x, dble(y)), y /= y), x /= x)
end function
real(8) function fmax48(x, y) result(res)
real(4), intent (in) :: x
real(8), intent (in) :: y
res = merge(y, merge(dble(x), max(dble(x), y), y /= y), x /= x)
end function
real(8) function fmin88(x, y) result (res)
real(8), intent (in) :: x
real(8), intent (in) :: y
res = merge(y, merge(x, min(x, y), y /= y), x /= x)
end function
real(4) function fmin44(x, y) result (res)
real(4), intent (in) :: x
real(4), intent (in) :: y
res = merge(y, merge(x, min(x, y), y /= y), x /= x)
end function
real(8) function fmin84(x, y) result(res)
real(8), intent (in) :: x
real(4), intent (in) :: y
res = merge(dble(y), merge(x, min(x, dble(y)), y /= y), x /= x)
end function
real(8) function fmin48(x, y) result(res)
real(4), intent (in) :: x
real(8), intent (in) :: y
res = merge(y, merge(dble(x), min(dble(x), y), y /= y), x /= x)
end function
end module
! Herbie alternative: the Newton-step value is used only as a regime test;
! the result is a surrogate (wj - 1 when the step <= -2d-154, else wj*wj).
real(8) function code(wj, x)
use fmin_fmax_functions
real(8), intent (in) :: wj
real(8), intent (in) :: x
real(8) :: t_0
real(8) :: tmp
t_0 = wj * exp(wj)
if ((wj - ((t_0 - x) / (exp(wj) + t_0))) <= (-2d-154)) then
tmp = wj - 1.0d0
else
tmp = wj * wj
end if
code = tmp
end function
public static double code(double wj, double x) {
double t_0 = wj * Math.exp(wj);
double tmp;
if ((wj - ((t_0 - x) / (Math.exp(wj) + t_0))) <= -2e-154) {
tmp = wj - 1.0;
} else {
tmp = wj * wj;
}
return tmp;
}
def code(wj, x): t_0 = wj * math.exp(wj) tmp = 0 if (wj - ((t_0 - x) / (math.exp(wj) + t_0))) <= -2e-154: tmp = wj - 1.0 else: tmp = wj * wj return tmp
function code(wj, x) t_0 = Float64(wj * exp(wj)) tmp = 0.0 if (Float64(wj - Float64(Float64(t_0 - x) / Float64(exp(wj) + t_0))) <= -2e-154) tmp = Float64(wj - 1.0); else tmp = Float64(wj * wj); end return tmp end
function tmp_2 = code(wj, x) t_0 = wj * exp(wj); tmp = 0.0; if ((wj - ((t_0 - x) / (exp(wj) + t_0))) <= -2e-154) tmp = wj - 1.0; else tmp = wj * wj; end tmp_2 = tmp; end
code[wj_, x_] := Block[{t$95$0 = N[(wj * N[Exp[wj], $MachinePrecision]), $MachinePrecision]}, If[LessEqual[N[(wj - N[(N[(t$95$0 - x), $MachinePrecision] / N[(N[Exp[wj], $MachinePrecision] + t$95$0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], -2e-154], N[(wj - 1.0), $MachinePrecision], N[(wj * wj), $MachinePrecision]]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := wj \cdot e^{wj}\\
\mathbf{if}\;wj - \frac{t\_0 - x}{e^{wj} + t\_0} \leq -2 \cdot 10^{-154}:\\
\;\;\;\;wj - 1\\
\mathbf{else}:\\
\;\;\;\;wj \cdot wj\\
\end{array}
\end{array}
if (-.f64 wj (/.f64 (-.f64 (*.f64 wj (exp.f64 wj)) x) (+.f64 (exp.f64 wj) (*.f64 wj (exp.f64 wj))))) < -1.9999999999999999e-154Initial program 99.0%
Taylor expanded in x around inf
associate-*r/N/A
distribute-rgt1-inN/A
+-commutativeN/A
associate-/r*N/A
mul-1-negN/A
distribute-frac-negN/A
distribute-neg-fracN/A
distribute-neg-frac2N/A
mul-1-negN/A
lower-/.f64N/A
lower-/.f64N/A
lower-+.f64N/A
mul-1-negN/A
lower-neg.f64N/A
lower-exp.f6496.7
Applied rewrites96.7%
Taylor expanded in wj around inf
Applied rewrites6.4%
if -1.9999999999999999e-154 < (-.f64 wj (/.f64 (-.f64 (*.f64 wj (exp.f64 wj)) x) (+.f64 (exp.f64 wj) (*.f64 wj (exp.f64 wj))))) Initial program 70.9%
Taylor expanded in wj around 0
+-commutativeN/A
*-commutativeN/A
lower-fma.f64N/A
fp-cancel-sub-sign-invN/A
metadata-evalN/A
*-commutativeN/A
lower-fma.f64N/A
lower--.f64N/A
distribute-rgt-outN/A
*-commutativeN/A
lower-*.f64N/A
metadata-evalN/A
lower-*.f6495.9
Applied rewrites95.9%
Taylor expanded in x around 0
Applied rewrites18.7%
(FPCore (wj x)
:precision binary64
(if (<= wj -4.2e-6)
(- wj (/ (- (* wj (exp wj)) x) (* (- wj -1.0) (exp wj))))
(fma
(fma
(-
(- 1.0 (fma (fma -3.0 x (fma 0.6666666666666666 x (* x 5.0))) wj wj))
(* -2.5 x))
wj
(* -2.0 x))
wj
x)))
/* Herbie alternative: for wj <= -4.2e-6, Newton step with the denominator
 * factored as (wj + 1)*exp(wj); otherwise the fma polynomial about wj = 0.
 * Accuracy-tuned operation order — do not refactor. */
double code(double wj, double x) {
double tmp;
if (wj <= -4.2e-6) {
tmp = wj - (((wj * exp(wj)) - x) / ((wj - -1.0) * exp(wj)));
} else {
tmp = fma(fma(((1.0 - fma(fma(-3.0, x, fma(0.6666666666666666, x, (x * 5.0))), wj, wj)) - (-2.5 * x)), wj, (-2.0 * x)), wj, x);
}
return tmp;
}
function code(wj, x) tmp = 0.0 if (wj <= -4.2e-6) tmp = Float64(wj - Float64(Float64(Float64(wj * exp(wj)) - x) / Float64(Float64(wj - -1.0) * exp(wj)))); else tmp = fma(fma(Float64(Float64(1.0 - fma(fma(-3.0, x, fma(0.6666666666666666, x, Float64(x * 5.0))), wj, wj)) - Float64(-2.5 * x)), wj, Float64(-2.0 * x)), wj, x); end return tmp end
code[wj_, x_] := If[LessEqual[wj, -4.2e-6], N[(wj - N[(N[(N[(wj * N[Exp[wj], $MachinePrecision]), $MachinePrecision] - x), $MachinePrecision] / N[(N[(wj - -1.0), $MachinePrecision] * N[Exp[wj], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], N[(N[(N[(N[(1.0 - N[(N[(-3.0 * x + N[(0.6666666666666666 * x + N[(x * 5.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] * wj + wj), $MachinePrecision]), $MachinePrecision] - N[(-2.5 * x), $MachinePrecision]), $MachinePrecision] * wj + N[(-2.0 * x), $MachinePrecision]), $MachinePrecision] * wj + x), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;wj \leq -4.2 \cdot 10^{-6}:\\
\;\;\;\;wj - \frac{wj \cdot e^{wj} - x}{\left(wj - -1\right) \cdot e^{wj}}\\
\mathbf{else}:\\
\;\;\;\;\mathsf{fma}\left(\mathsf{fma}\left(\left(1 - \mathsf{fma}\left(\mathsf{fma}\left(-3, x, \mathsf{fma}\left(0.6666666666666666, x, x \cdot 5\right)\right), wj, wj\right)\right) - -2.5 \cdot x, wj, -2 \cdot x\right), wj, x\right)\\
\end{array}
\end{array}
if wj < -4.1999999999999996e-6Initial program 55.4%
lift-+.f64N/A
lift-*.f64N/A
distribute-rgt1-inN/A
lower-*.f64N/A
metadata-evalN/A
fp-cancel-sign-sub-invN/A
metadata-evalN/A
metadata-evalN/A
metadata-evalN/A
lower--.f64N/A
metadata-eval99.8
Applied rewrites99.8%
if -4.1999999999999996e-6 < wj Initial program 82.8%
Taylor expanded in wj around 0
+-commutativeN/A
*-commutativeN/A
lower-fma.f64N/A
Applied rewrites98.4%
(FPCore (wj x)
:precision binary64
(if (<= wj -0.00225)
(- wj (/ (/ x (+ 1.0 wj)) (- (exp wj))))
(fma
(fma
(-
(- 1.0 (fma (fma -3.0 x (fma 0.6666666666666666 x (* x 5.0))) wj wj))
(* -2.5 x))
wj
(* -2.0 x))
wj
x)))
/* Herbie alternative: for wj <= -0.00225, a series form (Taylor in x around
 * inf per the trace below) that drops the wj*exp(wj) numerator term;
 * otherwise the fma polynomial about wj = 0. Do not refactor. */
double code(double wj, double x) {
double tmp;
if (wj <= -0.00225) {
tmp = wj - ((x / (1.0 + wj)) / -exp(wj));
} else {
tmp = fma(fma(((1.0 - fma(fma(-3.0, x, fma(0.6666666666666666, x, (x * 5.0))), wj, wj)) - (-2.5 * x)), wj, (-2.0 * x)), wj, x);
}
return tmp;
}
function code(wj, x) tmp = 0.0 if (wj <= -0.00225) tmp = Float64(wj - Float64(Float64(x / Float64(1.0 + wj)) / Float64(-exp(wj)))); else tmp = fma(fma(Float64(Float64(1.0 - fma(fma(-3.0, x, fma(0.6666666666666666, x, Float64(x * 5.0))), wj, wj)) - Float64(-2.5 * x)), wj, Float64(-2.0 * x)), wj, x); end return tmp end
code[wj_, x_] := If[LessEqual[wj, -0.00225], N[(wj - N[(N[(x / N[(1.0 + wj), $MachinePrecision]), $MachinePrecision] / (-N[Exp[wj], $MachinePrecision])), $MachinePrecision]), $MachinePrecision], N[(N[(N[(N[(1.0 - N[(N[(-3.0 * x + N[(0.6666666666666666 * x + N[(x * 5.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] * wj + wj), $MachinePrecision]), $MachinePrecision] - N[(-2.5 * x), $MachinePrecision]), $MachinePrecision] * wj + N[(-2.0 * x), $MachinePrecision]), $MachinePrecision] * wj + x), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;wj \leq -0.00225:\\
\;\;\;\;wj - \frac{\frac{x}{1 + wj}}{-e^{wj}}\\
\mathbf{else}:\\
\;\;\;\;\mathsf{fma}\left(\mathsf{fma}\left(\left(1 - \mathsf{fma}\left(\mathsf{fma}\left(-3, x, \mathsf{fma}\left(0.6666666666666666, x, x \cdot 5\right)\right), wj, wj\right)\right) - -2.5 \cdot x, wj, -2 \cdot x\right), wj, x\right)\\
\end{array}
\end{array}
if wj < -0.00224999999999999983Initial program 55.4%
Taylor expanded in x around inf
associate-*r/N/A
distribute-rgt1-inN/A
+-commutativeN/A
associate-/r*N/A
mul-1-negN/A
distribute-frac-negN/A
distribute-neg-fracN/A
distribute-neg-frac2N/A
mul-1-negN/A
lower-/.f64N/A
lower-/.f64N/A
lower-+.f64N/A
mul-1-negN/A
lower-neg.f64N/A
lower-exp.f6473.6
Applied rewrites73.6%
if -0.00224999999999999983 < wj Initial program 82.8%
Taylor expanded in wj around 0
+-commutativeN/A
*-commutativeN/A
lower-fma.f64N/A
Applied rewrites98.4%
(FPCore (wj x)
:precision binary64
(if (<= wj -0.00225)
(- wj (/ x (* (- -1.0 wj) (exp wj))))
(fma
(fma
(-
(- 1.0 (fma (fma -3.0 x (fma 0.6666666666666666 x (* x 5.0))) wj wj))
(* -2.5 x))
wj
(* -2.0 x))
wj
x)))
/* Herbie alternative: same regimes as the previous variant, but the negative
 * branch folds the sign into the denominator: x / ((-1 - wj)*exp(wj)).
 * Accuracy-tuned operation order — do not refactor. */
double code(double wj, double x) {
double tmp;
if (wj <= -0.00225) {
tmp = wj - (x / ((-1.0 - wj) * exp(wj)));
} else {
tmp = fma(fma(((1.0 - fma(fma(-3.0, x, fma(0.6666666666666666, x, (x * 5.0))), wj, wj)) - (-2.5 * x)), wj, (-2.0 * x)), wj, x);
}
return tmp;
}
function code(wj, x) tmp = 0.0 if (wj <= -0.00225) tmp = Float64(wj - Float64(x / Float64(Float64(-1.0 - wj) * exp(wj)))); else tmp = fma(fma(Float64(Float64(1.0 - fma(fma(-3.0, x, fma(0.6666666666666666, x, Float64(x * 5.0))), wj, wj)) - Float64(-2.5 * x)), wj, Float64(-2.0 * x)), wj, x); end return tmp end
code[wj_, x_] := If[LessEqual[wj, -0.00225], N[(wj - N[(x / N[(N[(-1.0 - wj), $MachinePrecision] * N[Exp[wj], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], N[(N[(N[(N[(1.0 - N[(N[(-3.0 * x + N[(0.6666666666666666 * x + N[(x * 5.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] * wj + wj), $MachinePrecision]), $MachinePrecision] - N[(-2.5 * x), $MachinePrecision]), $MachinePrecision] * wj + N[(-2.0 * x), $MachinePrecision]), $MachinePrecision] * wj + x), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;wj \leq -0.00225:\\
\;\;\;\;wj - \frac{x}{\left(-1 - wj\right) \cdot e^{wj}}\\
\mathbf{else}:\\
\;\;\;\;\mathsf{fma}\left(\mathsf{fma}\left(\left(1 - \mathsf{fma}\left(\mathsf{fma}\left(-3, x, \mathsf{fma}\left(0.6666666666666666, x, x \cdot 5\right)\right), wj, wj\right)\right) - -2.5 \cdot x, wj, -2 \cdot x\right), wj, x\right)\\
\end{array}
\end{array}
if wj < -0.00224999999999999983Initial program 55.4%
Taylor expanded in x around inf
associate-*r/N/A
distribute-rgt1-inN/A
+-commutativeN/A
associate-/r*N/A
mul-1-negN/A
distribute-frac-negN/A
distribute-neg-fracN/A
distribute-neg-frac2N/A
mul-1-negN/A
lower-/.f64N/A
lower-/.f64N/A
lower-+.f64N/A
mul-1-negN/A
lower-neg.f64N/A
lower-exp.f6473.6
Applied rewrites73.6%
Applied rewrites73.6%
if -0.00224999999999999983 < wj Initial program 82.8%
Taylor expanded in wj around 0
+-commutativeN/A
*-commutativeN/A
lower-fma.f64N/A
Applied rewrites98.4%
Final simplification97.5%
(FPCore (wj x)
:precision binary64
(if (<= wj -1.0)
(- wj (/ x (* (- wj) (exp wj))))
(fma
(fma
(-
(- 1.0 (fma (fma -3.0 x (fma 0.6666666666666666 x (* x 5.0))) wj wj))
(* -2.5 x))
wj
(* -2.0 x))
wj
x)))
/* Herbie alternative: for wj <= -1, approximate (1 + wj) by wj in the
 * denominator (Taylor in wj around inf per the trace below), giving
 * x / (-wj * exp(wj)); otherwise the fma polynomial about wj = 0. */
double code(double wj, double x) {
double tmp;
if (wj <= -1.0) {
tmp = wj - (x / (-wj * exp(wj)));
} else {
tmp = fma(fma(((1.0 - fma(fma(-3.0, x, fma(0.6666666666666666, x, (x * 5.0))), wj, wj)) - (-2.5 * x)), wj, (-2.0 * x)), wj, x);
}
return tmp;
}
function code(wj, x) tmp = 0.0 if (wj <= -1.0) tmp = Float64(wj - Float64(x / Float64(Float64(-wj) * exp(wj)))); else tmp = fma(fma(Float64(Float64(1.0 - fma(fma(-3.0, x, fma(0.6666666666666666, x, Float64(x * 5.0))), wj, wj)) - Float64(-2.5 * x)), wj, Float64(-2.0 * x)), wj, x); end return tmp end
code[wj_, x_] := If[LessEqual[wj, -1.0], N[(wj - N[(x / N[((-wj) * N[Exp[wj], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], N[(N[(N[(N[(1.0 - N[(N[(-3.0 * x + N[(0.6666666666666666 * x + N[(x * 5.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] * wj + wj), $MachinePrecision]), $MachinePrecision] - N[(-2.5 * x), $MachinePrecision]), $MachinePrecision] * wj + N[(-2.0 * x), $MachinePrecision]), $MachinePrecision] * wj + x), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;wj \leq -1:\\
\;\;\;\;wj - \frac{x}{\left(-wj\right) \cdot e^{wj}}\\
\mathbf{else}:\\
\;\;\;\;\mathsf{fma}\left(\mathsf{fma}\left(\left(1 - \mathsf{fma}\left(\mathsf{fma}\left(-3, x, \mathsf{fma}\left(0.6666666666666666, x, x \cdot 5\right)\right), wj, wj\right)\right) - -2.5 \cdot x, wj, -2 \cdot x\right), wj, x\right)\\
\end{array}
\end{array}
if wj < -1Initial program 49.8%
Taylor expanded in x around inf
associate-*r/N/A
distribute-rgt1-inN/A
+-commutativeN/A
associate-/r*N/A
mul-1-negN/A
distribute-frac-negN/A
distribute-neg-fracN/A
distribute-neg-frac2N/A
mul-1-negN/A
lower-/.f64N/A
lower-/.f64N/A
lower-+.f64N/A
mul-1-negN/A
lower-neg.f64N/A
lower-exp.f6470.3
Applied rewrites70.3%
Applied rewrites70.3%
Taylor expanded in wj around inf
Applied rewrites62.8%
if -1 < wj Initial program 82.9%
Taylor expanded in wj around 0
+-commutativeN/A
*-commutativeN/A
lower-fma.f64N/A
Applied rewrites98.1%
(FPCore (wj x)
:precision binary64
(fma
(fma
(-
(- 1.0 (fma (fma -3.0 x (fma 0.6666666666666666 x (* x 5.0))) wj wj))
(* -2.5 x))
wj
(* -2.0 x))
wj
x))
/* Herbie alternative: unconditional fma polynomial in wj and x (Taylor
 * expansion about wj = 0 per the trace below). The nested fma ordering is
 * accuracy-tuned — do not re-associate. */
double code(double wj, double x) {
return fma(fma(((1.0 - fma(fma(-3.0, x, fma(0.6666666666666666, x, (x * 5.0))), wj, wj)) - (-2.5 * x)), wj, (-2.0 * x)), wj, x);
}
function code(wj, x) return fma(fma(Float64(Float64(1.0 - fma(fma(-3.0, x, fma(0.6666666666666666, x, Float64(x * 5.0))), wj, wj)) - Float64(-2.5 * x)), wj, Float64(-2.0 * x)), wj, x) end
code[wj_, x_] := N[(N[(N[(N[(1.0 - N[(N[(-3.0 * x + N[(0.6666666666666666 * x + N[(x * 5.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] * wj + wj), $MachinePrecision]), $MachinePrecision] - N[(-2.5 * x), $MachinePrecision]), $MachinePrecision] * wj + N[(-2.0 * x), $MachinePrecision]), $MachinePrecision] * wj + x), $MachinePrecision]
\begin{array}{l}
\\
\mathsf{fma}\left(\mathsf{fma}\left(\left(1 - \mathsf{fma}\left(\mathsf{fma}\left(-3, x, \mathsf{fma}\left(0.6666666666666666, x, x \cdot 5\right)\right), wj, wj\right)\right) - -2.5 \cdot x, wj, -2 \cdot x\right), wj, x\right)
\end{array}
Initial program 81.9%
Taylor expanded in wj around 0
+-commutativeN/A
*-commutativeN/A
lower-fma.f64N/A
Applied rewrites95.1%
(FPCore (wj x) :precision binary64 (fma (* (fma wj (+ (fma -2.6666666666666665 wj 2.5) (/ (- 1.0 wj) x)) -2.0) x) wj x))
/* Herbie alternative: polynomial form with a (1 - wj)/x correction term.
 * NOTE(review): divides by x, so x == 0 yields inf/NaN here — presumably
 * acceptable on the sampled domain; confirm against the report's regime.
 * Accuracy-tuned fma ordering — do not refactor. */
double code(double wj, double x) {
return fma((fma(wj, (fma(-2.6666666666666665, wj, 2.5) + ((1.0 - wj) / x)), -2.0) * x), wj, x);
}
function code(wj, x) return fma(Float64(fma(wj, Float64(fma(-2.6666666666666665, wj, 2.5) + Float64(Float64(1.0 - wj) / x)), -2.0) * x), wj, x) end
code[wj_, x_] := N[(N[(N[(wj * N[(N[(-2.6666666666666665 * wj + 2.5), $MachinePrecision] + N[(N[(1.0 - wj), $MachinePrecision] / x), $MachinePrecision]), $MachinePrecision] + -2.0), $MachinePrecision] * x), $MachinePrecision] * wj + x), $MachinePrecision]
\begin{array}{l}
\\
\mathsf{fma}\left(\mathsf{fma}\left(wj, \mathsf{fma}\left(-2.6666666666666665, wj, 2.5\right) + \frac{1 - wj}{x}, -2\right) \cdot x, wj, x\right)
\end{array}
Initial program 81.9%
Taylor expanded in wj around 0
+-commutativeN/A
*-commutativeN/A
lower-fma.f64N/A
Applied rewrites95.1%
Taylor expanded in x around 0
Applied rewrites94.9%
Taylor expanded in x around inf
Applied rewrites95.1%
Taylor expanded in x around inf
Applied rewrites95.1%
(FPCore (wj x) :precision binary64 (fma (fma (- 1.0 (* -2.5 x)) wj (* -2.0 x)) wj x))
/* Herbie alternative: truncated polynomial x + wj*(-2x + wj*(1 + 2.5x)),
 * evaluated with nested fma. Accuracy-tuned — do not re-associate. */
double code(double wj, double x) {
return fma(fma((1.0 - (-2.5 * x)), wj, (-2.0 * x)), wj, x);
}
function code(wj, x) return fma(fma(Float64(1.0 - Float64(-2.5 * x)), wj, Float64(-2.0 * x)), wj, x) end
code[wj_, x_] := N[(N[(N[(1.0 - N[(-2.5 * x), $MachinePrecision]), $MachinePrecision] * wj + N[(-2.0 * x), $MachinePrecision]), $MachinePrecision] * wj + x), $MachinePrecision]
\begin{array}{l}
\\
\mathsf{fma}\left(\mathsf{fma}\left(1 - -2.5 \cdot x, wj, -2 \cdot x\right), wj, x\right)
\end{array}
Initial program 81.9%
Taylor expanded in wj around 0
+-commutativeN/A
*-commutativeN/A
lower-fma.f64N/A
fp-cancel-sub-sign-invN/A
metadata-evalN/A
*-commutativeN/A
lower-fma.f64N/A
lower--.f64N/A
distribute-rgt-outN/A
*-commutativeN/A
lower-*.f64N/A
metadata-evalN/A
lower-*.f6494.9
Applied rewrites94.9%
(FPCore (wj x) :precision binary64 (fma (fma (- 1.0 wj) wj (* -2.0 x)) wj x))
/* Herbie alternative: x + wj*(-2x + wj*(1 - wj)), evaluated with nested fma.
 * Accuracy-tuned — do not re-associate. */
double code(double wj, double x) {
return fma(fma((1.0 - wj), wj, (-2.0 * x)), wj, x);
}
function code(wj, x) return fma(fma(Float64(1.0 - wj), wj, Float64(-2.0 * x)), wj, x) end
code[wj_, x_] := N[(N[(N[(1.0 - wj), $MachinePrecision] * wj + N[(-2.0 * x), $MachinePrecision]), $MachinePrecision] * wj + x), $MachinePrecision]
\begin{array}{l}
\\
\mathsf{fma}\left(\mathsf{fma}\left(1 - wj, wj, -2 \cdot x\right), wj, x\right)
\end{array}
Initial program 81.9%
Taylor expanded in wj around 0
+-commutativeN/A
*-commutativeN/A
lower-fma.f64N/A
Applied rewrites95.1%
Taylor expanded in x around 0
Applied rewrites94.9%
(FPCore (wj x) :precision binary64 (if (<= wj -3.3e-48) (* (* (- 1.0 wj) wj) wj) (fma (* x wj) -2.0 x)))
/* Herbie alternative: regime split on wj at -3.3e-48.
 * Negative branch: (1 - wj)*wj^2, ignoring x. Other branch: x*(1 - 2*wj)
 * via fma, ignoring higher-order wj terms. Do not refactor. */
double code(double wj, double x) {
double tmp;
if (wj <= -3.3e-48) {
tmp = ((1.0 - wj) * wj) * wj;
} else {
tmp = fma((x * wj), -2.0, x);
}
return tmp;
}
function code(wj, x) tmp = 0.0 if (wj <= -3.3e-48) tmp = Float64(Float64(Float64(1.0 - wj) * wj) * wj); else tmp = fma(Float64(x * wj), -2.0, x); end return tmp end
code[wj_, x_] := If[LessEqual[wj, -3.3e-48], N[(N[(N[(1.0 - wj), $MachinePrecision] * wj), $MachinePrecision] * wj), $MachinePrecision], N[(N[(x * wj), $MachinePrecision] * -2.0 + x), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;wj \leq -3.3 \cdot 10^{-48}:\\
\;\;\;\;\left(\left(1 - wj\right) \cdot wj\right) \cdot wj\\
\mathbf{else}:\\
\;\;\;\;\mathsf{fma}\left(x \cdot wj, -2, x\right)\\
\end{array}
\end{array}
if wj < -3.3e-48Initial program 40.4%
Taylor expanded in wj around 0
+-commutativeN/A
*-commutativeN/A
lower-fma.f64N/A
Applied rewrites68.0%
Taylor expanded in x around 0
Applied rewrites67.8%
Taylor expanded in x around 0
Applied rewrites45.9%
if -3.3e-48 < wj Initial program 86.8%
Taylor expanded in wj around 0
+-commutativeN/A
*-commutativeN/A
lower-fma.f64N/A
*-commutativeN/A
lower-*.f6492.4
Applied rewrites92.4%
(FPCore (wj x) :precision binary64 (if (<= wj -3.3e-48) (* wj wj) (fma (* x wj) -2.0 x)))
double code(double wj, double x) {
double tmp;
if (wj <= -3.3e-48) {
tmp = wj * wj;
} else {
tmp = fma((x * wj), -2.0, x);
}
return tmp;
}
function code(wj, x) tmp = 0.0 if (wj <= -3.3e-48) tmp = Float64(wj * wj); else tmp = fma(Float64(x * wj), -2.0, x); end return tmp end
code[wj_, x_] := If[LessEqual[wj, -3.3e-48], N[(wj * wj), $MachinePrecision], N[(N[(x * wj), $MachinePrecision] * -2.0 + x), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;wj \leq -3.3 \cdot 10^{-48}:\\
\;\;\;\;wj \cdot wj\\
\mathbf{else}:\\
\;\;\;\;\mathsf{fma}\left(x \cdot wj, -2, x\right)\\
\end{array}
\end{array}
if wj < -3.3e-48: Initial program 40.4%
Taylor expanded in wj around 0
+-commutativeN/A
*-commutativeN/A
lower-fma.f64N/A
fp-cancel-sub-sign-invN/A
metadata-evalN/A
*-commutativeN/A
lower-fma.f64N/A
lower--.f64N/A
distribute-rgt-outN/A
*-commutativeN/A
lower-*.f64N/A
metadata-evalN/A
lower-*.f64 66.9
Applied rewrites66.9%
Taylor expanded in x around 0
Applied rewrites44.9%
if -3.3e-48 < wj Initial program 86.8%
Taylor expanded in wj around 0
+-commutativeN/A
*-commutativeN/A
lower-fma.f64N/A
*-commutativeN/A
lower-*.f6492.4
Applied rewrites92.4%
(FPCore (wj x) :precision binary64 (fma (* (- 1.0 wj) wj) wj x))
double code(double wj, double x) {
return fma(((1.0 - wj) * wj), wj, x);
}
function code(wj, x) return fma(Float64(Float64(1.0 - wj) * wj), wj, x) end
code[wj_, x_] := N[(N[(N[(1.0 - wj), $MachinePrecision] * wj), $MachinePrecision] * wj + x), $MachinePrecision]
\begin{array}{l}
\\
\mathsf{fma}\left(\left(1 - wj\right) \cdot wj, wj, x\right)
\end{array}
Initial program 81.9%
Taylor expanded in wj around 0
+-commutativeN/A
*-commutativeN/A
lower-fma.f64N/A
Applied rewrites95.1%
Taylor expanded in x around 0
Applied rewrites94.9%
Taylor expanded in x around 0
Applied rewrites94.8%
(FPCore (wj x) :precision binary64 (- wj 1.0))
/* Degenerate Taylor-limit form of the step: collapses to wj - 1 (x unused). */
double code(double wj, double x) {
return wj - 1.0;
}
!> NaN-aware fmax/fmin helpers with C99-style semantics: when exactly one
!> argument is NaN the other argument is returned (Fortran's intrinsic
!> max/min make no such guarantee).  Generic names cover every
!> real(4)/real(8) argument combination; mixed kinds promote to real(8).
!> The test `v /= v` is the portable NaN check used throughout.
module fmin_fmax_functions
implicit none
private
public fmax
public fmin
interface fmax
module procedure fmax88
module procedure fmax44
module procedure fmax84
module procedure fmax48
end interface
interface fmin
module procedure fmin88
module procedure fmin44
module procedure fmin84
module procedure fmin48
end interface
contains
!> x NaN -> y; else y NaN -> x; else max(x, y).
real(8) function fmax88(x, y) result (res)
real(8), intent (in) :: x
real(8), intent (in) :: y
res = merge(y, merge(x, max(x, y), y /= y), x /= x)
end function
real(4) function fmax44(x, y) result (res)
real(4), intent (in) :: x
real(4), intent (in) :: y
res = merge(y, merge(x, max(x, y), y /= y), x /= x)
end function
!> Mixed kinds: y is promoted with dble so merge/max see uniform real(8).
real(8) function fmax84(x, y) result(res)
real(8), intent (in) :: x
real(4), intent (in) :: y
res = merge(dble(y), merge(x, max(x, dble(y)), y /= y), x /= x)
end function
real(8) function fmax48(x, y) result(res)
real(4), intent (in) :: x
real(8), intent (in) :: y
res = merge(y, merge(dble(x), max(dble(x), y), y /= y), x /= x)
end function
!> x NaN -> y; else y NaN -> x; else min(x, y).
real(8) function fmin88(x, y) result (res)
real(8), intent (in) :: x
real(8), intent (in) :: y
res = merge(y, merge(x, min(x, y), y /= y), x /= x)
end function
real(4) function fmin44(x, y) result (res)
real(4), intent (in) :: x
real(4), intent (in) :: y
res = merge(y, merge(x, min(x, y), y /= y), x /= x)
end function
real(8) function fmin84(x, y) result(res)
real(8), intent (in) :: x
real(4), intent (in) :: y
res = merge(dble(y), merge(x, min(x, dble(y)), y /= y), x /= x)
end function
real(8) function fmin48(x, y) result(res)
real(4), intent (in) :: x
real(8), intent (in) :: y
res = merge(y, merge(dble(x), min(dble(x), y), y /= y), x /= x)
end function
end module
!> Degenerate Taylor-limit form of the step: collapses to wj - 1 (x unused).
real(8) function code(wj, x)
use fmin_fmax_functions
real(8), intent (in) :: wj
real(8), intent (in) :: x
code = wj - 1.0d0
end function
/** Degenerate Taylor-limit form of the step: reduces to wj - 1 (x unused). */
public static double code(double wj, double x) {
return wj - 1.0;
}
def code(wj, x): return wj - 1.0
function code(wj, x) return Float64(wj - 1.0) end
function tmp = code(wj, x) tmp = wj - 1.0; end
code[wj_, x_] := N[(wj - 1.0), $MachinePrecision]
\begin{array}{l}
\\
wj - 1
\end{array}
Initial program 81.9%
Taylor expanded in x around inf
associate-*r/N/A
distribute-rgt1-inN/A
+-commutativeN/A
associate-/r*N/A
mul-1-negN/A
distribute-frac-negN/A
distribute-neg-fracN/A
distribute-neg-frac2N/A
mul-1-negN/A
lower-/.f64N/A
lower-/.f64N/A
lower-+.f64N/A
mul-1-negN/A
lower-neg.f64N/A
lower-exp.f64 79.7
Applied rewrites79.7%
Taylor expanded in wj around inf
Applied rewrites4.3%
(FPCore (wj x) :precision binary64 (- wj (- (/ wj (+ wj 1.0)) (/ x (+ (exp wj) (* wj (exp wj)))))))
double code(double wj, double x) {
return wj - ((wj / (wj + 1.0)) - (x / (exp(wj) + (wj * exp(wj)))));
}
!> NaN-aware fmax/fmin helpers with C99-style semantics: when exactly one
!> argument is NaN the other argument is returned (Fortran's intrinsic
!> max/min make no such guarantee).  Generic names cover every
!> real(4)/real(8) argument combination; mixed kinds promote to real(8).
!> The test `v /= v` is the portable NaN check used throughout.
module fmin_fmax_functions
implicit none
private
public fmax
public fmin
interface fmax
module procedure fmax88
module procedure fmax44
module procedure fmax84
module procedure fmax48
end interface
interface fmin
module procedure fmin88
module procedure fmin44
module procedure fmin84
module procedure fmin48
end interface
contains
!> x NaN -> y; else y NaN -> x; else max(x, y).
real(8) function fmax88(x, y) result (res)
real(8), intent (in) :: x
real(8), intent (in) :: y
res = merge(y, merge(x, max(x, y), y /= y), x /= x)
end function
real(4) function fmax44(x, y) result (res)
real(4), intent (in) :: x
real(4), intent (in) :: y
res = merge(y, merge(x, max(x, y), y /= y), x /= x)
end function
!> Mixed kinds: y is promoted with dble so merge/max see uniform real(8).
real(8) function fmax84(x, y) result(res)
real(8), intent (in) :: x
real(4), intent (in) :: y
res = merge(dble(y), merge(x, max(x, dble(y)), y /= y), x /= x)
end function
real(8) function fmax48(x, y) result(res)
real(4), intent (in) :: x
real(8), intent (in) :: y
res = merge(y, merge(dble(x), max(dble(x), y), y /= y), x /= x)
end function
!> x NaN -> y; else y NaN -> x; else min(x, y).
real(8) function fmin88(x, y) result (res)
real(8), intent (in) :: x
real(8), intent (in) :: y
res = merge(y, merge(x, min(x, y), y /= y), x /= x)
end function
real(4) function fmin44(x, y) result (res)
real(4), intent (in) :: x
real(4), intent (in) :: y
res = merge(y, merge(x, min(x, y), y /= y), x /= x)
end function
real(8) function fmin84(x, y) result(res)
real(8), intent (in) :: x
real(4), intent (in) :: y
res = merge(dble(y), merge(x, min(x, dble(y)), y /= y), x /= x)
end function
real(8) function fmin48(x, y) result(res)
real(4), intent (in) :: x
real(8), intent (in) :: y
res = merge(y, merge(dble(x), min(dble(x), y), y /= y), x /= x)
end function
end module
!> Rearranged Newton step for Lambert W:
!>   wj - (wj/(wj+1) - x/(e^wj + wj*e^wj)).
!> exp(wj) is evaluated once into ew and reused; both original
!> occurrences had the same argument, so the result is unchanged
!> while one transcendental call is saved.
real(8) function code(wj, x)
use fmin_fmax_functions
real(8), intent (in) :: wj
real(8), intent (in) :: x
real(8) :: ew
ew = exp(wj)
code = wj - ((wj / (wj + 1.0d0)) - (x / (ew + (wj * ew))))
end function
/**
 * Rearranged Newton step for Lambert W:
 * {@code wj - (wj/(wj+1) - x/(e^wj + wj*e^wj))}.
 * Math.exp(wj) is computed once and reused; both original call sites
 * received the same argument, so the result is unchanged while one
 * transcendental call is saved.
 */
public static double code(double wj, double x) {
	double ew = Math.exp(wj); // shared e^wj
	return wj - ((wj / (wj + 1.0)) - (x / (ew + (wj * ew))));
}
def code(wj, x): return wj - ((wj / (wj + 1.0)) - (x / (math.exp(wj) + (wj * math.exp(wj)))))
function code(wj, x) return Float64(wj - Float64(Float64(wj / Float64(wj + 1.0)) - Float64(x / Float64(exp(wj) + Float64(wj * exp(wj)))))) end
function tmp = code(wj, x) tmp = wj - ((wj / (wj + 1.0)) - (x / (exp(wj) + (wj * exp(wj))))); end
code[wj_, x_] := N[(wj - N[(N[(wj / N[(wj + 1.0), $MachinePrecision]), $MachinePrecision] - N[(x / N[(N[Exp[wj], $MachinePrecision] + N[(wj * N[Exp[wj], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
wj - \left(\frac{wj}{wj + 1} - \frac{x}{e^{wj} + wj \cdot e^{wj}}\right)
\end{array}
herbie shell --seed 2024357
(FPCore (wj x)
:name "Jmat.Real.lambertw, newton loop step"
:precision binary64
:alt
(! :herbie-platform default (let ((ew (exp wj))) (- wj (- (/ wj (+ wj 1)) (/ x (+ ew (* wj ew)))))))
(- wj (/ (- (* wj (exp wj)) x) (+ (exp wj) (* wj (exp wj))))))