
(FPCore (wj x) :precision binary64 (let* ((t_0 (* wj (exp wj)))) (- wj (/ (- t_0 x) (+ (exp wj) t_0)))))
double code(double wj, double x) {
double t_0 = wj * exp(wj);
return wj - ((t_0 - x) / (exp(wj) + t_0));
}
! NaN-aware fmax/fmin mirroring C99 semantics: when exactly one argument
! is NaN, the other argument is returned (Fortran's intrinsic max/min give
! no such guarantee).  Mixed-kind variants promote real(4) via dble().
module fmin_fmax_functions
implicit none
private
public fmax
public fmin
interface fmax
module procedure fmax88
module procedure fmax44
module procedure fmax84
module procedure fmax48
end interface
interface fmin
module procedure fmin88
module procedure fmin44
module procedure fmin84
module procedure fmin48
end interface
contains
! fmax of two real(8) values; x /= x and y /= y are NaN tests.
real(8) function fmax88(x, y) result (res)
real(8), intent (in) :: x
real(8), intent (in) :: y
res = merge(y, merge(x, max(x, y), y /= y), x /= x)
end function
! fmax of two real(4) values.
real(4) function fmax44(x, y) result (res)
real(4), intent (in) :: x
real(4), intent (in) :: y
res = merge(y, merge(x, max(x, y), y /= y), x /= x)
end function
! fmax of real(8) and real(4); result is real(8).
real(8) function fmax84(x, y) result(res)
real(8), intent (in) :: x
real(4), intent (in) :: y
res = merge(dble(y), merge(x, max(x, dble(y)), y /= y), x /= x)
end function
! fmax of real(4) and real(8); result is real(8).
real(8) function fmax48(x, y) result(res)
real(4), intent (in) :: x
real(8), intent (in) :: y
res = merge(y, merge(dble(x), max(dble(x), y), y /= y), x /= x)
end function
! fmin of two real(8) values.
real(8) function fmin88(x, y) result (res)
real(8), intent (in) :: x
real(8), intent (in) :: y
res = merge(y, merge(x, min(x, y), y /= y), x /= x)
end function
! fmin of two real(4) values.
real(4) function fmin44(x, y) result (res)
real(4), intent (in) :: x
real(4), intent (in) :: y
res = merge(y, merge(x, min(x, y), y /= y), x /= x)
end function
! fmin of real(8) and real(4); result is real(8).
real(8) function fmin84(x, y) result(res)
real(8), intent (in) :: x
real(4), intent (in) :: y
res = merge(dble(y), merge(x, min(x, dble(y)), y /= y), x /= x)
end function
! fmin of real(4) and real(8); result is real(8).
real(8) function fmin48(x, y) result(res)
real(4), intent (in) :: x
real(8), intent (in) :: y
res = merge(y, merge(dble(x), min(dble(x), y), y /= y), x /= x)
end function
end module
! One Newton iteration step for the Lambert W equation wj*exp(wj) = x.
! Added implicit none (was missing); all names are explicitly declared.
real(8) function code(wj, x)
use fmin_fmax_functions
implicit none
real(8), intent (in) :: wj
real(8), intent (in) :: x
real(8) :: t_0
t_0 = wj * exp(wj)
code = wj - ((t_0 - x) / (exp(wj) + t_0))
end function
/** One Newton step toward solving w * exp(w) = x (Lambert W). */
public static double code(double wj, double x) {
    final double ew = Math.exp(wj);
    final double wew = wj * ew;
    return wj - (wew - x) / (ew + wew);
}
def code(wj, x): t_0 = wj * math.exp(wj) return wj - ((t_0 - x) / (math.exp(wj) + t_0))
function code(wj, x) t_0 = Float64(wj * exp(wj)) return Float64(wj - Float64(Float64(t_0 - x) / Float64(exp(wj) + t_0))) end
function tmp = code(wj, x) t_0 = wj * exp(wj); tmp = wj - ((t_0 - x) / (exp(wj) + t_0)); end
code[wj_, x_] := Block[{t$95$0 = N[(wj * N[Exp[wj], $MachinePrecision]), $MachinePrecision]}, N[(wj - N[(N[(t$95$0 - x), $MachinePrecision] / N[(N[Exp[wj], $MachinePrecision] + t$95$0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
t_0 := wj \cdot e^{wj}\\
wj - \frac{t_0 - x}{e^{wj} + t_0}
\end{array}
Herbie found 8 alternatives:
| Alternative | Accuracy | Speedup |
|---|---|---|
(FPCore (wj x) :precision binary64 (let* ((t_0 (* wj (exp wj)))) (- wj (/ (- t_0 x) (+ (exp wj) t_0)))))
double code(double wj, double x) {
double t_0 = wj * exp(wj);
return wj - ((t_0 - x) / (exp(wj) + t_0));
}
! NaN-aware fmax/fmin mirroring C99 semantics: when exactly one argument
! is NaN, the other argument is returned (Fortran's intrinsic max/min give
! no such guarantee).  Mixed-kind variants promote real(4) via dble().
module fmin_fmax_functions
implicit none
private
public fmax
public fmin
interface fmax
module procedure fmax88
module procedure fmax44
module procedure fmax84
module procedure fmax48
end interface
interface fmin
module procedure fmin88
module procedure fmin44
module procedure fmin84
module procedure fmin48
end interface
contains
! fmax of two real(8) values; x /= x and y /= y are NaN tests.
real(8) function fmax88(x, y) result (res)
real(8), intent (in) :: x
real(8), intent (in) :: y
res = merge(y, merge(x, max(x, y), y /= y), x /= x)
end function
! fmax of two real(4) values.
real(4) function fmax44(x, y) result (res)
real(4), intent (in) :: x
real(4), intent (in) :: y
res = merge(y, merge(x, max(x, y), y /= y), x /= x)
end function
! fmax of real(8) and real(4); result is real(8).
real(8) function fmax84(x, y) result(res)
real(8), intent (in) :: x
real(4), intent (in) :: y
res = merge(dble(y), merge(x, max(x, dble(y)), y /= y), x /= x)
end function
! fmax of real(4) and real(8); result is real(8).
real(8) function fmax48(x, y) result(res)
real(4), intent (in) :: x
real(8), intent (in) :: y
res = merge(y, merge(dble(x), max(dble(x), y), y /= y), x /= x)
end function
! fmin of two real(8) values.
real(8) function fmin88(x, y) result (res)
real(8), intent (in) :: x
real(8), intent (in) :: y
res = merge(y, merge(x, min(x, y), y /= y), x /= x)
end function
! fmin of two real(4) values.
real(4) function fmin44(x, y) result (res)
real(4), intent (in) :: x
real(4), intent (in) :: y
res = merge(y, merge(x, min(x, y), y /= y), x /= x)
end function
! fmin of real(8) and real(4); result is real(8).
real(8) function fmin84(x, y) result(res)
real(8), intent (in) :: x
real(4), intent (in) :: y
res = merge(dble(y), merge(x, min(x, dble(y)), y /= y), x /= x)
end function
! fmin of real(4) and real(8); result is real(8).
real(8) function fmin48(x, y) result(res)
real(4), intent (in) :: x
real(8), intent (in) :: y
res = merge(y, merge(dble(x), min(dble(x), y), y /= y), x /= x)
end function
end module
! One Newton iteration step for the Lambert W equation wj*exp(wj) = x.
! Added implicit none (was missing); all names are explicitly declared.
real(8) function code(wj, x)
use fmin_fmax_functions
implicit none
real(8), intent (in) :: wj
real(8), intent (in) :: x
real(8) :: t_0
t_0 = wj * exp(wj)
code = wj - ((t_0 - x) / (exp(wj) + t_0))
end function
/** One Newton step toward solving w * exp(w) = x (Lambert W). */
public static double code(double wj, double x) {
    final double ew = Math.exp(wj);
    final double wew = wj * ew;
    return wj - (wew - x) / (ew + wew);
}
def code(wj, x): t_0 = wj * math.exp(wj) return wj - ((t_0 - x) / (math.exp(wj) + t_0))
function code(wj, x) t_0 = Float64(wj * exp(wj)) return Float64(wj - Float64(Float64(t_0 - x) / Float64(exp(wj) + t_0))) end
function tmp = code(wj, x) t_0 = wj * exp(wj); tmp = wj - ((t_0 - x) / (exp(wj) + t_0)); end
code[wj_, x_] := Block[{t$95$0 = N[(wj * N[Exp[wj], $MachinePrecision]), $MachinePrecision]}, N[(wj - N[(N[(t$95$0 - x), $MachinePrecision] / N[(N[Exp[wj], $MachinePrecision] + t$95$0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
t_0 := wj \cdot e^{wj}\\
wj - \frac{t\_0 - x}{e^{wj} + t\_0}
\end{array}
(FPCore (wj x)
:precision binary64
(let* ((t_0 (* wj (exp wj))))
(if (<= (- wj (/ (- t_0 x) (+ (exp wj) t_0))) 5e-28)
(fma (fma -2.0 x (* (fma x 2.5 1.0) wj)) wj x)
(/
(fma (/ x (exp wj)) (- wj -1.0) (* (- wj -1.0) (- (fma wj wj wj) wj)))
(* (- wj -1.0) (- wj -1.0))))))
double code(double wj, double x) {
double t_0 = wj * exp(wj);
double tmp;
if ((wj - ((t_0 - x) / (exp(wj) + t_0))) <= 5e-28) {
tmp = fma(fma(-2.0, x, (fma(x, 2.5, 1.0) * wj)), wj, x);
} else {
tmp = fma((x / exp(wj)), (wj - -1.0), ((wj - -1.0) * (fma(wj, wj, wj) - wj))) / ((wj - -1.0) * (wj - -1.0));
}
return tmp;
}
function code(wj, x) t_0 = Float64(wj * exp(wj)) tmp = 0.0 if (Float64(wj - Float64(Float64(t_0 - x) / Float64(exp(wj) + t_0))) <= 5e-28) tmp = fma(fma(-2.0, x, Float64(fma(x, 2.5, 1.0) * wj)), wj, x); else tmp = Float64(fma(Float64(x / exp(wj)), Float64(wj - -1.0), Float64(Float64(wj - -1.0) * Float64(fma(wj, wj, wj) - wj))) / Float64(Float64(wj - -1.0) * Float64(wj - -1.0))); end return tmp end
code[wj_, x_] := Block[{t$95$0 = N[(wj * N[Exp[wj], $MachinePrecision]), $MachinePrecision]}, If[LessEqual[N[(wj - N[(N[(t$95$0 - x), $MachinePrecision] / N[(N[Exp[wj], $MachinePrecision] + t$95$0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], 5e-28], N[(N[(-2.0 * x + N[(N[(x * 2.5 + 1.0), $MachinePrecision] * wj), $MachinePrecision]), $MachinePrecision] * wj + x), $MachinePrecision], N[(N[(N[(x / N[Exp[wj], $MachinePrecision]), $MachinePrecision] * N[(wj - -1.0), $MachinePrecision] + N[(N[(wj - -1.0), $MachinePrecision] * N[(N[(wj * wj + wj), $MachinePrecision] - wj), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / N[(N[(wj - -1.0), $MachinePrecision] * N[(wj - -1.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]]
\begin{array}{l}
t_0 := wj \cdot e^{wj}\\
\mathbf{if}\;wj - \frac{t\_0 - x}{e^{wj} + t\_0} \leq 5 \cdot 10^{-28}:\\
\;\;\;\;\mathsf{fma}\left(\mathsf{fma}\left(-2, x, \mathsf{fma}\left(x, 2.5, 1\right) \cdot wj\right), wj, x\right)\\
\mathbf{else}:\\
\;\;\;\;\frac{\mathsf{fma}\left(\frac{x}{e^{wj}}, wj - -1, \left(wj - -1\right) \cdot \left(\mathsf{fma}\left(wj, wj, wj\right) - wj\right)\right)}{\left(wj - -1\right) \cdot \left(wj - -1\right)}\\
\end{array}
if (-.f64 wj (/.f64 (-.f64 (*.f64 wj (exp.f64 wj)) x) (+.f64 (exp.f64 wj) (*.f64 wj (exp.f64 wj))))) < 5.0000000000000002e-28Initial program 78.1%
Taylor expanded in wj around 0
lower-+.f64N/A
lower-*.f64N/A
lower--.f64N/A
lower-*.f64N/A
lower--.f64N/A
lower-fma.f64N/A
lower-*.f64N/A
lower-*.f6495.9%
Applied rewrites95.9%
lift-+.f64N/A
+-commutativeN/A
lift-*.f64N/A
*-commutativeN/A
lower-fma.f6495.9%
Applied rewrites95.9%
if 5.0000000000000002e-28 < (-.f64 wj (/.f64 (-.f64 (*.f64 wj (exp.f64 wj)) x) (+.f64 (exp.f64 wj) (*.f64 wj (exp.f64 wj))))) Initial program 78.1%
lift--.f64N/A
lift-/.f64N/A
lift--.f64N/A
div-subN/A
associate--r-N/A
sum-to-multN/A
lower-unsound-*.f64N/A
Applied rewrites7.0%
lift-*.f64N/A
lift-+.f64N/A
lift-/.f64N/A
sum-to-mult-revN/A
+-commutativeN/A
lift-/.f64N/A
lift-*.f64N/A
*-commutativeN/A
associate-/r*N/A
lift-/.f64N/A
lift--.f64N/A
lift-/.f64N/A
sub-to-fractionN/A
frac-addN/A
Applied rewrites89.6%
(FPCore (wj x)
:precision binary64
(let* ((t_0 (* wj (exp wj))))
(if (<= (- wj (/ (- t_0 x) (+ (exp wj) t_0))) 5e-28)
(fma (fma -2.0 x (* (fma x 2.5 1.0) wj)) wj x)
(fma
(- (fma wj wj wj) wj)
(/ -1.0 (- -1.0 wj))
(/ x (* (- wj -1.0) (exp wj)))))))
double code(double wj, double x) {
double t_0 = wj * exp(wj);
double tmp;
if ((wj - ((t_0 - x) / (exp(wj) + t_0))) <= 5e-28) {
tmp = fma(fma(-2.0, x, (fma(x, 2.5, 1.0) * wj)), wj, x);
} else {
tmp = fma((fma(wj, wj, wj) - wj), (-1.0 / (-1.0 - wj)), (x / ((wj - -1.0) * exp(wj))));
}
return tmp;
}
function code(wj, x) t_0 = Float64(wj * exp(wj)) tmp = 0.0 if (Float64(wj - Float64(Float64(t_0 - x) / Float64(exp(wj) + t_0))) <= 5e-28) tmp = fma(fma(-2.0, x, Float64(fma(x, 2.5, 1.0) * wj)), wj, x); else tmp = fma(Float64(fma(wj, wj, wj) - wj), Float64(-1.0 / Float64(-1.0 - wj)), Float64(x / Float64(Float64(wj - -1.0) * exp(wj)))); end return tmp end
code[wj_, x_] := Block[{t$95$0 = N[(wj * N[Exp[wj], $MachinePrecision]), $MachinePrecision]}, If[LessEqual[N[(wj - N[(N[(t$95$0 - x), $MachinePrecision] / N[(N[Exp[wj], $MachinePrecision] + t$95$0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], 5e-28], N[(N[(-2.0 * x + N[(N[(x * 2.5 + 1.0), $MachinePrecision] * wj), $MachinePrecision]), $MachinePrecision] * wj + x), $MachinePrecision], N[(N[(N[(wj * wj + wj), $MachinePrecision] - wj), $MachinePrecision] * N[(-1.0 / N[(-1.0 - wj), $MachinePrecision]), $MachinePrecision] + N[(x / N[(N[(wj - -1.0), $MachinePrecision] * N[Exp[wj], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]]
\begin{array}{l}
t_0 := wj \cdot e^{wj}\\
\mathbf{if}\;wj - \frac{t\_0 - x}{e^{wj} + t\_0} \leq 5 \cdot 10^{-28}:\\
\;\;\;\;\mathsf{fma}\left(\mathsf{fma}\left(-2, x, \mathsf{fma}\left(x, 2.5, 1\right) \cdot wj\right), wj, x\right)\\
\mathbf{else}:\\
\;\;\;\;\mathsf{fma}\left(\mathsf{fma}\left(wj, wj, wj\right) - wj, \frac{-1}{-1 - wj}, \frac{x}{\left(wj - -1\right) \cdot e^{wj}}\right)\\
\end{array}
if (-.f64 wj (/.f64 (-.f64 (*.f64 wj (exp.f64 wj)) x) (+.f64 (exp.f64 wj) (*.f64 wj (exp.f64 wj))))) < 5.0000000000000002e-28Initial program 78.1%
Taylor expanded in wj around 0
lower-+.f64N/A
lower-*.f64N/A
lower--.f64N/A
lower-*.f64N/A
lower--.f64N/A
lower-fma.f64N/A
lower-*.f64N/A
lower-*.f6495.9%
Applied rewrites95.9%
lift-+.f64N/A
+-commutativeN/A
lift-*.f64N/A
*-commutativeN/A
lower-fma.f6495.9%
Applied rewrites95.9%
if 5.0000000000000002e-28 < (-.f64 wj (/.f64 (-.f64 (*.f64 wj (exp.f64 wj)) x) (+.f64 (exp.f64 wj) (*.f64 wj (exp.f64 wj))))) Initial program 78.1%
lift--.f64N/A
lift-/.f64N/A
lift--.f64N/A
div-subN/A
associate--r-N/A
sum-to-multN/A
lower-unsound-*.f64N/A
Applied rewrites7.0%
lift-*.f64N/A
lift-+.f64N/A
lift-/.f64N/A
sum-to-mult-revN/A
lift--.f64N/A
lift-/.f64N/A
sub-to-fractionN/A
mult-flipN/A
lower-fma.f64N/A
Applied rewrites89.6%
(FPCore (wj x) :precision binary64 (fma (fma -2.0 x (* (fma x 2.5 1.0) wj)) wj x))
double code(double wj, double x) {
return fma(fma(-2.0, x, (fma(x, 2.5, 1.0) * wj)), wj, x);
}
function code(wj, x) return fma(fma(-2.0, x, Float64(fma(x, 2.5, 1.0) * wj)), wj, x) end
code[wj_, x_] := N[(N[(-2.0 * x + N[(N[(x * 2.5 + 1.0), $MachinePrecision] * wj), $MachinePrecision]), $MachinePrecision] * wj + x), $MachinePrecision]
\mathsf{fma}\left(\mathsf{fma}\left(-2, x, \mathsf{fma}\left(x, 2.5, 1\right) \cdot wj\right), wj, x\right)
Initial program 78.1%
Taylor expanded in wj around 0
lower-+.f64N/A
lower-*.f64N/A
lower--.f64N/A
lower-*.f64N/A
lower--.f64N/A
lower-fma.f64N/A
lower-*.f64N/A
lower-*.f6495.9%
Applied rewrites95.9%
lift-+.f64N/A
+-commutativeN/A
lift-*.f64N/A
*-commutativeN/A
lower-fma.f6495.9%
Applied rewrites95.9%
(FPCore (wj x) :precision binary64 (fma (+ wj (* x (- (* 2.5 wj) 2.0))) wj x))
double code(double wj, double x) {
return fma((wj + (x * ((2.5 * wj) - 2.0))), wj, x);
}
function code(wj, x) return fma(Float64(wj + Float64(x * Float64(Float64(2.5 * wj) - 2.0))), wj, x) end
code[wj_, x_] := N[(N[(wj + N[(x * N[(N[(2.5 * wj), $MachinePrecision] - 2.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] * wj + x), $MachinePrecision]
\mathsf{fma}\left(wj + x \cdot \left(2.5 \cdot wj - 2\right), wj, x\right)
Initial program 78.1%
Taylor expanded in wj around 0
lower-+.f64N/A
lower-*.f64N/A
lower--.f64N/A
lower-*.f64N/A
lower--.f64N/A
lower-fma.f64N/A
lower-*.f64N/A
lower-*.f6495.9%
Applied rewrites95.9%
lift-+.f64N/A
+-commutativeN/A
lift-*.f64N/A
*-commutativeN/A
lower-fma.f6495.9%
Applied rewrites95.9%
Taylor expanded in x around 0
lower-+.f64N/A
lower-*.f64N/A
lower--.f64N/A
lower-*.f6495.9%
Applied rewrites95.9%
(FPCore (wj x) :precision binary64 (fma (+ wj (* x -2.0)) wj x))
double code(double wj, double x) {
return fma((wj + (x * -2.0)), wj, x);
}
function code(wj, x) return fma(Float64(wj + Float64(x * -2.0)), wj, x) end
code[wj_, x_] := N[(N[(wj + N[(x * -2.0), $MachinePrecision]), $MachinePrecision] * wj + x), $MachinePrecision]
\mathsf{fma}\left(wj + x \cdot -2, wj, x\right)
Initial program 78.1%
Taylor expanded in wj around 0
lower-+.f64N/A
lower-*.f64N/A
lower--.f64N/A
lower-*.f64N/A
lower--.f64N/A
lower-fma.f64N/A
lower-*.f64N/A
lower-*.f6495.9%
Applied rewrites95.9%
lift-+.f64N/A
+-commutativeN/A
lift-*.f64N/A
*-commutativeN/A
lower-fma.f6495.9%
Applied rewrites95.9%
Taylor expanded in x around 0
lower-+.f64N/A
lower-*.f64N/A
lower--.f64N/A
lower-*.f6495.9%
Applied rewrites95.9%
Taylor expanded in wj around 0
Applied rewrites95.7%
(FPCore (wj x) :precision binary64 (+ x (* -2.0 (* wj x))))
/* Herbie alternative: first-order form x - 2*wj*x. */
double code(double wj, double x) {
    double prod = wj * x;
    return x + -2.0 * prod;
}
! NaN-aware fmax/fmin mirroring C99 semantics: when exactly one argument
! is NaN, the other argument is returned (Fortran's intrinsic max/min give
! no such guarantee).  Mixed-kind variants promote real(4) via dble().
module fmin_fmax_functions
implicit none
private
public fmax
public fmin
interface fmax
module procedure fmax88
module procedure fmax44
module procedure fmax84
module procedure fmax48
end interface
interface fmin
module procedure fmin88
module procedure fmin44
module procedure fmin84
module procedure fmin48
end interface
contains
! fmax of two real(8) values; x /= x and y /= y are NaN tests.
real(8) function fmax88(x, y) result (res)
real(8), intent (in) :: x
real(8), intent (in) :: y
res = merge(y, merge(x, max(x, y), y /= y), x /= x)
end function
! fmax of two real(4) values.
real(4) function fmax44(x, y) result (res)
real(4), intent (in) :: x
real(4), intent (in) :: y
res = merge(y, merge(x, max(x, y), y /= y), x /= x)
end function
! fmax of real(8) and real(4); result is real(8).
real(8) function fmax84(x, y) result(res)
real(8), intent (in) :: x
real(4), intent (in) :: y
res = merge(dble(y), merge(x, max(x, dble(y)), y /= y), x /= x)
end function
! fmax of real(4) and real(8); result is real(8).
real(8) function fmax48(x, y) result(res)
real(4), intent (in) :: x
real(8), intent (in) :: y
res = merge(y, merge(dble(x), max(dble(x), y), y /= y), x /= x)
end function
! fmin of two real(8) values.
real(8) function fmin88(x, y) result (res)
real(8), intent (in) :: x
real(8), intent (in) :: y
res = merge(y, merge(x, min(x, y), y /= y), x /= x)
end function
! fmin of two real(4) values.
real(4) function fmin44(x, y) result (res)
real(4), intent (in) :: x
real(4), intent (in) :: y
res = merge(y, merge(x, min(x, y), y /= y), x /= x)
end function
! fmin of real(8) and real(4); result is real(8).
real(8) function fmin84(x, y) result(res)
real(8), intent (in) :: x
real(4), intent (in) :: y
res = merge(dble(y), merge(x, min(x, dble(y)), y /= y), x /= x)
end function
! fmin of real(4) and real(8); result is real(8).
real(8) function fmin48(x, y) result(res)
real(4), intent (in) :: x
real(8), intent (in) :: y
res = merge(y, merge(dble(x), min(dble(x), y), y /= y), x /= x)
end function
end module
! Herbie alternative: first-order form x - 2*wj*x.
! Added implicit none (was missing); all names are explicitly declared.
real(8) function code(wj, x)
use fmin_fmax_functions
implicit none
real(8), intent (in) :: wj
real(8), intent (in) :: x
code = x + ((-2.0d0) * (wj * x))
end function
/** Herbie alternative: first-order form x - 2*wj*x. */
public static double code(double wj, double x) {
    final double prod = wj * x;
    return x + (-2.0 * prod);
}
def code(wj, x): return x + (-2.0 * (wj * x))
function code(wj, x) return Float64(x + Float64(-2.0 * Float64(wj * x))) end
function tmp = code(wj, x) tmp = x + (-2.0 * (wj * x)); end
code[wj_, x_] := N[(x + N[(-2.0 * N[(wj * x), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
x + -2 \cdot \left(wj \cdot x\right)
Initial program 78.1%
Taylor expanded in wj around 0
lower-+.f64N/A
lower-*.f64N/A
lower-*.f6484.8%
Applied rewrites84.8%
(FPCore (wj x) :precision binary64 (fma (* -2.0 x) wj x))
double code(double wj, double x) {
return fma((-2.0 * x), wj, x);
}
function code(wj, x) return fma(Float64(-2.0 * x), wj, x) end
code[wj_, x_] := N[(N[(-2.0 * x), $MachinePrecision] * wj + x), $MachinePrecision]
\mathsf{fma}\left(-2 \cdot x, wj, x\right)
Initial program 78.1%
Taylor expanded in wj around 0
lower-+.f64N/A
lower-*.f64N/A
lower--.f64N/A
lower-*.f64N/A
lower--.f64N/A
lower-fma.f64N/A
lower-*.f64N/A
lower-*.f6495.9%
Applied rewrites95.9%
lift-+.f64N/A
+-commutativeN/A
lift-*.f64N/A
*-commutativeN/A
lower-fma.f6495.9%
Applied rewrites95.9%
Taylor expanded in wj around 0
lower-*.f6484.8%
Applied rewrites84.8%
(FPCore (wj x) :precision binary64 x)
/* Degenerate Herbie alternative: per the report's derivation log, a
 * Taylor expansion in wj around 0 collapses the step to returning x. */
double code(double wj, double x) {
return x;
}
! NaN-aware fmax/fmin mirroring C99 semantics: when exactly one argument
! is NaN, the other argument is returned (Fortran's intrinsic max/min give
! no such guarantee).  Mixed-kind variants promote real(4) via dble().
module fmin_fmax_functions
implicit none
private
public fmax
public fmin
interface fmax
module procedure fmax88
module procedure fmax44
module procedure fmax84
module procedure fmax48
end interface
interface fmin
module procedure fmin88
module procedure fmin44
module procedure fmin84
module procedure fmin48
end interface
contains
! fmax of two real(8) values; x /= x and y /= y are NaN tests.
real(8) function fmax88(x, y) result (res)
real(8), intent (in) :: x
real(8), intent (in) :: y
res = merge(y, merge(x, max(x, y), y /= y), x /= x)
end function
! fmax of two real(4) values.
real(4) function fmax44(x, y) result (res)
real(4), intent (in) :: x
real(4), intent (in) :: y
res = merge(y, merge(x, max(x, y), y /= y), x /= x)
end function
! fmax of real(8) and real(4); result is real(8).
real(8) function fmax84(x, y) result(res)
real(8), intent (in) :: x
real(4), intent (in) :: y
res = merge(dble(y), merge(x, max(x, dble(y)), y /= y), x /= x)
end function
! fmax of real(4) and real(8); result is real(8).
real(8) function fmax48(x, y) result(res)
real(4), intent (in) :: x
real(8), intent (in) :: y
res = merge(y, merge(dble(x), max(dble(x), y), y /= y), x /= x)
end function
! fmin of two real(8) values.
real(8) function fmin88(x, y) result (res)
real(8), intent (in) :: x
real(8), intent (in) :: y
res = merge(y, merge(x, min(x, y), y /= y), x /= x)
end function
! fmin of two real(4) values.
real(4) function fmin44(x, y) result (res)
real(4), intent (in) :: x
real(4), intent (in) :: y
res = merge(y, merge(x, min(x, y), y /= y), x /= x)
end function
! fmin of real(8) and real(4); result is real(8).
real(8) function fmin84(x, y) result(res)
real(8), intent (in) :: x
real(4), intent (in) :: y
res = merge(dble(y), merge(x, min(x, dble(y)), y /= y), x /= x)
end function
! fmin of real(4) and real(8); result is real(8).
real(8) function fmin48(x, y) result(res)
real(4), intent (in) :: x
real(8), intent (in) :: y
res = merge(y, merge(dble(x), min(dble(x), y), y /= y), x /= x)
end function
end module
! Degenerate Herbie alternative: the step collapses to returning x.
! Added implicit none (was missing); all names are explicitly declared.
real(8) function code(wj, x)
use fmin_fmax_functions
implicit none
real(8), intent (in) :: wj
real(8), intent (in) :: x
code = x
end function
/** Degenerate Herbie alternative: per the report's derivation log, a
 *  Taylor expansion in wj around 0 collapses the step to returning x. */
public static double code(double wj, double x) {
return x;
}
def code(wj, x): return x
function code(wj, x) return x end
function tmp = code(wj, x) tmp = x; end
code[wj_, x_] := x
x
Initial program 78.1%
Taylor expanded in wj around 0
Applied rewrites84.3%
(FPCore (wj x) :precision binary64 (- wj (- (/ wj (+ wj 1.0)) (/ x (+ (exp wj) (* wj (exp wj)))))))
double code(double wj, double x) {
return wj - ((wj / (wj + 1.0)) - (x / (exp(wj) + (wj * exp(wj)))));
}
! NaN-aware fmax/fmin mirroring C99 semantics: when exactly one argument
! is NaN, the other argument is returned (Fortran's intrinsic max/min give
! no such guarantee).  Mixed-kind variants promote real(4) via dble().
module fmin_fmax_functions
implicit none
private
public fmax
public fmin
interface fmax
module procedure fmax88
module procedure fmax44
module procedure fmax84
module procedure fmax48
end interface
interface fmin
module procedure fmin88
module procedure fmin44
module procedure fmin84
module procedure fmin48
end interface
contains
! fmax of two real(8) values; x /= x and y /= y are NaN tests.
real(8) function fmax88(x, y) result (res)
real(8), intent (in) :: x
real(8), intent (in) :: y
res = merge(y, merge(x, max(x, y), y /= y), x /= x)
end function
! fmax of two real(4) values.
real(4) function fmax44(x, y) result (res)
real(4), intent (in) :: x
real(4), intent (in) :: y
res = merge(y, merge(x, max(x, y), y /= y), x /= x)
end function
! fmax of real(8) and real(4); result is real(8).
real(8) function fmax84(x, y) result(res)
real(8), intent (in) :: x
real(4), intent (in) :: y
res = merge(dble(y), merge(x, max(x, dble(y)), y /= y), x /= x)
end function
! fmax of real(4) and real(8); result is real(8).
real(8) function fmax48(x, y) result(res)
real(4), intent (in) :: x
real(8), intent (in) :: y
res = merge(y, merge(dble(x), max(dble(x), y), y /= y), x /= x)
end function
! fmin of two real(8) values.
real(8) function fmin88(x, y) result (res)
real(8), intent (in) :: x
real(8), intent (in) :: y
res = merge(y, merge(x, min(x, y), y /= y), x /= x)
end function
! fmin of two real(4) values.
real(4) function fmin44(x, y) result (res)
real(4), intent (in) :: x
real(4), intent (in) :: y
res = merge(y, merge(x, min(x, y), y /= y), x /= x)
end function
! fmin of real(8) and real(4); result is real(8).
real(8) function fmin84(x, y) result(res)
real(8), intent (in) :: x
real(4), intent (in) :: y
res = merge(dble(y), merge(x, min(x, dble(y)), y /= y), x /= x)
end function
! fmin of real(4) and real(8); result is real(8).
real(8) function fmin48(x, y) result(res)
real(4), intent (in) :: x
real(8), intent (in) :: y
res = merge(y, merge(dble(x), min(dble(x), y), y /= y), x /= x)
end function
end module
! Rearranged Newton step: wj - (wj/(wj+1) - x/(exp(wj) + wj*exp(wj))).
! Added implicit none (was missing); all names are explicitly declared.
real(8) function code(wj, x)
use fmin_fmax_functions
implicit none
real(8), intent (in) :: wj
real(8), intent (in) :: x
code = wj - ((wj / (wj + 1.0d0)) - (x / (exp(wj) + (wj * exp(wj)))))
end function
/** Rearranged Newton step: wj - (wj/(wj+1) - x/(e^wj + wj*e^wj)). */
public static double code(double wj, double x) {
    final double ew = Math.exp(wj);
    final double lhs = wj / (wj + 1.0);
    final double rhs = x / (ew + (wj * ew));
    return wj - (lhs - rhs);
}
def code(wj, x): return wj - ((wj / (wj + 1.0)) - (x / (math.exp(wj) + (wj * math.exp(wj)))))
function code(wj, x) return Float64(wj - Float64(Float64(wj / Float64(wj + 1.0)) - Float64(x / Float64(exp(wj) + Float64(wj * exp(wj)))))) end
function tmp = code(wj, x) tmp = wj - ((wj / (wj + 1.0)) - (x / (exp(wj) + (wj * exp(wj))))); end
code[wj_, x_] := N[(wj - N[(N[(wj / N[(wj + 1.0), $MachinePrecision]), $MachinePrecision] - N[(x / N[(N[Exp[wj], $MachinePrecision] + N[(wj * N[Exp[wj], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
wj - \left(\frac{wj}{wj + 1} - \frac{x}{e^{wj} + wj \cdot e^{wj}}\right)
herbie shell --seed 2025183
(FPCore (wj x)
:name "Jmat.Real.lambertw, newton loop step"
:precision binary64
:alt
(! :herbie-platform c (let ((ew (exp wj))) (- wj (- (/ wj (+ wj 1)) (/ x (+ ew (* wj ew)))))))
(- wj (/ (- (* wj (exp wj)) x) (+ (exp wj) (* wj (exp wj))))))