
(FPCore (wj x) :precision binary64 (let* ((t_0 (* wj (exp wj)))) (- wj (/ (- t_0 x) (+ (exp wj) t_0)))))
double code(double wj, double x) {
double t_0 = wj * exp(wj);
return wj - ((t_0 - x) / (exp(wj) + t_0));
}
module fmin_fmax_functions
  ! NaN-aware max/min wrappers with C fmax/fmin semantics:
  ! if exactly one argument is NaN, the other argument is returned;
  ! otherwise the ordinary max/min is taken.  Generic interfaces cover
  ! all four real(8)/real(4) argument combinations.
  implicit none
  private
  public :: fmax
  public :: fmin
  interface fmax
    module procedure fmax88
    module procedure fmax44
    module procedure fmax84
    module procedure fmax48
  end interface
  interface fmin
    module procedure fmin88
    module procedure fmin44
    module procedure fmin84
    module procedure fmin48
  end interface
contains
  real(8) function fmax88(x, y) result(res)
    real(8), intent(in) :: x
    real(8), intent(in) :: y
    ! x /= x is the portable "is NaN" test.
    if (x /= x) then
      res = y
    else if (y /= y) then
      res = x
    else
      res = max(x, y)
    end if
  end function fmax88
  real(4) function fmax44(x, y) result(res)
    real(4), intent(in) :: x
    real(4), intent(in) :: y
    if (x /= x) then
      res = y
    else if (y /= y) then
      res = x
    else
      res = max(x, y)
    end if
  end function fmax44
  real(8) function fmax84(x, y) result(res)
    real(8), intent(in) :: x
    real(4), intent(in) :: y
    if (x /= x) then
      res = dble(y)
    else if (y /= y) then
      res = x
    else
      res = max(x, dble(y))
    end if
  end function fmax84
  real(8) function fmax48(x, y) result(res)
    real(4), intent(in) :: x
    real(8), intent(in) :: y
    if (x /= x) then
      res = y
    else if (y /= y) then
      res = dble(x)
    else
      res = max(dble(x), y)
    end if
  end function fmax48
  real(8) function fmin88(x, y) result(res)
    real(8), intent(in) :: x
    real(8), intent(in) :: y
    if (x /= x) then
      res = y
    else if (y /= y) then
      res = x
    else
      res = min(x, y)
    end if
  end function fmin88
  real(4) function fmin44(x, y) result(res)
    real(4), intent(in) :: x
    real(4), intent(in) :: y
    if (x /= x) then
      res = y
    else if (y /= y) then
      res = x
    else
      res = min(x, y)
    end if
  end function fmin44
  real(8) function fmin84(x, y) result(res)
    real(8), intent(in) :: x
    real(4), intent(in) :: y
    if (x /= x) then
      res = dble(y)
    else if (y /= y) then
      res = x
    else
      res = min(x, dble(y))
    end if
  end function fmin84
  real(8) function fmin48(x, y) result(res)
    real(4), intent(in) :: x
    real(8), intent(in) :: y
    if (x /= x) then
      res = y
    else if (y /= y) then
      res = dble(x)
    else
      res = min(dble(x), y)
    end if
  end function fmin48
end module fmin_fmax_functions
real(8) function code(wj, x)
! One Newton step for the Lambert W equation w*e^w = x at estimate wj.
! exp(wj) is now evaluated once (the original computed it twice);
! implicit none added for type safety.
use fmin_fmax_functions
implicit none
real(8), intent (in) :: wj
real(8), intent (in) :: x
real(8) :: t_0, ew
ew = exp(wj)
t_0 = wj * ew
code = wj - ((t_0 - x) / (ew + t_0))
end function
/**
 * One Newton step for the Lambert W equation w*e^w = x at estimate wj.
 * Math.exp(wj) is hoisted into a local (the original computed it twice);
 * the result is bit-identical.
 */
public static double code(double wj, double x) {
double ew = Math.exp(wj);
double t_0 = wj * ew;
return wj - ((t_0 - x) / (ew + t_0));
}
def code(wj, x): t_0 = wj * math.exp(wj) return wj - ((t_0 - x) / (math.exp(wj) + t_0))
function code(wj, x) t_0 = Float64(wj * exp(wj)) return Float64(wj - Float64(Float64(t_0 - x) / Float64(exp(wj) + t_0))) end
function tmp = code(wj, x) t_0 = wj * exp(wj); tmp = wj - ((t_0 - x) / (exp(wj) + t_0)); end
code[wj_, x_] := Block[{t$95$0 = N[(wj * N[Exp[wj], $MachinePrecision]), $MachinePrecision]}, N[(wj - N[(N[(t$95$0 - x), $MachinePrecision] / N[(N[Exp[wj], $MachinePrecision] + t$95$0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := wj \cdot e^{wj}\\
wj - \frac{t_0 - x}{e^{wj} + t_0}
\end{array}
\end{array}
Sampling outcomes in binary64 precision:
Herbie found 12 alternatives:
| Alternative | Accuracy | Speedup |
|---|---|---|
(FPCore (wj x) :precision binary64 (let* ((t_0 (* wj (exp wj)))) (- wj (/ (- t_0 x) (+ (exp wj) t_0)))))
double code(double wj, double x) {
double t_0 = wj * exp(wj);
return wj - ((t_0 - x) / (exp(wj) + t_0));
}
module fmin_fmax_functions
implicit none
private
public fmax
public fmin
interface fmax
module procedure fmax88
module procedure fmax44
module procedure fmax84
module procedure fmax48
end interface
interface fmin
module procedure fmin88
module procedure fmin44
module procedure fmin84
module procedure fmin48
end interface
contains
real(8) function fmax88(x, y) result (res)
real(8), intent (in) :: x
real(8), intent (in) :: y
res = merge(y, merge(x, max(x, y), y /= y), x /= x)
end function
real(4) function fmax44(x, y) result (res)
real(4), intent (in) :: x
real(4), intent (in) :: y
res = merge(y, merge(x, max(x, y), y /= y), x /= x)
end function
real(8) function fmax84(x, y) result(res)
real(8), intent (in) :: x
real(4), intent (in) :: y
res = merge(dble(y), merge(x, max(x, dble(y)), y /= y), x /= x)
end function
real(8) function fmax48(x, y) result(res)
real(4), intent (in) :: x
real(8), intent (in) :: y
res = merge(y, merge(dble(x), max(dble(x), y), y /= y), x /= x)
end function
real(8) function fmin88(x, y) result (res)
real(8), intent (in) :: x
real(8), intent (in) :: y
res = merge(y, merge(x, min(x, y), y /= y), x /= x)
end function
real(4) function fmin44(x, y) result (res)
real(4), intent (in) :: x
real(4), intent (in) :: y
res = merge(y, merge(x, min(x, y), y /= y), x /= x)
end function
real(8) function fmin84(x, y) result(res)
real(8), intent (in) :: x
real(4), intent (in) :: y
res = merge(dble(y), merge(x, min(x, dble(y)), y /= y), x /= x)
end function
real(8) function fmin48(x, y) result(res)
real(4), intent (in) :: x
real(8), intent (in) :: y
res = merge(y, merge(dble(x), min(dble(x), y), y /= y), x /= x)
end function
end module
real(8) function code(wj, x)
use fmin_fmax_functions
real(8), intent (in) :: wj
real(8), intent (in) :: x
real(8) :: t_0
t_0 = wj * exp(wj)
code = wj - ((t_0 - x) / (exp(wj) + t_0))
end function
public static double code(double wj, double x) {
double t_0 = wj * Math.exp(wj);
return wj - ((t_0 - x) / (Math.exp(wj) + t_0));
}
def code(wj, x): t_0 = wj * math.exp(wj) return wj - ((t_0 - x) / (math.exp(wj) + t_0))
function code(wj, x) t_0 = Float64(wj * exp(wj)) return Float64(wj - Float64(Float64(t_0 - x) / Float64(exp(wj) + t_0))) end
function tmp = code(wj, x) t_0 = wj * exp(wj); tmp = wj - ((t_0 - x) / (exp(wj) + t_0)); end
code[wj_, x_] := Block[{t$95$0 = N[(wj * N[Exp[wj], $MachinePrecision]), $MachinePrecision]}, N[(wj - N[(N[(t$95$0 - x), $MachinePrecision] / N[(N[Exp[wj], $MachinePrecision] + t$95$0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := wj \cdot e^{wj}\\
wj - \frac{t\_0 - x}{e^{wj} + t\_0}
\end{array}
\end{array}
(FPCore (wj x)
:precision binary64
(let* ((t_0 (/ x (exp wj))))
(if (<= wj 1.0)
(fma
(fma
(-
(fma
(*
(+ (fma -3.0 x (fma 0.6666666666666666 x (* (* x -2.5) -2.0))) 1.0)
wj)
-1.0
1.0)
(* x -2.5))
wj
(* -2.0 x))
wj
x)
(- wj (fma (/ (+ (fma (/ (+ t_0 1.0) wj) -1.0 t_0) 1.0) wj) -1.0 1.0)))))
double code(double wj, double x) {
double t_0 = x / exp(wj);
double tmp;
if (wj <= 1.0) {
tmp = fma(fma((fma(((fma(-3.0, x, fma(0.6666666666666666, x, ((x * -2.5) * -2.0))) + 1.0) * wj), -1.0, 1.0) - (x * -2.5)), wj, (-2.0 * x)), wj, x);
} else {
tmp = wj - fma(((fma(((t_0 + 1.0) / wj), -1.0, t_0) + 1.0) / wj), -1.0, 1.0);
}
return tmp;
}
function code(wj, x) t_0 = Float64(x / exp(wj)) tmp = 0.0 if (wj <= 1.0) tmp = fma(fma(Float64(fma(Float64(Float64(fma(-3.0, x, fma(0.6666666666666666, x, Float64(Float64(x * -2.5) * -2.0))) + 1.0) * wj), -1.0, 1.0) - Float64(x * -2.5)), wj, Float64(-2.0 * x)), wj, x); else tmp = Float64(wj - fma(Float64(Float64(fma(Float64(Float64(t_0 + 1.0) / wj), -1.0, t_0) + 1.0) / wj), -1.0, 1.0)); end return tmp end
code[wj_, x_] := Block[{t$95$0 = N[(x / N[Exp[wj], $MachinePrecision]), $MachinePrecision]}, If[LessEqual[wj, 1.0], N[(N[(N[(N[(N[(N[(N[(-3.0 * x + N[(0.6666666666666666 * x + N[(N[(x * -2.5), $MachinePrecision] * -2.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] + 1.0), $MachinePrecision] * wj), $MachinePrecision] * -1.0 + 1.0), $MachinePrecision] - N[(x * -2.5), $MachinePrecision]), $MachinePrecision] * wj + N[(-2.0 * x), $MachinePrecision]), $MachinePrecision] * wj + x), $MachinePrecision], N[(wj - N[(N[(N[(N[(N[(N[(t$95$0 + 1.0), $MachinePrecision] / wj), $MachinePrecision] * -1.0 + t$95$0), $MachinePrecision] + 1.0), $MachinePrecision] / wj), $MachinePrecision] * -1.0 + 1.0), $MachinePrecision]), $MachinePrecision]]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := \frac{x}{e^{wj}}\\
\mathbf{if}\;wj \leq 1:\\
\;\;\;\;\mathsf{fma}\left(\mathsf{fma}\left(\mathsf{fma}\left(\left(\mathsf{fma}\left(-3, x, \mathsf{fma}\left(0.6666666666666666, x, \left(x \cdot -2.5\right) \cdot -2\right)\right) + 1\right) \cdot wj, -1, 1\right) - x \cdot -2.5, wj, -2 \cdot x\right), wj, x\right)\\
\mathbf{else}:\\
\;\;\;\;wj - \mathsf{fma}\left(\frac{\mathsf{fma}\left(\frac{t\_0 + 1}{wj}, -1, t\_0\right) + 1}{wj}, -1, 1\right)\\
\end{array}
\end{array}
if wj < 1Initial program 78.7%
Taylor expanded in wj around 0
+-commutativeN/A
*-commutativeN/A
lower-fma.f64N/A
Applied rewrites98.9%
if 1 < wj Initial program 27.3%
Taylor expanded in wj around -inf
+-commutativeN/A
*-commutativeN/A
lower-fma.f64N/A
Applied rewrites88.2%
(FPCore (wj x)
:precision binary64
(if (<= wj 0.65)
(fma
(fma
(-
(fma
(*
(+ (fma -3.0 x (fma 0.6666666666666666 x (* (* x -2.5) -2.0))) 1.0)
wj)
-1.0
1.0)
(* x -2.5))
wj
(* -2.0 x))
wj
x)
(- wj (fma (/ (+ (/ x (exp wj)) 1.0) wj) -1.0 1.0))))
double code(double wj, double x) {
double tmp;
if (wj <= 0.65) {
tmp = fma(fma((fma(((fma(-3.0, x, fma(0.6666666666666666, x, ((x * -2.5) * -2.0))) + 1.0) * wj), -1.0, 1.0) - (x * -2.5)), wj, (-2.0 * x)), wj, x);
} else {
tmp = wj - fma((((x / exp(wj)) + 1.0) / wj), -1.0, 1.0);
}
return tmp;
}
function code(wj, x) tmp = 0.0 if (wj <= 0.65) tmp = fma(fma(Float64(fma(Float64(Float64(fma(-3.0, x, fma(0.6666666666666666, x, Float64(Float64(x * -2.5) * -2.0))) + 1.0) * wj), -1.0, 1.0) - Float64(x * -2.5)), wj, Float64(-2.0 * x)), wj, x); else tmp = Float64(wj - fma(Float64(Float64(Float64(x / exp(wj)) + 1.0) / wj), -1.0, 1.0)); end return tmp end
code[wj_, x_] := If[LessEqual[wj, 0.65], N[(N[(N[(N[(N[(N[(N[(-3.0 * x + N[(0.6666666666666666 * x + N[(N[(x * -2.5), $MachinePrecision] * -2.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] + 1.0), $MachinePrecision] * wj), $MachinePrecision] * -1.0 + 1.0), $MachinePrecision] - N[(x * -2.5), $MachinePrecision]), $MachinePrecision] * wj + N[(-2.0 * x), $MachinePrecision]), $MachinePrecision] * wj + x), $MachinePrecision], N[(wj - N[(N[(N[(N[(x / N[Exp[wj], $MachinePrecision]), $MachinePrecision] + 1.0), $MachinePrecision] / wj), $MachinePrecision] * -1.0 + 1.0), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;wj \leq 0.65:\\
\;\;\;\;\mathsf{fma}\left(\mathsf{fma}\left(\mathsf{fma}\left(\left(\mathsf{fma}\left(-3, x, \mathsf{fma}\left(0.6666666666666666, x, \left(x \cdot -2.5\right) \cdot -2\right)\right) + 1\right) \cdot wj, -1, 1\right) - x \cdot -2.5, wj, -2 \cdot x\right), wj, x\right)\\
\mathbf{else}:\\
\;\;\;\;wj - \mathsf{fma}\left(\frac{\frac{x}{e^{wj}} + 1}{wj}, -1, 1\right)\\
\end{array}
\end{array}
if wj < 0.650000000000000022Initial program 78.7%
Taylor expanded in wj around 0
+-commutativeN/A
*-commutativeN/A
lower-fma.f64N/A
Applied rewrites98.9%
if 0.650000000000000022 < wj Initial program 27.3%
Taylor expanded in wj around -inf
+-commutativeN/A
*-commutativeN/A
lower-fma.f64N/A
lower-/.f64N/A
+-commutativeN/A
lower-+.f64N/A
lower-/.f64N/A
lift-exp.f6481.2
Applied rewrites81.2%
(FPCore (wj x)
:precision binary64
(if (<= wj 0.65)
(fma
(fma
(-
(fma
(*
(+ (fma -3.0 x (fma 0.6666666666666666 x (* (* x -2.5) -2.0))) 1.0)
wj)
-1.0
1.0)
(* x -2.5))
wj
(* -2.0 x))
wj
x)
(- wj (/ (- wj 1.0) wj))))
double code(double wj, double x) {
double tmp;
if (wj <= 0.65) {
tmp = fma(fma((fma(((fma(-3.0, x, fma(0.6666666666666666, x, ((x * -2.5) * -2.0))) + 1.0) * wj), -1.0, 1.0) - (x * -2.5)), wj, (-2.0 * x)), wj, x);
} else {
tmp = wj - ((wj - 1.0) / wj);
}
return tmp;
}
function code(wj, x) tmp = 0.0 if (wj <= 0.65) tmp = fma(fma(Float64(fma(Float64(Float64(fma(-3.0, x, fma(0.6666666666666666, x, Float64(Float64(x * -2.5) * -2.0))) + 1.0) * wj), -1.0, 1.0) - Float64(x * -2.5)), wj, Float64(-2.0 * x)), wj, x); else tmp = Float64(wj - Float64(Float64(wj - 1.0) / wj)); end return tmp end
code[wj_, x_] := If[LessEqual[wj, 0.65], N[(N[(N[(N[(N[(N[(N[(-3.0 * x + N[(0.6666666666666666 * x + N[(N[(x * -2.5), $MachinePrecision] * -2.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] + 1.0), $MachinePrecision] * wj), $MachinePrecision] * -1.0 + 1.0), $MachinePrecision] - N[(x * -2.5), $MachinePrecision]), $MachinePrecision] * wj + N[(-2.0 * x), $MachinePrecision]), $MachinePrecision] * wj + x), $MachinePrecision], N[(wj - N[(N[(wj - 1.0), $MachinePrecision] / wj), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;wj \leq 0.65:\\
\;\;\;\;\mathsf{fma}\left(\mathsf{fma}\left(\mathsf{fma}\left(\left(\mathsf{fma}\left(-3, x, \mathsf{fma}\left(0.6666666666666666, x, \left(x \cdot -2.5\right) \cdot -2\right)\right) + 1\right) \cdot wj, -1, 1\right) - x \cdot -2.5, wj, -2 \cdot x\right), wj, x\right)\\
\mathbf{else}:\\
\;\;\;\;wj - \frac{wj - 1}{wj}\\
\end{array}
\end{array}
if wj < 0.650000000000000022Initial program 78.7%
Taylor expanded in wj around 0
+-commutativeN/A
*-commutativeN/A
lower-fma.f64N/A
Applied rewrites98.9%
if 0.650000000000000022 < wj Initial program 27.3%
Taylor expanded in wj around -inf
+-commutativeN/A
*-commutativeN/A
lower-fma.f64N/A
lower-/.f64N/A
+-commutativeN/A
lower-+.f64N/A
lower-/.f64N/A
lift-exp.f6481.2
Applied rewrites81.2%
Taylor expanded in wj around 0
lower-/.f64N/A
Applied rewrites61.9%
Taylor expanded in x around 0
lower--.f6478.4
Applied rewrites78.4%
(FPCore (wj x)
:precision binary64
(if (<= wj 0.65)
(fma
(fma
(* (+ (fma -2.6666666666666665 wj (/ (fma -1.0 wj 1.0) x)) 2.5) x)
wj
(* -2.0 x))
wj
x)
(- wj (/ (- wj 1.0) wj))))
double code(double wj, double x) {
double tmp;
if (wj <= 0.65) {
tmp = fma(fma(((fma(-2.6666666666666665, wj, (fma(-1.0, wj, 1.0) / x)) + 2.5) * x), wj, (-2.0 * x)), wj, x);
} else {
tmp = wj - ((wj - 1.0) / wj);
}
return tmp;
}
function code(wj, x) tmp = 0.0 if (wj <= 0.65) tmp = fma(fma(Float64(Float64(fma(-2.6666666666666665, wj, Float64(fma(-1.0, wj, 1.0) / x)) + 2.5) * x), wj, Float64(-2.0 * x)), wj, x); else tmp = Float64(wj - Float64(Float64(wj - 1.0) / wj)); end return tmp end
code[wj_, x_] := If[LessEqual[wj, 0.65], N[(N[(N[(N[(N[(-2.6666666666666665 * wj + N[(N[(-1.0 * wj + 1.0), $MachinePrecision] / x), $MachinePrecision]), $MachinePrecision] + 2.5), $MachinePrecision] * x), $MachinePrecision] * wj + N[(-2.0 * x), $MachinePrecision]), $MachinePrecision] * wj + x), $MachinePrecision], N[(wj - N[(N[(wj - 1.0), $MachinePrecision] / wj), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;wj \leq 0.65:\\
\;\;\;\;\mathsf{fma}\left(\mathsf{fma}\left(\left(\mathsf{fma}\left(-2.6666666666666665, wj, \frac{\mathsf{fma}\left(-1, wj, 1\right)}{x}\right) + 2.5\right) \cdot x, wj, -2 \cdot x\right), wj, x\right)\\
\mathbf{else}:\\
\;\;\;\;wj - \frac{wj - 1}{wj}\\
\end{array}
\end{array}
if wj < 0.650000000000000022Initial program 78.7%
Taylor expanded in wj around 0
+-commutativeN/A
*-commutativeN/A
lower-fma.f64N/A
Applied rewrites98.9%
Taylor expanded in x around inf
*-commutativeN/A
lower-*.f64N/A
+-commutativeN/A
lower-+.f64N/A
lower-fma.f64N/A
+-commutativeN/A
associate-*r/N/A
div-addN/A
lower-/.f64N/A
mul-1-negN/A
+-commutativeN/A
mul-1-negN/A
lower-fma.f6498.9
Applied rewrites98.9%
if 0.650000000000000022 < wj Initial program 27.3%
Taylor expanded in wj around -inf
+-commutativeN/A
*-commutativeN/A
lower-fma.f64N/A
lower-/.f64N/A
+-commutativeN/A
lower-+.f64N/A
lower-/.f64N/A
lift-exp.f6481.2
Applied rewrites81.2%
Taylor expanded in wj around 0
lower-/.f64N/A
Applied rewrites61.9%
Taylor expanded in x around 0
lower--.f6478.4
Applied rewrites78.4%
(FPCore (wj x) :precision binary64 (if (<= wj 0.5) (fma (fma (* (/ (fma -1.0 wj 1.0) x) x) wj (* -2.0 x)) wj x) (- wj (/ (- wj 1.0) wj))))
double code(double wj, double x) {
double tmp;
if (wj <= 0.5) {
tmp = fma(fma(((fma(-1.0, wj, 1.0) / x) * x), wj, (-2.0 * x)), wj, x);
} else {
tmp = wj - ((wj - 1.0) / wj);
}
return tmp;
}
function code(wj, x) tmp = 0.0 if (wj <= 0.5) tmp = fma(fma(Float64(Float64(fma(-1.0, wj, 1.0) / x) * x), wj, Float64(-2.0 * x)), wj, x); else tmp = Float64(wj - Float64(Float64(wj - 1.0) / wj)); end return tmp end
code[wj_, x_] := If[LessEqual[wj, 0.5], N[(N[(N[(N[(N[(-1.0 * wj + 1.0), $MachinePrecision] / x), $MachinePrecision] * x), $MachinePrecision] * wj + N[(-2.0 * x), $MachinePrecision]), $MachinePrecision] * wj + x), $MachinePrecision], N[(wj - N[(N[(wj - 1.0), $MachinePrecision] / wj), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;wj \leq 0.5:\\
\;\;\;\;\mathsf{fma}\left(\mathsf{fma}\left(\frac{\mathsf{fma}\left(-1, wj, 1\right)}{x} \cdot x, wj, -2 \cdot x\right), wj, x\right)\\
\mathbf{else}:\\
\;\;\;\;wj - \frac{wj - 1}{wj}\\
\end{array}
\end{array}
if wj < 0.5Initial program 78.7%
Taylor expanded in wj around 0
+-commutativeN/A
*-commutativeN/A
lower-fma.f64N/A
Applied rewrites98.9%
Taylor expanded in x around inf
*-commutativeN/A
lower-*.f64N/A
+-commutativeN/A
lower-+.f64N/A
lower-fma.f64N/A
+-commutativeN/A
associate-*r/N/A
div-addN/A
lower-/.f64N/A
mul-1-negN/A
+-commutativeN/A
mul-1-negN/A
lower-fma.f6498.9
Applied rewrites98.9%
Taylor expanded in x around 0
mul-1-negN/A
+-commutativeN/A
mul-1-negN/A
lift-fma.f64N/A
lift-/.f6498.8
Applied rewrites98.8%
if 0.5 < wj Initial program 27.3%
Taylor expanded in wj around -inf
+-commutativeN/A
*-commutativeN/A
lower-fma.f64N/A
lower-/.f64N/A
+-commutativeN/A
lower-+.f64N/A
lower-/.f64N/A
lift-exp.f6481.2
Applied rewrites81.2%
Taylor expanded in wj around 0
lower-/.f64N/A
Applied rewrites61.9%
Taylor expanded in x around 0
lower--.f6478.4
Applied rewrites78.4%
(FPCore (wj x) :precision binary64 (if (<= wj 3.9) (fma (fma (- 1.0 (* x -2.5)) wj (* -2.0 x)) wj x) (- wj (/ (- wj 1.0) wj))))
double code(double wj, double x) {
double tmp;
if (wj <= 3.9) {
tmp = fma(fma((1.0 - (x * -2.5)), wj, (-2.0 * x)), wj, x);
} else {
tmp = wj - ((wj - 1.0) / wj);
}
return tmp;
}
function code(wj, x) tmp = 0.0 if (wj <= 3.9) tmp = fma(fma(Float64(1.0 - Float64(x * -2.5)), wj, Float64(-2.0 * x)), wj, x); else tmp = Float64(wj - Float64(Float64(wj - 1.0) / wj)); end return tmp end
code[wj_, x_] := If[LessEqual[wj, 3.9], N[(N[(N[(1.0 - N[(x * -2.5), $MachinePrecision]), $MachinePrecision] * wj + N[(-2.0 * x), $MachinePrecision]), $MachinePrecision] * wj + x), $MachinePrecision], N[(wj - N[(N[(wj - 1.0), $MachinePrecision] / wj), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;wj \leq 3.9:\\
\;\;\;\;\mathsf{fma}\left(\mathsf{fma}\left(1 - x \cdot -2.5, wj, -2 \cdot x\right), wj, x\right)\\
\mathbf{else}:\\
\;\;\;\;wj - \frac{wj - 1}{wj}\\
\end{array}
\end{array}
if wj < 3.89999999999999991Initial program 78.7%
Taylor expanded in wj around 0
+-commutativeN/A
*-commutativeN/A
lower-fma.f64N/A
fp-cancel-sub-sign-invN/A
*-commutativeN/A
metadata-evalN/A
lower-fma.f64N/A
lower--.f64N/A
distribute-rgt-outN/A
lower-*.f64N/A
metadata-evalN/A
lower-*.f6498.6
Applied rewrites98.6%
if 3.89999999999999991 < wj Initial program 27.3%
Taylor expanded in wj around -inf
+-commutativeN/A
*-commutativeN/A
lower-fma.f64N/A
lower-/.f64N/A
+-commutativeN/A
lower-+.f64N/A
lower-/.f64N/A
lift-exp.f6481.2
Applied rewrites81.2%
Taylor expanded in wj around 0
lower-/.f64N/A
Applied rewrites61.9%
Taylor expanded in x around 0
lower--.f6478.4
Applied rewrites78.4%
(FPCore (wj x) :precision binary64 (if (<= wj 1.0) (fma (* (fma -1.0 wj 1.0) wj) wj x) (- wj (/ (- wj 1.0) wj))))
double code(double wj, double x) {
double tmp;
if (wj <= 1.0) {
tmp = fma((fma(-1.0, wj, 1.0) * wj), wj, x);
} else {
tmp = wj - ((wj - 1.0) / wj);
}
return tmp;
}
function code(wj, x) tmp = 0.0 if (wj <= 1.0) tmp = fma(Float64(fma(-1.0, wj, 1.0) * wj), wj, x); else tmp = Float64(wj - Float64(Float64(wj - 1.0) / wj)); end return tmp end
code[wj_, x_] := If[LessEqual[wj, 1.0], N[(N[(N[(-1.0 * wj + 1.0), $MachinePrecision] * wj), $MachinePrecision] * wj + x), $MachinePrecision], N[(wj - N[(N[(wj - 1.0), $MachinePrecision] / wj), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;wj \leq 1:\\
\;\;\;\;\mathsf{fma}\left(\mathsf{fma}\left(-1, wj, 1\right) \cdot wj, wj, x\right)\\
\mathbf{else}:\\
\;\;\;\;wj - \frac{wj - 1}{wj}\\
\end{array}
\end{array}
if wj < 1Initial program 78.7%
Taylor expanded in wj around 0
+-commutativeN/A
*-commutativeN/A
lower-fma.f64N/A
Applied rewrites98.9%
Taylor expanded in x around 0
*-commutativeN/A
lower-*.f64N/A
mul-1-negN/A
+-commutativeN/A
mul-1-negN/A
lower-fma.f6498.3
Applied rewrites98.3%
if 1 < wj Initial program 27.3%
Taylor expanded in wj around -inf
+-commutativeN/A
*-commutativeN/A
lower-fma.f64N/A
lower-/.f64N/A
+-commutativeN/A
lower-+.f64N/A
lower-/.f64N/A
lift-exp.f6481.2
Applied rewrites81.2%
Taylor expanded in wj around 0
lower-/.f64N/A
Applied rewrites61.9%
Taylor expanded in x around 0
lower--.f6478.4
Applied rewrites78.4%
(FPCore (wj x) :precision binary64 (if (<= wj 1.0) (fma (* (fma -1.0 wj 1.0) wj) wj x) (- wj 1.0)))
double code(double wj, double x) {
double tmp;
if (wj <= 1.0) {
tmp = fma((fma(-1.0, wj, 1.0) * wj), wj, x);
} else {
tmp = wj - 1.0;
}
return tmp;
}
function code(wj, x) tmp = 0.0 if (wj <= 1.0) tmp = fma(Float64(fma(-1.0, wj, 1.0) * wj), wj, x); else tmp = Float64(wj - 1.0); end return tmp end
code[wj_, x_] := If[LessEqual[wj, 1.0], N[(N[(N[(-1.0 * wj + 1.0), $MachinePrecision] * wj), $MachinePrecision] * wj + x), $MachinePrecision], N[(wj - 1.0), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;wj \leq 1:\\
\;\;\;\;\mathsf{fma}\left(\mathsf{fma}\left(-1, wj, 1\right) \cdot wj, wj, x\right)\\
\mathbf{else}:\\
\;\;\;\;wj - 1\\
\end{array}
\end{array}
if wj < 1Initial program 78.7%
Taylor expanded in wj around 0
+-commutativeN/A
*-commutativeN/A
lower-fma.f64N/A
Applied rewrites98.9%
Taylor expanded in x around 0
*-commutativeN/A
lower-*.f64N/A
mul-1-negN/A
+-commutativeN/A
mul-1-negN/A
lower-fma.f6498.3
Applied rewrites98.3%
if 1 < wj Initial program 27.3%
Taylor expanded in wj around inf
Applied rewrites65.7%
(FPCore (wj x) :precision binary64 (if (<= wj 6.8) (fma wj wj x) (- wj 1.0)))
double code(double wj, double x) {
double tmp;
if (wj <= 6.8) {
tmp = fma(wj, wj, x);
} else {
tmp = wj - 1.0;
}
return tmp;
}
function code(wj, x) tmp = 0.0 if (wj <= 6.8) tmp = fma(wj, wj, x); else tmp = Float64(wj - 1.0); end return tmp end
code[wj_, x_] := If[LessEqual[wj, 6.8], N[(wj * wj + x), $MachinePrecision], N[(wj - 1.0), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;wj \leq 6.8:\\
\;\;\;\;\mathsf{fma}\left(wj, wj, x\right)\\
\mathbf{else}:\\
\;\;\;\;wj - 1\\
\end{array}
\end{array}
if wj < 6.79999999999999982Initial program 78.7%
Taylor expanded in wj around 0
+-commutativeN/A
*-commutativeN/A
lower-fma.f64N/A
fp-cancel-sub-sign-invN/A
*-commutativeN/A
metadata-evalN/A
lower-fma.f64N/A
lower--.f64N/A
distribute-rgt-outN/A
lower-*.f64N/A
metadata-evalN/A
lower-*.f6498.6
Applied rewrites98.6%
Taylor expanded in x around 0
Applied rewrites98.0%
if 6.79999999999999982 < wj Initial program 27.3%
Taylor expanded in wj around inf
Applied rewrites65.7%
(FPCore (wj x) :precision binary64 (if (<= wj 1.0) x (- wj 1.0)))
/* Degenerate branched alternative: x for wj <= 1, wj - 1 above. */
double code(double wj, double x) {
return (wj <= 1.0) ? x : (wj - 1.0);
}
module fmin_fmax_functions
implicit none
private
public fmax
public fmin
interface fmax
module procedure fmax88
module procedure fmax44
module procedure fmax84
module procedure fmax48
end interface
interface fmin
module procedure fmin88
module procedure fmin44
module procedure fmin84
module procedure fmin48
end interface
contains
real(8) function fmax88(x, y) result (res)
real(8), intent (in) :: x
real(8), intent (in) :: y
res = merge(y, merge(x, max(x, y), y /= y), x /= x)
end function
real(4) function fmax44(x, y) result (res)
real(4), intent (in) :: x
real(4), intent (in) :: y
res = merge(y, merge(x, max(x, y), y /= y), x /= x)
end function
real(8) function fmax84(x, y) result(res)
real(8), intent (in) :: x
real(4), intent (in) :: y
res = merge(dble(y), merge(x, max(x, dble(y)), y /= y), x /= x)
end function
real(8) function fmax48(x, y) result(res)
real(4), intent (in) :: x
real(8), intent (in) :: y
res = merge(y, merge(dble(x), max(dble(x), y), y /= y), x /= x)
end function
real(8) function fmin88(x, y) result (res)
real(8), intent (in) :: x
real(8), intent (in) :: y
res = merge(y, merge(x, min(x, y), y /= y), x /= x)
end function
real(4) function fmin44(x, y) result (res)
real(4), intent (in) :: x
real(4), intent (in) :: y
res = merge(y, merge(x, min(x, y), y /= y), x /= x)
end function
real(8) function fmin84(x, y) result(res)
real(8), intent (in) :: x
real(4), intent (in) :: y
res = merge(dble(y), merge(x, min(x, dble(y)), y /= y), x /= x)
end function
real(8) function fmin48(x, y) result(res)
real(4), intent (in) :: x
real(8), intent (in) :: y
res = merge(y, merge(dble(x), min(dble(x), y), y /= y), x /= x)
end function
end module
real(8) function code(wj, x)
! Degenerate branched alternative: x for wj <= 1, wj - 1 above.
use fmin_fmax_functions
real(8), intent (in) :: wj
real(8), intent (in) :: x
if (wj <= 1.0d0) then
  code = x
else
  code = wj - 1.0d0
end if
end function
/** Degenerate branched alternative: x for wj <= 1, wj - 1 above. */
public static double code(double wj, double x) {
return (wj <= 1.0) ? x : (wj - 1.0);
}
def code(wj, x): tmp = 0 if wj <= 1.0: tmp = x else: tmp = wj - 1.0 return tmp
function code(wj, x) tmp = 0.0 if (wj <= 1.0) tmp = x; else tmp = Float64(wj - 1.0); end return tmp end
function tmp_2 = code(wj, x) tmp = 0.0; if (wj <= 1.0) tmp = x; else tmp = wj - 1.0; end tmp_2 = tmp; end
code[wj_, x_] := If[LessEqual[wj, 1.0], x, N[(wj - 1.0), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;wj \leq 1:\\
\;\;\;\;x\\
\mathbf{else}:\\
\;\;\;\;wj - 1\\
\end{array}
\end{array}
if wj < 1Initial program 78.7%
Taylor expanded in wj around 0
Applied rewrites86.9%
if 1 < wj Initial program 27.3%
Taylor expanded in wj around inf
Applied rewrites65.7%
(FPCore (wj x) :precision binary64 (if (<= wj 0.205) x wj))
/* Degenerate branched alternative: x for wj <= 0.205, wj above. */
double code(double wj, double x) {
return (wj <= 0.205) ? x : wj;
}
module fmin_fmax_functions
implicit none
private
public fmax
public fmin
interface fmax
module procedure fmax88
module procedure fmax44
module procedure fmax84
module procedure fmax48
end interface
interface fmin
module procedure fmin88
module procedure fmin44
module procedure fmin84
module procedure fmin48
end interface
contains
real(8) function fmax88(x, y) result (res)
real(8), intent (in) :: x
real(8), intent (in) :: y
res = merge(y, merge(x, max(x, y), y /= y), x /= x)
end function
real(4) function fmax44(x, y) result (res)
real(4), intent (in) :: x
real(4), intent (in) :: y
res = merge(y, merge(x, max(x, y), y /= y), x /= x)
end function
real(8) function fmax84(x, y) result(res)
real(8), intent (in) :: x
real(4), intent (in) :: y
res = merge(dble(y), merge(x, max(x, dble(y)), y /= y), x /= x)
end function
real(8) function fmax48(x, y) result(res)
real(4), intent (in) :: x
real(8), intent (in) :: y
res = merge(y, merge(dble(x), max(dble(x), y), y /= y), x /= x)
end function
real(8) function fmin88(x, y) result (res)
real(8), intent (in) :: x
real(8), intent (in) :: y
res = merge(y, merge(x, min(x, y), y /= y), x /= x)
end function
real(4) function fmin44(x, y) result (res)
real(4), intent (in) :: x
real(4), intent (in) :: y
res = merge(y, merge(x, min(x, y), y /= y), x /= x)
end function
real(8) function fmin84(x, y) result(res)
real(8), intent (in) :: x
real(4), intent (in) :: y
res = merge(dble(y), merge(x, min(x, dble(y)), y /= y), x /= x)
end function
real(8) function fmin48(x, y) result(res)
real(4), intent (in) :: x
real(8), intent (in) :: y
res = merge(y, merge(dble(x), min(dble(x), y), y /= y), x /= x)
end function
end module
real(8) function code(wj, x)
use fmin_fmax_functions
real(8), intent (in) :: wj
real(8), intent (in) :: x
real(8) :: tmp
if (wj <= 0.205d0) then
tmp = x
else
tmp = wj
end if
code = tmp
end function
/** Degenerate branched alternative: x for wj <= 0.205, wj above. */
public static double code(double wj, double x) {
return (wj <= 0.205) ? x : wj;
}
def code(wj, x): tmp = 0 if wj <= 0.205: tmp = x else: tmp = wj return tmp
function code(wj, x) tmp = 0.0 if (wj <= 0.205) tmp = x; else tmp = wj; end return tmp end
function tmp_2 = code(wj, x) tmp = 0.0; if (wj <= 0.205) tmp = x; else tmp = wj; end tmp_2 = tmp; end
code[wj_, x_] := If[LessEqual[wj, 0.205], x, wj]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;wj \leq 0.205:\\
\;\;\;\;x\\
\mathbf{else}:\\
\;\;\;\;wj\\
\end{array}
\end{array}
if wj < 0.204999999999999988Initial program 78.7%
Taylor expanded in wj around 0
Applied rewrites87.3%
if 0.204999999999999988 < wj Initial program 33.1%
Taylor expanded in wj around inf
Applied rewrites39.3%
(FPCore (wj x) :precision binary64 wj)
/* Fully degenerate alternative: the step collapses to the identity in wj;
 * x is ignored entirely (accuracy drops to 5.7% per the report below). */
double code(double wj, double x) {
return wj;
}
module fmin_fmax_functions
implicit none
private
public fmax
public fmin
interface fmax
module procedure fmax88
module procedure fmax44
module procedure fmax84
module procedure fmax48
end interface
interface fmin
module procedure fmin88
module procedure fmin44
module procedure fmin84
module procedure fmin48
end interface
contains
real(8) function fmax88(x, y) result (res)
real(8), intent (in) :: x
real(8), intent (in) :: y
res = merge(y, merge(x, max(x, y), y /= y), x /= x)
end function
real(4) function fmax44(x, y) result (res)
real(4), intent (in) :: x
real(4), intent (in) :: y
res = merge(y, merge(x, max(x, y), y /= y), x /= x)
end function
real(8) function fmax84(x, y) result(res)
real(8), intent (in) :: x
real(4), intent (in) :: y
res = merge(dble(y), merge(x, max(x, dble(y)), y /= y), x /= x)
end function
real(8) function fmax48(x, y) result(res)
real(4), intent (in) :: x
real(8), intent (in) :: y
res = merge(y, merge(dble(x), max(dble(x), y), y /= y), x /= x)
end function
real(8) function fmin88(x, y) result (res)
real(8), intent (in) :: x
real(8), intent (in) :: y
res = merge(y, merge(x, min(x, y), y /= y), x /= x)
end function
real(4) function fmin44(x, y) result (res)
real(4), intent (in) :: x
real(4), intent (in) :: y
res = merge(y, merge(x, min(x, y), y /= y), x /= x)
end function
real(8) function fmin84(x, y) result(res)
real(8), intent (in) :: x
real(4), intent (in) :: y
res = merge(dble(y), merge(x, min(x, dble(y)), y /= y), x /= x)
end function
real(8) function fmin48(x, y) result(res)
real(4), intent (in) :: x
real(8), intent (in) :: y
res = merge(y, merge(dble(x), min(dble(x), y), y /= y), x /= x)
end function
end module
real(8) function code(wj, x)
use fmin_fmax_functions
real(8), intent (in) :: wj
real(8), intent (in) :: x
code = wj
end function
/** Fully degenerate alternative: identity in wj; x is ignored entirely. */
public static double code(double wj, double x) {
return wj;
}
def code(wj, x): return wj
function code(wj, x) return wj end
function tmp = code(wj, x) tmp = wj; end
code[wj_, x_] := wj
\begin{array}{l}
\\
wj
\end{array}
Initial program 76.5%
Taylor expanded in wj around inf
Applied rewrites5.7%
(FPCore (wj x) :precision binary64 (- wj (- (/ wj (+ wj 1.0)) (/ x (+ (exp wj) (* wj (exp wj)))))))
double code(double wj, double x) {
return wj - ((wj / (wj + 1.0)) - (x / (exp(wj) + (wj * exp(wj)))));
}
module fmin_fmax_functions
implicit none
private
public fmax
public fmin
interface fmax
module procedure fmax88
module procedure fmax44
module procedure fmax84
module procedure fmax48
end interface
interface fmin
module procedure fmin88
module procedure fmin44
module procedure fmin84
module procedure fmin48
end interface
contains
real(8) function fmax88(x, y) result (res)
real(8), intent (in) :: x
real(8), intent (in) :: y
res = merge(y, merge(x, max(x, y), y /= y), x /= x)
end function
real(4) function fmax44(x, y) result (res)
real(4), intent (in) :: x
real(4), intent (in) :: y
res = merge(y, merge(x, max(x, y), y /= y), x /= x)
end function
real(8) function fmax84(x, y) result(res)
real(8), intent (in) :: x
real(4), intent (in) :: y
res = merge(dble(y), merge(x, max(x, dble(y)), y /= y), x /= x)
end function
real(8) function fmax48(x, y) result(res)
real(4), intent (in) :: x
real(8), intent (in) :: y
res = merge(y, merge(dble(x), max(dble(x), y), y /= y), x /= x)
end function
real(8) function fmin88(x, y) result (res)
real(8), intent (in) :: x
real(8), intent (in) :: y
res = merge(y, merge(x, min(x, y), y /= y), x /= x)
end function
real(4) function fmin44(x, y) result (res)
real(4), intent (in) :: x
real(4), intent (in) :: y
res = merge(y, merge(x, min(x, y), y /= y), x /= x)
end function
real(8) function fmin84(x, y) result(res)
real(8), intent (in) :: x
real(4), intent (in) :: y
res = merge(dble(y), merge(x, min(x, dble(y)), y /= y), x /= x)
end function
real(8) function fmin48(x, y) result(res)
real(4), intent (in) :: x
real(8), intent (in) :: y
res = merge(y, merge(dble(x), min(dble(x), y), y /= y), x /= x)
end function
end module
real(8) function code(wj, x)
use fmin_fmax_functions
real(8), intent (in) :: wj
real(8), intent (in) :: x
code = wj - ((wj / (wj + 1.0d0)) - (x / (exp(wj) + (wj * exp(wj)))))
end function
/**
 * Cancellation-avoiding Lambert-W Newton step written as two separate
 * corrections.  Math.exp(wj) is hoisted into a local (the original
 * computed it twice); the result is bit-identical.
 */
public static double code(double wj, double x) {
double ew = Math.exp(wj);
return wj - ((wj / (wj + 1.0)) - (x / (ew + (wj * ew))));
}
def code(wj, x): return wj - ((wj / (wj + 1.0)) - (x / (math.exp(wj) + (wj * math.exp(wj)))))
function code(wj, x) return Float64(wj - Float64(Float64(wj / Float64(wj + 1.0)) - Float64(x / Float64(exp(wj) + Float64(wj * exp(wj)))))) end
function tmp = code(wj, x) tmp = wj - ((wj / (wj + 1.0)) - (x / (exp(wj) + (wj * exp(wj))))); end
code[wj_, x_] := N[(wj - N[(N[(wj / N[(wj + 1.0), $MachinePrecision]), $MachinePrecision] - N[(x / N[(N[Exp[wj], $MachinePrecision] + N[(wj * N[Exp[wj], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
wj - \left(\frac{wj}{wj + 1} - \frac{x}{e^{wj} + wj \cdot e^{wj}}\right)
\end{array}
herbie shell --seed 2025051
(FPCore (wj x)
:name "Jmat.Real.lambertw, newton loop step"
:precision binary64
:alt
(! :herbie-platform default (let ((ew (exp wj))) (- wj (- (/ wj (+ wj 1)) (/ x (+ ew (* wj ew)))))))
(- wj (/ (- (* wj (exp wj)) x) (+ (exp wj) (* wj (exp wj))))))