
(FPCore (x y z t) :precision binary64 (+ x (* (* y z) (- (tanh (/ t y)) (tanh (/ x y))))))
double code(double x, double y, double z, double t) {
return x + ((y * z) * (tanh((t / y)) - tanh((x / y))));
}
! NaN-aware max/min wrappers that reproduce C's fmax()/fmin() semantics:
! when exactly one argument is NaN, the other argument is returned.
! The "v /= v" expression is a portable NaN test (only NaN compares
! unequal to itself); merge() then selects the non-NaN operand.
module fmin_fmax_functions
implicit none
private
public fmax
public fmin
! Generic names dispatch on the real kinds (8/4) of the two arguments.
interface fmax
module procedure fmax88
module procedure fmax44
module procedure fmax84
module procedure fmax48
end interface
interface fmin
module procedure fmin88
module procedure fmin44
module procedure fmin84
module procedure fmin48
end interface
contains
! res = y if x is NaN; else x if y is NaN; else max(x, y).
real(8) function fmax88(x, y) result (res)
real(8), intent (in) :: x
real(8), intent (in) :: y
res = merge(y, merge(x, max(x, y), y /= y), x /= x)
end function
real(4) function fmax44(x, y) result (res)
real(4), intent (in) :: x
real(4), intent (in) :: y
res = merge(y, merge(x, max(x, y), y /= y), x /= x)
end function
! Mixed-kind variants: the real(4) argument is promoted with dble().
real(8) function fmax84(x, y) result(res)
real(8), intent (in) :: x
real(4), intent (in) :: y
res = merge(dble(y), merge(x, max(x, dble(y)), y /= y), x /= x)
end function
real(8) function fmax48(x, y) result(res)
real(4), intent (in) :: x
real(8), intent (in) :: y
res = merge(y, merge(dble(x), max(dble(x), y), y /= y), x /= x)
end function
! res = y if x is NaN; else x if y is NaN; else min(x, y).
real(8) function fmin88(x, y) result (res)
real(8), intent (in) :: x
real(8), intent (in) :: y
res = merge(y, merge(x, min(x, y), y /= y), x /= x)
end function
real(4) function fmin44(x, y) result (res)
real(4), intent (in) :: x
real(4), intent (in) :: y
res = merge(y, merge(x, min(x, y), y /= y), x /= x)
end function
real(8) function fmin84(x, y) result(res)
real(8), intent (in) :: x
real(4), intent (in) :: y
res = merge(dble(y), merge(x, min(x, dble(y)), y /= y), x /= x)
end function
real(8) function fmin48(x, y) result(res)
real(4), intent (in) :: x
real(8), intent (in) :: y
res = merge(y, merge(dble(x), min(dble(x), y), y /= y), x /= x)
end function
end module
! Herbie "initial program": straight-line double-precision evaluation of
!   x + (y*z) * (tanh(t/y) - tanh(x/y))
! This is the unmodified input expression that the report's alternatives
! rewrite for accuracy.
real(8) function code(x, y, z, t)
use fmin_fmax_functions
real(8), intent (in) :: x
real(8), intent (in) :: y
real(8), intent (in) :: z
real(8), intent (in) :: t
code = x + ((y * z) * (tanh((t / y)) - tanh((x / y))))
end function
/** Initial program: x + (y*z) * (tanh(t/y) - tanh(x/y)) in binary64. */
public static double code(double x, double y, double z, double t) {
    double scale = y * z;
    double spread = Math.tanh(t / y) - Math.tanh(x / y);
    return x + scale * spread;
}
def code(x, y, z, t):
    """Initial program: x + (y*z) * (tanh(t/y) - tanh(x/y)) in binary64."""
    scale = y * z
    spread = math.tanh((t / y)) - math.tanh((x / y))
    return x + scale * spread
function code(x, y, z, t) return Float64(x + Float64(Float64(y * z) * Float64(tanh(Float64(t / y)) - tanh(Float64(x / y))))) end
function tmp = code(x, y, z, t) tmp = x + ((y * z) * (tanh((t / y)) - tanh((x / y)))); end
code[x_, y_, z_, t_] := N[(x + N[(N[(y * z), $MachinePrecision] * N[(N[Tanh[N[(t / y), $MachinePrecision]], $MachinePrecision] - N[Tanh[N[(x / y), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
x + \left(y \cdot z\right) \cdot \left(\tanh \left(\frac{t}{y}\right) - \tanh \left(\frac{x}{y}\right)\right)
\end{array}
Herbie found 9 alternatives:
| Alternative | Accuracy | Speedup |
|---|---|---|
(FPCore (x y z t) :precision binary64 (+ x (* (* y z) (- (tanh (/ t y)) (tanh (/ x y))))))
double code(double x, double y, double z, double t) {
return x + ((y * z) * (tanh((t / y)) - tanh((x / y))));
}
module fmin_fmax_functions
implicit none
private
public fmax
public fmin
interface fmax
module procedure fmax88
module procedure fmax44
module procedure fmax84
module procedure fmax48
end interface
interface fmin
module procedure fmin88
module procedure fmin44
module procedure fmin84
module procedure fmin48
end interface
contains
real(8) function fmax88(x, y) result (res)
real(8), intent (in) :: x
real(8), intent (in) :: y
res = merge(y, merge(x, max(x, y), y /= y), x /= x)
end function
real(4) function fmax44(x, y) result (res)
real(4), intent (in) :: x
real(4), intent (in) :: y
res = merge(y, merge(x, max(x, y), y /= y), x /= x)
end function
real(8) function fmax84(x, y) result(res)
real(8), intent (in) :: x
real(4), intent (in) :: y
res = merge(dble(y), merge(x, max(x, dble(y)), y /= y), x /= x)
end function
real(8) function fmax48(x, y) result(res)
real(4), intent (in) :: x
real(8), intent (in) :: y
res = merge(y, merge(dble(x), max(dble(x), y), y /= y), x /= x)
end function
real(8) function fmin88(x, y) result (res)
real(8), intent (in) :: x
real(8), intent (in) :: y
res = merge(y, merge(x, min(x, y), y /= y), x /= x)
end function
real(4) function fmin44(x, y) result (res)
real(4), intent (in) :: x
real(4), intent (in) :: y
res = merge(y, merge(x, min(x, y), y /= y), x /= x)
end function
real(8) function fmin84(x, y) result(res)
real(8), intent (in) :: x
real(4), intent (in) :: y
res = merge(dble(y), merge(x, min(x, dble(y)), y /= y), x /= x)
end function
real(8) function fmin48(x, y) result(res)
real(4), intent (in) :: x
real(8), intent (in) :: y
res = merge(y, merge(dble(x), min(dble(x), y), y /= y), x /= x)
end function
end module
real(8) function code(x, y, z, t)
use fmin_fmax_functions
real(8), intent (in) :: x
real(8), intent (in) :: y
real(8), intent (in) :: z
real(8), intent (in) :: t
code = x + ((y * z) * (tanh((t / y)) - tanh((x / y))))
end function
public static double code(double x, double y, double z, double t) {
return x + ((y * z) * (Math.tanh((t / y)) - Math.tanh((x / y))));
}
def code(x, y, z, t): return x + ((y * z) * (math.tanh((t / y)) - math.tanh((x / y))))
function code(x, y, z, t) return Float64(x + Float64(Float64(y * z) * Float64(tanh(Float64(t / y)) - tanh(Float64(x / y))))) end
function tmp = code(x, y, z, t) tmp = x + ((y * z) * (tanh((t / y)) - tanh((x / y)))); end
code[x_, y_, z_, t_] := N[(x + N[(N[(y * z), $MachinePrecision] * N[(N[Tanh[N[(t / y), $MachinePrecision]], $MachinePrecision] - N[Tanh[N[(x / y), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
x + \left(y \cdot z\right) \cdot \left(\tanh \left(\frac{t}{y}\right) - \tanh \left(\frac{x}{y}\right)\right)
\end{array}
(FPCore (x y z t) :precision binary64 (fma (* (- (tanh (/ t y)) (tanh (/ x y))) z) y x))
double code(double x, double y, double z, double t) {
return fma(((tanh((t / y)) - tanh((x / y))) * z), y, x);
}
function code(x, y, z, t) return fma(Float64(Float64(tanh(Float64(t / y)) - tanh(Float64(x / y))) * z), y, x) end
code[x_, y_, z_, t_] := N[(N[(N[(N[Tanh[N[(t / y), $MachinePrecision]], $MachinePrecision] - N[Tanh[N[(x / y), $MachinePrecision]], $MachinePrecision]), $MachinePrecision] * z), $MachinePrecision] * y + x), $MachinePrecision]
\begin{array}{l}
\\
\mathsf{fma}\left(\left(\tanh \left(\frac{t}{y}\right) - \tanh \left(\frac{x}{y}\right)\right) \cdot z, y, x\right)
\end{array}
Initial program 93.3%
lift-+.f64N/A
lift-*.f64N/A
lift-*.f64N/A
lift--.f64N/A
lift-/.f64N/A
lift-tanh.f64N/A
lift-/.f64N/A
lift-tanh.f64N/A
+-commutativeN/A
associate-*l*N/A
lower-fma.f64N/A
Applied rewrites97.0%
lift-fma.f64N/A
lift-*.f64N/A
lift--.f64N/A
lift-/.f64N/A
lift-tanh.f64N/A
lift-/.f64N/A
lift-tanh.f64N/A
*-commutativeN/A
lower-fma.f64N/A
Applied rewrites97.0%
(FPCore (x y z t)
:precision binary64
(let* ((t_1 (fma y (* z (tanh (/ t y))) x)))
(if (<= t -2.4e-28)
t_1
(if (<= t 2.75e+23) (fma z t (- x (* (* z y) (tanh (/ x y))))) t_1))))
double code(double x, double y, double z, double t) {
// Fused form used for t outside the middle regime (see branch below).
double t_1 = fma(y, (z * tanh((t / y))), x);
double tmp;
if (t <= -2.4e-28) {
tmp = t_1;
} else if (t <= 2.75e+23) {
// Middle regime (-2.4e-28 < t <= 2.75e+23): the t-dependent term is
// linearized (Taylor expansion in t around 0), giving fma(z, t, ...).
tmp = fma(z, t, (x - ((z * y) * tanh((x / y)))));
} else {
tmp = t_1;
}
return tmp;
}
function code(x, y, z, t) t_1 = fma(y, Float64(z * tanh(Float64(t / y))), x) tmp = 0.0 if (t <= -2.4e-28) tmp = t_1; elseif (t <= 2.75e+23) tmp = fma(z, t, Float64(x - Float64(Float64(z * y) * tanh(Float64(x / y))))); else tmp = t_1; end return tmp end
code[x_, y_, z_, t_] := Block[{t$95$1 = N[(y * N[(z * N[Tanh[N[(t / y), $MachinePrecision]], $MachinePrecision]), $MachinePrecision] + x), $MachinePrecision]}, If[LessEqual[t, -2.4e-28], t$95$1, If[LessEqual[t, 2.75e+23], N[(z * t + N[(x - N[(N[(z * y), $MachinePrecision] * N[Tanh[N[(x / y), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], t$95$1]]]
\begin{array}{l}
\\
\begin{array}{l}
t_1 := \mathsf{fma}\left(y, z \cdot \tanh \left(\frac{t}{y}\right), x\right)\\
\mathbf{if}\;t \leq -2.4 \cdot 10^{-28}:\\
\;\;\;\;t\_1\\
\mathbf{elif}\;t \leq 2.75 \cdot 10^{+23}:\\
\;\;\;\;\mathsf{fma}\left(z, t, x - \left(z \cdot y\right) \cdot \tanh \left(\frac{x}{y}\right)\right)\\
\mathbf{else}:\\
\;\;\;\;t\_1\\
\end{array}
\end{array}
if t < -2.4000000000000002e-28 or 2.75000000000000002e23 < t Initial program 96.3%
lift-+.f64N/A
lift-*.f64N/A
lift-*.f64N/A
lift--.f64N/A
lift-/.f64N/A
lift-tanh.f64N/A
lift-/.f64N/A
lift-tanh.f64N/A
+-commutativeN/A
associate-*l*N/A
lower-fma.f64N/A
Applied rewrites99.1%
Taylor expanded in x around 0
associate-/r*N/A
div-subN/A
rec-expN/A
rec-expN/A
tanh-def-aN/A
lift-tanh.f64N/A
lift-/.f6488.5
Applied rewrites88.5%
if -2.4000000000000002e-28 < t < 2.75000000000000002e23Initial program 90.3%
lift-+.f64N/A
lift-*.f64N/A
lift-*.f64N/A
lift--.f64N/A
lift-/.f64N/A
lift-tanh.f64N/A
lift-/.f64N/A
lift-tanh.f64N/A
+-commutativeN/A
associate-*l*N/A
lower-fma.f64N/A
Applied rewrites95.0%
lift-fma.f64N/A
lift-*.f64N/A
lift--.f64N/A
lift-/.f64N/A
lift-tanh.f64N/A
lift-/.f64N/A
lift-tanh.f64N/A
*-commutativeN/A
lower-fma.f64N/A
Applied rewrites95.0%
Taylor expanded in t around 0
Applied rewrites82.6%
(FPCore (x y z t)
:precision binary64
(let* ((t_1 (tanh (/ t y))))
(if (<= t -6.4e-29)
(fma y (* z t_1) x)
(if (<= t 3.1e-114)
(- x (* (* z y) (tanh (/ x y))))
(+ x (* (* y z) t_1))))))
double code(double x, double y, double z, double t) {
double t_1 = tanh((t / y));
double tmp;
if (t <= -6.4e-29) {
tmp = fma(y, (z * t_1), x);
} else if (t <= 3.1e-114) {
tmp = x - ((z * y) * tanh((x / y)));
} else {
tmp = x + ((y * z) * t_1);
}
return tmp;
}
function code(x, y, z, t) t_1 = tanh(Float64(t / y)) tmp = 0.0 if (t <= -6.4e-29) tmp = fma(y, Float64(z * t_1), x); elseif (t <= 3.1e-114) tmp = Float64(x - Float64(Float64(z * y) * tanh(Float64(x / y)))); else tmp = Float64(x + Float64(Float64(y * z) * t_1)); end return tmp end
code[x_, y_, z_, t_] := Block[{t$95$1 = N[Tanh[N[(t / y), $MachinePrecision]], $MachinePrecision]}, If[LessEqual[t, -6.4e-29], N[(y * N[(z * t$95$1), $MachinePrecision] + x), $MachinePrecision], If[LessEqual[t, 3.1e-114], N[(x - N[(N[(z * y), $MachinePrecision] * N[Tanh[N[(x / y), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]), $MachinePrecision], N[(x + N[(N[(y * z), $MachinePrecision] * t$95$1), $MachinePrecision]), $MachinePrecision]]]]
\begin{array}{l}
\\
\begin{array}{l}
t_1 := \tanh \left(\frac{t}{y}\right)\\
\mathbf{if}\;t \leq -6.4 \cdot 10^{-29}:\\
\;\;\;\;\mathsf{fma}\left(y, z \cdot t\_1, x\right)\\
\mathbf{elif}\;t \leq 3.1 \cdot 10^{-114}:\\
\;\;\;\;x - \left(z \cdot y\right) \cdot \tanh \left(\frac{x}{y}\right)\\
\mathbf{else}:\\
\;\;\;\;x + \left(y \cdot z\right) \cdot t\_1\\
\end{array}
\end{array}
if t < -6.400000000000001e-29Initial program 95.6%
lift-+.f64N/A
lift-*.f64N/A
lift-*.f64N/A
lift--.f64N/A
lift-/.f64N/A
lift-tanh.f64N/A
lift-/.f64N/A
lift-tanh.f64N/A
+-commutativeN/A
associate-*l*N/A
lower-fma.f64N/A
Applied rewrites98.9%
Taylor expanded in x around 0
associate-/r*N/A
div-subN/A
rec-expN/A
rec-expN/A
tanh-def-aN/A
lift-tanh.f64N/A
lift-/.f6488.0
Applied rewrites88.0%
if -6.400000000000001e-29 < t < 3.1e-114Initial program 89.8%
Taylor expanded in t around 0
fp-cancel-sign-sub-invN/A
lower--.f64N/A
distribute-lft-neg-outN/A
associate-*r*N/A
Applied rewrites80.4%
if 3.1e-114 < t Initial program 95.4%
Taylor expanded in x around 0
associate-/r*N/A
div-subN/A
rec-expN/A
rec-expN/A
tanh-def-aN/A
lift-tanh.f64N/A
lift-/.f6484.5
Applied rewrites84.5%
(FPCore (x y z t)
:precision binary64
(if (<= y -3.6e+86)
(fma (- t x) z x)
(if (<= y 1.1e+57)
(fma y (* z (tanh (/ t y))) x)
(fma (- 1.0 z) x (* t z)))))
double code(double x, double y, double z, double t) {
double tmp;
if (y <= -3.6e+86) {
tmp = fma((t - x), z, x);
} else if (y <= 1.1e+57) {
tmp = fma(y, (z * tanh((t / y))), x);
} else {
tmp = fma((1.0 - z), x, (t * z));
}
return tmp;
}
function code(x, y, z, t) tmp = 0.0 if (y <= -3.6e+86) tmp = fma(Float64(t - x), z, x); elseif (y <= 1.1e+57) tmp = fma(y, Float64(z * tanh(Float64(t / y))), x); else tmp = fma(Float64(1.0 - z), x, Float64(t * z)); end return tmp end
code[x_, y_, z_, t_] := If[LessEqual[y, -3.6e+86], N[(N[(t - x), $MachinePrecision] * z + x), $MachinePrecision], If[LessEqual[y, 1.1e+57], N[(y * N[(z * N[Tanh[N[(t / y), $MachinePrecision]], $MachinePrecision]), $MachinePrecision] + x), $MachinePrecision], N[(N[(1.0 - z), $MachinePrecision] * x + N[(t * z), $MachinePrecision]), $MachinePrecision]]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;y \leq -3.6 \cdot 10^{+86}:\\
\;\;\;\;\mathsf{fma}\left(t - x, z, x\right)\\
\mathbf{elif}\;y \leq 1.1 \cdot 10^{+57}:\\
\;\;\;\;\mathsf{fma}\left(y, z \cdot \tanh \left(\frac{t}{y}\right), x\right)\\
\mathbf{else}:\\
\;\;\;\;\mathsf{fma}\left(1 - z, x, t \cdot z\right)\\
\end{array}
\end{array}
if y < -3.60000000000000005e86Initial program 83.7%
Taylor expanded in y around inf
+-commutativeN/A
*-commutativeN/A
*-lft-identityN/A
metadata-evalN/A
distribute-lft-neg-inN/A
distribute-lft-out--N/A
lower-fma.f64N/A
distribute-lft-out--N/A
distribute-lft-neg-inN/A
metadata-evalN/A
*-lft-identityN/A
*-rgt-identityN/A
lower--.f64N/A
*-rgt-identity86.2
Applied rewrites86.2%
if -3.60000000000000005e86 < y < 1.1e57Initial program 99.4%
lift-+.f64N/A
lift-*.f64N/A
lift-*.f64N/A
lift--.f64N/A
lift-/.f64N/A
lift-tanh.f64N/A
lift-/.f64N/A
lift-tanh.f64N/A
+-commutativeN/A
associate-*l*N/A
lower-fma.f64N/A
Applied rewrites99.8%
Taylor expanded in x around 0
associate-/r*N/A
div-subN/A
rec-expN/A
rec-expN/A
tanh-def-aN/A
lift-tanh.f64N/A
lift-/.f6484.8
Applied rewrites84.8%
if 1.1e57 < y Initial program 83.1%
Taylor expanded in x around 0
Applied rewrites72.3%
Taylor expanded in y around inf
lower-*.f6484.7
Applied rewrites84.7%
(FPCore (x y z t) :precision binary64 (if (<= y -8e+18) (fma (- t x) z x) (if (<= y 2.4e+28) x (fma (- 1.0 z) x (* t z)))))
double code(double x, double y, double z, double t) {
double tmp;
if (y <= -8e+18) {
tmp = fma((t - x), z, x);
} else if (y <= 2.4e+28) {
tmp = x;
} else {
tmp = fma((1.0 - z), x, (t * z));
}
return tmp;
}
function code(x, y, z, t) tmp = 0.0 if (y <= -8e+18) tmp = fma(Float64(t - x), z, x); elseif (y <= 2.4e+28) tmp = x; else tmp = fma(Float64(1.0 - z), x, Float64(t * z)); end return tmp end
code[x_, y_, z_, t_] := If[LessEqual[y, -8e+18], N[(N[(t - x), $MachinePrecision] * z + x), $MachinePrecision], If[LessEqual[y, 2.4e+28], x, N[(N[(1.0 - z), $MachinePrecision] * x + N[(t * z), $MachinePrecision]), $MachinePrecision]]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;y \leq -8 \cdot 10^{+18}:\\
\;\;\;\;\mathsf{fma}\left(t - x, z, x\right)\\
\mathbf{elif}\;y \leq 2.4 \cdot 10^{+28}:\\
\;\;\;\;x\\
\mathbf{else}:\\
\;\;\;\;\mathsf{fma}\left(1 - z, x, t \cdot z\right)\\
\end{array}
\end{array}
if y < -8e18Initial program 86.5%
Taylor expanded in y around inf
+-commutativeN/A
*-commutativeN/A
*-lft-identityN/A
metadata-evalN/A
distribute-lft-neg-inN/A
distribute-lft-out--N/A
lower-fma.f64N/A
distribute-lft-out--N/A
distribute-lft-neg-inN/A
metadata-evalN/A
*-lft-identityN/A
*-rgt-identityN/A
lower--.f64N/A
*-rgt-identity80.2
Applied rewrites80.2%
if -8e18 < y < 2.39999999999999981e28Initial program 99.9%
Taylor expanded in x around inf
Applied rewrites75.4%
if 2.39999999999999981e28 < y Initial program 84.7%
Taylor expanded in x around 0
Applied rewrites73.0%
Taylor expanded in y around inf
lower-*.f6482.0
Applied rewrites82.0%
(FPCore (x y z t) :precision binary64 (let* ((t_1 (fma (- t x) z x))) (if (<= y -8e+18) t_1 (if (<= y 2.4e+28) x t_1))))
double code(double x, double y, double z, double t) {
double t_1 = fma((t - x), z, x);
double tmp;
if (y <= -8e+18) {
tmp = t_1;
} else if (y <= 2.4e+28) {
tmp = x;
} else {
tmp = t_1;
}
return tmp;
}
function code(x, y, z, t) t_1 = fma(Float64(t - x), z, x) tmp = 0.0 if (y <= -8e+18) tmp = t_1; elseif (y <= 2.4e+28) tmp = x; else tmp = t_1; end return tmp end
code[x_, y_, z_, t_] := Block[{t$95$1 = N[(N[(t - x), $MachinePrecision] * z + x), $MachinePrecision]}, If[LessEqual[y, -8e+18], t$95$1, If[LessEqual[y, 2.4e+28], x, t$95$1]]]
\begin{array}{l}
\\
\begin{array}{l}
t_1 := \mathsf{fma}\left(t - x, z, x\right)\\
\mathbf{if}\;y \leq -8 \cdot 10^{+18}:\\
\;\;\;\;t\_1\\
\mathbf{elif}\;y \leq 2.4 \cdot 10^{+28}:\\
\;\;\;\;x\\
\mathbf{else}:\\
\;\;\;\;t\_1\\
\end{array}
\end{array}
if y < -8e18 or 2.39999999999999981e28 < y Initial program 85.6%
Taylor expanded in y around inf
+-commutativeN/A
*-commutativeN/A
*-lft-identityN/A
metadata-evalN/A
distribute-lft-neg-inN/A
distribute-lft-out--N/A
lower-fma.f64N/A
distribute-lft-out--N/A
distribute-lft-neg-inN/A
metadata-evalN/A
*-lft-identityN/A
*-rgt-identityN/A
lower--.f64N/A
*-rgt-identity81.3
Applied rewrites81.3%
if -8e18 < y < 2.39999999999999981e28Initial program 99.9%
Taylor expanded in x around inf
Applied rewrites75.4%
(FPCore (x y z t) :precision binary64 (if (<= y -7.8e+29) (fma t z x) (if (<= y 3.3e+28) x (fma t z x))))
double code(double x, double y, double z, double t) {
double tmp;
if (y <= -7.8e+29) {
tmp = fma(t, z, x);
} else if (y <= 3.3e+28) {
tmp = x;
} else {
tmp = fma(t, z, x);
}
return tmp;
}
function code(x, y, z, t) tmp = 0.0 if (y <= -7.8e+29) tmp = fma(t, z, x); elseif (y <= 3.3e+28) tmp = x; else tmp = fma(t, z, x); end return tmp end
code[x_, y_, z_, t_] := If[LessEqual[y, -7.8e+29], N[(t * z + x), $MachinePrecision], If[LessEqual[y, 3.3e+28], x, N[(t * z + x), $MachinePrecision]]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;y \leq -7.8 \cdot 10^{+29}:\\
\;\;\;\;\mathsf{fma}\left(t, z, x\right)\\
\mathbf{elif}\;y \leq 3.3 \cdot 10^{+28}:\\
\;\;\;\;x\\
\mathbf{else}:\\
\;\;\;\;\mathsf{fma}\left(t, z, x\right)\\
\end{array}
\end{array}
if y < -7.79999999999999937e29 or 3.3e28 < y Initial program 85.4%
Taylor expanded in y around inf
+-commutativeN/A
*-commutativeN/A
*-lft-identityN/A
metadata-evalN/A
distribute-lft-neg-inN/A
distribute-lft-out--N/A
lower-fma.f64N/A
distribute-lft-out--N/A
distribute-lft-neg-inN/A
metadata-evalN/A
*-lft-identityN/A
*-rgt-identityN/A
lower--.f64N/A
*-rgt-identity82.0
Applied rewrites82.0%
Taylor expanded in x around 0
Applied rewrites65.8%
if -7.79999999999999937e29 < y < 3.3e28Initial program 99.8%
Taylor expanded in x around inf
Applied rewrites75.0%
(FPCore (x y z t) :precision binary64 (let* ((t_1 (+ x (* (* y z) (- (tanh (/ t y)) (tanh (/ x y))))))) (if (<= t_1 (- INFINITY)) (* z t) (if (<= t_1 INFINITY) x (* z t)))))
double code(double x, double y, double z, double t) {
// t_1 recomputes the original expression; it is used only to pick a branch.
double t_1 = x + ((y * z) * (tanh((t / y)) - tanh((x / y))));
double tmp;
// "t_1 <= -INFINITY" is true only when t_1 is exactly -inf.
if (t_1 <= -((double) INFINITY)) {
tmp = z * t;
} else if (t_1 <= ((double) INFINITY)) {
// Any comparable (non-NaN) value, including +inf, returns x.
tmp = x;
} else {
// Reached only when t_1 is NaN: both comparisons above are false.
tmp = z * t;
}
return tmp;
}
public static double code(double x, double y, double z, double t) {
double t_1 = x + ((y * z) * (Math.tanh((t / y)) - Math.tanh((x / y))));
double tmp;
if (t_1 <= -Double.POSITIVE_INFINITY) {
tmp = z * t;
} else if (t_1 <= Double.POSITIVE_INFINITY) {
tmp = x;
} else {
tmp = z * t;
}
return tmp;
}
def code(x, y, z, t):
    """Branch on t_1, the original expression's value.

    Returns z * t when t_1 is exactly -inf or NaN (both comparisons
    false for NaN), and x for every other comparable value.
    """
    t_1 = x + ((y * z) * (math.tanh((t / y)) - math.tanh((x / y))))
    if t_1 <= -math.inf:
        return z * t
    if t_1 <= math.inf:
        return x
    return z * t
function code(x, y, z, t) t_1 = Float64(x + Float64(Float64(y * z) * Float64(tanh(Float64(t / y)) - tanh(Float64(x / y))))) tmp = 0.0 if (t_1 <= Float64(-Inf)) tmp = Float64(z * t); elseif (t_1 <= Inf) tmp = x; else tmp = Float64(z * t); end return tmp end
function tmp_2 = code(x, y, z, t) t_1 = x + ((y * z) * (tanh((t / y)) - tanh((x / y)))); tmp = 0.0; if (t_1 <= -Inf) tmp = z * t; elseif (t_1 <= Inf) tmp = x; else tmp = z * t; end tmp_2 = tmp; end
code[x_, y_, z_, t_] := Block[{t$95$1 = N[(x + N[(N[(y * z), $MachinePrecision] * N[(N[Tanh[N[(t / y), $MachinePrecision]], $MachinePrecision] - N[Tanh[N[(x / y), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]}, If[LessEqual[t$95$1, (-Infinity)], N[(z * t), $MachinePrecision], If[LessEqual[t$95$1, Infinity], x, N[(z * t), $MachinePrecision]]]]
\begin{array}{l}
\\
\begin{array}{l}
t_1 := x + \left(y \cdot z\right) \cdot \left(\tanh \left(\frac{t}{y}\right) - \tanh \left(\frac{x}{y}\right)\right)\\
\mathbf{if}\;t\_1 \leq -\infty:\\
\;\;\;\;z \cdot t\\
\mathbf{elif}\;t\_1 \leq \infty:\\
\;\;\;\;x\\
\mathbf{else}:\\
\;\;\;\;z \cdot t\\
\end{array}
\end{array}
if (+.f64 x (*.f64 (*.f64 y z) (-.f64 (tanh.f64 (/.f64 t y)) (tanh.f64 (/.f64 x y))))) < -inf.0 or +inf.0 < (+.f64 x (*.f64 (*.f64 y z) (-.f64 (tanh.f64 (/.f64 t y)) (tanh.f64 (/.f64 x y))))) Initial program 47.6%
Taylor expanded in x around 0
associate-*r*N/A
associate-/r*N/A
div-subN/A
rec-expN/A
rec-expN/A
tanh-def-aN/A
Applied rewrites32.5%
Taylor expanded in y around inf
*-commutativeN/A
lower-*.f6452.9
Applied rewrites52.9%
if -inf.0 < (+.f64 x (*.f64 (*.f64 y z) (-.f64 (tanh.f64 (/.f64 t y)) (tanh.f64 (/.f64 x y))))) < +inf.0Initial program 96.9%
Taylor expanded in x around inf
Applied rewrites65.1%
(FPCore (x y z t) :precision binary64 x)
double code(double x, double y, double z, double t) {
// Herbie-simplified alternative: the entire expression is reduced to x
// (derived by Taylor expansion in x around inf; see the trace that follows).
return x;
}
! NaN-aware max/min wrappers that reproduce C's fmax()/fmin() semantics:
! when exactly one argument is NaN, the other argument is returned.
! The "v /= v" expression is a portable NaN test (only NaN compares
! unequal to itself); merge() then selects the non-NaN operand.
module fmin_fmax_functions
implicit none
private
public fmax
public fmin
! Generic names dispatch on the real kinds (8/4) of the two arguments.
interface fmax
module procedure fmax88
module procedure fmax44
module procedure fmax84
module procedure fmax48
end interface
interface fmin
module procedure fmin88
module procedure fmin44
module procedure fmin84
module procedure fmin48
end interface
contains
! res = y if x is NaN; else x if y is NaN; else max(x, y).
real(8) function fmax88(x, y) result (res)
real(8), intent (in) :: x
real(8), intent (in) :: y
res = merge(y, merge(x, max(x, y), y /= y), x /= x)
end function
real(4) function fmax44(x, y) result (res)
real(4), intent (in) :: x
real(4), intent (in) :: y
res = merge(y, merge(x, max(x, y), y /= y), x /= x)
end function
! Mixed-kind variants: the real(4) argument is promoted with dble().
real(8) function fmax84(x, y) result(res)
real(8), intent (in) :: x
real(4), intent (in) :: y
res = merge(dble(y), merge(x, max(x, dble(y)), y /= y), x /= x)
end function
real(8) function fmax48(x, y) result(res)
real(4), intent (in) :: x
real(8), intent (in) :: y
res = merge(y, merge(dble(x), max(dble(x), y), y /= y), x /= x)
end function
! res = y if x is NaN; else x if y is NaN; else min(x, y).
real(8) function fmin88(x, y) result (res)
real(8), intent (in) :: x
real(8), intent (in) :: y
res = merge(y, merge(x, min(x, y), y /= y), x /= x)
end function
real(4) function fmin44(x, y) result (res)
real(4), intent (in) :: x
real(4), intent (in) :: y
res = merge(y, merge(x, min(x, y), y /= y), x /= x)
end function
real(8) function fmin84(x, y) result(res)
real(8), intent (in) :: x
real(4), intent (in) :: y
res = merge(dble(y), merge(x, min(x, dble(y)), y /= y), x /= x)
end function
real(8) function fmin48(x, y) result(res)
real(4), intent (in) :: x
real(8), intent (in) :: y
res = merge(y, merge(dble(x), min(dble(x), y), y /= y), x /= x)
end function
end module
! Herbie-simplified alternative: the entire expression collapses to x
! (derived by Taylor expansion in x around inf; see the trace that follows).
real(8) function code(x, y, z, t)
use fmin_fmax_functions
real(8), intent (in) :: x
real(8), intent (in) :: y
real(8), intent (in) :: z
real(8), intent (in) :: t
code = x
end function
public static double code(double x, double y, double z, double t) {
return x;
}
def code(x, y, z, t):
    """Herbie-simplified alternative: the whole expression reduces to x."""
    return x
function code(x, y, z, t) return x end
function tmp = code(x, y, z, t) tmp = x; end
code[x_, y_, z_, t_] := x
\begin{array}{l}
\\
x
\end{array}
Initial program 93.3%
Taylor expanded in x around inf
Applied rewrites60.8%
herbie shell --seed 2025130
(FPCore (x y z t)
:name "SynthBasics:moogVCF from YampaSynth-0.2"
:precision binary64
(+ x (* (* y z) (- (tanh (/ t y)) (tanh (/ x y))))))