
(FPCore (x y z t) :precision binary64 (+ x (* (* y z) (- (tanh (/ t y)) (tanh (/ x y))))))
double code(double x, double y, double z, double t) {
return x + ((y * z) * (tanh((t / y)) - tanh((x / y))));
}
! Computes x + (y*z) * (tanh(t/y) - tanh(x/y)) in double precision.
! Direct translation of the FPCore reference expression.
real(8) function code(x, y, z, t)
real(8), intent (in) :: x
real(8), intent (in) :: y
real(8), intent (in) :: z
real(8), intent (in) :: t
code = x + ((y * z) * (tanh((t / y)) - tanh((x / y))))
end function
/** Computes x + (y*z) * (tanh(t/y) - tanh(x/y)) in double precision. */
public static double code(double x, double y, double z, double t) {
    double tanhDiff = Math.tanh(t / y) - Math.tanh(x / y);
    return x + (y * z) * tanhDiff;
}
def code(x, y, z, t):
    """Return x + (y*z) * (tanh(t/y) - tanh(x/y)), the reference expression."""
    scale = y * z
    delta = math.tanh(t / y) - math.tanh(x / y)
    return x + scale * delta
# x + (y*z) * (tanh(t/y) - tanh(x/y)); every intermediate is explicitly
# rounded to Float64 to mirror the binary64 FPCore semantics.
function code(x, y, z, t) return Float64(x + Float64(Float64(y * z) * Float64(tanh(Float64(t / y)) - tanh(Float64(x / y))))) end
% x + (y*z) * (tanh(t/y) - tanh(x/y)) — direct translation of the FPCore reference.
function tmp = code(x, y, z, t) tmp = x + ((y * z) * (tanh((t / y)) - tanh((x / y)))); end
code[x_, y_, z_, t_] := N[(x + N[(N[(y * z), $MachinePrecision] * N[(N[Tanh[N[(t / y), $MachinePrecision]], $MachinePrecision] - N[Tanh[N[(x / y), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
x + \left(y \cdot z\right) \cdot \left(\tanh \left(\frac{t}{y}\right) - \tanh \left(\frac{x}{y}\right)\right)
\end{array}
Sampling outcomes in binary64 precision:
Herbie found 9 alternatives:
| Alternative | Accuracy | Speedup |
|---|---|---|
(FPCore (x y z t) :precision binary64 (+ x (* (* y z) (- (tanh (/ t y)) (tanh (/ x y))))))
double code(double x, double y, double z, double t) {
return x + ((y * z) * (tanh((t / y)) - tanh((x / y))));
}
real(8) function code(x, y, z, t)
real(8), intent (in) :: x
real(8), intent (in) :: y
real(8), intent (in) :: z
real(8), intent (in) :: t
code = x + ((y * z) * (tanh((t / y)) - tanh((x / y))))
end function
public static double code(double x, double y, double z, double t) {
return x + ((y * z) * (Math.tanh((t / y)) - Math.tanh((x / y))));
}
# Reference implementation: x + (y*z) * (tanh(t/y) - tanh(x/y)).
def code(x, y, z, t): return x + ((y * z) * (math.tanh((t / y)) - math.tanh((x / y))))
function code(x, y, z, t) return Float64(x + Float64(Float64(y * z) * Float64(tanh(Float64(t / y)) - tanh(Float64(x / y))))) end
function tmp = code(x, y, z, t) tmp = x + ((y * z) * (tanh((t / y)) - tanh((x / y)))); end
code[x_, y_, z_, t_] := N[(x + N[(N[(y * z), $MachinePrecision] * N[(N[Tanh[N[(t / y), $MachinePrecision]], $MachinePrecision] - N[Tanh[N[(x / y), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
x + \left(y \cdot z\right) \cdot \left(\tanh \left(\frac{t}{y}\right) - \tanh \left(\frac{x}{y}\right)\right)
\end{array}
(FPCore (x y z t) :precision binary64 (fma (* (- (tanh (/ t y)) (tanh (/ x y))) z) y x))
double code(double x, double y, double z, double t) {
return fma(((tanh((t / y)) - tanh((x / y))) * z), y, x);
}
function code(x, y, z, t) return fma(Float64(Float64(tanh(Float64(t / y)) - tanh(Float64(x / y))) * z), y, x) end
code[x_, y_, z_, t_] := N[(N[(N[(N[Tanh[N[(t / y), $MachinePrecision]], $MachinePrecision] - N[Tanh[N[(x / y), $MachinePrecision]], $MachinePrecision]), $MachinePrecision] * z), $MachinePrecision] * y + x), $MachinePrecision]
\begin{array}{l}
\\
\mathsf{fma}\left(\left(\tanh \left(\frac{t}{y}\right) - \tanh \left(\frac{x}{y}\right)\right) \cdot z, y, x\right)
\end{array}
Initial program 95.4%
lift-+.f64 N/A
+-commutative N/A
lift-*.f64 N/A
lift-*.f64 N/A
associate-*l* N/A
*-commutative N/A
lower-fma.f64 N/A
*-commutative N/A
lower-*.f64 96.9
Applied rewrites96.9%
(FPCore (x y z t)
:precision binary64
(let* ((t_1 (+ x (* (* y z) (- (tanh (/ t y)) (tanh (/ x y)))))))
(if (<= t_1 (- INFINITY))
(* z t)
(if (<= t_1 2e+306) (* 1.0 x) (* (- z) x)))))
double code(double x, double y, double z, double t) {
double t_1 = x + ((y * z) * (tanh((t / y)) - tanh((x / y))));
double tmp;
if (t_1 <= -((double) INFINITY)) {
tmp = z * t;
} else if (t_1 <= 2e+306) {
tmp = 1.0 * x;
} else {
tmp = -z * x;
}
return tmp;
}
public static double code(double x, double y, double z, double t) {
double t_1 = x + ((y * z) * (Math.tanh((t / y)) - Math.tanh((x / y))));
double tmp;
if (t_1 <= -Double.POSITIVE_INFINITY) {
tmp = z * t;
} else if (t_1 <= 2e+306) {
tmp = 1.0 * x;
} else {
tmp = -z * x;
}
return tmp;
}
def code(x, y, z, t):
    """Regime-split rewrite of x + (y*z)*(tanh(t/y) - tanh(x/y)).

    Branches on t_1, the value of the original expression:
    overflow to -inf -> z*t; the common finite range (<= 2e+306) -> x;
    huge positive values -> -z*x.
    """
    # NOTE(review): the original report line collapsed this whole body onto
    # one line, which is a SyntaxError in Python; reformatted, same logic.
    t_1 = x + ((y * z) * (math.tanh((t / y)) - math.tanh((x / y))))
    if t_1 <= -math.inf:
        tmp = z * t
    elif t_1 <= 2e+306:
        tmp = 1.0 * x
    else:
        tmp = -z * x
    return tmp
function code(x, y, z, t) t_1 = Float64(x + Float64(Float64(y * z) * Float64(tanh(Float64(t / y)) - tanh(Float64(x / y))))) tmp = 0.0 if (t_1 <= Float64(-Inf)) tmp = Float64(z * t); elseif (t_1 <= 2e+306) tmp = Float64(1.0 * x); else tmp = Float64(Float64(-z) * x); end return tmp end
function tmp_2 = code(x, y, z, t) t_1 = x + ((y * z) * (tanh((t / y)) - tanh((x / y)))); tmp = 0.0; if (t_1 <= -Inf) tmp = z * t; elseif (t_1 <= 2e+306) tmp = 1.0 * x; else tmp = -z * x; end tmp_2 = tmp; end
code[x_, y_, z_, t_] := Block[{t$95$1 = N[(x + N[(N[(y * z), $MachinePrecision] * N[(N[Tanh[N[(t / y), $MachinePrecision]], $MachinePrecision] - N[Tanh[N[(x / y), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]}, If[LessEqual[t$95$1, (-Infinity)], N[(z * t), $MachinePrecision], If[LessEqual[t$95$1, 2e+306], N[(1.0 * x), $MachinePrecision], N[((-z) * x), $MachinePrecision]]]]
\begin{array}{l}
\\
\begin{array}{l}
t_1 := x + \left(y \cdot z\right) \cdot \left(\tanh \left(\frac{t}{y}\right) - \tanh \left(\frac{x}{y}\right)\right)\\
\mathbf{if}\;t\_1 \leq -\infty:\\
\;\;\;\;z \cdot t\\
\mathbf{elif}\;t\_1 \leq 2 \cdot 10^{+306}:\\
\;\;\;\;1 \cdot x\\
\mathbf{else}:\\
\;\;\;\;\left(-z\right) \cdot x\\
\end{array}
\end{array}
if (+.f64 x (*.f64 (*.f64 y z) (-.f64 (tanh.f64 (/.f64 t y)) (tanh.f64 (/.f64 x y))))) < -inf.0
Initial program 85.6%
Taylor expanded in y around inf
+-commutativeN/A
*-commutativeN/A
lower-fma.f64N/A
lower--.f64100.0
Applied rewrites100.0%
Taylor expanded in x around 0
Applied rewrites63.1%
if -inf.0 < (+.f64 x (*.f64 (*.f64 y z) (-.f64 (tanh.f64 (/.f64 t y)) (tanh.f64 (/.f64 x y))))) < 2.00000000000000003e306Initial program 99.5%
Taylor expanded in y around inf
+-commutativeN/A
*-commutativeN/A
lower-fma.f64N/A
lower--.f6451.1
Applied rewrites51.1%
Taylor expanded in x around inf
Applied rewrites50.6%
Taylor expanded in z around 0
Applied rewrites65.6%
if 2.00000000000000003e306 < (+.f64 x (*.f64 (*.f64 y z) (-.f64 (tanh.f64 (/.f64 t y)) (tanh.f64 (/.f64 x y))))) Initial program 51.6%
Taylor expanded in y around inf
+-commutativeN/A
*-commutativeN/A
lower-fma.f64N/A
lower--.f6488.9
Applied rewrites88.9%
Taylor expanded in x around inf
Applied rewrites61.6%
Taylor expanded in z around inf
Applied rewrites61.6%
(FPCore (x y z t) :precision binary64 (if (<= (+ x (* (* y z) (- (tanh (/ t y)) (tanh (/ x y))))) (- INFINITY)) (* z t) (* 1.0 x)))
double code(double x, double y, double z, double t) {
double tmp;
if ((x + ((y * z) * (tanh((t / y)) - tanh((x / y))))) <= -((double) INFINITY)) {
tmp = z * t;
} else {
tmp = 1.0 * x;
}
return tmp;
}
public static double code(double x, double y, double z, double t) {
double tmp;
if ((x + ((y * z) * (Math.tanh((t / y)) - Math.tanh((x / y))))) <= -Double.POSITIVE_INFINITY) {
tmp = z * t;
} else {
tmp = 1.0 * x;
}
return tmp;
}
def code(x, y, z, t):
    """Two-regime rewrite: z*t when the original sum overflows to -inf, else x."""
    # NOTE(review): the original report line collapsed this body onto one
    # line, which is a SyntaxError in Python; reformatted, same logic.
    if (x + ((y * z) * (math.tanh((t / y)) - math.tanh((x / y))))) <= -math.inf:
        tmp = z * t
    else:
        tmp = 1.0 * x
    return tmp
function code(x, y, z, t) tmp = 0.0 if (Float64(x + Float64(Float64(y * z) * Float64(tanh(Float64(t / y)) - tanh(Float64(x / y))))) <= Float64(-Inf)) tmp = Float64(z * t); else tmp = Float64(1.0 * x); end return tmp end
function tmp_2 = code(x, y, z, t) tmp = 0.0; if ((x + ((y * z) * (tanh((t / y)) - tanh((x / y))))) <= -Inf) tmp = z * t; else tmp = 1.0 * x; end tmp_2 = tmp; end
code[x_, y_, z_, t_] := If[LessEqual[N[(x + N[(N[(y * z), $MachinePrecision] * N[(N[Tanh[N[(t / y), $MachinePrecision]], $MachinePrecision] - N[Tanh[N[(x / y), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], (-Infinity)], N[(z * t), $MachinePrecision], N[(1.0 * x), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;x + \left(y \cdot z\right) \cdot \left(\tanh \left(\frac{t}{y}\right) - \tanh \left(\frac{x}{y}\right)\right) \leq -\infty:\\
\;\;\;\;z \cdot t\\
\mathbf{else}:\\
\;\;\;\;1 \cdot x\\
\end{array}
\end{array}
if (+.f64 x (*.f64 (*.f64 y z) (-.f64 (tanh.f64 (/.f64 t y)) (tanh.f64 (/.f64 x y))))) < -inf.0Initial program 85.6%
Taylor expanded in y around inf
+-commutativeN/A
*-commutativeN/A
lower-fma.f64N/A
lower--.f64100.0
Applied rewrites100.0%
Taylor expanded in x around 0
Applied rewrites63.1%
if -inf.0 < (+.f64 x (*.f64 (*.f64 y z) (-.f64 (tanh.f64 (/.f64 t y)) (tanh.f64 (/.f64 x y))))) Initial program 96.0%
Taylor expanded in y around inf
+-commutativeN/A
*-commutativeN/A
lower-fma.f64N/A
lower--.f6453.9
Applied rewrites53.9%
Taylor expanded in x around inf
Applied rewrites51.4%
Taylor expanded in z around 0
Applied rewrites61.8%
(FPCore (x y z t) :precision binary64 (if (or (<= t -1.85e-43) (not (<= t 1.12e+99))) (+ x (fma (* z y) (tanh (/ t y)) (* (- x) z))) (fma (* (- (/ t y) (tanh (/ x y))) z) y x)))
double code(double x, double y, double z, double t) {
double tmp;
if ((t <= -1.85e-43) || !(t <= 1.12e+99)) {
tmp = x + fma((z * y), tanh((t / y)), (-x * z));
} else {
tmp = fma((((t / y) - tanh((x / y))) * z), y, x);
}
return tmp;
}
function code(x, y, z, t) tmp = 0.0 if ((t <= -1.85e-43) || !(t <= 1.12e+99)) tmp = Float64(x + fma(Float64(z * y), tanh(Float64(t / y)), Float64(Float64(-x) * z))); else tmp = fma(Float64(Float64(Float64(t / y) - tanh(Float64(x / y))) * z), y, x); end return tmp end
code[x_, y_, z_, t_] := If[Or[LessEqual[t, -1.85e-43], N[Not[LessEqual[t, 1.12e+99]], $MachinePrecision]], N[(x + N[(N[(z * y), $MachinePrecision] * N[Tanh[N[(t / y), $MachinePrecision]], $MachinePrecision] + N[((-x) * z), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], N[(N[(N[(N[(t / y), $MachinePrecision] - N[Tanh[N[(x / y), $MachinePrecision]], $MachinePrecision]), $MachinePrecision] * z), $MachinePrecision] * y + x), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;t \leq -1.85 \cdot 10^{-43} \lor \neg \left(t \leq 1.12 \cdot 10^{+99}\right):\\
\;\;\;\;x + \mathsf{fma}\left(z \cdot y, \tanh \left(\frac{t}{y}\right), \left(-x\right) \cdot z\right)\\
\mathbf{else}:\\
\;\;\;\;\mathsf{fma}\left(\left(\frac{t}{y} - \tanh \left(\frac{x}{y}\right)\right) \cdot z, y, x\right)\\
\end{array}
\end{array}
if t < -1.85e-43 or 1.12e99 < t Initial program 98.3%
Taylor expanded in x around 0
lower-/.f6476.1
Applied rewrites76.1%
lift-*.f64N/A
lift--.f64N/A
sub-negN/A
distribute-lft-inN/A
lower-fma.f64N/A
lift-*.f64N/A
*-commutativeN/A
lift-*.f64N/A
lower-*.f64N/A
lift-*.f64N/A
*-commutativeN/A
lift-*.f64N/A
lower-neg.f6471.1
Applied rewrites71.1%
Taylor expanded in x around 0
associate-*r*N/A
lower-*.f64N/A
mul-1-negN/A
lower-neg.f6481.0
Applied rewrites81.0%
if -1.85e-43 < t < 1.12e99Initial program 92.9%
lift-+.f64N/A
+-commutativeN/A
lift-*.f64N/A
lift-*.f64N/A
associate-*l*N/A
*-commutativeN/A
lower-fma.f64N/A
*-commutativeN/A
lower-*.f6496.9
Applied rewrites96.9%
Taylor expanded in y around inf
lower-/.f64N/A
+-commutativeN/A
lower-+.f64N/A
associate-*r/N/A
distribute-lft1-inN/A
metadata-evalN/A
mul0-lftN/A
metadata-evalN/A
lower-/.f6484.3
Applied rewrites84.3%
Final simplification82.7%
(FPCore (x y z t)
:precision binary64
(if (<= y 3.2e-60)
(* 1.0 x)
(if (<= y 1.12e+224)
(fma (* (- (tanh (/ t y)) (/ x y)) z) y x)
(fma (- (/ (* z t) x) z) x x))))
double code(double x, double y, double z, double t) {
double tmp;
if (y <= 3.2e-60) {
tmp = 1.0 * x;
} else if (y <= 1.12e+224) {
tmp = fma(((tanh((t / y)) - (x / y)) * z), y, x);
} else {
tmp = fma((((z * t) / x) - z), x, x);
}
return tmp;
}
function code(x, y, z, t) tmp = 0.0 if (y <= 3.2e-60) tmp = Float64(1.0 * x); elseif (y <= 1.12e+224) tmp = fma(Float64(Float64(tanh(Float64(t / y)) - Float64(x / y)) * z), y, x); else tmp = fma(Float64(Float64(Float64(z * t) / x) - z), x, x); end return tmp end
code[x_, y_, z_, t_] := If[LessEqual[y, 3.2e-60], N[(1.0 * x), $MachinePrecision], If[LessEqual[y, 1.12e+224], N[(N[(N[(N[Tanh[N[(t / y), $MachinePrecision]], $MachinePrecision] - N[(x / y), $MachinePrecision]), $MachinePrecision] * z), $MachinePrecision] * y + x), $MachinePrecision], N[(N[(N[(N[(z * t), $MachinePrecision] / x), $MachinePrecision] - z), $MachinePrecision] * x + x), $MachinePrecision]]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;y \leq 3.2 \cdot 10^{-60}:\\
\;\;\;\;1 \cdot x\\
\mathbf{elif}\;y \leq 1.12 \cdot 10^{+224}:\\
\;\;\;\;\mathsf{fma}\left(\left(\tanh \left(\frac{t}{y}\right) - \frac{x}{y}\right) \cdot z, y, x\right)\\
\mathbf{else}:\\
\;\;\;\;\mathsf{fma}\left(\frac{z \cdot t}{x} - z, x, x\right)\\
\end{array}
\end{array}
if y < 3.2000000000000001e-60Initial program 97.6%
Taylor expanded in y around inf
+-commutativeN/A
*-commutativeN/A
lower-fma.f64N/A
lower--.f6451.9
Applied rewrites51.9%
Taylor expanded in x around inf
Applied rewrites54.8%
Taylor expanded in z around 0
Applied rewrites66.9%
if 3.2000000000000001e-60 < y < 1.1199999999999999e224Initial program 91.1%
lift-+.f64N/A
+-commutativeN/A
lift-*.f64N/A
lift-*.f64N/A
associate-*l*N/A
*-commutativeN/A
lower-fma.f64N/A
*-commutativeN/A
lower-*.f6497.3
Applied rewrites97.3%
Taylor expanded in x around 0
lower-/.f6484.4
Applied rewrites84.4%
if 1.1199999999999999e224 < y Initial program 94.2%
Taylor expanded in y around inf
+-commutativeN/A
*-commutativeN/A
lower-fma.f64N/A
lower--.f6494.6
Applied rewrites94.6%
Taylor expanded in x around inf
Applied rewrites94.6%
(FPCore (x y z t)
:precision binary64
(if (<= y 2.5e-92)
(* 1.0 x)
(if (<= y 3.9e+117)
(+ x (fma (* z y) (tanh (/ t y)) (* (- x) z)))
(fma (- t x) z x))))
double code(double x, double y, double z, double t) {
double tmp;
if (y <= 2.5e-92) {
tmp = 1.0 * x;
} else if (y <= 3.9e+117) {
tmp = x + fma((z * y), tanh((t / y)), (-x * z));
} else {
tmp = fma((t - x), z, x);
}
return tmp;
}
function code(x, y, z, t) tmp = 0.0 if (y <= 2.5e-92) tmp = Float64(1.0 * x); elseif (y <= 3.9e+117) tmp = Float64(x + fma(Float64(z * y), tanh(Float64(t / y)), Float64(Float64(-x) * z))); else tmp = fma(Float64(t - x), z, x); end return tmp end
code[x_, y_, z_, t_] := If[LessEqual[y, 2.5e-92], N[(1.0 * x), $MachinePrecision], If[LessEqual[y, 3.9e+117], N[(x + N[(N[(z * y), $MachinePrecision] * N[Tanh[N[(t / y), $MachinePrecision]], $MachinePrecision] + N[((-x) * z), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], N[(N[(t - x), $MachinePrecision] * z + x), $MachinePrecision]]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;y \leq 2.5 \cdot 10^{-92}:\\
\;\;\;\;1 \cdot x\\
\mathbf{elif}\;y \leq 3.9 \cdot 10^{+117}:\\
\;\;\;\;x + \mathsf{fma}\left(z \cdot y, \tanh \left(\frac{t}{y}\right), \left(-x\right) \cdot z\right)\\
\mathbf{else}:\\
\;\;\;\;\mathsf{fma}\left(t - x, z, x\right)\\
\end{array}
\end{array}
if y < 2.50000000000000006e-92Initial program 97.5%
Taylor expanded in y around inf
+-commutativeN/A
*-commutativeN/A
lower-fma.f64N/A
lower--.f6451.6
Applied rewrites51.6%
Taylor expanded in x around inf
Applied rewrites55.2%
Taylor expanded in z around 0
Applied rewrites67.3%
if 2.50000000000000006e-92 < y < 3.8999999999999999e117Initial program 98.1%
Taylor expanded in x around 0
lower-/.f6480.0
Applied rewrites80.0%
lift-*.f64N/A
lift--.f64N/A
sub-negN/A
distribute-lft-inN/A
lower-fma.f64N/A
lift-*.f64N/A
*-commutativeN/A
lift-*.f64N/A
lower-*.f64N/A
lift-*.f64N/A
*-commutativeN/A
lift-*.f64N/A
lower-neg.f6480.0
Applied rewrites80.0%
Taylor expanded in x around 0
associate-*r*N/A
lower-*.f64N/A
mul-1-negN/A
lower-neg.f6480.0
Applied rewrites80.0%
if 3.8999999999999999e117 < y Initial program 85.1%
Taylor expanded in y around inf
+-commutativeN/A
*-commutativeN/A
lower-fma.f64N/A
lower--.f6487.7
Applied rewrites87.7%
(FPCore (x y z t) :precision binary64 (if (<= y 1.75e+79) (* 1.0 x) (fma (- t x) z x)))
double code(double x, double y, double z, double t) {
double tmp;
if (y <= 1.75e+79) {
tmp = 1.0 * x;
} else {
tmp = fma((t - x), z, x);
}
return tmp;
}
function code(x, y, z, t) tmp = 0.0 if (y <= 1.75e+79) tmp = Float64(1.0 * x); else tmp = fma(Float64(t - x), z, x); end return tmp end
code[x_, y_, z_, t_] := If[LessEqual[y, 1.75e+79], N[(1.0 * x), $MachinePrecision], N[(N[(t - x), $MachinePrecision] * z + x), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;y \leq 1.75 \cdot 10^{+79}:\\
\;\;\;\;1 \cdot x\\
\mathbf{else}:\\
\;\;\;\;\mathsf{fma}\left(t - x, z, x\right)\\
\end{array}
\end{array}
if y < 1.7499999999999999e79Initial program 97.5%
Taylor expanded in y around inf
+-commutativeN/A
*-commutativeN/A
lower-fma.f64N/A
lower--.f6449.5
Applied rewrites49.5%
Taylor expanded in x around inf
Applied rewrites52.2%
Taylor expanded in z around 0
Applied rewrites64.8%
if 1.7499999999999999e79 < y Initial program 88.5%
Taylor expanded in y around inf
+-commutativeN/A
*-commutativeN/A
lower-fma.f64N/A
lower--.f6479.2
Applied rewrites79.2%
(FPCore (x y z t) :precision binary64 (if (<= y 1.75e+79) (* 1.0 x) (* (- 1.0 z) x)))
double code(double x, double y, double z, double t) {
double tmp;
if (y <= 1.75e+79) {
tmp = 1.0 * x;
} else {
tmp = (1.0 - z) * x;
}
return tmp;
}
real(8) function code(x, y, z, t)
real(8), intent (in) :: x
real(8), intent (in) :: y
real(8), intent (in) :: z
real(8), intent (in) :: t
real(8) :: tmp
if (y <= 1.75d+79) then
tmp = 1.0d0 * x
else
tmp = (1.0d0 - z) * x
end if
code = tmp
end function
public static double code(double x, double y, double z, double t) {
double tmp;
if (y <= 1.75e+79) {
tmp = 1.0 * x;
} else {
tmp = (1.0 - z) * x;
}
return tmp;
}
def code(x, y, z, t):
    """Large-y rewrite: x when y <= 1.75e+79, otherwise (1 - z) * x."""
    # NOTE(review): the original report line collapsed this body onto one
    # line, which is a SyntaxError in Python; reformatted, same logic.
    if y <= 1.75e+79:
        tmp = 1.0 * x
    else:
        tmp = (1.0 - z) * x
    return tmp
function code(x, y, z, t) tmp = 0.0 if (y <= 1.75e+79) tmp = Float64(1.0 * x); else tmp = Float64(Float64(1.0 - z) * x); end return tmp end
function tmp_2 = code(x, y, z, t) tmp = 0.0; if (y <= 1.75e+79) tmp = 1.0 * x; else tmp = (1.0 - z) * x; end tmp_2 = tmp; end
code[x_, y_, z_, t_] := If[LessEqual[y, 1.75e+79], N[(1.0 * x), $MachinePrecision], N[(N[(1.0 - z), $MachinePrecision] * x), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;y \leq 1.75 \cdot 10^{+79}:\\
\;\;\;\;1 \cdot x\\
\mathbf{else}:\\
\;\;\;\;\left(1 - z\right) \cdot x\\
\end{array}
\end{array}
if y < 1.7499999999999999e79Initial program 97.5%
Taylor expanded in y around inf
+-commutativeN/A
*-commutativeN/A
lower-fma.f64N/A
lower--.f6449.5
Applied rewrites49.5%
Taylor expanded in x around inf
Applied rewrites52.2%
Taylor expanded in z around 0
Applied rewrites64.8%
if 1.7499999999999999e79 < y Initial program 88.5%
Taylor expanded in y around inf
+-commutativeN/A
*-commutativeN/A
lower-fma.f64N/A
lower--.f6479.2
Applied rewrites79.2%
Taylor expanded in x around inf
Applied rewrites49.8%
(FPCore (x y z t) :precision binary64 (* z t))
double code(double x, double y, double z, double t) {
return z * t;
}
real(8) function code(x, y, z, t)
real(8), intent (in) :: x
real(8), intent (in) :: y
real(8), intent (in) :: z
real(8), intent (in) :: t
code = z * t
end function
public static double code(double x, double y, double z, double t) {
return z * t;
}
def code(x, y, z, t):
    """Degenerate alternative: the product z*t (x and y are unused)."""
    return z * t
function code(x, y, z, t) return Float64(z * t) end
function tmp = code(x, y, z, t) tmp = z * t; end
code[x_, y_, z_, t_] := N[(z * t), $MachinePrecision]
\begin{array}{l}
\\
z \cdot t
\end{array}
Initial program 95.4%
Taylor expanded in y around inf
+-commutativeN/A
*-commutativeN/A
lower-fma.f64N/A
lower--.f6456.3
Applied rewrites56.3%
Taylor expanded in x around 0
Applied rewrites14.8%
(FPCore (x y z t) :precision binary64 (+ x (* y (* z (- (tanh (/ t y)) (tanh (/ x y)))))))
double code(double x, double y, double z, double t) {
return x + (y * (z * (tanh((t / y)) - tanh((x / y)))));
}
real(8) function code(x, y, z, t)
real(8), intent (in) :: x
real(8), intent (in) :: y
real(8), intent (in) :: z
real(8), intent (in) :: t
code = x + (y * (z * (tanh((t / y)) - tanh((x / y)))))
end function
public static double code(double x, double y, double z, double t) {
return x + (y * (z * (Math.tanh((t / y)) - Math.tanh((x / y)))));
}
def code(x, y, z, t): return x + (y * (z * (math.tanh((t / y)) - math.tanh((x / y)))))
function code(x, y, z, t) return Float64(x + Float64(y * Float64(z * Float64(tanh(Float64(t / y)) - tanh(Float64(x / y)))))) end
function tmp = code(x, y, z, t) tmp = x + (y * (z * (tanh((t / y)) - tanh((x / y))))); end
code[x_, y_, z_, t_] := N[(x + N[(y * N[(z * N[(N[Tanh[N[(t / y), $MachinePrecision]], $MachinePrecision] - N[Tanh[N[(x / y), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
x + y \cdot \left(z \cdot \left(\tanh \left(\frac{t}{y}\right) - \tanh \left(\frac{x}{y}\right)\right)\right)
\end{array}
herbie shell --seed 2024314
(FPCore (x y z t)
:name "SynthBasics:moogVCF from YampaSynth-0.2"
:precision binary64
:alt
(! :herbie-platform default (+ x (* y (* z (- (tanh (/ t y)) (tanh (/ x y)))))))
(+ x (* (* y z) (- (tanh (/ t y)) (tanh (/ x y))))))