
(FPCore (x y z t) :precision binary64 (+ x (* (* y z) (- (tanh (/ t y)) (tanh (/ x y))))))
double code(double x, double y, double z, double t) {
return x + ((y * z) * (tanh((t / y)) - tanh((x / y))));
}
! Compute x + (y*z) * (tanh(t/y) - tanh(x/y)) in binary64.
! Generated from the FPCore expression above; the parenthesization is
! significant for floating-point accuracy, so do not reassociate.
real(8) function code(x, y, z, t)
real(8), intent (in) :: x
real(8), intent (in) :: y
real(8), intent (in) :: z
real(8), intent (in) :: t
code = x + ((y * z) * (tanh((t / y)) - tanh((x / y))))
end function
/** Evaluates x + (y*z) * (tanh(t/y) - tanh(x/y)) in binary64, preserving the generated association order. */
public static double code(double x, double y, double z, double t) {
    final double gain = y * z;
    final double diff = Math.tanh(t / y) - Math.tanh(x / y);
    return x + gain * diff;
}
def code(x, y, z, t):
    """Return x + (y*z) * (tanh(t/y) - tanh(x/y)); association order preserved."""
    gain = y * z
    diff = math.tanh(t / y) - math.tanh(x / y)
    return x + gain * diff
# x + (y*z) * (tanh(t/y) - tanh(x/y)), with every intermediate rounded to Float64
# exactly as in the generated expression.
function code(x, y, z, t)
    gain = Float64(y * z)
    diff = Float64(tanh(Float64(t / y)) - tanh(Float64(x / y)))
    return Float64(x + Float64(gain * diff))
end
% x + (y*z) * (tanh(t/y) - tanh(x/y)); keep the parenthesization as generated.
function tmp = code(x, y, z, t) tmp = x + ((y * z) * (tanh((t / y)) - tanh((x / y)))); end
(* x + (y z)(tanh(t/y) - tanh(x/y)), each subexpression rounded via N[..., $MachinePrecision]. *)
code[x_, y_, z_, t_] := N[(x + N[(N[(y * z), $MachinePrecision] * N[(N[Tanh[N[(t / y), $MachinePrecision]], $MachinePrecision] - N[Tanh[N[(x / y), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
x + \left(y \cdot z\right) \cdot \left(\tanh \left(\frac{t}{y}\right) - \tanh \left(\frac{x}{y}\right)\right)
\end{array}
Sampling outcomes in binary64 precision:
Herbie found 8 alternatives:
| Alternative | Accuracy | Speedup |
|---|---|---|
(FPCore (x y z t) :precision binary64 (+ x (* (* y z) (- (tanh (/ t y)) (tanh (/ x y))))))
double code(double x, double y, double z, double t) {
return x + ((y * z) * (tanh((t / y)) - tanh((x / y))));
}
real(8) function code(x, y, z, t)
real(8), intent (in) :: x
real(8), intent (in) :: y
real(8), intent (in) :: z
real(8), intent (in) :: t
code = x + ((y * z) * (tanh((t / y)) - tanh((x / y))))
end function
public static double code(double x, double y, double z, double t) {
return x + ((y * z) * (Math.tanh((t / y)) - Math.tanh((x / y))));
}
def code(x, y, z, t): return x + ((y * z) * (math.tanh((t / y)) - math.tanh((x / y))))
function code(x, y, z, t) return Float64(x + Float64(Float64(y * z) * Float64(tanh(Float64(t / y)) - tanh(Float64(x / y))))) end
function tmp = code(x, y, z, t) tmp = x + ((y * z) * (tanh((t / y)) - tanh((x / y)))); end
code[x_, y_, z_, t_] := N[(x + N[(N[(y * z), $MachinePrecision] * N[(N[Tanh[N[(t / y), $MachinePrecision]], $MachinePrecision] - N[Tanh[N[(x / y), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
x + \left(y \cdot z\right) \cdot \left(\tanh \left(\frac{t}{y}\right) - \tanh \left(\frac{x}{y}\right)\right)
\end{array}
(FPCore (x y z t) :precision binary64 (fma (* (- (tanh (/ t y)) (tanh (/ x y))) y) z x))
double code(double x, double y, double z, double t) {
return fma(((tanh((t / y)) - tanh((x / y))) * y), z, x);
}
function code(x, y, z, t) return fma(Float64(Float64(tanh(Float64(t / y)) - tanh(Float64(x / y))) * y), z, x) end
code[x_, y_, z_, t_] := N[(N[(N[(N[Tanh[N[(t / y), $MachinePrecision]], $MachinePrecision] - N[Tanh[N[(x / y), $MachinePrecision]], $MachinePrecision]), $MachinePrecision] * y), $MachinePrecision] * z + x), $MachinePrecision]
\begin{array}{l}
\\
\mathsf{fma}\left(\left(\tanh \left(\frac{t}{y}\right) - \tanh \left(\frac{x}{y}\right)\right) \cdot y, z, x\right)
\end{array}
Initial program 94.4%
lift-+.f64N/A
+-commutativeN/A
lift-*.f64N/A
*-commutativeN/A
lift-*.f64N/A
associate-*r*N/A
lower-fma.f64N/A
lower-*.f6498.8
Applied rewrites98.8%
(FPCore (x y z t) :precision binary64 (let* ((t_1 (+ x (* (* y z) (- (tanh (/ t y)) (tanh (/ x y))))))) (if (or (<= t_1 -1e+295) (not (<= t_1 2e+306))) (* (- z) x) (* 1.0 x))))
double code(double x, double y, double z, double t) {
double t_1 = x + ((y * z) * (tanh((t / y)) - tanh((x / y))));
double tmp;
if ((t_1 <= -1e+295) || !(t_1 <= 2e+306)) {
tmp = -z * x;
} else {
tmp = 1.0 * x;
}
return tmp;
}
! Guarded variant of x + (y*z)*(tanh(t/y) - tanh(x/y)):
! when the full expression t_1 falls outside [-1e295, 2e306] the generator
! substitutes -z*x (see the FPCore condition above); otherwise the result
! rounds to x itself (1.0d0 * x).
real(8) function code(x, y, z, t)
real(8), intent (in) :: x
real(8), intent (in) :: y
real(8), intent (in) :: z
real(8), intent (in) :: t
real(8) :: t_1
real(8) :: tmp
t_1 = x + ((y * z) * (tanh((t / y)) - tanh((x / y))))
if ((t_1 <= (-1d+295)) .or. (.not. (t_1 <= 2d+306))) then
tmp = -z * x
else
tmp = 1.0d0 * x
end if
code = tmp
end function
public static double code(double x, double y, double z, double t) {
double t_1 = x + ((y * z) * (Math.tanh((t / y)) - Math.tanh((x / y))));
double tmp;
if ((t_1 <= -1e+295) || !(t_1 <= 2e+306)) {
tmp = -z * x;
} else {
tmp = 1.0 * x;
}
return tmp;
}
def code(x, y, z, t):
    """Guarded variant: -z*x when t_1 leaves [-1e295, 2e306], else 1.0*x.

    Note: the original report line collapsed all statements onto one line,
    which is invalid Python syntax; this restores the intended structure.
    """
    t_1 = x + ((y * z) * (math.tanh((t / y)) - math.tanh((x / y))))
    tmp = 0
    if (t_1 <= -1e+295) or not (t_1 <= 2e+306):
        tmp = -z * x
    else:
        tmp = 1.0 * x
    return tmp
# Guarded variant: -z*x when t_1 leaves [-1e295, 2e306], else 1.0*x.
# The original report line collapsed all statements onto one line, which is
# invalid Julia syntax; this restores the intended structure.
function code(x, y, z, t)
    t_1 = Float64(x + Float64(Float64(y * z) * Float64(tanh(Float64(t / y)) - tanh(Float64(x / y)))))
    tmp = 0.0
    if ((t_1 <= -1e+295) || !(t_1 <= 2e+306))
        tmp = Float64(Float64(-z) * x)
    else
        tmp = Float64(1.0 * x)
    end
    return tmp
end
function tmp_2 = code(x, y, z, t) t_1 = x + ((y * z) * (tanh((t / y)) - tanh((x / y)))); tmp = 0.0; if ((t_1 <= -1e+295) || ~((t_1 <= 2e+306))) tmp = -z * x; else tmp = 1.0 * x; end tmp_2 = tmp; end
code[x_, y_, z_, t_] := Block[{t$95$1 = N[(x + N[(N[(y * z), $MachinePrecision] * N[(N[Tanh[N[(t / y), $MachinePrecision]], $MachinePrecision] - N[Tanh[N[(x / y), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]}, If[Or[LessEqual[t$95$1, -1e+295], N[Not[LessEqual[t$95$1, 2e+306]], $MachinePrecision]], N[((-z) * x), $MachinePrecision], N[(1.0 * x), $MachinePrecision]]]
\begin{array}{l}
\\
\begin{array}{l}
t_1 := x + \left(y \cdot z\right) \cdot \left(\tanh \left(\frac{t}{y}\right) - \tanh \left(\frac{x}{y}\right)\right)\\
\mathbf{if}\;t\_1 \leq -1 \cdot 10^{+295} \lor \neg \left(t\_1 \leq 2 \cdot 10^{+306}\right):\\
\;\;\;\;\left(-z\right) \cdot x\\
\mathbf{else}:\\
\;\;\;\;1 \cdot x\\
\end{array}
\end{array}
if (+.f64 x (*.f64 (*.f64 y z) (-.f64 (tanh.f64 (/.f64 t y)) (tanh.f64 (/.f64 x y))))) < -9.9999999999999998e294 or 2.00000000000000003e306 < (+.f64 x (*.f64 (*.f64 y z) (-.f64 (tanh.f64 (/.f64 t y)) (tanh.f64 (/.f64 x y))))) Initial program 57.5%
Taylor expanded in y around inf
+-commutativeN/A
*-commutativeN/A
lower-fma.f64N/A
lower--.f6497.1
Applied rewrites97.1%
Taylor expanded in x around inf
Applied rewrites59.4%
Taylor expanded in z around inf
Applied rewrites59.4%
if -9.9999999999999998e294 < (+.f64 x (*.f64 (*.f64 y z) (-.f64 (tanh.f64 (/.f64 t y)) (tanh.f64 (/.f64 x y))))) < 2.00000000000000003e306 — Initial program 99.5%
Taylor expanded in y around inf
+-commutativeN/A
*-commutativeN/A
lower-fma.f64N/A
lower--.f6453.8
Applied rewrites53.8%
Taylor expanded in x around inf
Applied rewrites52.5%
Taylor expanded in z around 0
Applied rewrites69.0%
Final simplification67.9%
(FPCore (x y z t) :precision binary64 (if (<= (+ x (* (* y z) (- (tanh (/ t y)) (tanh (/ x y))))) 2e+306) (* 1.0 x) (* z t)))
double code(double x, double y, double z, double t) {
double tmp;
if ((x + ((y * z) * (tanh((t / y)) - tanh((x / y))))) <= 2e+306) {
tmp = 1.0 * x;
} else {
tmp = z * t;
}
return tmp;
}
real(8) function code(x, y, z, t)
real(8), intent (in) :: x
real(8), intent (in) :: y
real(8), intent (in) :: z
real(8), intent (in) :: t
real(8) :: tmp
if ((x + ((y * z) * (tanh((t / y)) - tanh((x / y))))) <= 2d+306) then
tmp = 1.0d0 * x
else
tmp = z * t
end if
code = tmp
end function
public static double code(double x, double y, double z, double t) {
double tmp;
if ((x + ((y * z) * (Math.tanh((t / y)) - Math.tanh((x / y))))) <= 2e+306) {
tmp = 1.0 * x;
} else {
tmp = z * t;
}
return tmp;
}
def code(x, y, z, t):
    """Threshold variant: 1.0*x when the full expression <= 2e306, else z*t.

    Note: the original report line collapsed all statements onto one line,
    which is invalid Python syntax; this restores the intended structure.
    """
    tmp = 0
    if (x + ((y * z) * (math.tanh((t / y)) - math.tanh((x / y))))) <= 2e+306:
        tmp = 1.0 * x
    else:
        tmp = z * t
    return tmp
# Threshold variant: 1.0*x when the full expression <= 2e306, else z*t.
# The original report line collapsed all statements onto one line, which is
# invalid Julia syntax; this restores the intended structure.
function code(x, y, z, t)
    tmp = 0.0
    if (Float64(x + Float64(Float64(y * z) * Float64(tanh(Float64(t / y)) - tanh(Float64(x / y))))) <= 2e+306)
        tmp = Float64(1.0 * x)
    else
        tmp = Float64(z * t)
    end
    return tmp
end
function tmp_2 = code(x, y, z, t) tmp = 0.0; if ((x + ((y * z) * (tanh((t / y)) - tanh((x / y))))) <= 2e+306) tmp = 1.0 * x; else tmp = z * t; end tmp_2 = tmp; end
code[x_, y_, z_, t_] := If[LessEqual[N[(x + N[(N[(y * z), $MachinePrecision] * N[(N[Tanh[N[(t / y), $MachinePrecision]], $MachinePrecision] - N[Tanh[N[(x / y), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], 2e+306], N[(1.0 * x), $MachinePrecision], N[(z * t), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;x + \left(y \cdot z\right) \cdot \left(\tanh \left(\frac{t}{y}\right) - \tanh \left(\frac{x}{y}\right)\right) \leq 2 \cdot 10^{+306}:\\
\;\;\;\;1 \cdot x\\
\mathbf{else}:\\
\;\;\;\;z \cdot t\\
\end{array}
\end{array}
if (+.f64 x (*.f64 (*.f64 y z) (-.f64 (tanh.f64 (/.f64 t y)) (tanh.f64 (/.f64 x y))))) < 2.00000000000000003e306Initial program 98.4%
Taylor expanded in y around inf
+-commutativeN/A
*-commutativeN/A
lower-fma.f64N/A
lower--.f6455.9
Applied rewrites55.9%
Taylor expanded in x around inf
Applied rewrites53.1%
Taylor expanded in z around 0
Applied rewrites65.4%
if 2.00000000000000003e306 < (+.f64 x (*.f64 (*.f64 y z) (-.f64 (tanh.f64 (/.f64 t y)) (tanh.f64 (/.f64 x y))))) Initial program 42.0%
Taylor expanded in y around inf
+-commutativeN/A
*-commutativeN/A
lower-fma.f64N/A
lower--.f64100.0
Applied rewrites100.0%
Taylor expanded in x around 0
Applied rewrites44.5%
(FPCore (x y z t) :precision binary64 (if (or (<= y -1.5e-104) (not (<= y 1.9e-79))) (fma (* (- (tanh (/ t y)) (/ x y)) y) z x) (* 1.0 x)))
double code(double x, double y, double z, double t) {
double tmp;
if ((y <= -1.5e-104) || !(y <= 1.9e-79)) {
tmp = fma(((tanh((t / y)) - (x / y)) * y), z, x);
} else {
tmp = 1.0 * x;
}
return tmp;
}
# Guarded fma variant: for y outside (-1.5e-104, 1.9e-79], tanh(x/y) is
# replaced by x/y (per the Taylor expansion in the log above); otherwise 1.0*x.
# The original report line collapsed all statements onto one line, which is
# invalid Julia syntax; this restores the intended structure.
function code(x, y, z, t)
    tmp = 0.0
    if ((y <= -1.5e-104) || !(y <= 1.9e-79))
        tmp = fma(Float64(Float64(tanh(Float64(t / y)) - Float64(x / y)) * y), z, x)
    else
        tmp = Float64(1.0 * x)
    end
    return tmp
end
code[x_, y_, z_, t_] := If[Or[LessEqual[y, -1.5e-104], N[Not[LessEqual[y, 1.9e-79]], $MachinePrecision]], N[(N[(N[(N[Tanh[N[(t / y), $MachinePrecision]], $MachinePrecision] - N[(x / y), $MachinePrecision]), $MachinePrecision] * y), $MachinePrecision] * z + x), $MachinePrecision], N[(1.0 * x), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;y \leq -1.5 \cdot 10^{-104} \lor \neg \left(y \leq 1.9 \cdot 10^{-79}\right):\\
\;\;\;\;\mathsf{fma}\left(\left(\tanh \left(\frac{t}{y}\right) - \frac{x}{y}\right) \cdot y, z, x\right)\\
\mathbf{else}:\\
\;\;\;\;1 \cdot x\\
\end{array}
\end{array}
if y < -1.5000000000000001e-104 or 1.9000000000000001e-79 < y Initial program 91.2%
lift-+.f64N/A
+-commutativeN/A
lift-*.f64N/A
*-commutativeN/A
lift-*.f64N/A
associate-*r*N/A
lower-fma.f64N/A
lower-*.f6498.1
Applied rewrites98.1%
Taylor expanded in x around 0
lower-/.f6484.3
Applied rewrites84.3%
if -1.5000000000000001e-104 < y < 1.9000000000000001e-79Initial program 99.9%
Taylor expanded in y around inf
+-commutativeN/A
*-commutativeN/A
lower-fma.f64N/A
lower--.f6440.3
Applied rewrites40.3%
Taylor expanded in x around inf
Applied rewrites46.9%
Taylor expanded in z around 0
Applied rewrites84.6%
Final simplification84.4%
(FPCore (x y z t) :precision binary64 (if (or (<= y -6.2e-56) (not (<= y 1.04e-94))) (fma (* (- (/ t y) (tanh (/ x y))) z) y x) (* 1.0 x)))
double code(double x, double y, double z, double t) {
double tmp;
if ((y <= -6.2e-56) || !(y <= 1.04e-94)) {
tmp = fma((((t / y) - tanh((x / y))) * z), y, x);
} else {
tmp = 1.0 * x;
}
return tmp;
}
# Guarded fma variant: for y outside (-6.2e-56, 1.04e-94], tanh(t/y) is
# replaced by t/y (per the Taylor expansion in the log above); otherwise 1.0*x.
# The original report line collapsed all statements onto one line, which is
# invalid Julia syntax; this restores the intended structure.
function code(x, y, z, t)
    tmp = 0.0
    if ((y <= -6.2e-56) || !(y <= 1.04e-94))
        tmp = fma(Float64(Float64(Float64(t / y) - tanh(Float64(x / y))) * z), y, x)
    else
        tmp = Float64(1.0 * x)
    end
    return tmp
end
code[x_, y_, z_, t_] := If[Or[LessEqual[y, -6.2e-56], N[Not[LessEqual[y, 1.04e-94]], $MachinePrecision]], N[(N[(N[(N[(t / y), $MachinePrecision] - N[Tanh[N[(x / y), $MachinePrecision]], $MachinePrecision]), $MachinePrecision] * z), $MachinePrecision] * y + x), $MachinePrecision], N[(1.0 * x), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;y \leq -6.2 \cdot 10^{-56} \lor \neg \left(y \leq 1.04 \cdot 10^{-94}\right):\\
\;\;\;\;\mathsf{fma}\left(\left(\frac{t}{y} - \tanh \left(\frac{x}{y}\right)\right) \cdot z, y, x\right)\\
\mathbf{else}:\\
\;\;\;\;1 \cdot x\\
\end{array}
\end{array}
if y < -6.19999999999999975e-56 or 1.04e-94 < y Initial program 90.7%
Taylor expanded in y around inf
lower-/.f6469.7
Applied rewrites69.7%
lift-+.f64N/A
+-commutativeN/A
lift-*.f64N/A
lift-*.f64N/A
associate-*l*N/A
*-commutativeN/A
lower-fma.f64N/A
*-commutativeN/A
lower-*.f6475.5
Applied rewrites75.5%
if -6.19999999999999975e-56 < y < 1.04e-94Initial program 100.0%
Taylor expanded in y around inf
+-commutativeN/A
*-commutativeN/A
lower-fma.f64N/A
lower--.f6440.9
Applied rewrites40.9%
Taylor expanded in x around inf
Applied rewrites49.0%
Taylor expanded in z around 0
Applied rewrites83.3%
Final simplification78.6%
(FPCore (x y z t) :precision binary64 (if (or (<= y -1.15e-55) (not (<= y 5.5e+27))) (fma (- t x) z x) (* 1.0 x)))
double code(double x, double y, double z, double t) {
double tmp;
if ((y <= -1.15e-55) || !(y <= 5.5e+27)) {
tmp = fma((t - x), z, x);
} else {
tmp = 1.0 * x;
}
return tmp;
}
# Guarded variant: for y outside (-1.15e-55, 5.5e27], the expression reduces
# to fma(t - x, z, x); otherwise 1.0*x.
# The original report line collapsed all statements onto one line, which is
# invalid Julia syntax; this restores the intended structure.
function code(x, y, z, t)
    tmp = 0.0
    if ((y <= -1.15e-55) || !(y <= 5.5e+27))
        tmp = fma(Float64(t - x), z, x)
    else
        tmp = Float64(1.0 * x)
    end
    return tmp
end
code[x_, y_, z_, t_] := If[Or[LessEqual[y, -1.15e-55], N[Not[LessEqual[y, 5.5e+27]], $MachinePrecision]], N[(N[(t - x), $MachinePrecision] * z + x), $MachinePrecision], N[(1.0 * x), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;y \leq -1.15 \cdot 10^{-55} \lor \neg \left(y \leq 5.5 \cdot 10^{+27}\right):\\
\;\;\;\;\mathsf{fma}\left(t - x, z, x\right)\\
\mathbf{else}:\\
\;\;\;\;1 \cdot x\\
\end{array}
\end{array}
if y < -1.15000000000000006e-55 or 5.49999999999999966e27 < y Initial program 88.9%
Taylor expanded in y around inf
+-commutativeN/A
*-commutativeN/A
lower-fma.f64N/A
lower--.f6479.1
Applied rewrites79.1%
if -1.15000000000000006e-55 < y < 5.49999999999999966e27Initial program 99.9%
Taylor expanded in y around inf
+-commutativeN/A
*-commutativeN/A
lower-fma.f64N/A
lower--.f6438.9
Applied rewrites38.9%
Taylor expanded in x around inf
Applied rewrites45.4%
Taylor expanded in z around 0
Applied rewrites74.6%
Final simplification76.9%
(FPCore (x y z t) :precision binary64 (if (or (<= y -9.5e+44) (not (<= y 5.8e+27))) (* (- 1.0 z) x) (* 1.0 x)))
/* Guarded variant: for y outside (-9.5e44, 5.8e27], the expression reduces
 * to (1 - z)*x; otherwise it rounds to x itself. */
double code(double x, double y, double z, double t) {
  int reduced = (y <= -9.5e+44) || !(y <= 5.8e+27);
  return reduced ? (1.0 - z) * x : 1.0 * x;
}
! Guarded variant: for y outside (-9.5e44, 5.8e27] the generator reduces the
! full expression to (1 - z)*x (see the FPCore condition above); otherwise
! the result rounds to x itself (1.0d0 * x). Argument t is unused here.
real(8) function code(x, y, z, t)
real(8), intent (in) :: x
real(8), intent (in) :: y
real(8), intent (in) :: z
real(8), intent (in) :: t
real(8) :: tmp
if ((y <= (-9.5d+44)) .or. (.not. (y <= 5.8d+27))) then
tmp = (1.0d0 - z) * x
else
tmp = 1.0d0 * x
end if
code = tmp
end function
public static double code(double x, double y, double z, double t) {
double tmp;
if ((y <= -9.5e+44) || !(y <= 5.8e+27)) {
tmp = (1.0 - z) * x;
} else {
tmp = 1.0 * x;
}
return tmp;
}
def code(x, y, z, t):
    """Guarded variant: (1 - z)*x for y outside (-9.5e44, 5.8e27], else 1.0*x.

    Note: the original report line collapsed all statements onto one line,
    which is invalid Python syntax; this restores the intended structure.
    """
    tmp = 0
    if (y <= -9.5e+44) or not (y <= 5.8e+27):
        tmp = (1.0 - z) * x
    else:
        tmp = 1.0 * x
    return tmp
# Guarded variant: (1 - z)*x for y outside (-9.5e44, 5.8e27], else 1.0*x.
# The original report line collapsed all statements onto one line, which is
# invalid Julia syntax; this restores the intended structure.
function code(x, y, z, t)
    tmp = 0.0
    if ((y <= -9.5e+44) || !(y <= 5.8e+27))
        tmp = Float64(Float64(1.0 - z) * x)
    else
        tmp = Float64(1.0 * x)
    end
    return tmp
end
function tmp_2 = code(x, y, z, t) tmp = 0.0; if ((y <= -9.5e+44) || ~((y <= 5.8e+27))) tmp = (1.0 - z) * x; else tmp = 1.0 * x; end tmp_2 = tmp; end
code[x_, y_, z_, t_] := If[Or[LessEqual[y, -9.5e+44], N[Not[LessEqual[y, 5.8e+27]], $MachinePrecision]], N[(N[(1.0 - z), $MachinePrecision] * x), $MachinePrecision], N[(1.0 * x), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;y \leq -9.5 \cdot 10^{+44} \lor \neg \left(y \leq 5.8 \cdot 10^{+27}\right):\\
\;\;\;\;\left(1 - z\right) \cdot x\\
\mathbf{else}:\\
\;\;\;\;1 \cdot x\\
\end{array}
\end{array}
if y < -9.5000000000000004e44 or 5.8000000000000002e27 < y Initial program 87.5%
Taylor expanded in y around inf
+-commutativeN/A
*-commutativeN/A
lower-fma.f64N/A
lower--.f6480.6
Applied rewrites80.6%
Taylor expanded in x around inf
Applied rewrites63.1%
if -9.5000000000000004e44 < y < 5.8000000000000002e27Initial program 99.9%
Taylor expanded in y around inf
+-commutativeN/A
*-commutativeN/A
lower-fma.f64N/A
lower--.f6441.7
Applied rewrites41.7%
Taylor expanded in x around inf
Applied rewrites45.6%
Taylor expanded in z around 0
Applied rewrites73.8%
Final simplification69.0%
(FPCore (x y z t) :precision binary64 (* z t))
/* Degenerate alternative: the whole expression collapses to z * t.
 * x and y are intentionally unused. */
double code(double x, double y, double z, double t) {
  (void)x;
  (void)y;
  return z * t;
}
! Degenerate alternative: the whole expression collapses to z * t.
! Arguments x and y are intentionally unused.
real(8) function code(x, y, z, t)
real(8), intent (in) :: x
real(8), intent (in) :: y
real(8), intent (in) :: z
real(8), intent (in) :: t
code = z * t
end function
public static double code(double x, double y, double z, double t) {
return z * t;
}
def code(x, y, z, t):
    """Degenerate alternative: the whole expression collapses to z * t (x, y unused)."""
    product = z * t
    return product
function code(x, y, z, t) return Float64(z * t) end
function tmp = code(x, y, z, t) tmp = z * t; end
code[x_, y_, z_, t_] := N[(z * t), $MachinePrecision]
\begin{array}{l}
\\
z \cdot t
\end{array}
Initial program 94.4%
Taylor expanded in y around inf
+-commutativeN/A
*-commutativeN/A
lower-fma.f64N/A
lower--.f6459.0
Applied rewrites59.0%
Taylor expanded in x around 0
Applied rewrites13.1%
(FPCore (x y z t) :precision binary64 (+ x (* y (* z (- (tanh (/ t y)) (tanh (/ x y)))))))
double code(double x, double y, double z, double t) {
return x + (y * (z * (tanh((t / y)) - tanh((x / y)))));
}
real(8) function code(x, y, z, t)
real(8), intent (in) :: x
real(8), intent (in) :: y
real(8), intent (in) :: z
real(8), intent (in) :: t
code = x + (y * (z * (tanh((t / y)) - tanh((x / y)))))
end function
public static double code(double x, double y, double z, double t) {
return x + (y * (z * (Math.tanh((t / y)) - Math.tanh((x / y)))));
}
def code(x, y, z, t):
    """Reassociated variant: x + y * (z * (tanh(t/y) - tanh(x/y)))."""
    diff = math.tanh(t / y) - math.tanh(x / y)
    inner = z * diff
    return x + (y * inner)
function code(x, y, z, t) return Float64(x + Float64(y * Float64(z * Float64(tanh(Float64(t / y)) - tanh(Float64(x / y)))))) end
function tmp = code(x, y, z, t) tmp = x + (y * (z * (tanh((t / y)) - tanh((x / y))))); end
code[x_, y_, z_, t_] := N[(x + N[(y * N[(z * N[(N[Tanh[N[(t / y), $MachinePrecision]], $MachinePrecision] - N[Tanh[N[(x / y), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
x + y \cdot \left(z \cdot \left(\tanh \left(\frac{t}{y}\right) - \tanh \left(\frac{x}{y}\right)\right)\right)
\end{array}
herbie shell --seed 2024332
(FPCore (x y z t)
:name "SynthBasics:moogVCF from YampaSynth-0.2"
:precision binary64
:alt
(! :herbie-platform default (+ x (* y (* z (- (tanh (/ t y)) (tanh (/ x y)))))))
(+ x (* (* y z) (- (tanh (/ t y)) (tanh (/ x y))))))