
(FPCore (x y z t) :precision binary64 (+ x (* (* y z) (- (tanh (/ t y)) (tanh (/ x y))))))
/* Original program: x + (y*z) * (tanh(t/y) - tanh(x/y)), all in binary64. */
double code(double x, double y, double z, double t) {
return x + ((y * z) * (tanh((t / y)) - tanh((x / y))));
}
! Original program: code = x + (y*z) * (tanh(t/y) - tanh(x/y)),
! evaluated in double precision (real(8)).
real(8) function code(x, y, z, t)
real(8), intent (in) :: x
real(8), intent (in) :: y
real(8), intent (in) :: z
real(8), intent (in) :: t
code = x + ((y * z) * (tanh((t / y)) - tanh((x / y))))
end function
/** Original expression: x + (y*z) * (Math.tanh(t/y) - Math.tanh(x/y)), in double. */
public static double code(double x, double y, double z, double t) {
return x + ((y * z) * (Math.tanh((t / y)) - Math.tanh((x / y))));
}
def code(x, y, z, t):
    """Evaluate x + (y*z) * (tanh(t/y) - tanh(x/y)) in IEEE double precision."""
    scale = y * z
    delta = math.tanh(t / y) - math.tanh(x / y)
    return x + scale * delta
# Float64 evaluation of x + (y*z) * (tanh(t/y) - tanh(x/y)).
function code(x, y, z, t) return Float64(x + Float64(Float64(y * z) * Float64(tanh(Float64(t / y)) - tanh(Float64(x / y))))) end
% x + (y*z) * (tanh(t/y) - tanh(x/y)), double precision.
function tmp = code(x, y, z, t) tmp = x + ((y * z) * (tanh((t / y)) - tanh((x / y)))); end
(* Machine-precision evaluation of x + (y*z) * (Tanh[t/y] - Tanh[x/y]). *)
code[x_, y_, z_, t_] := N[(x + N[(N[(y * z), $MachinePrecision] * N[(N[Tanh[N[(t / y), $MachinePrecision]], $MachinePrecision] - N[Tanh[N[(x / y), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
x + \left(y \cdot z\right) \cdot \left(\tanh \left(\frac{t}{y}\right) - \tanh \left(\frac{x}{y}\right)\right)
\end{array}
Sampling outcomes in binary64 precision:
Herbie found 8 alternatives:
| Alternative | Accuracy | Speedup |
|---|---|---|
(FPCore (x y z t) :precision binary64 (+ x (* (* y z) (- (tanh (/ t y)) (tanh (/ x y))))))
double code(double x, double y, double z, double t) {
return x + ((y * z) * (tanh((t / y)) - tanh((x / y))));
}
! First listed alternative: identical to the input program,
! code = x + (y*z) * (tanh(t/y) - tanh(x/y)) in real(8).
real(8) function code(x, y, z, t)
real(8), intent (in) :: x
real(8), intent (in) :: y
real(8), intent (in) :: z
real(8), intent (in) :: t
code = x + ((y * z) * (tanh((t / y)) - tanh((x / y))))
end function
public static double code(double x, double y, double z, double t) {
return x + ((y * z) * (Math.tanh((t / y)) - Math.tanh((x / y))));
}
def code(x, y, z, t):
    """x + (y*z) * (tanh(t/y) - tanh(x/y)) -- same as the FPCore input."""
    diff = math.tanh(t / y) - math.tanh(x / y)
    return x + (y * z) * diff
function code(x, y, z, t) return Float64(x + Float64(Float64(y * z) * Float64(tanh(Float64(t / y)) - tanh(Float64(x / y))))) end
function tmp = code(x, y, z, t) tmp = x + ((y * z) * (tanh((t / y)) - tanh((x / y)))); end
code[x_, y_, z_, t_] := N[(x + N[(N[(y * z), $MachinePrecision] * N[(N[Tanh[N[(t / y), $MachinePrecision]], $MachinePrecision] - N[Tanh[N[(x / y), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
x + \left(y \cdot z\right) \cdot \left(\tanh \left(\frac{t}{y}\right) - \tanh \left(\frac{x}{y}\right)\right)
\end{array}
(FPCore (x y z t)
:precision binary64
(let* ((t_1 (+ x (* (- (tanh (/ t y)) (tanh (/ x y))) (* z y)))))
(if (or (<= t_1 (- INFINITY)) (not (<= t_1 1e+301)))
(+ x (* z (- t x)))
t_1)))
/* Herbie alternative: evaluate t_1 = x + (tanh(t/y) - tanh(x/y)) * (z*y)
   directly; if it lands at/below -inf or above 1e301 (overflow regime),
   fall back to x + z*(t - x), the report's y->inf Taylor form. */
double code(double x, double y, double z, double t) {
double t_1 = x + ((tanh((t / y)) - tanh((x / y))) * (z * y));
double tmp;
/* !(t_1 <= 1e+301) also catches NaN, unlike (t_1 > 1e+301). */
if ((t_1 <= -((double) INFINITY)) || !(t_1 <= 1e+301)) {
tmp = x + (z * (t - x));
} else {
tmp = t_1;
}
return tmp;
}
public static double code(double x, double y, double z, double t) {
double t_1 = x + ((Math.tanh((t / y)) - Math.tanh((x / y))) * (z * y));
double tmp;
if ((t_1 <= -Double.POSITIVE_INFINITY) || !(t_1 <= 1e+301)) {
tmp = x + (z * (t - x));
} else {
tmp = t_1;
}
return tmp;
}
def code(x, y, z, t):
    """Guarded rewrite: use the direct tanh-difference form unless it
    lands at/below -inf or above 1e301, then fall back to x + z*(t - x)."""
    t_1 = x + ((math.tanh(t / y) - math.tanh(x / y)) * (z * y))
    if t_1 <= -math.inf or not t_1 <= 1e+301:
        return x + (z * (t - x))
    return t_1
function code(x, y, z, t) t_1 = Float64(x + Float64(Float64(tanh(Float64(t / y)) - tanh(Float64(x / y))) * Float64(z * y))) tmp = 0.0 if ((t_1 <= Float64(-Inf)) || !(t_1 <= 1e+301)) tmp = Float64(x + Float64(z * Float64(t - x))); else tmp = t_1; end return tmp end
function tmp_2 = code(x, y, z, t) t_1 = x + ((tanh((t / y)) - tanh((x / y))) * (z * y)); tmp = 0.0; if ((t_1 <= -Inf) || ~((t_1 <= 1e+301))) tmp = x + (z * (t - x)); else tmp = t_1; end tmp_2 = tmp; end
code[x_, y_, z_, t_] := Block[{t$95$1 = N[(x + N[(N[(N[Tanh[N[(t / y), $MachinePrecision]], $MachinePrecision] - N[Tanh[N[(x / y), $MachinePrecision]], $MachinePrecision]), $MachinePrecision] * N[(z * y), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]}, If[Or[LessEqual[t$95$1, (-Infinity)], N[Not[LessEqual[t$95$1, 1e+301]], $MachinePrecision]], N[(x + N[(z * N[(t - x), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], t$95$1]]
\begin{array}{l}
\\
\begin{array}{l}
t_1 := x + \left(\tanh \left(\frac{t}{y}\right) - \tanh \left(\frac{x}{y}\right)\right) \cdot \left(z \cdot y\right)\\
\mathbf{if}\;t_1 \leq -\infty \lor \neg \left(t_1 \leq 10^{+301}\right):\\
\;\;\;\;x + z \cdot \left(t - x\right)\\
\mathbf{else}:\\
\;\;\;\;t_1\\
\end{array}
\end{array}
if (+.f64 x (*.f64 (*.f64 y z) (-.f64 (tanh.f64 (/.f64 t y)) (tanh.f64 (/.f64 x y))))) < -inf.0 or 1.00000000000000005e301 < (+.f64 x (*.f64 (*.f64 y z) (-.f64 (tanh.f64 (/.f64 t y)) (tanh.f64 (/.f64 x y))))) Initial program 43.8%
Taylor expanded in y around inf 99.9%
if -inf.0 < (+.f64 x (*.f64 (*.f64 y z) (-.f64 (tanh.f64 (/.f64 t y)) (tanh.f64 (/.f64 x y))))) < 1.00000000000000005e301 Initial program 98.6%
Final simplification 98.8%
(FPCore (x y z t) :precision binary64 (fma z (* y (- (tanh (/ t y)) (tanh (/ x y)))) x))
/* fma form: fma(z, y*(tanh(t/y) - tanh(x/y)), x) fuses the final
   multiply-add, removing one intermediate rounding step. */
double code(double x, double y, double z, double t) {
return fma(z, (y * (tanh((t / y)) - tanh((x / y)))), x);
}
function code(x, y, z, t) return fma(z, Float64(y * Float64(tanh(Float64(t / y)) - tanh(Float64(x / y)))), x) end
code[x_, y_, z_, t_] := N[(z * N[(y * N[(N[Tanh[N[(t / y), $MachinePrecision]], $MachinePrecision] - N[Tanh[N[(x / y), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]), $MachinePrecision] + x), $MachinePrecision]
\begin{array}{l}
\\
\mathsf{fma}\left(z, y \cdot \left(\tanh \left(\frac{t}{y}\right) - \tanh \left(\frac{x}{y}\right)\right), x\right)
\end{array}
Initial program 92.8%
+-commutative92.8%
*-commutative92.8%
associate-*l*97.3%
fma-def97.3%
Simplified97.3%
Final simplification97.3%
(FPCore (x y z t) :precision binary64 (let* ((t_1 (* y (tanh (/ t y))))) (if (<= y 4.4e+60) (fma z t_1 x) (+ x (* z (- t_1 x))))))
double code(double x, double y, double z, double t) {
double t_1 = y * tanh((t / y));
double tmp;
if (y <= 4.4e+60) {
tmp = fma(z, t_1, x);
} else {
tmp = x + (z * (t_1 - x));
}
return tmp;
}
function code(x, y, z, t) t_1 = Float64(y * tanh(Float64(t / y))) tmp = 0.0 if (y <= 4.4e+60) tmp = fma(z, t_1, x); else tmp = Float64(x + Float64(z * Float64(t_1 - x))); end return tmp end
code[x_, y_, z_, t_] := Block[{t$95$1 = N[(y * N[Tanh[N[(t / y), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]}, If[LessEqual[y, 4.4e+60], N[(z * t$95$1 + x), $MachinePrecision], N[(x + N[(z * N[(t$95$1 - x), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]]
\begin{array}{l}
\\
\begin{array}{l}
t_1 := y \cdot \tanh \left(\frac{t}{y}\right)\\
\mathbf{if}\;y \leq 4.4 \cdot 10^{+60}:\\
\;\;\;\;\mathsf{fma}\left(z, t_1, x\right)\\
\mathbf{else}:\\
\;\;\;\;x + z \cdot \left(t_1 - x\right)\\
\end{array}
\end{array}
if y < 4.39999999999999992e60Initial program 96.0%
+-commutative96.0%
*-commutative96.0%
associate-*l*97.4%
fma-def97.4%
Simplified97.4%
Taylor expanded in x around 0 25.2%
associate-/r*25.2%
div-sub25.2%
rec-exp25.2%
rec-exp25.2%
tanh-def-a85.2%
Simplified85.2%
if 4.39999999999999992e60 < y Initial program 81.7%
Taylor expanded in x around 0 60.2%
+-commutative60.2%
Simplified95.3%
Final simplification87.4%
(FPCore (x y z t) :precision binary64 (let* ((t_1 (tanh (/ t y)))) (if (<= y 4.4e+60) (+ x (* t_1 (* z y))) (+ x (* z (- (* y t_1) x))))))
double code(double x, double y, double z, double t) {
double t_1 = tanh((t / y));
double tmp;
if (y <= 4.4e+60) {
tmp = x + (t_1 * (z * y));
} else {
tmp = x + (z * ((y * t_1) - x));
}
return tmp;
}
! Alternative: drop tanh(x/y) and branch on y.
! For y <= 4.4e60: x + tanh(t/y)*(z*y); otherwise the near-linear
! form x + z*(y*tanh(t/y) - x). Threshold from Herbie's regime split.
real(8) function code(x, y, z, t)
real(8), intent (in) :: x
real(8), intent (in) :: y
real(8), intent (in) :: z
real(8), intent (in) :: t
real(8) :: t_1
real(8) :: tmp
t_1 = tanh((t / y))
if (y <= 4.4d+60) then
tmp = x + (t_1 * (z * y))
else
tmp = x + (z * ((y * t_1) - x))
end if
code = tmp
end function
public static double code(double x, double y, double z, double t) {
double t_1 = Math.tanh((t / y));
double tmp;
if (y <= 4.4e+60) {
tmp = x + (t_1 * (z * y));
} else {
tmp = x + (z * ((y * t_1) - x));
}
return tmp;
}
def code(x, y, z, t):
    """Piecewise form: for y <= 4.4e60 use x + tanh(t/y)*(z*y),
    otherwise x + z*(y*tanh(t/y) - x)."""
    th = math.tanh(t / y)
    if y <= 4.4e+60:
        return x + (th * (z * y))
    return x + (z * ((y * th) - x))
function code(x, y, z, t) t_1 = tanh(Float64(t / y)) tmp = 0.0 if (y <= 4.4e+60) tmp = Float64(x + Float64(t_1 * Float64(z * y))); else tmp = Float64(x + Float64(z * Float64(Float64(y * t_1) - x))); end return tmp end
function tmp_2 = code(x, y, z, t) t_1 = tanh((t / y)); tmp = 0.0; if (y <= 4.4e+60) tmp = x + (t_1 * (z * y)); else tmp = x + (z * ((y * t_1) - x)); end tmp_2 = tmp; end
code[x_, y_, z_, t_] := Block[{t$95$1 = N[Tanh[N[(t / y), $MachinePrecision]], $MachinePrecision]}, If[LessEqual[y, 4.4e+60], N[(x + N[(t$95$1 * N[(z * y), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], N[(x + N[(z * N[(N[(y * t$95$1), $MachinePrecision] - x), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]]
\begin{array}{l}
\\
\begin{array}{l}
t_1 := \tanh \left(\frac{t}{y}\right)\\
\mathbf{if}\;y \leq 4.4 \cdot 10^{+60}:\\
\;\;\;\;x + t_1 \cdot \left(z \cdot y\right)\\
\mathbf{else}:\\
\;\;\;\;x + z \cdot \left(y \cdot t_1 - x\right)\\
\end{array}
\end{array}
if y < 4.39999999999999992e60Initial program 96.0%
Taylor expanded in x around 0 25.2%
associate-*r*25.0%
associate-/r*25.0%
div-sub25.0%
rec-exp25.0%
rec-exp25.0%
tanh-def-a83.7%
Simplified83.7%
if 4.39999999999999992e60 < y Initial program 81.7%
Taylor expanded in x around 0 60.2%
+-commutative60.2%
Simplified95.3%
Final simplification86.3%
(FPCore (x y z t) :precision binary64 (if (<= y 1.12e+77) (+ x (* (tanh (/ t y)) (* z y))) (+ x (* z (- t x)))))
double code(double x, double y, double z, double t) {
double tmp;
if (y <= 1.12e+77) {
tmp = x + (tanh((t / y)) * (z * y));
} else {
tmp = x + (z * (t - x));
}
return tmp;
}
! Alternative: keep the tanh term for y <= 1.12e77; for larger y
! use the linear limit x + z*(t - x) (y -> inf Taylor expansion).
real(8) function code(x, y, z, t)
real(8), intent (in) :: x
real(8), intent (in) :: y
real(8), intent (in) :: z
real(8), intent (in) :: t
real(8) :: tmp
if (y <= 1.12d+77) then
tmp = x + (tanh((t / y)) * (z * y))
else
tmp = x + (z * (t - x))
end if
code = tmp
end function
public static double code(double x, double y, double z, double t) {
double tmp;
if (y <= 1.12e+77) {
tmp = x + (Math.tanh((t / y)) * (z * y));
} else {
tmp = x + (z * (t - x));
}
return tmp;
}
def code(x, y, z, t):
    """For y <= 1.12e77 keep the tanh term; beyond that use the
    linear limit x + z*(t - x)."""
    if y <= 1.12e+77:
        return x + (math.tanh(t / y) * (z * y))
    return x + (z * (t - x))
function code(x, y, z, t) tmp = 0.0 if (y <= 1.12e+77) tmp = Float64(x + Float64(tanh(Float64(t / y)) * Float64(z * y))); else tmp = Float64(x + Float64(z * Float64(t - x))); end return tmp end
function tmp_2 = code(x, y, z, t) tmp = 0.0; if (y <= 1.12e+77) tmp = x + (tanh((t / y)) * (z * y)); else tmp = x + (z * (t - x)); end tmp_2 = tmp; end
code[x_, y_, z_, t_] := If[LessEqual[y, 1.12e+77], N[(x + N[(N[Tanh[N[(t / y), $MachinePrecision]], $MachinePrecision] * N[(z * y), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], N[(x + N[(z * N[(t - x), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;y \leq 1.12 \cdot 10^{+77}:\\
\;\;\;\;x + \tanh \left(\frac{t}{y}\right) \cdot \left(z \cdot y\right)\\
\mathbf{else}:\\
\;\;\;\;x + z \cdot \left(t - x\right)\\
\end{array}
\end{array}
if y < 1.1199999999999999e77Initial program 96.0%
Taylor expanded in x around 0 24.9%
associate-*r*24.8%
associate-/r*24.8%
div-sub24.8%
rec-exp24.8%
rec-exp24.8%
tanh-def-a83.9%
Simplified83.9%
if 1.1199999999999999e77 < y Initial program 81.0%
Taylor expanded in y around inf 89.6%
Final simplification85.1%
(FPCore (x y z t) :precision binary64 (if (<= y 2.85e+46) x (+ x (* z (- t x)))))
double code(double x, double y, double z, double t) {
double tmp;
if (y <= 2.85e+46) {
tmp = x;
} else {
tmp = x + (z * (t - x));
}
return tmp;
}
! Coarser alternative: plain x for y <= 2.85e46, the linear limit
! x + z*(t - x) above that threshold.
real(8) function code(x, y, z, t)
real(8), intent (in) :: x
real(8), intent (in) :: y
real(8), intent (in) :: z
real(8), intent (in) :: t
real(8) :: tmp
if (y <= 2.85d+46) then
tmp = x
else
tmp = x + (z * (t - x))
end if
code = tmp
end function
public static double code(double x, double y, double z, double t) {
double tmp;
if (y <= 2.85e+46) {
tmp = x;
} else {
tmp = x + (z * (t - x));
}
return tmp;
}
def code(x, y, z, t):
    """Coarse approximation: plain x below the y-threshold, the
    linear limit x + z*(t - x) above it."""
    if y <= 2.85e+46:
        return x
    return x + (z * (t - x))
function code(x, y, z, t) tmp = 0.0 if (y <= 2.85e+46) tmp = x; else tmp = Float64(x + Float64(z * Float64(t - x))); end return tmp end
function tmp_2 = code(x, y, z, t) tmp = 0.0; if (y <= 2.85e+46) tmp = x; else tmp = x + (z * (t - x)); end tmp_2 = tmp; end
code[x_, y_, z_, t_] := If[LessEqual[y, 2.85e+46], x, N[(x + N[(z * N[(t - x), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;y \leq 2.85 \cdot 10^{+46}:\\
\;\;\;\;x\\
\mathbf{else}:\\
\;\;\;\;x + z \cdot \left(t - x\right)\\
\end{array}
\end{array}
if y < 2.84999999999999994e46Initial program 96.3%
+-commutative96.3%
*-commutative96.3%
associate-*l*97.3%
fma-def97.3%
Simplified97.3%
Taylor expanded in z around 0 70.1%
if 2.84999999999999994e46 < y Initial program 82.6%
Taylor expanded in y around inf 81.3%
Final simplification72.9%
(FPCore (x y z t) :precision binary64 (if (<= y 0.4) x (+ x (* z t))))
double code(double x, double y, double z, double t) {
double tmp;
if (y <= 0.4) {
tmp = x;
} else {
tmp = x + (z * t);
}
return tmp;
}
! Crude alternative: plain x for y <= 0.4, else x + z*t
! (t -> inf Taylor form per the derivation trace).
real(8) function code(x, y, z, t)
real(8), intent (in) :: x
real(8), intent (in) :: y
real(8), intent (in) :: z
real(8), intent (in) :: t
real(8) :: tmp
if (y <= 0.4d0) then
tmp = x
else
tmp = x + (z * t)
end if
code = tmp
end function
public static double code(double x, double y, double z, double t) {
double tmp;
if (y <= 0.4) {
tmp = x;
} else {
tmp = x + (z * t);
}
return tmp;
}
def code(x, y, z, t):
    """Crude approximation: x when y <= 0.4, otherwise x + z*t."""
    return x if y <= 0.4 else x + (z * t)
function code(x, y, z, t) tmp = 0.0 if (y <= 0.4) tmp = x; else tmp = Float64(x + Float64(z * t)); end return tmp end
function tmp_2 = code(x, y, z, t) tmp = 0.0; if (y <= 0.4) tmp = x; else tmp = x + (z * t); end tmp_2 = tmp; end
code[x_, y_, z_, t_] := If[LessEqual[y, 0.4], x, N[(x + N[(z * t), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;y \leq 0.4:\\
\;\;\;\;x\\
\mathbf{else}:\\
\;\;\;\;x + z \cdot t\\
\end{array}
\end{array}
if y < 0.40000000000000002Initial program 96.2%
+-commutative96.2%
*-commutative96.2%
associate-*l*97.3%
fma-def97.3%
Simplified97.3%
Taylor expanded in z around 0 70.0%
if 0.40000000000000002 < y Initial program 83.6%
Taylor expanded in y around inf 81.0%
Taylor expanded in t around inf 66.9%
*-commutative66.9%
Simplified66.9%
Final simplification69.2%
(FPCore (x y z t) :precision binary64 x)
double code(double x, double y, double z, double t) {
return x;
}
! Crudest alternative: ignore y, z, t entirely and return x.
real(8) function code(x, y, z, t)
real(8), intent (in) :: x
real(8), intent (in) :: y
real(8), intent (in) :: z
real(8), intent (in) :: t
code = x
end function
public static double code(double x, double y, double z, double t) {
return x;
}
def code(x, y, z, t):
    """Crudest alternative: ignore y, z, t and return x unchanged."""
    return x
function code(x, y, z, t) return x end
function tmp = code(x, y, z, t) tmp = x; end
code[x_, y_, z_, t_] := x
\begin{array}{l}
\\
x
\end{array}
Initial program 92.8%
+-commutative92.8%
*-commutative92.8%
associate-*l*97.3%
fma-def97.3%
Simplified97.3%
Taylor expanded in z around 0 63.7%
Final simplification63.7%
(FPCore (x y z t) :precision binary64 (+ x (* y (* z (- (tanh (/ t y)) (tanh (/ x y)))))))
double code(double x, double y, double z, double t) {
return x + (y * (z * (tanh((t / y)) - tanh((x / y)))));
}
! Target form from the FPCore :herbie-target annotation:
! x + y * (z * (tanh(t/y) - tanh(x/y))).
real(8) function code(x, y, z, t)
real(8), intent (in) :: x
real(8), intent (in) :: y
real(8), intent (in) :: z
real(8), intent (in) :: t
code = x + (y * (z * (tanh((t / y)) - tanh((x / y)))))
end function
public static double code(double x, double y, double z, double t) {
return x + (y * (z * (Math.tanh((t / y)) - Math.tanh((x / y)))));
}
def code(x, y, z, t):
    """Target form: x + y * (z * (tanh(t/y) - tanh(x/y)))."""
    diff = math.tanh(t / y) - math.tanh(x / y)
    return x + y * (z * diff)
function code(x, y, z, t) return Float64(x + Float64(y * Float64(z * Float64(tanh(Float64(t / y)) - tanh(Float64(x / y)))))) end
function tmp = code(x, y, z, t) tmp = x + (y * (z * (tanh((t / y)) - tanh((x / y))))); end
code[x_, y_, z_, t_] := N[(x + N[(y * N[(z * N[(N[Tanh[N[(t / y), $MachinePrecision]], $MachinePrecision] - N[Tanh[N[(x / y), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
x + y \cdot \left(z \cdot \left(\tanh \left(\frac{t}{y}\right) - \tanh \left(\frac{x}{y}\right)\right)\right)
\end{array}
herbie shell --seed 2024024
(FPCore (x y z t)
:name "SynthBasics:moogVCF from YampaSynth-0.2"
:precision binary64
:herbie-target
(+ x (* y (* z (- (tanh (/ t y)) (tanh (/ x y))))))
(+ x (* (* y z) (- (tanh (/ t y)) (tanh (/ x y))))))