
(FPCore (x y z t) :precision binary64 (+ x (* (* y z) (- (tanh (/ t y)) (tanh (/ x y))))))
double code(double x, double y, double z, double t) {
return x + ((y * z) * (tanh((t / y)) - tanh((x / y))));
}
! Herbie baseline: x + (y*z) * (tanh(t/y) - tanh(x/y)), evaluated in binary64.
real(8) function code(x, y, z, t)
    real(8), intent (in) :: x
    real(8), intent (in) :: y
    real(8), intent (in) :: z
    real(8), intent (in) :: t
    real(8) :: th_hi, th_lo
    th_hi = tanh(t / y)
    th_lo = tanh(x / y)
    code = x + ((y * z) * (th_hi - th_lo))
end function
/** Herbie baseline: x + (y*z) * (tanh(t/y) - tanh(x/y)) in double precision. */
public static double code(double x, double y, double z, double t) {
    final double thHi = Math.tanh(t / y);
    final double thLo = Math.tanh(x / y);
    return x + (y * z) * (thHi - thLo);
}
def code(x, y, z, t):
    """Herbie baseline: x + (y*z) * (tanh(t/y) - tanh(x/y))."""
    th_hi = math.tanh(t / y)
    th_lo = math.tanh(x / y)
    return x + (y * z) * (th_hi - th_lo)
# Herbie baseline: x + (y*z) * (tanh(t/y) - tanh(x/y)); Float64() rounds each step.
function code(x, y, z, t)
    th_hi = tanh(Float64(t / y))
    th_lo = tanh(Float64(x / y))
    return Float64(x + Float64(Float64(y * z) * Float64(th_hi - th_lo)))
end
% Herbie baseline: x + (y*z) * (tanh(t/y) - tanh(x/y)).
function tmp = code(x, y, z, t)
  th_hi = tanh(t / y);
  th_lo = tanh(x / y);
  tmp = x + ((y * z) * (th_hi - th_lo));
end
(* Herbie baseline: x + (y z)(Tanh[t/y] - Tanh[x/y]); N[..., $MachinePrecision] rounds every intermediate to machine precision. *)
code[x_, y_, z_, t_] := N[(x + N[(N[(y * z), $MachinePrecision] * N[(N[Tanh[N[(t / y), $MachinePrecision]], $MachinePrecision] - N[Tanh[N[(x / y), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
x + \left(y \cdot z\right) \cdot \left(\tanh \left(\frac{t}{y}\right) - \tanh \left(\frac{x}{y}\right)\right)
\end{array}
Sampling outcomes in binary64 precision:
Herbie found 6 alternatives:
| Alternative | Accuracy | Speedup |
|---|---|---|
(FPCore (x y z t) :precision binary64 (+ x (* (* y z) (- (tanh (/ t y)) (tanh (/ x y))))))
double code(double x, double y, double z, double t) {
return x + ((y * z) * (tanh((t / y)) - tanh((x / y))));
}
! Herbie baseline (repeated listing): x + (y*z) * (tanh(t/y) - tanh(x/y)).
real(8) function code(x, y, z, t)
    real(8), intent (in) :: x
    real(8), intent (in) :: y
    real(8), intent (in) :: z
    real(8), intent (in) :: t
    real(8) :: th_hi, th_lo
    th_hi = tanh(t / y)
    th_lo = tanh(x / y)
    code = x + ((y * z) * (th_hi - th_lo))
end function
/** Herbie baseline (repeated listing): x + (y*z) * (tanh(t/y) - tanh(x/y)). */
public static double code(double x, double y, double z, double t) {
    final double thHi = Math.tanh(t / y);
    final double thLo = Math.tanh(x / y);
    return x + (y * z) * (thHi - thLo);
}
def code(x, y, z, t):
    """Herbie baseline (repeated listing): x + (y*z) * (tanh(t/y) - tanh(x/y))."""
    th_hi = math.tanh(t / y)
    th_lo = math.tanh(x / y)
    return x + (y * z) * (th_hi - th_lo)
# Herbie baseline (repeated listing); Float64() rounds each intermediate.
function code(x, y, z, t)
    th_hi = tanh(Float64(t / y))
    th_lo = tanh(Float64(x / y))
    return Float64(x + Float64(Float64(y * z) * Float64(th_hi - th_lo)))
end
% Herbie baseline (repeated listing): x + (y*z) * (tanh(t/y) - tanh(x/y)).
function tmp = code(x, y, z, t)
  th_hi = tanh(t / y);
  th_lo = tanh(x / y);
  tmp = x + ((y * z) * (th_hi - th_lo));
end
(* Herbie baseline (repeated listing): x + (y z)(Tanh[t/y] - Tanh[x/y]), machine-precision rounding at each step. *)
code[x_, y_, z_, t_] := N[(x + N[(N[(y * z), $MachinePrecision] * N[(N[Tanh[N[(t / y), $MachinePrecision]], $MachinePrecision] - N[Tanh[N[(x / y), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
x + \left(y \cdot z\right) \cdot \left(\tanh \left(\frac{t}{y}\right) - \tanh \left(\frac{x}{y}\right)\right)
\end{array}
(FPCore (x y z t) :precision binary64 (fma (* (- (tanh (/ t y)) (tanh (/ x y))) y) z x))
double code(double x, double y, double z, double t) {
return fma(((tanh((t / y)) - tanh((x / y))) * y), z, x);
}
# Herbie alternative: fma((tanh(t/y) - tanh(x/y)) * y, z, x).
function code(x, y, z, t)
    diff = Float64(tanh(Float64(t / y)) - tanh(Float64(x / y)))
    return fma(Float64(diff * y), z, x)
end
(* Herbie fma alternative rendered without a fused op: ((Tanh[t/y]-Tanh[x/y]) y) z + x, machine-precision rounding at each step. *)
code[x_, y_, z_, t_] := N[(N[(N[(N[Tanh[N[(t / y), $MachinePrecision]], $MachinePrecision] - N[Tanh[N[(x / y), $MachinePrecision]], $MachinePrecision]), $MachinePrecision] * y), $MachinePrecision] * z + x), $MachinePrecision]
\begin{array}{l}
\\
\mathsf{fma}\left(\left(\tanh \left(\frac{t}{y}\right) - \tanh \left(\frac{x}{y}\right)\right) \cdot y, z, x\right)
\end{array}
Initial program 95.1%
lift-+.f64N/A
+-commutativeN/A
lift-*.f64N/A
*-commutativeN/A
lift-*.f64N/A
associate-*r*N/A
lower-fma.f64N/A
lower-*.f6498.5
Applied rewrites98.5%
(FPCore (x y z t) :precision binary64 (if (or (<= x -5.7e-5) (not (<= x 5e+40))) (* 1.0 x) (fma (* (- (tanh (/ t y)) (/ x y)) z) y x)))
double code(double x, double y, double z, double t) {
double tmp;
if ((x <= -5.7e-5) || !(x <= 5e+40)) {
tmp = 1.0 * x;
} else {
tmp = fma(((tanh((t / y)) - (x / y)) * z), y, x);
}
return tmp;
}
# Herbie piecewise alternative: pass x through outside -5.7e-5 < x <= 5e40,
# otherwise use the linearized tanh(x/y) ~ x/y with fma.
function code(x, y, z, t)
    if (x <= -5.7e-5) || !(x <= 5e+40)
        return Float64(1.0 * x)
    end
    return fma(Float64(Float64(tanh(Float64(t / y)) - Float64(x / y)) * z), y, x)
end
(* Herbie piecewise alternative on x: pass-through 1.0*x outside -5.7e-5 < x <= 5e40; otherwise linearized (Tanh[t/y] - x/y) z y + x. *)
code[x_, y_, z_, t_] := If[Or[LessEqual[x, -5.7e-5], N[Not[LessEqual[x, 5e+40]], $MachinePrecision]], N[(1.0 * x), $MachinePrecision], N[(N[(N[(N[Tanh[N[(t / y), $MachinePrecision]], $MachinePrecision] - N[(x / y), $MachinePrecision]), $MachinePrecision] * z), $MachinePrecision] * y + x), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;x \leq -5.7 \cdot 10^{-5} \lor \neg \left(x \leq 5 \cdot 10^{+40}\right):\\
\;\;\;\;1 \cdot x\\
\mathbf{else}:\\
\;\;\;\;\mathsf{fma}\left(\left(\tanh \left(\frac{t}{y}\right) - \frac{x}{y}\right) \cdot z, y, x\right)\\
\end{array}
\end{array}
if x < -5.7000000000000003e-5 or 5.00000000000000003e40 < x Initial program 97.3%
Taylor expanded in y around inf
+-commutativeN/A
*-commutativeN/A
lower-fma.f64N/A
lower--.f6465.7
Applied rewrites65.7%
Taylor expanded in x around inf
Applied rewrites67.3%
Taylor expanded in z around 0
Applied rewrites86.2%
if -5.7000000000000003e-5 < x < 5.00000000000000003e40Initial program 92.4%
Taylor expanded in x around 0
lower-/.f6485.1
Applied rewrites85.1%
lift-+.f64N/A
+-commutativeN/A
lift-*.f64N/A
lift-*.f64N/A
associate-*l*N/A
*-commutativeN/A
lower-fma.f64N/A
*-commutativeN/A
lower-*.f6486.6
Applied rewrites86.6%
Final simplification86.4%
(FPCore (x y z t) :precision binary64 (if (or (<= y -3.8e+87) (not (<= y 1.2e+41))) (fma (- t x) z x) (* 1.0 x)))
double code(double x, double y, double z, double t) {
double tmp;
if ((y <= -3.8e+87) || !(y <= 1.2e+41)) {
tmp = fma((t - x), z, x);
} else {
tmp = 1.0 * x;
}
return tmp;
}
# Herbie piecewise alternative on y: fma(t - x, z, x) for extreme y, else pass x through.
function code(x, y, z, t)
    if (y <= -3.8e+87) || !(y <= 1.2e+41)
        return fma(Float64(t - x), z, x)
    end
    return Float64(1.0 * x)
end
(* Herbie piecewise alternative on y: (t - x) z + x for y outside (-3.8e87, 1.2e41]; otherwise pass-through 1.0*x. *)
code[x_, y_, z_, t_] := If[Or[LessEqual[y, -3.8e+87], N[Not[LessEqual[y, 1.2e+41]], $MachinePrecision]], N[(N[(t - x), $MachinePrecision] * z + x), $MachinePrecision], N[(1.0 * x), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;y \leq -3.8 \cdot 10^{+87} \lor \neg \left(y \leq 1.2 \cdot 10^{+41}\right):\\
\;\;\;\;\mathsf{fma}\left(t - x, z, x\right)\\
\mathbf{else}:\\
\;\;\;\;1 \cdot x\\
\end{array}
\end{array}
if y < -3.80000000000000011e87 or 1.2000000000000001e41 < y Initial program 88.8%
Taylor expanded in y around inf
+-commutativeN/A
*-commutativeN/A
lower-fma.f64N/A
lower--.f6485.1
Applied rewrites85.1%
if -3.80000000000000011e87 < y < 1.2000000000000001e41Initial program 99.3%
Taylor expanded in y around inf
+-commutativeN/A
*-commutativeN/A
lower-fma.f64N/A
lower--.f6448.5
Applied rewrites48.5%
Taylor expanded in x around inf
Applied rewrites54.3%
Taylor expanded in z around 0
Applied rewrites76.3%
Final simplification79.8%
(FPCore (x y z t) :precision binary64 (if (or (<= y -1.02e+151) (not (<= y 6.2e+46))) (* (- 1.0 z) x) (* 1.0 x)))
/* Herbie piecewise alternative on y: for y outside (-1.02e151, 6.2e46] use
   (1 - z) * x; otherwise pass x through (written 1.0*x as generated). */
double code(double x, double y, double z, double t) {
    if ((y <= -1.02e+151) || !(y <= 6.2e+46)) {
        return (1.0 - z) * x;
    }
    return 1.0 * x;
}
! Herbie piecewise alternative on y: (1 - z)*x when y is outside
! (-1.02e151, 6.2e46]; otherwise pass x through (1.0d0*x as generated).
real(8) function code(x, y, z, t)
    real(8), intent (in) :: x
    real(8), intent (in) :: y
    real(8), intent (in) :: z
    real(8), intent (in) :: t
    if ((y <= (-1.02d+151)) .or. (.not. (y <= 6.2d+46))) then
        code = (1.0d0 - z) * x
    else
        code = 1.0d0 * x
    end if
end function
/**
 * Herbie piecewise alternative on y: (1 - z) * x when y lies outside
 * (-1.02e151, 6.2e46]; otherwise x is passed through (1.0 * x as generated).
 */
public static double code(double x, double y, double z, double t) {
    final boolean extremeY = (y <= -1.02e+151) || !(y <= 6.2e+46);
    return extremeY ? (1.0 - z) * x : 1.0 * x;
}
def code(x, y, z, t):
    """Herbie piecewise alternative on y: (1 - z)*x for y outside
    (-1.02e151, 6.2e46]; otherwise pass x through (1.0 * x as generated)."""
    if (y <= -1.02e+151) or not (y <= 6.2e+46):
        return (1.0 - z) * x
    return 1.0 * x
# Herbie piecewise alternative on y: (1 - z)*x for extreme y, else pass x through.
function code(x, y, z, t)
    if (y <= -1.02e+151) || !(y <= 6.2e+46)
        return Float64(Float64(1.0 - z) * x)
    end
    return Float64(1.0 * x)
end
% Herbie piecewise alternative on y: (1 - z)*x for extreme y, else pass x through.
function tmp_2 = code(x, y, z, t)
  if ((y <= -1.02e+151) || ~((y <= 6.2e+46)))
    tmp_2 = (1.0 - z) * x;
  else
    tmp_2 = 1.0 * x;
  end
end
(* Herbie piecewise alternative on y: (1 - z) x for y outside (-1.02e151, 6.2e46]; otherwise pass-through 1.0*x. *)
code[x_, y_, z_, t_] := If[Or[LessEqual[y, -1.02e+151], N[Not[LessEqual[y, 6.2e+46]], $MachinePrecision]], N[(N[(1.0 - z), $MachinePrecision] * x), $MachinePrecision], N[(1.0 * x), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;y \leq -1.02 \cdot 10^{+151} \lor \neg \left(y \leq 6.2 \cdot 10^{+46}\right):\\
\;\;\;\;\left(1 - z\right) \cdot x\\
\mathbf{else}:\\
\;\;\;\;1 \cdot x\\
\end{array}
\end{array}
if y < -1.02000000000000002e151 or 6.1999999999999995e46 < y Initial program 86.8%
Taylor expanded in y around inf
+-commutativeN/A
*-commutativeN/A
lower-fma.f64N/A
lower--.f6485.6
Applied rewrites85.6%
Taylor expanded in x around inf
Applied rewrites60.0%
if -1.02000000000000002e151 < y < 6.1999999999999995e46Initial program 99.3%
Taylor expanded in y around inf
+-commutativeN/A
*-commutativeN/A
lower-fma.f64N/A
lower--.f6451.7
Applied rewrites51.7%
Taylor expanded in x around inf
Applied rewrites55.8%
Taylor expanded in z around 0
Applied rewrites75.7%
Final simplification70.4%
(FPCore (x y z t) :precision binary64 (if (<= z 4.4e+169) (* 1.0 x) (* z t)))
/* Herbie piecewise alternative on z: pass x through for z <= 4.4e169
   (written 1.0*x as generated), otherwise reduce to z * t. */
double code(double x, double y, double z, double t) {
    return (z <= 4.4e+169) ? 1.0 * x : z * t;
}
! Herbie piecewise alternative on z: pass x through for z <= 4.4e169
! (1.0d0*x as generated), otherwise reduce to z * t.
real(8) function code(x, y, z, t)
    real(8), intent (in) :: x
    real(8), intent (in) :: y
    real(8), intent (in) :: z
    real(8), intent (in) :: t
    if (z <= 4.4d+169) then
        code = 1.0d0 * x
    else
        code = z * t
    end if
end function
/**
 * Herbie piecewise alternative on z: x is passed through when z <= 4.4e169
 * (1.0 * x as generated); otherwise the expression reduces to z * t.
 */
public static double code(double x, double y, double z, double t) {
    return (z <= 4.4e+169) ? 1.0 * x : z * t;
}
def code(x, y, z, t):
    """Herbie piecewise alternative on z: x for z <= 4.4e169, else z * t."""
    if z <= 4.4e+169:
        return 1.0 * x
    return z * t
# Herbie piecewise alternative on z: x for z <= 4.4e169, else z * t.
function code(x, y, z, t)
    if z <= 4.4e+169
        return Float64(1.0 * x)
    end
    return Float64(z * t)
end
% Herbie piecewise alternative on z: x for z <= 4.4e169, else z * t.
function tmp_2 = code(x, y, z, t)
  if (z <= 4.4e+169)
    tmp_2 = 1.0 * x;
  else
    tmp_2 = z * t;
  end
end
(* Herbie piecewise alternative on z: pass-through 1.0*x for z <= 4.4e169, otherwise z t. *)
code[x_, y_, z_, t_] := If[LessEqual[z, 4.4e+169], N[(1.0 * x), $MachinePrecision], N[(z * t), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;z \leq 4.4 \cdot 10^{+169}:\\
\;\;\;\;1 \cdot x\\
\mathbf{else}:\\
\;\;\;\;z \cdot t\\
\end{array}
\end{array}
if z < 4.4e169Initial program 97.1%
Taylor expanded in y around inf
+-commutativeN/A
*-commutativeN/A
lower-fma.f64N/A
lower--.f6462.6
Applied rewrites62.6%
Taylor expanded in x around inf
Applied rewrites60.5%
Taylor expanded in z around 0
Applied rewrites70.3%
if 4.4e169 < z Initial program 76.6%
Taylor expanded in y around inf
+-commutativeN/A
*-commutativeN/A
lower-fma.f64N/A
lower--.f6468.8
Applied rewrites68.8%
Taylor expanded in x around 0
Applied rewrites45.9%
(FPCore (x y z t) :precision binary64 (* z t))
/* Herbie low-accuracy alternative: the whole expression collapsed to z * t
   (x and y are intentionally unused in this variant). */
double code(double x, double y, double z, double t) {
    double prod = z * t;
    return prod;
}
! Herbie low-accuracy alternative: the whole expression collapsed to z * t
! (x and y are intentionally unused in this variant).
real(8) function code(x, y, z, t)
    real(8), intent (in) :: x
    real(8), intent (in) :: y
    real(8), intent (in) :: z
    real(8), intent (in) :: t
    real(8) :: prod
    prod = z * t
    code = prod
end function
/** Herbie low-accuracy alternative: reduces to z * t (x and y unused). */
public static double code(double x, double y, double z, double t) {
    final double prod = z * t;
    return prod;
}
def code(x, y, z, t):
    """Herbie low-accuracy alternative: reduces to z * t (x and y unused)."""
    prod = z * t
    return prod
# Herbie low-accuracy alternative: reduces to z * t (x and y unused).
function code(x, y, z, t)
    prod = Float64(z * t)
    return prod
end
% Herbie low-accuracy alternative: reduces to z * t (x and y unused).
function tmp = code(x, y, z, t)
  tmp = z * t;
end
(* Herbie low-accuracy alternative: the whole expression collapsed to z t. *)
code[x_, y_, z_, t_] := N[(z * t), $MachinePrecision]
\begin{array}{l}
\\
z \cdot t
\end{array}
Initial program 95.1%
Taylor expanded in y around inf
+-commutativeN/A
*-commutativeN/A
lower-fma.f64N/A
lower--.f6463.2
Applied rewrites63.2%
Taylor expanded in x around 0
Applied rewrites17.1%
(FPCore (x y z t) :precision binary64 (+ x (* y (* z (- (tanh (/ t y)) (tanh (/ x y)))))))
double code(double x, double y, double z, double t) {
return x + (y * (z * (tanh((t / y)) - tanh((x / y)))));
}
! Herbie re-associated baseline: x + y * (z * (tanh(t/y) - tanh(x/y))).
real(8) function code(x, y, z, t)
    real(8), intent (in) :: x
    real(8), intent (in) :: y
    real(8), intent (in) :: z
    real(8), intent (in) :: t
    real(8) :: diff
    diff = tanh(t / y) - tanh(x / y)
    code = x + (y * (z * diff))
end function
/** Herbie re-associated baseline: x + y * (z * (tanh(t/y) - tanh(x/y))). */
public static double code(double x, double y, double z, double t) {
    final double diff = Math.tanh(t / y) - Math.tanh(x / y);
    return x + y * (z * diff);
}
def code(x, y, z, t):
    """Herbie re-associated baseline: x + y * (z * (tanh(t/y) - tanh(x/y)))."""
    diff = math.tanh(t / y) - math.tanh(x / y)
    return x + y * (z * diff)
# Herbie re-associated baseline: x + y * (z * (tanh(t/y) - tanh(x/y))).
function code(x, y, z, t)
    diff = Float64(tanh(Float64(t / y)) - tanh(Float64(x / y)))
    return Float64(x + Float64(y * Float64(z * diff)))
end
% Herbie re-associated baseline: x + y * (z * (tanh(t/y) - tanh(x/y))).
function tmp = code(x, y, z, t)
  diff = tanh(t / y) - tanh(x / y);
  tmp = x + (y * (z * diff));
end
(* Herbie re-associated baseline: x + y (z (Tanh[t/y] - Tanh[x/y])), machine-precision rounding at each step. *)
code[x_, y_, z_, t_] := N[(x + N[(y * N[(z * N[(N[Tanh[N[(t / y), $MachinePrecision]], $MachinePrecision] - N[Tanh[N[(x / y), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
x + y \cdot \left(z \cdot \left(\tanh \left(\frac{t}{y}\right) - \tanh \left(\frac{x}{y}\right)\right)\right)
\end{array}
herbie shell --seed 2024327
(FPCore (x y z t)
:name "SynthBasics:moogVCF from YampaSynth-0.2"
:precision binary64
:alt
(! :herbie-platform default (+ x (* y (* z (- (tanh (/ t y)) (tanh (/ x y)))))))
(+ x (* (* y z) (- (tanh (/ t y)) (tanh (/ x y))))))