
(FPCore (x y z t) :precision binary64 (+ x (* (* y z) (- (tanh (/ t y)) (tanh (/ x y))))))
double code(double x, double y, double z, double t) {
return x + ((y * z) * (tanh((t / y)) - tanh((x / y))));
}
! x + (y*z) * (tanh(t/y) - tanh(x/y)), evaluated in binary64.
real(8) function code(x, y, z, t)
  implicit none
  real(8), intent(in) :: x, y, z, t
  code = x + ((y * z) * (tanh((t / y)) - tanh((x / y))))
end function code
public static double code(double x, double y, double z, double t) {
    // x + (y*z) * (tanh(t/y) - tanh(x/y)), evaluated in binary64.
    double upper = Math.tanh((t / y));
    double lower = Math.tanh((x / y));
    return x + (y * z) * (upper - lower);
}
def code(x, y, z, t):
    """Return x + (y*z) * (tanh(t/y) - tanh(x/y)) in binary64."""
    gain = y * z
    diff = math.tanh((t / y)) - math.tanh((x / y))
    return x + gain * diff
# Julia: x + (y*z)*(tanh(t/y) - tanh(x/y)); Float64(...) makes each intermediate rounding explicit.
function code(x, y, z, t) return Float64(x + Float64(Float64(y * z) * Float64(tanh(Float64(t / y)) - tanh(Float64(x / y))))) end
% MATLAB: x + (y*z)*(tanh(t/y) - tanh(x/y)), double precision.
function tmp = code(x, y, z, t) tmp = x + ((y * z) * (tanh((t / y)) - tanh((x / y)))); end
(* Mathematica: x + (y*z)*(tanh(t/y) - tanh(x/y)); N[..., $MachinePrecision] forces machine rounding at each step. *)
code[x_, y_, z_, t_] := N[(x + N[(N[(y * z), $MachinePrecision] * N[(N[Tanh[N[(t / y), $MachinePrecision]], $MachinePrecision] - N[Tanh[N[(x / y), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
x + \left(y \cdot z\right) \cdot \left(\tanh \left(\frac{t}{y}\right) - \tanh \left(\frac{x}{y}\right)\right)
\end{array}
Sampling outcomes in binary64 precision:
Herbie found 8 alternatives:
| Alternative | Accuracy | Speedup |
|---|---|---|
(FPCore (x y z t) :precision binary64 (+ x (* (* y z) (- (tanh (/ t y)) (tanh (/ x y))))))
double code(double x, double y, double z, double t) {
return x + ((y * z) * (tanh((t / y)) - tanh((x / y))));
}
! Fortran translation of the original FPCore:
!   x + (y*z) * (tanh(t/y) - tanh(x/y))
real(8) function code(x, y, z, t)
real(8), intent (in) :: x
real(8), intent (in) :: y
real(8), intent (in) :: z
real(8), intent (in) :: t
code = x + ((y * z) * (tanh((t / y)) - tanh((x / y))))
end function
public static double code(double x, double y, double z, double t) {
    // Scale the tanh difference by y*z, then offset by x.
    double gain = y * z;
    double diff = Math.tanh((t / y)) - Math.tanh((x / y));
    return x + gain * diff;
}
def code(x, y, z, t):
    """x + (y*z) * (tanh(t/y) - tanh(x/y)) in binary64."""
    diff = math.tanh((t / y)) - math.tanh((x / y))
    return x + (y * z) * diff
# Julia: x + (y*z)*(tanh(t/y) - tanh(x/y)); Float64(...) makes each intermediate rounding explicit.
function code(x, y, z, t) return Float64(x + Float64(Float64(y * z) * Float64(tanh(Float64(t / y)) - tanh(Float64(x / y))))) end
% MATLAB: x + (y*z)*(tanh(t/y) - tanh(x/y)), double precision.
function tmp = code(x, y, z, t) tmp = x + ((y * z) * (tanh((t / y)) - tanh((x / y)))); end
(* Mathematica: x + (y*z)*(tanh(t/y) - tanh(x/y)) with explicit machine-precision rounding. *)
code[x_, y_, z_, t_] := N[(x + N[(N[(y * z), $MachinePrecision] * N[(N[Tanh[N[(t / y), $MachinePrecision]], $MachinePrecision] - N[Tanh[N[(x / y), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
x + \left(y \cdot z\right) \cdot \left(\tanh \left(\frac{t}{y}\right) - \tanh \left(\frac{x}{y}\right)\right)
\end{array}
(FPCore (x y z t) :precision binary64 (fma (* y (- (tanh (/ t y)) (tanh (/ x y)))) z x))
double code(double x, double y, double z, double t) {
return fma((y * (tanh((t / y)) - tanh((x / y)))), z, x);
}
# fma-based variant: fma(y*(tanh(t/y) - tanh(x/y)), z, x) — one rounding on the final multiply-add.
function code(x, y, z, t) return fma(Float64(y * Float64(tanh(Float64(t / y)) - tanh(Float64(x / y)))), z, x) end
(* fma-based variant: y*(tanh(t/y) - tanh(x/y)) * z + x, rounded per step at machine precision. *)
code[x_, y_, z_, t_] := N[(N[(y * N[(N[Tanh[N[(t / y), $MachinePrecision]], $MachinePrecision] - N[Tanh[N[(x / y), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]), $MachinePrecision] * z + x), $MachinePrecision]
\begin{array}{l}
\\
\mathsf{fma}\left(y \cdot \left(\tanh \left(\frac{t}{y}\right) - \tanh \left(\frac{x}{y}\right)\right), z, x\right)
\end{array}
Initial program 94.6%
lift-+.f64N/A
+-commutativeN/A
lift-*.f64N/A
*-commutativeN/A
lift-*.f64N/A
associate-*r*N/A
lower-fma.f64N/A
lower-*.f6498.1
Applied rewrites98.1%
Final simplification98.1%
(FPCore (x y z t)
:precision binary64
(let* ((t_1 (fma (- (/ t y) (tanh (/ x y))) (* y z) x)))
(if (<= x -1e+108)
t_1
(if (<= x 5.5e+63) (fma (* y (- (tanh (/ t y)) (/ x y))) z x) t_1))))
double code(double x, double y, double z, double t) {
double t_1 = fma(((t / y) - tanh((x / y))), (y * z), x);
double tmp;
if (x <= -1e+108) {
tmp = t_1;
} else if (x <= 5.5e+63) {
tmp = fma((y * (tanh((t / y)) - (x / y))), z, x);
} else {
tmp = t_1;
}
return tmp;
}
# Piecewise variant split on x; t_1 = fma(t/y - tanh(x/y), y*z, x) covers both outer ranges.
function code(x, y, z, t) t_1 = fma(Float64(Float64(t / y) - tanh(Float64(x / y))), Float64(y * z), x) tmp = 0.0 if (x <= -1e+108) tmp = t_1; elseif (x <= 5.5e+63) tmp = fma(Float64(y * Float64(tanh(Float64(t / y)) - Float64(x / y))), z, x); else tmp = t_1; end return tmp end
(* Piecewise variant split on x; t$95$1 covers both outer ranges. *)
code[x_, y_, z_, t_] := Block[{t$95$1 = N[(N[(N[(t / y), $MachinePrecision] - N[Tanh[N[(x / y), $MachinePrecision]], $MachinePrecision]), $MachinePrecision] * N[(y * z), $MachinePrecision] + x), $MachinePrecision]}, If[LessEqual[x, -1e+108], t$95$1, If[LessEqual[x, 5.5e+63], N[(N[(y * N[(N[Tanh[N[(t / y), $MachinePrecision]], $MachinePrecision] - N[(x / y), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] * z + x), $MachinePrecision], t$95$1]]]
\begin{array}{l}
\\
\begin{array}{l}
t_1 := \mathsf{fma}\left(\frac{t}{y} - \tanh \left(\frac{x}{y}\right), y \cdot z, x\right)\\
\mathbf{if}\;x \leq -1 \cdot 10^{+108}:\\
\;\;\;\;t\_1\\
\mathbf{elif}\;x \leq 5.5 \cdot 10^{+63}:\\
\;\;\;\;\mathsf{fma}\left(y \cdot \left(\tanh \left(\frac{t}{y}\right) - \frac{x}{y}\right), z, x\right)\\
\mathbf{else}:\\
\;\;\;\;t\_1\\
\end{array}
\end{array}
if x < -1e108 or 5.50000000000000004e63 < x Initial program 99.0%
Taylor expanded in t around 0
lower-/.f6480.9
Applied rewrites80.9%
lift-+.f64N/A
+-commutativeN/A
lift-*.f64N/A
*-commutativeN/A
lower-fma.f6480.9
Applied rewrites80.9%
if -1e108 < x < 5.50000000000000004e63: Initial program 91.9%
lift-+.f64N/A
+-commutativeN/A
lift-*.f64N/A
*-commutativeN/A
lift-*.f64N/A
associate-*r*N/A
lower-fma.f64N/A
lower-*.f6496.9
Applied rewrites96.9%
Taylor expanded in x around 0
lower-/.f6481.2
Applied rewrites81.2%
Final simplification81.0%
(FPCore (x y z t)
:precision binary64
(if (<= x -1e+109)
(fma (* y (/ t y)) z x)
(if (<= x 3.2e+40)
(fma (* y (- (tanh (/ t y)) (/ x y))) z x)
(fma z (- x) x))))
double code(double x, double y, double z, double t) {
double tmp;
if (x <= -1e+109) {
tmp = fma((y * (t / y)), z, x);
} else if (x <= 3.2e+40) {
tmp = fma((y * (tanh((t / y)) - (x / y))), z, x);
} else {
tmp = fma(z, -x, x);
}
return tmp;
}
# Piecewise variant split on x into three ranges (very negative / mid / large).
function code(x, y, z, t) tmp = 0.0 if (x <= -1e+109) tmp = fma(Float64(y * Float64(t / y)), z, x); elseif (x <= 3.2e+40) tmp = fma(Float64(y * Float64(tanh(Float64(t / y)) - Float64(x / y))), z, x); else tmp = fma(z, Float64(-x), x); end return tmp end
(* Piecewise variant split on x into three ranges. *)
code[x_, y_, z_, t_] := If[LessEqual[x, -1e+109], N[(N[(y * N[(t / y), $MachinePrecision]), $MachinePrecision] * z + x), $MachinePrecision], If[LessEqual[x, 3.2e+40], N[(N[(y * N[(N[Tanh[N[(t / y), $MachinePrecision]], $MachinePrecision] - N[(x / y), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] * z + x), $MachinePrecision], N[(z * (-x) + x), $MachinePrecision]]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;x \leq -1 \cdot 10^{+109}:\\
\;\;\;\;\mathsf{fma}\left(y \cdot \frac{t}{y}, z, x\right)\\
\mathbf{elif}\;x \leq 3.2 \cdot 10^{+40}:\\
\;\;\;\;\mathsf{fma}\left(y \cdot \left(\tanh \left(\frac{t}{y}\right) - \frac{x}{y}\right), z, x\right)\\
\mathbf{else}:\\
\;\;\;\;\mathsf{fma}\left(z, -x, x\right)\\
\end{array}
\end{array}
if x < -9.99999999999999982e108: Initial program 98.0%
Taylor expanded in y around inf
lower-/.f64N/A
lower--.f6441.7
Applied rewrites41.7%
Taylor expanded in t around inf
Applied rewrites71.5%
lift-+.f64N/A
+-commutativeN/A
lift-*.f64N/A
*-commutativeN/A
lift-*.f64N/A
associate-*r*N/A
lower-fma.f64N/A
lower-*.f6471.6
Applied rewrites71.6%
if -9.99999999999999982e108 < x < 3.19999999999999981e40: Initial program 91.6%
lift-+.f64N/A
+-commutativeN/A
lift-*.f64N/A
*-commutativeN/A
lift-*.f64N/A
associate-*r*N/A
lower-fma.f64N/A
lower-*.f6496.8
Applied rewrites96.8%
Taylor expanded in x around 0
lower-/.f6482.5
Applied rewrites82.5%
if 3.19999999999999981e40 < x Initial program 100.0%
Taylor expanded in y around inf
+-commutativeN/A
lower-fma.f64N/A
lower--.f6468.4
Applied rewrites68.4%
Taylor expanded in t around 0
Applied rewrites71.3%
Final simplification77.9%
(FPCore (x y z t) :precision binary64 (let* ((t_1 (* z (- t x)))) (if (<= z -9e-31) t_1 (if (<= z 2.7e+42) (fma z (- x) x) t_1))))
double code(double x, double y, double z, double t) {
double t_1 = z * (t - x);
double tmp;
if (z <= -9e-31) {
tmp = t_1;
} else if (z <= 2.7e+42) {
tmp = fma(z, -x, x);
} else {
tmp = t_1;
}
return tmp;
}
# Piecewise variant split on z; t_1 = z*(t - x) covers both outer ranges.
function code(x, y, z, t) t_1 = Float64(z * Float64(t - x)) tmp = 0.0 if (z <= -9e-31) tmp = t_1; elseif (z <= 2.7e+42) tmp = fma(z, Float64(-x), x); else tmp = t_1; end return tmp end
(* Piecewise variant split on z; t$95$1 = z*(t - x) covers both outer ranges. *)
code[x_, y_, z_, t_] := Block[{t$95$1 = N[(z * N[(t - x), $MachinePrecision]), $MachinePrecision]}, If[LessEqual[z, -9e-31], t$95$1, If[LessEqual[z, 2.7e+42], N[(z * (-x) + x), $MachinePrecision], t$95$1]]]
\begin{array}{l}
\\
\begin{array}{l}
t_1 := z \cdot \left(t - x\right)\\
\mathbf{if}\;z \leq -9 \cdot 10^{-31}:\\
\;\;\;\;t\_1\\
\mathbf{elif}\;z \leq 2.7 \cdot 10^{+42}:\\
\;\;\;\;\mathsf{fma}\left(z, -x, x\right)\\
\mathbf{else}:\\
\;\;\;\;t\_1\\
\end{array}
\end{array}
if z < -9.0000000000000008e-31 or 2.7000000000000001e42 < z Initial program 88.5%
Taylor expanded in y around inf
+-commutativeN/A
lower-fma.f64N/A
lower--.f6448.5
Applied rewrites48.5%
Taylor expanded in z around inf
Applied rewrites47.6%
if -9.0000000000000008e-31 < z < 2.7000000000000001e42: Initial program 99.6%
Taylor expanded in y around inf
+-commutativeN/A
lower-fma.f64N/A
lower--.f6477.2
Applied rewrites77.2%
Taylor expanded in t around 0
Applied rewrites84.3%
(FPCore (x y z t) :precision binary64 (if (<= t -5.8e-68) (* t z) (if (<= t 2.65e-170) (* z (- x)) (* t z))))
double code(double x, double y, double z, double t) {
    /* Narrow middle band of t where t is negligible: keep only -x * z. */
    if (-5.8e-68 < t && t <= 2.65e-170) {
        return z * -x;
    }
    /* Both outer bands (and NaN t) use t * z. */
    return t * z;
}
! Piecewise approximation split on t:
!   t*z on both outer bands, z*(-x) on the narrow band (-5.8e-68, 2.65e-170].
! Fix: the generated `tmp = z * -x` placed two operators consecutively, which
! is nonconforming Fortran (rejected by gfortran); the unary minus must be
! parenthesized: z * (-x).
real(8) function code(x, y, z, t)
  implicit none
  real(8), intent(in) :: x
  real(8), intent(in) :: y
  real(8), intent(in) :: z
  real(8), intent(in) :: t
  real(8) :: tmp
  if (t <= (-5.8d-68)) then
    tmp = t * z
  else if (t <= 2.65d-170) then
    tmp = z * (-x)
  else
    tmp = t * z
  end if
  code = tmp
end function code
public static double code(double x, double y, double z, double t) {
    // Narrow middle band of t where t is negligible: keep only -x * z.
    if (-5.8e-68 < t && t <= 2.65e-170) {
        return z * -x;
    }
    // Both outer bands (and NaN t) use t * z.
    return t * z;
}
def code(x, y, z, t):
    """Piecewise approximation split on t: t*z on the outer bands, z*(-x)
    on the narrow band (-5.8e-68, 2.65e-170].

    Fix: the generated single-line form (``tmp = 0 if t <= ...: ... elif``)
    is not valid Python syntax — compound if/elif statements cannot share a
    line with simple statements — so it is rewritten as the equivalent
    multi-line function.
    """
    if t <= -5.8e-68:
        tmp = t * z
    elif t <= 2.65e-170:
        tmp = z * -x
    else:
        tmp = t * z
    return tmp
# Piecewise variant split on t: t*z on the outer bands, z*(-x) on the narrow middle band.
function code(x, y, z, t) tmp = 0.0 if (t <= -5.8e-68) tmp = Float64(t * z); elseif (t <= 2.65e-170) tmp = Float64(z * Float64(-x)); else tmp = Float64(t * z); end return tmp end
% Piecewise variant split on t: t*z on the outer bands, z*(-x) on the narrow middle band.
function tmp_2 = code(x, y, z, t) tmp = 0.0; if (t <= -5.8e-68) tmp = t * z; elseif (t <= 2.65e-170) tmp = z * -x; else tmp = t * z; end tmp_2 = tmp; end
(* Piecewise variant split on t: t*z on the outer bands, z*(-x) on the narrow middle band. *)
code[x_, y_, z_, t_] := If[LessEqual[t, -5.8e-68], N[(t * z), $MachinePrecision], If[LessEqual[t, 2.65e-170], N[(z * (-x)), $MachinePrecision], N[(t * z), $MachinePrecision]]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;t \leq -5.8 \cdot 10^{-68}:\\
\;\;\;\;t \cdot z\\
\mathbf{elif}\;t \leq 2.65 \cdot 10^{-170}:\\
\;\;\;\;z \cdot \left(-x\right)\\
\mathbf{else}:\\
\;\;\;\;t \cdot z\\
\end{array}
\end{array}
if t < -5.8000000000000001e-68 or 2.65e-170 < t Initial program 96.8%
Taylor expanded in y around inf
+-commutativeN/A
lower-fma.f64N/A
lower--.f6458.2
Applied rewrites58.2%
Taylor expanded in t around inf
Applied rewrites22.6%
if -5.8000000000000001e-68 < t < 2.65e-170: Initial program 90.2%
Taylor expanded in y around inf
+-commutativeN/A
lower-fma.f64N/A
lower--.f6476.7
Applied rewrites76.7%
Applied rewrites39.5%
Taylor expanded in z around inf
Applied rewrites29.1%
Taylor expanded in t around 0
Applied rewrites23.5%
Final simplification22.9%
(FPCore (x y z t) :precision binary64 (if (<= y 1.8e-121) (fma z (- x) x) (fma z (- t x) x)))
double code(double x, double y, double z, double t) {
double tmp;
if (y <= 1.8e-121) {
tmp = fma(z, -x, x);
} else {
tmp = fma(z, (t - x), x);
}
return tmp;
}
# Piecewise variant split on y: fma(z, -x, x) for tiny y, fma(z, t - x, x) otherwise.
function code(x, y, z, t) tmp = 0.0 if (y <= 1.8e-121) tmp = fma(z, Float64(-x), x); else tmp = fma(z, Float64(t - x), x); end return tmp end
(* Piecewise variant split on y: z*(-x) + x for tiny y, z*(t - x) + x otherwise. *)
code[x_, y_, z_, t_] := If[LessEqual[y, 1.8e-121], N[(z * (-x) + x), $MachinePrecision], N[(z * N[(t - x), $MachinePrecision] + x), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;y \leq 1.8 \cdot 10^{-121}:\\
\;\;\;\;\mathsf{fma}\left(z, -x, x\right)\\
\mathbf{else}:\\
\;\;\;\;\mathsf{fma}\left(z, t - x, x\right)\\
\end{array}
\end{array}
if y < 1.79999999999999992e-121: Initial program 93.7%
Taylor expanded in y around inf
+-commutativeN/A
lower-fma.f64N/A
lower--.f6457.3
Applied rewrites57.3%
Taylor expanded in t around 0
Applied rewrites55.3%
if 1.79999999999999992e-121 < y Initial program 96.5%
Taylor expanded in y around inf
+-commutativeN/A
lower-fma.f64N/A
lower--.f6478.5
Applied rewrites78.5%
(FPCore (x y z t) :precision binary64 (* z (- t x)))
double code(double x, double y, double z, double t) {
    /* Simplified regime: z * (t - x); y is unused here. */
    double diff = t - x;
    return z * diff;
}
! Simplified regime: z * (t - x); y is unused here.
real(8) function code(x, y, z, t)
real(8), intent (in) :: x
real(8), intent (in) :: y
real(8), intent (in) :: z
real(8), intent (in) :: t
code = z * (t - x)
end function
public static double code(double x, double y, double z, double t) {
    // Simplified regime: z * (t - x); y is unused here.
    double diff = t - x;
    return z * diff;
}
def code(x, y, z, t):
    """Simplified regime: z * (t - x); y is unused here."""
    diff = t - x
    return z * diff
# Simplified regime: z*(t - x); y is unused here.
function code(x, y, z, t) return Float64(z * Float64(t - x)) end
% Simplified regime: z*(t - x); y is unused here.
function tmp = code(x, y, z, t) tmp = z * (t - x); end
(* Simplified regime: z*(t - x); y is unused here. *)
code[x_, y_, z_, t_] := N[(z * N[(t - x), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
z \cdot \left(t - x\right)
\end{array}
Initial program 94.6%
Taylor expanded in y around inf
+-commutativeN/A
lower-fma.f64N/A
lower--.f6464.3
Applied rewrites64.3%
Taylor expanded in z around inf
Applied rewrites26.8%
(FPCore (x y z t) :precision binary64 (* t z))
double code(double x, double y, double z, double t) {
    /* Simplified regime: t * z; x and y are unused here. */
    double product = t * z;
    return product;
}
! Simplified regime: t * z; x and y are unused here.
real(8) function code(x, y, z, t)
real(8), intent (in) :: x
real(8), intent (in) :: y
real(8), intent (in) :: z
real(8), intent (in) :: t
code = t * z
end function
public static double code(double x, double y, double z, double t) {
    // Simplified regime: t * z; x and y are unused here.
    double product = t * z;
    return product;
}
def code(x, y, z, t):
    """Simplified regime: t * z; x and y are unused here."""
    product = t * z
    return product
# Simplified regime: t*z; x and y are unused here.
function code(x, y, z, t) return Float64(t * z) end
% Simplified regime: t*z; x and y are unused here.
function tmp = code(x, y, z, t) tmp = t * z; end
(* Simplified regime: t*z; x and y are unused here. *)
code[x_, y_, z_, t_] := N[(t * z), $MachinePrecision]
\begin{array}{l}
\\
t \cdot z
\end{array}
Initial program 94.6%
Taylor expanded in y around inf
+-commutativeN/A
lower-fma.f64N/A
lower--.f6464.3
Applied rewrites64.3%
Taylor expanded in t around inf
Applied rewrites17.7%
Final simplification17.7%
(FPCore (x y z t) :precision binary64 (+ x (* y (* z (- (tanh (/ t y)) (tanh (/ x y)))))))
double code(double x, double y, double z, double t) {
return x + (y * (z * (tanh((t / y)) - tanh((x / y)))));
}
! Re-associated form: x + y * (z * (tanh(t/y) - tanh(x/y))).
real(8) function code(x, y, z, t)
real(8), intent (in) :: x
real(8), intent (in) :: y
real(8), intent (in) :: z
real(8), intent (in) :: t
code = x + (y * (z * (tanh((t / y)) - tanh((x / y)))))
end function
public static double code(double x, double y, double z, double t) {
    // Re-associated form: x + y * (z * (tanh(t/y) - tanh(x/y))).
    double inner = z * (Math.tanh((t / y)) - Math.tanh((x / y)));
    return x + (y * inner);
}
def code(x, y, z, t):
    """Re-associated form: x + y * (z * (tanh(t/y) - tanh(x/y)))."""
    inner = z * (math.tanh((t / y)) - math.tanh((x / y)))
    return x + (y * inner)
# Re-associated form: x + y*(z*(tanh(t/y) - tanh(x/y))).
function code(x, y, z, t) return Float64(x + Float64(y * Float64(z * Float64(tanh(Float64(t / y)) - tanh(Float64(x / y)))))) end
% Re-associated form: x + y*(z*(tanh(t/y) - tanh(x/y))).
function tmp = code(x, y, z, t) tmp = x + (y * (z * (tanh((t / y)) - tanh((x / y))))); end
(* Re-associated form: x + y*(z*(tanh(t/y) - tanh(x/y))). *)
code[x_, y_, z_, t_] := N[(x + N[(y * N[(z * N[(N[Tanh[N[(t / y), $MachinePrecision]], $MachinePrecision] - N[Tanh[N[(x / y), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
x + y \cdot \left(z \cdot \left(\tanh \left(\frac{t}{y}\right) - \tanh \left(\frac{x}{y}\right)\right)\right)
\end{array}
herbie shell --seed 2024223
(FPCore (x y z t)
:name "SynthBasics:moogVCF from YampaSynth-0.2"
:precision binary64
:alt
(! :herbie-platform default (+ x (* y (* z (- (tanh (/ t y)) (tanh (/ x y)))))))
(+ x (* (* y z) (- (tanh (/ t y)) (tanh (/ x y))))))