
(FPCore (x y z t) :precision binary64 (+ x (* (* y z) (- (tanh (/ t y)) (tanh (/ x y))))))
double code(double x, double y, double z, double t) {
return x + ((y * z) * (tanh((t / y)) - tanh((x / y))));
}
real(8) function code(x, y, z, t)
real(8), intent (in) :: x
real(8), intent (in) :: y
real(8), intent (in) :: z
real(8), intent (in) :: t
code = x + ((y * z) * (tanh((t / y)) - tanh((x / y))))
end function
public static double code(double x, double y, double z, double t) {
return x + ((y * z) * (Math.tanh((t / y)) - Math.tanh((x / y))));
}
def code(x, y, z, t): return x + ((y * z) * (math.tanh((t / y)) - math.tanh((x / y))))
function code(x, y, z, t) return Float64(x + Float64(Float64(y * z) * Float64(tanh(Float64(t / y)) - tanh(Float64(x / y))))) end
function tmp = code(x, y, z, t) tmp = x + ((y * z) * (tanh((t / y)) - tanh((x / y)))); end
code[x_, y_, z_, t_] := N[(x + N[(N[(y * z), $MachinePrecision] * N[(N[Tanh[N[(t / y), $MachinePrecision]], $MachinePrecision] - N[Tanh[N[(x / y), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
x + \left(y \cdot z\right) \cdot \left(\tanh \left(\frac{t}{y}\right) - \tanh \left(\frac{x}{y}\right)\right)
\end{array}
Sampling outcomes in binary64 precision:
Herbie found 10 alternatives:
| Alternative | Accuracy | Speedup |
|---|---|---|
(FPCore (x y z t) :precision binary64 (+ x (* (* y z) (- (tanh (/ t y)) (tanh (/ x y))))))
double code(double x, double y, double z, double t) {
return x + ((y * z) * (tanh((t / y)) - tanh((x / y))));
}
real(8) function code(x, y, z, t)
real(8), intent (in) :: x
real(8), intent (in) :: y
real(8), intent (in) :: z
real(8), intent (in) :: t
code = x + ((y * z) * (tanh((t / y)) - tanh((x / y))))
end function
public static double code(double x, double y, double z, double t) {
return x + ((y * z) * (Math.tanh((t / y)) - Math.tanh((x / y))));
}
def code(x, y, z, t): return x + ((y * z) * (math.tanh((t / y)) - math.tanh((x / y))))
function code(x, y, z, t) return Float64(x + Float64(Float64(y * z) * Float64(tanh(Float64(t / y)) - tanh(Float64(x / y))))) end
function tmp = code(x, y, z, t) tmp = x + ((y * z) * (tanh((t / y)) - tanh((x / y)))); end
code[x_, y_, z_, t_] := N[(x + N[(N[(y * z), $MachinePrecision] * N[(N[Tanh[N[(t / y), $MachinePrecision]], $MachinePrecision] - N[Tanh[N[(x / y), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
x + \left(y \cdot z\right) \cdot \left(\tanh \left(\frac{t}{y}\right) - \tanh \left(\frac{x}{y}\right)\right)
\end{array}
(FPCore (x y z t) :precision binary64 (fma (fma (tanh (/ t y)) z (* (tanh (/ x y)) (- z))) y x))
double code(double x, double y, double z, double t) {
return fma(fma(tanh((t / y)), z, (tanh((x / y)) * -z)), y, x);
}
function code(x, y, z, t) return fma(fma(tanh(Float64(t / y)), z, Float64(tanh(Float64(x / y)) * Float64(-z))), y, x) end
code[x_, y_, z_, t_] := N[(N[(N[Tanh[N[(t / y), $MachinePrecision]], $MachinePrecision] * z + N[(N[Tanh[N[(x / y), $MachinePrecision]], $MachinePrecision] * (-z)), $MachinePrecision]), $MachinePrecision] * y + x), $MachinePrecision]
\begin{array}{l}
\\
\mathsf{fma}\left(\mathsf{fma}\left(\tanh \left(\frac{t}{y}\right), z, \tanh \left(\frac{x}{y}\right) \cdot \left(-z\right)\right), y, x\right)
\end{array}
Initial program 93.6%
lift-*.f64N/A
lift-/.f64N/A
lift-tanh.f64N/A
lift-/.f64N/A
lift-tanh.f64N/A
lift--.f64N/A
lift-*.f64N/A
+-commutativeN/A
lift-*.f64N/A
lift-*.f64N/A
associate-*l*N/A
*-commutativeN/A
lower-fma.f64N/A
lower-*.f6498.0
Applied rewrites98.0%
lift-/.f64N/A
lift-tanh.f64N/A
lift-/.f64N/A
lift-tanh.f64N/A
sub-negN/A
distribute-rgt-inN/A
lower-fma.f64N/A
lower-*.f64N/A
lower-neg.f6498.1
Applied rewrites98.1%
Final simplification98.1%
(FPCore (x y z t) :precision binary64 (fma (* z (- (tanh (/ t y)) (tanh (/ x y)))) y x))
double code(double x, double y, double z, double t) {
return fma((z * (tanh((t / y)) - tanh((x / y)))), y, x);
}
function code(x, y, z, t) return fma(Float64(z * Float64(tanh(Float64(t / y)) - tanh(Float64(x / y)))), y, x) end
code[x_, y_, z_, t_] := N[(N[(z * N[(N[Tanh[N[(t / y), $MachinePrecision]], $MachinePrecision] - N[Tanh[N[(x / y), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]), $MachinePrecision] * y + x), $MachinePrecision]
\begin{array}{l}
\\
\mathsf{fma}\left(z \cdot \left(\tanh \left(\frac{t}{y}\right) - \tanh \left(\frac{x}{y}\right)\right), y, x\right)
\end{array}
Initial program 93.6%
lift-*.f64N/A
lift-/.f64N/A
lift-tanh.f64N/A
lift-/.f64N/A
lift-tanh.f64N/A
lift--.f64N/A
lift-*.f64N/A
+-commutativeN/A
lift-*.f64N/A
lift-*.f64N/A
associate-*l*N/A
*-commutativeN/A
lower-fma.f64N/A
lower-*.f6498.0
Applied rewrites98.0%
(FPCore (x y z t)
:precision binary64
(let* ((t_1 (* t (/ z (* x x)))))
(if (<= x -7.8e+59)
(+ x (* x (- (fma t t_1 (- z)) (fma t t_1 (/ (* t (- z)) x)))))
(if (<= x 8.2e+113)
(fma (* y (- (tanh (/ t y)) (/ x y))) z x)
(+ x (* t z))))))
double code(double x, double y, double z, double t) {
double t_1 = t * (z / (x * x));
double tmp;
if (x <= -7.8e+59) {
tmp = x + (x * (fma(t, t_1, -z) - fma(t, t_1, ((t * -z) / x))));
} else if (x <= 8.2e+113) {
tmp = fma((y * (tanh((t / y)) - (x / y))), z, x);
} else {
tmp = x + (t * z);
}
return tmp;
}
function code(x, y, z, t) t_1 = Float64(t * Float64(z / Float64(x * x))) tmp = 0.0 if (x <= -7.8e+59) tmp = Float64(x + Float64(x * Float64(fma(t, t_1, Float64(-z)) - fma(t, t_1, Float64(Float64(t * Float64(-z)) / x))))); elseif (x <= 8.2e+113) tmp = fma(Float64(y * Float64(tanh(Float64(t / y)) - Float64(x / y))), z, x); else tmp = Float64(x + Float64(t * z)); end return tmp end
code[x_, y_, z_, t_] := Block[{t$95$1 = N[(t * N[(z / N[(x * x), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]}, If[LessEqual[x, -7.8e+59], N[(x + N[(x * N[(N[(t * t$95$1 + (-z)), $MachinePrecision] - N[(t * t$95$1 + N[(N[(t * (-z)), $MachinePrecision] / x), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], If[LessEqual[x, 8.2e+113], N[(N[(y * N[(N[Tanh[N[(t / y), $MachinePrecision]], $MachinePrecision] - N[(x / y), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] * z + x), $MachinePrecision], N[(x + N[(t * z), $MachinePrecision]), $MachinePrecision]]]]
\begin{array}{l}
\\
\begin{array}{l}
t_1 := t \cdot \frac{z}{x \cdot x}\\
\mathbf{if}\;x \leq -7.8 \cdot 10^{+59}:\\
\;\;\;\;x + x \cdot \left(\mathsf{fma}\left(t, t\_1, -z\right) - \mathsf{fma}\left(t, t\_1, \frac{t \cdot \left(-z\right)}{x}\right)\right)\\
\mathbf{elif}\;x \leq 8.2 \cdot 10^{+113}:\\
\;\;\;\;\mathsf{fma}\left(y \cdot \left(\tanh \left(\frac{t}{y}\right) - \frac{x}{y}\right), z, x\right)\\
\mathbf{else}:\\
\;\;\;\;x + t \cdot z\\
\end{array}
\end{array}
if x < -7.80000000000000043e59Initial program 100.0%
Taylor expanded in y around inf
lower-/.f64N/A
lower--.f6451.6
Applied rewrites51.6%
div-subN/A
lift-/.f64N/A
lift-/.f64N/A
flip--N/A
lower-/.f64N/A
lower--.f64N/A
lower-*.f64N/A
lift-/.f64N/A
lift-/.f64N/A
frac-timesN/A
lift-*.f64N/A
lift-*.f64N/A
lower-/.f64N/A
lower-+.f6417.8
Applied rewrites17.8%
lift-/.f64N/A
lift-/.f64N/A
times-fracN/A
lift-/.f64N/A
lift-/.f64N/A
difference-of-squaresN/A
lift-+.f64N/A
lower-*.f64N/A
lift-+.f64N/A
lift-/.f64N/A
div-invN/A
lift-/.f64N/A
div-invN/A
distribute-rgt-outN/A
lower-*.f64N/A
lower-/.f64N/A
lower-+.f64N/A
lift-/.f64N/A
lift-/.f64N/A
sub-divN/A
lift--.f64N/A
lower-/.f6432.5
Applied rewrites32.5%
Taylor expanded in x around inf
Applied rewrites78.2%
if -7.80000000000000043e59 < x < 8.19999999999999985e113Initial program 90.2%
Taylor expanded in x around 0
lower-/.f6474.9
Applied rewrites74.9%
lift-*.f64N/A
lift-/.f64N/A
lift-tanh.f64N/A
lift-/.f64N/A
lift--.f64N/A
lift-*.f64N/A
+-commutativeN/A
lift-*.f64N/A
*-commutativeN/A
lift-*.f64N/A
associate-*r*N/A
lower-fma.f64N/A
lower-*.f6481.8
Applied rewrites81.8%
if 8.19999999999999985e113 < x Initial program 99.9%
Taylor expanded in y around inf
lower-/.f64N/A
lower--.f6443.1
Applied rewrites43.1%
Taylor expanded in t around inf
*-commutativeN/A
lower-*.f6482.0
Applied rewrites82.0%
Final simplification81.1%
(FPCore (x y z t) :precision binary64 (if (<= y 3.2e-68) (fma z (- x) x) (fma (* z (- (/ t y) (tanh (/ x y)))) y x)))
double code(double x, double y, double z, double t) {
double tmp;
if (y <= 3.2e-68) {
tmp = fma(z, -x, x);
} else {
tmp = fma((z * ((t / y) - tanh((x / y)))), y, x);
}
return tmp;
}
function code(x, y, z, t) tmp = 0.0 if (y <= 3.2e-68) tmp = fma(z, Float64(-x), x); else tmp = fma(Float64(z * Float64(Float64(t / y) - tanh(Float64(x / y)))), y, x); end return tmp end
code[x_, y_, z_, t_] := If[LessEqual[y, 3.2e-68], N[(z * (-x) + x), $MachinePrecision], N[(N[(z * N[(N[(t / y), $MachinePrecision] - N[Tanh[N[(x / y), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]), $MachinePrecision] * y + x), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;y \leq 3.2 \cdot 10^{-68}:\\
\;\;\;\;\mathsf{fma}\left(z, -x, x\right)\\
\mathbf{else}:\\
\;\;\;\;\mathsf{fma}\left(z \cdot \left(\frac{t}{y} - \tanh \left(\frac{x}{y}\right)\right), y, x\right)\\
\end{array}
\end{array}
if y < 3.1999999999999999e-68Initial program 94.5%
Taylor expanded in y around inf
+-commutativeN/A
lower-fma.f64N/A
lower--.f6454.8
Applied rewrites54.8%
Taylor expanded in t around 0
mul-1-negN/A
lower-neg.f6449.6
Applied rewrites49.6%
if 3.1999999999999999e-68 < y Initial program 91.8%
lift-*.f64N/A
lift-/.f64N/A
lift-tanh.f64N/A
lift-/.f64N/A
lift-tanh.f64N/A
lift--.f64N/A
lift-*.f64N/A
+-commutativeN/A
lift-*.f64N/A
lift-*.f64N/A
associate-*l*N/A
*-commutativeN/A
lower-fma.f64N/A
lower-*.f6496.3
Applied rewrites96.3%
Taylor expanded in t around 0
lower-/.f6480.2
Applied rewrites80.2%
(FPCore (x y z t) :precision binary64 (let* ((t_1 (* z (- t x)))) (if (<= z -4.2e-10) t_1 (if (<= z 6800.0) (fma z (- x) x) t_1))))
double code(double x, double y, double z, double t) {
double t_1 = z * (t - x);
double tmp;
if (z <= -4.2e-10) {
tmp = t_1;
} else if (z <= 6800.0) {
tmp = fma(z, -x, x);
} else {
tmp = t_1;
}
return tmp;
}
function code(x, y, z, t) t_1 = Float64(z * Float64(t - x)) tmp = 0.0 if (z <= -4.2e-10) tmp = t_1; elseif (z <= 6800.0) tmp = fma(z, Float64(-x), x); else tmp = t_1; end return tmp end
code[x_, y_, z_, t_] := Block[{t$95$1 = N[(z * N[(t - x), $MachinePrecision]), $MachinePrecision]}, If[LessEqual[z, -4.2e-10], t$95$1, If[LessEqual[z, 6800.0], N[(z * (-x) + x), $MachinePrecision], t$95$1]]]
\begin{array}{l}
\\
\begin{array}{l}
t_1 := z \cdot \left(t - x\right)\\
\mathbf{if}\;z \leq -4.2 \cdot 10^{-10}:\\
\;\;\;\;t\_1\\
\mathbf{elif}\;z \leq 6800:\\
\;\;\;\;\mathsf{fma}\left(z, -x, x\right)\\
\mathbf{else}:\\
\;\;\;\;t\_1\\
\end{array}
\end{array}
if z < -4.2e-10 or 6800 < z Initial program 88.6%
Taylor expanded in y around inf
+-commutativeN/A
lower-fma.f64N/A
lower--.f6446.7
Applied rewrites46.7%
Taylor expanded in z around inf
lower-*.f64N/A
lower--.f6446.4
Applied rewrites46.4%
if -4.2e-10 < z < 6800Initial program 99.2%
Taylor expanded in y around inf
+-commutativeN/A
lower-fma.f64N/A
lower--.f6478.7
Applied rewrites78.7%
Taylor expanded in t around 0
mul-1-negN/A
lower-neg.f6486.1
Applied rewrites86.1%
(FPCore (x y z t) :precision binary64 (let* ((t_1 (* z (- t x)))) (if (<= z -2.65e+36) t_1 (if (<= z 360000.0) (+ x (* t z)) t_1))))
double code(double x, double y, double z, double t) {
double t_1 = z * (t - x);
double tmp;
if (z <= -2.65e+36) {
tmp = t_1;
} else if (z <= 360000.0) {
tmp = x + (t * z);
} else {
tmp = t_1;
}
return tmp;
}
real(8) function code(x, y, z, t)
real(8), intent (in) :: x
real(8), intent (in) :: y
real(8), intent (in) :: z
real(8), intent (in) :: t
real(8) :: t_1
real(8) :: tmp
t_1 = z * (t - x)
if (z <= (-2.65d+36)) then
tmp = t_1
else if (z <= 360000.0d0) then
tmp = x + (t * z)
else
tmp = t_1
end if
code = tmp
end function
public static double code(double x, double y, double z, double t) {
double t_1 = z * (t - x);
double tmp;
if (z <= -2.65e+36) {
tmp = t_1;
} else if (z <= 360000.0) {
tmp = x + (t * z);
} else {
tmp = t_1;
}
return tmp;
}
def code(x, y, z, t): t_1 = z * (t - x) tmp = 0 if z <= -2.65e+36: tmp = t_1 elif z <= 360000.0: tmp = x + (t * z) else: tmp = t_1 return tmp
function code(x, y, z, t) t_1 = Float64(z * Float64(t - x)) tmp = 0.0 if (z <= -2.65e+36) tmp = t_1; elseif (z <= 360000.0) tmp = Float64(x + Float64(t * z)); else tmp = t_1; end return tmp end
function tmp_2 = code(x, y, z, t) t_1 = z * (t - x); tmp = 0.0; if (z <= -2.65e+36) tmp = t_1; elseif (z <= 360000.0) tmp = x + (t * z); else tmp = t_1; end tmp_2 = tmp; end
code[x_, y_, z_, t_] := Block[{t$95$1 = N[(z * N[(t - x), $MachinePrecision]), $MachinePrecision]}, If[LessEqual[z, -2.65e+36], t$95$1, If[LessEqual[z, 360000.0], N[(x + N[(t * z), $MachinePrecision]), $MachinePrecision], t$95$1]]]
\begin{array}{l}
\\
\begin{array}{l}
t_1 := z \cdot \left(t - x\right)\\
\mathbf{if}\;z \leq -2.65 \cdot 10^{+36}:\\
\;\;\;\;t\_1\\
\mathbf{elif}\;z \leq 360000:\\
\;\;\;\;x + t \cdot z\\
\mathbf{else}:\\
\;\;\;\;t\_1\\
\end{array}
\end{array}
if z < -2.65e36 or 3.6e5 < z Initial program 87.9%
Taylor expanded in y around inf
+-commutativeN/A
lower-fma.f64N/A
lower--.f6447.1
Applied rewrites47.1%
Taylor expanded in z around inf
lower-*.f64N/A
lower--.f6447.1
Applied rewrites47.1%
if -2.65e36 < z < 3.6e5Initial program 99.2%
Taylor expanded in y around inf
lower-/.f64N/A
lower--.f6460.0
Applied rewrites60.0%
Taylor expanded in t around inf
*-commutativeN/A
lower-*.f6478.0
Applied rewrites78.0%
Final simplification62.8%
(FPCore (x y z t) :precision binary64 (if (<= t -6e-161) (* t z) (if (<= t 1.35e-123) (* z (- x)) (* t z))))
double code(double x, double y, double z, double t) {
double tmp;
if (t <= -6e-161) {
tmp = t * z;
} else if (t <= 1.35e-123) {
tmp = z * -x;
} else {
tmp = t * z;
}
return tmp;
}
real(8) function code(x, y, z, t)
    ! Piecewise approximation split on the magnitude of t.
    ! Fix: the middle branch read "tmp = z * -x", which places two
    ! operators side by side — invalid standard Fortran. The unary
    ! minus must be parenthesized: z * (-x).
    real(8), intent (in) :: x
    real(8), intent (in) :: y
    real(8), intent (in) :: z
    real(8), intent (in) :: t
    real(8) :: tmp
    if (t <= (-6d-161)) then
        tmp = t * z
    else if (t <= 1.35d-123) then
        tmp = z * (-x)
    else
        tmp = t * z
    end if
    code = tmp
end function
public static double code(double x, double y, double z, double t) {
double tmp;
if (t <= -6e-161) {
tmp = t * z;
} else if (t <= 1.35e-123) {
tmp = z * -x;
} else {
tmp = t * z;
}
return tmp;
}
def code(x, y, z, t): tmp = 0 if t <= -6e-161: tmp = t * z elif t <= 1.35e-123: tmp = z * -x else: tmp = t * z return tmp
function code(x, y, z, t) tmp = 0.0 if (t <= -6e-161) tmp = Float64(t * z); elseif (t <= 1.35e-123) tmp = Float64(z * Float64(-x)); else tmp = Float64(t * z); end return tmp end
function tmp_2 = code(x, y, z, t) tmp = 0.0; if (t <= -6e-161) tmp = t * z; elseif (t <= 1.35e-123) tmp = z * -x; else tmp = t * z; end tmp_2 = tmp; end
code[x_, y_, z_, t_] := If[LessEqual[t, -6e-161], N[(t * z), $MachinePrecision], If[LessEqual[t, 1.35e-123], N[(z * (-x)), $MachinePrecision], N[(t * z), $MachinePrecision]]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;t \leq -6 \cdot 10^{-161}:\\
\;\;\;\;t \cdot z\\
\mathbf{elif}\;t \leq 1.35 \cdot 10^{-123}:\\
\;\;\;\;z \cdot \left(-x\right)\\
\mathbf{else}:\\
\;\;\;\;t \cdot z\\
\end{array}
\end{array}
if t < -5.99999999999999977e-161 or 1.35e-123 < t Initial program 95.4%
Taylor expanded in y around inf
+-commutativeN/A
lower-fma.f64N/A
lower--.f6453.6
Applied rewrites53.6%
Taylor expanded in t around inf
*-commutativeN/A
lower-*.f6423.8
Applied rewrites23.8%
if -5.99999999999999977e-161 < t < 1.35e-123Initial program 89.0%
Taylor expanded in y around inf
+-commutativeN/A
lower-fma.f64N/A
lower--.f6484.1
Applied rewrites84.1%
Taylor expanded in z around inf
lower-*.f64N/A
lower--.f6438.3
Applied rewrites38.3%
Taylor expanded in t around 0
mul-1-negN/A
lower-neg.f6430.3
Applied rewrites30.3%
Final simplification25.6%
(FPCore (x y z t) :precision binary64 (if (<= y 7.5e-68) (fma z (- x) x) (fma z (- t x) x)))
double code(double x, double y, double z, double t) {
double tmp;
if (y <= 7.5e-68) {
tmp = fma(z, -x, x);
} else {
tmp = fma(z, (t - x), x);
}
return tmp;
}
function code(x, y, z, t) tmp = 0.0 if (y <= 7.5e-68) tmp = fma(z, Float64(-x), x); else tmp = fma(z, Float64(t - x), x); end return tmp end
code[x_, y_, z_, t_] := If[LessEqual[y, 7.5e-68], N[(z * (-x) + x), $MachinePrecision], N[(z * N[(t - x), $MachinePrecision] + x), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;y \leq 7.5 \cdot 10^{-68}:\\
\;\;\;\;\mathsf{fma}\left(z, -x, x\right)\\
\mathbf{else}:\\
\;\;\;\;\mathsf{fma}\left(z, t - x, x\right)\\
\end{array}
\end{array}
if y < 7.50000000000000081e-68Initial program 94.5%
Taylor expanded in y around inf
+-commutativeN/A
lower-fma.f64N/A
lower--.f6454.8
Applied rewrites54.8%
Taylor expanded in t around 0
mul-1-negN/A
lower-neg.f6449.6
Applied rewrites49.6%
if 7.50000000000000081e-68 < y Initial program 91.8%
Taylor expanded in y around inf
+-commutativeN/A
lower-fma.f64N/A
lower--.f6477.0
Applied rewrites77.0%
(FPCore (x y z t) :precision binary64 (* z (- t x)))
double code(double x, double y, double z, double t) {
return z * (t - x);
}
real(8) function code(x, y, z, t)
real(8), intent (in) :: x
real(8), intent (in) :: y
real(8), intent (in) :: z
real(8), intent (in) :: t
code = z * (t - x)
end function
public static double code(double x, double y, double z, double t) {
return z * (t - x);
}
def code(x, y, z, t): return z * (t - x)
function code(x, y, z, t) return Float64(z * Float64(t - x)) end
function tmp = code(x, y, z, t) tmp = z * (t - x); end
code[x_, y_, z_, t_] := N[(z * N[(t - x), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
z \cdot \left(t - x\right)
\end{array}
Initial program 93.6%
Taylor expanded in y around inf
+-commutativeN/A
lower-fma.f64N/A
lower--.f6462.0
Applied rewrites62.0%
Taylor expanded in z around inf
lower-*.f64N/A
lower--.f6428.7
Applied rewrites28.7%
(FPCore (x y z t) :precision binary64 (* t z))
double code(double x, double y, double z, double t) {
return t * z;
}
real(8) function code(x, y, z, t)
real(8), intent (in) :: x
real(8), intent (in) :: y
real(8), intent (in) :: z
real(8), intent (in) :: t
code = t * z
end function
public static double code(double x, double y, double z, double t) {
return t * z;
}
def code(x, y, z, t): return t * z
function code(x, y, z, t) return Float64(t * z) end
function tmp = code(x, y, z, t) tmp = t * z; end
code[x_, y_, z_, t_] := N[(t * z), $MachinePrecision]
\begin{array}{l}
\\
t \cdot z
\end{array}
Initial program 93.6%
Taylor expanded in y around inf
+-commutativeN/A
lower-fma.f64N/A
lower--.f6462.0
Applied rewrites62.0%
Taylor expanded in t around inf
*-commutativeN/A
lower-*.f6420.2
Applied rewrites20.2%
Final simplification20.2%
(FPCore (x y z t) :precision binary64 (+ x (* y (* z (- (tanh (/ t y)) (tanh (/ x y)))))))
double code(double x, double y, double z, double t) {
return x + (y * (z * (tanh((t / y)) - tanh((x / y)))));
}
real(8) function code(x, y, z, t)
real(8), intent (in) :: x
real(8), intent (in) :: y
real(8), intent (in) :: z
real(8), intent (in) :: t
code = x + (y * (z * (tanh((t / y)) - tanh((x / y)))))
end function
public static double code(double x, double y, double z, double t) {
return x + (y * (z * (Math.tanh((t / y)) - Math.tanh((x / y)))));
}
def code(x, y, z, t): return x + (y * (z * (math.tanh((t / y)) - math.tanh((x / y)))))
function code(x, y, z, t) return Float64(x + Float64(y * Float64(z * Float64(tanh(Float64(t / y)) - tanh(Float64(x / y)))))) end
function tmp = code(x, y, z, t) tmp = x + (y * (z * (tanh((t / y)) - tanh((x / y))))); end
code[x_, y_, z_, t_] := N[(x + N[(y * N[(z * N[(N[Tanh[N[(t / y), $MachinePrecision]], $MachinePrecision] - N[Tanh[N[(x / y), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
x + y \cdot \left(z \cdot \left(\tanh \left(\frac{t}{y}\right) - \tanh \left(\frac{x}{y}\right)\right)\right)
\end{array}
herbie shell --seed 2024219
(FPCore (x y z t)
:name "SynthBasics:moogVCF from YampaSynth-0.2"
:precision binary64
:alt
(! :herbie-platform default (+ x (* y (* z (- (tanh (/ t y)) (tanh (/ x y)))))))
(+ x (* (* y z) (- (tanh (/ t y)) (tanh (/ x y))))))