
(FPCore (x y) :precision binary64 (- (/ x (* y y)) 3.0))
double code(double x, double y) {
    return (x / (y * y)) - 3.0;
}

real(8) function code(x, y)
    real(8), intent (in) :: x
    real(8), intent (in) :: y
    code = (x / (y * y)) - 3.0d0
end function

public static double code(double x, double y) {
    return (x / (y * y)) - 3.0;
}

def code(x, y): return (x / (y * y)) - 3.0

function code(x, y) return Float64(Float64(x / Float64(y * y)) - 3.0) end

function tmp = code(x, y)
    tmp = (x / (y * y)) - 3.0;
end

code[x_, y_] := N[(N[(x / N[(y * y), $MachinePrecision]), $MachinePrecision] - 3.0), $MachinePrecision]
\begin{array}{l}
\\
\frac{x}{y \cdot y} - 3
\end{array}
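To see why Herbie flags this expression, consider what happens when |y| is tiny: y * y underflows to zero and the division blows up, even though the true result is representable. A minimal C sketch (the inputs here are chosen for illustration and are not taken from the report):

#include <stdio.h>

int main(void) {
    double x = 1e-250, y = 1e-200;
    /* y * y underflows to 0, so x / (y * y) overflows to +inf. */
    printf("x / (y * y) - 3.0 = %g\n", x / (y * y) - 3.0);  /* inf */
    /* Dividing twice keeps every intermediate in range: ~1e150. */
    printf("(x / y) / y - 3.0 = %g\n", (x / y) / y - 3.0);
    return 0;
}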
Sampling outcomes in binary64 precision.

Herbie found 5 alternatives:

| Alternative | Accuracy | Speedup |
|---|---|---|
(FPCore (x y) :precision binary64 (- (/ x (* y y)) 3.0))
double code(double x, double y) {
    return (x / (y * y)) - 3.0;
}

real(8) function code(x, y)
    real(8), intent (in) :: x
    real(8), intent (in) :: y
    code = (x / (y * y)) - 3.0d0
end function

public static double code(double x, double y) {
    return (x / (y * y)) - 3.0;
}

def code(x, y): return (x / (y * y)) - 3.0

function code(x, y) return Float64(Float64(x / Float64(y * y)) - 3.0) end

function tmp = code(x, y)
    tmp = (x / (y * y)) - 3.0;
end

code[x_, y_] := N[(N[(x / N[(y * y), $MachinePrecision]), $MachinePrecision] - 3.0), $MachinePrecision]
\begin{array}{l}
\\
\frac{x}{y \cdot y} - 3
\end{array}
y_m = (fabs.f64 y)

(FPCore (x y_m) :precision binary64 (if (<= y_m 5.2e-160) (/ (/ x y_m) y_m) (- (/ x (* y_m y_m)) 3.0)))
y_m = fabs(y);

double code(double x, double y_m) {
    double tmp;
    if (y_m <= 5.2e-160) {
        tmp = (x / y_m) / y_m;
    } else {
        tmp = (x / (y_m * y_m)) - 3.0;
    }
    return tmp;
}

y_m = abs(y)

real(8) function code(x, y_m)
    real(8), intent (in) :: x
    real(8), intent (in) :: y_m
    real(8) :: tmp
    if (y_m <= 5.2d-160) then
        tmp = (x / y_m) / y_m
    else
        tmp = (x / (y_m * y_m)) - 3.0d0
    end if
    code = tmp
end function

y_m = Math.abs(y);

public static double code(double x, double y_m) {
    double tmp;
    if (y_m <= 5.2e-160) {
        tmp = (x / y_m) / y_m;
    } else {
        tmp = (x / (y_m * y_m)) - 3.0;
    }
    return tmp;
}

y_m = math.fabs(y)

def code(x, y_m):
    tmp = 0
    if y_m <= 5.2e-160:
        tmp = (x / y_m) / y_m
    else:
        tmp = (x / (y_m * y_m)) - 3.0
    return tmp

y_m = abs(y)

function code(x, y_m)
    tmp = 0.0
    if (y_m <= 5.2e-160)
        tmp = Float64(Float64(x / y_m) / y_m);
    else
        tmp = Float64(Float64(x / Float64(y_m * y_m)) - 3.0);
    end
    return tmp
end

y_m = abs(y);

function tmp_2 = code(x, y_m)
    tmp = 0.0;
    if (y_m <= 5.2e-160)
        tmp = (x / y_m) / y_m;
    else
        tmp = (x / (y_m * y_m)) - 3.0;
    end
    tmp_2 = tmp;
end

y_m = N[Abs[y], $MachinePrecision]

code[x_, y$95$m_] := If[LessEqual[y$95$m, 5.2e-160], N[(N[(x / y$95$m), $MachinePrecision] / y$95$m), $MachinePrecision], N[(N[(x / N[(y$95$m * y$95$m), $MachinePrecision]), $MachinePrecision] - 3.0), $MachinePrecision]]
\begin{array}{l}
y\_m = \left|y\right|
\\
\begin{array}{l}
\mathbf{if}\;y\_m \leq 5.2 \cdot 10^{-160}:\\
\;\;\;\;\frac{\frac{x}{y\_m}}{y\_m}\\
\mathbf{else}:\\
\;\;\;\;\frac{x}{y\_m \cdot y\_m} - 3\\
\end{array}
\end{array}
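Why 5.2e-160? An observation about the constants (mine, not the report's): squaring the cutoff gives roughly 2.7e-319, far below the smallest normal double (about 2.2e-308), so below the cutoff y_m * y_m is subnormal or zero and x / (y_m * y_m) cannot be trusted; the guarded branch divides twice instead. A small C check:

#include <float.h>
#include <stdio.h>

int main(void) {
    double cutoff = 5.2e-160;
    /* cutoff squared lands deep in the subnormal range, below DBL_MIN. */
    printf("cutoff^2 = %g\n", cutoff * cutoff);  /* ~2.7e-319 */
    printf("DBL_MIN  = %g\n", DBL_MIN);          /* ~2.2e-308 */
    return 0;
}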
if y < 5.20000000000000007e-160:

Initial program 94.8%
Applied rewrites 32.5%
Taylor expanded in x around -inf
associate-*r/ N/A
unpow2 N/A
associate-/r* N/A
mul-1-neg N/A
*-commutative N/A
unpow2 N/A
rem-square-sqrt N/A
distribute-frac-neg N/A
distribute-frac-neg2 N/A
mul-1-neg N/A
distribute-frac-neg N/A
distribute-neg-frac N/A
distribute-frac-neg2 N/A
associate-/r* N/A
sqr-neg-rev N/A
unpow2 N/A
unpow2 N/A
associate-/r* N/A
lower-/.f64 N/A
lower-/.f64 59.6
Applied rewrites 59.6%

if 5.20000000000000007e-160 < y:

Initial program 99.9%
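The Taylor step above also explains why the small-|y| branch drops the trailing -3 (a gloss on the step, not text from the report): in that regime |x / (y_m * y_m)| is astronomically large, so the constant is below rounding:

\frac{x}{y\_m \cdot y\_m} - 3 \;\approx\; \frac{x}{y\_m \cdot y\_m} \qquad \text{when } \left|\frac{x}{y\_m \cdot y\_m}\right| \gg 3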
y_m = (fabs.f64 y)

(FPCore (x y_m) :precision binary64 (let* ((t_0 (/ x (* y_m y_m))) (t_1 (- t_0 3.0))) (if (or (<= t_1 -1e+14) (not (<= t_1 -2.0))) t_0 -3.0)))
y_m = fabs(y);

double code(double x, double y_m) {
    double t_0 = x / (y_m * y_m);
    double t_1 = t_0 - 3.0;
    double tmp;
    if ((t_1 <= -1e+14) || !(t_1 <= -2.0)) {
        tmp = t_0;
    } else {
        tmp = -3.0;
    }
    return tmp;
}

y_m = abs(y)

real(8) function code(x, y_m)
    real(8), intent (in) :: x
    real(8), intent (in) :: y_m
    real(8) :: t_0
    real(8) :: t_1
    real(8) :: tmp
    t_0 = x / (y_m * y_m)
    t_1 = t_0 - 3.0d0
    if ((t_1 <= (-1d+14)) .or. (.not. (t_1 <= (-2.0d0)))) then
        tmp = t_0
    else
        tmp = -3.0d0
    end if
    code = tmp
end function

y_m = Math.abs(y);

public static double code(double x, double y_m) {
    double t_0 = x / (y_m * y_m);
    double t_1 = t_0 - 3.0;
    double tmp;
    if ((t_1 <= -1e+14) || !(t_1 <= -2.0)) {
        tmp = t_0;
    } else {
        tmp = -3.0;
    }
    return tmp;
}

y_m = math.fabs(y)

def code(x, y_m):
    t_0 = x / (y_m * y_m)
    t_1 = t_0 - 3.0
    tmp = 0
    if (t_1 <= -1e+14) or not (t_1 <= -2.0):
        tmp = t_0
    else:
        tmp = -3.0
    return tmp

y_m = abs(y)

function code(x, y_m)
    t_0 = Float64(x / Float64(y_m * y_m))
    t_1 = Float64(t_0 - 3.0)
    tmp = 0.0
    if ((t_1 <= -1e+14) || !(t_1 <= -2.0))
        tmp = t_0;
    else
        tmp = -3.0;
    end
    return tmp
end

y_m = abs(y);

function tmp_2 = code(x, y_m)
    t_0 = x / (y_m * y_m);
    t_1 = t_0 - 3.0;
    tmp = 0.0;
    if ((t_1 <= -1e+14) || ~((t_1 <= -2.0)))
        tmp = t_0;
    else
        tmp = -3.0;
    end
    tmp_2 = tmp;
end

y_m = N[Abs[y], $MachinePrecision]

code[x_, y$95$m_] := Block[{t$95$0 = N[(x / N[(y$95$m * y$95$m), $MachinePrecision]), $MachinePrecision]}, Block[{t$95$1 = N[(t$95$0 - 3.0), $MachinePrecision]}, If[Or[LessEqual[t$95$1, -1e+14], N[Not[LessEqual[t$95$1, -2.0]], $MachinePrecision]], t$95$0, -3.0]]]
\begin{array}{l}
y\_m = \left|y\right|
\\
\begin{array}{l}
t\_0 := \frac{x}{y\_m \cdot y\_m}\\
t\_1 := t\_0 - 3\\
\mathbf{if}\;t\_1 \leq -1 \cdot 10^{+14} \lor \neg \left(t\_1 \leq -2\right):\\
\;\;\;\;t\_0\\
\mathbf{else}:\\
\;\;\;\;-3\\
\end{array}
\end{array}
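A gloss on the guard (my reading, not the report's): when t_1 = t_0 - 3 is hugely negative, the subtraction is absorbed by rounding, so returning t_0 alone loses nothing; in the middle band, where sampled inputs put t_0 near zero, the Taylor expansion around x = 0 leaves only the constant -3.0; the remaining regime (t_1 > -2) also returns t_0, an accuracy-for-speed trade visible in the derivation percentages below. A minimal C check of the absorption claim:

#include <stdio.h>

int main(void) {
    /* At magnitude 1e17 the spacing between adjacent doubles is 16,
       so subtracting 3.0 rounds back to the same value. */
    double t0 = -1e17;
    printf("%d\n", t0 - 3.0 == t0);  /* prints 1 */
    return 0;
}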
if (-.f64 (/.f64 x (*.f64 y y)) #s(literal 3 binary64)) < -1e14 or -2 < (-.f64 (/.f64 x (*.f64 y y)) #s(literal 3 binary64)):

Initial program 93.6%
lift-/.f64 N/A
lift-*.f64 N/A
associate-/r* N/A
lower-/.f64 N/A
lower-/.f64 99.8
Applied rewrites 99.8%
Applied rewrites 3.1%
Taylor expanded in x around -inf
*-commutative N/A
associate-*l/ N/A
*-commutative N/A
unpow2 N/A
rem-square-sqrt N/A
mul-1-neg N/A
distribute-lft-neg-in N/A
distribute-rgt-neg-in N/A
metadata-eval N/A
*-rgt-identity N/A
lower-/.f64 N/A
unpow2 N/A
lower-*.f64 92.5
Applied rewrites 92.5%

if -1e14 < (-.f64 (/.f64 x (*.f64 y y)) #s(literal 3 binary64)) < -2:

Initial program 100.0%
Taylor expanded in x around 0
Applied rewrites 99.8%

Final simplification 96.0%
y_m = (fabs.f64 y)

(FPCore (x y_m) :precision binary64 (- (/ (/ x y_m) y_m) 3.0))
y_m = fabs(y);

double code(double x, double y_m) {
    return ((x / y_m) / y_m) - 3.0;
}

y_m = abs(y)

real(8) function code(x, y_m)
    real(8), intent (in) :: x
    real(8), intent (in) :: y_m
    code = ((x / y_m) / y_m) - 3.0d0
end function

y_m = Math.abs(y);

public static double code(double x, double y_m) {
    return ((x / y_m) / y_m) - 3.0;
}

y_m = math.fabs(y)

def code(x, y_m): return ((x / y_m) / y_m) - 3.0

y_m = abs(y)

function code(x, y_m) return Float64(Float64(Float64(x / y_m) / y_m) - 3.0) end

y_m = abs(y);

function tmp = code(x, y_m)
    tmp = ((x / y_m) / y_m) - 3.0;
end

y_m = N[Abs[y], $MachinePrecision]

code[x_, y$95$m_] := N[(N[(N[(x / y$95$m), $MachinePrecision] / y$95$m), $MachinePrecision] - 3.0), $MachinePrecision]
\begin{array}{l}
y\_m = \left|y\right|
\\
\frac{\frac{x}{y\_m}}{y\_m} - 3
\end{array}
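The rewrite behind this variant is plain associativity (noted here for context; the associate-/r* rule named below performs it):

\frac{x}{y\_m \cdot y\_m} = \frac{x / y\_m}{y\_m}

Trading the multiplication for a second division avoids squaring a tiny y_m, at the cost of an extra divide, which is generally slower than a multiply.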
Initial program 96.7%
lift-/.f64 N/A
lift-*.f64 N/A
associate-/r* N/A
lower-/.f64 N/A
lower-/.f64 99.9
Applied rewrites 99.9%
y_m = (fabs.f64 y)

(FPCore (x y_m) :precision binary64 (- (/ x (* y_m y_m)) 3.0))
y_m = fabs(y);

double code(double x, double y_m) {
    return (x / (y_m * y_m)) - 3.0;
}

y_m = abs(y)

real(8) function code(x, y_m)
    real(8), intent (in) :: x
    real(8), intent (in) :: y_m
    code = (x / (y_m * y_m)) - 3.0d0
end function

y_m = Math.abs(y);

public static double code(double x, double y_m) {
    return (x / (y_m * y_m)) - 3.0;
}

y_m = math.fabs(y)

def code(x, y_m): return (x / (y_m * y_m)) - 3.0

y_m = abs(y)

function code(x, y_m) return Float64(Float64(x / Float64(y_m * y_m)) - 3.0) end

y_m = abs(y);

function tmp = code(x, y_m)
    tmp = (x / (y_m * y_m)) - 3.0;
end

y_m = N[Abs[y], $MachinePrecision]

code[x_, y$95$m_] := N[(N[(x / N[(y$95$m * y$95$m), $MachinePrecision]), $MachinePrecision] - 3.0), $MachinePrecision]
\begin{array}{l}
y\_m = \left|y\right|
\\
\frac{x}{y\_m \cdot y\_m} - 3
\end{array}
Initial program 96.7%
y_m = (fabs.f64 y)

(FPCore (x y_m) :precision binary64 -3.0)
y_m = fabs(y);

double code(double x, double y_m) {
    return -3.0;
}

y_m = abs(y)

real(8) function code(x, y_m)
    real(8), intent (in) :: x
    real(8), intent (in) :: y_m
    code = -3.0d0
end function

y_m = Math.abs(y);

public static double code(double x, double y_m) {
    return -3.0;
}

y_m = math.fabs(y)

def code(x, y_m): return -3.0

y_m = abs(y)

function code(x, y_m) return -3.0 end

y_m = abs(y);

function tmp = code(x, y_m)
    tmp = -3.0;
end

y_m = N[Abs[y], $MachinePrecision]

code[x_, y$95$m_] := -3.0
\begin{array}{l}
y\_m = \left|y\right|
\\
-3
\end{array}
Initial program 96.7%
Taylor expanded in x around 0
Applied rewrites 50.0%
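A gloss on the step (the report names it only as a Taylor expansion): expanding in x around 0 keeps just the constant term,

\lim_{x \to 0}\left(\frac{x}{y\_m \cdot y\_m} - 3\right) = -3,

which makes this the cheapest and least accurate alternative, consistent with the 50.0% figure above.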
(FPCore (x y) :precision binary64 (- (/ (/ x y) y) 3.0))
double code(double x, double y) {
    return ((x / y) / y) - 3.0;
}

real(8) function code(x, y)
    real(8), intent (in) :: x
    real(8), intent (in) :: y
    code = ((x / y) / y) - 3.0d0
end function

public static double code(double x, double y) {
    return ((x / y) / y) - 3.0;
}

def code(x, y): return ((x / y) / y) - 3.0

function code(x, y) return Float64(Float64(Float64(x / y) / y) - 3.0) end

function tmp = code(x, y)
    tmp = ((x / y) / y) - 3.0;
end

code[x_, y_] := N[(N[(N[(x / y), $MachinePrecision] / y), $MachinePrecision] - 3.0), $MachinePrecision]
\begin{array}{l}
\\
\frac{\frac{x}{y}}{y} - 3
\end{array}
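For comparison: this final program matches the :alt target annotation in the FPCore job below, i.e. the developer-supplied rewrite.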
Reproduce with:

herbie shell --seed 2024342
(FPCore (x y)
:name "Statistics.Sample:$skurtosis from math-functions-0.1.5.2"
:precision binary64
:alt
(! :herbie-platform default (- (/ (/ x y) y) 3))
(- (/ x (* y y)) 3.0))
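A usage note (assuming the standard Herbie CLI): herbie shell reads FPCore expressions from standard input and prints improved candidates, so launching it with the seed above and pasting this FPCore should regenerate the alternatives in this report, though exact results can vary across Herbie versions.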