
(FPCore (x y) :precision binary64 (- (/ x (* y y)) 3.0))
/* Direct evaluation of x / y^2 - 3.0 (naive excess-kurtosis form). */
double code(double x, double y) {
    double y_squared = y * y;
    return x / y_squared - 3.0;
}
! Direct evaluation of x / y**2 - 3 (naive excess-kurtosis form).
real(8) function code(x, y)
implicit none
real(8), intent(in) :: x
real(8), intent(in) :: y
code = (x / (y * y)) - 3.0d0
end function
/** Direct evaluation of x / y^2 - 3.0. */
public static double code(double x, double y) {
    final double denom = y * y;
    return x / denom - 3.0;
}
def code(x, y):
    """Return x / y**2 - 3.0, evaluated directly."""
    denom = y * y
    return x / denom - 3.0
# x / y^2 - 3.0 with every intermediate rounded to Float64 explicitly.
function code(x, y)
    denom = Float64(y * y)
    return Float64(Float64(x / denom) - 3.0)
end
% Direct evaluation of x / y^2 - 3.0.
% NOTE(review): reflowed from an invalid one-line function definition.
function tmp = code(x, y)
    tmp = (x / (y * y)) - 3.0;
end
(* Direct evaluation of x / y^2 - 3, each step rounded at $MachinePrecision. *)
code[x_, y_] := N[(N[(x / N[(y * y), $MachinePrecision]), $MachinePrecision] - 3.0), $MachinePrecision]
\begin{array}{l}
\\
\frac{x}{y \cdot y} - 3
\end{array}
Sampling outcomes in binary64 precision:
Herbie found 7 alternatives:
| Alternative | Accuracy | Speedup |
|---|---|---|
(FPCore (x y) :precision binary64 (- (/ x (* y y)) 3.0))
/* x / (y*y) - 3.0, computed via named intermediates. */
double code(double x, double y) {
    const double denom = y * y;
    const double ratio = x / denom;
    return ratio - 3.0;
}
! Direct evaluation of x / y**2 - 3.
real(8) function code(x, y)
implicit none
real(8), intent(in) :: x
real(8), intent(in) :: y
code = (x / (y * y)) - 3.0d0
end function
/** x / (y*y) - 3.0, computed via a named intermediate. */
public static double code(double x, double y) {
    final double ratio = x / (y * y);
    return ratio - 3.0;
}
def code(x, y):
    """x / y**2 - 3.0, computed via a named intermediate ratio."""
    ratio = x / (y * y)
    return ratio - 3.0
# x / y^2 - 3.0 with explicit Float64 rounding at each step.
function code(x, y)
    ratio = Float64(x / Float64(y * y))
    Float64(ratio - 3.0)
end
% x / y^2 - 3.0. Reflowed from an invalid one-line function definition.
function tmp = code(x, y)
    tmp = (x / (y * y)) - 3.0;
end
(* x / y^2 - 3, rounded at $MachinePrecision after each operation. *)
code[x_, y_] := N[(N[(x / N[(y * y), $MachinePrecision]), $MachinePrecision] - 3.0), $MachinePrecision]
\begin{array}{l}
\\
\frac{x}{y \cdot y} - 3
\end{array}
(FPCore (x y) :precision binary64 (- (pow (/ y (/ x y)) -1.0) 3.0))
/* pow(y / (x / y), -1) - 3.0; equal to x / y^2 - 3 in exact arithmetic. */
double code(double x, double y) {
    const double base = y / (x / y);
    return pow(base, -1.0) - 3.0;
}
! (y / (x/y))**(-1) - 3; algebraically x / y**2 - 3.
real(8) function code(x, y)
implicit none
real(8), intent(in) :: x
real(8), intent(in) :: y
code = ((y / (x / y)) ** (-1.0d0)) - 3.0d0
end function
/** Math.pow(y / (x / y), -1) - 3.0; equals x / y^2 - 3 in exact arithmetic. */
public static double code(double x, double y) {
    final double base = y / (x / y);
    return Math.pow(base, -1.0) - 3.0;
}
def code(x, y):
    """math.pow(y / (x / y), -1) - 3.0; equals x / y**2 - 3 in exact arithmetic."""
    base = y / (x / y)
    return math.pow(base, -1.0) - 3.0
# (y / (x/y)) ^ -1 - 3.0 with explicit Float64 rounding.
function code(x, y)
    base = Float64(y / Float64(x / y))
    return Float64((base ^ -1.0) - 3.0)
end
% (y / (x/y)) ^ -1 - 3.0. Reflowed from an invalid one-line definition.
function tmp = code(x, y)
    tmp = ((y / (x / y)) ^ -1.0) - 3.0;
end
(* Power[y / (x/y), -1] - 3, rounded at $MachinePrecision per step. *)
code[x_, y_] := N[(N[Power[N[(y / N[(x / y), $MachinePrecision]), $MachinePrecision], -1.0], $MachinePrecision] - 3.0), $MachinePrecision]
\begin{array}{l}
\\
{\left(\frac{y}{\frac{x}{y}}\right)}^{-1} - 3
\end{array}
Initial program 94.1%
associate-/r* 99.9%
clear-num 99.9%
inv-pow 99.9%
Applied egg-rr 99.9%
(FPCore (x y) :precision binary64 (if (<= (* y y) 0.0) (* (/ x y) (/ 1.0 y)) (- (/ x (* y y)) 3.0)))
/* Regime split: when y*y is non-positive (y == 0 or underflow), divide by y
 * twice; otherwise evaluate x/(y*y) - 3.0 directly. Note the first branch
 * carries no -3.0 term (as generated). */
double code(double x, double y) {
    if ((y * y) <= 0.0) {
        return (x / y) * (1.0 / y);
    }
    return x / (y * y) - 3.0;
}
! Regime split: when y*y is non-positive (y == 0 or underflow), divide by y
! twice; otherwise evaluate x/(y*y) - 3 directly. The first branch carries
! no -3 term (as generated by the rewriting tool).
real(8) function code(x, y)
implicit none
real(8), intent(in) :: x
real(8), intent(in) :: y
real(8) :: tmp
if ((y * y) <= 0.0d0) then
tmp = (x / y) * (1.0d0 / y)
else
tmp = (x / (y * y)) - 3.0d0
end if
code = tmp
end function
/**
 * Regime split: when y*y is non-positive, divide by y twice (no -3.0 term,
 * as generated); otherwise evaluate x/(y*y) - 3.0 directly.
 */
public static double code(double x, double y) {
    if ((y * y) <= 0.0) {
        return (x / y) * (1.0 / y);
    }
    return x / (y * y) - 3.0;
}
def code(x, y):
    """Regime split on the sign of y*y.

    When y*y is non-positive (y == 0 or underflow), divide by y twice;
    that branch carries no -3.0 term (as generated). Otherwise evaluate
    x/(y*y) - 3.0 directly.

    NOTE(review): reflowed from a single mangled line that was invalid
    Python (lost newlines).
    """
    if (y * y) <= 0.0:
        tmp = (x / y) * (1.0 / y)
    else:
        tmp = (x / (y * y)) - 3.0
    return tmp
# Regime split on the sign of y*y; the non-positive branch divides by y
# twice and carries no -3.0 term (as generated).
# NOTE(review): reflowed from a single mangled line (lost separators).
function code(x, y)
    tmp = 0.0
    if (Float64(y * y) <= 0.0)
        tmp = Float64(Float64(x / y) * Float64(1.0 / y))
    else
        tmp = Float64(Float64(x / Float64(y * y)) - 3.0)
    end
    return tmp
end
% Regime split on the sign of y*y; the non-positive branch divides by y
% twice and carries no -3.0 term (as generated).
% NOTE(review): reflowed from an invalid one-line function definition.
function tmp_2 = code(x, y)
    tmp = 0.0;
    if ((y * y) <= 0.0)
        tmp = (x / y) * (1.0 / y);
    else
        tmp = (x / (y * y)) - 3.0;
    end
    tmp_2 = tmp;
end
(* Regime split on the sign of y*y; the non-positive branch divides by y twice. *)
code[x_, y_] := If[LessEqual[N[(y * y), $MachinePrecision], 0.0], N[(N[(x / y), $MachinePrecision] * N[(1.0 / y), $MachinePrecision]), $MachinePrecision], N[(N[(x / N[(y * y), $MachinePrecision]), $MachinePrecision] - 3.0), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;y \cdot y \leq 0:\\
\;\;\;\;\frac{x}{y} \cdot \frac{1}{y}\\
\mathbf{else}:\\
\;\;\;\;\frac{x}{y \cdot y} - 3\\
\end{array}
\end{array}
if (*.f64 y y) < 0.0
Initial program 77.5%
Taylor expanded in x around inf 77.5%
*-rgt-identity77.5%
associate-*r/77.5%
unpow277.5%
associate-/r*77.5%
*-rgt-identity77.5%
associate-/l*77.5%
unpow-177.5%
unpow-177.5%
pow-sqr77.5%
metadata-eval77.5%
Simplified77.5%
metadata-eval77.5%
pow-sqr77.5%
inv-pow77.5%
inv-pow77.5%
un-div-inv77.5%
Applied egg-rr77.5%
associate-/l/77.5%
div-inv77.5%
associate-/l/99.8%
Applied egg-rr99.8%
div-inv99.8%
Applied egg-rr99.8%
if 0.0 < (*.f64 y y) Initial program 99.9%
(FPCore (x y) :precision binary64 (if (<= y 1.06e+34) (* (/ x y) (/ 1.0 y)) -3.0))
/* For y <= 1.06e34 evaluate (x/y)*(1/y); above the cutoff return -3.0.
 * Note neither branch includes both the ratio and the -3.0 (as generated). */
double code(double x, double y) {
    if (y <= 1.06e+34) {
        return (x / y) * (1.0 / y);
    }
    return -3.0;
}
! For y <= 1.06e34 evaluate (x/y)*(1/y); above the cutoff return -3.
! Neither branch combines both terms (as generated by the rewriting tool).
real(8) function code(x, y)
implicit none
real(8), intent(in) :: x
real(8), intent(in) :: y
real(8) :: tmp
if (y <= 1.06d+34) then
tmp = (x / y) * (1.0d0 / y)
else
tmp = -3.0d0
end if
code = tmp
end function
/** For y <= 1.06e34 evaluate (x/y)*(1/y); above the cutoff return -3.0. */
public static double code(double x, double y) {
    return (y <= 1.06e+34) ? (x / y) * (1.0 / y) : -3.0;
}
def code(x, y):
    """For y <= 1.06e34 evaluate (x/y)*(1/y); above the cutoff return -3.0.

    NOTE(review): reflowed from a single mangled line that was invalid
    Python (lost newlines).
    """
    if y <= 1.06e+34:
        tmp = (x / y) * (1.0 / y)
    else:
        tmp = -3.0
    return tmp
# For y <= 1.06e34 evaluate (x/y)*(1/y); above the cutoff return -3.0.
# NOTE(review): reflowed from a single mangled line (lost separators).
function code(x, y)
    tmp = 0.0
    if (y <= 1.06e+34)
        tmp = Float64(Float64(x / y) * Float64(1.0 / y))
    else
        tmp = -3.0
    end
    return tmp
end
% For y <= 1.06e34 evaluate (x/y)*(1/y); above the cutoff return -3.0.
% NOTE(review): reflowed from an invalid one-line function definition.
function tmp_2 = code(x, y)
    tmp = 0.0;
    if (y <= 1.06e+34)
        tmp = (x / y) * (1.0 / y);
    else
        tmp = -3.0;
    end
    tmp_2 = tmp;
end
(* For y <= 1.06e34 evaluate (x/y)*(1/y); above the cutoff return -3.0. *)
code[x_, y_] := If[LessEqual[y, 1.06e+34], N[(N[(x / y), $MachinePrecision] * N[(1.0 / y), $MachinePrecision]), $MachinePrecision], -3.0]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;y \leq 1.06 \cdot 10^{+34}:\\
\;\;\;\;\frac{x}{y} \cdot \frac{1}{y}\\
\mathbf{else}:\\
\;\;\;\;-3\\
\end{array}
\end{array}
if y < 1.06000000000000005e34
Initial program 92.6%
Taylor expanded in x around inf 55.6%
*-rgt-identity55.6%
associate-*r/55.6%
unpow255.6%
associate-/r*55.6%
*-rgt-identity55.6%
associate-/l*55.5%
unpow-155.5%
unpow-155.5%
pow-sqr55.6%
metadata-eval55.6%
Simplified55.6%
metadata-eval55.6%
pow-sqr55.5%
inv-pow55.5%
inv-pow55.5%
un-div-inv55.6%
Applied egg-rr55.6%
associate-/l/55.6%
div-inv55.6%
associate-/l/62.8%
Applied egg-rr62.8%
div-inv62.8%
Applied egg-rr62.8%
if 1.06000000000000005e34 < y Initial program 100.0%
Taylor expanded in x around 0 94.3%
(FPCore (x y) :precision binary64 (if (<= y 1.06e+34) (/ (/ x y) y) -3.0))
/* For y <= 1.06e34 divide x by y twice; above the cutoff return -3.0. */
double code(double x, double y) {
    return (y <= 1.06e+34) ? (x / y) / y : -3.0;
}
! For y <= 1.06e34 divide x by y twice; above the cutoff return -3.
real(8) function code(x, y)
implicit none
real(8), intent(in) :: x
real(8), intent(in) :: y
real(8) :: tmp
if (y <= 1.06d+34) then
tmp = (x / y) / y
else
tmp = -3.0d0
end if
code = tmp
end function
/** For y <= 1.06e34 divide x by y twice; above the cutoff return -3.0. */
public static double code(double x, double y) {
    return (y <= 1.06e+34) ? (x / y) / y : -3.0;
}
def code(x, y):
    """For y <= 1.06e34 divide x by y twice; above the cutoff return -3.0.

    NOTE(review): reflowed from a single mangled line that was invalid
    Python (lost newlines).
    """
    if y <= 1.06e+34:
        tmp = (x / y) / y
    else:
        tmp = -3.0
    return tmp
# For y <= 1.06e34 divide x by y twice; above the cutoff return -3.0.
# NOTE(review): reflowed from a single mangled line (lost separators).
function code(x, y)
    tmp = 0.0
    if (y <= 1.06e+34)
        tmp = Float64(Float64(x / y) / y)
    else
        tmp = -3.0
    end
    return tmp
end
% For y <= 1.06e34 divide x by y twice; above the cutoff return -3.0.
% NOTE(review): reflowed from an invalid one-line function definition.
function tmp_2 = code(x, y)
    tmp = 0.0;
    if (y <= 1.06e+34)
        tmp = (x / y) / y;
    else
        tmp = -3.0;
    end
    tmp_2 = tmp;
end
(* For y <= 1.06e34 divide x by y twice; above the cutoff return -3.0. *)
code[x_, y_] := If[LessEqual[y, 1.06e+34], N[(N[(x / y), $MachinePrecision] / y), $MachinePrecision], -3.0]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;y \leq 1.06 \cdot 10^{+34}:\\
\;\;\;\;\frac{\frac{x}{y}}{y}\\
\mathbf{else}:\\
\;\;\;\;-3\\
\end{array}
\end{array}
if y < 1.06000000000000005e34
Initial program 92.6%
Taylor expanded in x around inf 55.6%
*-rgt-identity55.6%
associate-*r/55.6%
unpow255.6%
associate-/r*55.6%
*-rgt-identity55.6%
associate-/l*55.5%
unpow-155.5%
unpow-155.5%
pow-sqr55.6%
metadata-eval55.6%
Simplified55.6%
metadata-eval55.6%
pow-sqr55.5%
inv-pow55.5%
inv-pow55.5%
un-div-inv55.6%
Applied egg-rr55.6%
associate-/l/55.6%
div-inv55.6%
associate-/l/62.8%
Applied egg-rr62.8%
if 1.06000000000000005e34 < y Initial program 100.0%
Taylor expanded in x around 0 94.3%
(FPCore (x y) :precision binary64 (- (/ -1.0 (* y (/ y (- x)))) 3.0))
/* -1 / (y * (y / -x)) - 3.0: sign-rearranged form of x / y^2 - 3. */
double code(double x, double y) {
    const double inner = y / -x;
    return -1.0 / (y * inner) - 3.0;
}
! -1 / (y * (y / -x)) - 3: sign-rearranged form of x / y**2 - 3.
real(8) function code(x, y)
implicit none
real(8), intent(in) :: x
real(8), intent(in) :: y
code = ((-1.0d0) / (y * (y / -x))) - 3.0d0
end function
/** -1 / (y * (y / -x)) - 3.0: sign-rearranged form of x / y^2 - 3. */
public static double code(double x, double y) {
    final double inner = y / -x;
    return -1.0 / (y * inner) - 3.0;
}
def code(x, y):
    """-1 / (y * (y / -x)) - 3.0: sign-rearranged form of x / y**2 - 3."""
    inner = y / -x
    return -1.0 / (y * inner) - 3.0
# -1 / (y * (y / -x)) - 3.0 with explicit Float64 rounding per step.
function code(x, y)
    inner = Float64(y / Float64(-x))
    return Float64(Float64(-1.0 / Float64(y * inner)) - 3.0)
end
% -1 / (y * (y / -x)) - 3.0: sign-rearranged form of x / y^2 - 3.
% NOTE(review): reflowed from an invalid one-line function definition.
function tmp = code(x, y)
    tmp = (-1.0 / (y * (y / -x))) - 3.0;
end
(* -1 / (y * (y / -x)) - 3, rounded at $MachinePrecision per step. *)
code[x_, y_] := N[(N[(-1.0 / N[(y * N[(y / (-x)), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] - 3.0), $MachinePrecision]
\begin{array}{l}
\\
\frac{-1}{y \cdot \frac{y}{-x}} - 3
\end{array}
Initial program 94.1%
associate-/r* 99.9%
clear-num 99.9%
inv-pow 99.9%
Applied egg-rr 99.9%
unpow-1 99.9%
associate-/r/ 99.9%
*-commutative 99.9%
clear-num 99.8%
frac-2neg 99.8%
metadata-eval 99.8%
frac-times 99.9%
metadata-eval 99.9%
Applied egg-rr 99.9%
Final simplification 99.9%
(FPCore (x y) :precision binary64 (- (/ (/ x y) y) 3.0))
/* (x / y) / y - 3.0: two divisions instead of forming y*y. */
double code(double x, double y) {
    const double q = x / y;
    return q / y - 3.0;
}
! (x / y) / y - 3: two divisions instead of forming y*y.
real(8) function code(x, y)
implicit none
real(8), intent(in) :: x
real(8), intent(in) :: y
code = ((x / y) / y) - 3.0d0
end function
/** (x / y) / y - 3.0: two divisions instead of forming y*y. */
public static double code(double x, double y) {
    final double q = x / y;
    return q / y - 3.0;
}
def code(x, y):
    """(x / y) / y - 3.0: two divisions instead of forming y*y."""
    quotient = (x / y) / y
    return quotient - 3.0
# (x / y) / y - 3.0 with explicit Float64 rounding per step.
function code(x, y)
    q = Float64(x / y)
    Float64(Float64(q / y) - 3.0)
end
% (x / y) / y - 3.0. Reflowed from an invalid one-line definition.
function tmp = code(x, y)
    tmp = ((x / y) / y) - 3.0;
end
(* (x / y) / y - 3, rounded at $MachinePrecision per step. *)
code[x_, y_] := N[(N[(N[(x / y), $MachinePrecision] / y), $MachinePrecision] - 3.0), $MachinePrecision]
\begin{array}{l}
\\
\frac{\frac{x}{y}}{y} - 3
\end{array}
Initial program 94.1%
associate-/r* 99.9%
clear-num 99.9%
inv-pow 99.9%
Applied egg-rr 99.9%
unpow-1 99.9%
clear-num 99.9%
Applied egg-rr 99.9%
(FPCore (x y) :precision binary64 -3.0)
/* Constant approximation: both arguments ignored, always -3.0. */
double code(double x, double y) {
    (void)x;
    (void)y;
    return -3.0;
}
! Constant approximation: both arguments ignored, always -3.
real(8) function code(x, y)
implicit none
real(8), intent(in) :: x
real(8), intent(in) :: y
code = -3.0d0
end function
/** Constant approximation: both arguments ignored, always -3.0. */
public static double code(double x, double y) {
    return -3.0;
}
def code(x, y):
    """Constant approximation: both arguments ignored, always -3.0."""
    return -3.0
# Constant approximation: both arguments ignored, always -3.0.
code(x, y) = -3.0
% Constant approximation: both arguments ignored, always -3.0.
% NOTE(review): reflowed from an invalid one-line function definition.
function tmp = code(x, y)
    tmp = -3.0;
end
(* Constant approximation: both arguments ignored, always -3.0. *)
code[x_, y_] := -3.0
\begin{array}{l}
\\
-3
\end{array}
Initial program 94.1%
Taylor expanded in x around 0 50.1%
(FPCore (x y) :precision binary64 (- (/ (/ x y) y) 3.0))
/* x / y / y - 3.0 (left-associative: (x/y)/y), avoiding y*y. */
double code(double x, double y) {
    return x / y / y - 3.0;
}
! (x / y) / y - 3: two divisions instead of forming y*y.
real(8) function code(x, y)
implicit none
real(8), intent(in) :: x
real(8), intent(in) :: y
code = ((x / y) / y) - 3.0d0
end function
/** x / y / y - 3.0 (left-associative: (x/y)/y), avoiding y*y. */
public static double code(double x, double y) {
    return x / y / y - 3.0;
}
def code(x, y):
    """(x / y) / y - 3.0, evaluated stepwise."""
    t = x / y
    t = t / y
    return t - 3.0
# (x / y) / y - 3.0 with explicit Float64 rounding, stepwise.
function code(x, y)
    first_div = Float64(x / y)
    second_div = Float64(first_div / y)
    return Float64(second_div - 3.0)
end
% (x / y) / y - 3.0. Reflowed from an invalid one-line definition.
function tmp = code(x, y)
    tmp = ((x / y) / y) - 3.0;
end
(* (x / y) / y - 3, rounded at $MachinePrecision per step. *)
code[x_, y_] := N[(N[(N[(x / y), $MachinePrecision] / y), $MachinePrecision] - 3.0), $MachinePrecision]
\begin{array}{l}
\\
\frac{\frac{x}{y}}{y} - 3
\end{array}
herbie shell --seed 2024163
(FPCore (x y)
:name "Statistics.Sample:$skurtosis from math-functions-0.1.5.2"
:precision binary64
:alt
(! :herbie-platform default (- (/ (/ x y) y) 3))
(- (/ x (* y y)) 3.0))