
(FPCore (x y) :precision binary64 (- (/ x (* y y)) 3.0))
double code(double x, double y) {
return (x / (y * y)) - 3.0;
}
real(8) function code(x, y)
real(8), intent (in) :: x
real(8), intent (in) :: y
code = (x / (y * y)) - 3.0d0
end function
public static double code(double x, double y) {
return (x / (y * y)) - 3.0;
}
def code(x, y): return (x / (y * y)) - 3.0
function code(x, y) return Float64(Float64(x / Float64(y * y)) - 3.0) end
function tmp = code(x, y) tmp = (x / (y * y)) - 3.0; end
code[x_, y_] := N[(N[(x / N[(y * y), $MachinePrecision]), $MachinePrecision] - 3.0), $MachinePrecision]
\begin{array}{l}
\\
\frac{x}{y \cdot y} - 3
\end{array}
Sampling outcomes in binary64 precision:
Herbie found 4 alternatives:
| Alternative | Accuracy | Speedup |
|---|---|---|
(FPCore (x y) :precision binary64 (- (/ x (* y y)) 3.0))
double code(double x, double y) {
return (x / (y * y)) - 3.0;
}
real(8) function code(x, y)
real(8), intent (in) :: x
real(8), intent (in) :: y
code = (x / (y * y)) - 3.0d0
end function
public static double code(double x, double y) {
return (x / (y * y)) - 3.0;
}
def code(x, y): return (x / (y * y)) - 3.0
function code(x, y) return Float64(Float64(x / Float64(y * y)) - 3.0) end
function tmp = code(x, y) tmp = (x / (y * y)) - 3.0; end
code[x_, y_] := N[(N[(x / N[(y * y), $MachinePrecision]), $MachinePrecision] - 3.0), $MachinePrecision]
\begin{array}{l}
\\
\frac{x}{y \cdot y} - 3
\end{array}
(FPCore (x y) :precision binary64 (- (pow (* (/ y x) y) -1.0) 3.0))
double code(double x, double y) {
return pow(((y / x) * y), -1.0) - 3.0;
}
real(8) function code(x, y)
real(8), intent (in) :: x
real(8), intent (in) :: y
code = (((y / x) * y) ** (-1.0d0)) - 3.0d0
end function
public static double code(double x, double y) {
return Math.pow(((y / x) * y), -1.0) - 3.0;
}
def code(x, y): return math.pow(((y / x) * y), -1.0) - 3.0
function code(x, y) return Float64((Float64(Float64(y / x) * y) ^ -1.0) - 3.0) end
function tmp = code(x, y) tmp = (((y / x) * y) ^ -1.0) - 3.0; end
code[x_, y_] := N[(N[Power[N[(N[(y / x), $MachinePrecision] * y), $MachinePrecision], -1.0], $MachinePrecision] - 3.0), $MachinePrecision]
\begin{array}{l}
\\
{\left(\frac{y}{x} \cdot y\right)}^{-1} - 3
\end{array}
Initial program 96.0%
lift-/.f64 N/A
clear-num N/A
frac-2neg N/A
metadata-eval N/A
lower-/.f64 N/A
lift-*.f64 N/A
associate-/l* N/A
distribute-lft-neg-in N/A
lower-*.f64 N/A
lower-neg.f64 N/A
lower-/.f64 99.9
Applied rewrites 99.9%
lift-/.f64 N/A
metadata-eval N/A
lift-*.f64 N/A
lift-neg.f64 N/A
distribute-lft-neg-out N/A
lift-/.f64 N/A
associate-/l* N/A
lift-*.f64 N/A
lift-/.f64 N/A
frac-2neg N/A
lift-/.f64 96.0
lift-/.f64 N/A
lift-*.f64 N/A
associate-/l* N/A
lift-/.f64 N/A
*-commutative N/A
lower-*.f64 99.9
Applied rewrites 99.9%
Final simplification 99.9%
(FPCore (x y) :precision binary64 (- (/ (/ x y) y) 3.0))
double code(double x, double y) {
return ((x / y) / y) - 3.0;
}
real(8) function code(x, y)
real(8), intent (in) :: x
real(8), intent (in) :: y
code = ((x / y) / y) - 3.0d0
end function
public static double code(double x, double y) {
return ((x / y) / y) - 3.0;
}
def code(x, y): return ((x / y) / y) - 3.0
function code(x, y) return Float64(Float64(Float64(x / y) / y) - 3.0) end
function tmp = code(x, y) tmp = ((x / y) / y) - 3.0; end
code[x_, y_] := N[(N[(N[(x / y), $MachinePrecision] / y), $MachinePrecision] - 3.0), $MachinePrecision]
\begin{array}{l}
\\
\frac{\frac{x}{y}}{y} - 3
\end{array}
Initial program 96.0%
lift-/.f64 N/A
lift-*.f64 N/A
associate-/r* N/A
lower-/.f64 N/A
lower-/.f64 99.9
Applied rewrites 99.9%
(FPCore (x y) :precision binary64 (- (/ x (* y y)) 3.0))
double code(double x, double y) {
return (x / (y * y)) - 3.0;
}
real(8) function code(x, y)
real(8), intent (in) :: x
real(8), intent (in) :: y
code = (x / (y * y)) - 3.0d0
end function
public static double code(double x, double y) {
return (x / (y * y)) - 3.0;
}
def code(x, y): return (x / (y * y)) - 3.0
function code(x, y) return Float64(Float64(x / Float64(y * y)) - 3.0) end
function tmp = code(x, y) tmp = (x / (y * y)) - 3.0; end
code[x_, y_] := N[(N[(x / N[(y * y), $MachinePrecision]), $MachinePrecision] - 3.0), $MachinePrecision]
\begin{array}{l}
\\
\frac{x}{y \cdot y} - 3
\end{array}
Initial program 96.0%
(FPCore (x y) :precision binary64 -3.0)
double code(double x, double y) {
return -3.0;
}
real(8) function code(x, y)
real(8), intent (in) :: x
real(8), intent (in) :: y
code = -3.0d0
end function
public static double code(double x, double y) {
return -3.0;
}
def code(x, y): return -3.0
function code(x, y) return -3.0 end
function tmp = code(x, y) tmp = -3.0; end
code[x_, y_] := -3.0
\begin{array}{l}
\\
-3
\end{array}
Initial program 96.0%
Taylor expanded in x around 0
Applied rewrites 54.2%
(FPCore (x y) :precision binary64 (- (/ (/ x y) y) 3.0))
double code(double x, double y) {
return ((x / y) / y) - 3.0;
}
real(8) function code(x, y)
real(8), intent (in) :: x
real(8), intent (in) :: y
code = ((x / y) / y) - 3.0d0
end function
public static double code(double x, double y) {
return ((x / y) / y) - 3.0;
}
def code(x, y): return ((x / y) / y) - 3.0
function code(x, y) return Float64(Float64(Float64(x / y) / y) - 3.0) end
function tmp = code(x, y) tmp = ((x / y) / y) - 3.0; end
code[x_, y_] := N[(N[(N[(x / y), $MachinePrecision] / y), $MachinePrecision] - 3.0), $MachinePrecision]
\begin{array}{l}
\\
\frac{\frac{x}{y}}{y} - 3
\end{array}
herbie shell --seed 2024324
(FPCore (x y)
:name "Statistics.Sample:$skurtosis from math-functions-0.1.5.2"
:precision binary64
:alt
(! :herbie-platform default (- (/ (/ x y) y) 3))
(- (/ x (* y y)) 3.0))