
(FPCore (x y) :precision binary64 (/ (+ x y) (* (* x 2.0) y)))
double code(double x, double y) {
    /* Herbie input expression: (x + y) / (2*x*y). The denominator is kept
       as (x * 2.0) * y so the rounding order matches the original exactly. */
    const double numerator = x + y;
    const double denominator = (x * 2.0) * y;
    return numerator / denominator;
}
real(8) function code(x, y)
! Herbie input expression: (x + y) / (2*x*y).
! The denominator is evaluated as (x*2)*y to keep the original rounding order.
real(8), intent (in) :: x
real(8), intent (in) :: y
real(8) :: numerator
real(8) :: denominator
numerator = x + y
denominator = (x * 2.0d0) * y
code = numerator / denominator
end function
public static double code(double x, double y) {
    // Herbie input expression: (x + y) / (2*x*y), with the denominator
    // evaluated as (x * 2.0) * y to preserve the original rounding order.
    double numerator = x + y;
    double denominator = (x * 2.0) * y;
    return numerator / denominator;
}
def code(x, y):
    """Herbie input expression (x + y) / (2*x*y).

    The denominator is evaluated as (x * 2.0) * y to preserve the
    original floating-point rounding order.
    """
    numerator = x + y
    denominator = (x * 2.0) * y
    return numerator / denominator
# Herbie input: (x + y) / (2*x*y); denominator evaluated as (x * 2.0) * y in Float64.
function code(x, y) return Float64(Float64(x + y) / Float64(Float64(x * 2.0) * y)) end
% Herbie input: (x + y) / (2*x*y); denominator evaluated as (x * 2.0) * y.
function tmp = code(x, y) tmp = (x + y) / ((x * 2.0) * y); end
(* Herbie input: (x + y) / (2*x*y), each step rounded at $MachinePrecision. *)
code[x_, y_] := N[(N[(x + y), $MachinePrecision] / N[(N[(x * 2.0), $MachinePrecision] * y), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{x + y}{\left(x \cdot 2\right) \cdot y}
\end{array}
Sampling outcomes in binary64 precision:
Herbie found 5 alternatives:
| Alternative | Accuracy | Speedup |
|---|---|---|
(FPCore (x y) :precision binary64 (/ (+ x y) (* (* x 2.0) y)))
double code(double x, double y) {
/* Herbie input: (x + y) / (2*x*y); denominator evaluated as (x * 2.0) * y. */
return (x + y) / ((x * 2.0) * y);
}
! Herbie input: (x + y) / (2*x*y); denominator evaluated as (x*2)*y.
real(8) function code(x, y)
real(8), intent (in) :: x
real(8), intent (in) :: y
code = (x + y) / ((x * 2.0d0) * y)
end function
public static double code(double x, double y) {
// Herbie input: (x + y) / (2*x*y); denominator evaluated as (x * 2.0) * y.
return (x + y) / ((x * 2.0) * y);
}
# Herbie input: (x + y) / (2*x*y); denominator evaluated as (x * 2.0) * y.
def code(x, y): return (x + y) / ((x * 2.0) * y)
# Herbie input: (x + y) / (2*x*y); denominator evaluated as (x * 2.0) * y in Float64.
function code(x, y) return Float64(Float64(x + y) / Float64(Float64(x * 2.0) * y)) end
% Herbie input: (x + y) / (2*x*y); denominator evaluated as (x * 2.0) * y.
function tmp = code(x, y) tmp = (x + y) / ((x * 2.0) * y); end
(* Herbie input: (x + y) / (2*x*y), each step rounded at $MachinePrecision. *)
code[x_, y_] := N[(N[(x + y), $MachinePrecision] / N[(N[(x * 2.0), $MachinePrecision] * y), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{x + y}{\left(x \cdot 2\right) \cdot y}
\end{array}
(FPCore (x y) :precision binary64 (+ (/ 0.5 y) (/ 0.5 x)))
double code(double x, double y) {
/* Herbie alternative: 0.5/y + 0.5/x, an accuracy rewrite of (x+y)/(2*x*y). */
return (0.5 / y) + (0.5 / x);
}
! Herbie alternative: 0.5/y + 0.5/x, an accuracy rewrite of (x+y)/(2*x*y).
real(8) function code(x, y)
real(8), intent (in) :: x
real(8), intent (in) :: y
code = (0.5d0 / y) + (0.5d0 / x)
end function
public static double code(double x, double y) {
// Herbie alternative: 0.5/y + 0.5/x, an accuracy rewrite of (x+y)/(2*x*y).
return (0.5 / y) + (0.5 / x);
}
def code(x, y):
    """Herbie alternative: 0.5/y + 0.5/x, a rewrite of (x + y) / (2*x*y)."""
    half_over_y = 0.5 / y
    half_over_x = 0.5 / x
    return half_over_y + half_over_x
# Herbie alternative: 0.5/y + 0.5/x in Float64, a rewrite of (x+y)/(2*x*y).
function code(x, y) return Float64(Float64(0.5 / y) + Float64(0.5 / x)) end
% Herbie alternative: 0.5/y + 0.5/x, a rewrite of (x+y)/(2*x*y).
function tmp = code(x, y) tmp = (0.5 / y) + (0.5 / x); end
(* Herbie alternative: 0.5/y + 0.5/x, rounded at $MachinePrecision. *)
code[x_, y_] := N[(N[(0.5 / y), $MachinePrecision] + N[(0.5 / x), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{0.5}{y} + \frac{0.5}{x}
\end{array}
Initial program 77.1%
Taylor expanded in x around 0 100.0%
associate-*r/ 100.0%
metadata-eval 100.0%
associate-*r/ 100.0%
metadata-eval 100.0%
Simplified 100.0%
Final simplification 100.0%
(FPCore (x y) :precision binary64 (if (or (<= x -6e-36) (and (not (<= x -6.8e-87)) (<= x -3.4e-132))) (/ 0.5 y) (/ 0.5 x)))
double code(double x, double y) {
    /* Herbie regime split: on the selected negative-x intervals use 0.5/y,
       otherwise use 0.5/x. Same branch condition as the original. */
    int use_y = (x <= -6e-36) || (!(x <= -6.8e-87) && (x <= -3.4e-132));
    return use_y ? 0.5 / y : 0.5 / x;
}
! Herbie regime split: 0.5/y on the selected negative-x intervals, else 0.5/x.
real(8) function code(x, y)
real(8), intent (in) :: x
real(8), intent (in) :: y
real(8) :: tmp
! .and. binds tighter than .or., so this parses as
! (x <= -6d-36) .or. ((.not. x <= -6.8d-87) .and. (x <= -3.4d-132)),
! matching the C and Java variants of this alternative.
if ((x <= (-6d-36)) .or. (.not. (x <= (-6.8d-87))) .and. (x <= (-3.4d-132))) then
tmp = 0.5d0 / y
else
tmp = 0.5d0 / x
end if
code = tmp
end function
public static double code(double x, double y) {
    // Herbie regime split: on the selected negative-x intervals use 0.5/y,
    // otherwise use 0.5/x. Same branch condition as the original.
    boolean useY = (x <= -6e-36) || (!(x <= -6.8e-87) && (x <= -3.4e-132));
    return useY ? 0.5 / y : 0.5 / x;
}
def code(x, y):
    """Herbie regime-split rewrite of (x + y) / (2*x*y).

    Returns 0.5 / y when x lies in the Herbie-selected negative intervals
    (x <= -6e-36, or -6.8e-87 < x <= -3.4e-132), otherwise 0.5 / x.

    Bug fixed: the generated one-liner collapsed the if/else statement onto
    the ``def`` line, which is a SyntaxError in Python; reformatted into a
    valid multi-line body with identical logic.
    """
    if (x <= -6e-36) or (not (x <= -6.8e-87) and (x <= -3.4e-132)):
        tmp = 0.5 / y
    else:
        tmp = 0.5 / x
    return tmp
# Herbie regime split: 0.5/y on the selected negative-x intervals, else 0.5/x.
function code(x, y) tmp = 0.0 if ((x <= -6e-36) || (!(x <= -6.8e-87) && (x <= -3.4e-132))) tmp = Float64(0.5 / y); else tmp = Float64(0.5 / x); end return tmp end
% Herbie regime split: 0.5/y on the selected negative-x intervals, else 0.5/x.
function tmp_2 = code(x, y) tmp = 0.0; if ((x <= -6e-36) || (~((x <= -6.8e-87)) && (x <= -3.4e-132))) tmp = 0.5 / y; else tmp = 0.5 / x; end tmp_2 = tmp; end
(* Herbie regime split: 0.5/y on the selected negative-x intervals, else 0.5/x.
   NOTE(review): the generator wraps the boolean Not[...] in N[..., $MachinePrecision];
   presumably harmless, but confirm against Herbie's Mathematica backend. *)
code[x_, y_] := If[Or[LessEqual[x, -6e-36], And[N[Not[LessEqual[x, -6.8e-87]], $MachinePrecision], LessEqual[x, -3.4e-132]]], N[(0.5 / y), $MachinePrecision], N[(0.5 / x), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;x \leq -6 \cdot 10^{-36} \lor \neg \left(x \leq -6.8 \cdot 10^{-87}\right) \land x \leq -3.4 \cdot 10^{-132}:\\
\;\;\;\;\frac{0.5}{y}\\
\mathbf{else}:\\
\;\;\;\;\frac{0.5}{x}\\
\end{array}
\end{array}
if x < -6.0000000000000003e-36 or -6.7999999999999997e-87 < x < -3.39999999999999983e-132
Initial program 81.6%
Taylor expanded in x around inf 66.5%
if -6.0000000000000003e-36 < x < -6.7999999999999997e-87 or -3.39999999999999983e-132 < x
Initial program 74.6%
Taylor expanded in x around 0 65.9%
Final simplification 66.1%
(FPCore (x y) :precision binary64 (/ 0.5 x))
double code(double x, double y) {
/* Herbie alternative: 0.5/x; the y argument is unused in this variant. */
return 0.5 / x;
}
! Herbie alternative: 0.5/x; the y argument is unused in this variant.
real(8) function code(x, y)
real(8), intent (in) :: x
real(8), intent (in) :: y
code = 0.5d0 / x
end function
public static double code(double x, double y) {
// Herbie alternative: 0.5/x; the y argument is unused in this variant.
return 0.5 / x;
}
def code(x, y):
    """Herbie alternative: 0.5 / x; the y argument is unused here."""
    result = 0.5 / x
    return result
# Herbie alternative: 0.5/x in Float64; the y argument is unused.
function code(x, y) return Float64(0.5 / x) end
% Herbie alternative: 0.5/x; the y argument is unused.
function tmp = code(x, y) tmp = 0.5 / x; end
(* Herbie alternative: 0.5/x at $MachinePrecision; the y argument is unused. *)
code[x_, y_] := N[(0.5 / x), $MachinePrecision]
\begin{array}{l}
\\
\frac{0.5}{x}
\end{array}
Initial program 77.1%
Taylor expanded in x around 0 55.0%
Final simplification 55.0%
(FPCore (x y) :precision binary64 0.0)
double code(double x, double y) {
/* Herbie alternative: constant 0.0; both arguments are unused. */
return 0.0;
}
! Herbie alternative: constant 0.0; both arguments are unused.
real(8) function code(x, y)
real(8), intent (in) :: x
real(8), intent (in) :: y
code = 0.0d0
end function
public static double code(double x, double y) {
// Herbie alternative: constant 0.0; both arguments are unused.
return 0.0;
}
def code(x, y):
    """Herbie alternative: the constant 0.0; both arguments are unused."""
    return 0.0
# Herbie alternative: constant 0.0; both arguments are unused.
function code(x, y) return 0.0 end
% Herbie alternative: constant 0.0; both arguments are unused.
function tmp = code(x, y) tmp = 0.0; end
(* Herbie alternative: constant 0.0; both arguments are unused. *)
code[x_, y_] := 0.0
\begin{array}{l}
\\
0
\end{array}
Initial program 77.1%
add-log-exp7.5%
*-un-lft-identity7.5%
log-prod7.5%
metadata-eval7.5%
add-log-exp77.1%
associate-*l*77.1%
*-commutative77.1%
Applied egg-rr77.1%
+-lft-identity77.1%
*-rgt-identity77.1%
associate-*r/75.9%
*-commutative75.9%
associate-*r*75.9%
*-commutative75.9%
count-275.9%
associate-/r*76.4%
count-276.4%
associate-/r*76.4%
metadata-eval76.4%
Simplified76.4%
clear-num76.0%
un-div-inv77.1%
div-inv77.1%
associate-/r*77.1%
div-inv77.1%
metadata-eval77.1%
associate-/l*87.3%
div-inv87.3%
associate-/l/77.1%
associate-/r*87.4%
+-commutative87.4%
add-log-exp4.5%
exp-lft-sqr4.5%
log-prod4.5%
add-log-exp10.2%
add-log-exp87.4%
Applied egg-rr87.4%
Applied egg-rr2.6%
+-inverses2.6%
Simplified 2.6%
Final simplification 2.6%
(FPCore (x y) :precision binary64 x)
double code(double x, double y) {
/* Herbie alternative: identity in x; the y argument is unused. */
return x;
}
! Herbie alternative: identity in x; the y argument is unused.
real(8) function code(x, y)
real(8), intent (in) :: x
real(8), intent (in) :: y
code = x
end function
public static double code(double x, double y) {
// Herbie alternative: identity in x; the y argument is unused.
return x;
}
def code(x, y):
    """Herbie alternative: identity in x; the y argument is unused."""
    return x
# Herbie alternative: identity in x; the y argument is unused.
function code(x, y) return x end
% Herbie alternative: identity in x; the y argument is unused.
function tmp = code(x, y) tmp = x; end
(* Herbie alternative: identity in x; the y argument is unused. *)
code[x_, y_] := x
\begin{array}{l}
\\
x
\end{array}
Initial program 77.1%
add-log-exp7.5%
*-un-lft-identity7.5%
log-prod7.5%
metadata-eval7.5%
add-log-exp77.1%
associate-*l*77.1%
*-commutative77.1%
Applied egg-rr77.1%
+-lft-identity77.1%
*-rgt-identity77.1%
associate-*r/75.9%
*-commutative75.9%
associate-*r*75.9%
*-commutative75.9%
count-275.9%
associate-/r*76.4%
count-276.4%
associate-/r*76.4%
metadata-eval76.4%
Simplified76.4%
clear-num76.0%
un-div-inv77.1%
div-inv77.1%
associate-/r*77.1%
div-inv77.1%
metadata-eval77.1%
associate-/l*87.3%
div-inv87.3%
associate-/l/77.1%
associate-/r*87.4%
+-commutative87.4%
add-log-exp4.5%
exp-lft-sqr4.5%
log-prod4.5%
add-log-exp10.2%
add-log-exp87.4%
Applied egg-rr87.4%
Applied egg-rr2.8%
unpow12.8%
sqr-pow1.5%
fabs-sqr1.5%
sqr-pow3.3%
unpow13.3%
Simplified 3.3%
Final simplification 3.3%
(FPCore (x y) :precision binary64 (+ (/ 0.5 x) (/ 0.5 y)))
double code(double x, double y) {
/* Herbie target form: 0.5/x + 0.5/y, a rewrite of (x+y)/(2*x*y). */
return (0.5 / x) + (0.5 / y);
}
! Herbie target form: 0.5/x + 0.5/y, a rewrite of (x+y)/(2*x*y).
real(8) function code(x, y)
real(8), intent (in) :: x
real(8), intent (in) :: y
code = (0.5d0 / x) + (0.5d0 / y)
end function
public static double code(double x, double y) {
// Herbie target form: 0.5/x + 0.5/y, a rewrite of (x+y)/(2*x*y).
return (0.5 / x) + (0.5 / y);
}
def code(x, y):
    """Herbie target form: 0.5/x + 0.5/y, a rewrite of (x + y) / (2*x*y)."""
    half_over_x = 0.5 / x
    half_over_y = 0.5 / y
    return half_over_x + half_over_y
# Herbie target form: 0.5/x + 0.5/y in Float64, a rewrite of (x+y)/(2*x*y).
function code(x, y) return Float64(Float64(0.5 / x) + Float64(0.5 / y)) end
% Herbie target form: 0.5/x + 0.5/y, a rewrite of (x+y)/(2*x*y).
function tmp = code(x, y) tmp = (0.5 / x) + (0.5 / y); end
(* Herbie target form: 0.5/x + 0.5/y, rounded at $MachinePrecision. *)
code[x_, y_] := N[(N[(0.5 / x), $MachinePrecision] + N[(0.5 / y), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{0.5}{x} + \frac{0.5}{y}
\end{array}
herbie shell --seed 2023174
(FPCore (x y)
:name "Linear.Projection:inversePerspective from linear-1.19.1.3, C"
:precision binary64
:herbie-target
(+ (/ 0.5 x) (/ 0.5 y))
(/ (+ x y) (* (* x 2.0) y)))