
(FPCore (lo hi x) :precision binary64 (/ (- x lo) (- hi lo)))
/* Herbie input program (binary64): linearly map x from the interval
 * [lo, hi] onto [0, 1] as (x - lo) / (hi - lo). */
double code(double lo, double hi, double x) {
return (x - lo) / (hi - lo);
}
!> Herbie input program (binary64): linearly map x from the interval
!> [lo, hi] onto [0, 1] as (x - lo) / (hi - lo).
real(8) function code(lo, hi, x)
real(8), intent (in) :: lo
real(8), intent (in) :: hi
real(8), intent (in) :: x
code = (x - lo) / (hi - lo)
end function
public static double code(double lo, double hi, double x) {
return (x - lo) / (hi - lo);
}
# Herbie input program: linearly map x from [lo, hi] onto [0, 1].
def code(lo, hi, x): return (x - lo) / (hi - lo)
function code(lo, hi, x) return Float64(Float64(x - lo) / Float64(hi - lo)) end
function tmp = code(lo, hi, x) tmp = (x - lo) / (hi - lo); end
code[lo_, hi_, x_] := N[(N[(x - lo), $MachinePrecision] / N[(hi - lo), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{x - lo}{hi - lo}
\end{array}
Sampling outcomes in binary64 precision:
Herbie found 8 alternatives:
| Alternative | Accuracy | Speedup |
|---|---|---|
(FPCore (lo hi x) :precision binary64 (/ (- x lo) (- hi lo)))
/* Alternative 1: identical to the input program,
 * (x - lo) / (hi - lo) in binary64. */
double code(double lo, double hi, double x) {
return (x - lo) / (hi - lo);
}
!> Alternative 1: identical to the input program,
!> (x - lo) / (hi - lo) in binary64.
real(8) function code(lo, hi, x)
real(8), intent (in) :: lo
real(8), intent (in) :: hi
real(8), intent (in) :: x
code = (x - lo) / (hi - lo)
end function
public static double code(double lo, double hi, double x) {
return (x - lo) / (hi - lo);
}
# Alternative 1: identical to the input program.
def code(lo, hi, x): return (x - lo) / (hi - lo)
function code(lo, hi, x) return Float64(Float64(x - lo) / Float64(hi - lo)) end
function tmp = code(lo, hi, x) tmp = (x - lo) / (hi - lo); end
code[lo_, hi_, x_] := N[(N[(x - lo), $MachinePrecision] / N[(hi - lo), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{x - lo}{hi - lo}
\end{array}
(FPCore (lo hi x) :precision binary64 (+ 1.0 (* (exp (* 3.0 (log1p (* 0.3333333333333333 (/ hi lo))))) (/ (- hi x) lo))))
/* Herbie alternative: 1 + exp(3*log1p(hi/(3*lo))) * (hi - x)/lo.
 * Avoids forming hi - lo directly; per the report's derivation log this
 * comes from Taylor expansion plus the add-exp-log/log1p-define rules. */
double code(double lo, double hi, double x) {
return 1.0 + (exp((3.0 * log1p((0.3333333333333333 * (hi / lo))))) * ((hi - x) / lo));
}
public static double code(double lo, double hi, double x) {
return 1.0 + (Math.exp((3.0 * Math.log1p((0.3333333333333333 * (hi / lo))))) * ((hi - x) / lo));
}
# Herbie alternative: 1 + exp(3*log1p(hi/(3*lo))) * (hi - x)/lo; avoids hi - lo.
def code(lo, hi, x): return 1.0 + (math.exp((3.0 * math.log1p((0.3333333333333333 * (hi / lo))))) * ((hi - x) / lo))
function code(lo, hi, x) return Float64(1.0 + Float64(exp(Float64(3.0 * log1p(Float64(0.3333333333333333 * Float64(hi / lo))))) * Float64(Float64(hi - x) / lo))) end
code[lo_, hi_, x_] := N[(1.0 + N[(N[Exp[N[(3.0 * N[Log[1 + N[(0.3333333333333333 * N[(hi / lo), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]], $MachinePrecision] * N[(N[(hi - x), $MachinePrecision] / lo), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
1 + e^{3 \cdot \mathsf{log1p}\left(0.3333333333333333 \cdot \frac{hi}{lo}\right)} \cdot \frac{hi - x}{lo}
\end{array}
Initial program 3.1%
Taylor expanded in lo around inf 0.0%
Simplified 18.8%
add-cube-cbrt 18.8%
pow3 18.8%
+-commutative 18.8%
Applied egg-rr 18.8%
Taylor expanded in hi around 0 20.6%
add-exp-log 20.6%
log-pow 20.6%
log1p-define 20.6%
Applied egg-rr 20.6%
Final simplification 20.6%
(FPCore (lo hi x) :precision binary64 (+ 1.0 (* (/ (- hi x) lo) (pow (+ 1.0 (* 0.3333333333333333 (/ hi lo))) 3.0))))
/* Herbie alternative: 1 + ((hi - x)/lo) * (1 + hi/(3*lo))^3 — the
 * pow form of the exp/log1p variant; avoids forming hi - lo directly. */
double code(double lo, double hi, double x) {
return 1.0 + (((hi - x) / lo) * pow((1.0 + (0.3333333333333333 * (hi / lo))), 3.0));
}
!> Herbie alternative: 1 + ((hi - x)/lo) * (1 + hi/(3*lo))**3 — the
!> power form of the exp/log1p variant; avoids forming hi - lo directly.
real(8) function code(lo, hi, x)
real(8), intent (in) :: lo
real(8), intent (in) :: hi
real(8), intent (in) :: x
code = 1.0d0 + (((hi - x) / lo) * ((1.0d0 + (0.3333333333333333d0 * (hi / lo))) ** 3.0d0))
end function
public static double code(double lo, double hi, double x) {
return 1.0 + (((hi - x) / lo) * Math.pow((1.0 + (0.3333333333333333 * (hi / lo))), 3.0));
}
# Herbie alternative: 1 + ((hi - x)/lo) * (1 + hi/(3*lo))**3; avoids hi - lo.
def code(lo, hi, x): return 1.0 + (((hi - x) / lo) * math.pow((1.0 + (0.3333333333333333 * (hi / lo))), 3.0))
function code(lo, hi, x) return Float64(1.0 + Float64(Float64(Float64(hi - x) / lo) * (Float64(1.0 + Float64(0.3333333333333333 * Float64(hi / lo))) ^ 3.0))) end
function tmp = code(lo, hi, x) tmp = 1.0 + (((hi - x) / lo) * ((1.0 + (0.3333333333333333 * (hi / lo))) ^ 3.0)); end
code[lo_, hi_, x_] := N[(1.0 + N[(N[(N[(hi - x), $MachinePrecision] / lo), $MachinePrecision] * N[Power[N[(1.0 + N[(0.3333333333333333 * N[(hi / lo), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], 3.0], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
1 + \frac{hi - x}{lo} \cdot {\left(1 + 0.3333333333333333 \cdot \frac{hi}{lo}\right)}^{3}
\end{array}
Initial program 3.1%
Taylor expanded in lo around inf 0.0%
Simplified 18.8%
add-cube-cbrt 18.8%
pow3 18.8%
+-commutative 18.8%
Applied egg-rr 18.8%
Taylor expanded in hi around 0 20.6%
Final simplification 20.6%
(FPCore (lo hi x) :precision binary64 (+ 1.0 (/ (* hi (pow (+ 1.0 (* 0.3333333333333333 (/ hi lo))) 3.0)) lo)))
/* Herbie alternative: 1 + (hi * (1 + hi/(3*lo))^3) / lo. Drops x
 * entirely (the log shows a Taylor expansion in x around 0). */
double code(double lo, double hi, double x) {
return 1.0 + ((hi * pow((1.0 + (0.3333333333333333 * (hi / lo))), 3.0)) / lo);
}
!> Herbie alternative: 1 + (hi * (1 + hi/(3*lo))**3) / lo. Drops x
!> entirely (the log shows a Taylor expansion in x around 0).
real(8) function code(lo, hi, x)
real(8), intent (in) :: lo
real(8), intent (in) :: hi
real(8), intent (in) :: x
code = 1.0d0 + ((hi * ((1.0d0 + (0.3333333333333333d0 * (hi / lo))) ** 3.0d0)) / lo)
end function
public static double code(double lo, double hi, double x) {
return 1.0 + ((hi * Math.pow((1.0 + (0.3333333333333333 * (hi / lo))), 3.0)) / lo);
}
# Herbie alternative: 1 + (hi * (1 + hi/(3*lo))**3) / lo; x is unused.
def code(lo, hi, x): return 1.0 + ((hi * math.pow((1.0 + (0.3333333333333333 * (hi / lo))), 3.0)) / lo)
function code(lo, hi, x) return Float64(1.0 + Float64(Float64(hi * (Float64(1.0 + Float64(0.3333333333333333 * Float64(hi / lo))) ^ 3.0)) / lo)) end
function tmp = code(lo, hi, x) tmp = 1.0 + ((hi * ((1.0 + (0.3333333333333333 * (hi / lo))) ^ 3.0)) / lo); end
code[lo_, hi_, x_] := N[(1.0 + N[(N[(hi * N[Power[N[(1.0 + N[(0.3333333333333333 * N[(hi / lo), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], 3.0], $MachinePrecision]), $MachinePrecision] / lo), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
1 + \frac{hi \cdot {\left(1 + 0.3333333333333333 \cdot \frac{hi}{lo}\right)}^{3}}{lo}
\end{array}
Initial program 3.1%
Taylor expanded in lo around inf 0.0%
Simplified 18.8%
add-cube-cbrt 18.8%
pow3 18.8%
+-commutative 18.8%
Applied egg-rr 18.8%
Taylor expanded in hi around 0 20.6%
Taylor expanded in x around 0 20.6%
Final simplification 20.6%
(FPCore (lo hi x) :precision binary64 (+ 1.0 (* hi (/ (fabs (+ 1.0 (/ hi lo))) lo))))
/* Herbie alternative: 1 + hi * |1 + hi/lo| / lo. The absolute value
 * comes from the sqrt/square rewrite chain in the derivation log; x unused. */
double code(double lo, double hi, double x) {
return 1.0 + (hi * (fabs((1.0 + (hi / lo))) / lo));
}
!> Herbie alternative: 1 + hi * abs(1 + hi/lo) / lo. The absolute value
!> comes from the sqrt/square rewrite chain in the derivation log; x unused.
real(8) function code(lo, hi, x)
real(8), intent (in) :: lo
real(8), intent (in) :: hi
real(8), intent (in) :: x
code = 1.0d0 + (hi * (abs((1.0d0 + (hi / lo))) / lo))
end function
public static double code(double lo, double hi, double x) {
return 1.0 + (hi * (Math.abs((1.0 + (hi / lo))) / lo));
}
# Herbie alternative: 1 + hi * abs(1 + hi/lo) / lo; x is unused.
def code(lo, hi, x): return 1.0 + (hi * (math.fabs((1.0 + (hi / lo))) / lo))
function code(lo, hi, x) return Float64(1.0 + Float64(hi * Float64(abs(Float64(1.0 + Float64(hi / lo))) / lo))) end
function tmp = code(lo, hi, x) tmp = 1.0 + (hi * (abs((1.0 + (hi / lo))) / lo)); end
code[lo_, hi_, x_] := N[(1.0 + N[(hi * N[(N[Abs[N[(1.0 + N[(hi / lo), $MachinePrecision]), $MachinePrecision]], $MachinePrecision] / lo), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
1 + hi \cdot \frac{\left|1 + \frac{hi}{lo}\right|}{lo}
\end{array}
Initial program 3.1%
Taylor expanded in lo around inf 0.0%
Simplified 18.8%
add-sqr-sqrt 8.9%
sqrt-unprod 19.4%
pow2 19.4%
+-commutative 19.4%
Applied egg-rr 19.4%
unpow2 19.4%
rem-sqrt-square 19.4%
Simplified 19.4%
Taylor expanded in hi around inf 19.4%
associate-/l* 19.4%
Simplified 19.4%
Final simplification 19.4%
(FPCore (lo hi x) :precision binary64 (+ 1.0 (/ (* hi (fabs (+ 1.0 (/ hi lo)))) lo)))
/* Herbie alternative: 1 + (hi * |1 + hi/lo|) / lo — same terms as the
 * previous variant, with the division by lo performed last; x unused. */
double code(double lo, double hi, double x) {
return 1.0 + ((hi * fabs((1.0 + (hi / lo)))) / lo);
}
!> Herbie alternative: 1 + (hi * abs(1 + hi/lo)) / lo — same terms as the
!> previous variant, with the division by lo performed last; x unused.
real(8) function code(lo, hi, x)
real(8), intent (in) :: lo
real(8), intent (in) :: hi
real(8), intent (in) :: x
code = 1.0d0 + ((hi * abs((1.0d0 + (hi / lo)))) / lo)
end function
public static double code(double lo, double hi, double x) {
return 1.0 + ((hi * Math.abs((1.0 + (hi / lo)))) / lo);
}
# Herbie alternative: 1 + (hi * abs(1 + hi/lo)) / lo; x is unused.
def code(lo, hi, x): return 1.0 + ((hi * math.fabs((1.0 + (hi / lo)))) / lo)
function code(lo, hi, x) return Float64(1.0 + Float64(Float64(hi * abs(Float64(1.0 + Float64(hi / lo)))) / lo)) end
function tmp = code(lo, hi, x) tmp = 1.0 + ((hi * abs((1.0 + (hi / lo)))) / lo); end
code[lo_, hi_, x_] := N[(1.0 + N[(N[(hi * N[Abs[N[(1.0 + N[(hi / lo), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision] / lo), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
1 + \frac{hi \cdot \left|1 + \frac{hi}{lo}\right|}{lo}
\end{array}
Initial program 3.1%
Taylor expanded in lo around inf 0.0%
Simplified 18.8%
add-sqr-sqrt 8.9%
sqrt-unprod 19.4%
pow2 19.4%
+-commutative 19.4%
Applied egg-rr 19.4%
unpow2 19.4%
rem-sqrt-square 19.4%
Simplified 19.4%
Taylor expanded in hi around inf 19.4%
Final simplification 19.4%
(FPCore (lo hi x) :precision binary64 (/ (- x lo) hi))
/* Herbie alternative: (x - lo) / hi — drops lo from the denominator
 * (Taylor expansion in hi around inf, per the derivation log). */
double code(double lo, double hi, double x) {
return (x - lo) / hi;
}
!> Herbie alternative: (x - lo) / hi — drops lo from the denominator
!> (Taylor expansion in hi around inf, per the derivation log).
real(8) function code(lo, hi, x)
real(8), intent (in) :: lo
real(8), intent (in) :: hi
real(8), intent (in) :: x
code = (x - lo) / hi
end function
public static double code(double lo, double hi, double x) {
return (x - lo) / hi;
}
# Herbie alternative: (x - lo) / hi — drops lo from the denominator.
def code(lo, hi, x): return (x - lo) / hi
function code(lo, hi, x) return Float64(Float64(x - lo) / hi) end
function tmp = code(lo, hi, x) tmp = (x - lo) / hi; end
code[lo_, hi_, x_] := N[(N[(x - lo), $MachinePrecision] / hi), $MachinePrecision]
\begin{array}{l}
\\
\frac{x - lo}{hi}
\end{array}
Initial program 3.1%
Taylor expanded in hi around inf 18.8%
Final simplification 18.8%
(FPCore (lo hi x) :precision binary64 (/ lo (- hi)))
/* Herbie alternative: lo / (-hi) — keeps only the dominant terms
 * (Taylor expansions in lo and x around 0, per the log); x unused. */
double code(double lo, double hi, double x) {
return lo / -hi;
}
!> Herbie alternative: lo / (-hi) — keeps only the dominant terms
!> (Taylor expansions in lo and x around 0, per the log); x unused.
!>
!> Fix: the generated `code = lo / -hi` is not legal Fortran — the
!> standard forbids a binary operator immediately followed by a unary
!> operator, so the negation must be parenthesized.
real(8) function code(lo, hi, x)
real(8), intent (in) :: lo
real(8), intent (in) :: hi
real(8), intent (in) :: x
code = lo / (-hi)
end function
public static double code(double lo, double hi, double x) {
return lo / -hi;
}
# Herbie alternative: lo / (-hi) — dominant-term approximation; x is unused.
def code(lo, hi, x): return lo / -hi
function code(lo, hi, x) return Float64(lo / Float64(-hi)) end
function tmp = code(lo, hi, x) tmp = lo / -hi; end
code[lo_, hi_, x_] := N[(lo / (-hi)), $MachinePrecision]
\begin{array}{l}
\\
\frac{lo}{-hi}
\end{array}
Initial program 3.1%
Taylor expanded in lo around 0 18.8%
+-commutative 18.8%
mul-1-neg 18.8%
unsub-neg 18.8%
+-commutative 18.8%
mul-1-neg 18.8%
unsub-neg 18.8%
Simplified 18.8%
Taylor expanded in x around 0 18.8%
neg-mul-1 18.8%
distribute-neg-frac 18.8%
Simplified 18.8%
Final simplification 18.8%
(FPCore (lo hi x) :precision binary64 1.0)
/* Herbie alternative: the constant 1.0 — all three arguments are
 * discarded (Taylor expansion in lo around inf, per the log). */
double code(double lo, double hi, double x) {
return 1.0;
}
!> Herbie alternative: the constant 1.0 — all three arguments are
!> discarded (Taylor expansion in lo around inf, per the log).
real(8) function code(lo, hi, x)
real(8), intent (in) :: lo
real(8), intent (in) :: hi
real(8), intent (in) :: x
code = 1.0d0
end function
public static double code(double lo, double hi, double x) {
return 1.0;
}
# Herbie alternative: the constant 1.0; all arguments are discarded.
def code(lo, hi, x): return 1.0
function code(lo, hi, x) return 1.0 end
function tmp = code(lo, hi, x) tmp = 1.0; end
code[lo_, hi_, x_] := 1.0
\begin{array}{l}
\\
1
\end{array}
Initial program 3.1%
Taylor expanded in lo around inf 18.6%
Final simplification 18.6%
herbie shell --seed 2024075
(FPCore (lo hi x)
:name "xlohi (overflows)"
:precision binary64
:pre (and (< lo -1e+308) (> hi 1e+308))
(/ (- x lo) (- hi lo)))