
(FPCore (lo hi x) :precision binary64 (/ (- x lo) (- hi lo)))
/* Linearly map x from the interval [lo, hi] onto [0, 1]. */
double code(double lo, double hi, double x) {
    double span = hi - lo;
    double offset = x - lo;
    return offset / span;
}
real(8) function code(lo, hi, x)
  ! Linearly map x from the interval [lo, hi] onto [0, 1].
  implicit none
  real(8), intent(in) :: lo
  real(8), intent(in) :: hi
  real(8), intent(in) :: x
  code = (x - lo) / (hi - lo)
end function code
/** Linearly maps {@code x} from the interval [lo, hi] onto [0, 1]. */
public static double code(double lo, double hi, double x) {
    final double span = hi - lo;
    return (x - lo) / span;
}
def code(lo, hi, x):
    """Linearly map ``x`` from the interval [lo, hi] onto [0, 1]."""
    span = hi - lo
    return (x - lo) / span
# Linearly map x from the interval [lo, hi] onto [0, 1].
function code(lo, hi, x)
    numer = Float64(x - lo)
    denom = Float64(hi - lo)
    return Float64(numer / denom)
end
% Linearly map x from the interval [lo, hi] onto [0, 1].
function tmp = code(lo, hi, x)
    span = hi - lo;
    tmp = (x - lo) / span;
end
(* Linearly map x from [lo, hi] onto [0, 1]; each N[...] rounds an intermediate to machine precision. *)
code[lo_, hi_, x_] := N[(N[(x - lo), $MachinePrecision] / N[(hi - lo), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{x - lo}{hi - lo}
\end{array}
Sampling outcomes in binary64 precision:
Herbie found 5 alternatives:
| Alternative | Accuracy | Speedup |
|---|---|---|
(FPCore (lo hi x) :precision binary64 (/ (- x lo) (- hi lo)))
/* Linearly map x from the interval [lo, hi] onto [0, 1]. */
double code(double lo, double hi, double x) {
    double span = hi - lo;
    double offset = x - lo;
    return offset / span;
}
real(8) function code(lo, hi, x)
  ! Linearly map x from the interval [lo, hi] onto [0, 1].
  implicit none
  real(8), intent(in) :: lo
  real(8), intent(in) :: hi
  real(8), intent(in) :: x
  code = (x - lo) / (hi - lo)
end function code
/** Linearly maps {@code x} from the interval [lo, hi] onto [0, 1]. */
public static double code(double lo, double hi, double x) {
    final double span = hi - lo;
    return (x - lo) / span;
}
def code(lo, hi, x):
    """Linearly map ``x`` from the interval [lo, hi] onto [0, 1]."""
    span = hi - lo
    return (x - lo) / span
# Linearly map x from the interval [lo, hi] onto [0, 1].
function code(lo, hi, x)
    numer = Float64(x - lo)
    denom = Float64(hi - lo)
    return Float64(numer / denom)
end
% Linearly map x from the interval [lo, hi] onto [0, 1].
function tmp = code(lo, hi, x)
    span = hi - lo;
    tmp = (x - lo) / span;
end
(* Linearly map x from [lo, hi] onto [0, 1]; each N[...] rounds an intermediate to machine precision. *)
code[lo_, hi_, x_] := N[(N[(x - lo), $MachinePrecision] / N[(hi - lo), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{x - lo}{hi - lo}
\end{array}
(FPCore (lo hi x) :precision binary64 (/ (exp (* lo (- (/ 1.0 hi) (/ (log (/ -1.0 lo)) lo)))) hi))
double code(double lo, double hi, double x) {
return exp((lo * ((1.0 / hi) - (log((-1.0 / lo)) / lo)))) / hi;
}
real(8) function code(lo, hi, x)
  ! Herbie-rewritten form exp(lo*(1/hi - log(-1/lo)/lo))/hi.
  ! Requires lo < 0 so that log(-1/lo) is defined; x is intentionally unused.
  implicit none
  real(8), intent(in) :: lo
  real(8), intent(in) :: hi
  real(8), intent(in) :: x
  code = exp((lo * ((1.0d0 / hi) - (log(((-1.0d0) / lo)) / lo)))) / hi
end function code
/** Herbie alternative: exp(lo * (1/hi - log(-1/lo)/lo)) / hi; needs lo &lt; 0, x unused. */
public static double code(double lo, double hi, double x) {
    final double invHi = 1.0 / hi;
    final double logTerm = Math.log(-1.0 / lo) / lo;
    return Math.exp(lo * (invHi - logTerm)) / hi;
}
def code(lo, hi, x):
    """Herbie alternative exp(lo*(1/hi - log(-1/lo)/lo))/hi; needs lo < 0, x unused."""
    log_term = math.log(-1.0 / lo) / lo
    return math.exp(lo * ((1.0 / hi) - log_term)) / hi
# Herbie alternative: exp(lo*(1/hi - log(-1/lo)/lo))/hi; needs lo < 0, x unused.
function code(lo, hi, x)
    inv_hi = Float64(1.0 / hi)
    log_term = Float64(log(Float64(-1.0 / lo)) / lo)
    return Float64(exp(Float64(lo * Float64(inv_hi - log_term))) / hi)
end
% Herbie alternative: exp(lo*(1/hi - log(-1/lo)/lo))/hi; needs lo < 0, x unused.
function tmp = code(lo, hi, x)
    log_term = log(-1.0 / lo) / lo;
    tmp = exp(lo * ((1.0 / hi) - log_term)) / hi;
end
(* Herbie alternative exp(lo*(1/hi - log(-1/lo)/lo))/hi; needs lo < 0, x unused. N[...] rounds each step. *)
code[lo_, hi_, x_] := N[(N[Exp[N[(lo * N[(N[(1.0 / hi), $MachinePrecision] - N[(N[Log[N[(-1.0 / lo), $MachinePrecision]], $MachinePrecision] / lo), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]], $MachinePrecision] / hi), $MachinePrecision]
\begin{array}{l}
\\
\frac{e^{lo \cdot \left(\frac{1}{hi} - \frac{\log \left(\frac{-1}{lo}\right)}{lo}\right)}}{hi}
\end{array}
Initial program 3.1%
Taylor expanded in hi around inf 0.7%
+-commutative 0.7%
associate--l+ 0.7%
+-commutative 0.7%
*-rgt-identity 0.7%
*-commutative 0.7%
associate-/l* 10.4%
distribute-lft-out 10.7%
Simplified 10.7%
add-exp-log 10.0%
log-prod 10.0%
log1p-define 10.0%
Applied egg-rr 10.0%
Taylor expanded in hi around inf 20.5%
+-commutative 20.5%
Simplified 20.5%
Taylor expanded in lo around -inf 20.5%
associate-*r* 20.5%
neg-mul-1 20.5%
Simplified 20.5%
Final simplification 20.5%
(FPCore (lo hi x) :precision binary64 (/ (* (- lo) (exp (/ lo hi))) hi))
double code(double lo, double hi, double x) {
return (-lo * exp((lo / hi))) / hi;
}
real(8) function code(lo, hi, x)
  ! Herbie-rewritten form (-lo)*exp(lo/hi)/hi; x is intentionally unused.
  implicit none
  real(8), intent(in) :: lo
  real(8), intent(in) :: hi
  real(8), intent(in) :: x
  code = (-lo * exp((lo / hi))) / hi
end function code
/** Herbie alternative: (-lo) * exp(lo/hi) / hi; x is intentionally unused. */
public static double code(double lo, double hi, double x) {
    final double growth = Math.exp(lo / hi);
    return (-lo * growth) / hi;
}
def code(lo, hi, x):
    """Herbie alternative (-lo)*exp(lo/hi)/hi; x is intentionally unused."""
    growth = math.exp(lo / hi)
    return (-lo * growth) / hi
# Herbie alternative: (-lo)*exp(lo/hi)/hi; x is intentionally unused.
function code(lo, hi, x)
    scale = Float64(-lo)
    growth = exp(Float64(lo / hi))
    return Float64(Float64(scale * growth) / hi)
end
% Herbie alternative: (-lo)*exp(lo/hi)/hi; x is intentionally unused.
function tmp = code(lo, hi, x)
    growth = exp(lo / hi);
    tmp = (-lo * growth) / hi;
end
(* Herbie alternative (-lo)*exp(lo/hi)/hi; x is intentionally unused. *)
code[lo_, hi_, x_] := N[(N[((-lo) * N[Exp[N[(lo / hi), $MachinePrecision]], $MachinePrecision]), $MachinePrecision] / hi), $MachinePrecision]
\begin{array}{l}
\\
\frac{\left(-lo\right) \cdot e^{\frac{lo}{hi}}}{hi}
\end{array}
Initial program 3.1%
Taylor expanded in hi around inf 0.7%
+-commutative 0.7%
associate--l+ 0.7%
+-commutative 0.7%
*-rgt-identity 0.7%
*-commutative 0.7%
associate-/l* 10.4%
distribute-lft-out 10.7%
Simplified 10.7%
add-exp-log 10.0%
log-prod 10.0%
log1p-define 10.0%
Applied egg-rr 10.0%
Taylor expanded in hi around inf 20.5%
+-commutative 20.5%
Simplified 20.5%
Taylor expanded in x around 0 20.5%
exp-sum 20.5%
rem-exp-log 20.5%
Simplified 20.5%
(FPCore (lo hi x) :precision binary64 (/ (- x lo) hi))
/* Herbie alternative: (x - lo) / hi (denominator lo term dropped). */
double code(double lo, double hi, double x) {
    double shifted = x - lo;
    return shifted / hi;
}
real(8) function code(lo, hi, x)
  ! Herbie alternative: (x - lo)/hi (denominator lo term dropped).
  implicit none
  real(8), intent(in) :: lo
  real(8), intent(in) :: hi
  real(8), intent(in) :: x
  code = (x - lo) / hi
end function code
/** Herbie alternative: (x - lo) / hi (denominator lo term dropped). */
public static double code(double lo, double hi, double x) {
    final double shifted = x - lo;
    return shifted / hi;
}
def code(lo, hi, x):
    """Herbie alternative (x - lo)/hi (denominator lo term dropped)."""
    shifted = x - lo
    return shifted / hi
# Herbie alternative: (x - lo)/hi (denominator lo term dropped).
function code(lo, hi, x)
    shifted = Float64(x - lo)
    return Float64(shifted / hi)
end
% Herbie alternative: (x - lo)/hi (denominator lo term dropped).
function tmp = code(lo, hi, x)
    shifted = x - lo;
    tmp = shifted / hi;
end
(* Herbie alternative (x - lo)/hi (denominator lo term dropped). *)
code[lo_, hi_, x_] := N[(N[(x - lo), $MachinePrecision] / hi), $MachinePrecision]
\begin{array}{l}
\\
\frac{x - lo}{hi}
\end{array}
Initial program 3.1%
Taylor expanded in hi around inf 18.9%
(FPCore (lo hi x) :precision binary64 (- 1.0 (/ x lo)))
/* Herbie alternative: 1 - x/lo; hi is intentionally unused. */
double code(double lo, double hi, double x) {
    double ratio = x / lo;
    return 1.0 - ratio;
}
real(8) function code(lo, hi, x)
  ! Herbie alternative: 1 - x/lo; hi is intentionally unused.
  implicit none
  real(8), intent(in) :: lo
  real(8), intent(in) :: hi
  real(8), intent(in) :: x
  code = 1.0d0 - (x / lo)
end function code
/** Herbie alternative: 1 - x/lo; hi is intentionally unused. */
public static double code(double lo, double hi, double x) {
    final double ratio = x / lo;
    return 1.0 - ratio;
}
def code(lo, hi, x):
    """Herbie alternative 1 - x/lo; hi is intentionally unused."""
    ratio = x / lo
    return 1.0 - ratio
# Herbie alternative: 1 - x/lo; hi is intentionally unused.
function code(lo, hi, x)
    ratio = Float64(x / lo)
    return Float64(1.0 - ratio)
end
% Herbie alternative: 1 - x/lo; hi is intentionally unused.
function tmp = code(lo, hi, x)
    ratio = x / lo;
    tmp = 1.0 - ratio;
end
(* Herbie alternative 1 - x/lo; hi is intentionally unused. *)
code[lo_, hi_, x_] := N[(1.0 - N[(x / lo), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
1 - \frac{x}{lo}
\end{array}
Initial program 3.1%
Taylor expanded in hi around 0 18.6%
div-sub 18.6%
sub-neg 18.6%
*-inverses 18.6%
metadata-eval 18.6%
distribute-lft-in 18.6%
metadata-eval 18.6%
+-commutative 18.6%
mul-1-neg 18.6%
unsub-neg 18.6%
Simplified 18.6%
(FPCore (lo hi x) :precision binary64 1.0)
/* Herbie alternative: constant 1.0; all three inputs are intentionally unused. */
double code(double lo, double hi, double x) {
    return 1.0;
}
real(8) function code(lo, hi, x)
  ! Herbie alternative: constant 1.0; all three inputs are intentionally unused.
  implicit none
  real(8), intent(in) :: lo
  real(8), intent(in) :: hi
  real(8), intent(in) :: x
  code = 1.0d0
end function code
/** Herbie alternative: constant 1.0; all three inputs are intentionally unused. */
public static double code(double lo, double hi, double x) {
    return 1.0;
}
def code(lo, hi, x):
    """Herbie alternative: constant 1.0; all three inputs are intentionally unused."""
    return 1.0
# Herbie alternative: constant 1.0; all three inputs are intentionally unused.
code(lo, hi, x) = 1.0
% Herbie alternative: constant 1.0; all three inputs are intentionally unused.
function tmp = code(lo, hi, x)
    tmp = 1.0;
end
(* Herbie alternative: constant 1.0; all three inputs are intentionally unused. *)
code[lo_, hi_, x_] := 1.0
\begin{array}{l}
\\
1
\end{array}
Initial program 3.1%
Taylor expanded in lo around inf 18.6%
herbie shell --seed 2024101
(FPCore (lo hi x)
:name "xlohi (overflows)"
:precision binary64
:pre (and (< lo -1e+308) (> hi 1e+308))
(/ (- x lo) (- hi lo)))