
; Initial program: f(t) = (1.7e308 * t) - 1.7e308, evaluated in binary64
(FPCore (t) :precision binary64 (- (* 1.7e+308 t) 1.7e+308))
/* Evaluate 1.7e308 * t - 1.7e308 in double precision.
 * Note: the intermediate product overflows to +/-Inf once |1.7e308 * t|
 * exceeds DBL_MAX (~1.8e308), i.e. for |t| greater than about 1.06. */
double code(double t) {
    const double big = 1.7e+308;
    return big * t - big;
}
! Evaluate (1.7d308 * t) - 1.7d308 in double precision.
! The intermediate product overflows to +/-Infinity once it exceeds
! huge(0d0) (~1.8d308), i.e. for |t| greater than about 1.06.
! Fix: added `implicit none` (was missing, allowing implicitly typed names).
real(8) function code(t)
implicit none
real(8), intent (in) :: t
code = (1.7d+308 * t) - 1.7d+308
end function
/** Computes 1.7e308 * t - 1.7e308 in double precision. */
public static double code(double t) {
    final double scale = 1.7e+308;
    return scale * t - scale;
}
def code(t):
    """Return 1.7e308 * t - 1.7e308 (IEEE binary64 semantics)."""
    big = 1.7e308
    return big * t - big
# Julia: 1.7e308 * t - 1.7e308, with each step explicitly rounded to Float64
function code(t) return Float64(Float64(1.7e+308 * t) - 1.7e+308) end
% MATLAB/Octave: 1.7e308 * t - 1.7e308 in double precision
function tmp = code(t) tmp = (1.7e+308 * t) - 1.7e+308; end
(* Wolfram: 1.7e308 * t - 1.7e308, each step rounded to $MachinePrecision *)
code[t_] := N[(N[(1.7e+308 * t), $MachinePrecision] - 1.7e+308), $MachinePrecision]
\begin{array}{l}
\\
1.7 \cdot 10^{+308} \cdot t - 1.7 \cdot 10^{+308}
\end{array}
Sampling outcomes in binary64 precision:
Herbie found 3 alternatives:
| Alternative | Accuracy | Speedup |
|---|---|---|
; Alternative 1: identical to the initial program
(FPCore (t) :precision binary64 (- (* 1.7e+308 t) 1.7e+308))
/* Alternative 1: same expression as the input, 1.7e308 * t - 1.7e308 */
double code(double t) {
return (1.7e+308 * t) - 1.7e+308;
}
! Alternative 1: same expression as the input, 1.7d308 * t - 1.7d308
real(8) function code(t)
real(8), intent (in) :: t
code = (1.7d+308 * t) - 1.7d+308
end function
/* Alternative 1: same expression as the input */
public static double code(double t) {
return (1.7e+308 * t) - 1.7e+308;
}
# Alternative 1: same expression as the input
def code(t): return (1.7e+308 * t) - 1.7e+308
# Alternative 1: same expression, each step explicitly rounded to Float64
function code(t) return Float64(Float64(1.7e+308 * t) - 1.7e+308) end
% Alternative 1: same expression as the input
function tmp = code(t) tmp = (1.7e+308 * t) - 1.7e+308; end
(* Alternative 1: same expression, rounded to $MachinePrecision at each step *)
code[t_] := N[(N[(1.7e+308 * t), $MachinePrecision] - 1.7e+308), $MachinePrecision]
\begin{array}{l}
\\
1.7 \cdot 10^{+308} \cdot t - 1.7 \cdot 10^{+308}
\end{array}
; Alternative: single fused multiply-add -- one rounding instead of two
(FPCore (t) :precision binary64 (fma t 1.7e+308 -1.7e+308))
/* Fused alternative: fma(t, 1.7e308, -1.7e308) from <math.h>, single rounding */
double code(double t) {
return fma(t, 1.7e+308, -1.7e+308);
}
# Fused multiply-add alternative: rounds once instead of twice
function code(t) return fma(t, 1.7e+308, -1.7e+308) end
(* fma rendered as a plain multiply-add at $MachinePrecision; not actually fused *)
code[t_] := N[(t * 1.7e+308 + -1.7e+308), $MachinePrecision]
\begin{array}{l}
\\
\mathsf{fma}\left(t, 1.7 \cdot 10^{+308}, -1.7 \cdot 10^{+308}\right)
\end{array}
Initial program 31.6%
lift--.f64 N/A
sub-neg N/A
lift-*.f64 N/A
*-commutative N/A
lower-fma.f64 N/A
metadata-eval 99.6
Applied rewrites 99.6%
; Alternative: keep only t * 1.7e308 (Taylor expansion around t = inf, per the log below)
(FPCore (t) :precision binary64 (* t 1.7e+308))
/* Alternative: t * 1.7e308 with the subtraction dropped */
double code(double t) {
return t * 1.7e+308;
}
! Alternative: t * 1.7d308 with the subtraction dropped
real(8) function code(t)
real(8), intent (in) :: t
code = t * 1.7d+308
end function
/* Alternative: t * 1.7e308 with the subtraction dropped */
public static double code(double t) {
return t * 1.7e+308;
}
# Alternative: t * 1.7e308 with the subtraction dropped
def code(t): return t * 1.7e+308
# Alternative: t * 1.7e308 with the subtraction dropped
function code(t) return Float64(t * 1.7e+308) end
% Alternative: t * 1.7e308 with the subtraction dropped
function tmp = code(t) tmp = t * 1.7e+308; end
(* Alternative: t * 1.7e308 with the subtraction dropped *)
code[t_] := N[(t * 1.7e+308), $MachinePrecision]
\begin{array}{l}
\\
t \cdot 1.7 \cdot 10^{+308}
\end{array}
Initial program 31.6%
Taylor expanded in t around 0
Applied rewrites 0.0%
Taylor expanded in t around inf
*-commutative N/A
lower-*.f64 31.6
Applied rewrites 31.6%
; Alternative: constant -1.7e308 (Taylor expansion around t = 0, per the log below)
(FPCore (t) :precision binary64 -1.7e+308)
/* Constant alternative: always returns -1.7e308, ignoring t */
double code(double t) {
return -1.7e+308;
}
! Constant alternative: always returns -1.7d308, ignoring t
real(8) function code(t)
real(8), intent (in) :: t
code = -1.7d+308
end function
/* Constant alternative: always returns -1.7e308, ignoring t */
public static double code(double t) {
return -1.7e+308;
}
# Constant alternative: always returns -1.7e308, ignoring t
def code(t): return -1.7e+308
# Constant alternative: always returns -1.7e308, ignoring t
function code(t) return -1.7e+308 end
% Constant alternative: always returns -1.7e308, ignoring t
function tmp = code(t) tmp = -1.7e+308; end
(* Constant alternative: always returns -1.7e308, ignoring t *)
code[t_] := -1.7e+308
\begin{array}{l}
\\
-1.7 \cdot 10^{+308}
\end{array}
Initial program 31.6%
Taylor expanded in t around 0
Applied rewrites 0.0%
; Alternative: fma with operands in the original order, 1.7e308 * t + (-1.7e308)
(FPCore (t) :precision binary64 (fma 1.7e+308 t (- 1.7e+308)))
/* fma variant with the constant as the first factor; single rounding */
double code(double t) {
return fma(1.7e+308, t, -1.7e+308);
}
# fma variant; the negated constant is wrapped in an explicit Float64 conversion
function code(t) return fma(1.7e+308, t, Float64(-1.7e+308)) end
(* fma rendered as a plain multiply-add at $MachinePrecision; not actually fused *)
code[t_] := N[(1.7e+308 * t + (-1.7e+308)), $MachinePrecision]
\begin{array}{l}
\\
\mathsf{fma}\left(1.7 \cdot 10^{+308}, t, -1.7 \cdot 10^{+308}\right)
\end{array}
herbie shell --seed 2024323
; Full FPCore input submitted to Herbie (seed 2024323 above).
; :pre restricts sampling to 1.9 <= t <= 2.1; :alt supplies a hand-written
; fma-based rewrite. The long integer literal appears to be 1.7e308 spelled
; out in full -- TODO confirm the digit count matches 1.7 * 10^308.
(FPCore (t)
:name "fma_test2"
:precision binary64
:pre (and (<= 1.9 t) (<= t 2.1))
:alt
(! :herbie-platform default (let ((x 170000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000)) (fma x t (- x))))
(- (* 1.7e+308 t) 1.7e+308))