
; Original program: (1.7e308 * t) - 1.7e308, evaluated in IEEE binary64.
(FPCore (t) :precision binary64 (- (* 1.7e+308 t) 1.7e+308))
/* Evaluate (1.7e308 * t) - 1.7e308 in binary64.
   The intermediate product can overflow to +inf for t not much above 1,
   which is why the unfused form loses accuracy. */
double code(double t) {
	double scaled = 1.7e+308 * t;
	return scaled - 1.7e+308;
}
! Evaluate (1.7e308 * t) - 1.7e308 in double precision.
! The product is kept as a separate step so the rounding/overflow order
! matches the unfused expression exactly.
real(8) function code(t)
    implicit none
    real(8), intent(in) :: t
    real(8) :: scaled
    scaled = 1.7d+308 * t
    code = scaled - 1.7d+308
end function
// Evaluate (1.7e308 * t) - 1.7e308 in double precision;
// the unfused product may overflow to +Infinity for moderately large t.
public static double code(double t) {
	final double scaled = 1.7e+308 * t;
	return scaled - 1.7e+308;
}
def code(t):
    """Evaluate (1.7e308 * t) - 1.7e308 in IEEE binary64.

    The intermediate product can overflow to inf for t not much above 1.
    """
    scaled = 1.7e+308 * t
    return scaled - 1.7e+308
# Evaluate (1.7e308 * t) - 1.7e308 in Float64, one rounding per operation.
function code(t)
    scaled = Float64(1.7e+308 * t)
    return Float64(scaled - 1.7e+308)
end
% Evaluate (1.7e308 * t) - 1.7e308 in double precision.
function tmp = code(t)
	scaled = 1.7e+308 * t;
	tmp = scaled - 1.7e+308;
end
(* Evaluate (1.7*10^308 * t) - 1.7*10^308 at machine precision.
   Fix: Wolfram Language scientific notation is *^, not e+;
   "1.7e+308" would parse as 1.7*e + 308. *)
code[t_] := N[(N[(1.7*^308 * t), $MachinePrecision] - 1.7*^308), $MachinePrecision]
\begin{array}{l}
\\
1.7 \cdot 10^{+308} \cdot t - 1.7 \cdot 10^{+308}
\end{array}
Sampling outcomes in binary64 precision:
Herbie found 3 alternatives:
| Alternative | Accuracy | Speedup |
|---|---|---|
; Alternative listing: repeats the original program (1.7e308 * t) - 1.7e308.
(FPCore (t) :precision binary64 (- (* 1.7e+308 t) 1.7e+308))
/* Evaluate (1.7e308 * t) - 1.7e308 in binary64; the unfused product
   can overflow to +inf before the subtraction is applied. */
double code(double t) {
	double scaled = 1.7e+308 * t;
	return scaled - 1.7e+308;
}
! Evaluate (1.7e308 * t) - 1.7e308 in double precision; the product is
! computed first, matching the unfused expression's rounding order.
real(8) function code(t)
    implicit none
    real(8), intent(in) :: t
    real(8) :: scaled
    scaled = 1.7d+308 * t
    code = scaled - 1.7d+308
end function
// Evaluate (1.7e308 * t) - 1.7e308 in double precision (unfused form).
public static double code(double t) {
	final double scaled = 1.7e+308 * t;
	return scaled - 1.7e+308;
}
def code(t):
    """Evaluate (1.7e308 * t) - 1.7e308 in IEEE binary64 (unfused form)."""
    scaled = 1.7e+308 * t
    return scaled - 1.7e+308
# Evaluate (1.7e308 * t) - 1.7e308 in Float64 (unfused form).
function code(t)
    scaled = Float64(1.7e+308 * t)
    return Float64(scaled - 1.7e+308)
end
% Evaluate (1.7e308 * t) - 1.7e308 in double precision (unfused form).
function tmp = code(t)
	scaled = 1.7e+308 * t;
	tmp = scaled - 1.7e+308;
end
(* Evaluate (1.7*10^308 * t) - 1.7*10^308 at machine precision.
   Fix: Wolfram Language uses *^ for scientific notation, not e+. *)
code[t_] := N[(N[(1.7*^308 * t), $MachinePrecision] - 1.7*^308), $MachinePrecision]
\begin{array}{l}
\\
1.7 \cdot 10^{+308} \cdot t - 1.7 \cdot 10^{+308}
\end{array}
; Alternative 1: single fused multiply-add — one rounding, no intermediate overflow.
(FPCore (t) :precision binary64 (fma 1.7e+308 t -1.7e+308))
double code(double t) {
return fma(1.7e+308, t, -1.7e+308);
}
# Fused multiply-add: 1.7e308*t - 1.7e308 rounded once (no intermediate overflow).
function code(t)
    scale = 1.7e+308
    return fma(scale, t, -scale)
end
(* fma alternative: 1.7*10^308 * t - 1.7*10^308, evaluated in one rounding.
   Fix: replaced invalid "1.7e+308" literals with Wolfram *^ notation. *)
code[t_] := N[(1.7*^308 * t + -1.7*^308), $MachinePrecision]
\begin{array}{l}
\\
\mathsf{fma}\left(1.7 \cdot 10^{+308}, t, -1.7 \cdot 10^{+308}\right)
\end{array}
Initial program 34.2%
fma-neg 99.6%
metadata-eval 99.6%
Simplified 99.6%
Final simplification 99.6%
; Alternative 2: Taylor expansion around t = inf drops the subtraction entirely.
(FPCore (t) :precision binary64 (* 1.7e+308 t))
/* Taylor-expanded alternative: just 1.7e308 * t (subtraction dropped). */
double code(double t) {
	const double scale = 1.7e+308;
	return scale * t;
}
! Taylor-expanded alternative: just 1.7e308 * t (subtraction dropped).
real(8) function code(t)
    implicit none
    real(8), intent(in) :: t
    real(8), parameter :: scale = 1.7d+308
    code = scale * t
end function
// Taylor-expanded alternative: just 1.7e308 * t (subtraction dropped).
public static double code(double t) {
	final double scale = 1.7e+308;
	return scale * t;
}
def code(t):
    """Taylor-expanded alternative: return 1.7e308 * t (subtraction dropped)."""
    scale = 1.7e+308
    return scale * t
# Taylor-expanded alternative: just 1.7e308 * t in Float64.
function code(t)
    scale = 1.7e+308
    return Float64(scale * t)
end
% Taylor-expanded alternative: just 1.7e308 * t (subtraction dropped).
function tmp = code(t)
	scale = 1.7e+308;
	tmp = scale * t;
end
(* Taylor-expanded alternative: just 1.7*10^308 * t.
   Fix: replaced invalid "1.7e+308" literal with Wolfram *^ notation. *)
code[t_] := N[(1.7*^308 * t), $MachinePrecision]
\begin{array}{l}
\\
1.7 \cdot 10^{+308} \cdot t
\end{array}
Initial program 34.2%
Taylor expanded in t around inf 34.2%
Final simplification 34.2%
; Alternative 3: Taylor expansion around t = 0 collapses to the constant -1.7e308.
(FPCore (t) :precision binary64 -1.7e+308)
/* Degenerate alternative: always returns the constant -1.7e308. */
double code(double t) {
	(void)t; /* parameter intentionally ignored */
	return -1.7e+308;
}
! Degenerate alternative: the constant -1.7e308, independent of t.
real(8) function code(t)
    implicit none
    real(8), intent(in) :: t
    real(8), parameter :: c = -1.7d+308
    code = c
end function
// Degenerate alternative: always returns the constant -1.7e308 (t ignored).
public static double code(double t) {
	final double c = -1.7e+308;
	return c;
}
def code(t):
    """Degenerate alternative: return the constant -1.7e308 (t is ignored)."""
    result = -1.7e+308
    return result
# Degenerate alternative: the constant -1.7e308, independent of t.
function code(t)
    result = -1.7e+308
    return result
end
% Degenerate alternative: the constant -1.7e308, independent of t.
function tmp = code(t)
	c = -1.7e+308;
	tmp = c;
end
(* Degenerate alternative: the constant -1.7*10^308.
   Fix: replaced invalid "-1.7e+308" literal with Wolfram *^ notation. *)
code[t_] := -1.7*^308
\begin{array}{l}
\\
-1.7 \cdot 10^{+308}
\end{array}
Initial program 34.2%
Taylor expanded in t around 0 0.0%
Final simplification 0.0%
; Target expression: fma form with the subtrahend written as (- 1.7e+308).
(FPCore (t) :precision binary64 (fma 1.7e+308 t (- 1.7e+308)))
double code(double t) {
return fma(1.7e+308, t, -1.7e+308);
}
# Target: fused multiply-add 1.7e308*t - 1.7e308, rounded once.
function code(t)
    scale = 1.7e+308
    return fma(scale, t, -scale)
end
(* Target: 1.7*10^308 * t - 1.7*10^308 evaluated at machine precision.
   Fix: replaced invalid "1.7e+308" literals with Wolfram *^ notation. *)
code[t_] := N[(1.7*^308 * t + (-1.7*^308)), $MachinePrecision]
\begin{array}{l}
\\
\mathsf{fma}\left(1.7 \cdot 10^{+308}, t, -1.7 \cdot 10^{+308}\right)
\end{array}
herbie shell --seed 2023178
; Input specification fed to the Herbie shell invocation above:
; improve (1.7e308 * t) - 1.7e308 for t in [1.9, 2.1], with the fused
; multiply-add form supplied as the known-good target.
(FPCore (t)
:name "fma_test2"
:precision binary64
:pre (and (<= 1.9 t) (<= t 2.1))
:herbie-target
(fma 1.7e+308 t (- 1.7e+308))
(- (* 1.7e+308 t) 1.7e+308))