
(FPCore (t) :precision binary64 (let* ((t_1 (+ 1.0 (* t 2e-16)))) (+ (* t_1 t_1) (- -1.0 (* 2.0 (* t 2e-16))))))
/* Evaluates (1 + x)^2 + (-1 - 2x) with x = t * 2e-16; algebraically x^2.
 * Same floating-point operation order as the reference translation. */
double code(double t) {
    double x = t * 2e-16;      /* scaled input */
    double onePlusX = 1.0 + x; /* 1 + x */
    return (onePlusX * onePlusX) + (-1.0 - (2.0 * x));
}
! Evaluates (1 + x)**2 + (-1 - 2*x) with x = 2d-16 * t; algebraically x**2.
real(8) function code(t)
real(8), intent (in) :: t
real(8) :: t_1
! t_1 = 1 + x
t_1 = 1.0d0 + (t * 2d-16)
! (1 + x)^2 plus the correction term (-1 - 2x)
code = (t_1 * t_1) + ((-1.0d0) - (2.0d0 * (t * 2d-16)))
end function
// Evaluates (1 + x)^2 + (-1 - 2x) with x = t * 2e-16; algebraically x^2.
// Same floating-point operation order as the reference translation.
public static double code(double t) {
    double x = t * 2e-16;      // scaled input
    double onePlusX = 1.0 + x; // 1 + x
    return (onePlusX * onePlusX) + (-1.0 - (2.0 * x));
}
def code(t):
    """Evaluate (1 + x)**2 + (-1 - 2*x) with x = t * 2e-16 (algebraically x**2).

    Reformatted onto multiple lines: the original single-line form placed
    ``return`` directly after an assignment with no separator, which is a
    SyntaxError in Python.
    """
    t_1 = 1.0 + (t * 2e-16)
    return (t_1 * t_1) + (-1.0 - (2.0 * (t * 2e-16)))
# Evaluate (1 + x)^2 + (-1 - 2x) with x = t * 2e-16 (algebraically x^2).
# Reformatted: the one-line original ran the assignment and `return`
# together with no separator, which does not parse in Julia.
function code(t)
    t_1 = Float64(1.0 + Float64(t * 2e-16))
    return Float64(Float64(t_1 * t_1) + Float64(-1.0 - Float64(2.0 * Float64(t * 2e-16))))
end
% Evaluate (1 + x)^2 + (-1 - 2x) with x = t * 2e-16 (algebraically x^2).
% Reformatted: MATLAB requires a newline or comma between the function
% declaration and the first statement.
function tmp = code(t)
    t_1 = 1.0 + (t * 2e-16);
    tmp = (t_1 * t_1) + (-1.0 - (2.0 * (t * 2e-16)));
end
(* Evaluates (1 + x)^2 + (-1 - 2x) with x = t * 2e-16 (algebraically x^2), rounding each step to $MachinePrecision. *)
code[t_] := Block[{t$95$1 = N[(1.0 + N[(t * 2e-16), $MachinePrecision]), $MachinePrecision]}, N[(N[(t$95$1 * t$95$1), $MachinePrecision] + N[(-1.0 - N[(2.0 * N[(t * 2e-16), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
t_1 := 1 + t \cdot 2 \cdot 10^{-16}\\
t_1 \cdot t_1 + \left(-1 - 2 \cdot \left(t \cdot 2 \cdot 10^{-16}\right)\right)
\end{array}
\end{array}
Sampling outcomes in binary64 precision:
Herbie found 2 alternatives:
| Alternative | Accuracy | Speedup |
|---|---|---|
(FPCore (t) :precision binary64 (let* ((t_1 (+ 1.0 (* t 2e-16)))) (+ (* t_1 t_1) (- -1.0 (* 2.0 (* t 2e-16))))))
/* Evaluates (1 + x)^2 + (-1 - 2x) with x = t * 2e-16; algebraically x^2.
 * Same floating-point operation order as the reference translation. */
double code(double t) {
    double x = t * 2e-16;      /* scaled input */
    double onePlusX = 1.0 + x; /* 1 + x */
    return (onePlusX * onePlusX) + (-1.0 - (2.0 * x));
}
! Evaluates (1 + x)**2 + (-1 - 2*x) with x = 2d-16 * t; algebraically x**2.
real(8) function code(t)
real(8), intent (in) :: t
real(8) :: t_1
! t_1 = 1 + x
t_1 = 1.0d0 + (t * 2d-16)
! (1 + x)^2 plus the correction term (-1 - 2x)
code = (t_1 * t_1) + ((-1.0d0) - (2.0d0 * (t * 2d-16)))
end function
// Evaluates (1 + x)^2 + (-1 - 2x) with x = t * 2e-16; algebraically x^2.
// Same floating-point operation order as the reference translation.
public static double code(double t) {
    double x = t * 2e-16;      // scaled input
    double onePlusX = 1.0 + x; // 1 + x
    return (onePlusX * onePlusX) + (-1.0 - (2.0 * x));
}
def code(t):
    """Evaluate (1 + x)**2 + (-1 - 2*x) with x = t * 2e-16 (algebraically x**2).

    Reformatted onto multiple lines: the original single-line form placed
    ``return`` directly after an assignment with no separator, which is a
    SyntaxError in Python.
    """
    t_1 = 1.0 + (t * 2e-16)
    return (t_1 * t_1) + (-1.0 - (2.0 * (t * 2e-16)))
# Evaluate (1 + x)^2 + (-1 - 2x) with x = t * 2e-16 (algebraically x^2).
# Reformatted: the one-line original ran the assignment and `return`
# together with no separator, which does not parse in Julia.
function code(t)
    t_1 = Float64(1.0 + Float64(t * 2e-16))
    return Float64(Float64(t_1 * t_1) + Float64(-1.0 - Float64(2.0 * Float64(t * 2e-16))))
end
% Evaluate (1 + x)^2 + (-1 - 2x) with x = t * 2e-16 (algebraically x^2).
% Reformatted: MATLAB requires a newline or comma between the function
% declaration and the first statement.
function tmp = code(t)
    t_1 = 1.0 + (t * 2e-16);
    tmp = (t_1 * t_1) + (-1.0 - (2.0 * (t * 2e-16)));
end
(* Evaluates (1 + x)^2 + (-1 - 2x) with x = t * 2e-16 (algebraically x^2), rounding each step to $MachinePrecision. *)
code[t_] := Block[{t$95$1 = N[(1.0 + N[(t * 2e-16), $MachinePrecision]), $MachinePrecision]}, N[(N[(t$95$1 * t$95$1), $MachinePrecision] + N[(-1.0 - N[(2.0 * N[(t * 2e-16), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
t_1 := 1 + t \cdot 2 \cdot 10^{-16}\\
t_1 \cdot t_1 + \left(-1 - 2 \cdot \left(t \cdot 2 \cdot 10^{-16}\right)\right)
\end{array}
\end{array}
(FPCore (t) :precision binary64 (* t (* t 4e-32)))
/* Evaluates t * (t * 4e-32) — the simplified form of (1+x)^2 - 1 - 2x.
 * Same multiply order as the reference translation. */
double code(double t) {
    double scaled = t * 4e-32; /* t scaled by 4e-32 */
    return t * scaled;
}
! Evaluates t * (t * 4d-32) - the simplified form of (1+x)**2 - 1 - 2*x.
real(8) function code(t)
real(8), intent (in) :: t
code = t * (t * 4d-32)
end function
// Evaluates t * (t * 4e-32) — the simplified form of (1+x)^2 - 1 - 2x.
// Same multiply order as the reference translation.
public static double code(double t) {
    double scaled = t * 4e-32; // t scaled by 4e-32
    return t * scaled;
}
def code(t):
    """Return t * (t * 4e-32) — the simplified form of (1+x)**2 - 1 - 2*x."""
    scaled = t * 4e-32
    return t * scaled
# Return t * (t * 4e-32) in binary64 — the simplified form of (1+x)^2 - 1 - 2x.
function code(t)
    scaled = Float64(t * 4e-32)
    return Float64(t * scaled)
end
% Return t * (t * 4e-32) - the simplified form of (1+x)^2 - 1 - 2x.
% Reformatted: MATLAB requires a newline or comma between the function
% declaration and the first statement.
function tmp = code(t)
    tmp = t * (t * 4e-32);
end
(* Evaluates t * (t * 4e-32) - the simplified form of (1+x)^2 - 1 - 2x - rounding each step to $MachinePrecision. *)
code[t_] := N[(t * N[(t * 4e-32), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
t \cdot \left(t \cdot 4 \cdot 10^{-32}\right)
\end{array}
Initial program 3.4%
associate-+r-9.9%
sub-neg9.9%
+-commutative9.9%
difference-of-sqr--19.9%
+-commutative9.9%
associate--l+3.4%
metadata-eval3.4%
+-rgt-identity3.4%
distribute-lft1-in20.8%
*-commutative20.8%
associate-+r+20.8%
+-commutative20.8%
+-commutative20.8%
*-rgt-identity20.8%
Simplified99.5%
Final simplification99.5%
(FPCore (t) :precision binary64 (* 4e-32 (* t t)))
/* Evaluates 4e-32 * (t * t) — the final simplified form.
 * Same multiply order as the reference translation. */
double code(double t) {
    double square = t * t; /* t^2 */
    return 4e-32 * square;
}
! Evaluates 4d-32 * (t * t) - the final simplified form.
real(8) function code(t)
real(8), intent (in) :: t
code = 4d-32 * (t * t)
end function
// Evaluates 4e-32 * (t * t) — the final simplified form.
// Same multiply order as the reference translation.
public static double code(double t) {
    double square = t * t; // t^2
    return 4e-32 * square;
}
def code(t):
    """Return 4e-32 * (t * t) — the final simplified form."""
    square = t * t
    return 4e-32 * square
# Return 4e-32 * (t * t) in binary64 — the final simplified form.
function code(t)
    square = Float64(t * t)
    return Float64(4e-32 * square)
end
% Return 4e-32 * (t * t) - the final simplified form.
% Reformatted: MATLAB requires a newline or comma between the function
% declaration and the first statement.
function tmp = code(t)
    tmp = 4e-32 * (t * t);
end
(* Evaluates 4e-32 * (t * t) - the final simplified form - rounding each step to $MachinePrecision. *)
code[t_] := N[(4e-32 * N[(t * t), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
4 \cdot 10^{-32} \cdot \left(t \cdot t\right)
\end{array}
Initial program 3.4%
associate-+r-9.9%
sub-neg9.9%
+-commutative9.9%
difference-of-sqr--19.9%
+-commutative9.9%
associate--l+3.4%
metadata-eval3.4%
+-rgt-identity3.4%
distribute-lft1-in20.8%
*-commutative20.8%
associate-+r+20.8%
+-commutative20.8%
+-commutative20.8%
*-rgt-identity20.8%
Simplified99.5%
Taylor expanded in t around 0 99.4%
unpow299.4%
Simplified99.4%
Final simplification99.4%
(FPCore (t) :precision binary64 (let* ((t_1 (+ 1.0 (* t 2e-16)))) (fma t_1 t_1 (- -1.0 (* 2.0 (* t 2e-16))))))
double code(double t) {
double t_1 = 1.0 + (t * 2e-16);
return fma(t_1, t_1, (-1.0 - (2.0 * (t * 2e-16))));
}
# fma variant: fused (1+x)*(1+x) + (-1-2x) with x = t * 2e-16.
# Reformatted: the one-line original ran the assignment and `return`
# together with no separator, which does not parse in Julia.
function code(t)
    t_1 = Float64(1.0 + Float64(t * 2e-16))
    return fma(t_1, t_1, Float64(-1.0 - Float64(2.0 * Float64(t * 2e-16))))
end
(* fma variant: (1+x)*(1+x) + (-1-2x) with x = t * 2e-16; the product-plus-add is left unrounded inside the final N to model a fused multiply-add. *)
code[t_] := Block[{t$95$1 = N[(1.0 + N[(t * 2e-16), $MachinePrecision]), $MachinePrecision]}, N[(t$95$1 * t$95$1 + N[(-1.0 - N[(2.0 * N[(t * 2e-16), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
t_1 := 1 + t \cdot 2 \cdot 10^{-16}\\
\mathsf{fma}\left(t_1, t_1, -1 - 2 \cdot \left(t \cdot 2 \cdot 10^{-16}\right)\right)
\end{array}
\end{array}
herbie shell --seed 2023178
(FPCore (t)
:name "fma_test1"
:precision binary64
:pre (and (<= 0.9 t) (<= t 1.1))
:herbie-target
(fma (+ 1.0 (* t 2e-16)) (+ 1.0 (* t 2e-16)) (- -1.0 (* 2.0 (* t 2e-16))))
(+ (* (+ 1.0 (* t 2e-16)) (+ 1.0 (* t 2e-16))) (- -1.0 (* 2.0 (* t 2e-16)))))