
(FPCore (a b c d) :precision binary64 (* a (+ (+ b c) d)))
// Herbie-generated binary64 kernel: computes a * ((b + c) + d).
double code(double a, double b, double c, double d) {
return a * ((b + c) + d);
}
! Herbie-generated binary64 kernel: computes a * ((b + c) + d).
! real(8) is the generator's spelling of a 64-bit real; kept verbatim for report fidelity.
real(8) function code(a, b, c, d)
real(8), intent (in) :: a
real(8), intent (in) :: b
real(8), intent (in) :: c
real(8), intent (in) :: d
code = a * ((b + c) + d)
end function
// Herbie-generated binary64 kernel: computes a * ((b + c) + d).
public static double code(double a, double b, double c, double d) {
return a * ((b + c) + d);
}
# Herbie-generated kernel: computes a * ((b + c) + d) in Python floats (binary64).
def code(a, b, c, d): return a * ((b + c) + d)
# Herbie-generated kernel: a * ((b + c) + d); Float64() wraps each step to force binary64 rounding.
function code(a, b, c, d) return Float64(a * Float64(Float64(b + c) + d)) end
% Herbie-generated kernel: computes a * ((b + c) + d) in double precision.
function tmp = code(a, b, c, d) tmp = a * ((b + c) + d); end
(* Herbie-generated kernel: a * ((b + c) + d); each N[..., $MachinePrecision] mimics per-operation binary64 rounding. *)
code[a_, b_, c_, d_] := N[(a * N[(N[(b + c), $MachinePrecision] + d), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
a \cdot \left(\left(b + c\right) + d\right)
\end{array}
Sampling outcomes in binary64 precision:
Herbie found 4 alternatives:
| Alternative | Accuracy | Speedup |
|---|---|---|
(FPCore (a b c d) :precision binary64 (* a (+ (+ b c) d)))
// Herbie-generated binary64 kernel: computes a * ((b + c) + d).
double code(double a, double b, double c, double d) {
return a * ((b + c) + d);
}
! Herbie-generated binary64 kernel: computes a * ((b + c) + d).
! real(8) is the generator's spelling of a 64-bit real; kept verbatim for report fidelity.
real(8) function code(a, b, c, d)
real(8), intent (in) :: a
real(8), intent (in) :: b
real(8), intent (in) :: c
real(8), intent (in) :: d
code = a * ((b + c) + d)
end function
// Herbie-generated binary64 kernel: computes a * ((b + c) + d).
public static double code(double a, double b, double c, double d) {
return a * ((b + c) + d);
}
# Herbie-generated kernel: computes a * ((b + c) + d) in Python floats (binary64).
def code(a, b, c, d): return a * ((b + c) + d)
# Herbie-generated kernel: a * ((b + c) + d); Float64() wraps each step to force binary64 rounding.
function code(a, b, c, d) return Float64(a * Float64(Float64(b + c) + d)) end
% Herbie-generated kernel: computes a * ((b + c) + d) in double precision.
function tmp = code(a, b, c, d) tmp = a * ((b + c) + d); end
(* Herbie-generated kernel: a * ((b + c) + d); each N[..., $MachinePrecision] mimics per-operation binary64 rounding. *)
code[a_, b_, c_, d_] := N[(a * N[(N[(b + c), $MachinePrecision] + d), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
a \cdot \left(\left(b + c\right) + d\right)
\end{array}
(FPCore (a b c d) :precision binary64 (+ (* a (+ b c)) (* a d)))
// Herbie alternative: distributed form (a * (b + c)) + (a * d); algebraically equal
// to a * ((b + c) + d) but with different floating-point rounding behavior.
double code(double a, double b, double c, double d) {
return (a * (b + c)) + (a * d);
}
! Herbie alternative: distributed form (a * (b + c)) + (a * d); algebraically equal
! to a * ((b + c) + d) but with different floating-point rounding behavior.
real(8) function code(a, b, c, d)
real(8), intent (in) :: a
real(8), intent (in) :: b
real(8), intent (in) :: c
real(8), intent (in) :: d
code = (a * (b + c)) + (a * d)
end function
// Herbie alternative: distributed form (a * (b + c)) + (a * d).
public static double code(double a, double b, double c, double d) {
return (a * (b + c)) + (a * d);
}
# Herbie alternative: distributed form (a * (b + c)) + (a * d).
def code(a, b, c, d): return (a * (b + c)) + (a * d)
# Herbie alternative: (a * (b + c)) + (a * d); Float64() wraps force binary64 rounding per step.
function code(a, b, c, d) return Float64(Float64(a * Float64(b + c)) + Float64(a * d)) end
% Herbie alternative: distributed form (a * (b + c)) + (a * d).
function tmp = code(a, b, c, d) tmp = (a * (b + c)) + (a * d); end
(* Herbie alternative: (a * (b + c)) + (a * d) with per-operation $MachinePrecision rounding. *)
code[a_, b_, c_, d_] := N[(N[(a * N[(b + c), $MachinePrecision]), $MachinePrecision] + N[(a * d), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
a \cdot \left(b + c\right) + a \cdot d
\end{array}
Initial program 99.9%
distribute-lft-in 100.0%
Applied egg-rr 100.0%
(FPCore (a b c d) :precision binary64 (* a (+ (+ b c) d)))
// Herbie-generated binary64 kernel: computes a * ((b + c) + d).
double code(double a, double b, double c, double d) {
return a * ((b + c) + d);
}
! Herbie-generated binary64 kernel: computes a * ((b + c) + d).
! real(8) is the generator's spelling of a 64-bit real; kept verbatim for report fidelity.
real(8) function code(a, b, c, d)
real(8), intent (in) :: a
real(8), intent (in) :: b
real(8), intent (in) :: c
real(8), intent (in) :: d
code = a * ((b + c) + d)
end function
// Herbie-generated binary64 kernel: computes a * ((b + c) + d).
public static double code(double a, double b, double c, double d) {
return a * ((b + c) + d);
}
# Herbie-generated kernel: computes a * ((b + c) + d) in Python floats (binary64).
def code(a, b, c, d): return a * ((b + c) + d)
# Herbie-generated kernel: a * ((b + c) + d); Float64() wraps each step to force binary64 rounding.
function code(a, b, c, d) return Float64(a * Float64(Float64(b + c) + d)) end
% Herbie-generated kernel: computes a * ((b + c) + d) in double precision.
function tmp = code(a, b, c, d) tmp = a * ((b + c) + d); end
(* Herbie-generated kernel: a * ((b + c) + d); each N[..., $MachinePrecision] mimics per-operation binary64 rounding. *)
code[a_, b_, c_, d_] := N[(a * N[(N[(b + c), $MachinePrecision] + d), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
a \cdot \left(\left(b + c\right) + d\right)
\end{array}
Initial program 99.9%
(FPCore (a b c d) :precision binary64 (* a (+ c d)))
// Herbie alternative from a Taylor expansion in b around 0: approximates the
// original as a * (c + d). Parameter b is intentionally unused; the 4-argument
// signature is kept so all alternatives share one interface.
double code(double a, double b, double c, double d) {
return a * (c + d);
}
! Herbie alternative from a Taylor expansion in b around 0: approximates the
! original as a * (c + d). Argument b is intentionally unused; the 4-argument
! interface is kept so all alternatives are interchangeable.
real(8) function code(a, b, c, d)
real(8), intent (in) :: a
real(8), intent (in) :: b
real(8), intent (in) :: c
real(8), intent (in) :: d
code = a * (c + d)
end function
// Herbie alternative (Taylor expansion in b around 0): a * (c + d); b intentionally unused.
public static double code(double a, double b, double c, double d) {
return a * (c + d);
}
# Herbie alternative (Taylor expansion in b around 0): a * (c + d); b intentionally unused.
def code(a, b, c, d): return a * (c + d)
# Herbie alternative (Taylor expansion in b around 0): a * (c + d); b intentionally unused.
function code(a, b, c, d) return Float64(a * Float64(c + d)) end
% Herbie alternative (Taylor expansion in b around 0): a * (c + d); b intentionally unused.
function tmp = code(a, b, c, d) tmp = a * (c + d); end
(* Herbie alternative (Taylor expansion in b around 0): a * (c + d); b intentionally unused. *)
code[a_, b_, c_, d_] := N[(a * N[(c + d), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
a \cdot \left(c + d\right)
\end{array}
Initial program 99.9%
Taylor expanded in b around 0 66.4%
(FPCore (a b c d) :precision binary64 (* a d))
// Herbie alternative from a Taylor expansion in d around inf: approximates the
// original as a * d. Parameters b and c are intentionally unused; the shared
// 4-argument signature is kept for interchangeability.
double code(double a, double b, double c, double d) {
return a * d;
}
! Herbie alternative from a Taylor expansion in d around inf: approximates the
! original as a * d. Arguments b and c are intentionally unused; the shared
! 4-argument interface is kept for interchangeability.
real(8) function code(a, b, c, d)
real(8), intent (in) :: a
real(8), intent (in) :: b
real(8), intent (in) :: c
real(8), intent (in) :: d
code = a * d
end function
// Herbie alternative (Taylor expansion in d around inf): a * d; b and c intentionally unused.
public static double code(double a, double b, double c, double d) {
return a * d;
}
# Herbie alternative (Taylor expansion in d around inf): a * d; b and c intentionally unused.
def code(a, b, c, d): return a * d
# Herbie alternative (Taylor expansion in d around inf): a * d; b and c intentionally unused.
function code(a, b, c, d) return Float64(a * d) end
% Herbie alternative (Taylor expansion in d around inf): a * d; b and c intentionally unused.
function tmp = code(a, b, c, d) tmp = a * d; end
(* Herbie alternative (Taylor expansion in d around inf): a * d; b and c intentionally unused. *)
code[a_, b_, c_, d_] := N[(a * d), $MachinePrecision]
\begin{array}{l}
\\
a \cdot d
\end{array}
Initial program 99.9%
Taylor expanded in d around inf 37.6%
(FPCore (a b c d) :precision binary64 (+ (* a b) (* a (+ c d))))
// Herbie alternative: (a * b) + (a * (c + d)); algebraically equal to
// a * ((b + c) + d) but with different floating-point rounding behavior.
double code(double a, double b, double c, double d) {
return (a * b) + (a * (c + d));
}
! Herbie alternative: (a * b) + (a * (c + d)); algebraically equal to
! a * ((b + c) + d) but with different floating-point rounding behavior.
real(8) function code(a, b, c, d)
real(8), intent (in) :: a
real(8), intent (in) :: b
real(8), intent (in) :: c
real(8), intent (in) :: d
code = (a * b) + (a * (c + d))
end function
// Herbie alternative: (a * b) + (a * (c + d)).
public static double code(double a, double b, double c, double d) {
return (a * b) + (a * (c + d));
}
# Herbie alternative: (a * b) + (a * (c + d)).
def code(a, b, c, d): return (a * b) + (a * (c + d))
# Herbie alternative: (a * b) + (a * (c + d)); Float64() wraps force binary64 rounding per step.
function code(a, b, c, d) return Float64(Float64(a * b) + Float64(a * Float64(c + d))) end
% Herbie alternative: (a * b) + (a * (c + d)).
function tmp = code(a, b, c, d) tmp = (a * b) + (a * (c + d)); end
(* Herbie alternative: (a * b) + (a * (c + d)) with per-operation $MachinePrecision rounding. *)
code[a_, b_, c_, d_] := N[(N[(a * b), $MachinePrecision] + N[(a * N[(c + d), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
a \cdot b + a \cdot \left(c + d\right)
\end{array}
herbie shell --seed 2024160
(FPCore (a b c d)
:name "Expression, p14"
:precision binary64
:pre (and (and (and (and (<= 56789.0 a) (<= a 98765.0)) (and (<= 0.0 b) (<= b 1.0))) (and (<= 0.0 c) (<= c 0.0016773))) (and (<= 0.0 d) (<= d 0.0016773)))
:alt
(! :herbie-platform default (+ (* a b) (* a (+ c d))))
(* a (+ (+ b c) d)))